portable_atomic/imp/fallback/seq_lock_wide.rs
// SPDX-License-Identifier: Apache-2.0 OR MIT

// Adapted from https://github.com/crossbeam-rs/crossbeam/blob/crossbeam-utils-0.8.7/crossbeam-utils/src/atomic/seq_lock_wide.rs.

use core::{
    mem::ManuallyDrop,
    sync::atomic::{self, AtomicUsize, Ordering},
};

use super::utils::Backoff;

// See mod.rs for details.
pub(super) type AtomicChunk = AtomicUsize;
pub(super) type Chunk = usize;

/// A simple stamped lock.
///
/// The state is represented as two `AtomicUsize`s: `state_hi` for the high bits and `state_lo`
/// for the low bits.
pub(super) struct SeqLock {
    /// The high bits of the current state of the lock.
    state_hi: AtomicUsize,

    /// The low bits of the current state of the lock.
    ///
    /// All bits except the least significant one hold the current stamp. When locked, `state_lo`
    /// equals 1 and doesn't contain a valid stamp.
    state_lo: AtomicUsize,
}
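
// Illustrative state walk-through: a fresh lock starts at `(state_hi, state_lo) == (0, 0)`.
// Taking the write lock swaps `state_lo` to 1 (odd, i.e. locked); releasing it stores the
// saved stamp plus 2, the next even value. `state_hi` is bumped only when `state_lo` wraps
// around, so the pair behaves as a single double-width stamp counter.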

impl SeqLock {
    #[inline]
    pub(super) const fn new() -> Self {
        Self { state_hi: AtomicUsize::new(0), state_lo: AtomicUsize::new(0) }
    }

    /// If not locked, returns the current stamp.
    ///
    /// This method should be called before optimistic reads.
    #[inline]
    pub(super) fn optimistic_read(&self) -> Option<(usize, usize)> {
        // The acquire loads from `state_hi` and `state_lo` synchronize with the release stores in
        // `SeqLockWriteGuard::drop` and `SeqLockWriteGuard::abort`.
        //
        // As a consequence, we can make sure that (1) all writes within the era of `state_hi - 1`
        // happen before now; and therefore, (2) if `state_lo` is even, all writes within the
        // critical section of (`state_hi`, `state_lo`) happen before now.
        let state_hi = self.state_hi.load(Ordering::Acquire);
        let state_lo = self.state_lo.load(Ordering::Acquire);
        if state_lo == 1 { None } else { Some((state_hi, state_lo)) }
    }

    /// Returns `true` if the current stamp is equal to `stamp`.
    ///
    /// This method should be called after optimistic reads to check whether they are valid. The
    /// argument `stamp` should correspond to the one returned by the `optimistic_read` method.
    #[inline]
    pub(super) fn validate_read(&self, stamp: (usize, usize)) -> bool {
        // Thanks to the fence, if we notice any modification to the data at the critical section
        // of `(stamp.0, stamp.1)`, then the critical section's store of 1 to `state_lo` must also
        // be visible to us.
        atomic::fence(Ordering::Acquire);

        // So if `state_lo` coincides with `stamp.1`, then either (1) we noticed no modification
        // to the data after the critical section of `(stamp.0, stamp.1)`, or (2) `state_lo`
        // wrapped around.
        //
        // If (2) is the case, the acquire ordering ensures we see the new value of `state_hi`.
        let state_lo = self.state_lo.load(Ordering::Acquire);

        // If (2) is the case and `state_hi` coincides with `stamp.0`, then `state_hi` also
        // wrapped around, a case in which we give up on correctly validating the read.
        let state_hi = self.state_hi.load(Ordering::Relaxed);

        // Except for the case where both `state_hi` and `state_lo` wrapped around, the following
        // condition implies that we noticed no modification to the data after the critical
        // section of `(stamp.0, stamp.1)`.
        (state_hi, state_lo) == stamp
    }
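
    // How callers typically combine `optimistic_read` and `validate_read` (a sketch, not code
    // from this module; `read_data` stands for a hypothetical tentative read of the protected
    // data):
    //
    //     let value = loop {
    //         if let Some(stamp) = lock.optimistic_read() {
    //             let value = read_data(); // may observe a torn value; don't use it yet
    //             if lock.validate_read(stamp) {
    //                 break value; // no writer intervened, so the read is consistent
    //             }
    //         }
    //         // Locked or invalidated: retry, or fall back to `lock.write()`.
    //     };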

    /// Grabs the lock for writing.
    #[inline]
    pub(super) fn write(&self) -> SeqLockWriteGuard<'_> {
        let mut backoff = Backoff::new();
        loop {
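            // Try to acquire the lock by swapping in 1 (the "locked" value). The acquire
            // ordering pairs with the release stores that unlock in `SeqLockWriteGuard::drop`
            // and `SeqLockWriteGuard::abort`, so this writer sees the previous critical
            // section's writes.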
            let previous = self.state_lo.swap(1, Ordering::Acquire);

            if previous != 1 {
                // To synchronize with the acquire fence in `validate_read` via any modification
                // to the data at the critical section of (`state_hi`, `previous`).
                atomic::fence(Ordering::Release);

                return SeqLockWriteGuard { lock: self, state_lo: previous };
            }

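            // Another writer holds the lock. Spin with plain relaxed loads, which are cheaper
            // than repeated `swap`s on the contended cache line, until the lock looks released,
            // then retry the swap above.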
            while self.state_lo.load(Ordering::Relaxed) == 1 {
                backoff.snooze();
            }
        }
    }
}

/// An RAII guard that releases the lock and increments the stamp when dropped.
#[must_use]
pub(super) struct SeqLockWriteGuard<'a> {
    /// The parent lock.
    lock: &'a SeqLock,

    /// The stamp before locking.
    state_lo: usize,
}

impl SeqLockWriteGuard<'_> {
    /// Releases the lock without incrementing the stamp.
    #[inline]
    pub(super) fn abort(self) {
        // We specifically don't want `Drop::drop` to run, since that's what increments the stamp.
        let this = ManuallyDrop::new(self);

        // Restore the stamp.
        //
        // Release ordering for synchronizing with `optimistic_read`.
        this.lock.state_lo.store(this.state_lo, Ordering::Release);
    }
}

impl Drop for SeqLockWriteGuard<'_> {
    #[inline]
    fn drop(&mut self) {
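        // The saved stamp is even, so adding 2 yields the next even stamp. Storing it below
        // both releases the lock and invalidates any optimistic reads that overlapped this
        // critical section.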
        let state_lo = self.state_lo.wrapping_add(2);

        // Increase the high bits if the low bits wrap around.
        //
        // Release ordering for synchronizing with `optimistic_read`.
        if state_lo == 0 {
            let state_hi = self.lock.state_hi.load(Ordering::Relaxed);
            self.lock.state_hi.store(state_hi.wrapping_add(1), Ordering::Release);
        }

        // Release the lock and increment the stamp.
        //
        // Release ordering for synchronizing with `optimistic_read`.
        self.lock.state_lo.store(state_lo, Ordering::Release);
    }
}

#[cfg(test)]
mod tests {
    use super::SeqLock;

    #[test]
    fn smoke() {
        let lock = SeqLock::new();
        let before = lock.optimistic_read().unwrap();
        assert!(lock.validate_read(before));
        {
            let _guard = lock.write();
        }
        assert!(!lock.validate_read(before));
        let after = lock.optimistic_read().unwrap();
        assert_ne!(before, after);
    }

    #[test]
    fn test_abort() {
        let lock = SeqLock::new();
        let before = lock.optimistic_read().unwrap();
        {
            let guard = lock.write();
            guard.abort();
        }
        let after = lock.optimistic_read().unwrap();
        assert_eq!(before, after, "aborted write does not update the stamp");
    }
}