#[cfg(not(any(
    all(target_arch = "avr", portable_atomic_no_asm),
    feature = "critical-section",
)))]
use self::arch::atomic;

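// Architecture-specific implementation, selected via `cfg_attr(path = ...)`.
// Each module provides interrupt disable/restore and, where the hardware has
// them, native atomic operations. The module is not compiled at all when the
// "critical-section" feature supplies the critical sections instead.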
#[cfg(not(feature = "critical-section"))]
#[cfg_attr(
    all(
        target_arch = "arm",
        any(target_feature = "mclass", portable_atomic_target_feature = "mclass"),
    ),
    path = "armv6m.rs"
)]
#[cfg_attr(
    all(
        target_arch = "arm",
        not(any(target_feature = "mclass", portable_atomic_target_feature = "mclass")),
    ),
    path = "armv4t.rs"
)]
#[cfg_attr(target_arch = "avr", path = "avr.rs")]
#[cfg_attr(target_arch = "msp430", path = "msp430.rs")]
#[cfg_attr(any(target_arch = "riscv32", target_arch = "riscv64"), path = "riscv.rs")]
#[cfg_attr(target_arch = "xtensa", path = "xtensa.rs")]
mod arch;

use core::{cell::UnsafeCell, sync::atomic::Ordering};

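// With the "critical-section" feature we defer to an unknown, user-provided
// implementation, so we conservatively report the types as not lock-free.
// Without it, every operation runs with interrupts disabled on a single-core
// system and is therefore effectively atomic, so we report lock-free.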
#[cfg(feature = "critical-section")]
const IS_ALWAYS_LOCK_FREE: bool = false;
#[cfg(not(feature = "critical-section"))]
const IS_ALWAYS_LOCK_FREE: bool = true;

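// Runs `f` inside a critical section: either the one provided by the
// `critical-section` crate, or a plain interrupt-disable/restore pair.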
#[cfg(feature = "critical-section")]
#[inline]
fn with<F, R>(f: F) -> R
where
    F: FnOnce() -> R,
{
    critical_section::with(|_| f())
}
#[cfg(not(feature = "critical-section"))]
#[inline(always)]
fn with<F, R>(f: F) -> R
where
    F: FnOnce() -> R,
{
    // Disable interrupts, remembering the previous interrupt state.
    let state = arch::disable();

    let r = f();

    // SAFETY: `state` was returned by `arch::disable` above and interrupts
    // are still disabled, so restoring the previous state is sound.
    unsafe { arch::restore(state) }

    r
}

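// The `repr(C, align(N))` attributes below give `AtomicPtr` the same layout
// as the native `atomic::AtomicPtr`, which is what makes the pointer cast in
// `as_native` sound.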
#[cfg_attr(target_pointer_width = "16", repr(C, align(2)))]
#[cfg_attr(target_pointer_width = "32", repr(C, align(4)))]
#[cfg_attr(target_pointer_width = "64", repr(C, align(8)))]
#[cfg_attr(target_pointer_width = "128", repr(C, align(16)))]
pub(crate) struct AtomicPtr<T> {
    p: UnsafeCell<*mut T>,
}

// SAFETY: any data races are prevented by disabling interrupts or by the
// user-provided critical section around every access.
unsafe impl<T> Send for AtomicPtr<T> {}
unsafe impl<T> Sync for AtomicPtr<T> {}

impl<T> AtomicPtr<T> {
    #[inline]
    pub(crate) const fn new(p: *mut T) -> Self {
        Self { p: UnsafeCell::new(p) }
    }

    #[inline]
    pub(crate) fn is_lock_free() -> bool {
        Self::IS_ALWAYS_LOCK_FREE
    }
    pub(crate) const IS_ALWAYS_LOCK_FREE: bool = IS_ALWAYS_LOCK_FREE;

    #[inline]
    #[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
    pub(crate) fn load(&self, order: Ordering) -> *mut T {
        crate::utils::assert_load_ordering(order);
        #[cfg(not(any(target_arch = "avr", feature = "critical-section")))]
        {
            self.as_native().load(order)
        }
        #[cfg(any(target_arch = "avr", feature = "critical-section"))]
        // SAFETY: any data races are prevented by the critical section, and
        // the raw pointer is valid because we got it from a reference.
        with(|| unsafe { self.p.get().read() })
    }

    #[inline]
    #[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
    pub(crate) fn store(&self, ptr: *mut T, order: Ordering) {
        crate::utils::assert_store_ordering(order);
        #[cfg(not(any(target_arch = "avr", feature = "critical-section")))]
        {
            self.as_native().store(ptr, order);
        }
        #[cfg(any(target_arch = "avr", feature = "critical-section"))]
        // SAFETY: same as `load`.
        with(|| unsafe { self.p.get().write(ptr) });
    }

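    // On RISC-V with the Zaamo extension (or portable_atomic_force_amo), swap
    // maps to a native AMO instruction; otherwise it is emulated inside a
    // critical section.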
    #[inline]
    pub(crate) fn swap(&self, ptr: *mut T, order: Ordering) -> *mut T {
        let _ = order;
        #[cfg(all(
            any(target_arch = "riscv32", target_arch = "riscv64"),
            not(feature = "critical-section"),
            any(
                portable_atomic_force_amo,
                target_feature = "zaamo",
                portable_atomic_target_feature = "zaamo",
            ),
        ))]
        {
            self.as_native().swap(ptr, order)
        }
        #[cfg(not(all(
            any(target_arch = "riscv32", target_arch = "riscv64"),
            not(feature = "critical-section"),
            any(
                portable_atomic_force_amo,
                target_feature = "zaamo",
                portable_atomic_target_feature = "zaamo",
            ),
        )))]
        with(|| unsafe {
            let prev = self.p.get().read();
            self.p.get().write(ptr);
            prev
        })
    }

    #[inline]
    #[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
    pub(crate) fn compare_exchange(
        &self,
        current: *mut T,
        new: *mut T,
        success: Ordering,
        failure: Ordering,
    ) -> Result<*mut T, *mut T> {
        crate::utils::assert_compare_exchange_ordering(success, failure);
        with(|| unsafe {
            let prev = self.p.get().read();
            if prev == current {
                self.p.get().write(new);
                Ok(prev)
            } else {
                Err(prev)
            }
        })
    }

    #[inline]
    #[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
    pub(crate) fn compare_exchange_weak(
        &self,
        current: *mut T,
        new: *mut T,
        success: Ordering,
        failure: Ordering,
    ) -> Result<*mut T, *mut T> {
        self.compare_exchange(current, new, success, failure)
    }

    #[inline]
    pub(crate) const fn as_ptr(&self) -> *mut *mut T {
        self.p.get()
    }

    #[cfg(not(any(target_arch = "avr", feature = "critical-section")))]
    #[inline(always)]
    fn as_native(&self) -> &atomic::AtomicPtr<T> {
        // SAFETY: `Self` has the same in-memory representation as the native
        // atomic type (see the `repr` attributes on the struct).
        unsafe { &*(self as *const Self as *const atomic::AtomicPtr<T>) }
    }
}

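// Defines the integer atomic types. The arms:
// - base: the struct itself plus constructors and accessors.
// - load_store_atomic: native load/store, with RMW ops provided by one of the
//   `cas` arms below.
// - all_critical_section: everything, including load/store, runs inside a
//   critical section.
// - cas / cas[emulate] / cas[sub_word]: RMW ops, either mapped to native
//   instructions or emulated in critical sections.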
macro_rules! atomic_int {
    (base, $atomic_type:ident, $int_type:ident, $align:literal) => {
        #[repr(C, align($align))]
        pub(crate) struct $atomic_type {
            v: UnsafeCell<$int_type>,
        }

        // SAFETY: any data races are prevented by disabling interrupts or by
        // the user-provided critical section around every access.
        unsafe impl Sync for $atomic_type {}

        impl $atomic_type {
            #[inline]
            pub(crate) const fn new(v: $int_type) -> Self {
                Self { v: UnsafeCell::new(v) }
            }

            #[inline]
            pub(crate) fn is_lock_free() -> bool {
                Self::IS_ALWAYS_LOCK_FREE
            }
            pub(crate) const IS_ALWAYS_LOCK_FREE: bool = IS_ALWAYS_LOCK_FREE;

            #[inline]
            pub(crate) const fn as_ptr(&self) -> *mut $int_type {
                self.v.get()
            }
        }
    };
    (load_store_atomic $([$kind:ident])?, $atomic_type:ident, $int_type:ident, $align:literal) => {
        atomic_int!(base, $atomic_type, $int_type, $align);
        #[cfg(all(
            any(target_arch = "riscv32", target_arch = "riscv64"),
            not(feature = "critical-section"),
            any(
                portable_atomic_force_amo,
                target_feature = "zaamo",
                portable_atomic_target_feature = "zaamo",
            ),
        ))]
        atomic_int!(cas $([$kind])?, $atomic_type, $int_type);
        #[cfg(not(all(
            any(target_arch = "riscv32", target_arch = "riscv64"),
            not(feature = "critical-section"),
            any(
                portable_atomic_force_amo,
                target_feature = "zaamo",
                portable_atomic_target_feature = "zaamo",
            ),
        )))]
        atomic_int!(cas[emulate], $atomic_type, $int_type);
        impl $atomic_type {
            #[inline]
            #[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
            pub(crate) fn load(&self, order: Ordering) -> $int_type {
                crate::utils::assert_load_ordering(order);
                #[cfg(not(any(
                    all(target_arch = "avr", portable_atomic_no_asm),
                    feature = "critical-section",
                )))]
                {
                    self.as_native().load(order)
                }
                #[cfg(any(
                    all(target_arch = "avr", portable_atomic_no_asm),
                    feature = "critical-section",
                ))]
                with(|| unsafe { self.v.get().read() })
            }

            #[inline]
            #[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
            pub(crate) fn store(&self, val: $int_type, order: Ordering) {
                crate::utils::assert_store_ordering(order);
                #[cfg(not(any(
                    all(target_arch = "avr", portable_atomic_no_asm),
                    feature = "critical-section",
                )))]
                {
                    self.as_native().store(val, order);
                }
                #[cfg(any(
                    all(target_arch = "avr", portable_atomic_no_asm),
                    feature = "critical-section",
                ))]
                with(|| unsafe { self.v.get().write(val) });
            }

            #[cfg(not(any(
                all(target_arch = "avr", portable_atomic_no_asm),
                feature = "critical-section",
            )))]
            #[inline(always)]
            fn as_native(&self) -> &atomic::$atomic_type {
                // SAFETY: `Self` has the same in-memory representation as the
                // native atomic type (see the `repr` attribute in `base`).
                unsafe { &*(self as *const Self as *const atomic::$atomic_type) }
            }
        }

        #[cfg(not(all(target_arch = "msp430", not(feature = "critical-section"))))]
        impl_default_no_fetch_ops!($atomic_type, $int_type);
        impl_default_bit_opts!($atomic_type, $int_type);
        #[cfg(not(all(target_arch = "msp430", not(feature = "critical-section"))))]
        impl $atomic_type {
            #[inline]
            pub(crate) fn not(&self, order: Ordering) {
                self.fetch_not(order);
            }
        }
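        // MSP430 can perform add/sub/and/or/xor/not directly on memory as
        // single instructions, but those instructions cannot return the
        // previous value, so only the non-fetch forms go to the native
        // implementation here.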
        #[cfg(all(target_arch = "msp430", not(feature = "critical-section")))]
        impl $atomic_type {
            #[inline]
            pub(crate) fn add(&self, val: $int_type, order: Ordering) {
                self.as_native().add(val, order);
            }
            #[inline]
            pub(crate) fn sub(&self, val: $int_type, order: Ordering) {
                self.as_native().sub(val, order);
            }
            #[inline]
            pub(crate) fn and(&self, val: $int_type, order: Ordering) {
                self.as_native().and(val, order);
            }
            #[inline]
            pub(crate) fn or(&self, val: $int_type, order: Ordering) {
                self.as_native().or(val, order);
            }
            #[inline]
            pub(crate) fn xor(&self, val: $int_type, order: Ordering) {
                self.as_native().xor(val, order);
            }
            #[inline]
            pub(crate) fn not(&self, order: Ordering) {
                self.as_native().not(order);
            }
        }
    };
    (all_critical_section, $atomic_type:ident, $int_type:ident, $align:literal) => {
        atomic_int!(base, $atomic_type, $int_type, $align);
        atomic_int!(cas[emulate], $atomic_type, $int_type);
        impl_default_no_fetch_ops!($atomic_type, $int_type);
        impl_default_bit_opts!($atomic_type, $int_type);
        impl $atomic_type {
            #[inline]
            #[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
            pub(crate) fn load(&self, order: Ordering) -> $int_type {
                crate::utils::assert_load_ordering(order);
                with(|| unsafe { self.v.get().read() })
            }

            #[inline]
            #[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
            pub(crate) fn store(&self, val: $int_type, order: Ordering) {
                crate::utils::assert_store_ordering(order);
                with(|| unsafe { self.v.get().write(val) });
            }

            #[inline]
            pub(crate) fn not(&self, order: Ordering) {
                self.fetch_not(order);
            }
        }
    };
    (cas[emulate], $atomic_type:ident, $int_type:ident) => {
        impl $atomic_type {
            #[inline]
            pub(crate) fn swap(&self, val: $int_type, _order: Ordering) -> $int_type {
                with(|| unsafe {
                    let prev = self.v.get().read();
                    self.v.get().write(val);
                    prev
                })
            }

            #[inline]
            #[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
            pub(crate) fn compare_exchange(
                &self,
                current: $int_type,
                new: $int_type,
                success: Ordering,
                failure: Ordering,
            ) -> Result<$int_type, $int_type> {
                crate::utils::assert_compare_exchange_ordering(success, failure);
                with(|| unsafe {
                    let prev = self.v.get().read();
                    if prev == current {
                        self.v.get().write(new);
                        Ok(prev)
                    } else {
                        Err(prev)
                    }
                })
            }

            #[inline]
            #[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
            pub(crate) fn compare_exchange_weak(
                &self,
                current: $int_type,
                new: $int_type,
                success: Ordering,
                failure: Ordering,
            ) -> Result<$int_type, $int_type> {
                self.compare_exchange(current, new, success, failure)
            }

            #[inline]
            pub(crate) fn fetch_add(&self, val: $int_type, _order: Ordering) -> $int_type {
                with(|| unsafe {
                    let prev = self.v.get().read();
                    self.v.get().write(prev.wrapping_add(val));
                    prev
                })
            }

            #[inline]
            pub(crate) fn fetch_sub(&self, val: $int_type, _order: Ordering) -> $int_type {
                with(|| unsafe {
                    let prev = self.v.get().read();
                    self.v.get().write(prev.wrapping_sub(val));
                    prev
                })
            }

            #[inline]
            pub(crate) fn fetch_and(&self, val: $int_type, _order: Ordering) -> $int_type {
                with(|| unsafe {
                    let prev = self.v.get().read();
                    self.v.get().write(prev & val);
                    prev
                })
            }

            #[inline]
            pub(crate) fn fetch_nand(&self, val: $int_type, _order: Ordering) -> $int_type {
                with(|| unsafe {
                    let prev = self.v.get().read();
                    self.v.get().write(!(prev & val));
                    prev
                })
            }

            #[inline]
            pub(crate) fn fetch_or(&self, val: $int_type, _order: Ordering) -> $int_type {
                with(|| unsafe {
                    let prev = self.v.get().read();
                    self.v.get().write(prev | val);
                    prev
                })
            }

            #[inline]
            pub(crate) fn fetch_xor(&self, val: $int_type, _order: Ordering) -> $int_type {
                with(|| unsafe {
                    let prev = self.v.get().read();
                    self.v.get().write(prev ^ val);
                    prev
                })
            }

            #[inline]
            pub(crate) fn fetch_max(&self, val: $int_type, _order: Ordering) -> $int_type {
                with(|| unsafe {
                    let prev = self.v.get().read();
                    self.v.get().write(core::cmp::max(prev, val));
                    prev
                })
            }

            #[inline]
            pub(crate) fn fetch_min(&self, val: $int_type, _order: Ordering) -> $int_type {
                with(|| unsafe {
                    let prev = self.v.get().read();
                    self.v.get().write(core::cmp::min(prev, val));
                    prev
                })
            }

            #[inline]
            pub(crate) fn fetch_not(&self, _order: Ordering) -> $int_type {
                with(|| unsafe {
                    let prev = self.v.get().read();
                    self.v.get().write(!prev);
                    prev
                })
            }

            #[inline]
            pub(crate) fn fetch_neg(&self, _order: Ordering) -> $int_type {
                with(|| unsafe {
                    let prev = self.v.get().read();
                    self.v.get().write(prev.wrapping_neg());
                    prev
                })
            }
            #[inline]
            pub(crate) fn neg(&self, order: Ordering) {
                self.fetch_neg(order);
            }
        }
    };
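    // Used when native AMO instructions are available (RISC-V Zaamo): simple
    // RMW ops map directly to AMOs, while compare_exchange, fetch_nand, and
    // fetch_neg have no AMO equivalent and still go through a critical
    // section.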
    (cas, $atomic_type:ident, $int_type:ident) => {
        impl $atomic_type {
            #[inline]
            pub(crate) fn swap(&self, val: $int_type, order: Ordering) -> $int_type {
                self.as_native().swap(val, order)
            }

            #[inline]
            #[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
            pub(crate) fn compare_exchange(
                &self,
                current: $int_type,
                new: $int_type,
                success: Ordering,
                failure: Ordering,
            ) -> Result<$int_type, $int_type> {
                crate::utils::assert_compare_exchange_ordering(success, failure);
                with(|| unsafe {
                    let prev = self.v.get().read();
                    if prev == current {
                        self.v.get().write(new);
                        Ok(prev)
                    } else {
                        Err(prev)
                    }
                })
            }

            #[inline]
            #[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
            pub(crate) fn compare_exchange_weak(
                &self,
                current: $int_type,
                new: $int_type,
                success: Ordering,
                failure: Ordering,
            ) -> Result<$int_type, $int_type> {
                self.compare_exchange(current, new, success, failure)
            }

            #[inline]
            pub(crate) fn fetch_add(&self, val: $int_type, order: Ordering) -> $int_type {
                self.as_native().fetch_add(val, order)
            }
            #[inline]
            pub(crate) fn fetch_sub(&self, val: $int_type, order: Ordering) -> $int_type {
                self.as_native().fetch_sub(val, order)
            }
            #[inline]
            pub(crate) fn fetch_and(&self, val: $int_type, order: Ordering) -> $int_type {
                self.as_native().fetch_and(val, order)
            }

            #[inline]
            pub(crate) fn fetch_nand(&self, val: $int_type, _order: Ordering) -> $int_type {
                with(|| unsafe {
                    let prev = self.v.get().read();
                    self.v.get().write(!(prev & val));
                    prev
                })
            }

            #[inline]
            pub(crate) fn fetch_or(&self, val: $int_type, order: Ordering) -> $int_type {
                self.as_native().fetch_or(val, order)
            }
            #[inline]
            pub(crate) fn fetch_xor(&self, val: $int_type, order: Ordering) -> $int_type {
                self.as_native().fetch_xor(val, order)
            }
            #[inline]
            pub(crate) fn fetch_max(&self, val: $int_type, order: Ordering) -> $int_type {
                self.as_native().fetch_max(val, order)
            }
            #[inline]
            pub(crate) fn fetch_min(&self, val: $int_type, order: Ordering) -> $int_type {
                self.as_native().fetch_min(val, order)
            }
            #[inline]
            pub(crate) fn fetch_not(&self, order: Ordering) -> $int_type {
                self.as_native().fetch_not(order)
            }

            #[inline]
            pub(crate) fn fetch_neg(&self, _order: Ordering) -> $int_type {
                with(|| unsafe {
                    let prev = self.v.get().read();
                    self.v.get().write(prev.wrapping_neg());
                    prev
                })
            }
            #[inline]
            pub(crate) fn neg(&self, order: Ordering) {
                self.fetch_neg(order);
            }
        }
    };
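    // Sub-word (8-bit/16-bit) atomics. The Zabha extension adds byte and
    // halfword AMOs, in which case the plain `cas` arm applies. Without it,
    // and/or/xor can still be handled natively (they can be built from
    // word-sized AMOs on the containing word), while the remaining RMW ops
    // fall back to critical sections.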
    (cas[sub_word], $atomic_type:ident, $int_type:ident) => {
        #[cfg(any(target_feature = "zabha", portable_atomic_target_feature = "zabha"))]
        atomic_int!(cas, $atomic_type, $int_type);
        #[cfg(not(any(target_feature = "zabha", portable_atomic_target_feature = "zabha")))]
        impl $atomic_type {
            #[inline]
            pub(crate) fn swap(&self, val: $int_type, _order: Ordering) -> $int_type {
                with(|| unsafe {
                    let prev = self.v.get().read();
                    self.v.get().write(val);
                    prev
                })
            }

            #[inline]
            #[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
            pub(crate) fn compare_exchange(
                &self,
                current: $int_type,
                new: $int_type,
                success: Ordering,
                failure: Ordering,
            ) -> Result<$int_type, $int_type> {
                crate::utils::assert_compare_exchange_ordering(success, failure);
                with(|| unsafe {
                    let prev = self.v.get().read();
                    if prev == current {
                        self.v.get().write(new);
                        Ok(prev)
                    } else {
                        Err(prev)
                    }
                })
            }

            #[inline]
            #[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
            pub(crate) fn compare_exchange_weak(
                &self,
                current: $int_type,
                new: $int_type,
                success: Ordering,
                failure: Ordering,
            ) -> Result<$int_type, $int_type> {
                self.compare_exchange(current, new, success, failure)
            }

            #[inline]
            pub(crate) fn fetch_add(&self, val: $int_type, _order: Ordering) -> $int_type {
                with(|| unsafe {
                    let prev = self.v.get().read();
                    self.v.get().write(prev.wrapping_add(val));
                    prev
                })
            }

            #[inline]
            pub(crate) fn fetch_sub(&self, val: $int_type, _order: Ordering) -> $int_type {
                with(|| unsafe {
                    let prev = self.v.get().read();
                    self.v.get().write(prev.wrapping_sub(val));
                    prev
                })
            }

            #[inline]
            pub(crate) fn fetch_and(&self, val: $int_type, order: Ordering) -> $int_type {
                self.as_native().fetch_and(val, order)
            }

            #[inline]
            pub(crate) fn fetch_nand(&self, val: $int_type, _order: Ordering) -> $int_type {
                with(|| unsafe {
                    let prev = self.v.get().read();
                    self.v.get().write(!(prev & val));
                    prev
                })
            }

            #[inline]
            pub(crate) fn fetch_or(&self, val: $int_type, order: Ordering) -> $int_type {
                self.as_native().fetch_or(val, order)
            }
            #[inline]
            pub(crate) fn fetch_xor(&self, val: $int_type, order: Ordering) -> $int_type {
                self.as_native().fetch_xor(val, order)
            }

            #[inline]
            pub(crate) fn fetch_max(&self, val: $int_type, _order: Ordering) -> $int_type {
                with(|| unsafe {
                    let prev = self.v.get().read();
                    self.v.get().write(core::cmp::max(prev, val));
                    prev
                })
            }

            #[inline]
            pub(crate) fn fetch_min(&self, val: $int_type, _order: Ordering) -> $int_type {
                with(|| unsafe {
                    let prev = self.v.get().read();
                    self.v.get().write(core::cmp::min(prev, val));
                    prev
                })
            }

            #[inline]
            pub(crate) fn fetch_not(&self, order: Ordering) -> $int_type {
                self.as_native().fetch_not(order)
            }

            #[inline]
            pub(crate) fn fetch_neg(&self, _order: Ordering) -> $int_type {
                with(|| unsafe {
                    let prev = self.v.get().read();
                    self.v.get().write(prev.wrapping_neg());
                    prev
                })
            }
            #[inline]
            pub(crate) fn neg(&self, order: Ordering) {
                self.fetch_neg(order);
            }
        }
    };
}

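// Instantiate the atomic types. The macro arm used for each type depends on
// the pointer width and on what the target can do natively: types the target
// can load/store atomically use `load_store_atomic`; the rest (and most of
// AVR) use `all_critical_section`.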
#[cfg(target_pointer_width = "16")]
#[cfg(not(target_arch = "avr"))]
atomic_int!(load_store_atomic, AtomicIsize, isize, 2);
#[cfg(target_pointer_width = "16")]
#[cfg(not(target_arch = "avr"))]
atomic_int!(load_store_atomic, AtomicUsize, usize, 2);
#[cfg(target_arch = "avr")]
atomic_int!(all_critical_section, AtomicIsize, isize, 2);
#[cfg(target_arch = "avr")]
atomic_int!(all_critical_section, AtomicUsize, usize, 2);
#[cfg(target_pointer_width = "32")]
atomic_int!(load_store_atomic, AtomicIsize, isize, 4);
#[cfg(target_pointer_width = "32")]
atomic_int!(load_store_atomic, AtomicUsize, usize, 4);
#[cfg(target_pointer_width = "64")]
atomic_int!(load_store_atomic, AtomicIsize, isize, 8);
#[cfg(target_pointer_width = "64")]
atomic_int!(load_store_atomic, AtomicUsize, usize, 8);
#[cfg(target_pointer_width = "128")]
atomic_int!(load_store_atomic, AtomicIsize, isize, 16);
#[cfg(target_pointer_width = "128")]
atomic_int!(load_store_atomic, AtomicUsize, usize, 16);

#[cfg(not(all(target_arch = "avr", portable_atomic_no_asm)))]
atomic_int!(load_store_atomic[sub_word], AtomicI8, i8, 1);
#[cfg(not(all(target_arch = "avr", portable_atomic_no_asm)))]
atomic_int!(load_store_atomic[sub_word], AtomicU8, u8, 1);
#[cfg(all(target_arch = "avr", portable_atomic_no_asm))]
atomic_int!(all_critical_section, AtomicI8, i8, 1);
#[cfg(all(target_arch = "avr", portable_atomic_no_asm))]
atomic_int!(all_critical_section, AtomicU8, u8, 1);
#[cfg(not(target_arch = "avr"))]
atomic_int!(load_store_atomic[sub_word], AtomicI16, i16, 2);
#[cfg(not(target_arch = "avr"))]
atomic_int!(load_store_atomic[sub_word], AtomicU16, u16, 2);
#[cfg(target_arch = "avr")]
atomic_int!(all_critical_section, AtomicI16, i16, 2);
#[cfg(target_arch = "avr")]
atomic_int!(all_critical_section, AtomicU16, u16, 2);

#[cfg(not(target_pointer_width = "16"))]
atomic_int!(load_store_atomic, AtomicI32, i32, 4);
#[cfg(not(target_pointer_width = "16"))]
atomic_int!(load_store_atomic, AtomicU32, u32, 4);
#[cfg(target_pointer_width = "16")]
#[cfg(any(test, feature = "fallback"))]
atomic_int!(all_critical_section, AtomicI32, i32, 4);
#[cfg(target_pointer_width = "16")]
#[cfg(any(test, feature = "fallback"))]
atomic_int!(all_critical_section, AtomicU32, u32, 4);

cfg_has_fast_atomic_64! {
    atomic_int!(load_store_atomic, AtomicI64, i64, 8);
    atomic_int!(load_store_atomic, AtomicU64, u64, 8);
}
#[cfg(any(test, feature = "fallback"))]
cfg_no_fast_atomic_64! {
    atomic_int!(all_critical_section, AtomicI64, i64, 8);
    atomic_int!(all_critical_section, AtomicU64, u64, 8);
}

#[cfg(any(test, feature = "fallback"))]
atomic_int!(all_critical_section, AtomicI128, i128, 16);
#[cfg(any(test, feature = "fallback"))]
atomic_int!(all_critical_section, AtomicU128, u128, 16);

#[cfg(test)]
mod tests {
    use super::*;

    test_atomic_ptr_single_thread!();
    test_atomic_int_single_thread!(i8);
    test_atomic_int_single_thread!(u8);
    test_atomic_int_single_thread!(i16);
    test_atomic_int_single_thread!(u16);
    test_atomic_int_single_thread!(i32);
    test_atomic_int_single_thread!(u32);
    test_atomic_int_single_thread!(i64);
    test_atomic_int_single_thread!(u64);
    test_atomic_int_single_thread!(i128);
    test_atomic_int_single_thread!(u128);
    test_atomic_int_single_thread!(isize);
    test_atomic_int_single_thread!(usize);
}