|
1 | 1 | //! In-tree implementations of the [`rtic_time::Monotonic`] (reexported) trait for |
2 | 2 | //! timers & clocks found on commonly used microcontrollers. |
3 | 3 | //! |
4 | | -//! If you are using a microcontroller where CAS operations are not available natively, you might |
5 | | -//! have to enable the `critical-section` or `unsafe-assume-single-core` feature of the |
6 | | -//! [`portable-atomic`](https://docs.rs/portable-atomic/latest/portable_atomic/) dependency |
7 | | -//! yourself for this dependency to compile. |
8 | | -//! |
9 | 4 | //! To enable the implementations, you must enable a feature for the specific MCU you're targeting. |
10 | 5 | //! |
11 | 6 | //! # Cortex-M Systick |
@@ -135,3 +130,113 @@ pub(crate) unsafe fn set_monotonic_prio( |
135 | 130 |
|
136 | 131 | nvic.set_priority(interrupt, hw_prio); |
137 | 132 | } |
| 133 | + |
mod atomic {
    //! Use a critical section for atomics in case the HW does not support atomics of specific
    //! sizes.
    //!
    //! When the target natively supports an atomic width (`target_has_atomic`), the
    //! corresponding `core::sync::atomic` type is re-exported unchanged. Otherwise a
    //! drop-in replacement is provided that serializes all accesses through
    //! `critical_section::with`, mirroring the subset of the API used by this crate
    //! (`new`, `load`, `store`, `fetch_add`).

    #![allow(unused)]

    pub use core::sync::atomic::Ordering;

    #[cfg(target_has_atomic = "32")]
    pub use core::sync::atomic::AtomicU32;

    /// Critical-section-backed replacement for [`core::sync::atomic::AtomicU32`] on
    /// targets without native 32-bit atomics.
    #[cfg(not(target_has_atomic = "32"))]
    pub struct AtomicU32(core::cell::UnsafeCell<u32>);

    #[cfg(not(target_has_atomic = "32"))]
    impl AtomicU32 {
        /// Create a new atomic.
        #[inline]
        pub const fn new(val: u32) -> Self {
            Self(core::cell::UnsafeCell::new(val))
        }

        /// Store the value.
        ///
        /// The `Ordering` argument is accepted for API compatibility; the critical
        /// section already provides full serialization.
        #[inline]
        pub fn store(&self, val: u32, _ordering: Ordering) {
            critical_section::with(|_| unsafe {
                // Compiler fences keep surrounding memory accesses from being
                // reordered across the critical section.
                core::sync::atomic::compiler_fence(Ordering::SeqCst);
                self.0.get().write(val);
                core::sync::atomic::compiler_fence(Ordering::SeqCst);
            });
        }

        /// Read the value.
        #[inline]
        pub fn load(&self, _ordering: Ordering) -> u32 {
            critical_section::with(|_| unsafe {
                core::sync::atomic::compiler_fence(Ordering::SeqCst);
                let r = self.0.get().read();
                core::sync::atomic::compiler_fence(Ordering::SeqCst);
                r
            })
        }

        /// Add to the current value, wrapping on overflow, and return the previous
        /// value — same semantics as [`core::sync::atomic::AtomicU32::fetch_add`].
        #[inline]
        pub fn fetch_add(&self, val: u32, _ordering: Ordering) -> u32 {
            critical_section::with(|_| {
                core::sync::atomic::compiler_fence(Ordering::SeqCst);
                let curr = unsafe { self.0.get().read() };
                unsafe { self.0.get().write(curr.wrapping_add(val)) };
                core::sync::atomic::compiler_fence(Ordering::SeqCst);
                curr
            })
        }
    }

    // SAFETY: all access to the inner cell happens inside a critical section,
    // which guarantees exclusive access.
    #[cfg(not(target_has_atomic = "32"))]
    unsafe impl Sync for AtomicU32 {}

    #[cfg(target_has_atomic = "64")]
    pub use core::sync::atomic::AtomicU64;

    /// Critical-section-backed replacement for [`core::sync::atomic::AtomicU64`] on
    /// targets without native 64-bit atomics.
    #[cfg(not(target_has_atomic = "64"))]
    pub struct AtomicU64(core::cell::UnsafeCell<u64>);

    #[cfg(not(target_has_atomic = "64"))]
    impl AtomicU64 {
        /// Create a new atomic.
        #[inline]
        pub const fn new(val: u64) -> Self {
            Self(core::cell::UnsafeCell::new(val))
        }

        /// Store the value.
        ///
        /// The `Ordering` argument is accepted for API compatibility; the critical
        /// section already provides full serialization.
        #[inline]
        pub fn store(&self, val: u64, _ordering: Ordering) {
            critical_section::with(|_| unsafe {
                // Compiler fences keep surrounding memory accesses from being
                // reordered across the critical section.
                core::sync::atomic::compiler_fence(Ordering::SeqCst);
                self.0.get().write(val);
                core::sync::atomic::compiler_fence(Ordering::SeqCst);
            });
        }

        /// Read the value.
        #[inline]
        pub fn load(&self, _ordering: Ordering) -> u64 {
            critical_section::with(|_| unsafe {
                core::sync::atomic::compiler_fence(Ordering::SeqCst);
                let r = self.0.get().read();
                core::sync::atomic::compiler_fence(Ordering::SeqCst);
                r
            })
        }

        /// Add to the current value, wrapping on overflow, and return the previous
        /// value — same semantics as [`core::sync::atomic::AtomicU64::fetch_add`].
        #[inline]
        pub fn fetch_add(&self, val: u64, _ordering: Ordering) -> u64 {
            critical_section::with(|_| {
                core::sync::atomic::compiler_fence(Ordering::SeqCst);
                let curr = unsafe { self.0.get().read() };
                unsafe { self.0.get().write(curr.wrapping_add(val)) };
                core::sync::atomic::compiler_fence(Ordering::SeqCst);
                curr
            })
        }
    }

    // SAFETY: all access to the inner cell happens inside a critical section,
    // which guarantees exclusive access.
    #[cfg(not(target_has_atomic = "64"))]
    unsafe impl Sync for AtomicU64 {}
}
0 commit comments