atsamd_hal/sercom/i2c/async_api.rs

use crate::{
    async_hal::interrupts::{Binding, Handler, InterruptSource},
    sercom::{
        i2c::{self, impl_ehal::chunk_operations, AnyConfig, Flags, I2c},
        Sercom,
    },
    typelevel::NoneT,
};
use core::{marker::PhantomData, task::Poll};

use embedded_hal_async::i2c::{ErrorType, I2c as I2cTrait, Operation};

/// Interrupt handler for async I2C operations.
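///
/// To use it, bind the relevant SERCOM interrupt source(s) to this handler and
/// pass the resulting binding to [`I2c::into_future`]. A rough sketch, assuming
/// the HAL's `bind_interrupts!` macro and a chip that exposes a single
/// `SERCOM3` interrupt line (interrupt names and paths vary by chip):
///
/// ```ignore
/// use atsamd_hal::{bind_interrupts, sercom::{i2c, Sercom3}};
///
/// bind_interrupts!(struct Irqs {
///     SERCOM3 => i2c::InterruptHandler<Sercom3>;
/// });
/// ```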
pub struct InterruptHandler<S: Sercom> {
    _private: (),
    _sercom: PhantomData<S>,
}

impl<S: Sercom> crate::typelevel::Sealed for InterruptHandler<S> {}

impl<S: Sercom> Handler<S::Interrupt> for InterruptHandler<S> {
    // TODO the ISR gets called twice on every MB request???
    #[inline]
    unsafe fn on_interrupt() {
        let mut peripherals = unsafe { crate::pac::Peripherals::steal() };
        let i2cm = S::reg_block(&mut peripherals).i2cm();
        let flags_to_check = Flags::all();
        let flags_pending = Flags::from_bits_truncate(i2cm.intflag().read().bits());

        // Disable interrupts, but don't clear the flags. The future will take care of
        // clearing flags and re-enabling interrupts when woken.
        if flags_to_check.intersects(flags_pending) {
            i2cm.intenclr()
                .write(|w| unsafe { w.bits(flags_pending.bits()) });
            S::rx_waker().wake();
        }
    }
}

impl<C, S> I2c<C>
where
    C: AnyConfig<Sercom = S>,
    S: Sercom,
{
    /// Turn an [`I2c`] into an [`I2cFuture`]
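    ///
    /// A minimal usage sketch (assumes an `Irqs` interrupt binding like the one
    /// shown on [`InterruptHandler`], and a target device at the illustrative
    /// address `0x54`):
    ///
    /// ```ignore
    /// use embedded_hal_async::i2c::I2c;
    ///
    /// let mut i2c = i2c.into_future(Irqs);
    /// i2c.write(0x54, &[0x00, 0xff]).await?;
    /// ```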
    #[inline]
    pub fn into_future<I>(self, _interrupts: I) -> I2cFuture<C>
    where
        I: Binding<S::Interrupt, InterruptHandler<S>>,
    {
        S::Interrupt::unpend();
        unsafe { S::Interrupt::enable() };

        I2cFuture { i2c: self }
    }
}

/// `async` version of [`I2c`].
///
/// Create this struct by calling [`I2c::into_future`](I2c::into_future).
pub struct I2cFuture<C, D = NoneT>
where
    C: AnyConfig,
{
    i2c: I2c<C, D>,
}

impl<C, S, D> I2cFuture<C, D>
where
    C: AnyConfig<Sercom = S>,
    S: Sercom,
{
    async fn wait_flags(&mut self, flags_to_wait: Flags) {
        core::future::poll_fn(|cx| {
            // Scope `maybe_pending` so this early read can't be reused by mistake; the
            // flags are read again below after registering the waker.
            {
                let maybe_pending = self.i2c.config.as_ref().registers.read_flags();
                if flags_to_wait.intersects(maybe_pending) {
                    return Poll::Ready(());
                }
            }

            self.i2c.disable_interrupts(Flags::all());
            // By convention, I2C uses the sercom's RX waker.
            S::rx_waker().register(cx.waker());
            self.i2c.enable_interrupts(flags_to_wait);
            let maybe_pending = self.i2c.config.as_ref().registers.read_flags();

            if !flags_to_wait.intersects(maybe_pending) {
                Poll::Pending
            } else {
                Poll::Ready(())
            }
        })
        .await;
    }
}

impl<C, S> I2cFuture<C, NoneT>
where
    C: AnyConfig<Sercom = S>,
    S: Sercom,
{
    /// Return the underlying [`I2c`].
    pub fn free(self) -> I2c<C> {
        self.i2c
    }

    /// Use a DMA channel for reads/writes
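    ///
    /// A sketch, assuming `channel0` is a DMAC channel that has already been
    /// converted into the `ReadyFuture` state, and `Irqs` is an interrupt
    /// binding as shown on [`InterruptHandler`]:
    ///
    /// ```ignore
    /// let mut i2c = i2c.into_future(Irqs).with_dma_channel(channel0);
    /// i2c.write(0x54, &[0x00]).await?;
    /// ```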
    #[cfg(feature = "dma")]
    pub fn with_dma_channel<D: crate::dmac::AnyChannel<Status = crate::dmac::ReadyFuture>>(
        self,
        dma_channel: D,
    ) -> I2cFuture<C, D> {
        I2cFuture {
            i2c: I2c {
                config: self.i2c.config,
                _dma_channel: dma_channel,
            },
        }
    }

    async fn write_one(&mut self, byte: u8) -> Result<(), i2c::Error> {
        self.wait_flags(Flags::MB | Flags::ERROR).await;
        self.i2c.read_status().check_bus_error()?;
        self.i2c.config.as_mut().registers.write_one(byte);
        Ok(())
    }

    async fn read_one(&mut self) -> Result<u8, i2c::Error> {
        self.wait_flags(Flags::SB | Flags::ERROR).await;
        self.i2c.read_status().check_bus_error()?;
        Ok(self.i2c.config.as_mut().registers.read_one())
    }
}

impl<C: AnyConfig, D> AsRef<I2c<C, D>> for I2cFuture<C, D> {
    #[inline]
    fn as_ref(&self) -> &I2c<C, D> {
        &self.i2c
    }
}

impl<C: AnyConfig, D> AsMut<I2c<C, D>> for I2cFuture<C, D> {
    #[inline]
    fn as_mut(&mut self) -> &mut I2c<C, D> {
        &mut self.i2c
    }
}

impl<C, D> ErrorType for I2cFuture<C, D>
where
    C: AnyConfig,
{
    type Error = i2c::Error;
}

impl<C: AnyConfig> I2cFuture<C> {
    #[inline]
    async fn do_read(&mut self, addr: u8, buffer: &mut [u8]) -> Result<(), i2c::Error> {
        self.i2c.config.as_mut().registers.start_read(addr)?;

        // Some manual iterator gumph because we need to ack bytes after the first.
        let mut iter = buffer.iter_mut();
        *iter.next().expect("buffer len is at least 1") = self.read_one().await?;

        for byte in iter {
            // Ack the last byte so we can receive another one
            self.i2c.config.as_mut().registers.cmd_read();
            *byte = self.read_one().await?;
        }

        Ok(())
    }

    #[inline]
    async fn continue_read(&mut self, buffer: &mut [u8]) -> Result<(), i2c::Error> {
        for byte in buffer.iter_mut() {
            self.i2c.config.as_mut().registers.cmd_read();
            *byte = self.read_one().await?;
        }
        Ok(())
    }

    #[inline]
    async fn do_write(&mut self, address: u8, buffer: &[u8]) -> Result<(), i2c::Error> {
        self.i2c.config.as_mut().registers.start_write(address)?;

        for byte in buffer {
            self.write_one(*byte).await?;
        }
        Ok(())
    }

    #[inline]
    async fn continue_write(&mut self, buffer: &[u8]) -> Result<(), i2c::Error> {
        for byte in buffer {
            self.write_one(*byte).await?;
        }
        Ok(())
    }
}

impl<C: AnyConfig> I2cTrait for I2cFuture<C> {
    #[inline]
    async fn read(&mut self, address: u8, buffer: &mut [u8]) -> Result<(), Self::Error> {
        self.do_read(address, buffer).await?;
        self.i2c.config.as_mut().registers.cmd_stop();
        Ok(())
    }

    #[inline]
    async fn write(&mut self, address: u8, bytes: &[u8]) -> Result<(), Self::Error> {
        self.do_write(address, bytes).await?;
        self.i2c.cmd_stop();
        Ok(())
    }

    #[inline]
    async fn write_read(
        &mut self,
        address: u8,
        write_buf: &[u8],
        read_buf: &mut [u8],
    ) -> Result<(), Self::Error> {
        self.do_write(address, write_buf).await?;
        self.i2c.config.as_mut().registers.cmd_repeated_start();
        self.do_read(address, read_buf).await?;
        Ok(())
    }

    #[inline]
    async fn transaction(
        &mut self,
        address: u8,
        operations: &mut [Operation<'_>],
    ) -> Result<(), Self::Error> {
        let mut op_groups = chunk_operations(operations).peekable();

        while let Some(group) = op_groups.next() {
            let mut group = group.iter_mut();
            // Unwrapping is OK here because chunk_operations will never give us a 0-length
            // chunk.
            let op = group.next().unwrap();

            // First operation in the group - send a START with the address, and the first
            // operation.
            match op {
                Operation::Read(buf) => self.do_read(address, buf).await?,
                Operation::Write(buf) => self.do_write(address, buf).await?,
            }

            // For all subsequent operations, just send/read more bytes without any more
            // ceremony.
            for op in group {
                match op {
                    Operation::Read(buf) => self.continue_read(buf).await?,
                    Operation::Write(buf) => self.continue_write(buf).await?,
                }
            }

            let regs = &mut self.i2c.config.as_mut().registers;
            if op_groups.peek().is_some() {
                // If we still have more groups to go, send a repeated start
                regs.cmd_repeated_start();
            } else {
                // Otherwise, send a stop
                regs.cmd_stop();
            }
        }

        Ok(())
    }
}

#[cfg(feature = "dma")]
mod dma {
    use embedded_hal_1::i2c::Operation;
    use embedded_hal_async::i2c::I2c as I2cTrait;

    use super::*;
    use crate::dmac::sram::DmacDescriptor;
    use crate::dmac::{AnyChannel, Channel, ReadyFuture};
    use crate::sercom::dma::async_dma::{read_dma_linked, write_dma_linked};
    use crate::sercom::dma::SharedSliceBuffer;

    /// Convenience type for an [`I2cFuture`] in DMA mode.
    ///
    /// The type parameter `I` represents the DMA channel ID (`ChX`).
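    ///
    /// For example, a bus driven by DMAC channel 0 could be spelled like this
    /// (the config type is a placeholder):
    ///
    /// ```ignore
    /// type Bus = I2cFutureDma<MyI2cConfig, Ch0>;
    /// ```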
    pub type I2cFutureDma<C, I> = I2cFuture<C, Channel<I, ReadyFuture>>;

    impl<C, D> I2cFuture<C, D>
    where
        C: AnyConfig,
        D: AnyChannel<Status = ReadyFuture>,
    {
        /// Reclaim the DMA channel. Any subsequent I2C operations will no
        /// longer use DMA.
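        ///
        /// A sketch of handing the channel back, e.g. to reuse it for another
        /// peripheral:
        ///
        /// ```ignore
        /// let (i2c, channel) = i2c_dma.take_dma_channel();
        /// ```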
        pub fn take_dma_channel(self) -> (I2cFuture<C, crate::typelevel::NoneT>, D) {
            let (i2c, channel) = self.i2c.take_dma_channel();
            (I2cFuture { i2c }, channel)
        }
    }

    impl<C, S, D> I2cFuture<C, D>
    where
        C: AnyConfig<Sercom = S>,
        S: Sercom,
        D: AnyChannel<Status = ReadyFuture>,
    {
        /// Make an async I2C write transaction, with the option to add in
        /// linked transfers after this first transaction.
        ///
        /// # Safety
        ///
        /// If `next` is [`Some`], the pointer in its `descaddr` field, and
        /// every descriptor reachable through it, must either be null or point
        /// to a valid [`DmacDescriptor`] memory location. The chain must not
        /// be circular (i.e., no descriptor may point back to itself). Every
        /// linked transfer must strictly be a write transaction (source
        /// pointer is a byte buffer, destination pointer is the SERCOM DATA
        /// register).
        #[inline]
        pub(super) async unsafe fn write_linked(
            &mut self,
            address: u8,
            bytes: &[u8],
            next: Option<&mut DmacDescriptor>,
        ) -> Result<(), i2c::Error> {
            self.i2c.prepare_write_linked(address, bytes, &next)?;

            let sercom_ptr = self.i2c.sercom_ptr();
            let mut bytes = SharedSliceBuffer::from_slice(bytes);

            write_dma_linked::<_, _, S>(&mut self.i2c._dma_channel, sercom_ptr, &mut bytes, next)
                .await?;

            // Unfortunately, we have to poll here, as there is no interrupt source that
            // can notify us of an IDLE bus state. Fortunately, the wait is usually short:
            // 8-10 times around the loop will do the trick.
            while !self.i2c.read_status().is_idle() {
                core::hint::spin_loop();
            }
            self.i2c.read_status().check_bus_error()?;
            Ok(())
        }

        /// Asynchronously read into a buffer using DMA, with the option to add
        /// in linked transfers after this first transaction.
        ///
        /// # Safety
        ///
        /// The same rules apply to `next` as for
        /// [`write_linked`](Self::write_linked), except that every linked
        /// transfer must strictly be a read transaction (source pointer is the
        /// SERCOM DATA register, destination pointer is a byte buffer).
        #[inline]
        pub(super) async unsafe fn read_linked(
            &mut self,
            address: u8,
            mut buffer: &mut [u8],
            next: Option<&mut DmacDescriptor>,
        ) -> Result<(), i2c::Error> {
            self.i2c.prepare_read_linked(address, buffer, &next)?;
            let i2c_ptr = self.i2c.sercom_ptr();

            read_dma_linked::<_, _, S>(&mut self.i2c._dma_channel, i2c_ptr, &mut buffer, next)
                .await?;

            // Unfortunately, we have to poll here, as there is no interrupt source that
            // can notify us of an IDLE bus state. Fortunately, the wait is usually short:
            // 8-10 times around the loop will do the trick.
            while !self.i2c.read_status().is_idle() {
                core::hint::spin_loop();
            }
            self.i2c.read_status().check_bus_error()?;

            Ok(())
        }

        /// Asynchronously write from a buffer, then read into a buffer, all
        /// using DMA.
        ///
        /// This is an extremely common pattern: for example, writing a
        /// register address, then reading its value from the slave.
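        ///
        /// A sketch of the register-read pattern (device address and register
        /// number are illustrative):
        ///
        /// ```ignore
        /// let mut value = [0u8; 1];
        /// i2c.write_read(0x54, &[0x10], &mut value).await?;
        /// ```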
        #[inline]
        pub async fn write_read(
            &mut self,
            addr: u8,
            write_buf: &[u8],
            read_buf: &mut [u8],
        ) -> Result<(), i2c::Error> {
            unsafe {
                self.write_linked(addr, write_buf, None).await?;
                self.read_linked(addr, read_buf, None).await?;
            }
            Ok(())
        }
    }

    impl<C, D> I2cTrait for I2cFuture<C, D>
    where
        C: AnyConfig,
        D: AnyChannel<Status = ReadyFuture>,
    {
        #[inline]
        async fn read(&mut self, address: u8, buffer: &mut [u8]) -> Result<(), Self::Error> {
            unsafe { self.read_linked(address, buffer, None).await }
        }

        #[inline]
        async fn write(&mut self, address: u8, bytes: &[u8]) -> Result<(), Self::Error> {
            unsafe { self.write_linked(address, bytes, None).await }
        }

        #[inline]
        async fn write_read(
            &mut self,
            address: u8,
            wr_buffer: &[u8],
            rd_buffer: &mut [u8],
        ) -> Result<(), Self::Error> {
            self.write_read(address, wr_buffer, rd_buffer).await?;
            Ok(())
        }

        #[inline]
        async fn transaction(
            &mut self,
            address: u8,
            operations: &mut [Operation<'_>],
        ) -> Result<(), Self::Error> {
            use crate::dmac::{channel, sram::DmacDescriptor};
            use crate::sercom::dma::SharedSliceBuffer;
            use Operation::{Read, Write};

            const NUM_LINKED_TRANSFERS: usize = 16;

            if operations.is_empty() {
                return Ok(());
            }

            let mut sercom_ptr = self.i2c.sercom_ptr();

            // Reserve some space for linked DMA transfer descriptors.
            // Uses 256 bytes of memory (16 descriptors of 16 bytes each).
            //
            // In practice this means that we can only support a limited number of
            // contiguously linked operations of the same type (R/W) before having
            // to issue an I2C STOP. DMA-enabled I2C transfers automatically issue
            // stop commands, and there is no way to turn off that behaviour.
            //
            // In the event that a group contains more contiguous operations of the
            // same type than NUM_LINKED_TRANSFERS, we revert to the byte-by-byte
            // I2C implementation.
            let mut descriptors = heapless::Vec::<DmacDescriptor, NUM_LINKED_TRANSFERS>::new();

            // Arrange operations in groups of contiguous types (R/W)
            let op_groups = operations.chunk_by_mut(|this, next| {
                matches!((this, next), (Write(_), Write(_)) | (Read(_), Read(_)))
            });

            for group in op_groups {
                descriptors.clear();

                // Default to the byte-by-byte impl if the group has more operations than we
                // have reserved descriptors for, as we would otherwise overflow the space
                // reserved for linked DMA transfers.
                if group.len() > NUM_LINKED_TRANSFERS {
                    self.i2c.transaction_byte_by_byte(address, group)?;
                } else {
                    // --- Setup all linked descriptors ---

                    // Skip the first operation; we will deal with it when creating the I2C transfer
                    // (read_dma_linked/write_dma_linked). Every other operation is a linked
                    // transfer, and we must treat them accordingly.
                    for op in group.iter_mut().skip(1) {
                        match op {
                            Read(ref mut buffer) => {
                                if buffer.is_empty() {
                                    continue;
                                }
                                // Add a new linked descriptor to the stack
                                descriptors
                                    .push(DmacDescriptor::default())
                                    .unwrap_or_else(|_| panic!("BUG: DMAC descriptors overflow"));
                                let last_descriptor = descriptors.last_mut().unwrap();
                                let next_ptr =
                                    (last_descriptor as *mut DmacDescriptor).wrapping_add(1);

                                unsafe {
                                    channel::write_descriptor(
                                        last_descriptor,
                                        &mut sercom_ptr,
                                        buffer,
                                        // Always link the next descriptor. We then set the last
                                        // transfer's link pointer to null lower down in the code.
                                        next_ptr,
                                    );
                                }
                            }

                            Write(bytes) => {
                                if bytes.is_empty() {
                                    continue;
                                }
                                // Add a new linked descriptor to the stack
                                descriptors
                                    .push(DmacDescriptor::default())
                                    .unwrap_or_else(|_| panic!("BUG: DMAC descriptors overflow"));
                                let last_descriptor = descriptors.last_mut().unwrap();
                                let next_ptr =
                                    (last_descriptor as *mut DmacDescriptor).wrapping_add(1);

                                let mut bytes = SharedSliceBuffer::from_slice(bytes);
                                unsafe {
                                    channel::write_descriptor(
                                        last_descriptor,
                                        &mut bytes,
                                        &mut sercom_ptr,
                                        // Always link the next descriptor. We then set the last
                                        // transfer's link pointer to null lower down in the code.
                                        next_ptr,
                                    );
                                }
                            }
                        }
                    }

                    // Point the last descriptor's link to null to terminate the transfer chain
                    // and avoid buffer overrun UB.
                    if let Some(d) = descriptors.last_mut() {
                        d.set_next_descriptor(core::ptr::null_mut());
                    }

                    // Now setup and perform the actual transfer
                    match group.first_mut().unwrap() {
                        Read(ref mut buffer) => unsafe {
                            self.read_linked(address, buffer, descriptors.first_mut())
                                .await?;
                        },
                        Write(bytes) => unsafe {
                            self.write_linked(address, bytes, descriptors.first_mut())
                                .await?;
                        },
                    }
                }
            }

            Ok(())
        }
    }
}

#[cfg(feature = "dma")]
pub use dma::*;