@@ -20,6 +20,11 @@ BUILD_ASSERT(IS_ENABLED(CONFIG_PCIE), "DT need CONFIG_PCIE");
 #include <zephyr/logging/log.h>
 LOG_MODULE_REGISTER(spi_pw, CONFIG_SPI_LOG_LEVEL);
 
+#if defined(CONFIG_SPI_PW_LPSS_DMA)
+#include <zephyr/drivers/dma.h>
+#include <zephyr/drivers/dma/dma_intel_lpss.h>
+#endif
+
 #include "spi_pw.h"
 
 static uint32_t spi_pw_reg_read(const struct device *dev, uint32_t offset)
@@ -207,12 +212,16 @@ static void spi_pw_rx_thld_set(const struct device *dev,
 {
 	uint32_t reg_data;
 
-	/* Rx threshold */
-	reg_data = spi_pw_reg_read(dev, PW_SPI_REG_SIRF);
-	reg_data &= (uint32_t) ~(PW_SPI_WM_MASK);
-	reg_data |= PW_SPI_SIRF_WM_DFLT;
-	if (spi->ctx.rx_len && spi->ctx.rx_len < spi->fifo_depth) {
-		reg_data = spi->ctx.rx_len - 1;
+	if (IS_ENABLED(CONFIG_SPI_PW_LPSS_DMA)) {
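+		/* DMA mode: a zero threshold lets the DMA engine service the FIFO right away */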
+		reg_data = 0;
+	} else {
+		/* Rx threshold */
+		reg_data = spi_pw_reg_read(dev, PW_SPI_REG_SIRF);
+		reg_data &= (uint32_t) ~(PW_SPI_WM_MASK);
+		reg_data |= PW_SPI_SIRF_WM_DFLT;
+		if (spi->ctx.rx_len && spi->ctx.rx_len < spi->fifo_depth) {
+			reg_data = spi->ctx.rx_len - 1;
+		}
 	}
 	spi_pw_reg_write(dev, PW_SPI_REG_SIRF, reg_data);
 }
@@ -321,6 +330,181 @@ static void spi_pw_config_clk(const struct device *dev,
 	spi_pw_reg_write(dev, PW_SPI_REG_CTRLR0, ctrlr0);
 }
 
+#ifdef CONFIG_SPI_PW_LPSS_DMA
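+/* Set or clear the SSP DMA-completion interrupt enable in CTRLR1 */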
+static void spi_pw_idma_intr_enable(const struct device *dev, bool enable)
+{
+	uint32_t reg;
+
+	reg = spi_pw_reg_read(dev, PW_SPI_REG_CTRLR1);
+
+	if (enable) {
+		reg |= PW_SPI_IDMA_INTR;
+	} else {
+		reg &= ~(PW_SPI_IDMA_INTR);
+	}
+	spi_pw_reg_write(dev, PW_SPI_REG_CTRLR1, reg);
+}
+
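+/* DMA completion callback, invoked once per channel; the SPI transfer ends when both TX and RX have completed */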
+static void cb_idma_transfer(const struct device *dma, void *user_data,
+			     uint32_t channel, int status)
+{
+	const struct device *dev = (const struct device *)user_data;
+	struct spi_pw_data *spi = dev->data;
+	const struct spi_pw_config *info = dev->config;
+
+	dma_stop(info->dma_dev, channel);
+	/* Record which direction finished */
+	if (channel == DMA_INTEL_LPSS_TX_CHAN) {
+		spi->dma_tx_finished = true;
+
+		/* Wait until the TX FIFO drains and the SSP goes idle */
+		while (is_pw_ssp_busy(dev)) {
+		}
+	} else if (channel == DMA_INTEL_LPSS_RX_CHAN) {
+		spi->dma_rx_finished = true;
+	}
+	/* Both directions done: tear down and signal completion */
+	if (spi->dma_tx_finished && spi->dma_rx_finished) {
+		spi_pw_idma_intr_enable(dev, false);
+		spi_pw_intr_disable(dev);
+		spi_pw_ssp_disable(dev);
+		spi_pw_cs_ctrl_enable(dev, false);
+		spi_context_complete(&spi->ctx, dev, status);
+	}
+}
+
+static inline void *spi_pw_dr_phy_addr(const struct device *dev)
+{
+	struct spi_pw_data *spi = dev->data;
+
+	/* Physical FIFO addr */
+	return (void *)(spi->phy_addr + PW_SPI_REG_SSDR);
+}
+
+static int32_t spi_pw_idma_transfer(const struct device *dev,
+				    const struct spi_buf_set *tx_bufs,
+				    const struct spi_buf_set *rx_bufs)
+{
+	struct spi_pw_data *spi = dev->data;
+	const struct spi_pw_config *info = dev->config;
+	struct dma_config dma_cfg_tx = { 0 };
+	struct dma_config dma_cfg_rx = { 0 };
+	struct dma_block_config *dma_blocks_tx = NULL;
+	struct dma_block_config *dma_blocks_rx = NULL;
+	size_t tx_count = tx_bufs ? tx_bufs->count : 0;
+	size_t rx_count = rx_bufs ? rx_bufs->count : 0;
+	size_t i;
+
+	spi->dma_tx_finished = false;
+	spi->dma_rx_finished = false;
+
+	if (!device_is_ready(info->dma_dev)) {
+		LOG_DBG("DMA device is not ready");
+		return -ENODEV;
+	}
+
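+	/* Chain one dma_block_config per TX buffer for a scatter-gather transfer */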
+	if (tx_count > 0) {
+		dma_blocks_tx = k_calloc(tx_count, sizeof(struct dma_block_config));
+		if (!dma_blocks_tx) {
+			LOG_ERR("Failed to allocate TX dma_block_config");
+			return -ENOMEM;
+		}
+		for (i = 0; i < tx_count; i++) {
+			dma_blocks_tx[i].block_size = tx_bufs->buffers[i].len;
+			dma_blocks_tx[i].source_address = (uint64_t)tx_bufs->buffers[i].buf;
+			dma_blocks_tx[i].dest_address = (uint64_t)spi_pw_dr_phy_addr(dev);
+			dma_blocks_tx[i].next_block =
+				(i + 1 < tx_count) ? &dma_blocks_tx[i + 1] : NULL;
+		}
+		dma_cfg_tx.dma_slot = 0U;
+		dma_cfg_tx.channel_direction = MEMORY_TO_PERIPHERAL;
+		dma_cfg_tx.source_data_size = 1U;
+		dma_cfg_tx.dest_data_size = 1U;
+		dma_cfg_tx.source_burst_length = 1U;
+		dma_cfg_tx.dest_burst_length = 1U;
+		dma_cfg_tx.dma_callback = cb_idma_transfer;
+		dma_cfg_tx.user_data = (void *)dev;
+		dma_cfg_tx.complete_callback_en = 0U;
+		dma_cfg_tx.error_callback_dis = 1U;
+		dma_cfg_tx.block_count = tx_count;
+		dma_cfg_tx.head_block = &dma_blocks_tx[0];
+		if (dma_config(info->dma_dev, DMA_INTEL_LPSS_TX_CHAN, &dma_cfg_tx)) {
+			LOG_DBG("TX dma_config() failed");
+			k_free(dma_blocks_tx);
+			return -EIO;
+		}
+		if (dma_start(info->dma_dev, DMA_INTEL_LPSS_TX_CHAN)) {
+			LOG_DBG("TX dma_start() failed");
+			k_free(dma_blocks_tx);
+			return -EIO;
+		}
+	}
+
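+	/* Build the matching RX chain; on failure, stop the TX channel started above */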
+	if (rx_count > 0) {
+		dma_blocks_rx = k_calloc(rx_count, sizeof(struct dma_block_config));
+		if (!dma_blocks_rx) {
+			LOG_ERR("Failed to allocate RX dma_block_config");
+			if (dma_blocks_tx) {
+				dma_stop(info->dma_dev, DMA_INTEL_LPSS_TX_CHAN);
+				k_free(dma_blocks_tx);
+			}
+			return -ENOMEM;
+		}
+		for (i = 0; i < rx_count; i++) {
+			dma_blocks_rx[i].block_size = rx_bufs->buffers[i].len;
+			dma_blocks_rx[i].dest_address = (uint64_t)rx_bufs->buffers[i].buf;
+			dma_blocks_rx[i].source_address = (uint64_t)spi_pw_dr_phy_addr(dev);
+			dma_blocks_rx[i].next_block =
+				(i + 1 < rx_count) ? &dma_blocks_rx[i + 1] : NULL;
+		}
+		dma_cfg_rx.dma_slot = 1U;
+		dma_cfg_rx.channel_direction = PERIPHERAL_TO_MEMORY;
+		dma_cfg_rx.source_data_size = 1U;
+		dma_cfg_rx.dest_data_size = 1U;
+		dma_cfg_rx.source_burst_length = 1U;
+		dma_cfg_rx.dest_burst_length = 1U;
+		dma_cfg_rx.dma_callback = cb_idma_transfer;
+		dma_cfg_rx.user_data = (void *)dev;
+		dma_cfg_rx.complete_callback_en = 0U;
+		dma_cfg_rx.error_callback_dis = 1U;
+		dma_cfg_rx.block_count = rx_count;
+		dma_cfg_rx.head_block = &dma_blocks_rx[0];
+		if (dma_config(info->dma_dev, DMA_INTEL_LPSS_RX_CHAN, &dma_cfg_rx)) {
+			LOG_DBG("RX dma_config() failed");
+			if (dma_blocks_tx) {
+				dma_stop(info->dma_dev, DMA_INTEL_LPSS_TX_CHAN);
+				k_free(dma_blocks_tx);
+			}
+			k_free(dma_blocks_rx);
+			return -EIO;
+		}
+		if (dma_start(info->dma_dev, DMA_INTEL_LPSS_RX_CHAN)) {
+			LOG_DBG("RX dma_start() failed");
+			if (dma_blocks_tx) {
+				dma_stop(info->dma_dev, DMA_INTEL_LPSS_TX_CHAN);
+				k_free(dma_blocks_tx);
+			}
+			k_free(dma_blocks_rx);
+			return -EIO;
+		}
+	}
+
+	/* Enable ssp interrupts */
+	spi_pw_intr_enable(dev, true);
+
+	/* Enable dma interrupt */
+	spi_pw_idma_intr_enable(dev, true);
+
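+	/* Assumes the DMA driver consumed the block list in dma_config(); free the scratch configs */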
+	if (dma_blocks_tx) {
+		k_free(dma_blocks_tx);
+	}
+	if (dma_blocks_rx) {
+		k_free(dma_blocks_rx);
+	}
+
+	return 0;
+}
+
+#else
+
 static void spi_pw_completed(const struct device *dev, int err)
 {
 	struct spi_pw_data *spi = dev->data;
@@ -539,6 +723,7 @@ static int spi_pw_transfer(const struct device *dev)
 
 	return err;
 }
+#endif
 
 static int spi_pw_configure(const struct device *dev,
 			    const struct spi_pw_config *info,
@@ -658,6 +843,16 @@ static int transceive(const struct device *dev,
 	spi_pw_ssp_enable(dev);
 
 #ifdef CONFIG_SPI_PW_INTERRUPT
+#ifdef CONFIG_SPI_PW_LPSS_DMA
+
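+	/* Kick off both DMA channels, then block until the callback signals completion */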
+	LOG_DBG("DMA Mode");
+	err = spi_pw_idma_transfer(dev, tx_bufs, rx_bufs);
+	if (err) {
+		goto out;
+	}
+
+	err = spi_context_wait_for_completion(&spi->ctx);
+#else
 	LOG_DBG("Interrupt Mode");
 
 	/* Enable interrupts */
@@ -668,6 +863,7 @@ static int transceive(const struct device *dev,
 	}
 
 	err = spi_context_wait_for_completion(&spi->ctx);
+#endif
 #else
 	LOG_DBG("Polling Mode");
 
@@ -727,10 +923,16 @@ static int spi_pw_release(const struct device *dev,
 static void spi_pw_isr(const void *arg)
 {
 	const struct device *dev = (const struct device *)arg;
+#ifdef CONFIG_SPI_PW_LPSS_DMA
+	const struct spi_pw_config *info = dev->config;
+
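+	/* In DMA mode the SPI IRQ line is serviced by the LPSS DMA engine */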
+	dma_intel_lpss_isr(info->dma_dev);
+#else
 	int err;
 
 	err = spi_pw_transfer(dev);
 	spi_pw_completed(dev, err);
+#endif
 }
 #endif
 
@@ -774,7 +976,23 @@ static int spi_pw_init(const struct device *dev)
 		pcie_set_cmd(info->pcie->bdf,
 			     PCIE_CONF_CMDSTAT_MASTER,
 			     true);
-
+#if defined(CONFIG_SPI_PW_LPSS_DMA)
+		uintptr_t base;
+
+		base = DEVICE_MMIO_GET(dev) + DMA_INTEL_LPSS_OFFSET;
+		dma_intel_lpss_set_base(info->dma_dev, base);
+
+		/* Assign physical & virtual addresses to the dma instance */
+		spi->phy_addr = mbar.phys_addr;
+		spi->base_addr = (uint32_t)(DEVICE_MMIO_GET(dev) + PW_SPI_IDMA_OFFSET);
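+		/* Program the remap registers so the DMA engine sees the BAR's physical address */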
+		sys_write32((uint32_t)spi->phy_addr,
+			    DEVICE_MMIO_GET(dev) + DMA_INTEL_LPSS_REMAP_LOW);
+		sys_write32((uint32_t)(spi->phy_addr >> DMA_INTEL_LPSS_ADDR_RIGHT_SHIFT),
+			    DEVICE_MMIO_GET(dev) + DMA_INTEL_LPSS_REMAP_HI);
+		LOG_DBG("spi inst phy addr: [0x%lx], mmio addr: [0x%x]",
+			spi->phy_addr, spi->base_addr);
+#endif
 	} else {
 		DEVICE_MMIO_MAP(dev, K_MEM_CACHE_NONE);
 	}
@@ -785,6 +1003,10 @@ static int spi_pw_init(const struct device *dev)
 
 	/* Bring ssp out of reset */
 	spi_pw_ssp_reset(dev);
+#if defined(CONFIG_SPI_PW_LPSS_DMA)
+	/* Enable DMA controller */
+	dma_intel_lpss_setup(info->dma_dev);
+#endif
 	/* Disable ssp operation */
 	spi_pw_ssp_disable(dev);
 
@@ -824,6 +1046,12 @@ static int spi_pw_init(const struct device *dev)
 
 #ifdef CONFIG_SPI_PW_INTERRUPT
 
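+/* Point .dma_dev at the devicetree 'dmas' controller when LPSS DMA is enabled */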
+#define SPI_CONFIG_DMA_INIT(n)						\
+	COND_CODE_1(CONFIG_SPI_PW_LPSS_DMA,				\
+		(COND_CODE_1(DT_INST_NODE_HAS_PROP(n, dmas),		\
+			(.dma_dev = DEVICE_DT_GET(DT_INST_DMAS_CTLR_BY_IDX(n, 0)),), \
+			())),						\
+		())
+
 #define SPI_INTEL_IRQ_FLAGS_SENSE0(n) 0
 #define SPI_INTEL_IRQ_FLAGS_SENSE1(n) DT_INST_IRQ(n, sense)
 #define SPI_INTEL_IRQ_FLAGS(n) \
@@ -870,6 +1098,7 @@ static int spi_pw_init(const struct device *dev)
 		.irq_config = spi_##n##_irq_init,			\
 		.clock_freq = DT_INST_PROP(n, clock_frequency),		\
 		INIT_PCIE(n)						\
+		SPI_CONFIG_DMA_INIT(n)					\
 	};								\
 	SPI_DEVICE_DT_INST_DEFINE(n, spi_pw_init, NULL,			\
 				  &spi_##n##_data, &spi_##n##_config,	\