Previous generations of Tegra supported DMA operations via an external
DMA controller, but the QSPI controller on Tegra234 devices now has an
internal DMA controller.
Internal DMA: Uses the QSPI controller's built-in DMA engine, which is
limited in capabilities and tied directly to the QSPI module.
External DMA: Utilizes a separate GPC DMA controller that can
transfer data between the QSPI FIFOs and any memory location.
Native DMA Initialization: Introduce routines to initialize and
configure native DMA channels for both transmit and receive paths.
Set up DMA mapping functions to manage buffer addresses effectively.
Enhance Transfer Logic: Implement logic to choose between CPU-based
and DMA-based transfers based on data size.
Change-Id: Icf3ef4767947cef67821c092ecd9ea6bccb2a4e4
Signed-off-by: Vishwaroop A <va@nvidia.com>
---
drivers/spi/spi-tegra210-quad.c | 218 ++++++++++++++++++--------------
1 file changed, 126 insertions(+), 92 deletions(-)
diff --git a/drivers/spi/spi-tegra210-quad.c b/drivers/spi/spi-tegra210-quad.c
index 04f41e92c1e2..066caee85c52 100644
--- a/drivers/spi/spi-tegra210-quad.c
+++ b/drivers/spi/spi-tegra210-quad.c
@@ -111,6 +111,9 @@
#define QSPI_DMA_BLK 0x024
#define QSPI_DMA_BLK_SET(x) (((x) & 0xffff) << 0)
+#define QSPI_DMA_MEM_ADDRESS_REG 0x028
+#define QSPI_DMA_HI_ADDRESS_REG 0x02c
+
#define QSPI_TX_FIFO 0x108
#define QSPI_RX_FIFO 0x188
@@ -167,9 +170,9 @@ enum tegra_qspi_transfer_type {
};
struct tegra_qspi_soc_data {
- bool has_dma;
bool cmb_xfer_capable;
bool supports_tpm;
+ bool has_ext_dma;
unsigned int cs_count;
};
@@ -605,17 +608,21 @@ static void tegra_qspi_dma_unmap_xfer(struct tegra_qspi *tqspi, struct spi_trans
len = DIV_ROUND_UP(tqspi->curr_dma_words * tqspi->bytes_per_word, 4) * 4;
- dma_unmap_single(tqspi->dev, t->tx_dma, len, DMA_TO_DEVICE);
- dma_unmap_single(tqspi->dev, t->rx_dma, len, DMA_FROM_DEVICE);
+ if (t->tx_buf)
+ dma_unmap_single(tqspi->dev, t->tx_dma, len, DMA_TO_DEVICE);
+ if (t->rx_buf)
+ dma_unmap_single(tqspi->dev, t->rx_dma, len, DMA_FROM_DEVICE);
}
static int tegra_qspi_start_dma_based_transfer(struct tegra_qspi *tqspi, struct spi_transfer *t)
{
struct dma_slave_config dma_sconfig = { 0 };
+ dma_addr_t rx_dma_phys, tx_dma_phys;
unsigned int len;
u8 dma_burst;
int ret = 0;
u32 val;
+ bool has_ext_dma = tqspi->soc_data->has_ext_dma;
if (tqspi->is_packed) {
ret = tegra_qspi_dma_map_xfer(tqspi, t);
@@ -634,60 +641,85 @@ static int tegra_qspi_start_dma_based_transfer(struct tegra_qspi *tqspi, struct
len = tqspi->curr_dma_words * 4;
/* set attention level based on length of transfer */
- val = 0;
- if (len & 0xf) {
- val |= QSPI_TX_TRIG_1 | QSPI_RX_TRIG_1;
- dma_burst = 1;
- } else if (((len) >> 4) & 0x1) {
- val |= QSPI_TX_TRIG_4 | QSPI_RX_TRIG_4;
- dma_burst = 4;
- } else {
- val |= QSPI_TX_TRIG_8 | QSPI_RX_TRIG_8;
- dma_burst = 8;
+ val = 0;
+ if (has_ext_dma) {
+ if (len & 0xf) {
+ val |= QSPI_TX_TRIG_1 | QSPI_RX_TRIG_1;
+ dma_burst = 1;
+ } else if (((len) >> 4) & 0x1) {
+ val |= QSPI_TX_TRIG_4 | QSPI_RX_TRIG_4;
+ dma_burst = 4;
+ } else {
+ val |= QSPI_TX_TRIG_8 | QSPI_RX_TRIG_8;
+ dma_burst = 8;
+ }
+
+ tegra_qspi_writel(tqspi, val, QSPI_DMA_CTL);
}
- tegra_qspi_writel(tqspi, val, QSPI_DMA_CTL);
tqspi->dma_control_reg = val;
dma_sconfig.device_fc = true;
- if (tqspi->cur_direction & DATA_DIR_TX) {
- dma_sconfig.dst_addr = tqspi->phys + QSPI_TX_FIFO;
- dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- dma_sconfig.dst_maxburst = dma_burst;
- ret = dmaengine_slave_config(tqspi->tx_dma_chan, &dma_sconfig);
- if (ret < 0) {
- dev_err(tqspi->dev, "failed DMA slave config: %d\n", ret);
- return ret;
- }
- tegra_qspi_copy_client_txbuf_to_qspi_txbuf(tqspi, t);
- ret = tegra_qspi_start_tx_dma(tqspi, t, len);
- if (ret < 0) {
- dev_err(tqspi->dev, "failed to starting TX DMA: %d\n", ret);
- return ret;
+ if ((tqspi->cur_direction & DATA_DIR_TX)) {
+ if (has_ext_dma) {
+ dma_sconfig.dst_addr = tqspi->phys + QSPI_TX_FIFO;
+ dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dma_sconfig.dst_maxburst = dma_burst;
+ ret = dmaengine_slave_config(tqspi->tx_dma_chan, &dma_sconfig);
+ if (ret < 0) {
+ dev_err(tqspi->dev, "failed DMA slave config: %d\n", ret);
+ return ret;
+ }
+
+ tegra_qspi_copy_client_txbuf_to_qspi_txbuf(tqspi, t);
+ ret = tegra_qspi_start_tx_dma(tqspi, t, len);
+ if (ret < 0) {
+ dev_err(tqspi->dev, "failed to start TX DMA: %d\n", ret);
+ return ret;
+ }
+ } else {
+ if (tqspi->is_packed)
+ tx_dma_phys = t->tx_dma;
+ else
+ tx_dma_phys = tqspi->tx_dma_phys;
+ tegra_qspi_copy_client_txbuf_to_qspi_txbuf(tqspi, t);
+ tegra_qspi_writel(tqspi, lower_32_bits(tx_dma_phys),
+ QSPI_DMA_MEM_ADDRESS_REG);
+ tegra_qspi_writel(tqspi, (upper_32_bits(tx_dma_phys) & 0xff),
+ QSPI_DMA_HI_ADDRESS_REG);
}
}
if (tqspi->cur_direction & DATA_DIR_RX) {
- dma_sconfig.src_addr = tqspi->phys + QSPI_RX_FIFO;
- dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- dma_sconfig.src_maxburst = dma_burst;
- ret = dmaengine_slave_config(tqspi->rx_dma_chan, &dma_sconfig);
- if (ret < 0) {
- dev_err(tqspi->dev, "failed DMA slave config: %d\n", ret);
- return ret;
- }
-
- dma_sync_single_for_device(tqspi->dev, tqspi->rx_dma_phys,
- tqspi->dma_buf_size,
- DMA_FROM_DEVICE);
-
- ret = tegra_qspi_start_rx_dma(tqspi, t, len);
- if (ret < 0) {
- dev_err(tqspi->dev, "failed to start RX DMA: %d\n", ret);
- if (tqspi->cur_direction & DATA_DIR_TX)
- dmaengine_terminate_all(tqspi->tx_dma_chan);
- return ret;
+ if (has_ext_dma) {
+ dma_sconfig.src_addr = tqspi->phys + QSPI_RX_FIFO;
+ dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dma_sconfig.src_maxburst = dma_burst;
+ ret = dmaengine_slave_config(tqspi->rx_dma_chan, &dma_sconfig);
+ if (ret < 0) {
+ dev_err(tqspi->dev, "failed DMA slave config: %d\n", ret);
+ return ret;
+ }
+ dma_sync_single_for_device(tqspi->dev, tqspi->rx_dma_phys,
+ tqspi->dma_buf_size, DMA_FROM_DEVICE);
+ ret = tegra_qspi_start_rx_dma(tqspi, t, len);
+ if (ret < 0) {
+ dev_err(tqspi->dev, "failed to start RX DMA: %d\n", ret);
+ if (tqspi->cur_direction & DATA_DIR_TX)
+ dmaengine_terminate_all(tqspi->tx_dma_chan);
+ return ret;
+ }
+ } else {
+ if (tqspi->is_packed)
+ rx_dma_phys = t->rx_dma;
+ else
+ rx_dma_phys = tqspi->rx_dma_phys;
+
+ tegra_qspi_writel(tqspi, lower_32_bits(rx_dma_phys),
+ QSPI_DMA_MEM_ADDRESS_REG);
+ tegra_qspi_writel(tqspi, (upper_32_bits(rx_dma_phys) & 0xff),
+ QSPI_DMA_HI_ADDRESS_REG);
}
}
@@ -726,9 +758,6 @@ static int tegra_qspi_start_cpu_based_transfer(struct tegra_qspi *qspi, struct s
static void tegra_qspi_deinit_dma(struct tegra_qspi *tqspi)
{
- if (!tqspi->soc_data->has_dma)
- return;
-
if (tqspi->tx_dma_buf) {
dma_free_coherent(tqspi->dev, tqspi->dma_buf_size,
tqspi->tx_dma_buf, tqspi->tx_dma_phys);
@@ -759,16 +788,26 @@ static int tegra_qspi_init_dma(struct tegra_qspi *tqspi)
u32 *dma_buf;
int err;
- if (!tqspi->soc_data->has_dma)
- return 0;
+ if (tqspi->soc_data->has_ext_dma) {
+ dma_chan = dma_request_chan(tqspi->dev, "rx");
+ if (IS_ERR(dma_chan)) {
+ err = PTR_ERR(dma_chan);
+ goto err_out;
+ }
- dma_chan = dma_request_chan(tqspi->dev, "rx");
- if (IS_ERR(dma_chan)) {
- err = PTR_ERR(dma_chan);
- goto err_out;
- }
+ tqspi->rx_dma_chan = dma_chan;
- tqspi->rx_dma_chan = dma_chan;
+ dma_chan = dma_request_chan(tqspi->dev, "tx");
+ if (IS_ERR(dma_chan)) {
+ err = PTR_ERR(dma_chan);
+ goto err_out;
+ }
+
+ tqspi->tx_dma_chan = dma_chan;
+ } else {
+ tqspi->rx_dma_chan = NULL;
+ tqspi->tx_dma_chan = NULL;
+ }
dma_buf = dma_alloc_coherent(tqspi->dev, tqspi->dma_buf_size, &dma_phys, GFP_KERNEL);
if (!dma_buf) {
@@ -779,14 +818,6 @@ static int tegra_qspi_init_dma(struct tegra_qspi *tqspi)
tqspi->rx_dma_buf = dma_buf;
tqspi->rx_dma_phys = dma_phys;
- dma_chan = dma_request_chan(tqspi->dev, "tx");
- if (IS_ERR(dma_chan)) {
- err = PTR_ERR(dma_chan);
- goto err_out;
- }
-
- tqspi->tx_dma_chan = dma_chan;
-
dma_buf = dma_alloc_coherent(tqspi->dev, tqspi->dma_buf_size, &dma_phys, GFP_KERNEL);
if (!dma_buf) {
err = -ENOMEM;
@@ -1056,6 +1087,7 @@ static int tegra_qspi_combined_seq_xfer(struct tegra_qspi *tqspi,
struct spi_message *msg)
{
bool is_first_msg = true;
+ bool has_ext_dma = tqspi->soc_data->has_ext_dma;
struct spi_transfer *xfer;
struct spi_device *spi = msg->spi;
u8 transfer_phase = 0;
@@ -1128,15 +1160,12 @@ static int tegra_qspi_combined_seq_xfer(struct tegra_qspi *tqspi,
if (WARN_ON(ret == 0)) {
dev_err(tqspi->dev, "QSPI Transfer failed with timeout: %d\n",
ret);
- if (tqspi->is_curr_dma_xfer &&
- (tqspi->cur_direction & DATA_DIR_TX))
- dmaengine_terminate_all
- (tqspi->tx_dma_chan);
-
- if (tqspi->is_curr_dma_xfer &&
- (tqspi->cur_direction & DATA_DIR_RX))
- dmaengine_terminate_all
- (tqspi->rx_dma_chan);
+ if (tqspi->is_curr_dma_xfer && has_ext_dma) {
+ if (tqspi->cur_direction & DATA_DIR_TX)
+ dmaengine_terminate_all(tqspi->tx_dma_chan);
+ if (tqspi->cur_direction & DATA_DIR_RX)
+ dmaengine_terminate_all(tqspi->rx_dma_chan);
+ }
/* Abort transfer by resetting pio/dma bit */
if (!tqspi->is_curr_dma_xfer) {
@@ -1197,6 +1226,7 @@ static int tegra_qspi_non_combined_seq_xfer(struct tegra_qspi *tqspi,
struct spi_device *spi = msg->spi;
struct spi_transfer *transfer;
bool is_first_msg = true;
+ bool has_ext_dma = tqspi->soc_data->has_ext_dma;
int ret = 0, val = 0;
msg->status = 0;
@@ -1251,10 +1281,12 @@ static int tegra_qspi_non_combined_seq_xfer(struct tegra_qspi *tqspi,
QSPI_DMA_TIMEOUT);
if (WARN_ON(ret == 0)) {
dev_err(tqspi->dev, "transfer timeout\n");
- if (tqspi->is_curr_dma_xfer && (tqspi->cur_direction & DATA_DIR_TX))
- dmaengine_terminate_all(tqspi->tx_dma_chan);
- if (tqspi->is_curr_dma_xfer && (tqspi->cur_direction & DATA_DIR_RX))
- dmaengine_terminate_all(tqspi->rx_dma_chan);
+ if (tqspi->is_curr_dma_xfer && has_ext_dma) {
+ if (tqspi->cur_direction & DATA_DIR_TX)
+ dmaengine_terminate_all(tqspi->tx_dma_chan);
+ if (tqspi->cur_direction & DATA_DIR_RX)
+ dmaengine_terminate_all(tqspi->rx_dma_chan);
+ }
tegra_qspi_handle_error(tqspi);
ret = -EIO;
goto complete_xfer;
@@ -1323,7 +1355,7 @@ static bool tegra_qspi_validate_cmb_seq(struct tegra_qspi *tqspi,
return false;
xfer = list_next_entry(xfer, transfer_list);
}
- if (!tqspi->soc_data->has_dma && xfer->len > (QSPI_FIFO_DEPTH << 2))
+ if (!tqspi->soc_data->has_ext_dma && xfer->len > (QSPI_FIFO_DEPTH << 2))
return false;
return true;
@@ -1388,30 +1420,32 @@ static irqreturn_t handle_dma_based_xfer(struct tegra_qspi *tqspi)
if (tqspi->cur_direction & DATA_DIR_TX) {
if (tqspi->tx_status) {
- dmaengine_terminate_all(tqspi->tx_dma_chan);
- err += 1;
- } else {
+ if (tqspi->tx_dma_chan)
+ dmaengine_terminate_all(tqspi->tx_dma_chan);
+ err++;
+ } else if (tqspi->tx_dma_chan) {
wait_status = wait_for_completion_interruptible_timeout(
&tqspi->tx_dma_complete, QSPI_DMA_TIMEOUT);
if (wait_status <= 0) {
dmaengine_terminate_all(tqspi->tx_dma_chan);
dev_err(tqspi->dev, "failed TX DMA transfer\n");
- err += 1;
+ err++;
}
}
}
if (tqspi->cur_direction & DATA_DIR_RX) {
if (tqspi->rx_status) {
- dmaengine_terminate_all(tqspi->rx_dma_chan);
- err += 2;
- } else {
+ if (tqspi->rx_dma_chan)
+ dmaengine_terminate_all(tqspi->rx_dma_chan);
+ err++;
+ } else if (tqspi->rx_dma_chan) {
wait_status = wait_for_completion_interruptible_timeout(
&tqspi->rx_dma_complete, QSPI_DMA_TIMEOUT);
if (wait_status <= 0) {
dmaengine_terminate_all(tqspi->rx_dma_chan);
dev_err(tqspi->dev, "failed RX DMA transfer\n");
- err += 2;
+ err++;
}
}
}
@@ -1474,28 +1508,28 @@ static irqreturn_t tegra_qspi_isr_thread(int irq, void *context_data)
}
static struct tegra_qspi_soc_data tegra210_qspi_soc_data = {
- .has_dma = true,
+ .has_ext_dma = true,
.cmb_xfer_capable = false,
.supports_tpm = false,
.cs_count = 1,
};
static struct tegra_qspi_soc_data tegra186_qspi_soc_data = {
- .has_dma = true,
+ .has_ext_dma = true,
.cmb_xfer_capable = true,
.supports_tpm = false,
.cs_count = 1,
};
static struct tegra_qspi_soc_data tegra234_qspi_soc_data = {
- .has_dma = false,
+ .has_ext_dma = false,
.cmb_xfer_capable = true,
.supports_tpm = true,
.cs_count = 1,
};
static struct tegra_qspi_soc_data tegra241_qspi_soc_data = {
- .has_dma = false,
+ .has_ext_dma = true,
.cmb_xfer_capable = true,
.supports_tpm = true,
.cs_count = 4,
--
2.17.1
On Fri, Jan 03, 2025 at 06:04:07AM +0000, Vishwaroop A wrote: > Change-Id: Icf3ef4767947cef67821c092ecd9ea6bccb2a4e4 Please don't include internal only things like change IDs in upstream submissions.
Hi Vishwaroop,
kernel test robot noticed the following build warnings:
[auto build test WARNING on broonie-spi/for-next]
[also build test WARNING on robh/for-next broonie-sound/for-next linus/master v6.13-rc5 next-20241220]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]
url: https://github.com/intel-lab-lkp/linux/commits/Vishwaroop-A/arm64-tegra-Configure-QSPI-clocks-and-add-DMA/20250103-141217
base: https://git.kernel.org/pub/scm/linux/kernel/git/broonie/spi.git for-next
patch link: https://lore.kernel.org/r/20250103060407.1064107-7-va%40nvidia.com
patch subject: [PATCH V1 6/6] spi: tegra210-quad: Introduce native DMA support
config: arm-randconfig-001-20250104 (https://download.01.org/0day-ci/archive/20250104/202501040605.Ndat3QJw-lkp@intel.com/config)
compiler: clang version 20.0.0git (https://github.com/llvm/llvm-project 096551537b2a747a3387726ca618ceeb3950e9bc)
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20250104/202501040605.Ndat3QJw-lkp@intel.com/reproduce)
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202501040605.Ndat3QJw-lkp@intel.com/
All warnings (new ones prefixed by >>):
In file included from drivers/spi/spi-tegra210-quad.c:8:
In file included from include/linux/dmaengine.h:12:
In file included from include/linux/scatterlist.h:8:
In file included from include/linux/mm.h:2223:
include/linux/vmstat.h:518:36: warning: arithmetic between different enumeration types ('enum node_stat_item' and 'enum lru_list') [-Wenum-enum-conversion]
518 | return node_stat_name(NR_LRU_BASE + lru) + 3; // skip "nr_"
| ~~~~~~~~~~~ ^ ~~~
>> drivers/spi/spi-tegra210-quad.c:721:43: warning: shift count >= width of type [-Wshift-count-overflow]
721 | tegra_qspi_writel(tqspi, ((rx_dma_phys >> 32) & 0xff),
| ^ ~~
2 warnings generated.
vim +721 drivers/spi/spi-tegra210-quad.c
616
617 static int tegra_qspi_start_dma_based_transfer(struct tegra_qspi *tqspi, struct spi_transfer *t)
618 {
619 struct dma_slave_config dma_sconfig = { 0 };
620 dma_addr_t rx_dma_phys, tx_dma_phys;
621 unsigned int len;
622 u8 dma_burst;
623 int ret = 0;
624 u32 val;
625 bool has_ext_dma = tqspi->soc_data->has_ext_dma;
626
627 if (tqspi->is_packed) {
628 ret = tegra_qspi_dma_map_xfer(tqspi, t);
629 if (ret < 0)
630 return ret;
631 }
632
633 val = QSPI_DMA_BLK_SET(tqspi->curr_dma_words - 1);
634 tegra_qspi_writel(tqspi, val, QSPI_DMA_BLK);
635
636 tegra_qspi_unmask_irq(tqspi);
637
638 if (tqspi->is_packed)
639 len = DIV_ROUND_UP(tqspi->curr_dma_words * tqspi->bytes_per_word, 4) * 4;
640 else
641 len = tqspi->curr_dma_words * 4;
642
643 /* set attention level based on length of transfer */
644 if (has_ext_dma) {
645 val = 0;
646 if (len & 0xf) {
647 val |= QSPI_TX_TRIG_1 | QSPI_RX_TRIG_1;
648 dma_burst = 1;
649 } else if (((len) >> 4) & 0x1) {
650 val |= QSPI_TX_TRIG_4 | QSPI_RX_TRIG_4;
651 dma_burst = 4;
652 } else {
653 val |= QSPI_TX_TRIG_8 | QSPI_RX_TRIG_8;
654 dma_burst = 8;
655 }
656
657 tegra_qspi_writel(tqspi, val, QSPI_DMA_CTL);
658 }
659
660 tqspi->dma_control_reg = val;
661
662 dma_sconfig.device_fc = true;
663
664 if ((tqspi->cur_direction & DATA_DIR_TX)) {
665 if (has_ext_dma) {
666 dma_sconfig.dst_addr = tqspi->phys + QSPI_TX_FIFO;
667 dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
668 dma_sconfig.dst_maxburst = dma_burst;
669 ret = dmaengine_slave_config(tqspi->tx_dma_chan, &dma_sconfig);
670 if (ret < 0) {
671 dev_err(tqspi->dev, "failed DMA slave config: %d\n", ret);
672 return ret;
673 }
674
675 tegra_qspi_copy_client_txbuf_to_qspi_txbuf(tqspi, t);
676 ret = tegra_qspi_start_tx_dma(tqspi, t, len);
677 if (ret < 0) {
678 dev_err(tqspi->dev, "failed to starting TX DMA: %d\n", ret);
679 return ret;
680 }
681 } else {
682 if (tqspi->is_packed)
683 tx_dma_phys = t->tx_dma;
684 else
685 tx_dma_phys = tqspi->tx_dma_phys;
686 tegra_qspi_copy_client_txbuf_to_qspi_txbuf(tqspi, t);
687 tegra_qspi_writel(tqspi, lower_32_bits(tx_dma_phys),
688 QSPI_DMA_MEM_ADDRESS_REG);
689 tegra_qspi_writel(tqspi, (upper_32_bits(tx_dma_phys) & 0xff),
690 QSPI_DMA_HI_ADDRESS_REG);
691 }
692 }
693
694 if (tqspi->cur_direction & DATA_DIR_RX) {
695 if (has_ext_dma) {
696 dma_sconfig.src_addr = tqspi->phys + QSPI_RX_FIFO;
697 dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
698 dma_sconfig.src_maxburst = dma_burst;
699 ret = dmaengine_slave_config(tqspi->rx_dma_chan, &dma_sconfig);
700 if (ret < 0) {
701 dev_err(tqspi->dev, "failed DMA slave config: %d\n", ret);
702 return ret;
703 }
704 dma_sync_single_for_device(tqspi->dev, tqspi->rx_dma_phys,
705 tqspi->dma_buf_size, DMA_FROM_DEVICE);
706 ret = tegra_qspi_start_rx_dma(tqspi, t, len);
707 if (ret < 0) {
708 dev_err(tqspi->dev, "failed to start RX DMA: %d\n", ret);
709 if (tqspi->cur_direction & DATA_DIR_TX)
710 dmaengine_terminate_all(tqspi->tx_dma_chan);
711 return ret;
712 }
713 } else {
714 if (tqspi->is_packed)
715 rx_dma_phys = t->rx_dma;
716 else
717 rx_dma_phys = tqspi->rx_dma_phys;
718
719 tegra_qspi_writel(tqspi, (rx_dma_phys & 0xffffffff),
720 QSPI_DMA_MEM_ADDRESS_REG);
> 721 tegra_qspi_writel(tqspi, ((rx_dma_phys >> 32) & 0xff),
722 QSPI_DMA_HI_ADDRESS_REG);
723 }
724 }
725
726 tegra_qspi_writel(tqspi, tqspi->command1_reg, QSPI_COMMAND1);
727
728 tqspi->is_curr_dma_xfer = true;
729 tqspi->dma_control_reg = val;
730 val |= QSPI_DMA_EN;
731 tegra_qspi_writel(tqspi, val, QSPI_DMA_CTL);
732
733 return ret;
734 }
735
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
Hi Vishwaroop,
kernel test robot noticed the following build warnings:
[auto build test WARNING on broonie-spi/for-next]
[also build test WARNING on robh/for-next broonie-sound/for-next linus/master v6.13-rc5 next-20241220]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]
url: https://github.com/intel-lab-lkp/linux/commits/Vishwaroop-A/arm64-tegra-Configure-QSPI-clocks-and-add-DMA/20250103-141217
base: https://git.kernel.org/pub/scm/linux/kernel/git/broonie/spi.git for-next
patch link: https://lore.kernel.org/r/20250103060407.1064107-7-va%40nvidia.com
patch subject: [PATCH V1 6/6] spi: tegra210-quad: Introduce native DMA support
config: sparc-randconfig-002-20250103 (https://download.01.org/0day-ci/archive/20250103/202501032202.m6t4wlQB-lkp@intel.com/config)
compiler: sparc-linux-gcc (GCC) 14.2.0
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20250103/202501032202.m6t4wlQB-lkp@intel.com/reproduce)
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202501032202.m6t4wlQB-lkp@intel.com/
All warnings (new ones prefixed by >>):
drivers/spi/spi-tegra210-quad.c: In function 'tegra_qspi_start_dma_based_transfer':
>> drivers/spi/spi-tegra210-quad.c:721:64: warning: right shift count >= width of type [-Wshift-count-overflow]
721 | tegra_qspi_writel(tqspi, ((rx_dma_phys >> 32) & 0xff),
| ^~
vim +721 drivers/spi/spi-tegra210-quad.c
616
617 static int tegra_qspi_start_dma_based_transfer(struct tegra_qspi *tqspi, struct spi_transfer *t)
618 {
619 struct dma_slave_config dma_sconfig = { 0 };
620 dma_addr_t rx_dma_phys, tx_dma_phys;
621 unsigned int len;
622 u8 dma_burst;
623 int ret = 0;
624 u32 val;
625 bool has_ext_dma = tqspi->soc_data->has_ext_dma;
626
627 if (tqspi->is_packed) {
628 ret = tegra_qspi_dma_map_xfer(tqspi, t);
629 if (ret < 0)
630 return ret;
631 }
632
633 val = QSPI_DMA_BLK_SET(tqspi->curr_dma_words - 1);
634 tegra_qspi_writel(tqspi, val, QSPI_DMA_BLK);
635
636 tegra_qspi_unmask_irq(tqspi);
637
638 if (tqspi->is_packed)
639 len = DIV_ROUND_UP(tqspi->curr_dma_words * tqspi->bytes_per_word, 4) * 4;
640 else
641 len = tqspi->curr_dma_words * 4;
642
643 /* set attention level based on length of transfer */
644 if (has_ext_dma) {
645 val = 0;
646 if (len & 0xf) {
647 val |= QSPI_TX_TRIG_1 | QSPI_RX_TRIG_1;
648 dma_burst = 1;
649 } else if (((len) >> 4) & 0x1) {
650 val |= QSPI_TX_TRIG_4 | QSPI_RX_TRIG_4;
651 dma_burst = 4;
652 } else {
653 val |= QSPI_TX_TRIG_8 | QSPI_RX_TRIG_8;
654 dma_burst = 8;
655 }
656
657 tegra_qspi_writel(tqspi, val, QSPI_DMA_CTL);
658 }
659
660 tqspi->dma_control_reg = val;
661
662 dma_sconfig.device_fc = true;
663
664 if ((tqspi->cur_direction & DATA_DIR_TX)) {
665 if (has_ext_dma) {
666 dma_sconfig.dst_addr = tqspi->phys + QSPI_TX_FIFO;
667 dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
668 dma_sconfig.dst_maxburst = dma_burst;
669 ret = dmaengine_slave_config(tqspi->tx_dma_chan, &dma_sconfig);
670 if (ret < 0) {
671 dev_err(tqspi->dev, "failed DMA slave config: %d\n", ret);
672 return ret;
673 }
674
675 tegra_qspi_copy_client_txbuf_to_qspi_txbuf(tqspi, t);
676 ret = tegra_qspi_start_tx_dma(tqspi, t, len);
677 if (ret < 0) {
678 dev_err(tqspi->dev, "failed to starting TX DMA: %d\n", ret);
679 return ret;
680 }
681 } else {
682 if (tqspi->is_packed)
683 tx_dma_phys = t->tx_dma;
684 else
685 tx_dma_phys = tqspi->tx_dma_phys;
686 tegra_qspi_copy_client_txbuf_to_qspi_txbuf(tqspi, t);
687 tegra_qspi_writel(tqspi, lower_32_bits(tx_dma_phys),
688 QSPI_DMA_MEM_ADDRESS_REG);
689 tegra_qspi_writel(tqspi, (upper_32_bits(tx_dma_phys) & 0xff),
690 QSPI_DMA_HI_ADDRESS_REG);
691 }
692 }
693
694 if (tqspi->cur_direction & DATA_DIR_RX) {
695 if (has_ext_dma) {
696 dma_sconfig.src_addr = tqspi->phys + QSPI_RX_FIFO;
697 dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
698 dma_sconfig.src_maxburst = dma_burst;
699 ret = dmaengine_slave_config(tqspi->rx_dma_chan, &dma_sconfig);
700 if (ret < 0) {
701 dev_err(tqspi->dev, "failed DMA slave config: %d\n", ret);
702 return ret;
703 }
704 dma_sync_single_for_device(tqspi->dev, tqspi->rx_dma_phys,
705 tqspi->dma_buf_size, DMA_FROM_DEVICE);
706 ret = tegra_qspi_start_rx_dma(tqspi, t, len);
707 if (ret < 0) {
708 dev_err(tqspi->dev, "failed to start RX DMA: %d\n", ret);
709 if (tqspi->cur_direction & DATA_DIR_TX)
710 dmaengine_terminate_all(tqspi->tx_dma_chan);
711 return ret;
712 }
713 } else {
714 if (tqspi->is_packed)
715 rx_dma_phys = t->rx_dma;
716 else
717 rx_dma_phys = tqspi->rx_dma_phys;
718
719 tegra_qspi_writel(tqspi, (rx_dma_phys & 0xffffffff),
720 QSPI_DMA_MEM_ADDRESS_REG);
> 721 tegra_qspi_writel(tqspi, ((rx_dma_phys >> 32) & 0xff),
722 QSPI_DMA_HI_ADDRESS_REG);
723 }
724 }
725
726 tegra_qspi_writel(tqspi, tqspi->command1_reg, QSPI_COMMAND1);
727
728 tqspi->is_curr_dma_xfer = true;
729 tqspi->dma_control_reg = val;
730 val |= QSPI_DMA_EN;
731 tegra_qspi_writel(tqspi, val, QSPI_DMA_CTL);
732
733 return ret;
734 }
735
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
© 2016 - 2026 Red Hat, Inc.