From 8b6bc5fd71e677864d1a3b896b3069a6e0c5e214 Mon Sep 17 00:00:00 2001
From: Zhenfang Wang
Date: Thu, 12 Sep 2019 13:47:18 +0800
Subject: [PATCH 1/8] dmaengine: sprd: Fix the link-list pointer register configuration issue

We set the link-list pointer register to point to the next link-list
configuration's physical address, so that the controller can load the
DMA configuration from the link-list node automatically. But the
link-list node's physical address can be larger than 32 bits, while the
Spreadtrum DMA driver currently only supports a 32-bit physical address,
which may cause an incorrect DMA configuration to be loaded when
starting the link-list transfer mode.

According to the DMA datasheet, we can use the SRC_BLK_STEP register
(bit28 - bit31) to save the high bits of the link-list node's physical
address to fix this issue.

Fixes: 4ac695464763 ("dmaengine: sprd: Support DMA link-list mode")
Signed-off-by: Zhenfang Wang
Signed-off-by: Baolin Wang
Link: https://lore.kernel.org/r/eadfe9295499efa003e1c344e67e2890f9d1d780.1568267061.git.baolin.wang@linaro.org
Signed-off-by: Vinod Koul
---
 drivers/dma/sprd-dma.c | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)

diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c
index 525dc7338fe3..a4a91f233121 100644
--- a/drivers/dma/sprd-dma.c
+++ b/drivers/dma/sprd-dma.c
@@ -134,6 +134,10 @@
 #define SPRD_DMA_SRC_TRSF_STEP_OFFSET 0
 #define SPRD_DMA_TRSF_STEP_MASK GENMASK(15, 0)
 
+/* SPRD DMA_SRC_BLK_STEP register definition */
+#define SPRD_DMA_LLIST_HIGH_MASK GENMASK(31, 28)
+#define SPRD_DMA_LLIST_HIGH_SHIFT 28
+
 /* define DMA channel mode & trigger mode mask */
 #define SPRD_DMA_CHN_MODE_MASK GENMASK(7, 0)
 #define SPRD_DMA_TRG_MODE_MASK GENMASK(7, 0)
@@ -717,6 +721,7 @@ static int sprd_dma_fill_desc(struct dma_chan *chan,
         u32 int_mode = flags & SPRD_DMA_INT_MASK;
         int src_datawidth, dst_datawidth, src_step, dst_step;
         u32 temp, fix_mode = 0, fix_en = 0;
+        phys_addr_t llist_ptr;
 
         if (dir == DMA_MEM_TO_DEV) {
                 src_step = sprd_dma_get_step(slave_cfg->src_addr_width);
@@ -814,13 +819,16 @@ static int sprd_dma_fill_desc(struct dma_chan *chan,
                  * Set the link-list pointer point to next link-list
                  * configuration's physical address.
                  */
-                hw->llist_ptr = schan->linklist.phy_addr + temp;
+                llist_ptr = schan->linklist.phy_addr + temp;
+                hw->llist_ptr = lower_32_bits(llist_ptr);
+                hw->src_blk_step = (upper_32_bits(llist_ptr) << SPRD_DMA_LLIST_HIGH_SHIFT) &
+                        SPRD_DMA_LLIST_HIGH_MASK;
         } else {
                 hw->llist_ptr = 0;
+                hw->src_blk_step = 0;
         }
 
         hw->frg_step = 0;
-        hw->src_blk_step = 0;
         hw->des_blk_step = 0;
         return 0;
 }

From 9ec691f48b5ef741a48af8932ccaec859c67e8f1 Mon Sep 17 00:00:00 2001
From: Sameer Pujar
Date: Mon, 16 Sep 2019 15:05:13 +0530
Subject: [PATCH 2/8] dmaengine: tegra210-adma: fix transfer failure

From Tegra186 onwards an OUTSTANDING_REQUESTS field is added in the
channel configuration register (bits 7:4), which defines the maximum
number of reads from the source and writes to the destination that may
be outstanding at any given point of time. This field must be
programmed with a value between 1 and 8; a value of 0 will prevent any
transfers from happening.

Thus add a 'has_outstanding_reqs' bool member to the chip data structure
and set it to false for Tegra210, since the field is not applicable
there. For Tegra186 it is set to true and the channel configuration is
updated with the maximum outstanding requests.

Fixes: 433de642a76c ("dmaengine: tegra210-adma: add support for Tegra186/Tegra194")
Cc: stable@vger.kernel.org
Signed-off-by: Sameer Pujar
Acked-by: Jon Hunter
Link: https://lore.kernel.org/r/1568626513-16541-1-git-send-email-spujar@nvidia.com
Signed-off-by: Vinod Koul
---
 drivers/dma/tegra210-adma.c | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c
index 5f8adf5c1f20..6e1268552f74 100644
--- a/drivers/dma/tegra210-adma.c
+++ b/drivers/dma/tegra210-adma.c
@@ -40,6 +40,7 @@
 #define ADMA_CH_CONFIG_MAX_BURST_SIZE 16
 #define ADMA_CH_CONFIG_WEIGHT_FOR_WRR(val) ((val) & 0xf)
 #define ADMA_CH_CONFIG_MAX_BUFS 8
+#define TEGRA186_ADMA_CH_CONFIG_OUTSTANDING_REQS(reqs) (reqs << 4)
 
 #define ADMA_CH_FIFO_CTRL 0x2c
 #define TEGRA210_ADMA_CH_FIFO_CTRL_TXSIZE(val) (((val) & 0xf) << 8)
@@ -77,6 +78,7 @@ struct tegra_adma;
  * @ch_req_tx_shift: Register offset for AHUB transmit channel select.
  * @ch_req_rx_shift: Register offset for AHUB receive channel select.
  * @ch_base_offset: Register offset of DMA channel registers.
+ * @has_outstanding_reqs: If DMA channel can have outstanding requests.
  * @ch_fifo_ctrl: Default value for channel FIFO CTRL register.
  * @ch_req_mask: Mask for Tx or Rx channel select.
  * @ch_req_max: Maximum number of Tx or Rx channels available.
@@ -95,6 +97,7 @@ struct tegra_adma_chip_data {
         unsigned int ch_req_max;
         unsigned int ch_reg_size;
         unsigned int nr_channels;
+        bool has_outstanding_reqs;
 };
 
 /*
@@ -594,6 +597,8 @@ static int tegra_adma_set_xfer_params(struct tegra_adma_chan *tdc,
                          ADMA_CH_CTRL_FLOWCTRL_EN;
         ch_regs->config |= cdata->adma_get_burst_config(burst_size);
         ch_regs->config |= ADMA_CH_CONFIG_WEIGHT_FOR_WRR(1);
+        if (cdata->has_outstanding_reqs)
+                ch_regs->config |= TEGRA186_ADMA_CH_CONFIG_OUTSTANDING_REQS(8);
         ch_regs->fifo_ctrl = cdata->ch_fifo_ctrl;
         ch_regs->tc = desc->period_len & ADMA_CH_TC_COUNT_MASK;
 
@@ -778,6 +783,7 @@ static const struct tegra_adma_chip_data tegra210_chip_data = {
         .ch_req_tx_shift = 28,
         .ch_req_rx_shift = 24,
         .ch_base_offset = 0,
+        .has_outstanding_reqs = false,
         .ch_fifo_ctrl = TEGRA210_FIFO_CTRL_DEFAULT,
         .ch_req_mask = 0xf,
         .ch_req_max = 10,
@@ -792,6 +798,7 @@ static const struct tegra_adma_chip_data tegra186_chip_data = {
         .ch_req_tx_shift = 27,
         .ch_req_rx_shift = 22,
         .ch_base_offset = 0x10000,
+        .has_outstanding_reqs = true,
         .ch_fifo_ctrl = TEGRA186_FIFO_CTRL_DEFAULT,
         .ch_req_mask = 0x1f,
         .ch_req_max = 20,

From bd73dfabdda280fc5f05bdec79b6721b4b2f035f Mon Sep 17 00:00:00 2001
From: Robin Gong
Date: Tue, 24 Sep 2019 09:49:18 +0000
Subject: [PATCH 3/8] dmaengine: imx-sdma: fix size check for sdma script_number

Illegal memory will be touched if SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3 (41)
exceeds the size of struct sdma_script_start_addrs (40), which corrupts
memory such as the slob block header, so that the kernel gets stuck in a
while() loop forever in slob_free(). Please refer to the code piece
below in imx-sdma.c:

        for (i = 0; i < sdma->script_number; i++)
                if (addr_arr[i] > 0)
                        saddr_arr[i] = addr_arr[i]; /* memory corrupted here */

The issue was introduced by commit a572460be9cf ("dmaengine: imx-sdma:
Add support for version 3 firmware"), because
SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3 (38->41, 3 scripts added) does not align
with the number of scripts added to sdma_script_start_addrs (2 scripts).

Fixes: a572460be9cf ("dmaengine: imx-sdma: Add support for version 3 firmware")
Cc: stable@vger.kernel.org
Link: https://www.spinics.net/lists/arm-kernel/msg754895.html
Signed-off-by: Robin Gong
Reported-by: Jurgen Lambrecht
Link: https://lore.kernel.org/r/1569347584-3478-1-git-send-email-yibin.gong@nxp.com
[vkoul: update the patch title]
Signed-off-by: Vinod Koul
---
 drivers/dma/imx-sdma.c                     | 8 ++++++++
 include/linux/platform_data/dma-imx-sdma.h | 3 +++
 2 files changed, 11 insertions(+)

diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 9ba74ab7e912..c27e206a764c 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -1707,6 +1707,14 @@ static void sdma_add_scripts(struct sdma_engine *sdma,
         if (!sdma->script_number)
                 sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1;
 
+        if (sdma->script_number > sizeof(struct sdma_script_start_addrs)
+                                  / sizeof(s32)) {
+                dev_err(sdma->dev,
+                        "SDMA script number %d not match with firmware.\n",
+                        sdma->script_number);
+                return;
+        }
+
         for (i = 0; i < sdma->script_number; i++)
                 if (addr_arr[i] > 0)
                         saddr_arr[i] = addr_arr[i];
diff --git a/include/linux/platform_data/dma-imx-sdma.h b/include/linux/platform_data/dma-imx-sdma.h
index 6eaa53cef0bd..30e676b36b24 100644
--- a/include/linux/platform_data/dma-imx-sdma.h
+++ b/include/linux/platform_data/dma-imx-sdma.h
@@ -51,7 +51,10 @@ struct sdma_script_start_addrs {
         /* End of v2 array */
         s32 zcanfd_2_mcu_addr;
         s32 zqspi_2_mcu_addr;
+        s32 mcu_2_ecspi_addr;
         /* End of v3 array */
+        s32 mcu_2_zqspi_addr;
+        /* End of v4 array */
 };
 
 /**

From 68fe2b520cee829ed518b4b1f64d2a557bcbffe1 Mon Sep 17 00:00:00 2001
From: Radhey Shyam Pandey
Date: Thu, 26 Sep 2019 16:20:57 +0530
Subject: [PATCH 4/8] dmaengine: xilinx_dma: Fix 64-bit simple AXIDMA transfer

In AXI DMA simple mode, also pass the MSB bits of the source and
destination addresses to the xilinx_write function. This fixes the
simple AXI DMA operation mode when using 64-bit addressing.

Signed-off-by: Radhey Shyam Pandey
Link: https://lore.kernel.org/r/1569495060-18117-2-git-send-email-radhey.shyam.pandey@xilinx.com
Signed-off-by: Vinod Koul
---
 drivers/dma/xilinx/xilinx_dma.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index e7dc3c4dc8e0..1fbe0258578b 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -1354,7 +1354,8 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
                                            node);
                 hw = &segment->hw;
 
-                xilinx_write(chan, XILINX_DMA_REG_SRCDSTADDR, hw->buf_addr);
+                xilinx_write(chan, XILINX_DMA_REG_SRCDSTADDR,
+                             xilinx_prep_dma_addr_t(hw->buf_addr));
 
                 /* Start the transfer */
                 dma_ctrl_write(chan, XILINX_DMA_REG_BTT,

From 6c6de1ddb1be3840f2ed5cc9d009a622720940c9 Mon Sep 17 00:00:00 2001
From: Radhey Shyam Pandey
Date: Thu, 26 Sep 2019 16:20:58 +0530
Subject: [PATCH 5/8] dmaengine: xilinx_dma: Fix control reg update in vdma_channel_set_config

In vdma_channel_set_config, clear the delay, frame count and master mask
before updating their new values. This avoids programming an incorrect
state when the input parameters differ from the defaults.

Signed-off-by: Radhey Shyam Pandey
Acked-by: Appana Durga Kedareswara rao
Signed-off-by: Michal Simek
Link: https://lore.kernel.org/r/1569495060-18117-3-git-send-email-radhey.shyam.pandey@xilinx.com
Signed-off-by: Vinod Koul
---
 drivers/dma/xilinx/xilinx_dma.c | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index 1fbe0258578b..5d56f1e4d332 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -68,6 +68,9 @@
 #define XILINX_DMA_DMACR_CIRC_EN BIT(1)
 #define XILINX_DMA_DMACR_RUNSTOP BIT(0)
 #define XILINX_DMA_DMACR_FSYNCSRC_MASK GENMASK(6, 5)
+#define XILINX_DMA_DMACR_DELAY_MASK GENMASK(31, 24)
+#define XILINX_DMA_DMACR_FRAME_COUNT_MASK GENMASK(23, 16)
+#define XILINX_DMA_DMACR_MASTER_MASK GENMASK(11, 8)
 
 #define XILINX_DMA_REG_DMASR 0x0004
 #define XILINX_DMA_DMASR_EOL_LATE_ERR BIT(15)
@@ -2118,8 +2121,10 @@ int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
         chan->config.gen_lock = cfg->gen_lock;
         chan->config.master = cfg->master;
 
+        dmacr &= ~XILINX_DMA_DMACR_GENLOCK_EN;
         if (cfg->gen_lock && chan->genlock) {
                 dmacr |= XILINX_DMA_DMACR_GENLOCK_EN;
+                dmacr &= ~XILINX_DMA_DMACR_MASTER_MASK;
                 dmacr |= cfg->master << XILINX_DMA_DMACR_MASTER_SHIFT;
         }
 
@@ -2135,11 +2140,13 @@ int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
         chan->config.delay = cfg->delay;
 
         if (cfg->coalesc <= XILINX_DMA_DMACR_FRAME_COUNT_MAX) {
+                dmacr &= ~XILINX_DMA_DMACR_FRAME_COUNT_MASK;
                 dmacr |= cfg->coalesc << XILINX_DMA_DMACR_FRAME_COUNT_SHIFT;
                 chan->config.coalesc = cfg->coalesc;
         }
 
         if (cfg->delay <= XILINX_DMA_DMACR_DELAY_MAX) {
+                dmacr &= ~XILINX_DMA_DMACR_DELAY_MASK;
                 dmacr |= cfg->delay << XILINX_DMA_DMACR_DELAY_SHIFT;
                 chan->config.delay = cfg->delay;
         }

From ec1ac309596a7bdf206743b092748205f6cd5720 Mon Sep 17 00:00:00 2001
From: Baolin Wang
Date: Wed, 9 Oct 2019 17:11:30 +0800
Subject: [PATCH 6/8] dmaengine: sprd: Fix the possible memory leak issue

If we terminate the channel to free all descriptors associated with this
channel, we will leak the memory of the current descriptor if it is not
completed, since it has already been deleted from the desc_issued list
but has not yet been added to the desc_completed list.

Thus, when freeing the descriptors associated with one channel, check
whether the current descriptor is completed; if not, free it explicitly
to avoid this issue.

Fixes: 9b3b8171f7f4 ("dmaengine: sprd: Add Spreadtrum DMA driver")
Reported-by: Zhenfang Wang
Tested-by: Zhenfang Wang
Signed-off-by: Baolin Wang
Link: https://lore.kernel.org/r/170dbbc6d5366b6fa974ce2d366652e23a334251.1570609788.git.baolin.wang@linaro.org
Signed-off-by: Vinod Koul
---
 drivers/dma/sprd-dma.c | 15 +++++++++++++++
 1 file changed, 15 insertions(+)

diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c
index a4a91f233121..8546ad034720 100644
--- a/drivers/dma/sprd-dma.c
+++ b/drivers/dma/sprd-dma.c
@@ -212,6 +212,7 @@ struct sprd_dma_dev {
         struct sprd_dma_chn channels[0];
 };
 
+static void sprd_dma_free_desc(struct virt_dma_desc *vd);
 static bool sprd_dma_filter_fn(struct dma_chan *chan, void *param);
 static struct of_dma_filter_info sprd_dma_info = {
         .filter_fn = sprd_dma_filter_fn,
@@ -613,12 +614,19 @@ static int sprd_dma_alloc_chan_resources(struct dma_chan *chan)
 static void sprd_dma_free_chan_resources(struct dma_chan *chan)
 {
         struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
+        struct virt_dma_desc *cur_vd = NULL;
         unsigned long flags;
 
         spin_lock_irqsave(&schan->vc.lock, flags);
+        if (schan->cur_desc)
+                cur_vd = &schan->cur_desc->vd;
+
         sprd_dma_stop(schan);
         spin_unlock_irqrestore(&schan->vc.lock, flags);
 
+        if (cur_vd)
+                sprd_dma_free_desc(cur_vd);
+
         vchan_free_chan_resources(&schan->vc);
         pm_runtime_put(chan->device->dev);
 }
@@ -1031,15 +1039,22 @@ static int sprd_dma_resume(struct dma_chan *chan)
 static int sprd_dma_terminate_all(struct dma_chan *chan)
 {
         struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
+        struct virt_dma_desc *cur_vd = NULL;
         unsigned long flags;
         LIST_HEAD(head);
 
         spin_lock_irqsave(&schan->vc.lock, flags);
+        if (schan->cur_desc)
+                cur_vd = &schan->cur_desc->vd;
+
         sprd_dma_stop(schan);
 
         vchan_get_all_descriptors(&schan->vc, &head);
         spin_unlock_irqrestore(&schan->vc.lock, flags);
 
+        if (cur_vd)
+                sprd_dma_free_desc(cur_vd);
+
         vchan_dma_desc_free_list(&schan->vc, &head);
         return 0;
 }

From 7667819385457b4aeb5fac94f67f52ab52cc10d5 Mon Sep 17 00:00:00 2001
From: Jeffrey Hugo
Date: Thu, 17 Oct 2019 08:26:06 -0700
Subject: [PATCH 7/8] dmaengine: qcom: bam_dma: Fix resource leak

bam_dma_terminate_all() will leak resources if any of the transactions
are committed to the hardware (present in the desc fifo), and not
complete. Since bam_dma_terminate_all() does not cause the hardware to
be updated, the hardware will still operate on any previously committed
transactions. This can cause memory corruption if the memory for the
transaction has been reassigned, and will cause a sync issue between the
BAM and its client(s).

Fix this by properly updating the hardware in bam_dma_terminate_all().

Fixes: e7c0fe2a5c84 ("dmaengine: add Qualcomm BAM dma driver")
Signed-off-by: Jeffrey Hugo
Cc: stable@vger.kernel.org
Link: https://lore.kernel.org/r/20191017152606.34120-1-jeffrey.l.hugo@gmail.com
Signed-off-by: Vinod Koul
---
 drivers/dma/qcom/bam_dma.c | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+)

diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c
index 8e90a405939d..ef73f65224b1 100644
--- a/drivers/dma/qcom/bam_dma.c
+++ b/drivers/dma/qcom/bam_dma.c
@@ -694,6 +694,25 @@ static int bam_dma_terminate_all(struct dma_chan *chan)
         /* remove all transactions, including active transaction */
         spin_lock_irqsave(&bchan->vc.lock, flag);
 
+        /*
+         * If we have transactions queued, then some might be committed to the
+         * hardware in the desc fifo. The only way to reset the desc fifo is
+         * to do a hardware reset (either by pipe or the entire block).
+         * bam_chan_init_hw() will trigger a pipe reset, and also reinit the
+         * pipe. If the pipe is left disabled (default state after pipe reset)
+         * and is accessed by a connected hardware engine, a fatal error in
+         * the BAM will occur. There is a small window where this could happen
+         * with bam_chan_init_hw(), but it is assumed that the caller has
+         * stopped activity on any attached hardware engine. Make sure to do
+         * this first so that the BAM hardware doesn't cause memory corruption
+         * by accessing freed resources.
+         */
+        if (!list_empty(&bchan->desc_list)) {
+                async_desc = list_first_entry(&bchan->desc_list,
+                                              struct bam_async_desc, desc_node);
+                bam_chan_init_hw(bchan, async_desc->dir);
+        }
+
         list_for_each_entry_safe(async_desc, tmp,
                                  &bchan->desc_list, desc_node) {
                 list_add(&async_desc->vd.node, &bchan->vc.desc_issued);

From bacdcb6675e170bb2e8d3824da220e10274f42a7 Mon Sep 17 00:00:00 2001
From: Tony Lindgren
Date: Wed, 23 Oct 2019 08:31:38 -0700
Subject: [PATCH 8/8] dmaengine: cppi41: Fix cppi41_dma_prep_slave_sg() when idle

Yegor Yefremov reported that musb and ftdi uart can fail for the first
open of the uart unless connected using a hub. This is because the first
dma call done by musb_ep_program() must wait if cppi41 is PM runtime
suspended. Otherwise musb_ep_program() continues with other non-dma
packets before the DMA transfer is started causing at least ftdi uarts
to fail to receive data.

Let's fix the issue by waking up cppi41 with PM runtime calls added to
cppi41_dma_prep_slave_sg() and return NULL if still idled. This way we
have musb_ep_program() continue with PIO until cppi41 is awake.

Fixes: fdea2d09b997 ("dmaengine: cppi41: Add basic PM runtime support")
Reported-by: Yegor Yefremov
Signed-off-by: Tony Lindgren
Cc: stable@vger.kernel.org # v4.9+
Link: https://lore.kernel.org/r/20191023153138.23442-1-tony@atomide.com
Signed-off-by: Vinod Koul
---
 drivers/dma/ti/cppi41.c | 21 ++++++++++++++++++++-
 1 file changed, 20 insertions(+), 1 deletion(-)

diff --git a/drivers/dma/ti/cppi41.c b/drivers/dma/ti/cppi41.c
index 2f946f55076c..8c2f7ebe998c 100644
--- a/drivers/dma/ti/cppi41.c
+++ b/drivers/dma/ti/cppi41.c
@@ -586,9 +586,22 @@ static struct dma_async_tx_descriptor *cppi41_dma_prep_slave_sg(
         enum dma_transfer_direction dir, unsigned long tx_flags, void *context)
 {
         struct cppi41_channel *c = to_cpp41_chan(chan);
+        struct dma_async_tx_descriptor *txd = NULL;
+        struct cppi41_dd *cdd = c->cdd;
         struct cppi41_desc *d;
         struct scatterlist *sg;
         unsigned int i;
+        int error;
+
+        error = pm_runtime_get(cdd->ddev.dev);
+        if (error < 0) {
+                pm_runtime_put_noidle(cdd->ddev.dev);
+
+                return NULL;
+        }
+
+        if (cdd->is_suspended)
+                goto err_out_not_ready;
 
         d = c->desc;
         for_each_sg(sgl, sg, sg_len, i) {
@@ -611,7 +624,13 @@ static struct dma_async_tx_descriptor *cppi41_dma_prep_slave_sg(
                 d++;
         }
 
-        return &c->txd;
+        txd = &c->txd;
+
+err_out_not_ready:
+        pm_runtime_mark_last_busy(cdd->ddev.dev);
+        pm_runtime_put_autosuspend(cdd->ddev.dev);
+
+        return txd;
 }
 
 static void cppi41_compute_td_desc(struct cppi41_desc *d)