Diffstat (limited to 'target/linux/ipq40xx/patches-4.14/040-dmaengine-qcom-bam-Process-multiple-pending-descript.patch')
-rw-r--r-- | target/linux/ipq40xx/patches-4.14/040-dmaengine-qcom-bam-Process-multiple-pending-descript.patch | 53 |
1 file changed, 34 insertions(+), 19 deletions(-)
diff --git a/target/linux/ipq40xx/patches-4.14/040-dmaengine-qcom-bam-Process-multiple-pending-descript.patch b/target/linux/ipq40xx/patches-4.14/040-dmaengine-qcom-bam-Process-multiple-pending-descript.patch
index 881d08c7e5..164b3fb48e 100644
--- a/target/linux/ipq40xx/patches-4.14/040-dmaengine-qcom-bam-Process-multiple-pending-descript.patch
+++ b/target/linux/ipq40xx/patches-4.14/040-dmaengine-qcom-bam-Process-multiple-pending-descript.patch
@@ -132,7 +132,7 @@ Signed-off-by: Vinod Koul <vinod.koul@intel.com>
      async_desc->num_desc = num_alloc;
      async_desc->curr_desc = async_desc->desc;
-@@ -685,14 +688,16 @@ err_out:
+@@ -685,29 +688,16 @@ err_out:
  static int bam_dma_terminate_all(struct dma_chan *chan)
  {
      struct bam_chan *bchan = to_bam_chan(chan);
@@ -142,17 +142,32 @@ Signed-off-by: Vinod Koul <vinod.koul@intel.com>
      /* remove all transactions, including active transaction */
      spin_lock_irqsave(&bchan->vc.lock, flag);
+-    /*
+-     * If we have transactions queued, then some might be committed to the
+-     * hardware in the desc fifo. The only way to reset the desc fifo is
+-     * to do a hardware reset (either by pipe or the entire block).
+-     * bam_chan_init_hw() will trigger a pipe reset, and also reinit the
+-     * pipe. If the pipe is left disabled (default state after pipe reset)
+-     * and is accessed by a connected hardware engine, a fatal error in
+-     * the BAM will occur. There is a small window where this could happen
+-     * with bam_chan_init_hw(), but it is assumed that the caller has
+-     * stopped activity on any attached hardware engine. Make sure to do
+-     * this first so that the BAM hardware doesn't cause memory corruption
+-     * by accessing freed resources.
+-     */
 -    if (bchan->curr_txd) {
+-        bam_chan_init_hw(bchan, bchan->curr_txd->dir);
 -        list_add(&bchan->curr_txd->vd.node, &bchan->vc.desc_issued);
 -        bchan->curr_txd = NULL;
-+    list_for_each_entry_safe(async_desc, tmp,
-+                 &bchan->desc_list, desc_node) {
-+        list_add(&async_desc->vd.node, &bchan->vc.desc_issued);
-+        list_del(&async_desc->desc_node);
-     }
+-    }
++    list_for_each_entry_safe(async_desc, tmp,
++                 &bchan->desc_list, desc_node) {
++        list_add(&async_desc->vd.node, &bchan->vc.desc_issued);
++        list_del(&async_desc->desc_node);
      vchan_get_all_descriptors(&bchan->vc, &head);
-@@ -764,9 +769,9 @@ static int bam_resume(struct dma_chan *c
+     spin_unlock_irqrestore(&bchan->vc.lock, flag);
+@@ -778,9 +768,9 @@ static int bam_resume(struct dma_chan *c
    */
  static u32 process_channel_irqs(struct bam_device *bdev)
  {
 -    u32 i, srcs, pipe_stts;
 +    u32 i, srcs, stts, pipe_stts;
      struct bam_chan *bchan;
      unsigned long flags;
      srcs = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_SRCS_EE));
-@@ -786,27 +791,40 @@ static u32 process_channel_irqs(struct b
+@@ -800,27 +790,40 @@ static u32 process_channel_irqs(struct b
          writel_relaxed(pipe_stts, bam_addr(bdev, i, BAM_P_IRQ_CLR));
          spin_lock_irqsave(&bchan->vc.lock, flags);
          spin_unlock_irqrestore(&bchan->vc.lock, flags);
-@@ -868,6 +886,7 @@ static enum dma_status bam_tx_status(str
+@@ -882,6 +885,7 @@ static enum dma_status bam_tx_status(str
          struct dma_tx_state *txstate)
  {
      struct bam_chan *bchan = to_bam_chan(chan);
 +    struct bam_async_desc *async_desc;
      struct virt_dma_desc *vd;
      int ret;
      size_t residue = 0;
-@@ -883,11 +902,17 @@ static enum dma_status bam_tx_status(str
+@@ -897,11 +901,17 @@ static enum dma_status bam_tx_status(str
      spin_lock_irqsave(&bchan->vc.lock, flags);
      vd = vchan_find_desc(&bchan->vc, cookie);
@@ -…,… +…,… @@ Signed-off-by: Vinod Koul <vinod.koul@intel.com>
      spin_unlock_irqrestore(&bchan->vc.lock, flags);
-@@ -928,63 +953,86 @@ static void bam_start_dma(struct bam_cha
+@@ -942,63 +952,86 @@ static void bam_start_dma(struct bam_cha
  {
      struct virt_dma_desc *vd = vchan_next_desc(&bchan->vc);
      struct bam_device *bdev = bchan->bdev;
 +        if (async_desc->num_desc > avail)
 +            async_desc->xfer_len = avail;
 +        else
 +            async_desc->xfer_len = async_desc->num_desc;
-+
+-    if (bchan->tail + async_desc->xfer_len > MAX_DESCRIPTORS) {
+-        u32 partial = MAX_DESCRIPTORS - bchan->tail;
 +        /* set any special flags on the last descriptor */
 +        if (async_desc->num_desc == async_desc->xfer_len)
 +            desc[async_desc->xfer_len - 1].flags |=
 +                cpu_to_le16(async_desc->flags);
-+    vd = vchan_next_desc(&bchan->vc);
 -    memcpy(&fifo[bchan->tail], desc,
 -        partial * sizeof(struct bam_desc_hw));
 -    memcpy(fifo, &desc[partial], (async_desc->xfer_len - partial) *
++        vd = vchan_next_desc(&bchan->vc);
++
 +        dmaengine_desc_get_callback(&async_desc->vd.tx, &cb);
 +
 +        /*
 +         * An interrupt is generated at this desc, if
 +         * - FIFO is FULL.
 +         * - No more descriptors to add.
 +         * - If a callback completion was requested for this DESC,
 +         *   In this case, BAM will deliver the completion callback
 +         *   for this desc and continue processing the next desc.
 +         */
      /* ensure descriptor writes and dma start not reordered */
      wmb();
-@@ -1013,7 +1061,7 @@ static void dma_tasklet(unsigned long da
+@@ -1027,7 +1060,7 @@ static void dma_tasklet(unsigned long da
          bchan = &bdev->channels[i];
          spin_lock_irqsave(&bchan->vc.lock, flags);
 -        if (bchan->head != bchan->tail)
 +        if (!list_empty(&bchan->desc_list))
              bam_start_dma(bchan);
          spin_unlock_irqrestore(&bchan->vc.lock, flags);
      }
-@@ -1034,7 +1082,7 @@ static void bam_issue_pending(struct dma
+@@ -1048,7 +1081,7 @@ static void bam_issue_pending(struct dma
      spin_lock_irqsave(&bchan->vc.lock, flags);
      /* if work pending and idle, start a transaction */
 -    if (vchan_issue_pending(&bchan->vc) && !bchan->curr_txd)
 +    if (vchan_issue_pending(&bchan->vc) && list_empty(&bchan->desc_list))
          bam_start_dma(bchan);
      spin_unlock_irqrestore(&bchan->vc.lock, flags);
-@@ -1138,6 +1186,7 @@ static void bam_channel_init(struct bam_
+@@ -1152,6 +1185,7 @@ static void bam_channel_init(struct bam_
      vchan_init(&bchan->vc, &bdev->common);
      bchan->vc.desc_free = bam_dma_free_desc;
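The core of the refresh shows in the bam_dma_terminate_all() hunk: the driver no longer tracks a single bchan->curr_txd but drains a whole desc_list of in-flight descriptors, unlinking entries while walking the list — exactly the job of the _safe list iterator. The stand-alone sketch below reproduces that drain pattern; the miniature list helpers imitate <linux/list.h>, and struct async_desc is an illustrative stand-in, not the driver's real bam_async_desc:

/*
 * Minimal sketch of the desc_list drain performed by the reworked
 * bam_dma_terminate_all(): every pending descriptor is unlinked and
 * handed back to the issued list. Only the names match the kernel.
 */
#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }

static void list_add(struct list_head *n, struct list_head *h)
{
    n->next = h->next; n->prev = h;
    h->next->prev = n; h->next = n;
}

static void list_del(struct list_head *e)
{
    e->prev->next = e->next;
    e->next->prev = e->prev;
    e->next = e->prev = NULL;
}

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

/* the _safe variant caches ->next so the current entry may be unlinked */
#define list_for_each_entry_safe(pos, n, head, member)                 \
    for (pos = container_of((head)->next, typeof(*pos), member),       \
         n = container_of(pos->member.next, typeof(*pos), member);     \
         &pos->member != (head);                                       \
         pos = n, n = container_of(n->member.next, typeof(*n), member))

struct async_desc {
    int cookie;
    struct list_head desc_node;
};

int main(void)
{
    struct list_head desc_list = LIST_HEAD_INIT(desc_list);
    struct list_head desc_issued = LIST_HEAD_INIT(desc_issued);
    struct async_desc d[3] = { { 1 }, { 2 }, { 3 } };
    struct async_desc *ad, *tmp;

    for (int i = 0; i < 3; i++)
        list_add(&d[i].desc_node, &desc_list);

    /* terminate-all: push every pending descriptor back to desc_issued */
    list_for_each_entry_safe(ad, tmp, &desc_list, desc_node) {
        list_del(&ad->desc_node);     /* safe: tmp was cached beforehand */
        list_add(&ad->desc_node, &desc_issued);
        printf("requeued cookie %d\n", ad->cookie);
    }
    return 0;
}

The cached next pointer (tmp) is what makes the unlink inside the loop body legal; a plain list_for_each_entry would follow links that list_del has already cleared.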
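The hunks above juggle avail, bchan->head and bchan->tail: the driver models its descriptor FIFO as a power-of-two ring, and the upstream commit this patch carries uses the kernel's CIRC_CNT()/CIRC_SPACE() helpers to decide how many descriptors the hardware has completed and how many more fit. A minimal user-space sketch of that ring arithmetic, assuming MAX_DESCRIPTORS + 1 is a power of two as in bam_dma.c — the sample index values are invented for illustration:

/*
 * Sketch of the circular-FIFO accounting behind the multi-descriptor
 * processing. CIRC_CNT/CIRC_SPACE mirror include/linux/circ_buf.h.
 */
#include <stdio.h>

#define MAX_DESCRIPTORS 4095                      /* ring size - 1 */
#define CIRC_CNT(head, tail, size)   (((head) - (tail)) & ((size) - 1))
#define CIRC_SPACE(head, tail, size) CIRC_CNT((tail), ((head) + 1), (size))

int main(void)
{
    unsigned head = 4090;  /* oldest descriptor not yet retired */
    unsigned tail = 7;     /* next free slot the driver will write */
    unsigned offset = 3;   /* hardware read pointer, wrapped past 0 */

    /* how many descriptors the hardware consumed since 'head' */
    unsigned avail = CIRC_CNT(offset, head, MAX_DESCRIPTORS + 1);

    /* how many free slots a new transaction may still claim */
    unsigned space = CIRC_SPACE(tail, head, MAX_DESCRIPTORS + 1);

    printf("completed: %u, free: %u\n", avail, space);
    return 0;
}

Because the ring size is a power of two, the subtraction-and-mask form handles the wrap past index 0 without branches — here the 4090 -> 3 wrap yields 9 completed descriptors.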
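The bam_start_dma() hunk also shows the slicing policy: the driver commits as much of a transaction as fits (async_desc->xfer_len = avail when num_desc > avail) and sets completion flags only on the last descriptor it writes, so a single interrupt can retire a whole batch. A simplified sketch of that decision follows; DESC_FLAG_INT and the structure layout are invented stand-ins, and the real driver additionally distinguishes transaction-final flags from FIFO-full interrupts:

/*
 * Simplified model of committing a transaction slice into the FIFO and
 * requesting an interrupt on the final descriptor of the slice.
 */
#include <stdio.h>

#define DESC_FLAG_INT (1u << 15)   /* stand-in for the BAM INT bit */

struct desc_hw { unsigned short flags; };

static unsigned commit(struct desc_hw *fifo, unsigned num_desc,
                       unsigned avail)
{
    /* take as many descriptors as fit into the free FIFO space */
    unsigned xfer_len = (num_desc > avail) ? avail : num_desc;

    if (!xfer_len)
        return 0;

    /*
     * Interrupt on the last committed descriptor: either the whole
     * transaction fit, or the FIFO filled up and the IRQ handler will
     * top up the remainder later.
     */
    fifo[xfer_len - 1].flags |= DESC_FLAG_INT;
    return xfer_len;
}

int main(void)
{
    struct desc_hw fifo[8] = { {0} };
    unsigned done = commit(fifo, 6, 4);  /* 6 wanted, 4 slots free */

    printf("committed %u descriptors, IRQ on index %u\n", done, done - 1);
    return 0;
}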