* [PATCH] dma/hisilicon: fix stop dmadev fail
@ 2025-10-13 9:22 Chengwen Feng
2025-10-15 9:36 ` Thomas Monjalon
0 siblings, 1 reply; 2+ messages in thread
From: Chengwen Feng @ 2025-10-13 9:22 UTC (permalink / raw)
To: thomas; +Cc: dev
Stopping the dmadev may fail if there are pending DMA transfers; we need
to make sure there are no pending DMA transfers when stopping.
This commit uses following scheme:
1. set the stop-processing flag so that new requests will not be processed.
2. set the drop flag for all descriptors so that they complete quickly.
3. wait for the dmadev to complete.
Fixes: 3c5f5f03a047 ("dma/hisilicon: add control path")
Cc: stable@dpdk.org
Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
---
drivers/dma/hisilicon/hisi_dmadev.c | 36 +++++++++++++++++++++++++++--
drivers/dma/hisilicon/hisi_dmadev.h | 2 ++
2 files changed, 36 insertions(+), 2 deletions(-)
diff --git a/drivers/dma/hisilicon/hisi_dmadev.c b/drivers/dma/hisilicon/hisi_dmadev.c
index 019c4a8189..ff6eb10167 100644
--- a/drivers/dma/hisilicon/hisi_dmadev.c
+++ b/drivers/dma/hisilicon/hisi_dmadev.c
@@ -376,6 +376,7 @@ hisi_dma_start(struct rte_dma_dev *dev)
hw->cq_head = 0;
hw->cqs_completed = 0;
hw->cqe_vld = 1;
+ hw->stop_proc = 0;
hw->submitted = 0;
hw->completed = 0;
hw->errors = 0;
@@ -387,9 +388,37 @@ hisi_dma_start(struct rte_dma_dev *dev)
return 0;
}
+static int
+hisi_dma_vchan_status(const struct rte_dma_dev *dev, uint16_t vchan,
+ enum rte_dma_vchan_status *status);
static int
hisi_dma_stop(struct rte_dma_dev *dev)
{
+#define MAX_WAIT_MSEC 10
+ struct hisi_dma_dev *hw = dev->data->dev_private;
+ enum rte_dma_vchan_status status;
+ uint32_t i;
+
+ /* Flag stop processing new requests. */
+ hw->stop_proc = 1;
+ rte_delay_ms(1);
+
+ /* Force set drop flag so that the hardware can quickly complete. */
+ for (i = 0; i <= hw->sq_depth_mask; i++)
+ hw->sqe[i].dw0 |= SQE_DROP_FLAG;
+
+ i = 0;
+ do {
+ hisi_dma_vchan_status(dev, 0, &status);
+ if (status != RTE_DMA_VCHAN_ACTIVE)
+ break;
+ rte_delay_ms(1);
+ } while (i++ < MAX_WAIT_MSEC);
+ if (status == RTE_DMA_VCHAN_ACTIVE) {
+ HISI_DMA_ERR(hw, "dev is still active!");
+ return -EBUSY;
+ }
+
return hisi_dma_reset_hw(dev->data->dev_private);
}
@@ -548,14 +577,14 @@ hisi_dma_dump(const struct rte_dma_dev *dev, FILE *f)
" revision: 0x%x queue_id: %u ring_size: %u\n"
" ridx: %u cridx: %u\n"
" sq_head: %u sq_tail: %u cq_sq_head: %u\n"
- " cq_head: %u cqs_completed: %u cqe_vld: %u\n"
+ " cq_head: %u cqs_completed: %u cqe_vld: %u stop_proc: %u\n"
" submitted: %" PRIu64 " completed: %" PRIu64 " errors: %"
PRIu64 " qfulls: %" PRIu64 "\n",
hw->revision, hw->queue_id,
hw->sq_depth_mask > 0 ? hw->sq_depth_mask + 1 : 0,
hw->ridx, hw->cridx,
hw->sq_head, hw->sq_tail, hw->cq_sq_head,
- hw->cq_head, hw->cqs_completed, hw->cqe_vld,
+ hw->cq_head, hw->cqs_completed, hw->cqe_vld, hw->stop_proc,
hw->submitted, hw->completed, hw->errors, hw->qfulls);
hisi_dma_dump_queue(hw, f);
hisi_dma_dump_common(hw, f);
@@ -573,6 +602,9 @@ hisi_dma_copy(void *dev_private, uint16_t vchan,
RTE_SET_USED(vchan);
+ if (unlikely(hw->stop_proc > 0))
+ return -EPERM;
+
if (((hw->sq_tail + 1) & hw->sq_depth_mask) == hw->sq_head) {
hw->qfulls++;
return -ENOSPC;
diff --git a/drivers/dma/hisilicon/hisi_dmadev.h b/drivers/dma/hisilicon/hisi_dmadev.h
index 90301e6b00..aab87c40be 100644
--- a/drivers/dma/hisilicon/hisi_dmadev.h
+++ b/drivers/dma/hisilicon/hisi_dmadev.h
@@ -141,6 +141,7 @@ enum {
struct hisi_dma_sqe {
uint32_t dw0;
+#define SQE_DROP_FLAG BIT(4)
#define SQE_FENCE_FLAG BIT(10)
#define SQE_OPCODE_M2M 0x4
uint32_t dw1;
@@ -211,6 +212,7 @@ struct hisi_dma_dev {
*/
uint16_t cqs_completed;
uint8_t cqe_vld; /**< valid bit for CQE, will change for every round. */
+ volatile uint8_t stop_proc; /**< whether stop processing new requests. */
uint64_t submitted;
uint64_t completed;
--
2.17.1
^ permalink raw reply [flat|nested] 2+ messages in thread
* Re: [PATCH] dma/hisilicon: fix stop dmadev fail
2025-10-13 9:22 [PATCH] dma/hisilicon: fix stop dmadev fail Chengwen Feng
@ 2025-10-15 9:36 ` Thomas Monjalon
0 siblings, 0 replies; 2+ messages in thread
From: Thomas Monjalon @ 2025-10-15 9:36 UTC (permalink / raw)
To: Chengwen Feng; +Cc: dev
13/10/2025 11:22, Chengwen Feng:
> Stop dmadev may fail if there are pending DMA transfers, we need make
> sure there are no pending DMA transfers when stop.
>
> This commit uses following scheme:
> 1. flag stop proc so that new request will not process.
> 2. setting drop flag for all descriptor to quick complete.
> 3. waiting dmadev to complete.
>
> Fixes: 3c5f5f03a047 ("dma/hisilicon: add control path")
> Cc: stable@dpdk.org
>
> Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
> ---
[...]
> +static int
> +hisi_dma_vchan_status(const struct rte_dma_dev *dev, uint16_t vchan,
> + enum rte_dma_vchan_status *status);
This declaration can be avoided by moving hisi_dma_stop()
which is anyway almost new.
> static int
> hisi_dma_stop(struct rte_dma_dev *dev)
> {
> +#define MAX_WAIT_MSEC 10
> + struct hisi_dma_dev *hw = dev->data->dev_private;
> + enum rte_dma_vchan_status status;
> + uint32_t i;
> +
> + /* Flag stop processing new requests. */
> + hw->stop_proc = 1;
> + rte_delay_ms(1);
> +
> + /* Force set drop flag so that the hardware can quickly complete. */
> + for (i = 0; i <= hw->sq_depth_mask; i++)
> + hw->sqe[i].dw0 |= SQE_DROP_FLAG;
> +
> + i = 0;
> + do {
> + hisi_dma_vchan_status(dev, 0, &status);
> + if (status != RTE_DMA_VCHAN_ACTIVE)
> + break;
> + rte_delay_ms(1);
> + } while (i++ < MAX_WAIT_MSEC);
> + if (status == RTE_DMA_VCHAN_ACTIVE) {
> + HISI_DMA_ERR(hw, "dev is still active!");
> + return -EBUSY;
> + }
> +
> return hisi_dma_reset_hw(dev->data->dev_private);
> }
Applied with suggested move, thanks.
^ permalink raw reply [flat|nested] 2+ messages in thread
end of thread, other threads:[~2025-10-15 9:36 UTC | newest]
Thread overview: 2+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2025-10-13 9:22 [PATCH] dma/hisilicon: fix stop dmadev fail Chengwen Feng
2025-10-15 9:36 ` Thomas Monjalon
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).