From: Chengwen Feng <fengchengwen@huawei.com>
To: <thomas@monjalon.net>, <dev@dpdk.org>
Cc: <gmuthukrishn@marvell.com>, <tangkunshan@huawei.com>
Subject: [PATCH 1/2] dma/skeleton: support SG copy ops
Date: Fri, 26 Jan 2024 08:57:25 +0000
Message-ID: <20240126085726.54581-2-fengchengwen@huawei.com>
In-Reply-To: <20240126085726.54581-1-fengchengwen@huawei.com>
Add support for scatter-gather (SG) copy operations.
Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
---
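A minimal usage sketch of the new datapath, not part of the patch
itself: dev_id and vchan 0 are assumed to be set up via the usual
rte_dma_configure()/rte_dma_vchan_setup() flow, and buf_a, buf_b and
buf_out are placeholder buffers. Because the driver advertises
RTE_DMA_CAPA_SVA, virtual addresses can be used directly in the SGEs:

	struct rte_dma_sge src[2] = {
		{ .addr = (uintptr_t)buf_a, .length = 64 },
		{ .addr = (uintptr_t)buf_b, .length = 64 },
	};
	struct rte_dma_sge dst[1] = {
		{ .addr = (uintptr_t)buf_out, .length = 128 },
	};
	/* A negative return (e.g. -ENOSPC when the descriptor ring is
	 * full) means the operation was not enqueued.
	 */
	int ret = rte_dma_copy_sg(dev_id, 0, src, dst, 2, 1,
				  RTE_DMA_OP_FLAG_SUBMIT);
	/* Poll for up to one completion on vchan 0. */
	uint16_t done = rte_dma_completed(dev_id, 0, 1, NULL, NULL);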
drivers/dma/skeleton/skeleton_dmadev.c | 96 ++++++++++++++++++++++++--
drivers/dma/skeleton/skeleton_dmadev.h | 28 ++++++--
2 files changed, 113 insertions(+), 11 deletions(-)
diff --git a/drivers/dma/skeleton/skeleton_dmadev.c b/drivers/dma/skeleton/skeleton_dmadev.c
index eab03852dd..d1d257a064 100644
--- a/drivers/dma/skeleton/skeleton_dmadev.c
+++ b/drivers/dma/skeleton/skeleton_dmadev.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2021 HiSilicon Limited
+ * Copyright(c) 2021-2024 HiSilicon Limited
*/
#include <inttypes.h>
@@ -37,10 +37,12 @@ skeldma_info_get(const struct rte_dma_dev *dev, struct rte_dma_info *dev_info,
dev_info->dev_capa = RTE_DMA_CAPA_MEM_TO_MEM |
RTE_DMA_CAPA_SVA |
- RTE_DMA_CAPA_OPS_COPY;
+ RTE_DMA_CAPA_OPS_COPY |
+ RTE_DMA_CAPA_OPS_COPY_SG;
dev_info->max_vchans = 1;
dev_info->max_desc = SKELDMA_MAX_DESC;
dev_info->min_desc = SKELDMA_MIN_DESC;
+ dev_info->max_sges = SKELDMA_MAX_SGES;
return 0;
}
@@ -55,6 +57,49 @@ skeldma_configure(struct rte_dma_dev *dev, const struct rte_dma_conf *conf,
return 0;
}
+static inline void
+do_copy_sg_one(struct rte_dma_sge *src, struct rte_dma_sge *dst, uint16_t nb_dst, uint64_t offset)
+{
+ uint32_t src_off = 0, dst_off = 0;
+ uint32_t copy_len = 0;
+ uint64_t tmp = 0;
+ uint16_t i;
+
+	/* Locate the destination segment where this copy starts. */
+ for (i = 0; i < nb_dst; i++) {
+ tmp += dst[i].length;
+ if (offset < tmp) {
+ copy_len = tmp - offset;
+ dst_off = dst[i].length - copy_len;
+ break;
+ }
+ }
+
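+	/* Scatter the source segment across the destination segments,
+	 * starting at the offset found above.
+	 */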
+	for (/* Use the above index */; i < nb_dst; i++) {
+		copy_len = RTE_MIN(copy_len, src->length - src_off);
+		rte_memcpy((uint8_t *)(uintptr_t)dst[i].addr + dst_off,
+			   (uint8_t *)(uintptr_t)src->addr + src_off,
+			   copy_len);
+		src_off += copy_len;
+		if (src_off >= src->length)
+			break;
+		dst_off = 0;
+		if (i + 1 < nb_dst)
+			copy_len = dst[i + 1].length;
+	}
+}
+
+static inline void
+do_copy_sg(struct skeldma_desc *desc)
+{
+ uint64_t offset = 0;
+ uint16_t i;
+
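+	/* "offset" accumulates the bytes of destination space already
+	 * consumed by the preceding source segments.
+	 */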
+ for (i = 0; i < desc->copy_sg.nb_src; i++) {
+ do_copy_sg_one(&desc->copy_sg.src[i], desc->copy_sg.dst,
+ desc->copy_sg.nb_dst, offset);
+ offset += desc->copy_sg.src[i].length;
+ }
+}
+
static uint32_t
cpucopy_thread(void *param)
{
@@ -76,9 +121,13 @@ cpucopy_thread(void *param)
rte_delay_us_sleep(SLEEP_US_VAL);
continue;
}
-
hw->zero_req_count = 0;
- rte_memcpy(desc->dst, desc->src, desc->len);
+
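+		/* Dispatch on the operation type recorded at enqueue time. */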
+ if (desc->op == SKELDMA_OP_COPY)
+ rte_memcpy(desc->copy.dst, desc->copy.src, desc->copy.len);
+ else if (desc->op == SKELDMA_OP_COPY_SG)
+ do_copy_sg(desc);
+
__atomic_fetch_add(&hw->completed_count, 1, __ATOMIC_RELEASE);
(void)rte_ring_enqueue(hw->desc_completed, (void *)desc);
}
@@ -368,10 +417,42 @@ skeldma_copy(void *dev_private, uint16_t vchan,
ret = rte_ring_dequeue(hw->desc_empty, (void **)&desc);
if (ret)
return -ENOSPC;
- desc->src = (void *)(uintptr_t)src;
- desc->dst = (void *)(uintptr_t)dst;
- desc->len = length;
+ desc->op = SKELDMA_OP_COPY;
+ desc->ridx = hw->ridx;
+ desc->copy.src = (void *)(uintptr_t)src;
+ desc->copy.dst = (void *)(uintptr_t)dst;
+ desc->copy.len = length;
+ if (flags & RTE_DMA_OP_FLAG_SUBMIT)
+ submit(hw, desc);
+ else
+ (void)rte_ring_enqueue(hw->desc_pending, (void *)desc);
+ hw->submitted_count++;
+
+ return hw->ridx++;
+}
+
+static int
+skeldma_copy_sg(void *dev_private, uint16_t vchan,
+ const struct rte_dma_sge *src,
+ const struct rte_dma_sge *dst,
+ uint16_t nb_src, uint16_t nb_dst,
+ uint64_t flags)
+{
+ struct skeldma_hw *hw = dev_private;
+ struct skeldma_desc *desc;
+ int ret;
+
+ RTE_SET_USED(vchan);
+
+ ret = rte_ring_dequeue(hw->desc_empty, (void **)&desc);
+ if (ret)
+ return -ENOSPC;
+ desc->op = SKELDMA_OP_COPY_SG;
desc->ridx = hw->ridx;
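+	/* The descriptor's src/dst arrays hold up to SKELDMA_MAX_SGES
+	 * entries, matching max_sges reported by skeldma_info_get().
+	 */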
+ memcpy(desc->copy_sg.src, src, sizeof(*src) * nb_src);
+ memcpy(desc->copy_sg.dst, dst, sizeof(*dst) * nb_dst);
+ desc->copy_sg.nb_src = nb_src;
+ desc->copy_sg.nb_dst = nb_dst;
if (flags & RTE_DMA_OP_FLAG_SUBMIT)
submit(hw, desc);
else
@@ -491,6 +572,7 @@ skeldma_create(const char *name, struct rte_vdev_device *vdev, int lcore_id)
dev->dev_ops = &skeldma_ops;
dev->fp_obj->dev_private = dev->data->dev_private;
dev->fp_obj->copy = skeldma_copy;
+ dev->fp_obj->copy_sg = skeldma_copy_sg;
dev->fp_obj->submit = skeldma_submit;
dev->fp_obj->completed = skeldma_completed;
dev->fp_obj->completed_status = skeldma_completed_status;
diff --git a/drivers/dma/skeleton/skeleton_dmadev.h b/drivers/dma/skeleton/skeleton_dmadev.h
index 3582db852a..7d32dd5095 100644
--- a/drivers/dma/skeleton/skeleton_dmadev.h
+++ b/drivers/dma/skeleton/skeleton_dmadev.h
@@ -1,20 +1,40 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2021 HiSilicon Limited
+ * Copyright(c) 2021-2024 HiSilicon Limited
*/
#ifndef SKELETON_DMADEV_H
#define SKELETON_DMADEV_H
+#include <rte_dmadev.h>
#include <rte_ring.h>
#include <rte_thread.h>
#define SKELDMA_ARG_LCORE "lcore"
+#define SKELDMA_MAX_SGES 4
+
+enum skeldma_op {
+ SKELDMA_OP_COPY,
+ SKELDMA_OP_COPY_SG,
+};
+
struct skeldma_desc {
- void *src;
- void *dst;
- uint32_t len;
+ enum skeldma_op op;
uint16_t ridx; /* ring idx */
+
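+	/* Per-op payload: a plain copy or an SG copy, selected by the
+	 * "op" field above.
+	 */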
+ union {
+ struct {
+ void *src;
+ void *dst;
+ uint32_t len;
+ } copy;
+ struct {
+ struct rte_dma_sge src[SKELDMA_MAX_SGES];
+ struct rte_dma_sge dst[SKELDMA_MAX_SGES];
+ uint16_t nb_src;
+ uint16_t nb_dst;
+ } copy_sg;
+ };
};
struct skeldma_hw {
--
2.17.1
Thread overview: 8+ messages
2024-01-26 8:57 [PATCH 0/2] dma/skeleton: add support for SG copy and fill ops Chengwen Feng
2024-01-26 8:57 ` Chengwen Feng [this message]
2024-03-06 20:48 ` [PATCH 1/2] dma/skeleton: support SG copy ops Thomas Monjalon
2024-03-07 10:44 ` Ferruh Yigit
2024-03-07 13:12 ` Thomas Monjalon
2024-03-07 13:15 ` Morten Brørup
2024-01-26 8:57 ` [PATCH 2/2] dma/skeleton: support fill ops Chengwen Feng
2024-03-06 20:49 ` [PATCH 0/2] dma/skeleton: add support for SG copy and " Thomas Monjalon