From: Kommula Shiva Shankar <kshankar@marvell.com>
To: <jerinj@marvell.com>, <amitprakashs@marvell.com>,
<vattunuru@marvell.com>, <fengchengwen@huawei.com>,
<dev@dpdk.org>
Cc: <ndabilpuram@marvell.com>, <pbhagavatula@marvell.com>
Subject: [PATCH RFC 4/4] dma/cnxk: implement enqueue dequeue ops
Date: Wed, 29 Jan 2025 20:06:49 +0530
Message-ID: <20250129143649.3887989-4-kshankar@marvell.com>
In-Reply-To: <20250129143649.3887989-1-kshankar@marvell.com>
From: Pavan Nikhilesh <pbhagavatula@marvell.com>
Implement the DMA enqueue/dequeue operations, used when the
application enables this mode via the device configuration.
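
A minimal usage sketch follows. It assumes the rte_dma_op layout and the
rte_dma_enqueue_ops()/rte_dma_dequeue_ops() entry points introduced in
patch 1/4 of this series; dev_id, src_iova, dst_iova and len are
illustrative and set up elsewhere:

	struct rte_dma_conf conf = { .nb_vchans = 1, .enable_enq_deq = true };
	struct rte_dma_vchan_conf qconf = {
		.direction = RTE_DMA_DIR_MEM_TO_MEM,
		.nb_desc = 1024,
	};
	struct rte_dma_op *op;

	rte_dma_configure(dev_id, &conf);
	rte_dma_vchan_setup(dev_id, 0, &qconf);
	rte_dma_start(dev_id);

	/* One op with a single source and a single destination segment;
	 * sources occupy src_dst_seg[0..nb_src), destinations follow.
	 */
	op = rte_zmalloc(NULL, sizeof(*op) + 2 * sizeof(struct rte_dma_sge), 0);
	op->nb_src = 1;
	op->nb_dst = 1;
	op->src_dst_seg[0].addr = src_iova;
	op->src_dst_seg[0].length = len;
	op->src_dst_seg[1].addr = dst_iova;
	op->src_dst_seg[1].length = len;

	if (rte_dma_enqueue_ops(dev_id, 0, &op, 1) != 1)
		; /* ring full, back off and retry */

	/* Completed ops come back in submission order; op->status
	 * holds the per-op result.
	 */
	while (rte_dma_dequeue_ops(dev_id, 0, &op, 1) == 0)
		;

With this mode enabled, the pointer-based copy/copy_sg/fill/submit and
completion callbacks are disabled and all transfers go through the
op-based enqueue/dequeue pair.
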
Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
drivers/dma/cnxk/cnxk_dmadev.c | 25 +++++-
drivers/dma/cnxk/cnxk_dmadev.h | 7 ++
drivers/dma/cnxk/cnxk_dmadev_fp.c | 140 ++++++++++++++++++++++++++++++
3 files changed, 171 insertions(+), 1 deletion(-)
diff --git a/drivers/dma/cnxk/cnxk_dmadev.c b/drivers/dma/cnxk/cnxk_dmadev.c
index 60b3d28d65..18a4914013 100644
--- a/drivers/dma/cnxk/cnxk_dmadev.c
+++ b/drivers/dma/cnxk/cnxk_dmadev.c
@@ -19,7 +19,7 @@ cnxk_dmadev_info_get(const struct rte_dma_dev *dev, struct rte_dma_info *dev_inf
dev_info->dev_capa = RTE_DMA_CAPA_MEM_TO_MEM | RTE_DMA_CAPA_MEM_TO_DEV |
RTE_DMA_CAPA_DEV_TO_MEM | RTE_DMA_CAPA_DEV_TO_DEV |
RTE_DMA_CAPA_OPS_COPY | RTE_DMA_CAPA_OPS_COPY_SG |
- RTE_DMA_CAPA_M2D_AUTO_FREE;
+ RTE_DMA_CAPA_M2D_AUTO_FREE | RTE_DMA_CAPA_OPS_ENQ_DEQ;
if (roc_feature_dpi_has_priority()) {
dev_info->dev_capa |= RTE_DMA_CAPA_PRI_POLICY_SP;
dev_info->nb_priorities = CN10K_DPI_MAX_PRI;
@@ -114,6 +114,21 @@ cnxk_dmadev_configure(struct rte_dma_dev *dev, const struct rte_dma_conf *conf,
if (roc_feature_dpi_has_priority())
dpivf->rdpi.priority = conf->priority;
+ if (conf->enable_enq_deq) {
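+ /* Op-based fastpath: the pointer-based callbacks below must stay unset. */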
+ dev->fp_obj->copy = NULL;
+ dev->fp_obj->fill = NULL;
+ dev->fp_obj->submit = NULL;
+ dev->fp_obj->copy_sg = NULL;
+ dev->fp_obj->completed = NULL;
+ dev->fp_obj->completed_status = NULL;
+
+ dev->fp_obj->enqueue = cnxk_dma_ops_enqueue;
+ dev->fp_obj->dequeue = cnxk_dma_ops_dequeue;
+
+ if (roc_model_is_cn10k())
+ dev->fp_obj->enqueue = cn10k_dma_ops_enqueue;
+ }
+
return 0;
}
@@ -270,6 +285,14 @@ cnxk_dmadev_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
return -ENOMEM;
}
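+ /* Shadow ring of rte_dma_op pointers, parallel to the completion ring, */
+ /* so dequeue can hand back the ops that were submitted at enqueue time. */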
+ size = (max_desc * sizeof(struct rte_dma_op *));
+ dpi_conf->c_desc.ops = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
+ if (dpi_conf->c_desc.ops == NULL) {
+ plt_err("Failed to allocate ops array");
+ rte_free(dpi_conf->c_desc.compl_ptr);
+ return -ENOMEM;
+ }
+
for (i = 0; i < max_desc; i++)
dpi_conf->c_desc.compl_ptr[i * CNXK_DPI_COMPL_OFFSET] = CNXK_DPI_REQ_CDATA;
diff --git a/drivers/dma/cnxk/cnxk_dmadev.h b/drivers/dma/cnxk/cnxk_dmadev.h
index 39fd6afbe9..2615cb5b73 100644
--- a/drivers/dma/cnxk/cnxk_dmadev.h
+++ b/drivers/dma/cnxk/cnxk_dmadev.h
@@ -93,6 +93,7 @@ struct cnxk_dpi_cdesc_data_s {
uint16_t head;
uint16_t tail;
uint8_t *compl_ptr;
+ struct rte_dma_op **ops;
};
struct cnxk_dpi_conf {
@@ -132,5 +133,11 @@ int cn10k_dmadev_copy(void *dev_private, uint16_t vchan, rte_iova_t src, rte_iov
int cn10k_dmadev_copy_sg(void *dev_private, uint16_t vchan, const struct rte_dma_sge *src,
const struct rte_dma_sge *dst, uint16_t nb_src, uint16_t nb_dst,
uint64_t flags);
+uint16_t cnxk_dma_ops_enqueue(void *dev_private, uint16_t vchan, struct rte_dma_op **ops,
+ uint16_t nb_ops);
+uint16_t cn10k_dma_ops_enqueue(void *dev_private, uint16_t vchan, struct rte_dma_op **ops,
+ uint16_t nb_ops);
+uint16_t cnxk_dma_ops_dequeue(void *dev_private, uint16_t vchan, struct rte_dma_op **ops,
+ uint16_t nb_ops);
#endif
diff --git a/drivers/dma/cnxk/cnxk_dmadev_fp.c b/drivers/dma/cnxk/cnxk_dmadev_fp.c
index 340c7601d7..ca9ae7cd3f 100644
--- a/drivers/dma/cnxk/cnxk_dmadev_fp.c
+++ b/drivers/dma/cnxk/cnxk_dmadev_fp.c
@@ -675,3 +675,143 @@ cnxk_dma_adapter_dequeue(uintptr_t get_work1)
return (uintptr_t)op;
}
+
+uint16_t
+cnxk_dma_ops_enqueue(void *dev_private, uint16_t vchan, struct rte_dma_op **ops, uint16_t nb_ops)
+{
+ struct cnxk_dpi_vf_s *dpivf = dev_private;
+ struct cnxk_dpi_conf *dpi_conf = &dpivf->conf[vchan];
+ const struct rte_dma_sge *fptr, *lptr;
+ uint16_t src, dst, nwords = 0;
+ struct rte_dma_op *op;
+ uint16_t space, i;
+ uint8_t *comp_ptr;
+ uint64_t hdr[4];
+ int rc;
+
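+ /* Number of free slots in the circular completion ring. */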
+ space = (dpi_conf->c_desc.max_cnt + 1) -
+ ((dpi_conf->c_desc.tail - dpi_conf->c_desc.head) & dpi_conf->c_desc.max_cnt);
+ space = RTE_MIN(space, nb_ops);
+
+ for (i = 0; i < space; i++) {
+ op = ops[i];
+ comp_ptr =
+ &dpi_conf->c_desc.compl_ptr[dpi_conf->c_desc.tail * CNXK_DPI_COMPL_OFFSET];
+ dpi_conf->c_desc.ops[dpi_conf->c_desc.tail] = op;
+ CNXK_DPI_STRM_INC(dpi_conf->c_desc, tail);
+
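+ /* hdr[1]: command word with the auto-free hint; hdr[2]: completion pointer. */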
+ hdr[1] = dpi_conf->cmd.u | ((op->flags & RTE_DMA_OP_FLAG_AUTO_FREE) << 37);
+ hdr[2] = (uint64_t)comp_ptr;
+
+ src = op->nb_src;
+ dst = op->nb_dst;
+ /*
+ * For the inbound case, src pointers are the last pointers;
+ * for all other cases, src pointers are the first pointers.
+ */
+ if (((dpi_conf->cmd.u >> 48) & DPI_HDR_XTYPE_MASK) == DPI_XTYPE_INBOUND) {
+ fptr = &op->src_dst_seg[src];
+ lptr = &op->src_dst_seg[0];
+ RTE_SWAP(src, dst);
+ } else {
+ fptr = &op->src_dst_seg[0];
+ lptr = &op->src_dst_seg[src];
+ }
+ hdr[0] = ((uint64_t)dst << 54) | (uint64_t)src << 48;
+
+ rc = __dpi_queue_write_sg(dpivf, hdr, fptr, lptr, src, dst);
+ if (rc) {
+ CNXK_DPI_STRM_DEC(dpi_conf->c_desc, tail);
+ goto done;
+ }
+ nwords += CNXK_DPI_CMD_LEN(src, dst);
+ }
+
+done:
+ if (nwords) {
+ rte_wmb();
+ plt_write64(nwords, dpivf->rdpi.rbase + DPI_VDMA_DBELL);
+ dpi_conf->stats.submitted += i;
+ }
+
+ return i;
+}
+
+uint16_t
+cn10k_dma_ops_enqueue(void *dev_private, uint16_t vchan, struct rte_dma_op **ops, uint16_t nb_ops)
+{
+ struct cnxk_dpi_vf_s *dpivf = dev_private;
+ struct cnxk_dpi_conf *dpi_conf = &dpivf->conf[vchan];
+ uint16_t space, i, nwords = 0;
+ struct rte_dma_op *op;
+ uint16_t src, dst;
+ uint8_t *comp_ptr;
+ uint64_t hdr[4];
+ int rc;
+
+ space = (dpi_conf->c_desc.max_cnt + 1) -
+ ((dpi_conf->c_desc.tail - dpi_conf->c_desc.head) & dpi_conf->c_desc.max_cnt);
+ space = RTE_MIN(space, nb_ops);
+
+ for (i = 0; i < space; i++) {
+ op = ops[i];
+ src = op->nb_src;
+ dst = op->nb_dst;
+ comp_ptr =
+ &dpi_conf->c_desc.compl_ptr[dpi_conf->c_desc.tail * CNXK_DPI_COMPL_OFFSET];
+ dpi_conf->c_desc.ops[dpi_conf->c_desc.tail] = op;
+ CNXK_DPI_STRM_INC(dpi_conf->c_desc, tail);
+
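+ /* CN10K header layout: transfer counts in hdr[0], completion pointer in hdr[1]. */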
+ hdr[0] = dpi_conf->cmd.u | (dst << 6) | src;
+ hdr[1] = (uint64_t)comp_ptr;
+ hdr[2] = (1UL << 47) | ((op->flags & RTE_DMA_OP_FLAG_AUTO_FREE) << 43);
+
+ rc = __dpi_queue_write_sg(dpivf, hdr, &op->src_dst_seg[0], &op->src_dst_seg[src],
+ src, dst);
+ if (rc) {
+ CNXK_DPI_STRM_DEC(dpi_conf->c_desc, tail);
+ goto done;
+ }
+ nwords += CNXK_DPI_CMD_LEN(src, dst);
+ }
+
+done:
+ if (nwords) {
+ rte_wmb();
+ plt_write64(nwords, dpivf->rdpi.rbase + DPI_VDMA_DBELL);
+ dpi_conf->stats.submitted += i;
+ }
+
+ return i;
+}
+
+uint16_t
+cnxk_dma_ops_dequeue(void *dev_private, uint16_t vchan, struct rte_dma_op **ops, uint16_t nb_ops)
+{
+ struct cnxk_dpi_vf_s *dpivf = dev_private;
+ struct cnxk_dpi_conf *dpi_conf = &dpivf->conf[vchan];
+ struct cnxk_dpi_cdesc_data_s *c_desc = &dpi_conf->c_desc;
+ struct rte_dma_op *op;
+ uint16_t space, cnt;
+ uint8_t status;
+
+ space = (c_desc->tail - c_desc->head) & c_desc->max_cnt;
+ space = RTE_MIN(nb_ops, space);
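+ /* Scan completions in submission order; stop at the first still-pending slot. */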
+ for (cnt = 0; cnt < space; cnt++) {
+ status = c_desc->compl_ptr[c_desc->head * CNXK_DPI_COMPL_OFFSET];
+ op = c_desc->ops[c_desc->head];
+ op->status = status;
+ ops[cnt] = op;
+ if (status) {
+ if (status == CNXK_DPI_REQ_CDATA)
+ break;
+ dpi_conf->stats.errors++;
+ }
+ c_desc->compl_ptr[c_desc->head * CNXK_DPI_COMPL_OFFSET] = CNXK_DPI_REQ_CDATA;
+ CNXK_DPI_STRM_INC(*c_desc, head);
+ }
+
+ dpi_conf->stats.completed += cnt;
+
+ return cnt;
+}
--
2.43.0
Thread overview:
2025-01-29 14:36 [PATCH RFC 1/4] dmadev: add enqueue dequeue operations Kommula Shiva Shankar
2025-01-29 14:36 ` [PATCH RFC 2/4] eventdev: refactor rte_event_dma_adapater_op calls Kommula Shiva Shankar
2025-01-29 14:36 ` [PATCH RFC 3/4] doc: update prog guide to use rte_dma_op Kommula Shiva Shankar
2025-01-29 14:36 ` Kommula Shiva Shankar [this message]