DPDK patches and discussions
* [PATCH 1/3] common/cnxk: dma result to an offset of the event
@ 2023-12-08  8:28 Amit Prakash Shukla
  2023-12-08  8:28 ` [PATCH 2/3] dma/cnxk: support for DMA event enqueue dequeue Amit Prakash Shukla
                   ` (2 more replies)
  0 siblings, 3 replies; 19+ messages in thread
From: Amit Prakash Shukla @ 2023-12-08  8:28 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: dev, jerinj, vattunuru, pbhagavatula, Amit Prakash Shukla

Adds support to configure writing the DMA result to an offset of the
DMA response event.

Signed-off-by: Amit Prakash Shukla <amitprakashs@marvell.com>
---
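For illustration, a driver consuming this API passes the byte offset of
its completion-status field at DPI device init, and roc_dpi_configure()
then programs that offset into the DPI mailbox (wqecsoff) and sets the
enable bit (wqecs). A minimal sketch, assuming the cnxk_dpi_compl_s
layout introduced in patch 2/3:

  /* Sketch: report where hardware should write the per-request
   * completion status, as a byte offset into the completion struct.
   * A zero offset leaves the feature disabled.
   */
  rc = roc_dpi_dev_init(&dpivf->rdpi,
                        offsetof(struct cnxk_dpi_compl_s, wqecs));
  if (rc < 0)
          return rc;
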
 drivers/common/cnxk/roc_dpi.c       |  6 +++++-
 drivers/common/cnxk/roc_dpi.h       |  2 +-
 drivers/common/cnxk/roc_dpi_priv.h  |  4 ++++
 drivers/common/cnxk/roc_idev.c      | 20 ++++++++++++++++++++
 drivers/common/cnxk/roc_idev_priv.h |  3 +++
 5 files changed, 33 insertions(+), 2 deletions(-)

diff --git a/drivers/common/cnxk/roc_dpi.c b/drivers/common/cnxk/roc_dpi.c
index c241168294..1ee777d779 100644
--- a/drivers/common/cnxk/roc_dpi.c
+++ b/drivers/common/cnxk/roc_dpi.c
@@ -83,6 +83,9 @@ roc_dpi_configure(struct roc_dpi *roc_dpi, uint32_t chunk_sz, uint64_t aura, uin
 	mbox_msg.s.aura = aura;
 	mbox_msg.s.sso_pf_func = idev_sso_pffunc_get();
 	mbox_msg.s.npa_pf_func = idev_npa_pffunc_get();
+	mbox_msg.s.wqecsoff = idev_dma_cs_offset_get();
+	if (mbox_msg.s.wqecsoff)
+		mbox_msg.s.wqecs = 1;
 
 	rc = send_msg_to_pf(&pci_dev->addr, (const char *)&mbox_msg,
 			    sizeof(dpi_mbox_msg_t));
@@ -94,7 +97,7 @@ roc_dpi_configure(struct roc_dpi *roc_dpi, uint32_t chunk_sz, uint64_t aura, uin
 }
 
 int
-roc_dpi_dev_init(struct roc_dpi *roc_dpi)
+roc_dpi_dev_init(struct roc_dpi *roc_dpi, uint8_t offset)
 {
 	struct plt_pci_device *pci_dev = roc_dpi->pci_dev;
 	uint16_t vfid;
@@ -103,6 +106,7 @@ roc_dpi_dev_init(struct roc_dpi *roc_dpi)
 	vfid = ((pci_dev->addr.devid & 0x1F) << 3) | (pci_dev->addr.function & 0x7);
 	vfid -= 1;
 	roc_dpi->vfid = vfid;
+	idev_dma_cs_offset_set(offset);
 
 	return 0;
 }
diff --git a/drivers/common/cnxk/roc_dpi.h b/drivers/common/cnxk/roc_dpi.h
index 4ebde5b8a6..978e2badb2 100644
--- a/drivers/common/cnxk/roc_dpi.h
+++ b/drivers/common/cnxk/roc_dpi.h
@@ -11,7 +11,7 @@ struct roc_dpi {
 	uint16_t vfid;
 } __plt_cache_aligned;
 
-int __roc_api roc_dpi_dev_init(struct roc_dpi *roc_dpi);
+int __roc_api roc_dpi_dev_init(struct roc_dpi *roc_dpi, uint8_t offset);
 int __roc_api roc_dpi_dev_fini(struct roc_dpi *roc_dpi);
 
 int __roc_api roc_dpi_configure(struct roc_dpi *dpi, uint32_t chunk_sz, uint64_t aura,
diff --git a/drivers/common/cnxk/roc_dpi_priv.h b/drivers/common/cnxk/roc_dpi_priv.h
index 518a3e7351..52962c8bc0 100644
--- a/drivers/common/cnxk/roc_dpi_priv.h
+++ b/drivers/common/cnxk/roc_dpi_priv.h
@@ -31,6 +31,10 @@ typedef union dpi_mbox_msg_t {
 		uint64_t sso_pf_func : 16;
 		/* NPA PF function */
 		uint64_t npa_pf_func : 16;
+		/* WQE queue DMA completion status enable */
+		uint64_t wqecs : 1;
+		/* WQE queue DMA completion status offset */
+		uint64_t wqecsoff : 8;
 	} s;
 } dpi_mbox_msg_t;
 
diff --git a/drivers/common/cnxk/roc_idev.c b/drivers/common/cnxk/roc_idev.c
index e6c6b34d78..7b922c8bae 100644
--- a/drivers/common/cnxk/roc_idev.c
+++ b/drivers/common/cnxk/roc_idev.c
@@ -301,6 +301,26 @@ idev_sso_set(struct roc_sso *sso)
 		__atomic_store_n(&idev->sso, sso, __ATOMIC_RELEASE);
 }
 
+void
+idev_dma_cs_offset_set(uint8_t offset)
+{
+	struct idev_cfg *idev = idev_get_cfg();
+
+	if (idev != NULL)
+		idev->dma_cs_offset = offset;
+}
+
+uint8_t
+idev_dma_cs_offset_get(void)
+{
+	struct idev_cfg *idev = idev_get_cfg();
+
+	if (idev != NULL)
+		return idev->dma_cs_offset;
+
+	return 0;
+}
+
 uint64_t
 roc_idev_nix_inl_meta_aura_get(void)
 {
diff --git a/drivers/common/cnxk/roc_idev_priv.h b/drivers/common/cnxk/roc_idev_priv.h
index 80f8465e1c..cf63c58d92 100644
--- a/drivers/common/cnxk/roc_idev_priv.h
+++ b/drivers/common/cnxk/roc_idev_priv.h
@@ -37,6 +37,7 @@ struct idev_cfg {
 	struct roc_nix_list roc_nix_list;
 	plt_spinlock_t nix_inl_dev_lock;
 	plt_spinlock_t npa_dev_lock;
+	uint8_t dma_cs_offset;
 };
 
 /* Generic */
@@ -55,6 +56,8 @@ void idev_sso_pffunc_set(uint16_t sso_pf_func);
 uint16_t idev_sso_pffunc_get(void);
 struct roc_sso *idev_sso_get(void);
 void idev_sso_set(struct roc_sso *sso);
+void idev_dma_cs_offset_set(uint8_t offset);
+uint8_t idev_dma_cs_offset_get(void);
 
 /* idev lmt */
 uint16_t idev_lmt_pffunc_get(void);
-- 
2.25.1


* [PATCH 2/3] dma/cnxk: support for DMA event enqueue dequeue
  2023-12-08  8:28 [PATCH 1/3] common/cnxk: dma result to an offset of the event Amit Prakash Shukla
@ 2023-12-08  8:28 ` Amit Prakash Shukla
  2023-12-08  8:28 ` [PATCH 3/3] event/cnxk: support DMA event functions Amit Prakash Shukla
  2024-02-09 19:43 ` [PATCH v2 1/3] common/cnxk: dma result to an offset of the event Amit Prakash Shukla
  2 siblings, 0 replies; 19+ messages in thread
From: Amit Prakash Shukla @ 2023-12-08  8:28 UTC (permalink / raw)
  To: Pavan Nikhilesh, Shijith Thotton, Vamsi Attunuru
  Cc: dev, jerinj, ndabilpuram, Amit Prakash Shukla

Added cnxk driver support for DMA event enqueue and dequeue.
Also added changes for work queue entry (WQE) completion status.

Signed-off-by: Amit Prakash Shukla <amitprakashs@marvell.com>
---
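For context, the enqueue path below expects each event's event_ptr to
point at a struct rte_event_dma_adapter_op, with the response event
stored immediately after the op. A minimal application-side sketch of a
forward-mode submission, using only the op fields this patch reads
(dma_dev_id, vchan, nb_src/nb_dst, src_seg/dst_seg, flags); the pool
and identifiers are placeholders:

  struct rte_event_dma_adapter_op *op;
  struct rte_event ev = {0}, *rsp;

  if (rte_mempool_get(op_pool, (void **)&op))  /* op_pool: app-owned */
          return -ENOMEM;
  op->dma_dev_id = dma_dev_id;
  op->vchan = vchan;
  op->nb_src = 1;
  op->nb_dst = 1;
  op->src_seg[0] = src_sge;  /* struct rte_dma_sge {addr, length} */
  op->dst_seg[0] = dst_sge;
  op->flags = RTE_DMA_OP_FLAG_SUBMIT;

  /* The driver reads the response event from just past the op. */
  rsp = (struct rte_event *)(op + 1);
  rsp->queue_id = compl_queue_id;
  rsp->sched_type = RTE_SCHED_TYPE_ATOMIC;

  ev.event_ptr = op;
  rte_event_dma_adapter_enqueue(evdev_id, port_id, &ev, 1);
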
 doc/guides/eventdevs/cnxk.rst        |   5 +
 drivers/dma/cnxk/cnxk_dma_event_dp.h |  21 +++
 drivers/dma/cnxk/cnxk_dmadev.c       |   2 +-
 drivers/dma/cnxk/cnxk_dmadev.h       |  18 ++-
 drivers/dma/cnxk/cnxk_dmadev_fp.c    | 212 +++++++++++++++++++++++++++
 drivers/dma/cnxk/meson.build         |   9 +-
 drivers/dma/cnxk/version.map         |   9 ++
 7 files changed, 273 insertions(+), 3 deletions(-)
 create mode 100644 drivers/dma/cnxk/cnxk_dma_event_dp.h
 create mode 100644 drivers/dma/cnxk/version.map

diff --git a/doc/guides/eventdevs/cnxk.rst b/doc/guides/eventdevs/cnxk.rst
index cccb8a0304..9ff1052c53 100644
--- a/doc/guides/eventdevs/cnxk.rst
+++ b/doc/guides/eventdevs/cnxk.rst
@@ -227,3 +227,8 @@ ethernet devices connected to event device to override this applications can
 use `force_rx_bp=1` device arguments.
 Using unique mempool per each ethernet device is recommended when they are
 connected to event device.
+
+DMA adapter new mode support
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The DMA driver does not support the DMA adapter configured in new mode.
diff --git a/drivers/dma/cnxk/cnxk_dma_event_dp.h b/drivers/dma/cnxk/cnxk_dma_event_dp.h
new file mode 100644
index 0000000000..a526d25665
--- /dev/null
+++ b/drivers/dma/cnxk/cnxk_dma_event_dp.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2023 Marvell.
+ */
+
+#ifndef _CNXK_DMA_EVENT_DP_H_
+#define _CNXK_DMA_EVENT_DP_H_
+
+#include <stdint.h>
+
+#include <rte_common.h>
+#include <rte_eventdev.h>
+
+__rte_internal
+uint16_t cn10k_dma_adapter_enqueue(void *ws, struct rte_event ev[], uint16_t nb_events);
+
+__rte_internal
+uint16_t cn9k_dma_adapter_enqueue(void *ws, struct rte_event ev[], uint16_t nb_events);
+
+__rte_internal
+uintptr_t cnxk_dma_adapter_dequeue(uintptr_t get_work1);
+#endif /* _CNXK_DMA_EVENT_DP_H_ */
diff --git a/drivers/dma/cnxk/cnxk_dmadev.c b/drivers/dma/cnxk/cnxk_dmadev.c
index 1e7f49792c..a748331da1 100644
--- a/drivers/dma/cnxk/cnxk_dmadev.c
+++ b/drivers/dma/cnxk/cnxk_dmadev.c
@@ -592,7 +592,7 @@ cnxk_dmadev_probe(struct rte_pci_driver *pci_drv __rte_unused, struct rte_pci_de
 	rdpi = &dpivf->rdpi;
 
 	rdpi->pci_dev = pci_dev;
-	rc = roc_dpi_dev_init(rdpi);
+	rc = roc_dpi_dev_init(rdpi, offsetof(struct cnxk_dpi_compl_s, wqecs));
 	if (rc < 0)
 		goto err_out_free;
 
diff --git a/drivers/dma/cnxk/cnxk_dmadev.h b/drivers/dma/cnxk/cnxk_dmadev.h
index 350ae73b5c..332325d6b6 100644
--- a/drivers/dma/cnxk/cnxk_dmadev.h
+++ b/drivers/dma/cnxk/cnxk_dmadev.h
@@ -19,6 +19,8 @@
 
 #include <roc_api.h>
 
+#include "cnxk_dma_event_dp.h"
+
 #define CNXK_DPI_MAX_POINTER		    15
 #define CNXK_DPI_STRM_INC(s, var)	    ((s).var = ((s).var + 1) & (s).max_cnt)
 #define CNXK_DPI_STRM_DEC(s, var)	    ((s).var = ((s).var - 1) == -1 ? (s).max_cnt :	\
@@ -40,6 +42,11 @@
  */
 #define CNXK_DPI_REQ_CDATA 0xFF
 
+/* Set completion data to 0xDEADBEEF when a request is submitted for SSO.
+ * This helps differentiate if the dequeue is called after a cnxk enqueue.
+ */
+#define CNXK_DPI_REQ_SSO_CDATA    0xDEADBEEF
+
 union cnxk_dpi_instr_cmd {
 	uint64_t u;
 	struct cn9k_dpi_instr_cmd {
@@ -85,7 +92,10 @@ union cnxk_dpi_instr_cmd {
 
 struct cnxk_dpi_compl_s {
 	uint64_t cdata;
-	void *cb_data;
+	void *op;
+	uint16_t dev_id;
+	uint16_t vchan;
+	uint32_t wqecs;
 };
 
 struct cnxk_dpi_cdesc_data_s {
@@ -95,6 +105,11 @@ struct cnxk_dpi_cdesc_data_s {
 	uint16_t tail;
 };
 
+struct cnxk_dma_adapter_info {
+	bool enabled;               /* Set if vchan queue is added to dma adapter. */
+	struct rte_mempool *req_mp; /* DMA inflight request mempool. */
+};
+
 struct cnxk_dpi_conf {
 	union cnxk_dpi_instr_cmd cmd;
 	struct cnxk_dpi_cdesc_data_s c_desc;
@@ -103,6 +118,7 @@ struct cnxk_dpi_conf {
 	uint16_t desc_idx;
 	struct rte_dma_stats stats;
 	uint64_t completed_offset;
+	struct cnxk_dma_adapter_info adapter_info;
 };
 
 struct cnxk_dpi_vf_s {
diff --git a/drivers/dma/cnxk/cnxk_dmadev_fp.c b/drivers/dma/cnxk/cnxk_dmadev_fp.c
index 95df19a2db..85a8e1310e 100644
--- a/drivers/dma/cnxk/cnxk_dmadev_fp.c
+++ b/drivers/dma/cnxk/cnxk_dmadev_fp.c
@@ -5,6 +5,13 @@
 #include <rte_vect.h>
 
 #include "cnxk_dmadev.h"
+#include <rte_event_dma_adapter.h>
+
+#include <cn10k_eventdev.h>
+#include <cnxk_eventdev.h>
+#include <rte_mcslock.h>
+
+rte_mcslock_t *dpi_ml;
 
 static __plt_always_inline void
 __dpi_cpy_scalar(uint64_t *src, uint64_t *dst, uint8_t n)
@@ -434,3 +441,208 @@ cn10k_dmadev_copy_sg(void *dev_private, uint16_t vchan, const struct rte_dma_sge
 
 	return dpi_conf->desc_idx++;
 }
+
+static inline uint64_t
+cnxk_dma_adapter_format_event(uint64_t event)
+{
+	uint64_t w0;
+	w0 = (event & 0xFFC000000000) >> 6 |
+	     (event & 0xFFFFFFF) | RTE_EVENT_TYPE_DMADEV << 28;
+
+	return w0;
+}
+
+uint16_t
+cn10k_dma_adapter_enqueue(void *ws, struct rte_event ev[], uint16_t nb_events)
+{
+	const struct rte_dma_sge *src, *dst;
+	struct rte_event_dma_adapter_op *op;
+	struct cnxk_dpi_compl_s *comp_ptr;
+	struct cnxk_dpi_conf *dpi_conf;
+	struct cnxk_dpi_vf_s *dpivf;
+	struct rte_event *rsp_info;
+	struct cn10k_sso_hws *work;
+	uint16_t nb_src, nb_dst;
+	rte_mcslock_t ml_me;
+	uint64_t hdr[4];
+	uint16_t count;
+	int rc;
+
+	work = (struct cn10k_sso_hws *)ws;
+
+	for (count = 0; count < nb_events; count++) {
+		op = ev[count].event_ptr;
+		rsp_info = (struct rte_event *)((uint8_t *)op +
+			     sizeof(struct rte_event_dma_adapter_op));
+		dpivf =	rte_dma_fp_objs[op->dma_dev_id].dev_private;
+		dpi_conf = &dpivf->conf[op->vchan];
+
+		if (unlikely(rte_mempool_get(dpi_conf->adapter_info.req_mp, (void **)&comp_ptr)))
+			return count;
+
+		comp_ptr->op = op;
+		comp_ptr->dev_id = op->dma_dev_id;
+		comp_ptr->vchan = op->vchan;
+		comp_ptr->cdata = CNXK_DPI_REQ_SSO_CDATA;
+
+		nb_src = op->nb_src & CNXK_DPI_MAX_POINTER;
+		nb_dst = op->nb_dst & CNXK_DPI_MAX_POINTER;
+
+		hdr[0] = dpi_conf->cmd.u | ((uint64_t)DPI_HDR_PT_WQP << 54);
+		hdr[0] |= (nb_dst << 6) | nb_src;
+		hdr[1] = ((uint64_t)comp_ptr);
+		hdr[2] = cnxk_dma_adapter_format_event(rsp_info->event);
+
+		src = &op->src_seg[0];
+		dst = &op->dst_seg[0];
+
+		if (CNXK_TAG_IS_HEAD(work->gw_rdata) ||
+		    ((CNXK_TT_FROM_TAG(work->gw_rdata) == SSO_TT_ORDERED) &&
+		    (rsp_info->sched_type & DPI_HDR_TT_MASK) ==
+			    RTE_SCHED_TYPE_ORDERED))
+			roc_sso_hws_head_wait(work->base);
+
+		rte_mcslock_lock(&dpi_ml, &ml_me);
+		rc = __dpi_queue_write_sg(dpivf, hdr, src, dst, nb_src, nb_dst);
+		if (unlikely(rc)) {
+			rte_mcslock_unlock(&dpi_ml, &ml_me);
+			return rc;
+		}
+
+		if (op->flags & RTE_DMA_OP_FLAG_SUBMIT) {
+			rte_wmb();
+			plt_write64(dpi_conf->pnum_words + CNXK_DPI_CMD_LEN(nb_src, nb_dst),
+				    dpivf->rdpi.rbase + DPI_VDMA_DBELL);
+			dpi_conf->stats.submitted += dpi_conf->pending + 1;
+			dpi_conf->pnum_words = 0;
+			dpi_conf->pending = 0;
+		} else {
+			dpi_conf->pnum_words += CNXK_DPI_CMD_LEN(nb_src, nb_dst);
+			dpi_conf->pending++;
+		}
+		rte_mcslock_unlock(&dpi_ml, &ml_me);
+	}
+
+	return count;
+}
+
+uint16_t
+cn9k_dma_adapter_enqueue(void *ws, struct rte_event ev[], uint16_t nb_events)
+{
+	const struct rte_dma_sge *fptr, *lptr;
+	struct rte_event_dma_adapter_op *op;
+	struct cnxk_dpi_compl_s *comp_ptr;
+	struct cnxk_dpi_conf *dpi_conf;
+	struct cnxk_dpi_vf_s *dpivf;
+	struct rte_event *rsp_info;
+	struct cn9k_sso_hws *work;
+	uint16_t nb_src, nb_dst;
+	rte_mcslock_t ml_me;
+	uint64_t hdr[4];
+	uint16_t count;
+	int rc;
+
+	work = (struct cn9k_sso_hws *)ws;
+
+	for (count = 0; count < nb_events; count++) {
+		op = ev[count].event_ptr;
+		rsp_info = (struct rte_event *)((uint8_t *)op +
+			    sizeof(struct rte_event_dma_adapter_op));
+		dpivf =	rte_dma_fp_objs[op->dma_dev_id].dev_private;
+		dpi_conf = &dpivf->conf[op->vchan];
+
+		if (unlikely(rte_mempool_get(dpi_conf->adapter_info.req_mp, (void **)&comp_ptr)))
+			return count;
+
+		comp_ptr->op = op;
+		comp_ptr->dev_id = op->dma_dev_id;
+		comp_ptr->vchan = op->vchan;
+		comp_ptr->cdata = CNXK_DPI_REQ_SSO_CDATA;
+
+		hdr[1] = dpi_conf->cmd.u | ((uint64_t)DPI_HDR_PT_WQP << 36);
+		hdr[2] = (uint64_t)comp_ptr;
+
+		nb_src = op->nb_src & CNXK_DPI_MAX_POINTER;
+		nb_dst = op->nb_dst & CNXK_DPI_MAX_POINTER;
+		/*
+		 * For inbound case, src pointers are last pointers.
+		 * For all other cases, src pointers are first pointers.
+		 */
+		if (((dpi_conf->cmd.u >> 48) & DPI_HDR_XTYPE_MASK) == DPI_XTYPE_INBOUND) {
+			fptr = &op->dst_seg[0];
+			lptr = &op->src_seg[0];
+			RTE_SWAP(nb_src, nb_dst);
+		} else {
+			fptr = &op->src_seg[0];
+			lptr = &op->dst_seg[0];
+		}
+
+		hdr[0] = ((uint64_t)nb_dst << 54) | (uint64_t)nb_src << 48;
+		hdr[0] |= cnxk_dma_adapter_format_event(rsp_info->event);
+
+		if ((rsp_info->sched_type & DPI_HDR_TT_MASK) == RTE_SCHED_TYPE_ORDERED)
+			roc_sso_hws_head_wait(work->base);
+
+		rte_mcslock_lock(&dpi_ml, &ml_me);
+		rc = __dpi_queue_write_sg(dpivf, hdr, fptr, lptr, nb_src, nb_dst);
+		if (unlikely(rc)) {
+			rte_mcslock_unlock(&dpi_ml, &ml_me);
+			return rc;
+		}
+
+		if (op->flags & RTE_DMA_OP_FLAG_SUBMIT) {
+			rte_wmb();
+			plt_write64(dpi_conf->pnum_words + CNXK_DPI_CMD_LEN(nb_src, nb_dst),
+				    dpivf->rdpi.rbase + DPI_VDMA_DBELL);
+			dpi_conf->stats.submitted += dpi_conf->pending + 1;
+			dpi_conf->pnum_words = 0;
+			dpi_conf->pending = 0;
+		} else {
+			dpi_conf->pnum_words += CNXK_DPI_CMD_LEN(nb_src, nb_dst);
+			dpi_conf->pending++;
+		}
+		rte_mcslock_unlock(&dpi_ml, &ml_me);
+	}
+
+	return count;
+}
+
+uintptr_t
+cnxk_dma_adapter_dequeue(uintptr_t get_work1)
+{
+	struct rte_event_dma_adapter_op *op;
+	struct cnxk_dpi_compl_s *comp_ptr;
+	struct cnxk_dpi_conf *dpi_conf;
+	struct cnxk_dpi_vf_s *dpivf;
+	rte_mcslock_t ml_me;
+	uint8_t *wqecs;
+
+	comp_ptr = (struct cnxk_dpi_compl_s *)get_work1;
+
+	/* Dequeue can be called without a prior cnxk enqueue in the DMA
+	 * adapter case. When it is called that way, the DMA op is not
+	 * embedded in a completion pointer; in that case return the op.
+	 */
+	if (comp_ptr->cdata != CNXK_DPI_REQ_SSO_CDATA)
+		return (uintptr_t)comp_ptr;
+
+	dpivf =	rte_dma_fp_objs[comp_ptr->dev_id].dev_private;
+	dpi_conf = &dpivf->conf[comp_ptr->vchan];
+
+	rte_mcslock_lock(&dpi_ml, &ml_me);
+	wqecs = (uint8_t *)&comp_ptr->wqecs;
+	if (__atomic_load_n(wqecs, __ATOMIC_RELAXED) != 0)
+		dpi_conf->stats.errors++;
+
+	/* Take into account errors also. This is similar to
+	 * cnxk_dmadev_completed_status().
+	 */
+	dpi_conf->stats.completed++;
+	rte_mcslock_unlock(&dpi_ml, &ml_me);
+
+	op = (struct rte_event_dma_adapter_op *)comp_ptr->op;
+
+	rte_mempool_put(dpi_conf->adapter_info.req_mp, comp_ptr);
+
+	return (uintptr_t)op;
+}
diff --git a/drivers/dma/cnxk/meson.build b/drivers/dma/cnxk/meson.build
index e557349368..8ccc1c2cb7 100644
--- a/drivers/dma/cnxk/meson.build
+++ b/drivers/dma/cnxk/meson.build
@@ -8,6 +8,13 @@ foreach flag: error_cflags
     endif
 endforeach
 
-deps += ['bus_pci', 'common_cnxk', 'dmadev']
+driver_sdk_headers = files(
+        'cnxk_dma_event_dp.h',
+)
+
+deps += ['bus_pci', 'common_cnxk', 'dmadev', 'eventdev']
+
+includes += include_directories('../../event/cnxk')
+
 sources = files('cnxk_dmadev.c', 'cnxk_dmadev_fp.c')
 require_iova_in_mbuf = false
diff --git a/drivers/dma/cnxk/version.map b/drivers/dma/cnxk/version.map
new file mode 100644
index 0000000000..6cc1c6aaa5
--- /dev/null
+++ b/drivers/dma/cnxk/version.map
@@ -0,0 +1,9 @@
+INTERNAL {
+	global:
+
+	cn10k_dma_adapter_enqueue;
+	cn9k_dma_adapter_enqueue;
+	cnxk_dma_adapter_dequeue;
+
+	local: *;
+};
-- 
2.25.1


* [PATCH 3/3] event/cnxk: support DMA event functions
  2023-12-08  8:28 [PATCH 1/3] common/cnxk: dma result to an offset of the event Amit Prakash Shukla
  2023-12-08  8:28 ` [PATCH 2/3] dma/cnxk: support for DMA event enqueue dequeue Amit Prakash Shukla
@ 2023-12-08  8:28 ` Amit Prakash Shukla
  2024-02-09 19:43 ` [PATCH v2 1/3] common/cnxk: dma result to an offset of the event Amit Prakash Shukla
  2 siblings, 0 replies; 19+ messages in thread
From: Amit Prakash Shukla @ 2023-12-08  8:28 UTC (permalink / raw)
  To: Pavan Nikhilesh, Shijith Thotton
  Cc: dev, jerinj, vattunuru, ndabilpuram, Amit Prakash Shukla

Added support for DMA driver callback assignment to eventdev
enqueue and dequeue. The change also defines the DMA adapter
capabilities function.

Depends-on: patch-134955 ("lib/dmadev: get DMA device using device ID")

Signed-off-by: Amit Prakash Shukla <amitprakashs@marvell.com>
---
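Since both caps callbacks report
RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD, an application binds
the DMA device to the event device through the standard
rte_event_dma_adapter API; a hedged sketch (all identifiers are
placeholders):

  uint32_t caps;

  rte_event_dma_adapter_caps_get(evdev_id, dma_dev_id, &caps);
  if (caps & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) {
          rte_event_dma_adapter_create(adapter_id, evdev_id, &port_conf,
                                       RTE_EVENT_DMA_ADAPTER_OP_FORWARD);
          /* vchan_id -1 adds all vchans of the DMA device; see
           * cnxk_dma_adapter_vchan_add() below.
           */
          rte_event_dma_adapter_vchan_add(adapter_id, dma_dev_id, -1, NULL);
  }
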
 drivers/event/cnxk/cn10k_eventdev.c      | 70 +++++++++++++++++
 drivers/event/cnxk/cn10k_worker.h        |  3 +
 drivers/event/cnxk/cn9k_eventdev.c       | 67 ++++++++++++++++
 drivers/event/cnxk/cn9k_worker.h         |  3 +
 drivers/event/cnxk/cnxk_eventdev.h       |  3 +
 drivers/event/cnxk/cnxk_eventdev_adptr.c | 97 ++++++++++++++++++++++++
 drivers/event/cnxk/meson.build           |  3 +-
 7 files changed, 244 insertions(+), 2 deletions(-)

diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c
index bb0c910553..498f97ff2e 100644
--- a/drivers/event/cnxk/cn10k_eventdev.c
+++ b/drivers/event/cnxk/cn10k_eventdev.c
@@ -8,6 +8,9 @@
 #include "cn10k_cryptodev_ops.h"
 #include "cnxk_eventdev.h"
 #include "cnxk_worker.h"
+#include "cnxk_dma_event_dp.h"
+
+#include <rte_dmadev_pmd.h>
 
 #define CN10K_SET_EVDEV_DEQ_OP(dev, deq_op, deq_ops)                           \
 	deq_op = deq_ops[dev->rx_offloads & (NIX_RX_OFFLOAD_MAX - 1)]
@@ -477,6 +480,8 @@ cn10k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 	else
 		event_dev->ca_enqueue = cn10k_cpt_sg_ver1_crypto_adapter_enqueue;
 
+	event_dev->dma_enqueue = cn10k_dma_adapter_enqueue;
+
 	if (dev->tx_offloads & NIX_TX_MULTI_SEG_F)
 		CN10K_SET_EVDEV_ENQ_OP(dev, event_dev->txa_enqueue, sso_hws_tx_adptr_enq_seg);
 	else
@@ -1020,6 +1025,67 @@ cn10k_crypto_adapter_vec_limits(const struct rte_eventdev *event_dev,
 	return 0;
 }
 
+static int
+cn10k_dma_adapter_caps_get(const struct rte_eventdev *event_dev,
+			   const int16_t dma_dev_id, uint32_t *caps)
+{
+	struct rte_dma_dev *dma_dev;
+
+	RTE_SET_USED(event_dev);
+
+	dma_dev = rte_dma_pmd_get_dev_by_id(dma_dev_id);
+	if (dma_dev == NULL)
+		return -EINVAL;
+
+	CNXK_VALID_DEV_OR_ERR_RET(dma_dev->device, "cnxk_dmadev_pci_driver", EINVAL);
+
+	*caps = RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD;
+
+	return 0;
+}
+
+static int
+cn10k_dma_adapter_vchan_add(const struct rte_eventdev *event_dev,
+			    const int16_t dma_dev_id, uint16_t vchan_id,
+			    const struct rte_event *event)
+{
+	struct rte_dma_dev *dma_dev;
+	int ret;
+
+	RTE_SET_USED(event);
+	dma_dev = rte_dma_pmd_get_dev_by_id(dma_dev_id);
+	if (dma_dev == NULL)
+		return -EINVAL;
+
+	CNXK_VALID_DEV_OR_ERR_RET(dma_dev->device, "cnxk_dmadev_pci_driver", EINVAL);
+
+	cn10k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);
+
+	ret = cnxk_dma_adapter_vchan_add(event_dev, dma_dev_id, vchan_id);
+	cn10k_sso_set_priv_mem(event_dev, NULL);
+
+	return ret;
+}
+
+static int
+cn10k_dma_adapter_vchan_del(const struct rte_eventdev *event_dev,
+			    const int16_t dma_dev_id, uint16_t vchan_id)
+{
+	struct rte_dma_dev *dma_dev;
+
+	RTE_SET_USED(event_dev);
+
+	dma_dev = rte_dma_pmd_get_dev_by_id(dma_dev_id);
+	if (dma_dev == NULL)
+		return -EINVAL;
+
+	CNXK_VALID_DEV_OR_ERR_RET(dma_dev->device, "cnxk_dmadev_pci_driver", EINVAL);
+
+	return cnxk_dma_adapter_vchan_del(dma_dev_id, vchan_id);
+}
+
+
+
 static struct eventdev_ops cn10k_sso_dev_ops = {
 	.dev_infos_get = cn10k_sso_info_get,
 	.dev_configure = cn10k_sso_dev_configure,
@@ -1061,6 +1127,10 @@ static struct eventdev_ops cn10k_sso_dev_ops = {
 	.crypto_adapter_queue_pair_del = cn10k_crypto_adapter_qp_del,
 	.crypto_adapter_vector_limits_get = cn10k_crypto_adapter_vec_limits,
 
+	.dma_adapter_caps_get = cn10k_dma_adapter_caps_get,
+	.dma_adapter_vchan_add = cn10k_dma_adapter_vchan_add,
+	.dma_adapter_vchan_del = cn10k_dma_adapter_vchan_del,
+
 	.xstats_get = cnxk_sso_xstats_get,
 	.xstats_reset = cnxk_sso_xstats_reset,
 	.xstats_get_names = cnxk_sso_xstats_get_names,
diff --git a/drivers/event/cnxk/cn10k_worker.h b/drivers/event/cnxk/cn10k_worker.h
index 8aa916fa12..0036495d98 100644
--- a/drivers/event/cnxk/cn10k_worker.h
+++ b/drivers/event/cnxk/cn10k_worker.h
@@ -7,6 +7,7 @@
 
 #include <rte_eventdev.h>
 #include "cn10k_cryptodev_event_dp.h"
+#include "cnxk_dma_event_dp.h"
 #include "cn10k_rx.h"
 #include "cnxk_worker.h"
 #include "cn10k_eventdev.h"
@@ -236,6 +237,8 @@ cn10k_sso_hws_post_process(struct cn10k_sso_hws *ws, uint64_t *u64,
 		/* Mark vector mempool object as get */
 		RTE_MEMPOOL_CHECK_COOKIES(rte_mempool_from_obj((void *)u64[1]),
 					  (void **)&u64[1], 1, 1);
+	} else if (CNXK_EVENT_TYPE_FROM_TAG(u64[0]) == RTE_EVENT_TYPE_DMADEV) {
+		u64[1] = cnxk_dma_adapter_dequeue(u64[1]);
 	}
 }
 
diff --git a/drivers/event/cnxk/cn9k_eventdev.c b/drivers/event/cnxk/cn9k_eventdev.c
index 9fb9ca0d63..5365a38f52 100644
--- a/drivers/event/cnxk/cn9k_eventdev.c
+++ b/drivers/event/cnxk/cn9k_eventdev.c
@@ -6,6 +6,8 @@
 #include "cnxk_eventdev.h"
 #include "cnxk_worker.h"
 
+#include <rte_dmadev_pmd.h>
+
 #define CN9K_DUAL_WS_PAIR_ID(x, id) (((x)*CN9K_DUAL_WS_NB_WS) + id)
 
 #define CN9K_SET_EVDEV_DEQ_OP(dev, deq_op, deq_ops)                            \
@@ -511,6 +513,8 @@ cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 					      sso_hws_dual_tx_adptr_enq);
 	}
 
+	event_dev->dma_enqueue = cn9k_dma_adapter_enqueue;
+
 	event_dev->txa_enqueue_same_dest = event_dev->txa_enqueue;
 	rte_mb();
 #else
@@ -1018,6 +1022,65 @@ cn9k_tim_caps_get(const struct rte_eventdev *evdev, uint64_t flags,
 				 cn9k_sso_set_priv_mem);
 }
 
+static int
+cn9k_dma_adapter_caps_get(const struct rte_eventdev *event_dev,
+			  const int16_t dma_dev_id, uint32_t *caps)
+{
+	struct rte_dma_dev *dma_dev;
+	RTE_SET_USED(event_dev);
+
+	dma_dev = rte_dma_pmd_get_dev_by_id(dma_dev_id);
+	if (dma_dev == NULL)
+		return -EINVAL;
+
+	CNXK_VALID_DEV_OR_ERR_RET(dma_dev->device, "cnxk_dmadev_pci_driver", EINVAL);
+
+	*caps = RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD;
+
+	return 0;
+}
+
+static int
+cn9k_dma_adapter_vchan_add(const struct rte_eventdev *event_dev,
+			    const int16_t dma_dev_id, uint16_t vchan_id,
+			    const struct rte_event *event)
+{
+	struct rte_dma_dev *dma_dev;
+	int ret;
+
+	RTE_SET_USED(event);
+
+	dma_dev = rte_dma_pmd_get_dev_by_id(dma_dev_id);
+	if (dma_dev == NULL)
+		return -EINVAL;
+
+	CNXK_VALID_DEV_OR_ERR_RET(dma_dev->device, "cnxk_dmadev_pci_driver", EINVAL);
+
+	cn9k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);
+
+	ret = cnxk_dma_adapter_vchan_add(event_dev, dma_dev_id, vchan_id);
+	cn9k_sso_set_priv_mem(event_dev, NULL);
+
+	return ret;
+}
+
+static int
+cn9k_dma_adapter_vchan_del(const struct rte_eventdev *event_dev,
+			    const int16_t dma_dev_id, uint16_t vchan_id)
+{
+	struct rte_dma_dev *dma_dev;
+
+	RTE_SET_USED(event_dev);
+
+	dma_dev = rte_dma_pmd_get_dev_by_id(dma_dev_id);
+	if (dma_dev == NULL)
+		return -EINVAL;
+
+	CNXK_VALID_DEV_OR_ERR_RET(dma_dev->device, "cnxk_dmadev_pci_driver", EINVAL);
+
+	return cnxk_dma_adapter_vchan_del(dma_dev_id, vchan_id);
+}
+
 static struct eventdev_ops cn9k_sso_dev_ops = {
 	.dev_infos_get = cn9k_sso_info_get,
 	.dev_configure = cn9k_sso_dev_configure,
@@ -1056,6 +1119,10 @@ static struct eventdev_ops cn9k_sso_dev_ops = {
 	.crypto_adapter_queue_pair_add = cn9k_crypto_adapter_qp_add,
 	.crypto_adapter_queue_pair_del = cn9k_crypto_adapter_qp_del,
 
+	.dma_adapter_caps_get = cn9k_dma_adapter_caps_get,
+	.dma_adapter_vchan_add = cn9k_dma_adapter_vchan_add,
+	.dma_adapter_vchan_del = cn9k_dma_adapter_vchan_del,
+
 	.xstats_get = cnxk_sso_xstats_get,
 	.xstats_reset = cnxk_sso_xstats_reset,
 	.xstats_get_names = cnxk_sso_xstats_get_names,
diff --git a/drivers/event/cnxk/cn9k_worker.h b/drivers/event/cnxk/cn9k_worker.h
index 0451157812..e8863e42fc 100644
--- a/drivers/event/cnxk/cn9k_worker.h
+++ b/drivers/event/cnxk/cn9k_worker.h
@@ -11,6 +11,7 @@
 #include "cnxk_ethdev.h"
 #include "cnxk_eventdev.h"
 #include "cnxk_worker.h"
+#include "cnxk_dma_event_dp.h"
 #include "cn9k_cryptodev_ops.h"
 
 #include "cn9k_ethdev.h"
@@ -205,6 +206,8 @@ cn9k_sso_hws_post_process(uint64_t *u64, uint64_t mbuf, const uint32_t flags,
 		if (flags & NIX_RX_OFFLOAD_TSTAMP_F)
 			cn9k_sso_process_tstamp(u64[1], mbuf, tstamp[port]);
 		u64[1] = mbuf;
+	} else if (CNXK_EVENT_TYPE_FROM_TAG(u64[0]) == RTE_EVENT_TYPE_DMADEV) {
+		u64[1] = cnxk_dma_adapter_dequeue(u64[1]);
 	}
 }
 
diff --git a/drivers/event/cnxk/cnxk_eventdev.h b/drivers/event/cnxk/cnxk_eventdev.h
index d42d1afa1a..fa99dede85 100644
--- a/drivers/event/cnxk/cnxk_eventdev.h
+++ b/drivers/event/cnxk/cnxk_eventdev.h
@@ -284,4 +284,7 @@ int cnxk_crypto_adapter_qp_add(const struct rte_eventdev *event_dev,
 			       const struct rte_event_crypto_adapter_queue_conf *conf);
 int cnxk_crypto_adapter_qp_del(const struct rte_cryptodev *cdev, int32_t queue_pair_id);
 
+int cnxk_dma_adapter_vchan_add(const struct rte_eventdev *event_dev,
+			       const int16_t dma_dev_id, uint16_t vchan_id);
+int cnxk_dma_adapter_vchan_del(const int16_t dma_dev_id, uint16_t vchan_id);
 #endif /* __CNXK_EVENTDEV_H__ */
diff --git a/drivers/event/cnxk/cnxk_eventdev_adptr.c b/drivers/event/cnxk/cnxk_eventdev_adptr.c
index 92aea92389..a2a59b16c9 100644
--- a/drivers/event/cnxk/cnxk_eventdev_adptr.c
+++ b/drivers/event/cnxk/cnxk_eventdev_adptr.c
@@ -5,6 +5,7 @@
 #include "cnxk_cryptodev_ops.h"
 #include "cnxk_ethdev.h"
 #include "cnxk_eventdev.h"
+#include "cnxk_dmadev.h"
 
 void
 cnxk_sso_updt_xae_cnt(struct cnxk_sso_evdev *dev, void *data,
@@ -737,3 +738,99 @@ cnxk_crypto_adapter_qp_del(const struct rte_cryptodev *cdev,
 
 	return 0;
 }
+
+static int
+dma_adapter_vchan_setup(const int16_t dma_dev_id, struct cnxk_dpi_conf *vchan,
+			uint16_t vchan_id)
+{
+	char name[RTE_MEMPOOL_NAMESIZE];
+	uint32_t cache_size, nb_req;
+	unsigned int req_size;
+
+	snprintf(name, RTE_MEMPOOL_NAMESIZE, "cnxk_dma_req_%u:%u", dma_dev_id, vchan_id);
+	req_size = sizeof(struct cnxk_dpi_compl_s);
+
+	nb_req = vchan->c_desc.max_cnt;
+	cache_size = 16;
+	nb_req += (cache_size * rte_lcore_count());
+
+	vchan->adapter_info.req_mp = rte_mempool_create(name, nb_req, req_size, cache_size, 0,
+							NULL, NULL, NULL, NULL, rte_socket_id(), 0);
+	if (vchan->adapter_info.req_mp == NULL)
+		return -ENOMEM;
+
+	vchan->adapter_info.enabled = true;
+
+	return 0;
+}
+
+int
+cnxk_dma_adapter_vchan_add(const struct rte_eventdev *event_dev,
+			   const int16_t dma_dev_id, uint16_t vchan_id)
+{
+	struct cnxk_sso_evdev *sso_evdev = cnxk_sso_pmd_priv(event_dev);
+	uint32_t adptr_xae_cnt = 0;
+	struct cnxk_dpi_vf_s *dpivf;
+	struct cnxk_dpi_conf *vchan;
+	int ret;
+
+	dpivf = rte_dma_fp_objs[dma_dev_id].dev_private;
+	if ((int16_t)vchan_id == -1) {
+		uint16_t vchan_id;
+
+		for (vchan_id = 0; vchan_id < dpivf->num_vchans; vchan_id++) {
+			vchan = &dpivf->conf[vchan_id];
+			ret = dma_adapter_vchan_setup(dma_dev_id, vchan, vchan_id);
+			if (ret) {
+				cnxk_dma_adapter_vchan_del(dma_dev_id, -1);
+				return ret;
+			}
+			adptr_xae_cnt += vchan->adapter_info.req_mp->size;
+		}
+	} else {
+		vchan = &dpivf->conf[vchan_id];
+		ret = dma_adapter_vchan_setup(dma_dev_id, vchan, vchan_id);
+		if (ret)
+			return ret;
+		adptr_xae_cnt = vchan->adapter_info.req_mp->size;
+	}
+
+	/* Update dma adapter XAE count */
+	sso_evdev->adptr_xae_cnt += adptr_xae_cnt;
+	cnxk_sso_xae_reconfigure((struct rte_eventdev *)(uintptr_t)event_dev);
+
+	return 0;
+}
+
+static int
+dma_adapter_vchan_free(struct cnxk_dpi_conf *vchan)
+{
+	rte_mempool_free(vchan->adapter_info.req_mp);
+	vchan->adapter_info.enabled = false;
+
+	return 0;
+}
+
+int
+cnxk_dma_adapter_vchan_del(const int16_t dma_dev_id, uint16_t vchan_id)
+{
+	struct cnxk_dpi_vf_s *dpivf;
+	struct cnxk_dpi_conf *vchan;
+
+	dpivf = rte_dma_fp_objs[dma_dev_id].dev_private;
+	if ((int16_t)vchan_id == -1) {
+		uint16_t vchan_id;
+
+		for (vchan_id = 0; vchan_id < dpivf->num_vchans; vchan_id++) {
+			vchan = &dpivf->conf[vchan_id];
+			if (vchan->adapter_info.enabled)
+				dma_adapter_vchan_free(vchan);
+		}
+	} else {
+		vchan = &dpivf->conf[vchan_id];
+		if (vchan->adapter_info.enabled)
+			dma_adapter_vchan_free(vchan);
+	}
+
+	return 0;
+}
diff --git a/drivers/event/cnxk/meson.build b/drivers/event/cnxk/meson.build
index 13281d687f..f2e07b8665 100644
--- a/drivers/event/cnxk/meson.build
+++ b/drivers/event/cnxk/meson.build
@@ -316,8 +316,7 @@ foreach flag: extra_flags
 endforeach
 
 headers = files('rte_pmd_cnxk_eventdev.h')
-deps += ['bus_pci', 'common_cnxk', 'net_cnxk', 'crypto_cnxk']
-
+deps += ['bus_pci', 'common_cnxk', 'net_cnxk', 'crypto_cnxk', 'dma_cnxk']
 require_iova_in_mbuf = false
 
 annotate_locks = false
-- 
2.25.1


* [PATCH v2 1/3] common/cnxk: dma result to an offset of the event
  2023-12-08  8:28 [PATCH 1/3] common/cnxk: dma result to an offset of the event Amit Prakash Shukla
  2023-12-08  8:28 ` [PATCH 2/3] dma/cnxk: support for DMA event enqueue dequeue Amit Prakash Shukla
  2023-12-08  8:28 ` [PATCH 3/3] event/cnxk: support DMA event functions Amit Prakash Shukla
@ 2024-02-09 19:43 ` Amit Prakash Shukla
  2024-02-09 19:43   ` [PATCH v2 2/3] dma/cnxk: support for DMA event enqueue dequeue Amit Prakash Shukla
                     ` (3 more replies)
  2 siblings, 4 replies; 19+ messages in thread
From: Amit Prakash Shukla @ 2024-02-09 19:43 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: dev, jerinj, pbhagavatula, vattunuru, sthotton, amitprakashs

Adds support to configure writing the DMA result to an offset of the
DMA response event.

Signed-off-by: Amit Prakash Shukla <amitprakashs@marvell.com>
---
v2:
- Added dual workslot enqueue support.
- Fixed compilation error.
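
As a note on the encoding: dpi_mbox_msg_t carries the offset in an
8-bit field (wqecsoff), so the completion-status member must sit within
the first 256 bytes of the completion structure, and a non-zero offset
doubles as the enable condition for wqecs. A compile-time guard along
these lines (an assumption, not part of this patch) would make the
constraint explicit:

  /* Hypothetical guard: the DPI mailbox offset field is 8 bits wide. */
  _Static_assert(offsetof(struct cnxk_dpi_compl_s, wqecs) < 256,
                 "wqecs offset must fit in dpi_mbox_msg_t::wqecsoff");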

 drivers/common/cnxk/roc_dpi.c       |  6 +++++-
 drivers/common/cnxk/roc_dpi.h       |  2 +-
 drivers/common/cnxk/roc_dpi_priv.h  |  4 ++++
 drivers/common/cnxk/roc_idev.c      | 20 ++++++++++++++++++++
 drivers/common/cnxk/roc_idev_priv.h |  3 +++
 5 files changed, 33 insertions(+), 2 deletions(-)

diff --git a/drivers/common/cnxk/roc_dpi.c b/drivers/common/cnxk/roc_dpi.c
index c241168294..1ee777d779 100644
--- a/drivers/common/cnxk/roc_dpi.c
+++ b/drivers/common/cnxk/roc_dpi.c
@@ -83,6 +83,9 @@ roc_dpi_configure(struct roc_dpi *roc_dpi, uint32_t chunk_sz, uint64_t aura, uin
 	mbox_msg.s.aura = aura;
 	mbox_msg.s.sso_pf_func = idev_sso_pffunc_get();
 	mbox_msg.s.npa_pf_func = idev_npa_pffunc_get();
+	mbox_msg.s.wqecsoff = idev_dma_cs_offset_get();
+	if (mbox_msg.s.wqecsoff)
+		mbox_msg.s.wqecs = 1;
 
 	rc = send_msg_to_pf(&pci_dev->addr, (const char *)&mbox_msg,
 			    sizeof(dpi_mbox_msg_t));
@@ -94,7 +97,7 @@ roc_dpi_configure(struct roc_dpi *roc_dpi, uint32_t chunk_sz, uint64_t aura, uin
 }
 
 int
-roc_dpi_dev_init(struct roc_dpi *roc_dpi)
+roc_dpi_dev_init(struct roc_dpi *roc_dpi, uint8_t offset)
 {
 	struct plt_pci_device *pci_dev = roc_dpi->pci_dev;
 	uint16_t vfid;
@@ -103,6 +106,7 @@ roc_dpi_dev_init(struct roc_dpi *roc_dpi)
 	vfid = ((pci_dev->addr.devid & 0x1F) << 3) | (pci_dev->addr.function & 0x7);
 	vfid -= 1;
 	roc_dpi->vfid = vfid;
+	idev_dma_cs_offset_set(offset);
 
 	return 0;
 }
diff --git a/drivers/common/cnxk/roc_dpi.h b/drivers/common/cnxk/roc_dpi.h
index 4ebde5b8a6..978e2badb2 100644
--- a/drivers/common/cnxk/roc_dpi.h
+++ b/drivers/common/cnxk/roc_dpi.h
@@ -11,7 +11,7 @@ struct roc_dpi {
 	uint16_t vfid;
 } __plt_cache_aligned;
 
-int __roc_api roc_dpi_dev_init(struct roc_dpi *roc_dpi);
+int __roc_api roc_dpi_dev_init(struct roc_dpi *roc_dpi, uint8_t offset);
 int __roc_api roc_dpi_dev_fini(struct roc_dpi *roc_dpi);
 
 int __roc_api roc_dpi_configure(struct roc_dpi *dpi, uint32_t chunk_sz, uint64_t aura,
diff --git a/drivers/common/cnxk/roc_dpi_priv.h b/drivers/common/cnxk/roc_dpi_priv.h
index 518a3e7351..52962c8bc0 100644
--- a/drivers/common/cnxk/roc_dpi_priv.h
+++ b/drivers/common/cnxk/roc_dpi_priv.h
@@ -31,6 +31,10 @@ typedef union dpi_mbox_msg_t {
 		uint64_t sso_pf_func : 16;
 		/* NPA PF function */
 		uint64_t npa_pf_func : 16;
+		/* WQE queue DMA completion status enable */
+		uint64_t wqecs : 1;
+		/* WQE queue DMA completion status offset */
+		uint64_t wqecsoff : 8;
 	} s;
 } dpi_mbox_msg_t;
 
diff --git a/drivers/common/cnxk/roc_idev.c b/drivers/common/cnxk/roc_idev.c
index e6c6b34d78..7b922c8bae 100644
--- a/drivers/common/cnxk/roc_idev.c
+++ b/drivers/common/cnxk/roc_idev.c
@@ -301,6 +301,26 @@ idev_sso_set(struct roc_sso *sso)
 		__atomic_store_n(&idev->sso, sso, __ATOMIC_RELEASE);
 }
 
+void
+idev_dma_cs_offset_set(uint8_t offset)
+{
+	struct idev_cfg *idev = idev_get_cfg();
+
+	if (idev != NULL)
+		idev->dma_cs_offset = offset;
+}
+
+uint8_t
+idev_dma_cs_offset_get(void)
+{
+	struct idev_cfg *idev = idev_get_cfg();
+
+	if (idev != NULL)
+		return idev->dma_cs_offset;
+
+	return 0;
+}
+
 uint64_t
 roc_idev_nix_inl_meta_aura_get(void)
 {
diff --git a/drivers/common/cnxk/roc_idev_priv.h b/drivers/common/cnxk/roc_idev_priv.h
index 80f8465e1c..cf63c58d92 100644
--- a/drivers/common/cnxk/roc_idev_priv.h
+++ b/drivers/common/cnxk/roc_idev_priv.h
@@ -37,6 +37,7 @@ struct idev_cfg {
 	struct roc_nix_list roc_nix_list;
 	plt_spinlock_t nix_inl_dev_lock;
 	plt_spinlock_t npa_dev_lock;
+	uint8_t dma_cs_offset;
 };
 
 /* Generic */
@@ -55,6 +56,8 @@ void idev_sso_pffunc_set(uint16_t sso_pf_func);
 uint16_t idev_sso_pffunc_get(void);
 struct roc_sso *idev_sso_get(void);
 void idev_sso_set(struct roc_sso *sso);
+void idev_dma_cs_offset_set(uint8_t offset);
+uint8_t idev_dma_cs_offset_get(void);
 
 /* idev lmt */
 uint16_t idev_lmt_pffunc_get(void);
-- 
2.34.1


* [PATCH v2 2/3] dma/cnxk: support for DMA event enqueue dequeue
  2024-02-09 19:43 ` [PATCH v2 1/3] common/cnxk: dma result to an offset of the event Amit Prakash Shukla
@ 2024-02-09 19:43   ` Amit Prakash Shukla
  2024-02-09 19:43   ` [PATCH v2 3/3] event/cnxk: support DMA event functions Amit Prakash Shukla
                     ` (2 subsequent siblings)
  3 siblings, 0 replies; 19+ messages in thread
From: Amit Prakash Shukla @ 2024-02-09 19:43 UTC (permalink / raw)
  To: Pavan Nikhilesh, Shijith Thotton, Vamsi Attunuru
  Cc: dev, jerinj, ndabilpuram, amitprakashs

Added cnxk driver support for DMA event enqueue and dequeue.
Also added changes for work queue entry (WQE) completion status and
dual workslot DMA event enqueue.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
Signed-off-by: Amit Prakash Shukla <amitprakashs@marvell.com>
---
v2:
- Added dual workslot enqueue support.
- Fixed compilation error.
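
Relative to v1, the enqueue/dequeue paths now serialize access to the
shared DPI queue with an MCS lock stored per DPI VF (dpivf->mcs_lock)
instead of a file-scope lock. For reference, the rte_mcslock pattern
used here (a sketch):

  RTE_ATOMIC(rte_mcslock_t *) mcs_lock = NULL; /* shared head, per queue */
  rte_mcslock_t mcs_lock_me;                   /* per-caller node, on stack */

  rte_mcslock_lock(&mcs_lock, &mcs_lock_me);
  /* ... write the DPI descriptors and ring the doorbell ... */
  rte_mcslock_unlock(&mcs_lock, &mcs_lock_me);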

 doc/guides/eventdevs/cnxk.rst        |   5 +
 drivers/dma/cnxk/cnxk_dma_event_dp.h |  24 +++
 drivers/dma/cnxk/cnxk_dmadev.c       |   3 +-
 drivers/dma/cnxk/cnxk_dmadev.h       |  20 +-
 drivers/dma/cnxk/cnxk_dmadev_fp.c    | 290 +++++++++++++++++++++++++++
 drivers/dma/cnxk/meson.build         |   9 +-
 drivers/dma/cnxk/version.map         |  10 +
 drivers/event/cnxk/cn9k_eventdev.c   |   2 +
 8 files changed, 360 insertions(+), 3 deletions(-)
 create mode 100644 drivers/dma/cnxk/cnxk_dma_event_dp.h
 create mode 100644 drivers/dma/cnxk/version.map

diff --git a/doc/guides/eventdevs/cnxk.rst b/doc/guides/eventdevs/cnxk.rst
index cccb8a0304..9ff1052c53 100644
--- a/doc/guides/eventdevs/cnxk.rst
+++ b/doc/guides/eventdevs/cnxk.rst
@@ -227,3 +227,8 @@ ethernet devices connected to event device to override this applications can
 use `force_rx_bp=1` device arguments.
 Using unique mempool per each ethernet device is recommended when they are
 connected to event device.
+
+DMA adapter new mode support
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The DMA driver does not support the DMA adapter configured in new mode.
diff --git a/drivers/dma/cnxk/cnxk_dma_event_dp.h b/drivers/dma/cnxk/cnxk_dma_event_dp.h
new file mode 100644
index 0000000000..5f890ab18b
--- /dev/null
+++ b/drivers/dma/cnxk/cnxk_dma_event_dp.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2023 Marvell.
+ */
+
+#ifndef _CNXK_DMA_EVENT_DP_H_
+#define _CNXK_DMA_EVENT_DP_H_
+
+#include <stdint.h>
+
+#include <rte_common.h>
+#include <rte_eventdev.h>
+
+__rte_internal
+uint16_t cn10k_dma_adapter_enqueue(void *ws, struct rte_event ev[], uint16_t nb_events);
+
+__rte_internal
+uint16_t cn9k_dma_adapter_enqueue(void *ws, struct rte_event ev[], uint16_t nb_events);
+
+__rte_internal
+uint16_t cn9k_dma_adapter_dual_enqueue(void *ws, struct rte_event ev[], uint16_t nb_events);
+
+__rte_internal
+uintptr_t cnxk_dma_adapter_dequeue(uintptr_t get_work1);
+#endif /* _CNXK_DMA_EVENT_DP_H_ */
diff --git a/drivers/dma/cnxk/cnxk_dmadev.c b/drivers/dma/cnxk/cnxk_dmadev.c
index 1e7f49792c..4ab3cfbdf2 100644
--- a/drivers/dma/cnxk/cnxk_dmadev.c
+++ b/drivers/dma/cnxk/cnxk_dmadev.c
@@ -589,10 +589,11 @@ cnxk_dmadev_probe(struct rte_pci_driver *pci_drv __rte_unused, struct rte_pci_de
 		dmadev->fp_obj->copy_sg = cn10k_dmadev_copy_sg;
 	}
 
+	dpivf->mcs_lock = NULL;
 	rdpi = &dpivf->rdpi;
 
 	rdpi->pci_dev = pci_dev;
-	rc = roc_dpi_dev_init(rdpi);
+	rc = roc_dpi_dev_init(rdpi, offsetof(struct cnxk_dpi_compl_s, wqecs));
 	if (rc < 0)
 		goto err_out_free;
 
diff --git a/drivers/dma/cnxk/cnxk_dmadev.h b/drivers/dma/cnxk/cnxk_dmadev.h
index 350ae73b5c..610a360ba2 100644
--- a/drivers/dma/cnxk/cnxk_dmadev.h
+++ b/drivers/dma/cnxk/cnxk_dmadev.h
@@ -14,11 +14,14 @@
 #include <rte_eal.h>
 #include <rte_lcore.h>
 #include <rte_mbuf_pool_ops.h>
+#include <rte_mcslock.h>
 #include <rte_mempool.h>
 #include <rte_pci.h>
 
 #include <roc_api.h>
 
+#include "cnxk_dma_event_dp.h"
+
 #define CNXK_DPI_MAX_POINTER		    15
 #define CNXK_DPI_STRM_INC(s, var)	    ((s).var = ((s).var + 1) & (s).max_cnt)
 #define CNXK_DPI_STRM_DEC(s, var)	    ((s).var = ((s).var - 1) == -1 ? (s).max_cnt :	\
@@ -40,6 +43,11 @@
  */
 #define CNXK_DPI_REQ_CDATA 0xFF
 
+/* Set completion data to 0xDEADBEEF when a request is submitted for SSO.
+ * This helps differentiate if the dequeue is called after a cnxk enqueue.
+ */
+#define CNXK_DPI_REQ_SSO_CDATA    0xDEADBEEF
+
 union cnxk_dpi_instr_cmd {
 	uint64_t u;
 	struct cn9k_dpi_instr_cmd {
@@ -85,7 +93,10 @@ union cnxk_dpi_instr_cmd {
 
 struct cnxk_dpi_compl_s {
 	uint64_t cdata;
-	void *cb_data;
+	void *op;
+	uint16_t dev_id;
+	uint16_t vchan;
+	uint32_t wqecs;
 };
 
 struct cnxk_dpi_cdesc_data_s {
@@ -95,6 +106,11 @@ struct cnxk_dpi_cdesc_data_s {
 	uint16_t tail;
 };
 
+struct cnxk_dma_adapter_info {
+	bool enabled;               /* Set if vchan queue is added to dma adapter. */
+	struct rte_mempool *req_mp; /* DMA inflight request mempool. */
+};
+
 struct cnxk_dpi_conf {
 	union cnxk_dpi_instr_cmd cmd;
 	struct cnxk_dpi_cdesc_data_s c_desc;
@@ -103,6 +119,7 @@ struct cnxk_dpi_conf {
 	uint16_t desc_idx;
 	struct rte_dma_stats stats;
 	uint64_t completed_offset;
+	struct cnxk_dma_adapter_info adapter_info;
 };
 
 struct cnxk_dpi_vf_s {
@@ -112,6 +129,7 @@ struct cnxk_dpi_vf_s {
 	uint16_t chunk_size_m1;
 	struct rte_mempool *chunk_pool;
 	struct cnxk_dpi_conf conf[CNXK_DPI_MAX_VCHANS_PER_QUEUE];
+	RTE_ATOMIC(rte_mcslock_t *) mcs_lock;
 	/* Slow path */
 	struct roc_dpi rdpi;
 	uint32_t aura;
diff --git a/drivers/dma/cnxk/cnxk_dmadev_fp.c b/drivers/dma/cnxk/cnxk_dmadev_fp.c
index 95df19a2db..009a871e43 100644
--- a/drivers/dma/cnxk/cnxk_dmadev_fp.c
+++ b/drivers/dma/cnxk/cnxk_dmadev_fp.c
@@ -5,6 +5,10 @@
 #include <rte_vect.h>
 
 #include "cnxk_dmadev.h"
+#include <rte_event_dma_adapter.h>
+
+#include <cn10k_eventdev.h>
+#include <cnxk_eventdev.h>
 
 static __plt_always_inline void
 __dpi_cpy_scalar(uint64_t *src, uint64_t *dst, uint8_t n)
@@ -434,3 +438,289 @@ cn10k_dmadev_copy_sg(void *dev_private, uint16_t vchan, const struct rte_dma_sge
 
 	return dpi_conf->desc_idx++;
 }
+
+static inline uint64_t
+cnxk_dma_adapter_format_event(uint64_t event)
+{
+	uint64_t w0;
+	w0 = (event & 0xFFC000000000) >> 6 |
+	     (event & 0xFFFFFFF) | RTE_EVENT_TYPE_DMADEV << 28;
+
+	return w0;
+}
+
+uint16_t
+cn10k_dma_adapter_enqueue(void *ws, struct rte_event ev[], uint16_t nb_events)
+{
+	const struct rte_dma_sge *src, *dst;
+	struct rte_event_dma_adapter_op *op;
+	struct cnxk_dpi_compl_s *comp_ptr;
+	struct cnxk_dpi_conf *dpi_conf;
+	struct cnxk_dpi_vf_s *dpivf;
+	struct rte_event *rsp_info;
+	struct cn10k_sso_hws *work;
+	uint16_t nb_src, nb_dst;
+	rte_mcslock_t mcs_lock_me;
+	uint64_t hdr[4];
+	uint16_t count;
+	int rc;
+
+	work = (struct cn10k_sso_hws *)ws;
+
+	for (count = 0; count < nb_events; count++) {
+		op = ev[count].event_ptr;
+		rsp_info = (struct rte_event *)((uint8_t *)op +
+			     sizeof(struct rte_event_dma_adapter_op));
+		dpivf =	rte_dma_fp_objs[op->dma_dev_id].dev_private;
+		dpi_conf = &dpivf->conf[op->vchan];
+
+		if (unlikely(rte_mempool_get(dpi_conf->adapter_info.req_mp, (void **)&comp_ptr)))
+			return count;
+
+		comp_ptr->op = op;
+		comp_ptr->dev_id = op->dma_dev_id;
+		comp_ptr->vchan = op->vchan;
+		comp_ptr->cdata = CNXK_DPI_REQ_SSO_CDATA;
+
+		nb_src = op->nb_src & CNXK_DPI_MAX_POINTER;
+		nb_dst = op->nb_dst & CNXK_DPI_MAX_POINTER;
+
+		hdr[0] = dpi_conf->cmd.u | ((uint64_t)DPI_HDR_PT_WQP << 54);
+		hdr[0] |= (nb_dst << 6) | nb_src;
+		hdr[1] = ((uint64_t)comp_ptr);
+		hdr[2] = cnxk_dma_adapter_format_event(rsp_info->event);
+
+		src = &op->src_seg[0];
+		dst = &op->dst_seg[0];
+
+		if (CNXK_TAG_IS_HEAD(work->gw_rdata) ||
+		    ((CNXK_TT_FROM_TAG(work->gw_rdata) == SSO_TT_ORDERED) &&
+		    (rsp_info->sched_type & DPI_HDR_TT_MASK) ==
+			    RTE_SCHED_TYPE_ORDERED))
+			roc_sso_hws_head_wait(work->base);
+
+		rte_mcslock_lock(&dpivf->mcs_lock, &mcs_lock_me);
+		rc = __dpi_queue_write_sg(dpivf, hdr, src, dst, nb_src, nb_dst);
+		if (unlikely(rc)) {
+			rte_mcslock_unlock(&dpivf->mcs_lock, &mcs_lock_me);
+			return rc;
+		}
+
+		if (op->flags & RTE_DMA_OP_FLAG_SUBMIT) {
+			rte_wmb();
+			plt_write64(dpi_conf->pnum_words + CNXK_DPI_CMD_LEN(nb_src, nb_dst),
+				    dpivf->rdpi.rbase + DPI_VDMA_DBELL);
+			dpi_conf->stats.submitted += dpi_conf->pending + 1;
+			dpi_conf->pnum_words = 0;
+			dpi_conf->pending = 0;
+		} else {
+			dpi_conf->pnum_words += CNXK_DPI_CMD_LEN(nb_src, nb_dst);
+			dpi_conf->pending++;
+		}
+		rte_mcslock_unlock(&dpivf->mcs_lock, &mcs_lock_me);
+	}
+
+	return count;
+}
+
+uint16_t
+cn9k_dma_adapter_dual_enqueue(void *ws, struct rte_event ev[], uint16_t nb_events)
+{
+	const struct rte_dma_sge *fptr, *lptr;
+	struct rte_event_dma_adapter_op *op;
+	struct cnxk_dpi_compl_s *comp_ptr;
+	struct cn9k_sso_hws_dual *work;
+	struct cnxk_dpi_conf *dpi_conf;
+	struct cnxk_dpi_vf_s *dpivf;
+	struct rte_event *rsp_info;
+	uint16_t nb_src, nb_dst;
+	rte_mcslock_t mcs_lock_me;
+	uint64_t hdr[4];
+	uint16_t count;
+	int rc;
+
+	work = (struct cn9k_sso_hws_dual *)ws;
+
+	for (count = 0; count < nb_events; count++) {
+		op = ev[count].event_ptr;
+		rsp_info = (struct rte_event *)((uint8_t *)op +
+						sizeof(struct rte_event_dma_adapter_op));
+		dpivf = rte_dma_fp_objs[op->dma_dev_id].dev_private;
+		dpi_conf = &dpivf->conf[op->vchan];
+
+		if (unlikely(rte_mempool_get(dpi_conf->adapter_info.req_mp, (void **)&comp_ptr)))
+			return count;
+
+		comp_ptr->op = op;
+		comp_ptr->dev_id = op->dma_dev_id;
+		comp_ptr->vchan = op->vchan;
+		comp_ptr->cdata = CNXK_DPI_REQ_SSO_CDATA;
+
+		hdr[1] = dpi_conf->cmd.u | ((uint64_t)DPI_HDR_PT_WQP << 36);
+		hdr[2] = (uint64_t)comp_ptr;
+
+		nb_src = op->nb_src & CNXK_DPI_MAX_POINTER;
+		nb_dst = op->nb_dst & CNXK_DPI_MAX_POINTER;
+		/*
+		 * For inbound case, src pointers are last pointers.
+		 * For all other cases, src pointers are first pointers.
+		 */
+		if (((dpi_conf->cmd.u >> 48) & DPI_HDR_XTYPE_MASK) == DPI_XTYPE_INBOUND) {
+			fptr = &op->dst_seg[0];
+			lptr = &op->src_seg[0];
+			RTE_SWAP(nb_src, nb_dst);
+		} else {
+			fptr = &op->src_seg[0];
+			lptr = &op->dst_seg[0];
+		}
+
+		hdr[0] = ((uint64_t)nb_dst << 54) | (uint64_t)nb_src << 48;
+		hdr[0] |= cnxk_dma_adapter_format_event(rsp_info->event);
+
+		if ((rsp_info->sched_type & DPI_HDR_TT_MASK) == RTE_SCHED_TYPE_ORDERED)
+			roc_sso_hws_head_wait(work->base[!work->vws]);
+
+		rte_mcslock_lock(&dpivf->mcs_lock, &mcs_lock_me);
+		rc = __dpi_queue_write_sg(dpivf, hdr, fptr, lptr, nb_src, nb_dst);
+		if (unlikely(rc)) {
+			rte_mcslock_unlock(&dpivf->mcs_lock, &mcs_lock_me);
+			return rc;
+		}
+
+		if (op->flags & RTE_DMA_OP_FLAG_SUBMIT) {
+			rte_wmb();
+			plt_write64(dpi_conf->pnum_words + CNXK_DPI_CMD_LEN(nb_src, nb_dst),
+				    dpivf->rdpi.rbase + DPI_VDMA_DBELL);
+			dpi_conf->stats.submitted += dpi_conf->pending + 1;
+			dpi_conf->pnum_words = 0;
+			dpi_conf->pending = 0;
+		} else {
+			dpi_conf->pnum_words += CNXK_DPI_CMD_LEN(nb_src, nb_dst);
+			dpi_conf->pending++;
+		}
+		rte_mcslock_unlock(&dpivf->mcs_lock, &mcs_lock_me);
+	}
+
+	return count;
+}
+
+uint16_t
+cn9k_dma_adapter_enqueue(void *ws, struct rte_event ev[], uint16_t nb_events)
+{
+	const struct rte_dma_sge *fptr, *lptr;
+	struct rte_event_dma_adapter_op *op;
+	struct cnxk_dpi_compl_s *comp_ptr;
+	struct cnxk_dpi_conf *dpi_conf;
+	struct cnxk_dpi_vf_s *dpivf;
+	struct rte_event *rsp_info;
+	struct cn9k_sso_hws *work;
+	uint16_t nb_src, nb_dst;
+	rte_mcslock_t mcs_lock_me;
+	uint64_t hdr[4];
+	uint16_t count;
+	int rc;
+
+	work = (struct cn9k_sso_hws *)ws;
+
+	for (count = 0; count < nb_events; count++) {
+		op = ev[count].event_ptr;
+		rsp_info = (struct rte_event *)((uint8_t *)op +
+			    sizeof(struct rte_event_dma_adapter_op));
+		dpivf =	rte_dma_fp_objs[op->dma_dev_id].dev_private;
+		dpi_conf = &dpivf->conf[op->vchan];
+
+		if (unlikely(rte_mempool_get(dpi_conf->adapter_info.req_mp, (void **)&comp_ptr)))
+			return count;
+
+		comp_ptr->op = op;
+		comp_ptr->dev_id = op->dma_dev_id;
+		comp_ptr->vchan = op->vchan;
+		comp_ptr->cdata = CNXK_DPI_REQ_SSO_CDATA;
+
+		hdr[1] = dpi_conf->cmd.u | ((uint64_t)DPI_HDR_PT_WQP << 36);
+		hdr[2] = (uint64_t)comp_ptr;
+
+		nb_src = op->nb_src & CNXK_DPI_MAX_POINTER;
+		nb_dst = op->nb_dst & CNXK_DPI_MAX_POINTER;
+		/*
+		 * For inbound case, src pointers are last pointers.
+		 * For all other cases, src pointers are first pointers.
+		 */
+		if (((dpi_conf->cmd.u >> 48) & DPI_HDR_XTYPE_MASK) == DPI_XTYPE_INBOUND) {
+			fptr = &op->dst_seg[0];
+			lptr = &op->src_seg[0];
+			RTE_SWAP(nb_src, nb_dst);
+		} else {
+			fptr = &op->src_seg[0];
+			lptr = &op->dst_seg[0];
+		}
+
+		hdr[0] = ((uint64_t)nb_dst << 54) | (uint64_t)nb_src << 48;
+		hdr[0] |= cnxk_dma_adapter_format_event(rsp_info->event);
+
+		if ((rsp_info->sched_type & DPI_HDR_TT_MASK) == RTE_SCHED_TYPE_ORDERED)
+			roc_sso_hws_head_wait(work->base);
+
+		rte_mcslock_lock(&dpivf->mcs_lock, &mcs_lock_me);
+		rc = __dpi_queue_write_sg(dpivf, hdr, fptr, lptr, nb_src, nb_dst);
+		if (unlikely(rc)) {
+			rte_mcslock_unlock(&dpivf->mcs_lock, &mcs_lock_me);
+			return rc;
+		}
+
+		if (op->flags & RTE_DMA_OP_FLAG_SUBMIT) {
+			rte_wmb();
+			plt_write64(dpi_conf->pnum_words + CNXK_DPI_CMD_LEN(nb_src, nb_dst),
+				    dpivf->rdpi.rbase + DPI_VDMA_DBELL);
+			dpi_conf->stats.submitted += dpi_conf->pending + 1;
+			dpi_conf->pnum_words = 0;
+			dpi_conf->pending = 0;
+		} else {
+			dpi_conf->pnum_words += CNXK_DPI_CMD_LEN(nb_src, nb_dst);
+			dpi_conf->pending++;
+		}
+		rte_mcslock_unlock(&dpivf->mcs_lock, &mcs_lock_me);
+	}
+
+	return count;
+}
+
+uintptr_t
+cnxk_dma_adapter_dequeue(uintptr_t get_work1)
+{
+	struct rte_event_dma_adapter_op *op;
+	struct cnxk_dpi_compl_s *comp_ptr;
+	struct cnxk_dpi_conf *dpi_conf;
+	struct cnxk_dpi_vf_s *dpivf;
+	rte_mcslock_t mcs_lock_me;
+	uint8_t *wqecs;
+
+	comp_ptr = (struct cnxk_dpi_compl_s *)get_work1;
+
+	/* Dequeue can be called without a prior cnxk enqueue in the DMA
+	 * adapter case. When it is called that way, the DMA op is not
+	 * embedded in a completion pointer; in that case return the op.
+	 */
+	if (comp_ptr->cdata != CNXK_DPI_REQ_SSO_CDATA)
+		return (uintptr_t)comp_ptr;
+
+	dpivf =	rte_dma_fp_objs[comp_ptr->dev_id].dev_private;
+	dpi_conf = &dpivf->conf[comp_ptr->vchan];
+
+	rte_mcslock_lock(&dpivf->mcs_lock, &mcs_lock_me);
+	wqecs = (uint8_t *)&comp_ptr->wqecs;
+	if (__atomic_load_n(wqecs, __ATOMIC_RELAXED) != 0)
+		dpi_conf->stats.errors++;
+
+	/* Take into account errors also. This is similar to
+	 * cnxk_dmadev_completed_status().
+	 */
+	dpi_conf->stats.completed++;
+	rte_mcslock_unlock(&dpivf->mcs_lock, &mcs_lock_me);
+
+	op = (struct rte_event_dma_adapter_op *)comp_ptr->op;
+
+	rte_mempool_put(dpi_conf->adapter_info.req_mp, comp_ptr);
+
+	return (uintptr_t)op;
+}
diff --git a/drivers/dma/cnxk/meson.build b/drivers/dma/cnxk/meson.build
index e557349368..8ccc1c2cb7 100644
--- a/drivers/dma/cnxk/meson.build
+++ b/drivers/dma/cnxk/meson.build
@@ -8,6 +8,13 @@ foreach flag: error_cflags
     endif
 endforeach
 
-deps += ['bus_pci', 'common_cnxk', 'dmadev']
+driver_sdk_headers = files(
+        'cnxk_dma_event_dp.h',
+)
+
+deps += ['bus_pci', 'common_cnxk', 'dmadev', 'eventdev']
+
+includes += include_directories('../../event/cnxk')
+
 sources = files('cnxk_dmadev.c', 'cnxk_dmadev_fp.c')
 require_iova_in_mbuf = false
diff --git a/drivers/dma/cnxk/version.map b/drivers/dma/cnxk/version.map
new file mode 100644
index 0000000000..a1490abf97
--- /dev/null
+++ b/drivers/dma/cnxk/version.map
@@ -0,0 +1,10 @@
+INTERNAL {
+	global:
+
+	cn10k_dma_adapter_enqueue;
+	cn9k_dma_adapter_enqueue;
+	cn9k_dma_adapter_dual_enqueue;
+	cnxk_dma_adapter_dequeue;
+
+	local: *;
+};
diff --git a/drivers/event/cnxk/cn9k_eventdev.c b/drivers/event/cnxk/cn9k_eventdev.c
index 9fb9ca0d63..6620ad61bb 100644
--- a/drivers/event/cnxk/cn9k_eventdev.c
+++ b/drivers/event/cnxk/cn9k_eventdev.c
@@ -460,6 +460,7 @@ cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 		}
 	}
 	event_dev->ca_enqueue = cn9k_sso_hws_ca_enq;
+	event_dev->dma_enqueue = cn9k_dma_adapter_enqueue;
 
 	if (dev->tx_offloads & NIX_TX_MULTI_SEG_F)
 		CN9K_SET_EVDEV_ENQ_OP(dev, event_dev->txa_enqueue,
@@ -475,6 +476,7 @@ cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 		event_dev->enqueue_forward_burst =
 			cn9k_sso_hws_dual_enq_fwd_burst;
 		event_dev->ca_enqueue = cn9k_sso_hws_dual_ca_enq;
+		event_dev->dma_enqueue = cn9k_dma_adapter_dual_enqueue;
 		event_dev->profile_switch = cn9k_sso_hws_dual_profile_switch;
 
 		if (dev->rx_offloads & NIX_RX_MULTI_SEG_F) {
-- 
2.34.1


* [PATCH v2 3/3] event/cnxk: support DMA event functions
  2024-02-09 19:43 ` [PATCH v2 1/3] common/cnxk: dma result to an offset of the event Amit Prakash Shukla
  2024-02-09 19:43   ` [PATCH v2 2/3] dma/cnxk: support for DMA event enqueue dequeue Amit Prakash Shukla
@ 2024-02-09 19:43   ` Amit Prakash Shukla
  2024-02-25 15:22   ` [PATCH v2 1/3] common/cnxk: dma result to an offset of the event Jerin Jacob
  2024-02-26  9:43   ` [PATCH v3 " Amit Prakash Shukla
  3 siblings, 0 replies; 19+ messages in thread
From: Amit Prakash Shukla @ 2024-02-09 19:43 UTC (permalink / raw)
  To: Pavan Nikhilesh, Shijith Thotton
  Cc: dev, jerinj, vattunuru, ndabilpuram, amitprakashs

Added support for DMA driver callback assignment to eventdev
enqueue and dequeue. The change also defines the DMA adapter
capabilities function.

Depends-on: series-30612 ("lib/dmadev: get DMA device using device ID")

Signed-off-by: Amit Prakash Shukla <amitprakashs@marvell.com>
---
v2:
- Added dual workslot enqueue support.
- Fixed compilation error.
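
On completions, cnxk_dma_adapter_dequeue() (wired into the worker
post-processing in this patch) replaces the completion pointer with the
original op before the event reaches the application, so a worker sees
a plain RTE_EVENT_TYPE_DMADEV event. A hedged sketch of the application
loop (op_pool is the application's op mempool, a placeholder):

  struct rte_event ev;

  while (rte_event_dequeue_burst(evdev_id, port_id, &ev, 1, 0)) {
          if (ev.event_type == RTE_EVENT_TYPE_DMADEV) {
                  struct rte_event_dma_adapter_op *op = ev.event_ptr;

                  /* DMA transfer finished; recycle the op. */
                  rte_mempool_put(op_pool, op);
          }
  }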

 drivers/event/cnxk/cn10k_eventdev.c      | 70 +++++++++++++++++
 drivers/event/cnxk/cn10k_worker.h        |  3 +
 drivers/event/cnxk/cn9k_eventdev.c       | 67 ++++++++++++++++
 drivers/event/cnxk/cn9k_worker.h         |  3 +
 drivers/event/cnxk/cnxk_eventdev.h       |  3 +
 drivers/event/cnxk/cnxk_eventdev_adptr.c | 97 ++++++++++++++++++++++++
 drivers/event/cnxk/meson.build           |  3 +-
 7 files changed, 244 insertions(+), 2 deletions(-)

diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c
index bb0c910553..498f97ff2e 100644
--- a/drivers/event/cnxk/cn10k_eventdev.c
+++ b/drivers/event/cnxk/cn10k_eventdev.c
@@ -8,6 +8,9 @@
 #include "cn10k_cryptodev_ops.h"
 #include "cnxk_eventdev.h"
 #include "cnxk_worker.h"
+#include "cnxk_dma_event_dp.h"
+
+#include <rte_dmadev_pmd.h>
 
 #define CN10K_SET_EVDEV_DEQ_OP(dev, deq_op, deq_ops)                           \
 	deq_op = deq_ops[dev->rx_offloads & (NIX_RX_OFFLOAD_MAX - 1)]
@@ -477,6 +480,8 @@ cn10k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 	else
 		event_dev->ca_enqueue = cn10k_cpt_sg_ver1_crypto_adapter_enqueue;
 
+	event_dev->dma_enqueue = cn10k_dma_adapter_enqueue;
+
 	if (dev->tx_offloads & NIX_TX_MULTI_SEG_F)
 		CN10K_SET_EVDEV_ENQ_OP(dev, event_dev->txa_enqueue, sso_hws_tx_adptr_enq_seg);
 	else
@@ -1020,6 +1025,67 @@ cn10k_crypto_adapter_vec_limits(const struct rte_eventdev *event_dev,
 	return 0;
 }
 
+static int
+cn10k_dma_adapter_caps_get(const struct rte_eventdev *event_dev,
+			   const int16_t dma_dev_id, uint32_t *caps)
+{
+	struct rte_dma_dev *dma_dev;
+
+	RTE_SET_USED(event_dev);
+
+	dma_dev = rte_dma_pmd_get_dev_by_id(dma_dev_id);
+	if (dma_dev == NULL)
+		return -EINVAL;
+
+	CNXK_VALID_DEV_OR_ERR_RET(dma_dev->device, "cnxk_dmadev_pci_driver", EINVAL);
+
+	*caps = RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD;
+
+	return 0;
+}
+
+static int
+cn10k_dma_adapter_vchan_add(const struct rte_eventdev *event_dev,
+			    const int16_t dma_dev_id, uint16_t vchan_id,
+			    const struct rte_event *event)
+{
+	struct rte_dma_dev *dma_dev;
+	int ret;
+
+	RTE_SET_USED(event);
+	dma_dev = rte_dma_pmd_get_dev_by_id(dma_dev_id);
+	if (dma_dev == NULL)
+		return -EINVAL;
+
+	CNXK_VALID_DEV_OR_ERR_RET(dma_dev->device, "cnxk_dmadev_pci_driver", EINVAL);
+
+	cn10k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);
+
+	ret = cnxk_dma_adapter_vchan_add(event_dev, dma_dev_id, vchan_id);
+	cn10k_sso_set_priv_mem(event_dev, NULL);
+
+	return ret;
+}
+
+static int
+cn10k_dma_adapter_vchan_del(const struct rte_eventdev *event_dev,
+			    const int16_t dma_dev_id, uint16_t vchan_id)
+{
+	struct rte_dma_dev *dma_dev;
+
+	RTE_SET_USED(event_dev);
+
+	dma_dev = rte_dma_pmd_get_dev_by_id(dma_dev_id);
+	if (dma_dev == NULL)
+		return -EINVAL;
+
+	CNXK_VALID_DEV_OR_ERR_RET(dma_dev->device, "cnxk_dmadev_pci_driver", EINVAL);
+
+	return cnxk_dma_adapter_vchan_del(dma_dev_id, vchan_id);
+}
+
+
+
 static struct eventdev_ops cn10k_sso_dev_ops = {
 	.dev_infos_get = cn10k_sso_info_get,
 	.dev_configure = cn10k_sso_dev_configure,
@@ -1061,6 +1127,10 @@ static struct eventdev_ops cn10k_sso_dev_ops = {
 	.crypto_adapter_queue_pair_del = cn10k_crypto_adapter_qp_del,
 	.crypto_adapter_vector_limits_get = cn10k_crypto_adapter_vec_limits,
 
+	.dma_adapter_caps_get = cn10k_dma_adapter_caps_get,
+	.dma_adapter_vchan_add = cn10k_dma_adapter_vchan_add,
+	.dma_adapter_vchan_del = cn10k_dma_adapter_vchan_del,
+
 	.xstats_get = cnxk_sso_xstats_get,
 	.xstats_reset = cnxk_sso_xstats_reset,
 	.xstats_get_names = cnxk_sso_xstats_get_names,
diff --git a/drivers/event/cnxk/cn10k_worker.h b/drivers/event/cnxk/cn10k_worker.h
index 8aa916fa12..0036495d98 100644
--- a/drivers/event/cnxk/cn10k_worker.h
+++ b/drivers/event/cnxk/cn10k_worker.h
@@ -7,6 +7,7 @@
 
 #include <rte_eventdev.h>
 #include "cn10k_cryptodev_event_dp.h"
+#include "cnxk_dma_event_dp.h"
 #include "cn10k_rx.h"
 #include "cnxk_worker.h"
 #include "cn10k_eventdev.h"
@@ -236,6 +237,8 @@ cn10k_sso_hws_post_process(struct cn10k_sso_hws *ws, uint64_t *u64,
 		/* Mark vector mempool object as get */
 		RTE_MEMPOOL_CHECK_COOKIES(rte_mempool_from_obj((void *)u64[1]),
 					  (void **)&u64[1], 1, 1);
+	} else if (CNXK_EVENT_TYPE_FROM_TAG(u64[0]) == RTE_EVENT_TYPE_DMADEV) {
+		u64[1] = cnxk_dma_adapter_dequeue(u64[1]);
 	}
 }
 
diff --git a/drivers/event/cnxk/cn9k_eventdev.c b/drivers/event/cnxk/cn9k_eventdev.c
index 6620ad61bb..3a4d20c78b 100644
--- a/drivers/event/cnxk/cn9k_eventdev.c
+++ b/drivers/event/cnxk/cn9k_eventdev.c
@@ -6,6 +6,8 @@
 #include "cnxk_eventdev.h"
 #include "cnxk_worker.h"
 
+#include <rte_dmadev_pmd.h>
+
 #define CN9K_DUAL_WS_PAIR_ID(x, id) (((x)*CN9K_DUAL_WS_NB_WS) + id)
 
 #define CN9K_SET_EVDEV_DEQ_OP(dev, deq_op, deq_ops)                            \
@@ -513,6 +515,8 @@ cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 					      sso_hws_dual_tx_adptr_enq);
 	}
 
+	event_dev->dma_enqueue = cn9k_dma_adapter_enqueue;
+
 	event_dev->txa_enqueue_same_dest = event_dev->txa_enqueue;
 	rte_mb();
 #else
@@ -1020,6 +1024,65 @@ cn9k_tim_caps_get(const struct rte_eventdev *evdev, uint64_t flags,
 				 cn9k_sso_set_priv_mem);
 }
 
+static int
+cn9k_dma_adapter_caps_get(const struct rte_eventdev *event_dev,
+			  const int16_t dma_dev_id, uint32_t *caps)
+{
+	struct rte_dma_dev *dma_dev;
+	RTE_SET_USED(event_dev);
+
+	dma_dev = rte_dma_pmd_get_dev_by_id(dma_dev_id);
+	if (dma_dev == NULL)
+		return -EINVAL;
+
+	CNXK_VALID_DEV_OR_ERR_RET(dma_dev->device, "cnxk_dmadev_pci_driver", EINVAL);
+
+	*caps = RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD;
+
+	return 0;
+}
+
+static int
+cn9k_dma_adapter_vchan_add(const struct rte_eventdev *event_dev,
+			    const int16_t dma_dev_id, uint16_t vchan_id,
+			    const struct rte_event *event)
+{
+	struct rte_dma_dev *dma_dev;
+	int ret;
+
+	RTE_SET_USED(event);
+
+	dma_dev = rte_dma_pmd_get_dev_by_id(dma_dev_id);
+	if (dma_dev == NULL)
+		return -EINVAL;
+
+	CNXK_VALID_DEV_OR_ERR_RET(dma_dev->device, "cnxk_dmadev_pci_driver", EINVAL);
+
+	cn9k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);
+
+	ret = cnxk_dma_adapter_vchan_add(event_dev, dma_dev_id, vchan_id);
+	cn9k_sso_set_priv_mem(event_dev, NULL);
+
+	return ret;
+}
+
+static int
+cn9k_dma_adapter_vchan_del(const struct rte_eventdev *event_dev,
+			    const int16_t dma_dev_id, uint16_t vchan_id)
+{
+	struct rte_dma_dev *dma_dev;
+
+	RTE_SET_USED(event_dev);
+
+	dma_dev = rte_dma_pmd_get_dev_by_id(dma_dev_id);
+	if (dma_dev == NULL)
+		return -EINVAL;
+
+	CNXK_VALID_DEV_OR_ERR_RET(dma_dev->device, "cnxk_dmadev_pci_driver", EINVAL);
+
+	return cnxk_dma_adapter_vchan_del(dma_dev_id, vchan_id);
+}
+
 static struct eventdev_ops cn9k_sso_dev_ops = {
 	.dev_infos_get = cn9k_sso_info_get,
 	.dev_configure = cn9k_sso_dev_configure,
@@ -1058,6 +1121,10 @@ static struct eventdev_ops cn9k_sso_dev_ops = {
 	.crypto_adapter_queue_pair_add = cn9k_crypto_adapter_qp_add,
 	.crypto_adapter_queue_pair_del = cn9k_crypto_adapter_qp_del,
 
+	.dma_adapter_caps_get = cn9k_dma_adapter_caps_get,
+	.dma_adapter_vchan_add = cn9k_dma_adapter_vchan_add,
+	.dma_adapter_vchan_del = cn9k_dma_adapter_vchan_del,
+
 	.xstats_get = cnxk_sso_xstats_get,
 	.xstats_reset = cnxk_sso_xstats_reset,
 	.xstats_get_names = cnxk_sso_xstats_get_names,
diff --git a/drivers/event/cnxk/cn9k_worker.h b/drivers/event/cnxk/cn9k_worker.h
index 0451157812..e8863e42fc 100644
--- a/drivers/event/cnxk/cn9k_worker.h
+++ b/drivers/event/cnxk/cn9k_worker.h
@@ -11,6 +11,7 @@
 #include "cnxk_ethdev.h"
 #include "cnxk_eventdev.h"
 #include "cnxk_worker.h"
+#include "cnxk_dma_event_dp.h"
 #include "cn9k_cryptodev_ops.h"
 
 #include "cn9k_ethdev.h"
@@ -205,6 +206,8 @@ cn9k_sso_hws_post_process(uint64_t *u64, uint64_t mbuf, const uint32_t flags,
 		if (flags & NIX_RX_OFFLOAD_TSTAMP_F)
 			cn9k_sso_process_tstamp(u64[1], mbuf, tstamp[port]);
 		u64[1] = mbuf;
+	} else if (CNXK_EVENT_TYPE_FROM_TAG(u64[0]) == RTE_EVENT_TYPE_DMADEV) {
+		u64[1] = cnxk_dma_adapter_dequeue(u64[1]);
 	}
 }
 
diff --git a/drivers/event/cnxk/cnxk_eventdev.h b/drivers/event/cnxk/cnxk_eventdev.h
index d42d1afa1a..fa99dede85 100644
--- a/drivers/event/cnxk/cnxk_eventdev.h
+++ b/drivers/event/cnxk/cnxk_eventdev.h
@@ -284,4 +284,7 @@ int cnxk_crypto_adapter_qp_add(const struct rte_eventdev *event_dev,
 			       const struct rte_event_crypto_adapter_queue_conf *conf);
 int cnxk_crypto_adapter_qp_del(const struct rte_cryptodev *cdev, int32_t queue_pair_id);
 
+int cnxk_dma_adapter_vchan_add(const struct rte_eventdev *event_dev,
+			       const int16_t dma_dev_id, uint16_t vchan_id);
+int cnxk_dma_adapter_vchan_del(const int16_t dma_dev_id, uint16_t vchan_id);
 #endif /* __CNXK_EVENTDEV_H__ */
diff --git a/drivers/event/cnxk/cnxk_eventdev_adptr.c b/drivers/event/cnxk/cnxk_eventdev_adptr.c
index 92aea92389..a2a59b16c9 100644
--- a/drivers/event/cnxk/cnxk_eventdev_adptr.c
+++ b/drivers/event/cnxk/cnxk_eventdev_adptr.c
@@ -5,6 +5,7 @@
 #include "cnxk_cryptodev_ops.h"
 #include "cnxk_ethdev.h"
 #include "cnxk_eventdev.h"
+#include "cnxk_dmadev.h"
 
 void
 cnxk_sso_updt_xae_cnt(struct cnxk_sso_evdev *dev, void *data,
@@ -737,3 +738,99 @@ cnxk_crypto_adapter_qp_del(const struct rte_cryptodev *cdev,
 
 	return 0;
 }
+
+static int
+dma_adapter_vchan_setup(const int16_t dma_dev_id, struct cnxk_dpi_conf *vchan,
+			uint16_t vchan_id)
+{
+	char name[RTE_MEMPOOL_NAMESIZE];
+	uint32_t cache_size, nb_req;
+	unsigned int req_size;
+
+	snprintf(name, RTE_MEMPOOL_NAMESIZE, "cnxk_dma_req_%u:%u", dma_dev_id, vchan_id);
+	req_size = sizeof(struct cnxk_dpi_compl_s);
+
+	nb_req = vchan->c_desc.max_cnt;
+	cache_size = 16;
+	nb_req += (cache_size * rte_lcore_count());
+
+	vchan->adapter_info.req_mp = rte_mempool_create(name, nb_req, req_size, cache_size, 0,
+							NULL, NULL, NULL, NULL, rte_socket_id(), 0);
+	if (vchan->adapter_info.req_mp == NULL)
+		return -ENOMEM;
+
+	vchan->adapter_info.enabled = true;
+
+	return 0;
+}
+
+int
+cnxk_dma_adapter_vchan_add(const struct rte_eventdev *event_dev,
+			   const int16_t dma_dev_id, uint16_t vchan_id)
+{
+	struct cnxk_sso_evdev *sso_evdev = cnxk_sso_pmd_priv(event_dev);
+	uint32_t adptr_xae_cnt = 0;
+	struct cnxk_dpi_vf_s *dpivf;
+	struct cnxk_dpi_conf *vchan;
+	int ret;
+
+	dpivf = rte_dma_fp_objs[dma_dev_id].dev_private;
+	if ((int16_t)vchan_id == -1) {
+		uint16_t vchan_id;
+
+		for (vchan_id = 0; vchan_id < dpivf->num_vchans; vchan_id++) {
+			vchan = &dpivf->conf[vchan_id];
+			ret = dma_adapter_vchan_setup(dma_dev_id, vchan, vchan_id);
+			if (ret) {
+				cnxk_dma_adapter_vchan_del(dma_dev_id, -1);
+				return ret;
+			}
+			adptr_xae_cnt += vchan->adapter_info.req_mp->size;
+		}
+	} else {
+		vchan = &dpivf->conf[vchan_id];
+		ret = dma_adapter_vchan_setup(dma_dev_id, vchan, vchan_id);
+		if (ret)
+			return ret;
+		adptr_xae_cnt = vchan->adapter_info.req_mp->size;
+	}
+
+	/* Update dma adapter XAE count */
+	sso_evdev->adptr_xae_cnt += adptr_xae_cnt;
+	cnxk_sso_xae_reconfigure((struct rte_eventdev *)(uintptr_t)event_dev);
+
+	return 0;
+}
+
+static int
+dma_adapter_vchan_free(struct cnxk_dpi_conf *vchan)
+{
+	rte_mempool_free(vchan->adapter_info.req_mp);
+	vchan->adapter_info.enabled = false;
+
+	return 0;
+}
+
+int
+cnxk_dma_adapter_vchan_del(const int16_t dma_dev_id, uint16_t vchan_id)
+{
+	struct cnxk_dpi_vf_s *dpivf;
+	struct cnxk_dpi_conf *vchan;
+
+	dpivf = rte_dma_fp_objs[dma_dev_id].dev_private;
+	if ((int16_t)vchan_id == -1) {
+		uint16_t vchan_id;
+
+		for (vchan_id = 0; vchan_id < dpivf->num_vchans; vchan_id++) {
+			vchan = &dpivf->conf[vchan_id];
+			if (vchan->adapter_info.enabled)
+				dma_adapter_vchan_free(vchan);
+		}
+	} else {
+		vchan = &dpivf->conf[vchan_id];
+		if (vchan->adapter_info.enabled)
+			dma_adapter_vchan_free(vchan);
+	}
+
+	return 0;
+}
diff --git a/drivers/event/cnxk/meson.build b/drivers/event/cnxk/meson.build
index 13281d687f..f2e07b8665 100644
--- a/drivers/event/cnxk/meson.build
+++ b/drivers/event/cnxk/meson.build
@@ -316,8 +316,7 @@ foreach flag: extra_flags
 endforeach
 
 headers = files('rte_pmd_cnxk_eventdev.h')
-deps += ['bus_pci', 'common_cnxk', 'net_cnxk', 'crypto_cnxk']
-
+deps += ['bus_pci', 'common_cnxk', 'net_cnxk', 'crypto_cnxk', 'dma_cnxk']
 require_iova_in_mbuf = false
 
 annotate_locks = false
-- 
2.34.1


^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [PATCH v2 1/3] common/cnxk: dma result to an offset of the event
  2024-02-09 19:43 ` [PATCH v2 1/3] common/cnxk: dma result to an offset of the event Amit Prakash Shukla
  2024-02-09 19:43   ` [PATCH v2 2/3] dma/cnxk: support for DMA event enqueue dequeue Amit Prakash Shukla
  2024-02-09 19:43   ` [PATCH v2 3/3] event/cnxk: support DMA event functions Amit Prakash Shukla
@ 2024-02-25 15:22   ` Jerin Jacob
  2024-02-26  9:43   ` [PATCH v3 " Amit Prakash Shukla
  3 siblings, 0 replies; 19+ messages in thread
From: Jerin Jacob @ 2024-02-25 15:22 UTC (permalink / raw)
  To: Amit Prakash Shukla
  Cc: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao,
	dev, jerinj, pbhagavatula, vattunuru, sthotton

On Sat, Feb 10, 2024 at 1:14 AM Amit Prakash Shukla
<amitprakashs@marvell.com> wrote:
>
> Adds support to configure writing the result to an offset of the DMA
> response event.
>
> Signed-off-by: Amit Prakash Shukla <amitprakashs@marvell.com>
> ---
> v2:
> - Added dual workslot enqueue support.
> - Fixed compilation error.


Please rebase to next-eventdev. The series has the following build issue on it now.

FAILED: drivers/libtmp_rte_dma_cnxk.a.p/dma_cnxk_cnxk_dmadev.c.o
ccache gcc -Idrivers/libtmp_rte_dma_cnxk.a.p -Idrivers -I../drivers
-Idrivers/dma/cnxk -I../drivers/dma/cnxk -Ilib/dmadev -I../lib/dmadev
-I. -I.. -Iconfig -I../config -Ilib/eal/include -I../lib/eal/include
-Ilib/eal/linux/include -I../l
ib/eal/linux/include -Ilib/eal/x86/include -I../lib/eal/x86/include
-Ilib/eal/common -I../lib/eal/common -Ilib/eal -I../lib/eal
-Ilib/kvargs -I../lib/kvargs -Ilib/log -I../lib/log -Ilib/metrics
-I../lib/metrics -Ilib/telemetry -I../lib/t
elemetry -Idrivers/bus/pci -I../drivers/bus/pci
-I../drivers/bus/pci/linux -Ilib/pci -I../lib/pci
-Idrivers/common/cnxk -I../drivers/common/cnxk -Ilib/net -I../lib/net
-Ilib/ethdev -I../lib/ethdev -Ilib/meter -I../lib/meter -Ilib/mbuf -I
../lib/mbuf -Ilib/mempool -I../lib/mempool -Ilib/ring -I../lib/ring
-Ilib/security -I../lib/security -Ilib/cryptodev -I../lib/cryptodev
-Ilib/rcu -I../lib/rcu -fdiagnostics-color=always
-D_FILE_OFFSET_BITS=64 -Wall -Winvalid-pch -Wextra
-Werror -std=c11 -O2 -g -include rte_config.h -Wcast-qual -Wdeprecated
-Wformat -Wformat-nonliteral -Wformat-security -Wmissing-declarations
-Wmissing-prototypes -Wnested-externs -Wold-style-definition
-Wpointer-arith -Wsign-compare -Wst
rict-prototypes -Wundef -Wwrite-strings -Wno-address-of-packed-member
-Wno-packed-not-aligned -Wno-missing-field-initializers
-Wno-zero-length-bounds -D_GNU_SOURCE -fPIC -march=native -mrtm
-DALLOW_EXPERIMENTAL_API -DALLOW_INTERNAL_API -
Wno-format-truncation -Wno-uninitialized
-DRTE_LOG_DEFAULT_LOGTYPE=pmd.dma.cnxk -MD -MQ
drivers/libtmp_rte_dma_cnxk.a.p/dma_cnxk_cnxk_dmadev.c.o -MF
drivers/libtmp_rte_dma_cnxk.a.p/dma_cnxk_cnxk_dmadev.c.o.d -o
drivers/libtmp_rte_dma_cnx
k.a.p/dma_cnxk_cnxk_dmadev.c.o -c ../drivers/dma/cnxk/cnxk_dmadev.c
../drivers/dma/cnxk/cnxk_dmadev.c: In function ‘cnxk_dmadev_probe’:
../drivers/dma/cnxk/cnxk_dmadev.c:595:14: error: too few arguments to
function ‘roc_dpi_dev_init’
  595 |         rc = roc_dpi_dev_init(rdpi);
      |              ^~~~~~~~~~~~~~~~
In file included from ../drivers/common/cnxk/roc_api.h:99,
                 from ../drivers/dma/cnxk/cnxk_dmadev.h:20,
                 from ../drivers/dma/cnxk/cnxk_dmadev.c:5:
../drivers/common/cnxk/roc_dpi.h:14:15: note: declared here
   14 | int __roc_api roc_dpi_dev_init(struct roc_dpi *roc_dpi, uint8_t offset);

^ permalink raw reply	[flat|nested] 19+ messages in thread

* [PATCH v3 1/3] common/cnxk: dma result to an offset of the event
  2024-02-09 19:43 ` [PATCH v2 1/3] common/cnxk: dma result to an offset of the event Amit Prakash Shukla
                     ` (2 preceding siblings ...)
  2024-02-25 15:22   ` [PATCH v2 1/3] common/cnxk: dma result to an offset of the event Jerin Jacob
@ 2024-02-26  9:43   ` Amit Prakash Shukla
  2024-02-26  9:43     ` [PATCH v3 2/3] dma/cnxk: support for DMA event enqueue dequeue Amit Prakash Shukla
                       ` (2 more replies)
  3 siblings, 3 replies; 19+ messages in thread
From: Amit Prakash Shukla @ 2024-02-26  9:43 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao,
	Vamsi Attunuru
  Cc: dev, jerinj, pbhagavatula, sthotton, amitprakashs

Adds support to configure writing the result to an offset of the DMA
response event.
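
For context, a minimal sketch of how the new "offset" argument is meant
to be consumed (this mirrors what patch 2/3 of this series does in
cnxk_dmadev_probe(); it is illustrative, not part of this patch). The
caller passes the byte offset of the completion-status word within its
completion structure, and hardware then writes the DMA completion
status at that offset of the response event:

  /* Illustrative call, mirroring patch 2/3 of this series. */
  rc = roc_dpi_dev_init(rdpi, offsetof(struct cnxk_dpi_compl_s, wqecs));
  if (rc < 0)
          goto err_out_free;

Passing an offset of zero keeps the existing behaviour, since
roc_dpi_configure() only sets the wqecs enable bit when the offset is
non-zero.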

Signed-off-by: Amit Prakash Shukla <amitprakashs@marvell.com>
---
v3:
- Rebased and fixed compilation error.

v2:
- Added dual workslot enqueue support.
- Fixed compilation error.

 drivers/common/cnxk/roc_dpi.c       |  6 +++++-
 drivers/common/cnxk/roc_dpi.h       |  2 +-
 drivers/common/cnxk/roc_dpi_priv.h  |  4 ++++
 drivers/common/cnxk/roc_idev.c      | 20 ++++++++++++++++++++
 drivers/common/cnxk/roc_idev_priv.h |  3 +++
 drivers/dma/cnxk/cnxk_dmadev.c      |  2 +-
 6 files changed, 34 insertions(+), 3 deletions(-)

diff --git a/drivers/common/cnxk/roc_dpi.c b/drivers/common/cnxk/roc_dpi.c
index c241168294..1ee777d779 100644
--- a/drivers/common/cnxk/roc_dpi.c
+++ b/drivers/common/cnxk/roc_dpi.c
@@ -83,6 +83,9 @@ roc_dpi_configure(struct roc_dpi *roc_dpi, uint32_t chunk_sz, uint64_t aura, uin
 	mbox_msg.s.aura = aura;
 	mbox_msg.s.sso_pf_func = idev_sso_pffunc_get();
 	mbox_msg.s.npa_pf_func = idev_npa_pffunc_get();
+	mbox_msg.s.wqecsoff = idev_dma_cs_offset_get();
+	if (mbox_msg.s.wqecsoff)
+		mbox_msg.s.wqecs = 1;
 
 	rc = send_msg_to_pf(&pci_dev->addr, (const char *)&mbox_msg,
 			    sizeof(dpi_mbox_msg_t));
@@ -94,7 +97,7 @@ roc_dpi_configure(struct roc_dpi *roc_dpi, uint32_t chunk_sz, uint64_t aura, uin
 }
 
 int
-roc_dpi_dev_init(struct roc_dpi *roc_dpi)
+roc_dpi_dev_init(struct roc_dpi *roc_dpi, uint8_t offset)
 {
 	struct plt_pci_device *pci_dev = roc_dpi->pci_dev;
 	uint16_t vfid;
@@ -103,6 +106,7 @@ roc_dpi_dev_init(struct roc_dpi *roc_dpi)
 	vfid = ((pci_dev->addr.devid & 0x1F) << 3) | (pci_dev->addr.function & 0x7);
 	vfid -= 1;
 	roc_dpi->vfid = vfid;
+	idev_dma_cs_offset_set(offset);
 
 	return 0;
 }
diff --git a/drivers/common/cnxk/roc_dpi.h b/drivers/common/cnxk/roc_dpi.h
index 4ebde5b8a6..978e2badb2 100644
--- a/drivers/common/cnxk/roc_dpi.h
+++ b/drivers/common/cnxk/roc_dpi.h
@@ -11,7 +11,7 @@ struct roc_dpi {
 	uint16_t vfid;
 } __plt_cache_aligned;
 
-int __roc_api roc_dpi_dev_init(struct roc_dpi *roc_dpi);
+int __roc_api roc_dpi_dev_init(struct roc_dpi *roc_dpi, uint8_t offset);
 int __roc_api roc_dpi_dev_fini(struct roc_dpi *roc_dpi);
 
 int __roc_api roc_dpi_configure(struct roc_dpi *dpi, uint32_t chunk_sz, uint64_t aura,
diff --git a/drivers/common/cnxk/roc_dpi_priv.h b/drivers/common/cnxk/roc_dpi_priv.h
index 518a3e7351..52962c8bc0 100644
--- a/drivers/common/cnxk/roc_dpi_priv.h
+++ b/drivers/common/cnxk/roc_dpi_priv.h
@@ -31,6 +31,10 @@ typedef union dpi_mbox_msg_t {
 		uint64_t sso_pf_func : 16;
 		/* NPA PF function */
 		uint64_t npa_pf_func : 16;
+		/* WQE queue DMA completion status enable */
+		uint64_t wqecs : 1;
+		/* WQE queue DMA completion status offset */
+		uint64_t wqecsoff : 8;
 	} s;
 } dpi_mbox_msg_t;
 
diff --git a/drivers/common/cnxk/roc_idev.c b/drivers/common/cnxk/roc_idev.c
index 48df3518b0..d0307c666c 100644
--- a/drivers/common/cnxk/roc_idev.c
+++ b/drivers/common/cnxk/roc_idev.c
@@ -301,6 +301,26 @@ idev_sso_set(struct roc_sso *sso)
 		__atomic_store_n(&idev->sso, sso, __ATOMIC_RELEASE);
 }
 
+void
+idev_dma_cs_offset_set(uint8_t offset)
+{
+	struct idev_cfg *idev = idev_get_cfg();
+
+	if (idev != NULL)
+		idev->dma_cs_offset = offset;
+}
+
+uint8_t
+idev_dma_cs_offset_get(void)
+{
+	struct idev_cfg *idev = idev_get_cfg();
+
+	if (idev != NULL)
+		return idev->dma_cs_offset;
+
+	return 0;
+}
+
 uint64_t
 roc_idev_nix_inl_meta_aura_get(void)
 {
diff --git a/drivers/common/cnxk/roc_idev_priv.h b/drivers/common/cnxk/roc_idev_priv.h
index 8dc1cb25bf..6628b18152 100644
--- a/drivers/common/cnxk/roc_idev_priv.h
+++ b/drivers/common/cnxk/roc_idev_priv.h
@@ -43,6 +43,7 @@ struct idev_cfg {
 	struct idev_nix_inl_rx_inj_cfg inl_rx_inj_cfg;
 	plt_spinlock_t nix_inl_dev_lock;
 	plt_spinlock_t npa_dev_lock;
+	uint8_t dma_cs_offset;
 };
 
 /* Generic */
@@ -61,6 +62,8 @@ void idev_sso_pffunc_set(uint16_t sso_pf_func);
 uint16_t idev_sso_pffunc_get(void);
 struct roc_sso *idev_sso_get(void);
 void idev_sso_set(struct roc_sso *sso);
+void idev_dma_cs_offset_set(uint8_t offset);
+uint8_t idev_dma_cs_offset_get(void);
 
 /* idev lmt */
 uint16_t idev_lmt_pffunc_get(void);
diff --git a/drivers/dma/cnxk/cnxk_dmadev.c b/drivers/dma/cnxk/cnxk_dmadev.c
index 1e7f49792c..48ab09cc38 100644
--- a/drivers/dma/cnxk/cnxk_dmadev.c
+++ b/drivers/dma/cnxk/cnxk_dmadev.c
@@ -592,7 +592,7 @@ cnxk_dmadev_probe(struct rte_pci_driver *pci_drv __rte_unused, struct rte_pci_de
 	rdpi = &dpivf->rdpi;
 
 	rdpi->pci_dev = pci_dev;
-	rc = roc_dpi_dev_init(rdpi);
+	rc = roc_dpi_dev_init(rdpi, 0);
 	if (rc < 0)
 		goto err_out_free;
 
-- 
2.34.1


^ permalink raw reply	[flat|nested] 19+ messages in thread

* [PATCH v3 2/3] dma/cnxk: support for DMA event enqueue dequeue
  2024-02-26  9:43   ` [PATCH v3 " Amit Prakash Shukla
@ 2024-02-26  9:43     ` Amit Prakash Shukla
  2024-02-26  9:43     ` [PATCH v3 3/3] event/cnxk: support DMA event functions Amit Prakash Shukla
  2024-03-01 17:32     ` [PATCH v4 1/3] common/cnxk: dma result to an offset of the event Amit Prakash Shukla
  2 siblings, 0 replies; 19+ messages in thread
From: Amit Prakash Shukla @ 2024-02-26  9:43 UTC (permalink / raw)
  To: Pavan Nikhilesh, Shijith Thotton, Vamsi Attunuru
  Cc: dev, jerinj, ndabilpuram, amitprakashs

Added cnxk driver support for DMA event enqueue and dequeue.
Also added changes for work queue entry completion status and
dual workslot DMA event enqueue.
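
For illustration, a rough application-side sketch of submitting one
copy through the adapter in OP_FORWARD mode (not part of this patch;
op_pool, src_sge, dst_sge, resp_queue, evdev_id and port_id are assumed
names, op_pool elements are assumed sized for the op plus the trailing
response event, and the op fields follow what this patch dereferences):

  struct rte_event_dma_adapter_op *op;
  struct rte_event *rsp, ev = {0};

  if (rte_mempool_get(op_pool, (void **)&op) < 0)
          return;
  op->dma_dev_id = dma_dev_id;
  op->vchan = vchan;
  op->nb_src = 1;
  op->nb_dst = 1;
  op->src_seg = &src_sge;
  op->dst_seg = &dst_sge;
  op->flags = RTE_DMA_OP_FLAG_SUBMIT;   /* ring the doorbell right away */

  /* The PMD reads the response event placed right after the op. */
  rsp = (struct rte_event *)(op + 1);
  rsp->queue_id = resp_queue;
  rsp->sched_type = RTE_SCHED_TYPE_ATOMIC;

  ev.event_ptr = op;
  rte_event_dma_adapter_enqueue(evdev_id, port_id, &ev, 1);

On completion, the SSO delivers an RTE_EVENT_TYPE_DMADEV event whose
pointer is translated back to the op by cnxk_dma_adapter_dequeue().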

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
Signed-off-by: Amit Prakash Shukla <amitprakashs@marvell.com>
---
v3:
- Rebased and fixed compilation error.

v2:
- Added dual workslot enqueue support.
- Fixed compilation error.

 doc/guides/eventdevs/cnxk.rst        |   5 +
 drivers/dma/cnxk/cnxk_dma_event_dp.h |  24 +++
 drivers/dma/cnxk/cnxk_dmadev.c       |   3 +-
 drivers/dma/cnxk/cnxk_dmadev.h       |  20 +-
 drivers/dma/cnxk/cnxk_dmadev_fp.c    | 290 +++++++++++++++++++++++++++
 drivers/dma/cnxk/meson.build         |   9 +-
 drivers/dma/cnxk/version.map         |  10 +
 drivers/event/cnxk/cn9k_eventdev.c   |   2 +
 8 files changed, 360 insertions(+), 3 deletions(-)
 create mode 100644 drivers/dma/cnxk/cnxk_dma_event_dp.h
 create mode 100644 drivers/dma/cnxk/version.map

diff --git a/doc/guides/eventdevs/cnxk.rst b/doc/guides/eventdevs/cnxk.rst
index cccb8a0304..9ff1052c53 100644
--- a/doc/guides/eventdevs/cnxk.rst
+++ b/doc/guides/eventdevs/cnxk.rst
@@ -227,3 +227,8 @@ ethernet devices connected to event device to override this applications can
 use `force_rx_bp=1` device arguments.
 Using unique mempool per each ethernet device is recommended when they are
 connected to event device.
+
+DMA adapter new mode support
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The DMA driver does not support the DMA adapter configured in new mode.
diff --git a/drivers/dma/cnxk/cnxk_dma_event_dp.h b/drivers/dma/cnxk/cnxk_dma_event_dp.h
new file mode 100644
index 0000000000..5f890ab18b
--- /dev/null
+++ b/drivers/dma/cnxk/cnxk_dma_event_dp.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2023 Marvell.
+ */
+
+#ifndef _CNXK_DMA_EVENT_DP_H_
+#define _CNXK_DMA_EVENT_DP_H_
+
+#include <stdint.h>
+
+#include <rte_common.h>
+#include <rte_eventdev.h>
+
+__rte_internal
+uint16_t cn10k_dma_adapter_enqueue(void *ws, struct rte_event ev[], uint16_t nb_events);
+
+__rte_internal
+uint16_t cn9k_dma_adapter_enqueue(void *ws, struct rte_event ev[], uint16_t nb_events);
+
+__rte_internal
+uint16_t cn9k_dma_adapter_dual_enqueue(void *ws, struct rte_event ev[], uint16_t nb_events);
+
+__rte_internal
+uintptr_t cnxk_dma_adapter_dequeue(uintptr_t get_work1);
+#endif /* _CNXK_DMA_EVENT_DP_H_ */
diff --git a/drivers/dma/cnxk/cnxk_dmadev.c b/drivers/dma/cnxk/cnxk_dmadev.c
index 48ab09cc38..4ab3cfbdf2 100644
--- a/drivers/dma/cnxk/cnxk_dmadev.c
+++ b/drivers/dma/cnxk/cnxk_dmadev.c
@@ -589,10 +589,11 @@ cnxk_dmadev_probe(struct rte_pci_driver *pci_drv __rte_unused, struct rte_pci_de
 		dmadev->fp_obj->copy_sg = cn10k_dmadev_copy_sg;
 	}
 
+	dpivf->mcs_lock = NULL;
 	rdpi = &dpivf->rdpi;
 
 	rdpi->pci_dev = pci_dev;
-	rc = roc_dpi_dev_init(rdpi, 0);
+	rc = roc_dpi_dev_init(rdpi, offsetof(struct cnxk_dpi_compl_s, wqecs));
 	if (rc < 0)
 		goto err_out_free;
 
diff --git a/drivers/dma/cnxk/cnxk_dmadev.h b/drivers/dma/cnxk/cnxk_dmadev.h
index 350ae73b5c..610a360ba2 100644
--- a/drivers/dma/cnxk/cnxk_dmadev.h
+++ b/drivers/dma/cnxk/cnxk_dmadev.h
@@ -14,11 +14,14 @@
 #include <rte_eal.h>
 #include <rte_lcore.h>
 #include <rte_mbuf_pool_ops.h>
+#include <rte_mcslock.h>
 #include <rte_mempool.h>
 #include <rte_pci.h>
 
 #include <roc_api.h>
 
+#include "cnxk_dma_event_dp.h"
+
 #define CNXK_DPI_MAX_POINTER		    15
 #define CNXK_DPI_STRM_INC(s, var)	    ((s).var = ((s).var + 1) & (s).max_cnt)
 #define CNXK_DPI_STRM_DEC(s, var)	    ((s).var = ((s).var - 1) == -1 ? (s).max_cnt :	\
@@ -40,6 +43,11 @@
  */
 #define CNXK_DPI_REQ_CDATA 0xFF
 
+/* Set completion data to 0xDEADBEEF when a request is submitted for SSO.
+ * This helps differentiate whether dequeue is called after cnxk enqueue.
+ */
+#define CNXK_DPI_REQ_SSO_CDATA    0xDEADBEEF
+
 union cnxk_dpi_instr_cmd {
 	uint64_t u;
 	struct cn9k_dpi_instr_cmd {
@@ -85,7 +93,10 @@ union cnxk_dpi_instr_cmd {
 
 struct cnxk_dpi_compl_s {
 	uint64_t cdata;
-	void *cb_data;
+	void *op;
+	uint16_t dev_id;
+	uint16_t vchan;
+	uint32_t wqecs;
 };
 
 struct cnxk_dpi_cdesc_data_s {
@@ -95,6 +106,11 @@ struct cnxk_dpi_cdesc_data_s {
 	uint16_t tail;
 };
 
+struct cnxk_dma_adapter_info {
+	bool enabled;               /* Set if vchan queue is added to dma adapter. */
+	struct rte_mempool *req_mp; /* DMA inflight request mempool. */
+};
+
 struct cnxk_dpi_conf {
 	union cnxk_dpi_instr_cmd cmd;
 	struct cnxk_dpi_cdesc_data_s c_desc;
@@ -103,6 +119,7 @@ struct cnxk_dpi_conf {
 	uint16_t desc_idx;
 	struct rte_dma_stats stats;
 	uint64_t completed_offset;
+	struct cnxk_dma_adapter_info adapter_info;
 };
 
 struct cnxk_dpi_vf_s {
@@ -112,6 +129,7 @@ struct cnxk_dpi_vf_s {
 	uint16_t chunk_size_m1;
 	struct rte_mempool *chunk_pool;
 	struct cnxk_dpi_conf conf[CNXK_DPI_MAX_VCHANS_PER_QUEUE];
+	RTE_ATOMIC(rte_mcslock_t *) mcs_lock;
 	/* Slow path */
 	struct roc_dpi rdpi;
 	uint32_t aura;
diff --git a/drivers/dma/cnxk/cnxk_dmadev_fp.c b/drivers/dma/cnxk/cnxk_dmadev_fp.c
index 95df19a2db..009a871e43 100644
--- a/drivers/dma/cnxk/cnxk_dmadev_fp.c
+++ b/drivers/dma/cnxk/cnxk_dmadev_fp.c
@@ -5,6 +5,10 @@
 #include <rte_vect.h>
 
 #include "cnxk_dmadev.h"
+#include <rte_event_dma_adapter.h>
+
+#include <cn10k_eventdev.h>
+#include <cnxk_eventdev.h>
 
 static __plt_always_inline void
 __dpi_cpy_scalar(uint64_t *src, uint64_t *dst, uint8_t n)
@@ -434,3 +438,289 @@ cn10k_dmadev_copy_sg(void *dev_private, uint16_t vchan, const struct rte_dma_sge
 
 	return dpi_conf->desc_idx++;
 }
+
+static inline uint64_t
+cnxk_dma_adapter_format_event(uint64_t event)
+{
+	uint64_t w0;
+	w0 = (event & 0xFFC000000000) >> 6 |
+	     (event & 0xFFFFFFF) | RTE_EVENT_TYPE_DMADEV << 28;
+
+	return w0;
+}
+
+uint16_t
+cn10k_dma_adapter_enqueue(void *ws, struct rte_event ev[], uint16_t nb_events)
+{
+	const struct rte_dma_sge *src, *dst;
+	struct rte_event_dma_adapter_op *op;
+	struct cnxk_dpi_compl_s *comp_ptr;
+	struct cnxk_dpi_conf *dpi_conf;
+	struct cnxk_dpi_vf_s *dpivf;
+	struct rte_event *rsp_info;
+	struct cn10k_sso_hws *work;
+	uint16_t nb_src, nb_dst;
+	rte_mcslock_t mcs_lock_me;
+	uint64_t hdr[4];
+	uint16_t count;
+	int rc;
+
+	work = (struct cn10k_sso_hws *)ws;
+
+	for (count = 0; count < nb_events; count++) {
+		op = ev[count].event_ptr;
+		rsp_info = (struct rte_event *)((uint8_t *)op +
+			     sizeof(struct rte_event_dma_adapter_op));
+		dpivf =	rte_dma_fp_objs[op->dma_dev_id].dev_private;
+		dpi_conf = &dpivf->conf[op->vchan];
+
+		if (unlikely(rte_mempool_get(dpi_conf->adapter_info.req_mp, (void **)&comp_ptr)))
+			return count;
+
+		comp_ptr->op = op;
+		comp_ptr->dev_id = op->dma_dev_id;
+		comp_ptr->vchan = op->vchan;
+		comp_ptr->cdata = CNXK_DPI_REQ_SSO_CDATA;
+
+		nb_src = op->nb_src & CNXK_DPI_MAX_POINTER;
+		nb_dst = op->nb_dst & CNXK_DPI_MAX_POINTER;
+
+		hdr[0] = dpi_conf->cmd.u | ((uint64_t)DPI_HDR_PT_WQP << 54);
+		hdr[0] |= (nb_dst << 6) | nb_src;
+		hdr[1] = ((uint64_t)comp_ptr);
+		hdr[2] = cnxk_dma_adapter_format_event(rsp_info->event);
+
+		src = &op->src_seg[0];
+		dst = &op->dst_seg[0];
+
+		if (CNXK_TAG_IS_HEAD(work->gw_rdata) ||
+		    ((CNXK_TT_FROM_TAG(work->gw_rdata) == SSO_TT_ORDERED) &&
+		    (rsp_info->sched_type & DPI_HDR_TT_MASK) ==
+			    RTE_SCHED_TYPE_ORDERED))
+			roc_sso_hws_head_wait(work->base);
+
+		rte_mcslock_lock(&dpivf->mcs_lock, &mcs_lock_me);
+		rc = __dpi_queue_write_sg(dpivf, hdr, src, dst, nb_src, nb_dst);
+		if (unlikely(rc)) {
+			rte_mcslock_unlock(&dpivf->mcs_lock, &mcs_lock_me);
+			return rc;
+		}
+
+		if (op->flags & RTE_DMA_OP_FLAG_SUBMIT) {
+			rte_wmb();
+			plt_write64(dpi_conf->pnum_words + CNXK_DPI_CMD_LEN(nb_src, nb_dst),
+				    dpivf->rdpi.rbase + DPI_VDMA_DBELL);
+			dpi_conf->stats.submitted += dpi_conf->pending + 1;
+			dpi_conf->pnum_words = 0;
+			dpi_conf->pending = 0;
+		} else {
+			dpi_conf->pnum_words += CNXK_DPI_CMD_LEN(nb_src, nb_dst);
+			dpi_conf->pending++;
+		}
+		rte_mcslock_unlock(&dpivf->mcs_lock, &mcs_lock_me);
+	}
+
+	return count;
+}
+
+uint16_t
+cn9k_dma_adapter_dual_enqueue(void *ws, struct rte_event ev[], uint16_t nb_events)
+{
+	const struct rte_dma_sge *fptr, *lptr;
+	struct rte_event_dma_adapter_op *op;
+	struct cnxk_dpi_compl_s *comp_ptr;
+	struct cn9k_sso_hws_dual *work;
+	struct cnxk_dpi_conf *dpi_conf;
+	struct cnxk_dpi_vf_s *dpivf;
+	struct rte_event *rsp_info;
+	uint16_t nb_src, nb_dst;
+	rte_mcslock_t mcs_lock_me;
+	uint64_t hdr[4];
+	uint16_t count;
+	int rc;
+
+	work = (struct cn9k_sso_hws_dual *)ws;
+
+	for (count = 0; count < nb_events; count++) {
+		op = ev[count].event_ptr;
+		rsp_info = (struct rte_event *)((uint8_t *)op +
+						sizeof(struct rte_event_dma_adapter_op));
+		dpivf = rte_dma_fp_objs[op->dma_dev_id].dev_private;
+		dpi_conf = &dpivf->conf[op->vchan];
+
+		if (unlikely(rte_mempool_get(dpi_conf->adapter_info.req_mp, (void **)&comp_ptr)))
+			return count;
+
+		comp_ptr->op = op;
+		comp_ptr->dev_id = op->dma_dev_id;
+		comp_ptr->vchan = op->vchan;
+		comp_ptr->cdata = CNXK_DPI_REQ_SSO_CDATA;
+
+		hdr[1] = dpi_conf->cmd.u | ((uint64_t)DPI_HDR_PT_WQP << 36);
+		hdr[2] = (uint64_t)comp_ptr;
+
+		nb_src = op->nb_src & CNXK_DPI_MAX_POINTER;
+		nb_dst = op->nb_dst & CNXK_DPI_MAX_POINTER;
+		/*
+		 * For the inbound case, src pointers are the last pointers.
+		 * For all other cases, src pointers are the first pointers.
+		 */
+		if (((dpi_conf->cmd.u >> 48) & DPI_HDR_XTYPE_MASK) == DPI_XTYPE_INBOUND) {
+			fptr = &op->dst_seg[0];
+			lptr = &op->src_seg[0];
+			RTE_SWAP(nb_src, nb_dst);
+		} else {
+			fptr = &op->src_seg[0];
+			lptr = &op->dst_seg[0];
+		}
+
+		hdr[0] = ((uint64_t)nb_dst << 54) | (uint64_t)nb_src << 48;
+		hdr[0] |= cnxk_dma_adapter_format_event(rsp_info->event);
+
+		if ((rsp_info->sched_type & DPI_HDR_TT_MASK) == RTE_SCHED_TYPE_ORDERED)
+			roc_sso_hws_head_wait(work->base[!work->vws]);
+
+		rte_mcslock_lock(&dpivf->mcs_lock, &mcs_lock_me);
+		rc = __dpi_queue_write_sg(dpivf, hdr, fptr, lptr, nb_src, nb_dst);
+		if (unlikely(rc)) {
+			rte_mcslock_unlock(&dpivf->mcs_lock, &mcs_lock_me);
+			return rc;
+		}
+
+		if (op->flags & RTE_DMA_OP_FLAG_SUBMIT) {
+			rte_wmb();
+			plt_write64(dpi_conf->pnum_words + CNXK_DPI_CMD_LEN(nb_src, nb_dst),
+				    dpivf->rdpi.rbase + DPI_VDMA_DBELL);
+			dpi_conf->stats.submitted += dpi_conf->pending + 1;
+			dpi_conf->pnum_words = 0;
+			dpi_conf->pending = 0;
+		} else {
+			dpi_conf->pnum_words += CNXK_DPI_CMD_LEN(nb_src, nb_dst);
+			dpi_conf->pending++;
+		}
+		rte_mcslock_unlock(&dpivf->mcs_lock, &mcs_lock_me);
+	}
+
+	return count;
+}
+
+uint16_t
+cn9k_dma_adapter_enqueue(void *ws, struct rte_event ev[], uint16_t nb_events)
+{
+	const struct rte_dma_sge *fptr, *lptr;
+	struct rte_event_dma_adapter_op *op;
+	struct cnxk_dpi_compl_s *comp_ptr;
+	struct cnxk_dpi_conf *dpi_conf;
+	struct cnxk_dpi_vf_s *dpivf;
+	struct rte_event *rsp_info;
+	struct cn9k_sso_hws *work;
+	uint16_t nb_src, nb_dst;
+	rte_mcslock_t mcs_lock_me;
+	uint64_t hdr[4];
+	uint16_t count;
+	int rc;
+
+	work = (struct cn9k_sso_hws *)ws;
+
+	for (count = 0; count < nb_events; count++) {
+		op = ev[count].event_ptr;
+		rsp_info = (struct rte_event *)((uint8_t *)op +
+			    sizeof(struct rte_event_dma_adapter_op));
+		dpivf =	rte_dma_fp_objs[op->dma_dev_id].dev_private;
+		dpi_conf = &dpivf->conf[op->vchan];
+
+		if (unlikely(rte_mempool_get(dpi_conf->adapter_info.req_mp, (void **)&comp_ptr)))
+			return count;
+
+		comp_ptr->op = op;
+		comp_ptr->dev_id = op->dma_dev_id;
+		comp_ptr->vchan = op->vchan;
+		comp_ptr->cdata = CNXK_DPI_REQ_SSO_CDATA;
+
+		hdr[1] = dpi_conf->cmd.u | ((uint64_t)DPI_HDR_PT_WQP << 36);
+		hdr[2] = (uint64_t)comp_ptr;
+
+		nb_src = op->nb_src & CNXK_DPI_MAX_POINTER;
+		nb_dst = op->nb_dst & CNXK_DPI_MAX_POINTER;
+		/*
+		 * For the inbound case, src pointers are the last pointers.
+		 * For all other cases, src pointers are the first pointers.
+		 */
+		if (((dpi_conf->cmd.u >> 48) & DPI_HDR_XTYPE_MASK) == DPI_XTYPE_INBOUND) {
+			fptr = &op->dst_seg[0];
+			lptr = &op->src_seg[0];
+			RTE_SWAP(nb_src, nb_dst);
+		} else {
+			fptr = &op->src_seg[0];
+			lptr = &op->dst_seg[0];
+		}
+
+		hdr[0] = ((uint64_t)nb_dst << 54) | (uint64_t)nb_src << 48;
+		hdr[0] |= cnxk_dma_adapter_format_event(rsp_info->event);
+
+		if ((rsp_info->sched_type & DPI_HDR_TT_MASK) == RTE_SCHED_TYPE_ORDERED)
+			roc_sso_hws_head_wait(work->base);
+
+		rte_mcslock_lock(&dpivf->mcs_lock, &mcs_lock_me);
+		rc = __dpi_queue_write_sg(dpivf, hdr, fptr, lptr, nb_src, nb_dst);
+		if (unlikely(rc)) {
+			rte_mcslock_unlock(&dpivf->mcs_lock, &mcs_lock_me);
+			return rc;
+		}
+
+		if (op->flags & RTE_DMA_OP_FLAG_SUBMIT) {
+			rte_wmb();
+			plt_write64(dpi_conf->pnum_words + CNXK_DPI_CMD_LEN(nb_src, nb_dst),
+				    dpivf->rdpi.rbase + DPI_VDMA_DBELL);
+			dpi_conf->stats.submitted += dpi_conf->pending + 1;
+			dpi_conf->pnum_words = 0;
+			dpi_conf->pending = 0;
+		} else {
+			dpi_conf->pnum_words += CNXK_DPI_CMD_LEN(nb_src, nb_dst);
+			dpi_conf->pending++;
+		}
+		rte_mcslock_unlock(&dpivf->mcs_lock, &mcs_lock_me);
+	}
+
+	return count;
+}
+
+uintptr_t
+cnxk_dma_adapter_dequeue(uintptr_t get_work1)
+{
+	struct rte_event_dma_adapter_op *op;
+	struct cnxk_dpi_compl_s *comp_ptr;
+	struct cnxk_dpi_conf *dpi_conf;
+	struct cnxk_dpi_vf_s *dpivf;
+	rte_mcslock_t mcs_lock_me;
+	uint8_t *wqecs;
+
+	comp_ptr = (struct cnxk_dpi_compl_s *)get_work1;
+
+	/* Dequeue can be called without a prior cnxk enqueue in the case of
+	 * the DMA adapter. In those cases the DMA op is not embedded in the
+	 * completion pointer, so return the pointer as-is.
+	 */
+	if (comp_ptr->cdata != CNXK_DPI_REQ_SSO_CDATA)
+		return (uintptr_t)comp_ptr;
+
+	dpivf =	rte_dma_fp_objs[comp_ptr->dev_id].dev_private;
+	dpi_conf = &dpivf->conf[comp_ptr->vchan];
+
+	rte_mcslock_lock(&dpivf->mcs_lock, &mcs_lock_me);
+	wqecs = (uint8_t *)&comp_ptr->wqecs;
+	if (__atomic_load_n(wqecs, __ATOMIC_RELAXED) != 0)
+		dpi_conf->stats.errors++;
+
+	/* Take into account errors also. This is similar to
+	 * cnxk_dmadev_completed_status().
+	 */
+	dpi_conf->stats.completed++;
+	rte_mcslock_unlock(&dpivf->mcs_lock, &mcs_lock_me);
+
+	op = (struct rte_event_dma_adapter_op *)comp_ptr->op;
+
+	rte_mempool_put(dpi_conf->adapter_info.req_mp, comp_ptr);
+
+	return (uintptr_t)op;
+}
diff --git a/drivers/dma/cnxk/meson.build b/drivers/dma/cnxk/meson.build
index e557349368..8ccc1c2cb7 100644
--- a/drivers/dma/cnxk/meson.build
+++ b/drivers/dma/cnxk/meson.build
@@ -8,6 +8,13 @@ foreach flag: error_cflags
     endif
 endforeach
 
-deps += ['bus_pci', 'common_cnxk', 'dmadev']
+driver_sdk_headers = files(
+        'cnxk_dma_event_dp.h',
+)
+
+deps += ['bus_pci', 'common_cnxk', 'dmadev', 'eventdev']
+
+includes += include_directories('../../event/cnxk')
+
 sources = files('cnxk_dmadev.c', 'cnxk_dmadev_fp.c')
 require_iova_in_mbuf = false
diff --git a/drivers/dma/cnxk/version.map b/drivers/dma/cnxk/version.map
new file mode 100644
index 0000000000..a1490abf97
--- /dev/null
+++ b/drivers/dma/cnxk/version.map
@@ -0,0 +1,10 @@
+INTERNAL {
+	global:
+
+	cn10k_dma_adapter_enqueue;
+	cn9k_dma_adapter_enqueue;
+	cn9k_dma_adapter_dual_enqueue;
+	cnxk_dma_adapter_dequeue;
+
+	local: *;
+};
diff --git a/drivers/event/cnxk/cn9k_eventdev.c b/drivers/event/cnxk/cn9k_eventdev.c
index 5e6305947b..00a87b3bcd 100644
--- a/drivers/event/cnxk/cn9k_eventdev.c
+++ b/drivers/event/cnxk/cn9k_eventdev.c
@@ -460,6 +460,7 @@ cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 		}
 	}
 	event_dev->ca_enqueue = cn9k_sso_hws_ca_enq;
+	event_dev->dma_enqueue = cn9k_dma_adapter_enqueue;
 
 	if (dev->tx_offloads & NIX_TX_MULTI_SEG_F)
 		CN9K_SET_EVDEV_ENQ_OP(dev, event_dev->txa_enqueue,
@@ -475,6 +476,7 @@ cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 		event_dev->enqueue_forward_burst =
 			cn9k_sso_hws_dual_enq_fwd_burst;
 		event_dev->ca_enqueue = cn9k_sso_hws_dual_ca_enq;
+		event_dev->dma_enqueue = cn9k_dma_adapter_dual_enqueue;
 		event_dev->profile_switch = cn9k_sso_hws_dual_profile_switch;
 
 		if (dev->rx_offloads & NIX_RX_MULTI_SEG_F) {
-- 
2.34.1


^ permalink raw reply	[flat|nested] 19+ messages in thread

* [PATCH v3 3/3] event/cnxk: support DMA event functions
  2024-02-26  9:43   ` [PATCH v3 " Amit Prakash Shukla
  2024-02-26  9:43     ` [PATCH v3 2/3] dma/cnxk: support for DMA event enqueue dequeue Amit Prakash Shukla
@ 2024-02-26  9:43     ` Amit Prakash Shukla
  2024-02-29 16:52       ` Jerin Jacob
  2024-03-01 17:32     ` [PATCH v4 1/3] common/cnxk: dma result to an offset of the event Amit Prakash Shukla
  2 siblings, 1 reply; 19+ messages in thread
From: Amit Prakash Shukla @ 2024-02-26  9:43 UTC (permalink / raw)
  To: Pavan Nikhilesh, Shijith Thotton
  Cc: dev, jerinj, vattunuru, ndabilpuram, amitprakashs

Added support for DMA driver callback assignment to eventdev
enqueue and dequeue. The change also defines the DMA adapter
capabilities function.
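
For illustration, a rough sketch of the application-side flow these ops
enable (not part of this patch; adapter_id, evdev_id, dma_dev_id and
vchan_id are assumed names):

  uint32_t caps;

  rte_event_dma_adapter_caps_get(evdev_id, dma_dev_id, &caps);
  if (caps & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD)
          /* Lands in cn10k_dma_adapter_vchan_add() below, which sets
           * up the per-vchan request mempool and updates the XAE
           * count; the event argument is unused by this PMD.
           */
          rte_event_dma_adapter_vchan_add(adapter_id, dma_dev_id,
                                          vchan_id, NULL);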

Depends-on: series-30612 ("lib/dmadev: get DMA device using device ID")

Signed-off-by: Amit Prakash Shukla <amitprakashs@marvell.com>
---
v3:
- Rebased and fixed compilation error.

v2:
- Added dual workslot enqueue support.
- Fixed compilation error.

 drivers/event/cnxk/cn10k_eventdev.c      | 69 +++++++++++++++++
 drivers/event/cnxk/cn10k_worker.h        |  3 +
 drivers/event/cnxk/cn9k_eventdev.c       | 67 ++++++++++++++++
 drivers/event/cnxk/cn9k_worker.h         |  3 +
 drivers/event/cnxk/cnxk_eventdev.h       |  3 +
 drivers/event/cnxk/cnxk_eventdev_adptr.c | 97 ++++++++++++++++++++++++
 drivers/event/cnxk/meson.build           |  3 +-
 7 files changed, 243 insertions(+), 2 deletions(-)

diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c
index 221f419055..18f3b402c9 100644
--- a/drivers/event/cnxk/cn10k_eventdev.c
+++ b/drivers/event/cnxk/cn10k_eventdev.c
@@ -8,6 +8,9 @@
 #include "cn10k_cryptodev_ops.h"
 #include "cnxk_eventdev.h"
 #include "cnxk_worker.h"
+#include "cnxk_dma_event_dp.h"
+
+#include <rte_dmadev_pmd.h>
 
 #define CN10K_SET_EVDEV_DEQ_OP(dev, deq_op, deq_ops)                           \
 	deq_op = deq_ops[dev->rx_offloads & (NIX_RX_OFFLOAD_MAX - 1)]
@@ -477,6 +480,8 @@ cn10k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 	else
 		event_dev->ca_enqueue = cn10k_cpt_sg_ver1_crypto_adapter_enqueue;
 
+	event_dev->dma_enqueue = cn10k_dma_adapter_enqueue;
+
 	if (dev->tx_offloads & NIX_TX_MULTI_SEG_F)
 		CN10K_SET_EVDEV_ENQ_OP(dev, event_dev->txa_enqueue, sso_hws_tx_adptr_enq_seg);
 	else
@@ -1020,6 +1025,66 @@ cn10k_crypto_adapter_vec_limits(const struct rte_eventdev *event_dev,
 	return 0;
 }
 
+static int
+cn10k_dma_adapter_caps_get(const struct rte_eventdev *event_dev,
+			   const int16_t dma_dev_id, uint32_t *caps)
+{
+	struct rte_dma_dev *dma_dev;
+
+	RTE_SET_USED(event_dev);
+
+	dma_dev = rte_dma_pmd_get_dev_by_id(dma_dev_id);
+	if (dma_dev == NULL)
+		return -EINVAL;
+
+	CNXK_VALID_DEV_OR_ERR_RET(dma_dev->device, "cnxk_dmadev_pci_driver", EINVAL);
+
+	*caps = RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD;
+
+	return 0;
+}
+
+static int
+cn10k_dma_adapter_vchan_add(const struct rte_eventdev *event_dev,
+			    const int16_t dma_dev_id, uint16_t vchan_id,
+			    const struct rte_event *event)
+{
+	struct rte_dma_dev *dma_dev;
+	int ret;
+
+	RTE_SET_USED(event);
+	dma_dev = rte_dma_pmd_get_dev_by_id(dma_dev_id);
+	if (dma_dev == NULL)
+		return -EINVAL;
+
+	CNXK_VALID_DEV_OR_ERR_RET(dma_dev->device, "cnxk_dmadev_pci_driver", EINVAL);
+
+	cn10k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);
+
+	ret = cnxk_dma_adapter_vchan_add(event_dev, dma_dev_id, vchan_id);
+	cn10k_sso_set_priv_mem(event_dev, NULL);
+
+	return ret;
+}
+
+static int
+cn10k_dma_adapter_vchan_del(const struct rte_eventdev *event_dev,
+			    const int16_t dma_dev_id, uint16_t vchan_id)
+{
+	struct rte_dma_dev *dma_dev;
+
+	RTE_SET_USED(event_dev);
+
+	dma_dev = rte_dma_pmd_get_dev_by_id(dma_dev_id);
+	if (dma_dev == NULL)
+		return -EINVAL;
+
+	CNXK_VALID_DEV_OR_ERR_RET(dma_dev->device, "cnxk_dmadev_pci_driver", EINVAL);
+
+	return cnxk_dma_adapter_vchan_del(dma_dev_id, vchan_id);
+}
+
+
 static struct eventdev_ops cn10k_sso_dev_ops = {
 	.dev_infos_get = cn10k_sso_info_get,
 	.dev_configure = cn10k_sso_dev_configure,
@@ -1061,6 +1126,10 @@ static struct eventdev_ops cn10k_sso_dev_ops = {
 	.crypto_adapter_queue_pair_del = cn10k_crypto_adapter_qp_del,
 	.crypto_adapter_vector_limits_get = cn10k_crypto_adapter_vec_limits,
 
+	.dma_adapter_caps_get = cn10k_dma_adapter_caps_get,
+	.dma_adapter_vchan_add = cn10k_dma_adapter_vchan_add,
+	.dma_adapter_vchan_del = cn10k_dma_adapter_vchan_del,
+
 	.xstats_get = cnxk_sso_xstats_get,
 	.xstats_reset = cnxk_sso_xstats_reset,
 	.xstats_get_names = cnxk_sso_xstats_get_names,
diff --git a/drivers/event/cnxk/cn10k_worker.h b/drivers/event/cnxk/cn10k_worker.h
index 8aa916fa12..0036495d98 100644
--- a/drivers/event/cnxk/cn10k_worker.h
+++ b/drivers/event/cnxk/cn10k_worker.h
@@ -7,6 +7,7 @@
 
 #include <rte_eventdev.h>
 #include "cn10k_cryptodev_event_dp.h"
+#include "cnxk_dma_event_dp.h"
 #include "cn10k_rx.h"
 #include "cnxk_worker.h"
 #include "cn10k_eventdev.h"
@@ -236,6 +237,8 @@ cn10k_sso_hws_post_process(struct cn10k_sso_hws *ws, uint64_t *u64,
 		/* Mark vector mempool object as get */
 		RTE_MEMPOOL_CHECK_COOKIES(rte_mempool_from_obj((void *)u64[1]),
 					  (void **)&u64[1], 1, 1);
+	} else if (CNXK_EVENT_TYPE_FROM_TAG(u64[0]) == RTE_EVENT_TYPE_DMADEV) {
+		u64[1] = cnxk_dma_adapter_dequeue(u64[1]);
 	}
 }
 
diff --git a/drivers/event/cnxk/cn9k_eventdev.c b/drivers/event/cnxk/cn9k_eventdev.c
index 00a87b3bcd..3cd6f448f0 100644
--- a/drivers/event/cnxk/cn9k_eventdev.c
+++ b/drivers/event/cnxk/cn9k_eventdev.c
@@ -6,6 +6,8 @@
 #include "cnxk_eventdev.h"
 #include "cnxk_worker.h"
 
+#include <rte_dmadev_pmd.h>
+
 #define CN9K_DUAL_WS_PAIR_ID(x, id) (((x)*CN9K_DUAL_WS_NB_WS) + id)
 
 #define CN9K_SET_EVDEV_DEQ_OP(dev, deq_op, deq_ops)                            \
@@ -513,6 +515,8 @@ cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 					      sso_hws_dual_tx_adptr_enq);
 	}
 
+	event_dev->dma_enqueue = cn9k_dma_adapter_enqueue;
+
 	event_dev->txa_enqueue_same_dest = event_dev->txa_enqueue;
 	rte_mb();
 #else
@@ -1020,6 +1024,65 @@ cn9k_tim_caps_get(const struct rte_eventdev *evdev, uint64_t flags,
 				 cn9k_sso_set_priv_mem);
 }
 
+static int
+cn9k_dma_adapter_caps_get(const struct rte_eventdev *event_dev,
+			  const int16_t dma_dev_id, uint32_t *caps)
+{
+	struct rte_dma_dev *dma_dev;
+	RTE_SET_USED(event_dev);
+
+	dma_dev = rte_dma_pmd_get_dev_by_id(dma_dev_id);
+	if (dma_dev == NULL)
+		return -EINVAL;
+
+	CNXK_VALID_DEV_OR_ERR_RET(dma_dev->device, "cnxk_dmadev_pci_driver", EINVAL);
+
+	*caps = RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD;
+
+	return 0;
+}
+
+static int
+cn9k_dma_adapter_vchan_add(const struct rte_eventdev *event_dev,
+			    const int16_t dma_dev_id, uint16_t vchan_id,
+			    const struct rte_event *event)
+{
+	struct rte_dma_dev *dma_dev;
+	int ret;
+
+	RTE_SET_USED(event);
+
+	dma_dev = rte_dma_pmd_get_dev_by_id(dma_dev_id);
+	if (dma_dev == NULL)
+		return -EINVAL;
+
+	CNXK_VALID_DEV_OR_ERR_RET(dma_dev->device, "cnxk_dmadev_pci_driver", EINVAL);
+
+	cn9k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);
+
+	ret = cnxk_dma_adapter_vchan_add(event_dev, dma_dev_id, vchan_id);
+	cn9k_sso_set_priv_mem(event_dev, NULL);
+
+	return ret;
+}
+
+static int
+cn9k_dma_adapter_vchan_del(const struct rte_eventdev *event_dev,
+			    const int16_t dma_dev_id, uint16_t vchan_id)
+{
+	struct rte_dma_dev *dma_dev;
+
+	RTE_SET_USED(event_dev);
+
+	dma_dev = rte_dma_pmd_get_dev_by_id(dma_dev_id);
+	if (dma_dev == NULL)
+		return -EINVAL;
+
+	CNXK_VALID_DEV_OR_ERR_RET(dma_dev->device, "cnxk_dmadev_pci_driver", EINVAL);
+
+	return cnxk_dma_adapter_vchan_del(dma_dev_id, vchan_id);
+}
+
 static struct eventdev_ops cn9k_sso_dev_ops = {
 	.dev_infos_get = cn9k_sso_info_get,
 	.dev_configure = cn9k_sso_dev_configure,
@@ -1058,6 +1121,10 @@ static struct eventdev_ops cn9k_sso_dev_ops = {
 	.crypto_adapter_queue_pair_add = cn9k_crypto_adapter_qp_add,
 	.crypto_adapter_queue_pair_del = cn9k_crypto_adapter_qp_del,
 
+	.dma_adapter_caps_get = cn9k_dma_adapter_caps_get,
+	.dma_adapter_vchan_add = cn9k_dma_adapter_vchan_add,
+	.dma_adapter_vchan_del = cn9k_dma_adapter_vchan_del,
+
 	.xstats_get = cnxk_sso_xstats_get,
 	.xstats_reset = cnxk_sso_xstats_reset,
 	.xstats_get_names = cnxk_sso_xstats_get_names,
diff --git a/drivers/event/cnxk/cn9k_worker.h b/drivers/event/cnxk/cn9k_worker.h
index 0451157812..e8863e42fc 100644
--- a/drivers/event/cnxk/cn9k_worker.h
+++ b/drivers/event/cnxk/cn9k_worker.h
@@ -11,6 +11,7 @@
 #include "cnxk_ethdev.h"
 #include "cnxk_eventdev.h"
 #include "cnxk_worker.h"
+#include "cnxk_dma_event_dp.h"
 #include "cn9k_cryptodev_ops.h"
 
 #include "cn9k_ethdev.h"
@@ -205,6 +206,8 @@ cn9k_sso_hws_post_process(uint64_t *u64, uint64_t mbuf, const uint32_t flags,
 		if (flags & NIX_RX_OFFLOAD_TSTAMP_F)
 			cn9k_sso_process_tstamp(u64[1], mbuf, tstamp[port]);
 		u64[1] = mbuf;
+	} else if (CNXK_EVENT_TYPE_FROM_TAG(u64[0]) == RTE_EVENT_TYPE_DMADEV) {
+		u64[1] = cnxk_dma_adapter_dequeue(u64[1]);
 	}
 }
 
diff --git a/drivers/event/cnxk/cnxk_eventdev.h b/drivers/event/cnxk/cnxk_eventdev.h
index d42d1afa1a..fa99dede85 100644
--- a/drivers/event/cnxk/cnxk_eventdev.h
+++ b/drivers/event/cnxk/cnxk_eventdev.h
@@ -284,4 +284,7 @@ int cnxk_crypto_adapter_qp_add(const struct rte_eventdev *event_dev,
 			       const struct rte_event_crypto_adapter_queue_conf *conf);
 int cnxk_crypto_adapter_qp_del(const struct rte_cryptodev *cdev, int32_t queue_pair_id);
 
+int cnxk_dma_adapter_vchan_add(const struct rte_eventdev *event_dev,
+			       const int16_t dma_dev_id, uint16_t vchan_id);
+int cnxk_dma_adapter_vchan_del(const int16_t dma_dev_id, uint16_t vchan_id);
 #endif /* __CNXK_EVENTDEV_H__ */
diff --git a/drivers/event/cnxk/cnxk_eventdev_adptr.c b/drivers/event/cnxk/cnxk_eventdev_adptr.c
index 92aea92389..a2a59b16c9 100644
--- a/drivers/event/cnxk/cnxk_eventdev_adptr.c
+++ b/drivers/event/cnxk/cnxk_eventdev_adptr.c
@@ -5,6 +5,7 @@
 #include "cnxk_cryptodev_ops.h"
 #include "cnxk_ethdev.h"
 #include "cnxk_eventdev.h"
+#include "cnxk_dmadev.h"
 
 void
 cnxk_sso_updt_xae_cnt(struct cnxk_sso_evdev *dev, void *data,
@@ -737,3 +738,99 @@ cnxk_crypto_adapter_qp_del(const struct rte_cryptodev *cdev,
 
 	return 0;
 }
+
+static int
+dma_adapter_vchan_setup(const int16_t dma_dev_id, struct cnxk_dpi_conf *vchan,
+			uint16_t vchan_id)
+{
+	char name[RTE_MEMPOOL_NAMESIZE];
+	uint32_t cache_size, nb_req;
+	unsigned int req_size;
+
+	snprintf(name, RTE_MEMPOOL_NAMESIZE, "cnxk_dma_req_%u:%u", dma_dev_id, vchan_id);
+	req_size = sizeof(struct cnxk_dpi_compl_s);
+
+	nb_req = vchan->c_desc.max_cnt;
+	cache_size = 16;
+	nb_req += (cache_size * rte_lcore_count());
+
+	vchan->adapter_info.req_mp = rte_mempool_create(name, nb_req, req_size, cache_size, 0,
+							NULL, NULL, NULL, NULL, rte_socket_id(), 0);
+	if (vchan->adapter_info.req_mp == NULL)
+		return -ENOMEM;
+
+	vchan->adapter_info.enabled = true;
+
+	return 0;
+}
+
+int
+cnxk_dma_adapter_vchan_add(const struct rte_eventdev *event_dev,
+			   const int16_t dma_dev_id, uint16_t vchan_id)
+{
+	struct cnxk_sso_evdev *sso_evdev = cnxk_sso_pmd_priv(event_dev);
+	uint32_t adptr_xae_cnt = 0;
+	struct cnxk_dpi_vf_s *dpivf;
+	struct cnxk_dpi_conf *vchan;
+	int ret;
+
+	dpivf = rte_dma_fp_objs[dma_dev_id].dev_private;
+	if ((int16_t)vchan_id == -1) {
+		uint16_t vchan_id;
+
+		for (vchan_id = 0; vchan_id < dpivf->num_vchans; vchan_id++) {
+			vchan = &dpivf->conf[vchan_id];
+			ret = dma_adapter_vchan_setup(dma_dev_id, vchan, vchan_id);
+			if (ret) {
+				cnxk_dma_adapter_vchan_del(dma_dev_id, -1);
+				return ret;
+			}
+			adptr_xae_cnt += vchan->adapter_info.req_mp->size;
+		}
+	} else {
+		vchan = &dpivf->conf[vchan_id];
+		ret = dma_adapter_vchan_setup(dma_dev_id, vchan, vchan_id);
+		if (ret)
+			return ret;
+		adptr_xae_cnt = vchan->adapter_info.req_mp->size;
+	}
+
+	/* Update dma adapter XAE count */
+	sso_evdev->adptr_xae_cnt += adptr_xae_cnt;
+	cnxk_sso_xae_reconfigure((struct rte_eventdev *)(uintptr_t)event_dev);
+
+	return 0;
+}
+
+static int
+dma_adapter_vchan_free(struct cnxk_dpi_conf *vchan)
+{
+	rte_mempool_free(vchan->adapter_info.req_mp);
+	vchan->adapter_info.enabled = false;
+
+	return 0;
+}
+
+int
+cnxk_dma_adapter_vchan_del(const int16_t dma_dev_id, uint16_t vchan_id)
+{
+	struct cnxk_dpi_vf_s *dpivf;
+	struct cnxk_dpi_conf *vchan;
+
+	dpivf = rte_dma_fp_objs[dma_dev_id].dev_private;
+	if ((int16_t)vchan_id == -1) {
+		uint16_t vchan_id;
+
+		for (vchan_id = 0; vchan_id < dpivf->num_vchans; vchan_id++) {
+			vchan = &dpivf->conf[vchan_id];
+			if (vchan->adapter_info.enabled)
+				dma_adapter_vchan_free(vchan);
+		}
+	} else {
+		vchan = &dpivf->conf[vchan_id];
+		if (vchan->adapter_info.enabled)
+			dma_adapter_vchan_free(vchan);
+	}
+
+	return 0;
+}
diff --git a/drivers/event/cnxk/meson.build b/drivers/event/cnxk/meson.build
index 13281d687f..f2e07b8665 100644
--- a/drivers/event/cnxk/meson.build
+++ b/drivers/event/cnxk/meson.build
@@ -316,8 +316,7 @@ foreach flag: extra_flags
 endforeach
 
 headers = files('rte_pmd_cnxk_eventdev.h')
-deps += ['bus_pci', 'common_cnxk', 'net_cnxk', 'crypto_cnxk']
-
+deps += ['bus_pci', 'common_cnxk', 'net_cnxk', 'crypto_cnxk', 'dma_cnxk']
 require_iova_in_mbuf = false
 
 annotate_locks = false
-- 
2.34.1


^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [PATCH v3 3/3] event/cnxk: support DMA event functions
  2024-02-26  9:43     ` [PATCH v3 3/3] event/cnxk: support DMA event functions Amit Prakash Shukla
@ 2024-02-29 16:52       ` Jerin Jacob
  0 siblings, 0 replies; 19+ messages in thread
From: Jerin Jacob @ 2024-02-29 16:52 UTC (permalink / raw)
  To: Amit Prakash Shukla
  Cc: Pavan Nikhilesh, Shijith Thotton, dev, jerinj, vattunuru, ndabilpuram

On Mon, Feb 26, 2024 at 3:30 PM Amit Prakash Shukla
<amitprakashs@marvell.com> wrote:
>
> Added support for DMA driver callback assignment to eventdev
> enqueue and dequeue. The change also defines the DMA adapter
> capabilities function.
>
> Depends-on: series-30612 ("lib/dmadev: get DMA device using device ID")
>
> Signed-off-by: Amit Prakash Shukla <amitprakashs@marvell.com>



# Please update the release notes and add an entry for this new feature in the PMD section.
# Fix the following build issue when building one patch at a time.

[2850/3208] Generating drivers/rte_common_idpf.sym_chk with a custom
command (wrapped by meson to capture output)
[2851/3208] Linking static target drivers/librte_net_cnxk.a
[2852/3208] Compiling C object
drivers/libtmp_rte_dma_cnxk.a.p/dma_cnxk_cnxk_dmadev_fp.c.o
[2853/3208] Linking static target drivers/libtmp_rte_dma_cnxk.a
[2854/3208] Generating drivers/rte_net_cnxk.sym_chk with a custom
command (wrapped by meson to capture output)
[2855/3208] Compiling C object
drivers/libtmp_rte_event_cnxk.a.p/event_cnxk_cn9k_eventdev.c.o
FAILED: drivers/libtmp_rte_event_cnxk.a.p/event_cnxk_cn9k_eventdev.c.o
ccache aarch64-linux-gnu-gcc -Idrivers/libtmp_rte_event_cnxk.a.p
-Idrivers -I../drivers -Idrivers/event/cnxk -I../drivers/event/cnxk
-Ilib/eventdev -I../lib/eventdev -I. -I.. -Iconfig -I../config
-Ilib/eal/include -I../lib/eal/include -Ilib/eal/linux/include
-I../lib/eal/linux/include -Ilib/eal/arm/include
-I../lib/eal/arm/include -Ilib/eal/common -I../lib/eal/common
-Ilib/eal -I../lib/eal -Ilib/kvargs -I../lib/kvargs -Ilib/log
-I../lib/log -Ilib/metrics -I../lib/metrics -Ilib/telemetry
-I../lib/telemetry -Ilib/ring -I../lib/ring -Ilib/ethdev
-I../lib/ethdev -Ilib/net -I../lib/net -Ilib/mbuf -I../lib/mbuf
-Ilib/mempool -I../lib/mempool -Ilib/meter -I../lib/meter -Ilib/hash
-I../lib/hash -Ilib/rcu -I../lib/rcu -Ilib/timer -I../lib/timer
-Ilib/cryptodev -I../lib/cryptodev -Ilib/dmadev -I../lib/dmadev
-Idrivers/bus/pci -I../drivers/bus/pci -I../drivers/bus/pci/linux
-Ilib/pci -I../lib/pci -Idrivers/common/cnxk -I../drivers/common/cnxk
-Ilib/security -I../lib/security -Idrivers/net/cnxk
-I../drivers/net/cnxk -Idrivers/bus/vdev -I../drivers/bus/vdev
-Idrivers/mempool/cnxk -I../drivers/mempool/cnxk -Idrivers/crypto/cnxk
-I../drivers/crypto/cnxk -I/export/cross_prefix/prefix/include
-fdiagnostics-color=always -D_FILE_OFFSET_BITS=64 -Wall -Winvalid-pch
-Wextra -Werror -std=c11 -O2 -g -include rte_config.h -Wcast-qual
-Wdeprecated -Wformat -Wformat-nonliteral -Wformat-security
-Wmissing-declarations -Wmissing-prototypes -Wnested-externs
-Wold-style-definition -Wpointer-arith -Wsign-compare
-Wstrict-prototypes -Wundef -Wwrite-strings
-Wno-address-of-packed-member -Wno-packed-not-aligned
-Wno-missing-field-initializers -Wno-zero-length-bounds -D_GNU_SOURCE
-fPIC -march=armv8-a+crc -moutline-atomics -DALLOW_EXPERIMENTAL_API
-DALLOW_INTERNAL_API -Wno-format-truncation -flax-vector-conversions
-Wno-strict-aliasing -DRTE_LOG_DEFAULT_LOGTYPE=pmd.event.cnxk -MD -MQ
drivers/libtmp_rte_event_cnxk.a.p/event_cnxk_cn9k_eventdev.c.o -MF
drivers/libtmp_rte_event_cnxk.a.p/event_cnxk_cn9k_eventdev.c.o.d -o
drivers/libtmp_rte_event_cnxk.a.p/event_cnxk_cn9k_eventdev.c.o -c
../drivers/event/cnxk/cn9k_eventdev.c
../drivers/event/cnxk/cn9k_eventdev.c: In function 'cn9k_sso_fp_fns_set':
../drivers/event/cnxk/cn9k_eventdev.c:463:34: error:
'cn9k_dma_adapter_enqueue' undeclared (first use in this function);
did you mean 'event_dma_adapter_enqueue_t'?
  463 |         event_dev->dma_enqueue = cn9k_dma_adapter_enqueue;
      |                                  ^~~~~~~~~~~~~~~~~~~~~~~~
      |                                  event_dma_adapter_enqueue_t
../drivers/event/cnxk/cn9k_eventdev.c:463:34: note: each undeclared
identifier is reported only once for each function it appears in
../drivers/event/cnxk/cn9k_eventdev.c:479:42: error:
'cn9k_dma_adapter_dual_enqueue' undeclared (first use in this
function)
  479 |                 event_dev->dma_enqueue = cn9k_dma_adapter_dual_enqueue;
      |                                          ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~
[2856/3208] Generating drivers/rte_dma_cnxk.pmd.c with a custom command
[2857/3208] Generating drivers/rte_bus_dpaa.sym_chk with a custom
command (wrapped by meson to capture output)
[2858/3208] Generating drivers/rte_bus_fslmc.sym_chk with a custom
command (wrapped by meson to capture output)
[2859/3208] Generating lib/pipeline.sym_chk with a custom command
(wrapped by meson to capture output)
[2860/3208] Generating lib/ethdev.sym_chk with a custom command
(wrapped by meson to capture output)
[2861/3208] Generating lib/eal.sym_chk with a custom command (wrapped
by meson to capture output)
[2862/3208] Generating drivers/rte_common_sfc_efx.sym_chk with a
custom command (wrapped by meson to capture output)
[2863/3208] Generating drivers/rte_common_cnxk.sym_chk with a custom
command (wrapped by meson to capture output)
ninja: build stopped: subcommand failed.

^ permalink raw reply	[flat|nested] 19+ messages in thread

* [PATCH v4 1/3] common/cnxk: dma result to an offset of the event
  2024-02-26  9:43   ` [PATCH v3 " Amit Prakash Shukla
  2024-02-26  9:43     ` [PATCH v3 2/3] dma/cnxk: support for DMA event enqueue dequeue Amit Prakash Shukla
  2024-02-26  9:43     ` [PATCH v3 3/3] event/cnxk: support DMA event functions Amit Prakash Shukla
@ 2024-03-01 17:32     ` Amit Prakash Shukla
  2024-03-01 17:32       ` [PATCH v4 2/3] dma/cnxk: support for DMA event enqueue dequeue Amit Prakash Shukla
                         ` (2 more replies)
  2 siblings, 3 replies; 19+ messages in thread
From: Amit Prakash Shukla @ 2024-03-01 17:32 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao,
	Vamsi Attunuru
  Cc: dev, jerinj, pbhagavatula, anoobj, asasidharan, Amit Prakash Shukla

Adds support to configure writing the result to an offset of the DMA
response event.

Signed-off-by: Amit Prakash Shukla <amitprakashs@marvell.com>
---
v4:
- Fixed compilation error.
- Updated release notes.

v3:
- Rebased and fixed compilation error.

v2:
- Added dual workslot enqueue support.
- Fixed compilation error.

 drivers/common/cnxk/roc_dpi.c       |  6 +++++-
 drivers/common/cnxk/roc_dpi.h       |  2 +-
 drivers/common/cnxk/roc_dpi_priv.h  |  4 ++++
 drivers/common/cnxk/roc_idev.c      | 20 ++++++++++++++++++++
 drivers/common/cnxk/roc_idev_priv.h |  3 +++
 drivers/dma/cnxk/cnxk_dmadev.c      |  2 +-
 6 files changed, 34 insertions(+), 3 deletions(-)

diff --git a/drivers/common/cnxk/roc_dpi.c b/drivers/common/cnxk/roc_dpi.c
index c241168294..1ee777d779 100644
--- a/drivers/common/cnxk/roc_dpi.c
+++ b/drivers/common/cnxk/roc_dpi.c
@@ -83,6 +83,9 @@ roc_dpi_configure(struct roc_dpi *roc_dpi, uint32_t chunk_sz, uint64_t aura, uin
 	mbox_msg.s.aura = aura;
 	mbox_msg.s.sso_pf_func = idev_sso_pffunc_get();
 	mbox_msg.s.npa_pf_func = idev_npa_pffunc_get();
+	mbox_msg.s.wqecsoff = idev_dma_cs_offset_get();
+	if (mbox_msg.s.wqecsoff)
+		mbox_msg.s.wqecs = 1;
 
 	rc = send_msg_to_pf(&pci_dev->addr, (const char *)&mbox_msg,
 			    sizeof(dpi_mbox_msg_t));
@@ -94,7 +97,7 @@ roc_dpi_configure(struct roc_dpi *roc_dpi, uint32_t chunk_sz, uint64_t aura, uin
 }
 
 int
-roc_dpi_dev_init(struct roc_dpi *roc_dpi)
+roc_dpi_dev_init(struct roc_dpi *roc_dpi, uint8_t offset)
 {
 	struct plt_pci_device *pci_dev = roc_dpi->pci_dev;
 	uint16_t vfid;
@@ -103,6 +106,7 @@ roc_dpi_dev_init(struct roc_dpi *roc_dpi)
 	vfid = ((pci_dev->addr.devid & 0x1F) << 3) | (pci_dev->addr.function & 0x7);
 	vfid -= 1;
 	roc_dpi->vfid = vfid;
+	idev_dma_cs_offset_set(offset);
 
 	return 0;
 }
diff --git a/drivers/common/cnxk/roc_dpi.h b/drivers/common/cnxk/roc_dpi.h
index 4ebde5b8a6..978e2badb2 100644
--- a/drivers/common/cnxk/roc_dpi.h
+++ b/drivers/common/cnxk/roc_dpi.h
@@ -11,7 +11,7 @@ struct roc_dpi {
 	uint16_t vfid;
 } __plt_cache_aligned;
 
-int __roc_api roc_dpi_dev_init(struct roc_dpi *roc_dpi);
+int __roc_api roc_dpi_dev_init(struct roc_dpi *roc_dpi, uint8_t offset);
 int __roc_api roc_dpi_dev_fini(struct roc_dpi *roc_dpi);
 
 int __roc_api roc_dpi_configure(struct roc_dpi *dpi, uint32_t chunk_sz, uint64_t aura,
diff --git a/drivers/common/cnxk/roc_dpi_priv.h b/drivers/common/cnxk/roc_dpi_priv.h
index 518a3e7351..52962c8bc0 100644
--- a/drivers/common/cnxk/roc_dpi_priv.h
+++ b/drivers/common/cnxk/roc_dpi_priv.h
@@ -31,6 +31,10 @@ typedef union dpi_mbox_msg_t {
 		uint64_t sso_pf_func : 16;
 		/* NPA PF function */
 		uint64_t npa_pf_func : 16;
+		/* WQE queue DMA completion status enable */
+		uint64_t wqecs : 1;
+		/* WQE queue DMA completion status offset */
+		uint64_t wqecsoff : 8;
 	} s;
 } dpi_mbox_msg_t;
 
diff --git a/drivers/common/cnxk/roc_idev.c b/drivers/common/cnxk/roc_idev.c
index 48df3518b0..d0307c666c 100644
--- a/drivers/common/cnxk/roc_idev.c
+++ b/drivers/common/cnxk/roc_idev.c
@@ -301,6 +301,26 @@ idev_sso_set(struct roc_sso *sso)
 		__atomic_store_n(&idev->sso, sso, __ATOMIC_RELEASE);
 }
 
+void
+idev_dma_cs_offset_set(uint8_t offset)
+{
+	struct idev_cfg *idev = idev_get_cfg();
+
+	if (idev != NULL)
+		idev->dma_cs_offset = offset;
+}
+
+uint8_t
+idev_dma_cs_offset_get(void)
+{
+	struct idev_cfg *idev = idev_get_cfg();
+
+	if (idev != NULL)
+		return idev->dma_cs_offset;
+
+	return 0;
+}
+
 uint64_t
 roc_idev_nix_inl_meta_aura_get(void)
 {
diff --git a/drivers/common/cnxk/roc_idev_priv.h b/drivers/common/cnxk/roc_idev_priv.h
index 8dc1cb25bf..6628b18152 100644
--- a/drivers/common/cnxk/roc_idev_priv.h
+++ b/drivers/common/cnxk/roc_idev_priv.h
@@ -43,6 +43,7 @@ struct idev_cfg {
 	struct idev_nix_inl_rx_inj_cfg inl_rx_inj_cfg;
 	plt_spinlock_t nix_inl_dev_lock;
 	plt_spinlock_t npa_dev_lock;
+	uint8_t dma_cs_offset;
 };
 
 /* Generic */
@@ -61,6 +62,8 @@ void idev_sso_pffunc_set(uint16_t sso_pf_func);
 uint16_t idev_sso_pffunc_get(void);
 struct roc_sso *idev_sso_get(void);
 void idev_sso_set(struct roc_sso *sso);
+void idev_dma_cs_offset_set(uint8_t offset);
+uint8_t idev_dma_cs_offset_get(void);
 
 /* idev lmt */
 uint16_t idev_lmt_pffunc_get(void);
diff --git a/drivers/dma/cnxk/cnxk_dmadev.c b/drivers/dma/cnxk/cnxk_dmadev.c
index 1e7f49792c..48ab09cc38 100644
--- a/drivers/dma/cnxk/cnxk_dmadev.c
+++ b/drivers/dma/cnxk/cnxk_dmadev.c
@@ -592,7 +592,7 @@ cnxk_dmadev_probe(struct rte_pci_driver *pci_drv __rte_unused, struct rte_pci_de
 	rdpi = &dpivf->rdpi;
 
 	rdpi->pci_dev = pci_dev;
-	rc = roc_dpi_dev_init(rdpi);
+	rc = roc_dpi_dev_init(rdpi, 0);
 	if (rc < 0)
 		goto err_out_free;
 
-- 
2.34.1


^ permalink raw reply	[flat|nested] 19+ messages in thread

* [PATCH v4 2/3] dma/cnxk: support for DMA event enqueue dequeue
  2024-03-01 17:32     ` [PATCH v4 1/3] common/cnxk: dma result to an offset of the event Amit Prakash Shukla
@ 2024-03-01 17:32       ` Amit Prakash Shukla
  2024-03-03 15:39         ` Jerin Jacob
  2024-03-01 17:32       ` [PATCH v4 3/3] event/cnxk: support DMA event functions Amit Prakash Shukla
  2024-03-03 21:08       ` [PATCH v5 1/3] common/cnxk: dma result to an offset of the event Amit Prakash Shukla
  2 siblings, 1 reply; 19+ messages in thread
From: Amit Prakash Shukla @ 2024-03-01 17:32 UTC (permalink / raw)
  To: Pavan Nikhilesh, Shijith Thotton, Vamsi Attunuru
  Cc: dev, jerinj, ndabilpuram, anoobj, asasidharan, Amit Prakash Shukla

Added cnxk driver support for DMA event enqueue and dequeue.
Also added changes for the work queue entry completion status and
for dual workslot DMA event enqueue.
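
A layout note before the diff: each enqueue function below recovers the
response event from the bytes immediately following the op, so whoever
builds the op must place a struct rte_event directly after it. Restated
as a standalone fragment (types are from rte_event_dma_adapter.h; ev[i]
and op are placeholders):

	struct rte_event_dma_adapter_op *op = ev[i].event_ptr;
	/* Response event is expected back-to-back with the op. */
	struct rte_event *rsp_info = (struct rte_event *)
		((uint8_t *)op + sizeof(struct rte_event_dma_adapter_op));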

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
Signed-off-by: Amit Prakash Shukla <amitprakashs@marvell.com>
---
v4:
- Fixed compilation error.
- Updated release notes.

v3:
- Rebased and fixed compilation error.

v2:
- Added dual workslot enqueue support.
- Fixed compilation error.

 doc/guides/eventdevs/cnxk.rst          |   5 +
 doc/guides/rel_notes/release_24_03.rst |   4 +
 drivers/dma/cnxk/cnxk_dma_event_dp.h   |  24 ++
 drivers/dma/cnxk/cnxk_dmadev.c         |   3 +-
 drivers/dma/cnxk/cnxk_dmadev.h         |  20 +-
 drivers/dma/cnxk/cnxk_dmadev_fp.c      | 290 +++++++++++++++++++++++++
 drivers/dma/cnxk/meson.build           |   9 +-
 drivers/dma/cnxk/version.map           |  10 +
 drivers/event/cnxk/cn9k_eventdev.c     |   2 +
 drivers/event/cnxk/cn9k_worker.h       |   1 +
 drivers/event/cnxk/meson.build         |   2 +-
 11 files changed, 366 insertions(+), 4 deletions(-)
 create mode 100644 drivers/dma/cnxk/cnxk_dma_event_dp.h
 create mode 100644 drivers/dma/cnxk/version.map

diff --git a/doc/guides/eventdevs/cnxk.rst b/doc/guides/eventdevs/cnxk.rst
index cccb8a0304..9ff1052c53 100644
--- a/doc/guides/eventdevs/cnxk.rst
+++ b/doc/guides/eventdevs/cnxk.rst
@@ -227,3 +227,8 @@ ethernet devices connected to event device to override this applications can
 use `force_rx_bp=1` device arguments.
 Using unique mempool per each ethernet device is recommended when they are
 connected to event device.
+
+DMA adapter new mode support
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+DMA driver does not support DMA adapter configured in new mode.
diff --git a/doc/guides/rel_notes/release_24_03.rst b/doc/guides/rel_notes/release_24_03.rst
index 8d440d56a5..39899244f6 100644
--- a/doc/guides/rel_notes/release_24_03.rst
+++ b/doc/guides/rel_notes/release_24_03.rst
@@ -141,6 +141,10 @@ New Features
     to support TLS v1.2, TLS v1.3 and DTLS v1.2.
   * Added PMD API to allow raw submission of instructions to CPT.
 
+* **Updated Marvell cnxk DMA driver.**
+
+  * Added support for DMA event enqueue and dequeue.
+  * Added support for dual workslot DMA event enqueue.
 
 Removed Items
 -------------
diff --git a/drivers/dma/cnxk/cnxk_dma_event_dp.h b/drivers/dma/cnxk/cnxk_dma_event_dp.h
new file mode 100644
index 0000000000..5f890ab18b
--- /dev/null
+++ b/drivers/dma/cnxk/cnxk_dma_event_dp.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2023 Marvell.
+ */
+
+#ifndef _CNXK_DMA_EVENT_DP_H_
+#define _CNXK_DMA_EVENT_DP_H_
+
+#include <stdint.h>
+
+#include <rte_common.h>
+#include <rte_eventdev.h>
+
+__rte_internal
+uint16_t cn10k_dma_adapter_enqueue(void *ws, struct rte_event ev[], uint16_t nb_events);
+
+__rte_internal
+uint16_t cn9k_dma_adapter_enqueue(void *ws, struct rte_event ev[], uint16_t nb_events);
+
+__rte_internal
+uint16_t cn9k_dma_adapter_dual_enqueue(void *ws, struct rte_event ev[], uint16_t nb_events);
+
+__rte_internal
+uintptr_t cnxk_dma_adapter_dequeue(uintptr_t get_work1);
+#endif /* _CNXK_DMA_EVENT_DP_H_ */
diff --git a/drivers/dma/cnxk/cnxk_dmadev.c b/drivers/dma/cnxk/cnxk_dmadev.c
index 48ab09cc38..4ab3cfbdf2 100644
--- a/drivers/dma/cnxk/cnxk_dmadev.c
+++ b/drivers/dma/cnxk/cnxk_dmadev.c
@@ -589,10 +589,11 @@ cnxk_dmadev_probe(struct rte_pci_driver *pci_drv __rte_unused, struct rte_pci_de
 		dmadev->fp_obj->copy_sg = cn10k_dmadev_copy_sg;
 	}
 
+	dpivf->mcs_lock = NULL;
 	rdpi = &dpivf->rdpi;
 
 	rdpi->pci_dev = pci_dev;
-	rc = roc_dpi_dev_init(rdpi, 0);
+	rc = roc_dpi_dev_init(rdpi, offsetof(struct cnxk_dpi_compl_s, wqecs));
 	if (rc < 0)
 		goto err_out_free;
 
diff --git a/drivers/dma/cnxk/cnxk_dmadev.h b/drivers/dma/cnxk/cnxk_dmadev.h
index 350ae73b5c..610a360ba2 100644
--- a/drivers/dma/cnxk/cnxk_dmadev.h
+++ b/drivers/dma/cnxk/cnxk_dmadev.h
@@ -14,11 +14,14 @@
 #include <rte_eal.h>
 #include <rte_lcore.h>
 #include <rte_mbuf_pool_ops.h>
+#include <rte_mcslock.h>
 #include <rte_mempool.h>
 #include <rte_pci.h>
 
 #include <roc_api.h>
 
+#include "cnxk_dma_event_dp.h"
+
 #define CNXK_DPI_MAX_POINTER		    15
 #define CNXK_DPI_STRM_INC(s, var)	    ((s).var = ((s).var + 1) & (s).max_cnt)
 #define CNXK_DPI_STRM_DEC(s, var)	    ((s).var = ((s).var - 1) == -1 ? (s).max_cnt :	\
@@ -40,6 +43,11 @@
  */
 #define CNXK_DPI_REQ_CDATA 0xFF
 
+/* Set completion data to 0xDEADBEEF when a request is submitted for SSO.
+ * This helps differentiate whether the dequeue is called after a cnxk enqueue.
+ */
+#define CNXK_DPI_REQ_SSO_CDATA    0xDEADBEEF
+
 union cnxk_dpi_instr_cmd {
 	uint64_t u;
 	struct cn9k_dpi_instr_cmd {
@@ -85,7 +93,10 @@ union cnxk_dpi_instr_cmd {
 
 struct cnxk_dpi_compl_s {
 	uint64_t cdata;
-	void *cb_data;
+	void *op;
+	uint16_t dev_id;
+	uint16_t vchan;
+	uint32_t wqecs;
 };
 
 struct cnxk_dpi_cdesc_data_s {
@@ -95,6 +106,11 @@ struct cnxk_dpi_cdesc_data_s {
 	uint16_t tail;
 };
 
+struct cnxk_dma_adapter_info {
+	bool enabled;               /* Set if vchan queue is added to dma adapter. */
+	struct rte_mempool *req_mp; /* DMA inflight request mempool. */
+};
+
 struct cnxk_dpi_conf {
 	union cnxk_dpi_instr_cmd cmd;
 	struct cnxk_dpi_cdesc_data_s c_desc;
@@ -103,6 +119,7 @@ struct cnxk_dpi_conf {
 	uint16_t desc_idx;
 	struct rte_dma_stats stats;
 	uint64_t completed_offset;
+	struct cnxk_dma_adapter_info adapter_info;
 };
 
 struct cnxk_dpi_vf_s {
@@ -112,6 +129,7 @@ struct cnxk_dpi_vf_s {
 	uint16_t chunk_size_m1;
 	struct rte_mempool *chunk_pool;
 	struct cnxk_dpi_conf conf[CNXK_DPI_MAX_VCHANS_PER_QUEUE];
+	RTE_ATOMIC(rte_mcslock_t *) mcs_lock;
 	/* Slow path */
 	struct roc_dpi rdpi;
 	uint32_t aura;
diff --git a/drivers/dma/cnxk/cnxk_dmadev_fp.c b/drivers/dma/cnxk/cnxk_dmadev_fp.c
index 95df19a2db..009a871e43 100644
--- a/drivers/dma/cnxk/cnxk_dmadev_fp.c
+++ b/drivers/dma/cnxk/cnxk_dmadev_fp.c
@@ -5,6 +5,10 @@
 #include <rte_vect.h>
 
 #include "cnxk_dmadev.h"
+#include <rte_event_dma_adapter.h>
+
+#include <cn10k_eventdev.h>
+#include <cnxk_eventdev.h>
 
 static __plt_always_inline void
 __dpi_cpy_scalar(uint64_t *src, uint64_t *dst, uint8_t n)
@@ -434,3 +438,289 @@ cn10k_dmadev_copy_sg(void *dev_private, uint16_t vchan, const struct rte_dma_sge
 
 	return dpi_conf->desc_idx++;
 }
+
+static inline uint64_t
+cnxk_dma_adapter_format_event(uint64_t event)
+{
+	uint64_t w0;
+	w0 = (event & 0xFFC000000000) >> 6 |
+	     (event & 0xFFFFFFF) | RTE_EVENT_TYPE_DMADEV << 28;
+
+	return w0;
+}
+
+uint16_t
+cn10k_dma_adapter_enqueue(void *ws, struct rte_event ev[], uint16_t nb_events)
+{
+	const struct rte_dma_sge *src, *dst;
+	struct rte_event_dma_adapter_op *op;
+	struct cnxk_dpi_compl_s *comp_ptr;
+	struct cnxk_dpi_conf *dpi_conf;
+	struct cnxk_dpi_vf_s *dpivf;
+	struct rte_event *rsp_info;
+	struct cn10k_sso_hws *work;
+	uint16_t nb_src, nb_dst;
+	rte_mcslock_t mcs_lock_me;
+	uint64_t hdr[4];
+	uint16_t count;
+	int rc;
+
+	work = (struct cn10k_sso_hws *)ws;
+
+	for (count = 0; count < nb_events; count++) {
+		op = ev[count].event_ptr;
+		rsp_info = (struct rte_event *)((uint8_t *)op +
+			     sizeof(struct rte_event_dma_adapter_op));
+		dpivf =	rte_dma_fp_objs[op->dma_dev_id].dev_private;
+		dpi_conf = &dpivf->conf[op->vchan];
+
+		if (unlikely(rte_mempool_get(dpi_conf->adapter_info.req_mp, (void **)&comp_ptr)))
+			return count;
+
+		comp_ptr->op = op;
+		comp_ptr->dev_id = op->dma_dev_id;
+		comp_ptr->vchan = op->vchan;
+		comp_ptr->cdata = CNXK_DPI_REQ_SSO_CDATA;
+
+		nb_src = op->nb_src & CNXK_DPI_MAX_POINTER;
+		nb_dst = op->nb_dst & CNXK_DPI_MAX_POINTER;
+
+		hdr[0] = dpi_conf->cmd.u | ((uint64_t)DPI_HDR_PT_WQP << 54);
+		hdr[0] |= (nb_dst << 6) | nb_src;
+		hdr[1] = ((uint64_t)comp_ptr);
+		hdr[2] = cnxk_dma_adapter_format_event(rsp_info->event);
+
+		src = &op->src_seg[0];
+		dst = &op->dst_seg[0];
+
+		if (CNXK_TAG_IS_HEAD(work->gw_rdata) ||
+		    ((CNXK_TT_FROM_TAG(work->gw_rdata) == SSO_TT_ORDERED) &&
+		    (rsp_info->sched_type & DPI_HDR_TT_MASK) ==
+			    RTE_SCHED_TYPE_ORDERED))
+			roc_sso_hws_head_wait(work->base);
+
+		rte_mcslock_lock(&dpivf->mcs_lock, &mcs_lock_me);
+		rc = __dpi_queue_write_sg(dpivf, hdr, src, dst, nb_src, nb_dst);
+		if (unlikely(rc)) {
+			rte_mcslock_unlock(&dpivf->mcs_lock, &mcs_lock_me);
+			return rc;
+		}
+
+		if (op->flags & RTE_DMA_OP_FLAG_SUBMIT) {
+			rte_wmb();
+			plt_write64(dpi_conf->pnum_words + CNXK_DPI_CMD_LEN(nb_src, nb_dst),
+				    dpivf->rdpi.rbase + DPI_VDMA_DBELL);
+			dpi_conf->stats.submitted += dpi_conf->pending + 1;
+			dpi_conf->pnum_words = 0;
+			dpi_conf->pending = 0;
+		} else {
+			dpi_conf->pnum_words += CNXK_DPI_CMD_LEN(nb_src, nb_dst);
+			dpi_conf->pending++;
+		}
+		rte_mcslock_unlock(&dpivf->mcs_lock, &mcs_lock_me);
+	}
+
+	return count;
+}
+
+uint16_t
+cn9k_dma_adapter_dual_enqueue(void *ws, struct rte_event ev[], uint16_t nb_events)
+{
+	const struct rte_dma_sge *fptr, *lptr;
+	struct rte_event_dma_adapter_op *op;
+	struct cnxk_dpi_compl_s *comp_ptr;
+	struct cn9k_sso_hws_dual *work;
+	struct cnxk_dpi_conf *dpi_conf;
+	struct cnxk_dpi_vf_s *dpivf;
+	struct rte_event *rsp_info;
+	uint16_t nb_src, nb_dst;
+	rte_mcslock_t mcs_lock_me;
+	uint64_t hdr[4];
+	uint16_t count;
+	int rc;
+
+	work = (struct cn9k_sso_hws_dual *)ws;
+
+	for (count = 0; count < nb_events; count++) {
+		op = ev[count].event_ptr;
+		rsp_info = (struct rte_event *)((uint8_t *)op +
+						sizeof(struct rte_event_dma_adapter_op));
+		dpivf = rte_dma_fp_objs[op->dma_dev_id].dev_private;
+		dpi_conf = &dpivf->conf[op->vchan];
+
+		if (unlikely(rte_mempool_get(dpi_conf->adapter_info.req_mp, (void **)&comp_ptr)))
+			return count;
+
+		comp_ptr->op = op;
+		comp_ptr->dev_id = op->dma_dev_id;
+		comp_ptr->vchan = op->vchan;
+		comp_ptr->cdata = CNXK_DPI_REQ_SSO_CDATA;
+
+		hdr[1] = dpi_conf->cmd.u | ((uint64_t)DPI_HDR_PT_WQP << 36);
+		hdr[2] = (uint64_t)comp_ptr;
+
+		nb_src = op->nb_src & CNXK_DPI_MAX_POINTER;
+		nb_dst = op->nb_dst & CNXK_DPI_MAX_POINTER;
+		/*
+		 * For the inbound case, the src pointers are the last pointers.
+		 * For all other cases, the src pointers are the first pointers.
+		 */
+		if (((dpi_conf->cmd.u >> 48) & DPI_HDR_XTYPE_MASK) == DPI_XTYPE_INBOUND) {
+			fptr = &op->dst_seg[0];
+			lptr = &op->src_seg[0];
+			RTE_SWAP(nb_src, nb_dst);
+		} else {
+			fptr = &op->src_seg[0];
+			lptr = &op->dst_seg[0];
+		}
+
+		hdr[0] = ((uint64_t)nb_dst << 54) | (uint64_t)nb_src << 48;
+		hdr[0] |= cnxk_dma_adapter_format_event(rsp_info->event);
+
+		if ((rsp_info->sched_type & DPI_HDR_TT_MASK) == RTE_SCHED_TYPE_ORDERED)
+			roc_sso_hws_head_wait(work->base[!work->vws]);
+
+		rte_mcslock_lock(&dpivf->mcs_lock, &mcs_lock_me);
+		rc = __dpi_queue_write_sg(dpivf, hdr, fptr, lptr, nb_src, nb_dst);
+		if (unlikely(rc)) {
+			rte_mcslock_unlock(&dpivf->mcs_lock, &mcs_lock_me);
+			return rc;
+		}
+
+		if (op->flags & RTE_DMA_OP_FLAG_SUBMIT) {
+			rte_wmb();
+			plt_write64(dpi_conf->pnum_words + CNXK_DPI_CMD_LEN(nb_src, nb_dst),
+				    dpivf->rdpi.rbase + DPI_VDMA_DBELL);
+			dpi_conf->stats.submitted += dpi_conf->pending + 1;
+			dpi_conf->pnum_words = 0;
+			dpi_conf->pending = 0;
+		} else {
+			dpi_conf->pnum_words += CNXK_DPI_CMD_LEN(nb_src, nb_dst);
+			dpi_conf->pending++;
+		}
+		rte_mcslock_unlock(&dpivf->mcs_lock, &mcs_lock_me);
+	}
+
+	return count;
+}
+
+uint16_t
+cn9k_dma_adapter_enqueue(void *ws, struct rte_event ev[], uint16_t nb_events)
+{
+	const struct rte_dma_sge *fptr, *lptr;
+	struct rte_event_dma_adapter_op *op;
+	struct cnxk_dpi_compl_s *comp_ptr;
+	struct cnxk_dpi_conf *dpi_conf;
+	struct cnxk_dpi_vf_s *dpivf;
+	struct rte_event *rsp_info;
+	struct cn9k_sso_hws *work;
+	uint16_t nb_src, nb_dst;
+	rte_mcslock_t mcs_lock_me;
+	uint64_t hdr[4];
+	uint16_t count;
+	int rc;
+
+	work = (struct cn9k_sso_hws *)ws;
+
+	for (count = 0; count < nb_events; count++) {
+		op = ev[count].event_ptr;
+		rsp_info = (struct rte_event *)((uint8_t *)op +
+			    sizeof(struct rte_event_dma_adapter_op));
+		dpivf =	rte_dma_fp_objs[op->dma_dev_id].dev_private;
+		dpi_conf = &dpivf->conf[op->vchan];
+
+		if (unlikely(rte_mempool_get(dpi_conf->adapter_info.req_mp, (void **)&comp_ptr)))
+			return count;
+
+		comp_ptr->op = op;
+		comp_ptr->dev_id = op->dma_dev_id;
+		comp_ptr->vchan = op->vchan;
+		comp_ptr->cdata = CNXK_DPI_REQ_SSO_CDATA;
+
+		hdr[1] = dpi_conf->cmd.u | ((uint64_t)DPI_HDR_PT_WQP << 36);
+		hdr[2] = (uint64_t)comp_ptr;
+
+		nb_src = op->nb_src & CNXK_DPI_MAX_POINTER;
+		nb_dst = op->nb_dst & CNXK_DPI_MAX_POINTER;
+		/*
+		 * For the inbound case, the src pointers are the last pointers.
+		 * For all other cases, the src pointers are the first pointers.
+		 */
+		if (((dpi_conf->cmd.u >> 48) & DPI_HDR_XTYPE_MASK) == DPI_XTYPE_INBOUND) {
+			fptr = &op->dst_seg[0];
+			lptr = &op->src_seg[0];
+			RTE_SWAP(nb_src, nb_dst);
+		} else {
+			fptr = &op->src_seg[0];
+			lptr = &op->dst_seg[0];
+		}
+
+		hdr[0] = ((uint64_t)nb_dst << 54) | (uint64_t)nb_src << 48;
+		hdr[0] |= cnxk_dma_adapter_format_event(rsp_info->event);
+
+		if ((rsp_info->sched_type & DPI_HDR_TT_MASK) == RTE_SCHED_TYPE_ORDERED)
+			roc_sso_hws_head_wait(work->base);
+
+		rte_mcslock_lock(&dpivf->mcs_lock, &mcs_lock_me);
+		rc = __dpi_queue_write_sg(dpivf, hdr, fptr, lptr, nb_src, nb_dst);
+		if (unlikely(rc)) {
+			rte_mcslock_unlock(&dpivf->mcs_lock, &mcs_lock_me);
+			return rc;
+		}
+
+		if (op->flags & RTE_DMA_OP_FLAG_SUBMIT) {
+			rte_wmb();
+			plt_write64(dpi_conf->pnum_words + CNXK_DPI_CMD_LEN(nb_src, nb_dst),
+				    dpivf->rdpi.rbase + DPI_VDMA_DBELL);
+			dpi_conf->stats.submitted += dpi_conf->pending + 1;
+			dpi_conf->pnum_words = 0;
+			dpi_conf->pending = 0;
+		} else {
+			dpi_conf->pnum_words += CNXK_DPI_CMD_LEN(nb_src, nb_dst);
+			dpi_conf->pending++;
+		}
+		rte_mcslock_unlock(&dpivf->mcs_lock, &mcs_lock_me);
+	}
+
+	return count;
+}
+
+uintptr_t
+cnxk_dma_adapter_dequeue(uintptr_t get_work1)
+{
+	struct rte_event_dma_adapter_op *op;
+	struct cnxk_dpi_compl_s *comp_ptr;
+	struct cnxk_dpi_conf *dpi_conf;
+	struct cnxk_dpi_vf_s *dpivf;
+	rte_mcslock_t mcs_lock_me;
+	uint8_t *wqecs;
+
+	comp_ptr = (struct cnxk_dpi_compl_s *)get_work1;
+
+	/* Dequeue can be called without a prior cnxk enqueue in the DMA
+	 * adapter case. When it is called from the adapter, the DMA op will
+	 * not be embedded in a completion pointer; in those cases return op.
+	 */
+	if (comp_ptr->cdata != CNXK_DPI_REQ_SSO_CDATA)
+		return (uintptr_t)comp_ptr;
+
+	dpivf =	rte_dma_fp_objs[comp_ptr->dev_id].dev_private;
+	dpi_conf = &dpivf->conf[comp_ptr->vchan];
+
+	rte_mcslock_lock(&dpivf->mcs_lock, &mcs_lock_me);
+	wqecs = (uint8_t *)&comp_ptr->wqecs;
+	if (__atomic_load_n(wqecs, __ATOMIC_RELAXED) != 0)
+		dpi_conf->stats.errors++;
+
+	/* Take into account errors also. This is similar to
+	 * cnxk_dmadev_completed_status().
+	 */
+	dpi_conf->stats.completed++;
+	rte_mcslock_unlock(&dpivf->mcs_lock, &mcs_lock_me);
+
+	op = (struct rte_event_dma_adapter_op *)comp_ptr->op;
+
+	rte_mempool_put(dpi_conf->adapter_info.req_mp, comp_ptr);
+
+	return (uintptr_t)op;
+}
diff --git a/drivers/dma/cnxk/meson.build b/drivers/dma/cnxk/meson.build
index e557349368..8ccc1c2cb7 100644
--- a/drivers/dma/cnxk/meson.build
+++ b/drivers/dma/cnxk/meson.build
@@ -8,6 +8,13 @@ foreach flag: error_cflags
     endif
 endforeach
 
-deps += ['bus_pci', 'common_cnxk', 'dmadev']
+driver_sdk_headers = files(
+        'cnxk_dma_event_dp.h',
+)
+
+deps += ['bus_pci', 'common_cnxk', 'dmadev', 'eventdev']
+
+includes += include_directories('../../event/cnxk')
+
 sources = files('cnxk_dmadev.c', 'cnxk_dmadev_fp.c')
 require_iova_in_mbuf = false
diff --git a/drivers/dma/cnxk/version.map b/drivers/dma/cnxk/version.map
new file mode 100644
index 0000000000..a1490abf97
--- /dev/null
+++ b/drivers/dma/cnxk/version.map
@@ -0,0 +1,10 @@
+INTERNAL {
+	global:
+
+	cn10k_dma_adapter_enqueue;
+	cn9k_dma_adapter_enqueue;
+	cn9k_dma_adapter_dual_enqueue;
+	cnxk_dma_adapter_dequeue;
+
+	local: *;
+};
diff --git a/drivers/event/cnxk/cn9k_eventdev.c b/drivers/event/cnxk/cn9k_eventdev.c
index 5e6305947b..00a87b3bcd 100644
--- a/drivers/event/cnxk/cn9k_eventdev.c
+++ b/drivers/event/cnxk/cn9k_eventdev.c
@@ -460,6 +460,7 @@ cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 		}
 	}
 	event_dev->ca_enqueue = cn9k_sso_hws_ca_enq;
+	event_dev->dma_enqueue = cn9k_dma_adapter_enqueue;
 
 	if (dev->tx_offloads & NIX_TX_MULTI_SEG_F)
 		CN9K_SET_EVDEV_ENQ_OP(dev, event_dev->txa_enqueue,
@@ -475,6 +476,7 @@ cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 		event_dev->enqueue_forward_burst =
 			cn9k_sso_hws_dual_enq_fwd_burst;
 		event_dev->ca_enqueue = cn9k_sso_hws_dual_ca_enq;
+		event_dev->dma_enqueue = cn9k_dma_adapter_dual_enqueue;
 		event_dev->profile_switch = cn9k_sso_hws_dual_profile_switch;
 
 		if (dev->rx_offloads & NIX_RX_MULTI_SEG_F) {
diff --git a/drivers/event/cnxk/cn9k_worker.h b/drivers/event/cnxk/cn9k_worker.h
index 0451157812..0cd8c45e9a 100644
--- a/drivers/event/cnxk/cn9k_worker.h
+++ b/drivers/event/cnxk/cn9k_worker.h
@@ -11,6 +11,7 @@
 #include "cnxk_ethdev.h"
 #include "cnxk_eventdev.h"
 #include "cnxk_worker.h"
+#include "cnxk_dma_event_dp.h"
 #include "cn9k_cryptodev_ops.h"
 
 #include "cn9k_ethdev.h"
diff --git a/drivers/event/cnxk/meson.build b/drivers/event/cnxk/meson.build
index 13281d687f..2a30b97bff 100644
--- a/drivers/event/cnxk/meson.build
+++ b/drivers/event/cnxk/meson.build
@@ -316,7 +316,7 @@ foreach flag: extra_flags
 endforeach
 
 headers = files('rte_pmd_cnxk_eventdev.h')
-deps += ['bus_pci', 'common_cnxk', 'net_cnxk', 'crypto_cnxk']
+deps += ['bus_pci', 'common_cnxk', 'net_cnxk', 'crypto_cnxk', 'dma_cnxk']
 
 require_iova_in_mbuf = false
 
-- 
2.34.1


^ permalink raw reply	[flat|nested] 19+ messages in thread

* [PATCH v4 3/3] event/cnxk: support DMA event functions
  2024-03-01 17:32     ` [PATCH v4 1/3] common/cnxk: dma result to an offset of the event Amit Prakash Shukla
  2024-03-01 17:32       ` [PATCH v4 2/3] dma/cnxk: support for DMA event enqueue dequeue Amit Prakash Shukla
@ 2024-03-01 17:32       ` Amit Prakash Shukla
  2024-03-03 21:08       ` [PATCH v5 1/3] common/cnxk: dma result to an offset of the event Amit Prakash Shukla
  2 siblings, 0 replies; 19+ messages in thread
From: Amit Prakash Shukla @ 2024-03-01 17:32 UTC (permalink / raw)
  To: Pavan Nikhilesh, Shijith Thotton
  Cc: dev, jerinj, vattunuru, ndabilpuram, anoobj, asasidharan,
	Amit Prakash Shukla

Added support for DMA driver callback assignment to eventdev
enqueue and dequeue. The change also defines the DMA adapter
capabilities function.

Depends-on: series-30612 ("lib/dmadev: get DMA device using device ID")
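
With these ops in place, the application-side flow this enables looks
roughly as follows (a hedged sketch: the API names come from the
eventdev DMA adapter library, while evdev_id, dma_dev_id, adapter_id
and ev are placeholders):

	uint32_t caps = 0;

	/* cnxk reports INTERNAL_PORT_OP_FWD: ops are forwarded through an
	 * event port and completions return as RTE_EVENT_TYPE_DMADEV events.
	 */
	if (rte_event_dma_adapter_caps_get(evdev_id, dma_dev_id, &caps) ||
	    !(caps & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD))
		return -ENOTSUP;

	/* A vchan_id of (uint16_t)-1 adds every vchan of the dmadev,
	 * matching the (int16_t)vchan_id == -1 check in
	 * cnxk_dma_adapter_vchan_add() below.
	 */
	ret = rte_event_dma_adapter_vchan_add(adapter_id, dma_dev_id,
					      (uint16_t)-1, &ev);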

Signed-off-by: Amit Prakash Shukla <amitprakashs@marvell.com>
---
v4:
- Fixed compilation error.
- Updated release notes.

v3:
- Rebased and fixed compilation error.

v2:
- Added dual workslot enqueue support.
- Fixed compilation error.

 doc/guides/rel_notes/release_24_03.rst   |  4 +
 drivers/event/cnxk/cn10k_eventdev.c      | 70 +++++++++++++++++
 drivers/event/cnxk/cn10k_worker.h        |  3 +
 drivers/event/cnxk/cn9k_eventdev.c       | 67 ++++++++++++++++
 drivers/event/cnxk/cn9k_worker.h         |  2 +
 drivers/event/cnxk/cnxk_eventdev.h       |  3 +
 drivers/event/cnxk/cnxk_eventdev_adptr.c | 97 ++++++++++++++++++++++++
 drivers/event/cnxk/meson.build           |  1 -
 8 files changed, 246 insertions(+), 1 deletion(-)

diff --git a/doc/guides/rel_notes/release_24_03.rst b/doc/guides/rel_notes/release_24_03.rst
index 39899244f6..13ffa32f3d 100644
--- a/doc/guides/rel_notes/release_24_03.rst
+++ b/doc/guides/rel_notes/release_24_03.rst
@@ -146,6 +146,10 @@ New Features
   * Added support for DMA event enqueue and dequeue.
   * Added support for dual workslot DMA event enqueue.
 
+* **Updated Marvell cnxk eventdev driver.**
+
+  * Added support for DMA driver callback assignment to eventdev enqueue and dequeue.
+
 Removed Items
 -------------
 
diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c
index 221f419055..18f3b402c9 100644
--- a/drivers/event/cnxk/cn10k_eventdev.c
+++ b/drivers/event/cnxk/cn10k_eventdev.c
@@ -8,6 +8,9 @@
 #include "cn10k_cryptodev_ops.h"
 #include "cnxk_eventdev.h"
 #include "cnxk_worker.h"
+#include "cnxk_dma_event_dp.h"
+
+#include <rte_dmadev_pmd.h>
 
 #define CN10K_SET_EVDEV_DEQ_OP(dev, deq_op, deq_ops)                           \
 	deq_op = deq_ops[dev->rx_offloads & (NIX_RX_OFFLOAD_MAX - 1)]
@@ -477,6 +480,8 @@ cn10k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 	else
 		event_dev->ca_enqueue = cn10k_cpt_sg_ver1_crypto_adapter_enqueue;
 
+	event_dev->dma_enqueue = cn10k_dma_adapter_enqueue;
+
 	if (dev->tx_offloads & NIX_TX_MULTI_SEG_F)
 		CN10K_SET_EVDEV_ENQ_OP(dev, event_dev->txa_enqueue, sso_hws_tx_adptr_enq_seg);
 	else
@@ -1020,6 +1025,67 @@ cn10k_crypto_adapter_vec_limits(const struct rte_eventdev *event_dev,
 	return 0;
 }
 
+static int
+cn10k_dma_adapter_caps_get(const struct rte_eventdev *event_dev,
+			   const int16_t dma_dev_id, uint32_t *caps)
+{
+	struct rte_dma_dev *dma_dev;
+
+	RTE_SET_USED(event_dev);
+
+	dma_dev = rte_dma_pmd_get_dev_by_id(dma_dev_id);
+	if (dma_dev == NULL)
+		return -EINVAL;
+
+	CNXK_VALID_DEV_OR_ERR_RET(dma_dev->device, "cnxk_dmadev_pci_driver", EINVAL);
+
+	*caps = RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD;
+
+	return 0;
+}
+
+static int
+cn10k_dma_adapter_vchan_add(const struct rte_eventdev *event_dev,
+			    const int16_t dma_dev_id, uint16_t vchan_id,
+			    const struct rte_event *event)
+{
+	struct rte_dma_dev *dma_dev;
+	int ret;
+
+	RTE_SET_USED(event);
+	dma_dev = rte_dma_pmd_get_dev_by_id(dma_dev_id);
+	if (dma_dev == NULL)
+		return -EINVAL;
+
+	CNXK_VALID_DEV_OR_ERR_RET(dma_dev->device, "cnxk_dmadev_pci_driver", EINVAL);
+
+	cn10k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);
+
+	ret = cnxk_dma_adapter_vchan_add(event_dev, dma_dev_id, vchan_id);
+	cn10k_sso_set_priv_mem(event_dev, NULL);
+
+	return ret;
+}
+
+static int
+cn10k_dma_adapter_vchan_del(const struct rte_eventdev *event_dev,
+			    const int16_t dma_dev_id, uint16_t vchan_id)
+{
+	struct rte_dma_dev *dma_dev;
+
+	RTE_SET_USED(event_dev);
+
+	dma_dev = rte_dma_pmd_get_dev_by_id(dma_dev_id);
+	if (dma_dev == NULL)
+		return -EINVAL;
+
+	CNXK_VALID_DEV_OR_ERR_RET(dma_dev->device, "cnxk_dmadev_pci_driver", EINVAL);
+
+	return cnxk_dma_adapter_vchan_del(dma_dev_id, vchan_id);
+}
+
 static struct eventdev_ops cn10k_sso_dev_ops = {
 	.dev_infos_get = cn10k_sso_info_get,
 	.dev_configure = cn10k_sso_dev_configure,
@@ -1061,6 +1127,10 @@ static struct eventdev_ops cn10k_sso_dev_ops = {
 	.crypto_adapter_queue_pair_del = cn10k_crypto_adapter_qp_del,
 	.crypto_adapter_vector_limits_get = cn10k_crypto_adapter_vec_limits,
 
+	.dma_adapter_caps_get = cn10k_dma_adapter_caps_get,
+	.dma_adapter_vchan_add = cn10k_dma_adapter_vchan_add,
+	.dma_adapter_vchan_del = cn10k_dma_adapter_vchan_del,
+
 	.xstats_get = cnxk_sso_xstats_get,
 	.xstats_reset = cnxk_sso_xstats_reset,
 	.xstats_get_names = cnxk_sso_xstats_get_names,
diff --git a/drivers/event/cnxk/cn10k_worker.h b/drivers/event/cnxk/cn10k_worker.h
index 8aa916fa12..0036495d98 100644
--- a/drivers/event/cnxk/cn10k_worker.h
+++ b/drivers/event/cnxk/cn10k_worker.h
@@ -7,6 +7,7 @@
 
 #include <rte_eventdev.h>
 #include "cn10k_cryptodev_event_dp.h"
+#include "cnxk_dma_event_dp.h"
 #include "cn10k_rx.h"
 #include "cnxk_worker.h"
 #include "cn10k_eventdev.h"
@@ -236,6 +237,8 @@ cn10k_sso_hws_post_process(struct cn10k_sso_hws *ws, uint64_t *u64,
 		/* Mark vector mempool object as get */
 		RTE_MEMPOOL_CHECK_COOKIES(rte_mempool_from_obj((void *)u64[1]),
 					  (void **)&u64[1], 1, 1);
+	} else if (CNXK_EVENT_TYPE_FROM_TAG(u64[0]) == RTE_EVENT_TYPE_DMADEV) {
+		u64[1] = cnxk_dma_adapter_dequeue(u64[1]);
 	}
 }
 
diff --git a/drivers/event/cnxk/cn9k_eventdev.c b/drivers/event/cnxk/cn9k_eventdev.c
index 00a87b3bcd..3cd6f448f0 100644
--- a/drivers/event/cnxk/cn9k_eventdev.c
+++ b/drivers/event/cnxk/cn9k_eventdev.c
@@ -6,6 +6,8 @@
 #include "cnxk_eventdev.h"
 #include "cnxk_worker.h"
 
+#include <rte_dmadev_pmd.h>
+
 #define CN9K_DUAL_WS_PAIR_ID(x, id) (((x)*CN9K_DUAL_WS_NB_WS) + id)
 
 #define CN9K_SET_EVDEV_DEQ_OP(dev, deq_op, deq_ops)                            \
@@ -513,6 +515,8 @@ cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 					      sso_hws_dual_tx_adptr_enq);
 	}
 
+	event_dev->dma_enqueue = cn9k_dma_adapter_enqueue;
+
 	event_dev->txa_enqueue_same_dest = event_dev->txa_enqueue;
 	rte_mb();
 #else
@@ -1020,6 +1024,65 @@ cn9k_tim_caps_get(const struct rte_eventdev *evdev, uint64_t flags,
 				 cn9k_sso_set_priv_mem);
 }
 
+static int
+cn9k_dma_adapter_caps_get(const struct rte_eventdev *event_dev,
+			  const int16_t dma_dev_id, uint32_t *caps)
+{
+	struct rte_dma_dev *dma_dev;
+	RTE_SET_USED(event_dev);
+
+	dma_dev = rte_dma_pmd_get_dev_by_id(dma_dev_id);
+	if (dma_dev == NULL)
+		return -EINVAL;
+
+	CNXK_VALID_DEV_OR_ERR_RET(dma_dev->device, "cnxk_dmadev_pci_driver", EINVAL);
+
+	*caps = RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD;
+
+	return 0;
+}
+
+static int
+cn9k_dma_adapter_vchan_add(const struct rte_eventdev *event_dev,
+			    const int16_t dma_dev_id, uint16_t vchan_id,
+			    const struct rte_event *event)
+{
+	struct rte_dma_dev *dma_dev;
+	int ret;
+
+	RTE_SET_USED(event);
+
+	dma_dev = rte_dma_pmd_get_dev_by_id(dma_dev_id);
+	if (dma_dev == NULL)
+		return -EINVAL;
+
+	CNXK_VALID_DEV_OR_ERR_RET(dma_dev->device, "cnxk_dmadev_pci_driver", EINVAL);
+
+	cn9k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);
+
+	ret = cnxk_dma_adapter_vchan_add(event_dev, dma_dev_id, vchan_id);
+	cn9k_sso_set_priv_mem(event_dev, NULL);
+
+	return ret;
+}
+
+static int
+cn9k_dma_adapter_vchan_del(const struct rte_eventdev *event_dev,
+			    const int16_t dma_dev_id, uint16_t vchan_id)
+{
+	struct rte_dma_dev *dma_dev;
+
+	RTE_SET_USED(event_dev);
+
+	dma_dev = rte_dma_pmd_get_dev_by_id(dma_dev_id);
+	if (dma_dev == NULL)
+		return -EINVAL;
+
+	CNXK_VALID_DEV_OR_ERR_RET(dma_dev->device, "cnxk_dmadev_pci_driver", EINVAL);
+
+	return cnxk_dma_adapter_vchan_del(dma_dev_id, vchan_id);
+}
+
 static struct eventdev_ops cn9k_sso_dev_ops = {
 	.dev_infos_get = cn9k_sso_info_get,
 	.dev_configure = cn9k_sso_dev_configure,
@@ -1058,6 +1121,10 @@ static struct eventdev_ops cn9k_sso_dev_ops = {
 	.crypto_adapter_queue_pair_add = cn9k_crypto_adapter_qp_add,
 	.crypto_adapter_queue_pair_del = cn9k_crypto_adapter_qp_del,
 
+	.dma_adapter_caps_get = cn9k_dma_adapter_caps_get,
+	.dma_adapter_vchan_add = cn9k_dma_adapter_vchan_add,
+	.dma_adapter_vchan_del = cn9k_dma_adapter_vchan_del,
+
 	.xstats_get = cnxk_sso_xstats_get,
 	.xstats_reset = cnxk_sso_xstats_reset,
 	.xstats_get_names = cnxk_sso_xstats_get_names,
diff --git a/drivers/event/cnxk/cn9k_worker.h b/drivers/event/cnxk/cn9k_worker.h
index 0cd8c45e9a..e8863e42fc 100644
--- a/drivers/event/cnxk/cn9k_worker.h
+++ b/drivers/event/cnxk/cn9k_worker.h
@@ -206,6 +206,8 @@ cn9k_sso_hws_post_process(uint64_t *u64, uint64_t mbuf, const uint32_t flags,
 		if (flags & NIX_RX_OFFLOAD_TSTAMP_F)
 			cn9k_sso_process_tstamp(u64[1], mbuf, tstamp[port]);
 		u64[1] = mbuf;
+	} else if (CNXK_EVENT_TYPE_FROM_TAG(u64[0]) == RTE_EVENT_TYPE_DMADEV) {
+		u64[1] = cnxk_dma_adapter_dequeue(u64[1]);
 	}
 }
 
diff --git a/drivers/event/cnxk/cnxk_eventdev.h b/drivers/event/cnxk/cnxk_eventdev.h
index d42d1afa1a..fa99dede85 100644
--- a/drivers/event/cnxk/cnxk_eventdev.h
+++ b/drivers/event/cnxk/cnxk_eventdev.h
@@ -284,4 +284,7 @@ int cnxk_crypto_adapter_qp_add(const struct rte_eventdev *event_dev,
 			       const struct rte_event_crypto_adapter_queue_conf *conf);
 int cnxk_crypto_adapter_qp_del(const struct rte_cryptodev *cdev, int32_t queue_pair_id);
 
+int cnxk_dma_adapter_vchan_add(const struct rte_eventdev *event_dev,
+			       const int16_t dma_dev_id, uint16_t vchan_id);
+int cnxk_dma_adapter_vchan_del(const int16_t dma_dev_id, uint16_t vchan_id);
 #endif /* __CNXK_EVENTDEV_H__ */
diff --git a/drivers/event/cnxk/cnxk_eventdev_adptr.c b/drivers/event/cnxk/cnxk_eventdev_adptr.c
index 92aea92389..a2a59b16c9 100644
--- a/drivers/event/cnxk/cnxk_eventdev_adptr.c
+++ b/drivers/event/cnxk/cnxk_eventdev_adptr.c
@@ -5,6 +5,7 @@
 #include "cnxk_cryptodev_ops.h"
 #include "cnxk_ethdev.h"
 #include "cnxk_eventdev.h"
+#include "cnxk_dmadev.h"
 
 void
 cnxk_sso_updt_xae_cnt(struct cnxk_sso_evdev *dev, void *data,
@@ -737,3 +738,99 @@ cnxk_crypto_adapter_qp_del(const struct rte_cryptodev *cdev,
 
 	return 0;
 }
+
+static int
+dma_adapter_vchan_setup(const int16_t dma_dev_id, struct cnxk_dpi_conf *vchan,
+			uint16_t vchan_id)
+{
+	char name[RTE_MEMPOOL_NAMESIZE];
+	uint32_t cache_size, nb_req;
+	unsigned int req_size;
+
+	snprintf(name, RTE_MEMPOOL_NAMESIZE, "cnxk_dma_req_%u:%u", dma_dev_id, vchan_id);
+	req_size = sizeof(struct cnxk_dpi_compl_s);
+
+	nb_req = vchan->c_desc.max_cnt;
+	cache_size = 16;
+	nb_req += (cache_size * rte_lcore_count());
+
+	vchan->adapter_info.req_mp = rte_mempool_create(name, nb_req, req_size, cache_size, 0,
+							NULL, NULL, NULL, NULL, rte_socket_id(), 0);
+	if (vchan->adapter_info.req_mp == NULL)
+		return -ENOMEM;
+
+	vchan->adapter_info.enabled = true;
+
+	return 0;
+}
+
+int
+cnxk_dma_adapter_vchan_add(const struct rte_eventdev *event_dev,
+			   const int16_t dma_dev_id, uint16_t vchan_id)
+{
+	struct cnxk_sso_evdev *sso_evdev = cnxk_sso_pmd_priv(event_dev);
+	uint32_t adptr_xae_cnt = 0;
+	struct cnxk_dpi_vf_s *dpivf;
+	struct cnxk_dpi_conf *vchan;
+	int ret;
+
+	dpivf = rte_dma_fp_objs[dma_dev_id].dev_private;
+	if ((int16_t)vchan_id == -1) {
+		uint16_t vchan_id;
+
+		for (vchan_id = 0; vchan_id < dpivf->num_vchans; vchan_id++) {
+			vchan = &dpivf->conf[vchan_id];
+			ret = dma_adapter_vchan_setup(dma_dev_id, vchan, vchan_id);
+			if (ret) {
+				cnxk_dma_adapter_vchan_del(dma_dev_id, -1);
+				return ret;
+			}
+			adptr_xae_cnt += vchan->adapter_info.req_mp->size;
+		}
+	} else {
+		vchan = &dpivf->conf[vchan_id];
+		ret = dma_adapter_vchan_setup(dma_dev_id, vchan, vchan_id);
+		if (ret)
+			return ret;
+		adptr_xae_cnt = vchan->adapter_info.req_mp->size;
+	}
+
+	/* Update dma adapter XAE count */
+	sso_evdev->adptr_xae_cnt += adptr_xae_cnt;
+	cnxk_sso_xae_reconfigure((struct rte_eventdev *)(uintptr_t)event_dev);
+
+	return 0;
+}
+
+static int
+dma_adapter_vchan_free(struct cnxk_dpi_conf *vchan)
+{
+	rte_mempool_free(vchan->adapter_info.req_mp);
+	vchan->adapter_info.enabled = false;
+
+	return 0;
+}
+
+int
+cnxk_dma_adapter_vchan_del(const int16_t dma_dev_id, uint16_t vchan_id)
+{
+	struct cnxk_dpi_vf_s *dpivf;
+	struct cnxk_dpi_conf *vchan;
+
+	dpivf = rte_dma_fp_objs[dma_dev_id].dev_private;
+	if ((int16_t)vchan_id == -1) {
+		uint16_t vchan_id;
+
+		for (vchan_id = 0; vchan_id < dpivf->num_vchans; vchan_id++) {
+			vchan = &dpivf->conf[vchan_id];
+			if (vchan->adapter_info.enabled)
+				dma_adapter_vchan_free(vchan);
+		}
+	} else {
+		vchan = &dpivf->conf[vchan_id];
+		if (vchan->adapter_info.enabled)
+			dma_adapter_vchan_free(vchan);
+	}
+
+	return 0;
+}
diff --git a/drivers/event/cnxk/meson.build b/drivers/event/cnxk/meson.build
index 2a30b97bff..f2e07b8665 100644
--- a/drivers/event/cnxk/meson.build
+++ b/drivers/event/cnxk/meson.build
@@ -317,7 +317,6 @@ endforeach
 
 headers = files('rte_pmd_cnxk_eventdev.h')
 deps += ['bus_pci', 'common_cnxk', 'net_cnxk', 'crypto_cnxk', 'dma_cnxk']
-
 require_iova_in_mbuf = false
 
 annotate_locks = false
-- 
2.34.1


^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [PATCH v4 2/3] dma/cnxk: support for DMA event enqueue dequeue
  2024-03-01 17:32       ` [PATCH v4 2/3] dma/cnxk: support for DMA event enqueue dequeue Amit Prakash Shukla
@ 2024-03-03 15:39         ` Jerin Jacob
  0 siblings, 0 replies; 19+ messages in thread
From: Jerin Jacob @ 2024-03-03 15:39 UTC (permalink / raw)
  To: Amit Prakash Shukla
  Cc: Pavan Nikhilesh, Shijith Thotton, Vamsi Attunuru, dev, jerinj,
	ndabilpuram, anoobj, asasidharan

On Fri, Mar 1, 2024 at 11:02 PM Amit Prakash Shukla
<amitprakashs@marvell.com> wrote:
>
> Added cnxk driver support for dma event enqueue and dequeue.
> Also added changes for work queue entry completion status and
> dual workslot DMA event enqueue.
>
> Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
> Signed-off-by: Amit Prakash Shukla <amitprakashs@marvell.com>

Fix https://mails.dpdk.org/archives/test-report/2024-March/596514.html

^ permalink raw reply	[flat|nested] 19+ messages in thread

* [PATCH v5 1/3] common/cnxk: dma result to an offset of the event
  2024-03-01 17:32     ` [PATCH v4 1/3] common/cnxk: dma result to an offset of the event Amit Prakash Shukla
  2024-03-01 17:32       ` [PATCH v4 2/3] dma/cnxk: support for DMA event enqueue dequeue Amit Prakash Shukla
  2024-03-01 17:32       ` [PATCH v4 3/3] event/cnxk: support DMA event functions Amit Prakash Shukla
@ 2024-03-03 21:08       ` Amit Prakash Shukla
  2024-03-03 21:08         ` [PATCH v5 2/3] dma/cnxk: support for DMA event enqueue dequeue Amit Prakash Shukla
  2024-03-03 21:08         ` [PATCH v5 3/3] event/cnxk: support DMA event functions Amit Prakash Shukla
  2 siblings, 2 replies; 19+ messages in thread
From: Amit Prakash Shukla @ 2024-03-03 21:08 UTC (permalink / raw)
  To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao,
	Vamsi Attunuru
  Cc: dev, jerinj, pbhagavatula, anoobj, asasidharan, Amit Prakash Shukla

Adds support to configure writing the result to an offset of the DMA
response event.
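
One detail worth calling out from the mailbox layout in this patch:
wqecsoff is an 8-bit field, so the completion-status offset passed to
roc_dpi_dev_init() must land within the first 256 bytes of the response
structure. A compile-time guard along these lines (illustrative, not
part of the patch; needs <stddef.h> and <stdint.h>) would make that
explicit:

	/* wqecsoff is only 8 bits wide in dpi_mbox_msg_t. */
	_Static_assert(offsetof(struct cnxk_dpi_compl_s, wqecs) <= UINT8_MAX,
		       "completion status offset must fit in wqecsoff");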

Signed-off-by: Amit Prakash Shukla <amitprakashs@marvell.com>
---
v5:
- Fixed checkpatch warning.

v4:
- Fixed compilation error.
- Updated release notes.

v3:
- Rebased and fixed compilation error.

v2:
- Added dual workslot enqueue support.
- Fixed compilation error.

 drivers/common/cnxk/roc_dpi.c       |  6 +++++-
 drivers/common/cnxk/roc_dpi.h       |  2 +-
 drivers/common/cnxk/roc_dpi_priv.h  |  4 ++++
 drivers/common/cnxk/roc_idev.c      | 20 ++++++++++++++++++++
 drivers/common/cnxk/roc_idev_priv.h |  3 +++
 drivers/dma/cnxk/cnxk_dmadev.c      |  2 +-
 6 files changed, 34 insertions(+), 3 deletions(-)

diff --git a/drivers/common/cnxk/roc_dpi.c b/drivers/common/cnxk/roc_dpi.c
index c241168294..1ee777d779 100644
--- a/drivers/common/cnxk/roc_dpi.c
+++ b/drivers/common/cnxk/roc_dpi.c
@@ -83,6 +83,9 @@ roc_dpi_configure(struct roc_dpi *roc_dpi, uint32_t chunk_sz, uint64_t aura, uin
 	mbox_msg.s.aura = aura;
 	mbox_msg.s.sso_pf_func = idev_sso_pffunc_get();
 	mbox_msg.s.npa_pf_func = idev_npa_pffunc_get();
+	mbox_msg.s.wqecsoff = idev_dma_cs_offset_get();
+	if (mbox_msg.s.wqecsoff)
+		mbox_msg.s.wqecs = 1;
 
 	rc = send_msg_to_pf(&pci_dev->addr, (const char *)&mbox_msg,
 			    sizeof(dpi_mbox_msg_t));
@@ -94,7 +97,7 @@ roc_dpi_configure(struct roc_dpi *roc_dpi, uint32_t chunk_sz, uint64_t aura, uin
 }
 
 int
-roc_dpi_dev_init(struct roc_dpi *roc_dpi)
+roc_dpi_dev_init(struct roc_dpi *roc_dpi, uint8_t offset)
 {
 	struct plt_pci_device *pci_dev = roc_dpi->pci_dev;
 	uint16_t vfid;
@@ -103,6 +106,7 @@ roc_dpi_dev_init(struct roc_dpi *roc_dpi)
 	vfid = ((pci_dev->addr.devid & 0x1F) << 3) | (pci_dev->addr.function & 0x7);
 	vfid -= 1;
 	roc_dpi->vfid = vfid;
+	idev_dma_cs_offset_set(offset);
 
 	return 0;
 }
diff --git a/drivers/common/cnxk/roc_dpi.h b/drivers/common/cnxk/roc_dpi.h
index 4ebde5b8a6..978e2badb2 100644
--- a/drivers/common/cnxk/roc_dpi.h
+++ b/drivers/common/cnxk/roc_dpi.h
@@ -11,7 +11,7 @@ struct roc_dpi {
 	uint16_t vfid;
 } __plt_cache_aligned;
 
-int __roc_api roc_dpi_dev_init(struct roc_dpi *roc_dpi);
+int __roc_api roc_dpi_dev_init(struct roc_dpi *roc_dpi, uint8_t offset);
 int __roc_api roc_dpi_dev_fini(struct roc_dpi *roc_dpi);
 
 int __roc_api roc_dpi_configure(struct roc_dpi *dpi, uint32_t chunk_sz, uint64_t aura,
diff --git a/drivers/common/cnxk/roc_dpi_priv.h b/drivers/common/cnxk/roc_dpi_priv.h
index 518a3e7351..52962c8bc0 100644
--- a/drivers/common/cnxk/roc_dpi_priv.h
+++ b/drivers/common/cnxk/roc_dpi_priv.h
@@ -31,6 +31,10 @@ typedef union dpi_mbox_msg_t {
 		uint64_t sso_pf_func : 16;
 		/* NPA PF function */
 		uint64_t npa_pf_func : 16;
+		/* WQE queue DMA completion status enable */
+		uint64_t wqecs : 1;
+		/* WQE queue DMA completion status offset */
+		uint64_t wqecsoff : 8;
 	} s;
 } dpi_mbox_msg_t;
 
diff --git a/drivers/common/cnxk/roc_idev.c b/drivers/common/cnxk/roc_idev.c
index 48df3518b0..d0307c666c 100644
--- a/drivers/common/cnxk/roc_idev.c
+++ b/drivers/common/cnxk/roc_idev.c
@@ -301,6 +301,26 @@ idev_sso_set(struct roc_sso *sso)
 		__atomic_store_n(&idev->sso, sso, __ATOMIC_RELEASE);
 }
 
+void
+idev_dma_cs_offset_set(uint8_t offset)
+{
+	struct idev_cfg *idev = idev_get_cfg();
+
+	if (idev != NULL)
+		idev->dma_cs_offset = offset;
+}
+
+uint8_t
+idev_dma_cs_offset_get(void)
+{
+	struct idev_cfg *idev = idev_get_cfg();
+
+	if (idev != NULL)
+		return idev->dma_cs_offset;
+
+	return 0;
+}
+
 uint64_t
 roc_idev_nix_inl_meta_aura_get(void)
 {
diff --git a/drivers/common/cnxk/roc_idev_priv.h b/drivers/common/cnxk/roc_idev_priv.h
index 8dc1cb25bf..6628b18152 100644
--- a/drivers/common/cnxk/roc_idev_priv.h
+++ b/drivers/common/cnxk/roc_idev_priv.h
@@ -43,6 +43,7 @@ struct idev_cfg {
 	struct idev_nix_inl_rx_inj_cfg inl_rx_inj_cfg;
 	plt_spinlock_t nix_inl_dev_lock;
 	plt_spinlock_t npa_dev_lock;
+	uint8_t dma_cs_offset;
 };
 
 /* Generic */
@@ -61,6 +62,8 @@ void idev_sso_pffunc_set(uint16_t sso_pf_func);
 uint16_t idev_sso_pffunc_get(void);
 struct roc_sso *idev_sso_get(void);
 void idev_sso_set(struct roc_sso *sso);
+void idev_dma_cs_offset_set(uint8_t offset);
+uint8_t idev_dma_cs_offset_get(void);
 
 /* idev lmt */
 uint16_t idev_lmt_pffunc_get(void);
diff --git a/drivers/dma/cnxk/cnxk_dmadev.c b/drivers/dma/cnxk/cnxk_dmadev.c
index 1e7f49792c..48ab09cc38 100644
--- a/drivers/dma/cnxk/cnxk_dmadev.c
+++ b/drivers/dma/cnxk/cnxk_dmadev.c
@@ -592,7 +592,7 @@ cnxk_dmadev_probe(struct rte_pci_driver *pci_drv __rte_unused, struct rte_pci_de
 	rdpi = &dpivf->rdpi;
 
 	rdpi->pci_dev = pci_dev;
-	rc = roc_dpi_dev_init(rdpi);
+	rc = roc_dpi_dev_init(rdpi, 0);
 	if (rc < 0)
 		goto err_out_free;
 
-- 
2.34.1


^ permalink raw reply	[flat|nested] 19+ messages in thread

* [PATCH v5 2/3] dma/cnxk: support for DMA event enqueue dequeue
  2024-03-03 21:08       ` [PATCH v5 1/3] common/cnxk: dma result to an offset of the event Amit Prakash Shukla
@ 2024-03-03 21:08         ` Amit Prakash Shukla
  2024-03-03 21:08         ` [PATCH v5 3/3] event/cnxk: support DMA event functions Amit Prakash Shukla
  1 sibling, 0 replies; 19+ messages in thread
From: Amit Prakash Shukla @ 2024-03-03 21:08 UTC (permalink / raw)
  To: Pavan Nikhilesh, Shijith Thotton, Vamsi Attunuru
  Cc: dev, jerinj, ndabilpuram, anoobj, asasidharan, Amit Prakash Shukla

Added cnxk driver support for DMA event enqueue and dequeue.
Also added changes for the work queue entry completion status and
for dual workslot DMA event enqueue.
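
On the completion path, the worker post-processing hooks added in this
series convert the completion pointer back into the original op, so an
application polling its event port sees the submitted op again. A
hedged sketch (evdev_id and port_id are placeholders; the calls are the
standard eventdev ones):

	struct rte_event ev;
	struct rte_event_dma_adapter_op *op;

	if (rte_event_dequeue_burst(evdev_id, port_id, &ev, 1, 0)) {
		if (ev.event_type == RTE_EVENT_TYPE_DMADEV) {
			/* cnxk_dma_adapter_dequeue() has already updated the
			 * stats and returned the completion tracker to
			 * req_mp; the application owns the op again here.
			 */
			op = ev.event_ptr;
		}
	}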

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
Signed-off-by: Amit Prakash Shukla <amitprakashs@marvell.com>
---
v5:
- Fixed checkpatch warning.

v4:
- Fixed compilation error.
- Updated release notes.

v3:
- Rebased and fixed compilation error.

v2:
- Added dual workslot enqueue support.
- Fixed compilation error.

 doc/guides/eventdevs/cnxk.rst          |   5 +
 doc/guides/rel_notes/release_24_03.rst |   4 +
 drivers/dma/cnxk/cnxk_dma_event_dp.h   |  24 ++
 drivers/dma/cnxk/cnxk_dmadev.c         |   3 +-
 drivers/dma/cnxk/cnxk_dmadev.h         |  20 +-
 drivers/dma/cnxk/cnxk_dmadev_fp.c      | 290 +++++++++++++++++++++++++
 drivers/dma/cnxk/meson.build           |   9 +-
 drivers/dma/cnxk/version.map           |  10 +
 drivers/event/cnxk/cn9k_eventdev.c     |   2 +
 drivers/event/cnxk/cn9k_worker.h       |   1 +
 drivers/event/cnxk/meson.build         |   2 +-
 11 files changed, 366 insertions(+), 4 deletions(-)
 create mode 100644 drivers/dma/cnxk/cnxk_dma_event_dp.h
 create mode 100644 drivers/dma/cnxk/version.map

diff --git a/doc/guides/eventdevs/cnxk.rst b/doc/guides/eventdevs/cnxk.rst
index cccb8a0304..9ff1052c53 100644
--- a/doc/guides/eventdevs/cnxk.rst
+++ b/doc/guides/eventdevs/cnxk.rst
@@ -227,3 +227,8 @@ ethernet devices connected to event device to override this applications can
 use `force_rx_bp=1` device arguments.
 Using unique mempool per each ethernet device is recommended when they are
 connected to event device.
+
+DMA adapter new mode support
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+DMA driver does not support DMA adapter configured in new mode.
diff --git a/doc/guides/rel_notes/release_24_03.rst b/doc/guides/rel_notes/release_24_03.rst
index 39ffef11b0..59a128a0a9 100644
--- a/doc/guides/rel_notes/release_24_03.rst
+++ b/doc/guides/rel_notes/release_24_03.rst
@@ -142,6 +142,10 @@ New Features
     to support TLS v1.2, TLS v1.3 and DTLS v1.2.
   * Added PMD API to allow raw submission of instructions to CPT.
 
+* **Updated Marvell cnxk DMA driver.**
+
+  * Added support for DMA event enqueue and dequeue.
+  * Added support for dual workslot DMA event enqueue.
 
 Removed Items
 -------------
diff --git a/drivers/dma/cnxk/cnxk_dma_event_dp.h b/drivers/dma/cnxk/cnxk_dma_event_dp.h
new file mode 100644
index 0000000000..5f890ab18b
--- /dev/null
+++ b/drivers/dma/cnxk/cnxk_dma_event_dp.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2023 Marvell.
+ */
+
+#ifndef _CNXK_DMA_EVENT_DP_H_
+#define _CNXK_DMA_EVENT_DP_H_
+
+#include <stdint.h>
+
+#include <rte_common.h>
+#include <rte_eventdev.h>
+
+__rte_internal
+uint16_t cn10k_dma_adapter_enqueue(void *ws, struct rte_event ev[], uint16_t nb_events);
+
+__rte_internal
+uint16_t cn9k_dma_adapter_enqueue(void *ws, struct rte_event ev[], uint16_t nb_events);
+
+__rte_internal
+uint16_t cn9k_dma_adapter_dual_enqueue(void *ws, struct rte_event ev[], uint16_t nb_events);
+
+__rte_internal
+uintptr_t cnxk_dma_adapter_dequeue(uintptr_t get_work1);
+#endif /* _CNXK_DMA_EVENT_DP_H_ */
diff --git a/drivers/dma/cnxk/cnxk_dmadev.c b/drivers/dma/cnxk/cnxk_dmadev.c
index 48ab09cc38..4ab3cfbdf2 100644
--- a/drivers/dma/cnxk/cnxk_dmadev.c
+++ b/drivers/dma/cnxk/cnxk_dmadev.c
@@ -589,10 +589,11 @@ cnxk_dmadev_probe(struct rte_pci_driver *pci_drv __rte_unused, struct rte_pci_de
 		dmadev->fp_obj->copy_sg = cn10k_dmadev_copy_sg;
 	}
 
+	dpivf->mcs_lock = NULL;
 	rdpi = &dpivf->rdpi;
 
 	rdpi->pci_dev = pci_dev;
-	rc = roc_dpi_dev_init(rdpi, 0);
+	rc = roc_dpi_dev_init(rdpi, offsetof(struct cnxk_dpi_compl_s, wqecs));
 	if (rc < 0)
 		goto err_out_free;
 
diff --git a/drivers/dma/cnxk/cnxk_dmadev.h b/drivers/dma/cnxk/cnxk_dmadev.h
index 350ae73b5c..610a360ba2 100644
--- a/drivers/dma/cnxk/cnxk_dmadev.h
+++ b/drivers/dma/cnxk/cnxk_dmadev.h
@@ -14,11 +14,14 @@
 #include <rte_eal.h>
 #include <rte_lcore.h>
 #include <rte_mbuf_pool_ops.h>
+#include <rte_mcslock.h>
 #include <rte_mempool.h>
 #include <rte_pci.h>
 
 #include <roc_api.h>
 
+#include "cnxk_dma_event_dp.h"
+
 #define CNXK_DPI_MAX_POINTER		    15
 #define CNXK_DPI_STRM_INC(s, var)	    ((s).var = ((s).var + 1) & (s).max_cnt)
 #define CNXK_DPI_STRM_DEC(s, var)	    ((s).var = ((s).var - 1) == -1 ? (s).max_cnt :	\
@@ -40,6 +43,11 @@
  */
 #define CNXK_DPI_REQ_CDATA 0xFF
 
+/* Set completion data to 0xDEADBEEF when a request is submitted for SSO.
+ * This helps differentiate whether the dequeue is called after a cnxk enqueue.
+ */
+#define CNXK_DPI_REQ_SSO_CDATA    0xDEADBEEF
+
 union cnxk_dpi_instr_cmd {
 	uint64_t u;
 	struct cn9k_dpi_instr_cmd {
@@ -85,7 +93,10 @@ union cnxk_dpi_instr_cmd {
 
 struct cnxk_dpi_compl_s {
 	uint64_t cdata;
-	void *cb_data;
+	void *op;
+	uint16_t dev_id;
+	uint16_t vchan;
+	uint32_t wqecs;
 };
 
 struct cnxk_dpi_cdesc_data_s {
@@ -95,6 +106,11 @@ struct cnxk_dpi_cdesc_data_s {
 	uint16_t tail;
 };
 
+struct cnxk_dma_adapter_info {
+	bool enabled;               /* Set if vchan queue is added to dma adapter. */
+	struct rte_mempool *req_mp; /* DMA inflight request mempool. */
+};
+
 struct cnxk_dpi_conf {
 	union cnxk_dpi_instr_cmd cmd;
 	struct cnxk_dpi_cdesc_data_s c_desc;
@@ -103,6 +119,7 @@ struct cnxk_dpi_conf {
 	uint16_t desc_idx;
 	struct rte_dma_stats stats;
 	uint64_t completed_offset;
+	struct cnxk_dma_adapter_info adapter_info;
 };
 
 struct cnxk_dpi_vf_s {
@@ -112,6 +129,7 @@ struct cnxk_dpi_vf_s {
 	uint16_t chunk_size_m1;
 	struct rte_mempool *chunk_pool;
 	struct cnxk_dpi_conf conf[CNXK_DPI_MAX_VCHANS_PER_QUEUE];
+	RTE_ATOMIC(rte_mcslock_t *) mcs_lock;
 	/* Slow path */
 	struct roc_dpi rdpi;
 	uint32_t aura;
diff --git a/drivers/dma/cnxk/cnxk_dmadev_fp.c b/drivers/dma/cnxk/cnxk_dmadev_fp.c
index 95df19a2db..f6562b603e 100644
--- a/drivers/dma/cnxk/cnxk_dmadev_fp.c
+++ b/drivers/dma/cnxk/cnxk_dmadev_fp.c
@@ -5,6 +5,10 @@
 #include <rte_vect.h>
 
 #include "cnxk_dmadev.h"
+#include <rte_event_dma_adapter.h>
+
+#include <cn10k_eventdev.h>
+#include <cnxk_eventdev.h>
 
 static __plt_always_inline void
 __dpi_cpy_scalar(uint64_t *src, uint64_t *dst, uint8_t n)
@@ -434,3 +438,289 @@ cn10k_dmadev_copy_sg(void *dev_private, uint16_t vchan, const struct rte_dma_sge
 
 	return dpi_conf->desc_idx++;
 }
+
+static inline uint64_t
+cnxk_dma_adapter_format_event(uint64_t event)
+{
+	uint64_t w0;
+	w0 = (event & 0xFFC000000000) >> 6 |
+	     (event & 0xFFFFFFF) | RTE_EVENT_TYPE_DMADEV << 28;
+
+	return w0;
+}
+
+uint16_t
+cn10k_dma_adapter_enqueue(void *ws, struct rte_event ev[], uint16_t nb_events)
+{
+	const struct rte_dma_sge *src, *dst;
+	struct rte_event_dma_adapter_op *op;
+	struct cnxk_dpi_compl_s *comp_ptr;
+	struct cnxk_dpi_conf *dpi_conf;
+	struct cnxk_dpi_vf_s *dpivf;
+	struct rte_event *rsp_info;
+	struct cn10k_sso_hws *work;
+	uint16_t nb_src, nb_dst;
+	rte_mcslock_t mcs_lock_me;
+	uint64_t hdr[4];
+	uint16_t count;
+	int rc;
+
+	work = (struct cn10k_sso_hws *)ws;
+
+	for (count = 0; count < nb_events; count++) {
+		op = ev[count].event_ptr;
+		rsp_info = (struct rte_event *)((uint8_t *)op +
+			     sizeof(struct rte_event_dma_adapter_op));
+		dpivf =	rte_dma_fp_objs[op->dma_dev_id].dev_private;
+		dpi_conf = &dpivf->conf[op->vchan];
+
+		if (unlikely(rte_mempool_get(dpi_conf->adapter_info.req_mp, (void **)&comp_ptr)))
+			return count;
+
+		comp_ptr->op = op;
+		comp_ptr->dev_id = op->dma_dev_id;
+		comp_ptr->vchan = op->vchan;
+		comp_ptr->cdata = CNXK_DPI_REQ_SSO_CDATA;
+
+		nb_src = op->nb_src & CNXK_DPI_MAX_POINTER;
+		nb_dst = op->nb_dst & CNXK_DPI_MAX_POINTER;
+
+		hdr[0] = dpi_conf->cmd.u | ((uint64_t)DPI_HDR_PT_WQP << 54);
+		hdr[0] |= (nb_dst << 6) | nb_src;
+		hdr[1] = ((uint64_t)comp_ptr);
+		hdr[2] = cnxk_dma_adapter_format_event(rsp_info->event);
+
+		src = &op->src_seg[0];
+		dst = &op->dst_seg[0];
+
+		if (CNXK_TAG_IS_HEAD(work->gw_rdata) ||
+		    ((CNXK_TT_FROM_TAG(work->gw_rdata) == SSO_TT_ORDERED) &&
+		    (rsp_info->sched_type & DPI_HDR_TT_MASK) ==
+			    RTE_SCHED_TYPE_ORDERED))
+			roc_sso_hws_head_wait(work->base);
+
+		rte_mcslock_lock(&dpivf->mcs_lock, &mcs_lock_me);
+		rc = __dpi_queue_write_sg(dpivf, hdr, src, dst, nb_src, nb_dst);
+		if (unlikely(rc)) {
+			rte_mcslock_unlock(&dpivf->mcs_lock, &mcs_lock_me);
+			return rc;
+		}
+
+		if (op->flags & RTE_DMA_OP_FLAG_SUBMIT) {
+			rte_wmb();
+			plt_write64(dpi_conf->pnum_words + CNXK_DPI_CMD_LEN(nb_src, nb_dst),
+				    dpivf->rdpi.rbase + DPI_VDMA_DBELL);
+			dpi_conf->stats.submitted += dpi_conf->pending + 1;
+			dpi_conf->pnum_words = 0;
+			dpi_conf->pending = 0;
+		} else {
+			dpi_conf->pnum_words += CNXK_DPI_CMD_LEN(nb_src, nb_dst);
+			dpi_conf->pending++;
+		}
+		rte_mcslock_unlock(&dpivf->mcs_lock, &mcs_lock_me);
+	}
+
+	return count;
+}
+
+uint16_t
+cn9k_dma_adapter_dual_enqueue(void *ws, struct rte_event ev[], uint16_t nb_events)
+{
+	const struct rte_dma_sge *fptr, *lptr;
+	struct rte_event_dma_adapter_op *op;
+	struct cnxk_dpi_compl_s *comp_ptr;
+	struct cn9k_sso_hws_dual *work;
+	struct cnxk_dpi_conf *dpi_conf;
+	struct cnxk_dpi_vf_s *dpivf;
+	struct rte_event *rsp_info;
+	uint16_t nb_src, nb_dst;
+	rte_mcslock_t mcs_lock_me;
+	uint64_t hdr[4];
+	uint16_t count;
+	int rc;
+
+	work = (struct cn9k_sso_hws_dual *)ws;
+
+	for (count = 0; count < nb_events; count++) {
+		op = ev[count].event_ptr;
+		rsp_info = (struct rte_event *)((uint8_t *)op +
+						sizeof(struct rte_event_dma_adapter_op));
+		dpivf = rte_dma_fp_objs[op->dma_dev_id].dev_private;
+		dpi_conf = &dpivf->conf[op->vchan];
+
+		if (unlikely(rte_mempool_get(dpi_conf->adapter_info.req_mp, (void **)&comp_ptr)))
+			return count;
+
+		comp_ptr->op = op;
+		comp_ptr->dev_id = op->dma_dev_id;
+		comp_ptr->vchan = op->vchan;
+		comp_ptr->cdata = CNXK_DPI_REQ_SSO_CDATA;
+
+		hdr[1] = dpi_conf->cmd.u | ((uint64_t)DPI_HDR_PT_WQP << 36);
+		hdr[2] = (uint64_t)comp_ptr;
+
+		nb_src = op->nb_src & CNXK_DPI_MAX_POINTER;
+		nb_dst = op->nb_dst & CNXK_DPI_MAX_POINTER;
+		/*
+		 * For the inbound case, the src pointers are the last pointers.
+		 * For all other cases, the src pointers are the first pointers.
+		 */
+		if (((dpi_conf->cmd.u >> 48) & DPI_HDR_XTYPE_MASK) == DPI_XTYPE_INBOUND) {
+			fptr = &op->dst_seg[0];
+			lptr = &op->src_seg[0];
+			RTE_SWAP(nb_src, nb_dst);
+		} else {
+			fptr = &op->src_seg[0];
+			lptr = &op->dst_seg[0];
+		}
+
+		hdr[0] = ((uint64_t)nb_dst << 54) | (uint64_t)nb_src << 48;
+		hdr[0] |= cnxk_dma_adapter_format_event(rsp_info->event);
+
+		if ((rsp_info->sched_type & DPI_HDR_TT_MASK) == RTE_SCHED_TYPE_ORDERED)
+			roc_sso_hws_head_wait(work->base[!work->vws]);
+
+		rte_mcslock_lock(&dpivf->mcs_lock, &mcs_lock_me);
+		rc = __dpi_queue_write_sg(dpivf, hdr, fptr, lptr, nb_src, nb_dst);
+		if (unlikely(rc)) {
+			rte_mcslock_unlock(&dpivf->mcs_lock, &mcs_lock_me);
+			return rc;
+		}
+
+		if (op->flags & RTE_DMA_OP_FLAG_SUBMIT) {
+			rte_wmb();
+			plt_write64(dpi_conf->pnum_words + CNXK_DPI_CMD_LEN(nb_src, nb_dst),
+				    dpivf->rdpi.rbase + DPI_VDMA_DBELL);
+			dpi_conf->stats.submitted += dpi_conf->pending + 1;
+			dpi_conf->pnum_words = 0;
+			dpi_conf->pending = 0;
+		} else {
+			dpi_conf->pnum_words += CNXK_DPI_CMD_LEN(nb_src, nb_dst);
+			dpi_conf->pending++;
+		}
+		rte_mcslock_unlock(&dpivf->mcs_lock, &mcs_lock_me);
+	}
+
+	return count;
+}
+
+uint16_t
+cn9k_dma_adapter_enqueue(void *ws, struct rte_event ev[], uint16_t nb_events)
+{
+	const struct rte_dma_sge *fptr, *lptr;
+	struct rte_event_dma_adapter_op *op;
+	struct cnxk_dpi_compl_s *comp_ptr;
+	struct cnxk_dpi_conf *dpi_conf;
+	struct cnxk_dpi_vf_s *dpivf;
+	struct rte_event *rsp_info;
+	struct cn9k_sso_hws *work;
+	uint16_t nb_src, nb_dst;
+	rte_mcslock_t mcs_lock_me;
+	uint64_t hdr[4];
+	uint16_t count;
+	int rc;
+
+	work = (struct cn9k_sso_hws *)ws;
+
+	for (count = 0; count < nb_events; count++) {
+		op = ev[count].event_ptr;
+		rsp_info = (struct rte_event *)((uint8_t *)op +
+			    sizeof(struct rte_event_dma_adapter_op));
+		dpivf =	rte_dma_fp_objs[op->dma_dev_id].dev_private;
+		dpi_conf = &dpivf->conf[op->vchan];
+
+		if (unlikely(rte_mempool_get(dpi_conf->adapter_info.req_mp, (void **)&comp_ptr)))
+			return count;
+
+		comp_ptr->op = op;
+		comp_ptr->dev_id = op->dma_dev_id;
+		comp_ptr->vchan = op->vchan;
+		comp_ptr->cdata = CNXK_DPI_REQ_SSO_CDATA;
+
+		hdr[1] = dpi_conf->cmd.u | ((uint64_t)DPI_HDR_PT_WQP << 36);
+		hdr[2] = (uint64_t)comp_ptr;
+
+		nb_src = op->nb_src & CNXK_DPI_MAX_POINTER;
+		nb_dst = op->nb_dst & CNXK_DPI_MAX_POINTER;
+		/*
+		 * For the inbound case, the src pointers are the last pointers.
+		 * For all other cases, the src pointers are the first pointers.
+		 */
+		if (((dpi_conf->cmd.u >> 48) & DPI_HDR_XTYPE_MASK) == DPI_XTYPE_INBOUND) {
+			fptr = &op->dst_seg[0];
+			lptr = &op->src_seg[0];
+			RTE_SWAP(nb_src, nb_dst);
+		} else {
+			fptr = &op->src_seg[0];
+			lptr = &op->dst_seg[0];
+		}
+
+		hdr[0] = ((uint64_t)nb_dst << 54) | (uint64_t)nb_src << 48;
+		hdr[0] |= cnxk_dma_adapter_format_event(rsp_info->event);
+
+		if ((rsp_info->sched_type & DPI_HDR_TT_MASK) == RTE_SCHED_TYPE_ORDERED)
+			roc_sso_hws_head_wait(work->base);
+
+		rte_mcslock_lock(&dpivf->mcs_lock, &mcs_lock_me);
+		rc = __dpi_queue_write_sg(dpivf, hdr, fptr, lptr, nb_src, nb_dst);
+		if (unlikely(rc)) {
+			rte_mcslock_unlock(&dpivf->mcs_lock, &mcs_lock_me);
+			return rc;
+		}
+
+		if (op->flags & RTE_DMA_OP_FLAG_SUBMIT) {
+			rte_wmb();
+			plt_write64(dpi_conf->pnum_words + CNXK_DPI_CMD_LEN(nb_src, nb_dst),
+				    dpivf->rdpi.rbase + DPI_VDMA_DBELL);
+			dpi_conf->stats.submitted += dpi_conf->pending + 1;
+			dpi_conf->pnum_words = 0;
+			dpi_conf->pending = 0;
+		} else {
+			dpi_conf->pnum_words += CNXK_DPI_CMD_LEN(nb_src, nb_dst);
+			dpi_conf->pending++;
+		}
+		rte_mcslock_unlock(&dpivf->mcs_lock, &mcs_lock_me);
+	}
+
+	return count;
+}
+
+uintptr_t
+cnxk_dma_adapter_dequeue(uintptr_t get_work1)
+{
+	struct rte_event_dma_adapter_op *op;
+	struct cnxk_dpi_compl_s *comp_ptr;
+	struct cnxk_dpi_conf *dpi_conf;
+	struct cnxk_dpi_vf_s *dpivf;
+	rte_mcslock_t mcs_lock_me;
+	RTE_ATOMIC(uint8_t) *wqecs;
+
+	comp_ptr = (struct cnxk_dpi_compl_s *)get_work1;
+
+	/* Dequeue can be called without a prior cnxk enqueue in the DMA
+	 * adapter case. When it is called from the adapter, the DMA op will
+	 * not be embedded in a completion pointer; in those cases return op.
+	 */
+	if (comp_ptr->cdata != CNXK_DPI_REQ_SSO_CDATA)
+		return (uintptr_t)comp_ptr;
+
+	dpivf =	rte_dma_fp_objs[comp_ptr->dev_id].dev_private;
+	dpi_conf = &dpivf->conf[comp_ptr->vchan];
+
+	rte_mcslock_lock(&dpivf->mcs_lock, &mcs_lock_me);
+	wqecs = (uint8_t __rte_atomic *)&comp_ptr->wqecs;
+	if (rte_atomic_load_explicit(wqecs, rte_memory_order_relaxed) != 0)
+		dpi_conf->stats.errors++;
+
+	/* Take into account errors also. This is similar to
+	 * cnxk_dmadev_completed_status().
+	 */
+	dpi_conf->stats.completed++;
+	rte_mcslock_unlock(&dpivf->mcs_lock, &mcs_lock_me);
+
+	op = (struct rte_event_dma_adapter_op *)comp_ptr->op;
+
+	rte_mempool_put(dpi_conf->adapter_info.req_mp, comp_ptr);
+
+	return (uintptr_t)op;
+}
diff --git a/drivers/dma/cnxk/meson.build b/drivers/dma/cnxk/meson.build
index e557349368..8ccc1c2cb7 100644
--- a/drivers/dma/cnxk/meson.build
+++ b/drivers/dma/cnxk/meson.build
@@ -8,6 +8,13 @@ foreach flag: error_cflags
     endif
 endforeach
 
-deps += ['bus_pci', 'common_cnxk', 'dmadev']
+driver_sdk_headers = files(
+        'cnxk_dma_event_dp.h',
+)
+
+deps += ['bus_pci', 'common_cnxk', 'dmadev', 'eventdev']
+
+includes += include_directories('../../event/cnxk')
+
 sources = files('cnxk_dmadev.c', 'cnxk_dmadev_fp.c')
 require_iova_in_mbuf = false
diff --git a/drivers/dma/cnxk/version.map b/drivers/dma/cnxk/version.map
new file mode 100644
index 0000000000..a1490abf97
--- /dev/null
+++ b/drivers/dma/cnxk/version.map
@@ -0,0 +1,10 @@
+INTERNAL {
+	global:
+
+	cn10k_dma_adapter_enqueue;
+	cn9k_dma_adapter_enqueue;
+	cn9k_dma_adapter_dual_enqueue;
+	cnxk_dma_adapter_dequeue;
+
+	local: *;
+};
diff --git a/drivers/event/cnxk/cn9k_eventdev.c b/drivers/event/cnxk/cn9k_eventdev.c
index 5e6305947b..00a87b3bcd 100644
--- a/drivers/event/cnxk/cn9k_eventdev.c
+++ b/drivers/event/cnxk/cn9k_eventdev.c
@@ -460,6 +460,7 @@ cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 		}
 	}
 	event_dev->ca_enqueue = cn9k_sso_hws_ca_enq;
+	event_dev->dma_enqueue = cn9k_dma_adapter_enqueue;
 
 	if (dev->tx_offloads & NIX_TX_MULTI_SEG_F)
 		CN9K_SET_EVDEV_ENQ_OP(dev, event_dev->txa_enqueue,
@@ -475,6 +476,7 @@ cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 		event_dev->enqueue_forward_burst =
 			cn9k_sso_hws_dual_enq_fwd_burst;
 		event_dev->ca_enqueue = cn9k_sso_hws_dual_ca_enq;
+		event_dev->dma_enqueue = cn9k_dma_adapter_dual_enqueue;
 		event_dev->profile_switch = cn9k_sso_hws_dual_profile_switch;
 
 		if (dev->rx_offloads & NIX_RX_MULTI_SEG_F) {
diff --git a/drivers/event/cnxk/cn9k_worker.h b/drivers/event/cnxk/cn9k_worker.h
index 107265d54b..a9251fc713 100644
--- a/drivers/event/cnxk/cn9k_worker.h
+++ b/drivers/event/cnxk/cn9k_worker.h
@@ -11,6 +11,7 @@
 #include "cnxk_ethdev.h"
 #include "cnxk_eventdev.h"
 #include "cnxk_worker.h"
+#include "cnxk_dma_event_dp.h"
 #include "cn9k_cryptodev_ops.h"
 
 #include "cn9k_ethdev.h"
diff --git a/drivers/event/cnxk/meson.build b/drivers/event/cnxk/meson.build
index 13281d687f..2a30b97bff 100644
--- a/drivers/event/cnxk/meson.build
+++ b/drivers/event/cnxk/meson.build
@@ -316,7 +316,7 @@ foreach flag: extra_flags
 endforeach
 
 headers = files('rte_pmd_cnxk_eventdev.h')
-deps += ['bus_pci', 'common_cnxk', 'net_cnxk', 'crypto_cnxk']
+deps += ['bus_pci', 'common_cnxk', 'net_cnxk', 'crypto_cnxk', 'dma_cnxk']
 
 require_iova_in_mbuf = false
 
-- 
2.34.1


^ permalink raw reply	[flat|nested] 19+ messages in thread
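
For reference, the enqueue path above rings the DPI doorbell only when
RTE_DMA_OP_FLAG_SUBMIT is set on the op; otherwise it accumulates the
descriptor word count (pnum_words) and pending ops so that a later submit
flushes the whole batch with a single doorbell write. Below is a minimal,
hypothetical worker-side sketch of exploiting that batching; the op field
names are taken from the driver code above, and evdev_id/port_id, op
allocation, and the queue/flow fields of evs[] are assumed to be set up
elsewhere:

	#include <rte_dmadev.h>
	#include <rte_event_dma_adapter.h>

	static inline void
	enqueue_copy_burst(uint8_t evdev_id, uint8_t port_id,
			   struct rte_event_dma_adapter_op **ops,
			   struct rte_event *evs, uint16_t n)
	{
		uint16_t i;

		for (i = 0; i < n; i++) {
			/* Defer the doorbell on all but the last op; the PMD
			 * batches the descriptor words and rings
			 * DPI_VDMA_DBELL once for the whole burst.
			 */
			ops[i]->flags = (i == n - 1) ? RTE_DMA_OP_FLAG_SUBMIT : 0;
			evs[i].event_ptr = ops[i];
		}
		rte_event_dma_adapter_enqueue(evdev_id, port_id, evs, n);
	}

Deferring the doorbell this way amortizes one MMIO write across the burst,
which is the same reason the PMD tracks pnum_words/pending per vchan.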

* [PATCH v5 3/3] event/cnxk: support DMA event functions
  2024-03-03 21:08       ` [PATCH v5 1/3] common/cnxk: dma result to an offset of the event Amit Prakash Shukla
  2024-03-03 21:08         ` [PATCH v5 2/3] dma/cnxk: support for DMA event enqueue dequeue Amit Prakash Shukla
@ 2024-03-03 21:08         ` Amit Prakash Shukla
  2024-03-04  9:54           ` Jerin Jacob
  1 sibling, 1 reply; 19+ messages in thread
From: Amit Prakash Shukla @ 2024-03-03 21:08 UTC (permalink / raw)
  To: Pavan Nikhilesh, Shijith Thotton
  Cc: dev, jerinj, vattunuru, ndabilpuram, anoobj, asasidharan,
	Amit Prakash Shukla

Added support for assigning the DMA driver enqueue and dequeue
callbacks to the eventdev. The change also implements the DMA
adapter capabilities function; a usage sketch follows this patch.

Depends-on: series-30612 ("lib/dmadev: get DMA device using device ID")

Signed-off-by: Amit Prakash Shukla <amitprakashs@marvell.com>
---
v5:
- Fixed checkpatch warning.

v4:
- Fixed compilation error.
- Updated release notes.

v3:
- Rebased and fixed compilation error.

v2:
- Added dual workslot enqueue support.
- Fixed compilation error.

 doc/guides/rel_notes/release_24_03.rst   |  4 +
 drivers/event/cnxk/cn10k_eventdev.c      | 70 +++++++++++++++++
 drivers/event/cnxk/cn10k_worker.h        |  3 +
 drivers/event/cnxk/cn9k_eventdev.c       | 67 ++++++++++++++++
 drivers/event/cnxk/cn9k_worker.h         |  2 +
 drivers/event/cnxk/cnxk_eventdev.h       |  3 +
 drivers/event/cnxk/cnxk_eventdev_adptr.c | 97 ++++++++++++++++++++++++
 drivers/event/cnxk/meson.build           |  1 -
 8 files changed, 246 insertions(+), 1 deletion(-)

diff --git a/doc/guides/rel_notes/release_24_03.rst b/doc/guides/rel_notes/release_24_03.rst
index 59a128a0a9..d3883aadd9 100644
--- a/doc/guides/rel_notes/release_24_03.rst
+++ b/doc/guides/rel_notes/release_24_03.rst
@@ -147,6 +147,10 @@ New Features
   * Added support for DMA event enqueue and dequeue.
   * Added support for dual workslot DMA event enqueue.
 
+* **Updated Marvell cnxk eventdev driver.**
+
+  * Added support for DMA driver callback assignment to eventdev enqueue and dequeue.
+
 Removed Items
 -------------
 
diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c
index 221f419055..18f3b402c9 100644
--- a/drivers/event/cnxk/cn10k_eventdev.c
+++ b/drivers/event/cnxk/cn10k_eventdev.c
@@ -8,6 +8,9 @@
 #include "cn10k_cryptodev_ops.h"
 #include "cnxk_eventdev.h"
 #include "cnxk_worker.h"
+#include "cnxk_dma_event_dp.h"
+
+#include <rte_dmadev_pmd.h>
 
 #define CN10K_SET_EVDEV_DEQ_OP(dev, deq_op, deq_ops)                           \
 	deq_op = deq_ops[dev->rx_offloads & (NIX_RX_OFFLOAD_MAX - 1)]
@@ -477,6 +480,8 @@ cn10k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 	else
 		event_dev->ca_enqueue = cn10k_cpt_sg_ver1_crypto_adapter_enqueue;
 
+	event_dev->dma_enqueue = cn10k_dma_adapter_enqueue;
+
 	if (dev->tx_offloads & NIX_TX_MULTI_SEG_F)
 		CN10K_SET_EVDEV_ENQ_OP(dev, event_dev->txa_enqueue, sso_hws_tx_adptr_enq_seg);
 	else
@@ -1020,6 +1025,67 @@ cn10k_crypto_adapter_vec_limits(const struct rte_eventdev *event_dev,
 	return 0;
 }
 
+static int
+cn10k_dma_adapter_caps_get(const struct rte_eventdev *event_dev,
+			   const int16_t dma_dev_id, uint32_t *caps)
+{
+	struct rte_dma_dev *dma_dev;
+
+	RTE_SET_USED(event_dev);
+
+	dma_dev = rte_dma_pmd_get_dev_by_id(dma_dev_id);
+	if (dma_dev == NULL)
+		return -EINVAL;
+
+	CNXK_VALID_DEV_OR_ERR_RET(dma_dev->device, "cnxk_dmadev_pci_driver", EINVAL);
+
+	*caps = RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD;
+
+	return 0;
+}
+
+static int
+cn10k_dma_adapter_vchan_add(const struct rte_eventdev *event_dev,
+			    const int16_t dma_dev_id, uint16_t vchan_id,
+			    const struct rte_event *event)
+{
+	struct rte_dma_dev *dma_dev;
+	int ret;
+
+	RTE_SET_USED(event);
+	dma_dev = rte_dma_pmd_get_dev_by_id(dma_dev_id);
+	if (dma_dev == NULL)
+		return -EINVAL;
+
+	CNXK_VALID_DEV_OR_ERR_RET(dma_dev->device, "cnxk_dmadev_pci_driver", EINVAL);
+
+	cn10k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);
+
+	ret = cnxk_dma_adapter_vchan_add(event_dev, dma_dev_id, vchan_id);
+	cn10k_sso_set_priv_mem(event_dev, NULL);
+
+	return ret;
+}
+
+static int
+cn10k_dma_adapter_vchan_del(const struct rte_eventdev *event_dev,
+			    const int16_t dma_dev_id, uint16_t vchan_id)
+{
+	struct rte_dma_dev *dma_dev;
+
+	RTE_SET_USED(event_dev);
+
+	dma_dev = rte_dma_pmd_get_dev_by_id(dma_dev_id);
+	if (dma_dev == NULL)
+		return -EINVAL;
+
+	CNXK_VALID_DEV_OR_ERR_RET(dma_dev->device, "cnxk_dmadev_pci_driver", EINVAL);
+
+	return cnxk_dma_adapter_vchan_del(dma_dev_id, vchan_id);
+}
+
 static struct eventdev_ops cn10k_sso_dev_ops = {
 	.dev_infos_get = cn10k_sso_info_get,
 	.dev_configure = cn10k_sso_dev_configure,
@@ -1061,6 +1127,10 @@ static struct eventdev_ops cn10k_sso_dev_ops = {
 	.crypto_adapter_queue_pair_del = cn10k_crypto_adapter_qp_del,
 	.crypto_adapter_vector_limits_get = cn10k_crypto_adapter_vec_limits,
 
+	.dma_adapter_caps_get = cn10k_dma_adapter_caps_get,
+	.dma_adapter_vchan_add = cn10k_dma_adapter_vchan_add,
+	.dma_adapter_vchan_del = cn10k_dma_adapter_vchan_del,
+
 	.xstats_get = cnxk_sso_xstats_get,
 	.xstats_reset = cnxk_sso_xstats_reset,
 	.xstats_get_names = cnxk_sso_xstats_get_names,
diff --git a/drivers/event/cnxk/cn10k_worker.h b/drivers/event/cnxk/cn10k_worker.h
index 8aa916fa12..0036495d98 100644
--- a/drivers/event/cnxk/cn10k_worker.h
+++ b/drivers/event/cnxk/cn10k_worker.h
@@ -7,6 +7,7 @@
 
 #include <rte_eventdev.h>
 #include "cn10k_cryptodev_event_dp.h"
+#include "cnxk_dma_event_dp.h"
 #include "cn10k_rx.h"
 #include "cnxk_worker.h"
 #include "cn10k_eventdev.h"
@@ -236,6 +237,8 @@ cn10k_sso_hws_post_process(struct cn10k_sso_hws *ws, uint64_t *u64,
 		/* Mark vector mempool object as get */
 		RTE_MEMPOOL_CHECK_COOKIES(rte_mempool_from_obj((void *)u64[1]),
 					  (void **)&u64[1], 1, 1);
+	} else if (CNXK_EVENT_TYPE_FROM_TAG(u64[0]) == RTE_EVENT_TYPE_DMADEV) {
+		u64[1] = cnxk_dma_adapter_dequeue(u64[1]);
 	}
 }
 
diff --git a/drivers/event/cnxk/cn9k_eventdev.c b/drivers/event/cnxk/cn9k_eventdev.c
index 00a87b3bcd..3cd6f448f0 100644
--- a/drivers/event/cnxk/cn9k_eventdev.c
+++ b/drivers/event/cnxk/cn9k_eventdev.c
@@ -6,6 +6,8 @@
 #include "cnxk_eventdev.h"
 #include "cnxk_worker.h"
 
+#include <rte_dmadev_pmd.h>
+
 #define CN9K_DUAL_WS_PAIR_ID(x, id) (((x)*CN9K_DUAL_WS_NB_WS) + id)
 
 #define CN9K_SET_EVDEV_DEQ_OP(dev, deq_op, deq_ops)                            \
@@ -513,6 +515,8 @@ cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 					      sso_hws_dual_tx_adptr_enq);
 	}
 
+	event_dev->dma_enqueue = cn9k_dma_adapter_enqueue;
+
 	event_dev->txa_enqueue_same_dest = event_dev->txa_enqueue;
 	rte_mb();
 #else
@@ -1020,6 +1024,65 @@ cn9k_tim_caps_get(const struct rte_eventdev *evdev, uint64_t flags,
 				 cn9k_sso_set_priv_mem);
 }
 
+static int
+cn9k_dma_adapter_caps_get(const struct rte_eventdev *event_dev,
+			  const int16_t dma_dev_id, uint32_t *caps)
+{
+	struct rte_dma_dev *dma_dev;
+
+	RTE_SET_USED(event_dev);
+
+	dma_dev = rte_dma_pmd_get_dev_by_id(dma_dev_id);
+	if (dma_dev == NULL)
+		return -EINVAL;
+
+	CNXK_VALID_DEV_OR_ERR_RET(dma_dev->device, "cnxk_dmadev_pci_driver", EINVAL);
+
+	*caps = RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD;
+
+	return 0;
+}
+
+static int
+cn9k_dma_adapter_vchan_add(const struct rte_eventdev *event_dev,
+			    const int16_t dma_dev_id, uint16_t vchan_id,
+			    const struct rte_event *event)
+{
+	struct rte_dma_dev *dma_dev;
+	int ret;
+
+	RTE_SET_USED(event);
+
+	dma_dev = rte_dma_pmd_get_dev_by_id(dma_dev_id);
+	if (dma_dev == NULL)
+		return -EINVAL;
+
+	CNXK_VALID_DEV_OR_ERR_RET(dma_dev->device, "cnxk_dmadev_pci_driver", EINVAL);
+
+	cn9k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);
+
+	ret = cnxk_dma_adapter_vchan_add(event_dev, dma_dev_id, vchan_id);
+	cn9k_sso_set_priv_mem(event_dev, NULL);
+
+	return ret;
+}
+
+static int
+cn9k_dma_adapter_vchan_del(const struct rte_eventdev *event_dev,
+			    const int16_t dma_dev_id, uint16_t vchan_id)
+{
+	struct rte_dma_dev *dma_dev;
+
+	RTE_SET_USED(event_dev);
+
+	dma_dev = rte_dma_pmd_get_dev_by_id(dma_dev_id);
+	if (dma_dev == NULL)
+		return -EINVAL;
+
+	CNXK_VALID_DEV_OR_ERR_RET(dma_dev->device, "cnxk_dmadev_pci_driver", EINVAL);
+
+	return cnxk_dma_adapter_vchan_del(dma_dev_id, vchan_id);
+}
+
 static struct eventdev_ops cn9k_sso_dev_ops = {
 	.dev_infos_get = cn9k_sso_info_get,
 	.dev_configure = cn9k_sso_dev_configure,
@@ -1058,6 +1121,10 @@ static struct eventdev_ops cn9k_sso_dev_ops = {
 	.crypto_adapter_queue_pair_add = cn9k_crypto_adapter_qp_add,
 	.crypto_adapter_queue_pair_del = cn9k_crypto_adapter_qp_del,
 
+	.dma_adapter_caps_get = cn9k_dma_adapter_caps_get,
+	.dma_adapter_vchan_add = cn9k_dma_adapter_vchan_add,
+	.dma_adapter_vchan_del = cn9k_dma_adapter_vchan_del,
+
 	.xstats_get = cnxk_sso_xstats_get,
 	.xstats_reset = cnxk_sso_xstats_reset,
 	.xstats_get_names = cnxk_sso_xstats_get_names,
diff --git a/drivers/event/cnxk/cn9k_worker.h b/drivers/event/cnxk/cn9k_worker.h
index a9251fc713..a8e998951c 100644
--- a/drivers/event/cnxk/cn9k_worker.h
+++ b/drivers/event/cnxk/cn9k_worker.h
@@ -206,6 +206,8 @@ cn9k_sso_hws_post_process(uint64_t *u64, uint64_t mbuf, const uint32_t flags,
 		if (flags & NIX_RX_OFFLOAD_TSTAMP_F)
 			cn9k_sso_process_tstamp(u64[1], mbuf, tstamp[port]);
 		u64[1] = mbuf;
+	} else if (CNXK_EVENT_TYPE_FROM_TAG(u64[0]) == RTE_EVENT_TYPE_DMADEV) {
+		u64[1] = cnxk_dma_adapter_dequeue(u64[1]);
 	}
 }
 
diff --git a/drivers/event/cnxk/cnxk_eventdev.h b/drivers/event/cnxk/cnxk_eventdev.h
index d42d1afa1a..fa99dede85 100644
--- a/drivers/event/cnxk/cnxk_eventdev.h
+++ b/drivers/event/cnxk/cnxk_eventdev.h
@@ -284,4 +284,7 @@ int cnxk_crypto_adapter_qp_add(const struct rte_eventdev *event_dev,
 			       const struct rte_event_crypto_adapter_queue_conf *conf);
 int cnxk_crypto_adapter_qp_del(const struct rte_cryptodev *cdev, int32_t queue_pair_id);
 
+int cnxk_dma_adapter_vchan_add(const struct rte_eventdev *event_dev,
+			       const int16_t dma_dev_id, uint16_t vchan_id);
+int cnxk_dma_adapter_vchan_del(const int16_t dma_dev_id, uint16_t vchan_id);
 #endif /* __CNXK_EVENTDEV_H__ */
diff --git a/drivers/event/cnxk/cnxk_eventdev_adptr.c b/drivers/event/cnxk/cnxk_eventdev_adptr.c
index 92aea92389..a2a59b16c9 100644
--- a/drivers/event/cnxk/cnxk_eventdev_adptr.c
+++ b/drivers/event/cnxk/cnxk_eventdev_adptr.c
@@ -5,6 +5,7 @@
 #include "cnxk_cryptodev_ops.h"
 #include "cnxk_ethdev.h"
 #include "cnxk_eventdev.h"
+#include "cnxk_dmadev.h"
 
 void
 cnxk_sso_updt_xae_cnt(struct cnxk_sso_evdev *dev, void *data,
@@ -737,3 +738,99 @@ cnxk_crypto_adapter_qp_del(const struct rte_cryptodev *cdev,
 
 	return 0;
 }
+
+static int
+dma_adapter_vchan_setup(const int16_t dma_dev_id, struct cnxk_dpi_conf *vchan,
+			uint16_t vchan_id)
+{
+	char name[RTE_MEMPOOL_NAMESIZE];
+	uint32_t cache_size, nb_req;
+	unsigned int req_size;
+
+	snprintf(name, RTE_MEMPOOL_NAMESIZE, "cnxk_dma_req_%u:%u", dma_dev_id, vchan_id);
+	req_size = sizeof(struct cnxk_dpi_compl_s);
+
+	nb_req = vchan->c_desc.max_cnt;
+	cache_size = 16;
+	nb_req += (cache_size * rte_lcore_count());
+
+	vchan->adapter_info.req_mp = rte_mempool_create(name, nb_req, req_size, cache_size, 0,
+							NULL, NULL, NULL, NULL, rte_socket_id(), 0);
+	if (vchan->adapter_info.req_mp == NULL)
+		return -ENOMEM;
+
+	vchan->adapter_info.enabled = true;
+
+	return 0;
+}
+
+int
+cnxk_dma_adapter_vchan_add(const struct rte_eventdev *event_dev,
+			   const int16_t dma_dev_id, uint16_t vchan_id)
+{
+	struct cnxk_sso_evdev *sso_evdev = cnxk_sso_pmd_priv(event_dev);
+	uint32_t adptr_xae_cnt = 0;
+	struct cnxk_dpi_vf_s *dpivf;
+	struct cnxk_dpi_conf *vchan;
+	int ret;
+
+	dpivf = rte_dma_fp_objs[dma_dev_id].dev_private;
+	if ((int16_t)vchan_id == -1) {
+		uint16_t vchan_id;
+
+		for (vchan_id = 0; vchan_id < dpivf->num_vchans; vchan_id++) {
+			vchan = &dpivf->conf[vchan_id];
+			ret = dma_adapter_vchan_setup(dma_dev_id, vchan, vchan_id);
+			if (ret) {
+				cnxk_dma_adapter_vchan_del(dma_dev_id, -1);
+				return ret;
+			}
+			adptr_xae_cnt += vchan->adapter_info.req_mp->size;
+		}
+	} else {
+		vchan = &dpivf->conf[vchan_id];
+		ret = dma_adapter_vchan_setup(dma_dev_id, vchan, vchan_id);
+		if (ret)
+			return ret;
+		adptr_xae_cnt = vchan->adapter_info.req_mp->size;
+	}
+
+	/* Update dma adapter XAE count */
+	sso_evdev->adptr_xae_cnt += adptr_xae_cnt;
+	cnxk_sso_xae_reconfigure((struct rte_eventdev *)(uintptr_t)event_dev);
+
+	return 0;
+}
+
+static int
+dma_adapter_vchan_free(struct cnxk_dpi_conf *vchan)
+{
+	rte_mempool_free(vchan->adapter_info.req_mp);
+	vchan->adapter_info.enabled = false;
+
+	return 0;
+}
+
+int
+cnxk_dma_adapter_vchan_del(const int16_t dma_dev_id, uint16_t vchan_id)
+{
+	struct cnxk_dpi_vf_s *dpivf;
+	struct cnxk_dpi_conf *vchan;
+
+	dpivf = rte_dma_fp_objs[dma_dev_id].dev_private;
+	if ((int16_t)vchan_id == -1) {
+		uint16_t vchan_id;
+
+		for (vchan_id = 0; vchan_id < dpivf->num_vchans; vchan_id++) {
+			vchan = &dpivf->conf[vchan_id];
+			if (vchan->adapter_info.enabled)
+				dma_adapter_vchan_free(vchan);
+		}
+	} else {
+		vchan = &dpivf->conf[vchan_id];
+		if (vchan->adapter_info.enabled)
+			dma_adapter_vchan_free(vchan);
+	}
+
+	return 0;
+}
diff --git a/drivers/event/cnxk/meson.build b/drivers/event/cnxk/meson.build
index 2a30b97bff..f2e07b8665 100644
--- a/drivers/event/cnxk/meson.build
+++ b/drivers/event/cnxk/meson.build
@@ -317,7 +317,6 @@ endforeach
 
 headers = files('rte_pmd_cnxk_eventdev.h')
 deps += ['bus_pci', 'common_cnxk', 'net_cnxk', 'crypto_cnxk', 'dma_cnxk']
-
 require_iova_in_mbuf = false
 
 annotate_locks = false
-- 
2.34.1


^ permalink raw reply	[flat|nested] 19+ messages in thread
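
For completeness, a hedged sketch of the application-side setup these ops
enable. The adapter/evdev/dmadev ids and port_conf are illustrative, and
passing a NULL event plus a vchan id of -1 to vchan_add are assumptions
based on the PMD code above (the RTE_SET_USED(event) and the -1 handling in
cnxk_dma_adapter_vchan_add()):

	#include <errno.h>
	#include <rte_event_dma_adapter.h>

	static int
	dma_adapter_setup(uint8_t adapter_id, uint8_t evdev_id,
			  int16_t dmadev_id,
			  struct rte_event_port_conf *port_conf)
	{
		uint32_t caps = 0;
		int ret;

		ret = rte_event_dma_adapter_caps_get(evdev_id, dmadev_id, &caps);
		if (ret < 0)
			return ret;

		/* cnxk reports INTERNAL_PORT_OP_FWD: the application forwards
		 * ops through rte_event_dma_adapter_enqueue() and completions
		 * come back as RTE_EVENT_TYPE_DMADEV events, converted back to
		 * op pointers by cnxk_dma_adapter_dequeue() in worker
		 * post-processing.
		 */
		if (!(caps & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD))
			return -ENOTSUP;

		ret = rte_event_dma_adapter_create(adapter_id, evdev_id,
						   port_conf,
						   RTE_EVENT_DMA_ADAPTER_OP_FORWARD);
		if (ret < 0)
			return ret;

		/* A vchan id of -1 requests all vchans of the dmadev, matching
		 * the loop in cnxk_dma_adapter_vchan_add() above.
		 */
		return rte_event_dma_adapter_vchan_add(adapter_id, dmadev_id,
						       (uint16_t)-1, NULL);
	}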

* Re: [PATCH v5 3/3] event/cnxk: support DMA event functions
  2024-03-03 21:08         ` [PATCH v5 3/3] event/cnxk: support DMA event functions Amit Prakash Shukla
@ 2024-03-04  9:54           ` Jerin Jacob
  0 siblings, 0 replies; 19+ messages in thread
From: Jerin Jacob @ 2024-03-04  9:54 UTC (permalink / raw)
  To: Amit Prakash Shukla
  Cc: Pavan Nikhilesh, Shijith Thotton, dev, jerinj, vattunuru,
	ndabilpuram, anoobj, asasidharan

On Mon, Mar 4, 2024 at 2:38 AM Amit Prakash Shukla
<amitprakashs@marvell.com> wrote:
>
> Added support for assigning the DMA driver enqueue and dequeue
> callbacks to the eventdev. The change also implements the DMA
> adapter capabilities function.
>
> Depends-on: series-30612 ("lib/dmadev: get DMA device using device ID")
>
> Signed-off-by: Amit Prakash Shukla <amitprakashs@marvell.com>


Cleaned up release notes and Series applied to
dpdk-next-net-eventdev/for-main. Thanks

^ permalink raw reply	[flat|nested] 19+ messages in thread

end of thread, other threads:[~2024-03-04  9:55 UTC | newest]

Thread overview: 19+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2023-12-08  8:28 [PATCH 1/3] common/cnxk: dma result to an offset of the event Amit Prakash Shukla
2023-12-08  8:28 ` [PATCH 2/3] dma/cnxk: support for DMA event enqueue dequeue Amit Prakash Shukla
2023-12-08  8:28 ` [PATCH 3/3] event/cnxk: support DMA event functions Amit Prakash Shukla
2024-02-09 19:43 ` [PATCH v2 1/3] common/cnxk: dma result to an offset of the event Amit Prakash Shukla
2024-02-09 19:43   ` [PATCH v2 2/3] dma/cnxk: support for DMA event enqueue dequeue Amit Prakash Shukla
2024-02-09 19:43   ` [PATCH v2 3/3] event/cnxk: support DMA event functions Amit Prakash Shukla
2024-02-25 15:22   ` [PATCH v2 1/3] common/cnxk: dma result to an offset of the event Jerin Jacob
2024-02-26  9:43   ` [PATCH v3 " Amit Prakash Shukla
2024-02-26  9:43     ` [PATCH v3 2/3] dma/cnxk: support for DMA event enqueue dequeue Amit Prakash Shukla
2024-02-26  9:43     ` [PATCH v3 3/3] event/cnxk: support DMA event functions Amit Prakash Shukla
2024-02-29 16:52       ` Jerin Jacob
2024-03-01 17:32     ` [PATCH v4 1/3] common/cnxk: dma result to an offset of the event Amit Prakash Shukla
2024-03-01 17:32       ` [PATCH v4 2/3] dma/cnxk: support for DMA event enqueue dequeue Amit Prakash Shukla
2024-03-03 15:39         ` Jerin Jacob
2024-03-01 17:32       ` [PATCH v4 3/3] event/cnxk: support DMA event functions Amit Prakash Shukla
2024-03-03 21:08       ` [PATCH v5 1/3] common/cnxk: dma result to an offset of the event Amit Prakash Shukla
2024-03-03 21:08         ` [PATCH v5 2/3] dma/cnxk: support for DMA event enqueue dequeue Amit Prakash Shukla
2024-03-03 21:08         ` [PATCH v5 3/3] event/cnxk: support DMA event functions Amit Prakash Shukla
2024-03-04  9:54           ` Jerin Jacob
