* [PATCH RFC 1/4] dmadev: add enqueue dequeue operations
@ 2025-01-29 14:36 Kommula Shiva Shankar
2025-01-29 14:36 ` [PATCH RFC 2/4] eventdev: refactor rte_event_dma_adapter_op calls Kommula Shiva Shankar
` (2 more replies)
0 siblings, 3 replies; 4+ messages in thread
From: Kommula Shiva Shankar @ 2025-01-29 14:36 UTC (permalink / raw)
To: jerinj, amitprakashs, vattunuru, fengchengwen, dev
Cc: ndabilpuram, pbhagavatula
From: Pavan Nikhilesh <pbhagavatula@marvell.com>
Add enqueue/dequeue operations that use struct rte_dma_op
to communicate with the DMA device.
These operations must be enabled at DMA device configuration
time by setting the flag rte_dma_conf::enable_enq_deq, which is
valid only when the device supports the RTE_DMA_CAPA_OPS_ENQ_DEQ
capability.
The enqueue/dequeue operations are not compatible with the
rte_dma_copy, rte_dma_copy_sg, rte_dma_fill, rte_dma_submit,
rte_dma_completed and rte_dma_completed_status family of APIs.
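For illustration, a minimal configuration sketch (hedged; device
selection, vchan setup and error handling are omitted) could look like:

    struct rte_dma_info info;
    struct rte_dma_conf conf = { .nb_vchans = 1 };

    rte_dma_info_get(dev_id, &info);
    if (info.dev_capa & RTE_DMA_CAPA_OPS_ENQ_DEQ)
            conf.enable_enq_deq = true; /* opt in to enqueue/dequeue mode */
    rte_dma_configure(dev_id, &conf);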
Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
Change-Id: I6587b19608264a3511ea4dd3cf7b865cc5cac441
---
lib/dmadev/rte_dmadev.c | 18 ++++
lib/dmadev/rte_dmadev.h | 145 +++++++++++++++++++++++++++
lib/dmadev/rte_dmadev_core.h | 10 ++
lib/dmadev/rte_dmadev_trace_fp.h | 20 ++++
lib/dmadev/rte_dmadev_trace_points.c | 6 ++
5 files changed, 199 insertions(+)
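Notes: below is a hedged sketch of the intended fast path; op_mp, dev_id,
vchan, src_iova, dst_iova and len are assumed to be set up by the
application, and each op carries one source and one destination segment:

    struct rte_dma_op *op, *done[8];
    uint16_t i, n;

    if (rte_mempool_get(op_mp, (void **)&op) != 0)
            return;
    op->flags = 0;
    op->nb_src = 1;
    op->nb_dst = 1;
    op->src_dst_seg[0].addr = src_iova;  /* source segments first */
    op->src_dst_seg[0].length = len;
    op->src_dst_seg[1].addr = dst_iova;  /* then destination segments */
    op->src_dst_seg[1].length = len;

    /* Enqueued ops are submitted immediately; no rte_dma_submit() here. */
    if (rte_dma_enqueue_ops(dev_id, vchan, &op, 1) != 1)
            rte_mempool_put(op_mp, op); /* ring full, retry later */

    n = rte_dma_dequeue_ops(dev_id, vchan, done, RTE_DIM(done));
    for (i = 0; i < n; i++) {
            if (done[i]->status != RTE_DMA_STATUS_SUCCESSFUL)
                    dma_op_error(done[i]); /* app handler, assumed */
            rte_mempool_put(done[i]->op_mp, done[i]);
    }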
diff --git a/lib/dmadev/rte_dmadev.c b/lib/dmadev/rte_dmadev.c
index 8bb7824aa1..4c108ef26e 100644
--- a/lib/dmadev/rte_dmadev.c
+++ b/lib/dmadev/rte_dmadev.c
@@ -921,6 +921,22 @@ dummy_burst_capacity(__rte_unused const void *dev_private,
return 0;
}
+static uint16_t
+dummy_enqueue(__rte_unused void *dev_private, __rte_unused uint16_t vchan,
+ __rte_unused struct rte_dma_op **ops, __rte_unused uint16_t nb_ops)
+{
+ RTE_DMA_LOG(ERR, "Enqueue not configured or not supported.");
+ return 0;
+}
+
+static uint16_t
+dummy_dequeue(__rte_unused void *dev_private, __rte_unused uint16_t vchan,
+ __rte_unused struct rte_dma_op **ops, __rte_unused uint16_t nb_ops)
+{
+ RTE_DMA_LOG(ERR, "Dequeue not configured or not supported.");
+ return 0;
+}
+
static void
dma_fp_object_dummy(struct rte_dma_fp_object *obj)
{
@@ -932,6 +948,8 @@ dma_fp_object_dummy(struct rte_dma_fp_object *obj)
obj->completed = dummy_completed;
obj->completed_status = dummy_completed_status;
obj->burst_capacity = dummy_burst_capacity;
+ obj->enqueue = dummy_enqueue;
+ obj->dequeue = dummy_dequeue;
}
static int
diff --git a/lib/dmadev/rte_dmadev.h b/lib/dmadev/rte_dmadev.h
index 2f9304a9db..e11bff64d8 100644
--- a/lib/dmadev/rte_dmadev.h
+++ b/lib/dmadev/rte_dmadev.h
@@ -265,6 +265,11 @@ int16_t rte_dma_next_dev(int16_t start_dev_id);
* known from 'nb_priorities' field in struct rte_dma_info.
*/
#define RTE_DMA_CAPA_PRI_POLICY_SP RTE_BIT64(8)
+/** Support enqueue and dequeue operations.
+ *
+ * @see struct rte_dma_op
+ */
+#define RTE_DMA_CAPA_OPS_ENQ_DEQ RTE_BIT64(9)
/** Support copy operation.
* This capability start with index of 32, so that it could leave gap between
@@ -351,6 +356,15 @@ struct rte_dma_conf {
* Lowest value indicates higher priority and vice-versa.
*/
uint16_t priority;
+ /** Indicates whether to use enqueue/dequeue operations using rte_dma_op:
+ * false - default mode, true - enqueue/dequeue mode.
+ * This value can be set to true only when the RTE_DMA_CAPA_OPS_ENQ_DEQ
+ * capability is supported. When enabled, only calls to rte_dma_enqueue_ops()
+ * and rte_dma_dequeue_ops() are valid.
+ *
+ * @see RTE_DMA_CAPA_OPS_ENQ_DEQ
+ */
+ bool enable_enq_deq;
};
/**
@@ -791,6 +805,63 @@ struct rte_dma_sge {
uint32_t length; /**< The DMA operation length. */
};
+/**
+ * A structure used to hold a DMA operation entry. All the information
+ * required for a DMA transfer shall be populated in a "struct rte_dma_op"
+ * instance.
+ */
+struct rte_dma_op {
+ uint64_t flags;
+ /**< Flags related to the operation.
+ * @see RTE_DMA_OP_FLAG_*
+ */
+ struct rte_mempool *op_mp;
+ /**< Mempool from which op is allocated. */
+ enum rte_dma_status_code status;
+ /**< Status code for this operation. */
+ uint32_t rsvd;
+ /**< Reserved for future use. */
+ uint64_t impl_opaque[2];
+ /**< Implementation-specific opaque data.
+ * A DMA device implementation may use this field to hold
+ * implementation-specific values shared between the enqueue and
+ * dequeue operations.
+ * The application should not modify this field.
+ */
+ uint64_t user_meta;
+ /**< Memory to store user specific metadata.
+ * The dma device implementation should not modify this area.
+ */
+ uint64_t event_meta;
+ /**< Event metadata of DMA completion event.
+ * Used when RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_VCHAN_EV_BIND is not
+ * supported in OP_NEW mode.
+ * @see rte_event_dma_adapter_mode::RTE_EVENT_DMA_ADAPTER_OP_NEW
+ * @see RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_VCHAN_EV_BIND
+ *
+ * Used when RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD is not
+ * supported in OP_FWD mode.
+ * @see rte_event_dma_adapter_mode::RTE_EVENT_DMA_ADAPTER_OP_FORWARD
+ * @see RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD
+ *
+ * @see struct rte_event::event
+ */
+ int16_t dma_dev_id;
+ /**< DMA device ID to be used with OP_FORWARD mode.
+ * @see rte_event_dma_adapter_mode::RTE_EVENT_DMA_ADAPTER_OP_FORWARD
+ */
+ uint16_t vchan;
+ /**< DMA vchan ID to be used with OP_FORWARD mode.
+ * @see rte_event_dma_adapter_mode::RTE_EVENT_DMA_ADAPTER_OP_FORWARD
+ */
+ uint16_t nb_src;
+ /**< Number of source segments. */
+ uint16_t nb_dst;
+ /**< Number of destination segments. */
+ struct rte_dma_sge src_dst_seg[0];
+ /**< Source and destination segments. */
+};
+
#ifdef __cplusplus
}
#endif
@@ -1154,6 +1225,80 @@ rte_dma_burst_capacity(int16_t dev_id, uint16_t vchan)
return ret;
}
+/**
+ * Enqueue rte_dma_op ops to the DMA device. This API can only be used when
+ * the underlying device supports RTE_DMA_CAPA_OPS_ENQ_DEQ and
+ * rte_dma_conf::enable_enq_deq is enabled in rte_dma_configure().
+ * The enqueued ops are immediately submitted to the DMA device.
+ * Enqueue must be coupled with dequeue to retrieve completed ops; in this
+ * mode, calls to rte_dma_submit(), rte_dma_completed() and
+ * rte_dma_completed_status() are not valid.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ * @param vchan
+ * The identifier of virtual DMA channel.
+ * @param ops
+ * Pointer to rte_dma_op array.
+ * @param nb_ops
+ * Number of rte_dma_op in the ops array.
+ * @return
+ * - Number of successfully submitted ops.
+ */
+static inline uint16_t
+rte_dma_enqueue_ops(int16_t dev_id, uint16_t vchan, struct rte_dma_op **ops, uint16_t nb_ops)
+{
+ struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
+ uint16_t ret;
+
+#ifdef RTE_DMADEV_DEBUG
+ if (!rte_dma_is_valid(dev_id))
+ return 0;
+ if (*obj->enqueue == NULL)
+ return 0;
+#endif
+
+ ret = (*obj->enqueue)(obj->dev_private, vchan, ops, nb_ops);
+ rte_dma_trace_enqueue_ops(dev_id, vchan, (void **)ops, nb_ops);
+
+ return ret;
+}
+
+/**
+ * Dequeue completed rte_dma_op ops submitted to the DMA device. This API can
+ * only be used when the underlying device supports RTE_DMA_CAPA_OPS_ENQ_DEQ
+ * and rte_dma_conf::enable_enq_deq is enabled in rte_dma_configure().
+ *
+ * @param dev_id
+ * The identifier of the device.
+ * @param vchan
+ * The identifier of virtual DMA channel.
+ * @param ops
+ * Pointer to rte_dma_op array.
+ * @param nb_ops
+ * Size of rte_dma_op array.
+ * @return
+ * - Number of successfully completed ops. Must be less than or equal to nb_ops.
+ */
+static inline uint16_t
+rte_dma_dequeue_ops(int16_t dev_id, uint16_t vchan, struct rte_dma_op **ops, uint16_t nb_ops)
+{
+ struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
+ uint16_t ret;
+
+#ifdef RTE_DMADEV_DEBUG
+ if (!rte_dma_is_valid(dev_id))
+ return 0;
+ if (*obj->dequeue == NULL)
+ return 0;
+#endif
+
+ ret = (*obj->dequeue)(obj->dev_private, vchan, ops, nb_ops);
+ rte_dma_trace_dequeue_ops(dev_id, vchan, (void **)ops, nb_ops);
+
+ return ret;
+}
+
#ifdef __cplusplus
}
#endif
diff --git a/lib/dmadev/rte_dmadev_core.h b/lib/dmadev/rte_dmadev_core.h
index 29f52514d7..20a467178f 100644
--- a/lib/dmadev/rte_dmadev_core.h
+++ b/lib/dmadev/rte_dmadev_core.h
@@ -50,6 +50,14 @@ typedef uint16_t (*rte_dma_completed_status_t)(void *dev_private,
/** @internal Used to check the remaining space in descriptor ring. */
typedef uint16_t (*rte_dma_burst_capacity_t)(const void *dev_private, uint16_t vchan);
+/** @internal Used to enqueue a rte_dma_op to the dma engine. */
+typedef uint16_t (*rte_dma_enqueue_ops_t)(void *dev_private, uint16_t vchan,
+ struct rte_dma_op **ops, uint16_t nb_ops);
+
+/** @internal Used to dequeue rte_dma_op from the dma engine. */
+typedef uint16_t (*rte_dma_dequeue_ops_t)(void *dev_private, uint16_t vchan,
+ struct rte_dma_op **ops, uint16_t nb_ops);
+
/**
* @internal
* Fast-path dmadev functions and related data are hold in a flat array.
@@ -73,6 +81,8 @@ struct __rte_cache_aligned rte_dma_fp_object {
rte_dma_completed_t completed;
rte_dma_completed_status_t completed_status;
rte_dma_burst_capacity_t burst_capacity;
+ rte_dma_enqueue_ops_t enqueue;
+ rte_dma_dequeue_ops_t dequeue;
};
extern struct rte_dma_fp_object *rte_dma_fp_objs;
diff --git a/lib/dmadev/rte_dmadev_trace_fp.h b/lib/dmadev/rte_dmadev_trace_fp.h
index f5b96838bc..5773617058 100644
--- a/lib/dmadev/rte_dmadev_trace_fp.h
+++ b/lib/dmadev/rte_dmadev_trace_fp.h
@@ -143,6 +143,26 @@ RTE_TRACE_POINT_FP(
rte_trace_point_emit_u16(ret);
)
+RTE_TRACE_POINT_FP(
+ rte_dma_trace_enqueue_ops,
+ RTE_TRACE_POINT_ARGS(int16_t dev_id, uint16_t vchan, void **ops,
+ uint16_t nb_ops),
+ rte_trace_point_emit_i16(dev_id);
+ rte_trace_point_emit_u16(vchan);
+ rte_trace_point_emit_ptr(ops);
+ rte_trace_point_emit_u16(nb_ops);
+)
+
+RTE_TRACE_POINT_FP(
+ rte_dma_trace_dequeue_ops,
+ RTE_TRACE_POINT_ARGS(int16_t dev_id, uint16_t vchan, void **ops,
+ uint16_t nb_ops),
+ rte_trace_point_emit_i16(dev_id);
+ rte_trace_point_emit_u16(vchan);
+ rte_trace_point_emit_ptr(ops);
+ rte_trace_point_emit_u16(nb_ops);
+)
+
#ifdef __cplusplus
}
#endif
diff --git a/lib/dmadev/rte_dmadev_trace_points.c b/lib/dmadev/rte_dmadev_trace_points.c
index 4c74356346..60a0de95d1 100644
--- a/lib/dmadev/rte_dmadev_trace_points.c
+++ b/lib/dmadev/rte_dmadev_trace_points.c
@@ -56,3 +56,9 @@ RTE_TRACE_POINT_REGISTER(rte_dma_trace_completed_status,
RTE_TRACE_POINT_REGISTER(rte_dma_trace_burst_capacity,
lib.dmadev.burst_capacity)
+
+RTE_TRACE_POINT_REGISTER(rte_dma_trace_enqueue_ops,
+ lib.dmadev.enqueue_ops)
+
+RTE_TRACE_POINT_REGISTER(rte_dma_trace_dequeue_ops,
+ lib.dmadev.dequeue_ops)
--
2.43.0
* [PATCH RFC 2/4] eventdev: refactor rte_event_dma_adapter_op calls
2025-01-29 14:36 [PATCH RFC 1/4] dmadev: add enqueue dequeue operations Kommula Shiva Shankar
@ 2025-01-29 14:36 ` Kommula Shiva Shankar
2025-01-29 14:36 ` [PATCH RFC 3/4] doc: update prog guide to use rte_dma_op Kommula Shiva Shankar
2025-01-29 14:36 ` [PATCH RFC 4/4] dma/cnxk: implement enqueue dequeue ops Kommula Shiva Shankar
2 siblings, 0 replies; 4+ messages in thread
From: Kommula Shiva Shankar @ 2025-01-29 14:36 UTC (permalink / raw)
To: jerinj, amitprakashs, vattunuru, fengchengwen, dev
Cc: ndabilpuram, pbhagavatula
From: Pavan Nikhilesh <pbhagavatula@marvell.com>
Migrate all invocations of the rte_event_dma_adapter_op
API to rte_dma_op.
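When sizing op mempools after this change, the element size must cover
the op plus its trailing scatter-gather array; a hedged sketch mirroring
the updated test code (pool name and counts are illustrative):

    uint32_t elt_size = sizeof(struct rte_dma_op) +
                        sizeof(struct rte_dma_sge) * 2; /* 1 src + 1 dst */
    struct rte_mempool *mp = rte_mempool_create("dma_op_pool", 8192,
                                                elt_size, 256, 0, NULL, NULL,
                                                NULL, NULL, rte_socket_id(), 0);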
Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
Change-Id: I56b6e61af72d119287b0d2ba6a9bbacc3ae808d6
---
app/test-eventdev/test_perf_common.c | 6 +--
app/test-eventdev/test_perf_common.h | 4 +-
app/test/test_event_dma_adapter.c | 6 +--
drivers/dma/cnxk/cnxk_dmadev.c | 2 +-
drivers/dma/cnxk/cnxk_dmadev_fp.c | 12 +++---
lib/eventdev/rte_event_dma_adapter.c | 18 ++++-----
lib/eventdev/rte_event_dma_adapter.h | 57 ----------------------------
7 files changed, 24 insertions(+), 81 deletions(-)
diff --git a/app/test-eventdev/test_perf_common.c b/app/test-eventdev/test_perf_common.c
index 627f07caa1..4e0109db52 100644
--- a/app/test-eventdev/test_perf_common.c
+++ b/app/test-eventdev/test_perf_common.c
@@ -562,11 +562,11 @@ crypto_adapter_enq_op_fwd(struct prod_data *p)
static inline void
dma_adapter_enq_op_fwd(struct prod_data *p)
{
- struct rte_event_dma_adapter_op *ops[BURST_SIZE] = {NULL};
+ struct rte_dma_op *ops[BURST_SIZE] = {NULL};
struct test_perf *t = p->t;
const uint32_t nb_flows = t->nb_flows;
const uint64_t nb_pkts = t->nb_pkts;
- struct rte_event_dma_adapter_op op;
+ struct rte_dma_op op;
struct rte_event evts[BURST_SIZE];
const uint8_t dev_id = p->dev_id;
struct evt_options *opt = t->opt;
@@ -2114,7 +2114,7 @@ perf_mempool_setup(struct evt_test *test, struct evt_options *opt)
} else if (opt->prod_type == EVT_PROD_TYPE_EVENT_DMA_ADPTR) {
t->pool = rte_mempool_create(test->name, /* mempool name */
opt->pool_sz, /* number of elements*/
- sizeof(struct rte_event_dma_adapter_op) +
+ sizeof(struct rte_dma_op) +
(sizeof(struct rte_dma_sge) * 2),
cache_sz, /* cache size*/
0, NULL, NULL, NULL, /* obj constructor */
diff --git a/app/test-eventdev/test_perf_common.h b/app/test-eventdev/test_perf_common.h
index d7333ad390..63078b0ee2 100644
--- a/app/test-eventdev/test_perf_common.h
+++ b/app/test-eventdev/test_perf_common.h
@@ -139,7 +139,7 @@ perf_mark_fwd_latency(enum evt_prod_type prod_type, struct rte_event *const ev)
}
pe->timestamp = rte_get_timer_cycles();
} else if (prod_type == EVT_PROD_TYPE_EVENT_DMA_ADPTR) {
- struct rte_event_dma_adapter_op *op = ev->event_ptr;
+ struct rte_dma_op *op = ev->event_ptr;
op->user_meta = rte_get_timer_cycles();
} else {
@@ -297,7 +297,7 @@ perf_process_last_stage_latency(struct rte_mempool *const pool, enum evt_prod_ty
tstamp = pe->timestamp;
rte_crypto_op_free(op);
} else if (prod_type == EVT_PROD_TYPE_EVENT_DMA_ADPTR) {
- struct rte_event_dma_adapter_op *op = ev->event_ptr;
+ struct rte_dma_op *op = ev->event_ptr;
to_free_in_bulk = op;
tstamp = op->user_meta;
diff --git a/app/test/test_event_dma_adapter.c b/app/test/test_event_dma_adapter.c
index 9988d4fc7b..7f72a4e81d 100644
--- a/app/test/test_event_dma_adapter.c
+++ b/app/test/test_event_dma_adapter.c
@@ -234,7 +234,7 @@ test_op_forward_mode(void)
{
struct rte_mbuf *src_mbuf[TEST_MAX_OP];
struct rte_mbuf *dst_mbuf[TEST_MAX_OP];
- struct rte_event_dma_adapter_op *op;
+ struct rte_dma_op *op;
struct rte_event ev[TEST_MAX_OP];
int ret, i;
@@ -266,7 +266,7 @@ test_op_forward_mode(void)
op->vchan = TEST_DMA_VCHAN_ID;
op->event_meta = dma_response_info.event;
- /* Fill in event info and update event_ptr with rte_event_dma_adapter_op */
+ /* Fill in event info and update event_ptr with rte_dma_op */
memset(&ev[i], 0, sizeof(struct rte_event));
ev[i].event = 0;
ev[i].op = RTE_EVENT_OP_NEW;
@@ -396,7 +396,7 @@ configure_dmadev(void)
rte_socket_id());
RTE_TEST_ASSERT_NOT_NULL(params.dst_mbuf_pool, "Can't create DMA_DST_MBUFPOOL\n");
- elt_size = sizeof(struct rte_event_dma_adapter_op) + (sizeof(struct rte_dma_sge) * 2);
+ elt_size = sizeof(struct rte_dma_op) + (sizeof(struct rte_dma_sge) * 2);
params.op_mpool = rte_mempool_create("EVENT_DMA_OP_POOL", DMA_OP_POOL_SIZE, elt_size, 0,
0, NULL, NULL, NULL, NULL, rte_socket_id(), 0);
RTE_TEST_ASSERT_NOT_NULL(params.op_mpool, "Can't create DMA_OP_POOL\n");
diff --git a/drivers/dma/cnxk/cnxk_dmadev.c b/drivers/dma/cnxk/cnxk_dmadev.c
index e7be3767b2..60b3d28d65 100644
--- a/drivers/dma/cnxk/cnxk_dmadev.c
+++ b/drivers/dma/cnxk/cnxk_dmadev.c
@@ -591,7 +591,7 @@ cnxk_dmadev_probe(struct rte_pci_driver *pci_drv __rte_unused, struct rte_pci_de
rdpi = &dpivf->rdpi;
rdpi->pci_dev = pci_dev;
- rc = roc_dpi_dev_init(rdpi, offsetof(struct rte_event_dma_adapter_op, impl_opaque));
+ rc = roc_dpi_dev_init(rdpi, offsetof(struct rte_dma_op, impl_opaque));
if (rc < 0)
goto err_out_free;
diff --git a/drivers/dma/cnxk/cnxk_dmadev_fp.c b/drivers/dma/cnxk/cnxk_dmadev_fp.c
index 26591235c6..340c7601d7 100644
--- a/drivers/dma/cnxk/cnxk_dmadev_fp.c
+++ b/drivers/dma/cnxk/cnxk_dmadev_fp.c
@@ -453,7 +453,7 @@ uint16_t
cn10k_dma_adapter_enqueue(void *ws, struct rte_event ev[], uint16_t nb_events)
{
const struct rte_dma_sge *src, *dst;
- struct rte_event_dma_adapter_op *op;
+ struct rte_dma_op *op;
struct cnxk_dpi_conf *dpi_conf;
struct cnxk_dpi_vf_s *dpivf;
struct cn10k_sso_hws *work;
@@ -514,7 +514,7 @@ uint16_t
cn9k_dma_adapter_dual_enqueue(void *ws, struct rte_event ev[], uint16_t nb_events)
{
const struct rte_dma_sge *fptr, *lptr;
- struct rte_event_dma_adapter_op *op;
+ struct rte_dma_op *op;
struct cn9k_sso_hws_dual *work;
struct cnxk_dpi_conf *dpi_conf;
struct cnxk_dpi_vf_s *dpivf;
@@ -530,7 +530,7 @@ cn9k_dma_adapter_dual_enqueue(void *ws, struct rte_event ev[], uint16_t nb_event
for (count = 0; count < nb_events; count++) {
op = ev[count].event_ptr;
rsp_info = (struct rte_event *)((uint8_t *)op +
- sizeof(struct rte_event_dma_adapter_op));
+ sizeof(struct rte_dma_op));
dpivf = rte_dma_fp_objs[op->dma_dev_id].dev_private;
dpi_conf = &dpivf->conf[op->vchan];
@@ -586,7 +586,7 @@ uint16_t
cn9k_dma_adapter_enqueue(void *ws, struct rte_event ev[], uint16_t nb_events)
{
const struct rte_dma_sge *fptr, *lptr;
- struct rte_event_dma_adapter_op *op;
+ struct rte_dma_op *op;
struct cnxk_dpi_conf *dpi_conf;
struct cnxk_dpi_vf_s *dpivf;
struct cn9k_sso_hws *work;
@@ -654,11 +654,11 @@ cn9k_dma_adapter_enqueue(void *ws, struct rte_event ev[], uint16_t nb_events)
uintptr_t
cnxk_dma_adapter_dequeue(uintptr_t get_work1)
{
- struct rte_event_dma_adapter_op *op;
+ struct rte_dma_op *op;
struct cnxk_dpi_conf *dpi_conf;
struct cnxk_dpi_vf_s *dpivf;
- op = (struct rte_event_dma_adapter_op *)get_work1;
+ op = (struct rte_dma_op *)get_work1;
dpivf = rte_dma_fp_objs[op->dma_dev_id].dev_private;
dpi_conf = &dpivf->conf[op->vchan];
diff --git a/lib/eventdev/rte_event_dma_adapter.c b/lib/eventdev/rte_event_dma_adapter.c
index ff2bc408c1..7baa46e0a3 100644
--- a/lib/eventdev/rte_event_dma_adapter.c
+++ b/lib/eventdev/rte_event_dma_adapter.c
@@ -39,8 +39,8 @@ struct __rte_cache_aligned dma_ops_circular_buffer {
/* Size of circular buffer */
uint16_t size;
- /* Pointer to hold rte_event_dma_adapter_op for processing */
- struct rte_event_dma_adapter_op **op_buffer;
+ /* Pointer to hold rte_dma_op for processing */
+ struct rte_dma_op **op_buffer;
};
/* Vchan information */
@@ -201,7 +201,7 @@ edma_circular_buffer_space_for_batch(struct dma_ops_circular_buffer *bufp)
static inline int
edma_circular_buffer_init(const char *name, struct dma_ops_circular_buffer *buf, uint16_t sz)
{
- buf->op_buffer = rte_zmalloc(name, sizeof(struct rte_event_dma_adapter_op *) * sz, 0);
+ buf->op_buffer = rte_zmalloc(name, sizeof(struct rte_dma_op *) * sz, 0);
if (buf->op_buffer == NULL)
return -ENOMEM;
@@ -217,7 +217,7 @@ edma_circular_buffer_free(struct dma_ops_circular_buffer *buf)
}
static inline int
-edma_circular_buffer_add(struct dma_ops_circular_buffer *bufp, struct rte_event_dma_adapter_op *op)
+edma_circular_buffer_add(struct dma_ops_circular_buffer *bufp, struct rte_dma_op *op)
{
uint16_t *tail = &bufp->tail;
@@ -235,7 +235,7 @@ edma_circular_buffer_flush_to_dma_dev(struct event_dma_adapter *adapter,
struct dma_ops_circular_buffer *bufp, uint8_t dma_dev_id,
uint16_t vchan, uint16_t *nb_ops_flushed)
{
- struct rte_event_dma_adapter_op *op;
+ struct rte_dma_op *op;
uint16_t *head = &bufp->head;
uint16_t *tail = &bufp->tail;
struct dma_vchan_info *tq;
@@ -498,7 +498,7 @@ edma_enq_to_dma_dev(struct event_dma_adapter *adapter, struct rte_event *ev, uns
{
struct rte_event_dma_adapter_stats *stats = &adapter->dma_stats;
struct dma_vchan_info *vchan_qinfo = NULL;
- struct rte_event_dma_adapter_op *dma_op;
+ struct rte_dma_op *dma_op;
uint16_t vchan, nb_enqueued = 0;
int16_t dma_dev_id;
unsigned int i, n;
@@ -641,7 +641,7 @@ edma_adapter_enq_run(struct event_dma_adapter *adapter, unsigned int max_enq)
#define DMA_ADAPTER_MAX_EV_ENQ_RETRIES 100
static inline uint16_t
-edma_ops_enqueue_burst(struct event_dma_adapter *adapter, struct rte_event_dma_adapter_op **ops,
+edma_ops_enqueue_burst(struct event_dma_adapter *adapter, struct rte_dma_op **ops,
uint16_t num)
{
struct rte_event_dma_adapter_stats *stats = &adapter->dma_stats;
@@ -687,7 +687,7 @@ edma_circular_buffer_flush_to_evdev(struct event_dma_adapter *adapter,
struct dma_ops_circular_buffer *bufp,
uint16_t *enqueue_count)
{
- struct rte_event_dma_adapter_op **ops = bufp->op_buffer;
+ struct rte_dma_op **ops = bufp->op_buffer;
uint16_t n = 0, nb_ops_flushed;
uint16_t *head = &bufp->head;
uint16_t *tail = &bufp->tail;
@@ -736,7 +736,7 @@ edma_adapter_deq_run(struct event_dma_adapter *adapter, unsigned int max_deq)
struct rte_event_dma_adapter_stats *stats = &adapter->dma_stats;
struct dma_vchan_info *vchan_info;
struct dma_ops_circular_buffer *tq_buf;
- struct rte_event_dma_adapter_op *ops;
+ struct rte_dma_op *ops;
uint16_t n, nb_deq, nb_enqueued, i;
struct dma_device_info *dev_info;
uint16_t vchan, num_vchan;
diff --git a/lib/eventdev/rte_event_dma_adapter.h b/lib/eventdev/rte_event_dma_adapter.h
index 5c480b82ff..453754d13b 100644
--- a/lib/eventdev/rte_event_dma_adapter.h
+++ b/lib/eventdev/rte_event_dma_adapter.h
@@ -151,63 +151,6 @@
extern "C" {
#endif
-/**
- * A structure used to hold event based DMA operation entry. All the information
- * required for a DMA transfer shall be populated in "struct rte_event_dma_adapter_op"
- * instance.
- */
-struct rte_event_dma_adapter_op {
- uint64_t flags;
- /**< Flags related to the operation.
- * @see RTE_DMA_OP_FLAG_*
- */
- struct rte_mempool *op_mp;
- /**< Mempool from which op is allocated. */
- enum rte_dma_status_code status;
- /**< Status code for this operation. */
- uint32_t rsvd;
- /**< Reserved for future use. */
- uint64_t impl_opaque[2];
- /**< Implementation-specific opaque data.
- * An dma device implementation use this field to hold
- * implementation specific values to share between dequeue and enqueue
- * operations.
- * The application should not modify this field.
- */
- uint64_t user_meta;
- /**< Memory to store user specific metadata.
- * The dma device implementation should not modify this area.
- */
- uint64_t event_meta;
- /**< Event metadata of DMA completion event.
- * Used when RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_VCHAN_EV_BIND is not
- * supported in OP_NEW mode.
- * @see rte_event_dma_adapter_mode::RTE_EVENT_DMA_ADAPTER_OP_NEW
- * @see RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_VCHAN_EV_BIND
- *
- * Used when RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD is not
- * supported in OP_FWD mode.
- * @see rte_event_dma_adapter_mode::RTE_EVENT_DMA_ADAPTER_OP_FORWARD
- * @see RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD
- *
- * @see struct rte_event::event
- */
- int16_t dma_dev_id;
- /**< DMA device ID to be used with OP_FORWARD mode.
- * @see rte_event_dma_adapter_mode::RTE_EVENT_DMA_ADAPTER_OP_FORWARD
- */
- uint16_t vchan;
- /**< DMA vchan ID to be used with OP_FORWARD mode
- * @see rte_event_dma_adapter_mode::RTE_EVENT_DMA_ADAPTER_OP_FORWARD
- */
- uint16_t nb_src;
- /**< Number of source segments. */
- uint16_t nb_dst;
- /**< Number of destination segments. */
- struct rte_dma_sge src_dst_seg[];
- /**< Source and destination segments. */
-};
-
/**
* DMA event adapter mode
*/
--
2.43.0
* [PATCH RFC 3/4] doc: update prog guide to use rte_dma_op
2025-01-29 14:36 [PATCH RFC 1/4] dmadev: add enqueue dequeue operations Kommula Shiva Shankar
2025-01-29 14:36 ` [PATCH RFC 2/4] eventdev: refactor rte_event_dma_adapter_op calls Kommula Shiva Shankar
@ 2025-01-29 14:36 ` Kommula Shiva Shankar
2025-01-29 14:36 ` [PATCH RFC 4/4] dma/cnxk: implement enqueue dequeue ops Kommula Shiva Shankar
2 siblings, 0 replies; 4+ messages in thread
From: Kommula Shiva Shankar @ 2025-01-29 14:36 UTC (permalink / raw)
To: jerinj, amitprakashs, vattunuru, fengchengwen, dev
Cc: ndabilpuram, pbhagavatula
From: Pavan Nikhilesh <pbhagavatula@marvell.com>
Update the documentation to replace all instances of
rte_event_dma_adapter_op with rte_dma_op.
Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
Change-Id: I0fe65d18f4601709826c11c6738cacec8991515d
---
doc/guides/prog_guide/eventdev/event_dma_adapter.rst | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/doc/guides/prog_guide/eventdev/event_dma_adapter.rst b/doc/guides/prog_guide/eventdev/event_dma_adapter.rst
index e040d89e8b..e8437a3297 100644
--- a/doc/guides/prog_guide/eventdev/event_dma_adapter.rst
+++ b/doc/guides/prog_guide/eventdev/event_dma_adapter.rst
@@ -144,7 +144,7 @@ on which it enqueues events towards the DMA adapter using ``rte_event_enqueue_bu
uint32_t cap;
int ret;
- /* Fill in event info and update event_ptr with rte_event_dma_adapter_op */
+ /* Fill in event info and update event_ptr with rte_dma_op */
memset(&ev, 0, sizeof(ev));
.
.
@@ -244,11 +244,11 @@ Set event response information
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
In the ``RTE_EVENT_DMA_ADAPTER_OP_FORWARD`` / ``RTE_EVENT_DMA_ADAPTER_OP_NEW`` mode,
-the application specifies the dmadev ID and vchan ID in ``struct rte_event_dma_adapter_op``
+the application specifies the dmadev ID and vchan ID in ``struct rte_dma_op``
and the event information (response information)
needed to enqueue an event after the DMA operation has completed.
The response information is specified in ``struct rte_event``
-and appended to the ``struct rte_event_dma_adapter_op``.
+and appended to the ``struct rte_dma_op``.
Start the adapter instance
--
2.43.0
* [PATCH RFC 4/4] dma/cnxk: implement enqueue dequeue ops
2025-01-29 14:36 [PATCH RFC 1/4] dmadev: add enqueue dequeue operations Kommula Shiva Shankar
2025-01-29 14:36 ` [PATCH RFC 2/4] eventdev: refactor rte_event_dma_adapter_op calls Kommula Shiva Shankar
2025-01-29 14:36 ` [PATCH RFC 3/4] doc: update prog guide to use rte_dma_op Kommula Shiva Shankar
@ 2025-01-29 14:36 ` Kommula Shiva Shankar
2 siblings, 0 replies; 4+ messages in thread
From: Kommula Shiva Shankar @ 2025-01-29 14:36 UTC (permalink / raw)
To: jerinj, amitprakashs, vattunuru, fengchengwen, dev
Cc: ndabilpuram, pbhagavatula
From: Pavan Nikhilesh <pbhagavatula@marvell.com>
Implement DMA enqueue/dequeue operations when the
application enables them via configuration.
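The fast-path handlers track descriptor-ring occupancy with
power-of-two masking; a hedged sketch of that arithmetic, assuming
max_cnt equals ring_size - 1 for a power-of-two ring_size:

    static inline uint16_t
    ring_space(uint16_t head, uint16_t tail, uint16_t max_cnt)
    {
            uint16_t used = (tail - head) & max_cnt; /* entries in flight */

            return (max_cnt + 1) - used; /* free descriptor slots */
    }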
Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
Change-Id: I57883ce5d358bf23a9d940ed513d0dc762227dcc
---
drivers/dma/cnxk/cnxk_dmadev.c | 25 +++++-
drivers/dma/cnxk/cnxk_dmadev.h | 7 ++
drivers/dma/cnxk/cnxk_dmadev_fp.c | 140 ++++++++++++++++++++++++++++++
3 files changed, 171 insertions(+), 1 deletion(-)
diff --git a/drivers/dma/cnxk/cnxk_dmadev.c b/drivers/dma/cnxk/cnxk_dmadev.c
index 60b3d28d65..18a4914013 100644
--- a/drivers/dma/cnxk/cnxk_dmadev.c
+++ b/drivers/dma/cnxk/cnxk_dmadev.c
@@ -19,7 +19,7 @@ cnxk_dmadev_info_get(const struct rte_dma_dev *dev, struct rte_dma_info *dev_inf
dev_info->dev_capa = RTE_DMA_CAPA_MEM_TO_MEM | RTE_DMA_CAPA_MEM_TO_DEV |
RTE_DMA_CAPA_DEV_TO_MEM | RTE_DMA_CAPA_DEV_TO_DEV |
RTE_DMA_CAPA_OPS_COPY | RTE_DMA_CAPA_OPS_COPY_SG |
- RTE_DMA_CAPA_M2D_AUTO_FREE;
+ RTE_DMA_CAPA_M2D_AUTO_FREE | RTE_DMA_CAPA_OPS_ENQ_DEQ;
if (roc_feature_dpi_has_priority()) {
dev_info->dev_capa |= RTE_DMA_CAPA_PRI_POLICY_SP;
dev_info->nb_priorities = CN10K_DPI_MAX_PRI;
@@ -114,6 +114,21 @@ cnxk_dmadev_configure(struct rte_dma_dev *dev, const struct rte_dma_conf *conf,
if (roc_feature_dpi_has_priority())
dpivf->rdpi.priority = conf->priority;
+ if (conf->enable_enq_deq) {
+ dev->fp_obj->copy = NULL;
+ dev->fp_obj->fill = NULL;
+ dev->fp_obj->submit = NULL;
+ dev->fp_obj->copy_sg = NULL;
+ dev->fp_obj->completed = NULL;
+ dev->fp_obj->completed_status = NULL;
+
+ dev->fp_obj->enqueue = cnxk_dma_ops_enqueue;
+ dev->fp_obj->dequeue = cnxk_dma_ops_dequeue;
+
+ if (roc_model_is_cn10k())
+ dev->fp_obj->enqueue = cn10k_dma_ops_enqueue;
+ }
+
return 0;
}
@@ -270,6 +285,14 @@ cnxk_dmadev_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
return -ENOMEM;
}
+ size = (max_desc * sizeof(struct rte_dma_op *));
+ dpi_conf->c_desc.ops = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
+ if (dpi_conf->c_desc.ops == NULL) {
+ plt_err("Failed to allocate ops array");
+ rte_free(dpi_conf->c_desc.compl_ptr);
+ return -ENOMEM;
+ }
+
for (i = 0; i < max_desc; i++)
dpi_conf->c_desc.compl_ptr[i * CNXK_DPI_COMPL_OFFSET] = CNXK_DPI_REQ_CDATA;
diff --git a/drivers/dma/cnxk/cnxk_dmadev.h b/drivers/dma/cnxk/cnxk_dmadev.h
index 39fd6afbe9..2615cb5b73 100644
--- a/drivers/dma/cnxk/cnxk_dmadev.h
+++ b/drivers/dma/cnxk/cnxk_dmadev.h
@@ -93,6 +93,7 @@ struct cnxk_dpi_cdesc_data_s {
uint16_t head;
uint16_t tail;
uint8_t *compl_ptr;
+ struct rte_dma_op **ops;
};
struct cnxk_dpi_conf {
@@ -132,5 +133,11 @@ int cn10k_dmadev_copy(void *dev_private, uint16_t vchan, rte_iova_t src, rte_iov
int cn10k_dmadev_copy_sg(void *dev_private, uint16_t vchan, const struct rte_dma_sge *src,
const struct rte_dma_sge *dst, uint16_t nb_src, uint16_t nb_dst,
uint64_t flags);
+uint16_t cnxk_dma_ops_enqueue(void *dev_private, uint16_t vchan, struct rte_dma_op **ops,
+ uint16_t nb_ops);
+uint16_t cn10k_dma_ops_enqueue(void *dev_private, uint16_t vchan, struct rte_dma_op **ops,
+ uint16_t nb_ops);
+uint16_t cnxk_dma_ops_dequeue(void *dev_private, uint16_t vchan, struct rte_dma_op **ops,
+ uint16_t nb_ops);
#endif
diff --git a/drivers/dma/cnxk/cnxk_dmadev_fp.c b/drivers/dma/cnxk/cnxk_dmadev_fp.c
index 340c7601d7..ca9ae7cd3f 100644
--- a/drivers/dma/cnxk/cnxk_dmadev_fp.c
+++ b/drivers/dma/cnxk/cnxk_dmadev_fp.c
@@ -675,3 +675,143 @@ cnxk_dma_adapter_dequeue(uintptr_t get_work1)
return (uintptr_t)op;
}
+
+uint16_t
+cnxk_dma_ops_enqueue(void *dev_private, uint16_t vchan, struct rte_dma_op **ops, uint16_t nb_ops)
+{
+ struct cnxk_dpi_vf_s *dpivf = dev_private;
+ struct cnxk_dpi_conf *dpi_conf = &dpivf->conf[vchan];
+ const struct rte_dma_sge *fptr, *lptr;
+ uint16_t src, dst, nwords = 0;
+ struct rte_dma_op *op;
+ uint16_t space, i;
+ uint8_t *comp_ptr;
+ uint64_t hdr[4];
+ int rc;
+
+ space = (dpi_conf->c_desc.max_cnt + 1) -
+ ((dpi_conf->c_desc.tail - dpi_conf->c_desc.head) & dpi_conf->c_desc.max_cnt);
+ space = RTE_MIN(space, nb_ops);
+
+ for (i = 0; i < space; i++) {
+ op = ops[i];
+ comp_ptr =
+ &dpi_conf->c_desc.compl_ptr[dpi_conf->c_desc.tail * CNXK_DPI_COMPL_OFFSET];
+ dpi_conf->c_desc.ops[dpi_conf->c_desc.tail] = op;
+ CNXK_DPI_STRM_INC(dpi_conf->c_desc, tail);
+
+ hdr[1] = dpi_conf->cmd.u | ((op->flags & RTE_DMA_OP_FLAG_AUTO_FREE) << 37);
+ hdr[2] = (uint64_t)comp_ptr;
+
+ src = op->nb_src;
+ dst = op->nb_dst;
+ /*
+ * For inbound case, src pointers are last pointers.
+ * For all other cases, src pointers are first pointers.
+ */
+ if (((dpi_conf->cmd.u >> 48) & DPI_HDR_XTYPE_MASK) == DPI_XTYPE_INBOUND) {
+ fptr = &op->src_dst_seg[src];
+ lptr = &op->src_dst_seg[0];
+ RTE_SWAP(src, dst);
+ } else {
+ fptr = &op->src_dst_seg[0];
+ lptr = &op->src_dst_seg[src];
+ }
+ hdr[0] = ((uint64_t)dst << 54) | (uint64_t)src << 48;
+
+ rc = __dpi_queue_write_sg(dpivf, hdr, fptr, lptr, src, dst);
+ if (rc) {
+ CNXK_DPI_STRM_DEC(dpi_conf->c_desc, tail);
+ goto done;
+ }
+ nwords += CNXK_DPI_CMD_LEN(src, dst);
+ }
+
+done:
+ if (nwords) {
+ rte_wmb();
+ plt_write64(nwords, dpivf->rdpi.rbase + DPI_VDMA_DBELL);
+ dpi_conf->stats.submitted += i;
+ }
+
+ return i;
+}
+
+uint16_t
+cn10k_dma_ops_enqueue(void *dev_private, uint16_t vchan, struct rte_dma_op **ops, uint16_t nb_ops)
+{
+ struct cnxk_dpi_vf_s *dpivf = dev_private;
+ struct cnxk_dpi_conf *dpi_conf = &dpivf->conf[vchan];
+ uint16_t space, i, nwords = 0;
+ struct rte_dma_op *op;
+ uint16_t src, dst;
+ uint8_t *comp_ptr;
+ uint64_t hdr[4];
+ int rc;
+
+ space = (dpi_conf->c_desc.max_cnt + 1) -
+ ((dpi_conf->c_desc.tail - dpi_conf->c_desc.head) & dpi_conf->c_desc.max_cnt);
+ space = RTE_MIN(space, nb_ops);
+
+ for (i = 0; i < space; i++) {
+ op = ops[i];
+ src = op->nb_src;
+ dst = op->nb_dst;
+ comp_ptr =
+ &dpi_conf->c_desc.compl_ptr[dpi_conf->c_desc.tail * CNXK_DPI_COMPL_OFFSET];
+ dpi_conf->c_desc.ops[dpi_conf->c_desc.tail] = op;
+ CNXK_DPI_STRM_INC(dpi_conf->c_desc, tail);
+
+ hdr[0] = dpi_conf->cmd.u | (dst << 6) | src;
+ hdr[1] = (uint64_t)comp_ptr;
+ hdr[2] = (1UL << 47) | ((op->flags & RTE_DMA_OP_FLAG_AUTO_FREE) << 43);
+
+ rc = __dpi_queue_write_sg(dpivf, hdr, &op->src_dst_seg[0], &op->src_dst_seg[src],
+ src, dst);
+ if (rc) {
+ CNXK_DPI_STRM_DEC(dpi_conf->c_desc, tail);
+ goto done;
+ }
+ nwords += CNXK_DPI_CMD_LEN(src, dst);
+ }
+
+done:
+ if (nwords) {
+ rte_wmb();
+ plt_write64(nwords, dpivf->rdpi.rbase + DPI_VDMA_DBELL);
+ dpi_conf->stats.submitted += i;
+ }
+
+ return i;
+}
+
+uint16_t
+cnxk_dma_ops_dequeue(void *dev_private, uint16_t vchan, struct rte_dma_op **ops, uint16_t nb_ops)
+{
+ struct cnxk_dpi_vf_s *dpivf = dev_private;
+ struct cnxk_dpi_conf *dpi_conf = &dpivf->conf[vchan];
+ struct cnxk_dpi_cdesc_data_s *c_desc = &dpi_conf->c_desc;
+ struct rte_dma_op *op;
+ uint16_t space, cnt;
+ uint8_t status;
+
+ space = (c_desc->tail - c_desc->head) & c_desc->max_cnt;
+ space = RTE_MIN(nb_ops, space);
+ for (cnt = 0; cnt < space; cnt++) {
+ status = c_desc->compl_ptr[c_desc->head * CNXK_DPI_COMPL_OFFSET];
+ op = c_desc->ops[c_desc->head];
+ op->status = status;
+ ops[cnt] = op;
+ if (status) {
+ if (status == CNXK_DPI_REQ_CDATA)
+ break;
+ dpi_conf->stats.errors++;
+ }
+ c_desc->compl_ptr[c_desc->head * CNXK_DPI_COMPL_OFFSET] = CNXK_DPI_REQ_CDATA;
+ CNXK_DPI_STRM_INC(*c_desc, head);
+ }
+
+ dpi_conf->stats.completed += cnt;
+
+ return cnt;
+}
--
2.43.0