From: Amit Prakash Shukla <amitprakashs@marvell.com>
To: Pavan Nikhilesh Bhagavatula <pbhagavatula@marvell.com>,
Jerin Jacob <jerinj@marvell.com>
Cc: "dev@dpdk.org" <dev@dpdk.org>,
Vamsi Krishna Attunuru <vattunuru@marvell.com>,
Nithin Kumar Dabilpuram <ndabilpuram@marvell.com>,
Anoob Joseph <anoobj@marvell.com>,
Aakash Sasidharan <asasidharan@marvell.com>
Subject: RE: [PATCH v2] app/eventdev: support DMA adapter test
Date: Fri, 1 Mar 2024 14:42:03 +0000
Message-ID: <PH0PR18MB51671D6182B9B848DF192D81C85E2@PH0PR18MB5167.namprd18.prod.outlook.com>
In-Reply-To: <PH0PR18MB408685752BF9735F44BB5E78DE5E2@PH0PR18MB4086.namprd18.prod.outlook.com>
Hi Pavan,
Thanks for the review and feedback. I will send the next version with the suggested changes.
Thanks,
Amit Shukla
<snip>
> > diff --git a/app/test-eventdev/test_perf_common.c b/app/test-eventdev/test_perf_common.c
> > index 5e0255cfeb..509d3f9232 100644
> > --- a/app/test-eventdev/test_perf_common.c
> > +++ b/app/test-eventdev/test_perf_common.c
> > @@ -559,6 +559,84 @@ crypto_adapter_enq_op_fwd(struct prod_data *p)
> > __func__, rte_lcore_id(), alloc_failures);
> > }
> >
> > +static inline void
> > +dma_adapter_enq_op_new(struct prod_data *p)
> > +{
> > + struct test_perf *t = p->t;
> > + const uint32_t nb_flows = t->nb_flows;
> > + const uint64_t nb_pkts = t->nb_pkts;
> > + struct rte_event_dma_adapter_op *op;
> > + struct evt_options *opt = t->opt;
> > + uint32_t flow_counter = 0;
> > + uint64_t count = 0;
> > +
> > + if (opt->verbose_level > 1)
> > + printf("%s(): lcore %d queue %d dma_dev_id %u
> > dma_dev_vhcan_id %u\n",
> > + __func__, rte_lcore_id(), p->queue_id, p->da.dma_dev_id,
> > + p->da.vchan_id);
> > +
> > + while (count < nb_pkts && t->done == false) {
> > + op = p->da.dma_op[flow_counter++ % nb_flows];
> > + while (rte_dma_copy_sg(op->dma_dev_id, op->vchan, op->src_seg,
> > + op->dst_seg, op->nb_src, op->nb_dst,
> > + op->flags) < 0 && t->done == false)
>
> Is op type new really sending events to eventdev? If not, you can remove this
> test from the app for now and add it when dmadev supports enqueueing ops
> similar to cryptodev.
>
> You can set --dma_adptr_mode default to FORWARD and say NEW is not
> supported.
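>
> A rough sketch of that check (untested, using the option and helper
> names already in this patch) could be:
>
>     if (opt->dma_adptr_mode == RTE_EVENT_DMA_ADAPTER_OP_NEW) {
>         evt_err("OP_NEW mode is not supported for dma adapter");
>         return -ENOTSUP;
>     }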
>
> > + rte_pause();
> > +
> > + count++;
> > + }
> > +}
> > +
> > +static inline void
> > +dma_adapter_enq_op_fwd(struct prod_data *p)
> > +{
> > + struct test_perf *t = p->t;
> > + const uint32_t nb_flows = t->nb_flows;
> > + const uint64_t nb_pkts = t->nb_pkts;
> > + struct rte_event_dma_adapter_op *op;
> > + const uint8_t dev_id = p->dev_id;
> > + struct evt_options *opt = t->opt;
> > + const uint8_t port = p->port_id;
> > + uint32_t flow_counter = 0;
> > + struct rte_event ev;
> > + uint64_t count = 0;
> > +
> > + if (opt->verbose_level > 1)
> > + printf("%s(): lcore %d port %d queue %d dma_dev_id %u
> > dma_dev_vchan_id %u\n",
> > + __func__, rte_lcore_id(), port, p->queue_id,
> > + p->da.dma_dev_id, p->da.vchan_id);
> > +
> > + ev.event = 0;
> > + ev.op = RTE_EVENT_OP_NEW;
> > + ev.queue_id = p->queue_id;
> > + ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
> > + ev.event_type = RTE_EVENT_TYPE_CPU;
> > +
> > + while (count < nb_pkts && t->done == false) {
> > + op = p->da.dma_op[flow_counter++ % nb_flows];
> > + ev.event_ptr = op;
> > +
> > + while (rte_event_dma_adapter_enqueue(dev_id, port, &ev, 1) != 1 &&
> > + t->done == false)
> > + rte_pause();
> > +
> > + count++;
> > + }
> > +}
> > +
> > +static inline int
> > +perf_event_dma_producer(void *arg)
> > +{
> > + struct prod_data *p = arg;
> > + struct evt_options *opt = p->t->opt;
> > +
> > + if (opt->dma_adptr_mode == RTE_EVENT_DMA_ADAPTER_OP_NEW)
> > + dma_adapter_enq_op_new(p);
> > + else
> > + dma_adapter_enq_op_fwd(p);
> > +
> > + return 0;
> > +}
> > +
> > static inline int
> > perf_event_crypto_producer(void *arg)
> > {
> > @@ -841,7 +919,9 @@ perf_producer_wrapper(void *arg)
> > return perf_event_crypto_producer_burst(arg);
> > else
> > return perf_event_crypto_producer(arg);
> > - }
> > + } else if (t->opt->prod_type == EVT_PROD_TYPE_EVENT_DMA_ADPTR)
> > + return perf_event_dma_producer(arg);
> > +
> > return 0;
> > }
> >
> > @@ -952,7 +1032,9 @@ perf_launch_lcores(struct evt_test *test, struct evt_options *opt,
> > opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR ||
> > - opt->prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR) {
> > + opt->prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR ||
> > + opt->prod_type == EVT_PROD_TYPE_EVENT_DMA_ADPTR) {
> > t->done = true;
> > break;
> > }
> > @@ -962,7 +1044,8 @@ perf_launch_lcores(struct evt_test *test, struct evt_options *opt,
> > if (new_cycles - dead_lock_cycles > dead_lock_sample &&
> > (opt->prod_type == EVT_PROD_TYPE_SYNT ||
> > opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR ||
> > - opt->prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR)) {
> > + opt->prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR ||
> > + opt->prod_type == EVT_PROD_TYPE_EVENT_DMA_ADPTR)) {
> > remaining = t->outstand_pkts - processed_pkts(t);
> > if (dead_lock_remaining == remaining) {
> > rte_event_dev_dump(opt->dev_id, stdout);
> > @@ -1162,6 +1245,39 @@ perf_event_crypto_adapter_setup(struct test_perf *t, struct prod_data *p)
> > return ret;
> > }
> >
> > +static int
> > +perf_event_dma_adapter_setup(struct test_perf *t, struct prod_data *p)
> > +{
> > + struct evt_options *opt = t->opt;
> > + struct rte_event event;
> > + uint32_t cap;
> > + int ret;
> > +
> > + ret = rte_event_dma_adapter_caps_get(p->dev_id, p->da.dma_dev_id, &cap);
> > + if (ret) {
> > + evt_err("Failed to get dma adapter capabilities");
> > + return ret;
> > + }
> > +
> > + if (((opt->dma_adptr_mode == RTE_EVENT_DMA_ADAPTER_OP_NEW) &&
> > + !(cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_NEW)) ||
> > + ((opt->dma_adptr_mode == RTE_EVENT_DMA_ADAPTER_OP_FORWARD) &&
> > + !(cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD))) {
> > + evt_err("dma adapter %s mode unsupported\n",
> > + opt->dma_adptr_mode ? "OP_FORWARD" : "OP_NEW");
> > + return -ENOTSUP;
> > + }
> > +
> > + if (cap & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_VCHAN_EV_BIND)
> > + ret = rte_event_dma_adapter_vchan_add(TEST_PERF_DA_ID, p->da.dma_dev_id,
> > + p->da.vchan_id, &event);
> > + else
> > + ret = rte_event_dma_adapter_vchan_add(TEST_PERF_DA_ID, p->da.dma_dev_id,
> > + p->da.vchan_id, NULL);
> > +
> > + return ret;
> > +}
> > +
> > static void *
> > cryptodev_sym_sess_create(struct prod_data *p, struct test_perf *t)
> > {
> > @@ -1399,6 +1515,77 @@ perf_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
> > }
> >
> > qp_id++;
> > + prod++;
> > + }
> > + } else if (opt->prod_type == EVT_PROD_TYPE_EVENT_DMA_ADPTR) {
> > + struct rte_event_port_conf conf = *port_conf;
> > + struct rte_event_dma_adapter_op *op;
> > + struct rte_mempool *pool = t->pool;
> > + uint8_t dma_dev_id = 0;
> > + uint16_t vchan_id = 0;
> > +
> > + ret = rte_event_dma_adapter_create(TEST_PERF_DA_ID, opt->dev_id, &conf, 0);
> > + if (ret) {
> > + evt_err("Failed to create dma adapter");
> > + return ret;
> > + }
> > +
> > + prod = 0;
> > + for (; port < perf_nb_event_ports(opt); port++) {
> > + struct prod_data *p = &t->prod[port];
> > + struct rte_event *response_info;
> > + uint32_t flow_id;
> > +
> > + p->dev_id = opt->dev_id;
> > + p->port_id = port;
> > + p->queue_id = prod * stride;
> > + p->da.dma_dev_id = dma_dev_id;
> > + p->da.vchan_id = vchan_id;
> > + p->da.dma_op = rte_zmalloc_socket(NULL, sizeof(void *) * t->nb_flows,
> > + RTE_CACHE_LINE_SIZE, opt->socket_id);
> > +
> > + p->t = t;
> > +
> > + ret = perf_event_dma_adapter_setup(t, p);
> > + if (ret)
> > + return ret;
> > +
> > + for (flow_id = 0; flow_id < t->nb_flows; flow_id++) {
> > + rte_mempool_get(t->da_op_pool, (void **)&op);
> > +
> > + op->src_seg = rte_malloc(NULL, sizeof(struct rte_dma_sge), 0);
> > + op->dst_seg = rte_malloc(NULL, sizeof(struct rte_dma_sge), 0);
> > +
> > + op->src_seg->addr = rte_pktmbuf_iova(rte_pktmbuf_alloc(pool));
> > + op->dst_seg->addr = rte_pktmbuf_iova(rte_pktmbuf_alloc(pool));
> > + op->src_seg->length = 1024;
> > + op->dst_seg->length = 1024;
> > + op->nb_src = 1;
> > + op->nb_dst = 1;
> > + op->flags = RTE_DMA_OP_FLAG_SUBMIT;
> > + op->op_mp = t->da_op_pool;
> > + op->dma_dev_id = dma_dev_id;
> > + op->vchan = vchan_id;
> > +
> > + response_info = (struct rte_event *)((uint8_t *)op +
> > + sizeof(struct rte_event_dma_adapter_op));
> > + response_info->queue_id = p->queue_id;
> > + response_info->sched_type = RTE_SCHED_TYPE_ATOMIC;
> > + response_info->flow_id = flow_id;
> > +
> > + p->da.dma_op[flow_id] = op;
> > + }
> > +
> > + conf.event_port_cfg |=
> > + RTE_EVENT_PORT_CFG_HINT_PRODUCER |
> > + RTE_EVENT_PORT_CFG_HINT_CONSUMER;
> > +
> > + ret = rte_event_port_setup(opt->dev_id, port, &conf);
> > + if (ret) {
> > + evt_err("failed to setup port %d", port);
> > + return ret;
> > + }
> > +
> > prod++;
> > }
> > } else {
> > @@ -1463,7 +1650,8 @@ perf_opt_check(struct evt_options *opt, uint64_t nb_queues)
> >
> > if (opt->prod_type == EVT_PROD_TYPE_SYNT ||
> > opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR ||
> > - opt->prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR) {
> > + opt->prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR ||
> > + opt->prod_type == EVT_PROD_TYPE_EVENT_DMA_ADPTR) {
> > /* Validate producer lcores */
> > if (evt_lcores_has_overlap(opt->plcores,
> > rte_get_main_lcore())) {
> > @@ -1855,6 +2043,103 @@ perf_cryptodev_destroy(struct evt_test *test, struct evt_options *opt)
> > rte_mempool_free(t->ca_vector_pool);
> > }
> >
> > +int
> > +perf_dmadev_setup(struct evt_test *test, struct evt_options *opt)
> > +{
> > + const struct rte_dma_conf conf = { .nb_vchans = 1};
> > + const struct rte_dma_vchan_conf qconf = {
> > + .direction = RTE_DMA_DIR_MEM_TO_MEM,
> > + .nb_desc = 1024,
> > + };
> > + struct test_perf *t = evt_test_priv(test);
> > + uint8_t dma_dev_count, dma_dev_id;
> > + unsigned int elt_size;
> > + int ret;
> > +
> > + if (opt->prod_type != EVT_PROD_TYPE_EVENT_DMA_ADPTR)
> > + return 0;
> > +
> > + dma_dev_count = rte_dma_count_avail();
> > + if (dma_dev_count == 0) {
> > + evt_err("No dma devices available\n");
> > + return -ENODEV;
> > + }
> > +
> > + elt_size = sizeof(struct rte_event_dma_adapter_op) + sizeof(struct rte_event);
> > + t->da_op_pool = rte_mempool_create("dma_op_pool", opt->pool_sz, elt_size, 256,
> > + 0, NULL, NULL, NULL, NULL, rte_socket_id(), 0);
> > + if (t->da_op_pool == NULL) {
> > + evt_err("Failed to create dma op pool");
> > + return -ENOMEM;
> > + }
> > +
> > + for (dma_dev_id = 0; dma_dev_id < dma_dev_count; dma_dev_id++) {
>
> Since only dma_dev_id 0 is used, we can skip configuring the rest.
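>
> i.e. something like this (untested sketch, dev 0 / vchan 0 only, reusing
> the conf and qconf already defined in this function):
>
>     ret = rte_dma_configure(0, &conf);
>     if (ret) {
>         evt_err("Failed to configure dma dev 0");
>         goto err;
>     }
>     ret = rte_dma_vchan_setup(0, 0, &qconf);
>     if (ret) {
>         evt_err("Failed to setup vchan on dma dev 0");
>         goto err;
>     }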
>
> > + int vchan_id;
> > +
> > + ret = rte_dma_configure(dma_dev_id, &conf);
> > + if (ret) {
> > + evt_err("Failed to configure dma dev (%u)",
> > dma_dev_id);
> > + goto err;
> > + }
> > +
> > + for (vchan_id = 0; vchan_id < conf.nb_vchans; vchan_id++) {
> > + ret = rte_dma_vchan_setup(dma_dev_id, vchan_id, &qconf);
> > + if (ret) {
> > + evt_err("Failed to setup vchan on dma dev %u\n",
> > + dma_dev_id);
> > + goto err;
> > + }
> > + }
> > + }
> > +
> > + return 0;
> > +err:
> > + for (dma_dev_id = 0; dma_dev_id < dma_dev_count; dma_dev_id++)
> > + rte_dma_close(dma_dev_id);
> > +
> > + rte_mempool_free(t->da_op_pool);
> > +
> > + return ret;
> > +}
> > +
> > +void
> > +perf_dmadev_destroy(struct evt_test *test, struct evt_options *opt)
> > +{
> > + uint8_t dma_dev_id, dma_dev_count = rte_dma_count_avail();
> > + struct test_perf *t = evt_test_priv(test);
> > + uint16_t port;
> > +
> > + if (opt->prod_type != EVT_PROD_TYPE_EVENT_DMA_ADPTR)
> > + return;
> > +
> > + for (port = t->nb_workers; port < perf_nb_event_ports(opt); port++) {
> > + struct prod_data *p = &t->prod[port];
> > + struct rte_event_dma_adapter_op *op;
> > + uint32_t flow_id;
> > +
> > + for (flow_id = 0; flow_id < t->nb_flows; flow_id++) {
> > + op = p->da.dma_op[flow_id];
> > +
> > + rte_pktmbuf_free((struct rte_mbuf *)(uintptr_t)op->src_seg->addr);
> > + rte_pktmbuf_free((struct rte_mbuf *)(uintptr_t)op->dst_seg->addr);
> > + rte_free(op->src_seg);
> > + rte_free(op->dst_seg);
> > + rte_mempool_put(op->op_mp, op);
> > + }
> > +
> > + rte_event_dma_adapter_vchan_del(TEST_PERF_DA_ID, p->da.dma_dev_id, p->da.vchan_id);
> > + }
> > +
> > + rte_event_dma_adapter_free(TEST_PERF_DA_ID);
> > +
> > + for (dma_dev_id = 0; dma_dev_id < dma_dev_count; dma_dev_id++) {
>
> Same as above.
>
> > + rte_dma_stop(dma_dev_id);
> > + rte_dma_close(dma_dev_id);
> > + }
> > +
> > + rte_mempool_free(t->da_op_pool);
> > +}
> > +
> > int
> > perf_mempool_setup(struct evt_test *test, struct evt_options *opt)
> > {
> > diff --git a/app/test-eventdev/test_perf_common.h b/app/test-eventdev/test_perf_common.h
> > index faedd471c6..2b4f572b7f 100644
> > --- a/app/test-eventdev/test_perf_common.h
> > +++ b/app/test-eventdev/test_perf_common.h
> > @@ -27,6 +27,7 @@
> > #include "evt_test.h"
> >
> > #define TEST_PERF_CA_ID 0
> > +#define TEST_PERF_DA_ID 0
> >
> > struct test_perf;
> >
> > @@ -43,11 +44,19 @@ struct crypto_adptr_data {
> > uint16_t cdev_qp_id;
> > void **crypto_sess;
> > };
> > +
> > +struct dma_adptr_data {
> > + uint8_t dma_dev_id;
> > + uint16_t vchan_id;
> > + void **dma_op;
> > +};
> > +
> > struct prod_data {
> > uint8_t dev_id;
> > uint8_t port_id;
> > uint8_t queue_id;
> > struct crypto_adptr_data ca;
> > + struct dma_adptr_data da;
> > struct test_perf *t;
> > } __rte_cache_aligned;
> >
> > @@ -72,6 +81,7 @@ struct test_perf {
> > struct rte_mempool *ca_sess_pool;
> > struct rte_mempool *ca_asym_sess_pool;
> > struct rte_mempool *ca_vector_pool;
> > + struct rte_mempool *da_op_pool;
> > } __rte_cache_aligned;
> >
> > struct perf_elt {
> > @@ -95,9 +105,8 @@ struct perf_elt {
> > const uint8_t port = w->port_id;\
> > const uint8_t prod_timer_type = \
> > opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR;\
> > - const uint8_t prod_crypto_type = \
> > - opt->prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR;\
> > uint8_t *const sched_type_list = &t->sched_type_list[0];\
> > + const enum evt_prod_type prod_type = opt->prod_type;\
> > struct rte_mempool *const pool = t->pool;\
> > const uint8_t nb_stages = t->opt->nb_stages;\
> > const uint8_t laststage = nb_stages - 1;\
> > @@ -206,9 +215,9 @@ perf_handle_crypto_vector_ev(struct rte_event *ev, struct perf_elt **pe,
> > }
> >
> > static __rte_always_inline int
> > -perf_process_last_stage(struct rte_mempool *const pool, uint8_t prod_crypto_type,
> > - struct rte_event *const ev, struct worker_data *const w,
> > - void *bufs[], int const buf_sz, uint8_t count)
> > +perf_process_last_stage(struct rte_mempool *const pool, enum evt_prod_type prod_type,
> > + struct rte_event *const ev, struct worker_data *const w,
> > + void *bufs[], int const buf_sz, uint8_t count)
> > {
> > void *to_free_in_bulk;
> >
> > @@ -219,7 +228,7 @@ perf_process_last_stage(struct rte_mempool *const pool, uint8_t prod_crypto_type
> > rte_atomic_thread_fence(__ATOMIC_RELEASE);
> > w->processed_pkts++;
> >
> > - if (prod_crypto_type) {
> > + if (prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR) {
> > struct rte_crypto_op *op = ev->event_ptr;
> > struct rte_mbuf *m;
> >
> > @@ -234,6 +243,8 @@ perf_process_last_stage(struct rte_mempool *const pool, uint8_t prod_crypto_type
> > to_free_in_bulk = op->asym->modex.result.data;
> > }
> > rte_crypto_op_free(op);
> > + } else if (prod_type == EVT_PROD_TYPE_EVENT_DMA_ADPTR) {
> > + return count;
> > } else {
> > to_free_in_bulk = ev->event_ptr;
> > }
> > @@ -248,9 +259,9 @@ perf_process_last_stage(struct rte_mempool *const pool, uint8_t prod_crypto_type
> > }
> >
> > static __rte_always_inline uint8_t
> > -perf_process_last_stage_latency(struct rte_mempool *const pool, uint8_t prod_crypto_type,
> > - struct rte_event *const ev, struct worker_data *const w,
> > - void *bufs[], int const buf_sz, uint8_t count)
> > +perf_process_last_stage_latency(struct rte_mempool *const pool, enum evt_prod_type prod_type,
> > + struct rte_event *const ev, struct worker_data *const w,
> > + void *bufs[], int const buf_sz, uint8_t count)
> > {
> > uint64_t latency;
> > struct perf_elt *pe;
> > @@ -262,7 +273,7 @@ perf_process_last_stage_latency(struct rte_mempool *const pool, uint8_t prod_cry
> > rte_atomic_thread_fence(__ATOMIC_RELEASE);
> > w->processed_pkts++;
> >
> > - if (prod_crypto_type) {
> > + if (prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR) {
> > struct rte_crypto_op *op = ev->event_ptr;
> > struct rte_mbuf *m;
> >
> > @@ -280,6 +291,8 @@ perf_process_last_stage_latency(struct rte_mempool *const pool, uint8_t prod_cry
> > to_free_in_bulk = op->asym->modex.result.data;
> > }
> > rte_crypto_op_free(op);
> > + } else if (prod_type == EVT_PROD_TYPE_EVENT_DMA_ADPTR) {
> > + return count;
> > } else {
> > pe = ev->event_ptr;
> > to_free_in_bulk = pe;
> > @@ -346,6 +359,7 @@ int perf_opt_check(struct evt_options *opt, uint64_t nb_queues);
> > int perf_test_setup(struct evt_test *test, struct evt_options *opt);
> > int perf_ethdev_setup(struct evt_test *test, struct evt_options *opt);
> > int perf_cryptodev_setup(struct evt_test *test, struct evt_options *opt);
> > +int perf_dmadev_setup(struct evt_test *test, struct evt_options *opt);
> > int perf_mempool_setup(struct evt_test *test, struct evt_options *opt);
> > int perf_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
> > uint8_t stride, uint8_t nb_queues,
> > @@ -357,6 +371,7 @@ void perf_opt_dump(struct evt_options *opt, uint8_t nb_queues);
> > void perf_test_destroy(struct evt_test *test, struct evt_options *opt);
> > void perf_eventdev_destroy(struct evt_test *test, struct evt_options *opt);
> > void perf_cryptodev_destroy(struct evt_test *test, struct evt_options *opt);
> > +void perf_dmadev_destroy(struct evt_test *test, struct evt_options *opt);
> > void perf_ethdev_destroy(struct evt_test *test, struct evt_options *opt);
> > void perf_ethdev_rx_stop(struct evt_test *test, struct evt_options *opt);
> > void perf_mempool_destroy(struct evt_test *test, struct evt_options *opt);
> > diff --git a/app/test-eventdev/test_perf_queue.c b/app/test-eventdev/test_perf_queue.c
> > index 2399cfb69b..8b6b85c1ad 100644
> > --- a/app/test-eventdev/test_perf_queue.c
> > +++ b/app/test-eventdev/test_perf_queue.c
> > @@ -50,7 +50,8 @@ perf_queue_worker(void *arg, const int enable_fwd_latency)
> > continue;
> > }
> >
> > - if (prod_crypto_type && (ev.event_type == RTE_EVENT_TYPE_CRYPTODEV)) {
> > + if ((prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR) &&
> > + (ev.event_type == RTE_EVENT_TYPE_CRYPTODEV)) {
> > if (perf_handle_crypto_ev(&ev, &pe, enable_fwd_latency))
> > continue;
> > } else {
> > @@ -65,10 +66,10 @@ perf_queue_worker(void *arg, const int enable_fwd_latency)
> > /* last stage in pipeline */
> > if (unlikely(stage == laststage)) {
> > if (enable_fwd_latency)
> > - cnt = perf_process_last_stage_latency(pool, prod_crypto_type,
> > + cnt = perf_process_last_stage_latency(pool, prod_type,
> > &ev, w, bufs, sz, cnt);
> > else
> > - cnt = perf_process_last_stage(pool, prod_crypto_type,
> > + cnt = perf_process_last_stage(pool, prod_type,
> > &ev, w, bufs, sz, cnt);
> > } else {
> > fwd_event(&ev, sched_type_list, nb_stages);
> > @@ -101,7 +102,8 @@ perf_queue_worker_burst(void *arg, const int enable_fwd_latency)
> > }
> >
> > for (i = 0; i < nb_rx; i++) {
> > - if (prod_crypto_type && (ev[i].event_type == RTE_EVENT_TYPE_CRYPTODEV)) {
> > + if ((prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR) &&
> > + (ev[i].event_type == RTE_EVENT_TYPE_CRYPTODEV)) {
> > if (perf_handle_crypto_ev(&ev[i], &pe, enable_fwd_latency))
> > continue;
> > }
> > @@ -118,9 +120,9 @@ perf_queue_worker_burst(void *arg, const int enable_fwd_latency)
> > if (unlikely(stage == laststage)) {
> > if (enable_fwd_latency)
> > cnt = perf_process_last_stage_latency(pool,
> > - prod_crypto_type, &ev[i], w, bufs, sz, cnt);
> > + prod_type, &ev[i], w, bufs, sz, cnt);
> > else
> > - cnt = perf_process_last_stage(pool, prod_crypto_type,
> > + cnt = perf_process_last_stage(pool, prod_type,
> > &ev[i], w, bufs, sz, cnt);
> >
> > ev[i].op = RTE_EVENT_OP_RELEASE;
> > @@ -151,7 +153,7 @@ perf_queue_worker_vector(void *arg, const int enable_fwd_latency)
> >
> > RTE_SET_USED(sz);
> > RTE_SET_USED(cnt);
> > - RTE_SET_USED(prod_crypto_type);
> > + RTE_SET_USED(prod_type);
> >
> > while (t->done == false) {
> > deq = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
> > @@ -346,6 +348,18 @@ perf_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt)
> > return ret;
> > }
> > }
> > + } else if (opt->prod_type == EVT_PROD_TYPE_EVENT_DMA_ADPTR) {
> > + uint8_t dma_dev_id, dma_dev_count;
> > +
> > + dma_dev_count = rte_dma_count_avail();
> > + for (dma_dev_id = 0; dma_dev_id < dma_dev_count; dma_dev_id++) {
>
> Since we only use the 1st dma_dev_id in fp, we should skip this loop.
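>
> i.e. (untested) just:
>
>     ret = rte_dma_start(0);
>     if (ret) {
>         evt_err("Failed to start dmadev 0");
>         return ret;
>     }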
>
> > + ret = rte_dma_start(dma_dev_id);
> > + if (ret) {
> > + evt_err("Failed to start dmadev %u",
> > + dma_dev_id);
> > + return ret;
> > + }
> > + }
> > }
> >
> > return 0;
> > @@ -389,6 +403,7 @@ static const struct evt_test_ops perf_queue = {
> > .mempool_setup = perf_mempool_setup,
> > .ethdev_setup = perf_ethdev_setup,
> > .cryptodev_setup = perf_cryptodev_setup,
> > + .dmadev_setup = perf_dmadev_setup,
> > .ethdev_rx_stop = perf_ethdev_rx_stop,
> > .eventdev_setup = perf_queue_eventdev_setup,
> > .launch_lcores = perf_queue_launch_lcores,
> > @@ -396,6 +411,7 @@ static const struct evt_test_ops perf_queue = {
> > .mempool_destroy = perf_mempool_destroy,
> > .ethdev_destroy = perf_ethdev_destroy,
> > .cryptodev_destroy = perf_cryptodev_destroy,
> > + .dmadev_destroy = perf_dmadev_destroy,
> > .test_result = perf_test_result,
> > .test_destroy = perf_test_destroy,
> > };
> > diff --git a/doc/guides/tools/testeventdev.rst b/doc/guides/tools/testeventdev.rst
> > index 3fcc2c9894..a29afe2cb2 100644
> > --- a/doc/guides/tools/testeventdev.rst
> > +++ b/doc/guides/tools/testeventdev.rst
>
> Please Add example command
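>
> e.g. something along these lines (core masks and device args below are
> just placeholders):
>
>     sudo <build_dir>/app/dpdk-test-eventdev -l 0-2 -a <EVENTDEV> -a <DMADEV> -- \
>         --prod_type_dmadev --dma_adptr_mode 1 --test=perf_queue --stlist=a \
>         --wlcores=1 --plcores=2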
>
> > @@ -124,6 +124,10 @@ The following are the application command-line options:
> >
> > Use crypto device as producer.
> >
> > +* ``--prod_type_dmadev``
> > +
> > + Use dma device as producer.
> > +
> > * ``--timer_tick_nsec``
> >
> > Used to dictate number of nano seconds between bucket traversal of the
> > @@ -157,6 +161,11 @@ The following are the application command-line options:
> > Set crypto adapter mode. Use 0 for OP_NEW (default) and 1 for
> > OP_FORWARD mode.
> >
> > +* ``--dma_adptr_mode``
> > +
> > + Set dma adapter mode. Use 0 for OP_NEW (default) and 1 for
> > + OP_FORWARD mode.
> > +
> > * ``--crypto_op_type``
> >
> > Set crypto operation type. Use 0 for symmetric crypto ops (default)
> > @@ -459,6 +468,7 @@ Supported application command line options are following::
> > --prod_type_timerdev_burst
> > --prod_type_timerdev
> > --prod_type_cryptodev
> > + --prod_type_dmadev
> > --prod_enq_burst_sz
> > --timer_tick_nsec
> > --max_tmo_nsec
> > @@ -467,6 +477,7 @@ Supported application command line options are following::
> > --nb_timer_adptrs
> > --deq_tmo_nsec
> > --crypto_adptr_mode
> > + --dma_adptr_mode
> >
> > Example
> > ^^^^^^^
> > @@ -570,6 +581,7 @@ Supported application command line options are following::
> > --prod_type_timerdev_burst
> > --prod_type_timerdev
> > --prod_type_cryptodev
> > + --prod_type_dmadev
> > --timer_tick_nsec
> > --max_tmo_nsec
> > --expiry_nsec
> > @@ -577,6 +589,7 @@ Supported application command line options are following::
> > --nb_timer_adptrs
> > --deq_tmo_nsec
> > --crypto_adptr_mode
> > + --dma_adptr_mode
> >
> > Example
> > ^^^^^^^
> > --
> > 2.34.1