* [dpdk-dev] [PATCH v1 1/5] eventdev: rx_adapter: add support to configure event buffer size @ 2021-09-18 13:11 Naga Harish K S V 2021-09-18 13:11 ` [dpdk-dev] [PATCH v1 2/5] test/event: add unit test for rte_event_eth_rx_adapter_create2 api Naga Harish K S V ` (6 more replies) 0 siblings, 7 replies; 81+ messages in thread From: Naga Harish K S V @ 2021-09-18 13:11 UTC (permalink / raw) To: jerinj, jay.jayatheerthan; +Cc: dev, Ganapati Kundapura Currently Rx event buffer is static array with a default size of 192(6*BATCH_SIZE). ``rte_event_eth_rx_adapter_create2`` api is added which takes ``struct rte_event_eth_rx_adapter_params`` to configure event buffer size in addition other params . The event buffer is allocated dynamically at run time aligned to BATCH_SIZE + 2*BATCH_SIZE. Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com> Signed-off-by: Ganapati Kundapura <ganapati.kundapura@intel.com> --- .../prog_guide/event_ethernet_rx_adapter.rst | 7 ++ lib/eventdev/rte_event_eth_rx_adapter.c | 87 +++++++++++++++++-- lib/eventdev/rte_event_eth_rx_adapter.h | 45 +++++++++- lib/eventdev/version.map | 2 + 4 files changed, 133 insertions(+), 8 deletions(-) diff --git a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst index 0780b6f711..cbf694c66b 100644 --- a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst +++ b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst @@ -62,6 +62,13 @@ service function and needs to create an event port for it. The callback is expected to fill the ``struct rte_event_eth_rx_adapter_conf structure`` passed to it. +If the application desires to control the event buffer size, it can use the +``rte_event_eth_rx_adapter_create2()`` api. The event buffer size is +specified using ``struct rte_event_eth_rx_adapter_params::event_buf_size``. 
+The function is passed the event device to be associated with the adapter +and port configuration for the adapter to setup an event port if the +adapter needs to use a service function. + Adding Rx Queues to the Adapter Instance ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/lib/eventdev/rte_event_eth_rx_adapter.c b/lib/eventdev/rte_event_eth_rx_adapter.c index f2dc69503d..f567a83223 100644 --- a/lib/eventdev/rte_event_eth_rx_adapter.c +++ b/lib/eventdev/rte_event_eth_rx_adapter.c @@ -82,7 +82,9 @@ struct rte_eth_event_enqueue_buffer { /* Count of events in this buffer */ uint16_t count; /* Array of events in this buffer */ - struct rte_event events[ETH_EVENT_BUFFER_SIZE]; + struct rte_event *events; + /* size of event buffer */ + uint16_t events_size; /* Event enqueue happens from head */ uint16_t head; /* New packets from rte_eth_rx_burst is enqued from tail */ @@ -919,7 +921,7 @@ rxa_buffer_mbufs(struct rte_event_eth_rx_adapter *rx_adapter, dropped = 0; nb_cb = dev_info->cb_fn(eth_dev_id, rx_queue_id, buf->last | - (RTE_DIM(buf->events) & ~buf->last_mask), + (buf->events_size & ~buf->last_mask), buf->count >= BATCH_SIZE ? 
buf->count - BATCH_SIZE : 0, &buf->events[buf->tail], @@ -945,7 +947,7 @@ rxa_pkt_buf_available(struct rte_eth_event_enqueue_buffer *buf) uint32_t nb_req = buf->tail + BATCH_SIZE; if (!buf->last) { - if (nb_req <= RTE_DIM(buf->events)) + if (nb_req <= buf->events_size) return true; if (buf->head >= BATCH_SIZE) { @@ -2164,12 +2166,15 @@ rxa_ctrl(uint8_t id, int start) return 0; } -int -rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, - rte_event_eth_rx_adapter_conf_cb conf_cb, - void *conf_arg) +static int +rxa_create(uint8_t id, uint8_t dev_id, + struct rte_event_eth_rx_adapter_params *rxa_params, + rte_event_eth_rx_adapter_conf_cb conf_cb, + void *conf_arg) { struct rte_event_eth_rx_adapter *rx_adapter; + struct rte_eth_event_enqueue_buffer *buf; + struct rte_event *events; int ret; int socket_id; uint16_t i; @@ -2184,6 +2189,7 @@ rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL); RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); + if (conf_cb == NULL) return -EINVAL; @@ -2231,11 +2237,30 @@ rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, rte_free(rx_adapter); return -ENOMEM; } + rte_spinlock_init(&rx_adapter->rx_lock); + for (i = 0; i < RTE_MAX_ETHPORTS; i++) rx_adapter->eth_devices[i].dev = &rte_eth_devices[i]; + /* Rx adapter event buffer allocation */ + buf = &rx_adapter->event_enqueue_buffer; + buf->events_size = RTE_ALIGN(rxa_params->event_buf_size, BATCH_SIZE); + + events = rte_zmalloc_socket(rx_adapter->mem_name, + buf->events_size * sizeof(*events), + 0, socket_id); + if (events == NULL) { + RTE_EDEV_LOG_ERR("Failed to allocate mem for event buffer\n"); + rte_free(rx_adapter->eth_devices); + rte_free(rx_adapter); + return -ENOMEM; + } + + rx_adapter->event_enqueue_buffer.events = events; + event_eth_rx_adapter[id] = rx_adapter; + if (conf_cb == rxa_default_conf_cb) rx_adapter->default_cb_arg = 1; rte_eventdev_trace_eth_rx_adapter_create(id, dev_id, 
conf_cb, @@ -2243,6 +2268,50 @@ rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, return 0; } +int +rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, + rte_event_eth_rx_adapter_conf_cb conf_cb, + void *conf_arg) +{ + struct rte_event_eth_rx_adapter_params rxa_params; + + /* Event buffer with default size = 6*BATCH_SIZE */ + rxa_params.event_buf_size = ETH_EVENT_BUFFER_SIZE; + return rxa_create(id, dev_id, &rxa_params, conf_cb, conf_arg); +} + +int +rte_event_eth_rx_adapter_create2(uint8_t id, uint8_t dev_id, + struct rte_event_eth_rx_adapter_params *rxa_params, + struct rte_event_port_conf *port_config) +{ + struct rte_event_port_conf *pc; + int ret; + + if (port_config == NULL) + return -EINVAL; + + if (rxa_params == NULL || rxa_params->event_buf_size == 0) + return -EINVAL; + + pc = rte_malloc(NULL, sizeof(*pc), 0); + if (pc == NULL) + return -ENOMEM; + + *pc = *port_config; + + /* event buff size aligned to BATCH_SIZE + 2*BATCH_SIZE */ + rxa_params->event_buf_size = RTE_ALIGN(rxa_params->event_buf_size, + BATCH_SIZE); + rxa_params->event_buf_size += BATCH_SIZE + BATCH_SIZE; + + ret = rxa_create(id, dev_id, rxa_params, rxa_default_conf_cb, pc); + if (ret) + rte_free(pc); + + return ret; +} + int rte_event_eth_rx_adapter_create(uint8_t id, uint8_t dev_id, struct rte_event_port_conf *port_config) @@ -2252,12 +2321,14 @@ rte_event_eth_rx_adapter_create(uint8_t id, uint8_t dev_id, if (port_config == NULL) return -EINVAL; + RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL); pc = rte_malloc(NULL, sizeof(*pc), 0); if (pc == NULL) return -ENOMEM; *pc = *port_config; + ret = rte_event_eth_rx_adapter_create_ext(id, dev_id, rxa_default_conf_cb, pc); @@ -2286,6 +2357,7 @@ rte_event_eth_rx_adapter_free(uint8_t id) if (rx_adapter->default_cb_arg) rte_free(rx_adapter->conf_arg); rte_free(rx_adapter->eth_devices); + rte_free(rx_adapter->event_enqueue_buffer.events); rte_free(rx_adapter); event_eth_rx_adapter[id] = NULL; @@ -2658,6 +2730,7 @@ 
rte_event_eth_rx_adapter_stats_get(uint8_t id, stats->rx_packets += dev_stats_sum.rx_packets; stats->rx_enq_count += dev_stats_sum.rx_enq_count; + return 0; } diff --git a/lib/eventdev/rte_event_eth_rx_adapter.h b/lib/eventdev/rte_event_eth_rx_adapter.h index 3f8b362295..a1b5e0ed37 100644 --- a/lib/eventdev/rte_event_eth_rx_adapter.h +++ b/lib/eventdev/rte_event_eth_rx_adapter.h @@ -26,6 +26,7 @@ * The ethernet Rx event adapter's functions are: * - rte_event_eth_rx_adapter_create_ext() * - rte_event_eth_rx_adapter_create() + * - rte_event_eth_rx_adapter_create2() * - rte_event_eth_rx_adapter_free() * - rte_event_eth_rx_adapter_queue_add() * - rte_event_eth_rx_adapter_queue_del() @@ -36,7 +37,7 @@ * * The application creates an ethernet to event adapter using * rte_event_eth_rx_adapter_create_ext() or rte_event_eth_rx_adapter_create() - * functions. + * or rte_event_eth_rx_adapter_create2() functions. * The adapter needs to know which ethernet rx queues to poll for mbufs as well * as event device parameters such as the event queue identifier, event * priority and scheduling type that the adapter should use when constructing @@ -256,6 +257,14 @@ struct rte_event_eth_rx_adapter_vector_limits { */ }; +/** + * A structure to hold adapter config params + */ +struct rte_event_eth_rx_adapter_params { + uint16_t event_buf_size; + /**< size of event buffer for the adapter */ +}; + /** * * Callback function invoked by the SW adapter before it continues @@ -330,6 +339,40 @@ int rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, rte_event_eth_rx_adapter_conf_cb conf_cb, void *conf_arg); +/** + * Create a new ethernet Rx event adapter with the specified identifier. + * This function allocates Rx adapter event buffer with the size specified + * in rxa_params aligned to BATCH_SIZE plus (BATCH_SIZE+BATCH_SIZE) and + * uses an internal configuration function that creates an event port. 
+ * This default function reconfigures the event device with an + * additional event port and setups up the event port using the port config + * parameter passed into this function. In case the application needs more + * control in configuration of the service, it should use the + * rte_event_eth_rx_adapter_create_ext() version. + * + * @param id + * The identifier of the ethernet Rx event adapter. + * + * @param dev_id + * The identifier of the event device to configure. + * + * @param rxa_params + * Pointer to struct rte_event_eth_rx_adapter_params containing + * size to allocate rx event buffer. + * + * @param port_config + * Argument of type *rte_event_port_conf* that is passed to the conf_cb + * function. + * + * @return + * - 0: Success + * - <0: Error code on failure + */ +__rte_experimental +int rte_event_eth_rx_adapter_create2(uint8_t id, uint8_t dev_id, + struct rte_event_eth_rx_adapter_params *rxa_params, + struct rte_event_port_conf *port_config); + /** * Create a new ethernet Rx event adapter with the specified identifier. * This function uses an internal configuration function that creates an event diff --git a/lib/eventdev/version.map b/lib/eventdev/version.map index cd86d2d908..868d352eb3 100644 --- a/lib/eventdev/version.map +++ b/lib/eventdev/version.map @@ -138,6 +138,8 @@ EXPERIMENTAL { __rte_eventdev_trace_port_setup; # added in 20.11 rte_event_pmd_pci_probe_named; + # added in 21.11 + rte_event_eth_rx_adapter_create2; #added in 21.05 rte_event_vector_pool_create; -- 2.25.1 ^ permalink raw reply [flat|nested] 81+ messages in thread
* [dpdk-dev] [PATCH v1 2/5] test/event: add unit test for rte_event_eth_rx_adapter_create2 api 2021-09-18 13:11 [dpdk-dev] [PATCH v1 1/5] eventdev: rx_adapter: add support to configure event buffer size Naga Harish K S V @ 2021-09-18 13:11 ` Naga Harish K S V 2021-09-18 13:11 ` [dpdk-dev] [PATCH v1 3/5] eventdev:rx_adapter:add per queue event buffer configure support Naga Harish K S V ` (5 subsequent siblings) 6 siblings, 0 replies; 81+ messages in thread From: Naga Harish K S V @ 2021-09-18 13:11 UTC (permalink / raw) To: jerinj, jay.jayatheerthan; +Cc: dev this patch adds unit test for rte_event_eth_rx_adapter_create2 api and validate all possible input combinations. Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com> --- app/test/test_event_eth_rx_adapter.c | 53 +++++++++++++++++++++++++--- 1 file changed, 49 insertions(+), 4 deletions(-) diff --git a/app/test/test_event_eth_rx_adapter.c b/app/test/test_event_eth_rx_adapter.c index add4d8a678..6151027022 100644 --- a/app/test/test_event_eth_rx_adapter.c +++ b/app/test/test_event_eth_rx_adapter.c @@ -428,6 +428,50 @@ adapter_create_free(void) return TEST_SUCCESS; } +static int +adapter_create_free_v2(void) +{ + int err; + + struct rte_event_port_conf rx_p_conf = { + .dequeue_depth = 8, + .enqueue_depth = 8, + .new_event_threshold = 1200, + }; + + struct rte_event_eth_rx_adapter_params rxa_params = { + .event_buf_size = 1024 + }; + + err = rte_event_eth_rx_adapter_create2(TEST_INST_ID, TEST_DEV_ID, + NULL, NULL); + TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err); + + err = rte_event_eth_rx_adapter_create2(TEST_INST_ID, TEST_DEV_ID, + &rxa_params, &rx_p_conf); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + err = rte_event_eth_rx_adapter_create2(TEST_INST_ID, TEST_DEV_ID, + &rxa_params, &rx_p_conf); + TEST_ASSERT(err == -EEXIST, "Expected -EEXIST %d got %d", -EEXIST, err); + + rxa_params.event_buf_size = 0; + err = rte_event_eth_rx_adapter_create2(TEST_INST_ID, TEST_DEV_ID, + 
&rxa_params, &rx_p_conf); + TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err); + + err = rte_event_eth_rx_adapter_free(TEST_INST_ID); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + err = rte_event_eth_rx_adapter_free(TEST_INST_ID); + TEST_ASSERT(err == -EINVAL, "Expected -EINVAL %d got %d", -EINVAL, err); + + err = rte_event_eth_rx_adapter_free(1); + TEST_ASSERT(err == -EINVAL, "Expected -EINVAL %d got %d", -EINVAL, err); + + return TEST_SUCCESS; +} + static int adapter_queue_add_del(void) { @@ -435,7 +479,7 @@ adapter_queue_add_del(void) struct rte_event ev; uint32_t cap; - struct rte_event_eth_rx_adapter_queue_conf queue_config; + struct rte_event_eth_rx_adapter_queue_conf queue_config = {0}; err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID, TEST_ETHDEV_ID, &cap); @@ -523,7 +567,7 @@ adapter_multi_eth_add_del(void) uint16_t port_index, port_index_base, drv_id = 0; char driver_name[50]; - struct rte_event_eth_rx_adapter_queue_conf queue_config; + struct rte_event_eth_rx_adapter_queue_conf queue_config = {0}; ev.queue_id = 0; ev.sched_type = RTE_SCHED_TYPE_ATOMIC; @@ -594,7 +638,7 @@ adapter_intr_queue_add_del(void) struct rte_event ev; uint32_t cap; uint16_t eth_port; - struct rte_event_eth_rx_adapter_queue_conf queue_config; + struct rte_event_eth_rx_adapter_queue_conf queue_config = {0}; if (!default_params.rx_intr_port_inited) return 0; @@ -687,7 +731,7 @@ adapter_start_stop(void) ev.sched_type = RTE_SCHED_TYPE_ATOMIC; ev.priority = 0; - struct rte_event_eth_rx_adapter_queue_conf queue_config; + struct rte_event_eth_rx_adapter_queue_conf queue_config = {0}; queue_config.rx_queue_flags = 0; if (default_params.caps & @@ -753,6 +797,7 @@ static struct unit_test_suite event_eth_rx_tests = { .teardown = testsuite_teardown, .unit_test_cases = { TEST_CASE_ST(NULL, NULL, adapter_create_free), + TEST_CASE_ST(NULL, NULL, adapter_create_free_v2), TEST_CASE_ST(adapter_create, adapter_free, adapter_queue_add_del), TEST_CASE_ST(adapter_create, 
adapter_free, -- 2.25.1 ^ permalink raw reply [flat|nested] 81+ messages in thread
* [dpdk-dev] [PATCH v1 3/5] eventdev:rx_adapter:add per queue event buffer configure support 2021-09-18 13:11 [dpdk-dev] [PATCH v1 1/5] eventdev: rx_adapter: add support to configure event buffer size Naga Harish K S V 2021-09-18 13:11 ` [dpdk-dev] [PATCH v1 2/5] test/event: add unit test for rte_event_eth_rx_adapter_create2 api Naga Harish K S V @ 2021-09-18 13:11 ` Naga Harish K S V 2021-09-20 6:23 ` Jerin Jacob 2021-09-18 13:11 ` [dpdk-dev] [PATCH v1 4/5] eventdev:rx_adapter: implement per queue event buffer Naga Harish K S V ` (4 subsequent siblings) 6 siblings, 1 reply; 81+ messages in thread From: Naga Harish K S V @ 2021-09-18 13:11 UTC (permalink / raw) To: jerinj, jay.jayatheerthan; +Cc: dev To configure per queue event buffer size, applications sets ``rte_event_eth_rx_adapter_params::use_queue_event_buf`` flag as true and is passed to `rte_event_eth_rx_adapter_create2` api. The per queue event buffer size is populated in ``rte_event_eth_rx_adapter_queue_conf::event_buf_size`` and passed to ``rte_event_eth_rx_adapter_queue_add``. Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com> --- .../prog_guide/event_ethernet_rx_adapter.rst | 19 ++++++++++++------- lib/eventdev/rte_event_eth_rx_adapter.h | 4 ++++ 2 files changed, 16 insertions(+), 7 deletions(-) diff --git a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst index cbf694c66b..55d09dbcb8 100644 --- a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst +++ b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst @@ -62,12 +62,14 @@ service function and needs to create an event port for it. The callback is expected to fill the ``struct rte_event_eth_rx_adapter_conf structure`` passed to it. -If the application desires to control the event buffer size, it can use the -``rte_event_eth_rx_adapter_create2()`` api. The event buffer size is -specified using ``struct rte_event_eth_rx_adapter_params::event_buf_size``. 
-The function is passed the event device to be associated with the adapter -and port configuration for the adapter to setup an event port if the -adapter needs to use a service function. +If the application desires to control the event buffer size at adapter level, +it can use the ``rte_event_eth_rx_adapter_create2()`` api. The event buffer size +is specified using ``struct rte_event_eth_rx_adapter_params::event_buf_size``. +To configure the event buffer size at queue level, the boolean flag +``struct rte_event_eth_rx_adapter_params::use_queue_event_buf`` need to be +set to true. The function is passed the event device to be associated with +the adapter and port configuration for the adapter to setup an event port +if the adapter needs to use a service function. Adding Rx Queues to the Adapter Instance ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -79,7 +81,9 @@ parameter. Event information for packets from this Rx queue is encoded in the ``ev`` field of ``struct rte_event_eth_rx_adapter_queue_conf``. The servicing_weight member of the struct rte_event_eth_rx_adapter_queue_conf is the relative polling frequency of the Rx queue and is applicable when the -adapter uses a service core function. +adapter uses a service core function. The applications can configure queue +event buffer size in ``struct rte_event_eth_rx_adapter_queue_conf::event_buf_size`` +parameter. .. code-block:: c @@ -90,6 +94,7 @@ adapter uses a service core function. 
queue_config.rx_queue_flags = 0; queue_config.ev = ev; queue_config.servicing_weight = 1; + queue_config.event_buf_size = 1024; err = rte_event_eth_rx_adapter_queue_add(id, eth_dev_id, diff --git a/lib/eventdev/rte_event_eth_rx_adapter.h b/lib/eventdev/rte_event_eth_rx_adapter.h index a1b5e0ed37..f9e63dc126 100644 --- a/lib/eventdev/rte_event_eth_rx_adapter.h +++ b/lib/eventdev/rte_event_eth_rx_adapter.h @@ -199,6 +199,8 @@ struct rte_event_eth_rx_adapter_queue_conf { * Valid when RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR flag is set in * @see rte_event_eth_rx_adapter_queue_conf::rx_queue_flags. */ + uint16_t event_buf_size; + /**< event buffer size for this queue */ }; /** @@ -263,6 +265,8 @@ struct rte_event_eth_rx_adapter_vector_limits { struct rte_event_eth_rx_adapter_params { uint16_t event_buf_size; /**< size of event buffer for the adapter */ + bool use_queue_event_buf; + /**< flag to indicate that event buffer is separate for each queue */ }; /** -- 2.25.1 ^ permalink raw reply [flat|nested] 81+ messages in thread
* Re: [dpdk-dev] [PATCH v1 3/5] eventdev:rx_adapter:add per queue event buffer configure support 2021-09-18 13:11 ` [dpdk-dev] [PATCH v1 3/5] eventdev:rx_adapter:add per queue event buffer configure support Naga Harish K S V @ 2021-09-20 6:23 ` Jerin Jacob 2021-09-21 13:46 ` Naga Harish K, S V 0 siblings, 1 reply; 81+ messages in thread From: Jerin Jacob @ 2021-09-20 6:23 UTC (permalink / raw) To: Naga Harish K S V, Pavan Nikhilesh, Mattias Rönnblom Cc: Jerin Jacob, Jayatheerthan, Jay, dpdk-dev On Sat, Sep 18, 2021 at 6:42 PM Naga Harish K S V <s.v.naga.harish.k@intel.com> wrote: > > To configure per queue event buffer size, applications sets > ``rte_event_eth_rx_adapter_params::use_queue_event_buf`` flag > as true and is passed to `rte_event_eth_rx_adapter_create2` api. > > The per queue event buffer size is populated in > ``rte_event_eth_rx_adapter_queue_conf::event_buf_size`` and passed > to ``rte_event_eth_rx_adapter_queue_add``. > > Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com> Please change the subject to eventdev/rx_adapter: ... rest looks good to me. > --- > .../prog_guide/event_ethernet_rx_adapter.rst | 19 ++++++++++++------- > lib/eventdev/rte_event_eth_rx_adapter.h | 4 ++++ > 2 files changed, 16 insertions(+), 7 deletions(-) > > diff --git a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst > index cbf694c66b..55d09dbcb8 100644 > --- a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst > +++ b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst > @@ -62,12 +62,14 @@ service function and needs to create an event port for it. The callback is > expected to fill the ``struct rte_event_eth_rx_adapter_conf structure`` > passed to it. > > -If the application desires to control the event buffer size, it can use the > -``rte_event_eth_rx_adapter_create2()`` api. The event buffer size is > -specified using ``struct rte_event_eth_rx_adapter_params::event_buf_size``. 
> -The function is passed the event device to be associated with the adapter > -and port configuration for the adapter to setup an event port if the > -adapter needs to use a service function. > +If the application desires to control the event buffer size at adapter level, > +it can use the ``rte_event_eth_rx_adapter_create2()`` api. The event buffer size > +is specified using ``struct rte_event_eth_rx_adapter_params::event_buf_size``. > +To configure the event buffer size at queue level, the boolean flag > +``struct rte_event_eth_rx_adapter_params::use_queue_event_buf`` need to be > +set to true. The function is passed the event device to be associated with > +the adapter and port configuration for the adapter to setup an event port > +if the adapter needs to use a service function. > > Adding Rx Queues to the Adapter Instance > ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ > @@ -79,7 +81,9 @@ parameter. Event information for packets from this Rx queue is encoded in the > ``ev`` field of ``struct rte_event_eth_rx_adapter_queue_conf``. The > servicing_weight member of the struct rte_event_eth_rx_adapter_queue_conf > is the relative polling frequency of the Rx queue and is applicable when the > -adapter uses a service core function. > +adapter uses a service core function. The applications can configure queue > +event buffer size in ``struct rte_event_eth_rx_adapter_queue_conf::event_buf_size`` > +parameter. > > .. code-block:: c > > @@ -90,6 +94,7 @@ adapter uses a service core function. 
> queue_config.rx_queue_flags = 0; > queue_config.ev = ev; > queue_config.servicing_weight = 1; > + queue_config.event_buf_size = 1024; > > err = rte_event_eth_rx_adapter_queue_add(id, > eth_dev_id, > diff --git a/lib/eventdev/rte_event_eth_rx_adapter.h b/lib/eventdev/rte_event_eth_rx_adapter.h > index a1b5e0ed37..f9e63dc126 100644 > --- a/lib/eventdev/rte_event_eth_rx_adapter.h > +++ b/lib/eventdev/rte_event_eth_rx_adapter.h > @@ -199,6 +199,8 @@ struct rte_event_eth_rx_adapter_queue_conf { > * Valid when RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR flag is set in > * @see rte_event_eth_rx_adapter_queue_conf::rx_queue_flags. > */ > + uint16_t event_buf_size; > + /**< event buffer size for this queue */ > }; > > /** > @@ -263,6 +265,8 @@ struct rte_event_eth_rx_adapter_vector_limits { > struct rte_event_eth_rx_adapter_params { > uint16_t event_buf_size; > /**< size of event buffer for the adapter */ > + bool use_queue_event_buf; > + /**< flag to indicate that event buffer is separate for each queue */ > }; > > /** > -- > 2.25.1 > ^ permalink raw reply [flat|nested] 81+ messages in thread
* Re: [dpdk-dev] [PATCH v1 3/5] eventdev:rx_adapter:add per queue event buffer configure support 2021-09-20 6:23 ` Jerin Jacob @ 2021-09-21 13:46 ` Naga Harish K, S V 0 siblings, 0 replies; 81+ messages in thread From: Naga Harish K, S V @ 2021-09-21 13:46 UTC (permalink / raw) To: Jerin Jacob, Pavan Nikhilesh, mattias.ronnblom Cc: Jerin Jacob, Jayatheerthan, Jay, dpdk-dev Hi Jerin, > -----Original Message----- > From: Jerin Jacob <jerinjacobk@gmail.com> > Sent: Monday, September 20, 2021 11:53 AM > To: Naga Harish K, S V <s.v.naga.harish.k@intel.com>; Pavan Nikhilesh > <pbhagavatula@marvell.com>; mattias.ronnblom > <mattias.ronnblom@ericsson.com> > Cc: Jerin Jacob <jerinj@marvell.com>; Jayatheerthan, Jay > <jay.jayatheerthan@intel.com>; dpdk-dev <dev@dpdk.org> > Subject: Re: [dpdk-dev] [PATCH v1 3/5] eventdev:rx_adapter:add per queue > event buffer configure support > > On Sat, Sep 18, 2021 at 6:42 PM Naga Harish K S V > <s.v.naga.harish.k@intel.com> wrote: > > > > To configure per queue event buffer size, applications sets > > ``rte_event_eth_rx_adapter_params::use_queue_event_buf`` flag as true > > and is passed to `rte_event_eth_rx_adapter_create2` api. > > > > The per queue event buffer size is populated in > > ``rte_event_eth_rx_adapter_queue_conf::event_buf_size`` and passed to > > ``rte_event_eth_rx_adapter_queue_add``. > > > > Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com> > > Please change the subject to > eventdev/rx_adapter: ... > It is updated in latest patch set. > rest looks good to me. 
> > > --- > > .../prog_guide/event_ethernet_rx_adapter.rst | 19 ++++++++++++------- > > lib/eventdev/rte_event_eth_rx_adapter.h | 4 ++++ > > 2 files changed, 16 insertions(+), 7 deletions(-) > > > > diff --git a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst > > b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst > > index cbf694c66b..55d09dbcb8 100644 > > --- a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst > > +++ b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst > > @@ -62,12 +62,14 @@ service function and needs to create an event port > > for it. The callback is expected to fill the ``struct > > rte_event_eth_rx_adapter_conf structure`` passed to it. > > > > -If the application desires to control the event buffer size, it can > > use the -``rte_event_eth_rx_adapter_create2()`` api. The event buffer > > size is -specified using ``struct > rte_event_eth_rx_adapter_params::event_buf_size``. > > -The function is passed the event device to be associated with the > > adapter -and port configuration for the adapter to setup an event port > > if the -adapter needs to use a service function. > > +If the application desires to control the event buffer size at > > +adapter level, it can use the ``rte_event_eth_rx_adapter_create2()`` > > +api. The event buffer size is specified using ``struct > rte_event_eth_rx_adapter_params::event_buf_size``. > > +To configure the event buffer size at queue level, the boolean flag > > +``struct rte_event_eth_rx_adapter_params::use_queue_event_buf`` > need > > +to be set to true. The function is passed the event device to be > > +associated with the adapter and port configuration for the adapter to > > +setup an event port if the adapter needs to use a service function. > > > > Adding Rx Queues to the Adapter Instance > > ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ > > @@ -79,7 +81,9 @@ parameter. 
Event information for packets from this > > Rx queue is encoded in the ``ev`` field of ``struct > > rte_event_eth_rx_adapter_queue_conf``. The servicing_weight member > of > > the struct rte_event_eth_rx_adapter_queue_conf > > is the relative polling frequency of the Rx queue and is applicable > > when the -adapter uses a service core function. > > +adapter uses a service core function. The applications can configure > > +queue event buffer size in ``struct > > +rte_event_eth_rx_adapter_queue_conf::event_buf_size`` > > +parameter. > > > > .. code-block:: c > > > > @@ -90,6 +94,7 @@ adapter uses a service core function. > > queue_config.rx_queue_flags = 0; > > queue_config.ev = ev; > > queue_config.servicing_weight = 1; > > + queue_config.event_buf_size = 1024; > > > > err = rte_event_eth_rx_adapter_queue_add(id, > > eth_dev_id, diff > > --git a/lib/eventdev/rte_event_eth_rx_adapter.h > > b/lib/eventdev/rte_event_eth_rx_adapter.h > > index a1b5e0ed37..f9e63dc126 100644 > > --- a/lib/eventdev/rte_event_eth_rx_adapter.h > > +++ b/lib/eventdev/rte_event_eth_rx_adapter.h > > @@ -199,6 +199,8 @@ struct rte_event_eth_rx_adapter_queue_conf { > > * Valid when RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR > flag is set in > > * @see rte_event_eth_rx_adapter_queue_conf::rx_queue_flags. > > */ > > + uint16_t event_buf_size; > > + /**< event buffer size for this queue */ > > }; > > > > /** > > @@ -263,6 +265,8 @@ struct rte_event_eth_rx_adapter_vector_limits { > > struct rte_event_eth_rx_adapter_params { > > uint16_t event_buf_size; > > /**< size of event buffer for the adapter */ > > + bool use_queue_event_buf; > > + /**< flag to indicate that event buffer is separate for each > > + queue */ > > }; > > > > /** > > -- > > 2.25.1 > > Regards Harish ^ permalink raw reply [flat|nested] 81+ messages in thread
* [dpdk-dev] [PATCH v1 4/5] eventdev:rx_adapter: implement per queue event buffer 2021-09-18 13:11 [dpdk-dev] [PATCH v1 1/5] eventdev: rx_adapter: add support to configure event buffer size Naga Harish K S V 2021-09-18 13:11 ` [dpdk-dev] [PATCH v1 2/5] test/event: add unit test for rte_event_eth_rx_adapter_create2 api Naga Harish K S V 2021-09-18 13:11 ` [dpdk-dev] [PATCH v1 3/5] eventdev:rx_adapter:add per queue event buffer configure support Naga Harish K S V @ 2021-09-18 13:11 ` Naga Harish K S V 2021-09-18 13:11 ` [dpdk-dev] [PATCH v1 5/5] test/eventdev: add per rx queue event buffer unit Naga Harish K S V ` (3 subsequent siblings) 6 siblings, 0 replies; 81+ messages in thread From: Naga Harish K S V @ 2021-09-18 13:11 UTC (permalink / raw) To: jerinj, jay.jayatheerthan; +Cc: dev this patch implement the per queue event buffer after required validations. Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com> --- lib/eventdev/rte_event_eth_rx_adapter.c | 185 ++++++++++++++++++------ 1 file changed, 138 insertions(+), 47 deletions(-) diff --git a/lib/eventdev/rte_event_eth_rx_adapter.c b/lib/eventdev/rte_event_eth_rx_adapter.c index f567a83223..ab315e0d82 100644 --- a/lib/eventdev/rte_event_eth_rx_adapter.c +++ b/lib/eventdev/rte_event_eth_rx_adapter.c @@ -99,10 +99,12 @@ struct rte_event_eth_rx_adapter { uint8_t rss_key_be[RSS_KEY_SIZE]; /* Event device identifier */ uint8_t eventdev_id; - /* Per ethernet device structure */ - struct eth_device_info *eth_devices; /* Event port identifier */ uint8_t event_port_id; + /* Flag indicating per rxq event buffer */ + bool use_queue_event_buf; + /* Per ethernet device structure */ + struct eth_device_info *eth_devices; /* Lock to serialize config updates with service function */ rte_spinlock_t rx_lock; /* Max mbufs processed in any service function invocation */ @@ -238,6 +240,7 @@ struct eth_rx_queue_info { uint32_t flow_id_mask; /* Set to ~0 if app provides flow id else 0 */ uint64_t event; struct 
eth_rx_vector_data vector_data; + struct rte_eth_event_enqueue_buffer *event_buf; }; static struct rte_event_eth_rx_adapter **event_eth_rx_adapter; @@ -753,10 +756,9 @@ rxa_enq_block_end_ts(struct rte_event_eth_rx_adapter *rx_adapter, /* Enqueue buffered events to event device */ static inline uint16_t -rxa_flush_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter) +rxa_flush_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter, + struct rte_eth_event_enqueue_buffer *buf) { - struct rte_eth_event_enqueue_buffer *buf = - &rx_adapter->event_enqueue_buffer; struct rte_event_eth_rx_adapter_stats *stats = &rx_adapter->stats; uint16_t count = buf->last ? buf->last - buf->head : buf->count; @@ -874,15 +876,14 @@ rxa_buffer_mbufs(struct rte_event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id, uint16_t rx_queue_id, struct rte_mbuf **mbufs, - uint16_t num) + uint16_t num, + struct rte_eth_event_enqueue_buffer *buf) { uint32_t i; struct eth_device_info *dev_info = &rx_adapter->eth_devices[eth_dev_id]; struct eth_rx_queue_info *eth_rx_queue_info = &dev_info->rx_queue[rx_queue_id]; - struct rte_eth_event_enqueue_buffer *buf = - &rx_adapter->event_enqueue_buffer; uint16_t new_tail = buf->tail; uint64_t event = eth_rx_queue_info->event; uint32_t flow_id_mask = eth_rx_queue_info->flow_id_mask; @@ -968,11 +969,10 @@ rxa_eth_rx(struct rte_event_eth_rx_adapter *rx_adapter, uint16_t queue_id, uint32_t rx_count, uint32_t max_rx, - int *rxq_empty) + int *rxq_empty, + struct rte_eth_event_enqueue_buffer *buf) { struct rte_mbuf *mbufs[BATCH_SIZE]; - struct rte_eth_event_enqueue_buffer *buf = - &rx_adapter->event_enqueue_buffer; struct rte_event_eth_rx_adapter_stats *stats = &rx_adapter->stats; uint16_t n; @@ -985,7 +985,7 @@ rxa_eth_rx(struct rte_event_eth_rx_adapter *rx_adapter, */ while (rxa_pkt_buf_available(buf)) { if (buf->count >= BATCH_SIZE) - rxa_flush_event_buffer(rx_adapter); + rxa_flush_event_buffer(rx_adapter, buf); stats->rx_poll_count++; n = 
rte_eth_rx_burst(port_id, queue_id, mbufs, BATCH_SIZE); @@ -994,14 +994,14 @@ rxa_eth_rx(struct rte_event_eth_rx_adapter *rx_adapter, *rxq_empty = 1; break; } - rxa_buffer_mbufs(rx_adapter, port_id, queue_id, mbufs, n); + rxa_buffer_mbufs(rx_adapter, port_id, queue_id, mbufs, n, buf); nb_rx += n; if (rx_count + nb_rx > max_rx) break; } if (buf->count > 0) - rxa_flush_event_buffer(rx_adapter); + rxa_flush_event_buffer(rx_adapter, buf); return nb_rx; } @@ -1142,7 +1142,7 @@ rxa_intr_ring_dequeue(struct rte_event_eth_rx_adapter *rx_adapter) ring_lock = &rx_adapter->intr_ring_lock; if (buf->count >= BATCH_SIZE) - rxa_flush_event_buffer(rx_adapter); + rxa_flush_event_buffer(rx_adapter, buf); while (rxa_pkt_buf_available(buf)) { struct eth_device_info *dev_info; @@ -1194,7 +1194,7 @@ rxa_intr_ring_dequeue(struct rte_event_eth_rx_adapter *rx_adapter) continue; n = rxa_eth_rx(rx_adapter, port, i, nb_rx, rx_adapter->max_nb_rx, - &rxq_empty); + &rxq_empty, buf); nb_rx += n; enq_buffer_full = !rxq_empty && n == 0; @@ -1215,7 +1215,7 @@ rxa_intr_ring_dequeue(struct rte_event_eth_rx_adapter *rx_adapter) } else { n = rxa_eth_rx(rx_adapter, port, queue, nb_rx, rx_adapter->max_nb_rx, - &rxq_empty); + &rxq_empty, buf); rx_adapter->qd_valid = !rxq_empty; nb_rx += n; if (nb_rx > rx_adapter->max_nb_rx) @@ -1246,13 +1246,12 @@ rxa_poll(struct rte_event_eth_rx_adapter *rx_adapter) { uint32_t num_queue; uint32_t nb_rx = 0; - struct rte_eth_event_enqueue_buffer *buf; + struct rte_eth_event_enqueue_buffer *buf = NULL; uint32_t wrr_pos; uint32_t max_nb_rx; wrr_pos = rx_adapter->wrr_pos; max_nb_rx = rx_adapter->max_nb_rx; - buf = &rx_adapter->event_enqueue_buffer; /* Iterate through a WRR sequence */ for (num_queue = 0; num_queue < rx_adapter->wrr_len; num_queue++) { @@ -1260,24 +1259,36 @@ rxa_poll(struct rte_event_eth_rx_adapter *rx_adapter) uint16_t qid = rx_adapter->eth_rx_poll[poll_idx].eth_rx_qid; uint16_t d = rx_adapter->eth_rx_poll[poll_idx].eth_dev_id; + if 
(rx_adapter->use_queue_event_buf) { + struct eth_device_info *dev_info = + &rx_adapter->eth_devices[d]; + buf = dev_info->rx_queue[qid].event_buf; + } else + buf = &rx_adapter->event_enqueue_buffer; + /* Don't do a batch dequeue from the rx queue if there isn't * enough space in the enqueue buffer. */ if (buf->count >= BATCH_SIZE) - rxa_flush_event_buffer(rx_adapter); + rxa_flush_event_buffer(rx_adapter, buf); if (!rxa_pkt_buf_available(buf)) { - rx_adapter->wrr_pos = wrr_pos; - return nb_rx; + if (rx_adapter->use_queue_event_buf) + goto poll_next_entry; + else { + rx_adapter->wrr_pos = wrr_pos; + return nb_rx; + } } nb_rx += rxa_eth_rx(rx_adapter, d, qid, nb_rx, max_nb_rx, - NULL); + NULL, buf); if (nb_rx > max_nb_rx) { rx_adapter->wrr_pos = (wrr_pos + 1) % rx_adapter->wrr_len; break; } +poll_next_entry: if (++wrr_pos == rx_adapter->wrr_len) wrr_pos = 0; } @@ -1288,12 +1299,18 @@ static void rxa_vector_expire(struct eth_rx_vector_data *vec, void *arg) { struct rte_event_eth_rx_adapter *rx_adapter = arg; - struct rte_eth_event_enqueue_buffer *buf = - &rx_adapter->event_enqueue_buffer; + struct rte_eth_event_enqueue_buffer *buf = NULL; struct rte_event *ev; + if (rx_adapter->use_queue_event_buf) { + struct eth_device_info *dev_info = + &rx_adapter->eth_devices[vec->port]; + buf = dev_info->rx_queue[vec->queue].event_buf; + } else + buf = &rx_adapter->event_enqueue_buffer; + if (buf->count) - rxa_flush_event_buffer(rx_adapter); + rxa_flush_event_buffer(rx_adapter, buf); if (vec->vector_ev->nb_elem == 0) return; @@ -1905,9 +1922,16 @@ rxa_sw_del(struct rte_event_eth_rx_adapter *rx_adapter, rx_adapter->num_rx_intr -= intrq; dev_info->nb_rx_intr -= intrq; dev_info->nb_shared_intr -= intrq && sintrq; + if (rx_adapter->use_queue_event_buf) { + struct rte_eth_event_enqueue_buffer *event_buf = + dev_info->rx_queue[rx_queue_id].event_buf; + rte_free(event_buf->events); + rte_free(event_buf); + dev_info->rx_queue[rx_queue_id].event_buf = NULL; + } } -static void +static int 
rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter, struct eth_device_info *dev_info, int32_t rx_queue_id, @@ -1919,15 +1943,21 @@ rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter, int intrq; int sintrq; struct rte_event *qi_ev; + struct rte_eth_event_enqueue_buffer *new_rx_buf = NULL; + uint16_t eth_dev_id = dev_info->dev->data->port_id; + int ret; if (rx_queue_id == -1) { uint16_t nb_rx_queues; uint16_t i; nb_rx_queues = dev_info->dev->data->nb_rx_queues; - for (i = 0; i < nb_rx_queues; i++) - rxa_add_queue(rx_adapter, dev_info, i, conf); - return; + for (i = 0; i < nb_rx_queues; i++) { + ret = rxa_add_queue(rx_adapter, dev_info, i, conf); + if (ret) + return ret; + } + return 0; } pollq = rxa_polled_queue(dev_info, rx_queue_id); @@ -1990,6 +2020,37 @@ rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter, dev_info->next_q_idx = 0; } } + + if (!rx_adapter->use_queue_event_buf) + return 0; + + new_rx_buf = rte_zmalloc_socket("rx_buffer_meta", + sizeof(*new_rx_buf), 0, + rte_eth_dev_socket_id(eth_dev_id)); + if (new_rx_buf == NULL) { + RTE_EDEV_LOG_ERR("Failed to allocate event buffer meta for " + "dev_id: %d queue_id: %d", + eth_dev_id, rx_queue_id); + return -ENOMEM; + } + + new_rx_buf->events_size = RTE_ALIGN(conf->event_buf_size, BATCH_SIZE); + new_rx_buf->events_size += (2 * BATCH_SIZE); + new_rx_buf->events = rte_zmalloc_socket("rx_buffer", + sizeof(struct rte_event) * + new_rx_buf->events_size, 0, + rte_eth_dev_socket_id(eth_dev_id)); + if (new_rx_buf->events == NULL) { + rte_free(new_rx_buf); + RTE_EDEV_LOG_ERR("Failed to allocate event buffer for " + "dev_id: %d queue_id: %d", + eth_dev_id, rx_queue_id); + return -ENOMEM; + } + + queue_info->event_buf = new_rx_buf; + + return 0; } static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter, @@ -2018,6 +2079,16 @@ static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter, temp_conf.servicing_weight = 1; } queue_conf = &temp_conf; + + if (queue_conf->servicing_weight == 0 && 
+ rx_adapter->use_queue_event_buf) { + + RTE_EDEV_LOG_ERR("Use of queue level event buffer " + "not supported for interrupt queues " + "dev_id: %d queue_id: %d", + eth_dev_id, rx_queue_id); + return -EINVAL; + } } nb_rx_queues = dev_info->dev->data->nb_rx_queues; @@ -2097,7 +2168,9 @@ static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter, - rxa_add_queue(rx_adapter, dev_info, rx_queue_id, queue_conf); + ret = rxa_add_queue(rx_adapter, dev_info, rx_queue_id, queue_conf); + if (ret) + goto err_free_rxqueue; rxa_calc_wrr_sequence(rx_adapter, rx_poll, rx_wrr); rte_free(rx_adapter->eth_rx_poll); @@ -2118,7 +2191,7 @@ static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter, rte_free(rx_poll); rte_free(rx_wrr); - return 0; + return ret; } static int @@ -2244,20 +2317,26 @@ rxa_create(uint8_t id, uint8_t dev_id, rx_adapter->eth_devices[i].dev = &rte_eth_devices[i]; /* Rx adapter event buffer allocation */ - buf = &rx_adapter->event_enqueue_buffer; - buf->events_size = RTE_ALIGN(rxa_params->event_buf_size, BATCH_SIZE); - - events = rte_zmalloc_socket(rx_adapter->mem_name, - buf->events_size * sizeof(*events), - 0, socket_id); - if (events == NULL) { - RTE_EDEV_LOG_ERR("Failed to allocate mem for event buffer\n"); - rte_free(rx_adapter->eth_devices); - rte_free(rx_adapter); - return -ENOMEM; - } + rx_adapter->use_queue_event_buf = rxa_params->use_queue_event_buf; + + if (!rx_adapter->use_queue_event_buf) { + buf = &rx_adapter->event_enqueue_buffer; + buf->events_size = RTE_ALIGN(rxa_params->event_buf_size, + BATCH_SIZE); + + events = rte_zmalloc_socket(rx_adapter->mem_name, + buf->events_size * sizeof(*events), + 0, socket_id); + if (events == NULL) { + RTE_EDEV_LOG_ERR("Failed to allocate memory " + "for adapter event buffer"); + rte_free(rx_adapter->eth_devices); + rte_free(rx_adapter); + return -ENOMEM; + } - rx_adapter->event_enqueue_buffer.events = events; + rx_adapter->event_enqueue_buffer.events = events; + } event_eth_rx_adapter[id] = 
rx_adapter; @@ -2277,6 +2356,8 @@ rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, /* Event buffer with default size = 6*BATCH_SIZE */ rxa_params.event_buf_size = ETH_EVENT_BUFFER_SIZE; + rxa_params.use_queue_event_buf = false; + return rxa_create(id, dev_id, &rxa_params, conf_cb, conf_arg); } @@ -2291,7 +2372,8 @@ rte_event_eth_rx_adapter_create2(uint8_t id, uint8_t dev_id, if (port_config == NULL) return -EINVAL; - if (rxa_params == NULL || rxa_params->event_buf_size == 0) + if (rxa_params == NULL || (!rxa_params->use_queue_event_buf && + rxa_params->event_buf_size == 0)) return -EINVAL; pc = rte_malloc(NULL, sizeof(*pc), 0); @@ -2357,7 +2439,8 @@ rte_event_eth_rx_adapter_free(uint8_t id) if (rx_adapter->default_cb_arg) rte_free(rx_adapter->conf_arg); rte_free(rx_adapter->eth_devices); - rte_free(rx_adapter->event_enqueue_buffer.events); + if (!rx_adapter->use_queue_event_buf) + rte_free(rx_adapter->event_enqueue_buffer.events); rte_free(rx_adapter); event_eth_rx_adapter[id] = NULL; @@ -2461,6 +2544,14 @@ rte_event_eth_rx_adapter_queue_add(uint8_t id, return -EINVAL; } + if ((rx_adapter->use_queue_event_buf && + queue_conf->event_buf_size == 0) || + (!rx_adapter->use_queue_event_buf && + queue_conf->event_buf_size != 0)) { + RTE_EDEV_LOG_ERR("Invalid Event buffer size for the queue"); + return -EINVAL; + } + dev_info = &rx_adapter->eth_devices[eth_dev_id]; if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) { -- 2.25.1 ^ permalink raw reply [flat|nested] 81+ messages in thread
* [dpdk-dev] [PATCH v1 5/5] test/eventdev: add per rx queue event buffer unit 2021-09-18 13:11 [dpdk-dev] [PATCH v1 1/5] eventdev: rx_adapter: add support to configure event buffer size Naga Harish K S V ` (2 preceding siblings ...) 2021-09-18 13:11 ` [dpdk-dev] [PATCH v1 4/5] eventdev:rx_adapter: implement per queue event buffer Naga Harish K S V @ 2021-09-18 13:11 ` Naga Harish K S V 2021-09-20 6:20 ` [dpdk-dev] [PATCH v1 1/5] eventdev: rx_adapter: add support to configure event buffer size Jerin Jacob ` (2 subsequent siblings) 6 siblings, 0 replies; 81+ messages in thread From: Naga Harish K S V @ 2021-09-18 13:11 UTC (permalink / raw) To: jerinj, jay.jayatheerthan; +Cc: dev this patch adds unit tests for per rx queue event buffer Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com> --- app/test/test_event_eth_rx_adapter.c | 90 ++++++++++++++++++++++++++++ 1 file changed, 90 insertions(+) diff --git a/app/test/test_event_eth_rx_adapter.c b/app/test/test_event_eth_rx_adapter.c index 6151027022..27723b2c1c 100644 --- a/app/test/test_event_eth_rx_adapter.c +++ b/app/test/test_event_eth_rx_adapter.c @@ -387,6 +387,94 @@ adapter_create(void) return err; } +static int +adapter_create_v2(void) +{ + int err; + struct rte_event_dev_info dev_info; + struct rte_event_port_conf rx_p_conf; + struct rte_event_eth_rx_adapter_params rxa_params; + + memset(&rx_p_conf, 0, sizeof(rx_p_conf)); + + err = rte_event_dev_info_get(TEST_DEV_ID, &dev_info); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + rx_p_conf.new_event_threshold = dev_info.max_num_events; + rx_p_conf.dequeue_depth = dev_info.max_event_port_dequeue_depth; + rx_p_conf.enqueue_depth = dev_info.max_event_port_enqueue_depth; + + rxa_params.use_queue_event_buf = false; + rxa_params.event_buf_size = 0; + + err = rte_event_eth_rx_adapter_create2(TEST_INST_ID, TEST_DEV_ID, + NULL, &rx_p_conf); + TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err); + + err = 
rte_event_eth_rx_adapter_create2(TEST_INST_ID, TEST_DEV_ID, + &rxa_params, &rx_p_conf); + TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err); + + rxa_params.use_queue_event_buf = true; + + err = rte_event_eth_rx_adapter_create2(TEST_INST_ID, TEST_DEV_ID, + &rxa_params, &rx_p_conf); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + err = rte_event_eth_rx_adapter_create2(TEST_INST_ID, TEST_DEV_ID, + &rxa_params, &rx_p_conf); + TEST_ASSERT(err == -EEXIST, "Expected -EEXIST got %d", err); + + return TEST_SUCCESS; +} + +static int +adapter_queue_event_buf_test(void) +{ + int err; + struct rte_event ev; + uint32_t cap; + + struct rte_event_eth_rx_adapter_queue_conf queue_config = {0}; + + err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID, TEST_ETHDEV_ID, + &cap); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + ev.queue_id = 0; + ev.sched_type = RTE_SCHED_TYPE_ATOMIC; + ev.priority = 0; + + queue_config.rx_queue_flags = 0; + if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID) { + ev.flow_id = 1; + queue_config.rx_queue_flags = + RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID; + } + queue_config.ev = ev; + queue_config.servicing_weight = 1; + queue_config.event_buf_size = 0; + + err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID, + TEST_ETHDEV_ID, 0, + &queue_config); + TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err); + + queue_config.event_buf_size = 1024; + + err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID, + TEST_ETHDEV_ID, 0, + &queue_config); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID, + TEST_ETHDEV_ID, + 0); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + return TEST_SUCCESS; +} + static void adapter_free(void) { @@ -804,6 +892,8 @@ static struct unit_test_suite event_eth_rx_tests = { adapter_multi_eth_add_del), TEST_CASE_ST(adapter_create, adapter_free, adapter_start_stop), TEST_CASE_ST(adapter_create, adapter_free, adapter_stats), + 
TEST_CASE_ST(adapter_create_v2, adapter_free, + adapter_queue_event_buf_test), TEST_CASES_END() /**< NULL terminate unit test array */ } }; -- 2.25.1 ^ permalink raw reply [flat|nested] 81+ messages in thread
* Re: [dpdk-dev] [PATCH v1 1/5] eventdev: rx_adapter: add support to configure event buffer size 2021-09-18 13:11 [dpdk-dev] [PATCH v1 1/5] eventdev: rx_adapter: add support to configure event buffer size Naga Harish K S V ` (3 preceding siblings ...) 2021-09-18 13:11 ` [dpdk-dev] [PATCH v1 5/5] test/eventdev: add per rx queue event buffer unit Naga Harish K S V @ 2021-09-20 6:20 ` Jerin Jacob 2021-09-21 13:45 ` Naga Harish K, S V 2021-09-21 9:21 ` [dpdk-dev] [PATCH v2 1/5] eventdev/rx_adapter: " Naga Harish K S V 2021-09-21 9:45 ` [dpdk-dev] [PATCH v2 1/5] eventdev/rx_adapter: add support to configure event buffer size Naga Harish K S V 6 siblings, 1 reply; 81+ messages in thread From: Jerin Jacob @ 2021-09-20 6:20 UTC (permalink / raw) To: Naga Harish K S V Cc: Jerin Jacob, Jayatheerthan, Jay, dpdk-dev, Ganapati Kundapura On Sat, Sep 18, 2021 at 6:41 PM Naga Harish K S V <s.v.naga.harish.k@intel.com> wrote: > > Currently Rx event buffer is static array > with a default size of 192(6*BATCH_SIZE). > > ``rte_event_eth_rx_adapter_create2`` api is added which takes > ``struct rte_event_eth_rx_adapter_params`` to configure event > buffer size in addition other params . The event buffer is > allocated dynamically at run time aligned to BATCH_SIZE + 2*BATCH_SIZE. > > Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com> > Signed-off-by: Ganapati Kundapura <ganapati.kundapura@intel.com> > --- > > +/** > + * A structure to hold adapter config params > + */ > +struct rte_event_eth_rx_adapter_params { > + uint16_t event_buf_size; > + /**< size of event buffer for the adapter */ See below. > +}; > + > /** > * > * Callback function invoked by the SW adapter before it continues > @@ -330,6 +339,40 @@ int rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, > rte_event_eth_rx_adapter_conf_cb conf_cb, > void *conf_arg); > > +/** > + * Create a new ethernet Rx event adapter with the specified identifier. 
> + * This function allocates Rx adapter event buffer with the size specified > + * in rxa_params aligned to BATCH_SIZE plus (BATCH_SIZE+BATCH_SIZE) and > + * uses an internal configuration function that creates an event port. This function may use for adding another rte_event_eth_rx_adapter_params:: value. So semantics of rte_event_eth_rx_adapter_params::event_buf_size you can document at in that structure. This function, you can tell it adapter creation varint with parameters or so See below. > + * This default function reconfigures the event device with an > + * additional event port and setups up the event port using the port config > + * parameter passed into this function. In case the application needs more > + * control in configuration of the service, it should use the > + * rte_event_eth_rx_adapter_create_ext() version. > + * > + * @param id > + * The identifier of the ethernet Rx event adapter. > + * > + * @param dev_id > + * The identifier of the event device to configure. > + * > + * @param rxa_params > + * Pointer to struct rte_event_eth_rx_adapter_params containing > + * size to allocate rx event buffer. Value NULL is allowed to represent the default values or so. > + * > + * @param port_config > + * Argument of type *rte_event_port_conf* that is passed to the conf_cb > + * function. 
> + * > + * @return > + * - 0: Success > + * - <0: Error code on failure > + */ > +__rte_experimental > +int rte_event_eth_rx_adapter_create2(uint8_t id, uint8_t dev_id, > + struct rte_event_eth_rx_adapter_params *rxa_params, > + struct rte_event_port_conf *port_config); Couple of suggestion on API name and prototype: - I think, we can remove 2 version and give more meaningful,name like rte_event_eth_rx_adapter_create_with_param() or so - Keep new parameter as last to have better compatibility i.e rte_event_eth_rx_adapter_create_with_param(uint8_t id, uint8_t dev_id, struct rte_event_port_conf *port_config, struct rte_event_eth_rx_adapter_params *rxa_params) ^ permalink raw reply [flat|nested] 81+ messages in thread
* Re: [dpdk-dev] [PATCH v1 1/5] eventdev: rx_adapter: add support to configure event buffer size 2021-09-20 6:20 ` [dpdk-dev] [PATCH v1 1/5] eventdev: rx_adapter: add support to configure event buffer size Jerin Jacob @ 2021-09-21 13:45 ` Naga Harish K, S V 0 siblings, 0 replies; 81+ messages in thread From: Naga Harish K, S V @ 2021-09-21 13:45 UTC (permalink / raw) To: Jerin Jacob Cc: Jerin Jacob, Jayatheerthan, Jay, dpdk-dev, Kundapura, Ganapati Hi Jerin, Please see the replies inline. > -----Original Message----- > From: Jerin Jacob <jerinjacobk@gmail.com> > Sent: Monday, September 20, 2021 11:50 AM > To: Naga Harish K, S V <s.v.naga.harish.k@intel.com> > Cc: Jerin Jacob <jerinj@marvell.com>; Jayatheerthan, Jay > <jay.jayatheerthan@intel.com>; dpdk-dev <dev@dpdk.org>; Kundapura, > Ganapati <ganapati.kundapura@intel.com> > Subject: Re: [dpdk-dev] [PATCH v1 1/5] eventdev: rx_adapter: add support > to configure event buffer size > > On Sat, Sep 18, 2021 at 6:41 PM Naga Harish K S V > <s.v.naga.harish.k@intel.com> wrote: > > > > Currently Rx event buffer is static array with a default size of > > 192(6*BATCH_SIZE). > > > > ``rte_event_eth_rx_adapter_create2`` api is added which takes ``struct > > rte_event_eth_rx_adapter_params`` to configure event buffer size in > > addition other params . The event buffer is allocated dynamically at > > run time aligned to BATCH_SIZE + 2*BATCH_SIZE. > > > > Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com> > > Signed-off-by: Ganapati Kundapura <ganapati.kundapura@intel.com> > > --- > > > > +/** > > + * A structure to hold adapter config params */ struct > > +rte_event_eth_rx_adapter_params { > > + uint16_t event_buf_size; > > + /**< size of event buffer for the adapter */ > > See below. 
> > > +}; > > + > > /** > > * > > * Callback function invoked by the SW adapter before it continues @@ > > -330,6 +339,40 @@ int rte_event_eth_rx_adapter_create_ext(uint8_t id, > uint8_t dev_id, > > rte_event_eth_rx_adapter_conf_cb conf_cb, > > void *conf_arg); > > > > +/** > > + * Create a new ethernet Rx event adapter with the specified identifier. > > + * This function allocates Rx adapter event buffer with the size > > +specified > > + * in rxa_params aligned to BATCH_SIZE plus (BATCH_SIZE+BATCH_SIZE) > > +and > > + * uses an internal configuration function that creates an event port. > > This function may use for adding another > rte_event_eth_rx_adapter_params:: value. > So semantics of rte_event_eth_rx_adapter_params::event_buf_size you > can document at in that structure. This function, you can tell it adapter > creation varint with parameters or so See below. > The documentation is updated as as per the review comments. > > + * This default function reconfigures the event device with an > > + * additional event port and setups up the event port using the port > > + config > > + * parameter passed into this function. In case the application needs > > + more > > + * control in configuration of the service, it should use the > > + * rte_event_eth_rx_adapter_create_ext() version. > > + * > > + * @param id > > + * The identifier of the ethernet Rx event adapter. > > + * > > + * @param dev_id > > + * The identifier of the event device to configure. > > + * > > + * @param rxa_params > > + * Pointer to struct rte_event_eth_rx_adapter_params containing > > + * size to allocate rx event buffer. > > Value NULL is allowed to represent the default values or so. > The api is updated to treat NULL pointer for adapter params with default values. > > + * > > + * @param port_config > > + * Argument of type *rte_event_port_conf* that is passed to the > > +conf_cb > > + * function. 
> > + * > > + * @return > > + * - 0: Success > > + * - <0: Error code on failure > > + */ > > +__rte_experimental > > +int rte_event_eth_rx_adapter_create2(uint8_t id, uint8_t dev_id, > > + struct rte_event_eth_rx_adapter_params *rxa_params, > > + struct rte_event_port_conf *port_config); > > Couple of suggestion on API name and prototype: > - I think, we can remove 2 version and give more meaningful,name like > rte_event_eth_rx_adapter_create_with_param() or so > > - Keep new parameter as last to have better compatibility i.e > rte_event_eth_rx_adapter_create_with_param(uint8_t id, uint8_t dev_id, > struct rte_event_port_conf *port_config, struct > rte_event_eth_rx_adapter_params *rxa_params) The function name and parameters are adjusted as suggested. Regards Harish ^ permalink raw reply [flat|nested] 81+ messages in thread
* [dpdk-dev] [PATCH v2 1/5] eventdev/rx_adapter: add support to configure event buffer size 2021-09-18 13:11 [dpdk-dev] [PATCH v1 1/5] eventdev: rx_adapter: add support to configure event buffer size Naga Harish K S V ` (4 preceding siblings ...) 2021-09-20 6:20 ` [dpdk-dev] [PATCH v1 1/5] eventdev: rx_adapter: add support to configure event buffer size Jerin Jacob @ 2021-09-21 9:21 ` Naga Harish K S V 2021-09-21 9:21 ` [dpdk-dev] [PATCH v2 2/5] test/event: add unit test for event buffer size config api Naga Harish K S V ` (3 more replies) 2021-09-21 9:45 ` [dpdk-dev] [PATCH v2 1/5] eventdev/rx_adapter: add support to configure event buffer size Naga Harish K S V 6 siblings, 4 replies; 81+ messages in thread From: Naga Harish K S V @ 2021-09-21 9:21 UTC (permalink / raw) To: jerinj, jay.jayatheerthan; +Cc: dev, Ganapati Kundapura Currently Rx event buffer is static array with a default size of 192(6*BATCH_SIZE). ``rte_event_eth_rx_adapter_create_with_params`` api is added which takes ``struct rte_event_eth_rx_adapter_params`` to configure event buffer size in addition other params . The event buffer is allocated from heap after aligning the size to BATCH_SIZE and adding 2*BATCH_SIZE. In case of NULL params argument, default event buffer size is used. 
Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com> Signed-off-by: Ganapati Kundapura <ganapati.kundapura@intel.com> --- .../prog_guide/event_ethernet_rx_adapter.rst | 7 ++ lib/eventdev/rte_event_eth_rx_adapter.c | 94 +++++++++++++++++-- lib/eventdev/rte_event_eth_rx_adapter.h | 40 +++++++- lib/eventdev/version.map | 2 + 4 files changed, 135 insertions(+), 8 deletions(-) diff --git a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst index 0780b6f711..dd753613bd 100644 --- a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst +++ b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst @@ -62,6 +62,13 @@ service function and needs to create an event port for it. The callback is expected to fill the ``struct rte_event_eth_rx_adapter_conf structure`` passed to it. +If the application desires to control the event buffer size, it can use the +``rte_event_eth_rx_adapter_create_with_params()`` api. The event buffer size is +specified using ``struct rte_event_eth_rx_adapter_params::event_buf_size``. +The function is passed the event device to be associated with the adapter +and port configuration for the adapter to setup an event port if the +adapter needs to use a service function. 
+ Adding Rx Queues to the Adapter Instance ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/lib/eventdev/rte_event_eth_rx_adapter.c b/lib/eventdev/rte_event_eth_rx_adapter.c index f2dc69503d..df1653b497 100644 --- a/lib/eventdev/rte_event_eth_rx_adapter.c +++ b/lib/eventdev/rte_event_eth_rx_adapter.c @@ -82,7 +82,9 @@ struct rte_eth_event_enqueue_buffer { /* Count of events in this buffer */ uint16_t count; /* Array of events in this buffer */ - struct rte_event events[ETH_EVENT_BUFFER_SIZE]; + struct rte_event *events; + /* size of event buffer */ + uint16_t events_size; /* Event enqueue happens from head */ uint16_t head; /* New packets from rte_eth_rx_burst is enqued from tail */ @@ -919,7 +921,7 @@ rxa_buffer_mbufs(struct rte_event_eth_rx_adapter *rx_adapter, dropped = 0; nb_cb = dev_info->cb_fn(eth_dev_id, rx_queue_id, buf->last | - (RTE_DIM(buf->events) & ~buf->last_mask), + (buf->events_size & ~buf->last_mask), buf->count >= BATCH_SIZE ? buf->count - BATCH_SIZE : 0, &buf->events[buf->tail], @@ -945,7 +947,7 @@ rxa_pkt_buf_available(struct rte_eth_event_enqueue_buffer *buf) uint32_t nb_req = buf->tail + BATCH_SIZE; if (!buf->last) { - if (nb_req <= RTE_DIM(buf->events)) + if (nb_req <= buf->events_size) return true; if (buf->head >= BATCH_SIZE) { @@ -2164,12 +2166,15 @@ rxa_ctrl(uint8_t id, int start) return 0; } -int -rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, - rte_event_eth_rx_adapter_conf_cb conf_cb, - void *conf_arg) +static int +rxa_create(uint8_t id, uint8_t dev_id, + struct rte_event_eth_rx_adapter_params *rxa_params, + rte_event_eth_rx_adapter_conf_cb conf_cb, + void *conf_arg) { struct rte_event_eth_rx_adapter *rx_adapter; + struct rte_eth_event_enqueue_buffer *buf; + struct rte_event *events; int ret; int socket_id; uint16_t i; @@ -2184,6 +2189,7 @@ rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL); RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); 
+ if (conf_cb == NULL) return -EINVAL; @@ -2231,11 +2237,30 @@ rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, rte_free(rx_adapter); return -ENOMEM; } + rte_spinlock_init(&rx_adapter->rx_lock); + for (i = 0; i < RTE_MAX_ETHPORTS; i++) rx_adapter->eth_devices[i].dev = &rte_eth_devices[i]; + /* Rx adapter event buffer allocation */ + buf = &rx_adapter->event_enqueue_buffer; + buf->events_size = RTE_ALIGN(rxa_params->event_buf_size, BATCH_SIZE); + + events = rte_zmalloc_socket(rx_adapter->mem_name, + buf->events_size * sizeof(*events), + 0, socket_id); + if (events == NULL) { + RTE_EDEV_LOG_ERR("Failed to allocate mem for event buffer\n"); + rte_free(rx_adapter->eth_devices); + rte_free(rx_adapter); + return -ENOMEM; + } + + rx_adapter->event_enqueue_buffer.events = events; + event_eth_rx_adapter[id] = rx_adapter; + if (conf_cb == rxa_default_conf_cb) rx_adapter->default_cb_arg = 1; rte_eventdev_trace_eth_rx_adapter_create(id, dev_id, conf_cb, @@ -2243,6 +2268,57 @@ rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, return 0; } +int +rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, + rte_event_eth_rx_adapter_conf_cb conf_cb, + void *conf_arg) +{ + struct rte_event_eth_rx_adapter_params rxa_params; + + /* Event buffer with default size = 6*BATCH_SIZE */ + rxa_params.event_buf_size = ETH_EVENT_BUFFER_SIZE; + return rxa_create(id, dev_id, &rxa_params, conf_cb, conf_arg); +} + +int +rte_event_eth_rx_adapter_create_with_params(uint8_t id, uint8_t dev_id, + struct rte_event_port_conf *port_config, + struct rte_event_eth_rx_adapter_params *rxa_params) +{ + struct rte_event_port_conf *pc; + int ret; + struct rte_event_eth_rx_adapter_params temp_params = {0}; + + if (port_config == NULL) + return -EINVAL; + + /* use default values if rxa_parmas is NULL */ + if (rxa_params == NULL) { + rxa_params = &temp_params; + rxa_params->event_buf_size = ETH_EVENT_BUFFER_SIZE; + } + + if (rxa_params->event_buf_size == 0) + return -EINVAL; + + pc 
= rte_malloc(NULL, sizeof(*pc), 0); + if (pc == NULL) + return -ENOMEM; + + *pc = *port_config; + + /* event buff size aligned to BATCH_SIZE + 2*BATCH_SIZE */ + rxa_params->event_buf_size = RTE_ALIGN(rxa_params->event_buf_size, + BATCH_SIZE); + rxa_params->event_buf_size += BATCH_SIZE + BATCH_SIZE; + + ret = rxa_create(id, dev_id, rxa_params, rxa_default_conf_cb, pc); + if (ret) + rte_free(pc); + + return ret; +} + int rte_event_eth_rx_adapter_create(uint8_t id, uint8_t dev_id, struct rte_event_port_conf *port_config) @@ -2252,12 +2328,14 @@ rte_event_eth_rx_adapter_create(uint8_t id, uint8_t dev_id, if (port_config == NULL) return -EINVAL; + RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL); pc = rte_malloc(NULL, sizeof(*pc), 0); if (pc == NULL) return -ENOMEM; *pc = *port_config; + ret = rte_event_eth_rx_adapter_create_ext(id, dev_id, rxa_default_conf_cb, pc); @@ -2286,6 +2364,7 @@ rte_event_eth_rx_adapter_free(uint8_t id) if (rx_adapter->default_cb_arg) rte_free(rx_adapter->conf_arg); rte_free(rx_adapter->eth_devices); + rte_free(rx_adapter->event_enqueue_buffer.events); rte_free(rx_adapter); event_eth_rx_adapter[id] = NULL; @@ -2658,6 +2737,7 @@ rte_event_eth_rx_adapter_stats_get(uint8_t id, stats->rx_packets += dev_stats_sum.rx_packets; stats->rx_enq_count += dev_stats_sum.rx_enq_count; + return 0; } diff --git a/lib/eventdev/rte_event_eth_rx_adapter.h b/lib/eventdev/rte_event_eth_rx_adapter.h index 3f8b362295..a7881097b4 100644 --- a/lib/eventdev/rte_event_eth_rx_adapter.h +++ b/lib/eventdev/rte_event_eth_rx_adapter.h @@ -26,6 +26,7 @@ * The ethernet Rx event adapter's functions are: * - rte_event_eth_rx_adapter_create_ext() * - rte_event_eth_rx_adapter_create() + * - rte_event_eth_rx_adapter_create_with_params() * - rte_event_eth_rx_adapter_free() * - rte_event_eth_rx_adapter_queue_add() * - rte_event_eth_rx_adapter_queue_del() @@ -36,7 +37,7 @@ * * The application creates an ethernet to event adapter using * rte_event_eth_rx_adapter_create_ext() or 
rte_event_eth_rx_adapter_create() - * functions. + * or rte_event_eth_rx_adapter_create_with_params() functions. * The adapter needs to know which ethernet rx queues to poll for mbufs as well * as event device parameters such as the event queue identifier, event * priority and scheduling type that the adapter should use when constructing @@ -256,6 +257,16 @@ struct rte_event_eth_rx_adapter_vector_limits { */ }; +/** + * A structure to hold adapter config params + */ +struct rte_event_eth_rx_adapter_params { + uint16_t event_buf_size; + /**< size of event buffer for the adapter. + * the size is aligned to BATCH_SIZE and added (2 * BATCH_SIZE) + */ +}; + /** * * Callback function invoked by the SW adapter before it continues @@ -356,6 +367,33 @@ int rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, int rte_event_eth_rx_adapter_create(uint8_t id, uint8_t dev_id, struct rte_event_port_conf *port_config); +/** + * This is a variant of rte_event_eth_rx_adapter_create() with additional + * adapter params specified in ``struct rte_event_eth_rx_adapter_params``. + * + * @param id + * The identifier of the ethernet Rx event adapter. + * + * @param dev_id + * The identifier of the event device to configure. + * + * @param port_config + * Argument of type *rte_event_port_conf* that is passed to the conf_cb + * function. + * + * @param rxa_params + * Pointer to struct rte_event_eth_rx_adapter_params. + * In case of NULL, default values are used. 
+ * + * @return + * - 0: Success + * - <0: Error code on failure + */ +__rte_experimental +int rte_event_eth_rx_adapter_create_with_params(uint8_t id, uint8_t dev_id, + struct rte_event_port_conf *port_config, + struct rte_event_eth_rx_adapter_params *rxa_params); + /** * Free an event adapter * diff --git a/lib/eventdev/version.map b/lib/eventdev/version.map index cd86d2d908..87586de879 100644 --- a/lib/eventdev/version.map +++ b/lib/eventdev/version.map @@ -138,6 +138,8 @@ EXPERIMENTAL { __rte_eventdev_trace_port_setup; # added in 20.11 rte_event_pmd_pci_probe_named; + # added in 21.11 + rte_event_eth_rx_adapter_create_with_params; #added in 21.05 rte_event_vector_pool_create; -- 2.25.1 ^ permalink raw reply [flat|nested] 81+ messages in thread
* [dpdk-dev] [PATCH v2 2/5] test/event: add unit test for event buffer size config api 2021-09-21 9:21 ` [dpdk-dev] [PATCH v2 1/5] eventdev/rx_adapter: " Naga Harish K S V @ 2021-09-21 9:21 ` Naga Harish K S V 2021-09-21 9:21 ` [dpdk-dev] [PATCH v2 3/5] eventdev/rx_adapter:add per queue event buffer configure support Naga Harish K S V ` (2 subsequent siblings) 3 siblings, 0 replies; 81+ messages in thread From: Naga Harish K S V @ 2021-09-21 9:21 UTC (permalink / raw) To: jerinj, jay.jayatheerthan; +Cc: dev this patch adds unit test for rte_event_eth_rx_adapter_create_with_params api and validate all possible input combinations. Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com> --- app/test/test_event_eth_rx_adapter.c | 53 +++++++++++++++++++++++++--- 1 file changed, 49 insertions(+), 4 deletions(-) diff --git a/app/test/test_event_eth_rx_adapter.c b/app/test/test_event_eth_rx_adapter.c index add4d8a678..3c0f0ad7cc 100644 --- a/app/test/test_event_eth_rx_adapter.c +++ b/app/test/test_event_eth_rx_adapter.c @@ -428,6 +428,50 @@ adapter_create_free(void) return TEST_SUCCESS; } +static int +adapter_create_free_v2(void) +{ + int err; + + struct rte_event_port_conf rx_p_conf = { + .dequeue_depth = 8, + .enqueue_depth = 8, + .new_event_threshold = 1200, + }; + + struct rte_event_eth_rx_adapter_params rxa_params = { + .event_buf_size = 1024 + }; + + err = rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID, + TEST_DEV_ID, NULL, NULL); + TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err); + + err = rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID, + TEST_DEV_ID, &rx_p_conf, &rxa_params); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + err = rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID, + TEST_DEV_ID, &rx_p_conf, &rxa_params); + TEST_ASSERT(err == -EEXIST, "Expected -EEXIST %d got %d", -EEXIST, err); + + rxa_params.event_buf_size = 0; + err = rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID, + TEST_DEV_ID, 
&rx_p_conf, &rxa_params); + TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err); + + err = rte_event_eth_rx_adapter_free(TEST_INST_ID); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + err = rte_event_eth_rx_adapter_free(TEST_INST_ID); + TEST_ASSERT(err == -EINVAL, "Expected -EINVAL %d got %d", -EINVAL, err); + + err = rte_event_eth_rx_adapter_free(1); + TEST_ASSERT(err == -EINVAL, "Expected -EINVAL %d got %d", -EINVAL, err); + + return TEST_SUCCESS; +} + static int adapter_queue_add_del(void) { @@ -435,7 +479,7 @@ adapter_queue_add_del(void) struct rte_event ev; uint32_t cap; - struct rte_event_eth_rx_adapter_queue_conf queue_config; + struct rte_event_eth_rx_adapter_queue_conf queue_config = {0}; err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID, TEST_ETHDEV_ID, &cap); @@ -523,7 +567,7 @@ adapter_multi_eth_add_del(void) uint16_t port_index, port_index_base, drv_id = 0; char driver_name[50]; - struct rte_event_eth_rx_adapter_queue_conf queue_config; + struct rte_event_eth_rx_adapter_queue_conf queue_config = {0}; ev.queue_id = 0; ev.sched_type = RTE_SCHED_TYPE_ATOMIC; @@ -594,7 +638,7 @@ adapter_intr_queue_add_del(void) struct rte_event ev; uint32_t cap; uint16_t eth_port; - struct rte_event_eth_rx_adapter_queue_conf queue_config; + struct rte_event_eth_rx_adapter_queue_conf queue_config = {0}; if (!default_params.rx_intr_port_inited) return 0; @@ -687,7 +731,7 @@ adapter_start_stop(void) ev.sched_type = RTE_SCHED_TYPE_ATOMIC; ev.priority = 0; - struct rte_event_eth_rx_adapter_queue_conf queue_config; + struct rte_event_eth_rx_adapter_queue_conf queue_config = {0}; queue_config.rx_queue_flags = 0; if (default_params.caps & @@ -753,6 +797,7 @@ static struct unit_test_suite event_eth_rx_tests = { .teardown = testsuite_teardown, .unit_test_cases = { TEST_CASE_ST(NULL, NULL, adapter_create_free), + TEST_CASE_ST(NULL, NULL, adapter_create_free_v2), TEST_CASE_ST(adapter_create, adapter_free, adapter_queue_add_del), TEST_CASE_ST(adapter_create, 
adapter_free, -- 2.25.1 ^ permalink raw reply [flat|nested] 81+ messages in thread
* [dpdk-dev] [PATCH v2 3/5] eventdev/rx_adapter:add per queue event buffer configure support 2021-09-21 9:21 ` [dpdk-dev] [PATCH v2 1/5] eventdev/rx_adapter: " Naga Harish K S V 2021-09-21 9:21 ` [dpdk-dev] [PATCH v2 2/5] test/event: add unit test for event buffer size config api Naga Harish K S V @ 2021-09-21 9:21 ` Naga Harish K S V 2021-09-21 16:24 ` Jerin Jacob 2021-09-21 20:32 ` [dpdk-dev] [EXT] " Pavan Nikhilesh Bhagavatula 2021-09-21 9:21 ` [dpdk-dev] [PATCH v2 4/5] eventdev/rx_adapter: implement per queue event buffer Naga Harish K S V 2021-09-21 9:21 ` [dpdk-dev] [PATCH v2 5/5] test/eventdev: add per rx queue event buffer unit Naga Harish K S V 3 siblings, 2 replies; 81+ messages in thread From: Naga Harish K S V @ 2021-09-21 9:21 UTC (permalink / raw) To: jerinj, jay.jayatheerthan; +Cc: dev To configure per queue event buffer size, applications sets ``rte_event_eth_rx_adapter_params::use_queue_event_buf`` flag as true and is passed to ``rte_event_eth_rx_adapter_create_with_params`` api. The per queue event buffer size is populated in ``rte_event_eth_rx_adapter_queue_conf::event_buf_size`` and passed to ``rte_event_eth_rx_adapter_queue_add`` api. Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com> --- .../prog_guide/event_ethernet_rx_adapter.rst | 19 ++++++++++++------- lib/eventdev/rte_event_eth_rx_adapter.h | 4 ++++ 2 files changed, 16 insertions(+), 7 deletions(-) diff --git a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst index dd753613bd..333e6f8192 100644 --- a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst +++ b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst @@ -62,12 +62,14 @@ service function and needs to create an event port for it. The callback is expected to fill the ``struct rte_event_eth_rx_adapter_conf structure`` passed to it. -If the application desires to control the event buffer size, it can use the -``rte_event_eth_rx_adapter_create_with_params()`` api. 
The event buffer size is -specified using ``struct rte_event_eth_rx_adapter_params::event_buf_size``. -The function is passed the event device to be associated with the adapter -and port configuration for the adapter to setup an event port if the -adapter needs to use a service function. +If the application desires to control the event buffer size at adapter level, +it can use the ``rte_event_eth_rx_adapter_create_with_params()`` api. The event +buffer size is specified using ``struct rte_event_eth_rx_adapter_params:: +event_buf_size``. To configure the event buffer size at queue level, the boolean +flag ``struct rte_event_eth_rx_adapter_params::use_queue_event_buf`` need to be +set to true. The function is passed the event device to be associated with +the adapter and port configuration for the adapter to setup an event port +if the adapter needs to use a service function. Adding Rx Queues to the Adapter Instance ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -79,7 +81,9 @@ parameter. Event information for packets from this Rx queue is encoded in the ``ev`` field of ``struct rte_event_eth_rx_adapter_queue_conf``. The servicing_weight member of the struct rte_event_eth_rx_adapter_queue_conf is the relative polling frequency of the Rx queue and is applicable when the -adapter uses a service core function. +adapter uses a service core function. The applications can configure queue +event buffer size in ``struct rte_event_eth_rx_adapter_queue_conf::event_buf_size`` +parameter. .. code-block:: c @@ -90,6 +94,7 @@ adapter uses a service core function. 
queue_config.rx_queue_flags = 0; queue_config.ev = ev; queue_config.servicing_weight = 1; + queue_config.event_buf_size = 1024; err = rte_event_eth_rx_adapter_queue_add(id, eth_dev_id, diff --git a/lib/eventdev/rte_event_eth_rx_adapter.h b/lib/eventdev/rte_event_eth_rx_adapter.h index a7881097b4..b9f0563244 100644 --- a/lib/eventdev/rte_event_eth_rx_adapter.h +++ b/lib/eventdev/rte_event_eth_rx_adapter.h @@ -199,6 +199,8 @@ struct rte_event_eth_rx_adapter_queue_conf { * Valid when RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR flag is set in * @see rte_event_eth_rx_adapter_queue_conf::rx_queue_flags. */ + uint16_t event_buf_size; + /**< event buffer size for this queue */ }; /** @@ -265,6 +267,8 @@ struct rte_event_eth_rx_adapter_params { /**< size of event buffer for the adapter. * the size is aligned to BATCH_SIZE and added (2 * BATCH_SIZE) */ + bool use_queue_event_buf; + /**< flag to indicate that event buffer is separate for each queue */ }; /** -- 2.25.1 ^ permalink raw reply [flat|nested] 81+ messages in thread
* Re: [dpdk-dev] [PATCH v2 3/5] eventdev/rx_adapter:add per queue event buffer configure support 2021-09-21 9:21 ` [dpdk-dev] [PATCH v2 3/5] eventdev/rx_adapter:add per queue event buffer configure support Naga Harish K S V @ 2021-09-21 16:24 ` Jerin Jacob 2021-09-22 15:21 ` Naga Harish K, S V 2021-09-21 20:32 ` [dpdk-dev] [EXT] " Pavan Nikhilesh Bhagavatula 1 sibling, 1 reply; 81+ messages in thread From: Jerin Jacob @ 2021-09-21 16:24 UTC (permalink / raw) To: Naga Harish K S V; +Cc: Jerin Jacob, Jayatheerthan, Jay, dpdk-dev On Tue, Sep 21, 2021 at 2:52 PM Naga Harish K S V <s.v.naga.harish.k@intel.com> wrote: > > To configure per queue event buffer size, applications sets > ``rte_event_eth_rx_adapter_params::use_queue_event_buf`` flag > as true and is passed to ``rte_event_eth_rx_adapter_create_with_params`` > api. > > The per queue event buffer size is populated in > ``rte_event_eth_rx_adapter_queue_conf::event_buf_size`` and passed > to ``rte_event_eth_rx_adapter_queue_add`` api. > > Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com> Fix check-git-log issues Wrong headline format: eventdev/rx_adapter:add per queue event buffer configure support Wrong headline case: "test/event: add unit test for event buffer size config api": api --> API Wrong headline case: "test/eventdev: add per rx queue event buffer unit": rx --> Rx Headline too long: eventdev/rx_adapter: add support to configure event buffer size eventdev/rx_adapter:add per queue event buffer configure support Invalid patch(es) found - checked 5 patches > --- > .../prog_guide/event_ethernet_rx_adapter.rst | 19 ++++++++++++------- > lib/eventdev/rte_event_eth_rx_adapter.h | 4 ++++ > 2 files changed, 16 insertions(+), 7 deletions(-) > > diff --git a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst > index dd753613bd..333e6f8192 100644 > --- a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst > +++ 
b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst > @@ -62,12 +62,14 @@ service function and needs to create an event port for it. The callback is > expected to fill the ``struct rte_event_eth_rx_adapter_conf structure`` > passed to it. > > -If the application desires to control the event buffer size, it can use the > -``rte_event_eth_rx_adapter_create_with_params()`` api. The event buffer size is > -specified using ``struct rte_event_eth_rx_adapter_params::event_buf_size``. > -The function is passed the event device to be associated with the adapter > -and port configuration for the adapter to setup an event port if the > -adapter needs to use a service function. > +If the application desires to control the event buffer size at adapter level, > +it can use the ``rte_event_eth_rx_adapter_create_with_params()`` api. The event > +buffer size is specified using ``struct rte_event_eth_rx_adapter_params:: > +event_buf_size``. To configure the event buffer size at queue level, the boolean > +flag ``struct rte_event_eth_rx_adapter_params::use_queue_event_buf`` need to be > +set to true. The function is passed the event device to be associated with > +the adapter and port configuration for the adapter to setup an event port > +if the adapter needs to use a service function. > > Adding Rx Queues to the Adapter Instance > ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ > @@ -79,7 +81,9 @@ parameter. Event information for packets from this Rx queue is encoded in the > ``ev`` field of ``struct rte_event_eth_rx_adapter_queue_conf``. The > servicing_weight member of the struct rte_event_eth_rx_adapter_queue_conf > is the relative polling frequency of the Rx queue and is applicable when the > -adapter uses a service core function. > +adapter uses a service core function. The applications can configure queue > +event buffer size in ``struct rte_event_eth_rx_adapter_queue_conf::event_buf_size`` > +parameter. > > .. 
code-block:: c > > @@ -90,6 +94,7 @@ adapter uses a service core function. > queue_config.rx_queue_flags = 0; > queue_config.ev = ev; > queue_config.servicing_weight = 1; > + queue_config.event_buf_size = 1024; > > err = rte_event_eth_rx_adapter_queue_add(id, > eth_dev_id, > diff --git a/lib/eventdev/rte_event_eth_rx_adapter.h b/lib/eventdev/rte_event_eth_rx_adapter.h > index a7881097b4..b9f0563244 100644 > --- a/lib/eventdev/rte_event_eth_rx_adapter.h > +++ b/lib/eventdev/rte_event_eth_rx_adapter.h > @@ -199,6 +199,8 @@ struct rte_event_eth_rx_adapter_queue_conf { > * Valid when RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR flag is set in > * @see rte_event_eth_rx_adapter_queue_conf::rx_queue_flags. > */ > + uint16_t event_buf_size; > + /**< event buffer size for this queue */ > }; > > /** > @@ -265,6 +267,8 @@ struct rte_event_eth_rx_adapter_params { > /**< size of event buffer for the adapter. > * the size is aligned to BATCH_SIZE and added (2 * BATCH_SIZE) > */ > + bool use_queue_event_buf; > + /**< flag to indicate that event buffer is separate for each queue */ > }; > > /** > -- > 2.25.1 > ^ permalink raw reply [flat|nested] 81+ messages in thread
* Re: [dpdk-dev] [PATCH v2 3/5] eventdev/rx_adapter:add per queue event buffer configure support 2021-09-21 16:24 ` Jerin Jacob @ 2021-09-22 15:21 ` Naga Harish K, S V 0 siblings, 0 replies; 81+ messages in thread From: Naga Harish K, S V @ 2021-09-22 15:21 UTC (permalink / raw) To: Jerin Jacob; +Cc: Jerin Jacob, Jayatheerthan, Jay, dpdk-dev Hi Jerin, > -----Original Message----- > From: Jerin Jacob <jerinjacobk@gmail.com> > Sent: Tuesday, September 21, 2021 9:55 PM > To: Naga Harish K, S V <s.v.naga.harish.k@intel.com> > Cc: Jerin Jacob <jerinj@marvell.com>; Jayatheerthan, Jay > <jay.jayatheerthan@intel.com>; dpdk-dev <dev@dpdk.org> > Subject: Re: [dpdk-dev] [PATCH v2 3/5] eventdev/rx_adapter:add per queue > event buffer configure support > > On Tue, Sep 21, 2021 at 2:52 PM Naga Harish K S V > <s.v.naga.harish.k@intel.com> wrote: > > > > To configure per queue event buffer size, applications sets > > ``rte_event_eth_rx_adapter_params::use_queue_event_buf`` flag as true > > and is passed to ``rte_event_eth_rx_adapter_create_with_params`` > > api. > > > > The per queue event buffer size is populated in > > ``rte_event_eth_rx_adapter_queue_conf::event_buf_size`` and passed to > > ``rte_event_eth_rx_adapter_queue_add`` api. > > > > Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com> > > Fix check-git-log issues > > > Wrong headline format: > eventdev/rx_adapter:add per queue event buffer configure support > Wrong headline case: > "test/event: add unit test for event buffer size config api": api --> > API Wrong headline case: > "test/eventdev: add per rx queue event buffer > unit": rx --> Rx > Headline too long: > eventdev/rx_adapter: add support to configure event buffer size > eventdev/rx_adapter:add per queue event buffer configure support > > Invalid patch(es) found - checked 5 patches The check-git-log issues are resolved in v3 patch set. 
> > > > --- > > .../prog_guide/event_ethernet_rx_adapter.rst | 19 ++++++++++++------- > > lib/eventdev/rte_event_eth_rx_adapter.h | 4 ++++ > > 2 files changed, 16 insertions(+), 7 deletions(-) > > > > diff --git a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst > > b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst > > index dd753613bd..333e6f8192 100644 > > --- a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst > > +++ b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst > > @@ -62,12 +62,14 @@ service function and needs to create an event port > > for it. The callback is expected to fill the ``struct > > rte_event_eth_rx_adapter_conf structure`` passed to it. > > > > -If the application desires to control the event buffer size, it can > > use the -``rte_event_eth_rx_adapter_create_with_params()`` api. The > > event buffer size is -specified using ``struct > rte_event_eth_rx_adapter_params::event_buf_size``. > > -The function is passed the event device to be associated with the > > adapter -and port configuration for the adapter to setup an event port > > if the -adapter needs to use a service function. > > +If the application desires to control the event buffer size at > > +adapter level, it can use the > > +``rte_event_eth_rx_adapter_create_with_params()`` api. The event > buffer size is specified using ``struct rte_event_eth_rx_adapter_params:: > > +event_buf_size``. To configure the event buffer size at queue level, > > +the boolean flag ``struct > > +rte_event_eth_rx_adapter_params::use_queue_event_buf`` need to be > set > > +to true. The function is passed the event device to be associated > > +with the adapter and port configuration for the adapter to setup an event > port if the adapter needs to use a service function. > > > > Adding Rx Queues to the Adapter Instance > > ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ > > @@ -79,7 +81,9 @@ parameter. 
Event information for packets from this > > Rx queue is encoded in the ``ev`` field of ``struct > > rte_event_eth_rx_adapter_queue_conf``. The servicing_weight member > of > > the struct rte_event_eth_rx_adapter_queue_conf > > is the relative polling frequency of the Rx queue and is applicable > > when the -adapter uses a service core function. > > +adapter uses a service core function. The applications can configure > > +queue event buffer size in ``struct > > +rte_event_eth_rx_adapter_queue_conf::event_buf_size`` > > +parameter. > > > > .. code-block:: c > > > > @@ -90,6 +94,7 @@ adapter uses a service core function. > > queue_config.rx_queue_flags = 0; > > queue_config.ev = ev; > > queue_config.servicing_weight = 1; > > + queue_config.event_buf_size = 1024; > > > > err = rte_event_eth_rx_adapter_queue_add(id, > > eth_dev_id, diff > > --git a/lib/eventdev/rte_event_eth_rx_adapter.h > > b/lib/eventdev/rte_event_eth_rx_adapter.h > > index a7881097b4..b9f0563244 100644 > > --- a/lib/eventdev/rte_event_eth_rx_adapter.h > > +++ b/lib/eventdev/rte_event_eth_rx_adapter.h > > @@ -199,6 +199,8 @@ struct rte_event_eth_rx_adapter_queue_conf { > > * Valid when RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR > flag is set in > > * @see rte_event_eth_rx_adapter_queue_conf::rx_queue_flags. > > */ > > + uint16_t event_buf_size; > > + /**< event buffer size for this queue */ > > }; > > > > /** > > @@ -265,6 +267,8 @@ struct rte_event_eth_rx_adapter_params { > > /**< size of event buffer for the adapter. > > * the size is aligned to BATCH_SIZE and added (2 * BATCH_SIZE) > > */ > > + bool use_queue_event_buf; > > + /**< flag to indicate that event buffer is separate for each > > + queue */ > > }; > > > > /** > > -- > > 2.25.1 > > ^ permalink raw reply [flat|nested] 81+ messages in thread
* Re: [dpdk-dev] [EXT] [PATCH v2 3/5] eventdev/rx_adapter:add per queue event buffer configure support 2021-09-21 9:21 ` [dpdk-dev] [PATCH v2 3/5] eventdev/rx_adapter:add per queue event buffer configure support Naga Harish K S V 2021-09-21 16:24 ` Jerin Jacob @ 2021-09-21 20:32 ` Pavan Nikhilesh Bhagavatula 2021-09-22 15:39 ` Naga Harish K, S V 1 sibling, 1 reply; 81+ messages in thread From: Pavan Nikhilesh Bhagavatula @ 2021-09-21 20:32 UTC (permalink / raw) To: Naga Harish K S V, Jerin Jacob Kollanukkaran, jay.jayatheerthan; +Cc: dev >-----Original Message----- >From: dev <dev-bounces@dpdk.org> On Behalf Of Naga Harish K S V >Sent: Tuesday, September 21, 2021 2:52 PM >To: Jerin Jacob Kollanukkaran <jerinj@marvell.com>; >jay.jayatheerthan@intel.com >Cc: dev@dpdk.org >Subject: [EXT] [dpdk-dev] [PATCH v2 3/5] eventdev/rx_adapter:add per >queue event buffer configure support > >External Email > >---------------------------------------------------------------------- >To configure per queue event buffer size, applications sets >``rte_event_eth_rx_adapter_params::use_queue_event_buf`` flag >as true and is passed to >``rte_event_eth_rx_adapter_create_with_params`` >api. > >The per queue event buffer size is populated in >``rte_event_eth_rx_adapter_queue_conf::event_buf_size`` and >passed >to ``rte_event_eth_rx_adapter_queue_add`` api. > >Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com> >--- > .../prog_guide/event_ethernet_rx_adapter.rst | 19 ++++++++++++--- >---- > lib/eventdev/rte_event_eth_rx_adapter.h | 4 ++++ > 2 files changed, 16 insertions(+), 7 deletions(-) > >diff --git a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst >b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst >index dd753613bd..333e6f8192 100644 >--- a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst >+++ b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst >@@ -62,12 +62,14 @@ service function and needs to create an event >port for it. 
The callback is > expected to fill the ``struct rte_event_eth_rx_adapter_conf structure`` > passed to it. > >-If the application desires to control the event buffer size, it can use the >-``rte_event_eth_rx_adapter_create_with_params()`` api. The event >buffer size is >-specified using ``struct >rte_event_eth_rx_adapter_params::event_buf_size``. >-The function is passed the event device to be associated with the >adapter >-and port configuration for the adapter to setup an event port if the >-adapter needs to use a service function. >+If the application desires to control the event buffer size at adapter >level, >+it can use the ``rte_event_eth_rx_adapter_create_with_params()`` >api. The event >+buffer size is specified using ``struct >rte_event_eth_rx_adapter_params:: >+event_buf_size``. To configure the event buffer size at queue level, >the boolean >+flag ``struct >rte_event_eth_rx_adapter_params::use_queue_event_buf`` need to >be >+set to true. The function is passed the event device to be associated >with >+the adapter and port configuration for the adapter to setup an event >port >+if the adapter needs to use a service function. > > Adding Rx Queues to the Adapter Instance > ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ >@@ -79,7 +81,9 @@ parameter. Event information for packets from this >Rx queue is encoded in the > ``ev`` field of ``struct rte_event_eth_rx_adapter_queue_conf``. The > servicing_weight member of the struct >rte_event_eth_rx_adapter_queue_conf > is the relative polling frequency of the Rx queue and is applicable when >the >-adapter uses a service core function. >+adapter uses a service core function. The applications can configure >queue >+event buffer size in ``struct >rte_event_eth_rx_adapter_queue_conf::event_buf_size`` >+parameter. > > .. code-block:: c > >@@ -90,6 +94,7 @@ adapter uses a service core function. 
> queue_config.rx_queue_flags = 0; > queue_config.ev = ev; > queue_config.servicing_weight = 1; >+ queue_config.event_buf_size = 1024; Wasn't this set to 6 * BURST_SIZE in the previous patch? > > err = rte_event_eth_rx_adapter_queue_add(id, > eth_dev_id, >diff --git a/lib/eventdev/rte_event_eth_rx_adapter.h >b/lib/eventdev/rte_event_eth_rx_adapter.h >index a7881097b4..b9f0563244 100644 >--- a/lib/eventdev/rte_event_eth_rx_adapter.h >+++ b/lib/eventdev/rte_event_eth_rx_adapter.h >@@ -199,6 +199,8 @@ struct rte_event_eth_rx_adapter_queue_conf { > * Valid when >RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR flag is set in > * @see >rte_event_eth_rx_adapter_queue_conf::rx_queue_flags. > */ >+ uint16_t event_buf_size; >+ /**< event buffer size for this queue */ > }; > > /** >@@ -265,6 +267,8 @@ struct rte_event_eth_rx_adapter_params { > /**< size of event buffer for the adapter. > * the size is aligned to BATCH_SIZE and added (2 * BATCH_SIZE) > */ >+ bool use_queue_event_buf; >+ /**< flag to indicate that event buffer is separate for each >queue */ > }; > > /** >-- >2.25.1 ^ permalink raw reply [flat|nested] 81+ messages in thread
* Re: [dpdk-dev] [EXT] [PATCH v2 3/5] eventdev/rx_adapter:add per queue event buffer configure support 2021-09-21 20:32 ` [dpdk-dev] [EXT] " Pavan Nikhilesh Bhagavatula @ 2021-09-22 15:39 ` Naga Harish K, S V 0 siblings, 0 replies; 81+ messages in thread From: Naga Harish K, S V @ 2021-09-22 15:39 UTC (permalink / raw) To: Pavan Nikhilesh Bhagavatula, Jerin Jacob Kollanukkaran, Jayatheerthan, Jay Cc: dev > -----Original Message----- > From: Pavan Nikhilesh Bhagavatula <pbhagavatula@marvell.com> > Sent: Wednesday, September 22, 2021 2:02 AM > To: Naga Harish K, S V <s.v.naga.harish.k@intel.com>; Jerin Jacob > Kollanukkaran <jerinj@marvell.com>; Jayatheerthan, Jay > <jay.jayatheerthan@intel.com> > Cc: dev@dpdk.org > Subject: RE: [EXT] [dpdk-dev] [PATCH v2 3/5] eventdev/rx_adapter:add per > queue event buffer configure support > > > > >-----Original Message----- > >From: dev <dev-bounces@dpdk.org> On Behalf Of Naga Harish K S V > >Sent: Tuesday, September 21, 2021 2:52 PM > >To: Jerin Jacob Kollanukkaran <jerinj@marvell.com>; > >jay.jayatheerthan@intel.com > >Cc: dev@dpdk.org > >Subject: [EXT] [dpdk-dev] [PATCH v2 3/5] eventdev/rx_adapter:add per > >queue event buffer configure support > > > >External Email > > > >---------------------------------------------------------------------- > >To configure per queue event buffer size, applications sets > >``rte_event_eth_rx_adapter_params::use_queue_event_buf`` flag as true > >and is passed to ``rte_event_eth_rx_adapter_create_with_params`` > >api. > > > >The per queue event buffer size is populated in > >``rte_event_eth_rx_adapter_queue_conf::event_buf_size`` and passed to > >``rte_event_eth_rx_adapter_queue_add`` api. 
> > > >Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com> > >--- > > .../prog_guide/event_ethernet_rx_adapter.rst | 19 ++++++++++++--- > >---- > > lib/eventdev/rte_event_eth_rx_adapter.h | 4 ++++ > > 2 files changed, 16 insertions(+), 7 deletions(-) > > > >diff --git a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst > >b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst > >index dd753613bd..333e6f8192 100644 > >--- a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst > >+++ b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst > >@@ -62,12 +62,14 @@ service function and needs to create an event port > >for it. The callback is expected to fill the ``struct > >rte_event_eth_rx_adapter_conf structure`` passed to it. > > > >-If the application desires to control the event buffer size, it can > >use the -``rte_event_eth_rx_adapter_create_with_params()`` api. The > >event buffer size is -specified using ``struct > >rte_event_eth_rx_adapter_params::event_buf_size``. > >-The function is passed the event device to be associated with the > >adapter -and port configuration for the adapter to setup an event port > >if the -adapter needs to use a service function. > >+If the application desires to control the event buffer size at adapter > >level, > >+it can use the ``rte_event_eth_rx_adapter_create_with_params()`` > >api. The event > >+buffer size is specified using ``struct > >rte_event_eth_rx_adapter_params:: > >+event_buf_size``. To configure the event buffer size at queue level, > >the boolean > >+flag ``struct > >rte_event_eth_rx_adapter_params::use_queue_event_buf`` need to be > >+set to true. The function is passed the event device to be associated > >with > >+the adapter and port configuration for the adapter to setup an event > >port > >+if the adapter needs to use a service function. > > > > Adding Rx Queues to the Adapter Instance > >~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ > >@@ -79,7 +81,9 @@ parameter. 
Event information for packets from this Rx > >queue is encoded in the ``ev`` field of ``struct > >rte_event_eth_rx_adapter_queue_conf``. The servicing_weight member > of > >the struct rte_event_eth_rx_adapter_queue_conf > > is the relative polling frequency of the Rx queue and is applicable > >when the -adapter uses a service core function. > >+adapter uses a service core function. The applications can configure > >queue > >+event buffer size in ``struct > >rte_event_eth_rx_adapter_queue_conf::event_buf_size`` > >+parameter. > > > > .. code-block:: c > > > >@@ -90,6 +94,7 @@ adapter uses a service core function. > > queue_config.rx_queue_flags = 0; > > queue_config.ev = ev; > > queue_config.servicing_weight = 1; > >+ queue_config.event_buf_size = 1024; > > Wasn't this set to 6 * BURST_SIZE in the previous patch? Application may choose to use any value for the event buffer size. That is the purpose of this change, to change event buffer size as per Application requirements. > > > > > err = rte_event_eth_rx_adapter_queue_add(id, > > eth_dev_id, diff --git > >a/lib/eventdev/rte_event_eth_rx_adapter.h > >b/lib/eventdev/rte_event_eth_rx_adapter.h > >index a7881097b4..b9f0563244 100644 > >--- a/lib/eventdev/rte_event_eth_rx_adapter.h > >+++ b/lib/eventdev/rte_event_eth_rx_adapter.h > >@@ -199,6 +199,8 @@ struct rte_event_eth_rx_adapter_queue_conf { > > * Valid when > >RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR flag is set in > > * @see > >rte_event_eth_rx_adapter_queue_conf::rx_queue_flags. > > */ > >+ uint16_t event_buf_size; > >+ /**< event buffer size for this queue */ > > }; > > > > /** > >@@ -265,6 +267,8 @@ struct rte_event_eth_rx_adapter_params { > > /**< size of event buffer for the adapter. > > * the size is aligned to BATCH_SIZE and added (2 * BATCH_SIZE) > > */ > >+ bool use_queue_event_buf; > >+ /**< flag to indicate that event buffer is separate for each > >queue */ > > }; > > > > /** > >-- > >2.25.1 ^ permalink raw reply [flat|nested] 81+ messages in thread
* [dpdk-dev] [PATCH v2 4/5] eventdev/rx_adapter: implement per queue event buffer 2021-09-21 9:21 ` [dpdk-dev] [PATCH v2 1/5] eventdev/rx_adapter: " Naga Harish K S V 2021-09-21 9:21 ` [dpdk-dev] [PATCH v2 2/5] test/event: add unit test for event buffer size config api Naga Harish K S V 2021-09-21 9:21 ` [dpdk-dev] [PATCH v2 3/5] eventdev/rx_adapter:add per queue event buffer configure support Naga Harish K S V @ 2021-09-21 9:21 ` Naga Harish K S V 2021-09-21 9:21 ` [dpdk-dev] [PATCH v2 5/5] test/eventdev: add per rx queue event buffer unit Naga Harish K S V 3 siblings, 0 replies; 81+ messages in thread From: Naga Harish K S V @ 2021-09-21 9:21 UTC (permalink / raw) To: jerinj, jay.jayatheerthan; +Cc: dev this patch implement the per queue event buffer after required validations. Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com> --- lib/eventdev/rte_event_eth_rx_adapter.c | 188 ++++++++++++++++++------ 1 file changed, 139 insertions(+), 49 deletions(-) diff --git a/lib/eventdev/rte_event_eth_rx_adapter.c b/lib/eventdev/rte_event_eth_rx_adapter.c index df1653b497..20ea440275 100644 --- a/lib/eventdev/rte_event_eth_rx_adapter.c +++ b/lib/eventdev/rte_event_eth_rx_adapter.c @@ -99,10 +99,12 @@ struct rte_event_eth_rx_adapter { uint8_t rss_key_be[RSS_KEY_SIZE]; /* Event device identifier */ uint8_t eventdev_id; - /* Per ethernet device structure */ - struct eth_device_info *eth_devices; /* Event port identifier */ uint8_t event_port_id; + /* Flag indicating per rxq event buffer */ + bool use_queue_event_buf; + /* Per ethernet device structure */ + struct eth_device_info *eth_devices; /* Lock to serialize config updates with service function */ rte_spinlock_t rx_lock; /* Max mbufs processed in any service function invocation */ @@ -238,6 +240,7 @@ struct eth_rx_queue_info { uint32_t flow_id_mask; /* Set to ~0 if app provides flow id else 0 */ uint64_t event; struct eth_rx_vector_data vector_data; + struct rte_eth_event_enqueue_buffer *event_buf; }; static 
struct rte_event_eth_rx_adapter **event_eth_rx_adapter; @@ -753,10 +756,9 @@ rxa_enq_block_end_ts(struct rte_event_eth_rx_adapter *rx_adapter, /* Enqueue buffered events to event device */ static inline uint16_t -rxa_flush_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter) +rxa_flush_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter, + struct rte_eth_event_enqueue_buffer *buf) { - struct rte_eth_event_enqueue_buffer *buf = - &rx_adapter->event_enqueue_buffer; struct rte_event_eth_rx_adapter_stats *stats = &rx_adapter->stats; uint16_t count = buf->last ? buf->last - buf->head : buf->count; @@ -874,15 +876,14 @@ rxa_buffer_mbufs(struct rte_event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id, uint16_t rx_queue_id, struct rte_mbuf **mbufs, - uint16_t num) + uint16_t num, + struct rte_eth_event_enqueue_buffer *buf) { uint32_t i; struct eth_device_info *dev_info = &rx_adapter->eth_devices[eth_dev_id]; struct eth_rx_queue_info *eth_rx_queue_info = &dev_info->rx_queue[rx_queue_id]; - struct rte_eth_event_enqueue_buffer *buf = - &rx_adapter->event_enqueue_buffer; uint16_t new_tail = buf->tail; uint64_t event = eth_rx_queue_info->event; uint32_t flow_id_mask = eth_rx_queue_info->flow_id_mask; @@ -968,11 +969,10 @@ rxa_eth_rx(struct rte_event_eth_rx_adapter *rx_adapter, uint16_t queue_id, uint32_t rx_count, uint32_t max_rx, - int *rxq_empty) + int *rxq_empty, + struct rte_eth_event_enqueue_buffer *buf) { struct rte_mbuf *mbufs[BATCH_SIZE]; - struct rte_eth_event_enqueue_buffer *buf = - &rx_adapter->event_enqueue_buffer; struct rte_event_eth_rx_adapter_stats *stats = &rx_adapter->stats; uint16_t n; @@ -985,7 +985,7 @@ rxa_eth_rx(struct rte_event_eth_rx_adapter *rx_adapter, */ while (rxa_pkt_buf_available(buf)) { if (buf->count >= BATCH_SIZE) - rxa_flush_event_buffer(rx_adapter); + rxa_flush_event_buffer(rx_adapter, buf); stats->rx_poll_count++; n = rte_eth_rx_burst(port_id, queue_id, mbufs, BATCH_SIZE); @@ -994,14 +994,14 @@ rxa_eth_rx(struct 
rte_event_eth_rx_adapter *rx_adapter, *rxq_empty = 1; break; } - rxa_buffer_mbufs(rx_adapter, port_id, queue_id, mbufs, n); + rxa_buffer_mbufs(rx_adapter, port_id, queue_id, mbufs, n, buf); nb_rx += n; if (rx_count + nb_rx > max_rx) break; } if (buf->count > 0) - rxa_flush_event_buffer(rx_adapter); + rxa_flush_event_buffer(rx_adapter, buf); return nb_rx; } @@ -1142,7 +1142,7 @@ rxa_intr_ring_dequeue(struct rte_event_eth_rx_adapter *rx_adapter) ring_lock = &rx_adapter->intr_ring_lock; if (buf->count >= BATCH_SIZE) - rxa_flush_event_buffer(rx_adapter); + rxa_flush_event_buffer(rx_adapter, buf); while (rxa_pkt_buf_available(buf)) { struct eth_device_info *dev_info; @@ -1194,7 +1194,7 @@ rxa_intr_ring_dequeue(struct rte_event_eth_rx_adapter *rx_adapter) continue; n = rxa_eth_rx(rx_adapter, port, i, nb_rx, rx_adapter->max_nb_rx, - &rxq_empty); + &rxq_empty, buf); nb_rx += n; enq_buffer_full = !rxq_empty && n == 0; @@ -1215,7 +1215,7 @@ rxa_intr_ring_dequeue(struct rte_event_eth_rx_adapter *rx_adapter) } else { n = rxa_eth_rx(rx_adapter, port, queue, nb_rx, rx_adapter->max_nb_rx, - &rxq_empty); + &rxq_empty, buf); rx_adapter->qd_valid = !rxq_empty; nb_rx += n; if (nb_rx > rx_adapter->max_nb_rx) @@ -1246,13 +1246,12 @@ rxa_poll(struct rte_event_eth_rx_adapter *rx_adapter) { uint32_t num_queue; uint32_t nb_rx = 0; - struct rte_eth_event_enqueue_buffer *buf; + struct rte_eth_event_enqueue_buffer *buf = NULL; uint32_t wrr_pos; uint32_t max_nb_rx; wrr_pos = rx_adapter->wrr_pos; max_nb_rx = rx_adapter->max_nb_rx; - buf = &rx_adapter->event_enqueue_buffer; /* Iterate through a WRR sequence */ for (num_queue = 0; num_queue < rx_adapter->wrr_len; num_queue++) { @@ -1260,24 +1259,36 @@ rxa_poll(struct rte_event_eth_rx_adapter *rx_adapter) uint16_t qid = rx_adapter->eth_rx_poll[poll_idx].eth_rx_qid; uint16_t d = rx_adapter->eth_rx_poll[poll_idx].eth_dev_id; + if (rx_adapter->use_queue_event_buf) { + struct eth_device_info *dev_info = + &rx_adapter->eth_devices[d]; + buf = 
dev_info->rx_queue[qid].event_buf; + } else + buf = &rx_adapter->event_enqueue_buffer; + /* Don't do a batch dequeue from the rx queue if there isn't * enough space in the enqueue buffer. */ if (buf->count >= BATCH_SIZE) - rxa_flush_event_buffer(rx_adapter); + rxa_flush_event_buffer(rx_adapter, buf); if (!rxa_pkt_buf_available(buf)) { - rx_adapter->wrr_pos = wrr_pos; - return nb_rx; + if (rx_adapter->use_queue_event_buf) + goto poll_next_entry; + else { + rx_adapter->wrr_pos = wrr_pos; + return nb_rx; + } } nb_rx += rxa_eth_rx(rx_adapter, d, qid, nb_rx, max_nb_rx, - NULL); + NULL, buf); if (nb_rx > max_nb_rx) { rx_adapter->wrr_pos = (wrr_pos + 1) % rx_adapter->wrr_len; break; } +poll_next_entry: if (++wrr_pos == rx_adapter->wrr_len) wrr_pos = 0; } @@ -1288,12 +1299,18 @@ static void rxa_vector_expire(struct eth_rx_vector_data *vec, void *arg) { struct rte_event_eth_rx_adapter *rx_adapter = arg; - struct rte_eth_event_enqueue_buffer *buf = - &rx_adapter->event_enqueue_buffer; + struct rte_eth_event_enqueue_buffer *buf = NULL; struct rte_event *ev; + if (rx_adapter->use_queue_event_buf) { + struct eth_device_info *dev_info = + &rx_adapter->eth_devices[vec->port]; + buf = dev_info->rx_queue[vec->queue].event_buf; + } else + buf = &rx_adapter->event_enqueue_buffer; + if (buf->count) - rxa_flush_event_buffer(rx_adapter); + rxa_flush_event_buffer(rx_adapter, buf); if (vec->vector_ev->nb_elem == 0) return; @@ -1905,9 +1922,16 @@ rxa_sw_del(struct rte_event_eth_rx_adapter *rx_adapter, rx_adapter->num_rx_intr -= intrq; dev_info->nb_rx_intr -= intrq; dev_info->nb_shared_intr -= intrq && sintrq; + if (rx_adapter->use_queue_event_buf) { + struct rte_eth_event_enqueue_buffer *event_buf = + dev_info->rx_queue[rx_queue_id].event_buf; + rte_free(event_buf->events); + rte_free(event_buf); + dev_info->rx_queue[rx_queue_id].event_buf = NULL; + } } -static void +static int rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter, struct eth_device_info *dev_info, int32_t rx_queue_id, 
@@ -1919,15 +1943,21 @@ rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter, int intrq; int sintrq; struct rte_event *qi_ev; + struct rte_eth_event_enqueue_buffer *new_rx_buf = NULL; + uint16_t eth_dev_id = dev_info->dev->data->port_id; + int ret; if (rx_queue_id == -1) { uint16_t nb_rx_queues; uint16_t i; nb_rx_queues = dev_info->dev->data->nb_rx_queues; - for (i = 0; i < nb_rx_queues; i++) - rxa_add_queue(rx_adapter, dev_info, i, conf); - return; + for (i = 0; i < nb_rx_queues; i++) { + ret = rxa_add_queue(rx_adapter, dev_info, i, conf); + if (ret) + return ret; + } + return 0; } pollq = rxa_polled_queue(dev_info, rx_queue_id); @@ -1990,6 +2020,37 @@ rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter, dev_info->next_q_idx = 0; } } + + if (!rx_adapter->use_queue_event_buf) + return 0; + + new_rx_buf = rte_zmalloc_socket("rx_buffer_meta", + sizeof(*new_rx_buf), 0, + rte_eth_dev_socket_id(eth_dev_id)); + if (new_rx_buf == NULL) { + RTE_EDEV_LOG_ERR("Failed to allocate event buffer meta for " + "dev_id: %d queue_id: %d", + eth_dev_id, rx_queue_id); + return -ENOMEM; + } + + new_rx_buf->events_size = RTE_ALIGN(conf->event_buf_size, BATCH_SIZE); + new_rx_buf->events_size += (2 * BATCH_SIZE); + new_rx_buf->events = rte_zmalloc_socket("rx_buffer", + sizeof(struct rte_event) * + new_rx_buf->events_size, 0, + rte_eth_dev_socket_id(eth_dev_id)); + if (new_rx_buf->events == NULL) { + rte_free(new_rx_buf); + RTE_EDEV_LOG_ERR("Failed to allocate event buffer for " + "dev_id: %d queue_id: %d", + eth_dev_id, rx_queue_id); + return -ENOMEM; + } + + queue_info->event_buf = new_rx_buf; + + return 0; } static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter, @@ -2018,6 +2079,16 @@ static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter, temp_conf.servicing_weight = 1; } queue_conf = &temp_conf; + + if (queue_conf->servicing_weight == 0 && + rx_adapter->use_queue_event_buf) { + + RTE_EDEV_LOG_ERR("Use of queue level event buffer " + "not supported for 
interrupt queues " + "dev_id: %d queue_id: %d", + eth_dev_id, rx_queue_id); + return -EINVAL; + } } nb_rx_queues = dev_info->dev->data->nb_rx_queues; @@ -2097,7 +2168,9 @@ static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter, - rxa_add_queue(rx_adapter, dev_info, rx_queue_id, queue_conf); + ret = rxa_add_queue(rx_adapter, dev_info, rx_queue_id, queue_conf); + if (ret) + goto err_free_rxqueue; rxa_calc_wrr_sequence(rx_adapter, rx_poll, rx_wrr); rte_free(rx_adapter->eth_rx_poll); @@ -2118,7 +2191,7 @@ static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter, rte_free(rx_poll); rte_free(rx_wrr); - return 0; + return ret; } static int @@ -2244,20 +2317,26 @@ rxa_create(uint8_t id, uint8_t dev_id, rx_adapter->eth_devices[i].dev = &rte_eth_devices[i]; /* Rx adapter event buffer allocation */ - buf = &rx_adapter->event_enqueue_buffer; - buf->events_size = RTE_ALIGN(rxa_params->event_buf_size, BATCH_SIZE); - - events = rte_zmalloc_socket(rx_adapter->mem_name, - buf->events_size * sizeof(*events), - 0, socket_id); - if (events == NULL) { - RTE_EDEV_LOG_ERR("Failed to allocate mem for event buffer\n"); - rte_free(rx_adapter->eth_devices); - rte_free(rx_adapter); - return -ENOMEM; - } + rx_adapter->use_queue_event_buf = rxa_params->use_queue_event_buf; + + if (!rx_adapter->use_queue_event_buf) { + buf = &rx_adapter->event_enqueue_buffer; + buf->events_size = RTE_ALIGN(rxa_params->event_buf_size, + BATCH_SIZE); + + events = rte_zmalloc_socket(rx_adapter->mem_name, + buf->events_size * sizeof(*events), + 0, socket_id); + if (events == NULL) { + RTE_EDEV_LOG_ERR("Failed to allocate memory " + "for adapter event buffer"); + rte_free(rx_adapter->eth_devices); + rte_free(rx_adapter); + return -ENOMEM; + } - rx_adapter->event_enqueue_buffer.events = events; + rx_adapter->event_enqueue_buffer.events = events; + } event_eth_rx_adapter[id] = rx_adapter; @@ -2277,6 +2356,8 @@ rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, /* Event buffer with 
default size = 6*BATCH_SIZE */ rxa_params.event_buf_size = ETH_EVENT_BUFFER_SIZE; + rxa_params.use_queue_event_buf = false; + return rxa_create(id, dev_id, &rxa_params, conf_cb, conf_arg); } @@ -2296,9 +2377,9 @@ rte_event_eth_rx_adapter_create_with_params(uint8_t id, uint8_t dev_id, if (rxa_params == NULL) { rxa_params = &temp_params; rxa_params->event_buf_size = ETH_EVENT_BUFFER_SIZE; - } - - if (rxa_params->event_buf_size == 0) + rxa_params->use_queue_event_buf = false; + } else if ((!rxa_params->use_queue_event_buf && + rxa_params->event_buf_size == 0)) return -EINVAL; pc = rte_malloc(NULL, sizeof(*pc), 0); @@ -2364,7 +2445,8 @@ rte_event_eth_rx_adapter_free(uint8_t id) if (rx_adapter->default_cb_arg) rte_free(rx_adapter->conf_arg); rte_free(rx_adapter->eth_devices); - rte_free(rx_adapter->event_enqueue_buffer.events); + if (!rx_adapter->use_queue_event_buf) + rte_free(rx_adapter->event_enqueue_buffer.events); rte_free(rx_adapter); event_eth_rx_adapter[id] = NULL; @@ -2468,6 +2550,14 @@ rte_event_eth_rx_adapter_queue_add(uint8_t id, return -EINVAL; } + if ((rx_adapter->use_queue_event_buf && + queue_conf->event_buf_size == 0) || + (!rx_adapter->use_queue_event_buf && + queue_conf->event_buf_size != 0)) { + RTE_EDEV_LOG_ERR("Invalid Event buffer size for the queue"); + return -EINVAL; + } + dev_info = &rx_adapter->eth_devices[eth_dev_id]; if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) { -- 2.25.1 ^ permalink raw reply [flat|nested] 81+ messages in thread
* [dpdk-dev] [PATCH v2 5/5] test/eventdev: add per rx queue event buffer unit 2021-09-21 9:21 ` [dpdk-dev] [PATCH v2 1/5] eventdev/rx_adapter: " Naga Harish K S V ` (2 preceding siblings ...) 2021-09-21 9:21 ` [dpdk-dev] [PATCH v2 4/5] eventdev/rx_adapter: implement per queue event buffer Naga Harish K S V @ 2021-09-21 9:21 ` Naga Harish K S V 3 siblings, 0 replies; 81+ messages in thread From: Naga Harish K S V @ 2021-09-21 9:21 UTC (permalink / raw) To: jerinj, jay.jayatheerthan; +Cc: dev this patch adds unit tests for per rx queue event buffer Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com> --- app/test/test_event_eth_rx_adapter.c | 86 ++++++++++++++++++++++++++++ 1 file changed, 86 insertions(+) diff --git a/app/test/test_event_eth_rx_adapter.c b/app/test/test_event_eth_rx_adapter.c index 3c0f0ad7cc..11110564d0 100644 --- a/app/test/test_event_eth_rx_adapter.c +++ b/app/test/test_event_eth_rx_adapter.c @@ -387,6 +387,90 @@ adapter_create(void) return err; } +static int +adapter_create_with_params(void) +{ + int err; + struct rte_event_dev_info dev_info; + struct rte_event_port_conf rx_p_conf; + struct rte_event_eth_rx_adapter_params rxa_params; + + memset(&rx_p_conf, 0, sizeof(rx_p_conf)); + + err = rte_event_dev_info_get(TEST_DEV_ID, &dev_info); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + rx_p_conf.new_event_threshold = dev_info.max_num_events; + rx_p_conf.dequeue_depth = dev_info.max_event_port_dequeue_depth; + rx_p_conf.enqueue_depth = dev_info.max_event_port_enqueue_depth; + + rxa_params.use_queue_event_buf = false; + rxa_params.event_buf_size = 0; + + err = rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID, + TEST_DEV_ID, &rx_p_conf, &rxa_params); + TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err); + + rxa_params.use_queue_event_buf = true; + + err = rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID, + TEST_DEV_ID, &rx_p_conf, &rxa_params); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + err = 
rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID, + TEST_DEV_ID, &rx_p_conf, &rxa_params); + TEST_ASSERT(err == -EEXIST, "Expected -EEXIST got %d", err); + + return TEST_SUCCESS; +} + +static int +adapter_queue_event_buf_test(void) +{ + int err; + struct rte_event ev; + uint32_t cap; + + struct rte_event_eth_rx_adapter_queue_conf queue_config = {0}; + + err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID, TEST_ETHDEV_ID, + &cap); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + ev.queue_id = 0; + ev.sched_type = RTE_SCHED_TYPE_ATOMIC; + ev.priority = 0; + + queue_config.rx_queue_flags = 0; + if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID) { + ev.flow_id = 1; + queue_config.rx_queue_flags = + RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID; + } + queue_config.ev = ev; + queue_config.servicing_weight = 1; + queue_config.event_buf_size = 0; + + err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID, + TEST_ETHDEV_ID, 0, + &queue_config); + TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err); + + queue_config.event_buf_size = 1024; + + err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID, + TEST_ETHDEV_ID, 0, + &queue_config); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID, + TEST_ETHDEV_ID, + 0); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + return TEST_SUCCESS; +} + static void adapter_free(void) { @@ -804,6 +888,8 @@ static struct unit_test_suite event_eth_rx_tests = { adapter_multi_eth_add_del), TEST_CASE_ST(adapter_create, adapter_free, adapter_start_stop), TEST_CASE_ST(adapter_create, adapter_free, adapter_stats), + TEST_CASE_ST(adapter_create_with_params, adapter_free, + adapter_queue_event_buf_test), TEST_CASES_END() /**< NULL terminate unit test array */ } }; -- 2.25.1 ^ permalink raw reply [flat|nested] 81+ messages in thread
* [dpdk-dev] [PATCH v2 1/5] eventdev/rx_adapter: add support to configure event buffer size 2021-09-18 13:11 [dpdk-dev] [PATCH v1 1/5] eventdev: rx_adapter: add support to configure event buffer size Naga Harish K S V ` (5 preceding siblings ...) 2021-09-21 9:21 ` [dpdk-dev] [PATCH v2 1/5] eventdev/rx_adapter: " Naga Harish K S V @ 2021-09-21 9:45 ` Naga Harish K S V 2021-09-21 9:45 ` [dpdk-dev] [PATCH v2 2/5] test/event: add unit test for event buffer size config api Naga Harish K S V ` (5 more replies) 6 siblings, 6 replies; 81+ messages in thread From: Naga Harish K S V @ 2021-09-21 9:45 UTC (permalink / raw) To: jerinj, jay.jayatheerthan; +Cc: dev, Ganapati Kundapura Currently Rx event buffer is static array with a default size of 192(6*BATCH_SIZE). ``rte_event_eth_rx_adapter_create_with_params`` api is added which takes ``struct rte_event_eth_rx_adapter_params`` to configure event buffer size in addition to other params. The event buffer is allocated from heap after aligning the size to BATCH_SIZE and adding 2*BATCH_SIZE. In case of NULL params argument, default event buffer size is used. Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com> Signed-off-by: Ganapati Kundapura <ganapati.kundapura@intel.com> --- v2: * Updated header file and rx adapter documentation as per review comments. * new api name is modified as rte_event_eth_rx_adapter_create_with_params as per review comments. * rxa_params pointer argument value NULL is allowed to represent the default values v1: * Initial implementation with documentation and unit tests. 
--- .../prog_guide/event_ethernet_rx_adapter.rst | 7 ++ lib/eventdev/rte_event_eth_rx_adapter.c | 94 +++++++++++++++++-- lib/eventdev/rte_event_eth_rx_adapter.h | 40 +++++++- lib/eventdev/version.map | 2 + 4 files changed, 135 insertions(+), 8 deletions(-) diff --git a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst index 0780b6f711..dd753613bd 100644 --- a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst +++ b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst @@ -62,6 +62,13 @@ service function and needs to create an event port for it. The callback is expected to fill the ``struct rte_event_eth_rx_adapter_conf structure`` passed to it. +If the application desires to control the event buffer size, it can use the +``rte_event_eth_rx_adapter_create_with_params()`` api. The event buffer size is +specified using ``struct rte_event_eth_rx_adapter_params::event_buf_size``. +The function is passed the event device to be associated with the adapter +and port configuration for the adapter to setup an event port if the +adapter needs to use a service function. 
+ Adding Rx Queues to the Adapter Instance ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/lib/eventdev/rte_event_eth_rx_adapter.c b/lib/eventdev/rte_event_eth_rx_adapter.c index f2dc69503d..df1653b497 100644 --- a/lib/eventdev/rte_event_eth_rx_adapter.c +++ b/lib/eventdev/rte_event_eth_rx_adapter.c @@ -82,7 +82,9 @@ struct rte_eth_event_enqueue_buffer { /* Count of events in this buffer */ uint16_t count; /* Array of events in this buffer */ - struct rte_event events[ETH_EVENT_BUFFER_SIZE]; + struct rte_event *events; + /* size of event buffer */ + uint16_t events_size; /* Event enqueue happens from head */ uint16_t head; /* New packets from rte_eth_rx_burst is enqued from tail */ @@ -919,7 +921,7 @@ rxa_buffer_mbufs(struct rte_event_eth_rx_adapter *rx_adapter, dropped = 0; nb_cb = dev_info->cb_fn(eth_dev_id, rx_queue_id, buf->last | - (RTE_DIM(buf->events) & ~buf->last_mask), + (buf->events_size & ~buf->last_mask), buf->count >= BATCH_SIZE ? buf->count - BATCH_SIZE : 0, &buf->events[buf->tail], @@ -945,7 +947,7 @@ rxa_pkt_buf_available(struct rte_eth_event_enqueue_buffer *buf) uint32_t nb_req = buf->tail + BATCH_SIZE; if (!buf->last) { - if (nb_req <= RTE_DIM(buf->events)) + if (nb_req <= buf->events_size) return true; if (buf->head >= BATCH_SIZE) { @@ -2164,12 +2166,15 @@ rxa_ctrl(uint8_t id, int start) return 0; } -int -rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, - rte_event_eth_rx_adapter_conf_cb conf_cb, - void *conf_arg) +static int +rxa_create(uint8_t id, uint8_t dev_id, + struct rte_event_eth_rx_adapter_params *rxa_params, + rte_event_eth_rx_adapter_conf_cb conf_cb, + void *conf_arg) { struct rte_event_eth_rx_adapter *rx_adapter; + struct rte_eth_event_enqueue_buffer *buf; + struct rte_event *events; int ret; int socket_id; uint16_t i; @@ -2184,6 +2189,7 @@ rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL); RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); 
+ if (conf_cb == NULL) return -EINVAL; @@ -2231,11 +2237,30 @@ rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, rte_free(rx_adapter); return -ENOMEM; } + rte_spinlock_init(&rx_adapter->rx_lock); + for (i = 0; i < RTE_MAX_ETHPORTS; i++) rx_adapter->eth_devices[i].dev = &rte_eth_devices[i]; + /* Rx adapter event buffer allocation */ + buf = &rx_adapter->event_enqueue_buffer; + buf->events_size = RTE_ALIGN(rxa_params->event_buf_size, BATCH_SIZE); + + events = rte_zmalloc_socket(rx_adapter->mem_name, + buf->events_size * sizeof(*events), + 0, socket_id); + if (events == NULL) { + RTE_EDEV_LOG_ERR("Failed to allocate mem for event buffer\n"); + rte_free(rx_adapter->eth_devices); + rte_free(rx_adapter); + return -ENOMEM; + } + + rx_adapter->event_enqueue_buffer.events = events; + event_eth_rx_adapter[id] = rx_adapter; + if (conf_cb == rxa_default_conf_cb) rx_adapter->default_cb_arg = 1; rte_eventdev_trace_eth_rx_adapter_create(id, dev_id, conf_cb, @@ -2243,6 +2268,57 @@ rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, return 0; } +int +rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, + rte_event_eth_rx_adapter_conf_cb conf_cb, + void *conf_arg) +{ + struct rte_event_eth_rx_adapter_params rxa_params; + + /* Event buffer with default size = 6*BATCH_SIZE */ + rxa_params.event_buf_size = ETH_EVENT_BUFFER_SIZE; + return rxa_create(id, dev_id, &rxa_params, conf_cb, conf_arg); +} + +int +rte_event_eth_rx_adapter_create_with_params(uint8_t id, uint8_t dev_id, + struct rte_event_port_conf *port_config, + struct rte_event_eth_rx_adapter_params *rxa_params) +{ + struct rte_event_port_conf *pc; + int ret; + struct rte_event_eth_rx_adapter_params temp_params = {0}; + + if (port_config == NULL) + return -EINVAL; + + /* use default values if rxa_parmas is NULL */ + if (rxa_params == NULL) { + rxa_params = &temp_params; + rxa_params->event_buf_size = ETH_EVENT_BUFFER_SIZE; + } + + if (rxa_params->event_buf_size == 0) + return -EINVAL; + + pc 
= rte_malloc(NULL, sizeof(*pc), 0); + if (pc == NULL) + return -ENOMEM; + + *pc = *port_config; + + /* event buff size aligned to BATCH_SIZE + 2*BATCH_SIZE */ + rxa_params->event_buf_size = RTE_ALIGN(rxa_params->event_buf_size, + BATCH_SIZE); + rxa_params->event_buf_size += BATCH_SIZE + BATCH_SIZE; + + ret = rxa_create(id, dev_id, rxa_params, rxa_default_conf_cb, pc); + if (ret) + rte_free(pc); + + return ret; +} + int rte_event_eth_rx_adapter_create(uint8_t id, uint8_t dev_id, struct rte_event_port_conf *port_config) @@ -2252,12 +2328,14 @@ rte_event_eth_rx_adapter_create(uint8_t id, uint8_t dev_id, if (port_config == NULL) return -EINVAL; + RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL); pc = rte_malloc(NULL, sizeof(*pc), 0); if (pc == NULL) return -ENOMEM; *pc = *port_config; + ret = rte_event_eth_rx_adapter_create_ext(id, dev_id, rxa_default_conf_cb, pc); @@ -2286,6 +2364,7 @@ rte_event_eth_rx_adapter_free(uint8_t id) if (rx_adapter->default_cb_arg) rte_free(rx_adapter->conf_arg); rte_free(rx_adapter->eth_devices); + rte_free(rx_adapter->event_enqueue_buffer.events); rte_free(rx_adapter); event_eth_rx_adapter[id] = NULL; @@ -2658,6 +2737,7 @@ rte_event_eth_rx_adapter_stats_get(uint8_t id, stats->rx_packets += dev_stats_sum.rx_packets; stats->rx_enq_count += dev_stats_sum.rx_enq_count; + return 0; } diff --git a/lib/eventdev/rte_event_eth_rx_adapter.h b/lib/eventdev/rte_event_eth_rx_adapter.h index 3f8b362295..a7881097b4 100644 --- a/lib/eventdev/rte_event_eth_rx_adapter.h +++ b/lib/eventdev/rte_event_eth_rx_adapter.h @@ -26,6 +26,7 @@ * The ethernet Rx event adapter's functions are: * - rte_event_eth_rx_adapter_create_ext() * - rte_event_eth_rx_adapter_create() + * - rte_event_eth_rx_adapter_create_with_params() * - rte_event_eth_rx_adapter_free() * - rte_event_eth_rx_adapter_queue_add() * - rte_event_eth_rx_adapter_queue_del() @@ -36,7 +37,7 @@ * * The application creates an ethernet to event adapter using * rte_event_eth_rx_adapter_create_ext() or 
rte_event_eth_rx_adapter_create() - * functions. + * or rte_event_eth_rx_adapter_create_with_params() functions. * The adapter needs to know which ethernet rx queues to poll for mbufs as well * as event device parameters such as the event queue identifier, event * priority and scheduling type that the adapter should use when constructing @@ -256,6 +257,16 @@ struct rte_event_eth_rx_adapter_vector_limits { */ }; +/** + * A structure to hold adapter config params + */ +struct rte_event_eth_rx_adapter_params { + uint16_t event_buf_size; + /**< size of event buffer for the adapter. + * the size is aligned to BATCH_SIZE and added (2 * BATCH_SIZE) + */ +}; + /** * * Callback function invoked by the SW adapter before it continues @@ -356,6 +367,33 @@ int rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, int rte_event_eth_rx_adapter_create(uint8_t id, uint8_t dev_id, struct rte_event_port_conf *port_config); +/** + * This is a variant of rte_event_eth_rx_adapter_create() with additional + * adapter params specified in ``struct rte_event_eth_rx_adapter_params``. + * + * @param id + * The identifier of the ethernet Rx event adapter. + * + * @param dev_id + * The identifier of the event device to configure. + * + * @param port_config + * Argument of type *rte_event_port_conf* that is passed to the conf_cb + * function. + * + * @param rxa_params + * Pointer to struct rte_event_eth_rx_adapter_params. + * In case of NULL, default values are used. 
+ * + * @return + * - 0: Success + * - <0: Error code on failure + */ +__rte_experimental +int rte_event_eth_rx_adapter_create_with_params(uint8_t id, uint8_t dev_id, + struct rte_event_port_conf *port_config, + struct rte_event_eth_rx_adapter_params *rxa_params); + /** * Free an event adapter * diff --git a/lib/eventdev/version.map b/lib/eventdev/version.map index cd86d2d908..87586de879 100644 --- a/lib/eventdev/version.map +++ b/lib/eventdev/version.map @@ -138,6 +138,8 @@ EXPERIMENTAL { __rte_eventdev_trace_port_setup; # added in 20.11 rte_event_pmd_pci_probe_named; + # added in 21.11 + rte_event_eth_rx_adapter_create_with_params; #added in 21.05 rte_event_vector_pool_create; -- 2.25.1 ^ permalink raw reply [flat|nested] 81+ messages in thread
* [dpdk-dev] [PATCH v2 2/5] test/event: add unit test for event buffer size config api 2021-09-21 9:45 ` [dpdk-dev] [PATCH v2 1/5] eventdev/rx_adapter: add support to configure event buffer size Naga Harish K S V @ 2021-09-21 9:45 ` Naga Harish K S V 2021-09-21 20:28 ` [dpdk-dev] [EXT] " Pavan Nikhilesh Bhagavatula 2021-09-21 9:45 ` [dpdk-dev] [PATCH v2 3/5] eventdev/rx_adapter:add per queue event buffer configure support Naga Harish K S V ` (4 subsequent siblings) 5 siblings, 1 reply; 81+ messages in thread From: Naga Harish K S V @ 2021-09-21 9:45 UTC (permalink / raw) To: jerinj, jay.jayatheerthan; +Cc: dev this patch adds unit test for rte_event_eth_rx_adapter_create_with_params api and validate all possible input combinations. Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com> --- app/test/test_event_eth_rx_adapter.c | 53 +++++++++++++++++++++++++--- 1 file changed, 49 insertions(+), 4 deletions(-) diff --git a/app/test/test_event_eth_rx_adapter.c b/app/test/test_event_eth_rx_adapter.c index add4d8a678..3c0f0ad7cc 100644 --- a/app/test/test_event_eth_rx_adapter.c +++ b/app/test/test_event_eth_rx_adapter.c @@ -428,6 +428,50 @@ adapter_create_free(void) return TEST_SUCCESS; } +static int +adapter_create_free_v2(void) +{ + int err; + + struct rte_event_port_conf rx_p_conf = { + .dequeue_depth = 8, + .enqueue_depth = 8, + .new_event_threshold = 1200, + }; + + struct rte_event_eth_rx_adapter_params rxa_params = { + .event_buf_size = 1024 + }; + + err = rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID, + TEST_DEV_ID, NULL, NULL); + TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err); + + err = rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID, + TEST_DEV_ID, &rx_p_conf, &rxa_params); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + err = rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID, + TEST_DEV_ID, &rx_p_conf, &rxa_params); + TEST_ASSERT(err == -EEXIST, "Expected -EEXIST %d got %d", -EEXIST, err); + + 
rxa_params.event_buf_size = 0; + err = rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID, + TEST_DEV_ID, &rx_p_conf, &rxa_params); + TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err); + + err = rte_event_eth_rx_adapter_free(TEST_INST_ID); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + err = rte_event_eth_rx_adapter_free(TEST_INST_ID); + TEST_ASSERT(err == -EINVAL, "Expected -EINVAL %d got %d", -EINVAL, err); + + err = rte_event_eth_rx_adapter_free(1); + TEST_ASSERT(err == -EINVAL, "Expected -EINVAL %d got %d", -EINVAL, err); + + return TEST_SUCCESS; +} + static int adapter_queue_add_del(void) { @@ -435,7 +479,7 @@ adapter_queue_add_del(void) struct rte_event ev; uint32_t cap; - struct rte_event_eth_rx_adapter_queue_conf queue_config; + struct rte_event_eth_rx_adapter_queue_conf queue_config = {0}; err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID, TEST_ETHDEV_ID, &cap); @@ -523,7 +567,7 @@ adapter_multi_eth_add_del(void) uint16_t port_index, port_index_base, drv_id = 0; char driver_name[50]; - struct rte_event_eth_rx_adapter_queue_conf queue_config; + struct rte_event_eth_rx_adapter_queue_conf queue_config = {0}; ev.queue_id = 0; ev.sched_type = RTE_SCHED_TYPE_ATOMIC; @@ -594,7 +638,7 @@ adapter_intr_queue_add_del(void) struct rte_event ev; uint32_t cap; uint16_t eth_port; - struct rte_event_eth_rx_adapter_queue_conf queue_config; + struct rte_event_eth_rx_adapter_queue_conf queue_config = {0}; if (!default_params.rx_intr_port_inited) return 0; @@ -687,7 +731,7 @@ adapter_start_stop(void) ev.sched_type = RTE_SCHED_TYPE_ATOMIC; ev.priority = 0; - struct rte_event_eth_rx_adapter_queue_conf queue_config; + struct rte_event_eth_rx_adapter_queue_conf queue_config = {0}; queue_config.rx_queue_flags = 0; if (default_params.caps & @@ -753,6 +797,7 @@ static struct unit_test_suite event_eth_rx_tests = { .teardown = testsuite_teardown, .unit_test_cases = { TEST_CASE_ST(NULL, NULL, adapter_create_free), + TEST_CASE_ST(NULL, NULL, 
adapter_create_free_v2), TEST_CASE_ST(adapter_create, adapter_free, adapter_queue_add_del), TEST_CASE_ST(adapter_create, adapter_free, -- 2.25.1 ^ permalink raw reply [flat|nested] 81+ messages in thread
* Re: [dpdk-dev] [EXT] [PATCH v2 2/5] test/event: add unit test for event buffer size config api 2021-09-21 9:45 ` [dpdk-dev] [PATCH v2 2/5] test/event: add unit test for event buffer size config api Naga Harish K S V @ 2021-09-21 20:28 ` Pavan Nikhilesh Bhagavatula 2021-09-22 15:37 ` Naga Harish K, S V 0 siblings, 1 reply; 81+ messages in thread From: Pavan Nikhilesh Bhagavatula @ 2021-09-21 20:28 UTC (permalink / raw) To: Naga Harish K S V, Jerin Jacob Kollanukkaran, jay.jayatheerthan; +Cc: dev >this patch adds unit test for >rte_event_eth_rx_adapter_create_with_params >api and validate all possible input combinations. > >Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com> >--- > app/test/test_event_eth_rx_adapter.c | 53 >+++++++++++++++++++++++++--- > 1 file changed, 49 insertions(+), 4 deletions(-) > >diff --git a/app/test/test_event_eth_rx_adapter.c >b/app/test/test_event_eth_rx_adapter.c >index add4d8a678..3c0f0ad7cc 100644 >--- a/app/test/test_event_eth_rx_adapter.c >+++ b/app/test/test_event_eth_rx_adapter.c >@@ -428,6 +428,50 @@ adapter_create_free(void) > return TEST_SUCCESS; > } > >+static int >+adapter_create_free_v2(void) >+{ >+ int err; >+ >+ struct rte_event_port_conf rx_p_conf = { >+ .dequeue_depth = 8, >+ .enqueue_depth = 8, >+ .new_event_threshold = 1200, >+ }; >+ >+ struct rte_event_eth_rx_adapter_params rxa_params = { >+ .event_buf_size = 1024 >+ }; >+ >+ err = >rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID, >+ TEST_DEV_ID, NULL, NULL); >+ TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err); >+ >+ err = >rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID, >+ TEST_DEV_ID, &rx_p_conf, >&rxa_params); >+ TEST_ASSERT(err == 0, "Expected 0 got %d", err); >+ >+ err = >rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID, >+ TEST_DEV_ID, &rx_p_conf, >&rxa_params); >+ TEST_ASSERT(err == -EEXIST, "Expected -EEXIST %d got %d", - >EEXIST, err); >+ >+ rxa_params.event_buf_size = 0; >+ err = 
>rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID, >+ TEST_DEV_ID, &rx_p_conf, >&rxa_params); >+ TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err); >+ >+ err = rte_event_eth_rx_adapter_free(TEST_INST_ID); >+ TEST_ASSERT(err == 0, "Expected 0 got %d", err); >+ >+ err = rte_event_eth_rx_adapter_free(TEST_INST_ID); >+ TEST_ASSERT(err == -EINVAL, "Expected -EINVAL %d got %d", - >EINVAL, err); >+ >+ err = rte_event_eth_rx_adapter_free(1); >+ TEST_ASSERT(err == -EINVAL, "Expected -EINVAL %d got %d", - >EINVAL, err); >+ >+ return TEST_SUCCESS; >+} >+ > static int > adapter_queue_add_del(void) > { >@@ -435,7 +479,7 @@ adapter_queue_add_del(void) > struct rte_event ev; > uint32_t cap; > >- struct rte_event_eth_rx_adapter_queue_conf queue_config; >+ struct rte_event_eth_rx_adapter_queue_conf queue_config = >{0}; > > err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID, >TEST_ETHDEV_ID, > &cap); >@@ -523,7 +567,7 @@ adapter_multi_eth_add_del(void) > uint16_t port_index, port_index_base, drv_id = 0; > char driver_name[50]; > >- struct rte_event_eth_rx_adapter_queue_conf queue_config; >+ struct rte_event_eth_rx_adapter_queue_conf queue_config = >{0}; > > ev.queue_id = 0; > ev.sched_type = RTE_SCHED_TYPE_ATOMIC; >@@ -594,7 +638,7 @@ adapter_intr_queue_add_del(void) > struct rte_event ev; > uint32_t cap; > uint16_t eth_port; >- struct rte_event_eth_rx_adapter_queue_conf queue_config; >+ struct rte_event_eth_rx_adapter_queue_conf queue_config = >{0}; > > if (!default_params.rx_intr_port_inited) > return 0; >@@ -687,7 +731,7 @@ adapter_start_stop(void) > ev.sched_type = RTE_SCHED_TYPE_ATOMIC; > ev.priority = 0; > >- struct rte_event_eth_rx_adapter_queue_conf queue_config; >+ struct rte_event_eth_rx_adapter_queue_conf queue_config = >{0}; > > queue_config.rx_queue_flags = 0; > if (default_params.caps & >@@ -753,6 +797,7 @@ static struct unit_test_suite event_eth_rx_tests >= { > .teardown = testsuite_teardown, > .unit_test_cases = { > TEST_CASE_ST(NULL, NULL, 
adapter_create_free), >+ TEST_CASE_ST(NULL, NULL, adapter_create_free_v2), Please use a meaningful function name. In case of failure I don't think "v2" will give any meaning > TEST_CASE_ST(adapter_create, adapter_free, > adapter_queue_add_del), > TEST_CASE_ST(adapter_create, adapter_free, >-- >2.25.1 ^ permalink raw reply [flat|nested] 81+ messages in thread
* Re: [dpdk-dev] [EXT] [PATCH v2 2/5] test/event: add unit test for event buffer size config api 2021-09-21 20:28 ` [dpdk-dev] [EXT] " Pavan Nikhilesh Bhagavatula @ 2021-09-22 15:37 ` Naga Harish K, S V 0 siblings, 0 replies; 81+ messages in thread From: Naga Harish K, S V @ 2021-09-22 15:37 UTC (permalink / raw) To: Pavan Nikhilesh Bhagavatula, Jerin Jacob Kollanukkaran, Jayatheerthan, Jay Cc: dev > -----Original Message----- > From: Pavan Nikhilesh Bhagavatula <pbhagavatula@marvell.com> > Sent: Wednesday, September 22, 2021 1:58 AM > To: Naga Harish K, S V <s.v.naga.harish.k@intel.com>; Jerin Jacob > Kollanukkaran <jerinj@marvell.com>; Jayatheerthan, Jay > <jay.jayatheerthan@intel.com> > Cc: dev@dpdk.org > Subject: RE: [EXT] [dpdk-dev] [PATCH v2 2/5] test/event: add unit test for > event buffer size config api > > >this patch adds unit test for > >rte_event_eth_rx_adapter_create_with_params > >api and validate all possible input combinations. > > > >Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com> > >--- > > app/test/test_event_eth_rx_adapter.c | 53 > >+++++++++++++++++++++++++--- > > 1 file changed, 49 insertions(+), 4 deletions(-) > > > >diff --git a/app/test/test_event_eth_rx_adapter.c > >b/app/test/test_event_eth_rx_adapter.c > >index add4d8a678..3c0f0ad7cc 100644 > >--- a/app/test/test_event_eth_rx_adapter.c > >+++ b/app/test/test_event_eth_rx_adapter.c > >@@ -428,6 +428,50 @@ adapter_create_free(void) > > return TEST_SUCCESS; > > } > > > >+static int > >+adapter_create_free_v2(void) > >+{ > >+ int err; > >+ > >+ struct rte_event_port_conf rx_p_conf = { > >+ .dequeue_depth = 8, > >+ .enqueue_depth = 8, > >+ .new_event_threshold = 1200, > >+ }; > >+ > >+ struct rte_event_eth_rx_adapter_params rxa_params = { > >+ .event_buf_size = 1024 > >+ }; > >+ > >+ err = > >rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID, > >+ TEST_DEV_ID, NULL, NULL); > >+ TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err); > >+ > >+ err = > 
>rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID, > >+ TEST_DEV_ID, &rx_p_conf, > >&rxa_params); > >+ TEST_ASSERT(err == 0, "Expected 0 got %d", err); > >+ > >+ err = > >rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID, > >+ TEST_DEV_ID, &rx_p_conf, > >&rxa_params); > >+ TEST_ASSERT(err == -EEXIST, "Expected -EEXIST %d got %d", - > >EEXIST, err); > >+ > >+ rxa_params.event_buf_size = 0; > >+ err = > >rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID, > >+ TEST_DEV_ID, &rx_p_conf, > >&rxa_params); > >+ TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err); > >+ > >+ err = rte_event_eth_rx_adapter_free(TEST_INST_ID); > >+ TEST_ASSERT(err == 0, "Expected 0 got %d", err); > >+ > >+ err = rte_event_eth_rx_adapter_free(TEST_INST_ID); > >+ TEST_ASSERT(err == -EINVAL, "Expected -EINVAL %d got %d", - > >EINVAL, err); > >+ > >+ err = rte_event_eth_rx_adapter_free(1); > >+ TEST_ASSERT(err == -EINVAL, "Expected -EINVAL %d got %d", - > >EINVAL, err); > >+ > >+ return TEST_SUCCESS; > >+} > >+ > > static int > > adapter_queue_add_del(void) > > { > >@@ -435,7 +479,7 @@ adapter_queue_add_del(void) > > struct rte_event ev; > > uint32_t cap; > > > >- struct rte_event_eth_rx_adapter_queue_conf queue_config; > >+ struct rte_event_eth_rx_adapter_queue_conf queue_config = > >{0}; > > > > err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID, > >TEST_ETHDEV_ID, > > &cap); > >@@ -523,7 +567,7 @@ adapter_multi_eth_add_del(void) > > uint16_t port_index, port_index_base, drv_id = 0; > > char driver_name[50]; > > > >- struct rte_event_eth_rx_adapter_queue_conf queue_config; > >+ struct rte_event_eth_rx_adapter_queue_conf queue_config = > >{0}; > > > > ev.queue_id = 0; > > ev.sched_type = RTE_SCHED_TYPE_ATOMIC; @@ -594,7 +638,7 @@ > >adapter_intr_queue_add_del(void) > > struct rte_event ev; > > uint32_t cap; > > uint16_t eth_port; > >- struct rte_event_eth_rx_adapter_queue_conf queue_config; > >+ struct rte_event_eth_rx_adapter_queue_conf queue_config = > >{0}; > > > 
> if (!default_params.rx_intr_port_inited) > > return 0; > >@@ -687,7 +731,7 @@ adapter_start_stop(void) > > ev.sched_type = RTE_SCHED_TYPE_ATOMIC; > > ev.priority = 0; > > > >- struct rte_event_eth_rx_adapter_queue_conf queue_config; > >+ struct rte_event_eth_rx_adapter_queue_conf queue_config = > >{0}; > > > > queue_config.rx_queue_flags = 0; > > if (default_params.caps & > >@@ -753,6 +797,7 @@ static struct unit_test_suite event_eth_rx_tests = > >{ > > .teardown = testsuite_teardown, > > .unit_test_cases = { > > TEST_CASE_ST(NULL, NULL, adapter_create_free), > >+ TEST_CASE_ST(NULL, NULL, adapter_create_free_v2), > > Please use a meaningful function name. In case of failure I don't thing "v2" > will gave any meaning The test case name is changed with proper name in v3 of patch set. > > > TEST_CASE_ST(adapter_create, adapter_free, > > adapter_queue_add_del), > > TEST_CASE_ST(adapter_create, adapter_free, > >-- > >2.25.1 ^ permalink raw reply [flat|nested] 81+ messages in thread
* [dpdk-dev] [PATCH v2 3/5] eventdev/rx_adapter:add per queue event buffer configure support 2021-09-21 9:45 ` [dpdk-dev] [PATCH v2 1/5] eventdev/rx_adapter: add support to configure event buffer size Naga Harish K S V 2021-09-21 9:45 ` [dpdk-dev] [PATCH v2 2/5] test/event: add unit test for event buffer size config api Naga Harish K S V @ 2021-09-21 9:45 ` Naga Harish K S V 2021-09-21 9:45 ` [dpdk-dev] [PATCH v2 4/5] eventdev/rx_adapter: implement per queue event buffer Naga Harish K S V ` (3 subsequent siblings) 5 siblings, 0 replies; 81+ messages in thread From: Naga Harish K S V @ 2021-09-21 9:45 UTC (permalink / raw) To: jerinj, jay.jayatheerthan; +Cc: dev To configure per queue event buffer size, applications sets ``rte_event_eth_rx_adapter_params::use_queue_event_buf`` flag as true and is passed to ``rte_event_eth_rx_adapter_create_with_params`` api. The per queue event buffer size is populated in ``rte_event_eth_rx_adapter_queue_conf::event_buf_size`` and passed to ``rte_event_eth_rx_adapter_queue_add`` api. Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com> --- .../prog_guide/event_ethernet_rx_adapter.rst | 19 ++++++++++++------- lib/eventdev/rte_event_eth_rx_adapter.h | 4 ++++ 2 files changed, 16 insertions(+), 7 deletions(-) diff --git a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst index dd753613bd..333e6f8192 100644 --- a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst +++ b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst @@ -62,12 +62,14 @@ service function and needs to create an event port for it. The callback is expected to fill the ``struct rte_event_eth_rx_adapter_conf structure`` passed to it. -If the application desires to control the event buffer size, it can use the -``rte_event_eth_rx_adapter_create_with_params()`` api. The event buffer size is -specified using ``struct rte_event_eth_rx_adapter_params::event_buf_size``. 
-The function is passed the event device to be associated with the adapter -and port configuration for the adapter to setup an event port if the -adapter needs to use a service function. +If the application desires to control the event buffer size at adapter level, +it can use the ``rte_event_eth_rx_adapter_create_with_params()`` api. The event +buffer size is specified using ``struct rte_event_eth_rx_adapter_params:: +event_buf_size``. To configure the event buffer size at queue level, the boolean +flag ``struct rte_event_eth_rx_adapter_params::use_queue_event_buf`` need to be +set to true. The function is passed the event device to be associated with +the adapter and port configuration for the adapter to setup an event port +if the adapter needs to use a service function. Adding Rx Queues to the Adapter Instance ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -79,7 +81,9 @@ parameter. Event information for packets from this Rx queue is encoded in the ``ev`` field of ``struct rte_event_eth_rx_adapter_queue_conf``. The servicing_weight member of the struct rte_event_eth_rx_adapter_queue_conf is the relative polling frequency of the Rx queue and is applicable when the -adapter uses a service core function. +adapter uses a service core function. The applications can configure queue +event buffer size in ``struct rte_event_eth_rx_adapter_queue_conf::event_buf_size`` +parameter. .. code-block:: c @@ -90,6 +94,7 @@ adapter uses a service core function. 
queue_config.rx_queue_flags = 0; queue_config.ev = ev; queue_config.servicing_weight = 1; + queue_config.event_buf_size = 1024; err = rte_event_eth_rx_adapter_queue_add(id, eth_dev_id, diff --git a/lib/eventdev/rte_event_eth_rx_adapter.h b/lib/eventdev/rte_event_eth_rx_adapter.h index a7881097b4..b9f0563244 100644 --- a/lib/eventdev/rte_event_eth_rx_adapter.h +++ b/lib/eventdev/rte_event_eth_rx_adapter.h @@ -199,6 +199,8 @@ struct rte_event_eth_rx_adapter_queue_conf { * Valid when RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR flag is set in * @see rte_event_eth_rx_adapter_queue_conf::rx_queue_flags. */ + uint16_t event_buf_size; + /**< event buffer size for this queue */ }; /** @@ -265,6 +267,8 @@ struct rte_event_eth_rx_adapter_params { /**< size of event buffer for the adapter. * the size is aligned to BATCH_SIZE and added (2 * BATCH_SIZE) */ + bool use_queue_event_buf; + /**< flag to indicate that event buffer is separate for each queue */ }; /** -- 2.25.1 ^ permalink raw reply [flat|nested] 81+ messages in thread
* [dpdk-dev] [PATCH v2 4/5] eventdev/rx_adapter: implement per queue event buffer 2021-09-21 9:45 ` [dpdk-dev] [PATCH v2 1/5] eventdev/rx_adapter: add support to configure event buffer size Naga Harish K S V 2021-09-21 9:45 ` [dpdk-dev] [PATCH v2 2/5] test/event: add unit test for event buffer size config api Naga Harish K S V 2021-09-21 9:45 ` [dpdk-dev] [PATCH v2 3/5] eventdev/rx_adapter:add per queue event buffer configure support Naga Harish K S V @ 2021-09-21 9:45 ` Naga Harish K S V 2021-09-21 20:51 ` [dpdk-dev] [EXT] " Pavan Nikhilesh Bhagavatula 2021-09-21 9:45 ` [dpdk-dev] [PATCH v2 5/5] test/eventdev: add per rx queue event buffer unit Naga Harish K S V ` (2 subsequent siblings) 5 siblings, 1 reply; 81+ messages in thread From: Naga Harish K S V @ 2021-09-21 9:45 UTC (permalink / raw) To: jerinj, jay.jayatheerthan; +Cc: dev this patch implement the per queue event buffer after required validations. Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com> --- lib/eventdev/rte_event_eth_rx_adapter.c | 188 ++++++++++++++++++------ 1 file changed, 139 insertions(+), 49 deletions(-) diff --git a/lib/eventdev/rte_event_eth_rx_adapter.c b/lib/eventdev/rte_event_eth_rx_adapter.c index df1653b497..20ea440275 100644 --- a/lib/eventdev/rte_event_eth_rx_adapter.c +++ b/lib/eventdev/rte_event_eth_rx_adapter.c @@ -99,10 +99,12 @@ struct rte_event_eth_rx_adapter { uint8_t rss_key_be[RSS_KEY_SIZE]; /* Event device identifier */ uint8_t eventdev_id; - /* Per ethernet device structure */ - struct eth_device_info *eth_devices; /* Event port identifier */ uint8_t event_port_id; + /* Flag indicating per rxq event buffer */ + bool use_queue_event_buf; + /* Per ethernet device structure */ + struct eth_device_info *eth_devices; /* Lock to serialize config updates with service function */ rte_spinlock_t rx_lock; /* Max mbufs processed in any service function invocation */ @@ -238,6 +240,7 @@ struct eth_rx_queue_info { uint32_t flow_id_mask; /* Set to ~0 if app provides 
flow id else 0 */ uint64_t event; struct eth_rx_vector_data vector_data; + struct rte_eth_event_enqueue_buffer *event_buf; }; static struct rte_event_eth_rx_adapter **event_eth_rx_adapter; @@ -753,10 +756,9 @@ rxa_enq_block_end_ts(struct rte_event_eth_rx_adapter *rx_adapter, /* Enqueue buffered events to event device */ static inline uint16_t -rxa_flush_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter) +rxa_flush_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter, + struct rte_eth_event_enqueue_buffer *buf) { - struct rte_eth_event_enqueue_buffer *buf = - &rx_adapter->event_enqueue_buffer; struct rte_event_eth_rx_adapter_stats *stats = &rx_adapter->stats; uint16_t count = buf->last ? buf->last - buf->head : buf->count; @@ -874,15 +876,14 @@ rxa_buffer_mbufs(struct rte_event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id, uint16_t rx_queue_id, struct rte_mbuf **mbufs, - uint16_t num) + uint16_t num, + struct rte_eth_event_enqueue_buffer *buf) { uint32_t i; struct eth_device_info *dev_info = &rx_adapter->eth_devices[eth_dev_id]; struct eth_rx_queue_info *eth_rx_queue_info = &dev_info->rx_queue[rx_queue_id]; - struct rte_eth_event_enqueue_buffer *buf = - &rx_adapter->event_enqueue_buffer; uint16_t new_tail = buf->tail; uint64_t event = eth_rx_queue_info->event; uint32_t flow_id_mask = eth_rx_queue_info->flow_id_mask; @@ -968,11 +969,10 @@ rxa_eth_rx(struct rte_event_eth_rx_adapter *rx_adapter, uint16_t queue_id, uint32_t rx_count, uint32_t max_rx, - int *rxq_empty) + int *rxq_empty, + struct rte_eth_event_enqueue_buffer *buf) { struct rte_mbuf *mbufs[BATCH_SIZE]; - struct rte_eth_event_enqueue_buffer *buf = - &rx_adapter->event_enqueue_buffer; struct rte_event_eth_rx_adapter_stats *stats = &rx_adapter->stats; uint16_t n; @@ -985,7 +985,7 @@ rxa_eth_rx(struct rte_event_eth_rx_adapter *rx_adapter, */ while (rxa_pkt_buf_available(buf)) { if (buf->count >= BATCH_SIZE) - rxa_flush_event_buffer(rx_adapter); + rxa_flush_event_buffer(rx_adapter, buf); 
stats->rx_poll_count++; n = rte_eth_rx_burst(port_id, queue_id, mbufs, BATCH_SIZE); @@ -994,14 +994,14 @@ rxa_eth_rx(struct rte_event_eth_rx_adapter *rx_adapter, *rxq_empty = 1; break; } - rxa_buffer_mbufs(rx_adapter, port_id, queue_id, mbufs, n); + rxa_buffer_mbufs(rx_adapter, port_id, queue_id, mbufs, n, buf); nb_rx += n; if (rx_count + nb_rx > max_rx) break; } if (buf->count > 0) - rxa_flush_event_buffer(rx_adapter); + rxa_flush_event_buffer(rx_adapter, buf); return nb_rx; } @@ -1142,7 +1142,7 @@ rxa_intr_ring_dequeue(struct rte_event_eth_rx_adapter *rx_adapter) ring_lock = &rx_adapter->intr_ring_lock; if (buf->count >= BATCH_SIZE) - rxa_flush_event_buffer(rx_adapter); + rxa_flush_event_buffer(rx_adapter, buf); while (rxa_pkt_buf_available(buf)) { struct eth_device_info *dev_info; @@ -1194,7 +1194,7 @@ rxa_intr_ring_dequeue(struct rte_event_eth_rx_adapter *rx_adapter) continue; n = rxa_eth_rx(rx_adapter, port, i, nb_rx, rx_adapter->max_nb_rx, - &rxq_empty); + &rxq_empty, buf); nb_rx += n; enq_buffer_full = !rxq_empty && n == 0; @@ -1215,7 +1215,7 @@ rxa_intr_ring_dequeue(struct rte_event_eth_rx_adapter *rx_adapter) } else { n = rxa_eth_rx(rx_adapter, port, queue, nb_rx, rx_adapter->max_nb_rx, - &rxq_empty); + &rxq_empty, buf); rx_adapter->qd_valid = !rxq_empty; nb_rx += n; if (nb_rx > rx_adapter->max_nb_rx) @@ -1246,13 +1246,12 @@ rxa_poll(struct rte_event_eth_rx_adapter *rx_adapter) { uint32_t num_queue; uint32_t nb_rx = 0; - struct rte_eth_event_enqueue_buffer *buf; + struct rte_eth_event_enqueue_buffer *buf = NULL; uint32_t wrr_pos; uint32_t max_nb_rx; wrr_pos = rx_adapter->wrr_pos; max_nb_rx = rx_adapter->max_nb_rx; - buf = &rx_adapter->event_enqueue_buffer; /* Iterate through a WRR sequence */ for (num_queue = 0; num_queue < rx_adapter->wrr_len; num_queue++) { @@ -1260,24 +1259,36 @@ rxa_poll(struct rte_event_eth_rx_adapter *rx_adapter) uint16_t qid = rx_adapter->eth_rx_poll[poll_idx].eth_rx_qid; uint16_t d = rx_adapter->eth_rx_poll[poll_idx].eth_dev_id; + 
if (rx_adapter->use_queue_event_buf) { + struct eth_device_info *dev_info = + &rx_adapter->eth_devices[d]; + buf = dev_info->rx_queue[qid].event_buf; + } else + buf = &rx_adapter->event_enqueue_buffer; + /* Don't do a batch dequeue from the rx queue if there isn't * enough space in the enqueue buffer. */ if (buf->count >= BATCH_SIZE) - rxa_flush_event_buffer(rx_adapter); + rxa_flush_event_buffer(rx_adapter, buf); if (!rxa_pkt_buf_available(buf)) { - rx_adapter->wrr_pos = wrr_pos; - return nb_rx; + if (rx_adapter->use_queue_event_buf) + goto poll_next_entry; + else { + rx_adapter->wrr_pos = wrr_pos; + return nb_rx; + } } nb_rx += rxa_eth_rx(rx_adapter, d, qid, nb_rx, max_nb_rx, - NULL); + NULL, buf); if (nb_rx > max_nb_rx) { rx_adapter->wrr_pos = (wrr_pos + 1) % rx_adapter->wrr_len; break; } +poll_next_entry: if (++wrr_pos == rx_adapter->wrr_len) wrr_pos = 0; } @@ -1288,12 +1299,18 @@ static void rxa_vector_expire(struct eth_rx_vector_data *vec, void *arg) { struct rte_event_eth_rx_adapter *rx_adapter = arg; - struct rte_eth_event_enqueue_buffer *buf = - &rx_adapter->event_enqueue_buffer; + struct rte_eth_event_enqueue_buffer *buf = NULL; struct rte_event *ev; + if (rx_adapter->use_queue_event_buf) { + struct eth_device_info *dev_info = + &rx_adapter->eth_devices[vec->port]; + buf = dev_info->rx_queue[vec->queue].event_buf; + } else + buf = &rx_adapter->event_enqueue_buffer; + if (buf->count) - rxa_flush_event_buffer(rx_adapter); + rxa_flush_event_buffer(rx_adapter, buf); if (vec->vector_ev->nb_elem == 0) return; @@ -1905,9 +1922,16 @@ rxa_sw_del(struct rte_event_eth_rx_adapter *rx_adapter, rx_adapter->num_rx_intr -= intrq; dev_info->nb_rx_intr -= intrq; dev_info->nb_shared_intr -= intrq && sintrq; + if (rx_adapter->use_queue_event_buf) { + struct rte_eth_event_enqueue_buffer *event_buf = + dev_info->rx_queue[rx_queue_id].event_buf; + rte_free(event_buf->events); + rte_free(event_buf); + dev_info->rx_queue[rx_queue_id].event_buf = NULL; + } } -static void +static 
int rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter, struct eth_device_info *dev_info, int32_t rx_queue_id, @@ -1919,15 +1943,21 @@ rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter, int intrq; int sintrq; struct rte_event *qi_ev; + struct rte_eth_event_enqueue_buffer *new_rx_buf = NULL; + uint16_t eth_dev_id = dev_info->dev->data->port_id; + int ret; if (rx_queue_id == -1) { uint16_t nb_rx_queues; uint16_t i; nb_rx_queues = dev_info->dev->data->nb_rx_queues; - for (i = 0; i < nb_rx_queues; i++) - rxa_add_queue(rx_adapter, dev_info, i, conf); - return; + for (i = 0; i < nb_rx_queues; i++) { + ret = rxa_add_queue(rx_adapter, dev_info, i, conf); + if (ret) + return ret; + } + return 0; } pollq = rxa_polled_queue(dev_info, rx_queue_id); @@ -1990,6 +2020,37 @@ rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter, dev_info->next_q_idx = 0; } } + + if (!rx_adapter->use_queue_event_buf) + return 0; + + new_rx_buf = rte_zmalloc_socket("rx_buffer_meta", + sizeof(*new_rx_buf), 0, + rte_eth_dev_socket_id(eth_dev_id)); + if (new_rx_buf == NULL) { + RTE_EDEV_LOG_ERR("Failed to allocate event buffer meta for " + "dev_id: %d queue_id: %d", + eth_dev_id, rx_queue_id); + return -ENOMEM; + } + + new_rx_buf->events_size = RTE_ALIGN(conf->event_buf_size, BATCH_SIZE); + new_rx_buf->events_size += (2 * BATCH_SIZE); + new_rx_buf->events = rte_zmalloc_socket("rx_buffer", + sizeof(struct rte_event) * + new_rx_buf->events_size, 0, + rte_eth_dev_socket_id(eth_dev_id)); + if (new_rx_buf->events == NULL) { + rte_free(new_rx_buf); + RTE_EDEV_LOG_ERR("Failed to allocate event buffer for " + "dev_id: %d queue_id: %d", + eth_dev_id, rx_queue_id); + return -ENOMEM; + } + + queue_info->event_buf = new_rx_buf; + + return 0; } static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter, @@ -2018,6 +2079,16 @@ static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter, temp_conf.servicing_weight = 1; } queue_conf = &temp_conf; + + if (queue_conf->servicing_weight == 
0 && + rx_adapter->use_queue_event_buf) { + + RTE_EDEV_LOG_ERR("Use of queue level event buffer " + "not supported for interrupt queues " + "dev_id: %d queue_id: %d", + eth_dev_id, rx_queue_id); + return -EINVAL; + } } nb_rx_queues = dev_info->dev->data->nb_rx_queues; @@ -2097,7 +2168,9 @@ static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter, - rxa_add_queue(rx_adapter, dev_info, rx_queue_id, queue_conf); + ret = rxa_add_queue(rx_adapter, dev_info, rx_queue_id, queue_conf); + if (ret) + goto err_free_rxqueue; rxa_calc_wrr_sequence(rx_adapter, rx_poll, rx_wrr); rte_free(rx_adapter->eth_rx_poll); @@ -2118,7 +2191,7 @@ static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter, rte_free(rx_poll); rte_free(rx_wrr); - return 0; + return ret; } static int @@ -2244,20 +2317,26 @@ rxa_create(uint8_t id, uint8_t dev_id, rx_adapter->eth_devices[i].dev = &rte_eth_devices[i]; /* Rx adapter event buffer allocation */ - buf = &rx_adapter->event_enqueue_buffer; - buf->events_size = RTE_ALIGN(rxa_params->event_buf_size, BATCH_SIZE); - - events = rte_zmalloc_socket(rx_adapter->mem_name, - buf->events_size * sizeof(*events), - 0, socket_id); - if (events == NULL) { - RTE_EDEV_LOG_ERR("Failed to allocate mem for event buffer\n"); - rte_free(rx_adapter->eth_devices); - rte_free(rx_adapter); - return -ENOMEM; - } + rx_adapter->use_queue_event_buf = rxa_params->use_queue_event_buf; + + if (!rx_adapter->use_queue_event_buf) { + buf = &rx_adapter->event_enqueue_buffer; + buf->events_size = RTE_ALIGN(rxa_params->event_buf_size, + BATCH_SIZE); + + events = rte_zmalloc_socket(rx_adapter->mem_name, + buf->events_size * sizeof(*events), + 0, socket_id); + if (events == NULL) { + RTE_EDEV_LOG_ERR("Failed to allocate memory " + "for adapter event buffer"); + rte_free(rx_adapter->eth_devices); + rte_free(rx_adapter); + return -ENOMEM; + } - rx_adapter->event_enqueue_buffer.events = events; + rx_adapter->event_enqueue_buffer.events = events; + } event_eth_rx_adapter[id] = 
rx_adapter; @@ -2277,6 +2356,8 @@ rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, /* Event buffer with default size = 6*BATCH_SIZE */ rxa_params.event_buf_size = ETH_EVENT_BUFFER_SIZE; + rxa_params.use_queue_event_buf = false; + return rxa_create(id, dev_id, &rxa_params, conf_cb, conf_arg); } @@ -2296,9 +2377,9 @@ rte_event_eth_rx_adapter_create_with_params(uint8_t id, uint8_t dev_id, if (rxa_params == NULL) { rxa_params = &temp_params; rxa_params->event_buf_size = ETH_EVENT_BUFFER_SIZE; - } - - if (rxa_params->event_buf_size == 0) + rxa_params->use_queue_event_buf = false; + } else if ((!rxa_params->use_queue_event_buf && + rxa_params->event_buf_size == 0)) return -EINVAL; pc = rte_malloc(NULL, sizeof(*pc), 0); @@ -2364,7 +2445,8 @@ rte_event_eth_rx_adapter_free(uint8_t id) if (rx_adapter->default_cb_arg) rte_free(rx_adapter->conf_arg); rte_free(rx_adapter->eth_devices); - rte_free(rx_adapter->event_enqueue_buffer.events); + if (!rx_adapter->use_queue_event_buf) + rte_free(rx_adapter->event_enqueue_buffer.events); rte_free(rx_adapter); event_eth_rx_adapter[id] = NULL; @@ -2468,6 +2550,14 @@ rte_event_eth_rx_adapter_queue_add(uint8_t id, return -EINVAL; } + if ((rx_adapter->use_queue_event_buf && + queue_conf->event_buf_size == 0) || + (!rx_adapter->use_queue_event_buf && + queue_conf->event_buf_size != 0)) { + RTE_EDEV_LOG_ERR("Invalid Event buffer size for the queue"); + return -EINVAL; + } + dev_info = &rx_adapter->eth_devices[eth_dev_id]; if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) { -- 2.25.1 ^ permalink raw reply [flat|nested] 81+ messages in thread
* Re: [dpdk-dev] [EXT] [PATCH v2 4/5] eventdev/rx_adapter: implement per queue event buffer 2021-09-21 9:45 ` [dpdk-dev] [PATCH v2 4/5] eventdev/rx_adapter: implement per queue event buffer Naga Harish K S V @ 2021-09-21 20:51 ` Pavan Nikhilesh Bhagavatula 2021-09-22 15:45 ` Naga Harish K, S V 0 siblings, 1 reply; 81+ messages in thread From: Pavan Nikhilesh Bhagavatula @ 2021-09-21 20:51 UTC (permalink / raw) To: Naga Harish K S V, Jerin Jacob Kollanukkaran, jay.jayatheerthan; +Cc: dev >this patch implement the per queue event buffer after >required validations. > >Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com> >--- > lib/eventdev/rte_event_eth_rx_adapter.c | 188 >++++++++++++++++++------ > 1 file changed, 139 insertions(+), 49 deletions(-) > <snip> >@@ -2277,6 +2356,8 @@ >rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, > > /* Event buffer with default size = 6*BATCH_SIZE */ > rxa_params.event_buf_size = ETH_EVENT_BUFFER_SIZE; >+ rxa_params.use_queue_event_buf = false; >+ Both these params should be passed to the driver layer when RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT is set in caps. 
> return rxa_create(id, dev_id, &rxa_params, conf_cb, conf_arg); > } > >@@ -2296,9 +2377,9 @@ >rte_event_eth_rx_adapter_create_with_params(uint8_t id, uint8_t >dev_id, > if (rxa_params == NULL) { > rxa_params = &temp_params; > rxa_params->event_buf_size = >ETH_EVENT_BUFFER_SIZE; >- } >- >- if (rxa_params->event_buf_size == 0) >+ rxa_params->use_queue_event_buf = false; >+ } else if ((!rxa_params->use_queue_event_buf && >+ rxa_params->event_buf_size == 0)) > return -EINVAL; > > pc = rte_malloc(NULL, sizeof(*pc), 0); >@@ -2364,7 +2445,8 @@ rte_event_eth_rx_adapter_free(uint8_t id) > if (rx_adapter->default_cb_arg) > rte_free(rx_adapter->conf_arg); > rte_free(rx_adapter->eth_devices); >- rte_free(rx_adapter->event_enqueue_buffer.events); >+ if (!rx_adapter->use_queue_event_buf) >+ rte_free(rx_adapter->event_enqueue_buffer.events); > rte_free(rx_adapter); > event_eth_rx_adapter[id] = NULL; > >@@ -2468,6 +2550,14 @@ >rte_event_eth_rx_adapter_queue_add(uint8_t id, > return -EINVAL; > } > >+ if ((rx_adapter->use_queue_event_buf && >+ queue_conf->event_buf_size == 0) || >+ (!rx_adapter->use_queue_event_buf && >+ queue_conf->event_buf_size != 0)) { >+ RTE_EDEV_LOG_ERR("Invalid Event buffer size for the >queue"); >+ return -EINVAL; >+ } >+ > dev_info = &rx_adapter->eth_devices[eth_dev_id]; > > if (cap & >RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) { >-- >2.25.1 ^ permalink raw reply [flat|nested] 81+ messages in thread
* Re: [dpdk-dev] [EXT] [PATCH v2 4/5] eventdev/rx_adapter: implement per queue event buffer 2021-09-21 20:51 ` [dpdk-dev] [EXT] " Pavan Nikhilesh Bhagavatula @ 2021-09-22 15:45 ` Naga Harish K, S V 0 siblings, 0 replies; 81+ messages in thread From: Naga Harish K, S V @ 2021-09-22 15:45 UTC (permalink / raw) To: Pavan Nikhilesh Bhagavatula, Jerin Jacob Kollanukkaran, Jayatheerthan, Jay Cc: dev > -----Original Message----- > From: Pavan Nikhilesh Bhagavatula <pbhagavatula@marvell.com> > Sent: Wednesday, September 22, 2021 2:22 AM > To: Naga Harish K, S V <s.v.naga.harish.k@intel.com>; Jerin Jacob > Kollanukkaran <jerinj@marvell.com>; Jayatheerthan, Jay > <jay.jayatheerthan@intel.com> > Cc: dev@dpdk.org > Subject: RE: [EXT] [dpdk-dev] [PATCH v2 4/5] eventdev/rx_adapter: > implement per queue event buffer > > >this patch implement the per queue event buffer after required > >validations. > > > >Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com> > >--- > > lib/eventdev/rte_event_eth_rx_adapter.c | 188 > >++++++++++++++++++------ > > 1 file changed, 139 insertions(+), 49 deletions(-) > > > > <snip> > > >@@ -2277,6 +2356,8 @@ > >rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, > > > > /* Event buffer with default size = 6*BATCH_SIZE */ > > rxa_params.event_buf_size = ETH_EVENT_BUFFER_SIZE; > >+ rxa_params.use_queue_event_buf = false; > >+ > > Both these params should be passed to the driver layer when > RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT is set in caps. > I think the event buffer is not required for RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT Adapter capability. In the existing implementation for adapter create, there is no special handling for RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT capability. Let us know this is still a valid requirement. 
> > return rxa_create(id, dev_id, &rxa_params, conf_cb, conf_arg); } > > > >@@ -2296,9 +2377,9 @@ > >rte_event_eth_rx_adapter_create_with_params(uint8_t id, uint8_t > dev_id, > > if (rxa_params == NULL) { > > rxa_params = &temp_params; > > rxa_params->event_buf_size = > >ETH_EVENT_BUFFER_SIZE; > >- } > >- > >- if (rxa_params->event_buf_size == 0) > >+ rxa_params->use_queue_event_buf = false; > >+ } else if ((!rxa_params->use_queue_event_buf && > >+ rxa_params->event_buf_size == 0)) > > return -EINVAL; > > > > pc = rte_malloc(NULL, sizeof(*pc), 0); @@ -2364,7 +2445,8 @@ > >rte_event_eth_rx_adapter_free(uint8_t id) > > if (rx_adapter->default_cb_arg) > > rte_free(rx_adapter->conf_arg); > > rte_free(rx_adapter->eth_devices); > >- rte_free(rx_adapter->event_enqueue_buffer.events); > >+ if (!rx_adapter->use_queue_event_buf) > >+ rte_free(rx_adapter->event_enqueue_buffer.events); > > rte_free(rx_adapter); > > event_eth_rx_adapter[id] = NULL; > > > >@@ -2468,6 +2550,14 @@ > >rte_event_eth_rx_adapter_queue_add(uint8_t id, > > return -EINVAL; > > } > > > >+ if ((rx_adapter->use_queue_event_buf && > >+ queue_conf->event_buf_size == 0) || > >+ (!rx_adapter->use_queue_event_buf && > >+ queue_conf->event_buf_size != 0)) { > >+ RTE_EDEV_LOG_ERR("Invalid Event buffer size for the > >queue"); > >+ return -EINVAL; > >+ } > >+ > > dev_info = &rx_adapter->eth_devices[eth_dev_id]; > > > > if (cap & > >RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) { > >-- > >2.25.1 ^ permalink raw reply [flat|nested] 81+ messages in thread
* [dpdk-dev] [PATCH v2 5/5] test/eventdev: add per rx queue event buffer unit 2021-09-21 9:45 ` [dpdk-dev] [PATCH v2 1/5] eventdev/rx_adapter: add support to configure event buffer size Naga Harish K S V ` (2 preceding siblings ...) 2021-09-21 9:45 ` [dpdk-dev] [PATCH v2 4/5] eventdev/rx_adapter: implement per queue event buffer Naga Harish K S V @ 2021-09-21 9:45 ` Naga Harish K S V 2021-09-21 20:24 ` [dpdk-dev] [EXT] [PATCH v2 1/5] eventdev/rx_adapter: add support to configure event buffer size Pavan Nikhilesh Bhagavatula 2021-09-22 15:13 ` [dpdk-dev] [PATCH v3 1/5] eventdev/rx_adapter: add event buffer size configurability Naga Harish K S V 5 siblings, 0 replies; 81+ messages in thread From: Naga Harish K S V @ 2021-09-21 9:45 UTC (permalink / raw) To: jerinj, jay.jayatheerthan; +Cc: dev this patch adds unit tests for per rx queue event buffer Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com> --- app/test/test_event_eth_rx_adapter.c | 86 ++++++++++++++++++++++++++++ 1 file changed, 86 insertions(+) diff --git a/app/test/test_event_eth_rx_adapter.c b/app/test/test_event_eth_rx_adapter.c index 3c0f0ad7cc..11110564d0 100644 --- a/app/test/test_event_eth_rx_adapter.c +++ b/app/test/test_event_eth_rx_adapter.c @@ -387,6 +387,90 @@ adapter_create(void) return err; } +static int +adapter_create_with_params(void) +{ + int err; + struct rte_event_dev_info dev_info; + struct rte_event_port_conf rx_p_conf; + struct rte_event_eth_rx_adapter_params rxa_params; + + memset(&rx_p_conf, 0, sizeof(rx_p_conf)); + + err = rte_event_dev_info_get(TEST_DEV_ID, &dev_info); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + rx_p_conf.new_event_threshold = dev_info.max_num_events; + rx_p_conf.dequeue_depth = dev_info.max_event_port_dequeue_depth; + rx_p_conf.enqueue_depth = dev_info.max_event_port_enqueue_depth; + + rxa_params.use_queue_event_buf = false; + rxa_params.event_buf_size = 0; + + err = rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID, + TEST_DEV_ID, 
&rx_p_conf, &rxa_params); + TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err); + + rxa_params.use_queue_event_buf = true; + + err = rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID, + TEST_DEV_ID, &rx_p_conf, &rxa_params); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + err = rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID, + TEST_DEV_ID, &rx_p_conf, &rxa_params); + TEST_ASSERT(err == -EEXIST, "Expected -EEXIST got %d", err); + + return TEST_SUCCESS; +} + +static int +adapter_queue_event_buf_test(void) +{ + int err; + struct rte_event ev; + uint32_t cap; + + struct rte_event_eth_rx_adapter_queue_conf queue_config = {0}; + + err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID, TEST_ETHDEV_ID, + &cap); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + ev.queue_id = 0; + ev.sched_type = RTE_SCHED_TYPE_ATOMIC; + ev.priority = 0; + + queue_config.rx_queue_flags = 0; + if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID) { + ev.flow_id = 1; + queue_config.rx_queue_flags = + RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID; + } + queue_config.ev = ev; + queue_config.servicing_weight = 1; + queue_config.event_buf_size = 0; + + err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID, + TEST_ETHDEV_ID, 0, + &queue_config); + TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err); + + queue_config.event_buf_size = 1024; + + err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID, + TEST_ETHDEV_ID, 0, + &queue_config); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID, + TEST_ETHDEV_ID, + 0); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + return TEST_SUCCESS; +} + static void adapter_free(void) { @@ -804,6 +888,8 @@ static struct unit_test_suite event_eth_rx_tests = { adapter_multi_eth_add_del), TEST_CASE_ST(adapter_create, adapter_free, adapter_start_stop), TEST_CASE_ST(adapter_create, adapter_free, adapter_stats), + TEST_CASE_ST(adapter_create_with_params, adapter_free, 
+ adapter_queue_event_buf_test), TEST_CASES_END() /**< NULL terminate unit test array */ } }; -- 2.25.1 ^ permalink raw reply [flat|nested] 81+ messages in thread
* Re: [dpdk-dev] [EXT] [PATCH v2 1/5] eventdev/rx_adapter: add support to configure event buffer size 2021-09-21 9:45 ` [dpdk-dev] [PATCH v2 1/5] eventdev/rx_adapter: add support to configure event buffer size Naga Harish K S V ` (3 preceding siblings ...) 2021-09-21 9:45 ` [dpdk-dev] [PATCH v2 5/5] test/eventdev: add per rx queue event buffer unit Naga Harish K S V @ 2021-09-21 20:24 ` Pavan Nikhilesh Bhagavatula 2021-09-22 15:36 ` Naga Harish K, S V 2021-09-22 15:13 ` [dpdk-dev] [PATCH v3 1/5] eventdev/rx_adapter: add event buffer size configurability Naga Harish K S V 5 siblings, 1 reply; 81+ messages in thread From: Pavan Nikhilesh Bhagavatula @ 2021-09-21 20:24 UTC (permalink / raw) To: Naga Harish K S V, Jerin Jacob Kollanukkaran, jay.jayatheerthan Cc: dev, Ganapati Kundapura >Currently Rx event buffer is static array with a default size >of 192(6*BATCH_SIZE). > >``rte_event_eth_rx_adapter_create_with_params`` api is added which >takes >``struct rte_event_eth_rx_adapter_params`` to configure event >buffer size in addition other params . The event buffer is >allocated from heap after aligning the size to BATCH_SIZE and >adding 2*BATCH_SIZE. In case of NULL params argument, default event >buffer size is used. > >Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com> >Signed-off-by: Ganapati Kundapura <ganapati.kundapura@intel.com> > >--- >v2: >* Updated header file and rx adapter documentation as per review >comments. >* new api name is modified as >rte_event_eth_rx_adapter_create_with_params > as per review comments. >* rxa_params pointer argument Value NULL is allowed to represent the > default values > >v1: >* Initial implementation with documentation and unit tests. 
>--- > .../prog_guide/event_ethernet_rx_adapter.rst | 7 ++ > lib/eventdev/rte_event_eth_rx_adapter.c | 94 >+++++++++++++++++-- > lib/eventdev/rte_event_eth_rx_adapter.h | 40 +++++++- > lib/eventdev/version.map | 2 + > 4 files changed, 135 insertions(+), 8 deletions(-) > >diff --git a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst >b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst >index 0780b6f711..dd753613bd 100644 >--- a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst >+++ b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst >@@ -62,6 +62,13 @@ service function and needs to create an event >port for it. The callback is > expected to fill the ``struct rte_event_eth_rx_adapter_conf structure`` > passed to it. > >+If the application desires to control the event buffer size, it can use the >+``rte_event_eth_rx_adapter_create_with_params()`` api. The event >buffer size is >+specified using ``struct >rte_event_eth_rx_adapter_params::event_buf_size``. >+The function is passed the event device to be associated with the >adapter >+and port configuration for the adapter to setup an event port if the >+adapter needs to use a service function. 
>+ > Adding Rx Queues to the Adapter Instance > ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ > >diff --git a/lib/eventdev/rte_event_eth_rx_adapter.c >b/lib/eventdev/rte_event_eth_rx_adapter.c >index f2dc69503d..df1653b497 100644 >--- a/lib/eventdev/rte_event_eth_rx_adapter.c >+++ b/lib/eventdev/rte_event_eth_rx_adapter.c >@@ -82,7 +82,9 @@ struct rte_eth_event_enqueue_buffer { > /* Count of events in this buffer */ > uint16_t count; > /* Array of events in this buffer */ >- struct rte_event events[ETH_EVENT_BUFFER_SIZE]; >+ struct rte_event *events; >+ /* size of event buffer */ >+ uint16_t events_size; > /* Event enqueue happens from head */ > uint16_t head; > /* New packets from rte_eth_rx_burst is enqued from tail */ >@@ -919,7 +921,7 @@ rxa_buffer_mbufs(struct >rte_event_eth_rx_adapter *rx_adapter, > dropped = 0; > nb_cb = dev_info->cb_fn(eth_dev_id, rx_queue_id, > buf->last | >- (RTE_DIM(buf->events) & ~buf- >>last_mask), >+ (buf->events_size & ~buf- >>last_mask), > buf->count >= BATCH_SIZE ? 
> buf->count - >BATCH_SIZE : 0, > &buf->events[buf->tail], >@@ -945,7 +947,7 @@ rxa_pkt_buf_available(struct >rte_eth_event_enqueue_buffer *buf) > uint32_t nb_req = buf->tail + BATCH_SIZE; > > if (!buf->last) { >- if (nb_req <= RTE_DIM(buf->events)) >+ if (nb_req <= buf->events_size) > return true; > > if (buf->head >= BATCH_SIZE) { >@@ -2164,12 +2166,15 @@ rxa_ctrl(uint8_t id, int start) > return 0; > } > >-int >-rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, >- rte_event_eth_rx_adapter_conf_cb >conf_cb, >- void *conf_arg) >+static int >+rxa_create(uint8_t id, uint8_t dev_id, >+ struct rte_event_eth_rx_adapter_params *rxa_params, >+ rte_event_eth_rx_adapter_conf_cb conf_cb, >+ void *conf_arg) > { > struct rte_event_eth_rx_adapter *rx_adapter; >+ struct rte_eth_event_enqueue_buffer *buf; >+ struct rte_event *events; > int ret; > int socket_id; > uint16_t i; >@@ -2184,6 +2189,7 @@ >rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, > > RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, - >EINVAL); > RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); >+ > if (conf_cb == NULL) > return -EINVAL; > >@@ -2231,11 +2237,30 @@ >rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, > rte_free(rx_adapter); > return -ENOMEM; > } >+ > rte_spinlock_init(&rx_adapter->rx_lock); >+ > for (i = 0; i < RTE_MAX_ETHPORTS; i++) > rx_adapter->eth_devices[i].dev = &rte_eth_devices[i]; > >+ /* Rx adapter event buffer allocation */ >+ buf = &rx_adapter->event_enqueue_buffer; >+ buf->events_size = RTE_ALIGN(rxa_params->event_buf_size, >BATCH_SIZE); >+ >+ events = rte_zmalloc_socket(rx_adapter->mem_name, >+ buf->events_size * sizeof(*events), >+ 0, socket_id); >+ if (events == NULL) { >+ RTE_EDEV_LOG_ERR("Failed to allocate mem for event >buffer\n"); >+ rte_free(rx_adapter->eth_devices); >+ rte_free(rx_adapter); >+ return -ENOMEM; >+ } >+ >+ rx_adapter->event_enqueue_buffer.events = events; >+ > event_eth_rx_adapter[id] = rx_adapter; >+ > if (conf_cb == 
rxa_default_conf_cb) > rx_adapter->default_cb_arg = 1; > rte_eventdev_trace_eth_rx_adapter_create(id, dev_id, >conf_cb, >@@ -2243,6 +2268,57 @@ >rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, > return 0; > } > >+int >+rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, >+ rte_event_eth_rx_adapter_conf_cb >conf_cb, >+ void *conf_arg) >+{ >+ struct rte_event_eth_rx_adapter_params rxa_params; >+ >+ /* Event buffer with default size = 6*BATCH_SIZE */ Why is it a multiple of 6, if its not documented please add a line here. >+ rxa_params.event_buf_size = ETH_EVENT_BUFFER_SIZE; >+ return rxa_create(id, dev_id, &rxa_params, conf_cb, conf_arg); >+} >+ >+int >+rte_event_eth_rx_adapter_create_with_params(uint8_t id, uint8_t >dev_id, >+ struct rte_event_port_conf *port_config, >+ struct rte_event_eth_rx_adapter_params >*rxa_params) >+{ >+ struct rte_event_port_conf *pc; >+ int ret; >+ struct rte_event_eth_rx_adapter_params temp_params = {0}; >+ >+ if (port_config == NULL) >+ return -EINVAL; >+ >+ /* use default values if rxa_parmas is NULL */ >+ if (rxa_params == NULL) { >+ rxa_params = &temp_params; >+ rxa_params->event_buf_size = >ETH_EVENT_BUFFER_SIZE; >+ } >+ >+ if (rxa_params->event_buf_size == 0) >+ return -EINVAL; >+ >+ pc = rte_malloc(NULL, sizeof(*pc), 0); >+ if (pc == NULL) >+ return -ENOMEM; >+ >+ *pc = *port_config; >+ >+ /* event buff size aligned to BATCH_SIZE + 2*BATCH_SIZE */ >+ rxa_params->event_buf_size = RTE_ALIGN(rxa_params- >>event_buf_size, >+ BATCH_SIZE); >+ rxa_params->event_buf_size += BATCH_SIZE + BATCH_SIZE; >+ >+ ret = rxa_create(id, dev_id, rxa_params, rxa_default_conf_cb, >pc); >+ if (ret) >+ rte_free(pc); >+ >+ return ret; >+} >+ > int > rte_event_eth_rx_adapter_create(uint8_t id, uint8_t dev_id, > struct rte_event_port_conf *port_config) >@@ -2252,12 +2328,14 @@ rte_event_eth_rx_adapter_create(uint8_t >id, uint8_t dev_id, > > if (port_config == NULL) > return -EINVAL; >+ > 
RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, - >EINVAL); > > pc = rte_malloc(NULL, sizeof(*pc), 0); > if (pc == NULL) > return -ENOMEM; > *pc = *port_config; >+ > ret = rte_event_eth_rx_adapter_create_ext(id, dev_id, > rxa_default_conf_cb, > pc); >@@ -2286,6 +2364,7 @@ rte_event_eth_rx_adapter_free(uint8_t id) > if (rx_adapter->default_cb_arg) > rte_free(rx_adapter->conf_arg); > rte_free(rx_adapter->eth_devices); >+ rte_free(rx_adapter->event_enqueue_buffer.events); > rte_free(rx_adapter); > event_eth_rx_adapter[id] = NULL; > >@@ -2658,6 +2737,7 @@ rte_event_eth_rx_adapter_stats_get(uint8_t >id, > > stats->rx_packets += dev_stats_sum.rx_packets; > stats->rx_enq_count += dev_stats_sum.rx_enq_count; >+ > return 0; > } > >diff --git a/lib/eventdev/rte_event_eth_rx_adapter.h >b/lib/eventdev/rte_event_eth_rx_adapter.h >index 3f8b362295..a7881097b4 100644 >--- a/lib/eventdev/rte_event_eth_rx_adapter.h >+++ b/lib/eventdev/rte_event_eth_rx_adapter.h >@@ -26,6 +26,7 @@ > * The ethernet Rx event adapter's functions are: > * - rte_event_eth_rx_adapter_create_ext() > * - rte_event_eth_rx_adapter_create() >+ * - rte_event_eth_rx_adapter_create_with_params() > * - rte_event_eth_rx_adapter_free() > * - rte_event_eth_rx_adapter_queue_add() > * - rte_event_eth_rx_adapter_queue_del() >@@ -36,7 +37,7 @@ > * > * The application creates an ethernet to event adapter using > * rte_event_eth_rx_adapter_create_ext() or >rte_event_eth_rx_adapter_create() >- * functions. >+ * or rte_event_eth_rx_adapter_create_with_params() functions. 
> * The adapter needs to know which ethernet rx queues to poll for >mbufs as well > * as event device parameters such as the event queue identifier, >event > * priority and scheduling type that the adapter should use when >constructing >@@ -256,6 +257,16 @@ struct >rte_event_eth_rx_adapter_vector_limits { > */ > }; > >+/** >+ * A structure to hold adapter config params >+ */ >+struct rte_event_eth_rx_adapter_params { >+ uint16_t event_buf_size; >+ /**< size of event buffer for the adapter. >+ * the size is aligned to BATCH_SIZE and added (2 * BATCH_SIZE) >+ */ BATCH_SIZE is internal i.e. not exposed to application layer, can we please define what is BATCH_SIZE here and why applications input needs to be aligned to BATCH_SIZE? and why 2*BATCH size is required. If this is an application driven parameter we shouldn't impose arbitrary constraints like this. Maybe "Aligned to power of 2" would be sufficient. OR Extend rte_event_eth_rx_adapter_caps_get() to return optimal event_buf_size if supported. >+}; >+ > /** > * > * Callback function invoked by the SW adapter before it continues >@@ -356,6 +367,33 @@ int >rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, > int rte_event_eth_rx_adapter_create(uint8_t id, uint8_t dev_id, > struct rte_event_port_conf >*port_config); > >+/** >+ * This is a variant of rte_event_eth_rx_adapter_create() with >additional >+ * adapter params specified in ``struct >rte_event_eth_rx_adapter_params``. >+ * >+ * @param id >+ * The identifier of the ethernet Rx event adapter. >+ * >+ * @param dev_id >+ * The identifier of the event device to configure. >+ * >+ * @param port_config >+ * Argument of type *rte_event_port_conf* that is passed to the >conf_cb >+ * function. >+ * >+ * @param rxa_params >+ * Pointer to struct rte_event_eth_rx_adapter_params. >+ * In case of NULL, default values are used. 
>+ * >+ * @return >+ * - 0: Success >+ * - <0: Error code on failure >+ */ >+__rte_experimental >+int rte_event_eth_rx_adapter_create_with_params(uint8_t id, >uint8_t dev_id, >+ struct rte_event_port_conf *port_config, >+ struct rte_event_eth_rx_adapter_params >*rxa_params); >+ > /** > * Free an event adapter > * >diff --git a/lib/eventdev/version.map b/lib/eventdev/version.map >index cd86d2d908..87586de879 100644 >--- a/lib/eventdev/version.map >+++ b/lib/eventdev/version.map >@@ -138,6 +138,8 @@ EXPERIMENTAL { > __rte_eventdev_trace_port_setup; > # added in 20.11 > rte_event_pmd_pci_probe_named; >+ # added in 21.11 >+ rte_event_eth_rx_adapter_create_with_params; > > #added in 21.05 > rte_event_vector_pool_create; >-- >2.25.1 ^ permalink raw reply [flat|nested] 81+ messages in thread
* Re: [dpdk-dev] [EXT] [PATCH v2 1/5] eventdev/rx_adapter: add support to configure event buffer size 2021-09-21 20:24 ` [dpdk-dev] [EXT] [PATCH v2 1/5] eventdev/rx_adapter: add support to configure event buffer size Pavan Nikhilesh Bhagavatula @ 2021-09-22 15:36 ` Naga Harish K, S V 0 siblings, 0 replies; 81+ messages in thread From: Naga Harish K, S V @ 2021-09-22 15:36 UTC (permalink / raw) To: Pavan Nikhilesh Bhagavatula, Jerin Jacob Kollanukkaran, Jayatheerthan, Jay Cc: dev, Kundapura, Ganapati Hi Pavan, > -----Original Message----- > From: Pavan Nikhilesh Bhagavatula <pbhagavatula@marvell.com> > Sent: Wednesday, September 22, 2021 1:55 AM > To: Naga Harish K, S V <s.v.naga.harish.k@intel.com>; Jerin Jacob > Kollanukkaran <jerinj@marvell.com>; Jayatheerthan, Jay > <jay.jayatheerthan@intel.com> > Cc: dev@dpdk.org; Kundapura, Ganapati <ganapati.kundapura@intel.com> > Subject: RE: [EXT] [dpdk-dev] [PATCH v2 1/5] eventdev/rx_adapter: add > support to configure event buffer size > > >Currently Rx event buffer is static array with a default size of > >192(6*BATCH_SIZE). > > > >``rte_event_eth_rx_adapter_create_with_params`` api is added which > >takes ``struct rte_event_eth_rx_adapter_params`` to configure event > >buffer size in addition other params . The event buffer is allocated > >from heap after aligning the size to BATCH_SIZE and adding > >2*BATCH_SIZE. In case of NULL params argument, default event buffer > >size is used. > > > >Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com> > >Signed-off-by: Ganapati Kundapura <ganapati.kundapura@intel.com> > > > >--- > >v2: > >* Updated header file and rx adapter documentation as per review > >comments. > >* new api name is modified as > >rte_event_eth_rx_adapter_create_with_params > > as per review comments. > >* rxa_params pointer argument Value NULL is allowed to represent the > > default values > > > >v1: > >* Initial implementation with documentation and unit tests. 
> >--- > > .../prog_guide/event_ethernet_rx_adapter.rst | 7 ++ > > lib/eventdev/rte_event_eth_rx_adapter.c | 94 > >+++++++++++++++++-- > > lib/eventdev/rte_event_eth_rx_adapter.h | 40 +++++++- > > lib/eventdev/version.map | 2 + > > 4 files changed, 135 insertions(+), 8 deletions(-) > > > >diff --git a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst > >b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst > >index 0780b6f711..dd753613bd 100644 > >--- a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst > >+++ b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst > >@@ -62,6 +62,13 @@ service function and needs to create an event port > >for it. The callback is expected to fill the ``struct > >rte_event_eth_rx_adapter_conf structure`` passed to it. > > > >+If the application desires to control the event buffer size, it can > >+use the ``rte_event_eth_rx_adapter_create_with_params()`` api. The > >+event > >buffer size is > >+specified using ``struct > >rte_event_eth_rx_adapter_params::event_buf_size``. > >+The function is passed the event device to be associated with the > >adapter > >+and port configuration for the adapter to setup an event port if the > >+adapter needs to use a service function. 
> >+ > > Adding Rx Queues to the Adapter Instance > > ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ > > > >diff --git a/lib/eventdev/rte_event_eth_rx_adapter.c > >b/lib/eventdev/rte_event_eth_rx_adapter.c > >index f2dc69503d..df1653b497 100644 > >--- a/lib/eventdev/rte_event_eth_rx_adapter.c > >+++ b/lib/eventdev/rte_event_eth_rx_adapter.c > >@@ -82,7 +82,9 @@ struct rte_eth_event_enqueue_buffer { > > /* Count of events in this buffer */ > > uint16_t count; > > /* Array of events in this buffer */ > >- struct rte_event events[ETH_EVENT_BUFFER_SIZE]; > >+ struct rte_event *events; > >+ /* size of event buffer */ > >+ uint16_t events_size; > > /* Event enqueue happens from head */ > > uint16_t head; > > /* New packets from rte_eth_rx_burst is enqued from tail */ @@ - > 919,7 > >+921,7 @@ rxa_buffer_mbufs(struct rte_event_eth_rx_adapter > *rx_adapter, > > dropped = 0; > > nb_cb = dev_info->cb_fn(eth_dev_id, rx_queue_id, > > buf->last | > >- (RTE_DIM(buf->events) & ~buf- > >>last_mask), > >+ (buf->events_size & ~buf- > >>last_mask), > > buf->count >= BATCH_SIZE ? 
> > buf->count - > >BATCH_SIZE : 0, > > &buf->events[buf->tail], > >@@ -945,7 +947,7 @@ rxa_pkt_buf_available(struct > >rte_eth_event_enqueue_buffer *buf) > > uint32_t nb_req = buf->tail + BATCH_SIZE; > > > > if (!buf->last) { > >- if (nb_req <= RTE_DIM(buf->events)) > >+ if (nb_req <= buf->events_size) > > return true; > > > > if (buf->head >= BATCH_SIZE) { > >@@ -2164,12 +2166,15 @@ rxa_ctrl(uint8_t id, int start) > > return 0; > > } > > > >-int > >-rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, > >- rte_event_eth_rx_adapter_conf_cb > >conf_cb, > >- void *conf_arg) > >+static int > >+rxa_create(uint8_t id, uint8_t dev_id, > >+ struct rte_event_eth_rx_adapter_params *rxa_params, > >+ rte_event_eth_rx_adapter_conf_cb conf_cb, > >+ void *conf_arg) > > { > > struct rte_event_eth_rx_adapter *rx_adapter; > >+ struct rte_eth_event_enqueue_buffer *buf; > >+ struct rte_event *events; > > int ret; > > int socket_id; > > uint16_t i; > >@@ -2184,6 +2189,7 @@ > >rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, > > > > RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, - > EINVAL); > > RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); > >+ > > if (conf_cb == NULL) > > return -EINVAL; > > > >@@ -2231,11 +2237,30 @@ > >rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, > > rte_free(rx_adapter); > > return -ENOMEM; > > } > >+ > > rte_spinlock_init(&rx_adapter->rx_lock); > >+ > > for (i = 0; i < RTE_MAX_ETHPORTS; i++) > > rx_adapter->eth_devices[i].dev = &rte_eth_devices[i]; > > > >+ /* Rx adapter event buffer allocation */ > >+ buf = &rx_adapter->event_enqueue_buffer; > >+ buf->events_size = RTE_ALIGN(rxa_params->event_buf_size, > >BATCH_SIZE); > >+ > >+ events = rte_zmalloc_socket(rx_adapter->mem_name, > >+ buf->events_size * sizeof(*events), > >+ 0, socket_id); > >+ if (events == NULL) { > >+ RTE_EDEV_LOG_ERR("Failed to allocate mem for event > >buffer\n"); > >+ rte_free(rx_adapter->eth_devices); > >+ rte_free(rx_adapter); > >+ 
return -ENOMEM; > >+ } > >+ > >+ rx_adapter->event_enqueue_buffer.events = events; > >+ > > event_eth_rx_adapter[id] = rx_adapter; > >+ > > if (conf_cb == rxa_default_conf_cb) > > rx_adapter->default_cb_arg = 1; > > rte_eventdev_trace_eth_rx_adapter_create(id, dev_id, conf_cb, > @@ > >-2243,6 +2268,57 @@ rte_event_eth_rx_adapter_create_ext(uint8_t id, > >uint8_t dev_id, > > return 0; > > } > > > >+int > >+rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, > >+ rte_event_eth_rx_adapter_conf_cb > >conf_cb, > >+ void *conf_arg) > >+{ > >+ struct rte_event_eth_rx_adapter_params rxa_params; > >+ > >+ /* Event buffer with default size = 6*BATCH_SIZE */ > > Why is it a multiple of 6, if its not documented please add a line here. This is the existing default event buffer size and continued its usage. There is no hard rule to decide the default size. > > >+ rxa_params.event_buf_size = ETH_EVENT_BUFFER_SIZE; > >+ return rxa_create(id, dev_id, &rxa_params, conf_cb, conf_arg); } > >+ > >+int > >+rte_event_eth_rx_adapter_create_with_params(uint8_t id, uint8_t > >dev_id, > >+ struct rte_event_port_conf *port_config, > >+ struct rte_event_eth_rx_adapter_params > >*rxa_params) > >+{ > >+ struct rte_event_port_conf *pc; > >+ int ret; > >+ struct rte_event_eth_rx_adapter_params temp_params = {0}; > >+ > >+ if (port_config == NULL) > >+ return -EINVAL; > >+ > >+ /* use default values if rxa_parmas is NULL */ > >+ if (rxa_params == NULL) { > >+ rxa_params = &temp_params; > >+ rxa_params->event_buf_size = > >ETH_EVENT_BUFFER_SIZE; > >+ } > >+ > >+ if (rxa_params->event_buf_size == 0) > >+ return -EINVAL; > >+ > >+ pc = rte_malloc(NULL, sizeof(*pc), 0); > >+ if (pc == NULL) > >+ return -ENOMEM; > >+ > >+ *pc = *port_config; > >+ > >+ /* event buff size aligned to BATCH_SIZE + 2*BATCH_SIZE */ > >+ rxa_params->event_buf_size = RTE_ALIGN(rxa_params- > >>event_buf_size, > >+ BATCH_SIZE); > >+ rxa_params->event_buf_size += BATCH_SIZE + BATCH_SIZE; > >+ > >+ ret = rxa_create(id, 
dev_id, rxa_params, rxa_default_conf_cb, > >pc); > >+ if (ret) > >+ rte_free(pc); > >+ > >+ return ret; > >+} > >+ > > int > > rte_event_eth_rx_adapter_create(uint8_t id, uint8_t dev_id, > > struct rte_event_port_conf *port_config) @@ -2252,12 > +2328,14 @@ > >rte_event_eth_rx_adapter_create(uint8_t > >id, uint8_t dev_id, > > > > if (port_config == NULL) > > return -EINVAL; > >+ > > RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, - > EINVAL); > > > > pc = rte_malloc(NULL, sizeof(*pc), 0); > > if (pc == NULL) > > return -ENOMEM; > > *pc = *port_config; > >+ > > ret = rte_event_eth_rx_adapter_create_ext(id, dev_id, > > rxa_default_conf_cb, > > pc); > >@@ -2286,6 +2364,7 @@ rte_event_eth_rx_adapter_free(uint8_t id) > > if (rx_adapter->default_cb_arg) > > rte_free(rx_adapter->conf_arg); > > rte_free(rx_adapter->eth_devices); > >+ rte_free(rx_adapter->event_enqueue_buffer.events); > > rte_free(rx_adapter); > > event_eth_rx_adapter[id] = NULL; > > > >@@ -2658,6 +2737,7 @@ rte_event_eth_rx_adapter_stats_get(uint8_t > >id, > > > > stats->rx_packets += dev_stats_sum.rx_packets; > > stats->rx_enq_count += dev_stats_sum.rx_enq_count; > >+ > > return 0; > > } > > > >diff --git a/lib/eventdev/rte_event_eth_rx_adapter.h > >b/lib/eventdev/rte_event_eth_rx_adapter.h > >index 3f8b362295..a7881097b4 100644 > >--- a/lib/eventdev/rte_event_eth_rx_adapter.h > >+++ b/lib/eventdev/rte_event_eth_rx_adapter.h > >@@ -26,6 +26,7 @@ > > * The ethernet Rx event adapter's functions are: > > * - rte_event_eth_rx_adapter_create_ext() > > * - rte_event_eth_rx_adapter_create() > >+ * - rte_event_eth_rx_adapter_create_with_params() > > * - rte_event_eth_rx_adapter_free() > > * - rte_event_eth_rx_adapter_queue_add() > > * - rte_event_eth_rx_adapter_queue_del() > >@@ -36,7 +37,7 @@ > > * > > * The application creates an ethernet to event adapter using > > * rte_event_eth_rx_adapter_create_ext() or > >rte_event_eth_rx_adapter_create() > >- * functions. 
> >+ * or rte_event_eth_rx_adapter_create_with_params() functions. > > * The adapter needs to know which ethernet rx queues to poll for > >mbufs as well > > * as event device parameters such as the event queue identifier, > >event > > * priority and scheduling type that the adapter should use when > >constructing @@ -256,6 +257,16 @@ struct > >rte_event_eth_rx_adapter_vector_limits { > > */ > > }; > > > >+/** > >+ * A structure to hold adapter config params */ struct > >+rte_event_eth_rx_adapter_params { > >+ uint16_t event_buf_size; > >+ /**< size of event buffer for the adapter. > >+ * the size is aligned to BATCH_SIZE and added (2 * BATCH_SIZE) > >+ */ > > BATCH_SIZE is internal i.e. not exposed to application layer, can we please > define what is BATCH_SIZE here and why applications input needs to be > aligned to BATCH_SIZE? > and why 2*BATCH size is required. BATCH_SIZE is the size used to dequeue packets form NIC rx queues. If the event buffer size aligned to BATCH_SIZE, the buffer utilization will be upto Its fullest and avoid wastage of buffer space during rollover conditions. The additional adjustment is to make sure that, the buffer usage is at least upto The user requested size under overload conditions. > > If this is an application driven parameter we shouldn't impose arbitrary > constraints like this. > Maybe "Aligned to power of 2" would be sufficient. > > OR > > Extend rte_event_eth_rx_adapter_caps_get() to return optimal > event_buf_size if supported. > > > >+}; > >+ > > /** > > * > > * Callback function invoked by the SW adapter before it continues @@ > >-356,6 +367,33 @@ int rte_event_eth_rx_adapter_create_ext(uint8_t id, > >uint8_t dev_id, int rte_event_eth_rx_adapter_create(uint8_t id, > >uint8_t dev_id, > > struct rte_event_port_conf > >*port_config); > > > >+/** > >+ * This is a variant of rte_event_eth_rx_adapter_create() with > >additional > >+ * adapter params specified in ``struct > >rte_event_eth_rx_adapter_params``. 
> >+ * > >+ * @param id > >+ * The identifier of the ethernet Rx event adapter. > >+ * > >+ * @param dev_id > >+ * The identifier of the event device to configure. > >+ * > >+ * @param port_config > >+ * Argument of type *rte_event_port_conf* that is passed to the > >conf_cb > >+ * function. > >+ * > >+ * @param rxa_params > >+ * Pointer to struct rte_event_eth_rx_adapter_params. > >+ * In case of NULL, default values are used. > >+ * > >+ * @return > >+ * - 0: Success > >+ * - <0: Error code on failure > >+ */ > >+__rte_experimental > >+int rte_event_eth_rx_adapter_create_with_params(uint8_t id, > >uint8_t dev_id, > >+ struct rte_event_port_conf *port_config, > >+ struct rte_event_eth_rx_adapter_params > >*rxa_params); > >+ > > /** > > * Free an event adapter > > * > >diff --git a/lib/eventdev/version.map b/lib/eventdev/version.map index > >cd86d2d908..87586de879 100644 > >--- a/lib/eventdev/version.map > >+++ b/lib/eventdev/version.map > >@@ -138,6 +138,8 @@ EXPERIMENTAL { > > __rte_eventdev_trace_port_setup; > > # added in 20.11 > > rte_event_pmd_pci_probe_named; > >+ # added in 21.11 > >+ rte_event_eth_rx_adapter_create_with_params; > > > > #added in 21.05 > > rte_event_vector_pool_create; > >-- > >2.25.1 ^ permalink raw reply [flat|nested] 81+ messages in thread
* [dpdk-dev] [PATCH v3 1/5] eventdev/rx_adapter: add event buffer size configurability 2021-09-21 9:45 ` [dpdk-dev] [PATCH v2 1/5] eventdev/rx_adapter: add support to configure event buffer size Naga Harish K S V ` (4 preceding siblings ...) 2021-09-21 20:24 ` [dpdk-dev] [EXT] [PATCH v2 1/5] eventdev/rx_adapter: add support to configure event buffer size Pavan Nikhilesh Bhagavatula @ 2021-09-22 15:13 ` Naga Harish K S V 2021-09-22 15:13 ` [dpdk-dev] [PATCH v3 2/5] test/event: add unit test for Rx adapter Naga Harish K S V ` (6 more replies) 5 siblings, 7 replies; 81+ messages in thread From: Naga Harish K S V @ 2021-09-22 15:13 UTC (permalink / raw) To: jerinj, jay.jayatheerthan; +Cc: dev, Ganapati Kundapura Currently, the event buffer is a static array with a default size defined internally. To configure the event buffer size from the application, the ``rte_event_eth_rx_adapter_create_with_params`` API is added, which takes ``struct rte_event_eth_rx_adapter_params`` to configure the event buffer size in addition to other params. The event buffer size is rounded up for better buffer utilization and performance. In case of a NULL params argument, the default event buffer size is used. Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com> Signed-off-by: Ganapati Kundapura <ganapati.kundapura@intel.com> --- v3: * updated documentation and code comments as per review comments. * updated new create api test case name with suitable one. v2: * Updated header file and rx adapter documentation as per review comments. * new api name is modified as rte_event_eth_rx_adapter_create_with_params as per review comments. * rxa_params pointer argument Value NULL is allowed to represent the default values v1: * Initial implementation with documentation and unit tests. 
--- .../prog_guide/event_ethernet_rx_adapter.rst | 7 ++ lib/eventdev/rte_event_eth_rx_adapter.c | 98 +++++++++++++++++-- lib/eventdev/rte_event_eth_rx_adapter.h | 41 +++++++- lib/eventdev/version.map | 2 + 4 files changed, 140 insertions(+), 8 deletions(-) diff --git a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst index 0780b6f711..dd753613bd 100644 --- a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst +++ b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst @@ -62,6 +62,13 @@ service function and needs to create an event port for it. The callback is expected to fill the ``struct rte_event_eth_rx_adapter_conf structure`` passed to it. +If the application desires to control the event buffer size, it can use the +``rte_event_eth_rx_adapter_create_with_params()`` api. The event buffer size is +specified using ``struct rte_event_eth_rx_adapter_params::event_buf_size``. +The function is passed the event device to be associated with the adapter +and port configuration for the adapter to setup an event port if the +adapter needs to use a service function. 
+ Adding Rx Queues to the Adapter Instance ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/lib/eventdev/rte_event_eth_rx_adapter.c b/lib/eventdev/rte_event_eth_rx_adapter.c index f2dc69503d..7dec9a8734 100644 --- a/lib/eventdev/rte_event_eth_rx_adapter.c +++ b/lib/eventdev/rte_event_eth_rx_adapter.c @@ -82,7 +82,9 @@ struct rte_eth_event_enqueue_buffer { /* Count of events in this buffer */ uint16_t count; /* Array of events in this buffer */ - struct rte_event events[ETH_EVENT_BUFFER_SIZE]; + struct rte_event *events; + /* size of event buffer */ + uint16_t events_size; /* Event enqueue happens from head */ uint16_t head; /* New packets from rte_eth_rx_burst is enqued from tail */ @@ -919,7 +921,7 @@ rxa_buffer_mbufs(struct rte_event_eth_rx_adapter *rx_adapter, dropped = 0; nb_cb = dev_info->cb_fn(eth_dev_id, rx_queue_id, buf->last | - (RTE_DIM(buf->events) & ~buf->last_mask), + (buf->events_size & ~buf->last_mask), buf->count >= BATCH_SIZE ? buf->count - BATCH_SIZE : 0, &buf->events[buf->tail], @@ -945,7 +947,7 @@ rxa_pkt_buf_available(struct rte_eth_event_enqueue_buffer *buf) uint32_t nb_req = buf->tail + BATCH_SIZE; if (!buf->last) { - if (nb_req <= RTE_DIM(buf->events)) + if (nb_req <= buf->events_size) return true; if (buf->head >= BATCH_SIZE) { @@ -2164,12 +2166,15 @@ rxa_ctrl(uint8_t id, int start) return 0; } -int -rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, - rte_event_eth_rx_adapter_conf_cb conf_cb, - void *conf_arg) +static int +rxa_create(uint8_t id, uint8_t dev_id, + struct rte_event_eth_rx_adapter_params *rxa_params, + rte_event_eth_rx_adapter_conf_cb conf_cb, + void *conf_arg) { struct rte_event_eth_rx_adapter *rx_adapter; + struct rte_eth_event_enqueue_buffer *buf; + struct rte_event *events; int ret; int socket_id; uint16_t i; @@ -2184,6 +2189,7 @@ rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL); RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); 
+ if (conf_cb == NULL) return -EINVAL; @@ -2231,11 +2237,30 @@ rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, rte_free(rx_adapter); return -ENOMEM; } + rte_spinlock_init(&rx_adapter->rx_lock); + for (i = 0; i < RTE_MAX_ETHPORTS; i++) rx_adapter->eth_devices[i].dev = &rte_eth_devices[i]; + /* Rx adapter event buffer allocation */ + buf = &rx_adapter->event_enqueue_buffer; + buf->events_size = RTE_ALIGN(rxa_params->event_buf_size, BATCH_SIZE); + + events = rte_zmalloc_socket(rx_adapter->mem_name, + buf->events_size * sizeof(*events), + 0, socket_id); + if (events == NULL) { + RTE_EDEV_LOG_ERR("Failed to allocate mem for event buffer\n"); + rte_free(rx_adapter->eth_devices); + rte_free(rx_adapter); + return -ENOMEM; + } + + rx_adapter->event_enqueue_buffer.events = events; + event_eth_rx_adapter[id] = rx_adapter; + if (conf_cb == rxa_default_conf_cb) rx_adapter->default_cb_arg = 1; rte_eventdev_trace_eth_rx_adapter_create(id, dev_id, conf_cb, @@ -2243,6 +2268,61 @@ rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, return 0; } +int +rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, + rte_event_eth_rx_adapter_conf_cb conf_cb, + void *conf_arg) +{ + struct rte_event_eth_rx_adapter_params rxa_params; + + /* use default values for adapter params */ + rxa_params.event_buf_size = ETH_EVENT_BUFFER_SIZE; + + return rxa_create(id, dev_id, &rxa_params, conf_cb, conf_arg); +} + +int +rte_event_eth_rx_adapter_create_with_params(uint8_t id, uint8_t dev_id, + struct rte_event_port_conf *port_config, + struct rte_event_eth_rx_adapter_params *rxa_params) +{ + struct rte_event_port_conf *pc; + int ret; + struct rte_event_eth_rx_adapter_params temp_params = {0}; + + if (port_config == NULL) + return -EINVAL; + + /* use default values if rxa_parmas is NULL */ + if (rxa_params == NULL) { + rxa_params = &temp_params; + rxa_params->event_buf_size = ETH_EVENT_BUFFER_SIZE; + } + + if (rxa_params->event_buf_size == 0) + return -EINVAL; + + pc = 
rte_malloc(NULL, sizeof(*pc), 0); + if (pc == NULL) + return -ENOMEM; + + *pc = *port_config; + + /* adjust event buff size with BATCH_SIZE used for fetching packets + * from NIC rx queues to get full buffer utilization and prevent + * unnecessary rollovers. + */ + rxa_params->event_buf_size = RTE_ALIGN(rxa_params->event_buf_size, + BATCH_SIZE); + rxa_params->event_buf_size += BATCH_SIZE + BATCH_SIZE; + + ret = rxa_create(id, dev_id, rxa_params, rxa_default_conf_cb, pc); + if (ret) + rte_free(pc); + + return ret; +} + int rte_event_eth_rx_adapter_create(uint8_t id, uint8_t dev_id, struct rte_event_port_conf *port_config) @@ -2252,12 +2332,14 @@ rte_event_eth_rx_adapter_create(uint8_t id, uint8_t dev_id, if (port_config == NULL) return -EINVAL; + RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL); pc = rte_malloc(NULL, sizeof(*pc), 0); if (pc == NULL) return -ENOMEM; *pc = *port_config; + ret = rte_event_eth_rx_adapter_create_ext(id, dev_id, rxa_default_conf_cb, pc); @@ -2286,6 +2368,7 @@ rte_event_eth_rx_adapter_free(uint8_t id) if (rx_adapter->default_cb_arg) rte_free(rx_adapter->conf_arg); rte_free(rx_adapter->eth_devices); + rte_free(rx_adapter->event_enqueue_buffer.events); rte_free(rx_adapter); event_eth_rx_adapter[id] = NULL; @@ -2658,6 +2741,7 @@ rte_event_eth_rx_adapter_stats_get(uint8_t id, stats->rx_packets += dev_stats_sum.rx_packets; stats->rx_enq_count += dev_stats_sum.rx_enq_count; + return 0; } diff --git a/lib/eventdev/rte_event_eth_rx_adapter.h b/lib/eventdev/rte_event_eth_rx_adapter.h index 3f8b362295..6e8b3085f8 100644 --- a/lib/eventdev/rte_event_eth_rx_adapter.h +++ b/lib/eventdev/rte_event_eth_rx_adapter.h @@ -26,6 +26,7 @@ * The ethernet Rx event adapter's functions are: * - rte_event_eth_rx_adapter_create_ext() * - rte_event_eth_rx_adapter_create() + * - rte_event_eth_rx_adapter_create_with_params() * - rte_event_eth_rx_adapter_free() * - rte_event_eth_rx_adapter_queue_add() * - rte_event_eth_rx_adapter_queue_del() @@ -36,7 +37,7 @@ * 
* The application creates an ethernet to event adapter using * rte_event_eth_rx_adapter_create_ext() or rte_event_eth_rx_adapter_create() - * functions. + * or rte_event_eth_rx_adapter_create_with_params() functions. * The adapter needs to know which ethernet rx queues to poll for mbufs as well * as event device parameters such as the event queue identifier, event * priority and scheduling type that the adapter should use when constructing @@ -256,6 +257,17 @@ struct rte_event_eth_rx_adapter_vector_limits { */ }; +/** + * A structure to hold adapter config params + */ +struct rte_event_eth_rx_adapter_params { + uint16_t event_buf_size; + /**< size of event buffer for the adapter. + * This value is rounded up for better buffer utilization + * and performance. + */ +}; + /** * * Callback function invoked by the SW adapter before it continues @@ -356,6 +368,33 @@ int rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, int rte_event_eth_rx_adapter_create(uint8_t id, uint8_t dev_id, struct rte_event_port_conf *port_config); +/** + * This is a variant of rte_event_eth_rx_adapter_create() with additional + * adapter params specified in ``struct rte_event_eth_rx_adapter_params``. + * + * @param id + * The identifier of the ethernet Rx event adapter. + * + * @param dev_id + * The identifier of the event device to configure. + * + * @param port_config + * Argument of type *rte_event_port_conf* that is passed to the conf_cb + * function. + * + * @param rxa_params + * Pointer to struct rte_event_eth_rx_adapter_params. + * In case of NULL, default values are used. 
+ * + * @return + * - 0: Success + * - <0: Error code on failure + */ +__rte_experimental +int rte_event_eth_rx_adapter_create_with_params(uint8_t id, uint8_t dev_id, + struct rte_event_port_conf *port_config, + struct rte_event_eth_rx_adapter_params *rxa_params); + /** * Free an event adapter * diff --git a/lib/eventdev/version.map b/lib/eventdev/version.map index cd86d2d908..87586de879 100644 --- a/lib/eventdev/version.map +++ b/lib/eventdev/version.map @@ -138,6 +138,8 @@ EXPERIMENTAL { __rte_eventdev_trace_port_setup; # added in 20.11 rte_event_pmd_pci_probe_named; + # added in 21.11 + rte_event_eth_rx_adapter_create_with_params; #added in 21.05 rte_event_vector_pool_create; -- 2.25.1 ^ permalink raw reply [flat|nested] 81+ messages in thread
* [dpdk-dev] [PATCH v3 2/5] test/event: add unit test for Rx adapter 2021-09-22 15:13 ` [dpdk-dev] [PATCH v3 1/5] eventdev/rx_adapter: add event buffer size configurability Naga Harish K S V @ 2021-09-22 15:13 ` Naga Harish K S V 2021-09-22 15:13 ` [dpdk-dev] [PATCH v3 3/5] eventdev/rx_adapter: introduce per queue event buffer Naga Harish K S V ` (5 subsequent siblings) 6 siblings, 0 replies; 81+ messages in thread From: Naga Harish K S V @ 2021-09-22 15:13 UTC (permalink / raw) To: jerinj, jay.jayatheerthan; +Cc: dev this patch adds unit test for rte_event_eth_rx_adapter_create_with_params api and validate all possible input combinations. Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com> --- app/test/test_event_eth_rx_adapter.c | 53 +++++++++++++++++++++++++--- 1 file changed, 49 insertions(+), 4 deletions(-) diff --git a/app/test/test_event_eth_rx_adapter.c b/app/test/test_event_eth_rx_adapter.c index add4d8a678..183ec92ccb 100644 --- a/app/test/test_event_eth_rx_adapter.c +++ b/app/test/test_event_eth_rx_adapter.c @@ -428,6 +428,50 @@ adapter_create_free(void) return TEST_SUCCESS; } +static int +adapter_create_free_with_params(void) +{ + int err; + + struct rte_event_port_conf rx_p_conf = { + .dequeue_depth = 8, + .enqueue_depth = 8, + .new_event_threshold = 1200, + }; + + struct rte_event_eth_rx_adapter_params rxa_params = { + .event_buf_size = 1024 + }; + + err = rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID, + TEST_DEV_ID, NULL, NULL); + TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err); + + err = rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID, + TEST_DEV_ID, &rx_p_conf, &rxa_params); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + err = rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID, + TEST_DEV_ID, &rx_p_conf, &rxa_params); + TEST_ASSERT(err == -EEXIST, "Expected -EEXIST %d got %d", -EEXIST, err); + + rxa_params.event_buf_size = 0; + err = rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID, + 
TEST_DEV_ID, &rx_p_conf, &rxa_params); + TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err); + + err = rte_event_eth_rx_adapter_free(TEST_INST_ID); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + err = rte_event_eth_rx_adapter_free(TEST_INST_ID); + TEST_ASSERT(err == -EINVAL, "Expected -EINVAL %d got %d", -EINVAL, err); + + err = rte_event_eth_rx_adapter_free(1); + TEST_ASSERT(err == -EINVAL, "Expected -EINVAL %d got %d", -EINVAL, err); + + return TEST_SUCCESS; +} + static int adapter_queue_add_del(void) { @@ -435,7 +479,7 @@ adapter_queue_add_del(void) struct rte_event ev; uint32_t cap; - struct rte_event_eth_rx_adapter_queue_conf queue_config; + struct rte_event_eth_rx_adapter_queue_conf queue_config = {0}; err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID, TEST_ETHDEV_ID, &cap); @@ -523,7 +567,7 @@ adapter_multi_eth_add_del(void) uint16_t port_index, port_index_base, drv_id = 0; char driver_name[50]; - struct rte_event_eth_rx_adapter_queue_conf queue_config; + struct rte_event_eth_rx_adapter_queue_conf queue_config = {0}; ev.queue_id = 0; ev.sched_type = RTE_SCHED_TYPE_ATOMIC; @@ -594,7 +638,7 @@ adapter_intr_queue_add_del(void) struct rte_event ev; uint32_t cap; uint16_t eth_port; - struct rte_event_eth_rx_adapter_queue_conf queue_config; + struct rte_event_eth_rx_adapter_queue_conf queue_config = {0}; if (!default_params.rx_intr_port_inited) return 0; @@ -687,7 +731,7 @@ adapter_start_stop(void) ev.sched_type = RTE_SCHED_TYPE_ATOMIC; ev.priority = 0; - struct rte_event_eth_rx_adapter_queue_conf queue_config; + struct rte_event_eth_rx_adapter_queue_conf queue_config = {0}; queue_config.rx_queue_flags = 0; if (default_params.caps & @@ -753,6 +797,7 @@ static struct unit_test_suite event_eth_rx_tests = { .teardown = testsuite_teardown, .unit_test_cases = { TEST_CASE_ST(NULL, NULL, adapter_create_free), + TEST_CASE_ST(NULL, NULL, adapter_create_free_with_params), TEST_CASE_ST(adapter_create, adapter_free, adapter_queue_add_del), 
TEST_CASE_ST(adapter_create, adapter_free, -- 2.25.1 ^ permalink raw reply [flat|nested] 81+ messages in thread
* [dpdk-dev] [PATCH v3 3/5] eventdev/rx_adapter: introduce per queue event buffer 2021-09-22 15:13 ` [dpdk-dev] [PATCH v3 1/5] eventdev/rx_adapter: add event buffer size configurability Naga Harish K S V 2021-09-22 15:13 ` [dpdk-dev] [PATCH v3 2/5] test/event: add unit test for Rx adapter Naga Harish K S V @ 2021-09-22 15:13 ` Naga Harish K S V 2021-09-22 15:13 ` [dpdk-dev] [PATCH v3 4/5] eventdev/rx_adapter: implement " Naga Harish K S V ` (4 subsequent siblings) 6 siblings, 0 replies; 81+ messages in thread From: Naga Harish K S V @ 2021-09-22 15:13 UTC (permalink / raw) To: jerinj, jay.jayatheerthan; +Cc: dev To configure per queue event buffer size, application sets ``rte_event_eth_rx_adapter_params::use_queue_event_buf`` flag as true and is passed to ``rte_event_eth_rx_adapter_create_with_params`` api. The per queue event buffer size is populated in ``rte_event_eth_rx_adapter_queue_conf::event_buf_size`` and passed to ``rte_event_eth_rx_adapter_queue_add`` api. Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com> --- .../prog_guide/event_ethernet_rx_adapter.rst | 19 ++++++++++++------- lib/eventdev/rte_event_eth_rx_adapter.h | 4 ++++ 2 files changed, 16 insertions(+), 7 deletions(-) diff --git a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst index dd753613bd..e73ae135d5 100644 --- a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst +++ b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst @@ -62,12 +62,14 @@ service function and needs to create an event port for it. The callback is expected to fill the ``struct rte_event_eth_rx_adapter_conf structure`` passed to it. -If the application desires to control the event buffer size, it can use the -``rte_event_eth_rx_adapter_create_with_params()`` api. The event buffer size is -specified using ``struct rte_event_eth_rx_adapter_params::event_buf_size``. 
-The function is passed the event device to be associated with the adapter -and port configuration for the adapter to setup an event port if the -adapter needs to use a service function. +If the application desires to control the event buffer size at adapter level, +it can use the ``rte_event_eth_rx_adapter_create_with_params()`` api. The event +buffer size is specified using ``struct rte_event_eth_rx_adapter_params:: +event_buf_size``. To configure the event buffer size at queue level, the boolean +flag ``struct rte_event_eth_rx_adapter_params::use_queue_event_buf`` need to be +set to true. The function is passed the event device to be associated with +the adapter and port configuration for the adapter to setup an event port +if the adapter needs to use a service function. Adding Rx Queues to the Adapter Instance ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -79,7 +81,9 @@ parameter. Event information for packets from this Rx queue is encoded in the ``ev`` field of ``struct rte_event_eth_rx_adapter_queue_conf``. The servicing_weight member of the struct rte_event_eth_rx_adapter_queue_conf is the relative polling frequency of the Rx queue and is applicable when the -adapter uses a service core function. +adapter uses a service core function. The applications can configure queue +event buffer size in ``struct rte_event_eth_rx_adapter_queue_conf::event_buf_size`` +parameter. .. code-block:: c @@ -90,6 +94,7 @@ adapter uses a service core function. 
queue_config.rx_queue_flags = 0; queue_config.ev = ev; queue_config.servicing_weight = 1; + queue_config.event_buf_size = 1024; err = rte_event_eth_rx_adapter_queue_add(id, eth_dev_id, diff --git a/lib/eventdev/rte_event_eth_rx_adapter.h b/lib/eventdev/rte_event_eth_rx_adapter.h index 6e8b3085f8..8e341b8bc8 100644 --- a/lib/eventdev/rte_event_eth_rx_adapter.h +++ b/lib/eventdev/rte_event_eth_rx_adapter.h @@ -199,6 +199,8 @@ struct rte_event_eth_rx_adapter_queue_conf { * Valid when RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR flag is set in * @see rte_event_eth_rx_adapter_queue_conf::rx_queue_flags. */ + uint16_t event_buf_size; + /**< event buffer size for this queue */ }; /** @@ -266,6 +268,8 @@ struct rte_event_eth_rx_adapter_params { * This value is rounded up for better buffer utilization * and performance. */ + bool use_queue_event_buf; + /**< flag to indicate that event buffer is separate for each queue */ }; /** -- 2.25.1 ^ permalink raw reply [flat|nested] 81+ messages in thread
* [dpdk-dev] [PATCH v3 4/5] eventdev/rx_adapter: implement per queue event buffer 2021-09-22 15:13 ` [dpdk-dev] [PATCH v3 1/5] eventdev/rx_adapter: add event buffer size configurability Naga Harish K S V 2021-09-22 15:13 ` [dpdk-dev] [PATCH v3 2/5] test/event: add unit test for Rx adapter Naga Harish K S V 2021-09-22 15:13 ` [dpdk-dev] [PATCH v3 3/5] eventdev/rx_adapter: introduce per queue event buffer Naga Harish K S V @ 2021-09-22 15:13 ` Naga Harish K S V 2021-09-22 15:14 ` [dpdk-dev] [PATCH v3 5/5] test/event: add unit test for Rx adapter Naga Harish K S V ` (3 subsequent siblings) 6 siblings, 0 replies; 81+ messages in thread From: Naga Harish K S V @ 2021-09-22 15:13 UTC (permalink / raw) To: jerinj, jay.jayatheerthan; +Cc: dev this patch implement the per queue event buffer with required validations. Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com> --- lib/eventdev/rte_event_eth_rx_adapter.c | 187 +++++++++++++++++------- 1 file changed, 138 insertions(+), 49 deletions(-) diff --git a/lib/eventdev/rte_event_eth_rx_adapter.c b/lib/eventdev/rte_event_eth_rx_adapter.c index 7dec9a8734..f3d5efd916 100644 --- a/lib/eventdev/rte_event_eth_rx_adapter.c +++ b/lib/eventdev/rte_event_eth_rx_adapter.c @@ -99,10 +99,12 @@ struct rte_event_eth_rx_adapter { uint8_t rss_key_be[RSS_KEY_SIZE]; /* Event device identifier */ uint8_t eventdev_id; - /* Per ethernet device structure */ - struct eth_device_info *eth_devices; /* Event port identifier */ uint8_t event_port_id; + /* Flag indicating per rxq event buffer */ + bool use_queue_event_buf; + /* Per ethernet device structure */ + struct eth_device_info *eth_devices; /* Lock to serialize config updates with service function */ rte_spinlock_t rx_lock; /* Max mbufs processed in any service function invocation */ @@ -238,6 +240,7 @@ struct eth_rx_queue_info { uint32_t flow_id_mask; /* Set to ~0 if app provides flow id else 0 */ uint64_t event; struct eth_rx_vector_data vector_data; + struct 
rte_eth_event_enqueue_buffer *event_buf; }; static struct rte_event_eth_rx_adapter **event_eth_rx_adapter; @@ -753,10 +756,9 @@ rxa_enq_block_end_ts(struct rte_event_eth_rx_adapter *rx_adapter, /* Enqueue buffered events to event device */ static inline uint16_t -rxa_flush_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter) +rxa_flush_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter, + struct rte_eth_event_enqueue_buffer *buf) { - struct rte_eth_event_enqueue_buffer *buf = - &rx_adapter->event_enqueue_buffer; struct rte_event_eth_rx_adapter_stats *stats = &rx_adapter->stats; uint16_t count = buf->last ? buf->last - buf->head : buf->count; @@ -874,15 +876,14 @@ rxa_buffer_mbufs(struct rte_event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id, uint16_t rx_queue_id, struct rte_mbuf **mbufs, - uint16_t num) + uint16_t num, + struct rte_eth_event_enqueue_buffer *buf) { uint32_t i; struct eth_device_info *dev_info = &rx_adapter->eth_devices[eth_dev_id]; struct eth_rx_queue_info *eth_rx_queue_info = &dev_info->rx_queue[rx_queue_id]; - struct rte_eth_event_enqueue_buffer *buf = - &rx_adapter->event_enqueue_buffer; uint16_t new_tail = buf->tail; uint64_t event = eth_rx_queue_info->event; uint32_t flow_id_mask = eth_rx_queue_info->flow_id_mask; @@ -968,11 +969,10 @@ rxa_eth_rx(struct rte_event_eth_rx_adapter *rx_adapter, uint16_t queue_id, uint32_t rx_count, uint32_t max_rx, - int *rxq_empty) + int *rxq_empty, + struct rte_eth_event_enqueue_buffer *buf) { struct rte_mbuf *mbufs[BATCH_SIZE]; - struct rte_eth_event_enqueue_buffer *buf = - &rx_adapter->event_enqueue_buffer; struct rte_event_eth_rx_adapter_stats *stats = &rx_adapter->stats; uint16_t n; @@ -985,7 +985,7 @@ rxa_eth_rx(struct rte_event_eth_rx_adapter *rx_adapter, */ while (rxa_pkt_buf_available(buf)) { if (buf->count >= BATCH_SIZE) - rxa_flush_event_buffer(rx_adapter); + rxa_flush_event_buffer(rx_adapter, buf); stats->rx_poll_count++; n = rte_eth_rx_burst(port_id, queue_id, mbufs, BATCH_SIZE); @@ 
-994,14 +994,14 @@ rxa_eth_rx(struct rte_event_eth_rx_adapter *rx_adapter, *rxq_empty = 1; break; } - rxa_buffer_mbufs(rx_adapter, port_id, queue_id, mbufs, n); + rxa_buffer_mbufs(rx_adapter, port_id, queue_id, mbufs, n, buf); nb_rx += n; if (rx_count + nb_rx > max_rx) break; } if (buf->count > 0) - rxa_flush_event_buffer(rx_adapter); + rxa_flush_event_buffer(rx_adapter, buf); return nb_rx; } @@ -1142,7 +1142,7 @@ rxa_intr_ring_dequeue(struct rte_event_eth_rx_adapter *rx_adapter) ring_lock = &rx_adapter->intr_ring_lock; if (buf->count >= BATCH_SIZE) - rxa_flush_event_buffer(rx_adapter); + rxa_flush_event_buffer(rx_adapter, buf); while (rxa_pkt_buf_available(buf)) { struct eth_device_info *dev_info; @@ -1194,7 +1194,7 @@ rxa_intr_ring_dequeue(struct rte_event_eth_rx_adapter *rx_adapter) continue; n = rxa_eth_rx(rx_adapter, port, i, nb_rx, rx_adapter->max_nb_rx, - &rxq_empty); + &rxq_empty, buf); nb_rx += n; enq_buffer_full = !rxq_empty && n == 0; @@ -1215,7 +1215,7 @@ rxa_intr_ring_dequeue(struct rte_event_eth_rx_adapter *rx_adapter) } else { n = rxa_eth_rx(rx_adapter, port, queue, nb_rx, rx_adapter->max_nb_rx, - &rxq_empty); + &rxq_empty, buf); rx_adapter->qd_valid = !rxq_empty; nb_rx += n; if (nb_rx > rx_adapter->max_nb_rx) @@ -1246,13 +1246,12 @@ rxa_poll(struct rte_event_eth_rx_adapter *rx_adapter) { uint32_t num_queue; uint32_t nb_rx = 0; - struct rte_eth_event_enqueue_buffer *buf; + struct rte_eth_event_enqueue_buffer *buf = NULL; uint32_t wrr_pos; uint32_t max_nb_rx; wrr_pos = rx_adapter->wrr_pos; max_nb_rx = rx_adapter->max_nb_rx; - buf = &rx_adapter->event_enqueue_buffer; /* Iterate through a WRR sequence */ for (num_queue = 0; num_queue < rx_adapter->wrr_len; num_queue++) { @@ -1260,24 +1259,36 @@ rxa_poll(struct rte_event_eth_rx_adapter *rx_adapter) uint16_t qid = rx_adapter->eth_rx_poll[poll_idx].eth_rx_qid; uint16_t d = rx_adapter->eth_rx_poll[poll_idx].eth_dev_id; + if (rx_adapter->use_queue_event_buf) { + struct eth_device_info *dev_info = + 
&rx_adapter->eth_devices[d]; + buf = dev_info->rx_queue[qid].event_buf; + } else + buf = &rx_adapter->event_enqueue_buffer; + /* Don't do a batch dequeue from the rx queue if there isn't * enough space in the enqueue buffer. */ if (buf->count >= BATCH_SIZE) - rxa_flush_event_buffer(rx_adapter); + rxa_flush_event_buffer(rx_adapter, buf); if (!rxa_pkt_buf_available(buf)) { - rx_adapter->wrr_pos = wrr_pos; - return nb_rx; + if (rx_adapter->use_queue_event_buf) + goto poll_next_entry; + else { + rx_adapter->wrr_pos = wrr_pos; + return nb_rx; + } } nb_rx += rxa_eth_rx(rx_adapter, d, qid, nb_rx, max_nb_rx, - NULL); + NULL, buf); if (nb_rx > max_nb_rx) { rx_adapter->wrr_pos = (wrr_pos + 1) % rx_adapter->wrr_len; break; } +poll_next_entry: if (++wrr_pos == rx_adapter->wrr_len) wrr_pos = 0; } @@ -1288,12 +1299,18 @@ static void rxa_vector_expire(struct eth_rx_vector_data *vec, void *arg) { struct rte_event_eth_rx_adapter *rx_adapter = arg; - struct rte_eth_event_enqueue_buffer *buf = - &rx_adapter->event_enqueue_buffer; + struct rte_eth_event_enqueue_buffer *buf = NULL; struct rte_event *ev; + if (rx_adapter->use_queue_event_buf) { + struct eth_device_info *dev_info = + &rx_adapter->eth_devices[vec->port]; + buf = dev_info->rx_queue[vec->queue].event_buf; + } else + buf = &rx_adapter->event_enqueue_buffer; + if (buf->count) - rxa_flush_event_buffer(rx_adapter); + rxa_flush_event_buffer(rx_adapter, buf); if (vec->vector_ev->nb_elem == 0) return; @@ -1905,9 +1922,16 @@ rxa_sw_del(struct rte_event_eth_rx_adapter *rx_adapter, rx_adapter->num_rx_intr -= intrq; dev_info->nb_rx_intr -= intrq; dev_info->nb_shared_intr -= intrq && sintrq; + if (rx_adapter->use_queue_event_buf) { + struct rte_eth_event_enqueue_buffer *event_buf = + dev_info->rx_queue[rx_queue_id].event_buf; + rte_free(event_buf->events); + rte_free(event_buf); + dev_info->rx_queue[rx_queue_id].event_buf = NULL; + } } -static void +static int rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter, struct 
eth_device_info *dev_info, int32_t rx_queue_id, @@ -1919,15 +1943,21 @@ rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter, int intrq; int sintrq; struct rte_event *qi_ev; + struct rte_eth_event_enqueue_buffer *new_rx_buf = NULL; + uint16_t eth_dev_id = dev_info->dev->data->port_id; + int ret; if (rx_queue_id == -1) { uint16_t nb_rx_queues; uint16_t i; nb_rx_queues = dev_info->dev->data->nb_rx_queues; - for (i = 0; i < nb_rx_queues; i++) - rxa_add_queue(rx_adapter, dev_info, i, conf); - return; + for (i = 0; i < nb_rx_queues; i++) { + ret = rxa_add_queue(rx_adapter, dev_info, i, conf); + if (ret) + return ret; + } + return 0; } pollq = rxa_polled_queue(dev_info, rx_queue_id); @@ -1990,6 +2020,37 @@ rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter, dev_info->next_q_idx = 0; } } + + if (!rx_adapter->use_queue_event_buf) + return 0; + + new_rx_buf = rte_zmalloc_socket("rx_buffer_meta", + sizeof(*new_rx_buf), 0, + rte_eth_dev_socket_id(eth_dev_id)); + if (new_rx_buf == NULL) { + RTE_EDEV_LOG_ERR("Failed to allocate event buffer meta for " + "dev_id: %d queue_id: %d", + eth_dev_id, rx_queue_id); + return -ENOMEM; + } + + new_rx_buf->events_size = RTE_ALIGN(conf->event_buf_size, BATCH_SIZE); + new_rx_buf->events_size += (2 * BATCH_SIZE); + new_rx_buf->events = rte_zmalloc_socket("rx_buffer", + sizeof(struct rte_event) * + new_rx_buf->events_size, 0, + rte_eth_dev_socket_id(eth_dev_id)); + if (new_rx_buf->events == NULL) { + rte_free(new_rx_buf); + RTE_EDEV_LOG_ERR("Failed to allocate event buffer for " + "dev_id: %d queue_id: %d", + eth_dev_id, rx_queue_id); + return -ENOMEM; + } + + queue_info->event_buf = new_rx_buf; + + return 0; } static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter, @@ -2018,6 +2079,16 @@ static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter, temp_conf.servicing_weight = 1; } queue_conf = &temp_conf; + + if (queue_conf->servicing_weight == 0 && + rx_adapter->use_queue_event_buf) { + + RTE_EDEV_LOG_ERR("Use of 
queue level event buffer " + "not supported for interrupt queues " + "dev_id: %d queue_id: %d", + eth_dev_id, rx_queue_id); + return -EINVAL; + } } nb_rx_queues = dev_info->dev->data->nb_rx_queues; @@ -2097,7 +2168,9 @@ static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter, - rxa_add_queue(rx_adapter, dev_info, rx_queue_id, queue_conf); + ret = rxa_add_queue(rx_adapter, dev_info, rx_queue_id, queue_conf); + if (ret) + goto err_free_rxqueue; rxa_calc_wrr_sequence(rx_adapter, rx_poll, rx_wrr); rte_free(rx_adapter->eth_rx_poll); @@ -2118,7 +2191,7 @@ static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter, rte_free(rx_poll); rte_free(rx_wrr); - return 0; + return ret; } static int @@ -2244,20 +2317,26 @@ rxa_create(uint8_t id, uint8_t dev_id, rx_adapter->eth_devices[i].dev = &rte_eth_devices[i]; /* Rx adapter event buffer allocation */ - buf = &rx_adapter->event_enqueue_buffer; - buf->events_size = RTE_ALIGN(rxa_params->event_buf_size, BATCH_SIZE); - - events = rte_zmalloc_socket(rx_adapter->mem_name, - buf->events_size * sizeof(*events), - 0, socket_id); - if (events == NULL) { - RTE_EDEV_LOG_ERR("Failed to allocate mem for event buffer\n"); - rte_free(rx_adapter->eth_devices); - rte_free(rx_adapter); - return -ENOMEM; - } + rx_adapter->use_queue_event_buf = rxa_params->use_queue_event_buf; + + if (!rx_adapter->use_queue_event_buf) { + buf = &rx_adapter->event_enqueue_buffer; + buf->events_size = RTE_ALIGN(rxa_params->event_buf_size, + BATCH_SIZE); + + events = rte_zmalloc_socket(rx_adapter->mem_name, + buf->events_size * sizeof(*events), + 0, socket_id); + if (events == NULL) { + RTE_EDEV_LOG_ERR("Failed to allocate memory " + "for adapter event buffer"); + rte_free(rx_adapter->eth_devices); + rte_free(rx_adapter); + return -ENOMEM; + } - rx_adapter->event_enqueue_buffer.events = events; + rx_adapter->event_enqueue_buffer.events = events; + } event_eth_rx_adapter[id] = rx_adapter; @@ -2277,6 +2356,7 @@ 
rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, /* use default values for adapter params */ rxa_params.event_buf_size = ETH_EVENT_BUFFER_SIZE; + rxa_params.use_queue_event_buf = false; return rxa_create(id, dev_id, &rxa_params, conf_cb, conf_arg); } @@ -2297,9 +2377,9 @@ rte_event_eth_rx_adapter_create_with_params(uint8_t id, uint8_t dev_id, if (rxa_params == NULL) { rxa_params = &temp_params; rxa_params->event_buf_size = ETH_EVENT_BUFFER_SIZE; - } - - if (rxa_params->event_buf_size == 0) + rxa_params->use_queue_event_buf = false; + } else if ((!rxa_params->use_queue_event_buf && + rxa_params->event_buf_size == 0)) return -EINVAL; pc = rte_malloc(NULL, sizeof(*pc), 0); @@ -2368,7 +2448,8 @@ rte_event_eth_rx_adapter_free(uint8_t id) if (rx_adapter->default_cb_arg) rte_free(rx_adapter->conf_arg); rte_free(rx_adapter->eth_devices); - rte_free(rx_adapter->event_enqueue_buffer.events); + if (!rx_adapter->use_queue_event_buf) + rte_free(rx_adapter->event_enqueue_buffer.events); rte_free(rx_adapter); event_eth_rx_adapter[id] = NULL; @@ -2472,6 +2553,14 @@ rte_event_eth_rx_adapter_queue_add(uint8_t id, return -EINVAL; } + if ((rx_adapter->use_queue_event_buf && + queue_conf->event_buf_size == 0) || + (!rx_adapter->use_queue_event_buf && + queue_conf->event_buf_size != 0)) { + RTE_EDEV_LOG_ERR("Invalid Event buffer size for the queue"); + return -EINVAL; + } + dev_info = &rx_adapter->eth_devices[eth_dev_id]; if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) { -- 2.25.1 ^ permalink raw reply [flat|nested] 81+ messages in thread
* [dpdk-dev] [PATCH v3 5/5] test/event: add unit test for Rx adapter 2021-09-22 15:13 ` [dpdk-dev] [PATCH v3 1/5] eventdev/rx_adapter: add event buffer size configurability Naga Harish K S V ` (2 preceding siblings ...) 2021-09-22 15:13 ` [dpdk-dev] [PATCH v3 4/5] eventdev/rx_adapter: implement " Naga Harish K S V @ 2021-09-22 15:14 ` Naga Harish K S V 2021-09-29 5:16 ` [dpdk-dev] [PATCH v3 1/5] eventdev/rx_adapter: add event buffer size configurability Jerin Jacob ` (2 subsequent siblings) 6 siblings, 0 replies; 81+ messages in thread From: Naga Harish K S V @ 2021-09-22 15:14 UTC (permalink / raw) To: jerinj, jay.jayatheerthan; +Cc: dev this patch adds unit tests for checking per rx queue event buffer feature using rte_event_eth_rx_adapter_queue_add api. Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com> --- app/test/test_event_eth_rx_adapter.c | 86 ++++++++++++++++++++++++++++ 1 file changed, 86 insertions(+) diff --git a/app/test/test_event_eth_rx_adapter.c b/app/test/test_event_eth_rx_adapter.c index 183ec92ccb..710370c3bf 100644 --- a/app/test/test_event_eth_rx_adapter.c +++ b/app/test/test_event_eth_rx_adapter.c @@ -387,6 +387,90 @@ adapter_create(void) return err; } +static int +adapter_create_with_params(void) +{ + int err; + struct rte_event_dev_info dev_info; + struct rte_event_port_conf rx_p_conf; + struct rte_event_eth_rx_adapter_params rxa_params; + + memset(&rx_p_conf, 0, sizeof(rx_p_conf)); + + err = rte_event_dev_info_get(TEST_DEV_ID, &dev_info); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + rx_p_conf.new_event_threshold = dev_info.max_num_events; + rx_p_conf.dequeue_depth = dev_info.max_event_port_dequeue_depth; + rx_p_conf.enqueue_depth = dev_info.max_event_port_enqueue_depth; + + rxa_params.use_queue_event_buf = false; + rxa_params.event_buf_size = 0; + + err = rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID, + TEST_DEV_ID, &rx_p_conf, &rxa_params); + TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err); + + 
rxa_params.use_queue_event_buf = true; + + err = rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID, + TEST_DEV_ID, &rx_p_conf, &rxa_params); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + err = rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID, + TEST_DEV_ID, &rx_p_conf, &rxa_params); + TEST_ASSERT(err == -EEXIST, "Expected -EEXIST got %d", err); + + return TEST_SUCCESS; +} + +static int +adapter_queue_event_buf_test(void) +{ + int err; + struct rte_event ev; + uint32_t cap; + + struct rte_event_eth_rx_adapter_queue_conf queue_config = {0}; + + err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID, TEST_ETHDEV_ID, + &cap); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + ev.queue_id = 0; + ev.sched_type = RTE_SCHED_TYPE_ATOMIC; + ev.priority = 0; + + queue_config.rx_queue_flags = 0; + if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID) { + ev.flow_id = 1; + queue_config.rx_queue_flags = + RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID; + } + queue_config.ev = ev; + queue_config.servicing_weight = 1; + queue_config.event_buf_size = 0; + + err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID, + TEST_ETHDEV_ID, 0, + &queue_config); + TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err); + + queue_config.event_buf_size = 1024; + + err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID, + TEST_ETHDEV_ID, 0, + &queue_config); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID, + TEST_ETHDEV_ID, + 0); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + return TEST_SUCCESS; +} + static void adapter_free(void) { @@ -804,6 +888,8 @@ static struct unit_test_suite event_eth_rx_tests = { adapter_multi_eth_add_del), TEST_CASE_ST(adapter_create, adapter_free, adapter_start_stop), TEST_CASE_ST(adapter_create, adapter_free, adapter_stats), + TEST_CASE_ST(adapter_create_with_params, adapter_free, + adapter_queue_event_buf_test), TEST_CASES_END() /**< NULL terminate unit test array */ } 
}; -- 2.25.1 ^ permalink raw reply [flat|nested] 81+ messages in thread
* Re: [dpdk-dev] [PATCH v3 1/5] eventdev/rx_adapter: add event buffer size configurability 2021-09-22 15:13 ` [dpdk-dev] [PATCH v3 1/5] eventdev/rx_adapter: add event buffer size configurability Naga Harish K S V ` (3 preceding siblings ...) 2021-09-22 15:14 ` [dpdk-dev] [PATCH v3 5/5] test/event: add unit test for Rx adapter Naga Harish K S V @ 2021-09-29 5:16 ` Jerin Jacob 2021-09-30 8:34 ` Jayatheerthan, Jay 2021-09-30 8:28 ` [dpdk-dev] [PATCH v4 " Naga Harish K S V 2021-10-07 8:51 ` [dpdk-dev] [PATCH v3 1/5] eventdev/rx_adapter: add event buffer size configurability Jerin Jacob 6 siblings, 1 reply; 81+ messages in thread From: Jerin Jacob @ 2021-09-29 5:16 UTC (permalink / raw) To: Naga Harish K S V Cc: Jerin Jacob, Jayatheerthan, Jay, dpdk-dev, Ganapati Kundapura On Wed, Sep 22, 2021 at 8:44 PM Naga Harish K S V <s.v.naga.harish.k@intel.com> wrote: > > Currently event buffer is static array with a default size defined > internally. > > To configure event buffer size from application, > ``rte_event_eth_rx_adapter_create_with_params`` api is added which > takes ``struct rte_event_eth_rx_adapter_params`` to configure event > buffer size in addition other params . The event buffer size is > rounded up for better buffer utilization and performance . In case > of NULL params argument, default event buffer size is used. > > Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com> > Signed-off-by: Ganapati Kundapura <ganapati.kundapura@intel.com> Changes look good to me. @Jayatheerthan, Jay Could you review and Ack it? > > --- > v3: > * updated documentation and code comments as per review comments. > * updated new create api test case name with suitable one. > > v2: > * Updated header file and rx adapter documentation as per review comments. 
> * rxa_params pointer argument Value NULL is allowed to represent the > default values > > v1: > * Initial implementation with documentation and unit tests. > --- > .../prog_guide/event_ethernet_rx_adapter.rst | 7 ++ > lib/eventdev/rte_event_eth_rx_adapter.c | 98 +++++++++++++++++-- > lib/eventdev/rte_event_eth_rx_adapter.h | 41 +++++++- > lib/eventdev/version.map | 2 + > 4 files changed, 140 insertions(+), 8 deletions(-) > > diff --git a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst > index 0780b6f711..dd753613bd 100644 > --- a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst > +++ b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst > @@ -62,6 +62,13 @@ service function and needs to create an event port for it. The callback is > expected to fill the ``struct rte_event_eth_rx_adapter_conf structure`` > passed to it. > > +If the application desires to control the event buffer size, it can use the > +``rte_event_eth_rx_adapter_create_with_params()`` api. The event buffer size is > +specified using ``struct rte_event_eth_rx_adapter_params::event_buf_size``. > +The function is passed the event device to be associated with the adapter > +and port configuration for the adapter to setup an event port if the > +adapter needs to use a service function. 
> + > Adding Rx Queues to the Adapter Instance > ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ > > diff --git a/lib/eventdev/rte_event_eth_rx_adapter.c b/lib/eventdev/rte_event_eth_rx_adapter.c > index f2dc69503d..7dec9a8734 100644 > --- a/lib/eventdev/rte_event_eth_rx_adapter.c > +++ b/lib/eventdev/rte_event_eth_rx_adapter.c > @@ -82,7 +82,9 @@ struct rte_eth_event_enqueue_buffer { > /* Count of events in this buffer */ > uint16_t count; > /* Array of events in this buffer */ > - struct rte_event events[ETH_EVENT_BUFFER_SIZE]; > + struct rte_event *events; > + /* size of event buffer */ > + uint16_t events_size; > /* Event enqueue happens from head */ > uint16_t head; > /* New packets from rte_eth_rx_burst is enqued from tail */ > @@ -919,7 +921,7 @@ rxa_buffer_mbufs(struct rte_event_eth_rx_adapter *rx_adapter, > dropped = 0; > nb_cb = dev_info->cb_fn(eth_dev_id, rx_queue_id, > buf->last | > - (RTE_DIM(buf->events) & ~buf->last_mask), > + (buf->events_size & ~buf->last_mask), > buf->count >= BATCH_SIZE ? 
> buf->count - BATCH_SIZE : 0, > &buf->events[buf->tail], > @@ -945,7 +947,7 @@ rxa_pkt_buf_available(struct rte_eth_event_enqueue_buffer *buf) > uint32_t nb_req = buf->tail + BATCH_SIZE; > > if (!buf->last) { > - if (nb_req <= RTE_DIM(buf->events)) > + if (nb_req <= buf->events_size) > return true; > > if (buf->head >= BATCH_SIZE) { > @@ -2164,12 +2166,15 @@ rxa_ctrl(uint8_t id, int start) > return 0; > } > > -int > -rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, > - rte_event_eth_rx_adapter_conf_cb conf_cb, > - void *conf_arg) > +static int > +rxa_create(uint8_t id, uint8_t dev_id, > + struct rte_event_eth_rx_adapter_params *rxa_params, > + rte_event_eth_rx_adapter_conf_cb conf_cb, > + void *conf_arg) > { > struct rte_event_eth_rx_adapter *rx_adapter; > + struct rte_eth_event_enqueue_buffer *buf; > + struct rte_event *events; > int ret; > int socket_id; > uint16_t i; > @@ -2184,6 +2189,7 @@ rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, > > RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL); > RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); > + > if (conf_cb == NULL) > return -EINVAL; > > @@ -2231,11 +2237,30 @@ rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, > rte_free(rx_adapter); > return -ENOMEM; > } > + > rte_spinlock_init(&rx_adapter->rx_lock); > + > for (i = 0; i < RTE_MAX_ETHPORTS; i++) > rx_adapter->eth_devices[i].dev = &rte_eth_devices[i]; > > + /* Rx adapter event buffer allocation */ > + buf = &rx_adapter->event_enqueue_buffer; > + buf->events_size = RTE_ALIGN(rxa_params->event_buf_size, BATCH_SIZE); > + > + events = rte_zmalloc_socket(rx_adapter->mem_name, > + buf->events_size * sizeof(*events), > + 0, socket_id); > + if (events == NULL) { > + RTE_EDEV_LOG_ERR("Failed to allocate mem for event buffer\n"); > + rte_free(rx_adapter->eth_devices); > + rte_free(rx_adapter); > + return -ENOMEM; > + } > + > + rx_adapter->event_enqueue_buffer.events = events; > + > event_eth_rx_adapter[id] = 
rx_adapter; > + > if (conf_cb == rxa_default_conf_cb) > rx_adapter->default_cb_arg = 1; > rte_eventdev_trace_eth_rx_adapter_create(id, dev_id, conf_cb, > @@ -2243,6 +2268,61 @@ rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, > return 0; > } > > +int > +rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, > + rte_event_eth_rx_adapter_conf_cb conf_cb, > + void *conf_arg) > +{ > + struct rte_event_eth_rx_adapter_params rxa_params; > + > + /* use default values for adapter params */ > + rxa_params.event_buf_size = ETH_EVENT_BUFFER_SIZE; > + > + return rxa_create(id, dev_id, &rxa_params, conf_cb, conf_arg); > +} > + > +int > +rte_event_eth_rx_adapter_create_with_params(uint8_t id, uint8_t dev_id, > + struct rte_event_port_conf *port_config, > + struct rte_event_eth_rx_adapter_params *rxa_params) > +{ > + struct rte_event_port_conf *pc; > + int ret; > + struct rte_event_eth_rx_adapter_params temp_params = {0}; > + > + if (port_config == NULL) > + return -EINVAL; > + > + /* use default values if rxa_parmas is NULL */ > + if (rxa_params == NULL) { > + rxa_params = &temp_params; > + rxa_params->event_buf_size = ETH_EVENT_BUFFER_SIZE; > + } > + > + if (rxa_params->event_buf_size == 0) > + return -EINVAL; > + > + pc = rte_malloc(NULL, sizeof(*pc), 0); > + if (pc == NULL) > + return -ENOMEM; > + > + *pc = *port_config; > + > + /* adjust event buff size with BATCH_SIZE used for fetching packets > + * from NIC rx queues to get full buffer utilization and prevent > + * unnecessary rollovers. 
> + */ > + rxa_params->event_buf_size = RTE_ALIGN(rxa_params->event_buf_size, > + BATCH_SIZE); > + rxa_params->event_buf_size += BATCH_SIZE + BATCH_SIZE; > + > + ret = rxa_create(id, dev_id, rxa_params, rxa_default_conf_cb, pc); > + if (ret) > + rte_free(pc); > + > + return ret; > +} > + > int > rte_event_eth_rx_adapter_create(uint8_t id, uint8_t dev_id, > struct rte_event_port_conf *port_config) > @@ -2252,12 +2332,14 @@ rte_event_eth_rx_adapter_create(uint8_t id, uint8_t dev_id, > > if (port_config == NULL) > return -EINVAL; > + > RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL); > > pc = rte_malloc(NULL, sizeof(*pc), 0); > if (pc == NULL) > return -ENOMEM; > *pc = *port_config; > + > ret = rte_event_eth_rx_adapter_create_ext(id, dev_id, > rxa_default_conf_cb, > pc); > @@ -2286,6 +2368,7 @@ rte_event_eth_rx_adapter_free(uint8_t id) > if (rx_adapter->default_cb_arg) > rte_free(rx_adapter->conf_arg); > rte_free(rx_adapter->eth_devices); > + rte_free(rx_adapter->event_enqueue_buffer.events); > rte_free(rx_adapter); > event_eth_rx_adapter[id] = NULL; > > @@ -2658,6 +2741,7 @@ rte_event_eth_rx_adapter_stats_get(uint8_t id, > > stats->rx_packets += dev_stats_sum.rx_packets; > stats->rx_enq_count += dev_stats_sum.rx_enq_count; > + > return 0; > } > > diff --git a/lib/eventdev/rte_event_eth_rx_adapter.h b/lib/eventdev/rte_event_eth_rx_adapter.h > index 3f8b362295..6e8b3085f8 100644 > --- a/lib/eventdev/rte_event_eth_rx_adapter.h > +++ b/lib/eventdev/rte_event_eth_rx_adapter.h > @@ -26,6 +26,7 @@ > * The ethernet Rx event adapter's functions are: > * - rte_event_eth_rx_adapter_create_ext() > * - rte_event_eth_rx_adapter_create() > + * - rte_event_eth_rx_adapter_create_with_params() > * - rte_event_eth_rx_adapter_free() > * - rte_event_eth_rx_adapter_queue_add() > * - rte_event_eth_rx_adapter_queue_del() > @@ -36,7 +37,7 @@ > * > * The application creates an ethernet to event adapter using > * rte_event_eth_rx_adapter_create_ext() or 
rte_event_eth_rx_adapter_create() > - * functions. > + * or rte_event_eth_rx_adapter_create_with_params() functions. > * The adapter needs to know which ethernet rx queues to poll for mbufs as well > * as event device parameters such as the event queue identifier, event > * priority and scheduling type that the adapter should use when constructing > @@ -256,6 +257,17 @@ struct rte_event_eth_rx_adapter_vector_limits { > */ > }; > > +/** > + * A structure to hold adapter config params > + */ > +struct rte_event_eth_rx_adapter_params { > + uint16_t event_buf_size; > + /**< size of event buffer for the adapter. > + * This value is rounded up for better buffer utilization > + * and performance. > + */ > +}; > + > /** > * > * Callback function invoked by the SW adapter before it continues > @@ -356,6 +368,33 @@ int rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, > int rte_event_eth_rx_adapter_create(uint8_t id, uint8_t dev_id, > struct rte_event_port_conf *port_config); > > +/** > + * This is a variant of rte_event_eth_rx_adapter_create() with additional > + * adapter params specified in ``struct rte_event_eth_rx_adapter_params``. > + * > + * @param id > + * The identifier of the ethernet Rx event adapter. > + * > + * @param dev_id > + * The identifier of the event device to configure. > + * > + * @param port_config > + * Argument of type *rte_event_port_conf* that is passed to the conf_cb > + * function. > + * > + * @param rxa_params > + * Pointer to struct rte_event_eth_rx_adapter_params. > + * In case of NULL, default values are used. 
> + * > + * @return > + * - 0: Success > + * - <0: Error code on failure > + */ > +__rte_experimental > +int rte_event_eth_rx_adapter_create_with_params(uint8_t id, uint8_t dev_id, > + struct rte_event_port_conf *port_config, > + struct rte_event_eth_rx_adapter_params *rxa_params); > + > /** > * Free an event adapter > * > diff --git a/lib/eventdev/version.map b/lib/eventdev/version.map > index cd86d2d908..87586de879 100644 > --- a/lib/eventdev/version.map > +++ b/lib/eventdev/version.map > @@ -138,6 +138,8 @@ EXPERIMENTAL { > __rte_eventdev_trace_port_setup; > # added in 20.11 > rte_event_pmd_pci_probe_named; > + # added in 21.11 > + rte_event_eth_rx_adapter_create_with_params; > > #added in 21.05 > rte_event_vector_pool_create; > -- > 2.25.1 > ^ permalink raw reply [flat|nested] 81+ messages in thread
* Re: [dpdk-dev] [PATCH v3 1/5] eventdev/rx_adapter: add event buffer size configurability 2021-09-29 5:16 ` [dpdk-dev] [PATCH v3 1/5] eventdev/rx_adapter: add event buffer size configurability Jerin Jacob @ 2021-09-30 8:34 ` Jayatheerthan, Jay 0 siblings, 0 replies; 81+ messages in thread From: Jayatheerthan, Jay @ 2021-09-30 8:34 UTC (permalink / raw) To: Jerin Jacob, Naga Harish K, S V Cc: Jerin Jacob, dpdk-dev, Kundapura, Ganapati > -----Original Message----- > From: Jerin Jacob <jerinjacobk@gmail.com> > Sent: Wednesday, September 29, 2021 10:46 AM > To: Naga Harish K, S V <s.v.naga.harish.k@intel.com> > Cc: Jerin Jacob <jerinj@marvell.com>; Jayatheerthan, Jay <jay.jayatheerthan@intel.com>; dpdk-dev <dev@dpdk.org>; Kundapura, > Ganapati <ganapati.kundapura@intel.com> > Subject: Re: [dpdk-dev] [PATCH v3 1/5] eventdev/rx_adapter: add event buffer size configurability > > On Wed, Sep 22, 2021 at 8:44 PM Naga Harish K S V > <s.v.naga.harish.k@intel.com> wrote: > > > > Currently event buffer is static array with a default size defined > > internally. > > > > To configure event buffer size from application, > > ``rte_event_eth_rx_adapter_create_with_params`` api is added which > > takes ``struct rte_event_eth_rx_adapter_params`` to configure event > > buffer size in addition other params . The event buffer size is > > rounded up for better buffer utilization and performance . In case > > of NULL params argument, default event buffer size is used. > > > > Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com> > > Signed-off-by: Ganapati Kundapura <ganapati.kundapura@intel.com> > > > Changes look good to me. > > @Jayatheerthan, Jay Could review and Ack it? @Jerin, sure. Will be able to get to it next week. > > > > > > --- > > v3: > > * updated documentation and code comments as per review comments. > > * updated new create api test case name with suitable one. > > > > v2: > > * Updated header file and rx adapter documentation as per review comments. 
> > * new api name is modified as rte_event_eth_rx_adapter_create_with_params > > as per review comments. > > * rxa_params pointer argument Value NULL is allowed to represent the > > default values > > > > v1: > > * Initial implementation with documentation and unit tests. > > --- > > .../prog_guide/event_ethernet_rx_adapter.rst | 7 ++ > > lib/eventdev/rte_event_eth_rx_adapter.c | 98 +++++++++++++++++-- > > lib/eventdev/rte_event_eth_rx_adapter.h | 41 +++++++- > > lib/eventdev/version.map | 2 + > > 4 files changed, 140 insertions(+), 8 deletions(-) > > > > diff --git a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst > > index 0780b6f711..dd753613bd 100644 > > --- a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst > > +++ b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst > > @@ -62,6 +62,13 @@ service function and needs to create an event port for it. The callback is > > expected to fill the ``struct rte_event_eth_rx_adapter_conf structure`` > > passed to it. > > > > +If the application desires to control the event buffer size, it can use the > > +``rte_event_eth_rx_adapter_create_with_params()`` api. The event buffer size is > > +specified using ``struct rte_event_eth_rx_adapter_params::event_buf_size``. > > +The function is passed the event device to be associated with the adapter > > +and port configuration for the adapter to setup an event port if the > > +adapter needs to use a service function. 
> > + > > Adding Rx Queues to the Adapter Instance > > ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ > > > > diff --git a/lib/eventdev/rte_event_eth_rx_adapter.c b/lib/eventdev/rte_event_eth_rx_adapter.c > > index f2dc69503d..7dec9a8734 100644 > > --- a/lib/eventdev/rte_event_eth_rx_adapter.c > > +++ b/lib/eventdev/rte_event_eth_rx_adapter.c > > @@ -82,7 +82,9 @@ struct rte_eth_event_enqueue_buffer { > > /* Count of events in this buffer */ > > uint16_t count; > > /* Array of events in this buffer */ > > - struct rte_event events[ETH_EVENT_BUFFER_SIZE]; > > + struct rte_event *events; > > + /* size of event buffer */ > > + uint16_t events_size; > > /* Event enqueue happens from head */ > > uint16_t head; > > /* New packets from rte_eth_rx_burst is enqued from tail */ > > @@ -919,7 +921,7 @@ rxa_buffer_mbufs(struct rte_event_eth_rx_adapter *rx_adapter, > > dropped = 0; > > nb_cb = dev_info->cb_fn(eth_dev_id, rx_queue_id, > > buf->last | > > - (RTE_DIM(buf->events) & ~buf->last_mask), > > + (buf->events_size & ~buf->last_mask), > > buf->count >= BATCH_SIZE ? 
> > buf->count - BATCH_SIZE : 0, > > &buf->events[buf->tail], > > @@ -945,7 +947,7 @@ rxa_pkt_buf_available(struct rte_eth_event_enqueue_buffer *buf) > > uint32_t nb_req = buf->tail + BATCH_SIZE; > > > > if (!buf->last) { > > - if (nb_req <= RTE_DIM(buf->events)) > > + if (nb_req <= buf->events_size) > > return true; > > > > if (buf->head >= BATCH_SIZE) { > > @@ -2164,12 +2166,15 @@ rxa_ctrl(uint8_t id, int start) > > return 0; > > } > > > > -int > > -rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, > > - rte_event_eth_rx_adapter_conf_cb conf_cb, > > - void *conf_arg) > > +static int > > +rxa_create(uint8_t id, uint8_t dev_id, > > + struct rte_event_eth_rx_adapter_params *rxa_params, > > + rte_event_eth_rx_adapter_conf_cb conf_cb, > > + void *conf_arg) > > { > > struct rte_event_eth_rx_adapter *rx_adapter; > > + struct rte_eth_event_enqueue_buffer *buf; > > + struct rte_event *events; > > int ret; > > int socket_id; > > uint16_t i; > > @@ -2184,6 +2189,7 @@ rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, > > > > RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL); > > RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); > > + > > if (conf_cb == NULL) > > return -EINVAL; > > > > @@ -2231,11 +2237,30 @@ rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, > > rte_free(rx_adapter); > > return -ENOMEM; > > } > > + > > rte_spinlock_init(&rx_adapter->rx_lock); > > + > > for (i = 0; i < RTE_MAX_ETHPORTS; i++) > > rx_adapter->eth_devices[i].dev = &rte_eth_devices[i]; > > > > + /* Rx adapter event buffer allocation */ > > + buf = &rx_adapter->event_enqueue_buffer; > > + buf->events_size = RTE_ALIGN(rxa_params->event_buf_size, BATCH_SIZE); > > + > > + events = rte_zmalloc_socket(rx_adapter->mem_name, > > + buf->events_size * sizeof(*events), > > + 0, socket_id); > > + if (events == NULL) { > > + RTE_EDEV_LOG_ERR("Failed to allocate mem for event buffer\n"); > > + rte_free(rx_adapter->eth_devices); > > + rte_free(rx_adapter); > 
> + return -ENOMEM; > > + } > > + > > + rx_adapter->event_enqueue_buffer.events = events; > > + > > event_eth_rx_adapter[id] = rx_adapter; > > + > > if (conf_cb == rxa_default_conf_cb) > > rx_adapter->default_cb_arg = 1; > > rte_eventdev_trace_eth_rx_adapter_create(id, dev_id, conf_cb, > > @@ -2243,6 +2268,61 @@ rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, > > return 0; > > } > > > > +int > > +rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, > > + rte_event_eth_rx_adapter_conf_cb conf_cb, > > + void *conf_arg) > > +{ > > + struct rte_event_eth_rx_adapter_params rxa_params; > > + > > + /* use default values for adapter params */ > > + rxa_params.event_buf_size = ETH_EVENT_BUFFER_SIZE; > > + > > + return rxa_create(id, dev_id, &rxa_params, conf_cb, conf_arg); > > +} > > + > > +int > > +rte_event_eth_rx_adapter_create_with_params(uint8_t id, uint8_t dev_id, > > + struct rte_event_port_conf *port_config, > > + struct rte_event_eth_rx_adapter_params *rxa_params) > > +{ > > + struct rte_event_port_conf *pc; > > + int ret; > > + struct rte_event_eth_rx_adapter_params temp_params = {0}; > > + > > + if (port_config == NULL) > > + return -EINVAL; > > + > > + /* use default values if rxa_parmas is NULL */ > > + if (rxa_params == NULL) { > > + rxa_params = &temp_params; > > + rxa_params->event_buf_size = ETH_EVENT_BUFFER_SIZE; > > + } > > + > > + if (rxa_params->event_buf_size == 0) > > + return -EINVAL; > > + > > + pc = rte_malloc(NULL, sizeof(*pc), 0); > > + if (pc == NULL) > > + return -ENOMEM; > > + > > + *pc = *port_config; > > + > > + /* adjust event buff size with BATCH_SIZE used for fetching packets > > + * from NIC rx queues to get full buffer utilization and prevent > > + * unnecessary rollovers. 
> > + */ > > + rxa_params->event_buf_size = RTE_ALIGN(rxa_params->event_buf_size, > > + BATCH_SIZE); > > + rxa_params->event_buf_size += BATCH_SIZE + BATCH_SIZE; > > + > > + ret = rxa_create(id, dev_id, rxa_params, rxa_default_conf_cb, pc); > > + if (ret) > > + rte_free(pc); > > + > > + return ret; > > +} > > + > > int > > rte_event_eth_rx_adapter_create(uint8_t id, uint8_t dev_id, > > struct rte_event_port_conf *port_config) > > @@ -2252,12 +2332,14 @@ rte_event_eth_rx_adapter_create(uint8_t id, uint8_t dev_id, > > > > if (port_config == NULL) > > return -EINVAL; > > + > > RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL); > > > > pc = rte_malloc(NULL, sizeof(*pc), 0); > > if (pc == NULL) > > return -ENOMEM; > > *pc = *port_config; > > + > > ret = rte_event_eth_rx_adapter_create_ext(id, dev_id, > > rxa_default_conf_cb, > > pc); > > @@ -2286,6 +2368,7 @@ rte_event_eth_rx_adapter_free(uint8_t id) > > if (rx_adapter->default_cb_arg) > > rte_free(rx_adapter->conf_arg); > > rte_free(rx_adapter->eth_devices); > > + rte_free(rx_adapter->event_enqueue_buffer.events); > > rte_free(rx_adapter); > > event_eth_rx_adapter[id] = NULL; > > > > @@ -2658,6 +2741,7 @@ rte_event_eth_rx_adapter_stats_get(uint8_t id, > > > > stats->rx_packets += dev_stats_sum.rx_packets; > > stats->rx_enq_count += dev_stats_sum.rx_enq_count; > > + > > return 0; > > } > > > > diff --git a/lib/eventdev/rte_event_eth_rx_adapter.h b/lib/eventdev/rte_event_eth_rx_adapter.h > > index 3f8b362295..6e8b3085f8 100644 > > --- a/lib/eventdev/rte_event_eth_rx_adapter.h > > +++ b/lib/eventdev/rte_event_eth_rx_adapter.h > > @@ -26,6 +26,7 @@ > > * The ethernet Rx event adapter's functions are: > > * - rte_event_eth_rx_adapter_create_ext() > > * - rte_event_eth_rx_adapter_create() > > + * - rte_event_eth_rx_adapter_create_with_params() > > * - rte_event_eth_rx_adapter_free() > > * - rte_event_eth_rx_adapter_queue_add() > > * - rte_event_eth_rx_adapter_queue_del() > > @@ -36,7 +37,7 @@ > > * > > * The 
application creates an ethernet to event adapter using > > * rte_event_eth_rx_adapter_create_ext() or rte_event_eth_rx_adapter_create() > > - * functions. > > + * or rte_event_eth_rx_adapter_create_with_params() functions. > > * The adapter needs to know which ethernet rx queues to poll for mbufs as well > > * as event device parameters such as the event queue identifier, event > > * priority and scheduling type that the adapter should use when constructing > > @@ -256,6 +257,17 @@ struct rte_event_eth_rx_adapter_vector_limits { > > */ > > }; > > > > +/** > > + * A structure to hold adapter config params > > + */ > > +struct rte_event_eth_rx_adapter_params { > > + uint16_t event_buf_size; > > + /**< size of event buffer for the adapter. > > + * This value is rounded up for better buffer utilization > > + * and performance. > > + */ > > +}; > > + > > /** > > * > > * Callback function invoked by the SW adapter before it continues > > @@ -356,6 +368,33 @@ int rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, > > int rte_event_eth_rx_adapter_create(uint8_t id, uint8_t dev_id, > > struct rte_event_port_conf *port_config); > > > > +/** > > + * This is a variant of rte_event_eth_rx_adapter_create() with additional > > + * adapter params specified in ``struct rte_event_eth_rx_adapter_params``. > > + * > > + * @param id > > + * The identifier of the ethernet Rx event adapter. > > + * > > + * @param dev_id > > + * The identifier of the event device to configure. > > + * > > + * @param port_config > > + * Argument of type *rte_event_port_conf* that is passed to the conf_cb > > + * function. > > + * > > + * @param rxa_params > > + * Pointer to struct rte_event_eth_rx_adapter_params. > > + * In case of NULL, default values are used. 
> > + * > > + * @return > > + * - 0: Success > > + * - <0: Error code on failure > > + */ > > +__rte_experimental > > +int rte_event_eth_rx_adapter_create_with_params(uint8_t id, uint8_t dev_id, > > + struct rte_event_port_conf *port_config, > > + struct rte_event_eth_rx_adapter_params *rxa_params); > > + > > /** > > * Free an event adapter > > * > > diff --git a/lib/eventdev/version.map b/lib/eventdev/version.map > > index cd86d2d908..87586de879 100644 > > --- a/lib/eventdev/version.map > > +++ b/lib/eventdev/version.map > > @@ -138,6 +138,8 @@ EXPERIMENTAL { > > __rte_eventdev_trace_port_setup; > > # added in 20.11 > > rte_event_pmd_pci_probe_named; > > + # added in 21.11 > > + rte_event_eth_rx_adapter_create_with_params; > > > > #added in 21.05 > > rte_event_vector_pool_create; > > -- > > 2.25.1 > > ^ permalink raw reply [flat|nested] 81+ messages in thread
* [dpdk-dev] [PATCH v4 1/5] eventdev/rx_adapter: add event buffer size configurability 2021-09-22 15:13 ` [dpdk-dev] [PATCH v3 1/5] eventdev/rx_adapter: add event buffer size configurability Naga Harish K S V ` (4 preceding siblings ...) 2021-09-29 5:16 ` [dpdk-dev] [PATCH v3 1/5] eventdev/rx_adapter: add event buffer size configurability Jerin Jacob @ 2021-09-30 8:28 ` Naga Harish K S V 2021-09-30 8:28 ` [dpdk-dev] [PATCH v4 2/5] test/event: add unit test for Rx adapter Naga Harish K S V ` (4 more replies) 2021-10-07 8:51 ` [dpdk-dev] [PATCH v3 1/5] eventdev/rx_adapter: add event buffer size configurability Jerin Jacob 6 siblings, 5 replies; 81+ messages in thread From: Naga Harish K S V @ 2021-09-30 8:28 UTC (permalink / raw) To: jerinj, jay.jayatheerthan; +Cc: dev, Ganapati Kundapura Currently event buffer is static array with a default size defined internally. To configure event buffer size from application, ``rte_event_eth_rx_adapter_create_with_params`` api is added which takes ``struct rte_event_eth_rx_adapter_params`` to configure event buffer size in addition to other params. The event buffer size is rounded up for better buffer utilization and performance. In case of NULL params argument, default event buffer size is used. Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com> Signed-off-by: Ganapati Kundapura <ganapati.kundapura@intel.com> --- v4: * rebased with latest dpdk-next-eventdev branch v3: * updated documentation and code comments as per review comments. * updated new create api test case name with suitable one. v2: * Updated header file and rx adapter documentation as per review comments. * new api name is modified as rte_event_eth_rx_adapter_create_with_params as per review comments. * rxa_params pointer argument Value NULL is allowed to represent the default values v1: * Initial implementation with documentation and unit tests. 
--- --- .../prog_guide/event_ethernet_rx_adapter.rst | 7 ++ lib/eventdev/rte_event_eth_rx_adapter.c | 98 +++++++++++++++++-- lib/eventdev/rte_event_eth_rx_adapter.h | 41 +++++++- lib/eventdev/version.map | 2 + 4 files changed, 140 insertions(+), 8 deletions(-) diff --git a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst index ce23d8a474..8526aecf57 100644 --- a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst +++ b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst @@ -62,6 +62,13 @@ service function and needs to create an event port for it. The callback is expected to fill the ``struct rte_event_eth_rx_adapter_conf structure`` passed to it. +If the application desires to control the event buffer size, it can use the +``rte_event_eth_rx_adapter_create_with_params()`` api. The event buffer size is +specified using ``struct rte_event_eth_rx_adapter_params::event_buf_size``. +The function is passed the event device to be associated with the adapter +and port configuration for the adapter to setup an event port if the +adapter needs to use a service function. 
+ Adding Rx Queues to the Adapter Instance ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/lib/eventdev/rte_event_eth_rx_adapter.c b/lib/eventdev/rte_event_eth_rx_adapter.c index 10491ca07b..606db241b8 100644 --- a/lib/eventdev/rte_event_eth_rx_adapter.c +++ b/lib/eventdev/rte_event_eth_rx_adapter.c @@ -85,7 +85,9 @@ struct rte_eth_event_enqueue_buffer { /* Count of events in this buffer */ uint16_t count; /* Array of events in this buffer */ - struct rte_event events[ETH_EVENT_BUFFER_SIZE]; + struct rte_event *events; + /* size of event buffer */ + uint16_t events_size; /* Event enqueue happens from head */ uint16_t head; /* New packets from rte_eth_rx_burst is enqued from tail */ @@ -946,7 +948,7 @@ rxa_buffer_mbufs(struct rte_event_eth_rx_adapter *rx_adapter, dropped = 0; nb_cb = dev_info->cb_fn(eth_dev_id, rx_queue_id, buf->last | - (RTE_DIM(buf->events) & ~buf->last_mask), + (buf->events_size & ~buf->last_mask), buf->count >= BATCH_SIZE ? buf->count - BATCH_SIZE : 0, &buf->events[buf->tail], @@ -972,7 +974,7 @@ rxa_pkt_buf_available(struct rte_eth_event_enqueue_buffer *buf) uint32_t nb_req = buf->tail + BATCH_SIZE; if (!buf->last) { - if (nb_req <= RTE_DIM(buf->events)) + if (nb_req <= buf->events_size) return true; if (buf->head >= BATCH_SIZE) { @@ -2206,12 +2208,15 @@ rxa_ctrl(uint8_t id, int start) return 0; } -int -rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, - rte_event_eth_rx_adapter_conf_cb conf_cb, - void *conf_arg) +static int +rxa_create(uint8_t id, uint8_t dev_id, + struct rte_event_eth_rx_adapter_params *rxa_params, + rte_event_eth_rx_adapter_conf_cb conf_cb, + void *conf_arg) { struct rte_event_eth_rx_adapter *rx_adapter; + struct rte_eth_event_enqueue_buffer *buf; + struct rte_event *events; int ret; int socket_id; uint16_t i; @@ -2226,6 +2231,7 @@ rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL); RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); 
+ if (conf_cb == NULL) return -EINVAL; @@ -2273,11 +2279,30 @@ rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, rte_free(rx_adapter); return -ENOMEM; } + rte_spinlock_init(&rx_adapter->rx_lock); + for (i = 0; i < RTE_MAX_ETHPORTS; i++) rx_adapter->eth_devices[i].dev = &rte_eth_devices[i]; + /* Rx adapter event buffer allocation */ + buf = &rx_adapter->event_enqueue_buffer; + buf->events_size = RTE_ALIGN(rxa_params->event_buf_size, BATCH_SIZE); + + events = rte_zmalloc_socket(rx_adapter->mem_name, + buf->events_size * sizeof(*events), + 0, socket_id); + if (events == NULL) { + RTE_EDEV_LOG_ERR("Failed to allocate mem for event buffer\n"); + rte_free(rx_adapter->eth_devices); + rte_free(rx_adapter); + return -ENOMEM; + } + + rx_adapter->event_enqueue_buffer.events = events; + event_eth_rx_adapter[id] = rx_adapter; + if (conf_cb == rxa_default_conf_cb) rx_adapter->default_cb_arg = 1; @@ -2293,6 +2318,61 @@ rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, return 0; } +int +rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, + rte_event_eth_rx_adapter_conf_cb conf_cb, + void *conf_arg) +{ + struct rte_event_eth_rx_adapter_params rxa_params; + + /* use default values for adapter params */ + rxa_params.event_buf_size = ETH_EVENT_BUFFER_SIZE; + + return rxa_create(id, dev_id, &rxa_params, conf_cb, conf_arg); +} + +int +rte_event_eth_rx_adapter_create_with_params(uint8_t id, uint8_t dev_id, + struct rte_event_port_conf *port_config, + struct rte_event_eth_rx_adapter_params *rxa_params) +{ + struct rte_event_port_conf *pc; + int ret; + struct rte_event_eth_rx_adapter_params temp_params = {0}; + + if (port_config == NULL) + return -EINVAL; + + /* use default values if rxa_parmas is NULL */ + if (rxa_params == NULL) { + rxa_params = &temp_params; + rxa_params->event_buf_size = ETH_EVENT_BUFFER_SIZE; + } + + if (rxa_params->event_buf_size == 0) + return -EINVAL; + + pc = rte_malloc(NULL, sizeof(*pc), 0); + if (pc == NULL) + return 
-ENOMEM; + + *pc = *port_config; + + /* adjust event buff size with BATCH_SIZE used for fetching packets + * from NIC rx queues to get full buffer utilization and prevent + * unnecessary rollovers. + */ + rxa_params->event_buf_size = RTE_ALIGN(rxa_params->event_buf_size, + BATCH_SIZE); + rxa_params->event_buf_size += BATCH_SIZE + BATCH_SIZE; + + ret = rxa_create(id, dev_id, rxa_params, rxa_default_conf_cb, pc); + if (ret) + rte_free(pc); + + return ret; +} + int rte_event_eth_rx_adapter_create(uint8_t id, uint8_t dev_id, struct rte_event_port_conf *port_config) @@ -2302,12 +2382,14 @@ rte_event_eth_rx_adapter_create(uint8_t id, uint8_t dev_id, if (port_config == NULL) return -EINVAL; + RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL); pc = rte_malloc(NULL, sizeof(*pc), 0); if (pc == NULL) return -ENOMEM; *pc = *port_config; + ret = rte_event_eth_rx_adapter_create_ext(id, dev_id, rxa_default_conf_cb, pc); @@ -2336,6 +2418,7 @@ rte_event_eth_rx_adapter_free(uint8_t id) if (rx_adapter->default_cb_arg) rte_free(rx_adapter->conf_arg); rte_free(rx_adapter->eth_devices); + rte_free(rx_adapter->event_enqueue_buffer.events); rte_free(rx_adapter); event_eth_rx_adapter[id] = NULL; @@ -2711,6 +2794,7 @@ rte_event_eth_rx_adapter_stats_get(uint8_t id, stats->rx_packets += dev_stats_sum.rx_packets; stats->rx_enq_count += dev_stats_sum.rx_enq_count; + return 0; } diff --git a/lib/eventdev/rte_event_eth_rx_adapter.h b/lib/eventdev/rte_event_eth_rx_adapter.h index 470543e434..846ca569e9 100644 --- a/lib/eventdev/rte_event_eth_rx_adapter.h +++ b/lib/eventdev/rte_event_eth_rx_adapter.h @@ -26,6 +26,7 @@ * The ethernet Rx event adapter's functions are: * - rte_event_eth_rx_adapter_create_ext() * - rte_event_eth_rx_adapter_create() + * - rte_event_eth_rx_adapter_create_with_params() * - rte_event_eth_rx_adapter_free() * - rte_event_eth_rx_adapter_queue_add() * - rte_event_eth_rx_adapter_queue_del() @@ -37,7 +38,7 @@ * * The application creates an ethernet to event adapter using 
* rte_event_eth_rx_adapter_create_ext() or rte_event_eth_rx_adapter_create() - * functions. + * or rte_event_eth_rx_adapter_create_with_params() functions. * The adapter needs to know which ethernet rx queues to poll for mbufs as well * as event device parameters such as the event queue identifier, event * priority and scheduling type that the adapter should use when constructing @@ -257,6 +258,17 @@ struct rte_event_eth_rx_adapter_vector_limits { */ }; +/** + * A structure to hold adapter config params + */ +struct rte_event_eth_rx_adapter_params { + uint16_t event_buf_size; + /**< size of event buffer for the adapter. + * This value is rounded up for better buffer utilization + * and performance. + */ +}; + /** * * Callback function invoked by the SW adapter before it continues @@ -357,6 +369,33 @@ int rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, int rte_event_eth_rx_adapter_create(uint8_t id, uint8_t dev_id, struct rte_event_port_conf *port_config); +/** + * This is a variant of rte_event_eth_rx_adapter_create() with additional + * adapter params specified in ``struct rte_event_eth_rx_adapter_params``. + * + * @param id + * The identifier of the ethernet Rx event adapter. + * + * @param dev_id + * The identifier of the event device to configure. + * + * @param port_config + * Argument of type *rte_event_port_conf* that is passed to the conf_cb + * function. + * + * @param rxa_params + * Pointer to struct rte_event_eth_rx_adapter_params. + * In case of NULL, default values are used. 
+ * + * @return + * - 0: Success + * - <0: Error code on failure + */ +__rte_experimental +int rte_event_eth_rx_adapter_create_with_params(uint8_t id, uint8_t dev_id, + struct rte_event_port_conf *port_config, + struct rte_event_eth_rx_adapter_params *rxa_params); + /** * Free an event adapter * diff --git a/lib/eventdev/version.map b/lib/eventdev/version.map index 9f280160fa..7de18497a6 100644 --- a/lib/eventdev/version.map +++ b/lib/eventdev/version.map @@ -138,6 +138,8 @@ EXPERIMENTAL { __rte_eventdev_trace_port_setup; # added in 20.11 rte_event_pmd_pci_probe_named; + # added in 21.11 + rte_event_eth_rx_adapter_create_with_params; #added in 21.05 rte_event_vector_pool_create; -- 2.25.1 ^ permalink raw reply [flat|nested] 81+ messages in thread
* [dpdk-dev] [PATCH v4 2/5] test/event: add unit test for Rx adapter 2021-09-30 8:28 ` [dpdk-dev] [PATCH v4 " Naga Harish K S V @ 2021-09-30 8:28 ` Naga Harish K S V 2021-09-30 8:28 ` [dpdk-dev] [PATCH v4 3/5] eventdev/rx_adapter: introduce per queue event buffer Naga Harish K S V ` (3 subsequent siblings) 4 siblings, 0 replies; 81+ messages in thread From: Naga Harish K S V @ 2021-09-30 8:28 UTC (permalink / raw) To: jerinj, jay.jayatheerthan; +Cc: dev this patch adds unit test for rte_event_eth_rx_adapter_create_with_params api and validate all possible input combinations. Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com> --- app/test/test_event_eth_rx_adapter.c | 53 +++++++++++++++++++++++++--- 1 file changed, 49 insertions(+), 4 deletions(-) diff --git a/app/test/test_event_eth_rx_adapter.c b/app/test/test_event_eth_rx_adapter.c index 13664a3a3b..7c2cf0dd70 100644 --- a/app/test/test_event_eth_rx_adapter.c +++ b/app/test/test_event_eth_rx_adapter.c @@ -428,6 +428,50 @@ adapter_create_free(void) return TEST_SUCCESS; } +static int +adapter_create_free_with_params(void) +{ + int err; + + struct rte_event_port_conf rx_p_conf = { + .dequeue_depth = 8, + .enqueue_depth = 8, + .new_event_threshold = 1200, + }; + + struct rte_event_eth_rx_adapter_params rxa_params = { + .event_buf_size = 1024 + }; + + err = rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID, + TEST_DEV_ID, NULL, NULL); + TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err); + + err = rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID, + TEST_DEV_ID, &rx_p_conf, &rxa_params); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + err = rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID, + TEST_DEV_ID, &rx_p_conf, &rxa_params); + TEST_ASSERT(err == -EEXIST, "Expected -EEXIST %d got %d", -EEXIST, err); + + rxa_params.event_buf_size = 0; + err = rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID, + TEST_DEV_ID, &rx_p_conf, &rxa_params); + TEST_ASSERT(err == 
-EINVAL, "Expected -EINVAL got %d", err); + + err = rte_event_eth_rx_adapter_free(TEST_INST_ID); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + err = rte_event_eth_rx_adapter_free(TEST_INST_ID); + TEST_ASSERT(err == -EINVAL, "Expected -EINVAL %d got %d", -EINVAL, err); + + err = rte_event_eth_rx_adapter_free(1); + TEST_ASSERT(err == -EINVAL, "Expected -EINVAL %d got %d", -EINVAL, err); + + return TEST_SUCCESS; +} + static int adapter_queue_add_del(void) { @@ -435,7 +479,7 @@ adapter_queue_add_del(void) struct rte_event ev; uint32_t cap; - struct rte_event_eth_rx_adapter_queue_conf queue_config; + struct rte_event_eth_rx_adapter_queue_conf queue_config = {0}; err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID, TEST_ETHDEV_ID, &cap); @@ -523,7 +567,7 @@ adapter_multi_eth_add_del(void) uint16_t port_index, port_index_base, drv_id = 0; char driver_name[50]; - struct rte_event_eth_rx_adapter_queue_conf queue_config; + struct rte_event_eth_rx_adapter_queue_conf queue_config = {0}; ev.queue_id = 0; ev.sched_type = RTE_SCHED_TYPE_ATOMIC; @@ -594,7 +638,7 @@ adapter_intr_queue_add_del(void) struct rte_event ev; uint32_t cap; uint16_t eth_port; - struct rte_event_eth_rx_adapter_queue_conf queue_config; + struct rte_event_eth_rx_adapter_queue_conf queue_config = {0}; if (!default_params.rx_intr_port_inited) return 0; @@ -687,7 +731,7 @@ adapter_start_stop(void) ev.sched_type = RTE_SCHED_TYPE_ATOMIC; ev.priority = 0; - struct rte_event_eth_rx_adapter_queue_conf queue_config; + struct rte_event_eth_rx_adapter_queue_conf queue_config = {0}; queue_config.rx_queue_flags = 0; if (default_params.caps & @@ -774,6 +818,7 @@ static struct unit_test_suite event_eth_rx_tests = { .teardown = testsuite_teardown, .unit_test_cases = { TEST_CASE_ST(NULL, NULL, adapter_create_free), + TEST_CASE_ST(NULL, NULL, adapter_create_free_with_params), TEST_CASE_ST(adapter_create, adapter_free, adapter_queue_add_del), TEST_CASE_ST(adapter_create, adapter_free, -- 2.25.1 ^ permalink raw reply 
[flat|nested] 81+ messages in thread
* [dpdk-dev] [PATCH v4 3/5] eventdev/rx_adapter: introduce per queue event buffer 2021-09-30 8:28 ` [dpdk-dev] [PATCH v4 " Naga Harish K S V 2021-09-30 8:28 ` [dpdk-dev] [PATCH v4 2/5] test/event: add unit test for Rx adapter Naga Harish K S V @ 2021-09-30 8:28 ` Naga Harish K S V 2021-09-30 8:29 ` [dpdk-dev] [PATCH v4 4/5] eventdev/rx_adapter: implement " Naga Harish K S V ` (2 subsequent siblings) 4 siblings, 0 replies; 81+ messages in thread From: Naga Harish K S V @ 2021-09-30 8:28 UTC (permalink / raw) To: jerinj, jay.jayatheerthan; +Cc: dev To configure per queue event buffer size, application sets ``rte_event_eth_rx_adapter_params::use_queue_event_buf`` flag as true and is passed to ``rte_event_eth_rx_adapter_create_with_params`` api. The per queue event buffer size is populated in ``rte_event_eth_rx_adapter_queue_conf::event_buf_size`` and passed to ``rte_event_eth_rx_adapter_queue_add`` api. Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com> --- .../prog_guide/event_ethernet_rx_adapter.rst | 19 ++++++++++++------- lib/eventdev/rte_event_eth_rx_adapter.h | 4 ++++ 2 files changed, 16 insertions(+), 7 deletions(-) diff --git a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst index 8526aecf57..8b58130fc5 100644 --- a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst +++ b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst @@ -62,12 +62,14 @@ service function and needs to create an event port for it. The callback is expected to fill the ``struct rte_event_eth_rx_adapter_conf structure`` passed to it. -If the application desires to control the event buffer size, it can use the -``rte_event_eth_rx_adapter_create_with_params()`` api. The event buffer size is -specified using ``struct rte_event_eth_rx_adapter_params::event_buf_size``. 
-The function is passed the event device to be associated with the adapter -and port configuration for the adapter to setup an event port if the -adapter needs to use a service function. +If the application desires to control the event buffer size at adapter level, +it can use the ``rte_event_eth_rx_adapter_create_with_params()`` api. The event +buffer size is specified using ``struct rte_event_eth_rx_adapter_params:: +event_buf_size``. To configure the event buffer size at queue level, the boolean +flag ``struct rte_event_eth_rx_adapter_params::use_queue_event_buf`` need to be +set to true. The function is passed the event device to be associated with +the adapter and port configuration for the adapter to setup an event port +if the adapter needs to use a service function. Adding Rx Queues to the Adapter Instance ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -79,7 +81,9 @@ parameter. Event information for packets from this Rx queue is encoded in the ``ev`` field of ``struct rte_event_eth_rx_adapter_queue_conf``. The servicing_weight member of the struct rte_event_eth_rx_adapter_queue_conf is the relative polling frequency of the Rx queue and is applicable when the -adapter uses a service core function. +adapter uses a service core function. The applications can configure queue +event buffer size in ``struct rte_event_eth_rx_adapter_queue_conf::event_buf_size`` +parameter. .. code-block:: c @@ -90,6 +94,7 @@ adapter uses a service core function. 
queue_config.rx_queue_flags = 0; queue_config.ev = ev; queue_config.servicing_weight = 1; + queue_config.event_buf_size = 1024; err = rte_event_eth_rx_adapter_queue_add(id, eth_dev_id, diff --git a/lib/eventdev/rte_event_eth_rx_adapter.h b/lib/eventdev/rte_event_eth_rx_adapter.h index 846ca569e9..70ca427d66 100644 --- a/lib/eventdev/rte_event_eth_rx_adapter.h +++ b/lib/eventdev/rte_event_eth_rx_adapter.h @@ -200,6 +200,8 @@ struct rte_event_eth_rx_adapter_queue_conf { * Valid when RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR flag is set in * @see rte_event_eth_rx_adapter_queue_conf::rx_queue_flags. */ + uint16_t event_buf_size; + /**< event buffer size for this queue */ }; /** @@ -267,6 +269,8 @@ struct rte_event_eth_rx_adapter_params { * This value is rounded up for better buffer utilization * and performance. */ + bool use_queue_event_buf; + /**< flag to indicate that event buffer is separate for each queue */ }; /** -- 2.25.1 ^ permalink raw reply [flat|nested] 81+ messages in thread
* [dpdk-dev] [PATCH v4 4/5] eventdev/rx_adapter: implement per queue event buffer 2021-09-30 8:28 ` [dpdk-dev] [PATCH v4 " Naga Harish K S V 2021-09-30 8:28 ` [dpdk-dev] [PATCH v4 2/5] test/event: add unit test for Rx adapter Naga Harish K S V 2021-09-30 8:28 ` [dpdk-dev] [PATCH v4 3/5] eventdev/rx_adapter: introduce per queue event buffer Naga Harish K S V @ 2021-09-30 8:29 ` Naga Harish K S V 2021-09-30 8:29 ` [dpdk-dev] [PATCH v4 5/5] test/event: add unit test for Rx adapter Naga Harish K S V 2021-10-04 5:41 ` [dpdk-dev] [PATCH v5 1/5] eventdev/rx_adapter: add event buffer size configurability Naga Harish K S V 4 siblings, 0 replies; 81+ messages in thread From: Naga Harish K S V @ 2021-09-30 8:29 UTC (permalink / raw) To: jerinj, jay.jayatheerthan; +Cc: dev this patch implement the per queue event buffer with required validations. Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com> --- lib/eventdev/rte_event_eth_rx_adapter.c | 187 +++++++++++++++++------- 1 file changed, 138 insertions(+), 49 deletions(-) diff --git a/lib/eventdev/rte_event_eth_rx_adapter.c b/lib/eventdev/rte_event_eth_rx_adapter.c index 606db241b8..b61af0e75e 100644 --- a/lib/eventdev/rte_event_eth_rx_adapter.c +++ b/lib/eventdev/rte_event_eth_rx_adapter.c @@ -102,10 +102,12 @@ struct rte_event_eth_rx_adapter { uint8_t rss_key_be[RSS_KEY_SIZE]; /* Event device identifier */ uint8_t eventdev_id; - /* Per ethernet device structure */ - struct eth_device_info *eth_devices; /* Event port identifier */ uint8_t event_port_id; + /* Flag indicating per rxq event buffer */ + bool use_queue_event_buf; + /* Per ethernet device structure */ + struct eth_device_info *eth_devices; /* Lock to serialize config updates with service function */ rte_spinlock_t rx_lock; /* Max mbufs processed in any service function invocation */ @@ -241,6 +243,7 @@ struct eth_rx_queue_info { uint32_t flow_id_mask; /* Set to ~0 if app provides flow id else 0 */ uint64_t event; struct eth_rx_vector_data vector_data; + 
struct rte_eth_event_enqueue_buffer *event_buf; }; static struct rte_event_eth_rx_adapter **event_eth_rx_adapter; @@ -767,10 +770,9 @@ rxa_enq_block_end_ts(struct rte_event_eth_rx_adapter *rx_adapter, /* Enqueue buffered events to event device */ static inline uint16_t -rxa_flush_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter) +rxa_flush_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter, + struct rte_eth_event_enqueue_buffer *buf) { - struct rte_eth_event_enqueue_buffer *buf = - &rx_adapter->event_enqueue_buffer; struct rte_event_eth_rx_adapter_stats *stats = &rx_adapter->stats; uint16_t count = buf->last ? buf->last - buf->head : buf->count; @@ -888,15 +890,14 @@ rxa_buffer_mbufs(struct rte_event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id, uint16_t rx_queue_id, struct rte_mbuf **mbufs, - uint16_t num) + uint16_t num, + struct rte_eth_event_enqueue_buffer *buf) { uint32_t i; struct eth_device_info *dev_info = &rx_adapter->eth_devices[eth_dev_id]; struct eth_rx_queue_info *eth_rx_queue_info = &dev_info->rx_queue[rx_queue_id]; - struct rte_eth_event_enqueue_buffer *buf = - &rx_adapter->event_enqueue_buffer; uint16_t new_tail = buf->tail; uint64_t event = eth_rx_queue_info->event; uint32_t flow_id_mask = eth_rx_queue_info->flow_id_mask; @@ -995,11 +996,10 @@ rxa_eth_rx(struct rte_event_eth_rx_adapter *rx_adapter, uint16_t queue_id, uint32_t rx_count, uint32_t max_rx, - int *rxq_empty) + int *rxq_empty, + struct rte_eth_event_enqueue_buffer *buf) { struct rte_mbuf *mbufs[BATCH_SIZE]; - struct rte_eth_event_enqueue_buffer *buf = - &rx_adapter->event_enqueue_buffer; struct rte_event_eth_rx_adapter_stats *stats = &rx_adapter->stats; uint16_t n; @@ -1012,7 +1012,7 @@ rxa_eth_rx(struct rte_event_eth_rx_adapter *rx_adapter, */ while (rxa_pkt_buf_available(buf)) { if (buf->count >= BATCH_SIZE) - rxa_flush_event_buffer(rx_adapter); + rxa_flush_event_buffer(rx_adapter, buf); stats->rx_poll_count++; n = rte_eth_rx_burst(port_id, queue_id, mbufs, 
BATCH_SIZE); @@ -1021,14 +1021,14 @@ rxa_eth_rx(struct rte_event_eth_rx_adapter *rx_adapter, *rxq_empty = 1; break; } - rxa_buffer_mbufs(rx_adapter, port_id, queue_id, mbufs, n); + rxa_buffer_mbufs(rx_adapter, port_id, queue_id, mbufs, n, buf); nb_rx += n; if (rx_count + nb_rx > max_rx) break; } if (buf->count > 0) - rxa_flush_event_buffer(rx_adapter); + rxa_flush_event_buffer(rx_adapter, buf); return nb_rx; } @@ -1169,7 +1169,7 @@ rxa_intr_ring_dequeue(struct rte_event_eth_rx_adapter *rx_adapter) ring_lock = &rx_adapter->intr_ring_lock; if (buf->count >= BATCH_SIZE) - rxa_flush_event_buffer(rx_adapter); + rxa_flush_event_buffer(rx_adapter, buf); while (rxa_pkt_buf_available(buf)) { struct eth_device_info *dev_info; @@ -1221,7 +1221,7 @@ rxa_intr_ring_dequeue(struct rte_event_eth_rx_adapter *rx_adapter) continue; n = rxa_eth_rx(rx_adapter, port, i, nb_rx, rx_adapter->max_nb_rx, - &rxq_empty); + &rxq_empty, buf); nb_rx += n; enq_buffer_full = !rxq_empty && n == 0; @@ -1242,7 +1242,7 @@ rxa_intr_ring_dequeue(struct rte_event_eth_rx_adapter *rx_adapter) } else { n = rxa_eth_rx(rx_adapter, port, queue, nb_rx, rx_adapter->max_nb_rx, - &rxq_empty); + &rxq_empty, buf); rx_adapter->qd_valid = !rxq_empty; nb_rx += n; if (nb_rx > rx_adapter->max_nb_rx) @@ -1273,13 +1273,12 @@ rxa_poll(struct rte_event_eth_rx_adapter *rx_adapter) { uint32_t num_queue; uint32_t nb_rx = 0; - struct rte_eth_event_enqueue_buffer *buf; + struct rte_eth_event_enqueue_buffer *buf = NULL; uint32_t wrr_pos; uint32_t max_nb_rx; wrr_pos = rx_adapter->wrr_pos; max_nb_rx = rx_adapter->max_nb_rx; - buf = &rx_adapter->event_enqueue_buffer; /* Iterate through a WRR sequence */ for (num_queue = 0; num_queue < rx_adapter->wrr_len; num_queue++) { @@ -1287,24 +1286,36 @@ rxa_poll(struct rte_event_eth_rx_adapter *rx_adapter) uint16_t qid = rx_adapter->eth_rx_poll[poll_idx].eth_rx_qid; uint16_t d = rx_adapter->eth_rx_poll[poll_idx].eth_dev_id; + if (rx_adapter->use_queue_event_buf) { + struct eth_device_info 
*dev_info = + &rx_adapter->eth_devices[d]; + buf = dev_info->rx_queue[qid].event_buf; + } else + buf = &rx_adapter->event_enqueue_buffer; + /* Don't do a batch dequeue from the rx queue if there isn't * enough space in the enqueue buffer. */ if (buf->count >= BATCH_SIZE) - rxa_flush_event_buffer(rx_adapter); + rxa_flush_event_buffer(rx_adapter, buf); if (!rxa_pkt_buf_available(buf)) { - rx_adapter->wrr_pos = wrr_pos; - return nb_rx; + if (rx_adapter->use_queue_event_buf) + goto poll_next_entry; + else { + rx_adapter->wrr_pos = wrr_pos; + return nb_rx; + } } nb_rx += rxa_eth_rx(rx_adapter, d, qid, nb_rx, max_nb_rx, - NULL); + NULL, buf); if (nb_rx > max_nb_rx) { rx_adapter->wrr_pos = (wrr_pos + 1) % rx_adapter->wrr_len; break; } +poll_next_entry: if (++wrr_pos == rx_adapter->wrr_len) wrr_pos = 0; } @@ -1315,12 +1326,18 @@ static void rxa_vector_expire(struct eth_rx_vector_data *vec, void *arg) { struct rte_event_eth_rx_adapter *rx_adapter = arg; - struct rte_eth_event_enqueue_buffer *buf = - &rx_adapter->event_enqueue_buffer; + struct rte_eth_event_enqueue_buffer *buf = NULL; struct rte_event *ev; + if (rx_adapter->use_queue_event_buf) { + struct eth_device_info *dev_info = + &rx_adapter->eth_devices[vec->port]; + buf = dev_info->rx_queue[vec->queue].event_buf; + } else + buf = &rx_adapter->event_enqueue_buffer; + if (buf->count) - rxa_flush_event_buffer(rx_adapter); + rxa_flush_event_buffer(rx_adapter, buf); if (vec->vector_ev->nb_elem == 0) return; @@ -1947,9 +1964,16 @@ rxa_sw_del(struct rte_event_eth_rx_adapter *rx_adapter, rx_adapter->num_rx_intr -= intrq; dev_info->nb_rx_intr -= intrq; dev_info->nb_shared_intr -= intrq && sintrq; + if (rx_adapter->use_queue_event_buf) { + struct rte_eth_event_enqueue_buffer *event_buf = + dev_info->rx_queue[rx_queue_id].event_buf; + rte_free(event_buf->events); + rte_free(event_buf); + dev_info->rx_queue[rx_queue_id].event_buf = NULL; + } } -static void +static int rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter, 
struct eth_device_info *dev_info, int32_t rx_queue_id, @@ -1961,15 +1985,21 @@ rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter, int intrq; int sintrq; struct rte_event *qi_ev; + struct rte_eth_event_enqueue_buffer *new_rx_buf = NULL; + uint16_t eth_dev_id = dev_info->dev->data->port_id; + int ret; if (rx_queue_id == -1) { uint16_t nb_rx_queues; uint16_t i; nb_rx_queues = dev_info->dev->data->nb_rx_queues; - for (i = 0; i < nb_rx_queues; i++) - rxa_add_queue(rx_adapter, dev_info, i, conf); - return; + for (i = 0; i < nb_rx_queues; i++) { + ret = rxa_add_queue(rx_adapter, dev_info, i, conf); + if (ret) + return ret; + } + return 0; } pollq = rxa_polled_queue(dev_info, rx_queue_id); @@ -2032,6 +2062,37 @@ rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter, dev_info->next_q_idx = 0; } } + + if (!rx_adapter->use_queue_event_buf) + return 0; + + new_rx_buf = rte_zmalloc_socket("rx_buffer_meta", + sizeof(*new_rx_buf), 0, + rte_eth_dev_socket_id(eth_dev_id)); + if (new_rx_buf == NULL) { + RTE_EDEV_LOG_ERR("Failed to allocate event buffer meta for " + "dev_id: %d queue_id: %d", + eth_dev_id, rx_queue_id); + return -ENOMEM; + } + + new_rx_buf->events_size = RTE_ALIGN(conf->event_buf_size, BATCH_SIZE); + new_rx_buf->events_size += (2 * BATCH_SIZE); + new_rx_buf->events = rte_zmalloc_socket("rx_buffer", + sizeof(struct rte_event) * + new_rx_buf->events_size, 0, + rte_eth_dev_socket_id(eth_dev_id)); + if (new_rx_buf->events == NULL) { + rte_free(new_rx_buf); + RTE_EDEV_LOG_ERR("Failed to allocate event buffer for " + "dev_id: %d queue_id: %d", + eth_dev_id, rx_queue_id); + return -ENOMEM; + } + + queue_info->event_buf = new_rx_buf; + + return 0; } static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter, @@ -2060,6 +2121,16 @@ static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter, temp_conf.servicing_weight = 1; } queue_conf = &temp_conf; + + if (queue_conf->servicing_weight == 0 && + rx_adapter->use_queue_event_buf) { + + 
RTE_EDEV_LOG_ERR("Use of queue level event buffer " + "not supported for interrupt queues " + "dev_id: %d queue_id: %d", + eth_dev_id, rx_queue_id); + return -EINVAL; + } } nb_rx_queues = dev_info->dev->data->nb_rx_queues; @@ -2139,7 +2210,9 @@ static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter, - rxa_add_queue(rx_adapter, dev_info, rx_queue_id, queue_conf); + ret = rxa_add_queue(rx_adapter, dev_info, rx_queue_id, queue_conf); + if (ret) + goto err_free_rxqueue; rxa_calc_wrr_sequence(rx_adapter, rx_poll, rx_wrr); rte_free(rx_adapter->eth_rx_poll); @@ -2160,7 +2233,7 @@ static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter, rte_free(rx_poll); rte_free(rx_wrr); - return 0; + return ret; } static int @@ -2286,20 +2359,26 @@ rxa_create(uint8_t id, uint8_t dev_id, rx_adapter->eth_devices[i].dev = &rte_eth_devices[i]; /* Rx adapter event buffer allocation */ - buf = &rx_adapter->event_enqueue_buffer; - buf->events_size = RTE_ALIGN(rxa_params->event_buf_size, BATCH_SIZE); - - events = rte_zmalloc_socket(rx_adapter->mem_name, - buf->events_size * sizeof(*events), - 0, socket_id); - if (events == NULL) { - RTE_EDEV_LOG_ERR("Failed to allocate mem for event buffer\n"); - rte_free(rx_adapter->eth_devices); - rte_free(rx_adapter); - return -ENOMEM; - } + rx_adapter->use_queue_event_buf = rxa_params->use_queue_event_buf; + + if (!rx_adapter->use_queue_event_buf) { + buf = &rx_adapter->event_enqueue_buffer; + buf->events_size = RTE_ALIGN(rxa_params->event_buf_size, + BATCH_SIZE); + + events = rte_zmalloc_socket(rx_adapter->mem_name, + buf->events_size * sizeof(*events), + 0, socket_id); + if (events == NULL) { + RTE_EDEV_LOG_ERR("Failed to allocate memory " + "for adapter event buffer"); + rte_free(rx_adapter->eth_devices); + rte_free(rx_adapter); + return -ENOMEM; + } - rx_adapter->event_enqueue_buffer.events = events; + rx_adapter->event_enqueue_buffer.events = events; + } event_eth_rx_adapter[id] = rx_adapter; @@ -2327,6 +2406,7 @@ 
rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, /* use default values for adapter params */ rxa_params.event_buf_size = ETH_EVENT_BUFFER_SIZE; + rxa_params.use_queue_event_buf = false; return rxa_create(id, dev_id, &rxa_params, conf_cb, conf_arg); } @@ -2347,9 +2427,9 @@ rte_event_eth_rx_adapter_create_with_params(uint8_t id, uint8_t dev_id, if (rxa_params == NULL) { rxa_params = &temp_params; rxa_params->event_buf_size = ETH_EVENT_BUFFER_SIZE; - } - - if (rxa_params->event_buf_size == 0) + rxa_params->use_queue_event_buf = false; + } else if ((!rxa_params->use_queue_event_buf && + rxa_params->event_buf_size == 0)) return -EINVAL; pc = rte_malloc(NULL, sizeof(*pc), 0); @@ -2418,7 +2498,8 @@ rte_event_eth_rx_adapter_free(uint8_t id) if (rx_adapter->default_cb_arg) rte_free(rx_adapter->conf_arg); rte_free(rx_adapter->eth_devices); - rte_free(rx_adapter->event_enqueue_buffer.events); + if (!rx_adapter->use_queue_event_buf) + rte_free(rx_adapter->event_enqueue_buffer.events); rte_free(rx_adapter); event_eth_rx_adapter[id] = NULL; @@ -2522,6 +2603,14 @@ rte_event_eth_rx_adapter_queue_add(uint8_t id, return -EINVAL; } + if ((rx_adapter->use_queue_event_buf && + queue_conf->event_buf_size == 0) || + (!rx_adapter->use_queue_event_buf && + queue_conf->event_buf_size != 0)) { + RTE_EDEV_LOG_ERR("Invalid Event buffer size for the queue"); + return -EINVAL; + } + dev_info = &rx_adapter->eth_devices[eth_dev_id]; if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) { -- 2.25.1 ^ permalink raw reply [flat|nested] 81+ messages in thread
* [dpdk-dev] [PATCH v4 5/5] test/event: add unit test for Rx adapter 2021-09-30 8:28 ` [dpdk-dev] [PATCH v4 " Naga Harish K S V ` (2 preceding siblings ...) 2021-09-30 8:29 ` [dpdk-dev] [PATCH v4 4/5] eventdev/rx_adapter: implement " Naga Harish K S V @ 2021-09-30 8:29 ` Naga Harish K S V 2021-10-04 5:41 ` [dpdk-dev] [PATCH v5 1/5] eventdev/rx_adapter: add event buffer size configurability Naga Harish K S V 4 siblings, 0 replies; 81+ messages in thread From: Naga Harish K S V @ 2021-09-30 8:29 UTC (permalink / raw) To: jerinj, jay.jayatheerthan; +Cc: dev this patch adds unit tests for checking per rx queue event buffer feature using rte_event_eth_rx_adapter_queue_add api. fix segfault in adapter_queue_conf unit test Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com> --- app/test/test_event_eth_rx_adapter.c | 129 ++++++++++++++++++++++++++- 1 file changed, 126 insertions(+), 3 deletions(-) diff --git a/app/test/test_event_eth_rx_adapter.c b/app/test/test_event_eth_rx_adapter.c index 7c2cf0dd70..5e69971b54 100644 --- a/app/test/test_event_eth_rx_adapter.c +++ b/app/test/test_event_eth_rx_adapter.c @@ -387,6 +387,90 @@ adapter_create(void) return err; } +static int +adapter_create_with_params(void) +{ + int err; + struct rte_event_dev_info dev_info; + struct rte_event_port_conf rx_p_conf; + struct rte_event_eth_rx_adapter_params rxa_params; + + memset(&rx_p_conf, 0, sizeof(rx_p_conf)); + + err = rte_event_dev_info_get(TEST_DEV_ID, &dev_info); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + rx_p_conf.new_event_threshold = dev_info.max_num_events; + rx_p_conf.dequeue_depth = dev_info.max_event_port_dequeue_depth; + rx_p_conf.enqueue_depth = dev_info.max_event_port_enqueue_depth; + + rxa_params.use_queue_event_buf = false; + rxa_params.event_buf_size = 0; + + err = rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID, + TEST_DEV_ID, &rx_p_conf, &rxa_params); + TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err); + + 
rxa_params.use_queue_event_buf = true; + + err = rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID, + TEST_DEV_ID, &rx_p_conf, &rxa_params); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + err = rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID, + TEST_DEV_ID, &rx_p_conf, &rxa_params); + TEST_ASSERT(err == -EEXIST, "Expected -EEXIST got %d", err); + + return TEST_SUCCESS; +} + +static int +adapter_queue_event_buf_test(void) +{ + int err; + struct rte_event ev; + uint32_t cap; + + struct rte_event_eth_rx_adapter_queue_conf queue_config = {0}; + + err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID, TEST_ETHDEV_ID, + &cap); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + ev.queue_id = 0; + ev.sched_type = RTE_SCHED_TYPE_ATOMIC; + ev.priority = 0; + + queue_config.rx_queue_flags = 0; + if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID) { + ev.flow_id = 1; + queue_config.rx_queue_flags = + RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID; + } + queue_config.ev = ev; + queue_config.servicing_weight = 1; + queue_config.event_buf_size = 0; + + err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID, + TEST_ETHDEV_ID, 0, + &queue_config); + TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err); + + queue_config.event_buf_size = 1024; + + err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID, + TEST_ETHDEV_ID, 0, + &queue_config); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID, + TEST_ETHDEV_ID, + 0); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + return TEST_SUCCESS; +} + static void adapter_free(void) { @@ -795,20 +879,57 @@ static int adapter_queue_conf(void) { int err; - struct rte_event_eth_rx_adapter_queue_conf queue_conf; + struct rte_event ev; + uint32_t cap; + struct rte_event_eth_rx_adapter_queue_conf queue_config = {0}; + + err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID, TEST_ETHDEV_ID, + &cap); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + 
ev.queue_id = 0; + ev.sched_type = RTE_SCHED_TYPE_ATOMIC; + ev.priority = 0; + + queue_config.rx_queue_flags = 0; + if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID) { + ev.flow_id = 1; + queue_config.rx_queue_flags = + RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID; + } + queue_config.ev = ev; + queue_config.servicing_weight = 1; + queue_config.event_buf_size = 1024; + + err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID, + TEST_ETHDEV_ID, 0, + &queue_config); + TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err); + + queue_config.event_buf_size = 0; + + err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID, + TEST_ETHDEV_ID, 0, + &queue_config); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); err = rte_event_eth_rx_adapter_queue_conf_get(TEST_INST_ID, TEST_DEV_ID, - 0, &queue_conf); + 0, &queue_config); TEST_ASSERT(err == 0, "Expected 0 got %d", err); err = rte_event_eth_rx_adapter_queue_conf_get(TEST_INST_ID, TEST_DEV_ID, - -1, &queue_conf); + -1, &queue_config); TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err); err = rte_event_eth_rx_adapter_queue_conf_get(TEST_INST_ID, TEST_DEV_ID, 0, NULL); TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err); + err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID, + TEST_ETHDEV_ID, + 0); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + return TEST_SUCCESS; } @@ -826,6 +947,8 @@ static struct unit_test_suite event_eth_rx_tests = { TEST_CASE_ST(adapter_create, adapter_free, adapter_start_stop), TEST_CASE_ST(adapter_create, adapter_free, adapter_stats), TEST_CASE_ST(adapter_create, adapter_free, adapter_queue_conf), + TEST_CASE_ST(adapter_create_with_params, adapter_free, + adapter_queue_event_buf_test), TEST_CASES_END() /**< NULL terminate unit test array */ } }; -- 2.25.1 ^ permalink raw reply [flat|nested] 81+ messages in thread
* [dpdk-dev] [PATCH v5 1/5] eventdev/rx_adapter: add event buffer size configurability 2021-09-30 8:28 ` [dpdk-dev] [PATCH v4 " Naga Harish K S V ` (3 preceding siblings ...) 2021-09-30 8:29 ` [dpdk-dev] [PATCH v4 5/5] test/event: add unit test for Rx adapter Naga Harish K S V @ 2021-10-04 5:41 ` Naga Harish K S V 2021-10-04 5:41 ` [dpdk-dev] [PATCH v5 2/5] test/event: add unit test for Rx adapter Naga Harish K S V ` (5 more replies) 4 siblings, 6 replies; 81+ messages in thread From: Naga Harish K S V @ 2021-10-04 5:41 UTC (permalink / raw) To: jerinj, jay.jayatheerthan; +Cc: dev, Ganapati Kundapura Currently event buffer is static array with a default size defined internally. To configure event buffer size from application, ``rte_event_eth_rx_adapter_create_with_params`` api is added which takes ``struct rte_event_eth_rx_adapter_params`` to configure event buffer size in addition other params . The event buffer size is rounded up for better buffer utilization and performance . In case of NULL params argument, default event buffer size is used. Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com> Signed-off-by: Ganapati Kundapura <ganapati.kundapura@intel.com> --- v5: * reverted queue conf get unit test change v4: * rebased with latest dpdk-next-eventdev branch * changed queue conf get unit test v3: * updated documentation and code comments as per review comments. * updated new create api test case name with suitable one. v2: * Updated header file and rx adapter documentation as per review comments. * new api name is modified as rte_event_eth_rx_adapter_create_with_params as per review comments. * rxa_params pointer argument Value NULL is allowed to represent the default values v1: * Initial implementation with documentation and unit tests. 
--- --- .../prog_guide/event_ethernet_rx_adapter.rst | 7 ++ lib/eventdev/rte_event_eth_rx_adapter.c | 98 +++++++++++++++++-- lib/eventdev/rte_event_eth_rx_adapter.h | 41 +++++++- lib/eventdev/version.map | 2 + 4 files changed, 140 insertions(+), 8 deletions(-) diff --git a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst index ce23d8a474..8526aecf57 100644 --- a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst +++ b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst @@ -62,6 +62,13 @@ service function and needs to create an event port for it. The callback is expected to fill the ``struct rte_event_eth_rx_adapter_conf structure`` passed to it. +If the application desires to control the event buffer size, it can use the +``rte_event_eth_rx_adapter_create_with_params()`` api. The event buffer size is +specified using ``struct rte_event_eth_rx_adapter_params::event_buf_size``. +The function is passed the event device to be associated with the adapter +and port configuration for the adapter to setup an event port if the +adapter needs to use a service function. 
+ Adding Rx Queues to the Adapter Instance ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/lib/eventdev/rte_event_eth_rx_adapter.c b/lib/eventdev/rte_event_eth_rx_adapter.c index 10491ca07b..606db241b8 100644 --- a/lib/eventdev/rte_event_eth_rx_adapter.c +++ b/lib/eventdev/rte_event_eth_rx_adapter.c @@ -85,7 +85,9 @@ struct rte_eth_event_enqueue_buffer { /* Count of events in this buffer */ uint16_t count; /* Array of events in this buffer */ - struct rte_event events[ETH_EVENT_BUFFER_SIZE]; + struct rte_event *events; + /* size of event buffer */ + uint16_t events_size; /* Event enqueue happens from head */ uint16_t head; /* New packets from rte_eth_rx_burst is enqued from tail */ @@ -946,7 +948,7 @@ rxa_buffer_mbufs(struct rte_event_eth_rx_adapter *rx_adapter, dropped = 0; nb_cb = dev_info->cb_fn(eth_dev_id, rx_queue_id, buf->last | - (RTE_DIM(buf->events) & ~buf->last_mask), + (buf->events_size & ~buf->last_mask), buf->count >= BATCH_SIZE ? buf->count - BATCH_SIZE : 0, &buf->events[buf->tail], @@ -972,7 +974,7 @@ rxa_pkt_buf_available(struct rte_eth_event_enqueue_buffer *buf) uint32_t nb_req = buf->tail + BATCH_SIZE; if (!buf->last) { - if (nb_req <= RTE_DIM(buf->events)) + if (nb_req <= buf->events_size) return true; if (buf->head >= BATCH_SIZE) { @@ -2206,12 +2208,15 @@ rxa_ctrl(uint8_t id, int start) return 0; } -int -rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, - rte_event_eth_rx_adapter_conf_cb conf_cb, - void *conf_arg) +static int +rxa_create(uint8_t id, uint8_t dev_id, + struct rte_event_eth_rx_adapter_params *rxa_params, + rte_event_eth_rx_adapter_conf_cb conf_cb, + void *conf_arg) { struct rte_event_eth_rx_adapter *rx_adapter; + struct rte_eth_event_enqueue_buffer *buf; + struct rte_event *events; int ret; int socket_id; uint16_t i; @@ -2226,6 +2231,7 @@ rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL); RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); 
+ if (conf_cb == NULL) return -EINVAL; @@ -2273,11 +2279,30 @@ rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, rte_free(rx_adapter); return -ENOMEM; } + rte_spinlock_init(&rx_adapter->rx_lock); + for (i = 0; i < RTE_MAX_ETHPORTS; i++) rx_adapter->eth_devices[i].dev = &rte_eth_devices[i]; + /* Rx adapter event buffer allocation */ + buf = &rx_adapter->event_enqueue_buffer; + buf->events_size = RTE_ALIGN(rxa_params->event_buf_size, BATCH_SIZE); + + events = rte_zmalloc_socket(rx_adapter->mem_name, + buf->events_size * sizeof(*events), + 0, socket_id); + if (events == NULL) { + RTE_EDEV_LOG_ERR("Failed to allocate mem for event buffer\n"); + rte_free(rx_adapter->eth_devices); + rte_free(rx_adapter); + return -ENOMEM; + } + + rx_adapter->event_enqueue_buffer.events = events; + event_eth_rx_adapter[id] = rx_adapter; + if (conf_cb == rxa_default_conf_cb) rx_adapter->default_cb_arg = 1; @@ -2293,6 +2318,61 @@ rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, return 0; } +int +rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, + rte_event_eth_rx_adapter_conf_cb conf_cb, + void *conf_arg) +{ + struct rte_event_eth_rx_adapter_params rxa_params; + + /* use default values for adapter params */ + rxa_params.event_buf_size = ETH_EVENT_BUFFER_SIZE; + + return rxa_create(id, dev_id, &rxa_params, conf_cb, conf_arg); +} + +int +rte_event_eth_rx_adapter_create_with_params(uint8_t id, uint8_t dev_id, + struct rte_event_port_conf *port_config, + struct rte_event_eth_rx_adapter_params *rxa_params) +{ + struct rte_event_port_conf *pc; + int ret; + struct rte_event_eth_rx_adapter_params temp_params = {0}; + + if (port_config == NULL) + return -EINVAL; + + /* use default values if rxa_parmas is NULL */ + if (rxa_params == NULL) { + rxa_params = &temp_params; + rxa_params->event_buf_size = ETH_EVENT_BUFFER_SIZE; + } + + if (rxa_params->event_buf_size == 0) + return -EINVAL; + + pc = rte_malloc(NULL, sizeof(*pc), 0); + if (pc == NULL) + return 
-ENOMEM; + + *pc = *port_config; + + /* adjust event buff size with BATCH_SIZE used for fetching packets + * from NIC rx queues to get full buffer utilization and prevent + * unnecessary rollovers. + */ + rxa_params->event_buf_size = RTE_ALIGN(rxa_params->event_buf_size, + BATCH_SIZE); + rxa_params->event_buf_size += BATCH_SIZE + BATCH_SIZE; + + ret = rxa_create(id, dev_id, rxa_params, rxa_default_conf_cb, pc); + if (ret) + rte_free(pc); + + return ret; +} + int rte_event_eth_rx_adapter_create(uint8_t id, uint8_t dev_id, struct rte_event_port_conf *port_config) @@ -2302,12 +2382,14 @@ rte_event_eth_rx_adapter_create(uint8_t id, uint8_t dev_id, if (port_config == NULL) return -EINVAL; + RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL); pc = rte_malloc(NULL, sizeof(*pc), 0); if (pc == NULL) return -ENOMEM; *pc = *port_config; + ret = rte_event_eth_rx_adapter_create_ext(id, dev_id, rxa_default_conf_cb, pc); @@ -2336,6 +2418,7 @@ rte_event_eth_rx_adapter_free(uint8_t id) if (rx_adapter->default_cb_arg) rte_free(rx_adapter->conf_arg); rte_free(rx_adapter->eth_devices); + rte_free(rx_adapter->event_enqueue_buffer.events); rte_free(rx_adapter); event_eth_rx_adapter[id] = NULL; @@ -2711,6 +2794,7 @@ rte_event_eth_rx_adapter_stats_get(uint8_t id, stats->rx_packets += dev_stats_sum.rx_packets; stats->rx_enq_count += dev_stats_sum.rx_enq_count; + return 0; } diff --git a/lib/eventdev/rte_event_eth_rx_adapter.h b/lib/eventdev/rte_event_eth_rx_adapter.h index 470543e434..846ca569e9 100644 --- a/lib/eventdev/rte_event_eth_rx_adapter.h +++ b/lib/eventdev/rte_event_eth_rx_adapter.h @@ -26,6 +26,7 @@ * The ethernet Rx event adapter's functions are: * - rte_event_eth_rx_adapter_create_ext() * - rte_event_eth_rx_adapter_create() + * - rte_event_eth_rx_adapter_create_with_params() * - rte_event_eth_rx_adapter_free() * - rte_event_eth_rx_adapter_queue_add() * - rte_event_eth_rx_adapter_queue_del() @@ -37,7 +38,7 @@ * * The application creates an ethernet to event adapter using 
* rte_event_eth_rx_adapter_create_ext() or rte_event_eth_rx_adapter_create() - * functions. + * or rte_event_eth_rx_adapter_create_with_params() functions. * The adapter needs to know which ethernet rx queues to poll for mbufs as well * as event device parameters such as the event queue identifier, event * priority and scheduling type that the adapter should use when constructing @@ -257,6 +258,17 @@ struct rte_event_eth_rx_adapter_vector_limits { */ }; +/** + * A structure to hold adapter config params + */ +struct rte_event_eth_rx_adapter_params { + uint16_t event_buf_size; + /**< size of event buffer for the adapter. + * This value is rounded up for better buffer utilization + * and performance. + */ +}; + /** * * Callback function invoked by the SW adapter before it continues @@ -357,6 +369,33 @@ int rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, int rte_event_eth_rx_adapter_create(uint8_t id, uint8_t dev_id, struct rte_event_port_conf *port_config); +/** + * This is a variant of rte_event_eth_rx_adapter_create() with additional + * adapter params specified in ``struct rte_event_eth_rx_adapter_params``. + * + * @param id + * The identifier of the ethernet Rx event adapter. + * + * @param dev_id + * The identifier of the event device to configure. + * + * @param port_config + * Argument of type *rte_event_port_conf* that is passed to the conf_cb + * function. + * + * @param rxa_params + * Pointer to struct rte_event_eth_rx_adapter_params. + * In case of NULL, default values are used. 
+ * + * @return + * - 0: Success + * - <0: Error code on failure + */ +__rte_experimental +int rte_event_eth_rx_adapter_create_with_params(uint8_t id, uint8_t dev_id, + struct rte_event_port_conf *port_config, + struct rte_event_eth_rx_adapter_params *rxa_params); + /** * Free an event adapter * diff --git a/lib/eventdev/version.map b/lib/eventdev/version.map index 9f280160fa..7de18497a6 100644 --- a/lib/eventdev/version.map +++ b/lib/eventdev/version.map @@ -138,6 +138,8 @@ EXPERIMENTAL { __rte_eventdev_trace_port_setup; # added in 20.11 rte_event_pmd_pci_probe_named; + # added in 21.11 + rte_event_eth_rx_adapter_create_with_params; #added in 21.05 rte_event_vector_pool_create; -- 2.25.1 ^ permalink raw reply [flat|nested] 81+ messages in thread
* [dpdk-dev] [PATCH v5 2/5] test/event: add unit test for Rx adapter 2021-10-04 5:41 ` [dpdk-dev] [PATCH v5 1/5] eventdev/rx_adapter: add event buffer size configurability Naga Harish K S V @ 2021-10-04 5:41 ` Naga Harish K S V 2021-10-05 7:36 ` Jayatheerthan, Jay 2021-10-04 5:41 ` [dpdk-dev] [PATCH v5 3/5] eventdev/rx_adapter: introduce per queue event buffer Naga Harish K S V ` (4 subsequent siblings) 5 siblings, 1 reply; 81+ messages in thread From: Naga Harish K S V @ 2021-10-04 5:41 UTC (permalink / raw) To: jerinj, jay.jayatheerthan; +Cc: dev this patch adds unit test for rte_event_eth_rx_adapter_create_with_params api and validate all possible input combinations. Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com> --- app/test/test_event_eth_rx_adapter.c | 53 +++++++++++++++++++++++++--- 1 file changed, 49 insertions(+), 4 deletions(-) diff --git a/app/test/test_event_eth_rx_adapter.c b/app/test/test_event_eth_rx_adapter.c index 13664a3a3b..7c2cf0dd70 100644 --- a/app/test/test_event_eth_rx_adapter.c +++ b/app/test/test_event_eth_rx_adapter.c @@ -428,6 +428,50 @@ adapter_create_free(void) return TEST_SUCCESS; } +static int +adapter_create_free_with_params(void) +{ + int err; + + struct rte_event_port_conf rx_p_conf = { + .dequeue_depth = 8, + .enqueue_depth = 8, + .new_event_threshold = 1200, + }; + + struct rte_event_eth_rx_adapter_params rxa_params = { + .event_buf_size = 1024 + }; + + err = rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID, + TEST_DEV_ID, NULL, NULL); + TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err); + + err = rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID, + TEST_DEV_ID, &rx_p_conf, &rxa_params); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + err = rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID, + TEST_DEV_ID, &rx_p_conf, &rxa_params); + TEST_ASSERT(err == -EEXIST, "Expected -EEXIST %d got %d", -EEXIST, err); + + rxa_params.event_buf_size = 0; + err = 
rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID, + TEST_DEV_ID, &rx_p_conf, &rxa_params); + TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err); + + err = rte_event_eth_rx_adapter_free(TEST_INST_ID); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + err = rte_event_eth_rx_adapter_free(TEST_INST_ID); + TEST_ASSERT(err == -EINVAL, "Expected -EINVAL %d got %d", -EINVAL, err); + + err = rte_event_eth_rx_adapter_free(1); + TEST_ASSERT(err == -EINVAL, "Expected -EINVAL %d got %d", -EINVAL, err); + + return TEST_SUCCESS; +} + static int adapter_queue_add_del(void) { @@ -435,7 +479,7 @@ adapter_queue_add_del(void) struct rte_event ev; uint32_t cap; - struct rte_event_eth_rx_adapter_queue_conf queue_config; + struct rte_event_eth_rx_adapter_queue_conf queue_config = {0}; err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID, TEST_ETHDEV_ID, &cap); @@ -523,7 +567,7 @@ adapter_multi_eth_add_del(void) uint16_t port_index, port_index_base, drv_id = 0; char driver_name[50]; - struct rte_event_eth_rx_adapter_queue_conf queue_config; + struct rte_event_eth_rx_adapter_queue_conf queue_config = {0}; ev.queue_id = 0; ev.sched_type = RTE_SCHED_TYPE_ATOMIC; @@ -594,7 +638,7 @@ adapter_intr_queue_add_del(void) struct rte_event ev; uint32_t cap; uint16_t eth_port; - struct rte_event_eth_rx_adapter_queue_conf queue_config; + struct rte_event_eth_rx_adapter_queue_conf queue_config = {0}; if (!default_params.rx_intr_port_inited) return 0; @@ -687,7 +731,7 @@ adapter_start_stop(void) ev.sched_type = RTE_SCHED_TYPE_ATOMIC; ev.priority = 0; - struct rte_event_eth_rx_adapter_queue_conf queue_config; + struct rte_event_eth_rx_adapter_queue_conf queue_config = {0}; queue_config.rx_queue_flags = 0; if (default_params.caps & @@ -774,6 +818,7 @@ static struct unit_test_suite event_eth_rx_tests = { .teardown = testsuite_teardown, .unit_test_cases = { TEST_CASE_ST(NULL, NULL, adapter_create_free), + TEST_CASE_ST(NULL, NULL, adapter_create_free_with_params), 
TEST_CASE_ST(adapter_create, adapter_free, adapter_queue_add_del), TEST_CASE_ST(adapter_create, adapter_free, -- 2.25.1 ^ permalink raw reply [flat|nested] 81+ messages in thread
* Re: [dpdk-dev] [PATCH v5 2/5] test/event: add unit test for Rx adapter 2021-10-04 5:41 ` [dpdk-dev] [PATCH v5 2/5] test/event: add unit test for Rx adapter Naga Harish K S V @ 2021-10-05 7:36 ` Jayatheerthan, Jay 0 siblings, 0 replies; 81+ messages in thread From: Jayatheerthan, Jay @ 2021-10-05 7:36 UTC (permalink / raw) To: Naga Harish K, S V, jerinj; +Cc: dev > -----Original Message----- > From: Naga Harish K, S V <s.v.naga.harish.k@intel.com> > Sent: Monday, October 4, 2021 11:11 AM > To: jerinj@marvell.com; Jayatheerthan, Jay <jay.jayatheerthan@intel.com> > Cc: dev@dpdk.org > Subject: [PATCH v5 2/5] test/event: add unit test for Rx adapter > > this patch adds unit test for rte_event_eth_rx_adapter_create_with_params > api and validate all possible input combinations. > > Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com> > --- > app/test/test_event_eth_rx_adapter.c | 53 +++++++++++++++++++++++++--- > 1 file changed, 49 insertions(+), 4 deletions(-) > > diff --git a/app/test/test_event_eth_rx_adapter.c b/app/test/test_event_eth_rx_adapter.c > index 13664a3a3b..7c2cf0dd70 100644 > --- a/app/test/test_event_eth_rx_adapter.c > +++ b/app/test/test_event_eth_rx_adapter.c > @@ -428,6 +428,50 @@ adapter_create_free(void) > return TEST_SUCCESS; > } > > +static int > +adapter_create_free_with_params(void) > +{ > + int err; > + > + struct rte_event_port_conf rx_p_conf = { > + .dequeue_depth = 8, > + .enqueue_depth = 8, > + .new_event_threshold = 1200, > + }; > + > + struct rte_event_eth_rx_adapter_params rxa_params = { > + .event_buf_size = 1024 > + }; > + > + err = rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID, > + TEST_DEV_ID, NULL, NULL); > + TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err); > + > + err = rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID, > + TEST_DEV_ID, &rx_p_conf, &rxa_params); > + TEST_ASSERT(err == 0, "Expected 0 got %d", err); > + > + err = rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID, > + 
TEST_DEV_ID, &rx_p_conf, &rxa_params); > + TEST_ASSERT(err == -EEXIST, "Expected -EEXIST %d got %d", -EEXIST, err); > + > + rxa_params.event_buf_size = 0; > + err = rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID, > + TEST_DEV_ID, &rx_p_conf, &rxa_params); > + TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err); > + > + err = rte_event_eth_rx_adapter_free(TEST_INST_ID); > + TEST_ASSERT(err == 0, "Expected 0 got %d", err); > + > + err = rte_event_eth_rx_adapter_free(TEST_INST_ID); > + TEST_ASSERT(err == -EINVAL, "Expected -EINVAL %d got %d", -EINVAL, err); > + > + err = rte_event_eth_rx_adapter_free(1); > + TEST_ASSERT(err == -EINVAL, "Expected -EINVAL %d got %d", -EINVAL, err); > + > + return TEST_SUCCESS; > +} > + > static int > adapter_queue_add_del(void) > { > @@ -435,7 +479,7 @@ adapter_queue_add_del(void) > struct rte_event ev; > uint32_t cap; > > - struct rte_event_eth_rx_adapter_queue_conf queue_config; > + struct rte_event_eth_rx_adapter_queue_conf queue_config = {0}; > > err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID, TEST_ETHDEV_ID, > &cap); > @@ -523,7 +567,7 @@ adapter_multi_eth_add_del(void) > uint16_t port_index, port_index_base, drv_id = 0; > char driver_name[50]; > > - struct rte_event_eth_rx_adapter_queue_conf queue_config; > + struct rte_event_eth_rx_adapter_queue_conf queue_config = {0}; > > ev.queue_id = 0; > ev.sched_type = RTE_SCHED_TYPE_ATOMIC; > @@ -594,7 +638,7 @@ adapter_intr_queue_add_del(void) > struct rte_event ev; > uint32_t cap; > uint16_t eth_port; > - struct rte_event_eth_rx_adapter_queue_conf queue_config; > + struct rte_event_eth_rx_adapter_queue_conf queue_config = {0}; > > if (!default_params.rx_intr_port_inited) > return 0; > @@ -687,7 +731,7 @@ adapter_start_stop(void) > ev.sched_type = RTE_SCHED_TYPE_ATOMIC; > ev.priority = 0; > > - struct rte_event_eth_rx_adapter_queue_conf queue_config; > + struct rte_event_eth_rx_adapter_queue_conf queue_config = {0}; > > queue_config.rx_queue_flags = 0; > if 
(default_params.caps & > @@ -774,6 +818,7 @@ static struct unit_test_suite event_eth_rx_tests = { > .teardown = testsuite_teardown, > .unit_test_cases = { > TEST_CASE_ST(NULL, NULL, adapter_create_free), > + TEST_CASE_ST(NULL, NULL, adapter_create_free_with_params), > TEST_CASE_ST(adapter_create, adapter_free, > adapter_queue_add_del), > TEST_CASE_ST(adapter_create, adapter_free, > -- > 2.25.1 Acked-by: Jay Jayatheerthan <jay.jayatheerthan@intel.com> ^ permalink raw reply [flat|nested] 81+ messages in thread
* [dpdk-dev] [PATCH v5 3/5] eventdev/rx_adapter: introduce per queue event buffer 2021-10-04 5:41 ` [dpdk-dev] [PATCH v5 1/5] eventdev/rx_adapter: add event buffer size configurability Naga Harish K S V 2021-10-04 5:41 ` [dpdk-dev] [PATCH v5 2/5] test/event: add unit test for Rx adapter Naga Harish K S V @ 2021-10-04 5:41 ` Naga Harish K S V 2021-10-05 7:39 ` Jayatheerthan, Jay 2021-10-04 5:41 ` [dpdk-dev] [PATCH v5 4/5] eventdev/rx_adapter: implement " Naga Harish K S V ` (3 subsequent siblings) 5 siblings, 1 reply; 81+ messages in thread From: Naga Harish K S V @ 2021-10-04 5:41 UTC (permalink / raw) To: jerinj, jay.jayatheerthan; +Cc: dev To configure per queue event buffer size, application sets ``rte_event_eth_rx_adapter_params::use_queue_event_buf`` flag as true and is passed to ``rte_event_eth_rx_adapter_create_with_params`` api. The per queue event buffer size is populated in ``rte_event_eth_rx_adapter_queue_conf::event_buf_size`` and passed to ``rte_event_eth_rx_adapter_queue_add`` api. Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com> --- .../prog_guide/event_ethernet_rx_adapter.rst | 19 ++++++++++++------- lib/eventdev/rte_event_eth_rx_adapter.h | 4 ++++ 2 files changed, 16 insertions(+), 7 deletions(-) diff --git a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst index 8526aecf57..8b58130fc5 100644 --- a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst +++ b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst @@ -62,12 +62,14 @@ service function and needs to create an event port for it. The callback is expected to fill the ``struct rte_event_eth_rx_adapter_conf structure`` passed to it. -If the application desires to control the event buffer size, it can use the -``rte_event_eth_rx_adapter_create_with_params()`` api. The event buffer size is -specified using ``struct rte_event_eth_rx_adapter_params::event_buf_size``. 
-The function is passed the event device to be associated with the adapter -and port configuration for the adapter to setup an event port if the -adapter needs to use a service function. +If the application desires to control the event buffer size at adapter level, +it can use the ``rte_event_eth_rx_adapter_create_with_params()`` api. The event +buffer size is specified using ``struct rte_event_eth_rx_adapter_params:: +event_buf_size``. To configure the event buffer size at queue level, the boolean +flag ``struct rte_event_eth_rx_adapter_params::use_queue_event_buf`` need to be +set to true. The function is passed the event device to be associated with +the adapter and port configuration for the adapter to setup an event port +if the adapter needs to use a service function. Adding Rx Queues to the Adapter Instance ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -79,7 +81,9 @@ parameter. Event information for packets from this Rx queue is encoded in the ``ev`` field of ``struct rte_event_eth_rx_adapter_queue_conf``. The servicing_weight member of the struct rte_event_eth_rx_adapter_queue_conf is the relative polling frequency of the Rx queue and is applicable when the -adapter uses a service core function. +adapter uses a service core function. The applications can configure queue +event buffer size in ``struct rte_event_eth_rx_adapter_queue_conf::event_buf_size`` +parameter. .. code-block:: c @@ -90,6 +94,7 @@ adapter uses a service core function. 
queue_config.rx_queue_flags = 0; queue_config.ev = ev; queue_config.servicing_weight = 1; + queue_config.event_buf_size = 1024; err = rte_event_eth_rx_adapter_queue_add(id, eth_dev_id, diff --git a/lib/eventdev/rte_event_eth_rx_adapter.h b/lib/eventdev/rte_event_eth_rx_adapter.h index 846ca569e9..70ca427d66 100644 --- a/lib/eventdev/rte_event_eth_rx_adapter.h +++ b/lib/eventdev/rte_event_eth_rx_adapter.h @@ -200,6 +200,8 @@ struct rte_event_eth_rx_adapter_queue_conf { * Valid when RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR flag is set in * @see rte_event_eth_rx_adapter_queue_conf::rx_queue_flags. */ + uint16_t event_buf_size; + /**< event buffer size for this queue */ }; /** @@ -267,6 +269,8 @@ struct rte_event_eth_rx_adapter_params { * This value is rounded up for better buffer utilization * and performance. */ + bool use_queue_event_buf; + /**< flag to indicate that event buffer is separate for each queue */ }; /** -- 2.25.1 ^ permalink raw reply [flat|nested] 81+ messages in thread
* Re: [dpdk-dev] [PATCH v5 3/5] eventdev/rx_adapter: introduce per queue event buffer 2021-10-04 5:41 ` [dpdk-dev] [PATCH v5 3/5] eventdev/rx_adapter: introduce per queue event buffer Naga Harish K S V @ 2021-10-05 7:39 ` Jayatheerthan, Jay 0 siblings, 0 replies; 81+ messages in thread From: Jayatheerthan, Jay @ 2021-10-05 7:39 UTC (permalink / raw) To: Naga Harish K, S V, jerinj; +Cc: dev > -----Original Message----- > From: Naga Harish K, S V <s.v.naga.harish.k@intel.com> > Sent: Monday, October 4, 2021 11:11 AM > To: jerinj@marvell.com; Jayatheerthan, Jay <jay.jayatheerthan@intel.com> > Cc: dev@dpdk.org > Subject: [PATCH v5 3/5] eventdev/rx_adapter: introduce per queue event buffer > > To configure per queue event buffer size, application sets > ``rte_event_eth_rx_adapter_params::use_queue_event_buf`` flag > as true and is passed to ``rte_event_eth_rx_adapter_create_with_params`` > api. > > The per queue event buffer size is populated in > ``rte_event_eth_rx_adapter_queue_conf::event_buf_size`` and passed > to ``rte_event_eth_rx_adapter_queue_add`` api. > > Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com> > --- > .../prog_guide/event_ethernet_rx_adapter.rst | 19 ++++++++++++------- > lib/eventdev/rte_event_eth_rx_adapter.h | 4 ++++ > 2 files changed, 16 insertions(+), 7 deletions(-) > > diff --git a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst > index 8526aecf57..8b58130fc5 100644 > --- a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst > +++ b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst > @@ -62,12 +62,14 @@ service function and needs to create an event port for it. The callback is > expected to fill the ``struct rte_event_eth_rx_adapter_conf structure`` > passed to it. > > -If the application desires to control the event buffer size, it can use the > -``rte_event_eth_rx_adapter_create_with_params()`` api. 
The event buffer size is > -specified using ``struct rte_event_eth_rx_adapter_params::event_buf_size``. > -The function is passed the event device to be associated with the adapter > -and port configuration for the adapter to setup an event port if the > -adapter needs to use a service function. > +If the application desires to control the event buffer size at adapter level, > +it can use the ``rte_event_eth_rx_adapter_create_with_params()`` api. The event > +buffer size is specified using ``struct rte_event_eth_rx_adapter_params:: > +event_buf_size``. To configure the event buffer size at queue level, the boolean > +flag ``struct rte_event_eth_rx_adapter_params::use_queue_event_buf`` need to be > +set to true. The function is passed the event device to be associated with > +the adapter and port configuration for the adapter to setup an event port > +if the adapter needs to use a service function. > > Adding Rx Queues to the Adapter Instance > ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ > @@ -79,7 +81,9 @@ parameter. Event information for packets from this Rx queue is encoded in the > ``ev`` field of ``struct rte_event_eth_rx_adapter_queue_conf``. The > servicing_weight member of the struct rte_event_eth_rx_adapter_queue_conf > is the relative polling frequency of the Rx queue and is applicable when the > -adapter uses a service core function. > +adapter uses a service core function. The applications can configure queue > +event buffer size in ``struct rte_event_eth_rx_adapter_queue_conf::event_buf_size`` > +parameter. > > .. code-block:: c > > @@ -90,6 +94,7 @@ adapter uses a service core function. 
> queue_config.rx_queue_flags = 0; > queue_config.ev = ev; > queue_config.servicing_weight = 1; > + queue_config.event_buf_size = 1024; > > err = rte_event_eth_rx_adapter_queue_add(id, > eth_dev_id, > diff --git a/lib/eventdev/rte_event_eth_rx_adapter.h b/lib/eventdev/rte_event_eth_rx_adapter.h > index 846ca569e9..70ca427d66 100644 > --- a/lib/eventdev/rte_event_eth_rx_adapter.h > +++ b/lib/eventdev/rte_event_eth_rx_adapter.h > @@ -200,6 +200,8 @@ struct rte_event_eth_rx_adapter_queue_conf { > * Valid when RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR flag is set in > * @see rte_event_eth_rx_adapter_queue_conf::rx_queue_flags. > */ > + uint16_t event_buf_size; > + /**< event buffer size for this queue */ > }; > > /** > @@ -267,6 +269,8 @@ struct rte_event_eth_rx_adapter_params { > * This value is rounded up for better buffer utilization > * and performance. > */ > + bool use_queue_event_buf; > + /**< flag to indicate that event buffer is separate for each queue */ > }; > > /** > -- > 2.25.1 Acked-by: Jay Jayatheerthan <jay.jayatheerthan@intel.com> ^ permalink raw reply [flat|nested] 81+ messages in thread
* [dpdk-dev] [PATCH v5 4/5] eventdev/rx_adapter: implement per queue event buffer 2021-10-04 5:41 ` [dpdk-dev] [PATCH v5 1/5] eventdev/rx_adapter: add event buffer size configurability Naga Harish K S V 2021-10-04 5:41 ` [dpdk-dev] [PATCH v5 2/5] test/event: add unit test for Rx adapter Naga Harish K S V 2021-10-04 5:41 ` [dpdk-dev] [PATCH v5 3/5] eventdev/rx_adapter: introduce per queue event buffer Naga Harish K S V @ 2021-10-04 5:41 ` Naga Harish K S V 2021-10-05 7:55 ` Jayatheerthan, Jay 2021-10-04 5:41 ` [dpdk-dev] [PATCH v5 5/5] test/event: add unit test for Rx adapter Naga Harish K S V ` (2 subsequent siblings) 5 siblings, 1 reply; 81+ messages in thread From: Naga Harish K S V @ 2021-10-04 5:41 UTC (permalink / raw) To: jerinj, jay.jayatheerthan; +Cc: dev this patch implement the per queue event buffer with required validations. Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com> --- lib/eventdev/rte_event_eth_rx_adapter.c | 187 +++++++++++++++++------- 1 file changed, 138 insertions(+), 49 deletions(-) diff --git a/lib/eventdev/rte_event_eth_rx_adapter.c b/lib/eventdev/rte_event_eth_rx_adapter.c index 606db241b8..b61af0e75e 100644 --- a/lib/eventdev/rte_event_eth_rx_adapter.c +++ b/lib/eventdev/rte_event_eth_rx_adapter.c @@ -102,10 +102,12 @@ struct rte_event_eth_rx_adapter { uint8_t rss_key_be[RSS_KEY_SIZE]; /* Event device identifier */ uint8_t eventdev_id; - /* Per ethernet device structure */ - struct eth_device_info *eth_devices; /* Event port identifier */ uint8_t event_port_id; + /* Flag indicating per rxq event buffer */ + bool use_queue_event_buf; + /* Per ethernet device structure */ + struct eth_device_info *eth_devices; /* Lock to serialize config updates with service function */ rte_spinlock_t rx_lock; /* Max mbufs processed in any service function invocation */ @@ -241,6 +243,7 @@ struct eth_rx_queue_info { uint32_t flow_id_mask; /* Set to ~0 if app provides flow id else 0 */ uint64_t event; struct eth_rx_vector_data vector_data; + 
struct rte_eth_event_enqueue_buffer *event_buf; }; static struct rte_event_eth_rx_adapter **event_eth_rx_adapter; @@ -767,10 +770,9 @@ rxa_enq_block_end_ts(struct rte_event_eth_rx_adapter *rx_adapter, /* Enqueue buffered events to event device */ static inline uint16_t -rxa_flush_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter) +rxa_flush_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter, + struct rte_eth_event_enqueue_buffer *buf) { - struct rte_eth_event_enqueue_buffer *buf = - &rx_adapter->event_enqueue_buffer; struct rte_event_eth_rx_adapter_stats *stats = &rx_adapter->stats; uint16_t count = buf->last ? buf->last - buf->head : buf->count; @@ -888,15 +890,14 @@ rxa_buffer_mbufs(struct rte_event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id, uint16_t rx_queue_id, struct rte_mbuf **mbufs, - uint16_t num) + uint16_t num, + struct rte_eth_event_enqueue_buffer *buf) { uint32_t i; struct eth_device_info *dev_info = &rx_adapter->eth_devices[eth_dev_id]; struct eth_rx_queue_info *eth_rx_queue_info = &dev_info->rx_queue[rx_queue_id]; - struct rte_eth_event_enqueue_buffer *buf = - &rx_adapter->event_enqueue_buffer; uint16_t new_tail = buf->tail; uint64_t event = eth_rx_queue_info->event; uint32_t flow_id_mask = eth_rx_queue_info->flow_id_mask; @@ -995,11 +996,10 @@ rxa_eth_rx(struct rte_event_eth_rx_adapter *rx_adapter, uint16_t queue_id, uint32_t rx_count, uint32_t max_rx, - int *rxq_empty) + int *rxq_empty, + struct rte_eth_event_enqueue_buffer *buf) { struct rte_mbuf *mbufs[BATCH_SIZE]; - struct rte_eth_event_enqueue_buffer *buf = - &rx_adapter->event_enqueue_buffer; struct rte_event_eth_rx_adapter_stats *stats = &rx_adapter->stats; uint16_t n; @@ -1012,7 +1012,7 @@ rxa_eth_rx(struct rte_event_eth_rx_adapter *rx_adapter, */ while (rxa_pkt_buf_available(buf)) { if (buf->count >= BATCH_SIZE) - rxa_flush_event_buffer(rx_adapter); + rxa_flush_event_buffer(rx_adapter, buf); stats->rx_poll_count++; n = rte_eth_rx_burst(port_id, queue_id, mbufs, 
BATCH_SIZE); @@ -1021,14 +1021,14 @@ rxa_eth_rx(struct rte_event_eth_rx_adapter *rx_adapter, *rxq_empty = 1; break; } - rxa_buffer_mbufs(rx_adapter, port_id, queue_id, mbufs, n); + rxa_buffer_mbufs(rx_adapter, port_id, queue_id, mbufs, n, buf); nb_rx += n; if (rx_count + nb_rx > max_rx) break; } if (buf->count > 0) - rxa_flush_event_buffer(rx_adapter); + rxa_flush_event_buffer(rx_adapter, buf); return nb_rx; } @@ -1169,7 +1169,7 @@ rxa_intr_ring_dequeue(struct rte_event_eth_rx_adapter *rx_adapter) ring_lock = &rx_adapter->intr_ring_lock; if (buf->count >= BATCH_SIZE) - rxa_flush_event_buffer(rx_adapter); + rxa_flush_event_buffer(rx_adapter, buf); while (rxa_pkt_buf_available(buf)) { struct eth_device_info *dev_info; @@ -1221,7 +1221,7 @@ rxa_intr_ring_dequeue(struct rte_event_eth_rx_adapter *rx_adapter) continue; n = rxa_eth_rx(rx_adapter, port, i, nb_rx, rx_adapter->max_nb_rx, - &rxq_empty); + &rxq_empty, buf); nb_rx += n; enq_buffer_full = !rxq_empty && n == 0; @@ -1242,7 +1242,7 @@ rxa_intr_ring_dequeue(struct rte_event_eth_rx_adapter *rx_adapter) } else { n = rxa_eth_rx(rx_adapter, port, queue, nb_rx, rx_adapter->max_nb_rx, - &rxq_empty); + &rxq_empty, buf); rx_adapter->qd_valid = !rxq_empty; nb_rx += n; if (nb_rx > rx_adapter->max_nb_rx) @@ -1273,13 +1273,12 @@ rxa_poll(struct rte_event_eth_rx_adapter *rx_adapter) { uint32_t num_queue; uint32_t nb_rx = 0; - struct rte_eth_event_enqueue_buffer *buf; + struct rte_eth_event_enqueue_buffer *buf = NULL; uint32_t wrr_pos; uint32_t max_nb_rx; wrr_pos = rx_adapter->wrr_pos; max_nb_rx = rx_adapter->max_nb_rx; - buf = &rx_adapter->event_enqueue_buffer; /* Iterate through a WRR sequence */ for (num_queue = 0; num_queue < rx_adapter->wrr_len; num_queue++) { @@ -1287,24 +1286,36 @@ rxa_poll(struct rte_event_eth_rx_adapter *rx_adapter) uint16_t qid = rx_adapter->eth_rx_poll[poll_idx].eth_rx_qid; uint16_t d = rx_adapter->eth_rx_poll[poll_idx].eth_dev_id; + if (rx_adapter->use_queue_event_buf) { + struct eth_device_info 
*dev_info = + &rx_adapter->eth_devices[d]; + buf = dev_info->rx_queue[qid].event_buf; + } else + buf = &rx_adapter->event_enqueue_buffer; + /* Don't do a batch dequeue from the rx queue if there isn't * enough space in the enqueue buffer. */ if (buf->count >= BATCH_SIZE) - rxa_flush_event_buffer(rx_adapter); + rxa_flush_event_buffer(rx_adapter, buf); if (!rxa_pkt_buf_available(buf)) { - rx_adapter->wrr_pos = wrr_pos; - return nb_rx; + if (rx_adapter->use_queue_event_buf) + goto poll_next_entry; + else { + rx_adapter->wrr_pos = wrr_pos; + return nb_rx; + } } nb_rx += rxa_eth_rx(rx_adapter, d, qid, nb_rx, max_nb_rx, - NULL); + NULL, buf); if (nb_rx > max_nb_rx) { rx_adapter->wrr_pos = (wrr_pos + 1) % rx_adapter->wrr_len; break; } +poll_next_entry: if (++wrr_pos == rx_adapter->wrr_len) wrr_pos = 0; } @@ -1315,12 +1326,18 @@ static void rxa_vector_expire(struct eth_rx_vector_data *vec, void *arg) { struct rte_event_eth_rx_adapter *rx_adapter = arg; - struct rte_eth_event_enqueue_buffer *buf = - &rx_adapter->event_enqueue_buffer; + struct rte_eth_event_enqueue_buffer *buf = NULL; struct rte_event *ev; + if (rx_adapter->use_queue_event_buf) { + struct eth_device_info *dev_info = + &rx_adapter->eth_devices[vec->port]; + buf = dev_info->rx_queue[vec->queue].event_buf; + } else + buf = &rx_adapter->event_enqueue_buffer; + if (buf->count) - rxa_flush_event_buffer(rx_adapter); + rxa_flush_event_buffer(rx_adapter, buf); if (vec->vector_ev->nb_elem == 0) return; @@ -1947,9 +1964,16 @@ rxa_sw_del(struct rte_event_eth_rx_adapter *rx_adapter, rx_adapter->num_rx_intr -= intrq; dev_info->nb_rx_intr -= intrq; dev_info->nb_shared_intr -= intrq && sintrq; + if (rx_adapter->use_queue_event_buf) { + struct rte_eth_event_enqueue_buffer *event_buf = + dev_info->rx_queue[rx_queue_id].event_buf; + rte_free(event_buf->events); + rte_free(event_buf); + dev_info->rx_queue[rx_queue_id].event_buf = NULL; + } } -static void +static int rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter, 
struct eth_device_info *dev_info, int32_t rx_queue_id, @@ -1961,15 +1985,21 @@ rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter, int intrq; int sintrq; struct rte_event *qi_ev; + struct rte_eth_event_enqueue_buffer *new_rx_buf = NULL; + uint16_t eth_dev_id = dev_info->dev->data->port_id; + int ret; if (rx_queue_id == -1) { uint16_t nb_rx_queues; uint16_t i; nb_rx_queues = dev_info->dev->data->nb_rx_queues; - for (i = 0; i < nb_rx_queues; i++) - rxa_add_queue(rx_adapter, dev_info, i, conf); - return; + for (i = 0; i < nb_rx_queues; i++) { + ret = rxa_add_queue(rx_adapter, dev_info, i, conf); + if (ret) + return ret; + } + return 0; } pollq = rxa_polled_queue(dev_info, rx_queue_id); @@ -2032,6 +2062,37 @@ rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter, dev_info->next_q_idx = 0; } } + + if (!rx_adapter->use_queue_event_buf) + return 0; + + new_rx_buf = rte_zmalloc_socket("rx_buffer_meta", + sizeof(*new_rx_buf), 0, + rte_eth_dev_socket_id(eth_dev_id)); + if (new_rx_buf == NULL) { + RTE_EDEV_LOG_ERR("Failed to allocate event buffer meta for " + "dev_id: %d queue_id: %d", + eth_dev_id, rx_queue_id); + return -ENOMEM; + } + + new_rx_buf->events_size = RTE_ALIGN(conf->event_buf_size, BATCH_SIZE); + new_rx_buf->events_size += (2 * BATCH_SIZE); + new_rx_buf->events = rte_zmalloc_socket("rx_buffer", + sizeof(struct rte_event) * + new_rx_buf->events_size, 0, + rte_eth_dev_socket_id(eth_dev_id)); + if (new_rx_buf->events == NULL) { + rte_free(new_rx_buf); + RTE_EDEV_LOG_ERR("Failed to allocate event buffer for " + "dev_id: %d queue_id: %d", + eth_dev_id, rx_queue_id); + return -ENOMEM; + } + + queue_info->event_buf = new_rx_buf; + + return 0; } static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter, @@ -2060,6 +2121,16 @@ static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter, temp_conf.servicing_weight = 1; } queue_conf = &temp_conf; + + if (queue_conf->servicing_weight == 0 && + rx_adapter->use_queue_event_buf) { + + 
RTE_EDEV_LOG_ERR("Use of queue level event buffer " + "not supported for interrupt queues " + "dev_id: %d queue_id: %d", + eth_dev_id, rx_queue_id); + return -EINVAL; + } } nb_rx_queues = dev_info->dev->data->nb_rx_queues; @@ -2139,7 +2210,9 @@ static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter, - rxa_add_queue(rx_adapter, dev_info, rx_queue_id, queue_conf); + ret = rxa_add_queue(rx_adapter, dev_info, rx_queue_id, queue_conf); + if (ret) + goto err_free_rxqueue; rxa_calc_wrr_sequence(rx_adapter, rx_poll, rx_wrr); rte_free(rx_adapter->eth_rx_poll); @@ -2160,7 +2233,7 @@ static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter, rte_free(rx_poll); rte_free(rx_wrr); - return 0; + return ret; } static int @@ -2286,20 +2359,26 @@ rxa_create(uint8_t id, uint8_t dev_id, rx_adapter->eth_devices[i].dev = &rte_eth_devices[i]; /* Rx adapter event buffer allocation */ - buf = &rx_adapter->event_enqueue_buffer; - buf->events_size = RTE_ALIGN(rxa_params->event_buf_size, BATCH_SIZE); - - events = rte_zmalloc_socket(rx_adapter->mem_name, - buf->events_size * sizeof(*events), - 0, socket_id); - if (events == NULL) { - RTE_EDEV_LOG_ERR("Failed to allocate mem for event buffer\n"); - rte_free(rx_adapter->eth_devices); - rte_free(rx_adapter); - return -ENOMEM; - } + rx_adapter->use_queue_event_buf = rxa_params->use_queue_event_buf; + + if (!rx_adapter->use_queue_event_buf) { + buf = &rx_adapter->event_enqueue_buffer; + buf->events_size = RTE_ALIGN(rxa_params->event_buf_size, + BATCH_SIZE); + + events = rte_zmalloc_socket(rx_adapter->mem_name, + buf->events_size * sizeof(*events), + 0, socket_id); + if (events == NULL) { + RTE_EDEV_LOG_ERR("Failed to allocate memory " + "for adapter event buffer"); + rte_free(rx_adapter->eth_devices); + rte_free(rx_adapter); + return -ENOMEM; + } - rx_adapter->event_enqueue_buffer.events = events; + rx_adapter->event_enqueue_buffer.events = events; + } event_eth_rx_adapter[id] = rx_adapter; @@ -2327,6 +2406,7 @@ 
rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, /* use default values for adapter params */ rxa_params.event_buf_size = ETH_EVENT_BUFFER_SIZE; + rxa_params.use_queue_event_buf = false; return rxa_create(id, dev_id, &rxa_params, conf_cb, conf_arg); } @@ -2347,9 +2427,9 @@ rte_event_eth_rx_adapter_create_with_params(uint8_t id, uint8_t dev_id, if (rxa_params == NULL) { rxa_params = &temp_params; rxa_params->event_buf_size = ETH_EVENT_BUFFER_SIZE; - } - - if (rxa_params->event_buf_size == 0) + rxa_params->use_queue_event_buf = false; + } else if ((!rxa_params->use_queue_event_buf && + rxa_params->event_buf_size == 0)) return -EINVAL; pc = rte_malloc(NULL, sizeof(*pc), 0); @@ -2418,7 +2498,8 @@ rte_event_eth_rx_adapter_free(uint8_t id) if (rx_adapter->default_cb_arg) rte_free(rx_adapter->conf_arg); rte_free(rx_adapter->eth_devices); - rte_free(rx_adapter->event_enqueue_buffer.events); + if (!rx_adapter->use_queue_event_buf) + rte_free(rx_adapter->event_enqueue_buffer.events); rte_free(rx_adapter); event_eth_rx_adapter[id] = NULL; @@ -2522,6 +2603,14 @@ rte_event_eth_rx_adapter_queue_add(uint8_t id, return -EINVAL; } + if ((rx_adapter->use_queue_event_buf && + queue_conf->event_buf_size == 0) || + (!rx_adapter->use_queue_event_buf && + queue_conf->event_buf_size != 0)) { + RTE_EDEV_LOG_ERR("Invalid Event buffer size for the queue"); + return -EINVAL; + } + dev_info = &rx_adapter->eth_devices[eth_dev_id]; if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) { -- 2.25.1 ^ permalink raw reply [flat|nested] 81+ messages in thread
* Re: [dpdk-dev] [PATCH v5 4/5] eventdev/rx_adapter: implement per queue event buffer 2021-10-04 5:41 ` [dpdk-dev] [PATCH v5 4/5] eventdev/rx_adapter: implement " Naga Harish K S V @ 2021-10-05 7:55 ` Jayatheerthan, Jay 2021-10-05 14:47 ` Naga Harish K, S V 0 siblings, 1 reply; 81+ messages in thread From: Jayatheerthan, Jay @ 2021-10-05 7:55 UTC (permalink / raw) To: Naga Harish K, S V, jerinj; +Cc: dev > -----Original Message----- > From: Naga Harish K, S V <s.v.naga.harish.k@intel.com> > Sent: Monday, October 4, 2021 11:11 AM > To: jerinj@marvell.com; Jayatheerthan, Jay <jay.jayatheerthan@intel.com> > Cc: dev@dpdk.org > Subject: [PATCH v5 4/5] eventdev/rx_adapter: implement per queue event buffer > > this patch implement the per queue event buffer with > required validations. > > Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com> > --- > lib/eventdev/rte_event_eth_rx_adapter.c | 187 +++++++++++++++++------- > 1 file changed, 138 insertions(+), 49 deletions(-) > > diff --git a/lib/eventdev/rte_event_eth_rx_adapter.c b/lib/eventdev/rte_event_eth_rx_adapter.c > index 606db241b8..b61af0e75e 100644 > --- a/lib/eventdev/rte_event_eth_rx_adapter.c > +++ b/lib/eventdev/rte_event_eth_rx_adapter.c > @@ -102,10 +102,12 @@ struct rte_event_eth_rx_adapter { > uint8_t rss_key_be[RSS_KEY_SIZE]; > /* Event device identifier */ > uint8_t eventdev_id; > - /* Per ethernet device structure */ > - struct eth_device_info *eth_devices; > /* Event port identifier */ > uint8_t event_port_id; > + /* Flag indicating per rxq event buffer */ > + bool use_queue_event_buf; > + /* Per ethernet device structure */ > + struct eth_device_info *eth_devices; > /* Lock to serialize config updates with service function */ > rte_spinlock_t rx_lock; > /* Max mbufs processed in any service function invocation */ > @@ -241,6 +243,7 @@ struct eth_rx_queue_info { > uint32_t flow_id_mask; /* Set to ~0 if app provides flow id else 0 */ > uint64_t event; > struct eth_rx_vector_data vector_data; > 
+ struct rte_eth_event_enqueue_buffer *event_buf; > }; > > static struct rte_event_eth_rx_adapter **event_eth_rx_adapter; > @@ -767,10 +770,9 @@ rxa_enq_block_end_ts(struct rte_event_eth_rx_adapter *rx_adapter, > > /* Enqueue buffered events to event device */ > static inline uint16_t > -rxa_flush_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter) > +rxa_flush_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter, > + struct rte_eth_event_enqueue_buffer *buf) > { > - struct rte_eth_event_enqueue_buffer *buf = > - &rx_adapter->event_enqueue_buffer; > struct rte_event_eth_rx_adapter_stats *stats = &rx_adapter->stats; > uint16_t count = buf->last ? buf->last - buf->head : buf->count; > > @@ -888,15 +890,14 @@ rxa_buffer_mbufs(struct rte_event_eth_rx_adapter *rx_adapter, > uint16_t eth_dev_id, > uint16_t rx_queue_id, > struct rte_mbuf **mbufs, > - uint16_t num) > + uint16_t num, > + struct rte_eth_event_enqueue_buffer *buf) > { > uint32_t i; > struct eth_device_info *dev_info = > &rx_adapter->eth_devices[eth_dev_id]; > struct eth_rx_queue_info *eth_rx_queue_info = > &dev_info->rx_queue[rx_queue_id]; > - struct rte_eth_event_enqueue_buffer *buf = > - &rx_adapter->event_enqueue_buffer; > uint16_t new_tail = buf->tail; > uint64_t event = eth_rx_queue_info->event; > uint32_t flow_id_mask = eth_rx_queue_info->flow_id_mask; > @@ -995,11 +996,10 @@ rxa_eth_rx(struct rte_event_eth_rx_adapter *rx_adapter, > uint16_t queue_id, > uint32_t rx_count, > uint32_t max_rx, > - int *rxq_empty) > + int *rxq_empty, > + struct rte_eth_event_enqueue_buffer *buf) > { > struct rte_mbuf *mbufs[BATCH_SIZE]; > - struct rte_eth_event_enqueue_buffer *buf = > - &rx_adapter->event_enqueue_buffer; > struct rte_event_eth_rx_adapter_stats *stats = > &rx_adapter->stats; > uint16_t n; > @@ -1012,7 +1012,7 @@ rxa_eth_rx(struct rte_event_eth_rx_adapter *rx_adapter, > */ > while (rxa_pkt_buf_available(buf)) { > if (buf->count >= BATCH_SIZE) > - rxa_flush_event_buffer(rx_adapter); > + 
rxa_flush_event_buffer(rx_adapter, buf); > > stats->rx_poll_count++; > n = rte_eth_rx_burst(port_id, queue_id, mbufs, BATCH_SIZE); > @@ -1021,14 +1021,14 @@ rxa_eth_rx(struct rte_event_eth_rx_adapter *rx_adapter, > *rxq_empty = 1; > break; > } > - rxa_buffer_mbufs(rx_adapter, port_id, queue_id, mbufs, n); > + rxa_buffer_mbufs(rx_adapter, port_id, queue_id, mbufs, n, buf); > nb_rx += n; > if (rx_count + nb_rx > max_rx) > break; > } > > if (buf->count > 0) > - rxa_flush_event_buffer(rx_adapter); > + rxa_flush_event_buffer(rx_adapter, buf); > > return nb_rx; > } > @@ -1169,7 +1169,7 @@ rxa_intr_ring_dequeue(struct rte_event_eth_rx_adapter *rx_adapter) > ring_lock = &rx_adapter->intr_ring_lock; > > if (buf->count >= BATCH_SIZE) > - rxa_flush_event_buffer(rx_adapter); > + rxa_flush_event_buffer(rx_adapter, buf); > > while (rxa_pkt_buf_available(buf)) { > struct eth_device_info *dev_info; > @@ -1221,7 +1221,7 @@ rxa_intr_ring_dequeue(struct rte_event_eth_rx_adapter *rx_adapter) > continue; > n = rxa_eth_rx(rx_adapter, port, i, nb_rx, > rx_adapter->max_nb_rx, > - &rxq_empty); > + &rxq_empty, buf); > nb_rx += n; > > enq_buffer_full = !rxq_empty && n == 0; > @@ -1242,7 +1242,7 @@ rxa_intr_ring_dequeue(struct rte_event_eth_rx_adapter *rx_adapter) > } else { > n = rxa_eth_rx(rx_adapter, port, queue, nb_rx, > rx_adapter->max_nb_rx, > - &rxq_empty); > + &rxq_empty, buf); > rx_adapter->qd_valid = !rxq_empty; > nb_rx += n; > if (nb_rx > rx_adapter->max_nb_rx) > @@ -1273,13 +1273,12 @@ rxa_poll(struct rte_event_eth_rx_adapter *rx_adapter) > { > uint32_t num_queue; > uint32_t nb_rx = 0; > - struct rte_eth_event_enqueue_buffer *buf; > + struct rte_eth_event_enqueue_buffer *buf = NULL; > uint32_t wrr_pos; > uint32_t max_nb_rx; > > wrr_pos = rx_adapter->wrr_pos; > max_nb_rx = rx_adapter->max_nb_rx; > - buf = &rx_adapter->event_enqueue_buffer; > > /* Iterate through a WRR sequence */ > for (num_queue = 0; num_queue < rx_adapter->wrr_len; num_queue++) { > @@ -1287,24 +1286,36 @@ 
rxa_poll(struct rte_event_eth_rx_adapter *rx_adapter) > uint16_t qid = rx_adapter->eth_rx_poll[poll_idx].eth_rx_qid; > uint16_t d = rx_adapter->eth_rx_poll[poll_idx].eth_dev_id; > > + if (rx_adapter->use_queue_event_buf) { > + struct eth_device_info *dev_info = > + &rx_adapter->eth_devices[d]; > + buf = dev_info->rx_queue[qid].event_buf; > + } else > + buf = &rx_adapter->event_enqueue_buffer; > + > /* Don't do a batch dequeue from the rx queue if there isn't > * enough space in the enqueue buffer. > */ > if (buf->count >= BATCH_SIZE) > - rxa_flush_event_buffer(rx_adapter); > + rxa_flush_event_buffer(rx_adapter, buf); > if (!rxa_pkt_buf_available(buf)) { > - rx_adapter->wrr_pos = wrr_pos; > - return nb_rx; > + if (rx_adapter->use_queue_event_buf) > + goto poll_next_entry; > + else { > + rx_adapter->wrr_pos = wrr_pos; > + return nb_rx; > + } > } > > nb_rx += rxa_eth_rx(rx_adapter, d, qid, nb_rx, max_nb_rx, > - NULL); > + NULL, buf); > if (nb_rx > max_nb_rx) { > rx_adapter->wrr_pos = > (wrr_pos + 1) % rx_adapter->wrr_len; > break; > } > > +poll_next_entry: > if (++wrr_pos == rx_adapter->wrr_len) > wrr_pos = 0; > } > @@ -1315,12 +1326,18 @@ static void > rxa_vector_expire(struct eth_rx_vector_data *vec, void *arg) > { > struct rte_event_eth_rx_adapter *rx_adapter = arg; > - struct rte_eth_event_enqueue_buffer *buf = > - &rx_adapter->event_enqueue_buffer; > + struct rte_eth_event_enqueue_buffer *buf = NULL; > struct rte_event *ev; > > + if (rx_adapter->use_queue_event_buf) { > + struct eth_device_info *dev_info = > + &rx_adapter->eth_devices[vec->port]; > + buf = dev_info->rx_queue[vec->queue].event_buf; > + } else > + buf = &rx_adapter->event_enqueue_buffer; > + The above code to get the buffer can be made an inline function since it is needed in more than one place. 
> if (buf->count) > - rxa_flush_event_buffer(rx_adapter); > + rxa_flush_event_buffer(rx_adapter, buf); > > if (vec->vector_ev->nb_elem == 0) > return; > @@ -1947,9 +1964,16 @@ rxa_sw_del(struct rte_event_eth_rx_adapter *rx_adapter, > rx_adapter->num_rx_intr -= intrq; > dev_info->nb_rx_intr -= intrq; > dev_info->nb_shared_intr -= intrq && sintrq; > + if (rx_adapter->use_queue_event_buf) { > + struct rte_eth_event_enqueue_buffer *event_buf = > + dev_info->rx_queue[rx_queue_id].event_buf; > + rte_free(event_buf->events); > + rte_free(event_buf); > + dev_info->rx_queue[rx_queue_id].event_buf = NULL; > + } > } > > -static void > +static int > rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter, > struct eth_device_info *dev_info, > int32_t rx_queue_id, > @@ -1961,15 +1985,21 @@ rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter, > int intrq; > int sintrq; > struct rte_event *qi_ev; > + struct rte_eth_event_enqueue_buffer *new_rx_buf = NULL; > + uint16_t eth_dev_id = dev_info->dev->data->port_id; > + int ret; > > if (rx_queue_id == -1) { > uint16_t nb_rx_queues; > uint16_t i; > > nb_rx_queues = dev_info->dev->data->nb_rx_queues; > - for (i = 0; i < nb_rx_queues; i++) > - rxa_add_queue(rx_adapter, dev_info, i, conf); > - return; > + for (i = 0; i < nb_rx_queues; i++) { > + ret = rxa_add_queue(rx_adapter, dev_info, i, conf); > + if (ret) > + return ret; > + } > + return 0; > } > > pollq = rxa_polled_queue(dev_info, rx_queue_id); > @@ -2032,6 +2062,37 @@ rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter, > dev_info->next_q_idx = 0; > } > } > + > + if (!rx_adapter->use_queue_event_buf) > + return 0; > + > + new_rx_buf = rte_zmalloc_socket("rx_buffer_meta", > + sizeof(*new_rx_buf), 0, > + rte_eth_dev_socket_id(eth_dev_id)); > + if (new_rx_buf == NULL) { > + RTE_EDEV_LOG_ERR("Failed to allocate event buffer meta for " > + "dev_id: %d queue_id: %d", > + eth_dev_id, rx_queue_id); > + return -ENOMEM; > + } > + > + new_rx_buf->events_size = 
RTE_ALIGN(conf->event_buf_size, BATCH_SIZE); > + new_rx_buf->events_size += (2 * BATCH_SIZE); > + new_rx_buf->events = rte_zmalloc_socket("rx_buffer", > + sizeof(struct rte_event) * > + new_rx_buf->events_size, 0, > + rte_eth_dev_socket_id(eth_dev_id)); > + if (new_rx_buf->events == NULL) { > + rte_free(new_rx_buf); > + RTE_EDEV_LOG_ERR("Failed to allocate event buffer for " > + "dev_id: %d queue_id: %d", > + eth_dev_id, rx_queue_id); > + return -ENOMEM; > + } > + > + queue_info->event_buf = new_rx_buf; > + > + return 0; > } > > static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter, > @@ -2060,6 +2121,16 @@ static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter, > temp_conf.servicing_weight = 1; > } > queue_conf = &temp_conf; > + > + if (queue_conf->servicing_weight == 0 && > + rx_adapter->use_queue_event_buf) { > + > + RTE_EDEV_LOG_ERR("Use of queue level event buffer " > + "not supported for interrupt queues " > + "dev_id: %d queue_id: %d", > + eth_dev_id, rx_queue_id); > + return -EINVAL; > + } > } > > nb_rx_queues = dev_info->dev->data->nb_rx_queues; > @@ -2139,7 +2210,9 @@ static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter, > > > > - rxa_add_queue(rx_adapter, dev_info, rx_queue_id, queue_conf); > + ret = rxa_add_queue(rx_adapter, dev_info, rx_queue_id, queue_conf); > + if (ret) > + goto err_free_rxqueue; > rxa_calc_wrr_sequence(rx_adapter, rx_poll, rx_wrr); > > rte_free(rx_adapter->eth_rx_poll); > @@ -2160,7 +2233,7 @@ static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter, > rte_free(rx_poll); > rte_free(rx_wrr); > > - return 0; > + return ret; > } > > static int > @@ -2286,20 +2359,26 @@ rxa_create(uint8_t id, uint8_t dev_id, > rx_adapter->eth_devices[i].dev = &rte_eth_devices[i]; > > /* Rx adapter event buffer allocation */ > - buf = &rx_adapter->event_enqueue_buffer; > - buf->events_size = RTE_ALIGN(rxa_params->event_buf_size, BATCH_SIZE); > - > - events = rte_zmalloc_socket(rx_adapter->mem_name, > - 
buf->events_size * sizeof(*events), > - 0, socket_id); > - if (events == NULL) { > - RTE_EDEV_LOG_ERR("Failed to allocate mem for event buffer\n"); > - rte_free(rx_adapter->eth_devices); > - rte_free(rx_adapter); > - return -ENOMEM; > - } > + rx_adapter->use_queue_event_buf = rxa_params->use_queue_event_buf; > + > + if (!rx_adapter->use_queue_event_buf) { > + buf = &rx_adapter->event_enqueue_buffer; > + buf->events_size = RTE_ALIGN(rxa_params->event_buf_size, > + BATCH_SIZE); > + > + events = rte_zmalloc_socket(rx_adapter->mem_name, > + buf->events_size * sizeof(*events), > + 0, socket_id); > + if (events == NULL) { > + RTE_EDEV_LOG_ERR("Failed to allocate memory " > + "for adapter event buffer"); > + rte_free(rx_adapter->eth_devices); > + rte_free(rx_adapter); > + return -ENOMEM; > + } > > - rx_adapter->event_enqueue_buffer.events = events; > + rx_adapter->event_enqueue_buffer.events = events; > + } > > event_eth_rx_adapter[id] = rx_adapter; > > @@ -2327,6 +2406,7 @@ rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, > > /* use default values for adapter params */ > rxa_params.event_buf_size = ETH_EVENT_BUFFER_SIZE; > + rxa_params.use_queue_event_buf = false; > > return rxa_create(id, dev_id, &rxa_params, conf_cb, conf_arg); > } > @@ -2347,9 +2427,9 @@ rte_event_eth_rx_adapter_create_with_params(uint8_t id, uint8_t dev_id, > if (rxa_params == NULL) { > rxa_params = &temp_params; > rxa_params->event_buf_size = ETH_EVENT_BUFFER_SIZE; > - } > - > - if (rxa_params->event_buf_size == 0) > + rxa_params->use_queue_event_buf = false; > + } else if ((!rxa_params->use_queue_event_buf && > + rxa_params->event_buf_size == 0)) > return -EINVAL; > > pc = rte_malloc(NULL, sizeof(*pc), 0); > @@ -2418,7 +2498,8 @@ rte_event_eth_rx_adapter_free(uint8_t id) > if (rx_adapter->default_cb_arg) > rte_free(rx_adapter->conf_arg); > rte_free(rx_adapter->eth_devices); > - rte_free(rx_adapter->event_enqueue_buffer.events); > + if (!rx_adapter->use_queue_event_buf) > + 
rte_free(rx_adapter->event_enqueue_buffer.events); > rte_free(rx_adapter); > event_eth_rx_adapter[id] = NULL; > > @@ -2522,6 +2603,14 @@ rte_event_eth_rx_adapter_queue_add(uint8_t id, > return -EINVAL; > } > > + if ((rx_adapter->use_queue_event_buf && > + queue_conf->event_buf_size == 0) || > + (!rx_adapter->use_queue_event_buf && > + queue_conf->event_buf_size != 0)) { > + RTE_EDEV_LOG_ERR("Invalid Event buffer size for the queue"); > + return -EINVAL; > + } > + Another error case is configuring both - rx_adapter->use_queue_event_buf = true and queue_conf->event_buf_size != 0. > dev_info = &rx_adapter->eth_devices[eth_dev_id]; > > if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) { > -- > 2.25.1 ^ permalink raw reply [flat|nested] 81+ messages in thread
* Re: [dpdk-dev] [PATCH v5 4/5] eventdev/rx_adapter: implement per queue event buffer 2021-10-05 7:55 ` Jayatheerthan, Jay @ 2021-10-05 14:47 ` Naga Harish K, S V 2021-10-05 15:01 ` Jayatheerthan, Jay 0 siblings, 1 reply; 81+ messages in thread From: Naga Harish K, S V @ 2021-10-05 14:47 UTC (permalink / raw) To: Jayatheerthan, Jay, jerinj; +Cc: dev Hi Jay, > -----Original Message----- > From: Jayatheerthan, Jay <jay.jayatheerthan@intel.com> > Sent: Tuesday, October 5, 2021 1:26 PM > To: Naga Harish K, S V <s.v.naga.harish.k@intel.com>; jerinj@marvell.com > Cc: dev@dpdk.org > Subject: RE: [PATCH v5 4/5] eventdev/rx_adapter: implement per queue > event buffer > > > -----Original Message----- > > From: Naga Harish K, S V <s.v.naga.harish.k@intel.com> > > Sent: Monday, October 4, 2021 11:11 AM > > To: jerinj@marvell.com; Jayatheerthan, Jay > > <jay.jayatheerthan@intel.com> > > Cc: dev@dpdk.org > > Subject: [PATCH v5 4/5] eventdev/rx_adapter: implement per queue > event > > buffer > > > > this patch implement the per queue event buffer with required > > validations. 
> > > > Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com> > > --- > > lib/eventdev/rte_event_eth_rx_adapter.c | 187 > > +++++++++++++++++------- > > 1 file changed, 138 insertions(+), 49 deletions(-) > > > > diff --git a/lib/eventdev/rte_event_eth_rx_adapter.c > > b/lib/eventdev/rte_event_eth_rx_adapter.c > > index 606db241b8..b61af0e75e 100644 > > --- a/lib/eventdev/rte_event_eth_rx_adapter.c > > +++ b/lib/eventdev/rte_event_eth_rx_adapter.c > > @@ -102,10 +102,12 @@ struct rte_event_eth_rx_adapter { > > uint8_t rss_key_be[RSS_KEY_SIZE]; > > /* Event device identifier */ > > uint8_t eventdev_id; > > - /* Per ethernet device structure */ > > - struct eth_device_info *eth_devices; > > /* Event port identifier */ > > uint8_t event_port_id; > > + /* Flag indicating per rxq event buffer */ > > + bool use_queue_event_buf; > > + /* Per ethernet device structure */ > > + struct eth_device_info *eth_devices; > > /* Lock to serialize config updates with service function */ > > rte_spinlock_t rx_lock; > > /* Max mbufs processed in any service function invocation */ @@ > > -241,6 +243,7 @@ struct eth_rx_queue_info { > > uint32_t flow_id_mask; /* Set to ~0 if app provides flow id > else 0 */ > > uint64_t event; > > struct eth_rx_vector_data vector_data; > > + struct rte_eth_event_enqueue_buffer *event_buf; > > }; > > > > static struct rte_event_eth_rx_adapter **event_eth_rx_adapter; @@ > > -767,10 +770,9 @@ rxa_enq_block_end_ts(struct > rte_event_eth_rx_adapter > > *rx_adapter, > > > > /* Enqueue buffered events to event device */ static inline uint16_t > > -rxa_flush_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter) > > +rxa_flush_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter, > > + struct rte_eth_event_enqueue_buffer *buf) > > { > > - struct rte_eth_event_enqueue_buffer *buf = > > - &rx_adapter->event_enqueue_buffer; > > struct rte_event_eth_rx_adapter_stats *stats = &rx_adapter->stats; > > uint16_t count = buf->last ? 
buf->last - buf->head : buf->count; > > > > @@ -888,15 +890,14 @@ rxa_buffer_mbufs(struct > rte_event_eth_rx_adapter *rx_adapter, > > uint16_t eth_dev_id, > > uint16_t rx_queue_id, > > struct rte_mbuf **mbufs, > > - uint16_t num) > > + uint16_t num, > > + struct rte_eth_event_enqueue_buffer *buf) > > { > > uint32_t i; > > struct eth_device_info *dev_info = > > &rx_adapter- > >eth_devices[eth_dev_id]; > > struct eth_rx_queue_info *eth_rx_queue_info = > > &dev_info- > >rx_queue[rx_queue_id]; > > - struct rte_eth_event_enqueue_buffer *buf = > > - &rx_adapter- > >event_enqueue_buffer; > > uint16_t new_tail = buf->tail; > > uint64_t event = eth_rx_queue_info->event; > > uint32_t flow_id_mask = eth_rx_queue_info->flow_id_mask; @@ - > 995,11 > > +996,10 @@ rxa_eth_rx(struct rte_event_eth_rx_adapter *rx_adapter, > > uint16_t queue_id, > > uint32_t rx_count, > > uint32_t max_rx, > > - int *rxq_empty) > > + int *rxq_empty, > > + struct rte_eth_event_enqueue_buffer *buf) > > { > > struct rte_mbuf *mbufs[BATCH_SIZE]; > > - struct rte_eth_event_enqueue_buffer *buf = > > - &rx_adapter- > >event_enqueue_buffer; > > struct rte_event_eth_rx_adapter_stats *stats = > > &rx_adapter->stats; > > uint16_t n; > > @@ -1012,7 +1012,7 @@ rxa_eth_rx(struct rte_event_eth_rx_adapter > *rx_adapter, > > */ > > while (rxa_pkt_buf_available(buf)) { > > if (buf->count >= BATCH_SIZE) > > - rxa_flush_event_buffer(rx_adapter); > > + rxa_flush_event_buffer(rx_adapter, buf); > > > > stats->rx_poll_count++; > > n = rte_eth_rx_burst(port_id, queue_id, mbufs, > BATCH_SIZE); @@ > > -1021,14 +1021,14 @@ rxa_eth_rx(struct rte_event_eth_rx_adapter > *rx_adapter, > > *rxq_empty = 1; > > break; > > } > > - rxa_buffer_mbufs(rx_adapter, port_id, queue_id, mbufs, n); > > + rxa_buffer_mbufs(rx_adapter, port_id, queue_id, mbufs, n, > buf); > > nb_rx += n; > > if (rx_count + nb_rx > max_rx) > > break; > > } > > > > if (buf->count > 0) > > - rxa_flush_event_buffer(rx_adapter); > > + rxa_flush_event_buffer(rx_adapter, 
buf); > > > > return nb_rx; > > } > > @@ -1169,7 +1169,7 @@ rxa_intr_ring_dequeue(struct > rte_event_eth_rx_adapter *rx_adapter) > > ring_lock = &rx_adapter->intr_ring_lock; > > > > if (buf->count >= BATCH_SIZE) > > - rxa_flush_event_buffer(rx_adapter); > > + rxa_flush_event_buffer(rx_adapter, buf); > > > > while (rxa_pkt_buf_available(buf)) { > > struct eth_device_info *dev_info; > > @@ -1221,7 +1221,7 @@ rxa_intr_ring_dequeue(struct > rte_event_eth_rx_adapter *rx_adapter) > > continue; > > n = rxa_eth_rx(rx_adapter, port, i, nb_rx, > > rx_adapter->max_nb_rx, > > - &rxq_empty); > > + &rxq_empty, buf); > > nb_rx += n; > > > > enq_buffer_full = !rxq_empty && n == 0; > @@ -1242,7 +1242,7 @@ > > rxa_intr_ring_dequeue(struct rte_event_eth_rx_adapter *rx_adapter) > > } else { > > n = rxa_eth_rx(rx_adapter, port, queue, nb_rx, > > rx_adapter->max_nb_rx, > > - &rxq_empty); > > + &rxq_empty, buf); > > rx_adapter->qd_valid = !rxq_empty; > > nb_rx += n; > > if (nb_rx > rx_adapter->max_nb_rx) @@ -1273,13 > +1273,12 @@ > > rxa_poll(struct rte_event_eth_rx_adapter *rx_adapter) { > > uint32_t num_queue; > > uint32_t nb_rx = 0; > > - struct rte_eth_event_enqueue_buffer *buf; > > + struct rte_eth_event_enqueue_buffer *buf = NULL; > > uint32_t wrr_pos; > > uint32_t max_nb_rx; > > > > wrr_pos = rx_adapter->wrr_pos; > > max_nb_rx = rx_adapter->max_nb_rx; > > - buf = &rx_adapter->event_enqueue_buffer; > > > > /* Iterate through a WRR sequence */ > > for (num_queue = 0; num_queue < rx_adapter->wrr_len; > num_queue++) { > > @@ -1287,24 +1286,36 @@ rxa_poll(struct rte_event_eth_rx_adapter > *rx_adapter) > > uint16_t qid = rx_adapter->eth_rx_poll[poll_idx].eth_rx_qid; > > uint16_t d = rx_adapter->eth_rx_poll[poll_idx].eth_dev_id; > > > > + if (rx_adapter->use_queue_event_buf) { > > + struct eth_device_info *dev_info = > > + &rx_adapter->eth_devices[d]; > > + buf = dev_info->rx_queue[qid].event_buf; > > + } else > > + buf = &rx_adapter->event_enqueue_buffer; > > + > > /* Don't do a batch 
dequeue from the rx queue if there isn't > > * enough space in the enqueue buffer. > > */ > > if (buf->count >= BATCH_SIZE) > > - rxa_flush_event_buffer(rx_adapter); > > + rxa_flush_event_buffer(rx_adapter, buf); > > if (!rxa_pkt_buf_available(buf)) { > > - rx_adapter->wrr_pos = wrr_pos; > > - return nb_rx; > > + if (rx_adapter->use_queue_event_buf) > > + goto poll_next_entry; > > + else { > > + rx_adapter->wrr_pos = wrr_pos; > > + return nb_rx; > > + } > > } > > > > nb_rx += rxa_eth_rx(rx_adapter, d, qid, nb_rx, max_nb_rx, > > - NULL); > > + NULL, buf); > > if (nb_rx > max_nb_rx) { > > rx_adapter->wrr_pos = > > (wrr_pos + 1) % rx_adapter->wrr_len; > > break; > > } > > > > +poll_next_entry: > > if (++wrr_pos == rx_adapter->wrr_len) > > wrr_pos = 0; > > } > > @@ -1315,12 +1326,18 @@ static void > > rxa_vector_expire(struct eth_rx_vector_data *vec, void *arg) { > > struct rte_event_eth_rx_adapter *rx_adapter = arg; > > - struct rte_eth_event_enqueue_buffer *buf = > > - &rx_adapter->event_enqueue_buffer; > > + struct rte_eth_event_enqueue_buffer *buf = NULL; > > struct rte_event *ev; > > > > + if (rx_adapter->use_queue_event_buf) { > > + struct eth_device_info *dev_info = > > + &rx_adapter->eth_devices[vec->port]; > > + buf = dev_info->rx_queue[vec->queue].event_buf; > > + } else > > + buf = &rx_adapter->event_enqueue_buffer; > > + > > The above code to get the buffer can be made an inline function since it is > needed in more than one place. Added new inline function to get event buffer pointer in v6 patch set. 
> > > if (buf->count) > > - rxa_flush_event_buffer(rx_adapter); > > + rxa_flush_event_buffer(rx_adapter, buf); > > > > if (vec->vector_ev->nb_elem == 0) > > return; > > @@ -1947,9 +1964,16 @@ rxa_sw_del(struct rte_event_eth_rx_adapter > *rx_adapter, > > rx_adapter->num_rx_intr -= intrq; > > dev_info->nb_rx_intr -= intrq; > > dev_info->nb_shared_intr -= intrq && sintrq; > > + if (rx_adapter->use_queue_event_buf) { > > + struct rte_eth_event_enqueue_buffer *event_buf = > > + dev_info->rx_queue[rx_queue_id].event_buf; > > + rte_free(event_buf->events); > > + rte_free(event_buf); > > + dev_info->rx_queue[rx_queue_id].event_buf = NULL; > > + } > > } > > > > -static void > > +static int > > rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter, > > struct eth_device_info *dev_info, > > int32_t rx_queue_id, > > @@ -1961,15 +1985,21 @@ rxa_add_queue(struct > rte_event_eth_rx_adapter *rx_adapter, > > int intrq; > > int sintrq; > > struct rte_event *qi_ev; > > + struct rte_eth_event_enqueue_buffer *new_rx_buf = NULL; > > + uint16_t eth_dev_id = dev_info->dev->data->port_id; > > + int ret; > > > > if (rx_queue_id == -1) { > > uint16_t nb_rx_queues; > > uint16_t i; > > > > nb_rx_queues = dev_info->dev->data->nb_rx_queues; > > - for (i = 0; i < nb_rx_queues; i++) > > - rxa_add_queue(rx_adapter, dev_info, i, conf); > > - return; > > + for (i = 0; i < nb_rx_queues; i++) { > > + ret = rxa_add_queue(rx_adapter, dev_info, i, conf); > > + if (ret) > > + return ret; > > + } > > + return 0; > > } > > > > pollq = rxa_polled_queue(dev_info, rx_queue_id); @@ -2032,6 > +2062,37 > > @@ rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter, > > dev_info->next_q_idx = 0; > > } > > } > > + > > + if (!rx_adapter->use_queue_event_buf) > > + return 0; > > + > > + new_rx_buf = rte_zmalloc_socket("rx_buffer_meta", > > + sizeof(*new_rx_buf), 0, > > + rte_eth_dev_socket_id(eth_dev_id)); > > + if (new_rx_buf == NULL) { > > + RTE_EDEV_LOG_ERR("Failed to allocate event buffer meta > for " > > + 
"dev_id: %d queue_id: %d", > > + eth_dev_id, rx_queue_id); > > + return -ENOMEM; > > + } > > + > > + new_rx_buf->events_size = RTE_ALIGN(conf->event_buf_size, > BATCH_SIZE); > > + new_rx_buf->events_size += (2 * BATCH_SIZE); > > + new_rx_buf->events = rte_zmalloc_socket("rx_buffer", > > + sizeof(struct rte_event) * > > + new_rx_buf->events_size, 0, > > + rte_eth_dev_socket_id(eth_dev_id)); > > + if (new_rx_buf->events == NULL) { > > + rte_free(new_rx_buf); > > + RTE_EDEV_LOG_ERR("Failed to allocate event buffer for " > > + "dev_id: %d queue_id: %d", > > + eth_dev_id, rx_queue_id); > > + return -ENOMEM; > > + } > > + > > + queue_info->event_buf = new_rx_buf; > > + > > + return 0; > > } > > > > static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter, @@ > > -2060,6 +2121,16 @@ static int rxa_sw_add(struct > rte_event_eth_rx_adapter *rx_adapter, > > temp_conf.servicing_weight = 1; > > } > > queue_conf = &temp_conf; > > + > > + if (queue_conf->servicing_weight == 0 && > > + rx_adapter->use_queue_event_buf) { > > + > > + RTE_EDEV_LOG_ERR("Use of queue level event > buffer " > > + "not supported for interrupt queues > " > > + "dev_id: %d queue_id: %d", > > + eth_dev_id, rx_queue_id); > > + return -EINVAL; > > + } > > } > > > > nb_rx_queues = dev_info->dev->data->nb_rx_queues; > > @@ -2139,7 +2210,9 @@ static int rxa_sw_add(struct > > rte_event_eth_rx_adapter *rx_adapter, > > > > > > > > - rxa_add_queue(rx_adapter, dev_info, rx_queue_id, queue_conf); > > + ret = rxa_add_queue(rx_adapter, dev_info, rx_queue_id, > queue_conf); > > + if (ret) > > + goto err_free_rxqueue; > > rxa_calc_wrr_sequence(rx_adapter, rx_poll, rx_wrr); > > > > rte_free(rx_adapter->eth_rx_poll); > > @@ -2160,7 +2233,7 @@ static int rxa_sw_add(struct > rte_event_eth_rx_adapter *rx_adapter, > > rte_free(rx_poll); > > rte_free(rx_wrr); > > > > - return 0; > > + return ret; > > } > > > > static int > > @@ -2286,20 +2359,26 @@ rxa_create(uint8_t id, uint8_t dev_id, > > 
rx_adapter->eth_devices[i].dev = &rte_eth_devices[i]; > > > > /* Rx adapter event buffer allocation */ > > - buf = &rx_adapter->event_enqueue_buffer; > > - buf->events_size = RTE_ALIGN(rxa_params->event_buf_size, > BATCH_SIZE); > > - > > - events = rte_zmalloc_socket(rx_adapter->mem_name, > > - buf->events_size * sizeof(*events), > > - 0, socket_id); > > - if (events == NULL) { > > - RTE_EDEV_LOG_ERR("Failed to allocate mem for event > buffer\n"); > > - rte_free(rx_adapter->eth_devices); > > - rte_free(rx_adapter); > > - return -ENOMEM; > > - } > > + rx_adapter->use_queue_event_buf = rxa_params- > >use_queue_event_buf; > > + > > + if (!rx_adapter->use_queue_event_buf) { > > + buf = &rx_adapter->event_enqueue_buffer; > > + buf->events_size = RTE_ALIGN(rxa_params- > >event_buf_size, > > + BATCH_SIZE); > > + > > + events = rte_zmalloc_socket(rx_adapter->mem_name, > > + buf->events_size * > sizeof(*events), > > + 0, socket_id); > > + if (events == NULL) { > > + RTE_EDEV_LOG_ERR("Failed to allocate memory " > > + "for adapter event buffer"); > > + rte_free(rx_adapter->eth_devices); > > + rte_free(rx_adapter); > > + return -ENOMEM; > > + } > > > > - rx_adapter->event_enqueue_buffer.events = events; > > + rx_adapter->event_enqueue_buffer.events = events; > > + } > > > > event_eth_rx_adapter[id] = rx_adapter; > > > > @@ -2327,6 +2406,7 @@ rte_event_eth_rx_adapter_create_ext(uint8_t > id, > > uint8_t dev_id, > > > > /* use default values for adapter params */ > > rxa_params.event_buf_size = ETH_EVENT_BUFFER_SIZE; > > + rxa_params.use_queue_event_buf = false; > > > > return rxa_create(id, dev_id, &rxa_params, conf_cb, conf_arg); } > @@ > > -2347,9 +2427,9 @@ > rte_event_eth_rx_adapter_create_with_params(uint8_t id, uint8_t dev_id, > > if (rxa_params == NULL) { > > rxa_params = &temp_params; > > rxa_params->event_buf_size = ETH_EVENT_BUFFER_SIZE; > > - } > > - > > - if (rxa_params->event_buf_size == 0) > > + rxa_params->use_queue_event_buf = false; > > + } else if 
((!rxa_params->use_queue_event_buf && > > + rxa_params->event_buf_size == 0)) > > return -EINVAL; > > > > pc = rte_malloc(NULL, sizeof(*pc), 0); @@ -2418,7 +2498,8 @@ > > rte_event_eth_rx_adapter_free(uint8_t id) > > if (rx_adapter->default_cb_arg) > > rte_free(rx_adapter->conf_arg); > > rte_free(rx_adapter->eth_devices); > > - rte_free(rx_adapter->event_enqueue_buffer.events); > > + if (!rx_adapter->use_queue_event_buf) > > + rte_free(rx_adapter->event_enqueue_buffer.events); > > rte_free(rx_adapter); > > event_eth_rx_adapter[id] = NULL; > > > > @@ -2522,6 +2603,14 @@ rte_event_eth_rx_adapter_queue_add(uint8_t > id, > > return -EINVAL; > > } > > > > + if ((rx_adapter->use_queue_event_buf && > > + queue_conf->event_buf_size == 0) || > > + (!rx_adapter->use_queue_event_buf && > > + queue_conf->event_buf_size != 0)) { > > + RTE_EDEV_LOG_ERR("Invalid Event buffer size for the > queue"); > > + return -EINVAL; > > + } > > + > > Another error case is configuring both - rx_adapter->use_queue_event_buf > = true and queue_conf->event_buf_size != 0. This is valid case. > > > dev_info = &rx_adapter->eth_devices[eth_dev_id]; > > > > if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) { > > -- > > 2.25.1 ^ permalink raw reply [flat|nested] 81+ messages in thread
* Re: [dpdk-dev] [PATCH v5 4/5] eventdev/rx_adapter: implement per queue event buffer 2021-10-05 14:47 ` Naga Harish K, S V @ 2021-10-05 15:01 ` Jayatheerthan, Jay 2021-10-06 4:06 ` Naga Harish K, S V 0 siblings, 1 reply; 81+ messages in thread From: Jayatheerthan, Jay @ 2021-10-05 15:01 UTC (permalink / raw) To: Naga Harish K, S V, jerinj; +Cc: dev > -----Original Message----- > From: Naga Harish K, S V <s.v.naga.harish.k@intel.com> > Sent: Tuesday, October 5, 2021 8:18 PM > To: Jayatheerthan, Jay <jay.jayatheerthan@intel.com>; jerinj@marvell.com > Cc: dev@dpdk.org > Subject: RE: [PATCH v5 4/5] eventdev/rx_adapter: implement per queue event buffer > > Hi Jay, > > > -----Original Message----- > > From: Jayatheerthan, Jay <jay.jayatheerthan@intel.com> > > Sent: Tuesday, October 5, 2021 1:26 PM > > To: Naga Harish K, S V <s.v.naga.harish.k@intel.com>; jerinj@marvell.com > > Cc: dev@dpdk.org > > Subject: RE: [PATCH v5 4/5] eventdev/rx_adapter: implement per queue > > event buffer > > > > > -----Original Message----- > > > From: Naga Harish K, S V <s.v.naga.harish.k@intel.com> > > > Sent: Monday, October 4, 2021 11:11 AM > > > To: jerinj@marvell.com; Jayatheerthan, Jay > > > <jay.jayatheerthan@intel.com> > > > Cc: dev@dpdk.org > > > Subject: [PATCH v5 4/5] eventdev/rx_adapter: implement per queue > > event > > > buffer > > > > > > this patch implement the per queue event buffer with required > > > validations. 
> > > > > > Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com> > > > --- > > > lib/eventdev/rte_event_eth_rx_adapter.c | 187 > > > +++++++++++++++++------- > > > 1 file changed, 138 insertions(+), 49 deletions(-) > > > > > > diff --git a/lib/eventdev/rte_event_eth_rx_adapter.c > > > b/lib/eventdev/rte_event_eth_rx_adapter.c > > > index 606db241b8..b61af0e75e 100644 > > > --- a/lib/eventdev/rte_event_eth_rx_adapter.c > > > +++ b/lib/eventdev/rte_event_eth_rx_adapter.c > > > @@ -102,10 +102,12 @@ struct rte_event_eth_rx_adapter { > > > uint8_t rss_key_be[RSS_KEY_SIZE]; > > > /* Event device identifier */ > > > uint8_t eventdev_id; > > > - /* Per ethernet device structure */ > > > - struct eth_device_info *eth_devices; > > > /* Event port identifier */ > > > uint8_t event_port_id; > > > + /* Flag indicating per rxq event buffer */ > > > + bool use_queue_event_buf; > > > + /* Per ethernet device structure */ > > > + struct eth_device_info *eth_devices; > > > /* Lock to serialize config updates with service function */ > > > rte_spinlock_t rx_lock; > > > /* Max mbufs processed in any service function invocation */ @@ > > > -241,6 +243,7 @@ struct eth_rx_queue_info { > > > uint32_t flow_id_mask; /* Set to ~0 if app provides flow id > > else 0 */ > > > uint64_t event; > > > struct eth_rx_vector_data vector_data; > > > + struct rte_eth_event_enqueue_buffer *event_buf; > > > }; > > > > > > static struct rte_event_eth_rx_adapter **event_eth_rx_adapter; @@ > > > -767,10 +770,9 @@ rxa_enq_block_end_ts(struct > > rte_event_eth_rx_adapter > > > *rx_adapter, > > > > > > /* Enqueue buffered events to event device */ static inline uint16_t > > > -rxa_flush_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter) > > > +rxa_flush_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter, > > > + struct rte_eth_event_enqueue_buffer *buf) > > > { > > > - struct rte_eth_event_enqueue_buffer *buf = > > > - &rx_adapter->event_enqueue_buffer; > > > struct 
rte_event_eth_rx_adapter_stats *stats = &rx_adapter->stats; > > > uint16_t count = buf->last ? buf->last - buf->head : buf->count; > > > > > > @@ -888,15 +890,14 @@ rxa_buffer_mbufs(struct > > rte_event_eth_rx_adapter *rx_adapter, > > > uint16_t eth_dev_id, > > > uint16_t rx_queue_id, > > > struct rte_mbuf **mbufs, > > > - uint16_t num) > > > + uint16_t num, > > > + struct rte_eth_event_enqueue_buffer *buf) > > > { > > > uint32_t i; > > > struct eth_device_info *dev_info = > > > &rx_adapter- > > >eth_devices[eth_dev_id]; > > > struct eth_rx_queue_info *eth_rx_queue_info = > > > &dev_info- > > >rx_queue[rx_queue_id]; > > > - struct rte_eth_event_enqueue_buffer *buf = > > > - &rx_adapter- > > >event_enqueue_buffer; > > > uint16_t new_tail = buf->tail; > > > uint64_t event = eth_rx_queue_info->event; > > > uint32_t flow_id_mask = eth_rx_queue_info->flow_id_mask; @@ - > > 995,11 > > > +996,10 @@ rxa_eth_rx(struct rte_event_eth_rx_adapter *rx_adapter, > > > uint16_t queue_id, > > > uint32_t rx_count, > > > uint32_t max_rx, > > > - int *rxq_empty) > > > + int *rxq_empty, > > > + struct rte_eth_event_enqueue_buffer *buf) > > > { > > > struct rte_mbuf *mbufs[BATCH_SIZE]; > > > - struct rte_eth_event_enqueue_buffer *buf = > > > - &rx_adapter- > > >event_enqueue_buffer; > > > struct rte_event_eth_rx_adapter_stats *stats = > > > &rx_adapter->stats; > > > uint16_t n; > > > @@ -1012,7 +1012,7 @@ rxa_eth_rx(struct rte_event_eth_rx_adapter > > *rx_adapter, > > > */ > > > while (rxa_pkt_buf_available(buf)) { > > > if (buf->count >= BATCH_SIZE) > > > - rxa_flush_event_buffer(rx_adapter); > > > + rxa_flush_event_buffer(rx_adapter, buf); > > > > > > stats->rx_poll_count++; > > > n = rte_eth_rx_burst(port_id, queue_id, mbufs, > > BATCH_SIZE); @@ > > > -1021,14 +1021,14 @@ rxa_eth_rx(struct rte_event_eth_rx_adapter > > *rx_adapter, > > > *rxq_empty = 1; > > > break; > > > } > > > - rxa_buffer_mbufs(rx_adapter, port_id, queue_id, mbufs, n); > > > + rxa_buffer_mbufs(rx_adapter, port_id, 
queue_id, mbufs, n, > > buf); > > > nb_rx += n; > > > if (rx_count + nb_rx > max_rx) > > > break; > > > } > > > > > > if (buf->count > 0) > > > - rxa_flush_event_buffer(rx_adapter); > > > + rxa_flush_event_buffer(rx_adapter, buf); > > > > > > return nb_rx; > > > } > > > @@ -1169,7 +1169,7 @@ rxa_intr_ring_dequeue(struct > > rte_event_eth_rx_adapter *rx_adapter) > > > ring_lock = &rx_adapter->intr_ring_lock; > > > > > > if (buf->count >= BATCH_SIZE) > > > - rxa_flush_event_buffer(rx_adapter); > > > + rxa_flush_event_buffer(rx_adapter, buf); > > > > > > while (rxa_pkt_buf_available(buf)) { > > > struct eth_device_info *dev_info; > > > @@ -1221,7 +1221,7 @@ rxa_intr_ring_dequeue(struct > > rte_event_eth_rx_adapter *rx_adapter) > > > continue; > > > n = rxa_eth_rx(rx_adapter, port, i, nb_rx, > > > rx_adapter->max_nb_rx, > > > - &rxq_empty); > > > + &rxq_empty, buf); > > > nb_rx += n; > > > > > > enq_buffer_full = !rxq_empty && n == 0; > > @@ -1242,7 +1242,7 @@ > > > rxa_intr_ring_dequeue(struct rte_event_eth_rx_adapter *rx_adapter) > > > } else { > > > n = rxa_eth_rx(rx_adapter, port, queue, nb_rx, > > > rx_adapter->max_nb_rx, > > > - &rxq_empty); > > > + &rxq_empty, buf); > > > rx_adapter->qd_valid = !rxq_empty; > > > nb_rx += n; > > > if (nb_rx > rx_adapter->max_nb_rx) @@ -1273,13 > > +1273,12 @@ > > > rxa_poll(struct rte_event_eth_rx_adapter *rx_adapter) { > > > uint32_t num_queue; > > > uint32_t nb_rx = 0; > > > - struct rte_eth_event_enqueue_buffer *buf; > > > + struct rte_eth_event_enqueue_buffer *buf = NULL; > > > uint32_t wrr_pos; > > > uint32_t max_nb_rx; > > > > > > wrr_pos = rx_adapter->wrr_pos; > > > max_nb_rx = rx_adapter->max_nb_rx; > > > - buf = &rx_adapter->event_enqueue_buffer; > > > > > > /* Iterate through a WRR sequence */ > > > for (num_queue = 0; num_queue < rx_adapter->wrr_len; > > num_queue++) { > > > @@ -1287,24 +1286,36 @@ rxa_poll(struct rte_event_eth_rx_adapter > > *rx_adapter) > > > uint16_t qid = 
rx_adapter->eth_rx_poll[poll_idx].eth_rx_qid; > > > uint16_t d = rx_adapter->eth_rx_poll[poll_idx].eth_dev_id; > > > > > > + if (rx_adapter->use_queue_event_buf) { > > > + struct eth_device_info *dev_info = > > > + &rx_adapter->eth_devices[d]; > > > + buf = dev_info->rx_queue[qid].event_buf; > > > + } else > > > + buf = &rx_adapter->event_enqueue_buffer; > > > + > > > /* Don't do a batch dequeue from the rx queue if there isn't > > > * enough space in the enqueue buffer. > > > */ > > > if (buf->count >= BATCH_SIZE) > > > - rxa_flush_event_buffer(rx_adapter); > > > + rxa_flush_event_buffer(rx_adapter, buf); > > > if (!rxa_pkt_buf_available(buf)) { > > > - rx_adapter->wrr_pos = wrr_pos; > > > - return nb_rx; > > > + if (rx_adapter->use_queue_event_buf) > > > + goto poll_next_entry; > > > + else { > > > + rx_adapter->wrr_pos = wrr_pos; > > > + return nb_rx; > > > + } > > > } > > > > > > nb_rx += rxa_eth_rx(rx_adapter, d, qid, nb_rx, max_nb_rx, > > > - NULL); > > > + NULL, buf); > > > if (nb_rx > max_nb_rx) { > > > rx_adapter->wrr_pos = > > > (wrr_pos + 1) % rx_adapter->wrr_len; > > > break; > > > } > > > > > > +poll_next_entry: > > > if (++wrr_pos == rx_adapter->wrr_len) > > > wrr_pos = 0; > > > } > > > @@ -1315,12 +1326,18 @@ static void > > > rxa_vector_expire(struct eth_rx_vector_data *vec, void *arg) { > > > struct rte_event_eth_rx_adapter *rx_adapter = arg; > > > - struct rte_eth_event_enqueue_buffer *buf = > > > - &rx_adapter->event_enqueue_buffer; > > > + struct rte_eth_event_enqueue_buffer *buf = NULL; > > > struct rte_event *ev; > > > > > > + if (rx_adapter->use_queue_event_buf) { > > > + struct eth_device_info *dev_info = > > > + &rx_adapter->eth_devices[vec->port]; > > > + buf = dev_info->rx_queue[vec->queue].event_buf; > > > + } else > > > + buf = &rx_adapter->event_enqueue_buffer; > > > + > > > > The above code to get the buffer can be made an inline function since it is > > needed in more than one place. 
> > Added new inline function to get event buffer pointer in v6 patch set. > > > > > > if (buf->count) > > > - rxa_flush_event_buffer(rx_adapter); > > > + rxa_flush_event_buffer(rx_adapter, buf); > > > > > > if (vec->vector_ev->nb_elem == 0) > > > return; > > > @@ -1947,9 +1964,16 @@ rxa_sw_del(struct rte_event_eth_rx_adapter > > *rx_adapter, > > > rx_adapter->num_rx_intr -= intrq; > > > dev_info->nb_rx_intr -= intrq; > > > dev_info->nb_shared_intr -= intrq && sintrq; > > > + if (rx_adapter->use_queue_event_buf) { > > > + struct rte_eth_event_enqueue_buffer *event_buf = > > > + dev_info->rx_queue[rx_queue_id].event_buf; > > > + rte_free(event_buf->events); > > > + rte_free(event_buf); > > > + dev_info->rx_queue[rx_queue_id].event_buf = NULL; > > > + } > > > } > > > > > > -static void > > > +static int > > > rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter, > > > struct eth_device_info *dev_info, > > > int32_t rx_queue_id, > > > @@ -1961,15 +1985,21 @@ rxa_add_queue(struct > > rte_event_eth_rx_adapter *rx_adapter, > > > int intrq; > > > int sintrq; > > > struct rte_event *qi_ev; > > > + struct rte_eth_event_enqueue_buffer *new_rx_buf = NULL; > > > + uint16_t eth_dev_id = dev_info->dev->data->port_id; > > > + int ret; > > > > > > if (rx_queue_id == -1) { > > > uint16_t nb_rx_queues; > > > uint16_t i; > > > > > > nb_rx_queues = dev_info->dev->data->nb_rx_queues; > > > - for (i = 0; i < nb_rx_queues; i++) > > > - rxa_add_queue(rx_adapter, dev_info, i, conf); > > > - return; > > > + for (i = 0; i < nb_rx_queues; i++) { > > > + ret = rxa_add_queue(rx_adapter, dev_info, i, conf); > > > + if (ret) > > > + return ret; > > > + } > > > + return 0; > > > } > > > > > > pollq = rxa_polled_queue(dev_info, rx_queue_id); @@ -2032,6 > > +2062,37 > > > @@ rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter, > > > dev_info->next_q_idx = 0; > > > } > > > } > > > + > > > + if (!rx_adapter->use_queue_event_buf) > > > + return 0; > > > + > > > + new_rx_buf = 
rte_zmalloc_socket("rx_buffer_meta", > > > + sizeof(*new_rx_buf), 0, > > > + rte_eth_dev_socket_id(eth_dev_id)); > > > + if (new_rx_buf == NULL) { > > > + RTE_EDEV_LOG_ERR("Failed to allocate event buffer meta > > for " > > > + "dev_id: %d queue_id: %d", > > > + eth_dev_id, rx_queue_id); > > > + return -ENOMEM; > > > + } > > > + > > > + new_rx_buf->events_size = RTE_ALIGN(conf->event_buf_size, > > BATCH_SIZE); > > > + new_rx_buf->events_size += (2 * BATCH_SIZE); > > > + new_rx_buf->events = rte_zmalloc_socket("rx_buffer", > > > + sizeof(struct rte_event) * > > > + new_rx_buf->events_size, 0, > > > + rte_eth_dev_socket_id(eth_dev_id)); > > > + if (new_rx_buf->events == NULL) { > > > + rte_free(new_rx_buf); > > > + RTE_EDEV_LOG_ERR("Failed to allocate event buffer for " > > > + "dev_id: %d queue_id: %d", > > > + eth_dev_id, rx_queue_id); > > > + return -ENOMEM; > > > + } > > > + > > > + queue_info->event_buf = new_rx_buf; > > > + > > > + return 0; > > > } > > > > > > static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter, @@ > > > -2060,6 +2121,16 @@ static int rxa_sw_add(struct > > rte_event_eth_rx_adapter *rx_adapter, > > > temp_conf.servicing_weight = 1; > > > } > > > queue_conf = &temp_conf; > > > + > > > + if (queue_conf->servicing_weight == 0 && > > > + rx_adapter->use_queue_event_buf) { > > > + > > > + RTE_EDEV_LOG_ERR("Use of queue level event > > buffer " > > > + "not supported for interrupt queues > > " > > > + "dev_id: %d queue_id: %d", > > > + eth_dev_id, rx_queue_id); > > > + return -EINVAL; > > > + } > > > } > > > > > > nb_rx_queues = dev_info->dev->data->nb_rx_queues; > > > @@ -2139,7 +2210,9 @@ static int rxa_sw_add(struct > > > rte_event_eth_rx_adapter *rx_adapter, > > > > > > > > > > > > - rxa_add_queue(rx_adapter, dev_info, rx_queue_id, queue_conf); > > > + ret = rxa_add_queue(rx_adapter, dev_info, rx_queue_id, > > queue_conf); > > > + if (ret) > > > + goto err_free_rxqueue; > > > rxa_calc_wrr_sequence(rx_adapter, rx_poll, rx_wrr); > > > 
> > > rte_free(rx_adapter->eth_rx_poll); > > > @@ -2160,7 +2233,7 @@ static int rxa_sw_add(struct > > rte_event_eth_rx_adapter *rx_adapter, > > > rte_free(rx_poll); > > > rte_free(rx_wrr); > > > > > > - return 0; > > > + return ret; > > > } > > > > > > static int > > > @@ -2286,20 +2359,26 @@ rxa_create(uint8_t id, uint8_t dev_id, > > > rx_adapter->eth_devices[i].dev = &rte_eth_devices[i]; > > > > > > /* Rx adapter event buffer allocation */ > > > - buf = &rx_adapter->event_enqueue_buffer; > > > - buf->events_size = RTE_ALIGN(rxa_params->event_buf_size, > > BATCH_SIZE); > > > - > > > - events = rte_zmalloc_socket(rx_adapter->mem_name, > > > - buf->events_size * sizeof(*events), > > > - 0, socket_id); > > > - if (events == NULL) { > > > - RTE_EDEV_LOG_ERR("Failed to allocate mem for event > > buffer\n"); > > > - rte_free(rx_adapter->eth_devices); > > > - rte_free(rx_adapter); > > > - return -ENOMEM; > > > - } > > > + rx_adapter->use_queue_event_buf = rxa_params- > > >use_queue_event_buf; > > > + > > > + if (!rx_adapter->use_queue_event_buf) { > > > + buf = &rx_adapter->event_enqueue_buffer; > > > + buf->events_size = RTE_ALIGN(rxa_params- > > >event_buf_size, > > > + BATCH_SIZE); > > > + > > > + events = rte_zmalloc_socket(rx_adapter->mem_name, > > > + buf->events_size * > > sizeof(*events), > > > + 0, socket_id); > > > + if (events == NULL) { > > > + RTE_EDEV_LOG_ERR("Failed to allocate memory " > > > + "for adapter event buffer"); > > > + rte_free(rx_adapter->eth_devices); > > > + rte_free(rx_adapter); > > > + return -ENOMEM; > > > + } > > > > > > - rx_adapter->event_enqueue_buffer.events = events; > > > + rx_adapter->event_enqueue_buffer.events = events; > > > + } > > > > > > event_eth_rx_adapter[id] = rx_adapter; > > > > > > @@ -2327,6 +2406,7 @@ rte_event_eth_rx_adapter_create_ext(uint8_t > > id, > > > uint8_t dev_id, > > > > > > /* use default values for adapter params */ > > > rxa_params.event_buf_size = ETH_EVENT_BUFFER_SIZE; > > > + 
rxa_params.use_queue_event_buf = false; > > > > > > return rxa_create(id, dev_id, &rxa_params, conf_cb, conf_arg); } > > @@ > > > -2347,9 +2427,9 @@ > > rte_event_eth_rx_adapter_create_with_params(uint8_t id, uint8_t dev_id, > > > if (rxa_params == NULL) { > > > rxa_params = &temp_params; > > > rxa_params->event_buf_size = ETH_EVENT_BUFFER_SIZE; > > > - } > > > - > > > - if (rxa_params->event_buf_size == 0) > > > + rxa_params->use_queue_event_buf = false; > > > + } else if ((!rxa_params->use_queue_event_buf && > > > + rxa_params->event_buf_size == 0)) My earlier comment applies here. Another error case is configuring both - rxa_params->use_queue_event_buf == true and rxa_params->event_buf_size != 0. > > > return -EINVAL; > > > > > > pc = rte_malloc(NULL, sizeof(*pc), 0); @@ -2418,7 +2498,8 @@ > > > rte_event_eth_rx_adapter_free(uint8_t id) > > > if (rx_adapter->default_cb_arg) > > > rte_free(rx_adapter->conf_arg); > > > rte_free(rx_adapter->eth_devices); > > > - rte_free(rx_adapter->event_enqueue_buffer.events); > > > + if (!rx_adapter->use_queue_event_buf) > > > + rte_free(rx_adapter->event_enqueue_buffer.events); > > > rte_free(rx_adapter); > > > event_eth_rx_adapter[id] = NULL; > > > > > > @@ -2522,6 +2603,14 @@ rte_event_eth_rx_adapter_queue_add(uint8_t > > id, > > > return -EINVAL; > > > } > > > > > > + if ((rx_adapter->use_queue_event_buf && > > > + queue_conf->event_buf_size == 0) || > > > + (!rx_adapter->use_queue_event_buf && > > > + queue_conf->event_buf_size != 0)) { > > > + RTE_EDEV_LOG_ERR("Invalid Event buffer size for the > > queue"); > > > + return -EINVAL; > > > + } > > > + > > > > Another error case is configuring both - rx_adapter->use_queue_event_buf > > = true and queue_conf->event_buf_size != 0. > > This is valid case. My bad, wrong place. See above. 
> > > > > > dev_info = &rx_adapter->eth_devices[eth_dev_id]; > > > > > > if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) { > > > -- > > > 2.25.1 ^ permalink raw reply [flat|nested] 81+ messages in thread
* Re: [dpdk-dev] [PATCH v5 4/5] eventdev/rx_adapter: implement per queue event buffer 2021-10-05 15:01 ` Jayatheerthan, Jay @ 2021-10-06 4:06 ` Naga Harish K, S V 0 siblings, 0 replies; 81+ messages in thread From: Naga Harish K, S V @ 2021-10-06 4:06 UTC (permalink / raw) To: Jayatheerthan, Jay, jerinj; +Cc: dev Hi Jay, > -----Original Message----- > From: Jayatheerthan, Jay <jay.jayatheerthan@intel.com> > Sent: Tuesday, October 5, 2021 8:31 PM > To: Naga Harish K, S V <s.v.naga.harish.k@intel.com>; jerinj@marvell.com > Cc: dev@dpdk.org > Subject: RE: [PATCH v5 4/5] eventdev/rx_adapter: implement per queue > event buffer > > > -----Original Message----- > > From: Naga Harish K, S V <s.v.naga.harish.k@intel.com> > > Sent: Tuesday, October 5, 2021 8:18 PM > > To: Jayatheerthan, Jay <jay.jayatheerthan@intel.com>; > > jerinj@marvell.com > > Cc: dev@dpdk.org > > Subject: RE: [PATCH v5 4/5] eventdev/rx_adapter: implement per queue > > event buffer > > > > Hi Jay, > > > > > -----Original Message----- > > > From: Jayatheerthan, Jay <jay.jayatheerthan@intel.com> > > > Sent: Tuesday, October 5, 2021 1:26 PM > > > To: Naga Harish K, S V <s.v.naga.harish.k@intel.com>; > > > jerinj@marvell.com > > > Cc: dev@dpdk.org > > > Subject: RE: [PATCH v5 4/5] eventdev/rx_adapter: implement per queue > > > event buffer > > > > > > > -----Original Message----- > > > > From: Naga Harish K, S V <s.v.naga.harish.k@intel.com> > > > > Sent: Monday, October 4, 2021 11:11 AM > > > > To: jerinj@marvell.com; Jayatheerthan, Jay > > > > <jay.jayatheerthan@intel.com> > > > > Cc: dev@dpdk.org > > > > Subject: [PATCH v5 4/5] eventdev/rx_adapter: implement per queue > > > event > > > > buffer > > > > > > > > this patch implement the per queue event buffer with required > > > > validations. 
> > > > > > > > Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com> > > > > --- > > > > lib/eventdev/rte_event_eth_rx_adapter.c | 187 > > > > +++++++++++++++++------- > > > > 1 file changed, 138 insertions(+), 49 deletions(-) > > > > > > > > diff --git a/lib/eventdev/rte_event_eth_rx_adapter.c > > > > b/lib/eventdev/rte_event_eth_rx_adapter.c > > > > index 606db241b8..b61af0e75e 100644 > > > > --- a/lib/eventdev/rte_event_eth_rx_adapter.c > > > > +++ b/lib/eventdev/rte_event_eth_rx_adapter.c > > > > @@ -102,10 +102,12 @@ struct rte_event_eth_rx_adapter { > > > > uint8_t rss_key_be[RSS_KEY_SIZE]; > > > > /* Event device identifier */ > > > > uint8_t eventdev_id; > > > > - /* Per ethernet device structure */ > > > > - struct eth_device_info *eth_devices; > > > > /* Event port identifier */ > > > > uint8_t event_port_id; > > > > + /* Flag indicating per rxq event buffer */ > > > > + bool use_queue_event_buf; > > > > + /* Per ethernet device structure */ > > > > + struct eth_device_info *eth_devices; > > > > /* Lock to serialize config updates with service function */ > > > > rte_spinlock_t rx_lock; > > > > /* Max mbufs processed in any service function invocation */ @@ > > > > -241,6 +243,7 @@ struct eth_rx_queue_info { > > > > uint32_t flow_id_mask; /* Set to ~0 if app provides flow id > > > else 0 */ > > > > uint64_t event; > > > > struct eth_rx_vector_data vector_data; > > > > + struct rte_eth_event_enqueue_buffer *event_buf; > > > > }; > > > > > > > > static struct rte_event_eth_rx_adapter **event_eth_rx_adapter; @@ > > > > -767,10 +770,9 @@ rxa_enq_block_end_ts(struct > > > rte_event_eth_rx_adapter > > > > *rx_adapter, > > > > > > > > /* Enqueue buffered events to event device */ static inline > > > > uint16_t -rxa_flush_event_buffer(struct rte_event_eth_rx_adapter > > > > *rx_adapter) > > > > +rxa_flush_event_buffer(struct rte_event_eth_rx_adapter > *rx_adapter, > > > > + struct rte_eth_event_enqueue_buffer *buf) > > > > { > > > > - struct 
rte_eth_event_enqueue_buffer *buf = > > > > - &rx_adapter->event_enqueue_buffer; > > > > struct rte_event_eth_rx_adapter_stats *stats = &rx_adapter->stats; > > > > uint16_t count = buf->last ? buf->last - buf->head : buf->count; > > > > > > > > @@ -888,15 +890,14 @@ rxa_buffer_mbufs(struct > > > rte_event_eth_rx_adapter *rx_adapter, > > > > uint16_t eth_dev_id, > > > > uint16_t rx_queue_id, > > > > struct rte_mbuf **mbufs, > > > > - uint16_t num) > > > > + uint16_t num, > > > > + struct rte_eth_event_enqueue_buffer *buf) > > > > { > > > > uint32_t i; > > > > struct eth_device_info *dev_info = > > > > &rx_adapter- > > > >eth_devices[eth_dev_id]; > > > > struct eth_rx_queue_info *eth_rx_queue_info = > > > > &dev_info- > > > >rx_queue[rx_queue_id]; > > > > - struct rte_eth_event_enqueue_buffer *buf = > > > > - &rx_adapter- > > > >event_enqueue_buffer; > > > > uint16_t new_tail = buf->tail; > > > > uint64_t event = eth_rx_queue_info->event; > > > > uint32_t flow_id_mask = eth_rx_queue_info->flow_id_mask; @@ - > > > 995,11 > > > > +996,10 @@ rxa_eth_rx(struct rte_event_eth_rx_adapter > *rx_adapter, > > > > uint16_t queue_id, > > > > uint32_t rx_count, > > > > uint32_t max_rx, > > > > - int *rxq_empty) > > > > + int *rxq_empty, > > > > + struct rte_eth_event_enqueue_buffer *buf) > > > > { > > > > struct rte_mbuf *mbufs[BATCH_SIZE]; > > > > - struct rte_eth_event_enqueue_buffer *buf = > > > > - &rx_adapter- > > > >event_enqueue_buffer; > > > > struct rte_event_eth_rx_adapter_stats *stats = > > > > &rx_adapter->stats; > > > > uint16_t n; > > > > @@ -1012,7 +1012,7 @@ rxa_eth_rx(struct rte_event_eth_rx_adapter > > > *rx_adapter, > > > > */ > > > > while (rxa_pkt_buf_available(buf)) { > > > > if (buf->count >= BATCH_SIZE) > > > > - rxa_flush_event_buffer(rx_adapter); > > > > + rxa_flush_event_buffer(rx_adapter, buf); > > > > > > > > stats->rx_poll_count++; > > > > n = rte_eth_rx_burst(port_id, queue_id, mbufs, > > > BATCH_SIZE); @@ > > > > -1021,14 +1021,14 @@ 
rxa_eth_rx(struct rte_event_eth_rx_adapter > > > *rx_adapter, > > > > *rxq_empty = 1; > > > > break; > > > > } > > > > - rxa_buffer_mbufs(rx_adapter, port_id, queue_id, mbufs, n); > > > > + rxa_buffer_mbufs(rx_adapter, port_id, queue_id, mbufs, n, > > > buf); > > > > nb_rx += n; > > > > if (rx_count + nb_rx > max_rx) > > > > break; > > > > } > > > > > > > > if (buf->count > 0) > > > > - rxa_flush_event_buffer(rx_adapter); > > > > + rxa_flush_event_buffer(rx_adapter, buf); > > > > > > > > return nb_rx; > > > > } > > > > @@ -1169,7 +1169,7 @@ rxa_intr_ring_dequeue(struct > > > rte_event_eth_rx_adapter *rx_adapter) > > > > ring_lock = &rx_adapter->intr_ring_lock; > > > > > > > > if (buf->count >= BATCH_SIZE) > > > > - rxa_flush_event_buffer(rx_adapter); > > > > + rxa_flush_event_buffer(rx_adapter, buf); > > > > > > > > while (rxa_pkt_buf_available(buf)) { > > > > struct eth_device_info *dev_info; @@ -1221,7 +1221,7 @@ > > > > rxa_intr_ring_dequeue(struct > > > rte_event_eth_rx_adapter *rx_adapter) > > > > continue; > > > > n = rxa_eth_rx(rx_adapter, port, i, nb_rx, > > > > rx_adapter->max_nb_rx, > > > > - &rxq_empty); > > > > + &rxq_empty, buf); > > > > nb_rx += n; > > > > > > > > enq_buffer_full = !rxq_empty && n == 0; > > > @@ -1242,7 +1242,7 @@ > > > > rxa_intr_ring_dequeue(struct rte_event_eth_rx_adapter *rx_adapter) > > > > } else { > > > > n = rxa_eth_rx(rx_adapter, port, queue, nb_rx, > > > > rx_adapter->max_nb_rx, > > > > - &rxq_empty); > > > > + &rxq_empty, buf); > > > > rx_adapter->qd_valid = !rxq_empty; > > > > nb_rx += n; > > > > if (nb_rx > rx_adapter->max_nb_rx) @@ -1273,13 > > > +1273,12 @@ > > > > rxa_poll(struct rte_event_eth_rx_adapter *rx_adapter) { > > > > uint32_t num_queue; > > > > uint32_t nb_rx = 0; > > > > - struct rte_eth_event_enqueue_buffer *buf; > > > > + struct rte_eth_event_enqueue_buffer *buf = NULL; > > > > uint32_t wrr_pos; > > > > uint32_t max_nb_rx; > > > > > > > > wrr_pos = rx_adapter->wrr_pos; > > > > max_nb_rx = 
rx_adapter->max_nb_rx; > > > > - buf = &rx_adapter->event_enqueue_buffer; > > > > > > > > /* Iterate through a WRR sequence */ > > > > for (num_queue = 0; num_queue < rx_adapter->wrr_len; > > > num_queue++) { > > > > @@ -1287,24 +1286,36 @@ rxa_poll(struct rte_event_eth_rx_adapter > > > *rx_adapter) > > > > uint16_t qid = rx_adapter->eth_rx_poll[poll_idx].eth_rx_qid; > > > > uint16_t d = rx_adapter->eth_rx_poll[poll_idx].eth_dev_id; > > > > > > > > + if (rx_adapter->use_queue_event_buf) { > > > > + struct eth_device_info *dev_info = > > > > + &rx_adapter->eth_devices[d]; > > > > + buf = dev_info->rx_queue[qid].event_buf; > > > > + } else > > > > + buf = &rx_adapter->event_enqueue_buffer; > > > > + > > > > /* Don't do a batch dequeue from the rx queue if there isn't > > > > * enough space in the enqueue buffer. > > > > */ > > > > if (buf->count >= BATCH_SIZE) > > > > - rxa_flush_event_buffer(rx_adapter); > > > > + rxa_flush_event_buffer(rx_adapter, buf); > > > > if (!rxa_pkt_buf_available(buf)) { > > > > - rx_adapter->wrr_pos = wrr_pos; > > > > - return nb_rx; > > > > + if (rx_adapter->use_queue_event_buf) > > > > + goto poll_next_entry; > > > > + else { > > > > + rx_adapter->wrr_pos = wrr_pos; > > > > + return nb_rx; > > > > + } > > > > } > > > > > > > > nb_rx += rxa_eth_rx(rx_adapter, d, qid, nb_rx, max_nb_rx, > > > > - NULL); > > > > + NULL, buf); > > > > if (nb_rx > max_nb_rx) { > > > > rx_adapter->wrr_pos = > > > > (wrr_pos + 1) % rx_adapter->wrr_len; > > > > break; > > > > } > > > > > > > > +poll_next_entry: > > > > if (++wrr_pos == rx_adapter->wrr_len) > > > > wrr_pos = 0; > > > > } > > > > @@ -1315,12 +1326,18 @@ static void rxa_vector_expire(struct > > > > eth_rx_vector_data *vec, void *arg) { > > > > struct rte_event_eth_rx_adapter *rx_adapter = arg; > > > > - struct rte_eth_event_enqueue_buffer *buf = > > > > - &rx_adapter->event_enqueue_buffer; > > > > + struct rte_eth_event_enqueue_buffer *buf = NULL; > > > > struct rte_event *ev; > > > > > > > > + if 
(rx_adapter->use_queue_event_buf) { > > > > + struct eth_device_info *dev_info = > > > > + &rx_adapter->eth_devices[vec->port]; > > > > + buf = dev_info->rx_queue[vec->queue].event_buf; > > > > + } else > > > > + buf = &rx_adapter->event_enqueue_buffer; > > > > + > > > > > > The above code to get the buffer can be made an inline function > > > since it is needed in more than one place. > > > > Added new inline function to get event buffer pointer in v6 patch set. > > > > > > > > > if (buf->count) > > > > - rxa_flush_event_buffer(rx_adapter); > > > > + rxa_flush_event_buffer(rx_adapter, buf); > > > > > > > > if (vec->vector_ev->nb_elem == 0) > > > > return; > > > > @@ -1947,9 +1964,16 @@ rxa_sw_del(struct > rte_event_eth_rx_adapter > > > *rx_adapter, > > > > rx_adapter->num_rx_intr -= intrq; > > > > dev_info->nb_rx_intr -= intrq; > > > > dev_info->nb_shared_intr -= intrq && sintrq; > > > > + if (rx_adapter->use_queue_event_buf) { > > > > + struct rte_eth_event_enqueue_buffer *event_buf = > > > > + dev_info->rx_queue[rx_queue_id].event_buf; > > > > + rte_free(event_buf->events); > > > > + rte_free(event_buf); > > > > + dev_info->rx_queue[rx_queue_id].event_buf = NULL; > > > > + } > > > > } > > > > > > > > -static void > > > > +static int > > > > rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter, > > > > struct eth_device_info *dev_info, > > > > int32_t rx_queue_id, > > > > @@ -1961,15 +1985,21 @@ rxa_add_queue(struct > > > rte_event_eth_rx_adapter *rx_adapter, > > > > int intrq; > > > > int sintrq; > > > > struct rte_event *qi_ev; > > > > + struct rte_eth_event_enqueue_buffer *new_rx_buf = NULL; > > > > + uint16_t eth_dev_id = dev_info->dev->data->port_id; > > > > + int ret; > > > > > > > > if (rx_queue_id == -1) { > > > > uint16_t nb_rx_queues; > > > > uint16_t i; > > > > > > > > nb_rx_queues = dev_info->dev->data->nb_rx_queues; > > > > - for (i = 0; i < nb_rx_queues; i++) > > > > - rxa_add_queue(rx_adapter, dev_info, i, conf); > > > > - return; > > > > + 
for (i = 0; i < nb_rx_queues; i++) { > > > > + ret = rxa_add_queue(rx_adapter, dev_info, i, conf); > > > > + if (ret) > > > > + return ret; > > > > + } > > > > + return 0; > > > > } > > > > > > > > pollq = rxa_polled_queue(dev_info, rx_queue_id); @@ -2032,6 > > > +2062,37 > > > > @@ rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter, > > > > dev_info->next_q_idx = 0; > > > > } > > > > } > > > > + > > > > + if (!rx_adapter->use_queue_event_buf) > > > > + return 0; > > > > + > > > > + new_rx_buf = rte_zmalloc_socket("rx_buffer_meta", > > > > + sizeof(*new_rx_buf), 0, > > > > + rte_eth_dev_socket_id(eth_dev_id)); > > > > + if (new_rx_buf == NULL) { > > > > + RTE_EDEV_LOG_ERR("Failed to allocate event buffer meta > > > for " > > > > + "dev_id: %d queue_id: %d", > > > > + eth_dev_id, rx_queue_id); > > > > + return -ENOMEM; > > > > + } > > > > + > > > > + new_rx_buf->events_size = RTE_ALIGN(conf->event_buf_size, > > > BATCH_SIZE); > > > > + new_rx_buf->events_size += (2 * BATCH_SIZE); > > > > + new_rx_buf->events = rte_zmalloc_socket("rx_buffer", > > > > + sizeof(struct rte_event) * > > > > + new_rx_buf->events_size, 0, > > > > + rte_eth_dev_socket_id(eth_dev_id)); > > > > + if (new_rx_buf->events == NULL) { > > > > + rte_free(new_rx_buf); > > > > + RTE_EDEV_LOG_ERR("Failed to allocate event buffer for " > > > > + "dev_id: %d queue_id: %d", > > > > + eth_dev_id, rx_queue_id); > > > > + return -ENOMEM; > > > > + } > > > > + > > > > + queue_info->event_buf = new_rx_buf; > > > > + > > > > + return 0; > > > > } > > > > > > > > static int rxa_sw_add(struct rte_event_eth_rx_adapter > > > > *rx_adapter, @@ > > > > -2060,6 +2121,16 @@ static int rxa_sw_add(struct > > > rte_event_eth_rx_adapter *rx_adapter, > > > > temp_conf.servicing_weight = 1; > > > > } > > > > queue_conf = &temp_conf; > > > > + > > > > + if (queue_conf->servicing_weight == 0 && > > > > + rx_adapter->use_queue_event_buf) { > > > > + > > > > + RTE_EDEV_LOG_ERR("Use of queue level event > > > buffer " > > 
> > + "not supported for interrupt queues > > > " > > > > + "dev_id: %d queue_id: %d", > > > > + eth_dev_id, rx_queue_id); > > > > + return -EINVAL; > > > > + } > > > > } > > > > > > > > nb_rx_queues = dev_info->dev->data->nb_rx_queues; > > > > @@ -2139,7 +2210,9 @@ static int rxa_sw_add(struct > > > > rte_event_eth_rx_adapter *rx_adapter, > > > > > > > > > > > > > > > > - rxa_add_queue(rx_adapter, dev_info, rx_queue_id, queue_conf); > > > > + ret = rxa_add_queue(rx_adapter, dev_info, rx_queue_id, > > > queue_conf); > > > > + if (ret) > > > > + goto err_free_rxqueue; > > > > rxa_calc_wrr_sequence(rx_adapter, rx_poll, rx_wrr); > > > > > > > > rte_free(rx_adapter->eth_rx_poll); > > > > @@ -2160,7 +2233,7 @@ static int rxa_sw_add(struct > > > rte_event_eth_rx_adapter *rx_adapter, > > > > rte_free(rx_poll); > > > > rte_free(rx_wrr); > > > > > > > > - return 0; > > > > + return ret; > > > > } > > > > > > > > static int > > > > @@ -2286,20 +2359,26 @@ rxa_create(uint8_t id, uint8_t dev_id, > > > > rx_adapter->eth_devices[i].dev = &rte_eth_devices[i]; > > > > > > > > /* Rx adapter event buffer allocation */ > > > > - buf = &rx_adapter->event_enqueue_buffer; > > > > - buf->events_size = RTE_ALIGN(rxa_params->event_buf_size, > > > BATCH_SIZE); > > > > - > > > > - events = rte_zmalloc_socket(rx_adapter->mem_name, > > > > - buf->events_size * sizeof(*events), > > > > - 0, socket_id); > > > > - if (events == NULL) { > > > > - RTE_EDEV_LOG_ERR("Failed to allocate mem for event > > > buffer\n"); > > > > - rte_free(rx_adapter->eth_devices); > > > > - rte_free(rx_adapter); > > > > - return -ENOMEM; > > > > - } > > > > + rx_adapter->use_queue_event_buf = rxa_params- > > > >use_queue_event_buf; > > > > + > > > > + if (!rx_adapter->use_queue_event_buf) { > > > > + buf = &rx_adapter->event_enqueue_buffer; > > > > + buf->events_size = RTE_ALIGN(rxa_params- > > > >event_buf_size, > > > > + BATCH_SIZE); > > > > + > > > > + events = rte_zmalloc_socket(rx_adapter->mem_name, > > > > + 
buf->events_size * > > > sizeof(*events), > > > > + 0, socket_id); > > > > + if (events == NULL) { > > > > + RTE_EDEV_LOG_ERR("Failed to allocate memory " > > > > + "for adapter event buffer"); > > > > + rte_free(rx_adapter->eth_devices); > > > > + rte_free(rx_adapter); > > > > + return -ENOMEM; > > > > + } > > > > > > > > - rx_adapter->event_enqueue_buffer.events = events; > > > > + rx_adapter->event_enqueue_buffer.events = events; > > > > + } > > > > > > > > event_eth_rx_adapter[id] = rx_adapter; > > > > > > > > @@ -2327,6 +2406,7 @@ > rte_event_eth_rx_adapter_create_ext(uint8_t > > > id, > > > > uint8_t dev_id, > > > > > > > > /* use default values for adapter params */ > > > > rxa_params.event_buf_size = ETH_EVENT_BUFFER_SIZE; > > > > + rxa_params.use_queue_event_buf = false; > > > > > > > > return rxa_create(id, dev_id, &rxa_params, conf_cb, conf_arg); > > > > } > > > @@ > > > > -2347,9 +2427,9 @@ > > > rte_event_eth_rx_adapter_create_with_params(uint8_t id, uint8_t > > > dev_id, > > > > if (rxa_params == NULL) { > > > > rxa_params = &temp_params; > > > > rxa_params->event_buf_size = ETH_EVENT_BUFFER_SIZE; > > > > - } > > > > - > > > > - if (rxa_params->event_buf_size == 0) > > > > + rxa_params->use_queue_event_buf = false; > > > > + } else if ((!rxa_params->use_queue_event_buf && > > > > + rxa_params->event_buf_size == 0)) > > My earlier comment applies here. > Another error case is configuring both - rxa_params->use_queue_event_buf > == true and rxa_params->event_buf_size != 0. 
> Added this additional validation in V7 patch set > > > > return -EINVAL; > > > > > > > > pc = rte_malloc(NULL, sizeof(*pc), 0); @@ -2418,7 +2498,8 @@ > > > > rte_event_eth_rx_adapter_free(uint8_t id) > > > > if (rx_adapter->default_cb_arg) > > > > rte_free(rx_adapter->conf_arg); > > > > rte_free(rx_adapter->eth_devices); > > > > - rte_free(rx_adapter->event_enqueue_buffer.events); > > > > + if (!rx_adapter->use_queue_event_buf) > > > > + rte_free(rx_adapter->event_enqueue_buffer.events); > > > > rte_free(rx_adapter); > > > > event_eth_rx_adapter[id] = NULL; > > > > > > > > @@ -2522,6 +2603,14 @@ > rte_event_eth_rx_adapter_queue_add(uint8_t > > > id, > > > > return -EINVAL; > > > > } > > > > > > > > + if ((rx_adapter->use_queue_event_buf && > > > > + queue_conf->event_buf_size == 0) || > > > > + (!rx_adapter->use_queue_event_buf && > > > > + queue_conf->event_buf_size != 0)) { > > > > + RTE_EDEV_LOG_ERR("Invalid Event buffer size for the > > > queue"); > > > > + return -EINVAL; > > > > + } > > > > + > > > > > > Another error case is configuring both - > > > rx_adapter->use_queue_event_buf = true and queue_conf- > >event_buf_size != 0. > > > > This is valid case. > > My bad, wrong place. See above. > > > > > > > > > > dev_info = &rx_adapter->eth_devices[eth_dev_id]; > > > > > > > > if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) { > > > > -- > > > > 2.25.1 ^ permalink raw reply [flat|nested] 81+ messages in thread
* [dpdk-dev] [PATCH v5 5/5] test/event: add unit test for Rx adapter 2021-10-04 5:41 ` [dpdk-dev] [PATCH v5 1/5] eventdev/rx_adapter: add event buffer size configurability Naga Harish K S V ` (2 preceding siblings ...) 2021-10-04 5:41 ` [dpdk-dev] [PATCH v5 4/5] eventdev/rx_adapter: implement " Naga Harish K S V @ 2021-10-04 5:41 ` Naga Harish K S V 2021-10-05 8:03 ` Jayatheerthan, Jay 2021-10-05 7:19 ` [dpdk-dev] [PATCH v5 1/5] eventdev/rx_adapter: add event buffer size configurability Jayatheerthan, Jay 2021-10-05 14:38 ` [dpdk-dev] [PATCH v6 " Naga Harish K S V 5 siblings, 1 reply; 81+ messages in thread From: Naga Harish K S V @ 2021-10-04 5:41 UTC (permalink / raw) To: jerinj, jay.jayatheerthan; +Cc: dev this patch adds unit tests for checking per rx queue event buffer feature using rte_event_eth_rx_adapter_queue_add api. Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com> --- app/test/test_event_eth_rx_adapter.c | 86 ++++++++++++++++++++++++++++ 1 file changed, 86 insertions(+) diff --git a/app/test/test_event_eth_rx_adapter.c b/app/test/test_event_eth_rx_adapter.c index 7c2cf0dd70..cf3c989efb 100644 --- a/app/test/test_event_eth_rx_adapter.c +++ b/app/test/test_event_eth_rx_adapter.c @@ -387,6 +387,90 @@ adapter_create(void) return err; } +static int +adapter_create_with_params(void) +{ + int err; + struct rte_event_dev_info dev_info; + struct rte_event_port_conf rx_p_conf; + struct rte_event_eth_rx_adapter_params rxa_params; + + memset(&rx_p_conf, 0, sizeof(rx_p_conf)); + + err = rte_event_dev_info_get(TEST_DEV_ID, &dev_info); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + rx_p_conf.new_event_threshold = dev_info.max_num_events; + rx_p_conf.dequeue_depth = dev_info.max_event_port_dequeue_depth; + rx_p_conf.enqueue_depth = dev_info.max_event_port_enqueue_depth; + + rxa_params.use_queue_event_buf = false; + rxa_params.event_buf_size = 0; + + err = rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID, + TEST_DEV_ID, &rx_p_conf, 
&rxa_params); + TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err); + + rxa_params.use_queue_event_buf = true; + + err = rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID, + TEST_DEV_ID, &rx_p_conf, &rxa_params); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + err = rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID, + TEST_DEV_ID, &rx_p_conf, &rxa_params); + TEST_ASSERT(err == -EEXIST, "Expected -EEXIST got %d", err); + + return TEST_SUCCESS; +} + +static int +adapter_queue_event_buf_test(void) +{ + int err; + struct rte_event ev; + uint32_t cap; + + struct rte_event_eth_rx_adapter_queue_conf queue_config = {0}; + + err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID, TEST_ETHDEV_ID, + &cap); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + ev.queue_id = 0; + ev.sched_type = RTE_SCHED_TYPE_ATOMIC; + ev.priority = 0; + + queue_config.rx_queue_flags = 0; + if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID) { + ev.flow_id = 1; + queue_config.rx_queue_flags = + RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID; + } + queue_config.ev = ev; + queue_config.servicing_weight = 1; + queue_config.event_buf_size = 0; + + err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID, + TEST_ETHDEV_ID, 0, + &queue_config); + TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err); + + queue_config.event_buf_size = 1024; + + err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID, + TEST_ETHDEV_ID, 0, + &queue_config); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID, + TEST_ETHDEV_ID, + 0); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + return TEST_SUCCESS; +} + static void adapter_free(void) { @@ -826,6 +910,8 @@ static struct unit_test_suite event_eth_rx_tests = { TEST_CASE_ST(adapter_create, adapter_free, adapter_start_stop), TEST_CASE_ST(adapter_create, adapter_free, adapter_stats), TEST_CASE_ST(adapter_create, adapter_free, adapter_queue_conf), + 
TEST_CASE_ST(adapter_create_with_params, adapter_free, + adapter_queue_event_buf_test), TEST_CASES_END() /**< NULL terminate unit test array */ } }; -- 2.25.1 ^ permalink raw reply [flat|nested] 81+ messages in thread
* Re: [dpdk-dev] [PATCH v5 5/5] test/event: add unit test for Rx adapter 2021-10-04 5:41 ` [dpdk-dev] [PATCH v5 5/5] test/event: add unit test for Rx adapter Naga Harish K S V @ 2021-10-05 8:03 ` Jayatheerthan, Jay 0 siblings, 0 replies; 81+ messages in thread From: Jayatheerthan, Jay @ 2021-10-05 8:03 UTC (permalink / raw) To: Naga Harish K, S V, jerinj; +Cc: dev Acked-by: Jay Jayatheerthan <jay.jayatheerthan@intel.com> > -----Original Message----- > From: Naga Harish K, S V <s.v.naga.harish.k@intel.com> > Sent: Monday, October 4, 2021 11:11 AM > To: jerinj@marvell.com; Jayatheerthan, Jay <jay.jayatheerthan@intel.com> > Cc: dev@dpdk.org > Subject: [PATCH v5 5/5] test/event: add unit test for Rx adapter > > this patch adds unit tests for checking per rx queue event buffer > feature using rte_event_eth_rx_adapter_queue_add api. > > Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com> > --- > app/test/test_event_eth_rx_adapter.c | 86 ++++++++++++++++++++++++++++ > 1 file changed, 86 insertions(+) > > diff --git a/app/test/test_event_eth_rx_adapter.c b/app/test/test_event_eth_rx_adapter.c > index 7c2cf0dd70..cf3c989efb 100644 > --- a/app/test/test_event_eth_rx_adapter.c > +++ b/app/test/test_event_eth_rx_adapter.c > @@ -387,6 +387,90 @@ adapter_create(void) > return err; > } > > +static int > +adapter_create_with_params(void) > +{ > + int err; > + struct rte_event_dev_info dev_info; > + struct rte_event_port_conf rx_p_conf; > + struct rte_event_eth_rx_adapter_params rxa_params; > + > + memset(&rx_p_conf, 0, sizeof(rx_p_conf)); > + > + err = rte_event_dev_info_get(TEST_DEV_ID, &dev_info); > + TEST_ASSERT(err == 0, "Expected 0 got %d", err); > + > + rx_p_conf.new_event_threshold = dev_info.max_num_events; > + rx_p_conf.dequeue_depth = dev_info.max_event_port_dequeue_depth; > + rx_p_conf.enqueue_depth = dev_info.max_event_port_enqueue_depth; > + > + rxa_params.use_queue_event_buf = false; > + rxa_params.event_buf_size = 0; > + > + err = 
rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID, > + TEST_DEV_ID, &rx_p_conf, &rxa_params); > + TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err); > + > + rxa_params.use_queue_event_buf = true; > + > + err = rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID, > + TEST_DEV_ID, &rx_p_conf, &rxa_params); > + TEST_ASSERT(err == 0, "Expected 0 got %d", err); > + > + err = rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID, > + TEST_DEV_ID, &rx_p_conf, &rxa_params); > + TEST_ASSERT(err == -EEXIST, "Expected -EEXIST got %d", err); > + > + return TEST_SUCCESS; > +} > + > +static int > +adapter_queue_event_buf_test(void) > +{ > + int err; > + struct rte_event ev; > + uint32_t cap; > + > + struct rte_event_eth_rx_adapter_queue_conf queue_config = {0}; > + > + err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID, TEST_ETHDEV_ID, > + &cap); > + TEST_ASSERT(err == 0, "Expected 0 got %d", err); > + > + ev.queue_id = 0; > + ev.sched_type = RTE_SCHED_TYPE_ATOMIC; > + ev.priority = 0; > + > + queue_config.rx_queue_flags = 0; > + if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID) { > + ev.flow_id = 1; > + queue_config.rx_queue_flags = > + RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID; > + } > + queue_config.ev = ev; > + queue_config.servicing_weight = 1; > + queue_config.event_buf_size = 0; > + > + err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID, > + TEST_ETHDEV_ID, 0, > + &queue_config); > + TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err); > + > + queue_config.event_buf_size = 1024; > + > + err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID, > + TEST_ETHDEV_ID, 0, > + &queue_config); > + TEST_ASSERT(err == 0, "Expected 0 got %d", err); > + > + err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID, > + TEST_ETHDEV_ID, > + 0); > + TEST_ASSERT(err == 0, "Expected 0 got %d", err); > + > + return TEST_SUCCESS; > +} > + > static void > adapter_free(void) > { > @@ -826,6 +910,8 @@ static struct unit_test_suite event_eth_rx_tests = { 
> TEST_CASE_ST(adapter_create, adapter_free, adapter_start_stop), > TEST_CASE_ST(adapter_create, adapter_free, adapter_stats), > TEST_CASE_ST(adapter_create, adapter_free, adapter_queue_conf), > + TEST_CASE_ST(adapter_create_with_params, adapter_free, > + adapter_queue_event_buf_test), > TEST_CASES_END() /**< NULL terminate unit test array */ > } > }; > -- > 2.25.1 ^ permalink raw reply [flat|nested] 81+ messages in thread
* Re: [dpdk-dev] [PATCH v5 1/5] eventdev/rx_adapter: add event buffer size configurability 2021-10-04 5:41 ` [dpdk-dev] [PATCH v5 1/5] eventdev/rx_adapter: add event buffer size configurability Naga Harish K S V ` (3 preceding siblings ...) 2021-10-04 5:41 ` [dpdk-dev] [PATCH v5 5/5] test/event: add unit test for Rx adapter Naga Harish K S V @ 2021-10-05 7:19 ` Jayatheerthan, Jay 2021-10-05 14:45 ` Naga Harish K, S V 2021-10-05 14:38 ` [dpdk-dev] [PATCH v6 " Naga Harish K S V 5 siblings, 1 reply; 81+ messages in thread From: Jayatheerthan, Jay @ 2021-10-05 7:19 UTC (permalink / raw) To: Naga Harish K, S V, jerinj; +Cc: dev, Kundapura, Ganapati > -----Original Message----- > From: Naga Harish K, S V <s.v.naga.harish.k@intel.com> > Sent: Monday, October 4, 2021 11:11 AM > To: jerinj@marvell.com; Jayatheerthan, Jay <jay.jayatheerthan@intel.com> > Cc: dev@dpdk.org; Kundapura, Ganapati <ganapati.kundapura@intel.com> > Subject: [PATCH v5 1/5] eventdev/rx_adapter: add event buffer size configurability > > Currently event buffer is static array with a default size defined > internally. > > To configure event buffer size from application, > ``rte_event_eth_rx_adapter_create_with_params`` api is added which > takes ``struct rte_event_eth_rx_adapter_params`` to configure event > buffer size in addition other params . The event buffer size is > rounded up for better buffer utilization and performance . In case > of NULL params argument, default event buffer size is used. > > Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com> > Signed-off-by: Ganapati Kundapura <ganapati.kundapura@intel.com> > > --- > v5: > * reverted queue conf get unit test change > > v4: > * rebased with latest dpdk-next-eventdev branch > * changed queue conf get unit test > > v3: > * updated documentation and code comments as per review comments. > * updated new create api test case name with suitable one. > > v2: > * Updated header file and rx adapter documentation as per review comments. 
> * new api name is modified as rte_event_eth_rx_adapter_create_with_params > as per review comments. > * rxa_params pointer argument Value NULL is allowed to represent the > default values > > v1: > * Initial implementation with documentation and unit tests. > --- > --- > .../prog_guide/event_ethernet_rx_adapter.rst | 7 ++ > lib/eventdev/rte_event_eth_rx_adapter.c | 98 +++++++++++++++++-- > lib/eventdev/rte_event_eth_rx_adapter.h | 41 +++++++- > lib/eventdev/version.map | 2 + > 4 files changed, 140 insertions(+), 8 deletions(-) > > diff --git a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst > index ce23d8a474..8526aecf57 100644 > --- a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst > +++ b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst > @@ -62,6 +62,13 @@ service function and needs to create an event port for it. The callback is > expected to fill the ``struct rte_event_eth_rx_adapter_conf structure`` > passed to it. > > +If the application desires to control the event buffer size, it can use the > +``rte_event_eth_rx_adapter_create_with_params()`` api. The event buffer size is > +specified using ``struct rte_event_eth_rx_adapter_params::event_buf_size``. > +The function is passed the event device to be associated with the adapter > +and port configuration for the adapter to setup an event port if the > +adapter needs to use a service function. 
> + > Adding Rx Queues to the Adapter Instance > ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ > > diff --git a/lib/eventdev/rte_event_eth_rx_adapter.c b/lib/eventdev/rte_event_eth_rx_adapter.c > index 10491ca07b..606db241b8 100644 > --- a/lib/eventdev/rte_event_eth_rx_adapter.c > +++ b/lib/eventdev/rte_event_eth_rx_adapter.c > @@ -85,7 +85,9 @@ struct rte_eth_event_enqueue_buffer { > /* Count of events in this buffer */ > uint16_t count; > /* Array of events in this buffer */ > - struct rte_event events[ETH_EVENT_BUFFER_SIZE]; > + struct rte_event *events; > + /* size of event buffer */ > + uint16_t events_size; > /* Event enqueue happens from head */ > uint16_t head; > /* New packets from rte_eth_rx_burst is enqued from tail */ > @@ -946,7 +948,7 @@ rxa_buffer_mbufs(struct rte_event_eth_rx_adapter *rx_adapter, > dropped = 0; > nb_cb = dev_info->cb_fn(eth_dev_id, rx_queue_id, > buf->last | > - (RTE_DIM(buf->events) & ~buf->last_mask), > + (buf->events_size & ~buf->last_mask), > buf->count >= BATCH_SIZE ? 
> buf->count - BATCH_SIZE : 0, > &buf->events[buf->tail], > @@ -972,7 +974,7 @@ rxa_pkt_buf_available(struct rte_eth_event_enqueue_buffer *buf) > uint32_t nb_req = buf->tail + BATCH_SIZE; > > if (!buf->last) { > - if (nb_req <= RTE_DIM(buf->events)) > + if (nb_req <= buf->events_size) > return true; > > if (buf->head >= BATCH_SIZE) { > @@ -2206,12 +2208,15 @@ rxa_ctrl(uint8_t id, int start) > return 0; > } > > -int > -rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, > - rte_event_eth_rx_adapter_conf_cb conf_cb, > - void *conf_arg) > +static int > +rxa_create(uint8_t id, uint8_t dev_id, > + struct rte_event_eth_rx_adapter_params *rxa_params, > + rte_event_eth_rx_adapter_conf_cb conf_cb, > + void *conf_arg) > { > struct rte_event_eth_rx_adapter *rx_adapter; > + struct rte_eth_event_enqueue_buffer *buf; > + struct rte_event *events; > int ret; > int socket_id; > uint16_t i; > @@ -2226,6 +2231,7 @@ rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, > > RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL); > RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); > + > if (conf_cb == NULL) > return -EINVAL; > > @@ -2273,11 +2279,30 @@ rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, > rte_free(rx_adapter); > return -ENOMEM; > } > + > rte_spinlock_init(&rx_adapter->rx_lock); > + > for (i = 0; i < RTE_MAX_ETHPORTS; i++) > rx_adapter->eth_devices[i].dev = &rte_eth_devices[i]; > > + /* Rx adapter event buffer allocation */ > + buf = &rx_adapter->event_enqueue_buffer; > + buf->events_size = RTE_ALIGN(rxa_params->event_buf_size, BATCH_SIZE); Do we need to align event_buf_size again here ? The caller seems to take care of it. 
> + > + events = rte_zmalloc_socket(rx_adapter->mem_name, > + buf->events_size * sizeof(*events), > + 0, socket_id); > + if (events == NULL) { > + RTE_EDEV_LOG_ERR("Failed to allocate mem for event buffer\n"); > + rte_free(rx_adapter->eth_devices); > + rte_free(rx_adapter); > + return -ENOMEM; > + } > + > + rx_adapter->event_enqueue_buffer.events = events; > + > event_eth_rx_adapter[id] = rx_adapter; > + > if (conf_cb == rxa_default_conf_cb) > rx_adapter->default_cb_arg = 1; > > @@ -2293,6 +2318,61 @@ rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, > return 0; > } > > +int > +rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, > + rte_event_eth_rx_adapter_conf_cb conf_cb, > + void *conf_arg) > +{ > + struct rte_event_eth_rx_adapter_params rxa_params; Can initialize rxa_params in case if more fields get added in future that we don't assign here. > + > + /* use default values for adapter params */ > + rxa_params.event_buf_size = ETH_EVENT_BUFFER_SIZE; > + > + return rxa_create(id, dev_id, &rxa_params, conf_cb, conf_arg); > +} > + > +int > +rte_event_eth_rx_adapter_create_with_params(uint8_t id, uint8_t dev_id, > + struct rte_event_port_conf *port_config, > + struct rte_event_eth_rx_adapter_params *rxa_params) > +{ > + struct rte_event_port_conf *pc; > + int ret; > + struct rte_event_eth_rx_adapter_params temp_params = {0}; > + > + if (port_config == NULL) > + return -EINVAL; > + > + /* use default values if rxa_parmas is NULL */ > + if (rxa_params == NULL) { > + rxa_params = &temp_params; > + rxa_params->event_buf_size = ETH_EVENT_BUFFER_SIZE; > + } > + > + if (rxa_params->event_buf_size == 0) > + return -EINVAL; > + > + pc = rte_malloc(NULL, sizeof(*pc), 0); > + if (pc == NULL) > + return -ENOMEM; > + > + *pc = *port_config; > + > + /* adjust event buff size with BATCH_SIZE used for fetching packets > + * from NIC rx queues to get full buffer utilization and prevent > + * unnecessary rollovers. 
> + */ > + rxa_params->event_buf_size = RTE_ALIGN(rxa_params->event_buf_size, > + BATCH_SIZE); > + rxa_params->event_buf_size += BATCH_SIZE + BATCH_SIZE; Please add brackets to be more explicit and readable. > + > + ret = rxa_create(id, dev_id, rxa_params, rxa_default_conf_cb, pc); > + if (ret) > + rte_free(pc); > + > + return ret; > +} > + > int > rte_event_eth_rx_adapter_create(uint8_t id, uint8_t dev_id, > struct rte_event_port_conf *port_config) > @@ -2302,12 +2382,14 @@ rte_event_eth_rx_adapter_create(uint8_t id, uint8_t dev_id, > > if (port_config == NULL) > return -EINVAL; > + > RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL); > > pc = rte_malloc(NULL, sizeof(*pc), 0); > if (pc == NULL) > return -ENOMEM; > *pc = *port_config; > + > ret = rte_event_eth_rx_adapter_create_ext(id, dev_id, > rxa_default_conf_cb, > pc); > @@ -2336,6 +2418,7 @@ rte_event_eth_rx_adapter_free(uint8_t id) > if (rx_adapter->default_cb_arg) > rte_free(rx_adapter->conf_arg); > rte_free(rx_adapter->eth_devices); > + rte_free(rx_adapter->event_enqueue_buffer.events); > rte_free(rx_adapter); > event_eth_rx_adapter[id] = NULL; > > @@ -2711,6 +2794,7 @@ rte_event_eth_rx_adapter_stats_get(uint8_t id, > > stats->rx_packets += dev_stats_sum.rx_packets; > stats->rx_enq_count += dev_stats_sum.rx_enq_count; > + > return 0; > } > > diff --git a/lib/eventdev/rte_event_eth_rx_adapter.h b/lib/eventdev/rte_event_eth_rx_adapter.h > index 470543e434..846ca569e9 100644 > --- a/lib/eventdev/rte_event_eth_rx_adapter.h > +++ b/lib/eventdev/rte_event_eth_rx_adapter.h > @@ -26,6 +26,7 @@ > * The ethernet Rx event adapter's functions are: > * - rte_event_eth_rx_adapter_create_ext() > * - rte_event_eth_rx_adapter_create() > + * - rte_event_eth_rx_adapter_create_with_params() > * - rte_event_eth_rx_adapter_free() > * - rte_event_eth_rx_adapter_queue_add() > * - rte_event_eth_rx_adapter_queue_del() > @@ -37,7 +38,7 @@ > * > * The application creates an ethernet to event adapter using > * 
rte_event_eth_rx_adapter_create_ext() or rte_event_eth_rx_adapter_create() > - * functions. > + * or rte_event_eth_rx_adapter_create_with_params() functions. > * The adapter needs to know which ethernet rx queues to poll for mbufs as well > * as event device parameters such as the event queue identifier, event > * priority and scheduling type that the adapter should use when constructing > @@ -257,6 +258,17 @@ struct rte_event_eth_rx_adapter_vector_limits { > */ > }; > > +/** > + * A structure to hold adapter config params > + */ > +struct rte_event_eth_rx_adapter_params { > + uint16_t event_buf_size; > + /**< size of event buffer for the adapter. > + * This value is rounded up for better buffer utilization > + * and performance. > + */ > +}; > + > /** > * > * Callback function invoked by the SW adapter before it continues > @@ -357,6 +369,33 @@ int rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, > int rte_event_eth_rx_adapter_create(uint8_t id, uint8_t dev_id, > struct rte_event_port_conf *port_config); > > +/** > + * This is a variant of rte_event_eth_rx_adapter_create() with additional > + * adapter params specified in ``struct rte_event_eth_rx_adapter_params``. > + * > + * @param id > + * The identifier of the ethernet Rx event adapter. > + * > + * @param dev_id > + * The identifier of the event device to configure. > + * > + * @param port_config > + * Argument of type *rte_event_port_conf* that is passed to the conf_cb > + * function. > + * > + * @param rxa_params > + * Pointer to struct rte_event_eth_rx_adapter_params. > + * In case of NULL, default values are used. 
> + * > + * @return > + * - 0: Success > + * - <0: Error code on failure > + */ > +__rte_experimental > +int rte_event_eth_rx_adapter_create_with_params(uint8_t id, uint8_t dev_id, > + struct rte_event_port_conf *port_config, > + struct rte_event_eth_rx_adapter_params *rxa_params); > + > /** > * Free an event adapter > * > diff --git a/lib/eventdev/version.map b/lib/eventdev/version.map > index 9f280160fa..7de18497a6 100644 > --- a/lib/eventdev/version.map > +++ b/lib/eventdev/version.map > @@ -138,6 +138,8 @@ EXPERIMENTAL { > __rte_eventdev_trace_port_setup; > # added in 20.11 > rte_event_pmd_pci_probe_named; > + # added in 21.11 > + rte_event_eth_rx_adapter_create_with_params; > > #added in 21.05 > rte_event_vector_pool_create; > -- > 2.25.1 ^ permalink raw reply [flat|nested] 81+ messages in thread
* Re: [dpdk-dev] [PATCH v5 1/5] eventdev/rx_adapter: add event buffer size configurability 2021-10-05 7:19 ` [dpdk-dev] [PATCH v5 1/5] eventdev/rx_adapter: add event buffer size configurability Jayatheerthan, Jay @ 2021-10-05 14:45 ` Naga Harish K, S V 0 siblings, 0 replies; 81+ messages in thread From: Naga Harish K, S V @ 2021-10-05 14:45 UTC (permalink / raw) To: Jayatheerthan, Jay, jerinj; +Cc: dev, Kundapura, Ganapati Hi Jay, > -----Original Message----- > From: Jayatheerthan, Jay <jay.jayatheerthan@intel.com> > Sent: Tuesday, October 5, 2021 12:49 PM > To: Naga Harish K, S V <s.v.naga.harish.k@intel.com>; jerinj@marvell.com > Cc: dev@dpdk.org; Kundapura, Ganapati <ganapati.kundapura@intel.com> > Subject: RE: [PATCH v5 1/5] eventdev/rx_adapter: add event buffer size > configurability > > > -----Original Message----- > > From: Naga Harish K, S V <s.v.naga.harish.k@intel.com> > > Sent: Monday, October 4, 2021 11:11 AM > > To: jerinj@marvell.com; Jayatheerthan, Jay > > <jay.jayatheerthan@intel.com> > > Cc: dev@dpdk.org; Kundapura, Ganapati <ganapati.kundapura@intel.com> > > Subject: [PATCH v5 1/5] eventdev/rx_adapter: add event buffer size > > configurability > > > > Currently event buffer is static array with a default size defined > > internally. > > > > To configure event buffer size from application, > > ``rte_event_eth_rx_adapter_create_with_params`` api is added which > > takes ``struct rte_event_eth_rx_adapter_params`` to configure event > > buffer size in addition other params . The event buffer size is > > rounded up for better buffer utilization and performance . In case of > > NULL params argument, default event buffer size is used. 
> > > > Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com> > > Signed-off-by: Ganapati Kundapura <ganapati.kundapura@intel.com> > > > > --- > > v5: > > * reverted queue conf get unit test change > > > > v4: > > * rebased with latest dpdk-next-eventdev branch > > * changed queue conf get unit test > > > > v3: > > * updated documentation and code comments as per review comments. > > * updated new create api test case name with suitable one. > > > > v2: > > * Updated header file and rx adapter documentation as per review > comments. > > * new api name is modified as > rte_event_eth_rx_adapter_create_with_params > > as per review comments. > > * rxa_params pointer argument Value NULL is allowed to represent the > > default values > > > > v1: > > * Initial implementation with documentation and unit tests. > > --- > > --- > > .../prog_guide/event_ethernet_rx_adapter.rst | 7 ++ > > lib/eventdev/rte_event_eth_rx_adapter.c | 98 +++++++++++++++++-- > > lib/eventdev/rte_event_eth_rx_adapter.h | 41 +++++++- > > lib/eventdev/version.map | 2 + > > 4 files changed, 140 insertions(+), 8 deletions(-) > > > > diff --git a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst > > b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst > > index ce23d8a474..8526aecf57 100644 > > --- a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst > > +++ b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst > > @@ -62,6 +62,13 @@ service function and needs to create an event port > > for it. The callback is expected to fill the ``struct > > rte_event_eth_rx_adapter_conf structure`` passed to it. > > > > +If the application desires to control the event buffer size, it can > > +use the ``rte_event_eth_rx_adapter_create_with_params()`` api. The > > +event buffer size is specified using ``struct > rte_event_eth_rx_adapter_params::event_buf_size``. 
> > +The function is passed the event device to be associated with the > > +adapter and port configuration for the adapter to setup an event port > > +if the adapter needs to use a service function. > > + > > Adding Rx Queues to the Adapter Instance > > ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ > > > > diff --git a/lib/eventdev/rte_event_eth_rx_adapter.c > > b/lib/eventdev/rte_event_eth_rx_adapter.c > > index 10491ca07b..606db241b8 100644 > > --- a/lib/eventdev/rte_event_eth_rx_adapter.c > > +++ b/lib/eventdev/rte_event_eth_rx_adapter.c > > @@ -85,7 +85,9 @@ struct rte_eth_event_enqueue_buffer { > > /* Count of events in this buffer */ > > uint16_t count; > > /* Array of events in this buffer */ > > - struct rte_event events[ETH_EVENT_BUFFER_SIZE]; > > + struct rte_event *events; > > + /* size of event buffer */ > > + uint16_t events_size; > > /* Event enqueue happens from head */ > > uint16_t head; > > /* New packets from rte_eth_rx_burst is enqued from tail */ @@ > > -946,7 +948,7 @@ rxa_buffer_mbufs(struct rte_event_eth_rx_adapter > *rx_adapter, > > dropped = 0; > > nb_cb = dev_info->cb_fn(eth_dev_id, rx_queue_id, > > buf->last | > > - (RTE_DIM(buf->events) & ~buf- > >last_mask), > > + (buf->events_size & ~buf->last_mask), > > buf->count >= BATCH_SIZE ? 
> > buf->count - BATCH_SIZE : 0, > > &buf->events[buf->tail], > > @@ -972,7 +974,7 @@ rxa_pkt_buf_available(struct > rte_eth_event_enqueue_buffer *buf) > > uint32_t nb_req = buf->tail + BATCH_SIZE; > > > > if (!buf->last) { > > - if (nb_req <= RTE_DIM(buf->events)) > > + if (nb_req <= buf->events_size) > > return true; > > > > if (buf->head >= BATCH_SIZE) { > > @@ -2206,12 +2208,15 @@ rxa_ctrl(uint8_t id, int start) > > return 0; > > } > > > > -int > > -rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, > > - rte_event_eth_rx_adapter_conf_cb > conf_cb, > > - void *conf_arg) > > +static int > > +rxa_create(uint8_t id, uint8_t dev_id, > > + struct rte_event_eth_rx_adapter_params *rxa_params, > > + rte_event_eth_rx_adapter_conf_cb conf_cb, > > + void *conf_arg) > > { > > struct rte_event_eth_rx_adapter *rx_adapter; > > + struct rte_eth_event_enqueue_buffer *buf; > > + struct rte_event *events; > > int ret; > > int socket_id; > > uint16_t i; > > @@ -2226,6 +2231,7 @@ rte_event_eth_rx_adapter_create_ext(uint8_t > id, > > uint8_t dev_id, > > > > RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, - > EINVAL); > > RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); > > + > > if (conf_cb == NULL) > > return -EINVAL; > > > > @@ -2273,11 +2279,30 @@ > rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, > > rte_free(rx_adapter); > > return -ENOMEM; > > } > > + > > rte_spinlock_init(&rx_adapter->rx_lock); > > + > > for (i = 0; i < RTE_MAX_ETHPORTS; i++) > > rx_adapter->eth_devices[i].dev = &rte_eth_devices[i]; > > > > + /* Rx adapter event buffer allocation */ > > + buf = &rx_adapter->event_enqueue_buffer; > > + buf->events_size = RTE_ALIGN(rxa_params->event_buf_size, > > +BATCH_SIZE); > > Do we need to align event_buf_size again here ? The caller seems to take > care of it. It is redundant, and is removed in v6 patchset. 
> > > + > > + events = rte_zmalloc_socket(rx_adapter->mem_name, > > + buf->events_size * sizeof(*events), > > + 0, socket_id); > > + if (events == NULL) { > > + RTE_EDEV_LOG_ERR("Failed to allocate mem for event > buffer\n"); > > + rte_free(rx_adapter->eth_devices); > > + rte_free(rx_adapter); > > + return -ENOMEM; > > + } > > + > > + rx_adapter->event_enqueue_buffer.events = events; > > + > > event_eth_rx_adapter[id] = rx_adapter; > > + > > if (conf_cb == rxa_default_conf_cb) > > rx_adapter->default_cb_arg = 1; > > > > @@ -2293,6 +2318,61 @@ rte_event_eth_rx_adapter_create_ext(uint8_t > id, uint8_t dev_id, > > return 0; > > } > > > > +int > > +rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, > > + rte_event_eth_rx_adapter_conf_cb > conf_cb, > > + void *conf_arg) > > +{ > > + struct rte_event_eth_rx_adapter_params rxa_params; > > Can initialize rxa_params in case if more fields get added in future that we > don't assign here. Done > > > + > > + /* use default values for adapter params */ > > + rxa_params.event_buf_size = ETH_EVENT_BUFFER_SIZE; > > + > > + return rxa_create(id, dev_id, &rxa_params, conf_cb, conf_arg); } > > + > > +int > > +rte_event_eth_rx_adapter_create_with_params(uint8_t id, uint8_t > dev_id, > > + struct rte_event_port_conf *port_config, > > + struct rte_event_eth_rx_adapter_params > *rxa_params) { > > + struct rte_event_port_conf *pc; > > + int ret; > > + struct rte_event_eth_rx_adapter_params temp_params = {0}; > > + > > + if (port_config == NULL) > > + return -EINVAL; > > + > > + /* use default values if rxa_parmas is NULL */ > > + if (rxa_params == NULL) { > > + rxa_params = &temp_params; > > + rxa_params->event_buf_size = ETH_EVENT_BUFFER_SIZE; > > + } > > + > > + if (rxa_params->event_buf_size == 0) > > + return -EINVAL; > > + > > + pc = rte_malloc(NULL, sizeof(*pc), 0); > > + if (pc == NULL) > > + return -ENOMEM; > > + > > + *pc = *port_config; > > + > > + /* adjust event buff size with BATCH_SIZE used for fetching packets 
> > + * from NIC rx queues to get full buffer utilization and prevent > > + * unnecessary rollovers. > > + */ > > + rxa_params->event_buf_size = RTE_ALIGN(rxa_params- > >event_buf_size, > > + BATCH_SIZE); > > + rxa_params->event_buf_size += BATCH_SIZE + BATCH_SIZE; > > Please add brackets to be more explicit and readable. Done > > > + > > + ret = rxa_create(id, dev_id, rxa_params, rxa_default_conf_cb, pc); > > + if (ret) > > + rte_free(pc); > > + > > + return ret; > > +} > > + > > int > > rte_event_eth_rx_adapter_create(uint8_t id, uint8_t dev_id, > > struct rte_event_port_conf *port_config) @@ -2302,12 > +2382,14 @@ > > rte_event_eth_rx_adapter_create(uint8_t id, uint8_t dev_id, > > > > if (port_config == NULL) > > return -EINVAL; > > + > > RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, - > EINVAL); > > > > pc = rte_malloc(NULL, sizeof(*pc), 0); > > if (pc == NULL) > > return -ENOMEM; > > *pc = *port_config; > > + > > ret = rte_event_eth_rx_adapter_create_ext(id, dev_id, > > rxa_default_conf_cb, > > pc); > > @@ -2336,6 +2418,7 @@ rte_event_eth_rx_adapter_free(uint8_t id) > > if (rx_adapter->default_cb_arg) > > rte_free(rx_adapter->conf_arg); > > rte_free(rx_adapter->eth_devices); > > + rte_free(rx_adapter->event_enqueue_buffer.events); > > rte_free(rx_adapter); > > event_eth_rx_adapter[id] = NULL; > > > > @@ -2711,6 +2794,7 @@ rte_event_eth_rx_adapter_stats_get(uint8_t id, > > > > stats->rx_packets += dev_stats_sum.rx_packets; > > stats->rx_enq_count += dev_stats_sum.rx_enq_count; > > + > > return 0; > > } > > > > diff --git a/lib/eventdev/rte_event_eth_rx_adapter.h > > b/lib/eventdev/rte_event_eth_rx_adapter.h > > index 470543e434..846ca569e9 100644 > > --- a/lib/eventdev/rte_event_eth_rx_adapter.h > > +++ b/lib/eventdev/rte_event_eth_rx_adapter.h > > @@ -26,6 +26,7 @@ > > * The ethernet Rx event adapter's functions are: > > * - rte_event_eth_rx_adapter_create_ext() > > * - rte_event_eth_rx_adapter_create() > > + * - 
rte_event_eth_rx_adapter_create_with_params() > > * - rte_event_eth_rx_adapter_free() > > * - rte_event_eth_rx_adapter_queue_add() > > * - rte_event_eth_rx_adapter_queue_del() > > @@ -37,7 +38,7 @@ > > * > > * The application creates an ethernet to event adapter using > > * rte_event_eth_rx_adapter_create_ext() or > > rte_event_eth_rx_adapter_create() > > - * functions. > > + * or rte_event_eth_rx_adapter_create_with_params() functions. > > * The adapter needs to know which ethernet rx queues to poll for mbufs > as well > > * as event device parameters such as the event queue identifier, event > > * priority and scheduling type that the adapter should use when > > constructing @@ -257,6 +258,17 @@ struct > rte_event_eth_rx_adapter_vector_limits { > > */ > > }; > > > > +/** > > + * A structure to hold adapter config params */ struct > > +rte_event_eth_rx_adapter_params { > > + uint16_t event_buf_size; > > + /**< size of event buffer for the adapter. > > + * This value is rounded up for better buffer utilization > > + * and performance. > > + */ > > +}; > > + > > /** > > * > > * Callback function invoked by the SW adapter before it continues @@ > > -357,6 +369,33 @@ int rte_event_eth_rx_adapter_create_ext(uint8_t id, > > uint8_t dev_id, int rte_event_eth_rx_adapter_create(uint8_t id, uint8_t > dev_id, > > struct rte_event_port_conf *port_config); > > > > +/** > > + * This is a variant of rte_event_eth_rx_adapter_create() with > > +additional > > + * adapter params specified in ``struct > rte_event_eth_rx_adapter_params``. > > + * > > + * @param id > > + * The identifier of the ethernet Rx event adapter. > > + * > > + * @param dev_id > > + * The identifier of the event device to configure. > > + * > > + * @param port_config > > + * Argument of type *rte_event_port_conf* that is passed to the > > +conf_cb > > + * function. > > + * > > + * @param rxa_params > > + * Pointer to struct rte_event_eth_rx_adapter_params. > > + * In case of NULL, default values are used. 
> > + * > > + * @return > > + * - 0: Success > > + * - <0: Error code on failure > > + */ > > +__rte_experimental > > +int rte_event_eth_rx_adapter_create_with_params(uint8_t id, uint8_t > dev_id, > > + struct rte_event_port_conf *port_config, > > + struct rte_event_eth_rx_adapter_params > *rxa_params); > > + > > /** > > * Free an event adapter > > * > > diff --git a/lib/eventdev/version.map b/lib/eventdev/version.map index > > 9f280160fa..7de18497a6 100644 > > --- a/lib/eventdev/version.map > > +++ b/lib/eventdev/version.map > > @@ -138,6 +138,8 @@ EXPERIMENTAL { > > __rte_eventdev_trace_port_setup; > > # added in 20.11 > > rte_event_pmd_pci_probe_named; > > + # added in 21.11 > > + rte_event_eth_rx_adapter_create_with_params; > > > > #added in 21.05 > > rte_event_vector_pool_create; > > -- > > 2.25.1 ^ permalink raw reply [flat|nested] 81+ messages in thread
* [dpdk-dev] [PATCH v6 1/5] eventdev/rx_adapter: add event buffer size configurability 2021-10-04 5:41 ` [dpdk-dev] [PATCH v5 1/5] eventdev/rx_adapter: add event buffer size configurability Naga Harish K S V ` (4 preceding siblings ...) 2021-10-05 7:19 ` [dpdk-dev] [PATCH v5 1/5] eventdev/rx_adapter: add event buffer size configurability Jayatheerthan, Jay @ 2021-10-05 14:38 ` Naga Harish K S V 2021-10-05 14:38 ` [dpdk-dev] [PATCH v6 2/5] test/event: add unit test for Rx adapter Naga Harish K S V ` (4 more replies) 5 siblings, 5 replies; 81+ messages in thread From: Naga Harish K S V @ 2021-10-05 14:38 UTC (permalink / raw) To: jerinj, jay.jayatheerthan; +Cc: dev, Ganapati Kundapura Currently event buffer is static array with a default size defined internally. To configure event buffer size from application, ``rte_event_eth_rx_adapter_create_with_params`` api is added which takes ``struct rte_event_eth_rx_adapter_params`` to configure event buffer size in addition to other params. The event buffer size is rounded up for better buffer utilization and performance. In case of NULL params argument, default event buffer size is used. Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com> Signed-off-by: Ganapati Kundapura <ganapati.kundapura@intel.com> --- v6: * address code style related review comments v5: * reverted queue conf get unit test change v4: * rebased with latest dpdk-next-eventdev branch * changed queue conf get unit test v3: * updated documentation and code comments as per review comments. * updated new create api test case name with suitable one. v2: * Updated header file and rx adapter documentation as per review comments. * new api name is modified as rte_event_eth_rx_adapter_create_with_params as per review comments. * rxa_params pointer argument Value NULL is allowed to represent the default values v1: * Initial implementation with documentation and unit tests. 
--- --- .../prog_guide/event_ethernet_rx_adapter.rst | 7 ++ lib/eventdev/rte_event_eth_rx_adapter.c | 98 +++++++++++++++++-- lib/eventdev/rte_event_eth_rx_adapter.h | 41 +++++++- lib/eventdev/version.map | 2 + 4 files changed, 140 insertions(+), 8 deletions(-) diff --git a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst index ce23d8a474..8526aecf57 100644 --- a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst +++ b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst @@ -62,6 +62,13 @@ service function and needs to create an event port for it. The callback is expected to fill the ``struct rte_event_eth_rx_adapter_conf structure`` passed to it. +If the application desires to control the event buffer size, it can use the +``rte_event_eth_rx_adapter_create_with_params()`` api. The event buffer size is +specified using ``struct rte_event_eth_rx_adapter_params::event_buf_size``. +The function is passed the event device to be associated with the adapter +and port configuration for the adapter to setup an event port if the +adapter needs to use a service function. 
+ Adding Rx Queues to the Adapter Instance ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/lib/eventdev/rte_event_eth_rx_adapter.c b/lib/eventdev/rte_event_eth_rx_adapter.c index 10491ca07b..5ccea168ea 100644 --- a/lib/eventdev/rte_event_eth_rx_adapter.c +++ b/lib/eventdev/rte_event_eth_rx_adapter.c @@ -85,7 +85,9 @@ struct rte_eth_event_enqueue_buffer { /* Count of events in this buffer */ uint16_t count; /* Array of events in this buffer */ - struct rte_event events[ETH_EVENT_BUFFER_SIZE]; + struct rte_event *events; + /* size of event buffer */ + uint16_t events_size; /* Event enqueue happens from head */ uint16_t head; /* New packets from rte_eth_rx_burst is enqued from tail */ @@ -946,7 +948,7 @@ rxa_buffer_mbufs(struct rte_event_eth_rx_adapter *rx_adapter, dropped = 0; nb_cb = dev_info->cb_fn(eth_dev_id, rx_queue_id, buf->last | - (RTE_DIM(buf->events) & ~buf->last_mask), + (buf->events_size & ~buf->last_mask), buf->count >= BATCH_SIZE ? buf->count - BATCH_SIZE : 0, &buf->events[buf->tail], @@ -972,7 +974,7 @@ rxa_pkt_buf_available(struct rte_eth_event_enqueue_buffer *buf) uint32_t nb_req = buf->tail + BATCH_SIZE; if (!buf->last) { - if (nb_req <= RTE_DIM(buf->events)) + if (nb_req <= buf->events_size) return true; if (buf->head >= BATCH_SIZE) { @@ -2206,12 +2208,15 @@ rxa_ctrl(uint8_t id, int start) return 0; } -int -rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, - rte_event_eth_rx_adapter_conf_cb conf_cb, - void *conf_arg) +static int +rxa_create(uint8_t id, uint8_t dev_id, + struct rte_event_eth_rx_adapter_params *rxa_params, + rte_event_eth_rx_adapter_conf_cb conf_cb, + void *conf_arg) { struct rte_event_eth_rx_adapter *rx_adapter; + struct rte_eth_event_enqueue_buffer *buf; + struct rte_event *events; int ret; int socket_id; uint16_t i; @@ -2226,6 +2231,7 @@ rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL); RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); 
+ if (conf_cb == NULL) return -EINVAL; @@ -2273,11 +2279,30 @@ rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, rte_free(rx_adapter); return -ENOMEM; } + rte_spinlock_init(&rx_adapter->rx_lock); + for (i = 0; i < RTE_MAX_ETHPORTS; i++) rx_adapter->eth_devices[i].dev = &rte_eth_devices[i]; + /* Rx adapter event buffer allocation */ + buf = &rx_adapter->event_enqueue_buffer; + buf->events_size = rxa_params->event_buf_size; + + events = rte_zmalloc_socket(rx_adapter->mem_name, + buf->events_size * sizeof(*events), + 0, socket_id); + if (events == NULL) { + RTE_EDEV_LOG_ERR("Failed to allocate mem for event buffer\n"); + rte_free(rx_adapter->eth_devices); + rte_free(rx_adapter); + return -ENOMEM; + } + + rx_adapter->event_enqueue_buffer.events = events; + event_eth_rx_adapter[id] = rx_adapter; + if (conf_cb == rxa_default_conf_cb) rx_adapter->default_cb_arg = 1; @@ -2293,6 +2318,61 @@ rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, return 0; } +int +rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, + rte_event_eth_rx_adapter_conf_cb conf_cb, + void *conf_arg) +{ + struct rte_event_eth_rx_adapter_params rxa_params = {0}; + + /* use default values for adapter params */ + rxa_params.event_buf_size = ETH_EVENT_BUFFER_SIZE; + + return rxa_create(id, dev_id, &rxa_params, conf_cb, conf_arg); +} + +int +rte_event_eth_rx_adapter_create_with_params(uint8_t id, uint8_t dev_id, + struct rte_event_port_conf *port_config, + struct rte_event_eth_rx_adapter_params *rxa_params) +{ + struct rte_event_port_conf *pc; + int ret; + struct rte_event_eth_rx_adapter_params temp_params = {0}; + + if (port_config == NULL) + return -EINVAL; + + /* use default values if rxa_params is NULL */ + if (rxa_params == NULL) { + rxa_params = &temp_params; + rxa_params->event_buf_size = ETH_EVENT_BUFFER_SIZE; + } + + if (rxa_params->event_buf_size == 0) + return -EINVAL; + + pc = rte_malloc(NULL, sizeof(*pc), 0); + if (pc == NULL) + return -ENOMEM; + + *pc = 
*port_config; + + /* adjust event buff size with BATCH_SIZE used for fetching packets + * from NIC rx queues to get full buffer utilization and prevent + * unnecessary rollovers. + */ + rxa_params->event_buf_size = RTE_ALIGN(rxa_params->event_buf_size, + BATCH_SIZE); + rxa_params->event_buf_size += (BATCH_SIZE + BATCH_SIZE); + + ret = rxa_create(id, dev_id, rxa_params, rxa_default_conf_cb, pc); + if (ret) + rte_free(pc); + + return ret; +} + int rte_event_eth_rx_adapter_create(uint8_t id, uint8_t dev_id, struct rte_event_port_conf *port_config) @@ -2302,12 +2382,14 @@ rte_event_eth_rx_adapter_create(uint8_t id, uint8_t dev_id, if (port_config == NULL) return -EINVAL; + RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL); pc = rte_malloc(NULL, sizeof(*pc), 0); if (pc == NULL) return -ENOMEM; *pc = *port_config; + ret = rte_event_eth_rx_adapter_create_ext(id, dev_id, rxa_default_conf_cb, pc); @@ -2336,6 +2418,7 @@ rte_event_eth_rx_adapter_free(uint8_t id) if (rx_adapter->default_cb_arg) rte_free(rx_adapter->conf_arg); rte_free(rx_adapter->eth_devices); + rte_free(rx_adapter->event_enqueue_buffer.events); rte_free(rx_adapter); event_eth_rx_adapter[id] = NULL; @@ -2711,6 +2794,7 @@ rte_event_eth_rx_adapter_stats_get(uint8_t id, stats->rx_packets += dev_stats_sum.rx_packets; stats->rx_enq_count += dev_stats_sum.rx_enq_count; + return 0; } diff --git a/lib/eventdev/rte_event_eth_rx_adapter.h b/lib/eventdev/rte_event_eth_rx_adapter.h index 470543e434..846ca569e9 100644 --- a/lib/eventdev/rte_event_eth_rx_adapter.h +++ b/lib/eventdev/rte_event_eth_rx_adapter.h @@ -26,6 +26,7 @@ * The ethernet Rx event adapter's functions are: * - rte_event_eth_rx_adapter_create_ext() * - rte_event_eth_rx_adapter_create() + * - rte_event_eth_rx_adapter_create_with_params() * - rte_event_eth_rx_adapter_free() * - rte_event_eth_rx_adapter_queue_add() * - rte_event_eth_rx_adapter_queue_del() @@ -37,7 +38,7 @@ * * The application creates an ethernet to event adapter using * 
rte_event_eth_rx_adapter_create_ext() or rte_event_eth_rx_adapter_create() - * functions. + * or rte_event_eth_rx_adapter_create_with_params() functions. * The adapter needs to know which ethernet rx queues to poll for mbufs as well * as event device parameters such as the event queue identifier, event * priority and scheduling type that the adapter should use when constructing @@ -257,6 +258,17 @@ struct rte_event_eth_rx_adapter_vector_limits { */ }; +/** + * A structure to hold adapter config params + */ +struct rte_event_eth_rx_adapter_params { + uint16_t event_buf_size; + /**< size of event buffer for the adapter. + * This value is rounded up for better buffer utilization + * and performance. + */ +}; + /** * * Callback function invoked by the SW adapter before it continues @@ -357,6 +369,33 @@ int rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, int rte_event_eth_rx_adapter_create(uint8_t id, uint8_t dev_id, struct rte_event_port_conf *port_config); +/** + * This is a variant of rte_event_eth_rx_adapter_create() with additional + * adapter params specified in ``struct rte_event_eth_rx_adapter_params``. + * + * @param id + * The identifier of the ethernet Rx event adapter. + * + * @param dev_id + * The identifier of the event device to configure. + * + * @param port_config + * Argument of type *rte_event_port_conf* that is passed to the conf_cb + * function. + * + * @param rxa_params + * Pointer to struct rte_event_eth_rx_adapter_params. + * In case of NULL, default values are used. 
+ * + * @return + * - 0: Success + * - <0: Error code on failure + */ +__rte_experimental +int rte_event_eth_rx_adapter_create_with_params(uint8_t id, uint8_t dev_id, + struct rte_event_port_conf *port_config, + struct rte_event_eth_rx_adapter_params *rxa_params); + /** * Free an event adapter * diff --git a/lib/eventdev/version.map b/lib/eventdev/version.map index 9f280160fa..7de18497a6 100644 --- a/lib/eventdev/version.map +++ b/lib/eventdev/version.map @@ -138,6 +138,8 @@ EXPERIMENTAL { __rte_eventdev_trace_port_setup; # added in 20.11 rte_event_pmd_pci_probe_named; + # added in 21.11 + rte_event_eth_rx_adapter_create_with_params; #added in 21.05 rte_event_vector_pool_create; -- 2.25.1 ^ permalink raw reply [flat|nested] 81+ messages in thread
* [dpdk-dev] [PATCH v6 2/5] test/event: add unit test for Rx adapter 2021-10-05 14:38 ` [dpdk-dev] [PATCH v6 " Naga Harish K S V @ 2021-10-05 14:38 ` Naga Harish K S V 2021-10-05 14:38 ` [dpdk-dev] [PATCH v6 3/5] eventdev/rx_adapter: introduce per queue event buffer Naga Harish K S V ` (3 subsequent siblings) 4 siblings, 0 replies; 81+ messages in thread From: Naga Harish K S V @ 2021-10-05 14:38 UTC (permalink / raw) To: jerinj, jay.jayatheerthan; +Cc: dev this patch adds unit test for rte_event_eth_rx_adapter_create_with_params api and validate all possible input combinations. Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com> --- app/test/test_event_eth_rx_adapter.c | 53 +++++++++++++++++++++++++--- 1 file changed, 49 insertions(+), 4 deletions(-) diff --git a/app/test/test_event_eth_rx_adapter.c b/app/test/test_event_eth_rx_adapter.c index 13664a3a3b..7c2cf0dd70 100644 --- a/app/test/test_event_eth_rx_adapter.c +++ b/app/test/test_event_eth_rx_adapter.c @@ -428,6 +428,50 @@ adapter_create_free(void) return TEST_SUCCESS; } +static int +adapter_create_free_with_params(void) +{ + int err; + + struct rte_event_port_conf rx_p_conf = { + .dequeue_depth = 8, + .enqueue_depth = 8, + .new_event_threshold = 1200, + }; + + struct rte_event_eth_rx_adapter_params rxa_params = { + .event_buf_size = 1024 + }; + + err = rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID, + TEST_DEV_ID, NULL, NULL); + TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err); + + err = rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID, + TEST_DEV_ID, &rx_p_conf, &rxa_params); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + err = rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID, + TEST_DEV_ID, &rx_p_conf, &rxa_params); + TEST_ASSERT(err == -EEXIST, "Expected -EEXIST %d got %d", -EEXIST, err); + + rxa_params.event_buf_size = 0; + err = rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID, + TEST_DEV_ID, &rx_p_conf, &rxa_params); + TEST_ASSERT(err == 
-EINVAL, "Expected -EINVAL got %d", err); + + err = rte_event_eth_rx_adapter_free(TEST_INST_ID); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + err = rte_event_eth_rx_adapter_free(TEST_INST_ID); + TEST_ASSERT(err == -EINVAL, "Expected -EINVAL %d got %d", -EINVAL, err); + + err = rte_event_eth_rx_adapter_free(1); + TEST_ASSERT(err == -EINVAL, "Expected -EINVAL %d got %d", -EINVAL, err); + + return TEST_SUCCESS; +} + static int adapter_queue_add_del(void) { @@ -435,7 +479,7 @@ adapter_queue_add_del(void) struct rte_event ev; uint32_t cap; - struct rte_event_eth_rx_adapter_queue_conf queue_config; + struct rte_event_eth_rx_adapter_queue_conf queue_config = {0}; err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID, TEST_ETHDEV_ID, &cap); @@ -523,7 +567,7 @@ adapter_multi_eth_add_del(void) uint16_t port_index, port_index_base, drv_id = 0; char driver_name[50]; - struct rte_event_eth_rx_adapter_queue_conf queue_config; + struct rte_event_eth_rx_adapter_queue_conf queue_config = {0}; ev.queue_id = 0; ev.sched_type = RTE_SCHED_TYPE_ATOMIC; @@ -594,7 +638,7 @@ adapter_intr_queue_add_del(void) struct rte_event ev; uint32_t cap; uint16_t eth_port; - struct rte_event_eth_rx_adapter_queue_conf queue_config; + struct rte_event_eth_rx_adapter_queue_conf queue_config = {0}; if (!default_params.rx_intr_port_inited) return 0; @@ -687,7 +731,7 @@ adapter_start_stop(void) ev.sched_type = RTE_SCHED_TYPE_ATOMIC; ev.priority = 0; - struct rte_event_eth_rx_adapter_queue_conf queue_config; + struct rte_event_eth_rx_adapter_queue_conf queue_config = {0}; queue_config.rx_queue_flags = 0; if (default_params.caps & @@ -774,6 +818,7 @@ static struct unit_test_suite event_eth_rx_tests = { .teardown = testsuite_teardown, .unit_test_cases = { TEST_CASE_ST(NULL, NULL, adapter_create_free), + TEST_CASE_ST(NULL, NULL, adapter_create_free_with_params), TEST_CASE_ST(adapter_create, adapter_free, adapter_queue_add_del), TEST_CASE_ST(adapter_create, adapter_free, -- 2.25.1 ^ permalink raw reply 
[flat|nested] 81+ messages in thread
* [dpdk-dev] [PATCH v6 3/5] eventdev/rx_adapter: introduce per queue event buffer 2021-10-05 14:38 ` [dpdk-dev] [PATCH v6 " Naga Harish K S V 2021-10-05 14:38 ` [dpdk-dev] [PATCH v6 2/5] test/event: add unit test for Rx adapter Naga Harish K S V @ 2021-10-05 14:38 ` Naga Harish K S V 2021-10-05 14:38 ` [dpdk-dev] [PATCH v6 4/5] eventdev/rx_adapter: implement " Naga Harish K S V ` (2 subsequent siblings) 4 siblings, 0 replies; 81+ messages in thread From: Naga Harish K S V @ 2021-10-05 14:38 UTC (permalink / raw) To: jerinj, jay.jayatheerthan; +Cc: dev To configure per queue event buffer size, application sets ``rte_event_eth_rx_adapter_params::use_queue_event_buf`` flag as true and is passed to ``rte_event_eth_rx_adapter_create_with_params`` api. The per queue event buffer size is populated in ``rte_event_eth_rx_adapter_queue_conf::event_buf_size`` and passed to ``rte_event_eth_rx_adapter_queue_add`` api. Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com> --- .../prog_guide/event_ethernet_rx_adapter.rst | 19 ++++++++++++------- lib/eventdev/rte_event_eth_rx_adapter.h | 4 ++++ 2 files changed, 16 insertions(+), 7 deletions(-) diff --git a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst index 8526aecf57..8b58130fc5 100644 --- a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst +++ b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst @@ -62,12 +62,14 @@ service function and needs to create an event port for it. The callback is expected to fill the ``struct rte_event_eth_rx_adapter_conf structure`` passed to it. -If the application desires to control the event buffer size, it can use the -``rte_event_eth_rx_adapter_create_with_params()`` api. The event buffer size is -specified using ``struct rte_event_eth_rx_adapter_params::event_buf_size``. 
-The function is passed the event device to be associated with the adapter -and port configuration for the adapter to setup an event port if the -adapter needs to use a service function. +If the application desires to control the event buffer size at adapter level, +it can use the ``rte_event_eth_rx_adapter_create_with_params()`` api. The event +buffer size is specified using ``struct rte_event_eth_rx_adapter_params:: +event_buf_size``. To configure the event buffer size at queue level, the boolean +flag ``struct rte_event_eth_rx_adapter_params::use_queue_event_buf`` need to be +set to true. The function is passed the event device to be associated with +the adapter and port configuration for the adapter to setup an event port +if the adapter needs to use a service function. Adding Rx Queues to the Adapter Instance ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -79,7 +81,9 @@ parameter. Event information for packets from this Rx queue is encoded in the ``ev`` field of ``struct rte_event_eth_rx_adapter_queue_conf``. The servicing_weight member of the struct rte_event_eth_rx_adapter_queue_conf is the relative polling frequency of the Rx queue and is applicable when the -adapter uses a service core function. +adapter uses a service core function. The applications can configure queue +event buffer size in ``struct rte_event_eth_rx_adapter_queue_conf::event_buf_size`` +parameter. .. code-block:: c @@ -90,6 +94,7 @@ adapter uses a service core function. 
queue_config.rx_queue_flags = 0; queue_config.ev = ev; queue_config.servicing_weight = 1; + queue_config.event_buf_size = 1024; err = rte_event_eth_rx_adapter_queue_add(id, eth_dev_id, diff --git a/lib/eventdev/rte_event_eth_rx_adapter.h b/lib/eventdev/rte_event_eth_rx_adapter.h index 846ca569e9..70ca427d66 100644 --- a/lib/eventdev/rte_event_eth_rx_adapter.h +++ b/lib/eventdev/rte_event_eth_rx_adapter.h @@ -200,6 +200,8 @@ struct rte_event_eth_rx_adapter_queue_conf { * Valid when RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR flag is set in * @see rte_event_eth_rx_adapter_queue_conf::rx_queue_flags. */ + uint16_t event_buf_size; + /**< event buffer size for this queue */ }; /** @@ -267,6 +269,8 @@ struct rte_event_eth_rx_adapter_params { * This value is rounded up for better buffer utilization * and performance. */ + bool use_queue_event_buf; + /**< flag to indicate that event buffer is separate for each queue */ }; /** -- 2.25.1 ^ permalink raw reply [flat|nested] 81+ messages in thread
* [dpdk-dev] [PATCH v6 4/5] eventdev/rx_adapter: implement per queue event buffer 2021-10-05 14:38 ` [dpdk-dev] [PATCH v6 " Naga Harish K S V 2021-10-05 14:38 ` [dpdk-dev] [PATCH v6 2/5] test/event: add unit test for Rx adapter Naga Harish K S V 2021-10-05 14:38 ` [dpdk-dev] [PATCH v6 3/5] eventdev/rx_adapter: introduce per queue event buffer Naga Harish K S V @ 2021-10-05 14:38 ` Naga Harish K S V 2021-10-05 14:38 ` [dpdk-dev] [PATCH v6 5/5] test/event: add unit test for Rx adapter Naga Harish K S V 2021-10-06 4:02 ` [dpdk-dev] [PATCH v7 1/5] eventdev/rx_adapter: add event buffer size configurability Naga Harish K S V 4 siblings, 0 replies; 81+ messages in thread From: Naga Harish K S V @ 2021-10-05 14:38 UTC (permalink / raw) To: jerinj, jay.jayatheerthan; +Cc: dev this patch implement the per queue event buffer with required validations. Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com> --- lib/eventdev/rte_event_eth_rx_adapter.c | 192 ++++++++++++++++++------ 1 file changed, 143 insertions(+), 49 deletions(-) diff --git a/lib/eventdev/rte_event_eth_rx_adapter.c b/lib/eventdev/rte_event_eth_rx_adapter.c index 5ccea168ea..9391e73265 100644 --- a/lib/eventdev/rte_event_eth_rx_adapter.c +++ b/lib/eventdev/rte_event_eth_rx_adapter.c @@ -102,10 +102,12 @@ struct rte_event_eth_rx_adapter { uint8_t rss_key_be[RSS_KEY_SIZE]; /* Event device identifier */ uint8_t eventdev_id; - /* Per ethernet device structure */ - struct eth_device_info *eth_devices; /* Event port identifier */ uint8_t event_port_id; + /* Flag indicating per rxq event buffer */ + bool use_queue_event_buf; + /* Per ethernet device structure */ + struct eth_device_info *eth_devices; /* Lock to serialize config updates with service function */ rte_spinlock_t rx_lock; /* Max mbufs processed in any service function invocation */ @@ -241,6 +243,7 @@ struct eth_rx_queue_info { uint32_t flow_id_mask; /* Set to ~0 if app provides flow id else 0 */ uint64_t event; struct eth_rx_vector_data 
vector_data; + struct rte_eth_event_enqueue_buffer *event_buf; }; static struct rte_event_eth_rx_adapter **event_eth_rx_adapter; @@ -262,6 +265,22 @@ rxa_validate_id(uint8_t id) return id < RTE_EVENT_ETH_RX_ADAPTER_MAX_INSTANCE; } +static inline struct rte_eth_event_enqueue_buffer * +rxa_event_buf_get(struct rte_event_eth_rx_adapter *rx_adapter, + uint16_t eth_dev_id, uint16_t rx_queue_id) +{ + struct rte_eth_event_enqueue_buffer *buf = NULL; + + if (rx_adapter->use_queue_event_buf) { + struct eth_device_info *dev_info = + &rx_adapter->eth_devices[eth_dev_id]; + buf = dev_info->rx_queue[rx_queue_id].event_buf; + } else + buf = &rx_adapter->event_enqueue_buffer; + + return buf; +} + #define RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) do { \ if (!rxa_validate_id(id)) { \ RTE_EDEV_LOG_ERR("Invalid eth Rx adapter id = %d\n", id); \ @@ -767,10 +786,9 @@ rxa_enq_block_end_ts(struct rte_event_eth_rx_adapter *rx_adapter, /* Enqueue buffered events to event device */ static inline uint16_t -rxa_flush_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter) +rxa_flush_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter, + struct rte_eth_event_enqueue_buffer *buf) { - struct rte_eth_event_enqueue_buffer *buf = - &rx_adapter->event_enqueue_buffer; struct rte_event_eth_rx_adapter_stats *stats = &rx_adapter->stats; uint16_t count = buf->last ? 
buf->last - buf->head : buf->count; @@ -888,15 +906,14 @@ rxa_buffer_mbufs(struct rte_event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id, uint16_t rx_queue_id, struct rte_mbuf **mbufs, - uint16_t num) + uint16_t num, + struct rte_eth_event_enqueue_buffer *buf) { uint32_t i; struct eth_device_info *dev_info = &rx_adapter->eth_devices[eth_dev_id]; struct eth_rx_queue_info *eth_rx_queue_info = &dev_info->rx_queue[rx_queue_id]; - struct rte_eth_event_enqueue_buffer *buf = - &rx_adapter->event_enqueue_buffer; uint16_t new_tail = buf->tail; uint64_t event = eth_rx_queue_info->event; uint32_t flow_id_mask = eth_rx_queue_info->flow_id_mask; @@ -995,11 +1012,10 @@ rxa_eth_rx(struct rte_event_eth_rx_adapter *rx_adapter, uint16_t queue_id, uint32_t rx_count, uint32_t max_rx, - int *rxq_empty) + int *rxq_empty, + struct rte_eth_event_enqueue_buffer *buf) { struct rte_mbuf *mbufs[BATCH_SIZE]; - struct rte_eth_event_enqueue_buffer *buf = - &rx_adapter->event_enqueue_buffer; struct rte_event_eth_rx_adapter_stats *stats = &rx_adapter->stats; uint16_t n; @@ -1012,7 +1028,7 @@ rxa_eth_rx(struct rte_event_eth_rx_adapter *rx_adapter, */ while (rxa_pkt_buf_available(buf)) { if (buf->count >= BATCH_SIZE) - rxa_flush_event_buffer(rx_adapter); + rxa_flush_event_buffer(rx_adapter, buf); stats->rx_poll_count++; n = rte_eth_rx_burst(port_id, queue_id, mbufs, BATCH_SIZE); @@ -1021,14 +1037,14 @@ rxa_eth_rx(struct rte_event_eth_rx_adapter *rx_adapter, *rxq_empty = 1; break; } - rxa_buffer_mbufs(rx_adapter, port_id, queue_id, mbufs, n); + rxa_buffer_mbufs(rx_adapter, port_id, queue_id, mbufs, n, buf); nb_rx += n; if (rx_count + nb_rx > max_rx) break; } if (buf->count > 0) - rxa_flush_event_buffer(rx_adapter); + rxa_flush_event_buffer(rx_adapter, buf); return nb_rx; } @@ -1169,7 +1185,7 @@ rxa_intr_ring_dequeue(struct rte_event_eth_rx_adapter *rx_adapter) ring_lock = &rx_adapter->intr_ring_lock; if (buf->count >= BATCH_SIZE) - rxa_flush_event_buffer(rx_adapter); + 
rxa_flush_event_buffer(rx_adapter, buf); while (rxa_pkt_buf_available(buf)) { struct eth_device_info *dev_info; @@ -1221,7 +1237,7 @@ rxa_intr_ring_dequeue(struct rte_event_eth_rx_adapter *rx_adapter) continue; n = rxa_eth_rx(rx_adapter, port, i, nb_rx, rx_adapter->max_nb_rx, - &rxq_empty); + &rxq_empty, buf); nb_rx += n; enq_buffer_full = !rxq_empty && n == 0; @@ -1242,7 +1258,7 @@ rxa_intr_ring_dequeue(struct rte_event_eth_rx_adapter *rx_adapter) } else { n = rxa_eth_rx(rx_adapter, port, queue, nb_rx, rx_adapter->max_nb_rx, - &rxq_empty); + &rxq_empty, buf); rx_adapter->qd_valid = !rxq_empty; nb_rx += n; if (nb_rx > rx_adapter->max_nb_rx) @@ -1273,13 +1289,12 @@ rxa_poll(struct rte_event_eth_rx_adapter *rx_adapter) { uint32_t num_queue; uint32_t nb_rx = 0; - struct rte_eth_event_enqueue_buffer *buf; + struct rte_eth_event_enqueue_buffer *buf = NULL; uint32_t wrr_pos; uint32_t max_nb_rx; wrr_pos = rx_adapter->wrr_pos; max_nb_rx = rx_adapter->max_nb_rx; - buf = &rx_adapter->event_enqueue_buffer; /* Iterate through a WRR sequence */ for (num_queue = 0; num_queue < rx_adapter->wrr_len; num_queue++) { @@ -1287,24 +1302,31 @@ rxa_poll(struct rte_event_eth_rx_adapter *rx_adapter) uint16_t qid = rx_adapter->eth_rx_poll[poll_idx].eth_rx_qid; uint16_t d = rx_adapter->eth_rx_poll[poll_idx].eth_dev_id; + buf = rxa_event_buf_get(rx_adapter, d, qid); + /* Don't do a batch dequeue from the rx queue if there isn't * enough space in the enqueue buffer. 
*/ if (buf->count >= BATCH_SIZE) - rxa_flush_event_buffer(rx_adapter); + rxa_flush_event_buffer(rx_adapter, buf); if (!rxa_pkt_buf_available(buf)) { - rx_adapter->wrr_pos = wrr_pos; - return nb_rx; + if (rx_adapter->use_queue_event_buf) + goto poll_next_entry; + else { + rx_adapter->wrr_pos = wrr_pos; + return nb_rx; + } } nb_rx += rxa_eth_rx(rx_adapter, d, qid, nb_rx, max_nb_rx, - NULL); + NULL, buf); if (nb_rx > max_nb_rx) { rx_adapter->wrr_pos = (wrr_pos + 1) % rx_adapter->wrr_len; break; } +poll_next_entry: if (++wrr_pos == rx_adapter->wrr_len) wrr_pos = 0; } @@ -1315,12 +1337,13 @@ static void rxa_vector_expire(struct eth_rx_vector_data *vec, void *arg) { struct rte_event_eth_rx_adapter *rx_adapter = arg; - struct rte_eth_event_enqueue_buffer *buf = - &rx_adapter->event_enqueue_buffer; + struct rte_eth_event_enqueue_buffer *buf = NULL; struct rte_event *ev; + buf = rxa_event_buf_get(rx_adapter, vec->port, vec->queue); + if (buf->count) - rxa_flush_event_buffer(rx_adapter); + rxa_flush_event_buffer(rx_adapter, buf); if (vec->vector_ev->nb_elem == 0) return; @@ -1947,9 +1970,16 @@ rxa_sw_del(struct rte_event_eth_rx_adapter *rx_adapter, rx_adapter->num_rx_intr -= intrq; dev_info->nb_rx_intr -= intrq; dev_info->nb_shared_intr -= intrq && sintrq; + if (rx_adapter->use_queue_event_buf) { + struct rte_eth_event_enqueue_buffer *event_buf = + dev_info->rx_queue[rx_queue_id].event_buf; + rte_free(event_buf->events); + rte_free(event_buf); + dev_info->rx_queue[rx_queue_id].event_buf = NULL; + } } -static void +static int rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter, struct eth_device_info *dev_info, int32_t rx_queue_id, @@ -1961,15 +1991,21 @@ rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter, int intrq; int sintrq; struct rte_event *qi_ev; + struct rte_eth_event_enqueue_buffer *new_rx_buf = NULL; + uint16_t eth_dev_id = dev_info->dev->data->port_id; + int ret; if (rx_queue_id == -1) { uint16_t nb_rx_queues; uint16_t i; nb_rx_queues = 
dev_info->dev->data->nb_rx_queues; - for (i = 0; i < nb_rx_queues; i++) - rxa_add_queue(rx_adapter, dev_info, i, conf); - return; + for (i = 0; i < nb_rx_queues; i++) { + ret = rxa_add_queue(rx_adapter, dev_info, i, conf); + if (ret) + return ret; + } + return 0; } pollq = rxa_polled_queue(dev_info, rx_queue_id); @@ -2032,6 +2068,37 @@ rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter, dev_info->next_q_idx = 0; } } + + if (!rx_adapter->use_queue_event_buf) + return 0; + + new_rx_buf = rte_zmalloc_socket("rx_buffer_meta", + sizeof(*new_rx_buf), 0, + rte_eth_dev_socket_id(eth_dev_id)); + if (new_rx_buf == NULL) { + RTE_EDEV_LOG_ERR("Failed to allocate event buffer meta for " + "dev_id: %d queue_id: %d", + eth_dev_id, rx_queue_id); + return -ENOMEM; + } + + new_rx_buf->events_size = RTE_ALIGN(conf->event_buf_size, BATCH_SIZE); + new_rx_buf->events_size += (2 * BATCH_SIZE); + new_rx_buf->events = rte_zmalloc_socket("rx_buffer", + sizeof(struct rte_event) * + new_rx_buf->events_size, 0, + rte_eth_dev_socket_id(eth_dev_id)); + if (new_rx_buf->events == NULL) { + rte_free(new_rx_buf); + RTE_EDEV_LOG_ERR("Failed to allocate event buffer for " + "dev_id: %d queue_id: %d", + eth_dev_id, rx_queue_id); + return -ENOMEM; + } + + queue_info->event_buf = new_rx_buf; + + return 0; } static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter, @@ -2060,6 +2127,16 @@ static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter, temp_conf.servicing_weight = 1; } queue_conf = &temp_conf; + + if (queue_conf->servicing_weight == 0 && + rx_adapter->use_queue_event_buf) { + + RTE_EDEV_LOG_ERR("Use of queue level event buffer " + "not supported for interrupt queues " + "dev_id: %d queue_id: %d", + eth_dev_id, rx_queue_id); + return -EINVAL; + } } nb_rx_queues = dev_info->dev->data->nb_rx_queues; @@ -2139,7 +2216,9 @@ static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter, - rxa_add_queue(rx_adapter, dev_info, rx_queue_id, queue_conf); + ret = 
rxa_add_queue(rx_adapter, dev_info, rx_queue_id, queue_conf); + if (ret) + goto err_free_rxqueue; rxa_calc_wrr_sequence(rx_adapter, rx_poll, rx_wrr); rte_free(rx_adapter->eth_rx_poll); @@ -2160,7 +2239,7 @@ static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter, rte_free(rx_poll); rte_free(rx_wrr); - return 0; + return ret; } static int @@ -2286,20 +2365,25 @@ rxa_create(uint8_t id, uint8_t dev_id, rx_adapter->eth_devices[i].dev = &rte_eth_devices[i]; /* Rx adapter event buffer allocation */ - buf = &rx_adapter->event_enqueue_buffer; - buf->events_size = rxa_params->event_buf_size; - - events = rte_zmalloc_socket(rx_adapter->mem_name, - buf->events_size * sizeof(*events), - 0, socket_id); - if (events == NULL) { - RTE_EDEV_LOG_ERR("Failed to allocate mem for event buffer\n"); - rte_free(rx_adapter->eth_devices); - rte_free(rx_adapter); - return -ENOMEM; - } + rx_adapter->use_queue_event_buf = rxa_params->use_queue_event_buf; + + if (!rx_adapter->use_queue_event_buf) { + buf = &rx_adapter->event_enqueue_buffer; + buf->events_size = rxa_params->event_buf_size; + + events = rte_zmalloc_socket(rx_adapter->mem_name, + buf->events_size * sizeof(*events), + 0, socket_id); + if (events == NULL) { + RTE_EDEV_LOG_ERR("Failed to allocate memory " + "for adapter event buffer"); + rte_free(rx_adapter->eth_devices); + rte_free(rx_adapter); + return -ENOMEM; + } - rx_adapter->event_enqueue_buffer.events = events; + rx_adapter->event_enqueue_buffer.events = events; + } event_eth_rx_adapter[id] = rx_adapter; @@ -2327,6 +2411,7 @@ rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, /* use default values for adapter params */ rxa_params.event_buf_size = ETH_EVENT_BUFFER_SIZE; + rxa_params.use_queue_event_buf = false; return rxa_create(id, dev_id, &rxa_params, conf_cb, conf_arg); } @@ -2347,9 +2432,9 @@ rte_event_eth_rx_adapter_create_with_params(uint8_t id, uint8_t dev_id, if (rxa_params == NULL) { rxa_params = &temp_params; rxa_params->event_buf_size = 
ETH_EVENT_BUFFER_SIZE; - } - - if (rxa_params->event_buf_size == 0) + rxa_params->use_queue_event_buf = false; + } else if ((!rxa_params->use_queue_event_buf && + rxa_params->event_buf_size == 0)) return -EINVAL; pc = rte_malloc(NULL, sizeof(*pc), 0); @@ -2418,7 +2503,8 @@ rte_event_eth_rx_adapter_free(uint8_t id) if (rx_adapter->default_cb_arg) rte_free(rx_adapter->conf_arg); rte_free(rx_adapter->eth_devices); - rte_free(rx_adapter->event_enqueue_buffer.events); + if (!rx_adapter->use_queue_event_buf) + rte_free(rx_adapter->event_enqueue_buffer.events); rte_free(rx_adapter); event_eth_rx_adapter[id] = NULL; @@ -2522,6 +2608,14 @@ rte_event_eth_rx_adapter_queue_add(uint8_t id, return -EINVAL; } + if ((rx_adapter->use_queue_event_buf && + queue_conf->event_buf_size == 0) || + (!rx_adapter->use_queue_event_buf && + queue_conf->event_buf_size != 0)) { + RTE_EDEV_LOG_ERR("Invalid Event buffer size for the queue"); + return -EINVAL; + } + dev_info = &rx_adapter->eth_devices[eth_dev_id]; if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) { -- 2.25.1 ^ permalink raw reply [flat|nested] 81+ messages in thread
* [dpdk-dev] [PATCH v6 5/5] test/event: add unit test for Rx adapter 2021-10-05 14:38 ` [dpdk-dev] [PATCH v6 " Naga Harish K S V ` (2 preceding siblings ...) 2021-10-05 14:38 ` [dpdk-dev] [PATCH v6 4/5] eventdev/rx_adapter: implement " Naga Harish K S V @ 2021-10-05 14:38 ` Naga Harish K S V 2021-10-06 4:02 ` [dpdk-dev] [PATCH v7 1/5] eventdev/rx_adapter: add event buffer size configurability Naga Harish K S V 4 siblings, 0 replies; 81+ messages in thread From: Naga Harish K S V @ 2021-10-05 14:38 UTC (permalink / raw) To: jerinj, jay.jayatheerthan; +Cc: dev this patch adds unit tests for checking per rx queue event buffer feature using rte_event_eth_rx_adapter_queue_add api. Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com> --- app/test/test_event_eth_rx_adapter.c | 86 ++++++++++++++++++++++++++++ 1 file changed, 86 insertions(+) diff --git a/app/test/test_event_eth_rx_adapter.c b/app/test/test_event_eth_rx_adapter.c index 7c2cf0dd70..cf3c989efb 100644 --- a/app/test/test_event_eth_rx_adapter.c +++ b/app/test/test_event_eth_rx_adapter.c @@ -387,6 +387,90 @@ adapter_create(void) return err; } +static int +adapter_create_with_params(void) +{ + int err; + struct rte_event_dev_info dev_info; + struct rte_event_port_conf rx_p_conf; + struct rte_event_eth_rx_adapter_params rxa_params; + + memset(&rx_p_conf, 0, sizeof(rx_p_conf)); + + err = rte_event_dev_info_get(TEST_DEV_ID, &dev_info); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + rx_p_conf.new_event_threshold = dev_info.max_num_events; + rx_p_conf.dequeue_depth = dev_info.max_event_port_dequeue_depth; + rx_p_conf.enqueue_depth = dev_info.max_event_port_enqueue_depth; + + rxa_params.use_queue_event_buf = false; + rxa_params.event_buf_size = 0; + + err = rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID, + TEST_DEV_ID, &rx_p_conf, &rxa_params); + TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err); + + rxa_params.use_queue_event_buf = true; + + err = 
rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID, + TEST_DEV_ID, &rx_p_conf, &rxa_params); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + err = rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID, + TEST_DEV_ID, &rx_p_conf, &rxa_params); + TEST_ASSERT(err == -EEXIST, "Expected -EEXIST got %d", err); + + return TEST_SUCCESS; +} + +static int +adapter_queue_event_buf_test(void) +{ + int err; + struct rte_event ev; + uint32_t cap; + + struct rte_event_eth_rx_adapter_queue_conf queue_config = {0}; + + err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID, TEST_ETHDEV_ID, + &cap); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + ev.queue_id = 0; + ev.sched_type = RTE_SCHED_TYPE_ATOMIC; + ev.priority = 0; + + queue_config.rx_queue_flags = 0; + if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID) { + ev.flow_id = 1; + queue_config.rx_queue_flags = + RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID; + } + queue_config.ev = ev; + queue_config.servicing_weight = 1; + queue_config.event_buf_size = 0; + + err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID, + TEST_ETHDEV_ID, 0, + &queue_config); + TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err); + + queue_config.event_buf_size = 1024; + + err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID, + TEST_ETHDEV_ID, 0, + &queue_config); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID, + TEST_ETHDEV_ID, + 0); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + return TEST_SUCCESS; +} + static void adapter_free(void) { @@ -826,6 +910,8 @@ static struct unit_test_suite event_eth_rx_tests = { TEST_CASE_ST(adapter_create, adapter_free, adapter_start_stop), TEST_CASE_ST(adapter_create, adapter_free, adapter_stats), TEST_CASE_ST(adapter_create, adapter_free, adapter_queue_conf), + TEST_CASE_ST(adapter_create_with_params, adapter_free, + adapter_queue_event_buf_test), TEST_CASES_END() /**< NULL terminate unit test array */ } }; -- 2.25.1 ^ 
permalink raw reply [flat|nested] 81+ messages in thread
* [dpdk-dev] [PATCH v7 1/5] eventdev/rx_adapter: add event buffer size configurability 2021-10-05 14:38 ` [dpdk-dev] [PATCH v6 " Naga Harish K S V ` (3 preceding siblings ...) 2021-10-05 14:38 ` [dpdk-dev] [PATCH v6 5/5] test/event: add unit test for Rx adapter Naga Harish K S V @ 2021-10-06 4:02 ` Naga Harish K S V 2021-10-06 4:02 ` [dpdk-dev] [PATCH v7 2/5] test/event: add unit test for Rx adapter Naga Harish K S V ` (6 more replies) 4 siblings, 7 replies; 81+ messages in thread From: Naga Harish K S V @ 2021-10-06 4:02 UTC (permalink / raw) To: jerinj, jay.jayatheerthan; +Cc: dev, Ganapati Kundapura Currently event buffer is static array with a default size defined internally. To configure event buffer size from application, ``rte_event_eth_rx_adapter_create_with_params`` api is added which takes ``struct rte_event_eth_rx_adapter_params`` to configure event buffer size in addition to other params. The event buffer size is rounded up for better buffer utilization and performance. In case of NULL params argument, default event buffer size is used. Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com> Signed-off-by: Ganapati Kundapura <ganapati.kundapura@intel.com> --- v7: * added additional validation check in adapter_create_with_params API as per review comments v6: * address code style related review comments v5: * reverted queue conf get unit test change v4: * rebased with latest dpdk-next-eventdev branch * changed queue conf get unit test v3: * updated documentation and code comments as per review comments. * updated new create api test case name with suitable one. v2: * Updated header file and rx adapter documentation as per review comments. * new api name is modified as rte_event_eth_rx_adapter_create_with_params as per review comments. * rxa_params pointer argument Value NULL is allowed to represent the default values v1: * Initial implementation with documentation and unit tests. 
--- --- .../prog_guide/event_ethernet_rx_adapter.rst | 7 ++ lib/eventdev/rte_event_eth_rx_adapter.c | 98 +++++++++++++++++-- lib/eventdev/rte_event_eth_rx_adapter.h | 41 +++++++- lib/eventdev/version.map | 2 + 4 files changed, 140 insertions(+), 8 deletions(-) diff --git a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst index ce23d8a474..8526aecf57 100644 --- a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst +++ b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst @@ -62,6 +62,13 @@ service function and needs to create an event port for it. The callback is expected to fill the ``struct rte_event_eth_rx_adapter_conf structure`` passed to it. +If the application desires to control the event buffer size, it can use the +``rte_event_eth_rx_adapter_create_with_params()`` api. The event buffer size is +specified using ``struct rte_event_eth_rx_adapter_params::event_buf_size``. +The function is passed the event device to be associated with the adapter +and port configuration for the adapter to setup an event port if the +adapter needs to use a service function. 
+ Adding Rx Queues to the Adapter Instance ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/lib/eventdev/rte_event_eth_rx_adapter.c b/lib/eventdev/rte_event_eth_rx_adapter.c index 10491ca07b..5ccea168ea 100644 --- a/lib/eventdev/rte_event_eth_rx_adapter.c +++ b/lib/eventdev/rte_event_eth_rx_adapter.c @@ -85,7 +85,9 @@ struct rte_eth_event_enqueue_buffer { /* Count of events in this buffer */ uint16_t count; /* Array of events in this buffer */ - struct rte_event events[ETH_EVENT_BUFFER_SIZE]; + struct rte_event *events; + /* size of event buffer */ + uint16_t events_size; /* Event enqueue happens from head */ uint16_t head; /* New packets from rte_eth_rx_burst is enqued from tail */ @@ -946,7 +948,7 @@ rxa_buffer_mbufs(struct rte_event_eth_rx_adapter *rx_adapter, dropped = 0; nb_cb = dev_info->cb_fn(eth_dev_id, rx_queue_id, buf->last | - (RTE_DIM(buf->events) & ~buf->last_mask), + (buf->events_size & ~buf->last_mask), buf->count >= BATCH_SIZE ? buf->count - BATCH_SIZE : 0, &buf->events[buf->tail], @@ -972,7 +974,7 @@ rxa_pkt_buf_available(struct rte_eth_event_enqueue_buffer *buf) uint32_t nb_req = buf->tail + BATCH_SIZE; if (!buf->last) { - if (nb_req <= RTE_DIM(buf->events)) + if (nb_req <= buf->events_size) return true; if (buf->head >= BATCH_SIZE) { @@ -2206,12 +2208,15 @@ rxa_ctrl(uint8_t id, int start) return 0; } -int -rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, - rte_event_eth_rx_adapter_conf_cb conf_cb, - void *conf_arg) +static int +rxa_create(uint8_t id, uint8_t dev_id, + struct rte_event_eth_rx_adapter_params *rxa_params, + rte_event_eth_rx_adapter_conf_cb conf_cb, + void *conf_arg) { struct rte_event_eth_rx_adapter *rx_adapter; + struct rte_eth_event_enqueue_buffer *buf; + struct rte_event *events; int ret; int socket_id; uint16_t i; @@ -2226,6 +2231,7 @@ rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL); RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); 
+ if (conf_cb == NULL) return -EINVAL; @@ -2273,11 +2279,30 @@ rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, rte_free(rx_adapter); return -ENOMEM; } + rte_spinlock_init(&rx_adapter->rx_lock); + for (i = 0; i < RTE_MAX_ETHPORTS; i++) rx_adapter->eth_devices[i].dev = &rte_eth_devices[i]; + /* Rx adapter event buffer allocation */ + buf = &rx_adapter->event_enqueue_buffer; + buf->events_size = rxa_params->event_buf_size; + + events = rte_zmalloc_socket(rx_adapter->mem_name, + buf->events_size * sizeof(*events), + 0, socket_id); + if (events == NULL) { + RTE_EDEV_LOG_ERR("Failed to allocate mem for event buffer\n"); + rte_free(rx_adapter->eth_devices); + rte_free(rx_adapter); + return -ENOMEM; + } + + rx_adapter->event_enqueue_buffer.events = events; + event_eth_rx_adapter[id] = rx_adapter; + if (conf_cb == rxa_default_conf_cb) rx_adapter->default_cb_arg = 1; @@ -2293,6 +2318,61 @@ rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, return 0; } +int +rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, + rte_event_eth_rx_adapter_conf_cb conf_cb, + void *conf_arg) +{ + struct rte_event_eth_rx_adapter_params rxa_params = {0}; + + /* use default values for adapter params */ + rxa_params.event_buf_size = ETH_EVENT_BUFFER_SIZE; + + return rxa_create(id, dev_id, &rxa_params, conf_cb, conf_arg); +} + +int +rte_event_eth_rx_adapter_create_with_params(uint8_t id, uint8_t dev_id, + struct rte_event_port_conf *port_config, + struct rte_event_eth_rx_adapter_params *rxa_params) +{ + struct rte_event_port_conf *pc; + int ret; + struct rte_event_eth_rx_adapter_params temp_params = {0}; + + if (port_config == NULL) + return -EINVAL; + + /* use default values if rxa_params is NULL */ + if (rxa_params == NULL) { + rxa_params = &temp_params; + rxa_params->event_buf_size = ETH_EVENT_BUFFER_SIZE; + } + + if (rxa_params->event_buf_size == 0) + return -EINVAL; + + pc = rte_malloc(NULL, sizeof(*pc), 0); + if (pc == NULL) + return -ENOMEM; + + *pc = 
*port_config; + + /* adjust event buff size with BATCH_SIZE used for fetching packets + * from NIC rx queues to get full buffer utilization and prevent + * unnecessary rollovers. + */ + rxa_params->event_buf_size = RTE_ALIGN(rxa_params->event_buf_size, + BATCH_SIZE); + rxa_params->event_buf_size += (BATCH_SIZE + BATCH_SIZE); + + ret = rxa_create(id, dev_id, rxa_params, rxa_default_conf_cb, pc); + if (ret) + rte_free(pc); + + return ret; +} + int rte_event_eth_rx_adapter_create(uint8_t id, uint8_t dev_id, struct rte_event_port_conf *port_config) @@ -2302,12 +2382,14 @@ rte_event_eth_rx_adapter_create(uint8_t id, uint8_t dev_id, if (port_config == NULL) return -EINVAL; + RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL); pc = rte_malloc(NULL, sizeof(*pc), 0); if (pc == NULL) return -ENOMEM; *pc = *port_config; + ret = rte_event_eth_rx_adapter_create_ext(id, dev_id, rxa_default_conf_cb, pc); @@ -2336,6 +2418,7 @@ rte_event_eth_rx_adapter_free(uint8_t id) if (rx_adapter->default_cb_arg) rte_free(rx_adapter->conf_arg); rte_free(rx_adapter->eth_devices); + rte_free(rx_adapter->event_enqueue_buffer.events); rte_free(rx_adapter); event_eth_rx_adapter[id] = NULL; @@ -2711,6 +2794,7 @@ rte_event_eth_rx_adapter_stats_get(uint8_t id, stats->rx_packets += dev_stats_sum.rx_packets; stats->rx_enq_count += dev_stats_sum.rx_enq_count; + return 0; } diff --git a/lib/eventdev/rte_event_eth_rx_adapter.h b/lib/eventdev/rte_event_eth_rx_adapter.h index 470543e434..846ca569e9 100644 --- a/lib/eventdev/rte_event_eth_rx_adapter.h +++ b/lib/eventdev/rte_event_eth_rx_adapter.h @@ -26,6 +26,7 @@ * The ethernet Rx event adapter's functions are: * - rte_event_eth_rx_adapter_create_ext() * - rte_event_eth_rx_adapter_create() + * - rte_event_eth_rx_adapter_create_with_params() * - rte_event_eth_rx_adapter_free() * - rte_event_eth_rx_adapter_queue_add() * - rte_event_eth_rx_adapter_queue_del() @@ -37,7 +38,7 @@ * * The application creates an ethernet to event adapter using * 
rte_event_eth_rx_adapter_create_ext() or rte_event_eth_rx_adapter_create() - * functions. + * or rte_event_eth_rx_adapter_create_with_params() functions. * The adapter needs to know which ethernet rx queues to poll for mbufs as well * as event device parameters such as the event queue identifier, event * priority and scheduling type that the adapter should use when constructing @@ -257,6 +258,17 @@ struct rte_event_eth_rx_adapter_vector_limits { */ }; +/** + * A structure to hold adapter config params + */ +struct rte_event_eth_rx_adapter_params { + uint16_t event_buf_size; + /**< size of event buffer for the adapter. + * This value is rounded up for better buffer utilization + * and performance. + */ +}; + /** * * Callback function invoked by the SW adapter before it continues @@ -357,6 +369,33 @@ int rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, int rte_event_eth_rx_adapter_create(uint8_t id, uint8_t dev_id, struct rte_event_port_conf *port_config); +/** + * This is a variant of rte_event_eth_rx_adapter_create() with additional + * adapter params specified in ``struct rte_event_eth_rx_adapter_params``. + * + * @param id + * The identifier of the ethernet Rx event adapter. + * + * @param dev_id + * The identifier of the event device to configure. + * + * @param port_config + * Argument of type *rte_event_port_conf* that is passed to the conf_cb + * function. + * + * @param rxa_params + * Pointer to struct rte_event_eth_rx_adapter_params. + * In case of NULL, default values are used. 
+ * + * @return + * - 0: Success + * - <0: Error code on failure + */ +__rte_experimental +int rte_event_eth_rx_adapter_create_with_params(uint8_t id, uint8_t dev_id, + struct rte_event_port_conf *port_config, + struct rte_event_eth_rx_adapter_params *rxa_params); + /** * Free an event adapter * diff --git a/lib/eventdev/version.map b/lib/eventdev/version.map index 9f280160fa..7de18497a6 100644 --- a/lib/eventdev/version.map +++ b/lib/eventdev/version.map @@ -138,6 +138,8 @@ EXPERIMENTAL { __rte_eventdev_trace_port_setup; # added in 20.11 rte_event_pmd_pci_probe_named; + # added in 21.11 + rte_event_eth_rx_adapter_create_with_params; #added in 21.05 rte_event_vector_pool_create; -- 2.25.1 ^ permalink raw reply [flat|nested] 81+ messages in thread
* [dpdk-dev] [PATCH v7 2/5] test/event: add unit test for Rx adapter 2021-10-06 4:02 ` [dpdk-dev] [PATCH v7 1/5] eventdev/rx_adapter: add event buffer size configurability Naga Harish K S V @ 2021-10-06 4:02 ` Naga Harish K S V 2021-10-06 4:02 ` [dpdk-dev] [PATCH v7 3/5] eventdev/rx_adapter: introduce per queue event buffer Naga Harish K S V ` (5 subsequent siblings) 6 siblings, 0 replies; 81+ messages in thread From: Naga Harish K S V @ 2021-10-06 4:02 UTC (permalink / raw) To: jerinj, jay.jayatheerthan; +Cc: dev this patch adds unit test for rte_event_eth_rx_adapter_create_with_params api and validate all possible input combinations. Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com> --- app/test/test_event_eth_rx_adapter.c | 53 +++++++++++++++++++++++++--- 1 file changed, 49 insertions(+), 4 deletions(-) diff --git a/app/test/test_event_eth_rx_adapter.c b/app/test/test_event_eth_rx_adapter.c index 13664a3a3b..7c2cf0dd70 100644 --- a/app/test/test_event_eth_rx_adapter.c +++ b/app/test/test_event_eth_rx_adapter.c @@ -428,6 +428,50 @@ adapter_create_free(void) return TEST_SUCCESS; } +static int +adapter_create_free_with_params(void) +{ + int err; + + struct rte_event_port_conf rx_p_conf = { + .dequeue_depth = 8, + .enqueue_depth = 8, + .new_event_threshold = 1200, + }; + + struct rte_event_eth_rx_adapter_params rxa_params = { + .event_buf_size = 1024 + }; + + err = rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID, + TEST_DEV_ID, NULL, NULL); + TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err); + + err = rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID, + TEST_DEV_ID, &rx_p_conf, &rxa_params); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + err = rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID, + TEST_DEV_ID, &rx_p_conf, &rxa_params); + TEST_ASSERT(err == -EEXIST, "Expected -EEXIST %d got %d", -EEXIST, err); + + rxa_params.event_buf_size = 0; + err = rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID, + 
TEST_DEV_ID, &rx_p_conf, &rxa_params); + TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err); + + err = rte_event_eth_rx_adapter_free(TEST_INST_ID); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + err = rte_event_eth_rx_adapter_free(TEST_INST_ID); + TEST_ASSERT(err == -EINVAL, "Expected -EINVAL %d got %d", -EINVAL, err); + + err = rte_event_eth_rx_adapter_free(1); + TEST_ASSERT(err == -EINVAL, "Expected -EINVAL %d got %d", -EINVAL, err); + + return TEST_SUCCESS; +} + static int adapter_queue_add_del(void) { @@ -435,7 +479,7 @@ adapter_queue_add_del(void) struct rte_event ev; uint32_t cap; - struct rte_event_eth_rx_adapter_queue_conf queue_config; + struct rte_event_eth_rx_adapter_queue_conf queue_config = {0}; err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID, TEST_ETHDEV_ID, &cap); @@ -523,7 +567,7 @@ adapter_multi_eth_add_del(void) uint16_t port_index, port_index_base, drv_id = 0; char driver_name[50]; - struct rte_event_eth_rx_adapter_queue_conf queue_config; + struct rte_event_eth_rx_adapter_queue_conf queue_config = {0}; ev.queue_id = 0; ev.sched_type = RTE_SCHED_TYPE_ATOMIC; @@ -594,7 +638,7 @@ adapter_intr_queue_add_del(void) struct rte_event ev; uint32_t cap; uint16_t eth_port; - struct rte_event_eth_rx_adapter_queue_conf queue_config; + struct rte_event_eth_rx_adapter_queue_conf queue_config = {0}; if (!default_params.rx_intr_port_inited) return 0; @@ -687,7 +731,7 @@ adapter_start_stop(void) ev.sched_type = RTE_SCHED_TYPE_ATOMIC; ev.priority = 0; - struct rte_event_eth_rx_adapter_queue_conf queue_config; + struct rte_event_eth_rx_adapter_queue_conf queue_config = {0}; queue_config.rx_queue_flags = 0; if (default_params.caps & @@ -774,6 +818,7 @@ static struct unit_test_suite event_eth_rx_tests = { .teardown = testsuite_teardown, .unit_test_cases = { TEST_CASE_ST(NULL, NULL, adapter_create_free), + TEST_CASE_ST(NULL, NULL, adapter_create_free_with_params), TEST_CASE_ST(adapter_create, adapter_free, adapter_queue_add_del), 
TEST_CASE_ST(adapter_create, adapter_free, -- 2.25.1 ^ permalink raw reply [flat|nested] 81+ messages in thread
* [dpdk-dev] [PATCH v7 3/5] eventdev/rx_adapter: introduce per queue event buffer 2021-10-06 4:02 ` [dpdk-dev] [PATCH v7 1/5] eventdev/rx_adapter: add event buffer size configurability Naga Harish K S V 2021-10-06 4:02 ` [dpdk-dev] [PATCH v7 2/5] test/event: add unit test for Rx adapter Naga Harish K S V @ 2021-10-06 4:02 ` Naga Harish K S V 2021-10-06 4:02 ` [dpdk-dev] [PATCH v7 4/5] eventdev/rx_adapter: implement " Naga Harish K S V ` (4 subsequent siblings) 6 siblings, 0 replies; 81+ messages in thread From: Naga Harish K S V @ 2021-10-06 4:02 UTC (permalink / raw) To: jerinj, jay.jayatheerthan; +Cc: dev To configure per queue event buffer size, application sets ``rte_event_eth_rx_adapter_params::use_queue_event_buf`` flag as true and is passed to ``rte_event_eth_rx_adapter_create_with_params`` api. The per queue event buffer size is populated in ``rte_event_eth_rx_adapter_queue_conf::event_buf_size`` and passed to ``rte_event_eth_rx_adapter_queue_add`` api. Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com> --- .../prog_guide/event_ethernet_rx_adapter.rst | 19 ++++++++++++------- lib/eventdev/rte_event_eth_rx_adapter.h | 4 ++++ 2 files changed, 16 insertions(+), 7 deletions(-) diff --git a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst index 8526aecf57..8b58130fc5 100644 --- a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst +++ b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst @@ -62,12 +62,14 @@ service function and needs to create an event port for it. The callback is expected to fill the ``struct rte_event_eth_rx_adapter_conf structure`` passed to it. -If the application desires to control the event buffer size, it can use the -``rte_event_eth_rx_adapter_create_with_params()`` api. The event buffer size is -specified using ``struct rte_event_eth_rx_adapter_params::event_buf_size``. 
-The function is passed the event device to be associated with the adapter -and port configuration for the adapter to setup an event port if the -adapter needs to use a service function. +If the application desires to control the event buffer size at adapter level, +it can use the ``rte_event_eth_rx_adapter_create_with_params()`` api. The event +buffer size is specified using ``struct rte_event_eth_rx_adapter_params:: +event_buf_size``. To configure the event buffer size at queue level, the boolean +flag ``struct rte_event_eth_rx_adapter_params::use_queue_event_buf`` need to be +set to true. The function is passed the event device to be associated with +the adapter and port configuration for the adapter to setup an event port +if the adapter needs to use a service function. Adding Rx Queues to the Adapter Instance ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -79,7 +81,9 @@ parameter. Event information for packets from this Rx queue is encoded in the ``ev`` field of ``struct rte_event_eth_rx_adapter_queue_conf``. The servicing_weight member of the struct rte_event_eth_rx_adapter_queue_conf is the relative polling frequency of the Rx queue and is applicable when the -adapter uses a service core function. +adapter uses a service core function. The applications can configure queue +event buffer size in ``struct rte_event_eth_rx_adapter_queue_conf::event_buf_size`` +parameter. .. code-block:: c @@ -90,6 +94,7 @@ adapter uses a service core function. 
queue_config.rx_queue_flags = 0; queue_config.ev = ev; queue_config.servicing_weight = 1; + queue_config.event_buf_size = 1024; err = rte_event_eth_rx_adapter_queue_add(id, eth_dev_id, diff --git a/lib/eventdev/rte_event_eth_rx_adapter.h b/lib/eventdev/rte_event_eth_rx_adapter.h index 846ca569e9..70ca427d66 100644 --- a/lib/eventdev/rte_event_eth_rx_adapter.h +++ b/lib/eventdev/rte_event_eth_rx_adapter.h @@ -200,6 +200,8 @@ struct rte_event_eth_rx_adapter_queue_conf { * Valid when RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR flag is set in * @see rte_event_eth_rx_adapter_queue_conf::rx_queue_flags. */ + uint16_t event_buf_size; + /**< event buffer size for this queue */ }; /** @@ -267,6 +269,8 @@ struct rte_event_eth_rx_adapter_params { * This value is rounded up for better buffer utilization * and performance. */ + bool use_queue_event_buf; + /**< flag to indicate that event buffer is separate for each queue */ }; /** -- 2.25.1 ^ permalink raw reply [flat|nested] 81+ messages in thread
* [dpdk-dev] [PATCH v7 4/5] eventdev/rx_adapter: implement per queue event buffer 2021-10-06 4:02 ` [dpdk-dev] [PATCH v7 1/5] eventdev/rx_adapter: add event buffer size configurability Naga Harish K S V 2021-10-06 4:02 ` [dpdk-dev] [PATCH v7 2/5] test/event: add unit test for Rx adapter Naga Harish K S V 2021-10-06 4:02 ` [dpdk-dev] [PATCH v7 3/5] eventdev/rx_adapter: introduce per queue event buffer Naga Harish K S V @ 2021-10-06 4:02 ` Naga Harish K S V 2021-10-06 6:42 ` Jayatheerthan, Jay 2021-10-06 4:02 ` [dpdk-dev] [PATCH v7 5/5] test/event: add unit test for Rx adapter Naga Harish K S V ` (3 subsequent siblings) 6 siblings, 1 reply; 81+ messages in thread From: Naga Harish K S V @ 2021-10-06 4:02 UTC (permalink / raw) To: jerinj, jay.jayatheerthan; +Cc: dev This patch implements the per queue event buffer with required validations. Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com> --- lib/eventdev/rte_event_eth_rx_adapter.c | 206 ++++++++++++++++++------ 1 file changed, 153 insertions(+), 53 deletions(-) diff --git a/lib/eventdev/rte_event_eth_rx_adapter.c b/lib/eventdev/rte_event_eth_rx_adapter.c index 5ccea168ea..1a2aa23475 100644 --- a/lib/eventdev/rte_event_eth_rx_adapter.c +++ b/lib/eventdev/rte_event_eth_rx_adapter.c @@ -102,10 +102,12 @@ struct rte_event_eth_rx_adapter { uint8_t rss_key_be[RSS_KEY_SIZE]; /* Event device identifier */ uint8_t eventdev_id; - /* Per ethernet device structure */ - struct eth_device_info *eth_devices; /* Event port identifier */ uint8_t event_port_id; + /* Flag indicating per rxq event buffer */ + bool use_queue_event_buf; + /* Per ethernet device structure */ + struct eth_device_info *eth_devices; /* Lock to serialize config updates with service function */ rte_spinlock_t rx_lock; /* Max mbufs processed in any service function invocation */ @@ -241,6 +243,7 @@ struct eth_rx_queue_info { uint32_t flow_id_mask; /* Set to ~0 if app provides flow id else 0 */ uint64_t event; struct eth_rx_vector_data vector_data; + 
struct rte_eth_event_enqueue_buffer *event_buf; }; static struct rte_event_eth_rx_adapter **event_eth_rx_adapter; @@ -262,6 +265,22 @@ rxa_validate_id(uint8_t id) return id < RTE_EVENT_ETH_RX_ADAPTER_MAX_INSTANCE; } +static inline struct rte_eth_event_enqueue_buffer * +rxa_event_buf_get(struct rte_event_eth_rx_adapter *rx_adapter, + uint16_t eth_dev_id, uint16_t rx_queue_id) +{ + struct rte_eth_event_enqueue_buffer *buf = NULL; + + if (rx_adapter->use_queue_event_buf) { + struct eth_device_info *dev_info = + &rx_adapter->eth_devices[eth_dev_id]; + buf = dev_info->rx_queue[rx_queue_id].event_buf; + } else + buf = &rx_adapter->event_enqueue_buffer; + + return buf; +} + #define RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) do { \ if (!rxa_validate_id(id)) { \ RTE_EDEV_LOG_ERR("Invalid eth Rx adapter id = %d\n", id); \ @@ -767,10 +786,9 @@ rxa_enq_block_end_ts(struct rte_event_eth_rx_adapter *rx_adapter, /* Enqueue buffered events to event device */ static inline uint16_t -rxa_flush_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter) +rxa_flush_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter, + struct rte_eth_event_enqueue_buffer *buf) { - struct rte_eth_event_enqueue_buffer *buf = - &rx_adapter->event_enqueue_buffer; struct rte_event_eth_rx_adapter_stats *stats = &rx_adapter->stats; uint16_t count = buf->last ? 
buf->last - buf->head : buf->count; @@ -888,15 +906,14 @@ rxa_buffer_mbufs(struct rte_event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id, uint16_t rx_queue_id, struct rte_mbuf **mbufs, - uint16_t num) + uint16_t num, + struct rte_eth_event_enqueue_buffer *buf) { uint32_t i; struct eth_device_info *dev_info = &rx_adapter->eth_devices[eth_dev_id]; struct eth_rx_queue_info *eth_rx_queue_info = &dev_info->rx_queue[rx_queue_id]; - struct rte_eth_event_enqueue_buffer *buf = - &rx_adapter->event_enqueue_buffer; uint16_t new_tail = buf->tail; uint64_t event = eth_rx_queue_info->event; uint32_t flow_id_mask = eth_rx_queue_info->flow_id_mask; @@ -995,11 +1012,10 @@ rxa_eth_rx(struct rte_event_eth_rx_adapter *rx_adapter, uint16_t queue_id, uint32_t rx_count, uint32_t max_rx, - int *rxq_empty) + int *rxq_empty, + struct rte_eth_event_enqueue_buffer *buf) { struct rte_mbuf *mbufs[BATCH_SIZE]; - struct rte_eth_event_enqueue_buffer *buf = - &rx_adapter->event_enqueue_buffer; struct rte_event_eth_rx_adapter_stats *stats = &rx_adapter->stats; uint16_t n; @@ -1012,7 +1028,7 @@ rxa_eth_rx(struct rte_event_eth_rx_adapter *rx_adapter, */ while (rxa_pkt_buf_available(buf)) { if (buf->count >= BATCH_SIZE) - rxa_flush_event_buffer(rx_adapter); + rxa_flush_event_buffer(rx_adapter, buf); stats->rx_poll_count++; n = rte_eth_rx_burst(port_id, queue_id, mbufs, BATCH_SIZE); @@ -1021,14 +1037,14 @@ rxa_eth_rx(struct rte_event_eth_rx_adapter *rx_adapter, *rxq_empty = 1; break; } - rxa_buffer_mbufs(rx_adapter, port_id, queue_id, mbufs, n); + rxa_buffer_mbufs(rx_adapter, port_id, queue_id, mbufs, n, buf); nb_rx += n; if (rx_count + nb_rx > max_rx) break; } if (buf->count > 0) - rxa_flush_event_buffer(rx_adapter); + rxa_flush_event_buffer(rx_adapter, buf); return nb_rx; } @@ -1169,7 +1185,7 @@ rxa_intr_ring_dequeue(struct rte_event_eth_rx_adapter *rx_adapter) ring_lock = &rx_adapter->intr_ring_lock; if (buf->count >= BATCH_SIZE) - rxa_flush_event_buffer(rx_adapter); + 
rxa_flush_event_buffer(rx_adapter, buf); while (rxa_pkt_buf_available(buf)) { struct eth_device_info *dev_info; @@ -1221,7 +1237,7 @@ rxa_intr_ring_dequeue(struct rte_event_eth_rx_adapter *rx_adapter) continue; n = rxa_eth_rx(rx_adapter, port, i, nb_rx, rx_adapter->max_nb_rx, - &rxq_empty); + &rxq_empty, buf); nb_rx += n; enq_buffer_full = !rxq_empty && n == 0; @@ -1242,7 +1258,7 @@ rxa_intr_ring_dequeue(struct rte_event_eth_rx_adapter *rx_adapter) } else { n = rxa_eth_rx(rx_adapter, port, queue, nb_rx, rx_adapter->max_nb_rx, - &rxq_empty); + &rxq_empty, buf); rx_adapter->qd_valid = !rxq_empty; nb_rx += n; if (nb_rx > rx_adapter->max_nb_rx) @@ -1273,13 +1289,12 @@ rxa_poll(struct rte_event_eth_rx_adapter *rx_adapter) { uint32_t num_queue; uint32_t nb_rx = 0; - struct rte_eth_event_enqueue_buffer *buf; + struct rte_eth_event_enqueue_buffer *buf = NULL; uint32_t wrr_pos; uint32_t max_nb_rx; wrr_pos = rx_adapter->wrr_pos; max_nb_rx = rx_adapter->max_nb_rx; - buf = &rx_adapter->event_enqueue_buffer; /* Iterate through a WRR sequence */ for (num_queue = 0; num_queue < rx_adapter->wrr_len; num_queue++) { @@ -1287,24 +1302,31 @@ rxa_poll(struct rte_event_eth_rx_adapter *rx_adapter) uint16_t qid = rx_adapter->eth_rx_poll[poll_idx].eth_rx_qid; uint16_t d = rx_adapter->eth_rx_poll[poll_idx].eth_dev_id; + buf = rxa_event_buf_get(rx_adapter, d, qid); + /* Don't do a batch dequeue from the rx queue if there isn't * enough space in the enqueue buffer. 
*/ if (buf->count >= BATCH_SIZE) - rxa_flush_event_buffer(rx_adapter); + rxa_flush_event_buffer(rx_adapter, buf); if (!rxa_pkt_buf_available(buf)) { - rx_adapter->wrr_pos = wrr_pos; - return nb_rx; + if (rx_adapter->use_queue_event_buf) + goto poll_next_entry; + else { + rx_adapter->wrr_pos = wrr_pos; + return nb_rx; + } } nb_rx += rxa_eth_rx(rx_adapter, d, qid, nb_rx, max_nb_rx, - NULL); + NULL, buf); if (nb_rx > max_nb_rx) { rx_adapter->wrr_pos = (wrr_pos + 1) % rx_adapter->wrr_len; break; } +poll_next_entry: if (++wrr_pos == rx_adapter->wrr_len) wrr_pos = 0; } @@ -1315,12 +1337,13 @@ static void rxa_vector_expire(struct eth_rx_vector_data *vec, void *arg) { struct rte_event_eth_rx_adapter *rx_adapter = arg; - struct rte_eth_event_enqueue_buffer *buf = - &rx_adapter->event_enqueue_buffer; + struct rte_eth_event_enqueue_buffer *buf = NULL; struct rte_event *ev; + buf = rxa_event_buf_get(rx_adapter, vec->port, vec->queue); + if (buf->count) - rxa_flush_event_buffer(rx_adapter); + rxa_flush_event_buffer(rx_adapter, buf); if (vec->vector_ev->nb_elem == 0) return; @@ -1947,9 +1970,16 @@ rxa_sw_del(struct rte_event_eth_rx_adapter *rx_adapter, rx_adapter->num_rx_intr -= intrq; dev_info->nb_rx_intr -= intrq; dev_info->nb_shared_intr -= intrq && sintrq; + if (rx_adapter->use_queue_event_buf) { + struct rte_eth_event_enqueue_buffer *event_buf = + dev_info->rx_queue[rx_queue_id].event_buf; + rte_free(event_buf->events); + rte_free(event_buf); + dev_info->rx_queue[rx_queue_id].event_buf = NULL; + } } -static void +static int rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter, struct eth_device_info *dev_info, int32_t rx_queue_id, @@ -1961,15 +1991,21 @@ rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter, int intrq; int sintrq; struct rte_event *qi_ev; + struct rte_eth_event_enqueue_buffer *new_rx_buf = NULL; + uint16_t eth_dev_id = dev_info->dev->data->port_id; + int ret; if (rx_queue_id == -1) { uint16_t nb_rx_queues; uint16_t i; nb_rx_queues = 
dev_info->dev->data->nb_rx_queues; - for (i = 0; i < nb_rx_queues; i++) - rxa_add_queue(rx_adapter, dev_info, i, conf); - return; + for (i = 0; i < nb_rx_queues; i++) { + ret = rxa_add_queue(rx_adapter, dev_info, i, conf); + if (ret) + return ret; + } + return 0; } pollq = rxa_polled_queue(dev_info, rx_queue_id); @@ -2032,6 +2068,37 @@ rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter, dev_info->next_q_idx = 0; } } + + if (!rx_adapter->use_queue_event_buf) + return 0; + + new_rx_buf = rte_zmalloc_socket("rx_buffer_meta", + sizeof(*new_rx_buf), 0, + rte_eth_dev_socket_id(eth_dev_id)); + if (new_rx_buf == NULL) { + RTE_EDEV_LOG_ERR("Failed to allocate event buffer meta for " + "dev_id: %d queue_id: %d", + eth_dev_id, rx_queue_id); + return -ENOMEM; + } + + new_rx_buf->events_size = RTE_ALIGN(conf->event_buf_size, BATCH_SIZE); + new_rx_buf->events_size += (2 * BATCH_SIZE); + new_rx_buf->events = rte_zmalloc_socket("rx_buffer", + sizeof(struct rte_event) * + new_rx_buf->events_size, 0, + rte_eth_dev_socket_id(eth_dev_id)); + if (new_rx_buf->events == NULL) { + rte_free(new_rx_buf); + RTE_EDEV_LOG_ERR("Failed to allocate event buffer for " + "dev_id: %d queue_id: %d", + eth_dev_id, rx_queue_id); + return -ENOMEM; + } + + queue_info->event_buf = new_rx_buf; + + return 0; } static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter, @@ -2060,6 +2127,16 @@ static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter, temp_conf.servicing_weight = 1; } queue_conf = &temp_conf; + + if (queue_conf->servicing_weight == 0 && + rx_adapter->use_queue_event_buf) { + + RTE_EDEV_LOG_ERR("Use of queue level event buffer " + "not supported for interrupt queues " + "dev_id: %d queue_id: %d", + eth_dev_id, rx_queue_id); + return -EINVAL; + } } nb_rx_queues = dev_info->dev->data->nb_rx_queues; @@ -2139,7 +2216,9 @@ static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter, - rxa_add_queue(rx_adapter, dev_info, rx_queue_id, queue_conf); + ret = 
rxa_add_queue(rx_adapter, dev_info, rx_queue_id, queue_conf); + if (ret) + goto err_free_rxqueue; rxa_calc_wrr_sequence(rx_adapter, rx_poll, rx_wrr); rte_free(rx_adapter->eth_rx_poll); @@ -2160,7 +2239,7 @@ static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter, rte_free(rx_poll); rte_free(rx_wrr); - return 0; + return ret; } static int @@ -2286,20 +2365,25 @@ rxa_create(uint8_t id, uint8_t dev_id, rx_adapter->eth_devices[i].dev = &rte_eth_devices[i]; /* Rx adapter event buffer allocation */ - buf = &rx_adapter->event_enqueue_buffer; - buf->events_size = rxa_params->event_buf_size; - - events = rte_zmalloc_socket(rx_adapter->mem_name, - buf->events_size * sizeof(*events), - 0, socket_id); - if (events == NULL) { - RTE_EDEV_LOG_ERR("Failed to allocate mem for event buffer\n"); - rte_free(rx_adapter->eth_devices); - rte_free(rx_adapter); - return -ENOMEM; - } + rx_adapter->use_queue_event_buf = rxa_params->use_queue_event_buf; + + if (!rx_adapter->use_queue_event_buf) { + buf = &rx_adapter->event_enqueue_buffer; + buf->events_size = rxa_params->event_buf_size; + + events = rte_zmalloc_socket(rx_adapter->mem_name, + buf->events_size * sizeof(*events), + 0, socket_id); + if (events == NULL) { + RTE_EDEV_LOG_ERR("Failed to allocate memory " + "for adapter event buffer"); + rte_free(rx_adapter->eth_devices); + rte_free(rx_adapter); + return -ENOMEM; + } - rx_adapter->event_enqueue_buffer.events = events; + rx_adapter->event_enqueue_buffer.events = events; + } event_eth_rx_adapter[id] = rx_adapter; @@ -2327,6 +2411,7 @@ rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, /* use default values for adapter params */ rxa_params.event_buf_size = ETH_EVENT_BUFFER_SIZE; + rxa_params.use_queue_event_buf = false; return rxa_create(id, dev_id, &rxa_params, conf_cb, conf_arg); } @@ -2346,11 +2431,15 @@ rte_event_eth_rx_adapter_create_with_params(uint8_t id, uint8_t dev_id, /* use default values if rxa_params is NULL */ if (rxa_params == NULL) { rxa_params = 
&temp_params; - rxa_params->event_buf_size = ETH_EVENT_BUFFER_SIZE; - } - - if (rxa_params->event_buf_size == 0) + rxa_params->event_buf_size = 4 * BATCH_SIZE; + rxa_params->use_queue_event_buf = false; + } else if ((!rxa_params->use_queue_event_buf && + rxa_params->event_buf_size == 0) || + (rxa_params->use_queue_event_buf && + rxa_params->event_buf_size != 0)) { + RTE_EDEV_LOG_ERR("Invalid adapter params\n"); return -EINVAL; + } pc = rte_malloc(NULL, sizeof(*pc), 0); if (pc == NULL) @@ -2362,9 +2451,11 @@ rte_event_eth_rx_adapter_create_with_params(uint8_t id, uint8_t dev_id, * from NIC rx queues to get full buffer utilization and prevent * unnecessary rollovers. */ - rxa_params->event_buf_size = RTE_ALIGN(rxa_params->event_buf_size, - BATCH_SIZE); - rxa_params->event_buf_size += (BATCH_SIZE + BATCH_SIZE); + if (!rxa_params->use_queue_event_buf) { + rxa_params->event_buf_size = + RTE_ALIGN(rxa_params->event_buf_size, BATCH_SIZE); + rxa_params->event_buf_size += (BATCH_SIZE + BATCH_SIZE); + } ret = rxa_create(id, dev_id, rxa_params, rxa_default_conf_cb, pc); if (ret) @@ -2418,7 +2509,8 @@ rte_event_eth_rx_adapter_free(uint8_t id) if (rx_adapter->default_cb_arg) rte_free(rx_adapter->conf_arg); rte_free(rx_adapter->eth_devices); - rte_free(rx_adapter->event_enqueue_buffer.events); + if (!rx_adapter->use_queue_event_buf) + rte_free(rx_adapter->event_enqueue_buffer.events); rte_free(rx_adapter); event_eth_rx_adapter[id] = NULL; @@ -2522,6 +2614,14 @@ rte_event_eth_rx_adapter_queue_add(uint8_t id, return -EINVAL; } + if ((rx_adapter->use_queue_event_buf && + queue_conf->event_buf_size == 0) || + (!rx_adapter->use_queue_event_buf && + queue_conf->event_buf_size != 0)) { + RTE_EDEV_LOG_ERR("Invalid Event buffer size for the queue"); + return -EINVAL; + } + dev_info = &rx_adapter->eth_devices[eth_dev_id]; if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) { -- 2.25.1 ^ permalink raw reply [flat|nested] 81+ messages in thread
* Re: [dpdk-dev] [PATCH v7 4/5] eventdev/rx_adapter: implement per queue event buffer 2021-10-06 4:02 ` [dpdk-dev] [PATCH v7 4/5] eventdev/rx_adapter: implement " Naga Harish K S V @ 2021-10-06 6:42 ` Jayatheerthan, Jay 2021-10-06 7:53 ` Naga Harish K, S V 0 siblings, 1 reply; 81+ messages in thread From: Jayatheerthan, Jay @ 2021-10-06 6:42 UTC (permalink / raw) To: Naga Harish K, S V, jerinj; +Cc: dev > -----Original Message----- > From: Naga Harish K, S V <s.v.naga.harish.k@intel.com> > Sent: Wednesday, October 6, 2021 9:33 AM > To: jerinj@marvell.com; Jayatheerthan, Jay <jay.jayatheerthan@intel.com> > Cc: dev@dpdk.org > Subject: [PATCH v7 4/5] eventdev/rx_adapter: implement per queue event buffer > > this patch implement the per queue event buffer with > required validations. > > Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com> > --- > lib/eventdev/rte_event_eth_rx_adapter.c | 206 ++++++++++++++++++------ > 1 file changed, 153 insertions(+), 53 deletions(-) > > diff --git a/lib/eventdev/rte_event_eth_rx_adapter.c b/lib/eventdev/rte_event_eth_rx_adapter.c > index 5ccea168ea..1a2aa23475 100644 > --- a/lib/eventdev/rte_event_eth_rx_adapter.c > +++ b/lib/eventdev/rte_event_eth_rx_adapter.c > @@ -102,10 +102,12 @@ struct rte_event_eth_rx_adapter { > uint8_t rss_key_be[RSS_KEY_SIZE]; > /* Event device identifier */ > uint8_t eventdev_id; > - /* Per ethernet device structure */ > - struct eth_device_info *eth_devices; > /* Event port identifier */ > uint8_t event_port_id; > + /* Flag indicating per rxq event buffer */ > + bool use_queue_event_buf; > + /* Per ethernet device structure */ > + struct eth_device_info *eth_devices; > /* Lock to serialize config updates with service function */ > rte_spinlock_t rx_lock; > /* Max mbufs processed in any service function invocation */ > @@ -241,6 +243,7 @@ struct eth_rx_queue_info { > uint32_t flow_id_mask; /* Set to ~0 if app provides flow id else 0 */ > uint64_t event; > struct eth_rx_vector_data vector_data; > 
+ struct rte_eth_event_enqueue_buffer *event_buf; > }; > > static struct rte_event_eth_rx_adapter **event_eth_rx_adapter; > @@ -262,6 +265,22 @@ rxa_validate_id(uint8_t id) > return id < RTE_EVENT_ETH_RX_ADAPTER_MAX_INSTANCE; > } > > +static inline struct rte_eth_event_enqueue_buffer * > +rxa_event_buf_get(struct rte_event_eth_rx_adapter *rx_adapter, > + uint16_t eth_dev_id, uint16_t rx_queue_id) > +{ > + struct rte_eth_event_enqueue_buffer *buf = NULL; > + > + if (rx_adapter->use_queue_event_buf) { > + struct eth_device_info *dev_info = > + &rx_adapter->eth_devices[eth_dev_id]; > + buf = dev_info->rx_queue[rx_queue_id].event_buf; We can return here. It may save an instr or two. > + } else > + buf = &rx_adapter->event_enqueue_buffer; Same here. > + > + return buf; > +} > + > #define RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) do { \ > if (!rxa_validate_id(id)) { \ > RTE_EDEV_LOG_ERR("Invalid eth Rx adapter id = %d\n", id); \ > @@ -767,10 +786,9 @@ rxa_enq_block_end_ts(struct rte_event_eth_rx_adapter *rx_adapter, > > /* Enqueue buffered events to event device */ > static inline uint16_t > -rxa_flush_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter) > +rxa_flush_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter, > + struct rte_eth_event_enqueue_buffer *buf) > { > - struct rte_eth_event_enqueue_buffer *buf = > - &rx_adapter->event_enqueue_buffer; > struct rte_event_eth_rx_adapter_stats *stats = &rx_adapter->stats; > uint16_t count = buf->last ? 
buf->last - buf->head : buf->count; > > @@ -888,15 +906,14 @@ rxa_buffer_mbufs(struct rte_event_eth_rx_adapter *rx_adapter, > uint16_t eth_dev_id, > uint16_t rx_queue_id, > struct rte_mbuf **mbufs, > - uint16_t num) > + uint16_t num, > + struct rte_eth_event_enqueue_buffer *buf) > { > uint32_t i; > struct eth_device_info *dev_info = > &rx_adapter->eth_devices[eth_dev_id]; > struct eth_rx_queue_info *eth_rx_queue_info = > &dev_info->rx_queue[rx_queue_id]; > - struct rte_eth_event_enqueue_buffer *buf = > - &rx_adapter->event_enqueue_buffer; > uint16_t new_tail = buf->tail; > uint64_t event = eth_rx_queue_info->event; > uint32_t flow_id_mask = eth_rx_queue_info->flow_id_mask; > @@ -995,11 +1012,10 @@ rxa_eth_rx(struct rte_event_eth_rx_adapter *rx_adapter, > uint16_t queue_id, > uint32_t rx_count, > uint32_t max_rx, > - int *rxq_empty) > + int *rxq_empty, > + struct rte_eth_event_enqueue_buffer *buf) > { > struct rte_mbuf *mbufs[BATCH_SIZE]; > - struct rte_eth_event_enqueue_buffer *buf = > - &rx_adapter->event_enqueue_buffer; > struct rte_event_eth_rx_adapter_stats *stats = > &rx_adapter->stats; > uint16_t n; > @@ -1012,7 +1028,7 @@ rxa_eth_rx(struct rte_event_eth_rx_adapter *rx_adapter, > */ > while (rxa_pkt_buf_available(buf)) { > if (buf->count >= BATCH_SIZE) > - rxa_flush_event_buffer(rx_adapter); > + rxa_flush_event_buffer(rx_adapter, buf); > > stats->rx_poll_count++; > n = rte_eth_rx_burst(port_id, queue_id, mbufs, BATCH_SIZE); > @@ -1021,14 +1037,14 @@ rxa_eth_rx(struct rte_event_eth_rx_adapter *rx_adapter, > *rxq_empty = 1; > break; > } > - rxa_buffer_mbufs(rx_adapter, port_id, queue_id, mbufs, n); > + rxa_buffer_mbufs(rx_adapter, port_id, queue_id, mbufs, n, buf); > nb_rx += n; > if (rx_count + nb_rx > max_rx) > break; > } > > if (buf->count > 0) > - rxa_flush_event_buffer(rx_adapter); > + rxa_flush_event_buffer(rx_adapter, buf); > > return nb_rx; > } > @@ -1169,7 +1185,7 @@ rxa_intr_ring_dequeue(struct rte_event_eth_rx_adapter *rx_adapter) > ring_lock = 
&rx_adapter->intr_ring_lock; > > if (buf->count >= BATCH_SIZE) > - rxa_flush_event_buffer(rx_adapter); > + rxa_flush_event_buffer(rx_adapter, buf); > > while (rxa_pkt_buf_available(buf)) { > struct eth_device_info *dev_info; > @@ -1221,7 +1237,7 @@ rxa_intr_ring_dequeue(struct rte_event_eth_rx_adapter *rx_adapter) > continue; > n = rxa_eth_rx(rx_adapter, port, i, nb_rx, > rx_adapter->max_nb_rx, > - &rxq_empty); > + &rxq_empty, buf); > nb_rx += n; > > enq_buffer_full = !rxq_empty && n == 0; > @@ -1242,7 +1258,7 @@ rxa_intr_ring_dequeue(struct rte_event_eth_rx_adapter *rx_adapter) > } else { > n = rxa_eth_rx(rx_adapter, port, queue, nb_rx, > rx_adapter->max_nb_rx, > - &rxq_empty); > + &rxq_empty, buf); > rx_adapter->qd_valid = !rxq_empty; > nb_rx += n; > if (nb_rx > rx_adapter->max_nb_rx) > @@ -1273,13 +1289,12 @@ rxa_poll(struct rte_event_eth_rx_adapter *rx_adapter) > { > uint32_t num_queue; > uint32_t nb_rx = 0; > - struct rte_eth_event_enqueue_buffer *buf; > + struct rte_eth_event_enqueue_buffer *buf = NULL; > uint32_t wrr_pos; > uint32_t max_nb_rx; > > wrr_pos = rx_adapter->wrr_pos; > max_nb_rx = rx_adapter->max_nb_rx; > - buf = &rx_adapter->event_enqueue_buffer; > > /* Iterate through a WRR sequence */ > for (num_queue = 0; num_queue < rx_adapter->wrr_len; num_queue++) { > @@ -1287,24 +1302,31 @@ rxa_poll(struct rte_event_eth_rx_adapter *rx_adapter) > uint16_t qid = rx_adapter->eth_rx_poll[poll_idx].eth_rx_qid; > uint16_t d = rx_adapter->eth_rx_poll[poll_idx].eth_dev_id; > > + buf = rxa_event_buf_get(rx_adapter, d, qid); > + > /* Don't do a batch dequeue from the rx queue if there isn't > * enough space in the enqueue buffer. 
> */ > if (buf->count >= BATCH_SIZE) > - rxa_flush_event_buffer(rx_adapter); > + rxa_flush_event_buffer(rx_adapter, buf); > if (!rxa_pkt_buf_available(buf)) { > - rx_adapter->wrr_pos = wrr_pos; > - return nb_rx; > + if (rx_adapter->use_queue_event_buf) > + goto poll_next_entry; > + else { > + rx_adapter->wrr_pos = wrr_pos; > + return nb_rx; > + } > } > > nb_rx += rxa_eth_rx(rx_adapter, d, qid, nb_rx, max_nb_rx, > - NULL); > + NULL, buf); > if (nb_rx > max_nb_rx) { > rx_adapter->wrr_pos = > (wrr_pos + 1) % rx_adapter->wrr_len; > break; > } > > +poll_next_entry: > if (++wrr_pos == rx_adapter->wrr_len) > wrr_pos = 0; > } > @@ -1315,12 +1337,13 @@ static void > rxa_vector_expire(struct eth_rx_vector_data *vec, void *arg) > { > struct rte_event_eth_rx_adapter *rx_adapter = arg; > - struct rte_eth_event_enqueue_buffer *buf = > - &rx_adapter->event_enqueue_buffer; > + struct rte_eth_event_enqueue_buffer *buf = NULL; > struct rte_event *ev; > > + buf = rxa_event_buf_get(rx_adapter, vec->port, vec->queue); > + > if (buf->count) > - rxa_flush_event_buffer(rx_adapter); > + rxa_flush_event_buffer(rx_adapter, buf); > > if (vec->vector_ev->nb_elem == 0) > return; > @@ -1947,9 +1970,16 @@ rxa_sw_del(struct rte_event_eth_rx_adapter *rx_adapter, > rx_adapter->num_rx_intr -= intrq; > dev_info->nb_rx_intr -= intrq; > dev_info->nb_shared_intr -= intrq && sintrq; > + if (rx_adapter->use_queue_event_buf) { > + struct rte_eth_event_enqueue_buffer *event_buf = > + dev_info->rx_queue[rx_queue_id].event_buf; > + rte_free(event_buf->events); > + rte_free(event_buf); > + dev_info->rx_queue[rx_queue_id].event_buf = NULL; > + } > } > > -static void > +static int > rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter, > struct eth_device_info *dev_info, > int32_t rx_queue_id, > @@ -1961,15 +1991,21 @@ rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter, > int intrq; > int sintrq; > struct rte_event *qi_ev; > + struct rte_eth_event_enqueue_buffer *new_rx_buf = NULL; > + uint16_t 
eth_dev_id = dev_info->dev->data->port_id; > + int ret; > > if (rx_queue_id == -1) { > uint16_t nb_rx_queues; > uint16_t i; > > nb_rx_queues = dev_info->dev->data->nb_rx_queues; > - for (i = 0; i < nb_rx_queues; i++) > - rxa_add_queue(rx_adapter, dev_info, i, conf); > - return; > + for (i = 0; i < nb_rx_queues; i++) { > + ret = rxa_add_queue(rx_adapter, dev_info, i, conf); > + if (ret) > + return ret; > + } > + return 0; > } > > pollq = rxa_polled_queue(dev_info, rx_queue_id); > @@ -2032,6 +2068,37 @@ rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter, > dev_info->next_q_idx = 0; > } > } > + > + if (!rx_adapter->use_queue_event_buf) > + return 0; > + > + new_rx_buf = rte_zmalloc_socket("rx_buffer_meta", > + sizeof(*new_rx_buf), 0, > + rte_eth_dev_socket_id(eth_dev_id)); > + if (new_rx_buf == NULL) { > + RTE_EDEV_LOG_ERR("Failed to allocate event buffer meta for " > + "dev_id: %d queue_id: %d", > + eth_dev_id, rx_queue_id); > + return -ENOMEM; > + } > + > + new_rx_buf->events_size = RTE_ALIGN(conf->event_buf_size, BATCH_SIZE); > + new_rx_buf->events_size += (2 * BATCH_SIZE); > + new_rx_buf->events = rte_zmalloc_socket("rx_buffer", > + sizeof(struct rte_event) * > + new_rx_buf->events_size, 0, > + rte_eth_dev_socket_id(eth_dev_id)); > + if (new_rx_buf->events == NULL) { > + rte_free(new_rx_buf); > + RTE_EDEV_LOG_ERR("Failed to allocate event buffer for " > + "dev_id: %d queue_id: %d", > + eth_dev_id, rx_queue_id); > + return -ENOMEM; > + } > + > + queue_info->event_buf = new_rx_buf; > + > + return 0; > } > > static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter, > @@ -2060,6 +2127,16 @@ static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter, > temp_conf.servicing_weight = 1; > } > queue_conf = &temp_conf; > + > + if (queue_conf->servicing_weight == 0 && > + rx_adapter->use_queue_event_buf) { > + > + RTE_EDEV_LOG_ERR("Use of queue level event buffer " > + "not supported for interrupt queues " > + "dev_id: %d queue_id: %d", > + eth_dev_id, 
rx_queue_id); > + return -EINVAL; > + } > } > > nb_rx_queues = dev_info->dev->data->nb_rx_queues; > @@ -2139,7 +2216,9 @@ static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter, > > > > - rxa_add_queue(rx_adapter, dev_info, rx_queue_id, queue_conf); > + ret = rxa_add_queue(rx_adapter, dev_info, rx_queue_id, queue_conf); > + if (ret) > + goto err_free_rxqueue; > rxa_calc_wrr_sequence(rx_adapter, rx_poll, rx_wrr); > > rte_free(rx_adapter->eth_rx_poll); > @@ -2160,7 +2239,7 @@ static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter, > rte_free(rx_poll); > rte_free(rx_wrr); > > - return 0; > + return ret; > } > > static int > @@ -2286,20 +2365,25 @@ rxa_create(uint8_t id, uint8_t dev_id, > rx_adapter->eth_devices[i].dev = &rte_eth_devices[i]; > > /* Rx adapter event buffer allocation */ > - buf = &rx_adapter->event_enqueue_buffer; > - buf->events_size = rxa_params->event_buf_size; > - > - events = rte_zmalloc_socket(rx_adapter->mem_name, > - buf->events_size * sizeof(*events), > - 0, socket_id); > - if (events == NULL) { > - RTE_EDEV_LOG_ERR("Failed to allocate mem for event buffer\n"); > - rte_free(rx_adapter->eth_devices); > - rte_free(rx_adapter); > - return -ENOMEM; > - } > + rx_adapter->use_queue_event_buf = rxa_params->use_queue_event_buf; > + > + if (!rx_adapter->use_queue_event_buf) { > + buf = &rx_adapter->event_enqueue_buffer; > + buf->events_size = rxa_params->event_buf_size; > + > + events = rte_zmalloc_socket(rx_adapter->mem_name, > + buf->events_size * sizeof(*events), > + 0, socket_id); > + if (events == NULL) { > + RTE_EDEV_LOG_ERR("Failed to allocate memory " > + "for adapter event buffer"); > + rte_free(rx_adapter->eth_devices); > + rte_free(rx_adapter); > + return -ENOMEM; > + } > > - rx_adapter->event_enqueue_buffer.events = events; > + rx_adapter->event_enqueue_buffer.events = events; > + } > > event_eth_rx_adapter[id] = rx_adapter; > > @@ -2327,6 +2411,7 @@ rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, > > /* 
use default values for adapter params */ > rxa_params.event_buf_size = ETH_EVENT_BUFFER_SIZE; > + rxa_params.use_queue_event_buf = false; > > return rxa_create(id, dev_id, &rxa_params, conf_cb, conf_arg); > } > @@ -2346,11 +2431,15 @@ rte_event_eth_rx_adapter_create_with_params(uint8_t id, uint8_t dev_id, > /* use default values if rxa_params is NULL */ > if (rxa_params == NULL) { > rxa_params = &temp_params; > - rxa_params->event_buf_size = ETH_EVENT_BUFFER_SIZE; > - } > - > - if (rxa_params->event_buf_size == 0) > + rxa_params->event_buf_size = 4 * BATCH_SIZE; This assumes ETH_EVENT_BUFFER_SIZE to be set to 6 * BATCH_SIZE so we can have 4x here and 2x later. It may break if ETH_EVENT_BUFFER_SIZE is changed later. Can we change the code to just use ETH_EVENT_BUFFER_SIZE here. See below. > + rxa_params->use_queue_event_buf = false; > + } else if ((!rxa_params->use_queue_event_buf && > + rxa_params->event_buf_size == 0) || > + (rxa_params->use_queue_event_buf && > + rxa_params->event_buf_size != 0)) { > + RTE_EDEV_LOG_ERR("Invalid adapter params\n"); > return -EINVAL; > + } > > pc = rte_malloc(NULL, sizeof(*pc), 0); > if (pc == NULL) > @@ -2362,9 +2451,11 @@ rte_event_eth_rx_adapter_create_with_params(uint8_t id, uint8_t dev_id, > * from NIC rx queues to get full buffer utilization and prevent > * unnecessary rollovers. > */ > - rxa_params->event_buf_size = RTE_ALIGN(rxa_params->event_buf_size, > - BATCH_SIZE); > - rxa_params->event_buf_size += (BATCH_SIZE + BATCH_SIZE); > + if (!rxa_params->use_queue_event_buf) { > + rxa_params->event_buf_size = > + RTE_ALIGN(rxa_params->event_buf_size, BATCH_SIZE); > + rxa_params->event_buf_size += (BATCH_SIZE + BATCH_SIZE); > + } Above if condition can be added as an else part of rxa_params == NULL. 
Something like: if (rxa_params == NULL) { rxa_params = &temp_params; rxa_params->event_buf_size = ETH_EVENT_BUFFER_SIZE; rxa_params->use_queue_event_buf = false; } else if ((!rxa_params->use_queue_event_buf && rxa_params->event_buf_size == 0) || (rxa_params->use_queue_event_buf && rxa_params->event_buf_size != 0)) { RTE_EDEV_LOG_ERR("Invalid adapter params\n"); return -EINVAL; } else if (!rxa_params->use_queue_event_buf) { rxa_params->event_buf_size = RTE_ALIGN(rxa_params->event_buf_size, BATCH_SIZE); rxa_params->event_buf_size += (BATCH_SIZE + BATCH_SIZE); } > > ret = rxa_create(id, dev_id, rxa_params, rxa_default_conf_cb, pc); > if (ret) > @@ -2418,7 +2509,8 @@ rte_event_eth_rx_adapter_free(uint8_t id) > if (rx_adapter->default_cb_arg) > rte_free(rx_adapter->conf_arg); > rte_free(rx_adapter->eth_devices); > - rte_free(rx_adapter->event_enqueue_buffer.events); > + if (!rx_adapter->use_queue_event_buf) > + rte_free(rx_adapter->event_enqueue_buffer.events); > rte_free(rx_adapter); > event_eth_rx_adapter[id] = NULL; > > @@ -2522,6 +2614,14 @@ rte_event_eth_rx_adapter_queue_add(uint8_t id, > return -EINVAL; > } > > + if ((rx_adapter->use_queue_event_buf && > + queue_conf->event_buf_size == 0) || > + (!rx_adapter->use_queue_event_buf && > + queue_conf->event_buf_size != 0)) { > + RTE_EDEV_LOG_ERR("Invalid Event buffer size for the queue"); > + return -EINVAL; > + } > + > dev_info = &rx_adapter->eth_devices[eth_dev_id]; > > if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) { > -- > 2.25.1 ^ permalink raw reply [flat|nested] 81+ messages in thread
* Re: [dpdk-dev] [PATCH v7 4/5] eventdev/rx_adapter: implement per queue event buffer 2021-10-06 6:42 ` Jayatheerthan, Jay @ 2021-10-06 7:53 ` Naga Harish K, S V 0 siblings, 0 replies; 81+ messages in thread From: Naga Harish K, S V @ 2021-10-06 7:53 UTC (permalink / raw) To: Jayatheerthan, Jay, jerinj; +Cc: dev Hi Jay, > -----Original Message----- > From: Jayatheerthan, Jay <jay.jayatheerthan@intel.com> > Sent: Wednesday, October 6, 2021 12:13 PM > To: Naga Harish K, S V <s.v.naga.harish.k@intel.com>; jerinj@marvell.com > Cc: dev@dpdk.org > Subject: RE: [PATCH v7 4/5] eventdev/rx_adapter: implement per queue > event buffer > > > -----Original Message----- > > From: Naga Harish K, S V <s.v.naga.harish.k@intel.com> > > Sent: Wednesday, October 6, 2021 9:33 AM > > To: jerinj@marvell.com; Jayatheerthan, Jay > > <jay.jayatheerthan@intel.com> > > Cc: dev@dpdk.org > > Subject: [PATCH v7 4/5] eventdev/rx_adapter: implement per queue > event > > buffer > > > > this patch implement the per queue event buffer with required > > validations. 
> > > > Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com> > > --- > > lib/eventdev/rte_event_eth_rx_adapter.c | 206 > > ++++++++++++++++++------ > > 1 file changed, 153 insertions(+), 53 deletions(-) > > > > diff --git a/lib/eventdev/rte_event_eth_rx_adapter.c > > b/lib/eventdev/rte_event_eth_rx_adapter.c > > index 5ccea168ea..1a2aa23475 100644 > > --- a/lib/eventdev/rte_event_eth_rx_adapter.c > > +++ b/lib/eventdev/rte_event_eth_rx_adapter.c > > @@ -102,10 +102,12 @@ struct rte_event_eth_rx_adapter { > > uint8_t rss_key_be[RSS_KEY_SIZE]; > > /* Event device identifier */ > > uint8_t eventdev_id; > > - /* Per ethernet device structure */ > > - struct eth_device_info *eth_devices; > > /* Event port identifier */ > > uint8_t event_port_id; > > + /* Flag indicating per rxq event buffer */ > > + bool use_queue_event_buf; > > + /* Per ethernet device structure */ > > + struct eth_device_info *eth_devices; > > /* Lock to serialize config updates with service function */ > > rte_spinlock_t rx_lock; > > /* Max mbufs processed in any service function invocation */ @@ > > -241,6 +243,7 @@ struct eth_rx_queue_info { > > uint32_t flow_id_mask; /* Set to ~0 if app provides flow id > else 0 */ > > uint64_t event; > > struct eth_rx_vector_data vector_data; > > + struct rte_eth_event_enqueue_buffer *event_buf; > > }; > > > > static struct rte_event_eth_rx_adapter **event_eth_rx_adapter; @@ > > -262,6 +265,22 @@ rxa_validate_id(uint8_t id) > > return id < RTE_EVENT_ETH_RX_ADAPTER_MAX_INSTANCE; > > } > > > > +static inline struct rte_eth_event_enqueue_buffer * > > +rxa_event_buf_get(struct rte_event_eth_rx_adapter *rx_adapter, > > + uint16_t eth_dev_id, uint16_t rx_queue_id) { > > + struct rte_eth_event_enqueue_buffer *buf = NULL; > > + > > + if (rx_adapter->use_queue_event_buf) { > > + struct eth_device_info *dev_info = > > + &rx_adapter->eth_devices[eth_dev_id]; > > + buf = dev_info->rx_queue[rx_queue_id].event_buf; > > We can return here. It may save an instr or two. 
Done > > > + } else > > + buf = &rx_adapter->event_enqueue_buffer; > > Same here. Done > > > + > > + return buf; > > +} > > + > > #define RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, > retval) do { \ > > if (!rxa_validate_id(id)) { \ > > RTE_EDEV_LOG_ERR("Invalid eth Rx adapter id = %d\n", id); \ > @@ > > -767,10 +786,9 @@ rxa_enq_block_end_ts(struct > rte_event_eth_rx_adapter > > *rx_adapter, > > > > /* Enqueue buffered events to event device */ static inline uint16_t > > -rxa_flush_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter) > > +rxa_flush_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter, > > + struct rte_eth_event_enqueue_buffer *buf) > > { > > - struct rte_eth_event_enqueue_buffer *buf = > > - &rx_adapter->event_enqueue_buffer; > > struct rte_event_eth_rx_adapter_stats *stats = &rx_adapter->stats; > > uint16_t count = buf->last ? buf->last - buf->head : buf->count; > > > > @@ -888,15 +906,14 @@ rxa_buffer_mbufs(struct > rte_event_eth_rx_adapter *rx_adapter, > > uint16_t eth_dev_id, > > uint16_t rx_queue_id, > > struct rte_mbuf **mbufs, > > - uint16_t num) > > + uint16_t num, > > + struct rte_eth_event_enqueue_buffer *buf) > > { > > uint32_t i; > > struct eth_device_info *dev_info = > > &rx_adapter- > >eth_devices[eth_dev_id]; > > struct eth_rx_queue_info *eth_rx_queue_info = > > &dev_info- > >rx_queue[rx_queue_id]; > > - struct rte_eth_event_enqueue_buffer *buf = > > - &rx_adapter- > >event_enqueue_buffer; > > uint16_t new_tail = buf->tail; > > uint64_t event = eth_rx_queue_info->event; > > uint32_t flow_id_mask = eth_rx_queue_info->flow_id_mask; @@ - > 995,11 > > +1012,10 @@ rxa_eth_rx(struct rte_event_eth_rx_adapter *rx_adapter, > > uint16_t queue_id, > > uint32_t rx_count, > > uint32_t max_rx, > > - int *rxq_empty) > > + int *rxq_empty, > > + struct rte_eth_event_enqueue_buffer *buf) > > { > > struct rte_mbuf *mbufs[BATCH_SIZE]; > > - struct rte_eth_event_enqueue_buffer *buf = > > - &rx_adapter- > >event_enqueue_buffer; > > struct 
rte_event_eth_rx_adapter_stats *stats = > > &rx_adapter->stats; > > uint16_t n; > > @@ -1012,7 +1028,7 @@ rxa_eth_rx(struct rte_event_eth_rx_adapter > *rx_adapter, > > */ > > while (rxa_pkt_buf_available(buf)) { > > if (buf->count >= BATCH_SIZE) > > - rxa_flush_event_buffer(rx_adapter); > > + rxa_flush_event_buffer(rx_adapter, buf); > > > > stats->rx_poll_count++; > > n = rte_eth_rx_burst(port_id, queue_id, mbufs, > BATCH_SIZE); @@ > > -1021,14 +1037,14 @@ rxa_eth_rx(struct rte_event_eth_rx_adapter > *rx_adapter, > > *rxq_empty = 1; > > break; > > } > > - rxa_buffer_mbufs(rx_adapter, port_id, queue_id, mbufs, n); > > + rxa_buffer_mbufs(rx_adapter, port_id, queue_id, mbufs, n, > buf); > > nb_rx += n; > > if (rx_count + nb_rx > max_rx) > > break; > > } > > > > if (buf->count > 0) > > - rxa_flush_event_buffer(rx_adapter); > > + rxa_flush_event_buffer(rx_adapter, buf); > > > > return nb_rx; > > } > > @@ -1169,7 +1185,7 @@ rxa_intr_ring_dequeue(struct > rte_event_eth_rx_adapter *rx_adapter) > > ring_lock = &rx_adapter->intr_ring_lock; > > > > if (buf->count >= BATCH_SIZE) > > - rxa_flush_event_buffer(rx_adapter); > > + rxa_flush_event_buffer(rx_adapter, buf); > > > > while (rxa_pkt_buf_available(buf)) { > > struct eth_device_info *dev_info; > > @@ -1221,7 +1237,7 @@ rxa_intr_ring_dequeue(struct > rte_event_eth_rx_adapter *rx_adapter) > > continue; > > n = rxa_eth_rx(rx_adapter, port, i, nb_rx, > > rx_adapter->max_nb_rx, > > - &rxq_empty); > > + &rxq_empty, buf); > > nb_rx += n; > > > > enq_buffer_full = !rxq_empty && n == 0; > @@ -1242,7 +1258,7 @@ > > rxa_intr_ring_dequeue(struct rte_event_eth_rx_adapter *rx_adapter) > > } else { > > n = rxa_eth_rx(rx_adapter, port, queue, nb_rx, > > rx_adapter->max_nb_rx, > > - &rxq_empty); > > + &rxq_empty, buf); > > rx_adapter->qd_valid = !rxq_empty; > > nb_rx += n; > > if (nb_rx > rx_adapter->max_nb_rx) @@ -1273,13 > +1289,12 @@ > > rxa_poll(struct rte_event_eth_rx_adapter *rx_adapter) { > > uint32_t num_queue; > > uint32_t nb_rx = 
0; > > - struct rte_eth_event_enqueue_buffer *buf; > > + struct rte_eth_event_enqueue_buffer *buf = NULL; > > uint32_t wrr_pos; > > uint32_t max_nb_rx; > > > > wrr_pos = rx_adapter->wrr_pos; > > max_nb_rx = rx_adapter->max_nb_rx; > > - buf = &rx_adapter->event_enqueue_buffer; > > > > /* Iterate through a WRR sequence */ > > for (num_queue = 0; num_queue < rx_adapter->wrr_len; > num_queue++) { > > @@ -1287,24 +1302,31 @@ rxa_poll(struct rte_event_eth_rx_adapter > *rx_adapter) > > uint16_t qid = rx_adapter->eth_rx_poll[poll_idx].eth_rx_qid; > > uint16_t d = rx_adapter->eth_rx_poll[poll_idx].eth_dev_id; > > > > + buf = rxa_event_buf_get(rx_adapter, d, qid); > > + > > /* Don't do a batch dequeue from the rx queue if there isn't > > * enough space in the enqueue buffer. > > */ > > if (buf->count >= BATCH_SIZE) > > - rxa_flush_event_buffer(rx_adapter); > > + rxa_flush_event_buffer(rx_adapter, buf); > > if (!rxa_pkt_buf_available(buf)) { > > - rx_adapter->wrr_pos = wrr_pos; > > - return nb_rx; > > + if (rx_adapter->use_queue_event_buf) > > + goto poll_next_entry; > > + else { > > + rx_adapter->wrr_pos = wrr_pos; > > + return nb_rx; > > + } > > } > > > > nb_rx += rxa_eth_rx(rx_adapter, d, qid, nb_rx, max_nb_rx, > > - NULL); > > + NULL, buf); > > if (nb_rx > max_nb_rx) { > > rx_adapter->wrr_pos = > > (wrr_pos + 1) % rx_adapter->wrr_len; > > break; > > } > > > > +poll_next_entry: > > if (++wrr_pos == rx_adapter->wrr_len) > > wrr_pos = 0; > > } > > @@ -1315,12 +1337,13 @@ static void > > rxa_vector_expire(struct eth_rx_vector_data *vec, void *arg) { > > struct rte_event_eth_rx_adapter *rx_adapter = arg; > > - struct rte_eth_event_enqueue_buffer *buf = > > - &rx_adapter->event_enqueue_buffer; > > + struct rte_eth_event_enqueue_buffer *buf = NULL; > > struct rte_event *ev; > > > > + buf = rxa_event_buf_get(rx_adapter, vec->port, vec->queue); > > + > > if (buf->count) > > - rxa_flush_event_buffer(rx_adapter); > > + rxa_flush_event_buffer(rx_adapter, buf); > > > > if 
(vec->vector_ev->nb_elem == 0) > > return; > > @@ -1947,9 +1970,16 @@ rxa_sw_del(struct rte_event_eth_rx_adapter > *rx_adapter, > > rx_adapter->num_rx_intr -= intrq; > > dev_info->nb_rx_intr -= intrq; > > dev_info->nb_shared_intr -= intrq && sintrq; > > + if (rx_adapter->use_queue_event_buf) { > > + struct rte_eth_event_enqueue_buffer *event_buf = > > + dev_info->rx_queue[rx_queue_id].event_buf; > > + rte_free(event_buf->events); > > + rte_free(event_buf); > > + dev_info->rx_queue[rx_queue_id].event_buf = NULL; > > + } > > } > > > > -static void > > +static int > > rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter, > > struct eth_device_info *dev_info, > > int32_t rx_queue_id, > > @@ -1961,15 +1991,21 @@ rxa_add_queue(struct > rte_event_eth_rx_adapter *rx_adapter, > > int intrq; > > int sintrq; > > struct rte_event *qi_ev; > > + struct rte_eth_event_enqueue_buffer *new_rx_buf = NULL; > > + uint16_t eth_dev_id = dev_info->dev->data->port_id; > > + int ret; > > > > if (rx_queue_id == -1) { > > uint16_t nb_rx_queues; > > uint16_t i; > > > > nb_rx_queues = dev_info->dev->data->nb_rx_queues; > > - for (i = 0; i < nb_rx_queues; i++) > > - rxa_add_queue(rx_adapter, dev_info, i, conf); > > - return; > > + for (i = 0; i < nb_rx_queues; i++) { > > + ret = rxa_add_queue(rx_adapter, dev_info, i, conf); > > + if (ret) > > + return ret; > > + } > > + return 0; > > } > > > > pollq = rxa_polled_queue(dev_info, rx_queue_id); @@ -2032,6 > +2068,37 > > @@ rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter, > > dev_info->next_q_idx = 0; > > } > > } > > + > > + if (!rx_adapter->use_queue_event_buf) > > + return 0; > > + > > + new_rx_buf = rte_zmalloc_socket("rx_buffer_meta", > > + sizeof(*new_rx_buf), 0, > > + rte_eth_dev_socket_id(eth_dev_id)); > > + if (new_rx_buf == NULL) { > > + RTE_EDEV_LOG_ERR("Failed to allocate event buffer meta > for " > > + "dev_id: %d queue_id: %d", > > + eth_dev_id, rx_queue_id); > > + return -ENOMEM; > > + } > > + > > + 
new_rx_buf->events_size = RTE_ALIGN(conf->event_buf_size, > BATCH_SIZE); > > + new_rx_buf->events_size += (2 * BATCH_SIZE); > > + new_rx_buf->events = rte_zmalloc_socket("rx_buffer", > > + sizeof(struct rte_event) * > > + new_rx_buf->events_size, 0, > > + rte_eth_dev_socket_id(eth_dev_id)); > > + if (new_rx_buf->events == NULL) { > > + rte_free(new_rx_buf); > > + RTE_EDEV_LOG_ERR("Failed to allocate event buffer for " > > + "dev_id: %d queue_id: %d", > > + eth_dev_id, rx_queue_id); > > + return -ENOMEM; > > + } > > + > > + queue_info->event_buf = new_rx_buf; > > + > > + return 0; > > } > > > > static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter, @@ > > -2060,6 +2127,16 @@ static int rxa_sw_add(struct > rte_event_eth_rx_adapter *rx_adapter, > > temp_conf.servicing_weight = 1; > > } > > queue_conf = &temp_conf; > > + > > + if (queue_conf->servicing_weight == 0 && > > + rx_adapter->use_queue_event_buf) { > > + > > + RTE_EDEV_LOG_ERR("Use of queue level event > buffer " > > + "not supported for interrupt queues > " > > + "dev_id: %d queue_id: %d", > > + eth_dev_id, rx_queue_id); > > + return -EINVAL; > > + } > > } > > > > nb_rx_queues = dev_info->dev->data->nb_rx_queues; > > @@ -2139,7 +2216,9 @@ static int rxa_sw_add(struct > > rte_event_eth_rx_adapter *rx_adapter, > > > > > > > > - rxa_add_queue(rx_adapter, dev_info, rx_queue_id, queue_conf); > > + ret = rxa_add_queue(rx_adapter, dev_info, rx_queue_id, > queue_conf); > > + if (ret) > > + goto err_free_rxqueue; > > rxa_calc_wrr_sequence(rx_adapter, rx_poll, rx_wrr); > > > > rte_free(rx_adapter->eth_rx_poll); > > @@ -2160,7 +2239,7 @@ static int rxa_sw_add(struct > rte_event_eth_rx_adapter *rx_adapter, > > rte_free(rx_poll); > > rte_free(rx_wrr); > > > > - return 0; > > + return ret; > > } > > > > static int > > @@ -2286,20 +2365,25 @@ rxa_create(uint8_t id, uint8_t dev_id, > > rx_adapter->eth_devices[i].dev = &rte_eth_devices[i]; > > > > /* Rx adapter event buffer allocation */ > > - buf = 
&rx_adapter->event_enqueue_buffer; > > - buf->events_size = rxa_params->event_buf_size; > > - > > - events = rte_zmalloc_socket(rx_adapter->mem_name, > > - buf->events_size * sizeof(*events), > > - 0, socket_id); > > - if (events == NULL) { > > - RTE_EDEV_LOG_ERR("Failed to allocate mem for event > buffer\n"); > > - rte_free(rx_adapter->eth_devices); > > - rte_free(rx_adapter); > > - return -ENOMEM; > > - } > > + rx_adapter->use_queue_event_buf = rxa_params- > >use_queue_event_buf; > > + > > + if (!rx_adapter->use_queue_event_buf) { > > + buf = &rx_adapter->event_enqueue_buffer; > > + buf->events_size = rxa_params->event_buf_size; > > + > > + events = rte_zmalloc_socket(rx_adapter->mem_name, > > + buf->events_size * > sizeof(*events), > > + 0, socket_id); > > + if (events == NULL) { > > + RTE_EDEV_LOG_ERR("Failed to allocate memory " > > + "for adapter event buffer"); > > + rte_free(rx_adapter->eth_devices); > > + rte_free(rx_adapter); > > + return -ENOMEM; > > + } > > > > - rx_adapter->event_enqueue_buffer.events = events; > > + rx_adapter->event_enqueue_buffer.events = events; > > + } > > > > event_eth_rx_adapter[id] = rx_adapter; > > > > @@ -2327,6 +2411,7 @@ rte_event_eth_rx_adapter_create_ext(uint8_t > id, > > uint8_t dev_id, > > > > /* use default values for adapter params */ > > rxa_params.event_buf_size = ETH_EVENT_BUFFER_SIZE; > > + rxa_params.use_queue_event_buf = false; > > > > return rxa_create(id, dev_id, &rxa_params, conf_cb, conf_arg); } > @@ > > -2346,11 +2431,15 @@ > rte_event_eth_rx_adapter_create_with_params(uint8_t id, uint8_t dev_id, > > /* use default values if rxa_params is NULL */ > > if (rxa_params == NULL) { > > rxa_params = &temp_params; > > - rxa_params->event_buf_size = ETH_EVENT_BUFFER_SIZE; > > - } > > - > > - if (rxa_params->event_buf_size == 0) > > + rxa_params->event_buf_size = 4 * BATCH_SIZE; > > This assumes ETH_EVENT_BUFFER_SIZE to be set to 6 * BATCH_SIZE so we > can have 4x here and 2x later. 
It may break if ETH_EVENT_BUFFER_SIZE is > changed later. > Can we change the code to just use ETH_EVENT_BUFFER_SIZE here. See > below. > Updated in V8 patch set > > + rxa_params->use_queue_event_buf = false; > > + } else if ((!rxa_params->use_queue_event_buf && > > + rxa_params->event_buf_size == 0) || > > + (rxa_params->use_queue_event_buf && > > + rxa_params->event_buf_size != 0)) { > > + RTE_EDEV_LOG_ERR("Invalid adapter params\n"); > > return -EINVAL; > > + } > > > > pc = rte_malloc(NULL, sizeof(*pc), 0); > > if (pc == NULL) > > @@ -2362,9 +2451,11 @@ > rte_event_eth_rx_adapter_create_with_params(uint8_t id, uint8_t dev_id, > > * from NIC rx queues to get full buffer utilization and prevent > > * unnecessary rollovers. > > */ > > - rxa_params->event_buf_size = RTE_ALIGN(rxa_params- > >event_buf_size, > > - BATCH_SIZE); > > - rxa_params->event_buf_size += (BATCH_SIZE + BATCH_SIZE); > > + if (!rxa_params->use_queue_event_buf) { > > + rxa_params->event_buf_size = > > + RTE_ALIGN(rxa_params->event_buf_size, > BATCH_SIZE); > > + rxa_params->event_buf_size += (BATCH_SIZE + > BATCH_SIZE); > > + } > > Above if condition can be added as an else part of rxa_params == NULL. 
> Something like: > > if (rxa_params == NULL) { > rxa_params = &temp_params; > rxa_params->event_buf_size = ETH_EVENT_BUFFER_SIZE; > rxa_params->use_queue_event_buf = false; > } else if ((!rxa_params->use_queue_event_buf && > rxa_params->event_buf_size == 0) || > (rxa_params->use_queue_event_buf && > rxa_params->event_buf_size != 0)) { > RTE_EDEV_LOG_ERR("Invalid adapter params\n"); > return -EINVAL; > } else if (!rxa_params->use_queue_event_buf) { > rxa_params->event_buf_size = > RTE_ALIGN(rxa_params->event_buf_size, > BATCH_SIZE); > rxa_params->event_buf_size += (BATCH_SIZE + > BATCH_SIZE); > } > Done in V8 patch set > > > > ret = rxa_create(id, dev_id, rxa_params, rxa_default_conf_cb, pc); > > if (ret) > > @@ -2418,7 +2509,8 @@ rte_event_eth_rx_adapter_free(uint8_t id) > > if (rx_adapter->default_cb_arg) > > rte_free(rx_adapter->conf_arg); > > rte_free(rx_adapter->eth_devices); > > - rte_free(rx_adapter->event_enqueue_buffer.events); > > + if (!rx_adapter->use_queue_event_buf) > > + rte_free(rx_adapter->event_enqueue_buffer.events); > > rte_free(rx_adapter); > > event_eth_rx_adapter[id] = NULL; > > > > @@ -2522,6 +2614,14 @@ rte_event_eth_rx_adapter_queue_add(uint8_t > id, > > return -EINVAL; > > } > > > > + if ((rx_adapter->use_queue_event_buf && > > + queue_conf->event_buf_size == 0) || > > + (!rx_adapter->use_queue_event_buf && > > + queue_conf->event_buf_size != 0)) { > > + RTE_EDEV_LOG_ERR("Invalid Event buffer size for the > queue"); > > + return -EINVAL; > > + } > > + > > dev_info = &rx_adapter->eth_devices[eth_dev_id]; > > > > if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) { > > -- > > 2.25.1 ^ permalink raw reply [flat|nested] 81+ messages in thread
* [dpdk-dev] [PATCH v7 5/5] test/event: add unit test for Rx adapter 2021-10-06 4:02 ` [dpdk-dev] [PATCH v7 1/5] eventdev/rx_adapter: add event buffer size configurability Naga Harish K S V ` (2 preceding siblings ...) 2021-10-06 4:02 ` [dpdk-dev] [PATCH v7 4/5] eventdev/rx_adapter: implement " Naga Harish K S V @ 2021-10-06 4:02 ` Naga Harish K S V 2021-10-06 6:19 ` [dpdk-dev] [PATCH v7 1/5] eventdev/rx_adapter: add event buffer size configurability Jayatheerthan, Jay ` (2 subsequent siblings) 6 siblings, 0 replies; 81+ messages in thread From: Naga Harish K S V @ 2021-10-06 4:02 UTC (permalink / raw) To: jerinj, jay.jayatheerthan; +Cc: dev this patch adds unit tests for checking per rx queue event buffer feature using rte_event_eth_rx_adapter_queue_add api. Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com> --- app/test/test_event_eth_rx_adapter.c | 86 ++++++++++++++++++++++++++++ 1 file changed, 86 insertions(+) diff --git a/app/test/test_event_eth_rx_adapter.c b/app/test/test_event_eth_rx_adapter.c index 7c2cf0dd70..cf3c989efb 100644 --- a/app/test/test_event_eth_rx_adapter.c +++ b/app/test/test_event_eth_rx_adapter.c @@ -387,6 +387,90 @@ adapter_create(void) return err; } +static int +adapter_create_with_params(void) +{ + int err; + struct rte_event_dev_info dev_info; + struct rte_event_port_conf rx_p_conf; + struct rte_event_eth_rx_adapter_params rxa_params; + + memset(&rx_p_conf, 0, sizeof(rx_p_conf)); + + err = rte_event_dev_info_get(TEST_DEV_ID, &dev_info); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + rx_p_conf.new_event_threshold = dev_info.max_num_events; + rx_p_conf.dequeue_depth = dev_info.max_event_port_dequeue_depth; + rx_p_conf.enqueue_depth = dev_info.max_event_port_enqueue_depth; + + rxa_params.use_queue_event_buf = false; + rxa_params.event_buf_size = 0; + + err = rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID, + TEST_DEV_ID, &rx_p_conf, &rxa_params); + TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err); + 
+ rxa_params.use_queue_event_buf = true; + + err = rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID, + TEST_DEV_ID, &rx_p_conf, &rxa_params); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + err = rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID, + TEST_DEV_ID, &rx_p_conf, &rxa_params); + TEST_ASSERT(err == -EEXIST, "Expected -EEXIST got %d", err); + + return TEST_SUCCESS; +} + +static int +adapter_queue_event_buf_test(void) +{ + int err; + struct rte_event ev; + uint32_t cap; + + struct rte_event_eth_rx_adapter_queue_conf queue_config = {0}; + + err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID, TEST_ETHDEV_ID, + &cap); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + ev.queue_id = 0; + ev.sched_type = RTE_SCHED_TYPE_ATOMIC; + ev.priority = 0; + + queue_config.rx_queue_flags = 0; + if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID) { + ev.flow_id = 1; + queue_config.rx_queue_flags = + RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID; + } + queue_config.ev = ev; + queue_config.servicing_weight = 1; + queue_config.event_buf_size = 0; + + err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID, + TEST_ETHDEV_ID, 0, + &queue_config); + TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err); + + queue_config.event_buf_size = 1024; + + err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID, + TEST_ETHDEV_ID, 0, + &queue_config); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID, + TEST_ETHDEV_ID, + 0); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + return TEST_SUCCESS; +} + static void adapter_free(void) { @@ -826,6 +910,8 @@ static struct unit_test_suite event_eth_rx_tests = { TEST_CASE_ST(adapter_create, adapter_free, adapter_start_stop), TEST_CASE_ST(adapter_create, adapter_free, adapter_stats), TEST_CASE_ST(adapter_create, adapter_free, adapter_queue_conf), + TEST_CASE_ST(adapter_create_with_params, adapter_free, + adapter_queue_event_buf_test), TEST_CASES_END() /**< 
NULL terminate unit test array */ } }; -- 2.25.1 ^ permalink raw reply [flat|nested] 81+ messages in thread
* Re: [dpdk-dev] [PATCH v7 1/5] eventdev/rx_adapter: add event buffer size configurability 2021-10-06 4:02 ` [dpdk-dev] [PATCH v7 1/5] eventdev/rx_adapter: add event buffer size configurability Naga Harish K S V ` (3 preceding siblings ...) 2021-10-06 4:02 ` [dpdk-dev] [PATCH v7 5/5] test/event: add unit test for Rx adapter Naga Harish K S V @ 2021-10-06 6:19 ` Jayatheerthan, Jay 2021-10-06 7:47 ` [dpdk-dev] [PATCH v8 " Naga Harish K S V 2021-10-06 7:55 ` [dpdk-dev] [PATCH v8 1/5] eventdev/rx_adapter: add event buffer size configurability Naga Harish K S V 6 siblings, 0 replies; 81+ messages in thread From: Jayatheerthan, Jay @ 2021-10-06 6:19 UTC (permalink / raw) To: Naga Harish K, S V, jerinj; +Cc: dev, Kundapura, Ganapati Acked-by: Jay Jayatheerthan <jay.jayatheerthan@intel.com> > -----Original Message----- > From: Naga Harish K, S V <s.v.naga.harish.k@intel.com> > Sent: Wednesday, October 6, 2021 9:33 AM > To: jerinj@marvell.com; Jayatheerthan, Jay <jay.jayatheerthan@intel.com> > Cc: dev@dpdk.org; Kundapura, Ganapati <ganapati.kundapura@intel.com> > Subject: [PATCH v7 1/5] eventdev/rx_adapter: add event buffer size configurability > > Currently event buffer is static array with a default size defined > internally. > > To configure event buffer size from application, > ``rte_event_eth_rx_adapter_create_with_params`` api is added which > takes ``struct rte_event_eth_rx_adapter_params`` to configure event > buffer size in addition other params . The event buffer size is > rounded up for better buffer utilization and performance . In case > of NULL params argument, default event buffer size is used. 
> > Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com> > Signed-off-by: Ganapati Kundapura <ganapati.kundapura@intel.com> > > --- > v7: > * added additional validation check in adapter_create_with_params > API as per review comments > > v6: > * address code style related review comments > > v5: > * reverted queue conf get unit test change > > v4: > * rebased with latest dpdk-next-eventdev branch > * changed queue conf get unit test > > v3: > * updated documentation and code comments as per review comments. > * updated new create api test case name with suitable one. > > v2: > * Updated header file and rx adapter documentation as per review comments. > * new api name is modified as rte_event_eth_rx_adapter_create_with_params > as per review comments. > * rxa_params pointer argument Value NULL is allowed to represent the > default values > > v1: > * Initial implementation with documentation and unit tests. > --- > --- > .../prog_guide/event_ethernet_rx_adapter.rst | 7 ++ > lib/eventdev/rte_event_eth_rx_adapter.c | 98 +++++++++++++++++-- > lib/eventdev/rte_event_eth_rx_adapter.h | 41 +++++++- > lib/eventdev/version.map | 2 + > 4 files changed, 140 insertions(+), 8 deletions(-) > > diff --git a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst > index ce23d8a474..8526aecf57 100644 > --- a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst > +++ b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst > @@ -62,6 +62,13 @@ service function and needs to create an event port for it. The callback is > expected to fill the ``struct rte_event_eth_rx_adapter_conf structure`` > passed to it. > > +If the application desires to control the event buffer size, it can use the > +``rte_event_eth_rx_adapter_create_with_params()`` api. The event buffer size is > +specified using ``struct rte_event_eth_rx_adapter_params::event_buf_size``. 
> +The function is passed the event device to be associated with the adapter > +and port configuration for the adapter to setup an event port if the > +adapter needs to use a service function. > + > Adding Rx Queues to the Adapter Instance > ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ > > diff --git a/lib/eventdev/rte_event_eth_rx_adapter.c b/lib/eventdev/rte_event_eth_rx_adapter.c > index 10491ca07b..5ccea168ea 100644 > --- a/lib/eventdev/rte_event_eth_rx_adapter.c > +++ b/lib/eventdev/rte_event_eth_rx_adapter.c > @@ -85,7 +85,9 @@ struct rte_eth_event_enqueue_buffer { > /* Count of events in this buffer */ > uint16_t count; > /* Array of events in this buffer */ > - struct rte_event events[ETH_EVENT_BUFFER_SIZE]; > + struct rte_event *events; > + /* size of event buffer */ > + uint16_t events_size; > /* Event enqueue happens from head */ > uint16_t head; > /* New packets from rte_eth_rx_burst is enqued from tail */ > @@ -946,7 +948,7 @@ rxa_buffer_mbufs(struct rte_event_eth_rx_adapter *rx_adapter, > dropped = 0; > nb_cb = dev_info->cb_fn(eth_dev_id, rx_queue_id, > buf->last | > - (RTE_DIM(buf->events) & ~buf->last_mask), > + (buf->events_size & ~buf->last_mask), > buf->count >= BATCH_SIZE ? 
> buf->count - BATCH_SIZE : 0, > &buf->events[buf->tail], > @@ -972,7 +974,7 @@ rxa_pkt_buf_available(struct rte_eth_event_enqueue_buffer *buf) > uint32_t nb_req = buf->tail + BATCH_SIZE; > > if (!buf->last) { > - if (nb_req <= RTE_DIM(buf->events)) > + if (nb_req <= buf->events_size) > return true; > > if (buf->head >= BATCH_SIZE) { > @@ -2206,12 +2208,15 @@ rxa_ctrl(uint8_t id, int start) > return 0; > } > > -int > -rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, > - rte_event_eth_rx_adapter_conf_cb conf_cb, > - void *conf_arg) > +static int > +rxa_create(uint8_t id, uint8_t dev_id, > + struct rte_event_eth_rx_adapter_params *rxa_params, > + rte_event_eth_rx_adapter_conf_cb conf_cb, > + void *conf_arg) > { > struct rte_event_eth_rx_adapter *rx_adapter; > + struct rte_eth_event_enqueue_buffer *buf; > + struct rte_event *events; > int ret; > int socket_id; > uint16_t i; > @@ -2226,6 +2231,7 @@ rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, > > RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL); > RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); > + > if (conf_cb == NULL) > return -EINVAL; > > @@ -2273,11 +2279,30 @@ rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, > rte_free(rx_adapter); > return -ENOMEM; > } > + > rte_spinlock_init(&rx_adapter->rx_lock); > + > for (i = 0; i < RTE_MAX_ETHPORTS; i++) > rx_adapter->eth_devices[i].dev = &rte_eth_devices[i]; > > + /* Rx adapter event buffer allocation */ > + buf = &rx_adapter->event_enqueue_buffer; > + buf->events_size = rxa_params->event_buf_size; > + > + events = rte_zmalloc_socket(rx_adapter->mem_name, > + buf->events_size * sizeof(*events), > + 0, socket_id); > + if (events == NULL) { > + RTE_EDEV_LOG_ERR("Failed to allocate mem for event buffer\n"); > + rte_free(rx_adapter->eth_devices); > + rte_free(rx_adapter); > + return -ENOMEM; > + } > + > + rx_adapter->event_enqueue_buffer.events = events; > + > event_eth_rx_adapter[id] = rx_adapter; > + > if 
(conf_cb == rxa_default_conf_cb) > rx_adapter->default_cb_arg = 1; > > @@ -2293,6 +2318,61 @@ rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, > return 0; > } > > +int > +rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, > + rte_event_eth_rx_adapter_conf_cb conf_cb, > + void *conf_arg) > +{ > + struct rte_event_eth_rx_adapter_params rxa_params = {0}; > + > + /* use default values for adapter params */ > + rxa_params.event_buf_size = ETH_EVENT_BUFFER_SIZE; > + > + return rxa_create(id, dev_id, &rxa_params, conf_cb, conf_arg); > +} > + > +int > +rte_event_eth_rx_adapter_create_with_params(uint8_t id, uint8_t dev_id, > + struct rte_event_port_conf *port_config, > + struct rte_event_eth_rx_adapter_params *rxa_params) > +{ > + struct rte_event_port_conf *pc; > + int ret; > + struct rte_event_eth_rx_adapter_params temp_params = {0}; > + > + if (port_config == NULL) > + return -EINVAL; > + > + /* use default values if rxa_params is NULL */ > + if (rxa_params == NULL) { > + rxa_params = &temp_params; > + rxa_params->event_buf_size = ETH_EVENT_BUFFER_SIZE; > + } > + > + if (rxa_params->event_buf_size == 0) > + return -EINVAL; > + > + pc = rte_malloc(NULL, sizeof(*pc), 0); > + if (pc == NULL) > + return -ENOMEM; > + > + *pc = *port_config; > + > + /* adjust event buff size with BATCH_SIZE used for fetching packets > + * from NIC rx queues to get full buffer utilization and prevent > + * unnecessary rollovers. 
> + */ > + rxa_params->event_buf_size = RTE_ALIGN(rxa_params->event_buf_size, > + BATCH_SIZE); > + rxa_params->event_buf_size += (BATCH_SIZE + BATCH_SIZE); > + > + ret = rxa_create(id, dev_id, rxa_params, rxa_default_conf_cb, pc); > + if (ret) > + rte_free(pc); > + > + return ret; > +} > + > int > rte_event_eth_rx_adapter_create(uint8_t id, uint8_t dev_id, > struct rte_event_port_conf *port_config) > @@ -2302,12 +2382,14 @@ rte_event_eth_rx_adapter_create(uint8_t id, uint8_t dev_id, > > if (port_config == NULL) > return -EINVAL; > + > RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL); > > pc = rte_malloc(NULL, sizeof(*pc), 0); > if (pc == NULL) > return -ENOMEM; > *pc = *port_config; > + > ret = rte_event_eth_rx_adapter_create_ext(id, dev_id, > rxa_default_conf_cb, > pc); > @@ -2336,6 +2418,7 @@ rte_event_eth_rx_adapter_free(uint8_t id) > if (rx_adapter->default_cb_arg) > rte_free(rx_adapter->conf_arg); > rte_free(rx_adapter->eth_devices); > + rte_free(rx_adapter->event_enqueue_buffer.events); > rte_free(rx_adapter); > event_eth_rx_adapter[id] = NULL; > > @@ -2711,6 +2794,7 @@ rte_event_eth_rx_adapter_stats_get(uint8_t id, > > stats->rx_packets += dev_stats_sum.rx_packets; > stats->rx_enq_count += dev_stats_sum.rx_enq_count; > + > return 0; > } > > diff --git a/lib/eventdev/rte_event_eth_rx_adapter.h b/lib/eventdev/rte_event_eth_rx_adapter.h > index 470543e434..846ca569e9 100644 > --- a/lib/eventdev/rte_event_eth_rx_adapter.h > +++ b/lib/eventdev/rte_event_eth_rx_adapter.h > @@ -26,6 +26,7 @@ > * The ethernet Rx event adapter's functions are: > * - rte_event_eth_rx_adapter_create_ext() > * - rte_event_eth_rx_adapter_create() > + * - rte_event_eth_rx_adapter_create_with_params() > * - rte_event_eth_rx_adapter_free() > * - rte_event_eth_rx_adapter_queue_add() > * - rte_event_eth_rx_adapter_queue_del() > @@ -37,7 +38,7 @@ > * > * The application creates an ethernet to event adapter using > * rte_event_eth_rx_adapter_create_ext() or 
rte_event_eth_rx_adapter_create() > - * functions. > + * or rte_event_eth_rx_adapter_create_with_params() functions. > * The adapter needs to know which ethernet rx queues to poll for mbufs as well > * as event device parameters such as the event queue identifier, event > * priority and scheduling type that the adapter should use when constructing > @@ -257,6 +258,17 @@ struct rte_event_eth_rx_adapter_vector_limits { > */ > }; > > +/** > + * A structure to hold adapter config params > + */ > +struct rte_event_eth_rx_adapter_params { > + uint16_t event_buf_size; > + /**< size of event buffer for the adapter. > + * This value is rounded up for better buffer utilization > + * and performance. > + */ > +}; > + > /** > * > * Callback function invoked by the SW adapter before it continues > @@ -357,6 +369,33 @@ int rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, > int rte_event_eth_rx_adapter_create(uint8_t id, uint8_t dev_id, > struct rte_event_port_conf *port_config); > > +/** > + * This is a variant of rte_event_eth_rx_adapter_create() with additional > + * adapter params specified in ``struct rte_event_eth_rx_adapter_params``. > + * > + * @param id > + * The identifier of the ethernet Rx event adapter. > + * > + * @param dev_id > + * The identifier of the event device to configure. > + * > + * @param port_config > + * Argument of type *rte_event_port_conf* that is passed to the conf_cb > + * function. > + * > + * @param rxa_params > + * Pointer to struct rte_event_eth_rx_adapter_params. > + * In case of NULL, default values are used. 
> + * > + * @return > + * - 0: Success > + * - <0: Error code on failure > + */ > +__rte_experimental > +int rte_event_eth_rx_adapter_create_with_params(uint8_t id, uint8_t dev_id, > + struct rte_event_port_conf *port_config, > + struct rte_event_eth_rx_adapter_params *rxa_params); > + > /** > * Free an event adapter > * > diff --git a/lib/eventdev/version.map b/lib/eventdev/version.map > index 9f280160fa..7de18497a6 100644 > --- a/lib/eventdev/version.map > +++ b/lib/eventdev/version.map > @@ -138,6 +138,8 @@ EXPERIMENTAL { > __rte_eventdev_trace_port_setup; > # added in 20.11 > rte_event_pmd_pci_probe_named; > + # added in 21.11 > + rte_event_eth_rx_adapter_create_with_params; > > #added in 21.05 > rte_event_vector_pool_create; > -- > 2.25.1 ^ permalink raw reply [flat|nested] 81+ messages in thread
* [dpdk-dev] [PATCH v8 1/5] eventdev/rx_adapter: add event buffer size configurability 2021-10-06 4:02 ` [dpdk-dev] [PATCH v7 1/5] eventdev/rx_adapter: add event buffer size configurability Naga Harish K S V ` (4 preceding siblings ...) 2021-10-06 6:19 ` [dpdk-dev] [PATCH v7 1/5] eventdev/rx_adapter: add event buffer size configurability Jayatheerthan, Jay @ 2021-10-06 7:47 ` Naga Harish K S V 2021-10-06 7:47 ` [dpdk-dev] [PATCH v8 2/5] test/event: add unit test for Rx adapter Naga Harish K S V ` (3 more replies) 2021-10-06 7:55 ` [dpdk-dev] [PATCH v8 1/5] eventdev/rx_adapter: add event buffer size configurability Naga Harish K S V 6 siblings, 4 replies; 81+ messages in thread From: Naga Harish K S V @ 2021-10-06 7:47 UTC (permalink / raw) To: jerinj, jay.jayatheerthan; +Cc: dev, Ganapati Kundapura Currently event buffer is static array with a default size defined internally. To configure event buffer size from application, ``rte_event_eth_rx_adapter_create_with_params`` api is added which takes ``struct rte_event_eth_rx_adapter_params`` to configure event buffer size in addition other params . The event buffer size is rounded up for better buffer utilization and performance . In case of NULL params argument, default event buffer size is used. Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com> Signed-off-by: Ganapati Kundapura <ganapati.kundapura@intel.com> Acked-by: Jay Jayatheerthan <jay.jayatheerthan@intel.com> --- v8: * changed rxa_event_buf_get function as per review comments adapter_create_with_params modified as per comments v7: * added additional validation check in adapter_create_with_params API as per review comments v6: * address code style related review comments v5: * reverted queue conf get unit test change v4: * rebased with latest dpdk-next-eventdev branch * changed queue conf get unit test v3: * updated documentation and code comments as per review comments. * updated new create api test case name with suitable one. 
v2: * Updated header file and rx adapter documentation as per review comments. * new api name is modified as rte_event_eth_rx_adapter_create_with_params as per review comments. * rxa_params pointer argument Value NULL is allowed to represent the default values v1: * Initial implementation with documentation and unit tests. --- --- .../prog_guide/event_ethernet_rx_adapter.rst | 7 ++ lib/eventdev/rte_event_eth_rx_adapter.c | 98 +++++++++++++++++-- lib/eventdev/rte_event_eth_rx_adapter.h | 41 +++++++- lib/eventdev/version.map | 2 + 4 files changed, 140 insertions(+), 8 deletions(-) diff --git a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst index ce23d8a474..8526aecf57 100644 --- a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst +++ b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst @@ -62,6 +62,13 @@ service function and needs to create an event port for it. The callback is expected to fill the ``struct rte_event_eth_rx_adapter_conf structure`` passed to it. +If the application desires to control the event buffer size, it can use the +``rte_event_eth_rx_adapter_create_with_params()`` api. The event buffer size is +specified using ``struct rte_event_eth_rx_adapter_params::event_buf_size``. +The function is passed the event device to be associated with the adapter +and port configuration for the adapter to setup an event port if the +adapter needs to use a service function. 
+ Adding Rx Queues to the Adapter Instance ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/lib/eventdev/rte_event_eth_rx_adapter.c b/lib/eventdev/rte_event_eth_rx_adapter.c index 10491ca07b..5ccea168ea 100644 --- a/lib/eventdev/rte_event_eth_rx_adapter.c +++ b/lib/eventdev/rte_event_eth_rx_adapter.c @@ -85,7 +85,9 @@ struct rte_eth_event_enqueue_buffer { /* Count of events in this buffer */ uint16_t count; /* Array of events in this buffer */ - struct rte_event events[ETH_EVENT_BUFFER_SIZE]; + struct rte_event *events; + /* size of event buffer */ + uint16_t events_size; /* Event enqueue happens from head */ uint16_t head; /* New packets from rte_eth_rx_burst is enqued from tail */ @@ -946,7 +948,7 @@ rxa_buffer_mbufs(struct rte_event_eth_rx_adapter *rx_adapter, dropped = 0; nb_cb = dev_info->cb_fn(eth_dev_id, rx_queue_id, buf->last | - (RTE_DIM(buf->events) & ~buf->last_mask), + (buf->events_size & ~buf->last_mask), buf->count >= BATCH_SIZE ? buf->count - BATCH_SIZE : 0, &buf->events[buf->tail], @@ -972,7 +974,7 @@ rxa_pkt_buf_available(struct rte_eth_event_enqueue_buffer *buf) uint32_t nb_req = buf->tail + BATCH_SIZE; if (!buf->last) { - if (nb_req <= RTE_DIM(buf->events)) + if (nb_req <= buf->events_size) return true; if (buf->head >= BATCH_SIZE) { @@ -2206,12 +2208,15 @@ rxa_ctrl(uint8_t id, int start) return 0; } -int -rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, - rte_event_eth_rx_adapter_conf_cb conf_cb, - void *conf_arg) +static int +rxa_create(uint8_t id, uint8_t dev_id, + struct rte_event_eth_rx_adapter_params *rxa_params, + rte_event_eth_rx_adapter_conf_cb conf_cb, + void *conf_arg) { struct rte_event_eth_rx_adapter *rx_adapter; + struct rte_eth_event_enqueue_buffer *buf; + struct rte_event *events; int ret; int socket_id; uint16_t i; @@ -2226,6 +2231,7 @@ rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL); RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); 
+ if (conf_cb == NULL) return -EINVAL; @@ -2273,11 +2279,30 @@ rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, rte_free(rx_adapter); return -ENOMEM; } + rte_spinlock_init(&rx_adapter->rx_lock); + for (i = 0; i < RTE_MAX_ETHPORTS; i++) rx_adapter->eth_devices[i].dev = &rte_eth_devices[i]; + /* Rx adapter event buffer allocation */ + buf = &rx_adapter->event_enqueue_buffer; + buf->events_size = rxa_params->event_buf_size; + + events = rte_zmalloc_socket(rx_adapter->mem_name, + buf->events_size * sizeof(*events), + 0, socket_id); + if (events == NULL) { + RTE_EDEV_LOG_ERR("Failed to allocate mem for event buffer\n"); + rte_free(rx_adapter->eth_devices); + rte_free(rx_adapter); + return -ENOMEM; + } + + rx_adapter->event_enqueue_buffer.events = events; + event_eth_rx_adapter[id] = rx_adapter; + if (conf_cb == rxa_default_conf_cb) rx_adapter->default_cb_arg = 1; @@ -2293,6 +2318,61 @@ rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, return 0; } +int +rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, + rte_event_eth_rx_adapter_conf_cb conf_cb, + void *conf_arg) +{ + struct rte_event_eth_rx_adapter_params rxa_params = {0}; + + /* use default values for adapter params */ + rxa_params.event_buf_size = ETH_EVENT_BUFFER_SIZE; + + return rxa_create(id, dev_id, &rxa_params, conf_cb, conf_arg); +} + +int +rte_event_eth_rx_adapter_create_with_params(uint8_t id, uint8_t dev_id, + struct rte_event_port_conf *port_config, + struct rte_event_eth_rx_adapter_params *rxa_params) +{ + struct rte_event_port_conf *pc; + int ret; + struct rte_event_eth_rx_adapter_params temp_params = {0}; + + if (port_config == NULL) + return -EINVAL; + + /* use default values if rxa_params is NULL */ + if (rxa_params == NULL) { + rxa_params = &temp_params; + rxa_params->event_buf_size = ETH_EVENT_BUFFER_SIZE; + } + + if (rxa_params->event_buf_size == 0) + return -EINVAL; + + pc = rte_malloc(NULL, sizeof(*pc), 0); + if (pc == NULL) + return -ENOMEM; + + *pc = 
*port_config; + + /* adjust event buff size with BATCH_SIZE used for fetching packets + * from NIC rx queues to get full buffer utilization and prevent + * unnecessary rollovers. + */ + rxa_params->event_buf_size = RTE_ALIGN(rxa_params->event_buf_size, + BATCH_SIZE); + rxa_params->event_buf_size += (BATCH_SIZE + BATCH_SIZE); + + ret = rxa_create(id, dev_id, rxa_params, rxa_default_conf_cb, pc); + if (ret) + rte_free(pc); + + return ret; +} + int rte_event_eth_rx_adapter_create(uint8_t id, uint8_t dev_id, struct rte_event_port_conf *port_config) @@ -2302,12 +2382,14 @@ rte_event_eth_rx_adapter_create(uint8_t id, uint8_t dev_id, if (port_config == NULL) return -EINVAL; + RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL); pc = rte_malloc(NULL, sizeof(*pc), 0); if (pc == NULL) return -ENOMEM; *pc = *port_config; + ret = rte_event_eth_rx_adapter_create_ext(id, dev_id, rxa_default_conf_cb, pc); @@ -2336,6 +2418,7 @@ rte_event_eth_rx_adapter_free(uint8_t id) if (rx_adapter->default_cb_arg) rte_free(rx_adapter->conf_arg); rte_free(rx_adapter->eth_devices); + rte_free(rx_adapter->event_enqueue_buffer.events); rte_free(rx_adapter); event_eth_rx_adapter[id] = NULL; @@ -2711,6 +2794,7 @@ rte_event_eth_rx_adapter_stats_get(uint8_t id, stats->rx_packets += dev_stats_sum.rx_packets; stats->rx_enq_count += dev_stats_sum.rx_enq_count; + return 0; } diff --git a/lib/eventdev/rte_event_eth_rx_adapter.h b/lib/eventdev/rte_event_eth_rx_adapter.h index 470543e434..846ca569e9 100644 --- a/lib/eventdev/rte_event_eth_rx_adapter.h +++ b/lib/eventdev/rte_event_eth_rx_adapter.h @@ -26,6 +26,7 @@ * The ethernet Rx event adapter's functions are: * - rte_event_eth_rx_adapter_create_ext() * - rte_event_eth_rx_adapter_create() + * - rte_event_eth_rx_adapter_create_with_params() * - rte_event_eth_rx_adapter_free() * - rte_event_eth_rx_adapter_queue_add() * - rte_event_eth_rx_adapter_queue_del() @@ -37,7 +38,7 @@ * * The application creates an ethernet to event adapter using * 
rte_event_eth_rx_adapter_create_ext() or rte_event_eth_rx_adapter_create() - * functions. + * or rte_event_eth_rx_adapter_create_with_params() functions. * The adapter needs to know which ethernet rx queues to poll for mbufs as well * as event device parameters such as the event queue identifier, event * priority and scheduling type that the adapter should use when constructing @@ -257,6 +258,17 @@ struct rte_event_eth_rx_adapter_vector_limits { */ }; +/** + * A structure to hold adapter config params + */ +struct rte_event_eth_rx_adapter_params { + uint16_t event_buf_size; + /**< size of event buffer for the adapter. + * This value is rounded up for better buffer utilization + * and performance. + */ +}; + /** * * Callback function invoked by the SW adapter before it continues @@ -357,6 +369,33 @@ int rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, int rte_event_eth_rx_adapter_create(uint8_t id, uint8_t dev_id, struct rte_event_port_conf *port_config); +/** + * This is a variant of rte_event_eth_rx_adapter_create() with additional + * adapter params specified in ``struct rte_event_eth_rx_adapter_params``. + * + * @param id + * The identifier of the ethernet Rx event adapter. + * + * @param dev_id + * The identifier of the event device to configure. + * + * @param port_config + * Argument of type *rte_event_port_conf* that is passed to the conf_cb + * function. + * + * @param rxa_params + * Pointer to struct rte_event_eth_rx_adapter_params. + * In case of NULL, default values are used. 
+ * + * @return + * - 0: Success + * - <0: Error code on failure + */ +__rte_experimental +int rte_event_eth_rx_adapter_create_with_params(uint8_t id, uint8_t dev_id, + struct rte_event_port_conf *port_config, + struct rte_event_eth_rx_adapter_params *rxa_params); + /** * Free an event adapter * diff --git a/lib/eventdev/version.map b/lib/eventdev/version.map index 9f280160fa..7de18497a6 100644 --- a/lib/eventdev/version.map +++ b/lib/eventdev/version.map @@ -138,6 +138,8 @@ EXPERIMENTAL { __rte_eventdev_trace_port_setup; # added in 20.11 rte_event_pmd_pci_probe_named; + # added in 21.11 + rte_event_eth_rx_adapter_create_with_params; #added in 21.05 rte_event_vector_pool_create; -- 2.25.1 ^ permalink raw reply [flat|nested] 81+ messages in thread
* [dpdk-dev] [PATCH v8 2/5] test/event: add unit test for Rx adapter 2021-10-06 7:47 ` [dpdk-dev] [PATCH v8 " Naga Harish K S V @ 2021-10-06 7:47 ` Naga Harish K S V 2021-10-06 7:47 ` [dpdk-dev] [PATCH v8 3/5] eventdev/rx_adapter: introduce per queue event buffer Naga Harish K S V ` (2 subsequent siblings) 3 siblings, 0 replies; 81+ messages in thread From: Naga Harish K S V @ 2021-10-06 7:47 UTC (permalink / raw) To: jerinj, jay.jayatheerthan; +Cc: dev this patch adds unit test for rte_event_eth_rx_adapter_create_with_params api and validate all possible input combinations. Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com> Acked-by: Jay Jayatheerthan <jay.jayatheerthan@intel.com> --- app/test/test_event_eth_rx_adapter.c | 53 +++++++++++++++++++++++++--- 1 file changed, 49 insertions(+), 4 deletions(-) diff --git a/app/test/test_event_eth_rx_adapter.c b/app/test/test_event_eth_rx_adapter.c index 13664a3a3b..7c2cf0dd70 100644 --- a/app/test/test_event_eth_rx_adapter.c +++ b/app/test/test_event_eth_rx_adapter.c @@ -428,6 +428,50 @@ adapter_create_free(void) return TEST_SUCCESS; } +static int +adapter_create_free_with_params(void) +{ + int err; + + struct rte_event_port_conf rx_p_conf = { + .dequeue_depth = 8, + .enqueue_depth = 8, + .new_event_threshold = 1200, + }; + + struct rte_event_eth_rx_adapter_params rxa_params = { + .event_buf_size = 1024 + }; + + err = rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID, + TEST_DEV_ID, NULL, NULL); + TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err); + + err = rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID, + TEST_DEV_ID, &rx_p_conf, &rxa_params); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + err = rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID, + TEST_DEV_ID, &rx_p_conf, &rxa_params); + TEST_ASSERT(err == -EEXIST, "Expected -EEXIST %d got %d", -EEXIST, err); + + rxa_params.event_buf_size = 0; + err = rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID, + 
TEST_DEV_ID, &rx_p_conf, &rxa_params); + TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err); + + err = rte_event_eth_rx_adapter_free(TEST_INST_ID); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + err = rte_event_eth_rx_adapter_free(TEST_INST_ID); + TEST_ASSERT(err == -EINVAL, "Expected -EINVAL %d got %d", -EINVAL, err); + + err = rte_event_eth_rx_adapter_free(1); + TEST_ASSERT(err == -EINVAL, "Expected -EINVAL %d got %d", -EINVAL, err); + + return TEST_SUCCESS; +} + static int adapter_queue_add_del(void) { @@ -435,7 +479,7 @@ adapter_queue_add_del(void) struct rte_event ev; uint32_t cap; - struct rte_event_eth_rx_adapter_queue_conf queue_config; + struct rte_event_eth_rx_adapter_queue_conf queue_config = {0}; err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID, TEST_ETHDEV_ID, &cap); @@ -523,7 +567,7 @@ adapter_multi_eth_add_del(void) uint16_t port_index, port_index_base, drv_id = 0; char driver_name[50]; - struct rte_event_eth_rx_adapter_queue_conf queue_config; + struct rte_event_eth_rx_adapter_queue_conf queue_config = {0}; ev.queue_id = 0; ev.sched_type = RTE_SCHED_TYPE_ATOMIC; @@ -594,7 +638,7 @@ adapter_intr_queue_add_del(void) struct rte_event ev; uint32_t cap; uint16_t eth_port; - struct rte_event_eth_rx_adapter_queue_conf queue_config; + struct rte_event_eth_rx_adapter_queue_conf queue_config = {0}; if (!default_params.rx_intr_port_inited) return 0; @@ -687,7 +731,7 @@ adapter_start_stop(void) ev.sched_type = RTE_SCHED_TYPE_ATOMIC; ev.priority = 0; - struct rte_event_eth_rx_adapter_queue_conf queue_config; + struct rte_event_eth_rx_adapter_queue_conf queue_config = {0}; queue_config.rx_queue_flags = 0; if (default_params.caps & @@ -774,6 +818,7 @@ static struct unit_test_suite event_eth_rx_tests = { .teardown = testsuite_teardown, .unit_test_cases = { TEST_CASE_ST(NULL, NULL, adapter_create_free), + TEST_CASE_ST(NULL, NULL, adapter_create_free_with_params), TEST_CASE_ST(adapter_create, adapter_free, adapter_queue_add_del), 
TEST_CASE_ST(adapter_create, adapter_free, -- 2.25.1 ^ permalink raw reply [flat|nested] 81+ messages in thread
* [dpdk-dev] [PATCH v8 3/5] eventdev/rx_adapter: introduce per queue event buffer 2021-10-06 7:47 ` [dpdk-dev] [PATCH v8 " Naga Harish K S V 2021-10-06 7:47 ` [dpdk-dev] [PATCH v8 2/5] test/event: add unit test for Rx adapter Naga Harish K S V @ 2021-10-06 7:47 ` Naga Harish K S V 2021-10-06 7:47 ` [dpdk-dev] [PATCH v8 4/5] eventdev/rx_adapter: implement " Naga Harish K S V 2021-10-06 7:47 ` [dpdk-dev] [PATCH v8 5/5] test/event: add unit test for Rx adapter Naga Harish K S V 3 siblings, 0 replies; 81+ messages in thread From: Naga Harish K S V @ 2021-10-06 7:47 UTC (permalink / raw) To: jerinj, jay.jayatheerthan; +Cc: dev To configure per queue event buffer size, application sets ``rte_event_eth_rx_adapter_params::use_queue_event_buf`` flag as true and is passed to ``rte_event_eth_rx_adapter_create_with_params`` api. The per queue event buffer size is populated in ``rte_event_eth_rx_adapter_queue_conf::event_buf_size`` and passed to ``rte_event_eth_rx_adapter_queue_add`` api. Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com> Acked-by: Jay Jayatheerthan <jay.jayatheerthan@intel.com> --- .../prog_guide/event_ethernet_rx_adapter.rst | 19 ++++++++++++------- lib/eventdev/rte_event_eth_rx_adapter.h | 4 ++++ 2 files changed, 16 insertions(+), 7 deletions(-) diff --git a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst index 8526aecf57..8b58130fc5 100644 --- a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst +++ b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst @@ -62,12 +62,14 @@ service function and needs to create an event port for it. The callback is expected to fill the ``struct rte_event_eth_rx_adapter_conf structure`` passed to it. -If the application desires to control the event buffer size, it can use the -``rte_event_eth_rx_adapter_create_with_params()`` api. The event buffer size is -specified using ``struct rte_event_eth_rx_adapter_params::event_buf_size``. 
-The function is passed the event device to be associated with the adapter -and port configuration for the adapter to setup an event port if the -adapter needs to use a service function. +If the application desires to control the event buffer size at adapter level, +it can use the ``rte_event_eth_rx_adapter_create_with_params()`` api. The event +buffer size is specified using ``struct rte_event_eth_rx_adapter_params:: +event_buf_size``. To configure the event buffer size at queue level, the boolean +flag ``struct rte_event_eth_rx_adapter_params::use_queue_event_buf`` need to be +set to true. The function is passed the event device to be associated with +the adapter and port configuration for the adapter to setup an event port +if the adapter needs to use a service function. Adding Rx Queues to the Adapter Instance ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -79,7 +81,9 @@ parameter. Event information for packets from this Rx queue is encoded in the ``ev`` field of ``struct rte_event_eth_rx_adapter_queue_conf``. The servicing_weight member of the struct rte_event_eth_rx_adapter_queue_conf is the relative polling frequency of the Rx queue and is applicable when the -adapter uses a service core function. +adapter uses a service core function. The applications can configure queue +event buffer size in ``struct rte_event_eth_rx_adapter_queue_conf::event_buf_size`` +parameter. .. code-block:: c @@ -90,6 +94,7 @@ adapter uses a service core function. 
queue_config.rx_queue_flags = 0; queue_config.ev = ev; queue_config.servicing_weight = 1; + queue_config.event_buf_size = 1024; err = rte_event_eth_rx_adapter_queue_add(id, eth_dev_id, diff --git a/lib/eventdev/rte_event_eth_rx_adapter.h b/lib/eventdev/rte_event_eth_rx_adapter.h index 846ca569e9..70ca427d66 100644 --- a/lib/eventdev/rte_event_eth_rx_adapter.h +++ b/lib/eventdev/rte_event_eth_rx_adapter.h @@ -200,6 +200,8 @@ struct rte_event_eth_rx_adapter_queue_conf { * Valid when RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR flag is set in * @see rte_event_eth_rx_adapter_queue_conf::rx_queue_flags. */ + uint16_t event_buf_size; + /**< event buffer size for this queue */ }; /** @@ -267,6 +269,8 @@ struct rte_event_eth_rx_adapter_params { * This value is rounded up for better buffer utilization * and performance. */ + bool use_queue_event_buf; + /**< flag to indicate that event buffer is separate for each queue */ }; /** -- 2.25.1 ^ permalink raw reply [flat|nested] 81+ messages in thread
* [dpdk-dev] [PATCH v8 4/5] eventdev/rx_adapter: implement per queue event buffer 2021-10-06 7:47 ` [dpdk-dev] [PATCH v8 " Naga Harish K S V 2021-10-06 7:47 ` [dpdk-dev] [PATCH v8 2/5] test/event: add unit test for Rx adapter Naga Harish K S V 2021-10-06 7:47 ` [dpdk-dev] [PATCH v8 3/5] eventdev/rx_adapter: introduce per queue event buffer Naga Harish K S V @ 2021-10-06 7:47 ` Naga Harish K S V 2021-10-06 7:47 ` [dpdk-dev] [PATCH v8 5/5] test/event: add unit test for Rx adapter Naga Harish K S V 3 siblings, 0 replies; 81+ messages in thread From: Naga Harish K S V @ 2021-10-06 7:47 UTC (permalink / raw) To: jerinj, jay.jayatheerthan; +Cc: dev this patch implement the per queue event buffer with required validations. Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com> --- lib/eventdev/rte_event_eth_rx_adapter.c | 213 +++++++++++++++++------- 1 file changed, 154 insertions(+), 59 deletions(-) diff --git a/lib/eventdev/rte_event_eth_rx_adapter.c b/lib/eventdev/rte_event_eth_rx_adapter.c index 5ccea168ea..8e3cd793c1 100644 --- a/lib/eventdev/rte_event_eth_rx_adapter.c +++ b/lib/eventdev/rte_event_eth_rx_adapter.c @@ -102,10 +102,12 @@ struct rte_event_eth_rx_adapter { uint8_t rss_key_be[RSS_KEY_SIZE]; /* Event device identifier */ uint8_t eventdev_id; - /* Per ethernet device structure */ - struct eth_device_info *eth_devices; /* Event port identifier */ uint8_t event_port_id; + /* Flag indicating per rxq event buffer */ + bool use_queue_event_buf; + /* Per ethernet device structure */ + struct eth_device_info *eth_devices; /* Lock to serialize config updates with service function */ rte_spinlock_t rx_lock; /* Max mbufs processed in any service function invocation */ @@ -241,6 +243,7 @@ struct eth_rx_queue_info { uint32_t flow_id_mask; /* Set to ~0 if app provides flow id else 0 */ uint64_t event; struct eth_rx_vector_data vector_data; + struct rte_eth_event_enqueue_buffer *event_buf; }; static struct rte_event_eth_rx_adapter **event_eth_rx_adapter; @@ 
-262,6 +265,18 @@ rxa_validate_id(uint8_t id) return id < RTE_EVENT_ETH_RX_ADAPTER_MAX_INSTANCE; } +static inline struct rte_eth_event_enqueue_buffer * +rxa_event_buf_get(struct rte_event_eth_rx_adapter *rx_adapter, + uint16_t eth_dev_id, uint16_t rx_queue_id) +{ + if (rx_adapter->use_queue_event_buf) { + struct eth_device_info *dev_info = + &rx_adapter->eth_devices[eth_dev_id]; + return dev_info->rx_queue[rx_queue_id].event_buf; + } else + return &rx_adapter->event_enqueue_buffer; +} + #define RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) do { \ if (!rxa_validate_id(id)) { \ RTE_EDEV_LOG_ERR("Invalid eth Rx adapter id = %d\n", id); \ @@ -767,10 +782,9 @@ rxa_enq_block_end_ts(struct rte_event_eth_rx_adapter *rx_adapter, /* Enqueue buffered events to event device */ static inline uint16_t -rxa_flush_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter) +rxa_flush_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter, + struct rte_eth_event_enqueue_buffer *buf) { - struct rte_eth_event_enqueue_buffer *buf = - &rx_adapter->event_enqueue_buffer; struct rte_event_eth_rx_adapter_stats *stats = &rx_adapter->stats; uint16_t count = buf->last ? 
buf->last - buf->head : buf->count; @@ -888,15 +902,14 @@ rxa_buffer_mbufs(struct rte_event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id, uint16_t rx_queue_id, struct rte_mbuf **mbufs, - uint16_t num) + uint16_t num, + struct rte_eth_event_enqueue_buffer *buf) { uint32_t i; struct eth_device_info *dev_info = &rx_adapter->eth_devices[eth_dev_id]; struct eth_rx_queue_info *eth_rx_queue_info = &dev_info->rx_queue[rx_queue_id]; - struct rte_eth_event_enqueue_buffer *buf = - &rx_adapter->event_enqueue_buffer; uint16_t new_tail = buf->tail; uint64_t event = eth_rx_queue_info->event; uint32_t flow_id_mask = eth_rx_queue_info->flow_id_mask; @@ -995,11 +1008,10 @@ rxa_eth_rx(struct rte_event_eth_rx_adapter *rx_adapter, uint16_t queue_id, uint32_t rx_count, uint32_t max_rx, - int *rxq_empty) + int *rxq_empty, + struct rte_eth_event_enqueue_buffer *buf) { struct rte_mbuf *mbufs[BATCH_SIZE]; - struct rte_eth_event_enqueue_buffer *buf = - &rx_adapter->event_enqueue_buffer; struct rte_event_eth_rx_adapter_stats *stats = &rx_adapter->stats; uint16_t n; @@ -1012,7 +1024,7 @@ rxa_eth_rx(struct rte_event_eth_rx_adapter *rx_adapter, */ while (rxa_pkt_buf_available(buf)) { if (buf->count >= BATCH_SIZE) - rxa_flush_event_buffer(rx_adapter); + rxa_flush_event_buffer(rx_adapter, buf); stats->rx_poll_count++; n = rte_eth_rx_burst(port_id, queue_id, mbufs, BATCH_SIZE); @@ -1021,14 +1033,14 @@ rxa_eth_rx(struct rte_event_eth_rx_adapter *rx_adapter, *rxq_empty = 1; break; } - rxa_buffer_mbufs(rx_adapter, port_id, queue_id, mbufs, n); + rxa_buffer_mbufs(rx_adapter, port_id, queue_id, mbufs, n, buf); nb_rx += n; if (rx_count + nb_rx > max_rx) break; } if (buf->count > 0) - rxa_flush_event_buffer(rx_adapter); + rxa_flush_event_buffer(rx_adapter, buf); return nb_rx; } @@ -1169,7 +1181,7 @@ rxa_intr_ring_dequeue(struct rte_event_eth_rx_adapter *rx_adapter) ring_lock = &rx_adapter->intr_ring_lock; if (buf->count >= BATCH_SIZE) - rxa_flush_event_buffer(rx_adapter); + 
rxa_flush_event_buffer(rx_adapter, buf); while (rxa_pkt_buf_available(buf)) { struct eth_device_info *dev_info; @@ -1221,7 +1233,7 @@ rxa_intr_ring_dequeue(struct rte_event_eth_rx_adapter *rx_adapter) continue; n = rxa_eth_rx(rx_adapter, port, i, nb_rx, rx_adapter->max_nb_rx, - &rxq_empty); + &rxq_empty, buf); nb_rx += n; enq_buffer_full = !rxq_empty && n == 0; @@ -1242,7 +1254,7 @@ rxa_intr_ring_dequeue(struct rte_event_eth_rx_adapter *rx_adapter) } else { n = rxa_eth_rx(rx_adapter, port, queue, nb_rx, rx_adapter->max_nb_rx, - &rxq_empty); + &rxq_empty, buf); rx_adapter->qd_valid = !rxq_empty; nb_rx += n; if (nb_rx > rx_adapter->max_nb_rx) @@ -1273,13 +1285,12 @@ rxa_poll(struct rte_event_eth_rx_adapter *rx_adapter) { uint32_t num_queue; uint32_t nb_rx = 0; - struct rte_eth_event_enqueue_buffer *buf; + struct rte_eth_event_enqueue_buffer *buf = NULL; uint32_t wrr_pos; uint32_t max_nb_rx; wrr_pos = rx_adapter->wrr_pos; max_nb_rx = rx_adapter->max_nb_rx; - buf = &rx_adapter->event_enqueue_buffer; /* Iterate through a WRR sequence */ for (num_queue = 0; num_queue < rx_adapter->wrr_len; num_queue++) { @@ -1287,24 +1298,31 @@ rxa_poll(struct rte_event_eth_rx_adapter *rx_adapter) uint16_t qid = rx_adapter->eth_rx_poll[poll_idx].eth_rx_qid; uint16_t d = rx_adapter->eth_rx_poll[poll_idx].eth_dev_id; + buf = rxa_event_buf_get(rx_adapter, d, qid); + /* Don't do a batch dequeue from the rx queue if there isn't * enough space in the enqueue buffer. 
*/ if (buf->count >= BATCH_SIZE) - rxa_flush_event_buffer(rx_adapter); + rxa_flush_event_buffer(rx_adapter, buf); if (!rxa_pkt_buf_available(buf)) { - rx_adapter->wrr_pos = wrr_pos; - return nb_rx; + if (rx_adapter->use_queue_event_buf) + goto poll_next_entry; + else { + rx_adapter->wrr_pos = wrr_pos; + return nb_rx; + } } nb_rx += rxa_eth_rx(rx_adapter, d, qid, nb_rx, max_nb_rx, - NULL); + NULL, buf); if (nb_rx > max_nb_rx) { rx_adapter->wrr_pos = (wrr_pos + 1) % rx_adapter->wrr_len; break; } +poll_next_entry: if (++wrr_pos == rx_adapter->wrr_len) wrr_pos = 0; } @@ -1315,12 +1333,13 @@ static void rxa_vector_expire(struct eth_rx_vector_data *vec, void *arg) { struct rte_event_eth_rx_adapter *rx_adapter = arg; - struct rte_eth_event_enqueue_buffer *buf = - &rx_adapter->event_enqueue_buffer; + struct rte_eth_event_enqueue_buffer *buf = NULL; struct rte_event *ev; + buf = rxa_event_buf_get(rx_adapter, vec->port, vec->queue); + if (buf->count) - rxa_flush_event_buffer(rx_adapter); + rxa_flush_event_buffer(rx_adapter, buf); if (vec->vector_ev->nb_elem == 0) return; @@ -1947,9 +1966,16 @@ rxa_sw_del(struct rte_event_eth_rx_adapter *rx_adapter, rx_adapter->num_rx_intr -= intrq; dev_info->nb_rx_intr -= intrq; dev_info->nb_shared_intr -= intrq && sintrq; + if (rx_adapter->use_queue_event_buf) { + struct rte_eth_event_enqueue_buffer *event_buf = + dev_info->rx_queue[rx_queue_id].event_buf; + rte_free(event_buf->events); + rte_free(event_buf); + dev_info->rx_queue[rx_queue_id].event_buf = NULL; + } } -static void +static int rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter, struct eth_device_info *dev_info, int32_t rx_queue_id, @@ -1961,15 +1987,21 @@ rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter, int intrq; int sintrq; struct rte_event *qi_ev; + struct rte_eth_event_enqueue_buffer *new_rx_buf = NULL; + uint16_t eth_dev_id = dev_info->dev->data->port_id; + int ret; if (rx_queue_id == -1) { uint16_t nb_rx_queues; uint16_t i; nb_rx_queues = 
dev_info->dev->data->nb_rx_queues; - for (i = 0; i < nb_rx_queues; i++) - rxa_add_queue(rx_adapter, dev_info, i, conf); - return; + for (i = 0; i < nb_rx_queues; i++) { + ret = rxa_add_queue(rx_adapter, dev_info, i, conf); + if (ret) + return ret; + } + return 0; } pollq = rxa_polled_queue(dev_info, rx_queue_id); @@ -2032,6 +2064,37 @@ rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter, dev_info->next_q_idx = 0; } } + + if (!rx_adapter->use_queue_event_buf) + return 0; + + new_rx_buf = rte_zmalloc_socket("rx_buffer_meta", + sizeof(*new_rx_buf), 0, + rte_eth_dev_socket_id(eth_dev_id)); + if (new_rx_buf == NULL) { + RTE_EDEV_LOG_ERR("Failed to allocate event buffer meta for " + "dev_id: %d queue_id: %d", + eth_dev_id, rx_queue_id); + return -ENOMEM; + } + + new_rx_buf->events_size = RTE_ALIGN(conf->event_buf_size, BATCH_SIZE); + new_rx_buf->events_size += (2 * BATCH_SIZE); + new_rx_buf->events = rte_zmalloc_socket("rx_buffer", + sizeof(struct rte_event) * + new_rx_buf->events_size, 0, + rte_eth_dev_socket_id(eth_dev_id)); + if (new_rx_buf->events == NULL) { + rte_free(new_rx_buf); + RTE_EDEV_LOG_ERR("Failed to allocate event buffer for " + "dev_id: %d queue_id: %d", + eth_dev_id, rx_queue_id); + return -ENOMEM; + } + + queue_info->event_buf = new_rx_buf; + + return 0; } static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter, @@ -2060,6 +2123,16 @@ static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter, temp_conf.servicing_weight = 1; } queue_conf = &temp_conf; + + if (queue_conf->servicing_weight == 0 && + rx_adapter->use_queue_event_buf) { + + RTE_EDEV_LOG_ERR("Use of queue level event buffer " + "not supported for interrupt queues " + "dev_id: %d queue_id: %d", + eth_dev_id, rx_queue_id); + return -EINVAL; + } } nb_rx_queues = dev_info->dev->data->nb_rx_queues; @@ -2139,7 +2212,9 @@ static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter, - rxa_add_queue(rx_adapter, dev_info, rx_queue_id, queue_conf); + ret = 
rxa_add_queue(rx_adapter, dev_info, rx_queue_id, queue_conf); + if (ret) + goto err_free_rxqueue; rxa_calc_wrr_sequence(rx_adapter, rx_poll, rx_wrr); rte_free(rx_adapter->eth_rx_poll); @@ -2160,7 +2235,7 @@ static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter, rte_free(rx_poll); rte_free(rx_wrr); - return 0; + return ret; } static int @@ -2286,20 +2361,25 @@ rxa_create(uint8_t id, uint8_t dev_id, rx_adapter->eth_devices[i].dev = &rte_eth_devices[i]; /* Rx adapter event buffer allocation */ - buf = &rx_adapter->event_enqueue_buffer; - buf->events_size = rxa_params->event_buf_size; - - events = rte_zmalloc_socket(rx_adapter->mem_name, - buf->events_size * sizeof(*events), - 0, socket_id); - if (events == NULL) { - RTE_EDEV_LOG_ERR("Failed to allocate mem for event buffer\n"); - rte_free(rx_adapter->eth_devices); - rte_free(rx_adapter); - return -ENOMEM; - } + rx_adapter->use_queue_event_buf = rxa_params->use_queue_event_buf; + + if (!rx_adapter->use_queue_event_buf) { + buf = &rx_adapter->event_enqueue_buffer; + buf->events_size = rxa_params->event_buf_size; + + events = rte_zmalloc_socket(rx_adapter->mem_name, + buf->events_size * sizeof(*events), + 0, socket_id); + if (events == NULL) { + RTE_EDEV_LOG_ERR("Failed to allocate memory " + "for adapter event buffer"); + rte_free(rx_adapter->eth_devices); + rte_free(rx_adapter); + return -ENOMEM; + } - rx_adapter->event_enqueue_buffer.events = events; + rx_adapter->event_enqueue_buffer.events = events; + } event_eth_rx_adapter[id] = rx_adapter; @@ -2327,6 +2407,7 @@ rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, /* use default values for adapter params */ rxa_params.event_buf_size = ETH_EVENT_BUFFER_SIZE; + rxa_params.use_queue_event_buf = false; return rxa_create(id, dev_id, &rxa_params, conf_cb, conf_arg); } @@ -2343,14 +2424,27 @@ rte_event_eth_rx_adapter_create_with_params(uint8_t id, uint8_t dev_id, if (port_config == NULL) return -EINVAL; - /* use default values if rxa_params is NULL 
*/ if (rxa_params == NULL) { + /* use default values if rxa_params is NULL */ rxa_params = &temp_params; - rxa_params->event_buf_size = ETH_EVENT_BUFFER_SIZE; - } - - if (rxa_params->event_buf_size == 0) + rxa_params->event_buf_size = 4 * BATCH_SIZE; + rxa_params->use_queue_event_buf = false; + } else if ((!rxa_params->use_queue_event_buf && + rxa_params->event_buf_size == 0) || + (rxa_params->use_queue_event_buf && + rxa_params->event_buf_size != 0)) { + RTE_EDEV_LOG_ERR("Invalid adapter params\n"); return -EINVAL; + } else if (!rxa_params->use_queue_event_buf) { + /* adjust event buff size with BATCH_SIZE used for fetching + * packets from NIC rx queues to get full buffer utilization + * and prevent unnecessary rollovers. + */ + + rxa_params->event_buf_size = + RTE_ALIGN(rxa_params->event_buf_size, BATCH_SIZE); + rxa_params->event_buf_size += (BATCH_SIZE + BATCH_SIZE); + } pc = rte_malloc(NULL, sizeof(*pc), 0); if (pc == NULL) @@ -2358,14 +2452,6 @@ rte_event_eth_rx_adapter_create_with_params(uint8_t id, uint8_t dev_id, *pc = *port_config; - /* adjust event buff size with BATCH_SIZE used for fetching packets - * from NIC rx queues to get full buffer utilization and prevent - * unnecessary rollovers. 
- */ - rxa_params->event_buf_size = RTE_ALIGN(rxa_params->event_buf_size, - BATCH_SIZE); - rxa_params->event_buf_size += (BATCH_SIZE + BATCH_SIZE); - ret = rxa_create(id, dev_id, rxa_params, rxa_default_conf_cb, pc); if (ret) rte_free(pc); @@ -2418,7 +2504,8 @@ rte_event_eth_rx_adapter_free(uint8_t id) if (rx_adapter->default_cb_arg) rte_free(rx_adapter->conf_arg); rte_free(rx_adapter->eth_devices); - rte_free(rx_adapter->event_enqueue_buffer.events); + if (!rx_adapter->use_queue_event_buf) + rte_free(rx_adapter->event_enqueue_buffer.events); rte_free(rx_adapter); event_eth_rx_adapter[id] = NULL; @@ -2522,6 +2609,14 @@ rte_event_eth_rx_adapter_queue_add(uint8_t id, return -EINVAL; } + if ((rx_adapter->use_queue_event_buf && + queue_conf->event_buf_size == 0) || + (!rx_adapter->use_queue_event_buf && + queue_conf->event_buf_size != 0)) { + RTE_EDEV_LOG_ERR("Invalid Event buffer size for the queue"); + return -EINVAL; + } + dev_info = &rx_adapter->eth_devices[eth_dev_id]; if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) { -- 2.25.1 ^ permalink raw reply [flat|nested] 81+ messages in thread
* [dpdk-dev] [PATCH v8 5/5] test/event: add unit test for Rx adapter 2021-10-06 7:47 ` [dpdk-dev] [PATCH v8 " Naga Harish K S V ` (2 preceding siblings ...) 2021-10-06 7:47 ` [dpdk-dev] [PATCH v8 4/5] eventdev/rx_adapter: implement " Naga Harish K S V @ 2021-10-06 7:47 ` Naga Harish K S V 3 siblings, 0 replies; 81+ messages in thread From: Naga Harish K S V @ 2021-10-06 7:47 UTC (permalink / raw) To: jerinj, jay.jayatheerthan; +Cc: dev this patch adds unit tests for checking per rx queue event buffer feature using rte_event_eth_rx_adapter_queue_add api. Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com> Acked-by: Jay Jayatheerthan <jay.jayatheerthan@intel.com> --- app/test/test_event_eth_rx_adapter.c | 86 ++++++++++++++++++++++++++++ 1 file changed, 86 insertions(+) diff --git a/app/test/test_event_eth_rx_adapter.c b/app/test/test_event_eth_rx_adapter.c index 7c2cf0dd70..cf3c989efb 100644 --- a/app/test/test_event_eth_rx_adapter.c +++ b/app/test/test_event_eth_rx_adapter.c @@ -387,6 +387,90 @@ adapter_create(void) return err; } +static int +adapter_create_with_params(void) +{ + int err; + struct rte_event_dev_info dev_info; + struct rte_event_port_conf rx_p_conf; + struct rte_event_eth_rx_adapter_params rxa_params; + + memset(&rx_p_conf, 0, sizeof(rx_p_conf)); + + err = rte_event_dev_info_get(TEST_DEV_ID, &dev_info); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + rx_p_conf.new_event_threshold = dev_info.max_num_events; + rx_p_conf.dequeue_depth = dev_info.max_event_port_dequeue_depth; + rx_p_conf.enqueue_depth = dev_info.max_event_port_enqueue_depth; + + rxa_params.use_queue_event_buf = false; + rxa_params.event_buf_size = 0; + + err = rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID, + TEST_DEV_ID, &rx_p_conf, &rxa_params); + TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err); + + rxa_params.use_queue_event_buf = true; + + err = rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID, + TEST_DEV_ID, &rx_p_conf, &rxa_params); + 
TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + err = rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID, + TEST_DEV_ID, &rx_p_conf, &rxa_params); + TEST_ASSERT(err == -EEXIST, "Expected -EEXIST got %d", err); + + return TEST_SUCCESS; +} + +static int +adapter_queue_event_buf_test(void) +{ + int err; + struct rte_event ev; + uint32_t cap; + + struct rte_event_eth_rx_adapter_queue_conf queue_config = {0}; + + err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID, TEST_ETHDEV_ID, + &cap); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + ev.queue_id = 0; + ev.sched_type = RTE_SCHED_TYPE_ATOMIC; + ev.priority = 0; + + queue_config.rx_queue_flags = 0; + if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID) { + ev.flow_id = 1; + queue_config.rx_queue_flags = + RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID; + } + queue_config.ev = ev; + queue_config.servicing_weight = 1; + queue_config.event_buf_size = 0; + + err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID, + TEST_ETHDEV_ID, 0, + &queue_config); + TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err); + + queue_config.event_buf_size = 1024; + + err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID, + TEST_ETHDEV_ID, 0, + &queue_config); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID, + TEST_ETHDEV_ID, + 0); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + return TEST_SUCCESS; +} + static void adapter_free(void) { @@ -826,6 +910,8 @@ static struct unit_test_suite event_eth_rx_tests = { TEST_CASE_ST(adapter_create, adapter_free, adapter_start_stop), TEST_CASE_ST(adapter_create, adapter_free, adapter_stats), TEST_CASE_ST(adapter_create, adapter_free, adapter_queue_conf), + TEST_CASE_ST(adapter_create_with_params, adapter_free, + adapter_queue_event_buf_test), TEST_CASES_END() /**< NULL terminate unit test array */ } }; -- 2.25.1 ^ permalink raw reply [flat|nested] 81+ messages in thread
* [dpdk-dev] [PATCH v8 1/5] eventdev/rx_adapter: add event buffer size configurability 2021-10-06 4:02 ` [dpdk-dev] [PATCH v7 1/5] eventdev/rx_adapter: add event buffer size configurability Naga Harish K S V ` (5 preceding siblings ...) 2021-10-06 7:47 ` [dpdk-dev] [PATCH v8 " Naga Harish K S V @ 2021-10-06 7:55 ` Naga Harish K S V 2021-10-06 7:55 ` [dpdk-dev] [PATCH v8 2/5] test/event: add unit test for Rx adapter Naga Harish K S V ` (3 more replies) 6 siblings, 4 replies; 81+ messages in thread From: Naga Harish K S V @ 2021-10-06 7:55 UTC (permalink / raw) To: jerinj, jay.jayatheerthan; +Cc: dev, Ganapati Kundapura Currently event buffer is static array with a default size defined internally. To configure event buffer size from application, ``rte_event_eth_rx_adapter_create_with_params`` api is added which takes ``struct rte_event_eth_rx_adapter_params`` to configure event buffer size in addition other params . The event buffer size is rounded up for better buffer utilization and performance . In case of NULL params argument, default event buffer size is used. Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com> Signed-off-by: Ganapati Kundapura <ganapati.kundapura@intel.com> Acked-by: Jay Jayatheerthan <jay.jayatheerthan@intel.com> --- v8: * changed rxa_event_buf_get function as per review comments adapter_create_with_params modified as per comments v7: * added additional validation check in adapter_create_with_params API as per review comments v6: * address code style related review comments v5: * reverted queue conf get unit test change v4: * rebased with latest dpdk-next-eventdev branch * changed queue conf get unit test v3: * updated documentation and code comments as per review comments. * updated new create api test case name with suitable one. v2: * Updated header file and rx adapter documentation as per review comments. * new api name is modified as rte_event_eth_rx_adapter_create_with_params as per review comments. 
* rxa_params pointer argument Value NULL is allowed to represent the default values v1: * Initial implementation with documentation and unit tests. --- --- .../prog_guide/event_ethernet_rx_adapter.rst | 7 ++ lib/eventdev/rte_event_eth_rx_adapter.c | 98 +++++++++++++++++-- lib/eventdev/rte_event_eth_rx_adapter.h | 41 +++++++- lib/eventdev/version.map | 2 + 4 files changed, 140 insertions(+), 8 deletions(-) diff --git a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst index ce23d8a474..8526aecf57 100644 --- a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst +++ b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst @@ -62,6 +62,13 @@ service function and needs to create an event port for it. The callback is expected to fill the ``struct rte_event_eth_rx_adapter_conf structure`` passed to it. +If the application desires to control the event buffer size, it can use the +``rte_event_eth_rx_adapter_create_with_params()`` api. The event buffer size is +specified using ``struct rte_event_eth_rx_adapter_params::event_buf_size``. +The function is passed the event device to be associated with the adapter +and port configuration for the adapter to setup an event port if the +adapter needs to use a service function. 
+ Adding Rx Queues to the Adapter Instance ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/lib/eventdev/rte_event_eth_rx_adapter.c b/lib/eventdev/rte_event_eth_rx_adapter.c index 10491ca07b..5ccea168ea 100644 --- a/lib/eventdev/rte_event_eth_rx_adapter.c +++ b/lib/eventdev/rte_event_eth_rx_adapter.c @@ -85,7 +85,9 @@ struct rte_eth_event_enqueue_buffer { /* Count of events in this buffer */ uint16_t count; /* Array of events in this buffer */ - struct rte_event events[ETH_EVENT_BUFFER_SIZE]; + struct rte_event *events; + /* size of event buffer */ + uint16_t events_size; /* Event enqueue happens from head */ uint16_t head; /* New packets from rte_eth_rx_burst is enqued from tail */ @@ -946,7 +948,7 @@ rxa_buffer_mbufs(struct rte_event_eth_rx_adapter *rx_adapter, dropped = 0; nb_cb = dev_info->cb_fn(eth_dev_id, rx_queue_id, buf->last | - (RTE_DIM(buf->events) & ~buf->last_mask), + (buf->events_size & ~buf->last_mask), buf->count >= BATCH_SIZE ? buf->count - BATCH_SIZE : 0, &buf->events[buf->tail], @@ -972,7 +974,7 @@ rxa_pkt_buf_available(struct rte_eth_event_enqueue_buffer *buf) uint32_t nb_req = buf->tail + BATCH_SIZE; if (!buf->last) { - if (nb_req <= RTE_DIM(buf->events)) + if (nb_req <= buf->events_size) return true; if (buf->head >= BATCH_SIZE) { @@ -2206,12 +2208,15 @@ rxa_ctrl(uint8_t id, int start) return 0; } -int -rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, - rte_event_eth_rx_adapter_conf_cb conf_cb, - void *conf_arg) +static int +rxa_create(uint8_t id, uint8_t dev_id, + struct rte_event_eth_rx_adapter_params *rxa_params, + rte_event_eth_rx_adapter_conf_cb conf_cb, + void *conf_arg) { struct rte_event_eth_rx_adapter *rx_adapter; + struct rte_eth_event_enqueue_buffer *buf; + struct rte_event *events; int ret; int socket_id; uint16_t i; @@ -2226,6 +2231,7 @@ rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL); RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); 
+ if (conf_cb == NULL) return -EINVAL; @@ -2273,11 +2279,30 @@ rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, rte_free(rx_adapter); return -ENOMEM; } + rte_spinlock_init(&rx_adapter->rx_lock); + for (i = 0; i < RTE_MAX_ETHPORTS; i++) rx_adapter->eth_devices[i].dev = &rte_eth_devices[i]; + /* Rx adapter event buffer allocation */ + buf = &rx_adapter->event_enqueue_buffer; + buf->events_size = rxa_params->event_buf_size; + + events = rte_zmalloc_socket(rx_adapter->mem_name, + buf->events_size * sizeof(*events), + 0, socket_id); + if (events == NULL) { + RTE_EDEV_LOG_ERR("Failed to allocate mem for event buffer\n"); + rte_free(rx_adapter->eth_devices); + rte_free(rx_adapter); + return -ENOMEM; + } + + rx_adapter->event_enqueue_buffer.events = events; + event_eth_rx_adapter[id] = rx_adapter; + if (conf_cb == rxa_default_conf_cb) rx_adapter->default_cb_arg = 1; @@ -2293,6 +2318,61 @@ rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, return 0; } +int +rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, + rte_event_eth_rx_adapter_conf_cb conf_cb, + void *conf_arg) +{ + struct rte_event_eth_rx_adapter_params rxa_params = {0}; + + /* use default values for adapter params */ + rxa_params.event_buf_size = ETH_EVENT_BUFFER_SIZE; + + return rxa_create(id, dev_id, &rxa_params, conf_cb, conf_arg); +} + +int +rte_event_eth_rx_adapter_create_with_params(uint8_t id, uint8_t dev_id, + struct rte_event_port_conf *port_config, + struct rte_event_eth_rx_adapter_params *rxa_params) +{ + struct rte_event_port_conf *pc; + int ret; + struct rte_event_eth_rx_adapter_params temp_params = {0}; + + if (port_config == NULL) + return -EINVAL; + + /* use default values if rxa_params is NULL */ + if (rxa_params == NULL) { + rxa_params = &temp_params; + rxa_params->event_buf_size = ETH_EVENT_BUFFER_SIZE; + } + + if (rxa_params->event_buf_size == 0) + return -EINVAL; + + pc = rte_malloc(NULL, sizeof(*pc), 0); + if (pc == NULL) + return -ENOMEM; + + *pc = 
*port_config; + + /* adjust event buff size with BATCH_SIZE used for fetching packets + * from NIC rx queues to get full buffer utilization and prevent + * unnecessary rollovers. + */ + rxa_params->event_buf_size = RTE_ALIGN(rxa_params->event_buf_size, + BATCH_SIZE); + rxa_params->event_buf_size += (BATCH_SIZE + BATCH_SIZE); + + ret = rxa_create(id, dev_id, rxa_params, rxa_default_conf_cb, pc); + if (ret) + rte_free(pc); + + return ret; +} + int rte_event_eth_rx_adapter_create(uint8_t id, uint8_t dev_id, struct rte_event_port_conf *port_config) @@ -2302,12 +2382,14 @@ rte_event_eth_rx_adapter_create(uint8_t id, uint8_t dev_id, if (port_config == NULL) return -EINVAL; + RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL); pc = rte_malloc(NULL, sizeof(*pc), 0); if (pc == NULL) return -ENOMEM; *pc = *port_config; + ret = rte_event_eth_rx_adapter_create_ext(id, dev_id, rxa_default_conf_cb, pc); @@ -2336,6 +2418,7 @@ rte_event_eth_rx_adapter_free(uint8_t id) if (rx_adapter->default_cb_arg) rte_free(rx_adapter->conf_arg); rte_free(rx_adapter->eth_devices); + rte_free(rx_adapter->event_enqueue_buffer.events); rte_free(rx_adapter); event_eth_rx_adapter[id] = NULL; @@ -2711,6 +2794,7 @@ rte_event_eth_rx_adapter_stats_get(uint8_t id, stats->rx_packets += dev_stats_sum.rx_packets; stats->rx_enq_count += dev_stats_sum.rx_enq_count; + return 0; } diff --git a/lib/eventdev/rte_event_eth_rx_adapter.h b/lib/eventdev/rte_event_eth_rx_adapter.h index 470543e434..846ca569e9 100644 --- a/lib/eventdev/rte_event_eth_rx_adapter.h +++ b/lib/eventdev/rte_event_eth_rx_adapter.h @@ -26,6 +26,7 @@ * The ethernet Rx event adapter's functions are: * - rte_event_eth_rx_adapter_create_ext() * - rte_event_eth_rx_adapter_create() + * - rte_event_eth_rx_adapter_create_with_params() * - rte_event_eth_rx_adapter_free() * - rte_event_eth_rx_adapter_queue_add() * - rte_event_eth_rx_adapter_queue_del() @@ -37,7 +38,7 @@ * * The application creates an ethernet to event adapter using * 
rte_event_eth_rx_adapter_create_ext() or rte_event_eth_rx_adapter_create() - * functions. + * or rte_event_eth_rx_adapter_create_with_params() functions. * The adapter needs to know which ethernet rx queues to poll for mbufs as well * as event device parameters such as the event queue identifier, event * priority and scheduling type that the adapter should use when constructing @@ -257,6 +258,17 @@ struct rte_event_eth_rx_adapter_vector_limits { */ }; +/** + * A structure to hold adapter config params + */ +struct rte_event_eth_rx_adapter_params { + uint16_t event_buf_size; + /**< size of event buffer for the adapter. + * This value is rounded up for better buffer utilization + * and performance. + */ +}; + /** * * Callback function invoked by the SW adapter before it continues @@ -357,6 +369,33 @@ int rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, int rte_event_eth_rx_adapter_create(uint8_t id, uint8_t dev_id, struct rte_event_port_conf *port_config); +/** + * This is a variant of rte_event_eth_rx_adapter_create() with additional + * adapter params specified in ``struct rte_event_eth_rx_adapter_params``. + * + * @param id + * The identifier of the ethernet Rx event adapter. + * + * @param dev_id + * The identifier of the event device to configure. + * + * @param port_config + * Argument of type *rte_event_port_conf* that is passed to the conf_cb + * function. + * + * @param rxa_params + * Pointer to struct rte_event_eth_rx_adapter_params. + * In case of NULL, default values are used. 
+ * + * @return + * - 0: Success + * - <0: Error code on failure + */ +__rte_experimental +int rte_event_eth_rx_adapter_create_with_params(uint8_t id, uint8_t dev_id, + struct rte_event_port_conf *port_config, + struct rte_event_eth_rx_adapter_params *rxa_params); + /** * Free an event adapter * diff --git a/lib/eventdev/version.map b/lib/eventdev/version.map index 9f280160fa..7de18497a6 100644 --- a/lib/eventdev/version.map +++ b/lib/eventdev/version.map @@ -138,6 +138,8 @@ EXPERIMENTAL { __rte_eventdev_trace_port_setup; # added in 20.11 rte_event_pmd_pci_probe_named; + # added in 21.11 + rte_event_eth_rx_adapter_create_with_params; #added in 21.05 rte_event_vector_pool_create; -- 2.25.1 ^ permalink raw reply [flat|nested] 81+ messages in thread
* [dpdk-dev] [PATCH v8 2/5] test/event: add unit test for Rx adapter 2021-10-06 7:55 ` [dpdk-dev] [PATCH v8 1/5] eventdev/rx_adapter: add event buffer size configurability Naga Harish K S V @ 2021-10-06 7:55 ` Naga Harish K S V 2021-10-06 7:55 ` [dpdk-dev] [PATCH v8 3/5] eventdev/rx_adapter: introduce per queue event buffer Naga Harish K S V ` (2 subsequent siblings) 3 siblings, 0 replies; 81+ messages in thread From: Naga Harish K S V @ 2021-10-06 7:55 UTC (permalink / raw) To: jerinj, jay.jayatheerthan; +Cc: dev this patch adds unit test for rte_event_eth_rx_adapter_create_with_params api and validate all possible input combinations. Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com> Acked-by: Jay Jayatheerthan <jay.jayatheerthan@intel.com> --- app/test/test_event_eth_rx_adapter.c | 53 +++++++++++++++++++++++++--- 1 file changed, 49 insertions(+), 4 deletions(-) diff --git a/app/test/test_event_eth_rx_adapter.c b/app/test/test_event_eth_rx_adapter.c index 13664a3a3b..7c2cf0dd70 100644 --- a/app/test/test_event_eth_rx_adapter.c +++ b/app/test/test_event_eth_rx_adapter.c @@ -428,6 +428,50 @@ adapter_create_free(void) return TEST_SUCCESS; } +static int +adapter_create_free_with_params(void) +{ + int err; + + struct rte_event_port_conf rx_p_conf = { + .dequeue_depth = 8, + .enqueue_depth = 8, + .new_event_threshold = 1200, + }; + + struct rte_event_eth_rx_adapter_params rxa_params = { + .event_buf_size = 1024 + }; + + err = rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID, + TEST_DEV_ID, NULL, NULL); + TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err); + + err = rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID, + TEST_DEV_ID, &rx_p_conf, &rxa_params); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + err = rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID, + TEST_DEV_ID, &rx_p_conf, &rxa_params); + TEST_ASSERT(err == -EEXIST, "Expected -EEXIST %d got %d", -EEXIST, err); + + rxa_params.event_buf_size = 0; + err = 
rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID, + TEST_DEV_ID, &rx_p_conf, &rxa_params); + TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err); + + err = rte_event_eth_rx_adapter_free(TEST_INST_ID); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + err = rte_event_eth_rx_adapter_free(TEST_INST_ID); + TEST_ASSERT(err == -EINVAL, "Expected -EINVAL %d got %d", -EINVAL, err); + + err = rte_event_eth_rx_adapter_free(1); + TEST_ASSERT(err == -EINVAL, "Expected -EINVAL %d got %d", -EINVAL, err); + + return TEST_SUCCESS; +} + static int adapter_queue_add_del(void) { @@ -435,7 +479,7 @@ adapter_queue_add_del(void) struct rte_event ev; uint32_t cap; - struct rte_event_eth_rx_adapter_queue_conf queue_config; + struct rte_event_eth_rx_adapter_queue_conf queue_config = {0}; err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID, TEST_ETHDEV_ID, &cap); @@ -523,7 +567,7 @@ adapter_multi_eth_add_del(void) uint16_t port_index, port_index_base, drv_id = 0; char driver_name[50]; - struct rte_event_eth_rx_adapter_queue_conf queue_config; + struct rte_event_eth_rx_adapter_queue_conf queue_config = {0}; ev.queue_id = 0; ev.sched_type = RTE_SCHED_TYPE_ATOMIC; @@ -594,7 +638,7 @@ adapter_intr_queue_add_del(void) struct rte_event ev; uint32_t cap; uint16_t eth_port; - struct rte_event_eth_rx_adapter_queue_conf queue_config; + struct rte_event_eth_rx_adapter_queue_conf queue_config = {0}; if (!default_params.rx_intr_port_inited) return 0; @@ -687,7 +731,7 @@ adapter_start_stop(void) ev.sched_type = RTE_SCHED_TYPE_ATOMIC; ev.priority = 0; - struct rte_event_eth_rx_adapter_queue_conf queue_config; + struct rte_event_eth_rx_adapter_queue_conf queue_config = {0}; queue_config.rx_queue_flags = 0; if (default_params.caps & @@ -774,6 +818,7 @@ static struct unit_test_suite event_eth_rx_tests = { .teardown = testsuite_teardown, .unit_test_cases = { TEST_CASE_ST(NULL, NULL, adapter_create_free), + TEST_CASE_ST(NULL, NULL, adapter_create_free_with_params), 
TEST_CASE_ST(adapter_create, adapter_free, adapter_queue_add_del), TEST_CASE_ST(adapter_create, adapter_free, -- 2.25.1 ^ permalink raw reply [flat|nested] 81+ messages in thread
* [dpdk-dev] [PATCH v8 3/5] eventdev/rx_adapter: introduce per queue event buffer 2021-10-06 7:55 ` [dpdk-dev] [PATCH v8 1/5] eventdev/rx_adapter: add event buffer size configurability Naga Harish K S V 2021-10-06 7:55 ` [dpdk-dev] [PATCH v8 2/5] test/event: add unit test for Rx adapter Naga Harish K S V @ 2021-10-06 7:55 ` Naga Harish K S V 2021-10-06 7:55 ` [dpdk-dev] [PATCH v8 4/5] eventdev/rx_adapter: implement " Naga Harish K S V 2021-10-06 7:55 ` [dpdk-dev] [PATCH v8 5/5] test/event: add unit test for Rx adapter Naga Harish K S V 3 siblings, 0 replies; 81+ messages in thread From: Naga Harish K S V @ 2021-10-06 7:55 UTC (permalink / raw) To: jerinj, jay.jayatheerthan; +Cc: dev To configure per queue event buffer size, application sets ``rte_event_eth_rx_adapter_params::use_queue_event_buf`` flag as true and is passed to ``rte_event_eth_rx_adapter_create_with_params`` api. The per queue event buffer size is populated in ``rte_event_eth_rx_adapter_queue_conf::event_buf_size`` and passed to ``rte_event_eth_rx_adapter_queue_add`` api. Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com> Acked-by: Jay Jayatheerthan <jay.jayatheerthan@intel.com> --- .../prog_guide/event_ethernet_rx_adapter.rst | 19 ++++++++++++------- lib/eventdev/rte_event_eth_rx_adapter.h | 4 ++++ 2 files changed, 16 insertions(+), 7 deletions(-) diff --git a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst index 8526aecf57..8b58130fc5 100644 --- a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst +++ b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst @@ -62,12 +62,14 @@ service function and needs to create an event port for it. The callback is expected to fill the ``struct rte_event_eth_rx_adapter_conf structure`` passed to it. -If the application desires to control the event buffer size, it can use the -``rte_event_eth_rx_adapter_create_with_params()`` api. 
The event buffer size is -specified using ``struct rte_event_eth_rx_adapter_params::event_buf_size``. -The function is passed the event device to be associated with the adapter -and port configuration for the adapter to setup an event port if the -adapter needs to use a service function. +If the application desires to control the event buffer size at adapter level, +it can use the ``rte_event_eth_rx_adapter_create_with_params()`` api. The event +buffer size is specified using ``struct rte_event_eth_rx_adapter_params:: +event_buf_size``. To configure the event buffer size at queue level, the boolean +flag ``struct rte_event_eth_rx_adapter_params::use_queue_event_buf`` need to be +set to true. The function is passed the event device to be associated with +the adapter and port configuration for the adapter to setup an event port +if the adapter needs to use a service function. Adding Rx Queues to the Adapter Instance ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -79,7 +81,9 @@ parameter. Event information for packets from this Rx queue is encoded in the ``ev`` field of ``struct rte_event_eth_rx_adapter_queue_conf``. The servicing_weight member of the struct rte_event_eth_rx_adapter_queue_conf is the relative polling frequency of the Rx queue and is applicable when the -adapter uses a service core function. +adapter uses a service core function. The applications can configure queue +event buffer size in ``struct rte_event_eth_rx_adapter_queue_conf::event_buf_size`` +parameter. .. code-block:: c @@ -90,6 +94,7 @@ adapter uses a service core function. 
queue_config.rx_queue_flags = 0; queue_config.ev = ev; queue_config.servicing_weight = 1; + queue_config.event_buf_size = 1024; err = rte_event_eth_rx_adapter_queue_add(id, eth_dev_id, diff --git a/lib/eventdev/rte_event_eth_rx_adapter.h b/lib/eventdev/rte_event_eth_rx_adapter.h index 846ca569e9..70ca427d66 100644 --- a/lib/eventdev/rte_event_eth_rx_adapter.h +++ b/lib/eventdev/rte_event_eth_rx_adapter.h @@ -200,6 +200,8 @@ struct rte_event_eth_rx_adapter_queue_conf { * Valid when RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR flag is set in * @see rte_event_eth_rx_adapter_queue_conf::rx_queue_flags. */ + uint16_t event_buf_size; + /**< event buffer size for this queue */ }; /** @@ -267,6 +269,8 @@ struct rte_event_eth_rx_adapter_params { * This value is rounded up for better buffer utilization * and performance. */ + bool use_queue_event_buf; + /**< flag to indicate that event buffer is separate for each queue */ }; /** -- 2.25.1 ^ permalink raw reply [flat|nested] 81+ messages in thread
* [dpdk-dev] [PATCH v8 4/5] eventdev/rx_adapter: implement per queue event buffer 2021-10-06 7:55 ` [dpdk-dev] [PATCH v8 1/5] eventdev/rx_adapter: add event buffer size configurability Naga Harish K S V 2021-10-06 7:55 ` [dpdk-dev] [PATCH v8 2/5] test/event: add unit test for Rx adapter Naga Harish K S V 2021-10-06 7:55 ` [dpdk-dev] [PATCH v8 3/5] eventdev/rx_adapter: introduce per queue event buffer Naga Harish K S V @ 2021-10-06 7:55 ` Naga Harish K S V 2021-10-06 9:11 ` Jayatheerthan, Jay 2021-10-06 7:55 ` [dpdk-dev] [PATCH v8 5/5] test/event: add unit test for Rx adapter Naga Harish K S V 3 siblings, 1 reply; 81+ messages in thread From: Naga Harish K S V @ 2021-10-06 7:55 UTC (permalink / raw) To: jerinj, jay.jayatheerthan; +Cc: dev this patch implement the per queue event buffer with required validations. Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com> --- lib/eventdev/rte_event_eth_rx_adapter.c | 211 +++++++++++++++++------- 1 file changed, 153 insertions(+), 58 deletions(-) diff --git a/lib/eventdev/rte_event_eth_rx_adapter.c b/lib/eventdev/rte_event_eth_rx_adapter.c index 5ccea168ea..c5c9c26ded 100644 --- a/lib/eventdev/rte_event_eth_rx_adapter.c +++ b/lib/eventdev/rte_event_eth_rx_adapter.c @@ -102,10 +102,12 @@ struct rte_event_eth_rx_adapter { uint8_t rss_key_be[RSS_KEY_SIZE]; /* Event device identifier */ uint8_t eventdev_id; - /* Per ethernet device structure */ - struct eth_device_info *eth_devices; /* Event port identifier */ uint8_t event_port_id; + /* Flag indicating per rxq event buffer */ + bool use_queue_event_buf; + /* Per ethernet device structure */ + struct eth_device_info *eth_devices; /* Lock to serialize config updates with service function */ rte_spinlock_t rx_lock; /* Max mbufs processed in any service function invocation */ @@ -241,6 +243,7 @@ struct eth_rx_queue_info { uint32_t flow_id_mask; /* Set to ~0 if app provides flow id else 0 */ uint64_t event; struct eth_rx_vector_data vector_data; + struct 
rte_eth_event_enqueue_buffer *event_buf; }; static struct rte_event_eth_rx_adapter **event_eth_rx_adapter; @@ -262,6 +265,18 @@ rxa_validate_id(uint8_t id) return id < RTE_EVENT_ETH_RX_ADAPTER_MAX_INSTANCE; } +static inline struct rte_eth_event_enqueue_buffer * +rxa_event_buf_get(struct rte_event_eth_rx_adapter *rx_adapter, + uint16_t eth_dev_id, uint16_t rx_queue_id) +{ + if (rx_adapter->use_queue_event_buf) { + struct eth_device_info *dev_info = + &rx_adapter->eth_devices[eth_dev_id]; + return dev_info->rx_queue[rx_queue_id].event_buf; + } else + return &rx_adapter->event_enqueue_buffer; +} + #define RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) do { \ if (!rxa_validate_id(id)) { \ RTE_EDEV_LOG_ERR("Invalid eth Rx adapter id = %d\n", id); \ @@ -767,10 +782,9 @@ rxa_enq_block_end_ts(struct rte_event_eth_rx_adapter *rx_adapter, /* Enqueue buffered events to event device */ static inline uint16_t -rxa_flush_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter) +rxa_flush_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter, + struct rte_eth_event_enqueue_buffer *buf) { - struct rte_eth_event_enqueue_buffer *buf = - &rx_adapter->event_enqueue_buffer; struct rte_event_eth_rx_adapter_stats *stats = &rx_adapter->stats; uint16_t count = buf->last ? 
buf->last - buf->head : buf->count; @@ -888,15 +902,14 @@ rxa_buffer_mbufs(struct rte_event_eth_rx_adapter *rx_adapter, uint16_t eth_dev_id, uint16_t rx_queue_id, struct rte_mbuf **mbufs, - uint16_t num) + uint16_t num, + struct rte_eth_event_enqueue_buffer *buf) { uint32_t i; struct eth_device_info *dev_info = &rx_adapter->eth_devices[eth_dev_id]; struct eth_rx_queue_info *eth_rx_queue_info = &dev_info->rx_queue[rx_queue_id]; - struct rte_eth_event_enqueue_buffer *buf = - &rx_adapter->event_enqueue_buffer; uint16_t new_tail = buf->tail; uint64_t event = eth_rx_queue_info->event; uint32_t flow_id_mask = eth_rx_queue_info->flow_id_mask; @@ -995,11 +1008,10 @@ rxa_eth_rx(struct rte_event_eth_rx_adapter *rx_adapter, uint16_t queue_id, uint32_t rx_count, uint32_t max_rx, - int *rxq_empty) + int *rxq_empty, + struct rte_eth_event_enqueue_buffer *buf) { struct rte_mbuf *mbufs[BATCH_SIZE]; - struct rte_eth_event_enqueue_buffer *buf = - &rx_adapter->event_enqueue_buffer; struct rte_event_eth_rx_adapter_stats *stats = &rx_adapter->stats; uint16_t n; @@ -1012,7 +1024,7 @@ rxa_eth_rx(struct rte_event_eth_rx_adapter *rx_adapter, */ while (rxa_pkt_buf_available(buf)) { if (buf->count >= BATCH_SIZE) - rxa_flush_event_buffer(rx_adapter); + rxa_flush_event_buffer(rx_adapter, buf); stats->rx_poll_count++; n = rte_eth_rx_burst(port_id, queue_id, mbufs, BATCH_SIZE); @@ -1021,14 +1033,14 @@ rxa_eth_rx(struct rte_event_eth_rx_adapter *rx_adapter, *rxq_empty = 1; break; } - rxa_buffer_mbufs(rx_adapter, port_id, queue_id, mbufs, n); + rxa_buffer_mbufs(rx_adapter, port_id, queue_id, mbufs, n, buf); nb_rx += n; if (rx_count + nb_rx > max_rx) break; } if (buf->count > 0) - rxa_flush_event_buffer(rx_adapter); + rxa_flush_event_buffer(rx_adapter, buf); return nb_rx; } @@ -1169,7 +1181,7 @@ rxa_intr_ring_dequeue(struct rte_event_eth_rx_adapter *rx_adapter) ring_lock = &rx_adapter->intr_ring_lock; if (buf->count >= BATCH_SIZE) - rxa_flush_event_buffer(rx_adapter); + 
rxa_flush_event_buffer(rx_adapter, buf); while (rxa_pkt_buf_available(buf)) { struct eth_device_info *dev_info; @@ -1221,7 +1233,7 @@ rxa_intr_ring_dequeue(struct rte_event_eth_rx_adapter *rx_adapter) continue; n = rxa_eth_rx(rx_adapter, port, i, nb_rx, rx_adapter->max_nb_rx, - &rxq_empty); + &rxq_empty, buf); nb_rx += n; enq_buffer_full = !rxq_empty && n == 0; @@ -1242,7 +1254,7 @@ rxa_intr_ring_dequeue(struct rte_event_eth_rx_adapter *rx_adapter) } else { n = rxa_eth_rx(rx_adapter, port, queue, nb_rx, rx_adapter->max_nb_rx, - &rxq_empty); + &rxq_empty, buf); rx_adapter->qd_valid = !rxq_empty; nb_rx += n; if (nb_rx > rx_adapter->max_nb_rx) @@ -1273,13 +1285,12 @@ rxa_poll(struct rte_event_eth_rx_adapter *rx_adapter) { uint32_t num_queue; uint32_t nb_rx = 0; - struct rte_eth_event_enqueue_buffer *buf; + struct rte_eth_event_enqueue_buffer *buf = NULL; uint32_t wrr_pos; uint32_t max_nb_rx; wrr_pos = rx_adapter->wrr_pos; max_nb_rx = rx_adapter->max_nb_rx; - buf = &rx_adapter->event_enqueue_buffer; /* Iterate through a WRR sequence */ for (num_queue = 0; num_queue < rx_adapter->wrr_len; num_queue++) { @@ -1287,24 +1298,31 @@ rxa_poll(struct rte_event_eth_rx_adapter *rx_adapter) uint16_t qid = rx_adapter->eth_rx_poll[poll_idx].eth_rx_qid; uint16_t d = rx_adapter->eth_rx_poll[poll_idx].eth_dev_id; + buf = rxa_event_buf_get(rx_adapter, d, qid); + /* Don't do a batch dequeue from the rx queue if there isn't * enough space in the enqueue buffer. 
*/ if (buf->count >= BATCH_SIZE) - rxa_flush_event_buffer(rx_adapter); + rxa_flush_event_buffer(rx_adapter, buf); if (!rxa_pkt_buf_available(buf)) { - rx_adapter->wrr_pos = wrr_pos; - return nb_rx; + if (rx_adapter->use_queue_event_buf) + goto poll_next_entry; + else { + rx_adapter->wrr_pos = wrr_pos; + return nb_rx; + } } nb_rx += rxa_eth_rx(rx_adapter, d, qid, nb_rx, max_nb_rx, - NULL); + NULL, buf); if (nb_rx > max_nb_rx) { rx_adapter->wrr_pos = (wrr_pos + 1) % rx_adapter->wrr_len; break; } +poll_next_entry: if (++wrr_pos == rx_adapter->wrr_len) wrr_pos = 0; } @@ -1315,12 +1333,13 @@ static void rxa_vector_expire(struct eth_rx_vector_data *vec, void *arg) { struct rte_event_eth_rx_adapter *rx_adapter = arg; - struct rte_eth_event_enqueue_buffer *buf = - &rx_adapter->event_enqueue_buffer; + struct rte_eth_event_enqueue_buffer *buf = NULL; struct rte_event *ev; + buf = rxa_event_buf_get(rx_adapter, vec->port, vec->queue); + if (buf->count) - rxa_flush_event_buffer(rx_adapter); + rxa_flush_event_buffer(rx_adapter, buf); if (vec->vector_ev->nb_elem == 0) return; @@ -1947,9 +1966,16 @@ rxa_sw_del(struct rte_event_eth_rx_adapter *rx_adapter, rx_adapter->num_rx_intr -= intrq; dev_info->nb_rx_intr -= intrq; dev_info->nb_shared_intr -= intrq && sintrq; + if (rx_adapter->use_queue_event_buf) { + struct rte_eth_event_enqueue_buffer *event_buf = + dev_info->rx_queue[rx_queue_id].event_buf; + rte_free(event_buf->events); + rte_free(event_buf); + dev_info->rx_queue[rx_queue_id].event_buf = NULL; + } } -static void +static int rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter, struct eth_device_info *dev_info, int32_t rx_queue_id, @@ -1961,15 +1987,21 @@ rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter, int intrq; int sintrq; struct rte_event *qi_ev; + struct rte_eth_event_enqueue_buffer *new_rx_buf = NULL; + uint16_t eth_dev_id = dev_info->dev->data->port_id; + int ret; if (rx_queue_id == -1) { uint16_t nb_rx_queues; uint16_t i; nb_rx_queues = 
dev_info->dev->data->nb_rx_queues; - for (i = 0; i < nb_rx_queues; i++) - rxa_add_queue(rx_adapter, dev_info, i, conf); - return; + for (i = 0; i < nb_rx_queues; i++) { + ret = rxa_add_queue(rx_adapter, dev_info, i, conf); + if (ret) + return ret; + } + return 0; } pollq = rxa_polled_queue(dev_info, rx_queue_id); @@ -2032,6 +2064,37 @@ rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter, dev_info->next_q_idx = 0; } } + + if (!rx_adapter->use_queue_event_buf) + return 0; + + new_rx_buf = rte_zmalloc_socket("rx_buffer_meta", + sizeof(*new_rx_buf), 0, + rte_eth_dev_socket_id(eth_dev_id)); + if (new_rx_buf == NULL) { + RTE_EDEV_LOG_ERR("Failed to allocate event buffer meta for " + "dev_id: %d queue_id: %d", + eth_dev_id, rx_queue_id); + return -ENOMEM; + } + + new_rx_buf->events_size = RTE_ALIGN(conf->event_buf_size, BATCH_SIZE); + new_rx_buf->events_size += (2 * BATCH_SIZE); + new_rx_buf->events = rte_zmalloc_socket("rx_buffer", + sizeof(struct rte_event) * + new_rx_buf->events_size, 0, + rte_eth_dev_socket_id(eth_dev_id)); + if (new_rx_buf->events == NULL) { + rte_free(new_rx_buf); + RTE_EDEV_LOG_ERR("Failed to allocate event buffer for " + "dev_id: %d queue_id: %d", + eth_dev_id, rx_queue_id); + return -ENOMEM; + } + + queue_info->event_buf = new_rx_buf; + + return 0; } static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter, @@ -2060,6 +2123,16 @@ static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter, temp_conf.servicing_weight = 1; } queue_conf = &temp_conf; + + if (queue_conf->servicing_weight == 0 && + rx_adapter->use_queue_event_buf) { + + RTE_EDEV_LOG_ERR("Use of queue level event buffer " + "not supported for interrupt queues " + "dev_id: %d queue_id: %d", + eth_dev_id, rx_queue_id); + return -EINVAL; + } } nb_rx_queues = dev_info->dev->data->nb_rx_queues; @@ -2139,7 +2212,9 @@ static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter, - rxa_add_queue(rx_adapter, dev_info, rx_queue_id, queue_conf); + ret = 
rxa_add_queue(rx_adapter, dev_info, rx_queue_id, queue_conf); + if (ret) + goto err_free_rxqueue; rxa_calc_wrr_sequence(rx_adapter, rx_poll, rx_wrr); rte_free(rx_adapter->eth_rx_poll); @@ -2160,7 +2235,7 @@ static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter, rte_free(rx_poll); rte_free(rx_wrr); - return 0; + return ret; } static int @@ -2286,20 +2361,25 @@ rxa_create(uint8_t id, uint8_t dev_id, rx_adapter->eth_devices[i].dev = &rte_eth_devices[i]; /* Rx adapter event buffer allocation */ - buf = &rx_adapter->event_enqueue_buffer; - buf->events_size = rxa_params->event_buf_size; - - events = rte_zmalloc_socket(rx_adapter->mem_name, - buf->events_size * sizeof(*events), - 0, socket_id); - if (events == NULL) { - RTE_EDEV_LOG_ERR("Failed to allocate mem for event buffer\n"); - rte_free(rx_adapter->eth_devices); - rte_free(rx_adapter); - return -ENOMEM; - } + rx_adapter->use_queue_event_buf = rxa_params->use_queue_event_buf; + + if (!rx_adapter->use_queue_event_buf) { + buf = &rx_adapter->event_enqueue_buffer; + buf->events_size = rxa_params->event_buf_size; + + events = rte_zmalloc_socket(rx_adapter->mem_name, + buf->events_size * sizeof(*events), + 0, socket_id); + if (events == NULL) { + RTE_EDEV_LOG_ERR("Failed to allocate memory " + "for adapter event buffer"); + rte_free(rx_adapter->eth_devices); + rte_free(rx_adapter); + return -ENOMEM; + } - rx_adapter->event_enqueue_buffer.events = events; + rx_adapter->event_enqueue_buffer.events = events; + } event_eth_rx_adapter[id] = rx_adapter; @@ -2327,6 +2407,7 @@ rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, /* use default values for adapter params */ rxa_params.event_buf_size = ETH_EVENT_BUFFER_SIZE; + rxa_params.use_queue_event_buf = false; return rxa_create(id, dev_id, &rxa_params, conf_cb, conf_arg); } @@ -2343,14 +2424,27 @@ rte_event_eth_rx_adapter_create_with_params(uint8_t id, uint8_t dev_id, if (port_config == NULL) return -EINVAL; - /* use default values if rxa_params is NULL 
*/ if (rxa_params == NULL) { + /* use default values if rxa_params is NULL */ rxa_params = &temp_params; rxa_params->event_buf_size = ETH_EVENT_BUFFER_SIZE; - } - - if (rxa_params->event_buf_size == 0) + rxa_params->use_queue_event_buf = false; + } else if ((!rxa_params->use_queue_event_buf && + rxa_params->event_buf_size == 0) || + (rxa_params->use_queue_event_buf && + rxa_params->event_buf_size != 0)) { + RTE_EDEV_LOG_ERR("Invalid adapter params\n"); return -EINVAL; + } else if (!rxa_params->use_queue_event_buf) { + /* adjust event buff size with BATCH_SIZE used for fetching + * packets from NIC rx queues to get full buffer utilization + * and prevent unnecessary rollovers. + */ + + rxa_params->event_buf_size = + RTE_ALIGN(rxa_params->event_buf_size, BATCH_SIZE); + rxa_params->event_buf_size += (BATCH_SIZE + BATCH_SIZE); + } pc = rte_malloc(NULL, sizeof(*pc), 0); if (pc == NULL) @@ -2358,14 +2452,6 @@ rte_event_eth_rx_adapter_create_with_params(uint8_t id, uint8_t dev_id, *pc = *port_config; - /* adjust event buff size with BATCH_SIZE used for fetching packets - * from NIC rx queues to get full buffer utilization and prevent - * unnecessary rollovers. 
- */ - rxa_params->event_buf_size = RTE_ALIGN(rxa_params->event_buf_size, - BATCH_SIZE); - rxa_params->event_buf_size += (BATCH_SIZE + BATCH_SIZE); - ret = rxa_create(id, dev_id, rxa_params, rxa_default_conf_cb, pc); if (ret) rte_free(pc); @@ -2418,7 +2504,8 @@ rte_event_eth_rx_adapter_free(uint8_t id) if (rx_adapter->default_cb_arg) rte_free(rx_adapter->conf_arg); rte_free(rx_adapter->eth_devices); - rte_free(rx_adapter->event_enqueue_buffer.events); + if (!rx_adapter->use_queue_event_buf) + rte_free(rx_adapter->event_enqueue_buffer.events); rte_free(rx_adapter); event_eth_rx_adapter[id] = NULL; @@ -2522,6 +2609,14 @@ rte_event_eth_rx_adapter_queue_add(uint8_t id, return -EINVAL; } + if ((rx_adapter->use_queue_event_buf && + queue_conf->event_buf_size == 0) || + (!rx_adapter->use_queue_event_buf && + queue_conf->event_buf_size != 0)) { + RTE_EDEV_LOG_ERR("Invalid Event buffer size for the queue"); + return -EINVAL; + } + dev_info = &rx_adapter->eth_devices[eth_dev_id]; if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) { -- 2.25.1 ^ permalink raw reply [flat|nested] 81+ messages in thread
* Re: [dpdk-dev] [PATCH v8 4/5] eventdev/rx_adapter: implement per queue event buffer 2021-10-06 7:55 ` [dpdk-dev] [PATCH v8 4/5] eventdev/rx_adapter: implement " Naga Harish K S V @ 2021-10-06 9:11 ` Jayatheerthan, Jay 0 siblings, 0 replies; 81+ messages in thread From: Jayatheerthan, Jay @ 2021-10-06 9:11 UTC (permalink / raw) To: Naga Harish K, S V, jerinj; +Cc: dev The full patchset looks good to me. Acked-by: Jay Jayatheerthan <jay.jayatheerthan@intel.com> > -----Original Message----- > From: Naga Harish K, S V <s.v.naga.harish.k@intel.com> > Sent: Wednesday, October 6, 2021 1:26 PM > To: jerinj@marvell.com; Jayatheerthan, Jay <jay.jayatheerthan@intel.com> > Cc: dev@dpdk.org > Subject: [PATCH v8 4/5] eventdev/rx_adapter: implement per queue event buffer > > this patch implement the per queue event buffer with > required validations. > > Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com> > --- > lib/eventdev/rte_event_eth_rx_adapter.c | 211 +++++++++++++++++------- > 1 file changed, 153 insertions(+), 58 deletions(-) > > diff --git a/lib/eventdev/rte_event_eth_rx_adapter.c b/lib/eventdev/rte_event_eth_rx_adapter.c > index 5ccea168ea..c5c9c26ded 100644 > --- a/lib/eventdev/rte_event_eth_rx_adapter.c > +++ b/lib/eventdev/rte_event_eth_rx_adapter.c > @@ -102,10 +102,12 @@ struct rte_event_eth_rx_adapter { > uint8_t rss_key_be[RSS_KEY_SIZE]; > /* Event device identifier */ > uint8_t eventdev_id; > - /* Per ethernet device structure */ > - struct eth_device_info *eth_devices; > /* Event port identifier */ > uint8_t event_port_id; > + /* Flag indicating per rxq event buffer */ > + bool use_queue_event_buf; > + /* Per ethernet device structure */ > + struct eth_device_info *eth_devices; > /* Lock to serialize config updates with service function */ > rte_spinlock_t rx_lock; > /* Max mbufs processed in any service function invocation */ > @@ -241,6 +243,7 @@ struct eth_rx_queue_info { > uint32_t flow_id_mask; /* Set to ~0 if app provides flow id else 0 */ > 
uint64_t event; > struct eth_rx_vector_data vector_data; > + struct rte_eth_event_enqueue_buffer *event_buf; > }; > > static struct rte_event_eth_rx_adapter **event_eth_rx_adapter; > @@ -262,6 +265,18 @@ rxa_validate_id(uint8_t id) > return id < RTE_EVENT_ETH_RX_ADAPTER_MAX_INSTANCE; > } > > +static inline struct rte_eth_event_enqueue_buffer * > +rxa_event_buf_get(struct rte_event_eth_rx_adapter *rx_adapter, > + uint16_t eth_dev_id, uint16_t rx_queue_id) > +{ > + if (rx_adapter->use_queue_event_buf) { > + struct eth_device_info *dev_info = > + &rx_adapter->eth_devices[eth_dev_id]; > + return dev_info->rx_queue[rx_queue_id].event_buf; > + } else > + return &rx_adapter->event_enqueue_buffer; > +} > + > #define RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) do { \ > if (!rxa_validate_id(id)) { \ > RTE_EDEV_LOG_ERR("Invalid eth Rx adapter id = %d\n", id); \ > @@ -767,10 +782,9 @@ rxa_enq_block_end_ts(struct rte_event_eth_rx_adapter *rx_adapter, > > /* Enqueue buffered events to event device */ > static inline uint16_t > -rxa_flush_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter) > +rxa_flush_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter, > + struct rte_eth_event_enqueue_buffer *buf) > { > - struct rte_eth_event_enqueue_buffer *buf = > - &rx_adapter->event_enqueue_buffer; > struct rte_event_eth_rx_adapter_stats *stats = &rx_adapter->stats; > uint16_t count = buf->last ? 
buf->last - buf->head : buf->count; > > @@ -888,15 +902,14 @@ rxa_buffer_mbufs(struct rte_event_eth_rx_adapter *rx_adapter, > uint16_t eth_dev_id, > uint16_t rx_queue_id, > struct rte_mbuf **mbufs, > - uint16_t num) > + uint16_t num, > + struct rte_eth_event_enqueue_buffer *buf) > { > uint32_t i; > struct eth_device_info *dev_info = > &rx_adapter->eth_devices[eth_dev_id]; > struct eth_rx_queue_info *eth_rx_queue_info = > &dev_info->rx_queue[rx_queue_id]; > - struct rte_eth_event_enqueue_buffer *buf = > - &rx_adapter->event_enqueue_buffer; > uint16_t new_tail = buf->tail; > uint64_t event = eth_rx_queue_info->event; > uint32_t flow_id_mask = eth_rx_queue_info->flow_id_mask; > @@ -995,11 +1008,10 @@ rxa_eth_rx(struct rte_event_eth_rx_adapter *rx_adapter, > uint16_t queue_id, > uint32_t rx_count, > uint32_t max_rx, > - int *rxq_empty) > + int *rxq_empty, > + struct rte_eth_event_enqueue_buffer *buf) > { > struct rte_mbuf *mbufs[BATCH_SIZE]; > - struct rte_eth_event_enqueue_buffer *buf = > - &rx_adapter->event_enqueue_buffer; > struct rte_event_eth_rx_adapter_stats *stats = > &rx_adapter->stats; > uint16_t n; > @@ -1012,7 +1024,7 @@ rxa_eth_rx(struct rte_event_eth_rx_adapter *rx_adapter, > */ > while (rxa_pkt_buf_available(buf)) { > if (buf->count >= BATCH_SIZE) > - rxa_flush_event_buffer(rx_adapter); > + rxa_flush_event_buffer(rx_adapter, buf); > > stats->rx_poll_count++; > n = rte_eth_rx_burst(port_id, queue_id, mbufs, BATCH_SIZE); > @@ -1021,14 +1033,14 @@ rxa_eth_rx(struct rte_event_eth_rx_adapter *rx_adapter, > *rxq_empty = 1; > break; > } > - rxa_buffer_mbufs(rx_adapter, port_id, queue_id, mbufs, n); > + rxa_buffer_mbufs(rx_adapter, port_id, queue_id, mbufs, n, buf); > nb_rx += n; > if (rx_count + nb_rx > max_rx) > break; > } > > if (buf->count > 0) > - rxa_flush_event_buffer(rx_adapter); > + rxa_flush_event_buffer(rx_adapter, buf); > > return nb_rx; > } > @@ -1169,7 +1181,7 @@ rxa_intr_ring_dequeue(struct rte_event_eth_rx_adapter *rx_adapter) > ring_lock = 
&rx_adapter->intr_ring_lock; > > if (buf->count >= BATCH_SIZE) > - rxa_flush_event_buffer(rx_adapter); > + rxa_flush_event_buffer(rx_adapter, buf); > > while (rxa_pkt_buf_available(buf)) { > struct eth_device_info *dev_info; > @@ -1221,7 +1233,7 @@ rxa_intr_ring_dequeue(struct rte_event_eth_rx_adapter *rx_adapter) > continue; > n = rxa_eth_rx(rx_adapter, port, i, nb_rx, > rx_adapter->max_nb_rx, > - &rxq_empty); > + &rxq_empty, buf); > nb_rx += n; > > enq_buffer_full = !rxq_empty && n == 0; > @@ -1242,7 +1254,7 @@ rxa_intr_ring_dequeue(struct rte_event_eth_rx_adapter *rx_adapter) > } else { > n = rxa_eth_rx(rx_adapter, port, queue, nb_rx, > rx_adapter->max_nb_rx, > - &rxq_empty); > + &rxq_empty, buf); > rx_adapter->qd_valid = !rxq_empty; > nb_rx += n; > if (nb_rx > rx_adapter->max_nb_rx) > @@ -1273,13 +1285,12 @@ rxa_poll(struct rte_event_eth_rx_adapter *rx_adapter) > { > uint32_t num_queue; > uint32_t nb_rx = 0; > - struct rte_eth_event_enqueue_buffer *buf; > + struct rte_eth_event_enqueue_buffer *buf = NULL; > uint32_t wrr_pos; > uint32_t max_nb_rx; > > wrr_pos = rx_adapter->wrr_pos; > max_nb_rx = rx_adapter->max_nb_rx; > - buf = &rx_adapter->event_enqueue_buffer; > > /* Iterate through a WRR sequence */ > for (num_queue = 0; num_queue < rx_adapter->wrr_len; num_queue++) { > @@ -1287,24 +1298,31 @@ rxa_poll(struct rte_event_eth_rx_adapter *rx_adapter) > uint16_t qid = rx_adapter->eth_rx_poll[poll_idx].eth_rx_qid; > uint16_t d = rx_adapter->eth_rx_poll[poll_idx].eth_dev_id; > > + buf = rxa_event_buf_get(rx_adapter, d, qid); > + > /* Don't do a batch dequeue from the rx queue if there isn't > * enough space in the enqueue buffer. 
> */ > if (buf->count >= BATCH_SIZE) > - rxa_flush_event_buffer(rx_adapter); > + rxa_flush_event_buffer(rx_adapter, buf); > if (!rxa_pkt_buf_available(buf)) { > - rx_adapter->wrr_pos = wrr_pos; > - return nb_rx; > + if (rx_adapter->use_queue_event_buf) > + goto poll_next_entry; > + else { > + rx_adapter->wrr_pos = wrr_pos; > + return nb_rx; > + } > } > > nb_rx += rxa_eth_rx(rx_adapter, d, qid, nb_rx, max_nb_rx, > - NULL); > + NULL, buf); > if (nb_rx > max_nb_rx) { > rx_adapter->wrr_pos = > (wrr_pos + 1) % rx_adapter->wrr_len; > break; > } > > +poll_next_entry: > if (++wrr_pos == rx_adapter->wrr_len) > wrr_pos = 0; > } > @@ -1315,12 +1333,13 @@ static void > rxa_vector_expire(struct eth_rx_vector_data *vec, void *arg) > { > struct rte_event_eth_rx_adapter *rx_adapter = arg; > - struct rte_eth_event_enqueue_buffer *buf = > - &rx_adapter->event_enqueue_buffer; > + struct rte_eth_event_enqueue_buffer *buf = NULL; > struct rte_event *ev; > > + buf = rxa_event_buf_get(rx_adapter, vec->port, vec->queue); > + > if (buf->count) > - rxa_flush_event_buffer(rx_adapter); > + rxa_flush_event_buffer(rx_adapter, buf); > > if (vec->vector_ev->nb_elem == 0) > return; > @@ -1947,9 +1966,16 @@ rxa_sw_del(struct rte_event_eth_rx_adapter *rx_adapter, > rx_adapter->num_rx_intr -= intrq; > dev_info->nb_rx_intr -= intrq; > dev_info->nb_shared_intr -= intrq && sintrq; > + if (rx_adapter->use_queue_event_buf) { > + struct rte_eth_event_enqueue_buffer *event_buf = > + dev_info->rx_queue[rx_queue_id].event_buf; > + rte_free(event_buf->events); > + rte_free(event_buf); > + dev_info->rx_queue[rx_queue_id].event_buf = NULL; > + } > } > > -static void > +static int > rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter, > struct eth_device_info *dev_info, > int32_t rx_queue_id, > @@ -1961,15 +1987,21 @@ rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter, > int intrq; > int sintrq; > struct rte_event *qi_ev; > + struct rte_eth_event_enqueue_buffer *new_rx_buf = NULL; > + uint16_t 
eth_dev_id = dev_info->dev->data->port_id; > + int ret; > > if (rx_queue_id == -1) { > uint16_t nb_rx_queues; > uint16_t i; > > nb_rx_queues = dev_info->dev->data->nb_rx_queues; > - for (i = 0; i < nb_rx_queues; i++) > - rxa_add_queue(rx_adapter, dev_info, i, conf); > - return; > + for (i = 0; i < nb_rx_queues; i++) { > + ret = rxa_add_queue(rx_adapter, dev_info, i, conf); > + if (ret) > + return ret; > + } > + return 0; > } > > pollq = rxa_polled_queue(dev_info, rx_queue_id); > @@ -2032,6 +2064,37 @@ rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter, > dev_info->next_q_idx = 0; > } > } > + > + if (!rx_adapter->use_queue_event_buf) > + return 0; > + > + new_rx_buf = rte_zmalloc_socket("rx_buffer_meta", > + sizeof(*new_rx_buf), 0, > + rte_eth_dev_socket_id(eth_dev_id)); > + if (new_rx_buf == NULL) { > + RTE_EDEV_LOG_ERR("Failed to allocate event buffer meta for " > + "dev_id: %d queue_id: %d", > + eth_dev_id, rx_queue_id); > + return -ENOMEM; > + } > + > + new_rx_buf->events_size = RTE_ALIGN(conf->event_buf_size, BATCH_SIZE); > + new_rx_buf->events_size += (2 * BATCH_SIZE); > + new_rx_buf->events = rte_zmalloc_socket("rx_buffer", > + sizeof(struct rte_event) * > + new_rx_buf->events_size, 0, > + rte_eth_dev_socket_id(eth_dev_id)); > + if (new_rx_buf->events == NULL) { > + rte_free(new_rx_buf); > + RTE_EDEV_LOG_ERR("Failed to allocate event buffer for " > + "dev_id: %d queue_id: %d", > + eth_dev_id, rx_queue_id); > + return -ENOMEM; > + } > + > + queue_info->event_buf = new_rx_buf; > + > + return 0; > } > > static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter, > @@ -2060,6 +2123,16 @@ static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter, > temp_conf.servicing_weight = 1; > } > queue_conf = &temp_conf; > + > + if (queue_conf->servicing_weight == 0 && > + rx_adapter->use_queue_event_buf) { > + > + RTE_EDEV_LOG_ERR("Use of queue level event buffer " > + "not supported for interrupt queues " > + "dev_id: %d queue_id: %d", > + eth_dev_id, 
rx_queue_id); > + return -EINVAL; > + } > } > > nb_rx_queues = dev_info->dev->data->nb_rx_queues; > @@ -2139,7 +2212,9 @@ static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter, > > > > - rxa_add_queue(rx_adapter, dev_info, rx_queue_id, queue_conf); > + ret = rxa_add_queue(rx_adapter, dev_info, rx_queue_id, queue_conf); > + if (ret) > + goto err_free_rxqueue; > rxa_calc_wrr_sequence(rx_adapter, rx_poll, rx_wrr); > > rte_free(rx_adapter->eth_rx_poll); > @@ -2160,7 +2235,7 @@ static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter, > rte_free(rx_poll); > rte_free(rx_wrr); > > - return 0; > + return ret; > } > > static int > @@ -2286,20 +2361,25 @@ rxa_create(uint8_t id, uint8_t dev_id, > rx_adapter->eth_devices[i].dev = &rte_eth_devices[i]; > > /* Rx adapter event buffer allocation */ > - buf = &rx_adapter->event_enqueue_buffer; > - buf->events_size = rxa_params->event_buf_size; > - > - events = rte_zmalloc_socket(rx_adapter->mem_name, > - buf->events_size * sizeof(*events), > - 0, socket_id); > - if (events == NULL) { > - RTE_EDEV_LOG_ERR("Failed to allocate mem for event buffer\n"); > - rte_free(rx_adapter->eth_devices); > - rte_free(rx_adapter); > - return -ENOMEM; > - } > + rx_adapter->use_queue_event_buf = rxa_params->use_queue_event_buf; > + > + if (!rx_adapter->use_queue_event_buf) { > + buf = &rx_adapter->event_enqueue_buffer; > + buf->events_size = rxa_params->event_buf_size; > + > + events = rte_zmalloc_socket(rx_adapter->mem_name, > + buf->events_size * sizeof(*events), > + 0, socket_id); > + if (events == NULL) { > + RTE_EDEV_LOG_ERR("Failed to allocate memory " > + "for adapter event buffer"); > + rte_free(rx_adapter->eth_devices); > + rte_free(rx_adapter); > + return -ENOMEM; > + } > > - rx_adapter->event_enqueue_buffer.events = events; > + rx_adapter->event_enqueue_buffer.events = events; > + } > > event_eth_rx_adapter[id] = rx_adapter; > > @@ -2327,6 +2407,7 @@ rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, > > /* 
use default values for adapter params */ > rxa_params.event_buf_size = ETH_EVENT_BUFFER_SIZE; > + rxa_params.use_queue_event_buf = false; > > return rxa_create(id, dev_id, &rxa_params, conf_cb, conf_arg); > } > @@ -2343,14 +2424,27 @@ rte_event_eth_rx_adapter_create_with_params(uint8_t id, uint8_t dev_id, > if (port_config == NULL) > return -EINVAL; > > - /* use default values if rxa_params is NULL */ > if (rxa_params == NULL) { > + /* use default values if rxa_params is NULL */ > rxa_params = &temp_params; > rxa_params->event_buf_size = ETH_EVENT_BUFFER_SIZE; > - } > - > - if (rxa_params->event_buf_size == 0) > + rxa_params->use_queue_event_buf = false; > + } else if ((!rxa_params->use_queue_event_buf && > + rxa_params->event_buf_size == 0) || > + (rxa_params->use_queue_event_buf && > + rxa_params->event_buf_size != 0)) { > + RTE_EDEV_LOG_ERR("Invalid adapter params\n"); > return -EINVAL; > + } else if (!rxa_params->use_queue_event_buf) { > + /* adjust event buff size with BATCH_SIZE used for fetching > + * packets from NIC rx queues to get full buffer utilization > + * and prevent unnecessary rollovers. > + */ > + > + rxa_params->event_buf_size = > + RTE_ALIGN(rxa_params->event_buf_size, BATCH_SIZE); > + rxa_params->event_buf_size += (BATCH_SIZE + BATCH_SIZE); > + } > > pc = rte_malloc(NULL, sizeof(*pc), 0); > if (pc == NULL) > @@ -2358,14 +2452,6 @@ rte_event_eth_rx_adapter_create_with_params(uint8_t id, uint8_t dev_id, > > *pc = *port_config; > > - /* adjust event buff size with BATCH_SIZE used for fetching packets > - * from NIC rx queues to get full buffer utilization and prevent > - * unnecessary rollovers. 
> - */ > - rxa_params->event_buf_size = RTE_ALIGN(rxa_params->event_buf_size, > - BATCH_SIZE); > - rxa_params->event_buf_size += (BATCH_SIZE + BATCH_SIZE); > - > ret = rxa_create(id, dev_id, rxa_params, rxa_default_conf_cb, pc); > if (ret) > rte_free(pc); > @@ -2418,7 +2504,8 @@ rte_event_eth_rx_adapter_free(uint8_t id) > if (rx_adapter->default_cb_arg) > rte_free(rx_adapter->conf_arg); > rte_free(rx_adapter->eth_devices); > - rte_free(rx_adapter->event_enqueue_buffer.events); > + if (!rx_adapter->use_queue_event_buf) > + rte_free(rx_adapter->event_enqueue_buffer.events); > rte_free(rx_adapter); > event_eth_rx_adapter[id] = NULL; > > @@ -2522,6 +2609,14 @@ rte_event_eth_rx_adapter_queue_add(uint8_t id, > return -EINVAL; > } > > + if ((rx_adapter->use_queue_event_buf && > + queue_conf->event_buf_size == 0) || > + (!rx_adapter->use_queue_event_buf && > + queue_conf->event_buf_size != 0)) { > + RTE_EDEV_LOG_ERR("Invalid Event buffer size for the queue"); > + return -EINVAL; > + } > + > dev_info = &rx_adapter->eth_devices[eth_dev_id]; > > if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) { > -- > 2.25.1 ^ permalink raw reply [flat|nested] 81+ messages in thread
* [dpdk-dev] [PATCH v8 5/5] test/event: add unit test for Rx adapter 2021-10-06 7:55 ` [dpdk-dev] [PATCH v8 1/5] eventdev/rx_adapter: add event buffer size configurability Naga Harish K S V ` (2 preceding siblings ...) 2021-10-06 7:55 ` [dpdk-dev] [PATCH v8 4/5] eventdev/rx_adapter: implement " Naga Harish K S V @ 2021-10-06 7:55 ` Naga Harish K S V 2021-10-07 8:52 ` Jerin Jacob 3 siblings, 1 reply; 81+ messages in thread From: Naga Harish K S V @ 2021-10-06 7:55 UTC (permalink / raw) To: jerinj, jay.jayatheerthan; +Cc: dev this patch adds unit tests for checking per rx queue event buffer feature using rte_event_eth_rx_adapter_queue_add api. Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com> Acked-by: Jay Jayatheerthan <jay.jayatheerthan@intel.com> --- app/test/test_event_eth_rx_adapter.c | 86 ++++++++++++++++++++++++++++ 1 file changed, 86 insertions(+) diff --git a/app/test/test_event_eth_rx_adapter.c b/app/test/test_event_eth_rx_adapter.c index 7c2cf0dd70..cf3c989efb 100644 --- a/app/test/test_event_eth_rx_adapter.c +++ b/app/test/test_event_eth_rx_adapter.c @@ -387,6 +387,90 @@ adapter_create(void) return err; } +static int +adapter_create_with_params(void) +{ + int err; + struct rte_event_dev_info dev_info; + struct rte_event_port_conf rx_p_conf; + struct rte_event_eth_rx_adapter_params rxa_params; + + memset(&rx_p_conf, 0, sizeof(rx_p_conf)); + + err = rte_event_dev_info_get(TEST_DEV_ID, &dev_info); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + rx_p_conf.new_event_threshold = dev_info.max_num_events; + rx_p_conf.dequeue_depth = dev_info.max_event_port_dequeue_depth; + rx_p_conf.enqueue_depth = dev_info.max_event_port_enqueue_depth; + + rxa_params.use_queue_event_buf = false; + rxa_params.event_buf_size = 0; + + err = rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID, + TEST_DEV_ID, &rx_p_conf, &rxa_params); + TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err); + + rxa_params.use_queue_event_buf = true; + + err = 
rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID, + TEST_DEV_ID, &rx_p_conf, &rxa_params); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + err = rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID, + TEST_DEV_ID, &rx_p_conf, &rxa_params); + TEST_ASSERT(err == -EEXIST, "Expected -EEXIST got %d", err); + + return TEST_SUCCESS; +} + +static int +adapter_queue_event_buf_test(void) +{ + int err; + struct rte_event ev; + uint32_t cap; + + struct rte_event_eth_rx_adapter_queue_conf queue_config = {0}; + + err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID, TEST_ETHDEV_ID, + &cap); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + ev.queue_id = 0; + ev.sched_type = RTE_SCHED_TYPE_ATOMIC; + ev.priority = 0; + + queue_config.rx_queue_flags = 0; + if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID) { + ev.flow_id = 1; + queue_config.rx_queue_flags = + RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID; + } + queue_config.ev = ev; + queue_config.servicing_weight = 1; + queue_config.event_buf_size = 0; + + err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID, + TEST_ETHDEV_ID, 0, + &queue_config); + TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err); + + queue_config.event_buf_size = 1024; + + err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID, + TEST_ETHDEV_ID, 0, + &queue_config); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID, + TEST_ETHDEV_ID, + 0); + TEST_ASSERT(err == 0, "Expected 0 got %d", err); + + return TEST_SUCCESS; +} + static void adapter_free(void) { @@ -826,6 +910,8 @@ static struct unit_test_suite event_eth_rx_tests = { TEST_CASE_ST(adapter_create, adapter_free, adapter_start_stop), TEST_CASE_ST(adapter_create, adapter_free, adapter_stats), TEST_CASE_ST(adapter_create, adapter_free, adapter_queue_conf), + TEST_CASE_ST(adapter_create_with_params, adapter_free, + adapter_queue_event_buf_test), TEST_CASES_END() /**< NULL terminate unit test array */ } }; -- 2.25.1 ^ 
permalink raw reply [flat|nested] 81+ messages in thread
* Re: [dpdk-dev] [PATCH v8 5/5] test/event: add unit test for Rx adapter 2021-10-06 7:55 ` [dpdk-dev] [PATCH v8 5/5] test/event: add unit test for Rx adapter Naga Harish K S V @ 2021-10-07 8:52 ` Jerin Jacob 0 siblings, 0 replies; 81+ messages in thread From: Jerin Jacob @ 2021-10-07 8:52 UTC (permalink / raw) To: Naga Harish K S V; +Cc: Jerin Jacob, Jayatheerthan, Jay, dpdk-dev On Wed, Oct 6, 2021 at 1:27 PM Naga Harish K S V <s.v.naga.harish.k@intel.com> wrote: > > this patch adds unit tests for checking per rx queue event buffer > feature using rte_event_eth_rx_adapter_queue_add api. > > Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com> > Acked-by: Jay Jayatheerthan <jay.jayatheerthan@intel.com> Series applied to dpdk-next-eventdev/for-main. Thanks. > --- > app/test/test_event_eth_rx_adapter.c | 86 ++++++++++++++++++++++++++++ > 1 file changed, 86 insertions(+) > > diff --git a/app/test/test_event_eth_rx_adapter.c b/app/test/test_event_eth_rx_adapter.c > index 7c2cf0dd70..cf3c989efb 100644 > --- a/app/test/test_event_eth_rx_adapter.c > +++ b/app/test/test_event_eth_rx_adapter.c > @@ -387,6 +387,90 @@ adapter_create(void) > return err; > } > > +static int > +adapter_create_with_params(void) > +{ > + int err; > + struct rte_event_dev_info dev_info; > + struct rte_event_port_conf rx_p_conf; > + struct rte_event_eth_rx_adapter_params rxa_params; > + > + memset(&rx_p_conf, 0, sizeof(rx_p_conf)); > + > + err = rte_event_dev_info_get(TEST_DEV_ID, &dev_info); > + TEST_ASSERT(err == 0, "Expected 0 got %d", err); > + > + rx_p_conf.new_event_threshold = dev_info.max_num_events; > + rx_p_conf.dequeue_depth = dev_info.max_event_port_dequeue_depth; > + rx_p_conf.enqueue_depth = dev_info.max_event_port_enqueue_depth; > + > + rxa_params.use_queue_event_buf = false; > + rxa_params.event_buf_size = 0; > + > + err = rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID, > + TEST_DEV_ID, &rx_p_conf, &rxa_params); > + TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got 
%d", err); > + > + rxa_params.use_queue_event_buf = true; > + > + err = rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID, > + TEST_DEV_ID, &rx_p_conf, &rxa_params); > + TEST_ASSERT(err == 0, "Expected 0 got %d", err); > + > + err = rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID, > + TEST_DEV_ID, &rx_p_conf, &rxa_params); > + TEST_ASSERT(err == -EEXIST, "Expected -EEXIST got %d", err); > + > + return TEST_SUCCESS; > +} > + > +static int > +adapter_queue_event_buf_test(void) > +{ > + int err; > + struct rte_event ev; > + uint32_t cap; > + > + struct rte_event_eth_rx_adapter_queue_conf queue_config = {0}; > + > + err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID, TEST_ETHDEV_ID, > + &cap); > + TEST_ASSERT(err == 0, "Expected 0 got %d", err); > + > + ev.queue_id = 0; > + ev.sched_type = RTE_SCHED_TYPE_ATOMIC; > + ev.priority = 0; > + > + queue_config.rx_queue_flags = 0; > + if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID) { > + ev.flow_id = 1; > + queue_config.rx_queue_flags = > + RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID; > + } > + queue_config.ev = ev; > + queue_config.servicing_weight = 1; > + queue_config.event_buf_size = 0; > + > + err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID, > + TEST_ETHDEV_ID, 0, > + &queue_config); > + TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err); > + > + queue_config.event_buf_size = 1024; > + > + err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID, > + TEST_ETHDEV_ID, 0, > + &queue_config); > + TEST_ASSERT(err == 0, "Expected 0 got %d", err); > + > + err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID, > + TEST_ETHDEV_ID, > + 0); > + TEST_ASSERT(err == 0, "Expected 0 got %d", err); > + > + return TEST_SUCCESS; > +} > + > static void > adapter_free(void) > { > @@ -826,6 +910,8 @@ static struct unit_test_suite event_eth_rx_tests = { > TEST_CASE_ST(adapter_create, adapter_free, adapter_start_stop), > TEST_CASE_ST(adapter_create, adapter_free, adapter_stats), > 
TEST_CASE_ST(adapter_create, adapter_free, adapter_queue_conf), > + TEST_CASE_ST(adapter_create_with_params, adapter_free, > + adapter_queue_event_buf_test), > TEST_CASES_END() /**< NULL terminate unit test array */ > } > }; > -- > 2.25.1 > ^ permalink raw reply [flat|nested] 81+ messages in thread
* Re: [dpdk-dev] [PATCH v3 1/5] eventdev/rx_adapter: add event buffer size configurability 2021-09-22 15:13 ` [dpdk-dev] [PATCH v3 1/5] eventdev/rx_adapter: add event buffer size configurability Naga Harish K S V ` (5 preceding siblings ...) 2021-09-30 8:28 ` [dpdk-dev] [PATCH v4 " Naga Harish K S V @ 2021-10-07 8:51 ` Jerin Jacob 6 siblings, 0 replies; 81+ messages in thread From: Jerin Jacob @ 2021-10-07 8:51 UTC (permalink / raw) To: Naga Harish K S V Cc: Jerin Jacob, Jayatheerthan, Jay, dpdk-dev, Ganapati Kundapura On Wed, Sep 22, 2021 at 8:44 PM Naga Harish K S V <s.v.naga.harish.k@intel.com> wrote: > > Currently event buffer is static array with a default size defined > internally. > > To configure event buffer size from application, > ``rte_event_eth_rx_adapter_create_with_params`` api is added which > takes ``struct rte_event_eth_rx_adapter_params`` to configure event > buffer size in addition other params . The event buffer size is > rounded up for better buffer utilization and performance . In case > of NULL params argument, default event buffer size is used. > > Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com> > Signed-off-by: Ganapati Kundapura <ganapati.kundapura@intel.com> Acked-by: Jerin Jacob <jerinj@marvell.com> > > --- > v3: > * updated documentation and code comments as per review comments. > * updated new create api test case name with suitable one. > > v2: > * Updated header file and rx adapter documentation as per review comments. > * new api name is modified as rte_event_eth_rx_adapter_create_with_params > as per review comments. > * rxa_params pointer argument Value NULL is allowed to represent the > default values > > v1: > * Initial implementation with documentation and unit tests. 
> --- > .../prog_guide/event_ethernet_rx_adapter.rst | 7 ++ > lib/eventdev/rte_event_eth_rx_adapter.c | 98 +++++++++++++++++-- > lib/eventdev/rte_event_eth_rx_adapter.h | 41 +++++++- > lib/eventdev/version.map | 2 + > 4 files changed, 140 insertions(+), 8 deletions(-) > > diff --git a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst > index 0780b6f711..dd753613bd 100644 > --- a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst > +++ b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst > @@ -62,6 +62,13 @@ service function and needs to create an event port for it. The callback is > expected to fill the ``struct rte_event_eth_rx_adapter_conf structure`` > passed to it. > > +If the application desires to control the event buffer size, it can use the > +``rte_event_eth_rx_adapter_create_with_params()`` api. The event buffer size is > +specified using ``struct rte_event_eth_rx_adapter_params::event_buf_size``. > +The function is passed the event device to be associated with the adapter > +and port configuration for the adapter to setup an event port if the > +adapter needs to use a service function. 
> + > Adding Rx Queues to the Adapter Instance > ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ > > diff --git a/lib/eventdev/rte_event_eth_rx_adapter.c b/lib/eventdev/rte_event_eth_rx_adapter.c > index f2dc69503d..7dec9a8734 100644 > --- a/lib/eventdev/rte_event_eth_rx_adapter.c > +++ b/lib/eventdev/rte_event_eth_rx_adapter.c > @@ -82,7 +82,9 @@ struct rte_eth_event_enqueue_buffer { > /* Count of events in this buffer */ > uint16_t count; > /* Array of events in this buffer */ > - struct rte_event events[ETH_EVENT_BUFFER_SIZE]; > + struct rte_event *events; > + /* size of event buffer */ > + uint16_t events_size; > /* Event enqueue happens from head */ > uint16_t head; > /* New packets from rte_eth_rx_burst is enqued from tail */ > @@ -919,7 +921,7 @@ rxa_buffer_mbufs(struct rte_event_eth_rx_adapter *rx_adapter, > dropped = 0; > nb_cb = dev_info->cb_fn(eth_dev_id, rx_queue_id, > buf->last | > - (RTE_DIM(buf->events) & ~buf->last_mask), > + (buf->events_size & ~buf->last_mask), > buf->count >= BATCH_SIZE ? 
> buf->count - BATCH_SIZE : 0, > &buf->events[buf->tail], > @@ -945,7 +947,7 @@ rxa_pkt_buf_available(struct rte_eth_event_enqueue_buffer *buf) > uint32_t nb_req = buf->tail + BATCH_SIZE; > > if (!buf->last) { > - if (nb_req <= RTE_DIM(buf->events)) > + if (nb_req <= buf->events_size) > return true; > > if (buf->head >= BATCH_SIZE) { > @@ -2164,12 +2166,15 @@ rxa_ctrl(uint8_t id, int start) > return 0; > } > > -int > -rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, > - rte_event_eth_rx_adapter_conf_cb conf_cb, > - void *conf_arg) > +static int > +rxa_create(uint8_t id, uint8_t dev_id, > + struct rte_event_eth_rx_adapter_params *rxa_params, > + rte_event_eth_rx_adapter_conf_cb conf_cb, > + void *conf_arg) > { > struct rte_event_eth_rx_adapter *rx_adapter; > + struct rte_eth_event_enqueue_buffer *buf; > + struct rte_event *events; > int ret; > int socket_id; > uint16_t i; > @@ -2184,6 +2189,7 @@ rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, > > RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL); > RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); > + > if (conf_cb == NULL) > return -EINVAL; > > @@ -2231,11 +2237,30 @@ rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, > rte_free(rx_adapter); > return -ENOMEM; > } > + > rte_spinlock_init(&rx_adapter->rx_lock); > + > for (i = 0; i < RTE_MAX_ETHPORTS; i++) > rx_adapter->eth_devices[i].dev = &rte_eth_devices[i]; > > + /* Rx adapter event buffer allocation */ > + buf = &rx_adapter->event_enqueue_buffer; > + buf->events_size = RTE_ALIGN(rxa_params->event_buf_size, BATCH_SIZE); > + > + events = rte_zmalloc_socket(rx_adapter->mem_name, > + buf->events_size * sizeof(*events), > + 0, socket_id); > + if (events == NULL) { > + RTE_EDEV_LOG_ERR("Failed to allocate mem for event buffer\n"); > + rte_free(rx_adapter->eth_devices); > + rte_free(rx_adapter); > + return -ENOMEM; > + } > + > + rx_adapter->event_enqueue_buffer.events = events; > + > event_eth_rx_adapter[id] = 
rx_adapter; > + > if (conf_cb == rxa_default_conf_cb) > rx_adapter->default_cb_arg = 1; > rte_eventdev_trace_eth_rx_adapter_create(id, dev_id, conf_cb, > @@ -2243,6 +2268,61 @@ rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, > return 0; > } > > +int > +rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, > + rte_event_eth_rx_adapter_conf_cb conf_cb, > + void *conf_arg) > +{ > + struct rte_event_eth_rx_adapter_params rxa_params; > + > + /* use default values for adapter params */ > + rxa_params.event_buf_size = ETH_EVENT_BUFFER_SIZE; > + > + return rxa_create(id, dev_id, &rxa_params, conf_cb, conf_arg); > +} > + > +int > +rte_event_eth_rx_adapter_create_with_params(uint8_t id, uint8_t dev_id, > + struct rte_event_port_conf *port_config, > + struct rte_event_eth_rx_adapter_params *rxa_params) > +{ > + struct rte_event_port_conf *pc; > + int ret; > + struct rte_event_eth_rx_adapter_params temp_params = {0}; > + > + if (port_config == NULL) > + return -EINVAL; > + > + /* use default values if rxa_parmas is NULL */ > + if (rxa_params == NULL) { > + rxa_params = &temp_params; > + rxa_params->event_buf_size = ETH_EVENT_BUFFER_SIZE; > + } > + > + if (rxa_params->event_buf_size == 0) > + return -EINVAL; > + > + pc = rte_malloc(NULL, sizeof(*pc), 0); > + if (pc == NULL) > + return -ENOMEM; > + > + *pc = *port_config; > + > + /* adjust event buff size with BATCH_SIZE used for fetching packets > + * from NIC rx queues to get full buffer utilization and prevent > + * unnecessary rollovers. 
> + */ > + rxa_params->event_buf_size = RTE_ALIGN(rxa_params->event_buf_size, > + BATCH_SIZE); > + rxa_params->event_buf_size += BATCH_SIZE + BATCH_SIZE; > + > + ret = rxa_create(id, dev_id, rxa_params, rxa_default_conf_cb, pc); > + if (ret) > + rte_free(pc); > + > + return ret; > +} > + > int > rte_event_eth_rx_adapter_create(uint8_t id, uint8_t dev_id, > struct rte_event_port_conf *port_config) > @@ -2252,12 +2332,14 @@ rte_event_eth_rx_adapter_create(uint8_t id, uint8_t dev_id, > > if (port_config == NULL) > return -EINVAL; > + > RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL); > > pc = rte_malloc(NULL, sizeof(*pc), 0); > if (pc == NULL) > return -ENOMEM; > *pc = *port_config; > + > ret = rte_event_eth_rx_adapter_create_ext(id, dev_id, > rxa_default_conf_cb, > pc); > @@ -2286,6 +2368,7 @@ rte_event_eth_rx_adapter_free(uint8_t id) > if (rx_adapter->default_cb_arg) > rte_free(rx_adapter->conf_arg); > rte_free(rx_adapter->eth_devices); > + rte_free(rx_adapter->event_enqueue_buffer.events); > rte_free(rx_adapter); > event_eth_rx_adapter[id] = NULL; > > @@ -2658,6 +2741,7 @@ rte_event_eth_rx_adapter_stats_get(uint8_t id, > > stats->rx_packets += dev_stats_sum.rx_packets; > stats->rx_enq_count += dev_stats_sum.rx_enq_count; > + > return 0; > } > > diff --git a/lib/eventdev/rte_event_eth_rx_adapter.h b/lib/eventdev/rte_event_eth_rx_adapter.h > index 3f8b362295..6e8b3085f8 100644 > --- a/lib/eventdev/rte_event_eth_rx_adapter.h > +++ b/lib/eventdev/rte_event_eth_rx_adapter.h > @@ -26,6 +26,7 @@ > * The ethernet Rx event adapter's functions are: > * - rte_event_eth_rx_adapter_create_ext() > * - rte_event_eth_rx_adapter_create() > + * - rte_event_eth_rx_adapter_create_with_params() > * - rte_event_eth_rx_adapter_free() > * - rte_event_eth_rx_adapter_queue_add() > * - rte_event_eth_rx_adapter_queue_del() > @@ -36,7 +37,7 @@ > * > * The application creates an ethernet to event adapter using > * rte_event_eth_rx_adapter_create_ext() or 
rte_event_eth_rx_adapter_create() > - * functions. > + * or rte_event_eth_rx_adapter_create_with_params() functions. > * The adapter needs to know which ethernet rx queues to poll for mbufs as well > * as event device parameters such as the event queue identifier, event > * priority and scheduling type that the adapter should use when constructing > @@ -256,6 +257,17 @@ struct rte_event_eth_rx_adapter_vector_limits { > */ > }; > > +/** > + * A structure to hold adapter config params > + */ > +struct rte_event_eth_rx_adapter_params { > + uint16_t event_buf_size; > + /**< size of event buffer for the adapter. > + * This value is rounded up for better buffer utilization > + * and performance. > + */ > +}; > + > /** > * > * Callback function invoked by the SW adapter before it continues > @@ -356,6 +368,33 @@ int rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id, > int rte_event_eth_rx_adapter_create(uint8_t id, uint8_t dev_id, > struct rte_event_port_conf *port_config); > > +/** > + * This is a variant of rte_event_eth_rx_adapter_create() with additional > + * adapter params specified in ``struct rte_event_eth_rx_adapter_params``. > + * > + * @param id > + * The identifier of the ethernet Rx event adapter. > + * > + * @param dev_id > + * The identifier of the event device to configure. > + * > + * @param port_config > + * Argument of type *rte_event_port_conf* that is passed to the conf_cb > + * function. > + * > + * @param rxa_params > + * Pointer to struct rte_event_eth_rx_adapter_params. > + * In case of NULL, default values are used. 
> + * > + * @return > + * - 0: Success > + * - <0: Error code on failure > + */ > +__rte_experimental > +int rte_event_eth_rx_adapter_create_with_params(uint8_t id, uint8_t dev_id, > + struct rte_event_port_conf *port_config, > + struct rte_event_eth_rx_adapter_params *rxa_params); > + > /** > * Free an event adapter > * > diff --git a/lib/eventdev/version.map b/lib/eventdev/version.map > index cd86d2d908..87586de879 100644 > --- a/lib/eventdev/version.map > +++ b/lib/eventdev/version.map > @@ -138,6 +138,8 @@ EXPERIMENTAL { > __rte_eventdev_trace_port_setup; > # added in 20.11 > rte_event_pmd_pci_probe_named; > + # added in 21.11 > + rte_event_eth_rx_adapter_create_with_params; > > #added in 21.05 > rte_event_vector_pool_create; > -- > 2.25.1 > ^ permalink raw reply [flat|nested] 81+ messages in thread
end of thread, other threads:[~2021-10-07 8:53 UTC | newest] Thread overview: 81+ messages (download: mbox.gz / follow: Atom feed) -- links below jump to the message on this page -- 2021-09-18 13:11 [dpdk-dev] [PATCH v1 1/5] eventdev: rx_adapter: add support to configure event buffer size Naga Harish K S V 2021-09-18 13:11 ` [dpdk-dev] [PATCH v1 2/5] test/event: add unit test for rte_event_eth_rx_adapter_create2 api Naga Harish K S V 2021-09-18 13:11 ` [dpdk-dev] [PATCH v1 3/5] eventdev:rx_adapter:add per queue event buffer configure support Naga Harish K S V 2021-09-20 6:23 ` Jerin Jacob 2021-09-21 13:46 ` Naga Harish K, S V 2021-09-18 13:11 ` [dpdk-dev] [PATCH v1 4/5] eventdev:rx_adapter: implement per queue event buffer Naga Harish K S V 2021-09-18 13:11 ` [dpdk-dev] [PATCH v1 5/5] test/eventdev: add per rx queue event buffer unit Naga Harish K S V 2021-09-20 6:20 ` [dpdk-dev] [PATCH v1 1/5] eventdev: rx_adapter: add support to configure event buffer size Jerin Jacob 2021-09-21 13:45 ` Naga Harish K, S V 2021-09-21 9:21 ` [dpdk-dev] [PATCH v2 1/5] eventdev/rx_adapter: " Naga Harish K S V 2021-09-21 9:21 ` [dpdk-dev] [PATCH v2 2/5] test/event: add unit test for event buffer size config api Naga Harish K S V 2021-09-21 9:21 ` [dpdk-dev] [PATCH v2 3/5] eventdev/rx_adapter:add per queue event buffer configure support Naga Harish K S V 2021-09-21 16:24 ` Jerin Jacob 2021-09-22 15:21 ` Naga Harish K, S V 2021-09-21 20:32 ` [dpdk-dev] [EXT] " Pavan Nikhilesh Bhagavatula 2021-09-22 15:39 ` Naga Harish K, S V 2021-09-21 9:21 ` [dpdk-dev] [PATCH v2 4/5] eventdev/rx_adapter: implement per queue event buffer Naga Harish K S V 2021-09-21 9:21 ` [dpdk-dev] [PATCH v2 5/5] test/eventdev: add per rx queue event buffer unit Naga Harish K S V 2021-09-21 9:45 ` [dpdk-dev] [PATCH v2 1/5] eventdev/rx_adapter: add support to configure event buffer size Naga Harish K S V 2021-09-21 9:45 ` [dpdk-dev] [PATCH v2 2/5] test/event: add unit test for event buffer size config api Naga Harish K 
S V 2021-09-21 20:28 ` [dpdk-dev] [EXT] " Pavan Nikhilesh Bhagavatula 2021-09-22 15:37 ` Naga Harish K, S V 2021-09-21 9:45 ` [dpdk-dev] [PATCH v2 3/5] eventdev/rx_adapter:add per queue event buffer configure support Naga Harish K S V 2021-09-21 9:45 ` [dpdk-dev] [PATCH v2 4/5] eventdev/rx_adapter: implement per queue event buffer Naga Harish K S V 2021-09-21 20:51 ` [dpdk-dev] [EXT] " Pavan Nikhilesh Bhagavatula 2021-09-22 15:45 ` Naga Harish K, S V 2021-09-21 9:45 ` [dpdk-dev] [PATCH v2 5/5] test/eventdev: add per rx queue event buffer unit Naga Harish K S V 2021-09-21 20:24 ` [dpdk-dev] [EXT] [PATCH v2 1/5] eventdev/rx_adapter: add support to configure event buffer size Pavan Nikhilesh Bhagavatula 2021-09-22 15:36 ` Naga Harish K, S V 2021-09-22 15:13 ` [dpdk-dev] [PATCH v3 1/5] eventdev/rx_adapter: add event buffer size configurability Naga Harish K S V 2021-09-22 15:13 ` [dpdk-dev] [PATCH v3 2/5] test/event: add unit test for Rx adapter Naga Harish K S V 2021-09-22 15:13 ` [dpdk-dev] [PATCH v3 3/5] eventdev/rx_adapter: introduce per queue event buffer Naga Harish K S V 2021-09-22 15:13 ` [dpdk-dev] [PATCH v3 4/5] eventdev/rx_adapter: implement " Naga Harish K S V 2021-09-22 15:14 ` [dpdk-dev] [PATCH v3 5/5] test/event: add unit test for Rx adapter Naga Harish K S V 2021-09-29 5:16 ` [dpdk-dev] [PATCH v3 1/5] eventdev/rx_adapter: add event buffer size configurability Jerin Jacob 2021-09-30 8:34 ` Jayatheerthan, Jay 2021-09-30 8:28 ` [dpdk-dev] [PATCH v4 " Naga Harish K S V 2021-09-30 8:28 ` [dpdk-dev] [PATCH v4 2/5] test/event: add unit test for Rx adapter Naga Harish K S V 2021-09-30 8:28 ` [dpdk-dev] [PATCH v4 3/5] eventdev/rx_adapter: introduce per queue event buffer Naga Harish K S V 2021-09-30 8:29 ` [dpdk-dev] [PATCH v4 4/5] eventdev/rx_adapter: implement " Naga Harish K S V 2021-09-30 8:29 ` [dpdk-dev] [PATCH v4 5/5] test/event: add unit test for Rx adapter Naga Harish K S V 2021-10-04 5:41 ` [dpdk-dev] [PATCH v5 1/5] eventdev/rx_adapter: add event 
buffer size configurability Naga Harish K S V 2021-10-04 5:41 ` [dpdk-dev] [PATCH v5 2/5] test/event: add unit test for Rx adapter Naga Harish K S V 2021-10-05 7:36 ` Jayatheerthan, Jay 2021-10-04 5:41 ` [dpdk-dev] [PATCH v5 3/5] eventdev/rx_adapter: introduce per queue event buffer Naga Harish K S V 2021-10-05 7:39 ` Jayatheerthan, Jay 2021-10-04 5:41 ` [dpdk-dev] [PATCH v5 4/5] eventdev/rx_adapter: implement " Naga Harish K S V 2021-10-05 7:55 ` Jayatheerthan, Jay 2021-10-05 14:47 ` Naga Harish K, S V 2021-10-05 15:01 ` Jayatheerthan, Jay 2021-10-06 4:06 ` Naga Harish K, S V 2021-10-04 5:41 ` [dpdk-dev] [PATCH v5 5/5] test/event: add unit test for Rx adapter Naga Harish K S V 2021-10-05 8:03 ` Jayatheerthan, Jay 2021-10-05 7:19 ` [dpdk-dev] [PATCH v5 1/5] eventdev/rx_adapter: add event buffer size configurability Jayatheerthan, Jay 2021-10-05 14:45 ` Naga Harish K, S V 2021-10-05 14:38 ` [dpdk-dev] [PATCH v6 " Naga Harish K S V 2021-10-05 14:38 ` [dpdk-dev] [PATCH v6 2/5] test/event: add unit test for Rx adapter Naga Harish K S V 2021-10-05 14:38 ` [dpdk-dev] [PATCH v6 3/5] eventdev/rx_adapter: introduce per queue event buffer Naga Harish K S V 2021-10-05 14:38 ` [dpdk-dev] [PATCH v6 4/5] eventdev/rx_adapter: implement " Naga Harish K S V 2021-10-05 14:38 ` [dpdk-dev] [PATCH v6 5/5] test/event: add unit test for Rx adapter Naga Harish K S V 2021-10-06 4:02 ` [dpdk-dev] [PATCH v7 1/5] eventdev/rx_adapter: add event buffer size configurability Naga Harish K S V 2021-10-06 4:02 ` [dpdk-dev] [PATCH v7 2/5] test/event: add unit test for Rx adapter Naga Harish K S V 2021-10-06 4:02 ` [dpdk-dev] [PATCH v7 3/5] eventdev/rx_adapter: introduce per queue event buffer Naga Harish K S V 2021-10-06 4:02 ` [dpdk-dev] [PATCH v7 4/5] eventdev/rx_adapter: implement " Naga Harish K S V 2021-10-06 6:42 ` Jayatheerthan, Jay 2021-10-06 7:53 ` Naga Harish K, S V 2021-10-06 4:02 ` [dpdk-dev] [PATCH v7 5/5] test/event: add unit test for Rx adapter Naga Harish K S V 2021-10-06 6:19 ` 
[dpdk-dev] [PATCH v7 1/5] eventdev/rx_adapter: add event buffer size configurability Jayatheerthan, Jay 2021-10-06 7:47 ` [dpdk-dev] [PATCH v8 " Naga Harish K S V 2021-10-06 7:47 ` [dpdk-dev] [PATCH v8 2/5] test/event: add unit test for Rx adapter Naga Harish K S V 2021-10-06 7:47 ` [dpdk-dev] [PATCH v8 3/5] eventdev/rx_adapter: introduce per queue event buffer Naga Harish K S V 2021-10-06 7:47 ` [dpdk-dev] [PATCH v8 4/5] eventdev/rx_adapter: implement " Naga Harish K S V 2021-10-06 7:47 ` [dpdk-dev] [PATCH v8 5/5] test/event: add unit test for Rx adapter Naga Harish K S V 2021-10-06 7:55 ` [dpdk-dev] [PATCH v8 1/5] eventdev/rx_adapter: add event buffer size configurability Naga Harish K S V 2021-10-06 7:55 ` [dpdk-dev] [PATCH v8 2/5] test/event: add unit test for Rx adapter Naga Harish K S V 2021-10-06 7:55 ` [dpdk-dev] [PATCH v8 3/5] eventdev/rx_adapter: introduce per queue event buffer Naga Harish K S V 2021-10-06 7:55 ` [dpdk-dev] [PATCH v8 4/5] eventdev/rx_adapter: implement " Naga Harish K S V 2021-10-06 9:11 ` Jayatheerthan, Jay 2021-10-06 7:55 ` [dpdk-dev] [PATCH v8 5/5] test/event: add unit test for Rx adapter Naga Harish K S V 2021-10-07 8:52 ` Jerin Jacob 2021-10-07 8:51 ` [dpdk-dev] [PATCH v3 1/5] eventdev/rx_adapter: add event buffer size configurability Jerin Jacob
This is a public inbox, see mirroring instructions for how to clone and mirror all data and code used for this inbox; as well as URLs for NNTP newsgroup(s).