* [PATCH 0/3] Rx adapter API to add Rx queues in burst
@ 2025-02-07 14:09 Shijith Thotton
From: Shijith Thotton @ 2025-02-07 14:09 UTC (permalink / raw)
To: jerinj; +Cc: pbhagavatula, Shijith Thotton, dev
This patch series introduces the rte_event_eth_rx_adapter_queues_add()
API, allowing multiple Rx queues to be added to an Rx adapter in a
single burst. This enhancement benefits applications that require bulk
addition of Rx queues.
To support this API, a new internal eventdev PMD operation for adding Rx
queues in bursts has been introduced. The second patch in the series
implements this operation for the CNXK PMD.
Additionally, a unit test has been added to validate the API's
functionality.
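As a quick illustration (not part of this series; adapter_id, eth_port_id and
the configuration values below are placeholders), an application can then add
two Rx queues with per-queue event configuration in a single call:

	int32_t queues[2] = {0, 1};
	struct rte_event_eth_rx_adapter_queue_conf conf[2];
	uint8_t adapter_id = 0;     /* placeholder adapter id */
	uint16_t eth_port_id = 0;   /* placeholder ethdev port id */
	int ret;

	memset(conf, 0, sizeof(conf));
	conf[0].ev.queue_id = 0;    /* events from Rx queue 0 -> event queue 0 */
	conf[0].ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
	conf[0].servicing_weight = 1;
	conf[1] = conf[0];
	conf[1].ev.queue_id = 1;    /* events from Rx queue 1 -> event queue 1 */

	ret = rte_event_eth_rx_adapter_queues_add(adapter_id, eth_port_id,
						  queues, conf, RTE_DIM(queues));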
RFC: https://patches.dpdk.org/project/dpdk/patch/20241219073405.1724200-1-sthotton@marvell.com/
Shijith Thotton (3):
eventdev/eth_rx: add API to burst add queues to Rx adapter
event/cnxk: enable PMD op to burst add queues to Rx adapter
test/event: unit test to burst add Rx queues to adapter
app/test/test_event_eth_rx_adapter.c | 86 ++++++++++
drivers/event/cnxk/cn10k_eventdev.c | 82 ++++++++--
drivers/event/cnxk/cn20k_eventdev.c | 195 ++++++++++++++++-------
drivers/event/cnxk/cn9k_eventdev.c | 38 ++++-
drivers/event/cnxk/cnxk_eventdev.h | 8 +-
drivers/event/cnxk/cnxk_eventdev_adptr.c | 102 +++++++-----
lib/eventdev/eventdev_pmd.h | 34 ++++
lib/eventdev/eventdev_trace.h | 14 ++
lib/eventdev/eventdev_trace_points.c | 3 +
lib/eventdev/rte_event_eth_rx_adapter.c | 146 +++++++++++++++++
lib/eventdev/rte_event_eth_rx_adapter.h | 33 ++++
lib/eventdev/version.map | 3 +
12 files changed, 627 insertions(+), 117 deletions(-)
--
2.25.1
* [PATCH 1/3] eventdev/eth_rx: add API to burst add queues to Rx adapter
From: Shijith Thotton @ 2025-02-07 14:09 UTC (permalink / raw)
To: jerinj
Cc: pbhagavatula, Shijith Thotton, dev, Pravin Pathak,
Hemant Agrawal, Sachin Saxena, Mattias Rönnblom, Liang Ma,
Peter Mccarthy, Harry van Haaren, Erik Gabriel Carrillo,
Abhinandan Gujjar, Amit Prakash Shukla, Naga Harish K S V,
Anatoly Burakov
This patch introduces a new API, rte_event_eth_rx_adapter_queues_add(),
which allows bulk addition of multiple Rx queues to the eventdev Rx adapter.
The existing rte_event_eth_rx_adapter_queue_add() API supports adding
multiple queues by specifying rx_queue_id = -1, but it cannot apply a
distinct configuration to each added queue.
A new internal PMD operation, eventdev_eth_rx_adapter_queues_add_t, has
been introduced to enable this functionality. It takes an array of
receive queue IDs along with their corresponding queue configurations.
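For illustration only, a driver wiring up the new op essentially walks the
two arrays; the xyz_* names below are made up and do not refer to any
existing PMD:

	static int
	xyz_rx_adapter_queues_add(const struct rte_eventdev *dev,
				  const struct rte_eth_dev *eth_dev,
				  int32_t rx_queue_id[],
				  const struct rte_event_eth_rx_adapter_queue_conf queue_conf[],
				  uint16_t nb_rx_queues)
	{
		uint16_t i;
		int rc;

		/* nb_rx_queues == 0 requests all configured Rx queues, all using queue_conf[0]. */
		if (nb_rx_queues == 0)
			return xyz_add_all_rx_queues(dev, eth_dev, &queue_conf[0]);

		for (i = 0; i < nb_rx_queues; i++) {
			rc = xyz_add_one_rx_queue(dev, eth_dev, rx_queue_id[i], &queue_conf[i]);
			if (rc < 0)
				return rc;
		}

		return 0;
	}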
Signed-off-by: Shijith Thotton <sthotton@marvell.com>
---
lib/eventdev/eventdev_pmd.h | 34 ++++++
lib/eventdev/eventdev_trace.h | 14 +++
lib/eventdev/eventdev_trace_points.c | 3 +
lib/eventdev/rte_event_eth_rx_adapter.c | 146 ++++++++++++++++++++++++
lib/eventdev/rte_event_eth_rx_adapter.h | 33 ++++++
lib/eventdev/version.map | 3 +
6 files changed, 233 insertions(+)
diff --git a/lib/eventdev/eventdev_pmd.h b/lib/eventdev/eventdev_pmd.h
index 36148f8d86..ad13ba5b03 100644
--- a/lib/eventdev/eventdev_pmd.h
+++ b/lib/eventdev/eventdev_pmd.h
@@ -25,6 +25,7 @@
#include <rte_mbuf_dyn.h>
#include "event_timer_adapter_pmd.h"
+#include "rte_event_eth_rx_adapter.h"
#include "rte_eventdev.h"
#ifdef __cplusplus
@@ -708,6 +709,37 @@ typedef int (*eventdev_eth_rx_adapter_queue_add_t)(
int32_t rx_queue_id,
const struct rte_event_eth_rx_adapter_queue_conf *queue_conf);
+/**
+ * Add ethernet Rx queues to event device in burst. This callback is invoked if
+ * the caps returned from rte_eventdev_eth_rx_adapter_caps_get(, eth_port_id)
+ * has RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT set.
+ *
+ * @param dev
+ * Event device pointer
+ *
+ * @param eth_dev
+ * Ethernet device pointer
+ *
+ * @param rx_queue_id
+ * Ethernet device receive queue index array
+ *
+ * @param queue_conf
+ * Additional configuration structure array
+ *
+ * @param nb_rx_queues
+ * Number of ethernet device receive queues
+ *
+ * @return
+ * - 0: Success, ethernet receive queues added successfully.
+ * - <0: Error code returned by the driver function.
+ */
+typedef int (*eventdev_eth_rx_adapter_queues_add_t)(
+ const struct rte_eventdev *dev,
+ const struct rte_eth_dev *eth_dev,
+ int32_t rx_queue_id[],
+ const struct rte_event_eth_rx_adapter_queue_conf queue_conf[],
+ uint16_t nb_rx_queues);
+
/**
* Delete ethernet Rx queues from event device. This callback is invoked if
* the caps returned from eventdev_eth_rx_adapter_caps_get(, eth_port_id)
@@ -1578,6 +1610,8 @@ struct eventdev_ops {
/**< Get ethernet Rx adapter capabilities */
eventdev_eth_rx_adapter_queue_add_t eth_rx_adapter_queue_add;
/**< Add Rx queues to ethernet Rx adapter */
+ eventdev_eth_rx_adapter_queues_add_t eth_rx_adapter_queues_add;
+ /**< Add Rx queues to ethernet Rx adapter in burst */
eventdev_eth_rx_adapter_queue_del_t eth_rx_adapter_queue_del;
/**< Delete Rx queues from ethernet Rx adapter */
eventdev_eth_rx_adapter_queue_conf_get_t eth_rx_adapter_queue_conf_get;
diff --git a/lib/eventdev/eventdev_trace.h b/lib/eventdev/eventdev_trace.h
index 8ff8841729..6b334d8bd1 100644
--- a/lib/eventdev/eventdev_trace.h
+++ b/lib/eventdev/eventdev_trace.h
@@ -159,6 +159,20 @@ RTE_TRACE_POINT(
rte_trace_point_emit_int(rc);
)
+RTE_TRACE_POINT(
+ rte_eventdev_trace_eth_rx_adapter_queues_add,
+ RTE_TRACE_POINT_ARGS(uint8_t adptr_id, uint16_t eth_dev_id,
+ uint16_t nb_rx_queues, void *rx_queue_id,
+ const void *queue_conf,
+ int rc),
+ rte_trace_point_emit_u8(adptr_id);
+ rte_trace_point_emit_u16(eth_dev_id);
+ rte_trace_point_emit_u16(nb_rx_queues);
+ rte_trace_point_emit_ptr(rx_queue_id);
+ rte_trace_point_emit_ptr(queue_conf);
+ rte_trace_point_emit_int(rc);
+)
+
RTE_TRACE_POINT(
rte_eventdev_trace_eth_rx_adapter_queue_del,
RTE_TRACE_POINT_ARGS(uint8_t adptr_id, uint16_t eth_dev_id,
diff --git a/lib/eventdev/eventdev_trace_points.c b/lib/eventdev/eventdev_trace_points.c
index e7af1591f7..8caf6353a1 100644
--- a/lib/eventdev/eventdev_trace_points.c
+++ b/lib/eventdev/eventdev_trace_points.c
@@ -65,6 +65,9 @@ RTE_TRACE_POINT_REGISTER(rte_eventdev_trace_eth_rx_adapter_free,
RTE_TRACE_POINT_REGISTER(rte_eventdev_trace_eth_rx_adapter_queue_add,
lib.eventdev.rx.adapter.queue.add)
+RTE_TRACE_POINT_REGISTER(rte_eventdev_trace_eth_rx_adapter_queues_add,
+ lib.eventdev.rx.adapter.queues.add)
+
RTE_TRACE_POINT_REGISTER(rte_eventdev_trace_eth_rx_adapter_queue_del,
lib.eventdev.rx.adapter.queue.del)
diff --git a/lib/eventdev/rte_event_eth_rx_adapter.c b/lib/eventdev/rte_event_eth_rx_adapter.c
index 39674c4604..c5a357aa85 100644
--- a/lib/eventdev/rte_event_eth_rx_adapter.c
+++ b/lib/eventdev/rte_event_eth_rx_adapter.c
@@ -2793,6 +2793,152 @@ rte_event_eth_rx_adapter_queue_add(uint8_t id,
return 0;
}
+int
+rte_event_eth_rx_adapter_queues_add(uint8_t id, uint16_t eth_dev_id, int32_t rx_queue_id[],
+ const struct rte_event_eth_rx_adapter_queue_conf queue_conf[],
+ uint16_t nb_rx_queues)
+{
+ struct rte_event_eth_rx_adapter_vector_limits limits;
+ struct event_eth_rx_adapter *rx_adapter;
+ struct eth_device_info *dev_info;
+ struct rte_eventdev *dev;
+ uint32_t cap;
+ int32_t i;
+ int ret;
+
+ if (rxa_memzone_lookup())
+ return -ENOMEM;
+
+ RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
+
+ rx_adapter = rxa_id_to_adapter(id);
+ if ((rx_adapter == NULL) || (queue_conf == NULL))
+ return -EINVAL;
+
+ if (nb_rx_queues && rx_queue_id == NULL)
+ return -EINVAL;
+
+ if (nb_rx_queues > rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
+ RTE_EDEV_LOG_ERR("Invalid number of rx queues %" PRIu16, nb_rx_queues);
+ return -EINVAL;
+ }
+
+ ret = rte_event_eth_rx_adapter_caps_get(rx_adapter->eventdev_id, eth_dev_id, &cap);
+ if (ret) {
+ RTE_EDEV_LOG_ERR("Failed to get adapter caps edev %" PRIu8 "eth port %" PRIu16, id,
+ eth_dev_id);
+ return ret;
+ }
+
+ for (i = 0; i < (nb_rx_queues ? nb_rx_queues : 1); i++) {
+ const struct rte_event_eth_rx_adapter_queue_conf *conf;
+
+ conf = &queue_conf[i];
+ if ((cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID) == 0 &&
+ (conf->rx_queue_flags & RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID)) {
+ RTE_EDEV_LOG_ERR("Flow ID override is not supported,"
+ " eth port: %" PRIu16 " adapter id: %" PRIu8,
+ eth_dev_id, id);
+ return -EINVAL;
+ }
+
+ if (conf->rx_queue_flags & RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR) {
+ if ((cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR) == 0) {
+ RTE_EDEV_LOG_ERR("Event vectorization is not supported,"
+ " eth port: %" PRIu16 " adapter id: %" PRIu8,
+ eth_dev_id, id);
+ return -EINVAL;
+ }
+
+ ret = rte_event_eth_rx_adapter_vector_limits_get(rx_adapter->eventdev_id,
+ eth_dev_id, &limits);
+ if (ret < 0) {
+ RTE_EDEV_LOG_ERR("Failed to get event device vector limits,"
+ " eth port: %" PRIu16 " adapter id: %" PRIu8,
+ eth_dev_id, id);
+ return -EINVAL;
+ }
+
+ if (conf->vector_sz < limits.min_sz || conf->vector_sz > limits.max_sz ||
+ conf->vector_timeout_ns < limits.min_timeout_ns ||
+ conf->vector_timeout_ns > limits.max_timeout_ns ||
+ conf->vector_mp == NULL) {
+ RTE_EDEV_LOG_ERR("Invalid event vector configuration,"
+ " eth port: %" PRIu16 " adapter id: %" PRIu8,
+ eth_dev_id, id);
+ return -EINVAL;
+ }
+
+ if (conf->vector_mp->elt_size < (sizeof(struct rte_event_vector) +
+ (sizeof(uintptr_t) * conf->vector_sz))) {
+ RTE_EDEV_LOG_ERR("Invalid event vector configuration,"
+ " eth port: %" PRIu16 " adapter id: %" PRIu8,
+ eth_dev_id, id);
+ return -EINVAL;
+ }
+ }
+
+ if ((rx_adapter->use_queue_event_buf && conf->event_buf_size == 0) ||
+ (!rx_adapter->use_queue_event_buf && conf->event_buf_size != 0)) {
+ RTE_EDEV_LOG_ERR("Invalid Event buffer size for the queue");
+ return -EINVAL;
+ }
+ }
+
+ dev = &rte_eventdevs[rx_adapter->eventdev_id];
+ dev_info = &rx_adapter->eth_devices[eth_dev_id];
+
+ if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) {
+ if (*dev->dev_ops->eth_rx_adapter_queues_add == NULL)
+ return -ENOTSUP;
+
+ if (dev_info->rx_queue == NULL) {
+ dev_info->rx_queue =
+ rte_zmalloc_socket(rx_adapter->mem_name,
+ dev_info->dev->data->nb_rx_queues *
+ sizeof(struct eth_rx_queue_info),
+ 0, rx_adapter->socket_id);
+ if (dev_info->rx_queue == NULL)
+ return -ENOMEM;
+ }
+
+ ret = (*dev->dev_ops->eth_rx_adapter_queues_add)(
+ dev, &rte_eth_devices[eth_dev_id], rx_queue_id, queue_conf, nb_rx_queues);
+ if (ret == 0) {
+ dev_info->internal_event_port = 1;
+
+ if (nb_rx_queues == 0)
+ rxa_update_queue(rx_adapter, dev_info, -1, 1);
+
+ for (i = 0; i < nb_rx_queues; i++)
+ rxa_update_queue(rx_adapter, dev_info, rx_queue_id[i], 1);
+ }
+ } else {
+ rte_spinlock_lock(&rx_adapter->rx_lock);
+ dev_info->internal_event_port = 0;
+ ret = rxa_init_service(rx_adapter, id);
+ if (ret == 0) {
+ uint32_t service_id = rx_adapter->service_id;
+
+ if (nb_rx_queues == 0)
+ ret = rxa_sw_add(rx_adapter, eth_dev_id, -1, &queue_conf[0]);
+
+ for (i = 0; i < nb_rx_queues; i++)
+ ret = rxa_sw_add(rx_adapter, eth_dev_id, rx_queue_id[i],
+ &queue_conf[i]);
+
+ rte_service_component_runstate_set(service_id,
+ rxa_sw_adapter_queue_count(rx_adapter));
+ }
+ rte_spinlock_unlock(&rx_adapter->rx_lock);
+ }
+
+ rte_eventdev_trace_eth_rx_adapter_queues_add(id, eth_dev_id, nb_rx_queues, rx_queue_id,
+ queue_conf, ret);
+ return ret;
+}
+
static int
rxa_sw_vector_limits(struct rte_event_eth_rx_adapter_vector_limits *limits)
{
diff --git a/lib/eventdev/rte_event_eth_rx_adapter.h b/lib/eventdev/rte_event_eth_rx_adapter.h
index 9237e198a7..758e1c5f56 100644
--- a/lib/eventdev/rte_event_eth_rx_adapter.h
+++ b/lib/eventdev/rte_event_eth_rx_adapter.h
@@ -553,6 +553,39 @@ int rte_event_eth_rx_adapter_queue_add(uint8_t id,
int32_t rx_queue_id,
const struct rte_event_eth_rx_adapter_queue_conf *conf);
+/**
+ * Add multiple receive queues to an event adapter.
+ *
+ * @param id
+ * Adapter identifier.
+ *
+ * @param eth_dev_id
+ * Port identifier of Ethernet device.
+ *
+ * @param rx_queue_id
+ * Array of Ethernet device receive queue indices.
+ * If nb_rx_queues is 0, then rx_queue_id is ignored.
+ *
+ * @param conf
+ * Array of additional configuration structures of type
+ * *rte_event_eth_rx_adapter_queue_conf*. conf[i] is used for rx_queue_id[i].
+ * If nb_rx_queues is 0, then conf[0] is used for all Rx queues.
+ *
+ * @param nb_rx_queues
+ * Number of receive queues to add.
+ * If nb_rx_queues is 0, then all Rx queues configured for
+ * the device are added with the same configuration in conf[0].
+ * @see RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ
+ *
+ * @return
+ * - 0: Success, Receive queues added correctly.
+ * - <0: Error code on failure.
+ */
+__rte_experimental
+int rte_event_eth_rx_adapter_queues_add(uint8_t id, uint16_t eth_dev_id, int32_t rx_queue_id[],
+ const struct rte_event_eth_rx_adapter_queue_conf conf[],
+ uint16_t nb_rx_queues);
+
/**
* Delete receive queue from an event adapter.
*
diff --git a/lib/eventdev/version.map b/lib/eventdev/version.map
index 42a5867aba..44687255cb 100644
--- a/lib/eventdev/version.map
+++ b/lib/eventdev/version.map
@@ -153,6 +153,9 @@ EXPERIMENTAL {
__rte_eventdev_trace_port_preschedule_modify;
rte_event_port_preschedule;
__rte_eventdev_trace_port_preschedule;
+
+ # added in 25.03
+ rte_event_eth_rx_adapter_queues_add;
};
INTERNAL {
--
2.25.1
* [PATCH 2/3] event/cnxk: enable PMD op to burst add queues to Rx adapter
From: Shijith Thotton @ 2025-02-07 14:09 UTC (permalink / raw)
To: jerinj; +Cc: pbhagavatula, Shijith Thotton, dev
Implemented the eventdev PMD operation to add Rx adapter queues in bursts
in the CNXK event device driver.
Signed-off-by: Shijith Thotton <sthotton@marvell.com>
---
drivers/event/cnxk/cn10k_eventdev.c | 82 ++++++++--
drivers/event/cnxk/cn20k_eventdev.c | 195 ++++++++++++++++-------
drivers/event/cnxk/cn9k_eventdev.c | 38 ++++-
drivers/event/cnxk/cnxk_eventdev.h | 8 +-
drivers/event/cnxk/cnxk_eventdev_adptr.c | 102 +++++++-----
5 files changed, 308 insertions(+), 117 deletions(-)
diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c
index f2e591f547..3832eb7e00 100644
--- a/drivers/event/cnxk/cn10k_eventdev.c
+++ b/drivers/event/cnxk/cn10k_eventdev.c
@@ -685,6 +685,22 @@ cn10k_sso_rx_offload_cb(uint16_t port_id, uint64_t flags)
eventdev_fops_update(event_dev);
}
+static int
+cn10k_sso_configure_queue_stash_default(struct cnxk_sso_evdev *dev, uint16_t hwgrp)
+{
+ struct roc_sso_hwgrp_stash stash;
+ int rc;
+
+ stash.hwgrp = hwgrp;
+ stash.stash_offset = CN10K_SSO_DEFAULT_STASH_OFFSET;
+ stash.stash_count = CN10K_SSO_DEFAULT_STASH_LENGTH;
+ rc = roc_sso_hwgrp_stash_config(&dev->sso, &stash, 1);
+ if (rc)
+ plt_warn("failed to configure HWGRP WQE stashing rc = %d", rc);
+
+ return rc;
+}
+
static int
cn10k_sso_rx_adapter_queue_add(
const struct rte_eventdev *event_dev, const struct rte_eth_dev *eth_dev,
@@ -693,8 +709,8 @@ cn10k_sso_rx_adapter_queue_add(
{
struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;
struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
- struct roc_sso_hwgrp_stash stash;
struct cn10k_eth_rxq *rxq;
+ uint16_t nb_rx_queues;
void *lookup_mem;
int rc;
@@ -702,8 +718,42 @@ cn10k_sso_rx_adapter_queue_add(
if (rc)
return -EINVAL;
- rc = cnxk_sso_rx_adapter_queue_add(event_dev, eth_dev, rx_queue_id,
- queue_conf);
+ nb_rx_queues = rx_queue_id == -1 ? 0 : 1;
+ rc = cnxk_sso_rx_adapter_queues_add(event_dev, eth_dev, &rx_queue_id, queue_conf,
+ nb_rx_queues);
+ if (rc)
+ return -EINVAL;
+
+ cnxk_eth_dev->cnxk_sso_ptp_tstamp_cb = cn10k_sso_tstamp_hdl_update;
+ cnxk_eth_dev->evdev_priv = (struct rte_eventdev *)(uintptr_t)event_dev;
+
+ rxq = eth_dev->data->rx_queues[0];
+ lookup_mem = rxq->lookup_mem;
+ cn10k_sso_set_priv_mem(event_dev, lookup_mem);
+ cn10k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);
+ if (roc_feature_sso_has_stash() && dev->nb_event_ports > 1)
+ rc = cn10k_sso_configure_queue_stash_default(dev, queue_conf->ev.queue_id);
+
+ return rc;
+}
+static int
+cn10k_sso_rx_adapter_queues_add(const struct rte_eventdev *event_dev,
+ const struct rte_eth_dev *eth_dev, int32_t rx_queue_id[],
+ const struct rte_event_eth_rx_adapter_queue_conf queue_conf[],
+ uint16_t nb_rx_queues)
+{
+ struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;
+ struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
+ struct cn10k_eth_rxq *rxq;
+ void *lookup_mem;
+ int rc, i;
+
+ rc = strncmp(eth_dev->device->driver->name, "net_cn10k", 8);
+ if (rc)
+ return -EINVAL;
+
+ rc = cnxk_sso_rx_adapter_queues_add(event_dev, eth_dev, rx_queue_id, queue_conf,
+ nb_rx_queues);
if (rc)
return -EINVAL;
@@ -715,15 +765,24 @@ cn10k_sso_rx_adapter_queue_add(
cn10k_sso_set_priv_mem(event_dev, lookup_mem);
cn10k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);
if (roc_feature_sso_has_stash() && dev->nb_event_ports > 1) {
- stash.hwgrp = queue_conf->ev.queue_id;
- stash.stash_offset = CN10K_SSO_DEFAULT_STASH_OFFSET;
- stash.stash_count = CN10K_SSO_DEFAULT_STASH_LENGTH;
- rc = roc_sso_hwgrp_stash_config(&dev->sso, &stash, 1);
- if (rc < 0)
- plt_warn("failed to configure HWGRP WQE stashing rc = %d", rc);
+ uint16_t hwgrp = dev->sso.max_hwgrp;
+
+ if (nb_rx_queues == 0)
+ rc = cn10k_sso_configure_queue_stash_default(dev,
+ queue_conf[0].ev.queue_id);
+
+ for (i = 0; i < nb_rx_queues; i++) {
+ if (hwgrp == queue_conf[i].ev.queue_id)
+ continue;
+
+ hwgrp = queue_conf[i].ev.queue_id;
+ rc = cn10k_sso_configure_queue_stash_default(dev, hwgrp);
+ if (rc < 0)
+ break;
+ }
}
- return 0;
+ return rc;
}
static int
@@ -987,8 +1046,6 @@ cn10k_dma_adapter_vchan_del(const struct rte_eventdev *event_dev,
return cnxk_dma_adapter_vchan_del(dma_dev_id, vchan_id);
}
-
-
static struct eventdev_ops cn10k_sso_dev_ops = {
.dev_infos_get = cn10k_sso_info_get,
.dev_configure = cn10k_sso_dev_configure,
@@ -1010,6 +1067,7 @@ static struct eventdev_ops cn10k_sso_dev_ops = {
.eth_rx_adapter_caps_get = cn10k_sso_rx_adapter_caps_get,
.eth_rx_adapter_queue_add = cn10k_sso_rx_adapter_queue_add,
+ .eth_rx_adapter_queues_add = cn10k_sso_rx_adapter_queues_add,
.eth_rx_adapter_queue_del = cn10k_sso_rx_adapter_queue_del,
.eth_rx_adapter_start = cnxk_sso_rx_adapter_start,
.eth_rx_adapter_stop = cnxk_sso_rx_adapter_stop,
diff --git a/drivers/event/cnxk/cn20k_eventdev.c b/drivers/event/cnxk/cn20k_eventdev.c
index d68700fc05..0688cf97e5 100644
--- a/drivers/event/cnxk/cn20k_eventdev.c
+++ b/drivers/event/cnxk/cn20k_eventdev.c
@@ -717,79 +717,115 @@ cn20k_sso_rx_adapter_vwqe_enable(struct cnxk_sso_evdev *dev, uint16_t port_id, u
}
static int
-cn20k_rx_adapter_queue_add(const struct rte_eventdev *event_dev, const struct rte_eth_dev *eth_dev,
- int32_t rx_queue_id,
- const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
+cn20k_rx_adapter_queue_del(const struct rte_eventdev *event_dev, const struct rte_eth_dev *eth_dev,
+ int32_t rx_queue_id)
{
struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;
struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
- uint16_t port = eth_dev->data->port_id;
- struct cnxk_eth_rxq_sp *rxq_sp;
- int i, rc = 0, agq = 0;
+ struct roc_nix_rq *rxq;
+ int i, rc = 0;
+ RTE_SET_USED(event_dev);
if (rx_queue_id < 0) {
for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
- rc |= cn20k_rx_adapter_queue_add(event_dev, eth_dev, i, queue_conf);
+ cn20k_rx_adapter_queue_del(event_dev, eth_dev, i);
} else {
- rxq_sp = cnxk_eth_rxq_to_sp(eth_dev->data->rx_queues[rx_queue_id]);
+ rxq = &cnxk_eth_dev->rqs[rx_queue_id];
+ if (rxq->tt == SSO_TT_AGG)
+ roc_sso_hwgrp_agq_free(&dev->sso, rxq->hwgrp, rxq->tag_mask);
+ rc = cnxk_sso_rxq_disable(eth_dev, (uint16_t)rx_queue_id);
+ cnxk_eth_dev->nb_rxq_sso--;
+ }
+
+ if (rc < 0)
+ plt_err("Failed to clear Rx adapter config port=%d, q=%d", eth_dev->data->port_id,
+ rx_queue_id);
+ return rc;
+}
+
+static int
+cn20k_rx_adapter_queues_add(const struct rte_eventdev *event_dev, const struct rte_eth_dev *eth_dev,
+ int32_t rx_queue_id[],
+ const struct rte_event_eth_rx_adapter_queue_conf queue_conf[],
+ uint16_t nb_rx_queues)
+{
+ struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;
+ struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
+ const struct rte_event_eth_rx_adapter_queue_conf *conf;
+ uint64_t old_xae_cnt = dev->adptr_xae_cnt;
+ uint16_t port = eth_dev->data->port_id;
+ struct cnxk_eth_rxq_sp *rxq_sp;
+ uint16_t max_rx_queues;
+ int i, rc = 0, agq = 0;
+ int32_t queue_id;
+
+ max_rx_queues = nb_rx_queues ? nb_rx_queues : eth_dev->data->nb_rx_queues;
+ for (i = 0; i < max_rx_queues; i++) {
+ conf = nb_rx_queues ? &queue_conf[i] : &queue_conf[0];
+ queue_id = nb_rx_queues ? rx_queue_id[i] : i;
+ rxq_sp = cnxk_eth_rxq_to_sp(eth_dev->data->rx_queues[queue_id]);
cnxk_sso_updt_xae_cnt(dev, rxq_sp, RTE_EVENT_TYPE_ETHDEV);
+
+ if (conf->rx_queue_flags & RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR)
+ cnxk_sso_updt_xae_cnt(dev, conf->vector_mp, RTE_EVENT_TYPE_ETHDEV_VECTOR);
+ }
+
+ if (dev->adptr_xae_cnt != old_xae_cnt) {
rc = cnxk_sso_xae_reconfigure((struct rte_eventdev *)(uintptr_t)event_dev);
- if (queue_conf->rx_queue_flags & RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR) {
- cnxk_sso_updt_xae_cnt(dev, queue_conf->vector_mp,
- RTE_EVENT_TYPE_ETHDEV_VECTOR);
- rc = cnxk_sso_xae_reconfigure((struct rte_eventdev *)(uintptr_t)event_dev);
- if (rc < 0)
- return rc;
+ if (rc < 0)
+ return rc;
+ }
+
+ for (i = 0; i < max_rx_queues; i++) {
+ conf = nb_rx_queues ? &queue_conf[i] : &queue_conf[0];
+ queue_id = nb_rx_queues ? rx_queue_id[i] : i;
+ if (conf->rx_queue_flags & RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR) {
+ rc = cn20k_sso_rx_adapter_vwqe_enable(dev, port, queue_id, conf);
+ if (rc < 0) {
+ plt_err("Failed to enable VWQE, port=%d, rxq=%d", port, queue_id);
+ goto fail;
+ }
- rc = cn20k_sso_rx_adapter_vwqe_enable(dev, port, rx_queue_id, queue_conf);
- if (rc < 0)
- return rc;
agq = rc;
}
- rc = cn20k_sso_rxq_enable(cnxk_eth_dev, (uint16_t)rx_queue_id, port, queue_conf,
- agq);
+ rc = cn20k_sso_rxq_enable(cnxk_eth_dev, (uint16_t)queue_id, port, conf, agq);
+ if (rc < 0) {
+ plt_err("Failed to enable Rx queue, port=%d, rxq=%d", port, queue_id);
+ goto fail;
+ }
- /* Propagate force bp devarg */
- cnxk_eth_dev->nix.force_rx_aura_bp = dev->force_ena_bp;
- cnxk_sso_tstamp_cfg(port, eth_dev, dev);
cnxk_eth_dev->nb_rxq_sso++;
}
- if (rc < 0) {
- plt_err("Failed to configure Rx adapter port=%d, q=%d", port,
- queue_conf->ev.queue_id);
- return rc;
- }
-
+ /* Propagate force bp devarg */
+ cnxk_eth_dev->nix.force_rx_aura_bp = dev->force_ena_bp;
+ cnxk_sso_tstamp_cfg(port, eth_dev, dev);
dev->rx_offloads |= cnxk_eth_dev->rx_offload_flags;
return 0;
+
+fail:
+ for (i = cnxk_eth_dev->nb_rxq_sso - 1; i >= 0; i--) {
+ queue_id = nb_rx_queues ? rx_queue_id[i] : i;
+ cn20k_rx_adapter_queue_del(event_dev, eth_dev, queue_id);
+ }
+
+ return rc;
}
static int
-cn20k_rx_adapter_queue_del(const struct rte_eventdev *event_dev, const struct rte_eth_dev *eth_dev,
- int32_t rx_queue_id)
+cn20k_sso_configure_queue_stash_default(struct cnxk_sso_evdev *dev, uint16_t hwgrp)
{
- struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;
- struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
- struct roc_nix_rq *rxq;
- int i, rc = 0;
-
- RTE_SET_USED(event_dev);
- if (rx_queue_id < 0) {
- for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
- cn20k_rx_adapter_queue_del(event_dev, eth_dev, i);
- } else {
- rxq = &cnxk_eth_dev->rqs[rx_queue_id];
- if (rxq->tt == SSO_TT_AGG)
- roc_sso_hwgrp_agq_free(&dev->sso, rxq->hwgrp, rxq->tag_mask);
- rc = cnxk_sso_rxq_disable(eth_dev, (uint16_t)rx_queue_id);
- cnxk_eth_dev->nb_rxq_sso--;
- }
+ struct roc_sso_hwgrp_stash stash;
+ int rc;
+ stash.hwgrp = hwgrp;
+ stash.stash_offset = CN20K_SSO_DEFAULT_STASH_OFFSET;
+ stash.stash_count = CN20K_SSO_DEFAULT_STASH_LENGTH;
+ rc = roc_sso_hwgrp_stash_config(&dev->sso, &stash, 1);
if (rc < 0)
- plt_err("Failed to clear Rx adapter config port=%d, q=%d", eth_dev->data->port_id,
- rx_queue_id);
+ plt_warn("failed to configure HWGRP WQE stashing rc = %d", rc);
+
return rc;
}
@@ -800,8 +836,8 @@ cn20k_sso_rx_adapter_queue_add(const struct rte_eventdev *event_dev,
{
struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;
struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
- struct roc_sso_hwgrp_stash stash;
struct cn20k_eth_rxq *rxq;
+ uint16_t nb_rx_queues;
void *lookup_mem;
int rc;
@@ -809,7 +845,42 @@ cn20k_sso_rx_adapter_queue_add(const struct rte_eventdev *event_dev,
if (rc)
return -EINVAL;
- rc = cn20k_rx_adapter_queue_add(event_dev, eth_dev, rx_queue_id, queue_conf);
+ nb_rx_queues = rx_queue_id == -1 ? 0 : 1;
+ rc = cn20k_rx_adapter_queues_add(event_dev, eth_dev, &rx_queue_id, queue_conf,
+ nb_rx_queues);
+ if (rc)
+ return -EINVAL;
+
+ cnxk_eth_dev->cnxk_sso_ptp_tstamp_cb = cn20k_sso_tstamp_hdl_update;
+ cnxk_eth_dev->evdev_priv = (struct rte_eventdev *)(uintptr_t)event_dev;
+
+ rxq = eth_dev->data->rx_queues[0];
+ lookup_mem = rxq->lookup_mem;
+ cn20k_sso_set_priv_mem(event_dev, lookup_mem);
+ cn20k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);
+ if (roc_feature_sso_has_stash() && dev->nb_event_ports > 1)
+ rc = cn20k_sso_configure_queue_stash_default(dev, queue_conf->ev.queue_id);
+
+ return rc;
+}
+
+static int
+cn20k_sso_rx_adapter_queues_add(const struct rte_eventdev *event_dev,
+ const struct rte_eth_dev *eth_dev, int32_t rx_queue_id[],
+ const struct rte_event_eth_rx_adapter_queue_conf queue_conf[],
+ uint16_t nb_rx_queues)
+{
+ struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;
+ struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
+ struct cn20k_eth_rxq *rxq;
+ void *lookup_mem;
+ int rc, i;
+
+ rc = strncmp(eth_dev->device->driver->name, "net_cn20k", 8);
+ if (rc)
+ return -EINVAL;
+
+ rc = cn20k_rx_adapter_queues_add(event_dev, eth_dev, rx_queue_id, queue_conf, nb_rx_queues);
if (rc)
return -EINVAL;
@@ -821,15 +892,24 @@ cn20k_sso_rx_adapter_queue_add(const struct rte_eventdev *event_dev,
cn20k_sso_set_priv_mem(event_dev, lookup_mem);
cn20k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);
if (roc_feature_sso_has_stash() && dev->nb_event_ports > 1) {
- stash.hwgrp = queue_conf->ev.queue_id;
- stash.stash_offset = CN20K_SSO_DEFAULT_STASH_OFFSET;
- stash.stash_count = CN20K_SSO_DEFAULT_STASH_LENGTH;
- rc = roc_sso_hwgrp_stash_config(&dev->sso, &stash, 1);
- if (rc < 0)
- plt_warn("failed to configure HWGRP WQE stashing rc = %d", rc);
+ uint16_t hwgrp = dev->sso.max_hwgrp;
+
+ if (nb_rx_queues == 0)
+ rc = cn20k_sso_configure_queue_stash_default(dev,
+ queue_conf[0].ev.queue_id);
+
+ for (i = 0; i < nb_rx_queues; i++) {
+ if (hwgrp == queue_conf[i].ev.queue_id)
+ continue;
+
+ hwgrp = queue_conf[i].ev.queue_id;
+ rc = cn20k_sso_configure_queue_stash_default(dev, hwgrp);
+ if (rc < 0)
+ break;
+ }
}
- return 0;
+ return rc;
}
static int
@@ -985,6 +1065,7 @@ static struct eventdev_ops cn20k_sso_dev_ops = {
.eth_rx_adapter_caps_get = cn20k_sso_rx_adapter_caps_get,
.eth_rx_adapter_queue_add = cn20k_sso_rx_adapter_queue_add,
+ .eth_rx_adapter_queues_add = cn20k_sso_rx_adapter_queues_add,
.eth_rx_adapter_queue_del = cn20k_sso_rx_adapter_queue_del,
.eth_rx_adapter_start = cnxk_sso_rx_adapter_start,
.eth_rx_adapter_stop = cnxk_sso_rx_adapter_stop,
diff --git a/drivers/event/cnxk/cn9k_eventdev.c b/drivers/event/cnxk/cn9k_eventdev.c
index 05e237c005..5f24366770 100644
--- a/drivers/event/cnxk/cn9k_eventdev.c
+++ b/drivers/event/cnxk/cn9k_eventdev.c
@@ -871,6 +871,7 @@ cn9k_sso_rx_adapter_queue_add(
{
struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;
struct cn9k_eth_rxq *rxq;
+ uint16_t nb_rx_queues;
void *lookup_mem;
int rc;
@@ -878,8 +879,40 @@ cn9k_sso_rx_adapter_queue_add(
if (rc)
return -EINVAL;
- rc = cnxk_sso_rx_adapter_queue_add(event_dev, eth_dev, rx_queue_id,
- queue_conf);
+ nb_rx_queues = rx_queue_id == -1 ? 0 : 1;
+ rc = cnxk_sso_rx_adapter_queues_add(event_dev, eth_dev, &rx_queue_id, queue_conf,
+ nb_rx_queues);
+ if (rc)
+ return -EINVAL;
+
+ cnxk_eth_dev->cnxk_sso_ptp_tstamp_cb = cn9k_sso_tstamp_hdl_update;
+ cnxk_eth_dev->evdev_priv = (struct rte_eventdev *)(uintptr_t)event_dev;
+
+ rxq = eth_dev->data->rx_queues[0];
+ lookup_mem = rxq->lookup_mem;
+ cn9k_sso_set_priv_mem(event_dev, lookup_mem);
+ cn9k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);
+
+ return 0;
+}
+
+static int
+cn9k_sso_rx_adapter_queues_add(const struct rte_eventdev *event_dev,
+ const struct rte_eth_dev *eth_dev, int32_t rx_queue_id[],
+ const struct rte_event_eth_rx_adapter_queue_conf queue_conf[],
+ uint16_t nb_rx_queues)
+{
+ struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;
+ struct cn9k_eth_rxq *rxq;
+ void *lookup_mem;
+ int rc;
+
+ rc = strncmp(eth_dev->device->driver->name, "net_cn9k", 8);
+ if (rc)
+ return -EINVAL;
+
+ rc = cnxk_sso_rx_adapter_queues_add(event_dev, eth_dev, rx_queue_id, queue_conf,
+ nb_rx_queues);
if (rc)
return -EINVAL;
@@ -1131,6 +1164,7 @@ static struct eventdev_ops cn9k_sso_dev_ops = {
.eth_rx_adapter_caps_get = cn9k_sso_rx_adapter_caps_get,
.eth_rx_adapter_queue_add = cn9k_sso_rx_adapter_queue_add,
+ .eth_rx_adapter_queues_add = cn9k_sso_rx_adapter_queues_add,
.eth_rx_adapter_queue_del = cn9k_sso_rx_adapter_queue_del,
.eth_rx_adapter_start = cnxk_sso_rx_adapter_start,
.eth_rx_adapter_stop = cnxk_sso_rx_adapter_stop,
diff --git a/drivers/event/cnxk/cnxk_eventdev.h b/drivers/event/cnxk/cnxk_eventdev.h
index 33b3538753..32991e51dc 100644
--- a/drivers/event/cnxk/cnxk_eventdev.h
+++ b/drivers/event/cnxk/cnxk_eventdev.h
@@ -255,10 +255,10 @@ int cnxk_sso_xstats_reset(struct rte_eventdev *event_dev,
void cn9k_sso_set_rsrc(void *arg);
/* Common adapter ops */
-int cnxk_sso_rx_adapter_queue_add(
- const struct rte_eventdev *event_dev, const struct rte_eth_dev *eth_dev,
- int32_t rx_queue_id,
- const struct rte_event_eth_rx_adapter_queue_conf *queue_conf);
+int cnxk_sso_rx_adapter_queues_add(const struct rte_eventdev *event_dev,
+ const struct rte_eth_dev *eth_dev, int32_t rx_queue_id[],
+ const struct rte_event_eth_rx_adapter_queue_conf queue_conf[],
+ uint16_t nb_rx_queues);
int cnxk_sso_rx_adapter_queue_del(const struct rte_eventdev *event_dev,
const struct rte_eth_dev *eth_dev,
int32_t rx_queue_id);
diff --git a/drivers/event/cnxk/cnxk_eventdev_adptr.c b/drivers/event/cnxk/cnxk_eventdev_adptr.c
index 4cf48db74c..80f770ee8d 100644
--- a/drivers/event/cnxk/cnxk_eventdev_adptr.c
+++ b/drivers/event/cnxk/cnxk_eventdev_adptr.c
@@ -220,63 +220,81 @@ cnxk_sso_tstamp_cfg(uint16_t port_id, const struct rte_eth_dev *eth_dev, struct
}
int
-cnxk_sso_rx_adapter_queue_add(
- const struct rte_eventdev *event_dev, const struct rte_eth_dev *eth_dev,
- int32_t rx_queue_id,
- const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
+cnxk_sso_rx_adapter_queues_add(const struct rte_eventdev *event_dev,
+ const struct rte_eth_dev *eth_dev, int32_t rx_queue_id[],
+ const struct rte_event_eth_rx_adapter_queue_conf queue_conf[],
+ uint16_t nb_rx_queues)
{
struct cnxk_eth_dev *cnxk_eth_dev = eth_dev->data->dev_private;
struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
+ const struct rte_event_eth_rx_adapter_queue_conf *conf;
+ uint64_t old_xae_cnt = dev->adptr_xae_cnt;
uint16_t port = eth_dev->data->port_id;
struct cnxk_eth_rxq_sp *rxq_sp;
- int i, rc = 0;
+ bool vec_drop_reset = false;
+ uint16_t max_rx_queues;
+ int32_t queue_id;
+ int i, rc;
- if (rx_queue_id < 0) {
- for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
- rc |= cnxk_sso_rx_adapter_queue_add(event_dev, eth_dev,
- i, queue_conf);
- } else {
- rxq_sp = cnxk_eth_rxq_to_sp(
- eth_dev->data->rx_queues[rx_queue_id]);
+ max_rx_queues = nb_rx_queues ? nb_rx_queues : eth_dev->data->nb_rx_queues;
+ for (i = 0; i < max_rx_queues; i++) {
+ conf = nb_rx_queues ? &queue_conf[i] : &queue_conf[0];
+ queue_id = nb_rx_queues ? rx_queue_id[i] : i;
+
+ rxq_sp = cnxk_eth_rxq_to_sp(eth_dev->data->rx_queues[queue_id]);
cnxk_sso_updt_xae_cnt(dev, rxq_sp, RTE_EVENT_TYPE_ETHDEV);
- rc = cnxk_sso_xae_reconfigure(
- (struct rte_eventdev *)(uintptr_t)event_dev);
- rc |= cnxk_sso_rxq_enable(
- cnxk_eth_dev, (uint16_t)rx_queue_id, port,
- &queue_conf->ev,
- !!(queue_conf->rx_queue_flags &
- RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID));
- if (queue_conf->rx_queue_flags &
- RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR) {
- cnxk_sso_updt_xae_cnt(dev, queue_conf->vector_mp,
- RTE_EVENT_TYPE_ETHDEV_VECTOR);
- rc |= cnxk_sso_xae_reconfigure(
- (struct rte_eventdev *)(uintptr_t)event_dev);
- rc |= cnxk_sso_rx_adapter_vwqe_enable(
- cnxk_eth_dev, port, rx_queue_id,
- queue_conf->vector_sz,
- queue_conf->vector_timeout_ns,
- queue_conf->vector_mp);
-
- if (cnxk_eth_dev->vec_drop_re_dis)
- rc |= roc_nix_rx_drop_re_set(&cnxk_eth_dev->nix,
- false);
- }
- /* Propagate force bp devarg */
- cnxk_eth_dev->nix.force_rx_aura_bp = dev->force_ena_bp;
- cnxk_sso_tstamp_cfg(eth_dev->data->port_id, eth_dev, dev);
+ if (conf->rx_queue_flags & RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR)
+ cnxk_sso_updt_xae_cnt(dev, conf->vector_mp, RTE_EVENT_TYPE_ETHDEV_VECTOR);
+ }
+
+ if (dev->adptr_xae_cnt != old_xae_cnt) {
+ rc = cnxk_sso_xae_reconfigure((struct rte_eventdev *)(uintptr_t)event_dev);
+ if (rc < 0)
+ return rc;
+ }
+
+ for (i = 0; i < max_rx_queues; i++) {
+ conf = nb_rx_queues ? &queue_conf[i] : &queue_conf[0];
+ queue_id = nb_rx_queues ? rx_queue_id[i] : i;
+
+ rc = cnxk_sso_rxq_enable(
+ cnxk_eth_dev, (uint16_t)queue_id, port, &conf->ev,
+ !!(conf->rx_queue_flags & RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID));
+ if (rc < 0)
+ goto fail;
+
+ if (conf->rx_queue_flags & RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR) {
+ rc = cnxk_sso_rx_adapter_vwqe_enable(
+ cnxk_eth_dev, port, (uint16_t)queue_id, conf->vector_sz,
+ conf->vector_timeout_ns, conf->vector_mp);
+ if (rc < 0)
+ goto fail;
+
+ vec_drop_reset = true;
+ }
cnxk_eth_dev->nb_rxq_sso++;
}
- if (rc < 0) {
- plt_err("Failed to configure Rx adapter port=%d, q=%d", port,
- queue_conf->ev.queue_id);
- return rc;
+ if (cnxk_eth_dev->vec_drop_re_dis && vec_drop_reset) {
+ rc = roc_nix_rx_drop_re_set(&cnxk_eth_dev->nix, false);
+ if (rc < 0)
+ goto fail;
}
+ /* Propagate force bp devarg */
+ cnxk_eth_dev->nix.force_rx_aura_bp = dev->force_ena_bp;
+ cnxk_sso_tstamp_cfg(eth_dev->data->port_id, eth_dev, dev);
dev->rx_offloads |= cnxk_eth_dev->rx_offload_flags;
return 0;
+
+fail:
+ for (i = cnxk_eth_dev->nb_rxq_sso - 1; i >= 0; i--) {
+ queue_id = nb_rx_queues ? rx_queue_id[i] : i;
+ cnxk_sso_rx_adapter_queue_del(event_dev, eth_dev, queue_id);
+ }
+
+ return rc;
}
int
--
2.25.1
* [PATCH 3/3] test/event: unit test to burst add Rx queues to adapter
From: Shijith Thotton @ 2025-02-07 14:09 UTC (permalink / raw)
To: jerinj; +Cc: pbhagavatula, Shijith Thotton, dev
Added a unit test for adding queues to the Rx adapter in bursts using
rte_event_eth_rx_adapter_queues_add().
Signed-off-by: Shijith Thotton <sthotton@marvell.com>
---
app/test/test_event_eth_rx_adapter.c | 86 ++++++++++++++++++++++++++++
1 file changed, 86 insertions(+)
diff --git a/app/test/test_event_eth_rx_adapter.c b/app/test/test_event_eth_rx_adapter.c
index 0233c87779..92b7ff6d99 100644
--- a/app/test/test_event_eth_rx_adapter.c
+++ b/app/test/test_event_eth_rx_adapter.c
@@ -9,6 +9,7 @@
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_ethdev.h>
+#include <rte_malloc.h>
#ifdef RTE_EXEC_ENV_WINDOWS
static int
@@ -819,6 +820,89 @@ adapter_queue_add_del(void)
return TEST_SUCCESS;
}
+static int
+adapter_queues_add_del(void)
+{
+ struct rte_event_eth_rx_adapter_queue_conf *queue_conf;
+ struct rte_event_dev_info event_dev_info;
+ struct rte_eth_dev_info dev_info;
+ uint16_t i, max_rx_queues;
+ int32_t *rx_queue_ids;
+ struct rte_event ev;
+ uint32_t cap;
+ int err;
+
+ err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID, TEST_ETHDEV_ID, &cap);
+ TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+ err = rte_eth_dev_info_get(TEST_ETHDEV_ID, &dev_info);
+ TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+ max_rx_queues = RTE_MIN(dev_info.max_rx_queues, MAX_NUM_RX_QUEUE);
+
+ err = rte_event_dev_info_get(TEST_DEV_ID, &event_dev_info);
+ TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+ queue_conf = rte_zmalloc(NULL, sizeof(*queue_conf) * max_rx_queues, 0);
+ TEST_ASSERT(queue_conf != NULL, "Failed to allocate memory");
+
+ rx_queue_ids = rte_zmalloc(NULL, sizeof(*rx_queue_ids) * max_rx_queues, 0);
+ TEST_ASSERT(rx_queue_ids != NULL, "Failed to allocate memory");
+
+ ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
+ for (i = 0; i < max_rx_queues; i++) {
+ rx_queue_ids[i] = i;
+ ev.queue_id = i % event_dev_info.max_event_queues;
+ if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID) {
+ ev.flow_id = 1;
+ queue_conf[i].rx_queue_flags =
+ RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID;
+ }
+ queue_conf[i].ev = ev;
+ queue_conf[i].servicing_weight = 1;
+ }
+
+ err = rte_event_eth_rx_adapter_queues_add(TEST_INST_ID,
+ rte_eth_dev_count_total(),
+ rx_queue_ids, queue_conf, 0);
+ TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
+
+ err = rte_event_eth_rx_adapter_queues_add(TEST_INST_ID + 1, TEST_ETHDEV_ID,
+ NULL, queue_conf, 0);
+ TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
+
+ err = rte_event_eth_rx_adapter_queues_add(TEST_INST_ID, TEST_ETHDEV_ID,
+ rx_queue_ids, queue_conf, 1);
+ TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+ err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID, TEST_ETHDEV_ID, 0);
+ TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+ if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ) {
+ err = rte_event_eth_rx_adapter_queues_add(
+ TEST_INST_ID, TEST_ETHDEV_ID, rx_queue_ids, queue_conf,
+ max_rx_queues);
+ TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+ err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
+ TEST_ETHDEV_ID, -1);
+ TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+ } else {
+ err = rte_event_eth_rx_adapter_queues_add(
+ TEST_INST_ID, TEST_ETHDEV_ID, NULL, queue_conf, 0);
+ TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+ err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
+ TEST_ETHDEV_ID, -1);
+ TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+ }
+
+ rte_free(rx_queue_ids);
+ rte_free(queue_conf);
+
+ return TEST_SUCCESS;
+}
+
static int
adapter_multi_eth_add_del(void)
{
@@ -1423,6 +1507,8 @@ static struct unit_test_suite event_eth_rx_tests = {
TEST_CASE_ST(NULL, NULL, adapter_create_free_with_params),
TEST_CASE_ST(adapter_create, adapter_free,
adapter_queue_add_del),
+ TEST_CASE_ST(adapter_create, adapter_free,
+ adapter_queues_add_del),
TEST_CASE_ST(adapter_create, adapter_free,
adapter_multi_eth_add_del),
TEST_CASE_ST(adapter_create, adapter_free, adapter_start_stop),
--
2.25.1