From: Xueming Li <xuemingl@nvidia.com>
To: <dev@dpdk.org>
Cc: <xuemingl@nvidia.com>, Lior Margalit <lmargalit@nvidia.com>,
Matan Azrad <matan@nvidia.com>,
Viacheslav Ovsiienko <viacheslavo@nvidia.com>
Subject: [dpdk-dev] [PATCH v2 12/13] net/mlx5: support shared Rx queue
Date: Sat, 16 Oct 2021 17:12:12 +0800
Message-ID: <20211016091214.1831902-13-xuemingl@nvidia.com>
In-Reply-To: <20211016091214.1831902-1-xuemingl@nvidia.com>
This patch introduces shared RXQ. All shared Rx queues with the same group
and queue ID share the same rxq_ctrl. Since rxq_ctrl and rxq_data are
shared, the queues of all member ports use the same WQ and CQ, essentially
a single Rx WQ into which mbufs are filled.

The shared rxq_data is set as the rxq object in the device Rx queues of all
member ports and is used for receiving packets. Polling the queue of any
member port may return packets of any member; mbuf->port identifies the
source port.
Signed-off-by: Xueming Li <xuemingl@nvidia.com>
---
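Note for reviewers (not part of the commit): a minimal, hypothetical
application-side sketch of how a shared Rx queue could be requested and
polled. It assumes the ethdev shared Rx queue API this series depends on
(rte_eth_rxconf::share_group, RTE_ETH_DEV_CAPA_RXQ_SHARE); the helper
function names below are illustrative only.

    #include <rte_ethdev.h>
    #include <rte_mbuf.h>

    /* Set up queue 0 of two member ports in share group 1. The same group
     * and queue index means both ports end up on one rxq_ctrl/WQ/CQ. */
    static int
    setup_shared_rxq(uint16_t port_a, uint16_t port_b, struct rte_mempool *mp)
    {
        uint16_t ports[2] = { port_a, port_b };
        struct rte_eth_rxconf rxconf = { .share_group = 1 };
        struct rte_eth_dev_info info;
        unsigned int i;
        int ret;

        for (i = 0; i < 2; i++) {
            ret = rte_eth_dev_info_get(ports[i], &info);
            if (ret != 0)
                return ret;
            /* The PMD advertises the capability only when FW RMP support
             * and DevX queue objects are available. */
            if (!(info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE))
                return -ENOTSUP;
            ret = rte_eth_rx_queue_setup(ports[i], 0, 512, SOCKET_ID_ANY,
                                         &rxconf, mp);
            if (ret != 0)
                return ret;
        }
        return 0;
    }

    /* Polling either member port may return packets of any member;
     * mbuf->port identifies the actual source port. */
    static void
    poll_shared_rxq(uint16_t port)
    {
        struct rte_mbuf *pkts[32];
        uint16_t i, n = rte_eth_rx_burst(port, 0, pkts, 32);

        for (i = 0; i < n; i++) {
            uint16_t src_port = pkts[i]->port;

            (void)src_port; /* dispatch per source port here */
            rte_pktmbuf_free(pkts[i]);
        }
    }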
doc/guides/nics/features/mlx5.ini | 1 +
doc/guides/nics/mlx5.rst | 6 +
drivers/net/mlx5/linux/mlx5_os.c | 2 +
drivers/net/mlx5/linux/mlx5_verbs.c | 12 +-
drivers/net/mlx5/mlx5.h | 4 +-
drivers/net/mlx5/mlx5_devx.c | 50 +++++--
drivers/net/mlx5/mlx5_ethdev.c | 5 +
drivers/net/mlx5/mlx5_rx.h | 4 +
drivers/net/mlx5/mlx5_rxq.c | 218 ++++++++++++++++++++++++----
drivers/net/mlx5/mlx5_trigger.c | 76 ++++++----
10 files changed, 298 insertions(+), 80 deletions(-)
diff --git a/doc/guides/nics/features/mlx5.ini b/doc/guides/nics/features/mlx5.ini
index f01abd4231f..ff5e669acc1 100644
--- a/doc/guides/nics/features/mlx5.ini
+++ b/doc/guides/nics/features/mlx5.ini
@@ -11,6 +11,7 @@ Removal event = Y
Rx interrupt = Y
Fast mbuf free = Y
Queue start/stop = Y
+Shared Rx queue = Y
Burst mode info = Y
Power mgmt address monitor = Y
MTU update = Y
diff --git a/doc/guides/nics/mlx5.rst b/doc/guides/nics/mlx5.rst
index bae73f42d88..d26f274dec4 100644
--- a/doc/guides/nics/mlx5.rst
+++ b/doc/guides/nics/mlx5.rst
@@ -113,6 +113,7 @@ Features
- Connection tracking.
- Sub-Function representors.
- Sub-Function.
+- Shared Rx queue.
Limitations
@@ -464,6 +465,11 @@ Limitations
- In order to achieve best insertion rate, application should manage the flows per lcore.
- Better to disable memory reclaim by setting ``reclaim_mem_mode`` to 0 to accelerate the flow object allocation and release with cache.
+ Shared Rx queue:
+
+ - Received packet and byte counters are identical for all member devices in the same share group.
+ - Received packet and byte counters are identical for all queues sharing the same group and queue ID.
+
Statistics
----------
diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c
index 985f0bd4892..49acbe34817 100644
--- a/drivers/net/mlx5/linux/mlx5_os.c
+++ b/drivers/net/mlx5/linux/mlx5_os.c
@@ -457,6 +457,7 @@ mlx5_alloc_shared_dr(struct mlx5_priv *priv)
mlx5_glue->dr_create_flow_action_default_miss();
if (!sh->default_miss_action)
DRV_LOG(WARNING, "Default miss action is not supported.");
+ LIST_INIT(&sh->shared_rxqs);
return 0;
error:
/* Rollback the created objects. */
@@ -531,6 +532,7 @@ mlx5_os_free_shared_dr(struct mlx5_priv *priv)
MLX5_ASSERT(sh && sh->refcnt);
if (sh->refcnt > 1)
return;
+ MLX5_ASSERT(LIST_EMPTY(&sh->shared_rxqs));
#ifdef HAVE_MLX5DV_DR
if (sh->rx_domain) {
mlx5_glue->dr_destroy_domain(sh->rx_domain);
diff --git a/drivers/net/mlx5/linux/mlx5_verbs.c b/drivers/net/mlx5/linux/mlx5_verbs.c
index 0e68a13208b..17183adf732 100644
--- a/drivers/net/mlx5/linux/mlx5_verbs.c
+++ b/drivers/net/mlx5/linux/mlx5_verbs.c
@@ -459,20 +459,24 @@ mlx5_rxq_ibv_obj_new(struct mlx5_rxq_priv *rxq)
*
* @param rxq
* Pointer to Rx queue.
+ * @return
* True if the Rx queue object is safe to release.
*/
-static void
+static bool
mlx5_rxq_ibv_obj_release(struct mlx5_rxq_priv *rxq)
{
struct mlx5_rxq_obj *rxq_obj = rxq->ctrl->obj;
- MLX5_ASSERT(rxq_obj);
- MLX5_ASSERT(rxq_obj->wq);
- MLX5_ASSERT(rxq_obj->ibv_cq);
+ if (rxq_obj == NULL || rxq_obj->wq == NULL)
+ return true;
claim_zero(mlx5_glue->destroy_wq(rxq_obj->wq));
+ rxq_obj->wq = NULL;
+ MLX5_ASSERT(rxq_obj->ibv_cq);
claim_zero(mlx5_glue->destroy_cq(rxq_obj->ibv_cq));
if (rxq_obj->ibv_channel)
claim_zero(mlx5_glue->destroy_comp_channel
(rxq_obj->ibv_channel));
+ return true;
}
/**
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 55612f777ea..647a18d3916 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -1193,6 +1193,7 @@ struct mlx5_dev_ctx_shared {
struct mlx5_flex_parser_profiles fp[MLX5_FLEX_PARSER_MAX];
/* Flex parser profiles information. */
void *devx_rx_uar; /* DevX UAR for Rx. */
+ LIST_HEAD(shared_rxqs, mlx5_rxq_ctrl) shared_rxqs; /* Shared RXQs. */
struct mlx5_aso_age_mng *aso_age_mng;
/* Management data for aging mechanism using ASO Flow Hit. */
struct mlx5_geneve_tlv_option_resource *geneve_tlv_option_resource;
@@ -1257,6 +1258,7 @@ struct mlx5_rxq_obj {
};
struct mlx5_devx_obj *rq; /* DevX RQ object for hairpin. */
struct {
+ struct mlx5_devx_rmp devx_rmp; /* RMP for shared RQ. */
struct mlx5_devx_cq cq_obj; /* DevX CQ object. */
void *devx_channel;
};
@@ -1342,7 +1344,7 @@ struct mlx5_obj_ops {
int (*rxq_obj_new)(struct mlx5_rxq_priv *rxq);
int (*rxq_event_get)(struct mlx5_rxq_obj *rxq_obj);
int (*rxq_obj_modify)(struct mlx5_rxq_priv *rxq, uint8_t type);
- void (*rxq_obj_release)(struct mlx5_rxq_priv *rxq);
+ bool (*rxq_obj_release)(struct mlx5_rxq_priv *rxq);
int (*ind_table_new)(struct rte_eth_dev *dev, const unsigned int log_n,
struct mlx5_ind_table_obj *ind_tbl);
int (*ind_table_modify)(struct rte_eth_dev *dev,
diff --git a/drivers/net/mlx5/mlx5_devx.c b/drivers/net/mlx5/mlx5_devx.c
index b767470dea0..94253047141 100644
--- a/drivers/net/mlx5/mlx5_devx.c
+++ b/drivers/net/mlx5/mlx5_devx.c
@@ -88,6 +88,8 @@ mlx5_devx_modify_rq(struct mlx5_rxq_priv *rxq, uint8_t type)
default:
break;
}
+ if (rxq->ctrl->type == MLX5_RXQ_TYPE_HAIRPIN)
+ return mlx5_devx_cmd_modify_rq(rxq->ctrl->obj->rq, &rq_attr);
return mlx5_devx_cmd_modify_rq(rxq->devx_rq.rq, &rq_attr);
}
@@ -152,22 +154,27 @@ mlx5_devx_modify_sq(struct mlx5_txq_obj *obj, enum mlx5_txq_modify_type type,
*
* @param rxq
* DevX Rx queue.
+ * @return
* True if the Rx queue object is safe to release.
*/
-static void
+static bool
mlx5_rxq_devx_obj_release(struct mlx5_rxq_priv *rxq)
{
- struct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;
- struct mlx5_rxq_obj *rxq_obj = rxq_ctrl->obj;
+ struct mlx5_rxq_obj *rxq_obj = rxq->ctrl->obj;
- MLX5_ASSERT(rxq != NULL);
- MLX5_ASSERT(rxq_ctrl != NULL);
+ if (rxq_obj == NULL)
+ return true;
if (rxq_obj->rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN) {
- MLX5_ASSERT(rxq_obj->rq);
+ if (rxq_obj->rq == NULL)
+ return true;
mlx5_devx_modify_rq(rxq, MLX5_RXQ_MOD_RDY2RST);
claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));
} else {
+ if (rxq->devx_rq.rq == NULL)
+ return true;
mlx5_devx_rq_destroy(&rxq->devx_rq);
- memset(&rxq->devx_rq, 0, sizeof(rxq->devx_rq));
+ if (rxq->devx_rq.rmp != NULL && rxq->devx_rq.rmp->ref_cnt > 0)
+ return false;
mlx5_devx_cq_destroy(&rxq_obj->cq_obj);
memset(&rxq_obj->cq_obj, 0, sizeof(rxq_obj->cq_obj));
if (rxq_obj->devx_channel) {
@@ -176,6 +183,7 @@ mlx5_rxq_devx_obj_release(struct mlx5_rxq_priv *rxq)
rxq_obj->devx_channel = NULL;
}
}
+ return true;
}
/**
@@ -269,6 +277,8 @@ mlx5_rxq_create_devx_rq_resources(struct mlx5_rxq_priv *rxq)
MLX5_WQ_END_PAD_MODE_NONE;
rq_attr.wq_attr.pd = priv->sh->pdn;
rq_attr.counter_set_id = priv->counter_set_id;
+ if (rxq_data->shared) /* Create RMP based RQ. */
+ rxq->devx_rq.rmp = &rxq_ctrl->obj->devx_rmp;
/* Create RQ using DevX API. */
return mlx5_devx_rq_create(priv->sh->ctx, &rxq->devx_rq,
wqe_size, log_desc_n, &rq_attr,
@@ -299,6 +309,8 @@ mlx5_rxq_create_devx_cq_resources(struct mlx5_rxq_priv *rxq)
uint16_t event_nums[1] = { 0 };
int ret = 0;
+ if (rxq_ctrl->started)
+ return 0;
if (priv->config.cqe_comp && !rxq_data->hw_timestamp &&
!rxq_data->lro) {
cq_attr.cqe_comp_en = 1u;
@@ -364,6 +376,7 @@ mlx5_rxq_create_devx_cq_resources(struct mlx5_rxq_priv *rxq)
rxq_data->cq_uar = mlx5_os_get_devx_uar_base_addr(sh->devx_rx_uar);
rxq_data->cqe_n = log_cqe_n;
rxq_data->cqn = cq_obj->cq->id;
+ rxq_data->cq_ci = 0;
if (rxq_ctrl->obj->devx_channel) {
ret = mlx5_os_devx_subscribe_devx_event
(rxq_ctrl->obj->devx_channel,
@@ -462,7 +475,7 @@ mlx5_rxq_devx_obj_new(struct mlx5_rxq_priv *rxq)
if (rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN)
return mlx5_rxq_obj_hairpin_new(rxq);
tmpl->rxq_ctrl = rxq_ctrl;
- if (rxq_ctrl->irq) {
+ if (rxq_ctrl->irq && !rxq_ctrl->started) {
int devx_ev_flag =
MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA;
@@ -495,11 +508,19 @@ mlx5_rxq_devx_obj_new(struct mlx5_rxq_priv *rxq)
ret = mlx5_devx_modify_rq(rxq, MLX5_RXQ_MOD_RST2RDY);
if (ret)
goto error;
- rxq_data->wqes = (void *)(uintptr_t)rxq->devx_rq.wq.umem_buf;
- rxq_data->rq_db = (uint32_t *)(uintptr_t)rxq->devx_rq.wq.db_rec;
- mlx5_rxq_initialize(rxq_data);
+ if (!rxq_data->shared) {
+ rxq_data->wqes = (void *)(uintptr_t)rxq->devx_rq.wq.umem_buf;
+ rxq_data->rq_db = (uint32_t *)(uintptr_t)rxq->devx_rq.wq.db_rec;
+ } else if (!rxq_ctrl->started) {
+ rxq_data->wqes = (void *)(uintptr_t)tmpl->devx_rmp.wq.umem_buf;
+ rxq_data->rq_db =
+ (uint32_t *)(uintptr_t)tmpl->devx_rmp.wq.db_rec;
+ }
+ if (!rxq_ctrl->started) {
+ mlx5_rxq_initialize(rxq_data);
+ rxq_ctrl->wqn = rxq->devx_rq.rq->id;
+ }
priv->dev_data->rx_queue_state[rxq->idx] = RTE_ETH_QUEUE_STATE_STARTED;
- rxq_ctrl->wqn = rxq->devx_rq.rq->id;
return 0;
error:
ret = rte_errno; /* Save rte_errno before cleanup. */
@@ -557,7 +578,10 @@ mlx5_devx_ind_table_create_rqt_attr(struct rte_eth_dev *dev,
struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, queues[i]);
MLX5_ASSERT(rxq != NULL);
- rqt_attr->rq_list[i] = rxq->devx_rq.rq->id;
+ if (rxq->ctrl->type == MLX5_RXQ_TYPE_HAIRPIN)
+ rqt_attr->rq_list[i] = rxq->ctrl->obj->rq->id;
+ else
+ rqt_attr->rq_list[i] = rxq->devx_rq.rq->id;
}
MLX5_ASSERT(i > 0);
for (j = 0; i != rqt_n; ++j, ++i)
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index 070ff149488..ee6a70f315e 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -26,6 +26,7 @@
#include "mlx5_rx.h"
#include "mlx5_tx.h"
#include "mlx5_autoconf.h"
+#include "mlx5_devx.h"
/**
* Get the interface index from device name.
@@ -336,9 +337,13 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
info->flow_type_rss_offloads = ~MLX5_RSS_HF_MASK;
mlx5_set_default_params(dev, info);
mlx5_set_txlimit_params(dev, info);
+ if (priv->config.hca_attr.mem_rq_rmp &&
+ priv->obj_ops.rxq_obj_new == devx_obj_ops.rxq_obj_new)
+ info->dev_capa |= RTE_ETH_DEV_CAPA_RXQ_SHARE;
info->switch_info.name = dev->data->name;
info->switch_info.domain_id = priv->domain_id;
info->switch_info.port_id = priv->representor_id;
+ info->switch_info.rx_domain = 0; /* No sub Rx domains. */
if (priv->representor) {
uint16_t port_id;
diff --git a/drivers/net/mlx5/mlx5_rx.h b/drivers/net/mlx5/mlx5_rx.h
index 161399c764d..c293cb1b61c 100644
--- a/drivers/net/mlx5/mlx5_rx.h
+++ b/drivers/net/mlx5/mlx5_rx.h
@@ -107,6 +107,7 @@ struct mlx5_rxq_data {
unsigned int lro:1; /* Enable LRO. */
unsigned int dynf_meta:1; /* Dynamic metadata is configured. */
unsigned int mcqe_format:3; /* CQE compression format. */
+ unsigned int shared:1; /* Shared RXQ. */
volatile uint32_t *rq_db;
volatile uint32_t *cq_db;
uint16_t port_id;
@@ -169,6 +170,9 @@ struct mlx5_rxq_ctrl {
struct mlx5_dev_ctx_shared *sh; /* Shared context. */
enum mlx5_rxq_type type; /* Rxq type. */
unsigned int socket; /* CPU socket ID for allocations. */
+ LIST_ENTRY(mlx5_rxq_ctrl) share_entry; /* Entry in shared RXQ list. */
+ uint32_t share_group; /* Group ID of shared RXQ. */
+ unsigned int started:1; /* Whether (shared) RXQ has been started. */
unsigned int irq:1; /* Whether IRQ is enabled. */
uint32_t flow_mark_n; /* Number of Mark/Flag flows using this Queue. */
uint32_t flow_tunnels_n[MLX5_FLOW_TUNNEL]; /* Tunnels counters. */
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 8eec9a4ed8d..494c9e3517f 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -28,6 +28,7 @@
#include "mlx5_rx.h"
#include "mlx5_utils.h"
#include "mlx5_autoconf.h"
+#include "mlx5_devx.h"
/* Default RSS hash key also used for ConnectX-3. */
@@ -648,6 +649,114 @@ mlx5_rx_queue_pre_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t *desc)
return 0;
}
+/**
+ * Get the shared Rx queue object that matches group and queue index.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param group
+ * Shared RXQ group.
+ * @param idx
+ * RX queue index.
+ *
+ * @return
+ * Matching shared RXQ object, or NULL if not found.
+ */
+static struct mlx5_rxq_ctrl *
+mlx5_shared_rxq_get(struct rte_eth_dev *dev, uint32_t group, uint16_t idx)
+{
+ struct mlx5_rxq_ctrl *rxq_ctrl;
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+ LIST_FOREACH(rxq_ctrl, &priv->sh->shared_rxqs, share_entry) {
+ if (rxq_ctrl->share_group == group && rxq_ctrl->rxq.idx == idx)
+ return rxq_ctrl;
+ }
+ return NULL;
+}
+
+/**
+ * Check whether requested Rx queue configuration matches shared RXQ.
+ *
+ * @param rxq_ctrl
+ * Pointer to shared RXQ.
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param idx
+ * Queue index.
+ * @param desc
+ * Number of descriptors to configure in queue.
+ * @param socket
+ * NUMA socket on which memory must be allocated.
+ * @param[in] conf
+ * Thresholds parameters.
+ * @param mp
+ * Memory pool for buffer allocations.
+ *
+ * @return
+ * True if the Rx queue configuration matches the shared RXQ, false otherwise.
+ */
+static bool
+mlx5_shared_rxq_match(struct mlx5_rxq_ctrl *rxq_ctrl, struct rte_eth_dev *dev,
+ uint16_t idx, uint16_t desc, unsigned int socket,
+ const struct rte_eth_rxconf *conf,
+ struct rte_mempool *mp)
+{
+ struct mlx5_priv *spriv = LIST_FIRST(&rxq_ctrl->owners)->priv;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ unsigned int mprq_stride_nums = priv->config.mprq.stride_num_n ?
+ priv->config.mprq.stride_num_n : MLX5_MPRQ_STRIDE_NUM_N;
+
+ RTE_SET_USED(conf);
+ if (rxq_ctrl->socket != socket) {
+ DRV_LOG(ERR, "port %u queue index %u failed to join shared group: socket mismatch",
+ dev->data->port_id, idx);
+ return false;
+ }
+ if (priv->config.mprq.enabled)
+ desc >>= mprq_stride_nums;
+ if (rxq_ctrl->rxq.elts_n != log2above(desc)) {
+ DRV_LOG(ERR, "port %u queue index %u failed to join shared group: descriptor number mismatch",
+ dev->data->port_id, idx);
+ return false;
+ }
+ if (priv->mtu != spriv->mtu) {
+ DRV_LOG(ERR, "port %u queue index %u failed to join shared group: mtu mismatch",
+ dev->data->port_id, idx);
+ return false;
+ }
+ if (priv->dev_data->dev_conf.intr_conf.rxq !=
+ spriv->dev_data->dev_conf.intr_conf.rxq) {
+ DRV_LOG(ERR, "port %u queue index %u failed to join shared group: interrupt mismatch",
+ dev->data->port_id, idx);
+ return false;
+ }
+ if (!spriv->config.mprq.enabled && rxq_ctrl->rxq.mp != mp) {
+ DRV_LOG(ERR, "port %u queue index %u failed to join shared group: mempool mismatch",
+ dev->data->port_id, idx);
+ return false;
+ }
+ if (priv->config.hw_padding != spriv->config.hw_padding) {
+ DRV_LOG(ERR, "port %u queue index %u failed to join shared group: padding mismatch",
+ dev->data->port_id, idx);
+ return false;
+ }
+ if (memcmp(&priv->config.mprq, &spriv->config.mprq,
+ sizeof(priv->config.mprq)) != 0) {
+ DRV_LOG(ERR, "port %u queue index %u failed to join shared group: MPRQ mismatch",
+ dev->data->port_id, idx);
+ return false;
+ }
+ if (priv->config.cqe_comp != spriv->config.cqe_comp ||
+ (priv->config.cqe_comp &&
+ priv->config.cqe_comp_fmt != spriv->config.cqe_comp_fmt)) {
+ DRV_LOG(ERR, "port %u queue index %u failed to join shared group: CQE compression mismatch",
+ dev->data->port_id, idx);
+ return false;
+ }
+ return true;
+}
+
/**
*
* @param dev
@@ -673,12 +782,14 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_rxq_priv *rxq;
- struct mlx5_rxq_ctrl *rxq_ctrl;
+ struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
struct rte_eth_rxseg_split *rx_seg =
(struct rte_eth_rxseg_split *)conf->rx_seg;
struct rte_eth_rxseg_split rx_single = {.mp = mp};
uint16_t n_seg = conf->rx_nseg;
int res;
+ uint64_t offloads = conf->offloads |
+ dev->data->dev_conf.rxmode.offloads;
if (mp) {
/*
@@ -690,9 +801,6 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
n_seg = 1;
}
if (n_seg > 1) {
- uint64_t offloads = conf->offloads |
- dev->data->dev_conf.rxmode.offloads;
-
/* The offloads should be checked on rte_eth_dev layer. */
MLX5_ASSERT(offloads & DEV_RX_OFFLOAD_SCATTER);
if (!(offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)) {
@@ -704,9 +812,32 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
}
MLX5_ASSERT(n_seg < MLX5_MAX_RXQ_NSEG);
}
+ if (conf->share_group > 0) {
+ if (!priv->config.hca_attr.mem_rq_rmp) {
+ DRV_LOG(ERR, "port %u queue index %u shared Rx queue not supported by fw",
+ dev->data->port_id, idx);
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ if (priv->obj_ops.rxq_obj_new != devx_obj_ops.rxq_obj_new) {
+ DRV_LOG(ERR, "port %u queue index %u shared Rx queue needs DevX api",
+ dev->data->port_id, idx);
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ /* Try to reuse shared RXQ. */
+ rxq_ctrl = mlx5_shared_rxq_get(dev, conf->share_group, idx);
+ if (rxq_ctrl != NULL &&
+ !mlx5_shared_rxq_match(rxq_ctrl, dev, idx, desc, socket,
+ conf, mp)) {
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ }
res = mlx5_rx_queue_pre_setup(dev, idx, &desc);
if (res)
return res;
+ /* Allocate RXQ. */
rxq = mlx5_malloc(MLX5_MEM_ANY | MLX5_MEM_ZERO, sizeof(*rxq), 0,
SOCKET_ID_ANY);
if (!rxq) {
@@ -718,15 +849,23 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
rxq->priv = priv;
rxq->idx = idx;
(*priv->rxq_privs)[idx] = rxq;
- rxq_ctrl = mlx5_rxq_new(dev, rxq, desc, socket, conf, rx_seg, n_seg);
- if (!rxq_ctrl) {
- DRV_LOG(ERR, "port %u unable to allocate rx queue index %u",
- dev->data->port_id, idx);
- mlx5_free(rxq);
- (*priv->rxq_privs)[idx] = NULL;
- rte_errno = ENOMEM;
- return -rte_errno;
+ if (rxq_ctrl != NULL) {
+ /* Join owner list. */
+ LIST_INSERT_HEAD(&rxq_ctrl->owners, rxq, owner_entry);
+ rxq->ctrl = rxq_ctrl;
+ } else {
+ rxq_ctrl = mlx5_rxq_new(dev, rxq, desc, socket, conf, rx_seg,
+ n_seg);
+ if (rxq_ctrl == NULL) {
+ DRV_LOG(ERR, "port %u unable to allocate rx queue index %u",
+ dev->data->port_id, idx);
+ mlx5_free(rxq);
+ (*priv->rxq_privs)[idx] = NULL;
+ rte_errno = ENOMEM;
+ return -rte_errno;
+ }
}
+ mlx5_rxq_ref(dev, idx);
DRV_LOG(DEBUG, "port %u adding Rx queue %u to list",
dev->data->port_id, idx);
dev->data->rx_queues[idx] = &rxq_ctrl->rxq;
@@ -1071,6 +1210,9 @@ mlx5_rxq_obj_verify(struct rte_eth_dev *dev)
struct mlx5_rxq_obj *rxq_obj;
LIST_FOREACH(rxq_obj, &priv->rxqsobj, next) {
+ if (rxq_obj->rxq_ctrl->rxq.shared &&
+ !LIST_EMPTY(&rxq_obj->rxq_ctrl->owners))
+ continue;
DRV_LOG(DEBUG, "port %u Rx queue %u still referenced",
dev->data->port_id, rxq_obj->rxq_ctrl->rxq.idx);
++ret;
@@ -1348,6 +1490,11 @@ mlx5_rxq_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq,
return NULL;
}
LIST_INIT(&tmpl->owners);
+ if (conf->share_group > 0) {
+ tmpl->rxq.shared = 1;
+ tmpl->share_group = conf->share_group;
+ LIST_INSERT_HEAD(&priv->sh->shared_rxqs, tmpl, share_entry);
+ }
rxq->ctrl = tmpl;
LIST_INSERT_HEAD(&tmpl->owners, rxq, owner_entry);
MLX5_ASSERT(n_seg && n_seg <= MLX5_MAX_RXQ_NSEG);
@@ -1596,7 +1743,6 @@ mlx5_rxq_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq,
tmpl->rxq.uar_lock_cq = &priv->sh->uar_lock_cq;
#endif
tmpl->rxq.idx = idx;
- mlx5_rxq_ref(dev, idx);
LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
return tmpl;
error:
@@ -1771,33 +1917,45 @@ mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_rxq_priv *rxq;
struct mlx5_rxq_ctrl *rxq_ctrl;
+ bool free_obj;
+ uint32_t refcnt;
if (priv->rxq_privs == NULL)
return 0;
rxq = mlx5_rxq_get(dev, idx);
if (rxq == NULL)
return 0;
- if (mlx5_rxq_deref(dev, idx) > 1)
- return 1;
rxq_ctrl = rxq->ctrl;
- if (rxq_ctrl->obj != NULL) {
- priv->obj_ops.rxq_obj_release(rxq);
- LIST_REMOVE(rxq_ctrl->obj, next);
- mlx5_free(rxq_ctrl->obj);
- rxq_ctrl->obj = NULL;
- }
- if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD) {
- rxq_free_elts(rxq_ctrl);
- dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
- }
- if (!__atomic_load_n(&rxq->refcnt, __ATOMIC_RELAXED)) {
+ refcnt = mlx5_rxq_deref(dev, idx);
+ if (refcnt > 1) {
+ return 1;
+ } else if (refcnt == 1) { /* RxQ stopped. */
+ free_obj = priv->obj_ops.rxq_obj_release(rxq);
+ if (free_obj && rxq_ctrl->obj != NULL) {
+ LIST_REMOVE(rxq_ctrl->obj, next);
+ mlx5_free(rxq_ctrl->obj);
+ rxq_ctrl->obj = NULL;
+ rxq_ctrl->started = false;
+ }
if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD) {
- mlx5_mr_btree_free(&rxq_ctrl->rxq.mr_ctrl.cache_bh);
- mlx5_mprq_free_mp(dev, rxq_ctrl);
+ if (free_obj)
+ rxq_free_elts(rxq_ctrl);
+ dev->data->rx_queue_state[idx] =
+ RTE_ETH_QUEUE_STATE_STOPPED;
}
+ } else { /* Refcnt zero, closing device. */
LIST_REMOVE(rxq, owner_entry);
- LIST_REMOVE(rxq_ctrl, next);
- mlx5_free(rxq_ctrl);
+ if (LIST_EMPTY(&rxq_ctrl->owners)) {
+ if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD) {
+ mlx5_mr_btree_free
+ (&rxq_ctrl->rxq.mr_ctrl.cache_bh);
+ mlx5_mprq_free_mp(dev, rxq_ctrl);
+ }
+ if (rxq_ctrl->rxq.shared)
+ LIST_REMOVE(rxq_ctrl, share_entry);
+ LIST_REMOVE(rxq_ctrl, next);
+ mlx5_free(rxq_ctrl);
+ }
dev->data->rx_queues[idx] = NULL;
mlx5_free(rxq);
(*priv->rxq_privs)[idx] = NULL;
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index 1e865e74e39..b12ca3dde99 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -122,6 +122,46 @@ mlx5_rxq_stop(struct rte_eth_dev *dev)
mlx5_rxq_release(dev, i);
}
+static int
+mlx5_rxq_ctrl_prepare(struct rte_eth_dev *dev, struct mlx5_rxq_ctrl *rxq_ctrl,
+ unsigned int idx)
+{
+ int ret = 0;
+
+ if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD) {
+ if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq)) {
+ /* Allocate/reuse/resize mempool for MPRQ. */
+ if (mlx5_mprq_alloc_mp(dev, rxq_ctrl) < 0)
+ return -rte_errno;
+
+ /* Pre-register Rx mempools. */
+ mlx5_mr_update_mp(dev, &rxq_ctrl->rxq.mr_ctrl,
+ rxq_ctrl->rxq.mprq_mp);
+ } else {
+ uint32_t s;
+ for (s = 0; s < rxq_ctrl->rxq.rxseg_n; s++)
+ mlx5_mr_update_mp(dev, &rxq_ctrl->rxq.mr_ctrl,
+ rxq_ctrl->rxq.rxseg[s].mp);
+ }
+ ret = rxq_alloc_elts(rxq_ctrl);
+ if (ret)
+ return ret;
+ }
+ MLX5_ASSERT(!rxq_ctrl->obj);
+ rxq_ctrl->obj = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
+ sizeof(*rxq_ctrl->obj), 0,
+ rxq_ctrl->socket);
+ if (!rxq_ctrl->obj) {
+ DRV_LOG(ERR, "Port %u Rx queue %u can't allocate resources.",
+ dev->data->port_id, idx);
+ rte_errno = ENOMEM;
+ return -rte_errno;
+ }
+ DRV_LOG(DEBUG, "Port %u rxq %u updated with %p.", dev->data->port_id,
+ idx, (void *)&rxq_ctrl->obj);
+ return 0;
+}
+
/**
* Start traffic on Rx queues.
*
@@ -149,45 +189,17 @@ mlx5_rxq_start(struct rte_eth_dev *dev)
if (rxq == NULL)
continue;
rxq_ctrl = rxq->ctrl;
- if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD) {
- if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq)) {
- /* Allocate/reuse/resize mempool for MPRQ. */
- if (mlx5_mprq_alloc_mp(dev, rxq_ctrl) < 0)
- goto error;
- /* Pre-register Rx mempools. */
- mlx5_mr_update_mp(dev, &rxq_ctrl->rxq.mr_ctrl,
- rxq_ctrl->rxq.mprq_mp);
- } else {
- uint32_t s;
-
- for (s = 0; s < rxq_ctrl->rxq.rxseg_n; s++)
- mlx5_mr_update_mp
- (dev, &rxq_ctrl->rxq.mr_ctrl,
- rxq_ctrl->rxq.rxseg[s].mp);
- }
- ret = rxq_alloc_elts(rxq_ctrl);
- if (ret)
+ if (!rxq_ctrl->started) {
+ if (mlx5_rxq_ctrl_prepare(dev, rxq_ctrl, i) < 0)
goto error;
- }
- MLX5_ASSERT(!rxq_ctrl->obj);
- rxq_ctrl->obj = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
- sizeof(*rxq_ctrl->obj), 0,
- rxq_ctrl->socket);
- if (!rxq_ctrl->obj) {
- DRV_LOG(ERR,
- "Port %u Rx queue %u can't allocate resources.",
- dev->data->port_id, i);
- rte_errno = ENOMEM;
- goto error;
+ LIST_INSERT_HEAD(&priv->rxqsobj, rxq_ctrl->obj, next);
}
ret = priv->obj_ops.rxq_obj_new(rxq);
if (ret) {
mlx5_free(rxq_ctrl->obj);
goto error;
}
- DRV_LOG(DEBUG, "Port %u rxq %u updated with %p.",
- dev->data->port_id, i, (void *)&rxq_ctrl->obj);
- LIST_INSERT_HEAD(&priv->rxqsobj, rxq_ctrl->obj, next);
+ rxq_ctrl->started = true;
}
return 0;
error:
--
2.33.0
Thread overview: 29+ messages
2021-09-26 11:18 [dpdk-dev] [PATCH 00/11] " Xueming Li
2021-09-26 11:18 ` [dpdk-dev] [PATCH 01/11] common/mlx5: support receive queue user index Xueming Li
2021-09-26 11:18 ` [dpdk-dev] [PATCH 02/11] common/mlx5: support receive memory pool Xueming Li
2021-09-26 11:18 ` [dpdk-dev] [PATCH 03/11] net/mlx5: clean Rx queue code Xueming Li
2021-09-26 11:18 ` [dpdk-dev] [PATCH 04/11] net/mlx5: split multiple packet Rq memory pool Xueming Li
2021-09-26 11:18 ` [dpdk-dev] [PATCH 05/11] net/mlx5: split Rx queue Xueming Li
2021-09-26 11:18 ` [dpdk-dev] [PATCH 06/11] net/mlx5: move Rx queue reference count Xueming Li
2021-09-26 11:19 ` [dpdk-dev] [PATCH 07/11] net/mlx5: move Rx queue hairpin info to private data Xueming Li
2021-09-26 11:19 ` [dpdk-dev] [PATCH 08/11] net/mlx5: remove port info from shareable Rx queue Xueming Li
2021-09-26 11:19 ` [dpdk-dev] [PATCH 09/11] net/mlx5: move Rx queue DevX resource Xueming Li
2021-09-26 11:19 ` [dpdk-dev] [PATCH 10/11] net/mlx5: remove Rx queue data list from device Xueming Li
2021-09-26 11:19 ` [dpdk-dev] [PATCH 11/11] net/mlx5: support shared Rx queue Xueming Li
2021-10-16 9:12 ` [dpdk-dev] [PATCH v2 00/13] " Xueming Li
2021-10-16 9:12 ` [dpdk-dev] [PATCH v2 01/13] common/mlx5: support receive queue user index Xueming Li
2021-10-16 9:12 ` [dpdk-dev] [PATCH v2 02/13] common/mlx5: support receive memory pool Xueming Li
2021-10-20 9:32 ` Kinsella, Ray
2021-10-16 9:12 ` [dpdk-dev] [PATCH v2 03/13] net/mlx5: fix Rx queue memory allocation return value Xueming Li
2021-10-16 9:12 ` [dpdk-dev] [PATCH v2 04/13] net/mlx5: clean Rx queue code Xueming Li
2021-10-16 9:12 ` [dpdk-dev] [PATCH v2 05/13] net/mlx5: split multiple packet Rq memory pool Xueming Li
2021-10-16 9:12 ` [dpdk-dev] [PATCH v2 06/13] net/mlx5: split Rx queue Xueming Li
2021-10-16 9:12 ` [dpdk-dev] [PATCH v2 07/13] net/mlx5: move Rx queue reference count Xueming Li
2021-10-16 9:12 ` [dpdk-dev] [PATCH v2 08/13] net/mlx5: move Rx queue hairpin info to private data Xueming Li
2021-10-16 9:12 ` [dpdk-dev] [PATCH v2 09/13] net/mlx5: remove port info from shareable Rx queue Xueming Li
2021-10-16 9:12 ` [dpdk-dev] [PATCH v2 10/13] net/mlx5: move Rx queue DevX resource Xueming Li
2021-10-16 9:12 ` [dpdk-dev] [PATCH v2 11/13] net/mlx5: remove Rx queue data list from device Xueming Li
2021-10-16 9:12 ` Xueming Li [this message]
2021-10-16 9:12 ` [dpdk-dev] [PATCH v2 13/13] net/mlx5: add shared Rx queue port datapath support Xueming Li
2021-10-19 8:19 ` [dpdk-dev] [PATCH v2 00/13] net/mlx5: support shared Rx queue Slava Ovsiienko
2021-10-19 8:22 ` Slava Ovsiienko