DPDK patches and discussions
From: Xueming Li <xuemingl@nvidia.com>
To: <dev@dpdk.org>
Cc: <xuemingl@nvidia.com>, Lior Margalit <lmargalit@nvidia.com>,
	Matan Azrad <matan@nvidia.com>,
	Viacheslav Ovsiienko <viacheslavo@nvidia.com>,
	"Anatoly Burakov" <anatoly.burakov@intel.com>
Subject: [dpdk-dev] [PATCH v3 11/14] net/mlx5: move Rx queue DevX resource
Date: Wed, 3 Nov 2021 15:58:35 +0800
Message-ID: <20211103075838.1486056-12-xuemingl@nvidia.com>
In-Reply-To: <20211103075838.1486056-1-xuemingl@nvidia.com>

To support shared Rx queue, move the DevX RQ, which is a per-queue
resource, to the Rx queue private data.
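
The gist of the change, shown as a condensed excerpt of the structure
definitions touched by this patch (not the complete definitions):

    /* Before: the DevX RQ lived in the Rx queue object, which becomes
     * shareable between queues. */
    struct mlx5_rxq_obj {
    	...
    	struct mlx5_devx_rq rq_obj; /* DevX RQ object. */
    	struct mlx5_devx_cq cq_obj; /* DevX CQ object. */
    	...
    };

    /* After: each port-level Rx queue keeps its own DevX RQ in its
     * private data, so queues sharing one control block still own
     * separate hardware RQs. Only the RQ moves; the CQ stays in the
     * queue object. */
    struct mlx5_rxq_priv {
    	...
    	struct mlx5_devx_rq devx_rq;
    	...
    };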

Signed-off-by: Xueming Li <xuemingl@nvidia.com>
---
 drivers/net/mlx5/linux/mlx5_verbs.c | 154 +++++++++++--------
 drivers/net/mlx5/mlx5.h             |  11 +-
 drivers/net/mlx5/mlx5_devx.c        | 227 +++++++++++++---------------
 drivers/net/mlx5/mlx5_rx.h          |   1 +
 drivers/net/mlx5/mlx5_rxq.c         |  44 +++---
 drivers/net/mlx5/mlx5_rxtx.c        |   6 +-
 drivers/net/mlx5/mlx5_trigger.c     |   2 +-
 drivers/net/mlx5/mlx5_vlan.c        |  16 +-
 8 files changed, 240 insertions(+), 221 deletions(-)
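
Most of the churn above comes from switching the per-queue callbacks from
taking a device pointer plus queue index, or the queue object, to taking
the Rx queue private data. A condensed view of the mlx5_obj_ops changes
(full context in the mlx5.h hunk below):

    /* Removed prototypes: */
    int (*rxq_obj_new)(struct rte_eth_dev *dev, uint16_t idx);
    int (*rxq_obj_modify)(struct mlx5_rxq_obj *rxq_obj, uint8_t type);
    void (*rxq_obj_release)(struct mlx5_rxq_obj *rxq_obj);

    /* Added prototypes: the callbacks receive the queue private data
     * and reach the shared control block and object through rxq->ctrl. */
    int (*rxq_obj_new)(struct mlx5_rxq_priv *rxq);
    int (*rxq_obj_modify)(struct mlx5_rxq_priv *rxq, uint8_t type);
    void (*rxq_obj_release)(struct mlx5_rxq_priv *rxq);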

diff --git a/drivers/net/mlx5/linux/mlx5_verbs.c b/drivers/net/mlx5/linux/mlx5_verbs.c
index 4779b37aa65..5d4ae3ea752 100644
--- a/drivers/net/mlx5/linux/mlx5_verbs.c
+++ b/drivers/net/mlx5/linux/mlx5_verbs.c
@@ -29,13 +29,13 @@
 /**
  * Modify Rx WQ vlan stripping offload
  *
- * @param rxq_obj
- *   Rx queue object.
+ * @param rxq
+ *   Rx queue.
  *
  * @return 0 on success, non-0 otherwise
  */
 static int
-mlx5_rxq_obj_modify_wq_vlan_strip(struct mlx5_rxq_obj *rxq_obj, int on)
+mlx5_rxq_obj_modify_wq_vlan_strip(struct mlx5_rxq_priv *rxq, int on)
 {
 	uint16_t vlan_offloads =
 		(on ? IBV_WQ_FLAGS_CVLAN_STRIPPING : 0) |
@@ -47,14 +47,14 @@ mlx5_rxq_obj_modify_wq_vlan_strip(struct mlx5_rxq_obj *rxq_obj, int on)
 		.flags = vlan_offloads,
 	};
 
-	return mlx5_glue->modify_wq(rxq_obj->wq, &mod);
+	return mlx5_glue->modify_wq(rxq->ctrl->obj->wq, &mod);
 }
 
 /**
  * Modifies the attributes for the specified WQ.
  *
- * @param rxq_obj
- *   Verbs Rx queue object.
+ * @param rxq
+ *   Verbs Rx queue.
  * @param type
  *   Type of change queue state.
  *
@@ -62,14 +62,14 @@ mlx5_rxq_obj_modify_wq_vlan_strip(struct mlx5_rxq_obj *rxq_obj, int on)
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 static int
-mlx5_ibv_modify_wq(struct mlx5_rxq_obj *rxq_obj, uint8_t type)
+mlx5_ibv_modify_wq(struct mlx5_rxq_priv *rxq, uint8_t type)
 {
 	struct ibv_wq_attr mod = {
 		.attr_mask = IBV_WQ_ATTR_STATE,
 		.wq_state = (enum ibv_wq_state)type,
 	};
 
-	return mlx5_glue->modify_wq(rxq_obj->wq, &mod);
+	return mlx5_glue->modify_wq(rxq->ctrl->obj->wq, &mod);
 }
 
 /**
@@ -139,21 +139,18 @@ mlx5_ibv_modify_qp(struct mlx5_txq_obj *obj, enum mlx5_txq_modify_type type,
 /**
  * Create a CQ Verbs object.
  *
- * @param dev
- *   Pointer to Ethernet device.
- * @param idx
- *   Queue index in DPDK Rx queue array.
+ * @param rxq
+ *   Pointer to Rx queue.
  *
  * @return
  *   The Verbs CQ object initialized, NULL otherwise and rte_errno is set.
  */
 static struct ibv_cq *
-mlx5_rxq_ibv_cq_create(struct rte_eth_dev *dev, uint16_t idx)
+mlx5_rxq_ibv_cq_create(struct mlx5_rxq_priv *rxq)
 {
-	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
-	struct mlx5_rxq_ctrl *rxq_ctrl =
-		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
+	struct mlx5_priv *priv = rxq->priv;
+	struct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;
+	struct mlx5_rxq_data *rxq_data = &rxq_ctrl->rxq;
 	struct mlx5_rxq_obj *rxq_obj = rxq_ctrl->obj;
 	unsigned int cqe_n = mlx5_rxq_cqe_num(rxq_data);
 	struct {
@@ -199,7 +196,7 @@ mlx5_rxq_ibv_cq_create(struct rte_eth_dev *dev, uint16_t idx)
 		DRV_LOG(DEBUG,
 			"Port %u Rx CQE compression is disabled for HW"
 			" timestamp.",
-			dev->data->port_id);
+			priv->dev_data->port_id);
 	}
 #ifdef HAVE_IBV_MLX5_MOD_CQE_128B_PAD
 	if (RTE_CACHE_LINE_SIZE == 128) {
@@ -216,21 +213,18 @@ mlx5_rxq_ibv_cq_create(struct rte_eth_dev *dev, uint16_t idx)
 /**
  * Create a WQ Verbs object.
  *
- * @param dev
- *   Pointer to Ethernet device.
- * @param idx
- *   Queue index in DPDK Rx queue array.
+ * @param rxq
+ *   Pointer to Rx queue.
  *
  * @return
  *   The Verbs WQ object initialized, NULL otherwise and rte_errno is set.
  */
 static struct ibv_wq *
-mlx5_rxq_ibv_wq_create(struct rte_eth_dev *dev, uint16_t idx)
+mlx5_rxq_ibv_wq_create(struct mlx5_rxq_priv *rxq)
 {
-	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
-	struct mlx5_rxq_ctrl *rxq_ctrl =
-		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
+	struct mlx5_priv *priv = rxq->priv;
+	struct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;
+	struct mlx5_rxq_data *rxq_data = &rxq_ctrl->rxq;
 	struct mlx5_rxq_obj *rxq_obj = rxq_ctrl->obj;
 	unsigned int wqe_n = 1 << rxq_data->elts_n;
 	struct {
@@ -297,7 +291,7 @@ mlx5_rxq_ibv_wq_create(struct rte_eth_dev *dev, uint16_t idx)
 			DRV_LOG(ERR,
 				"Port %u Rx queue %u requested %u*%u but got"
 				" %u*%u WRs*SGEs.",
-				dev->data->port_id, idx,
+				priv->dev_data->port_id, rxq->idx,
 				wqe_n >> rxq_data->sges_n,
 				(1 << rxq_data->sges_n),
 				wq_attr.ibv.max_wr, wq_attr.ibv.max_sge);
@@ -312,21 +306,20 @@ mlx5_rxq_ibv_wq_create(struct rte_eth_dev *dev, uint16_t idx)
 /**
  * Create the Rx queue Verbs object.
  *
- * @param dev
- *   Pointer to Ethernet device.
- * @param idx
- *   Queue index in DPDK Rx queue array.
+ * @param rxq
+ *   Pointer to Rx queue.
  *
  * @return
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 static int
-mlx5_rxq_ibv_obj_new(struct rte_eth_dev *dev, uint16_t idx)
+mlx5_rxq_ibv_obj_new(struct mlx5_rxq_priv *rxq)
 {
-	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
-	struct mlx5_rxq_ctrl *rxq_ctrl =
-		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
+	uint16_t idx = rxq->idx;
+	struct mlx5_priv *priv = rxq->priv;
+	uint16_t port_id = priv->dev_data->port_id;
+	struct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;
+	struct mlx5_rxq_data *rxq_data = &rxq_ctrl->rxq;
 	struct mlx5_rxq_obj *tmpl = rxq_ctrl->obj;
 	struct mlx5dv_cq cq_info;
 	struct mlx5dv_rwq rwq;
@@ -341,17 +334,17 @@ mlx5_rxq_ibv_obj_new(struct rte_eth_dev *dev, uint16_t idx)
 			mlx5_glue->create_comp_channel(priv->sh->cdev->ctx);
 		if (!tmpl->ibv_channel) {
 			DRV_LOG(ERR, "Port %u: comp channel creation failure.",
-				dev->data->port_id);
+				port_id);
 			rte_errno = ENOMEM;
 			goto error;
 		}
 		tmpl->fd = ((struct ibv_comp_channel *)(tmpl->ibv_channel))->fd;
 	}
 	/* Create CQ using Verbs API. */
-	tmpl->ibv_cq = mlx5_rxq_ibv_cq_create(dev, idx);
+	tmpl->ibv_cq = mlx5_rxq_ibv_cq_create(rxq);
 	if (!tmpl->ibv_cq) {
 		DRV_LOG(ERR, "Port %u Rx queue %u CQ creation failure.",
-			dev->data->port_id, idx);
+			port_id, idx);
 		rte_errno = ENOMEM;
 		goto error;
 	}
@@ -366,7 +359,7 @@ mlx5_rxq_ibv_obj_new(struct rte_eth_dev *dev, uint16_t idx)
 		DRV_LOG(ERR,
 			"Port %u wrong MLX5_CQE_SIZE environment "
 			"variable value: it should be set to %u.",
-			dev->data->port_id, RTE_CACHE_LINE_SIZE);
+			port_id, RTE_CACHE_LINE_SIZE);
 		rte_errno = EINVAL;
 		goto error;
 	}
@@ -377,19 +370,19 @@ mlx5_rxq_ibv_obj_new(struct rte_eth_dev *dev, uint16_t idx)
 	rxq_data->cq_uar = cq_info.cq_uar;
 	rxq_data->cqn = cq_info.cqn;
 	/* Create WQ (RQ) using Verbs API. */
-	tmpl->wq = mlx5_rxq_ibv_wq_create(dev, idx);
+	tmpl->wq = mlx5_rxq_ibv_wq_create(rxq);
 	if (!tmpl->wq) {
 		DRV_LOG(ERR, "Port %u Rx queue %u WQ creation failure.",
-			dev->data->port_id, idx);
+			port_id, idx);
 		rte_errno = ENOMEM;
 		goto error;
 	}
 	/* Change queue state to ready. */
-	ret = mlx5_ibv_modify_wq(tmpl, IBV_WQS_RDY);
+	ret = mlx5_ibv_modify_wq(rxq, IBV_WQS_RDY);
 	if (ret) {
 		DRV_LOG(ERR,
 			"Port %u Rx queue %u WQ state to IBV_WQS_RDY failed.",
-			dev->data->port_id, idx);
+			port_id, idx);
 		rte_errno = ret;
 		goto error;
 	}
@@ -405,7 +398,7 @@ mlx5_rxq_ibv_obj_new(struct rte_eth_dev *dev, uint16_t idx)
 	rxq_data->cq_arm_sn = 0;
 	mlx5_rxq_initialize(rxq_data);
 	rxq_data->cq_ci = 0;
-	dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
+	priv->dev_data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
 	rxq_ctrl->wqn = ((struct ibv_wq *)(tmpl->wq))->wq_num;
 	return 0;
 error:
@@ -423,12 +416,14 @@ mlx5_rxq_ibv_obj_new(struct rte_eth_dev *dev, uint16_t idx)
 /**
  * Release an Rx verbs queue object.
  *
- * @param rxq_obj
- *   Verbs Rx queue object.
+ * @param rxq
+ *   Pointer to Rx queue.
  */
 static void
-mlx5_rxq_ibv_obj_release(struct mlx5_rxq_obj *rxq_obj)
+mlx5_rxq_ibv_obj_release(struct mlx5_rxq_priv *rxq)
 {
+	struct mlx5_rxq_obj *rxq_obj = rxq->ctrl->obj;
+
 	MLX5_ASSERT(rxq_obj);
 	MLX5_ASSERT(rxq_obj->wq);
 	MLX5_ASSERT(rxq_obj->ibv_cq);
@@ -652,12 +647,24 @@ static void
 mlx5_rxq_ibv_obj_drop_release(struct rte_eth_dev *dev)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_rxq_obj *rxq = priv->drop_queue.rxq;
+	struct mlx5_rxq_priv *rxq = priv->drop_queue.rxq;
+	struct mlx5_rxq_obj *rxq_obj;
 
-	if (rxq->wq)
-		claim_zero(mlx5_glue->destroy_wq(rxq->wq));
-	if (rxq->ibv_cq)
-		claim_zero(mlx5_glue->destroy_cq(rxq->ibv_cq));
+	if (rxq == NULL)
+		return;
+	if (rxq->ctrl == NULL)
+		goto free_priv;
+	rxq_obj = rxq->ctrl->obj;
+	if (rxq_obj == NULL)
+		goto free_ctrl;
+	if (rxq_obj->wq)
+		claim_zero(mlx5_glue->destroy_wq(rxq_obj->wq));
+	if (rxq_obj->ibv_cq)
+		claim_zero(mlx5_glue->destroy_cq(rxq_obj->ibv_cq));
+	mlx5_free(rxq_obj);
+free_ctrl:
+	mlx5_free(rxq->ctrl);
+free_priv:
 	mlx5_free(rxq);
 	priv->drop_queue.rxq = NULL;
 }
@@ -676,39 +683,58 @@ mlx5_rxq_ibv_obj_drop_create(struct rte_eth_dev *dev)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct ibv_context *ctx = priv->sh->cdev->ctx;
-	struct mlx5_rxq_obj *rxq = priv->drop_queue.rxq;
+	struct mlx5_rxq_priv *rxq = priv->drop_queue.rxq;
+	struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
+	struct mlx5_rxq_obj *rxq_obj = NULL;
 
-	if (rxq)
+	if (rxq != NULL)
 		return 0;
 	rxq = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rxq), 0, SOCKET_ID_ANY);
-	if (!rxq) {
+	if (rxq == NULL) {
 		DRV_LOG(DEBUG, "Port %u cannot allocate drop Rx queue memory.",
 		      dev->data->port_id);
 		rte_errno = ENOMEM;
 		return -rte_errno;
 	}
 	priv->drop_queue.rxq = rxq;
-	rxq->ibv_cq = mlx5_glue->create_cq(ctx, 1, NULL, NULL, 0);
-	if (!rxq->ibv_cq) {
+	rxq_ctrl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rxq_ctrl), 0,
+			       SOCKET_ID_ANY);
+	if (rxq_ctrl == NULL) {
+		DRV_LOG(DEBUG, "Port %u cannot allocate drop Rx queue control memory.",
+		      dev->data->port_id);
+		rte_errno = ENOMEM;
+		goto error;
+	}
+	rxq->ctrl = rxq_ctrl;
+	rxq_obj = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rxq_obj), 0,
+			      SOCKET_ID_ANY);
+	if (rxq_obj == NULL) {
+		DRV_LOG(DEBUG, "Port %u cannot allocate drop Rx queue memory.",
+		      dev->data->port_id);
+		rte_errno = ENOMEM;
+		goto error;
+	}
+	rxq_ctrl->obj = rxq_obj;
+	rxq_obj->ibv_cq = mlx5_glue->create_cq(ctx, 1, NULL, NULL, 0);
+	if (!rxq_obj->ibv_cq) {
 		DRV_LOG(DEBUG, "Port %u cannot allocate CQ for drop queue.",
 		      dev->data->port_id);
 		rte_errno = errno;
 		goto error;
 	}
-	rxq->wq = mlx5_glue->create_wq(ctx, &(struct ibv_wq_init_attr){
+	rxq_obj->wq = mlx5_glue->create_wq(ctx, &(struct ibv_wq_init_attr){
 						    .wq_type = IBV_WQT_RQ,
 						    .max_wr = 1,
 						    .max_sge = 1,
 						    .pd = priv->sh->cdev->pd,
-						    .cq = rxq->ibv_cq,
+						    .cq = rxq_obj->ibv_cq,
 					      });
-	if (!rxq->wq) {
+	if (!rxq_obj->wq) {
 		DRV_LOG(DEBUG, "Port %u cannot allocate WQ for drop queue.",
 		      dev->data->port_id);
 		rte_errno = errno;
 		goto error;
 	}
-	priv->drop_queue.rxq = rxq;
 	return 0;
 error:
 	mlx5_rxq_ibv_obj_drop_release(dev);
@@ -737,7 +763,7 @@ mlx5_ibv_drop_action_create(struct rte_eth_dev *dev)
 	ret = mlx5_rxq_ibv_obj_drop_create(dev);
 	if (ret < 0)
 		goto error;
-	rxq = priv->drop_queue.rxq;
+	rxq = priv->drop_queue.rxq->ctrl->obj;
 	ind_tbl = mlx5_glue->create_rwq_ind_table
 				(priv->sh->cdev->ctx,
 				 &(struct ibv_rwq_ind_table_init_attr){
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 3e008241ca8..bc1b6b96cda 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -294,7 +294,7 @@ struct mlx5_vf_vlan {
 /* Flow drop context necessary due to Verbs API. */
 struct mlx5_drop {
 	struct mlx5_hrxq *hrxq; /* Hash Rx queue queue. */
-	struct mlx5_rxq_obj *rxq; /* Rx queue object. */
+	struct mlx5_rxq_priv *rxq; /* Rx queue. */
 };
 
 /* Loopback dummy queue resources required due to Verbs API. */
@@ -1239,7 +1239,6 @@ struct mlx5_rxq_obj {
 		};
 		struct mlx5_devx_obj *rq; /* DevX RQ object for hairpin. */
 		struct {
-			struct mlx5_devx_rq rq_obj; /* DevX RQ object. */
 			struct mlx5_devx_cq cq_obj; /* DevX CQ object. */
 			void *devx_channel;
 		};
@@ -1321,11 +1320,11 @@ struct mlx5_rxq_priv;
 
 /* HW objects operations structure. */
 struct mlx5_obj_ops {
-	int (*rxq_obj_modify_vlan_strip)(struct mlx5_rxq_obj *rxq_obj, int on);
-	int (*rxq_obj_new)(struct rte_eth_dev *dev, uint16_t idx);
+	int (*rxq_obj_modify_vlan_strip)(struct mlx5_rxq_priv *rxq, int on);
+	int (*rxq_obj_new)(struct mlx5_rxq_priv *rxq);
 	int (*rxq_event_get)(struct mlx5_rxq_obj *rxq_obj);
-	int (*rxq_obj_modify)(struct mlx5_rxq_obj *rxq_obj, uint8_t type);
-	void (*rxq_obj_release)(struct mlx5_rxq_obj *rxq_obj);
+	int (*rxq_obj_modify)(struct mlx5_rxq_priv *rxq, uint8_t type);
+	void (*rxq_obj_release)(struct mlx5_rxq_priv *rxq);
 	int (*ind_table_new)(struct rte_eth_dev *dev, const unsigned int log_n,
 			     struct mlx5_ind_table_obj *ind_tbl);
 	int (*ind_table_modify)(struct rte_eth_dev *dev,
diff --git a/drivers/net/mlx5/mlx5_devx.c b/drivers/net/mlx5/mlx5_devx.c
index 8b3651f5034..b90a5d82458 100644
--- a/drivers/net/mlx5/mlx5_devx.c
+++ b/drivers/net/mlx5/mlx5_devx.c
@@ -30,14 +30,16 @@
 /**
  * Modify RQ vlan stripping offload
  *
- * @param rxq_obj
- *   Rx queue object.
+ * @param rxq
+ *   Rx queue.
+ * @param on
+ *   Enable/disable VLAN stripping.
  *
  * @return
  *   0 on success, non-0 otherwise
  */
 static int
-mlx5_rxq_obj_modify_rq_vlan_strip(struct mlx5_rxq_obj *rxq_obj, int on)
+mlx5_rxq_obj_modify_rq_vlan_strip(struct mlx5_rxq_priv *rxq, int on)
 {
 	struct mlx5_devx_modify_rq_attr rq_attr;
 
@@ -46,14 +48,14 @@ mlx5_rxq_obj_modify_rq_vlan_strip(struct mlx5_rxq_obj *rxq_obj, int on)
 	rq_attr.state = MLX5_RQC_STATE_RDY;
 	rq_attr.vsd = (on ? 0 : 1);
 	rq_attr.modify_bitmask = MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD;
-	return mlx5_devx_cmd_modify_rq(rxq_obj->rq_obj.rq, &rq_attr);
+	return mlx5_devx_cmd_modify_rq(rxq->devx_rq.rq, &rq_attr);
 }
 
 /**
  * Modify RQ using DevX API.
  *
- * @param rxq_obj
- *   DevX Rx queue object.
+ * @param rxq
+ *   DevX rx queue.
  * @param type
  *   Type of change queue state.
  *
@@ -61,7 +63,7 @@ mlx5_rxq_obj_modify_rq_vlan_strip(struct mlx5_rxq_obj *rxq_obj, int on)
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 static int
-mlx5_devx_modify_rq(struct mlx5_rxq_obj *rxq_obj, uint8_t type)
+mlx5_devx_modify_rq(struct mlx5_rxq_priv *rxq, uint8_t type)
 {
 	struct mlx5_devx_modify_rq_attr rq_attr;
 
@@ -86,7 +88,7 @@ mlx5_devx_modify_rq(struct mlx5_rxq_obj *rxq_obj, uint8_t type)
 	default:
 		break;
 	}
-	return mlx5_devx_cmd_modify_rq(rxq_obj->rq_obj.rq, &rq_attr);
+	return mlx5_devx_cmd_modify_rq(rxq->devx_rq.rq, &rq_attr);
 }
 
 /**
@@ -145,42 +147,34 @@ mlx5_txq_devx_modify(struct mlx5_txq_obj *obj, enum mlx5_txq_modify_type type,
 	return 0;
 }
 
-/**
- * Destroy the Rx queue DevX object.
- *
- * @param rxq_obj
- *   Rxq object to destroy.
- */
-static void
-mlx5_rxq_release_devx_resources(struct mlx5_rxq_obj *rxq_obj)
-{
-	mlx5_devx_rq_destroy(&rxq_obj->rq_obj);
-	memset(&rxq_obj->rq_obj, 0, sizeof(rxq_obj->rq_obj));
-	mlx5_devx_cq_destroy(&rxq_obj->cq_obj);
-	memset(&rxq_obj->cq_obj, 0, sizeof(rxq_obj->cq_obj));
-}
-
 /**
  * Release an Rx DevX queue object.
  *
- * @param rxq_obj
- *   DevX Rx queue object.
+ * @param rxq
+ *   DevX Rx queue.
  */
 static void
-mlx5_rxq_devx_obj_release(struct mlx5_rxq_obj *rxq_obj)
+mlx5_rxq_devx_obj_release(struct mlx5_rxq_priv *rxq)
 {
-	MLX5_ASSERT(rxq_obj);
+	struct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;
+	struct mlx5_rxq_obj *rxq_obj = rxq_ctrl->obj;
+
+	MLX5_ASSERT(rxq != NULL);
+	MLX5_ASSERT(rxq_ctrl != NULL);
 	if (rxq_obj->rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN) {
 		MLX5_ASSERT(rxq_obj->rq);
-		mlx5_devx_modify_rq(rxq_obj, MLX5_RXQ_MOD_RDY2RST);
+		mlx5_devx_modify_rq(rxq, MLX5_RXQ_MOD_RDY2RST);
 		claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));
 	} else {
-		MLX5_ASSERT(rxq_obj->cq_obj.cq);
-		MLX5_ASSERT(rxq_obj->rq_obj.rq);
-		mlx5_rxq_release_devx_resources(rxq_obj);
-		if (rxq_obj->devx_channel)
+		mlx5_devx_rq_destroy(&rxq->devx_rq);
+		memset(&rxq->devx_rq, 0, sizeof(rxq->devx_rq));
+		mlx5_devx_cq_destroy(&rxq_obj->cq_obj);
+		memset(&rxq_obj->cq_obj, 0, sizeof(rxq_obj->cq_obj));
+		if (rxq_obj->devx_channel) {
 			mlx5_os_devx_destroy_event_channel
 							(rxq_obj->devx_channel);
+			rxq_obj->devx_channel = NULL;
+		}
 	}
 }
 
@@ -224,22 +218,19 @@ mlx5_rx_devx_get_event(struct mlx5_rxq_obj *rxq_obj)
 /**
  * Create a RQ object using DevX.
  *
- * @param dev
- *   Pointer to Ethernet device.
- * @param rxq_data
- *   RX queue data.
+ * @param rxq
+ *   Pointer to Rx queue.
  *
  * @return
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 static int
-mlx5_rxq_create_devx_rq_resources(struct rte_eth_dev *dev,
-				  struct mlx5_rxq_data *rxq_data)
+mlx5_rxq_create_devx_rq_resources(struct mlx5_rxq_priv *rxq)
 {
-	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_priv *priv = rxq->priv;
 	struct mlx5_common_device *cdev = priv->sh->cdev;
-	struct mlx5_rxq_ctrl *rxq_ctrl =
-		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
+	struct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;
+	struct mlx5_rxq_data *rxq_data = &rxq->ctrl->rxq;
 	struct mlx5_devx_create_rq_attr rq_attr = { 0 };
 	uint16_t log_desc_n = rxq_data->elts_n - rxq_data->sges_n;
 	uint32_t wqe_size, log_wqe_size;
@@ -281,31 +272,29 @@ mlx5_rxq_create_devx_rq_resources(struct rte_eth_dev *dev,
 	rq_attr.wq_attr.pd = cdev->pdn;
 	rq_attr.counter_set_id = priv->counter_set_id;
 	/* Create RQ using DevX API. */
-	return mlx5_devx_rq_create(cdev->ctx, &rxq_ctrl->obj->rq_obj, wqe_size,
+	return mlx5_devx_rq_create(cdev->ctx, &rxq->devx_rq, wqe_size,
 				   log_desc_n, &rq_attr, rxq_ctrl->socket);
 }
 
 /**
  * Create a DevX CQ object for an Rx queue.
  *
- * @param dev
- *   Pointer to Ethernet device.
- * @param rxq_data
- *   RX queue data.
+ * @param rxq
+ *   Pointer to Rx queue.
  *
  * @return
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 static int
-mlx5_rxq_create_devx_cq_resources(struct rte_eth_dev *dev,
-				  struct mlx5_rxq_data *rxq_data)
+mlx5_rxq_create_devx_cq_resources(struct mlx5_rxq_priv *rxq)
 {
 	struct mlx5_devx_cq *cq_obj = 0;
 	struct mlx5_devx_cq_attr cq_attr = { 0 };
-	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_priv *priv = rxq->priv;
 	struct mlx5_dev_ctx_shared *sh = priv->sh;
-	struct mlx5_rxq_ctrl *rxq_ctrl =
-		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
+	uint16_t port_id = priv->dev_data->port_id;
+	struct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;
+	struct mlx5_rxq_data *rxq_data = &rxq_ctrl->rxq;
 	unsigned int cqe_n = mlx5_rxq_cqe_num(rxq_data);
 	uint32_t log_cqe_n;
 	uint16_t event_nums[1] = { 0 };
@@ -346,7 +335,7 @@ mlx5_rxq_create_devx_cq_resources(struct rte_eth_dev *dev,
 		}
 		DRV_LOG(DEBUG,
 			"Port %u Rx CQE compression is enabled, format %d.",
-			dev->data->port_id, priv->config.cqe_comp_fmt);
+			port_id, priv->config.cqe_comp_fmt);
 		/*
 		 * For vectorized Rx, it must not be doubled in order to
 		 * make cq_ci and rq_ci aligned.
@@ -355,13 +344,12 @@ mlx5_rxq_create_devx_cq_resources(struct rte_eth_dev *dev,
 			cqe_n *= 2;
 	} else if (priv->config.cqe_comp && rxq_data->hw_timestamp) {
 		DRV_LOG(DEBUG,
-			"Port %u Rx CQE compression is disabled for HW"
-			" timestamp.",
-			dev->data->port_id);
+			"Port %u Rx CQE compression is disabled for HW timestamp.",
+			port_id);
 	} else if (priv->config.cqe_comp && rxq_data->lro) {
 		DRV_LOG(DEBUG,
 			"Port %u Rx CQE compression is disabled for LRO.",
-			dev->data->port_id);
+			port_id);
 	}
 	cq_attr.uar_page_id = mlx5_os_get_devx_uar_page_id(sh->devx_rx_uar);
 	log_cqe_n = log2above(cqe_n);
@@ -399,27 +387,23 @@ mlx5_rxq_create_devx_cq_resources(struct rte_eth_dev *dev,
 /**
  * Create the Rx hairpin queue object.
  *
- * @param dev
- *   Pointer to Ethernet device.
- * @param idx
- *   Queue index in DPDK Rx queue array.
+ * @param rxq
+ *   Pointer to Rx queue.
  *
  * @return
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 static int
-mlx5_rxq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
+mlx5_rxq_obj_hairpin_new(struct mlx5_rxq_priv *rxq)
 {
-	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
-	struct mlx5_rxq_ctrl *rxq_ctrl =
-		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
+	uint16_t idx = rxq->idx;
+	struct mlx5_priv *priv = rxq->priv;
+	struct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;
 	struct mlx5_devx_create_rq_attr attr = { 0 };
 	struct mlx5_rxq_obj *tmpl = rxq_ctrl->obj;
 	uint32_t max_wq_data;
 
-	MLX5_ASSERT(rxq_data);
-	MLX5_ASSERT(tmpl);
+	MLX5_ASSERT(rxq != NULL && rxq->ctrl != NULL && tmpl != NULL);
 	tmpl->rxq_ctrl = rxq_ctrl;
 	attr.hairpin = 1;
 	max_wq_data = priv->config.hca_attr.log_max_hairpin_wq_data_sz;
@@ -448,39 +432,36 @@ mlx5_rxq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
 	if (!tmpl->rq) {
 		DRV_LOG(ERR,
 			"Port %u Rx hairpin queue %u can't create rq object.",
-			dev->data->port_id, idx);
+			priv->dev_data->port_id, idx);
 		rte_errno = errno;
 		return -rte_errno;
 	}
-	dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_HAIRPIN;
+	priv->dev_data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_HAIRPIN;
 	return 0;
 }
 
 /**
  * Create the Rx queue DevX object.
  *
- * @param dev
- *   Pointer to Ethernet device.
- * @param idx
- *   Queue index in DPDK Rx queue array.
+ * @param rxq
+ *   Pointer to Rx queue.
  *
  * @return
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 static int
-mlx5_rxq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx)
+mlx5_rxq_devx_obj_new(struct mlx5_rxq_priv *rxq)
 {
-	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
-	struct mlx5_rxq_ctrl *rxq_ctrl =
-		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
+	struct mlx5_priv *priv = rxq->priv;
+	struct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;
+	struct mlx5_rxq_data *rxq_data = &rxq_ctrl->rxq;
 	struct mlx5_rxq_obj *tmpl = rxq_ctrl->obj;
 	int ret = 0;
 
 	MLX5_ASSERT(rxq_data);
 	MLX5_ASSERT(tmpl);
 	if (rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN)
-		return mlx5_rxq_obj_hairpin_new(dev, idx);
+		return mlx5_rxq_obj_hairpin_new(rxq);
 	tmpl->rxq_ctrl = rxq_ctrl;
 	if (rxq_ctrl->irq) {
 		int devx_ev_flag =
@@ -498,34 +479,32 @@ mlx5_rxq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx)
 		tmpl->fd = mlx5_os_get_devx_channel_fd(tmpl->devx_channel);
 	}
 	/* Create CQ using DevX API. */
-	ret = mlx5_rxq_create_devx_cq_resources(dev, rxq_data);
+	ret = mlx5_rxq_create_devx_cq_resources(rxq);
 	if (ret) {
 		DRV_LOG(ERR, "Failed to create CQ.");
 		goto error;
 	}
 	/* Create RQ using DevX API. */
-	ret = mlx5_rxq_create_devx_rq_resources(dev, rxq_data);
+	ret = mlx5_rxq_create_devx_rq_resources(rxq);
 	if (ret) {
 		DRV_LOG(ERR, "Port %u Rx queue %u RQ creation failure.",
-			dev->data->port_id, idx);
+			priv->dev_data->port_id, rxq->idx);
 		rte_errno = ENOMEM;
 		goto error;
 	}
 	/* Change queue state to ready. */
-	ret = mlx5_devx_modify_rq(tmpl, MLX5_RXQ_MOD_RST2RDY);
+	ret = mlx5_devx_modify_rq(rxq, MLX5_RXQ_MOD_RST2RDY);
 	if (ret)
 		goto error;
-	rxq_data->wqes = (void *)(uintptr_t)tmpl->rq_obj.wq.umem_buf;
-	rxq_data->rq_db = (uint32_t *)(uintptr_t)tmpl->rq_obj.wq.db_rec;
-	rxq_data->cq_arm_sn = 0;
-	rxq_data->cq_ci = 0;
+	rxq_data->wqes = (void *)(uintptr_t)rxq->devx_rq.wq.umem_buf;
+	rxq_data->rq_db = (uint32_t *)(uintptr_t)rxq->devx_rq.wq.db_rec;
 	mlx5_rxq_initialize(rxq_data);
-	dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
-	rxq_ctrl->wqn = tmpl->rq_obj.rq->id;
+	priv->dev_data->rx_queue_state[rxq->idx] = RTE_ETH_QUEUE_STATE_STARTED;
+	rxq_ctrl->wqn = rxq->devx_rq.rq->id;
 	return 0;
 error:
 	ret = rte_errno; /* Save rte_errno before cleanup. */
-	mlx5_rxq_devx_obj_release(tmpl);
+	mlx5_rxq_devx_obj_release(rxq);
 	rte_errno = ret; /* Restore rte_errno. */
 	return -rte_errno;
 }
@@ -571,15 +550,15 @@ mlx5_devx_ind_table_create_rqt_attr(struct rte_eth_dev *dev,
 	rqt_attr->rqt_actual_size = rqt_n;
 	if (queues == NULL) {
 		for (i = 0; i < rqt_n; i++)
-			rqt_attr->rq_list[i] = priv->drop_queue.rxq->rq->id;
+			rqt_attr->rq_list[i] =
+					priv->drop_queue.rxq->devx_rq.rq->id;
 		return rqt_attr;
 	}
 	for (i = 0; i != queues_n; ++i) {
-		struct mlx5_rxq_data *rxq = (*priv->rxqs)[queues[i]];
-		struct mlx5_rxq_ctrl *rxq_ctrl =
-				container_of(rxq, struct mlx5_rxq_ctrl, rxq);
+		struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, queues[i]);
 
-		rqt_attr->rq_list[i] = rxq_ctrl->obj->rq_obj.rq->id;
+		MLX5_ASSERT(rxq != NULL);
+		rqt_attr->rq_list[i] = rxq->devx_rq.rq->id;
 	}
 	MLX5_ASSERT(i > 0);
 	for (j = 0; i != rqt_n; ++j, ++i)
@@ -719,7 +698,7 @@ mlx5_devx_tir_attr_set(struct rte_eth_dev *dev, const uint8_t *rss_key,
 			}
 		}
 	} else {
-		rxq_obj_type = priv->drop_queue.rxq->rxq_ctrl->type;
+		rxq_obj_type = priv->drop_queue.rxq->ctrl->type;
 	}
 	memset(tir_attr, 0, sizeof(*tir_attr));
 	tir_attr->disp_type = MLX5_TIRC_DISP_TYPE_INDIRECT;
@@ -891,9 +870,9 @@ mlx5_rxq_devx_obj_drop_create(struct rte_eth_dev *dev)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	int socket_id = dev->device->numa_node;
-	struct mlx5_rxq_ctrl *rxq_ctrl;
-	struct mlx5_rxq_data *rxq_data;
-	struct mlx5_rxq_obj *rxq = NULL;
+	struct mlx5_rxq_priv *rxq;
+	struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
+	struct mlx5_rxq_obj *rxq_obj = NULL;
 	int ret;
 
 	/*
@@ -901,6 +880,13 @@ mlx5_rxq_devx_obj_drop_create(struct rte_eth_dev *dev)
 	 * They are required to hold pointers for cleanup
 	 * and are only accessible via drop queue DevX objects.
 	 */
+	rxq = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rxq), 0, socket_id);
+	if (rxq == NULL) {
+		DRV_LOG(ERR, "Port %u could not allocate drop queue private",
+			dev->data->port_id);
+		rte_errno = ENOMEM;
+		goto error;
+	}
 	rxq_ctrl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rxq_ctrl),
 			       0, socket_id);
 	if (rxq_ctrl == NULL) {
@@ -909,27 +895,29 @@ mlx5_rxq_devx_obj_drop_create(struct rte_eth_dev *dev)
 		rte_errno = ENOMEM;
 		goto error;
 	}
-	rxq = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rxq), 0, socket_id);
-	if (rxq == NULL) {
+	rxq_obj = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rxq_obj), 0, socket_id);
+	if (rxq_obj == NULL) {
 		DRV_LOG(ERR, "Port %u could not allocate drop queue object",
 			dev->data->port_id);
 		rte_errno = ENOMEM;
 		goto error;
 	}
-	rxq->rxq_ctrl = rxq_ctrl;
+	rxq_obj->rxq_ctrl = rxq_ctrl;
 	rxq_ctrl->type = MLX5_RXQ_TYPE_STANDARD;
 	rxq_ctrl->sh = priv->sh;
-	rxq_ctrl->obj = rxq;
-	rxq_data = &rxq_ctrl->rxq;
+	rxq_ctrl->obj = rxq_obj;
+	rxq->ctrl = rxq_ctrl;
+	rxq->priv = priv;
+	LIST_INSERT_HEAD(&rxq_ctrl->owners, rxq, owner_entry);
 	/* Create CQ using DevX API. */
-	ret = mlx5_rxq_create_devx_cq_resources(dev, rxq_data);
+	ret = mlx5_rxq_create_devx_cq_resources(rxq);
 	if (ret != 0) {
 		DRV_LOG(ERR, "Port %u drop queue CQ creation failed.",
 			dev->data->port_id);
 		goto error;
 	}
 	/* Create RQ using DevX API. */
-	ret = mlx5_rxq_create_devx_rq_resources(dev, rxq_data);
+	ret = mlx5_rxq_create_devx_rq_resources(rxq);
 	if (ret != 0) {
 		DRV_LOG(ERR, "Port %u drop queue RQ creation failed.",
 			dev->data->port_id);
@@ -945,18 +933,20 @@ mlx5_rxq_devx_obj_drop_create(struct rte_eth_dev *dev)
 	return 0;
 error:
 	ret = rte_errno; /* Save rte_errno before cleanup. */
-	if (rxq != NULL) {
-		if (rxq->rq_obj.rq != NULL)
-			mlx5_devx_rq_destroy(&rxq->rq_obj);
-		if (rxq->cq_obj.cq != NULL)
-			mlx5_devx_cq_destroy(&rxq->cq_obj);
-		if (rxq->devx_channel)
+	if (rxq != NULL && rxq->devx_rq.rq != NULL)
+		mlx5_devx_rq_destroy(&rxq->devx_rq);
+	if (rxq_obj != NULL) {
+		if (rxq_obj->cq_obj.cq != NULL)
+			mlx5_devx_cq_destroy(&rxq_obj->cq_obj);
+		if (rxq_obj->devx_channel)
 			mlx5_os_devx_destroy_event_channel
-							(rxq->devx_channel);
-		mlx5_free(rxq);
+							(rxq_obj->devx_channel);
+		mlx5_free(rxq_obj);
 	}
 	if (rxq_ctrl != NULL)
 		mlx5_free(rxq_ctrl);
+	if (rxq != NULL)
+		mlx5_free(rxq);
 	rte_errno = ret; /* Restore rte_errno. */
 	return -rte_errno;
 }
@@ -971,12 +961,13 @@ static void
 mlx5_rxq_devx_obj_drop_release(struct rte_eth_dev *dev)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_rxq_obj *rxq = priv->drop_queue.rxq;
-	struct mlx5_rxq_ctrl *rxq_ctrl = rxq->rxq_ctrl;
+	struct mlx5_rxq_priv *rxq = priv->drop_queue.rxq;
+	struct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;
 
 	mlx5_rxq_devx_obj_release(rxq);
-	mlx5_free(rxq);
+	mlx5_free(rxq_ctrl->obj);
 	mlx5_free(rxq_ctrl);
+	mlx5_free(rxq);
 	priv->drop_queue.rxq = NULL;
 }
 
@@ -996,7 +987,7 @@ mlx5_devx_drop_action_destroy(struct rte_eth_dev *dev)
 		mlx5_devx_tir_destroy(hrxq);
 	if (hrxq->ind_table->ind_table != NULL)
 		mlx5_devx_ind_table_destroy(hrxq->ind_table);
-	if (priv->drop_queue.rxq->rq != NULL)
+	if (priv->drop_queue.rxq->devx_rq.rq != NULL)
 		mlx5_rxq_devx_obj_drop_release(dev);
 }
 
diff --git a/drivers/net/mlx5/mlx5_rx.h b/drivers/net/mlx5/mlx5_rx.h
index c04c0c73349..337dcca59fb 100644
--- a/drivers/net/mlx5/mlx5_rx.h
+++ b/drivers/net/mlx5/mlx5_rx.h
@@ -174,6 +174,7 @@ struct mlx5_rxq_priv {
 	struct mlx5_rxq_ctrl *ctrl; /* Shared Rx Queue. */
 	LIST_ENTRY(mlx5_rxq_priv) owner_entry; /* Entry in shared rxq_ctrl. */
 	struct mlx5_priv *priv; /* Back pointer to private data. */
+	struct mlx5_devx_rq devx_rq;
 	struct rte_eth_hairpin_conf hairpin_conf; /* Hairpin configuration. */
 	uint32_t hairpin_status; /* Hairpin binding status. */
 };
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 5a20966e2ca..2850a220399 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -471,13 +471,13 @@ int
 mlx5_rx_queue_stop_primary(struct rte_eth_dev *dev, uint16_t idx)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
-	struct mlx5_rxq_ctrl *rxq_ctrl =
-			container_of(rxq, struct mlx5_rxq_ctrl, rxq);
+	struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
+	struct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;
 	int ret;
 
+	MLX5_ASSERT(rxq != NULL && rxq_ctrl != NULL);
 	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
-	ret = priv->obj_ops.rxq_obj_modify(rxq_ctrl->obj, MLX5_RXQ_MOD_RDY2RST);
+	ret = priv->obj_ops.rxq_obj_modify(rxq, MLX5_RXQ_MOD_RDY2RST);
 	if (ret) {
 		DRV_LOG(ERR, "Cannot change Rx WQ state to RESET:  %s",
 			strerror(errno));
@@ -485,7 +485,7 @@ mlx5_rx_queue_stop_primary(struct rte_eth_dev *dev, uint16_t idx)
 		return ret;
 	}
 	/* Remove all processes CQEs. */
-	rxq_sync_cq(rxq);
+	rxq_sync_cq(&rxq_ctrl->rxq);
 	/* Free all involved mbufs. */
 	rxq_free_elts(rxq_ctrl);
 	/* Set the actual queue state. */
@@ -557,26 +557,26 @@ int
 mlx5_rx_queue_start_primary(struct rte_eth_dev *dev, uint16_t idx)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
-	struct mlx5_rxq_ctrl *rxq_ctrl =
-			container_of(rxq, struct mlx5_rxq_ctrl, rxq);
+	struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
+	struct mlx5_rxq_data *rxq_data = &rxq->ctrl->rxq;
 	int ret;
 
-	MLX5_ASSERT(rte_eal_process_type() ==  RTE_PROC_PRIMARY);
+	MLX5_ASSERT(rxq != NULL && rxq->ctrl != NULL);
+	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
 	/* Allocate needed buffers. */
-	ret = rxq_alloc_elts(rxq_ctrl);
+	ret = rxq_alloc_elts(rxq->ctrl);
 	if (ret) {
 		DRV_LOG(ERR, "Cannot reallocate buffers for Rx WQ");
 		rte_errno = errno;
 		return ret;
 	}
 	rte_io_wmb();
-	*rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
+	*rxq_data->cq_db = rte_cpu_to_be_32(rxq_data->cq_ci);
 	rte_io_wmb();
 	/* Reset RQ consumer before moving queue to READY state. */
-	*rxq->rq_db = rte_cpu_to_be_32(0);
+	*rxq_data->rq_db = rte_cpu_to_be_32(0);
 	rte_io_wmb();
-	ret = priv->obj_ops.rxq_obj_modify(rxq_ctrl->obj, MLX5_RXQ_MOD_RST2RDY);
+	ret = priv->obj_ops.rxq_obj_modify(rxq, MLX5_RXQ_MOD_RST2RDY);
 	if (ret) {
 		DRV_LOG(ERR, "Cannot change Rx WQ state to READY:  %s",
 			strerror(errno));
@@ -584,8 +584,8 @@ mlx5_rx_queue_start_primary(struct rte_eth_dev *dev, uint16_t idx)
 		return ret;
 	}
 	/* Reinitialize RQ - set WQEs. */
-	mlx5_rxq_initialize(rxq);
-	rxq->err_state = MLX5_RXQ_ERR_STATE_NO_ERROR;
+	mlx5_rxq_initialize(rxq_data);
+	rxq_data->err_state = MLX5_RXQ_ERR_STATE_NO_ERROR;
 	/* Set actual queue state. */
 	dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
 	return 0;
@@ -1835,15 +1835,19 @@ int
 mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
-	struct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;
+	struct mlx5_rxq_priv *rxq;
+	struct mlx5_rxq_ctrl *rxq_ctrl;
 
-	if (priv->rxqs == NULL || (*priv->rxqs)[idx] == NULL)
+	if (priv->rxq_privs == NULL)
+		return 0;
+	rxq = mlx5_rxq_get(dev, idx);
+	if (rxq == NULL)
 		return 0;
 	if (mlx5_rxq_deref(dev, idx) > 1)
 		return 1;
-	if (rxq_ctrl->obj) {
-		priv->obj_ops.rxq_obj_release(rxq_ctrl->obj);
+	rxq_ctrl = rxq->ctrl;
+	if (rxq_ctrl->obj != NULL) {
+		priv->obj_ops.rxq_obj_release(rxq);
 		LIST_REMOVE(rxq_ctrl->obj, next);
 		mlx5_free(rxq_ctrl->obj);
 		rxq_ctrl->obj = NULL;
diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index 0bcdff1b116..54d410b513b 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -373,11 +373,9 @@ mlx5_queue_state_modify_primary(struct rte_eth_dev *dev,
 	struct mlx5_priv *priv = dev->data->dev_private;
 
 	if (sm->is_wq) {
-		struct mlx5_rxq_data *rxq = (*priv->rxqs)[sm->queue_id];
-		struct mlx5_rxq_ctrl *rxq_ctrl =
-			container_of(rxq, struct mlx5_rxq_ctrl, rxq);
+		struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, sm->queue_id);
 
-		ret = priv->obj_ops.rxq_obj_modify(rxq_ctrl->obj, sm->state);
+		ret = priv->obj_ops.rxq_obj_modify(rxq, sm->state);
 		if (ret) {
 			DRV_LOG(ERR, "Cannot change Rx WQ state to %u  - %s",
 					sm->state, strerror(errno));
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index caafdf27e8f..2cf62a9780d 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -231,7 +231,7 @@ mlx5_rxq_start(struct rte_eth_dev *dev)
 			rte_errno = ENOMEM;
 			goto error;
 		}
-		ret = priv->obj_ops.rxq_obj_new(dev, i);
+		ret = priv->obj_ops.rxq_obj_new(rxq);
 		if (ret) {
 			mlx5_free(rxq_ctrl->obj);
 			rxq_ctrl->obj = NULL;
diff --git a/drivers/net/mlx5/mlx5_vlan.c b/drivers/net/mlx5/mlx5_vlan.c
index 07792fc5d94..ea841bb32fb 100644
--- a/drivers/net/mlx5/mlx5_vlan.c
+++ b/drivers/net/mlx5/mlx5_vlan.c
@@ -91,11 +91,11 @@ void
 mlx5_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_rxq_data *rxq = (*priv->rxqs)[queue];
-	struct mlx5_rxq_ctrl *rxq_ctrl =
-		container_of(rxq, struct mlx5_rxq_ctrl, rxq);
+	struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, queue);
+	struct mlx5_rxq_data *rxq_data = &rxq->ctrl->rxq;
 	int ret = 0;
 
+	MLX5_ASSERT(rxq != NULL && rxq->ctrl != NULL);
 	/* Validate hw support */
 	if (!priv->config.hw_vlan_strip) {
 		DRV_LOG(ERR, "port %u VLAN stripping is not supported",
@@ -109,20 +109,20 @@ mlx5_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
 		return;
 	}
 	DRV_LOG(DEBUG, "port %u set VLAN stripping offloads %d for port %uqueue %d",
-		dev->data->port_id, on, rxq->port_id, queue);
-	if (!rxq_ctrl->obj) {
+		dev->data->port_id, on, rxq_data->port_id, queue);
+	if (rxq->ctrl->obj == NULL) {
 		/* Update related bits in RX queue. */
-		rxq->vlan_strip = !!on;
+		rxq_data->vlan_strip = !!on;
 		return;
 	}
-	ret = priv->obj_ops.rxq_obj_modify_vlan_strip(rxq_ctrl->obj, on);
+	ret = priv->obj_ops.rxq_obj_modify_vlan_strip(rxq, on);
 	if (ret) {
 		DRV_LOG(ERR, "Port %u failed to modify object stripping mode:"
 			" %s", dev->data->port_id, strerror(rte_errno));
 		return;
 	}
 	/* Update related bits in RX queue. */
-	rxq->vlan_strip = !!on;
+	rxq_data->vlan_strip = !!on;
 }
 
 /**
-- 
2.33.0



Thread overview: 266+ messages
2021-07-27  3:42 [dpdk-dev] [RFC] ethdev: introduce shared Rx queue Xueming Li
2021-07-28  7:56 ` Andrew Rybchenko
2021-07-28  8:20   ` Xueming(Steven) Li
2021-08-09 11:47 ` [dpdk-dev] [PATCH v1] " Xueming Li
2021-08-09 13:50   ` Jerin Jacob
2021-08-09 14:16     ` Xueming(Steven) Li
2021-08-11  8:02       ` Jerin Jacob
2021-08-11  8:28         ` Xueming(Steven) Li
2021-08-11 12:04           ` Ferruh Yigit
2021-08-11 12:59             ` Xueming(Steven) Li
2021-08-12 14:35               ` Xueming(Steven) Li
2021-09-15 15:34               ` Xueming(Steven) Li
2021-09-26  5:35             ` Xueming(Steven) Li
2021-09-28  9:35               ` Jerin Jacob
2021-09-28 11:36                 ` Xueming(Steven) Li
2021-09-28 11:37                 ` Xueming(Steven) Li
2021-09-28 11:37                 ` Xueming(Steven) Li
2021-09-28 12:58                   ` Jerin Jacob
2021-09-28 13:25                     ` Xueming(Steven) Li
2021-09-28 13:38                       ` Jerin Jacob
2021-09-28 13:59                         ` Ananyev, Konstantin
2021-09-28 14:40                           ` Xueming(Steven) Li
2021-09-28 14:59                             ` Jerin Jacob
2021-09-29  7:41                               ` Xueming(Steven) Li
2021-09-29  8:05                                 ` Jerin Jacob
2021-10-08  8:26                                   ` Xueming(Steven) Li
2021-10-10  9:46                                     ` Jerin Jacob
2021-10-10 13:40                                       ` Xueming(Steven) Li
2021-10-11  4:10                                         ` Jerin Jacob
2021-09-29  0:26                             ` Ananyev, Konstantin
2021-09-29  8:40                               ` Xueming(Steven) Li
2021-09-29 10:20                                 ` Ananyev, Konstantin
2021-09-29 13:25                                   ` Xueming(Steven) Li
2021-09-30  9:59                                     ` Ananyev, Konstantin
2021-10-06  7:54                                       ` Xueming(Steven) Li
2021-09-29  9:12                               ` Xueming(Steven) Li
2021-09-29  9:52                                 ` Ananyev, Konstantin
2021-09-29 11:07                                   ` Bruce Richardson
2021-09-29 11:46                                     ` Ananyev, Konstantin
2021-09-29 12:17                                       ` Bruce Richardson
2021-09-29 12:08                                   ` Xueming(Steven) Li
2021-09-29 12:35                                     ` Ananyev, Konstantin
2021-09-29 14:54                                       ` Xueming(Steven) Li
2021-09-28 14:51                         ` Xueming(Steven) Li
2021-09-28 12:59                 ` Xueming(Steven) Li
2021-08-11 14:04 ` [dpdk-dev] [PATCH v2 01/15] " Xueming Li
2021-08-11 14:04   ` [dpdk-dev] [PATCH v2 02/15] app/testpmd: dump port and queue info for each packet Xueming Li
2021-08-11 14:04   ` [dpdk-dev] [PATCH v2 03/15] app/testpmd: new parameter to enable shared Rx queue Xueming Li
2021-08-11 14:04   ` [dpdk-dev] [PATCH v2 04/15] app/testpmd: make sure shared Rx queue polled on same core Xueming Li
2021-08-11 14:04   ` [dpdk-dev] [PATCH v2 05/15] app/testpmd: adds common forwarding for shared Rx queue Xueming Li
2021-08-11 14:04   ` [dpdk-dev] [PATCH v2 06/15] app/testpmd: add common fwd wrapper function Xueming Li
2021-08-17  9:37     ` Jerin Jacob
2021-08-18 11:27       ` Xueming(Steven) Li
2021-08-18 11:47         ` Jerin Jacob
2021-08-18 14:08           ` Xueming(Steven) Li
2021-08-26 11:28             ` Jerin Jacob
2021-08-29  7:07               ` Xueming(Steven) Li
2021-09-01 14:44                 ` Xueming(Steven) Li
2021-09-28  5:54                   ` Xueming(Steven) Li
2021-08-11 14:04   ` [dpdk-dev] [PATCH v2 07/15] app/testpmd: support shared Rx queues for IO forwarding Xueming Li
2021-08-11 14:04   ` [dpdk-dev] [PATCH v2 08/15] app/testpmd: support shared Rx queue for rxonly forwarding Xueming Li
2021-08-11 14:04   ` [dpdk-dev] [PATCH v2 09/15] app/testpmd: support shared Rx queue for icmpecho fwd Xueming Li
2021-08-11 14:04   ` [dpdk-dev] [PATCH v2 10/15] app/testpmd: support shared Rx queue for csum fwd Xueming Li
2021-08-11 14:04   ` [dpdk-dev] [PATCH v2 11/15] app/testpmd: support shared Rx queue for flowgen Xueming Li
2021-08-11 14:04   ` [dpdk-dev] [PATCH v2 12/15] app/testpmd: support shared Rx queue for MAC fwd Xueming Li
2021-08-11 14:04   ` [dpdk-dev] [PATCH v2 13/15] app/testpmd: support shared Rx queue for macswap fwd Xueming Li
2021-08-11 14:04   ` [dpdk-dev] [PATCH v2 14/15] app/testpmd: support shared Rx queue for 5tuple fwd Xueming Li
2021-08-11 14:04   ` [dpdk-dev] [PATCH v2 15/15] app/testpmd: support shared Rx queue for ieee1588 fwd Xueming Li
2021-08-17  9:33   ` [dpdk-dev] [PATCH v2 01/15] ethdev: introduce shared Rx queue Jerin Jacob
2021-08-17 11:31     ` Xueming(Steven) Li
2021-08-17 15:11       ` Jerin Jacob
2021-08-18 11:14         ` Xueming(Steven) Li
2021-08-19  5:26           ` Jerin Jacob
2021-08-19 12:09             ` Xueming(Steven) Li
2021-08-26 11:58               ` Jerin Jacob
2021-08-28 14:16                 ` Xueming(Steven) Li
2021-08-30  9:31                   ` Jerin Jacob
2021-08-30 10:13                     ` Xueming(Steven) Li
2021-09-15 14:45                     ` Xueming(Steven) Li
2021-09-16  4:16                       ` Jerin Jacob
2021-09-28  5:50                         ` Xueming(Steven) Li
2021-09-17  8:01 ` [dpdk-dev] [PATCH v3 0/8] " Xueming Li
2021-09-17  8:01   ` [dpdk-dev] [PATCH v3 1/8] " Xueming Li
2021-09-27 23:53     ` Ajit Khaparde
2021-09-28 14:24       ` Xueming(Steven) Li
2021-09-17  8:01   ` [dpdk-dev] [PATCH v3 2/8] ethdev: new API to aggregate shared Rx queue group Xueming Li
2021-09-26 17:54     ` Ajit Khaparde
2021-09-17  8:01   ` [dpdk-dev] [PATCH v3 3/8] app/testpmd: dump port and queue info for each packet Xueming Li
2021-09-17  8:01   ` [dpdk-dev] [PATCH v3 4/8] app/testpmd: new parameter to enable shared Rx queue Xueming Li
2021-09-17  8:01   ` [dpdk-dev] [PATCH v3 5/8] app/testpmd: force shared Rx queue polled on same core Xueming Li
2021-09-17  8:01   ` [dpdk-dev] [PATCH v3 6/8] app/testpmd: add common fwd wrapper Xueming Li
2021-09-17 11:24     ` Jerin Jacob
2021-09-17  8:01   ` [dpdk-dev] [PATCH v3 7/8] app/testpmd: improve forwarding cache miss Xueming Li
2021-09-17  8:01   ` [dpdk-dev] [PATCH v3 8/8] app/testpmd: support shared Rx queue forwarding Xueming Li
2021-09-30 14:55 ` [dpdk-dev] [PATCH v4 0/6] ethdev: introduce shared Rx queue Xueming Li
2021-09-30 14:55   ` [dpdk-dev] [PATCH v4 1/6] " Xueming Li
2021-10-11 10:47     ` Andrew Rybchenko
2021-10-11 13:12       ` Xueming(Steven) Li
2021-09-30 14:55   ` [dpdk-dev] [PATCH v4 2/6] ethdev: new API to aggregate shared Rx queue group Xueming Li
2021-09-30 14:55   ` [dpdk-dev] [PATCH v4 3/6] app/testpmd: new parameter to enable shared Rx queue Xueming Li
2021-09-30 14:56   ` [dpdk-dev] [PATCH v4 4/6] app/testpmd: dump port info for " Xueming Li
2021-09-30 14:56   ` [dpdk-dev] [PATCH v4 5/6] app/testpmd: force shared Rx queue polled on same core Xueming Li
2021-09-30 14:56   ` [dpdk-dev] [PATCH v4 6/6] app/testpmd: add forwarding engine for shared Rx queue Xueming Li
2021-10-11 11:49   ` [dpdk-dev] [PATCH v4 0/6] ethdev: introduce " Andrew Rybchenko
2021-10-11 15:11     ` Xueming(Steven) Li
2021-10-12  6:37       ` Xueming(Steven) Li
2021-10-12  8:48         ` Andrew Rybchenko
2021-10-12 10:55           ` Xueming(Steven) Li
2021-10-12 11:28             ` Andrew Rybchenko
2021-10-12 11:33               ` Xueming(Steven) Li
2021-10-13  7:53               ` Xueming(Steven) Li
2021-10-11 12:37 ` [dpdk-dev] [PATCH v5 0/5] " Xueming Li
2021-10-11 12:37   ` [dpdk-dev] [PATCH v5 1/5] " Xueming Li
2021-10-11 12:37   ` [dpdk-dev] [PATCH v5 2/5] app/testpmd: new parameter to enable " Xueming Li
2021-10-11 12:37   ` [dpdk-dev] [PATCH v5 3/5] app/testpmd: dump port info for " Xueming Li
2021-10-11 12:37   ` [dpdk-dev] [PATCH v5 4/5] app/testpmd: force shared Rx queue polled on same core Xueming Li
2021-10-11 12:37   ` [dpdk-dev] [PATCH v5 5/5] app/testpmd: add forwarding engine for shared Rx queue Xueming Li
2021-10-12 14:39 ` [dpdk-dev] [PATCH v6 0/5] ethdev: introduce " Xueming Li
2021-10-12 14:39   ` [dpdk-dev] [PATCH v6 1/5] " Xueming Li
2021-10-15  9:28     ` Andrew Rybchenko
2021-10-15 10:54       ` Xueming(Steven) Li
2021-10-18  6:46         ` Andrew Rybchenko
2021-10-18  6:57           ` Xueming(Steven) Li
2021-10-15 17:20     ` Ferruh Yigit
2021-10-16  9:14       ` Xueming(Steven) Li
2021-10-12 14:39   ` [dpdk-dev] [PATCH v6 2/5] app/testpmd: new parameter to enable " Xueming Li
2021-10-12 14:39   ` [dpdk-dev] [PATCH v6 3/5] app/testpmd: dump port info for " Xueming Li
2021-10-12 14:39   ` [dpdk-dev] [PATCH v6 4/5] app/testpmd: force shared Rx queue polled on same core Xueming Li
2021-10-12 14:39   ` [dpdk-dev] [PATCH v6 5/5] app/testpmd: add forwarding engine for shared Rx queue Xueming Li
2021-10-16  8:42 ` [dpdk-dev] [PATCH v7 0/5] ethdev: introduce " Xueming Li
2021-10-16  8:42   ` [dpdk-dev] [PATCH v7 1/5] " Xueming Li
2021-10-17  5:33     ` Ajit Khaparde
2021-10-17  7:29       ` Xueming(Steven) Li
2021-10-16  8:42   ` [dpdk-dev] [PATCH v7 2/5] app/testpmd: new parameter to enable " Xueming Li
2021-10-16  8:42   ` [dpdk-dev] [PATCH v7 3/5] app/testpmd: dump port info for " Xueming Li
2021-10-16  8:42   ` [dpdk-dev] [PATCH v7 4/5] app/testpmd: force shared Rx queue polled on same core Xueming Li
2021-10-16  8:42   ` [dpdk-dev] [PATCH v7 5/5] app/testpmd: add forwarding engine for shared Rx queue Xueming Li
2021-10-18 12:59 ` [dpdk-dev] [PATCH v8 0/6] ethdev: introduce " Xueming Li
2021-10-18 12:59   ` [dpdk-dev] [PATCH v8 1/6] " Xueming Li
2021-10-19  0:21     ` Ajit Khaparde
2021-10-19  5:54       ` Xueming(Steven) Li
2021-10-19  6:28     ` Andrew Rybchenko
2021-10-18 12:59   ` [dpdk-dev] [PATCH v8 2/6] app/testpmd: dump device capability and Rx domain info Xueming Li
2021-10-18 12:59   ` [dpdk-dev] [PATCH v8 3/6] app/testpmd: new parameter to enable shared Rx queue Xueming Li
2021-10-18 12:59   ` [dpdk-dev] [PATCH v8 4/6] app/testpmd: dump port info for " Xueming Li
2021-10-18 12:59   ` [dpdk-dev] [PATCH v8 5/6] app/testpmd: force shared Rx queue polled on same core Xueming Li
2021-10-18 12:59   ` [dpdk-dev] [PATCH v8 6/6] app/testpmd: add forwarding engine for shared Rx queue Xueming Li
2021-10-19  8:17 ` [dpdk-dev] [PATCH v9 0/6] ethdev: introduce " Xueming Li
2021-10-19  8:17   ` [dpdk-dev] [PATCH v9 1/6] " Xueming Li
2021-10-19  8:17   ` [dpdk-dev] [PATCH v9 2/6] app/testpmd: dump device capability and Rx domain info Xueming Li
2021-10-19  8:33     ` Andrew Rybchenko
2021-10-19  9:10       ` Xueming(Steven) Li
2021-10-19  9:39         ` Andrew Rybchenko
2021-10-19  8:17   ` [dpdk-dev] [PATCH v9 3/6] app/testpmd: new parameter to enable shared Rx queue Xueming Li
2021-10-19  8:17   ` [dpdk-dev] [PATCH v9 4/6] app/testpmd: dump port info for " Xueming Li
2021-10-19  8:17   ` [dpdk-dev] [PATCH v9 5/6] app/testpmd: force shared Rx queue polled on same core Xueming Li
2021-10-19  8:17   ` [dpdk-dev] [PATCH v9 6/6] app/testpmd: add forwarding engine for shared Rx queue Xueming Li
2021-10-19 15:20 ` [dpdk-dev] [PATCH v10 0/6] ethdev: introduce " Xueming Li
2021-10-19 15:20   ` [dpdk-dev] [PATCH v10 1/6] ethdev: new API to resolve device capability name Xueming Li
2021-10-19 15:20   ` [dpdk-dev] [PATCH v10 2/6] app/testpmd: dump device capability and Rx domain info Xueming Li
2021-10-19 15:20   ` [dpdk-dev] [PATCH v10 3/6] app/testpmd: new parameter to enable shared Rx queue Xueming Li
2021-10-19 15:20   ` [dpdk-dev] [PATCH v10 4/6] app/testpmd: dump port info for " Xueming Li
2021-10-19 15:20   ` [dpdk-dev] [PATCH v10 5/6] app/testpmd: force shared Rx queue polled on same core Xueming Li
2021-10-19 15:20   ` [dpdk-dev] [PATCH v10 6/6] app/testpmd: add forwarding engine for shared Rx queue Xueming Li
2021-10-19 15:28 ` [dpdk-dev] [PATCH v10 0/7] ethdev: introduce " Xueming Li
2021-10-19 15:28   ` [dpdk-dev] [PATCH v10 1/7] " Xueming Li
2021-10-19 15:28   ` [dpdk-dev] [PATCH v10 2/7] ethdev: new API to resolve device capability name Xueming Li
2021-10-19 17:57     ` Andrew Rybchenko
2021-10-20  7:47       ` Xueming(Steven) Li
2021-10-20  7:48         ` Andrew Rybchenko
2021-10-19 15:28   ` [dpdk-dev] [PATCH v10 3/7] app/testpmd: dump device capability and Rx domain info Xueming Li
2021-10-19 15:28   ` [dpdk-dev] [PATCH v10 4/7] app/testpmd: new parameter to enable shared Rx queue Xueming Li
2021-10-19 15:28   ` [dpdk-dev] [PATCH v10 5/7] app/testpmd: dump port info for " Xueming Li
2021-10-19 15:28   ` [dpdk-dev] [PATCH v10 6/7] app/testpmd: force shared Rx queue polled on same core Xueming Li
2021-10-19 15:28   ` [dpdk-dev] [PATCH v10 7/7] app/testpmd: add forwarding engine for shared Rx queue Xueming Li
2021-10-20  7:53 ` [dpdk-dev] [PATCH v11 0/7] ethdev: introduce " Xueming Li
2021-10-20  7:53   ` [dpdk-dev] [PATCH v11 1/7] " Xueming Li
2021-10-20 17:14     ` Ajit Khaparde
2021-10-20  7:53   ` [dpdk-dev] [PATCH v11 2/7] ethdev: new API to resolve device capability name Xueming Li
2021-10-20 10:52     ` Andrew Rybchenko
2021-10-20 17:16       ` Ajit Khaparde
2021-10-20 18:42     ` Thomas Monjalon
2021-10-20  7:53   ` [dpdk-dev] [PATCH v11 3/7] app/testpmd: dump device capability and Rx domain info Xueming Li
2021-10-21  3:24     ` Li, Xiaoyun
2021-10-21  3:28       ` Ajit Khaparde
2021-10-20  7:53   ` [dpdk-dev] [PATCH v11 4/7] app/testpmd: new parameter to enable shared Rx queue Xueming Li
2021-10-20 17:29     ` Ajit Khaparde
2021-10-20 19:14       ` Thomas Monjalon
2021-10-21  4:09         ` Xueming(Steven) Li
2021-10-21  3:49       ` Xueming(Steven) Li
2021-10-21  3:24     ` Li, Xiaoyun
2021-10-21  3:58       ` Xueming(Steven) Li
2021-10-21  5:15         ` Li, Xiaoyun
2021-10-20  7:53   ` [dpdk-dev] [PATCH v11 5/7] app/testpmd: dump port info for " Xueming Li
2021-10-21  3:24     ` Li, Xiaoyun
2021-10-20  7:53   ` [dpdk-dev] [PATCH v11 6/7] app/testpmd: force shared Rx queue polled on same core Xueming Li
2021-10-21  3:24     ` Li, Xiaoyun
2021-10-21  4:21       ` Xueming(Steven) Li
2021-10-20  7:53   ` [dpdk-dev] [PATCH v11 7/7] app/testpmd: add forwarding engine for shared Rx queue Xueming Li
2021-10-20 19:20     ` Thomas Monjalon
2021-10-21  3:26       ` Li, Xiaoyun
2021-10-21  4:39       ` Xueming(Steven) Li
2021-10-21  5:08 ` [dpdk-dev] [PATCH v12 0/7] ethdev: introduce " Xueming Li
2021-10-21  5:08   ` [dpdk-dev] [PATCH v12 1/7] " Xueming Li
2021-10-21  5:08   ` [dpdk-dev] [PATCH v12 2/7] ethdev: get device capability name as string Xueming Li
2021-10-21  5:08   ` [dpdk-dev] [PATCH v12 3/7] app/testpmd: dump device capability and Rx domain info Xueming Li
2021-10-21  5:08   ` [dpdk-dev] [PATCH v12 4/7] app/testpmd: new parameter to enable shared Rx queue Xueming Li
2021-10-21  9:20     ` Thomas Monjalon
2021-10-21  5:08   ` [dpdk-dev] [PATCH v12 5/7] app/testpmd: dump port info for " Xueming Li
2021-10-21  5:08   ` [dpdk-dev] [PATCH v12 6/7] app/testpmd: force shared Rx queue polled on same core Xueming Li
2021-10-21  6:35     ` Li, Xiaoyun
2021-10-21  5:08   ` [dpdk-dev] [PATCH v12 7/7] app/testpmd: add forwarding engine for shared Rx queue Xueming Li
2021-10-21  6:33     ` Li, Xiaoyun
2021-10-21  7:58       ` Xueming(Steven) Li
2021-10-21  8:01         ` Li, Xiaoyun
2021-10-21  8:22           ` Xueming(Steven) Li
2021-10-21  9:28     ` Thomas Monjalon
2021-10-21 10:41 ` [dpdk-dev] [PATCH v13 0/7] ethdev: introduce " Xueming Li
2021-10-21 10:41   ` [dpdk-dev] [PATCH v13 1/7] " Xueming Li
2021-10-21 10:41   ` [dpdk-dev] [PATCH v13 2/7] ethdev: get device capability name as string Xueming Li
2021-10-21 10:41   ` [dpdk-dev] [PATCH v13 3/7] app/testpmd: dump device capability and Rx domain info Xueming Li
2021-10-21 10:41   ` [dpdk-dev] [PATCH v13 4/7] app/testpmd: new parameter to enable shared Rx queue Xueming Li
2021-10-21 19:45     ` Ajit Khaparde
2021-10-21 10:41   ` [dpdk-dev] [PATCH v13 5/7] app/testpmd: dump port info for " Xueming Li
2021-10-21 19:48     ` Ajit Khaparde
2021-10-21 10:41   ` [dpdk-dev] [PATCH v13 6/7] app/testpmd: force shared Rx queue polled on same core Xueming Li
2021-10-21 10:41   ` [dpdk-dev] [PATCH v13 7/7] app/testpmd: add forwarding engine for shared Rx queue Xueming Li
2021-10-21 23:41   ` [dpdk-dev] [PATCH v13 0/7] ethdev: introduce " Ferruh Yigit
2021-10-22  6:31     ` Xueming(Steven) Li
2021-11-04 15:52   ` Tom Barbette
2021-11-03  7:58 ` [dpdk-dev] [PATCH v3 00/14] net/mlx5: support " Xueming Li
2021-11-03  7:58   ` [dpdk-dev] [PATCH v3 01/14] common/mlx5: introduce user index field in completion Xueming Li
2021-11-04  9:14     ` Slava Ovsiienko
2021-11-03  7:58   ` [dpdk-dev] [PATCH v3 02/14] net/mlx5: fix field reference for PPC Xueming Li
2021-11-03  7:58   ` [dpdk-dev] [PATCH v3 03/14] common/mlx5: adds basic receive memory pool support Xueming Li
2021-11-03  7:58   ` [dpdk-dev] [PATCH v3 04/14] common/mlx5: support receive memory pool Xueming Li
2021-11-03  7:58   ` [dpdk-dev] [PATCH v3 05/14] net/mlx5: fix Rx queue memory allocation return value Xueming Li
2021-11-03  7:58   ` [dpdk-dev] [PATCH v3 06/14] net/mlx5: clean Rx queue code Xueming Li
2021-11-03  7:58   ` [dpdk-dev] [PATCH v3 07/14] net/mlx5: split Rx queue into shareable and private Xueming Li
2021-11-03  7:58   ` [dpdk-dev] [PATCH v3 08/14] net/mlx5: move Rx queue reference count Xueming Li
2021-11-03  7:58   ` [dpdk-dev] [PATCH v3 09/14] net/mlx5: move Rx queue hairpin info to private data Xueming Li
2021-11-03  7:58   ` [dpdk-dev] [PATCH v3 10/14] net/mlx5: remove port info from shareable Rx queue Xueming Li
2021-11-03  7:58   ` Xueming Li [this message]
2021-11-03  7:58   ` [dpdk-dev] [PATCH v3 12/14] net/mlx5: remove Rx queue data list from device Xueming Li
2021-11-03  7:58   ` [dpdk-dev] [PATCH v3 13/14] net/mlx5: support shared Rx queue Xueming Li
2021-11-03  7:58   ` [dpdk-dev] [PATCH v3 14/14] net/mlx5: add shared Rx queue port datapath support Xueming Li
2021-11-04 12:33 ` [dpdk-dev] [PATCH v4 00/14] net/mlx5: support shared Rx queue Xueming Li
2021-11-04 12:33   ` [dpdk-dev] [PATCH v4 01/14] common/mlx5: introduce user index field in completion Xueming Li
2021-11-04 12:33   ` [dpdk-dev] [PATCH v4 02/14] net/mlx5: fix field reference for PPC Xueming Li
2021-11-04 17:07     ` Raslan Darawsheh
2021-11-04 17:49     ` David Christensen
2021-11-04 12:33   ` [dpdk-dev] [PATCH v4 03/14] common/mlx5: adds basic receive memory pool support Xueming Li
2021-11-04 12:33   ` [dpdk-dev] [PATCH v4 04/14] common/mlx5: support receive memory pool Xueming Li
2021-11-04 12:33   ` [dpdk-dev] [PATCH v4 05/14] net/mlx5: fix Rx queue memory allocation return value Xueming Li
2021-11-04 12:33   ` [dpdk-dev] [PATCH v4 06/14] net/mlx5: clean Rx queue code Xueming Li
2021-11-04 12:33   ` [dpdk-dev] [PATCH v4 07/14] net/mlx5: split Rx queue into shareable and private Xueming Li
2021-11-04 12:33   ` [dpdk-dev] [PATCH v4 08/14] net/mlx5: move Rx queue reference count Xueming Li
2021-11-04 12:33   ` [dpdk-dev] [PATCH v4 09/14] net/mlx5: move Rx queue hairpin info to private data Xueming Li
2021-11-04 12:33   ` [dpdk-dev] [PATCH v4 10/14] net/mlx5: remove port info from shareable Rx queue Xueming Li
2021-11-04 12:33   ` [dpdk-dev] [PATCH v4 11/14] net/mlx5: move Rx queue DevX resource Xueming Li
2021-11-04 12:33   ` [dpdk-dev] [PATCH v4 12/14] net/mlx5: remove Rx queue data list from device Xueming Li
2021-11-04 12:33   ` [dpdk-dev] [PATCH v4 13/14] net/mlx5: support shared Rx queue Xueming Li
2021-11-04 12:33   ` [dpdk-dev] [PATCH v4 14/14] net/mlx5: add shared Rx queue port datapath support Xueming Li
2021-11-04 17:50     ` David Christensen
2021-11-05  6:40     ` Ruifeng Wang
2021-11-04 20:06   ` [dpdk-dev] [PATCH v4 00/14] net/mlx5: support shared Rx queue Raslan Darawsheh
