DPDK patches and discussions
 help / color / mirror / Atom feed
From: Raja Zidane <rzidane@nvidia.com>
To: <dev@dpdk.org>
Subject: [dpdk-dev] [PATCH 1/5] common/mlx5: share DevX QP operations
Date: Fri, 3 Sep 2021 14:21:53 +0000	[thread overview]
Message-ID: <20210903142157.25617-2-rzidane@nvidia.com> (raw)
In-Reply-To: <20210903142157.25617-1-rzidane@nvidia.com>

Currently, drivers that use a QP (vDPA, crypto and compress; soon regex)
each manage the QP's memory, creation, modification and destruction
in almost identical code.
Move QP memory management, creation and destruction to common.
Add common function to change QP state to RTS.
Add user_index attribute to QP creation.
It's for better code maintenance and reuse.

Signed-off-by: Raja Zidane <rzidane@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
---
 drivers/common/mlx5/mlx5_common_devx.c | 144 +++++++++++++++++++++++++
 drivers/common/mlx5/mlx5_common_devx.h |  23 ++++
 drivers/common/mlx5/mlx5_devx_cmds.c   |   1 +
 drivers/common/mlx5/mlx5_devx_cmds.h   |   1 +
 drivers/common/mlx5/version.map        |   3 +
 drivers/crypto/mlx5/mlx5_crypto.c      |  96 ++++-------------
 drivers/crypto/mlx5/mlx5_crypto.h      |   5 +-
 drivers/vdpa/mlx5/mlx5_vdpa.h          |   5 +-
 drivers/vdpa/mlx5/mlx5_vdpa_event.c    |  53 +++------
 9 files changed, 209 insertions(+), 122 deletions(-)

diff --git a/drivers/common/mlx5/mlx5_common_devx.c b/drivers/common/mlx5/mlx5_common_devx.c
index 22c8d356c4..825f84b183 100644
--- a/drivers/common/mlx5/mlx5_common_devx.c
+++ b/drivers/common/mlx5/mlx5_common_devx.c
@@ -271,6 +271,115 @@ mlx5_devx_sq_create(void *ctx, struct mlx5_devx_sq *sq_obj, uint16_t log_wqbb_n,
 	return -rte_errno;
 }
 
+/**
+ * Destroy DevX Queue Pair.
+ *
+ * @param[in] qp
+ *   DevX QP to destroy.
+ */
+void
+mlx5_devx_qp_destroy(struct mlx5_devx_qp *qp)
+{
+	if (qp->qp)
+		claim_zero(mlx5_devx_cmd_destroy(qp->qp));
+	if (qp->umem_obj)
+		claim_zero(mlx5_os_umem_dereg(qp->umem_obj));
+	if (qp->umem_buf)
+		mlx5_free((void *)(uintptr_t)qp->umem_buf);
+}
+
+/**
+ * Create Queue Pair using DevX API.
+ *
+ * Get a pointer to partially initialized attributes structure, and updates the
+ * following fields:
+ *   wq_umem_id
+ *   wq_umem_offset
+ *   dbr_umem_valid
+ *   dbr_umem_id
+ *   dbr_address
+ *   log_page_size
+ * All other fields are updated by caller.
+ *
+ * @param[in] ctx
+ *   Context returned from mlx5 open_device() glue function.
+ * @param[in,out] qp_obj
+ *   Pointer to QP to create.
+ * @param[in] log_wqbb_n
+ *   Log of number of WQBBs in queue.
+ * @param[in] attr
+ *   Pointer to QP attributes structure.
+ * @param[in] socket
+ *   Socket to use for allocation.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_devx_qp_create(void *ctx, struct mlx5_devx_qp *qp_obj, uint16_t log_wqbb_n,
+		    struct mlx5_devx_qp_attr *attr, int socket)
+{
+	struct mlx5_devx_obj *qp = NULL;
+	struct mlx5dv_devx_umem *umem_obj = NULL;
+	void *umem_buf = NULL;
+	size_t alignment = MLX5_WQE_BUF_ALIGNMENT;
+	uint32_t umem_size, umem_dbrec;
+	uint16_t qp_size = 1 << log_wqbb_n;
+	int ret;
+
+	if (alignment == (size_t)-1) {
+		DRV_LOG(ERR, "Failed to get WQE buf alignment.");
+		rte_errno = ENOMEM;
+		return -rte_errno;
+	}
+	/* Allocate memory buffer for WQEs and doorbell record. */
+	umem_size = MLX5_WQE_SIZE * qp_size;
+	umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);
+	umem_size += MLX5_DBR_SIZE;
+	umem_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, umem_size,
+			       alignment, socket);
+	if (!umem_buf) {
+		DRV_LOG(ERR, "Failed to allocate memory for QP.");
+		rte_errno = ENOMEM;
+		return -rte_errno;
+	}
+	/* Register allocated buffer in user space with DevX. */
+	umem_obj = mlx5_os_umem_reg(ctx, (void *)(uintptr_t)umem_buf, umem_size,
+				    IBV_ACCESS_LOCAL_WRITE);
+	if (!umem_obj) {
+		DRV_LOG(ERR, "Failed to register umem for QP.");
+		rte_errno = errno;
+		goto error;
+	}
+	/* Fill attributes for QP object creation. */
+	attr->wq_umem_id = mlx5_os_get_umem_id(umem_obj);
+	attr->wq_umem_offset = 0;
+	attr->dbr_umem_valid = 1;
+	attr->dbr_umem_id = attr->wq_umem_id;
+	attr->dbr_address = umem_dbrec;
+	attr->log_page_size = MLX5_LOG_PAGE_SIZE;
+	/* Create QP object with DevX. */
+	qp = mlx5_devx_cmd_create_qp(ctx, attr);
+	if (!qp) {
+		DRV_LOG(ERR, "Can't create DevX QP object.");
+		rte_errno = ENOMEM;
+		goto error;
+	}
+	qp_obj->umem_buf = umem_buf;
+	qp_obj->umem_obj = umem_obj;
+	qp_obj->qp = qp;
+	qp_obj->db_rec = RTE_PTR_ADD(qp_obj->umem_buf, umem_dbrec);
+	return 0;
+error:
+	ret = rte_errno;
+	if (umem_obj)
+		claim_zero(mlx5_os_umem_dereg(umem_obj));
+	if (umem_buf)
+		mlx5_free((void *)(uintptr_t)umem_buf);
+	rte_errno = ret;
+	return -rte_errno;
+}
+
 /**
  * Destroy DevX Receive Queue.
  *
@@ -385,3 +494,38 @@ mlx5_devx_rq_create(void *ctx, struct mlx5_devx_rq *rq_obj, uint32_t wqe_size,
 	return -rte_errno;
 }
 
+
+/**
+ * Change QP state to RTS.
+ *
+ * @param[in] qp
+ *   DevX QP to change.
+ * @param[in] remote_qp_id
+ *   The remote QP ID for MLX5_CMD_OP_INIT2RTR_QP operation.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_devx_qp2rts(struct mlx5_devx_qp *qp, uint32_t remote_qp_id)
+{
+	if (mlx5_devx_cmd_modify_qp_state(qp->qp, MLX5_CMD_OP_RST2INIT_QP,
+					  remote_qp_id)) {
+		DRV_LOG(ERR, "Failed to modify QP to INIT state(%u).",
+			rte_errno);
+		return -1;
+	}
+	if (mlx5_devx_cmd_modify_qp_state(qp->qp, MLX5_CMD_OP_INIT2RTR_QP,
+					  remote_qp_id)) {
+		DRV_LOG(ERR, "Failed to modify QP to RTR state(%u).",
+			rte_errno);
+		return -1;
+	}
+	if (mlx5_devx_cmd_modify_qp_state(qp->qp, MLX5_CMD_OP_RTR2RTS_QP,
+					  remote_qp_id)) {
+		DRV_LOG(ERR, "Failed to modify QP to RTS state(%u).",
+			rte_errno);
+		return -1;
+	}
+	return 0;
+}
diff --git a/drivers/common/mlx5/mlx5_common_devx.h b/drivers/common/mlx5/mlx5_common_devx.h
index aad0184e5a..f699405f69 100644
--- a/drivers/common/mlx5/mlx5_common_devx.h
+++ b/drivers/common/mlx5/mlx5_common_devx.h
@@ -33,6 +33,18 @@ struct mlx5_devx_sq {
 	volatile uint32_t *db_rec; /* The SQ doorbell record. */
 };
 
+/* DevX Queue Pair structure. */
+struct mlx5_devx_qp {
+	struct mlx5_devx_obj *qp; /* The QP DevX object. */
+	void *umem_obj; /* The QP umem object. */
+	union {
+		void *umem_buf;
+		struct mlx5_wqe *wqes; /* The QP ring buffer. */
+		struct mlx5_aso_wqe *aso_wqes;
+	};
+	volatile uint32_t *db_rec; /* The QP doorbell record. */
+};
+
 /* DevX Receive Queue structure. */
 struct mlx5_devx_rq {
 	struct mlx5_devx_obj *rq; /* The RQ DevX object. */
@@ -59,6 +71,14 @@ int mlx5_devx_sq_create(void *ctx, struct mlx5_devx_sq *sq_obj,
 			uint16_t log_wqbb_n,
 			struct mlx5_devx_create_sq_attr *attr, int socket);
 
+__rte_internal
+void mlx5_devx_qp_destroy(struct mlx5_devx_qp *qp);
+
+__rte_internal
+int mlx5_devx_qp_create(void *ctx, struct mlx5_devx_qp *qp_obj,
+			uint16_t log_wqbb_n,
+			struct mlx5_devx_qp_attr *attr, int socket);
+
 __rte_internal
 void mlx5_devx_rq_destroy(struct mlx5_devx_rq *rq);
 
@@ -67,4 +87,7 @@ int mlx5_devx_rq_create(void *ctx, struct mlx5_devx_rq *rq_obj,
 			uint32_t wqe_size, uint16_t log_wqbb_n,
 			struct mlx5_devx_create_rq_attr *attr, int socket);
 
+__rte_internal
+int mlx5_devx_qp2rts(struct mlx5_devx_qp *qp, uint32_t remote_qp_id);
+
 #endif /* RTE_PMD_MLX5_COMMON_DEVX_H_ */
diff --git a/drivers/common/mlx5/mlx5_devx_cmds.c b/drivers/common/mlx5/mlx5_devx_cmds.c
index 56407cc332..ac554cca05 100644
--- a/drivers/common/mlx5/mlx5_devx_cmds.c
+++ b/drivers/common/mlx5/mlx5_devx_cmds.c
@@ -2021,6 +2021,7 @@ mlx5_devx_cmd_create_qp(void *ctx,
 	MLX5_SET(qpc, qpc, st, MLX5_QP_ST_RC);
 	MLX5_SET(qpc, qpc, pd, attr->pd);
 	MLX5_SET(qpc, qpc, ts_format, attr->ts_format);
+	MLX5_SET(qpc, qpc, user_index, attr->user_index);
 	if (attr->uar_index) {
 		MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
 		MLX5_SET(qpc, qpc, uar_page, attr->uar_index);
diff --git a/drivers/common/mlx5/mlx5_devx_cmds.h b/drivers/common/mlx5/mlx5_devx_cmds.h
index e576e30f24..c071629904 100644
--- a/drivers/common/mlx5/mlx5_devx_cmds.h
+++ b/drivers/common/mlx5/mlx5_devx_cmds.h
@@ -397,6 +397,7 @@ struct mlx5_devx_qp_attr {
 	uint64_t dbr_address;
 	uint32_t wq_umem_id;
 	uint64_t wq_umem_offset;
+	uint32_t user_index:24;
 };
 
 struct mlx5_devx_virtio_q_couners_attr {
diff --git a/drivers/common/mlx5/version.map b/drivers/common/mlx5/version.map
index e5cb6b7060..d3c5040aac 100644
--- a/drivers/common/mlx5/version.map
+++ b/drivers/common/mlx5/version.map
@@ -67,6 +67,9 @@ INTERNAL {
 
 	mlx5_devx_get_out_command_status;
 
+	mlx5_devx_qp2rts;
+	mlx5_devx_qp_create;
+	mlx5_devx_qp_destroy;
 	mlx5_devx_rq_create;
 	mlx5_devx_rq_destroy;
 	mlx5_devx_sq_create;
diff --git a/drivers/crypto/mlx5/mlx5_crypto.c b/drivers/crypto/mlx5/mlx5_crypto.c
index b3d5200ca3..1d91dc5737 100644
--- a/drivers/crypto/mlx5/mlx5_crypto.c
+++ b/drivers/crypto/mlx5/mlx5_crypto.c
@@ -257,12 +257,7 @@ mlx5_crypto_queue_pair_release(struct rte_cryptodev *dev, uint16_t qp_id)
 {
 	struct mlx5_crypto_qp *qp = dev->data->queue_pairs[qp_id];
 
-	if (qp->qp_obj != NULL)
-		claim_zero(mlx5_devx_cmd_destroy(qp->qp_obj));
-	if (qp->umem_obj != NULL)
-		claim_zero(mlx5_glue->devx_umem_dereg(qp->umem_obj));
-	if (qp->umem_buf != NULL)
-		rte_free(qp->umem_buf);
+	mlx5_devx_qp_destroy(&qp->qp_obj);
 	mlx5_mr_btree_free(&qp->mr_ctrl.cache_bh);
 	mlx5_devx_cq_destroy(&qp->cq_obj);
 	rte_free(qp);
@@ -270,34 +265,6 @@ mlx5_crypto_queue_pair_release(struct rte_cryptodev *dev, uint16_t qp_id)
 	return 0;
 }
 
-static int
-mlx5_crypto_qp2rts(struct mlx5_crypto_qp *qp)
-{
-	/*
-	 * In Order to configure self loopback, when calling these functions the
-	 * remote QP id that is used is the id of the same QP.
-	 */
-	if (mlx5_devx_cmd_modify_qp_state(qp->qp_obj, MLX5_CMD_OP_RST2INIT_QP,
-					  qp->qp_obj->id)) {
-		DRV_LOG(ERR, "Failed to modify QP to INIT state(%u).",
-			rte_errno);
-		return -1;
-	}
-	if (mlx5_devx_cmd_modify_qp_state(qp->qp_obj, MLX5_CMD_OP_INIT2RTR_QP,
-					  qp->qp_obj->id)) {
-		DRV_LOG(ERR, "Failed to modify QP to RTR state(%u).",
-			rte_errno);
-		return -1;
-	}
-	if (mlx5_devx_cmd_modify_qp_state(qp->qp_obj, MLX5_CMD_OP_RTR2RTS_QP,
-					  qp->qp_obj->id)) {
-		DRV_LOG(ERR, "Failed to modify QP to RTS state(%u).",
-			rte_errno);
-		return -1;
-	}
-	return 0;
-}
-
 static __rte_noinline uint32_t
 mlx5_crypto_get_block_size(struct rte_crypto_op *op)
 {
@@ -452,7 +419,7 @@ mlx5_crypto_wqe_set(struct mlx5_crypto_priv *priv,
 		memcpy(klms, &umr->kseg[0], sizeof(*klms) * klm_n);
 	}
 	ds = 2 + klm_n;
-	cseg->sq_ds = rte_cpu_to_be_32((qp->qp_obj->id << 8) | ds);
+	cseg->sq_ds = rte_cpu_to_be_32((qp->qp_obj.qp->id << 8) | ds);
 	cseg->opcode = rte_cpu_to_be_32((qp->db_pi << 8) |
 							MLX5_OPCODE_RDMA_WRITE);
 	ds = RTE_ALIGN(ds, 4);
@@ -461,7 +428,7 @@ mlx5_crypto_wqe_set(struct mlx5_crypto_priv *priv,
 	if (priv->max_rdmar_ds > ds) {
 		cseg += ds;
 		ds = priv->max_rdmar_ds - ds;
-		cseg->sq_ds = rte_cpu_to_be_32((qp->qp_obj->id << 8) | ds);
+		cseg->sq_ds = rte_cpu_to_be_32((qp->qp_obj.qp->id << 8) | ds);
 		cseg->opcode = rte_cpu_to_be_32((qp->db_pi << 8) |
 							       MLX5_OPCODE_NOP);
 		qp->db_pi += ds >> 2; /* Here, DS is 4 aligned for sure. */
@@ -503,7 +470,7 @@ mlx5_crypto_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
 		return 0;
 	do {
 		op = *ops++;
-		umr = RTE_PTR_ADD(qp->umem_buf, priv->wqe_set_size * qp->pi);
+		umr = RTE_PTR_ADD(qp->qp_obj.umem_buf, priv->wqe_set_size * qp->pi);
 		if (unlikely(mlx5_crypto_wqe_set(priv, qp, op, umr) == 0)) {
 			qp->stats.enqueue_err_count++;
 			if (remain != nb_ops) {
@@ -517,7 +484,7 @@ mlx5_crypto_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
 	} while (--remain);
 	qp->stats.enqueued_count += nb_ops;
 	rte_io_wmb();
-	qp->db_rec[MLX5_SND_DBR] = rte_cpu_to_be_32(qp->db_pi);
+	qp->qp_obj.db_rec[MLX5_SND_DBR] = rte_cpu_to_be_32(qp->db_pi);
 	rte_wmb();
 	mlx5_crypto_uar_write(*(volatile uint64_t *)qp->wqe, qp->priv);
 	rte_wmb();
@@ -583,7 +550,7 @@ mlx5_crypto_qp_init(struct mlx5_crypto_priv *priv, struct mlx5_crypto_qp *qp)
 	uint32_t i;
 
 	for (i = 0 ; i < qp->entries_n; i++) {
-		struct mlx5_wqe_cseg *cseg = RTE_PTR_ADD(qp->umem_buf, i *
+		struct mlx5_wqe_cseg *cseg = RTE_PTR_ADD(qp->qp_obj.umem_buf, i *
 							 priv->wqe_set_size);
 		struct mlx5_wqe_umr_cseg *ucseg = (struct mlx5_wqe_umr_cseg *)
 								     (cseg + 1);
@@ -593,7 +560,7 @@ mlx5_crypto_qp_init(struct mlx5_crypto_priv *priv, struct mlx5_crypto_qp *qp)
 		struct mlx5_wqe_rseg *rseg;
 
 		/* Init UMR WQE. */
-		cseg->sq_ds = rte_cpu_to_be_32((qp->qp_obj->id << 8) |
+		cseg->sq_ds = rte_cpu_to_be_32((qp->qp_obj.qp->id << 8) |
 					 (priv->umr_wqe_size / MLX5_WSEG_SIZE));
 		cseg->flags = RTE_BE32(MLX5_COMP_ONLY_FIRST_ERR <<
 				       MLX5_COMP_MODE_OFFSET);
@@ -628,7 +595,7 @@ mlx5_crypto_indirect_mkeys_prepare(struct mlx5_crypto_priv *priv,
 		.klm_num = RTE_ALIGN(priv->max_segs_num, 4),
 	};
 
-	for (umr = (struct mlx5_umr_wqe *)qp->umem_buf, i = 0;
+	for (umr = (struct mlx5_umr_wqe *)qp->qp_obj.umem_buf, i = 0;
 	   i < qp->entries_n; i++, umr = RTE_PTR_ADD(umr, priv->wqe_set_size)) {
 		attr.klm_array = (struct mlx5_klm *)&umr->kseg[0];
 		qp->mkey[i] = mlx5_devx_cmd_mkey_create(priv->ctx, &attr);
@@ -649,9 +616,7 @@ mlx5_crypto_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
 	struct mlx5_devx_qp_attr attr = {0};
 	struct mlx5_crypto_qp *qp;
 	uint16_t log_nb_desc = rte_log2_u32(qp_conf->nb_descriptors);
-	uint32_t umem_size = RTE_BIT32(log_nb_desc) *
-			      priv->wqe_set_size +
-			      sizeof(*qp->db_rec) * 2;
+	uint32_t ret;
 	uint32_t alloc_size = sizeof(*qp);
 	struct mlx5_devx_cq_attr cq_attr = {
 		.uar_page_id = mlx5_os_get_devx_uar_page_id(priv->uar),
@@ -675,18 +640,14 @@ mlx5_crypto_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
 		DRV_LOG(ERR, "Failed to create CQ.");
 		goto error;
 	}
-	qp->umem_buf = rte_zmalloc_socket(__func__, umem_size, 4096, socket_id);
-	if (qp->umem_buf == NULL) {
-		DRV_LOG(ERR, "Failed to allocate QP umem.");
-		rte_errno = ENOMEM;
-		goto error;
-	}
-	qp->umem_obj = mlx5_glue->devx_umem_reg(priv->ctx,
-					       (void *)(uintptr_t)qp->umem_buf,
-					       umem_size,
-					       IBV_ACCESS_LOCAL_WRITE);
-	if (qp->umem_obj == NULL) {
-		DRV_LOG(ERR, "Failed to register QP umem.");
+	attr.pd = priv->pdn;
+	attr.uar_index = mlx5_os_get_devx_uar_page_id(priv->uar);
+	attr.cqn = qp->cq_obj.cq->id;
+	attr.rq_size =  0;
+	attr.sq_size = RTE_BIT32(log_nb_desc);
+	ret = mlx5_devx_qp_create(priv->ctx, &qp->qp_obj, log_nb_desc, &attr, socket_id);
+	if(ret) {
+		DRV_LOG(ERR, "Failed to create QP.");
 		goto error;
 	}
 	if (mlx5_mr_btree_init(&qp->mr_ctrl.cache_bh, MLX5_MR_BTREE_CACHE_N,
@@ -697,24 +658,11 @@ mlx5_crypto_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
 		goto error;
 	}
 	qp->mr_ctrl.dev_gen_ptr = &priv->mr_scache.dev_gen;
-	attr.pd = priv->pdn;
-	attr.uar_index = mlx5_os_get_devx_uar_page_id(priv->uar);
-	attr.cqn = qp->cq_obj.cq->id;
-	attr.log_page_size = rte_log2_u32(sysconf(_SC_PAGESIZE));
-	attr.rq_size =  0;
-	attr.sq_size = RTE_BIT32(log_nb_desc);
-	attr.dbr_umem_valid = 1;
-	attr.wq_umem_id = qp->umem_obj->umem_id;
-	attr.wq_umem_offset = 0;
-	attr.dbr_umem_id = qp->umem_obj->umem_id;
-	attr.dbr_address = RTE_BIT64(log_nb_desc) * priv->wqe_set_size;
-	qp->qp_obj = mlx5_devx_cmd_create_qp(priv->ctx, &attr);
-	if (qp->qp_obj == NULL) {
-		DRV_LOG(ERR, "Failed to create QP(%u).", rte_errno);
-		goto error;
-	}
-	qp->db_rec = RTE_PTR_ADD(qp->umem_buf, (uintptr_t)attr.dbr_address);
-	if (mlx5_crypto_qp2rts(qp))
+	/*
+	 * In order to configure self loopback, when calling mlx5_devx_qp2rts the
+	 * remote QP id that is used is the id of the same QP.
+	 */
+	if (mlx5_devx_qp2rts(&qp->qp_obj, qp->qp_obj.qp->id))
 		goto error;
 	qp->mkey = (struct mlx5_devx_obj **)RTE_ALIGN((uintptr_t)(qp + 1),
 							   RTE_CACHE_LINE_SIZE);
diff --git a/drivers/crypto/mlx5/mlx5_crypto.h b/drivers/crypto/mlx5/mlx5_crypto.h
index d49b0001f0..013eed30b5 100644
--- a/drivers/crypto/mlx5/mlx5_crypto.h
+++ b/drivers/crypto/mlx5/mlx5_crypto.h
@@ -43,11 +43,8 @@ struct mlx5_crypto_priv {
 struct mlx5_crypto_qp {
 	struct mlx5_crypto_priv *priv;
 	struct mlx5_devx_cq cq_obj;
-	struct mlx5_devx_obj *qp_obj;
+	struct mlx5_devx_qp qp_obj;
 	struct rte_cryptodev_stats stats;
-	struct mlx5dv_devx_umem *umem_obj;
-	void *umem_buf;
-	volatile uint32_t *db_rec;
 	struct rte_crypto_op **ops;
 	struct mlx5_devx_obj **mkey; /* WQE's indirect mekys. */
 	struct mlx5_mr_ctrl mr_ctrl;
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.h b/drivers/vdpa/mlx5/mlx5_vdpa.h
index 2a04e36607..a27f3fdadb 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa.h
+++ b/drivers/vdpa/mlx5/mlx5_vdpa.h
@@ -54,10 +54,7 @@ struct mlx5_vdpa_cq {
 struct mlx5_vdpa_event_qp {
 	struct mlx5_vdpa_cq cq;
 	struct mlx5_devx_obj *fw_qp;
-	struct mlx5_devx_obj *sw_qp;
-	struct mlx5dv_devx_umem *umem_obj;
-	void *umem_buf;
-	volatile uint32_t *db_rec;
+	struct mlx5_devx_qp sw_qp;
 };
 
 struct mlx5_vdpa_query_mr {
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_event.c b/drivers/vdpa/mlx5/mlx5_vdpa_event.c
index 3541c652ce..b557c93dd4 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_event.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_event.c
@@ -179,7 +179,7 @@ mlx5_vdpa_cq_poll(struct mlx5_vdpa_cq *cq)
 		cq->cq_obj.db_rec[0] = rte_cpu_to_be_32(cq->cq_ci);
 		rte_io_wmb();
 		/* Ring SW QP doorbell record. */
-		eqp->db_rec[0] = rte_cpu_to_be_32(cq->cq_ci + cq_size);
+		eqp->sw_qp.db_rec[0] = rte_cpu_to_be_32(cq->cq_ci + cq_size);
 	}
 	return comp;
 }
@@ -531,12 +531,7 @@ mlx5_vdpa_cqe_event_unset(struct mlx5_vdpa_priv *priv)
 void
 mlx5_vdpa_event_qp_destroy(struct mlx5_vdpa_event_qp *eqp)
 {
-	if (eqp->sw_qp)
-		claim_zero(mlx5_devx_cmd_destroy(eqp->sw_qp));
-	if (eqp->umem_obj)
-		claim_zero(mlx5_glue->devx_umem_dereg(eqp->umem_obj));
-	if (eqp->umem_buf)
-		rte_free(eqp->umem_buf);
+	mlx5_devx_qp_destroy(&eqp->sw_qp);
 	if (eqp->fw_qp)
 		claim_zero(mlx5_devx_cmd_destroy(eqp->fw_qp));
 	mlx5_vdpa_cq_destroy(&eqp->cq);
@@ -547,36 +542,36 @@ static int
 mlx5_vdpa_qps2rts(struct mlx5_vdpa_event_qp *eqp)
 {
 	if (mlx5_devx_cmd_modify_qp_state(eqp->fw_qp, MLX5_CMD_OP_RST2INIT_QP,
-					  eqp->sw_qp->id)) {
+					  eqp->sw_qp.qp->id)) {
 		DRV_LOG(ERR, "Failed to modify FW QP to INIT state(%u).",
 			rte_errno);
 		return -1;
 	}
-	if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp, MLX5_CMD_OP_RST2INIT_QP,
+	if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp.qp, MLX5_CMD_OP_RST2INIT_QP,
 					  eqp->fw_qp->id)) {
 		DRV_LOG(ERR, "Failed to modify SW QP to INIT state(%u).",
 			rte_errno);
 		return -1;
 	}
 	if (mlx5_devx_cmd_modify_qp_state(eqp->fw_qp, MLX5_CMD_OP_INIT2RTR_QP,
-					  eqp->sw_qp->id)) {
+					  eqp->sw_qp.qp->id)) {
 		DRV_LOG(ERR, "Failed to modify FW QP to RTR state(%u).",
 			rte_errno);
 		return -1;
 	}
-	if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp, MLX5_CMD_OP_INIT2RTR_QP,
+	if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp.qp, MLX5_CMD_OP_INIT2RTR_QP,
 					  eqp->fw_qp->id)) {
 		DRV_LOG(ERR, "Failed to modify SW QP to RTR state(%u).",
 			rte_errno);
 		return -1;
 	}
 	if (mlx5_devx_cmd_modify_qp_state(eqp->fw_qp, MLX5_CMD_OP_RTR2RTS_QP,
-					  eqp->sw_qp->id)) {
+					  eqp->sw_qp.qp->id)) {
 		DRV_LOG(ERR, "Failed to modify FW QP to RTS state(%u).",
 			rte_errno);
 		return -1;
 	}
-	if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp, MLX5_CMD_OP_RTR2RTS_QP,
+	if (mlx5_devx_cmd_modify_qp_state(eqp->sw_qp.qp, MLX5_CMD_OP_RTR2RTS_QP,
 					  eqp->fw_qp->id)) {
 		DRV_LOG(ERR, "Failed to modify SW QP to RTS state(%u).",
 			rte_errno);
@@ -591,8 +586,7 @@ mlx5_vdpa_event_qp_create(struct mlx5_vdpa_priv *priv, uint16_t desc_n,
 {
 	struct mlx5_devx_qp_attr attr = {0};
 	uint16_t log_desc_n = rte_log2_u32(desc_n);
-	uint32_t umem_size = (1 << log_desc_n) * MLX5_WSEG_SIZE +
-						       sizeof(*eqp->db_rec) * 2;
+	uint32_t ret;
 
 	if (mlx5_vdpa_event_qp_global_prepare(priv))
 		return -1;
@@ -605,42 +599,21 @@ mlx5_vdpa_event_qp_create(struct mlx5_vdpa_priv *priv, uint16_t desc_n,
 		DRV_LOG(ERR, "Failed to create FW QP(%u).", rte_errno);
 		goto error;
 	}
-	eqp->umem_buf = rte_zmalloc(__func__, umem_size, 4096);
-	if (!eqp->umem_buf) {
-		DRV_LOG(ERR, "Failed to allocate memory for SW QP.");
-		rte_errno = ENOMEM;
-		goto error;
-	}
-	eqp->umem_obj = mlx5_glue->devx_umem_reg(priv->ctx,
-					       (void *)(uintptr_t)eqp->umem_buf,
-					       umem_size,
-					       IBV_ACCESS_LOCAL_WRITE);
-	if (!eqp->umem_obj) {
-		DRV_LOG(ERR, "Failed to register umem for SW QP.");
-		goto error;
-	}
 	attr.uar_index = priv->uar->page_id;
 	attr.cqn = eqp->cq.cq_obj.cq->id;
-	attr.log_page_size = rte_log2_u32(sysconf(_SC_PAGESIZE));
-	attr.rq_size = 1 << log_desc_n;
+	attr.rq_size = RTE_BIT32(log_desc_n);
 	attr.log_rq_stride = rte_log2_u32(MLX5_WSEG_SIZE);
 	attr.sq_size = 0; /* No need SQ. */
-	attr.dbr_umem_valid = 1;
-	attr.wq_umem_id = eqp->umem_obj->umem_id;
-	attr.wq_umem_offset = 0;
-	attr.dbr_umem_id = eqp->umem_obj->umem_id;
 	attr.ts_format = mlx5_ts_format_conv(priv->qp_ts_format);
-	attr.dbr_address = RTE_BIT64(log_desc_n) * MLX5_WSEG_SIZE;
-	eqp->sw_qp = mlx5_devx_cmd_create_qp(priv->ctx, &attr);
-	if (!eqp->sw_qp) {
+	ret = mlx5_devx_qp_create(priv->ctx, &(eqp->sw_qp), log_desc_n, &attr, SOCKET_ID_ANY);
+	if (ret) {
 		DRV_LOG(ERR, "Failed to create SW QP(%u).", rte_errno);
 		goto error;
 	}
-	eqp->db_rec = RTE_PTR_ADD(eqp->umem_buf, (uintptr_t)attr.dbr_address);
 	if (mlx5_vdpa_qps2rts(eqp))
 		goto error;
 	/* First ringing. */
-	rte_write32(rte_cpu_to_be_32(1 << log_desc_n), &eqp->db_rec[0]);
+	rte_write32(rte_cpu_to_be_32(RTE_BIT32(log_desc_n)), &eqp->sw_qp.db_rec[0]);
 	return 0;
 error:
 	mlx5_vdpa_event_qp_destroy(eqp);
-- 
2.17.1


  reply	other threads:[~2021-09-03 14:22 UTC|newest]

Thread overview: 38+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2021-09-03 14:21 [dpdk-dev] [PATCH 0/5] mlx5: replaced hardware queue object Raja Zidane
2021-09-03 14:21 ` Raja Zidane [this message]
2021-09-12 16:36   ` [dpdk-dev] [PATCH V2 " Raja Zidane
2021-09-12 16:36     ` [dpdk-dev] [PATCH V2 1/5] common/mlx5: share DevX QP operations Raja Zidane
2021-09-15  0:04       ` [dpdk-dev] [PATCH V3 0/5] mlx5: replaced hardware queue object Raja Zidane
2021-09-15  0:05         ` [dpdk-dev] [PATCH V3 1/5] common/mlx5: share DevX QP operations Raja Zidane
2021-09-15  0:05         ` [dpdk-dev] [PATCH V3 2/5] common/mlx5: update new MMO HCA capabilities Raja Zidane
2021-09-22 19:48           ` Thomas Monjalon
2021-09-15  0:05         ` [dpdk-dev] [PATCH V3 3/5] common/mlx5: add MMO configuration for the DevX QP Raja Zidane
2021-09-15  0:05         ` [dpdk-dev] [PATCH V3 4/5] compress/mlx5: refactor queue HW object Raja Zidane
2021-09-15  0:05         ` [dpdk-dev] [PATCH V3 5/5] regex/mlx5: refactor HW queue objects Raja Zidane
2021-09-28 12:16         ` [dpdk-dev] [PATCH V4 0/5] mlx5: replaced hardware queue object Raja Zidane
2021-09-28 12:16           ` [dpdk-dev] [PATCH V4 1/5] common/mlx5: share DevX QP operations Raja Zidane
2021-09-28 12:16           ` [dpdk-dev] [PATCH V4 2/5] common/mlx5: update new MMO HCA capabilities Raja Zidane
2021-09-28 12:16           ` [dpdk-dev] [PATCH V4 3/5] common/mlx5: add MMO configuration for the DevX QP Raja Zidane
2021-09-28 12:16           ` [dpdk-dev] [PATCH V4 4/5] compress/mlx5: refactor queue HW object Raja Zidane
2021-09-28 12:16           ` [dpdk-dev] [PATCH V4 5/5] regex/mlx5: refactor HW queue objects Raja Zidane
2021-09-30  5:44           ` [dpdk-dev] [PATCH V5 0/5] mlx5: replaced hardware queue object Raja Zidane
2021-09-30  5:44             ` [dpdk-dev] [PATCH V5 1/5] common/mlx5: update new MMO HCA capabilities Raja Zidane
2021-09-30  5:44             ` [dpdk-dev] [PATCH V5 2/5] common/mlx5: add MMO configuration for the DevX QP Raja Zidane
2021-09-30  5:44             ` [dpdk-dev] [PATCH V5 3/5] compress/mlx5: refactor queue HW object Raja Zidane
2021-09-30  5:44             ` [dpdk-dev] [PATCH V5 4/5] regex/mlx5: refactor HW queue objects Raja Zidane
2021-09-30  5:44             ` [dpdk-dev] [PATCH V5 5/5] compress/mlx5: allow partial transformations support Raja Zidane
2021-10-05 12:27             ` [dpdk-dev] [PATCH V6 0/5] mlx5: replaced hardware queue object Raja Zidane
2021-10-05 12:27               ` [dpdk-dev] [PATCH V6 1/5] common/mlx5: share DevX QP operations Raja Zidane
2021-10-05 12:27               ` [dpdk-dev] [PATCH V6 2/5] common/mlx5: update new MMO HCA capabilities Raja Zidane
2021-10-05 12:27               ` [dpdk-dev] [PATCH V6 3/5] common/mlx5: add MMO configuration for the DevX QP Raja Zidane
2021-10-05 12:27               ` [dpdk-dev] [PATCH V6 4/5] compress/mlx5: refactor queue HW object Raja Zidane
2021-10-05 12:27               ` [dpdk-dev] [PATCH V6 5/5] regex/mlx5: refactor HW queue objects Raja Zidane
2021-10-05 16:18               ` [dpdk-dev] [PATCH V6 0/5] mlx5: replaced hardware queue object Thomas Monjalon
2021-09-12 16:36     ` [dpdk-dev] [PATCH V2 2/5] common/mlx5: update new MMO HCA capabilities Raja Zidane
2021-09-12 16:36     ` [dpdk-dev] [PATCH V2 3/5] common/mlx5: add MMO configuration for the DevX QP Raja Zidane
2021-09-12 16:36     ` [dpdk-dev] [PATCH V2 4/5] compress/mlx5: refactor queue HW object Raja Zidane
2021-09-12 16:36     ` [dpdk-dev] [PATCH V2 5/5] regex/mlx5: refactor HW queue objects Raja Zidane
2021-09-03 14:21 ` [dpdk-dev] [PATCH 2/5] common/mlx5: update new MMO HCA capabilities Raja Zidane
2021-09-03 14:21 ` [dpdk-dev] [PATCH 3/5] common/mlx5: add MMO configuration for the DevX QP Raja Zidane
2021-09-03 14:21 ` [dpdk-dev] [PATCH 4/5] compress/mlx5: refactor queue HW object Raja Zidane
2021-09-03 14:21 ` [dpdk-dev] [PATCH 5/5] regex/mlx5: refactor HW queue objects Raja Zidane

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20210903142157.25617-2-rzidane@nvidia.com \
    --to=rzidane@nvidia.com \
    --cc=dev@dpdk.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).