DPDK patches and discussions
* [dpdk-dev] [PATCH] net/mlx5: implement CQ for RxQ using DevX API
@ 2020-07-19 11:13 Dekel Peled
  2020-07-19 15:33 ` Raslan Darawsheh
  0 siblings, 1 reply; 2+ messages in thread
From: Dekel Peled @ 2020-07-19 11:13 UTC (permalink / raw)
  To: matan, viacheslavo, rasland; +Cc: dev

This patch continues the work of using the DevX API for the creation
and management of different objects.
On the Rx control path, the RQ, RQT, and TIR objects can already be
created using the DevX API.
This patch adds support for creating the CQ of an RxQ using the DevX
API.
The corresponding event channel is also created and utilized through
the DevX API.
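
For reference, the event-channel flow implemented below can be
summarized by this simplified sketch (illustrative only: "ctx" and
"cq_obj" stand for the device context and the created DevX CQ object,
and error handling is omitted):

	/* Create the event channel, omitting per-event data. */
	struct mlx5dv_devx_event_channel *ch =
		mlx5_glue->devx_create_event_channel
			(ctx,
			 MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA);
	uint16_t event_nums[1] = {0}; /* Event type used for the Rx CQ. */

	/* Subscribe the CQ, using its pointer as the event cookie. */
	mlx5_glue->devx_subscribe_devx_event(ch, cq_obj->obj,
					     sizeof(event_nums), event_nums,
					     (uint64_t)(uintptr_t)cq_obj);
	/* On interrupt, read the event and match the cookie to the CQ. */
	struct mlx5dv_devx_async_event_hdr hdr;
	if (mlx5_glue->devx_get_event(ch, &hdr, sizeof(hdr)) > 0 &&
	    hdr.cookie == (uint64_t)(uintptr_t)cq_obj)
		; /* Re-arm the CQ and poll the queue. */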

Signed-off-by: Dekel Peled <dekelp@mellanox.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
---
 drivers/net/mlx5/linux/mlx5_os.c |  21 ++
 drivers/net/mlx5/mlx5.c          |  10 +
 drivers/net/mlx5/mlx5.h          |   2 +
 drivers/net/mlx5/mlx5_rxq.c      | 422 +++++++++++++++++++++++++++++----------
 drivers/net/mlx5/mlx5_rxtx.h     |  29 ++-
 5 files changed, 375 insertions(+), 109 deletions(-)
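
Note (illustrative, not part of this patch): with the channel fd stored
in the Rx queue object and registered in the interrupt vector, an
application consumes these events through the regular ethdev Rx
interrupt API; "port_id" and "queue_id" below are assumed:

	#include <rte_ethdev.h>
	#include <rte_interrupts.h>

	struct rte_epoll_event ev;

	rte_eth_dev_rx_intr_ctl_q(port_id, queue_id, RTE_EPOLL_PER_THREAD,
				  RTE_INTR_EVENT_ADD, NULL);
	rte_eth_dev_rx_intr_enable(port_id, queue_id); /* Arm the CQ. */
	if (rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1) > 0) {
		rte_eth_dev_rx_intr_disable(port_id, queue_id);
		/* Drain the queue with rte_eth_rx_burst() here. */
	}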

diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c
index bf1c750..8af9756 100644
--- a/drivers/net/mlx5/linux/mlx5_os.c
+++ b/drivers/net/mlx5/linux/mlx5_os.c
@@ -68,6 +68,27 @@
 #endif
 
 /**
+ * Set the completion channel file descriptor interrupt as non-blocking.
+ *
+ * @param[in] fd
+ *   The file descriptor (representing the interrupt) used in this
+ *   channel. It is taken by the caller from the RQ channel object,
+ *   which includes the channel fd, and is set here to non-blocking
+ *   mode.
+ *
+ * @return
+ *   0 on successfully setting the fd to non-blocking, non-zero otherwise.
+ */
+int
+mlx5_os_set_nonblock_channel_fd(int fd)
+{
+	int flags;
+
+	flags = fcntl(fd, F_GETFL);
+	return fcntl(fd, F_SETFL, flags | O_NONBLOCK);
+}
+
+/**
  * Get mlx5 device attributes. The glue function query_device_ex() is called
  * with out parameter of type 'struct ibv_device_attr_ex *'. Then fill in mlx5
  * device attributes from the glue out parameter.
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 517fbb4..25ce4f0 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -827,6 +827,12 @@ struct mlx5_dev_ctx_shared *
 			err = ENOMEM;
 			goto error;
 		}
+		sh->devx_rx_uar = mlx5_glue->devx_alloc_uar(sh->ctx, 0);
+		if (!sh->devx_rx_uar) {
+			DRV_LOG(ERR, "Failed to allocate Rx DevX UAR.");
+			err = ENOMEM;
+			goto error;
+		}
 	}
 	sh->flow_id_pool = mlx5_flow_id_pool_alloc
 					((1 << HAIRPIN_FLOW_ID_BITS) - 1);
@@ -894,6 +900,8 @@ struct mlx5_dev_ctx_shared *
 		claim_zero(mlx5_devx_cmd_destroy(sh->tis));
 	if (sh->td)
 		claim_zero(mlx5_devx_cmd_destroy(sh->td));
+	if (sh->devx_rx_uar)
+		mlx5_glue->devx_free_uar(sh->devx_rx_uar);
 	if (sh->pd)
 		claim_zero(mlx5_glue->dealloc_pd(sh->pd));
 	if (sh->ctx)
@@ -965,6 +973,8 @@ struct mlx5_dev_ctx_shared *
 		claim_zero(mlx5_devx_cmd_destroy(sh->tis));
 	if (sh->td)
 		claim_zero(mlx5_devx_cmd_destroy(sh->td));
+	if (sh->devx_rx_uar)
+		mlx5_glue->devx_free_uar(sh->devx_rx_uar);
 	if (sh->ctx)
 		claim_zero(mlx5_glue->close_device(sh->ctx));
 	if (sh->flow_id_pool)
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 78ebd19..c5f6552 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -665,6 +665,7 @@ struct mlx5_dev_ctx_shared {
 	struct mlx5dv_devx_uar *tx_uar; /* Tx/packer pacing shared UAR. */
 	struct mlx5_flex_parser_profiles fp[MLX5_FLEX_PARSER_MAX];
 	/* Flex parser profiles information. */
+	struct mlx5dv_devx_uar *devx_rx_uar; /* DevX UAR for Rx. */
 	struct mlx5_dev_shared_port port[]; /* per device port data array. */
 };
 
@@ -1025,6 +1026,7 @@ int mlx5_os_read_dev_stat(struct mlx5_priv *priv,
 void mlx5_os_stats_init(struct rte_eth_dev *dev);
 void mlx5_os_set_reg_mr_cb(mlx5_reg_mr_t *reg_mr_cb,
 			   mlx5_dereg_mr_t *dereg_mr_cb);
+int mlx5_os_set_nonblock_channel_fd(int fd);
 
 /* mlx5_txpp.c */
 
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 67d996c..0d6f02d 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -28,6 +28,7 @@
 #include <rte_interrupts.h>
 #include <rte_debug.h>
 #include <rte_io.h>
+#include <rte_eal_paging.h>
 
 #include <mlx5_glue.h>
 #include <mlx5_devx_cmds.h>
@@ -638,7 +639,7 @@
  *   DevX Rx queue object.
  */
 static void
-rxq_release_rq_resources(struct mlx5_rxq_ctrl *rxq_ctrl)
+rxq_release_devx_rq_resources(struct mlx5_rxq_ctrl *rxq_ctrl)
 {
 	if (rxq_ctrl->rxq.wqes) {
 		mlx5_free((void *)(uintptr_t)rxq_ctrl->rxq.wqes);
@@ -651,6 +652,25 @@
 }
 
 /**
+ * Release the resources allocated for the Rx CQ DevX object.
+ *
+ * @param rxq_ctrl
+ *   DevX Rx queue object.
+ */
+static void
+rxq_release_devx_cq_resources(struct mlx5_rxq_ctrl *rxq_ctrl)
+{
+	if (rxq_ctrl->rxq.cqes) {
+		rte_free((void *)(uintptr_t)rxq_ctrl->rxq.cqes);
+		rxq_ctrl->rxq.cqes = NULL;
+	}
+	if (rxq_ctrl->cq_umem) {
+		mlx5_glue->devx_umem_dereg(rxq_ctrl->cq_umem);
+		rxq_ctrl->cq_umem = NULL;
+	}
+}
+
+/**
  * Release an Rx hairpin related resources.
  *
  * @param rxq_obj
@@ -685,27 +705,31 @@
 		switch (rxq_obj->type) {
 		case MLX5_RXQ_OBJ_TYPE_IBV:
 			MLX5_ASSERT(rxq_obj->wq);
-			MLX5_ASSERT(rxq_obj->cq);
+			MLX5_ASSERT(rxq_obj->ibv_cq);
 			rxq_free_elts(rxq_obj->rxq_ctrl);
 			claim_zero(mlx5_glue->destroy_wq(rxq_obj->wq));
-			claim_zero(mlx5_glue->destroy_cq(rxq_obj->cq));
+			claim_zero(mlx5_glue->destroy_cq(rxq_obj->ibv_cq));
+			if (rxq_obj->ibv_channel)
+				claim_zero(mlx5_glue->destroy_comp_channel
+					   (rxq_obj->ibv_channel));
 			break;
 		case MLX5_RXQ_OBJ_TYPE_DEVX_RQ:
-			MLX5_ASSERT(rxq_obj->cq);
 			MLX5_ASSERT(rxq_obj->rq);
+			MLX5_ASSERT(rxq_obj->devx_cq);
 			rxq_free_elts(rxq_obj->rxq_ctrl);
 			claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));
-			rxq_release_rq_resources(rxq_obj->rxq_ctrl);
-			claim_zero(mlx5_glue->destroy_cq(rxq_obj->cq));
+			claim_zero(mlx5_devx_cmd_destroy(rxq_obj->devx_cq));
+			if (rxq_obj->devx_channel)
+				mlx5_glue->devx_destroy_event_channel
+							(rxq_obj->devx_channel);
+			rxq_release_devx_rq_resources(rxq_obj->rxq_ctrl);
+			rxq_release_devx_cq_resources(rxq_obj->rxq_ctrl);
 			break;
 		case MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN:
 			MLX5_ASSERT(rxq_obj->rq);
 			rxq_obj_hairpin_release(rxq_obj);
 			break;
 		}
-		if (rxq_obj->channel)
-			claim_zero(mlx5_glue->destroy_comp_channel
-				   (rxq_obj->channel));
 		LIST_REMOVE(rxq_obj, next);
 		mlx5_free(rxq_obj);
 		return 0;
@@ -750,12 +774,11 @@
 	for (i = 0; i != n; ++i) {
 		/* This rxq obj must not be released in this function. */
 		struct mlx5_rxq_obj *rxq_obj = mlx5_rxq_obj_get(dev, i);
-		int fd;
-		int flags;
 		int rc;
 
 		/* Skip queues that cannot request interrupts. */
-		if (!rxq_obj || !rxq_obj->channel) {
+		if (!rxq_obj || (!rxq_obj->ibv_channel &&
+				 !rxq_obj->devx_channel)) {
 			/* Use invalid intr_vec[] index to disable entry. */
 			intr_handle->intr_vec[i] =
 				RTE_INTR_VEC_RXTX_OFFSET +
@@ -772,21 +795,19 @@
 			rte_errno = ENOMEM;
 			return -rte_errno;
 		}
-		fd = rxq_obj->channel->fd;
-		flags = fcntl(fd, F_GETFL);
-		rc = fcntl(fd, F_SETFL, flags | O_NONBLOCK);
+		rc = mlx5_os_set_nonblock_channel_fd(rxq_obj->fd);
 		if (rc < 0) {
 			rte_errno = errno;
 			DRV_LOG(ERR,
 				"port %u failed to make Rx interrupt file"
 				" descriptor %d non-blocking for queue index"
 				" %d",
-				dev->data->port_id, fd, i);
+				dev->data->port_id, rxq_obj->fd, i);
 			mlx5_rx_intr_vec_disable(dev);
 			return -rte_errno;
 		}
 		intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + count;
-		intr_handle->efds[count] = fd;
+		intr_handle->efds[count] = rxq_obj->fd;
 		count++;
 	}
 	if (!count)
@@ -858,7 +879,7 @@
 	sq_n = sq_n_rxq & MLX5_CQ_SQN_MASK;
 	doorbell_hi = sq_n << MLX5_CQ_SQN_OFFSET | (rxq->cq_ci & MLX5_CI_MASK);
 	doorbell = (uint64_t)doorbell_hi << 32;
-	doorbell |=  rxq->cqn;
+	doorbell |= rxq->cqn;
 	rxq->cq_db[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(doorbell_hi);
 	mlx5_uar_write64(rte_cpu_to_be_64(doorbell),
 			 cq_db_reg, rxq->uar_lock_cq);
@@ -937,13 +958,29 @@
 		rte_errno = EINVAL;
 		return -rte_errno;
 	}
-	ret = mlx5_glue->get_cq_event(rxq_obj->channel, &ev_cq, &ev_ctx);
-	if (ret || ev_cq != rxq_obj->cq) {
-		rte_errno = EINVAL;
-		goto exit;
+	if (rxq_obj->type == MLX5_RXQ_OBJ_TYPE_IBV) {
+		ret = mlx5_glue->get_cq_event(rxq_obj->ibv_channel, &ev_cq,
+					      &ev_ctx);
+		if (ret || ev_cq != rxq_obj->ibv_cq) {
+			rte_errno = EINVAL;
+			goto exit;
+		}
+		mlx5_glue->ack_cq_events(rxq_obj->ibv_cq, 1);
+	} else if (rxq_obj->type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ) {
+#ifdef HAVE_IBV_DEVX_EVENT
+		struct mlx5dv_devx_async_event_hdr event_data;
+
+		ret = mlx5_glue->devx_get_event
+				(rxq_obj->devx_channel, &event_data,
+				 sizeof(event_data));
+		if (ret <= 0 || event_data.cookie !=
+				(uint64_t)(uintptr_t)rxq_obj->devx_cq) {
+			rte_errno = EINVAL;
+			goto exit;
+		}
+#endif /* HAVE_IBV_DEVX_EVENT */
 	}
 	rxq_data->cq_arm_sn++;
-	mlx5_glue->ack_cq_events(rxq_obj->cq, 1);
 	mlx5_rxq_obj_release(rxq_obj);
 	return 0;
 exit:
@@ -985,7 +1022,7 @@
 
 	cq_attr.ibv = (struct ibv_cq_init_attr_ex){
 		.cqe = cqe_n,
-		.channel = rxq_obj->channel,
+		.channel = rxq_obj->ibv_channel,
 		.comp_mask = 0,
 	};
 	cq_attr.mlx5 = (struct mlx5dv_cq_init_attr){
@@ -1069,7 +1106,7 @@
 		/* Max number of scatter/gather elements in a WR. */
 		.max_sge = 1 << rxq_data->sges_n,
 		.pd = priv->sh->pd,
-		.cq = rxq_obj->cq,
+		.cq = rxq_obj->ibv_cq,
 		.comp_mask = IBV_WQ_FLAGS_CVLAN_STRIPPING | 0,
 		.create_flags = (rxq_data->vlan_strip ?
 				 IBV_WQ_FLAGS_CVLAN_STRIPPING : 0),
@@ -1168,8 +1205,8 @@
 					MLX5_WQ_END_PAD_MODE_ALIGN :
 					MLX5_WQ_END_PAD_MODE_NONE;
 	wq_attr->pd = priv->sh->pdn;
-	wq_attr->dbr_addr = rxq_ctrl->dbr_offset;
-	wq_attr->dbr_umem_id = rxq_ctrl->dbr_umem_id;
+	wq_attr->dbr_addr = rxq_ctrl->rq_dbr_offset;
+	wq_attr->dbr_umem_id = rxq_ctrl->rq_dbr_umem_id;
 	wq_attr->dbr_umem_valid = 1;
 	wq_attr->wq_umem_id = rxq_ctrl->wq_umem->umem_id;
 	wq_attr->wq_umem_valid = 1;
@@ -1195,7 +1232,7 @@
 	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
 	struct mlx5_rxq_ctrl *rxq_ctrl =
 		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
-	struct mlx5_devx_create_rq_attr rq_attr;
+	struct mlx5_devx_create_rq_attr rq_attr = { 0 };
 	uint32_t wqe_n = 1 << (rxq_data->elts_n - rxq_data->sges_n);
 	uint32_t wq_size = 0;
 	uint32_t wqe_size = 0;
@@ -1203,7 +1240,6 @@
 	void *buf = NULL;
 	struct mlx5_devx_obj *rq;
 
-	memset(&rq_attr, 0, sizeof(rq_attr));
 	/* Fill RQ attributes. */
 	rq_attr.mem_rq_type = MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE;
 	rq_attr.flush_in_error_en = 1;
@@ -1247,11 +1283,136 @@
 	mlx5_devx_wq_attr_fill(priv, rxq_ctrl, &rq_attr.wq_attr);
 	rq = mlx5_devx_cmd_create_rq(priv->sh->ctx, &rq_attr, rxq_ctrl->socket);
 	if (!rq)
-		rxq_release_rq_resources(rxq_ctrl);
+		rxq_release_devx_rq_resources(rxq_ctrl);
 	return rq;
 }
 
 /**
+ * Create a DevX CQ object for an Rx queue.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param cqe_n
+ *   Number of CQEs in CQ.
+ * @param idx
+ *   Queue index in DPDK Rx queue array.
+ * @param rxq_obj
+ *   Pointer to Rx queue object data.
+ *
+ * @return
+ *   The DevX object initialised, NULL otherwise and rte_errno is set.
+ */
+static struct mlx5_devx_obj *
+mlx5_devx_cq_new(struct rte_eth_dev *dev, unsigned int cqe_n, uint16_t idx,
+		 struct mlx5_rxq_obj *rxq_obj)
+{
+	struct mlx5_devx_obj *cq_obj = NULL;
+	struct mlx5_devx_cq_attr cq_attr = { 0 };
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
+	struct mlx5_rxq_ctrl *rxq_ctrl =
+		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
+	size_t page_size = rte_mem_page_size();
+	uint32_t lcore = (uint32_t)rte_lcore_to_cpu_id(-1);
+	uint32_t eqn = 0;
+	void *buf = NULL;
+	uint16_t event_nums[1] = {0};
+	uint32_t log_cqe_n;
+	uint32_t cq_size;
+	int ret = 0;
+
+	if (page_size == (size_t)-1) {
+		DRV_LOG(ERR, "Failed to get page_size.");
+		goto error;
+	}
+	if (priv->config.cqe_comp && !rxq_data->hw_timestamp &&
+	    !rxq_data->lro) {
+		cq_attr.cqe_comp_en = MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
+#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
+		cq_attr.mini_cqe_res_format =
+				mlx5_rxq_mprq_enabled(rxq_data) ?
+				MLX5DV_CQE_RES_FORMAT_CSUM_STRIDX :
+				MLX5DV_CQE_RES_FORMAT_HASH;
+#else
+		cq_attr.mini_cqe_res_format = MLX5DV_CQE_RES_FORMAT_HASH;
+#endif
+		/*
+		 * For vectorized Rx, it must not be doubled in order to
+		 * make cq_ci and rq_ci aligned.
+		 */
+		if (mlx5_rxq_check_vec_support(rxq_data) < 0)
+			cqe_n *= 2;
+	} else if (priv->config.cqe_comp && rxq_data->hw_timestamp) {
+		DRV_LOG(DEBUG,
+			"port %u Rx CQE compression is disabled for HW"
+			" timestamp",
+			dev->data->port_id);
+	} else if (priv->config.cqe_comp && rxq_data->lro) {
+		DRV_LOG(DEBUG,
+			"port %u Rx CQE compression is disabled for LRO",
+			dev->data->port_id);
+	}
+#ifdef HAVE_IBV_MLX5_MOD_CQE_128B_PAD
+	if (priv->config.cqe_pad)
+		cq_attr.cqe_size = MLX5DV_CQ_INIT_ATTR_FLAGS_CQE_PAD;
+#endif
+	log_cqe_n = log2above(cqe_n);
+	cq_size = sizeof(struct mlx5_cqe) * (1 << log_cqe_n);
+	/* Query the EQN for this core. */
+	if (mlx5_glue->devx_query_eqn(priv->sh->ctx, lcore, &eqn)) {
+		DRV_LOG(ERR, "Failed to query EQN for CQ.");
+		goto error;
+	}
+	cq_attr.eqn = eqn;
+	buf = rte_calloc_socket(__func__, 1, cq_size, page_size,
+				rxq_ctrl->socket);
+	if (!buf) {
+		DRV_LOG(ERR, "Failed to allocate memory for CQ.");
+		goto error;
+	}
+	rxq_data->cqes = (volatile struct mlx5_cqe (*)[])(uintptr_t)buf;
+	rxq_ctrl->cq_umem = mlx5_glue->devx_umem_reg(priv->sh->ctx, buf,
+						     cq_size,
+						     IBV_ACCESS_LOCAL_WRITE);
+	if (!rxq_ctrl->cq_umem) {
+		DRV_LOG(ERR, "Failed to register umem for CQ.");
+		goto error;
+	}
+	cq_attr.uar_page_id = priv->sh->devx_rx_uar->page_id;
+	cq_attr.q_umem_id = rxq_ctrl->cq_umem->umem_id;
+	cq_attr.q_umem_valid = 1;
+	cq_attr.log_cq_size = log_cqe_n;
+	cq_attr.log_page_size = rte_log2_u32(page_size);
+	cq_attr.db_umem_offset = rxq_ctrl->cq_dbr_offset;
+	cq_attr.db_umem_id = rxq_ctrl->cq_dbr_umem_id;
+	cq_attr.db_umem_valid = rxq_ctrl->cq_dbr_umem_id_valid;
+	cq_obj = mlx5_devx_cmd_create_cq(priv->sh->ctx, &cq_attr);
+	if (!cq_obj)
+		goto error;
+	rxq_data->cqe_n = log_cqe_n;
+	rxq_data->cqn = cq_obj->id;
+	if (rxq_obj->devx_channel) {
+		ret = mlx5_glue->devx_subscribe_devx_event
+						(rxq_obj->devx_channel,
+						 cq_obj->obj,
+						 sizeof(event_nums),
+						 event_nums,
+						 (uint64_t)(uintptr_t)cq_obj);
+		if (ret) {
+			DRV_LOG(ERR, "Fail to subscribe CQ to event channel.");
+			rte_errno = errno;
+			goto error;
+		}
+	}
+	/* Initialise CQ to 1's to mark HW ownership for all CQEs. */
+	memset((void *)(uintptr_t)rxq_data->cqes, 0xFF, cq_size);
+	return cq_obj;
+error:
+	rxq_release_devx_cq_resources(rxq_ctrl);
+	return NULL;
+}
+
+/**
  * Create the Rx hairpin queue object.
  *
  * @param dev
@@ -1366,7 +1527,7 @@ struct mlx5_rxq_obj *
 			   rxq_ctrl->socket);
 	if (!tmpl) {
 		DRV_LOG(ERR,
-			"port %u Rx queue %u cannot allocate verbs resources",
+			"port %u Rx queue %u cannot allocate resources",
 			dev->data->port_id, rxq_data->idx);
 		rte_errno = ENOMEM;
 		goto error;
@@ -1374,60 +1535,75 @@ struct mlx5_rxq_obj *
 	tmpl->type = type;
 	tmpl->rxq_ctrl = rxq_ctrl;
 	if (rxq_ctrl->irq) {
-		tmpl->channel = mlx5_glue->create_comp_channel(priv->sh->ctx);
-		if (!tmpl->channel) {
-			DRV_LOG(ERR, "port %u: comp channel creation failure",
-				dev->data->port_id);
-			rte_errno = ENOMEM;
-			goto error;
+		if (tmpl->type == MLX5_RXQ_OBJ_TYPE_IBV) {
+			tmpl->ibv_channel =
+				mlx5_glue->create_comp_channel(priv->sh->ctx);
+			if (!tmpl->ibv_channel) {
+				DRV_LOG(ERR, "port %u: comp channel creation "
+					"failure", dev->data->port_id);
+				rte_errno = ENOMEM;
+				goto error;
+			}
+			tmpl->fd = tmpl->ibv_channel->fd;
+		} else if (tmpl->type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ) {
+			int devx_ev_flag =
+			  MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA;
+
+			tmpl->devx_channel =
+				mlx5_glue->devx_create_event_channel
+								(priv->sh->ctx,
+								 devx_ev_flag);
+			if (!tmpl->devx_channel) {
+				rte_errno = errno;
+				DRV_LOG(ERR,
+					"Failed to create event channel %d.",
+					rte_errno);
+				goto error;
+			}
+			tmpl->fd = tmpl->devx_channel->fd;
 		}
 	}
 	if (mlx5_rxq_mprq_enabled(rxq_data))
 		cqe_n = wqe_n * (1 << rxq_data->strd_num_n) - 1;
 	else
-		cqe_n = wqe_n  - 1;
-	tmpl->cq = mlx5_ibv_cq_new(dev, priv, rxq_data, cqe_n, tmpl);
-	if (!tmpl->cq) {
-		DRV_LOG(ERR, "port %u Rx queue %u CQ creation failure",
-			dev->data->port_id, idx);
-		rte_errno = ENOMEM;
-		goto error;
-	}
-	obj.cq.in = tmpl->cq;
-	obj.cq.out = &cq_info;
-	ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ);
-	if (ret) {
-		rte_errno = ret;
-		goto error;
-	}
-	if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
-		DRV_LOG(ERR,
-			"port %u wrong MLX5_CQE_SIZE environment variable"
-			" value: it should be set to %u",
-			dev->data->port_id, RTE_CACHE_LINE_SIZE);
-		rte_errno = EINVAL;
-		goto error;
-	}
+		cqe_n = wqe_n - 1;
 	DRV_LOG(DEBUG, "port %u device_attr.max_qp_wr is %d",
 		dev->data->port_id, priv->sh->device_attr.max_qp_wr);
 	DRV_LOG(DEBUG, "port %u device_attr.max_sge is %d",
 		dev->data->port_id, priv->sh->device_attr.max_sge);
-	/* Allocate door-bell for types created with DevX. */
-	if (tmpl->type != MLX5_RXQ_OBJ_TYPE_IBV) {
-		struct mlx5_devx_dbr_page *dbr_page;
-		int64_t dbr_offset;
-
-		dbr_offset = mlx5_get_dbr(priv->sh->ctx, &priv->dbrpgs,
-					  &dbr_page);
-		if (dbr_offset < 0)
-			goto error;
-		rxq_ctrl->dbr_offset = dbr_offset;
-		rxq_ctrl->dbr_umem_id = mlx5_os_get_umem_id(dbr_page->umem);
-		rxq_ctrl->dbr_umem_id_valid = 1;
-		rxq_data->rq_db = (uint32_t *)((uintptr_t)dbr_page->dbrs +
-					       (uintptr_t)rxq_ctrl->dbr_offset);
-	}
 	if (tmpl->type == MLX5_RXQ_OBJ_TYPE_IBV) {
+		/* Create CQ using Verbs API. */
+		tmpl->ibv_cq = mlx5_ibv_cq_new(dev, priv, rxq_data, cqe_n,
+					       tmpl);
+		if (!tmpl->ibv_cq) {
+			DRV_LOG(ERR, "port %u Rx queue %u CQ creation failure",
+				dev->data->port_id, idx);
+			rte_errno = ENOMEM;
+			goto error;
+		}
+		obj.cq.in = tmpl->ibv_cq;
+		obj.cq.out = &cq_info;
+		ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ);
+		if (ret) {
+			rte_errno = ret;
+			goto error;
+		}
+		if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
+			DRV_LOG(ERR,
+				"port %u wrong MLX5_CQE_SIZE environment "
+				"variable value: it should be set to %u",
+				dev->data->port_id, RTE_CACHE_LINE_SIZE);
+			rte_errno = EINVAL;
+			goto error;
+		}
+		/* Fill the rings. */
+		rxq_data->cqe_n = log2above(cq_info.cqe_cnt);
+		rxq_data->cq_db = cq_info.dbrec;
+		rxq_data->cqes =
+			(volatile struct mlx5_cqe (*)[])(uintptr_t)cq_info.buf;
+		rxq_data->cq_uar = cq_info.cq_uar;
+		rxq_data->cqn = cq_info.cqn;
+		/* Create WQ (RQ) using Verbs API. */
 		tmpl->wq = mlx5_ibv_wq_new(dev, priv, rxq_data, idx, wqe_n,
 					   tmpl);
 		if (!tmpl->wq) {
@@ -1459,10 +1635,45 @@ struct mlx5_rxq_obj *
 		rxq_data->wqes = rwq.buf;
 		rxq_data->rq_db = rwq.dbrec;
 	} else if (tmpl->type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ) {
-		struct mlx5_devx_modify_rq_attr rq_attr;
+		struct mlx5_devx_modify_rq_attr rq_attr = { 0 };
+		struct mlx5_devx_dbr_page *dbr_page;
+		int64_t dbr_offset;
 
-		memset(&rq_attr, 0, sizeof(rq_attr));
-		tmpl->rq = mlx5_devx_rq_new(dev, idx, cq_info.cqn);
+		/* Allocate CQ door-bell. */
+		dbr_offset = mlx5_get_dbr(priv->sh->ctx, &priv->dbrpgs,
+					  &dbr_page);
+		if (dbr_offset < 0) {
+			DRV_LOG(ERR, "Failed to allocate CQ door-bell.");
+			goto error;
+		}
+		rxq_ctrl->cq_dbr_offset = dbr_offset;
+		rxq_ctrl->cq_dbr_umem_id = mlx5_os_get_umem_id(dbr_page->umem);
+		rxq_ctrl->cq_dbr_umem_id_valid = 1;
+		rxq_data->cq_db =
+			(uint32_t *)((uintptr_t)dbr_page->dbrs +
+				     (uintptr_t)rxq_ctrl->cq_dbr_offset);
+		rxq_data->cq_uar = priv->sh->devx_rx_uar->base_addr;
+		/* Create CQ using DevX API. */
+		tmpl->devx_cq = mlx5_devx_cq_new(dev, cqe_n, idx, tmpl);
+		if (!tmpl->devx_cq) {
+			DRV_LOG(ERR, "Failed to create CQ.");
+			goto error;
+		}
+		/* Allocate RQ door-bell. */
+		dbr_offset = mlx5_get_dbr(priv->sh->ctx, &priv->dbrpgs,
+					  &dbr_page);
+		if (dbr_offset < 0) {
+			DRV_LOG(ERR, "Failed to allocate RQ door-bell.");
+			goto error;
+		}
+		rxq_ctrl->rq_dbr_offset = dbr_offset;
+		rxq_ctrl->rq_dbr_umem_id = mlx5_os_get_umem_id(dbr_page->umem);
+		rxq_ctrl->rq_dbr_umem_id_valid = 1;
+		rxq_data->rq_db =
+			(uint32_t *)((uintptr_t)dbr_page->dbrs +
+				     (uintptr_t)rxq_ctrl->rq_dbr_offset);
+		/* Create RQ using DevX API. */
+		tmpl->rq = mlx5_devx_rq_new(dev, idx, tmpl->devx_cq->id);
 		if (!tmpl->rq) {
 			DRV_LOG(ERR, "port %u Rx queue %u RQ creation failure",
 				dev->data->port_id, idx);
@@ -1476,12 +1687,6 @@ struct mlx5_rxq_obj *
 		if (ret)
 			goto error;
 	}
-	/* Fill the rings. */
-	rxq_data->cqe_n = log2above(cq_info.cqe_cnt);
-	rxq_data->cq_db = cq_info.dbrec;
-	rxq_data->cqes = (volatile struct mlx5_cqe (*)[])(uintptr_t)cq_info.buf;
-	rxq_data->cq_uar = cq_info.cq_uar;
-	rxq_data->cqn = cq_info.cqn;
 	rxq_data->cq_arm_sn = 0;
 	mlx5_rxq_initialize(rxq_data);
 	rxq_data->cq_ci = 0;
@@ -1494,20 +1699,31 @@ struct mlx5_rxq_obj *
 error:
 	if (tmpl) {
 		ret = rte_errno; /* Save rte_errno before cleanup. */
-		if (tmpl->type == MLX5_RXQ_OBJ_TYPE_IBV && tmpl->wq)
-			claim_zero(mlx5_glue->destroy_wq(tmpl->wq));
-		else if (tmpl->type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ && tmpl->rq)
-			claim_zero(mlx5_devx_cmd_destroy(tmpl->rq));
-		if (tmpl->cq)
-			claim_zero(mlx5_glue->destroy_cq(tmpl->cq));
-		if (tmpl->channel)
-			claim_zero(mlx5_glue->destroy_comp_channel
-							(tmpl->channel));
+		if (tmpl->type == MLX5_RXQ_OBJ_TYPE_IBV) {
+			if (tmpl->wq)
+				claim_zero(mlx5_glue->destroy_wq(tmpl->wq));
+			if (tmpl->ibv_cq)
+				claim_zero(mlx5_glue->destroy_cq(tmpl->ibv_cq));
+			if (tmpl->ibv_channel)
+				claim_zero(mlx5_glue->destroy_comp_channel
+							(tmpl->ibv_channel));
+		} else if (tmpl->type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ) {
+			if (tmpl->rq)
+				claim_zero(mlx5_devx_cmd_destroy(tmpl->rq));
+			if (tmpl->devx_cq)
+				claim_zero(mlx5_devx_cmd_destroy
+							(tmpl->devx_cq));
+			if (tmpl->devx_channel)
+				mlx5_glue->devx_destroy_event_channel
+							(tmpl->devx_channel);
+		}
 		mlx5_free(tmpl);
 		rte_errno = ret; /* Restore rte_errno. */
 	}
-	if (type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ)
-		rxq_release_rq_resources(rxq_ctrl);
+	if (type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ) {
+		rxq_release_devx_rq_resources(rxq_ctrl);
+		rxq_release_devx_cq_resources(rxq_ctrl);
+	}
 	priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
 	return NULL;
 }
@@ -2104,10 +2320,14 @@ struct mlx5_rxq_ctrl *
 	if (rxq_ctrl->obj && !mlx5_rxq_obj_release(rxq_ctrl->obj))
 		rxq_ctrl->obj = NULL;
 	if (rte_atomic32_dec_and_test(&rxq_ctrl->refcnt)) {
-		if (rxq_ctrl->dbr_umem_id_valid)
+		if (rxq_ctrl->rq_dbr_umem_id_valid)
+			claim_zero(mlx5_release_dbr(&priv->dbrpgs,
+						    rxq_ctrl->rq_dbr_umem_id,
+						    rxq_ctrl->rq_dbr_offset));
+		if (rxq_ctrl->cq_dbr_umem_id_valid)
 			claim_zero(mlx5_release_dbr(&priv->dbrpgs,
-						    rxq_ctrl->dbr_umem_id,
-						    rxq_ctrl->dbr_offset));
+						    rxq_ctrl->cq_dbr_umem_id,
+						    rxq_ctrl->cq_dbr_offset));
 		if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD)
 			mlx5_mr_btree_free(&rxq_ctrl->rxq.mr_ctrl.cache_bh);
 		LIST_REMOVE(rxq_ctrl, next);
@@ -2771,7 +2991,7 @@ enum mlx5_rxq_type
 		rte_errno = ENOMEM;
 		goto error;
 	}
-	rxq->cq = cq;
+	rxq->ibv_cq = cq;
 	rxq->wq = wq;
 	priv->drop_queue.rxq = rxq;
 	return rxq;
@@ -2800,8 +3020,8 @@ enum mlx5_rxq_type
 
 	if (rxq->wq)
 		claim_zero(mlx5_glue->destroy_wq(rxq->wq));
-	if (rxq->cq)
-		claim_zero(mlx5_glue->destroy_cq(rxq->cq));
+	if (rxq->ibv_cq)
+		claim_zero(mlx5_glue->destroy_cq(rxq->ibv_cq));
 	mlx5_free(rxq);
 	priv->drop_queue.rxq = NULL;
 }
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index 5116a15..39fa1fb 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -154,7 +154,7 @@ struct mlx5_rxq_data {
 	struct mlx5_rxq_stats stats;
 	rte_xmm_t mbuf_initializer; /* Default rearm/flags for vectorized Rx. */
 	struct rte_mbuf fake_mbuf; /* elts padding for vectorized Rx. */
-	void *cq_uar; /* CQ user access region. */
+	void *cq_uar; /* Verbs CQ user access region. */
 	uint32_t cqn; /* CQ number. */
 	uint8_t cq_arm_sn; /* CQ arm seq number. */
 #ifndef RTE_ARCH_64
@@ -184,14 +184,21 @@ struct mlx5_rxq_obj {
 	LIST_ENTRY(mlx5_rxq_obj) next; /* Pointer to the next element. */
 	rte_atomic32_t refcnt; /* Reference counter. */
 	struct mlx5_rxq_ctrl *rxq_ctrl; /* Back pointer to parent. */
-	struct ibv_cq *cq; /* Completion Queue. */
 	enum mlx5_rxq_obj_type type;
+	int fd; /* File descriptor for event channel. */
 	RTE_STD_C11
 	union {
-		struct ibv_wq *wq; /* Work Queue. */
-		struct mlx5_devx_obj *rq; /* DevX object for Rx Queue. */
+		struct {
+			struct ibv_wq *wq; /* Work Queue. */
+			struct ibv_cq *ibv_cq; /* Completion Queue. */
+			struct ibv_comp_channel *ibv_channel;
+		};
+		struct {
+			struct mlx5_devx_obj *rq; /* DevX Rx Queue object. */
+			struct mlx5_devx_obj *devx_cq; /* DevX CQ object. */
+			struct mlx5dv_devx_event_channel *devx_channel;
+		};
 	};
-	struct ibv_comp_channel *channel;
 };
 
 /* RX queue control descriptor. */
@@ -204,14 +211,20 @@ struct mlx5_rxq_ctrl {
 	enum mlx5_rxq_type type; /* Rxq type. */
 	unsigned int socket; /* CPU socket ID for allocations. */
 	unsigned int irq:1; /* Whether IRQ is enabled. */
-	unsigned int dbr_umem_id_valid:1; /* dbr_umem_id holds a valid value. */
+	unsigned int rq_dbr_umem_id_valid:1;
+	unsigned int cq_dbr_umem_id_valid:1;
 	uint32_t flow_mark_n; /* Number of Mark/Flag flows using this Queue. */
 	uint32_t flow_tunnels_n[MLX5_FLOW_TUNNEL]; /* Tunnels counters. */
 	uint32_t wqn; /* WQ number. */
 	uint16_t dump_file_n; /* Number of dump files. */
-	uint32_t dbr_umem_id; /* Storing door-bell information, */
-	uint64_t dbr_offset;  /* needed when freeing door-bell. */
+	uint32_t rq_dbr_umem_id;
+	uint64_t rq_dbr_offset;
+	/* Storing RQ door-bell information, needed when freeing door-bell. */
+	uint32_t cq_dbr_umem_id;
+	uint64_t cq_dbr_offset;
+	/* Storing CQ door-bell information, needed when freeing door-bell. */
 	struct mlx5dv_devx_umem *wq_umem; /* WQ buffer registration info. */
+	struct mlx5dv_devx_umem *cq_umem; /* CQ buffer registration info. */
 	struct rte_eth_hairpin_conf hairpin_conf; /* Hairpin configuration. */
 };
 
-- 
1.8.3.1

