DPDK patches and discussions
From: Matan Azrad <matan@mellanox.com>
To: Maxime Coquelin <maxime.coquelin@redhat.com>
Cc: dev@dpdk.org
Subject: [dpdk-dev] [PATCH 2/3] vdpa/mlx5: optimize completion queue poll
Date: Thu, 18 Jun 2020 19:11:15 +0000	[thread overview]
Message-ID: <1592507476-442112-3-git-send-email-matan@mellanox.com> (raw)
In-Reply-To: <1592507476-442112-1-git-send-email-matan@mellanox.com>

The vDPA driver uses a CQ in order to know when traffic operations have
been completed by the HW.

Each traffic completion adds a CQE to the CQ.

When the vDPA driver detects CQEs in the CQ, it triggers the guest
notification for the corresponding queue and consumes all of them.

The HW provides a CQ collapse feature that causes it to write all the
CQEs only to the first entry of the CQ.

Using this feature, the vDPA driver can read only the first CQE,
validate that the completion counter inside the CQE has changed and, if
so, notify the guest.

Use the CQ collapse feature in order to improve the poll utilization.
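
For illustration only (not part of the patch): a minimal sketch of the
collapsed-CQ poll idea. The names struct cq_state and poll_collapsed_cq()
are hypothetical; the actual logic is in mlx5_vdpa_cq_poll() in the diff
below.

	#include <stdint.h>

	/* Hypothetical stand-in for the driver's CQ bookkeeping. */
	struct cq_state {
		uint16_t next_counter;	/* next WQE counter expected */
		uint16_t cq_mask;	/* (1 << log_desc_n) - 1 */
	};

	/*
	 * With collapse enabled the HW overwrites CQE[0] on every
	 * completion, so a single read of that CQE's wqe_counter is
	 * enough to derive how many completions arrived since the
	 * last poll; no walk over the CQ ring is needed.
	 */
	static inline uint16_t
	poll_collapsed_cq(struct cq_state *cq, uint16_t cqe0_wqe_counter)
	{
		uint16_t comp = (cqe0_wqe_counter + 1u - cq->next_counter) &
				cq->cq_mask;

		cq->next_counter = (cq->next_counter + comp) & cq->cq_mask;
		return comp;	/* number of new completions */
	}

A non-zero return is what triggers the guest notification; when it is
zero the poll exits without touching the doorbell records.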

Signed-off-by: Matan Azrad <matan@mellanox.com>
---
 drivers/vdpa/mlx5/mlx5_vdpa_event.c | 73 ++++++++++++++++++++-----------------
 1 file changed, 40 insertions(+), 33 deletions(-)

diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_event.c b/drivers/vdpa/mlx5/mlx5_vdpa_event.c
index 69c8bf6..25f11fd 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_event.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_event.c
@@ -127,12 +127,12 @@
 	struct mlx5_devx_cq_attr attr;
 	size_t pgsize = sysconf(_SC_PAGESIZE);
 	uint32_t umem_size;
-	int ret;
 	uint16_t event_nums[1] = {0};
+	uint16_t cq_size = 1 << log_desc_n;
+	int ret;
 
 	cq->log_desc_n = log_desc_n;
-	umem_size = sizeof(struct mlx5_cqe) * (1 << log_desc_n) +
-							sizeof(*cq->db_rec) * 2;
+	umem_size = sizeof(struct mlx5_cqe) * cq_size + sizeof(*cq->db_rec) * 2;
 	cq->umem_buf = rte_zmalloc(__func__, umem_size, 4096);
 	if (!cq->umem_buf) {
 		DRV_LOG(ERR, "Failed to allocate memory for CQ.");
@@ -149,13 +149,13 @@
 	}
 	attr.q_umem_valid = 1;
 	attr.db_umem_valid = 1;
-	attr.use_first_only = 0;
+	attr.use_first_only = 1;
 	attr.overrun_ignore = 0;
 	attr.uar_page_id = priv->uar->page_id;
 	attr.q_umem_id = cq->umem_obj->umem_id;
 	attr.q_umem_offset = 0;
 	attr.db_umem_id = cq->umem_obj->umem_id;
-	attr.db_umem_offset = sizeof(struct mlx5_cqe) * (1 << log_desc_n);
+	attr.db_umem_offset = sizeof(struct mlx5_cqe) * cq_size;
 	attr.eqn = priv->eqn;
 	attr.log_cq_size = log_desc_n;
 	attr.log_page_size = rte_log2_u32(pgsize);
@@ -187,7 +187,8 @@
 	}
 	cq->callfd = callfd;
 	/* Init CQ to ones to be in HW owner in the start. */
-	memset((void *)(uintptr_t)cq->umem_buf, 0xFF, attr.db_umem_offset);
+	cq->cqes[0].op_own = MLX5_CQE_OWNER_MASK;
+	cq->cqes[0].wqe_counter = rte_cpu_to_be_16(cq_size - 1);
 	/* First arming. */
 	mlx5_vdpa_cq_arm(priv, cq);
 	return 0;
@@ -203,34 +204,40 @@
 				container_of(cq, struct mlx5_vdpa_event_qp, cq);
 	const unsigned int cq_size = 1 << cq->log_desc_n;
 	const unsigned int cq_mask = cq_size - 1;
-	uint32_t total = 0;
-	int ret;
-
-	do {
-		volatile struct mlx5_cqe *cqe = cq->cqes + ((cq->cq_ci + total)
-							    & cq_mask);
-
-		ret = check_cqe(cqe, cq_size, cq->cq_ci + total);
-		switch (ret) {
-		case MLX5_CQE_STATUS_ERR:
+	union {
+		struct {
+			uint16_t wqe_counter;
+			uint8_t rsvd5;
+			uint8_t op_own;
+		};
+		uint32_t word;
+	} last_word;
+	uint16_t next_wqe_counter = cq->cq_ci & cq_mask;
+	uint16_t cur_wqe_counter;
+	uint16_t comp;
+
+	last_word.word = rte_read32(&cq->cqes[0].wqe_counter);
+	cur_wqe_counter = rte_be_to_cpu_16(last_word.wqe_counter);
+	comp = (cur_wqe_counter + 1u - next_wqe_counter) & cq_mask;
+	if (comp) {
+		cq->cq_ci += comp;
+		MLX5_ASSERT(!!(cq->cq_ci & cq_size) ==
+			    MLX5_CQE_OWNER(last_word.op_own));
+		MLX5_ASSERT(MLX5_CQE_OPCODE(last_word.op_own) !=
+			    MLX5_CQE_INVALID);
+		if (unlikely(!(MLX5_CQE_OPCODE(last_word.op_own) ==
+			       MLX5_CQE_RESP_ERR ||
+			       MLX5_CQE_OPCODE(last_word.op_own) ==
+			       MLX5_CQE_REQ_ERR)))
 			cq->errors++;
-			/*fall-through*/
-		case MLX5_CQE_STATUS_SW_OWN:
-			total++;
-			break;
-		case MLX5_CQE_STATUS_HW_OWN:
-		default:
-			break;
-		}
-	} while (ret != MLX5_CQE_STATUS_HW_OWN);
-	rte_io_wmb();
-	cq->cq_ci += total;
-	/* Ring CQ doorbell record. */
-	cq->db_rec[0] = rte_cpu_to_be_32(cq->cq_ci);
-	rte_io_wmb();
-	/* Ring SW QP doorbell record. */
-	eqp->db_rec[0] = rte_cpu_to_be_32(cq->cq_ci + cq_size);
-	return total;
+		rte_io_wmb();
+		/* Ring CQ doorbell record. */
+		cq->db_rec[0] = rte_cpu_to_be_32(cq->cq_ci);
+		rte_io_wmb();
+		/* Ring SW QP doorbell record. */
+		eqp->db_rec[0] = rte_cpu_to_be_32(cq->cq_ci + cq_size);
+	}
+	return comp;
 }
 
 static void
-- 
1.8.3.1


Thread overview:
2020-06-18 19:11 [dpdk-dev] [PATCH 0/3] vdpa/mlx5: optimize cpu utilization Matan Azrad
2020-06-18 19:11 ` [dpdk-dev] [PATCH 1/3] vdpa/mlx5: optimize notification events Matan Azrad
2020-06-18 19:11 ` Matan Azrad [this message]
2020-06-18 19:11 ` [dpdk-dev] [PATCH 3/3] vdpa/mlx5: add traffic control device arguments Matan Azrad
2020-06-25 13:30 ` [dpdk-dev] [PATCH v2 0/3] vdpa/mlx5: optimize cpu utilization Matan Azrad
2020-06-25 13:30   ` [dpdk-dev] [PATCH v2 1/3] vdpa/mlx5: optimize notification events Matan Azrad
2020-06-29  9:05     ` Maxime Coquelin
2020-06-25 13:30   ` [dpdk-dev] [PATCH v2 2/3] vdpa/mlx5: optimize completion queue poll Matan Azrad
2020-06-29  9:11     ` Maxime Coquelin
2020-06-25 13:30   ` [dpdk-dev] [PATCH v2 3/3] vdpa/mlx5: control completion queue event mode Matan Azrad
2020-06-29  9:16     ` Maxime Coquelin
2020-06-29 14:01   ` [dpdk-dev] [PATCH v3 0/3] vdpa/mlx5: optimize cpu utilization Matan Azrad
2020-06-29 14:01     ` [dpdk-dev] [PATCH v3 1/3] vdpa/mlx5: optimize notification events Matan Azrad
2020-06-29 14:01     ` [dpdk-dev] [PATCH v3 2/3] vdpa/mlx5: optimize completion queue poll Matan Azrad
2020-06-29 14:01     ` [dpdk-dev] [PATCH v3 3/3] vdpa/mlx5: control completion queue event mode Matan Azrad
2020-06-29 17:24     ` [dpdk-dev] [PATCH v3 0/3] vdpa/mlx5: optimize cpu utilization Maxime Coquelin
