patches for DPDK stable branches
* [PATCH 1/2] net/mana: avoid unnecessary assignments in data path
@ 2023-03-17 23:32 longli
  2023-03-17 23:32 ` [PATCH 2/2] net/mana: optimize completion queue polling by processing a batch at a time longli
                   ` (2 more replies)
  0 siblings, 3 replies; 6+ messages in thread
From: longli @ 2023-03-17 23:32 UTC (permalink / raw)
  To: Ferruh Yigit; +Cc: dev, Ajay Sharma, Long Li, stable

From: Long Li <longli@microsoft.com>

Unnecessary assignments involve memset and waste CPU cycles.
Remove them to reduce CPU usage.

Fixes: 517ed6e2d590 ("net/mana: add basic driver with build environment")
Cc: stable@dpdk.org
Signed-off-by: Long Li <longli@microsoft.com>
---
 drivers/net/mana/gdma.c | 11 ++---------
 drivers/net/mana/mana.h |  2 +-
 drivers/net/mana/rx.c   |  9 ++++-----
 drivers/net/mana/tx.c   | 17 ++++++++++-------
 4 files changed, 17 insertions(+), 22 deletions(-)

diff --git a/drivers/net/mana/gdma.c b/drivers/net/mana/gdma.c
index 3d4039014f..0922463ef9 100644
--- a/drivers/net/mana/gdma.c
+++ b/drivers/net/mana/gdma.c
@@ -123,7 +123,7 @@ write_scatter_gather_list(uint8_t *work_queue_head_pointer,
 int
 gdma_post_work_request(struct mana_gdma_queue *queue,
 		       struct gdma_work_request *work_req,
-		       struct gdma_posted_wqe_info *wqe_info)
+		       uint32_t *wqe_size_in_bu)
 {
 	uint32_t client_oob_size =
 		work_req->inline_oob_size_in_bytes >
@@ -149,14 +149,7 @@ gdma_post_work_request(struct mana_gdma_queue *queue,
 	DRV_LOG(DEBUG, "client_oob_size %u sgl_data_size %u wqe_size %u",
 		client_oob_size, sgl_data_size, wqe_size);
 
-	if (wqe_info) {
-		wqe_info->wqe_index =
-			((queue->head * GDMA_WQE_ALIGNMENT_UNIT_SIZE) &
-			 (queue->size - 1)) / GDMA_WQE_ALIGNMENT_UNIT_SIZE;
-		wqe_info->unmasked_queue_offset = queue->head;
-		wqe_info->wqe_size_in_bu =
-			wqe_size / GDMA_WQE_ALIGNMENT_UNIT_SIZE;
-	}
+	*wqe_size_in_bu = wqe_size / GDMA_WQE_ALIGNMENT_UNIT_SIZE;
 
 	wq_buffer_pointer = gdma_get_wqe_pointer(queue);
 	wq_buffer_pointer += write_dma_client_oob(wq_buffer_pointer, work_req,
diff --git a/drivers/net/mana/mana.h b/drivers/net/mana/mana.h
index 4a05238a96..d4a1ba8492 100644
--- a/drivers/net/mana/mana.h
+++ b/drivers/net/mana/mana.h
@@ -459,7 +459,7 @@ int mana_rq_ring_doorbell(struct mana_rxq *rxq, uint8_t arm);
 
 int gdma_post_work_request(struct mana_gdma_queue *queue,
 			   struct gdma_work_request *work_req,
-			   struct gdma_posted_wqe_info *wqe_info);
+			   uint32_t *wqe_size_in_bu);
 uint8_t *gdma_get_wqe_pointer(struct mana_gdma_queue *queue);
 
 uint16_t mana_rx_burst(void *dpdk_rxq, struct rte_mbuf **rx_pkts,
diff --git a/drivers/net/mana/rx.c b/drivers/net/mana/rx.c
index 55247889c1..bdbd11c5f9 100644
--- a/drivers/net/mana/rx.c
+++ b/drivers/net/mana/rx.c
@@ -52,8 +52,8 @@ mana_alloc_and_post_rx_wqe(struct mana_rxq *rxq)
 {
 	struct rte_mbuf *mbuf = NULL;
 	struct gdma_sgl_element sgl[1];
-	struct gdma_work_request request = {0};
-	struct gdma_posted_wqe_info wqe_info = {0};
+	struct gdma_work_request request;
+	uint32_t wqe_size_in_bu;
 	struct mana_priv *priv = rxq->priv;
 	int ret;
 	struct mana_mr_cache *mr;
@@ -72,7 +72,6 @@ mana_alloc_and_post_rx_wqe(struct mana_rxq *rxq)
 	}
 
 	request.gdma_header.struct_size = sizeof(request);
-	wqe_info.gdma_header.struct_size = sizeof(wqe_info);
 
 	sgl[0].address = rte_cpu_to_le_64(rte_pktmbuf_mtod(mbuf, uint64_t));
 	sgl[0].memory_key = mr->lkey;
@@ -87,14 +86,14 @@ mana_alloc_and_post_rx_wqe(struct mana_rxq *rxq)
 	request.flags = 0;
 	request.client_data_unit = NOT_USING_CLIENT_DATA_UNIT;
 
-	ret = gdma_post_work_request(&rxq->gdma_rq, &request, &wqe_info);
+	ret = gdma_post_work_request(&rxq->gdma_rq, &request, &wqe_size_in_bu);
 	if (!ret) {
 		struct mana_rxq_desc *desc =
 			&rxq->desc_ring[rxq->desc_ring_head];
 
 		/* update queue for tracking pending packets */
 		desc->pkt = mbuf;
-		desc->wqe_size_in_bu = wqe_info.wqe_size_in_bu;
+		desc->wqe_size_in_bu = wqe_size_in_bu;
 		rxq->desc_ring_head = (rxq->desc_ring_head + 1) % rxq->num_desc;
 	} else {
 		DRV_LOG(ERR, "failed to post recv ret %d", ret);
diff --git a/drivers/net/mana/tx.c b/drivers/net/mana/tx.c
index 300bf27cc1..a7ee47c582 100644
--- a/drivers/net/mana/tx.c
+++ b/drivers/net/mana/tx.c
@@ -208,8 +208,8 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 	for (uint16_t pkt_idx = 0; pkt_idx < nb_pkts; pkt_idx++) {
 		struct rte_mbuf *m_pkt = tx_pkts[pkt_idx];
 		struct rte_mbuf *m_seg = m_pkt;
-		struct transmit_oob_v2 tx_oob = {0};
-		struct one_sgl sgl = {0};
+		struct transmit_oob_v2 tx_oob;
+		struct one_sgl sgl;
 		uint16_t seg_idx;
 
 		/* Drop the packet if it exceeds max segments */
@@ -263,6 +263,8 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 			tx_oob.short_oob.tx_compute_TCP_checksum = 1;
 			tx_oob.short_oob.tx_transport_header_offset =
 				m_pkt->l2_len + m_pkt->l3_len;
+		} else {
+			tx_oob.short_oob.tx_compute_TCP_checksum = 0;
 		}
 
 		if ((m_pkt->ol_flags & RTE_MBUF_F_TX_L4_MASK) ==
@@ -301,6 +303,8 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 			}
 
 			tx_oob.short_oob.tx_compute_UDP_checksum = 1;
+		} else {
+			tx_oob.short_oob.tx_compute_UDP_checksum = 0;
 		}
 
 		tx_oob.short_oob.suppress_tx_CQE_generation = 0;
@@ -355,11 +359,10 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 		if (seg_idx != m_pkt->nb_segs)
 			continue;
 
-		struct gdma_work_request work_req = {0};
-		struct gdma_posted_wqe_info wqe_info = {0};
+		struct gdma_work_request work_req;
+		uint32_t wqe_size_in_bu;
 
 		work_req.gdma_header.struct_size = sizeof(work_req);
-		wqe_info.gdma_header.struct_size = sizeof(wqe_info);
 
 		work_req.sgl = sgl.gdma_sgl;
 		work_req.num_sgl_elements = m_pkt->nb_segs;
@@ -370,14 +373,14 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 		work_req.client_data_unit = NOT_USING_CLIENT_DATA_UNIT;
 
 		ret = gdma_post_work_request(&txq->gdma_sq, &work_req,
-					     &wqe_info);
+					     &wqe_size_in_bu);
 		if (!ret) {
 			struct mana_txq_desc *desc =
 				&txq->desc_ring[txq->desc_ring_head];
 
 			/* Update queue for tracking pending requests */
 			desc->pkt = m_pkt;
-			desc->wqe_size_in_bu = wqe_info.wqe_size_in_bu;
+			desc->wqe_size_in_bu = wqe_size_in_bu;
 			txq->desc_ring_head =
 				(txq->desc_ring_head + 1) % txq->num_desc;
 
-- 
2.32.0


^ permalink raw reply	[flat|nested] 6+ messages in thread
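
The cycles saved by the patch above come from the implicit zeroing that a
"= {0}" initializer performs on every packet. A minimal, self-contained
sketch of the before/after pattern follows; the struct is a hypothetical
stand-in, not the driver's gdma_work_request. Note that once the zeroing is
dropped, every field the consumer reads must be assigned explicitly, which
is why the tx.c hunks above add else branches clearing the checksum bits.

#include <stdint.h>

/* Hypothetical stand-in for a per-packet request struct (mana.h has the real one). */
struct work_request {
	uint64_t sgl_addr;
	uint32_t num_sgl_elements;
	uint32_t inline_oob_size_in_bytes;
	uint32_t client_data_unit;
	uint32_t flags;
};

/* Before: "= {0}" zero-initializes the whole struct (typically a memset)
 * on every packet, even though the fields that matter are assigned again
 * right below.
 */
static void post_zeroed(uint64_t addr)
{
	struct work_request req = {0};

	req.sgl_addr = addr;
	req.num_sgl_elements = 1;
	/* post req ... */
	(void)req;
}

/* After: no zeroing; assign every field the consumer actually reads. */
static void post_assigned(uint64_t addr)
{
	struct work_request req;

	req.sgl_addr = addr;
	req.num_sgl_elements = 1;
	req.inline_oob_size_in_bytes = 0;
	req.client_data_unit = 0;
	req.flags = 0;
	/* post req ... */
	(void)req;
}

int main(void)
{
	post_zeroed(0x1000);
	post_assigned(0x2000);
	return 0;
}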

* [PATCH 2/2] net/mana: optimize completion queue polling by processing a batch at a time
  2023-03-17 23:32 [PATCH 1/2] net/mana: avoid unnecessary assignments in data path longli
@ 2023-03-17 23:32 ` longli
  2023-05-02 18:23   ` Ferruh Yigit
  2023-03-21 11:48 ` [PATCH 1/2] net/mana: avoid unnecessary assignments in data path Ferruh Yigit
  2023-05-02 18:32 ` Ferruh Yigit
  2 siblings, 1 reply; 6+ messages in thread
From: longli @ 2023-03-17 23:32 UTC (permalink / raw)
  To: Ferruh Yigit; +Cc: dev, Ajay Sharma, Long Li, stable

From: Long Li <longli@microsoft.com>

We can poll completion queues in a batch to speed up completion processing.
Also, the completion data doesn't need to be copied out of the hardware
queue; it can be passed as a pointer to be consumed by the RX/TX code.

Fixes: 517ed6e2d590 ("net/mana: add basic driver with build environment")
Cc: stable@dpdk.org
Signed-off-by: Long Li <longli@microsoft.com>
---
 drivers/net/mana/gdma.c | 62 ++++++++++++++++++++++-------------------
 drivers/net/mana/mana.c | 22 +++++++++++++++
 drivers/net/mana/mana.h | 25 +++++++----------
 drivers/net/mana/rx.c   | 21 +++++---------
 drivers/net/mana/tx.c   | 11 +++++---
 5 files changed, 80 insertions(+), 61 deletions(-)

diff --git a/drivers/net/mana/gdma.c b/drivers/net/mana/gdma.c
index 0922463ef9..db1571a5c8 100644
--- a/drivers/net/mana/gdma.c
+++ b/drivers/net/mana/gdma.c
@@ -252,45 +252,51 @@ mana_ring_doorbell(void *db_page, enum gdma_queue_types queue_type,
 /*
  * Poll completion queue for completions.
  */
-int
-gdma_poll_completion_queue(struct mana_gdma_queue *cq, struct gdma_comp *comp)
+uint32_t
+gdma_poll_completion_queue(struct mana_gdma_queue *cq,
+			   struct gdma_comp *gdma_comp, uint32_t max_comp)
 {
 	struct gdma_hardware_completion_entry *cqe;
-	uint32_t head = cq->head % cq->count;
 	uint32_t new_owner_bits, old_owner_bits;
 	uint32_t cqe_owner_bits;
+	uint32_t num_comp = 0;
 	struct gdma_hardware_completion_entry *buffer = cq->buffer;
 
-	cqe = &buffer[head];
-	new_owner_bits = (cq->head / cq->count) & COMPLETION_QUEUE_OWNER_MASK;
-	old_owner_bits = (cq->head / cq->count - 1) &
-				COMPLETION_QUEUE_OWNER_MASK;
-	cqe_owner_bits = cqe->owner_bits;
+	while (num_comp < max_comp) {
+		cqe = &buffer[cq->head % cq->count];
+		new_owner_bits = (cq->head / cq->count) &
+					COMPLETION_QUEUE_OWNER_MASK;
+		old_owner_bits = (cq->head / cq->count - 1) &
+					COMPLETION_QUEUE_OWNER_MASK;
+		cqe_owner_bits = cqe->owner_bits;
+
+		DRV_LOG(DEBUG, "comp cqe bits 0x%x owner bits 0x%x",
+			cqe_owner_bits, old_owner_bits);
+
+		/* No new entry */
+		if (cqe_owner_bits == old_owner_bits)
+			break;
+
+		if (cqe_owner_bits != new_owner_bits) {
+			DRV_LOG(ERR, "CQ overflowed, ID %u cqe 0x%x new 0x%x",
+				cq->id, cqe_owner_bits, new_owner_bits);
+			break;
+		}
 
-	DRV_LOG(DEBUG, "comp cqe bits 0x%x owner bits 0x%x",
-		cqe_owner_bits, old_owner_bits);
+		gdma_comp[num_comp].cqe_data = cqe->dma_client_data;
+		num_comp++;
 
-	if (cqe_owner_bits == old_owner_bits)
-		return 0; /* No new entry */
+		cq->head++;
 
-	if (cqe_owner_bits != new_owner_bits) {
-		DRV_LOG(ERR, "CQ overflowed, ID %u cqe 0x%x new 0x%x",
-			cq->id, cqe_owner_bits, new_owner_bits);
-		return -1;
+		DRV_LOG(DEBUG, "comp new 0x%x old 0x%x cqe 0x%x wq %u sq %u head %u",
+			new_owner_bits, old_owner_bits, cqe_owner_bits,
+			cqe->wq_num, cqe->is_sq, cq->head);
 	}
 
-	/* Ensure checking owner bits happens before reading from CQE */
+	/* Make sure the CQE owner bits are checked before we access the data
+	 * in CQE
+	 */
 	rte_rmb();
 
-	comp->work_queue_number = cqe->wq_num;
-	comp->send_work_queue = cqe->is_sq;
-
-	memcpy(comp->completion_data, cqe->dma_client_data, GDMA_COMP_DATA_SIZE);
-
-	cq->head++;
-
-	DRV_LOG(DEBUG, "comp new 0x%x old 0x%x cqe 0x%x wq %u sq %u head %u",
-		new_owner_bits, old_owner_bits, cqe_owner_bits,
-		comp->work_queue_number, comp->send_work_queue, cq->head);
-	return 1;
+	return num_comp;
 }
diff --git a/drivers/net/mana/mana.c b/drivers/net/mana/mana.c
index 8a782c0d63..2463f34c1e 100644
--- a/drivers/net/mana/mana.c
+++ b/drivers/net/mana/mana.c
@@ -487,6 +487,15 @@ mana_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 		goto fail;
 	}
 
+	txq->gdma_comp_buf = rte_malloc_socket("mana_txq_comp",
+			sizeof(*txq->gdma_comp_buf) * nb_desc,
+			RTE_CACHE_LINE_SIZE, socket_id);
+	if (!txq->gdma_comp_buf) {
+		DRV_LOG(ERR, "failed to allocate txq comp");
+		ret = -ENOMEM;
+		goto fail;
+	}
+
 	ret = mana_mr_btree_init(&txq->mr_btree,
 				 MANA_MR_BTREE_PER_QUEUE_N, socket_id);
 	if (ret) {
@@ -506,6 +515,7 @@ mana_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 	return 0;
 
 fail:
+	rte_free(txq->gdma_comp_buf);
 	rte_free(txq->desc_ring);
 	rte_free(txq);
 	return ret;
@@ -518,6 +528,7 @@ mana_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 
 	mana_mr_btree_free(&txq->mr_btree);
 
+	rte_free(txq->gdma_comp_buf);
 	rte_free(txq->desc_ring);
 	rte_free(txq);
 }
@@ -557,6 +568,15 @@ mana_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 	rxq->desc_ring_head = 0;
 	rxq->desc_ring_tail = 0;
 
+	rxq->gdma_comp_buf = rte_malloc_socket("mana_rxq_comp",
+			sizeof(*rxq->gdma_comp_buf) * nb_desc,
+			RTE_CACHE_LINE_SIZE, socket_id);
+	if (!rxq->gdma_comp_buf) {
+		DRV_LOG(ERR, "failed to allocate rxq comp");
+		ret = -ENOMEM;
+		goto fail;
+	}
+
 	ret = mana_mr_btree_init(&rxq->mr_btree,
 				 MANA_MR_BTREE_PER_QUEUE_N, socket_id);
 	if (ret) {
@@ -572,6 +592,7 @@ mana_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 	return 0;
 
 fail:
+	rte_free(rxq->gdma_comp_buf);
 	rte_free(rxq->desc_ring);
 	rte_free(rxq);
 	return ret;
@@ -584,6 +605,7 @@ mana_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 
 	mana_mr_btree_free(&rxq->mr_btree);
 
+	rte_free(rxq->gdma_comp_buf);
 	rte_free(rxq->desc_ring);
 	rte_free(rxq);
 }
diff --git a/drivers/net/mana/mana.h b/drivers/net/mana/mana.h
index d4a1ba8492..38a0f64745 100644
--- a/drivers/net/mana/mana.h
+++ b/drivers/net/mana/mana.h
@@ -142,19 +142,6 @@ struct gdma_header {
 #define COMPLETION_QUEUE_OWNER_MASK \
 	((1 << (COMPLETION_QUEUE_ENTRY_OWNER_BITS_SIZE)) - 1)
 
-struct gdma_comp {
-	struct gdma_header gdma_header;
-
-	/* Filled by GDMA core */
-	uint32_t completion_data[GDMA_COMP_DATA_SIZE_IN_UINT32];
-
-	/* Filled by GDMA core */
-	uint32_t work_queue_number;
-
-	/* Filled by GDMA core */
-	bool send_work_queue;
-};
-
 struct gdma_hardware_completion_entry {
 	char dma_client_data[GDMA_COMP_DATA_SIZE];
 	union {
@@ -391,6 +378,11 @@ struct mana_gdma_queue {
 
 #define MANA_MR_BTREE_PER_QUEUE_N	64
 
+struct gdma_comp {
+	/* Filled by GDMA core */
+	char *cqe_data;
+};
+
 struct mana_txq {
 	struct mana_priv *priv;
 	uint32_t num_desc;
@@ -399,6 +391,7 @@ struct mana_txq {
 
 	struct mana_gdma_queue gdma_sq;
 	struct mana_gdma_queue gdma_cq;
+	struct gdma_comp *gdma_comp_buf;
 
 	uint32_t tx_vp_offset;
 
@@ -433,6 +426,7 @@ struct mana_rxq {
 
 	struct mana_gdma_queue gdma_rq;
 	struct mana_gdma_queue gdma_cq;
+	struct gdma_comp *gdma_comp_buf;
 
 	struct mana_stats stats;
 	struct mana_mr_btree mr_btree;
@@ -473,8 +467,9 @@ uint16_t mana_rx_burst_removed(void *dpdk_rxq, struct rte_mbuf **pkts,
 uint16_t mana_tx_burst_removed(void *dpdk_rxq, struct rte_mbuf **pkts,
 			       uint16_t pkts_n);
 
-int gdma_poll_completion_queue(struct mana_gdma_queue *cq,
-			       struct gdma_comp *comp);
+uint32_t gdma_poll_completion_queue(struct mana_gdma_queue *cq,
+				    struct gdma_comp *gdma_comp,
+				    uint32_t max_comp);
 
 int mana_start_rx_queues(struct rte_eth_dev *dev);
 int mana_start_tx_queues(struct rte_eth_dev *dev);
diff --git a/drivers/net/mana/rx.c b/drivers/net/mana/rx.c
index bdbd11c5f9..3e09c6ee5b 100644
--- a/drivers/net/mana/rx.c
+++ b/drivers/net/mana/rx.c
@@ -383,24 +383,17 @@ mana_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 	uint8_t wqe_posted = 0;
 	struct mana_rxq *rxq = dpdk_rxq;
 	struct mana_priv *priv = rxq->priv;
-	struct gdma_comp comp;
 	struct rte_mbuf *mbuf;
 	int ret;
+	uint32_t num_pkts;
 
-	while (pkt_received < pkts_n &&
-	       gdma_poll_completion_queue(&rxq->gdma_cq, &comp) == 1) {
-		struct mana_rxq_desc *desc;
-		struct mana_rx_comp_oob *oob =
-			(struct mana_rx_comp_oob *)&comp.completion_data[0];
-
-		if (comp.work_queue_number != rxq->gdma_rq.id) {
-			DRV_LOG(ERR, "rxq comp id mismatch wqid=0x%x rcid=0x%x",
-				comp.work_queue_number, rxq->gdma_rq.id);
-			rxq->stats.errors++;
-			break;
-		}
+	num_pkts = gdma_poll_completion_queue(&rxq->gdma_cq, rxq->gdma_comp_buf, pkts_n);
+	for (uint32_t i = 0; i < num_pkts; i++) {
+		struct mana_rx_comp_oob *oob = (struct mana_rx_comp_oob *)
+			rxq->gdma_comp_buf[i].cqe_data;
+		struct mana_rxq_desc *desc =
+			&rxq->desc_ring[rxq->desc_ring_tail];
 
-		desc = &rxq->desc_ring[rxq->desc_ring_tail];
 		rxq->gdma_rq.tail += desc->wqe_size_in_bu;
 		mbuf = desc->pkt;
 
diff --git a/drivers/net/mana/tx.c b/drivers/net/mana/tx.c
index a7ee47c582..ee0319c71d 100644
--- a/drivers/net/mana/tx.c
+++ b/drivers/net/mana/tx.c
@@ -170,17 +170,20 @@ mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 {
 	struct mana_txq *txq = dpdk_txq;
 	struct mana_priv *priv = txq->priv;
-	struct gdma_comp comp;
 	int ret;
 	void *db_page;
 	uint16_t pkt_sent = 0;
+	uint32_t num_comp;
 
 	/* Process send completions from GDMA */
-	while (gdma_poll_completion_queue(&txq->gdma_cq, &comp) == 1) {
+	num_comp = gdma_poll_completion_queue(&txq->gdma_cq,
+			txq->gdma_comp_buf, txq->num_desc);
+
+	for (uint32_t i = 0; i < num_comp; i++) {
 		struct mana_txq_desc *desc =
 			&txq->desc_ring[txq->desc_ring_tail];
-		struct mana_tx_comp_oob *oob =
-			(struct mana_tx_comp_oob *)&comp.completion_data[0];
+		struct mana_tx_comp_oob *oob = (struct mana_tx_comp_oob *)
+			txq->gdma_comp_buf[i].cqe_data;
 
 		if (oob->cqe_hdr.cqe_type != CQE_TX_OKAY) {
 			DRV_LOG(ERR,
-- 
2.32.0


^ permalink raw reply	[flat|nested] 6+ messages in thread
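
The reworked gdma_poll_completion_queue() above drains up to max_comp
entries per call and returns pointers into the completion queue buffer
instead of copying each CQE. A self-contained sketch of the owner-bit
scheme it relies on follows; the types and the software producer are
illustrative stand-ins (on real hardware the NIC writes the CQEs), not the
driver's definitions.

#include <stdint.h>
#include <stdio.h>

#define CQ_COUNT	8			/* ring entries, power of two */
#define OWNER_BITS	3
#define OWNER_MASK	((1u << OWNER_BITS) - 1)

struct cqe {					/* stand-in for the hardware CQE */
	char data[8];
	uint32_t owner_bits;			/* which lap around the ring wrote this entry */
};

struct comp {					/* stand-in for struct gdma_comp */
	char *cqe_data;				/* points into the ring, nothing is copied */
};

static struct cqe ring[CQ_COUNT];
static uint32_t prod_head, cons_head;

static void produce(const char *msg)
{
	struct cqe *e = &ring[prod_head % CQ_COUNT];

	snprintf(e->data, sizeof(e->data), "%s", msg);
	e->owner_bits = (prod_head / CQ_COUNT) & OWNER_MASK;
	prod_head++;
}

/* Batched poll: the same checks as the patch, on stand-in types. */
static uint32_t poll_batch(struct comp *out, uint32_t max)
{
	uint32_t n = 0;

	while (n < max) {
		struct cqe *e = &ring[cons_head % CQ_COUNT];
		uint32_t new_bits = (cons_head / CQ_COUNT) & OWNER_MASK;
		uint32_t old_bits = (cons_head / CQ_COUNT - 1) & OWNER_MASK;

		if (e->owner_bits == old_bits)
			break;			/* no new entry yet */
		if (e->owner_bits != new_bits) {
			printf("CQ overflow at head %u\n", cons_head);
			break;
		}
		out[n++].cqe_data = e->data;	/* pointer, not a copy */
		cons_head++;
	}
	/* the driver issues rte_rmb() here, before the caller reads out[] */
	return n;
}

int main(void)
{
	struct comp batch[CQ_COUNT];

	for (uint32_t i = 0; i < CQ_COUNT; i++)
		ring[i].owner_bits = OWNER_MASK;	/* "previous lap": ring starts empty */

	produce("p0");
	produce("p1");

	uint32_t n = poll_batch(batch, CQ_COUNT);
	for (uint32_t i = 0; i < n; i++)
		printf("completion %u: %s\n", i, batch[i].cqe_data);
	return 0;
}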

* Re: [PATCH 1/2] net/mana: avoid unnecessary assignments in data path
  2023-03-17 23:32 [PATCH 1/2] net/mana: avoid unnecessary assignments in data path longli
  2023-03-17 23:32 ` [PATCH 2/2] net/mana: optimize completion queue polling by processing a batch at a time longli
@ 2023-03-21 11:48 ` Ferruh Yigit
  2023-03-21 16:31   ` Long Li
  2023-05-02 18:32 ` Ferruh Yigit
  2 siblings, 1 reply; 6+ messages in thread
From: Ferruh Yigit @ 2023-03-21 11:48 UTC (permalink / raw)
  To: longli; +Cc: dev, Ajay Sharma, Long Li, stable

On 3/17/2023 11:32 PM, longli@linuxonhyperv.com wrote:
> From: Long Li <longli@microsoft.com>
> 
> Unnecessary assignments involve memset and waste CPU cycles.
> Remove them to reduce CPU usage.
> 
> Fixes: 517ed6e2d590 ("net/mana: add basic driver with build environment")
> Cc: stable@dpdk.org
> Signed-off-by: Long Li <longli@microsoft.com>

Hi Long,

Is this set for next release, since we are a few days away from the
current one?

^ permalink raw reply	[flat|nested] 6+ messages in thread

* RE: [PATCH 1/2] net/mana: avoid unnecessary assignments in data path
  2023-03-21 11:48 ` [PATCH 1/2] net/mana: avoid unnecessary assignments in data path Ferruh Yigit
@ 2023-03-21 16:31   ` Long Li
  0 siblings, 0 replies; 6+ messages in thread
From: Long Li @ 2023-03-21 16:31 UTC (permalink / raw)
  To: Ferruh Yigit, longli; +Cc: dev, Ajay Sharma, stable

> Subject: Re: [PATCH 1/2] net/mana: avoid unnecessary assignments in data
> path
> 
> On 3/17/2023 11:32 PM, longli@linuxonhyperv.com wrote:
> > From: Long Li <longli@microsoft.com>
> >
> > Unnecessary assignments involve memset and waste CPU cycles.
> > Remove them to reduce CPU usage.
> >
> > Fixes: 517ed6e2d590 ("net/mana: add basic driver with build
> > environment")
> > Cc: stable@dpdk.org
> > Signed-off-by: Long Li <longli@microsoft.com>
> 
> Hi Long,
> 
> Is this set for next release, since we are a few days away from the current one?

Hi Ferruh,

This patch can wait for the next release.

Thank you,
Long

^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [PATCH 2/2] net/mana: optimize completion queue polling by processing a batch at a time
  2023-03-17 23:32 ` [PATCH 2/2] net/mana: optimize completion queue polling by processing a batch at a time longli
@ 2023-05-02 18:23   ` Ferruh Yigit
  0 siblings, 0 replies; 6+ messages in thread
From: Ferruh Yigit @ 2023-05-02 18:23 UTC (permalink / raw)
  To: longli; +Cc: dev, Ajay Sharma, Long Li, stable, Thomas Monjalon, David Marchand

On 3/17/2023 11:32 PM, longli@linuxonhyperv.com wrote:
> -	while (pkt_received < pkts_n &&
> -	       gdma_poll_completion_queue(&rxq->gdma_cq, &comp) == 1) {
> -		struct mana_rxq_desc *desc;
> -		struct mana_rx_comp_oob *oob =
> -			(struct mana_rx_comp_oob *)&comp.completion_data[0];
> -
> -		if (comp.work_queue_number != rxq->gdma_rq.id) {
> -			DRV_LOG(ERR, "rxq comp id mismatch wqid=0x%x rcid=0x%x",
> -				comp.work_queue_number, rxq->gdma_rq.id);
> -			rxq->stats.errors++;
> -			break;
> -		}
> +	num_pkts = gdma_poll_completion_queue(&rxq->gdma_cq, rxq->gdma_comp_buf, pkts_n);
> +	for (uint32_t i = 0; i < num_pkts; i++) {
> +		struct mana_rx_comp_oob *oob = (struct mana_rx_comp_oob *)
> +			rxq->gdma_comp_buf[i].cqe_data;
> +		struct mana_rxq_desc *desc =
> +			&rxq->desc_ring[rxq->desc_ring_tail];


Checkpatch complains about defining a variable inside for() [1], but as we
are switching to the C99 standard, I don't see any reason to prevent this
usage.

I can see that the relevant checkpatch commit mentions a compiler warning
[2], but I am not sure whether that is still valid after the C99 switch. I
will send a patch to discuss this.



[1]
Warning in drivers/net/mana/rx.c:
Declaring a variable inside for()

[2]
43e73483a4b8 ("devtools: forbid variable declaration inside for")

^ permalink raw reply	[flat|nested] 6+ messages in thread
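
For reference, the construct that the checkpatch rule flags next to the
pre-C99 form it expects; a generic illustration, not DPDK code:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t vals[4] = {1, 2, 3, 4};

	/* C99/C11 form used by the patch: the counter is scoped to the loop. */
	for (uint32_t i = 0; i < 4; i++)
		printf("%u\n", vals[i]);

	/* Pre-C99 form that the checkpatch rule [2] enforces. */
	uint32_t j;

	for (j = 0; j < 4; j++)
		printf("%u\n", vals[j]);

	return 0;
}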

* Re: [PATCH 1/2] net/mana: avoid unnecessary assignments in data path
  2023-03-17 23:32 [PATCH 1/2] net/mana: avoid unnecessary assignments in data path longli
  2023-03-17 23:32 ` [PATCH 2/2] net/mana: optimize completion queue polling by processing a batch at a time longli
  2023-03-21 11:48 ` [PATCH 1/2] net/mana: avoid unnecessary assignments in data path Ferruh Yigit
@ 2023-05-02 18:32 ` Ferruh Yigit
  2 siblings, 0 replies; 6+ messages in thread
From: Ferruh Yigit @ 2023-05-02 18:32 UTC (permalink / raw)
  To: longli; +Cc: dev, Ajay Sharma, Long Li, stable

On 3/17/2023 11:32 PM, longli@linuxonhyperv.com wrote:
> From: Long Li <longli@microsoft.com>
> 
> Unnecessary assignments involve memset and waste CPU cycles.
> Remove them to reduce CPU usage.
> 
> Fixes: 517ed6e2d590 ("net/mana: add basic driver with build environment")
> Cc: stable@dpdk.org
> Signed-off-by: Long Li <longli@microsoft.com>

Series applied to dpdk-next-net/main, thanks.

^ permalink raw reply	[flat|nested] 6+ messages in thread

Thread overview: 6+ messages
2023-03-17 23:32 [PATCH 1/2] net/mana: avoid unnecessary assignments in data path longli
2023-03-17 23:32 ` [PATCH 2/2] net/mana: optimize completion queue polling by processing a batch at a time longli
2023-05-02 18:23   ` Ferruh Yigit
2023-03-21 11:48 ` [PATCH 1/2] net/mana: avoid unnecessary assignments in data path Ferruh Yigit
2023-03-21 16:31   ` Long Li
2023-05-02 18:32 ` Ferruh Yigit
