DPDK patches and discussions
* [PATCH] net/mlx5: replenish MPRQ buffers for miniCQEs
@ 2023-11-01 14:43 Alexander Kozyrev
  2023-11-01 14:57 ` [PATCH v2] " Alexander Kozyrev
  0 siblings, 1 reply; 4+ messages in thread
From: Alexander Kozyrev @ 2023-11-01 14:43 UTC
  To: dev; +Cc: suanmingm, viacheslavo, rasland

Keep unzipping if the next CQE is the miniCQE array in the
rxq_cq_decompress_v() routine only for the non-MPRQ scenario;
MPRQ requires buffer replenishment between the miniCQEs.

Restore the check for the initial compressed CQE for SPRQ,
and verify that the current CQE is not compressed before
copying it as a possible title CQE.
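
In short, the new "keep" flag tells rxq_cq_decompress_v() whether it may
chase a follow-up miniCQE array (the SPRQ caller passes true) or must
stop after a single compressed session so that buffers can be
replenished first (the MPRQ caller passes false). A minimal standalone
sketch of that contract follows; all types, macros, and values are
simplified stand-ins, not the actual mlx5 definitions.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Toy stand-ins for the mlx5 CQE machinery, not the real driver types. */
struct toy_cqe { uint8_t op_own; };
#define TOY_COMPRESSED 0x3
#define TOY_FORMAT(op_own) (((op_own) >> 2) & 0x3)

/* Models the tail of rxq_cq_decompress_v(): consume one compressed
 * session, then keep going only while "keep" is set and the next CQE
 * is another miniCQE array. */
static uint16_t toy_decompress(const struct toy_cqe *cq, uint16_t n, bool keep)
{
	uint16_t sessions = 0;

	do {
		sessions++; /* one compressed session consumed */
		cq++;
	} while (keep && sessions < n &&
		 TOY_FORMAT(cq->op_own) == TOY_COMPRESSED);
	return sessions;
}

int main(void)
{
	struct toy_cqe ring[3] = {
		{ .op_own = TOY_COMPRESSED << 2 },
		{ .op_own = TOY_COMPRESSED << 2 },
		{ .op_own = 0 },
	};

	/* SPRQ path: chase consecutive miniCQE arrays in one call. */
	printf("SPRQ: %u sessions\n", toy_decompress(ring, 3, true));
	/* MPRQ path: stop after one so buffers can be replenished. */
	printf("MPRQ: %u session\n", toy_decompress(ring, 3, false));
	return 0;
}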

Signed-off-by: Alexander Kozyrev <akozyrev@nvidia.com>
---
 drivers/net/mlx5/mlx5_rxtx_vec.c         | 46 ++++++++++++++++--------
 drivers/net/mlx5/mlx5_rxtx_vec_altivec.h |  6 ++--
 drivers/net/mlx5/mlx5_rxtx_vec_neon.h    |  6 ++--
 drivers/net/mlx5/mlx5_rxtx_vec_sse.h     |  6 ++--
 4 files changed, 44 insertions(+), 20 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_rxtx_vec.c b/drivers/net/mlx5/mlx5_rxtx_vec.c
index 2363d7ed27..ea1c497b90 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec.c
+++ b/drivers/net/mlx5/mlx5_rxtx_vec.c
@@ -331,6 +331,15 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts,
 	}
 	/* At this point, there shouldn't be any remaining packets. */
 	MLX5_ASSERT(rxq->decompressed == 0);
+	/* Go directly to unzipping in case the first CQE is compressed. */
+	if (rxq->cqe_comp_layout) {
+		ret = check_cqe_iteration(cq, rxq->cqe_n, rxq->cq_ci);
+		if (ret == MLX5_CQE_STATUS_SW_OWN &&
+		    (MLX5_CQE_FORMAT(cq->op_own) == MLX5_COMPRESSED)) {
+				comp_idx = 0;
+				goto decompress;
+		}
+	}
 	/* Process all the CQEs */
 	nocmp_n = rxq_cq_process_v(rxq, cq, elts, pkts, pkts_n, err, &comp_idx);
 	/* If no new CQE seen, return without updating cq_db. */
@@ -345,18 +354,23 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts,
 	rcvd_pkt += nocmp_n;
 	/* Copy title packet for future compressed sessions. */
 	if (rxq->cqe_comp_layout) {
-		next = &(*rxq->cqes)[rxq->cq_ci & q_mask];
-		ret = check_cqe_iteration(next,	rxq->cqe_n, rxq->cq_ci);
-		if (ret != MLX5_CQE_STATUS_SW_OWN ||
-		    MLX5_CQE_FORMAT(next->op_own) == MLX5_COMPRESSED)
-			rte_memcpy(&rxq->title_pkt, elts[nocmp_n - 1],
-				   sizeof(struct rte_mbuf));
+		ret = check_cqe_iteration(cq, rxq->cqe_n, rxq->cq_ci);
+		if (ret == MLX5_CQE_STATUS_SW_OWN &&
+		    (MLX5_CQE_FORMAT(cq->op_own) != MLX5_COMPRESSED)) {
+			next = &(*rxq->cqes)[rxq->cq_ci & q_mask];
+			ret = check_cqe_iteration(next,	rxq->cqe_n, rxq->cq_ci);
+			if (MLX5_CQE_FORMAT(next->op_own) == MLX5_COMPRESSED ||
+			    ret != MLX5_CQE_STATUS_SW_OWN)
+				rte_memcpy(&rxq->title_pkt, elts[nocmp_n - 1],
+					   sizeof(struct rte_mbuf));
+		}
 	}
+decompress:
 	/* Decompress the last CQE if compressed. */
 	if (comp_idx < MLX5_VPMD_DESCS_PER_LOOP) {
 		MLX5_ASSERT(comp_idx == (nocmp_n % MLX5_VPMD_DESCS_PER_LOOP));
 		rxq->decompressed = rxq_cq_decompress_v(rxq, &cq[nocmp_n],
-							&elts[nocmp_n]);
+							&elts[nocmp_n], true);
 		rxq->cq_ci += rxq->decompressed;
 		/* Return more packets if needed. */
 		if (nocmp_n < pkts_n) {
@@ -495,18 +509,22 @@ rxq_burst_mprq_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts,
 	rcvd_pkt += cp_pkt;
 	/* Copy title packet for future compressed sessions. */
 	if (rxq->cqe_comp_layout) {
-		next = &(*rxq->cqes)[rxq->cq_ci & q_mask];
-		ret = check_cqe_iteration(next,	rxq->cqe_n, rxq->cq_ci);
-		if (ret != MLX5_CQE_STATUS_SW_OWN ||
-		    MLX5_CQE_FORMAT(next->op_own) == MLX5_COMPRESSED)
-			rte_memcpy(&rxq->title_pkt, elts[nocmp_n - 1],
-				   sizeof(struct rte_mbuf));
+		ret = check_cqe_iteration(cq, rxq->cqe_n, rxq->cq_ci);
+		if (ret == MLX5_CQE_STATUS_SW_OWN &&
+		    (MLX5_CQE_FORMAT(cq->op_own) != MLX5_COMPRESSED)) {
+			next = &(*rxq->cqes)[rxq->cq_ci & q_mask];
+			ret = check_cqe_iteration(next,	rxq->cqe_n, rxq->cq_ci);
+			if (MLX5_CQE_FORMAT(next->op_own) == MLX5_COMPRESSED ||
+			    ret != MLX5_CQE_STATUS_SW_OWN)
+				rte_memcpy(&rxq->title_pkt, elts[nocmp_n - 1],
+					   sizeof(struct rte_mbuf));
+		}
 	}
 	/* Decompress the last CQE if compressed. */
 	if (comp_idx < MLX5_VPMD_DESCS_PER_LOOP) {
 		MLX5_ASSERT(comp_idx == (nocmp_n % MLX5_VPMD_DESCS_PER_LOOP));
 		rxq->decompressed = rxq_cq_decompress_v(rxq, &cq[nocmp_n],
-							&elts[nocmp_n]);
+							&elts[nocmp_n], false);
 		/* Return more packets if needed. */
 		if (nocmp_n < pkts_n) {
 			uint16_t n = rxq->decompressed;
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_altivec.h b/drivers/net/mlx5/mlx5_rxtx_vec_altivec.h
index cccfa7f2d3..b2bbc4ba17 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec_altivec.h
+++ b/drivers/net/mlx5/mlx5_rxtx_vec_altivec.h
@@ -68,13 +68,15 @@ rxq_copy_mbuf_v(struct rte_mbuf **elts, struct rte_mbuf **pkts, uint16_t n)
  * @param elts
  *   Pointer to SW ring to be filled. The first mbuf has to be pre-built from
  *   the title completion descriptor to be copied to the rest of mbufs.
+ * @param keep
+ *   Keep unzipping if the next CQE is the miniCQE array.
  *
  * @return
  *   Number of mini-CQEs successfully decompressed.
  */
 static inline uint16_t
 rxq_cq_decompress_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
-		    struct rte_mbuf **elts)
+		    struct rte_mbuf **elts, bool keep)
 {
 	volatile struct mlx5_mini_cqe8 *mcq =
 		(void *)&(cq + !rxq->cqe_comp_layout)->pkt_info;
@@ -507,7 +509,7 @@ rxq_cq_decompress_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
 		}
 	}
 
-	if (rxq->cqe_comp_layout) {
+	if (rxq->cqe_comp_layout && keep) {
 		int ret;
 		/* Keep unzipping if the next CQE is the miniCQE array. */
 		cq = &cq[mcqe_n];
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_neon.h b/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
index 3ed688191f..510f60b25d 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
+++ b/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
@@ -63,13 +63,15 @@ rxq_copy_mbuf_v(struct rte_mbuf **elts, struct rte_mbuf **pkts, uint16_t n)
  * @param elts
  *   Pointer to SW ring to be filled. The first mbuf has to be pre-built from
  *   the title completion descriptor to be copied to the rest of mbufs.
+ * @param keep
+ *   Keep unzipping if the next CQE is the miniCQE array.
  *
  * @return
  *   Number of mini-CQEs successfully decompressed.
  */
 static inline uint16_t
 rxq_cq_decompress_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
-		    struct rte_mbuf **elts)
+		    struct rte_mbuf **elts, bool keep)
 {
 	volatile struct mlx5_mini_cqe8 *mcq =
 		(void *)&(cq + !rxq->cqe_comp_layout)->pkt_info;
@@ -372,7 +374,7 @@ rxq_cq_decompress_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
 			}
 		}
 	}
-	if (rxq->cqe_comp_layout) {
+	if (rxq->cqe_comp_layout && keep) {
 		int ret;
 		/* Keep unzipping if the next CQE is the miniCQE array. */
 		cq = &cq[mcqe_n];
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_sse.h b/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
index 2bdd1f676d..06bec45cdf 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
+++ b/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
@@ -65,13 +65,15 @@ rxq_copy_mbuf_v(struct rte_mbuf **elts, struct rte_mbuf **pkts, uint16_t n)
  * @param elts
  *   Pointer to SW ring to be filled. The first mbuf has to be pre-built from
  *   the title completion descriptor to be copied to the rest of mbufs.
+ * @param keep
+ *   Keep unzipping if the next CQE is the miniCQE array.
  *
  * @return
  *   Number of mini-CQEs successfully decompressed.
  */
 static inline uint16_t
 rxq_cq_decompress_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
-		    struct rte_mbuf **elts)
+		    struct rte_mbuf **elts, bool keep)
 {
 	volatile struct mlx5_mini_cqe8 *mcq = (void *)(cq + !rxq->cqe_comp_layout);
 	/* Title packet is pre-built. */
@@ -361,7 +363,7 @@ rxq_cq_decompress_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
 			}
 		}
 	}
-	if (rxq->cqe_comp_layout) {
+	if (rxq->cqe_comp_layout && keep) {
 		int ret;
 		/* Keep unzipping if the next CQE is the miniCQE array. */
 		cq = &cq[mcqe_n];
-- 
2.18.2



* [PATCH v2] net/mlx5: replenish MPRQ buffers for miniCQEs
  2023-11-01 14:43 [PATCH] net/mlx5: replenish MPRQ buffers for miniCQEs Alexander Kozyrev
@ 2023-11-01 14:57 ` Alexander Kozyrev
  2024-07-22 12:16   ` Dariusz Sosnowski
  2024-07-22 15:03   ` Raslan Darawsheh
  0 siblings, 2 replies; 4+ messages in thread
From: Alexander Kozyrev @ 2023-11-01 14:57 UTC
  To: dev; +Cc: suanmingm, viacheslavo, rasland

Keep unzipping if the next CQE is the miniCQE array in the
rxq_cq_decompress_v() routine only for the non-MPRQ scenario;
MPRQ requires buffer replenishment between the miniCQEs.

Restore the check for the initial compressed CQE for SPRQ,
and verify that the current CQE is not compressed before
copying it as a possible title CQE.
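
The title-packet fix amounts to a two-step guard: only a software-owned,
uncompressed CQE may seed the title packet, and the copy is performed
only when the next CQE either starts a compressed session or is not yet
owned by software. The standalone sketch below models just that
condition; the enum and struct are simplified stand-ins, not the real
mlx5 definitions.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins, not the real mlx5 definitions. */
enum toy_status { TOY_SW_OWN, TOY_HW_OWN };
enum toy_format { TOY_REGULAR, TOY_COMPRESSED };

struct toy_cqe {
	enum toy_format format;
	enum toy_status status;
};

/* Copy the last received mbuf as the title packet only when the CQE
 * just processed is software-owned and uncompressed, and the next CQE
 * may open a compressed session that will need the saved title. */
static bool toy_should_copy_title(const struct toy_cqe *cur,
				  const struct toy_cqe *next)
{
	if (cur->status != TOY_SW_OWN || cur->format == TOY_COMPRESSED)
		return false; /* a compressed CQE must never seed the title */
	return next->format == TOY_COMPRESSED || next->status != TOY_SW_OWN;
}

int main(void)
{
	struct toy_cqe cur = { TOY_REGULAR, TOY_SW_OWN };
	struct toy_cqe next = { TOY_COMPRESSED, TOY_SW_OWN };

	printf("copy title: %d\n", toy_should_copy_title(&cur, &next)); /* 1 */
	cur.format = TOY_COMPRESSED;
	printf("copy title: %d\n", toy_should_copy_title(&cur, &next)); /* 0 */
	return 0;
}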

Signed-off-by: Alexander Kozyrev <akozyrev@nvidia.com>
---
 drivers/net/mlx5/mlx5_rxtx_vec.c         | 56 ++++++++++++++++++------
 drivers/net/mlx5/mlx5_rxtx_vec_altivec.h |  6 ++-
 drivers/net/mlx5/mlx5_rxtx_vec_neon.h    |  6 ++-
 drivers/net/mlx5/mlx5_rxtx_vec_sse.h     |  6 ++-
 4 files changed, 54 insertions(+), 20 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_rxtx_vec.c b/drivers/net/mlx5/mlx5_rxtx_vec.c
index 2363d7ed27..1872bf310c 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec.c
+++ b/drivers/net/mlx5/mlx5_rxtx_vec.c
@@ -331,6 +331,15 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts,
 	}
 	/* At this point, there shouldn't be any remaining packets. */
 	MLX5_ASSERT(rxq->decompressed == 0);
+	/* Go directly to unzipping in case the first CQE is compressed. */
+	if (rxq->cqe_comp_layout) {
+		ret = check_cqe_iteration(cq, rxq->cqe_n, rxq->cq_ci);
+		if (ret == MLX5_CQE_STATUS_SW_OWN &&
+		    (MLX5_CQE_FORMAT(cq->op_own) == MLX5_COMPRESSED)) {
+			comp_idx = 0;
+			goto decompress;
+		}
+	}
 	/* Process all the CQEs */
 	nocmp_n = rxq_cq_process_v(rxq, cq, elts, pkts, pkts_n, err, &comp_idx);
 	/* If no new CQE seen, return without updating cq_db. */
@@ -345,18 +354,23 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts,
 	rcvd_pkt += nocmp_n;
 	/* Copy title packet for future compressed sessions. */
 	if (rxq->cqe_comp_layout) {
-		next = &(*rxq->cqes)[rxq->cq_ci & q_mask];
-		ret = check_cqe_iteration(next,	rxq->cqe_n, rxq->cq_ci);
-		if (ret != MLX5_CQE_STATUS_SW_OWN ||
-		    MLX5_CQE_FORMAT(next->op_own) == MLX5_COMPRESSED)
-			rte_memcpy(&rxq->title_pkt, elts[nocmp_n - 1],
-				   sizeof(struct rte_mbuf));
+		ret = check_cqe_iteration(cq, rxq->cqe_n, rxq->cq_ci);
+		if (ret == MLX5_CQE_STATUS_SW_OWN &&
+		    (MLX5_CQE_FORMAT(cq->op_own) != MLX5_COMPRESSED)) {
+			next = &(*rxq->cqes)[rxq->cq_ci & q_mask];
+			ret = check_cqe_iteration(next,	rxq->cqe_n, rxq->cq_ci);
+			if (MLX5_CQE_FORMAT(next->op_own) == MLX5_COMPRESSED ||
+			    ret != MLX5_CQE_STATUS_SW_OWN)
+				rte_memcpy(&rxq->title_pkt, elts[nocmp_n - 1],
+					   sizeof(struct rte_mbuf));
+		}
 	}
+decompress:
 	/* Decompress the last CQE if compressed. */
 	if (comp_idx < MLX5_VPMD_DESCS_PER_LOOP) {
 		MLX5_ASSERT(comp_idx == (nocmp_n % MLX5_VPMD_DESCS_PER_LOOP));
 		rxq->decompressed = rxq_cq_decompress_v(rxq, &cq[nocmp_n],
-							&elts[nocmp_n]);
+							&elts[nocmp_n], true);
 		rxq->cq_ci += rxq->decompressed;
 		/* Return more packets if needed. */
 		if (nocmp_n < pkts_n) {
@@ -482,6 +496,15 @@ rxq_burst_mprq_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts,
 	}
 	/* At this point, there shouldn't be any remaining packets. */
 	MLX5_ASSERT(rxq->decompressed == 0);
+	/* Go directly to unzipping in case the first CQE is compressed. */
+	if (rxq->cqe_comp_layout) {
+		ret = check_cqe_iteration(cq, rxq->cqe_n, rxq->cq_ci);
+		if (ret == MLX5_CQE_STATUS_SW_OWN &&
+		    (MLX5_CQE_FORMAT(cq->op_own) == MLX5_COMPRESSED)) {
+			comp_idx = 0;
+			goto decompress;
+		}
+	}
 	/* Process all the CQEs */
 	nocmp_n = rxq_cq_process_v(rxq, cq, elts, pkts, pkts_n, err, &comp_idx);
 	/* If no new CQE seen, return without updating cq_db. */
@@ -495,18 +518,23 @@ rxq_burst_mprq_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts,
 	rcvd_pkt += cp_pkt;
 	/* Copy title packet for future compressed sessions. */
 	if (rxq->cqe_comp_layout) {
-		next = &(*rxq->cqes)[rxq->cq_ci & q_mask];
-		ret = check_cqe_iteration(next,	rxq->cqe_n, rxq->cq_ci);
-		if (ret != MLX5_CQE_STATUS_SW_OWN ||
-		    MLX5_CQE_FORMAT(next->op_own) == MLX5_COMPRESSED)
-			rte_memcpy(&rxq->title_pkt, elts[nocmp_n - 1],
-				   sizeof(struct rte_mbuf));
+		ret = check_cqe_iteration(cq, rxq->cqe_n, rxq->cq_ci);
+		if (ret == MLX5_CQE_STATUS_SW_OWN &&
+		    (MLX5_CQE_FORMAT(cq->op_own) != MLX5_COMPRESSED)) {
+			next = &(*rxq->cqes)[rxq->cq_ci & q_mask];
+			ret = check_cqe_iteration(next,	rxq->cqe_n, rxq->cq_ci);
+			if (MLX5_CQE_FORMAT(next->op_own) == MLX5_COMPRESSED ||
+			    ret != MLX5_CQE_STATUS_SW_OWN)
+				rte_memcpy(&rxq->title_pkt, elts[nocmp_n - 1],
+					   sizeof(struct rte_mbuf));
+		}
 	}
+decompress:
 	/* Decompress the last CQE if compressed. */
 	if (comp_idx < MLX5_VPMD_DESCS_PER_LOOP) {
 		MLX5_ASSERT(comp_idx == (nocmp_n % MLX5_VPMD_DESCS_PER_LOOP));
 		rxq->decompressed = rxq_cq_decompress_v(rxq, &cq[nocmp_n],
-							&elts[nocmp_n]);
+							&elts[nocmp_n], false);
 		/* Return more packets if needed. */
 		if (nocmp_n < pkts_n) {
 			uint16_t n = rxq->decompressed;
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_altivec.h b/drivers/net/mlx5/mlx5_rxtx_vec_altivec.h
index cccfa7f2d3..b2bbc4ba17 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec_altivec.h
+++ b/drivers/net/mlx5/mlx5_rxtx_vec_altivec.h
@@ -68,13 +68,15 @@ rxq_copy_mbuf_v(struct rte_mbuf **elts, struct rte_mbuf **pkts, uint16_t n)
  * @param elts
  *   Pointer to SW ring to be filled. The first mbuf has to be pre-built from
  *   the title completion descriptor to be copied to the rest of mbufs.
+ * @param keep
+ *   Keep unzipping if the next CQE is the miniCQE array.
  *
  * @return
  *   Number of mini-CQEs successfully decompressed.
  */
 static inline uint16_t
 rxq_cq_decompress_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
-		    struct rte_mbuf **elts)
+		    struct rte_mbuf **elts, bool keep)
 {
 	volatile struct mlx5_mini_cqe8 *mcq =
 		(void *)&(cq + !rxq->cqe_comp_layout)->pkt_info;
@@ -507,7 +509,7 @@ rxq_cq_decompress_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
 		}
 	}
 
-	if (rxq->cqe_comp_layout) {
+	if (rxq->cqe_comp_layout && keep) {
 		int ret;
 		/* Keep unzipping if the next CQE is the miniCQE array. */
 		cq = &cq[mcqe_n];
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_neon.h b/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
index 3ed688191f..510f60b25d 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
+++ b/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
@@ -63,13 +63,15 @@ rxq_copy_mbuf_v(struct rte_mbuf **elts, struct rte_mbuf **pkts, uint16_t n)
  * @param elts
  *   Pointer to SW ring to be filled. The first mbuf has to be pre-built from
  *   the title completion descriptor to be copied to the rest of mbufs.
+ * @param keep
+ *   Keep unzipping if the next CQE is the miniCQE array.
  *
  * @return
  *   Number of mini-CQEs successfully decompressed.
  */
 static inline uint16_t
 rxq_cq_decompress_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
-		    struct rte_mbuf **elts)
+		    struct rte_mbuf **elts, bool keep)
 {
 	volatile struct mlx5_mini_cqe8 *mcq =
 		(void *)&(cq + !rxq->cqe_comp_layout)->pkt_info;
@@ -372,7 +374,7 @@ rxq_cq_decompress_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
 			}
 		}
 	}
-	if (rxq->cqe_comp_layout) {
+	if (rxq->cqe_comp_layout && keep) {
 		int ret;
 		/* Keep unzipping if the next CQE is the miniCQE array. */
 		cq = &cq[mcqe_n];
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_sse.h b/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
index 2bdd1f676d..06bec45cdf 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
+++ b/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
@@ -65,13 +65,15 @@ rxq_copy_mbuf_v(struct rte_mbuf **elts, struct rte_mbuf **pkts, uint16_t n)
  * @param elts
  *   Pointer to SW ring to be filled. The first mbuf has to be pre-built from
  *   the title completion descriptor to be copied to the rest of mbufs.
+ * @param keep
+ *   Keep unzipping if the next CQE is the miniCQE array.
  *
  * @return
  *   Number of mini-CQEs successfully decompressed.
  */
 static inline uint16_t
 rxq_cq_decompress_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
-		    struct rte_mbuf **elts)
+		    struct rte_mbuf **elts, bool keep)
 {
 	volatile struct mlx5_mini_cqe8 *mcq = (void *)(cq + !rxq->cqe_comp_layout);
 	/* Title packet is pre-built. */
@@ -361,7 +363,7 @@ rxq_cq_decompress_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
 			}
 		}
 	}
-	if (rxq->cqe_comp_layout) {
+	if (rxq->cqe_comp_layout && keep) {
 		int ret;
 		/* Keep unzipping if the next CQE is the miniCQE array. */
 		cq = &cq[mcqe_n];
-- 
2.18.2



* RE: [PATCH v2] net/mlx5: replenish MPRQ buffers for miniCQEs
  2023-11-01 14:57 ` [PATCH v2] " Alexander Kozyrev
@ 2024-07-22 12:16   ` Dariusz Sosnowski
  2024-07-22 15:03   ` Raslan Darawsheh
  1 sibling, 0 replies; 4+ messages in thread
From: Dariusz Sosnowski @ 2024-07-22 12:16 UTC
  To: Alexander Kozyrev, dev; +Cc: Suanming Mou, Slava Ovsiienko, Raslan Darawsheh

Hi,

> -----Original Message-----
> From: Alexander Kozyrev <akozyrev@nvidia.com>
> Sent: Wednesday, November 1, 2023 15:57
> To: dev@dpdk.org
> Cc: Suanming Mou <suanmingm@nvidia.com>; Slava Ovsiienko
> <viacheslavo@nvidia.com>; Raslan Darawsheh <rasland@nvidia.com>
> Subject: [PATCH v2] net/mlx5: replenish MPRQ buffers for miniCQEs
> 
> Keep unzipping if the next CQE is the miniCQE array in the
> rxq_cq_decompress_v() routine only for the non-MPRQ scenario; MPRQ
> requires buffer replenishment between the miniCQEs.
> 
> Restore the check for the initial compressed CQE for SPRQ, and verify that
> the current CQE is not compressed before copying it as a possible title CQE.
> 
> Signed-off-by: Alexander Kozyrev <akozyrev@nvidia.com>

Acked-by: Dariusz Sosnowski <dsosnowski@nvidia.com>

Best regards,
Dariusz Sosnowski


* Re: [PATCH v2] net/mlx5: replenish MPRQ buffers for miniCQEs
  2023-11-01 14:57 ` [PATCH v2] " Alexander Kozyrev
  2024-07-22 12:16   ` Dariusz Sosnowski
@ 2024-07-22 15:03   ` Raslan Darawsheh
  1 sibling, 0 replies; 4+ messages in thread
From: Raslan Darawsheh @ 2024-07-22 15:03 UTC
  To: Alexander Kozyrev, dev; +Cc: Suanming Mou, Slava Ovsiienko

Hi,

From: Alexander Kozyrev <akozyrev@nvidia.com>
Sent: Wednesday, November 1, 2023 4:57 PM
To: dev@dpdk.org
Cc: Suanming Mou; Slava Ovsiienko; Raslan Darawsheh
Subject: [PATCH v2] net/mlx5: replenish MPRQ buffers for miniCQEs

Keep unzipping if the next CQE is the miniCQE array in the
rxq_cq_decompress_v() routine only for the non-MPRQ scenario;
MPRQ requires buffer replenishment between the miniCQEs.

Restore the check for the initial compressed CQE for SPRQ,
and verify that the current CQE is not compressed before
copying it as a possible title CQE.

Signed-off-by: Alexander Kozyrev <akozyrev@nvidia.com>


Patch applied to next-net-mlx,

Kindest regards,
Raslan Darawsheh

