From: Kevin Traynor <ktraynor@redhat.com>
To: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
Cc: dpdk stable <stable@dpdk.org>
Subject: patch 'net/mlx5: fix out-of-order completions in ordinary Rx burst' has been queued to stable release 24.11.3
Date: Fri, 18 Jul 2025 20:31:31 +0100
Message-ID: <20250718193247.1008129-157-ktraynor@redhat.com>
In-Reply-To: <20250718193247.1008129-1-ktraynor@redhat.com>

Hi,

FYI, your patch has been queued to stable release 24.11.3

Note it hasn't been pushed to http://dpdk.org/browse/dpdk-stable yet.
It will be pushed if I get no objections before 07/23/25, so please
shout if you have any objections.

Also note that after the patch there's a diff of the upstream commit vs the
patch applied to the branch. This will indicate if there was any rebasing
needed to apply to the stable branch. If there were code changes for rebasing
(i.e. not only metadata diffs), please double-check that the rebase was
done correctly.
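
If you want to compare the two commits locally, something like the
following should work (a sketch; it assumes both commits are reachable
in your clone, e.g. after fetching the temporary branch below):

  git show 5f9223611f3570c974b9c8e6c0b62db605fb3076 > upstream.patch
  git show f22dca1f87a4cc856e9221aae5de4df58b19a7b3 > backport.patch
  diff upstream.patch backport.patch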

Queued patches are on a temporary branch at:
https://github.com/kevintraynor/dpdk-stable

This queued commit can be viewed at:
https://github.com/kevintraynor/dpdk-stable/commit/f22dca1f87a4cc856e9221aae5de4df58b19a7b3

Thanks.

Kevin

---
From f22dca1f87a4cc856e9221aae5de4df58b19a7b3 Mon Sep 17 00:00:00 2001
From: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
Date: Tue, 8 Jul 2025 13:46:41 +0300
Subject: [PATCH] net/mlx5: fix out-of-order completions in ordinary Rx burst

[ upstream commit 5f9223611f3570c974b9c8e6c0b62db605fb3076 ]

The existing Rx burst routines assume that completions in the CQ
arrive in order and address the WQEs in the receive queue in order.
That is not true for shared RQs: CQEs can arrive out of order, and
to address the appropriate WQE its index must be fetched from the
CQE wqe_counter field.

Also, the RQ CI can be advanced if and only if all the WQEs in the
covered range are handled. This requires a sliding window to track
handled WQEs. Out-of-order window sizes up to the full queue size
are supported.
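
For reference, a minimal standalone sketch of the bitmap sliding
window (illustrative names only; the real helpers are the
mlx5_rq_win_*() functions in the diff below):

  #include <stdint.h>

  /* Track out-of-order completions in a bitmap; the consumer index
   * may only advance over words in which every WQE has completed.
   * WIN_BITS and struct win are illustrative, not driver names.
   */
  #define WIN_BITS 32

  struct win {
          uint32_t *data;     /* completion bitmap, power-of-two words */
          uint16_t idx;       /* word holding the oldest pending WQE */
          uint16_t idx_mask;  /* number of words minus one */
  };

  /* Mark the WQE 'delta' slots ahead of the CI as handled and return
   * how many slots the CI may safely advance. */
  static uint32_t
  win_advance(struct win *w, uint32_t delta)
  {
          uint32_t idx = ((delta / WIN_BITS) + w->idx) & w->idx_mask;

          w->data[idx] |= 1u << (delta % WIN_BITS);
          if (delta >= WIN_BITS)
                  return 0;
          delta = 0;
          /* Release only fully completed words of WIN_BITS WQEs. */
          while (~w->data[w->idx] == 0) {
                  w->data[w->idx] = 0;
                  w->idx = (w->idx + 1) & w->idx_mask;
                  delta += WIN_BITS;
          }
          return delta;
  }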

Fixes: 09c2555303be ("net/mlx5: support shared Rx queue")

Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/linux/mlx5_verbs.c |   8 +-
 drivers/net/mlx5/mlx5_devx.c        |   7 +-
 drivers/net/mlx5/mlx5_ethdev.c      |   8 +-
 drivers/net/mlx5/mlx5_rx.c          | 284 +++++++++++++++++++++++++++-
 drivers/net/mlx5/mlx5_rx.h          |  28 ++-
 drivers/net/mlx5/mlx5_rxq.c         |  11 +-
 6 files changed, 334 insertions(+), 12 deletions(-)

diff --git a/drivers/net/mlx5/linux/mlx5_verbs.c b/drivers/net/mlx5/linux/mlx5_verbs.c
index 454bd7c77e..9011319a3e 100644
--- a/drivers/net/mlx5/linux/mlx5_verbs.c
+++ b/drivers/net/mlx5/linux/mlx5_verbs.c
@@ -398,5 +398,11 @@ mlx5_rxq_ibv_obj_new(struct mlx5_rxq_priv *rxq)
 	rxq_data->rq_db = rwq.dbrec;
 	rxq_data->cq_arm_sn = 0;
-	mlx5_rxq_initialize(rxq_data);
+	ret = mlx5_rxq_initialize(rxq_data);
+	if (ret) {
+		DRV_LOG(ERR, "Port %u Rx queue %u RQ initialization failure.",
+			priv->dev_data->port_id, rxq->idx);
+		rte_errno = ENOMEM;
+		goto error;
+	}
 	rxq_data->cq_ci = 0;
 	priv->dev_data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
diff --git a/drivers/net/mlx5/mlx5_devx.c b/drivers/net/mlx5/mlx5_devx.c
index b9d29ca7d5..f9081b0e30 100644
--- a/drivers/net/mlx5/mlx5_devx.c
+++ b/drivers/net/mlx5/mlx5_devx.c
@@ -710,5 +710,10 @@ mlx5_rxq_devx_obj_new(struct mlx5_rxq_priv *rxq)
 	}
 	if (!rxq_ctrl->started) {
-		mlx5_rxq_initialize(rxq_data);
+		if (mlx5_rxq_initialize(rxq_data)) {
+			DRV_LOG(ERR, "Port %u Rx queue %u RQ initialization failure.",
+			priv->dev_data->port_id, rxq->idx);
+			rte_errno = ENOMEM;
+			goto error;
+		}
 		rxq_ctrl->wqn = rxq->devx_rq.rq->id;
 	}
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index f2ae75a8e1..ddfe968a99 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -618,4 +618,5 @@ mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev, size_t *no_of_elements)
 
 	if (dev->rx_pkt_burst == mlx5_rx_burst ||
+	    dev->rx_pkt_burst == mlx5_rx_burst_out_of_order ||
 	    dev->rx_pkt_burst == mlx5_rx_burst_mprq ||
 	    dev->rx_pkt_burst == mlx5_rx_burst_vec ||
@@ -688,5 +689,10 @@ mlx5_select_rx_function(struct rte_eth_dev *dev)
 
 	MLX5_ASSERT(dev != NULL);
-	if (mlx5_check_vec_rx_support(dev) > 0) {
+	if (mlx5_shared_rq_enabled(dev)) {
+		rx_pkt_burst = mlx5_rx_burst_out_of_order;
+		DRV_LOG(DEBUG, "port %u forced to use SPRQ"
+			" Rx function with Out-of-Order completions",
+			dev->data->port_id);
+	} else if (mlx5_check_vec_rx_support(dev) > 0) {
 		if (mlx5_mprq_enabled(dev)) {
 			rx_pkt_burst = mlx5_rx_burst_mprq_vec;
diff --git a/drivers/net/mlx5/mlx5_rx.c b/drivers/net/mlx5/mlx5_rx.c
index 5e58eb8bc9..0f2152fdb0 100644
--- a/drivers/net/mlx5/mlx5_rx.c
+++ b/drivers/net/mlx5/mlx5_rx.c
@@ -42,5 +42,5 @@ mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
 		 uint16_t cqe_n, uint16_t cqe_mask,
 		 volatile struct mlx5_mini_cqe8 **mcqe,
-		 uint16_t *skip_cnt, bool mprq);
+		 uint16_t *skip_cnt, bool mprq, uint32_t *widx);
 
 static __rte_always_inline uint32_t
@@ -221,4 +221,6 @@ mlx5_rx_burst_mode_get(struct rte_eth_dev *dev,
 	if (pkt_burst == mlx5_rx_burst) {
 		snprintf(mode->info, sizeof(mode->info), "%s", "Scalar");
+	} else if (pkt_burst == mlx5_rx_burst_out_of_order) {
+		snprintf(mode->info, sizeof(mode->info), "%s", "Scalar Out-of-Order");
 	} else if (pkt_burst == mlx5_rx_burst_mprq) {
 		snprintf(mode->info, sizeof(mode->info), "%s", "Multi-Packet RQ");
@@ -359,4 +361,75 @@ rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
 }
 
+static inline void mlx5_rq_win_reset(struct mlx5_rxq_data *rxq)
+{
+	static_assert(MLX5_WINOOO_BITS == (sizeof(*rxq->rq_win_data) * CHAR_BIT),
+		      "Invalid out-of-order window bitwidth");
+	rxq->rq_win_idx = 0;
+	rxq->rq_win_cnt = 0;
+	if (rxq->rq_win_data != NULL && rxq->rq_win_idx_mask != 0)
+		memset(rxq->rq_win_data, 0, (rxq->rq_win_idx_mask + 1) * sizeof(*rxq->rq_win_data));
+}
+
+static inline int mlx5_rq_win_init(struct mlx5_rxq_data *rxq)
+{
+	struct mlx5_rxq_ctrl *ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq);
+	uint32_t win_size, win_mask;
+
+	/* Set queue size as window size */
+	win_size = 1u << rxq->elts_n;
+	win_size = RTE_MAX(win_size, MLX5_WINOOO_BITS);
+	win_size = win_size / MLX5_WINOOO_BITS;
+	win_mask = win_size - 1;
+	if (win_mask != rxq->rq_win_idx_mask || rxq->rq_win_data == NULL) {
+		mlx5_free(rxq->rq_win_data);
+		rxq->rq_win_idx_mask = 0;
+		rxq->rq_win_data = mlx5_malloc(MLX5_MEM_RTE,
+					       win_size * sizeof(*rxq->rq_win_data),
+					       RTE_CACHE_LINE_SIZE, ctrl->socket);
+		if (rxq->rq_win_data == NULL)
+			return -ENOMEM;
+		rxq->rq_win_idx_mask = (uint16_t)win_mask;
+	}
+	mlx5_rq_win_reset(rxq);
+	return 0;
+}
+
+static inline bool mlx5_rq_win_test(struct mlx5_rxq_data *rxq)
+{
+	return !!rxq->rq_win_cnt;
+}
+
+static inline void mlx5_rq_win_update(struct mlx5_rxq_data *rxq, uint32_t delta)
+{
+	uint32_t idx;
+
+	idx = (delta / MLX5_WINOOO_BITS) + rxq->rq_win_idx;
+	idx &= rxq->rq_win_idx_mask;
+	rxq->rq_win_cnt = 1;
+	rxq->rq_win_data[idx] |= 1u << (delta % MLX5_WINOOO_BITS);
+}
+
+static inline uint32_t mlx5_rq_win_advance(struct mlx5_rxq_data *rxq, uint32_t delta)
+{
+	uint32_t idx;
+
+	idx = (delta / MLX5_WINOOO_BITS) + rxq->rq_win_idx;
+	idx &= rxq->rq_win_idx_mask;
+	rxq->rq_win_data[idx] |= 1u << (delta % MLX5_WINOOO_BITS);
+	++rxq->rq_win_cnt;
+	if (delta >= MLX5_WINOOO_BITS)
+		return 0;
+	delta = 0;
+	while (~rxq->rq_win_data[idx] == 0) {
+		rxq->rq_win_data[idx] = 0;
+		MLX5_ASSERT(rxq->rq_win_cnt >= MLX5_WINOOO_BITS);
+		rxq->rq_win_cnt -= MLX5_WINOOO_BITS;
+		idx = (idx + 1) & rxq->rq_win_idx_mask;
+		rxq->rq_win_idx = idx;
+		delta += MLX5_WINOOO_BITS;
+	}
+	return delta;
+}
+
 /**
  * Initialize Rx WQ and indexes.
@@ -365,5 +438,5 @@ rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
  *   Pointer to RX queue structure.
  */
-void
+int
 mlx5_rxq_initialize(struct mlx5_rxq_data *rxq)
 {
@@ -414,6 +487,10 @@ mlx5_rxq_initialize(struct mlx5_rxq_data *rxq)
 	/* Update doorbell counter. */
 	rxq->rq_ci = wqe_n >> rxq->sges_n;
+	rxq->rq_ci_ooo = rxq->rq_ci;
+	if (mlx5_rq_win_init(rxq))
+		return -ENOMEM;
 	rte_io_wmb();
 	*rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
+	return 0;
 }
 
@@ -524,4 +601,7 @@ mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec,
 			rxq_ctrl->dump_file_n++;
 		}
+		/* Try to find the actual cq_ci in hardware for shared queue. */
+		if (rxq->shared)
+			rxq_sync_cq(rxq);
 		rxq->err_state = MLX5_RXQ_ERR_STATE_NEED_READY;
 		/* Fall-through */
@@ -583,5 +663,6 @@ mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec,
 								&rxq->fake_mbuf;
 			}
-			mlx5_rxq_initialize(rxq);
+			if (mlx5_rxq_initialize(rxq))
+				return MLX5_RECOVERY_ERROR_RET;
 			rxq->err_state = MLX5_RXQ_ERR_STATE_NO_ERROR;
 			return MLX5_RECOVERY_COMPLETED_RET;
@@ -613,4 +694,8 @@ mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec,
  * @param mprq
  *   Indication if it is called from MPRQ.
+ * @param[out] widx
+ *   Store the WQE index from the CQE to support out-of-order completions.
+ *   NULL can be specified if the index is not needed.
+ *
  * @return
  *   0 in case of empty CQE,
@@ -624,5 +709,5 @@ mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
 		 uint16_t cqe_n, uint16_t cqe_mask,
 		 volatile struct mlx5_mini_cqe8 **mcqe,
-		 uint16_t *skip_cnt, bool mprq)
+		 uint16_t *skip_cnt, bool mprq, uint32_t *widx)
 {
 	struct rxq_zip *zip = &rxq->zip;
@@ -640,4 +725,6 @@ mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
 			len = rte_be_to_cpu_32((*mc)[zip->ai & 7].byte_cnt &
 						rxq->byte_mask);
+			if (widx != NULL)
+				*widx = zip->wqe_idx + zip->ai;
 			*mcqe = &(*mc)[zip->ai & 7];
 			if (rxq->cqe_comp_layout) {
@@ -693,4 +780,7 @@ mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
 				if (unlikely(ret == MLX5_CQE_STATUS_ERR ||
 					     rxq->err_state)) {
+					/* We should try to track the out-of-order WQE */
+					if (widx != NULL)
+						*widx = rte_be_to_cpu_16(cqe->wqe_counter);
 					ret = mlx5_rx_err_handle(rxq, 0, 1, skip_cnt);
 					if (ret == MLX5_CQE_STATUS_HW_OWN)
@@ -737,4 +827,8 @@ mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
 				zip->ca = cq_ci;
 				zip->na = zip->ca + 7;
+				if (widx != NULL) {
+					zip->wqe_idx = rte_be_to_cpu_16(cqe->wqe_counter);
+					*widx = zip->wqe_idx;
+				}
 				/* Compute the next non compressed CQE. */
 				zip->cq_ci = rxq->cq_ci + zip->cqe_cnt;
@@ -761,4 +855,6 @@ mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
 				++rxq->cq_ci;
 				len = rte_be_to_cpu_32(cqe->byte_cnt);
+				if (widx != NULL)
+					*widx = rte_be_to_cpu_16(cqe->wqe_counter);
 				if (rxq->cqe_comp_layout) {
 					volatile struct mlx5_cqe *next;
@@ -976,5 +1072,6 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 		if (!pkt) {
 			cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_mask];
-			len = mlx5_rx_poll_len(rxq, cqe, cqe_n, cqe_mask, &mcqe, &skip_cnt, false);
+			len = mlx5_rx_poll_len(rxq, cqe, cqe_n, cqe_mask,
+					       &mcqe, &skip_cnt, false, NULL);
 			if (unlikely(len & MLX5_ERROR_CQE_MASK)) {
 				/* We drop packets with non-critical errors */
@@ -1062,4 +1159,179 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 }
 
+/**
+ * DPDK callback for RX with Out-of-Order completions support.
+ *
+ * @param dpdk_rxq
+ *   Generic pointer to RX queue structure.
+ * @param[out] pkts
+ *   Array to store received packets.
+ * @param pkts_n
+ *   Maximum number of packets in array.
+ *
+ * @return
+ *   Number of packets successfully received (<= pkts_n).
+ */
+uint16_t
+mlx5_rx_burst_out_of_order(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
+{
+	struct mlx5_rxq_data *rxq = dpdk_rxq;
+	const uint32_t wqe_n = 1 << rxq->elts_n;
+	const uint32_t wqe_mask = wqe_n - 1;
+	const uint32_t cqe_n = 1 << rxq->cqe_n;
+	const uint32_t cqe_mask = cqe_n - 1;
+	const unsigned int sges_n = rxq->sges_n;
+	const uint32_t pkt_mask = wqe_mask >> sges_n;
+	struct rte_mbuf *pkt = NULL;
+	struct rte_mbuf *seg = NULL;
+	volatile struct mlx5_cqe *cqe =
+		&(*rxq->cqes)[rxq->cq_ci & cqe_mask];
+	unsigned int i = 0;
+	int len = 0; /* keep its value across iterations. */
+	const uint32_t rq_ci = rxq->rq_ci;
+	uint32_t idx = 0;
+
+	do {
+		volatile struct mlx5_wqe_data_seg *wqe;
+		struct rte_mbuf *rep = NULL;
+		volatile struct mlx5_mini_cqe8 *mcqe = NULL;
+		uint32_t delta;
+		uint16_t skip_cnt;
+
+		if (!pkt) {
+			cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_mask];
+			rte_prefetch0(cqe);
+			/* Allocate from the first packet mbuf pool */
+			rep = (*rxq->elts)[0];
+			/* We must allocate before CQE consuming to allow retry */
+			rep = rte_mbuf_raw_alloc(rep->pool);
+			if (unlikely(rep == NULL)) {
+				++rxq->stats.rx_nombuf;
+				break;
+			}
+			len = mlx5_rx_poll_len(rxq, cqe, cqe_n, cqe_mask,
+					       &mcqe, &skip_cnt, false, &idx);
+			if (unlikely(len == MLX5_CRITICAL_ERROR_CQE_RET)) {
+				rte_mbuf_raw_free(rep);
+				mlx5_rq_win_reset(rxq);
+				break;
+			}
+			if (len == 0) {
+				rte_mbuf_raw_free(rep);
+				break;
+			}
+			idx &= pkt_mask;
+			delta = (idx - rxq->rq_ci) & pkt_mask;
+			MLX5_ASSERT(delta < ((rxq->rq_win_idx_mask + 1) * MLX5_WINOOO_BITS));
+			if (likely(!mlx5_rq_win_test(rxq))) {
+				/* No out of order completions in sliding window */
+				if (likely(delta == 0))
+					rxq->rq_ci++;
+				else
+					mlx5_rq_win_update(rxq, delta);
+			} else {
+				/* We have out of order completions */
+				rxq->rq_ci += mlx5_rq_win_advance(rxq, delta);
+			}
+			if (rxq->zip.ai == 0)
+				rxq->rq_ci_ooo = rxq->rq_ci;
+			idx <<= sges_n;
+			/* We drop packets with non-critical errors */
+			if (unlikely(len & MLX5_ERROR_CQE_MASK)) {
+				rte_mbuf_raw_free(rep);
+				continue;
+			}
+		}
+		wqe = &((volatile struct mlx5_wqe_data_seg *)rxq->wqes)[idx];
+		if (unlikely(pkt))
+			NEXT(seg) = (*rxq->elts)[idx];
+		seg = (*rxq->elts)[idx];
+		rte_prefetch0(seg);
+		rte_prefetch0(wqe);
+		/* Allocate the buf from the same pool. */
+		if (unlikely(rep == NULL)) {
+			rep = rte_mbuf_raw_alloc(seg->pool);
+			if (unlikely(rep == NULL)) {
+				++rxq->stats.rx_nombuf;
+				if (!pkt) {
+					/*
+					 * no buffers before we even started,
+					 * bail out silently.
+					 */
+					break;
+				}
+				while (pkt != seg) {
+					MLX5_ASSERT(pkt != (*rxq->elts)[idx]);
+					rep = NEXT(pkt);
+					NEXT(pkt) = NULL;
+					NB_SEGS(pkt) = 1;
+					rte_mbuf_raw_free(pkt);
+					pkt = rep;
+				}
+				break;
+			}
+		}
+		if (!pkt) {
+			pkt = seg;
+			MLX5_ASSERT(len >= (rxq->crc_present << 2));
+			pkt->ol_flags &= RTE_MBUF_F_EXTERNAL;
+			if (rxq->cqe_comp_layout && mcqe)
+				cqe = &rxq->title_cqe;
+			rxq_cq_to_mbuf(rxq, pkt, cqe, mcqe);
+			if (rxq->crc_present)
+				len -= RTE_ETHER_CRC_LEN;
+			PKT_LEN(pkt) = len;
+			if (cqe->lro_num_seg > 1) {
+				mlx5_lro_update_hdr
+					(rte_pktmbuf_mtod(pkt, uint8_t *), cqe,
+					 mcqe, rxq, len);
+				pkt->ol_flags |= RTE_MBUF_F_RX_LRO;
+				pkt->tso_segsz = len / cqe->lro_num_seg;
+			}
+		}
+		DATA_LEN(rep) = DATA_LEN(seg);
+		PKT_LEN(rep) = PKT_LEN(seg);
+		SET_DATA_OFF(rep, DATA_OFF(seg));
+		PORT(rep) = PORT(seg);
+		(*rxq->elts)[idx] = rep;
+		/*
+		 * Fill NIC descriptor with the new buffer. The lkey and size
+		 * of the buffers are already known, only the buffer address
+		 * changes.
+		 */
+		wqe->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(rep, uintptr_t));
+		/* If there's only one MR, no need to replace LKey in WQE. */
+		if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1))
+			wqe->lkey = mlx5_rx_mb2mr(rxq, rep);
+		if (len > DATA_LEN(seg)) {
+			len -= DATA_LEN(seg);
+			++NB_SEGS(pkt);
+			++idx;
+			idx &= wqe_mask;
+			continue;
+		}
+		DATA_LEN(seg) = len;
+#ifdef MLX5_PMD_SOFT_COUNTERS
+		/* Increment bytes counter. */
+		rxq->stats.ibytes += PKT_LEN(pkt);
+#endif
+		/* Return packet. */
+		*(pkts++) = pkt;
+		pkt = NULL;
+		++i;
+	} while (i < pkts_n);
+	if (unlikely(i == 0 && rq_ci == rxq->rq_ci_ooo))
+		return 0;
+	/* Update the consumer index. */
+	rte_io_wmb();
+	*rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
+	rte_io_wmb();
+	*rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci_ooo);
+#ifdef MLX5_PMD_SOFT_COUNTERS
+	/* Increment packets counter. */
+	rxq->stats.ipackets += i;
+#endif
+	return i;
+}
+
 /**
  * Update LRO packet TCP header.
@@ -1220,5 +1492,5 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 		}
 		cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask];
-		ret = mlx5_rx_poll_len(rxq, cqe, cqe_n, cq_mask, &mcqe, &skip_cnt, true);
+		ret = mlx5_rx_poll_len(rxq, cqe, cqe_n, cq_mask, &mcqe, &skip_cnt, true, NULL);
 		if (unlikely(ret & MLX5_ERROR_CQE_MASK)) {
 			if (ret == MLX5_CRITICAL_ERROR_CQE_RET) {
diff --git a/drivers/net/mlx5/mlx5_rx.h b/drivers/net/mlx5/mlx5_rx.h
index 6c48a37be7..6ec5f82022 100644
--- a/drivers/net/mlx5/mlx5_rx.h
+++ b/drivers/net/mlx5/mlx5_rx.h
@@ -23,4 +23,5 @@
 /* Support tunnel matching. */
 #define MLX5_FLOW_TUNNEL 10
+#define MLX5_WINOOO_BITS  (sizeof(uint32_t) * CHAR_BIT)
 
 #define RXQ_PORT(rxq_ctrl) LIST_FIRST(&(rxq_ctrl)->owners)->priv
@@ -47,4 +48,5 @@ struct rxq_zip {
 	uint32_t na; /* Next array index. */
 	uint32_t cq_ci; /* The next CQE. */
+	uint16_t wqe_idx; /* WQE index */
 };
 
@@ -107,4 +109,5 @@ struct __rte_cache_aligned mlx5_rxq_data {
 	uint32_t elts_ci;
 	uint32_t rq_ci;
+	uint32_t rq_ci_ooo;
 	uint16_t consumed_strd; /* Number of consumed strides in WQE. */
 	uint32_t rq_pi;
@@ -147,4 +150,8 @@ struct __rte_cache_aligned mlx5_rxq_data {
 	struct mlx5_eth_rxseg rxseg[MLX5_MAX_RXQ_NSEG];
 	/* Buffer split segment descriptions - sizes, offsets, pools. */
+	uint16_t rq_win_cnt; /* Number of packets in the sliding window data. */
+	uint16_t rq_win_idx_mask; /* Sliding window index wrapping mask. */
+	uint16_t rq_win_idx; /* Index of the first element in sliding window. */
+	uint32_t *rq_win_data; /* Out-of-Order completions sliding window. */
 };
 
@@ -286,5 +293,6 @@ int mlx5_hrxq_modify(struct rte_eth_dev *dev, uint32_t hxrq_idx,
 
 uint16_t mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n);
-void mlx5_rxq_initialize(struct mlx5_rxq_data *rxq);
+uint16_t mlx5_rx_burst_out_of_order(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n);
+int mlx5_rxq_initialize(struct mlx5_rxq_data *rxq);
 __rte_noinline int mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec,
 				      uint16_t err_n, uint16_t *skip_cnt);
@@ -312,4 +320,5 @@ uint16_t mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts,
 uint16_t mlx5_rx_burst_mprq_vec(void *dpdk_rxq, struct rte_mbuf **pkts,
 				uint16_t pkts_n);
+void rxq_sync_cq(struct mlx5_rxq_data *rxq);
 
 static int mlx5_rxq_mprq_enabled(struct mlx5_rxq_data *rxq);
@@ -642,4 +651,21 @@ mlx5_mprq_enabled(struct rte_eth_dev *dev)
 }
 
+/**
+ * Check whether Shared RQ is enabled for the device.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ *
+ * @return
+ *   0 if disabled, otherwise enabled.
+ */
+static __rte_always_inline int
+mlx5_shared_rq_enabled(struct rte_eth_dev *dev)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+
+	return !LIST_EMPTY(&priv->sh->shared_rxqs);
+}
+
 /**
  * Check whether given RxQ is external.
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 6047529535..75733339e4 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -421,5 +421,5 @@ mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx)
 
 /* Fetches and drops all SW-owned and error CQEs to synchronize CQ. */
-static void
+void
 rxq_sync_cq(struct mlx5_rxq_data *rxq)
 {
@@ -593,5 +593,11 @@ mlx5_rx_queue_start_primary(struct rte_eth_dev *dev, uint16_t idx)
 	}
 	/* Reinitialize RQ - set WQEs. */
-	mlx5_rxq_initialize(rxq_data);
+	ret = mlx5_rxq_initialize(rxq_data);
+	if (ret) {
+		DRV_LOG(ERR, "Port %u Rx queue %u RQ initialization failure.",
+			priv->dev_data->port_id, rxq->idx);
+		rte_errno = ENOMEM;
+		return ret;
+	}
 	rxq_data->err_state = MLX5_RXQ_ERR_STATE_NO_ERROR;
 	/* Set actual queue state. */
@@ -2306,4 +2312,5 @@ mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
 				LIST_REMOVE(rxq_ctrl, share_entry);
 			LIST_REMOVE(rxq_ctrl, next);
+			mlx5_free(rxq_ctrl->rxq.rq_win_data);
 			mlx5_free(rxq_ctrl);
 		}
-- 
2.50.0

---
  Diff of the applied patch vs upstream commit (please double-check if non-empty):
---
--- -	2025-07-18 20:29:16.483936765 +0100
+++ 0157-net-mlx5-fix-out-of-order-completions-in-ordinary-Rx.patch	2025-07-18 20:29:11.154908017 +0100
@@ -1 +1 @@
-From 5f9223611f3570c974b9c8e6c0b62db605fb3076 Mon Sep 17 00:00:00 2001
+From f22dca1f87a4cc856e9221aae5de4df58b19a7b3 Mon Sep 17 00:00:00 2001
@@ -5,0 +6,2 @@
+[ upstream commit 5f9223611f3570c974b9c8e6c0b62db605fb3076 ]
+
@@ -18 +19,0 @@
-Cc: stable@dpdk.org
@@ -48 +49 @@
-index 0ee16ba4f0..10bd93c29a 100644
+index b9d29ca7d5..f9081b0e30 100644
@@ -51 +52 @@
-@@ -684,5 +684,10 @@ mlx5_rxq_devx_obj_new(struct mlx5_rxq_priv *rxq)
+@@ -710,5 +710,10 @@ mlx5_rxq_devx_obj_new(struct mlx5_rxq_priv *rxq)
@@ -64 +65 @@
-index b7df39ace9..68d1c1bfa7 100644
+index f2ae75a8e1..ddfe968a99 100644
@@ -67 +68 @@
-@@ -649,4 +649,5 @@ mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev, size_t *no_of_elements)
+@@ -618,4 +618,5 @@ mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev, size_t *no_of_elements)
@@ -73 +74 @@
-@@ -719,5 +720,10 @@ mlx5_select_rx_function(struct rte_eth_dev *dev)
+@@ -688,5 +689,10 @@ mlx5_select_rx_function(struct rte_eth_dev *dev)
@@ -86 +87 @@
-index 5f4a93fe8c..5e8c312d00 100644
+index 5e58eb8bc9..0f2152fdb0 100644
@@ -89 +90 @@
-@@ -43,5 +43,5 @@ mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
+@@ -42,5 +42,5 @@ mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
@@ -96 +97 @@
-@@ -222,4 +222,6 @@ mlx5_rx_burst_mode_get(struct rte_eth_dev *dev,
+@@ -221,4 +221,6 @@ mlx5_rx_burst_mode_get(struct rte_eth_dev *dev,
@@ -103 +104 @@
-@@ -360,4 +362,75 @@ rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
+@@ -359,4 +361,75 @@ rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
@@ -179 +180 @@
-@@ -366,5 +439,5 @@ rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
+@@ -365,5 +438,5 @@ rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
@@ -186 +187 @@
-@@ -415,6 +488,10 @@ mlx5_rxq_initialize(struct mlx5_rxq_data *rxq)
+@@ -414,6 +487,10 @@ mlx5_rxq_initialize(struct mlx5_rxq_data *rxq)
@@ -197 +198 @@
-@@ -525,4 +602,7 @@ mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec,
+@@ -524,4 +601,7 @@ mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec,
@@ -205 +206 @@
-@@ -584,5 +664,6 @@ mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec,
+@@ -583,5 +663,6 @@ mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec,
@@ -213 +214 @@
-@@ -614,4 +695,8 @@ mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec,
+@@ -613,4 +694,8 @@ mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec,
@@ -222 +223 @@
-@@ -625,5 +710,5 @@ mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
+@@ -624,5 +709,5 @@ mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
@@ -229 +230 @@
-@@ -641,4 +726,6 @@ mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
+@@ -640,4 +725,6 @@ mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
@@ -236 +237 @@
-@@ -694,4 +781,7 @@ mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
+@@ -693,4 +780,7 @@ mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
@@ -244 +245 @@
-@@ -738,4 +828,8 @@ mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
+@@ -737,4 +827,8 @@ mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
@@ -253 +254 @@
-@@ -762,4 +856,6 @@ mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
+@@ -761,4 +855,6 @@ mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
@@ -260 +261 @@
-@@ -977,5 +1073,6 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
+@@ -976,5 +1072,6 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
@@ -268 +269 @@
-@@ -1063,4 +1160,179 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
+@@ -1062,4 +1159,179 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
@@ -448 +449 @@
-@@ -1221,5 +1493,5 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
+@@ -1220,5 +1492,5 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
@@ -456 +457 @@
-index 6380895502..4f3d73e3c4 100644
+index 6c48a37be7..6ec5f82022 100644
@@ -465 +466 @@
-@@ -65,4 +66,5 @@ struct rxq_zip {
+@@ -47,4 +48,5 @@ struct rxq_zip {
@@ -471 +472 @@
-@@ -125,4 +127,5 @@ struct __rte_cache_aligned mlx5_rxq_data {
+@@ -107,4 +109,5 @@ struct __rte_cache_aligned mlx5_rxq_data {
@@ -477 +478 @@
-@@ -165,4 +168,8 @@ struct __rte_cache_aligned mlx5_rxq_data {
+@@ -147,4 +150,8 @@ struct __rte_cache_aligned mlx5_rxq_data {
@@ -486 +487 @@
-@@ -306,5 +313,6 @@ int mlx5_hrxq_modify(struct rte_eth_dev *dev, uint32_t hxrq_idx,
+@@ -286,5 +293,6 @@ int mlx5_hrxq_modify(struct rte_eth_dev *dev, uint32_t hxrq_idx,
@@ -494 +495 @@
-@@ -332,4 +340,5 @@ uint16_t mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts,
+@@ -312,4 +320,5 @@ uint16_t mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts,
@@ -500 +501 @@
-@@ -662,4 +671,21 @@ mlx5_mprq_enabled(struct rte_eth_dev *dev)
+@@ -642,4 +651,21 @@ mlx5_mprq_enabled(struct rte_eth_dev *dev)
@@ -523 +524 @@
-index 2e9bcbea4d..77c5848c37 100644
+index 6047529535..75733339e4 100644
@@ -526 +527 @@
-@@ -422,5 +422,5 @@ mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx)
+@@ -421,5 +421,5 @@ mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx)
@@ -533 +534 @@
-@@ -594,5 +594,11 @@ mlx5_rx_queue_start_primary(struct rte_eth_dev *dev, uint16_t idx)
+@@ -593,5 +593,11 @@ mlx5_rx_queue_start_primary(struct rte_eth_dev *dev, uint16_t idx)
@@ -546 +547 @@
-@@ -2361,4 +2367,5 @@ mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
+@@ -2306,4 +2312,5 @@ mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)

