patches for DPDK stable branches
From: Kevin Traynor <ktraynor@redhat.com>
To: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
Cc: dpdk stable <stable@dpdk.org>
Subject: patch 'net/mlx5: fix check for orphan wait descriptor' has been queued to stable release 21.11.3
Date: Tue, 25 Oct 2022 16:07:01 +0100
Message-ID: <20221025150734.142189-66-ktraynor@redhat.com>
In-Reply-To: <20221025150734.142189-1-ktraynor@redhat.com>

Hi,

FYI, your patch has been queued to stable release 21.11.3

Note it hasn't been pushed to http://dpdk.org/browse/dpdk-stable yet.
It will be pushed if I get no objections before 11/01/22. So please
shout if anyone has objections.

Also note that after the patch there is a diff of the upstream commit vs the
patch applied to the branch. This will indicate whether any rebasing was
needed to apply it to the stable branch. If there were code changes for
rebasing (i.e. not only metadata diffs), please double-check that the rebase
was done correctly.

Queued patches are on a temporary branch at:
https://github.com/kevintraynor/dpdk-stable

This queued commit can be viewed at:
https://github.com/kevintraynor/dpdk-stable/commit/b5e5d926b224bdda168d2d9fd2d8cce0e5dbcec0

Thanks.

Kevin

---
From b5e5d926b224bdda168d2d9fd2d8cce0e5dbcec0 Mon Sep 17 00:00:00 2001
From: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
Date: Thu, 11 Aug 2022 08:50:58 +0300
Subject: [PATCH] net/mlx5: fix check for orphan wait descriptor

[ upstream commit 37d6fc30c1ad03485ef707140b67623b95498d0d ]

The mlx5 PMD supports the send scheduling feature, which allows
packets to be sent at a specified moment of time. To do that, the
PMD pushes a special wait descriptor (WQE) to the hardware queue
and then pushes the descriptor for the packet data as usual. If
the queue is close to full, or there are not enough free elts
buffers to store the mbufs being sent, the data descriptors might
not be pushed, and an orphan wait WQE (not followed by data)
might remain in the queue on tx_burst routine exit.

To avoid orphan wait WQEs there was a check for enough free space
in the queue WQE buffer and a sufficient number of free elts in
the queue mbuf storage. This check was incomplete and did not
cover all the cases for Enhanced Multi-Packet Write descriptors.
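
For illustration only, a minimal sketch of the idea behind the check, using
simplified, hypothetical names (txq_state, schedule_wait) rather than the
driver's own code; in the driver the check lives in mlx5_tx_schedule_send(),
shown in the diff below:

/*
 * Illustration only -- simplified, hypothetical types and names,
 * not the mlx5 driver code itself.
 */
struct txq_state {
	unsigned int wqe_free;  /* free WQE slots in the HW queue      */
	unsigned int elts_free; /* free mbuf slots in the SW elts ring */
};

enum sched_code { SCHED_OK, SCHED_EXIT };

static enum sched_code
schedule_wait(struct txq_state *txq, unsigned int min_wqe,
	      unsigned int elts)
{
	/*
	 * Post the WAIT WQE only if the data descriptor(s) and the
	 * mbuf slots that must follow it are guaranteed to fit too;
	 * otherwise exit so that no orphan WAIT is left in the queue.
	 */
	if (txq->wqe_free <= min_wqe || txq->elts_free < elts)
		return SCHED_EXIT;
	/* ... build and post the WAIT WQE here ... */
	txq->wqe_free--;
	return SCHED_OK;
}

The patch extends mlx5_tx_schedule_send() with an explicit 'elts' argument so
each caller states how many free mbuf slots must remain, and moves the WAIT
generation after the points where the needed amount is known.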

Fixes: 2f827f5ea6e1 ("net/mlx5: support scheduling on send routine template")

Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/mlx5_tx.h | 78 +++++++++++++++++++++-----------------
 1 file changed, 43 insertions(+), 35 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_tx.h b/drivers/net/mlx5/mlx5_tx.h
index 6ed00f722e..bd3a060963 100644
--- a/drivers/net/mlx5/mlx5_tx.h
+++ b/drivers/net/mlx5/mlx5_tx.h
@@ -1623,4 +1623,5 @@ static __rte_always_inline enum mlx5_txcmp_code
 mlx5_tx_schedule_send(struct mlx5_txq_data *restrict txq,
 		      struct mlx5_txq_local *restrict loc,
+		      uint16_t elts,
 		      unsigned int olx)
 {
@@ -1637,5 +1638,5 @@ mlx5_tx_schedule_send(struct mlx5_txq_data *restrict txq,
 		 */
 		if (loc->wqe_free <= MLX5_WQE_SIZE_MAX / MLX5_WQE_SIZE ||
-		    loc->elts_free < NB_SEGS(loc->mbuf))
+		    loc->elts_free < elts)
 			return MLX5_TXCMP_CODE_EXIT;
 		/* Convert the timestamp into completion to wait. */
@@ -1667,4 +1668,7 @@ mlx5_tx_schedule_send(struct mlx5_txq_data *restrict txq,
  * @param loc
  *   Pointer to burst routine local context.
+ * @param elts
+ *   Number of free elements in elts buffer to be checked, for zero
+ *   value the check is optimized out by compiler.
  * @param olx
  *   Configured Tx offloads mask. It is fully defined at
@@ -1684,9 +1688,10 @@ mlx5_tx_packet_multi_tso(struct mlx5_txq_data *__rte_restrict txq,
 	unsigned int ds, dlen, inlen, ntcp, vlan = 0;
 
+	MLX5_ASSERT(loc->elts_free >= NB_SEGS(loc->mbuf));
 	if (MLX5_TXOFF_CONFIG(TXPP)) {
 		enum mlx5_txcmp_code wret;
 
 		/* Generate WAIT for scheduling if requested. */
-		wret = mlx5_tx_schedule_send(txq, loc, olx);
+		wret = mlx5_tx_schedule_send(txq, loc, 0, olx);
 		if (wret == MLX5_TXCMP_CODE_EXIT)
 			return MLX5_TXCMP_CODE_EXIT;
@@ -1782,9 +1787,10 @@ mlx5_tx_packet_multi_send(struct mlx5_txq_data *__rte_restrict txq,
 
 	MLX5_ASSERT(NB_SEGS(loc->mbuf) > 1);
+	MLX5_ASSERT(loc->elts_free >= NB_SEGS(loc->mbuf));
 	if (MLX5_TXOFF_CONFIG(TXPP)) {
 		enum mlx5_txcmp_code wret;
 
 		/* Generate WAIT for scheduling if requested. */
-		wret = mlx5_tx_schedule_send(txq, loc, olx);
+		wret = mlx5_tx_schedule_send(txq, loc, 0, olx);
 		if (wret == MLX5_TXCMP_CODE_EXIT)
 			return MLX5_TXCMP_CODE_EXIT;
@@ -1897,14 +1903,5 @@ mlx5_tx_packet_multi_inline(struct mlx5_txq_data *__rte_restrict txq,
 	MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
 	MLX5_ASSERT(NB_SEGS(loc->mbuf) > 1);
-	if (MLX5_TXOFF_CONFIG(TXPP)) {
-		enum mlx5_txcmp_code wret;
-
-		/* Generate WAIT for scheduling if requested. */
-		wret = mlx5_tx_schedule_send(txq, loc, olx);
-		if (wret == MLX5_TXCMP_CODE_EXIT)
-			return MLX5_TXCMP_CODE_EXIT;
-		if (wret == MLX5_TXCMP_CODE_ERROR)
-			return MLX5_TXCMP_CODE_ERROR;
-	}
+	MLX5_ASSERT(loc->elts_free >= NB_SEGS(loc->mbuf));
 	/*
 	 * First calculate data length to be inlined
@@ -2012,4 +2009,14 @@ do_align:
 	 */
 do_build:
+	if (MLX5_TXOFF_CONFIG(TXPP)) {
+		enum mlx5_txcmp_code wret;
+
+		/* Generate WAIT for scheduling if requested. */
+		wret = mlx5_tx_schedule_send(txq, loc, 0, olx);
+		if (wret == MLX5_TXCMP_CODE_EXIT)
+			return MLX5_TXCMP_CODE_EXIT;
+		if (wret == MLX5_TXCMP_CODE_ERROR)
+			return MLX5_TXCMP_CODE_ERROR;
+	}
 	MLX5_ASSERT(inlen <= txq->inlen_send);
 	ds = NB_SEGS(loc->mbuf) + 2 + (inlen -
@@ -2172,5 +2179,5 @@ mlx5_tx_burst_tso(struct mlx5_txq_data *__rte_restrict txq,
 
 			/* Generate WAIT for scheduling if requested. */
-			wret = mlx5_tx_schedule_send(txq, loc, olx);
+			wret = mlx5_tx_schedule_send(txq, loc, 1, olx);
 			if (wret == MLX5_TXCMP_CODE_EXIT)
 				return MLX5_TXCMP_CODE_EXIT;
@@ -2550,14 +2557,4 @@ mlx5_tx_burst_empw_simple(struct mlx5_txq_data *__rte_restrict txq,
 next_empw:
 		MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
-		if (MLX5_TXOFF_CONFIG(TXPP)) {
-			enum mlx5_txcmp_code wret;
-
-			/* Generate WAIT for scheduling if requested. */
-			wret = mlx5_tx_schedule_send(txq, loc, olx);
-			if (wret == MLX5_TXCMP_CODE_EXIT)
-				return MLX5_TXCMP_CODE_EXIT;
-			if (wret == MLX5_TXCMP_CODE_ERROR)
-				return MLX5_TXCMP_CODE_ERROR;
-		}
 		part = RTE_MIN(pkts_n, MLX5_TXOFF_CONFIG(MPW) ?
 				       MLX5_MPW_MAX_PACKETS :
@@ -2570,4 +2567,14 @@ next_empw:
 			part = loc->elts_free;
 		}
+		if (MLX5_TXOFF_CONFIG(TXPP)) {
+			enum mlx5_txcmp_code wret;
+
+			/* Generate WAIT for scheduling if requested. */
+			wret = mlx5_tx_schedule_send(txq, loc, 0, olx);
+			if (wret == MLX5_TXCMP_CODE_EXIT)
+				return MLX5_TXCMP_CODE_EXIT;
+			if (wret == MLX5_TXCMP_CODE_ERROR)
+				return MLX5_TXCMP_CODE_ERROR;
+		}
 		/* Check whether we have enough WQEs */
 		if (unlikely(loc->wqe_free < ((2 + part + 3) / 4))) {
@@ -2724,14 +2731,4 @@ mlx5_tx_burst_empw_inline(struct mlx5_txq_data *__rte_restrict txq,
 
 		MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
-		if (MLX5_TXOFF_CONFIG(TXPP)) {
-			enum mlx5_txcmp_code wret;
-
-			/* Generate WAIT for scheduling if requested. */
-			wret = mlx5_tx_schedule_send(txq, loc, olx);
-			if (wret == MLX5_TXCMP_CODE_EXIT)
-				return MLX5_TXCMP_CODE_EXIT;
-			if (wret == MLX5_TXCMP_CODE_ERROR)
-				return MLX5_TXCMP_CODE_ERROR;
-		}
 		/*
 		 * Limits the amount of packets in one WQE
@@ -2741,4 +2738,14 @@ mlx5_tx_burst_empw_inline(struct mlx5_txq_data *__rte_restrict txq,
 				       MLX5_MPW_INLINE_MAX_PACKETS :
 				       MLX5_EMPW_MAX_PACKETS);
+		if (MLX5_TXOFF_CONFIG(TXPP)) {
+			enum mlx5_txcmp_code wret;
+
+			/* Generate WAIT for scheduling if requested. */
+			wret = mlx5_tx_schedule_send(txq, loc, nlim, olx);
+			if (wret == MLX5_TXCMP_CODE_EXIT)
+				return MLX5_TXCMP_CODE_EXIT;
+			if (wret == MLX5_TXCMP_CODE_ERROR)
+				return MLX5_TXCMP_CODE_ERROR;
+		}
 		/* Check whether we have minimal amount WQEs */
 		if (unlikely(loc->wqe_free <
@@ -3023,9 +3030,10 @@ mlx5_tx_burst_single_send(struct mlx5_txq_data *__rte_restrict txq,
 
 		MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
+		MLX5_ASSERT(loc->elts_free);
 		if (MLX5_TXOFF_CONFIG(TXPP)) {
 			enum mlx5_txcmp_code wret;
 
 			/* Generate WAIT for scheduling if requested. */
-			wret = mlx5_tx_schedule_send(txq, loc, olx);
+			wret = mlx5_tx_schedule_send(txq, loc, 0, olx);
 			if (wret == MLX5_TXCMP_CODE_EXIT)
 				return MLX5_TXCMP_CODE_EXIT;
-- 
2.37.3

---
  Diff of the applied patch vs upstream commit (please double-check if non-empty):
---
--- -	2022-10-25 14:19:00.039429874 +0100
+++ 0066-net-mlx5-fix-check-for-orphan-wait-descriptor.patch	2022-10-25 14:18:58.473798313 +0100
@@ -1 +1 @@
-From 37d6fc30c1ad03485ef707140b67623b95498d0d Mon Sep 17 00:00:00 2001
+From b5e5d926b224bdda168d2d9fd2d8cce0e5dbcec0 Mon Sep 17 00:00:00 2001
@@ -5,0 +6,2 @@
+[ upstream commit 37d6fc30c1ad03485ef707140b67623b95498d0d ]
+
@@ -22 +23,0 @@
-Cc: stable@dpdk.org
@@ -30 +31 @@
-index 20776919c2..f081921ffc 100644
+index 6ed00f722e..bd3a060963 100644
@@ -33,9 +34 @@
-@@ -1643,4 +1643,7 @@ dseg_done:
-  * @param loc
-  *   Pointer to burst routine local context.
-+ * @param elts
-+ *   Number of free elements in elts buffer to be checked, for zero
-+ *   value the check is optimized out by compiler.
-  * @param olx
-  *   Configured Tx offloads mask. It is fully defined at
-@@ -1656,4 +1659,5 @@ static __rte_always_inline enum mlx5_txcmp_code
+@@ -1623,4 +1623,5 @@ static __rte_always_inline enum mlx5_txcmp_code
@@ -47 +40 @@
-@@ -1670,5 +1674,5 @@ mlx5_tx_schedule_send(struct mlx5_txq_data *restrict txq,
+@@ -1637,5 +1638,5 @@ mlx5_tx_schedule_send(struct mlx5_txq_data *restrict txq,
@@ -54 +47,9 @@
-@@ -1736,9 +1740,10 @@ mlx5_tx_packet_multi_tso(struct mlx5_txq_data *__rte_restrict txq,
+@@ -1667,4 +1668,7 @@ mlx5_tx_schedule_send(struct mlx5_txq_data *restrict txq,
+  * @param loc
+  *   Pointer to burst routine local context.
++ * @param elts
++ *   Number of free elements in elts buffer to be checked, for zero
++ *   value the check is optimized out by compiler.
+  * @param olx
+  *   Configured Tx offloads mask. It is fully defined at
+@@ -1684,9 +1688,10 @@ mlx5_tx_packet_multi_tso(struct mlx5_txq_data *__rte_restrict txq,
@@ -66 +67 @@
-@@ -1834,9 +1839,10 @@ mlx5_tx_packet_multi_send(struct mlx5_txq_data *__rte_restrict txq,
+@@ -1782,9 +1787,10 @@ mlx5_tx_packet_multi_send(struct mlx5_txq_data *__rte_restrict txq,
@@ -78 +79 @@
-@@ -1949,14 +1955,5 @@ mlx5_tx_packet_multi_inline(struct mlx5_txq_data *__rte_restrict txq,
+@@ -1897,14 +1903,5 @@ mlx5_tx_packet_multi_inline(struct mlx5_txq_data *__rte_restrict txq,
@@ -94 +95 @@
-@@ -2064,4 +2061,14 @@ do_align:
+@@ -2012,4 +2009,14 @@ do_align:
@@ -109 +110 @@
-@@ -2224,5 +2231,5 @@ mlx5_tx_burst_tso(struct mlx5_txq_data *__rte_restrict txq,
+@@ -2172,5 +2179,5 @@ mlx5_tx_burst_tso(struct mlx5_txq_data *__rte_restrict txq,
@@ -116 +117 @@
-@@ -2602,14 +2609,4 @@ mlx5_tx_burst_empw_simple(struct mlx5_txq_data *__rte_restrict txq,
+@@ -2550,14 +2557,4 @@ mlx5_tx_burst_empw_simple(struct mlx5_txq_data *__rte_restrict txq,
@@ -131 +132 @@
-@@ -2622,4 +2619,14 @@ next_empw:
+@@ -2570,4 +2567,14 @@ next_empw:
@@ -146 +147 @@
-@@ -2776,14 +2783,4 @@ mlx5_tx_burst_empw_inline(struct mlx5_txq_data *__rte_restrict txq,
+@@ -2724,14 +2731,4 @@ mlx5_tx_burst_empw_inline(struct mlx5_txq_data *__rte_restrict txq,
@@ -161 +162 @@
-@@ -2793,4 +2790,14 @@ mlx5_tx_burst_empw_inline(struct mlx5_txq_data *__rte_restrict txq,
+@@ -2741,4 +2738,14 @@ mlx5_tx_burst_empw_inline(struct mlx5_txq_data *__rte_restrict txq,
@@ -176 +177 @@
-@@ -3075,9 +3082,10 @@ mlx5_tx_burst_single_send(struct mlx5_txq_data *__rte_restrict txq,
+@@ -3023,9 +3030,10 @@ mlx5_tx_burst_single_send(struct mlx5_txq_data *__rte_restrict txq,



Thread overview: 101+ messages
2022-10-25 15:05 patch 'build: enable developer mode for all working trees' " Kevin Traynor
2022-10-25 15:05 ` patch 'net: accept unaligned data in checksum routines' " Kevin Traynor
2022-10-25 15:05 ` patch 'eal: fix side effect in some pointer arithmetic macros' " Kevin Traynor
2022-10-25 15:05 ` patch 'app/testpmd: restore ixgbe bypass commands' " Kevin Traynor
2022-10-25 15:06 ` patch 'net/bonding: fix array overflow in Rx burst' " Kevin Traynor
2022-10-25 15:06 ` patch 'net/bonding: fix double slave link status query' " Kevin Traynor
2022-10-25 15:06 ` patch 'net/failsafe: fix interrupt handle leak' " Kevin Traynor
2022-10-25 15:06 ` patch 'net/nfp: compose firmware file name with new hwinfo' " Kevin Traynor
2022-10-25 15:06 ` patch 'net/axgbe: fix scattered Rx' " Kevin Traynor
2022-10-25 15:06 ` patch 'net/axgbe: fix mbuf lengths in " Kevin Traynor
2022-10-25 15:06 ` patch 'net/axgbe: fix length of each segment " Kevin Traynor
2022-10-25 15:06 ` patch 'net/axgbe: fix checksum and RSS " Kevin Traynor
2022-10-25 15:06 ` patch 'net/axgbe: optimise " Kevin Traynor
2022-10-25 15:06 ` patch 'net/axgbe: remove freeing buffer in " Kevin Traynor
2022-10-25 15:06 ` patch 'net/nfp: improve HW info header log readability' " Kevin Traynor
2022-10-25 15:06 ` patch 'net/txgbe: fix IPv6 flow rule' " Kevin Traynor
2022-10-25 15:06 ` patch 'net/txgbe: remove semaphore between SW/FW' " Kevin Traynor
2022-10-25 15:06 ` patch 'net/txgbe: rename some extended statistics' " Kevin Traynor
2022-10-25 15:06 ` patch 'net/ngbe: " Kevin Traynor
2022-10-25 15:06 ` patch 'net/ngbe: remove semaphore between SW/FW' " Kevin Traynor
2022-10-25 15:06 ` patch 'net/ngbe: fix maximum frame size' " Kevin Traynor
2022-10-25 15:06 ` patch 'common/cnxk: fix log level during MCAM allocation' " Kevin Traynor
2022-10-25 15:06 ` patch 'net/mvneta: fix build with GCC 12' " Kevin Traynor
2022-10-25 15:06 ` patch 'common/cnxk: fix missing flow counter reset' " Kevin Traynor
2022-10-25 15:06 ` patch 'common/cnxk: fix printing disabled MKEX registers' " Kevin Traynor
2022-10-25 15:06 ` patch 'malloc: fix storage size for some allocations' " Kevin Traynor
2022-10-25 15:06 ` patch 'event/dsw: fix flow migration' " Kevin Traynor
2022-10-25 15:06 ` patch 'event/sw: fix device name in dump' " Kevin Traynor
2022-10-25 15:06 ` patch 'eventdev/eth_tx: add spinlock for adapter start/stop' " Kevin Traynor
2022-10-25 15:06 ` patch 'eventdev/eth_tx: fix adapter stop' " Kevin Traynor
2022-10-25 15:06 ` patch 'test/ipsec: skip if no compatible device' " Kevin Traynor
2022-10-25 15:06 ` patch 'examples/ipsec-secgw: use Tx checksum offload conditionally' " Kevin Traynor
2022-10-25 15:06 ` patch 'test/crypto: fix debug messages' " Kevin Traynor
2022-10-25 15:06 ` patch 'common/qat: fix VF to PF answer' " Kevin Traynor
2022-10-25 15:06 ` patch 'test/ipsec: fix build with GCC 12' " Kevin Traynor
2022-10-25 15:06 ` patch 'ipsec: " Kevin Traynor
2022-10-25 15:06 ` patch 'crypto/qat: " Kevin Traynor
2022-10-25 15:06 ` patch 'cryptodev: fix missing SHA3 algorithm strings' " Kevin Traynor
2022-10-25 15:06 ` patch 'eventdev: fix name of Rx conf type in documentation' " Kevin Traynor
2022-10-25 15:06 ` patch 'net/i40e: fix VF representor release' " Kevin Traynor
2022-10-25 15:06 ` patch 'net/ice: fix RSS hash update' " Kevin Traynor
2022-10-25 15:06 ` patch 'net/ice/base: fix inner symmetric RSS hash in raw flow' " Kevin Traynor
2022-10-25 15:06 ` patch 'net/iavf: fix L3 checksum Tx offload flag' " Kevin Traynor
2022-10-25 15:06 ` patch 'net/iavf: fix VLAN insertion' " Kevin Traynor
2022-10-25 15:06 ` patch 'net/iavf: fix pattern check for flow director parser' " Kevin Traynor
2022-10-25 15:06 ` patch 'net/iavf: fix Tx done descriptors cleanup' " Kevin Traynor
2022-10-25 15:06 ` patch 'net/iavf: update IPsec ESN values when updating session' " Kevin Traynor
2022-10-25 15:06 ` patch 'common/iavf: avoid copy in async mode' " Kevin Traynor
2022-10-25 15:06 ` patch 'net/ice/base: fix division during E822 PTP init' " Kevin Traynor
2022-10-25 15:06 ` patch 'net/ice/base: fix 100M speed capability' " Kevin Traynor
2022-10-25 15:06 ` patch 'net/ice/base: fix DSCP PFC TLV creation' " Kevin Traynor
2022-10-25 15:06 ` patch 'net/ice/base: fix media type of PHY 10G SFI C2C' " Kevin Traynor
2022-10-25 15:06 ` patch 'net/ice/base: fix function descriptions for parser' " Kevin Traynor
2022-10-25 15:06 ` patch 'net/ice/base: fix endian format' " Kevin Traynor
2022-10-25 15:06 ` patch 'net/ice/base: fix array overflow in add switch recipe' " Kevin Traynor
2022-10-25 15:06 ` patch 'net/ice/base: fix bit finding range over ptype bitmap' " Kevin Traynor
2022-10-25 15:06 ` patch 'net/ice/base: fix add MAC rule' " Kevin Traynor
2022-10-25 15:06 ` patch 'net/ice/base: fix double VLAN in promiscuous mode' " Kevin Traynor
2022-10-25 15:06 ` patch 'net/ice/base: ignore promiscuous already exist' " Kevin Traynor
2022-10-25 15:06 ` patch 'net/ice/base: fix input set of GTPoGRE' " Kevin Traynor
2022-10-25 15:06 ` patch 'net/iavf: fix processing VLAN TCI in SSE path' " Kevin Traynor
2022-10-25 15:06 ` patch 'net/iavf: fix outer checksum flags' " Kevin Traynor
2022-10-25 15:06 ` patch 'net/virtio: fix crash when configured twice' " Kevin Traynor
2022-10-25 15:06 ` patch 'net/mlx4: fix Verbs FD leak in secondary process' " Kevin Traynor
2022-10-25 15:07 ` patch 'net/mlx5: " Kevin Traynor
2022-10-25 15:07 ` Kevin Traynor [this message]
2022-10-25 15:07 ` patch 'net/mlx5: fix single not inline packet storing' " Kevin Traynor
2022-10-25 15:07 ` patch 'net/mlx5: fix inline length exceeding descriptor limit' " Kevin Traynor
2022-10-25 15:07 ` patch 'net/mlx5: fix Tx check for hardware descriptor length' " Kevin Traynor
2022-10-25 15:07 ` patch 'net/mlx5: fix modify action with tunnel decapsulation' " Kevin Traynor
2022-10-25 15:07 ` patch 'net/mlx5: fix meter profile delete after disable' " Kevin Traynor
2022-10-25 15:07 ` patch 'net/iavf: check illegal packet sizes' " Kevin Traynor
2022-10-25 15:07 ` patch 'net/ice: " Kevin Traynor
2022-10-25 15:07 ` patch 'net/cnxk: fix DF bit in vector mode' " Kevin Traynor
2022-10-25 15:07 ` patch 'net/axgbe: reset end of packet in scattered Rx' " Kevin Traynor
2022-10-25 15:07 ` patch 'net/axgbe: clear buffer on scattered Rx chaining failure' " Kevin Traynor
2022-10-25 15:07 ` patch 'net/axgbe: save segment data in scattered Rx' " Kevin Traynor
2022-10-25 15:07 ` patch 'common/sfc_efx/base: fix maximum Tx data count' " Kevin Traynor
2022-10-25 15:07 ` patch 'event/cnxk: fix missing xstats operations' " Kevin Traynor
2022-10-25 15:07 ` patch 'cryptodev: fix unduly newlines in logs' " Kevin Traynor
2022-10-25 15:07 ` patch 'net/bnxt: fix null pointer dereference in LED config' " Kevin Traynor
2022-11-17  9:05   ` 答复: " Mao,Yingming
2022-11-17 10:08     ` Kevin Traynor
2022-10-25 15:07 ` patch 'net/bnxt: fix error code during MTU change' " Kevin Traynor
2022-10-25 15:07 ` patch 'net/bnxt: remove unnecessary check' " Kevin Traynor
2022-10-25 15:07 ` patch 'net/bnxt: fix representor info freeing' " Kevin Traynor
2022-10-25 15:07 ` patch 'net/bnxt: fix build with GCC 13' " Kevin Traynor
2022-10-25 15:07 ` patch 'mem: fix API doc about allocation on secondary processes' " Kevin Traynor
2022-10-25 15:07 ` patch 'examples/vm_power_manager: use safe list iterator' " Kevin Traynor
2022-10-25 15:07 ` patch 'timer: fix stopping all timers' " Kevin Traynor
2022-10-25 15:07 ` patch 'vhost: fix build with GCC 12' " Kevin Traynor
2022-10-25 15:07 ` patch 'net/i40e: fix build with MinGW " Kevin Traynor
2022-10-25 15:07 ` patch 'net/qede/base: fix 32-bit build with " Kevin Traynor
2022-10-25 15:07 ` patch 'net/tap: fix overflow of network interface index' " Kevin Traynor
2022-10-25 15:07 ` patch 'net/memif: fix crash with different number of Rx/Tx queues' " Kevin Traynor
2022-10-25 15:07 ` patch 'common/sfc_efx/base: remove VQ index check during VQ start' " Kevin Traynor
2022-10-25 15:07 ` patch 'net/hns3: fix Rx with PTP' " Kevin Traynor
2022-10-25 15:07 ` patch 'net/hns3: fix crash in SVE Tx' " Kevin Traynor
2022-10-25 15:07 ` patch 'net/hns3: fix next-to-use overflow " Kevin Traynor
2022-10-25 15:07 ` patch 'net/hns3: fix next-to-use overflow in simple " Kevin Traynor
2022-10-25 15:07 ` patch 'net/hns3: fix crash when secondary process access FW' " Kevin Traynor

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20221025150734.142189-66-ktraynor@redhat.com \
    --to=ktraynor@redhat.com \
    --cc=stable@dpdk.org \
    --cc=viacheslavo@nvidia.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html
