From: Kevin Traynor <ktraynor@redhat.com>
To: Rongwei Liu <rongweil@nvidia.com>
Cc: Dariusz Sosnowski <dsosnowski@nvidia.com>, dpdk stable <stable@dpdk.org>
Subject: patch 'net/mlx5: fix flow aging race condition' has been queued to stable release 24.11.4
Date: Fri, 31 Oct 2025 14:33:33 +0000
Message-ID: <20251031143421.324432-91-ktraynor@redhat.com>
In-Reply-To: <20251031143421.324432-1-ktraynor@redhat.com>
Hi,
FYI, your patch has been queued to stable release 24.11.4.
Note it hasn't been pushed to http://dpdk.org/browse/dpdk-stable yet.
It will be pushed if I get no objections before 11/05/25, so please
shout if you have any objections.
Also note that after the patch there's a diff of the upstream commit vs the
patch applied to the branch. This will indicate if any rebasing was
needed to apply it to the stable branch. If there were code changes for rebasing
(i.e. not only metadata diffs), please double-check that the rebase was
done correctly.
Queued patches are on a temporary branch at:
https://github.com/kevintraynor/dpdk-stable
This queued commit can be viewed at:
https://github.com/kevintraynor/dpdk-stable/commit/5c5f24d5279a0a124b63050f4345c0f8a834d5b4
Thanks.
Kevin
---
From 5c5f24d5279a0a124b63050f4345c0f8a834d5b4 Mon Sep 17 00:00:00 2001
From: Rongwei Liu <rongweil@nvidia.com>
Date: Thu, 9 Oct 2025 12:18:10 +0300
Subject: [PATCH] net/mlx5: fix flow aging race condition
[ upstream commit 820ca7361bb7fa40e96e53515d8392ea40a35265 ]
When aging is configured, a background thread
queries all the counters in the pool.
Meanwhile, per-queue flow insertion/deletion/update changes
the counter pool too. This introduces a race condition between
resetting a counter's in_used and age_idx fields during flow deletion
and reading them in the background thread.
To resolve it, all key members of the counter's struct
are placed in a single uint32_t and accessed atomically.
To avoid the query generation occasionally aliasing the age_idx value,
query_gen_when_free is moved out of the union. The total memory
size is kept the same.
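
As an illustration of the pattern, here is a minimal standalone sketch
(hypothetical names, plain C11 atomics rather than DPDK's rte_stdatomic
wrappers); it is not the driver code itself, only a demonstration of
publishing all flags with one release store and reading them back with
one acquire load:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Plain helper union used only to pack/unpack the bitfields. */
union cnt_bits {
	uint32_t raw;
	struct {
		uint32_t in_used:1;   /* counter handed out to a flow */
		uint32_t share:1;     /* counter used as indirect action */
		uint32_t age_idx:24;  /* AGE parameter index, 0 if none */
	};
};

/* Shared per-counter state: a single atomically accessed word. */
struct cnt_state {
	_Atomic uint32_t data;
};

/* Writer (flow create/destroy path): publish every field at once. */
static void cnt_set_all(struct cnt_state *s, uint32_t in_used,
			uint32_t share, uint32_t age_idx)
{
	union cnt_bits b = { .raw = 0 };

	b.in_used = !!in_used;
	b.share = !!share;
	b.age_idx = age_idx;
	atomic_store_explicit(&s->data, b.raw, memory_order_release);
}

/* Reader (background aging thread): one consistent snapshot. */
static void cnt_get_all(struct cnt_state *s, uint32_t *in_used,
			uint32_t *share, uint32_t *age_idx)
{
	union cnt_bits b;

	b.raw = atomic_load_explicit(&s->data, memory_order_acquire);
	if (in_used != NULL)
		*in_used = b.in_used;
	if (share != NULL)
		*share = b.share;
	if (age_idx != NULL)
		*age_idx = b.age_idx;
}

int main(void)
{
	struct cnt_state s = { .data = 0 };
	uint32_t in_used, age_idx;

	cnt_set_all(&s, 1, 0, 42);
	cnt_get_all(&s, &in_used, NULL, &age_idx);
	printf("in_used=%u age_idx=%u\n", in_used, age_idx);
	return 0;
}

Because the three fields share one word, the reader can no longer observe
in_used cleared while a stale age_idx is still visible (or vice versa),
which is the torn state the background thread could previously hit.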
Fixes: 04a4de756e14 ("net/mlx5: support flow age action with HWS")
Signed-off-by: Rongwei Liu <rongweil@nvidia.com>
Acked-by: Dariusz Sosnowski <dsosnowski@nvidia.com>
---
drivers/net/mlx5/mlx5_flow_hw.c | 5 +-
drivers/net/mlx5/mlx5_hws_cnt.c | 10 +--
drivers/net/mlx5/mlx5_hws_cnt.h | 135 ++++++++++++++++++++------------
3 files changed, 91 insertions(+), 59 deletions(-)
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index be0129a96f..052b7f2768 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -3192,5 +3192,5 @@ flow_hw_shared_action_construct(struct rte_eth_dev *dev, uint32_t queue,
if (action_flags & MLX5_FLOW_ACTION_COUNT) {
cnt_queue = mlx5_hws_cnt_get_queue(priv, &queue);
- if (mlx5_hws_cnt_pool_get(priv->hws_cpool, cnt_queue, &age_cnt, idx) < 0)
+ if (mlx5_hws_cnt_pool_get(priv->hws_cpool, cnt_queue, &age_cnt, idx, 0) < 0)
return -1;
flow->flags |= MLX5_FLOW_HW_FLOW_FLAG_CNT_ID;
@@ -3628,5 +3628,6 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,
case RTE_FLOW_ACTION_TYPE_COUNT:
cnt_queue = mlx5_hws_cnt_get_queue(priv, &queue);
- ret = mlx5_hws_cnt_pool_get(priv->hws_cpool, cnt_queue, &cnt_id, age_idx);
+ ret = mlx5_hws_cnt_pool_get(priv->hws_cpool, cnt_queue, &cnt_id,
+ age_idx, 0);
if (ret != 0) {
rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_ACTION,
diff --git a/drivers/net/mlx5/mlx5_hws_cnt.c b/drivers/net/mlx5/mlx5_hws_cnt.c
index 7baeaedd17..3a93434ecd 100644
--- a/drivers/net/mlx5/mlx5_hws_cnt.c
+++ b/drivers/net/mlx5/mlx5_hws_cnt.c
@@ -57,6 +57,6 @@ __mlx5_hws_cnt_svc(struct mlx5_dev_ctx_shared *sh,
reset_cnt_num = rte_ring_count(reset_list);
- cpool->query_gen++;
mlx5_aso_cnt_query(sh, cpool);
+ rte_atomic_fetch_add_explicit(&cpool->query_gen, 1, rte_memory_order_release);
zcdr.n1 = 0;
zcdu.n1 = 0;
@@ -128,12 +128,12 @@ mlx5_hws_aging_check(struct mlx5_priv *priv, struct mlx5_hws_cnt_pool *cpool)
uint16_t expected1 = HWS_AGE_CANDIDATE;
uint16_t expected2 = HWS_AGE_CANDIDATE_INSIDE_RING;
- uint32_t i;
+ uint32_t i, age_idx, in_use;
cpool->time_of_last_age_check = curr_time;
for (i = 0; i < nb_alloc_cnts; ++i) {
- uint32_t age_idx = cpool->pool[i].age_idx;
uint64_t hits;
- if (!cpool->pool[i].in_used || age_idx == 0)
+ mlx5_hws_cnt_get_all(&cpool->pool[i], &in_use, NULL, &age_idx);
+ if (!in_use || age_idx == 0)
continue;
param = mlx5_ipool_get(age_info->ages_ipool, age_idx);
@@ -754,5 +754,5 @@ mlx5_hws_cnt_pool_create(struct rte_eth_dev *dev,
* to wait for query.
*/
- cpool->query_gen = 1;
+ rte_atomic_store_explicit(&cpool->query_gen, 1, rte_memory_order_relaxed);
ret = mlx5_hws_cnt_pool_action_create(priv, cpool);
if (ret != 0) {
diff --git a/drivers/net/mlx5/mlx5_hws_cnt.h b/drivers/net/mlx5/mlx5_hws_cnt.h
index d8da9dfcdd..8408c571ec 100644
--- a/drivers/net/mlx5/mlx5_hws_cnt.h
+++ b/drivers/net/mlx5/mlx5_hws_cnt.h
@@ -43,29 +43,32 @@ struct mlx5_hws_cnt_dcs_mng {
};
+union mlx5_hws_cnt_state {
+ alignas(RTE_CACHE_LINE_SIZE) RTE_ATOMIC(uint32_t)data;
+ struct {
+ uint32_t in_used:1;
+ /* Indicator whether this counter in used or in pool. */
+ uint32_t share:1;
+ /*
+ * share will be set to 1 when this counter is used as
+ * indirect action.
+ */
+ uint32_t age_idx:24;
+ /*
+ * When this counter uses for aging, it stores the index
+ * of AGE parameter. Otherwise, this index is zero.
+ */
+ };
+};
+
struct mlx5_hws_cnt {
struct flow_counter_stats reset;
- bool in_used; /* Indicator whether this counter in used or in pool. */
- union {
- struct {
- uint32_t share:1;
- /*
- * share will be set to 1 when this counter is used as
- * indirect action.
- */
- uint32_t age_idx:24;
- /*
- * When this counter uses for aging, it save the index
- * of AGE parameter. For pure counter (without aging)
- * this index is zero.
- */
- };
- /* This struct is only meaningful when user own this counter. */
- uint32_t query_gen_when_free;
- /*
- * When PMD own this counter (user put back counter to PMD
- * counter pool, i.e), this field recorded value of counter
- * pools query generation at time user release the counter.
- */
- };
+ union mlx5_hws_cnt_state cnt_state;
+ /* This struct is only meaningful when user own this counter. */
+ alignas(RTE_CACHE_LINE_SIZE) RTE_ATOMIC(uint32_t)query_gen_when_free;
+ /*
+ * When PMD own this counter (user put back counter to PMD
+ * counter pool, i.e), this field recorded value of counter
+ * pools query generation at time user release the counter.
+ */
};
@@ -198,4 +201,40 @@ mlx5_hws_cnt_id_valid(cnt_id_t cnt_id)
}
+static __rte_always_inline void
+mlx5_hws_cnt_set_age_idx(struct mlx5_hws_cnt *cnt, uint32_t value)
+{
+ union mlx5_hws_cnt_state cnt_state;
+
+ cnt_state.data = rte_atomic_load_explicit(&cnt->cnt_state.data, rte_memory_order_acquire);
+ cnt_state.age_idx = value;
+ rte_atomic_store_explicit(&cnt->cnt_state.data, cnt_state.data, rte_memory_order_release);
+}
+
+static __rte_always_inline void
+mlx5_hws_cnt_set_all(struct mlx5_hws_cnt *cnt, uint32_t in_used, uint32_t share, uint32_t age_idx)
+{
+ union mlx5_hws_cnt_state cnt_state;
+
+ cnt_state.in_used = !!in_used;
+ cnt_state.share = !!share;
+ cnt_state.age_idx = age_idx;
+ rte_atomic_store_explicit(&cnt->cnt_state.data, cnt_state.data, rte_memory_order_relaxed);
+}
+
+static __rte_always_inline void
+mlx5_hws_cnt_get_all(struct mlx5_hws_cnt *cnt, uint32_t *in_used, uint32_t *share,
+ uint32_t *age_idx)
+{
+ union mlx5_hws_cnt_state cnt_state;
+
+ cnt_state.data = rte_atomic_load_explicit(&cnt->cnt_state.data, rte_memory_order_acquire);
+ if (in_used != NULL)
+ *in_used = cnt_state.in_used;
+ if (share != NULL)
+ *share = cnt_state.share;
+ if (age_idx != NULL)
+ *age_idx = cnt_state.age_idx;
+}
+
/**
* Generate Counter id from internal index.
@@ -425,7 +464,8 @@ mlx5_hws_cnt_pool_put(struct mlx5_hws_cnt_pool *cpool, uint32_t *queue,
hpool = mlx5_hws_cnt_host_pool(cpool);
iidx = mlx5_hws_cnt_iidx(hpool, *cnt_id);
- hpool->pool[iidx].in_used = false;
- hpool->pool[iidx].query_gen_when_free =
- rte_atomic_load_explicit(&hpool->query_gen, rte_memory_order_relaxed);
+ mlx5_hws_cnt_set_all(&hpool->pool[iidx], 0, 0, 0);
+ rte_atomic_store_explicit(&hpool->pool[iidx].query_gen_when_free,
+ rte_atomic_load_explicit(&hpool->query_gen, rte_memory_order_relaxed),
+ rte_memory_order_relaxed);
if (likely(queue != NULL) && cpool->cfg.host_cpool == NULL)
qcache = hpool->cache->qcache[*queue];
@@ -481,5 +521,5 @@ mlx5_hws_cnt_pool_put(struct mlx5_hws_cnt_pool *cpool, uint32_t *queue,
static __rte_always_inline int
mlx5_hws_cnt_pool_get(struct mlx5_hws_cnt_pool *cpool, uint32_t *queue,
- cnt_id_t *cnt_id, uint32_t age_idx)
+ cnt_id_t *cnt_id, uint32_t age_idx, uint32_t shared)
{
unsigned int ret;
@@ -509,8 +549,5 @@ mlx5_hws_cnt_pool_get(struct mlx5_hws_cnt_pool *cpool, uint32_t *queue,
&cpool->pool[iidx].reset.hits,
&cpool->pool[iidx].reset.bytes);
- cpool->pool[iidx].share = 0;
- MLX5_ASSERT(!cpool->pool[iidx].in_used);
- cpool->pool[iidx].in_used = true;
- cpool->pool[iidx].age_idx = age_idx;
+ mlx5_hws_cnt_set_all(&cpool->pool[iidx], 1, shared, age_idx);
return 0;
}
@@ -531,6 +568,8 @@ mlx5_hws_cnt_pool_get(struct mlx5_hws_cnt_pool *cpool, uint32_t *queue,
*cnt_id = (*(cnt_id_t *)zcdc.ptr1);
iidx = mlx5_hws_cnt_iidx(cpool, *cnt_id);
- query_gen = cpool->pool[iidx].query_gen_when_free;
- if (cpool->query_gen == query_gen) { /* counter is waiting to reset. */
+ query_gen = rte_atomic_load_explicit(&cpool->pool[iidx].query_gen_when_free,
+ rte_memory_order_relaxed);
+ /* counter is waiting to reset. */
+ if (rte_atomic_load_explicit(&cpool->query_gen, rte_memory_order_relaxed) == query_gen) {
rte_ring_dequeue_zc_elem_finish(qcache, 0);
/* write-back counter to reset list. */
@@ -550,8 +589,5 @@ mlx5_hws_cnt_pool_get(struct mlx5_hws_cnt_pool *cpool, uint32_t *queue,
&cpool->pool[iidx].reset.bytes);
rte_ring_dequeue_zc_elem_finish(qcache, 1);
- cpool->pool[iidx].share = 0;
- MLX5_ASSERT(!cpool->pool[iidx].in_used);
- cpool->pool[iidx].in_used = true;
- cpool->pool[iidx].age_idx = age_idx;
+ mlx5_hws_cnt_set_all(&cpool->pool[iidx], 1, shared, age_idx);
return 0;
}
@@ -612,13 +648,6 @@ mlx5_hws_cnt_shared_get(struct mlx5_hws_cnt_pool *cpool, cnt_id_t *cnt_id,
{
struct mlx5_hws_cnt_pool *hpool = mlx5_hws_cnt_host_pool(cpool);
- uint32_t iidx;
- int ret;
- ret = mlx5_hws_cnt_pool_get(hpool, NULL, cnt_id, age_idx);
- if (ret != 0)
- return ret;
- iidx = mlx5_hws_cnt_iidx(hpool, *cnt_id);
- hpool->pool[iidx].share = 1;
- return 0;
+ return mlx5_hws_cnt_pool_get(hpool, NULL, cnt_id, age_idx, 1);
}
@@ -627,7 +656,5 @@ mlx5_hws_cnt_shared_put(struct mlx5_hws_cnt_pool *cpool, cnt_id_t *cnt_id)
{
struct mlx5_hws_cnt_pool *hpool = mlx5_hws_cnt_host_pool(cpool);
- uint32_t iidx = mlx5_hws_cnt_iidx(hpool, *cnt_id);
- hpool->pool[iidx].share = 0;
mlx5_hws_cnt_pool_put(hpool, NULL, cnt_id);
}
@@ -638,6 +665,8 @@ mlx5_hws_cnt_is_shared(struct mlx5_hws_cnt_pool *cpool, cnt_id_t cnt_id)
struct mlx5_hws_cnt_pool *hpool = mlx5_hws_cnt_host_pool(cpool);
uint32_t iidx = mlx5_hws_cnt_iidx(hpool, cnt_id);
+ uint32_t share;
- return hpool->pool[iidx].share ? true : false;
+ mlx5_hws_cnt_get_all(&hpool->pool[iidx], NULL, &share, NULL);
+ return !!share;
}
@@ -649,6 +678,6 @@ mlx5_hws_cnt_age_set(struct mlx5_hws_cnt_pool *cpool, cnt_id_t cnt_id,
uint32_t iidx = mlx5_hws_cnt_iidx(hpool, cnt_id);
- MLX5_ASSERT(hpool->pool[iidx].share);
- hpool->pool[iidx].age_idx = age_idx;
+ MLX5_ASSERT(hpool->pool[iidx].cnt_state.share);
+ mlx5_hws_cnt_set_age_idx(&hpool->pool[iidx], age_idx);
}
@@ -658,7 +687,9 @@ mlx5_hws_cnt_age_get(struct mlx5_hws_cnt_pool *cpool, cnt_id_t cnt_id)
struct mlx5_hws_cnt_pool *hpool = mlx5_hws_cnt_host_pool(cpool);
uint32_t iidx = mlx5_hws_cnt_iidx(hpool, cnt_id);
+ uint32_t age_idx, share;
- MLX5_ASSERT(hpool->pool[iidx].share);
- return hpool->pool[iidx].age_idx;
+ mlx5_hws_cnt_get_all(&hpool->pool[iidx], NULL, &share, &age_idx);
+ MLX5_ASSERT(share);
+ return age_idx;
}
--
2.51.0
---
Diff of the applied patch vs upstream commit (please double-check if non-empty):
---
--- - 2025-10-31 13:53:55.022194996 +0000
+++ 0091-net-mlx5-fix-flow-aging-race-condition.patch 2025-10-31 13:53:52.263524077 +0000
@@ -1 +1 @@
-From 820ca7361bb7fa40e96e53515d8392ea40a35265 Mon Sep 17 00:00:00 2001
+From 5c5f24d5279a0a124b63050f4345c0f8a834d5b4 Mon Sep 17 00:00:00 2001
@@ -5,0 +6,2 @@
+[ upstream commit 820ca7361bb7fa40e96e53515d8392ea40a35265 ]
+
@@ -22 +23,0 @@
-Cc: stable@dpdk.org
@@ -33 +34 @@
-index 9a0aa1827e..491a78a0de 100644
+index be0129a96f..052b7f2768 100644
@@ -36 +37 @@
-@@ -3233,5 +3233,5 @@ flow_hw_shared_action_construct(struct rte_eth_dev *dev, uint32_t queue,
+@@ -3192,5 +3192,5 @@ flow_hw_shared_action_construct(struct rte_eth_dev *dev, uint32_t queue,
@@ -43 +44 @@
-@@ -3669,5 +3669,6 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,
+@@ -3628,5 +3628,6 @@ flow_hw_actions_construct(struct rte_eth_dev *dev,
@@ -52 +53 @@
-index 5c738f38ca..fb01fce4e5 100644
+index 7baeaedd17..3a93434ecd 100644
@@ -55 +56 @@
-@@ -64,6 +64,6 @@ __mlx5_hws_cnt_svc(struct mlx5_dev_ctx_shared *sh,
+@@ -57,6 +57,6 @@ __mlx5_hws_cnt_svc(struct mlx5_dev_ctx_shared *sh,
@@ -63 +64 @@
-@@ -135,12 +135,12 @@ mlx5_hws_aging_check(struct mlx5_priv *priv, struct mlx5_hws_cnt_pool *cpool)
+@@ -128,12 +128,12 @@ mlx5_hws_aging_check(struct mlx5_priv *priv, struct mlx5_hws_cnt_pool *cpool)
@@ -79 +80 @@
-@@ -768,5 +768,5 @@ mlx5_hws_cnt_pool_create(struct rte_eth_dev *dev,
+@@ -754,5 +754,5 @@ mlx5_hws_cnt_pool_create(struct rte_eth_dev *dev,
@@ -87 +88 @@
-index 38a9c19449..f5b7e8f643 100644
+index d8da9dfcdd..8408c571ec 100644