* [PATCH 01/19] net/txgbe: fix hardware statistic rx_l3_l4_xsum_error
2025-10-27 3:15 [PATCH 00/19] Wangxun Fixes Jiawen Wu
@ 2025-10-27 3:15 ` Jiawen Wu
2025-10-27 3:15 ` [PATCH 02/19] net/ngbe: " Jiawen Wu
` (18 subsequent siblings)
19 siblings, 0 replies; 21+ messages in thread
From: Jiawen Wu @ 2025-10-27 3:15 UTC (permalink / raw)
To: dev; +Cc: zaiyuwang, Jiawen Wu, stable
Count the rx_l3_l4_xsum_error statistic in the Rx path. Since this
hardware register counter is missing, the count was always reported as
0, which is wrong.
Fixes: 91fe49c87d76 ("net/txgbe: support device xstats")
Cc: stable@dpdk.org
Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
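A minimal standalone sketch of the counting idiom used in the diff
below, with simplified flag names (the real ones are the
TXGBE_RXD_STAT_*/TXGBE_RXD_ERR_* bits):

#include <stdint.h>

#define STAT_IPCS 0x1 /* hardware validated the IP checksum */
#define ERR_IPCS  0x2 /* the IP checksum validation failed */

static void count_csum_err(uint32_t rx_status, uint64_t *csum_err)
{
        /* "!!" collapses the masked bit to 0 or 1, so every errored
         * packet increments the per-queue counter exactly once.
         */
        if (rx_status & STAT_IPCS)
                *csum_err += !!(rx_status & ERR_IPCS);
}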
drivers/net/txgbe/txgbe_ethdev.c | 17 +++++++++++++++++
drivers/net/txgbe/txgbe_rxtx.c | 12 ++++++++----
drivers/net/txgbe/txgbe_rxtx.h | 1 +
drivers/net/txgbe/txgbe_rxtx_vec_neon.c | 9 ++++++++-
drivers/net/txgbe/txgbe_rxtx_vec_sse.c | 9 ++++++++-
5 files changed, 42 insertions(+), 6 deletions(-)
diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index 7b040b08c5..cbb2ea815f 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -2666,6 +2666,8 @@ txgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
{
struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
+ struct txgbe_rx_queue *rxq;
+ uint64_t rx_csum_err = 0;
unsigned int i, count;
txgbe_read_stats_registers(hw, hw_stats);
@@ -2679,6 +2681,13 @@ txgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
limit = min(limit, txgbe_xstats_calc_num(dev));
+ /* Rx Checksum Errors */
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ rx_csum_err += rxq->csum_err;
+ }
+ hw_stats->rx_l3_l4_xsum_error = rx_csum_err;
+
/* Extended stats from txgbe_hw_stats */
for (i = 0; i < limit; i++) {
uint32_t offset = 0;
@@ -2755,6 +2764,8 @@ txgbe_dev_xstats_reset(struct rte_eth_dev *dev)
{
struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
+ struct txgbe_rx_queue *rxq;
+ int i = 0;
/* HW registers are cleared on read */
hw->offset_loaded = 0;
@@ -2764,6 +2775,12 @@ txgbe_dev_xstats_reset(struct rte_eth_dev *dev)
/* Reset software totals */
memset(hw_stats, 0, sizeof(*hw_stats));
+ /* Reset rxq checksum errors */
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ rxq->csum_err = 0;
+ }
+
return 0;
}
diff --git a/drivers/net/txgbe/txgbe_rxtx.c b/drivers/net/txgbe/txgbe_rxtx.c
index 167bda8019..c606180741 100644
--- a/drivers/net/txgbe/txgbe_rxtx.c
+++ b/drivers/net/txgbe/txgbe_rxtx.c
@@ -1290,7 +1290,7 @@ rx_desc_status_to_pkt_flags(uint32_t rx_status, uint64_t vlan_flags)
}
static inline uint64_t
-rx_desc_error_to_pkt_flags(uint32_t rx_status)
+rx_desc_error_to_pkt_flags(uint32_t rx_status, struct txgbe_rx_queue *rxq)
{
uint64_t pkt_flags = 0;
@@ -1298,16 +1298,19 @@ rx_desc_error_to_pkt_flags(uint32_t rx_status)
if (rx_status & TXGBE_RXD_STAT_IPCS) {
pkt_flags |= (rx_status & TXGBE_RXD_ERR_IPCS
? RTE_MBUF_F_RX_IP_CKSUM_BAD : RTE_MBUF_F_RX_IP_CKSUM_GOOD);
+ rxq->csum_err += !!(rx_status & TXGBE_RXD_ERR_IPCS);
}
if (rx_status & TXGBE_RXD_STAT_L4CS) {
pkt_flags |= (rx_status & TXGBE_RXD_ERR_L4CS
? RTE_MBUF_F_RX_L4_CKSUM_BAD : RTE_MBUF_F_RX_L4_CKSUM_GOOD);
+ rxq->csum_err += !!(rx_status & TXGBE_RXD_ERR_L4CS);
}
if (rx_status & TXGBE_RXD_STAT_EIPCS &&
rx_status & TXGBE_RXD_ERR_EIPCS) {
pkt_flags |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;
+ rxq->csum_err += !!(rx_status & TXGBE_RXD_ERR_EIPCS);
}
#ifdef RTE_LIB_SECURITY
@@ -1389,7 +1392,7 @@ txgbe_rx_scan_hw_ring(struct txgbe_rx_queue *rxq)
/* convert descriptor fields to rte mbuf flags */
pkt_flags = rx_desc_status_to_pkt_flags(s[j],
rxq->vlan_flags);
- pkt_flags |= rx_desc_error_to_pkt_flags(s[j]);
+ pkt_flags |= rx_desc_error_to_pkt_flags(s[j], rxq);
pkt_flags |=
txgbe_rxd_pkt_info_to_pkt_flags(pkt_info[j]);
mb->ol_flags = pkt_flags;
@@ -1728,7 +1731,7 @@ txgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
pkt_flags = rx_desc_status_to_pkt_flags(staterr,
rxq->vlan_flags);
- pkt_flags |= rx_desc_error_to_pkt_flags(staterr);
+ pkt_flags |= rx_desc_error_to_pkt_flags(staterr, rxq);
pkt_flags |= txgbe_rxd_pkt_info_to_pkt_flags(pkt_info);
rxm->ol_flags = pkt_flags;
rxm->packet_type = txgbe_rxd_pkt_info_to_pkt_type(pkt_info,
@@ -1804,7 +1807,7 @@ txgbe_fill_cluster_head_buf(struct rte_mbuf *head, struct txgbe_rx_desc *desc,
head->vlan_tci = rte_le_to_cpu_16(desc->qw1.hi.tag);
pkt_info = rte_le_to_cpu_32(desc->qw0.dw0);
pkt_flags = rx_desc_status_to_pkt_flags(staterr, rxq->vlan_flags);
- pkt_flags |= rx_desc_error_to_pkt_flags(staterr);
+ pkt_flags |= rx_desc_error_to_pkt_flags(staterr, rxq);
pkt_flags |= txgbe_rxd_pkt_info_to_pkt_flags(pkt_info);
if (TXGBE_RXD_RSCCNT(desc->qw0.dw0))
pkt_flags |= RTE_MBUF_F_RX_LRO;
@@ -2753,6 +2756,7 @@ txgbe_reset_rx_queue(struct txgbe_adapter *adapter, struct txgbe_rx_queue *rxq)
rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
rxq->rx_tail = 0;
rxq->nb_rx_hold = 0;
+ rxq->csum_err = 0;
rte_pktmbuf_free(rxq->pkt_first_seg);
rxq->pkt_first_seg = NULL;
rxq->pkt_last_seg = NULL;
diff --git a/drivers/net/txgbe/txgbe_rxtx.h b/drivers/net/txgbe/txgbe_rxtx.h
index b1ac03576f..02e2617cce 100644
--- a/drivers/net/txgbe/txgbe_rxtx.h
+++ b/drivers/net/txgbe/txgbe_rxtx.h
@@ -323,6 +323,7 @@ struct txgbe_rx_queue {
/** hold packets to return to application */
struct rte_mbuf *rx_stage[RTE_PMD_TXGBE_RX_MAX_BURST * 2];
const struct rte_memzone *mz;
+ uint64_t csum_err;
};
/**
diff --git a/drivers/net/txgbe/txgbe_rxtx_vec_neon.c b/drivers/net/txgbe/txgbe_rxtx_vec_neon.c
index a56e2f4164..c408a65036 100644
--- a/drivers/net/txgbe/txgbe_rxtx_vec_neon.c
+++ b/drivers/net/txgbe/txgbe_rxtx_vec_neon.c
@@ -222,7 +222,7 @@ _recv_raw_pkts_vec(struct txgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
volatile struct txgbe_rx_desc *rxdp;
struct txgbe_rx_entry *sw_ring;
uint16_t nb_pkts_recd;
- int pos;
+ int pos, i;
uint8x16_t shuf_msk = {
0xFF, 0xFF,
0xFF, 0xFF, /* skip 32 bits pkt_type */
@@ -331,6 +331,13 @@ _recv_raw_pkts_vec(struct txgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
desc_to_olflags_v(sterr_tmp1, sterr_tmp2, staterr, vlan_flags,
&rx_pkts[pos]);
+ for (i = 0; i < RTE_TXGBE_DESCS_PER_LOOP; i++) {
+ if (rx_pkts[pos + i]->ol_flags &
+ (RTE_MBUF_F_RX_IP_CKSUM_BAD |
+ RTE_MBUF_F_RX_L4_CKSUM_BAD))
+ rxq->csum_err++;
+ }
+
/* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb4), crc_adjust);
pkt_mb4 = vreinterpretq_u8_u16(tmp);
diff --git a/drivers/net/txgbe/txgbe_rxtx_vec_sse.c b/drivers/net/txgbe/txgbe_rxtx_vec_sse.c
index 8ecce33471..03c2af43d5 100644
--- a/drivers/net/txgbe/txgbe_rxtx_vec_sse.c
+++ b/drivers/net/txgbe/txgbe_rxtx_vec_sse.c
@@ -283,7 +283,7 @@ _recv_raw_pkts_vec(struct txgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
#ifdef RTE_LIB_SECURITY
uint8_t use_ipsec = rxq->using_ipsec;
#endif
- int pos;
+ int pos, i;
uint64_t var;
__m128i shuf_msk;
__m128i crc_adjust = _mm_set_epi16(0, 0, 0, /* ignore non-length fields */
@@ -451,6 +451,13 @@ _recv_raw_pkts_vec(struct txgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
/* set ol_flags with vlan packet type */
desc_to_olflags_v(descs, mbuf_init, vlan_flags, &rx_pkts[pos]);
+ for (i = 0; i < RTE_TXGBE_DESCS_PER_LOOP; i++) {
+ if (rx_pkts[pos + i]->ol_flags &
+ (RTE_MBUF_F_RX_IP_CKSUM_BAD |
+ RTE_MBUF_F_RX_L4_CKSUM_BAD))
+ rxq->csum_err++;
+ }
+
#ifdef RTE_LIB_SECURITY
if (unlikely(use_ipsec))
desc_to_olflags_v_ipsec(descs, &rx_pkts[pos]);
--
2.48.1
^ permalink raw reply [flat|nested] 21+ messages in thread
* [PATCH 02/19] net/ngbe: fix hardware statistic rx_l3_l4_xsum_error
2025-10-27 3:15 [PATCH 00/19] Wangxun Fixes Jiawen Wu
2025-10-27 3:15 ` [PATCH 01/19] net/txgbe: fix hardware statistic rx_l3_l4_xsum_error Jiawen Wu
@ 2025-10-27 3:15 ` Jiawen Wu
2025-10-27 3:15 ` [PATCH 03/19] net/txgbe: reduce memory size of ring descriptors Jiawen Wu
` (17 subsequent siblings)
19 siblings, 0 replies; 21+ messages in thread
From: Jiawen Wu @ 2025-10-27 3:15 UTC (permalink / raw)
To: dev; +Cc: zaiyuwang, Jiawen Wu, stable
Count the rx_l3_l4_xsum_error statistic in the Rx path. Since this
hardware register counter is missing, the count was always reported as
0, which is wrong.
Fixes: 8b433d04adc9 ("net/ngbe: support device xstats")
Cc: stable@dpdk.org
Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
drivers/net/ngbe/ngbe_ethdev.c | 17 +++++++++++++++++
drivers/net/ngbe/ngbe_rxtx.c | 21 ++++++++++++++-------
drivers/net/ngbe/ngbe_rxtx.h | 1 +
drivers/net/ngbe/ngbe_rxtx_vec_neon.c | 9 ++++++++-
drivers/net/ngbe/ngbe_rxtx_vec_sse.c | 9 ++++++++-
5 files changed, 48 insertions(+), 9 deletions(-)
diff --git a/drivers/net/ngbe/ngbe_ethdev.c b/drivers/net/ngbe/ngbe_ethdev.c
index adb7785498..8b9d6371fb 100644
--- a/drivers/net/ngbe/ngbe_ethdev.c
+++ b/drivers/net/ngbe/ngbe_ethdev.c
@@ -1704,6 +1704,8 @@ ngbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
{
struct ngbe_hw *hw = ngbe_dev_hw(dev);
struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
+ struct ngbe_rx_queue *rxq;
+ uint64_t rx_csum_err = 0;
unsigned int i, count;
ngbe_read_stats_registers(hw, hw_stats);
@@ -1717,6 +1719,13 @@ ngbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
limit = min(limit, ngbe_xstats_calc_num(dev));
+ /* Rx Checksum Errors */
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ rx_csum_err += rxq->csum_err;
+ }
+ hw_stats->rx_l3_l4_xsum_error = rx_csum_err;
+
/* Extended stats from ngbe_hw_stats */
for (i = 0; i < limit; i++) {
uint32_t offset = 0;
@@ -1793,6 +1802,8 @@ ngbe_dev_xstats_reset(struct rte_eth_dev *dev)
{
struct ngbe_hw *hw = ngbe_dev_hw(dev);
struct ngbe_hw_stats *hw_stats = NGBE_DEV_STATS(dev);
+ struct ngbe_rx_queue *rxq;
+ int i = 0;
/* HW registers are cleared on read */
hw->offset_loaded = 0;
@@ -1802,6 +1813,12 @@ ngbe_dev_xstats_reset(struct rte_eth_dev *dev)
/* Reset software totals */
memset(hw_stats, 0, sizeof(*hw_stats));
+ /* Reset rxq checksum errors */
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ rxq->csum_err = 0;
+ }
+
return 0;
}
diff --git a/drivers/net/ngbe/ngbe_rxtx.c b/drivers/net/ngbe/ngbe_rxtx.c
index 95e2172ee4..a60421293b 100644
--- a/drivers/net/ngbe/ngbe_rxtx.c
+++ b/drivers/net/ngbe/ngbe_rxtx.c
@@ -972,22 +972,28 @@ rx_desc_status_to_pkt_flags(uint32_t rx_status, uint64_t vlan_flags)
}
static inline uint64_t
-rx_desc_error_to_pkt_flags(uint32_t rx_status)
+rx_desc_error_to_pkt_flags(uint32_t rx_status, struct ngbe_rx_queue *rxq)
{
uint64_t pkt_flags = 0;
/* checksum offload can't be disabled */
- if (rx_status & NGBE_RXD_STAT_IPCS)
+ if (rx_status & NGBE_RXD_STAT_IPCS) {
pkt_flags |= (rx_status & NGBE_RXD_ERR_IPCS
? RTE_MBUF_F_RX_IP_CKSUM_BAD : RTE_MBUF_F_RX_IP_CKSUM_GOOD);
+ rxq->csum_err += !!(rx_status & NGBE_RXD_ERR_IPCS);
+ }
- if (rx_status & NGBE_RXD_STAT_L4CS)
+ if (rx_status & NGBE_RXD_STAT_L4CS) {
pkt_flags |= (rx_status & NGBE_RXD_ERR_L4CS
? RTE_MBUF_F_RX_L4_CKSUM_BAD : RTE_MBUF_F_RX_L4_CKSUM_GOOD);
+ rxq->csum_err += !!(rx_status & NGBE_RXD_ERR_L4CS);
+ }
if (rx_status & NGBE_RXD_STAT_EIPCS &&
- rx_status & NGBE_RXD_ERR_EIPCS)
+ rx_status & NGBE_RXD_ERR_EIPCS) {
pkt_flags |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;
+ rxq->csum_err += !!(rx_status & NGBE_RXD_ERR_EIPCS);
+ }
return pkt_flags;
}
@@ -1060,7 +1066,7 @@ ngbe_rx_scan_hw_ring(struct ngbe_rx_queue *rxq)
/* convert descriptor fields to rte mbuf flags */
pkt_flags = rx_desc_status_to_pkt_flags(s[j],
rxq->vlan_flags);
- pkt_flags |= rx_desc_error_to_pkt_flags(s[j]);
+ pkt_flags |= rx_desc_error_to_pkt_flags(s[j], rxq);
pkt_flags |=
ngbe_rxd_pkt_info_to_pkt_flags(pkt_info[j]);
mb->ol_flags = pkt_flags;
@@ -1393,7 +1399,7 @@ ngbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
pkt_flags = rx_desc_status_to_pkt_flags(staterr,
rxq->vlan_flags);
- pkt_flags |= rx_desc_error_to_pkt_flags(staterr);
+ pkt_flags |= rx_desc_error_to_pkt_flags(staterr, rxq);
pkt_flags |= ngbe_rxd_pkt_info_to_pkt_flags(pkt_info);
rxm->ol_flags = pkt_flags;
rxm->packet_type = ngbe_rxd_pkt_info_to_pkt_type(pkt_info,
@@ -1464,7 +1470,7 @@ ngbe_fill_cluster_head_buf(struct rte_mbuf *head, struct ngbe_rx_desc *desc,
head->vlan_tci = rte_le_to_cpu_16(desc->qw1.hi.tag);
pkt_info = rte_le_to_cpu_32(desc->qw0.dw0);
pkt_flags = rx_desc_status_to_pkt_flags(staterr, rxq->vlan_flags);
- pkt_flags |= rx_desc_error_to_pkt_flags(staterr);
+ pkt_flags |= rx_desc_error_to_pkt_flags(staterr, rxq);
pkt_flags |= ngbe_rxd_pkt_info_to_pkt_flags(pkt_info);
head->ol_flags = pkt_flags;
head->packet_type = ngbe_rxd_pkt_info_to_pkt_type(pkt_info,
@@ -2266,6 +2272,7 @@ ngbe_reset_rx_queue(struct ngbe_adapter *adapter, struct ngbe_rx_queue *rxq)
rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
rxq->rx_tail = 0;
rxq->nb_rx_hold = 0;
+ rxq->csum_err = 0;
rte_pktmbuf_free(rxq->pkt_first_seg);
rxq->pkt_first_seg = NULL;
rxq->pkt_last_seg = NULL;
diff --git a/drivers/net/ngbe/ngbe_rxtx.h b/drivers/net/ngbe/ngbe_rxtx.h
index 8534ec123a..7b96b837ca 100644
--- a/drivers/net/ngbe/ngbe_rxtx.h
+++ b/drivers/net/ngbe/ngbe_rxtx.h
@@ -292,6 +292,7 @@ struct ngbe_rx_queue {
/** hold packets to return to application */
struct rte_mbuf *rx_stage[RTE_PMD_NGBE_RX_MAX_BURST * 2];
const struct rte_memzone *mz;
+ uint64_t csum_err;
};
/**
diff --git a/drivers/net/ngbe/ngbe_rxtx_vec_neon.c b/drivers/net/ngbe/ngbe_rxtx_vec_neon.c
index 46391c9400..79685de181 100644
--- a/drivers/net/ngbe/ngbe_rxtx_vec_neon.c
+++ b/drivers/net/ngbe/ngbe_rxtx_vec_neon.c
@@ -222,7 +222,7 @@ _recv_raw_pkts_vec(struct ngbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
volatile struct ngbe_rx_desc *rxdp;
struct ngbe_rx_entry *sw_ring;
uint16_t nb_pkts_recd;
- int pos;
+ int pos, i;
uint8x16_t shuf_msk = {
0xFF, 0xFF,
0xFF, 0xFF, /* skip 32 bits pkt_type */
@@ -331,6 +331,13 @@ _recv_raw_pkts_vec(struct ngbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
desc_to_olflags_v(sterr_tmp1, sterr_tmp2, staterr, vlan_flags,
&rx_pkts[pos]);
+ for (i = 0; i < RTE_NGBE_DESCS_PER_LOOP; i++) {
+ if (rx_pkts[pos + i]->ol_flags &
+ (RTE_MBUF_F_RX_IP_CKSUM_BAD |
+ RTE_MBUF_F_RX_L4_CKSUM_BAD))
+ rxq->csum_err++;
+ }
+
/* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb4), crc_adjust);
pkt_mb4 = vreinterpretq_u8_u16(tmp);
diff --git a/drivers/net/ngbe/ngbe_rxtx_vec_sse.c b/drivers/net/ngbe/ngbe_rxtx_vec_sse.c
index 19c69cdfa6..474101c600 100644
--- a/drivers/net/ngbe/ngbe_rxtx_vec_sse.c
+++ b/drivers/net/ngbe/ngbe_rxtx_vec_sse.c
@@ -244,7 +244,7 @@ _recv_raw_pkts_vec(struct ngbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
volatile struct ngbe_rx_desc *rxdp;
struct ngbe_rx_entry *sw_ring;
uint16_t nb_pkts_recd;
- int pos;
+ int pos, i;
uint64_t var;
__m128i shuf_msk;
__m128i crc_adjust = _mm_set_epi16(0, 0, 0, /* ignore non-length fields */
@@ -412,6 +412,13 @@ _recv_raw_pkts_vec(struct ngbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
/* set ol_flags with vlan packet type */
desc_to_olflags_v(descs, mbuf_init, vlan_flags, &rx_pkts[pos]);
+ for (i = 0; i < RTE_NGBE_DESCS_PER_LOOP; i++) {
+ if (rx_pkts[pos + i]->ol_flags &
+ (RTE_MBUF_F_RX_IP_CKSUM_BAD |
+ RTE_MBUF_F_RX_L4_CKSUM_BAD))
+ rxq->csum_err++;
+ }
+
/* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
pkt_mb4 = _mm_add_epi16(pkt_mb4, crc_adjust);
pkt_mb3 = _mm_add_epi16(pkt_mb3, crc_adjust);
--
2.48.1
^ permalink raw reply [flat|nested] 21+ messages in thread
* [PATCH 03/19] net/txgbe: reduce memory size of ring descriptors
2025-10-27 3:15 [PATCH 00/19] Wangxun Fixes Jiawen Wu
2025-10-27 3:15 ` [PATCH 01/19] net/txgbe: fix hardware statistic rx_l3_l4_xsum_error Jiawen Wu
2025-10-27 3:15 ` [PATCH 02/19] net/ngbe: " Jiawen Wu
@ 2025-10-27 3:15 ` Jiawen Wu
2025-10-27 3:15 ` [PATCH 04/19] net/ngbe: " Jiawen Wu
` (16 subsequent siblings)
19 siblings, 0 replies; 21+ messages in thread
From: Jiawen Wu @ 2025-10-27 3:15 UTC (permalink / raw)
To: dev; +Cc: zaiyuwang, Jiawen Wu, stable
The memory for the ring descriptors was allocated based on the maximum
ring size, which is not friendly to our hardware on some domestic
platforms. Change it to allocate based on the real ring size.
Fixes: 226bf98eda87 ("net/txgbe: add Rx and Tx queues setup and release")
Cc: stable@dpdk.org
Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
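A rough sizing comparison, assuming an illustrative 16-byte Tx
descriptor and a TXGBE_RING_DESC_MAX of 8192 (both values are
assumptions, not taken from the driver headers):

#include <stdio.h>

int main(void)
{
        const unsigned int desc_sz  = 16;   /* assumed descriptor size */
        const unsigned int ring_max = 8192; /* assumed TXGBE_RING_DESC_MAX */
        const unsigned int nb_desc  = 512;  /* a typical configured ring */

        /* Before: every queue reserved a memzone for the maximum ring. */
        printf("max-size alloc:  %u bytes\n", desc_sz * ring_max);
        /* After: the memzone covers only the ring actually requested. */
        printf("real-size alloc: %u bytes\n", desc_sz * nb_desc);
        return 0;
}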
drivers/net/txgbe/txgbe_rxtx.c | 20 +++++++-------------
1 file changed, 7 insertions(+), 13 deletions(-)
diff --git a/drivers/net/txgbe/txgbe_rxtx.c b/drivers/net/txgbe/txgbe_rxtx.c
index c606180741..d77db1efa2 100644
--- a/drivers/net/txgbe/txgbe_rxtx.c
+++ b/drivers/net/txgbe/txgbe_rxtx.c
@@ -2521,13 +2521,9 @@ txgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
if (txq == NULL)
return -ENOMEM;
- /*
- * Allocate TX ring hardware descriptors. A memzone large enough to
- * handle the maximum ring size is allocated in order to allow for
- * resizing in later calls to the queue setup function.
- */
+ /* Allocate TX ring hardware descriptors. */
tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
- sizeof(struct txgbe_tx_desc) * TXGBE_RING_DESC_MAX,
+ sizeof(struct txgbe_tx_desc) * nb_desc,
TXGBE_ALIGN, socket_id);
if (tz == NULL) {
txgbe_tx_queue_release(txq);
@@ -2781,6 +2777,7 @@ txgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t len;
struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
uint64_t offloads;
+ uint32_t size;
PMD_INIT_FUNC_TRACE();
hw = TXGBE_DEV_HW(dev);
@@ -2831,13 +2828,10 @@ txgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
*/
rxq->pkt_type_mask = TXGBE_PTID_MASK;
- /*
- * Allocate RX ring hardware descriptors. A memzone large enough to
- * handle the maximum ring size is allocated in order to allow for
- * resizing in later calls to the queue setup function.
- */
+ /* Allocate RX ring hardware descriptors. */
+ size = (nb_desc + RTE_PMD_TXGBE_RX_MAX_BURST) * sizeof(struct txgbe_rx_desc);
rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
- RX_RING_SZ, TXGBE_ALIGN, socket_id);
+ size, TXGBE_ALIGN, socket_id);
if (rz == NULL) {
txgbe_rx_queue_release(rxq);
return -ENOMEM;
@@ -2847,7 +2841,7 @@ txgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
/*
* Zero init all the descriptors in the ring.
*/
- memset(rz->addr, 0, RX_RING_SZ);
+ memset(rz->addr, 0, size);
/*
* Modified to setup VFRDT for Virtual Function
--
2.48.1
^ permalink raw reply [flat|nested] 21+ messages in thread
* [PATCH 04/19] net/ngbe: reduce memory size of ring descriptors
2025-10-27 3:15 [PATCH 00/19] Wangxun Fixes Jiawen Wu
` (2 preceding siblings ...)
2025-10-27 3:15 ` [PATCH 03/19] net/txgbe: reduce memory size of ring descriptors Jiawen Wu
@ 2025-10-27 3:15 ` Jiawen Wu
2025-10-27 3:15 ` [PATCH 05/19] net/txgbe: fix VF Rx buffer size in config register Jiawen Wu
` (15 subsequent siblings)
19 siblings, 0 replies; 21+ messages in thread
From: Jiawen Wu @ 2025-10-27 3:15 UTC (permalink / raw)
To: dev; +Cc: zaiyuwang, Jiawen Wu, stable
The memory for the ring descriptors was allocated based on the maximum
ring size, which is not friendly to our hardware on some domestic
platforms. Change it to allocate based on the real ring size.
Fixes: 43b7e5ea60ac ("net/ngbe: support Rx queue setup/release")
Cc: stable@dpdk.org
Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
drivers/net/ngbe/ngbe_rxtx.c | 20 +++++++-------------
1 file changed, 7 insertions(+), 13 deletions(-)
diff --git a/drivers/net/ngbe/ngbe_rxtx.c b/drivers/net/ngbe/ngbe_rxtx.c
index a60421293b..03ada844bf 100644
--- a/drivers/net/ngbe/ngbe_rxtx.c
+++ b/drivers/net/ngbe/ngbe_rxtx.c
@@ -2058,13 +2058,9 @@ ngbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
if (txq == NULL)
return -ENOMEM;
- /*
- * Allocate Tx ring hardware descriptors. A memzone large enough to
- * handle the maximum ring size is allocated in order to allow for
- * resizing in later calls to the queue setup function.
- */
+ /* Allocate Tx ring hardware descriptors. */
tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
- sizeof(struct ngbe_tx_desc) * NGBE_RING_DESC_MAX,
+ sizeof(struct ngbe_tx_desc) * nb_desc,
NGBE_ALIGN, socket_id);
if (tz == NULL) {
ngbe_tx_queue_release(txq);
@@ -2324,6 +2320,7 @@ ngbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t len;
struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
uint64_t offloads;
+ uint32_t size;
PMD_INIT_FUNC_TRACE();
hw = ngbe_dev_hw(dev);
@@ -2357,13 +2354,10 @@ ngbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
rxq->rx_deferred_start = rx_conf->rx_deferred_start;
rxq->offloads = offloads;
- /*
- * Allocate Rx ring hardware descriptors. A memzone large enough to
- * handle the maximum ring size is allocated in order to allow for
- * resizing in later calls to the queue setup function.
- */
+ /* Allocate Rx ring hardware descriptors. */
+ size = (nb_desc + RTE_PMD_NGBE_RX_MAX_BURST) * sizeof(struct ngbe_rx_desc);
rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
- RX_RING_SZ, NGBE_ALIGN, socket_id);
+ size, NGBE_ALIGN, socket_id);
if (rz == NULL) {
ngbe_rx_queue_release(rxq);
return -ENOMEM;
@@ -2373,7 +2367,7 @@ ngbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
/*
* Zero init all the descriptors in the ring.
*/
- memset(rz->addr, 0, RX_RING_SZ);
+ memset(rz->addr, 0, size);
rxq->rdt_reg_addr = NGBE_REG_ADDR(hw, NGBE_RXWP(rxq->reg_idx));
rxq->rdh_reg_addr = NGBE_REG_ADDR(hw, NGBE_RXRP(rxq->reg_idx));
--
2.48.1
^ permalink raw reply [flat|nested] 21+ messages in thread
* [PATCH 05/19] net/txgbe: fix VF Rx buffer size in config register
2025-10-27 3:15 [PATCH 00/19] Wangxun Fixes Jiawen Wu
` (3 preceding siblings ...)
2025-10-27 3:15 ` [PATCH 04/19] net/ngbe: " Jiawen Wu
@ 2025-10-27 3:15 ` Jiawen Wu
2025-10-27 3:15 ` [PATCH 06/19] net/ngbe: " Jiawen Wu
` (14 subsequent siblings)
19 siblings, 0 replies; 21+ messages in thread
From: Jiawen Wu @ 2025-10-27 3:15 UTC (permalink / raw)
To: dev; +Cc: zaiyuwang, Jiawen Wu, stable
Refer to commit 8a3ef4b89e6d ("net/txgbe: fix Rx buffer size in
config register").
When the buffer size is rounded up to 1K to configure the register,
the hardware will receive packets exceeding the buffer size in LRO
mode. This causes a segmentation fault in the receive function.
Fixes: 92144bb36c6f ("net/txgbe: support VF Rx/Tx")
Cc: stable@dpdk.org
Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
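A worked example of the rounding change, assuming the usual macro
definitions and a mempool with a 2000-byte data room and 128-byte
headroom (illustrative values):

#define ROUND_UP(x, y)   ((((x) + (y) - 1) / (y)) * (y))
#define ROUND_DOWN(x, y) (((x) / (y)) * (y))

/* buf_size = 2000 - 128 = 1872 bytes of real mbuf space.
 * ROUND_UP(1872, 1024)   == 2048 -> the register advertises more room
 *                                   than the mbuf has, so an LRO packet
 *                                   can overrun the buffer.
 * ROUND_DOWN(1872, 1024) == 1024 -> the register never advertises more
 *                                   than the mbuf can hold.
 */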
drivers/net/txgbe/txgbe_rxtx.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/net/txgbe/txgbe_rxtx.c b/drivers/net/txgbe/txgbe_rxtx.c
index d77db1efa2..a3472bcf34 100644
--- a/drivers/net/txgbe/txgbe_rxtx.c
+++ b/drivers/net/txgbe/txgbe_rxtx.c
@@ -5256,7 +5256,7 @@ txgbevf_dev_rx_init(struct rte_eth_dev *dev)
*/
buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
RTE_PKTMBUF_HEADROOM);
- buf_size = ROUND_UP(buf_size, 1 << 10);
+ buf_size = ROUND_DOWN(buf_size, 1 << 10);
srrctl |= TXGBE_RXCFG_PKTLEN(buf_size);
/*
--
2.48.1
^ permalink raw reply [flat|nested] 21+ messages in thread
* [PATCH 06/19] net/ngbe: fix VF Rx buffer size in config register
2025-10-27 3:15 [PATCH 00/19] Wangxun Fixes Jiawen Wu
` (4 preceding siblings ...)
2025-10-27 3:15 ` [PATCH 05/19] net/txgbe: fix VF Rx buffer size in config register Jiawen Wu
@ 2025-10-27 3:15 ` Jiawen Wu
2025-10-27 3:15 ` [PATCH 07/19] net/txgbe: remove duplicate txq assignment Jiawen Wu
` (13 subsequent siblings)
19 siblings, 0 replies; 21+ messages in thread
From: Jiawen Wu @ 2025-10-27 3:15 UTC (permalink / raw)
To: dev; +Cc: zaiyuwang, Jiawen Wu, stable
Refer to commit 8a3ef4b89e6d ("net/txgbe: fix Rx buffer size in
config register").
When the buffer size is rounded up to 1K to configure the register,
the hardware will receive packets exceeding the buffer size in scatter
mode. This causes a segmentation fault in the receive function.
Fixes: 711a06e896ba ("net/ngbe: add datapath init for VF device")
Cc: stable@dpdk.org
Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
drivers/net/ngbe/ngbe_rxtx.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/net/ngbe/ngbe_rxtx.c b/drivers/net/ngbe/ngbe_rxtx.c
index 03ada844bf..66d72c88df 100644
--- a/drivers/net/ngbe/ngbe_rxtx.c
+++ b/drivers/net/ngbe/ngbe_rxtx.c
@@ -3559,7 +3559,7 @@ ngbevf_dev_rx_init(struct rte_eth_dev *dev)
*/
buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
RTE_PKTMBUF_HEADROOM);
- buf_size = ROUND_UP(buf_size, 1 << 10);
+ buf_size = ROUND_DOWN(buf_size, 1 << 10);
srrctl |= NGBE_RXCFG_PKTLEN(buf_size);
/*
--
2.48.1
^ permalink raw reply [flat|nested] 21+ messages in thread
* [PATCH 07/19] net/txgbe: remove duplicate txq assignment
2025-10-27 3:15 [PATCH 00/19] Wangxun Fixes Jiawen Wu
` (5 preceding siblings ...)
2025-10-27 3:15 ` [PATCH 06/19] net/ngbe: " Jiawen Wu
@ 2025-10-27 3:15 ` Jiawen Wu
2025-10-27 3:15 ` [PATCH 08/19] net/txgbe: add device arguments for FDIR Jiawen Wu
` (12 subsequent siblings)
19 siblings, 0 replies; 21+ messages in thread
From: Jiawen Wu @ 2025-10-27 3:15 UTC (permalink / raw)
To: dev; +Cc: zaiyuwang, Jiawen Wu, stable
Duplicate code was introduced by a previous patch; clean it up.
Fixes: 0eabdfcd4af4 ("net/txgbe: enable Tx descriptor error interrupt")
Cc: stable@dpdk.org
Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
drivers/net/txgbe/txgbe_rxtx.c | 1 -
1 file changed, 1 deletion(-)
diff --git a/drivers/net/txgbe/txgbe_rxtx.c b/drivers/net/txgbe/txgbe_rxtx.c
index a3472bcf34..78199ea00b 100644
--- a/drivers/net/txgbe/txgbe_rxtx.c
+++ b/drivers/net/txgbe/txgbe_rxtx.c
@@ -889,7 +889,6 @@ txgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
tx_offload.data[0] = 0;
tx_offload.data[1] = 0;
- txq = tx_queue;
sw_ring = txq->sw_ring;
txr = txq->tx_ring;
tx_id = txq->tx_tail;
--
2.48.1
^ permalink raw reply [flat|nested] 21+ messages in thread
* [PATCH 08/19] net/txgbe: add device arguments for FDIR
2025-10-27 3:15 [PATCH 00/19] Wangxun Fixes Jiawen Wu
` (6 preceding siblings ...)
2025-10-27 3:15 ` [PATCH 07/19] net/txgbe: remove duplicate txq assignment Jiawen Wu
@ 2025-10-27 3:15 ` Jiawen Wu
2025-10-27 3:15 ` [PATCH 09/19] net/txgbe: fix the maximum number of FDIR filter Jiawen Wu
` (11 subsequent siblings)
19 siblings, 0 replies; 21+ messages in thread
From: Jiawen Wu @ 2025-10-27 3:15 UTC (permalink / raw)
To: dev; +Cc: zaiyuwang, Jiawen Wu, stable
Since the FDIR configuration was deprecated in the generic ethdev
device configuration and moved into the device private data, there is
no way to configure the FDIR parameters.
Add two device arguments, "pkt-filter-size" and "pkt-filter-drop-queue",
which keep the previous naming convention.
Fixes: 5007ac13189d ("ethdev: remove deprecated Flow Director configuration")
Cc: stable@dpdk.org
Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
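A hypothetical usage sketch (the PCI address is illustrative; the
argument names are the ones added below):

dpdk-testpmd -a 0000:01:00.0,pkt-filter-size=2,pkt-filter-drop-queue=127 -- -i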
drivers/net/txgbe/base/txgbe_type.h | 4 ++++
drivers/net/txgbe/txgbe_ethdev.c | 20 ++++++++++++++++++--
2 files changed, 22 insertions(+), 2 deletions(-)
diff --git a/drivers/net/txgbe/base/txgbe_type.h b/drivers/net/txgbe/base/txgbe_type.h
index 07b443c2e0..b5dbc9b755 100644
--- a/drivers/net/txgbe/base/txgbe_type.h
+++ b/drivers/net/txgbe/base/txgbe_type.h
@@ -743,6 +743,8 @@ struct txgbe_phy_info {
#define TXGBE_DEVARG_FFE_MAIN "ffe_main"
#define TXGBE_DEVARG_FFE_PRE "ffe_pre"
#define TXGBE_DEVARG_FFE_POST "ffe_post"
+#define TXGBE_DEVARG_FDIR_PBALLOC "pkt-filter-size"
+#define TXGBE_DEVARG_FDIR_DROP_QUEUE "pkt-filter-drop-queue"
#define TXGBE_DEVARG_TX_HEAD_WB "tx_headwb"
#define TXGBE_DEVARG_TX_HEAD_WB_SIZE "tx_headwb_size"
#define TXGBE_DEVARG_RX_DESC_MERGE "rx_desc_merge"
@@ -756,6 +758,8 @@ static const char * const txgbe_valid_arguments[] = {
TXGBE_DEVARG_FFE_MAIN,
TXGBE_DEVARG_FFE_PRE,
TXGBE_DEVARG_FFE_POST,
+ TXGBE_DEVARG_FDIR_PBALLOC,
+ TXGBE_DEVARG_FDIR_DROP_QUEUE,
TXGBE_DEVARG_TX_HEAD_WB,
TXGBE_DEVARG_TX_HEAD_WB_SIZE,
TXGBE_DEVARG_RX_DESC_MERGE,
diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index cbb2ea815f..e9bbf8ea72 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -524,8 +524,12 @@ txgbe_handle_devarg(__rte_unused const char *key, const char *value,
}
static void
-txgbe_parse_devargs(struct txgbe_hw *hw, struct rte_devargs *devargs)
+txgbe_parse_devargs(struct rte_eth_dev *dev)
{
+ struct rte_eth_fdir_conf *fdir_conf = TXGBE_DEV_FDIR_CONF(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_devargs *devargs = pci_dev->device.devargs;
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
struct rte_kvargs *kvlist;
u16 auto_neg = 1;
u16 poll = 0;
@@ -535,6 +539,9 @@ txgbe_parse_devargs(struct txgbe_hw *hw, struct rte_devargs *devargs)
u16 ffe_main = 27;
u16 ffe_pre = 8;
u16 ffe_post = 44;
+ /* FDIR args */
+ u8 pballoc = 0;
+ u8 drop_queue = 127;
/* New devargs for amberlite config */
u16 tx_headwb = 1;
u16 tx_headwb_size = 16;
@@ -563,6 +570,10 @@ txgbe_parse_devargs(struct txgbe_hw *hw, struct rte_devargs *devargs)
&txgbe_handle_devarg, &ffe_pre);
rte_kvargs_process(kvlist, TXGBE_DEVARG_FFE_POST,
&txgbe_handle_devarg, &ffe_post);
+ rte_kvargs_process(kvlist, TXGBE_DEVARG_FDIR_PBALLOC,
+ &txgbe_handle_devarg, &pballoc);
+ rte_kvargs_process(kvlist, TXGBE_DEVARG_FDIR_DROP_QUEUE,
+ &txgbe_handle_devarg, &drop_queue);
rte_kvargs_process(kvlist, TXGBE_DEVARG_TX_HEAD_WB,
&txgbe_handle_devarg, &tx_headwb);
rte_kvargs_process(kvlist, TXGBE_DEVARG_TX_HEAD_WB_SIZE,
@@ -583,6 +594,9 @@ txgbe_parse_devargs(struct txgbe_hw *hw, struct rte_devargs *devargs)
hw->phy.ffe_main = ffe_main;
hw->phy.ffe_pre = ffe_pre;
hw->phy.ffe_post = ffe_post;
+
+ fdir_conf->pballoc = pballoc;
+ fdir_conf->drop_queue = drop_queue;
}
static int
@@ -671,7 +685,7 @@ eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
hw->isb_dma = TMZ_PADDR(mz);
hw->isb_mem = TMZ_VADDR(mz);
- txgbe_parse_devargs(hw, pci_dev->device.devargs);
+ txgbe_parse_devargs(eth_dev);
/* Initialize the shared code (base driver) */
err = txgbe_init_shared_code(hw);
if (err != 0) {
@@ -6034,6 +6048,8 @@ RTE_PMD_REGISTER_PARAM_STRING(net_txgbe,
TXGBE_DEVARG_FFE_MAIN "=<uint16>"
TXGBE_DEVARG_FFE_PRE "=<uint16>"
TXGBE_DEVARG_FFE_POST "=<uint16>"
+ TXGBE_DEVARG_FDIR_PBALLOC "=<0|1|2>"
+ TXGBE_DEVARG_FDIR_DROP_QUEUE "=<uint8>"
TXGBE_DEVARG_TX_HEAD_WB "=<0|1>"
TXGBE_DEVARG_TX_HEAD_WB_SIZE "=<1|16>"
TXGBE_DEVARG_RX_DESC_MERGE "=<0|1>");
--
2.48.1
^ permalink raw reply [flat|nested] 21+ messages in thread
* [PATCH 09/19] net/txgbe: fix the maximum number of FDIR filter
2025-10-27 3:15 [PATCH 00/19] Wangxun Fixes Jiawen Wu
` (7 preceding siblings ...)
2025-10-27 3:15 ` [PATCH 08/19] net/txgbe: add device arguments for FDIR Jiawen Wu
@ 2025-10-27 3:15 ` Jiawen Wu
2025-10-27 3:15 ` [PATCH 10/19] net/txgbe: fix FDIR mode not being cleared Jiawen Wu
` (10 subsequent siblings)
19 siblings, 0 replies; 21+ messages in thread
From: Jiawen Wu @ 2025-10-27 3:15 UTC (permalink / raw)
To: dev; +Cc: zaiyuwang, Jiawen Wu, stable
The maximum number of FDIR filters on the hardware is determined by
the memory space allocated for them (i.e. fdir_conf.pballoc). But the
FDIR hash map is created based on TXGBE_MAX_FDIR_FILTER_NUM, and the
two do not match. As a result, no error was reported when creating
more FDIR rules than the hardware maximum allows.
Fixes: 635c21354f9a ("net/txgbe: add flow director filter init and uninit")
Cc: stable@dpdk.org
Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
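For reference, the resulting table sizes, assuming the usual
64K/128K/256K FDIR packet-buffer allocations:

/* max_fdir_num = (1024 << (pballoc + 1)) - 2
 *   pballoc = 0 -> 2046 entries
 *   pballoc = 1 -> 4094 entries
 *   pballoc = 2 -> 8190 entries
 */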
drivers/net/txgbe/txgbe_ethdev.c | 6 ++++--
drivers/net/txgbe/txgbe_fdir.c | 4 +++-
2 files changed, 7 insertions(+), 3 deletions(-)
diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index e9bbf8ea72..f650c5b7a4 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -935,11 +935,13 @@ static int txgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev)
static int txgbe_fdir_filter_init(struct rte_eth_dev *eth_dev)
{
+ struct rte_eth_fdir_conf *fdir_conf = TXGBE_DEV_FDIR_CONF(eth_dev);
struct txgbe_hw_fdir_info *fdir_info = TXGBE_DEV_FDIR(eth_dev);
char fdir_hash_name[RTE_HASH_NAMESIZE];
+ u16 max_fdir_num = (1024 << (fdir_conf->pballoc + 1)) - 2;
struct rte_hash_parameters fdir_hash_params = {
.name = fdir_hash_name,
- .entries = TXGBE_MAX_FDIR_FILTER_NUM,
+ .entries = max_fdir_num,
.key_len = sizeof(struct txgbe_atr_input),
.hash_func = rte_hash_crc,
.hash_func_init_val = 0,
@@ -956,7 +958,7 @@ static int txgbe_fdir_filter_init(struct rte_eth_dev *eth_dev)
}
fdir_info->hash_map = rte_zmalloc("txgbe",
sizeof(struct txgbe_fdir_filter *) *
- TXGBE_MAX_FDIR_FILTER_NUM,
+ max_fdir_num,
0);
if (!fdir_info->hash_map) {
PMD_INIT_LOG(ERR,
diff --git a/drivers/net/txgbe/txgbe_fdir.c b/drivers/net/txgbe/txgbe_fdir.c
index 0efd43b59a..631dec69e8 100644
--- a/drivers/net/txgbe/txgbe_fdir.c
+++ b/drivers/net/txgbe/txgbe_fdir.c
@@ -959,6 +959,7 @@ txgbe_fdir_filter_restore(struct rte_eth_dev *dev)
int
txgbe_clear_all_fdir_filter(struct rte_eth_dev *dev)
{
+ struct rte_eth_fdir_conf *fdir_conf = TXGBE_DEV_FDIR_CONF(dev);
struct txgbe_hw_fdir_info *fdir_info = TXGBE_DEV_FDIR(dev);
struct txgbe_fdir_filter *fdir_filter;
struct txgbe_fdir_filter *filter_flag;
@@ -967,7 +968,8 @@ txgbe_clear_all_fdir_filter(struct rte_eth_dev *dev)
/* flush flow director */
rte_hash_reset(fdir_info->hash_handle);
memset(fdir_info->hash_map, 0,
- sizeof(struct txgbe_fdir_filter *) * TXGBE_MAX_FDIR_FILTER_NUM);
+ sizeof(struct txgbe_fdir_filter *) *
+ ((1024 << (fdir_conf->pballoc + 1)) - 2));
filter_flag = TAILQ_FIRST(&fdir_info->fdir_list);
while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
TAILQ_REMOVE(&fdir_info->fdir_list,
--
2.48.1
^ permalink raw reply [flat|nested] 21+ messages in thread
* [PATCH 10/19] net/txgbe: fix FDIR mode not being cleared
2025-10-27 3:15 [PATCH 00/19] Wangxun Fixes Jiawen Wu
` (8 preceding siblings ...)
2025-10-27 3:15 ` [PATCH 09/19] net/txgbe: fix the maximum number of FDIR filter Jiawen Wu
@ 2025-10-27 3:15 ` Jiawen Wu
2025-10-27 3:15 ` [PATCH 11/19] net/txgbe: fix FDIR drop action for L4 match packets Jiawen Wu
` (9 subsequent siblings)
19 siblings, 0 replies; 21+ messages in thread
From: Jiawen Wu @ 2025-10-27 3:15 UTC (permalink / raw)
To: dev; +Cc: zaiyuwang, Jiawen Wu, stable
When all FDIR flow rules are cleared, the FDIR mode is not cleared.
This causes the creation of new FDIR flow rules to fail when their
mode differs from that of the previously deleted ones.
Fixes: 6bde42fe7fa5 ("net/txgbe: flush all filters")
Cc: stable@dpdk.org
Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
drivers/net/txgbe/txgbe_fdir.c | 1 +
drivers/net/txgbe/txgbe_flow.c | 2 ++
2 files changed, 3 insertions(+)
diff --git a/drivers/net/txgbe/txgbe_fdir.c b/drivers/net/txgbe/txgbe_fdir.c
index 631dec69e8..77d0cc4c30 100644
--- a/drivers/net/txgbe/txgbe_fdir.c
+++ b/drivers/net/txgbe/txgbe_fdir.c
@@ -970,6 +970,7 @@ txgbe_clear_all_fdir_filter(struct rte_eth_dev *dev)
memset(fdir_info->hash_map, 0,
sizeof(struct txgbe_fdir_filter *) *
((1024 << (fdir_conf->pballoc + 1)) - 2));
+ fdir_conf->mode = RTE_FDIR_MODE_NONE;
filter_flag = TAILQ_FIRST(&fdir_info->fdir_list);
while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
TAILQ_REMOVE(&fdir_info->fdir_list,
diff --git a/drivers/net/txgbe/txgbe_flow.c b/drivers/net/txgbe/txgbe_flow.c
index 31af3593ed..25cf0db316 100644
--- a/drivers/net/txgbe/txgbe_flow.c
+++ b/drivers/net/txgbe/txgbe_flow.c
@@ -3429,6 +3429,7 @@ txgbe_flow_destroy(struct rte_eth_dev *dev,
struct txgbe_fdir_rule_ele *fdir_rule_ptr;
struct txgbe_flow_mem *txgbe_flow_mem_ptr;
struct txgbe_hw_fdir_info *fdir_info = TXGBE_DEV_FDIR(dev);
+ struct rte_eth_fdir_conf *fdir_conf = TXGBE_DEV_FDIR_CONF(dev);
struct txgbe_rss_conf_ele *rss_filter_ptr;
switch (filter_type) {
@@ -3488,6 +3489,7 @@ txgbe_flow_destroy(struct rte_eth_dev *dev,
fdir_info->mask_added = false;
fdir_info->flex_relative = false;
fdir_info->flex_bytes_offset = 0;
+ fdir_conf->mode = RTE_FDIR_MODE_NONE;
}
}
break;
--
2.48.1
^ permalink raw reply [flat|nested] 21+ messages in thread
* [PATCH 11/19] net/txgbe: fix FDIR drop action for L4 match packets
2025-10-27 3:15 [PATCH 00/19] Wangxun Fixes Jiawen Wu
` (9 preceding siblings ...)
2025-10-27 3:15 ` [PATCH 10/19] net/txgbe: fix FDIR mode not being cleared Jiawen Wu
@ 2025-10-27 3:15 ` Jiawen Wu
2025-10-27 3:15 ` [PATCH 12/19] net/txgbe: fix to create FDIR filter for tunnel SCTP packet Jiawen Wu
` (8 subsequent siblings)
19 siblings, 0 replies; 21+ messages in thread
From: Jiawen Wu @ 2025-10-27 3:15 UTC (permalink / raw)
To: dev; +Cc: zaiyuwang, Jiawen Wu, stable
FDIR flow rules support dropping packets and are not limited to L3
packets. Remove the redundant limitation.
Fixes: b973ee26747a ("net/txgbe: parse flow director filter")
Cc: stable@dpdk.org
Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
drivers/net/txgbe/txgbe_flow.c | 7 -------
1 file changed, 7 deletions(-)
diff --git a/drivers/net/txgbe/txgbe_flow.c b/drivers/net/txgbe/txgbe_flow.c
index 25cf0db316..7cf079a1cf 100644
--- a/drivers/net/txgbe/txgbe_flow.c
+++ b/drivers/net/txgbe/txgbe_flow.c
@@ -2839,7 +2839,6 @@ txgbe_parse_fdir_filter(struct rte_eth_dev *dev,
struct rte_flow_error *error)
{
int ret;
- struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
struct rte_eth_fdir_conf *fdir_conf = TXGBE_DEV_FDIR_CONF(dev);
ret = txgbe_parse_fdir_filter_normal(dev, attr, pattern,
@@ -2853,12 +2852,6 @@ txgbe_parse_fdir_filter(struct rte_eth_dev *dev,
return ret;
step_next:
-
- if (hw->mac.type == txgbe_mac_sp &&
- rule->fdirflags == TXGBE_FDIRPICMD_DROP &&
- (rule->input.src_port != 0 || rule->input.dst_port != 0))
- return -ENOTSUP;
-
if (fdir_conf->mode == RTE_FDIR_MODE_NONE) {
fdir_conf->mode = rule->mode;
ret = txgbe_fdir_configure(dev);
--
2.48.1
^ permalink raw reply [flat|nested] 21+ messages in thread
* [PATCH 12/19] net/txgbe: fix to create FDIR filter for tunnel SCTP packet
2025-10-27 3:15 [PATCH 00/19] Wangxun Fixes Jiawen Wu
` (10 preceding siblings ...)
2025-10-27 3:15 ` [PATCH 11/19] net/txgbe: fix FDIR drop action for L4 match packets Jiawen Wu
@ 2025-10-27 3:15 ` Jiawen Wu
2025-10-27 3:15 ` [PATCH 13/19] net/txgbe: filter FDIR match flex bytes for tunnel packets Jiawen Wu
` (7 subsequent siblings)
19 siblings, 0 replies; 21+ messages in thread
From: Jiawen Wu @ 2025-10-27 3:15 UTC (permalink / raw)
To: dev; +Cc: zaiyuwang, Jiawen Wu, stable
This commit is the same as commit 0db38d54b57a ("net/txgbe: fix to
create FDIR filter for SCTP packet").
The check of the SCTP item mask is repeated and wrong; fix it to make
it work.
Fixes: a1851465f825 ("net/txgbe: fix to create FDIR filter for tunnel packet")
Cc: stable@dpdk.org
Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
drivers/net/txgbe/txgbe_flow.c | 13 -------------
1 file changed, 13 deletions(-)
diff --git a/drivers/net/txgbe/txgbe_flow.c b/drivers/net/txgbe/txgbe_flow.c
index 7cf079a1cf..5b03a35949 100644
--- a/drivers/net/txgbe/txgbe_flow.c
+++ b/drivers/net/txgbe/txgbe_flow.c
@@ -2798,19 +2798,6 @@ txgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
rule->input.dst_port =
sctp_spec->hdr.dst_port;
}
- /* others even sctp port is not supported */
- sctp_mask = item->mask;
- if (sctp_mask &&
- (sctp_mask->hdr.src_port ||
- sctp_mask->hdr.dst_port ||
- sctp_mask->hdr.tag ||
- sctp_mask->hdr.cksum)) {
- memset(rule, 0, sizeof(struct txgbe_fdir_rule));
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item, "Not supported by fdir filter");
- return -rte_errno;
- }
}
if (item->type != RTE_FLOW_ITEM_TYPE_END) {
--
2.48.1
^ permalink raw reply [flat|nested] 21+ messages in thread
* [PATCH 13/19] net/txgbe: filter FDIR match flex bytes for tunnel packets
2025-10-27 3:15 [PATCH 00/19] Wangxun Fixes Jiawen Wu
` (11 preceding siblings ...)
2025-10-27 3:15 ` [PATCH 12/19] net/txgbe: fix to create FDIR filter for tunnel SCTP packet Jiawen Wu
@ 2025-10-27 3:15 ` Jiawen Wu
2025-10-27 3:15 ` [PATCH 14/19] net/txgbe: fix FDIR rule raw relative for L3 packets Jiawen Wu
` (6 subsequent siblings)
19 siblings, 0 replies; 21+ messages in thread
From: Jiawen Wu @ 2025-10-27 3:15 UTC (permalink / raw)
To: dev; +Cc: zaiyuwang, Jiawen Wu, stable
For tunnel packets, the RAW pattern is also supported for matching in
FDIR rules. Fix the code to process this field.
Fixes: a1851465f825 ("net/txgbe: fix to create FDIR filter for tunnel packet")
Cc: stable@dpdk.org
Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
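A minimal sketch of a RAW item that the tunnel parser below accepts;
all field values are illustrative, and the spec pattern is a
4-character hex string as expected by the sscanf() conversion:

#include <rte_flow.h>

static const uint8_t flex_pattern[] = "ab01"; /* two flex bytes, in hex */
static const uint8_t flex_pattern_mask[4] = { 0xff, 0xff, 0xff, 0xff };

static const struct rte_flow_item_raw flex_spec = {
        .relative = 1, /* offset counted from the matched L4 header */
        .search   = 0,
        .offset   = 2, /* must be even and within the flex range */
        .limit    = 0,
        .length   = 4, /* length of the hex string */
        .pattern  = flex_pattern,
};

static const struct rte_flow_item_raw flex_mask = {
        .relative = 1,
        .search   = 1,
        .offset   = (int32_t)0xffffffff,
        .limit    = 0xffff,
        .length   = 0xffff,
        .pattern  = flex_pattern_mask,
};

static const struct rte_flow_item flex_item = {
        .type = RTE_FLOW_ITEM_TYPE_RAW,
        .spec = &flex_spec,
        .mask = &flex_mask,
};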
drivers/net/txgbe/txgbe_flow.c | 125 ++++++++++++++++++++++++++++++++-
1 file changed, 123 insertions(+), 2 deletions(-)
diff --git a/drivers/net/txgbe/txgbe_flow.c b/drivers/net/txgbe/txgbe_flow.c
index 5b03a35949..095c84823f 100644
--- a/drivers/net/txgbe/txgbe_flow.c
+++ b/drivers/net/txgbe/txgbe_flow.c
@@ -2222,6 +2222,8 @@ txgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
const struct rte_flow_item_udp *udp_mask;
const struct rte_flow_item_sctp *sctp_spec;
const struct rte_flow_item_sctp *sctp_mask;
+ const struct rte_flow_item_raw *raw_mask;
+ const struct rte_flow_item_raw *raw_spec;
u8 ptid = 0;
uint32_t j;
@@ -2548,7 +2550,8 @@ txgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
item->type != RTE_FLOW_ITEM_TYPE_UDP &&
item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
- item->type != RTE_FLOW_ITEM_TYPE_END) {
+ item->type != RTE_FLOW_ITEM_TYPE_END &&
+ item->type != RTE_FLOW_ITEM_TYPE_RAW) {
memset(rule, 0, sizeof(struct txgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
@@ -2637,7 +2640,8 @@ txgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
item->type != RTE_FLOW_ITEM_TYPE_UDP &&
item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
- item->type != RTE_FLOW_ITEM_TYPE_END) {
+ item->type != RTE_FLOW_ITEM_TYPE_END &&
+ item->type != RTE_FLOW_ITEM_TYPE_RAW) {
memset(rule, 0, sizeof(struct txgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
@@ -2699,6 +2703,16 @@ txgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
rule->input.dst_port =
tcp_spec->hdr.dst_port;
}
+
+ item = next_no_fuzzy_pattern(pattern, item);
+ if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
+ item->type != RTE_FLOW_ITEM_TYPE_END) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
}
/* Get the UDP info */
@@ -2748,6 +2762,16 @@ txgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
rule->input.dst_port =
udp_spec->hdr.dst_port;
}
+
+ item = next_no_fuzzy_pattern(pattern, item);
+ if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
+ item->type != RTE_FLOW_ITEM_TYPE_END) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
}
/* Get the SCTP info */
@@ -2798,6 +2822,103 @@ txgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
rule->input.dst_port =
sctp_spec->hdr.dst_port;
}
+
+ item = next_no_fuzzy_pattern(pattern, item);
+ if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
+ item->type != RTE_FLOW_ITEM_TYPE_END) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+
+ /* Get the flex byte info */
+ if (item->type == RTE_FLOW_ITEM_TYPE_RAW) {
+ uint16_t pattern = 0;
+
+ /* Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+ /* mask should not be null */
+ if (!item->mask || !item->spec) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
+ rule->b_mask = TRUE;
+ raw_mask = item->mask;
+
+ /* check mask */
+ if (raw_mask->relative != 0x1 ||
+ raw_mask->search != 0x1 ||
+ raw_mask->reserved != 0x0 ||
+ (uint32_t)raw_mask->offset != 0xffffffff ||
+ raw_mask->limit != 0xffff ||
+ raw_mask->length != 0xffff) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
+ rule->b_spec = TRUE;
+ raw_spec = item->spec;
+
+ /* check spec */
+ if (raw_spec->search != 0 ||
+ raw_spec->reserved != 0 ||
+ raw_spec->offset > TXGBE_MAX_FLX_SOURCE_OFF ||
+ raw_spec->offset % 2 ||
+ raw_spec->limit != 0 ||
+ raw_spec->length != 4 ||
+ /* pattern can't be 0xffff */
+ (raw_spec->pattern[0] == 0xff &&
+ raw_spec->pattern[1] == 0xff &&
+ raw_spec->pattern[2] == 0xff &&
+ raw_spec->pattern[3] == 0xff)) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
+ /* check pattern mask */
+ if (raw_mask->pattern[0] != 0xff ||
+ raw_mask->pattern[1] != 0xff ||
+ raw_mask->pattern[2] != 0xff ||
+ raw_mask->pattern[3] != 0xff) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
+ rule->mask.flex_bytes_mask = 0xffff;
+ /* Convert pattern string to hex bytes */
+ if (sscanf((const char *)raw_spec->pattern, "%hx", &pattern) != 1) {
+ memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Failed to parse raw pattern");
+ return -rte_errno;
+ }
+ rule->input.flex_bytes = (pattern & 0x00FF) << 8;
+ rule->input.flex_bytes |= (pattern & 0xFF00) >> 8;
+
+ rule->flex_bytes_offset = raw_spec->offset;
+ rule->flex_relative = raw_spec->relative;
}
if (item->type != RTE_FLOW_ITEM_TYPE_END) {
--
2.48.1
^ permalink raw reply [flat|nested] 21+ messages in thread
* [PATCH 14/19] net/txgbe: fix FDIR rule raw relative for L3 packets
2025-10-27 3:15 [PATCH 00/19] Wangxun Fixes Jiawen Wu
` (12 preceding siblings ...)
2025-10-27 3:15 ` [PATCH 13/19] net/txgbe: filter FDIR match flex bytes for tunnel packets Jiawen Wu
@ 2025-10-27 3:15 ` Jiawen Wu
2025-10-27 3:15 ` [PATCH 15/19] net/txgbe: fix FDIR input mask Jiawen Wu
` (5 subsequent siblings)
19 siblings, 0 replies; 21+ messages in thread
From: Jiawen Wu @ 2025-10-27 3:15 UTC (permalink / raw)
To: dev; +Cc: zaiyuwang, Jiawen Wu, stable
The hardware supports setting the FDIR flex field base to the start of
the MAC header, IP header, L4 header, or L4 payload. So for an IP
packet which has no L4 header, raw bytes cannot be matched at a
relative offset starting from the L3 payload. And an FDIR flex bytes
rule cannot match L2 packets.
Therefore, declare that the relative offset is only used for matching
L4 packets.
Fixes: aa4974765499 ("net/txgbe: fix raw pattern match for FDIR rule")
Cc: stable@dpdk.org
Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
drivers/net/txgbe/txgbe_fdir.c | 5 +----
1 file changed, 1 insertion(+), 4 deletions(-)
diff --git a/drivers/net/txgbe/txgbe_fdir.c b/drivers/net/txgbe/txgbe_fdir.c
index 77d0cc4c30..8d181db33f 100644
--- a/drivers/net/txgbe/txgbe_fdir.c
+++ b/drivers/net/txgbe/txgbe_fdir.c
@@ -258,10 +258,7 @@ txgbe_fdir_get_flex_base(struct txgbe_fdir_rule *rule)
if (rule->input.flow_type & TXGBE_ATR_L4TYPE_MASK)
return TXGBE_FDIRFLEXCFG_BASE_PAY;
- if (rule->input.flow_type & TXGBE_ATR_L3TYPE_MASK)
- return TXGBE_FDIRFLEXCFG_BASE_L3;
-
- return TXGBE_FDIRFLEXCFG_BASE_L2;
+ return TXGBE_FDIRFLEXCFG_BASE_L3;
}
int
--
2.48.1
^ permalink raw reply [flat|nested] 21+ messages in thread
* [PATCH 15/19] net/txgbe: fix FDIR input mask
2025-10-27 3:15 [PATCH 00/19] Wangxun Fixes Jiawen Wu
` (13 preceding siblings ...)
2025-10-27 3:15 ` [PATCH 14/19] net/txgbe: fix FDIR rule raw relative for L3 packets Jiawen Wu
@ 2025-10-27 3:15 ` Jiawen Wu
2025-10-27 3:15 ` [PATCH 16/19] net/txgbe: switch to use FDIR when ntuple filter is full Jiawen Wu
` (4 subsequent siblings)
19 siblings, 0 replies; 21+ messages in thread
From: Jiawen Wu @ 2025-10-27 3:15 UTC (permalink / raw)
To: dev; +Cc: zaiyuwang, Jiawen Wu, stable
Fix the FDIR mask settings to comply with the hardware configuration,
and mask out the spec field instead of manually setting it to 0.
The hardware has some requirements for the masks:
1) The IPv4 mask should be little-endian.
2) The IPv6 source address mask has only 16 bits; one bit of the mask
corresponds to one byte of the spec.
3) The IPv6 destination address only supports perfect matching of the
low 8 bits, so the driver does not support it.
Fixes: ea230dda16ad ("net/txgbe: configure flow director filter")
Cc: stable@dpdk.org
Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
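A quick standalone check of the bit-reversal helper added below (same
logic, simplified name):

#include <assert.h>
#include <stdint.h>

static uint16_t reverse16(uint16_t mask)
{
        mask = ((mask & 0x5555) << 1) | ((mask & 0xAAAA) >> 1);
        mask = ((mask & 0x3333) << 2) | ((mask & 0xCCCC) >> 2);
        mask = ((mask & 0x0F0F) << 4) | ((mask & 0xF0F0) >> 4);
        return ((mask & 0x00FF) << 8) | ((mask & 0xFF00) >> 8);
}

int main(void)
{
        assert(reverse16(0x0001) == 0x8000);
        assert(reverse16(0x00FF) == 0xFF00);
        assert(reverse16(0x1234) == 0x2C48);
        return 0;
}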
drivers/net/txgbe/txgbe_fdir.c | 49 +++++++++++++++++++++++++++++-----
drivers/net/txgbe/txgbe_flow.c | 8 ++----
2 files changed, 45 insertions(+), 12 deletions(-)
diff --git a/drivers/net/txgbe/txgbe_fdir.c b/drivers/net/txgbe/txgbe_fdir.c
index 8d181db33f..6b83a7379d 100644
--- a/drivers/net/txgbe/txgbe_fdir.c
+++ b/drivers/net/txgbe/txgbe_fdir.c
@@ -165,6 +165,15 @@ configure_fdir_flags(const struct rte_eth_fdir_conf *conf,
return 0;
}
+static inline uint16_t
+txgbe_reverse_fdir_bitmasks(uint16_t mask)
+{
+ mask = ((mask & 0x5555) << 1) | ((mask & 0xAAAA) >> 1);
+ mask = ((mask & 0x3333) << 2) | ((mask & 0xCCCC) >> 2);
+ mask = ((mask & 0x0F0F) << 4) | ((mask & 0xF0F0) >> 4);
+ return ((mask & 0x00FF) << 8) | ((mask & 0xFF00) >> 8);
+}
+
int
txgbe_fdir_set_input_mask(struct rte_eth_dev *dev)
{
@@ -206,15 +215,15 @@ txgbe_fdir_set_input_mask(struct rte_eth_dev *dev)
wr32(hw, TXGBE_FDIRUDPMSK, ~fdirtcpm);
wr32(hw, TXGBE_FDIRSCTPMSK, ~fdirtcpm);
- /* Store source and destination IPv4 masks (big-endian) */
- wr32(hw, TXGBE_FDIRSIP4MSK, ~info->mask.src_ipv4_mask);
- wr32(hw, TXGBE_FDIRDIP4MSK, ~info->mask.dst_ipv4_mask);
+ /* Store source and destination IPv4 masks (little-endian) */
+ wr32(hw, TXGBE_FDIRSIP4MSK, rte_be_to_cpu_32(~info->mask.src_ipv4_mask));
+ wr32(hw, TXGBE_FDIRDIP4MSK, rte_be_to_cpu_32(~info->mask.dst_ipv4_mask));
/*
* Store source and destination IPv6 masks (bit reversed)
*/
- fdiripv6m = TXGBE_FDIRIP6MSK_DST(info->mask.dst_ipv6_mask) |
- TXGBE_FDIRIP6MSK_SRC(info->mask.src_ipv6_mask);
+ fdiripv6m = txgbe_reverse_fdir_bitmasks(info->mask.dst_ipv6_mask) << 16;
+ fdiripv6m |= txgbe_reverse_fdir_bitmasks(info->mask.src_ipv6_mask);
wr32(hw, TXGBE_FDIRIP6MSK, ~fdiripv6m);
return 0;
@@ -636,8 +645,14 @@ fdir_write_perfect_filter(struct txgbe_hw *hw,
fdircmd |= TXGBE_FDIRPICMD_QP(queue);
fdircmd |= TXGBE_FDIRPICMD_POOL(input->vm_pool);
- if (input->flow_type & TXGBE_ATR_L3TYPE_IPV6)
+ if (input->flow_type & TXGBE_ATR_L3TYPE_IPV6) {
+ /* use SIP4 to store LS Dword of the Source iPv6 address */
+ wr32(hw, TXGBE_FDIRPISIP4, be_to_le32(input->src_ip[3]));
+ wr32(hw, TXGBE_FDIRPISIP6(0), be_to_le32(input->src_ip[2]));
+ wr32(hw, TXGBE_FDIRPISIP6(1), be_to_le32(input->src_ip[1]));
+ wr32(hw, TXGBE_FDIRPISIP6(2), be_to_le32(input->src_ip[0]));
fdircmd |= TXGBE_FDIRPICMD_IP6;
+ }
wr32(hw, TXGBE_FDIRPICMD, fdircmd);
PMD_DRV_LOG(DEBUG, "Rx Queue=%x hash=%x", queue, fdirhash);
@@ -783,6 +798,26 @@ txgbe_remove_fdir_filter(struct txgbe_hw_fdir_info *fdir_info,
return 0;
}
+static void
+txgbe_fdir_mask_input(struct txgbe_hw_fdir_mask *mask,
+ struct txgbe_atr_input *input)
+{
+ int i;
+
+ if (input->flow_type & TXGBE_ATR_L3TYPE_IPV6) {
+ for (i = 0; i < 16; i++) {
+ if (!(mask->src_ipv6_mask & (1 << i)))
+ input->src_ip[i / 4] &= ~(0xFF << ((i % 4) * 8));
+ }
+ } else {
+ input->src_ip[0] &= mask->src_ipv4_mask;
+ input->dst_ip[0] &= mask->dst_ipv4_mask;
+ }
+
+ input->src_port &= mask->src_port_mask;
+ input->dst_port &= mask->dst_port_mask;
+}
+
int
txgbe_fdir_filter_program(struct rte_eth_dev *dev,
struct txgbe_fdir_rule *rule,
@@ -805,6 +840,8 @@ txgbe_fdir_filter_program(struct rte_eth_dev *dev,
if (fdir_mode >= RTE_FDIR_MODE_PERFECT)
is_perfect = TRUE;
+ txgbe_fdir_mask_input(&info->mask, &rule->input);
+
if (is_perfect) {
fdirhash = atr_compute_perfect_hash(&rule->input,
TXGBE_DEV_FDIR_CONF(dev)->pballoc);
diff --git a/drivers/net/txgbe/txgbe_flow.c b/drivers/net/txgbe/txgbe_flow.c
index 095c84823f..d3113b6fc8 100644
--- a/drivers/net/txgbe/txgbe_flow.c
+++ b/drivers/net/txgbe/txgbe_flow.c
@@ -1849,9 +1849,7 @@ txgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev __rte_unused,
/* check dst addr mask */
for (j = 0; j < 16; j++) {
- if (ipv6_mask->hdr.dst_addr.a[j] == UINT8_MAX) {
- rule->mask.dst_ipv6_mask |= 1 << j;
- } else if (ipv6_mask->hdr.dst_addr.a[j] != 0) {
+ if (ipv6_mask->hdr.dst_addr.a[j] != 0) {
memset(rule, 0, sizeof(struct txgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
@@ -2612,9 +2610,7 @@ txgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
/* check dst addr mask */
for (j = 0; j < 16; j++) {
- if (ipv6_mask->hdr.dst_addr.a[j] == UINT8_MAX) {
- rule->mask.dst_ipv6_mask |= 1 << j;
- } else if (ipv6_mask->hdr.dst_addr.a[j] != 0) {
+ if (ipv6_mask->hdr.dst_addr.a[j] != 0) {
memset(rule, 0, sizeof(struct txgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
--
2.48.1
^ permalink raw reply [flat|nested] 21+ messages in thread
* [PATCH 16/19] net/txgbe: switch to use FDIR when ntuple filter is full
2025-10-27 3:15 [PATCH 00/19] Wangxun Fixes Jiawen Wu
` (14 preceding siblings ...)
2025-10-27 3:15 ` [PATCH 15/19] net/txgbe: fix FDIR input mask Jiawen Wu
@ 2025-10-27 3:15 ` Jiawen Wu
2025-10-27 3:15 ` [PATCH 17/19] net/txgbe: fix VF-PF message for ntuple flow filter Jiawen Wu
` (3 subsequent siblings)
19 siblings, 0 replies; 21+ messages in thread
From: Jiawen Wu @ 2025-10-27 3:15 UTC (permalink / raw)
To: dev; +Cc: zaiyuwang, Jiawen Wu, stable
Using an ntuple filter causes less performance loss on the hardware
than using an FDIR filter. So when a flow rule matches both the ntuple
filter and the FDIR filter, the ntuple filter is created first. But at
most 128 flow rules can be created as ntuple filters, which is far
less than what many users require. So switch to FDIR when the ntuple
filters are full.
Fixes: 77a72b4d9dc0 ("net/txgbe: support ntuple filter add and delete")
Cc: stable@dpdk.org
Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
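For context, a 5-tuple rule such as the following (testpmd syntax,
addresses and ports illustrative) can be backed by either filter type;
with this patch, once the 128 ntuple slots are exhausted, the next
such rule is created as an FDIR filter instead of failing:

flow create 0 ingress pattern eth / ipv4 src is 1.2.3.4 dst is 5.6.7.8 / tcp src is 1000 dst is 2000 / end actions queue index 3 / end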
drivers/net/txgbe/txgbe_ethdev.c | 5 ++++-
drivers/net/txgbe/txgbe_ethdev.h | 1 +
drivers/net/txgbe/txgbe_flow.c | 8 ++++++++
3 files changed, 13 insertions(+), 1 deletion(-)
diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index f650c5b7a4..21f0711762 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -893,6 +893,7 @@ int txgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev)
}
memset(filter_info->fivetuple_mask, 0,
sizeof(uint32_t) * TXGBE_5TUPLE_ARRAY_SIZE);
+ filter_info->ntuple_is_full = false;
return 0;
}
@@ -4495,7 +4496,8 @@ txgbe_add_5tuple_filter(struct rte_eth_dev *dev,
}
}
if (i >= TXGBE_MAX_FTQF_FILTERS) {
- PMD_DRV_LOG(ERR, "5tuple filters are full.");
+ PMD_DRV_LOG(INFO, "5tuple filters are full, switch to FDIR");
+ filter_info->ntuple_is_full = true;
return -ENOSYS;
}
@@ -4526,6 +4528,7 @@ txgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
~(1 << (index % (sizeof(uint32_t) * NBBY)));
TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries);
rte_free(filter);
+ filter_info->ntuple_is_full = false;
if (!txgbe_is_pf(TXGBE_DEV_HW(dev))) {
txgbevf_remove_5tuple_filter(dev, index);
diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h
index 053aa1645f..1e7cc5ea80 100644
--- a/drivers/net/txgbe/txgbe_ethdev.h
+++ b/drivers/net/txgbe/txgbe_ethdev.h
@@ -245,6 +245,7 @@ struct txgbe_filter_info {
/* Bit mask for every used 5tuple filter */
uint32_t fivetuple_mask[TXGBE_5TUPLE_ARRAY_SIZE];
struct txgbe_5tuple_filter_list fivetuple_list;
+ bool ntuple_is_full;
/* store the SYN filter info */
uint32_t syn_info;
/* store the rss filter info */
diff --git a/drivers/net/txgbe/txgbe_flow.c b/drivers/net/txgbe/txgbe_flow.c
index d3113b6fc8..cd05ceffed 100644
--- a/drivers/net/txgbe/txgbe_flow.c
+++ b/drivers/net/txgbe/txgbe_flow.c
@@ -580,8 +580,12 @@ txgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
struct rte_eth_ntuple_filter *filter,
struct rte_flow_error *error)
{
+ struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
int ret;
+ if (filter_info->ntuple_is_full)
+ return -ENOSYS;
+
ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
if (ret)
@@ -3200,6 +3204,7 @@ txgbe_flow_create(struct rte_eth_dev *dev,
struct txgbe_fdir_rule_ele *fdir_rule_ptr;
struct txgbe_rss_conf_ele *rss_filter_ptr;
struct txgbe_flow_mem *txgbe_flow_mem_ptr;
+ struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
uint8_t first_mask = FALSE;
flow = rte_zmalloc("txgbe_rte_flow", sizeof(struct rte_flow), 0);
@@ -3245,6 +3250,8 @@ txgbe_flow_create(struct rte_eth_dev *dev,
flow->rule = ntuple_filter_ptr;
flow->filter_type = RTE_ETH_FILTER_NTUPLE;
return flow;
+ } else if (filter_info->ntuple_is_full) {
+ goto next;
}
goto out;
}
@@ -3254,6 +3261,7 @@ txgbe_flow_create(struct rte_eth_dev *dev,
goto out;
}
+next:
memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
ret = txgbe_parse_ethertype_filter(dev, attr, pattern,
actions, &ethertype_filter, error);
--
2.48.1
* [PATCH 17/19] net/txgbe: fix VF-PF message for ntuple flow filter
2025-10-27 3:15 [PATCH 00/19] Wangxun Fixes Jiawen Wu
` (15 preceding siblings ...)
2025-10-27 3:15 ` [PATCH 16/19] net/txgbe: switch to use FDIR when ntuple filter is full Jiawen Wu
@ 2025-10-27 3:15 ` Jiawen Wu
2025-10-27 3:15 ` [PATCH 18/19] net/txgbe: switch to use FDIR on VF Jiawen Wu
` (2 subsequent siblings)
19 siblings, 0 replies; 21+ messages in thread
From: Jiawen Wu @ 2025-10-27 3:15 UTC (permalink / raw)
To: dev; +Cc: zaiyuwang, Jiawen Wu, stable
Only the first 4 bytes of the VF-PF message buffer are cleared when
adding an ntuple filter, because memset() is passed sizeof(*msg) instead
of sizeof(msg). This causes no visible errors in practice, but the code
is wrong, so fix it.
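The pitfall is easy to reproduce in isolation. In the standalone sketch
below, sizeof(*msg) is the size of one 32-bit element (4 bytes), so the
buggy call leaves everything past msg[0] untouched:
  #include <stdint.h>
  #include <stdio.h>
  #include <string.h>

  int main(void)
  {
          uint32_t msg[5] = {1, 2, 3, 4, 5};

          memset(msg, 0, sizeof(*msg));   /* old code: clears only 4 bytes */
          printf("msg[1] = %u\n", msg[1]);        /* prints 2, still dirty */

          memset(msg, 0, sizeof(msg));    /* fixed: clears all 20 bytes */
          printf("msg[1] = %u\n", msg[1]);        /* prints 0 */
          return 0;
  }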
Fixes: 065d64788cdc ("net/txgbe: support flow filter for VF")
Cc: stable@dpdk.org
Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
drivers/net/txgbe/txgbe_ethdev_vf.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/net/txgbe/txgbe_ethdev_vf.c b/drivers/net/txgbe/txgbe_ethdev_vf.c
index 4be7c5d659..ae04054d02 100644
--- a/drivers/net/txgbe/txgbe_ethdev_vf.c
+++ b/drivers/net/txgbe/txgbe_ethdev_vf.c
@@ -168,7 +168,7 @@ txgbevf_inject_5tuple_filter(struct rte_eth_dev *dev,
uint32_t msg[TXGBEVF_5T_MAX];
int err;
- memset(msg, 0, sizeof(*msg));
+ memset(msg, 0, sizeof(msg));
/* 0 means compare */
mask &= ~TXGBE_5TFCTL0_MPOOL;
--
2.48.1
* [PATCH 18/19] net/txgbe: switch to use FDIR on VF
2025-10-27 3:15 [PATCH 00/19] Wangxun Fixes Jiawen Wu
` (16 preceding siblings ...)
2025-10-27 3:15 ` [PATCH 17/19] net/txgbe: fix VF-PF message for ntuple flow filter Jiawen Wu
@ 2025-10-27 3:15 ` Jiawen Wu
2025-10-27 3:15 ` [PATCH 19/19] net/txgbe: remove unsupported flow action mark Jiawen Wu
2025-10-27 16:51 ` [PATCH 00/19] Wangxun Fixes Stephen Hemminger
19 siblings, 0 replies; 21+ messages in thread
From: Jiawen Wu @ 2025-10-27 3:15 UTC (permalink / raw)
To: dev; +Cc: zaiyuwang, Jiawen Wu, stable
Switch to FDIR when the ntuple filters are full on the VF, just like on
the PF. Since the VF must request the PF driver to configure the FDIR
rule on the hardware, a new PF-VF mailbox API version 2.3 is added to
implement it.
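As a rough sketch of the new message layout (word indices, the 0x22
opcode, and bit positions are restated locally from the diff below rather
than pulled from the driver headers), the VF packs the opcode, a command
word, and the match fields before handing the buffer to the mailbox
write:
  #include <stdint.h>
  #include <stdio.h>
  #include <string.h>

  enum { FDIR_REQ, FDIR_CMD, FDIR_IP4SA, FDIR_IP4DA, FDIR_PORT,
         FDIR_FLEX, FDIR_IP4DM, FDIR_IP4SM, FDIR_PORTM, FDIR_MAX };

  #define VF_SET_FDIR     0x22            /* TXGBE_VF_SET_FDIR opcode */
  #define FDIR_ADD_SHIFT  31

  /* Pack an "add FDIR rule" request: node id in [15:0], Rx ring in
   * [23:16], flow type in [30:24], add/delete flag in bit 31.
   */
  static void pack_fdir_add(uint32_t msg[FDIR_MAX], uint16_t id,
                            uint8_t ring, uint8_t flow_type)
  {
          memset(msg, 0, FDIR_MAX * sizeof(*msg));
          msg[FDIR_REQ]  = VF_SET_FDIR;
          msg[FDIR_CMD]  = id;
          msg[FDIR_CMD] |= (uint32_t)ring << 16;
          msg[FDIR_CMD] |= (uint32_t)(flow_type & 0x7f) << 24;
          msg[FDIR_CMD] |= UINT32_C(1) << FDIR_ADD_SHIFT;
          /* msg[FDIR_IP4SA..FDIR_PORTM] then carry addresses, ports,
           * flex bytes, and their masks, as in txgbevf_add_fdir_filter().
           */
  }

  int main(void)
  {
          uint32_t msg[FDIR_MAX];

          pack_fdir_add(msg, 5, 2, 1);
          printf("cmd word: 0x%08x\n", msg[FDIR_CMD]);
          return 0;
  }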
Fixes: 065d64788cdc ("net/txgbe: support flow filter for VF")
Cc: stable@dpdk.org
Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
drivers/net/txgbe/base/txgbe_mbx.h | 21 ++++
drivers/net/txgbe/base/txgbe_vf.c | 52 +++++++++-
drivers/net/txgbe/base/txgbe_vf.h | 1 +
drivers/net/txgbe/txgbe_ethdev.c | 22 ++--
drivers/net/txgbe/txgbe_ethdev.h | 6 +-
drivers/net/txgbe/txgbe_ethdev_vf.c | 19 +++-
drivers/net/txgbe/txgbe_fdir.c | 150 +++++++++++++++++++++++++++-
drivers/net/txgbe/txgbe_flow.c | 74 +++++++++++---
8 files changed, 318 insertions(+), 27 deletions(-)
diff --git a/drivers/net/txgbe/base/txgbe_mbx.h b/drivers/net/txgbe/base/txgbe_mbx.h
index 31e2d51658..b5b586bd30 100644
--- a/drivers/net/txgbe/base/txgbe_mbx.h
+++ b/drivers/net/txgbe/base/txgbe_mbx.h
@@ -20,6 +20,8 @@
#define TXGBE_VT_MSGTYPE_NACK 0x40000000
/* Indicates that VF is still clear to send requests */
#define TXGBE_VT_MSGTYPE_CTS 0x20000000
+/* Messages below or'd with this are the specific case */
+#define TXGBE_VT_MSGTYPE_SPEC 0x10000000
#define TXGBE_VT_MSGINFO_SHIFT 16
/* bits 23:16 are used for extra info for certain messages */
@@ -39,6 +41,7 @@ enum txgbe_pfvf_api_rev {
txgbe_mbox_api_13, /* API version 1.3, linux/freebsd VF driver */
txgbe_mbox_api_20, /* API version 2.0, solaris Phase1 VF driver */
txgbe_mbox_api_21, /* API version 2.1 */
+ txgbe_mbox_api_23, /* API version 2.3 */
/* This value should always be last */
txgbe_mbox_api_unknown, /* indicates that API version is not known */
};
@@ -65,6 +68,9 @@ enum txgbe_pfvf_api_rev {
/* mailbox API, version 2.1 VF requests */
#define TXGBE_VF_SET_5TUPLE 0x20 /* VF request PF for 5-tuple filter */
+/* mailbox API, version 2.3 VF requests */
+#define TXGBE_VF_SET_FDIR 0x22 /* VF request PF for FDIR filter */
+
#define TXGBE_VF_BACKUP 0x8001 /* VF requests backup */
/* mode choices for TXGBE_VF_UPDATE_XCAST_MODE */
@@ -88,6 +94,21 @@ enum txgbevf_5tuple_msg {
#define TXGBEVF_5T_ADD_SHIFT 31
+enum txgbevf_fdir_msg {
+ TXGBEVF_FDIR_REQ = 0,
+ TXGBEVF_FDIR_CMD,
+ TXGBEVF_FDIR_IP4SA,
+ TXGBEVF_FDIR_IP4DA,
+ TXGBEVF_FDIR_PORT,
+ TXGBEVF_FDIR_FLEX,
+ TXGBEVF_FDIR_IP4DM,
+ TXGBEVF_FDIR_IP4SM,
+ TXGBEVF_FDIR_PORTM,
+ TXGBEVF_FDIR_MAX /* must be last */
+};
+
+#define TXGBEVF_FDIR_ADD_SHIFT 31
+
/* GET_QUEUES return data indices within the mailbox */
#define TXGBE_VF_TX_QUEUES 1 /* number of Tx queues supported */
#define TXGBE_VF_RX_QUEUES 2 /* number of Rx queues supported */
diff --git a/drivers/net/txgbe/base/txgbe_vf.c b/drivers/net/txgbe/base/txgbe_vf.c
index 5e41ba1a3e..7d418b9b37 100644
--- a/drivers/net/txgbe/base/txgbe_vf.c
+++ b/drivers/net/txgbe/base/txgbe_vf.c
@@ -358,6 +358,7 @@ s32 txgbevf_update_xcast_mode(struct txgbe_hw *hw, int xcast_mode)
/* Fall through */
case txgbe_mbox_api_13:
case txgbe_mbox_api_21:
+ case txgbe_mbox_api_23:
break;
default:
return TXGBE_ERR_FEATURE_NOT_SUPPORTED;
@@ -612,6 +613,7 @@ int txgbevf_get_queues(struct txgbe_hw *hw, unsigned int *num_tcs,
case txgbe_mbox_api_12:
case txgbe_mbox_api_13:
case txgbe_mbox_api_21:
+ case txgbe_mbox_api_23:
break;
default:
return 0;
@@ -662,6 +664,8 @@ int txgbevf_get_queues(struct txgbe_hw *hw, unsigned int *num_tcs,
int
txgbevf_add_5tuple_filter(struct txgbe_hw *hw, u32 *msg, u16 index)
{
+ int err;
+
if (hw->api_version < txgbe_mbox_api_21)
return TXGBE_ERR_FEATURE_NOT_SUPPORTED;
@@ -669,13 +673,22 @@ txgbevf_add_5tuple_filter(struct txgbe_hw *hw, u32 *msg, u16 index)
msg[TXGBEVF_5T_CMD] = index;
msg[TXGBEVF_5T_CMD] |= 1 << TXGBEVF_5T_ADD_SHIFT;
- return txgbevf_write_msg_read_ack(hw, msg, msg, TXGBEVF_5T_MAX);
+ err = txgbevf_write_msg_read_ack(hw, msg, msg, TXGBEVF_5T_MAX);
+ if (err)
+ return err;
+
+ msg[0] &= ~TXGBE_VT_MSGTYPE_CTS;
+ if (msg[0] != (TXGBE_VF_SET_5TUPLE | TXGBE_VT_MSGTYPE_ACK))
+ return TXGBE_ERR_NOSUPP;
+
+ return 0;
}
int
txgbevf_del_5tuple_filter(struct txgbe_hw *hw, u16 index)
{
u32 msg[2] = {0, 0};
+ int err;
if (hw->api_version < txgbe_mbox_api_21)
return TXGBE_ERR_FEATURE_NOT_SUPPORTED;
@@ -683,5 +696,40 @@ txgbevf_del_5tuple_filter(struct txgbe_hw *hw, u16 index)
msg[TXGBEVF_5T_REQ] = TXGBE_VF_SET_5TUPLE;
msg[TXGBEVF_5T_CMD] = index;
- return txgbevf_write_msg_read_ack(hw, msg, msg, 2);
+ err = txgbevf_write_msg_read_ack(hw, msg, msg, 2);
+ if (err)
+ return err;
+
+ msg[0] &= ~TXGBE_VT_MSGTYPE_CTS;
+ if (msg[0] != (TXGBE_VF_SET_5TUPLE | TXGBE_VT_MSGTYPE_ACK))
+ return TXGBE_ERR_NOSUPP;
+
+ return 0;
+}
+
+int
+txgbevf_set_fdir(struct txgbe_hw *hw, u32 *msg, bool add)
+{
+ u16 msg_len = TXGBEVF_FDIR_MAX;
+ int err = 0;
+
+ if (hw->api_version < txgbe_mbox_api_23)
+ return TXGBE_ERR_FEATURE_NOT_SUPPORTED;
+
+ msg[TXGBEVF_FDIR_REQ] = TXGBE_VF_SET_FDIR;
+
+ if (add)
+ msg[TXGBEVF_FDIR_CMD] |= 1 << TXGBEVF_FDIR_ADD_SHIFT;
+ else
+ msg_len = 2;
+
+ err = txgbevf_write_msg_read_ack(hw, msg, msg, msg_len);
+ if (err)
+ return err;
+
+ msg[0] &= ~TXGBE_VT_MSGTYPE_CTS;
+ if (msg[0] == (TXGBE_VF_SET_FDIR | TXGBE_VT_MSGTYPE_NACK))
+ return TXGBE_ERR_FEATURE_NOT_SUPPORTED;
+
+ return 0;
}
diff --git a/drivers/net/txgbe/base/txgbe_vf.h b/drivers/net/txgbe/base/txgbe_vf.h
index 1fac1c7e32..c67e2f59b3 100644
--- a/drivers/net/txgbe/base/txgbe_vf.h
+++ b/drivers/net/txgbe/base/txgbe_vf.h
@@ -60,5 +60,6 @@ int txgbevf_get_queues(struct txgbe_hw *hw, unsigned int *num_tcs,
unsigned int *default_tc);
int txgbevf_add_5tuple_filter(struct txgbe_hw *hw, u32 *msg, u16 index);
int txgbevf_del_5tuple_filter(struct txgbe_hw *hw, u16 index);
+int txgbevf_set_fdir(struct txgbe_hw *hw, u32 *msg, bool add);
#endif /* __TXGBE_VF_H__ */
diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index 21f0711762..5d360f8305 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -90,8 +90,6 @@ static const struct reg_info *txgbe_regs_others[] = {
txgbe_regs_diagnostic,
NULL};
-static int txgbe_fdir_filter_init(struct rte_eth_dev *eth_dev);
-static int txgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev);
static int txgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev);
static int txgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev);
static int txgbe_dev_set_link_up(struct rte_eth_dev *dev);
@@ -898,7 +896,7 @@ int txgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev)
return 0;
}
-static int txgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev)
+int txgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev)
{
struct txgbe_hw_fdir_info *fdir_info = TXGBE_DEV_FDIR(eth_dev);
struct txgbe_fdir_filter *fdir_filter;
@@ -934,7 +932,7 @@ static int txgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev)
return 0;
}
-static int txgbe_fdir_filter_init(struct rte_eth_dev *eth_dev)
+int txgbe_fdir_filter_init(struct rte_eth_dev *eth_dev)
{
struct rte_eth_fdir_conf *fdir_conf = TXGBE_DEV_FDIR_CONF(eth_dev);
struct txgbe_hw_fdir_info *fdir_info = TXGBE_DEV_FDIR(eth_dev);
@@ -4478,6 +4476,7 @@ txgbe_add_5tuple_filter(struct rte_eth_dev *dev,
{
struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
int i, idx, shift;
+ int err;
/*
* look for an unused 5tuple filter index,
@@ -4501,12 +4500,19 @@ txgbe_add_5tuple_filter(struct rte_eth_dev *dev,
return -ENOSYS;
}
- if (txgbe_is_pf(TXGBE_DEV_HW(dev)))
+ if (txgbe_is_pf(TXGBE_DEV_HW(dev))) {
txgbe_inject_5tuple_filter(dev, filter);
- else
- txgbevf_inject_5tuple_filter(dev, filter);
+ return 0;
+ }
- return 0;
+ err = txgbevf_inject_5tuple_filter(dev, filter);
+ if (err) {
+ filter_info->fivetuple_mask[i / (sizeof(uint32_t) * NBBY)] &=
+ ~(1 << (i % (sizeof(uint32_t) * NBBY)));
+ TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries);
+ }
+
+ return err;
}
/*
diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h
index 1e7cc5ea80..189fbac541 100644
--- a/drivers/net/txgbe/txgbe_ethdev.h
+++ b/drivers/net/txgbe/txgbe_ethdev.h
@@ -555,6 +555,8 @@ txgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev,
struct txgbe_l2_tunnel_conf *l2_tunnel);
void txgbe_filterlist_init(void);
void txgbe_filterlist_flush(void);
+int txgbe_fdir_filter_init(struct rte_eth_dev *eth_dev);
+int txgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev);
void txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t direction,
uint8_t queue, uint8_t msix_vector);
@@ -570,7 +572,9 @@ int txgbe_fdir_set_flexbytes_offset(struct rte_eth_dev *dev,
int txgbe_fdir_filter_program(struct rte_eth_dev *dev,
struct txgbe_fdir_rule *rule,
bool del, bool update);
-
+int txgbevf_fdir_filter_program(struct rte_eth_dev *dev,
+ struct txgbe_fdir_rule *rule,
+ bool del);
void txgbe_configure_pb(struct rte_eth_dev *dev);
void txgbe_configure_port(struct rte_eth_dev *dev);
void txgbe_configure_dcb(struct rte_eth_dev *dev);
diff --git a/drivers/net/txgbe/txgbe_ethdev_vf.c b/drivers/net/txgbe/txgbe_ethdev_vf.c
index ae04054d02..0f914fa3f5 100644
--- a/drivers/net/txgbe/txgbe_ethdev_vf.c
+++ b/drivers/net/txgbe/txgbe_ethdev_vf.c
@@ -129,6 +129,7 @@ txgbevf_negotiate_api(struct txgbe_hw *hw)
/* start with highest supported, proceed down */
static const int sup_ver[] = {
+ txgbe_mbox_api_23,
txgbe_mbox_api_21,
txgbe_mbox_api_13,
txgbe_mbox_api_12,
@@ -162,6 +163,7 @@ int
txgbevf_inject_5tuple_filter(struct rte_eth_dev *dev,
struct txgbe_5tuple_filter *filter)
{
+ struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
uint32_t mask = TXGBE_5TFCTL0_MASK;
uint16_t index = filter->index;
@@ -194,9 +196,16 @@ txgbevf_inject_5tuple_filter(struct rte_eth_dev *dev,
msg[TXGBEVF_5T_SA] = be_to_le32(filter->filter_info.src_ip);
err = txgbevf_add_5tuple_filter(hw, msg, index);
- if (err)
- PMD_DRV_LOG(ERR, "VF request PF to add 5tuple filters failed.");
+ if (!err)
+ return 0;
+
+ if (msg[TXGBEVF_5T_REQ] & TXGBE_VT_MSGTYPE_SPEC) {
+ PMD_DRV_LOG(INFO, "5tuple filters are full, switch to FDIR");
+ filter_info->ntuple_is_full = true;
+ return -ENOSYS;
+ }
+ PMD_DRV_LOG(ERR, "VF request PF to add 5tuple filters failed.");
return err;
}
@@ -367,6 +376,9 @@ eth_txgbevf_dev_init(struct rte_eth_dev *eth_dev)
memset(filter_info, 0,
sizeof(struct txgbe_filter_info));
+ /* initialize flow director filter list & hash */
+ txgbe_fdir_filter_init(eth_dev);
+
/* initialize 5tuple filter list */
TAILQ_INIT(&filter_info->fivetuple_list);
@@ -860,6 +872,9 @@ txgbevf_dev_close(struct rte_eth_dev *dev)
rte_intr_callback_unregister(intr_handle,
txgbevf_dev_interrupt_handler, dev);
+ /* remove all the fdir filters & hash */
+ txgbe_fdir_filter_uninit(dev);
+
/* Remove all ntuple filters of the device */
txgbe_ntuple_filter_uninit(dev);
diff --git a/drivers/net/txgbe/txgbe_fdir.c b/drivers/net/txgbe/txgbe_fdir.c
index 6b83a7379d..67f586ffc7 100644
--- a/drivers/net/txgbe/txgbe_fdir.c
+++ b/drivers/net/txgbe/txgbe_fdir.c
@@ -775,7 +775,7 @@ txgbe_insert_fdir_filter(struct txgbe_hw_fdir_info *fdir_info,
TAILQ_INSERT_TAIL(&fdir_info->fdir_list, fdir_filter, entries);
- return 0;
+ return ret;
}
static inline int
@@ -795,7 +795,7 @@ txgbe_remove_fdir_filter(struct txgbe_hw_fdir_info *fdir_info,
TAILQ_REMOVE(&fdir_info->fdir_list, fdir_filter, entries);
rte_free(fdir_filter);
- return 0;
+ return ret;
}
static void
@@ -929,6 +929,147 @@ txgbe_fdir_filter_program(struct rte_eth_dev *dev,
return err;
}
+static void
+txgbevf_flush_fdir_filter(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ uint32_t msg[2] = {0, 0};
+
+ /* flush bit */
+ msg[TXGBEVF_FDIR_CMD] = 1 << 16;
+
+ txgbevf_set_fdir(hw, msg, FALSE);
+}
+
+static int
+txgbevf_del_fdir_filter(struct rte_eth_dev *dev,
+ struct txgbe_fdir_rule *rule,
+ int id)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_hw_fdir_info *info = TXGBE_DEV_FDIR(dev);
+ uint32_t msg[2] = {0, 0};
+ int ret = 0;
+
+ /* node id [15:0] */
+ msg[TXGBEVF_FDIR_CMD] = id;
+
+ ret = txgbevf_set_fdir(hw, msg, FALSE);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "VF request PF to delete FDIR filters failed.");
+ return ret;
+ }
+
+ ret = txgbe_remove_fdir_filter(info, &rule->input);
+ if (ret < 0)
+ PMD_DRV_LOG(ERR, "Fail to delete FDIR filter!");
+
+ return 0;
+}
+
+static int
+txgbevf_add_fdir_filter(struct rte_eth_dev *dev,
+ struct txgbe_fdir_rule *rule,
+ int id)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ uint32_t msg[TXGBEVF_FDIR_MAX];
+ int ret = 0;
+
+ memset(msg, 0, sizeof(msg));
+
+ /* node id [15:0] */
+ msg[TXGBEVF_FDIR_CMD] = id;
+ /* ring_idx [23:16] */
+ msg[TXGBEVF_FDIR_CMD] |= rule->queue << 16;
+ /* flow_type [30:24] */
+ msg[TXGBEVF_FDIR_CMD] |= rule->input.flow_type << 24;
+
+ msg[TXGBEVF_FDIR_IP4SA] = rule->input.src_ip[0];
+ msg[TXGBEVF_FDIR_IP4DA] = rule->input.dst_ip[0];
+ msg[TXGBEVF_FDIR_PORT] = (rule->input.dst_port << 16) |
+ rule->input.src_port;
+ if (rule->mask.flex_bytes_mask) {
+ /* base [1:0] */
+ msg[TXGBEVF_FDIR_FLEX] = txgbe_fdir_get_flex_base(rule);
+ /* offset [7:3] */
+ msg[TXGBEVF_FDIR_FLEX] |=
+ TXGBE_FDIRFLEXCFG_OFST(rule->flex_bytes_offset / 2);
+ /* flex bytes [31:16]*/
+ msg[TXGBEVF_FDIR_FLEX] |= rule->input.flex_bytes << 16;
+ }
+ msg[TXGBEVF_FDIR_IP4SM] = rule->mask.src_ipv4_mask;
+ msg[TXGBEVF_FDIR_IP4DM] = rule->mask.dst_ipv4_mask;
+ msg[TXGBEVF_FDIR_PORTM] = (rule->mask.dst_port_mask << 16) |
+ rule->mask.src_port_mask;
+
+ ret = txgbevf_set_fdir(hw, msg, TRUE);
+ if (ret)
+ PMD_DRV_LOG(ERR, "VF request PF to add FDIR filters failed.");
+
+ return ret;
+}
+
+int
+txgbevf_fdir_filter_program(struct rte_eth_dev *dev,
+ struct txgbe_fdir_rule *rule,
+ bool del)
+{
+ struct txgbe_hw_fdir_info *info = TXGBE_DEV_FDIR(dev);
+ struct txgbe_atr_input *input = &rule->input;
+ struct txgbe_fdir_filter *node;
+ uint32_t fdirhash;
+ int ret;
+
+ if (rule->mode != RTE_FDIR_MODE_PERFECT ||
+ rule->fdirflags == TXGBE_FDIRPICMD_DROP)
+ return -ENOTSUP;
+
+ if (input->flow_type & TXGBE_ATR_FLOW_TYPE_IPV6)
+ return -ENOTSUP;
+
+ fdirhash = atr_compute_perfect_hash(input,
+ TXGBE_DEV_FDIR_CONF(dev)->pballoc);
+
+ ret = rte_hash_lookup(info->hash_handle, (const void *)input);
+ if (ret < 0) {
+ if (del) {
+ PMD_DRV_LOG(ERR, "No such fdir filter to delete!");
+ return ret;
+ }
+ } else {
+ if (!del) {
+ PMD_DRV_LOG(ERR, "Conflict with existing fdir filter!");
+ return -EINVAL;
+ }
+ }
+
+ if (del)
+ return txgbevf_del_fdir_filter(dev, rule, ret);
+
+ node = rte_zmalloc("txgbe_fdir",
+ sizeof(struct txgbe_fdir_filter), 0);
+ if (!node)
+ return -ENOMEM;
+ rte_memcpy(&node->input, input,
+ sizeof(struct txgbe_atr_input));
+ node->fdirflags = rule->fdirflags;
+ node->fdirhash = fdirhash;
+ node->queue = rule->queue;
+
+ ret = txgbe_insert_fdir_filter(info, node);
+ if (ret < 0) {
+ rte_free(node);
+ return ret;
+ }
+
+ ret = txgbevf_add_fdir_filter(dev, rule, ret);
+ if (ret)
+ txgbe_remove_fdir_filter(info, input);
+
+ return ret;
+}
+
static int
txgbe_fdir_flush(struct rte_eth_dev *dev)
{
@@ -936,6 +1077,11 @@ txgbe_fdir_flush(struct rte_eth_dev *dev)
struct txgbe_hw_fdir_info *info = TXGBE_DEV_FDIR(dev);
int ret;
+ if (!txgbe_is_pf(hw)) {
+ txgbevf_flush_fdir_filter(dev);
+ return 0;
+ }
+
ret = txgbe_reinit_fdir_tables(hw);
if (ret < 0) {
PMD_INIT_LOG(ERR, "Failed to re-initialize FD table.");
diff --git a/drivers/net/txgbe/txgbe_flow.c b/drivers/net/txgbe/txgbe_flow.c
index cd05ceffed..5647165d52 100644
--- a/drivers/net/txgbe/txgbe_flow.c
+++ b/drivers/net/txgbe/txgbe_flow.c
@@ -828,6 +828,13 @@ txgbe_parse_ethertype_filter(struct rte_eth_dev *dev,
{
int ret;
+ if (!txgbe_is_pf(TXGBE_DEV_HW(dev))) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "Flow type not suppotted yet on VF");
+ return -rte_errno;
+ }
+
ret = cons_parse_ethertype_filter(attr, pattern,
actions, filter, error);
@@ -1114,6 +1121,13 @@ txgbe_parse_syn_filter(struct rte_eth_dev *dev,
{
int ret;
+ if (!txgbe_is_pf(TXGBE_DEV_HW(dev))) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "Flow type not suppotted yet on VF");
+ return -rte_errno;
+ }
+
ret = cons_parse_syn_filter(attr, pattern,
actions, filter, error);
@@ -1317,6 +1331,13 @@ txgbe_parse_l2_tn_filter(struct rte_eth_dev *dev,
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
uint16_t vf_num;
+ if (!txgbe_is_pf(TXGBE_DEV_HW(dev))) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "Flow type not suppotted yet on VF");
+ return -rte_errno;
+ }
+
ret = cons_parse_l2_tn_filter(dev, attr, pattern,
actions, l2_tn_filter, error);
@@ -2960,6 +2981,9 @@ txgbe_parse_fdir_filter(struct rte_eth_dev *dev,
return ret;
step_next:
+ if (!txgbe_is_pf(TXGBE_DEV_HW(dev)))
+ return ret;
+
if (fdir_conf->mode == RTE_FDIR_MODE_NONE) {
fdir_conf->mode = rule->mode;
ret = txgbe_fdir_configure(dev);
@@ -2988,6 +3012,13 @@ txgbe_parse_rss_filter(struct rte_eth_dev *dev,
const struct rte_flow_action_rss *rss;
uint16_t n;
+ if (!txgbe_is_pf(TXGBE_DEV_HW(dev))) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "Flow type not suppotted yet on VF");
+ return -rte_errno;
+ }
+
/**
* rss only supports forwarding,
* check if the first not void action is RSS.
@@ -3256,11 +3287,6 @@ txgbe_flow_create(struct rte_eth_dev *dev,
goto out;
}
- if (!txgbe_is_pf(TXGBE_DEV_HW(dev))) {
- PMD_DRV_LOG(ERR, "Flow type not suppotted yet on VF.");
- goto out;
- }
-
next:
memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
ret = txgbe_parse_ethertype_filter(dev, attr, pattern,
@@ -3317,6 +3343,27 @@ txgbe_flow_create(struct rte_eth_dev *dev,
ret = txgbe_parse_fdir_filter(dev, attr, pattern,
actions, &fdir_rule, error);
if (!ret) {
+ if (!txgbe_is_pf(TXGBE_DEV_HW(dev))) {
+ ret = txgbevf_fdir_filter_program(dev, &fdir_rule, FALSE);
+ if (ret < 0)
+ goto out;
+
+ fdir_rule_ptr = rte_zmalloc("txgbe_fdir_filter",
+ sizeof(struct txgbe_fdir_rule_ele), 0);
+ if (!fdir_rule_ptr) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ goto out;
+ }
+ rte_memcpy(&fdir_rule_ptr->filter_info,
+ &fdir_rule,
+ sizeof(struct txgbe_fdir_rule));
+ TAILQ_INSERT_TAIL(&filter_fdir_list,
+ fdir_rule_ptr, entries);
+ flow->rule = fdir_rule_ptr;
+ flow->filter_type = RTE_ETH_FILTER_FDIR;
+ return flow;
+ }
+
/* A mask cannot be deleted. */
if (fdir_rule.b_mask) {
if (!fdir_info->mask_added) {
@@ -3583,7 +3630,10 @@ txgbe_flow_destroy(struct rte_eth_dev *dev,
rte_memcpy(&fdir_rule,
&fdir_rule_ptr->filter_info,
sizeof(struct txgbe_fdir_rule));
- ret = txgbe_fdir_filter_program(dev, &fdir_rule, TRUE, FALSE);
+ if (txgbe_is_pf(TXGBE_DEV_HW(dev)))
+ ret = txgbe_fdir_filter_program(dev, &fdir_rule, TRUE, FALSE);
+ else
+ ret = txgbevf_fdir_filter_program(dev, &fdir_rule, TRUE);
if (!ret) {
TAILQ_REMOVE(&filter_fdir_list,
fdir_rule_ptr, entries);
@@ -3656,12 +3706,6 @@ txgbe_flow_flush(struct rte_eth_dev *dev,
txgbe_clear_all_ntuple_filter(dev);
- if (!txgbe_is_pf(TXGBE_DEV_HW(dev)))
- goto out;
-
- txgbe_clear_all_ethertype_filter(dev);
- txgbe_clear_syn_filter(dev);
-
ret = txgbe_clear_all_fdir_filter(dev);
if (ret < 0) {
rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
@@ -3669,6 +3713,12 @@ txgbe_flow_flush(struct rte_eth_dev *dev,
return ret;
}
+ if (!txgbe_is_pf(TXGBE_DEV_HW(dev)))
+ goto out;
+
+ txgbe_clear_all_ethertype_filter(dev);
+ txgbe_clear_syn_filter(dev);
+
ret = txgbe_clear_all_l2_tn_filter(dev);
if (ret < 0) {
rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
--
2.48.1
* [PATCH 19/19] net/txgbe: remove unsupported flow action mark
2025-10-27 3:15 [PATCH 00/19] Wangxun Fixes Jiawen Wu
` (17 preceding siblings ...)
2025-10-27 3:15 ` [PATCH 18/19] net/txgbe: switch to use FDIR on VF Jiawen Wu
@ 2025-10-27 3:15 ` Jiawen Wu
2025-10-27 16:51 ` [PATCH 00/19] Wangxun Fixes Stephen Hemminger
19 siblings, 0 replies; 21+ messages in thread
From: Jiawen Wu @ 2025-10-27 3:15 UTC (permalink / raw)
To: dev; +Cc: zaiyuwang, Jiawen Wu, stable
Flow action "mark" is not supported, just remove it.
Fixes: b973ee26747a ("net/txgbe: parse flow director filter")
Cc: stable@dpdk.org
Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
---
drivers/net/txgbe/txgbe_flow.c | 23 ++---------------------
1 file changed, 2 insertions(+), 21 deletions(-)
diff --git a/drivers/net/txgbe/txgbe_flow.c b/drivers/net/txgbe/txgbe_flow.c
index 5647165d52..a97588e57a 100644
--- a/drivers/net/txgbe/txgbe_flow.c
+++ b/drivers/net/txgbe/txgbe_flow.c
@@ -1358,7 +1358,6 @@ txgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
{
const struct rte_flow_action *act;
const struct rte_flow_action_queue *act_q;
- const struct rte_flow_action_mark *mark;
/* parse attr */
/* must be input direction */
@@ -1423,10 +1422,9 @@ txgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
rule->fdirflags = TXGBE_FDIRPICMD_DROP;
}
- /* check if the next not void item is MARK */
+ /* nothing else supported */
act = next_no_void_action(actions, act);
- if (act->type != RTE_FLOW_ACTION_TYPE_MARK &&
- act->type != RTE_FLOW_ACTION_TYPE_END) {
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
memset(rule, 0, sizeof(struct txgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
@@ -1436,21 +1434,6 @@ txgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
rule->soft_id = 0;
- if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
- mark = (const struct rte_flow_action_mark *)act->conf;
- rule->soft_id = mark->id;
- act = next_no_void_action(actions, act);
- }
-
- /* check if the next not void item is END */
- if (act->type != RTE_FLOW_ACTION_TYPE_END) {
- memset(rule, 0, sizeof(struct txgbe_fdir_rule));
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION,
- act, "Not supported action.");
- return -rte_errno;
- }
-
return 0;
}
@@ -1562,8 +1545,6 @@ txgbe_fdir_parse_flow_type(struct txgbe_atr_input *input, u8 ptid, bool tun)
* The next not void item must be END.
* ACTION:
* The first not void action should be QUEUE or DROP.
- * The second not void optional action should be MARK,
- * mark_id is a uint32_t number.
* The next not void action should be END.
* UDP/TCP/SCTP pattern example:
* ITEM Spec Mask
--
2.48.1
* Re: [PATCH 00/19] Wangxun Fixes
2025-10-27 3:15 [PATCH 00/19] Wangxun Fixes Jiawen Wu
` (18 preceding siblings ...)
2025-10-27 3:15 ` [PATCH 19/19] net/txgbe: remove unsupported flow action mark Jiawen Wu
@ 2025-10-27 16:51 ` Stephen Hemminger
19 siblings, 0 replies; 21+ messages in thread
From: Stephen Hemminger @ 2025-10-27 16:51 UTC (permalink / raw)
To: Jiawen Wu; +Cc: dev, zaiyuwang
On Mon, 27 Oct 2025 11:15:23 +0800
Jiawen Wu <jiawenwu@trustnetic.com> wrote:
> Fix some usage issues.
>
> Jiawen Wu (19):
> net/txgbe: fix hardware statistic rx_l3_l4_xsum_error
> net/ngbe: fix hardware statistic rx_l3_l4_xsum_error
> net/txgbe: reduce memory size of ring descriptors
> net/ngbe: reduce memory size of ring descriptors
> net/txgbe: fix VF Rx buffer size in config register
> net/ngbe: fix VF Rx buffer size in config register
> net/txgbe: remove duplicate txq assignment
> net/txgbe: add device arguments for FDIR
> net/txgbe: fix the maxinum number of FDIR filter
> net/txgbe: fix FDIR mode is not be cleared
> net/txgbe: fix FDIR drop action for L4 match packets
> net/txgbe: fix to create FDIR filter for tunnel SCTP packet
> net/txgbe: filter FDIR match flex bytes for tunnel packets
> net/txgbe: fix FDIR rule raw relative for L3 packets
> net/txgbe: fix FDIR input mask
> net/txgbe: switch to use FDIR when ntuple filter is full
> net/txgbe: fix VF-PF message for ntuple flow filter
> net/txgbe: switch to use FDIR on VF
> net/txgbe: remove unsupported flow action mark
Queued to next-net