* [PATCH v0 0/3] drivers/net: fix Coverity issues
@ 2025-06-18 12:11 Wenbo Cao
2025-06-18 12:11 ` [PATCH v1 1/3] net/rnp: add check for firmware response info Wenbo Cao
` (2 more replies)
0 siblings, 3 replies; 4+ messages in thread
From: Wenbo Cao @ 2025-06-18 12:11 UTC
To: stephen; +Cc: dev, yaojun, Wenbo Cao
v1:
* fixed a compile issue
v0:
* fixed the issues below:
Coverity issue: 468860, 468866, 468858
Fixes: 4530e70f1e32 ("net/rnp: support Tx TSO offload")
Fixes: 52dfb84e14be ("net/rnp: add device init and uninit")
Fixes: 52aae4ed4ffb ("net/rnp: add device capabilities")
* fixed TSO for 64KB packets
Wenbo Cao (3):
net/rnp: add check for firmware response info
net/rnp: fix Tunnel-TSO VLAN header untrusted loop bound
net/rnp: fix TSO segmentation for packets of 64KB
drivers/net/rnp/base/rnp_fw_cmd.h | 1 +
drivers/net/rnp/base/rnp_mbx_fw.c | 15 +++-
drivers/net/rnp/rnp_ethdev.c | 16 ++--
drivers/net/rnp/rnp_rxtx.c | 118 +++++++++++++++++++++++-------
drivers/net/rnp/rnp_rxtx.h | 1 +
5 files changed, 117 insertions(+), 34 deletions(-)
--
2.25.1
* [PATCH v1 1/3] net/rnp: add check for firmware response info
2025-06-18 12:11 [PATCH v0 0/3] drivers/net: fix Coverity issues Wenbo Cao
@ 2025-06-18 12:11 ` Wenbo Cao
2025-06-18 12:11 ` [PATCH v1 2/3] net/rnp: fix Tunnel-TSO VLAN header untrusted loop bound Wenbo Cao
2025-06-18 12:11 ` [PATCH v1 3/3] net/rnp: fix TSO segmentation for packets of 64KB Wenbo Cao
2 siblings, 0 replies; 4+ messages in thread
From: Wenbo Cao @ 2025-06-18 12:11 UTC
To: stephen, Wenbo Cao, Ferruh Yigit; +Cc: dev, yaojun, stable
Add sanity checks at critical points to detect invalid information
reported by the firmware, preventing logic errors later in the driver.
Fixes: 52aae4ed4ffb ("net/rnp: add device capabilities")
Fixes: 52dfb84e14be ("net/rnp: add device init and uninit")
Cc: stable@dpdk.org
Signed-off-by: Wenbo Cao <caowenbo@mucse.com>
---
drivers/net/rnp/base/rnp_fw_cmd.h | 1 +
drivers/net/rnp/base/rnp_mbx_fw.c | 15 ++++++++++++++-
drivers/net/rnp/rnp_ethdev.c | 16 ++++++++--------
3 files changed, 23 insertions(+), 9 deletions(-)
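For context, the new checks guard the classic ffs()-based walk over a
firmware-reported bit mask: validate the mask before trusting it, then
clear each visited bit so the loop must terminate. Below is a minimal
standalone sketch of the pattern, not driver code; MAX_LANES and
MAX_LANE_MASK are illustrative stand-ins for the driver's
RNP_MAX_PORT_OF_PF and RNP_MAX_LANE_MASK, and __builtin_popcount is the
GCC/Clang builtin.

/* Sketch only: validate an untrusted lane mask, then visit each
 * set bit with ffs(), clearing bits as we go.
 */
#include <stdint.h>
#include <stdio.h>
#include <strings.h>		/* ffs() */

#define MAX_LANES	4	/* stand-in for RNP_MAX_PORT_OF_PF */
#define MAX_LANE_MASK	0xfu	/* stand-in for RNP_MAX_LANE_MASK */

static int walk_lane_mask(uint32_t lane_mask)
{
	uint32_t temp_mask = lane_mask;
	int lane_bit;

	/* Reject masks the hardware cannot have reported. */
	if (temp_mask == 0 || temp_mask > MAX_LANE_MASK ||
	    __builtin_popcount(temp_mask) > MAX_LANES) {
		fprintf(stderr, "invalid lane_mask 0x%x\n", lane_mask);
		return -1;
	}
	while (temp_mask) {
		lane_bit = ffs(temp_mask) - 1;	/* lowest set bit */
		printf("lane %d present\n", lane_bit);
		temp_mask &= ~(1u << lane_bit);	/* guarantee progress */
	}
	return 0;
}

int main(void)
{
	walk_lane_mask(0x5);	/* lanes 0 and 2 */
	walk_lane_mask(0x1f);	/* rejected: out of range */
	return 0;
}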
diff --git a/drivers/net/rnp/base/rnp_fw_cmd.h b/drivers/net/rnp/base/rnp_fw_cmd.h
index 26db07ad36..f6c0d77f1d 100644
--- a/drivers/net/rnp/base/rnp_fw_cmd.h
+++ b/drivers/net/rnp/base/rnp_fw_cmd.h
@@ -159,6 +159,7 @@ struct rnp_mac_addr_rep {
#define RNP_SPEED_CAP_100M_HALF RTE_BIT32(11)
#define RNP_SPEED_CAP_1GB_HALF RTE_BIT32(12)
+#define RNP_SPEED_VALID_MASK RTE_GENMASK32(12, 2)
enum rnp_pma_phy_type {
RNP_PHY_TYPE_NONE = 0,
RNP_PHY_TYPE_1G_BASE_KX,
diff --git a/drivers/net/rnp/base/rnp_mbx_fw.c b/drivers/net/rnp/base/rnp_mbx_fw.c
index 3e7cf7f9ad..9e0b1730c2 100644
--- a/drivers/net/rnp/base/rnp_mbx_fw.c
+++ b/drivers/net/rnp/base/rnp_mbx_fw.c
@@ -230,6 +230,7 @@ rnp_fw_get_phy_capability(struct rnp_eth_port *port,
return 0;
}
+#define RNP_MAX_LANE_MASK (0xf)
int rnp_mbx_fw_get_capability(struct rnp_eth_port *port)
{
struct rnp_phy_abilities_rep ability;
@@ -252,17 +253,29 @@ int rnp_mbx_fw_get_capability(struct rnp_eth_port *port)
hw->nic_mode = ability.nic_mode;
/* get phy<->lane mapping info */
lane_cnt = rte_popcount32(hw->lane_mask);
+ if (lane_cnt > RNP_MAX_PORT_OF_PF) {
+ RNP_PMD_LOG(ERR, "firmware invalid lane_mask");
+ return -EINVAL;
+ }
temp_mask = hw->lane_mask;
+ if (temp_mask == 0 || temp_mask > RNP_MAX_LANE_MASK) {
+ RNP_PMD_LOG(ERR, "lane_mask is invalid 0x%.2x", temp_mask);
+ return -EINVAL;
+ }
if (ability.e.ports_is_sgmii_valid)
is_sgmii_bits = ability.e.lane_is_sgmii;
for (idx = 0; idx < lane_cnt; idx++) {
hw->phy_port_ids[idx] = port_ids[idx];
+ if (temp_mask == 0) {
+ RNP_PMD_LOG(ERR, "temp_mask is zero at idx=%d", idx);
+ return -EINVAL;
+ }
lane_bit = ffs(temp_mask) - 1;
lane_idx = port_ids[idx] % lane_cnt;
hw->lane_of_port[lane_idx] = lane_bit;
is_sgmii = lane_bit & is_sgmii_bits ? 1 : 0;
hw->lane_is_sgmii[lane_idx] = is_sgmii;
- temp_mask &= ~RTE_BIT32(lane_bit);
+ temp_mask &= ~(1ULL << lane_bit);
}
hw->max_port_num = lane_cnt;
}
diff --git a/drivers/net/rnp/rnp_ethdev.c b/drivers/net/rnp/rnp_ethdev.c
index de1c077f61..7b996913c8 100644
--- a/drivers/net/rnp/rnp_ethdev.c
+++ b/drivers/net/rnp/rnp_ethdev.c
@@ -751,17 +751,17 @@ rnp_get_speed_caps(struct rte_eth_dev *dev)
{
struct rnp_eth_port *port = RNP_DEV_TO_PORT(dev);
uint32_t speed_cap = 0;
- uint32_t i = 0, speed;
uint32_t support_link;
- uint32_t link_types;
+ uint32_t speed = 0;
+ int bit_pos = 0;
support_link = port->attr.phy_meta.supported_link;
- link_types = rte_popcount64(support_link);
- if (!link_types)
+ if (support_link == 0)
return 0;
- for (i = 0; i < link_types; i++) {
- speed = ffs(support_link) - 1;
- switch (RTE_BIT32(speed)) {
+ while (support_link) {
+ bit_pos = ffs(support_link) - 1;
+ speed = RTE_BIT32(bit_pos) & RNP_SPEED_VALID_MASK;
+ switch (speed) {
case RNP_SPEED_CAP_10M_FULL:
speed_cap |= RTE_ETH_LINK_SPEED_10M;
break;
@@ -789,7 +789,7 @@ rnp_get_speed_caps(struct rte_eth_dev *dev)
default:
speed_cap |= 0;
}
- support_link &= ~RTE_BIT32(speed);
+ support_link &= ~(1ULL << bit_pos);
}
if (!port->attr.phy_meta.link_autoneg)
speed_cap |= RTE_ETH_LINK_SPEED_FIXED;
--
2.25.1
* [PATCH v1 2/3] net/rnp: fix Tunnel-TSO VLAN header untrusted loop bound
2025-06-18 12:11 [PATCH v0 0/3] drivers/net: fix Coverity issues Wenbo Cao
2025-06-18 12:11 ` [PATCH v1 1/3] net/rnp: add check for firmware response info Wenbo Cao
@ 2025-06-18 12:11 ` Wenbo Cao
2025-06-18 12:11 ` [PATCH v1 3/3] net/rnp: fix TSO segmentation for packets of 64KB Wenbo Cao
2 siblings, 0 replies; 4+ messages in thread
From: Wenbo Cao @ 2025-06-18 12:11 UTC
To: stephen, Wenbo Cao; +Cc: dev, yaojun, stable
Add boundary checks when parsing the inner VLAN headers and correct
the protocol header type verification.
Fixes: 4530e70f1e32 ("net/rnp: support Tx TSO offload")
Cc: stable@dpdk.org
Signed-off-by: Wenbo Cao <caowenbo@mucse.com>
---
drivers/net/rnp/rnp_rxtx.c | 70 ++++++++++++++++++++++++++------------
drivers/net/rnp/rnp_rxtx.h | 1 +
2 files changed, 50 insertions(+), 21 deletions(-)
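For background on the "untrusted loop bound" defect class, the sketch
below walks a chain of VLAN/QinQ tags with both a hard cap on the tag
count and an explicit length check before each read, which is the fix
in spirit. It is not DPDK code: the struct and constants are simplified
stand-ins for rte_vlan_hdr and the RTE_ETHER_TYPE_* values.

/* Sketch only: skip VLAN tags after an Ethernet header without
 * trusting packet contents, bounding both tag count and bytes read.
 */
#include <stdint.h>
#include <stddef.h>
#include <arpa/inet.h>		/* ntohs(), htons() */

#define ETHER_TYPE_VLAN  0x8100
#define ETHER_TYPE_QINQ  0x88a8
#define VLAN_HLEN        4
#define MAX_VLAN_HDR_NUM 4	/* hard loop bound */

struct vlan_hdr {
	uint16_t tci;
	uint16_t eth_proto;
};

/* Returns total VLAN bytes consumed, or -1 if the frame is truncated. */
static int skip_vlan_tags(const uint8_t *l2_payload, uint16_t ether_type,
			  size_t remaining)
{
	uint16_t type = ntohs(ether_type);
	int ext_len = 0;

	while ((type == ETHER_TYPE_VLAN || type == ETHER_TYPE_QINQ) &&
	       ext_len < MAX_VLAN_HDR_NUM * VLAN_HLEN) {
		if ((size_t)ext_len + VLAN_HLEN > remaining)
			return -1;	/* header would run past the buffer */
		const struct vlan_hdr *vh =
			(const struct vlan_hdr *)(l2_payload + ext_len);
		type = ntohs(vh->eth_proto);
		ext_len += VLAN_HLEN;
	}
	return ext_len;
}

int main(void)
{
	/* one 802.1Q tag followed by IPv4 (0x0800) */
	uint8_t payload[] = { 0x00, 0x64, 0x08, 0x00 };

	return skip_vlan_tags(payload, htons(ETHER_TYPE_VLAN),
			      sizeof(payload)) == VLAN_HLEN ? 0 : 1;
}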
diff --git a/drivers/net/rnp/rnp_rxtx.c b/drivers/net/rnp/rnp_rxtx.c
index da08728198..ee31f17cad 100644
--- a/drivers/net/rnp/rnp_rxtx.c
+++ b/drivers/net/rnp/rnp_rxtx.c
@@ -1205,6 +1205,7 @@ rnp_build_tx_control_desc(struct rnp_tx_queue *txq,
}
txbd->c.qword0.tunnel_len = tunnel_len;
txbd->c.qword1.cmd |= RNP_CTRL_DESC;
+ txq->tunnel_len = tunnel_len;
}
static void
@@ -1243,40 +1244,66 @@ rnp_padding_hdr_len(volatile struct rnp_tx_desc *txbd,
txbd->d.mac_ip_len |= l3_len;
}
-static void
-rnp_check_inner_eth_hdr(struct rte_mbuf *mbuf,
+#define RNP_MAX_VLAN_HDR_NUM (4)
+static int
+rnp_check_inner_eth_hdr(struct rnp_tx_queue *txq,
+ struct rte_mbuf *mbuf,
volatile struct rnp_tx_desc *txbd)
{
struct rte_ether_hdr *eth_hdr;
uint16_t inner_l2_offset = 0;
struct rte_vlan_hdr *vlan_hdr;
uint16_t ext_l2_len = 0;
- uint16_t l2_offset = 0;
+ char *vlan_start = NULL;
uint16_t l2_type;
- inner_l2_offset = mbuf->outer_l2_len + mbuf->outer_l3_len +
- sizeof(struct rte_udp_hdr) +
- sizeof(struct rte_vxlan_hdr);
+ inner_l2_offset = txq->tunnel_len;
+ if (inner_l2_offset + sizeof(struct rte_ether_hdr) > mbuf->data_len) {
+ RNP_PMD_LOG(ERR, "Invalid inner L2 offset");
+ return -EINVAL;
+ }
eth_hdr = rte_pktmbuf_mtod_offset(mbuf,
struct rte_ether_hdr *, inner_l2_offset);
l2_type = eth_hdr->ether_type;
- l2_offset = txbd->d.mac_ip_len >> RNP_TX_MAC_LEN_S;
- while (l2_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN) ||
- l2_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_QINQ)) {
- vlan_hdr = (struct rte_vlan_hdr *)
- ((char *)eth_hdr + l2_offset);
- l2_offset += RTE_VLAN_HLEN;
- ext_l2_len += RTE_VLAN_HLEN;
+ vlan_start = (char *)(eth_hdr + 1);
+ while ((l2_type == RTE_BE16(RTE_ETHER_TYPE_VLAN) ||
+ l2_type == RTE_BE16(RTE_ETHER_TYPE_QINQ)) &&
+ (ext_l2_len < RNP_MAX_VLAN_HDR_NUM * RTE_VLAN_HLEN)) {
+ if (vlan_start + ext_l2_len >
+ rte_pktmbuf_mtod(mbuf, char*) + mbuf->data_len) {
+ RNP_PMD_LOG(ERR, "VLAN header exceeds buffer");
+ break;
+ }
+ vlan_hdr = (struct rte_vlan_hdr *)(vlan_start + ext_l2_len);
l2_type = vlan_hdr->eth_proto;
+ ext_l2_len += RTE_VLAN_HLEN;
}
- txbd->d.mac_ip_len += (ext_l2_len << RNP_TX_MAC_LEN_S);
+ if (unlikely(mbuf->l3_len == 0)) {
+ switch (rte_be_to_cpu_16(l2_type)) {
+ case RTE_ETHER_TYPE_IPV4:
+ txbd->d.mac_ip_len = sizeof(struct rte_ipv4_hdr);
+ break;
+ case RTE_ETHER_TYPE_IPV6:
+ txbd->d.mac_ip_len = sizeof(struct rte_ipv6_hdr);
+ break;
+ default:
+ break;
+ }
+ } else {
+ txbd->d.mac_ip_len = mbuf->l3_len;
+ }
+ ext_l2_len += sizeof(*eth_hdr);
+ txbd->d.mac_ip_len |= (ext_l2_len << RNP_TX_MAC_LEN_S);
+
+ return 0;
}
#define RNP_TX_L4_OFFLOAD_ALL (RTE_MBUF_F_TX_SCTP_CKSUM | \
RTE_MBUF_F_TX_TCP_CKSUM | \
RTE_MBUF_F_TX_UDP_CKSUM)
static inline void
-rnp_setup_csum_offload(struct rte_mbuf *mbuf,
+rnp_setup_csum_offload(struct rnp_tx_queue *txq,
+ struct rte_mbuf *mbuf,
volatile struct rnp_tx_desc *tx_desc)
{
tx_desc->d.cmd |= (mbuf->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) ?
@@ -1296,8 +1323,6 @@ rnp_setup_csum_offload(struct rte_mbuf *mbuf,
tx_desc->d.cmd |= RNP_TX_L4TYPE_SCTP;
break;
}
- tx_desc->d.mac_ip_len = mbuf->l2_len << RNP_TX_MAC_LEN_S;
- tx_desc->d.mac_ip_len |= mbuf->l3_len;
if (mbuf->ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
tx_desc->d.cmd |= RNP_TX_IP_CKSUM_EN;
tx_desc->d.cmd |= RNP_TX_L4CKSUM_EN;
@@ -1306,9 +1331,8 @@ rnp_setup_csum_offload(struct rte_mbuf *mbuf,
}
if (mbuf->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) {
/* need inner l2 l3 lens for inner checksum offload */
- tx_desc->d.mac_ip_len &= ~RNP_TX_MAC_LEN_MASK;
- tx_desc->d.mac_ip_len |= RTE_ETHER_HDR_LEN << RNP_TX_MAC_LEN_S;
- rnp_check_inner_eth_hdr(mbuf, tx_desc);
+ if (rnp_check_inner_eth_hdr(txq, mbuf, tx_desc) < 0)
+ tx_desc->d.cmd &= ~RNP_TX_TSO_EN;
switch (mbuf->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) {
case RTE_MBUF_F_TX_TUNNEL_VXLAN:
tx_desc->d.cmd |= RNP_TX_VXLAN_TUNNEL;
@@ -1317,6 +1341,9 @@ rnp_setup_csum_offload(struct rte_mbuf *mbuf,
tx_desc->d.cmd |= RNP_TX_NVGRE_TUNNEL;
break;
}
+ } else {
+ tx_desc->d.mac_ip_len = mbuf->l2_len << RNP_TX_MAC_LEN_S;
+ tx_desc->d.mac_ip_len |= mbuf->l3_len;
}
}
@@ -1329,7 +1356,7 @@ rnp_setup_tx_offload(struct rnp_tx_queue *txq,
if (flags & RTE_MBUF_F_TX_L4_MASK ||
flags & RTE_MBUF_F_TX_TCP_SEG ||
flags & RTE_MBUF_F_TX_IP_CKSUM)
- rnp_setup_csum_offload(tx_pkt, txbd);
+ rnp_setup_csum_offload(txq, tx_pkt, txbd);
if (flags & (RTE_MBUF_F_TX_VLAN |
RTE_MBUF_F_TX_QINQ)) {
txbd->d.cmd |= RNP_TX_VLAN_VALID;
@@ -1414,6 +1441,7 @@ rnp_multiseg_xmit_pkts(void *_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
} while (m_seg != NULL);
txq->stats.obytes += tx_pkt->pkt_len;
txbd->d.cmd |= RNP_CMD_EOP;
+ txq->tunnel_len = 0;
txq->nb_tx_used = (uint16_t)txq->nb_tx_used + nb_used_bd;
txq->nb_tx_free = (uint16_t)txq->nb_tx_free - nb_used_bd;
if (txq->nb_tx_used >= txq->tx_rs_thresh) {
diff --git a/drivers/net/rnp/rnp_rxtx.h b/drivers/net/rnp/rnp_rxtx.h
index 8639f0892d..dd72ac7d3f 100644
--- a/drivers/net/rnp/rnp_rxtx.h
+++ b/drivers/net/rnp/rnp_rxtx.h
@@ -110,6 +110,7 @@ struct rnp_tx_queue {
uint16_t nb_tx_free; /* avail desc to set pkts */
uint16_t nb_tx_used; /* multiseg mbuf used num */
uint16_t last_desc_cleaned;
+ uint16_t tunnel_len;
uint16_t tx_tail;
uint16_t tx_next_dd; /* next to scan writeback dd bit */
--
2.25.1
* [PATCH v1 3/3] net/rnp: fix TSO segmentation for packets of 64KB
2025-06-18 12:11 [PATCH v0 0/3] drivers/net: fix Coverity issues Wenbo Cao
2025-06-18 12:11 ` [PATCH v1 1/3] net/rnp: add check for firmware response info Wenbo Cao
2025-06-18 12:11 ` [PATCH v1 2/3] net/rnp: fix Tunnel-TSO VLAN header untrusted loop bound Wenbo Cao
@ 2025-06-18 12:11 ` Wenbo Cao
2 siblings, 0 replies; 4+ messages in thread
From: Wenbo Cao @ 2025-06-18 12:11 UTC
To: stephen, Wenbo Cao; +Cc: dev, yaojun, stable
Packets whose TSO payload exceeds 64KB must be split across
multiple descriptors; otherwise the hardware may produce
incorrect TSO segmentation.
Fixes: 4530e70f1e32 ("net/rnp: support Tx TSO offload")
Cc: stable@dpdk.org
Signed-off-by: Wenbo Cao <caowenbo@mucse.com>
---
drivers/net/rnp/rnp_rxtx.c | 48 ++++++++++++++++++++++++++++++++++----
1 file changed, 44 insertions(+), 4 deletions(-)
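As a standalone illustration of why nb_segs alone undercounts, the
sketch below splits each data buffer into descriptor-sized chunks with
a round-up division, mirroring what rnp_calc_pkt_desc() in this patch
does. It is not DPDK code: the 4KB cap mirrors RNP_MAX_TSO_SEG_LEN, the
struct is a simplified mbuf stand-in, and DIV_ROUND_UP is defined
locally in kernel style.

/* Sketch only: a buffer larger than the per-descriptor limit is
 * emitted as several descriptors, so the per-packet descriptor count
 * needs a round-up division over every segment, not just nb_segs.
 */
#include <stdint.h>
#include <stdio.h>

#define MAX_TSO_SEG_LEN 4096u	/* mirrors RNP_MAX_TSO_SEG_LEN */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

struct seg {			/* simplified mbuf segment */
	uint32_t data_len;
	struct seg *next;
};

static uint16_t calc_pkt_desc(const struct seg *s)
{
	uint16_t count = 0;

	for (; s != NULL; s = s->next)
		count += DIV_ROUND_UP(s->data_len, MAX_TSO_SEG_LEN);
	return count;
}

int main(void)
{
	struct seg b = { .data_len = 9000, .next = NULL };
	struct seg a = { .data_len = 65536, .next = &b };

	/* 65536 -> 16 descriptors, 9000 -> 3: 19 total, vs nb_segs == 2 */
	printf("descriptors needed: %u\n", (unsigned)calc_pkt_desc(&a));
	return 0;
}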
diff --git a/drivers/net/rnp/rnp_rxtx.c b/drivers/net/rnp/rnp_rxtx.c
index ee31f17cad..81e8c6ba44 100644
--- a/drivers/net/rnp/rnp_rxtx.c
+++ b/drivers/net/rnp/rnp_rxtx.c
@@ -1157,6 +1157,21 @@ rnp_need_ctrl_desc(uint64_t flags)
return (flags & mask) ? 1 : 0;
}
+#define RNP_MAX_TSO_SEG_LEN (4096)
+static inline uint16_t
+rnp_calc_pkt_desc(struct rte_mbuf *tx_pkt)
+{
+ struct rte_mbuf *txd = tx_pkt;
+ uint16_t count = 0;
+
+ while (txd != NULL) {
+ count += DIV_ROUND_UP(txd->data_len, RNP_MAX_TSO_SEG_LEN);
+ txd = txd->next;
+ }
+
+ return count;
+}
+
static void
rnp_build_tx_control_desc(struct rnp_tx_queue *txq,
volatile struct rnp_tx_desc *txbd,
@@ -1394,6 +1409,10 @@ rnp_multiseg_xmit_pkts(void *_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
tx_pkt = tx_pkts[nb_tx];
ctx_desc_use = rnp_need_ctrl_desc(tx_pkt->ol_flags);
nb_used_bd = tx_pkt->nb_segs + ctx_desc_use;
+ if (tx_pkt->ol_flags & RTE_MBUF_F_TX_TCP_SEG)
+ nb_used_bd = (uint16_t)(rnp_calc_pkt_desc(tx_pkt) + ctx_desc_use);
+ else
+ nb_used_bd = tx_pkt->nb_segs + ctx_desc_use;
tx_last = (uint16_t)(tx_id + nb_used_bd - 1);
if (tx_last >= txq->attr.nb_desc)
tx_last = (uint16_t)(tx_last - txq->attr.nb_desc);
@@ -1416,8 +1435,11 @@ rnp_multiseg_xmit_pkts(void *_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
m_seg = tx_pkt;
first_seg = 1;
do {
+ uint16_t remain_len = 0;
+ uint64_t dma_addr = 0;
+
txbd = &txq->tx_bdr[tx_id];
- txbd->d.cmd = 0;
+ *txbd = txq->zero_desc;
txn = &txq->sw_ring[txe->next_id];
if ((first_seg && m_seg->ol_flags)) {
rnp_setup_tx_offload(txq, txbd,
@@ -1430,11 +1452,29 @@ rnp_multiseg_xmit_pkts(void *_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
rte_pktmbuf_free_seg(txe->mbuf);
txe->mbuf = NULL;
}
+ dma_addr = rnp_get_dma_addr(&txq->attr, m_seg);
+ remain_len = m_seg->data_len;
txe->mbuf = m_seg;
+ while ((tx_pkt->ol_flags & RTE_MBUF_F_TX_TCP_SEG) &&
+ unlikely(remain_len > RNP_MAX_TSO_SEG_LEN)) {
+ txbd->d.addr = dma_addr;
+ txbd->d.blen = rte_cpu_to_le_32(RNP_MAX_TSO_SEG_LEN);
+ dma_addr += RNP_MAX_TSO_SEG_LEN;
+ remain_len -= RNP_MAX_TSO_SEG_LEN;
+ txe->last_id = tx_last;
+ tx_id = txe->next_id;
+ txe = txn;
+ if (txe->mbuf) {
+ rte_pktmbuf_free_seg(txe->mbuf);
+ txe->mbuf = NULL;
+ }
+ txbd = &txq->tx_bdr[tx_id];
+ *txbd = txq->zero_desc;
+ txn = &txq->sw_ring[txe->next_id];
+ }
txe->last_id = tx_last;
- txbd->d.addr = rnp_get_dma_addr(&txq->attr, m_seg);
- txbd->d.blen = rte_cpu_to_le_32(m_seg->data_len);
- txbd->d.cmd &= ~RNP_CMD_EOP;
+ txbd->d.addr = dma_addr;
+ txbd->d.blen = rte_cpu_to_le_32(remain_len);
m_seg = m_seg->next;
tx_id = txe->next_id;
txe = txn;
--
2.25.1