DPDK patches and discussions
* [PATCH] net/iavf: add diagnostic support in TX path
@ 2023-12-21 10:12 Mingjin Ye
  2023-12-21 12:00 ` Zhang, Qi Z
  2023-12-22 10:44 ` [PATCH v2] " Mingjin Ye
  0 siblings, 2 replies; 36+ messages in thread
From: Mingjin Ye @ 2023-12-21 10:12 UTC (permalink / raw)
  To: dev; +Cc: qiming.yang, Mingjin Ye, Jingjing Wu, Beilei Xing

The only way to enable diagnostics for TX paths is to modify the
application source code, making it difficult to diagnose faults.

In this patch, the devarg option "mbuf_check" is introduced and the
parameters are configured to enable the corresponding diagnostics.

supported cases: mbuf, size, segment, offload, strict.
 1. mbuf: check for corrupted mbuf.
 2. size: check min/max packet length according to hw spec.
 3. segment: check that the number of mbuf segments does not exceed the hw limit.
 4. offload: check for any unsupported offload flag.
 5. strict: check protocol headers.

parameter format: mbuf_check=[mbuf,<case1>,<case2>]
e.g.: dpdk-testpmd -a 0000:81:01.0,mbuf_check=[mbuf,size] -- -i
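
As a side note (not part of this patch): once enabled, the error counters
surface through the standard xstats API, so an application can poll them.
A minimal sketch is below; the helper name is made up for illustration,
while the counter names are the strings this patch registers.

#include <inttypes.h>
#include <stdio.h>
#include <string.h>
#include <rte_ethdev.h>

static void
show_mdd_xstats(uint16_t port_id)
{
	int i, n = rte_eth_xstats_get_names(port_id, NULL, 0);

	if (n <= 0)
		return;

	struct rte_eth_xstat_name names[n];
	struct rte_eth_xstat xstats[n];

	if (rte_eth_xstats_get_names(port_id, names, n) != n ||
	    rte_eth_xstats_get(port_id, xstats, n) != n)
		return;

	for (i = 0; i < n; i++) {
		/* the counters added by this patch start with "mdd_" */
		if (strstr(names[i].name, "mdd_") != NULL)
			printf("%s: %" PRIu64 "\n",
			       names[i].name, xstats[i].value);
	}
}

The same counters are also visible in testpmd via "show port xstats 0".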

Signed-off-by: Mingjin Ye <mingjinx.ye@intel.com>
---
 drivers/net/iavf/iavf.h        |  26 ++-
 drivers/net/iavf/iavf_ethdev.c |  74 ++++++++
 drivers/net/iavf/iavf_rxtx.c   | 337 +++++++++++++++++++++++++++++----
 drivers/net/iavf/iavf_rxtx.h   |   8 +
 4 files changed, 402 insertions(+), 43 deletions(-)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 10868f2c30..428b20b8e4 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -113,9 +113,15 @@ struct iavf_ipsec_crypto_stats {
 	} ierrors;
 };
 
+struct iavf_mdd_stats {
+	uint64_t mdd_mbuf_err_count;
+	uint64_t mdd_pkt_err_count;
+};
+
 struct iavf_eth_xstats {
 	struct virtchnl_eth_stats eth_stats;
 	struct iavf_ipsec_crypto_stats ips_stats;
+	struct iavf_mdd_stats mdd_stats;
 };
 
 /* Structure that defines a VSI, associated with a adapter. */
@@ -299,6 +305,7 @@ enum iavf_proto_xtr_type {
 	IAVF_PROTO_XTR_MAX,
 };
 
+
 /**
  * Cache devargs parse result.
  */
@@ -309,10 +316,27 @@ struct iavf_devargs {
 	uint32_t watchdog_period;
 	int auto_reset;
 	int no_poll_on_link_down;
+	int mbuf_check;
 };
 
 struct iavf_security_ctx;
 
+struct iavf_rx_burst_elem {
+	TAILQ_ENTRY(iavf_rx_burst_elem) next;
+	eth_rx_burst_t rx_pkt_burst;
+};
+
+struct iavf_tx_burst_elem {
+	TAILQ_ENTRY(iavf_tx_burst_elem) next;
+	eth_tx_burst_t tx_pkt_burst;
+};
+
+#define IAVF_MDD_CHECK_F_TX_MBUF        (1ULL << 0)
+#define IAVF_MDD_CHECK_F_TX_SIZE        (1ULL << 1)
+#define IAVF_MDD_CHECK_F_TX_SEGMENT     (1ULL << 2)
+#define IAVF_MDD_CHECK_F_TX_OFFLOAD     (1ULL << 3)
+#define IAVF_MDD_CHECK_F_TX_STRICT      (1ULL << 4)
+
 /* Structure to store private data for each VF instance. */
 struct iavf_adapter {
 	struct iavf_hw hw;
@@ -329,7 +353,7 @@ struct iavf_adapter {
 	bool closed;
 	bool no_poll;
 	eth_rx_burst_t rx_pkt_burst;
-	eth_tx_burst_t tx_pkt_burst;
+	uint64_t mc_flags; /* mdd check flags. */
 	uint16_t fdir_ref_cnt;
 	struct iavf_devargs devargs;
 };
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index d1edb0dd5c..f2af228042 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -39,6 +39,8 @@
 #define IAVF_RESET_WATCHDOG_ARG    "watchdog_period"
 #define IAVF_ENABLE_AUTO_RESET_ARG "auto_reset"
 #define IAVF_NO_POLL_ON_LINK_DOWN_ARG "no-poll-on-link-down"
+#define IAVF_MDD_CHECK_ARG       "mbuf_check"
+
 uint64_t iavf_timestamp_dynflag;
 int iavf_timestamp_dynfield_offset = -1;
 
@@ -48,6 +50,7 @@ static const char * const iavf_valid_args[] = {
 	IAVF_RESET_WATCHDOG_ARG,
 	IAVF_ENABLE_AUTO_RESET_ARG,
 	IAVF_NO_POLL_ON_LINK_DOWN_ARG,
+	IAVF_MDD_CHECK_ARG,
 	NULL
 };
 
@@ -188,6 +191,8 @@ static const struct rte_iavf_xstats_name_off rte_iavf_stats_strings[] = {
 			_OFF_OF(ips_stats.ierrors.ipsec_length)},
 	{"inline_ipsec_crypto_ierrors_misc",
 			_OFF_OF(ips_stats.ierrors.misc)},
+	{"mdd_mbuf_error_packets", _OFF_OF(mdd_stats.mdd_mbuf_err_count)},
+	{"mdd_pkt_error_packets", _OFF_OF(mdd_stats.mdd_pkt_err_count)},
 };
 #undef _OFF_OF
 
@@ -1881,6 +1886,9 @@ static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
 {
 	int ret;
 	unsigned int i;
+	struct iavf_tx_queue *txq;
+	uint64_t mdd_mbuf_err_count = 0;
+	uint64_t mdd_pkt_err_count = 0;
 	struct iavf_adapter *adapter =
 		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
@@ -1904,6 +1912,16 @@ static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
 	if (iavf_ipsec_crypto_supported(adapter))
 		iavf_dev_update_ipsec_xstats(dev, &iavf_xtats.ips_stats);
 
+	if (adapter->devargs.mbuf_check) {
+		for (i = 0; i < dev->data->nb_tx_queues; i++) {
+			txq = dev->data->tx_queues[i];
+			mdd_mbuf_err_count += txq->mdd_mbuf_err_count;
+			mdd_pkt_err_count += txq->mdd_pkt_err_count;
+		}
+		iavf_xtats.mdd_stats.mdd_mbuf_err_count = mdd_mbuf_err_count;
+		iavf_xtats.mdd_stats.mdd_pkt_err_count = mdd_pkt_err_count;
+	}
+
 	/* loop over xstats array and values from pstats */
 	for (i = 0; i < IAVF_NB_XSTATS; i++) {
 		xstats[i].id = i;
@@ -2286,6 +2304,52 @@ iavf_parse_watchdog_period(__rte_unused const char *key, const char *value, void
 	return 0;
 }
 
+static int
+iavf_parse_mdd_checker(__rte_unused const char *key, const char *value, void *args)
+{
+	char *cur;
+	char *tmp;
+	int str_len;
+	int valid_len;
+
+	int ret = 0;
+	uint64_t *mc_flags = args;
+	char *str2 = strdup(value);
+	if (str2 == NULL)
+		return -1;
+
+	str_len = strlen(str2);
+	if (str2[0] == '[' && str2[str_len - 1] == ']') {
+		if (str_len < 3) {
+			ret = -1;
+			goto mdd_end;
+		}
+		valid_len = str_len - 2;
+		memmove(str2, str2 + 1, valid_len);
+		memset(str2 + valid_len, '\0', 2);
+	}
+	cur = strtok_r(str2, ",", &tmp);
+	while (cur != NULL) {
+		if (!strcmp(cur, "mbuf"))
+			*mc_flags |= IAVF_MDD_CHECK_F_TX_MBUF;
+		else if (!strcmp(cur, "size"))
+			*mc_flags |= IAVF_MDD_CHECK_F_TX_SIZE;
+		else if (!strcmp(cur, "segment"))
+			*mc_flags |= IAVF_MDD_CHECK_F_TX_SEGMENT;
+		else if (!strcmp(cur, "offload"))
+			*mc_flags |= IAVF_MDD_CHECK_F_TX_OFFLOAD;
+		else if (!strcmp(cur, "strict"))
+			*mc_flags |= IAVF_MDD_CHECK_F_TX_STRICT;
+		else
+			PMD_DRV_LOG(ERR, "Unsupported mdd check type: %s", cur);
+		cur = strtok_r(NULL, ",", &tmp);
+	}
+
+mdd_end:
+	free(str2);
+	return ret;
+}
+
 static int iavf_parse_devargs(struct rte_eth_dev *dev)
 {
 	struct iavf_adapter *ad =
@@ -2340,6 +2404,14 @@ static int iavf_parse_devargs(struct rte_eth_dev *dev)
 		goto bail;
 	}
 
+	ret = rte_kvargs_process(kvlist, IAVF_MDD_CHECK_ARG,
+				 &iavf_parse_mdd_checker, &ad->mc_flags);
+	if (ret)
+		goto bail;
+
+	if (ad->mc_flags)
+		ad->devargs.mbuf_check = 1;
+
 	ret = rte_kvargs_process(kvlist, IAVF_ENABLE_AUTO_RESET_ARG,
 				 &parse_bool, &ad->devargs.auto_reset);
 	if (ret)
@@ -2718,6 +2790,8 @@ iavf_dev_init(struct rte_eth_dev *eth_dev)
 	adapter->dev_data = eth_dev->data;
 	adapter->stopped = 1;
 
+	iavf_pkt_burst_init(eth_dev);
+
 	if (iavf_dev_event_handler_init())
 		goto init_vf_err;
 
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index f19aa14646..9f05cd1eee 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -18,6 +18,7 @@
 #include <rte_malloc.h>
 #include <rte_ether.h>
 #include <ethdev_driver.h>
+#include <rte_tailq.h>
 #include <rte_tcp.h>
 #include <rte_sctp.h>
 #include <rte_udp.h>
@@ -425,6 +426,8 @@ struct iavf_txq_ops iavf_txq_release_mbufs_ops[] = {
 
 };
 
+static struct iavf_pkt_burst ice_rxtx_pkt_burst[RTE_MAX_ETHPORTS];
+
 static inline void
 iavf_rxd_to_pkt_fields_by_comms_ovs(__rte_unused struct iavf_rx_queue *rxq,
 				    struct rte_mbuf *mb,
@@ -1128,6 +1131,36 @@ iavf_reset_queues(struct rte_eth_dev *dev)
 	}
 }
 
+static void
+iavf_rx_pkt_burst_cleanup(struct rte_eth_dev *dev)
+{
+	struct iavf_pkt_burst *item;
+	struct iavf_rx_burst_elem *pos;
+	struct iavf_rx_burst_elem *save_next;
+
+	item = &ice_rxtx_pkt_burst[dev->data->port_id];
+
+	RTE_TAILQ_FOREACH_SAFE(pos, &item->rx_burst_list, next, save_next) {
+		TAILQ_REMOVE(&item->rx_burst_list, pos, next);
+		rte_free(pos);
+	}
+}
+
+static void
+iavf_tx_pkt_burst_cleanup(struct rte_eth_dev *dev)
+{
+	struct iavf_pkt_burst *item;
+	struct iavf_tx_burst_elem *pos;
+	struct iavf_tx_burst_elem *save_next;
+
+	item = &ice_rxtx_pkt_burst[dev->data->port_id];
+
+	RTE_TAILQ_FOREACH_SAFE(pos, &item->tx_burst_list, next, save_next) {
+		TAILQ_REMOVE(&item->tx_burst_list, pos, next);
+		rte_free(pos);
+	}
+}
+
 void
 iavf_stop_queues(struct rte_eth_dev *dev)
 {
@@ -1157,6 +1190,9 @@ iavf_stop_queues(struct rte_eth_dev *dev)
 		PMD_DRV_LOG(WARNING, "Fail to stop queues");
 
 	iavf_reset_queues(dev);
+
+	iavf_rx_pkt_burst_cleanup(dev);
+	iavf_tx_pkt_burst_cleanup(dev);
 }
 
 #define IAVF_RX_FLEX_ERR0_BITS	\
@@ -2503,7 +2539,7 @@ iavf_fill_ctx_desc_tunnelling_field(volatile uint64_t *qw0,
 			eip_typ |= IAVF_TXD_CTX_GRE_TUNNELING;
 			break;
 		default:
-			PMD_TX_LOG(ERR, "Tunnel type not supported");
+			PMD_DRV_LOG(ERR, "Tunnel type not supported");
 			return;
 		}
 
@@ -3027,7 +3063,7 @@ iavf_check_vlan_up2tc(struct iavf_tx_queue *txq, struct rte_mbuf *m)
 	up = m->vlan_tci >> IAVF_VLAN_TAG_PCP_OFFSET;
 
 	if (!(vf->qos_cap->cap[txq->tc].tc_prio & BIT(up))) {
-		PMD_TX_LOG(ERR, "packet with vlan pcp %u cannot transmit in queue %u\n",
+		PMD_DRV_LOG(ERR, "packet with vlan pcp %u cannot transmit in queue %u\n",
 			up, txq->queue_id);
 		return -1;
 	} else {
@@ -3394,13 +3430,13 @@ check_mbuf_len(struct offload_info *info, struct rte_mbuf *m)
 {
 	if (m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) {
 		if (info->outer_l2_len != m->outer_l2_len) {
-			PMD_TX_LOG(ERR, "outer_l2_len error in mbuf. Original "
+			PMD_DRV_LOG(ERR, "outer_l2_len error in mbuf. Original "
 			"length: %hu, calculated length: %u", m->outer_l2_len,
 			info->outer_l2_len);
 			return -1;
 		}
 		if (info->outer_l3_len != m->outer_l3_len) {
-			PMD_TX_LOG(ERR, "outer_l3_len error in mbuf. Original "
+			PMD_DRV_LOG(ERR, "outer_l3_len error in mbuf. Original "
 			"length: %hu,calculated length: %u", m->outer_l3_len,
 			info->outer_l3_len);
 			return -1;
@@ -3408,19 +3444,19 @@ check_mbuf_len(struct offload_info *info, struct rte_mbuf *m)
 	}
 
 	if (info->l2_len != m->l2_len) {
-		PMD_TX_LOG(ERR, "l2_len error in mbuf. Original "
+		PMD_DRV_LOG(ERR, "l2_len error in mbuf. Original "
 		"length: %hu, calculated length: %u", m->l2_len,
 		info->l2_len);
 		return -1;
 	}
 	if (info->l3_len != m->l3_len) {
-		PMD_TX_LOG(ERR, "l3_len error in mbuf. Original "
+		PMD_DRV_LOG(ERR, "l3_len error in mbuf. Original "
 		"length: %hu, calculated length: %u", m->l3_len,
 		info->l3_len);
 		return -1;
 	}
 	if (info->l4_len != m->l4_len) {
-		PMD_TX_LOG(ERR, "l4_len error in mbuf. Original "
+		PMD_DRV_LOG(ERR, "l4_len error in mbuf. Original "
 		"length: %hu, calculated length: %u", m->l4_len,
 		info->l4_len);
 		return -1;
@@ -3438,24 +3474,24 @@ check_ether_type(struct offload_info *info, struct rte_mbuf *m)
 		if (info->outer_ethertype ==
 			rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
 			if (!(m->ol_flags & RTE_MBUF_F_TX_OUTER_IPV4)) {
-				PMD_TX_LOG(ERR, "Outer ethernet type is ipv4, "
+				PMD_DRV_LOG(ERR, "Outer ethernet type is ipv4, "
 				"tx offload missing `RTE_MBUF_F_TX_OUTER_IPV4` flag.");
 				ret = -1;
 			}
 			if (m->ol_flags & RTE_MBUF_F_TX_OUTER_IPV6) {
-				PMD_TX_LOG(ERR, "Outer ethernet type is ipv4, tx "
+				PMD_DRV_LOG(ERR, "Outer ethernet type is ipv4, tx "
 				"offload contains wrong `RTE_MBUF_F_TX_OUTER_IPV6` flag");
 				ret = -1;
 			}
 		} else if (info->outer_ethertype ==
 			rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
 			if (!(m->ol_flags & RTE_MBUF_F_TX_OUTER_IPV6)) {
-				PMD_TX_LOG(ERR, "Outer ethernet type is ipv6, "
+				PMD_DRV_LOG(ERR, "Outer ethernet type is ipv6, "
 				"tx offload missing `RTE_MBUF_F_TX_OUTER_IPV6` flag.");
 				ret = -1;
 			}
 			if (m->ol_flags & RTE_MBUF_F_TX_OUTER_IPV4) {
-				PMD_TX_LOG(ERR, "Outer ethernet type is ipv6, tx "
+				PMD_DRV_LOG(ERR, "Outer ethernet type is ipv6, tx "
 				"offload contains wrong `RTE_MBUF_F_TX_OUTER_IPV4` flag");
 				ret = -1;
 			}
@@ -3465,24 +3501,24 @@ check_ether_type(struct offload_info *info, struct rte_mbuf *m)
 	if (info->ethertype ==
 		rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
 		if (!(m->ol_flags & RTE_MBUF_F_TX_IPV4)) {
-			PMD_TX_LOG(ERR, "Ethernet type is ipv4, tx offload "
+			PMD_DRV_LOG(ERR, "Ethernet type is ipv4, tx offload "
 			"missing `RTE_MBUF_F_TX_IPV4` flag.");
 			ret = -1;
 		}
 		if (m->ol_flags & RTE_MBUF_F_TX_IPV6) {
-			PMD_TX_LOG(ERR, "Ethernet type is ipv4, tx "
+			PMD_DRV_LOG(ERR, "Ethernet type is ipv4, tx "
 			"offload contains wrong `RTE_MBUF_F_TX_IPV6` flag");
 			ret = -1;
 		}
 	} else if (info->ethertype ==
 		rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
 		if (!(m->ol_flags & RTE_MBUF_F_TX_IPV6)) {
-			PMD_TX_LOG(ERR, "Ethernet type is ipv6, tx offload "
+			PMD_DRV_LOG(ERR, "Ethernet type is ipv6, tx offload "
 			"missing `RTE_MBUF_F_TX_IPV6` flag.");
 			ret = -1;
 		}
 		if (m->ol_flags & RTE_MBUF_F_TX_IPV4) {
-			PMD_TX_LOG(ERR, "Ethernet type is ipv6, tx offload "
+			PMD_DRV_LOG(ERR, "Ethernet type is ipv6, tx offload "
 			"contains wrong `RTE_MBUF_F_TX_IPV4` flag");
 			ret = -1;
 		}
@@ -3512,12 +3548,12 @@ iavf_check_mbuf(struct rte_mbuf *m)
 		parse_gtp(udp_hdr, &info);
 		if (info.is_tunnel) {
 			if (!tunnel_type) {
-				PMD_TX_LOG(ERR, "gtp tunnel packet missing tx "
+				PMD_DRV_LOG(ERR, "gtp tunnel packet missing tx "
 				"offload missing `RTE_MBUF_F_TX_TUNNEL_GTP` flag.");
 				return -1;
 			}
 			if (tunnel_type != RTE_MBUF_F_TX_TUNNEL_GTP) {
-				PMD_TX_LOG(ERR, "gtp tunnel packet, tx offload has wrong "
+				PMD_DRV_LOG(ERR, "gtp tunnel packet, tx offload has wrong "
 				"`%s` flag, correct is `RTE_MBUF_F_TX_TUNNEL_GTP` flag",
 				rte_get_tx_ol_flag_name(tunnel_type));
 				return -1;
@@ -3527,12 +3563,12 @@ iavf_check_mbuf(struct rte_mbuf *m)
 		parse_vxlan_gpe(udp_hdr, &info);
 		if (info.is_tunnel) {
 			if (!tunnel_type) {
-				PMD_TX_LOG(ERR, "vxlan gpe tunnel packet missing tx "
+				PMD_DRV_LOG(ERR, "vxlan gpe tunnel packet missing tx "
 				"offload missing `RTE_MBUF_F_TX_TUNNEL_VXLAN_GPE` flag.");
 				return -1;
 			}
 			if (tunnel_type != RTE_MBUF_F_TX_TUNNEL_VXLAN_GPE) {
-				PMD_TX_LOG(ERR, "vxlan gpe tunnel packet, tx offload has "
+				PMD_DRV_LOG(ERR, "vxlan gpe tunnel packet, tx offload has "
 				"wrong `%s` flag, correct is "
 				"`RTE_MBUF_F_TX_TUNNEL_VXLAN_GPE` flag",
 				rte_get_tx_ol_flag_name(tunnel_type));
@@ -3543,12 +3579,12 @@ iavf_check_mbuf(struct rte_mbuf *m)
 		parse_vxlan(udp_hdr, &info);
 		if (info.is_tunnel) {
 			if (!tunnel_type) {
-				PMD_TX_LOG(ERR, "vxlan tunnel packet missing tx "
+				PMD_DRV_LOG(ERR, "vxlan tunnel packet missing tx "
 				"offload missing `RTE_MBUF_F_TX_TUNNEL_VXLAN` flag.");
 				return -1;
 			}
 			if (tunnel_type != RTE_MBUF_F_TX_TUNNEL_VXLAN) {
-				PMD_TX_LOG(ERR, "vxlan tunnel packet, tx offload has "
+				PMD_DRV_LOG(ERR, "vxlan tunnel packet, tx offload has "
 				"wrong `%s` flag, correct is "
 				"`RTE_MBUF_F_TX_TUNNEL_VXLAN` flag",
 				rte_get_tx_ol_flag_name(tunnel_type));
@@ -3559,12 +3595,12 @@ iavf_check_mbuf(struct rte_mbuf *m)
 		parse_geneve(udp_hdr, &info);
 		if (info.is_tunnel) {
 			if (!tunnel_type) {
-				PMD_TX_LOG(ERR, "geneve tunnel packet missing tx "
+				PMD_DRV_LOG(ERR, "geneve tunnel packet missing tx "
 				"offload missing `RTE_MBUF_F_TX_TUNNEL_GENEVE` flag.");
 				return -1;
 			}
 			if (tunnel_type != RTE_MBUF_F_TX_TUNNEL_GENEVE) {
-				PMD_TX_LOG(ERR, "geneve tunnel packet, tx offload has "
+				PMD_DRV_LOG(ERR, "geneve tunnel packet, tx offload has "
 				"wrong `%s` flag, correct is "
 				"`RTE_MBUF_F_TX_TUNNEL_GENEVE` flag",
 				rte_get_tx_ol_flag_name(tunnel_type));
@@ -3575,7 +3611,7 @@ iavf_check_mbuf(struct rte_mbuf *m)
 		/* Always keep last. */
 		if (unlikely(RTE_ETH_IS_TUNNEL_PKT(m->packet_type)
 			!= 0)) {
-			PMD_TX_LOG(ERR, "Unknown tunnel packet. UDP dst port: %hu",
+			PMD_DRV_LOG(ERR, "Unknown tunnel packet. UDP dst port: %hu",
 				udp_hdr->dst_port);
 				return -1;
 		}
@@ -3587,12 +3623,12 @@ iavf_check_mbuf(struct rte_mbuf *m)
 		parse_gre(gre_hdr, &info);
 		if (info.is_tunnel) {
 			if (!tunnel_type) {
-				PMD_TX_LOG(ERR, "gre tunnel packet missing tx "
+				PMD_DRV_LOG(ERR, "gre tunnel packet missing tx "
 				"offload missing `RTE_MBUF_F_TX_TUNNEL_GRE` flag.");
 				return -1;
 			}
 			if (tunnel_type != RTE_MBUF_F_TX_TUNNEL_GRE) {
-				PMD_TX_LOG(ERR, "gre tunnel packet, tx offload has "
+				PMD_DRV_LOG(ERR, "gre tunnel packet, tx offload has "
 				"wrong `%s` flag, correct is "
 				"`RTE_MBUF_F_TX_TUNNEL_GRE` flag",
 				rte_get_tx_ol_flag_name(tunnel_type));
@@ -3607,12 +3643,12 @@ iavf_check_mbuf(struct rte_mbuf *m)
 		parse_encap_ip(encap_ip_hdr, &info);
 		if (info.is_tunnel) {
 			if (!tunnel_type) {
-				PMD_TX_LOG(ERR, "Ipip tunnel packet missing tx "
+				PMD_DRV_LOG(ERR, "Ipip tunnel packet missing tx "
 				"offload missing `RTE_MBUF_F_TX_TUNNEL_IPIP` flag.");
 				return -1;
 			}
 			if (tunnel_type != RTE_MBUF_F_TX_TUNNEL_IPIP) {
-				PMD_TX_LOG(ERR, "Ipip tunnel packet, tx offload has "
+				PMD_DRV_LOG(ERR, "Ipip tunnel packet, tx offload has "
 				"wrong `%s` flag, correct is "
 				"`RTE_MBUF_F_TX_TUNNEL_IPIP` flag",
 				rte_get_tx_ol_flag_name(tunnel_type));
@@ -3629,6 +3665,103 @@ iavf_check_mbuf(struct rte_mbuf *m)
 	return check_ether_type(&info, m);
 }
 
+/* Tx MDD check */
+static uint16_t
+iavf_xmit_pkts_mdd(void *tx_queue, struct rte_mbuf **tx_pkts,
+	      uint16_t nb_pkts)
+{
+	struct iavf_tx_queue *txq = tx_queue;
+	struct rte_mbuf *mb;
+	uint16_t idx;
+	const char *reason = NULL;
+	struct iavf_adapter *adapter = txq->vsi->adapter;
+	uint64_t mdd_mbuf_err_count = 0;
+	uint64_t mdd_pkt_err_count = 0;
+	uint64_t ol_flags;
+
+	for (idx = 0; idx < nb_pkts; idx++) {
+		mb = tx_pkts[idx];
+		ol_flags = mb->ol_flags;
+
+		if ((adapter->mc_flags & IAVF_MDD_CHECK_F_TX_MBUF) &&
+			(rte_mbuf_check(mb, 1, &reason) != 0)) {
+			PMD_DRV_LOG(ERR, "INVALID mbuf: %s\n", reason);
+			mdd_mbuf_err_count++;
+			continue;
+		}
+
+		if ((adapter->mc_flags & IAVF_MDD_CHECK_F_TX_SIZE) &&
+			(mb->data_len < IAVF_TX_MIN_PKT_LEN ||
+			mb->data_len > adapter->vf.max_pkt_len)) {
+			PMD_DRV_LOG(ERR, "INVALID mbuf: data_len (%u) is out "
+			"of range, reasonable range (%d - %u)\n", mb->data_len,
+			IAVF_TX_MIN_PKT_LEN, adapter->vf.max_pkt_len);
+			mdd_pkt_err_count++;
+			continue;
+		}
+
+		if (adapter->mc_flags & IAVF_MDD_CHECK_F_TX_SEGMENT) {
+			/* Check condition for nb_segs > IAVF_TX_MAX_MTU_SEG. */
+			if (!(ol_flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG))) {
+				if (mb->nb_segs > IAVF_TX_MAX_MTU_SEG) {
+					PMD_DRV_LOG(ERR, "INVALID mbuf: nb_segs (%d) exceeds "
+					"HW limit, maximum allowed value is %d\n", mb->nb_segs,
+					IAVF_TX_MAX_MTU_SEG);
+					mdd_pkt_err_count++;
+					continue;
+				}
+			} else if ((mb->tso_segsz < IAVF_MIN_TSO_MSS) ||
+				(mb->tso_segsz > IAVF_MAX_TSO_MSS)) {
+				/* MSS outside the range are considered malicious */
+				PMD_DRV_LOG(ERR, "INVALID mbuf: tso_segsz (%u) is out "
+				"of range, reasonable range (%d - %u)\n", mb->tso_segsz,
+				IAVF_MIN_TSO_MSS, IAVF_MAX_TSO_MSS);
+				mdd_pkt_err_count++;
+				continue;
+			} else if (mb->nb_segs > txq->nb_tx_desc) {
+				PMD_DRV_LOG(ERR, "INVALID mbuf: nb_segs out "
+				"of ring length\n");
+				mdd_pkt_err_count++;
+				continue;
+			}
+		}
+
+		if (adapter->mc_flags & IAVF_MDD_CHECK_F_TX_OFFLOAD) {
+			if (ol_flags & IAVF_TX_OFFLOAD_NOTSUP_MASK) {
+				PMD_DRV_LOG(ERR, "INVALID mbuf: TX offload "
+				"is not supported\n");
+				mdd_pkt_err_count++;
+				continue;
+			}
+
+			if (!rte_validate_tx_offload(mb)) {
+				PMD_DRV_LOG(ERR, "INVALID mbuf: TX offload "
+				"setup error\n");
+				mdd_pkt_err_count++;
+				continue;
+			}
+		}
+
+		if (adapter->mc_flags & IAVF_MDD_CHECK_F_TX_STRICT &&
+			iavf_check_mbuf(mb)) {
+			mdd_pkt_err_count++;
+			continue;
+		}
+	}
+
+	if (mdd_mbuf_err_count || mdd_pkt_err_count) {
+		if (mdd_mbuf_err_count)
+			rte_atomic_fetch_add_explicit(&txq->mdd_mbuf_err_count,
+					mdd_mbuf_err_count, rte_memory_order_release);
+		if (mdd_pkt_err_count)
+			rte_atomic_fetch_add_explicit(&txq->mdd_pkt_err_count,
+					mdd_pkt_err_count, rte_memory_order_release);
+		return 0;
+	}
+
+	return idx;
+}
+
 /* TX prep functions */
 uint16_t
 iavf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
@@ -3720,15 +3853,135 @@ iavf_recv_pkts_no_poll(void *rx_queue, struct rte_mbuf **rx_pkts,
 }
 
 static uint16_t
-iavf_xmit_pkts_no_poll(void *tx_queue, struct rte_mbuf **tx_pkts,
+iavf_xmit_pkts_no_poll(void *tx_queue, __rte_unused struct rte_mbuf **tx_pkts,
 				uint16_t nb_pkts)
 {
 	struct iavf_tx_queue *txq = tx_queue;
 	if (!txq->vsi || txq->vsi->adapter->no_poll)
 		return 0;
 
-	return txq->vsi->adapter->tx_pkt_burst(tx_queue,
-								tx_pkts, nb_pkts);
+	return nb_pkts;
+}
+
+static int __rte_unused
+iavf_rx_pkt_burst_insert(struct rte_eth_dev *dev, eth_rx_burst_t func)
+{
+	struct iavf_rx_burst_elem *elem;
+	struct iavf_pkt_burst *item;
+
+	if (!func) {
+		PMD_DRV_LOG(ERR, "RX functions cannot be NULL");
+		return -1;
+	}
+
+	elem = rte_malloc(NULL, sizeof(*elem), 0);
+	if (!elem) {
+		PMD_DRV_LOG(ERR, "Unable to allocate memory");
+		return -1;
+	}
+
+	item = &ice_rxtx_pkt_burst[dev->data->port_id];
+	elem->rx_pkt_burst = func;
+	TAILQ_INSERT_TAIL(&item->rx_burst_list, elem, next);
+
+	return 0;
+}
+
+static int
+iavf_tx_pkt_burst_insert(struct rte_eth_dev *dev, eth_tx_burst_t func)
+{
+	struct iavf_tx_burst_elem *elem;
+	struct iavf_pkt_burst *item;
+
+	if (!func) {
+		PMD_DRV_LOG(ERR, "TX functions cannot be NULL");
+		return -1;
+	}
+
+	elem = rte_malloc(NULL, sizeof(*elem), 0);
+	if (!elem) {
+		PMD_DRV_LOG(ERR, "Unable to allocate memory");
+		return -1;
+	}
+
+	item = &ice_rxtx_pkt_burst[dev->data->port_id];
+	elem->tx_pkt_burst = func;
+	TAILQ_INSERT_TAIL(&item->tx_burst_list, elem, next);
+
+	return 0;
+}
+
+static uint16_t
+iavf_xmit_pkts_chain(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+	struct iavf_tx_queue *txq = tx_queue;
+	struct iavf_adapter *adapter = txq->vsi->adapter;
+	struct iavf_tx_burst_elem *pos;
+	struct iavf_tx_burst_elem *save_next;
+	struct iavf_pkt_burst *item;
+	uint16_t ret;
+
+	item = &ice_rxtx_pkt_burst[adapter->dev_data->port_id];
+	RTE_TAILQ_FOREACH_SAFE(pos, &item->tx_burst_list, next, save_next) {
+		ret = pos->tx_pkt_burst(tx_queue, tx_pkts, nb_pkts);
+		if (nb_pkts != ret)
+			break;
+	}
+
+	return ret;
+}
+
+/* choose tx interceptors*/
+static void
+iavf_set_tx_interceptors(struct rte_eth_dev *dev)
+{
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	eth_tx_burst_t tx_pkt_burst;
+	int err;
+	uint16_t mdd_check = adapter->devargs.mbuf_check;
+	uint16_t no_poll_on_link_down = adapter->devargs.no_poll_on_link_down;
+
+	if (!mdd_check && !no_poll_on_link_down)
+		return;
+
+	/* Replace tx_pkt_burst in struct rte_eth_dev to
+	 * intercept the purpose of the default TX path.
+	 * All tasks are done at iavf_xmit_pkts_chain.
+	 */
+	tx_pkt_burst = dev->tx_pkt_burst;
+	dev->tx_pkt_burst = iavf_xmit_pkts_chain;
+
+	/* Register all interceptors. We need to pay
+	 * attention to the order of precedence.
+	 */
+	if (mdd_check) {
+		err = iavf_tx_pkt_burst_insert(dev, iavf_xmit_pkts_mdd);
+		if (!err)
+			PMD_DRV_LOG(DEBUG, "Register diagnostics Tx callback (port=%d).",
+					    dev->data->port_id);
+		else
+			PMD_DRV_LOG(ERR, "Failed to register diagnostics TX callback (port %d).",
+					    dev->data->port_id);
+	}
+
+	if (no_poll_on_link_down) {
+		err = iavf_tx_pkt_burst_insert(dev, iavf_xmit_pkts_no_poll);
+		if (!err)
+			PMD_DRV_LOG(DEBUG, "Register no poll Tx callback (port=%d).",
+					    dev->data->port_id);
+		else
+			PMD_DRV_LOG(ERR, "Failed to register no poll TX callback (port %d).",
+					    dev->data->port_id);
+	}
+
+	err = iavf_tx_pkt_burst_insert(dev, tx_pkt_burst);
+	if (!err)
+		PMD_DRV_LOG(DEBUG, "Register PMD Tx callback (port=%d).",
+					dev->data->port_id);
+	else
+		PMD_DRV_LOG(ERR, "Failed to register PMD TX callback (port %d).",
+					dev->data->port_id);
 }
 
 /* choose rx function*/
@@ -3973,9 +4226,6 @@ iavf_set_rx_function(struct rte_eth_dev *dev)
 void
 iavf_set_tx_function(struct rte_eth_dev *dev)
 {
-	struct iavf_adapter *adapter =
-		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
-	int no_poll_on_link_down = adapter->devargs.no_poll_on_link_down;
 #ifdef RTE_ARCH_X86
 	struct iavf_tx_queue *txq;
 	int i;
@@ -4062,10 +4312,7 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
 #endif
 		}
 
-		if (no_poll_on_link_down) {
-			adapter->tx_pkt_burst = dev->tx_pkt_burst;
-			dev->tx_pkt_burst = iavf_xmit_pkts_no_poll;
-		}
+		iavf_set_tx_interceptors(dev);
 		return;
 	}
 
@@ -4076,10 +4323,16 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
 	dev->tx_pkt_burst = iavf_xmit_pkts;
 	dev->tx_pkt_prepare = iavf_prep_pkts;
 
-	if (no_poll_on_link_down) {
-		adapter->tx_pkt_burst = dev->tx_pkt_burst;
-		dev->tx_pkt_burst = iavf_xmit_pkts_no_poll;
-	}
+	iavf_set_tx_interceptors(dev);
+}
+
+void iavf_pkt_burst_init(struct rte_eth_dev *dev)
+{
+	struct iavf_pkt_burst *item;
+
+	item = &ice_rxtx_pkt_burst[dev->data->port_id];
+	TAILQ_INIT(&item->rx_burst_list);
+	TAILQ_INIT(&item->tx_burst_list);
 }
 
 static int
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index f432f9d956..fa948aeb95 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -203,6 +203,10 @@ struct iavf_txq_ops {
 	void (*release_mbufs)(struct iavf_tx_queue *txq);
 };
 
+struct iavf_pkt_burst {
+	TAILQ_HEAD(rx_pkt_burst_list, iavf_rx_burst_elem) rx_burst_list;
+	TAILQ_HEAD(tx_pkt_burst_list, iavf_tx_burst_elem) tx_burst_list;
+};
 
 struct iavf_rx_queue_stats {
 	uint64_t reserved;
@@ -297,6 +301,9 @@ struct iavf_tx_queue {
 	uint16_t next_rs;              /* next to check DD,  for VPMD */
 	uint16_t ipsec_crypto_pkt_md_offset;
 
+	uint64_t mdd_mbuf_err_count;
+	uint64_t mdd_pkt_err_count;
+
 	bool q_set;                    /* if rx queue has been configured */
 	bool tx_deferred_start;        /* don't start this queue in dev start */
 	const struct iavf_txq_ops *ops;
@@ -669,6 +676,7 @@ uint16_t iavf_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		       uint16_t nb_pkts);
 void iavf_set_rx_function(struct rte_eth_dev *dev);
 void iavf_set_tx_function(struct rte_eth_dev *dev);
+void iavf_pkt_burst_init(struct rte_eth_dev *dev);
 void iavf_dev_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 			  struct rte_eth_rxq_info *qinfo);
 void iavf_dev_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
-- 
2.25.1



* RE: [PATCH] net/iavf: add diagnostic support in TX path
  2023-12-21 10:12 [PATCH] net/iavf: add diagnostic support in TX path Mingjin Ye
@ 2023-12-21 12:00 ` Zhang, Qi Z
  2023-12-22 10:44 ` [PATCH v2] " Mingjin Ye
  1 sibling, 0 replies; 36+ messages in thread
From: Zhang, Qi Z @ 2023-12-21 12:00 UTC (permalink / raw)
  To: Ye, MingjinX, dev; +Cc: Yang, Qiming, Ye, MingjinX, Wu, Jingjing, Xing, Beilei



> -----Original Message-----
> From: Mingjin Ye <mingjinx.ye@intel.com>
> Sent: Thursday, December 21, 2023 6:12 PM
> To: dev@dpdk.org
> Cc: Yang, Qiming <qiming.yang@intel.com>; Ye, MingjinX
> <mingjinx.ye@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>; Xing, Beilei
> <beilei.xing@intel.com>
> Subject: [PATCH] net/iavf: add diagnostic support in TX path
> 
> The only way to enable diagnostics for TX paths is to modify the application
> source code, making it difficult to diagnose faults.
> 
> In this patch, the devarg option "mbuf_check" is introduced and the
> parameters are configured to enable the corresponding diagnostics.
> 
> supported cases: mbuf, size, segment, offload, strict.
>  1. mbuf: check for corrupted mbuf.
>  2. size: check min/max packet length according to hw spec.
>  3. segment: check that the number of mbuf segments does not exceed the hw limit.
>  4. offload: check for any unsupported offload flag.
>  5. strict: check protocol headers.
> 
> parameter format: mbuf_check=[mbuf,<case1>,<case2>]
> e.g.: dpdk-testpmd -a 0000:81:01.0,mbuf_check=[mbuf,size] -- -i
> 
> Signed-off-by: Mingjin Ye <mingjinx.ye@intel.com>



> +
> +static uint16_t
> +iavf_xmit_pkts_chain(void *tx_queue, struct rte_mbuf **tx_pkts,
> +uint16_t nb_pkts) {
> +	struct iavf_tx_queue *txq = tx_queue;
> +	struct iavf_adapter *adapter = txq->vsi->adapter;
> +	struct iavf_tx_burst_elem *pos;
> +	struct iavf_tx_burst_elem *save_next;
> +	struct iavf_pkt_burst *item;
> +	uint16_t ret;
> +
> +	item = &ice_rxtx_pkt_burst[adapter->dev_data->port_id];
> +	RTE_TAILQ_FOREACH_SAFE(pos, &item->tx_burst_list, next,
> save_next) {
> +		ret = pos->tx_pkt_burst(tx_queue, tx_pkts, nb_pkts);
> +		if (nb_pkts != ret)

This is not correct.

> +			break;
> +	}
> +
> +	return ret;
> +}
> +
> 
>  /* choose rx function*/
> @@ -3973,9 +4226,6 @@ iavf_set_rx_function(struct rte_eth_dev *dev)
> void  iavf_set_tx_function(struct rte_eth_dev *dev)  {
> -	struct iavf_adapter *adapter =
> -		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
> -	int no_poll_on_link_down = adapter-
> >devargs.no_poll_on_link_down;
>  #ifdef RTE_ARCH_X86
>  	struct iavf_tx_queue *txq;
>  	int i;
> @@ -4062,10 +4312,7 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
> #endif
>  		}
> 
> -		if (no_poll_on_link_down) {
> -			adapter->tx_pkt_burst = dev->tx_pkt_burst;
> -			dev->tx_pkt_burst = iavf_xmit_pkts_no_poll;
> -		}
> +		iavf_set_tx_interceptors(dev);

If no_poll or diagnose mode is not enabled, why do we still need the interceptor?

Btw, it's too heavy to use a chain here, and I didn't even see an Rx chain implementation.

Why not simplify it as below:

adapter->tx_pkt_burst = dev->tx_pkt_burst;
if (no_poll_on_link_down)
	dev->tx_pkt_burst = iavf_xmit_pkts_no_poll;
else if (diagnose mode)
	dev->tx_pkt_burst = iavf_xmit_pkt_mdd_check;

Then iavf_xmit_pkt_mdd_check calls iavf_xmit_pkts_no_poll or adapter->tx_pkt_burst.
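
For illustration, a minimal sketch of that wrapper; this is an assumption,
not patch code: it keeps the adapter->tx_pkt_burst field (which the v1
patch removes) to hold the saved raw burst function, and reuses the
patch's iavf_xmit_pkts_mdd as the checker.

static uint16_t
iavf_xmit_pkts_mdd_check(void *tx_queue, struct rte_mbuf **tx_pkts,
			 uint16_t nb_pkts)
{
	struct iavf_tx_queue *txq = tx_queue;
	struct iavf_adapter *adapter = txq->vsi->adapter;

	/* Run the diagnostics first; iavf_xmit_pkts_mdd returns 0 when a
	 * fault is found and the burst has already been counted as errors.
	 */
	if (nb_pkts != 0 &&
	    iavf_xmit_pkts_mdd(tx_queue, tx_pkts, nb_pkts) == 0)
		return 0;

	/* Forward to the saved raw burst function. */
	return adapter->tx_pkt_burst(tx_queue, tx_pkts, nb_pkts);
}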







* [PATCH v2] net/iavf: add diagnostic support in TX path
  2023-12-21 10:12 [PATCH] net/iavf: add diagnostic support in TX path Mingjin Ye
  2023-12-21 12:00 ` Zhang, Qi Z
@ 2023-12-22 10:44 ` Mingjin Ye
  2023-12-22 11:37   ` Zhang, Qi Z
  2023-12-26 10:07   ` [PATCH v3] " Mingjin Ye
  1 sibling, 2 replies; 36+ messages in thread
From: Mingjin Ye @ 2023-12-22 10:44 UTC (permalink / raw)
  To: dev; +Cc: qiming.yang, Mingjin Ye, Jingjing Wu, Beilei Xing

The only way to enable diagnostics for TX paths is to modify the
application source code, making it difficult to diagnose faults.

In this patch, the devarg option "mbuf_check" is introduced and the
parameters are configured to enable the corresponding diagnostics.

supported cases: mbuf, size, segment, offload, strict.
 1. mbuf: check for corrupted mbuf.
 2. size: check min/max packet length according to hw spec.
 3. segment: check that the number of mbuf segments does not exceed the hw limit.
 4. offload: check for any unsupported offload flag.
 5. strict: check protocol headers.

parameter format: mbuf_check=[mbuf,<case1>,<case2>]
e.g.: dpdk-testpmd -a 0000:81:01.0,mbuf_check=[mbuf,size] -- -i

Signed-off-by: Mingjin Ye <mingjinx.ye@intel.com>
---
v2: Remove call chain.
---
 drivers/net/iavf/iavf.h        |  25 ++++-
 drivers/net/iavf/iavf_ethdev.c |  69 ++++++++++++
 drivers/net/iavf/iavf_rxtx.c   | 186 ++++++++++++++++++++++++++-------
 drivers/net/iavf/iavf_rxtx.h   |   6 ++
 4 files changed, 246 insertions(+), 40 deletions(-)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 10868f2c30..ab80388422 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -113,9 +113,14 @@ struct iavf_ipsec_crypto_stats {
 	} ierrors;
 };
 
+struct iavf_mdd_stats {
+	uint64_t tx_pkt_errors;
+};
+
 struct iavf_eth_xstats {
 	struct virtchnl_eth_stats eth_stats;
 	struct iavf_ipsec_crypto_stats ips_stats;
+	struct iavf_mdd_stats mdd_stats;
 };
 
 /* Structure that defines a VSI, associated with a adapter. */
@@ -309,10 +314,27 @@ struct iavf_devargs {
 	uint32_t watchdog_period;
 	int auto_reset;
 	int no_poll_on_link_down;
+	int mbuf_check;
 };
 
 struct iavf_security_ctx;
 
+struct iavf_rx_burst_elem {
+	TAILQ_ENTRY(iavf_rx_burst_elem) next;
+	eth_rx_burst_t rx_pkt_burst;
+};
+
+struct iavf_tx_burst_elem {
+	TAILQ_ENTRY(iavf_tx_burst_elem) next;
+	eth_tx_burst_t tx_pkt_burst;
+};
+
+#define IAVF_MDD_CHECK_F_TX_MBUF        (1ULL << 0)
+#define IAVF_MDD_CHECK_F_TX_SIZE        (1ULL << 1)
+#define IAVF_MDD_CHECK_F_TX_SEGMENT     (1ULL << 2)
+#define IAVF_MDD_CHECK_F_TX_OFFLOAD     (1ULL << 3)
+#define IAVF_MDD_CHECK_F_TX_STRICT      (1ULL << 4)
+
 /* Structure to store private data for each VF instance. */
 struct iavf_adapter {
 	struct iavf_hw hw;
@@ -328,8 +350,7 @@ struct iavf_adapter {
 	bool stopped;
 	bool closed;
 	bool no_poll;
-	eth_rx_burst_t rx_pkt_burst;
-	eth_tx_burst_t tx_pkt_burst;
+	uint64_t mc_flags; /* mdd check flags. */
 	uint16_t fdir_ref_cnt;
 	struct iavf_devargs devargs;
 };
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index d1edb0dd5c..6f4dd54602 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -39,6 +39,8 @@
 #define IAVF_RESET_WATCHDOG_ARG    "watchdog_period"
 #define IAVF_ENABLE_AUTO_RESET_ARG "auto_reset"
 #define IAVF_NO_POLL_ON_LINK_DOWN_ARG "no-poll-on-link-down"
+#define IAVF_MDD_CHECK_ARG       "mbuf_check"
+
 uint64_t iavf_timestamp_dynflag;
 int iavf_timestamp_dynfield_offset = -1;
 
@@ -48,6 +50,7 @@ static const char * const iavf_valid_args[] = {
 	IAVF_RESET_WATCHDOG_ARG,
 	IAVF_ENABLE_AUTO_RESET_ARG,
 	IAVF_NO_POLL_ON_LINK_DOWN_ARG,
+	IAVF_MDD_CHECK_ARG,
 	NULL
 };
 
@@ -174,6 +177,7 @@ static const struct rte_iavf_xstats_name_off rte_iavf_stats_strings[] = {
 	{"tx_broadcast_packets", _OFF_OF(eth_stats.tx_broadcast)},
 	{"tx_dropped_packets", _OFF_OF(eth_stats.tx_discards)},
 	{"tx_error_packets", _OFF_OF(eth_stats.tx_errors)},
+	{"tx_mdd_error_packets", _OFF_OF(mdd_stats.tx_pkt_errors)},
 
 	{"inline_ipsec_crypto_ipackets", _OFF_OF(ips_stats.icount)},
 	{"inline_ipsec_crypto_ibytes", _OFF_OF(ips_stats.ibytes)},
@@ -1881,6 +1885,8 @@ static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
 {
 	int ret;
 	unsigned int i;
+	struct iavf_tx_queue *txq;
+	uint64_t mdd_pkt_errors = 0;
 	struct iavf_adapter *adapter =
 		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
@@ -1904,6 +1910,15 @@ static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
 	if (iavf_ipsec_crypto_supported(adapter))
 		iavf_dev_update_ipsec_xstats(dev, &iavf_xtats.ips_stats);
 
+	if (adapter->devargs.mbuf_check) {
+		for (i = 0; i < dev->data->nb_tx_queues; i++) {
+			txq = dev->data->tx_queues[i];
+			mdd_pkt_errors += __atomic_load_n(&txq->mdd_pkt_errors,
+				__ATOMIC_RELAXED);
+		}
+		iavf_xtats.mdd_stats.tx_pkt_errors = mdd_pkt_errors;
+	}
+
 	/* loop over xstats array and values from pstats */
 	for (i = 0; i < IAVF_NB_XSTATS; i++) {
 		xstats[i].id = i;
@@ -2286,6 +2301,52 @@ iavf_parse_watchdog_period(__rte_unused const char *key, const char *value, void
 	return 0;
 }
 
+static int
+iavf_parse_mdd_checker(__rte_unused const char *key, const char *value, void *args)
+{
+	char *cur;
+	char *tmp;
+	int str_len;
+	int valid_len;
+
+	int ret = 0;
+	uint64_t *mc_flags = args;
+	char *str2 = strdup(value);
+	if (str2 == NULL)
+		return -1;
+
+	str_len = strlen(str2);
+	if (str2[0] == '[' && str2[str_len - 1] == ']') {
+		if (str_len < 3) {
+			ret = -1;
+			goto mdd_end;
+		}
+		valid_len = str_len - 2;
+		memmove(str2, str2 + 1, valid_len);
+		memset(str2 + valid_len, '\0', 2);
+	}
+	cur = strtok_r(str2, ",", &tmp);
+	while (cur != NULL) {
+		if (!strcmp(cur, "mbuf"))
+			*mc_flags |= IAVF_MDD_CHECK_F_TX_MBUF;
+		else if (!strcmp(cur, "size"))
+			*mc_flags |= IAVF_MDD_CHECK_F_TX_SIZE;
+		else if (!strcmp(cur, "segment"))
+			*mc_flags |= IAVF_MDD_CHECK_F_TX_SEGMENT;
+		else if (!strcmp(cur, "offload"))
+			*mc_flags |= IAVF_MDD_CHECK_F_TX_OFFLOAD;
+		else if (!strcmp(cur, "strict"))
+			*mc_flags |= IAVF_MDD_CHECK_F_TX_STRICT;
+		else
+			PMD_DRV_LOG(ERR, "Unsupported mdd check type: %s", cur);
+		cur = strtok_r(NULL, ",", &tmp);
+	}
+
+mdd_end:
+	free(str2);
+	return ret;
+}
+
 static int iavf_parse_devargs(struct rte_eth_dev *dev)
 {
 	struct iavf_adapter *ad =
@@ -2340,6 +2401,14 @@ static int iavf_parse_devargs(struct rte_eth_dev *dev)
 		goto bail;
 	}
 
+	ret = rte_kvargs_process(kvlist, IAVF_MDD_CHECK_ARG,
+				 &iavf_parse_mdd_checker, &ad->mc_flags);
+	if (ret)
+		goto bail;
+
+	if (ad->mc_flags)
+		ad->devargs.mbuf_check = 1;
+
 	ret = rte_kvargs_process(kvlist, IAVF_ENABLE_AUTO_RESET_ARG,
 				 &parse_bool, &ad->devargs.auto_reset);
 	if (ret)
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index f19aa14646..86b7af3263 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -425,6 +425,8 @@ struct iavf_txq_ops iavf_txq_release_mbufs_ops[] = {
 
 };
 
+static struct iavf_pkt_burst iavf_rxtx_pkt_burst[RTE_MAX_ETHPORTS];
+
 static inline void
 iavf_rxd_to_pkt_fields_by_comms_ovs(__rte_unused struct iavf_rx_queue *rxq,
 				    struct rte_mbuf *mb,
@@ -3394,34 +3396,34 @@ check_mbuf_len(struct offload_info *info, struct rte_mbuf *m)
 {
 	if (m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) {
 		if (info->outer_l2_len != m->outer_l2_len) {
-			PMD_TX_LOG(ERR, "outer_l2_len error in mbuf. Original "
-			"length: %hu, calculated length: %u", m->outer_l2_len,
+			PMD_DRV_LOG(ERR, "outer_l2_len error in mbuf. Original "
+			"length: %d, calculated length: %u", m->outer_l2_len,
 			info->outer_l2_len);
 			return -1;
 		}
 		if (info->outer_l3_len != m->outer_l3_len) {
-			PMD_TX_LOG(ERR, "outer_l3_len error in mbuf. Original "
-			"length: %hu,calculated length: %u", m->outer_l3_len,
+			PMD_DRV_LOG(ERR, "outer_l3_len error in mbuf. Original "
+			"length: %d,calculated length: %u", m->outer_l3_len,
 			info->outer_l3_len);
 			return -1;
 		}
 	}
 
 	if (info->l2_len != m->l2_len) {
-		PMD_TX_LOG(ERR, "l2_len error in mbuf. Original "
-		"length: %hu, calculated length: %u", m->l2_len,
+		PMD_DRV_LOG(ERR, "l2_len error in mbuf. Original "
+		"length: %d, calculated length: %u", m->l2_len,
 		info->l2_len);
 		return -1;
 	}
 	if (info->l3_len != m->l3_len) {
-		PMD_TX_LOG(ERR, "l3_len error in mbuf. Original "
-		"length: %hu, calculated length: %u", m->l3_len,
+		PMD_DRV_LOG(ERR, "l3_len error in mbuf. Original "
+		"length: %d, calculated length: %u", m->l3_len,
 		info->l3_len);
 		return -1;
 	}
 	if (info->l4_len != m->l4_len) {
-		PMD_TX_LOG(ERR, "l4_len error in mbuf. Original "
-		"length: %hu, calculated length: %u", m->l4_len,
+		PMD_DRV_LOG(ERR, "l4_len error in mbuf. Original "
+		"length: %d, calculated length: %u", m->l4_len,
 		info->l4_len);
 		return -1;
 	}
@@ -3438,24 +3440,24 @@ check_ether_type(struct offload_info *info, struct rte_mbuf *m)
 		if (info->outer_ethertype ==
 			rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
 			if (!(m->ol_flags & RTE_MBUF_F_TX_OUTER_IPV4)) {
-				PMD_TX_LOG(ERR, "Outer ethernet type is ipv4, "
+				PMD_DRV_LOG(ERR, "Outer ethernet type is ipv4, "
 				"tx offload missing `RTE_MBUF_F_TX_OUTER_IPV4` flag.");
 				ret = -1;
 			}
 			if (m->ol_flags & RTE_MBUF_F_TX_OUTER_IPV6) {
-				PMD_TX_LOG(ERR, "Outer ethernet type is ipv4, tx "
+				PMD_DRV_LOG(ERR, "Outer ethernet type is ipv4, tx "
 				"offload contains wrong `RTE_MBUF_F_TX_OUTER_IPV6` flag");
 				ret = -1;
 			}
 		} else if (info->outer_ethertype ==
 			rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
 			if (!(m->ol_flags & RTE_MBUF_F_TX_OUTER_IPV6)) {
-				PMD_TX_LOG(ERR, "Outer ethernet type is ipv6, "
+				PMD_DRV_LOG(ERR, "Outer ethernet type is ipv6, "
 				"tx offload missing `RTE_MBUF_F_TX_OUTER_IPV6` flag.");
 				ret = -1;
 			}
 			if (m->ol_flags & RTE_MBUF_F_TX_OUTER_IPV4) {
-				PMD_TX_LOG(ERR, "Outer ethernet type is ipv6, tx "
+				PMD_DRV_LOG(ERR, "Outer ethernet type is ipv6, tx "
 				"offload contains wrong `RTE_MBUF_F_TX_OUTER_IPV4` flag");
 				ret = -1;
 			}
@@ -3465,24 +3467,24 @@ check_ether_type(struct offload_info *info, struct rte_mbuf *m)
 	if (info->ethertype ==
 		rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
 		if (!(m->ol_flags & RTE_MBUF_F_TX_IPV4)) {
-			PMD_TX_LOG(ERR, "Ethernet type is ipv4, tx offload "
+			PMD_DRV_LOG(ERR, "Ethernet type is ipv4, tx offload "
 			"missing `RTE_MBUF_F_TX_IPV4` flag.");
 			ret = -1;
 		}
 		if (m->ol_flags & RTE_MBUF_F_TX_IPV6) {
-			PMD_TX_LOG(ERR, "Ethernet type is ipv4, tx "
+			PMD_DRV_LOG(ERR, "Ethernet type is ipv4, tx "
 			"offload contains wrong `RTE_MBUF_F_TX_IPV6` flag");
 			ret = -1;
 		}
 	} else if (info->ethertype ==
 		rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
 		if (!(m->ol_flags & RTE_MBUF_F_TX_IPV6)) {
-			PMD_TX_LOG(ERR, "Ethernet type is ipv6, tx offload "
+			PMD_DRV_LOG(ERR, "Ethernet type is ipv6, tx offload "
 			"missing `RTE_MBUF_F_TX_IPV6` flag.");
 			ret = -1;
 		}
 		if (m->ol_flags & RTE_MBUF_F_TX_IPV4) {
-			PMD_TX_LOG(ERR, "Ethernet type is ipv6, tx offload "
+			PMD_DRV_LOG(ERR, "Ethernet type is ipv6, tx offload "
 			"contains wrong `RTE_MBUF_F_TX_IPV4` flag");
 			ret = -1;
 		}
@@ -3512,12 +3514,12 @@ iavf_check_mbuf(struct rte_mbuf *m)
 		parse_gtp(udp_hdr, &info);
 		if (info.is_tunnel) {
 			if (!tunnel_type) {
-				PMD_TX_LOG(ERR, "gtp tunnel packet missing tx "
+				PMD_DRV_LOG(ERR, "gtp tunnel packet missing tx "
 				"offload missing `RTE_MBUF_F_TX_TUNNEL_GTP` flag.");
 				return -1;
 			}
 			if (tunnel_type != RTE_MBUF_F_TX_TUNNEL_GTP) {
-				PMD_TX_LOG(ERR, "gtp tunnel packet, tx offload has wrong "
+				PMD_DRV_LOG(ERR, "gtp tunnel packet, tx offload has wrong "
 				"`%s` flag, correct is `RTE_MBUF_F_TX_TUNNEL_GTP` flag",
 				rte_get_tx_ol_flag_name(tunnel_type));
 				return -1;
@@ -3527,12 +3529,12 @@ iavf_check_mbuf(struct rte_mbuf *m)
 		parse_vxlan_gpe(udp_hdr, &info);
 		if (info.is_tunnel) {
 			if (!tunnel_type) {
-				PMD_TX_LOG(ERR, "vxlan gpe tunnel packet missing tx "
+				PMD_DRV_LOG(ERR, "vxlan gpe tunnel packet missing tx "
 				"offload missing `RTE_MBUF_F_TX_TUNNEL_VXLAN_GPE` flag.");
 				return -1;
 			}
 			if (tunnel_type != RTE_MBUF_F_TX_TUNNEL_VXLAN_GPE) {
-				PMD_TX_LOG(ERR, "vxlan gpe tunnel packet, tx offload has "
+				PMD_DRV_LOG(ERR, "vxlan gpe tunnel packet, tx offload has "
 				"wrong `%s` flag, correct is "
 				"`RTE_MBUF_F_TX_TUNNEL_VXLAN_GPE` flag",
 				rte_get_tx_ol_flag_name(tunnel_type));
@@ -3543,12 +3545,12 @@ iavf_check_mbuf(struct rte_mbuf *m)
 		parse_vxlan(udp_hdr, &info);
 		if (info.is_tunnel) {
 			if (!tunnel_type) {
-				PMD_TX_LOG(ERR, "vxlan tunnel packet missing tx "
+				PMD_DRV_LOG(ERR, "vxlan tunnel packet missing tx "
 				"offload missing `RTE_MBUF_F_TX_TUNNEL_VXLAN` flag.");
 				return -1;
 			}
 			if (tunnel_type != RTE_MBUF_F_TX_TUNNEL_VXLAN) {
-				PMD_TX_LOG(ERR, "vxlan tunnel packet, tx offload has "
+				PMD_DRV_LOG(ERR, "vxlan tunnel packet, tx offload has "
 				"wrong `%s` flag, correct is "
 				"`RTE_MBUF_F_TX_TUNNEL_VXLAN` flag",
 				rte_get_tx_ol_flag_name(tunnel_type));
@@ -3559,12 +3561,12 @@ iavf_check_mbuf(struct rte_mbuf *m)
 		parse_geneve(udp_hdr, &info);
 		if (info.is_tunnel) {
 			if (!tunnel_type) {
-				PMD_TX_LOG(ERR, "geneve tunnel packet missing tx "
+				PMD_DRV_LOG(ERR, "geneve tunnel packet missing tx "
 				"offload missing `RTE_MBUF_F_TX_TUNNEL_GENEVE` flag.");
 				return -1;
 			}
 			if (tunnel_type != RTE_MBUF_F_TX_TUNNEL_GENEVE) {
-				PMD_TX_LOG(ERR, "geneve tunnel packet, tx offload has "
+				PMD_DRV_LOG(ERR, "geneve tunnel packet, tx offload has "
 				"wrong `%s` flag, correct is "
 				"`RTE_MBUF_F_TX_TUNNEL_GENEVE` flag",
 				rte_get_tx_ol_flag_name(tunnel_type));
@@ -3575,7 +3577,7 @@ iavf_check_mbuf(struct rte_mbuf *m)
 		/* Always keep last. */
 		if (unlikely(RTE_ETH_IS_TUNNEL_PKT(m->packet_type)
 			!= 0)) {
-			PMD_TX_LOG(ERR, "Unknown tunnel packet. UDP dst port: %hu",
+			PMD_DRV_LOG(ERR, "Unknown tunnel packet. UDP dst port: %hu",
 				udp_hdr->dst_port);
 				return -1;
 		}
@@ -3587,12 +3589,12 @@ iavf_check_mbuf(struct rte_mbuf *m)
 		parse_gre(gre_hdr, &info);
 		if (info.is_tunnel) {
 			if (!tunnel_type) {
-				PMD_TX_LOG(ERR, "gre tunnel packet missing tx "
+				PMD_DRV_LOG(ERR, "gre tunnel packet missing tx "
 				"offload missing `RTE_MBUF_F_TX_TUNNEL_GRE` flag.");
 				return -1;
 			}
 			if (tunnel_type != RTE_MBUF_F_TX_TUNNEL_GRE) {
-				PMD_TX_LOG(ERR, "gre tunnel packet, tx offload has "
+				PMD_DRV_LOG(ERR, "gre tunnel packet, tx offload has "
 				"wrong `%s` flag, correct is "
 				"`RTE_MBUF_F_TX_TUNNEL_GRE` flag",
 				rte_get_tx_ol_flag_name(tunnel_type));
@@ -3607,12 +3609,12 @@ iavf_check_mbuf(struct rte_mbuf *m)
 		parse_encap_ip(encap_ip_hdr, &info);
 		if (info.is_tunnel) {
 			if (!tunnel_type) {
-				PMD_TX_LOG(ERR, "Ipip tunnel packet missing tx "
+				PMD_DRV_LOG(ERR, "Ipip tunnel packet missing tx "
 				"offload missing `RTE_MBUF_F_TX_TUNNEL_IPIP` flag.");
 				return -1;
 			}
 			if (tunnel_type != RTE_MBUF_F_TX_TUNNEL_IPIP) {
-				PMD_TX_LOG(ERR, "Ipip tunnel packet, tx offload has "
+				PMD_DRV_LOG(ERR, "Ipip tunnel packet, tx offload has "
 				"wrong `%s` flag, correct is "
 				"`RTE_MBUF_F_TX_TUNNEL_IPIP` flag",
 				rte_get_tx_ol_flag_name(tunnel_type));
@@ -3629,6 +3631,100 @@ iavf_check_mbuf(struct rte_mbuf *m)
 	return check_ether_type(&info, m);
 }
 
+/* Tx MDD check */
+static uint16_t
+iavf_xmit_pkts_mdd(void *tx_queue, struct rte_mbuf **tx_pkts,
+	      uint16_t nb_pkts)
+{
+	struct iavf_tx_queue *txq = tx_queue;
+	struct iavf_adapter *adapter = txq->vsi->adapter;
+	uint16_t idx;
+	uint64_t ol_flags;
+	struct rte_mbuf *mb;
+	const char *reason = NULL;
+	uint64_t pkt_errors = 0;
+
+
+	for (idx = 0; idx < nb_pkts; idx++) {
+		mb = tx_pkts[idx];
+		ol_flags = mb->ol_flags;
+
+		if ((adapter->mc_flags & IAVF_MDD_CHECK_F_TX_MBUF) &&
+			(rte_mbuf_check(mb, 1, &reason) != 0)) {
+			PMD_DRV_LOG(ERR, "INVALID mbuf: %s\n", reason);
+			pkt_errors++;
+			continue;
+		}
+
+		if ((adapter->mc_flags & IAVF_MDD_CHECK_F_TX_SIZE) &&
+			(mb->data_len < IAVF_TX_MIN_PKT_LEN ||
+			mb->data_len > adapter->vf.max_pkt_len)) {
+			PMD_DRV_LOG(ERR, "INVALID mbuf: data_len (%u) is out "
+			"of range, reasonable range (%d - %u)\n", mb->data_len,
+			IAVF_TX_MIN_PKT_LEN, adapter->vf.max_pkt_len);
+			pkt_errors++;
+			continue;
+		}
+
+		if (adapter->mc_flags & IAVF_MDD_CHECK_F_TX_SEGMENT) {
+			/* Check condition for nb_segs > IAVF_TX_MAX_MTU_SEG. */
+			if (!(ol_flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG))) {
+				if (mb->nb_segs > IAVF_TX_MAX_MTU_SEG) {
+					PMD_DRV_LOG(ERR, "INVALID mbuf: nb_segs (%d) exceeds "
+					"HW limit, maximum allowed value is %d\n", mb->nb_segs,
+					IAVF_TX_MAX_MTU_SEG);
+					pkt_errors++;
+					continue;
+				}
+			} else if ((mb->tso_segsz < IAVF_MIN_TSO_MSS) ||
+				(mb->tso_segsz > IAVF_MAX_TSO_MSS)) {
+				/* MSS outside the range are considered malicious */
+				PMD_DRV_LOG(ERR, "INVALID mbuf: tso_segsz (%u) is out "
+				"of range, reasonable range (%d - %u)\n", mb->tso_segsz,
+				IAVF_MIN_TSO_MSS, IAVF_MAX_TSO_MSS);
+				pkt_errors++;
+				continue;
+			} else if (mb->nb_segs > txq->nb_tx_desc) {
+				PMD_DRV_LOG(ERR, "INVALID mbuf: nb_segs out "
+				"of ring length\n");
+				pkt_errors++;
+				continue;
+			}
+		}
+
+		if (adapter->mc_flags & IAVF_MDD_CHECK_F_TX_OFFLOAD) {
+			if (ol_flags & IAVF_TX_OFFLOAD_NOTSUP_MASK) {
+				PMD_DRV_LOG(ERR, "INVALID mbuf: TX offload "
+				"is not supported\n");
+				pkt_errors++;
+				continue;
+			}
+
+			if (!rte_validate_tx_offload(mb)) {
+				PMD_DRV_LOG(ERR, "INVALID mbuf: TX offload "
+				"setup error\n");
+				pkt_errors++;
+				continue;
+			}
+		}
+
+		if (adapter->mc_flags & IAVF_MDD_CHECK_F_TX_STRICT &&
+			iavf_check_mbuf(mb)) {
+			pkt_errors++;
+			continue;
+		}
+	}
+
+	if (pkt_errors) {
+		__atomic_fetch_add(&txq->mdd_pkt_errors,
+			pkt_errors, rte_memory_order_release);
+		return 0;
+	}
+
+	return iavf_rxtx_pkt_burst[txq->port_id].tx_burst_raw(tx_queue,
+								tx_pkts, nb_pkts);
+}
+
 /* TX prep functions */
 uint16_t
 iavf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
@@ -3715,7 +3811,7 @@ iavf_recv_pkts_no_poll(void *rx_queue, struct rte_mbuf **rx_pkts,
 	if (!rxq->vsi || rxq->vsi->adapter->no_poll)
 		return 0;
 
-	return rxq->vsi->adapter->rx_pkt_burst(rx_queue,
+	return iavf_rxtx_pkt_burst[rxq->port_id].rx_burst_raw(rx_queue,
 								rx_pkts, nb_pkts);
 }
 
@@ -3727,7 +3823,7 @@ iavf_xmit_pkts_no_poll(void *tx_queue, struct rte_mbuf **tx_pkts,
 	if (!txq->vsi || txq->vsi->adapter->no_poll)
 		return 0;
 
-	return txq->vsi->adapter->tx_pkt_burst(tx_queue,
+	return iavf_rxtx_pkt_burst[txq->port_id].tx_burst_raw(tx_queue,
 								tx_pkts, nb_pkts);
 }
 
@@ -3917,7 +4013,8 @@ iavf_set_rx_function(struct rte_eth_dev *dev)
 		}
 
 		if (no_poll_on_link_down) {
-			adapter->rx_pkt_burst = dev->rx_pkt_burst;
+			iavf_rxtx_pkt_burst[dev->data->port_id].rx_burst_raw =
+				dev->rx_pkt_burst;
 			dev->rx_pkt_burst = iavf_recv_pkts_no_poll;
 		}
 		return;
@@ -3937,7 +4034,8 @@ iavf_set_rx_function(struct rte_eth_dev *dev)
 		dev->rx_pkt_burst = iavf_recv_pkts_vec;
 
 		if (no_poll_on_link_down) {
-			adapter->rx_pkt_burst = dev->rx_pkt_burst;
+			iavf_rxtx_pkt_burst[dev->data->port_id].rx_burst_raw =
+				dev->rx_pkt_burst;
 			dev->rx_pkt_burst = iavf_recv_pkts_no_poll;
 		}
 		return;
@@ -3964,7 +4062,8 @@ iavf_set_rx_function(struct rte_eth_dev *dev)
 	}
 
 	if (no_poll_on_link_down) {
-		adapter->rx_pkt_burst = dev->rx_pkt_burst;
+		iavf_rxtx_pkt_burst[dev->data->port_id].rx_burst_raw =
+			dev->rx_pkt_burst;
 		dev->rx_pkt_burst = iavf_recv_pkts_no_poll;
 	}
 }
@@ -3976,6 +4075,7 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
 	struct iavf_adapter *adapter =
 		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 	int no_poll_on_link_down = adapter->devargs.no_poll_on_link_down;
+	int mbuf_check = adapter->devargs.mbuf_check;
 #ifdef RTE_ARCH_X86
 	struct iavf_tx_queue *txq;
 	int i;
@@ -4063,8 +4163,13 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
 		}
 
 		if (no_poll_on_link_down) {
-			adapter->tx_pkt_burst = dev->tx_pkt_burst;
+			iavf_rxtx_pkt_burst[dev->data->port_id].tx_burst_raw =
+				dev->tx_pkt_burst;
 			dev->tx_pkt_burst = iavf_xmit_pkts_no_poll;
+		} else if (mbuf_check) {
+			iavf_rxtx_pkt_burst[dev->data->port_id].tx_burst_raw =
+				dev->tx_pkt_burst;
+			dev->tx_pkt_burst = iavf_xmit_pkts_mdd;
 		}
 		return;
 	}
@@ -4077,8 +4182,13 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
 	dev->tx_pkt_prepare = iavf_prep_pkts;
 
 	if (no_poll_on_link_down) {
-		adapter->tx_pkt_burst = dev->tx_pkt_burst;
+		iavf_rxtx_pkt_burst[dev->data->port_id].tx_burst_raw =
+			dev->tx_pkt_burst;
 		dev->tx_pkt_burst = iavf_xmit_pkts_no_poll;
+	} else if (mbuf_check) {
+		iavf_rxtx_pkt_burst[dev->data->port_id].tx_burst_raw =
+			dev->tx_pkt_burst;
+		dev->tx_pkt_burst = iavf_xmit_pkts_mdd;
 	}
 }
 
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index f432f9d956..ab95bb30c7 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -203,6 +203,10 @@ struct iavf_txq_ops {
 	void (*release_mbufs)(struct iavf_tx_queue *txq);
 };
 
+struct iavf_pkt_burst {
+	eth_rx_burst_t rx_burst_raw;
+	eth_tx_burst_t tx_burst_raw;
+};
 
 struct iavf_rx_queue_stats {
 	uint64_t reserved;
@@ -297,6 +301,8 @@ struct iavf_tx_queue {
 	uint16_t next_rs;              /* next to check DD,  for VPMD */
 	uint16_t ipsec_crypto_pkt_md_offset;
 
+	uint64_t mdd_pkt_errors;
+
 	bool q_set;                    /* if rx queue has been configured */
 	bool tx_deferred_start;        /* don't start this queue in dev start */
 	const struct iavf_txq_ops *ops;
-- 
2.25.1



* RE: [PATCH v2] net/iavf: add diagnostic support in TX path
  2023-12-22 10:44 ` [PATCH v2] " Mingjin Ye
@ 2023-12-22 11:37   ` Zhang, Qi Z
  2023-12-25  2:48     ` Ye, MingjinX
  2023-12-26 10:07   ` [PATCH v3] " Mingjin Ye
  1 sibling, 1 reply; 36+ messages in thread
From: Zhang, Qi Z @ 2023-12-22 11:37 UTC (permalink / raw)
  To: Ye, MingjinX, dev; +Cc: Yang, Qiming, Ye, MingjinX, Wu, Jingjing, Xing, Beilei



> -----Original Message-----
> From: Mingjin Ye <mingjinx.ye@intel.com>
> Sent: Friday, December 22, 2023 6:45 PM
> To: dev@dpdk.org
> Cc: Yang, Qiming <qiming.yang@intel.com>; Ye, MingjinX
> <mingjinx.ye@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>; Xing, Beilei
> <beilei.xing@intel.com>
> Subject: [PATCH v2] net/iavf: add diagnostic support in TX path
> 
> The only way to enable diagnostics for TX paths is to modify the application
> source code, making it difficult to diagnose faults.
> 
> In this patch, the devarg option "mbuf_check" is introduced and the
> parameters are configured to enable the corresponding diagnostics.
> 
> supported cases: mbuf, size, segment, offload, strict.
>  1. mbuf: check for corrupted mbuf.
>  2. size: check min/max packet length according to hw spec.
>  3. segment: check that the number of mbuf segments does not exceed the hw limit.
>  4. offload: check for any unsupported offload flag.
>  5. strict: check protocol headers.
> 
> parameter format: mbuf_check=[mbuf,<case1>,<case2>]
> e.g.: dpdk-testpmd -a 0000:81:01.0,mbuf_check=[mbuf,size] -- -i
> 
> Signed-off-by: Mingjin Ye <mingjinx.ye@intel.com>
> ---
> v2: Remove call chain.

...

> 
> +static struct iavf_pkt_burst iavf_rxtx_pkt_burst[RTE_MAX_ETHPORTS];

A global array is not necessary; I assume we can get the adapter with rxq->vsi->adapter.

> +
>  static inline void
>  iavf_rxd_to_pkt_fields_by_comms_ovs(__rte_unused struct iavf_rx_queue
> *rxq,
>  				    struct rte_mbuf *mb,
> @@ -3394,34 +3396,34 @@ check_mbuf_len(struct offload_info *info,
> struct rte_mbuf *m)  {
>  	if (m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) {
>  		if (info->outer_l2_len != m->outer_l2_len) {
> -			PMD_TX_LOG(ERR, "outer_l2_len error in mbuf.
> Original "
> -			"length: %hu, calculated length: %u", m-
> >outer_l2_len,
> +			PMD_DRV_LOG(ERR, "outer_l2_len error in mbuf.
> Original "
> +			"length: %d, calculated length: %u", m-
> >outer_l2_len,
>  			info->outer_l2_len);
>  			return -1;
>  		}
>  		if (info->outer_l3_len != m->outer_l3_len) {
> -			PMD_TX_LOG(ERR, "outer_l3_len error in mbuf.
> Original "
> -			"length: %hu,calculated length: %u", m-
> >outer_l3_len,
> +			PMD_DRV_LOG(ERR, "outer_l3_len error in mbuf.
> Original "
> +			"length: %d,calculated length: %u", m->outer_l3_len,
>  			info->outer_l3_len);
>  			return -1;
>  		}
>  	}
> 
>  	if (info->l2_len != m->l2_len) {
> -		PMD_TX_LOG(ERR, "l2_len error in mbuf. Original "
> -		"length: %hu, calculated length: %u", m->l2_len,
> +		PMD_DRV_LOG(ERR, "l2_len error in mbuf. Original "
> +		"length: %d, calculated length: %u", m->l2_len,
>  		info->l2_len);
>  		return -1;

Can you explain why you need to change all the log types here? Basically, the diagnostics check is for Tx only; we don't need to touch the existing Rx implementation. It could be a separate patch if you think something needs to be refactored.




* RE: [PATCH v2] net/iavf: add diagnostic support in TX path
  2023-12-22 11:37   ` Zhang, Qi Z
@ 2023-12-25  2:48     ` Ye, MingjinX
  0 siblings, 0 replies; 36+ messages in thread
From: Ye, MingjinX @ 2023-12-25  2:48 UTC (permalink / raw)
  To: Zhang, Qi Z, dev; +Cc: Yang, Qiming, Wu, Jingjing, Xing, Beilei



> -----Original Message-----
> From: Zhang, Qi Z <qi.z.zhang@intel.com>
> Sent: 2023年12月22日 19:38
> To: Ye, MingjinX <mingjinx.ye@intel.com>; dev@dpdk.org
> Cc: Yang, Qiming <qiming.yang@intel.com>; Ye, MingjinX
> <mingjinx.ye@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>; Xing, Beilei
> <beilei.xing@intel.com>
> Subject: RE: [PATCH v2] net/iavf: add diagnostic support in TX path
> 
> 
> 
> > -----Original Message-----
> > From: Mingjin Ye <mingjinx.ye@intel.com>
> > Sent: Friday, December 22, 2023 6:45 PM
> > To: dev@dpdk.org
> > Cc: Yang, Qiming <qiming.yang@intel.com>; Ye, MingjinX
> > <mingjinx.ye@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>; Xing,
> > Beilei <beilei.xing@intel.com>
> > Subject: [PATCH v2] net/iavf: add diagnostic support in TX path
> >
> > The only way to enable diagnostics for TX paths is to modify the
> > application source code, making it difficult to diagnose faults.
> >
> > In this patch, the devarg option "mbuf_check" is introduced and the
> > parameters are configured to enable the corresponding diagnostics.
> >
> > supported cases: mbuf, size, segment, offload, strict.
> >  1. mbuf: check for corrupted mbuf.
> >  2. size: check min/max packet length according to hw spec.
> >  3. segment: check that the number of mbuf segments does not exceed the hw limitation.
> >  4. offload: check for any unsupported offload flag.
> >  5. strict: check protocol headers.
> >
> > parameter format: mbuf_check=[mbuf,<case1>,<case2>]
> > eg: dpdk-testpmd -a 0000:81:01.0,mbuf_check=[mbuf,size] -- -i
> >
> > Signed-off-by: Mingjin Ye <mingjinx.ye@intel.com>
> > ---
> > v2: Remove call chain.
> 
> ...
> 
> >
> > +static struct iavf_pkt_burst iavf_rxtx_pkt_burst[RTE_MAX_ETHPORTS];
> 
> Global array is not necessary, I assume we can get adapter with rxq->vsi-
> >adapter.
The global array is kept for multi-process support, to solve the problems caused by ASLR: the burst functions can be loaded at different addresses in the primary and secondary processes, so a function pointer stored in shared memory is not valid across processes.
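To make the ASLR point concrete, here is a minimal, self-contained sketch of the pattern (generic names; it models the fix rather than quoting the driver):

#include <stdio.h>
#include <stdint.h>

typedef uint16_t (*burst_fn)(void);

static uint16_t burst_default(void) { return 0; }
static uint16_t burst_vec(void)     { return 1; }

enum burst_type { BURST_DEFAULT, BURST_VEC };

/* Every process carries its own copy of this table, so the function
 * addresses in it are always valid locally. */
static const burst_fn burst_ops[] = {
	[BURST_DEFAULT] = burst_default,
	[BURST_VEC]     = burst_vec,
};

struct adapter {			/* imagine this lives in shared memory */
	/* burst_fn tx_pkt_burst;	unsafe: address differs per process */
	enum burst_type tx_burst_type;	/* safe: an index is process-independent */
};

int main(void)
{
	struct adapter ad = { .tx_burst_type = BURST_VEC };

	/* any process can resolve the shared index against its local table */
	printf("%u\n", burst_ops[ad.tx_burst_type]());
	return 0;
}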
> 
> > +
> >  static inline void
> >  iavf_rxd_to_pkt_fields_by_comms_ovs(__rte_unused struct iavf_rx_queue *rxq,
> >  				    struct rte_mbuf *mb,
> > @@ -3394,34 +3396,34 @@ check_mbuf_len(struct offload_info *info, struct rte_mbuf *m)
> >  {
> >  	if (m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) {
> >  		if (info->outer_l2_len != m->outer_l2_len) {
> > -			PMD_TX_LOG(ERR, "outer_l2_len error in mbuf. Original "
> > -			"length: %hu, calculated length: %u", m->outer_l2_len,
> > +			PMD_DRV_LOG(ERR, "outer_l2_len error in mbuf. Original "
> > +			"length: %d, calculated length: %u", m->outer_l2_len,
> >  			info->outer_l2_len);
> >  			return -1;
> >  		}
> >  		if (info->outer_l3_len != m->outer_l3_len) {
> > -			PMD_TX_LOG(ERR, "outer_l3_len error in mbuf. Original "
> > -			"length: %hu,calculated length: %u", m->outer_l3_len,
> > +			PMD_DRV_LOG(ERR, "outer_l3_len error in mbuf. Original "
> > +			"length: %d,calculated length: %u", m->outer_l3_len,
> >  			info->outer_l3_len);
> >  			return -1;
> >  		}
> >  	}
> >
> >  	if (info->l2_len != m->l2_len) {
> > -		PMD_TX_LOG(ERR, "l2_len error in mbuf. Original "
> > -		"length: %hu, calculated length: %u", m->l2_len,
> > +		PMD_DRV_LOG(ERR, "l2_len error in mbuf. Original "
> > +		"length: %d, calculated length: %u", m->l2_len,
> >  		info->l2_len);
> >  		return -1;
> 
> Can you explain why you need to change all the log types here?
PMD_TX_LOG requires the RTE_ETHDEV_DEBUG_TX macro to be defined at build time, so the driver must be recompiled before any log output appears.
Switching to PMD_DRV_LOG allows quick debugging without modifying or recompiling the code.
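For context, the data-path log macros in DPDK drivers are typically compiled out entirely unless the debug option is defined, roughly like this (a representative sketch, not the literal iavf definition):

#ifdef RTE_ETHDEV_DEBUG_TX
#define PMD_TX_LOG(level, fmt, args...) \
	RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
#else
#define PMD_TX_LOG(level, fmt, args...) do { } while (0)
#endif

PMD_DRV_LOG, on the other hand, is always compiled in and is gated at run time by the log level, which is what makes it handier for field diagnostics.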

> Basically the diagnostic check is for Tx only; we don't need to touch the
> existing Rx implementation. It could be a separate patch if you think
> something needs to be refactored.
I will restore the Rx changes.
> 


^ permalink raw reply	[flat|nested] 36+ messages in thread

* [PATCH v3] net/iavf: add diagnostic support in TX path
  2023-12-22 10:44 ` [PATCH v2] " Mingjin Ye
  2023-12-22 11:37   ` Zhang, Qi Z
@ 2023-12-26 10:07   ` Mingjin Ye
  2023-12-27 10:16     ` [PATCH v4 1/2] " Mingjin Ye
  1 sibling, 1 reply; 36+ messages in thread
From: Mingjin Ye @ 2023-12-26 10:07 UTC (permalink / raw)
  To: dev; +Cc: qiming.yang, Mingjin Ye, Jingjing Wu, Beilei Xing

The only way to enable diagnostics for TX paths is to modify the
application source code, making it difficult to diagnose faults.

In this patch, the devarg option "mbuf_check" is introduced and the
parameters are configured to enable the corresponding diagnostics.

supported cases: mbuf, size, segment, offload, strict.
 1. mbuf: check for corrupted mbuf.
 2. size: check min/max packet length according to hw spec.
 3. segment: check that the number of mbuf segments does not exceed the hw limitation.
 4. offload: check for any unsupported offload flag.
 5. strict: check protocol headers.

parameter format: mbuf_check=[mbuf,<case1>,<case2>]
eg: dpdk-testpmd -a 0000:81:01.0,mbuf_check=[mbuf,size] -- -i

Signed-off-by: Mingjin Ye <mingjinx.ye@intel.com>
---
v2: Remove call chain.
---
v3: Optimise the implementation.
---
 drivers/net/iavf/iavf.h        |  25 +++++-
 drivers/net/iavf/iavf_ethdev.c |  69 +++++++++++++++++
 drivers/net/iavf/iavf_rxtx.c   | 138 ++++++++++++++++++++++++++++++++-
 drivers/net/iavf/iavf_rxtx.h   |   5 ++
 4 files changed, 233 insertions(+), 4 deletions(-)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 10868f2c30..b81329bb56 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -113,9 +113,14 @@ struct iavf_ipsec_crypto_stats {
 	} ierrors;
 };
 
+struct iavf_mbuf_stats {
+	uint64_t tx_pkt_errors;
+};
+
 struct iavf_eth_xstats {
 	struct virtchnl_eth_stats eth_stats;
 	struct iavf_ipsec_crypto_stats ips_stats;
+	struct iavf_mbuf_stats mbuf_stats;
 };
 
 /* Structure that defines a VSI, associated with a adapter. */
@@ -309,10 +314,27 @@ struct iavf_devargs {
 	uint32_t watchdog_period;
 	int auto_reset;
 	int no_poll_on_link_down;
+	int mbuf_check;
 };
 
 struct iavf_security_ctx;
 
+#define IAVF_MBUF_CHECK_F_TX_MBUF        (1ULL << 0)
+#define IAVF_MBUF_CHECK_F_TX_SIZE        (1ULL << 1)
+#define IAVF_MBUF_CHECK_F_TX_SEGMENT     (1ULL << 2)
+#define IAVF_MBUF_CHECK_F_TX_OFFLOAD     (1ULL << 3)
+#define IAVF_MBUF_CHECK_F_TX_STRICT      (1ULL << 4)
+
+enum iavf_tx_pkt_burst_type {
+	IAVF_PKT_BURST_DEFAULT		= 0,
+	IAVF_PKT_BURST_VEC		= 1,
+	IAVF_PKT_BURST_VEC_AVX2		= 2,
+	IAVF_PKT_BURST_VEC_AVX2_OFFLOAD	= 3,
+	IAVF_PKT_BURST_VEC_AVX512	= 4,
+	IAVF_PKT_BURST_VEC_AVX512_OFFLOAD	= 5,
+	IAVF_PKT_BURST_VEC_AVX512_CTX_OFFLOAD	= 6,
+};
+
 /* Structure to store private data for each VF instance. */
 struct iavf_adapter {
 	struct iavf_hw hw;
@@ -329,7 +351,8 @@ struct iavf_adapter {
 	bool closed;
 	bool no_poll;
 	eth_rx_burst_t rx_pkt_burst;
-	eth_tx_burst_t tx_pkt_burst;
+	enum iavf_tx_pkt_burst_type tx_burst_type;
+	uint64_t mc_flags; /* mbuf check flags. */
 	uint16_t fdir_ref_cnt;
 	struct iavf_devargs devargs;
 };
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index d1edb0dd5c..8d1aadfae8 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -39,6 +39,8 @@
 #define IAVF_RESET_WATCHDOG_ARG    "watchdog_period"
 #define IAVF_ENABLE_AUTO_RESET_ARG "auto_reset"
 #define IAVF_NO_POLL_ON_LINK_DOWN_ARG "no-poll-on-link-down"
+#define IAVF_MBUF_CHECK_ARG       "mbuf_check"
+
 uint64_t iavf_timestamp_dynflag;
 int iavf_timestamp_dynfield_offset = -1;
 
@@ -48,6 +50,7 @@ static const char * const iavf_valid_args[] = {
 	IAVF_RESET_WATCHDOG_ARG,
 	IAVF_ENABLE_AUTO_RESET_ARG,
 	IAVF_NO_POLL_ON_LINK_DOWN_ARG,
+	IAVF_MBUF_CHECK_ARG,
 	NULL
 };
 
@@ -174,6 +177,7 @@ static const struct rte_iavf_xstats_name_off rte_iavf_stats_strings[] = {
 	{"tx_broadcast_packets", _OFF_OF(eth_stats.tx_broadcast)},
 	{"tx_dropped_packets", _OFF_OF(eth_stats.tx_discards)},
 	{"tx_error_packets", _OFF_OF(eth_stats.tx_errors)},
+	{"tx_mbuf_error_packets", _OFF_OF(mbuf_stats.tx_pkt_errors)},
 
 	{"inline_ipsec_crypto_ipackets", _OFF_OF(ips_stats.icount)},
 	{"inline_ipsec_crypto_ibytes", _OFF_OF(ips_stats.ibytes)},
@@ -1881,6 +1885,8 @@ static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
 {
 	int ret;
 	unsigned int i;
+	struct iavf_tx_queue *txq;
+	uint64_t mbuf_errors = 0;
 	struct iavf_adapter *adapter =
 		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
@@ -1904,6 +1910,15 @@ static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
 	if (iavf_ipsec_crypto_supported(adapter))
 		iavf_dev_update_ipsec_xstats(dev, &iavf_xtats.ips_stats);
 
+	if (adapter->devargs.mbuf_check) {
+		for (i = 0; i < dev->data->nb_tx_queues; i++) {
+			txq = dev->data->tx_queues[i];
+			mbuf_errors += __atomic_load_n(&txq->mbuf_errors,
+				__ATOMIC_RELAXED);
+		}
+		iavf_xtats.mbuf_stats.tx_pkt_errors = mbuf_errors;
+	}
+
 	/* loop over xstats array and values from pstats */
 	for (i = 0; i < IAVF_NB_XSTATS; i++) {
 		xstats[i].id = i;
@@ -2286,6 +2301,52 @@ iavf_parse_watchdog_period(__rte_unused const char *key, const char *value, void
 	return 0;
 }
 
+static int
+iavf_parse_mbuf_check(__rte_unused const char *key, const char *value, void *args)
+{
+	char *cur;
+	char *tmp;
+	int str_len;
+	int valid_len;
+
+	int ret = 0;
+	uint64_t *mc_flags = args;
+	char *str2 = strdup(value);
+	if (str2 == NULL)
+		return -1;
+
+	str_len = strlen(str2);
+	if (str2[0] == '[' && str2[str_len - 1] == ']') {
+		if (str_len < 3) {
+			ret = -1;
+			goto mdd_end;
+		}
+		valid_len = str_len - 2;
+		memmove(str2, str2 + 1, valid_len);
+		memset(str2 + valid_len, '\0', 2);
+	}
+	cur = strtok_r(str2, ",", &tmp);
+	while (cur != NULL) {
+		if (!strcmp(cur, "mbuf"))
+			*mc_flags |= IAVF_MBUF_CHECK_F_TX_MBUF;
+		else if (!strcmp(cur, "size"))
+			*mc_flags |= IAVF_MBUF_CHECK_F_TX_SIZE;
+		else if (!strcmp(cur, "segment"))
+			*mc_flags |= IAVF_MBUF_CHECK_F_TX_SEGMENT;
+		else if (!strcmp(cur, "offload"))
+			*mc_flags |= IAVF_MBUF_CHECK_F_TX_OFFLOAD;
+		else if (!strcmp(cur, "strict"))
+			*mc_flags |= IAVF_MBUF_CHECK_F_TX_STRICT;
+		else
+			PMD_DRV_LOG(ERR, "Unsupported mdd check type: %s", cur);
+		cur = strtok_r(NULL, ",", &tmp);
+	}
+
+mdd_end:
+	free(str2);
+	return ret;
+}
+
 static int iavf_parse_devargs(struct rte_eth_dev *dev)
 {
 	struct iavf_adapter *ad =
@@ -2340,6 +2401,14 @@ static int iavf_parse_devargs(struct rte_eth_dev *dev)
 		goto bail;
 	}
 
+	ret = rte_kvargs_process(kvlist, IAVF_MBUF_CHECK_ARG,
+				 &iavf_parse_mbuf_check, &ad->mc_flags);
+	if (ret)
+		goto bail;
+
+	if (ad->mc_flags)
+		ad->devargs.mbuf_check = 1;
+
 	ret = rte_kvargs_process(kvlist, IAVF_ENABLE_AUTO_RESET_ARG,
 				 &parse_bool, &ad->devargs.auto_reset);
 	if (ret)
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index f19aa14646..7fa84e9f0e 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -425,6 +425,23 @@ struct iavf_txq_ops iavf_txq_release_mbufs_ops[] = {
 
 };
 
+static const
+struct iavf_tx_burst_ops iavf_tx_pkt_burst_ops[] = {
+	[IAVF_PKT_BURST_DEFAULT].tx_pkt_burst = iavf_xmit_pkts,
+#ifdef RTE_ARCH_X86
+	[IAVF_PKT_BURST_VEC].tx_pkt_burst = iavf_xmit_pkts_vec,
+	[IAVF_PKT_BURST_VEC_AVX2].tx_pkt_burst = iavf_xmit_pkts_vec_avx2,
+	[IAVF_PKT_BURST_VEC_AVX2_OFFLOAD].tx_pkt_burst = iavf_xmit_pkts_vec_avx2_offload,
+#ifdef CC_AVX512_SUPPORT
+	[IAVF_PKT_BURST_VEC_AVX512].tx_pkt_burst = iavf_xmit_pkts_vec_avx512,
+	[IAVF_PKT_BURST_VEC_AVX512_OFFLOAD].tx_pkt_burst =
+		iavf_xmit_pkts_vec_avx512_offload,
+	[IAVF_PKT_BURST_VEC_AVX512_CTX_OFFLOAD].tx_pkt_burst =
+		iavf_xmit_pkts_vec_avx512_ctx_offload,
+#endif
+#endif
+};
+
 static inline void
 iavf_rxd_to_pkt_fields_by_comms_ovs(__rte_unused struct iavf_rx_queue *rxq,
 				    struct rte_mbuf *mb,
@@ -3629,6 +3646,103 @@ iavf_check_mbuf(struct rte_mbuf *m)
 	return check_ether_type(&info, m);
 }
 
+/* Tx mbuf check */
+static uint16_t
+iavf_xmit_pkts_check(void *tx_queue, struct rte_mbuf **tx_pkts,
+	      uint16_t nb_pkts)
+{
+	uint16_t idx;
+	uint64_t ol_flags;
+	struct rte_mbuf *mb;
+	uint16_t good_pkts = nb_pkts;
+	const char *reason = NULL;
+	bool pkt_error = false;
+	struct iavf_tx_queue *txq = tx_queue;
+	struct iavf_adapter *adapter = txq->vsi->adapter;
+	enum iavf_tx_pkt_burst_type tx_burst_type =
+		txq->vsi->adapter->tx_burst_type;
+
+	for (idx = 0; idx < nb_pkts; idx++) {
+		mb = tx_pkts[idx];
+		ol_flags = mb->ol_flags;
+
+		if ((adapter->mc_flags & IAVF_MBUF_CHECK_F_TX_MBUF) &&
+			(rte_mbuf_check(mb, 1, &reason) != 0)) {
+			PMD_TX_LOG(ERR, "INVALID mbuf: %s\n", reason);
+			pkt_error = true;
+			break;
+		}
+
+		if ((adapter->mc_flags & IAVF_MBUF_CHECK_F_TX_SIZE) &&
+			(mb->data_len < IAVF_TX_MIN_PKT_LEN ||
+			mb->data_len > adapter->vf.max_pkt_len)) {
+			PMD_TX_LOG(ERR, "INVALID mbuf: data_len (%u) is out "
+			"of range, reasonable range (%d - %u)\n", mb->data_len,
+			IAVF_TX_MIN_PKT_LEN, adapter->vf.max_pkt_len);
+			pkt_error = true;
+			break;
+		}
+
+		if (adapter->mc_flags & IAVF_MBUF_CHECK_F_TX_SEGMENT) {
+			/* Check condition for nb_segs > IAVF_TX_MAX_MTU_SEG. */
+			if (!(ol_flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG))) {
+				if (mb->nb_segs > IAVF_TX_MAX_MTU_SEG) {
+					PMD_TX_LOG(ERR, "INVALID mbuf: nb_segs (%d) exceeds "
+					"HW limit, maximum allowed value is %d\n", mb->nb_segs,
+					IAVF_TX_MAX_MTU_SEG);
+					pkt_error = true;
+					break;
+				}
+			} else if ((mb->tso_segsz < IAVF_MIN_TSO_MSS) ||
+				(mb->tso_segsz > IAVF_MAX_TSO_MSS)) {
+				/* MSS outside the range are considered malicious */
+				PMD_TX_LOG(ERR, "INVALID mbuf: tso_segsz (%u) is out "
+				"of range, reasonable range (%d - %u)\n", mb->tso_segsz,
+				IAVF_MIN_TSO_MSS, IAVF_MAX_TSO_MSS);
+				pkt_error = true;
+				break;
+			} else if (mb->nb_segs > txq->nb_tx_desc) {
+				PMD_TX_LOG(ERR, "INVALID mbuf: nb_segs out "
+				"of ring length\n");
+				pkt_error = true;
+				break;
+			}
+		}
+
+		if (adapter->mc_flags & IAVF_MBUF_CHECK_F_TX_OFFLOAD) {
+			if (ol_flags & IAVF_TX_OFFLOAD_NOTSUP_MASK) {
+				PMD_TX_LOG(ERR, "INVALID mbuf: TX offload "
+				"is not supported\n");
+				pkt_error = true;
+				break;
+			}
+
+			if (!rte_validate_tx_offload(mb)) {
+				PMD_TX_LOG(ERR, "INVALID mbuf: TX offload "
+				"setup error\n");
+				pkt_error = true;
+				break;
+			}
+		}
+
+		if (adapter->mc_flags & IAVF_MBUF_CHECK_F_TX_STRICT &&
+			iavf_check_mbuf(mb)) {
+			pkt_error = true;
+			break;
+		}
+	}
+
+	if (pkt_error) {
+		__atomic_fetch_add(&txq->mbuf_errors, 1, __ATOMIC_RELAXED);
+		good_pkts = idx;
+		if (good_pkts == 0)
+			return 0;
+	}
+
+	return iavf_tx_pkt_burst_ops[tx_burst_type].tx_pkt_burst(tx_queue,
+								tx_pkts, good_pkts);
+}
+
 /* TX prep functions */
 uint16_t
 iavf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
@@ -3724,10 +3838,13 @@ iavf_xmit_pkts_no_poll(void *tx_queue, struct rte_mbuf **tx_pkts,
 				uint16_t nb_pkts)
 {
 	struct iavf_tx_queue *txq = tx_queue;
+	enum iavf_tx_pkt_burst_type tx_burst_type =
+		txq->vsi->adapter->tx_burst_type;
+
 	if (!txq->vsi || txq->vsi->adapter->no_poll)
 		return 0;
 
-	return txq->vsi->adapter->tx_pkt_burst(tx_queue,
+	return iavf_tx_pkt_burst_ops[tx_burst_type].tx_pkt_burst(tx_queue,
 								tx_pkts, nb_pkts);
 }
 
@@ -3975,6 +4092,8 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
 {
 	struct iavf_adapter *adapter =
 		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	enum iavf_tx_pkt_burst_type tx_burst_type;
+	int mbuf_check = adapter->devargs.mbuf_check;
 	int no_poll_on_link_down = adapter->devargs.no_poll_on_link_down;
 #ifdef RTE_ARCH_X86
 	struct iavf_tx_queue *txq;
@@ -4011,10 +4130,12 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
 			PMD_DRV_LOG(DEBUG, "Using Vector Tx (port %d).",
 				    dev->data->port_id);
 			dev->tx_pkt_burst = iavf_xmit_pkts_vec;
+			tx_burst_type = IAVF_PKT_BURST_VEC;
 		}
 		if (use_avx2) {
 			if (check_ret == IAVF_VECTOR_PATH) {
 				dev->tx_pkt_burst = iavf_xmit_pkts_vec_avx2;
+				tx_burst_type = IAVF_PKT_BURST_VEC_AVX2;
 				PMD_DRV_LOG(DEBUG, "Using AVX2 Vector Tx (port %d).",
 					    dev->data->port_id);
 			} else if (check_ret == IAVF_VECTOR_CTX_OFFLOAD_PATH) {
@@ -4023,6 +4144,7 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
 				goto normal;
 			} else {
 				dev->tx_pkt_burst = iavf_xmit_pkts_vec_avx2_offload;
+				tx_burst_type = IAVF_PKT_BURST_VEC_AVX2_OFFLOAD;
 				dev->tx_pkt_prepare = iavf_prep_pkts;
 				PMD_DRV_LOG(DEBUG, "Using AVX2 OFFLOAD Vector Tx (port %d).",
 					    dev->data->port_id);
@@ -4032,15 +4154,18 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
 		if (use_avx512) {
 			if (check_ret == IAVF_VECTOR_PATH) {
 				dev->tx_pkt_burst = iavf_xmit_pkts_vec_avx512;
+				tx_burst_type = IAVF_PKT_BURST_VEC_AVX512;
 				PMD_DRV_LOG(DEBUG, "Using AVX512 Vector Tx (port %d).",
 					    dev->data->port_id);
 			} else if (check_ret == IAVF_VECTOR_OFFLOAD_PATH) {
 				dev->tx_pkt_burst = iavf_xmit_pkts_vec_avx512_offload;
+				tx_burst_type = IAVF_PKT_BURST_VEC_AVX512_OFFLOAD;
 				dev->tx_pkt_prepare = iavf_prep_pkts;
 				PMD_DRV_LOG(DEBUG, "Using AVX512 OFFLOAD Vector Tx (port %d).",
 					    dev->data->port_id);
 			} else {
 				dev->tx_pkt_burst = iavf_xmit_pkts_vec_avx512_ctx_offload;
+				tx_burst_type = IAVF_PKT_BURST_VEC_AVX512_CTX_OFFLOAD;
 				dev->tx_pkt_prepare = iavf_prep_pkts;
 				PMD_DRV_LOG(DEBUG, "Using AVX512 CONTEXT OFFLOAD Vector Tx (port %d).",
 					    dev->data->port_id);
@@ -4063,8 +4188,11 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
 		}
 
 		if (no_poll_on_link_down) {
-			adapter->tx_pkt_burst = dev->tx_pkt_burst;
+			adapter->tx_burst_type = tx_burst_type;
 			dev->tx_pkt_burst = iavf_xmit_pkts_no_poll;
+		} else if (mbuf_check) {
+			adapter->tx_burst_type = tx_burst_type;
+			dev->tx_pkt_burst = iavf_xmit_pkts_check;
 		}
 		return;
 	}
@@ -4074,11 +4202,15 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
 	PMD_DRV_LOG(DEBUG, "Using Basic Tx callback (port=%d).",
 		    dev->data->port_id);
 	dev->tx_pkt_burst = iavf_xmit_pkts;
+	tx_burst_type = IAVF_PKT_BURST_DEFAULT;
 	dev->tx_pkt_prepare = iavf_prep_pkts;
 
 	if (no_poll_on_link_down) {
-		adapter->tx_pkt_burst = dev->tx_pkt_burst;
+		adapter->tx_burst_type = tx_burst_type;
 		dev->tx_pkt_burst = iavf_xmit_pkts_no_poll;
+	} else if (mbuf_check) {
+		adapter->tx_burst_type = tx_burst_type;
+		dev->tx_pkt_burst = iavf_xmit_pkts_check;
 	}
 }
 
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index f432f9d956..a4128ff7a3 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -203,6 +203,9 @@ struct iavf_txq_ops {
 	void (*release_mbufs)(struct iavf_tx_queue *txq);
 };
 
+struct iavf_tx_burst_ops {
+	eth_tx_burst_t tx_pkt_burst;
+};
 
 struct iavf_rx_queue_stats {
 	uint64_t reserved;
@@ -297,6 +300,8 @@ struct iavf_tx_queue {
 	uint16_t next_rs;              /* next to check DD,  for VPMD */
 	uint16_t ipsec_crypto_pkt_md_offset;
 
+	uint64_t mbuf_errors;
+
 	bool q_set;                    /* if rx queue has been configured */
 	bool tx_deferred_start;        /* don't start this queue in dev start */
 	const struct iavf_txq_ops *ops;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 36+ messages in thread

* [PATCH v4 1/2] net/iavf: add diagnostic support in TX path
  2023-12-26 10:07   ` [PATCH v3] " Mingjin Ye
@ 2023-12-27 10:16     ` Mingjin Ye
  2023-12-27 11:30       ` Zhang, Qi Z
  2023-12-28 10:26       ` [PATCH v5 0/2] net/iavf: add diagnostics and fix error Mingjin Ye
  0 siblings, 2 replies; 36+ messages in thread
From: Mingjin Ye @ 2023-12-27 10:16 UTC (permalink / raw)
  To: dev; +Cc: qiming.yang, Mingjin Ye, Jingjing Wu, Beilei Xing

The only way to enable diagnostics for TX paths is to modify the
application source code, making it difficult to diagnose faults.

In this patch, the devarg option "mbuf_check" is introduced and the
parameters are configured to enable the corresponding diagnostics.

supported cases: mbuf, size, segment, offload, strict.
 1. mbuf: check for corrupted mbuf.
 2. size: check min/max packet length according to hw spec.
 3. segment: check that the number of mbuf segments does not exceed the hw limitation.
 4. offload: check for any unsupported offload flag.
 5. strict: check protocol headers.

parameter format: mbuf_check=[mbuf,<case1>,<case2>]
eg: dpdk-testpmd -a 0000:81:01.0,mbuf_check=[mbuf,size] -- -i

Signed-off-by: Mingjin Ye <mingjinx.ye@intel.com>
---
v2: Remove call chain.
---
v3: Optimise the implementation.
---
v4: Fix Windows OS compilation error.
---
 drivers/net/iavf/iavf.h        |  25 +++++-
 drivers/net/iavf/iavf_ethdev.c |  70 +++++++++++++++++
 drivers/net/iavf/iavf_rxtx.c   | 138 ++++++++++++++++++++++++++++++++-
 drivers/net/iavf/iavf_rxtx.h   |   5 ++
 4 files changed, 234 insertions(+), 4 deletions(-)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 10868f2c30..b81329bb56 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -113,9 +113,14 @@ struct iavf_ipsec_crypto_stats {
 	} ierrors;
 };
 
+struct iavf_mbuf_stats {
+	uint64_t tx_pkt_errors;
+};
+
 struct iavf_eth_xstats {
 	struct virtchnl_eth_stats eth_stats;
 	struct iavf_ipsec_crypto_stats ips_stats;
+	struct iavf_mbuf_stats mbuf_stats;
 };
 
 /* Structure that defines a VSI, associated with a adapter. */
@@ -309,10 +314,27 @@ struct iavf_devargs {
 	uint32_t watchdog_period;
 	int auto_reset;
 	int no_poll_on_link_down;
+	int mbuf_check;
 };
 
 struct iavf_security_ctx;
 
+#define IAVF_MBUF_CHECK_F_TX_MBUF        (1ULL << 0)
+#define IAVF_MBUF_CHECK_F_TX_SIZE        (1ULL << 1)
+#define IAVF_MBUF_CHECK_F_TX_SEGMENT     (1ULL << 2)
+#define IAVF_MBUF_CHECK_F_TX_OFFLOAD     (1ULL << 3)
+#define IAVF_MBUF_CHECK_F_TX_STRICT      (1ULL << 4)
+
+enum iavf_tx_pkt_burst_type {
+	IAVF_PKT_BURST_DEFAULT		= 0,
+	IAVF_PKT_BURST_VEC		= 1,
+	IAVF_PKT_BURST_VEC_AVX2		= 2,
+	IAVF_PKT_BURST_VEC_AVX2_OFFLOAD	= 3,
+	IAVF_PKT_BURST_VEC_AVX512	= 4,
+	IAVF_PKT_BURST_VEC_AVX512_OFFLOAD	= 5,
+	IAVF_PKT_BURST_VEC_AVX512_CTX_OFFLOAD	= 6,
+};
+
 /* Structure to store private data for each VF instance. */
 struct iavf_adapter {
 	struct iavf_hw hw;
@@ -329,7 +351,8 @@ struct iavf_adapter {
 	bool closed;
 	bool no_poll;
 	eth_rx_burst_t rx_pkt_burst;
-	eth_tx_burst_t tx_pkt_burst;
+	enum iavf_tx_pkt_burst_type tx_burst_type;
+	uint64_t mc_flags; /* mbuf check flags. */
 	uint16_t fdir_ref_cnt;
 	struct iavf_devargs devargs;
 };
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index d1edb0dd5c..5398d2783f 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -13,6 +13,7 @@
 #include <inttypes.h>
 #include <rte_byteorder.h>
 #include <rte_common.h>
+#include <rte_os_shim.h>
 
 #include <rte_interrupts.h>
 #include <rte_debug.h>
@@ -39,6 +40,8 @@
 #define IAVF_RESET_WATCHDOG_ARG    "watchdog_period"
 #define IAVF_ENABLE_AUTO_RESET_ARG "auto_reset"
 #define IAVF_NO_POLL_ON_LINK_DOWN_ARG "no-poll-on-link-down"
+#define IAVF_MBUF_CHECK_ARG       "mbuf_check"
+
 uint64_t iavf_timestamp_dynflag;
 int iavf_timestamp_dynfield_offset = -1;
 
@@ -48,6 +51,7 @@ static const char * const iavf_valid_args[] = {
 	IAVF_RESET_WATCHDOG_ARG,
 	IAVF_ENABLE_AUTO_RESET_ARG,
 	IAVF_NO_POLL_ON_LINK_DOWN_ARG,
+	IAVF_MBUF_CHECK_ARG,
 	NULL
 };
 
@@ -174,6 +178,7 @@ static const struct rte_iavf_xstats_name_off rte_iavf_stats_strings[] = {
 	{"tx_broadcast_packets", _OFF_OF(eth_stats.tx_broadcast)},
 	{"tx_dropped_packets", _OFF_OF(eth_stats.tx_discards)},
 	{"tx_error_packets", _OFF_OF(eth_stats.tx_errors)},
+	{"tx_mbuf_error_packets", _OFF_OF(mbuf_stats.tx_pkt_errors)},
 
 	{"inline_ipsec_crypto_ipackets", _OFF_OF(ips_stats.icount)},
 	{"inline_ipsec_crypto_ibytes", _OFF_OF(ips_stats.ibytes)},
@@ -1881,6 +1886,8 @@ static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
 {
 	int ret;
 	unsigned int i;
+	struct iavf_tx_queue *txq;
+	uint64_t mbuf_errors = 0;
 	struct iavf_adapter *adapter =
 		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
@@ -1904,6 +1911,15 @@ static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
 	if (iavf_ipsec_crypto_supported(adapter))
 		iavf_dev_update_ipsec_xstats(dev, &iavf_xtats.ips_stats);
 
+	if (adapter->devargs.mbuf_check) {
+		for (i = 0; i < dev->data->nb_tx_queues; i++) {
+			txq = dev->data->tx_queues[i];
+			mbuf_errors += __atomic_load_n(&txq->mbuf_errors,
+				__ATOMIC_RELAXED);
+		}
+		iavf_xtats.mbuf_stats.tx_pkt_errors = mbuf_errors;
+	}
+
 	/* loop over xstats array and values from pstats */
 	for (i = 0; i < IAVF_NB_XSTATS; i++) {
 		xstats[i].id = i;
@@ -2286,6 +2302,52 @@ iavf_parse_watchdog_period(__rte_unused const char *key, const char *value, void
 	return 0;
 }
 
+static int
+iavf_parse_mbuf_check(__rte_unused const char *key, const char *value, void *args)
+{
+	char *cur;
+	char *tmp;
+	int str_len;
+	int valid_len;
+
+	int ret = 0;
+	uint64_t *mc_flags = args;
+	char *str2 = strdup(value);
+	if (str2 == NULL)
+		return -1;
+
+	str_len = strlen(str2);
+	if (str2[0] == '[' && str2[str_len - 1] == ']') {
+		if (str_len < 3) {
+			ret = -1;
+			goto mdd_end;
+		}
+		valid_len = str_len - 2;
+		memmove(str2, str2 + 1, valid_len);
+		memset(str2 + valid_len, '\0', 2);
+	}
+	cur = strtok_r(str2, ",", &tmp);
+	while (cur != NULL) {
+		if (!strcmp(cur, "mbuf"))
+			*mc_flags |= IAVF_MBUF_CHECK_F_TX_MBUF;
+		else if (!strcmp(cur, "size"))
+			*mc_flags |= IAVF_MBUF_CHECK_F_TX_SIZE;
+		else if (!strcmp(cur, "segment"))
+			*mc_flags |= IAVF_MBUF_CHECK_F_TX_SEGMENT;
+		else if (!strcmp(cur, "offload"))
+			*mc_flags |= IAVF_MBUF_CHECK_F_TX_OFFLOAD;
+		else if (!strcmp(cur, "strict"))
+			*mc_flags |= IAVF_MBUF_CHECK_F_TX_STRICT;
+		else
+			PMD_DRV_LOG(ERR, "Unsupported mdd check type: %s", cur);
+		cur = strtok_r(NULL, ",", &tmp);
+	}
+
+mdd_end:
+	free(str2);
+	return ret;
+}
+
 static int iavf_parse_devargs(struct rte_eth_dev *dev)
 {
 	struct iavf_adapter *ad =
@@ -2340,6 +2402,14 @@ static int iavf_parse_devargs(struct rte_eth_dev *dev)
 		goto bail;
 	}
 
+	ret = rte_kvargs_process(kvlist, IAVF_MBUF_CHECK_ARG,
+				 &iavf_parse_mbuf_check, &ad->mc_flags);
+	if (ret)
+		goto bail;
+
+	if (ad->mc_flags)
+		ad->devargs.mbuf_check = 1;
+
 	ret = rte_kvargs_process(kvlist, IAVF_ENABLE_AUTO_RESET_ARG,
 				 &parse_bool, &ad->devargs.auto_reset);
 	if (ret)
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index f19aa14646..7fa84e9f0e 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -425,6 +425,23 @@ struct iavf_txq_ops iavf_txq_release_mbufs_ops[] = {
 
 };
 
+static const
+struct iavf_tx_burst_ops iavf_tx_pkt_burst_ops[] = {
+	[IAVF_PKT_BURST_DEFAULT].tx_pkt_burst = iavf_xmit_pkts,
+#ifdef RTE_ARCH_X86
+	[IAVF_PKT_BURST_VEC].tx_pkt_burst = iavf_xmit_pkts_vec,
+	[IAVF_PKT_BURST_VEC_AVX2].tx_pkt_burst = iavf_xmit_pkts_vec_avx2,
+	[IAVF_PKT_BURST_VEC_AVX2_OFFLOAD].tx_pkt_burst = iavf_xmit_pkts_vec_avx2_offload,
+#ifdef CC_AVX512_SUPPORT
+	[IAVF_PKT_BURST_VEC_AVX512].tx_pkt_burst = iavf_xmit_pkts_vec_avx512,
+	[IAVF_PKT_BURST_VEC_AVX512_OFFLOAD].tx_pkt_burst =
+		iavf_xmit_pkts_vec_avx512_offload,
+	[IAVF_PKT_BURST_VEC_AVX512_CTX_OFFLOAD].tx_pkt_burst =
+		iavf_xmit_pkts_vec_avx512_ctx_offload,
+#endif
+#endif
+};
+
 static inline void
 iavf_rxd_to_pkt_fields_by_comms_ovs(__rte_unused struct iavf_rx_queue *rxq,
 				    struct rte_mbuf *mb,
@@ -3629,6 +3646,103 @@ iavf_check_mbuf(struct rte_mbuf *m)
 	return check_ether_type(&info, m);
 }
 
+/* Tx mbuf check */
+static uint16_t
+iavf_xmit_pkts_check(void *tx_queue, struct rte_mbuf **tx_pkts,
+	      uint16_t nb_pkts)
+{
+	uint16_t idx;
+	uint64_t ol_flags;
+	struct rte_mbuf *mb;
+	uint16_t good_pkts = nb_pkts;
+	const char *reason = NULL;
+	bool pkt_error = false;
+	struct iavf_tx_queue *txq = tx_queue;
+	struct iavf_adapter *adapter = txq->vsi->adapter;
+	enum iavf_tx_pkt_burst_type tx_burst_type =
+		txq->vsi->adapter->tx_burst_type;
+
+	for (idx = 0; idx < nb_pkts; idx++) {
+		mb = tx_pkts[idx];
+		ol_flags = mb->ol_flags;
+
+		if ((adapter->mc_flags & IAVF_MBUF_CHECK_F_TX_MBUF) &&
+			(rte_mbuf_check(mb, 1, &reason) != 0)) {
+			PMD_TX_LOG(ERR, "INVALID mbuf: %s\n", reason);
+			pkt_error = true;
+			break;
+		}
+
+		if ((adapter->mc_flags & IAVF_MBUF_CHECK_F_TX_SIZE) &&
+			(mb->data_len < IAVF_TX_MIN_PKT_LEN ||
+			mb->data_len > adapter->vf.max_pkt_len)) {
+			PMD_TX_LOG(ERR, "INVALID mbuf: data_len (%u) is out "
+			"of range, reasonable range (%d - %u)\n", mb->data_len,
+			IAVF_TX_MIN_PKT_LEN, adapter->vf.max_pkt_len);
+			pkt_error = true;
+			break;
+		}
+
+		if (adapter->mc_flags & IAVF_MBUF_CHECK_F_TX_SEGMENT) {
+			/* Check condition for nb_segs > IAVF_TX_MAX_MTU_SEG. */
+			if (!(ol_flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG))) {
+				if (mb->nb_segs > IAVF_TX_MAX_MTU_SEG) {
+					PMD_TX_LOG(ERR, "INVALID mbuf: nb_segs (%d) exceeds "
+					"HW limit, maximum allowed value is %d\n", mb->nb_segs,
+					IAVF_TX_MAX_MTU_SEG);
+					pkt_error = true;
+					break;
+				}
+			} else if ((mb->tso_segsz < IAVF_MIN_TSO_MSS) ||
+				(mb->tso_segsz > IAVF_MAX_TSO_MSS)) {
+				/* MSS outside the range are considered malicious */
+				PMD_TX_LOG(ERR, "INVALID mbuf: tso_segsz (%u) is out "
+				"of range, reasonable range (%d - %u)\n", mb->tso_segsz,
+				IAVF_MIN_TSO_MSS, IAVF_MAX_TSO_MSS);
+				pkt_error = true;
+				break;
+			} else if (mb->nb_segs > txq->nb_tx_desc) {
+				PMD_TX_LOG(ERR, "INVALID mbuf: nb_segs out "
+				"of ring length\n");
+				pkt_error = true;
+				break;
+			}
+		}
+
+		if (adapter->mc_flags & IAVF_MBUF_CHECK_F_TX_OFFLOAD) {
+			if (ol_flags & IAVF_TX_OFFLOAD_NOTSUP_MASK) {
+				PMD_TX_LOG(ERR, "INVALID mbuf: TX offload "
+				"is not supported\n");
+				pkt_error = true;
+				break;
+			}
+
+			if (!rte_validate_tx_offload(mb)) {
+				PMD_TX_LOG(ERR, "INVALID mbuf: TX offload "
+				"setup error\n");
+				pkt_error = true;
+				break;
+			}
+		}
+
+		if (adapter->mc_flags & IAVF_MBUF_CHECK_F_TX_STRICT &&
+			iavf_check_mbuf(mb)) {
+			pkt_error = true;
+			break;
+		}
+	}
+
+	if (pkt_error) {
+		__atomic_fetch_add(&txq->mbuf_errors, 1, __ATOMIC_RELAXED);
+		good_pkts = idx;
+		if (good_pkts == 0)
+			return 0;
+	}
+
+	return iavf_tx_pkt_burst_ops[tx_burst_type].tx_pkt_burst(tx_queue,
+								tx_pkts, good_pkts);
+}
+
 /* TX prep functions */
 uint16_t
 iavf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
@@ -3724,10 +3838,13 @@ iavf_xmit_pkts_no_poll(void *tx_queue, struct rte_mbuf **tx_pkts,
 				uint16_t nb_pkts)
 {
 	struct iavf_tx_queue *txq = tx_queue;
+	enum iavf_tx_pkt_burst_type tx_burst_type =
+		txq->vsi->adapter->tx_burst_type;
+
 	if (!txq->vsi || txq->vsi->adapter->no_poll)
 		return 0;
 
-	return txq->vsi->adapter->tx_pkt_burst(tx_queue,
+	return iavf_tx_pkt_burst_ops[tx_burst_type].tx_pkt_burst(tx_queue,
 								tx_pkts, nb_pkts);
 }
 
@@ -3975,6 +4092,8 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
 {
 	struct iavf_adapter *adapter =
 		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	enum iavf_tx_pkt_burst_type tx_burst_type;
+	int mbuf_check = adapter->devargs.mbuf_check;
 	int no_poll_on_link_down = adapter->devargs.no_poll_on_link_down;
 #ifdef RTE_ARCH_X86
 	struct iavf_tx_queue *txq;
@@ -4011,10 +4130,12 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
 			PMD_DRV_LOG(DEBUG, "Using Vector Tx (port %d).",
 				    dev->data->port_id);
 			dev->tx_pkt_burst = iavf_xmit_pkts_vec;
+			tx_burst_type = IAVF_PKT_BURST_VEC;
 		}
 		if (use_avx2) {
 			if (check_ret == IAVF_VECTOR_PATH) {
 				dev->tx_pkt_burst = iavf_xmit_pkts_vec_avx2;
+				tx_burst_type = IAVF_PKT_BURST_VEC_AVX2;
 				PMD_DRV_LOG(DEBUG, "Using AVX2 Vector Tx (port %d).",
 					    dev->data->port_id);
 			} else if (check_ret == IAVF_VECTOR_CTX_OFFLOAD_PATH) {
@@ -4023,6 +4144,7 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
 				goto normal;
 			} else {
 				dev->tx_pkt_burst = iavf_xmit_pkts_vec_avx2_offload;
+				tx_burst_type = IAVF_PKT_BURST_VEC_AVX2_OFFLOAD;
 				dev->tx_pkt_prepare = iavf_prep_pkts;
 				PMD_DRV_LOG(DEBUG, "Using AVX2 OFFLOAD Vector Tx (port %d).",
 					    dev->data->port_id);
@@ -4032,15 +4154,18 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
 		if (use_avx512) {
 			if (check_ret == IAVF_VECTOR_PATH) {
 				dev->tx_pkt_burst = iavf_xmit_pkts_vec_avx512;
+				tx_burst_type = IAVF_PKT_BURST_VEC_AVX512;
 				PMD_DRV_LOG(DEBUG, "Using AVX512 Vector Tx (port %d).",
 					    dev->data->port_id);
 			} else if (check_ret == IAVF_VECTOR_OFFLOAD_PATH) {
 				dev->tx_pkt_burst = iavf_xmit_pkts_vec_avx512_offload;
+				tx_burst_type = IAVF_PKT_BURST_VEC_AVX512_OFFLOAD;
 				dev->tx_pkt_prepare = iavf_prep_pkts;
 				PMD_DRV_LOG(DEBUG, "Using AVX512 OFFLOAD Vector Tx (port %d).",
 					    dev->data->port_id);
 			} else {
 				dev->tx_pkt_burst = iavf_xmit_pkts_vec_avx512_ctx_offload;
+				tx_burst_type = IAVF_PKT_BURST_VEC_AVX512_CTX_OFFLOAD;
 				dev->tx_pkt_prepare = iavf_prep_pkts;
 				PMD_DRV_LOG(DEBUG, "Using AVX512 CONTEXT OFFLOAD Vector Tx (port %d).",
 					    dev->data->port_id);
@@ -4063,8 +4188,11 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
 		}
 
 		if (no_poll_on_link_down) {
-			adapter->tx_pkt_burst = dev->tx_pkt_burst;
+			adapter->tx_burst_type = tx_burst_type;
 			dev->tx_pkt_burst = iavf_xmit_pkts_no_poll;
+		} else if (mbuf_check) {
+			adapter->tx_burst_type = tx_burst_type;
+			dev->tx_pkt_burst = iavf_xmit_pkts_check;
 		}
 		return;
 	}
@@ -4074,11 +4202,15 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
 	PMD_DRV_LOG(DEBUG, "Using Basic Tx callback (port=%d).",
 		    dev->data->port_id);
 	dev->tx_pkt_burst = iavf_xmit_pkts;
+	tx_burst_type = IAVF_PKT_BURST_DEFAULT;
 	dev->tx_pkt_prepare = iavf_prep_pkts;
 
 	if (no_poll_on_link_down) {
-		adapter->tx_pkt_burst = dev->tx_pkt_burst;
+		adapter->tx_burst_type = tx_burst_type;
 		dev->tx_pkt_burst = iavf_xmit_pkts_no_poll;
+	} else if (mbuf_check) {
+		adapter->tx_burst_type = tx_burst_type;
+		dev->tx_pkt_burst = iavf_xmit_pkts_check;
 	}
 }
 
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index f432f9d956..a4128ff7a3 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -203,6 +203,9 @@ struct iavf_txq_ops {
 	void (*release_mbufs)(struct iavf_tx_queue *txq);
 };
 
+struct iavf_tx_burst_ops {
+	eth_tx_burst_t tx_pkt_burst;
+};
 
 struct iavf_rx_queue_stats {
 	uint64_t reserved;
@@ -297,6 +300,8 @@ struct iavf_tx_queue {
 	uint16_t next_rs;              /* next to check DD,  for VPMD */
 	uint16_t ipsec_crypto_pkt_md_offset;
 
+	uint64_t mbuf_errors;
+
 	bool q_set;                    /* if rx queue has been configured */
 	bool tx_deferred_start;        /* don't start this queue in dev start */
 	const struct iavf_txq_ops *ops;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 36+ messages in thread

* RE: [PATCH v4 1/2] net/iavf: add diagnostic support in TX path
  2023-12-27 10:16     ` [PATCH v4 1/2] " Mingjin Ye
@ 2023-12-27 11:30       ` Zhang, Qi Z
  2023-12-28 10:26       ` [PATCH v5 0/2] net/iavf: add diagnostics and fix error Mingjin Ye
  1 sibling, 0 replies; 36+ messages in thread
From: Zhang, Qi Z @ 2023-12-27 11:30 UTC (permalink / raw)
  To: Ye, MingjinX, dev; +Cc: Yang, Qiming, Ye, MingjinX, Wu, Jingjing, Xing, Beilei



> -----Original Message-----
> From: Mingjin Ye <mingjinx.ye@intel.com>
> Sent: Wednesday, December 27, 2023 6:17 PM
> To: dev@dpdk.org
> Cc: Yang, Qiming <qiming.yang@intel.com>; Ye, MingjinX
> <mingjinx.ye@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>; Xing, Beilei
> <beilei.xing@intel.com>
> Subject: [PATCH v4 1/2] net/iavf: add diagnostic support in TX path
> 
> The only way to enable diagnostics for TX paths is to modify the application
> source code, making it difficult to diagnose faults.
> 
> In this patch, the devarg option "mbuf_check" is introduced and the
> parameters are configured to enable the corresponding diagnostics.

Can you separate this patch into two?

1. Introduce the Tx burst type and the ops array; this actually fixes the multi-process issue and should be backported into LTS.
2. Add the mbuf check for Tx diagnostics.

Btw, please also update the documentation, so users know how to use the new devarg.


^ permalink raw reply	[flat|nested] 36+ messages in thread

* [PATCH v5 0/2] net/iavf: add diagnostics and fix error
  2023-12-27 10:16     ` [PATCH v4 1/2] " Mingjin Ye
  2023-12-27 11:30       ` Zhang, Qi Z
@ 2023-12-28 10:26       ` Mingjin Ye
  2023-12-28 10:26         ` [PATCH v5 1/2] net/iavf: fix Tx path error in multi-process Mingjin Ye
  2023-12-28 10:26         ` [PATCH v5 " Mingjin Ye
  1 sibling, 2 replies; 36+ messages in thread
From: Mingjin Ye @ 2023-12-28 10:26 UTC (permalink / raw)
  To: dev; +Cc: qiming.yang, Mingjin Ye

1. Fix multi-process error in Tx path.
2. Add diagnostics to Tx path.

Mingjin Ye (2):
  net/iavf: fix Tx path error in multi-process
  net/iavf: add diagnostic support in TX path

 doc/guides/nics/intel_vf.rst   |   4 +
 drivers/net/iavf/iavf.h        |  25 +++++-
 drivers/net/iavf/iavf_ethdev.c |  70 +++++++++++++++++
 drivers/net/iavf/iavf_rxtx.c   | 138 ++++++++++++++++++++++++++++++++-
 drivers/net/iavf/iavf_rxtx.h   |   5 ++
 5 files changed, 238 insertions(+), 4 deletions(-)

-- 
2.25.1


^ permalink raw reply	[flat|nested] 36+ messages in thread

* [PATCH v5 1/2] net/iavf: fix Tx path error in multi-process
  2023-12-28 10:26       ` [PATCH v5 0/2] net/iavf: add diagnostics and fix error Mingjin Ye
@ 2023-12-28 10:26         ` Mingjin Ye
  2023-12-28 10:50           ` Zhang, Qi Z
  2023-12-29 10:11           ` [PATCH v6 0/2] net/iavf: fix Rx/Tx burst and add diagnostics Mingjin Ye
  2023-12-28 10:26         ` [PATCH v5 " Mingjin Ye
  1 sibling, 2 replies; 36+ messages in thread
From: Mingjin Ye @ 2023-12-28 10:26 UTC (permalink / raw)
  To: dev; +Cc: qiming.yang, Mingjin Ye, stable, Jingjing Wu, Beilei Xing

In a multi-process environment, a secondary process operates on shared
memory and changes the PMD transmit function pointer of the primary
process, causing the primary process to send pkts without being able
to find the function address, resulting in a crash.

Fixes: 5b3124a0a6ef ("net/iavf: support no polling when link down")
Cc: stable@dpdk.org

Signed-off-by: Mingjin Ye <mingjinx.ye@intel.com>
---
 drivers/net/iavf/iavf.h      | 12 +++++++++++-
 drivers/net/iavf/iavf_rxtx.c | 34 +++++++++++++++++++++++++++++++---
 drivers/net/iavf/iavf_rxtx.h |  3 +++
 3 files changed, 45 insertions(+), 4 deletions(-)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 10868f2c30..4cd5bea167 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -313,6 +313,16 @@ struct iavf_devargs {
 
 struct iavf_security_ctx;
 
+enum iavf_tx_pkt_burst_type {
+	IAVF_PKT_BURST_DEFAULT		= 0,
+	IAVF_PKT_BURST_VEC		= 1,
+	IAVF_PKT_BURST_VEC_AVX2		= 2,
+	IAVF_PKT_BURST_VEC_AVX2_OFFLOAD	= 3,
+	IAVF_PKT_BURST_VEC_AVX512	= 4,
+	IAVF_PKT_BURST_VEC_AVX512_OFFLOAD	= 5,
+	IAVF_PKT_BURST_VEC_AVX512_CTX_OFFLOAD	= 6,
+};
+
 /* Structure to store private data for each VF instance. */
 struct iavf_adapter {
 	struct iavf_hw hw;
@@ -329,7 +339,7 @@ struct iavf_adapter {
 	bool closed;
 	bool no_poll;
 	eth_rx_burst_t rx_pkt_burst;
-	eth_tx_burst_t tx_pkt_burst;
+	enum iavf_tx_pkt_burst_type tx_burst_type;
 	uint16_t fdir_ref_cnt;
 	struct iavf_devargs devargs;
 };
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index f19aa14646..0d95447054 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -425,6 +425,23 @@ struct iavf_txq_ops iavf_txq_release_mbufs_ops[] = {
 
 };
 
+static const
+struct iavf_tx_burst_ops iavf_tx_pkt_burst_ops[] = {
+	[IAVF_PKT_BURST_DEFAULT].tx_pkt_burst = iavf_xmit_pkts,
+#ifdef RTE_ARCH_X86
+	[IAVF_PKT_BURST_VEC].tx_pkt_burst = iavf_xmit_pkts_vec,
+	[IAVF_PKT_BURST_VEC_AVX2].tx_pkt_burst = iavf_xmit_pkts_vec_avx2,
+	[IAVF_PKT_BURST_VEC_AVX2_OFFLOAD].tx_pkt_burst = iavf_xmit_pkts_vec_avx2_offload,
+#ifdef CC_AVX512_SUPPORT
+	[IAVF_PKT_BURST_VEC_AVX512].tx_pkt_burst = iavf_xmit_pkts_vec_avx512,
+	[IAVF_PKT_BURST_VEC_AVX512_OFFLOAD].tx_pkt_burst =
+		iavf_xmit_pkts_vec_avx512_offload,
+	[IAVF_PKT_BURST_VEC_AVX512_CTX_OFFLOAD].tx_pkt_burst =
+		iavf_xmit_pkts_vec_avx512_ctx_offload,
+#endif
+#endif
+};
+
 static inline void
 iavf_rxd_to_pkt_fields_by_comms_ovs(__rte_unused struct iavf_rx_queue *rxq,
 				    struct rte_mbuf *mb,
@@ -3724,10 +3741,13 @@ iavf_xmit_pkts_no_poll(void *tx_queue, struct rte_mbuf **tx_pkts,
 				uint16_t nb_pkts)
 {
 	struct iavf_tx_queue *txq = tx_queue;
+	enum iavf_tx_pkt_burst_type tx_burst_type =
+		txq->vsi->adapter->tx_burst_type;
+
 	if (!txq->vsi || txq->vsi->adapter->no_poll)
 		return 0;
 
-	return txq->vsi->adapter->tx_pkt_burst(tx_queue,
+	return iavf_tx_pkt_burst_ops[tx_burst_type].tx_pkt_burst(tx_queue,
 								tx_pkts, nb_pkts);
 }
 
@@ -3975,6 +3995,7 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
 {
 	struct iavf_adapter *adapter =
 		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	enum iavf_tx_pkt_burst_type tx_burst_type;
 	int no_poll_on_link_down = adapter->devargs.no_poll_on_link_down;
 #ifdef RTE_ARCH_X86
 	struct iavf_tx_queue *txq;
@@ -4011,10 +4032,12 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
 			PMD_DRV_LOG(DEBUG, "Using Vector Tx (port %d).",
 				    dev->data->port_id);
 			dev->tx_pkt_burst = iavf_xmit_pkts_vec;
+			tx_burst_type = IAVF_PKT_BURST_VEC;
 		}
 		if (use_avx2) {
 			if (check_ret == IAVF_VECTOR_PATH) {
 				dev->tx_pkt_burst = iavf_xmit_pkts_vec_avx2;
+				tx_burst_type = IAVF_PKT_BURST_VEC_AVX2;
 				PMD_DRV_LOG(DEBUG, "Using AVX2 Vector Tx (port %d).",
 					    dev->data->port_id);
 			} else if (check_ret == IAVF_VECTOR_CTX_OFFLOAD_PATH) {
@@ -4023,6 +4046,7 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
 				goto normal;
 			} else {
 				dev->tx_pkt_burst = iavf_xmit_pkts_vec_avx2_offload;
+				tx_burst_type = IAVF_PKT_BURST_VEC_AVX2_OFFLOAD;
 				dev->tx_pkt_prepare = iavf_prep_pkts;
 				PMD_DRV_LOG(DEBUG, "Using AVX2 OFFLOAD Vector Tx (port %d).",
 					    dev->data->port_id);
@@ -4032,15 +4056,18 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
 		if (use_avx512) {
 			if (check_ret == IAVF_VECTOR_PATH) {
 				dev->tx_pkt_burst = iavf_xmit_pkts_vec_avx512;
+				tx_burst_type = IAVF_PKT_BURST_VEC_AVX512;
 				PMD_DRV_LOG(DEBUG, "Using AVX512 Vector Tx (port %d).",
 					    dev->data->port_id);
 			} else if (check_ret == IAVF_VECTOR_OFFLOAD_PATH) {
 				dev->tx_pkt_burst = iavf_xmit_pkts_vec_avx512_offload;
+				tx_burst_type = IAVF_PKT_BURST_VEC_AVX512_OFFLOAD;
 				dev->tx_pkt_prepare = iavf_prep_pkts;
 				PMD_DRV_LOG(DEBUG, "Using AVX512 OFFLOAD Vector Tx (port %d).",
 					    dev->data->port_id);
 			} else {
 				dev->tx_pkt_burst = iavf_xmit_pkts_vec_avx512_ctx_offload;
+				tx_burst_type = IAVF_PKT_BURST_VEC_AVX512_CTX_OFFLOAD;
 				dev->tx_pkt_prepare = iavf_prep_pkts;
 				PMD_DRV_LOG(DEBUG, "Using AVX512 CONTEXT OFFLOAD Vector Tx (port %d).",
 					    dev->data->port_id);
@@ -4063,7 +4090,7 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
 		}
 
 		if (no_poll_on_link_down) {
-			adapter->tx_pkt_burst = dev->tx_pkt_burst;
+			adapter->tx_burst_type = tx_burst_type;
 			dev->tx_pkt_burst = iavf_xmit_pkts_no_poll;
 		}
 		return;
@@ -4074,10 +4101,11 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
 	PMD_DRV_LOG(DEBUG, "Using Basic Tx callback (port=%d).",
 		    dev->data->port_id);
 	dev->tx_pkt_burst = iavf_xmit_pkts;
+	tx_burst_type = IAVF_PKT_BURST_DEFAULT;
 	dev->tx_pkt_prepare = iavf_prep_pkts;
 
 	if (no_poll_on_link_down) {
-		adapter->tx_pkt_burst = dev->tx_pkt_burst;
+		adapter->tx_burst_type = tx_burst_type;
 		dev->tx_pkt_burst = iavf_xmit_pkts_no_poll;
 	}
 }
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index f432f9d956..fadc931b51 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -203,6 +203,9 @@ struct iavf_txq_ops {
 	void (*release_mbufs)(struct iavf_tx_queue *txq);
 };
 
+struct iavf_tx_burst_ops {
+	eth_tx_burst_t tx_pkt_burst;
+};
 
 struct iavf_rx_queue_stats {
 	uint64_t reserved;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 36+ messages in thread

* [PATCH v5 2/2] net/iavf: add diagnostic support in TX path
  2023-12-28 10:26       ` [PATCH v5 0/2] net/iavf: add diagnostics and fix error Mingjin Ye
  2023-12-28 10:26         ` [PATCH v5 1/2] net/iavf: fix Tx path error in multi-process Mingjin Ye
@ 2023-12-28 10:26         ` Mingjin Ye
  1 sibling, 0 replies; 36+ messages in thread
From: Mingjin Ye @ 2023-12-28 10:26 UTC (permalink / raw)
  To: dev
  Cc: qiming.yang, Mingjin Ye, Wenjun Wu, Yuying Zhang, Beilei Xing,
	Simei Su, Jingjing Wu

The only way to enable diagnostics for TX paths is to modify the
application source code, making it difficult to diagnose faults.

In this patch, the devarg option "mbuf_check" is introduced and the
parameters are configured to enable the corresponding diagnostics.

supported cases: mbuf, size, segment, offload, strict.
 1. mbuf: check for corrupted mbuf.
 2. size: check min/max packet length according to hw spec.
 3. segment: check that the number of mbuf segments does not exceed the hw limitation.
 4. offload: check for any unsupported offload flag.
 5. strict: check protocol headers.

parameter format: mbuf_check=[mbuf,<case1>,<case2>]
eg: dpdk-testpmd -a 0000:81:01.0,mbuf_check=[mbuf,size] -- -i
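Once enabled, rejected packets are counted in the tx_mbuf_error_packets xstat added by this patch; a sketch of how it can be observed (the counter value is illustrative):

  dpdk-testpmd -a 0000:81:01.0,mbuf_check=[size,segment] -- -i
  testpmd> show port xstats 0
  ...
  tx_mbuf_error_packets: 3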

Signed-off-by: Mingjin Ye <mingjinx.ye@intel.com>
---
v2: Remove call chain.
---
v3: Optimise the implementation.
---
v4: Fix Windows OS compilation error.
---
v5: Split the patch.
---
 doc/guides/nics/intel_vf.rst   |   4 ++
 drivers/net/iavf/iavf.h        |  13 +++++
 drivers/net/iavf/iavf_ethdev.c |  70 ++++++++++++++++++++++
 drivers/net/iavf/iavf_rxtx.c   | 104 +++++++++++++++++++++++++++++++++
 drivers/net/iavf/iavf_rxtx.h   |   2 +
 5 files changed, 193 insertions(+)

diff --git a/doc/guides/nics/intel_vf.rst b/doc/guides/nics/intel_vf.rst
index ad08198f0f..8e39bc831c 100644
--- a/doc/guides/nics/intel_vf.rst
+++ b/doc/guides/nics/intel_vf.rst
@@ -111,6 +111,10 @@ For more detail on SR-IOV, please refer to the following documents:
     by setting the ``devargs`` parameter like ``-a 18:01.0,no-poll-on-link-down=1``
     when IAVF is backed by an Intel\ |reg| E810 device or an Intel\ |reg| 700 Series Ethernet device.
 
+    Enable mbuf check for Tx diagnostics by setting the devargs parameter like
+    ``-a 18:01.0,mbuf_check=[mbuf,<case1>,<case2>]`` when IAVF is backed by an
+    Intel\ |reg| E810 device or an Intel\ |reg| 700 Series Ethernet device.
+
 The PCIE host-interface of Intel Ethernet Switch FM10000 Series VF infrastructure
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 4cd5bea167..b81329bb56 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -113,9 +113,14 @@ struct iavf_ipsec_crypto_stats {
 	} ierrors;
 };
 
+struct iavf_mbuf_stats {
+	uint64_t tx_pkt_errors;
+};
+
 struct iavf_eth_xstats {
 	struct virtchnl_eth_stats eth_stats;
 	struct iavf_ipsec_crypto_stats ips_stats;
+	struct iavf_mbuf_stats mbuf_stats;
 };
 
 /* Structure that defines a VSI, associated with a adapter. */
@@ -309,10 +314,17 @@ struct iavf_devargs {
 	uint32_t watchdog_period;
 	int auto_reset;
 	int no_poll_on_link_down;
+	int mbuf_check;
 };
 
 struct iavf_security_ctx;
 
+#define IAVF_MBUF_CHECK_F_TX_MBUF        (1ULL << 0)
+#define IAVF_MBUF_CHECK_F_TX_SIZE        (1ULL << 1)
+#define IAVF_MBUF_CHECK_F_TX_SEGMENT     (1ULL << 2)
+#define IAVF_MBUF_CHECK_F_TX_OFFLOAD     (1ULL << 3)
+#define IAVF_MBUF_CHECK_F_TX_STRICT      (1ULL << 4)
+
 enum iavf_tx_pkt_burst_type {
 	IAVF_PKT_BURST_DEFAULT		= 0,
 	IAVF_PKT_BURST_VEC		= 1,
@@ -340,6 +352,7 @@ struct iavf_adapter {
 	bool no_poll;
 	eth_rx_burst_t rx_pkt_burst;
 	enum iavf_tx_pkt_burst_type tx_burst_type;
+	uint64_t mc_flags; /* mbuf check flags. */
 	uint16_t fdir_ref_cnt;
 	struct iavf_devargs devargs;
 };
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index d1edb0dd5c..5398d2783f 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -13,6 +13,7 @@
 #include <inttypes.h>
 #include <rte_byteorder.h>
 #include <rte_common.h>
+#include <rte_os_shim.h>
 
 #include <rte_interrupts.h>
 #include <rte_debug.h>
@@ -39,6 +40,8 @@
 #define IAVF_RESET_WATCHDOG_ARG    "watchdog_period"
 #define IAVF_ENABLE_AUTO_RESET_ARG "auto_reset"
 #define IAVF_NO_POLL_ON_LINK_DOWN_ARG "no-poll-on-link-down"
+#define IAVF_MBUF_CHECK_ARG       "mbuf_check"
+
 uint64_t iavf_timestamp_dynflag;
 int iavf_timestamp_dynfield_offset = -1;
 
@@ -48,6 +51,7 @@ static const char * const iavf_valid_args[] = {
 	IAVF_RESET_WATCHDOG_ARG,
 	IAVF_ENABLE_AUTO_RESET_ARG,
 	IAVF_NO_POLL_ON_LINK_DOWN_ARG,
+	IAVF_MBUF_CHECK_ARG,
 	NULL
 };
 
@@ -174,6 +178,7 @@ static const struct rte_iavf_xstats_name_off rte_iavf_stats_strings[] = {
 	{"tx_broadcast_packets", _OFF_OF(eth_stats.tx_broadcast)},
 	{"tx_dropped_packets", _OFF_OF(eth_stats.tx_discards)},
 	{"tx_error_packets", _OFF_OF(eth_stats.tx_errors)},
+	{"tx_mbuf_error_packets", _OFF_OF(mbuf_stats.tx_pkt_errors)},
 
 	{"inline_ipsec_crypto_ipackets", _OFF_OF(ips_stats.icount)},
 	{"inline_ipsec_crypto_ibytes", _OFF_OF(ips_stats.ibytes)},
@@ -1881,6 +1886,8 @@ static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
 {
 	int ret;
 	unsigned int i;
+	struct iavf_tx_queue *txq;
+	uint64_t mbuf_errors = 0;
 	struct iavf_adapter *adapter =
 		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
@@ -1904,6 +1911,15 @@ static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
 	if (iavf_ipsec_crypto_supported(adapter))
 		iavf_dev_update_ipsec_xstats(dev, &iavf_xtats.ips_stats);
 
+	if (adapter->devargs.mbuf_check) {
+		for (i = 0; i < dev->data->nb_tx_queues; i++) {
+			txq = dev->data->tx_queues[i];
+			mbuf_errors += __atomic_load_n(&txq->mbuf_errors,
+				__ATOMIC_RELAXED);
+		}
+		iavf_xtats.mbuf_stats.tx_pkt_errors = mbuf_errors;
+	}
+
 	/* loop over xstats array and values from pstats */
 	for (i = 0; i < IAVF_NB_XSTATS; i++) {
 		xstats[i].id = i;
@@ -2286,6 +2302,52 @@ iavf_parse_watchdog_period(__rte_unused const char *key, const char *value, void
 	return 0;
 }
 
+static int
+iavf_parse_mbuf_check(__rte_unused const char *key, const char *value, void *args)
+{
+	char *cur;
+	char *tmp;
+	int str_len;
+	int valid_len;
+
+	int ret = 0;
+	uint64_t *mc_flags = args;
+	char *str2 = strdup(value);
+	if (str2 == NULL)
+		return -1;
+
+	str_len = strlen(str2);
+	if (str2[0] == '[' && str2[str_len - 1] == ']') {
+		if (str_len < 3) {
+			ret = -1;
+			goto mdd_end;
+		}
+		valid_len = str_len - 2;
+		memmove(str2, str2 + 1, valid_len);
+		memset(str2 + valid_len, '\0', 2);
+	}
+	cur = strtok_r(str2, ",", &tmp);
+	while (cur != NULL) {
+		if (!strcmp(cur, "mbuf"))
+			*mc_flags |= IAVF_MBUF_CHECK_F_TX_MBUF;
+		else if (!strcmp(cur, "size"))
+			*mc_flags |= IAVF_MBUF_CHECK_F_TX_SIZE;
+		else if (!strcmp(cur, "segment"))
+			*mc_flags |= IAVF_MBUF_CHECK_F_TX_SEGMENT;
+		else if (!strcmp(cur, "offload"))
+			*mc_flags |= IAVF_MBUF_CHECK_F_TX_OFFLOAD;
+		else if (!strcmp(cur, "strict"))
+			*mc_flags |= IAVF_MBUF_CHECK_F_TX_STRICT;
+		else
+			PMD_DRV_LOG(ERR, "Unsupported mdd check type: %s", cur);
+		cur = strtok_r(NULL, ",", &tmp);
+	}
+
+mdd_end:
+	free(str2);
+	return ret;
+}
+
 static int iavf_parse_devargs(struct rte_eth_dev *dev)
 {
 	struct iavf_adapter *ad =
@@ -2340,6 +2402,14 @@ static int iavf_parse_devargs(struct rte_eth_dev *dev)
 		goto bail;
 	}
 
+	ret = rte_kvargs_process(kvlist, IAVF_MBUF_CHECK_ARG,
+				 &iavf_parse_mbuf_check, &ad->mc_flags);
+	if (ret)
+		goto bail;
+
+	if (ad->mc_flags)
+		ad->devargs.mbuf_check = 1;
+
 	ret = rte_kvargs_process(kvlist, IAVF_ENABLE_AUTO_RESET_ARG,
 				 &parse_bool, &ad->devargs.auto_reset);
 	if (ret)
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 0d95447054..7fa84e9f0e 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -3646,6 +3646,103 @@ iavf_check_mbuf(struct rte_mbuf *m)
 	return check_ether_type(&info, m);
 }
 
+/* Tx mbuf check */
+static uint16_t
+iavf_xmit_pkts_check(void *tx_queue, struct rte_mbuf **tx_pkts,
+	      uint16_t nb_pkts)
+{
+	uint16_t idx;
+	uint64_t ol_flags;
+	struct rte_mbuf *mb;
+	uint16_t good_pkts = nb_pkts;
+	const char *reason = NULL;
+	bool pkt_error = false;
+	struct iavf_tx_queue *txq = tx_queue;
+	struct iavf_adapter *adapter = txq->vsi->adapter;
+	enum iavf_tx_pkt_burst_type tx_burst_type =
+		txq->vsi->adapter->tx_burst_type;
+
+	for (idx = 0; idx < nb_pkts; idx++) {
+		mb = tx_pkts[idx];
+		ol_flags = mb->ol_flags;
+
+		if ((adapter->mc_flags & IAVF_MBUF_CHECK_F_TX_MBUF) &&
+			(rte_mbuf_check(mb, 1, &reason) != 0)) {
+			PMD_TX_LOG(ERR, "INVALID mbuf: %s\n", reason);
+			pkt_error = true;
+			break;
+		}
+
+		if ((adapter->mc_flags & IAVF_MBUF_CHECK_F_TX_SIZE) &&
+			(mb->data_len < IAVF_TX_MIN_PKT_LEN ||
+			mb->data_len > adapter->vf.max_pkt_len)) {
+			PMD_TX_LOG(ERR, "INVALID mbuf: data_len (%u) is out "
+			"of range, reasonable range (%d - %u)\n", mb->data_len,
+			IAVF_TX_MIN_PKT_LEN, adapter->vf.max_pkt_len);
+			pkt_error = true;
+			break;
+		}
+
+		if (adapter->mc_flags & IAVF_MBUF_CHECK_F_TX_SEGMENT) {
+			/* Check condition for nb_segs > IAVF_TX_MAX_MTU_SEG. */
+			if (!(ol_flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG))) {
+				if (mb->nb_segs > IAVF_TX_MAX_MTU_SEG) {
+					PMD_TX_LOG(ERR, "INVALID mbuf: nb_segs (%d) exceeds "
+					"HW limit, maximum allowed value is %d\n", mb->nb_segs,
+					IAVF_TX_MAX_MTU_SEG);
+					pkt_error = true;
+					break;
+				}
+			} else if ((mb->tso_segsz < IAVF_MIN_TSO_MSS) ||
+				(mb->tso_segsz > IAVF_MAX_TSO_MSS)) {
+				/* MSS outside the range are considered malicious */
+				PMD_TX_LOG(ERR, "INVALID mbuf: tso_segsz (%u) is out "
+				"of range, reasonable range (%d - %u)\n", mb->tso_segsz,
+				IAVF_MIN_TSO_MSS, IAVF_MAX_TSO_MSS);
+				pkt_error = true;
+				break;
+			} else if (mb->nb_segs > txq->nb_tx_desc) {
+				PMD_TX_LOG(ERR, "INVALID mbuf: nb_segs out "
+				"of ring length\n");
+				pkt_error = true;
+				break;
+			}
+		}
+
+		if (adapter->mc_flags & IAVF_MBUF_CHECK_F_TX_OFFLOAD) {
+			if (ol_flags & IAVF_TX_OFFLOAD_NOTSUP_MASK) {
+				PMD_TX_LOG(ERR, "INVALID mbuf: TX offload "
+				"is not supported\n");
+				pkt_error = true;
+				break;
+			}
+
+			if (!rte_validate_tx_offload(mb)) {
+				PMD_TX_LOG(ERR, "INVALID mbuf: TX offload "
+				"setup error\n");
+				pkt_error = true;
+				break;
+			}
+		}
+
+		if (adapter->mc_flags & IAVF_MBUF_CHECK_F_TX_STRICT &&
+			iavf_check_mbuf(mb)) {
+			pkt_error = true;
+			break;
+		}
+	}
+
+	if (pkt_error) {
+		__atomic_fetch_add(&txq->mbuf_errors, 1, __ATOMIC_RELAXED);
+		good_pkts = idx;
+		if (good_pkts == 0)
+			return 0;
+	}
+
+	return iavf_tx_pkt_burst_ops[tx_burst_type].tx_pkt_burst(tx_queue,
+								tx_pkts, good_pkts);
+}
+
 /* TX prep functions */
 uint16_t
 iavf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
@@ -3996,6 +4093,7 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
 	struct iavf_adapter *adapter =
 		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 	enum iavf_tx_pkt_burst_type tx_burst_type;
+	int mbuf_check = adapter->devargs.mbuf_check;
 	int no_poll_on_link_down = adapter->devargs.no_poll_on_link_down;
 #ifdef RTE_ARCH_X86
 	struct iavf_tx_queue *txq;
@@ -4092,6 +4190,9 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
 		if (no_poll_on_link_down) {
 			adapter->tx_burst_type = tx_burst_type;
 			dev->tx_pkt_burst = iavf_xmit_pkts_no_poll;
+		} else if (mbuf_check) {
+			adapter->tx_burst_type = tx_burst_type;
+			dev->tx_pkt_burst = iavf_xmit_pkts_check;
 		}
 		return;
 	}
@@ -4107,6 +4208,9 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
 	if (no_poll_on_link_down) {
 		adapter->tx_burst_type = tx_burst_type;
 		dev->tx_pkt_burst = iavf_xmit_pkts_no_poll;
+	} else if (mbuf_check) {
+		adapter->tx_burst_type = tx_burst_type;
+		dev->tx_pkt_burst = iavf_xmit_pkts_check;
 	}
 }
 
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index fadc931b51..a4128ff7a3 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -300,6 +300,8 @@ struct iavf_tx_queue {
 	uint16_t next_rs;              /* next to check DD,  for VPMD */
 	uint16_t ipsec_crypto_pkt_md_offset;
 
+	uint64_t mbuf_errors;
+
 	bool q_set;                    /* if rx queue has been configured */
 	bool tx_deferred_start;        /* don't start this queue in dev start */
 	const struct iavf_txq_ops *ops;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 36+ messages in thread

* RE: [PATCH v5 1/2] net/iavf: fix Tx path error in multi-process
  2023-12-28 10:26         ` [PATCH v5 1/2] net/iavf: fix Tx path error in multi-process Mingjin Ye
@ 2023-12-28 10:50           ` Zhang, Qi Z
  2023-12-29 10:11           ` [PATCH v6 0/2] net/iavf: fix Rx/Tx burst and add diagnostics Mingjin Ye
  1 sibling, 0 replies; 36+ messages in thread
From: Zhang, Qi Z @ 2023-12-28 10:50 UTC (permalink / raw)
  To: Ye, MingjinX, dev
  Cc: Yang, Qiming, Ye, MingjinX, stable, Wu, Jingjing, Xing, Beilei



> -----Original Message-----
> From: Mingjin Ye <mingjinx.ye@intel.com>
> Sent: Thursday, December 28, 2023 6:26 PM
> To: dev@dpdk.org
> Cc: Yang, Qiming <qiming.yang@intel.com>; Ye, MingjinX
> <mingjinx.ye@intel.com>; stable@dpdk.org; Wu, Jingjing
> <jingjing.wu@intel.com>; Xing, Beilei <beilei.xing@intel.com>
> Subject: [PATCH v5 1/2] net/iavf: fix Tx path error in multi-process
> 
> In a multi-process environment, a secondary process operates on shared
> memory and changes the PMD transmit function pointer of the primary
> process, causing the primary process to send pkts without being able to find
> the function address, resulting in a crash.
> 
> Fixes: 5b3124a0a6ef ("net/iavf: support no polling when link down")
> Cc: stable@dpdk.org

Does Rx also need to be fixed? Please make a complete fix.

> 
> Signed-off-by: Mingjin Ye <mingjinx.ye@intel.com>
> ---
>  drivers/net/iavf/iavf.h      | 12 +++++++++++-
>  drivers/net/iavf/iavf_rxtx.c | 34 +++++++++++++++++++++++++++++++---
>  drivers/net/iavf/iavf_rxtx.h |  3 +++
>  3 files changed, 45 insertions(+), 4 deletions(-)
> 
> diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h index
> 10868f2c30..4cd5bea167 100644
> --- a/drivers/net/iavf/iavf.h
> +++ b/drivers/net/iavf/iavf.h
> @@ -313,6 +313,16 @@ struct iavf_devargs {
> 
>  struct iavf_security_ctx;
> 
> +enum iavf_tx_pkt_burst_type {
> +	IAVF_PKT_BURST_DEFAULT		= 0,
> +	IAVF_PKT_BURST_VEC		= 1,

Better to rename it to xxx_VEC_SSE.




* [PATCH v6 0/2] net/iavf: fix Rx/Tx burst and add diagnostics
  2023-12-28 10:26         ` [PATCH v5 1/2] net/iavf: fix Tx path error in multi-process Mingjin Ye
  2023-12-28 10:50           ` Zhang, Qi Z
@ 2023-12-29 10:11           ` Mingjin Ye
  2023-12-29 10:11             ` [PATCH v6 1/2] net/iavf: fix Rx/Tx burst in multi-process Mingjin Ye
  2023-12-29 10:11             ` [PATCH v6 " Mingjin Ye
  1 sibling, 2 replies; 36+ messages in thread
From: Mingjin Ye @ 2023-12-29 10:11 UTC (permalink / raw)
  To: dev; +Cc: qiming.yang, Mingjin Ye

Fixed the Rx/Tx crash in a multi-process environment and added
the Tx diagnostics feature.

Mingjin Ye (2):
  net/iavf: fix Rx/Tx burst in multi-process
  net/iavf: add diagnostic support in TX path

 doc/guides/nics/intel_vf.rst   |   4 +
 drivers/net/iavf/iavf.h        |  54 ++++++-
 drivers/net/iavf/iavf_ethdev.c |  68 ++++++++
 drivers/net/iavf/iavf_rxtx.c   | 282 +++++++++++++++++++++++++++++----
 drivers/net/iavf/iavf_rxtx.h   |  10 ++
 5 files changed, 389 insertions(+), 29 deletions(-)

-- 
2.25.1



* [PATCH v6 1/2] net/iavf: fix Rx/Tx burst in multi-process
  2023-12-29 10:11           ` [PATCH v6 0/2] net/iavf: fix Rx/Tx burst and add diagnostics Mingjin Ye
@ 2023-12-29 10:11             ` Mingjin Ye
  2023-12-31  6:41               ` Zhang, Qi Z
  2024-01-02 10:52               ` [PATCH v7 0/2] net/iavf: fix Rx/Tx burst and add diagnostics Mingjin Ye
  2023-12-29 10:11             ` [PATCH v6 " Mingjin Ye
  1 sibling, 2 replies; 36+ messages in thread
From: Mingjin Ye @ 2023-12-29 10:11 UTC (permalink / raw)
  To: dev; +Cc: qiming.yang, Mingjin Ye, stable, Jingjing Wu, Beilei Xing

In a multi-process environment, a secondary process operates on shared
memory and changes the function pointer of the primary process, resulting
in a crash when the primary process cannot find the function address
during an Rx/Tx burst.
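
A minimal sketch of the failure mode and of the index-based fix
(illustrative only; the two Rx functions and their prototypes come
from iavf_rxtx.h, everything else is simplified):

	#include <rte_ethdev.h>

	/* Declared in iavf_rxtx.h. */
	uint16_t iavf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
				uint16_t nb_pkts);
	uint16_t iavf_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
				    uint16_t nb_pkts);

	enum iavf_rx_burst_type { IAVF_RX_DEFAULT, IAVF_RX_SSE };

	/* Per-process const table: the resolved function address is
	 * always valid in the process that performs the burst. */
	static const eth_rx_burst_t iavf_rx_pkt_burst_ops[] = {
		[IAVF_RX_DEFAULT] = iavf_recv_pkts,
		[IAVF_RX_SSE]     = iavf_recv_pkts_vec,
	};

	/* Only the enum index lives in shared adapter memory; a raw
	 * function pointer stored there would be a virtual address that
	 * is meaningless in any other process. */
	static inline uint16_t
	iavf_rx_resolved(void *rx_queue, struct rte_mbuf **rx_pkts,
			 uint16_t nb_pkts, enum iavf_rx_burst_type type)
	{
		return iavf_rx_pkt_burst_ops[type](rx_queue, rx_pkts, nb_pkts);
	}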

Fixes: 5b3124a0a6ef ("net/iavf: support no polling when link down")
Cc: stable@dpdk.org

Signed-off-by: Mingjin Ye <mingjinx.ye@intel.com>
---
v2: Add fix for Rx burst.
---
 drivers/net/iavf/iavf.h      |  42 +++++++-
 drivers/net/iavf/iavf_rxtx.c | 184 ++++++++++++++++++++++++++++++-----
 drivers/net/iavf/iavf_rxtx.h |   8 ++
 3 files changed, 205 insertions(+), 29 deletions(-)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 10868f2c30..8db9f3d7cd 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -313,6 +313,44 @@ struct iavf_devargs {
 
 struct iavf_security_ctx;
 
+enum iavf_rx_burst_type {
+	IAVF_RX_BURST_DEFAULT,
+	IAVF_RX_BURST_FRXD,
+	IAVF_RX_BURST_BULK_ALLOC,
+	IAVF_RX_BURST_SCATTERED,
+	IAVF_RX_BURST_SFRXD,
+	IAVF_RX_BURST_VEC_SSE,
+	IAVF_RX_BURST_VEC_AVX2,
+	IAVF_RX_BURST_VEC_AVX2_OFFLOAD,
+	IAVF_RX_BURST_VEC_SSE_FRXD,
+	IAVF_RX_BURST_VEC_AVX2_FRXD,
+	IAVF_RX_BURST_VEC_AVX2_FRXD_OFFLOAD,
+	IAVF_RX_BURST_VEC_SSE_SCATTERED,
+	IAVF_RX_BURST_VEC_AVX2_SCATTERED,
+	IAVF_RX_BURST_VEC_AVX2_SCATTERED_OFFLOAD,
+	IAVF_RX_BURST_VEC_SSE_SFLEX_RXD,
+	IAVF_RX_BURST_VEC_AVX2_SFLEX_RXD,
+	IAVF_RX_BURST_VEC_AVX2_SFRXD_OFFLOAD,
+	IAVF_RX_BURST_VEC_AVX512,
+	IAVF_RX_BURST_VEC_AVX512_OFFLOAD,
+	IAVF_RX_BURST_VEC_AVX512_FRXD,
+	IAVF_RX_BURST_VEC_AVX512_FRXD_OFFLOAD,
+	IAVF_RX_BURST_VEC_AVX512_SCATTERED,
+	IAVF_RX_BURST_VEC_AVX512_SCATTERED_OFFLOAD,
+	IAVF_RX_BURST_VEC_AVX512_SFLEX_RXD,
+	IAVF_RX_BURST_VEC_AVX512_SFRXD_OFFLOAD,
+};
+
+enum iavf_tx_burst_type {
+	IAVF_TX_BURST_DEFAULT,
+	IAVF_TX_BURST_VEC_SSE,
+	IAVF_TX_BURST_VEC_AVX2,
+	IAVF_TX_BURST_VEC_AVX2_OFFLOAD,
+	IAVF_TX_BURST_VEC_AVX512,
+	IAVF_TX_BURST_VEC_AVX512_OFFLOAD,
+	IAVF_TX_BURST_VEC_AVX512_CTX_OFFLOAD,
+};
+
 /* Structure to store private data for each VF instance. */
 struct iavf_adapter {
 	struct iavf_hw hw;
@@ -328,8 +366,8 @@ struct iavf_adapter {
 	bool stopped;
 	bool closed;
 	bool no_poll;
-	eth_rx_burst_t rx_pkt_burst;
-	eth_tx_burst_t tx_pkt_burst;
+	enum iavf_rx_burst_type rx_burst_type;
+	enum iavf_tx_burst_type tx_burst_type;
 	uint16_t fdir_ref_cnt;
 	struct iavf_devargs devargs;
 };
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index f19aa14646..152f755206 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -3707,15 +3707,88 @@ iavf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
 	return i;
 }
 
+static const
+struct iavf_rx_burst_ops iavf_rx_pkt_burst_ops[] = {
+	[IAVF_RX_BURST_DEFAULT].rx_pkt_burst = iavf_recv_pkts,
+	[IAVF_RX_BURST_FRXD].rx_pkt_burst = iavf_recv_pkts_flex_rxd,
+	[IAVF_RX_BURST_BULK_ALLOC].rx_pkt_burst = iavf_recv_pkts_bulk_alloc,
+	[IAVF_RX_BURST_SCATTERED].rx_pkt_burst = iavf_recv_scattered_pkts,
+	[IAVF_RX_BURST_SFRXD].rx_pkt_burst =
+		iavf_recv_scattered_pkts_flex_rxd,
+#ifdef RTE_ARCH_X86
+	[IAVF_RX_BURST_VEC_SSE].rx_pkt_burst = iavf_recv_pkts_vec,
+	[IAVF_RX_BURST_VEC_AVX2].rx_pkt_burst = iavf_recv_pkts_vec_avx2,
+	[IAVF_RX_BURST_VEC_AVX2_OFFLOAD].rx_pkt_burst =
+		iavf_recv_pkts_vec_avx2_offload,
+	[IAVF_RX_BURST_VEC_SSE_FRXD].rx_pkt_burst =
+		iavf_recv_pkts_vec_flex_rxd,
+	[IAVF_RX_BURST_VEC_AVX2_FRXD].rx_pkt_burst =
+		iavf_recv_pkts_vec_avx2_flex_rxd,
+	[IAVF_RX_BURST_VEC_AVX2_FRXD_OFFLOAD].rx_pkt_burst =
+		iavf_recv_pkts_vec_avx2_flex_rxd_offload,
+	[IAVF_RX_BURST_VEC_SSE_SCATTERED].rx_pkt_burst =
+		iavf_recv_scattered_pkts_vec,
+	[IAVF_RX_BURST_VEC_AVX2_SCATTERED].rx_pkt_burst =
+		iavf_recv_scattered_pkts_vec_avx2,
+	[IAVF_RX_BURST_VEC_AVX2_SCATTERED_OFFLOAD].rx_pkt_burst =
+		iavf_recv_scattered_pkts_vec_avx2_offload,
+	[IAVF_RX_BURST_VEC_SSE_SFLEX_RXD].rx_pkt_burst =
+		iavf_recv_scattered_pkts_vec_flex_rxd,
+	[IAVF_RX_BURST_VEC_AVX2_SFLEX_RXD].rx_pkt_burst =
+		iavf_recv_scattered_pkts_vec_avx2_flex_rxd,
+	[IAVF_RX_BURST_VEC_AVX2_SFRXD_OFFLOAD].rx_pkt_burst =
+		iavf_recv_scattered_pkts_vec_avx2_flex_rxd_offload,
+#ifdef CC_AVX512_SUPPORT
+	[IAVF_RX_BURST_VEC_AVX512].rx_pkt_burst = iavf_recv_pkts_vec_avx512,
+	[IAVF_RX_BURST_VEC_AVX512_OFFLOAD].rx_pkt_burst =
+		iavf_recv_pkts_vec_avx512_offload,
+	[IAVF_RX_BURST_VEC_AVX512_FRXD].rx_pkt_burst =
+		iavf_recv_pkts_vec_avx512_flex_rxd,
+	[IAVF_RX_BURST_VEC_AVX512_FRXD_OFFLOAD].rx_pkt_burst =
+		iavf_recv_pkts_vec_avx512_flex_rxd_offload,
+	[IAVF_RX_BURST_VEC_AVX512_SCATTERED].rx_pkt_burst =
+		iavf_recv_scattered_pkts_vec_avx512,
+	[IAVF_RX_BURST_VEC_AVX512_SCATTERED_OFFLOAD].rx_pkt_burst =
+		iavf_recv_scattered_pkts_vec_avx512_offload,
+	[IAVF_RX_BURST_VEC_AVX512_SFLEX_RXD].rx_pkt_burst =
+		iavf_recv_scattered_pkts_vec_avx512_flex_rxd,
+	[IAVF_RX_BURST_VEC_AVX512_SFRXD_OFFLOAD].rx_pkt_burst =
+		iavf_recv_scattered_pkts_vec_avx512_flex_rxd_offload,
+#endif
+#elif defined RTE_ARCH_ARM
+	[IAVF_RX_BURST_VEC_SSE].rx_pkt_burst = iavf_recv_pkts_vec,
+#endif
+};
+
+static const
+struct iavf_tx_burst_ops iavf_tx_pkt_burst_ops[] = {
+	[IAVF_TX_BURST_DEFAULT].tx_pkt_burst = iavf_xmit_pkts,
+#ifdef RTE_ARCH_X86
+	[IAVF_TX_BURST_VEC_SSE].tx_pkt_burst = iavf_xmit_pkts_vec,
+	[IAVF_TX_BURST_VEC_AVX2].tx_pkt_burst = iavf_xmit_pkts_vec_avx2,
+	[IAVF_TX_BURST_VEC_AVX2_OFFLOAD].tx_pkt_burst = iavf_xmit_pkts_vec_avx2_offload,
+#ifdef CC_AVX512_SUPPORT
+	[IAVF_TX_BURST_VEC_AVX512].tx_pkt_burst = iavf_xmit_pkts_vec_avx512,
+	[IAVF_TX_BURST_VEC_AVX512_OFFLOAD].tx_pkt_burst =
+		iavf_xmit_pkts_vec_avx512_offload,
+	[IAVF_TX_BURST_VEC_AVX512_CTX_OFFLOAD].tx_pkt_burst =
+		iavf_xmit_pkts_vec_avx512_ctx_offload,
+#endif
+#endif
+};
+
 static uint16_t
 iavf_recv_pkts_no_poll(void *rx_queue, struct rte_mbuf **rx_pkts,
 				uint16_t nb_pkts)
 {
 	struct iavf_rx_queue *rxq = rx_queue;
+	enum iavf_rx_burst_type rx_burst_type =
+		rxq->vsi->adapter->rx_burst_type;
+
 	if (!rxq->vsi || rxq->vsi->adapter->no_poll)
 		return 0;
 
-	return rxq->vsi->adapter->rx_pkt_burst(rx_queue,
+	return iavf_rx_pkt_burst_ops[rx_burst_type].rx_pkt_burst(rx_queue,
 								rx_pkts, nb_pkts);
 }
 
@@ -3724,10 +3797,13 @@ iavf_xmit_pkts_no_poll(void *tx_queue, struct rte_mbuf **tx_pkts,
 				uint16_t nb_pkts)
 {
 	struct iavf_tx_queue *txq = tx_queue;
+	enum iavf_tx_burst_type tx_burst_type =
+		txq->vsi->adapter->tx_burst_type;
+
 	if (!txq->vsi || txq->vsi->adapter->no_poll)
 		return 0;
 
-	return txq->vsi->adapter->tx_pkt_burst(tx_queue,
+	return iavf_tx_pkt_burst_ops[tx_burst_type].tx_pkt_burst(tx_queue,
 								tx_pkts, nb_pkts);
 }
 
@@ -3738,6 +3814,7 @@ iavf_set_rx_function(struct rte_eth_dev *dev)
 	struct iavf_adapter *adapter =
 		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+	enum iavf_rx_burst_type rx_burst_type;
 	int no_poll_on_link_down = adapter->devargs.no_poll_on_link_down;
 	int i;
 	struct iavf_rx_queue *rxq;
@@ -3809,42 +3886,64 @@ iavf_set_rx_function(struct rte_eth_dev *dev)
 			}
 			if (use_flex) {
 				dev->rx_pkt_burst = iavf_recv_scattered_pkts_vec_flex_rxd;
+				rx_burst_type = IAVF_RX_BURST_VEC_SSE_SFLEX_RXD;
 				if (use_avx2) {
-					if (check_ret == IAVF_VECTOR_PATH)
+					if (check_ret == IAVF_VECTOR_PATH) {
 						dev->rx_pkt_burst =
 							iavf_recv_scattered_pkts_vec_avx2_flex_rxd;
-					else
+						rx_burst_type =
+							IAVF_RX_BURST_VEC_AVX2_SFLEX_RXD;
+					} else {
 						dev->rx_pkt_burst =
 							iavf_recv_scattered_pkts_vec_avx2_flex_rxd_offload;
+						rx_burst_type =
+							IAVF_RX_BURST_VEC_AVX2_SFRXD_OFFLOAD;
+					}
 				}
 #ifdef CC_AVX512_SUPPORT
 				if (use_avx512) {
-					if (check_ret == IAVF_VECTOR_PATH)
+					if (check_ret == IAVF_VECTOR_PATH) {
 						dev->rx_pkt_burst =
 							iavf_recv_scattered_pkts_vec_avx512_flex_rxd;
-					else
+						rx_burst_type =
+							IAVF_RX_BURST_VEC_AVX512_SFLEX_RXD;
+					} else {
 						dev->rx_pkt_burst =
 							iavf_recv_scattered_pkts_vec_avx512_flex_rxd_offload;
+						rx_burst_type =
+							IAVF_RX_BURST_VEC_AVX512_SFRXD_OFFLOAD;
+					}
 				}
 #endif
 			} else {
 				dev->rx_pkt_burst = iavf_recv_scattered_pkts_vec;
+				rx_burst_type = IAVF_RX_BURST_VEC_SSE_SCATTERED;
 				if (use_avx2) {
-					if (check_ret == IAVF_VECTOR_PATH)
+					if (check_ret == IAVF_VECTOR_PATH) {
 						dev->rx_pkt_burst =
 							iavf_recv_scattered_pkts_vec_avx2;
-					else
+						rx_burst_type =
+							IAVF_RX_BURST_VEC_AVX2_SCATTERED;
+					} else {
 						dev->rx_pkt_burst =
 							iavf_recv_scattered_pkts_vec_avx2_offload;
+						rx_burst_type =
+							IAVF_RX_BURST_VEC_AVX2_SCATTERED_OFFLOAD;
+					}
 				}
 #ifdef CC_AVX512_SUPPORT
 				if (use_avx512) {
-					if (check_ret == IAVF_VECTOR_PATH)
+					if (check_ret == IAVF_VECTOR_PATH) {
 						dev->rx_pkt_burst =
 							iavf_recv_scattered_pkts_vec_avx512;
-					else
+						rx_burst_type =
+							IAVF_RX_BURST_VEC_AVX512_SCATTERED;
+					} else {
 						dev->rx_pkt_burst =
 							iavf_recv_scattered_pkts_vec_avx512_offload;
+						rx_burst_type =
+							IAVF_RX_BURST_VEC_AVX512_SCATTERED_OFFLOAD;
+					}
 				}
 #endif
 			}
@@ -3875,49 +3974,64 @@ iavf_set_rx_function(struct rte_eth_dev *dev)
 			}
 			if (use_flex) {
 				dev->rx_pkt_burst = iavf_recv_pkts_vec_flex_rxd;
+				rx_burst_type = IAVF_RX_BURST_VEC_SSE_FRXD;
 				if (use_avx2) {
-					if (check_ret == IAVF_VECTOR_PATH)
+					if (check_ret == IAVF_VECTOR_PATH) {
 						dev->rx_pkt_burst =
 							iavf_recv_pkts_vec_avx2_flex_rxd;
-					else
+					rx_burst_type = IAVF_RX_BURST_VEC_AVX2_FRXD;
+					} else {
 						dev->rx_pkt_burst =
 							iavf_recv_pkts_vec_avx2_flex_rxd_offload;
+					rx_burst_type = IAVF_RX_BURST_VEC_AVX2_FRXD_OFFLOAD;
+					}
 				}
 #ifdef CC_AVX512_SUPPORT
 				if (use_avx512) {
-					if (check_ret == IAVF_VECTOR_PATH)
+					if (check_ret == IAVF_VECTOR_PATH) {
 						dev->rx_pkt_burst =
 							iavf_recv_pkts_vec_avx512_flex_rxd;
-					else
+						rx_burst_type = IAVF_RX_BURST_VEC_AVX512_FRXD;
+					} else {
 						dev->rx_pkt_burst =
 							iavf_recv_pkts_vec_avx512_flex_rxd_offload;
+						rx_burst_type =
+							IAVF_RX_BURST_VEC_AVX512_FRXD_OFFLOAD;
+					}
 				}
 #endif
 			} else {
 				dev->rx_pkt_burst = iavf_recv_pkts_vec;
+				rx_burst_type = IAVF_RX_BURST_VEC_SSE;
 				if (use_avx2) {
-					if (check_ret == IAVF_VECTOR_PATH)
+					if (check_ret == IAVF_VECTOR_PATH) {
 						dev->rx_pkt_burst =
 							iavf_recv_pkts_vec_avx2;
-					else
+						rx_burst_type = IAVF_RX_BURST_VEC_AVX2;
+					} else {
 						dev->rx_pkt_burst =
 							iavf_recv_pkts_vec_avx2_offload;
+						rx_burst_type = IAVF_RX_BURST_VEC_AVX2_OFFLOAD;
+					}
 				}
 #ifdef CC_AVX512_SUPPORT
 				if (use_avx512) {
-					if (check_ret == IAVF_VECTOR_PATH)
+					if (check_ret == IAVF_VECTOR_PATH) {
 						dev->rx_pkt_burst =
 							iavf_recv_pkts_vec_avx512;
-					else
+						rx_burst_type = IAVF_RX_BURST_VEC_AVX512;
+					} else {
 						dev->rx_pkt_burst =
 							iavf_recv_pkts_vec_avx512_offload;
+						rx_burst_type = IAVF_RX_BURST_VEC_AVX512_OFFLOAD;
+					}
 				}
 #endif
 			}
 		}
 
 		if (no_poll_on_link_down) {
-			adapter->rx_pkt_burst = dev->rx_pkt_burst;
+			adapter->rx_burst_type = rx_burst_type;
 			dev->rx_pkt_burst = iavf_recv_pkts_no_poll;
 		}
 		return;
@@ -3935,9 +4049,10 @@ iavf_set_rx_function(struct rte_eth_dev *dev)
 			(void)iavf_rxq_vec_setup(rxq);
 		}
 		dev->rx_pkt_burst = iavf_recv_pkts_vec;
+		rx_burst_type = IAVF_RX_BURST_VEC_SSE;
 
 		if (no_poll_on_link_down) {
-			adapter->rx_pkt_burst = dev->rx_pkt_burst;
+			adapter->rx_burst_type = rx_burst_type;
 			dev->rx_pkt_burst = iavf_recv_pkts_no_poll;
 		}
 		return;
@@ -3946,25 +4061,32 @@ iavf_set_rx_function(struct rte_eth_dev *dev)
 	if (dev->data->scattered_rx) {
 		PMD_DRV_LOG(DEBUG, "Using a Scattered Rx callback (port=%d).",
 			    dev->data->port_id);
-		if (use_flex)
+		if (use_flex) {
 			dev->rx_pkt_burst = iavf_recv_scattered_pkts_flex_rxd;
-		else
+			rx_burst_type = IAVF_RX_BURST_SFRXD;
+		} else {
 			dev->rx_pkt_burst = iavf_recv_scattered_pkts;
+			rx_burst_type = IAVF_RX_BURST_SCATTERED;
+		}
 	} else if (adapter->rx_bulk_alloc_allowed) {
 		PMD_DRV_LOG(DEBUG, "Using bulk Rx callback (port=%d).",
 			    dev->data->port_id);
 		dev->rx_pkt_burst = iavf_recv_pkts_bulk_alloc;
+		rx_burst_type = IAVF_RX_BURST_BULK_ALLOC;
 	} else {
 		PMD_DRV_LOG(DEBUG, "Using Basic Rx callback (port=%d).",
 			    dev->data->port_id);
-		if (use_flex)
+		if (use_flex) {
 			dev->rx_pkt_burst = iavf_recv_pkts_flex_rxd;
-		else
+			rx_burst_type = IAVF_RX_BURST_FRXD;
+		} else {
 			dev->rx_pkt_burst = iavf_recv_pkts;
+			rx_burst_type = IAVF_RX_BURST_DEFAULT;
+		}
 	}
 
 	if (no_poll_on_link_down) {
-		adapter->rx_pkt_burst = dev->rx_pkt_burst;
+		adapter->rx_burst_type = rx_burst_type;
 		dev->rx_pkt_burst = iavf_recv_pkts_no_poll;
 	}
 }
@@ -3975,6 +4097,7 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
 {
 	struct iavf_adapter *adapter =
 		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	enum iavf_tx_burst_type tx_burst_type;
 	int no_poll_on_link_down = adapter->devargs.no_poll_on_link_down;
 #ifdef RTE_ARCH_X86
 	struct iavf_tx_queue *txq;
@@ -4011,10 +4134,12 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
 			PMD_DRV_LOG(DEBUG, "Using Vector Tx (port %d).",
 				    dev->data->port_id);
 			dev->tx_pkt_burst = iavf_xmit_pkts_vec;
+			tx_burst_type = IAVF_TX_BURST_VEC_SSE;
 		}
 		if (use_avx2) {
 			if (check_ret == IAVF_VECTOR_PATH) {
 				dev->tx_pkt_burst = iavf_xmit_pkts_vec_avx2;
+				tx_burst_type = IAVF_TX_BURST_VEC_AVX2;
 				PMD_DRV_LOG(DEBUG, "Using AVX2 Vector Tx (port %d).",
 					    dev->data->port_id);
 			} else if (check_ret == IAVF_VECTOR_CTX_OFFLOAD_PATH) {
@@ -4023,6 +4148,7 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
 				goto normal;
 			} else {
 				dev->tx_pkt_burst = iavf_xmit_pkts_vec_avx2_offload;
+				tx_burst_type = IAVF_TX_BURST_VEC_AVX2_OFFLOAD;
 				dev->tx_pkt_prepare = iavf_prep_pkts;
 				PMD_DRV_LOG(DEBUG, "Using AVX2 OFFLOAD Vector Tx (port %d).",
 					    dev->data->port_id);
@@ -4032,15 +4158,18 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
 		if (use_avx512) {
 			if (check_ret == IAVF_VECTOR_PATH) {
 				dev->tx_pkt_burst = iavf_xmit_pkts_vec_avx512;
+				tx_burst_type = IAVF_TX_BURST_VEC_AVX512;
 				PMD_DRV_LOG(DEBUG, "Using AVX512 Vector Tx (port %d).",
 					    dev->data->port_id);
 			} else if (check_ret == IAVF_VECTOR_OFFLOAD_PATH) {
 				dev->tx_pkt_burst = iavf_xmit_pkts_vec_avx512_offload;
+				tx_burst_type = IAVF_TX_BURST_VEC_AVX512_OFFLOAD;
 				dev->tx_pkt_prepare = iavf_prep_pkts;
 				PMD_DRV_LOG(DEBUG, "Using AVX512 OFFLOAD Vector Tx (port %d).",
 					    dev->data->port_id);
 			} else {
 				dev->tx_pkt_burst = iavf_xmit_pkts_vec_avx512_ctx_offload;
+				tx_burst_type = IAVF_TX_BURST_VEC_AVX512_CTX_OFFLOAD;
 				dev->tx_pkt_prepare = iavf_prep_pkts;
 				PMD_DRV_LOG(DEBUG, "Using AVX512 CONTEXT OFFLOAD Vector Tx (port %d).",
 					    dev->data->port_id);
@@ -4063,7 +4192,7 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
 		}
 
 		if (no_poll_on_link_down) {
-			adapter->tx_pkt_burst = dev->tx_pkt_burst;
+			adapter->tx_burst_type = tx_burst_type;
 			dev->tx_pkt_burst = iavf_xmit_pkts_no_poll;
 		}
 		return;
@@ -4074,10 +4203,11 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
 	PMD_DRV_LOG(DEBUG, "Using Basic Tx callback (port=%d).",
 		    dev->data->port_id);
 	dev->tx_pkt_burst = iavf_xmit_pkts;
+	tx_burst_type = IAVF_TX_BURST_DEFAULT;
 	dev->tx_pkt_prepare = iavf_prep_pkts;
 
 	if (no_poll_on_link_down) {
-		adapter->tx_pkt_burst = dev->tx_pkt_burst;
+		adapter->tx_burst_type = tx_burst_type;
 		dev->tx_pkt_burst = iavf_xmit_pkts_no_poll;
 	}
 }
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index f432f9d956..42c18242db 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -203,6 +203,14 @@ struct iavf_txq_ops {
 	void (*release_mbufs)(struct iavf_tx_queue *txq);
 };
 
+struct iavf_rx_burst_ops {
+	eth_rx_burst_t rx_pkt_burst;
+};
+
+struct iavf_tx_burst_ops {
+	eth_tx_burst_t tx_pkt_burst;
+};
+
 
 struct iavf_rx_queue_stats {
 	uint64_t reserved;
-- 
2.25.1



* [PATCH v6 2/2] net/iavf: add diagnostic support in TX path
  2023-12-29 10:11           ` [PATCH v6 0/2] net/iavf: fix Rx/Tx burst and add diagnostics Mingjin Ye
  2023-12-29 10:11             ` [PATCH v6 1/2] net/iavf: fix Rx/Tx burst in multi-process Mingjin Ye
@ 2023-12-29 10:11             ` Mingjin Ye
  1 sibling, 0 replies; 36+ messages in thread
From: Mingjin Ye @ 2023-12-29 10:11 UTC (permalink / raw)
  To: dev
  Cc: qiming.yang, Mingjin Ye, Yuying Zhang, Beilei Xing, Simei Su,
	Wenjun Wu, Jingjing Wu

The only way to enable diagnostics for the TX path is to modify the
application source code, making it difficult to diagnose faults.

In this patch, the devarg option "mbuf_check" is introduced and the
parameters are configured to enable the corresponding diagnostics.

supported cases: mbuf, size, segment, offload.
 1. mbuf: check for corrupted mbuf.
 2. size: check min/max packet length according to hw spec.
 3. segment: check number of mbuf segments not exceed hw limitation.
 4. offload: check any unsupported offload flag.

parameter format: mbuf_check=[mbuf,<case1>,<case2>]
eg: dpdk-testpmd -a 0000:81:01.0,mbuf_check=[mbuf,size] -- -i

Signed-off-by: Mingjin Ye <mingjinx.ye@intel.com>
---
v2: Remove call chain.
---
v3: Optimisation implementation.
---
v4: Fix Windows os compilation error.
---
v5: Split Patch.
---
v6: remove strict.
---
 doc/guides/nics/intel_vf.rst   |  4 ++
 drivers/net/iavf/iavf.h        | 12 +++++
 drivers/net/iavf/iavf_ethdev.c | 68 +++++++++++++++++++++++
 drivers/net/iavf/iavf_rxtx.c   | 98 ++++++++++++++++++++++++++++++++++
 drivers/net/iavf/iavf_rxtx.h   |  2 +
 5 files changed, 184 insertions(+)

diff --git a/doc/guides/nics/intel_vf.rst b/doc/guides/nics/intel_vf.rst
index ad08198f0f..8e39bc831c 100644
--- a/doc/guides/nics/intel_vf.rst
+++ b/doc/guides/nics/intel_vf.rst
@@ -111,6 +111,10 @@ For more detail on SR-IOV, please refer to the following documents:
     by setting the ``devargs`` parameter like ``-a 18:01.0,no-poll-on-link-down=1``
     when IAVF is backed by an Intel\ |reg| E810 device or an Intel\ |reg| 700 Series Ethernet device.
 
+    Enable mbuf check for Tx diagnostics by setting the devargs parameter like
+    ``-a 18:01.0,mbuf_check=[mbuf,<case1>,<case2>]`` when IAVF is backed by an
+    Intel\ |reg| E810 device or an Intel\ |reg| 700 Series Ethernet device.
+
 The PCIE host-interface of Intel Ethernet Switch FM10000 Series VF infrastructure
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 8db9f3d7cd..0a7053e311 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -113,9 +113,14 @@ struct iavf_ipsec_crypto_stats {
 	} ierrors;
 };
 
+struct iavf_mbuf_stats {
+	uint64_t tx_pkt_errors;
+};
+
 struct iavf_eth_xstats {
 	struct virtchnl_eth_stats eth_stats;
 	struct iavf_ipsec_crypto_stats ips_stats;
+	struct iavf_mbuf_stats mbuf_stats;
 };
 
 /* Structure that defines a VSI, associated with a adapter. */
@@ -309,10 +314,16 @@ struct iavf_devargs {
 	uint32_t watchdog_period;
 	int auto_reset;
 	int no_poll_on_link_down;
+	int mbuf_check;
 };
 
 struct iavf_security_ctx;
 
+#define IAVF_MBUF_CHECK_F_TX_MBUF        (1ULL << 0)
+#define IAVF_MBUF_CHECK_F_TX_SIZE        (1ULL << 1)
+#define IAVF_MBUF_CHECK_F_TX_SEGMENT     (1ULL << 2)
+#define IAVF_MBUF_CHECK_F_TX_OFFLOAD     (1ULL << 3)
+
 enum iavf_rx_burst_type {
 	IAVF_RX_BURST_DEFAULT,
 	IAVF_RX_BURST_FRXD,
@@ -368,6 +379,7 @@ struct iavf_adapter {
 	bool no_poll;
 	enum iavf_rx_burst_type rx_burst_type;
 	enum iavf_tx_burst_type tx_burst_type;
+	uint64_t mc_flags; /* mbuf check flags. */
 	uint16_t fdir_ref_cnt;
 	struct iavf_devargs devargs;
 };
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index d1edb0dd5c..a7a0d99868 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -13,6 +13,7 @@
 #include <inttypes.h>
 #include <rte_byteorder.h>
 #include <rte_common.h>
+#include <rte_os_shim.h>
 
 #include <rte_interrupts.h>
 #include <rte_debug.h>
@@ -39,6 +40,8 @@
 #define IAVF_RESET_WATCHDOG_ARG    "watchdog_period"
 #define IAVF_ENABLE_AUTO_RESET_ARG "auto_reset"
 #define IAVF_NO_POLL_ON_LINK_DOWN_ARG "no-poll-on-link-down"
+#define IAVF_MBUF_CHECK_ARG       "mbuf_check"
+
 uint64_t iavf_timestamp_dynflag;
 int iavf_timestamp_dynfield_offset = -1;
 
@@ -48,6 +51,7 @@ static const char * const iavf_valid_args[] = {
 	IAVF_RESET_WATCHDOG_ARG,
 	IAVF_ENABLE_AUTO_RESET_ARG,
 	IAVF_NO_POLL_ON_LINK_DOWN_ARG,
+	IAVF_MBUF_CHECK_ARG,
 	NULL
 };
 
@@ -174,6 +178,7 @@ static const struct rte_iavf_xstats_name_off rte_iavf_stats_strings[] = {
 	{"tx_broadcast_packets", _OFF_OF(eth_stats.tx_broadcast)},
 	{"tx_dropped_packets", _OFF_OF(eth_stats.tx_discards)},
 	{"tx_error_packets", _OFF_OF(eth_stats.tx_errors)},
+	{"tx_mbuf_error_packets", _OFF_OF(mbuf_stats.tx_pkt_errors)},
 
 	{"inline_ipsec_crypto_ipackets", _OFF_OF(ips_stats.icount)},
 	{"inline_ipsec_crypto_ibytes", _OFF_OF(ips_stats.ibytes)},
@@ -1881,6 +1886,8 @@ static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
 {
 	int ret;
 	unsigned int i;
+	struct iavf_tx_queue *txq;
+	uint64_t mbuf_errors = 0;
 	struct iavf_adapter *adapter =
 		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
@@ -1904,6 +1911,15 @@ static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
 	if (iavf_ipsec_crypto_supported(adapter))
 		iavf_dev_update_ipsec_xstats(dev, &iavf_xtats.ips_stats);
 
+	if (adapter->devargs.mbuf_check) {
+		for (i = 0; i < dev->data->nb_tx_queues; i++) {
+			txq = dev->data->tx_queues[i];
+			mbuf_errors += __atomic_load_n(&txq->mbuf_errors,
+				__ATOMIC_RELAXED);
+		}
+		iavf_xtats.mbuf_stats.tx_pkt_errors = mbuf_errors;
+	}
+
 	/* loop over xstats array and values from pstats */
 	for (i = 0; i < IAVF_NB_XSTATS; i++) {
 		xstats[i].id = i;
@@ -2286,6 +2302,50 @@ iavf_parse_watchdog_period(__rte_unused const char *key, const char *value, void
 	return 0;
 }
 
+static int
+iavf_parse_mbuf_check(__rte_unused const char *key, const char *value, void *args)
+{
+	char *cur;
+	char *tmp;
+	int str_len;
+	int valid_len;
+
+	int ret = 0;
+	uint64_t *mc_flags = args;
+	char *str2 = strdup(value);
+	if (str2 == NULL)
+		return -1;
+
+	str_len = strlen(str2);
+	if (str2[0] == '[' && str2[str_len - 1] == ']') {
+		if (str_len < 3) {
+			ret = -1;
+			goto mdd_end;
+		}
+		valid_len = str_len - 2;
+		memmove(str2, str2 + 1, valid_len);
+		memset(str2 + valid_len, '\0', 2);
+	}
+	cur = strtok_r(str2, ",", &tmp);
+	while (cur != NULL) {
+		if (!strcmp(cur, "mbuf"))
+			*mc_flags |= IAVF_MBUF_CHECK_F_TX_MBUF;
+		else if (!strcmp(cur, "size"))
+			*mc_flags |= IAVF_MBUF_CHECK_F_TX_SIZE;
+		else if (!strcmp(cur, "segment"))
+			*mc_flags |= IAVF_MBUF_CHECK_F_TX_SEGMENT;
+		else if (!strcmp(cur, "offload"))
+			*mc_flags |= IAVF_MBUF_CHECK_F_TX_OFFLOAD;
+		else
+			PMD_DRV_LOG(ERR, "Unsupported mdd check type: %s", cur);
+		cur = strtok_r(NULL, ",", &tmp);
+	}
+
+mdd_end:
+	free(str2);
+	return ret;
+}
+
 static int iavf_parse_devargs(struct rte_eth_dev *dev)
 {
 	struct iavf_adapter *ad =
@@ -2340,6 +2400,14 @@ static int iavf_parse_devargs(struct rte_eth_dev *dev)
 		goto bail;
 	}
 
+	ret = rte_kvargs_process(kvlist, IAVF_MBUF_CHECK_ARG,
+				 &iavf_parse_mbuf_check, &ad->mc_flags);
+	if (ret)
+		goto bail;
+
+	if (ad->mc_flags)
+		ad->devargs.mbuf_check = 1;
+
 	ret = rte_kvargs_process(kvlist, IAVF_ENABLE_AUTO_RESET_ARG,
 				 &parse_bool, &ad->devargs.auto_reset);
 	if (ret)
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 152f755206..469c218372 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -3807,6 +3807,97 @@ iavf_xmit_pkts_no_poll(void *tx_queue, struct rte_mbuf **tx_pkts,
 								tx_pkts, nb_pkts);
 }
 
+/* Tx mbuf check */
+static uint16_t
+iavf_xmit_pkts_check(void *tx_queue, struct rte_mbuf **tx_pkts,
+	      uint16_t nb_pkts)
+{
+	uint16_t idx;
+	uint64_t ol_flags;
+	struct rte_mbuf *mb;
+	uint16_t good_pkts = nb_pkts;
+	const char *reason = NULL;
+	bool pkt_error = false;
+	struct iavf_tx_queue *txq = tx_queue;
+	struct iavf_adapter *adapter = txq->vsi->adapter;
+	enum iavf_tx_burst_type tx_burst_type =
+		txq->vsi->adapter->tx_burst_type;
+
+	for (idx = 0; idx < nb_pkts; idx++) {
+		mb = tx_pkts[idx];
+		ol_flags = mb->ol_flags;
+
+		if ((adapter->mc_flags & IAVF_MBUF_CHECK_F_TX_MBUF) &&
+			(rte_mbuf_check(mb, 1, &reason) != 0)) {
+			PMD_TX_LOG(ERR, "INVALID mbuf: %s\n", reason);
+			pkt_error = true;
+			break;
+		}
+
+		if ((adapter->mc_flags & IAVF_MBUF_CHECK_F_TX_SIZE) &&
+			(mb->data_len < IAVF_TX_MIN_PKT_LEN ||
+			mb->data_len > adapter->vf.max_pkt_len)) {
+			PMD_TX_LOG(ERR, "INVALID mbuf: data_len (%u) is out "
+			"of range, reasonable range (%d - %u)\n", mb->data_len,
+			IAVF_TX_MIN_PKT_LEN, adapter->vf.max_pkt_len);
+			pkt_error = true;
+			break;
+		}
+
+		if (adapter->mc_flags & IAVF_MBUF_CHECK_F_TX_SEGMENT) {
+			/* Check condition for nb_segs > IAVF_TX_MAX_MTU_SEG. */
+			if (!(ol_flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG))) {
+				if (mb->nb_segs > IAVF_TX_MAX_MTU_SEG) {
+					PMD_TX_LOG(ERR, "INVALID mbuf: nb_segs (%d) exceeds "
+					"HW limit, maximum allowed value is %d\n", mb->nb_segs,
+					IAVF_TX_MAX_MTU_SEG);
+					pkt_error = true;
+					break;
+				}
+			} else if ((mb->tso_segsz < IAVF_MIN_TSO_MSS) ||
+				(mb->tso_segsz > IAVF_MAX_TSO_MSS)) {
+				/* MSS outside the range are considered malicious */
+				PMD_TX_LOG(ERR, "INVALID mbuf: tso_segsz (%u) is out "
+				"of range, reasonable range (%d - %u)\n", mb->tso_segsz,
+				IAVF_MIN_TSO_MSS, IAVF_MAX_TSO_MSS);
+				pkt_error = true;
+				break;
+			} else if (mb->nb_segs > txq->nb_tx_desc) {
+				PMD_TX_LOG(ERR, "INVALID mbuf: nb_segs out "
+				"of ring length\n");
+				pkt_error = true;
+				break;
+			}
+		}
+
+		if (adapter->mc_flags & IAVF_MBUF_CHECK_F_TX_OFFLOAD) {
+			if (ol_flags & IAVF_TX_OFFLOAD_NOTSUP_MASK) {
+				PMD_TX_LOG(ERR, "INVALID mbuf: TX offload "
+				"is not supported\n");
+				pkt_error = true;
+				break;
+			}
+
+			if (!rte_validate_tx_offload(mb)) {
+				PMD_TX_LOG(ERR, "INVALID mbuf: TX offload "
+				"setup error\n");
+				pkt_error = true;
+				break;
+			}
+		}
+	}
+
+	if (pkt_error) {
+		__atomic_fetch_add(&txq->mbuf_errors, 1, __ATOMIC_RELAXED);
+		good_pkts = idx;
+		if (good_pkts == 0)
+			return 0;
+	}
+
+	return iavf_tx_pkt_burst_ops[tx_burst_type].tx_pkt_burst(tx_queue,
+								tx_pkts, good_pkts);
+}
+
 /* choose rx function*/
 void
 iavf_set_rx_function(struct rte_eth_dev *dev)
@@ -4098,6 +4189,7 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
 	struct iavf_adapter *adapter =
 		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 	enum iavf_tx_burst_type tx_burst_type;
+	int mbuf_check = adapter->devargs.mbuf_check;
 	int no_poll_on_link_down = adapter->devargs.no_poll_on_link_down;
 #ifdef RTE_ARCH_X86
 	struct iavf_tx_queue *txq;
@@ -4194,6 +4286,9 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
 		if (no_poll_on_link_down) {
 			adapter->tx_burst_type = tx_burst_type;
 			dev->tx_pkt_burst = iavf_xmit_pkts_no_poll;
+		} else if (mbuf_check) {
+			adapter->tx_burst_type = tx_burst_type;
+			dev->tx_pkt_burst = iavf_xmit_pkts_check;
 		}
 		return;
 	}
@@ -4209,6 +4304,9 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
 	if (no_poll_on_link_down) {
 		adapter->tx_burst_type = tx_burst_type;
 		dev->tx_pkt_burst = iavf_xmit_pkts_no_poll;
+	} else if (mbuf_check) {
+		adapter->tx_burst_type = tx_burst_type;
+		dev->tx_pkt_burst = iavf_xmit_pkts_check;
 	}
 }
 
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index 42c18242db..7c40fe51f9 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -305,6 +305,8 @@ struct iavf_tx_queue {
 	uint16_t next_rs;              /* next to check DD,  for VPMD */
 	uint16_t ipsec_crypto_pkt_md_offset;
 
+	uint64_t mbuf_errors;
+
 	bool q_set;                    /* if rx queue has been configured */
 	bool tx_deferred_start;        /* don't start this queue in dev start */
 	const struct iavf_txq_ops *ops;
-- 
2.25.1



* RE: [PATCH v6 1/2] net/iavf: fix Rx/Tx burst in multi-process
  2023-12-29 10:11             ` [PATCH v6 1/2] net/iavf: fix Rx/Tx burst in multi-process Mingjin Ye
@ 2023-12-31  6:41               ` Zhang, Qi Z
  2024-01-02 10:52               ` [PATCH v7 0/2] net/iavf: fix Rx/Tx burst and add diagnostics Mingjin Ye
  1 sibling, 0 replies; 36+ messages in thread
From: Zhang, Qi Z @ 2023-12-31  6:41 UTC (permalink / raw)
  To: Ye, MingjinX, dev
  Cc: Yang, Qiming, Ye, MingjinX, stable, Wu, Jingjing, Xing, Beilei



> -----Original Message-----
> From: Mingjin Ye <mingjinx.ye@intel.com>
> Sent: Friday, December 29, 2023 6:11 PM
> To: dev@dpdk.org
> Cc: Yang, Qiming <qiming.yang@intel.com>; Ye, MingjinX
> <mingjinx.ye@intel.com>; stable@dpdk.org; Wu, Jingjing
> <jingjing.wu@intel.com>; Xing, Beilei <beilei.xing@intel.com>
> Subject: [PATCH v6 1/2] net/iavf: fix Rx/Tx burst in multi-process
> 
> In a multi-process environment, a secondary process operates on shared
> memory and changes the function pointer of the primary process, resulting
> in a crash when the primary process cannot find the function address during
> an Rx/Tx burst.
> 
> Fixes: 5b3124a0a6ef ("net/iavf: support no polling when link down")
> Cc: stable@dpdk.org
> 
> Signed-off-by: Mingjin Ye <mingjinx.ye@intel.com>
> ---
> v2: Add fix for Rx burst.
> ---
>  drivers/net/iavf/iavf.h      |  42 +++++++-
>  drivers/net/iavf/iavf_rxtx.c | 184 ++++++++++++++++++++++++++++++-----
>  drivers/net/iavf/iavf_rxtx.h |   8 ++
>  3 files changed, 205 insertions(+), 29 deletions(-)
> 
> diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h index
> 10868f2c30..8db9f3d7cd 100644
> --- a/drivers/net/iavf/iavf.h
> +++ b/drivers/net/iavf/iavf.h
> @@ -313,6 +313,44 @@ struct iavf_devargs {
> 
>  struct iavf_security_ctx;
> 
> +enum iavf_rx_burst_type {
> +	IAVF_RX_BURST_DEFAULT,
> +	IAVF_RX_BURST_FRXD,
> +	IAVF_RX_BURST_BULK_ALLOC,
> +	IAVF_RX_BURST_SCATTERED,
> +	IAVF_RX_BURST_SFRXD,
> +	IAVF_RX_BURST_VEC_SSE,
> +	IAVF_RX_BURST_VEC_AVX2,
> +	IAVF_RX_BURST_VEC_AVX2_OFFLOAD,
> +	IAVF_RX_BURST_VEC_SSE_FRXD,
> +	IAVF_RX_BURST_VEC_AVX2_FRXD,
> +	IAVF_RX_BURST_VEC_AVX2_FRXD_OFFLOAD,
> +	IAVF_RX_BURST_VEC_SSE_SCATTERED,
> +	IAVF_RX_BURST_VEC_AVX2_SCATTERED,
> +	IAVF_RX_BURST_VEC_AVX2_SCATTERED_OFFLOAD,
> +	IAVF_RX_BURST_VEC_SSE_SFLEX_RXD,
> +	IAVF_RX_BURST_VEC_AVX2_SFLEX_RXD,
> +	IAVF_RX_BURST_VEC_AVX2_SFRXD_OFFLOAD,
> +	IAVF_RX_BURST_VEC_AVX512,
> +	IAVF_RX_BURST_VEC_AVX512_OFFLOAD,
> +	IAVF_RX_BURST_VEC_AVX512_FRXD,
> +	IAVF_RX_BURST_VEC_AVX512_FRXD_OFFLOAD,
> +	IAVF_RX_BURST_VEC_AVX512_SCATTERED,
> +	IAVF_RX_BURST_VEC_AVX512_SCATTERED_OFFLOAD,
> +	IAVF_RX_BURST_VEC_AVX512_SFLEX_RXD,
> +	IAVF_RX_BURST_VEC_AVX512_SFRXD_OFFLOAD,

What are SFLEX, SFRXD, and FRXD? Please make them clear by following a consistent naming pattern.
Btw, you can consider removing BURST and VEC, which don't add any information, if you are looking for shorter names.
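
For instance, one scheme that spells out the vector path and the
descriptor layout consistently could be (sketch):

	enum iavf_rx_burst_type {
		IAVF_RX_DEFAULT,
		IAVF_RX_FLEX_RXD,
		IAVF_RX_SCATTERED,
		IAVF_RX_SCATTERED_FLEX_RXD,
		IAVF_RX_SSE,
		IAVF_RX_AVX2,
		IAVF_RX_AVX2_OFFLOAD,
		IAVF_RX_AVX512_SCATTERED_FLEX_RXD_OFFLOAD,
		/* ... */
	};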

....

> @@ -3809,42 +3886,64 @@ iavf_set_rx_function(struct rte_eth_dev *dev)
>  			}
>  			if (use_flex) {
>  				dev->rx_pkt_burst =
> iavf_recv_scattered_pkts_vec_flex_rxd;
> +				rx_burst_type =
> IAVF_RX_BURST_VEC_SSE_SFLEX_RXD;
>  				if (use_avx2) {
> -					if (check_ret == IAVF_VECTOR_PATH)
> +					if (check_ret == IAVF_VECTOR_PATH)
> {
>  						dev->rx_pkt_burst =
> 
> 	iavf_recv_scattered_pkts_vec_avx2_flex_rxd;
> -					else
> +						rx_burst_type =
> +
> 	IAVF_RX_BURST_VEC_AVX2_SFLEX_RXD;

Since you already introduced the burst_type, it's not necessary to set the function pointer in each branch.
Why not just do
dev->rx_pkt_burst = rx_burst_ops[rx_burst_type]
once at the end?
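
Something like this at the end of iavf_set_rx_function() (untested
sketch):

	if (no_poll_on_link_down) {
		adapter->rx_burst_type = rx_burst_type;
		dev->rx_pkt_burst = iavf_recv_pkts_no_poll;
	} else {
		dev->rx_pkt_burst = rx_burst_ops[rx_burst_type];
	}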

....

> +struct iavf_rx_burst_ops {
> +	eth_rx_burst_t rx_pkt_burst;
> +};

Why create a wrapper here instead of just using eth_rx_burst_t directly?
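
That is, a plain array of burst functions would be enough (sketch; the
initializers reuse the existing iavf Rx functions):

	static const eth_rx_burst_t iavf_rx_pkt_burst_ops[] = {
		[IAVF_RX_BURST_DEFAULT] = iavf_recv_pkts,
		[IAVF_RX_BURST_FRXD]    = iavf_recv_pkts_flex_rxd,
		/* ... */
	};

	/* call site: */
	return iavf_rx_pkt_burst_ops[rx_burst_type](rx_queue, rx_pkts, nb_pkts);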




* [PATCH v7 0/2] net/iavf: fix Rx/Tx burst and add diagnostics
  2023-12-29 10:11             ` [PATCH v6 1/2] net/iavf: fix Rx/Tx burst in multi-process Mingjin Ye
  2023-12-31  6:41               ` Zhang, Qi Z
@ 2024-01-02 10:52               ` Mingjin Ye
  2024-01-02 10:52                 ` [PATCH v7 1/2] net/iavf: fix Rx/Tx burst in multi-process Mingjin Ye
  2024-01-02 10:52                 ` [PATCH v7 2/2] net/iavf: add diagnostic support in TX path Mingjin Ye
  1 sibling, 2 replies; 36+ messages in thread
From: Mingjin Ye @ 2024-01-02 10:52 UTC (permalink / raw)
  To: dev; +Cc: qiming.yang, Mingjin Ye

Fixed the Rx/Tx crash in a multi-process environment and
added the Tx diagnostics feature.

Mingjin Ye (2):
  net/iavf: fix Rx/Tx burst in multi-process
  net/iavf: add diagnostic support in TX path

 doc/guides/nics/intel_vf.rst   |   4 +
 drivers/net/iavf/iavf.h        |  54 ++++++-
 drivers/net/iavf/iavf_ethdev.c |  72 +++++++++
 drivers/net/iavf/iavf_rxtx.c   | 271 ++++++++++++++++++++++++++-------
 drivers/net/iavf/iavf_rxtx.h   |   2 +
 5 files changed, 345 insertions(+), 58 deletions(-)

-- 
2.25.1



* [PATCH v7 1/2] net/iavf: fix Rx/Tx burst in multi-process
  2024-01-02 10:52               ` [PATCH v7 0/2] net/iavf: fix Rx/Tx burst and add diagnostics Mingjin Ye
@ 2024-01-02 10:52                 ` Mingjin Ye
  2024-01-03  2:22                   ` Zhang, Qi Z
  2024-01-02 10:52                 ` [PATCH v7 2/2] net/iavf: add diagnostic support in TX path Mingjin Ye
  1 sibling, 1 reply; 36+ messages in thread
From: Mingjin Ye @ 2024-01-02 10:52 UTC (permalink / raw)
  To: dev; +Cc: qiming.yang, Mingjin Ye, stable, Jingjing Wu, Beilei Xing

In a multi-process environment, a secondary process operates on shared
memory and changes the function pointer of the primary process, resulting
in a crash when the primary process cannot find the function address
during an Rx/Tx burst.

Fixes: 5b3124a0a6ef ("net/iavf: support no polling when link down")
Cc: stable@dpdk.org

Signed-off-by: Mingjin Ye <mingjinx.ye@intel.com>
---
v2: Add fix for Rx burst.
---
v3: fix Rx/Tx routing.
---
 drivers/net/iavf/iavf.h      |  42 ++++++++-
 drivers/net/iavf/iavf_rxtx.c | 173 +++++++++++++++++++++++------------
 2 files changed, 157 insertions(+), 58 deletions(-)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 10868f2c30..73a089c199 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -313,6 +313,44 @@ struct iavf_devargs {
 
 struct iavf_security_ctx;
 
+enum iavf_rx_burst_type {
+	IAVF_RX_DEFAULT,
+	IAVF_RX_FLEX_RXD,
+	IAVF_RX_BULK_ALLOC,
+	IAVF_RX_SCATTERED,
+	IAVF_RX_SCATTERED_FLEX_RXD,
+	IAVF_RX_SSE,
+	IAVF_RX_AVX2,
+	IAVF_RX_AVX2_OFFLOAD,
+	IAVF_RX_SSE_FLEX_RXD,
+	IAVF_RX_AVX2_FLEX_RXD,
+	IAVF_RX_AVX2_FLEX_RXD_OFFLOAD,
+	IAVF_RX_SSE_SCATTERED,
+	IAVF_RX_AVX2_SCATTERED,
+	IAVF_RX_AVX2_SCATTERED_OFFLOAD,
+	IAVF_RX_SSE_SCATTERED_FLEX_RXD,
+	IAVF_RX_AVX2_SCATTERED_FLEX_RXD,
+	IAVF_RX_AVX2_SCATTERED_FLEX_RXD_OFFLOAD,
+	IAVF_RX_AVX512,
+	IAVF_RX_AVX512_OFFLOAD,
+	IAVF_RX_AVX512_FLEX_RXD,
+	IAVF_RX_AVX512_FLEX_RXD_OFFLOAD,
+	IAVF_RX_AVX512_SCATTERED,
+	IAVF_RX_AVX512_SCATTERED_OFFLOAD,
+	IAVF_RX_AVX512_SCATTERED_FLEX_RXD,
+	IAVF_RX_AVX512_SCATTERED_FLEX_RXD_OFFLOAD,
+};
+
+enum iavf_tx_burst_type {
+	IAVF_TX_DEFAULT,
+	IAVF_TX_SSE,
+	IAVF_TX_AVX2,
+	IAVF_TX_AVX2_OFFLOAD,
+	IAVF_TX_AVX512,
+	IAVF_TX_AVX512_OFFLOAD,
+	IAVF_TX_AVX512_CTX_OFFLOAD,
+};
+
 /* Structure to store private data for each VF instance. */
 struct iavf_adapter {
 	struct iavf_hw hw;
@@ -328,8 +366,8 @@ struct iavf_adapter {
 	bool stopped;
 	bool closed;
 	bool no_poll;
-	eth_rx_burst_t rx_pkt_burst;
-	eth_tx_burst_t tx_pkt_burst;
+	enum iavf_rx_burst_type rx_burst_type;
+	enum iavf_tx_burst_type tx_burst_type;
 	uint16_t fdir_ref_cnt;
 	struct iavf_devargs devargs;
 };
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index f19aa14646..13b932ad85 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -3707,15 +3707,68 @@ iavf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
 	return i;
 }
 
+static
+const eth_rx_burst_t iavf_rx_pkt_burst_ops[] = {
+	iavf_recv_pkts,
+	iavf_recv_pkts_flex_rxd,
+	iavf_recv_pkts_bulk_alloc,
+	iavf_recv_scattered_pkts,
+	iavf_recv_scattered_pkts_flex_rxd,
+#ifdef RTE_ARCH_X86
+	iavf_recv_pkts_vec,
+	iavf_recv_pkts_vec_avx2,
+	iavf_recv_pkts_vec_avx2_offload,
+	iavf_recv_pkts_vec_flex_rxd,
+	iavf_recv_pkts_vec_avx2_flex_rxd,
+	iavf_recv_pkts_vec_avx2_flex_rxd_offload,
+	iavf_recv_scattered_pkts_vec,
+	iavf_recv_scattered_pkts_vec_avx2,
+	iavf_recv_scattered_pkts_vec_avx2_offload,
+	iavf_recv_scattered_pkts_vec_flex_rxd,
+	iavf_recv_scattered_pkts_vec_avx2_flex_rxd,
+	iavf_recv_scattered_pkts_vec_avx2_flex_rxd_offload,
+#ifdef CC_AVX512_SUPPORT
+	iavf_recv_pkts_vec_avx512,
+	iavf_recv_pkts_vec_avx512_offload,
+	iavf_recv_pkts_vec_avx512_flex_rxd,
+	iavf_recv_pkts_vec_avx512_flex_rxd_offload,
+	iavf_recv_scattered_pkts_vec_avx512,
+	iavf_recv_scattered_pkts_vec_avx512_offload,
+	iavf_recv_scattered_pkts_vec_avx512_flex_rxd,
+	iavf_recv_scattered_pkts_vec_avx512_flex_rxd_offload,
+#endif
+#elif defined RTE_ARCH_ARM
+	iavf_recv_pkts_vec,
+#endif
+};
+
+static
+const eth_tx_burst_t iavf_tx_pkt_burst_ops[] = {
+	iavf_xmit_pkts,
+#ifdef RTE_ARCH_X86
+	iavf_xmit_pkts_vec,
+	iavf_xmit_pkts_vec_avx2,
+	iavf_xmit_pkts_vec_avx2_offload,
+#ifdef CC_AVX512_SUPPORT
+	iavf_xmit_pkts_vec_avx512,
+	iavf_xmit_pkts_vec_avx512_offload,
+	iavf_xmit_pkts_vec_avx512_ctx_offload,
+#endif
+#endif
+};
+
 static uint16_t
 iavf_recv_pkts_no_poll(void *rx_queue, struct rte_mbuf **rx_pkts,
 				uint16_t nb_pkts)
 {
 	struct iavf_rx_queue *rxq = rx_queue;
+	enum iavf_rx_burst_type rx_burst_type =
+		rxq->vsi->adapter->rx_burst_type;
+
 	if (!rxq->vsi || rxq->vsi->adapter->no_poll)
 		return 0;
 
-	return rxq->vsi->adapter->rx_pkt_burst(rx_queue,
+	return iavf_rx_pkt_burst_ops[rx_burst_type](rx_queue,
 								rx_pkts, nb_pkts);
 }
 
@@ -3724,10 +3777,13 @@ iavf_xmit_pkts_no_poll(void *tx_queue, struct rte_mbuf **tx_pkts,
 				uint16_t nb_pkts)
 {
 	struct iavf_tx_queue *txq = tx_queue;
+	enum iavf_tx_burst_type tx_burst_type =
+		txq->vsi->adapter->tx_burst_type;
+
 	if (!txq->vsi || txq->vsi->adapter->no_poll)
 		return 0;
 
-	return txq->vsi->adapter->tx_pkt_burst(tx_queue,
+	return iavf_tx_pkt_burst_ops[tx_burst_type](tx_queue,
 								tx_pkts, nb_pkts);
 }
 
@@ -3738,6 +3794,7 @@ iavf_set_rx_function(struct rte_eth_dev *dev)
 	struct iavf_adapter *adapter =
 		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+	enum iavf_rx_burst_type rx_burst_type;
 	int no_poll_on_link_down = adapter->devargs.no_poll_on_link_down;
 	int i;
 	struct iavf_rx_queue *rxq;
@@ -3808,43 +3865,43 @@ iavf_set_rx_function(struct rte_eth_dev *dev)
 				}
 			}
 			if (use_flex) {
-				dev->rx_pkt_burst = iavf_recv_scattered_pkts_vec_flex_rxd;
+				rx_burst_type = IAVF_RX_SSE_SCATTERED_FLEX_RXD;
 				if (use_avx2) {
 					if (check_ret == IAVF_VECTOR_PATH)
-						dev->rx_pkt_burst =
-							iavf_recv_scattered_pkts_vec_avx2_flex_rxd;
+						rx_burst_type =
+							IAVF_RX_AVX2_SCATTERED_FLEX_RXD;
 					else
-						dev->rx_pkt_burst =
-							iavf_recv_scattered_pkts_vec_avx2_flex_rxd_offload;
+						rx_burst_type =
+							IAVF_RX_AVX2_SCATTERED_FLEX_RXD_OFFLOAD;
 				}
 #ifdef CC_AVX512_SUPPORT
 				if (use_avx512) {
 					if (check_ret == IAVF_VECTOR_PATH)
-						dev->rx_pkt_burst =
-							iavf_recv_scattered_pkts_vec_avx512_flex_rxd;
+						rx_burst_type =
+							IAVF_RX_AVX512_SCATTERED_FLEX_RXD;
 					else
-						dev->rx_pkt_burst =
-							iavf_recv_scattered_pkts_vec_avx512_flex_rxd_offload;
+						rx_burst_type =
+							IAVF_RX_AVX512_SCATTERED_FLEX_RXD_OFFLOAD;
 				}
 #endif
 			} else {
-				dev->rx_pkt_burst = iavf_recv_scattered_pkts_vec;
+				rx_burst_type = IAVF_RX_SSE_SCATTERED;
 				if (use_avx2) {
 					if (check_ret == IAVF_VECTOR_PATH)
-						dev->rx_pkt_burst =
-							iavf_recv_scattered_pkts_vec_avx2;
+						rx_burst_type =
+							IAVF_RX_AVX2_SCATTERED;
 					else
-						dev->rx_pkt_burst =
-							iavf_recv_scattered_pkts_vec_avx2_offload;
+						rx_burst_type =
+							IAVF_RX_AVX2_SCATTERED_OFFLOAD;
 				}
 #ifdef CC_AVX512_SUPPORT
 				if (use_avx512) {
 					if (check_ret == IAVF_VECTOR_PATH)
-						dev->rx_pkt_burst =
-							iavf_recv_scattered_pkts_vec_avx512;
+						rx_burst_type =
+							IAVF_RX_AVX512_SCATTERED;
 					else
-						dev->rx_pkt_burst =
-							iavf_recv_scattered_pkts_vec_avx512_offload;
+						rx_burst_type =
+							IAVF_RX_AVX512_SCATTERED_OFFLOAD;
 				}
 #endif
 			}
@@ -3874,51 +3931,46 @@ iavf_set_rx_function(struct rte_eth_dev *dev)
 				}
 			}
 			if (use_flex) {
-				dev->rx_pkt_burst = iavf_recv_pkts_vec_flex_rxd;
+				rx_burst_type = IAVF_RX_SSE_FLEX_RXD;
 				if (use_avx2) {
 					if (check_ret == IAVF_VECTOR_PATH)
-						dev->rx_pkt_burst =
-							iavf_recv_pkts_vec_avx2_flex_rxd;
+						rx_burst_type = IAVF_RX_AVX2_FLEX_RXD;
 					else
-						dev->rx_pkt_burst =
-							iavf_recv_pkts_vec_avx2_flex_rxd_offload;
+						rx_burst_type = IAVF_RX_AVX2_FLEX_RXD_OFFLOAD;
 				}
 #ifdef CC_AVX512_SUPPORT
 				if (use_avx512) {
 					if (check_ret == IAVF_VECTOR_PATH)
-						dev->rx_pkt_burst =
-							iavf_recv_pkts_vec_avx512_flex_rxd;
+						rx_burst_type = IAVF_RX_AVX512_FLEX_RXD;
 					else
-						dev->rx_pkt_burst =
-							iavf_recv_pkts_vec_avx512_flex_rxd_offload;
+						rx_burst_type =
+							IAVF_RX_AVX512_FLEX_RXD_OFFLOAD;
 				}
 #endif
 			} else {
-				dev->rx_pkt_burst = iavf_recv_pkts_vec;
+				rx_burst_type = IAVF_RX_SSE;
 				if (use_avx2) {
 					if (check_ret == IAVF_VECTOR_PATH)
-						dev->rx_pkt_burst =
-							iavf_recv_pkts_vec_avx2;
+						rx_burst_type = IAVF_RX_AVX2;
 					else
-						dev->rx_pkt_burst =
-							iavf_recv_pkts_vec_avx2_offload;
+						rx_burst_type = IAVF_RX_AVX2_OFFLOAD;
 				}
 #ifdef CC_AVX512_SUPPORT
 				if (use_avx512) {
 					if (check_ret == IAVF_VECTOR_PATH)
-						dev->rx_pkt_burst =
-							iavf_recv_pkts_vec_avx512;
+						rx_burst_type = IAVF_RX_AVX512;
 					else
-						dev->rx_pkt_burst =
-							iavf_recv_pkts_vec_avx512_offload;
+						rx_burst_type = IAVF_RX_AVX512_OFFLOAD;
 				}
 #endif
 			}
 		}
 
 		if (no_poll_on_link_down) {
-			adapter->rx_pkt_burst = dev->rx_pkt_burst;
+			adapter->rx_burst_type = rx_burst_type;
 			dev->rx_pkt_burst = iavf_recv_pkts_no_poll;
+		} else {
+			dev->rx_pkt_burst = iavf_rx_pkt_burst_ops[rx_burst_type];
 		}
 		return;
 	}
@@ -3934,11 +3986,13 @@ iavf_set_rx_function(struct rte_eth_dev *dev)
 			rxq = dev->data->rx_queues[i];
 			(void)iavf_rxq_vec_setup(rxq);
 		}
-		dev->rx_pkt_burst = iavf_recv_pkts_vec;
+		rx_burst_type = IAVF_RX_SSE;
 
 		if (no_poll_on_link_down) {
-			adapter->rx_pkt_burst = dev->rx_pkt_burst;
+			adapter->rx_burst_type = rx_burst_type;
 			dev->rx_pkt_burst = iavf_recv_pkts_no_poll;
+		} else {
+			dev->rx_pkt_burst = iavf_rx_pkt_burst_ops[rx_burst_type];
 		}
 		return;
 	}
@@ -3947,25 +4001,27 @@ iavf_set_rx_function(struct rte_eth_dev *dev)
 		PMD_DRV_LOG(DEBUG, "Using a Scattered Rx callback (port=%d).",
 			    dev->data->port_id);
 		if (use_flex)
-			dev->rx_pkt_burst = iavf_recv_scattered_pkts_flex_rxd;
+			rx_burst_type = IAVF_RX_SCATTERED_FLEX_RXD;
 		else
-			dev->rx_pkt_burst = iavf_recv_scattered_pkts;
+			rx_burst_type = IAVF_RX_SCATTERED;
 	} else if (adapter->rx_bulk_alloc_allowed) {
 		PMD_DRV_LOG(DEBUG, "Using bulk Rx callback (port=%d).",
 			    dev->data->port_id);
-		dev->rx_pkt_burst = iavf_recv_pkts_bulk_alloc;
+		rx_burst_type = IAVF_RX_BULK_ALLOC;
 	} else {
 		PMD_DRV_LOG(DEBUG, "Using Basic Rx callback (port=%d).",
 			    dev->data->port_id);
 		if (use_flex)
-			dev->rx_pkt_burst = iavf_recv_pkts_flex_rxd;
+			rx_burst_type = IAVF_RX_FLEX_RXD;
 		else
-			dev->rx_pkt_burst = iavf_recv_pkts;
+			rx_burst_type = IAVF_RX_DEFAULT;
 	}
 
 	if (no_poll_on_link_down) {
-		adapter->rx_pkt_burst = dev->rx_pkt_burst;
+		adapter->rx_burst_type = rx_burst_type;
 		dev->rx_pkt_burst = iavf_recv_pkts_no_poll;
+	} else {
+		dev->rx_pkt_burst = iavf_rx_pkt_burst_ops[rx_burst_type];
 	}
 }
 
@@ -3975,6 +4031,7 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
 {
 	struct iavf_adapter *adapter =
 		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	enum iavf_tx_burst_type tx_burst_type;
 	int no_poll_on_link_down = adapter->devargs.no_poll_on_link_down;
 #ifdef RTE_ARCH_X86
 	struct iavf_tx_queue *txq;
@@ -4010,11 +4067,11 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
 		if (use_sse) {
 			PMD_DRV_LOG(DEBUG, "Using Vector Tx (port %d).",
 				    dev->data->port_id);
-			dev->tx_pkt_burst = iavf_xmit_pkts_vec;
+			tx_burst_type = IAVF_TX_SSE;
 		}
 		if (use_avx2) {
 			if (check_ret == IAVF_VECTOR_PATH) {
-				dev->tx_pkt_burst = iavf_xmit_pkts_vec_avx2;
+				tx_burst_type = IAVF_TX_AVX2;
 				PMD_DRV_LOG(DEBUG, "Using AVX2 Vector Tx (port %d).",
 					    dev->data->port_id);
 			} else if (check_ret == IAVF_VECTOR_CTX_OFFLOAD_PATH) {
@@ -4022,7 +4079,7 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
 					"AVX2 does not support outer checksum offload.");
 				goto normal;
 			} else {
-				dev->tx_pkt_burst = iavf_xmit_pkts_vec_avx2_offload;
+				tx_burst_type = IAVF_TX_AVX2_OFFLOAD;
 				dev->tx_pkt_prepare = iavf_prep_pkts;
 				PMD_DRV_LOG(DEBUG, "Using AVX2 OFFLOAD Vector Tx (port %d).",
 					    dev->data->port_id);
@@ -4031,16 +4088,16 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
 #ifdef CC_AVX512_SUPPORT
 		if (use_avx512) {
 			if (check_ret == IAVF_VECTOR_PATH) {
-				dev->tx_pkt_burst = iavf_xmit_pkts_vec_avx512;
+				tx_burst_type = IAVF_TX_AVX512;
 				PMD_DRV_LOG(DEBUG, "Using AVX512 Vector Tx (port %d).",
 					    dev->data->port_id);
 			} else if (check_ret == IAVF_VECTOR_OFFLOAD_PATH) {
-				dev->tx_pkt_burst = iavf_xmit_pkts_vec_avx512_offload;
+				tx_burst_type = IAVF_TX_AVX512_OFFLOAD;
 				dev->tx_pkt_prepare = iavf_prep_pkts;
 				PMD_DRV_LOG(DEBUG, "Using AVX512 OFFLOAD Vector Tx (port %d).",
 					    dev->data->port_id);
 			} else {
-				dev->tx_pkt_burst = iavf_xmit_pkts_vec_avx512_ctx_offload;
+				tx_burst_type = IAVF_TX_AVX512_CTX_OFFLOAD;
 				dev->tx_pkt_prepare = iavf_prep_pkts;
 				PMD_DRV_LOG(DEBUG, "Using AVX512 CONTEXT OFFLOAD Vector Tx (port %d).",
 					    dev->data->port_id);
@@ -4063,8 +4120,10 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
 		}
 
 		if (no_poll_on_link_down) {
-			adapter->tx_pkt_burst = dev->tx_pkt_burst;
+			adapter->tx_burst_type = tx_burst_type;
 			dev->tx_pkt_burst = iavf_xmit_pkts_no_poll;
+		} else {
+			dev->tx_pkt_burst = iavf_tx_pkt_burst_ops[tx_burst_type];
 		}
 		return;
 	}
@@ -4073,12 +4132,14 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
 #endif
 	PMD_DRV_LOG(DEBUG, "Using Basic Tx callback (port=%d).",
 		    dev->data->port_id);
-	dev->tx_pkt_burst = iavf_xmit_pkts;
+	tx_burst_type = IAVF_TX_DEFAULT;
 	dev->tx_pkt_prepare = iavf_prep_pkts;
 
 	if (no_poll_on_link_down) {
-		adapter->tx_pkt_burst = dev->tx_pkt_burst;
+		adapter->tx_burst_type = tx_burst_type;
 		dev->tx_pkt_burst = iavf_xmit_pkts_no_poll;
+	} else {
+		dev->tx_pkt_burst = iavf_tx_pkt_burst_ops[tx_burst_type];
 	}
 }
 
-- 
2.25.1



* [PATCH v7 2/2] net/iavf: add diagnostic support in TX path
  2024-01-02 10:52               ` [PATCH v7 0/2] net/iavf: fix Rx/Tx burst and add diagnostics Mingjin Ye
  2024-01-02 10:52                 ` [PATCH v7 1/2] net/iavf: fix Rx/Tx burst in multi-process Mingjin Ye
@ 2024-01-02 10:52                 ` Mingjin Ye
  2024-01-03  2:54                   ` Zhang, Qi Z
  2024-01-03 10:10                   ` [PATCH v8 0/2] net/iavf: fix Rx/Tx burst and add diagnostics Mingjin Ye
  1 sibling, 2 replies; 36+ messages in thread
From: Mingjin Ye @ 2024-01-02 10:52 UTC (permalink / raw)
  To: dev
  Cc: qiming.yang, Mingjin Ye, Wenjun Wu, Yuying Zhang, Beilei Xing,
	Simei Su, Jingjing Wu

The only way to enable diagnostics for the TX path is to modify the
application source code, making it difficult to diagnose faults.

In this patch, the devarg option "mbuf_check" is introduced and the
parameters are configured to enable the corresponding diagnostics.

supported cases: mbuf, size, segment, offload.
 1. mbuf: check for corrupted mbuf.
 2. size: check min/max packet length according to hw spec.
 3. segment: check number of mbuf segments not exceed hw limitation.
 4. offload: check any unsupported offload flag.

parameter format: mbuf_check=[mbuf,<case1>,<case2>]
eg: dpdk-testpmd -a 0000:81:01.0,mbuf_check=[mbuf,size] -- -i
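
Packets rejected by a check are counted in the new
tx_mbuf_error_packets xstat, which can be read from testpmd
(illustrative output):

	testpmd> show port xstats 0
	...
	tx_mbuf_error_packets: 0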

Signed-off-by: Mingjin Ye <mingjinx.ye@intel.com>
---
v2: Remove call chain.
---
v3: Optimisation implementation.
---
v4: Fix Windows os compilation error.
---
v5: Split Patch.
---
v6: remove strict.
---
 doc/guides/nics/intel_vf.rst   |  4 ++
 drivers/net/iavf/iavf.h        | 12 +++++
 drivers/net/iavf/iavf_ethdev.c | 72 +++++++++++++++++++++++++
 drivers/net/iavf/iavf_rxtx.c   | 98 ++++++++++++++++++++++++++++++++++
 drivers/net/iavf/iavf_rxtx.h   |  2 +
 5 files changed, 188 insertions(+)

diff --git a/doc/guides/nics/intel_vf.rst b/doc/guides/nics/intel_vf.rst
index ad08198f0f..8e39bc831c 100644
--- a/doc/guides/nics/intel_vf.rst
+++ b/doc/guides/nics/intel_vf.rst
@@ -111,6 +111,10 @@ For more detail on SR-IOV, please refer to the following documents:
     by setting the ``devargs`` parameter like ``-a 18:01.0,no-poll-on-link-down=1``
     when IAVF is backed by an Intel\ |reg| E810 device or an Intel\ |reg| 700 Series Ethernet device.
 
+    Enable mbuf check for Tx diagnostics by setting the devargs parameter like
+    ``-a 18:01.0,mbuf_check=[mbuf,<case1>,<case2>]`` when IAVF is backed by an
+    Intel\ |reg| E810 device or an Intel\ |reg| 700 Series Ethernet device.
+
 The PCIE host-interface of Intel Ethernet Switch FM10000 Series VF infrastructure
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 73a089c199..6535b624cb 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -113,9 +113,14 @@ struct iavf_ipsec_crypto_stats {
 	} ierrors;
 };
 
+struct iavf_mbuf_stats {
+	uint64_t tx_pkt_errors;
+};
+
 struct iavf_eth_xstats {
 	struct virtchnl_eth_stats eth_stats;
 	struct iavf_ipsec_crypto_stats ips_stats;
+	struct iavf_mbuf_stats mbuf_stats;
 };
 
 /* Structure that defines a VSI, associated with a adapter. */
@@ -309,6 +314,7 @@ struct iavf_devargs {
 	uint32_t watchdog_period;
 	int auto_reset;
 	int no_poll_on_link_down;
+	int mbuf_check;
 };
 
 struct iavf_security_ctx;
@@ -351,6 +357,11 @@ enum iavf_tx_burst_type {
 	IAVF_TX_AVX512_CTX_OFFLOAD,
 };
 
+#define IAVF_MBUF_CHECK_F_TX_MBUF        (1ULL << 0)
+#define IAVF_MBUF_CHECK_F_TX_SIZE        (1ULL << 1)
+#define IAVF_MBUF_CHECK_F_TX_SEGMENT     (1ULL << 2)
+#define IAVF_MBUF_CHECK_F_TX_OFFLOAD     (1ULL << 3)
+
 /* Structure to store private data for each VF instance. */
 struct iavf_adapter {
 	struct iavf_hw hw;
@@ -368,6 +379,7 @@ struct iavf_adapter {
 	bool no_poll;
 	enum iavf_rx_burst_type rx_burst_type;
 	enum iavf_tx_burst_type tx_burst_type;
+	uint64_t mc_flags; /* mbuf check flags. */
 	uint16_t fdir_ref_cnt;
 	struct iavf_devargs devargs;
 };
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index d1edb0dd5c..25938b9558 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -13,6 +13,7 @@
 #include <inttypes.h>
 #include <rte_byteorder.h>
 #include <rte_common.h>
+#include <rte_os_shim.h>
 
 #include <rte_interrupts.h>
 #include <rte_debug.h>
@@ -39,6 +40,8 @@
 #define IAVF_RESET_WATCHDOG_ARG    "watchdog_period"
 #define IAVF_ENABLE_AUTO_RESET_ARG "auto_reset"
 #define IAVF_NO_POLL_ON_LINK_DOWN_ARG "no-poll-on-link-down"
+#define IAVF_MBUF_CHECK_ARG       "mbuf_check"
+
 uint64_t iavf_timestamp_dynflag;
 int iavf_timestamp_dynfield_offset = -1;
 
@@ -48,6 +51,7 @@ static const char * const iavf_valid_args[] = {
 	IAVF_RESET_WATCHDOG_ARG,
 	IAVF_ENABLE_AUTO_RESET_ARG,
 	IAVF_NO_POLL_ON_LINK_DOWN_ARG,
+	IAVF_MBUF_CHECK_ARG,
 	NULL
 };
 
@@ -174,6 +178,7 @@ static const struct rte_iavf_xstats_name_off rte_iavf_stats_strings[] = {
 	{"tx_broadcast_packets", _OFF_OF(eth_stats.tx_broadcast)},
 	{"tx_dropped_packets", _OFF_OF(eth_stats.tx_discards)},
 	{"tx_error_packets", _OFF_OF(eth_stats.tx_errors)},
+	{"tx_mbuf_error_packets", _OFF_OF(mbuf_stats.tx_pkt_errors)},
 
 	{"inline_ipsec_crypto_ipackets", _OFF_OF(ips_stats.icount)},
 	{"inline_ipsec_crypto_ibytes", _OFF_OF(ips_stats.ibytes)},
@@ -1837,6 +1842,9 @@ iavf_dev_xstats_reset(struct rte_eth_dev *dev)
 	iavf_dev_stats_reset(dev);
 	memset(&vf->vsi.eth_stats_offset.ips_stats, 0,
 			sizeof(struct iavf_ipsec_crypto_stats));
+	memset(&vf->vsi.eth_stats_offset.mbuf_stats, 0,
+			sizeof(struct iavf_mbuf_stats));
+
 	return 0;
 }
 
@@ -1881,6 +1889,8 @@ static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
 {
 	int ret;
 	unsigned int i;
+	struct iavf_tx_queue *txq;
+	uint64_t mbuf_errors = 0;
 	struct iavf_adapter *adapter =
 		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
@@ -1904,6 +1914,16 @@ static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
 	if (iavf_ipsec_crypto_supported(adapter))
 		iavf_dev_update_ipsec_xstats(dev, &iavf_xtats.ips_stats);
 
+	if (adapter->devargs.mbuf_check) {
+		for (i = 0; i < dev->data->nb_tx_queues; i++) {
+			txq = dev->data->tx_queues[i];
+			mbuf_errors += __atomic_exchange_n(&txq->mbuf_errors,
+				0, __ATOMIC_RELAXED);
+		}
+		if (mbuf_errors > 0)
+			iavf_xtats.mbuf_stats.tx_pkt_errors += mbuf_errors;
+	}
+
 	/* loop over xstats array and values from pstats */
 	for (i = 0; i < IAVF_NB_XSTATS; i++) {
 		xstats[i].id = i;
@@ -2286,6 +2306,50 @@ iavf_parse_watchdog_period(__rte_unused const char *key, const char *value, void
 	return 0;
 }
 
+static int
+iavf_parse_mbuf_check(__rte_unused const char *key, const char *value, void *args)
+{
+	char *cur;
+	char *tmp;
+	int str_len;
+	int valid_len;
+
+	int ret = 0;
+	uint64_t *mc_flags = args;
+	char *str2 = strdup(value);
+	if (str2 == NULL)
+		return -1;
+
+	str_len = strlen(str2);
+	if (str2[0] == '[' && str2[str_len - 1] == ']') {
+		if (str_len < 3) {
+			ret = -1;
+			goto mdd_end;
+		}
+		valid_len = str_len - 2;
+		memmove(str2, str2 + 1, valid_len);
+		memset(str2 + valid_len, '\0', 2);
+	}
+	cur = strtok_r(str2, ",", &tmp);
+	while (cur != NULL) {
+		if (!strcmp(cur, "mbuf"))
+			*mc_flags |= IAVF_MBUF_CHECK_F_TX_MBUF;
+		else if (!strcmp(cur, "size"))
+			*mc_flags |= IAVF_MBUF_CHECK_F_TX_SIZE;
+		else if (!strcmp(cur, "segment"))
+			*mc_flags |= IAVF_MBUF_CHECK_F_TX_SEGMENT;
+		else if (!strcmp(cur, "offload"))
+			*mc_flags |= IAVF_MBUF_CHECK_F_TX_OFFLOAD;
+		else
+			PMD_DRV_LOG(ERR, "Unsupported mdd check type: %s", cur);
+		cur = strtok_r(NULL, ",", &tmp);
+	}
+
+mdd_end:
+	free(str2);
+	return ret;
+}
+
 static int iavf_parse_devargs(struct rte_eth_dev *dev)
 {
 	struct iavf_adapter *ad =
@@ -2340,6 +2404,14 @@ static int iavf_parse_devargs(struct rte_eth_dev *dev)
 		goto bail;
 	}
 
+	ret = rte_kvargs_process(kvlist, IAVF_MBUF_CHECK_ARG,
+				 &iavf_parse_mbuf_check, &ad->mc_flags);
+	if (ret)
+		goto bail;
+
+	if (ad->mc_flags)
+		ad->devargs.mbuf_check = 1;
+
 	ret = rte_kvargs_process(kvlist, IAVF_ENABLE_AUTO_RESET_ARG,
 				 &parse_bool, &ad->devargs.auto_reset);
 	if (ret)
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 13b932ad85..cb767fb668 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -3787,6 +3787,97 @@ iavf_xmit_pkts_no_poll(void *tx_queue, struct rte_mbuf **tx_pkts,
 								tx_pkts, nb_pkts);
 }
 
+/* Tx mbuf check */
+static uint16_t
+iavf_xmit_pkts_check(void *tx_queue, struct rte_mbuf **tx_pkts,
+	      uint16_t nb_pkts)
+{
+	uint16_t idx;
+	uint64_t ol_flags;
+	struct rte_mbuf *mb;
+	uint16_t good_pkts = nb_pkts;
+	const char *reason = NULL;
+	bool pkt_error = false;
+	struct iavf_tx_queue *txq = tx_queue;
+	struct iavf_adapter *adapter = txq->vsi->adapter;
+	enum iavf_tx_burst_type tx_burst_type =
+		txq->vsi->adapter->tx_burst_type;
+
+	for (idx = 0; idx < nb_pkts; idx++) {
+		mb = tx_pkts[idx];
+		ol_flags = mb->ol_flags;
+
+		if ((adapter->mc_flags & IAVF_MBUF_CHECK_F_TX_MBUF) &&
+			(rte_mbuf_check(mb, 1, &reason) != 0)) {
+			PMD_TX_LOG(ERR, "INVALID mbuf: %s\n", reason);
+			pkt_error = true;
+			break;
+		}
+
+		if ((adapter->mc_flags & IAVF_MBUF_CHECK_F_TX_SIZE) &&
+			(mb->data_len < IAVF_TX_MIN_PKT_LEN ||
+			mb->data_len > adapter->vf.max_pkt_len)) {
+			PMD_TX_LOG(ERR, "INVALID mbuf: data_len (%u) is out "
+			"of range, reasonable range (%d - %u)\n", mb->data_len,
+			IAVF_TX_MIN_PKT_LEN, adapter->vf.max_pkt_len);
+			pkt_error = true;
+			break;
+		}
+
+		if (adapter->mc_flags & IAVF_MBUF_CHECK_F_TX_SEGMENT) {
+			/* Check condition for nb_segs > IAVF_TX_MAX_MTU_SEG. */
+			if (!(ol_flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG))) {
+				if (mb->nb_segs > IAVF_TX_MAX_MTU_SEG) {
+					PMD_TX_LOG(ERR, "INVALID mbuf: nb_segs (%d) exceeds "
+					"HW limit, maximum allowed value is %d\n", mb->nb_segs,
+					IAVF_TX_MAX_MTU_SEG);
+					pkt_error = true;
+					break;
+				}
+			} else if ((mb->tso_segsz < IAVF_MIN_TSO_MSS) ||
+				(mb->tso_segsz > IAVF_MAX_TSO_MSS)) {
+				/* MSS outside the range is considered malicious */
+				PMD_TX_LOG(ERR, "INVALID mbuf: tso_segsz (%u) is out "
+				"of range, reasonable range (%d - %u)\n", mb->tso_segsz,
+				IAVF_MIN_TSO_MSS, IAVF_MAX_TSO_MSS);
+				pkt_error = true;
+				break;
+			} else if (mb->nb_segs > txq->nb_tx_desc) {
+				PMD_TX_LOG(ERR, "INVALID mbuf: nb_segs out "
+				"of ring length\n");
+				pkt_error = true;
+				break;
+			}
+		}
+
+		if (adapter->mc_flags & IAVF_MBUF_CHECK_F_TX_OFFLOAD) {
+			if (ol_flags & IAVF_TX_OFFLOAD_NOTSUP_MASK) {
+				PMD_TX_LOG(ERR, "INVALID mbuf: TX offload "
+				"is not supported\n");
+				pkt_error = true;
+				break;
+			}
+
+			if (!rte_validate_tx_offload(mb)) {
+				PMD_TX_LOG(ERR, "INVALID mbuf: TX offload "
+				"setup error\n");
+				pkt_error = true;
+				break;
+			}
+		}
+	}
+
+	if (pkt_error) {
+		__atomic_fetch_add(&txq->mbuf_errors, 1, __ATOMIC_RELAXED);
+		good_pkts = idx;
+		if (good_pkts == 0)
+			return 0;
+	}
+
+	return iavf_tx_pkt_burst_ops[tx_burst_type](tx_queue,
+								tx_pkts, good_pkts);
+}
+
 /* choose rx function*/
 void
 iavf_set_rx_function(struct rte_eth_dev *dev)
@@ -4032,6 +4123,7 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
 	struct iavf_adapter *adapter =
 		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 	enum iavf_tx_burst_type tx_burst_type;
+	int mbuf_check = adapter->devargs.mbuf_check;
 	int no_poll_on_link_down = adapter->devargs.no_poll_on_link_down;
 #ifdef RTE_ARCH_X86
 	struct iavf_tx_queue *txq;
@@ -4122,6 +4214,9 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
 		if (no_poll_on_link_down) {
 			adapter->tx_burst_type = tx_burst_type;
 			dev->tx_pkt_burst = iavf_xmit_pkts_no_poll;
+		} else if (mbuf_check) {
+			adapter->tx_burst_type = tx_burst_type;
+			dev->tx_pkt_burst = iavf_xmit_pkts_check;
 		} else {
 			dev->tx_pkt_burst = iavf_tx_pkt_burst_ops[tx_burst_type];
 		}
@@ -4138,6 +4233,9 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
 	if (no_poll_on_link_down) {
 		adapter->tx_burst_type = tx_burst_type;
 		dev->tx_pkt_burst = iavf_xmit_pkts_no_poll;
+	} else if (mbuf_check) {
+		adapter->tx_burst_type = tx_burst_type;
+		dev->tx_pkt_burst = iavf_xmit_pkts_check;
 	} else {
 		dev->tx_pkt_burst = iavf_tx_pkt_burst_ops[tx_burst_type];
 	}
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index f432f9d956..90e7291928 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -297,6 +297,8 @@ struct iavf_tx_queue {
 	uint16_t next_rs;              /* next to check DD,  for VPMD */
 	uint16_t ipsec_crypto_pkt_md_offset;
 
+	uint64_t mbuf_errors;
+
 	bool q_set;                    /* if rx queue has been configured */
 	bool tx_deferred_start;        /* don't start this queue in dev start */
 	const struct iavf_txq_ops *ops;
-- 
2.25.1



* RE: [PATCH v7 1/2] net/iavf: fix Rx/Tx burst in multi-process
  2024-01-02 10:52                 ` [PATCH v7 1/2] net/iavf: fix Rx/Tx burst in multi-process Mingjin Ye
@ 2024-01-03  2:22                   ` Zhang, Qi Z
  0 siblings, 0 replies; 36+ messages in thread
From: Zhang, Qi Z @ 2024-01-03  2:22 UTC (permalink / raw)
  To: Ye, MingjinX, dev
  Cc: Yang, Qiming, Ye, MingjinX, stable, Wu, Jingjing, Xing, Beilei



> -----Original Message-----
> From: Mingjin Ye <mingjinx.ye@intel.com>
> Sent: Tuesday, January 2, 2024 6:52 PM
> To: dev@dpdk.org
> Cc: Yang, Qiming <qiming.yang@intel.com>; Ye, MingjinX
> <mingjinx.ye@intel.com>; stable@dpdk.org; Wu, Jingjing
> <jingjing.wu@intel.com>; Xing, Beilei <beilei.xing@intel.com>
> Subject: [PATCH v7 1/2] net/iavf: fix Rx/Tx burst in multi-process
> 
> In a multi-process environment, a secondary process operates on shared
> memory and changes the function pointer of the primary process, resulting
> in a crash when the primary process cannot find the function address during
> an Rx/Tx burst.
> 
> Fixes: 5b3124a0a6ef ("net/iavf: support no polling when link down")
> Cc: stable@dpdk.org
> 
> Signed-off-by: Mingjin Ye <mingjinx.ye@intel.com>
> ---
> v2: Add fix for Rx burst.
> ---
> v3: fix Rx/Tx routing.
> ---
>  drivers/net/iavf/iavf.h      |  42 ++++++++-
>  drivers/net/iavf/iavf_rxtx.c | 173 +++++++++++++++++++++++------------
>  2 files changed, 157 insertions(+), 58 deletions(-)
> 
> diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h index
> 10868f2c30..73a089c199 100644
> --- a/drivers/net/iavf/iavf.h
> +++ b/drivers/net/iavf/iavf.h
> @@ -313,6 +313,44 @@ struct iavf_devargs {
> 
>  struct iavf_security_ctx;
> 
> +enum iavf_rx_burst_type {
> +	IAVF_RX_DEFAULT,
> +	IAVF_RX_FLEX_RXD,
> +	IAVF_RX_BULK_ALLOC,
> +	IAVF_RX_SCATTERED,
> +	IAVF_RX_SCATTERED_FLEX_RXD,
> +	IAVF_RX_SSE,
> +	IAVF_RX_AVX2,
> +	IAVF_RX_AVX2_OFFLOAD,
> +	IAVF_RX_SSE_FLEX_RXD,
> +	IAVF_RX_AVX2_FLEX_RXD,
> +	IAVF_RX_AVX2_FLEX_RXD_OFFLOAD,
> +	IAVF_RX_SSE_SCATTERED,
> +	IAVF_RX_AVX2_SCATTERED,
> +	IAVF_RX_AVX2_SCATTERED_OFFLOAD,
> +	IAVF_RX_SSE_SCATTERED_FLEX_RXD,
> +	IAVF_RX_AVX2_SCATTERED_FLEX_RXD,
> +	IAVF_RX_AVX2_SCATTERED_FLEX_RXD_OFFLOAD,
> +	IAVF_RX_AVX512,
> +	IAVF_RX_AVX512_OFFLOAD,
> +	IAVF_RX_AVX512_FLEX_RXD,
> +	IAVF_RX_AVX512_FLEX_RXD_OFFLOAD,
> +	IAVF_RX_AVX512_SCATTERED,
> +	IAVF_RX_AVX512_SCATTERED_OFFLOAD,
> +	IAVF_RX_AVX512_SCATTERED_FLEX_RXD,
> +	IAVF_RX_AVX512_SCATTERED_FLEX_RXD_OFFLOAD,
> +};
> +
> +enum iavf_tx_burst_type {
> +	IAVF_TX_DEFAULT,
> +	IAVF_TX_SSE,
> +	IAVF_TX_AVX2,
> +	IAVF_TX_AVX2_OFFLOAD,
> +	IAVF_TX_AVX512,
> +	IAVF_TX_AVX512_OFFLOAD,
> +	IAVF_TX_AVX512_CTX_OFFLOAD,
> +};
> +
>  /* Structure to store private data for each VF instance. */  struct iavf_adapter
> {
>  	struct iavf_hw hw;
> @@ -328,8 +366,8 @@ struct iavf_adapter {
>  	bool stopped;
>  	bool closed;
>  	bool no_poll;
> -	eth_rx_burst_t rx_pkt_burst;
> -	eth_tx_burst_t tx_pkt_burst;
> +	enum iavf_rx_burst_type rx_burst_type;
> +	enum iavf_tx_burst_type tx_burst_type;
>  	uint16_t fdir_ref_cnt;
>  	struct iavf_devargs devargs;
>  };
> diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c index
> f19aa14646..13b932ad85 100644
> --- a/drivers/net/iavf/iavf_rxtx.c
> +++ b/drivers/net/iavf/iavf_rxtx.c
> @@ -3707,15 +3707,68 @@ iavf_prep_pkts(__rte_unused void *tx_queue,
> struct rte_mbuf **tx_pkts,
>  	return i;
>  }
> 
> +static
> +const eth_rx_burst_t iavf_rx_pkt_burst_ops[] = {

It will break if we insert a new type without changing the ops array.

It's better to explicitly initialize the array with designated indexes:

[IAVF_RX_DEFAULT] = iavf_recv_pkts,

....
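
A fuller sketch of that layout, using entries this driver already has (abbreviated):

    static const eth_rx_burst_t iavf_rx_pkt_burst_ops[] = {
        /* each slot is bound to its enum value, so inserting a new
         * iavf_rx_burst_type member cannot silently shift the mapping */
        [IAVF_RX_DEFAULT]    = iavf_recv_pkts,
        [IAVF_RX_FLEX_RXD]   = iavf_recv_pkts_flex_rxd,
        [IAVF_RX_BULK_ALLOC] = iavf_recv_pkts_bulk_alloc,
        /* ... one designated entry per remaining burst type ... */
    };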


* RE: [PATCH v7 2/2] net/iavf: add diagnostic support in TX path
  2024-01-02 10:52                 ` [PATCH v7 2/2] net/iavf: add diagnostic support in TX path Mingjin Ye
@ 2024-01-03  2:54                   ` Zhang, Qi Z
  2024-01-03 10:10                   ` [PATCH v8 0/2] net/iavf: fix Rx/Tx burst and add diagnostics Mingjin Ye
  1 sibling, 0 replies; 36+ messages in thread
From: Zhang, Qi Z @ 2024-01-03  2:54 UTC (permalink / raw)
  To: Ye, MingjinX, dev
  Cc: Yang, Qiming, Ye, MingjinX, Wu, Wenjun1, Zhang, Yuying, Xing,
	Beilei, Su, Simei, Wu, Jingjing



> -----Original Message-----
> From: Mingjin Ye <mingjinx.ye@intel.com>
> Sent: Tuesday, January 2, 2024 6:52 PM
> To: dev@dpdk.org
> Cc: Yang, Qiming <qiming.yang@intel.com>; Ye, MingjinX
> <mingjinx.ye@intel.com>; Wu, Wenjun1 <wenjun1.wu@intel.com>; Zhang,
> Yuying <yuying.zhang@intel.com>; Xing, Beilei <beilei.xing@intel.com>; Su,
> Simei <simei.su@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>
> Subject: [PATCH v7 2/2] net/iavf: add diagnostic support in TX path
> 
> The only way to enable diagnostics for TX paths is to modify the application
> source code, making it difficult to diagnose faults.
> 
> In this patch, the devarg option "mbuf_check" is introduced and the
> parameters are configured to enable the corresponding diagnostics.
> 
> supported cases: mbuf, size, segment, offload.
>  1. mbuf: check for corrupted mbuf.
>  2. size: check min/max packet length according to hw spec.
>  3. segment: check number of mbuf segments not exceed hw limitation.
>  4. offload: check any unsupported offload flag.
> 
> parameter format: mbuf_check=[mbuf,<case1>,<case2>]
> eg: dpdk-testpmd -a 0000:81:01.0,mbuf_check=[mbuf,size] -- -i
> 
> Signed-off-by: Mingjin Ye <mingjinx.ye@intel.com>
> ---
> v2: Remove call chain.
> ---
> v3: Optimisation implementation.
> ---
> v4: Fix Windows os compilation error.
> ---
> v5: Split Patch.
> ---
> v6: remove strict.
> ---
>  doc/guides/nics/intel_vf.rst   |  4 ++
>  drivers/net/iavf/iavf.h        | 12 +++++
>  drivers/net/iavf/iavf_ethdev.c | 72 +++++++++++++++++++++++++
>  drivers/net/iavf/iavf_rxtx.c   | 98 ++++++++++++++++++++++++++++++++++
>  drivers/net/iavf/iavf_rxtx.h   |  2 +
>  5 files changed, 188 insertions(+)
> 
> diff --git a/doc/guides/nics/intel_vf.rst b/doc/guides/nics/intel_vf.rst index
> ad08198f0f..8e39bc831c 100644
> --- a/doc/guides/nics/intel_vf.rst
> +++ b/doc/guides/nics/intel_vf.rst
> @@ -111,6 +111,10 @@ For more detail on SR-IOV, please refer to the
> following documents:
>      by setting the ``devargs`` parameter like ``-a 18:01.0,no-poll-on-link-
> down=1``
>      when IAVF is backed by an Intel\ |reg| E810 device or an Intel\ |reg| 700
> Series Ethernet device.
> 
> +    Enable mbuf check for Tx diagnostics by setting the devargs parameter
> like
> +    ``-a 18:01.0,mbuf_check=[mbuf,<case1>,<case2>]`` when IAVF is backed
> by an
> +    Intel\ |reg| E810 device or an Intel\ |reg| 700 Series Ethernet device.
> +

Please list all the options with descriptions, which you already have in the commit log.
It's necessary for users to understand from the documentation how to use the devarg.
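
For instance, an RST fragment along these lines (which the v8 revision below ends up adding) would cover it:

    *   mbuf: Check for corrupted mbuf.
    *   size: Check min/max packet length according to hw spec.
    *   segment: Check number of mbuf segments not exceed hw limitation.
    *   offload: Check any unsupported offload flag.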




* [PATCH v8 0/2] net/iavf: fix Rx/Tx burst and add diagnostics
  2024-01-02 10:52                 ` [PATCH v7 2/2] net/iavf: add diagnostic support in TX path Mingjin Ye
  2024-01-03  2:54                   ` Zhang, Qi Z
@ 2024-01-03 10:10                   ` Mingjin Ye
  2024-01-03 10:10                     ` [PATCH v8 1/2] net/iavf: fix Rx/Tx burst in multi-process Mingjin Ye
  2024-01-03 10:10                     ` [PATCH v8 2/2] net/iavf: add diagnostic support in TX path Mingjin Ye
  1 sibling, 2 replies; 36+ messages in thread
From: Mingjin Ye @ 2024-01-03 10:10 UTC (permalink / raw)
  To: dev; +Cc: qiming.yang, Mingjin Ye

Fixed an Rx/Tx crash in a multi-process environment and added
a Tx diagnostic feature.

Mingjin Ye (2):
  net/iavf: fix Rx/Tx burst in multi-process
  net/iavf: add diagnostic support in TX path

 doc/guides/nics/intel_vf.rst   |   9 ++
 drivers/net/iavf/iavf.h        |  54 ++++++-
 drivers/net/iavf/iavf_ethdev.c |  76 +++++++++
 drivers/net/iavf/iavf_rxtx.c   | 280 ++++++++++++++++++++++++++-------
 drivers/net/iavf/iavf_rxtx.h   |   2 +
 5 files changed, 363 insertions(+), 58 deletions(-)

-- 
2.25.1



* [PATCH v8 1/2] net/iavf: fix Rx/Tx burst in multi-process
  2024-01-03 10:10                   ` [PATCH v8 0/2] net/iavf: fix Rx/Tx burst and add diagnostics Mingjin Ye
@ 2024-01-03 10:10                     ` Mingjin Ye
  2024-01-03 10:10                     ` [PATCH v8 2/2] net/iavf: add diagnostic support in TX path Mingjin Ye
  1 sibling, 0 replies; 36+ messages in thread
From: Mingjin Ye @ 2024-01-03 10:10 UTC (permalink / raw)
  To: dev; +Cc: qiming.yang, Mingjin Ye, stable, Jingjing Wu, Beilei Xing

In a multi-process environment, a secondary process operates on shared
memory and changes the function pointer of the primary process, resulting
in a crash when the primary process cannot find the function address
during an Rx/Tx burst.
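
The fix, in a minimal sketch (simplified from the patch below, not the
exact driver code): shared memory stores only a plain enum, and each
process translates it through its own per-process const ops table, so
the function address that gets called is always resolved locally.

    enum iavf_rx_burst_type { IAVF_RX_DEFAULT, IAVF_RX_SSE /* ... */ };

    static const eth_rx_burst_t iavf_rx_pkt_burst_ops[] = {
        [IAVF_RX_DEFAULT] = iavf_recv_pkts,     /* address local to each process */
        [IAVF_RX_SSE]     = iavf_recv_pkts_vec,
    };

    /* the shared adapter keeps the index, never a function pointer */
    adapter->rx_burst_type = IAVF_RX_SSE;

    /* at burst time every process indexes its own table */
    return iavf_rx_pkt_burst_ops[adapter->rx_burst_type](rx_queue, rx_pkts, nb_pkts);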

Fixes: 5b3124a0a6ef ("net/iavf: support no polling when link down")
Cc: stable@dpdk.org

Signed-off-by: Mingjin Ye <mingjinx.ye@intel.com>
---
v2: Add fix for Rx burst.
---
v3: fix Rx/Tx routing.
---
v4: Fix the ops array.
---
 drivers/net/iavf/iavf.h      |  42 +++++++-
 drivers/net/iavf/iavf_rxtx.c | 182 ++++++++++++++++++++++++-----------
 2 files changed, 166 insertions(+), 58 deletions(-)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 10868f2c30..73a089c199 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -313,6 +313,44 @@ struct iavf_devargs {
 
 struct iavf_security_ctx;
 
+enum iavf_rx_burst_type {
+	IAVF_RX_DEFAULT,
+	IAVF_RX_FLEX_RXD,
+	IAVF_RX_BULK_ALLOC,
+	IAVF_RX_SCATTERED,
+	IAVF_RX_SCATTERED_FLEX_RXD,
+	IAVF_RX_SSE,
+	IAVF_RX_AVX2,
+	IAVF_RX_AVX2_OFFLOAD,
+	IAVF_RX_SSE_FLEX_RXD,
+	IAVF_RX_AVX2_FLEX_RXD,
+	IAVF_RX_AVX2_FLEX_RXD_OFFLOAD,
+	IAVF_RX_SSE_SCATTERED,
+	IAVF_RX_AVX2_SCATTERED,
+	IAVF_RX_AVX2_SCATTERED_OFFLOAD,
+	IAVF_RX_SSE_SCATTERED_FLEX_RXD,
+	IAVF_RX_AVX2_SCATTERED_FLEX_RXD,
+	IAVF_RX_AVX2_SCATTERED_FLEX_RXD_OFFLOAD,
+	IAVF_RX_AVX512,
+	IAVF_RX_AVX512_OFFLOAD,
+	IAVF_RX_AVX512_FLEX_RXD,
+	IAVF_RX_AVX512_FLEX_RXD_OFFLOAD,
+	IAVF_RX_AVX512_SCATTERED,
+	IAVF_RX_AVX512_SCATTERED_OFFLOAD,
+	IAVF_RX_AVX512_SCATTERED_FLEX_RXD,
+	IAVF_RX_AVX512_SCATTERED_FLEX_RXD_OFFLOAD,
+};
+
+enum iavf_tx_burst_type {
+	IAVF_TX_DEFAULT,
+	IAVF_TX_SSE,
+	IAVF_TX_AVX2,
+	IAVF_TX_AVX2_OFFLOAD,
+	IAVF_TX_AVX512,
+	IAVF_TX_AVX512_OFFLOAD,
+	IAVF_TX_AVX512_CTX_OFFLOAD,
+};
+
 /* Structure to store private data for each VF instance. */
 struct iavf_adapter {
 	struct iavf_hw hw;
@@ -328,8 +366,8 @@ struct iavf_adapter {
 	bool stopped;
 	bool closed;
 	bool no_poll;
-	eth_rx_burst_t rx_pkt_burst;
-	eth_tx_burst_t tx_pkt_burst;
+	enum iavf_rx_burst_type rx_burst_type;
+	enum iavf_tx_burst_type tx_burst_type;
 	uint16_t fdir_ref_cnt;
 	struct iavf_devargs devargs;
 };
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index f19aa14646..89db82c694 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -3707,15 +3707,77 @@ iavf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
 	return i;
 }
 
+static
+const eth_rx_burst_t iavf_rx_pkt_burst_ops[] = {
+	[IAVF_RX_DEFAULT] = iavf_recv_pkts,
+	[IAVF_RX_FLEX_RXD] = iavf_recv_pkts_flex_rxd,
+	[IAVF_RX_BULK_ALLOC] = iavf_recv_pkts_bulk_alloc,
+	[IAVF_RX_SCATTERED] = iavf_recv_scattered_pkts,
+	[IAVF_RX_SCATTERED_FLEX_RXD] = iavf_recv_scattered_pkts_flex_rxd,
+#ifdef RTE_ARCH_X86
+	[IAVF_RX_SSE] = iavf_recv_pkts_vec,
+	[IAVF_RX_AVX2] = iavf_recv_pkts_vec_avx2,
+	[IAVF_RX_AVX2_OFFLOAD] = iavf_recv_pkts_vec_avx2_offload,
+	[IAVF_RX_SSE_FLEX_RXD] = iavf_recv_pkts_vec_flex_rxd,
+	[IAVF_RX_AVX2_FLEX_RXD] = iavf_recv_pkts_vec_avx2_flex_rxd,
+	[IAVF_RX_AVX2_FLEX_RXD_OFFLOAD] =
+		iavf_recv_pkts_vec_avx2_flex_rxd_offload,
+	[IAVF_RX_SSE_SCATTERED] = iavf_recv_scattered_pkts_vec,
+	[IAVF_RX_AVX2_SCATTERED] = iavf_recv_scattered_pkts_vec_avx2,
+	[IAVF_RX_AVX2_SCATTERED_OFFLOAD] =
+		iavf_recv_scattered_pkts_vec_avx2_offload,
+	[IAVF_RX_SSE_SCATTERED_FLEX_RXD] =
+		iavf_recv_scattered_pkts_vec_flex_rxd,
+	[IAVF_RX_AVX2_SCATTERED_FLEX_RXD] =
+		iavf_recv_scattered_pkts_vec_avx2_flex_rxd,
+	[IAVF_RX_AVX2_SCATTERED_FLEX_RXD_OFFLOAD] =
+		iavf_recv_scattered_pkts_vec_avx2_flex_rxd_offload,
+#ifdef CC_AVX512_SUPPORT
+	[IAVF_RX_AVX512] = iavf_recv_pkts_vec_avx512,
+	[IAVF_RX_AVX512_OFFLOAD] = iavf_recv_pkts_vec_avx512_offload,
+	[IAVF_RX_AVX512_FLEX_RXD] = iavf_recv_pkts_vec_avx512_flex_rxd,
+	[IAVF_RX_AVX512_FLEX_RXD_OFFLOAD] =
+		iavf_recv_pkts_vec_avx512_flex_rxd_offload,
+	[IAVF_RX_AVX512_SCATTERED] = iavf_recv_scattered_pkts_vec_avx512,
+	[IAVF_RX_AVX512_SCATTERED_OFFLOAD] =
+		iavf_recv_scattered_pkts_vec_avx512_offload,
+	[IAVF_RX_AVX512_SCATTERED_FLEX_RXD] =
+		iavf_recv_scattered_pkts_vec_avx512_flex_rxd,
+	[IAVF_RX_AVX512_SCATTERED_FLEX_RXD_OFFLOAD] =
+		iavf_recv_scattered_pkts_vec_avx512_flex_rxd_offload,
+#endif
+#elif defined RTE_ARCH_ARM
+	[IAVF_RX_SSE] = iavf_recv_pkts_vec,
+#endif
+};
+
+static
+const eth_tx_burst_t iavf_tx_pkt_burst_ops[] = {
+	[IAVF_TX_DEFAULT] = iavf_xmit_pkts,
+#ifdef RTE_ARCH_X86
+	[IAVF_TX_SSE] = iavf_xmit_pkts_vec,
+	[IAVF_TX_AVX2] = iavf_xmit_pkts_vec_avx2,
+	[IAVF_TX_AVX2_OFFLOAD] = iavf_xmit_pkts_vec_avx2_offload,
+#ifdef CC_AVX512_SUPPORT
+	[IAVF_TX_AVX512] = iavf_xmit_pkts_vec_avx512,
+	[IAVF_TX_AVX512_OFFLOAD] = iavf_xmit_pkts_vec_avx512_offload,
+	[IAVF_TX_AVX512_CTX_OFFLOAD] = iavf_xmit_pkts_vec_avx512_ctx_offload,
+#endif
+#endif
+};
+
 static uint16_t
 iavf_recv_pkts_no_poll(void *rx_queue, struct rte_mbuf **rx_pkts,
 				uint16_t nb_pkts)
 {
 	struct iavf_rx_queue *rxq = rx_queue;
+	enum iavf_rx_burst_type rx_burst_type =
+		rxq->vsi->adapter->rx_burst_type;
+
 	if (!rxq->vsi || rxq->vsi->adapter->no_poll)
 		return 0;
 
-	return rxq->vsi->adapter->rx_pkt_burst(rx_queue,
+	return iavf_rx_pkt_burst_ops[rx_burst_type](rx_queue,
 								rx_pkts, nb_pkts);
 }
 
@@ -3724,10 +3786,13 @@ iavf_xmit_pkts_no_poll(void *tx_queue, struct rte_mbuf **tx_pkts,
 				uint16_t nb_pkts)
 {
 	struct iavf_tx_queue *txq = tx_queue;
+	enum iavf_tx_burst_type tx_burst_type =
+		txq->vsi->adapter->tx_burst_type;
+
 	if (!txq->vsi || txq->vsi->adapter->no_poll)
 		return 0;
 
-	return txq->vsi->adapter->tx_pkt_burst(tx_queue,
+	return iavf_tx_pkt_burst_ops[tx_burst_type](tx_queue,
 								tx_pkts, nb_pkts);
 }
 
@@ -3738,6 +3803,7 @@ iavf_set_rx_function(struct rte_eth_dev *dev)
 	struct iavf_adapter *adapter =
 		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+	enum iavf_rx_burst_type rx_burst_type;
 	int no_poll_on_link_down = adapter->devargs.no_poll_on_link_down;
 	int i;
 	struct iavf_rx_queue *rxq;
@@ -3808,43 +3874,43 @@ iavf_set_rx_function(struct rte_eth_dev *dev)
 				}
 			}
 			if (use_flex) {
-				dev->rx_pkt_burst = iavf_recv_scattered_pkts_vec_flex_rxd;
+				rx_burst_type = IAVF_RX_SSE_SCATTERED_FLEX_RXD;
 				if (use_avx2) {
 					if (check_ret == IAVF_VECTOR_PATH)
-						dev->rx_pkt_burst =
-							iavf_recv_scattered_pkts_vec_avx2_flex_rxd;
+						rx_burst_type =
+							IAVF_RX_AVX2_SCATTERED_FLEX_RXD;
 					else
-						dev->rx_pkt_burst =
-							iavf_recv_scattered_pkts_vec_avx2_flex_rxd_offload;
+						rx_burst_type =
+							IAVF_RX_AVX2_SCATTERED_FLEX_RXD_OFFLOAD;
 				}
 #ifdef CC_AVX512_SUPPORT
 				if (use_avx512) {
 					if (check_ret == IAVF_VECTOR_PATH)
-						dev->rx_pkt_burst =
-							iavf_recv_scattered_pkts_vec_avx512_flex_rxd;
+						rx_burst_type =
+							IAVF_RX_AVX512_SCATTERED_FLEX_RXD;
 					else
-						dev->rx_pkt_burst =
-							iavf_recv_scattered_pkts_vec_avx512_flex_rxd_offload;
+						rx_burst_type =
+							IAVF_RX_AVX512_SCATTERED_FLEX_RXD_OFFLOAD;
 				}
 #endif
 			} else {
-				dev->rx_pkt_burst = iavf_recv_scattered_pkts_vec;
+				rx_burst_type = IAVF_RX_SSE_SCATTERED;
 				if (use_avx2) {
 					if (check_ret == IAVF_VECTOR_PATH)
-						dev->rx_pkt_burst =
-							iavf_recv_scattered_pkts_vec_avx2;
+						rx_burst_type =
+							IAVF_RX_AVX2_SCATTERED;
 					else
-						dev->rx_pkt_burst =
-							iavf_recv_scattered_pkts_vec_avx2_offload;
+						rx_burst_type =
+							IAVF_RX_AVX2_SCATTERED_OFFLOAD;
 				}
 #ifdef CC_AVX512_SUPPORT
 				if (use_avx512) {
 					if (check_ret == IAVF_VECTOR_PATH)
-						dev->rx_pkt_burst =
-							iavf_recv_scattered_pkts_vec_avx512;
+						rx_burst_type =
+							IAVF_RX_AVX512_SCATTERED;
 					else
-						dev->rx_pkt_burst =
-							iavf_recv_scattered_pkts_vec_avx512_offload;
+						rx_burst_type =
+							IAVF_RX_AVX512_SCATTERED_OFFLOAD;
 				}
 #endif
 			}
@@ -3874,51 +3940,46 @@ iavf_set_rx_function(struct rte_eth_dev *dev)
 				}
 			}
 			if (use_flex) {
-				dev->rx_pkt_burst = iavf_recv_pkts_vec_flex_rxd;
+				rx_burst_type = IAVF_RX_SSE_FLEX_RXD;
 				if (use_avx2) {
 					if (check_ret == IAVF_VECTOR_PATH)
-						dev->rx_pkt_burst =
-							iavf_recv_pkts_vec_avx2_flex_rxd;
+						rx_burst_type = IAVF_RX_AVX2_FLEX_RXD;
 					else
-						dev->rx_pkt_burst =
-							iavf_recv_pkts_vec_avx2_flex_rxd_offload;
+						rx_burst_type = IAVF_RX_AVX2_FLEX_RXD_OFFLOAD;
 				}
 #ifdef CC_AVX512_SUPPORT
 				if (use_avx512) {
 					if (check_ret == IAVF_VECTOR_PATH)
-						dev->rx_pkt_burst =
-							iavf_recv_pkts_vec_avx512_flex_rxd;
+						rx_burst_type = IAVF_RX_AVX512_FLEX_RXD;
 					else
-						dev->rx_pkt_burst =
-							iavf_recv_pkts_vec_avx512_flex_rxd_offload;
+						rx_burst_type =
+							IAVF_RX_AVX512_FLEX_RXD_OFFLOAD;
 				}
 #endif
 			} else {
-				dev->rx_pkt_burst = iavf_recv_pkts_vec;
+				rx_burst_type = IAVF_RX_SSE;
 				if (use_avx2) {
 					if (check_ret == IAVF_VECTOR_PATH)
-						dev->rx_pkt_burst =
-							iavf_recv_pkts_vec_avx2;
+						rx_burst_type = IAVF_RX_AVX2;
 					else
-						dev->rx_pkt_burst =
-							iavf_recv_pkts_vec_avx2_offload;
+						rx_burst_type = IAVF_RX_AVX2_OFFLOAD;
 				}
 #ifdef CC_AVX512_SUPPORT
 				if (use_avx512) {
 					if (check_ret == IAVF_VECTOR_PATH)
-						dev->rx_pkt_burst =
-							iavf_recv_pkts_vec_avx512;
+						rx_burst_type = IAVF_RX_AVX512;
 					else
-						dev->rx_pkt_burst =
-							iavf_recv_pkts_vec_avx512_offload;
+						rx_burst_type = IAVF_RX_AVX512_OFFLOAD;
 				}
 #endif
 			}
 		}
 
 		if (no_poll_on_link_down) {
-			adapter->rx_pkt_burst = dev->rx_pkt_burst;
+			adapter->rx_burst_type = rx_burst_type;
 			dev->rx_pkt_burst = iavf_recv_pkts_no_poll;
+		} else {
+			dev->rx_pkt_burst = iavf_rx_pkt_burst_ops[rx_burst_type];
 		}
 		return;
 	}
@@ -3934,11 +3995,13 @@ iavf_set_rx_function(struct rte_eth_dev *dev)
 			rxq = dev->data->rx_queues[i];
 			(void)iavf_rxq_vec_setup(rxq);
 		}
-		dev->rx_pkt_burst = iavf_recv_pkts_vec;
+		rx_burst_type = IAVF_RX_SSE;
 
 		if (no_poll_on_link_down) {
-			adapter->rx_pkt_burst = dev->rx_pkt_burst;
+			adapter->rx_burst_type = rx_burst_type;
 			dev->rx_pkt_burst = iavf_recv_pkts_no_poll;
+		} else {
+			dev->rx_pkt_burst = iavf_rx_pkt_burst_ops[rx_burst_type];
 		}
 		return;
 	}
@@ -3947,25 +4010,27 @@ iavf_set_rx_function(struct rte_eth_dev *dev)
 		PMD_DRV_LOG(DEBUG, "Using a Scattered Rx callback (port=%d).",
 			    dev->data->port_id);
 		if (use_flex)
-			dev->rx_pkt_burst = iavf_recv_scattered_pkts_flex_rxd;
+			rx_burst_type = IAVF_RX_SCATTERED_FLEX_RXD;
 		else
-			dev->rx_pkt_burst = iavf_recv_scattered_pkts;
+			rx_burst_type = IAVF_RX_SCATTERED;
 	} else if (adapter->rx_bulk_alloc_allowed) {
 		PMD_DRV_LOG(DEBUG, "Using bulk Rx callback (port=%d).",
 			    dev->data->port_id);
-		dev->rx_pkt_burst = iavf_recv_pkts_bulk_alloc;
+		rx_burst_type = IAVF_RX_BULK_ALLOC;
 	} else {
 		PMD_DRV_LOG(DEBUG, "Using Basic Rx callback (port=%d).",
 			    dev->data->port_id);
 		if (use_flex)
-			dev->rx_pkt_burst = iavf_recv_pkts_flex_rxd;
+			rx_burst_type = IAVF_RX_FLEX_RXD;
 		else
-			dev->rx_pkt_burst = iavf_recv_pkts;
+			rx_burst_type = IAVF_RX_DEFAULT;
 	}
 
 	if (no_poll_on_link_down) {
-		adapter->rx_pkt_burst = dev->rx_pkt_burst;
+		adapter->rx_burst_type = rx_burst_type;
 		dev->rx_pkt_burst = iavf_recv_pkts_no_poll;
+	} else {
+		dev->rx_pkt_burst = iavf_rx_pkt_burst_ops[rx_burst_type];
 	}
 }
 
@@ -3975,6 +4040,7 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
 {
 	struct iavf_adapter *adapter =
 		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	enum iavf_tx_burst_type tx_burst_type;
 	int no_poll_on_link_down = adapter->devargs.no_poll_on_link_down;
 #ifdef RTE_ARCH_X86
 	struct iavf_tx_queue *txq;
@@ -4010,11 +4076,11 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
 		if (use_sse) {
 			PMD_DRV_LOG(DEBUG, "Using Vector Tx (port %d).",
 				    dev->data->port_id);
-			dev->tx_pkt_burst = iavf_xmit_pkts_vec;
+			tx_burst_type = IAVF_TX_SSE;
 		}
 		if (use_avx2) {
 			if (check_ret == IAVF_VECTOR_PATH) {
-				dev->tx_pkt_burst = iavf_xmit_pkts_vec_avx2;
+				tx_burst_type = IAVF_TX_AVX2;
 				PMD_DRV_LOG(DEBUG, "Using AVX2 Vector Tx (port %d).",
 					    dev->data->port_id);
 			} else if (check_ret == IAVF_VECTOR_CTX_OFFLOAD_PATH) {
@@ -4022,7 +4088,7 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
 					"AVX2 does not support outer checksum offload.");
 				goto normal;
 			} else {
-				dev->tx_pkt_burst = iavf_xmit_pkts_vec_avx2_offload;
+				tx_burst_type = IAVF_TX_AVX2_OFFLOAD;
 				dev->tx_pkt_prepare = iavf_prep_pkts;
 				PMD_DRV_LOG(DEBUG, "Using AVX2 OFFLOAD Vector Tx (port %d).",
 					    dev->data->port_id);
@@ -4031,16 +4097,16 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
 #ifdef CC_AVX512_SUPPORT
 		if (use_avx512) {
 			if (check_ret == IAVF_VECTOR_PATH) {
-				dev->tx_pkt_burst = iavf_xmit_pkts_vec_avx512;
+				tx_burst_type = IAVF_TX_AVX512;
 				PMD_DRV_LOG(DEBUG, "Using AVX512 Vector Tx (port %d).",
 					    dev->data->port_id);
 			} else if (check_ret == IAVF_VECTOR_OFFLOAD_PATH) {
-				dev->tx_pkt_burst = iavf_xmit_pkts_vec_avx512_offload;
+				tx_burst_type = IAVF_TX_AVX512_OFFLOAD;
 				dev->tx_pkt_prepare = iavf_prep_pkts;
 				PMD_DRV_LOG(DEBUG, "Using AVX512 OFFLOAD Vector Tx (port %d).",
 					    dev->data->port_id);
 			} else {
-				dev->tx_pkt_burst = iavf_xmit_pkts_vec_avx512_ctx_offload;
+				tx_burst_type = IAVF_TX_AVX512_CTX_OFFLOAD;
 				dev->tx_pkt_prepare = iavf_prep_pkts;
 				PMD_DRV_LOG(DEBUG, "Using AVX512 CONTEXT OFFLOAD Vector Tx (port %d).",
 					    dev->data->port_id);
@@ -4063,8 +4129,10 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
 		}
 
 		if (no_poll_on_link_down) {
-			adapter->tx_pkt_burst = dev->tx_pkt_burst;
+			adapter->tx_burst_type = tx_burst_type;
 			dev->tx_pkt_burst = iavf_xmit_pkts_no_poll;
+		} else {
+			dev->tx_pkt_burst = iavf_tx_pkt_burst_ops[tx_burst_type];
 		}
 		return;
 	}
@@ -4073,12 +4141,14 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
 #endif
 	PMD_DRV_LOG(DEBUG, "Using Basic Tx callback (port=%d).",
 		    dev->data->port_id);
-	dev->tx_pkt_burst = iavf_xmit_pkts;
+	tx_burst_type = IAVF_TX_DEFAULT;
 	dev->tx_pkt_prepare = iavf_prep_pkts;
 
 	if (no_poll_on_link_down) {
-		adapter->tx_pkt_burst = dev->tx_pkt_burst;
+		adapter->tx_burst_type = tx_burst_type;
 		dev->tx_pkt_burst = iavf_xmit_pkts_no_poll;
+	} else {
+		dev->tx_pkt_burst = iavf_tx_pkt_burst_ops[tx_burst_type];
 	}
 }
 
-- 
2.25.1



* [PATCH v8 2/2] net/iavf: add diagnostic support in TX path
  2024-01-03 10:10                   ` [PATCH v8 0/2] net/iavf: fix Rx/Tx burst and add diagnostics Mingjin Ye
  2024-01-03 10:10                     ` [PATCH v8 1/2] net/iavf: fix Rx/Tx burst in multi-process Mingjin Ye
@ 2024-01-03 10:10                     ` Mingjin Ye
  2024-01-04 10:18                       ` [PATCH v9 0/2] net/iavf: fix Rx/Tx burst and add diagnostics Mingjin Ye
  2024-01-05  0:44                       ` [PATCH v8 2/2] " Zhang, Qi Z
  1 sibling, 2 replies; 36+ messages in thread
From: Mingjin Ye @ 2024-01-03 10:10 UTC (permalink / raw)
  To: dev
  Cc: qiming.yang, Mingjin Ye, Simei Su, Wenjun Wu, Yuying Zhang,
	Beilei Xing, Jingjing Wu

The only way to enable diagnostics for TX paths is to modify the
application source code, making it difficult to diagnose faults.

In this patch, the devarg option "mbuf_check" is introduced and the
parameters are configured to enable the corresponding diagnostics.

supported cases: mbuf, size, segment, offload.
 1. mbuf: check for corrupted mbuf.
 2. size: check min/max packet length according to hw spec.
 3. segment: check number of mbuf segments not exceed hw limitation.
 4. offload: check any unsupported offload flag.

parameter format: mbuf_check=[mbuf,<case1>,<case2>]
eg: dpdk-testpmd -a 0000:81:01.0,mbuf_check=[mbuf,size] -- -i
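
As a usage sketch, the checks can be combined and the result read back
through xstats (the counter name matches the one this patch adds):

 dpdk-testpmd -a 0000:81:01.0,mbuf_check=[mbuf,size,segment,offload] -- -i
 testpmd> show port xstats 0    # reports tx_mbuf_error_packets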

Signed-off-by: Mingjin Ye <mingjinx.ye@intel.com>
---
v2: Remove call chain.
---
v3: Optimisation implementation.
---
v4: Fix Windows os compilation error.
---
v5: Split Patch.
---
v6: remove strict.
---
v7: Modify the description document.
---
 doc/guides/nics/intel_vf.rst   |  9 ++++
 drivers/net/iavf/iavf.h        | 12 +++++
 drivers/net/iavf/iavf_ethdev.c | 76 ++++++++++++++++++++++++++
 drivers/net/iavf/iavf_rxtx.c   | 98 ++++++++++++++++++++++++++++++++++
 drivers/net/iavf/iavf_rxtx.h   |  2 +
 5 files changed, 197 insertions(+)

diff --git a/doc/guides/nics/intel_vf.rst b/doc/guides/nics/intel_vf.rst
index ad08198f0f..bda6648726 100644
--- a/doc/guides/nics/intel_vf.rst
+++ b/doc/guides/nics/intel_vf.rst
@@ -111,6 +111,15 @@ For more detail on SR-IOV, please refer to the following documents:
     by setting the ``devargs`` parameter like ``-a 18:01.0,no-poll-on-link-down=1``
     when IAVF is backed by an Intel\ |reg| E810 device or an Intel\ |reg| 700 Series Ethernet device.
 
+    When IAVF is backed by an Intel\ |reg| E810 device or an Intel\ |reg| 700 Series Ethernet device,
+    set the ``devargs`` parameter ``mbuf_check`` to enable Tx diagnostics. For example,
+    ``-a 18:01.0,mbuf_check=mbuf`` or ``-a 18:01.0,mbuf_check=[mbuf,size]``. Supported cases:
+
+    *   mbuf: Check for corrupted mbuf.
+    *   size: Check min/max packet length according to hw spec.
+    *   segment: Check number of mbuf segments not exceed hw limitation.
+    *   offload: Check any unsupported offload flag.
+
 The PCIE host-interface of Intel Ethernet Switch FM10000 Series VF infrastructure
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 73a089c199..6535b624cb 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -113,9 +113,14 @@ struct iavf_ipsec_crypto_stats {
 	} ierrors;
 };
 
+struct iavf_mbuf_stats {
+	uint64_t tx_pkt_errors;
+};
+
 struct iavf_eth_xstats {
 	struct virtchnl_eth_stats eth_stats;
 	struct iavf_ipsec_crypto_stats ips_stats;
+	struct iavf_mbuf_stats mbuf_stats;
 };
 
 /* Structure that defines a VSI, associated with a adapter. */
@@ -309,6 +314,7 @@ struct iavf_devargs {
 	uint32_t watchdog_period;
 	int auto_reset;
 	int no_poll_on_link_down;
+	int mbuf_check;
 };
 
 struct iavf_security_ctx;
@@ -351,6 +357,11 @@ enum iavf_tx_burst_type {
 	IAVF_TX_AVX512_CTX_OFFLOAD,
 };
 
+#define IAVF_MBUF_CHECK_F_TX_MBUF        (1ULL << 0)
+#define IAVF_MBUF_CHECK_F_TX_SIZE        (1ULL << 1)
+#define IAVF_MBUF_CHECK_F_TX_SEGMENT     (1ULL << 2)
+#define IAVF_MBUF_CHECK_F_TX_OFFLOAD     (1ULL << 3)
+
 /* Structure to store private data for each VF instance. */
 struct iavf_adapter {
 	struct iavf_hw hw;
@@ -368,6 +379,7 @@ struct iavf_adapter {
 	bool no_poll;
 	enum iavf_rx_burst_type rx_burst_type;
 	enum iavf_tx_burst_type tx_burst_type;
+	uint64_t mc_flags; /* mbuf check flags. */
 	uint16_t fdir_ref_cnt;
 	struct iavf_devargs devargs;
 };
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index d1edb0dd5c..7d1cd9050b 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -13,6 +13,7 @@
 #include <inttypes.h>
 #include <rte_byteorder.h>
 #include <rte_common.h>
+#include <rte_os_shim.h>
 
 #include <rte_interrupts.h>
 #include <rte_debug.h>
@@ -39,6 +40,8 @@
 #define IAVF_RESET_WATCHDOG_ARG    "watchdog_period"
 #define IAVF_ENABLE_AUTO_RESET_ARG "auto_reset"
 #define IAVF_NO_POLL_ON_LINK_DOWN_ARG "no-poll-on-link-down"
+#define IAVF_MBUF_CHECK_ARG       "mbuf_check"
+
 uint64_t iavf_timestamp_dynflag;
 int iavf_timestamp_dynfield_offset = -1;
 
@@ -48,6 +51,7 @@ static const char * const iavf_valid_args[] = {
 	IAVF_RESET_WATCHDOG_ARG,
 	IAVF_ENABLE_AUTO_RESET_ARG,
 	IAVF_NO_POLL_ON_LINK_DOWN_ARG,
+	IAVF_MBUF_CHECK_ARG,
 	NULL
 };
 
@@ -174,6 +178,7 @@ static const struct rte_iavf_xstats_name_off rte_iavf_stats_strings[] = {
 	{"tx_broadcast_packets", _OFF_OF(eth_stats.tx_broadcast)},
 	{"tx_dropped_packets", _OFF_OF(eth_stats.tx_discards)},
 	{"tx_error_packets", _OFF_OF(eth_stats.tx_errors)},
+	{"tx_mbuf_error_packets", _OFF_OF(mbuf_stats.tx_pkt_errors)},
 
 	{"inline_ipsec_crypto_ipackets", _OFF_OF(ips_stats.icount)},
 	{"inline_ipsec_crypto_ibytes", _OFF_OF(ips_stats.ibytes)},
@@ -1837,6 +1842,9 @@ iavf_dev_xstats_reset(struct rte_eth_dev *dev)
 	iavf_dev_stats_reset(dev);
 	memset(&vf->vsi.eth_stats_offset.ips_stats, 0,
 			sizeof(struct iavf_ipsec_crypto_stats));
+	memset(&vf->vsi.eth_stats_offset.mbuf_stats, 0,
+			sizeof(struct iavf_mbuf_stats));
+
 	return 0;
 }
 
@@ -1876,6 +1884,19 @@ iavf_dev_update_ipsec_xstats(struct rte_eth_dev *ethdev,
 	}
 }
 
+static void
+iavf_dev_update_mbuf_stats(struct rte_eth_dev *ethdev,
+		struct iavf_mbuf_stats *mbuf_stats)
+{
+	uint16_t idx;
+	struct iavf_tx_queue *txq;
+
+	for (idx = 0; idx < ethdev->data->nb_tx_queues; idx++) {
+		txq = ethdev->data->tx_queues[idx];
+		mbuf_stats->tx_pkt_errors += txq->mbuf_errors;
+	}
+}
+
 static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
 				 struct rte_eth_xstat *xstats, unsigned int n)
 {
@@ -1904,6 +1925,9 @@ static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
 	if (iavf_ipsec_crypto_supported(adapter))
 		iavf_dev_update_ipsec_xstats(dev, &iavf_xtats.ips_stats);
 
+	if (adapter->devargs.mbuf_check)
+		iavf_dev_update_mbuf_stats(dev, &iavf_xtats.mbuf_stats);
+
 	/* loop over xstats array and values from pstats */
 	for (i = 0; i < IAVF_NB_XSTATS; i++) {
 		xstats[i].id = i;
@@ -2286,6 +2310,50 @@ iavf_parse_watchdog_period(__rte_unused const char *key, const char *value, void
 	return 0;
 }
 
+static int
+iavf_parse_mbuf_check(__rte_unused const char *key, const char *value, void *args)
+{
+	char *cur;
+	char *tmp;
+	int str_len;
+	int valid_len;
+
+	int ret = 0;
+	uint64_t *mc_flags = args;
+	char *str2 = strdup(value);
+	if (str2 == NULL)
+		return -1;
+
+	str_len = strlen(str2);
+	if (str2[0] == '[' && str2[str_len - 1] == ']') {
+		if (str_len < 3) {
+			ret = -1;
+			goto mdd_end;
+		}
+		valid_len = str_len - 2;
+		memmove(str2, str2 + 1, valid_len);
+		memset(str2 + valid_len, '\0', 2);
+	}
+	cur = strtok_r(str2, ",", &tmp);
+	while (cur != NULL) {
+		if (!strcmp(cur, "mbuf"))
+			*mc_flags |= IAVF_MBUF_CHECK_F_TX_MBUF;
+		else if (!strcmp(cur, "size"))
+			*mc_flags |= IAVF_MBUF_CHECK_F_TX_SIZE;
+		else if (!strcmp(cur, "segment"))
+			*mc_flags |= IAVF_MBUF_CHECK_F_TX_SEGMENT;
+		else if (!strcmp(cur, "offload"))
+			*mc_flags |= IAVF_MBUF_CHECK_F_TX_OFFLOAD;
+		else
+			PMD_DRV_LOG(ERR, "Unsupported mdd check type: %s", cur);
+		cur = strtok_r(NULL, ",", &tmp);
+	}
+
+mdd_end:
+	free(str2);
+	return ret;
+}
+
 static int iavf_parse_devargs(struct rte_eth_dev *dev)
 {
 	struct iavf_adapter *ad =
@@ -2340,6 +2408,14 @@ static int iavf_parse_devargs(struct rte_eth_dev *dev)
 		goto bail;
 	}
 
+	ret = rte_kvargs_process(kvlist, IAVF_MBUF_CHECK_ARG,
+				 &iavf_parse_mbuf_check, &ad->mc_flags);
+	if (ret)
+		goto bail;
+
+	if (ad->mc_flags)
+		ad->devargs.mbuf_check = 1;
+
 	ret = rte_kvargs_process(kvlist, IAVF_ENABLE_AUTO_RESET_ARG,
 				 &parse_bool, &ad->devargs.auto_reset);
 	if (ret)
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 89db82c694..74dd72ce68 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -3796,6 +3796,97 @@ iavf_xmit_pkts_no_poll(void *tx_queue, struct rte_mbuf **tx_pkts,
 								tx_pkts, nb_pkts);
 }
 
+/* Tx mbuf check */
+static uint16_t
+iavf_xmit_pkts_check(void *tx_queue, struct rte_mbuf **tx_pkts,
+	      uint16_t nb_pkts)
+{
+	uint16_t idx;
+	uint64_t ol_flags;
+	struct rte_mbuf *mb;
+	uint16_t good_pkts = nb_pkts;
+	const char *reason = NULL;
+	bool pkt_error = false;
+	struct iavf_tx_queue *txq = tx_queue;
+	struct iavf_adapter *adapter = txq->vsi->adapter;
+	enum iavf_tx_burst_type tx_burst_type =
+		txq->vsi->adapter->tx_burst_type;
+
+	for (idx = 0; idx < nb_pkts; idx++) {
+		mb = tx_pkts[idx];
+		ol_flags = mb->ol_flags;
+
+		if ((adapter->mc_flags & IAVF_MBUF_CHECK_F_TX_MBUF) &&
+			(rte_mbuf_check(mb, 1, &reason) != 0)) {
+			PMD_TX_LOG(ERR, "INVALID mbuf: %s\n", reason);
+			pkt_error = true;
+			break;
+		}
+
+		if ((adapter->mc_flags & IAVF_MBUF_CHECK_F_TX_SIZE) &&
+			(mb->data_len < IAVF_TX_MIN_PKT_LEN ||
+			mb->data_len > adapter->vf.max_pkt_len)) {
+			PMD_TX_LOG(ERR, "INVALID mbuf: data_len (%u) is out "
+			"of range, reasonable range (%d - %u)\n", mb->data_len,
+			IAVF_TX_MIN_PKT_LEN, adapter->vf.max_pkt_len);
+			pkt_error = true;
+			break;
+		}
+
+		if (adapter->mc_flags & IAVF_MBUF_CHECK_F_TX_SEGMENT) {
+			/* Check condition for nb_segs > IAVF_TX_MAX_MTU_SEG. */
+			if (!(ol_flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG))) {
+				if (mb->nb_segs > IAVF_TX_MAX_MTU_SEG) {
+					PMD_TX_LOG(ERR, "INVALID mbuf: nb_segs (%d) exceeds "
+					"HW limit, maximum allowed value is %d\n", mb->nb_segs,
+					IAVF_TX_MAX_MTU_SEG);
+					pkt_error = true;
+					break;
+				}
+			} else if ((mb->tso_segsz < IAVF_MIN_TSO_MSS) ||
+				(mb->tso_segsz > IAVF_MAX_TSO_MSS)) {
+				/* MSS outside the range is considered malicious */
+				PMD_TX_LOG(ERR, "INVALID mbuf: tso_segsz (%u) is out "
+				"of range, reasonable range (%d - %u)\n", mb->tso_segsz,
+				IAVF_MIN_TSO_MSS, IAVF_MAX_TSO_MSS);
+				pkt_error = true;
+				break;
+			} else if (mb->nb_segs > txq->nb_tx_desc) {
+				PMD_TX_LOG(ERR, "INVALID mbuf: nb_segs out "
+				"of ring length\n");
+				pkt_error = true;
+				break;
+			}
+		}
+
+		if (adapter->mc_flags & IAVF_MBUF_CHECK_F_TX_OFFLOAD) {
+			if (ol_flags & IAVF_TX_OFFLOAD_NOTSUP_MASK) {
+				PMD_TX_LOG(ERR, "INVALID mbuf: TX offload "
+				"is not supported\n");
+				pkt_error = true;
+				break;
+			}
+
+			if (!rte_validate_tx_offload(mb)) {
+				PMD_TX_LOG(ERR, "INVALID mbuf: TX offload "
+				"setup error\n");
+				pkt_error = true;
+				break;
+			}
+		}
+	}
+
+	if (pkt_error) {
+		txq->mbuf_errors++;
+		good_pkts = idx;
+		if (good_pkts == 0)
+			return 0;
+	}
+
+	return iavf_tx_pkt_burst_ops[tx_burst_type](tx_queue,
+								tx_pkts, good_pkts);
+}
+
 /* choose rx function*/
 void
 iavf_set_rx_function(struct rte_eth_dev *dev)
@@ -4041,6 +4132,7 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
 	struct iavf_adapter *adapter =
 		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 	enum iavf_tx_burst_type tx_burst_type;
+	int mbuf_check = adapter->devargs.mbuf_check;
 	int no_poll_on_link_down = adapter->devargs.no_poll_on_link_down;
 #ifdef RTE_ARCH_X86
 	struct iavf_tx_queue *txq;
@@ -4131,6 +4223,9 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
 		if (no_poll_on_link_down) {
 			adapter->tx_burst_type = tx_burst_type;
 			dev->tx_pkt_burst = iavf_xmit_pkts_no_poll;
+		} else if (mbuf_check) {
+			adapter->tx_burst_type = tx_burst_type;
+			dev->tx_pkt_burst = iavf_xmit_pkts_check;
 		} else {
 			dev->tx_pkt_burst = iavf_tx_pkt_burst_ops[tx_burst_type];
 		}
@@ -4147,6 +4242,9 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
 	if (no_poll_on_link_down) {
 		adapter->tx_burst_type = tx_burst_type;
 		dev->tx_pkt_burst = iavf_xmit_pkts_no_poll;
+	} else if (mbuf_check) {
+		adapter->tx_burst_type = tx_burst_type;
+		dev->tx_pkt_burst = iavf_xmit_pkts_check;
 	} else {
 		dev->tx_pkt_burst = iavf_tx_pkt_burst_ops[tx_burst_type];
 	}
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index f432f9d956..90e7291928 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -297,6 +297,8 @@ struct iavf_tx_queue {
 	uint16_t next_rs;              /* next to check DD,  for VPMD */
 	uint16_t ipsec_crypto_pkt_md_offset;
 
+	uint64_t mbuf_errors;
+
 	bool q_set;                    /* if rx queue has been configured */
 	bool tx_deferred_start;        /* don't start this queue in dev start */
 	const struct iavf_txq_ops *ops;
-- 
2.25.1



* [PATCH v9 0/2] net/iavf: fix Rx/Tx burst and add diagnostics
  2024-01-03 10:10                     ` [PATCH v8 2/2] net/iavf: add diagnostic support in TX path Mingjin Ye
@ 2024-01-04 10:18                       ` Mingjin Ye
  2024-01-04 10:18                         ` [PATCH v9 1/2] net/iavf: fix Rx/Tx burst in multi-process Mingjin Ye
  2024-01-04 10:18                         ` [PATCH v9 2/2] net/iavf: add diagnostic support in TX path Mingjin Ye
  2024-01-05  0:44                       ` [PATCH v8 2/2] " Zhang, Qi Z
  1 sibling, 2 replies; 36+ messages in thread
From: Mingjin Ye @ 2024-01-04 10:18 UTC (permalink / raw)
  To: dev; +Cc: qiming.yang, Mingjin Ye

Fixed an Rx/Tx crash in a multi-process environment and
added a Tx diagnostic feature.

Mingjin Ye (2):
  net/iavf: fix Rx/Tx burst in multi-process
  net/iavf: add diagnostic support in TX path

 doc/guides/nics/intel_vf.rst   |   9 ++
 drivers/net/iavf/iavf.h        |  55 ++++++-
 drivers/net/iavf/iavf_ethdev.c |  75 +++++++++
 drivers/net/iavf/iavf_rxtx.c   | 283 ++++++++++++++++++++++++++-------
 drivers/net/iavf/iavf_rxtx.h   |   2 +
 5 files changed, 365 insertions(+), 59 deletions(-)

-- 
2.25.1



* [PATCH v9 1/2] net/iavf: fix Rx/Tx burst in multi-process
  2024-01-04 10:18                       ` [PATCH v9 0/2] net/iavf: fix Rx/Tx burst and add diagnostics Mingjin Ye
@ 2024-01-04 10:18                         ` Mingjin Ye
  2024-01-04 10:18                         ` [PATCH v9 2/2] net/iavf: add diagnostic support in TX path Mingjin Ye
  1 sibling, 0 replies; 36+ messages in thread
From: Mingjin Ye @ 2024-01-04 10:18 UTC (permalink / raw)
  To: dev; +Cc: qiming.yang, Mingjin Ye, stable, Jingjing Wu, Beilei Xing

In a multi-process environment, a secondary process operates on shared
memory and changes the function pointer of the primary process, resulting
in a crash when the primary process cannot find the function address
during an Rx/Tx burst.

Fixes: 5b3124a0a6ef ("net/iavf: support no polling when link down")
Cc: stable@dpdk.org

Signed-off-by: Mingjin Ye <mingjinx.ye@intel.com>
---
v2: Add fix for Rx burst.
---
v3: fix Rx/Tx routing.
---
v4: Fix the ops array.
---
v5: rebase.
---
 drivers/net/iavf/iavf.h      |  43 +++++++-
 drivers/net/iavf/iavf_rxtx.c | 185 ++++++++++++++++++++++++-----------
 2 files changed, 169 insertions(+), 59 deletions(-)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index d273d884f5..ab24cb02c3 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -314,6 +314,45 @@ struct iavf_devargs {
 
 struct iavf_security_ctx;
 
+enum iavf_rx_burst_type {
+	IAVF_RX_DEFAULT,
+	IAVF_RX_FLEX_RXD,
+	IAVF_RX_BULK_ALLOC,
+	IAVF_RX_SCATTERED,
+	IAVF_RX_SCATTERED_FLEX_RXD,
+	IAVF_RX_SSE,
+	IAVF_RX_AVX2,
+	IAVF_RX_AVX2_OFFLOAD,
+	IAVF_RX_SSE_FLEX_RXD,
+	IAVF_RX_AVX2_FLEX_RXD,
+	IAVF_RX_AVX2_FLEX_RXD_OFFLOAD,
+	IAVF_RX_SSE_SCATTERED,
+	IAVF_RX_AVX2_SCATTERED,
+	IAVF_RX_AVX2_SCATTERED_OFFLOAD,
+	IAVF_RX_SSE_SCATTERED_FLEX_RXD,
+	IAVF_RX_AVX2_SCATTERED_FLEX_RXD,
+	IAVF_RX_AVX2_SCATTERED_FLEX_RXD_OFFLOAD,
+	IAVF_RX_AVX512,
+	IAVF_RX_AVX512_OFFLOAD,
+	IAVF_RX_AVX512_FLEX_RXD,
+	IAVF_RX_AVX512_FLEX_RXD_OFFLOAD,
+	IAVF_RX_AVX512_SCATTERED,
+	IAVF_RX_AVX512_SCATTERED_OFFLOAD,
+	IAVF_RX_AVX512_SCATTERED_FLEX_RXD,
+	IAVF_RX_AVX512_SCATTERED_FLEX_RXD_OFFLOAD,
+};
+
+enum iavf_tx_burst_type {
+	IAVF_TX_DEFAULT,
+	IAVF_TX_SSE,
+	IAVF_TX_AVX2,
+	IAVF_TX_AVX2_OFFLOAD,
+	IAVF_TX_AVX512,
+	IAVF_TX_AVX512_OFFLOAD,
+	IAVF_TX_AVX512_CTX,
+	IAVF_TX_AVX512_CTX_OFFLOAD,
+};
+
 /* Structure to store private data for each VF instance. */
 struct iavf_adapter {
 	struct iavf_hw hw;
@@ -329,8 +368,8 @@ struct iavf_adapter {
 	bool stopped;
 	bool closed;
 	bool no_poll;
-	eth_rx_burst_t rx_pkt_burst;
-	eth_tx_burst_t tx_pkt_burst;
+	enum iavf_rx_burst_type rx_burst_type;
+	enum iavf_tx_burst_type tx_burst_type;
 	uint16_t fdir_ref_cnt;
 	struct iavf_devargs devargs;
 };
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index e54fb74b79..f044ad3f26 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -3716,15 +3716,78 @@ iavf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
 	return i;
 }
 
+static
+const eth_rx_burst_t iavf_rx_pkt_burst_ops[] = {
+	[IAVF_RX_DEFAULT] = iavf_recv_pkts,
+	[IAVF_RX_FLEX_RXD] = iavf_recv_pkts_flex_rxd,
+	[IAVF_RX_BULK_ALLOC] = iavf_recv_pkts_bulk_alloc,
+	[IAVF_RX_SCATTERED] = iavf_recv_scattered_pkts,
+	[IAVF_RX_SCATTERED_FLEX_RXD] = iavf_recv_scattered_pkts_flex_rxd,
+#ifdef RTE_ARCH_X86
+	[IAVF_RX_SSE] = iavf_recv_pkts_vec,
+	[IAVF_RX_AVX2] = iavf_recv_pkts_vec_avx2,
+	[IAVF_RX_AVX2_OFFLOAD] = iavf_recv_pkts_vec_avx2_offload,
+	[IAVF_RX_SSE_FLEX_RXD] = iavf_recv_pkts_vec_flex_rxd,
+	[IAVF_RX_AVX2_FLEX_RXD] = iavf_recv_pkts_vec_avx2_flex_rxd,
+	[IAVF_RX_AVX2_FLEX_RXD_OFFLOAD] =
+		iavf_recv_pkts_vec_avx2_flex_rxd_offload,
+	[IAVF_RX_SSE_SCATTERED] = iavf_recv_scattered_pkts_vec,
+	[IAVF_RX_AVX2_SCATTERED] = iavf_recv_scattered_pkts_vec_avx2,
+	[IAVF_RX_AVX2_SCATTERED_OFFLOAD] =
+		iavf_recv_scattered_pkts_vec_avx2_offload,
+	[IAVF_RX_SSE_SCATTERED_FLEX_RXD] =
+		iavf_recv_scattered_pkts_vec_flex_rxd,
+	[IAVF_RX_AVX2_SCATTERED_FLEX_RXD] =
+		iavf_recv_scattered_pkts_vec_avx2_flex_rxd,
+	[IAVF_RX_AVX2_SCATTERED_FLEX_RXD_OFFLOAD] =
+		iavf_recv_scattered_pkts_vec_avx2_flex_rxd_offload,
+#ifdef CC_AVX512_SUPPORT
+	[IAVF_RX_AVX512] = iavf_recv_pkts_vec_avx512,
+	[IAVF_RX_AVX512_OFFLOAD] = iavf_recv_pkts_vec_avx512_offload,
+	[IAVF_RX_AVX512_FLEX_RXD] = iavf_recv_pkts_vec_avx512_flex_rxd,
+	[IAVF_RX_AVX512_FLEX_RXD_OFFLOAD] =
+		iavf_recv_pkts_vec_avx512_flex_rxd_offload,
+	[IAVF_RX_AVX512_SCATTERED] = iavf_recv_scattered_pkts_vec_avx512,
+	[IAVF_RX_AVX512_SCATTERED_OFFLOAD] =
+		iavf_recv_scattered_pkts_vec_avx512_offload,
+	[IAVF_RX_AVX512_SCATTERED_FLEX_RXD] =
+		iavf_recv_scattered_pkts_vec_avx512_flex_rxd,
+	[IAVF_RX_AVX512_SCATTERED_FLEX_RXD_OFFLOAD] =
+		iavf_recv_scattered_pkts_vec_avx512_flex_rxd_offload,
+#endif
+#elif defined RTE_ARCH_ARM
+	[IAVF_RX_SSE] = iavf_recv_pkts_vec,
+#endif
+};
+
+static
+const eth_tx_burst_t iavf_tx_pkt_burst_ops[] = {
+	[IAVF_TX_DEFAULT] = iavf_xmit_pkts,
+#ifdef RTE_ARCH_X86
+	[IAVF_TX_SSE] = iavf_xmit_pkts_vec,
+	[IAVF_TX_AVX2] = iavf_xmit_pkts_vec_avx2,
+	[IAVF_TX_AVX2_OFFLOAD] = iavf_xmit_pkts_vec_avx2_offload,
+#ifdef CC_AVX512_SUPPORT
+	[IAVF_TX_AVX512] = iavf_xmit_pkts_vec_avx512,
+	[IAVF_TX_AVX512_OFFLOAD] = iavf_xmit_pkts_vec_avx512_offload,
+	[IAVF_TX_AVX512_CTX] = iavf_xmit_pkts_vec_avx512_ctx,
+	[IAVF_TX_AVX512_CTX_OFFLOAD] = iavf_xmit_pkts_vec_avx512_ctx_offload,
+#endif
+#endif
+};
+
 static uint16_t
 iavf_recv_pkts_no_poll(void *rx_queue, struct rte_mbuf **rx_pkts,
 				uint16_t nb_pkts)
 {
 	struct iavf_rx_queue *rxq = rx_queue;
+	enum iavf_rx_burst_type rx_burst_type =
+		rxq->vsi->adapter->rx_burst_type;
+
 	if (!rxq->vsi || rxq->vsi->adapter->no_poll)
 		return 0;
 
-	return rxq->vsi->adapter->rx_pkt_burst(rx_queue,
+	return iavf_rx_pkt_burst_ops[rx_burst_type](rx_queue,
 								rx_pkts, nb_pkts);
 }
 
@@ -3733,10 +3796,13 @@ iavf_xmit_pkts_no_poll(void *tx_queue, struct rte_mbuf **tx_pkts,
 				uint16_t nb_pkts)
 {
 	struct iavf_tx_queue *txq = tx_queue;
+	enum iavf_tx_burst_type tx_burst_type =
+		txq->vsi->adapter->tx_burst_type;
+
 	if (!txq->vsi || txq->vsi->adapter->no_poll)
 		return 0;
 
-	return txq->vsi->adapter->tx_pkt_burst(tx_queue,
+	return iavf_tx_pkt_burst_ops[tx_burst_type](tx_queue,
 								tx_pkts, nb_pkts);
 }
 
@@ -3747,6 +3813,7 @@ iavf_set_rx_function(struct rte_eth_dev *dev)
 	struct iavf_adapter *adapter =
 		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+	enum iavf_rx_burst_type rx_burst_type;
 	int no_poll_on_link_down = adapter->devargs.no_poll_on_link_down;
 	int i;
 	struct iavf_rx_queue *rxq;
@@ -3817,43 +3884,43 @@ iavf_set_rx_function(struct rte_eth_dev *dev)
 				}
 			}
 			if (use_flex) {
-				dev->rx_pkt_burst = iavf_recv_scattered_pkts_vec_flex_rxd;
+				rx_burst_type = IAVF_RX_SSE_SCATTERED_FLEX_RXD;
 				if (use_avx2) {
 					if (check_ret == IAVF_VECTOR_PATH)
-						dev->rx_pkt_burst =
-							iavf_recv_scattered_pkts_vec_avx2_flex_rxd;
+						rx_burst_type =
+							IAVF_RX_AVX2_SCATTERED_FLEX_RXD;
 					else
-						dev->rx_pkt_burst =
-							iavf_recv_scattered_pkts_vec_avx2_flex_rxd_offload;
+						rx_burst_type =
+							IAVF_RX_AVX2_SCATTERED_FLEX_RXD_OFFLOAD;
 				}
 #ifdef CC_AVX512_SUPPORT
 				if (use_avx512) {
 					if (check_ret == IAVF_VECTOR_PATH)
-						dev->rx_pkt_burst =
-							iavf_recv_scattered_pkts_vec_avx512_flex_rxd;
+						rx_burst_type =
+							IAVF_RX_AVX512_SCATTERED_FLEX_RXD;
 					else
-						dev->rx_pkt_burst =
-							iavf_recv_scattered_pkts_vec_avx512_flex_rxd_offload;
+						rx_burst_type =
+							IAVF_RX_AVX512_SCATTERED_FLEX_RXD_OFFLOAD;
 				}
 #endif
 			} else {
-				dev->rx_pkt_burst = iavf_recv_scattered_pkts_vec;
+				rx_burst_type = IAVF_RX_SSE_SCATTERED;
 				if (use_avx2) {
 					if (check_ret == IAVF_VECTOR_PATH)
-						dev->rx_pkt_burst =
-							iavf_recv_scattered_pkts_vec_avx2;
+						rx_burst_type =
+							IAVF_RX_AVX2_SCATTERED;
 					else
-						dev->rx_pkt_burst =
-							iavf_recv_scattered_pkts_vec_avx2_offload;
+						rx_burst_type =
+							IAVF_RX_AVX2_SCATTERED_OFFLOAD;
 				}
 #ifdef CC_AVX512_SUPPORT
 				if (use_avx512) {
 					if (check_ret == IAVF_VECTOR_PATH)
-						dev->rx_pkt_burst =
-							iavf_recv_scattered_pkts_vec_avx512;
+						rx_burst_type =
+							IAVF_RX_AVX512_SCATTERED;
 					else
-						dev->rx_pkt_burst =
-							iavf_recv_scattered_pkts_vec_avx512_offload;
+						rx_burst_type =
+							IAVF_RX_AVX512_SCATTERED_OFFLOAD;
 				}
 #endif
 			}
@@ -3883,51 +3950,46 @@ iavf_set_rx_function(struct rte_eth_dev *dev)
 				}
 			}
 			if (use_flex) {
-				dev->rx_pkt_burst = iavf_recv_pkts_vec_flex_rxd;
+				rx_burst_type = IAVF_RX_SSE_FLEX_RXD;
 				if (use_avx2) {
 					if (check_ret == IAVF_VECTOR_PATH)
-						dev->rx_pkt_burst =
-							iavf_recv_pkts_vec_avx2_flex_rxd;
+						rx_burst_type = IAVF_RX_AVX2_FLEX_RXD;
 					else
-						dev->rx_pkt_burst =
-							iavf_recv_pkts_vec_avx2_flex_rxd_offload;
+						rx_burst_type = IAVF_RX_AVX2_FLEX_RXD_OFFLOAD;
 				}
 #ifdef CC_AVX512_SUPPORT
 				if (use_avx512) {
 					if (check_ret == IAVF_VECTOR_PATH)
-						dev->rx_pkt_burst =
-							iavf_recv_pkts_vec_avx512_flex_rxd;
+						rx_burst_type = IAVF_RX_AVX512_FLEX_RXD;
 					else
-						dev->rx_pkt_burst =
-							iavf_recv_pkts_vec_avx512_flex_rxd_offload;
+						rx_burst_type =
+							IAVF_RX_AVX512_FLEX_RXD_OFFLOAD;
 				}
 #endif
 			} else {
-				dev->rx_pkt_burst = iavf_recv_pkts_vec;
+				rx_burst_type = IAVF_RX_SSE;
 				if (use_avx2) {
 					if (check_ret == IAVF_VECTOR_PATH)
-						dev->rx_pkt_burst =
-							iavf_recv_pkts_vec_avx2;
+						rx_burst_type = IAVF_RX_AVX2;
 					else
-						dev->rx_pkt_burst =
-							iavf_recv_pkts_vec_avx2_offload;
+						rx_burst_type = IAVF_RX_AVX2_OFFLOAD;
 				}
 #ifdef CC_AVX512_SUPPORT
 				if (use_avx512) {
 					if (check_ret == IAVF_VECTOR_PATH)
-						dev->rx_pkt_burst =
-							iavf_recv_pkts_vec_avx512;
+						rx_burst_type = IAVF_RX_AVX512;
 					else
-						dev->rx_pkt_burst =
-							iavf_recv_pkts_vec_avx512_offload;
+						rx_burst_type = IAVF_RX_AVX512_OFFLOAD;
 				}
 #endif
 			}
 		}
 
 		if (no_poll_on_link_down) {
-			adapter->rx_pkt_burst = dev->rx_pkt_burst;
+			adapter->rx_burst_type = rx_burst_type;
 			dev->rx_pkt_burst = iavf_recv_pkts_no_poll;
+		} else {
+			dev->rx_pkt_burst = iavf_rx_pkt_burst_ops[rx_burst_type];
 		}
 		return;
 	}
@@ -3943,11 +4005,13 @@ iavf_set_rx_function(struct rte_eth_dev *dev)
 			rxq = dev->data->rx_queues[i];
 			(void)iavf_rxq_vec_setup(rxq);
 		}
-		dev->rx_pkt_burst = iavf_recv_pkts_vec;
+		rx_burst_type = IAVF_RX_SSE;
 
 		if (no_poll_on_link_down) {
-			adapter->rx_pkt_burst = dev->rx_pkt_burst;
+			adapter->rx_burst_type = rx_burst_type;
 			dev->rx_pkt_burst = iavf_recv_pkts_no_poll;
+		} else {
+			dev->rx_pkt_burst = iavf_rx_pkt_burst_ops[rx_burst_type];
 		}
 		return;
 	}
@@ -3956,25 +4020,27 @@ iavf_set_rx_function(struct rte_eth_dev *dev)
 		PMD_DRV_LOG(DEBUG, "Using a Scattered Rx callback (port=%d).",
 			    dev->data->port_id);
 		if (use_flex)
-			dev->rx_pkt_burst = iavf_recv_scattered_pkts_flex_rxd;
+			rx_burst_type = IAVF_RX_SCATTERED_FLEX_RXD;
 		else
-			dev->rx_pkt_burst = iavf_recv_scattered_pkts;
+			rx_burst_type = IAVF_RX_SCATTERED;
 	} else if (adapter->rx_bulk_alloc_allowed) {
 		PMD_DRV_LOG(DEBUG, "Using bulk Rx callback (port=%d).",
 			    dev->data->port_id);
-		dev->rx_pkt_burst = iavf_recv_pkts_bulk_alloc;
+		rx_burst_type = IAVF_RX_BULK_ALLOC;
 	} else {
 		PMD_DRV_LOG(DEBUG, "Using Basic Rx callback (port=%d).",
 			    dev->data->port_id);
 		if (use_flex)
-			dev->rx_pkt_burst = iavf_recv_pkts_flex_rxd;
+			rx_burst_type = IAVF_RX_FLEX_RXD;
 		else
-			dev->rx_pkt_burst = iavf_recv_pkts;
+			rx_burst_type = IAVF_RX_DEFAULT;
 	}
 
 	if (no_poll_on_link_down) {
-		adapter->rx_pkt_burst = dev->rx_pkt_burst;
+		adapter->rx_burst_type = rx_burst_type;
 		dev->rx_pkt_burst = iavf_recv_pkts_no_poll;
+	} else {
+		dev->rx_pkt_burst = iavf_rx_pkt_burst_ops[rx_burst_type];
 	}
 }
 
@@ -3984,6 +4050,7 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
 {
 	struct iavf_adapter *adapter =
 		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	enum iavf_tx_burst_type tx_burst_type;
 	int no_poll_on_link_down = adapter->devargs.no_poll_on_link_down;
 #ifdef RTE_ARCH_X86
 	struct iavf_tx_queue *txq;
@@ -4019,11 +4086,11 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
 		if (use_sse) {
 			PMD_DRV_LOG(DEBUG, "Using Vector Tx (port %d).",
 				    dev->data->port_id);
-			dev->tx_pkt_burst = iavf_xmit_pkts_vec;
+			tx_burst_type = IAVF_TX_SSE;
 		}
 		if (use_avx2) {
 			if (check_ret == IAVF_VECTOR_PATH) {
-				dev->tx_pkt_burst = iavf_xmit_pkts_vec_avx2;
+				tx_burst_type = IAVF_TX_AVX2;
 				PMD_DRV_LOG(DEBUG, "Using AVX2 Vector Tx (port %d).",
 					    dev->data->port_id);
 			} else if (check_ret == IAVF_VECTOR_CTX_OFFLOAD_PATH) {
@@ -4031,7 +4098,7 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
 					"AVX2 does not support outer checksum offload.");
 				goto normal;
 			} else {
-				dev->tx_pkt_burst = iavf_xmit_pkts_vec_avx2_offload;
+				tx_burst_type = IAVF_TX_AVX2_OFFLOAD;
 				dev->tx_pkt_prepare = iavf_prep_pkts;
 				PMD_DRV_LOG(DEBUG, "Using AVX2 OFFLOAD Vector Tx (port %d).",
 					    dev->data->port_id);
@@ -4040,21 +4107,21 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
 #ifdef CC_AVX512_SUPPORT
 		if (use_avx512) {
 			if (check_ret == IAVF_VECTOR_PATH) {
-				dev->tx_pkt_burst = iavf_xmit_pkts_vec_avx512;
+				tx_burst_type = IAVF_TX_AVX512;
 				PMD_DRV_LOG(DEBUG, "Using AVX512 Vector Tx (port %d).",
 					    dev->data->port_id);
 			} else if (check_ret == IAVF_VECTOR_OFFLOAD_PATH) {
-				dev->tx_pkt_burst = iavf_xmit_pkts_vec_avx512_offload;
+				tx_burst_type = IAVF_TX_AVX512_OFFLOAD;
 				dev->tx_pkt_prepare = iavf_prep_pkts;
 				PMD_DRV_LOG(DEBUG, "Using AVX512 OFFLOAD Vector Tx (port %d).",
 					    dev->data->port_id);
 			} else if (check_ret == IAVF_VECTOR_CTX_PATH) {
-				dev->tx_pkt_burst = iavf_xmit_pkts_vec_avx512_ctx;
+				tx_burst_type = IAVF_TX_AVX512_CTX;
 				dev->tx_pkt_prepare = iavf_prep_pkts;
 				PMD_DRV_LOG(DEBUG, "Using AVX512 CONTEXT Vector Tx (port %d).",
 						dev->data->port_id);
 			} else {
-				dev->tx_pkt_burst = iavf_xmit_pkts_vec_avx512_ctx_offload;
+				tx_burst_type = IAVF_TX_AVX512_CTX_OFFLOAD;
 				dev->tx_pkt_prepare = iavf_prep_pkts;
 				PMD_DRV_LOG(DEBUG, "Using AVX512 CONTEXT OFFLOAD Vector Tx (port %d).",
 					    dev->data->port_id);
@@ -4077,8 +4144,10 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
 		}
 
 		if (no_poll_on_link_down) {
-			adapter->tx_pkt_burst = dev->tx_pkt_burst;
+			adapter->tx_burst_type = tx_burst_type;
 			dev->tx_pkt_burst = iavf_xmit_pkts_no_poll;
+		} else {
+			dev->tx_pkt_burst = iavf_tx_pkt_burst_ops[tx_burst_type];
 		}
 		return;
 	}
@@ -4087,12 +4156,14 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
 #endif
 	PMD_DRV_LOG(DEBUG, "Using Basic Tx callback (port=%d).",
 		    dev->data->port_id);
-	dev->tx_pkt_burst = iavf_xmit_pkts;
+	tx_burst_type = IAVF_TX_DEFAULT;
 	dev->tx_pkt_prepare = iavf_prep_pkts;
 
 	if (no_poll_on_link_down) {
-		adapter->tx_pkt_burst = dev->tx_pkt_burst;
+		adapter->tx_burst_type = tx_burst_type;
 		dev->tx_pkt_burst = iavf_xmit_pkts_no_poll;
+	} else {
+		dev->tx_pkt_burst = iavf_tx_pkt_burst_ops[tx_burst_type];
 	}
 }
 
-- 
2.25.1
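
The hunks above replace direct assignment of dev->rx_pkt_burst with an
enum plus a lookup in a shared burst-ops table, so that wrappers such as
iavf_recv_pkts_no_poll can recover the real burst function in any
process. Below is a minimal standalone sketch of that dispatch pattern;
every name and type in it is a simplified stand-in, not the driver's
actual definition:

#include <stdint.h>
#include <stddef.h>

struct pkt { int len; };	/* stand-in for struct rte_mbuf */
typedef uint16_t (*rx_burst_t)(void *rxq, struct pkt **pkts, uint16_t n);

enum rx_burst_type { RX_SCALAR, RX_VECTOR, RX_NUM };

static uint16_t
rx_scalar(void *rxq, struct pkt **pkts, uint16_t n)
{
	(void)rxq; (void)pkts;
	return n;	/* pretend n packets were received */
}

static uint16_t
rx_vector(void *rxq, struct pkt **pkts, uint16_t n)
{
	(void)rxq; (void)pkts;
	return n;
}

/* Every process image builds the same table, so an enum stored in
 * shared adapter state is valid in both primary and secondary
 * processes, unlike a raw function pointer. */
static const rx_burst_t rx_burst_ops[RX_NUM] = {
	[RX_SCALAR] = rx_scalar,
	[RX_VECTOR] = rx_vector,
};

struct adapter {
	enum rx_burst_type rx_burst_type;	/* selected data path */
	int link_up;
};

/* Wrapper in the style of iavf_recv_pkts_no_poll: it recovers the
 * real burst function from the table instead of chaining pointers. */
static uint16_t
rx_no_poll(struct adapter *ad, void *rxq, struct pkt **pkts, uint16_t n)
{
	if (!ad->link_up)
		return 0;
	return rx_burst_ops[ad->rx_burst_type](rxq, pkts, n);
}

int
main(void)
{
	struct adapter ad = { .rx_burst_type = RX_VECTOR, .link_up = 1 };
	struct pkt *burst[4] = { NULL };

	return rx_no_poll(&ad, NULL, burst, 4) == 4 ? 0 : 1;
}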



* [PATCH v9 2/2] net/iavf: add diagnostic support in TX path
  2024-01-04 10:18                       ` [PATCH v9 0/2] net/iavf: fix Rx/Tx burst and add diagnostics Mingjin Ye
  2024-01-04 10:18                         ` [PATCH v9 1/2] net/iavf: fix Rx/Tx burst in multi-process Mingjin Ye
@ 2024-01-04 10:18                         ` Mingjin Ye
  2024-01-05  9:58                           ` [PATCH v10] " Mingjin Ye
  1 sibling, 1 reply; 36+ messages in thread
From: Mingjin Ye @ 2024-01-04 10:18 UTC (permalink / raw)
  To: dev
  Cc: qiming.yang, Mingjin Ye, Wenjun Wu, Simei Su, Yuying Zhang,
	Beilei Xing, Jingjing Wu

The only way to enable diagnostics for the TX path is to modify the
application source code, making it difficult to diagnose faults.

In this patch, the devarg option "mbuf_check" is introduced; its
parameters select which of the corresponding diagnostics to enable.

Supported cases: mbuf, size, segment, offload.
 1. mbuf: check for corrupted mbufs.
 2. size: check min/max packet length against the hw spec.
 3. segment: check that the number of mbuf segments does not exceed the hw limit.
 4. offload: check for any unsupported offload flags.

parameter format: mbuf_check=[mbuf,<case1>,<case2>]
eg: dpdk-testpmd -a 0000:81:01.0,mbuf_check=[mbuf,size] -- -i
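
For illustration only, an application can pass the same devarg through
its own EAL arguments instead of the testpmd command line. A minimal
sketch; the program name and PCI address below are placeholders:

#include <rte_eal.h>

int
main(void)
{
	/* Equivalent of: dpdk-testpmd -a 0000:81:01.0,mbuf_check=[mbuf,size]
	 * Substitute the VF's real PCI address. */
	char *eal_argv[] = {
		"demo",
		"-a", "0000:81:01.0,mbuf_check=[mbuf,size]",
	};

	if (rte_eal_init(3, eal_argv) < 0)
		return 1;

	/* ... normal port setup and Tx; rejected mbufs will now be
	 * counted per queue and surfaced through xstats ... */

	rte_eal_cleanup();
	return 0;
}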

Signed-off-by: Mingjin Ye <mingjinx.ye@intel.com>
---
v2: Remove call chain.
---
v3: Optimisation implementation.
---
v4: Fix Windows os compilation error.
---
v5: Split Patch.
---
v6: Remove strict.
---
v8: Modify the description document.
---
 doc/guides/nics/intel_vf.rst   |  9 ++++
 drivers/net/iavf/iavf.h        | 12 +++++
 drivers/net/iavf/iavf_ethdev.c | 75 ++++++++++++++++++++++++++
 drivers/net/iavf/iavf_rxtx.c   | 98 ++++++++++++++++++++++++++++++++++
 drivers/net/iavf/iavf_rxtx.h   |  2 +
 5 files changed, 196 insertions(+)

diff --git a/doc/guides/nics/intel_vf.rst b/doc/guides/nics/intel_vf.rst
index ce96c2e1f8..bf6936082e 100644
--- a/doc/guides/nics/intel_vf.rst
+++ b/doc/guides/nics/intel_vf.rst
@@ -111,6 +111,15 @@ For more detail on SR-IOV, please refer to the following documents:
     by setting the ``devargs`` parameter like ``-a 18:01.0,no-poll-on-link-down=1``
     when IAVF is backed by an Intel\ |reg| E810 device or an Intel\ |reg| 700 Series Ethernet device.
 
+    When IAVF is backed by an Intel\ |reg| E810 device or an Intel\ |reg| 700 Series Ethernet device,
+    set the ``devargs`` parameter ``mbuf_check`` to enable TX diagnostics. For example,
+    ``-a 18:01.0,mbuf_check=mbuf`` or ``-a 18:01.0,mbuf_check=[mbuf,size]``. Supported cases:
+
+    *   mbuf: Check for corrupted mbufs.
+    *   size: Check min/max packet length against the hw spec.
+    *   segment: Check that the number of mbuf segments does not exceed the hw limit.
+    *   offload: Check for any unsupported offload flags.
+
 The PCIE host-interface of Intel Ethernet Switch FM10000 Series VF infrastructure
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index ab24cb02c3..23c0496d54 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -114,9 +114,14 @@ struct iavf_ipsec_crypto_stats {
 	} ierrors;
 };
 
+struct iavf_mbuf_stats {
+	uint64_t tx_pkt_errors;
+};
+
 struct iavf_eth_xstats {
 	struct virtchnl_eth_stats eth_stats;
 	struct iavf_ipsec_crypto_stats ips_stats;
+	struct iavf_mbuf_stats mbuf_stats;
 };
 
 /* Structure that defines a VSI, associated with a adapter. */
@@ -310,6 +315,7 @@ struct iavf_devargs {
 	uint32_t watchdog_period;
 	int auto_reset;
 	int no_poll_on_link_down;
+	int mbuf_check;
 };
 
 struct iavf_security_ctx;
@@ -353,6 +359,11 @@ enum iavf_tx_burst_type {
 	IAVF_TX_AVX512_CTX_OFFLOAD,
 };
 
+#define IAVF_MBUF_CHECK_F_TX_MBUF        (1ULL << 0)
+#define IAVF_MBUF_CHECK_F_TX_SIZE        (1ULL << 1)
+#define IAVF_MBUF_CHECK_F_TX_SEGMENT     (1ULL << 2)
+#define IAVF_MBUF_CHECK_F_TX_OFFLOAD     (1ULL << 3)
+
 /* Structure to store private data for each VF instance. */
 struct iavf_adapter {
 	struct iavf_hw hw;
@@ -370,6 +381,7 @@ struct iavf_adapter {
 	bool no_poll;
 	enum iavf_rx_burst_type rx_burst_type;
 	enum iavf_tx_burst_type tx_burst_type;
+	uint64_t mc_flags; /* mbuf check flags. */
 	uint16_t fdir_ref_cnt;
 	struct iavf_devargs devargs;
 };
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index 1fb876e827..903a43d004 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -13,6 +13,7 @@
 #include <inttypes.h>
 #include <rte_byteorder.h>
 #include <rte_common.h>
+#include <rte_os_shim.h>
 
 #include <rte_interrupts.h>
 #include <rte_debug.h>
@@ -39,6 +40,7 @@
 #define IAVF_RESET_WATCHDOG_ARG    "watchdog_period"
 #define IAVF_ENABLE_AUTO_RESET_ARG "auto_reset"
 #define IAVF_NO_POLL_ON_LINK_DOWN_ARG "no-poll-on-link-down"
+#define IAVF_MBUF_CHECK_ARG       "mbuf_check"
 uint64_t iavf_timestamp_dynflag;
 int iavf_timestamp_dynfield_offset = -1;
 int rte_pmd_iavf_tx_lldp_dynfield_offset = -1;
@@ -49,6 +51,7 @@ static const char * const iavf_valid_args[] = {
 	IAVF_RESET_WATCHDOG_ARG,
 	IAVF_ENABLE_AUTO_RESET_ARG,
 	IAVF_NO_POLL_ON_LINK_DOWN_ARG,
+	IAVF_MBUF_CHECK_ARG,
 	NULL
 };
 
@@ -175,6 +178,7 @@ static const struct rte_iavf_xstats_name_off rte_iavf_stats_strings[] = {
 	{"tx_broadcast_packets", _OFF_OF(eth_stats.tx_broadcast)},
 	{"tx_dropped_packets", _OFF_OF(eth_stats.tx_discards)},
 	{"tx_error_packets", _OFF_OF(eth_stats.tx_errors)},
+	{"tx_mbuf_error_packets", _OFF_OF(mbuf_stats.tx_pkt_errors)},
 
 	{"inline_ipsec_crypto_ipackets", _OFF_OF(ips_stats.icount)},
 	{"inline_ipsec_crypto_ibytes", _OFF_OF(ips_stats.ibytes)},
@@ -1841,6 +1845,9 @@ iavf_dev_xstats_reset(struct rte_eth_dev *dev)
 	iavf_dev_stats_reset(dev);
 	memset(&vf->vsi.eth_stats_offset.ips_stats, 0,
 			sizeof(struct iavf_ipsec_crypto_stats));
+	memset(&vf->vsi.eth_stats_offset.mbuf_stats, 0,
+			sizeof(struct iavf_mbuf_stats));
+
 	return 0;
 }
 
@@ -1880,6 +1887,19 @@ iavf_dev_update_ipsec_xstats(struct rte_eth_dev *ethdev,
 	}
 }
 
+static void
+iavf_dev_update_mbuf_stats(struct rte_eth_dev *ethdev,
+		struct iavf_mbuf_stats *mbuf_stats)
+{
+	uint16_t idx;
+	struct iavf_tx_queue *txq;
+
+	for (idx = 0; idx < ethdev->data->nb_tx_queues; idx++) {
+		txq = ethdev->data->tx_queues[idx];
+		mbuf_stats->tx_pkt_errors += txq->mbuf_errors;
+	}
+}
+
 static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
 				 struct rte_eth_xstat *xstats, unsigned int n)
 {
@@ -1908,6 +1928,9 @@ static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
 	if (iavf_ipsec_crypto_supported(adapter))
 		iavf_dev_update_ipsec_xstats(dev, &iavf_xtats.ips_stats);
 
+	if (adapter->devargs.mbuf_check)
+		iavf_dev_update_mbuf_stats(dev, &iavf_xtats.mbuf_stats);
+
 	/* loop over xstats array and values from pstats */
 	for (i = 0; i < IAVF_NB_XSTATS; i++) {
 		xstats[i].id = i;
@@ -2290,6 +2313,50 @@ iavf_parse_watchdog_period(__rte_unused const char *key, const char *value, void
 	return 0;
 }
 
+static int
+iavf_parse_mbuf_check(__rte_unused const char *key, const char *value, void *args)
+{
+	char *cur;
+	char *tmp;
+	int str_len;
+	int valid_len;
+
+	int ret = 0;
+	uint64_t *mc_flags = args;
+	char *str2 = strdup(value);
+	if (str2 == NULL)
+		return -1;
+
+	str_len = strlen(str2);
+	if (str2[0] == '[' && str2[str_len - 1] == ']') {
+		if (str_len < 3) {
+			ret = -1;
+			goto mdd_end;
+		}
+		valid_len = str_len - 2;
+		memmove(str2, str2 + 1, valid_len);
+		memset(str2 + valid_len, '\0', 2);
+	}
+	cur = strtok_r(str2, ",", &tmp);
+	while (cur != NULL) {
+		if (!strcmp(cur, "mbuf"))
+			*mc_flags |= IAVF_MBUF_CHECK_F_TX_MBUF;
+		else if (!strcmp(cur, "size"))
+			*mc_flags |= IAVF_MBUF_CHECK_F_TX_SIZE;
+		else if (!strcmp(cur, "segment"))
+			*mc_flags |= IAVF_MBUF_CHECK_F_TX_SEGMENT;
+		else if (!strcmp(cur, "offload"))
+			*mc_flags |= IAVF_MBUF_CHECK_F_TX_OFFLOAD;
+		else
+			PMD_DRV_LOG(ERR, "Unsupported mdd check type: %s", cur);
+		cur = strtok_r(NULL, ",", &tmp);
+	}
+
+mdd_end:
+	free(str2);
+	return ret;
+}
+
 static int iavf_parse_devargs(struct rte_eth_dev *dev)
 {
 	struct iavf_adapter *ad =
@@ -2344,6 +2411,14 @@ static int iavf_parse_devargs(struct rte_eth_dev *dev)
 		goto bail;
 	}
 
+	ret = rte_kvargs_process(kvlist, IAVF_MBUF_CHECK_ARG,
+				 &iavf_parse_mbuf_check, &ad->mc_flags);
+	if (ret)
+		goto bail;
+
+	if (ad->mc_flags)
+		ad->devargs.mbuf_check = 1;
+
 	ret = rte_kvargs_process(kvlist, IAVF_ENABLE_AUTO_RESET_ARG,
 				 &parse_bool, &ad->devargs.auto_reset);
 	if (ret)
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index f044ad3f26..54dd4cc23d 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -3806,6 +3806,97 @@ iavf_xmit_pkts_no_poll(void *tx_queue, struct rte_mbuf **tx_pkts,
 								tx_pkts, nb_pkts);
 }
 
+/* Tx mbuf check */
+static uint16_t
+iavf_xmit_pkts_check(void *tx_queue, struct rte_mbuf **tx_pkts,
+	      uint16_t nb_pkts)
+{
+	uint16_t idx;
+	uint64_t ol_flags;
+	struct rte_mbuf *mb;
+	uint16_t good_pkts = nb_pkts;
+	const char *reason = NULL;
+	bool pkt_error = false;
+	struct iavf_tx_queue *txq = tx_queue;
+	struct iavf_adapter *adapter = txq->vsi->adapter;
+	enum iavf_tx_burst_type tx_burst_type =
+		txq->vsi->adapter->tx_burst_type;
+
+	for (idx = 0; idx < nb_pkts; idx++) {
+		mb = tx_pkts[idx];
+		ol_flags = mb->ol_flags;
+
+		if ((adapter->mc_flags & IAVF_MBUF_CHECK_F_TX_MBUF) &&
+			(rte_mbuf_check(mb, 1, &reason) != 0)) {
+			PMD_TX_LOG(ERR, "INVALID mbuf: %s\n", reason);
+			pkt_error = true;
+			break;
+		}
+
+		if ((adapter->mc_flags & IAVF_MBUF_CHECK_F_TX_SIZE) &&
+			(mb->data_len < IAVF_TX_MIN_PKT_LEN ||
+			mb->data_len > adapter->vf.max_pkt_len)) {
+			PMD_TX_LOG(ERR, "INVALID mbuf: data_len (%u) is out "
+			"of range, reasonable range (%d - %u)\n", mb->data_len,
+			IAVF_TX_MIN_PKT_LEN, adapter->vf.max_pkt_len);
+			pkt_error = true;
+			break;
+		}
+
+		if (adapter->mc_flags & IAVF_MBUF_CHECK_F_TX_SEGMENT) {
+			/* Check condition for nb_segs > IAVF_TX_MAX_MTU_SEG. */
+			if (!(ol_flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG))) {
+				if (mb->nb_segs > IAVF_TX_MAX_MTU_SEG) {
+					PMD_TX_LOG(ERR, "INVALID mbuf: nb_segs (%d) exceeds "
+					"HW limit, maximum allowed value is %d\n", mb->nb_segs,
+					IAVF_TX_MAX_MTU_SEG);
+					pkt_error = true;
+					break;
+				}
+			} else if ((mb->tso_segsz < IAVF_MIN_TSO_MSS) ||
+				(mb->tso_segsz > IAVF_MAX_TSO_MSS)) {
+				/* MSS outside the range are considered malicious */
+				PMD_TX_LOG(ERR, "INVALID mbuf: tso_segsz (%u) is out "
+				"of range, reasonable range (%d - %u)\n", mb->tso_segsz,
+				IAVF_MIN_TSO_MSS, IAVF_MAX_TSO_MSS);
+				pkt_error = true;
+				break;
+			} else if (mb->nb_segs > txq->nb_tx_desc) {
+				PMD_TX_LOG(ERR, "INVALID mbuf: nb_segs out "
+				"of ring length\n");
+				pkt_error = true;
+				break;
+			}
+		}
+
+		if (adapter->mc_flags & IAVF_MBUF_CHECK_F_TX_OFFLOAD) {
+			if (ol_flags & IAVF_TX_OFFLOAD_NOTSUP_MASK) {
+				PMD_TX_LOG(ERR, "INVALID mbuf: TX offload "
+				"is not supported\n");
+				pkt_error = true;
+				break;
+			}
+
+			if (!rte_validate_tx_offload(mb)) {
+				PMD_TX_LOG(ERR, "INVALID mbuf: TX offload "
+				"setup error\n");
+				pkt_error = true;
+				break;
+			}
+		}
+	}
+
+	if (pkt_error) {
+		txq->mbuf_errors++;
+		good_pkts = idx;
+		if (good_pkts == 0)
+			return 0;
+	}
+
+	return iavf_tx_pkt_burst_ops[tx_burst_type](tx_queue,
+								tx_pkts, good_pkts);
+}
+
 /* choose rx function*/
 void
 iavf_set_rx_function(struct rte_eth_dev *dev)
@@ -4051,6 +4142,7 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
 	struct iavf_adapter *adapter =
 		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 	enum iavf_tx_burst_type tx_burst_type;
+	int mbuf_check = adapter->devargs.mbuf_check;
 	int no_poll_on_link_down = adapter->devargs.no_poll_on_link_down;
 #ifdef RTE_ARCH_X86
 	struct iavf_tx_queue *txq;
@@ -4146,6 +4238,9 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
 		if (no_poll_on_link_down) {
 			adapter->tx_burst_type = tx_burst_type;
 			dev->tx_pkt_burst = iavf_xmit_pkts_no_poll;
+		} else if (mbuf_check) {
+			adapter->tx_burst_type = tx_burst_type;
+			dev->tx_pkt_burst = iavf_xmit_pkts_check;
 		} else {
 			dev->tx_pkt_burst = iavf_tx_pkt_burst_ops[tx_burst_type];
 		}
@@ -4162,6 +4257,9 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
 	if (no_poll_on_link_down) {
 		adapter->tx_burst_type = tx_burst_type;
 		dev->tx_pkt_burst = iavf_xmit_pkts_no_poll;
+	} else if (mbuf_check) {
+		adapter->tx_burst_type = tx_burst_type;
+		dev->tx_pkt_burst = iavf_xmit_pkts_check;
 	} else {
 		dev->tx_pkt_burst = iavf_tx_pkt_burst_ops[tx_burst_type];
 	}
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index a8af263d59..7b56076d32 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -306,6 +306,8 @@ struct iavf_tx_queue {
 	uint16_t next_rs;              /* next to check DD,  for VPMD */
 	uint16_t ipsec_crypto_pkt_md_offset;
 
+	uint64_t mbuf_errors;
+
 	bool q_set;                    /* if rx queue has been configured */
 	bool tx_deferred_start;        /* don't start this queue in dev start */
 	const struct iavf_txq_ops *ops;
-- 
2.25.1
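
Since the new tx_mbuf_error_packets counter is exposed only through
xstats, an application retrieves it by name. A sketch using the generic
ethdev xstats API; it assumes the port is already configured and
started, and abbreviates error handling:

#include <string.h>
#include <stdint.h>
#include <rte_ethdev.h>

/* Return the tx_mbuf_error_packets value for port_id, or 0 if the
 * counter is absent (e.g. mbuf_check was not enabled). */
static uint64_t
get_tx_mbuf_errors(uint16_t port_id)
{
	int i, n = rte_eth_xstats_get_names(port_id, NULL, 0);

	if (n <= 0)
		return 0;

	struct rte_eth_xstat_name names[n];
	struct rte_eth_xstat vals[n];

	if (rte_eth_xstats_get_names(port_id, names, n) != n ||
	    rte_eth_xstats_get(port_id, vals, n) != n)
		return 0;

	for (i = 0; i < n; i++)
		if (strcmp(names[i].name, "tx_mbuf_error_packets") == 0)
			return vals[i].value;
	return 0;
}

From testpmd the same value is visible via "show port xstats all", as
the documentation hunk above notes.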



* RE: [PATCH v8 2/2] net/iavf: add diagnostic support in TX path
  2024-01-03 10:10                     ` [PATCH v8 2/2] net/iavf: add diagnostic support in TX path Mingjin Ye
  2024-01-04 10:18                       ` [PATCH v9 0/2] net/iavf: fix Rx/Tx burst and add diagnostics Mingjin Ye
@ 2024-01-05  0:44                       ` Zhang, Qi Z
  1 sibling, 0 replies; 36+ messages in thread
From: Zhang, Qi Z @ 2024-01-05  0:44 UTC (permalink / raw)
  To: Ye, MingjinX, dev
  Cc: Yang, Qiming, Ye, MingjinX, Su, Simei, Wu, Wenjun1, Zhang,
	Yuying, Xing, Beilei, Wu, Jingjing



> -----Original Message-----
> From: Mingjin Ye <mingjinx.ye@intel.com>
> Sent: Wednesday, January 3, 2024 6:11 PM
> To: dev@dpdk.org
> Cc: Yang, Qiming <qiming.yang@intel.com>; Ye, MingjinX
> <mingjinx.ye@intel.com>; Su, Simei <simei.su@intel.com>; Wu, Wenjun1
> <wenjun1.wu@intel.com>; Zhang, Yuying <yuying.zhang@intel.com>; Xing,
> Beilei <beilei.xing@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>
> Subject: [PATCH v8 2/2] net/iavf: add diagnostic support in TX path
> 
> The only way to enable diagnostics for TX paths is to modify the application
> source code. Making it difficult to diagnose faults.
> 
> In this patch, the devarg option "mbuf_check" is introduced and the
> parameters are configured to enable the corresponding diagnostics.
> 
> supported cases: mbuf, size, segment, offload.
>  1. mbuf: check for corrupted mbuf.
>  2. size: check min/max packet length according to hw spec.
>  3. segment: check number of mbuf segments not exceed hw limitation.
>  4. offload: check any unsupported offload flag.
> 
> parameter format: mbuf_check=[mbuf,<case1>,<case2>]
> eg: dpdk-testpmd -a 0000:81:01.0,mbuf_check=[mbuf,size] -- -i
> 
> Signed-off-by: Mingjin Ye <mingjinx.ye@intel.com>
> ---
> v2: Remove call chain.
> ---
> v3: Optimisation implementation.
> ---
> v4: Fix Windows os compilation error.
> ---
> v5: Split Patch.
> ---
> v6: remove strict.
> ---
> v7: Modify the description document.
> ---
>  doc/guides/nics/intel_vf.rst   |  9 ++++
>  drivers/net/iavf/iavf.h        | 12 +++++
>  drivers/net/iavf/iavf_ethdev.c | 76 ++++++++++++++++++++++++++
>  drivers/net/iavf/iavf_rxtx.c   | 98 ++++++++++++++++++++++++++++++++++
>  drivers/net/iavf/iavf_rxtx.h   |  2 +
>  5 files changed, 197 insertions(+)
> 
> diff --git a/doc/guides/nics/intel_vf.rst b/doc/guides/nics/intel_vf.rst index
> ad08198f0f..bda6648726 100644
> --- a/doc/guides/nics/intel_vf.rst
> +++ b/doc/guides/nics/intel_vf.rst
> @@ -111,6 +111,15 @@ For more detail on SR-IOV, please refer to the
> following documents:
>      by setting the ``devargs`` parameter like ``-a 18:01.0,no-poll-on-link-
> down=1``
>      when IAVF is backed by an Intel\ |reg| E810 device or an Intel\ |reg| 700
> Series Ethernet device.
> 
> +    When IAVF is backed by an Intel\ |reg| E810 device or an Intel\ |reg| 700
> series Ethernet devices.
> +    Set the ``devargs`` parameter ``mbuf_check`` to enable TX diagnostics. For
> example,
> +    ``-a 18:01.0,mbuf_check=mbuf`` or ``-a 18:01.0,mbuf_check=[mbuf,size]``.
> Supported cases:

``-a 18:01.0,mbuf_check=<case>`` or ``-a 18:01.0,mbuf_check=[<case1>,<case2>...]``

> +
> +    *   mbuf: Check for corrupted mbuf.
> +    *   size: Check min/max packet length according to hw spec.
> +    *   segment: Check number of mbuf segments not exceed hw limitation.
> +    *   offload: Check any unsupported offload flag.

Please also describe how to get the error count via xstats_get; a testpmd command example is suggested.

By the way, patch 1/2 has been merged separately as a fix, so the new version should target this patch only.


* [PATCH v10] net/iavf: add diagnostic support in TX path
  2024-01-04 10:18                         ` [PATCH v9 2/2] net/iavf: add diagnostic support in TX path Mingjin Ye
@ 2024-01-05  9:58                           ` Mingjin Ye
  2024-01-09 10:09                             ` [PATCH v11] " Mingjin Ye
  2024-01-10  2:25                             ` Mingjin Ye
  0 siblings, 2 replies; 36+ messages in thread
From: Mingjin Ye @ 2024-01-05  9:58 UTC (permalink / raw)
  To: dev
  Cc: qiming.yang, Mingjin Ye, Simei Su, Wenjun Wu, Yuying Zhang,
	Beilei Xing, Jingjing Wu

The only way to enable diagnostics for the TX path is to modify the
application source code, making it difficult to diagnose faults.

In this patch, the devarg option "mbuf_check" is introduced; its
parameters select which of the corresponding diagnostics to enable.

Supported cases: mbuf, size, segment, offload.
 1. mbuf: check for corrupted mbufs.
 2. size: check min/max packet length against the hw spec.
 3. segment: check that the number of mbuf segments does not exceed the hw limit.
 4. offload: check for any unsupported offload flags.

parameter format: mbuf_check=[mbuf,<case1>,<case2>]
eg: dpdk-testpmd -a 0000:81:01.0,mbuf_check=[mbuf,size] -- -i

Signed-off-by: Mingjin Ye <mingjinx.ye@intel.com>
---
v2: Remove call chain.
---
v3: Optimisation implementation.
---
v4: Fix Windows os compilation error.
---
v5: Split Patch.
---
v6: Remove strict.
---
v9: Modify the description document.
---
v10: Modify vf rst document.
---
 doc/guides/nics/intel_vf.rst   | 11 ++++
 drivers/net/iavf/iavf.h        | 12 +++++
 drivers/net/iavf/iavf_ethdev.c | 75 ++++++++++++++++++++++++++
 drivers/net/iavf/iavf_rxtx.c   | 98 ++++++++++++++++++++++++++++++++++
 drivers/net/iavf/iavf_rxtx.h   |  2 +
 5 files changed, 198 insertions(+)

diff --git a/doc/guides/nics/intel_vf.rst b/doc/guides/nics/intel_vf.rst
index ce96c2e1f8..f62bb4233c 100644
--- a/doc/guides/nics/intel_vf.rst
+++ b/doc/guides/nics/intel_vf.rst
@@ -111,6 +111,17 @@ For more detail on SR-IOV, please refer to the following documents:
     by setting the ``devargs`` parameter like ``-a 18:01.0,no-poll-on-link-down=1``
     when IAVF is backed by an Intel\ |reg| E810 device or an Intel\ |reg| 700 Series Ethernet device.
 
+    When IAVF is backed by an Intel\ |reg| E810 device or an Intel\ |reg| 700 Series Ethernet device,
+    set the ``devargs`` parameter ``mbuf_check`` to enable TX diagnostics. For example,
+    ``-a 18:01.0,mbuf_check=<case>`` or ``-a 18:01.0,mbuf_check=[<case1>,<case2>...]``. Also,
+    ``xstats_get`` can be used to get the error counts, which are collected in ``tx_mbuf_error_packets``
+    xstats. For example, ``testpmd> show port xstats all``. Supported cases:
+
+    *   mbuf: Check for corrupted mbufs.
+    *   size: Check min/max packet length against the hw spec.
+    *   segment: Check that the number of mbuf segments does not exceed the hw limit.
+    *   offload: Check for any unsupported offload flags.
+
 The PCIE host-interface of Intel Ethernet Switch FM10000 Series VF infrastructure
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index ab24cb02c3..23c0496d54 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -114,9 +114,14 @@ struct iavf_ipsec_crypto_stats {
 	} ierrors;
 };
 
+struct iavf_mbuf_stats {
+	uint64_t tx_pkt_errors;
+};
+
 struct iavf_eth_xstats {
 	struct virtchnl_eth_stats eth_stats;
 	struct iavf_ipsec_crypto_stats ips_stats;
+	struct iavf_mbuf_stats mbuf_stats;
 };
 
 /* Structure that defines a VSI, associated with a adapter. */
@@ -310,6 +315,7 @@ struct iavf_devargs {
 	uint32_t watchdog_period;
 	int auto_reset;
 	int no_poll_on_link_down;
+	int mbuf_check;
 };
 
 struct iavf_security_ctx;
@@ -353,6 +359,11 @@ enum iavf_tx_burst_type {
 	IAVF_TX_AVX512_CTX_OFFLOAD,
 };
 
+#define IAVF_MBUF_CHECK_F_TX_MBUF        (1ULL << 0)
+#define IAVF_MBUF_CHECK_F_TX_SIZE        (1ULL << 1)
+#define IAVF_MBUF_CHECK_F_TX_SEGMENT     (1ULL << 2)
+#define IAVF_MBUF_CHECK_F_TX_OFFLOAD     (1ULL << 3)
+
 /* Structure to store private data for each VF instance. */
 struct iavf_adapter {
 	struct iavf_hw hw;
@@ -370,6 +381,7 @@ struct iavf_adapter {
 	bool no_poll;
 	enum iavf_rx_burst_type rx_burst_type;
 	enum iavf_tx_burst_type tx_burst_type;
+	uint64_t mc_flags; /* mbuf check flags. */
 	uint16_t fdir_ref_cnt;
 	struct iavf_devargs devargs;
 };
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index 1fb876e827..903a43d004 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -13,6 +13,7 @@
 #include <inttypes.h>
 #include <rte_byteorder.h>
 #include <rte_common.h>
+#include <rte_os_shim.h>
 
 #include <rte_interrupts.h>
 #include <rte_debug.h>
@@ -39,6 +40,7 @@
 #define IAVF_RESET_WATCHDOG_ARG    "watchdog_period"
 #define IAVF_ENABLE_AUTO_RESET_ARG "auto_reset"
 #define IAVF_NO_POLL_ON_LINK_DOWN_ARG "no-poll-on-link-down"
+#define IAVF_MBUF_CHECK_ARG       "mbuf_check"
 uint64_t iavf_timestamp_dynflag;
 int iavf_timestamp_dynfield_offset = -1;
 int rte_pmd_iavf_tx_lldp_dynfield_offset = -1;
@@ -49,6 +51,7 @@ static const char * const iavf_valid_args[] = {
 	IAVF_RESET_WATCHDOG_ARG,
 	IAVF_ENABLE_AUTO_RESET_ARG,
 	IAVF_NO_POLL_ON_LINK_DOWN_ARG,
+	IAVF_MBUF_CHECK_ARG,
 	NULL
 };
 
@@ -175,6 +178,7 @@ static const struct rte_iavf_xstats_name_off rte_iavf_stats_strings[] = {
 	{"tx_broadcast_packets", _OFF_OF(eth_stats.tx_broadcast)},
 	{"tx_dropped_packets", _OFF_OF(eth_stats.tx_discards)},
 	{"tx_error_packets", _OFF_OF(eth_stats.tx_errors)},
+	{"tx_mbuf_error_packets", _OFF_OF(mbuf_stats.tx_pkt_errors)},
 
 	{"inline_ipsec_crypto_ipackets", _OFF_OF(ips_stats.icount)},
 	{"inline_ipsec_crypto_ibytes", _OFF_OF(ips_stats.ibytes)},
@@ -1841,6 +1845,9 @@ iavf_dev_xstats_reset(struct rte_eth_dev *dev)
 	iavf_dev_stats_reset(dev);
 	memset(&vf->vsi.eth_stats_offset.ips_stats, 0,
 			sizeof(struct iavf_ipsec_crypto_stats));
+	memset(&vf->vsi.eth_stats_offset.mbuf_stats, 0,
+			sizeof(struct iavf_mbuf_stats));
+
 	return 0;
 }
 
@@ -1880,6 +1887,19 @@ iavf_dev_update_ipsec_xstats(struct rte_eth_dev *ethdev,
 	}
 }
 
+static void
+iavf_dev_update_mbuf_stats(struct rte_eth_dev *ethdev,
+		struct iavf_mbuf_stats *mbuf_stats)
+{
+	uint16_t idx;
+	struct iavf_tx_queue *txq;
+
+	for (idx = 0; idx < ethdev->data->nb_tx_queues; idx++) {
+		txq = ethdev->data->tx_queues[idx];
+		mbuf_stats->tx_pkt_errors += txq->mbuf_errors;
+	}
+}
+
 static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
 				 struct rte_eth_xstat *xstats, unsigned int n)
 {
@@ -1908,6 +1928,9 @@ static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
 	if (iavf_ipsec_crypto_supported(adapter))
 		iavf_dev_update_ipsec_xstats(dev, &iavf_xtats.ips_stats);
 
+	if (adapter->devargs.mbuf_check)
+		iavf_dev_update_mbuf_stats(dev, &iavf_xtats.mbuf_stats);
+
 	/* loop over xstats array and values from pstats */
 	for (i = 0; i < IAVF_NB_XSTATS; i++) {
 		xstats[i].id = i;
@@ -2290,6 +2313,50 @@ iavf_parse_watchdog_period(__rte_unused const char *key, const char *value, void
 	return 0;
 }
 
+static int
+iavf_parse_mbuf_check(__rte_unused const char *key, const char *value, void *args)
+{
+	char *cur;
+	char *tmp;
+	int str_len;
+	int valid_len;
+
+	int ret = 0;
+	uint64_t *mc_flags = args;
+	char *str2 = strdup(value);
+	if (str2 == NULL)
+		return -1;
+
+	str_len = strlen(str2);
+	if (str2[0] == '[' && str2[str_len - 1] == ']') {
+		if (str_len < 3) {
+			ret = -1;
+			goto mdd_end;
+		}
+		valid_len = str_len - 2;
+		memmove(str2, str2 + 1, valid_len);
+		memset(str2 + valid_len, '\0', 2);
+	}
+	cur = strtok_r(str2, ",", &tmp);
+	while (cur != NULL) {
+		if (!strcmp(cur, "mbuf"))
+			*mc_flags |= IAVF_MBUF_CHECK_F_TX_MBUF;
+		else if (!strcmp(cur, "size"))
+			*mc_flags |= IAVF_MBUF_CHECK_F_TX_SIZE;
+		else if (!strcmp(cur, "segment"))
+			*mc_flags |= IAVF_MBUF_CHECK_F_TX_SEGMENT;
+		else if (!strcmp(cur, "offload"))
+			*mc_flags |= IAVF_MBUF_CHECK_F_TX_OFFLOAD;
+		else
+			PMD_DRV_LOG(ERR, "Unsupported mdd check type: %s", cur);
+		cur = strtok_r(NULL, ",", &tmp);
+	}
+
+mdd_end:
+	free(str2);
+	return ret;
+}
+
 static int iavf_parse_devargs(struct rte_eth_dev *dev)
 {
 	struct iavf_adapter *ad =
@@ -2344,6 +2411,14 @@ static int iavf_parse_devargs(struct rte_eth_dev *dev)
 		goto bail;
 	}
 
+	ret = rte_kvargs_process(kvlist, IAVF_MBUF_CHECK_ARG,
+				 &iavf_parse_mbuf_check, &ad->mc_flags);
+	if (ret)
+		goto bail;
+
+	if (ad->mc_flags)
+		ad->devargs.mbuf_check = 1;
+
 	ret = rte_kvargs_process(kvlist, IAVF_ENABLE_AUTO_RESET_ARG,
 				 &parse_bool, &ad->devargs.auto_reset);
 	if (ret)
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index f044ad3f26..54dd4cc23d 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -3806,6 +3806,97 @@ iavf_xmit_pkts_no_poll(void *tx_queue, struct rte_mbuf **tx_pkts,
 								tx_pkts, nb_pkts);
 }
 
+/* Tx mbuf check */
+static uint16_t
+iavf_xmit_pkts_check(void *tx_queue, struct rte_mbuf **tx_pkts,
+	      uint16_t nb_pkts)
+{
+	uint16_t idx;
+	uint64_t ol_flags;
+	struct rte_mbuf *mb;
+	uint16_t good_pkts = nb_pkts;
+	const char *reason = NULL;
+	bool pkt_error = false;
+	struct iavf_tx_queue *txq = tx_queue;
+	struct iavf_adapter *adapter = txq->vsi->adapter;
+	enum iavf_tx_burst_type tx_burst_type =
+		txq->vsi->adapter->tx_burst_type;
+
+	for (idx = 0; idx < nb_pkts; idx++) {
+		mb = tx_pkts[idx];
+		ol_flags = mb->ol_flags;
+
+		if ((adapter->mc_flags & IAVF_MBUF_CHECK_F_TX_MBUF) &&
+			(rte_mbuf_check(mb, 1, &reason) != 0)) {
+			PMD_TX_LOG(ERR, "INVALID mbuf: %s\n", reason);
+			pkt_error = true;
+			break;
+		}
+
+		if ((adapter->mc_flags & IAVF_MBUF_CHECK_F_TX_SIZE) &&
+			(mb->data_len < IAVF_TX_MIN_PKT_LEN ||
+			mb->data_len > adapter->vf.max_pkt_len)) {
+			PMD_TX_LOG(ERR, "INVALID mbuf: data_len (%u) is out "
+			"of range, reasonable range (%d - %u)\n", mb->data_len,
+			IAVF_TX_MIN_PKT_LEN, adapter->vf.max_pkt_len);
+			pkt_error = true;
+			break;
+		}
+
+		if (adapter->mc_flags & IAVF_MBUF_CHECK_F_TX_SEGMENT) {
+			/* Check condition for nb_segs > IAVF_TX_MAX_MTU_SEG. */
+			if (!(ol_flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG))) {
+				if (mb->nb_segs > IAVF_TX_MAX_MTU_SEG) {
+					PMD_TX_LOG(ERR, "INVALID mbuf: nb_segs (%d) exceeds "
+					"HW limit, maximum allowed value is %d\n", mb->nb_segs,
+					IAVF_TX_MAX_MTU_SEG);
+					pkt_error = true;
+					break;
+				}
+			} else if ((mb->tso_segsz < IAVF_MIN_TSO_MSS) ||
+				(mb->tso_segsz > IAVF_MAX_TSO_MSS)) {
+				/* MSS outside the range are considered malicious */
+				PMD_TX_LOG(ERR, "INVALID mbuf: tso_segsz (%u) is out "
+				"of range, reasonable range (%d - %u)\n", mb->tso_segsz,
+				IAVF_MIN_TSO_MSS, IAVF_MAX_TSO_MSS);
+				pkt_error = true;
+				break;
+			} else if (mb->nb_segs > txq->nb_tx_desc) {
+				PMD_TX_LOG(ERR, "INVALID mbuf: nb_segs out "
+				"of ring length\n");
+				pkt_error = true;
+				break;
+			}
+		}
+
+		if (adapter->mc_flags & IAVF_MBUF_CHECK_F_TX_OFFLOAD) {
+			if (ol_flags & IAVF_TX_OFFLOAD_NOTSUP_MASK) {
+				PMD_TX_LOG(ERR, "INVALID mbuf: TX offload "
+				"is not supported\n");
+				pkt_error = true;
+				break;
+			}
+
+			if (!rte_validate_tx_offload(mb)) {
+				PMD_TX_LOG(ERR, "INVALID mbuf: TX offload "
+				"setup error\n");
+				pkt_error = true;
+				break;
+			}
+		}
+	}
+
+	if (pkt_error) {
+		txq->mbuf_errors++;
+		good_pkts = idx;
+		if (good_pkts == 0)
+			return 0;
+	}
+
+	return iavf_tx_pkt_burst_ops[tx_burst_type](tx_queue,
+								tx_pkts, good_pkts);
+}
+
 /* choose rx function*/
 void
 iavf_set_rx_function(struct rte_eth_dev *dev)
@@ -4051,6 +4142,7 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
 	struct iavf_adapter *adapter =
 		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 	enum iavf_tx_burst_type tx_burst_type;
+	int mbuf_check = adapter->devargs.mbuf_check;
 	int no_poll_on_link_down = adapter->devargs.no_poll_on_link_down;
 #ifdef RTE_ARCH_X86
 	struct iavf_tx_queue *txq;
@@ -4146,6 +4238,9 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
 		if (no_poll_on_link_down) {
 			adapter->tx_burst_type = tx_burst_type;
 			dev->tx_pkt_burst = iavf_xmit_pkts_no_poll;
+		} else if (mbuf_check) {
+			adapter->tx_burst_type = tx_burst_type;
+			dev->tx_pkt_burst = iavf_xmit_pkts_check;
 		} else {
 			dev->tx_pkt_burst = iavf_tx_pkt_burst_ops[tx_burst_type];
 		}
@@ -4162,6 +4257,9 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
 	if (no_poll_on_link_down) {
 		adapter->tx_burst_type = tx_burst_type;
 		dev->tx_pkt_burst = iavf_xmit_pkts_no_poll;
+	} else if (mbuf_check) {
+		adapter->tx_burst_type = tx_burst_type;
+		dev->tx_pkt_burst = iavf_xmit_pkts_check;
 	} else {
 		dev->tx_pkt_burst = iavf_tx_pkt_burst_ops[tx_burst_type];
 	}
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index a8af263d59..7b56076d32 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -306,6 +306,8 @@ struct iavf_tx_queue {
 	uint16_t next_rs;              /* next to check DD,  for VPMD */
 	uint16_t ipsec_crypto_pkt_md_offset;
 
+	uint64_t mbuf_errors;
+
 	bool q_set;                    /* if rx queue has been configured */
 	bool tx_deferred_start;        /* don't start this queue in dev start */
 	const struct iavf_txq_ops *ops;
-- 
2.25.1
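
For context on the parser above: rte_kvargs hands a bracketed list to
the per-key handler as one value, which is why iavf_parse_mbuf_check
strips the brackets itself before tokenizing on ','. A minimal
standalone sketch; "demo_check" is an illustrative key, not a real
iavf devarg:

#include <stdio.h>
#include <rte_kvargs.h>

static int
on_check(const char *key, const char *value, void *opaque)
{
	(void)opaque;
	/* Prints: demo_check = [mbuf,size] */
	printf("%s = %s\n", key, value);
	return 0;
}

int
main(void)
{
	static const char * const valid[] = { "demo_check", NULL };
	struct rte_kvargs *kv =
		rte_kvargs_parse("demo_check=[mbuf,size]", valid);

	if (kv == NULL)
		return 1;
	rte_kvargs_process(kv, "demo_check", on_check, NULL);
	rte_kvargs_free(kv);
	return 0;
}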



* [PATCH v11] net/iavf: add diagnostic support in TX path
  2024-01-05  9:58                           ` [PATCH v10] " Mingjin Ye
@ 2024-01-09 10:09                             ` Mingjin Ye
  2024-01-10  2:25                             ` Mingjin Ye
  1 sibling, 0 replies; 36+ messages in thread
From: Mingjin Ye @ 2024-01-09 10:09 UTC (permalink / raw)
  To: dev
  Cc: qiming.yang, Mingjin Ye, Wenjun Wu, Yuying Zhang, Beilei Xing,
	Simei Su, Jingjing Wu

Implemented a Tx wrapper that performs thorough checks on mbufs,
categorizing and counting invalid cases by type for diagnostic
purposes. The counts of invalid cases are accessible through xstats_get.

Also, the devarg option "mbuf_check" was introduced to select which
of these diagnostic checks are enabled.

Supported cases: mbuf, size, segment, offload.
 1. mbuf: check for corrupted mbufs.
 2. size: check min/max packet length against the hw spec.
 3. segment: check that the number of mbuf segments does not exceed the hw limit.
 4. offload: check for any unsupported offload flags.

parameter format: "mbuf_check=<case>" or "mbuf_check=[<case1>,<case2>]"
eg: dpdk-testpmd -a 0000:81:01.0,mbuf_check=[mbuf,size] -- -i

Signed-off-by: Mingjin Ye <mingjinx.ye@intel.com>
---
v2: Remove call chain.
---
v3: Optimisation implementation.
---
v4: Fix Windows os compilation error.
---
v5: Split Patch.
---
v6: Remove strict.
---
v9: Modify the description document.
---
v10: Modify vf rst document.
---
v11: Modify commit log.
---
 doc/guides/nics/intel_vf.rst   | 11 ++++
 drivers/net/iavf/iavf.h        | 11 ++++
 drivers/net/iavf/iavf_ethdev.c | 72 +++++++++++++++++++++++++
 drivers/net/iavf/iavf_rxtx.c   | 98 ++++++++++++++++++++++++++++++++++
 drivers/net/iavf/iavf_rxtx.h   |  2 +
 5 files changed, 194 insertions(+)

diff --git a/doc/guides/nics/intel_vf.rst b/doc/guides/nics/intel_vf.rst
index ce96c2e1f8..f62bb4233c 100644
--- a/doc/guides/nics/intel_vf.rst
+++ b/doc/guides/nics/intel_vf.rst
@@ -111,6 +111,17 @@ For more detail on SR-IOV, please refer to the following documents:
     by setting the ``devargs`` parameter like ``-a 18:01.0,no-poll-on-link-down=1``
     when IAVF is backed by an Intel\ |reg| E810 device or an Intel\ |reg| 700 Series Ethernet device.
 
+    When IAVF is backed by an Intel\ |reg| E810 device or an Intel\ |reg| 700 Series Ethernet device,
+    set the ``devargs`` parameter ``mbuf_check`` to enable TX diagnostics. For example,
+    ``-a 18:01.0,mbuf_check=<case>`` or ``-a 18:01.0,mbuf_check=[<case1>,<case2>...]``. Also,
+    ``xstats_get`` can be used to get the error counts, which are collected in ``tx_mbuf_error_packets``
+    xstats. For example, ``testpmd> show port xstats all``. Supported cases:
+
+    *   mbuf: Check for corrupted mbufs.
+    *   size: Check min/max packet length against the hw spec.
+    *   segment: Check that the number of mbuf segments does not exceed the hw limit.
+    *   offload: Check for any unsupported offload flags.
+
 The PCIE host-interface of Intel Ethernet Switch FM10000 Series VF infrastructure
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index ab24cb02c3..824ae4aa02 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -114,9 +114,14 @@ struct iavf_ipsec_crypto_stats {
 	} ierrors;
 };
 
+struct iavf_mbuf_stats {
+	uint64_t tx_pkt_errors;
+};
+
 struct iavf_eth_xstats {
 	struct virtchnl_eth_stats eth_stats;
 	struct iavf_ipsec_crypto_stats ips_stats;
+	struct iavf_mbuf_stats mbuf_stats;
 };
 
 /* Structure that defines a VSI, associated with a adapter. */
@@ -310,6 +315,7 @@ struct iavf_devargs {
 	uint32_t watchdog_period;
 	int auto_reset;
 	int no_poll_on_link_down;
+	uint64_t mbuf_check;
 };
 
 struct iavf_security_ctx;
@@ -353,6 +359,11 @@ enum iavf_tx_burst_type {
 	IAVF_TX_AVX512_CTX_OFFLOAD,
 };
 
+#define IAVF_MBUF_CHECK_F_TX_MBUF        (1ULL << 0)
+#define IAVF_MBUF_CHECK_F_TX_SIZE        (1ULL << 1)
+#define IAVF_MBUF_CHECK_F_TX_SEGMENT     (1ULL << 2)
+#define IAVF_MBUF_CHECK_F_TX_OFFLOAD     (1ULL << 3)
+
 /* Structure to store private data for each VF instance. */
 struct iavf_adapter {
 	struct iavf_hw hw;
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index 1fb876e827..fca57b50b3 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -13,6 +13,7 @@
 #include <inttypes.h>
 #include <rte_byteorder.h>
 #include <rte_common.h>
+#include <rte_os_shim.h>
 
 #include <rte_interrupts.h>
 #include <rte_debug.h>
@@ -39,6 +40,7 @@
 #define IAVF_RESET_WATCHDOG_ARG    "watchdog_period"
 #define IAVF_ENABLE_AUTO_RESET_ARG "auto_reset"
 #define IAVF_NO_POLL_ON_LINK_DOWN_ARG "no-poll-on-link-down"
+#define IAVF_MBUF_CHECK_ARG       "mbuf_check"
 uint64_t iavf_timestamp_dynflag;
 int iavf_timestamp_dynfield_offset = -1;
 int rte_pmd_iavf_tx_lldp_dynfield_offset = -1;
@@ -49,6 +51,7 @@ static const char * const iavf_valid_args[] = {
 	IAVF_RESET_WATCHDOG_ARG,
 	IAVF_ENABLE_AUTO_RESET_ARG,
 	IAVF_NO_POLL_ON_LINK_DOWN_ARG,
+	IAVF_MBUF_CHECK_ARG,
 	NULL
 };
 
@@ -175,6 +178,7 @@ static const struct rte_iavf_xstats_name_off rte_iavf_stats_strings[] = {
 	{"tx_broadcast_packets", _OFF_OF(eth_stats.tx_broadcast)},
 	{"tx_dropped_packets", _OFF_OF(eth_stats.tx_discards)},
 	{"tx_error_packets", _OFF_OF(eth_stats.tx_errors)},
+	{"tx_mbuf_error_packets", _OFF_OF(mbuf_stats.tx_pkt_errors)},
 
 	{"inline_ipsec_crypto_ipackets", _OFF_OF(ips_stats.icount)},
 	{"inline_ipsec_crypto_ibytes", _OFF_OF(ips_stats.ibytes)},
@@ -1841,6 +1845,9 @@ iavf_dev_xstats_reset(struct rte_eth_dev *dev)
 	iavf_dev_stats_reset(dev);
 	memset(&vf->vsi.eth_stats_offset.ips_stats, 0,
 			sizeof(struct iavf_ipsec_crypto_stats));
+	memset(&vf->vsi.eth_stats_offset.mbuf_stats, 0,
+			sizeof(struct iavf_mbuf_stats));
+
 	return 0;
 }
 
@@ -1880,6 +1887,19 @@ iavf_dev_update_ipsec_xstats(struct rte_eth_dev *ethdev,
 	}
 }
 
+static void
+iavf_dev_update_mbuf_stats(struct rte_eth_dev *ethdev,
+		struct iavf_mbuf_stats *mbuf_stats)
+{
+	uint16_t idx;
+	struct iavf_tx_queue *txq;
+
+	for (idx = 0; idx < ethdev->data->nb_tx_queues; idx++) {
+		txq = ethdev->data->tx_queues[idx];
+		mbuf_stats->tx_pkt_errors += txq->mbuf_errors;
+	}
+}
+
 static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
 				 struct rte_eth_xstat *xstats, unsigned int n)
 {
@@ -1908,6 +1928,9 @@ static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
 	if (iavf_ipsec_crypto_supported(adapter))
 		iavf_dev_update_ipsec_xstats(dev, &iavf_xtats.ips_stats);
 
+	if (adapter->devargs.mbuf_check)
+		iavf_dev_update_mbuf_stats(dev, &iavf_xtats.mbuf_stats);
+
 	/* loop over xstats array and values from pstats */
 	for (i = 0; i < IAVF_NB_XSTATS; i++) {
 		xstats[i].id = i;
@@ -2290,6 +2313,50 @@ iavf_parse_watchdog_period(__rte_unused const char *key, const char *value, void
 	return 0;
 }
 
+static int
+iavf_parse_mbuf_check(__rte_unused const char *key, const char *value, void *args)
+{
+	char *cur;
+	char *tmp;
+	int str_len;
+	int valid_len;
+
+	int ret = 0;
+	uint64_t *mc_flags = args;
+	char *str2 = strdup(value);
+	if (str2 == NULL)
+		return -1;
+
+	str_len = strlen(str2);
+	if (str2[0] == '[' && str2[str_len - 1] == ']') {
+		if (str_len < 3) {
+			ret = -1;
+			goto mdd_end;
+		}
+		valid_len = str_len - 2;
+		memmove(str2, str2 + 1, valid_len);
+		memset(str2 + valid_len, '\0', 2);
+	}
+	cur = strtok_r(str2, ",", &tmp);
+	while (cur != NULL) {
+		if (!strcmp(cur, "mbuf"))
+			*mc_flags |= IAVF_MBUF_CHECK_F_TX_MBUF;
+		else if (!strcmp(cur, "size"))
+			*mc_flags |= IAVF_MBUF_CHECK_F_TX_SIZE;
+		else if (!strcmp(cur, "segment"))
+			*mc_flags |= IAVF_MBUF_CHECK_F_TX_SEGMENT;
+		else if (!strcmp(cur, "offload"))
+			*mc_flags |= IAVF_MBUF_CHECK_F_TX_OFFLOAD;
+		else
+			PMD_DRV_LOG(ERR, "Unsupported mdd check type: %s", cur);
+		cur = strtok_r(NULL, ",", &tmp);
+	}
+
+mdd_end:
+	free(str2);
+	return ret;
+}
+
 static int iavf_parse_devargs(struct rte_eth_dev *dev)
 {
 	struct iavf_adapter *ad =
@@ -2344,6 +2411,11 @@ static int iavf_parse_devargs(struct rte_eth_dev *dev)
 		goto bail;
 	}
 
+	ret = rte_kvargs_process(kvlist, IAVF_MBUF_CHECK_ARG,
+				 &iavf_parse_mbuf_check, &ad->devargs.mbuf_check);
+	if (ret)
+		goto bail;
+
 	ret = rte_kvargs_process(kvlist, IAVF_ENABLE_AUTO_RESET_ARG,
 				 &parse_bool, &ad->devargs.auto_reset);
 	if (ret)
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index f044ad3f26..5ba4527ae3 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -3806,6 +3806,97 @@ iavf_xmit_pkts_no_poll(void *tx_queue, struct rte_mbuf **tx_pkts,
 								tx_pkts, nb_pkts);
 }
 
+/* Tx mbuf check */
+static uint16_t
+iavf_xmit_pkts_check(void *tx_queue, struct rte_mbuf **tx_pkts,
+	      uint16_t nb_pkts)
+{
+	uint16_t idx;
+	uint64_t ol_flags;
+	struct rte_mbuf *mb;
+	uint16_t good_pkts = nb_pkts;
+	const char *reason = NULL;
+	bool pkt_error = false;
+	struct iavf_tx_queue *txq = tx_queue;
+	struct iavf_adapter *adapter = txq->vsi->adapter;
+	enum iavf_tx_burst_type tx_burst_type =
+		txq->vsi->adapter->tx_burst_type;
+
+	for (idx = 0; idx < nb_pkts; idx++) {
+		mb = tx_pkts[idx];
+		ol_flags = mb->ol_flags;
+
+		if ((adapter->devargs.mbuf_check & IAVF_MBUF_CHECK_F_TX_MBUF) &&
+			(rte_mbuf_check(mb, 1, &reason) != 0)) {
+			PMD_TX_LOG(ERR, "INVALID mbuf: %s\n", reason);
+			pkt_error = true;
+			break;
+		}
+
+		if ((adapter->devargs.mbuf_check & IAVF_MBUF_CHECK_F_TX_SIZE) &&
+			(mb->data_len < IAVF_TX_MIN_PKT_LEN ||
+			mb->data_len > adapter->vf.max_pkt_len)) {
+			PMD_TX_LOG(ERR, "INVALID mbuf: data_len (%u) is out "
+			"of range, reasonable range (%d - %u)\n", mb->data_len,
+			IAVF_TX_MIN_PKT_LEN, adapter->vf.max_pkt_len);
+			pkt_error = true;
+			break;
+		}
+
+		if (adapter->devargs.mbuf_check & IAVF_MBUF_CHECK_F_TX_SEGMENT) {
+			/* Check condition for nb_segs > IAVF_TX_MAX_MTU_SEG. */
+			if (!(ol_flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG))) {
+				if (mb->nb_segs > IAVF_TX_MAX_MTU_SEG) {
+					PMD_TX_LOG(ERR, "INVALID mbuf: nb_segs (%d) exceeds "
+					"HW limit, maximum allowed value is %d\n", mb->nb_segs,
+					IAVF_TX_MAX_MTU_SEG);
+					pkt_error = true;
+					break;
+				}
+			} else if ((mb->tso_segsz < IAVF_MIN_TSO_MSS) ||
+				(mb->tso_segsz > IAVF_MAX_TSO_MSS)) {
+				/* MSS outside the range are considered malicious */
+				PMD_TX_LOG(ERR, "INVALID mbuf: tso_segsz (%u) is out "
+				"of range, reasonable range (%d - %u)\n", mb->tso_segsz,
+				IAVF_MIN_TSO_MSS, IAVF_MAX_TSO_MSS);
+				pkt_error = true;
+				break;
+			} else if (mb->nb_segs > txq->nb_tx_desc) {
+				PMD_TX_LOG(ERR, "INVALID mbuf: nb_segs out "
+				"of ring length\n");
+				pkt_error = true;
+				break;
+			}
+		}
+
+		if (adapter->devargs.mbuf_check & IAVF_MBUF_CHECK_F_TX_OFFLOAD) {
+			if (ol_flags & IAVF_TX_OFFLOAD_NOTSUP_MASK) {
+				PMD_TX_LOG(ERR, "INVALID mbuf: TX offload "
+				"is not supported\n");
+				pkt_error = true;
+				break;
+			}
+
+			if (!rte_validate_tx_offload(mb)) {
+				PMD_TX_LOG(ERR, "INVALID mbuf: TX offload "
+				"setup error\n");
+				pkt_error = true;
+				break;
+			}
+		}
+	}
+
+	if (pkt_error) {
+		txq->mbuf_errors++;
+		good_pkts = idx;
+		if (good_pkts == 0)
+			return 0;
+	}
+
+	return iavf_tx_pkt_burst_ops[tx_burst_type](tx_queue,
+								tx_pkts, good_pkts);
+}
+
 /* choose rx function*/
 void
 iavf_set_rx_function(struct rte_eth_dev *dev)
@@ -4051,6 +4142,7 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
 	struct iavf_adapter *adapter =
 		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 	enum iavf_tx_burst_type tx_burst_type;
+	int mbuf_check = adapter->devargs.mbuf_check;
 	int no_poll_on_link_down = adapter->devargs.no_poll_on_link_down;
 #ifdef RTE_ARCH_X86
 	struct iavf_tx_queue *txq;
@@ -4146,6 +4238,9 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
 		if (no_poll_on_link_down) {
 			adapter->tx_burst_type = tx_burst_type;
 			dev->tx_pkt_burst = iavf_xmit_pkts_no_poll;
+		} else if (mbuf_check) {
+			adapter->tx_burst_type = tx_burst_type;
+			dev->tx_pkt_burst = iavf_xmit_pkts_check;
 		} else {
 			dev->tx_pkt_burst = iavf_tx_pkt_burst_ops[tx_burst_type];
 		}
@@ -4162,6 +4257,9 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
 	if (no_poll_on_link_down) {
 		adapter->tx_burst_type = tx_burst_type;
 		dev->tx_pkt_burst = iavf_xmit_pkts_no_poll;
+	} else if (mbuf_check) {
+		adapter->tx_burst_type = tx_burst_type;
+		dev->tx_pkt_burst = iavf_xmit_pkts_check;
 	} else {
 		dev->tx_pkt_burst = iavf_tx_pkt_burst_ops[tx_burst_type];
 	}
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index a8af263d59..7b56076d32 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -306,6 +306,8 @@ struct iavf_tx_queue {
 	uint16_t next_rs;              /* next to check DD,  for VPMD */
 	uint16_t ipsec_crypto_pkt_md_offset;
 
+	uint64_t mbuf_errors;
+
 	bool q_set;                    /* if rx queue has been configured */
 	bool tx_deferred_start;        /* don't start this queue in dev start */
 	const struct iavf_txq_ops *ops;
-- 
2.25.1
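
The "mbuf" case builds on rte_mbuf_check() from the mbuf library. A
standalone sketch of that call; the pool parameters are arbitrary and
error handling is abbreviated:

#include <stdio.h>
#include <rte_eal.h>
#include <rte_lcore.h>
#include <rte_mbuf.h>

int
main(int argc, char **argv)
{
	if (rte_eal_init(argc, argv) < 0)
		return 1;

	struct rte_mempool *mp = rte_pktmbuf_pool_create("demo_pool",
			256, 0, 0, RTE_MBUF_DEFAULT_BUF_SIZE,
			rte_socket_id());
	if (mp == NULL)
		return 1;

	struct rte_mbuf *m = rte_pktmbuf_alloc(mp);
	const char *reason = NULL;

	/* is_header=1 also validates packet-level fields (pkt_len,
	 * nb_segs), which is what the "mbuf" diagnostic case checks. */
	if (m == NULL || rte_mbuf_check(m, 1, &reason) != 0)
		printf("invalid mbuf: %s\n", reason ? reason : "alloc failed");
	else
		printf("mbuf OK\n");

	rte_pktmbuf_free(m);
	rte_eal_cleanup();
	return 0;
}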



* [PATCH v11] net/iavf: add diagnostic support in TX path
  2024-01-05  9:58                           ` [PATCH v10] " Mingjin Ye
  2024-01-09 10:09                             ` [PATCH v11] " Mingjin Ye
@ 2024-01-10  2:25                             ` Mingjin Ye
  2024-02-09 14:43                               ` Burakov, Anatoly
                                                 ` (2 more replies)
  1 sibling, 3 replies; 36+ messages in thread
From: Mingjin Ye @ 2024-01-10  2:25 UTC (permalink / raw)
  To: dev
  Cc: qiming.yang, Mingjin Ye, Yuying Zhang, Beilei Xing, Wenjun Wu,
	Simei Su, Jingjing Wu

Implemented a Tx wrapper that performs thorough checks on mbufs,
categorizing and counting invalid cases by type for diagnostic
purposes. The counts of invalid cases are accessible through xstats_get.

Also, the devarg option "mbuf_check" was introduced to select which
of these diagnostic checks are enabled.

Supported cases: mbuf, size, segment, offload.
 1. mbuf: check for corrupted mbufs.
 2. size: check min/max packet length against the hw spec.
 3. segment: check that the number of mbuf segments does not exceed the hw limit.
 4. offload: check for any unsupported offload flags.

parameter format: "mbuf_check=<case>" or "mbuf_check=[<case1>,<case2>]"
eg: dpdk-testpmd -a 0000:81:01.0,mbuf_check=[mbuf,size] -- -i

Signed-off-by: Mingjin Ye <mingjinx.ye@intel.com>
---
v2: Remove call chain.
---
v3: Optimisation implementation.
---
v4: Fix Windows os compilation error.
---
v5: Split Patch.
---
v6: Remove strict.
---
v9: Modify the description document.
---
v10: Modify vf rst document.
---
v11: Modify commit log.
---
 doc/guides/nics/intel_vf.rst   | 11 ++++
 drivers/net/iavf/iavf.h        | 11 ++++
 drivers/net/iavf/iavf_ethdev.c | 72 +++++++++++++++++++++++++
 drivers/net/iavf/iavf_rxtx.c   | 98 ++++++++++++++++++++++++++++++++++
 drivers/net/iavf/iavf_rxtx.h   |  2 +
 5 files changed, 194 insertions(+)

diff --git a/doc/guides/nics/intel_vf.rst b/doc/guides/nics/intel_vf.rst
index ce96c2e1f8..f62bb4233c 100644
--- a/doc/guides/nics/intel_vf.rst
+++ b/doc/guides/nics/intel_vf.rst
@@ -111,6 +111,17 @@ For more detail on SR-IOV, please refer to the following documents:
     by setting the ``devargs`` parameter like ``-a 18:01.0,no-poll-on-link-down=1``
     when IAVF is backed by an Intel\ |reg| E810 device or an Intel\ |reg| 700 Series Ethernet device.
 
+    When IAVF is backed by an Intel\ |reg| E810 device or an Intel\ |reg| 700 Series Ethernet device,
+    set the ``devargs`` parameter ``mbuf_check`` to enable TX diagnostics. For example,
+    ``-a 18:01.0,mbuf_check=<case>`` or ``-a 18:01.0,mbuf_check=[<case1>,<case2>...]``. Also,
+    ``xstats_get`` can be used to get the error counts, which are collected in ``tx_mbuf_error_packets``
+    xstats. For example, ``testpmd> show port xstats all``. Supported cases:
+
+    *   mbuf: Check for corrupted mbufs.
+    *   size: Check min/max packet length against the hw spec.
+    *   segment: Check that the number of mbuf segments does not exceed the hw limit.
+    *   offload: Check for any unsupported offload flags.
+
 The PCIE host-interface of Intel Ethernet Switch FM10000 Series VF infrastructure
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index ab24cb02c3..824ae4aa02 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -114,9 +114,14 @@ struct iavf_ipsec_crypto_stats {
 	} ierrors;
 };
 
+struct iavf_mbuf_stats {
+	uint64_t tx_pkt_errors;
+};
+
 struct iavf_eth_xstats {
 	struct virtchnl_eth_stats eth_stats;
 	struct iavf_ipsec_crypto_stats ips_stats;
+	struct iavf_mbuf_stats mbuf_stats;
 };
 
 /* Structure that defines a VSI, associated with a adapter. */
@@ -310,6 +315,7 @@ struct iavf_devargs {
 	uint32_t watchdog_period;
 	int auto_reset;
 	int no_poll_on_link_down;
+	uint64_t mbuf_check;
 };
 
 struct iavf_security_ctx;
@@ -353,6 +359,11 @@ enum iavf_tx_burst_type {
 	IAVF_TX_AVX512_CTX_OFFLOAD,
 };
 
+#define IAVF_MBUF_CHECK_F_TX_MBUF        (1ULL << 0)
+#define IAVF_MBUF_CHECK_F_TX_SIZE        (1ULL << 1)
+#define IAVF_MBUF_CHECK_F_TX_SEGMENT     (1ULL << 2)
+#define IAVF_MBUF_CHECK_F_TX_OFFLOAD     (1ULL << 3)
+
 /* Structure to store private data for each VF instance. */
 struct iavf_adapter {
 	struct iavf_hw hw;
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index 1fb876e827..fca57b50b3 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -13,6 +13,7 @@
 #include <inttypes.h>
 #include <rte_byteorder.h>
 #include <rte_common.h>
+#include <rte_os_shim.h>
 
 #include <rte_interrupts.h>
 #include <rte_debug.h>
@@ -39,6 +40,7 @@
 #define IAVF_RESET_WATCHDOG_ARG    "watchdog_period"
 #define IAVF_ENABLE_AUTO_RESET_ARG "auto_reset"
 #define IAVF_NO_POLL_ON_LINK_DOWN_ARG "no-poll-on-link-down"
+#define IAVF_MBUF_CHECK_ARG       "mbuf_check"
 uint64_t iavf_timestamp_dynflag;
 int iavf_timestamp_dynfield_offset = -1;
 int rte_pmd_iavf_tx_lldp_dynfield_offset = -1;
@@ -49,6 +51,7 @@ static const char * const iavf_valid_args[] = {
 	IAVF_RESET_WATCHDOG_ARG,
 	IAVF_ENABLE_AUTO_RESET_ARG,
 	IAVF_NO_POLL_ON_LINK_DOWN_ARG,
+	IAVF_MBUF_CHECK_ARG,
 	NULL
 };
 
@@ -175,6 +178,7 @@ static const struct rte_iavf_xstats_name_off rte_iavf_stats_strings[] = {
 	{"tx_broadcast_packets", _OFF_OF(eth_stats.tx_broadcast)},
 	{"tx_dropped_packets", _OFF_OF(eth_stats.tx_discards)},
 	{"tx_error_packets", _OFF_OF(eth_stats.tx_errors)},
+	{"tx_mbuf_error_packets", _OFF_OF(mbuf_stats.tx_pkt_errors)},
 
 	{"inline_ipsec_crypto_ipackets", _OFF_OF(ips_stats.icount)},
 	{"inline_ipsec_crypto_ibytes", _OFF_OF(ips_stats.ibytes)},
@@ -1841,6 +1845,9 @@ iavf_dev_xstats_reset(struct rte_eth_dev *dev)
 	iavf_dev_stats_reset(dev);
 	memset(&vf->vsi.eth_stats_offset.ips_stats, 0,
 			sizeof(struct iavf_ipsec_crypto_stats));
+	memset(&vf->vsi.eth_stats_offset.mbuf_stats, 0,
+			sizeof(struct iavf_mbuf_stats));
+
 	return 0;
 }
 
@@ -1880,6 +1887,19 @@ iavf_dev_update_ipsec_xstats(struct rte_eth_dev *ethdev,
 	}
 }
 
+static void
+iavf_dev_update_mbuf_stats(struct rte_eth_dev *ethdev,
+		struct iavf_mbuf_stats *mbuf_stats)
+{
+	uint16_t idx;
+	struct iavf_tx_queue *txq;
+
+	for (idx = 0; idx < ethdev->data->nb_tx_queues; idx++) {
+		txq = ethdev->data->tx_queues[idx];
+		mbuf_stats->tx_pkt_errors += txq->mbuf_errors;
+	}
+}
+
 static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
 				 struct rte_eth_xstat *xstats, unsigned int n)
 {
@@ -1908,6 +1928,9 @@ static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
 	if (iavf_ipsec_crypto_supported(adapter))
 		iavf_dev_update_ipsec_xstats(dev, &iavf_xtats.ips_stats);
 
+	if (adapter->devargs.mbuf_check)
+		iavf_dev_update_mbuf_stats(dev, &iavf_xtats.mbuf_stats);
+
 	/* loop over xstats array and values from pstats */
 	for (i = 0; i < IAVF_NB_XSTATS; i++) {
 		xstats[i].id = i;
@@ -2290,6 +2313,50 @@ iavf_parse_watchdog_period(__rte_unused const char *key, const char *value, void
 	return 0;
 }
 
+static int
+iavf_parse_mbuf_check(__rte_unused const char *key, const char *value, void *args)
+{
+	char *cur;
+	char *tmp;
+	int str_len;
+	int valid_len;
+
+	int ret = 0;
+	uint64_t *mc_flags = args;
+	char *str2 = strdup(value);
+	if (str2 == NULL)
+		return -1;
+
+	str_len = strlen(str2);
+	if (str2[0] == '[' && str2[str_len - 1] == ']') {
+		if (str_len < 3) {
+			ret = -1;
+			goto mdd_end;
+		}
+		valid_len = str_len - 2;
+		memmove(str2, str2 + 1, valid_len);
+		memset(str2 + valid_len, '\0', 2);
+	}
+	cur = strtok_r(str2, ",", &tmp);
+	while (cur != NULL) {
+		if (!strcmp(cur, "mbuf"))
+			*mc_flags |= IAVF_MBUF_CHECK_F_TX_MBUF;
+		else if (!strcmp(cur, "size"))
+			*mc_flags |= IAVF_MBUF_CHECK_F_TX_SIZE;
+		else if (!strcmp(cur, "segment"))
+			*mc_flags |= IAVF_MBUF_CHECK_F_TX_SEGMENT;
+		else if (!strcmp(cur, "offload"))
+			*mc_flags |= IAVF_MBUF_CHECK_F_TX_OFFLOAD;
+		else
+			PMD_DRV_LOG(ERR, "Unsupported mdd check type: %s", cur);
+		cur = strtok_r(NULL, ",", &tmp);
+	}
+
+mdd_end:
+	free(str2);
+	return ret;
+}
+
 static int iavf_parse_devargs(struct rte_eth_dev *dev)
 {
 	struct iavf_adapter *ad =
@@ -2344,6 +2411,11 @@ static int iavf_parse_devargs(struct rte_eth_dev *dev)
 		goto bail;
 	}
 
+	ret = rte_kvargs_process(kvlist, IAVF_MBUF_CHECK_ARG,
+				 &iavf_parse_mbuf_check, &ad->devargs.mbuf_check);
+	if (ret)
+		goto bail;
+
 	ret = rte_kvargs_process(kvlist, IAVF_ENABLE_AUTO_RESET_ARG,
 				 &parse_bool, &ad->devargs.auto_reset);
 	if (ret)
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index f044ad3f26..5ba4527ae3 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -3806,6 +3806,97 @@ iavf_xmit_pkts_no_poll(void *tx_queue, struct rte_mbuf **tx_pkts,
 								tx_pkts, nb_pkts);
 }
 
+/* Tx mbuf check */
+static uint16_t
+iavf_xmit_pkts_check(void *tx_queue, struct rte_mbuf **tx_pkts,
+	      uint16_t nb_pkts)
+{
+	uint16_t idx;
+	uint64_t ol_flags;
+	struct rte_mbuf *mb;
+	uint16_t good_pkts = nb_pkts;
+	const char *reason = NULL;
+	bool pkt_error = false;
+	struct iavf_tx_queue *txq = tx_queue;
+	struct iavf_adapter *adapter = txq->vsi->adapter;
+	enum iavf_tx_burst_type tx_burst_type =
+		txq->vsi->adapter->tx_burst_type;
+
+	for (idx = 0; idx < nb_pkts; idx++) {
+		mb = tx_pkts[idx];
+		ol_flags = mb->ol_flags;
+
+		if ((adapter->devargs.mbuf_check & IAVF_MBUF_CHECK_F_TX_MBUF) &&
+			(rte_mbuf_check(mb, 1, &reason) != 0)) {
+			PMD_TX_LOG(ERR, "INVALID mbuf: %s\n", reason);
+			pkt_error = true;
+			break;
+		}
+
+		if ((adapter->devargs.mbuf_check & IAVF_MBUF_CHECK_F_TX_SIZE) &&
+			(mb->data_len < IAVF_TX_MIN_PKT_LEN ||
+			mb->data_len > adapter->vf.max_pkt_len)) {
+			PMD_TX_LOG(ERR, "INVALID mbuf: data_len (%u) is out "
+			"of range, reasonable range (%d - %u)\n", mb->data_len,
+			IAVF_TX_MIN_PKT_LEN, adapter->vf.max_pkt_len);
+			pkt_error = true;
+			break;
+		}
+
+		if (adapter->devargs.mbuf_check & IAVF_MBUF_CHECK_F_TX_SEGMENT) {
+			/* Check condition for nb_segs > IAVF_TX_MAX_MTU_SEG. */
+			if (!(ol_flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG))) {
+				if (mb->nb_segs > IAVF_TX_MAX_MTU_SEG) {
+					PMD_TX_LOG(ERR, "INVALID mbuf: nb_segs (%d) exceeds "
+					"HW limit, maximum allowed value is %d\n", mb->nb_segs,
+					IAVF_TX_MAX_MTU_SEG);
+					pkt_error = true;
+					break;
+				}
+			} else if ((mb->tso_segsz < IAVF_MIN_TSO_MSS) ||
+				(mb->tso_segsz > IAVF_MAX_TSO_MSS)) {
+				/* An MSS outside this range is considered malicious */
+				PMD_TX_LOG(ERR, "INVALID mbuf: tso_segsz (%u) is out "
+				"of range, reasonable range (%d - %u)\n", mb->tso_segsz,
+				IAVF_MIN_TSO_MSS, IAVF_MAX_TSO_MSS);
+				pkt_error = true;
+				break;
+			} else if (mb->nb_segs > txq->nb_tx_desc) {
+				PMD_TX_LOG(ERR, "INVALID mbuf: nb_segs out "
+				"of ring length\n");
+				pkt_error = true;
+				break;
+			}
+		}
+
+		if (adapter->devargs.mbuf_check & IAVF_MBUF_CHECK_F_TX_OFFLOAD) {
+			if (ol_flags & IAVF_TX_OFFLOAD_NOTSUP_MASK) {
+				PMD_TX_LOG(ERR, "INVALID mbuf: TX offload "
+				"is not supported\n");
+				pkt_error = true;
+				break;
+			}
+
+			if (!rte_validate_tx_offload(mb)) {
+				PMD_TX_LOG(ERR, "INVALID mbuf: TX offload "
+				"setup error\n");
+				pkt_error = true;
+				break;
+			}
+		}
+	}
+
+	if (pkt_error) {
+		txq->mbuf_errors++;
+		good_pkts = idx;
+		if (good_pkts == 0)
+			return 0;
+	}
+
+	return iavf_tx_pkt_burst_ops[tx_burst_type](tx_queue,
+								tx_pkts, good_pkts);
+}
+
 /* choose rx function*/
 void
 iavf_set_rx_function(struct rte_eth_dev *dev)
@@ -4051,6 +4142,7 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
 	struct iavf_adapter *adapter =
 		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 	enum iavf_tx_burst_type tx_burst_type;
+	int mbuf_check = adapter->devargs.mbuf_check;
 	int no_poll_on_link_down = adapter->devargs.no_poll_on_link_down;
 #ifdef RTE_ARCH_X86
 	struct iavf_tx_queue *txq;
@@ -4146,6 +4238,9 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
 		if (no_poll_on_link_down) {
 			adapter->tx_burst_type = tx_burst_type;
 			dev->tx_pkt_burst = iavf_xmit_pkts_no_poll;
+		} else if (mbuf_check) {
+			adapter->tx_burst_type = tx_burst_type;
+			dev->tx_pkt_burst = iavf_xmit_pkts_check;
 		} else {
 			dev->tx_pkt_burst = iavf_tx_pkt_burst_ops[tx_burst_type];
 		}
@@ -4162,6 +4257,9 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
 	if (no_poll_on_link_down) {
 		adapter->tx_burst_type = tx_burst_type;
 		dev->tx_pkt_burst = iavf_xmit_pkts_no_poll;
+	} else if (mbuf_check) {
+		adapter->tx_burst_type = tx_burst_type;
+		dev->tx_pkt_burst = iavf_xmit_pkts_check;
 	} else {
 		dev->tx_pkt_burst = iavf_tx_pkt_burst_ops[tx_burst_type];
 	}
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index a8af263d59..7b56076d32 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -306,6 +306,8 @@ struct iavf_tx_queue {
 	uint16_t next_rs;              /* next to check DD,  for VPMD */
 	uint16_t ipsec_crypto_pkt_md_offset;
 
+	uint64_t mbuf_errors;
+
 	bool q_set;                    /* if rx queue has been configured */
 	bool tx_deferred_start;        /* don't start this queue in dev start */
 	const struct iavf_txq_ops *ops;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 36+ messages in thread

* Re: [PATCH v11] net/iavf: add diagnostic support in TX path
  2024-01-10  2:25                             ` Mingjin Ye
@ 2024-02-09 14:43                               ` Burakov, Anatoly
  2024-02-09 15:20                               ` Burakov, Anatoly
  2024-02-19  9:55                               ` [PATCH v12] " Mingjin Ye
  2 siblings, 0 replies; 36+ messages in thread
From: Burakov, Anatoly @ 2024-02-09 14:43 UTC (permalink / raw)
  To: Mingjin Ye, dev
  Cc: qiming.yang, Yuying Zhang, Beilei Xing, Wenjun Wu, Simei Su, Jingjing Wu

On 1/10/2024 3:25 AM, Mingjin Ye wrote:
> Implemented a Tx wrapper to perform a thorough check on mbufs,
> categorizing and counting invalid cases by types for diagnostic
> purposes. The count of invalid cases is accessible through xstats_get.
> 
> Also, the devarg option "mbuf_check" was introduced to configure the
> diagnostic parameters to enable the appropriate diagnostic features.
> 
> supported cases: mbuf, size, segment, offload.
>   1. mbuf: check for corrupted mbuf.
>   2. size: check min/max packet length according to hw spec.
>   3. segment: check number of mbuf segments not exceed hw limitation.
>   4. offload: check any unsupported offload flag.
> 
> parameter format: "mbuf_check=<case>" or "mbuf_check=[<case1>,<case2>]"
> eg: dpdk-testpmd -a 0000:81:01.0,mbuf_check=[mbuf,size] -- -i
> 
> Signed-off-by: Mingjin Ye <mingjinx.ye@intel.com>
> ---

<snip>

> 
> diff --git a/doc/guides/nics/intel_vf.rst b/doc/guides/nics/intel_vf.rst
> index ce96c2e1f8..f62bb4233c 100644
> --- a/doc/guides/nics/intel_vf.rst
> +++ b/doc/guides/nics/intel_vf.rst
> @@ -111,6 +111,17 @@ For more detail on SR-IOV, please refer to the following documents:
>       by setting the ``devargs`` parameter like ``-a 18:01.0,no-poll-on-link-down=1``
>       when IAVF is backed by an Intel\ |reg| E810 device or an Intel\ |reg| 700 Series Ethernet device.
>   
> +    When IAVF is backed by an Intel\ |reg| E810 device or an Intel\ |reg| 700 series Ethernet devices.

This looks like a duplicate of the line above it, so it should probably 
be removed?

> +    Set the ``devargs`` parameter ``mbuf_check`` to enable TX diagnostics. For example,
> +    ``-a 18:01.0,mbuf_check=<case>`` or ``-a 18:01.0,mbuf_check=[<case1>,<case2>...]``. Also,
> +    ``xstats_get`` can be used to get the error counts, which are collected in ``tx_mbuf_error_packets``
> +    xstats. For example, ``testpmd> show port xstats all``. Supported cases:
> +
> +    *   mbuf: Check for corrupted mbuf.
> +    *   size: Check min/max packet length according to hw spec.
> +    *   segment: Check number of mbuf segments not exceed hw limitation.
> +    *   offload: Check any unsupported offload flag.
> +

<snip>

> +
> +	int ret = 0;
> +	uint64_t *mc_flags = args;
> +	char *str2 = strdup(value);
> +	if (str2 == NULL)
> +		return -1;
> +
> +	str_len = strlen(str2);
> +	if (str2[0] == '[' && str2[str_len - 1] == ']') {
> +		if (str_len < 3) {
> +			ret = -1;
> +			goto mdd_end;
> +		}
> +		valid_len = str_len - 2;
> +		memmove(str2, str2 + 1, valid_len);
> +		memset(str2 + valid_len, '\0', 2);
> +	}

I would suggest adding a comment mentioning that we're removing the outer 
square brackets from the value. Even better would be to factor it out 
into a separate function, so that this function is not polluted with the 
implementation details of bracket removal.
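
For illustration, a minimal sketch of such a helper (the name and the
error convention here are hypothetical) might look like:

	/* Strip one pair of enclosing square brackets from str in place.
	 * Returns 0 on success (including when no brackets are present),
	 * -1 on malformed input such as an empty "[]" list.
	 */
	static int
	iavf_strip_outer_brackets(char *str)
	{
		size_t len = strlen(str);

		if (len < 2 || str[0] != '[' || str[len - 1] != ']')
			return 0; /* no enclosing brackets, nothing to do */
		if (len < 3)
			return -1; /* "[]" carries no value */
		memmove(str, str + 1, len - 2);
		str[len - 2] = '\0';
		return 0;
	}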

> +	cur = strtok_r(str2, ",", &tmp);
> +	while (cur != NULL) {
> +		if (!strcmp(cur, "mbuf"))
> +			*mc_flags |= IAVF_MBUF_CHECK_F_TX_MBUF;
> +		else if (!strcmp(cur, "size"))
> +			*mc_flags |= IAVF_MBUF_CHECK_F_TX_SIZE;
> +		else if (!strcmp(cur, "segment"))
> +			*mc_flags |= IAVF_MBUF_CHECK_F_TX_SEGMENT;
> +		else if (!strcmp(cur, "offload"))
> +			*mc_flags |= IAVF_MBUF_CHECK_F_TX_OFFLOAD;
> +		else
> +			PMD_DRV_LOG(ERR, "Unsupported mdd check type: %s", cur);

I suspect 'mdd' is an artifact of earlier versions. What does it stand 
for, and can the error message be made more meaningful?
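
For instance, since the devarg is called mbuf_check, something along
these lines might be clearer:

	PMD_DRV_LOG(ERR, "Unsupported mbuf check type: %s", cur);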

> +		cur = strtok_r(NULL, ",", &tmp);
> +	}
> +
> +mdd_end:
> +	free(str2);
> +	return ret;
> +}
> +
>   static int iavf_parse_devargs(struct rte_eth_dev *dev)
>   {
>   	struct iavf_adapter *ad =
> @@ -2344,6 +2411,11 @@ static int iavf_parse_devargs(struct rte_eth_dev *dev)
>   		goto bail;
>   	}
>   

<snip>

> +		}
> +	}
> +
> +	if (pkt_error) {
> +		txq->mbuf_errors++;
> +		good_pkts = idx;
> +		if (good_pkts == 0)
> +			return 0;
> +	}
> +
> +	return iavf_tx_pkt_burst_ops[tx_burst_type](tx_queue,
> +								tx_pkts, good_pkts);

The indentation here is a bit odd.
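
For instance, wrapping like this (exact style being a maintainer
preference) would read more conventionally:

	return iavf_tx_pkt_burst_ops[tx_burst_type](tx_queue, tx_pkts,
			good_pkts);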


The above suggestions are not blocking, so overall

Acked-by: Anatoly Burakov <anatoly.burakov@intel.com>
-- 
Thanks,
Anatoly


^ permalink raw reply	[flat|nested] 36+ messages in thread

* Re: [PATCH v11] net/iavf: add diagnostic support in TX path
  2024-01-10  2:25                             ` Mingjin Ye
  2024-02-09 14:43                               ` Burakov, Anatoly
@ 2024-02-09 15:20                               ` Burakov, Anatoly
  2024-02-19  9:55                               ` [PATCH v12] " Mingjin Ye
  2 siblings, 0 replies; 36+ messages in thread
From: Burakov, Anatoly @ 2024-02-09 15:20 UTC (permalink / raw)
  To: Mingjin Ye, dev
  Cc: qiming.yang, Yuying Zhang, Beilei Xing, Wenjun Wu, Simei Su, Jingjing Wu

On 1/10/2024 3:25 AM, Mingjin Ye wrote:
> Implemented a Tx wrapper to perform a thorough check on mbufs,
> categorizing and counting invalid cases by types for diagnostic
> purposes. The count of invalid cases is accessible through xstats_get.
> 
> Also, the devarg option "mbuf_check" was introduced to configure the
> diagnostic parameters to enable the appropriate diagnostic features.
> 
> supported cases: mbuf, size, segment, offload.
>   1. mbuf: check for corrupted mbuf.
>   2. size: check min/max packet length according to hw spec.
>   3. segment: check number of mbuf segments not exceed hw limitation.
>   4. offload: check any unsupported offload flag.
> 
> parameter format: "mbuf_check=<case>" or "mbuf_check=[<case1>,<case2>]"
> eg: dpdk-testpmd -a 0000:81:01.0,mbuf_check=[mbuf,size] -- -i
> 
> Signed-off-by: Mingjin Ye <mingjinx.ye@intel.com>
> ---
> v2: Remove call chain.
> ---
> v3: Optimisation implementation.
> ---
> v4: Fix Windows os compilation error.
> ---
> v5: Split Patch.
> ---
> v6: remove strict.
> ---
> v9: Modify the description document.
> ---
> v10: Modify vf rst document.
> ---
> v11: modify comment log.
> ---
Acked-by: Anatoly Burakov <anatoly.burakov@intel.com>

-- 
Thanks,
Anatoly


^ permalink raw reply	[flat|nested] 36+ messages in thread

* [PATCH v12] net/iavf: add diagnostic support in TX path
  2024-01-10  2:25                             ` Mingjin Ye
  2024-02-09 14:43                               ` Burakov, Anatoly
  2024-02-09 15:20                               ` Burakov, Anatoly
@ 2024-02-19  9:55                               ` Mingjin Ye
  2024-02-29 18:38                                 ` Bruce Richardson
  2 siblings, 1 reply; 36+ messages in thread
From: Mingjin Ye @ 2024-02-19  9:55 UTC (permalink / raw)
  To: dev
  Cc: Mingjin Ye, Yuying Zhang, Beilei Xing, Qiming Yang, Wenjun Wu,
	Simei Su, Jingjing Wu

Implemented a Tx wrapper to perform a thorough check on mbufs,
categorizing and counting invalid cases by type for diagnostic
purposes. The count of invalid cases is accessible through xstats_get.

Also, the devarg option "mbuf_check" was introduced to configure which
diagnostic checks are enabled.

Supported cases: mbuf, size, segment, offload.
 1. mbuf: check for corrupted mbuf.
 2. size: check min/max packet length against the HW spec.
 3. segment: check that the number of mbuf segments does not exceed the HW limit.
 4. offload: check for any unsupported offload flag.

Parameter format: "mbuf_check=<case>" or "mbuf_check=[<case1>,<case2>]"
e.g.: dpdk-testpmd -a 0000:81:01.0,mbuf_check=[mbuf,size] -- -i
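
For reference, a minimal sketch (illustrative only, not part of this
patch) of how an application could read the resulting counter through
the standard ethdev xstats API; the buffer size is arbitrary and should
really be taken from a first rte_eth_xstats_get_names() call:

	#include <string.h>
	#include <rte_ethdev.h>

	/* Return the current "tx_mbuf_error_packets" count for a port,
	 * or 0 if the xstat cannot be found or read.
	 */
	static uint64_t
	get_tx_mbuf_errors(uint16_t port_id)
	{
		struct rte_eth_xstat_name names[256];
		struct rte_eth_xstat xstats[256];
		int n, i;

		n = rte_eth_xstats_get_names(port_id, names, 256);
		if (n <= 0 || n > 256)
			return 0;
		if (rte_eth_xstats_get(port_id, xstats, n) != n)
			return 0;
		for (i = 0; i < n; i++)
			if (strcmp(names[i].name, "tx_mbuf_error_packets") == 0)
				return xstats[i].value;
		return 0;
	}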

Signed-off-by: Mingjin Ye <mingjinx.ye@intel.com>
---
v2: Remove call chain.
---
v3: Optimise implementation.
---
v4: Fix Windows OS compilation error.
---
v5: Split patch.
---
v6: Remove strict.
---
v9: Modify the description document.
---
v10: Modify VF rst document.
---
v11: Modify comment log.
---
v12: Fix buggy logs and add necessary notes.
---
 doc/guides/nics/intel_vf.rst   | 11 ++++
 drivers/net/iavf/iavf.h        | 11 ++++
 drivers/net/iavf/iavf_ethdev.c | 79 +++++++++++++++++++++++++++
 drivers/net/iavf/iavf_rxtx.c   | 98 ++++++++++++++++++++++++++++++++++
 drivers/net/iavf/iavf_rxtx.h   |  2 +
 5 files changed, 201 insertions(+)

diff --git a/doc/guides/nics/intel_vf.rst b/doc/guides/nics/intel_vf.rst
index ce96c2e1f8..b84ea214d4 100644
--- a/doc/guides/nics/intel_vf.rst
+++ b/doc/guides/nics/intel_vf.rst
@@ -111,6 +111,17 @@ For more detail on SR-IOV, please refer to the following documents:
     by setting the ``devargs`` parameter like ``-a 18:01.0,no-poll-on-link-down=1``
     when IAVF is backed by an Intel\ |reg| E810 device or an Intel\ |reg| 700 Series Ethernet device.
 
+    When IAVF is backed by an Intel\ |reg| E810 device or an Intel\ |reg| 700 Series Ethernet device,
+    set the ``devargs`` parameter ``mbuf_check`` to enable TX diagnostics. For example,
+    ``-a 18:01.0,mbuf_check=<case>`` or ``-a 18:01.0,mbuf_check=[<case1>,<case2>...]``. Also,
+    ``xstats_get`` can be used to get the error counts, which are collected in the ``tx_mbuf_error_packets``
+    xstats. For example, ``testpmd> show port xstats all``. Supported cases:
+
+    *   mbuf: Check for corrupted mbuf.
+    *   size: Check min/max packet length according to the HW spec.
+    *   segment: Check that the number of mbuf segments does not exceed the HW limit.
+    *   offload: Check for any unsupported offload flag.
+
 The PCIE host-interface of Intel Ethernet Switch FM10000 Series VF infrastructure
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index ab24cb02c3..824ae4aa02 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -114,9 +114,14 @@ struct iavf_ipsec_crypto_stats {
 	} ierrors;
 };
 
+struct iavf_mbuf_stats {
+	uint64_t tx_pkt_errors;
+};
+
 struct iavf_eth_xstats {
 	struct virtchnl_eth_stats eth_stats;
 	struct iavf_ipsec_crypto_stats ips_stats;
+	struct iavf_mbuf_stats mbuf_stats;
 };
 
 /* Structure that defines a VSI, associated with a adapter. */
@@ -310,6 +315,7 @@ struct iavf_devargs {
 	uint32_t watchdog_period;
 	int auto_reset;
 	int no_poll_on_link_down;
+	uint64_t mbuf_check;
 };
 
 struct iavf_security_ctx;
@@ -353,6 +359,11 @@ enum iavf_tx_burst_type {
 	IAVF_TX_AVX512_CTX_OFFLOAD,
 };
 
+#define IAVF_MBUF_CHECK_F_TX_MBUF        (1ULL << 0)
+#define IAVF_MBUF_CHECK_F_TX_SIZE        (1ULL << 1)
+#define IAVF_MBUF_CHECK_F_TX_SEGMENT     (1ULL << 2)
+#define IAVF_MBUF_CHECK_F_TX_OFFLOAD     (1ULL << 3)
+
 /* Structure to store private data for each VF instance. */
 struct iavf_adapter {
 	struct iavf_hw hw;
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index 1fb876e827..3fb255d748 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -13,6 +13,7 @@
 #include <inttypes.h>
 #include <rte_byteorder.h>
 #include <rte_common.h>
+#include <rte_os_shim.h>
 
 #include <rte_interrupts.h>
 #include <rte_debug.h>
@@ -39,6 +40,7 @@
 #define IAVF_RESET_WATCHDOG_ARG    "watchdog_period"
 #define IAVF_ENABLE_AUTO_RESET_ARG "auto_reset"
 #define IAVF_NO_POLL_ON_LINK_DOWN_ARG "no-poll-on-link-down"
+#define IAVF_MBUF_CHECK_ARG       "mbuf_check"
 uint64_t iavf_timestamp_dynflag;
 int iavf_timestamp_dynfield_offset = -1;
 int rte_pmd_iavf_tx_lldp_dynfield_offset = -1;
@@ -49,6 +51,7 @@ static const char * const iavf_valid_args[] = {
 	IAVF_RESET_WATCHDOG_ARG,
 	IAVF_ENABLE_AUTO_RESET_ARG,
 	IAVF_NO_POLL_ON_LINK_DOWN_ARG,
+	IAVF_MBUF_CHECK_ARG,
 	NULL
 };
 
@@ -175,6 +178,7 @@ static const struct rte_iavf_xstats_name_off rte_iavf_stats_strings[] = {
 	{"tx_broadcast_packets", _OFF_OF(eth_stats.tx_broadcast)},
 	{"tx_dropped_packets", _OFF_OF(eth_stats.tx_discards)},
 	{"tx_error_packets", _OFF_OF(eth_stats.tx_errors)},
+	{"tx_mbuf_error_packets", _OFF_OF(mbuf_stats.tx_pkt_errors)},
 
 	{"inline_ipsec_crypto_ipackets", _OFF_OF(ips_stats.icount)},
 	{"inline_ipsec_crypto_ibytes", _OFF_OF(ips_stats.ibytes)},
@@ -1841,6 +1845,9 @@ iavf_dev_xstats_reset(struct rte_eth_dev *dev)
 	iavf_dev_stats_reset(dev);
 	memset(&vf->vsi.eth_stats_offset.ips_stats, 0,
 			sizeof(struct iavf_ipsec_crypto_stats));
+	memset(&vf->vsi.eth_stats_offset.mbuf_stats, 0,
+			sizeof(struct iavf_mbuf_stats));
+
 	return 0;
 }
 
@@ -1880,6 +1887,19 @@ iavf_dev_update_ipsec_xstats(struct rte_eth_dev *ethdev,
 	}
 }
 
+static void
+iavf_dev_update_mbuf_stats(struct rte_eth_dev *ethdev,
+		struct iavf_mbuf_stats *mbuf_stats)
+{
+	uint16_t idx;
+	struct iavf_tx_queue *txq;
+
+	for (idx = 0; idx < ethdev->data->nb_tx_queues; idx++) {
+		txq = ethdev->data->tx_queues[idx];
+		mbuf_stats->tx_pkt_errors += txq->mbuf_errors;
+	}
+}
+
 static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
 				 struct rte_eth_xstat *xstats, unsigned int n)
 {
@@ -1908,6 +1928,9 @@ static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
 	if (iavf_ipsec_crypto_supported(adapter))
 		iavf_dev_update_ipsec_xstats(dev, &iavf_xtats.ips_stats);
 
+	if (adapter->devargs.mbuf_check)
+		iavf_dev_update_mbuf_stats(dev, &iavf_xtats.mbuf_stats);
+
 	/* loop over xstats array and values from pstats */
 	for (i = 0; i < IAVF_NB_XSTATS; i++) {
 		xstats[i].id = i;
@@ -2290,6 +2313,57 @@ iavf_parse_watchdog_period(__rte_unused const char *key, const char *value, void
 	return 0;
 }
 
+static int
+iavf_parse_mbuf_check(__rte_unused const char *key, const char *value, void *args)
+{
+	char *cur;
+	char *tmp;
+	int str_len;
+	int valid_len;
+	int ret = 0;
+	uint64_t *mc_flags = args;
+	char *str2 = strdup(value);
+
+	if (str2 == NULL)
+		return -1;
+
+	str_len = strlen(str2);
+	if (str_len == 0) {
+		ret = -1;
+		goto err_end;
+	}
+
+	/* Try stripping the outer square brackets from the parameter string. */
+	if (str2[0] == '[' && str2[str_len - 1] == ']') {
+		if (str_len < 3) {
+			ret = -1;
+			goto err_end;
+		}
+		valid_len = str_len - 2;
+		memmove(str2, str2 + 1, valid_len);
+		memset(str2 + valid_len, '\0', 2);
+	}
+
+	cur = strtok_r(str2, ",", &tmp);
+	while (cur != NULL) {
+		if (!strcmp(cur, "mbuf"))
+			*mc_flags |= IAVF_MBUF_CHECK_F_TX_MBUF;
+		else if (!strcmp(cur, "size"))
+			*mc_flags |= IAVF_MBUF_CHECK_F_TX_SIZE;
+		else if (!strcmp(cur, "segment"))
+			*mc_flags |= IAVF_MBUF_CHECK_F_TX_SEGMENT;
+		else if (!strcmp(cur, "offload"))
+			*mc_flags |= IAVF_MBUF_CHECK_F_TX_OFFLOAD;
+		else
+			PMD_DRV_LOG(ERR, "Unsupported diagnostic type: %s", cur);
+		cur = strtok_r(NULL, ",", &tmp);
+	}
+
+err_end:
+	free(str2);
+	return ret;
+}
+
 static int iavf_parse_devargs(struct rte_eth_dev *dev)
 {
 	struct iavf_adapter *ad =
@@ -2344,6 +2418,11 @@ static int iavf_parse_devargs(struct rte_eth_dev *dev)
 		goto bail;
 	}
 
+	ret = rte_kvargs_process(kvlist, IAVF_MBUF_CHECK_ARG,
+				 &iavf_parse_mbuf_check, &ad->devargs.mbuf_check);
+	if (ret)
+		goto bail;
+
 	ret = rte_kvargs_process(kvlist, IAVF_ENABLE_AUTO_RESET_ARG,
 				 &parse_bool, &ad->devargs.auto_reset);
 	if (ret)
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index f044ad3f26..b9218d48b7 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -3806,6 +3806,97 @@ iavf_xmit_pkts_no_poll(void *tx_queue, struct rte_mbuf **tx_pkts,
 								tx_pkts, nb_pkts);
 }
 
+/* Tx mbuf check */
+static uint16_t
+iavf_xmit_pkts_check(void *tx_queue, struct rte_mbuf **tx_pkts,
+	      uint16_t nb_pkts)
+{
+	uint16_t idx;
+	uint64_t ol_flags;
+	struct rte_mbuf *mb;
+	uint16_t good_pkts = nb_pkts;
+	const char *reason = NULL;
+	bool pkt_error = false;
+	struct iavf_tx_queue *txq = tx_queue;
+	struct iavf_adapter *adapter = txq->vsi->adapter;
+	enum iavf_tx_burst_type tx_burst_type =
+		txq->vsi->adapter->tx_burst_type;
+
+	for (idx = 0; idx < nb_pkts; idx++) {
+		mb = tx_pkts[idx];
+		ol_flags = mb->ol_flags;
+
+		if ((adapter->devargs.mbuf_check & IAVF_MBUF_CHECK_F_TX_MBUF) &&
+			(rte_mbuf_check(mb, 1, &reason) != 0)) {
+			PMD_TX_LOG(ERR, "INVALID mbuf: %s\n", reason);
+			pkt_error = true;
+			break;
+		}
+
+		if ((adapter->devargs.mbuf_check & IAVF_MBUF_CHECK_F_TX_SIZE) &&
+			(mb->data_len < IAVF_TX_MIN_PKT_LEN ||
+			mb->data_len > adapter->vf.max_pkt_len)) {
+			PMD_TX_LOG(ERR, "INVALID mbuf: data_len (%u) is out "
+			"of range, reasonable range (%d - %u)\n", mb->data_len,
+			IAVF_TX_MIN_PKT_LEN, adapter->vf.max_pkt_len);
+			pkt_error = true;
+			break;
+		}
+
+		if (adapter->devargs.mbuf_check & IAVF_MBUF_CHECK_F_TX_SEGMENT) {
+			/* Check condition for nb_segs > IAVF_TX_MAX_MTU_SEG. */
+			if (!(ol_flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG))) {
+				if (mb->nb_segs > IAVF_TX_MAX_MTU_SEG) {
+					PMD_TX_LOG(ERR, "INVALID mbuf: nb_segs (%d) exceeds "
+					"HW limit, maximum allowed value is %d\n", mb->nb_segs,
+					IAVF_TX_MAX_MTU_SEG);
+					pkt_error = true;
+					break;
+				}
+			} else if ((mb->tso_segsz < IAVF_MIN_TSO_MSS) ||
+				(mb->tso_segsz > IAVF_MAX_TSO_MSS)) {
+				/* An MSS outside this range is considered malicious */
+				PMD_TX_LOG(ERR, "INVALID mbuf: tso_segsz (%u) is out "
+				"of range, reasonable range (%d - %u)\n", mb->tso_segsz,
+				IAVF_MIN_TSO_MSS, IAVF_MAX_TSO_MSS);
+				pkt_error = true;
+				break;
+			} else if (mb->nb_segs > txq->nb_tx_desc) {
+				PMD_TX_LOG(ERR, "INVALID mbuf: nb_segs out "
+				"of ring length\n");
+				pkt_error = true;
+				break;
+			}
+		}
+
+		if (adapter->devargs.mbuf_check & IAVF_MBUF_CHECK_F_TX_OFFLOAD) {
+			if (ol_flags & IAVF_TX_OFFLOAD_NOTSUP_MASK) {
+				PMD_TX_LOG(ERR, "INVALID mbuf: TX offload "
+				"is not supported\n");
+				pkt_error = true;
+				break;
+			}
+
+			if (!rte_validate_tx_offload(mb)) {
+				PMD_TX_LOG(ERR, "INVALID mbuf: TX offload "
+				"setup error\n");
+				pkt_error = true;
+				break;
+			}
+		}
+	}
+
+	if (pkt_error) {
+		txq->mbuf_errors++;
+		good_pkts = idx;
+		if (good_pkts == 0)
+			return 0;
+	}
+
+	return iavf_tx_pkt_burst_ops[tx_burst_type](tx_queue,
+				tx_pkts, good_pkts);
+}
+
 /* choose rx function*/
 void
 iavf_set_rx_function(struct rte_eth_dev *dev)
@@ -4051,6 +4142,7 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
 	struct iavf_adapter *adapter =
 		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 	enum iavf_tx_burst_type tx_burst_type;
+	int mbuf_check = adapter->devargs.mbuf_check;
 	int no_poll_on_link_down = adapter->devargs.no_poll_on_link_down;
 #ifdef RTE_ARCH_X86
 	struct iavf_tx_queue *txq;
@@ -4146,6 +4238,9 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
 		if (no_poll_on_link_down) {
 			adapter->tx_burst_type = tx_burst_type;
 			dev->tx_pkt_burst = iavf_xmit_pkts_no_poll;
+		} else if (mbuf_check) {
+			adapter->tx_burst_type = tx_burst_type;
+			dev->tx_pkt_burst = iavf_xmit_pkts_check;
 		} else {
 			dev->tx_pkt_burst = iavf_tx_pkt_burst_ops[tx_burst_type];
 		}
@@ -4162,6 +4257,9 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
 	if (no_poll_on_link_down) {
 		adapter->tx_burst_type = tx_burst_type;
 		dev->tx_pkt_burst = iavf_xmit_pkts_no_poll;
+	} else if (mbuf_check) {
+		adapter->tx_burst_type = tx_burst_type;
+		dev->tx_pkt_burst = iavf_xmit_pkts_check;
 	} else {
 		dev->tx_pkt_burst = iavf_tx_pkt_burst_ops[tx_burst_type];
 	}
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index a8af263d59..7b56076d32 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -306,6 +306,8 @@ struct iavf_tx_queue {
 	uint16_t next_rs;              /* next to check DD,  for VPMD */
 	uint16_t ipsec_crypto_pkt_md_offset;
 
+	uint64_t mbuf_errors;
+
 	bool q_set;                    /* if rx queue has been configured */
 	bool tx_deferred_start;        /* don't start this queue in dev start */
 	const struct iavf_txq_ops *ops;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 36+ messages in thread

* Re: [PATCH v12] net/iavf: add diagnostic support in TX path
  2024-02-19  9:55                               ` [PATCH v12] " Mingjin Ye
@ 2024-02-29 18:38                                 ` Bruce Richardson
  2024-03-04 12:34                                   ` Bruce Richardson
  0 siblings, 1 reply; 36+ messages in thread
From: Bruce Richardson @ 2024-02-29 18:38 UTC (permalink / raw)
  To: Mingjin Ye
  Cc: dev, Yuying Zhang, Beilei Xing, Qiming Yang, Wenjun Wu, Simei Su,
	Jingjing Wu

On Mon, Feb 19, 2024 at 09:55:14AM +0000, Mingjin Ye wrote:
> Implemented a Tx wrapper to perform a thorough check on mbufs,
> categorizing and counting invalid cases by type for diagnostic
> purposes. The count of invalid cases is accessible through xstats_get.
> 
> Also, the devarg option "mbuf_check" was introduced to configure which
> diagnostic checks are enabled.
> 
> Supported cases: mbuf, size, segment, offload.
>  1. mbuf: check for corrupted mbuf.
>  2. size: check min/max packet length against the HW spec.
>  3. segment: check that the number of mbuf segments does not exceed the HW limit.
>  4. offload: check for any unsupported offload flag.
> 
> Parameter format: "mbuf_check=<case>" or "mbuf_check=[<case1>,<case2>]"
> e.g.: dpdk-testpmd -a 0000:81:01.0,mbuf_check=[mbuf,size] -- -i
> 
> Signed-off-by: Mingjin Ye <mingjinx.ye@intel.com>
> ---
Carrying ack from v11:

Acked-by: Anatoly Burakov <anatoly.burakov@intel.com>

Patch applied, with some minor rework of the docs, to dpdk-next-net-intel.

Thanks,
/Bruce

^ permalink raw reply	[flat|nested] 36+ messages in thread

* Re: [PATCH v12] net/iavf: add diagnostic support in TX path
  2024-02-29 18:38                                 ` Bruce Richardson
@ 2024-03-04 12:34                                   ` Bruce Richardson
  0 siblings, 0 replies; 36+ messages in thread
From: Bruce Richardson @ 2024-03-04 12:34 UTC (permalink / raw)
  To: Mingjin Ye
  Cc: dev, Yuying Zhang, Beilei Xing, Qiming Yang, Wenjun Wu, Simei Su,
	Jingjing Wu

On Thu, Feb 29, 2024 at 06:38:47PM +0000, Bruce Richardson wrote:
> On Mon, Feb 19, 2024 at 09:55:14AM +0000, Mingjin Ye wrote:
> > Implemented a Tx wrapper to perform a thorough check on mbufs,
> > categorizing and counting invalid cases by type for diagnostic
> > purposes. The count of invalid cases is accessible through xstats_get.
> > 
> > Also, the devarg option "mbuf_check" was introduced to configure which
> > diagnostic checks are enabled.
> > 
> > Supported cases: mbuf, size, segment, offload.
> >  1. mbuf: check for corrupted mbuf.
> >  2. size: check min/max packet length against the HW spec.
> >  3. segment: check that the number of mbuf segments does not exceed the HW limit.
> >  4. offload: check for any unsupported offload flag.
> > 
> > Parameter format: "mbuf_check=<case>" or "mbuf_check=[<case1>,<case2>]"
> > e.g.: dpdk-testpmd -a 0000:81:01.0,mbuf_check=[mbuf,size] -- -i
> > 
> > Signed-off-by: Mingjin Ye <mingjinx.ye@intel.com>
> > ---
> Carrying ack from v11:
> 
> Acked-by: Anatoly Burakov <anatoly.burakov@intel.com>
> 
> Patch applied, with some minor rework of the docs, to dpdk-next-net-intel.
> 
Also updated some line wrapping following additional review. Corrected
version pushed now.

/Bruce

^ permalink raw reply	[flat|nested] 36+ messages in thread

end of thread, other threads:[~2024-03-04 12:34 UTC | newest]

Thread overview: 36+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2023-12-21 10:12 [PATCH] net/iavf: add diagnostic support in TX path Mingjin Ye
2023-12-21 12:00 ` Zhang, Qi Z
2023-12-22 10:44 ` [PATCH v2] " Mingjin Ye
2023-12-22 11:37   ` Zhang, Qi Z
2023-12-25  2:48     ` Ye, MingjinX
2023-12-26 10:07   ` [PATCH v3] " Mingjin Ye
2023-12-27 10:16     ` [PATCH v4 1/2] " Mingjin Ye
2023-12-27 11:30       ` Zhang, Qi Z
2023-12-28 10:26       ` [PATCH v5 0/2] net/iavf: add diagnostics and fix error Mingjin Ye
2023-12-28 10:26         ` [PATCH v5 1/2] net/iavf: fix Tx path error in multi-process Mingjin Ye
2023-12-28 10:50           ` Zhang, Qi Z
2023-12-29 10:11           ` [PATCH v6 0/2] net/iavf: fix Rx/Tx burst and add diagnostics Mingjin Ye
2023-12-29 10:11             ` [PATCH v6 1/2] net/iavf: fix Rx/Tx burst in multi-process Mingjin Ye
2023-12-31  6:41               ` Zhang, Qi Z
2024-01-02 10:52               ` [PATCH v7 0/2] net/iavf: fix Rx/Tx burst and add diagnostics Mingjin Ye
2024-01-02 10:52                 ` [PATCH v7 1/2] net/iavf: fix Rx/Tx burst in multi-process Mingjin Ye
2024-01-03  2:22                   ` Zhang, Qi Z
2024-01-02 10:52                 ` [PATCH v7 2/2] net/iavf: add diagnostic support in TX path Mingjin Ye
2024-01-03  2:54                   ` Zhang, Qi Z
2024-01-03 10:10                   ` [PATCH v8 0/2] net/iavf: fix Rx/Tx burst and add diagnostics Mingjin Ye
2024-01-03 10:10                     ` [PATCH v8 1/2] net/iavf: fix Rx/Tx burst in multi-process Mingjin Ye
2024-01-03 10:10                     ` [PATCH v8 2/2] net/iavf: add diagnostic support in TX path Mingjin Ye
2024-01-04 10:18                       ` [PATCH v9 0/2] net/iavf: fix Rx/Tx burst and add diagnostics Mingjin Ye
2024-01-04 10:18                         ` [PATCH v9 1/2] net/iavf: fix Rx/Tx burst in multi-process Mingjin Ye
2024-01-04 10:18                         ` [PATCH v9 2/2] net/iavf: add diagnostic support in TX path Mingjin Ye
2024-01-05  9:58                           ` [PATCH v10] " Mingjin Ye
2024-01-09 10:09                             ` [PATCH v11] " Mingjin Ye
2024-01-10  2:25                             ` Mingjin Ye
2024-02-09 14:43                               ` Burakov, Anatoly
2024-02-09 15:20                               ` Burakov, Anatoly
2024-02-19  9:55                               ` [PATCH v12] " Mingjin Ye
2024-02-29 18:38                                 ` Bruce Richardson
2024-03-04 12:34                                   ` Bruce Richardson
2024-01-05  0:44                       ` [PATCH v8 2/2] " Zhang, Qi Z
2023-12-29 10:11             ` [PATCH v6 " Mingjin Ye
2023-12-28 10:26         ` [PATCH v5 " Mingjin Ye

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).