DPDK patches and discussions
From: Mingjin Ye <mingjinx.ye@intel.com>
To: dev@dpdk.org
Cc: qiming.yang@intel.com, Mingjin Ye <mingjinx.ye@intel.com>,
	Jingjing Wu <jingjing.wu@intel.com>,
	Beilei Xing <beilei.xing@intel.com>
Subject: [PATCH] net/iavf: add diagnostic support in TX path
Date: Thu, 21 Dec 2023 10:12:02 +0000	[thread overview]
Message-ID: <20231221101202.2815069-1-mingjinx.ye@intel.com> (raw)

Currently, the only way to enable diagnostics for the Tx path is to
modify the application source code, which makes faults difficult to
diagnose.

This patch introduces the devargs option "mbuf_check"; its parameters
select which diagnostics to enable.

Supported cases: mbuf, size, segment, offload, strict.
 1. mbuf: check for corrupted mbufs.
 2. size: check that the packet length is within the min/max limits
    of the HW spec.
 3. segment: check that the number of mbuf segments does not exceed
    the HW limit.
 4. offload: check for unsupported offload flags.
 5. strict: check the protocol headers.

Parameter format: mbuf_check=[mbuf,<case1>,<case2>]
e.g.: dpdk-testpmd -a 0000:81:01.0,mbuf_check=[mbuf,size] -- -i
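
When a check fails, the burst is rejected and the per-queue error
counters are updated; the totals are exposed through xstats. An
illustrative sketch (the counter names are the ones added by this
patch; the surrounding testpmd output is elided and may differ):

 testpmd> show port xstats 0
 ...
 mdd_mbuf_error_packets: 0
 mdd_pkt_error_packets: 2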

Signed-off-by: Mingjin Ye <mingjinx.ye@intel.com>
---
 drivers/net/iavf/iavf.h        |  26 ++-
 drivers/net/iavf/iavf_ethdev.c |  74 ++++++++
 drivers/net/iavf/iavf_rxtx.c   | 337 +++++++++++++++++++++++++++++----
 drivers/net/iavf/iavf_rxtx.h   |   8 +
 4 files changed, 402 insertions(+), 43 deletions(-)
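
Note for reviewers: the Tx changes below replace dev->tx_pkt_burst with
a dispatcher that walks a per-port list of registered burst callbacks.
A minimal standalone sketch of that idea, using plain-C stand-ins for
eth_tx_burst_t and the TAILQ-based list (the names here are
illustrative, not taken from the patch):

    #include <stdint.h>

    typedef uint16_t (*tx_burst_t)(void *txq, void **pkts, uint16_t n);

    #define MAX_CB 4
    static tx_burst_t tx_chain[MAX_CB];
    static int tx_chain_len;

    /* Like iavf_tx_pkt_burst_insert(): callbacks run in
     * registration order. */
    static int
    tx_chain_insert(tx_burst_t f)
    {
        if (f == NULL || tx_chain_len == MAX_CB)
            return -1;
        tx_chain[tx_chain_len++] = f;
        return 0;
    }

    /* Like iavf_xmit_pkts_chain(): a stage that does not accept the
     * full burst (e.g. the MDD checker returning 0 on error, or the
     * no-poll stage while the link is down) stops the chain. ret
     * defaults to 0 so an empty chain transmits nothing. */
    static uint16_t
    tx_chain_burst(void *txq, void **pkts, uint16_t n)
    {
        uint16_t ret = 0;
        int i;

        for (i = 0; i < tx_chain_len; i++) {
            ret = tx_chain[i](txq, pkts, n);
            if (ret != n)
                break;
        }
        return ret;
    }

The last callback registered is the original PMD burst function, so on
the success path the chain ends by actually transmitting the packets.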

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 10868f2c30..428b20b8e4 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -113,9 +113,15 @@ struct iavf_ipsec_crypto_stats {
 	} ierrors;
 };
 
+struct iavf_mdd_stats {
+	uint64_t mdd_mbuf_err_count;
+	uint64_t mdd_pkt_err_count;
+};
+
 struct iavf_eth_xstats {
 	struct virtchnl_eth_stats eth_stats;
 	struct iavf_ipsec_crypto_stats ips_stats;
+	struct iavf_mdd_stats mdd_stats;
 };
 
 /* Structure that defines a VSI, associated with a adapter. */
@@ -299,6 +305,7 @@ enum iavf_proto_xtr_type {
 	IAVF_PROTO_XTR_MAX,
 };
 
+
 /**
  * Cache devargs parse result.
  */
@@ -309,10 +316,27 @@ struct iavf_devargs {
 	uint32_t watchdog_period;
 	int auto_reset;
 	int no_poll_on_link_down;
+	int mbuf_check;
 };
 
 struct iavf_security_ctx;
 
+struct iavf_rx_burst_elem {
+	TAILQ_ENTRY(iavf_rx_burst_elem) next;
+	eth_rx_burst_t rx_pkt_burst;
+};
+
+struct iavf_tx_burst_elem {
+	TAILQ_ENTRY(iavf_tx_burst_elem) next;
+	eth_tx_burst_t tx_pkt_burst;
+};
+
+#define IAVF_MDD_CHECK_F_TX_MBUF        (1ULL << 0)
+#define IAVF_MDD_CHECK_F_TX_SIZE        (1ULL << 1)
+#define IAVF_MDD_CHECK_F_TX_SEGMENT     (1ULL << 2)
+#define IAVF_MDD_CHECK_F_TX_OFFLOAD     (1ULL << 3)
+#define IAVF_MDD_CHECK_F_TX_STRICT      (1ULL << 4)
+
 /* Structure to store private data for each VF instance. */
 struct iavf_adapter {
 	struct iavf_hw hw;
@@ -329,7 +353,7 @@ struct iavf_adapter {
 	bool closed;
 	bool no_poll;
 	eth_rx_burst_t rx_pkt_burst;
-	eth_tx_burst_t tx_pkt_burst;
+	uint64_t mc_flags; /* mdd check flags. */
 	uint16_t fdir_ref_cnt;
 	struct iavf_devargs devargs;
 };
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index d1edb0dd5c..f2af228042 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -39,6 +39,8 @@
 #define IAVF_RESET_WATCHDOG_ARG    "watchdog_period"
 #define IAVF_ENABLE_AUTO_RESET_ARG "auto_reset"
 #define IAVF_NO_POLL_ON_LINK_DOWN_ARG "no-poll-on-link-down"
+#define IAVF_MDD_CHECK_ARG       "mbuf_check"
+
 uint64_t iavf_timestamp_dynflag;
 int iavf_timestamp_dynfield_offset = -1;
 
@@ -48,6 +50,7 @@ static const char * const iavf_valid_args[] = {
 	IAVF_RESET_WATCHDOG_ARG,
 	IAVF_ENABLE_AUTO_RESET_ARG,
 	IAVF_NO_POLL_ON_LINK_DOWN_ARG,
+	IAVF_MDD_CHECK_ARG,
 	NULL
 };
 
@@ -188,6 +191,8 @@ static const struct rte_iavf_xstats_name_off rte_iavf_stats_strings[] = {
 			_OFF_OF(ips_stats.ierrors.ipsec_length)},
 	{"inline_ipsec_crypto_ierrors_misc",
 			_OFF_OF(ips_stats.ierrors.misc)},
+	{"mdd_mbuf_error_packets", _OFF_OF(mdd_stats.mdd_mbuf_err_count)},
+	{"mdd_pkt_error_packets", _OFF_OF(mdd_stats.mdd_pkt_err_count)},
 };
 #undef _OFF_OF
 
@@ -1881,6 +1886,9 @@ static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
 {
 	int ret;
 	unsigned int i;
+	struct iavf_tx_queue *txq;
+	uint64_t mdd_mbuf_err_count = 0;
+	uint64_t mdd_pkt_err_count = 0;
 	struct iavf_adapter *adapter =
 		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
@@ -1904,6 +1912,16 @@ static int iavf_dev_xstats_get(struct rte_eth_dev *dev,
 	if (iavf_ipsec_crypto_supported(adapter))
 		iavf_dev_update_ipsec_xstats(dev, &iavf_xtats.ips_stats);
 
+	if (adapter->devargs.mbuf_check) {
+		for (i = 0; i < dev->data->nb_tx_queues; i++) {
+			txq = dev->data->tx_queues[i];
+			mdd_mbuf_err_count += txq->mdd_mbuf_err_count;
+			mdd_pkt_err_count += txq->mdd_pkt_err_count;
+		}
+		iavf_xtats.mdd_stats.mdd_mbuf_err_count = mdd_mbuf_err_count;
+		iavf_xtats.mdd_stats.mdd_pkt_err_count = mdd_pkt_err_count;
+	}
+
 	/* loop over xstats array and values from pstats */
 	for (i = 0; i < IAVF_NB_XSTATS; i++) {
 		xstats[i].id = i;
@@ -2286,6 +2304,52 @@ iavf_parse_watchdog_period(__rte_unused const char *key, const char *value, void
 	return 0;
 }
 
+static int
+iavf_parse_mdd_checker(__rte_unused const char *key, const char *value, void *args)
+{
+	char *cur;
+	char *tmp;
+	int str_len;
+	int valid_len;
+
+	int ret = 0;
+	uint64_t *mc_flags = args;
+	char *str2 = strdup(value);
+	if (str2 == NULL)
+		return -1;
+
+	str_len = strlen(str2);
+	if (str2[0] == '[' && str2[str_len - 1] == ']') {
+		if (str_len < 3) {
+			ret = -1;
+			goto mdd_end;
+		}
+		valid_len = str_len - 2;
+		memmove(str2, str2 + 1, valid_len);
+		memset(str2 + valid_len, '\0', 2);
+	}
+	cur = strtok_r(str2, ",", &tmp);
+	while (cur != NULL) {
+		if (!strcmp(cur, "mbuf"))
+			*mc_flags |= IAVF_MDD_CHECK_F_TX_MBUF;
+		else if (!strcmp(cur, "size"))
+			*mc_flags |= IAVF_MDD_CHECK_F_TX_SIZE;
+		else if (!strcmp(cur, "segment"))
+			*mc_flags |= IAVF_MDD_CHECK_F_TX_SEGMENT;
+		else if (!strcmp(cur, "offload"))
+			*mc_flags |= IAVF_MDD_CHECK_F_TX_OFFLOAD;
+		else if (!strcmp(cur, "strict"))
+			*mc_flags |= IAVF_MDD_CHECK_F_TX_STRICT;
+		else
+			PMD_DRV_LOG(ERR, "Unsupported mdd check type: %s", cur);
+		cur = strtok_r(NULL, ",", &tmp);
+	}
+
+mdd_end:
+	free(str2);
+	return ret;
+}
+
 static int iavf_parse_devargs(struct rte_eth_dev *dev)
 {
 	struct iavf_adapter *ad =
@@ -2340,6 +2404,14 @@ static int iavf_parse_devargs(struct rte_eth_dev *dev)
 		goto bail;
 	}
 
+	ret = rte_kvargs_process(kvlist, IAVF_MDD_CHECK_ARG,
+				 &iavf_parse_mdd_checker, &ad->mc_flags);
+	if (ret)
+		goto bail;
+
+	if (ad->mc_flags)
+		ad->devargs.mbuf_check = 1;
+
 	ret = rte_kvargs_process(kvlist, IAVF_ENABLE_AUTO_RESET_ARG,
 				 &parse_bool, &ad->devargs.auto_reset);
 	if (ret)
@@ -2718,6 +2790,8 @@ iavf_dev_init(struct rte_eth_dev *eth_dev)
 	adapter->dev_data = eth_dev->data;
 	adapter->stopped = 1;
 
+	iavf_pkt_burst_init(eth_dev);
+
 	if (iavf_dev_event_handler_init())
 		goto init_vf_err;
 
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index f19aa14646..9f05cd1eee 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -18,6 +18,7 @@
 #include <rte_malloc.h>
 #include <rte_ether.h>
 #include <ethdev_driver.h>
+#include <rte_tailq.h>
 #include <rte_tcp.h>
 #include <rte_sctp.h>
 #include <rte_udp.h>
@@ -425,6 +426,8 @@ struct iavf_txq_ops iavf_txq_release_mbufs_ops[] = {
 
 };
 
+static struct iavf_pkt_burst iavf_rxtx_pkt_burst[RTE_MAX_ETHPORTS];
+
 static inline void
 iavf_rxd_to_pkt_fields_by_comms_ovs(__rte_unused struct iavf_rx_queue *rxq,
 				    struct rte_mbuf *mb,
@@ -1128,6 +1131,36 @@ iavf_reset_queues(struct rte_eth_dev *dev)
 	}
 }
 
+static void
+iavf_rx_pkt_burst_cleanup(struct rte_eth_dev *dev)
+{
+	struct iavf_pkt_burst *item;
+	struct iavf_rx_burst_elem *pos;
+	struct iavf_rx_burst_elem *save_next;
+
+	item = &iavf_rxtx_pkt_burst[dev->data->port_id];
+
+	RTE_TAILQ_FOREACH_SAFE(pos, &item->rx_burst_list, next, save_next) {
+		TAILQ_REMOVE(&item->rx_burst_list, pos, next);
+		rte_free(pos);
+	}
+}
+
+static void
+iavf_tx_pkt_burst_cleanup(struct rte_eth_dev *dev)
+{
+	struct iavf_pkt_burst *item;
+	struct iavf_tx_burst_elem *pos;
+	struct iavf_tx_burst_elem *save_next;
+
+	item = &iavf_rxtx_pkt_burst[dev->data->port_id];
+
+	RTE_TAILQ_FOREACH_SAFE(pos, &item->tx_burst_list, next, save_next) {
+		TAILQ_REMOVE(&item->tx_burst_list, pos, next);
+		rte_free(pos);
+	}
+}
+
 void
 iavf_stop_queues(struct rte_eth_dev *dev)
 {
@@ -1157,6 +1190,9 @@ iavf_stop_queues(struct rte_eth_dev *dev)
 		PMD_DRV_LOG(WARNING, "Fail to stop queues");
 
 	iavf_reset_queues(dev);
+
+	iavf_rx_pkt_burst_cleanup(dev);
+	iavf_tx_pkt_burst_cleanup(dev);
 }
 
 #define IAVF_RX_FLEX_ERR0_BITS	\
@@ -2503,7 +2539,7 @@ iavf_fill_ctx_desc_tunnelling_field(volatile uint64_t *qw0,
 			eip_typ |= IAVF_TXD_CTX_GRE_TUNNELING;
 			break;
 		default:
-			PMD_TX_LOG(ERR, "Tunnel type not supported");
+			PMD_DRV_LOG(ERR, "Tunnel type not supported");
 			return;
 		}
 
@@ -3027,7 +3063,7 @@ iavf_check_vlan_up2tc(struct iavf_tx_queue *txq, struct rte_mbuf *m)
 	up = m->vlan_tci >> IAVF_VLAN_TAG_PCP_OFFSET;
 
 	if (!(vf->qos_cap->cap[txq->tc].tc_prio & BIT(up))) {
-		PMD_TX_LOG(ERR, "packet with vlan pcp %u cannot transmit in queue %u\n",
+		PMD_DRV_LOG(ERR, "packet with vlan pcp %u cannot transmit in queue %u\n",
 			up, txq->queue_id);
 		return -1;
 	} else {
@@ -3394,13 +3430,13 @@ check_mbuf_len(struct offload_info *info, struct rte_mbuf *m)
 {
 	if (m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) {
 		if (info->outer_l2_len != m->outer_l2_len) {
-			PMD_TX_LOG(ERR, "outer_l2_len error in mbuf. Original "
+			PMD_DRV_LOG(ERR, "outer_l2_len error in mbuf. Original "
 			"length: %hu, calculated length: %u", m->outer_l2_len,
 			info->outer_l2_len);
 			return -1;
 		}
 		if (info->outer_l3_len != m->outer_l3_len) {
-			PMD_TX_LOG(ERR, "outer_l3_len error in mbuf. Original "
+			PMD_DRV_LOG(ERR, "outer_l3_len error in mbuf. Original "
 			"length: %hu,calculated length: %u", m->outer_l3_len,
 			info->outer_l3_len);
 			return -1;
@@ -3408,19 +3444,19 @@ check_mbuf_len(struct offload_info *info, struct rte_mbuf *m)
 	}
 
 	if (info->l2_len != m->l2_len) {
-		PMD_TX_LOG(ERR, "l2_len error in mbuf. Original "
+		PMD_DRV_LOG(ERR, "l2_len error in mbuf. Original "
 		"length: %hu, calculated length: %u", m->l2_len,
 		info->l2_len);
 		return -1;
 	}
 	if (info->l3_len != m->l3_len) {
-		PMD_TX_LOG(ERR, "l3_len error in mbuf. Original "
+		PMD_DRV_LOG(ERR, "l3_len error in mbuf. Original "
 		"length: %hu, calculated length: %u", m->l3_len,
 		info->l3_len);
 		return -1;
 	}
 	if (info->l4_len != m->l4_len) {
-		PMD_TX_LOG(ERR, "l4_len error in mbuf. Original "
+		PMD_DRV_LOG(ERR, "l4_len error in mbuf. Original "
 		"length: %hu, calculated length: %u", m->l4_len,
 		info->l4_len);
 		return -1;
@@ -3438,24 +3474,24 @@ check_ether_type(struct offload_info *info, struct rte_mbuf *m)
 		if (info->outer_ethertype ==
 			rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
 			if (!(m->ol_flags & RTE_MBUF_F_TX_OUTER_IPV4)) {
-				PMD_TX_LOG(ERR, "Outer ethernet type is ipv4, "
+				PMD_DRV_LOG(ERR, "Outer ethernet type is ipv4, "
 				"tx offload missing `RTE_MBUF_F_TX_OUTER_IPV4` flag.");
 				ret = -1;
 			}
 			if (m->ol_flags & RTE_MBUF_F_TX_OUTER_IPV6) {
-				PMD_TX_LOG(ERR, "Outer ethernet type is ipv4, tx "
+				PMD_DRV_LOG(ERR, "Outer ethernet type is ipv4, tx "
 				"offload contains wrong `RTE_MBUF_F_TX_OUTER_IPV6` flag");
 				ret = -1;
 			}
 		} else if (info->outer_ethertype ==
 			rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
 			if (!(m->ol_flags & RTE_MBUF_F_TX_OUTER_IPV6)) {
-				PMD_TX_LOG(ERR, "Outer ethernet type is ipv6, "
+				PMD_DRV_LOG(ERR, "Outer ethernet type is ipv6, "
 				"tx offload missing `RTE_MBUF_F_TX_OUTER_IPV6` flag.");
 				ret = -1;
 			}
 			if (m->ol_flags & RTE_MBUF_F_TX_OUTER_IPV4) {
-				PMD_TX_LOG(ERR, "Outer ethernet type is ipv6, tx "
+				PMD_DRV_LOG(ERR, "Outer ethernet type is ipv6, tx "
 				"offload contains wrong `RTE_MBUF_F_TX_OUTER_IPV4` flag");
 				ret = -1;
 			}
@@ -3465,24 +3501,24 @@ check_ether_type(struct offload_info *info, struct rte_mbuf *m)
 	if (info->ethertype ==
 		rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
 		if (!(m->ol_flags & RTE_MBUF_F_TX_IPV4)) {
-			PMD_TX_LOG(ERR, "Ethernet type is ipv4, tx offload "
+			PMD_DRV_LOG(ERR, "Ethernet type is ipv4, tx offload "
 			"missing `RTE_MBUF_F_TX_IPV4` flag.");
 			ret = -1;
 		}
 		if (m->ol_flags & RTE_MBUF_F_TX_IPV6) {
-			PMD_TX_LOG(ERR, "Ethernet type is ipv4, tx "
+			PMD_DRV_LOG(ERR, "Ethernet type is ipv4, tx "
 			"offload contains wrong `RTE_MBUF_F_TX_IPV6` flag");
 			ret = -1;
 		}
 	} else if (info->ethertype ==
 		rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
 		if (!(m->ol_flags & RTE_MBUF_F_TX_IPV6)) {
-			PMD_TX_LOG(ERR, "Ethernet type is ipv6, tx offload "
+			PMD_DRV_LOG(ERR, "Ethernet type is ipv6, tx offload "
 			"missing `RTE_MBUF_F_TX_IPV6` flag.");
 			ret = -1;
 		}
 		if (m->ol_flags & RTE_MBUF_F_TX_IPV4) {
-			PMD_TX_LOG(ERR, "Ethernet type is ipv6, tx offload "
+			PMD_DRV_LOG(ERR, "Ethernet type is ipv6, tx offload "
 			"contains wrong `RTE_MBUF_F_TX_IPV4` flag");
 			ret = -1;
 		}
@@ -3512,12 +3548,12 @@ iavf_check_mbuf(struct rte_mbuf *m)
 		parse_gtp(udp_hdr, &info);
 		if (info.is_tunnel) {
 			if (!tunnel_type) {
-				PMD_TX_LOG(ERR, "gtp tunnel packet missing tx "
+				PMD_DRV_LOG(ERR, "gtp tunnel packet missing tx "
 				"offload missing `RTE_MBUF_F_TX_TUNNEL_GTP` flag.");
 				return -1;
 			}
 			if (tunnel_type != RTE_MBUF_F_TX_TUNNEL_GTP) {
-				PMD_TX_LOG(ERR, "gtp tunnel packet, tx offload has wrong "
+				PMD_DRV_LOG(ERR, "gtp tunnel packet, tx offload has wrong "
 				"`%s` flag, correct is `RTE_MBUF_F_TX_TUNNEL_GTP` flag",
 				rte_get_tx_ol_flag_name(tunnel_type));
 				return -1;
@@ -3527,12 +3563,12 @@ iavf_check_mbuf(struct rte_mbuf *m)
 		parse_vxlan_gpe(udp_hdr, &info);
 		if (info.is_tunnel) {
 			if (!tunnel_type) {
-				PMD_TX_LOG(ERR, "vxlan gpe tunnel packet missing tx "
+				PMD_DRV_LOG(ERR, "vxlan gpe tunnel packet missing tx "
 				"offload missing `RTE_MBUF_F_TX_TUNNEL_VXLAN_GPE` flag.");
 				return -1;
 			}
 			if (tunnel_type != RTE_MBUF_F_TX_TUNNEL_VXLAN_GPE) {
-				PMD_TX_LOG(ERR, "vxlan gpe tunnel packet, tx offload has "
+				PMD_DRV_LOG(ERR, "vxlan gpe tunnel packet, tx offload has "
 				"wrong `%s` flag, correct is "
 				"`RTE_MBUF_F_TX_TUNNEL_VXLAN_GPE` flag",
 				rte_get_tx_ol_flag_name(tunnel_type));
@@ -3543,12 +3579,12 @@ iavf_check_mbuf(struct rte_mbuf *m)
 		parse_vxlan(udp_hdr, &info);
 		if (info.is_tunnel) {
 			if (!tunnel_type) {
-				PMD_TX_LOG(ERR, "vxlan tunnel packet missing tx "
+				PMD_DRV_LOG(ERR, "vxlan tunnel packet missing tx "
 				"offload missing `RTE_MBUF_F_TX_TUNNEL_VXLAN` flag.");
 				return -1;
 			}
 			if (tunnel_type != RTE_MBUF_F_TX_TUNNEL_VXLAN) {
-				PMD_TX_LOG(ERR, "vxlan tunnel packet, tx offload has "
+				PMD_DRV_LOG(ERR, "vxlan tunnel packet, tx offload has "
 				"wrong `%s` flag, correct is "
 				"`RTE_MBUF_F_TX_TUNNEL_VXLAN` flag",
 				rte_get_tx_ol_flag_name(tunnel_type));
@@ -3559,12 +3595,12 @@ iavf_check_mbuf(struct rte_mbuf *m)
 		parse_geneve(udp_hdr, &info);
 		if (info.is_tunnel) {
 			if (!tunnel_type) {
-				PMD_TX_LOG(ERR, "geneve tunnel packet missing tx "
+				PMD_DRV_LOG(ERR, "geneve tunnel packet missing tx "
 				"offload missing `RTE_MBUF_F_TX_TUNNEL_GENEVE` flag.");
 				return -1;
 			}
 			if (tunnel_type != RTE_MBUF_F_TX_TUNNEL_GENEVE) {
-				PMD_TX_LOG(ERR, "geneve tunnel packet, tx offload has "
+				PMD_DRV_LOG(ERR, "geneve tunnel packet, tx offload has "
 				"wrong `%s` flag, correct is "
 				"`RTE_MBUF_F_TX_TUNNEL_GENEVE` flag",
 				rte_get_tx_ol_flag_name(tunnel_type));
@@ -3575,7 +3611,7 @@ iavf_check_mbuf(struct rte_mbuf *m)
 		/* Always keep last. */
 		if (unlikely(RTE_ETH_IS_TUNNEL_PKT(m->packet_type)
 			!= 0)) {
-			PMD_TX_LOG(ERR, "Unknown tunnel packet. UDP dst port: %hu",
+			PMD_DRV_LOG(ERR, "Unknown tunnel packet. UDP dst port: %hu",
 				udp_hdr->dst_port);
 				return -1;
 		}
@@ -3587,12 +3623,12 @@ iavf_check_mbuf(struct rte_mbuf *m)
 		parse_gre(gre_hdr, &info);
 		if (info.is_tunnel) {
 			if (!tunnel_type) {
-				PMD_TX_LOG(ERR, "gre tunnel packet missing tx "
+				PMD_DRV_LOG(ERR, "gre tunnel packet missing tx "
 				"offload missing `RTE_MBUF_F_TX_TUNNEL_GRE` flag.");
 				return -1;
 			}
 			if (tunnel_type != RTE_MBUF_F_TX_TUNNEL_GRE) {
-				PMD_TX_LOG(ERR, "gre tunnel packet, tx offload has "
+				PMD_DRV_LOG(ERR, "gre tunnel packet, tx offload has "
 				"wrong `%s` flag, correct is "
 				"`RTE_MBUF_F_TX_TUNNEL_GRE` flag",
 				rte_get_tx_ol_flag_name(tunnel_type));
@@ -3607,12 +3643,12 @@ iavf_check_mbuf(struct rte_mbuf *m)
 		parse_encap_ip(encap_ip_hdr, &info);
 		if (info.is_tunnel) {
 			if (!tunnel_type) {
-				PMD_TX_LOG(ERR, "Ipip tunnel packet missing tx "
+				PMD_DRV_LOG(ERR, "Ipip tunnel packet missing tx "
 				"offload missing `RTE_MBUF_F_TX_TUNNEL_IPIP` flag.");
 				return -1;
 			}
 			if (tunnel_type != RTE_MBUF_F_TX_TUNNEL_IPIP) {
-				PMD_TX_LOG(ERR, "Ipip tunnel packet, tx offload has "
+				PMD_DRV_LOG(ERR, "Ipip tunnel packet, tx offload has "
 				"wrong `%s` flag, correct is "
 				"`RTE_MBUF_F_TX_TUNNEL_IPIP` flag",
 				rte_get_tx_ol_flag_name(tunnel_type));
@@ -3629,6 +3665,103 @@ iavf_check_mbuf(struct rte_mbuf *m)
 	return check_ether_type(&info, m);
 }
 
+/* Tx MDD check */
+static uint16_t
+iavf_xmit_pkts_mdd(void *tx_queue, struct rte_mbuf **tx_pkts,
+	      uint16_t nb_pkts)
+{
+	struct iavf_tx_queue *txq = tx_queue;
+	struct rte_mbuf *mb;
+	uint16_t idx;
+	const char *reason = NULL;
+	struct iavf_adapter *adapter = txq->vsi->adapter;
+	uint64_t mdd_mbuf_err_count = 0;
+	uint64_t mdd_pkt_err_count = 0;
+	uint64_t ol_flags;
+
+	for (idx = 0; idx < nb_pkts; idx++) {
+		mb = tx_pkts[idx];
+		ol_flags = mb->ol_flags;
+
+		if ((adapter->mc_flags & IAVF_MDD_CHECK_F_TX_MBUF) &&
+			(rte_mbuf_check(mb, 1, &reason) != 0)) {
+			PMD_DRV_LOG(ERR, "INVALID mbuf: %s\n", reason);
+			mdd_mbuf_err_count++;
+			continue;
+		}
+
+		if ((adapter->mc_flags & IAVF_MDD_CHECK_F_TX_SIZE) &&
+			(mb->data_len < IAVF_TX_MIN_PKT_LEN ||
+			mb->data_len > adapter->vf.max_pkt_len)) {
+			PMD_DRV_LOG(ERR, "INVALID mbuf: data_len (%u) is out "
+			"of range, reasonable range (%d - %u)\n", mb->data_len,
+			IAVF_TX_MIN_PKT_LEN, adapter->vf.max_pkt_len);
+			mdd_pkt_err_count++;
+			continue;
+		}
+
+		if (adapter->mc_flags & IAVF_MDD_CHECK_F_TX_SEGMENT) {
+			/* Check condition for nb_segs > IAVF_TX_MAX_MTU_SEG. */
+			if (!(ol_flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG))) {
+				if (mb->nb_segs > IAVF_TX_MAX_MTU_SEG) {
+					PMD_DRV_LOG(ERR, "INVALID mbuf: nb_segs (%d) exceeds "
+					"HW limit, maximum allowed value is %d\n", mb->nb_segs,
+					IAVF_TX_MAX_MTU_SEG);
+					mdd_pkt_err_count++;
+					continue;
+				}
+			} else if ((mb->tso_segsz < IAVF_MIN_TSO_MSS) ||
+				(mb->tso_segsz > IAVF_MAX_TSO_MSS)) {
+				/* An MSS outside the range is considered malicious */
+				PMD_DRV_LOG(ERR, "INVALID mbuf: tso_segsz (%u) is out "
+				"of range, reasonable range (%d - %u)\n", mb->tso_segsz,
+				IAVF_MIN_TSO_MSS, IAVF_MAX_TSO_MSS);
+				mdd_pkt_err_count++;
+				continue;
+			} else if (mb->nb_segs > txq->nb_tx_desc) {
+				PMD_DRV_LOG(ERR, "INVALID mbuf: nb_segs out "
+				"of ring length\n");
+				mdd_pkt_err_count++;
+				continue;
+			}
+		}
+
+		if (adapter->mc_flags & IAVF_MDD_CHECK_F_TX_OFFLOAD) {
+			if (ol_flags & IAVF_TX_OFFLOAD_NOTSUP_MASK) {
+				PMD_DRV_LOG(ERR, "INVALID mbuf: TX offload "
+				"is not supported\n");
+				mdd_pkt_err_count++;
+				continue;
+			}
+
+			if (!rte_validate_tx_offload(mb)) {
+				PMD_DRV_LOG(ERR, "INVALID mbuf: TX offload "
+				"setup error\n");
+				mdd_pkt_err_count++;
+				continue;
+			}
+		}
+
+		if (adapter->mc_flags & IAVF_MDD_CHECK_F_TX_STRICT &&
+			iavf_check_mbuf(mb)) {
+			mdd_pkt_err_count++;
+			continue;
+		}
+	}
+
+	if (mdd_mbuf_err_count || mdd_pkt_err_count) {
+		if (mdd_mbuf_err_count)
+			rte_atomic_fetch_add_explicit(&txq->mdd_mbuf_err_count,
+					mdd_mbuf_err_count, rte_memory_order_release);
+		if (mdd_pkt_err_count)
+			rte_atomic_fetch_add_explicit(&txq->mdd_pkt_err_count,
+					mdd_pkt_err_count, rte_memory_order_release);
+		return 0;
+	}
+
+	return idx;
+}
+
 /* TX prep functions */
 uint16_t
 iavf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
@@ -3720,15 +3853,135 @@ iavf_recv_pkts_no_poll(void *rx_queue, struct rte_mbuf **rx_pkts,
 }
 
 static uint16_t
-iavf_xmit_pkts_no_poll(void *tx_queue, struct rte_mbuf **tx_pkts,
+iavf_xmit_pkts_no_poll(void *tx_queue, __rte_unused struct rte_mbuf **tx_pkts,
 				uint16_t nb_pkts)
 {
 	struct iavf_tx_queue *txq = tx_queue;
 	if (!txq->vsi || txq->vsi->adapter->no_poll)
 		return 0;
 
-	return txq->vsi->adapter->tx_pkt_burst(tx_queue,
-								tx_pkts, nb_pkts);
+	return nb_pkts;
+}
+
+static int __rte_unused
+iavf_rx_pkt_burst_insert(struct rte_eth_dev *dev, eth_rx_burst_t func)
+{
+	struct iavf_rx_burst_elem *elem;
+	struct iavf_pkt_burst *item;
+
+	if (!func) {
+		PMD_DRV_LOG(ERR, "RX functions cannot be NULL");
+		return -1;
+	}
+
+	elem = rte_malloc(NULL, sizeof(*elem), 0);
+	if (!elem) {
+		PMD_DRV_LOG(ERR, "Unable to allocate memory");
+		return -1;
+	}
+
+	item = &iavf_rxtx_pkt_burst[dev->data->port_id];
+	elem->rx_pkt_burst = func;
+	TAILQ_INSERT_TAIL(&item->rx_burst_list, elem, next);
+
+	return 0;
+}
+
+static int
+iavf_tx_pkt_burst_insert(struct rte_eth_dev *dev, eth_tx_burst_t func)
+{
+	struct iavf_tx_burst_elem *elem;
+	struct iavf_pkt_burst *item;
+
+	if (!func) {
+		PMD_DRV_LOG(ERR, "TX functions cannot be NULL");
+		return -1;
+	}
+
+	elem = rte_malloc(NULL, sizeof(*elem), 0);
+	if (!elem) {
+		PMD_DRV_LOG(ERR, "Unable to allocate memory");
+		return -1;
+	}
+
+	item = &iavf_rxtx_pkt_burst[dev->data->port_id];
+	elem->tx_pkt_burst = func;
+	TAILQ_INSERT_TAIL(&item->tx_burst_list, elem, next);
+
+	return 0;
+}
+
+static uint16_t
+iavf_xmit_pkts_chain(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+	struct iavf_tx_queue *txq = tx_queue;
+	struct iavf_adapter *adapter = txq->vsi->adapter;
+	struct iavf_tx_burst_elem *pos;
+	struct iavf_tx_burst_elem *save_next;
+	struct iavf_pkt_burst *item;
+	uint16_t ret = 0;
+
+	item = &iavf_rxtx_pkt_burst[adapter->dev_data->port_id];
+	RTE_TAILQ_FOREACH_SAFE(pos, &item->tx_burst_list, next, save_next) {
+		ret = pos->tx_pkt_burst(tx_queue, tx_pkts, nb_pkts);
+		if (nb_pkts != ret)
+			break;
+	}
+
+	return ret;
+}
+
+/* choose tx interceptors */
+static void
+iavf_set_tx_interceptors(struct rte_eth_dev *dev)
+{
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	eth_tx_burst_t tx_pkt_burst;
+	int err;
+	uint16_t mdd_check = adapter->devargs.mbuf_check;
+	uint16_t no_poll_on_link_down = adapter->devargs.no_poll_on_link_down;
+
+	if (!mdd_check && !no_poll_on_link_down)
+		return;
+
+	/* Replace tx_pkt_burst in struct rte_eth_dev so that the
+	 * default Tx path is intercepted; all registered callbacks
+	 * are dispatched from iavf_xmit_pkts_chain.
+	 */
+	tx_pkt_burst = dev->tx_pkt_burst;
+	dev->tx_pkt_burst = iavf_xmit_pkts_chain;
+
+	/* Register all interceptors. Note that the registration
+	 * order determines the call order.
+	 */
+	if (mdd_check) {
+		err = iavf_tx_pkt_burst_insert(dev, iavf_xmit_pkts_mdd);
+		if (!err)
+			PMD_DRV_LOG(DEBUG, "Register diagnostics Tx callback (port=%d).",
+					    dev->data->port_id);
+		else
+			PMD_DRV_LOG(ERR, "Failed to register diagnostics TX callback (port %d).",
+					    dev->data->port_id);
+	}
+
+	if (no_poll_on_link_down) {
+		err = iavf_tx_pkt_burst_insert(dev, iavf_xmit_pkts_no_poll);
+		if (!err)
+			PMD_DRV_LOG(DEBUG, "Register no poll Tx callback (port=%d).",
+					    dev->data->port_id);
+		else
+			PMD_DRV_LOG(ERR, "Failed to register no poll TX callback (port %d).",
+					    dev->data->port_id);
+	}
+
+	err = iavf_tx_pkt_burst_insert(dev, tx_pkt_burst);
+	if (!err)
+		PMD_DRV_LOG(DEBUG, "Register PMD Tx callback (port=%d).",
+					dev->data->port_id);
+	else
+		PMD_DRV_LOG(ERR, "Failed to register PMD TX callback (port %d).",
+					dev->data->port_id);
 }
 
 /* choose rx function*/
@@ -3973,9 +4226,6 @@ iavf_set_rx_function(struct rte_eth_dev *dev)
 void
 iavf_set_tx_function(struct rte_eth_dev *dev)
 {
-	struct iavf_adapter *adapter =
-		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
-	int no_poll_on_link_down = adapter->devargs.no_poll_on_link_down;
 #ifdef RTE_ARCH_X86
 	struct iavf_tx_queue *txq;
 	int i;
@@ -4062,10 +4312,7 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
 #endif
 		}
 
-		if (no_poll_on_link_down) {
-			adapter->tx_pkt_burst = dev->tx_pkt_burst;
-			dev->tx_pkt_burst = iavf_xmit_pkts_no_poll;
-		}
+		iavf_set_tx_interceptors(dev);
 		return;
 	}
 
@@ -4076,10 +4323,16 @@ iavf_set_tx_function(struct rte_eth_dev *dev)
 	dev->tx_pkt_burst = iavf_xmit_pkts;
 	dev->tx_pkt_prepare = iavf_prep_pkts;
 
-	if (no_poll_on_link_down) {
-		adapter->tx_pkt_burst = dev->tx_pkt_burst;
-		dev->tx_pkt_burst = iavf_xmit_pkts_no_poll;
-	}
+	iavf_set_tx_interceptors(dev);
+}
+
+void iavf_pkt_burst_init(struct rte_eth_dev *dev)
+{
+	struct iavf_pkt_burst *item;
+
+	item = &iavf_rxtx_pkt_burst[dev->data->port_id];
+	TAILQ_INIT(&item->rx_burst_list);
+	TAILQ_INIT(&item->tx_burst_list);
 }
 
 static int
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index f432f9d956..fa948aeb95 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -203,6 +203,10 @@ struct iavf_txq_ops {
 	void (*release_mbufs)(struct iavf_tx_queue *txq);
 };
 
+struct iavf_pkt_burst {
+	TAILQ_HEAD(rx_pkt_burst_list, iavf_rx_burst_elem) rx_burst_list;
+	TAILQ_HEAD(tx_pkt_burst_list, iavf_tx_burst_elem) tx_burst_list;
+};
 
 struct iavf_rx_queue_stats {
 	uint64_t reserved;
@@ -297,6 +301,9 @@ struct iavf_tx_queue {
 	uint16_t next_rs;              /* next to check DD,  for VPMD */
 	uint16_t ipsec_crypto_pkt_md_offset;
 
+	uint64_t mdd_mbuf_err_count;
+	uint64_t mdd_pkt_err_count;
+
 	bool q_set;                    /* if rx queue has been configured */
 	bool tx_deferred_start;        /* don't start this queue in dev start */
 	const struct iavf_txq_ops *ops;
@@ -669,6 +676,7 @@ uint16_t iavf_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		       uint16_t nb_pkts);
 void iavf_set_rx_function(struct rte_eth_dev *dev);
 void iavf_set_tx_function(struct rte_eth_dev *dev);
+void iavf_pkt_burst_init(struct rte_eth_dev *dev);
 void iavf_dev_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 			  struct rte_eth_rxq_info *qinfo);
 void iavf_dev_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
-- 
2.25.1



Thread overview: 36+ messages
2023-12-21 10:12 Mingjin Ye [this message]
2023-12-21 12:00 ` Zhang, Qi Z
2023-12-22 10:44 ` [PATCH v2] " Mingjin Ye
2023-12-22 11:37   ` Zhang, Qi Z
2023-12-25  2:48     ` Ye, MingjinX
2023-12-26 10:07   ` [PATCH v3] " Mingjin Ye
2023-12-27 10:16     ` [PATCH v4 1/2] " Mingjin Ye
2023-12-27 11:30       ` Zhang, Qi Z
2023-12-28 10:26       ` [PATCH v5 0/2] net/iavf: add diagnostics and fix error Mingjin Ye
2023-12-28 10:26         ` [PATCH v5 1/2] net/iavf: fix Tx path error in multi-process Mingjin Ye
2023-12-28 10:50           ` Zhang, Qi Z
2023-12-29 10:11           ` [PATCH v6 0/2] net/iavf: fix Rx/Tx burst and add diagnostics Mingjin Ye
2023-12-29 10:11             ` [PATCH v6 1/2] net/iavf: fix Rx/Tx burst in multi-process Mingjin Ye
2023-12-31  6:41               ` Zhang, Qi Z
2024-01-02 10:52               ` [PATCH v7 0/2] net/iavf: fix Rx/Tx burst and add diagnostics Mingjin Ye
2024-01-02 10:52                 ` [PATCH v7 1/2] net/iavf: fix Rx/Tx burst in multi-process Mingjin Ye
2024-01-03  2:22                   ` Zhang, Qi Z
2024-01-02 10:52                 ` [PATCH v7 2/2] net/iavf: add diagnostic support in TX path Mingjin Ye
2024-01-03  2:54                   ` Zhang, Qi Z
2024-01-03 10:10                   ` [PATCH v8 0/2] net/iavf: fix Rx/Tx burst and add diagnostics Mingjin Ye
2024-01-03 10:10                     ` [PATCH v8 1/2] net/iavf: fix Rx/Tx burst in multi-process Mingjin Ye
2024-01-03 10:10                     ` [PATCH v8 2/2] net/iavf: add diagnostic support in TX path Mingjin Ye
2024-01-04 10:18                       ` [PATCH v9 0/2] net/iavf: fix Rx/Tx burst and add diagnostics Mingjin Ye
2024-01-04 10:18                         ` [PATCH v9 1/2] net/iavf: fix Rx/Tx burst in multi-process Mingjin Ye
2024-01-04 10:18                         ` [PATCH v9 2/2] net/iavf: add diagnostic support in TX path Mingjin Ye
2024-01-05  9:58                           ` [PATCH v10] " Mingjin Ye
2024-01-09 10:09                             ` [PATCH v11] " Mingjin Ye
2024-01-10  2:25                             ` Mingjin Ye
2024-02-09 14:43                               ` Burakov, Anatoly
2024-02-09 15:20                               ` Burakov, Anatoly
2024-02-19  9:55                               ` [PATCH v12] " Mingjin Ye
2024-02-29 18:38                                 ` Bruce Richardson
2024-03-04 12:34                                   ` Bruce Richardson
2024-01-05  0:44                       ` [PATCH v8 2/2] " Zhang, Qi Z
2023-12-29 10:11             ` [PATCH v6 " Mingjin Ye
2023-12-28 10:26         ` [PATCH v5 " Mingjin Ye
