From: Mingjin Ye <mingjinx.ye@intel.com>
To: dev@dpdk.org
Cc: qiming.yang@intel.com, yidingx.zhou@intel.com,
	stephen@networkplumber.org, Mingjin Ye <mingjinx.ye@intel.com>,
	Jingjing Wu <jingjing.wu@intel.com>,
	Beilei Xing <beilei.xing@intel.com>,
	Bruce Richardson <bruce.richardson@intel.com>,
	Konstantin Ananyev <konstantin.v.ananyev@yandex.ru>
Subject: [POC v3] net/iavf: support no data path polling mode
Date: Fri, 21 Jul 2023 09:57:25 +0000	[thread overview]
Message-ID: <20230721095725.454894-1-mingjinx.ye@intel.com> (raw)
In-Reply-To: <20230720100815.440778-1-mingjinx.ye@intel.com>

Currently, during a PF-to-VF reset triggered by an action such as
changing the trust setting on a VF, a DPDK application running on the
iavf PMD loses connectivity, and the only way to recover is to restart
the application.

Instead of forcing a restart of the DPDK application to restore
connectivity, the iavf PMD handles the PF-to-VF reset event by
performing all the necessary steps to bring the VF back online.

To minimize downtime, a devarg "no-poll-on-link-down" is introduced in
the iavf PMD. When this flag is set, the PMD switches to no-poll mode
whenever the link goes down: the Rx/Tx burst functions immediately
return 0. When the link comes back up, the PMD switches back to the
normal Rx/Tx burst paths.
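
As a rough illustration (the PCI address below is only an example, and
the same devarg can equally be passed through the EAL allow-list, e.g.
"-a <BDF>,no-poll-on-link-down=1"), the option could be supplied at
hot-plug time like this:

    #include <rte_dev.h>

    /* Attach the VF with the new devarg. The address is illustrative;
     * presence of the "no-poll-on-link-down" key is what enables the
     * mode.
     */
    static int
    attach_vf_no_poll(void)
    {
    	return rte_dev_probe("0000:18:01.0,no-poll-on-link-down=1");
    }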

NOTE: The DPDK application needs to handle the
RTE_ETH_EVENT_INTR_RESET event posted by the iavf PMD and reset the
VF upon receiving it.
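
A minimal sketch of such an application-side handler (assuming the
usual pattern of deferring the actual reset from the event callback,
which runs in the interrupt thread, to the main lcore) could look like
this:

    #include <stdbool.h>
    #include <stdio.h>
    #include <rte_common.h>
    #include <rte_ethdev.h>

    static volatile bool vf_reset_pending;

    /* Only record the event here; the reset itself is done from the
     * main loop.
     */
    static int
    vf_event_cb(uint16_t port_id, enum rte_eth_event_type event,
    	    void *cb_arg, void *ret_param)
    {
    	RTE_SET_USED(cb_arg);
    	RTE_SET_USED(ret_param);

    	if (event == RTE_ETH_EVENT_INTR_RESET) {
    		printf("port %u: VF reset requested by PF\n", port_id);
    		vf_reset_pending = true;
    	}
    	return 0;
    }

    static void
    register_reset_handler(uint16_t port_id)
    {
    	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_RESET,
    				      vf_event_cb, NULL);
    }

    /* Called from the main loop once vf_reset_pending is observed. */
    static void
    handle_vf_reset(uint16_t port_id)
    {
    	rte_eth_dev_stop(port_id);
    	rte_eth_dev_reset(port_id);
    	/* Reconfigure the port and queues as at startup, then
    	 * restart.
    	 */
    	rte_eth_dev_start(port_id);
    	vf_reset_pending = false;
    }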

Signed-off-by: Mingjin Ye <mingjinx.ye@intel.com>
---
V2: Add IAVF_NO_POLL_ON_LINK_DOWN_ARG macro to iavf_valid_args.
---
V3: Improve commit log.
---
 drivers/net/iavf/iavf.h                 |  2 ++
 drivers/net/iavf/iavf_ethdev.c          | 11 +++++++
 drivers/net/iavf/iavf_rxtx.c            | 29 +++++++++++++++--
 drivers/net/iavf/iavf_rxtx.h            |  1 +
 drivers/net/iavf/iavf_rxtx_vec_avx2.c   | 29 ++++++++++++++---
 drivers/net/iavf/iavf_rxtx_vec_avx512.c | 42 ++++++++++++++++++++++---
 drivers/net/iavf/iavf_rxtx_vec_sse.c    | 21 ++++++++++++-
 drivers/net/iavf/iavf_vchnl.c           | 17 ++++++++++
 8 files changed, 140 insertions(+), 12 deletions(-)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 98861e4242..30b05d25b6 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -305,6 +305,7 @@ struct iavf_devargs {
 	uint8_t proto_xtr[IAVF_MAX_QUEUE_NUM];
 	uint16_t quanta_size;
 	uint32_t watchdog_period;
+	uint16_t no_poll_on_link_down;
 };
 
 struct iavf_security_ctx;
@@ -323,6 +324,7 @@ struct iavf_adapter {
 	uint32_t ptype_tbl[IAVF_MAX_PKT_TYPE] __rte_cache_min_aligned;
 	bool stopped;
 	bool closed;
+	bool no_poll;
 	uint16_t fdir_ref_cnt;
 	struct iavf_devargs devargs;
 };
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index ac7154d720..c922c64838 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -37,6 +37,7 @@
 #define IAVF_PROTO_XTR_ARG         "proto_xtr"
 #define IAVF_QUANTA_SIZE_ARG       "quanta_size"
 #define IAVF_RESET_WATCHDOG_ARG    "watchdog_period"
+#define IAVF_NO_POLL_ON_LINK_DOWN_ARG       "no-poll-on-link-down"
 
 uint64_t iavf_timestamp_dynflag;
 int iavf_timestamp_dynfield_offset = -1;
@@ -45,6 +46,7 @@ static const char * const iavf_valid_args[] = {
 	IAVF_PROTO_XTR_ARG,
 	IAVF_QUANTA_SIZE_ARG,
 	IAVF_RESET_WATCHDOG_ARG,
+	IAVF_NO_POLL_ON_LINK_DOWN_ARG,
 	NULL
 };
 
@@ -2237,6 +2239,7 @@ static int iavf_parse_devargs(struct rte_eth_dev *dev)
 	struct rte_kvargs *kvlist;
 	int ret;
 	int watchdog_period = -1;
+	uint16_t no_poll_on_link_down;
 
 	if (!devargs)
 		return 0;
@@ -2270,6 +2273,14 @@ static int iavf_parse_devargs(struct rte_eth_dev *dev)
 	else
 		ad->devargs.watchdog_period = watchdog_period;
 
+	no_poll_on_link_down = rte_kvargs_count(kvlist,
+		IAVF_NO_POLL_ON_LINK_DOWN_ARG);
+
+	if (no_poll_on_link_down == 0)
+		ad->devargs.no_poll_on_link_down = 0;
+	else
+		ad->devargs.no_poll_on_link_down = 1;
+
 	if (ad->devargs.quanta_size != 0 &&
 	    (ad->devargs.quanta_size < 256 || ad->devargs.quanta_size > 4096 ||
 	     ad->devargs.quanta_size & 0x40)) {
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index f7df4665d1..447e306fee 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -770,6 +770,7 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
 		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 	struct iavf_info *vf =
 		IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+	struct iavf_vsi *vsi = &vf->vsi;
 	struct iavf_tx_queue *txq;
 	const struct rte_memzone *mz;
 	uint32_t ring_size;
@@ -843,6 +844,7 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	txq->port_id = dev->data->port_id;
 	txq->offloads = offloads;
 	txq->tx_deferred_start = tx_conf->tx_deferred_start;
+	txq->vsi = vsi;
 
 	if (iavf_ipsec_crypto_supported(adapter))
 		txq->ipsec_crypto_pkt_md_offset =
@@ -1406,9 +1408,12 @@ iavf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 	uint64_t pkt_flags;
 	const uint32_t *ptype_tbl;
 
+	rxq = rx_queue;
+	if (!rxq->vsi || rxq->vsi->adapter->no_poll)
+		return 0;
+
 	nb_rx = 0;
 	nb_hold = 0;
-	rxq = rx_queue;
 	rx_id = rxq->rx_tail;
 	rx_ring = rxq->rx_ring;
 	ptype_tbl = rxq->vsi->adapter->ptype_tbl;
@@ -1515,9 +1520,12 @@ iavf_recv_pkts_flex_rxd(void *rx_queue,
 	const uint32_t *ptype_tbl;
 	uint64_t ts_ns;
 
+	rxq = rx_queue;
+	if (!rxq->vsi || rxq->vsi->adapter->no_poll)
+		return 0;
+
 	nb_rx = 0;
 	nb_hold = 0;
-	rxq = rx_queue;
 	rx_id = rxq->rx_tail;
 	rx_ring = rxq->rx_ring;
 	ptype_tbl = rxq->vsi->adapter->ptype_tbl;
@@ -1641,6 +1649,9 @@ iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
 	volatile union iavf_rx_flex_desc *rxdp;
 	const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
 
+	if (!rxq->vsi || rxq->vsi->adapter->no_poll)
+		return 0;
+
 	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
 		uint64_t sw_cur_time = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
 
@@ -1818,6 +1829,9 @@ iavf_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	volatile union iavf_rx_desc *rxdp;
 	const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
 
+	if (!rxq->vsi || rxq->vsi->adapter->no_poll)
+		return 0;
+
 	while (nb_rx < nb_pkts) {
 		rxdp = &rx_ring[rx_id];
 		qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
@@ -1973,6 +1987,9 @@ iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq,
 	const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
 	uint64_t ts_ns;
 
+	if (!rxq->vsi || rxq->vsi->adapter->no_poll)
+		return 0;
+
 	rxdp = (volatile union iavf_rx_flex_desc *)&rxq->rx_ring[rxq->rx_tail];
 	rxep = &rxq->sw_ring[rxq->rx_tail];
 
@@ -2104,6 +2121,9 @@ iavf_rx_scan_hw_ring(struct iavf_rx_queue *rxq, struct rte_mbuf **rx_pkts, uint1
 	uint64_t pkt_flags;
 	const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
 
+	if (!rxq->vsi || rxq->vsi->adapter->no_poll)
+		return 0;
+
 	rxdp = &rxq->rx_ring[rxq->rx_tail];
 	rxep = &rxq->sw_ring[rxq->rx_tail];
 
@@ -2281,6 +2301,9 @@ rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 	struct iavf_rx_queue *rxq = (struct iavf_rx_queue *)rx_queue;
 	uint16_t nb_rx = 0;
 
+	if (!rxq->vsi || rxq->vsi->adapter->no_poll)
+		return 0;
+
 	if (!nb_pkts)
 		return 0;
 
@@ -2768,6 +2791,8 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 	uint16_t idx;
 	uint16_t slen;
 
+	if (!txq->vsi || txq->vsi->adapter->no_poll)
+		return 0;
 
 	/* Check if the descriptor ring needs to be cleaned. */
 	if (txq->nb_free < txq->free_thresh)
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index 605ea3f824..d3324e0e6e 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -288,6 +288,7 @@ struct iavf_tx_queue {
 	uint16_t free_thresh;
 	uint16_t rs_thresh;
 	uint8_t rel_mbufs_type;
+	struct iavf_vsi *vsi; /**< the VSI this queue belongs to */
 
 	uint16_t port_id;
 	uint16_t queue_id;
diff --git a/drivers/net/iavf/iavf_rxtx_vec_avx2.c b/drivers/net/iavf/iavf_rxtx_vec_avx2.c
index c10f24036e..98316bed24 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_avx2.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_avx2.c
@@ -26,8 +26,7 @@ _iavf_recv_raw_pkts_vec_avx2(struct iavf_rx_queue *rxq,
 {
 #define IAVF_DESCS_PER_LOOP_AVX 8
 
-	/* const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl; */
-	const uint32_t *type_table = rxq->vsi->adapter->ptype_tbl;
+	const uint32_t *type_table;
 
 	const __m256i mbuf_init = _mm256_set_epi64x(0, 0,
 			0, rxq->mbuf_initializer);
@@ -36,6 +35,11 @@ _iavf_recv_raw_pkts_vec_avx2(struct iavf_rx_queue *rxq,
 	volatile union iavf_rx_desc *rxdp = rxq->rx_ring + rxq->rx_tail;
 	const int avx_aligned = ((rxq->rx_tail & 1) == 0);
 
+	if (!rxq->vsi || rxq->vsi->adapter->no_poll)
+		return 0;
+
+	type_table = rxq->vsi->adapter->ptype_tbl;
+
 	rte_prefetch0(rxdp);
 
 	/* nb_pkts has to be floor-aligned to IAVF_DESCS_PER_LOOP_AVX */
@@ -530,12 +534,12 @@ _iavf_recv_raw_pkts_vec_avx2_flex_rxd(struct iavf_rx_queue *rxq,
 {
 #define IAVF_DESCS_PER_LOOP_AVX 8
 
-	struct iavf_adapter *adapter = rxq->vsi->adapter;
+	struct iavf_adapter *adapter;
 
 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
-	uint64_t offloads = adapter->dev_data->dev_conf.rxmode.offloads;
+	uint64_t offloads;
 #endif
-	const uint32_t *type_table = adapter->ptype_tbl;
+	const uint32_t *type_table;
 
 	const __m256i mbuf_init = _mm256_set_epi64x(0, 0,
 			0, rxq->mbuf_initializer);
@@ -543,6 +547,15 @@ _iavf_recv_raw_pkts_vec_avx2_flex_rxd(struct iavf_rx_queue *rxq,
 	volatile union iavf_rx_flex_desc *rxdp =
 		(union iavf_rx_flex_desc *)rxq->rx_ring + rxq->rx_tail;
 
+	if (!rxq->vsi || rxq->vsi->adapter->no_poll)
+		return 0;
+
+	adapter = rxq->vsi->adapter;
+#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
+	offloads = adapter->dev_data->dev_conf.rxmode.offloads;
+#endif
+	type_table = adapter->ptype_tbl;
+
 	rte_prefetch0(rxdp);
 
 	/* nb_pkts has to be floor-aligned to IAVF_DESCS_PER_LOOP_AVX */
@@ -1774,6 +1787,9 @@ iavf_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
 	uint64_t flags = IAVF_TX_DESC_CMD_EOP | IAVF_TX_DESC_CMD_ICRC;
 	uint64_t rs = IAVF_TX_DESC_CMD_RS | flags;
 
+	if (!txq->vsi || txq->vsi->adapter->no_poll)
+		return 0;
+
 	if (txq->nb_free < txq->free_thresh)
 		iavf_tx_free_bufs(txq);
 
@@ -1834,6 +1850,9 @@ iavf_xmit_pkts_vec_avx2_common(void *tx_queue, struct rte_mbuf **tx_pkts,
 	uint16_t nb_tx = 0;
 	struct iavf_tx_queue *txq = (struct iavf_tx_queue *)tx_queue;
 
+	if (!txq->vsi || txq->vsi->adapter->no_poll)
+		return 0;
+
 	while (nb_pkts) {
 		uint16_t ret, num;
 
diff --git a/drivers/net/iavf/iavf_rxtx_vec_avx512.c b/drivers/net/iavf/iavf_rxtx_vec_avx512.c
index 3e66df5341..8de739434c 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_avx512.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_avx512.c
@@ -45,7 +45,7 @@ _iavf_recv_raw_pkts_vec_avx512(struct iavf_rx_queue *rxq,
 			       bool offload)
 {
 #ifdef IAVF_RX_PTYPE_OFFLOAD
-	const uint32_t *type_table = rxq->vsi->adapter->ptype_tbl;
+	const uint32_t *type_table;
 #endif
 
 	const __m256i mbuf_init = _mm256_set_epi64x(0, 0, 0,
@@ -53,6 +53,13 @@ _iavf_recv_raw_pkts_vec_avx512(struct iavf_rx_queue *rxq,
 	struct rte_mbuf **sw_ring = &rxq->sw_ring[rxq->rx_tail];
 	volatile union iavf_rx_desc *rxdp = rxq->rx_ring + rxq->rx_tail;
 
+	if (!rxq->vsi || rxq->vsi->adapter->no_poll)
+		return 0;
+
+#ifdef IAVF_RX_PTYPE_OFFLOAD
+	type_table = rxq->vsi->adapter->ptype_tbl;
+#endif
+
 	rte_prefetch0(rxdp);
 
 	/* nb_pkts has to be floor-aligned to IAVF_DESCS_PER_LOOP_AVX */
@@ -588,12 +595,12 @@ _iavf_recv_raw_pkts_vec_avx512_flex_rxd(struct iavf_rx_queue *rxq,
 					uint8_t *split_packet,
 					bool offload)
 {
-	struct iavf_adapter *adapter = rxq->vsi->adapter;
+	struct iavf_adapter *adapter;
 #ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
-	uint64_t offloads = adapter->dev_data->dev_conf.rxmode.offloads;
+	uint64_t offloads;
 #endif
 #ifdef IAVF_RX_PTYPE_OFFLOAD
-	const uint32_t *type_table = adapter->ptype_tbl;
+	const uint32_t *type_table;
 #endif
 
 	const __m256i mbuf_init = _mm256_set_epi64x(0, 0, 0,
@@ -602,6 +609,17 @@ _iavf_recv_raw_pkts_vec_avx512_flex_rxd(struct iavf_rx_queue *rxq,
 	volatile union iavf_rx_flex_desc *rxdp =
 		(union iavf_rx_flex_desc *)rxq->rx_ring + rxq->rx_tail;
 
+	if (!rxq->vsi || rxq->vsi->adapter->no_poll)
+		return 0;
+
+	adapter = rxq->vsi->adapter;
+#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
+	offloads = adapter->dev_data->dev_conf.rxmode.offloads;
+#endif
+#ifdef IAVF_RX_PTYPE_OFFLOAD
+	type_table = adapter->ptype_tbl;
+#endif
+
 	rte_prefetch0(rxdp);
 
 	/* nb_pkts has to be floor-aligned to IAVF_DESCS_PER_LOOP_AVX */
@@ -1700,6 +1718,10 @@ iavf_recv_scattered_pkts_vec_avx512_cmn(void *rx_queue, struct rte_mbuf **rx_pkt
 					uint16_t nb_pkts, bool offload)
 {
 	uint16_t retval = 0;
+	struct iavf_rx_queue *rxq = rx_queue;
+
+	if (!rxq->vsi || rxq->vsi->adapter->no_poll)
+		return 0;
 
 	while (nb_pkts > IAVF_VPMD_RX_MAX_BURST) {
 		uint16_t burst = iavf_recv_scattered_burst_vec_avx512(rx_queue,
@@ -2303,6 +2325,9 @@ iavf_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
 	uint64_t flags = IAVF_TX_DESC_CMD_EOP | IAVF_TX_DESC_CMD_ICRC;
 	uint64_t rs = IAVF_TX_DESC_CMD_RS | flags;
 
+	if (!txq->vsi || txq->vsi->adapter->no_poll)
+		return 0;
+
 	if (txq->nb_free < txq->free_thresh)
 		iavf_tx_free_bufs_avx512(txq);
 
@@ -2370,6 +2395,9 @@ iavf_xmit_fixed_burst_vec_avx512_ctx(void *tx_queue, struct rte_mbuf **tx_pkts,
 	uint64_t flags = IAVF_TX_DESC_CMD_EOP | IAVF_TX_DESC_CMD_ICRC;
 	uint64_t rs = IAVF_TX_DESC_CMD_RS | flags;
 
+	if (!txq->vsi || txq->vsi->adapter->no_poll)
+		return 0;
+
 	if (txq->nb_free < txq->free_thresh)
 		iavf_tx_free_bufs_avx512(txq);
 
@@ -2432,6 +2460,9 @@ iavf_xmit_pkts_vec_avx512_cmn(void *tx_queue, struct rte_mbuf **tx_pkts,
 	uint16_t nb_tx = 0;
 	struct iavf_tx_queue *txq = (struct iavf_tx_queue *)tx_queue;
 
+	if (!txq->vsi || txq->vsi->adapter->no_poll)
+		return 0;
+
 	while (nb_pkts) {
 		uint16_t ret, num;
 
@@ -2498,6 +2529,9 @@ iavf_xmit_pkts_vec_avx512_ctx_cmn(void *tx_queue, struct rte_mbuf **tx_pkts,
 	uint16_t nb_tx = 0;
 	struct iavf_tx_queue *txq = (struct iavf_tx_queue *)tx_queue;
 
+	if (!txq->vsi || txq->vsi->adapter->no_poll)
+		return 0;
+
 	while (nb_pkts) {
 		uint16_t ret, num;
 
diff --git a/drivers/net/iavf/iavf_rxtx_vec_sse.c b/drivers/net/iavf/iavf_rxtx_vec_sse.c
index 892bfa4cf3..4007ce6f6f 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_sse.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_sse.c
@@ -479,7 +479,12 @@ _recv_raw_pkts_vec(struct iavf_rx_queue *rxq, struct rte_mbuf **rx_pkts,
 	int pos;
 	uint64_t var;
 	__m128i shuf_msk;
-	const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
+	const uint32_t *ptype_tbl;
+
+	if (!rxq->vsi || rxq->vsi->adapter->no_poll)
+		return 0;
+
+	ptype_tbl = rxq->vsi->adapter->ptype_tbl;
 
 	__m128i crc_adjust = _mm_set_epi16(
 				0, 0, 0,    /* ignore non-length fields */
@@ -1198,6 +1203,11 @@ uint16_t
 iavf_recv_pkts_vec_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
 			    uint16_t nb_pkts)
 {
+	struct iavf_rx_queue *rxq = rx_queue;
+
+	if (!rxq->vsi)
+		return 0;
+
 	return _recv_raw_pkts_vec_flex_rxd(rx_queue, rx_pkts, nb_pkts, NULL);
 }
 
@@ -1215,6 +1225,9 @@ iavf_recv_scattered_burst_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
 	uint8_t split_flags[IAVF_VPMD_RX_MAX_BURST] = {0};
 	unsigned int i = 0;
 
+	if (!rxq->vsi || rxq->vsi->adapter->no_poll)
+		return 0;
+
 	/* get some new buffers */
 	uint16_t nb_bufs = _recv_raw_pkts_vec(rxq, rx_pkts, nb_pkts,
 					      split_flags);
@@ -1284,6 +1297,9 @@ iavf_recv_scattered_burst_vec_flex_rxd(void *rx_queue,
 	uint8_t split_flags[IAVF_VPMD_RX_MAX_BURST] = {0};
 	unsigned int i = 0;
 
+	if (!rxq->vsi || rxq->vsi->adapter->no_poll)
+		return 0;
+
 	/* get some new buffers */
 	uint16_t nb_bufs = _recv_raw_pkts_vec_flex_rxd(rxq, rx_pkts, nb_pkts,
 					      split_flags);
@@ -1437,6 +1453,9 @@ iavf_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 	uint16_t nb_tx = 0;
 	struct iavf_tx_queue *txq = (struct iavf_tx_queue *)tx_queue;
 
+	if (!txq->vsi || txq->vsi->adapter->no_poll)
+		return 0;
+
 	while (nb_pkts) {
 		uint16_t ret, num;
 
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 524732f67d..2d0b7bddb3 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -263,6 +263,14 @@ iavf_read_msg_from_pf(struct iavf_adapter *adapter, uint16_t buf_len,
 				if (!vf->link_up)
 					iavf_dev_watchdog_enable(adapter);
 			}
+			if (vf->link_up && adapter->no_poll) {
+				adapter->no_poll = false;
+				PMD_DRV_LOG(DEBUG, "IAVF no-poll turned off");
+			}
+			if (!vf->link_up && adapter->devargs.no_poll_on_link_down) {
+				adapter->no_poll = true;
+				PMD_DRV_LOG(DEBUG, "IAVF no-poll turned on");
+			}
 			PMD_DRV_LOG(INFO, "Link status update:%s",
 					vf->link_up ? "up" : "down");
 			break;
@@ -465,6 +473,15 @@ iavf_handle_pf_event_msg(struct rte_eth_dev *dev, uint8_t *msg,
 			if (!vf->link_up)
 				iavf_dev_watchdog_enable(adapter);
 		}
+		if (vf->link_up && adapter->no_poll) {
+			adapter->no_poll = false;
+			PMD_DRV_LOG(DEBUG, "IAVF no-poll turned off");
+		}
+		if (!vf->link_up && adapter->devargs.no_poll_on_link_down) {
+			adapter->no_poll = true;
+			PMD_DRV_LOG(DEBUG, "IAVF no-poll turned on");
+		}
+
 		iavf_dev_event_post(dev, RTE_ETH_EVENT_INTR_LSC, NULL, 0);
 		break;
 	case VIRTCHNL_EVENT_PF_DRIVER_CLOSE:
-- 
2.25.1

