DPDK patches and discussions
 help / color / mirror / Atom feed
From: Kaiwen Deng <kaiwenx.deng@intel.com>
To: dev@dpdk.org
Cc: stable@dpdk.org, qiming.yang@intel.com, yidingx.zhou@intel.com,
	Kaiwen Deng <kaiwenx.deng@intel.com>,
	Jingjing Wu <jingjing.wu@intel.com>,
	Beilei Xing <beilei.xing@intel.com>,
	Qi Zhang <qi.z.zhang@intel.com>
Subject: [PATCH] net/iavf: fix iavf query stats in intr thread
Date: Wed, 22 Feb 2023 12:40:01 +0800	[thread overview]
Message-ID: <20230222044001.1241845-1-kaiwenx.deng@intel.com> (raw)

When iavf sends the query-stats command in the eal-intr-thread through
the virtual channel, no response will be received from
iavf_dev_virtchnl_handler for this command while blocking and waiting,
because iavf_dev_virtchnl_handler is also registered in the eal-intr-thread.

When a VF device is bonded in BONDING_MODE_TLB mode, the slave-update
callback is registered as an alarm and invoked by the eal-intr-thread,
which raises the same issue.

This commit makes iavf_dev_stats_get return the locally cached stats
immediately when it is called from the eal-intr-thread, and updates
those local stats in the iavf-virtchnl-thread.

Fixes: cb5c1b91f76f ("net/iavf: add thread for event callbacks")
Fixes: 22b123a36d07 ("net/avf: initialize PMD")
Cc: stable@dpdk.org

Signed-off-by: Kaiwen Deng <kaiwenx.deng@intel.com>
---
 drivers/net/iavf/iavf.h        |   9 ++-
 drivers/net/iavf/iavf_ethdev.c |  24 ++++++--
 drivers/net/iavf/iavf_vchnl.c  | 107 ++++++++++++++++++++++++---------
 3 files changed, 104 insertions(+), 36 deletions(-)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 1edebab8dc..3a249b90a3 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -128,6 +128,7 @@ struct iavf_vsi {
 	uint16_t base_vector;
 	uint16_t msix_intr;      /* The MSIX interrupt binds to VSI */
 	struct iavf_eth_xstats eth_stats_offset;
+	struct virtchnl_eth_stats eth_stats;
 };
 
 struct rte_flow;
@@ -325,6 +326,8 @@ struct iavf_adapter {
 	struct iavf_devargs devargs;
 };
 
+typedef void (*virtchnl_callback)(struct rte_eth_dev *dev, void *args);
+
 /* IAVF_DEV_PRIVATE_TO */
 #define IAVF_DEV_PRIVATE_TO_ADAPTER(adapter) \
 	((struct iavf_adapter *)adapter)
@@ -424,8 +427,10 @@ _atomic_set_async_response_cmd(struct iavf_info *vf, enum virtchnl_ops ops)
 }
 int iavf_check_api_version(struct iavf_adapter *adapter);
 int iavf_get_vf_resource(struct iavf_adapter *adapter);
-void iavf_dev_event_handler_fini(void);
-int iavf_dev_event_handler_init(void);
+void iavf_dev_virtchnl_handler_fini(void);
+void iavf_dev_virtchnl_callback_post(struct rte_eth_dev *dev,
+			 virtchnl_callback cb, void *args);
+int iavf_dev_virtchnl_handler_init(void);
 void iavf_handle_virtchnl_msg(struct rte_eth_dev *dev);
 int iavf_enable_vlan_strip(struct iavf_adapter *adapter);
 int iavf_disable_vlan_strip(struct iavf_adapter *adapter);
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index 3196210f2c..fcbab5b26a 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -1729,6 +1729,17 @@ iavf_update_stats(struct iavf_vsi *vsi, struct virtchnl_eth_stats *nes)
 	iavf_stat_update_32(&oes->tx_discards, &nes->tx_discards);
 }
 
+static void iavf_dev_stats_get_callback(struct rte_eth_dev *dev, void *args)
+{
+	struct virtchnl_eth_stats *eth_stats = (struct virtchnl_eth_stats *)args;
+	struct virtchnl_eth_stats *pstats = NULL;
+	struct iavf_adapter *adapter =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	int ret = iavf_query_stats(adapter, &pstats);
+	if (ret == 0)
+		rte_memcpy(eth_stats, pstats, sizeof(struct virtchnl_eth_stats));
+}
+
 static int
 iavf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 {
@@ -1738,8 +1749,13 @@ iavf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 	struct iavf_vsi *vsi = &vf->vsi;
 	struct virtchnl_eth_stats *pstats = NULL;
 	int ret;
-
-	ret = iavf_query_stats(adapter, &pstats);
+	if (rte_thread_is_intr()) {
+		pstats = &vsi->eth_stats;
+		iavf_dev_virtchnl_callback_post(dev, iavf_dev_stats_get_callback, (void *)pstats);
+		ret = 0;
+	} else {
+		ret = iavf_query_stats(adapter, &pstats);
+	}
 	if (ret == 0) {
 		uint8_t crc_stats_len = (dev->data->dev_conf.rxmode.offloads &
 					 RTE_ETH_RX_OFFLOAD_KEEP_CRC) ? 0 :
@@ -2634,7 +2650,7 @@ iavf_dev_init(struct rte_eth_dev *eth_dev)
 	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
 			&eth_dev->data->mac_addrs[0]);
 
-	if (iavf_dev_event_handler_init())
+	if (iavf_dev_virtchnl_handler_init())
 		goto init_vf_err;
 
 	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) {
@@ -2791,7 +2807,7 @@ iavf_dev_uninit(struct rte_eth_dev *dev)
 
 	iavf_dev_close(dev);
 
-	iavf_dev_event_handler_fini();
+	iavf_dev_virtchnl_handler_fini();
 
 	return 0;
 }
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index f92daf97f2..4136c97c45 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -31,24 +31,36 @@
 
 #define MAX_EVENT_PENDING 16
 
-struct iavf_event_element {
-	TAILQ_ENTRY(iavf_event_element) next;
+struct iavf_virtchnl_element {
+	TAILQ_ENTRY(iavf_virtchnl_element) next;
 	struct rte_eth_dev *dev;
-	enum rte_eth_event_type event;
-	void *param;
-	size_t param_alloc_size;
-	uint8_t param_alloc_data[0];
+	enum iavf_virchnl_handle_type {
+		EVENT_TYPE = 0,
+		CALL_TYPE
+	} handle_type;
+	union {
+		struct event_param {
+			enum rte_eth_event_type event;
+			void *param;
+			size_t param_alloc_size;
+			uint8_t param_alloc_data[0];
+		} ep;
+		struct call_param {
+			virtchnl_callback cb;
+			void *args;
+		} cp;
+	};
 };
 
-struct iavf_event_handler {
+struct iavf_virtchnl_handler {
 	uint32_t ndev;
 	pthread_t tid;
 	int fd[2];
 	pthread_mutex_t lock;
-	TAILQ_HEAD(event_list, iavf_event_element) pending;
+	TAILQ_HEAD(event_list, iavf_virtchnl_element) pending;
 };
 
-static struct iavf_event_handler event_handler = {
+static struct iavf_virtchnl_handler event_handler = {
 	.fd = {-1, -1},
 };
 
@@ -60,10 +72,10 @@ static struct iavf_event_handler event_handler = {
 #endif
 
 static void *
-iavf_dev_event_handle(void *param __rte_unused)
+iavf_dev_virtchnl_handle(void *param __rte_unused)
 {
-	struct iavf_event_handler *handler = &event_handler;
-	TAILQ_HEAD(event_list, iavf_event_element) pending;
+	struct iavf_virtchnl_handler *handler = &event_handler;
+	TAILQ_HEAD(event_list, iavf_virtchnl_element) pending;
 
 	while (true) {
 		char unused[MAX_EVENT_PENDING];
@@ -76,10 +88,22 @@ iavf_dev_event_handle(void *param __rte_unused)
 		TAILQ_CONCAT(&pending, &handler->pending, next);
 		pthread_mutex_unlock(&handler->lock);
 
-		struct iavf_event_element *pos, *save_next;
+		struct iavf_virtchnl_element *pos, *save_next;
 		TAILQ_FOREACH_SAFE(pos, &pending, next, save_next) {
 			TAILQ_REMOVE(&pending, pos, next);
-			rte_eth_dev_callback_process(pos->dev, pos->event, pos->param);
+
+			switch (pos->handle_type) {
+			case EVENT_TYPE:
+				rte_eth_dev_callback_process(pos->dev,
+					pos->ep.event, pos->ep.param);
+				break;
+			case CALL_TYPE:
+				pos->cp.cb(pos->dev, pos->cp.args);
+				break;
+			default:
+				break;
+			}
+
 			rte_free(pos);
 		}
 	}
@@ -92,19 +116,20 @@ iavf_dev_event_post(struct rte_eth_dev *dev,
 		enum rte_eth_event_type event,
 		void *param, size_t param_alloc_size)
 {
-	struct iavf_event_handler *handler = &event_handler;
+	struct iavf_virtchnl_handler *handler = &event_handler;
 	char notify_byte;
-	struct iavf_event_element *elem = rte_malloc(NULL, sizeof(*elem) + param_alloc_size, 0);
+	struct iavf_virtchnl_element *elem = rte_malloc(NULL, sizeof(*elem) + param_alloc_size, 0);
 	if (!elem)
 		return;
-
+	elem->handle_type = EVENT_TYPE;
+	struct event_param *ep = &elem->ep;
 	elem->dev = dev;
-	elem->event = event;
-	elem->param = param;
-	elem->param_alloc_size = param_alloc_size;
+	ep->event = event;
+	ep->param = param;
+	ep->param_alloc_size = param_alloc_size;
 	if (param && param_alloc_size) {
-		rte_memcpy(elem->param_alloc_data, param, param_alloc_size);
-		elem->param = elem->param_alloc_data;
+		rte_memcpy(ep->param_alloc_data, param, param_alloc_size);
+		ep->param = ep->param_alloc_data;
 	}
 
 	pthread_mutex_lock(&handler->lock);
@@ -115,10 +140,32 @@ iavf_dev_event_post(struct rte_eth_dev *dev,
 	RTE_SET_USED(nw);
 }
 
+void
+iavf_dev_virtchnl_callback_post(struct rte_eth_dev *dev, virtchnl_callback cb, void *args)
+{
+	struct iavf_virtchnl_handler *handler = &event_handler;
+	char notify_byte;
+	struct iavf_virtchnl_element *elem = rte_malloc(NULL, sizeof(*elem), 0);
+	if (!elem)
+		return;
+	elem->dev = dev;
+	elem->handle_type = CALL_TYPE;
+	struct call_param *cp = &elem->cp;
+	cp->cb = cb;
+	cp->args = args;
+
+	pthread_mutex_lock(&handler->lock);
+	TAILQ_INSERT_TAIL(&handler->pending, elem, next);
+	pthread_mutex_unlock(&handler->lock);
+
+	ssize_t nw = write(handler->fd[1], &notify_byte, 1);
+	RTE_SET_USED(nw);
+}
+
 int
-iavf_dev_event_handler_init(void)
+iavf_dev_virtchnl_handler_init(void)
 {
-	struct iavf_event_handler *handler = &event_handler;
+	struct iavf_virtchnl_handler *handler = &event_handler;
 
 	if (__atomic_add_fetch(&handler->ndev, 1, __ATOMIC_RELAXED) != 1)
 		return 0;
@@ -135,8 +182,8 @@ iavf_dev_event_handler_init(void)
 	TAILQ_INIT(&handler->pending);
 	pthread_mutex_init(&handler->lock, NULL);
 
-	if (rte_ctrl_thread_create(&handler->tid, "iavf-event-thread",
-				NULL, iavf_dev_event_handle, NULL)) {
+	if (rte_ctrl_thread_create(&handler->tid, "iavf-virtchnl-thread",
+				NULL, iavf_dev_virtchnl_handle, NULL)) {
 		__atomic_sub_fetch(&handler->ndev, 1, __ATOMIC_RELAXED);
 		return -1;
 	}
@@ -145,9 +192,9 @@ iavf_dev_event_handler_init(void)
 }
 
 void
-iavf_dev_event_handler_fini(void)
+iavf_dev_virtchnl_handler_fini(void)
 {
-	struct iavf_event_handler *handler = &event_handler;
+	struct iavf_virtchnl_handler *handler = &event_handler;
 
 	if (__atomic_sub_fetch(&handler->ndev, 1, __ATOMIC_RELAXED) != 0)
 		return;
@@ -162,7 +209,7 @@ iavf_dev_event_handler_fini(void)
 	pthread_join(handler->tid, NULL);
 	pthread_mutex_destroy(&handler->lock);
 
-	struct iavf_event_element *pos, *save_next;
+	struct iavf_virtchnl_element *pos, *save_next;
 	TAILQ_FOREACH_SAFE(pos, &handler->pending, next, save_next) {
 		TAILQ_REMOVE(&handler->pending, pos, next);
 		rte_free(pos);
@@ -1408,7 +1455,7 @@ iavf_query_stats(struct iavf_adapter *adapter,
 	struct iavf_cmd_info args;
 	int err;
 
-	if (adapter->closed)
+	if (!adapter || adapter->closed)
 		return -EIO;
 
 	memset(&q_stats, 0, sizeof(q_stats));
-- 
2.34.1


             reply	other threads:[~2023-02-22  5:11 UTC|newest]

Thread overview: 19+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2023-02-22  4:40 Kaiwen Deng [this message]
2023-02-27  0:56 ` Zhang, Qi Z
2023-02-27  6:02   ` Deng, KaiwenX
2023-03-07  3:27   ` Deng, KaiwenX
2023-03-07  2:55 ` [PATCH v2] " Kaiwen Deng
2023-03-15 13:40   ` Zhang, Qi Z
2023-03-22  7:26   ` [PATCH v3] " Kaiwen Deng
2023-03-23 15:39     ` Ferruh Yigit
2023-03-27  5:31       ` Deng, KaiwenX
2023-03-27 12:31         ` Ferruh Yigit
2023-03-27 12:37           ` Ferruh Yigit
2023-03-29  7:53             ` Deng, KaiwenX
2023-05-05  2:31             ` Deng, KaiwenX
2023-05-23  1:45               ` Deng, KaiwenX
2023-05-23 10:26                 ` Ferruh Yigit
2023-03-29  6:41           ` Deng, KaiwenX
2023-06-06  5:41     ` Jiale, SongX
2023-06-07  2:03     ` [PATCH v4] " Kaiwen Deng
2023-06-07  4:01       ` Zhang, Qi Z

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20230222044001.1241845-1-kaiwenx.deng@intel.com \
    --to=kaiwenx.deng@intel.com \
    --cc=beilei.xing@intel.com \
    --cc=dev@dpdk.org \
    --cc=jingjing.wu@intel.com \
    --cc=qi.z.zhang@intel.com \
    --cc=qiming.yang@intel.com \
    --cc=stable@dpdk.org \
    --cc=yidingx.zhou@intel.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).