* [PATCH] net/iavf: fix iavf query stats in intr thread @ 2023-02-22 4:40 Kaiwen Deng 2023-02-27 0:56 ` Zhang, Qi Z 2023-03-07 2:55 ` [PATCH v2] " Kaiwen Deng 0 siblings, 2 replies; 19+ messages in thread From: Kaiwen Deng @ 2023-02-22 4:40 UTC (permalink / raw) To: dev Cc: stable, qiming.yang, yidingx.zhou, Kaiwen Deng, Jingjing Wu, Beilei Xing, Qi Zhang When iavf send query-stats command in eal-intr-thread through virtual channel, there will be no response received from iavf_dev_virtchnl_handler for this command during block and wait. Because iavf_dev_virtchnl_handler is also registered in eal-intr-thread. When vf device is bonded as BONDING_MODE_TLB mode, the slave update callback will registered in alarm and called by eal-intr-thread, it would also raise the above issue. This commit add local stats return to iavf_dev_stats_get immediately when it is called by eal-intr-thread. And update local stats in iavf-virtchnl-thread. Fixes: cb5c1b91f76f ("net/iavf: add thread for event callbacks") Fixes: 22b123a36d07 ("net/avf: initialize PMD") Cc: stable@dpdk.org Signed-off-by: Kaiwen Deng <kaiwenx.deng@intel.com> --- drivers/net/iavf/iavf.h | 9 ++- drivers/net/iavf/iavf_ethdev.c | 24 ++++++-- drivers/net/iavf/iavf_vchnl.c | 107 ++++++++++++++++++++++++--------- 3 files changed, 104 insertions(+), 36 deletions(-) diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h index 1edebab8dc..3a249b90a3 100644 --- a/drivers/net/iavf/iavf.h +++ b/drivers/net/iavf/iavf.h @@ -128,6 +128,7 @@ struct iavf_vsi { uint16_t base_vector; uint16_t msix_intr; /* The MSIX interrupt binds to VSI */ struct iavf_eth_xstats eth_stats_offset; + struct virtchnl_eth_stats eth_stats; }; struct rte_flow; @@ -325,6 +326,8 @@ struct iavf_adapter { struct iavf_devargs devargs; }; +typedef void (*virtchnl_callback)(struct rte_eth_dev *dev, void *args); + /* IAVF_DEV_PRIVATE_TO */ #define IAVF_DEV_PRIVATE_TO_ADAPTER(adapter) \ ((struct iavf_adapter *)adapter) @@ -424,8 +427,10 @@ _atomic_set_async_response_cmd(struct iavf_info *vf, enum virtchnl_ops ops) } int iavf_check_api_version(struct iavf_adapter *adapter); int iavf_get_vf_resource(struct iavf_adapter *adapter); -void iavf_dev_event_handler_fini(void); -int iavf_dev_event_handler_init(void); +void iavf_dev_virtchnl_handler_fini(void); +void iavf_dev_virtchnl_callback_post(struct rte_eth_dev *dev, + virtchnl_callback cb, void *args); +int iavf_dev_virtchnl_handler_init(void); void iavf_handle_virtchnl_msg(struct rte_eth_dev *dev); int iavf_enable_vlan_strip(struct iavf_adapter *adapter); int iavf_disable_vlan_strip(struct iavf_adapter *adapter); diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c index 3196210f2c..fcbab5b26a 100644 --- a/drivers/net/iavf/iavf_ethdev.c +++ b/drivers/net/iavf/iavf_ethdev.c @@ -1729,6 +1729,17 @@ iavf_update_stats(struct iavf_vsi *vsi, struct virtchnl_eth_stats *nes) iavf_stat_update_32(&oes->tx_discards, &nes->tx_discards); } +static void iavf_dev_stats_get_callback(struct rte_eth_dev *dev, void *args) +{ + struct virtchnl_eth_stats *eth_stats = (struct virtchnl_eth_stats *)args; + struct virtchnl_eth_stats *pstats = NULL; + struct iavf_adapter *adapter = + IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + int ret = iavf_query_stats(adapter, &pstats); + if (ret == 0) + rte_memcpy(eth_stats, pstats, sizeof(struct virtchnl_eth_stats)); +} + static int iavf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) { @@ -1738,8 +1749,13 @@ iavf_dev_stats_get(struct rte_eth_dev *dev, struct 
rte_eth_stats *stats) struct iavf_vsi *vsi = &vf->vsi; struct virtchnl_eth_stats *pstats = NULL; int ret; - - ret = iavf_query_stats(adapter, &pstats); + if (rte_thread_is_intr()) { + pstats = &vsi->eth_stats; + iavf_dev_virtchnl_callback_post(dev, iavf_dev_stats_get_callback, (void *)pstats); + ret = 0; + } else { + ret = iavf_query_stats(adapter, &pstats); + } if (ret == 0) { uint8_t crc_stats_len = (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) ? 0 : @@ -2634,7 +2650,7 @@ iavf_dev_init(struct rte_eth_dev *eth_dev) rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr, ð_dev->data->mac_addrs[0]); - if (iavf_dev_event_handler_init()) + if (iavf_dev_virtchnl_handler_init()) goto init_vf_err; if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) { @@ -2791,7 +2807,7 @@ iavf_dev_uninit(struct rte_eth_dev *dev) iavf_dev_close(dev); - iavf_dev_event_handler_fini(); + iavf_dev_virtchnl_handler_fini(); return 0; } diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c index f92daf97f2..4136c97c45 100644 --- a/drivers/net/iavf/iavf_vchnl.c +++ b/drivers/net/iavf/iavf_vchnl.c @@ -31,24 +31,36 @@ #define MAX_EVENT_PENDING 16 -struct iavf_event_element { - TAILQ_ENTRY(iavf_event_element) next; +struct iavf_virtchnl_element { + TAILQ_ENTRY(iavf_virtchnl_element) next; struct rte_eth_dev *dev; - enum rte_eth_event_type event; - void *param; - size_t param_alloc_size; - uint8_t param_alloc_data[0]; + enum iavf_virchnl_handle_type { + EVENT_TYPE = 0, + CALL_TYPE + } handle_type; + union { + struct event_param { + enum rte_eth_event_type event; + void *param; + size_t param_alloc_size; + uint8_t param_alloc_data[0]; + } ep; + struct call_param { + virtchnl_callback cb; + void *args; + } cp; + }; }; -struct iavf_event_handler { +struct iavf_virtchnl_handler { uint32_t ndev; pthread_t tid; int fd[2]; pthread_mutex_t lock; - TAILQ_HEAD(event_list, iavf_event_element) pending; + TAILQ_HEAD(event_list, iavf_virtchnl_element) pending; }; -static struct iavf_event_handler event_handler = { +static struct iavf_virtchnl_handler event_handler = { .fd = {-1, -1}, }; @@ -60,10 +72,10 @@ static struct iavf_event_handler event_handler = { #endif static void * -iavf_dev_event_handle(void *param __rte_unused) +iavf_dev_virtchnl_handle(void *param __rte_unused) { - struct iavf_event_handler *handler = &event_handler; - TAILQ_HEAD(event_list, iavf_event_element) pending; + struct iavf_virtchnl_handler *handler = &event_handler; + TAILQ_HEAD(event_list, iavf_virtchnl_element) pending; while (true) { char unused[MAX_EVENT_PENDING]; @@ -76,10 +88,22 @@ iavf_dev_event_handle(void *param __rte_unused) TAILQ_CONCAT(&pending, &handler->pending, next); pthread_mutex_unlock(&handler->lock); - struct iavf_event_element *pos, *save_next; + struct iavf_virtchnl_element *pos, *save_next; TAILQ_FOREACH_SAFE(pos, &pending, next, save_next) { TAILQ_REMOVE(&pending, pos, next); - rte_eth_dev_callback_process(pos->dev, pos->event, pos->param); + + switch (pos->handle_type) { + case EVENT_TYPE: + rte_eth_dev_callback_process(pos->dev, + pos->ep.event, pos->ep.param); + break; + case CALL_TYPE: + pos->cp.cb(pos->dev, pos->cp.args); + break; + default: + break; + } + rte_free(pos); } } @@ -92,19 +116,20 @@ iavf_dev_event_post(struct rte_eth_dev *dev, enum rte_eth_event_type event, void *param, size_t param_alloc_size) { - struct iavf_event_handler *handler = &event_handler; + struct iavf_virtchnl_handler *handler = &event_handler; char notify_byte; - struct iavf_event_element *elem = 
rte_malloc(NULL, sizeof(*elem) + param_alloc_size, 0); + struct iavf_virtchnl_element *elem = rte_malloc(NULL, sizeof(*elem) + param_alloc_size, 0); if (!elem) return; - + elem->handle_type = EVENT_TYPE; + struct event_param *ep = &elem->ep; elem->dev = dev; - elem->event = event; - elem->param = param; - elem->param_alloc_size = param_alloc_size; + ep->event = event; + ep->param = param; + ep->param_alloc_size = param_alloc_size; if (param && param_alloc_size) { - rte_memcpy(elem->param_alloc_data, param, param_alloc_size); - elem->param = elem->param_alloc_data; + rte_memcpy(ep->param_alloc_data, param, param_alloc_size); + ep->param = ep->param_alloc_data; } pthread_mutex_lock(&handler->lock); @@ -115,10 +140,32 @@ iavf_dev_event_post(struct rte_eth_dev *dev, RTE_SET_USED(nw); } +void +iavf_dev_virtchnl_callback_post(struct rte_eth_dev *dev, virtchnl_callback cb, void *args) +{ + struct iavf_virtchnl_handler *handler = &event_handler; + char notify_byte; + struct iavf_virtchnl_element *elem = rte_malloc(NULL, sizeof(*elem), 0); + if (!elem) + return; + elem->dev = dev; + elem->handle_type = CALL_TYPE; + struct call_param *cp = &elem->cp; + cp->cb = cb; + cp->args = args; + + pthread_mutex_lock(&handler->lock); + TAILQ_INSERT_TAIL(&handler->pending, elem, next); + pthread_mutex_unlock(&handler->lock); + + ssize_t nw = write(handler->fd[1], ¬ify_byte, 1); + RTE_SET_USED(nw); +} + int -iavf_dev_event_handler_init(void) +iavf_dev_virtchnl_handler_init(void) { - struct iavf_event_handler *handler = &event_handler; + struct iavf_virtchnl_handler *handler = &event_handler; if (__atomic_add_fetch(&handler->ndev, 1, __ATOMIC_RELAXED) != 1) return 0; @@ -135,8 +182,8 @@ iavf_dev_event_handler_init(void) TAILQ_INIT(&handler->pending); pthread_mutex_init(&handler->lock, NULL); - if (rte_ctrl_thread_create(&handler->tid, "iavf-event-thread", - NULL, iavf_dev_event_handle, NULL)) { + if (rte_ctrl_thread_create(&handler->tid, "iavf-virtchnl-thread", + NULL, iavf_dev_virtchnl_handle, NULL)) { __atomic_sub_fetch(&handler->ndev, 1, __ATOMIC_RELAXED); return -1; } @@ -145,9 +192,9 @@ iavf_dev_event_handler_init(void) } void -iavf_dev_event_handler_fini(void) +iavf_dev_virtchnl_handler_fini(void) { - struct iavf_event_handler *handler = &event_handler; + struct iavf_virtchnl_handler *handler = &event_handler; if (__atomic_sub_fetch(&handler->ndev, 1, __ATOMIC_RELAXED) != 0) return; @@ -162,7 +209,7 @@ iavf_dev_event_handler_fini(void) pthread_join(handler->tid, NULL); pthread_mutex_destroy(&handler->lock); - struct iavf_event_element *pos, *save_next; + struct iavf_virtchnl_element *pos, *save_next; TAILQ_FOREACH_SAFE(pos, &handler->pending, next, save_next) { TAILQ_REMOVE(&handler->pending, pos, next); rte_free(pos); @@ -1408,7 +1455,7 @@ iavf_query_stats(struct iavf_adapter *adapter, struct iavf_cmd_info args; int err; - if (adapter->closed) + if (!adapter || adapter->closed) return -EIO; memset(&q_stats, 0, sizeof(q_stats)); -- 2.34.1 ^ permalink raw reply [flat|nested] 19+ messages in thread
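To make the failure mode in the commit message concrete: it is a self-deadlock. The sketch below is illustrative only (it is not DPDK or driver code, and every name in it is hypothetical); it shows why a blocking virtchnl request issued from the interrupt thread can never see its reply.

/* Hypothetical sketch of the self-deadlock, not driver code. */
#include <stdbool.h>
#include <unistd.h>

static volatile bool reply_ready;

/* Delivered on the eal-intr-thread when the PF answers a request. */
static void intr_thread_msg_handler(void)
{
	reply_ready = true;
}

/* Also runs on the eal-intr-thread, e.g. from the alarm callback that
 * BONDING_MODE_TLB registers for slave stats updates.
 */
static int query_stats_blocking(void)
{
	int i;

	reply_ready = false;
	/* send_request_to_pf(); -- placeholder for the virtchnl send */
	for (i = 0; i < 1000; i++) {
		if (reply_ready)	/* never becomes true ... */
			return 0;
		usleep(1000);
	}
	/* ... because intr_thread_msg_handler() can only run on the same
	 * thread that is busy spinning here, so the wait always times out.
	 */
	return -1;
}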
* RE: [PATCH] net/iavf: fix iavf query stats in intr thread 2023-02-22 4:40 [PATCH] net/iavf: fix iavf query stats in intr thread Kaiwen Deng @ 2023-02-27 0:56 ` Zhang, Qi Z 2023-02-27 6:02 ` Deng, KaiwenX 2023-03-07 3:27 ` Deng, KaiwenX 2023-03-07 2:55 ` [PATCH v2] " Kaiwen Deng 1 sibling, 2 replies; 19+ messages in thread From: Zhang, Qi Z @ 2023-02-27 0:56 UTC (permalink / raw) To: Deng, KaiwenX, dev Cc: stable, Yang, Qiming, Zhou, YidingX, Wu, Jingjing, Xing, Beilei > -----Original Message----- > From: Deng, KaiwenX <kaiwenx.deng@intel.com> > Sent: Wednesday, February 22, 2023 12:40 PM > To: dev@dpdk.org > Cc: stable@dpdk.org; Yang, Qiming <qiming.yang@intel.com>; Zhou, YidingX > <yidingx.zhou@intel.com>; Deng, KaiwenX <kaiwenx.deng@intel.com>; Wu, > Jingjing <jingjing.wu@intel.com>; Xing, Beilei <beilei.xing@intel.com>; Zhang, > Qi Z <qi.z.zhang@intel.com> > Subject: [PATCH] net/iavf: fix iavf query stats in intr thread > > When iavf send query-stats command in eal-intr-thread through virtual > channel, there will be no response received from iavf_dev_virtchnl_handler > for this command during block and wait. > Because iavf_dev_virtchnl_handler is also registered in eal-intr-thread. > > When vf device is bonded as BONDING_MODE_TLB mode, the slave update > callback will registered in alarm and called by eal-intr-thread, it would also > raise the above issue. > > This commit add local stats return to iavf_dev_stats_get immediately when it > is called by eal-intr-thread. And update local stats in iavf-virtchnl-thread. > > Fixes: cb5c1b91f76f ("net/iavf: add thread for event callbacks") > Fixes: 22b123a36d07 ("net/avf: initialize PMD") > Cc: stable@dpdk.org > > Signed-off-by: Kaiwen Deng <kaiwenx.deng@intel.com> > --- > drivers/net/iavf/iavf.h | 9 ++- > drivers/net/iavf/iavf_ethdev.c | 24 ++++++-- drivers/net/iavf/iavf_vchnl.c | > 107 ++++++++++++++++++++++++--------- > 3 files changed, 104 insertions(+), 36 deletions(-) > > diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h index > 1edebab8dc..3a249b90a3 100644 > --- a/drivers/net/iavf/iavf.h > +++ b/drivers/net/iavf/iavf.h > @@ -128,6 +128,7 @@ struct iavf_vsi { > uint16_t base_vector; > uint16_t msix_intr; /* The MSIX interrupt binds to VSI */ > struct iavf_eth_xstats eth_stats_offset; > + struct virtchnl_eth_stats eth_stats; > }; > > struct rte_flow; > @@ -325,6 +326,8 @@ struct iavf_adapter { > struct iavf_devargs devargs; > }; > > +typedef void (*virtchnl_callback)(struct rte_eth_dev *dev, void *args); > + > /* IAVF_DEV_PRIVATE_TO */ > #define IAVF_DEV_PRIVATE_TO_ADAPTER(adapter) \ > ((struct iavf_adapter *)adapter) > @@ -424,8 +427,10 @@ _atomic_set_async_response_cmd(struct iavf_info > *vf, enum virtchnl_ops ops) } int iavf_check_api_version(struct iavf_adapter > *adapter); int iavf_get_vf_resource(struct iavf_adapter *adapter); -void > iavf_dev_event_handler_fini(void); > -int iavf_dev_event_handler_init(void); > +void iavf_dev_virtchnl_handler_fini(void); > +void iavf_dev_virtchnl_callback_post(struct rte_eth_dev *dev, > + virtchnl_callback cb, void *args); > +int iavf_dev_virtchnl_handler_init(void); > void iavf_handle_virtchnl_msg(struct rte_eth_dev *dev); int > iavf_enable_vlan_strip(struct iavf_adapter *adapter); int > iavf_disable_vlan_strip(struct iavf_adapter *adapter); diff --git > a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c index > 3196210f2c..fcbab5b26a 100644 > --- a/drivers/net/iavf/iavf_ethdev.c > +++ b/drivers/net/iavf/iavf_ethdev.c > @@ -1729,6 +1729,17 @@ iavf_update_stats(struct 
iavf_vsi *vsi, struct > virtchnl_eth_stats *nes) > iavf_stat_update_32(&oes->tx_discards, &nes->tx_discards); } > > +static void iavf_dev_stats_get_callback(struct rte_eth_dev *dev, void > +*args) { > + struct virtchnl_eth_stats *eth_stats = (struct virtchnl_eth_stats *)args; > + struct virtchnl_eth_stats *pstats = NULL; > + struct iavf_adapter *adapter = > + IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); > + int ret = iavf_query_stats(adapter, &pstats); > + if (ret == 0) > + rte_memcpy(eth_stats, pstats, sizeof(struct > virtchnl_eth_stats)); } > + > static int > iavf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) > { @@ -1738,8 +1749,13 @@ iavf_dev_stats_get(struct rte_eth_dev *dev, > struct rte_eth_stats *stats) > struct iavf_vsi *vsi = &vf->vsi; > struct virtchnl_eth_stats *pstats = NULL; > int ret; > - > - ret = iavf_query_stats(adapter, &pstats); > + if (rte_thread_is_intr()) { > + pstats = &vsi->eth_stats; > + iavf_dev_virtchnl_callback_post(dev, > iavf_dev_stats_get_callback, (void *)pstats); > + ret = 0; I assume this is going to return a copy of outdated stats, then the stats will be updated it in future and this happens in another thread, is it correct? If true, then will be 2 issues. 1. How the caller know when the updated data will be ready? 2. even caller will consume the outdated stats directly, is that will be a race condition if the virtchnl thread update the stats at the same time? > + } else { > + ret = iavf_query_stats(adapter, &pstats); > + } > if (ret == 0) { > uint8_t crc_stats_len = (dev->data- > >dev_conf.rxmode.offloads & > RTE_ETH_RX_OFFLOAD_KEEP_CRC) ? > 0 : ^ permalink raw reply [flat|nested] 19+ messages in thread
* RE: [PATCH] net/iavf: fix iavf query stats in intr thread 2023-02-27 0:56 ` Zhang, Qi Z @ 2023-02-27 6:02 ` Deng, KaiwenX 2023-03-07 3:27 ` Deng, KaiwenX 1 sibling, 0 replies; 19+ messages in thread From: Deng, KaiwenX @ 2023-02-27 6:02 UTC (permalink / raw) To: Zhang, Qi Z, dev Cc: stable, Yang, Qiming, Zhou, YidingX, Wu, Jingjing, Xing, Beilei > -----Original Message----- > From: Zhang, Qi Z <qi.z.zhang@intel.com> > Sent: Monday, February 27, 2023 8:56 AM > To: Deng, KaiwenX <kaiwenx.deng@intel.com>; dev@dpdk.org > Cc: stable@dpdk.org; Yang, Qiming <qiming.yang@intel.com>; Zhou, YidingX > <yidingx.zhou@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>; Xing, > Beilei <beilei.xing@intel.com> > Subject: RE: [PATCH] net/iavf: fix iavf query stats in intr thread > > > > > -----Original Message----- > > From: Deng, KaiwenX <kaiwenx.deng@intel.com> > > Sent: Wednesday, February 22, 2023 12:40 PM > > To: dev@dpdk.org > > Cc: stable@dpdk.org; Yang, Qiming <qiming.yang@intel.com>; Zhou, > > YidingX <yidingx.zhou@intel.com>; Deng, KaiwenX > > <kaiwenx.deng@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>; Xing, > > Beilei <beilei.xing@intel.com>; Zhang, Qi Z <qi.z.zhang@intel.com> > > Subject: [PATCH] net/iavf: fix iavf query stats in intr thread > > > > When iavf send query-stats command in eal-intr-thread through virtual > > channel, there will be no response received from > > iavf_dev_virtchnl_handler for this command during block and wait. > > Because iavf_dev_virtchnl_handler is also registered in eal-intr-thread. > > > > When vf device is bonded as BONDING_MODE_TLB mode, the slave > update > > callback will registered in alarm and called by eal-intr-thread, it > > would also raise the above issue. > > > > This commit add local stats return to iavf_dev_stats_get immediately > > when it is called by eal-intr-thread. And update local stats in iavf-virtchnl- > thread. 
> > > > Fixes: cb5c1b91f76f ("net/iavf: add thread for event callbacks") > > Fixes: 22b123a36d07 ("net/avf: initialize PMD") > > Cc: stable@dpdk.org > > > > Signed-off-by: Kaiwen Deng <kaiwenx.deng@intel.com> > > --- > > drivers/net/iavf/iavf.h | 9 ++- > > drivers/net/iavf/iavf_ethdev.c | 24 ++++++-- > > drivers/net/iavf/iavf_vchnl.c | > > 107 ++++++++++++++++++++++++--------- > > 3 files changed, 104 insertions(+), 36 deletions(-) > > > > diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h index > > 1edebab8dc..3a249b90a3 100644 > > --- a/drivers/net/iavf/iavf.h > > +++ b/drivers/net/iavf/iavf.h > > @@ -128,6 +128,7 @@ struct iavf_vsi { > > uint16_t base_vector; > > uint16_t msix_intr; /* The MSIX interrupt binds to VSI */ > > struct iavf_eth_xstats eth_stats_offset; > > + struct virtchnl_eth_stats eth_stats; > > }; > > > > struct rte_flow; > > @@ -325,6 +326,8 @@ struct iavf_adapter { > > struct iavf_devargs devargs; > > }; > > > > +typedef void (*virtchnl_callback)(struct rte_eth_dev *dev, void > > +*args); > > + > > /* IAVF_DEV_PRIVATE_TO */ > > #define IAVF_DEV_PRIVATE_TO_ADAPTER(adapter) \ > > ((struct iavf_adapter *)adapter) > > @@ -424,8 +427,10 @@ _atomic_set_async_response_cmd(struct > iavf_info > > *vf, enum virtchnl_ops ops) } int iavf_check_api_version(struct > > iavf_adapter *adapter); int iavf_get_vf_resource(struct iavf_adapter > > *adapter); -void iavf_dev_event_handler_fini(void); > > -int iavf_dev_event_handler_init(void); > > +void iavf_dev_virtchnl_handler_fini(void); > > +void iavf_dev_virtchnl_callback_post(struct rte_eth_dev *dev, > > + virtchnl_callback cb, void *args); int > > +iavf_dev_virtchnl_handler_init(void); > > void iavf_handle_virtchnl_msg(struct rte_eth_dev *dev); int > > iavf_enable_vlan_strip(struct iavf_adapter *adapter); int > > iavf_disable_vlan_strip(struct iavf_adapter *adapter); diff --git > > a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c > > index 3196210f2c..fcbab5b26a 100644 > > --- a/drivers/net/iavf/iavf_ethdev.c > > +++ b/drivers/net/iavf/iavf_ethdev.c > > @@ -1729,6 +1729,17 @@ iavf_update_stats(struct iavf_vsi *vsi, struct > > virtchnl_eth_stats *nes) > > iavf_stat_update_32(&oes->tx_discards, &nes->tx_discards); } > > > > +static void iavf_dev_stats_get_callback(struct rte_eth_dev *dev, void > > +*args) { > > + struct virtchnl_eth_stats *eth_stats = (struct virtchnl_eth_stats > *)args; > > + struct virtchnl_eth_stats *pstats = NULL; > > + struct iavf_adapter *adapter = > > + IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); > > + int ret = iavf_query_stats(adapter, &pstats); > > + if (ret == 0) > > + rte_memcpy(eth_stats, pstats, sizeof(struct > > virtchnl_eth_stats)); } > > + > > static int > > iavf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats > > *stats) { @@ -1738,8 +1749,13 @@ iavf_dev_stats_get(struct rte_eth_dev > > *dev, struct rte_eth_stats *stats) > > struct iavf_vsi *vsi = &vf->vsi; > > struct virtchnl_eth_stats *pstats = NULL; > > int ret; > > - > > - ret = iavf_query_stats(adapter, &pstats); > > + if (rte_thread_is_intr()) { > > + pstats = &vsi->eth_stats; > > + iavf_dev_virtchnl_callback_post(dev, > > iavf_dev_stats_get_callback, (void *)pstats); > > + ret = 0; > > > I assume this is going to return a copy of outdated stats, then the stats will be > updated it in future and this happens in another thread, is it correct? > If true, then will be 2 issues. > 1. How the caller know when the updated data will be ready? > 2. 
even caller will consume the outdated stats directly, is that will be a race > condition if the virtchnl thread update the stats at the same time? > > > + } else { > > + ret = iavf_query_stats(adapter, &pstats); > > + } > > if (ret == 0) { > > uint8_t crc_stats_len = (dev->data- > > >dev_conf.rxmode.offloads & > > RTE_ETH_RX_OFFLOAD_KEEP_CRC) ? > > 0 : ^ permalink raw reply [flat|nested] 19+ messages in thread
* RE: [PATCH] net/iavf: fix iavf query stats in intr thread 2023-02-27 0:56 ` Zhang, Qi Z 2023-02-27 6:02 ` Deng, KaiwenX @ 2023-03-07 3:27 ` Deng, KaiwenX 1 sibling, 0 replies; 19+ messages in thread From: Deng, KaiwenX @ 2023-03-07 3:27 UTC (permalink / raw) To: Zhang, Qi Z, dev Cc: stable, Yang, Qiming, Zhou, YidingX, Wu, Jingjing, Xing, Beilei > -----Original Message----- > From: Zhang, Qi Z <qi.z.zhang@intel.com> > Sent: Monday, February 27, 2023 8:56 AM > To: Deng, KaiwenX <kaiwenx.deng@intel.com>; dev@dpdk.org > Cc: stable@dpdk.org; Yang, Qiming <qiming.yang@intel.com>; Zhou, YidingX > <yidingx.zhou@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>; Xing, > Beilei <beilei.xing@intel.com> > Subject: RE: [PATCH] net/iavf: fix iavf query stats in intr thread > > > > > -----Original Message----- > > From: Deng, KaiwenX <kaiwenx.deng@intel.com> > > Sent: Wednesday, February 22, 2023 12:40 PM > > To: dev@dpdk.org > > Cc: stable@dpdk.org; Yang, Qiming <qiming.yang@intel.com>; Zhou, > > YidingX <yidingx.zhou@intel.com>; Deng, KaiwenX > > <kaiwenx.deng@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>; Xing, > > Beilei <beilei.xing@intel.com>; Zhang, Qi Z <qi.z.zhang@intel.com> > > Subject: [PATCH] net/iavf: fix iavf query stats in intr thread > > > > When iavf send query-stats command in eal-intr-thread through virtual > > channel, there will be no response received from > > iavf_dev_virtchnl_handler for this command during block and wait. > > Because iavf_dev_virtchnl_handler is also registered in eal-intr-thread. > > > > When vf device is bonded as BONDING_MODE_TLB mode, the slave > update > > callback will registered in alarm and called by eal-intr-thread, it > > would also raise the above issue. > > > > This commit add local stats return to iavf_dev_stats_get immediately > > when it is called by eal-intr-thread. And update local stats in iavf-virtchnl- > thread. 
> > > > Fixes: cb5c1b91f76f ("net/iavf: add thread for event callbacks") > > Fixes: 22b123a36d07 ("net/avf: initialize PMD") > > Cc: stable@dpdk.org > > > > Signed-off-by: Kaiwen Deng <kaiwenx.deng@intel.com> > > --- > > drivers/net/iavf/iavf.h | 9 ++- > > drivers/net/iavf/iavf_ethdev.c | 24 ++++++-- > > drivers/net/iavf/iavf_vchnl.c | > > 107 ++++++++++++++++++++++++--------- > > 3 files changed, 104 insertions(+), 36 deletions(-) > > > > diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h index > > 1edebab8dc..3a249b90a3 100644 > > --- a/drivers/net/iavf/iavf.h > > +++ b/drivers/net/iavf/iavf.h > > @@ -128,6 +128,7 @@ struct iavf_vsi { > > uint16_t base_vector; > > uint16_t msix_intr; /* The MSIX interrupt binds to VSI */ > > struct iavf_eth_xstats eth_stats_offset; > > + struct virtchnl_eth_stats eth_stats; > > }; > > > > struct rte_flow; > > @@ -325,6 +326,8 @@ struct iavf_adapter { > > struct iavf_devargs devargs; > > }; > > > > +typedef void (*virtchnl_callback)(struct rte_eth_dev *dev, void > > +*args); > > + > > /* IAVF_DEV_PRIVATE_TO */ > > #define IAVF_DEV_PRIVATE_TO_ADAPTER(adapter) \ > > ((struct iavf_adapter *)adapter) > > @@ -424,8 +427,10 @@ _atomic_set_async_response_cmd(struct > iavf_info > > *vf, enum virtchnl_ops ops) } int iavf_check_api_version(struct > > iavf_adapter *adapter); int iavf_get_vf_resource(struct iavf_adapter > > *adapter); -void iavf_dev_event_handler_fini(void); > > -int iavf_dev_event_handler_init(void); > > +void iavf_dev_virtchnl_handler_fini(void); > > +void iavf_dev_virtchnl_callback_post(struct rte_eth_dev *dev, > > + virtchnl_callback cb, void *args); int > > +iavf_dev_virtchnl_handler_init(void); > > void iavf_handle_virtchnl_msg(struct rte_eth_dev *dev); int > > iavf_enable_vlan_strip(struct iavf_adapter *adapter); int > > iavf_disable_vlan_strip(struct iavf_adapter *adapter); diff --git > > a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c > > index 3196210f2c..fcbab5b26a 100644 > > --- a/drivers/net/iavf/iavf_ethdev.c > > +++ b/drivers/net/iavf/iavf_ethdev.c > > @@ -1729,6 +1729,17 @@ iavf_update_stats(struct iavf_vsi *vsi, struct > > virtchnl_eth_stats *nes) > > iavf_stat_update_32(&oes->tx_discards, &nes->tx_discards); } > > > > +static void iavf_dev_stats_get_callback(struct rte_eth_dev *dev, void > > +*args) { > > + struct virtchnl_eth_stats *eth_stats = (struct virtchnl_eth_stats > *)args; > > + struct virtchnl_eth_stats *pstats = NULL; > > + struct iavf_adapter *adapter = > > + IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); > > + int ret = iavf_query_stats(adapter, &pstats); > > + if (ret == 0) > > + rte_memcpy(eth_stats, pstats, sizeof(struct > > virtchnl_eth_stats)); } > > + > > static int > > iavf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats > > *stats) { @@ -1738,8 +1749,13 @@ iavf_dev_stats_get(struct rte_eth_dev > > *dev, struct rte_eth_stats *stats) > > struct iavf_vsi *vsi = &vf->vsi; > > struct virtchnl_eth_stats *pstats = NULL; > > int ret; > > - > > - ret = iavf_query_stats(adapter, &pstats); > > + if (rte_thread_is_intr()) { > > + pstats = &vsi->eth_stats; > > + iavf_dev_virtchnl_callback_post(dev, > > iavf_dev_stats_get_callback, (void *)pstats); > > + ret = 0; > > > I assume this is going to return a copy of outdated stats, then the stats will be > updated it in future and this happens in another thread, is it correct? > If true, then will be 2 issues. > 1. How the caller know when the updated data will be ready? > 2. 
even caller will consume the outdated stats directly, is that will be a race > condition if the virtchnl thread update the stats at the same time? >> Hi Qi, >> 1. The caller cannot know when the updated data will be ready in the interrupt thread. If the caller blocks waiting for the stats update in the interrupt thread, >> the stats will not be updated, because the interrupt thread itself is blocked. The caller can only get the local stats that were updated last time. >> I can't find a better solution; if you have any suggestions, please let me know. >> 2. I submitted patch v2 to avoid the race condition. >> Thanks > > > + } else { > > + ret = iavf_query_stats(adapter, &pstats); > > + } > > if (ret == 0) { > > uint8_t crc_stats_len = (dev->data- > > >dev_conf.rxmode.offloads & > > RTE_ETH_RX_OFFLOAD_KEEP_CRC) ? > > 0 : ^ permalink raw reply [flat|nested] 19+ messages in thread
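Restating the approach from the reply above as a minimal sketch (it reuses the names added by the v2 patch that follows, but is not the literal driver code): the interrupt-thread caller returns whatever the last refresh cached and queues an asynchronous refresh on the iavf-virtchnl-thread, and both sides take the same spinlock so the copy cannot be torn.

/* Sketch only; vsi->eth_stats, vsi->stats_lock and the callback-post
 * helper are the ones introduced by the v2 patch below.
 */
static int
stats_get_from_intr_thread(struct rte_eth_dev *dev, struct iavf_vsi *vsi,
			   struct virtchnl_eth_stats *out)
{
	/* Ask the iavf-virtchnl-thread to refresh the cache later; do not
	 * wait for it, or the interrupt thread would block itself again.
	 */
	iavf_dev_virtchnl_callback_post(dev, iavf_dev_stats_get_callback,
					(void *)&vsi->eth_stats);

	/* Return the values cached by the previous refresh. */
	rte_spinlock_lock(&vsi->stats_lock);
	*out = vsi->eth_stats;
	rte_spinlock_unlock(&vsi->stats_lock);

	return 0;
}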
* [PATCH v2] net/iavf: fix iavf query stats in intr thread 2023-02-22 4:40 [PATCH] net/iavf: fix iavf query stats in intr thread Kaiwen Deng 2023-02-27 0:56 ` Zhang, Qi Z @ 2023-03-07 2:55 ` Kaiwen Deng 2023-03-15 13:40 ` Zhang, Qi Z 2023-03-22 7:26 ` [PATCH v3] " Kaiwen Deng 1 sibling, 2 replies; 19+ messages in thread From: Kaiwen Deng @ 2023-03-07 2:55 UTC (permalink / raw) To: dev Cc: stable, qiming.yang, yidingx.zhou, Kaiwen Deng, Jingjing Wu, Beilei Xing, Qi Zhang When iavf send query-stats command in eal-intr-thread through virtual channel, there will be no response received from iavf_dev_virtchnl_handler for this command during block and wait. Because iavf_dev_virtchnl_handler is also registered in eal-intr-thread. When vf device is bonded as BONDING_MODE_TLB mode, the slave device update callback will registered in alarm and called by eal-intr-thread, it would also raise the above issue. This commit adds local stats return to iavf_dev_stats_get immediately when it is called by eal-intr-thread. And updates local stats in iavf-virtchnl-thread. Fixes: cb5c1b91f76f ("net/iavf: add thread for event callbacks") Fixes: 22b123a36d07 ("net/avf: initialize PMD") Cc: stable@dpdk.org Signed-off-by: Kaiwen Deng <kaiwenx.deng@intel.com> --- Changes since v1: - Add lock to avoid race condition. --- --- drivers/net/iavf/iavf.h | 10 ++- drivers/net/iavf/iavf_ethdev.c | 37 ++++++++++-- drivers/net/iavf/iavf_vchnl.c | 107 ++++++++++++++++++++++++--------- 3 files changed, 118 insertions(+), 36 deletions(-) diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h index 1edebab8dc..641dfa2e3b 100644 --- a/drivers/net/iavf/iavf.h +++ b/drivers/net/iavf/iavf.h @@ -128,6 +128,8 @@ struct iavf_vsi { uint16_t base_vector; uint16_t msix_intr; /* The MSIX interrupt binds to VSI */ struct iavf_eth_xstats eth_stats_offset; + struct virtchnl_eth_stats eth_stats; + rte_spinlock_t stats_lock; }; struct rte_flow; @@ -325,6 +327,8 @@ struct iavf_adapter { struct iavf_devargs devargs; }; +typedef void (*virtchnl_callback)(struct rte_eth_dev *dev, void *args); + /* IAVF_DEV_PRIVATE_TO */ #define IAVF_DEV_PRIVATE_TO_ADAPTER(adapter) \ ((struct iavf_adapter *)adapter) @@ -424,8 +428,10 @@ _atomic_set_async_response_cmd(struct iavf_info *vf, enum virtchnl_ops ops) } int iavf_check_api_version(struct iavf_adapter *adapter); int iavf_get_vf_resource(struct iavf_adapter *adapter); -void iavf_dev_event_handler_fini(void); -int iavf_dev_event_handler_init(void); +void iavf_dev_virtchnl_handler_fini(void); +void iavf_dev_virtchnl_callback_post(struct rte_eth_dev *dev, + virtchnl_callback cb, void *args); +int iavf_dev_virtchnl_handler_init(void); void iavf_handle_virtchnl_msg(struct rte_eth_dev *dev); int iavf_enable_vlan_strip(struct iavf_adapter *adapter); int iavf_disable_vlan_strip(struct iavf_adapter *adapter); diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c index 3196210f2c..772859d157 100644 --- a/drivers/net/iavf/iavf_ethdev.c +++ b/drivers/net/iavf/iavf_ethdev.c @@ -1729,6 +1729,22 @@ iavf_update_stats(struct iavf_vsi *vsi, struct virtchnl_eth_stats *nes) iavf_stat_update_32(&oes->tx_discards, &nes->tx_discards); } +static void iavf_dev_stats_get_callback(struct rte_eth_dev *dev, void *args) +{ + struct virtchnl_eth_stats *eth_stats = (struct virtchnl_eth_stats *)args; + struct virtchnl_eth_stats *pstats = NULL; + struct iavf_adapter *adapter = + IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + struct 
iavf_vsi *vsi = &vf->vsi; + int ret = iavf_query_stats(adapter, &pstats); + if (ret == 0) { + rte_spinlock_lock(&vsi->stats_lock); + rte_memcpy(eth_stats, pstats, sizeof(struct virtchnl_eth_stats)); + rte_spinlock_unlock(&vsi->stats_lock); + } +} + static int iavf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) { @@ -1738,9 +1754,17 @@ iavf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) struct iavf_vsi *vsi = &vf->vsi; struct virtchnl_eth_stats *pstats = NULL; int ret; - - ret = iavf_query_stats(adapter, &pstats); + int is_intr_thread = rte_thread_is_intr(); + if (is_intr_thread) { + pstats = &vsi->eth_stats; + iavf_dev_virtchnl_callback_post(dev, iavf_dev_stats_get_callback, (void *)pstats); + ret = 0; + } else { + ret = iavf_query_stats(adapter, &pstats); + } if (ret == 0) { + if (is_intr_thread) + rte_spinlock_lock(&vsi->stats_lock); uint8_t crc_stats_len = (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) ? 0 : RTE_ETHER_CRC_LEN; @@ -1754,6 +1778,8 @@ iavf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) stats->ibytes = pstats->rx_bytes; stats->ibytes -= stats->ipackets * crc_stats_len; stats->obytes = pstats->tx_bytes; + if (is_intr_thread) + rte_spinlock_unlock(&vsi->stats_lock); } else { PMD_DRV_LOG(ERR, "Get statistics failed"); } @@ -2571,10 +2597,13 @@ iavf_dev_init(struct rte_eth_dev *eth_dev) struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter); struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter); struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); + struct iavf_vsi *vsi = &vf->vsi; int ret = 0; PMD_INIT_FUNC_TRACE(); + rte_spinlock_init(&vsi->stats_lock); + /* assign ops func pointer */ eth_dev->dev_ops = &iavf_eth_dev_ops; eth_dev->rx_queue_count = iavf_dev_rxq_count; @@ -2634,7 +2663,7 @@ iavf_dev_init(struct rte_eth_dev *eth_dev) rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr, ð_dev->data->mac_addrs[0]); - if (iavf_dev_event_handler_init()) + if (iavf_dev_virtchnl_handler_init()) goto init_vf_err; if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) { @@ -2791,7 +2820,7 @@ iavf_dev_uninit(struct rte_eth_dev *dev) iavf_dev_close(dev); - iavf_dev_event_handler_fini(); + iavf_dev_virtchnl_handler_fini(); return 0; } diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c index f92daf97f2..4136c97c45 100644 --- a/drivers/net/iavf/iavf_vchnl.c +++ b/drivers/net/iavf/iavf_vchnl.c @@ -31,24 +31,36 @@ #define MAX_EVENT_PENDING 16 -struct iavf_event_element { - TAILQ_ENTRY(iavf_event_element) next; +struct iavf_virtchnl_element { + TAILQ_ENTRY(iavf_virtchnl_element) next; struct rte_eth_dev *dev; - enum rte_eth_event_type event; - void *param; - size_t param_alloc_size; - uint8_t param_alloc_data[0]; + enum iavf_virchnl_handle_type { + EVENT_TYPE = 0, + CALL_TYPE + } handle_type; + union { + struct event_param { + enum rte_eth_event_type event; + void *param; + size_t param_alloc_size; + uint8_t param_alloc_data[0]; + } ep; + struct call_param { + virtchnl_callback cb; + void *args; + } cp; + }; }; -struct iavf_event_handler { +struct iavf_virtchnl_handler { uint32_t ndev; pthread_t tid; int fd[2]; pthread_mutex_t lock; - TAILQ_HEAD(event_list, iavf_event_element) pending; + TAILQ_HEAD(event_list, iavf_virtchnl_element) pending; }; -static struct iavf_event_handler event_handler = { +static struct iavf_virtchnl_handler event_handler = { .fd = {-1, -1}, }; @@ -60,10 +72,10 @@ static struct iavf_event_handler event_handler = { #endif static void * 
-iavf_dev_event_handle(void *param __rte_unused) +iavf_dev_virtchnl_handle(void *param __rte_unused) { - struct iavf_event_handler *handler = &event_handler; - TAILQ_HEAD(event_list, iavf_event_element) pending; + struct iavf_virtchnl_handler *handler = &event_handler; + TAILQ_HEAD(event_list, iavf_virtchnl_element) pending; while (true) { char unused[MAX_EVENT_PENDING]; @@ -76,10 +88,22 @@ iavf_dev_event_handle(void *param __rte_unused) TAILQ_CONCAT(&pending, &handler->pending, next); pthread_mutex_unlock(&handler->lock); - struct iavf_event_element *pos, *save_next; + struct iavf_virtchnl_element *pos, *save_next; TAILQ_FOREACH_SAFE(pos, &pending, next, save_next) { TAILQ_REMOVE(&pending, pos, next); - rte_eth_dev_callback_process(pos->dev, pos->event, pos->param); + + switch (pos->handle_type) { + case EVENT_TYPE: + rte_eth_dev_callback_process(pos->dev, + pos->ep.event, pos->ep.param); + break; + case CALL_TYPE: + pos->cp.cb(pos->dev, pos->cp.args); + break; + default: + break; + } + rte_free(pos); } } @@ -92,19 +116,20 @@ iavf_dev_event_post(struct rte_eth_dev *dev, enum rte_eth_event_type event, void *param, size_t param_alloc_size) { - struct iavf_event_handler *handler = &event_handler; + struct iavf_virtchnl_handler *handler = &event_handler; char notify_byte; - struct iavf_event_element *elem = rte_malloc(NULL, sizeof(*elem) + param_alloc_size, 0); + struct iavf_virtchnl_element *elem = rte_malloc(NULL, sizeof(*elem) + param_alloc_size, 0); if (!elem) return; - + elem->handle_type = EVENT_TYPE; + struct event_param *ep = &elem->ep; elem->dev = dev; - elem->event = event; - elem->param = param; - elem->param_alloc_size = param_alloc_size; + ep->event = event; + ep->param = param; + ep->param_alloc_size = param_alloc_size; if (param && param_alloc_size) { - rte_memcpy(elem->param_alloc_data, param, param_alloc_size); - elem->param = elem->param_alloc_data; + rte_memcpy(ep->param_alloc_data, param, param_alloc_size); + ep->param = ep->param_alloc_data; } pthread_mutex_lock(&handler->lock); @@ -115,10 +140,32 @@ iavf_dev_event_post(struct rte_eth_dev *dev, RTE_SET_USED(nw); } +void +iavf_dev_virtchnl_callback_post(struct rte_eth_dev *dev, virtchnl_callback cb, void *args) +{ + struct iavf_virtchnl_handler *handler = &event_handler; + char notify_byte; + struct iavf_virtchnl_element *elem = rte_malloc(NULL, sizeof(*elem), 0); + if (!elem) + return; + elem->dev = dev; + elem->handle_type = CALL_TYPE; + struct call_param *cp = &elem->cp; + cp->cb = cb; + cp->args = args; + + pthread_mutex_lock(&handler->lock); + TAILQ_INSERT_TAIL(&handler->pending, elem, next); + pthread_mutex_unlock(&handler->lock); + + ssize_t nw = write(handler->fd[1], ¬ify_byte, 1); + RTE_SET_USED(nw); +} + int -iavf_dev_event_handler_init(void) +iavf_dev_virtchnl_handler_init(void) { - struct iavf_event_handler *handler = &event_handler; + struct iavf_virtchnl_handler *handler = &event_handler; if (__atomic_add_fetch(&handler->ndev, 1, __ATOMIC_RELAXED) != 1) return 0; @@ -135,8 +182,8 @@ iavf_dev_event_handler_init(void) TAILQ_INIT(&handler->pending); pthread_mutex_init(&handler->lock, NULL); - if (rte_ctrl_thread_create(&handler->tid, "iavf-event-thread", - NULL, iavf_dev_event_handle, NULL)) { + if (rte_ctrl_thread_create(&handler->tid, "iavf-virtchnl-thread", + NULL, iavf_dev_virtchnl_handle, NULL)) { __atomic_sub_fetch(&handler->ndev, 1, __ATOMIC_RELAXED); return -1; } @@ -145,9 +192,9 @@ iavf_dev_event_handler_init(void) } void -iavf_dev_event_handler_fini(void) +iavf_dev_virtchnl_handler_fini(void) { - 
struct iavf_event_handler *handler = &event_handler; + struct iavf_virtchnl_handler *handler = &event_handler; if (__atomic_sub_fetch(&handler->ndev, 1, __ATOMIC_RELAXED) != 0) return; @@ -162,7 +209,7 @@ iavf_dev_event_handler_fini(void) pthread_join(handler->tid, NULL); pthread_mutex_destroy(&handler->lock); - struct iavf_event_element *pos, *save_next; + struct iavf_virtchnl_element *pos, *save_next; TAILQ_FOREACH_SAFE(pos, &handler->pending, next, save_next) { TAILQ_REMOVE(&handler->pending, pos, next); rte_free(pos); @@ -1408,7 +1455,7 @@ iavf_query_stats(struct iavf_adapter *adapter, struct iavf_cmd_info args; int err; - if (adapter->closed) + if (!adapter || adapter->closed) return -EIO; memset(&q_stats, 0, sizeof(q_stats)); -- 2.34.1 ^ permalink raw reply [flat|nested] 19+ messages in thread
* RE: [PATCH v2] net/iavf: fix iavf query stats in intr thread 2023-03-07 2:55 ` [PATCH v2] " Kaiwen Deng @ 2023-03-15 13:40 ` Zhang, Qi Z 2023-03-22 7:26 ` [PATCH v3] " Kaiwen Deng 1 sibling, 0 replies; 19+ messages in thread From: Zhang, Qi Z @ 2023-03-15 13:40 UTC (permalink / raw) To: Deng, KaiwenX, dev Cc: stable, Yang, Qiming, Zhou, YidingX, Wu, Jingjing, Xing, Beilei > -----Original Message----- > From: Deng, KaiwenX <kaiwenx.deng@intel.com> > Sent: Tuesday, March 7, 2023 10:56 AM > To: dev@dpdk.org > Cc: stable@dpdk.org; Yang, Qiming <qiming.yang@intel.com>; Zhou, YidingX > <yidingx.zhou@intel.com>; Deng, KaiwenX <kaiwenx.deng@intel.com>; Wu, > Jingjing <jingjing.wu@intel.com>; Xing, Beilei <beilei.xing@intel.com>; Zhang, > Qi Z <qi.z.zhang@intel.com> > Subject: [PATCH v2] net/iavf: fix iavf query stats in intr thread > > When iavf send query-stats command in eal-intr-thread through virtual > channel, there will be no response received from iavf_dev_virtchnl_handler > for this command during block and wait. This is because of the improper implementation in iavf_execute_vf_cmd. I assume that without an interrupt (in eal_intr_thread) we should be able to query stats through virtchnl in polling mode. Check out how "VIRTCHNL_OP_VERSION" was implemented. > Because iavf_dev_virtchnl_handler is also registered in eal-intr-thread. > > When vf device is bonded as BONDING_MODE_TLB mode, the slave device > update callback will registered in alarm and called by eal-intr-thread, it > would also raise the above issue. > > This commit adds local stats return to iavf_dev_stats_get immediately when > it is called by eal-intr-thread. And updates local stats in iavf-virtchnl-thread. > > Fixes: cb5c1b91f76f ("net/iavf: add thread for event callbacks") > Fixes: 22b123a36d07 ("net/avf: initialize PMD") > Cc: stable@dpdk.org > > Signed-off-by: Kaiwen Deng <kaiwenx.deng@intel.com> > --- > Changes since v1: > - Add lock to avoid race condition.
> --- > --- > drivers/net/iavf/iavf.h | 10 ++- > drivers/net/iavf/iavf_ethdev.c | 37 ++++++++++-- > drivers/net/iavf/iavf_vchnl.c | 107 ++++++++++++++++++++++++--------- > 3 files changed, 118 insertions(+), 36 deletions(-) > > diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h index > 1edebab8dc..641dfa2e3b 100644 > --- a/drivers/net/iavf/iavf.h > +++ b/drivers/net/iavf/iavf.h > @@ -128,6 +128,8 @@ struct iavf_vsi { > uint16_t base_vector; > uint16_t msix_intr; /* The MSIX interrupt binds to VSI */ > struct iavf_eth_xstats eth_stats_offset; > + struct virtchnl_eth_stats eth_stats; > + rte_spinlock_t stats_lock; > }; > > struct rte_flow; > @@ -325,6 +327,8 @@ struct iavf_adapter { > struct iavf_devargs devargs; > }; > > +typedef void (*virtchnl_callback)(struct rte_eth_dev *dev, void *args); > + > /* IAVF_DEV_PRIVATE_TO */ > #define IAVF_DEV_PRIVATE_TO_ADAPTER(adapter) \ > ((struct iavf_adapter *)adapter) > @@ -424,8 +428,10 @@ _atomic_set_async_response_cmd(struct iavf_info > *vf, enum virtchnl_ops ops) } int iavf_check_api_version(struct iavf_adapter > *adapter); int iavf_get_vf_resource(struct iavf_adapter *adapter); -void > iavf_dev_event_handler_fini(void); > -int iavf_dev_event_handler_init(void); > +void iavf_dev_virtchnl_handler_fini(void); > +void iavf_dev_virtchnl_callback_post(struct rte_eth_dev *dev, > + virtchnl_callback cb, void *args); > +int iavf_dev_virtchnl_handler_init(void); > void iavf_handle_virtchnl_msg(struct rte_eth_dev *dev); int > iavf_enable_vlan_strip(struct iavf_adapter *adapter); int > iavf_disable_vlan_strip(struct iavf_adapter *adapter); diff --git > a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c index > 3196210f2c..772859d157 100644 > --- a/drivers/net/iavf/iavf_ethdev.c > +++ b/drivers/net/iavf/iavf_ethdev.c > @@ -1729,6 +1729,22 @@ iavf_update_stats(struct iavf_vsi *vsi, struct > virtchnl_eth_stats *nes) > iavf_stat_update_32(&oes->tx_discards, &nes->tx_discards); } > > +static void iavf_dev_stats_get_callback(struct rte_eth_dev *dev, void > +*args) { > + struct virtchnl_eth_stats *eth_stats = (struct virtchnl_eth_stats > *)args; > + struct virtchnl_eth_stats *pstats = NULL; > + struct iavf_adapter *adapter = > + IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); > + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data- > >dev_private); > + struct iavf_vsi *vsi = &vf->vsi; > + int ret = iavf_query_stats(adapter, &pstats); > + if (ret == 0) { > + rte_spinlock_lock(&vsi->stats_lock); > + rte_memcpy(eth_stats, pstats, sizeof(struct > virtchnl_eth_stats)); > + rte_spinlock_unlock(&vsi->stats_lock); > + } > +} > + > static int > iavf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) > { @@ -1738,9 +1754,17 @@ iavf_dev_stats_get(struct rte_eth_dev *dev, > struct rte_eth_stats *stats) > struct iavf_vsi *vsi = &vf->vsi; > struct virtchnl_eth_stats *pstats = NULL; > int ret; > - > - ret = iavf_query_stats(adapter, &pstats); > + int is_intr_thread = rte_thread_is_intr(); > + if (is_intr_thread) { > + pstats = &vsi->eth_stats; > + iavf_dev_virtchnl_callback_post(dev, > iavf_dev_stats_get_callback, (void *)pstats); > + ret = 0; > + } else { > + ret = iavf_query_stats(adapter, &pstats); > + } > if (ret == 0) { > + if (is_intr_thread) > + rte_spinlock_lock(&vsi->stats_lock); > uint8_t crc_stats_len = (dev->data- > >dev_conf.rxmode.offloads & > RTE_ETH_RX_OFFLOAD_KEEP_CRC) ? 
> 0 : > RTE_ETHER_CRC_LEN; > @@ -1754,6 +1778,8 @@ iavf_dev_stats_get(struct rte_eth_dev *dev, struct > rte_eth_stats *stats) > stats->ibytes = pstats->rx_bytes; > stats->ibytes -= stats->ipackets * crc_stats_len; > stats->obytes = pstats->tx_bytes; > + if (is_intr_thread) > + rte_spinlock_unlock(&vsi->stats_lock); > } else { > PMD_DRV_LOG(ERR, "Get statistics failed"); > } > @@ -2571,10 +2597,13 @@ iavf_dev_init(struct rte_eth_dev *eth_dev) > struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter); > struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter); > struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); > + struct iavf_vsi *vsi = &vf->vsi; > int ret = 0; > > PMD_INIT_FUNC_TRACE(); > > + rte_spinlock_init(&vsi->stats_lock); > + > /* assign ops func pointer */ > eth_dev->dev_ops = &iavf_eth_dev_ops; > eth_dev->rx_queue_count = iavf_dev_rxq_count; @@ -2634,7 > +2663,7 @@ iavf_dev_init(struct rte_eth_dev *eth_dev) > rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr, > ð_dev->data->mac_addrs[0]); > > - if (iavf_dev_event_handler_init()) > + if (iavf_dev_virtchnl_handler_init()) > goto init_vf_err; > > if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) > { @@ -2791,7 +2820,7 @@ iavf_dev_uninit(struct rte_eth_dev *dev) > > iavf_dev_close(dev); > > - iavf_dev_event_handler_fini(); > + iavf_dev_virtchnl_handler_fini(); > > return 0; > } > diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c index > f92daf97f2..4136c97c45 100644 > --- a/drivers/net/iavf/iavf_vchnl.c > +++ b/drivers/net/iavf/iavf_vchnl.c > @@ -31,24 +31,36 @@ > > #define MAX_EVENT_PENDING 16 > > -struct iavf_event_element { > - TAILQ_ENTRY(iavf_event_element) next; > +struct iavf_virtchnl_element { > + TAILQ_ENTRY(iavf_virtchnl_element) next; > struct rte_eth_dev *dev; > - enum rte_eth_event_type event; > - void *param; > - size_t param_alloc_size; > - uint8_t param_alloc_data[0]; > + enum iavf_virchnl_handle_type { > + EVENT_TYPE = 0, > + CALL_TYPE > + } handle_type; > + union { > + struct event_param { > + enum rte_eth_event_type event; > + void *param; > + size_t param_alloc_size; > + uint8_t param_alloc_data[0]; > + } ep; > + struct call_param { > + virtchnl_callback cb; > + void *args; > + } cp; > + }; > }; > > -struct iavf_event_handler { > +struct iavf_virtchnl_handler { > uint32_t ndev; > pthread_t tid; > int fd[2]; > pthread_mutex_t lock; > - TAILQ_HEAD(event_list, iavf_event_element) pending; > + TAILQ_HEAD(event_list, iavf_virtchnl_element) pending; > }; > > -static struct iavf_event_handler event_handler = { > +static struct iavf_virtchnl_handler event_handler = { > .fd = {-1, -1}, > }; > > @@ -60,10 +72,10 @@ static struct iavf_event_handler event_handler = > { #endif > > static void * > -iavf_dev_event_handle(void *param __rte_unused) > +iavf_dev_virtchnl_handle(void *param __rte_unused) > { > - struct iavf_event_handler *handler = &event_handler; > - TAILQ_HEAD(event_list, iavf_event_element) pending; > + struct iavf_virtchnl_handler *handler = &event_handler; > + TAILQ_HEAD(event_list, iavf_virtchnl_element) pending; > > while (true) { > char unused[MAX_EVENT_PENDING]; > @@ -76,10 +88,22 @@ iavf_dev_event_handle(void *param __rte_unused) > TAILQ_CONCAT(&pending, &handler->pending, next); > pthread_mutex_unlock(&handler->lock); > > - struct iavf_event_element *pos, *save_next; > + struct iavf_virtchnl_element *pos, *save_next; > TAILQ_FOREACH_SAFE(pos, &pending, next, save_next) { > TAILQ_REMOVE(&pending, pos, next); > - rte_eth_dev_callback_process(pos->dev, 
pos->event, > pos->param); > + > + switch (pos->handle_type) { > + case EVENT_TYPE: > + rte_eth_dev_callback_process(pos->dev, > + pos->ep.event, pos->ep.param); > + break; > + case CALL_TYPE: > + pos->cp.cb(pos->dev, pos->cp.args); > + break; > + default: > + break; > + } > + > rte_free(pos); > } > } > @@ -92,19 +116,20 @@ iavf_dev_event_post(struct rte_eth_dev *dev, > enum rte_eth_event_type event, > void *param, size_t param_alloc_size) { > - struct iavf_event_handler *handler = &event_handler; > + struct iavf_virtchnl_handler *handler = &event_handler; > char notify_byte; > - struct iavf_event_element *elem = rte_malloc(NULL, sizeof(*elem) + > param_alloc_size, 0); > + struct iavf_virtchnl_element *elem = rte_malloc(NULL, sizeof(*elem) > + > +param_alloc_size, 0); > if (!elem) > return; > - > + elem->handle_type = EVENT_TYPE; > + struct event_param *ep = &elem->ep; > elem->dev = dev; > - elem->event = event; > - elem->param = param; > - elem->param_alloc_size = param_alloc_size; > + ep->event = event; > + ep->param = param; > + ep->param_alloc_size = param_alloc_size; > if (param && param_alloc_size) { > - rte_memcpy(elem->param_alloc_data, param, > param_alloc_size); > - elem->param = elem->param_alloc_data; > + rte_memcpy(ep->param_alloc_data, param, > param_alloc_size); > + ep->param = ep->param_alloc_data; > } > > pthread_mutex_lock(&handler->lock); > @@ -115,10 +140,32 @@ iavf_dev_event_post(struct rte_eth_dev *dev, > RTE_SET_USED(nw); > } > > +void > +iavf_dev_virtchnl_callback_post(struct rte_eth_dev *dev, > +virtchnl_callback cb, void *args) { > + struct iavf_virtchnl_handler *handler = &event_handler; > + char notify_byte; > + struct iavf_virtchnl_element *elem = rte_malloc(NULL, sizeof(*elem), > 0); > + if (!elem) > + return; > + elem->dev = dev; > + elem->handle_type = CALL_TYPE; > + struct call_param *cp = &elem->cp; > + cp->cb = cb; > + cp->args = args; > + > + pthread_mutex_lock(&handler->lock); > + TAILQ_INSERT_TAIL(&handler->pending, elem, next); > + pthread_mutex_unlock(&handler->lock); > + > + ssize_t nw = write(handler->fd[1], ¬ify_byte, 1); > + RTE_SET_USED(nw); > +} > + > int > -iavf_dev_event_handler_init(void) > +iavf_dev_virtchnl_handler_init(void) > { > - struct iavf_event_handler *handler = &event_handler; > + struct iavf_virtchnl_handler *handler = &event_handler; > > if (__atomic_add_fetch(&handler->ndev, 1, __ATOMIC_RELAXED) != 1) > return 0; > @@ -135,8 +182,8 @@ iavf_dev_event_handler_init(void) > TAILQ_INIT(&handler->pending); > pthread_mutex_init(&handler->lock, NULL); > > - if (rte_ctrl_thread_create(&handler->tid, "iavf-event-thread", > - NULL, iavf_dev_event_handle, NULL)) { > + if (rte_ctrl_thread_create(&handler->tid, "iavf-virtchnl-thread", > + NULL, iavf_dev_virtchnl_handle, NULL)) { > __atomic_sub_fetch(&handler->ndev, 1, > __ATOMIC_RELAXED); > return -1; > } > @@ -145,9 +192,9 @@ iavf_dev_event_handler_init(void) } > > void > -iavf_dev_event_handler_fini(void) > +iavf_dev_virtchnl_handler_fini(void) > { > - struct iavf_event_handler *handler = &event_handler; > + struct iavf_virtchnl_handler *handler = &event_handler; > > if (__atomic_sub_fetch(&handler->ndev, 1, __ATOMIC_RELAXED) != 0) > return; > @@ -162,7 +209,7 @@ iavf_dev_event_handler_fini(void) > pthread_join(handler->tid, NULL); > pthread_mutex_destroy(&handler->lock); > > - struct iavf_event_element *pos, *save_next; > + struct iavf_virtchnl_element *pos, *save_next; > TAILQ_FOREACH_SAFE(pos, &handler->pending, next, save_next) { > TAILQ_REMOVE(&handler->pending, pos, next); > 
rte_free(pos); > @@ -1408,7 +1455,7 @@ iavf_query_stats(struct iavf_adapter *adapter, > struct iavf_cmd_info args; > int err; > > - if (adapter->closed) > + if (!adapter || adapter->closed) > return -EIO; > > memset(&q_stats, 0, sizeof(q_stats)); > -- > 2.34.1 ^ permalink raw reply [flat|nested] 19+ messages in thread
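The "polling mode" suggested in the review above refers to the way the driver already handles commands such as VIRTCHNL_OP_VERSION that are issued before interrupts are usable: the sender reads the PF reply from the admin queue itself instead of waiting for the interrupt handler to flag completion. Roughly, as a sketch (simplified from the driver's existing loop and reusing its internal helpers, so not a standalone drop-in):

/* Simplified polling loop in the VIRTCHNL_OP_VERSION style; MAX_TRY_TIMES,
 * ASQ_DELAY_MS, IAVF_MSG_CMD, iavf_read_msg_from_pf() and
 * iavf_msec_delay() are the driver's own definitions.
 */
static int
poll_for_reply(struct iavf_adapter *adapter, struct iavf_cmd_info *args)
{
	int i = 0;
	int result;

	do {
		/* Pull the PF reply from the adminq directly instead of
		 * relying on the interrupt handler to deliver it.
		 */
		result = iavf_read_msg_from_pf(adapter, args->out_size,
					       args->out_buffer);
		if (result == IAVF_MSG_CMD)
			return 0;
		iavf_msec_delay(ASQ_DELAY_MS);
	} while (i++ < MAX_TRY_TIMES);

	return -EIO;	/* no reply within the timeout */
}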
* [PATCH v3] net/iavf: fix iavf query stats in intr thread 2023-03-07 2:55 ` [PATCH v2] " Kaiwen Deng 2023-03-15 13:40 ` Zhang, Qi Z @ 2023-03-22 7:26 ` Kaiwen Deng 2023-03-23 15:39 ` Ferruh Yigit ` (2 more replies) 1 sibling, 3 replies; 19+ messages in thread From: Kaiwen Deng @ 2023-03-22 7:26 UTC (permalink / raw) To: dev Cc: stable, qiming.yang, yidingx.zhou, Kaiwen Deng, Chas Williams, Min Hu (Connor), Jingjing Wu, Beilei Xing, Mike Pattrick, Qi Zhang, Declan Doherty, Daniel Mrzyglod, Dapeng Yu When iavf send query-stats command in eal-intr-thread through virtual channel, there will be no response received from iavf_dev_virtchnl_handler for this command during block and wait. Because iavf_dev_virtchnl_handler is also registered in eal-intr-thread. When vf device is bonded as BONDING_MODE_TLB mode, the slave device update callback will registered in alarm and called by eal-intr-thread, it would also raise the above issue. This commit add to poll the response for VIRTCHNL_OP_GET_STATS when it is called by eal-intr-thread to fix this issue. Fixes: 91bf37d250aa ("net/iavf: add lock for VF commands") Fixes: 22b123a36d07 ("net/avf: initialize PMD") Fixes: 7c76a747e68c ("bond: add mode 5") Fixes: 435d523112cc ("net/iavf: fix multi-process shared data") Fixes: cb5c1b91f76f ("net/iavf: add thread for event callbacks") Cc: stable@dpdk.org Signed-off-by: Kaiwen Deng <kaiwenx.deng@intel.com> --- Changes since v2: - Add to poll the response for VIRTCHNL_OP_GET_STATS. Changes since v1: - Add lock to avoid race condition. --- --- drivers/net/bonding/rte_eth_bond_pmd.c | 7 ++- drivers/net/iavf/iavf_ethdev.c | 5 +- drivers/net/iavf/iavf_vchnl.c | 71 ++++++++++++++++++-------- 3 files changed, 58 insertions(+), 25 deletions(-) diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c index f0c4f7d26b..edce621496 100644 --- a/drivers/net/bonding/rte_eth_bond_pmd.c +++ b/drivers/net/bonding/rte_eth_bond_pmd.c @@ -894,6 +894,7 @@ bond_ethdev_update_tlb_slave_cb(void *arg) uint8_t update_stats = 0; uint16_t slave_id; uint16_t i; + int ret; internals->slave_update_idx++; @@ -903,7 +904,10 @@ bond_ethdev_update_tlb_slave_cb(void *arg) for (i = 0; i < internals->active_slave_count; i++) { slave_id = internals->active_slaves[i]; - rte_eth_stats_get(slave_id, &slave_stats); + ret = rte_eth_stats_get(slave_id, &slave_stats); + if (ret) + goto OUT; + tx_bytes = slave_stats.obytes - tlb_last_obytets[slave_id]; bandwidth_left(slave_id, tx_bytes, internals->slave_update_idx, &bwg_array[i]); @@ -922,6 +926,7 @@ bond_ethdev_update_tlb_slave_cb(void *arg) for (i = 0; i < slave_count; i++) internals->tlb_slaves_order[i] = bwg_array[i].slave; +OUT: rte_eal_alarm_set(REORDER_PERIOD_MS * 1000, bond_ethdev_update_tlb_slave_cb, (struct bond_dev_private *)internals); } diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c index 3196210f2c..d6e1f1a7f4 100644 --- a/drivers/net/iavf/iavf_ethdev.c +++ b/drivers/net/iavf/iavf_ethdev.c @@ -2607,6 +2607,9 @@ iavf_dev_init(struct rte_eth_dev *eth_dev) adapter->dev_data = eth_dev->data; adapter->stopped = 1; + if (iavf_dev_event_handler_init()) + goto init_vf_err; + if (iavf_init_vf(eth_dev) != 0) { PMD_INIT_LOG(ERR, "Init vf failed"); return -1; @@ -2634,8 +2637,6 @@ iavf_dev_init(struct rte_eth_dev *eth_dev) rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr, ð_dev->data->mac_addrs[0]); - if (iavf_dev_event_handler_init()) - goto init_vf_err; if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) { /* register 
callback func to eal lib */ diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c index 9adaadb173..aeffb07cca 100644 --- a/drivers/net/iavf/iavf_vchnl.c +++ b/drivers/net/iavf/iavf_vchnl.c @@ -256,6 +256,7 @@ iavf_read_msg_from_pf(struct iavf_adapter *adapter, uint16_t buf_len, vf->link_speed = iavf_convert_link_speed(speed); } iavf_dev_link_update(vf->eth_dev, 0); + iavf_dev_event_post(vf->eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL, 0); PMD_DRV_LOG(INFO, "Link status update:%s", vf->link_up ? "up" : "down"); break; @@ -368,28 +369,48 @@ iavf_execute_vf_cmd(struct iavf_adapter *adapter, struct iavf_cmd_info *args, _clear_cmd(vf); break; default: - /* For other virtchnl ops in running time, - * wait for the cmd done flag. - */ - do { - if (vf->pend_cmd == VIRTCHNL_OP_UNKNOWN) - break; - iavf_msec_delay(ASQ_DELAY_MS); - /* If don't read msg or read sys event, continue */ - } while (i++ < MAX_TRY_TIMES); - - if (i >= MAX_TRY_TIMES) { - PMD_DRV_LOG(ERR, "No response for cmd %d", args->ops); + if (rte_thread_is_intr()) { + /* For virtchnl ops were executed in eal_intr_thread, + * need to poll the response. + */ + do { + result = iavf_read_msg_from_pf(adapter, args->out_size, + args->out_buffer); + if (result == IAVF_MSG_CMD) + break; + iavf_msec_delay(ASQ_DELAY_MS); + } while (i++ < MAX_TRY_TIMES); + if (i >= MAX_TRY_TIMES || + vf->cmd_retval != VIRTCHNL_STATUS_SUCCESS) { + err = -1; + PMD_DRV_LOG(ERR, "No response or return failure (%d)" + " for cmd %d", vf->cmd_retval, args->ops); + } _clear_cmd(vf); - err = -EIO; - } else if (vf->cmd_retval == - VIRTCHNL_STATUS_ERR_NOT_SUPPORTED) { - PMD_DRV_LOG(ERR, "Cmd %d not supported", args->ops); - err = -ENOTSUP; - } else if (vf->cmd_retval != VIRTCHNL_STATUS_SUCCESS) { - PMD_DRV_LOG(ERR, "Return failure %d for cmd %d", - vf->cmd_retval, args->ops); - err = -EINVAL; + } else { + /* For other virtchnl ops in running time, + * wait for the cmd done flag. + */ + do { + if (vf->pend_cmd == VIRTCHNL_OP_UNKNOWN) + break; + iavf_msec_delay(ASQ_DELAY_MS); + /* If don't read msg or read sys event, continue */ + } while (i++ < MAX_TRY_TIMES); + + if (i >= MAX_TRY_TIMES) { + PMD_DRV_LOG(ERR, "No response for cmd %d", args->ops); + _clear_cmd(vf); + err = -EIO; + } else if (vf->cmd_retval == + VIRTCHNL_STATUS_ERR_NOT_SUPPORTED) { + PMD_DRV_LOG(ERR, "Cmd %d not supported", args->ops); + err = -ENOTSUP; + } else if (vf->cmd_retval != VIRTCHNL_STATUS_SUCCESS) { + PMD_DRV_LOG(ERR, "Return failure %d for cmd %d", + vf->cmd_retval, args->ops); + err = -EINVAL; + } } break; } @@ -403,8 +424,14 @@ iavf_execute_vf_cmd_safe(struct iavf_adapter *adapter, { struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter); int ret; + int is_intr_thread = rte_thread_is_intr(); - rte_spinlock_lock(&vf->aq_lock); + if (is_intr_thread) { + if (!rte_spinlock_trylock(&vf->aq_lock)) + return -EIO; + } else { + rte_spinlock_lock(&vf->aq_lock); + } ret = iavf_execute_vf_cmd(adapter, args, async); rte_spinlock_unlock(&vf->aq_lock); -- 2.34.1 ^ permalink raw reply [flat|nested] 19+ messages in thread
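Besides polling for the VIRTCHNL_OP_GET_STATS reply, v3 also switches the interrupt-thread path to rte_spinlock_trylock() on the adminq lock. The hypothetical helper below distills the rule applied by the iavf_execute_vf_cmd_safe() hunk above; it is a restatement for clarity, not a replacement for the diff.

/* Never let the interrupt thread block on aq_lock: the current holder may
 * itself be waiting for an adminq reply that only the interrupt thread can
 * deliver, so blocking here would just move the deadlock to the lock.
 */
static int
take_aq_lock(struct iavf_info *vf)
{
	if (rte_thread_is_intr()) {
		if (!rte_spinlock_trylock(&vf->aq_lock))
			return -EIO;	/* caller reports failure instead */
		return 0;
	}
	rte_spinlock_lock(&vf->aq_lock);
	return 0;
}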
* Re: [PATCH v3] net/iavf: fix iavf query stats in intr thread 2023-03-22 7:26 ` [PATCH v3] " Kaiwen Deng @ 2023-03-23 15:39 ` Ferruh Yigit 2023-03-27 5:31 ` Deng, KaiwenX 2023-06-06 5:41 ` Jiale, SongX 2023-06-07 2:03 ` [PATCH v4] " Kaiwen Deng 2 siblings, 1 reply; 19+ messages in thread From: Ferruh Yigit @ 2023-03-23 15:39 UTC (permalink / raw) To: Kaiwen Deng, dev Cc: stable, qiming.yang, yidingx.zhou, Chas Williams, Min Hu (Connor), Jingjing Wu, Beilei Xing, Mike Pattrick, Qi Zhang, Declan Doherty, Daniel Mrzyglod, Dapeng Yu On 3/22/2023 7:26 AM, Kaiwen Deng wrote: > When iavf send query-stats command in eal-intr-thread through > virtual channel, there will be no response received from > iavf_dev_virtchnl_handler for this command during block and wait. > Because iavf_dev_virtchnl_handler is also registered in eal-intr-thread. > > When vf device is bonded as BONDING_MODE_TLB mode, the slave device > update callback will registered in alarm and called by eal-intr-thread, > it would also raise the above issue. > > This commit add to poll the response for VIRTCHNL_OP_GET_STATS > when it is called by eal-intr-thread to fix this issue. > > Fixes: 91bf37d250aa ("net/iavf: add lock for VF commands") > Fixes: 22b123a36d07 ("net/avf: initialize PMD") > Fixes: 7c76a747e68c ("bond: add mode 5") > Fixes: 435d523112cc ("net/iavf: fix multi-process shared data") > Fixes: cb5c1b91f76f ("net/iavf: add thread for event callbacks") Hi Kaiwen, Above commit already seems trying to address same issue, it creates "iavf-event-thread" control thread to asyncroniously handle the interrupts, in non-interrupt context, why it is not working? Instead of adding 'rte_thread_is_intr()' checks, can't you make sure all interrupts handled in control tread? And can you please provide a stack trace in commit log, to describe the issue better? > Cc: stable@dpdk.org > > Signed-off-by: Kaiwen Deng <kaiwenx.deng@intel.com> > --- > Changes since v2: > - Add to poll the response for VIRTCHNL_OP_GET_STATS. > > Changes since v1: > - Add lock to avoid race condition. 
> --- > --- > drivers/net/bonding/rte_eth_bond_pmd.c | 7 ++- > drivers/net/iavf/iavf_ethdev.c | 5 +- > drivers/net/iavf/iavf_vchnl.c | 71 ++++++++++++++++++-------- > 3 files changed, 58 insertions(+), 25 deletions(-) > > diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c > index f0c4f7d26b..edce621496 100644 > --- a/drivers/net/bonding/rte_eth_bond_pmd.c > +++ b/drivers/net/bonding/rte_eth_bond_pmd.c > @@ -894,6 +894,7 @@ bond_ethdev_update_tlb_slave_cb(void *arg) > uint8_t update_stats = 0; > uint16_t slave_id; > uint16_t i; > + int ret; > > internals->slave_update_idx++; > > @@ -903,7 +904,10 @@ bond_ethdev_update_tlb_slave_cb(void *arg) > > for (i = 0; i < internals->active_slave_count; i++) { > slave_id = internals->active_slaves[i]; > - rte_eth_stats_get(slave_id, &slave_stats); > + ret = rte_eth_stats_get(slave_id, &slave_stats); > + if (ret) > + goto OUT; > + > tx_bytes = slave_stats.obytes - tlb_last_obytets[slave_id]; > bandwidth_left(slave_id, tx_bytes, > internals->slave_update_idx, &bwg_array[i]); > @@ -922,6 +926,7 @@ bond_ethdev_update_tlb_slave_cb(void *arg) > for (i = 0; i < slave_count; i++) > internals->tlb_slaves_order[i] = bwg_array[i].slave; > > +OUT: > rte_eal_alarm_set(REORDER_PERIOD_MS * 1000, bond_ethdev_update_tlb_slave_cb, > (struct bond_dev_private *)internals); > } > diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c > index 3196210f2c..d6e1f1a7f4 100644 > --- a/drivers/net/iavf/iavf_ethdev.c > +++ b/drivers/net/iavf/iavf_ethdev.c > @@ -2607,6 +2607,9 @@ iavf_dev_init(struct rte_eth_dev *eth_dev) > adapter->dev_data = eth_dev->data; > adapter->stopped = 1; > > + if (iavf_dev_event_handler_init()) > + goto init_vf_err; > + > if (iavf_init_vf(eth_dev) != 0) { > PMD_INIT_LOG(ERR, "Init vf failed"); > return -1; > @@ -2634,8 +2637,6 @@ iavf_dev_init(struct rte_eth_dev *eth_dev) > rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr, > ð_dev->data->mac_addrs[0]); > > - if (iavf_dev_event_handler_init()) > - goto init_vf_err; > > if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) { > /* register callback func to eal lib */ > diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c > index 9adaadb173..aeffb07cca 100644 > --- a/drivers/net/iavf/iavf_vchnl.c > +++ b/drivers/net/iavf/iavf_vchnl.c > @@ -256,6 +256,7 @@ iavf_read_msg_from_pf(struct iavf_adapter *adapter, uint16_t buf_len, > vf->link_speed = iavf_convert_link_speed(speed); > } > iavf_dev_link_update(vf->eth_dev, 0); > + iavf_dev_event_post(vf->eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL, 0); > PMD_DRV_LOG(INFO, "Link status update:%s", > vf->link_up ? "up" : "down"); > break; > @@ -368,28 +369,48 @@ iavf_execute_vf_cmd(struct iavf_adapter *adapter, struct iavf_cmd_info *args, > _clear_cmd(vf); > break; > default: > - /* For other virtchnl ops in running time, > - * wait for the cmd done flag. > - */ > - do { > - if (vf->pend_cmd == VIRTCHNL_OP_UNKNOWN) > - break; > - iavf_msec_delay(ASQ_DELAY_MS); > - /* If don't read msg or read sys event, continue */ > - } while (i++ < MAX_TRY_TIMES); > - > - if (i >= MAX_TRY_TIMES) { > - PMD_DRV_LOG(ERR, "No response for cmd %d", args->ops); > + if (rte_thread_is_intr()) { > + /* For virtchnl ops were executed in eal_intr_thread, > + * need to poll the response. 
> + */ > + do { > + result = iavf_read_msg_from_pf(adapter, args->out_size, > + args->out_buffer); > + if (result == IAVF_MSG_CMD) > + break; > + iavf_msec_delay(ASQ_DELAY_MS); > + } while (i++ < MAX_TRY_TIMES); > + if (i >= MAX_TRY_TIMES || > + vf->cmd_retval != VIRTCHNL_STATUS_SUCCESS) { > + err = -1; > + PMD_DRV_LOG(ERR, "No response or return failure (%d)" > + " for cmd %d", vf->cmd_retval, args->ops); > + } > _clear_cmd(vf); > - err = -EIO; > - } else if (vf->cmd_retval == > - VIRTCHNL_STATUS_ERR_NOT_SUPPORTED) { > - PMD_DRV_LOG(ERR, "Cmd %d not supported", args->ops); > - err = -ENOTSUP; > - } else if (vf->cmd_retval != VIRTCHNL_STATUS_SUCCESS) { > - PMD_DRV_LOG(ERR, "Return failure %d for cmd %d", > - vf->cmd_retval, args->ops); > - err = -EINVAL; > + } else { > + /* For other virtchnl ops in running time, > + * wait for the cmd done flag. > + */ > + do { > + if (vf->pend_cmd == VIRTCHNL_OP_UNKNOWN) > + break; > + iavf_msec_delay(ASQ_DELAY_MS); > + /* If don't read msg or read sys event, continue */ > + } while (i++ < MAX_TRY_TIMES); > + > + if (i >= MAX_TRY_TIMES) { > + PMD_DRV_LOG(ERR, "No response for cmd %d", args->ops); > + _clear_cmd(vf); > + err = -EIO; > + } else if (vf->cmd_retval == > + VIRTCHNL_STATUS_ERR_NOT_SUPPORTED) { > + PMD_DRV_LOG(ERR, "Cmd %d not supported", args->ops); > + err = -ENOTSUP; > + } else if (vf->cmd_retval != VIRTCHNL_STATUS_SUCCESS) { > + PMD_DRV_LOG(ERR, "Return failure %d for cmd %d", > + vf->cmd_retval, args->ops); > + err = -EINVAL; > + } > } > break; > } > @@ -403,8 +424,14 @@ iavf_execute_vf_cmd_safe(struct iavf_adapter *adapter, > { > struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter); > int ret; > + int is_intr_thread = rte_thread_is_intr(); > > - rte_spinlock_lock(&vf->aq_lock); > + if (is_intr_thread) { > + if (!rte_spinlock_trylock(&vf->aq_lock)) > + return -EIO; > + } else { > + rte_spinlock_lock(&vf->aq_lock); > + } > ret = iavf_execute_vf_cmd(adapter, args, async); > rte_spinlock_unlock(&vf->aq_lock); > ^ permalink raw reply [flat|nested] 19+ messages in thread
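The control-thread approach referenced in the review above amounts to the pattern sketched here: interrupt context only enqueues a small record through a pipe, and a dedicated worker thread dequeues it and runs the callbacks outside interrupt context. This is a generic pthread illustration of that hand-off, not the driver's implementation. It works for fire-and-forget notifications, but an operation that must hand data back to its caller, such as a stats query, still has to block somewhere, which is the limitation discussed in the follow-up messages.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

/* A deferred notification: an event code only, no result travels back. */
struct deferred_event {
    int code;
};

static int pipe_fds[2];

/* Worker loop: runs on its own thread and handles whatever was posted,
 * well outside interrupt context. */
static void *
event_worker(void *arg)
{
    struct deferred_event ev;

    (void)arg;
    while (read(pipe_fds[0], &ev, sizeof(ev)) == (ssize_t)sizeof(ev))
        printf("handling event %d outside interrupt context\n", ev.code);
    return NULL;
}

/* Called from interrupt context: enqueue and return immediately. */
static void
post_event(int code)
{
    struct deferred_event ev = { .code = code };

    if (write(pipe_fds[1], &ev, sizeof(ev)) != (ssize_t)sizeof(ev))
        perror("write");
}

int
main(void)
{
    pthread_t tid;

    if (pipe(pipe_fds) != 0 ||
        pthread_create(&tid, NULL, event_worker, NULL) != 0)
        return EXIT_FAILURE;

    post_event(1);          /* e.g. a link-status-change notification */
    sleep(1);               /* toy example: give the worker time to run */
    close(pipe_fds[1]);     /* worker sees EOF and exits */
    pthread_join(tid, NULL);
    return EXIT_SUCCESS;
}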
* RE: [PATCH v3] net/iavf: fix iavf query stats in intr thread 2023-03-23 15:39 ` Ferruh Yigit @ 2023-03-27 5:31 ` Deng, KaiwenX 2023-03-27 12:31 ` Ferruh Yigit 0 siblings, 1 reply; 19+ messages in thread From: Deng, KaiwenX @ 2023-03-27 5:31 UTC (permalink / raw) To: Ferruh Yigit, dev Cc: stable, Yang, Qiming, Zhou, YidingX, Chas Williams, Min Hu (Connor), Wu, Jingjing, Xing, Beilei, Mike Pattrick, Zhang, Qi Z, Doherty, Declan, Mrzyglod, Daniel T, Dapeng Yu > -----Original Message----- > From: Ferruh Yigit <ferruh.yigit@amd.com> > Sent: Thursday, March 23, 2023 11:39 PM > To: Deng, KaiwenX <kaiwenx.deng@intel.com>; dev@dpdk.org > Cc: stable@dpdk.org; Yang, Qiming <qiming.yang@intel.com>; Zhou, YidingX > <yidingx.zhou@intel.com>; Chas Williams <chas3@att.com>; Min Hu (Connor) > <humin29@huawei.com>; Wu, Jingjing <jingjing.wu@intel.com>; Xing, Beilei > <beilei.xing@intel.com>; Mike Pattrick <mkp@redhat.com>; Zhang, Qi Z > <qi.z.zhang@intel.com>; Doherty, Declan <declan.doherty@intel.com>; > Mrzyglod, Daniel T <daniel.t.mrzyglod@intel.com>; Dapeng Yu > <dapengx.yu@intel.com> > Subject: Re: [PATCH v3] net/iavf: fix iavf query stats in intr thread > > On 3/22/2023 7:26 AM, Kaiwen Deng wrote: > > When iavf send query-stats command in eal-intr-thread through virtual > > channel, there will be no response received from > > iavf_dev_virtchnl_handler for this command during block and wait. > > Because iavf_dev_virtchnl_handler is also registered in eal-intr-thread. > > > > When vf device is bonded as BONDING_MODE_TLB mode, the slave device > > update callback will registered in alarm and called by > > eal-intr-thread, it would also raise the above issue. > > > > This commit add to poll the response for VIRTCHNL_OP_GET_STATS when > it > > is called by eal-intr-thread to fix this issue. > > > > Fixes: 91bf37d250aa ("net/iavf: add lock for VF commands") > > Fixes: 22b123a36d07 ("net/avf: initialize PMD") > > Fixes: 7c76a747e68c ("bond: add mode 5") > > Fixes: 435d523112cc ("net/iavf: fix multi-process shared data") > > Fixes: cb5c1b91f76f ("net/iavf: add thread for event callbacks") > > > Hi Kaiwen, > > Above commit already seems trying to address same issue, it creates "iavf- > event-thread" control thread to asyncroniously handle the interrupts, in non- > interrupt context, why it is not working? > > Instead of adding 'rte_thread_is_intr()' checks, can't you make sure all > interrupts handled in control tread? > > And can you please provide a stack trace in commit log, to describe the issue > better? Hi Ferru, Sorry for my late reply, And thanks for your review. The above commit does not fix this issue when we need to get the returned data. If we call iavf_query_stats and wait for response statistics in the intr-thread. iavf_handle_virtchnl_msg is also registered in the intr_thread and will not be executed while waiting. This commit I changed it to polling for replies to commands executed in the interrupt thread. main thread interrupt thread | | | | iavf_query_stats | iavf_execute_vf_cmd | iavf_aq_send_msg_to_pf and wait handle complete | | | |-------------------------------------------------------------------------------------------->| | | | iavf_handle_virtchnl_msg | | |<--------------------------------------------------------------------------------------------| | | iavf_execute_vf_cmd get response | | | The above is the stack trace for the normal execution of iavf_query_stats in the main thread. 
interrupt thread | | iavf_query_stats iavf_execute_vf_cmd iavf_aq_send_msg_to_pf wait handle complete(1 sec) iavf_execute_vf_cmd timeout | | iavf_handle_virtchnl_msg | The above is the stack trace for the abnormal execution of iavf_query_stats in the interrupt thread. > > > Cc: stable@dpdk.org > > > > Signed-off-by: Kaiwen Deng <kaiwenx.deng@intel.com> > > --- > > Changes since v2: > > - Add to poll the response for VIRTCHNL_OP_GET_STATS. > > > > Changes since v1: > > - Add lock to avoid race condition. > > --- > > --- > > drivers/net/bonding/rte_eth_bond_pmd.c | 7 ++- > > drivers/net/iavf/iavf_ethdev.c | 5 +- > > drivers/net/iavf/iavf_vchnl.c | 71 ++++++++++++++++++-------- > > 3 files changed, 58 insertions(+), 25 deletions(-) > > > > diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c > > b/drivers/net/bonding/rte_eth_bond_pmd.c > > index f0c4f7d26b..edce621496 100644 > > --- a/drivers/net/bonding/rte_eth_bond_pmd.c > > +++ b/drivers/net/bonding/rte_eth_bond_pmd.c > > @@ -894,6 +894,7 @@ bond_ethdev_update_tlb_slave_cb(void *arg) > > uint8_t update_stats = 0; > > uint16_t slave_id; > > uint16_t i; > > + int ret; > > > > internals->slave_update_idx++; > > > > @@ -903,7 +904,10 @@ bond_ethdev_update_tlb_slave_cb(void *arg) > > > > for (i = 0; i < internals->active_slave_count; i++) { > > slave_id = internals->active_slaves[i]; > > - rte_eth_stats_get(slave_id, &slave_stats); > > + ret = rte_eth_stats_get(slave_id, &slave_stats); > > + if (ret) > > + goto OUT; > > + > > tx_bytes = slave_stats.obytes - tlb_last_obytets[slave_id]; > > bandwidth_left(slave_id, tx_bytes, > > internals->slave_update_idx, &bwg_array[i]); > @@ -922,6 +926,7 @@ > > bond_ethdev_update_tlb_slave_cb(void *arg) > > for (i = 0; i < slave_count; i++) > > internals->tlb_slaves_order[i] = bwg_array[i].slave; > > > > +OUT: > > rte_eal_alarm_set(REORDER_PERIOD_MS * 1000, > bond_ethdev_update_tlb_slave_cb, > > (struct bond_dev_private *)internals); } diff --git > > a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c > > index 3196210f2c..d6e1f1a7f4 100644 > > --- a/drivers/net/iavf/iavf_ethdev.c > > +++ b/drivers/net/iavf/iavf_ethdev.c > > @@ -2607,6 +2607,9 @@ iavf_dev_init(struct rte_eth_dev *eth_dev) > > adapter->dev_data = eth_dev->data; > > adapter->stopped = 1; > > > > + if (iavf_dev_event_handler_init()) > > + goto init_vf_err; > > + > > if (iavf_init_vf(eth_dev) != 0) { > > PMD_INIT_LOG(ERR, "Init vf failed"); > > return -1; > > @@ -2634,8 +2637,6 @@ iavf_dev_init(struct rte_eth_dev *eth_dev) > > rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr, > > ð_dev->data->mac_addrs[0]); > > > > - if (iavf_dev_event_handler_init()) > > - goto init_vf_err; > > > > if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) > { > > /* register callback func to eal lib */ diff --git > > a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c index > > 9adaadb173..aeffb07cca 100644 > > --- a/drivers/net/iavf/iavf_vchnl.c > > +++ b/drivers/net/iavf/iavf_vchnl.c > > @@ -256,6 +256,7 @@ iavf_read_msg_from_pf(struct iavf_adapter > *adapter, uint16_t buf_len, > > vf->link_speed = > iavf_convert_link_speed(speed); > > } > > iavf_dev_link_update(vf->eth_dev, 0); > > + iavf_dev_event_post(vf->eth_dev, > RTE_ETH_EVENT_INTR_LSC, NULL, 0); > > PMD_DRV_LOG(INFO, "Link status update:%s", > > vf->link_up ? 
"up" : "down"); > > break; > > @@ -368,28 +369,48 @@ iavf_execute_vf_cmd(struct iavf_adapter > *adapter, struct iavf_cmd_info *args, > > _clear_cmd(vf); > > break; > > default: > > - /* For other virtchnl ops in running time, > > - * wait for the cmd done flag. > > - */ > > - do { > > - if (vf->pend_cmd == VIRTCHNL_OP_UNKNOWN) > > - break; > > - iavf_msec_delay(ASQ_DELAY_MS); > > - /* If don't read msg or read sys event, continue */ > > - } while (i++ < MAX_TRY_TIMES); > > - > > - if (i >= MAX_TRY_TIMES) { > > - PMD_DRV_LOG(ERR, "No response for cmd %d", > args->ops); > > + if (rte_thread_is_intr()) { > > + /* For virtchnl ops were executed in eal_intr_thread, > > + * need to poll the response. > > + */ > > + do { > > + result = iavf_read_msg_from_pf(adapter, > args->out_size, > > + args->out_buffer); > > + if (result == IAVF_MSG_CMD) > > + break; > > + iavf_msec_delay(ASQ_DELAY_MS); > > + } while (i++ < MAX_TRY_TIMES); > > + if (i >= MAX_TRY_TIMES || > > + vf->cmd_retval != > VIRTCHNL_STATUS_SUCCESS) { > > + err = -1; > > + PMD_DRV_LOG(ERR, "No response or return > failure (%d)" > > + " for cmd %d", vf- > >cmd_retval, args->ops); > > + } > > _clear_cmd(vf); > > - err = -EIO; > > - } else if (vf->cmd_retval == > > - VIRTCHNL_STATUS_ERR_NOT_SUPPORTED) { > > - PMD_DRV_LOG(ERR, "Cmd %d not supported", args- > >ops); > > - err = -ENOTSUP; > > - } else if (vf->cmd_retval != VIRTCHNL_STATUS_SUCCESS) { > > - PMD_DRV_LOG(ERR, "Return failure %d for cmd %d", > > - vf->cmd_retval, args->ops); > > - err = -EINVAL; > > + } else { > > + /* For other virtchnl ops in running time, > > + * wait for the cmd done flag. > > + */ > > + do { > > + if (vf->pend_cmd == > VIRTCHNL_OP_UNKNOWN) > > + break; > > + iavf_msec_delay(ASQ_DELAY_MS); > > + /* If don't read msg or read sys event, > continue */ > > + } while (i++ < MAX_TRY_TIMES); > > + > > + if (i >= MAX_TRY_TIMES) { > > + PMD_DRV_LOG(ERR, "No response for > cmd %d", args->ops); > > + _clear_cmd(vf); > > + err = -EIO; > > + } else if (vf->cmd_retval == > > + VIRTCHNL_STATUS_ERR_NOT_SUPPORTED) { > > + PMD_DRV_LOG(ERR, "Cmd %d not > supported", args->ops); > > + err = -ENOTSUP; > > + } else if (vf->cmd_retval != > VIRTCHNL_STATUS_SUCCESS) { > > + PMD_DRV_LOG(ERR, "Return failure %d for > cmd %d", > > + vf->cmd_retval, args->ops); > > + err = -EINVAL; > > + } > > } > > break; > > } > > @@ -403,8 +424,14 @@ iavf_execute_vf_cmd_safe(struct iavf_adapter > > *adapter, { > > struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter); > > int ret; > > + int is_intr_thread = rte_thread_is_intr(); > > > > - rte_spinlock_lock(&vf->aq_lock); > > + if (is_intr_thread) { > > + if (!rte_spinlock_trylock(&vf->aq_lock)) > > + return -EIO; > > + } else { > > + rte_spinlock_lock(&vf->aq_lock); > > + } > > ret = iavf_execute_vf_cmd(adapter, args, async); > > rte_spinlock_unlock(&vf->aq_lock); > > ^ permalink raw reply [flat|nested] 19+ messages in thread
* Re: [PATCH v3] net/iavf: fix iavf query stats in intr thread 2023-03-27 5:31 ` Deng, KaiwenX @ 2023-03-27 12:31 ` Ferruh Yigit 2023-03-27 12:37 ` Ferruh Yigit 2023-03-29 6:41 ` Deng, KaiwenX 0 siblings, 2 replies; 19+ messages in thread From: Ferruh Yigit @ 2023-03-27 12:31 UTC (permalink / raw) To: Deng, KaiwenX, dev Cc: stable, Yang, Qiming, Zhou, YidingX, Chas Williams, Min Hu (Connor), Wu, Jingjing, Xing, Beilei, Mike Pattrick, Zhang, Qi Z, Doherty, Declan, Mrzyglod, Daniel T, Dapeng Yu On 3/27/2023 6:31 AM, Deng, KaiwenX wrote: > > >> -----Original Message----- >> From: Ferruh Yigit <ferruh.yigit@amd.com> >> Sent: Thursday, March 23, 2023 11:39 PM >> To: Deng, KaiwenX <kaiwenx.deng@intel.com>; dev@dpdk.org >> Cc: stable@dpdk.org; Yang, Qiming <qiming.yang@intel.com>; Zhou, YidingX >> <yidingx.zhou@intel.com>; Chas Williams <chas3@att.com>; Min Hu (Connor) >> <humin29@huawei.com>; Wu, Jingjing <jingjing.wu@intel.com>; Xing, Beilei >> <beilei.xing@intel.com>; Mike Pattrick <mkp@redhat.com>; Zhang, Qi Z >> <qi.z.zhang@intel.com>; Doherty, Declan <declan.doherty@intel.com>; >> Mrzyglod, Daniel T <daniel.t.mrzyglod@intel.com>; Dapeng Yu >> <dapengx.yu@intel.com> >> Subject: Re: [PATCH v3] net/iavf: fix iavf query stats in intr thread >> >> On 3/22/2023 7:26 AM, Kaiwen Deng wrote: >>> When iavf send query-stats command in eal-intr-thread through virtual >>> channel, there will be no response received from >>> iavf_dev_virtchnl_handler for this command during block and wait. >>> Because iavf_dev_virtchnl_handler is also registered in eal-intr-thread. >>> >>> When vf device is bonded as BONDING_MODE_TLB mode, the slave device >>> update callback will registered in alarm and called by >>> eal-intr-thread, it would also raise the above issue. >>> >>> This commit add to poll the response for VIRTCHNL_OP_GET_STATS when >> it >>> is called by eal-intr-thread to fix this issue. >>> >>> Fixes: 91bf37d250aa ("net/iavf: add lock for VF commands") >>> Fixes: 22b123a36d07 ("net/avf: initialize PMD") >>> Fixes: 7c76a747e68c ("bond: add mode 5") >>> Fixes: 435d523112cc ("net/iavf: fix multi-process shared data") >>> Fixes: cb5c1b91f76f ("net/iavf: add thread for event callbacks") >> >> >> Hi Kaiwen, >> >> Above commit already seems trying to address same issue, it creates "iavf- >> event-thread" control thread to asyncroniously handle the interrupts, in non- >> interrupt context, why it is not working? >> >> Instead of adding 'rte_thread_is_intr()' checks, can't you make sure all >> interrupts handled in control tread? >> >> And can you please provide a stack trace in commit log, to describe the issue >> better? > Hi Ferru, > Sorry for my late reply, And thanks for your review. > > The above commit does not fix this issue when we need to get the returned data. > If we call iavf_query_stats and wait for response statistics in the intr-thread. > iavf_handle_virtchnl_msg is also registered in the intr_thread and will not be > executed while waiting. > Got it, since return value is required, API can't be called asyncroniously. I think 'rte_thread_is_intr()' checks may cause more trouble for you in long term, - why 'iavf_query_stats()' is called in the iterrupt thread, can it be prevented? - does it make sense to allways poll messages from PF (for simplification)? If answer to both are 'No', I am OK to continue with current proposal if you are happy with it. > This commit I changed it to polling for replies to commands executed in the interrupt thread. 
> > main thread interrupt thread > | | > | | > iavf_query_stats | > iavf_execute_vf_cmd | > iavf_aq_send_msg_to_pf and wait handle complete | > | | > |-------------------------------------------------------------------------------------------->| > | | > | iavf_handle_virtchnl_msg > | | > |<--------------------------------------------------------------------------------------------| > | | > iavf_execute_vf_cmd get response | > | | > > The above is the stack trace for the normal execution of iavf_query_stats > in the main thread. > > interrupt thread > | > | > iavf_query_stats > iavf_execute_vf_cmd > iavf_aq_send_msg_to_pf wait handle complete(1 sec) > iavf_execute_vf_cmd timeout > | > | > iavf_handle_virtchnl_msg > | > > The above is the stack trace for the abnormal execution of iavf_query_stats > in the interrupt thread. >> >>> Cc: stable@dpdk.org >>> >>> Signed-off-by: Kaiwen Deng <kaiwenx.deng@intel.com> >>> --- >>> Changes since v2: >>> - Add to poll the response for VIRTCHNL_OP_GET_STATS. >>> >>> Changes since v1: >>> - Add lock to avoid race condition. >>> --- >>> --- >>> drivers/net/bonding/rte_eth_bond_pmd.c | 7 ++- >>> drivers/net/iavf/iavf_ethdev.c | 5 +- >>> drivers/net/iavf/iavf_vchnl.c | 71 ++++++++++++++++++-------- >>> 3 files changed, 58 insertions(+), 25 deletions(-) >>> >>> diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c >>> b/drivers/net/bonding/rte_eth_bond_pmd.c >>> index f0c4f7d26b..edce621496 100644 >>> --- a/drivers/net/bonding/rte_eth_bond_pmd.c >>> +++ b/drivers/net/bonding/rte_eth_bond_pmd.c >>> @@ -894,6 +894,7 @@ bond_ethdev_update_tlb_slave_cb(void *arg) >>> uint8_t update_stats = 0; >>> uint16_t slave_id; >>> uint16_t i; >>> + int ret; >>> >>> internals->slave_update_idx++; >>> >>> @@ -903,7 +904,10 @@ bond_ethdev_update_tlb_slave_cb(void *arg) >>> >>> for (i = 0; i < internals->active_slave_count; i++) { >>> slave_id = internals->active_slaves[i]; >>> - rte_eth_stats_get(slave_id, &slave_stats); >>> + ret = rte_eth_stats_get(slave_id, &slave_stats); >>> + if (ret) >>> + goto OUT; >>> + >>> tx_bytes = slave_stats.obytes - tlb_last_obytets[slave_id]; >>> bandwidth_left(slave_id, tx_bytes, >>> internals->slave_update_idx, &bwg_array[i]); >> @@ -922,6 +926,7 @@ >>> bond_ethdev_update_tlb_slave_cb(void *arg) >>> for (i = 0; i < slave_count; i++) >>> internals->tlb_slaves_order[i] = bwg_array[i].slave; >>> >>> +OUT: >>> rte_eal_alarm_set(REORDER_PERIOD_MS * 1000, >> bond_ethdev_update_tlb_slave_cb, >>> (struct bond_dev_private *)internals); } diff --git >>> a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c >>> index 3196210f2c..d6e1f1a7f4 100644 >>> --- a/drivers/net/iavf/iavf_ethdev.c >>> +++ b/drivers/net/iavf/iavf_ethdev.c >>> @@ -2607,6 +2607,9 @@ iavf_dev_init(struct rte_eth_dev *eth_dev) >>> adapter->dev_data = eth_dev->data; >>> adapter->stopped = 1; >>> >>> + if (iavf_dev_event_handler_init()) >>> + goto init_vf_err; >>> + >>> if (iavf_init_vf(eth_dev) != 0) { >>> PMD_INIT_LOG(ERR, "Init vf failed"); >>> return -1; >>> @@ -2634,8 +2637,6 @@ iavf_dev_init(struct rte_eth_dev *eth_dev) >>> rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr, >>> ð_dev->data->mac_addrs[0]); >>> >>> - if (iavf_dev_event_handler_init()) >>> - goto init_vf_err; >>> >>> if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) >> { >>> /* register callback func to eal lib */ diff --git >>> a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c index >>> 9adaadb173..aeffb07cca 100644 >>> --- a/drivers/net/iavf/iavf_vchnl.c >>> +++ 
b/drivers/net/iavf/iavf_vchnl.c >>> @@ -256,6 +256,7 @@ iavf_read_msg_from_pf(struct iavf_adapter >> *adapter, uint16_t buf_len, >>> vf->link_speed = >> iavf_convert_link_speed(speed); >>> } >>> iavf_dev_link_update(vf->eth_dev, 0); >>> + iavf_dev_event_post(vf->eth_dev, >> RTE_ETH_EVENT_INTR_LSC, NULL, 0); >>> PMD_DRV_LOG(INFO, "Link status update:%s", >>> vf->link_up ? "up" : "down"); >>> break; >>> @@ -368,28 +369,48 @@ iavf_execute_vf_cmd(struct iavf_adapter >> *adapter, struct iavf_cmd_info *args, >>> _clear_cmd(vf); >>> break; >>> default: >>> - /* For other virtchnl ops in running time, >>> - * wait for the cmd done flag. >>> - */ >>> - do { >>> - if (vf->pend_cmd == VIRTCHNL_OP_UNKNOWN) >>> - break; >>> - iavf_msec_delay(ASQ_DELAY_MS); >>> - /* If don't read msg or read sys event, continue */ >>> - } while (i++ < MAX_TRY_TIMES); >>> - >>> - if (i >= MAX_TRY_TIMES) { >>> - PMD_DRV_LOG(ERR, "No response for cmd %d", >> args->ops); >>> + if (rte_thread_is_intr()) { >>> + /* For virtchnl ops were executed in eal_intr_thread, >>> + * need to poll the response. >>> + */ >>> + do { >>> + result = iavf_read_msg_from_pf(adapter, >> args->out_size, >>> + args->out_buffer); >>> + if (result == IAVF_MSG_CMD) >>> + break; >>> + iavf_msec_delay(ASQ_DELAY_MS); >>> + } while (i++ < MAX_TRY_TIMES); >>> + if (i >= MAX_TRY_TIMES || >>> + vf->cmd_retval != >> VIRTCHNL_STATUS_SUCCESS) { >>> + err = -1; >>> + PMD_DRV_LOG(ERR, "No response or return >> failure (%d)" >>> + " for cmd %d", vf- >>> cmd_retval, args->ops); >>> + } >>> _clear_cmd(vf); >>> - err = -EIO; >>> - } else if (vf->cmd_retval == >>> - VIRTCHNL_STATUS_ERR_NOT_SUPPORTED) { >>> - PMD_DRV_LOG(ERR, "Cmd %d not supported", args- >>> ops); >>> - err = -ENOTSUP; >>> - } else if (vf->cmd_retval != VIRTCHNL_STATUS_SUCCESS) { >>> - PMD_DRV_LOG(ERR, "Return failure %d for cmd %d", >>> - vf->cmd_retval, args->ops); >>> - err = -EINVAL; >>> + } else { >>> + /* For other virtchnl ops in running time, >>> + * wait for the cmd done flag. >>> + */ >>> + do { >>> + if (vf->pend_cmd == >> VIRTCHNL_OP_UNKNOWN) >>> + break; >>> + iavf_msec_delay(ASQ_DELAY_MS); >>> + /* If don't read msg or read sys event, >> continue */ >>> + } while (i++ < MAX_TRY_TIMES); >>> + >>> + if (i >= MAX_TRY_TIMES) { >>> + PMD_DRV_LOG(ERR, "No response for >> cmd %d", args->ops); >>> + _clear_cmd(vf); >>> + err = -EIO; >>> + } else if (vf->cmd_retval == >>> + VIRTCHNL_STATUS_ERR_NOT_SUPPORTED) { >>> + PMD_DRV_LOG(ERR, "Cmd %d not >> supported", args->ops); >>> + err = -ENOTSUP; >>> + } else if (vf->cmd_retval != >> VIRTCHNL_STATUS_SUCCESS) { >>> + PMD_DRV_LOG(ERR, "Return failure %d for >> cmd %d", >>> + vf->cmd_retval, args->ops); >>> + err = -EINVAL; >>> + } >>> } >>> break; >>> } >>> @@ -403,8 +424,14 @@ iavf_execute_vf_cmd_safe(struct iavf_adapter >>> *adapter, { >>> struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter); >>> int ret; >>> + int is_intr_thread = rte_thread_is_intr(); >>> >>> - rte_spinlock_lock(&vf->aq_lock); >>> + if (is_intr_thread) { >>> + if (!rte_spinlock_trylock(&vf->aq_lock)) >>> + return -EIO; >>> + } else { >>> + rte_spinlock_lock(&vf->aq_lock); >>> + } >>> ret = iavf_execute_vf_cmd(adapter, args, async); >>> rte_spinlock_unlock(&vf->aq_lock); >>> > ^ permalink raw reply [flat|nested] 19+ messages in thread
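The "always poll" simplification floated in the review above would look roughly like the sketch below: a single wait path that reads the PF mailbox directly for every caller instead of branching on rte_thread_is_intr(). The helper poll_pf_mailbox() is a placeholder for the driver's admin-queue read. The open question with this shape is ownership of the receive path, since a dedicated handler thread reading the same queue for unsolicited events could consume the reply first; that coordination is deliberately left out of this illustration.

#include <errno.h>
#include <stdbool.h>
#include <rte_cycles.h>

#define MAX_TRIES   200
#define DELAY_MS    10

/* Placeholder for one admin-queue read; returns true once the reply to
 * the pending command has been taken out of the mailbox. */
static bool
poll_pf_mailbox(void *buf, int len)
{
    (void)buf;
    (void)len;
    return true;
}

/* One wait path for every caller: always poll the mailbox rather than
 * depending on another thread to flip a completion flag. */
static int
wait_for_reply_always_poll(void *buf, int len)
{
    int i;

    for (i = 0; i < MAX_TRIES; i++) {
        if (poll_pf_mailbox(buf, len))
            return 0;
        rte_delay_ms(DELAY_MS);
    }
    return -EIO;
}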
* Re: [PATCH v3] net/iavf: fix iavf query stats in intr thread 2023-03-27 12:31 ` Ferruh Yigit @ 2023-03-27 12:37 ` Ferruh Yigit 2023-03-29 7:53 ` Deng, KaiwenX 2023-05-05 2:31 ` Deng, KaiwenX 2023-03-29 6:41 ` Deng, KaiwenX 1 sibling, 2 replies; 19+ messages in thread From: Ferruh Yigit @ 2023-03-27 12:37 UTC (permalink / raw) To: Deng, KaiwenX, dev Cc: stable, Yang, Qiming, Zhou, YidingX, Chas Williams, Min Hu (Connor), Wu, Jingjing, Xing, Beilei, Mike Pattrick, Zhang, Qi Z, Doherty, Declan, Mrzyglod, Daniel T, Dapeng Yu, Helin Zhang, Mcnamara, John, Thomas Monjalon On 3/27/2023 1:31 PM, Ferruh Yigit wrote: > On 3/27/2023 6:31 AM, Deng, KaiwenX wrote: >> >> >>> -----Original Message----- >>> From: Ferruh Yigit <ferruh.yigit@amd.com> >>> Sent: Thursday, March 23, 2023 11:39 PM >>> To: Deng, KaiwenX <kaiwenx.deng@intel.com>; dev@dpdk.org >>> Cc: stable@dpdk.org; Yang, Qiming <qiming.yang@intel.com>; Zhou, YidingX >>> <yidingx.zhou@intel.com>; Chas Williams <chas3@att.com>; Min Hu (Connor) >>> <humin29@huawei.com>; Wu, Jingjing <jingjing.wu@intel.com>; Xing, Beilei >>> <beilei.xing@intel.com>; Mike Pattrick <mkp@redhat.com>; Zhang, Qi Z >>> <qi.z.zhang@intel.com>; Doherty, Declan <declan.doherty@intel.com>; >>> Mrzyglod, Daniel T <daniel.t.mrzyglod@intel.com>; Dapeng Yu >>> <dapengx.yu@intel.com> >>> Subject: Re: [PATCH v3] net/iavf: fix iavf query stats in intr thread >>> >>> On 3/22/2023 7:26 AM, Kaiwen Deng wrote: >>>> When iavf send query-stats command in eal-intr-thread through virtual >>>> channel, there will be no response received from >>>> iavf_dev_virtchnl_handler for this command during block and wait. >>>> Because iavf_dev_virtchnl_handler is also registered in eal-intr-thread. >>>> >>>> When vf device is bonded as BONDING_MODE_TLB mode, the slave device >>>> update callback will registered in alarm and called by >>>> eal-intr-thread, it would also raise the above issue. >>>> >>>> This commit add to poll the response for VIRTCHNL_OP_GET_STATS when >>> it >>>> is called by eal-intr-thread to fix this issue. >>>> >>>> Fixes: 91bf37d250aa ("net/iavf: add lock for VF commands") >>>> Fixes: 22b123a36d07 ("net/avf: initialize PMD") >>>> Fixes: 7c76a747e68c ("bond: add mode 5") >>>> Fixes: 435d523112cc ("net/iavf: fix multi-process shared data") >>>> Fixes: cb5c1b91f76f ("net/iavf: add thread for event callbacks") >>> >>> >>> Hi Kaiwen, >>> >>> Above commit already seems trying to address same issue, it creates "iavf- >>> event-thread" control thread to asyncroniously handle the interrupts, in non- >>> interrupt context, why it is not working? >>> >>> Instead of adding 'rte_thread_is_intr()' checks, can't you make sure all >>> interrupts handled in control tread? >>> >>> And can you please provide a stack trace in commit log, to describe the issue >>> better? >> Hi Ferru, >> Sorry for my late reply, And thanks for your review. >> >> The above commit does not fix this issue when we need to get the returned data. >> If we call iavf_query_stats and wait for response statistics in the intr-thread. >> iavf_handle_virtchnl_msg is also registered in the intr_thread and will not be >> executed while waiting. >> > > Got it, since return value is required, API can't be called asyncroniously. > > > > I think 'rte_thread_is_intr()' checks may cause more trouble for you in > long term, > > - why 'iavf_query_stats()' is called in the iterrupt thread, can it be > prevented? > > - does it make sense to allways poll messages from PF (for simplification)? 
> > > If answer to both are 'No', I am OK to continue with current proposal if > you are happy with it. > btw, how critical is this issue? If it is critical, I am OK to get it as it is for this release and investigate it further for next release, since only a few days left for this release. > >> This commit I changed it to polling for replies to commands executed in the interrupt thread. >> >> main thread interrupt thread >> | | >> | | >> iavf_query_stats | >> iavf_execute_vf_cmd | >> iavf_aq_send_msg_to_pf and wait handle complete | >> | | >> |-------------------------------------------------------------------------------------------->| >> | | >> | iavf_handle_virtchnl_msg >> | | >> |<--------------------------------------------------------------------------------------------| >> | | >> iavf_execute_vf_cmd get response | >> | | >> >> The above is the stack trace for the normal execution of iavf_query_stats >> in the main thread. >> >> interrupt thread >> | >> | >> iavf_query_stats >> iavf_execute_vf_cmd >> iavf_aq_send_msg_to_pf wait handle complete(1 sec) >> iavf_execute_vf_cmd timeout >> | >> | >> iavf_handle_virtchnl_msg >> | >> >> The above is the stack trace for the abnormal execution of iavf_query_stats >> in the interrupt thread. >>> >>>> Cc: stable@dpdk.org >>>> >>>> Signed-off-by: Kaiwen Deng <kaiwenx.deng@intel.com> >>>> --- >>>> Changes since v2: >>>> - Add to poll the response for VIRTCHNL_OP_GET_STATS. >>>> >>>> Changes since v1: >>>> - Add lock to avoid race condition. >>>> --- >>>> --- >>>> drivers/net/bonding/rte_eth_bond_pmd.c | 7 ++- >>>> drivers/net/iavf/iavf_ethdev.c | 5 +- >>>> drivers/net/iavf/iavf_vchnl.c | 71 ++++++++++++++++++-------- >>>> 3 files changed, 58 insertions(+), 25 deletions(-) >>>> >>>> diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c >>>> b/drivers/net/bonding/rte_eth_bond_pmd.c >>>> index f0c4f7d26b..edce621496 100644 >>>> --- a/drivers/net/bonding/rte_eth_bond_pmd.c >>>> +++ b/drivers/net/bonding/rte_eth_bond_pmd.c >>>> @@ -894,6 +894,7 @@ bond_ethdev_update_tlb_slave_cb(void *arg) >>>> uint8_t update_stats = 0; >>>> uint16_t slave_id; >>>> uint16_t i; >>>> + int ret; >>>> >>>> internals->slave_update_idx++; >>>> >>>> @@ -903,7 +904,10 @@ bond_ethdev_update_tlb_slave_cb(void *arg) >>>> >>>> for (i = 0; i < internals->active_slave_count; i++) { >>>> slave_id = internals->active_slaves[i]; >>>> - rte_eth_stats_get(slave_id, &slave_stats); >>>> + ret = rte_eth_stats_get(slave_id, &slave_stats); >>>> + if (ret) >>>> + goto OUT; >>>> + >>>> tx_bytes = slave_stats.obytes - tlb_last_obytets[slave_id]; >>>> bandwidth_left(slave_id, tx_bytes, >>>> internals->slave_update_idx, &bwg_array[i]); >>> @@ -922,6 +926,7 @@ >>>> bond_ethdev_update_tlb_slave_cb(void *arg) >>>> for (i = 0; i < slave_count; i++) >>>> internals->tlb_slaves_order[i] = bwg_array[i].slave; >>>> >>>> +OUT: >>>> rte_eal_alarm_set(REORDER_PERIOD_MS * 1000, >>> bond_ethdev_update_tlb_slave_cb, >>>> (struct bond_dev_private *)internals); } diff --git >>>> a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c >>>> index 3196210f2c..d6e1f1a7f4 100644 >>>> --- a/drivers/net/iavf/iavf_ethdev.c >>>> +++ b/drivers/net/iavf/iavf_ethdev.c >>>> @@ -2607,6 +2607,9 @@ iavf_dev_init(struct rte_eth_dev *eth_dev) >>>> adapter->dev_data = eth_dev->data; >>>> adapter->stopped = 1; >>>> >>>> + if (iavf_dev_event_handler_init()) >>>> + goto init_vf_err; >>>> + >>>> if (iavf_init_vf(eth_dev) != 0) { >>>> PMD_INIT_LOG(ERR, "Init vf failed"); >>>> return -1; >>>> @@ -2634,8 +2637,6 @@ 
iavf_dev_init(struct rte_eth_dev *eth_dev) >>>> rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr, >>>> ð_dev->data->mac_addrs[0]); >>>> >>>> - if (iavf_dev_event_handler_init()) >>>> - goto init_vf_err; >>>> >>>> if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) >>> { >>>> /* register callback func to eal lib */ diff --git >>>> a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c index >>>> 9adaadb173..aeffb07cca 100644 >>>> --- a/drivers/net/iavf/iavf_vchnl.c >>>> +++ b/drivers/net/iavf/iavf_vchnl.c >>>> @@ -256,6 +256,7 @@ iavf_read_msg_from_pf(struct iavf_adapter >>> *adapter, uint16_t buf_len, >>>> vf->link_speed = >>> iavf_convert_link_speed(speed); >>>> } >>>> iavf_dev_link_update(vf->eth_dev, 0); >>>> + iavf_dev_event_post(vf->eth_dev, >>> RTE_ETH_EVENT_INTR_LSC, NULL, 0); >>>> PMD_DRV_LOG(INFO, "Link status update:%s", >>>> vf->link_up ? "up" : "down"); >>>> break; >>>> @@ -368,28 +369,48 @@ iavf_execute_vf_cmd(struct iavf_adapter >>> *adapter, struct iavf_cmd_info *args, >>>> _clear_cmd(vf); >>>> break; >>>> default: >>>> - /* For other virtchnl ops in running time, >>>> - * wait for the cmd done flag. >>>> - */ >>>> - do { >>>> - if (vf->pend_cmd == VIRTCHNL_OP_UNKNOWN) >>>> - break; >>>> - iavf_msec_delay(ASQ_DELAY_MS); >>>> - /* If don't read msg or read sys event, continue */ >>>> - } while (i++ < MAX_TRY_TIMES); >>>> - >>>> - if (i >= MAX_TRY_TIMES) { >>>> - PMD_DRV_LOG(ERR, "No response for cmd %d", >>> args->ops); >>>> + if (rte_thread_is_intr()) { >>>> + /* For virtchnl ops were executed in eal_intr_thread, >>>> + * need to poll the response. >>>> + */ >>>> + do { >>>> + result = iavf_read_msg_from_pf(adapter, >>> args->out_size, >>>> + args->out_buffer); >>>> + if (result == IAVF_MSG_CMD) >>>> + break; >>>> + iavf_msec_delay(ASQ_DELAY_MS); >>>> + } while (i++ < MAX_TRY_TIMES); >>>> + if (i >= MAX_TRY_TIMES || >>>> + vf->cmd_retval != >>> VIRTCHNL_STATUS_SUCCESS) { >>>> + err = -1; >>>> + PMD_DRV_LOG(ERR, "No response or return >>> failure (%d)" >>>> + " for cmd %d", vf- >>>> cmd_retval, args->ops); >>>> + } >>>> _clear_cmd(vf); >>>> - err = -EIO; >>>> - } else if (vf->cmd_retval == >>>> - VIRTCHNL_STATUS_ERR_NOT_SUPPORTED) { >>>> - PMD_DRV_LOG(ERR, "Cmd %d not supported", args- >>>> ops); >>>> - err = -ENOTSUP; >>>> - } else if (vf->cmd_retval != VIRTCHNL_STATUS_SUCCESS) { >>>> - PMD_DRV_LOG(ERR, "Return failure %d for cmd %d", >>>> - vf->cmd_retval, args->ops); >>>> - err = -EINVAL; >>>> + } else { >>>> + /* For other virtchnl ops in running time, >>>> + * wait for the cmd done flag. 
>>>> + */ >>>> + do { >>>> + if (vf->pend_cmd == >>> VIRTCHNL_OP_UNKNOWN) >>>> + break; >>>> + iavf_msec_delay(ASQ_DELAY_MS); >>>> + /* If don't read msg or read sys event, >>> continue */ >>>> + } while (i++ < MAX_TRY_TIMES); >>>> + >>>> + if (i >= MAX_TRY_TIMES) { >>>> + PMD_DRV_LOG(ERR, "No response for >>> cmd %d", args->ops); >>>> + _clear_cmd(vf); >>>> + err = -EIO; >>>> + } else if (vf->cmd_retval == >>>> + VIRTCHNL_STATUS_ERR_NOT_SUPPORTED) { >>>> + PMD_DRV_LOG(ERR, "Cmd %d not >>> supported", args->ops); >>>> + err = -ENOTSUP; >>>> + } else if (vf->cmd_retval != >>> VIRTCHNL_STATUS_SUCCESS) { >>>> + PMD_DRV_LOG(ERR, "Return failure %d for >>> cmd %d", >>>> + vf->cmd_retval, args->ops); >>>> + err = -EINVAL; >>>> + } >>>> } >>>> break; >>>> } >>>> @@ -403,8 +424,14 @@ iavf_execute_vf_cmd_safe(struct iavf_adapter >>>> *adapter, { >>>> struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter); >>>> int ret; >>>> + int is_intr_thread = rte_thread_is_intr(); >>>> >>>> - rte_spinlock_lock(&vf->aq_lock); >>>> + if (is_intr_thread) { >>>> + if (!rte_spinlock_trylock(&vf->aq_lock)) >>>> + return -EIO; >>>> + } else { >>>> + rte_spinlock_lock(&vf->aq_lock); >>>> + } >>>> ret = iavf_execute_vf_cmd(adapter, args, async); >>>> rte_spinlock_unlock(&vf->aq_lock); >>>> >> > ^ permalink raw reply [flat|nested] 19+ messages in thread
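On the bonding side, the change quoted in this sub-thread reduces to checking the return value of rte_eth_stats_get() inside the periodic TLB callback and re-arming the alarm unconditionally, so a failed query (for example -EIO from the try-lock path in the iavf change) skips that update cycle instead of computing the balance from invalid counters, while the periodic update keeps running. A trimmed sketch, with the per-slave loop and bandwidth reordering omitted:

#include <stdint.h>
#include <rte_alarm.h>
#include <rte_ethdev.h>

#define UPDATE_PERIOD_US (100 * 1000)

/* Periodic TLB-style callback.  Alarm callbacks run in the interrupt
 * thread, so the stats query may legitimately fail there and its result
 * must not be used blindly. */
static void
tlb_update_cb(void *arg)
{
    uint16_t port_id = *(uint16_t *)arg;
    struct rte_eth_stats stats;

    if (rte_eth_stats_get(port_id, &stats) == 0) {
        /* ... use stats.obytes to rebalance the transmit load ... */
    }

    /* Re-arm unconditionally, mirroring the goto OUT in the patch, so a
     * single failed query does not stop the periodic update. */
    rte_eal_alarm_set(UPDATE_PERIOD_US, tlb_update_cb, arg);
}

The real callback iterates all active slaves and reorders them by remaining bandwidth; only the error-handling shape is shown here, and the alarm is assumed to be armed once with a port id that outlives it.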
* RE: [PATCH v3] net/iavf: fix iavf query stats in intr thread 2023-03-27 12:37 ` Ferruh Yigit @ 2023-03-29 7:53 ` Deng, KaiwenX 2023-05-05 2:31 ` Deng, KaiwenX 1 sibling, 0 replies; 19+ messages in thread From: Deng, KaiwenX @ 2023-03-29 7:53 UTC (permalink / raw) To: Ferruh Yigit, dev Cc: stable, Yang, Qiming, Zhou, YidingX, Chas Williams, Min Hu (Connor), Wu, Jingjing, Xing, Beilei, Mike Pattrick, Zhang, Qi Z, Doherty, Declan, Mrzyglod, Daniel T, Dapeng Yu, Zhang, Helin, Mcnamara, John, Thomas Monjalon > -----Original Message----- > From: Ferruh Yigit <ferruh.yigit@amd.com> > Sent: Monday, March 27, 2023 8:38 PM > To: Deng, KaiwenX <kaiwenx.deng@intel.com>; dev@dpdk.org > Cc: stable@dpdk.org; Yang, Qiming <qiming.yang@intel.com>; Zhou, YidingX > <yidingx.zhou@intel.com>; Chas Williams <chas3@att.com>; Min Hu (Connor) > <humin29@huawei.com>; Wu, Jingjing <jingjing.wu@intel.com>; Xing, Beilei > <beilei.xing@intel.com>; Mike Pattrick <mkp@redhat.com>; Zhang, Qi Z > <qi.z.zhang@intel.com>; Doherty, Declan <declan.doherty@intel.com>; > Mrzyglod, Daniel T <daniel.t.mrzyglod@intel.com>; Dapeng Yu > <dapengx.yu@intel.com>; Zhang, Helin <helin.zhang@intel.com>; > Mcnamara, John <john.mcnamara@intel.com>; Thomas Monjalon > <thomas@monjalon.net> > Subject: Re: [PATCH v3] net/iavf: fix iavf query stats in intr thread > > On 3/27/2023 1:31 PM, Ferruh Yigit wrote: > > On 3/27/2023 6:31 AM, Deng, KaiwenX wrote: > >> > >> > >>> -----Original Message----- > >>> From: Ferruh Yigit <ferruh.yigit@amd.com> > >>> Sent: Thursday, March 23, 2023 11:39 PM > >>> To: Deng, KaiwenX <kaiwenx.deng@intel.com>; dev@dpdk.org > >>> Cc: stable@dpdk.org; Yang, Qiming <qiming.yang@intel.com>; Zhou, > >>> YidingX <yidingx.zhou@intel.com>; Chas Williams <chas3@att.com>; Min > >>> Hu (Connor) <humin29@huawei.com>; Wu, Jingjing > >>> <jingjing.wu@intel.com>; Xing, Beilei <beilei.xing@intel.com>; Mike > >>> Pattrick <mkp@redhat.com>; Zhang, Qi Z <qi.z.zhang@intel.com>; > >>> Doherty, Declan <declan.doherty@intel.com>; Mrzyglod, Daniel T > >>> <daniel.t.mrzyglod@intel.com>; Dapeng Yu <dapengx.yu@intel.com> > >>> Subject: Re: [PATCH v3] net/iavf: fix iavf query stats in intr > >>> thread > >>> > >>> On 3/22/2023 7:26 AM, Kaiwen Deng wrote: > >>>> When iavf send query-stats command in eal-intr-thread through > >>>> virtual channel, there will be no response received from > >>>> iavf_dev_virtchnl_handler for this command during block and wait. > >>>> Because iavf_dev_virtchnl_handler is also registered in eal-intr-thread. > >>>> > >>>> When vf device is bonded as BONDING_MODE_TLB mode, the slave > device > >>>> update callback will registered in alarm and called by > >>>> eal-intr-thread, it would also raise the above issue. > >>>> > >>>> This commit add to poll the response for VIRTCHNL_OP_GET_STATS > when > >>> it > >>>> is called by eal-intr-thread to fix this issue. > >>>> > >>>> Fixes: 91bf37d250aa ("net/iavf: add lock for VF commands") > >>>> Fixes: 22b123a36d07 ("net/avf: initialize PMD") > >>>> Fixes: 7c76a747e68c ("bond: add mode 5") > >>>> Fixes: 435d523112cc ("net/iavf: fix multi-process shared data") > >>>> Fixes: cb5c1b91f76f ("net/iavf: add thread for event callbacks") > >>> > >>> > >>> Hi Kaiwen, > >>> > >>> Above commit already seems trying to address same issue, it creates > >>> "iavf- event-thread" control thread to asyncroniously handle the > >>> interrupts, in non- interrupt context, why it is not working? 
> >>> > >>> Instead of adding 'rte_thread_is_intr()' checks, can't you make sure > >>> all interrupts handled in control tread? > >>> > >>> And can you please provide a stack trace in commit log, to describe > >>> the issue better? > >> Hi Ferru, > >> Sorry for my late reply, And thanks for your review. > >> > >> The above commit does not fix this issue when we need to get the > returned data. > >> If we call iavf_query_stats and wait for response statistics in the intr- > thread. > >> iavf_handle_virtchnl_msg is also registered in the intr_thread and > >> will not be executed while waiting. > >> > > > > Got it, since return value is required, API can't be called asyncroniously. > > > > > > > > I think 'rte_thread_is_intr()' checks may cause more trouble for you > > in long term, > > > > - why 'iavf_query_stats()' is called in the iterrupt thread, can it be > > prevented? > > > > - does it make sense to allways poll messages from PF (for simplification)? > > > > > > If answer to both are 'No', I am OK to continue with current proposal > > if you are happy with it. > > > > > btw, how critical is this issue? > > If it is critical, I am OK to get it as it is for this release and investigate it further > for next release, since only a few days left for this release. > This is not a very crtical issue, we can wait for a more appropriate solution. Thanks. > > > > >> This commit I changed it to polling for replies to commands executed in > the interrupt thread. > >> > >> main thread interrupt > thread > >> | | > >> | | > >> iavf_query_stats | > >> iavf_execute_vf_cmd | > >> iavf_aq_send_msg_to_pf and wait handle complete | > >> | | > >> |------------------------------------------------------------------------------------- > ------->| > >> | | > >> | > iavf_handle_virtchnl_msg > >> | | > >> |<------------------------------------------------------------------------------------ > --------| > >> | | > >> iavf_execute_vf_cmd get response | > >> | | > >> > >> The above is the stack trace for the normal execution of > >> iavf_query_stats in the main thread. > >> > >> interrupt thread > >> | > >> | > >> iavf_query_stats > >> iavf_execute_vf_cmd > >> iavf_aq_send_msg_to_pf wait handle complete(1 sec) > >> iavf_execute_vf_cmd timeout > >> | > >> | > >> iavf_handle_virtchnl_msg > >> | > >> > >> The above is the stack trace for the abnormal execution of > >> iavf_query_stats in the interrupt thread. > >>> > >>>> Cc: stable@dpdk.org > >>>> > >>>> Signed-off-by: Kaiwen Deng <kaiwenx.deng@intel.com> > >>>> --- > >>>> Changes since v2: > >>>> - Add to poll the response for VIRTCHNL_OP_GET_STATS. > >>>> > >>>> Changes since v1: > >>>> - Add lock to avoid race condition. 
> >>>> --- > >>>> --- > >>>> drivers/net/bonding/rte_eth_bond_pmd.c | 7 ++- > >>>> drivers/net/iavf/iavf_ethdev.c | 5 +- > >>>> drivers/net/iavf/iavf_vchnl.c | 71 ++++++++++++++++++-------- > >>>> 3 files changed, 58 insertions(+), 25 deletions(-) > >>>> > >>>> diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c > >>>> b/drivers/net/bonding/rte_eth_bond_pmd.c > >>>> index f0c4f7d26b..edce621496 100644 > >>>> --- a/drivers/net/bonding/rte_eth_bond_pmd.c > >>>> +++ b/drivers/net/bonding/rte_eth_bond_pmd.c > >>>> @@ -894,6 +894,7 @@ bond_ethdev_update_tlb_slave_cb(void *arg) > >>>> uint8_t update_stats = 0; > >>>> uint16_t slave_id; > >>>> uint16_t i; > >>>> + int ret; > >>>> > >>>> internals->slave_update_idx++; > >>>> > >>>> @@ -903,7 +904,10 @@ bond_ethdev_update_tlb_slave_cb(void *arg) > >>>> > >>>> for (i = 0; i < internals->active_slave_count; i++) { > >>>> slave_id = internals->active_slaves[i]; > >>>> - rte_eth_stats_get(slave_id, &slave_stats); > >>>> + ret = rte_eth_stats_get(slave_id, &slave_stats); > >>>> + if (ret) > >>>> + goto OUT; > >>>> + > >>>> tx_bytes = slave_stats.obytes - tlb_last_obytets[slave_id]; > >>>> bandwidth_left(slave_id, tx_bytes, > >>>> internals->slave_update_idx, &bwg_array[i]); > >>> @@ -922,6 +926,7 @@ > >>>> bond_ethdev_update_tlb_slave_cb(void *arg) > >>>> for (i = 0; i < slave_count; i++) > >>>> internals->tlb_slaves_order[i] = bwg_array[i].slave; > >>>> > >>>> +OUT: > >>>> rte_eal_alarm_set(REORDER_PERIOD_MS * 1000, > >>> bond_ethdev_update_tlb_slave_cb, > >>>> (struct bond_dev_private *)internals); } diff --git > >>>> a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c > >>>> index 3196210f2c..d6e1f1a7f4 100644 > >>>> --- a/drivers/net/iavf/iavf_ethdev.c > >>>> +++ b/drivers/net/iavf/iavf_ethdev.c > >>>> @@ -2607,6 +2607,9 @@ iavf_dev_init(struct rte_eth_dev *eth_dev) > >>>> adapter->dev_data = eth_dev->data; > >>>> adapter->stopped = 1; > >>>> > >>>> + if (iavf_dev_event_handler_init()) > >>>> + goto init_vf_err; > >>>> + > >>>> if (iavf_init_vf(eth_dev) != 0) { > >>>> PMD_INIT_LOG(ERR, "Init vf failed"); > >>>> return -1; > >>>> @@ -2634,8 +2637,6 @@ iavf_dev_init(struct rte_eth_dev *eth_dev) > >>>> rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr, > >>>> ð_dev->data->mac_addrs[0]); > >>>> > >>>> - if (iavf_dev_event_handler_init()) > >>>> - goto init_vf_err; > >>>> > >>>> if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) > >>> { > >>>> /* register callback func to eal lib */ diff --git > >>>> a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c > >>>> index 9adaadb173..aeffb07cca 100644 > >>>> --- a/drivers/net/iavf/iavf_vchnl.c > >>>> +++ b/drivers/net/iavf/iavf_vchnl.c > >>>> @@ -256,6 +256,7 @@ iavf_read_msg_from_pf(struct iavf_adapter > >>> *adapter, uint16_t buf_len, > >>>> vf->link_speed = > >>> iavf_convert_link_speed(speed); > >>>> } > >>>> iavf_dev_link_update(vf->eth_dev, 0); > >>>> + iavf_dev_event_post(vf->eth_dev, > >>> RTE_ETH_EVENT_INTR_LSC, NULL, 0); > >>>> PMD_DRV_LOG(INFO, "Link status update:%s", > >>>> vf->link_up ? "up" : "down"); > >>>> break; > >>>> @@ -368,28 +369,48 @@ iavf_execute_vf_cmd(struct iavf_adapter > >>> *adapter, struct iavf_cmd_info *args, > >>>> _clear_cmd(vf); > >>>> break; > >>>> default: > >>>> - /* For other virtchnl ops in running time, > >>>> - * wait for the cmd done flag. 
> >>>> - */ > >>>> - do { > >>>> - if (vf->pend_cmd == VIRTCHNL_OP_UNKNOWN) > >>>> - break; > >>>> - iavf_msec_delay(ASQ_DELAY_MS); > >>>> - /* If don't read msg or read sys event, continue */ > >>>> - } while (i++ < MAX_TRY_TIMES); > >>>> - > >>>> - if (i >= MAX_TRY_TIMES) { > >>>> - PMD_DRV_LOG(ERR, "No response for cmd %d", > >>> args->ops); > >>>> + if (rte_thread_is_intr()) { > >>>> + /* For virtchnl ops were executed in eal_intr_thread, > >>>> + * need to poll the response. > >>>> + */ > >>>> + do { > >>>> + result = iavf_read_msg_from_pf(adapter, > >>> args->out_size, > >>>> + args->out_buffer); > >>>> + if (result == IAVF_MSG_CMD) > >>>> + break; > >>>> + iavf_msec_delay(ASQ_DELAY_MS); > >>>> + } while (i++ < MAX_TRY_TIMES); > >>>> + if (i >= MAX_TRY_TIMES || > >>>> + vf->cmd_retval != > >>> VIRTCHNL_STATUS_SUCCESS) { > >>>> + err = -1; > >>>> + PMD_DRV_LOG(ERR, "No response or return > >>> failure (%d)" > >>>> + " for cmd %d", vf- > >>>> cmd_retval, args->ops); > >>>> + } > >>>> _clear_cmd(vf); > >>>> - err = -EIO; > >>>> - } else if (vf->cmd_retval == > >>>> - VIRTCHNL_STATUS_ERR_NOT_SUPPORTED) { > >>>> - PMD_DRV_LOG(ERR, "Cmd %d not supported", args- > >>>> ops); > >>>> - err = -ENOTSUP; > >>>> - } else if (vf->cmd_retval != VIRTCHNL_STATUS_SUCCESS) { > >>>> - PMD_DRV_LOG(ERR, "Return failure %d for cmd %d", > >>>> - vf->cmd_retval, args->ops); > >>>> - err = -EINVAL; > >>>> + } else { > >>>> + /* For other virtchnl ops in running time, > >>>> + * wait for the cmd done flag. > >>>> + */ > >>>> + do { > >>>> + if (vf->pend_cmd == > >>> VIRTCHNL_OP_UNKNOWN) > >>>> + break; > >>>> + iavf_msec_delay(ASQ_DELAY_MS); > >>>> + /* If don't read msg or read sys event, > >>> continue */ > >>>> + } while (i++ < MAX_TRY_TIMES); > >>>> + > >>>> + if (i >= MAX_TRY_TIMES) { > >>>> + PMD_DRV_LOG(ERR, "No response for > >>> cmd %d", args->ops); > >>>> + _clear_cmd(vf); > >>>> + err = -EIO; > >>>> + } else if (vf->cmd_retval == > >>>> + VIRTCHNL_STATUS_ERR_NOT_SUPPORTED) { > >>>> + PMD_DRV_LOG(ERR, "Cmd %d not > >>> supported", args->ops); > >>>> + err = -ENOTSUP; > >>>> + } else if (vf->cmd_retval != > >>> VIRTCHNL_STATUS_SUCCESS) { > >>>> + PMD_DRV_LOG(ERR, "Return failure %d for > >>> cmd %d", > >>>> + vf->cmd_retval, args->ops); > >>>> + err = -EINVAL; > >>>> + } > >>>> } > >>>> break; > >>>> } > >>>> @@ -403,8 +424,14 @@ iavf_execute_vf_cmd_safe(struct > iavf_adapter > >>>> *adapter, { > >>>> struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter); > >>>> int ret; > >>>> + int is_intr_thread = rte_thread_is_intr(); > >>>> > >>>> - rte_spinlock_lock(&vf->aq_lock); > >>>> + if (is_intr_thread) { > >>>> + if (!rte_spinlock_trylock(&vf->aq_lock)) > >>>> + return -EIO; > >>>> + } else { > >>>> + rte_spinlock_lock(&vf->aq_lock); > >>>> + } > >>>> ret = iavf_execute_vf_cmd(adapter, args, async); > >>>> rte_spinlock_unlock(&vf->aq_lock); > >>>> > >> > > ^ permalink raw reply [flat|nested] 19+ messages in thread
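One more detail in the quoted iavf_vchnl.c hunk is that a link status change is now also reported through iavf_dev_event_post() as RTE_ETH_EVENT_INTR_LSC. On the application side such events are consumed with rte_eth_dev_callback_register(); the sketch below shows minimal application-side usage (not driver code) and keeps the callback lightweight, since, as this thread discusses, event callbacks must not issue blocking device commands from the handler context.

#include <stdio.h>
#include <rte_ethdev.h>

/* Application-side LSC callback.  It runs in the driver's event handling
 * context, so it only records the change and defers any heavier work. */
static int
lsc_event_cb(uint16_t port_id, enum rte_eth_event_type event,
             void *cb_arg, void *ret_param)
{
    struct rte_eth_link link;

    (void)event;
    (void)cb_arg;
    (void)ret_param;

    if (rte_eth_link_get_nowait(port_id, &link) == 0)
        printf("port %u link is %s\n", port_id,
               link.link_status ? "up" : "down");
    return 0;
}

static void
register_lsc_cb(uint16_t port_id)
{
    rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
                                  lsc_event_cb, NULL);
}

rte_eth_link_get_nowait() is used instead of rte_eth_link_get() precisely to avoid waiting inside the callback.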
* RE: [PATCH v3] net/iavf: fix iavf query stats in intr thread 2023-03-27 12:37 ` Ferruh Yigit 2023-03-29 7:53 ` Deng, KaiwenX @ 2023-05-05 2:31 ` Deng, KaiwenX 2023-05-23 1:45 ` Deng, KaiwenX 1 sibling, 1 reply; 19+ messages in thread From: Deng, KaiwenX @ 2023-05-05 2:31 UTC (permalink / raw) To: Ferruh Yigit, dev Cc: stable, Yang, Qiming, Zhou, YidingX, Chas Williams, Min Hu (Connor), Wu, Jingjing, Xing, Beilei, Mike Pattrick, Zhang, Qi Z, Doherty, Declan, Mrzyglod, Daniel T, Dapeng Yu, Zhang, Helin, Mcnamara, John, Thomas Monjalon > -----Original Message----- > From: Ferruh Yigit <ferruh.yigit@amd.com> > Sent: Monday, March 27, 2023 8:38 PM > To: Deng, KaiwenX <kaiwenx.deng@intel.com>; dev@dpdk.org > Cc: stable@dpdk.org; Yang, Qiming <qiming.yang@intel.com>; Zhou, YidingX > <yidingx.zhou@intel.com>; Chas Williams <chas3@att.com>; Min Hu (Connor) > <humin29@huawei.com>; Wu, Jingjing <jingjing.wu@intel.com>; Xing, Beilei > <beilei.xing@intel.com>; Mike Pattrick <mkp@redhat.com>; Zhang, Qi Z > <qi.z.zhang@intel.com>; Doherty, Declan <declan.doherty@intel.com>; > Mrzyglod, Daniel T <daniel.t.mrzyglod@intel.com>; Dapeng Yu > <dapengx.yu@intel.com>; Zhang, Helin <helin.zhang@intel.com>; > Mcnamara, John <john.mcnamara@intel.com>; Thomas Monjalon > <thomas@monjalon.net> > Subject: Re: [PATCH v3] net/iavf: fix iavf query stats in intr thread > > On 3/27/2023 1:31 PM, Ferruh Yigit wrote: > > On 3/27/2023 6:31 AM, Deng, KaiwenX wrote: > >> > >> > >>> -----Original Message----- > >>> From: Ferruh Yigit <ferruh.yigit@amd.com> > >>> Sent: Thursday, March 23, 2023 11:39 PM > >>> To: Deng, KaiwenX <kaiwenx.deng@intel.com>; dev@dpdk.org > >>> Cc: stable@dpdk.org; Yang, Qiming <qiming.yang@intel.com>; Zhou, > >>> YidingX <yidingx.zhou@intel.com>; Chas Williams <chas3@att.com>; Min > >>> Hu (Connor) <humin29@huawei.com>; Wu, Jingjing > >>> <jingjing.wu@intel.com>; Xing, Beilei <beilei.xing@intel.com>; Mike > >>> Pattrick <mkp@redhat.com>; Zhang, Qi Z <qi.z.zhang@intel.com>; > >>> Doherty, Declan <declan.doherty@intel.com>; Mrzyglod, Daniel T > >>> <daniel.t.mrzyglod@intel.com>; Dapeng Yu <dapengx.yu@intel.com> > >>> Subject: Re: [PATCH v3] net/iavf: fix iavf query stats in intr > >>> thread > >>> > >>> On 3/22/2023 7:26 AM, Kaiwen Deng wrote: > >>>> When iavf send query-stats command in eal-intr-thread through > >>>> virtual channel, there will be no response received from > >>>> iavf_dev_virtchnl_handler for this command during block and wait. > >>>> Because iavf_dev_virtchnl_handler is also registered in eal-intr-thread. > >>>> > >>>> When vf device is bonded as BONDING_MODE_TLB mode, the slave > device > >>>> update callback will registered in alarm and called by > >>>> eal-intr-thread, it would also raise the above issue. > >>>> > >>>> This commit add to poll the response for VIRTCHNL_OP_GET_STATS > when > >>> it > >>>> is called by eal-intr-thread to fix this issue. > >>>> > >>>> Fixes: 91bf37d250aa ("net/iavf: add lock for VF commands") > >>>> Fixes: 22b123a36d07 ("net/avf: initialize PMD") > >>>> Fixes: 7c76a747e68c ("bond: add mode 5") > >>>> Fixes: 435d523112cc ("net/iavf: fix multi-process shared data") > >>>> Fixes: cb5c1b91f76f ("net/iavf: add thread for event callbacks") > >>> > >>> > >>> Hi Kaiwen, > >>> > >>> Above commit already seems trying to address same issue, it creates > >>> "iavf- event-thread" control thread to asyncroniously handle the > >>> interrupts, in non- interrupt context, why it is not working? 
> >>> > >>> Instead of adding 'rte_thread_is_intr()' checks, can't you make sure > >>> all interrupts handled in control tread? > >>> > >>> And can you please provide a stack trace in commit log, to describe > >>> the issue better? > >> Hi Ferru, > >> Sorry for my late reply, And thanks for your review. > >> > >> The above commit does not fix this issue when we need to get the > returned data. > >> If we call iavf_query_stats and wait for response statistics in the intr- > thread. > >> iavf_handle_virtchnl_msg is also registered in the intr_thread and > >> will not be executed while waiting. > >> > > > > Got it, since return value is required, API can't be called asyncroniously. > > > > > > > > I think 'rte_thread_is_intr()' checks may cause more trouble for you > > in long term, > > > > - why 'iavf_query_stats()' is called in the iterrupt thread, can it be > > prevented? > > > > - does it make sense to allways poll messages from PF (for simplification)? > > > > > > If answer to both are 'No', I am OK to continue with current proposal > > if you are happy with it. > > > > > btw, how critical is this issue? > > If it is critical, I am OK to get it as it is for this release and investigate it further > for next release, since only a few days left for this release. > Hi Ferruh, I didn't find a more suitable solution after consideration, if you have a better suggestion, please let me know, thanks. > > > > >> This commit I changed it to polling for replies to commands executed in > the interrupt thread. > >> > >> main thread interrupt > thread > >> | | > >> | | > >> iavf_query_stats | > >> iavf_execute_vf_cmd | > >> iavf_aq_send_msg_to_pf and wait handle complete | > >> | | > >> |------------------------------------------------------------------------------------- > ------->| > >> | | > >> | > iavf_handle_virtchnl_msg > >> | | > >> |<------------------------------------------------------------------------------------ > --------| > >> | | > >> iavf_execute_vf_cmd get response | > >> | | > >> > >> The above is the stack trace for the normal execution of > >> iavf_query_stats in the main thread. > >> > >> interrupt thread > >> | > >> | > >> iavf_query_stats > >> iavf_execute_vf_cmd > >> iavf_aq_send_msg_to_pf wait handle complete(1 sec) > >> iavf_execute_vf_cmd timeout > >> | > >> | > >> iavf_handle_virtchnl_msg > >> | > >> > >> The above is the stack trace for the abnormal execution of > >> iavf_query_stats in the interrupt thread. > >>> > >>>> Cc: stable@dpdk.org > >>>> > >>>> Signed-off-by: Kaiwen Deng <kaiwenx.deng@intel.com> > >>>> --- > >>>> Changes since v2: > >>>> - Add to poll the response for VIRTCHNL_OP_GET_STATS. > >>>> > >>>> Changes since v1: > >>>> - Add lock to avoid race condition. 
> >>>> --- > >>>> --- > >>>> drivers/net/bonding/rte_eth_bond_pmd.c | 7 ++- > >>>> drivers/net/iavf/iavf_ethdev.c | 5 +- > >>>> drivers/net/iavf/iavf_vchnl.c | 71 ++++++++++++++++++-------- > >>>> 3 files changed, 58 insertions(+), 25 deletions(-) > >>>> > >>>> diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c > >>>> b/drivers/net/bonding/rte_eth_bond_pmd.c > >>>> index f0c4f7d26b..edce621496 100644 > >>>> --- a/drivers/net/bonding/rte_eth_bond_pmd.c > >>>> +++ b/drivers/net/bonding/rte_eth_bond_pmd.c > >>>> @@ -894,6 +894,7 @@ bond_ethdev_update_tlb_slave_cb(void *arg) > >>>> uint8_t update_stats = 0; > >>>> uint16_t slave_id; > >>>> uint16_t i; > >>>> + int ret; > >>>> > >>>> internals->slave_update_idx++; > >>>> > >>>> @@ -903,7 +904,10 @@ bond_ethdev_update_tlb_slave_cb(void *arg) > >>>> > >>>> for (i = 0; i < internals->active_slave_count; i++) { > >>>> slave_id = internals->active_slaves[i]; > >>>> - rte_eth_stats_get(slave_id, &slave_stats); > >>>> + ret = rte_eth_stats_get(slave_id, &slave_stats); > >>>> + if (ret) > >>>> + goto OUT; > >>>> + > >>>> tx_bytes = slave_stats.obytes - tlb_last_obytets[slave_id]; > >>>> bandwidth_left(slave_id, tx_bytes, > >>>> internals->slave_update_idx, &bwg_array[i]); > >>> @@ -922,6 +926,7 @@ > >>>> bond_ethdev_update_tlb_slave_cb(void *arg) > >>>> for (i = 0; i < slave_count; i++) > >>>> internals->tlb_slaves_order[i] = bwg_array[i].slave; > >>>> > >>>> +OUT: > >>>> rte_eal_alarm_set(REORDER_PERIOD_MS * 1000, > >>> bond_ethdev_update_tlb_slave_cb, > >>>> (struct bond_dev_private *)internals); } diff --git > >>>> a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c > >>>> index 3196210f2c..d6e1f1a7f4 100644 > >>>> --- a/drivers/net/iavf/iavf_ethdev.c > >>>> +++ b/drivers/net/iavf/iavf_ethdev.c > >>>> @@ -2607,6 +2607,9 @@ iavf_dev_init(struct rte_eth_dev *eth_dev) > >>>> adapter->dev_data = eth_dev->data; > >>>> adapter->stopped = 1; > >>>> > >>>> + if (iavf_dev_event_handler_init()) > >>>> + goto init_vf_err; > >>>> + > >>>> if (iavf_init_vf(eth_dev) != 0) { > >>>> PMD_INIT_LOG(ERR, "Init vf failed"); > >>>> return -1; > >>>> @@ -2634,8 +2637,6 @@ iavf_dev_init(struct rte_eth_dev *eth_dev) > >>>> rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr, > >>>> ð_dev->data->mac_addrs[0]); > >>>> > >>>> - if (iavf_dev_event_handler_init()) > >>>> - goto init_vf_err; > >>>> > >>>> if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) > >>> { > >>>> /* register callback func to eal lib */ diff --git > >>>> a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c > >>>> index 9adaadb173..aeffb07cca 100644 > >>>> --- a/drivers/net/iavf/iavf_vchnl.c > >>>> +++ b/drivers/net/iavf/iavf_vchnl.c > >>>> @@ -256,6 +256,7 @@ iavf_read_msg_from_pf(struct iavf_adapter > >>> *adapter, uint16_t buf_len, > >>>> vf->link_speed = > >>> iavf_convert_link_speed(speed); > >>>> } > >>>> iavf_dev_link_update(vf->eth_dev, 0); > >>>> + iavf_dev_event_post(vf->eth_dev, > >>> RTE_ETH_EVENT_INTR_LSC, NULL, 0); > >>>> PMD_DRV_LOG(INFO, "Link status update:%s", > >>>> vf->link_up ? "up" : "down"); > >>>> break; > >>>> @@ -368,28 +369,48 @@ iavf_execute_vf_cmd(struct iavf_adapter > >>> *adapter, struct iavf_cmd_info *args, > >>>> _clear_cmd(vf); > >>>> break; > >>>> default: > >>>> - /* For other virtchnl ops in running time, > >>>> - * wait for the cmd done flag. 
> >>>> - */ > >>>> - do { > >>>> - if (vf->pend_cmd == VIRTCHNL_OP_UNKNOWN) > >>>> - break; > >>>> - iavf_msec_delay(ASQ_DELAY_MS); > >>>> - /* If don't read msg or read sys event, continue */ > >>>> - } while (i++ < MAX_TRY_TIMES); > >>>> - > >>>> - if (i >= MAX_TRY_TIMES) { > >>>> - PMD_DRV_LOG(ERR, "No response for cmd %d", > >>> args->ops); > >>>> + if (rte_thread_is_intr()) { > >>>> + /* For virtchnl ops were executed in eal_intr_thread, > >>>> + * need to poll the response. > >>>> + */ > >>>> + do { > >>>> + result = iavf_read_msg_from_pf(adapter, > >>> args->out_size, > >>>> + args->out_buffer); > >>>> + if (result == IAVF_MSG_CMD) > >>>> + break; > >>>> + iavf_msec_delay(ASQ_DELAY_MS); > >>>> + } while (i++ < MAX_TRY_TIMES); > >>>> + if (i >= MAX_TRY_TIMES || > >>>> + vf->cmd_retval != > >>> VIRTCHNL_STATUS_SUCCESS) { > >>>> + err = -1; > >>>> + PMD_DRV_LOG(ERR, "No response or return > >>> failure (%d)" > >>>> + " for cmd %d", vf- > >>>> cmd_retval, args->ops); > >>>> + } > >>>> _clear_cmd(vf); > >>>> - err = -EIO; > >>>> - } else if (vf->cmd_retval == > >>>> - VIRTCHNL_STATUS_ERR_NOT_SUPPORTED) { > >>>> - PMD_DRV_LOG(ERR, "Cmd %d not supported", args- > >>>> ops); > >>>> - err = -ENOTSUP; > >>>> - } else if (vf->cmd_retval != VIRTCHNL_STATUS_SUCCESS) { > >>>> - PMD_DRV_LOG(ERR, "Return failure %d for cmd %d", > >>>> - vf->cmd_retval, args->ops); > >>>> - err = -EINVAL; > >>>> + } else { > >>>> + /* For other virtchnl ops in running time, > >>>> + * wait for the cmd done flag. > >>>> + */ > >>>> + do { > >>>> + if (vf->pend_cmd == > >>> VIRTCHNL_OP_UNKNOWN) > >>>> + break; > >>>> + iavf_msec_delay(ASQ_DELAY_MS); > >>>> + /* If don't read msg or read sys event, > >>> continue */ > >>>> + } while (i++ < MAX_TRY_TIMES); > >>>> + > >>>> + if (i >= MAX_TRY_TIMES) { > >>>> + PMD_DRV_LOG(ERR, "No response for > >>> cmd %d", args->ops); > >>>> + _clear_cmd(vf); > >>>> + err = -EIO; > >>>> + } else if (vf->cmd_retval == > >>>> + VIRTCHNL_STATUS_ERR_NOT_SUPPORTED) { > >>>> + PMD_DRV_LOG(ERR, "Cmd %d not > >>> supported", args->ops); > >>>> + err = -ENOTSUP; > >>>> + } else if (vf->cmd_retval != > >>> VIRTCHNL_STATUS_SUCCESS) { > >>>> + PMD_DRV_LOG(ERR, "Return failure %d for > >>> cmd %d", > >>>> + vf->cmd_retval, args->ops); > >>>> + err = -EINVAL; > >>>> + } > >>>> } > >>>> break; > >>>> } > >>>> @@ -403,8 +424,14 @@ iavf_execute_vf_cmd_safe(struct > iavf_adapter > >>>> *adapter, { > >>>> struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter); > >>>> int ret; > >>>> + int is_intr_thread = rte_thread_is_intr(); > >>>> > >>>> - rte_spinlock_lock(&vf->aq_lock); > >>>> + if (is_intr_thread) { > >>>> + if (!rte_spinlock_trylock(&vf->aq_lock)) > >>>> + return -EIO; > >>>> + } else { > >>>> + rte_spinlock_lock(&vf->aq_lock); > >>>> + } > >>>> ret = iavf_execute_vf_cmd(adapter, args, async); > >>>> rte_spinlock_unlock(&vf->aq_lock); > >>>> > >> > > ^ permalink raw reply [flat|nested] 19+ messages in thread
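[Editor's note, not part of the thread] For readers following the bonding angle of the discussion above: rte_eal_alarm_set() callbacks are dispatched from the eal-intr-thread, which is why the TLB slave-update callback ends up issuing the blocking virtchnl query from interrupt context in the first place. The standalone sketch below is only an illustration of that call path; the function name and the re-arm period are invented, it is not code from the patch.

#include <rte_alarm.h>
#include <rte_ethdev.h>

/* Hypothetical, self-contained illustration mirroring
 * bond_ethdev_update_tlb_slave_cb(): an alarm callback that queries port
 * stats and re-arms itself.  Because alarm callbacks run in the
 * eal-intr-thread, everything called from here -- including
 * rte_eth_stats_get() and, for an iavf slave, the virtchnl command behind
 * it -- executes in interrupt context.
 */
static void
tlb_style_update_cb(void *arg)
{
	uint16_t port_id = *(uint16_t *)arg;
	struct rte_eth_stats stats;

	if (rte_eth_stats_get(port_id, &stats) == 0) {
		/* ... use stats.obytes for bandwidth estimation ... */
	}

	/* Re-arm, as the bonding callback does with REORDER_PERIOD_MS;
	 * 100 ms here is an arbitrary example value. */
	rte_eal_alarm_set(100 * 1000, tlb_style_update_cb, arg);
}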
* RE: [PATCH v3] net/iavf: fix iavf query stats in intr thread 2023-05-05 2:31 ` Deng, KaiwenX @ 2023-05-23 1:45 ` Deng, KaiwenX 2023-05-23 10:26 ` Ferruh Yigit 0 siblings, 1 reply; 19+ messages in thread From: Deng, KaiwenX @ 2023-05-23 1:45 UTC (permalink / raw) To: Ferruh Yigit, dev Cc: stable, Yang, Qiming, Zhou, YidingX, Chas Williams, Min Hu (Connor), Wu, Jingjing, Xing, Beilei, Mike Pattrick, Zhang, Qi Z, Doherty, Declan, Mrzyglod, Daniel T, Dapeng Yu, Zhang, Helin, Mcnamara, John, Thomas Monjalon > -----Original Message----- > From: Deng, KaiwenX > Sent: Friday, May 5, 2023 10:31 AM > To: Ferruh Yigit <ferruh.yigit@amd.com>; dev@dpdk.org > Cc: stable@dpdk.org; Yang, Qiming <qiming.yang@intel.com>; Zhou, YidingX > <yidingx.zhou@intel.com>; Chas Williams <chas3@att.com>; Min Hu (Connor) > <humin29@huawei.com>; Wu, Jingjing <jingjing.wu@intel.com>; Xing, Beilei > <beilei.xing@intel.com>; Mike Pattrick <mkp@redhat.com>; Zhang, Qi Z > <qi.z.zhang@intel.com>; Doherty, Declan <declan.doherty@intel.com>; > Mrzyglod, Daniel T <daniel.t.mrzyglod@intel.com>; Dapeng Yu > <dapengx.yu@intel.com>; Zhang, Helin <helin.zhang@intel.com>; > Mcnamara, John <john.mcnamara@intel.com>; Thomas Monjalon > <thomas@monjalon.net> > Subject: RE: [PATCH v3] net/iavf: fix iavf query stats in intr thread > > > > > -----Original Message----- > > From: Ferruh Yigit <ferruh.yigit@amd.com> > > Sent: Monday, March 27, 2023 8:38 PM > > To: Deng, KaiwenX <kaiwenx.deng@intel.com>; dev@dpdk.org > > Cc: stable@dpdk.org; Yang, Qiming <qiming.yang@intel.com>; Zhou, > > YidingX <yidingx.zhou@intel.com>; Chas Williams <chas3@att.com>; Min > > Hu (Connor) <humin29@huawei.com>; Wu, Jingjing > > <jingjing.wu@intel.com>; Xing, Beilei <beilei.xing@intel.com>; Mike > > Pattrick <mkp@redhat.com>; Zhang, Qi Z <qi.z.zhang@intel.com>; > > Doherty, Declan <declan.doherty@intel.com>; Mrzyglod, Daniel T > > <daniel.t.mrzyglod@intel.com>; Dapeng Yu <dapengx.yu@intel.com>; > > Zhang, Helin <helin.zhang@intel.com>; Mcnamara, John > > <john.mcnamara@intel.com>; Thomas Monjalon <thomas@monjalon.net> > > Subject: Re: [PATCH v3] net/iavf: fix iavf query stats in intr thread > > > > On 3/27/2023 1:31 PM, Ferruh Yigit wrote: > > > On 3/27/2023 6:31 AM, Deng, KaiwenX wrote: > > >> > > >> > > >>> -----Original Message----- > > >>> From: Ferruh Yigit <ferruh.yigit@amd.com> > > >>> Sent: Thursday, March 23, 2023 11:39 PM > > >>> To: Deng, KaiwenX <kaiwenx.deng@intel.com>; dev@dpdk.org > > >>> Cc: stable@dpdk.org; Yang, Qiming <qiming.yang@intel.com>; Zhou, > > >>> YidingX <yidingx.zhou@intel.com>; Chas Williams <chas3@att.com>; > > >>> Min Hu (Connor) <humin29@huawei.com>; Wu, Jingjing > > >>> <jingjing.wu@intel.com>; Xing, Beilei <beilei.xing@intel.com>; > > >>> Mike Pattrick <mkp@redhat.com>; Zhang, Qi Z > > >>> <qi.z.zhang@intel.com>; Doherty, Declan > > >>> <declan.doherty@intel.com>; Mrzyglod, Daniel T > > >>> <daniel.t.mrzyglod@intel.com>; Dapeng Yu <dapengx.yu@intel.com> > > >>> Subject: Re: [PATCH v3] net/iavf: fix iavf query stats in intr > > >>> thread > > >>> > > >>> On 3/22/2023 7:26 AM, Kaiwen Deng wrote: > > >>>> When iavf send query-stats command in eal-intr-thread through > > >>>> virtual channel, there will be no response received from > > >>>> iavf_dev_virtchnl_handler for this command during block and wait. > > >>>> Because iavf_dev_virtchnl_handler is also registered in eal-intr- > thread. 
> > >>>> > > >>>> When vf device is bonded as BONDING_MODE_TLB mode, the slave > > device > > >>>> update callback will registered in alarm and called by > > >>>> eal-intr-thread, it would also raise the above issue. > > >>>> > > >>>> This commit add to poll the response for VIRTCHNL_OP_GET_STATS > > when > > >>> it > > >>>> is called by eal-intr-thread to fix this issue. > > >>>> > > >>>> Fixes: 91bf37d250aa ("net/iavf: add lock for VF commands") > > >>>> Fixes: 22b123a36d07 ("net/avf: initialize PMD") > > >>>> Fixes: 7c76a747e68c ("bond: add mode 5") > > >>>> Fixes: 435d523112cc ("net/iavf: fix multi-process shared data") > > >>>> Fixes: cb5c1b91f76f ("net/iavf: add thread for event callbacks") > > >>> > > >>> > > >>> Hi Kaiwen, > > >>> > > >>> Above commit already seems trying to address same issue, it > > >>> creates > > >>> "iavf- event-thread" control thread to asyncroniously handle the > > >>> interrupts, in non- interrupt context, why it is not working? > > >>> > > >>> Instead of adding 'rte_thread_is_intr()' checks, can't you make > > >>> sure all interrupts handled in control tread? > > >>> > > >>> And can you please provide a stack trace in commit log, to > > >>> describe the issue better? > > >> Hi Ferru, > > >> Sorry for my late reply, And thanks for your review. > > >> > > >> The above commit does not fix this issue when we need to get the > > returned data. > > >> If we call iavf_query_stats and wait for response statistics in the > > >> intr- > > thread. > > >> iavf_handle_virtchnl_msg is also registered in the intr_thread and > > >> will not be executed while waiting. > > >> > > > > > > Got it, since return value is required, API can't be called asyncroniously. > > > > > > > > > > > > I think 'rte_thread_is_intr()' checks may cause more trouble for you > > > in long term, > > > > > > - why 'iavf_query_stats()' is called in the iterrupt thread, can it > > > be prevented? > > > > > > - does it make sense to allways poll messages from PF (for simplification)? > > > > > > > > > If answer to both are 'No', I am OK to continue with current > > > proposal if you are happy with it. > > > > > > > > > btw, how critical is this issue? > > > > If it is critical, I am OK to get it as it is for this release and > > investigate it further for next release, since only a few days left for this > release. > > > Hi Ferruh, > > I didn't find a more suitable solution after consideration, if you have a better > suggestion, please let me know, thanks. Hi Ferruh, Can you please take a look at this patch. Thanks. > > > > > > > >> This commit I changed it to polling for replies to commands > > >> executed in > > the interrupt thread. > > >> > > >> main thread interrupt > > thread > > >> | | > > >> | | > > >> iavf_query_stats | > > >> iavf_execute_vf_cmd | > > >> iavf_aq_send_msg_to_pf and wait handle complete > | > > >> | | > > >> > > >> |------------------------------------------------------------------ > > >> ------------------- > > ------->| > > >> | | > > >> | > > iavf_handle_virtchnl_msg > > >> | | > > >> > > >> |<----------------------------------------------------------------- > > >> ------------------- > > --------| > > >> | | > > >> iavf_execute_vf_cmd get response | > > >> | | > > >> > > >> The above is the stack trace for the normal execution of > > >> iavf_query_stats in the main thread. 
> > >> > > >> interrupt thread > > >> | > > >> | > > >> iavf_query_stats > > >> iavf_execute_vf_cmd > > >> iavf_aq_send_msg_to_pf wait handle complete(1 sec) > > >> iavf_execute_vf_cmd timeout > > >> | > > >> | > > >> iavf_handle_virtchnl_msg > > >> | > > >> > > >> The above is the stack trace for the abnormal execution of > > >> iavf_query_stats in the interrupt thread. > > >>> > > >>>> Cc: stable@dpdk.org > > >>>> > > >>>> Signed-off-by: Kaiwen Deng <kaiwenx.deng@intel.com> > > >>>> --- > > >>>> Changes since v2: > > >>>> - Add to poll the response for VIRTCHNL_OP_GET_STATS. > > >>>> > > >>>> Changes since v1: > > >>>> - Add lock to avoid race condition. > > >>>> --- > > >>>> --- > > >>>> drivers/net/bonding/rte_eth_bond_pmd.c | 7 ++- > > >>>> drivers/net/iavf/iavf_ethdev.c | 5 +- > > >>>> drivers/net/iavf/iavf_vchnl.c | 71 ++++++++++++++++++-------- > > >>>> 3 files changed, 58 insertions(+), 25 deletions(-) > > >>>> > > >>>> diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c > > >>>> b/drivers/net/bonding/rte_eth_bond_pmd.c > > >>>> index f0c4f7d26b..edce621496 100644 > > >>>> --- a/drivers/net/bonding/rte_eth_bond_pmd.c > > >>>> +++ b/drivers/net/bonding/rte_eth_bond_pmd.c > > >>>> @@ -894,6 +894,7 @@ bond_ethdev_update_tlb_slave_cb(void *arg) > > >>>> uint8_t update_stats = 0; > > >>>> uint16_t slave_id; > > >>>> uint16_t i; > > >>>> + int ret; > > >>>> > > >>>> internals->slave_update_idx++; > > >>>> > > >>>> @@ -903,7 +904,10 @@ bond_ethdev_update_tlb_slave_cb(void > *arg) > > >>>> > > >>>> for (i = 0; i < internals->active_slave_count; i++) { > > >>>> slave_id = internals->active_slaves[i]; > > >>>> - rte_eth_stats_get(slave_id, &slave_stats); > > >>>> + ret = rte_eth_stats_get(slave_id, &slave_stats); > > >>>> + if (ret) > > >>>> + goto OUT; > > >>>> + > > >>>> tx_bytes = slave_stats.obytes - > tlb_last_obytets[slave_id]; > > >>>> bandwidth_left(slave_id, tx_bytes, > > >>>> internals->slave_update_idx, > &bwg_array[i]); > > >>> @@ -922,6 +926,7 @@ > > >>>> bond_ethdev_update_tlb_slave_cb(void *arg) > > >>>> for (i = 0; i < slave_count; i++) > > >>>> internals->tlb_slaves_order[i] = bwg_array[i].slave; > > >>>> > > >>>> +OUT: > > >>>> rte_eal_alarm_set(REORDER_PERIOD_MS * 1000, > > >>> bond_ethdev_update_tlb_slave_cb, > > >>>> (struct bond_dev_private *)internals); } diff - > -git > > >>>> a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c > > >>>> index 3196210f2c..d6e1f1a7f4 100644 > > >>>> --- a/drivers/net/iavf/iavf_ethdev.c > > >>>> +++ b/drivers/net/iavf/iavf_ethdev.c > > >>>> @@ -2607,6 +2607,9 @@ iavf_dev_init(struct rte_eth_dev *eth_dev) > > >>>> adapter->dev_data = eth_dev->data; > > >>>> adapter->stopped = 1; > > >>>> > > >>>> + if (iavf_dev_event_handler_init()) > > >>>> + goto init_vf_err; > > >>>> + > > >>>> if (iavf_init_vf(eth_dev) != 0) { > > >>>> PMD_INIT_LOG(ERR, "Init vf failed"); > > >>>> return -1; > > >>>> @@ -2634,8 +2637,6 @@ iavf_dev_init(struct rte_eth_dev *eth_dev) > > >>>> rte_ether_addr_copy((struct rte_ether_addr *)hw- > >mac.addr, > > >>>> ð_dev->data->mac_addrs[0]); > > >>>> > > >>>> - if (iavf_dev_event_handler_init()) > > >>>> - goto init_vf_err; > > >>>> > > >>>> if (vf->vf_res->vf_cap_flags & > VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) > > >>> { > > >>>> /* register callback func to eal lib */ diff --git > > >>>> a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c > > >>>> index 9adaadb173..aeffb07cca 100644 > > >>>> --- a/drivers/net/iavf/iavf_vchnl.c > > >>>> +++ b/drivers/net/iavf/iavf_vchnl.c > > >>>> @@ -256,6 +256,7 @@ 
iavf_read_msg_from_pf(struct iavf_adapter > > >>> *adapter, uint16_t buf_len, > > >>>> vf->link_speed = > > >>> iavf_convert_link_speed(speed); > > >>>> } > > >>>> iavf_dev_link_update(vf->eth_dev, 0); > > >>>> + iavf_dev_event_post(vf->eth_dev, > > >>> RTE_ETH_EVENT_INTR_LSC, NULL, 0); > > >>>> PMD_DRV_LOG(INFO, "Link status > update:%s", > > >>>> vf->link_up ? "up" : "down"); > > >>>> break; > > >>>> @@ -368,28 +369,48 @@ iavf_execute_vf_cmd(struct iavf_adapter > > >>> *adapter, struct iavf_cmd_info *args, > > >>>> _clear_cmd(vf); > > >>>> break; > > >>>> default: > > >>>> - /* For other virtchnl ops in running time, > > >>>> - * wait for the cmd done flag. > > >>>> - */ > > >>>> - do { > > >>>> - if (vf->pend_cmd == > VIRTCHNL_OP_UNKNOWN) > > >>>> - break; > > >>>> - iavf_msec_delay(ASQ_DELAY_MS); > > >>>> - /* If don't read msg or read sys event, > continue */ > > >>>> - } while (i++ < MAX_TRY_TIMES); > > >>>> - > > >>>> - if (i >= MAX_TRY_TIMES) { > > >>>> - PMD_DRV_LOG(ERR, "No response for > cmd %d", > > >>> args->ops); > > >>>> + if (rte_thread_is_intr()) { > > >>>> + /* For virtchnl ops were executed in > eal_intr_thread, > > >>>> + * need to poll the response. > > >>>> + */ > > >>>> + do { > > >>>> + result = > iavf_read_msg_from_pf(adapter, > > >>> args->out_size, > > >>>> + args- > >out_buffer); > > >>>> + if (result == IAVF_MSG_CMD) > > >>>> + break; > > >>>> + iavf_msec_delay(ASQ_DELAY_MS); > > >>>> + } while (i++ < MAX_TRY_TIMES); > > >>>> + if (i >= MAX_TRY_TIMES || > > >>>> + vf->cmd_retval != > > >>> VIRTCHNL_STATUS_SUCCESS) { > > >>>> + err = -1; > > >>>> + PMD_DRV_LOG(ERR, "No response > or return > > >>> failure (%d)" > > >>>> + " for cmd %d", vf- > > >>>> cmd_retval, args->ops); > > >>>> + } > > >>>> _clear_cmd(vf); > > >>>> - err = -EIO; > > >>>> - } else if (vf->cmd_retval == > > >>>> - VIRTCHNL_STATUS_ERR_NOT_SUPPORTED) > { > > >>>> - PMD_DRV_LOG(ERR, "Cmd %d not > supported", args- > > >>>> ops); > > >>>> - err = -ENOTSUP; > > >>>> - } else if (vf->cmd_retval != > VIRTCHNL_STATUS_SUCCESS) { > > >>>> - PMD_DRV_LOG(ERR, "Return failure %d for > cmd %d", > > >>>> - vf->cmd_retval, args->ops); > > >>>> - err = -EINVAL; > > >>>> + } else { > > >>>> + /* For other virtchnl ops in running time, > > >>>> + * wait for the cmd done flag. 
> > >>>> + */ > > >>>> + do { > > >>>> + if (vf->pend_cmd == > > >>> VIRTCHNL_OP_UNKNOWN) > > >>>> + break; > > >>>> + iavf_msec_delay(ASQ_DELAY_MS); > > >>>> + /* If don't read msg or read sys event, > > >>> continue */ > > >>>> + } while (i++ < MAX_TRY_TIMES); > > >>>> + > > >>>> + if (i >= MAX_TRY_TIMES) { > > >>>> + PMD_DRV_LOG(ERR, "No response > for > > >>> cmd %d", args->ops); > > >>>> + _clear_cmd(vf); > > >>>> + err = -EIO; > > >>>> + } else if (vf->cmd_retval == > > >>>> + > VIRTCHNL_STATUS_ERR_NOT_SUPPORTED) { > > >>>> + PMD_DRV_LOG(ERR, "Cmd %d not > > >>> supported", args->ops); > > >>>> + err = -ENOTSUP; > > >>>> + } else if (vf->cmd_retval != > > >>> VIRTCHNL_STATUS_SUCCESS) { > > >>>> + PMD_DRV_LOG(ERR, "Return > failure %d for > > >>> cmd %d", > > >>>> + vf->cmd_retval, args- > >ops); > > >>>> + err = -EINVAL; > > >>>> + } > > >>>> } > > >>>> break; > > >>>> } > > >>>> @@ -403,8 +424,14 @@ iavf_execute_vf_cmd_safe(struct > > iavf_adapter > > >>>> *adapter, { > > >>>> struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter); > > >>>> int ret; > > >>>> + int is_intr_thread = rte_thread_is_intr(); > > >>>> > > >>>> - rte_spinlock_lock(&vf->aq_lock); > > >>>> + if (is_intr_thread) { > > >>>> + if (!rte_spinlock_trylock(&vf->aq_lock)) > > >>>> + return -EIO; > > >>>> + } else { > > >>>> + rte_spinlock_lock(&vf->aq_lock); > > >>>> + } > > >>>> ret = iavf_execute_vf_cmd(adapter, args, async); > > >>>> rte_spinlock_unlock(&vf->aq_lock); > > >>>> > > >> > > > ^ permalink raw reply [flat|nested] 19+ messages in thread
* Re: [PATCH v3] net/iavf: fix iavf query stats in intr thread 2023-05-23 1:45 ` Deng, KaiwenX @ 2023-05-23 10:26 ` Ferruh Yigit 0 siblings, 0 replies; 19+ messages in thread From: Ferruh Yigit @ 2023-05-23 10:26 UTC (permalink / raw) To: Deng, KaiwenX, dev Cc: stable, Yang, Qiming, Zhou, YidingX, Chas Williams, Min Hu (Connor), Wu, Jingjing, Xing, Beilei, Mike Pattrick, Zhang, Qi Z, Doherty, Declan, Mrzyglod, Daniel T, Dapeng Yu, Zhang, Helin, Mcnamara, John, Thomas Monjalon On 5/23/2023 2:45 AM, Deng, KaiwenX wrote: > > >> -----Original Message----- >> From: Deng, KaiwenX >> Sent: Friday, May 5, 2023 10:31 AM >> To: Ferruh Yigit <ferruh.yigit@amd.com>; dev@dpdk.org >> Cc: stable@dpdk.org; Yang, Qiming <qiming.yang@intel.com>; Zhou, YidingX >> <yidingx.zhou@intel.com>; Chas Williams <chas3@att.com>; Min Hu (Connor) >> <humin29@huawei.com>; Wu, Jingjing <jingjing.wu@intel.com>; Xing, Beilei >> <beilei.xing@intel.com>; Mike Pattrick <mkp@redhat.com>; Zhang, Qi Z >> <qi.z.zhang@intel.com>; Doherty, Declan <declan.doherty@intel.com>; >> Mrzyglod, Daniel T <daniel.t.mrzyglod@intel.com>; Dapeng Yu >> <dapengx.yu@intel.com>; Zhang, Helin <helin.zhang@intel.com>; >> Mcnamara, John <john.mcnamara@intel.com>; Thomas Monjalon >> <thomas@monjalon.net> >> Subject: RE: [PATCH v3] net/iavf: fix iavf query stats in intr thread >> >> >> >>> -----Original Message----- >>> From: Ferruh Yigit <ferruh.yigit@amd.com> >>> Sent: Monday, March 27, 2023 8:38 PM >>> To: Deng, KaiwenX <kaiwenx.deng@intel.com>; dev@dpdk.org >>> Cc: stable@dpdk.org; Yang, Qiming <qiming.yang@intel.com>; Zhou, >>> YidingX <yidingx.zhou@intel.com>; Chas Williams <chas3@att.com>; Min >>> Hu (Connor) <humin29@huawei.com>; Wu, Jingjing >>> <jingjing.wu@intel.com>; Xing, Beilei <beilei.xing@intel.com>; Mike >>> Pattrick <mkp@redhat.com>; Zhang, Qi Z <qi.z.zhang@intel.com>; >>> Doherty, Declan <declan.doherty@intel.com>; Mrzyglod, Daniel T >>> <daniel.t.mrzyglod@intel.com>; Dapeng Yu <dapengx.yu@intel.com>; >>> Zhang, Helin <helin.zhang@intel.com>; Mcnamara, John >>> <john.mcnamara@intel.com>; Thomas Monjalon <thomas@monjalon.net> >>> Subject: Re: [PATCH v3] net/iavf: fix iavf query stats in intr thread >>> >>> On 3/27/2023 1:31 PM, Ferruh Yigit wrote: >>>> On 3/27/2023 6:31 AM, Deng, KaiwenX wrote: >>>>> >>>>> >>>>>> -----Original Message----- >>>>>> From: Ferruh Yigit <ferruh.yigit@amd.com> >>>>>> Sent: Thursday, March 23, 2023 11:39 PM >>>>>> To: Deng, KaiwenX <kaiwenx.deng@intel.com>; dev@dpdk.org >>>>>> Cc: stable@dpdk.org; Yang, Qiming <qiming.yang@intel.com>; Zhou, >>>>>> YidingX <yidingx.zhou@intel.com>; Chas Williams <chas3@att.com>; >>>>>> Min Hu (Connor) <humin29@huawei.com>; Wu, Jingjing >>>>>> <jingjing.wu@intel.com>; Xing, Beilei <beilei.xing@intel.com>; >>>>>> Mike Pattrick <mkp@redhat.com>; Zhang, Qi Z >>>>>> <qi.z.zhang@intel.com>; Doherty, Declan >>>>>> <declan.doherty@intel.com>; Mrzyglod, Daniel T >>>>>> <daniel.t.mrzyglod@intel.com>; Dapeng Yu <dapengx.yu@intel.com> >>>>>> Subject: Re: [PATCH v3] net/iavf: fix iavf query stats in intr >>>>>> thread >>>>>> >>>>>> On 3/22/2023 7:26 AM, Kaiwen Deng wrote: >>>>>>> When iavf send query-stats command in eal-intr-thread through >>>>>>> virtual channel, there will be no response received from >>>>>>> iavf_dev_virtchnl_handler for this command during block and wait. >>>>>>> Because iavf_dev_virtchnl_handler is also registered in eal-intr- >> thread. 
>>>>>>> >>>>>>> When vf device is bonded as BONDING_MODE_TLB mode, the slave >>> device >>>>>>> update callback will registered in alarm and called by >>>>>>> eal-intr-thread, it would also raise the above issue. >>>>>>> >>>>>>> This commit add to poll the response for VIRTCHNL_OP_GET_STATS >>> when >>>>>> it >>>>>>> is called by eal-intr-thread to fix this issue. >>>>>>> >>>>>>> Fixes: 91bf37d250aa ("net/iavf: add lock for VF commands") >>>>>>> Fixes: 22b123a36d07 ("net/avf: initialize PMD") >>>>>>> Fixes: 7c76a747e68c ("bond: add mode 5") >>>>>>> Fixes: 435d523112cc ("net/iavf: fix multi-process shared data") >>>>>>> Fixes: cb5c1b91f76f ("net/iavf: add thread for event callbacks") >>>>>> >>>>>> >>>>>> Hi Kaiwen, >>>>>> >>>>>> Above commit already seems trying to address same issue, it >>>>>> creates >>>>>> "iavf- event-thread" control thread to asyncroniously handle the >>>>>> interrupts, in non- interrupt context, why it is not working? >>>>>> >>>>>> Instead of adding 'rte_thread_is_intr()' checks, can't you make >>>>>> sure all interrupts handled in control tread? >>>>>> >>>>>> And can you please provide a stack trace in commit log, to >>>>>> describe the issue better? >>>>> Hi Ferru, >>>>> Sorry for my late reply, And thanks for your review. >>>>> >>>>> The above commit does not fix this issue when we need to get the >>> returned data. >>>>> If we call iavf_query_stats and wait for response statistics in the >>>>> intr- >>> thread. >>>>> iavf_handle_virtchnl_msg is also registered in the intr_thread and >>>>> will not be executed while waiting. >>>>> >>>> >>>> Got it, since return value is required, API can't be called asyncroniously. >>>> >>>> >>>> >>>> I think 'rte_thread_is_intr()' checks may cause more trouble for you >>>> in long term, >>>> >>>> - why 'iavf_query_stats()' is called in the iterrupt thread, can it >>>> be prevented? >>>> >>>> - does it make sense to allways poll messages from PF (for simplification)? >>>> >>>> >>>> If answer to both are 'No', I am OK to continue with current >>>> proposal if you are happy with it. >>>> >>> >>> >>> btw, how critical is this issue? >>> >>> If it is critical, I am OK to get it as it is for this release and >>> investigate it further for next release, since only a few days left for this >> release. >>> >> Hi Ferruh, >> >> I didn't find a more suitable solution after consideration, if you have a better >> suggestion, please let me know, thanks. > Hi Ferruh, > Can you please take a look at this patch. > Hi Kaiwen, Sorry for delay, lets continue with this solution. I thought calling from "iavf-event-thread" can be an option but your description clarifies why it is not an option, and I also don't have better solution, so I think can continue as it is. > Thanks. >>> >>>> >>>>> This commit I changed it to polling for replies to commands >>>>> executed in >>> the interrupt thread. >>>>> >>>>> main thread interrupt >>> thread >>>>> | | >>>>> | | >>>>> iavf_query_stats | >>>>> iavf_execute_vf_cmd | >>>>> iavf_aq_send_msg_to_pf and wait handle complete >> | >>>>> | | >>>>> >>>>> |------------------------------------------------------------------ >>>>> ------------------- >>> ------->| >>>>> | | >>>>> | >>> iavf_handle_virtchnl_msg >>>>> | | >>>>> >>>>> |<----------------------------------------------------------------- >>>>> ------------------- >>> --------| >>>>> | | >>>>> iavf_execute_vf_cmd get response | >>>>> | | >>>>> >>>>> The above is the stack trace for the normal execution of >>>>> iavf_query_stats in the main thread. 
>>>>> >>>>> interrupt thread >>>>> | >>>>> | >>>>> iavf_query_stats >>>>> iavf_execute_vf_cmd >>>>> iavf_aq_send_msg_to_pf wait handle complete(1 sec) >>>>> iavf_execute_vf_cmd timeout >>>>> | >>>>> | >>>>> iavf_handle_virtchnl_msg >>>>> | >>>>> >>>>> The above is the stack trace for the abnormal execution of >>>>> iavf_query_stats in the interrupt thread. >>>>>> >>>>>>> Cc: stable@dpdk.org >>>>>>> >>>>>>> Signed-off-by: Kaiwen Deng <kaiwenx.deng@intel.com> >>>>>>> --- >>>>>>> Changes since v2: >>>>>>> - Add to poll the response for VIRTCHNL_OP_GET_STATS. >>>>>>> >>>>>>> Changes since v1: >>>>>>> - Add lock to avoid race condition. >>>>>>> --- >>>>>>> --- >>>>>>> drivers/net/bonding/rte_eth_bond_pmd.c | 7 ++- >>>>>>> drivers/net/iavf/iavf_ethdev.c | 5 +- >>>>>>> drivers/net/iavf/iavf_vchnl.c | 71 ++++++++++++++++++-------- >>>>>>> 3 files changed, 58 insertions(+), 25 deletions(-) >>>>>>> >>>>>>> diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c >>>>>>> b/drivers/net/bonding/rte_eth_bond_pmd.c >>>>>>> index f0c4f7d26b..edce621496 100644 >>>>>>> --- a/drivers/net/bonding/rte_eth_bond_pmd.c >>>>>>> +++ b/drivers/net/bonding/rte_eth_bond_pmd.c >>>>>>> @@ -894,6 +894,7 @@ bond_ethdev_update_tlb_slave_cb(void *arg) >>>>>>> uint8_t update_stats = 0; >>>>>>> uint16_t slave_id; >>>>>>> uint16_t i; >>>>>>> + int ret; >>>>>>> >>>>>>> internals->slave_update_idx++; >>>>>>> >>>>>>> @@ -903,7 +904,10 @@ bond_ethdev_update_tlb_slave_cb(void >> *arg) >>>>>>> >>>>>>> for (i = 0; i < internals->active_slave_count; i++) { >>>>>>> slave_id = internals->active_slaves[i]; >>>>>>> - rte_eth_stats_get(slave_id, &slave_stats); >>>>>>> + ret = rte_eth_stats_get(slave_id, &slave_stats); >>>>>>> + if (ret) >>>>>>> + goto OUT; >>>>>>> + >>>>>>> tx_bytes = slave_stats.obytes - >> tlb_last_obytets[slave_id]; >>>>>>> bandwidth_left(slave_id, tx_bytes, >>>>>>> internals->slave_update_idx, >> &bwg_array[i]); >>>>>> @@ -922,6 +926,7 @@ >>>>>>> bond_ethdev_update_tlb_slave_cb(void *arg) >>>>>>> for (i = 0; i < slave_count; i++) >>>>>>> internals->tlb_slaves_order[i] = bwg_array[i].slave; >>>>>>> >>>>>>> +OUT: >>>>>>> rte_eal_alarm_set(REORDER_PERIOD_MS * 1000, >>>>>> bond_ethdev_update_tlb_slave_cb, >>>>>>> (struct bond_dev_private *)internals); } diff - >> -git >>>>>>> a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c >>>>>>> index 3196210f2c..d6e1f1a7f4 100644 >>>>>>> --- a/drivers/net/iavf/iavf_ethdev.c >>>>>>> +++ b/drivers/net/iavf/iavf_ethdev.c >>>>>>> @@ -2607,6 +2607,9 @@ iavf_dev_init(struct rte_eth_dev *eth_dev) >>>>>>> adapter->dev_data = eth_dev->data; >>>>>>> adapter->stopped = 1; >>>>>>> >>>>>>> + if (iavf_dev_event_handler_init()) >>>>>>> + goto init_vf_err; >>>>>>> + >>>>>>> if (iavf_init_vf(eth_dev) != 0) { >>>>>>> PMD_INIT_LOG(ERR, "Init vf failed"); >>>>>>> return -1; >>>>>>> @@ -2634,8 +2637,6 @@ iavf_dev_init(struct rte_eth_dev *eth_dev) >>>>>>> rte_ether_addr_copy((struct rte_ether_addr *)hw- >>> mac.addr, >>>>>>> ð_dev->data->mac_addrs[0]); >>>>>>> >>>>>>> - if (iavf_dev_event_handler_init()) >>>>>>> - goto init_vf_err; >>>>>>> >>>>>>> if (vf->vf_res->vf_cap_flags & >> VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) >>>>>> { >>>>>>> /* register callback func to eal lib */ diff --git >>>>>>> a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c >>>>>>> index 9adaadb173..aeffb07cca 100644 >>>>>>> --- a/drivers/net/iavf/iavf_vchnl.c >>>>>>> +++ b/drivers/net/iavf/iavf_vchnl.c >>>>>>> @@ -256,6 +256,7 @@ iavf_read_msg_from_pf(struct iavf_adapter >>>>>> *adapter, uint16_t buf_len, >>>>>>> 
vf->link_speed = >>>>>> iavf_convert_link_speed(speed); >>>>>>> } >>>>>>> iavf_dev_link_update(vf->eth_dev, 0); >>>>>>> + iavf_dev_event_post(vf->eth_dev, >>>>>> RTE_ETH_EVENT_INTR_LSC, NULL, 0); >>>>>>> PMD_DRV_LOG(INFO, "Link status >> update:%s", >>>>>>> vf->link_up ? "up" : "down"); >>>>>>> break; >>>>>>> @@ -368,28 +369,48 @@ iavf_execute_vf_cmd(struct iavf_adapter >>>>>> *adapter, struct iavf_cmd_info *args, >>>>>>> _clear_cmd(vf); >>>>>>> break; >>>>>>> default: >>>>>>> - /* For other virtchnl ops in running time, >>>>>>> - * wait for the cmd done flag. >>>>>>> - */ >>>>>>> - do { >>>>>>> - if (vf->pend_cmd == >> VIRTCHNL_OP_UNKNOWN) >>>>>>> - break; >>>>>>> - iavf_msec_delay(ASQ_DELAY_MS); >>>>>>> - /* If don't read msg or read sys event, >> continue */ >>>>>>> - } while (i++ < MAX_TRY_TIMES); >>>>>>> - >>>>>>> - if (i >= MAX_TRY_TIMES) { >>>>>>> - PMD_DRV_LOG(ERR, "No response for >> cmd %d", >>>>>> args->ops); >>>>>>> + if (rte_thread_is_intr()) { >>>>>>> + /* For virtchnl ops were executed in >> eal_intr_thread, >>>>>>> + * need to poll the response. >>>>>>> + */ >>>>>>> + do { >>>>>>> + result = >> iavf_read_msg_from_pf(adapter, >>>>>> args->out_size, >>>>>>> + args- >>> out_buffer); >>>>>>> + if (result == IAVF_MSG_CMD) >>>>>>> + break; >>>>>>> + iavf_msec_delay(ASQ_DELAY_MS); >>>>>>> + } while (i++ < MAX_TRY_TIMES); >>>>>>> + if (i >= MAX_TRY_TIMES || >>>>>>> + vf->cmd_retval != >>>>>> VIRTCHNL_STATUS_SUCCESS) { >>>>>>> + err = -1; >>>>>>> + PMD_DRV_LOG(ERR, "No response >> or return >>>>>> failure (%d)" >>>>>>> + " for cmd %d", vf- >>>>>>> cmd_retval, args->ops); >>>>>>> + } >>>>>>> _clear_cmd(vf); >>>>>>> - err = -EIO; >>>>>>> - } else if (vf->cmd_retval == >>>>>>> - VIRTCHNL_STATUS_ERR_NOT_SUPPORTED) >> { >>>>>>> - PMD_DRV_LOG(ERR, "Cmd %d not >> supported", args- >>>>>>> ops); >>>>>>> - err = -ENOTSUP; >>>>>>> - } else if (vf->cmd_retval != >> VIRTCHNL_STATUS_SUCCESS) { >>>>>>> - PMD_DRV_LOG(ERR, "Return failure %d for >> cmd %d", >>>>>>> - vf->cmd_retval, args->ops); >>>>>>> - err = -EINVAL; >>>>>>> + } else { >>>>>>> + /* For other virtchnl ops in running time, >>>>>>> + * wait for the cmd done flag. 
>>>>>>> + */ >>>>>>> + do { >>>>>>> + if (vf->pend_cmd == >>>>>> VIRTCHNL_OP_UNKNOWN) >>>>>>> + break; >>>>>>> + iavf_msec_delay(ASQ_DELAY_MS); >>>>>>> + /* If don't read msg or read sys event, >>>>>> continue */ >>>>>>> + } while (i++ < MAX_TRY_TIMES); >>>>>>> + >>>>>>> + if (i >= MAX_TRY_TIMES) { >>>>>>> + PMD_DRV_LOG(ERR, "No response >> for >>>>>> cmd %d", args->ops); >>>>>>> + _clear_cmd(vf); >>>>>>> + err = -EIO; >>>>>>> + } else if (vf->cmd_retval == >>>>>>> + >> VIRTCHNL_STATUS_ERR_NOT_SUPPORTED) { >>>>>>> + PMD_DRV_LOG(ERR, "Cmd %d not >>>>>> supported", args->ops); >>>>>>> + err = -ENOTSUP; >>>>>>> + } else if (vf->cmd_retval != >>>>>> VIRTCHNL_STATUS_SUCCESS) { >>>>>>> + PMD_DRV_LOG(ERR, "Return >> failure %d for >>>>>> cmd %d", >>>>>>> + vf->cmd_retval, args- >>> ops); >>>>>>> + err = -EINVAL; >>>>>>> + } >>>>>>> } >>>>>>> break; >>>>>>> } >>>>>>> @@ -403,8 +424,14 @@ iavf_execute_vf_cmd_safe(struct >>> iavf_adapter >>>>>>> *adapter, { >>>>>>> struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter); >>>>>>> int ret; >>>>>>> + int is_intr_thread = rte_thread_is_intr(); >>>>>>> >>>>>>> - rte_spinlock_lock(&vf->aq_lock); >>>>>>> + if (is_intr_thread) { >>>>>>> + if (!rte_spinlock_trylock(&vf->aq_lock)) >>>>>>> + return -EIO; >>>>>>> + } else { >>>>>>> + rte_spinlock_lock(&vf->aq_lock); >>>>>>> + } >>>>>>> ret = iavf_execute_vf_cmd(adapter, args, async); >>>>>>> rte_spinlock_unlock(&vf->aq_lock); >>>>>>> >>>>> >>>> > ^ permalink raw reply [flat|nested] 19+ messages in thread
* RE: [PATCH v3] net/iavf: fix iavf query stats in intr thread 2023-03-27 12:31 ` Ferruh Yigit 2023-03-27 12:37 ` Ferruh Yigit @ 2023-03-29 6:41 ` Deng, KaiwenX 1 sibling, 0 replies; 19+ messages in thread From: Deng, KaiwenX @ 2023-03-29 6:41 UTC (permalink / raw) To: Ferruh Yigit, dev Cc: stable, Yang, Qiming, Zhou, YidingX, Chas Williams, Min Hu (Connor), Wu, Jingjing, Xing, Beilei, Mike Pattrick, Zhang, Qi Z, Doherty, Declan, Mrzyglod, Daniel T, Dapeng Yu > -----Original Message----- > From: Ferruh Yigit <ferruh.yigit@amd.com> > Sent: Monday, March 27, 2023 8:32 PM > To: Deng, KaiwenX <kaiwenx.deng@intel.com>; dev@dpdk.org > Cc: stable@dpdk.org; Yang, Qiming <qiming.yang@intel.com>; Zhou, YidingX > <yidingx.zhou@intel.com>; Chas Williams <chas3@att.com>; Min Hu (Connor) > <humin29@huawei.com>; Wu, Jingjing <jingjing.wu@intel.com>; Xing, Beilei > <beilei.xing@intel.com>; Mike Pattrick <mkp@redhat.com>; Zhang, Qi Z > <qi.z.zhang@intel.com>; Doherty, Declan <declan.doherty@intel.com>; > Mrzyglod, Daniel T <daniel.t.mrzyglod@intel.com>; Dapeng Yu > <dapengx.yu@intel.com> > Subject: Re: [PATCH v3] net/iavf: fix iavf query stats in intr thread > > On 3/27/2023 6:31 AM, Deng, KaiwenX wrote: > > > > > >> -----Original Message----- > >> From: Ferruh Yigit <ferruh.yigit@amd.com> > >> Sent: Thursday, March 23, 2023 11:39 PM > >> To: Deng, KaiwenX <kaiwenx.deng@intel.com>; dev@dpdk.org > >> Cc: stable@dpdk.org; Yang, Qiming <qiming.yang@intel.com>; Zhou, > >> YidingX <yidingx.zhou@intel.com>; Chas Williams <chas3@att.com>; Min > >> Hu (Connor) <humin29@huawei.com>; Wu, Jingjing > >> <jingjing.wu@intel.com>; Xing, Beilei <beilei.xing@intel.com>; Mike > >> Pattrick <mkp@redhat.com>; Zhang, Qi Z <qi.z.zhang@intel.com>; > >> Doherty, Declan <declan.doherty@intel.com>; Mrzyglod, Daniel T > >> <daniel.t.mrzyglod@intel.com>; Dapeng Yu <dapengx.yu@intel.com> > >> Subject: Re: [PATCH v3] net/iavf: fix iavf query stats in intr thread > >> > >> On 3/22/2023 7:26 AM, Kaiwen Deng wrote: > >>> When iavf send query-stats command in eal-intr-thread through > >>> virtual channel, there will be no response received from > >>> iavf_dev_virtchnl_handler for this command during block and wait. > >>> Because iavf_dev_virtchnl_handler is also registered in eal-intr-thread. > >>> > >>> When vf device is bonded as BONDING_MODE_TLB mode, the slave > device > >>> update callback will registered in alarm and called by > >>> eal-intr-thread, it would also raise the above issue. > >>> > >>> This commit add to poll the response for VIRTCHNL_OP_GET_STATS > when > >> it > >>> is called by eal-intr-thread to fix this issue. > >>> > >>> Fixes: 91bf37d250aa ("net/iavf: add lock for VF commands") > >>> Fixes: 22b123a36d07 ("net/avf: initialize PMD") > >>> Fixes: 7c76a747e68c ("bond: add mode 5") > >>> Fixes: 435d523112cc ("net/iavf: fix multi-process shared data") > >>> Fixes: cb5c1b91f76f ("net/iavf: add thread for event callbacks") > >> > >> > >> Hi Kaiwen, > >> > >> Above commit already seems trying to address same issue, it creates > >> "iavf- event-thread" control thread to asyncroniously handle the > >> interrupts, in non- interrupt context, why it is not working? > >> > >> Instead of adding 'rte_thread_is_intr()' checks, can't you make sure > >> all interrupts handled in control tread? > >> > >> And can you please provide a stack trace in commit log, to describe > >> the issue better? > > Hi Ferru, > > Sorry for my late reply, And thanks for your review. 
> > > > The above commit does not fix this issue when we need to get the > returned data. > > If we call iavf_query_stats and wait for response statistics in the intr-thread. > > iavf_handle_virtchnl_msg is also registered in the intr_thread and > > will not be executed while waiting. > > > > Got it, since return value is required, API can't be called asyncroniously. > > > > I think 'rte_thread_is_intr()' checks may cause more trouble for you in long > term, > > - why 'iavf_query_stats()' is called in the iterrupt thread, can it be prevented? > > - does it make sense to allways poll messages from PF (for simplification)? > Virtual channel commands sometimes need to be registered to alarm so that they can be called periodically, and alarm is registered to be called in interrupt threads. For some commands that do not require a return value, It cannot support asynchronous if allways poll messages from PF. > > If answer to both are 'No', I am OK to continue with current proposal if you > are happy with it. > > > > This commit I changed it to polling for replies to commands executed in the > interrupt thread. > > > > main thread interrupt > thread > > | | > > | | > > iavf_query_stats | > > iavf_execute_vf_cmd | > > iavf_aq_send_msg_to_pf and wait handle complete | > > | | > > |--------------------------------------------------------------------------------------- > ----->| > > | | > > | > iavf_handle_virtchnl_msg > > | | > > |<------------------------------------------------------------------------------------- > -------| > > | | > > iavf_execute_vf_cmd get response | > > | | > > > > The above is the stack trace for the normal execution of > > iavf_query_stats in the main thread. > > > > interrupt thread > > | > > | > > iavf_query_stats > > iavf_execute_vf_cmd > > iavf_aq_send_msg_to_pf wait handle complete(1 sec) > iavf_execute_vf_cmd > > timeout > > | > > | > > iavf_handle_virtchnl_msg > > | > > > > The above is the stack trace for the abnormal execution of > > iavf_query_stats in the interrupt thread. > >> > >>> Cc: stable@dpdk.org > >>> > >>> Signed-off-by: Kaiwen Deng <kaiwenx.deng@intel.com> > >>> --- > >>> Changes since v2: > >>> - Add to poll the response for VIRTCHNL_OP_GET_STATS. > >>> > >>> Changes since v1: > >>> - Add lock to avoid race condition. 
> >>> --- > >>> --- > >>> drivers/net/bonding/rte_eth_bond_pmd.c | 7 ++- > >>> drivers/net/iavf/iavf_ethdev.c | 5 +- > >>> drivers/net/iavf/iavf_vchnl.c | 71 ++++++++++++++++++-------- > >>> 3 files changed, 58 insertions(+), 25 deletions(-) > >>> > >>> diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c > >>> b/drivers/net/bonding/rte_eth_bond_pmd.c > >>> index f0c4f7d26b..edce621496 100644 > >>> --- a/drivers/net/bonding/rte_eth_bond_pmd.c > >>> +++ b/drivers/net/bonding/rte_eth_bond_pmd.c > >>> @@ -894,6 +894,7 @@ bond_ethdev_update_tlb_slave_cb(void *arg) > >>> uint8_t update_stats = 0; > >>> uint16_t slave_id; > >>> uint16_t i; > >>> + int ret; > >>> > >>> internals->slave_update_idx++; > >>> > >>> @@ -903,7 +904,10 @@ bond_ethdev_update_tlb_slave_cb(void *arg) > >>> > >>> for (i = 0; i < internals->active_slave_count; i++) { > >>> slave_id = internals->active_slaves[i]; > >>> - rte_eth_stats_get(slave_id, &slave_stats); > >>> + ret = rte_eth_stats_get(slave_id, &slave_stats); > >>> + if (ret) > >>> + goto OUT; > >>> + > >>> tx_bytes = slave_stats.obytes - tlb_last_obytets[slave_id]; > >>> bandwidth_left(slave_id, tx_bytes, > >>> internals->slave_update_idx, &bwg_array[i]); > >> @@ -922,6 +926,7 @@ > >>> bond_ethdev_update_tlb_slave_cb(void *arg) > >>> for (i = 0; i < slave_count; i++) > >>> internals->tlb_slaves_order[i] = bwg_array[i].slave; > >>> > >>> +OUT: > >>> rte_eal_alarm_set(REORDER_PERIOD_MS * 1000, > >> bond_ethdev_update_tlb_slave_cb, > >>> (struct bond_dev_private *)internals); } diff --git > >>> a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c > >>> index 3196210f2c..d6e1f1a7f4 100644 > >>> --- a/drivers/net/iavf/iavf_ethdev.c > >>> +++ b/drivers/net/iavf/iavf_ethdev.c > >>> @@ -2607,6 +2607,9 @@ iavf_dev_init(struct rte_eth_dev *eth_dev) > >>> adapter->dev_data = eth_dev->data; > >>> adapter->stopped = 1; > >>> > >>> + if (iavf_dev_event_handler_init()) > >>> + goto init_vf_err; > >>> + > >>> if (iavf_init_vf(eth_dev) != 0) { > >>> PMD_INIT_LOG(ERR, "Init vf failed"); > >>> return -1; > >>> @@ -2634,8 +2637,6 @@ iavf_dev_init(struct rte_eth_dev *eth_dev) > >>> rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr, > >>> ð_dev->data->mac_addrs[0]); > >>> > >>> - if (iavf_dev_event_handler_init()) > >>> - goto init_vf_err; > >>> > >>> if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) > >> { > >>> /* register callback func to eal lib */ diff --git > >>> a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c > >>> index 9adaadb173..aeffb07cca 100644 > >>> --- a/drivers/net/iavf/iavf_vchnl.c > >>> +++ b/drivers/net/iavf/iavf_vchnl.c > >>> @@ -256,6 +256,7 @@ iavf_read_msg_from_pf(struct iavf_adapter > >> *adapter, uint16_t buf_len, > >>> vf->link_speed = > >> iavf_convert_link_speed(speed); > >>> } > >>> iavf_dev_link_update(vf->eth_dev, 0); > >>> + iavf_dev_event_post(vf->eth_dev, > >> RTE_ETH_EVENT_INTR_LSC, NULL, 0); > >>> PMD_DRV_LOG(INFO, "Link status update:%s", > >>> vf->link_up ? "up" : "down"); > >>> break; > >>> @@ -368,28 +369,48 @@ iavf_execute_vf_cmd(struct iavf_adapter > >> *adapter, struct iavf_cmd_info *args, > >>> _clear_cmd(vf); > >>> break; > >>> default: > >>> - /* For other virtchnl ops in running time, > >>> - * wait for the cmd done flag. 
> >>> - */ > >>> - do { > >>> - if (vf->pend_cmd == VIRTCHNL_OP_UNKNOWN) > >>> - break; > >>> - iavf_msec_delay(ASQ_DELAY_MS); > >>> - /* If don't read msg or read sys event, continue */ > >>> - } while (i++ < MAX_TRY_TIMES); > >>> - > >>> - if (i >= MAX_TRY_TIMES) { > >>> - PMD_DRV_LOG(ERR, "No response for cmd %d", > >> args->ops); > >>> + if (rte_thread_is_intr()) { > >>> + /* For virtchnl ops were executed in eal_intr_thread, > >>> + * need to poll the response. > >>> + */ > >>> + do { > >>> + result = iavf_read_msg_from_pf(adapter, > >> args->out_size, > >>> + args->out_buffer); > >>> + if (result == IAVF_MSG_CMD) > >>> + break; > >>> + iavf_msec_delay(ASQ_DELAY_MS); > >>> + } while (i++ < MAX_TRY_TIMES); > >>> + if (i >= MAX_TRY_TIMES || > >>> + vf->cmd_retval != > >> VIRTCHNL_STATUS_SUCCESS) { > >>> + err = -1; > >>> + PMD_DRV_LOG(ERR, "No response or return > >> failure (%d)" > >>> + " for cmd %d", vf- > >>> cmd_retval, args->ops); > >>> + } > >>> _clear_cmd(vf); > >>> - err = -EIO; > >>> - } else if (vf->cmd_retval == > >>> - VIRTCHNL_STATUS_ERR_NOT_SUPPORTED) { > >>> - PMD_DRV_LOG(ERR, "Cmd %d not supported", args- > >>> ops); > >>> - err = -ENOTSUP; > >>> - } else if (vf->cmd_retval != VIRTCHNL_STATUS_SUCCESS) { > >>> - PMD_DRV_LOG(ERR, "Return failure %d for cmd %d", > >>> - vf->cmd_retval, args->ops); > >>> - err = -EINVAL; > >>> + } else { > >>> + /* For other virtchnl ops in running time, > >>> + * wait for the cmd done flag. > >>> + */ > >>> + do { > >>> + if (vf->pend_cmd == > >> VIRTCHNL_OP_UNKNOWN) > >>> + break; > >>> + iavf_msec_delay(ASQ_DELAY_MS); > >>> + /* If don't read msg or read sys event, > >> continue */ > >>> + } while (i++ < MAX_TRY_TIMES); > >>> + > >>> + if (i >= MAX_TRY_TIMES) { > >>> + PMD_DRV_LOG(ERR, "No response for > >> cmd %d", args->ops); > >>> + _clear_cmd(vf); > >>> + err = -EIO; > >>> + } else if (vf->cmd_retval == > >>> + VIRTCHNL_STATUS_ERR_NOT_SUPPORTED) { > >>> + PMD_DRV_LOG(ERR, "Cmd %d not > >> supported", args->ops); > >>> + err = -ENOTSUP; > >>> + } else if (vf->cmd_retval != > >> VIRTCHNL_STATUS_SUCCESS) { > >>> + PMD_DRV_LOG(ERR, "Return failure %d for > >> cmd %d", > >>> + vf->cmd_retval, args->ops); > >>> + err = -EINVAL; > >>> + } > >>> } > >>> break; > >>> } > >>> @@ -403,8 +424,14 @@ iavf_execute_vf_cmd_safe(struct iavf_adapter > >>> *adapter, { > >>> struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter); > >>> int ret; > >>> + int is_intr_thread = rte_thread_is_intr(); > >>> > >>> - rte_spinlock_lock(&vf->aq_lock); > >>> + if (is_intr_thread) { > >>> + if (!rte_spinlock_trylock(&vf->aq_lock)) > >>> + return -EIO; > >>> + } else { > >>> + rte_spinlock_lock(&vf->aq_lock); > >>> + } > >>> ret = iavf_execute_vf_cmd(adapter, args, async); > >>> rte_spinlock_unlock(&vf->aq_lock); > >>> > > ^ permalink raw reply [flat|nested] 19+ messages in thread
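[Editor's note, not part of the thread] To summarize the two wait strategies compared in this exchange, here is a condensed sketch of the logic from iavf_execute_vf_cmd() in the diff above. The wrapper function name is invented for illustration and driver-internal headers are assumed; only the branch structure matters: the normal path waits for the eal-intr-thread to clear the pending-command flag, while a command issued from the eal-intr-thread itself must poll the PF reply directly, since the handler that would clear the flag is the calling thread.

/* Condensed from the patch; wait_for_vc_response() is an illustrative
 * name, not a function in the driver. */
static int
wait_for_vc_response(struct iavf_adapter *adapter, struct iavf_cmd_info *args)
{
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
	int i = 0;

	if (rte_thread_is_intr()) {
		/* The interrupt thread cannot wait on itself: poll the
		 * admin queue for the PF reply directly. */
		do {
			if (iavf_read_msg_from_pf(adapter, args->out_size,
					args->out_buffer) == IAVF_MSG_CMD)
				return 0;
			iavf_msec_delay(ASQ_DELAY_MS);
		} while (i++ < MAX_TRY_TIMES);
	} else {
		/* Normal path: the eal-intr-thread reads the reply and
		 * clears the pending-command flag. */
		do {
			if (vf->pend_cmd == VIRTCHNL_OP_UNKNOWN)
				return 0;
			iavf_msec_delay(ASQ_DELAY_MS);
		} while (i++ < MAX_TRY_TIMES);
	}
	return -EIO;
}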
* RE: [PATCH v3] net/iavf: fix iavf query stats in intr thread 2023-03-22 7:26 ` [PATCH v3] " Kaiwen Deng 2023-03-23 15:39 ` Ferruh Yigit @ 2023-06-06 5:41 ` Jiale, SongX 2023-06-07 2:03 ` [PATCH v4] " Kaiwen Deng 2 siblings, 0 replies; 19+ messages in thread From: Jiale, SongX @ 2023-06-06 5:41 UTC (permalink / raw) To: Deng, KaiwenX, dev Cc: stable, Yang, Qiming, Zhou, YidingX, Deng, KaiwenX, Chas Williams, Min Hu (Connor), Wu, Jingjing, Xing, Beilei, Mike Pattrick, Zhang, Qi Z, Doherty, Declan, Mrzyglod, Daniel T, Dapeng Yu > -----Original Message----- > From: Kaiwen Deng <kaiwenx.deng@intel.com> > Sent: Wednesday, March 22, 2023 3:26 PM > To: dev@dpdk.org > Cc: stable@dpdk.org; Yang, Qiming <qiming.yang@intel.com>; Zhou, YidingX > <yidingx.zhou@intel.com>; Deng, KaiwenX <kaiwenx.deng@intel.com>; > Chas Williams <chas3@att.com>; Min Hu (Connor) <humin29@huawei.com>; > Wu, Jingjing <jingjing.wu@intel.com>; Xing, Beilei <beilei.xing@intel.com>; > Mike Pattrick <mkp@redhat.com>; Zhang, Qi Z <qi.z.zhang@intel.com>; > Doherty, Declan <declan.doherty@intel.com>; Mrzyglod, Daniel T > <daniel.t.mrzyglod@intel.com>; Dapeng Yu <dapengx.yu@intel.com> > Subject: [PATCH v3] net/iavf: fix iavf query stats in intr thread > > When iavf send query-stats command in eal-intr-thread through virtual > channel, there will be no response received from iavf_dev_virtchnl_handler > for this command during block and wait. > Because iavf_dev_virtchnl_handler is also registered in eal-intr-thread. > > When vf device is bonded as BONDING_MODE_TLB mode, the slave device > update callback will registered in alarm and called by eal-intr-thread, it would > also raise the above issue. > > This commit add to poll the response for VIRTCHNL_OP_GET_STATS when it > is called by eal-intr-thread to fix this issue. > > Fixes: 91bf37d250aa ("net/iavf: add lock for VF commands") > Fixes: 22b123a36d07 ("net/avf: initialize PMD") > Fixes: 7c76a747e68c ("bond: add mode 5") > Fixes: 435d523112cc ("net/iavf: fix multi-process shared data") > Fixes: cb5c1b91f76f ("net/iavf: add thread for event callbacks") > Cc: stable@dpdk.org > > Signed-off-by: Kaiwen Deng <kaiwenx.deng@intel.com> > --- Tested-by: Jiale Song < songx.jiale@intel.com> ^ permalink raw reply [flat|nested] 19+ messages in thread
* [PATCH v4] net/iavf: fix iavf query stats in intr thread 2023-03-22 7:26 ` [PATCH v3] " Kaiwen Deng 2023-03-23 15:39 ` Ferruh Yigit 2023-06-06 5:41 ` Jiale, SongX @ 2023-06-07 2:03 ` Kaiwen Deng 2023-06-07 4:01 ` Zhang, Qi Z 2 siblings, 1 reply; 19+ messages in thread From: Kaiwen Deng @ 2023-06-07 2:03 UTC (permalink / raw) To: dev Cc: stable, qiming.yang, yidingx.zhou, Kaiwen Deng, Chas Williams, Min Hu (Connor), Jingjing Wu, Beilei Xing, Qi Zhang, Mike Pattrick, Declan Doherty, Daniel Mrzyglod, Dapeng Yu When iavf send query-stats command in eal-intr-thread through virtual channel, there will be no response received from iavf_dev_virtchnl_handler for this command during block and wait. Because iavf_dev_virtchnl_handler is also registered in eal-intr-thread. When vf device is bonded as BONDING_MODE_TLB mode, the slave device update callback will registered in alarm and called by eal-intr-thread, it would also raise the above issue. This commit add to poll the response for VIRTCHNL_OP_GET_STATS when it is called by eal-intr-thread to fix this issue. Fixes: 91bf37d250aa ("net/iavf: add lock for VF commands") Fixes: 22b123a36d07 ("net/avf: initialize PMD") Fixes: 7c76a747e68c ("bond: add mode 5") Fixes: 435d523112cc ("net/iavf: fix multi-process shared data") Fixes: cb5c1b91f76f ("net/iavf: add thread for event callbacks") Cc: stable@dpdk.org Signed-off-by: Kaiwen Deng <kaiwenx.deng@intel.com> --- Changes since v3: - Rebase to latest dpdk-next-net-intel. Changes since v2: - Add to poll the response for VIRTCHNL_OP_GET_STATS. Changes since v1: - Add lock to avoid race condition. --- --- drivers/net/bonding/rte_eth_bond_pmd.c | 7 ++- drivers/net/iavf/iavf_ethdev.c | 5 +- drivers/net/iavf/iavf_vchnl.c | 71 ++++++++++++++++++-------- 3 files changed, 58 insertions(+), 25 deletions(-) diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c index f0c4f7d26b..edce621496 100644 --- a/drivers/net/bonding/rte_eth_bond_pmd.c +++ b/drivers/net/bonding/rte_eth_bond_pmd.c @@ -894,6 +894,7 @@ bond_ethdev_update_tlb_slave_cb(void *arg) uint8_t update_stats = 0; uint16_t slave_id; uint16_t i; + int ret; internals->slave_update_idx++; @@ -903,7 +904,10 @@ bond_ethdev_update_tlb_slave_cb(void *arg) for (i = 0; i < internals->active_slave_count; i++) { slave_id = internals->active_slaves[i]; - rte_eth_stats_get(slave_id, &slave_stats); + ret = rte_eth_stats_get(slave_id, &slave_stats); + if (ret) + goto OUT; + tx_bytes = slave_stats.obytes - tlb_last_obytets[slave_id]; bandwidth_left(slave_id, tx_bytes, internals->slave_update_idx, &bwg_array[i]); @@ -922,6 +926,7 @@ bond_ethdev_update_tlb_slave_cb(void *arg) for (i = 0; i < slave_count; i++) internals->tlb_slaves_order[i] = bwg_array[i].slave; +OUT: rte_eal_alarm_set(REORDER_PERIOD_MS * 1000, bond_ethdev_update_tlb_slave_cb, (struct bond_dev_private *)internals); } diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c index 96004220a1..b72dbc8ceb 100644 --- a/drivers/net/iavf/iavf_ethdev.c +++ b/drivers/net/iavf/iavf_ethdev.c @@ -2647,6 +2647,9 @@ iavf_dev_init(struct rte_eth_dev *eth_dev) adapter->dev_data = eth_dev->data; adapter->stopped = 1; + if (iavf_dev_event_handler_init()) + goto init_vf_err; + if (iavf_init_vf(eth_dev) != 0) { PMD_INIT_LOG(ERR, "Init vf failed"); return -1; @@ -2674,8 +2677,6 @@ iavf_dev_init(struct rte_eth_dev *eth_dev) rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr, ð_dev->data->mac_addrs[0]); - if (iavf_dev_event_handler_init()) - goto init_vf_err; if 
(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) { /* register callback func to eal lib */ diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c index 8cc5377bcf..cb353db008 100644 --- a/drivers/net/iavf/iavf_vchnl.c +++ b/drivers/net/iavf/iavf_vchnl.c @@ -256,6 +256,7 @@ iavf_read_msg_from_pf(struct iavf_adapter *adapter, uint16_t buf_len, vf->link_speed = iavf_convert_link_speed(speed); } iavf_dev_link_update(vf->eth_dev, 0); + iavf_dev_event_post(vf->eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL, 0); if (vf->link_up && !vf->vf_reset) { iavf_dev_watchdog_disable(adapter); } else { @@ -374,28 +375,48 @@ iavf_execute_vf_cmd(struct iavf_adapter *adapter, struct iavf_cmd_info *args, _clear_cmd(vf); break; default: - /* For other virtchnl ops in running time, - * wait for the cmd done flag. - */ - do { - if (vf->pend_cmd == VIRTCHNL_OP_UNKNOWN) - break; - iavf_msec_delay(ASQ_DELAY_MS); - /* If don't read msg or read sys event, continue */ - } while (i++ < MAX_TRY_TIMES); - - if (i >= MAX_TRY_TIMES) { - PMD_DRV_LOG(ERR, "No response for cmd %d", args->ops); + if (rte_thread_is_intr()) { + /* For virtchnl ops were executed in eal_intr_thread, + * need to poll the response. + */ + do { + result = iavf_read_msg_from_pf(adapter, args->out_size, + args->out_buffer); + if (result == IAVF_MSG_CMD) + break; + iavf_msec_delay(ASQ_DELAY_MS); + } while (i++ < MAX_TRY_TIMES); + if (i >= MAX_TRY_TIMES || + vf->cmd_retval != VIRTCHNL_STATUS_SUCCESS) { + err = -1; + PMD_DRV_LOG(ERR, "No response or return failure (%d)" + " for cmd %d", vf->cmd_retval, args->ops); + } _clear_cmd(vf); - err = -EIO; - } else if (vf->cmd_retval == - VIRTCHNL_STATUS_ERR_NOT_SUPPORTED) { - PMD_DRV_LOG(ERR, "Cmd %d not supported", args->ops); - err = -ENOTSUP; - } else if (vf->cmd_retval != VIRTCHNL_STATUS_SUCCESS) { - PMD_DRV_LOG(ERR, "Return failure %d for cmd %d", - vf->cmd_retval, args->ops); - err = -EINVAL; + } else { + /* For other virtchnl ops in running time, + * wait for the cmd done flag. + */ + do { + if (vf->pend_cmd == VIRTCHNL_OP_UNKNOWN) + break; + iavf_msec_delay(ASQ_DELAY_MS); + /* If don't read msg or read sys event, continue */ + } while (i++ < MAX_TRY_TIMES); + + if (i >= MAX_TRY_TIMES) { + PMD_DRV_LOG(ERR, "No response for cmd %d", args->ops); + _clear_cmd(vf); + err = -EIO; + } else if (vf->cmd_retval == + VIRTCHNL_STATUS_ERR_NOT_SUPPORTED) { + PMD_DRV_LOG(ERR, "Cmd %d not supported", args->ops); + err = -ENOTSUP; + } else if (vf->cmd_retval != VIRTCHNL_STATUS_SUCCESS) { + PMD_DRV_LOG(ERR, "Return failure %d for cmd %d", + vf->cmd_retval, args->ops); + err = -EINVAL; + } } break; } @@ -409,8 +430,14 @@ iavf_execute_vf_cmd_safe(struct iavf_adapter *adapter, { struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter); int ret; + int is_intr_thread = rte_thread_is_intr(); - rte_spinlock_lock(&vf->aq_lock); + if (is_intr_thread) { + if (!rte_spinlock_trylock(&vf->aq_lock)) + return -EIO; + } else { + rte_spinlock_lock(&vf->aq_lock); + } ret = iavf_execute_vf_cmd(adapter, args, async); rte_spinlock_unlock(&vf->aq_lock); -- 2.34.1 ^ permalink raw reply [flat|nested] 19+ messages in thread
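[Editor's note, not part of the thread] One detail of the locking change in iavf_execute_vf_cmd_safe() that is easy to miss when reading the diff: rte_spinlock_trylock() returns nonzero on success and 0 when the lock is already held, so the `!rte_spinlock_trylock()` branch makes the interrupt thread fail fast with -EIO instead of spinning on aq_lock and stalling the thread that would release it. A minimal sketch of that pattern, using the driver's own names with the surrounding function elided:

	if (rte_thread_is_intr()) {
		/* Never block in the eal-intr-thread: bail out if another
		 * command is already in flight. */
		if (!rte_spinlock_trylock(&vf->aq_lock))
			return -EIO;
	} else {
		rte_spinlock_lock(&vf->aq_lock);
	}
	ret = iavf_execute_vf_cmd(adapter, args, async);
	rte_spinlock_unlock(&vf->aq_lock);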
* RE: [PATCH v4] net/iavf: fix iavf query stats in intr thread 2023-06-07 2:03 ` [PATCH v4] " Kaiwen Deng @ 2023-06-07 4:01 ` Zhang, Qi Z 0 siblings, 0 replies; 19+ messages in thread From: Zhang, Qi Z @ 2023-06-07 4:01 UTC (permalink / raw) To: Deng, KaiwenX, dev Cc: stable, Yang, Qiming, Zhou, YidingX, Chas Williams, Min Hu (Connor), Wu, Jingjing, Xing, Beilei, Mike Pattrick, Doherty, Declan, Mrzyglod, Daniel T, Dapeng Yu > -----Original Message----- > From: Deng, KaiwenX <kaiwenx.deng@intel.com> > Sent: Wednesday, June 7, 2023 10:04 AM > To: dev@dpdk.org > Cc: stable@dpdk.org; Yang, Qiming <qiming.yang@intel.com>; Zhou, YidingX > <yidingx.zhou@intel.com>; Deng, KaiwenX <kaiwenx.deng@intel.com>; Chas > Williams <chas3@att.com>; Min Hu (Connor) <humin29@huawei.com>; Wu, > Jingjing <jingjing.wu@intel.com>; Xing, Beilei <beilei.xing@intel.com>; Zhang, > Qi Z <qi.z.zhang@intel.com>; Mike Pattrick <mkp@redhat.com>; Doherty, > Declan <declan.doherty@intel.com>; Mrzyglod, Daniel T > <daniel.t.mrzyglod@intel.com>; Dapeng Yu <dapengx.yu@intel.com> > Subject: [PATCH v4] net/iavf: fix iavf query stats in intr thread > > When iavf send query-stats command in eal-intr-thread through virtual > channel, there will be no response received from iavf_dev_virtchnl_handler > for this command during block and wait. > Because iavf_dev_virtchnl_handler is also registered in eal-intr-thread. > > When vf device is bonded as BONDING_MODE_TLB mode, the slave device > update callback will registered in alarm and called by eal-intr-thread, it would > also raise the above issue. > > This commit add to poll the response for VIRTCHNL_OP_GET_STATS when it > is called by eal-intr-thread to fix this issue. > > Fixes: 91bf37d250aa ("net/iavf: add lock for VF commands") > Fixes: 22b123a36d07 ("net/avf: initialize PMD") > Fixes: 7c76a747e68c ("bond: add mode 5") > Fixes: 435d523112cc ("net/iavf: fix multi-process shared data") > Fixes: cb5c1b91f76f ("net/iavf: add thread for event callbacks") To comply with the principles of modularity and separation of concerns, please separate the patch into two distinct fixes, addressing each PMD individually for the bond PMD and iavf PMD. btw, it would be better to only list the patch that introduced the issue. Regarding the missing part of the iavf PMD, it is unable to handle VC commands in the interrupt handler. This aspect was not initially considered, so I assume that the following fix line should be sufficient. Fixes: 22b123a36d07 ("net/avf: initialize PMD") ^ permalink raw reply [flat|nested] 19+ messages in thread