From: Christian Ehrhardt <christian.ehrhardt@canonical.com>
To: Yiding Zhou <yidingx.zhou@intel.com>
Cc: stable@dpdk.org
Subject: Re: [PATCH 19.11] net/iavf: add thread for event callbacks
Date: Wed, 16 Nov 2022 09:17:36 +0100
Message-ID: <CAATJJ0+7nqhK96dtZd55VFKeezKzU5h-3S3mc_w=MOta_NGoiw@mail.gmail.com>
In-Reply-To: <20221116023926.317352-1-yidingx.zhou@intel.com>
On Wed, Nov 16, 2022 at 3:35 AM Yiding Zhou <yidingx.zhou@intel.com> wrote:
>
> [upstream commit cb5c1b91f76f436724cd09f26c7432b2775b519c]
Hi,
I tried to apply this but it causes build errors:
[ 151s] iavf_vchnl.o: In function `iavf_dev_event_handler_fini':
[ 151s] iavf_vchnl.c:(.text+0x5d9): undefined reference to `pthread_cancel'
[ 151s] iavf_vchnl.c:(.text+0x611): undefined reference to `pthread_join'
[ 151s] collect2: error: ld returned 1 exit status
[ 151s] make[4]: *** [librte_pmd_iavf.so.20.0] Error 1
[ 151s] make[3]: *** [iavf] Error 2
This happens on all Red Hat, Fedora and SUSE builds.
Interestingly, the Ubuntu builds worked fine, but only those.
Please have a look and resubmit.
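My guess, not verified yet, is that the legacy make build never links
libpthread for this driver, so the new pthread_cancel()/pthread_join()
calls stay unresolved, while the Ubuntu builds happen to resolve the
symbols some other way. If that is the cause, a minimal sketch of the
kind of change that might be needed, assuming drivers/net/iavf/Makefile
in 19.11 carries the usual LDLIBS list for the make build:

  # Hypothetical addition to drivers/net/iavf/Makefile for the legacy
  # make build; the exact placement among the existing LDLIBS lines is
  # assumed, not taken from the tree.
  LDLIBS += -lpthread

The meson build presumably needs nothing extra if it already picks up
the threads dependency through EAL, but I have not checked that either.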
> All callbacks registered for ethdev events are called in eal-intr-thread,
> and some of them execute virtchnl commands. Because interrupts are disabled
> in the intr thread, there will be no response received for these commands.
> So all callbacks should be called in a new context.
>
> When the device is bonded, the bond pmd registers callback for LSC event to
> execute virtchnl commands to reinitialize the device, it would also raise
> the above issue.
>
> This commit adds a new thread to call all event callbacks.
>
> Fixes: 48de41ca11f0 ("net/avf: enable link status update")
> Fixes: 84108425054a ("net/iavf: support asynchronous virtual channel message")
>
> Signed-off-by: Yiding Zhou <yidingx.zhou@intel.com>
> ---
> drivers/net/iavf/iavf.h | 2 +
> drivers/net/iavf/iavf_ethdev.c | 4 +
> drivers/net/iavf/iavf_vchnl.c | 148 ++++++++++++++++++++++++++++++++-
> 3 files changed, 150 insertions(+), 4 deletions(-)
>
> diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
> index 297c69775b..356ff532af 100644
> --- a/drivers/net/iavf/iavf.h
> +++ b/drivers/net/iavf/iavf.h
> @@ -214,6 +214,8 @@ _atomic_set_cmd(struct iavf_info *vf, enum virtchnl_ops ops)
>
> int iavf_check_api_version(struct iavf_adapter *adapter);
> int iavf_get_vf_resource(struct iavf_adapter *adapter);
> +void iavf_dev_event_handler_fini(void);
> +int iavf_dev_event_handler_init(void);
> void iavf_handle_virtchnl_msg(struct rte_eth_dev *dev);
> int iavf_enable_vlan_strip(struct iavf_adapter *adapter);
> int iavf_disable_vlan_strip(struct iavf_adapter *adapter);
> diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
> index 3cb02bd1fb..28bc1ca2da 100644
> --- a/drivers/net/iavf/iavf_ethdev.c
> +++ b/drivers/net/iavf/iavf_ethdev.c
> @@ -1413,6 +1413,9 @@ iavf_dev_init(struct rte_eth_dev *eth_dev)
> rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
> &eth_dev->data->mac_addrs[0]);
>
> + if (iavf_dev_event_handler_init())
> + return -1;
> +
> /* register callback func to eal lib */
> rte_intr_callback_register(&pci_dev->intr_handle,
> iavf_dev_interrupt_handler,
> @@ -1460,6 +1463,7 @@ iavf_dev_uninit(struct rte_eth_dev *dev)
> dev->rx_pkt_burst = NULL;
> dev->tx_pkt_burst = NULL;
> iavf_dev_close(dev);
> + iavf_dev_event_handler_fini();
>
> rte_free(vf->vf_res);
> vf->vsi_res = NULL;
> diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
> index ba627f1103..1f5f43a7a5 100644
> --- a/drivers/net/iavf/iavf_vchnl.c
> +++ b/drivers/net/iavf/iavf_vchnl.c
> @@ -2,6 +2,7 @@
> * Copyright(c) 2017 Intel Corporation
> */
>
> +#include <fcntl.h>
> #include <stdio.h>
> #include <errno.h>
> #include <stdint.h>
> @@ -30,6 +31,146 @@
> #define MAX_TRY_TIMES 200
> #define ASQ_DELAY_MS 10
>
> +#define MAX_EVENT_PENDING 16
> +
> +struct iavf_event_element {
> + TAILQ_ENTRY(iavf_event_element) next;
> + struct rte_eth_dev *dev;
> + enum rte_eth_event_type event;
> + void *param;
> + size_t param_alloc_size;
> + uint8_t param_alloc_data[0];
> +};
> +
> +struct iavf_event_handler {
> + uint32_t ndev;
> + pthread_t tid;
> + int fd[2];
> + pthread_mutex_t lock;
> + TAILQ_HEAD(event_lsit, iavf_event_element) pending;
> +};
> +
> +static struct iavf_event_handler event_handler = {
> + .fd = {-1, -1},
> +};
> +
> +#ifndef TAILQ_FOREACH_SAFE
> +#define TAILQ_FOREACH_SAFE(var, head, field, tvar) \
> + for ((var) = TAILQ_FIRST((head)); \
> + (var) && ((tvar) = TAILQ_NEXT((var), field), 1); \
> + (var) = (tvar))
> +#endif
> +
> +static void *
> +iavf_dev_event_handle(void *param __rte_unused)
> +{
> + struct iavf_event_handler *handler = &event_handler;
> + TAILQ_HEAD(event_list, iavf_event_element) pending;
> +
> + while (true) {
> + char unused[MAX_EVENT_PENDING];
> + ssize_t nr = read(handler->fd[0], &unused, sizeof(unused));
> + if (nr <= 0)
> + break;
> +
> + TAILQ_INIT(&pending);
> + pthread_mutex_lock(&handler->lock);
> + TAILQ_CONCAT(&pending, &handler->pending, next);
> + pthread_mutex_unlock(&handler->lock);
> +
> + struct iavf_event_element *pos, *save_next;
> + TAILQ_FOREACH_SAFE(pos, &pending, next, save_next) {
> + TAILQ_REMOVE(&pending, pos, next);
> + _rte_eth_dev_callback_process(pos->dev, pos->event, pos->param);
> + rte_free(pos);
> + }
> + }
> +
> + return NULL;
> +}
> +
> +static void
> +iavf_dev_event_post(struct rte_eth_dev *dev,
> + enum rte_eth_event_type event,
> + void *param, size_t param_alloc_size)
> +{
> + struct iavf_event_handler *handler = &event_handler;
> + char notify_byte;
> + struct iavf_event_element *elem = rte_malloc(NULL, sizeof(*elem) + param_alloc_size, 0);
> + if (!elem)
> + return;
> +
> + elem->dev = dev;
> + elem->event = event;
> + elem->param = param;
> + elem->param_alloc_size = param_alloc_size;
> + if (param && param_alloc_size) {
> + rte_memcpy(elem->param_alloc_data, param, param_alloc_size);
> + elem->param = elem->param_alloc_data;
> + }
> +
> + pthread_mutex_lock(&handler->lock);
> + TAILQ_INSERT_TAIL(&handler->pending, elem, next);
> + pthread_mutex_unlock(&handler->lock);
> +
> + ssize_t nw = write(handler->fd[1], &notify_byte, 1);
> + RTE_SET_USED(nw);
> +}
> +
> +int
> +iavf_dev_event_handler_init(void)
> +{
> + struct iavf_event_handler *handler = &event_handler;
> +
> + if (__atomic_add_fetch(&handler->ndev, 1, __ATOMIC_RELAXED) != 1)
> + return 0;
> +#if defined(RTE_EXEC_ENV_IS_WINDOWS) && RTE_EXEC_ENV_IS_WINDOWS != 0
> + int err = _pipe(handler->fd, MAX_EVENT_PENDING, O_BINARY);
> +#else
> + int err = pipe(handler->fd);
> +#endif
> + if (err != 0) {
> + __atomic_sub_fetch(&handler->ndev, 1, __ATOMIC_RELAXED);
> + return -1;
> + }
> +
> + TAILQ_INIT(&handler->pending);
> + pthread_mutex_init(&handler->lock, NULL);
> +
> + if (rte_ctrl_thread_create(&handler->tid, "iavf-event-thread",
> + NULL, iavf_dev_event_handle, NULL)) {
> + __atomic_sub_fetch(&handler->ndev, 1, __ATOMIC_RELAXED);
> + return -1;
> + }
> +
> + return 0;
> +}
> +
> +void
> +iavf_dev_event_handler_fini(void)
> +{
> + struct iavf_event_handler *handler = &event_handler;
> +
> + if (__atomic_sub_fetch(&handler->ndev, 1, __ATOMIC_RELAXED) != 0)
> + return;
> +
> + int unused = pthread_cancel(handler->tid);
> + RTE_SET_USED(unused);
> + close(handler->fd[0]);
> + close(handler->fd[1]);
> + handler->fd[0] = -1;
> + handler->fd[1] = -1;
> +
> + pthread_join(handler->tid, NULL);
> + pthread_mutex_destroy(&handler->lock);
> +
> + struct iavf_event_element *pos, *save_next;
> + TAILQ_FOREACH_SAFE(pos, &handler->pending, next, save_next) {
> + TAILQ_REMOVE(&handler->pending, pos, next);
> + rte_free(pos);
> + }
> +}
> +
> /* Read data in admin queue to get msg from pf driver */
> static enum iavf_status_code
> iavf_read_msg_from_pf(struct iavf_adapter *adapter, uint16_t buf_len,
> @@ -189,8 +330,8 @@ iavf_handle_pf_event_msg(struct rte_eth_dev *dev, uint8_t *msg,
> case VIRTCHNL_EVENT_RESET_IMPENDING:
> PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_RESET_IMPENDING event");
> vf->vf_reset = true;
> - _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
> - NULL);
> + iavf_dev_event_post(dev, RTE_ETH_EVENT_INTR_RESET,
> + NULL, 0);
> break;
> case VIRTCHNL_EVENT_LINK_CHANGE:
> PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_LINK_CHANGE event");
> @@ -204,8 +345,7 @@ iavf_handle_pf_event_msg(struct rte_eth_dev *dev, uint8_t *msg,
> vf->link_speed = iavf_convert_link_speed(speed);
> }
> iavf_dev_link_update(dev, 0);
> - _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
> - NULL);
> + iavf_dev_event_post(dev, RTE_ETH_EVENT_INTR_LSC, NULL, 0);
> break;
> case VIRTCHNL_EVENT_PF_DRIVER_CLOSE:
> PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_PF_DRIVER_CLOSE event");
> --
> 2.34.1
>
--
Christian Ehrhardt
Senior Staff Engineer, Ubuntu Server
Canonical Ltd