From: "Zhang, Qi Z" <qi.z.zhang@intel.com>
To: "Zhou, YidingX" <yidingx.zhou@intel.com>, "dev@dpdk.org" <dev@dpdk.org>
Cc: "Zhou, YidingX" <yidingx.zhou@intel.com>,
	"stable@dpdk.org" <stable@dpdk.org>
Subject: RE: [PATCH v4] net/iavf: fix error of virtchnl command
Date: Tue, 18 Oct 2022 13:18:18 +0000
Message-ID: <DM4PR11MB59943D56D0B9D6B8CF572229D7289@DM4PR11MB5994.namprd11.prod.outlook.com>
In-Reply-To: <20221013062017.96065-1-yidingx.zhou@intel.com>



> -----Original Message-----
> From: Yiding Zhou <yidingx.zhou@intel.com>
> Sent: Thursday, October 13, 2022 2:20 PM
> To: dev@dpdk.org
> Cc: Zhou, YidingX <yidingx.zhou@intel.com>; stable@dpdk.org
> Subject: [PATCH v4] net/iavf: fix error of virtchnl command
>
> When the device is bonded, the bond PMD registers a callback for the LSC
> event. This callback executes some virtchnl commands in the eal-intr-thread
> to reinitialize the device, with interrupts disabled. In this case, the
> responses to the commands cannot be received.

Can we reword this commit log a little bit?

I think, first of all, the patch is a code refactor: it moves all ethdev event notifications out of the interrupt handler thread into a separate dedicated thread.

Secondly, it also fixes the problem observed with the bonding PMD.
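
For anyone skimming the thread, below is a minimal sketch of the pattern that triggers the issue (illustrative only; the callback and function names are made up and this is not the actual bond PMD code). A callback registered for RTE_ETH_EVENT_INTR_LSC reinitializes the port, which makes iavf send virtchnl commands; when the PMD invokes that callback directly from the eal-intr-thread, the thread that should read the responses is busy in the callback (and interrupts are disabled), so the commands time out.

#include <rte_common.h>
#include <rte_ethdev.h>

/* Hypothetical LSC callback: reconfiguring the port causes the iavf PMD
 * to issue virtchnl commands to the PF. */
static int
lsc_reconfig_cb(uint16_t port_id, enum rte_eth_event_type event,
		void *cb_arg, void *ret_param)
{
	RTE_SET_USED(event);
	RTE_SET_USED(cb_arg);
	RTE_SET_USED(ret_param);

	/* If this runs in the eal-intr-thread, the thread that would read
	 * the virtchnl responses is blocked here, so stop/start time out. */
	rte_eth_dev_stop(port_id);
	return rte_eth_dev_start(port_id);
}

static void
register_lsc_cb(uint16_t port_id)
{
	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
				      lsc_reconfig_cb, NULL);
}

With the patch below, such callbacks are deferred to the dedicated "iavf-event-thread", so the eal-intr-thread stays free to service the admin queue.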

>
> This commit starts a thread to handle all events to fix this issue.
>
> Fixes: 48de41ca11f0 ("net/avf: enable link status update")
> CC: stable@dpdk.org
>
> Signed-off-by: Yiding Zhou <yidingx.zhou@intel.com>
> ---
> v4: add 'reset' and 'ipsec' event handling
> v3: fix CI errors
> ---
>  drivers/net/iavf/iavf.h        |   2 +
>  drivers/net/iavf/iavf_ethdev.c |   5 ++
>  drivers/net/iavf/iavf_vchnl.c  | 152 +++++++++++++++++++++++++++++++--
>  3 files changed, 153 insertions(+), 6 deletions(-)
>
> diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
> index 26b858f6f0..1edebab8dc 100644
> --- a/drivers/net/iavf/iavf.h
> +++ b/drivers/net/iavf/iavf.h
> @@ -424,6 +424,8 @@ _atomic_set_async_response_cmd(struct iavf_info *vf, enum virtchnl_ops ops)
>  }
>  int iavf_check_api_version(struct iavf_adapter *adapter);
>  int iavf_get_vf_resource(struct iavf_adapter *adapter);
> +void iavf_dev_event_handler_fini(void);
> +int iavf_dev_event_handler_init(void);
>  void iavf_handle_virtchnl_msg(struct rte_eth_dev *dev);
>  int iavf_enable_vlan_strip(struct iavf_adapter *adapter);
>  int iavf_disable_vlan_strip(struct iavf_adapter *adapter);
> diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
> index 782be82c7f..633d684804 100644
> --- a/drivers/net/iavf/iavf_ethdev.c
> +++ b/drivers/net/iavf/iavf_ethdev.c
> @@ -2631,6 +2631,9 @@ iavf_dev_init(struct rte_eth_dev *eth_dev)
>       rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
>                       &eth_dev->data->mac_addrs[0]);
>
> +     if (iavf_dev_event_handler_init())
> +             goto init_vf_err;
> +
>       if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) {
>               /* register callback func to eal lib */
>               rte_intr_callback_register(pci_dev->intr_handle,
> @@ -2785,6 +2788,8 @@ iavf_dev_uninit(struct rte_eth_dev *dev)
>
>       iavf_dev_close(dev);
>
> +     iavf_dev_event_handler_fini();
> +
>       return 0;
>  }
>
> diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
> index 4327c5a786..43e18ca5f7 100644
> --- a/drivers/net/iavf/iavf_vchnl.c
> +++ b/drivers/net/iavf/iavf_vchnl.c
> @@ -2,6 +2,7 @@
>   * Copyright(c) 2017 Intel Corporation
>   */
>
> +#include <unistd.h>
>  #include <stdio.h>
>  #include <errno.h>
>  #include <stdint.h>
> @@ -11,6 +12,7 @@
>  #include <inttypes.h>
>  #include <rte_byteorder.h>
>  #include <rte_common.h>
> +#include <rte_os_shim.h>
>
>  #include <rte_debug.h>
>  #include <rte_alarm.h>
> @@ -27,6 +29,145 @@
>  #define MAX_TRY_TIMES 2000
>  #define ASQ_DELAY_MS  1
>
> +#define MAX_EVENT_PENDING 16
> +
> +struct iavf_event_element {
> +     TAILQ_ENTRY(iavf_event_element) next;
> +     struct rte_eth_dev *dev;
> +     enum rte_eth_event_type event;
> +     void *param;
> +     size_t param_alloc_size;
> +     uint8_t param_alloc_data[0];
> +};
> +
> +struct iavf_event_handler {
> +     uint32_t ndev;
> +     pthread_t tid;
> +     int fd[2];
> +     pthread_mutex_t lock;
> +     TAILQ_HEAD(event_list, iavf_event_element) pending;
> +};
> +
> +static struct iavf_event_handler event_handler = {
> +     .fd = {-1, -1},
> +};
> +
> +#ifndef TAILQ_FOREACH_SAFE
> +#define TAILQ_FOREACH_SAFE(var, head, field, tvar) \
> +     for ((var) = TAILQ_FIRST((head)); \
> +             (var) && ((tvar) = TAILQ_NEXT((var), field), 1); \
> +     (var) = (tvar))
> +#endif
> +
> +static void *
> +iavf_dev_event_handle(void *param __rte_unused)
> +{
> +     struct iavf_event_handler *handler = &event_handler;
> +     TAILQ_HEAD(event_list, iavf_event_element) pending;
> +
> +     while (true) {
> +             char unused[MAX_EVENT_PENDING];
> +             ssize_t nr = read(handler->fd[0], &unused, sizeof(unused));
> +             if (nr <= 0)
> +                     break;
> +
> +             TAILQ_INIT(&pending);
> +             pthread_mutex_lock(&handler->lock);
> +             TAILQ_CONCAT(&pending, &handler->pending, next);
> +             pthread_mutex_unlock(&handler->lock);
> +
> +             struct iavf_event_element *pos, *save_next;
> +             TAILQ_FOREACH_SAFE(pos, &pending, next, save_next) {
> +                     TAILQ_REMOVE(&pending, pos, next);
> +                     rte_eth_dev_callback_process(pos->dev, pos->event,
> +                                     pos->param);
> +                     rte_free(pos);
> +             }
> +     }
> +     return NULL;
> +}
> +
> +static void
> +iavf_dev_event_post(struct rte_eth_dev *dev,
> +             enum rte_eth_event_type event,
> +             void *param, size_t param_alloc_size)
> +{
> +     struct iavf_event_handler *handler = &event_handler;
> +     char notify_byte;
> +     struct iavf_event_element *elem = rte_malloc(NULL,
> +                     sizeof(*elem) + param_alloc_size, 0);
> +     if (!elem)
> +             return;
> +
> +     elem->dev = dev;
> +     elem->event = event;
> +     elem->param = param;
> +     elem->param_alloc_size = param_alloc_size;
> +     if (param && param_alloc_size) {
> +             rte_memcpy(elem->param_alloc_data, param, param_alloc_size);
> +             elem->param = elem->param_alloc_data;
> +     }
> +
> +     pthread_mutex_lock(&handler->lock);
> +     TAILQ_INSERT_TAIL(&handler->pending, elem, next);
> +     pthread_mutex_unlock(&handler->lock);
> +
> +     ssize_t nw = write(handler->fd[1], &notify_byte, 1);
> +     RTE_SET_USED(nw);
> +}
> +
> +int
> +iavf_dev_event_handler_init(void)
> +{
> +     struct iavf_event_handler *handler = &event_handler;
> +
> +     if (__atomic_add_fetch(&handler->ndev, 1, __ATOMIC_RELAXED) != 1)
> +             return 0;
> +#if defined(RTE_EXEC_ENV_IS_WINDOWS) && RTE_EXEC_ENV_IS_WINDOWS != 0
> +     int err = _pipe(handler->fd, MAX_EVENT_PENDING, O_BINARY);
> +#else
> +     int err = pipe(handler->fd);
> +#endif
> +     if (err != 0) {
> +             __atomic_sub_fetch(&handler->ndev, 1, __ATOMIC_RELAXED);
> +             return -1;
> +     }
> +
> +     TAILQ_INIT(&handler->pending);
> +     pthread_mutex_init(&handler->lock, NULL);
> +
> +     if (rte_ctrl_thread_create(&handler->tid, "iavf-event-thread",
> +                             NULL, iavf_dev_event_handle, NULL)) {
> +             __atomic_sub_fetch(&handler->ndev, 1, __ATOMIC_RELAXED);
> +             return -1;
> +     }
> +
> +     return 0;
> +}
> +
> +void
> +iavf_dev_event_handler_fini(void)
> +{
> +     struct iavf_event_handler *handler = &event_handler;
> +
> +     if (__atomic_sub_fetch(&handler->ndev, 1, __ATOMIC_RELAXED) != 0)
> +             return;
> +
> +     int unused = pthread_cancel(handler->tid);
> +     RTE_SET_USED(unused);
> +     close(handler->fd[0]);
> +     close(handler->fd[1]);
> +     handler->fd[0] = -1;
> +     handler->fd[1] = -1;
> +
> +     pthread_join(handler->tid, NULL);
> +     pthread_mutex_destroy(&handler->lock);
> +
> +     struct iavf_event_element *pos, *save_next;
> +     TAILQ_FOREACH_SAFE(pos, &handler->pending, next, save_next) {
> +             TAILQ_REMOVE(&handler->pending, pos, next);
> +             rte_free(pos);
> +     }
> +}
> +
>  static uint32_t
>  iavf_convert_link_speed(enum virtchnl_link_speed virt_link_speed)
>  {
> @@ -278,8 +419,8 @@ iavf_handle_pf_event_msg(struct rte_eth_dev *dev, uint8_t *msg,
>       case VIRTCHNL_EVENT_RESET_IMPENDING:
>               PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_RESET_IMPENDING event");
>               vf->vf_reset = true;
> -             rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
> -                                           NULL);
> +             iavf_dev_event_post(dev, RTE_ETH_EVENT_INTR_RESET,
> +                                           NULL, 0);
>               break;
>       case VIRTCHNL_EVENT_LINK_CHANGE:
>               PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_LINK_CHANGE event");
> @@ -293,7 +434,7 @@ iavf_handle_pf_event_msg(struct rte_eth_dev *dev, uint8_t *msg,
>                       vf->link_speed = iavf_convert_link_speed(speed);
>               }
>               iavf_dev_link_update(dev, 0);
> -             rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
> +             iavf_dev_event_post(dev, RTE_ETH_EVENT_INTR_LSC, NULL, 0);
>               break;
>       case VIRTCHNL_EVENT_PF_DRIVER_CLOSE:
>               PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_PF_DRIVER_CLOSE event");
> @@ -359,9 +500,8 @@ iavf_handle_virtchnl_msg(struct rte_eth_dev *dev)
>                                       desc.subtype =
>                                               RTE_ETH_EVENT_IPSEC_UNKNOWN;
>                                       desc.metadata = ev->ipsec_event_data;
> -                                     rte_eth_dev_callback_process(dev,
> -                                                     RTE_ETH_EVENT_IPSEC,
> -                                                     &desc);
> +                                     iavf_dev_event_post(dev, RTE_ETH_EVENT_IPSEC,
> +                                                     &desc, sizeof(desc));
>                                       return;
>                               }
> --
> 2.34.1


Thread overview: 14+ messages
2022-09-19  6:06 [PATCH] " Yiding Zhou
2022-09-19  7:53 ` [PATCH v2] " Yiding Zhou
2022-10-08  8:48 ` [PATCH v3] " Yiding Zhou
2022-10-09  6:03   ` Zhang, Qi Z
2022-10-10  2:02     ` Zhou, YidingX
2022-10-13  6:20 ` [PATCH v4] " Yiding Zhou
2022-10-18 13:18   ` Zhang, Qi Z [this message]
2022-10-13  6:21 ` [PATCH] net/ice/base: fix duplicate flow rules Yiding Zhou
2022-10-19 12:19   ` Xu, Ke1
2022-11-09  0:40     ` Zhang, Qi Z
2022-11-08  6:37   ` Zhou, YidingX
2022-10-20  5:00 ` [PATCH v5] net/iavf: add thread for event callbacks Yiding Zhou
2022-10-20  5:40   ` Zhang, Qi Z
2022-10-28 21:45   ` Thomas Monjalon
