From: Yiding Zhou <yidingx.zhou@intel.com>
To: dev@dpdk.org
Cc: Yiding Zhou <yidingx.zhou@intel.com>, stable@dpdk.org
Subject: [PATCH v4] net/iavf: fix error of virtchnl command
Date: Thu, 13 Oct 2022 14:20:17 +0800
Message-Id: <20221013062017.96065-1-yidingx.zhou@intel.com>
X-Mailer: git-send-email 2.34.1
In-Reply-To: <20220919060654.1092715-1-yidingx.zhou@intel.com>
References: <20220919060654.1092715-1-yidingx.zhou@intel.com>

When the device is bonded, the bond PMD registers a callback for the LSC
event. This callback executes several virtchnl commands in the
eal-intr-thread to reinitialize the device, with interrupts disabled.
Because the responses to those commands are also processed in the
eal-intr-thread, which is busy running the callback, none of the
responses can be received. This commit starts a dedicated thread to
handle all events in order to fix this issue.
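For readers skimming the diff, the fix follows a common deferral pattern: the context that must not wait only queues an event element and writes one byte to a pipe, while a dedicated thread wakes up, drains the queue and runs the callbacks. The standalone sketch below illustrates that pattern with generic names ("work", "post", "worker", "deferral_init" are illustrative only, not part of the driver):

/*
 * Minimal sketch of the deferral pattern: the notifier queues an item
 * and writes one byte to a pipe; a dedicated thread drains the queue
 * and runs the callbacks outside the interrupt context.
 */
#include <pthread.h>
#include <stdlib.h>
#include <sys/queue.h>
#include <unistd.h>

struct work {
	TAILQ_ENTRY(work) next;
	void (*fn)(void *arg);
	void *arg;
};

static TAILQ_HEAD(work_list, work) pending = TAILQ_HEAD_INITIALIZER(pending);
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int fds[2]; /* fds[1]: written by the notifier, fds[0]: read by the worker */

/* Runs in its own thread; never called from the interrupt thread. */
static void *worker(void *unused)
{
	char buf[16];

	(void)unused;
	while (read(fds[0], buf, sizeof(buf)) > 0) {
		pthread_mutex_lock(&lock);
		while (!TAILQ_EMPTY(&pending)) {
			struct work *w = TAILQ_FIRST(&pending);

			TAILQ_REMOVE(&pending, w, next);
			pthread_mutex_unlock(&lock);
			w->fn(w->arg); /* callback runs outside the lock */
			free(w);
			pthread_mutex_lock(&lock);
		}
		pthread_mutex_unlock(&lock);
	}
	return NULL; /* write end closed: worker exits */
}

/* Called from the context that must not wait, e.g. an interrupt handler. */
static void post(void (*fn)(void *), void *arg)
{
	struct work *w = malloc(sizeof(*w));

	if (w == NULL)
		return;
	w->fn = fn;
	w->arg = arg;
	pthread_mutex_lock(&lock);
	TAILQ_INSERT_TAIL(&pending, w, next);
	pthread_mutex_unlock(&lock);
	(void)write(fds[1], "x", 1); /* wake the worker */
}

/* Setup: create the pipe and start the worker thread. */
static int deferral_init(void)
{
	pthread_t tid;

	if (pipe(fds) != 0)
		return -1;
	return pthread_create(&tid, NULL, worker, NULL) ? -1 : 0;
}

In the iavf_vchnl.c changes below, iavf_dev_event_post() and iavf_dev_event_handle() play the roles of post() and worker(), using rte_malloc()/rte_free() and rte_ctrl_thread_create() instead of the libc equivalents.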
Fixes: 48de41ca11f0 ("net/avf: enable link status update")
Cc: stable@dpdk.org

Signed-off-by: Yiding Zhou <yidingx.zhou@intel.com>
---
v4: add 'reset' and 'ipsec' event handling
v3: fix CI errors
---
 drivers/net/iavf/iavf.h        |   2 +
 drivers/net/iavf/iavf_ethdev.c |   5 ++
 drivers/net/iavf/iavf_vchnl.c  | 152 +++++++++++++++++++++++++++++++--
 3 files changed, 153 insertions(+), 6 deletions(-)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 26b858f6f0..1edebab8dc 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -424,6 +424,8 @@ _atomic_set_async_response_cmd(struct iavf_info *vf, enum virtchnl_ops ops)
 }
 int iavf_check_api_version(struct iavf_adapter *adapter);
 int iavf_get_vf_resource(struct iavf_adapter *adapter);
+void iavf_dev_event_handler_fini(void);
+int iavf_dev_event_handler_init(void);
 void iavf_handle_virtchnl_msg(struct rte_eth_dev *dev);
 int iavf_enable_vlan_strip(struct iavf_adapter *adapter);
 int iavf_disable_vlan_strip(struct iavf_adapter *adapter);
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index 782be82c7f..633d684804 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -2631,6 +2631,9 @@ iavf_dev_init(struct rte_eth_dev *eth_dev)
 	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
 			&eth_dev->data->mac_addrs[0]);
 
+	if (iavf_dev_event_handler_init())
+		goto init_vf_err;
+
 	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) {
 		/* register callback func to eal lib */
 		rte_intr_callback_register(pci_dev->intr_handle,
@@ -2785,6 +2788,8 @@ iavf_dev_uninit(struct rte_eth_dev *dev)
 
 	iavf_dev_close(dev);
 
+	iavf_dev_event_handler_fini();
+
 	return 0;
 }
 
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 4327c5a786..43e18ca5f7 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -2,6 +2,7 @@
  * Copyright(c) 2017 Intel Corporation
  */
 
+#include
 #include
 #include
 #include
@@ -11,6 +12,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 
@@ -27,6 +29,145 @@
 #define MAX_TRY_TIMES 2000
 #define ASQ_DELAY_MS 1
 
+#define MAX_EVENT_PENDING 16
+
+struct iavf_event_element {
+	TAILQ_ENTRY(iavf_event_element) next;
+	struct rte_eth_dev *dev;
+	enum rte_eth_event_type event;
+	void *param;
+	size_t param_alloc_size;
+	uint8_t param_alloc_data[0];
+};
+
+struct iavf_event_handler {
+	uint32_t ndev;
+	pthread_t tid;
+	int fd[2];
+	pthread_mutex_t lock;
+	TAILQ_HEAD(event_list, iavf_event_element) pending;
+};
+
+static struct iavf_event_handler event_handler = {
+	.fd = {-1, -1},
+};
+
+#ifndef TAILQ_FOREACH_SAFE
+#define TAILQ_FOREACH_SAFE(var, head, field, tvar) \
+	for ((var) = TAILQ_FIRST((head)); \
+	     (var) && ((tvar) = TAILQ_NEXT((var), field), 1); \
+	     (var) = (tvar))
+#endif
+
+static void *
+iavf_dev_event_handle(void *param __rte_unused)
+{
+	struct iavf_event_handler *handler = &event_handler;
+	TAILQ_HEAD(event_list, iavf_event_element) pending;
+
+	while (true) {
+		char unused[MAX_EVENT_PENDING];
+		ssize_t nr = read(handler->fd[0], &unused, sizeof(unused));
+		if (nr <= 0)
+			break;
+
+		TAILQ_INIT(&pending);
+		pthread_mutex_lock(&handler->lock);
+		TAILQ_CONCAT(&pending, &handler->pending, next);
+		pthread_mutex_unlock(&handler->lock);
+
+		struct iavf_event_element *pos, *save_next;
+		TAILQ_FOREACH_SAFE(pos, &pending, next, save_next) {
+			TAILQ_REMOVE(&pending, pos, next);
+			rte_eth_dev_callback_process(pos->dev, pos->event, pos->param);
+			rte_free(pos);
+		}
+	}
+	return NULL;
+}
+
+static void
+iavf_dev_event_post(struct rte_eth_dev *dev,
+		enum rte_eth_event_type event,
+		void *param, size_t param_alloc_size)
+{
+	struct iavf_event_handler *handler = &event_handler;
+	char notify_byte;
+	struct iavf_event_element *elem = rte_malloc(NULL, sizeof(*elem) + param_alloc_size, 0);
+	if (!elem)
+		return;
+
+	elem->dev = dev;
+	elem->event = event;
+	elem->param = param;
+	elem->param_alloc_size = param_alloc_size;
+	if (param && param_alloc_size) {
+		rte_memcpy(elem->param_alloc_data, param, param_alloc_size);
+		elem->param = elem->param_alloc_data;
+	}
+
+	pthread_mutex_lock(&handler->lock);
+	TAILQ_INSERT_TAIL(&handler->pending, elem, next);
+	pthread_mutex_unlock(&handler->lock);
+
+	ssize_t nw = write(handler->fd[1], &notify_byte, 1);
+	RTE_SET_USED(nw);
+}
+
+int
+iavf_dev_event_handler_init(void)
+{
+	struct iavf_event_handler *handler = &event_handler;
+
+	if (__atomic_add_fetch(&handler->ndev, 1, __ATOMIC_RELAXED) != 1)
+		return 0;
+#if defined(RTE_EXEC_ENV_IS_WINDOWS) && RTE_EXEC_ENV_IS_WINDOWS != 0
+	int err = _pipe(handler->fd, MAX_EVENT_PENDING, O_BINARY);
+#else
+	int err = pipe(handler->fd);
+#endif
+	if (err != 0) {
+		__atomic_sub_fetch(&handler->ndev, 1, __ATOMIC_RELAXED);
+		return -1;
+	}
+
+	TAILQ_INIT(&handler->pending);
+	pthread_mutex_init(&handler->lock, NULL);
+
+	if (rte_ctrl_thread_create(&handler->tid, "iavf-event-thread",
+				NULL, iavf_dev_event_handle, NULL)) {
+		__atomic_sub_fetch(&handler->ndev, 1, __ATOMIC_RELAXED);
+		return -1;
+	}
+
+	return 0;
+}
+
+void
+iavf_dev_event_handler_fini(void)
+{
+	struct iavf_event_handler *handler = &event_handler;
+
+	if (__atomic_sub_fetch(&handler->ndev, 1, __ATOMIC_RELAXED) != 0)
+		return;
+
+	int unused = pthread_cancel(handler->tid);
+	RTE_SET_USED(unused);
+	close(handler->fd[0]);
+	close(handler->fd[1]);
+	handler->fd[0] = -1;
+	handler->fd[1] = -1;
+
+	pthread_join(handler->tid, NULL);
+	pthread_mutex_destroy(&handler->lock);
+
+	struct iavf_event_element *pos, *save_next;
+	TAILQ_FOREACH_SAFE(pos, &handler->pending, next, save_next) {
+		TAILQ_REMOVE(&handler->pending, pos, next);
+		rte_free(pos);
+	}
+}
+
 static uint32_t
 iavf_convert_link_speed(enum virtchnl_link_speed virt_link_speed)
 {
@@ -278,8 +419,8 @@ iavf_handle_pf_event_msg(struct rte_eth_dev *dev, uint8_t *msg,
 	case VIRTCHNL_EVENT_RESET_IMPENDING:
 		PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_RESET_IMPENDING event");
 		vf->vf_reset = true;
-		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
-					     NULL);
+		iavf_dev_event_post(dev, RTE_ETH_EVENT_INTR_RESET,
+				NULL, 0);
 		break;
 	case VIRTCHNL_EVENT_LINK_CHANGE:
 		PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_LINK_CHANGE event");
@@ -293,7 +434,7 @@ iavf_handle_pf_event_msg(struct rte_eth_dev *dev, uint8_t *msg,
 			vf->link_speed = iavf_convert_link_speed(speed);
 		}
 		iavf_dev_link_update(dev, 0);
-		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
+		iavf_dev_event_post(dev, RTE_ETH_EVENT_INTR_LSC, NULL, 0);
 		break;
 	case VIRTCHNL_EVENT_PF_DRIVER_CLOSE:
 		PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_PF_DRIVER_CLOSE event");
@@ -359,9 +500,8 @@ iavf_handle_virtchnl_msg(struct rte_eth_dev *dev)
 			desc.subtype = RTE_ETH_EVENT_IPSEC_UNKNOWN;
 			desc.metadata = ev->ipsec_event_data;
 
-			rte_eth_dev_callback_process(dev,
-					RTE_ETH_EVENT_IPSEC,
-					&desc);
+			iavf_dev_event_post(dev, RTE_ETH_EVENT_IPSEC,
+					&desc, sizeof(desc));
 			return;
 		}
 
-- 
2.34.1
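For completeness, this is roughly how an application (or, internally, the bond PMD mentioned in the commit message) registers the LSC callback that this patch now invokes from the dedicated iavf event thread instead of the eal-intr-thread. Port id 0 and the callback body are illustrative assumptions, not part of the patch:

#include <stdio.h>
#include <rte_ethdev.h>

/* Hypothetical application callback, invoked on link state change. */
static int
lsc_event_callback(uint16_t port_id, enum rte_eth_event_type event,
		   void *cb_arg, void *ret_param)
{
	struct rte_eth_link link;

	(void)event;
	(void)cb_arg;
	(void)ret_param;
	if (rte_eth_link_get_nowait(port_id, &link) == 0)
		printf("port %u link is %s\n", port_id,
		       link.link_status ? "up" : "down");
	return 0;
}

/* Register for LSC events on port 0 (illustrative port id). */
static void
register_lsc_callback(void)
{
	rte_eth_dev_callback_register(0, RTE_ETH_EVENT_INTR_LSC,
				      lsc_event_callback, NULL);
}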