From: Haiyue Wang
To: dev@dpdk.org, xiaolong.ye@intel.com, qi.z.zhang@intel.com, jingjing.wu@intel.com, qiming.yang@intel.com, beilei.xing@intel.com
Cc: wei.zhao1@intel.com, Haiyue Wang
Date: Fri, 27 Mar 2020 10:56:41 +0800
Message-Id: <20200327025641.31008-8-haiyue.wang@intel.com>
X-Mailer: git-send-email 2.26.0
In-Reply-To: <20200327025641.31008-1-haiyue.wang@intel.com>
References: <20200309141437.11800-1-haiyue.wang@intel.com> <20200327025641.31008-1-haiyue.wang@intel.com>
Subject: [dpdk-dev] [PATCH v6 7/7] net/ice: get the VF hardware index in DCF

The DCF (Device Config Function) needs the hardware index of the VFs to
control the flow setting. Since this index may change when a VF is reset,
it also has to be refreshed in the VF reset event handling.

Signed-off-by: Haiyue Wang
Acked-by: Qi Zhang
---
 drivers/net/ice/Makefile         |  1 +
 drivers/net/ice/ice_dcf.c        | 89 +++++++++++++++++++++++++++++++
 drivers/net/ice/ice_dcf.h        |  6 +++
 drivers/net/ice/ice_dcf_parent.c | 90 +++++++++++++++++++++++++++++++-
 4 files changed, 185 insertions(+), 1 deletion(-)

diff --git a/drivers/net/ice/Makefile b/drivers/net/ice/Makefile
index 3ecc72219..622a853af 100644
--- a/drivers/net/ice/Makefile
+++ b/drivers/net/ice/Makefile
@@ -16,6 +16,7 @@ CFLAGS += -I$(RTE_SDK)/drivers/common/iavf
 LDLIBS += -lrte_eal -lrte_mbuf -lrte_ethdev -lrte_kvargs
 LDLIBS += -lrte_bus_pci -lrte_mempool -lrte_hash
 LDLIBS += -lrte_net -lrte_common_iavf
+LDLIBS += -lpthread
 
 EXPORT_MAP := rte_pmd_ice_version.map
 
diff --git a/drivers/net/ice/ice_dcf.c b/drivers/net/ice/ice_dcf.c
index c799cdf83..4c30f0e60 100644
--- a/drivers/net/ice/ice_dcf.c
+++ b/drivers/net/ice/ice_dcf.c
@@ -269,6 +269,65 @@ ice_dcf_get_vf_resource(struct ice_dcf_hw *hw)
 	return 0;
 }
 
+static int
+ice_dcf_get_vf_vsi_map(struct ice_dcf_hw *hw)
+{
+	struct virtchnl_dcf_vsi_map *vsi_map;
+	uint32_t valid_msg_len;
+	uint16_t len;
+	int err;
+
+	err = ice_dcf_send_cmd_req_no_irq(hw, VIRTCHNL_OP_DCF_GET_VSI_MAP,
+					  NULL, 0);
+	if (err) {
+		PMD_DRV_LOG(ERR, "Failed to send msg OP_DCF_GET_VSI_MAP");
+		return err;
+	}
+
+	err = ice_dcf_recv_cmd_rsp_no_irq(hw, VIRTCHNL_OP_DCF_GET_VSI_MAP,
+					  hw->arq_buf, ICE_DCF_AQ_BUF_SZ,
+					  &len);
+	if (err) {
+		PMD_DRV_LOG(ERR, "Failed to get response of OP_DCF_GET_VSI_MAP");
+		return err;
+	}
+
+	vsi_map = (struct virtchnl_dcf_vsi_map *)hw->arq_buf;
+	valid_msg_len = (vsi_map->num_vfs - 1) * sizeof(vsi_map->vf_vsi[0]) +
+			sizeof(*vsi_map);
+	if (len != valid_msg_len) {
+		PMD_DRV_LOG(ERR, "invalid vf vsi map response with length %u",
+			    len);
+		return -EINVAL;
+	}
+
+	if (hw->num_vfs != 0 && hw->num_vfs != vsi_map->num_vfs) {
+		PMD_DRV_LOG(ERR, "The number VSI map (%u) doesn't match the number of VFs (%u)",
+			    vsi_map->num_vfs, hw->num_vfs);
+		return -EINVAL;
+	}
+
+	len = vsi_map->num_vfs * sizeof(vsi_map->vf_vsi[0]);
+
+	if (!hw->vf_vsi_map) {
+		hw->vf_vsi_map = rte_zmalloc("vf_vsi_ctx", len, 0);
+		if (!hw->vf_vsi_map) {
+			PMD_DRV_LOG(ERR, "Failed to alloc memory for VSI context");
+			return -ENOMEM;
+		}
+
+		hw->num_vfs = vsi_map->num_vfs;
+	}
+
+	if (!memcmp(hw->vf_vsi_map, vsi_map->vf_vsi, len)) {
+		PMD_DRV_LOG(DEBUG, "VF VSI map doesn't change");
+		return 1;
+	}
+
+	rte_memcpy(hw->vf_vsi_map, vsi_map->vf_vsi, len);
+	return 0;
+}
+
 static int
 ice_dcf_mode_disable(struct ice_dcf_hw *hw)
 {
@@ -466,6 +525,28 @@ ice_dcf_send_aq_cmd(void *dcf_hw, struct ice_aq_desc *desc,
 	return err;
 }
 
+int
+ice_dcf_handle_vsi_update_event(struct ice_dcf_hw *hw)
+{
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(hw->eth_dev);
+	int err = 0;
+
+	rte_spinlock_lock(&hw->vc_cmd_send_lock);
+
+	rte_intr_disable(&pci_dev->intr_handle);
+	ice_dcf_disable_irq0(hw);
+
+	if (ice_dcf_get_vf_resource(hw) || ice_dcf_get_vf_vsi_map(hw))
+		err = -1;
+
+	rte_intr_enable(&pci_dev->intr_handle);
+	ice_dcf_enable_irq0(hw);
+
+	rte_spinlock_unlock(&hw->vc_cmd_send_lock);
+
+	return err;
+}
+
 int
 ice_dcf_init_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw)
 {
@@ -533,6 +614,13 @@ ice_dcf_init_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw)
 		goto err_alloc;
 	}
 
+	if (ice_dcf_get_vf_vsi_map(hw) < 0) {
+		PMD_INIT_LOG(ERR, "Failed to get VF VSI map");
+		ice_dcf_mode_disable(hw);
+		goto err_alloc;
+	}
+
+	hw->eth_dev = eth_dev;
 	rte_intr_callback_register(&pci_dev->intr_handle,
 				   ice_dcf_dev_interrupt_handler, hw);
 	rte_intr_enable(&pci_dev->intr_handle);
@@ -565,5 +653,6 @@ ice_dcf_uninit_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw)
 	iavf_shutdown_adminq(&hw->avf);
 
 	rte_free(hw->arq_buf);
+	rte_free(hw->vf_vsi_map);
 	rte_free(hw->vf_res);
 }
diff --git a/drivers/net/ice/ice_dcf.h b/drivers/net/ice/ice_dcf.h
index ecd6303a0..d2e447b48 100644
--- a/drivers/net/ice/ice_dcf.h
+++ b/drivers/net/ice/ice_dcf.h
@@ -41,16 +41,22 @@ struct ice_dcf_hw {
 
 	uint8_t *arq_buf;
 
+	uint16_t num_vfs;
+	uint16_t *vf_vsi_map;
+
 	struct virtchnl_version_info virtchnl_version;
 	struct virtchnl_vf_resource *vf_res; /* VF resource */
 	struct virtchnl_vsi_resource *vsi_res; /* LAN VSI */
 	uint16_t vsi_id;
+
+	struct rte_eth_dev *eth_dev;
 };
 
 int ice_dcf_execute_virtchnl_cmd(struct ice_dcf_hw *hw,
 				 struct dcf_virtchnl_cmd *cmd);
 int ice_dcf_send_aq_cmd(void *dcf_hw, struct ice_aq_desc *desc,
 			void *buf, uint16_t buf_size);
+int ice_dcf_handle_vsi_update_event(struct ice_dcf_hw *hw);
 int ice_dcf_init_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw);
 void ice_dcf_uninit_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw);
 
diff --git a/drivers/net/ice/ice_dcf_parent.c b/drivers/net/ice/ice_dcf_parent.c
index 138838a73..ff08292a1 100644
--- a/drivers/net/ice/ice_dcf_parent.c
+++ b/drivers/net/ice/ice_dcf_parent.c
@@ -3,15 +3,92 @@
  */
 #include 
 #include 
+#include 
 
 #include 
+
 #include "ice_dcf_ethdev.h"
 
+#define ICE_DCF_VSI_UPDATE_SERVICE_INTERVAL	100000 /* us */
+static rte_spinlock_t vsi_update_lock = RTE_SPINLOCK_INITIALIZER;
+
+static __rte_always_inline void
+ice_dcf_update_vsi_ctx(struct ice_hw *hw, uint16_t vsi_handle,
+		       uint16_t vsi_map)
+{
+	struct ice_vsi_ctx *vsi_ctx;
+
+	if (unlikely(vsi_handle >= ICE_MAX_VSI)) {
+		PMD_DRV_LOG(ERR, "Invalid vsi handle %u", vsi_handle);
+		return;
+	}
+
+	vsi_ctx = hw->vsi_ctx[vsi_handle];
+
+	if (vsi_map & VIRTCHNL_DCF_VF_VSI_VALID) {
+		if (!vsi_ctx) {
+			vsi_ctx = ice_malloc(hw, sizeof(*vsi_ctx));
+			if (!vsi_ctx) {
+				PMD_DRV_LOG(ERR, "No memory for vsi context %u",
+					    vsi_handle);
+				return;
+			}
+		}
+
+		vsi_ctx->vsi_num = (vsi_map & VIRTCHNL_DCF_VF_VSI_ID_M) >>
+					      VIRTCHNL_DCF_VF_VSI_ID_S;
+		hw->vsi_ctx[vsi_handle] = vsi_ctx;
+
+		PMD_DRV_LOG(DEBUG, "VF%u is assigned with vsi number %u",
+			    vsi_handle, vsi_ctx->vsi_num);
+	} else {
+		hw->vsi_ctx[vsi_handle] = NULL;
+
+		ice_free(hw, vsi_ctx);
+
+		PMD_DRV_LOG(NOTICE, "VF%u is disabled", vsi_handle);
+	}
+}
+
+static void
+ice_dcf_update_vf_vsi_map(struct ice_hw *hw, uint16_t num_vfs,
+			  uint16_t *vf_vsi_map)
+{
+	uint16_t vf_id;
+
+	for (vf_id = 0; vf_id < num_vfs; vf_id++)
+		ice_dcf_update_vsi_ctx(hw, vf_id, vf_vsi_map[vf_id]);
+}
+
+static void*
+ice_dcf_vsi_update_service_handler(void *param)
+{
+	struct ice_dcf_hw *hw = param;
+
+	usleep(ICE_DCF_VSI_UPDATE_SERVICE_INTERVAL);
+
+	rte_spinlock_lock(&vsi_update_lock);
+
+	if (!ice_dcf_handle_vsi_update_event(hw)) {
+		struct ice_dcf_adapter *dcf_ad =
+			container_of(hw, struct ice_dcf_adapter, real_hw);
+
+		ice_dcf_update_vf_vsi_map(&dcf_ad->parent.hw,
+					  hw->num_vfs, hw->vf_vsi_map);
+	}
+
+	rte_spinlock_unlock(&vsi_update_lock);
+
+	return NULL;
+}
+
 void
-ice_dcf_handle_pf_event_msg(__rte_unused struct ice_dcf_hw *dcf_hw,
+ice_dcf_handle_pf_event_msg(struct ice_dcf_hw *dcf_hw,
 			    uint8_t *msg, uint16_t msglen)
 {
 	struct virtchnl_pf_event *pf_msg = (struct virtchnl_pf_event *)msg;
+	pthread_t thread;
 
 	if (msglen < sizeof(struct virtchnl_pf_event)) {
 		PMD_DRV_LOG(DEBUG, "Invalid event message length : %u", msglen);
@@ -21,6 +98,8 @@ ice_dcf_handle_pf_event_msg(__rte_unused struct ice_dcf_hw *dcf_hw,
 	switch (pf_msg->event) {
 	case VIRTCHNL_EVENT_RESET_IMPENDING:
 		PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_RESET_IMPENDING event");
+		pthread_create(&thread, NULL,
+			       ice_dcf_vsi_update_service_handler, dcf_hw);
 		break;
 	case VIRTCHNL_EVENT_LINK_CHANGE:
 		PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_LINK_CHANGE event");
@@ -28,6 +107,13 @@ ice_dcf_handle_pf_event_msg(__rte_unused struct ice_dcf_hw *dcf_hw,
 	case VIRTCHNL_EVENT_PF_DRIVER_CLOSE:
 		PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_PF_DRIVER_CLOSE event");
 		break;
+	case VIRTCHNL_EVENT_DCF_VSI_MAP_UPDATE:
+		PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_DCF_VSI_MAP_UPDATE event : VF%u with VSI num %u",
+			    pf_msg->event_data.vf_vsi_map.vf_id,
+			    pf_msg->event_data.vf_vsi_map.vsi_id);
+		pthread_create(&thread, NULL,
+			       ice_dcf_vsi_update_service_handler, dcf_hw);
+		break;
 	default:
 		PMD_DRV_LOG(ERR, "Unknown event received %u", pf_msg->event);
 		break;
@@ -235,6 +321,8 @@ ice_dcf_init_parent_adapter(struct rte_eth_dev *eth_dev)
 	}
 	parent_adapter->active_pkg_type = ice_load_pkg_type(parent_hw);
 
+	ice_dcf_update_vf_vsi_map(parent_hw, hw->num_vfs, hw->vf_vsi_map);
+
 	mac = (const struct rte_ether_addr *)hw->avf.mac.addr;
 	if (rte_is_valid_assigned_ether_addr(mac))
 		rte_ether_addr_copy(mac, &parent_adapter->pf.dev_addr);
-- 
2.26.0
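
For readers new to the VSI map handled above: each vf_vsi[] entry packs a
validity flag and the VSI number into one 16-bit word, and
ice_dcf_update_vsi_ctx() recovers the VSI number with the
VIRTCHNL_DCF_VF_VSI_ID_M / VIRTCHNL_DCF_VF_VSI_ID_S mask and shift. The
standalone sketch below only mimics that decode step outside the driver; the
DEMO_* bit positions are illustrative placeholders (the authoritative values
come from the iavf virtchnl.h header) and the decode_vf_vsi() helper is
hypothetical, not part of this patch.

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative placeholders only: the real masks and shifts are defined in
 * the iavf virtchnl.h header and may use different values.
 */
#define DEMO_VF_VSI_ID_S   0
#define DEMO_VF_VSI_ID_M   (0xFFFu << DEMO_VF_VSI_ID_S)
#define DEMO_VF_VSI_VALID  (1u << 15)

/* Hypothetical helper mirroring the mask/shift in ice_dcf_update_vsi_ctx(). */
static int
decode_vf_vsi(uint16_t vsi_map, uint16_t *vsi_num)
{
	if (!(vsi_map & DEMO_VF_VSI_VALID))
		return -1; /* VF is disabled, no VSI assigned */

	*vsi_num = (vsi_map & DEMO_VF_VSI_ID_M) >> DEMO_VF_VSI_ID_S;
	return 0;
}

int
main(void)
{
	/* Two made-up map entries: one valid (VSI 5), one disabled. */
	uint16_t map[] = { DEMO_VF_VSI_VALID | 0x005, 0x000 };
	uint16_t vsi_num;
	unsigned int vf;

	for (vf = 0; vf < 2; vf++) {
		if (decode_vf_vsi(map[vf], &vsi_num) == 0)
			printf("VF%u -> VSI %u\n", vf, vsi_num);
		else
			printf("VF%u disabled\n", vf);
	}
	return 0;
}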