From mboxrd@z Thu Jan  1 00:00:00 1970
From: Ting Xu <ting.xu@intel.com>
To: dev@dpdk.org
Cc: qi.z.zhang@intel.com, beilei.xing@intel.com, jingjing.wu@intel.com,
	Ting Xu <ting.xu@intel.com>
Date: Fri, 16 Oct 2020 09:21:41 +0800
Message-Id: <20201016012142.54144-2-ting.xu@intel.com>
X-Mailer: git-send-email 2.17.1
In-Reply-To: <20201016012142.54144-1-ting.xu@intel.com>
References: <20200909072028.16726-1-ting.xu@intel.com>
	<20201016012142.54144-1-ting.xu@intel.com>
Subject: [dpdk-dev] [PATCH v5 1/2] net/iavf: add IAVF request queues function

Add a new virtchnl function to request additional queues from the PF.
Currently, the default number of queue pairs when a VF is created is 16.
To support up to 256 queue pairs, enable this request queues function.
Since the request queues command may return an event message rather
than a direct response, modify iavf_read_msg_from_pf() to identify the
event opcode and mark the VF reset status.

Signed-off-by: Ting Xu <ting.xu@intel.com>
---
 drivers/net/iavf/iavf.h        |   9 ++
 drivers/net/iavf/iavf_ethdev.c |  11 +-
 drivers/net/iavf/iavf_vchnl.c  | 226 +++++++++++++++++++++++++--------
 3 files changed, 192 insertions(+), 54 deletions(-)
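For illustration only (editor's sketch, not part of the patch): the new
iavf_request_queues() has the unusual contract that success is signaled
by a VF reset event rather than a plain reply, because the PF must reset
the VF before the extra queue pairs become usable. A minimal caller
flow, where example_grow_queues() and example_reinit_vf() are
hypothetical helpers:

	/* Editor's sketch, not patch content. example_reinit_vf() is
	 * hypothetical; error handling is condensed.
	 */
	static int
	example_grow_queues(struct iavf_adapter *adapter, uint16_t num)
	{
		struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);

		/* Ask the PF for 'num' queue pairs (up to 256). */
		if (iavf_request_queues(adapter, num) != 0)
			return -1; /* unsupported, refused, or no response */

		/* On success the PF has sent VIRTCHNL_EVENT_RESET_IMPENDING,
		 * so vf->vf_reset is now set and the VF must be reinitialized
		 * before the additional queue pairs can be configured.
		 */
		if (vf->vf_reset)
			return example_reinit_vf(adapter);

		return 0;
	}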
diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index d56611608..93c165c62 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -107,6 +107,14 @@ struct iavf_fdir_info {
 /* TODO: is that correct to assume the max number to be 16 ?*/
 #define IAVF_MAX_MSIX_VECTORS   16
 
+/* Message type read in admin queue from PF */
+enum iavf_aq_result {
+	IAVF_MSG_ERR = -1, /* Error when accessing the admin queue */
+	IAVF_MSG_NON,      /* Read nothing from the admin queue */
+	IAVF_MSG_SYS,      /* Read a system msg from the admin queue */
+	IAVF_MSG_CMD,      /* Read an async command result */
+};
+
 /* Structure to store private data specific for VF instance. */
 struct iavf_info {
 	uint16_t num_queue_pairs;
@@ -301,4 +309,5 @@ int iavf_add_del_rss_cfg(struct iavf_adapter *adapter,
 int iavf_add_del_mc_addr_list(struct iavf_adapter *adapter,
 			struct rte_ether_addr *mc_addrs,
 			uint32_t mc_addrs_num, bool add);
+int iavf_request_queues(struct iavf_adapter *adapter, uint16_t num);
 #endif /* _IAVF_ETHDEV_H_ */
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index 8b1cf8f1c..a4a28b885 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -1282,7 +1282,7 @@ iavf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
 }
 
 static int
-iavf_check_vf_reset_done(struct iavf_hw *hw)
+iavf_check_vf_reset_done(struct iavf_hw *hw, struct iavf_info *vf)
 {
 	int i, reset;
 
@@ -1299,6 +1299,9 @@ iavf_check_vf_reset_done(struct iavf_hw *hw)
 	if (i >= IAVF_RESET_WAIT_CNT)
 		return -1;
 
+	/* VF is not in reset, or the reset is completed */
+	vf->vf_reset = false;
+
 	return 0;
 }
 
@@ -1666,7 +1669,7 @@ iavf_init_vf(struct rte_eth_dev *dev)
 		goto err;
 	}
 
-	err = iavf_check_vf_reset_done(hw);
+	err = iavf_check_vf_reset_done(hw, vf);
 	if (err) {
 		PMD_INIT_LOG(ERR, "VF is still resetting");
 		goto err;
@@ -1911,7 +1914,9 @@ iavf_dev_close(struct rte_eth_dev *dev)
 
 	iavf_dev_stop(dev);
 	iavf_flow_flush(dev, NULL);
-	iavf_flow_uninit(adapter);
+	/* If VF is in reset, the adminq is disabled; skip operations via adminq */
+	if (!vf->vf_reset)
+		iavf_flow_uninit(adapter);
 
 	/*
 	 * disable promiscuous mode before reset vf
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 5e7142893..11a1ff608 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -17,6 +17,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 
 #include "iavf.h"
@@ -25,14 +26,54 @@
 #define MAX_TRY_TIMES	200
 #define ASQ_DELAY_MS	10
 
+static uint32_t
+iavf_convert_link_speed(enum virtchnl_link_speed virt_link_speed)
+{
+	uint32_t speed;
+
+	switch (virt_link_speed) {
+	case VIRTCHNL_LINK_SPEED_100MB:
+		speed = 100;
+		break;
+	case VIRTCHNL_LINK_SPEED_1GB:
+		speed = 1000;
+		break;
+	case VIRTCHNL_LINK_SPEED_10GB:
+		speed = 10000;
+		break;
+	case VIRTCHNL_LINK_SPEED_40GB:
+		speed = 40000;
+		break;
+	case VIRTCHNL_LINK_SPEED_20GB:
+		speed = 20000;
+		break;
+	case VIRTCHNL_LINK_SPEED_25GB:
+		speed = 25000;
+		break;
+	case VIRTCHNL_LINK_SPEED_2_5GB:
+		speed = 2500;
+		break;
+	case VIRTCHNL_LINK_SPEED_5GB:
+		speed = 5000;
+		break;
+	default:
+		speed = 0;
+		break;
+	}
+
+	return speed;
+}
+
 /* Read data in admin queue to get msg from pf driver */
-static enum iavf_status
+static enum iavf_aq_result
 iavf_read_msg_from_pf(struct iavf_adapter *adapter, uint16_t buf_len,
 		      uint8_t *buf)
 {
 	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct rte_eth_dev *dev = adapter->eth_dev;
 	struct iavf_arq_event_info event;
+	enum iavf_aq_result result = IAVF_MSG_NON;
 	enum virtchnl_ops opcode;
 	int ret;
 
@@ -42,7 +83,9 @@ iavf_read_msg_from_pf(struct iavf_adapter *adapter, uint16_t buf_len,
 	/* Can't read any msg from adminQ */
 	if (ret) {
 		PMD_DRV_LOG(DEBUG, "Can't read msg from AQ");
-		return ret;
+		if (ret != IAVF_ERR_ADMIN_QUEUE_NO_WORK)
+			result = IAVF_MSG_ERR;
+		return result;
 	}
 
 	opcode = (enum virtchnl_ops)rte_le_to_cpu_32(event.desc.cookie_high);
@@ -52,16 +95,51 @@ iavf_read_msg_from_pf(struct iavf_adapter *adapter, uint16_t buf_len,
 	PMD_DRV_LOG(DEBUG, "AQ from pf carries opcode %u, retval %d",
 		    opcode, vf->cmd_retval);
 
-	if (opcode != vf->pend_cmd) {
-		if (opcode != VIRTCHNL_OP_EVENT) {
-			PMD_DRV_LOG(WARNING,
-				    "command mismatch, expect %u, get %u",
-				    vf->pend_cmd, opcode);
+	if (opcode == VIRTCHNL_OP_EVENT) {
+		struct virtchnl_pf_event *vpe =
+			(struct virtchnl_pf_event *)event.msg_buf;
+
+		result = IAVF_MSG_SYS;
+		switch (vpe->event) {
+		case VIRTCHNL_EVENT_LINK_CHANGE:
+			vf->link_up =
+				vpe->event_data.link_event.link_status;
+			if (vf->vf_res->vf_cap_flags &
+				VIRTCHNL_VF_CAP_ADV_LINK_SPEED) {
+				vf->link_speed =
+					vpe->event_data.link_event_adv.link_speed;
+			} else {
+				enum virtchnl_link_speed speed;
+				speed = vpe->event_data.link_event.link_speed;
+				vf->link_speed = iavf_convert_link_speed(speed);
+			}
+			iavf_dev_link_update(dev, 0);
+			PMD_DRV_LOG(INFO, "Link status update:%s",
+				    vf->link_up ? "up" : "down");
+			break;
+		case VIRTCHNL_EVENT_RESET_IMPENDING:
+			vf->vf_reset = true;
+			PMD_DRV_LOG(INFO, "VF is resetting");
+			break;
+		case VIRTCHNL_EVENT_PF_DRIVER_CLOSE:
+			vf->dev_closed = true;
+			PMD_DRV_LOG(INFO, "PF driver closed");
+			break;
+		default:
+			PMD_DRV_LOG(ERR, "%s: Unknown event %d from pf",
+				    __func__, vpe->event);
+		}
+	} else {
+		/* async reply msg on command issued by vf previously */
+		result = IAVF_MSG_CMD;
+		if (opcode != vf->pend_cmd) {
+			PMD_DRV_LOG(WARNING, "command mismatch, expect %u, get %u",
+				    vf->pend_cmd, opcode);
+			result = IAVF_MSG_ERR;
 		}
-		return IAVF_ERR_OPCODE_MISMATCH;
 	}
 
-	return IAVF_SUCCESS;
+	return result;
 }
 
 static int
@@ -69,6 +147,7 @@ iavf_execute_vf_cmd(struct iavf_adapter *adapter, struct iavf_cmd_info *args)
 {
 	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	enum iavf_aq_result result;
 	enum iavf_status ret;
 	int err = 0;
 	int i = 0;
@@ -97,9 +176,9 @@ iavf_execute_vf_cmd(struct iavf_adapter *adapter, struct iavf_cmd_info *args)
 	case VIRTCHNL_OP_GET_SUPPORTED_RXDIDS:
 		/* for init virtchnl ops, need to poll the response */
 		do {
-			ret = iavf_read_msg_from_pf(adapter, args->out_size,
+			result = iavf_read_msg_from_pf(adapter, args->out_size,
 					args->out_buffer);
-			if (ret == IAVF_SUCCESS)
+			if (result == IAVF_MSG_CMD)
 				break;
 			rte_delay_ms(ASQ_DELAY_MS);
 		} while (i++ < MAX_TRY_TIMES);
@@ -111,7 +190,33 @@ iavf_execute_vf_cmd(struct iavf_adapter *adapter, struct iavf_cmd_info *args)
 		}
 		_clear_cmd(vf);
 		break;
-
+	case VIRTCHNL_OP_REQUEST_QUEUES:
+		/*
+		 * Ignore the async reply and wait for the system message:
+		 * vf_reset becomes true if VIRTCHNL_EVENT_RESET_IMPENDING
+		 * is received; otherwise the request queues command failed.
+		 */
+		do {
+			result = iavf_read_msg_from_pf(adapter, args->out_size,
+						args->out_buffer);
+			if (result == IAVF_MSG_SYS && vf->vf_reset) {
+				break;
+			} else if (result == IAVF_MSG_CMD ||
+				result == IAVF_MSG_ERR) {
+				err = -1;
+				break;
+			}
+			rte_delay_ms(ASQ_DELAY_MS);
+			/* If no msg or a non-reset sys event was read, continue */
+		} while (i++ < MAX_TRY_TIMES);
+		if (i >= MAX_TRY_TIMES ||
+			vf->cmd_retval != VIRTCHNL_STATUS_SUCCESS) {
+			err = -1;
+			PMD_DRV_LOG(ERR, "No response or return failure (%d)"
+				    " for cmd %d", vf->cmd_retval, args->ops);
+		}
+		_clear_cmd(vf);
+		break;
 	default:
 		/* For other virtchnl ops in running time,
 		 * wait for the cmd done flag.
@@ -136,44 +241,6 @@ iavf_execute_vf_cmd(struct iavf_adapter *adapter, struct iavf_cmd_info *args)
 	return err;
 }
 
-static uint32_t
-iavf_convert_link_speed(enum virtchnl_link_speed virt_link_speed)
-{
-	uint32_t speed;
-
-	switch (virt_link_speed) {
-	case VIRTCHNL_LINK_SPEED_100MB:
-		speed = 100;
-		break;
-	case VIRTCHNL_LINK_SPEED_1GB:
-		speed = 1000;
-		break;
-	case VIRTCHNL_LINK_SPEED_10GB:
-		speed = 10000;
-		break;
-	case VIRTCHNL_LINK_SPEED_40GB:
-		speed = 40000;
-		break;
-	case VIRTCHNL_LINK_SPEED_20GB:
-		speed = 20000;
-		break;
-	case VIRTCHNL_LINK_SPEED_25GB:
-		speed = 25000;
-		break;
-	case VIRTCHNL_LINK_SPEED_2_5GB:
-		speed = 2500;
-		break;
-	case VIRTCHNL_LINK_SPEED_5GB:
-		speed = 5000;
-		break;
-	default:
-		speed = 0;
-		break;
-	}
-
-	return speed;
-}
-
 static void
 iavf_handle_pf_event_msg(struct rte_eth_dev *dev, uint8_t *msg,
 			 uint16_t msglen)
@@ -389,7 +456,8 @@ iavf_get_vf_resource(struct iavf_adapter *adapter)
 	caps = IAVF_BASIC_OFFLOAD_CAPS | VIRTCHNL_VF_CAP_ADV_LINK_SPEED |
 		VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC |
 		VIRTCHNL_VF_OFFLOAD_FDIR_PF |
-		VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF;
+		VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF |
+		VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;
 
 	args.in_args = (uint8_t *)&caps;
 	args.in_args_size = sizeof(caps);
@@ -1148,3 +1216,59 @@ iavf_add_del_mc_addr_list(struct iavf_adapter *adapter,
 
 	return 0;
 }
+
+int
+iavf_request_queues(struct iavf_adapter *adapter, uint16_t num)
+{
+	struct rte_eth_dev *dev = adapter->eth_dev;
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+	struct virtchnl_vf_res_request vfres;
+	struct iavf_cmd_info args;
+	uint16_t num_queue_pairs;
+	int err;
+
+	if (!(vf->vf_res->vf_cap_flags &
+		VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)) {
+		PMD_DRV_LOG(ERR, "request queues not supported");
+		return -1;
+	}
+
+	if (num == 0) {
+		PMD_DRV_LOG(ERR, "queue number cannot be zero");
+		return -1;
+	}
+	vfres.num_queue_pairs = num;
+
+	args.ops = VIRTCHNL_OP_REQUEST_QUEUES;
+	args.in_args = (u8 *)&vfres;
+	args.in_args_size = sizeof(vfres);
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+
+	/*
+	 * Disable the interrupt to avoid the admin queue message being read
+	 * before iavf_read_msg_from_pf() can poll it.
+	 */
+	rte_intr_disable(&pci_dev->intr_handle);
+	err = iavf_execute_vf_cmd(adapter, &args);
+	rte_intr_enable(&pci_dev->intr_handle);
+	if (err) {
+		PMD_DRV_LOG(ERR, "fail to execute command OP_REQUEST_QUEUES");
+		return err;
+	}
+
+	/* request queues succeeded, vf is resetting */
+	if (vf->vf_reset) {
+		PMD_DRV_LOG(INFO, "vf is resetting");
+		return 0;
+	}
+
+	/* request additional queues failed, return available number */
+	num_queue_pairs =
+		((struct virtchnl_vf_res_request *)args.out_buffer)->num_queue_pairs;
+	PMD_DRV_LOG(ERR, "request queues failed, only %u queues "
+		    "available", num_queue_pairs);
+
+	return -1;
+}
-- 
2.17.1
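
For illustration only (editor's sketch, not part of the patch): the new
enum iavf_aq_result lets pollers distinguish an empty admin queue from a
real error, a PF event, and an asynchronous command reply. Condensed
from the VIRTCHNL_OP_REQUEST_QUEUES polling loop in iavf_execute_vf_cmd()
above; example_poll_request_queues() is a hypothetical name:

	/* Editor's sketch, not patch content: the expected branching on
	 * enum iavf_aq_result while waiting for a request-queues grant.
	 */
	static int
	example_poll_request_queues(struct iavf_adapter *adapter,
				    struct iavf_info *vf,
				    uint8_t *buf, uint16_t len)
	{
		int i;

		for (i = 0; i < MAX_TRY_TIMES; i++) {
			switch (iavf_read_msg_from_pf(adapter, len, buf)) {
			case IAVF_MSG_SYS: /* a PF event was handled */
				if (vf->vf_reset)
					return 0; /* reset impending: granted */
				break; /* other event: keep polling */
			case IAVF_MSG_CMD: /* async reply, so no reset event */
			case IAVF_MSG_ERR: /* admin queue access error */
				return -1;
			case IAVF_MSG_NON: /* admin queue empty: poll again */
				break;
			}
			rte_delay_ms(ASQ_DELAY_MS);
		}
		return -1; /* timed out */
	}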