From mboxrd@z Thu Jan  1 00:00:00 1970
From: Ting Xu <ting.xu@intel.com>
To: dev@dpdk.org
Cc: qi.z.zhang@intel.com, jingjing.wu@intel.com, beilei.xing@intel.com, Ting Xu <ting.xu@intel.com>
Date: Fri, 25 Sep 2020 13:59:28 +0800
Message-Id: <20200925055929.8844-2-ting.xu@intel.com>
X-Mailer: git-send-email 2.17.1
In-Reply-To: <20200925055929.8844-1-ting.xu@intel.com>
References: <20200909072028.16726-1-ting.xu@intel.com> <20200925055929.8844-1-ting.xu@intel.com>
Subject: [dpdk-dev] [PATCH v2 1/2] net/iavf: add IAVF request queues function

Add a new virtchnl function to request additional queues from the PF.
The current default number of queue pairs when a VF is created is 16.
To support up to 256 queue pairs, enable this request queues function.
Since the request queues command may return an event message instead of
a direct reply, modify iavf_read_msg_from_pf to identify the event
opcode and mark the VF reset status.

Signed-off-by: Ting Xu <ting.xu@intel.com>
---
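Usage sketch (annotation only, not part of the commit): a minimal
illustration of how a configure-time caller might drive the new helper.
The real hook-up is expected in patch 2/2 of this series, and
iavf_reinit_after_reset() is a hypothetical name standing in for the
re-initialization that must follow the PF-triggered VF reset.

static int
iavf_try_grow_queues(struct rte_eth_dev *dev, uint16_t needed)
{
	struct iavf_adapter *ad =
		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
	int ret;

	/* The current allocation already covers the request. */
	if (needed <= vf->num_queue_pairs)
		return 0;

	/* Ask the PF for more queue pairs. On success the PF answers with
	 * VIRTCHNL_EVENT_RESET_IMPENDING, iavf_request_queues() returns 0
	 * and vf->vf_reset is set.
	 */
	ret = iavf_request_queues(dev, needed);
	if (ret) {
		PMD_DRV_LOG(ERR, "failed to request %u queue pairs", needed);
		return ret;
	}

	/* Hypothetical helper: re-init the VF before using the queues. */
	return iavf_reinit_after_reset(dev);
}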
 drivers/net/iavf/iavf.h        |  17 +++++
 drivers/net/iavf/iavf_ethdev.c |  12 ++-
 drivers/net/iavf/iavf_vchnl.c  | 134 ++++++++++++++++++++++++++++++---
 3 files changed, 148 insertions(+), 15 deletions(-)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 3198d85b3..bdc75ff50 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -107,6 +107,21 @@ struct iavf_fdir_info {
 /* TODO: is that correct to assume the max number to be 16 ?*/
 #define IAVF_MAX_MSIX_VECTORS 16
 
+/* Event status from PF */
+enum pending_msg {
+	PFMSG_LINK_CHANGE = 0x1,
+	PFMSG_RESET_IMPENDING = 0x2,
+	PFMSG_DRIVER_CLOSE = 0x4,
+};
+
+/* Message type read in admin queue from PF */
+enum iavf_aq_result {
+	IAVF_MSG_ERR = -1, /* Error when accessing admin queue */
+	IAVF_MSG_NON,      /* Read nothing from admin queue */
+	IAVF_MSG_SYS,      /* Read system msg from admin queue */
+	IAVF_MSG_CMD,      /* Read async command result */
+};
+
 /* Structure to store private data specific for VF instance. */
 struct iavf_info {
 	uint16_t num_queue_pairs;
@@ -123,6 +138,7 @@ struct iavf_info {
 	volatile enum virtchnl_ops pend_cmd; /* pending command not finished */
 	uint32_t cmd_retval; /* return value of the cmd response from PF */
 	uint8_t *aq_resp; /* buffer to store the adminq response from PF */
+	uint16_t pend_msg; /* flags indicating events from PF not yet handled */
 
 	/* Event from pf */
 	bool dev_closed;
@@ -279,4 +295,5 @@ int iavf_add_del_rss_cfg(struct iavf_adapter *adapter,
 int iavf_add_del_mc_addr_list(struct iavf_adapter *adapter,
 			struct rte_ether_addr *mc_addrs,
 			uint32_t mc_addrs_num, bool add);
+int iavf_request_queues(struct rte_eth_dev *dev, uint16_t num);
 #endif /* _IAVF_ETHDEV_H_ */
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index 440da7d76..8fd9b44db 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -1193,7 +1193,7 @@ iavf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
 }
 
 static int
-iavf_check_vf_reset_done(struct iavf_hw *hw)
+iavf_check_vf_reset_done(struct iavf_hw *hw, struct iavf_info *vf)
 {
 	int i, reset;
 
@@ -1210,6 +1210,10 @@ iavf_check_vf_reset_done(struct iavf_hw *hw)
 	if (i >= IAVF_RESET_WAIT_CNT)
 		return -1;
 
+	/* VF is not in reset or reset is completed */
+	vf->vf_reset = false;
+	vf->pend_msg &= ~PFMSG_RESET_IMPENDING;
+
 	return 0;
 }
 
@@ -1228,7 +1232,7 @@ iavf_init_vf(struct rte_eth_dev *dev)
 		goto err;
 	}
 
-	err = iavf_check_vf_reset_done(hw);
+	err = iavf_check_vf_reset_done(hw, vf);
 	if (err) {
 		PMD_INIT_LOG(ERR, "VF is still resetting");
 		goto err;
@@ -1475,7 +1479,9 @@ iavf_dev_close(struct rte_eth_dev *dev)
 
 	iavf_dev_stop(dev);
 	iavf_flow_flush(dev, NULL);
-	iavf_flow_uninit(adapter);
+	/* If VF is in reset, adminq is disabled, skip processing via adminq */
+	if (!vf->vf_reset)
+		iavf_flow_uninit(adapter);
 	iavf_shutdown_adminq(hw);
 	/* disable uio intr before callback unregister */
 	rte_intr_disable(intr_handle);
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 76f8e38d1..bd39f0db6 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -17,6 +17,7 @@
 #include <rte_eal.h>
 #include <rte_ether.h>
 #include <rte_ethdev_driver.h>
+#include <rte_ethdev_pci.h>
 #include <rte_dev.h>
 
 #include "iavf.h"
@@ -26,13 +27,14 @@
 #define ASQ_DELAY_MS  10
 
 /* Read data in admin queue to get msg from pf driver */
-static enum iavf_status
+static enum iavf_aq_result
 iavf_read_msg_from_pf(struct iavf_adapter *adapter, uint16_t buf_len,
 		      uint8_t *buf)
 {
 	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
 	struct iavf_arq_event_info event;
+	enum iavf_aq_result result = IAVF_MSG_NON;
 	enum virtchnl_ops opcode;
 	int ret;
 
@@ -42,7 +44,9 @@ iavf_read_msg_from_pf(struct iavf_adapter *adapter, uint16_t buf_len,
 	/* Can't read any msg from adminQ */
 	if (ret) {
 		PMD_DRV_LOG(DEBUG, "Can't read msg from AQ");
-		return ret;
+		if (ret != IAVF_ERR_ADMIN_QUEUE_NO_WORK)
+			result = IAVF_MSG_ERR;
+		return result;
 	}
 
 	opcode = (enum virtchnl_ops)rte_le_to_cpu_32(event.desc.cookie_high);
@@ -52,16 +56,46 @@ iavf_read_msg_from_pf(struct iavf_adapter *adapter, uint16_t buf_len,
 	PMD_DRV_LOG(DEBUG, "AQ from pf carries opcode %u, retval %d",
 		    opcode, vf->cmd_retval);
 
-	if (opcode != vf->pend_cmd) {
-		if (opcode != VIRTCHNL_OP_EVENT) {
-			PMD_DRV_LOG(WARNING,
-				    "command mismatch, expect %u, get %u",
-				    vf->pend_cmd, opcode);
+	if (opcode == VIRTCHNL_OP_EVENT) {
+		struct virtchnl_pf_event *vpe =
+			(struct virtchnl_pf_event *)event.msg_buf;
+
+		result = IAVF_MSG_SYS;
+		switch (vpe->event) {
+		case VIRTCHNL_EVENT_LINK_CHANGE:
+			vf->link_up =
+				vpe->event_data.link_event.link_status;
+			vf->link_speed =
+				vpe->event_data.link_event.link_speed;
+			vf->pend_msg |= PFMSG_LINK_CHANGE;
+			PMD_DRV_LOG(INFO, "Link status update:%s",
+				    vf->link_up ? "up" : "down");
+			break;
+		case VIRTCHNL_EVENT_RESET_IMPENDING:
+			vf->vf_reset = true;
+			vf->pend_msg |= PFMSG_RESET_IMPENDING;
+			PMD_DRV_LOG(INFO, "VF is resetting");
+			break;
+		case VIRTCHNL_EVENT_PF_DRIVER_CLOSE:
+			vf->dev_closed = true;
+			vf->pend_msg |= PFMSG_DRIVER_CLOSE;
+			PMD_DRV_LOG(INFO, "PF driver closed");
+			break;
+		default:
+			PMD_DRV_LOG(ERR, "%s: Unknown event %d from pf",
+				    __func__, vpe->event);
+		}
+	} else {
+		/* async reply msg on command issued by vf previously */
+		result = IAVF_MSG_CMD;
+		if (opcode != vf->pend_cmd) {
+			PMD_DRV_LOG(WARNING, "command mismatch, expect %u, get %u",
+				    vf->pend_cmd, opcode);
+			result = IAVF_MSG_ERR;
 		}
-		return IAVF_ERR_OPCODE_MISMATCH;
 	}
 
-	return IAVF_SUCCESS;
+	return result;
 }
 
 static int
@@ -69,6 +103,7 @@ iavf_execute_vf_cmd(struct iavf_adapter *adapter, struct iavf_cmd_info *args)
 {
 	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	enum iavf_aq_result result;
 	enum iavf_status ret;
 	int err = 0;
 	int i = 0;
@@ -97,9 +132,9 @@ iavf_execute_vf_cmd(struct iavf_adapter *adapter, struct iavf_cmd_info *args)
 	case VIRTCHNL_OP_GET_SUPPORTED_RXDIDS:
 		/* for init virtchnl ops, need to poll the response */
 		do {
-			ret = iavf_read_msg_from_pf(adapter, args->out_size,
+			result = iavf_read_msg_from_pf(adapter, args->out_size,
 						args->out_buffer);
-			if (ret == IAVF_SUCCESS)
+			if (result == IAVF_MSG_CMD)
 				break;
 			rte_delay_ms(ASQ_DELAY_MS);
 		} while (i++ < MAX_TRY_TIMES);
@@ -111,7 +146,33 @@ iavf_execute_vf_cmd(struct iavf_adapter *adapter, struct iavf_cmd_info *args)
 		}
 		_clear_cmd(vf);
 		break;
-
+	case VIRTCHNL_OP_REQUEST_QUEUES:
+		/*
+		 * Ignore the async reply and wait for the system message:
+		 * vf_reset is set to true on VIRTCHNL_EVENT_RESET_IMPENDING;
+		 * otherwise the request queues command failed.
+		 */
+		do {
+			result = iavf_read_msg_from_pf(adapter, args->out_size,
+						args->out_buffer);
+			if (result == IAVF_MSG_SYS && vf->vf_reset) {
+				break;
+			} else if (result == IAVF_MSG_CMD ||
+				result == IAVF_MSG_ERR) {
+				err = -1;
+				break;
+			}
+			rte_delay_ms(ASQ_DELAY_MS);
+			/* If no msg or a non-reset sys event is read, continue */
+		} while (i++ < MAX_TRY_TIMES);
+		if (i >= MAX_TRY_TIMES ||
+			vf->cmd_retval != VIRTCHNL_STATUS_SUCCESS) {
+			err = -1;
+			PMD_DRV_LOG(ERR, "No response or return failure (%d)"
+				    " for cmd %d", vf->cmd_retval, args->ops);
+		}
+		_clear_cmd(vf);
+		break;
 	default:
 		/* For other virtchnl ops in running time,
 		 * wait for the cmd done flag.
@@ -1136,3 +1197,52 @@ iavf_add_del_mc_addr_list(struct iavf_adapter *adapter,
 
 	return 0;
 }
+
+int
+iavf_request_queues(struct rte_eth_dev *dev, uint16_t num)
+{
+	struct iavf_adapter *ad =
+		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+	struct virtchnl_vf_res_request vfres;
+	struct iavf_cmd_info args;
+	uint16_t num_queue_pairs;
+	int err;
+
+	if (num == 0)
+		return -1;
+	vfres.num_queue_pairs = num;
+
+	args.ops = VIRTCHNL_OP_REQUEST_QUEUES;
+	args.in_args = (u8 *)&vfres;
+	args.in_args_size = sizeof(vfres);
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+
+	/*
+	 * Disable the interrupt to avoid the admin queue message being read
+	 * before iavf_read_msg_from_pf can poll it.
+ */ + rte_intr_disable(&pci_dev->intr_handle); + err = iavf_execute_vf_cmd(ad, &args); + rte_intr_enable(&pci_dev->intr_handle); + if (err) { + PMD_DRV_LOG(ERR, "fail to execute command OP_REQUEST_QUEUES"); + return err; + } + + /* request queues succeeded, vf is resetting */ + if (vf->vf_reset) { + PMD_DRV_LOG(INFO, "vf reset"); + return 0; + } + + /* request additional queues failed, return available number */ + num_queue_pairs = + ((struct virtchnl_vf_res_request *)args.out_buffer)->num_queue_pairs; + PMD_DRV_LOG(ERR, "request queues failed, only %u queues " + "available", num_queue_pairs); + + return -1; +} -- 2.17.1