From mboxrd@z Thu Jan  1 00:00:00 1970
From: Ting Xu <ting.xu@intel.com>
To: dev@dpdk.org
Cc: qi.z.zhang@intel.com, beilei.xing@intel.com, jingjing.wu@intel.com, Ting Xu <ting.xu@intel.com>
Date: Sun, 18 Oct 2020 18:34:29 +0800
Message-Id: <20201018103430.30997-6-ting.xu@intel.com>
X-Mailer: git-send-email 2.17.1
In-Reply-To: <20201018103430.30997-1-ting.xu@intel.com>
References: <20200909072028.16726-1-ting.xu@intel.com> <20201018103430.30997-1-ting.xu@intel.com>
Subject: [dpdk-dev] [PATCH v7 5/6] net/iavf: enable IRQ mapping configuration for large VF

The current IRQ mapping configuration only supports a maximum of 16
queues and 16 MSI-X vectors. Change the queue-vector mapping structure
so that up to 256 queues can be indicated. A new virtchnl opcode is used
to handle the case with a large number of queues. To stay within the
adminq buffer size limitation, the virtchnl message is sent multiple
times if needed.

Signed-off-by: Ting Xu <ting.xu@intel.com>
---
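[Illustrative note, not part of the patch: the standalone C sketch below only
demonstrates the message-splitting scheme described in the commit message.
send_irq_map_chunk() is a hypothetical stand-in for
iavf_config_irq_map_lv(adapter, num, index), and the value 128 mirrors the
IAVF_IRQ_MAP_NUM_PER_BUF define added by this patch.]

/*
 * Standalone sketch: split nb_rx_queues queue-vector maps into virtchnl
 * messages of at most IAVF_IRQ_MAP_NUM_PER_BUF entries each, mirroring the
 * loop added to iavf_config_rx_queues_irqs() below.
 */
#include <stdint.h>
#include <stdio.h>

#define IAVF_IRQ_MAP_NUM_PER_BUF 128

/* Hypothetical stand-in for iavf_config_irq_map_lv(): a real implementation
 * would build a virtchnl_queue_vector_maps buffer covering
 * qv_map[index .. index + num - 1] and send it with
 * VIRTCHNL_OP_MAP_QUEUE_VECTOR; here the call is only traced.
 */
static int
send_irq_map_chunk(uint16_t num, uint16_t index)
{
	printf("send %u queue-vector maps starting at index %u\n",
	       (unsigned int)num, (unsigned int)index);
	return 0;
}

static int
config_irq_map_chunked(uint16_t nb_rx_queues)
{
	uint16_t num_qv_maps = nb_rx_queues;
	uint16_t index = 0;

	/* Send full chunks first, then one final (possibly shorter) chunk. */
	while (num_qv_maps > IAVF_IRQ_MAP_NUM_PER_BUF) {
		if (send_irq_map_chunk(IAVF_IRQ_MAP_NUM_PER_BUF, index))
			return -1;
		num_qv_maps -= IAVF_IRQ_MAP_NUM_PER_BUF;
		index += IAVF_IRQ_MAP_NUM_PER_BUF;
	}
	return send_irq_map_chunk(num_qv_maps, index);
}

int
main(void)
{
	/* A large VF with 256 Rx queues results in two messages of 128 maps;
	 * 130 queues would give one message of 128 maps and one of 2.
	 */
	return config_irq_map_chunked(256);
}

[End of sketch; patch follows.]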
 drivers/net/iavf/iavf.h        | 12 +++++---
 drivers/net/iavf/iavf_ethdev.c | 52 +++++++++++++++++++++++++++++-----
 drivers/net/iavf/iavf_vchnl.c  | 50 +++++++++++++++++++++++++++++---
 3 files changed, 99 insertions(+), 15 deletions(-)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 1cdac1b10..5e330b215 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -22,6 +22,7 @@
 #define IAVF_MAX_NUM_QUEUES_DFLT 16
 #define IAVF_MAX_NUM_QUEUES_LV   256
 #define IAVF_CFG_Q_NUM_PER_BUF   32
+#define IAVF_IRQ_MAP_NUM_PER_BUF 128
 
 #define IAVF_NUM_MACADDR_MAX     64
 
@@ -106,8 +107,10 @@ struct iavf_fdir_info {
 	struct iavf_fdir_conf conf;
 };
 
-/* TODO: is that correct to assume the max number to be 16 ?*/
-#define IAVF_MAX_MSIX_VECTORS   16
+struct iavf_qv_map {
+	uint16_t queue_id;
+	uint16_t vector_id;
+};
 
 /* Message type read in admin queue from PF */
 enum iavf_aq_result {
@@ -152,8 +155,7 @@ struct iavf_info {
 	uint16_t nb_msix;   /* number of MSI-X interrupts on Rx */
 	uint16_t msix_base; /* msix vector base from */
 	uint16_t max_rss_qregion; /* max RSS queue region supported by PF */
-	/* queue bitmask for each vector */
-	uint16_t rxq_map[IAVF_MAX_MSIX_VECTORS];
+	struct iavf_qv_map *qv_map; /* queue vector mapping */
 	struct iavf_flow_list flow_list;
 	rte_spinlock_t flow_ops_lock;
 	struct iavf_parser_list rss_parser_list;
@@ -274,6 +276,8 @@ int iavf_configure_queues(struct iavf_adapter *adapter,
 			  uint16_t num_queue_pairs, uint16_t index);
 int iavf_get_supported_rxdid(struct iavf_adapter *adapter);
 int iavf_config_irq_map(struct iavf_adapter *adapter);
+int iavf_config_irq_map_lv(struct iavf_adapter *adapter, uint16_t num,
+			   uint16_t index);
 void iavf_add_del_all_mac_addr(struct iavf_adapter *adapter, bool add);
 int iavf_dev_link_update(struct rte_eth_dev *dev,
 			 __rte_unused int wait_to_complete);
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index db334b390..77c49ed03 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -423,6 +423,7 @@ static int iavf_config_rx_queues_irqs(struct rte_eth_dev *dev,
 		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
 	struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(adapter);
+	struct iavf_qv_map *qv_map;
 	uint16_t interval, i;
 	int vec;
 
@@ -443,6 +444,14 @@ static int iavf_config_rx_queues_irqs(struct rte_eth_dev *dev,
 		}
 	}
 
+	qv_map = rte_zmalloc("qv_map",
+			dev->data->nb_rx_queues * sizeof(struct iavf_qv_map), 0);
+	if (!qv_map) {
+		PMD_DRV_LOG(ERR, "Failed to allocate %d queue-vector map",
+			    dev->data->nb_rx_queues);
+		return -1;
+	}
+
 	if (!dev->data->dev_conf.intr_conf.rxq ||
 	    !rte_intr_dp_is_en(intr_handle)) {
 		/* Rx interrupt disabled, Map interrupt only for writeback */
@@ -473,16 +482,21 @@ static int iavf_config_rx_queues_irqs(struct rte_eth_dev *dev,
 		}
 		IAVF_WRITE_FLUSH(hw);
 		/* map all queues to the same interrupt */
-		for (i = 0; i < dev->data->nb_rx_queues; i++)
-			vf->rxq_map[vf->msix_base] |= 1 << i;
+		for (i = 0; i < dev->data->nb_rx_queues; i++) {
+			qv_map[i].queue_id = i;
+			qv_map[i].vector_id = vf->msix_base;
+		}
+		vf->qv_map = qv_map;
 	} else {
 		if (!rte_intr_allow_others(intr_handle)) {
 			vf->nb_msix = 1;
 			vf->msix_base = IAVF_MISC_VEC_ID;
 			for (i = 0; i < dev->data->nb_rx_queues; i++) {
-				vf->rxq_map[vf->msix_base] |= 1 << i;
+				qv_map[i].queue_id = i;
+				qv_map[i].vector_id = vf->msix_base;
 				intr_handle->intr_vec[i] = IAVF_MISC_VEC_ID;
 			}
+			vf->qv_map = qv_map;
 			PMD_DRV_LOG(DEBUG,
 				    "vector %u are mapping to all Rx queues",
 				    vf->msix_base);
@@ -495,20 +509,44 @@ static int iavf_config_rx_queues_irqs(struct rte_eth_dev *dev,
 			vf->msix_base = IAVF_RX_VEC_START;
 			vec = IAVF_RX_VEC_START;
 			for (i = 0; i < dev->data->nb_rx_queues; i++) {
-				vf->rxq_map[vec] |= 1 << i;
+				qv_map[i].queue_id = i;
+				qv_map[i].vector_id = vec;
 				intr_handle->intr_vec[i] = vec++;
 				if (vec >= vf->nb_msix)
 					vec = IAVF_RX_VEC_START;
 			}
+			vf->qv_map = qv_map;
 			PMD_DRV_LOG(DEBUG,
 				    "%u vectors are mapping to %u Rx queues",
 				    vf->nb_msix, dev->data->nb_rx_queues);
 		}
 	}
 
-	if (iavf_config_irq_map(adapter)) {
-		PMD_DRV_LOG(ERR, "config interrupt mapping failed");
-		return -1;
+	if (!vf->lv_enabled) {
+		if (iavf_config_irq_map(adapter)) {
+			PMD_DRV_LOG(ERR, "config interrupt mapping failed");
+			return -1;
+		}
+	} else {
+		uint16_t num_qv_maps = dev->data->nb_rx_queues;
+		uint16_t index = 0;
+
+		while (num_qv_maps > IAVF_IRQ_MAP_NUM_PER_BUF) {
+			if (iavf_config_irq_map_lv(adapter,
+					IAVF_IRQ_MAP_NUM_PER_BUF, index)) {
+				PMD_DRV_LOG(ERR, "config interrupt mapping "
+					    "for large VF failed");
+				return -1;
+			}
+			num_qv_maps -= IAVF_IRQ_MAP_NUM_PER_BUF;
+			index += IAVF_IRQ_MAP_NUM_PER_BUF;
+		}
+
+		if (iavf_config_irq_map_lv(adapter, num_qv_maps, index)) {
+			PMD_DRV_LOG(ERR, "config interrupt mapping "
+				    "for large VF failed");
+			return -1;
+		}
 	}
 	return 0;
 }
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 829963434..bc7e4f83f 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -775,13 +775,14 @@ iavf_config_irq_map(struct iavf_adapter *adapter)
 		return -ENOMEM;
 
 	map_info->num_vectors = vf->nb_msix;
-	for (i = 0; i < vf->nb_msix; i++) {
-		vecmap = &map_info->vecmap[i];
+	for (i = 0; i < adapter->eth_dev->data->nb_rx_queues; i++) {
+		vecmap =
+			&map_info->vecmap[vf->qv_map[i].vector_id - vf->msix_base];
 		vecmap->vsi_id = vf->vsi_res->vsi_id;
 		vecmap->rxitr_idx = IAVF_ITR_INDEX_DEFAULT;
-		vecmap->vector_id = vf->msix_base + i;
+		vecmap->vector_id = vf->qv_map[i].vector_id;
 		vecmap->txq_map = 0;
-		vecmap->rxq_map = vf->rxq_map[vf->msix_base + i];
+		vecmap->rxq_map |= 1 << vf->qv_map[i].queue_id;
 	}
 
 	args.ops = VIRTCHNL_OP_CONFIG_IRQ_MAP;
@@ -797,6 +798,47 @@
 	return err;
 }
 
+int
+iavf_config_irq_map_lv(struct iavf_adapter *adapter, uint16_t num,
+		       uint16_t index)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct virtchnl_queue_vector_maps *map_info;
+	struct virtchnl_queue_vector *qv_maps;
+	struct iavf_cmd_info args;
+	int len, i, err;
+	int count = 0;
+
+	len = sizeof(struct virtchnl_queue_vector_maps) +
+	      sizeof(struct virtchnl_queue_vector) * (num - 1);
+
+	map_info = rte_zmalloc("map_info", len, 0);
+	if (!map_info)
+		return -ENOMEM;
+
+	map_info->vport_id = vf->vsi_res->vsi_id;
+	map_info->num_qv_maps = num;
+	for (i = index; i < index + map_info->num_qv_maps; i++) {
+		qv_maps = &map_info->qv_maps[count++];
+		qv_maps->itr_idx = VIRTCHNL_ITR_IDX_0;
+		qv_maps->queue_type = VIRTCHNL_QUEUE_TYPE_RX;
+		qv_maps->queue_id = vf->qv_map[i].queue_id;
+		qv_maps->vector_id = vf->qv_map[i].vector_id;
+	}
+
+	args.ops = VIRTCHNL_OP_MAP_QUEUE_VECTOR;
+	args.in_args = (u8 *)map_info;
+	args.in_args_size = len;
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+	err = iavf_execute_vf_cmd(adapter, &args);
+	if (err)
+		PMD_DRV_LOG(ERR, "fail to execute command OP_MAP_QUEUE_VECTOR");
+
+	rte_free(map_info);
+	return err;
+}
+
 void
 iavf_add_del_all_mac_addr(struct iavf_adapter *adapter, bool add)
 {
-- 
2.17.1