From mboxrd@z Thu Jan  1 00:00:00 1970
From: Kevin Liu <kevinx.liu@intel.com>
To: dev@dpdk.org
Cc: qiming.yang@intel.com, qi.z.zhang@intel.com, stevex.yang@intel.com,
	Kevin Liu <kevinx.liu@intel.com>
Subject: [PATCH v2 29/33] net/ice: enable IRQ mapping configuration for large VF
Date: Wed, 13 Apr 2022 16:09:28 +0000
Message-Id: <20220413160932.2074781-30-kevinx.liu@intel.com>
X-Mailer: git-send-email 2.33.1
In-Reply-To: <20220413160932.2074781-1-kevinx.liu@intel.com>
References: <20220407105706.18889-1-kevinx.liu@intel.com>
	<20220413160932.2074781-1-kevinx.liu@intel.com>
MIME-Version: 1.0
Content-Transfer-Encoding: 8bit
List-Id: DPDK patches and discussions <dev.dpdk.org>

From: Steve Yang <stevex.yang@intel.com>

The current IRQ mapping configuration supports at most 16 queues and
16 MSIX vectors. Change the queue-vector mapping structure to describe
up to 256 queues. A new opcode, VIRTCHNL_OP_MAP_QUEUE_VECTOR, handles
the large-queue case. To stay within the adminq buffer size limit, the
virtchnl message is split and sent multiple times when needed.

Signed-off-by: Steve Yang <stevex.yang@intel.com>
Signed-off-by: Kevin Liu <kevinx.liu@intel.com>
---
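Reviewer note (below the cut line, not part of the commit): a minimal
standalone sketch of the batched-send pattern described above, assuming
the 128-entry per-message cap that this patch defines as
ICE_DCF_IRQ_MAP_NUM_PER_BUF; send_irq_map_batch() is a hypothetical
stand-in for ice_dcf_config_irq_map_lv() that only reports each batch:

#include <stdint.h>
#include <stdio.h>

#define ICE_DCF_IRQ_MAP_NUM_PER_BUF 128 /* per-message cap, as in the patch */

/* Hypothetical stand-in for ice_dcf_config_irq_map_lv(): print which
 * slice of the queue-vector map table one VIRTCHNL_OP_MAP_QUEUE_VECTOR
 * message would carry.
 */
static int send_irq_map_batch(uint16_t num, uint16_t index)
{
        printf("OP_MAP_QUEUE_VECTOR: maps [%u..%u], %u entries\n",
               (unsigned)index, (unsigned)(index + num - 1), (unsigned)num);
        return 0;
}

int main(void)
{
        uint16_t num_qv_maps = 256; /* e.g. a large VF with 256 Rx queues */
        uint16_t index = 0;

        /* Full 128-entry batches first, then one final short batch;
         * the same loop shape as ice_dcf_config_rx_queues_irqs() below.
         */
        while (num_qv_maps > ICE_DCF_IRQ_MAP_NUM_PER_BUF) {
                if (send_irq_map_batch(ICE_DCF_IRQ_MAP_NUM_PER_BUF, index))
                        return 1;
                num_qv_maps -= ICE_DCF_IRQ_MAP_NUM_PER_BUF;
                index += ICE_DCF_IRQ_MAP_NUM_PER_BUF;
        }
        return send_irq_map_batch(num_qv_maps, index) ? 1 : 0;
}

With 256 queues this prints two batches, [0..127] and [128..255]; with
200 queues it prints [0..127] followed by the short batch [128..199].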
 drivers/net/ice/ice_dcf.c        | 50 +++++++++++++++++++++++++++----
 drivers/net/ice/ice_dcf.h        | 10 ++++++-
 drivers/net/ice/ice_dcf_ethdev.c | 51 +++++++++++++++++++++++++++-----
 drivers/net/ice/ice_dcf_ethdev.h |  1 +
 4 files changed, 99 insertions(+), 13 deletions(-)

diff --git a/drivers/net/ice/ice_dcf.c b/drivers/net/ice/ice_dcf.c
index 7004c00f1c..290f754049 100644
--- a/drivers/net/ice/ice_dcf.c
+++ b/drivers/net/ice/ice_dcf.c
@@ -1115,7 +1115,6 @@ ice_dcf_get_max_rss_queue_region(struct ice_dcf_hw *hw)
         return 0;
 }
 
-
 int
 ice_dcf_config_irq_map(struct ice_dcf_hw *hw)
 {
@@ -1132,13 +1131,14 @@ ice_dcf_config_irq_map(struct ice_dcf_hw *hw)
                 return -ENOMEM;
 
         map_info->num_vectors = hw->nb_msix;
-        for (i = 0; i < hw->nb_msix; i++) {
-                vecmap = &map_info->vecmap[i];
+        for (i = 0; i < hw->eth_dev->data->nb_rx_queues; i++) {
+                vecmap =
+                        &map_info->vecmap[hw->qv_map[i].vector_id - hw->msix_base];
                 vecmap->vsi_id = hw->vsi_res->vsi_id;
                 vecmap->rxitr_idx = 0;
-                vecmap->vector_id = hw->msix_base + i;
+                vecmap->vector_id = hw->qv_map[i].vector_id;
                 vecmap->txq_map = 0;
-                vecmap->rxq_map = hw->rxq_map[hw->msix_base + i];
+                vecmap->rxq_map |= 1 << hw->qv_map[i].queue_id;
         }
 
         memset(&args, 0, sizeof(args));
@@ -1154,6 +1154,46 @@ ice_dcf_config_irq_map(struct ice_dcf_hw *hw)
         return err;
 }
 
+int
+ice_dcf_config_irq_map_lv(struct ice_dcf_hw *hw,
+                          uint16_t num, uint16_t index)
+{
+        struct virtchnl_queue_vector_maps *map_info;
+        struct virtchnl_queue_vector *qv_maps;
+        struct dcf_virtchnl_cmd args;
+        int len, i, err;
+        int count = 0;
+
+        len = sizeof(struct virtchnl_queue_vector_maps) +
+              sizeof(struct virtchnl_queue_vector) * (num - 1);
+
+        map_info = rte_zmalloc("map_info", len, 0);
+        if (!map_info)
+                return -ENOMEM;
+
+        map_info->vport_id = hw->vsi_res->vsi_id;
+        map_info->num_qv_maps = num;
+        for (i = index; i < index + map_info->num_qv_maps; i++) {
+                qv_maps = &map_info->qv_maps[count++];
+                qv_maps->itr_idx = VIRTCHNL_ITR_IDX_0;
+                qv_maps->queue_type = VIRTCHNL_QUEUE_TYPE_RX;
+                qv_maps->queue_id = hw->qv_map[i].queue_id;
+                qv_maps->vector_id = hw->qv_map[i].vector_id;
+        }
+
+        args.v_op = VIRTCHNL_OP_MAP_QUEUE_VECTOR;
+        args.req_msg = (u8 *)map_info;
+        args.req_msglen = len;
+        args.rsp_msgbuf = hw->arq_buf;
+        args.rsp_buflen = ICE_DCF_AQ_BUF_SZ;
+        err = ice_dcf_execute_virtchnl_cmd(hw, &args);
+        if (err)
+                PMD_DRV_LOG(ERR, "fail to execute command OP_MAP_QUEUE_VECTOR");
+
+        rte_free(map_info);
+        return err;
+}
+
 int
 ice_dcf_switch_queue(struct ice_dcf_hw *hw, uint16_t qid, bool rx, bool on)
 {
diff --git a/drivers/net/ice/ice_dcf.h b/drivers/net/ice/ice_dcf.h
index e36428a92a..ce57a687ab 100644
--- a/drivers/net/ice/ice_dcf.h
+++ b/drivers/net/ice/ice_dcf.h
@@ -74,6 +74,11 @@ struct ice_dcf_tm_conf {
         bool committed;
 };
 
+struct ice_dcf_qv_map {
+        uint16_t queue_id;
+        uint16_t vector_id;
+};
+
 struct ice_dcf_hw {
         struct iavf_hw avf;
 
@@ -106,7 +111,8 @@ struct ice_dcf_hw {
         uint16_t msix_base;
         uint16_t nb_msix;
         uint16_t max_rss_qregion; /* max RSS queue region supported by PF */
-        uint16_t rxq_map[16];
+
+        struct ice_dcf_qv_map *qv_map; /* queue vector mapping */
         struct virtchnl_eth_stats eth_stats_offset;
         struct virtchnl_vlan_caps vlan_v2_caps;
 
@@ -134,6 +140,8 @@ int ice_dcf_configure_queues(struct ice_dcf_hw *hw,
 int ice_dcf_request_queues(struct ice_dcf_hw *hw, uint16_t num);
 int ice_dcf_get_max_rss_queue_region(struct ice_dcf_hw *hw);
 int ice_dcf_config_irq_map(struct ice_dcf_hw *hw);
+int ice_dcf_config_irq_map_lv(struct ice_dcf_hw *hw,
+                              uint16_t num, uint16_t index);
 int ice_dcf_switch_queue(struct ice_dcf_hw *hw, uint16_t qid, bool rx, bool on);
 int ice_dcf_disable_queues(struct ice_dcf_hw *hw);
 int ice_dcf_query_stats(struct ice_dcf_hw *hw,
diff --git a/drivers/net/ice/ice_dcf_ethdev.c b/drivers/net/ice/ice_dcf_ethdev.c
index 78df82d5b5..1ddba02ebb 100644
--- a/drivers/net/ice/ice_dcf_ethdev.c
+++ b/drivers/net/ice/ice_dcf_ethdev.c
@@ -143,6 +143,7 @@ ice_dcf_config_rx_queues_irqs(struct rte_eth_dev *dev,
 {
         struct ice_dcf_adapter *adapter = dev->data->dev_private;
         struct ice_dcf_hw *hw = &adapter->real_hw;
+        struct ice_dcf_qv_map *qv_map;
         uint16_t interval, i;
         int vec;
 
@@ -161,6 +162,14 @@ ice_dcf_config_rx_queues_irqs(struct rte_eth_dev *dev,
                 }
         }
 
+        qv_map = rte_zmalloc("qv_map",
+                dev->data->nb_rx_queues * sizeof(struct ice_dcf_qv_map), 0);
+        if (!qv_map) {
+                PMD_DRV_LOG(ERR, "Failed to allocate %d queue-vector map",
+                            dev->data->nb_rx_queues);
+                return -1;
+        }
+
         if (!dev->data->dev_conf.intr_conf.rxq ||
             !rte_intr_dp_is_en(intr_handle)) {
                 /* Rx interrupt disabled, Map interrupt only for writeback */
@@ -196,17 +205,22 @@ ice_dcf_config_rx_queues_irqs(struct rte_eth_dev *dev,
                 }
                 IAVF_WRITE_FLUSH(&hw->avf);
                 /* map all queues to the same interrupt */
-                for (i = 0; i < dev->data->nb_rx_queues; i++)
-                        hw->rxq_map[hw->msix_base] |= 1 << i;
+                for (i = 0; i < dev->data->nb_rx_queues; i++) {
+                        qv_map[i].queue_id = i;
+                        qv_map[i].vector_id = hw->msix_base;
+                }
+                hw->qv_map = qv_map;
         } else {
                 if (!rte_intr_allow_others(intr_handle)) {
                         hw->nb_msix = 1;
                         hw->msix_base = IAVF_MISC_VEC_ID;
                         for (i = 0; i < dev->data->nb_rx_queues; i++) {
-                                hw->rxq_map[hw->msix_base] |= 1 << i;
+                                qv_map[i].queue_id = i;
+                                qv_map[i].vector_id = hw->msix_base;
                                 rte_intr_vec_list_index_set(intr_handle,
                                                         i, IAVF_MISC_VEC_ID);
                         }
+                        hw->qv_map = qv_map;
                         PMD_DRV_LOG(DEBUG,
                                     "vector %u are mapping to all Rx queues",
                                     hw->msix_base);
@@ -219,21 +233,44 @@ ice_dcf_config_rx_queues_irqs(struct rte_eth_dev *dev,
                         hw->msix_base = IAVF_MISC_VEC_ID;
                         vec = IAVF_MISC_VEC_ID;
                         for (i = 0; i < dev->data->nb_rx_queues; i++) {
-                                hw->rxq_map[vec] |= 1 << i;
+                                qv_map[i].queue_id = i;
+                                qv_map[i].vector_id = vec;
                                 rte_intr_vec_list_index_set(intr_handle,
                                                                    i, vec++);
                                 if (vec >= hw->nb_msix)
                                         vec = IAVF_RX_VEC_START;
                         }
+                        hw->qv_map = qv_map;
                         PMD_DRV_LOG(DEBUG,
                                     "%u vectors are mapping to %u Rx queues",
                                     hw->nb_msix, dev->data->nb_rx_queues);
                 }
         }
 
-        if (ice_dcf_config_irq_map(hw)) {
-                PMD_DRV_LOG(ERR, "config interrupt mapping failed");
-                return -1;
+        if (!hw->lv_enabled) {
+                if (ice_dcf_config_irq_map(hw)) {
+                        PMD_DRV_LOG(ERR, "config interrupt mapping failed");
+                        return -1;
+                }
+        } else {
+                uint16_t num_qv_maps = dev->data->nb_rx_queues;
+                uint16_t index = 0;
+
+                while (num_qv_maps > ICE_DCF_IRQ_MAP_NUM_PER_BUF) {
+                        if (ice_dcf_config_irq_map_lv(hw,
+                                        ICE_DCF_IRQ_MAP_NUM_PER_BUF, index)) {
+                                PMD_DRV_LOG(ERR, "config interrupt mapping for large VF failed");
+                                return -1;
+                        }
+
+                        num_qv_maps -= ICE_DCF_IRQ_MAP_NUM_PER_BUF;
+                        index += ICE_DCF_IRQ_MAP_NUM_PER_BUF;
+                }
+
+                if (ice_dcf_config_irq_map_lv(hw, num_qv_maps, index)) {
+                        PMD_DRV_LOG(ERR, "config interrupt mapping for large VF failed");
+                        return -1;
+                }
         }
 
         return 0;
diff --git a/drivers/net/ice/ice_dcf_ethdev.h b/drivers/net/ice/ice_dcf_ethdev.h
index 2fac1e5b21..9ef524c97c 100644
--- a/drivers/net/ice/ice_dcf_ethdev.h
+++ b/drivers/net/ice/ice_dcf_ethdev.h
@@ -23,6 +23,7 @@
 #define ICE_DCF_MAX_NUM_QUEUES_DFLT 16
 #define ICE_DCF_MAX_NUM_QUEUES_LV 256
 #define ICE_DCF_CFG_Q_NUM_PER_BUF 32
+#define ICE_DCF_IRQ_MAP_NUM_PER_BUF 128
 
 struct ice_dcf_queue {
         uint64_t dummy;
-- 
2.33.1