From mboxrd@z Thu Jan  1 00:00:00 1970
From: Kevin Liu
To: dev@dpdk.org
Cc: qiming.yang@intel.com, qi.z.zhang@intel.com, stevex.yang@intel.com,
 Kevin Liu
Subject: [PATCH 35/39] net/ice: enable IRQ mapping configuration for large VF
Date: Thu, 7 Apr 2022 10:57:02 +0000
Message-Id: <20220407105706.18889-36-kevinx.liu@intel.com>
X-Mailer: git-send-email 2.33.1
In-Reply-To: <20220407105706.18889-1-kevinx.liu@intel.com>
References: <20220407105706.18889-1-kevinx.liu@intel.com>
MIME-Version: 1.0
Content-Transfer-Encoding: 8bit

From: Steve Yang

The current IRQ mapping configuration supports a maximum of 16 queues
and 16 MSIX vectors. Change the queue-vector mapping structure so it
can describe up to 256 queues, and use a new opcode
(VIRTCHNL_OP_MAP_QUEUE_VECTOR) to handle the case with a large number
of queues. To stay within the adminq buffer size limit, the virtchnl
message is sent in multiple chunks when needed.
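
A minimal standalone sketch of that batching pattern (illustrative
only, not part of the patch; MAPS_PER_BUF and send_chunk() are
hypothetical stand-ins for ICE_DCF_IRQ_MAP_NUM_PER_BUF and
ice_dcf_config_irq_map_lv()):

    #include <stdint.h>
    #include <stdio.h>

    #define MAPS_PER_BUF 128 /* one adminq buffer holds up to 128 maps */

    /* Stand-in for sending one VIRTCHNL_OP_MAP_QUEUE_VECTOR message. */
    static int send_chunk(uint16_t num, uint16_t index)
    {
        printf("map queues [%u, %u) in one message\n",
               (unsigned)index, (unsigned)(index + num));
        return 0;
    }

    int main(void)
    {
        uint16_t remaining = 256; /* e.g. a large VF with 256 Rx queues */
        uint16_t index = 0;

        /* Send full buffers while more than one buffer's worth remains,
         * then send the final, possibly smaller, batch. Same loop shape
         * as in ice_dcf_config_rx_queues_irqs() below.
         */
        while (remaining > MAPS_PER_BUF) {
            if (send_chunk(MAPS_PER_BUF, index))
                return 1;
            remaining -= MAPS_PER_BUF;
            index += MAPS_PER_BUF;
        }
        return send_chunk(remaining, index);
    }
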
Signed-off-by: Steve Yang
Signed-off-by: Kevin Liu
---
 drivers/net/ice/ice_dcf.c        | 50 +++++++++++++++++++++++++++----
 drivers/net/ice/ice_dcf.h        | 10 ++++++-
 drivers/net/ice/ice_dcf_ethdev.c | 51 +++++++++++++++++++++++++++-----
 drivers/net/ice/ice_dcf_ethdev.h |  1 +
 4 files changed, 99 insertions(+), 13 deletions(-)

diff --git a/drivers/net/ice/ice_dcf.c b/drivers/net/ice/ice_dcf.c
index 7a0a9a3534..90af99f8d0 100644
--- a/drivers/net/ice/ice_dcf.c
+++ b/drivers/net/ice/ice_dcf.c
@@ -1116,7 +1116,6 @@ ice_dcf_get_max_rss_queue_region(struct ice_dcf_hw *hw)
 
 	return 0;
 }
-
 int
 ice_dcf_config_irq_map(struct ice_dcf_hw *hw)
 {
@@ -1133,13 +1132,14 @@ ice_dcf_config_irq_map(struct ice_dcf_hw *hw)
 		return -ENOMEM;
 
 	map_info->num_vectors = hw->nb_msix;
-	for (i = 0; i < hw->nb_msix; i++) {
-		vecmap = &map_info->vecmap[i];
+	for (i = 0; i < hw->eth_dev->data->nb_rx_queues; i++) {
+		vecmap =
+			&map_info->vecmap[hw->qv_map[i].vector_id - hw->msix_base];
 		vecmap->vsi_id = hw->vsi_res->vsi_id;
 		vecmap->rxitr_idx = 0;
-		vecmap->vector_id = hw->msix_base + i;
+		vecmap->vector_id = hw->qv_map[i].vector_id;
 		vecmap->txq_map = 0;
-		vecmap->rxq_map = hw->rxq_map[hw->msix_base + i];
+		vecmap->rxq_map |= 1 << hw->qv_map[i].queue_id;
 	}
 
 	memset(&args, 0, sizeof(args));
@@ -1155,6 +1155,46 @@ ice_dcf_config_irq_map(struct ice_dcf_hw *hw)
 	return err;
 }
 
+int
+ice_dcf_config_irq_map_lv(struct ice_dcf_hw *hw,
+			  uint16_t num, uint16_t index)
+{
+	struct virtchnl_queue_vector_maps *map_info;
+	struct virtchnl_queue_vector *qv_maps;
+	struct dcf_virtchnl_cmd args;
+	int len, i, err;
+	int count = 0;
+
+	len = sizeof(struct virtchnl_queue_vector_maps) +
+	      sizeof(struct virtchnl_queue_vector) * (num - 1);
+
+	map_info = rte_zmalloc("map_info", len, 0);
+	if (!map_info)
+		return -ENOMEM;
+
+	map_info->vport_id = hw->vsi_res->vsi_id;
+	map_info->num_qv_maps = num;
+	for (i = index; i < index + map_info->num_qv_maps; i++) {
+		qv_maps = &map_info->qv_maps[count++];
+		qv_maps->itr_idx = VIRTCHNL_ITR_IDX_0;
+		qv_maps->queue_type = VIRTCHNL_QUEUE_TYPE_RX;
+		qv_maps->queue_id = hw->qv_map[i].queue_id;
+		qv_maps->vector_id = hw->qv_map[i].vector_id;
+	}
+
+	args.v_op = VIRTCHNL_OP_MAP_QUEUE_VECTOR;
+	args.req_msg = (u8 *)map_info;
+	args.req_msglen = len;
+	args.rsp_msgbuf = hw->arq_buf;
+	args.rsp_buflen = ICE_DCF_AQ_BUF_SZ;
+	err = ice_dcf_execute_virtchnl_cmd(hw, &args);
+	if (err)
+		PMD_DRV_LOG(ERR, "fail to execute command OP_MAP_QUEUE_VECTOR");
+
+	rte_free(map_info);
+	return err;
+}
+
 int
 ice_dcf_switch_queue(struct ice_dcf_hw *hw, uint16_t qid, bool rx, bool on)
 {
diff --git a/drivers/net/ice/ice_dcf.h b/drivers/net/ice/ice_dcf.h
index 1f45881315..bd88424034 100644
--- a/drivers/net/ice/ice_dcf.h
+++ b/drivers/net/ice/ice_dcf.h
@@ -74,6 +74,11 @@ struct ice_dcf_tm_conf {
 	bool committed;
 };
 
+struct ice_dcf_qv_map {
+	uint16_t queue_id;
+	uint16_t vector_id;
+};
+
 struct ice_dcf_hw {
 	struct iavf_hw avf;
 
@@ -108,7 +113,8 @@ struct ice_dcf_hw {
 	uint16_t msix_base;
 	uint16_t nb_msix;
 	uint16_t max_rss_qregion; /* max RSS queue region supported by PF */
-	uint16_t rxq_map[16];
+
+	struct ice_dcf_qv_map *qv_map; /* queue vector mapping */
 	struct virtchnl_eth_stats eth_stats_offset;
 	struct virtchnl_vlan_caps vlan_v2_caps;
 
@@ -136,6 +142,8 @@ int ice_dcf_configure_queues(struct ice_dcf_hw *hw,
 int ice_dcf_request_queues(struct ice_dcf_hw *hw, uint16_t num);
 int ice_dcf_get_max_rss_queue_region(struct ice_dcf_hw *hw);
 int ice_dcf_config_irq_map(struct ice_dcf_hw *hw);
+int ice_dcf_config_irq_map_lv(struct ice_dcf_hw *hw,
+			      uint16_t num, uint16_t index);
 int ice_dcf_switch_queue(struct ice_dcf_hw *hw, uint16_t qid, bool rx, bool on);
 int ice_dcf_disable_queues(struct ice_dcf_hw *hw);
 int ice_dcf_query_stats(struct ice_dcf_hw *hw,
diff --git a/drivers/net/ice/ice_dcf_ethdev.c b/drivers/net/ice/ice_dcf_ethdev.c
index 211a2510fa..82d97fd049 100644
--- a/drivers/net/ice/ice_dcf_ethdev.c
+++ b/drivers/net/ice/ice_dcf_ethdev.c
@@ -144,6 +144,7 @@ ice_dcf_config_rx_queues_irqs(struct rte_eth_dev *dev,
 {
 	struct ice_dcf_adapter *adapter = dev->data->dev_private;
 	struct ice_dcf_hw *hw = &adapter->real_hw;
+	struct ice_dcf_qv_map *qv_map;
 	uint16_t interval, i;
 	int vec;
 
@@ -162,6 +163,14 @@ ice_dcf_config_rx_queues_irqs(struct rte_eth_dev *dev,
 		}
 	}
 
+	qv_map = rte_zmalloc("qv_map",
+		dev->data->nb_rx_queues * sizeof(struct ice_dcf_qv_map), 0);
+	if (!qv_map) {
+		PMD_DRV_LOG(ERR, "Failed to allocate %d queue-vector map",
+			dev->data->nb_rx_queues);
+		return -1;
+	}
+
 	if (!dev->data->dev_conf.intr_conf.rxq ||
 	    !rte_intr_dp_is_en(intr_handle)) {
 		/* Rx interrupt disabled, Map interrupt only for writeback */
@@ -197,17 +206,22 @@ ice_dcf_config_rx_queues_irqs(struct rte_eth_dev *dev,
 		}
 		IAVF_WRITE_FLUSH(&hw->avf);
 		/* map all queues to the same interrupt */
-		for (i = 0; i < dev->data->nb_rx_queues; i++)
-			hw->rxq_map[hw->msix_base] |= 1 << i;
+		for (i = 0; i < dev->data->nb_rx_queues; i++) {
+			qv_map[i].queue_id = i;
+			qv_map[i].vector_id = hw->msix_base;
+		}
+		hw->qv_map = qv_map;
 	} else {
 		if (!rte_intr_allow_others(intr_handle)) {
 			hw->nb_msix = 1;
 			hw->msix_base = IAVF_MISC_VEC_ID;
 			for (i = 0; i < dev->data->nb_rx_queues; i++) {
-				hw->rxq_map[hw->msix_base] |= 1 << i;
+				qv_map[i].queue_id = i;
+				qv_map[i].vector_id = hw->msix_base;
 				rte_intr_vec_list_index_set(intr_handle,
 							i, IAVF_MISC_VEC_ID);
 			}
+			hw->qv_map = qv_map;
 			PMD_DRV_LOG(DEBUG,
 				    "vector %u are mapping to all Rx queues",
 				    hw->msix_base);
@@ -220,21 +234,44 @@ ice_dcf_config_rx_queues_irqs(struct rte_eth_dev *dev,
 			hw->msix_base = IAVF_MISC_VEC_ID;
 			vec = IAVF_MISC_VEC_ID;
 			for (i = 0; i < dev->data->nb_rx_queues; i++) {
-				hw->rxq_map[vec] |= 1 << i;
+				qv_map[i].queue_id = i;
+				qv_map[i].vector_id = vec;
 				rte_intr_vec_list_index_set(intr_handle,
 							i, vec++);
 				if (vec >= hw->nb_msix)
 					vec = IAVF_RX_VEC_START;
 			}
+			hw->qv_map = qv_map;
 			PMD_DRV_LOG(DEBUG,
 				    "%u vectors are mapping to %u Rx queues",
 				    hw->nb_msix, dev->data->nb_rx_queues);
 		}
 	}
 
-	if (ice_dcf_config_irq_map(hw)) {
-		PMD_DRV_LOG(ERR, "config interrupt mapping failed");
-		return -1;
+	if (!hw->lv_enabled) {
+		if (ice_dcf_config_irq_map(hw)) {
+			PMD_DRV_LOG(ERR, "config interrupt mapping failed");
+			return -1;
+		}
+	} else {
+		uint16_t num_qv_maps = dev->data->nb_rx_queues;
+		uint16_t index = 0;
+
+		while (num_qv_maps > ICE_DCF_IRQ_MAP_NUM_PER_BUF) {
+			if (ice_dcf_config_irq_map_lv(hw,
+					ICE_DCF_IRQ_MAP_NUM_PER_BUF, index)) {
+				PMD_DRV_LOG(ERR, "config interrupt mapping for large VF failed");
+				return -1;
+			}
+			num_qv_maps -= ICE_DCF_IRQ_MAP_NUM_PER_BUF;
+			index += ICE_DCF_IRQ_MAP_NUM_PER_BUF;
+		}
+
+		if (ice_dcf_config_irq_map_lv(hw, num_qv_maps, index)) {
+			PMD_DRV_LOG(ERR, "config interrupt mapping for large VF failed");
+			return -1;
+		}
 	}
+
 	return 0;
 }
diff --git a/drivers/net/ice/ice_dcf_ethdev.h b/drivers/net/ice/ice_dcf_ethdev.h
index 2fac1e5b21..9ef524c97c 100644
--- a/drivers/net/ice/ice_dcf_ethdev.h
+++ b/drivers/net/ice/ice_dcf_ethdev.h
@@ -23,6 +23,7 @@
 #define ICE_DCF_MAX_NUM_QUEUES_DFLT 16
 #define ICE_DCF_MAX_NUM_QUEUES_LV 256
 #define ICE_DCF_CFG_Q_NUM_PER_BUF 32
+#define ICE_DCF_IRQ_MAP_NUM_PER_BUF 128
 
 struct ice_dcf_queue {
 	uint64_t dummy;
-- 
2.33.1