From: Ting Xu <ting.xu@intel.com>
To: dev@dpdk.org
Cc: xiaolong.ye@intel.com, qi.z.zhang@intel.com, qiming.yang@intel.com,
 john.mcnamara@intel.com, marko.kovacevic@intel.com
Date: Thu, 11 Jun 2020 17:08:35 +0000
Message-Id: <20200611170839.9206-9-ting.xu@intel.com>
X-Mailer: git-send-email 2.17.1
In-Reply-To: <20200611170839.9206-1-ting.xu@intel.com>
References: <20200605201737.33766-1-ting.xu@intel.com>
 <20200611170839.9206-1-ting.xu@intel.com>
Subject: [dpdk-dev] [PATCH v2 08/12] net/ice: add queue config in DCF

From: Qi Zhang <qi.z.zhang@intel.com>

Add queue and Rx queue interrupt configuration during device start in
DCF. The setup is sent to the PF via virtchnl.

Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
Signed-off-by: Ting Xu <ting.xu@intel.com>
---
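Note for reviewers: both new helpers follow the usual virtchnl pattern of
sizing a message for its trailing flexible array, filling it, and relaying
it to the PF through ice_dcf_execute_virtchnl_cmd(). A condensed sketch of
that pattern follows; the helper name and the elided per-queue fills are
illustrative only, not part of the patch:

/* Hypothetical sketch of the virtchnl message pattern used below. */
#include <string.h>
#include <rte_malloc.h>
#include "ice_dcf.h"

static int
dcf_queue_cfg_sketch(struct ice_dcf_hw *hw)
{
	struct virtchnl_vsi_queue_config_info *cfg;
	struct dcf_virtchnl_cmd args;
	uint16_t size;
	int err;

	/* Header plus one virtchnl_queue_pair_info per queue pair. */
	size = sizeof(*cfg) + sizeof(cfg->qpair[0]) * hw->num_queue_pairs;
	cfg = rte_zmalloc("cfg_queue", size, 0);
	if (cfg == NULL)
		return -ENOMEM;

	cfg->vsi_id = hw->vsi_res->vsi_id;
	cfg->num_queue_pairs = hw->num_queue_pairs;
	/* ... fill cfg->qpair[i] ring addresses and lengths per queue ... */

	memset(&args, 0, sizeof(args));
	args.v_op = VIRTCHNL_OP_CONFIG_VSI_QUEUES;
	args.req_msg = (uint8_t *)cfg;
	args.req_msglen = size;

	/* The DCF relays the message to the PF over the mailbox. */
	err = ice_dcf_execute_virtchnl_cmd(hw, &args);

	rte_free(cfg);
	return err;
}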
 drivers/net/ice/ice_dcf.c        | 112 ++++++++++++++++++++++++++++
 drivers/net/ice/ice_dcf.h        |   6 ++
 drivers/net/ice/ice_dcf_ethdev.c | 126 +++++++++++++++++++++++++++++++
 3 files changed, 244 insertions(+)

diff --git a/drivers/net/ice/ice_dcf.c b/drivers/net/ice/ice_dcf.c
index f285323dd..8869e0d1c 100644
--- a/drivers/net/ice/ice_dcf.c
+++ b/drivers/net/ice/ice_dcf.c
@@ -24,6 +24,7 @@
 #include 

 #include "ice_dcf.h"
+#include "ice_rxtx.h"

 #define ICE_DCF_AQ_LEN 32
 #define ICE_DCF_AQ_BUF_SZ 4096
@@ -825,3 +826,114 @@ ice_dcf_init_rss(struct ice_dcf_hw *hw)

 	return 0;
 }
+
+#define IAVF_RXDID_LEGACY_1 1
+#define IAVF_RXDID_COMMS_GENERIC 16
+
+int
+ice_dcf_configure_queues(struct ice_dcf_hw *hw)
+{
+	struct ice_rx_queue **rxq =
+		(struct ice_rx_queue **)hw->eth_dev->data->rx_queues;
+	struct ice_tx_queue **txq =
+		(struct ice_tx_queue **)hw->eth_dev->data->tx_queues;
+	struct virtchnl_vsi_queue_config_info *vc_config;
+	struct virtchnl_queue_pair_info *vc_qp;
+	struct dcf_virtchnl_cmd args;
+	uint16_t i, size;
+	int err;
+
+	size = sizeof(*vc_config) +
+	       sizeof(vc_config->qpair[0]) * hw->num_queue_pairs;
+	vc_config = rte_zmalloc("cfg_queue", size, 0);
+	if (!vc_config)
+		return -ENOMEM;
+
+	vc_config->vsi_id = hw->vsi_res->vsi_id;
+	vc_config->num_queue_pairs = hw->num_queue_pairs;
+
+	for (i = 0, vc_qp = vc_config->qpair;
+	     i < hw->num_queue_pairs;
+	     i++, vc_qp++) {
+		vc_qp->txq.vsi_id = hw->vsi_res->vsi_id;
+		vc_qp->txq.queue_id = i;
+		if (i < hw->eth_dev->data->nb_tx_queues) {
+			vc_qp->txq.ring_len = txq[i]->nb_tx_desc;
+			vc_qp->txq.dma_ring_addr = txq[i]->tx_ring_dma;
+		}
+		vc_qp->rxq.vsi_id = hw->vsi_res->vsi_id;
+		vc_qp->rxq.queue_id = i;
+		vc_qp->rxq.max_pkt_size = rxq[i]->max_pkt_len;
+
+		if (i >= hw->eth_dev->data->nb_rx_queues)
+			continue;
+
+		vc_qp->rxq.ring_len = rxq[i]->nb_rx_desc;
+		vc_qp->rxq.dma_ring_addr = rxq[i]->rx_ring_dma;
+		vc_qp->rxq.databuffer_size = rxq[i]->rx_buf_len;
+
+		if (hw->vf_res->vf_cap_flags &
+		    VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC &&
+		    hw->supported_rxdid &
+		    BIT(IAVF_RXDID_COMMS_GENERIC)) {
+			vc_qp->rxq.rxdid = IAVF_RXDID_COMMS_GENERIC;
+			PMD_DRV_LOG(NOTICE, "request RXDID == %d in "
+				    "Queue[%d]", vc_qp->rxq.rxdid, i);
+		} else {
+			PMD_DRV_LOG(ERR, "RXDID 16 is not supported");
+			rte_free(vc_config);
+			return -EINVAL;
+		}
+	}
+
+	memset(&args, 0, sizeof(args));
+	args.v_op = VIRTCHNL_OP_CONFIG_VSI_QUEUES;
+	args.req_msg = (uint8_t *)vc_config;
+	args.req_msglen = size;
+
+	err = ice_dcf_execute_virtchnl_cmd(hw, &args);
+	if (err)
+		PMD_DRV_LOG(ERR, "Failed to execute command of"
+			    " VIRTCHNL_OP_CONFIG_VSI_QUEUES");
+
+	rte_free(vc_config);
+	return err;
+}
+
+int
+ice_dcf_config_irq_map(struct ice_dcf_hw *hw)
+{
+	struct virtchnl_irq_map_info *map_info;
+	struct virtchnl_vector_map *vecmap;
+	struct dcf_virtchnl_cmd args;
+	int len, i, err;
+
+	len = sizeof(struct virtchnl_irq_map_info) +
+	      sizeof(struct virtchnl_vector_map) * hw->nb_msix;
+
+	map_info = rte_zmalloc("map_info", len, 0);
+	if (!map_info)
+		return -ENOMEM;
+
+	map_info->num_vectors = hw->nb_msix;
+	for (i = 0; i < hw->nb_msix; i++) {
+		vecmap = &map_info->vecmap[i];
+		vecmap->vsi_id = hw->vsi_res->vsi_id;
+		vecmap->rxitr_idx = 0;
+		vecmap->vector_id = hw->msix_base + i;
+		vecmap->txq_map = 0;
+		vecmap->rxq_map = hw->rxq_map[hw->msix_base + i];
+	}
+
+	memset(&args, 0, sizeof(args));
+	args.v_op = VIRTCHNL_OP_CONFIG_IRQ_MAP;
+	args.req_msg = (u8 *)map_info;
+	args.req_msglen = len;
+
+	err = ice_dcf_execute_virtchnl_cmd(hw, &args);
+	if (err)
+		PMD_DRV_LOG(ERR, "Fail to execute command OP_CONFIG_IRQ_MAP");
+
+	rte_free(map_info);
+	return err;
+}
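The rxq_map[] array added to struct ice_dcf_hw below is indexed by MSI-X
vector id: bit q of rxq_map[v] means Rx queue q is steered to vector v, and
each entry becomes one virtchnl_vector_map.rxq_map value in
ice_dcf_config_irq_map() above. A standalone sketch of the round-robin
assignment done later in ice_dcf_config_rx_queues_irqs(); the queue and
vector counts here are made up for illustration:

#include <stdint.h>
#include <stdio.h>

#define MISC_VEC_ID 0	/* vector 0 handles mailbox/misc events */
#define RX_VEC_START 1	/* first data-path vector */

int
main(void)
{
	uint16_t rxq_map[16] = {0};
	uint16_t nb_rxq = 8;	/* assumed Rx queue count */
	uint16_t nb_msix = 4;	/* assumed usable vectors */
	uint16_t q, vec = RX_VEC_START;

	/* Round-robin queues over the data-path vectors, building the
	 * same per-vector bitmaps the driver stores in hw->rxq_map.
	 */
	for (q = 0; q < nb_rxq; q++) {
		rxq_map[vec] |= (uint16_t)(1 << q);
		if (++vec >= nb_msix)
			vec = RX_VEC_START;
	}

	for (vec = 0; vec < nb_msix; vec++)
		printf("vector %u -> Rx queue bitmap 0x%04x\n",
		       vec, rxq_map[vec]);
	return 0;
}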
diff --git a/drivers/net/ice/ice_dcf.h b/drivers/net/ice/ice_dcf.h
index eea4b286b..9470d1df7 100644
--- a/drivers/net/ice/ice_dcf.h
+++ b/drivers/net/ice/ice_dcf.h
@@ -54,6 +54,10 @@ struct ice_dcf_hw {
 	uint8_t *rss_key;
 	uint64_t supported_rxdid;
 	uint16_t num_queue_pairs;
+
+	uint16_t msix_base;
+	uint16_t nb_msix;
+	uint16_t rxq_map[16];
 };

 int ice_dcf_execute_virtchnl_cmd(struct ice_dcf_hw *hw,
@@ -64,5 +68,7 @@ int ice_dcf_handle_vsi_update_event(struct ice_dcf_hw *hw);
 int ice_dcf_init_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw);
 void ice_dcf_uninit_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw);
 int ice_dcf_init_rss(struct ice_dcf_hw *hw);
+int ice_dcf_configure_queues(struct ice_dcf_hw *hw);
+int ice_dcf_config_irq_map(struct ice_dcf_hw *hw);

 #endif /* _ICE_DCF_H_ */
diff --git a/drivers/net/ice/ice_dcf_ethdev.c b/drivers/net/ice/ice_dcf_ethdev.c
index e021d779a..333fee037 100644
--- a/drivers/net/ice/ice_dcf_ethdev.c
+++ b/drivers/net/ice/ice_dcf_ethdev.c
@@ -114,10 +114,124 @@ ice_dcf_init_rx_queues(struct rte_eth_dev *dev)
 	return 0;
 }

+#define IAVF_MISC_VEC_ID RTE_INTR_VEC_ZERO_OFFSET
+#define IAVF_RX_VEC_START RTE_INTR_VEC_RXTX_OFFSET
+
+#define IAVF_ITR_INDEX_DEFAULT 0
+#define IAVF_QUEUE_ITR_INTERVAL_DEFAULT 32 /* 32 us */
+#define IAVF_QUEUE_ITR_INTERVAL_MAX 8160 /* 8160 us */
+
+static inline uint16_t
+iavf_calc_itr_interval(int16_t interval)
+{
+	if (interval < 0 || interval > IAVF_QUEUE_ITR_INTERVAL_MAX)
+		interval = IAVF_QUEUE_ITR_INTERVAL_DEFAULT;
+
+	/* Convert to hardware count, as writing each 1 represents 2 us */
+	return interval / 2;
+}
+
+static int
+ice_dcf_config_rx_queues_irqs(struct rte_eth_dev *dev,
+			      struct rte_intr_handle *intr_handle)
+{
+	struct ice_dcf_adapter *adapter = dev->data->dev_private;
+	struct ice_dcf_hw *hw = &adapter->real_hw;
+	uint16_t interval, i;
+	int vec;
+
+	if (rte_intr_cap_multiple(intr_handle) &&
+	    dev->data->dev_conf.intr_conf.rxq) {
+		if (rte_intr_efd_enable(intr_handle, dev->data->nb_rx_queues))
+			return -1;
+	}
+
+	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
+		intr_handle->intr_vec =
+			rte_zmalloc("intr_vec",
+				    dev->data->nb_rx_queues * sizeof(int), 0);
+		if (!intr_handle->intr_vec) {
+			PMD_DRV_LOG(ERR, "Failed to allocate %d rx intr_vec",
+				    dev->data->nb_rx_queues);
+			return -1;
+		}
+	}
+
+	if (!dev->data->dev_conf.intr_conf.rxq ||
+	    !rte_intr_dp_is_en(intr_handle)) {
+		/* Rx interrupt disabled, Map interrupt only for writeback */
+		hw->nb_msix = 1;
+		if (hw->vf_res->vf_cap_flags &
+		    VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) {
+			/* If WB_ON_ITR is supported, enable it */
+			hw->msix_base = IAVF_RX_VEC_START;
+			IAVF_WRITE_REG(&hw->avf,
+				       IAVF_VFINT_DYN_CTLN1(hw->msix_base - 1),
+				       IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK |
+				       IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_MASK);
+		} else {
+			/* If no WB_ON_ITR offload flags, need to set
+			 * interrupt for descriptor write back.
+			 */
+			hw->msix_base = IAVF_MISC_VEC_ID;
+
+			/* set ITR to max */
+			interval =
+			    iavf_calc_itr_interval(IAVF_QUEUE_ITR_INTERVAL_MAX);
+			IAVF_WRITE_REG(&hw->avf, IAVF_VFINT_DYN_CTL01,
+				       IAVF_VFINT_DYN_CTL01_INTENA_MASK |
+				       (IAVF_ITR_INDEX_DEFAULT <<
+					IAVF_VFINT_DYN_CTL01_ITR_INDX_SHIFT) |
+				       (interval <<
+					IAVF_VFINT_DYN_CTL01_INTERVAL_SHIFT));
+		}
+		IAVF_WRITE_FLUSH(&hw->avf);
+		/* map all queues to the same interrupt */
+		for (i = 0; i < dev->data->nb_rx_queues; i++)
+			hw->rxq_map[hw->msix_base] |= 1 << i;
+	} else {
+		if (!rte_intr_allow_others(intr_handle)) {
+			hw->nb_msix = 1;
+			hw->msix_base = IAVF_MISC_VEC_ID;
+			for (i = 0; i < dev->data->nb_rx_queues; i++) {
+				hw->rxq_map[hw->msix_base] |= 1 << i;
+				intr_handle->intr_vec[i] = IAVF_MISC_VEC_ID;
+			}
+			PMD_DRV_LOG(DEBUG,
+				    "vector %u is mapped to all Rx queues",
+				    hw->msix_base);
+		} else {
+			/* If Rx interrupt is required, and we can use
+			 * multi interrupts, then the vec is from 1
+			 */
+			hw->nb_msix = RTE_MIN(hw->vf_res->max_vectors,
+					      intr_handle->nb_efd);
+			hw->msix_base = IAVF_MISC_VEC_ID;
+			vec = IAVF_MISC_VEC_ID;
+			for (i = 0; i < dev->data->nb_rx_queues; i++) {
+				hw->rxq_map[vec] |= 1 << i;
+				intr_handle->intr_vec[i] = vec++;
+				if (vec >= hw->nb_msix)
+					vec = IAVF_RX_VEC_START;
+			}
+			PMD_DRV_LOG(DEBUG,
+				    "%u vectors are mapped to %u Rx queues",
+				    hw->nb_msix, dev->data->nb_rx_queues);
+		}
+	}
+
+	if (ice_dcf_config_irq_map(hw)) {
+		PMD_DRV_LOG(ERR, "config interrupt mapping failed");
+		return -1;
+	}
+	return 0;
+}
+
 static int
 ice_dcf_dev_start(struct rte_eth_dev *dev)
 {
 	struct ice_dcf_adapter *dcf_ad = dev->data->dev_private;
+	struct rte_intr_handle *intr_handle = dev->intr_handle;
 	struct ice_adapter *ad = &dcf_ad->parent;
 	struct ice_dcf_hw *hw = &dcf_ad->real_hw;
 	int ret;
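iavf_calc_itr_interval() above converts a microsecond interval into the
hardware's 2 us granularity, so the 8160 us maximum written in the
no-WB_ON_ITR path becomes a register value of 4080, and out-of-range
requests fall back to the 32 us default (register value 16). A
self-contained check of that conversion:

#include <assert.h>
#include <stdint.h>

#define QUEUE_ITR_INTERVAL_DEFAULT 32	/* us */
#define QUEUE_ITR_INTERVAL_MAX 8160	/* us */

/* Same conversion as iavf_calc_itr_interval() in the hunk above. */
static uint16_t
calc_itr_interval(int16_t interval)
{
	if (interval < 0 || interval > QUEUE_ITR_INTERVAL_MAX)
		interval = QUEUE_ITR_INTERVAL_DEFAULT;

	/* Hardware counts in units of 2 us per register increment. */
	return (uint16_t)(interval / 2);
}

int
main(void)
{
	assert(calc_itr_interval(QUEUE_ITR_INTERVAL_MAX) == 4080);
	assert(calc_itr_interval(-1) == 16);	/* negative: 32 us default */
	assert(calc_itr_interval(9000) == 16);	/* above max: 32 us default */
	return 0;
}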
@@ -141,6 +255,18 @@ ice_dcf_dev_start(struct rte_eth_dev *dev)
 		}
 	}

+	ret = ice_dcf_configure_queues(hw);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Fail to config queues");
+		return ret;
+	}
+
+	ret = ice_dcf_config_rx_queues_irqs(dev, intr_handle);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Fail to config Rx queues' IRQs");
+		return ret;
+	}
+
 	dev->data->dev_link.link_status = ETH_LINK_UP;

 	return 0;
-- 
2.17.1
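Worth noting for testers: the new mapping only departs from the
single-vector writeback path when the application asks for per-queue Rx
interrupts at configure time. A minimal, hypothetical sketch of that
application-side setup (the helper name and counts are illustrative):

#include <rte_ethdev.h>

/* Request per-Rx-queue interrupts so ice_dcf_config_rx_queues_irqs()
 * takes the multi-vector path (subject to rte_intr_cap_multiple()).
 */
static int
enable_rxq_intr(uint16_t port_id, uint16_t nb_rxq, uint16_t nb_txq)
{
	struct rte_eth_conf conf = {0};

	conf.intr_conf.rxq = 1;
	return rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
}

Once configured, queues are serviced as usual with
rte_eth_dev_rx_intr_enable() and an epoll loop; nothing DCF-specific is
needed on the application side.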