From: Ting Xu <ting.xu@intel.com>
To: dev@dpdk.org
Cc: qi.z.zhang@intel.com, qiming.yang@intel.com, jingjing.wu@intel.com,
 beilei.xing@intel.com, marko.kovacevic@intel.com, john.mcnamara@intel.com,
 Ting Xu <ting.xu@intel.com>
Subject: [dpdk-dev] [PATCH v4 08/12] net/ice: add queue config in DCF
Date: Fri, 19 Jun 2020 16:50:41 +0800
Message-ID: <20200619085045.22875-9-ting.xu@intel.com>
In-Reply-To: <20200619085045.22875-1-ting.xu@intel.com>

From: Qi Zhang <qi.z.zhang@intel.com>

Add queue configuration and Rx queue interrupt mapping during device
start in DCF. The setup is sent to the PF via virtchnl.

Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
Signed-off-by: Ting Xu <ting.xu@intel.com>
---
 drivers/net/ice/ice_dcf.c        | 111 +++++++++++++++++++++++++++
 drivers/net/ice/ice_dcf.h        |   6 ++
 drivers/net/ice/ice_dcf_ethdev.c | 126 +++++++++++++++++++++++++++++++
 3 files changed, 243 insertions(+)

diff --git a/drivers/net/ice/ice_dcf.c b/drivers/net/ice/ice_dcf.c
index f285323dd..8869e0d1c 100644
--- a/drivers/net/ice/ice_dcf.c
+++ b/drivers/net/ice/ice_dcf.c
@@ -24,6 +24,7 @@
 #include <rte_dev.h>
 
 #include "ice_dcf.h"
+#include "ice_rxtx.h"
 
 #define ICE_DCF_AQ_LEN     32
 #define ICE_DCF_AQ_BUF_SZ  4096
@@ -825,3 +826,113 @@ ice_dcf_init_rss(struct ice_dcf_hw *hw)
 
 	return 0;
 }
+
+#define IAVF_RXDID_LEGACY_1 1
+#define IAVF_RXDID_COMMS_GENERIC 16
+
+int
+ice_dcf_configure_queues(struct ice_dcf_hw *hw)
+{
+	struct ice_rx_queue **rxq =
+		(struct ice_rx_queue **)hw->eth_dev->data->rx_queues;
+	struct ice_tx_queue **txq =
+		(struct ice_tx_queue **)hw->eth_dev->data->tx_queues;
+	struct virtchnl_vsi_queue_config_info *vc_config;
+	struct virtchnl_queue_pair_info *vc_qp;
+	struct dcf_virtchnl_cmd args;
+	uint16_t i, size;
+	int err;
+
+	size = sizeof(*vc_config) +
+	       sizeof(vc_config->qpair[0]) * hw->num_queue_pairs;
+	vc_config = rte_zmalloc("cfg_queue", size, 0);
+	if (!vc_config)
+		return -ENOMEM;
+
+	vc_config->vsi_id = hw->vsi_res->vsi_id;
+	vc_config->num_queue_pairs = hw->num_queue_pairs;
+
+	for (i = 0, vc_qp = vc_config->qpair;
+	     i < hw->num_queue_pairs;
+	     i++, vc_qp++) {
+		vc_qp->txq.vsi_id = hw->vsi_res->vsi_id;
+		vc_qp->txq.queue_id = i;
+		if (i < hw->eth_dev->data->nb_tx_queues) {
+			vc_qp->txq.ring_len = txq[i]->nb_tx_desc;
+			vc_qp->txq.dma_ring_addr = txq[i]->tx_ring_dma;
+		}
+		vc_qp->rxq.vsi_id = hw->vsi_res->vsi_id;
+		vc_qp->rxq.queue_id = i;
+		vc_qp->rxq.max_pkt_size = rxq[i]->max_pkt_len;
+
+		if (i >= hw->eth_dev->data->nb_rx_queues)
+			continue;
+
+		vc_qp->rxq.ring_len = rxq[i]->nb_rx_desc;
+		vc_qp->rxq.dma_ring_addr = rxq[i]->rx_ring_dma;
+		vc_qp->rxq.databuffer_size = rxq[i]->rx_buf_len;
+
+		if (hw->vf_res->vf_cap_flags &
+		    VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC &&
+		    hw->supported_rxdid &
+		    BIT(IAVF_RXDID_COMMS_GENERIC)) {
+			vc_qp->rxq.rxdid = IAVF_RXDID_COMMS_GENERIC;
+			PMD_DRV_LOG(NOTICE, "request RXDID == %d in "
+				    "Queue[%d]", vc_qp->rxq.rxdid, i);
+		} else {
+			PMD_DRV_LOG(ERR, "RXDID 16 is not supported");
+			return -EINVAL;
+		}
+	}
+
+	memset(&args, 0, sizeof(args));
+	args.v_op = VIRTCHNL_OP_CONFIG_VSI_QUEUES;
+	args.req_msg = (uint8_t *)vc_config;
+	args.req_msglen = size;
+
+	err = ice_dcf_execute_virtchnl_cmd(hw, &args);
+	if (err)
+		PMD_DRV_LOG(ERR, "Failed to execute command of"
+			    " VIRTCHNL_OP_CONFIG_VSI_QUEUES");
+
+	rte_free(vc_config);
+	return err;
+}
+
+int
+ice_dcf_config_irq_map(struct ice_dcf_hw *hw)
+{
+	struct virtchnl_irq_map_info *map_info;
+	struct virtchnl_vector_map *vecmap;
+	struct dcf_virtchnl_cmd args;
+	int len, i, err;
+
+	len = sizeof(struct virtchnl_irq_map_info) +
+	      sizeof(struct virtchnl_vector_map) * hw->nb_msix;
+
+	map_info = rte_zmalloc("map_info", len, 0);
+	if (!map_info)
+		return -ENOMEM;
+
+	map_info->num_vectors = hw->nb_msix;
+	for (i = 0; i < hw->nb_msix; i++) {
+		vecmap = &map_info->vecmap[i];
+		vecmap->vsi_id = hw->vsi_res->vsi_id;
+		vecmap->rxitr_idx = 0;
+		vecmap->vector_id = hw->msix_base + i;
+		vecmap->txq_map = 0;
+		vecmap->rxq_map = hw->rxq_map[hw->msix_base + i];
+	}
+
+	memset(&args, 0, sizeof(args));
+	args.v_op = VIRTCHNL_OP_CONFIG_IRQ_MAP;
+	args.req_msg = (u8 *)map_info;
+	args.req_msglen = len;
+
+	err = ice_dcf_execute_virtchnl_cmd(hw, &args);
+	if (err)
+		PMD_DRV_LOG(ERR, "Failed to execute command OP_CONFIG_IRQ_MAP");
+
+	rte_free(map_info);
+	return err;
+}
diff --git a/drivers/net/ice/ice_dcf.h b/drivers/net/ice/ice_dcf.h
index eea4b286b..9470d1df7 100644
--- a/drivers/net/ice/ice_dcf.h
+++ b/drivers/net/ice/ice_dcf.h
@@ -54,6 +54,10 @@ struct ice_dcf_hw {
 	uint8_t *rss_key;
 	uint64_t supported_rxdid;
 	uint16_t num_queue_pairs;
+
+	uint16_t msix_base;
+	uint16_t nb_msix;
+	uint16_t rxq_map[16];
 };
 
 int ice_dcf_execute_virtchnl_cmd(struct ice_dcf_hw *hw,
@@ -64,5 +68,7 @@ int ice_dcf_handle_vsi_update_event(struct ice_dcf_hw *hw);
 int ice_dcf_init_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw);
 void ice_dcf_uninit_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw);
 int ice_dcf_init_rss(struct ice_dcf_hw *hw);
+int ice_dcf_configure_queues(struct ice_dcf_hw *hw);
+int ice_dcf_config_irq_map(struct ice_dcf_hw *hw);
 
 #endif /* _ICE_DCF_H_ */
diff --git a/drivers/net/ice/ice_dcf_ethdev.c b/drivers/net/ice/ice_dcf_ethdev.c
index e2ab7e637..a190ab7c1 100644
--- a/drivers/net/ice/ice_dcf_ethdev.c
+++ b/drivers/net/ice/ice_dcf_ethdev.c
@@ -114,10 +114,124 @@ ice_dcf_init_rx_queues(struct rte_eth_dev *dev)
 	return 0;
 }
 
+#define IAVF_MISC_VEC_ID	RTE_INTR_VEC_ZERO_OFFSET
+#define IAVF_RX_VEC_START	RTE_INTR_VEC_RXTX_OFFSET
+
+#define IAVF_ITR_INDEX_DEFAULT	0
+#define IAVF_QUEUE_ITR_INTERVAL_DEFAULT	32 /* 32 us */
+#define IAVF_QUEUE_ITR_INTERVAL_MAX	8160 /* 8160 us */
+
+static inline uint16_t
+iavf_calc_itr_interval(int16_t interval)
+{
+	if (interval < 0 || interval > IAVF_QUEUE_ITR_INTERVAL_MAX)
+		interval = IAVF_QUEUE_ITR_INTERVAL_DEFAULT;
+
+	/* Convert to hardware count, as writing each 1 represents 2 us */
+	return interval / 2;
+}
+
+static int
+ice_dcf_config_rx_queues_irqs(struct rte_eth_dev *dev,
+			      struct rte_intr_handle *intr_handle)
+{
+	struct ice_dcf_adapter *adapter = dev->data->dev_private;
+	struct ice_dcf_hw *hw = &adapter->real_hw;
+	uint16_t interval, i;
+	int vec;
+
+	if (rte_intr_cap_multiple(intr_handle) &&
+	    dev->data->dev_conf.intr_conf.rxq) {
+		if (rte_intr_efd_enable(intr_handle, dev->data->nb_rx_queues))
+			return -1;
+	}
+
+	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
+		intr_handle->intr_vec =
+			rte_zmalloc("intr_vec",
+				    dev->data->nb_rx_queues * sizeof(int), 0);
+		if (!intr_handle->intr_vec) {
+			PMD_DRV_LOG(ERR, "Failed to allocate %d rx intr_vec",
+				    dev->data->nb_rx_queues);
+			return -1;
+		}
+	}
+
+	if (!dev->data->dev_conf.intr_conf.rxq ||
+	    !rte_intr_dp_is_en(intr_handle)) {
+		/* Rx interrupt disabled, map interrupt only for writeback */
+		hw->nb_msix = 1;
+		if (hw->vf_res->vf_cap_flags &
+		    VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) {
+			/* If WB_ON_ITR is supported, enable it */
+			hw->msix_base = IAVF_RX_VEC_START;
+			IAVF_WRITE_REG(&hw->avf,
+				       IAVF_VFINT_DYN_CTLN1(hw->msix_base - 1),
+				       IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK |
+				       IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_MASK);
+		} else {
+			/* If no WB_ON_ITR offload flags, need to set
+			 * interrupt for descriptor write back.
+			 */
+			hw->msix_base = IAVF_MISC_VEC_ID;
+
+			/* set ITR to max */
+			interval = iavf_calc_itr_interval(
+					IAVF_QUEUE_ITR_INTERVAL_MAX);
+			IAVF_WRITE_REG(&hw->avf, IAVF_VFINT_DYN_CTL01,
+				       IAVF_VFINT_DYN_CTL01_INTENA_MASK |
+				       (IAVF_ITR_INDEX_DEFAULT <<
+					IAVF_VFINT_DYN_CTL01_ITR_INDX_SHIFT) |
+				       (interval <<
+					IAVF_VFINT_DYN_CTL01_INTERVAL_SHIFT));
+		}
+		IAVF_WRITE_FLUSH(&hw->avf);
+		/* map all queues to the same interrupt */
+		for (i = 0; i < dev->data->nb_rx_queues; i++)
+			hw->rxq_map[hw->msix_base] |= 1 << i;
+	} else {
+		if (!rte_intr_allow_others(intr_handle)) {
+			hw->nb_msix = 1;
+			hw->msix_base = IAVF_MISC_VEC_ID;
+			for (i = 0; i < dev->data->nb_rx_queues; i++) {
+				hw->rxq_map[hw->msix_base] |= 1 << i;
+				intr_handle->intr_vec[i] = IAVF_MISC_VEC_ID;
+			}
+			PMD_DRV_LOG(DEBUG,
+				    "vector %u is mapped to all Rx queues",
+				    hw->msix_base);
+		} else {
+			/* If Rx interrupt is required, and we can use
+			 * multi interrupts, then the vec is from 1
+			 */
+			hw->nb_msix = RTE_MIN(hw->vf_res->max_vectors,
+					      intr_handle->nb_efd);
+			hw->msix_base = IAVF_MISC_VEC_ID;
+			vec = IAVF_MISC_VEC_ID;
+			for (i = 0; i < dev->data->nb_rx_queues; i++) {
+				hw->rxq_map[vec] |= 1 << i;
+				intr_handle->intr_vec[i] = vec++;
+				if (vec >= hw->nb_msix)
+					vec = IAVF_RX_VEC_START;
+			}
+			PMD_DRV_LOG(DEBUG,
+				    "%u vectors are mapped to %u Rx queues",
+				    hw->nb_msix, dev->data->nb_rx_queues);
+		}
+	}
+
+	if (ice_dcf_config_irq_map(hw)) {
+		PMD_DRV_LOG(ERR, "config interrupt mapping failed");
+		return -1;
+	}
+	return 0;
+}
+
 static int
 ice_dcf_dev_start(struct rte_eth_dev *dev)
 {
 	struct ice_dcf_adapter *dcf_ad = dev->data->dev_private;
+	struct rte_intr_handle *intr_handle = dev->intr_handle;
 	struct ice_adapter *ad = &dcf_ad->parent;
 	struct ice_dcf_hw *hw = &dcf_ad->real_hw;
 	int ret;
@@ -141,6 +255,18 @@ ice_dcf_dev_start(struct rte_eth_dev *dev)
 		}
 	}
 
+	ret = ice_dcf_configure_queues(hw);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Fail to config queues");
+		return ret;
+	}
+
+	ret = ice_dcf_config_rx_queues_irqs(dev, intr_handle);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Fail to config rx queues' irqs");
+		return ret;
+	}
+
 	dev->data->dev_link.link_status = ETH_LINK_UP;
 
 	return 0;
-- 
2.17.1
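
A note on the message sizing in ice_dcf_configure_queues() above: the
virtchnl VSI queue config message ends in a variable-length array of
queue-pair entries, so the request buffer is sized as the header plus one
entry per queue pair. Below is a minimal standalone sketch of that
allocation pattern; qpair_info and vsi_queue_cfg are simplified stand-ins,
not the real virtchnl definitions. Since the real struct declares a
one-element qpair[] tail, the patch's formula over-allocates by roughly one
entry, which is harmless. Unlike the RXDID error path in the patch, which
returns without releasing vc_config, the sketch frees the buffer on every
path.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-ins for the virtchnl structures (illustrative only). */
struct qpair_info {
	uint16_t txq_id;
	uint16_t rxq_id;
};

struct vsi_queue_cfg {
	uint16_t vsi_id;
	uint16_t num_queue_pairs;
	struct qpair_info qpair[1];	/* variable-length tail */
};

int main(void)
{
	unsigned int nb_qp = 4;
	/* Same sizing formula as the patch: header (which already embeds
	 * one tail element) plus one element per queue pair.
	 */
	size_t sz = sizeof(struct vsi_queue_cfg) +
		    sizeof(struct qpair_info) * nb_qp;
	struct vsi_queue_cfg *cfg = calloc(1, sz);

	if (cfg == NULL)
		return 1;
	cfg->num_queue_pairs = (uint16_t)nb_qp;
	printf("request length for %u queue pairs: %zu bytes\n", nb_qp, sz);
	free(cfg);	/* freed on every path */
	return 0;
}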
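
The rxq_map[] table filled in by ice_dcf_config_rx_queues_irqs() and
consumed by ice_dcf_config_irq_map() is a per-vector bitmap of Rx queues.
In the multi-vector branch, queues are dealt out round-robin across the
nb_msix vectors, wrapping back to IAVF_RX_VEC_START so that vector 0 (the
misc vector) is handed out at most once. Here is a small self-contained
sketch of that distribution, with the two vector constants inlined as 0
and 1 to mirror the defines in the patch:

#include <stdint.h>
#include <stdio.h>

#define MISC_VEC_ID  0	/* stands in for IAVF_MISC_VEC_ID */
#define RX_VEC_START 1	/* stands in for IAVF_RX_VEC_START */

int main(void)
{
	uint16_t rxq_map[16] = { 0 };	/* bitmap of Rx queues per vector */
	unsigned int nb_rx_queues = 6, nb_msix = 3;
	unsigned int i, vec = MISC_VEC_ID;

	/* Same round-robin as the patch: queue i lands on vector vec,
	 * then vec advances and wraps back to RX_VEC_START, so the misc
	 * vector 0 is used for a queue at most once.
	 */
	for (i = 0; i < nb_rx_queues; i++) {
		rxq_map[vec] |= 1 << i;
		vec++;
		if (vec >= nb_msix)
			vec = RX_VEC_START;
	}

	for (vec = 0; vec < nb_msix; vec++)
		printf("vector %u -> rxq bitmap 0x%04x\n",
		       vec, (unsigned int)rxq_map[vec]);
	return 0;
}

With nb_rx_queues = 6 and nb_msix = 3 this prints vector 0 -> 0x0001,
vector 1 -> 0x002a, vector 2 -> 0x0014, matching a hand trace of the loop
in the patch.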