DPDK patches and discussions
From: Ting Xu <ting.xu@intel.com>
To: dev@dpdk.org
Cc: qi.z.zhang@intel.com, qiming.yang@intel.com,
	jingjing.wu@intel.com, beilei.xing@intel.com,
	marko.kovacevic@intel.com, john.mcnamara@intel.com,
	xiaolong.ye@intel.com
Subject: [dpdk-dev] [PATCH v3 08/12] net/ice: add queue config in DCF
Date: Tue, 16 Jun 2020 12:41:08 +0000	[thread overview]
Message-ID: <20200616124112.108014-9-ting.xu@intel.com> (raw)
In-Reply-To: <20200616124112.108014-1-ting.xu@intel.com>

From: Qi Zhang <qi.z.zhang@intel.com>

Add queue and Rx queue interrupt configuration during device start
in DCF. The setup is sent to the PF via virtchnl messages.
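
For reference, a rough sketch of the call flow added on the start path
(all of the functions below appear in the diff):

    ice_dcf_dev_start()
      -> ice_dcf_configure_queues()        /* VIRTCHNL_OP_CONFIG_VSI_QUEUES */
      -> ice_dcf_config_rx_queues_irqs()
           -> ice_dcf_config_irq_map()     /* VIRTCHNL_OP_CONFIG_IRQ_MAP */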

Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
Signed-off-by: Ting Xu <ting.xu@intel.com>
---
 drivers/net/ice/ice_dcf.c        | 111 +++++++++++++++++++++++++++
 drivers/net/ice/ice_dcf.h        |   6 ++
 drivers/net/ice/ice_dcf_ethdev.c | 126 +++++++++++++++++++++++++++++++
 3 files changed, 243 insertions(+)

diff --git a/drivers/net/ice/ice_dcf.c b/drivers/net/ice/ice_dcf.c
index f285323dd..8869e0d1c 100644
--- a/drivers/net/ice/ice_dcf.c
+++ b/drivers/net/ice/ice_dcf.c
@@ -24,6 +24,7 @@
 #include <rte_dev.h>
 
 #include "ice_dcf.h"
+#include "ice_rxtx.h"
 
 #define ICE_DCF_AQ_LEN     32
 #define ICE_DCF_AQ_BUF_SZ  4096
@@ -825,3 +826,113 @@ ice_dcf_init_rss(struct ice_dcf_hw *hw)
 
 	return 0;
 }
+
+#define IAVF_RXDID_LEGACY_1 1
+#define IAVF_RXDID_COMMS_GENERIC 16
+
+int
+ice_dcf_configure_queues(struct ice_dcf_hw *hw)
+{
+	struct ice_rx_queue **rxq =
+		(struct ice_rx_queue **)hw->eth_dev->data->rx_queues;
+	struct ice_tx_queue **txq =
+		(struct ice_tx_queue **)hw->eth_dev->data->tx_queues;
+	struct virtchnl_vsi_queue_config_info *vc_config;
+	struct virtchnl_queue_pair_info *vc_qp;
+	struct dcf_virtchnl_cmd args;
+	uint16_t i, size;
+	int err;
+
+	size = sizeof(*vc_config) +
+	       sizeof(vc_config->qpair[0]) * hw->num_queue_pairs;
+	vc_config = rte_zmalloc("cfg_queue", size, 0);
+	if (!vc_config)
+		return -ENOMEM;
+
+	vc_config->vsi_id = hw->vsi_res->vsi_id;
+	vc_config->num_queue_pairs = hw->num_queue_pairs;
+
+	for (i = 0, vc_qp = vc_config->qpair;
+	     i < hw->num_queue_pairs;
+	     i++, vc_qp++) {
+		vc_qp->txq.vsi_id = hw->vsi_res->vsi_id;
+		vc_qp->txq.queue_id = i;
+		if (i < hw->eth_dev->data->nb_tx_queues) {
+			vc_qp->txq.ring_len = txq[i]->nb_tx_desc;
+			vc_qp->txq.dma_ring_addr = txq[i]->tx_ring_dma;
+		}
+		vc_qp->rxq.vsi_id = hw->vsi_res->vsi_id;
+		vc_qp->rxq.queue_id = i;
+
+		if (i >= hw->eth_dev->data->nb_rx_queues)
+			continue;
+
+		vc_qp->rxq.max_pkt_size = rxq[i]->max_pkt_len;
+		vc_qp->rxq.ring_len = rxq[i]->nb_rx_desc;
+		vc_qp->rxq.dma_ring_addr = rxq[i]->rx_ring_dma;
+		vc_qp->rxq.databuffer_size = rxq[i]->rx_buf_len;
+
+		if (hw->vf_res->vf_cap_flags &
+		    VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC &&
+		    hw->supported_rxdid &
+		    BIT(IAVF_RXDID_COMMS_GENERIC)) {
+			vc_qp->rxq.rxdid = IAVF_RXDID_COMMS_GENERIC;
+			PMD_DRV_LOG(NOTICE, "request RXDID == %d in "
+				    "Queue[%d]", vc_qp->rxq.rxdid, i);
+		} else {
+			PMD_DRV_LOG(ERR, "RXDID 16 is not supported");
+			rte_free(vc_config);
+			return -EINVAL;
+		}
+	}
+
+	memset(&args, 0, sizeof(args));
+	args.v_op = VIRTCHNL_OP_CONFIG_VSI_QUEUES;
+	args.req_msg = (uint8_t *)vc_config;
+	args.req_msglen = size;
+
+	err = ice_dcf_execute_virtchnl_cmd(hw, &args);
+	if (err)
+		PMD_DRV_LOG(ERR, "Failed to execute command of"
+			    " VIRTCHNL_OP_CONFIG_VSI_QUEUES");
+
+	rte_free(vc_config);
+	return err;
+}
+
+int
+ice_dcf_config_irq_map(struct ice_dcf_hw *hw)
+{
+	struct virtchnl_irq_map_info *map_info;
+	struct virtchnl_vector_map *vecmap;
+	struct dcf_virtchnl_cmd args;
+	int len, i, err;
+
+	len = sizeof(struct virtchnl_irq_map_info) +
+	      sizeof(struct virtchnl_vector_map) * hw->nb_msix;
+
+	map_info = rte_zmalloc("map_info", len, 0);
+	if (!map_info)
+		return -ENOMEM;
+
+	map_info->num_vectors = hw->nb_msix;
+	for (i = 0; i < hw->nb_msix; i++) {
+		vecmap = &map_info->vecmap[i];
+		vecmap->vsi_id = hw->vsi_res->vsi_id;
+		vecmap->rxitr_idx = 0;
+		vecmap->vector_id = hw->msix_base + i;
+		vecmap->txq_map = 0;
+		vecmap->rxq_map = hw->rxq_map[hw->msix_base + i];
+	}
+
+	memset(&args, 0, sizeof(args));
+	args.v_op = VIRTCHNL_OP_CONFIG_IRQ_MAP;
+	args.req_msg = (u8 *)map_info;
+	args.req_msglen = len;
+
+	err = ice_dcf_execute_virtchnl_cmd(hw, &args);
+	if (err)
+		PMD_DRV_LOG(ERR, "Failed to execute command OP_CONFIG_IRQ_MAP");
+
+	rte_free(map_info);
+	return err;
+}
diff --git a/drivers/net/ice/ice_dcf.h b/drivers/net/ice/ice_dcf.h
index eea4b286b..9470d1df7 100644
--- a/drivers/net/ice/ice_dcf.h
+++ b/drivers/net/ice/ice_dcf.h
@@ -54,6 +54,10 @@ struct ice_dcf_hw {
 	uint8_t *rss_key;
 	uint64_t supported_rxdid;
 	uint16_t num_queue_pairs;
+
+	uint16_t msix_base;
+	uint16_t nb_msix;
+	uint16_t rxq_map[16];
 };
 
 int ice_dcf_execute_virtchnl_cmd(struct ice_dcf_hw *hw,
@@ -64,5 +68,7 @@ int ice_dcf_handle_vsi_update_event(struct ice_dcf_hw *hw);
 int ice_dcf_init_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw);
 void ice_dcf_uninit_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw);
 int ice_dcf_init_rss(struct ice_dcf_hw *hw);
+int ice_dcf_configure_queues(struct ice_dcf_hw *hw);
+int ice_dcf_config_irq_map(struct ice_dcf_hw *hw);
 
 #endif /* _ICE_DCF_H_ */
diff --git a/drivers/net/ice/ice_dcf_ethdev.c b/drivers/net/ice/ice_dcf_ethdev.c
index e021d779a..333fee037 100644
--- a/drivers/net/ice/ice_dcf_ethdev.c
+++ b/drivers/net/ice/ice_dcf_ethdev.c
@@ -114,10 +114,124 @@ ice_dcf_init_rx_queues(struct rte_eth_dev *dev)
 	return 0;
 }
 
+#define IAVF_MISC_VEC_ID                RTE_INTR_VEC_ZERO_OFFSET
+#define IAVF_RX_VEC_START               RTE_INTR_VEC_RXTX_OFFSET
+
+#define IAVF_ITR_INDEX_DEFAULT          0
+#define IAVF_QUEUE_ITR_INTERVAL_DEFAULT 32 /* 32 us */
+#define IAVF_QUEUE_ITR_INTERVAL_MAX     8160 /* 8160 us */
+
+static inline uint16_t
+iavf_calc_itr_interval(int16_t interval)
+{
+	if (interval < 0 || interval > IAVF_QUEUE_ITR_INTERVAL_MAX)
+		interval = IAVF_QUEUE_ITR_INTERVAL_DEFAULT;
+
+	/* Convert to hardware count; each written count represents 2 us */
+	return interval / 2;
+}
+
+static int
+ice_dcf_config_rx_queues_irqs(struct rte_eth_dev *dev,
+				     struct rte_intr_handle *intr_handle)
+{
+	struct ice_dcf_adapter *adapter = dev->data->dev_private;
+	struct ice_dcf_hw *hw = &adapter->real_hw;
+	uint16_t interval, i;
+	int vec;
+
+	if (rte_intr_cap_multiple(intr_handle) &&
+	    dev->data->dev_conf.intr_conf.rxq) {
+		if (rte_intr_efd_enable(intr_handle, dev->data->nb_rx_queues))
+			return -1;
+	}
+
+	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
+		intr_handle->intr_vec =
+			rte_zmalloc("intr_vec",
+				    dev->data->nb_rx_queues * sizeof(int), 0);
+		if (!intr_handle->intr_vec) {
+			PMD_DRV_LOG(ERR, "Failed to allocate %d rx intr_vec",
+				    dev->data->nb_rx_queues);
+			return -1;
+		}
+	}
+
+	if (!dev->data->dev_conf.intr_conf.rxq ||
+	    !rte_intr_dp_is_en(intr_handle)) {
+		/* Rx interrupt disabled, map interrupt only for write-back */
+		hw->nb_msix = 1;
+		if (hw->vf_res->vf_cap_flags &
+		    VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) {
+			/* If WB_ON_ITR is supported, enable it */
+			hw->msix_base = IAVF_RX_VEC_START;
+			IAVF_WRITE_REG(&hw->avf,
+				       IAVF_VFINT_DYN_CTLN1(hw->msix_base - 1),
+				       IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK |
+				       IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_MASK);
+		} else {
+			/* Without the WB_ON_ITR offload flag, an interrupt
+			 * is needed for descriptor write-back.
+			 */
+			hw->msix_base = IAVF_MISC_VEC_ID;
+
+			/* set ITR to max */
+			interval =
+			iavf_calc_itr_interval(IAVF_QUEUE_ITR_INTERVAL_MAX);
+			IAVF_WRITE_REG(&hw->avf, IAVF_VFINT_DYN_CTL01,
+				       IAVF_VFINT_DYN_CTL01_INTENA_MASK |
+				       (IAVF_ITR_INDEX_DEFAULT <<
+					IAVF_VFINT_DYN_CTL01_ITR_INDX_SHIFT) |
+				       (interval <<
+					IAVF_VFINT_DYN_CTL01_INTERVAL_SHIFT));
+		}
+		IAVF_WRITE_FLUSH(&hw->avf);
+		/* map all queues to the same interrupt */
+		for (i = 0; i < dev->data->nb_rx_queues; i++)
+			hw->rxq_map[hw->msix_base] |= 1 << i;
+	} else {
+		if (!rte_intr_allow_others(intr_handle)) {
+			hw->nb_msix = 1;
+			hw->msix_base = IAVF_MISC_VEC_ID;
+			for (i = 0; i < dev->data->nb_rx_queues; i++) {
+				hw->rxq_map[hw->msix_base] |= 1 << i;
+				intr_handle->intr_vec[i] = IAVF_MISC_VEC_ID;
+			}
+			PMD_DRV_LOG(DEBUG,
+				    "vector %u is mapped to all Rx queues",
+				    hw->msix_base);
+		} else {
+			/* If Rx interrupts are required and multiple
+			 * interrupts can be used, vectors start from 1.
+			 */
+			hw->nb_msix = RTE_MIN(hw->vf_res->max_vectors,
+					      intr_handle->nb_efd);
+			hw->msix_base = IAVF_MISC_VEC_ID;
+			vec = IAVF_MISC_VEC_ID;
+			for (i = 0; i < dev->data->nb_rx_queues; i++) {
+				hw->rxq_map[vec] |= 1 << i;
+				intr_handle->intr_vec[i] = vec++;
+				if (vec >= hw->nb_msix)
+					vec = IAVF_RX_VEC_START;
+			}
+			PMD_DRV_LOG(DEBUG,
+				    "%u vectors are mapped to %u Rx queues",
+				    hw->nb_msix, dev->data->nb_rx_queues);
+		}
+	}
+
+	if (ice_dcf_config_irq_map(hw)) {
+		PMD_DRV_LOG(ERR, "Failed to configure interrupt mapping");
+		return -1;
+	}
+	return 0;
+}
+
 static int
 ice_dcf_dev_start(struct rte_eth_dev *dev)
 {
 	struct ice_dcf_adapter *dcf_ad = dev->data->dev_private;
+	struct rte_intr_handle *intr_handle = dev->intr_handle;
 	struct ice_adapter *ad = &dcf_ad->parent;
 	struct ice_dcf_hw *hw = &dcf_ad->real_hw;
 	int ret;
@@ -141,6 +255,18 @@ ice_dcf_dev_start(struct rte_eth_dev *dev)
 		}
 	}
 
+	ret = ice_dcf_configure_queues(hw);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to configure queues");
+		return ret;
+	}
+
+	ret = ice_dcf_config_rx_queues_irqs(dev, intr_handle);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to configure Rx queue IRQs");
+		return ret;
+	}
+
 	dev->data->dev_link.link_status = ETH_LINK_UP;
 
 	return 0;
-- 
2.17.1



Thread overview: 18+ messages
2020-06-16 12:41 [dpdk-dev] [PATCH v3 00/12] enable DCF datapath configuration Ting Xu
2020-06-16 12:41 ` [dpdk-dev] [PATCH v3 01/12] net/ice: init RSS and supported RXDID in DCF Ting Xu
2020-06-16 12:41 ` [dpdk-dev] [PATCH v3 02/12] net/ice: complete device info get " Ting Xu
2020-06-18  6:44   ` Yang, Qiming
2020-06-16 12:41 ` [dpdk-dev] [PATCH v3 03/12] net/ice: complete dev configure " Ting Xu
2020-06-16 12:41 ` [dpdk-dev] [PATCH v3 04/12] net/ice: complete queue setup " Ting Xu
2020-06-16 12:41 ` [dpdk-dev] [PATCH v3 05/12] net/ice: add stop flag for device start / stop Ting Xu
2020-06-18  6:48   ` Yang, Qiming
2020-06-16 12:41 ` [dpdk-dev] [PATCH v3 06/12] net/ice: add Rx queue init in DCF Ting Xu
2020-06-16 12:41 ` [dpdk-dev] [PATCH v3 07/12] net/ice: init RSS during DCF start Ting Xu
2020-06-16 12:41 ` Ting Xu [this message]
2020-06-16 12:41 ` [dpdk-dev] [PATCH v3 09/12] net/ice: add queue start and stop for DCF Ting Xu
2020-06-18  6:39   ` Yang, Qiming
2020-06-16 12:41 ` [dpdk-dev] [PATCH v3 10/12] net/ice: enable stats " Ting Xu
2020-06-18  6:31   ` Yang, Qiming
2020-06-19  2:44     ` Xu, Ting
2020-06-16 12:41 ` [dpdk-dev] [PATCH v3 11/12] net/ice: set MAC filter during dev start " Ting Xu
2020-06-16 12:41 ` [dpdk-dev] [PATCH v3 12/12] doc: enable DCF datapath configuration Ting Xu
