From: Ye Xiaolong <xiaolong.ye@intel.com>
To: Ting Xu <ting.xu@intel.com>
Cc: dev@dpdk.org, qi.z.zhang@intel.com, qiming.yang@intel.com,
john.mcnamara@intel.com, marko.kovacevic@intel.com
Subject: Re: [dpdk-dev] [PATCH v1 08/12] net/ice: add queue config in DCF
Date: Sun, 7 Jun 2020 18:11:22 +0800 [thread overview]
Message-ID: <20200607101122.GA7883@intel.com> (raw)
In-Reply-To: <20200605201737.33766-9-ting.xu@intel.com>
On 06/05, Ting Xu wrote:
>From: Qi Zhang <qi.z.zhang@intel.com>
>
>Add queues and Rx queue irqs configuration during device start
>in DCF. The setup is sent to PF via virtchnl.
>
>Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
>---
> drivers/net/ice/ice_dcf.c | 109 +++++++++++++++++++++++++++
> drivers/net/ice/ice_dcf.h | 6 ++
> drivers/net/ice/ice_dcf_ethdev.c | 125 +++++++++++++++++++++++++++++++
> 3 files changed, 240 insertions(+)
>
>diff --git a/drivers/net/ice/ice_dcf.c b/drivers/net/ice/ice_dcf.c
>index 8d078163e..d864ae894 100644
>--- a/drivers/net/ice/ice_dcf.c
>+++ b/drivers/net/ice/ice_dcf.c
>@@ -24,6 +24,7 @@
> #include <rte_dev.h>
>
> #include "ice_dcf.h"
>+#include "ice_rxtx.h"
>
> #define ICE_DCF_AQ_LEN 32
> #define ICE_DCF_AQ_BUF_SZ 4096
>@@ -831,3 +832,111 @@ ice_dcf_init_rss(struct ice_dcf_hw *hw)
>
> 	return 0;
> }
>+
>+#define IAVF_RXDID_LEGACY_1 1
>+#define IAVF_RXDID_COMMS_GENERIC 16
>+
>+int
>+ice_dcf_configure_queues(struct ice_dcf_hw *hw)
>+{
>+	struct ice_rx_queue **rxq =
>+		(struct ice_rx_queue **)hw->eth_dev->data->rx_queues;
>+	struct ice_tx_queue **txq =
>+		(struct ice_tx_queue **)hw->eth_dev->data->tx_queues;
>+	struct virtchnl_vsi_queue_config_info *vc_config;
>+	struct virtchnl_queue_pair_info *vc_qp;
>+	struct dcf_virtchnl_cmd args;
>+	uint16_t i, size;
>+	int err;
>+
>+	size = sizeof(*vc_config) +
>+	       sizeof(vc_config->qpair[0]) * hw->num_queue_pairs;
>+	vc_config = rte_zmalloc("cfg_queue", size, 0);
>+	if (!vc_config)
>+		return -ENOMEM;
>+
>+	vc_config->vsi_id = hw->vsi_res->vsi_id;
>+	vc_config->num_queue_pairs = hw->num_queue_pairs;
>+
>+	for (i = 0, vc_qp = vc_config->qpair;
>+	     i < hw->num_queue_pairs;
>+	     i++, vc_qp++) {
>+		vc_qp->txq.vsi_id = hw->vsi_res->vsi_id;
>+		vc_qp->txq.queue_id = i;
>+		if (i < hw->eth_dev->data->nb_tx_queues) {
>+			vc_qp->txq.ring_len = txq[i]->nb_tx_desc;
>+			vc_qp->txq.dma_ring_addr = txq[i]->tx_ring_dma;
>+		}
>+		vc_qp->rxq.vsi_id = hw->vsi_res->vsi_id;
>+		vc_qp->rxq.queue_id = i;
>+		vc_qp->rxq.max_pkt_size = rxq[i]->max_pkt_len;
>+		if (i < hw->eth_dev->data->nb_rx_queues) {
What about changing it as below to reduce the nesting level of the if block
(a fuller sketch of the loop body follows right after this)?

	if (i >= hw->eth_dev->data->nb_rx_queues)
		break;

	vc_qp->rxq.ring_len = rxq[i]->nb_rx_desc;
	vc_qp->rxq.dma_ring_addr = rxq[i]->rx_ring_dma;
	vc_qp->rxq.databuffer_size = rxq[i]->rx_buf_len;
	...
}
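
For reference, a minimal sketch of how the whole loop body could look with
that guard (untested, just an illustration). Note that it also moves the
max_pkt_size assignment under the guard so rxq[i] is only dereferenced for
valid Rx queues, and it keeps the early "break"; whether "break" (rather
than skipping only the Rx part) is safe when num_queue_pairs exceeds
nb_rx_queues needs to be checked against the Tx-side setup above:

	for (i = 0, vc_qp = vc_config->qpair;
	     i < hw->num_queue_pairs;
	     i++, vc_qp++) {
		vc_qp->txq.vsi_id = hw->vsi_res->vsi_id;
		vc_qp->txq.queue_id = i;
		if (i < hw->eth_dev->data->nb_tx_queues) {
			vc_qp->txq.ring_len = txq[i]->nb_tx_desc;
			vc_qp->txq.dma_ring_addr = txq[i]->tx_ring_dma;
		}
		vc_qp->rxq.vsi_id = hw->vsi_res->vsi_id;
		vc_qp->rxq.queue_id = i;

		/* early exit instead of nesting the rest of the Rx setup */
		if (i >= hw->eth_dev->data->nb_rx_queues)
			break;

		vc_qp->rxq.max_pkt_size = rxq[i]->max_pkt_len;
		vc_qp->rxq.ring_len = rxq[i]->nb_rx_desc;
		vc_qp->rxq.dma_ring_addr = rxq[i]->rx_ring_dma;
		vc_qp->rxq.databuffer_size = rxq[i]->rx_buf_len;
		/* ... RXDID selection as in the patch ... */
	}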
>+			vc_qp->rxq.ring_len = rxq[i]->nb_rx_desc;
>+			vc_qp->rxq.dma_ring_addr = rxq[i]->rx_ring_dma;
>+			vc_qp->rxq.databuffer_size = rxq[i]->rx_buf_len;
>+
>+			if (hw->vf_res->vf_cap_flags &
>+			    VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC &&
>+			    hw->supported_rxdid &
>+			    BIT(IAVF_RXDID_COMMS_GENERIC)) {
>+				vc_qp->rxq.rxdid = IAVF_RXDID_COMMS_GENERIC;
>+				PMD_DRV_LOG(NOTICE, "request RXDID == %d in "
[snip]
>+static inline uint16_t
>+iavf_calc_itr_interval(int16_t interval)
>+{
>+	if (interval < 0 || interval > IAVF_QUEUE_ITR_INTERVAL_MAX)
>+		interval = IAVF_QUEUE_ITR_INTERVAL_DEFAULT;
>+
>+	/* Convert to hardware count, as writing each 1 represents 2 us */
>+	return interval / 2;
>+}
>+
>+static int ice_dcf_config_rx_queues_irqs(struct rte_eth_dev *dev,
>+					 struct rte_intr_handle *intr_handle)
Please put the return type on a separate line.
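i.e. something like (same signature, just reformatted):

static int
ice_dcf_config_rx_queues_irqs(struct rte_eth_dev *dev,
			      struct rte_intr_handle *intr_handle)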
>+{
>+	struct ice_dcf_adapter *adapter = dev->data->dev_private;
>+	struct ice_dcf_hw *hw = &adapter->real_hw;
>+	uint16_t interval, i;
>+	int vec;
>+
>+	if (rte_intr_cap_multiple(intr_handle) &&
>+	    dev->data->dev_conf.intr_conf.rxq) {
>+		if (rte_intr_efd_enable(intr_handle, dev->data->nb_rx_queues))
>+			return -1;
>+	}
>+
>+	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
>+		intr_handle->intr_vec =
>+			rte_zmalloc("intr_vec",
>+				    dev->data->nb_rx_queues * sizeof(int), 0);
>+		if (!intr_handle->intr_vec) {
>+			PMD_DRV_LOG(ERR, "Failed to allocate %d rx intr_vec",
>+				    dev->data->nb_rx_queues);
>+			return -1;
>+		}
>+	}
>+
>+	if (!dev->data->dev_conf.intr_conf.rxq ||
>+	    !rte_intr_dp_is_en(intr_handle)) {
>+		/* Rx interrupt disabled, Map interrupt only for writeback */
>+		hw->nb_msix = 1;
>+		if (hw->vf_res->vf_cap_flags &
>+		    VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) {
>+			/* If WB_ON_ITR supports, enable it */
>+			hw->msix_base = IAVF_RX_VEC_START;
>+			IAVF_WRITE_REG(&hw->avf,
>+				       IAVF_VFINT_DYN_CTLN1(hw->msix_base - 1),
>+				       IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK |
>+				       IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_MASK);
>+		} else {
>+			/* If no WB_ON_ITR offload flags, need to set
>+			 * interrupt for descriptor write back.
>+			 */
>+			hw->msix_base = IAVF_MISC_VEC_ID;
>+
>+			/* set ITR to max */
>+			interval =
>+			iavf_calc_itr_interval(IAVF_QUEUE_ITR_INTERVAL_MAX);
>+			IAVF_WRITE_REG(&hw->avf, IAVF_VFINT_DYN_CTL01,
>+				       IAVF_VFINT_DYN_CTL01_INTENA_MASK |
>+				       (IAVF_ITR_INDEX_DEFAULT <<
>+					IAVF_VFINT_DYN_CTL01_ITR_INDX_SHIFT) |
>+				       (interval <<
>+					IAVF_VFINT_DYN_CTL01_INTERVAL_SHIFT));
>+		}
>+		IAVF_WRITE_FLUSH(&hw->avf);
>+		/* map all queues to the same interrupt */
>+		for (i = 0; i < dev->data->nb_rx_queues; i++)
>+			hw->rxq_map[hw->msix_base] |= 1 << i;
>+	} else {
>+		if (!rte_intr_allow_others(intr_handle)) {
>+			hw->nb_msix = 1;
>+			hw->msix_base = IAVF_MISC_VEC_ID;
>+			for (i = 0; i < dev->data->nb_rx_queues; i++) {
>+				hw->rxq_map[hw->msix_base] |= 1 << i;
>+				intr_handle->intr_vec[i] = IAVF_MISC_VEC_ID;
>+			}
>+			PMD_DRV_LOG(DEBUG,
>+				    "vector %u are mapping to all Rx queues",
>+				    hw->msix_base);
>+		} else {
>+			/* If Rx interrupt is reuquired, and we can use
>+			 * multi interrupts, then the vec is from 1
>+			 */
>+			hw->nb_msix = RTE_MIN(hw->vf_res->max_vectors,
>+					      intr_handle->nb_efd);
>+			hw->msix_base = IAVF_MISC_VEC_ID;
>+			vec = IAVF_MISC_VEC_ID;
>+			for (i = 0; i < dev->data->nb_rx_queues; i++) {
>+				hw->rxq_map[vec] |= 1 << i;
>+				intr_handle->intr_vec[i] = vec++;
>+				if (vec >= hw->nb_msix)
>+					vec = IAVF_RX_VEC_START;
>+			}
>+			PMD_DRV_LOG(DEBUG,
>+				    "%u vectors are mapping to %u Rx queues",
>+				    hw->nb_msix, dev->data->nb_rx_queues);
>+		}
>+	}
>+
>+	if (ice_dcf_config_irq_map(hw)) {
>+		PMD_DRV_LOG(ERR, "config interrupt mapping failed");
>+		return -1;
Do we need to free intr_handle->intr_vec here?
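If so, a minimal sketch of what that cleanup could look like (untested, and
assuming intr_vec is not released elsewhere on this failure path):

	if (ice_dcf_config_irq_map(hw)) {
		PMD_DRV_LOG(ERR, "config interrupt mapping failed");
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
		return -1;
	}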
>+	}
>+	return 0;
>+}
>+
> static int
> ice_dcf_dev_start(struct rte_eth_dev *dev)
> {
> 	struct ice_dcf_adapter *dcf_ad = dev->data->dev_private;
>+	struct rte_intr_handle *intr_handle = dev->intr_handle;
> 	struct ice_adapter *ad = &dcf_ad->parent;
> 	struct ice_dcf_hw *hw = &dcf_ad->real_hw;
> 	int ret;
>@@ -141,6 +254,18 @@ ice_dcf_dev_start(struct rte_eth_dev *dev)
> 		}
> 	}
>
>+	ret = ice_dcf_configure_queues(hw);
>+	if (ret) {
>+		PMD_DRV_LOG(ERR, "Fail to config queues");
>+		return ret;
>+	}
>+
>+	ret = ice_dcf_config_rx_queues_irqs(dev, intr_handle);
>+	if (ret) {
>+		PMD_DRV_LOG(ERR, "Fail to config rx queues' irqs");
>+		return ret;
>+	}
>+
> 	dev->data->dev_link.link_status = ETH_LINK_UP;
>
> 	return 0;
>--
>2.17.1
>