From: Ting Xu <ting.xu@intel.com>
To: dev@dpdk.org
Cc: qi.z.zhang@intel.com, qiming.yang@intel.com,
	jingjing.wu@intel.com, beilei.xing@intel.com,
	marko.kovacevic@intel.com, john.mcnamara@intel.com,
	Ting Xu <ting.xu@intel.com>
Subject: [dpdk-dev] [PATCH v4 01/12] net/ice: init RSS and supported RXDID in DCF
Date: Fri, 19 Jun 2020 16:50:34 +0800
Message-ID: <20200619085045.22875-2-ting.xu@intel.com>
In-Reply-To: <20200619085045.22875-1-ting.xu@intel.com>

From: Qi Zhang <qi.z.zhang@intel.com>

Initialize RSS parameters and retrieve the bitmap of supported
flexible descriptor RXDIDs from the PF during DCF initialization.

Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
Signed-off-by: Ting Xu <ting.xu@intel.com>
---
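Note for reviewers: the supported_rxdid bitmap fetched below is consumed
by later patches in this series when a flexible Rx descriptor format is
selected per queue. A minimal sketch of that check follows (illustrative
only, not part of this patch; the helper name is hypothetical, and it
assumes RXDID values fit within the 64-bit bitmap):

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical helper: test whether the PF advertised support for a
 * given flexible descriptor RXDID. hw->supported_rxdid is the 64-bit
 * bitmap populated by ice_dcf_get_supported_rxdid() in this patch.
 */
static inline bool
ice_dcf_rxdid_supported(const struct ice_dcf_hw *hw, uint8_t rxdid)
{
	return rxdid < 64 && (hw->supported_rxdid & (1ULL << rxdid)) != 0;
}
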
 drivers/net/ice/ice_dcf.c | 54 ++++++++++++++++++++++++++++++++++++++-
 drivers/net/ice/ice_dcf.h |  3 +++
 2 files changed, 56 insertions(+), 1 deletion(-)

diff --git a/drivers/net/ice/ice_dcf.c b/drivers/net/ice/ice_dcf.c
index 0cd5d1bf6..93fabd5f7 100644
--- a/drivers/net/ice/ice_dcf.c
+++ b/drivers/net/ice/ice_dcf.c
@@ -233,7 +233,7 @@ ice_dcf_get_vf_resource(struct ice_dcf_hw *hw)
 
 	caps = VIRTCHNL_VF_OFFLOAD_WB_ON_ITR | VIRTCHNL_VF_OFFLOAD_RX_POLLING |
 	       VIRTCHNL_VF_CAP_ADV_LINK_SPEED | VIRTCHNL_VF_CAP_DCF |
-	       VF_BASE_MODE_OFFLOADS;
+	       VF_BASE_MODE_OFFLOADS | VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC;
 
 	err = ice_dcf_send_cmd_req_no_irq(hw, VIRTCHNL_OP_GET_VF_RESOURCES,
 					  (uint8_t *)&caps, sizeof(caps));
@@ -547,6 +547,30 @@ ice_dcf_handle_vsi_update_event(struct ice_dcf_hw *hw)
 	return err;
 }
 
+static int
+ice_dcf_get_supported_rxdid(struct ice_dcf_hw *hw)
+{
+	int err;
+
+	err = ice_dcf_send_cmd_req_no_irq(hw,
+					  VIRTCHNL_OP_GET_SUPPORTED_RXDIDS,
+					  NULL, 0);
+	if (err) {
+		PMD_INIT_LOG(ERR, "Failed to send OP_GET_SUPPORTED_RXDIDS");
+		return -1;
+	}
+
+	err = ice_dcf_recv_cmd_rsp_no_irq(hw, VIRTCHNL_OP_GET_SUPPORTED_RXDIDS,
+					  (uint8_t *)&hw->supported_rxdid,
+					  sizeof(uint64_t), NULL);
+	if (err) {
+		PMD_INIT_LOG(ERR, "Failed to get response of OP_GET_SUPPORTED_RXDIDS");
+		return -1;
+	}
+
+	return 0;
+}
+
 int
 ice_dcf_init_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw)
 {
@@ -620,6 +644,29 @@ ice_dcf_init_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw)
 		goto err_alloc;
 	}
 
+	/* Allocate memory for RSS info */
+	if (hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
+		hw->rss_key = rte_zmalloc(NULL,
+					  hw->vf_res->rss_key_size, 0);
+		if (!hw->rss_key) {
+			PMD_INIT_LOG(ERR, "unable to allocate rss_key memory");
+			goto err_alloc;
+		}
+		hw->rss_lut = rte_zmalloc("rss_lut",
+					  hw->vf_res->rss_lut_size, 0);
+		if (!hw->rss_lut) {
+			PMD_INIT_LOG(ERR, "unable to allocate rss_lut memory");
+			goto err_rss;
+		}
+	}
+
+	if (hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) {
+		if (ice_dcf_get_supported_rxdid(hw) != 0) {
+			PMD_INIT_LOG(ERR, "failed to do get supported rxdid");
+			goto err_rss;
+		}
+	}
+
 	hw->eth_dev = eth_dev;
 	rte_intr_callback_register(&pci_dev->intr_handle,
 				   ice_dcf_dev_interrupt_handler, hw);
@@ -628,6 +675,9 @@ ice_dcf_init_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw)
 
 	return 0;
 
+err_rss:
+	rte_free(hw->rss_key);
+	rte_free(hw->rss_lut);
 err_alloc:
 	rte_free(hw->vf_res);
 err_api:
@@ -655,4 +705,6 @@ ice_dcf_uninit_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw)
 	rte_free(hw->arq_buf);
 	rte_free(hw->vf_vsi_map);
 	rte_free(hw->vf_res);
+	rte_free(hw->rss_lut);
+	rte_free(hw->rss_key);
 }
diff --git a/drivers/net/ice/ice_dcf.h b/drivers/net/ice/ice_dcf.h
index d2e447b48..152266e3c 100644
--- a/drivers/net/ice/ice_dcf.h
+++ b/drivers/net/ice/ice_dcf.h
@@ -50,6 +50,9 @@ struct ice_dcf_hw {
 	uint16_t vsi_id;
 
 	struct rte_eth_dev *eth_dev;
+	uint8_t *rss_lut;
+	uint8_t *rss_key;
+	uint64_t supported_rxdid;
 };
 
 int ice_dcf_execute_virtchnl_cmd(struct ice_dcf_hw *hw,
-- 
2.17.1

