From: Ye Xiaolong <xiaolong.ye@intel.com>
To: Ting Xu <ting.xu@intel.com>
Cc: dev@dpdk.org, qi.z.zhang@intel.com, qiming.yang@intel.com,
john.mcnamara@intel.com, marko.kovacevic@intel.com
Subject: Re: [dpdk-dev] [PATCH v1 07/12] net/ice: init RSS during DCF start
Date: Fri, 5 Jun 2020 23:26:54 +0800
Message-ID: <20200605152654.GA7654@intel.com>
In-Reply-To: <20200605201737.33766-8-ting.xu@intel.com>

On 06/05, Ting Xu wrote:
>From: Qi Zhang <qi.z.zhang@intel.com>
>
>Enable RSS initialization during DCF start. Add RSS LUT and
>RSS key configuration functions.
>
>Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
>---
> drivers/net/ice/ice_dcf.c | 123 +++++++++++++++++++++++++++++++
> drivers/net/ice/ice_dcf.h | 1 +
> drivers/net/ice/ice_dcf_ethdev.c | 14 +++-
> 3 files changed, 135 insertions(+), 3 deletions(-)
>
>diff --git a/drivers/net/ice/ice_dcf.c b/drivers/net/ice/ice_dcf.c
>index 93fabd5f7..8d078163e 100644
>--- a/drivers/net/ice/ice_dcf.c
>+++ b/drivers/net/ice/ice_dcf.c
>@@ -708,3 +708,126 @@ ice_dcf_uninit_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw)
> rte_free(hw->rss_lut);
> rte_free(hw->rss_key);
> }
>+
>+static int
>+ice_dcf_configure_rss_key(struct ice_dcf_hw *hw)
>+{
>+ struct virtchnl_rss_key *rss_key;
>+ struct dcf_virtchnl_cmd args;
>+ int len, err;
>+
>+ len = sizeof(*rss_key) + hw->vf_res->rss_key_size - 1;
>+ rss_key = rte_zmalloc("rss_key", len, 0);
>+ if (!rss_key)
>+ return -ENOMEM;
>+
>+ rss_key->vsi_id = hw->vsi_res->vsi_id;
>+ rss_key->key_len = hw->vf_res->rss_key_size;
>+ rte_memcpy(rss_key->key, hw->rss_key, hw->vf_res->rss_key_size);
>+
>+ args.v_op = VIRTCHNL_OP_CONFIG_RSS_KEY;
>+ args.req_msglen = len;
>+ args.req_msg = (uint8_t *)rss_key;
>+ args.rsp_msglen = 0;
>+ args.rsp_buflen = 0;
>+ args.rsp_msgbuf = NULL;
>+ args.pending = 0;
>+
>+ err = ice_dcf_execute_virtchnl_cmd(hw, &args);
>+ if (err) {
>+ PMD_INIT_LOG(ERR, "Failed to execute OP_CONFIG_RSS_KEY");
rss_key is leaked on this error path, it needs to be freed before returning here as well.
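Something like this would do it, I think (untested sketch, keeping the existing log message):

	err = ice_dcf_execute_virtchnl_cmd(hw, &args);
	if (err)
		PMD_INIT_LOG(ERR, "Failed to execute OP_CONFIG_RSS_KEY");

	/* free the request buffer on both success and failure */
	rte_free(rss_key);
	return err;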
>+ return err;
>+ }
>+
>+ rte_free(rss_key);
>+ return 0;
>+}
>+
>+static int
>+ice_dcf_configure_rss_lut(struct ice_dcf_hw *hw)
>+{
>+ struct virtchnl_rss_lut *rss_lut;
>+ struct dcf_virtchnl_cmd args;
>+ int len, err;
>+
>+ len = sizeof(*rss_lut) + hw->vf_res->rss_lut_size - 1;
>+ rss_lut = rte_zmalloc("rss_lut", len, 0);
>+ if (!rss_lut)
>+ return -ENOMEM;
>+
>+ rss_lut->vsi_id = hw->vsi_res->vsi_id;
>+ rss_lut->lut_entries = hw->vf_res->rss_lut_size;
>+ rte_memcpy(rss_lut->lut, hw->rss_lut, hw->vf_res->rss_lut_size);
>+
>+ args.v_op = VIRTCHNL_OP_CONFIG_RSS_LUT;
>+ args.req_msglen = len;
>+ args.req_msg = (uint8_t *)rss_lut;
>+ args.rsp_msglen = 0;
>+ args.rsp_buflen = 0;
>+ args.rsp_msgbuf = NULL;
>+ args.pending = 0;
>+
>+ err = ice_dcf_execute_virtchnl_cmd(hw, &args);
>+ if (err) {
>+ PMD_INIT_LOG(ERR, "Failed to execute OP_CONFIG_RSS_LUT");
Same issue here, rss_lut also needs to be freed in the error path (same fix as in ice_dcf_configure_rss_key above).
>+ return err;
>+ }
>+
>+ rte_free(rss_lut);
>+ return 0;
>+}
>+
>+int
>+ice_dcf_init_rss(struct ice_dcf_hw *hw)
>+{
>+ struct rte_eth_dev *dev = hw->eth_dev;
>+ struct rte_eth_rss_conf *rss_conf;
>+ uint8_t i, j, nb_q;
>+ int ret;
>+
>+ rss_conf = &dev->data->dev_conf.rx_adv_conf.rss_conf;
>+ nb_q = dev->data->nb_rx_queues;
>+
>+ if (!(hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF)) {
>+ PMD_DRV_LOG(DEBUG, "RSS is not supported");
>+ return -ENOTSUP;
>+ }
>+ if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
>+ PMD_DRV_LOG(WARNING, "RSS is enabled by PF by default");
>+ /* set all lut items to default queue */
>+ for (i = 0; i < hw->vf_res->rss_lut_size; i++)
>+ hw->rss_lut[i] = 0;
How about memset(hw->rss_lut, 0, hw->vf_res->rss_lut_size) instead of the loop?
>+ ret = ice_dcf_configure_rss_lut(hw);
>+ return ret;
And since ret is returned right away, this could simply be "return ice_dcf_configure_rss_lut(hw);" — see the combined suggestion below.
>+ }
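i.e. the body of this branch could be reduced to something like (untested sketch):

	PMD_DRV_LOG(WARNING, "RSS is enabled by PF by default");
	/* set all LUT entries to the default queue 0 */
	memset(hw->rss_lut, 0, hw->vf_res->rss_lut_size);
	return ice_dcf_configure_rss_lut(hw);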
>+
>+ /* In IAVF, RSS enablement is set by PF driver. It is not supported
>+ * to set based on rss_conf->rss_hf.
>+ */
>+
>+ /* configure RSS key */
>+ if (!rss_conf->rss_key)
>+ /* Calculate the default hash key */
>+ for (i = 0; i <= hw->vf_res->rss_key_size; i++)
>+ hw->rss_key[i] = (uint8_t)rte_rand();
Why use <= here? The last iteration writes hw->rss_key[rss_key_size], which is one byte past the end of the key buffer, i.e. an out-of-bounds access.
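I'd expect the bound to be < (untested, rest of the loop unchanged):

	for (i = 0; i < hw->vf_res->rss_key_size; i++)
		hw->rss_key[i] = (uint8_t)rte_rand();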
>+ else
>+ rte_memcpy(hw->rss_key, rss_conf->rss_key,
>+ RTE_MIN(rss_conf->rss_key_len,
>+ hw->vf_res->rss_key_size));
>+
>+ /* init RSS LUT table */
>+ for (i = 0, j = 0; i < hw->vf_res->rss_lut_size; i++, j++) {
>+ if (j >= nb_q)
>+ j = 0;
>+ hw->rss_lut[i] = j;
>+ }
>+ /* send virtchnnl ops to configure rss*/
>+ ret = ice_dcf_configure_rss_lut(hw);
>+ if (ret)
>+ return ret;
>+ ret = ice_dcf_configure_rss_key(hw);
>+ if (ret)
>+ return ret;
>+
>+ return 0;
>+}
>diff --git a/drivers/net/ice/ice_dcf.h b/drivers/net/ice/ice_dcf.h
>index dcb2a0283..eea4b286b 100644
>--- a/drivers/net/ice/ice_dcf.h
>+++ b/drivers/net/ice/ice_dcf.h
>@@ -63,5 +63,6 @@ int ice_dcf_send_aq_cmd(void *dcf_hw, struct ice_aq_desc *desc,
> int ice_dcf_handle_vsi_update_event(struct ice_dcf_hw *hw);
> int ice_dcf_init_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw);
> void ice_dcf_uninit_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw);
>+int ice_dcf_init_rss(struct ice_dcf_hw *hw);
>
> #endif /* _ICE_DCF_H_ */
>diff --git a/drivers/net/ice/ice_dcf_ethdev.c b/drivers/net/ice/ice_dcf_ethdev.c
>index 1f7474dc3..5fbf70803 100644
>--- a/drivers/net/ice/ice_dcf_ethdev.c
>+++ b/drivers/net/ice/ice_dcf_ethdev.c
>@@ -51,9 +51,9 @@ ice_dcf_init_rxq(struct rte_eth_dev *dev, struct ice_rx_queue *rxq)
> uint16_t buf_size, max_pkt_len, len;
>
> buf_size = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
>-
>- /* Calculate the maximum packet length allowed */
>- len = rxq->rx_buf_len * IAVF_MAX_CHAINED_RX_BUFFERS;
>+ rxq->rx_hdr_len = 0;
>+ rxq->rx_buf_len = RTE_ALIGN(buf_size, (1 << ICE_RLAN_CTX_DBUF_S));
>+ len = ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len;
The above change seems unrelated to this patch, what about squashing it into patch 6?
Thanks,
Xiaolong
> max_pkt_len = RTE_MIN(len, dev->data->dev_conf.rxmode.max_rx_pkt_len);
>
> /* Check if the jumbo frame and maximum packet length are set
>@@ -133,6 +133,14 @@ ice_dcf_dev_start(struct rte_eth_dev *dev)
> return ret;
> }
>
>+ if (hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
>+ ret = ice_dcf_init_rss(hw);
>+ if (ret) {
>+ PMD_DRV_LOG(ERR, "Failed to configure RSS");
>+ return ret;
>+ }
>+ }
>+
> dev->data->dev_link.link_status = ETH_LINK_UP;
>
> return 0;
>--
>2.17.1
>