From: Junfeng Guo <junfeng.guo@intel.com>
To: qi.z.zhang@intel.com, jingjing.wu@intel.com, beilei.xing@intel.com
Cc: dev@dpdk.org, junfeng.guo@intel.com
Subject: [RFC v2 9/9] net/idpf: support RSS
Date: Mon, 9 May 2022 17:11:33 +0800 [thread overview]
Message-ID: <20220509091133.3752306-10-junfeng.guo@intel.com> (raw)
In-Reply-To: <20220509091133.3752306-1-junfeng.guo@intel.com>
Add RSS support: allocate and initialize the per-vport RSS key and
lookup table at configure time, and program the key, LUT and default
hash configuration to hardware via virtchnl2 messages.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
Signed-off-by: Junfeng Guo <junfeng.guo@intel.com>
---
drivers/net/idpf/idpf_ethdev.c | 106 +++++++++++++++++++++++++++++++++
drivers/net/idpf/idpf_ethdev.h | 18 +++++-
drivers/net/idpf/idpf_vchnl.c | 93 +++++++++++++++++++++++++++++
3 files changed, 216 insertions(+), 1 deletion(-)
diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c
index 1a985caf46..2a0304c18e 100644
--- a/drivers/net/idpf/idpf_ethdev.c
+++ b/drivers/net/idpf/idpf_ethdev.c
@@ -85,6 +85,7 @@ idpf_dev_info_get(__rte_unused struct rte_eth_dev *dev, struct rte_eth_dev_info
dev_info->max_mtu = dev_info->max_rx_pktlen - IDPF_ETH_OVERHEAD;
dev_info->min_mtu = RTE_ETHER_MIN_MTU;
+ dev_info->flow_type_rss_offloads = IDPF_RSS_OFFLOAD_ALL;
dev_info->max_mac_addrs = IDPF_NUM_MACADDR_MAX;
dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
dev_info->rx_offload_capa =
@@ -292,9 +293,96 @@ idpf_init_vport(struct rte_eth_dev *dev)
return 0;
}
+/* Push the vport's cached RSS configuration to hardware in three
+ * virtchnl steps: key, then LUT, then hash fields.  Stops at the first
+ * failure and returns that error; returns 0 when all steps succeed.
+ */
+static int
+idpf_config_rss(struct idpf_vport *vport)
+{
+	int ret;
+
+	ret = idpf_set_rss_key(vport);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to configure RSS key");
+		return ret;
+	}
+
+	ret = idpf_set_rss_lut(vport);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to configure RSS lut");
+		return ret;
+	}
+
+	ret = idpf_set_rss_hash(vport);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to configure RSS hash");
+		return ret;
+	}
+
+	return ret;
+}
+
+/* Allocate and initialize the vport's RSS state (key, LUT, hash field
+ * selection) and program it to hardware via idpf_config_rss().
+ * Returns 0 on success, -ENOMEM on allocation failure, or the error
+ * propagated from idpf_config_rss().  On failure all allocations made
+ * here are released and the vport pointers reset to NULL.
+ */
+static int
+idpf_init_rss(struct idpf_vport *vport)
+{
+	struct rte_eth_rss_conf *rss_conf;
+	uint16_t i, nb_q, lut_size;
+	int ret = 0;
+
+	rss_conf = &vport->dev_data->dev_conf.rx_adv_conf.rss_conf;
+	nb_q = vport->num_rx_q;
+
+	vport->rss_key = (uint8_t *)rte_zmalloc("rss_key",
+					     vport->rss_key_size, 0);
+	if (!vport->rss_key) {
+		PMD_INIT_LOG(ERR, "Failed to allocate RSS key");
+		ret = -ENOMEM;
+		goto err_key;
+	}
+
+	lut_size = vport->rss_lut_size;
+	vport->rss_lut = (uint32_t *)rte_zmalloc("rss_lut",
+					     sizeof(uint32_t) * lut_size, 0);
+	if (!vport->rss_lut) {
+		PMD_INIT_LOG(ERR, "Failed to allocate RSS lut");
+		ret = -ENOMEM;
+		goto err_lut;
+	}
+
+	/* No key supplied by the application: generate a random one.
+	 * Otherwise copy at most rss_key_size bytes; if the user key is
+	 * shorter, the remainder stays zero from rte_zmalloc().
+	 */
+	if (!rss_conf->rss_key) {
+		for (i = 0; i < vport->rss_key_size; i++)
+			vport->rss_key[i] = (uint8_t)rte_rand();
+	} else {
+		rte_memcpy(vport->rss_key, rss_conf->rss_key,
+			   RTE_MIN(rss_conf->rss_key_len,
+				   vport->rss_key_size));
+	}
+
+	/* Default LUT: spread entries round-robin across all Rx queues. */
+	for (i = 0; i < lut_size; i++)
+		vport->rss_lut[i] = i % nb_q;
+
+	/* NOTE(review): rss_conf->rss_hf from the application is ignored
+	 * here and the expanded default hash set is always used — confirm
+	 * this is intentional for the RFC stage.
+	 */
+	vport->rss_hf = IECM_DEFAULT_RSS_HASH_EXPANDED;
+
+	ret = idpf_config_rss(vport);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to configure RSS");
+		goto err_cfg;
+	}
+
+	return ret;
+
+err_cfg:
+	rte_free(vport->rss_lut);
+	vport->rss_lut = NULL;
+err_lut:
+	rte_free(vport->rss_key);
+	vport->rss_key = NULL;
+err_key:
+	return ret;
+}
+
static int
idpf_dev_configure(struct rte_eth_dev *dev)
{
+ struct idpf_vport *vport =
+ (struct idpf_vport *)dev->data->dev_private;
int ret = 0;
if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
@@ -319,6 +407,14 @@ idpf_dev_configure(struct rte_eth_dev *dev)
return ret;
}
+ if (adapter->caps->rss_caps) {
+ ret = idpf_init_rss(vport);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to init rss");
+ return ret;
+ }
+ }
+
return ret;
}
@@ -451,6 +547,16 @@ idpf_dev_close(struct rte_eth_dev *dev)
idpf_dev_stop(dev);
idpf_destroy_vport(vport);
+ if (vport->rss_lut) {
+ rte_free(vport->rss_lut);
+ vport->rss_lut = NULL;
+ }
+
+ if (vport->rss_key) {
+ rte_free(vport->rss_key);
+ vport->rss_key = NULL;
+ }
+
return 0;
}
diff --git a/drivers/net/idpf/idpf_ethdev.h b/drivers/net/idpf/idpf_ethdev.h
index 5520b2d6ce..0b8e163bbb 100644
--- a/drivers/net/idpf/idpf_ethdev.h
+++ b/drivers/net/idpf/idpf_ethdev.h
@@ -43,6 +43,20 @@
#define IDPF_ETH_OVERHEAD \
(RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + IDPF_VLAN_TAG_SIZE * 2)
+#define IDPF_RSS_OFFLOAD_ALL ( \
+ RTE_ETH_RSS_IPV4 | \
+ RTE_ETH_RSS_FRAG_IPV4 | \
+ RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
+ RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
+ RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
+ RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \
+ RTE_ETH_RSS_IPV6 | \
+ RTE_ETH_RSS_FRAG_IPV6 | \
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
+ RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
+ RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
+ RTE_ETH_RSS_NONFRAG_IPV6_OTHER)
+
#ifndef ETH_ADDR_LEN
#define ETH_ADDR_LEN 6
#endif
@@ -196,7 +210,9 @@ int idpf_check_api_version(struct idpf_adapter *adapter);
int idpf_get_caps(struct idpf_adapter *adapter);
int idpf_create_vport(__rte_unused struct rte_eth_dev *dev);
int idpf_destroy_vport(struct idpf_vport *vport);
-
+int idpf_set_rss_key(struct idpf_vport *vport);
+int idpf_set_rss_lut(struct idpf_vport *vport);
+int idpf_set_rss_hash(struct idpf_vport *vport);
int idpf_config_rxqs(struct idpf_vport *vport);
int idpf_config_txqs(struct idpf_vport *vport);
int idpf_switch_queue(struct idpf_vport *vport, uint16_t qid,
diff --git a/drivers/net/idpf/idpf_vchnl.c b/drivers/net/idpf/idpf_vchnl.c
index 74ed555449..fb7cee6915 100644
--- a/drivers/net/idpf/idpf_vchnl.c
+++ b/drivers/net/idpf/idpf_vchnl.c
@@ -441,6 +441,99 @@ idpf_destroy_vport(struct idpf_vport *vport)
return err;
}
+/* Send the vport's RSS hash key to hardware via
+ * VIRTCHNL2_OP_SET_RSS_KEY.  Returns 0 on success, -ENOMEM if the
+ * message buffer cannot be allocated, or the virtchnl error code.
+ */
+int
+idpf_set_rss_key(struct idpf_vport *vport)
+{
+	struct virtchnl2_rss_key *rss_key;
+	struct idpf_cmd_info args;
+	int len, err;
+
+	/* The message struct declares a 1-element key[] array, hence
+	 * the "- 1" when sizing the variable-length tail.
+	 */
+	len = sizeof(*rss_key) + sizeof(rss_key->key[0]) *
+		(vport->rss_key_size - 1);
+	rss_key = rte_zmalloc("rss_key", len, 0);
+	if (!rss_key)
+		return -ENOMEM;
+
+	rss_key->vport_id = vport->vport_id;
+	rss_key->key_len = vport->rss_key_size;
+	rte_memcpy(rss_key->key, vport->rss_key,
+		   sizeof(rss_key->key[0]) * vport->rss_key_size);
+
+	memset(&args, 0, sizeof(args));
+	args.ops = VIRTCHNL2_OP_SET_RSS_KEY;
+	args.in_args = (uint8_t *)rss_key;
+	args.in_args_size = len;
+	args.out_buffer = adapter->mbx_resp;
+	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+	err = idpf_execute_vc_cmd(adapter, &args);
+	if (err)
+		PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_SET_RSS_KEY");
+
+	/* Always release the message buffer; the early return on error
+	 * previously leaked it.  Matches idpf_set_rss_lut() below.
+	 */
+	rte_free(rss_key);
+	return err;
+}
+
+/* Send the vport's RSS lookup table to hardware via
+ * VIRTCHNL2_OP_SET_RSS_LUT.  Returns 0 on success, -ENOMEM if the
+ * message buffer cannot be allocated, or the virtchnl error code.
+ */
+int
+idpf_set_rss_lut(struct idpf_vport *vport)
+{
+	struct virtchnl2_rss_lut *rss_lut;
+	struct idpf_cmd_info args;
+	int len, err;
+
+	/* The message struct declares a 1-element lut[] array, hence
+	 * the "- 1" when sizing the variable-length tail.
+	 */
+	len = sizeof(*rss_lut) + sizeof(rss_lut->lut[0]) *
+		(vport->rss_lut_size - 1);
+	rss_lut = rte_zmalloc("rss_lut", len, 0);
+	if (!rss_lut)
+		return -ENOMEM;
+
+	rss_lut->vport_id = vport->vport_id;
+	rss_lut->lut_entries = vport->rss_lut_size;
+	rte_memcpy(rss_lut->lut, vport->rss_lut,
+		   sizeof(rss_lut->lut[0]) * vport->rss_lut_size);
+
+	memset(&args, 0, sizeof(args));
+	args.ops = VIRTCHNL2_OP_SET_RSS_LUT;
+	args.in_args = (uint8_t *)rss_lut;
+	args.in_args_size = len;
+	args.out_buffer = adapter->mbx_resp;
+	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+	err = idpf_execute_vc_cmd(adapter, &args);
+	if (err)
+		PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_SET_RSS_LUT");
+
+	/* Message buffer is released on both success and error paths. */
+	rte_free(rss_lut);
+	return err;
+}
+
+/* Program which packet-type groups (vport->rss_hf) participate in RSS
+ * hashing via VIRTCHNL2_OP_SET_RSS_HASH.  The message is small and
+ * fixed-size, so it lives on the stack — no allocation to clean up.
+ * Returns 0 on success or the virtchnl error code.
+ */
+int
+idpf_set_rss_hash(struct idpf_vport *vport)
+{
+	struct virtchnl2_rss_hash rss_hash;
+	struct idpf_cmd_info args;
+	int err;
+
+	memset(&rss_hash, 0, sizeof(rss_hash));
+	rss_hash.ptype_groups = vport->rss_hf;
+	rss_hash.vport_id = vport->vport_id;
+
+	memset(&args, 0, sizeof(args));
+	args.ops = VIRTCHNL2_OP_SET_RSS_HASH;
+	args.in_args = (uint8_t *)&rss_hash;
+	args.in_args_size = sizeof(rss_hash);
+	args.out_buffer = adapter->mbx_resp;
+	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+	err = idpf_execute_vc_cmd(adapter, &args);
+	if (err)
+		PMD_DRV_LOG(ERR, "Failed to execute command of OP_SET_RSS_HASH");
+
+	return err;
+}
+
#define IDPF_RX_BUF_STRIDE 64
int
idpf_config_rxqs(struct idpf_vport *vport)
--
2.25.1
next prev parent reply other threads:[~2022-05-09 9:12 UTC|newest]
Thread overview: 33+ messages / expand[flat|nested] mbox.gz Atom feed top
2022-05-07 7:07 [RFC 0/9] add support for idpf PMD in DPDK Junfeng Guo
2022-05-07 7:07 ` [RFC 1/9] net/idpf/base: introduce base code Junfeng Guo
2022-05-09 9:11 ` [RFC v2 0/9] add support for idpf PMD in DPDK Junfeng Guo
2022-05-09 9:11 ` [RFC v2 1/9] net/idpf/base: introduce base code Junfeng Guo
2022-05-09 9:11 ` [RFC v2 2/9] net/idpf/base: add OS specific implementation Junfeng Guo
2022-05-09 9:11 ` [RFC v2 3/9] net/idpf: support device initialization Junfeng Guo
2022-05-09 9:11 ` [RFC v2 4/9] net/idpf: support queue ops Junfeng Guo
2022-05-09 9:11 ` [RFC v2 5/9] net/idpf: support getting device information Junfeng Guo
2022-05-09 9:11 ` [RFC v2 6/9] net/idpf: support packet type getting Junfeng Guo
2022-05-09 9:11 ` [RFC v2 7/9] net/idpf: support link update Junfeng Guo
2022-05-09 9:11 ` [RFC v2 8/9] net/idpf: support basic Rx/Tx Junfeng Guo
2022-05-09 9:11 ` Junfeng Guo [this message]
2022-05-18 8:25 ` [RFC v3 00/11] add support for idpf PMD in DPDK Junfeng Guo
2022-05-18 8:25 ` [RFC v3 01/11] net/idpf/base: introduce base code Junfeng Guo
2022-05-18 15:26 ` Stephen Hemminger
2022-05-18 8:25 ` [RFC v3 02/11] net/idpf/base: add OS specific implementation Junfeng Guo
2022-05-18 8:25 ` [RFC v3 03/11] net/idpf: support device initialization Junfeng Guo
2022-05-18 8:25 ` [RFC v3 04/11] net/idpf: support queue ops Junfeng Guo
2022-05-18 8:25 ` [RFC v3 05/11] net/idpf: support getting device information Junfeng Guo
2022-05-18 8:25 ` [RFC v3 06/11] net/idpf: support packet type getting Junfeng Guo
2022-05-18 8:25 ` [RFC v3 07/11] net/idpf: support link update Junfeng Guo
2022-05-18 8:25 ` [RFC v3 08/11] net/idpf: support basic Rx/Tx Junfeng Guo
2022-05-18 8:25 ` [RFC v3 09/11] net/idpf: support RSS Junfeng Guo
2022-05-18 8:25 ` [RFC v3 10/11] net/idpf: support MTU configuration Junfeng Guo
2022-05-18 8:25 ` [RFC v3 11/11] net/idpf: add CPF device ID for idpf map table Junfeng Guo
2022-05-07 7:07 ` [RFC 2/9] net/idpf/base: add OS specific implementation Junfeng Guo
2022-05-07 7:07 ` [RFC 3/9] net/idpf: support device initialization Junfeng Guo
2022-05-07 7:07 ` [RFC 4/9] net/idpf: support queue ops Junfeng Guo
2022-05-07 7:07 ` [RFC 5/9] net/idpf: support getting device information Junfeng Guo
2022-05-07 7:07 ` [RFC 6/9] net/idpf: support packet type getting Junfeng Guo
2022-05-07 7:07 ` [RFC 7/9] net/idpf: support link update Junfeng Guo
2022-05-07 7:07 ` [RFC 8/9] net/idpf: support basic Rx/Tx Junfeng Guo
2022-05-07 7:07 ` [RFC 9/9] net/idpf: support RSS Junfeng Guo
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20220509091133.3752306-10-junfeng.guo@intel.com \
--to=junfeng.guo@intel.com \
--cc=beilei.xing@intel.com \
--cc=dev@dpdk.org \
--cc=jingjing.wu@intel.com \
--cc=qi.z.zhang@intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).