From: Mingxia Liu <mingxia.liu@intel.com>
To: dev@dpdk.org
Cc: jingjing.wu@intel.com, beilei.xing@intel.com,
qi.z.zhang@intel.com, Mingxia Liu <mingxia.liu@intel.com>
Subject: [PATCH 2/7] common/idpf: add RSS set/get ops
Date: Fri, 16 Dec 2022 09:37:01 +0000
Message-ID: <20221216093706.2453812-3-mingxia.liu@intel.com>
In-Reply-To: <20221216093706.2453812-1-mingxia.liu@intel.com>

Add support for these device ops:
- rss_reta_update
- rss_reta_query
- rss_hash_update
- rss_hash_conf_get
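
For reference, a minimal usage sketch (not part of this patch) of how an
application exercises the hash ops through the generic ethdev API. The
port id, the key contents, and the configure_rss_hash helper are
illustrative assumptions; the key length must match the hash_key_size
reported by rte_eth_dev_info_get() (52 bytes for idpf):

    #include <string.h>
    #include <rte_ethdev.h>

    static int
    configure_rss_hash(uint16_t port_id)
    {
        uint8_t key[52] = { 0x6d, 0x5a }; /* remaining bytes are zero */
        struct rte_eth_rss_conf conf = {
            .rss_key = key,
            .rss_key_len = sizeof(key),
            .rss_hf = RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_UDP,
        };
        int ret;

        /* exercises the new rss_hash_update op (key + hash types) */
        ret = rte_eth_dev_rss_hash_update(port_id, &conf);
        if (ret != 0)
            return ret;

        /* read the configuration back via the rss_hash_conf_get op */
        memset(&conf, 0, sizeof(conf));
        conf.rss_key = key;
        conf.rss_key_len = sizeof(key);
        return rte_eth_dev_rss_hash_conf_get(port_id, &conf);
    }
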
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
---
drivers/common/idpf/idpf_common_device.h | 1 +
drivers/common/idpf/idpf_common_virtchnl.c | 119 ++++++++
drivers/common/idpf/idpf_common_virtchnl.h | 15 +-
drivers/common/idpf/version.map | 6 +
drivers/net/idpf/idpf_ethdev.c | 303 +++++++++++++++++++++
drivers/net/idpf/idpf_ethdev.h | 5 +-
6 files changed, 445 insertions(+), 4 deletions(-)
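
As context for the reta_update/reta_query hunks below: the redirection
table is exchanged in groups of 64 entries, each group carrying a
validity mask, which is where the idx/shift arithmetic in the driver
comes from. A hedged caller-side sketch (spread_reta and its parameters
are illustrative; reta_size must equal the reta_size from dev_info):

    #include <errno.h>
    #include <string.h>
    #include <rte_ethdev.h>

    static int
    spread_reta(uint16_t port_id, uint16_t reta_size, uint16_t nb_rxq)
    {
        struct rte_eth_rss_reta_entry64 reta_conf[8];
        uint16_t i;

        if (nb_rxq == 0 ||
            reta_size > RTE_DIM(reta_conf) * RTE_ETH_RETA_GROUP_SIZE)
            return -EINVAL;

        memset(reta_conf, 0, sizeof(reta_conf));
        for (i = 0; i < reta_size; i++) {
            uint16_t idx = i / RTE_ETH_RETA_GROUP_SIZE;
            uint16_t shift = i % RTE_ETH_RETA_GROUP_SIZE;

            reta_conf[idx].mask |= 1ULL << shift;    /* mark entry valid */
            reta_conf[idx].reta[shift] = i % nb_rxq; /* round-robin spread */
        }

        return rte_eth_dev_rss_reta_update(port_id, reta_conf, reta_size);
    }
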
diff --git a/drivers/common/idpf/idpf_common_device.h b/drivers/common/idpf/idpf_common_device.h
index 5184dcee9f..d7d4cd5363 100644
--- a/drivers/common/idpf/idpf_common_device.h
+++ b/drivers/common/idpf/idpf_common_device.h
@@ -95,6 +95,7 @@ struct idpf_vport {
uint32_t *rss_lut;
uint8_t *rss_key;
uint64_t rss_hf;
+ uint64_t last_general_rss_hf;
/* MSIX info*/
struct virtchnl2_queue_vector *qv_map; /* queue vector mapping */
diff --git a/drivers/common/idpf/idpf_common_virtchnl.c b/drivers/common/idpf/idpf_common_virtchnl.c
index 80351d15de..ae5a983836 100644
--- a/drivers/common/idpf/idpf_common_virtchnl.c
+++ b/drivers/common/idpf/idpf_common_virtchnl.c
@@ -218,6 +218,9 @@ idpf_execute_vc_cmd(struct idpf_adapter *adapter, struct idpf_cmd_info *args)
case VIRTCHNL2_OP_ALLOC_VECTORS:
case VIRTCHNL2_OP_DEALLOC_VECTORS:
case VIRTCHNL2_OP_GET_STATS:
+ case VIRTCHNL2_OP_GET_RSS_KEY:
+ case VIRTCHNL2_OP_GET_RSS_HASH:
+ case VIRTCHNL2_OP_GET_RSS_LUT:
/* for init virtchnl ops, need to poll the response */
err = idpf_read_one_msg(adapter, args->ops, args->out_size, args->out_buffer);
clear_cmd(adapter);
@@ -448,6 +451,48 @@ idpf_vc_set_rss_key(struct idpf_vport *vport)
return err;
}
+int
+idpf_vc_get_rss_key(struct idpf_vport *vport)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct virtchnl2_rss_key *rss_key_ret;
+ struct virtchnl2_rss_key rss_key;
+ struct idpf_cmd_info args;
+ int err;
+
+ memset(&rss_key, 0, sizeof(rss_key));
+ rss_key.vport_id = vport->vport_id;
+
+ memset(&args, 0, sizeof(args));
+ args.ops = VIRTCHNL2_OP_GET_RSS_KEY;
+ args.in_args = (uint8_t *)&rss_key;
+ args.in_args_size = sizeof(rss_key);
+ args.out_buffer = adapter->mbx_resp;
+ args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+ err = idpf_execute_vc_cmd(adapter, &args);
+
+ if (!err) {
+ rss_key_ret = (struct virtchnl2_rss_key *)args.out_buffer;
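+ /* reallocate the cached key if the device reports a different size */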
+ if (rss_key_ret->key_len != vport->rss_key_size) {
+ rte_free(vport->rss_key);
+ vport->rss_key = NULL;
+ vport->rss_key_size = RTE_MIN(IDPF_RSS_KEY_LEN,
+ rss_key_ret->key_len);
+ vport->rss_key = rte_zmalloc("rss_key", vport->rss_key_size, 0);
+ if (!vport->rss_key) {
+ vport->rss_key_size = 0;
+ DRV_LOG(ERR, "Failed to allocate RSS key");
+ return -ENOMEM;
+ }
+ }
+ rte_memcpy(vport->rss_key, rss_key_ret->key, vport->rss_key_size);
+ } else {
+ DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_GET_RSS_KEY");
+ }
+
+ return err;
+}
+
int
idpf_vc_set_rss_lut(struct idpf_vport *vport)
{
@@ -482,6 +527,48 @@ idpf_vc_set_rss_lut(struct idpf_vport *vport)
return err;
}
+int
+idpf_vc_get_rss_lut(struct idpf_vport *vport)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct virtchnl2_rss_lut *rss_lut_ret;
+ struct virtchnl2_rss_lut rss_lut;
+ struct idpf_cmd_info args;
+ int err;
+
+ memset(&rss_lut, 0, sizeof(rss_lut));
+ rss_lut.vport_id = vport->vport_id;
+
+ memset(&args, 0, sizeof(args));
+ args.ops = VIRTCHNL2_OP_GET_RSS_LUT;
+ args.in_args = (uint8_t *)&rss_lut;
+ args.in_args_size = sizeof(rss_lut);
+ args.out_buffer = adapter->mbx_resp;
+ args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+ err = idpf_execute_vc_cmd(adapter, &args);
+
+ if (!err) {
+ rss_lut_ret = (struct virtchnl2_rss_lut *)args.out_buffer;
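+ /* reallocate the cached LUT if the device reports a different size */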
+ if (rss_lut_ret->lut_entries != vport->rss_lut_size) {
+ rte_free(vport->rss_lut);
+ vport->rss_lut = NULL;
+ vport->rss_lut = rte_zmalloc("rss_lut",
+ sizeof(uint32_t) * rss_lut_ret->lut_entries, 0);
+ if (vport->rss_lut == NULL) {
+ DRV_LOG(ERR, "Failed to allocate RSS lut");
+ return -ENOMEM;
+ }
+ }
+ rte_memcpy(vport->rss_lut, rss_lut_ret->lut,
+ rss_lut_ret->lut_entries * sizeof(uint32_t));
+ vport->rss_lut_size = rss_lut_ret->lut_entries;
+ } else {
+ DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_GET_RSS_LUT");
+ }
+
+ return err;
+}
+
int
idpf_vc_set_rss_hash(struct idpf_vport *vport)
{
@@ -508,6 +595,38 @@ idpf_vc_set_rss_hash(struct idpf_vport *vport)
return err;
}
+int
+idpf_vc_get_rss_hash(struct idpf_vport *vport)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct virtchnl2_rss_hash *rss_hash_ret;
+ struct virtchnl2_rss_hash rss_hash;
+ struct idpf_cmd_info args;
+ int err;
+
+ memset(&rss_hash, 0, sizeof(rss_hash));
+ rss_hash.ptype_groups = vport->rss_hf;
+ rss_hash.vport_id = vport->vport_id;
+
+ memset(&args, 0, sizeof(args));
+ args.ops = VIRTCHNL2_OP_GET_RSS_HASH;
+ args.in_args = (uint8_t *)&rss_hash;
+ args.in_args_size = sizeof(rss_hash);
+ args.out_buffer = adapter->mbx_resp;
+ args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+ err = idpf_execute_vc_cmd(adapter, &args);
+
+ if (!err) {
+ rss_hash_ret = (struct virtchnl2_rss_hash *)args.out_buffer;
+ vport->rss_hf = rss_hash_ret->ptype_groups;
+ } else {
+ DRV_LOG(ERR, "Failed to execute command of OP_GET_RSS_HASH");
+ }
+
+ return err;
+}
+
int
idpf_vc_config_irq_map_unmap(struct idpf_vport *vport, uint16_t nb_rxq, bool map)
{
diff --git a/drivers/common/idpf/idpf_common_virtchnl.h b/drivers/common/idpf/idpf_common_virtchnl.h
index 60347fe571..b5d245a64f 100644
--- a/drivers/common/idpf/idpf_common_virtchnl.h
+++ b/drivers/common/idpf/idpf_common_virtchnl.h
@@ -13,9 +13,6 @@ int idpf_vc_get_caps(struct idpf_adapter *adapter);
int idpf_vc_create_vport(struct idpf_vport *vport,
struct virtchnl2_create_vport *vport_info);
int idpf_vc_destroy_vport(struct idpf_vport *vport);
-int idpf_vc_set_rss_key(struct idpf_vport *vport);
-int idpf_vc_set_rss_lut(struct idpf_vport *vport);
-int idpf_vc_set_rss_hash(struct idpf_vport *vport);
int idpf_vc_config_irq_map_unmap(struct idpf_vport *vport,
uint16_t nb_rxq, bool map);
int idpf_vc_query_ptype_info(struct idpf_adapter *adapter);
@@ -41,4 +38,16 @@ int idpf_vc_config_txq(struct idpf_vport *vport, struct idpf_tx_queue *txq);
__rte_internal
int idpf_query_stats(struct idpf_vport *vport,
struct virtchnl2_vport_stats **pstats);
+__rte_internal
+int idpf_vc_set_rss_key(struct idpf_vport *vport);
+__rte_internal
+int idpf_vc_get_rss_key(struct idpf_vport *vport);
+__rte_internal
+int idpf_vc_set_rss_lut(struct idpf_vport *vport);
+__rte_internal
+int idpf_vc_get_rss_lut(struct idpf_vport *vport);
+__rte_internal
+int idpf_vc_set_rss_hash(struct idpf_vport *vport);
+__rte_internal
+int idpf_vc_get_rss_hash(struct idpf_vport *vport);
#endif /* _IDPF_COMMON_VIRTCHNL_H_ */
diff --git a/drivers/common/idpf/version.map b/drivers/common/idpf/version.map
index 6a1dc13302..cba08c6b4a 100644
--- a/drivers/common/idpf/version.map
+++ b/drivers/common/idpf/version.map
@@ -52,6 +52,12 @@ INTERNAL {
idpf_splitq_xmit_pkts_avx512;
idpf_update_stats;
idpf_query_stats;
+ idpf_vc_set_rss_key;
+ idpf_vc_get_rss_key;
+ idpf_vc_set_rss_lut;
+ idpf_vc_get_rss_lut;
+ idpf_vc_set_rss_hash;
+ idpf_vc_get_rss_hash;
local: *;
};
diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c
index 1b1b0f30fd..0d370ace4a 100644
--- a/drivers/net/idpf/idpf_ethdev.c
+++ b/drivers/net/idpf/idpf_ethdev.c
@@ -29,6 +29,56 @@ static const char * const idpf_valid_args[] = {
NULL
};
+static const uint64_t idpf_map_hena_rss[] = {
+ [IDPF_HASH_NONF_UNICAST_IPV4_UDP] =
+ RTE_ETH_RSS_NONFRAG_IPV4_UDP,
+ [IDPF_HASH_NONF_MULTICAST_IPV4_UDP] =
+ RTE_ETH_RSS_NONFRAG_IPV4_UDP,
+ [IDPF_HASH_NONF_IPV4_UDP] =
+ RTE_ETH_RSS_NONFRAG_IPV4_UDP,
+ [IDPF_HASH_NONF_IPV4_TCP_SYN_NO_ACK] =
+ RTE_ETH_RSS_NONFRAG_IPV4_TCP,
+ [IDPF_HASH_NONF_IPV4_TCP] =
+ RTE_ETH_RSS_NONFRAG_IPV4_TCP,
+ [IDPF_HASH_NONF_IPV4_SCTP] =
+ RTE_ETH_RSS_NONFRAG_IPV4_SCTP,
+ [IDPF_HASH_NONF_IPV4_OTHER] =
+ RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
+ [IDPF_HASH_FRAG_IPV4] = RTE_ETH_RSS_FRAG_IPV4,
+
+ /* IPv6 */
+ [IDPF_HASH_NONF_UNICAST_IPV6_UDP] =
+ RTE_ETH_RSS_NONFRAG_IPV6_UDP,
+ [IDPF_HASH_NONF_MULTICAST_IPV6_UDP] =
+ RTE_ETH_RSS_NONFRAG_IPV6_UDP,
+ [IDPF_HASH_NONF_IPV6_UDP] =
+ RTE_ETH_RSS_NONFRAG_IPV6_UDP,
+ [IDPF_HASH_NONF_IPV6_TCP_SYN_NO_ACK] =
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP,
+ [IDPF_HASH_NONF_IPV6_TCP] =
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP,
+ [IDPF_HASH_NONF_IPV6_SCTP] =
+ RTE_ETH_RSS_NONFRAG_IPV6_SCTP,
+ [IDPF_HASH_NONF_IPV6_OTHER] =
+ RTE_ETH_RSS_NONFRAG_IPV6_OTHER,
+ [IDPF_HASH_FRAG_IPV6] = RTE_ETH_RSS_FRAG_IPV6,
+
+ /* L2 Payload */
+ [IDPF_HASH_L2_PAYLOAD] = RTE_ETH_RSS_L2_PAYLOAD
+};
+
+static const uint64_t idpf_ipv4_rss = RTE_ETH_RSS_NONFRAG_IPV4_UDP |
+ RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+ RTE_ETH_RSS_NONFRAG_IPV4_SCTP |
+ RTE_ETH_RSS_NONFRAG_IPV4_OTHER |
+ RTE_ETH_RSS_FRAG_IPV4;
+
+static const uint64_t idpf_ipv6_rss = RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+ RTE_ETH_RSS_NONFRAG_IPV6_TCP |
+ RTE_ETH_RSS_NONFRAG_IPV6_SCTP |
+ RTE_ETH_RSS_NONFRAG_IPV6_OTHER |
+ RTE_ETH_RSS_FRAG_IPV6;
+
static int
idpf_dev_link_update(struct rte_eth_dev *dev,
__rte_unused int wait_to_complete)
@@ -59,6 +109,9 @@ idpf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->max_mtu = dev_info->max_rx_pktlen - IDPF_ETH_OVERHEAD;
dev_info->min_mtu = RTE_ETHER_MIN_MTU;
+ dev_info->hash_key_size = vport->rss_key_size;
+ dev_info->reta_size = vport->rss_lut_size;
+
dev_info->flow_type_rss_offloads = IDPF_RSS_OFFLOAD_ALL;
dev_info->rx_offload_capa =
@@ -211,6 +264,54 @@ idpf_dev_stats_reset(struct rte_eth_dev *dev)
return 0;
}
+static int
+idpf_config_rss_hf(struct idpf_vport *vport, uint64_t rss_hf)
+{
+ uint64_t hena = 0, valid_rss_hf = 0;
+ int ret = 0;
+ uint16_t i;
+
+ /*
+ * RTE_ETH_RSS_IPV4 and RTE_ETH_RSS_IPV6 can be considered
+ * generalizations of all the other IPv4 and IPv6 RSS types.
+ */
+ if (rss_hf & RTE_ETH_RSS_IPV4)
+ rss_hf |= idpf_ipv4_rss;
+
+ if (rss_hf & RTE_ETH_RSS_IPV6)
+ rss_hf |= idpf_ipv6_rss;
+
+ for (i = 0; i < RTE_DIM(idpf_map_hena_rss); i++) {
+ uint64_t bit = BIT_ULL(i);
+
+ if (idpf_map_hena_rss[i] & rss_hf) {
+ valid_rss_hf |= idpf_map_hena_rss[i];
+ hena |= bit;
+ }
+ }
+
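+ /* vport->rss_hf caches the device (hena) bit layout, not RTE_ETH_RSS_* */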
+ vport->rss_hf = hena;
+
+ ret = idpf_vc_set_rss_hash(vport);
+ if (ret != 0) {
+ PMD_DRV_LOG(WARNING,
+ "failed to set RSS offload types, ret: %d", ret);
+ return ret;
+ }
+
+ if (valid_rss_hf & idpf_ipv4_rss)
+ valid_rss_hf |= rss_hf & RTE_ETH_RSS_IPV4;
+
+ if (valid_rss_hf & idpf_ipv6_rss)
+ valid_rss_hf |= rss_hf & RTE_ETH_RSS_IPV6;
+
+ if (rss_hf & ~valid_rss_hf)
+ PMD_DRV_LOG(WARNING, "Unsupported rss_hf 0x%" PRIx64,
+ rss_hf & ~valid_rss_hf);
+ vport->last_general_rss_hf = valid_rss_hf;
+
+ return ret;
+}
+
static int
idpf_init_rss(struct idpf_vport *vport)
{
@@ -247,6 +348,204 @@ idpf_init_rss(struct idpf_vport *vport)
return ret;
}
+static int
+idpf_rss_reta_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct idpf_vport *vport = dev->data->dev_private;
+ struct idpf_adapter *adapter = vport->adapter;
+ uint16_t idx, shift;
+ uint32_t *lut;
+ int ret = 0;
+ uint16_t i;
+
+ if (adapter->caps.rss_caps == 0 || dev->data->nb_rx_queues == 0) {
+ PMD_DRV_LOG(DEBUG, "RSS is not supported");
+ return -ENOTSUP;
+ }
+
+ if (reta_size != vport->rss_lut_size) {
+ PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
+ "(%d) doesn't match the number of hardware can "
+ "support (%d)",
+ reta_size, vport->rss_lut_size);
+ return -EINVAL;
+ }
+
+ /* The current LUT size MUST be used to get the RSS lookup table,
+ * otherwise it will fail with error code -100.
+ */
+ lut = rte_zmalloc(NULL, reta_size * sizeof(uint32_t), 0);
+ if (!lut) {
+ PMD_DRV_LOG(ERR, "No memory can be allocated");
+ return -ENOMEM;
+ }
+ /* store the current LUT temporarily */
+ rte_memcpy(lut, vport->rss_lut, reta_size * sizeof(uint32_t));
+
+ for (i = 0; i < reta_size; i++) {
+ idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ shift = i % RTE_ETH_RETA_GROUP_SIZE;
+ if (reta_conf[idx].mask & (1ULL << shift))
+ lut[i] = reta_conf[idx].reta[shift];
+ }
+
+ rte_memcpy(vport->rss_lut, lut, reta_size * sizeof(uint32_t));
+ /* send virtchnl ops to configure RSS */
+ ret = idpf_vc_set_rss_lut(vport);
+ if (ret)
+ PMD_INIT_LOG(ERR, "Failed to configure RSS LUT");
+
+ rte_free(lut);
+
+ return ret;
+}
+
+static int
+idpf_rss_reta_query(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct idpf_vport *vport = dev->data->dev_private;
+ struct idpf_adapter *adapter = vport->adapter;
+ uint16_t idx, shift;
+ int ret = 0;
+ uint16_t i;
+
+ if (adapter->caps.rss_caps == 0 || dev->data->nb_rx_queues == 0) {
+ PMD_DRV_LOG(DEBUG, "RSS is not supported");
+ return -ENOTSUP;
+ }
+
+ if (reta_size != vport->rss_lut_size) {
+ PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
+ "(%d) doesn't match the number of hardware can "
+ "support (%d)", reta_size, vport->rss_lut_size);
+ return -EINVAL;
+ }
+
+ ret = idpf_vc_get_rss_lut(vport);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to get RSS LUT");
+ return ret;
+ }
+
+ for (i = 0; i < reta_size; i++) {
+ idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ shift = i % RTE_ETH_RETA_GROUP_SIZE;
+ if (reta_conf[idx].mask & (1ULL << shift))
+ reta_conf[idx].reta[shift] = vport->rss_lut[i];
+ }
+
+ return 0;
+}
+
+static int
+idpf_rss_hash_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct idpf_vport *vport = dev->data->dev_private;
+ struct idpf_adapter *adapter = vport->adapter;
+ int ret = 0;
+
+ if (adapter->caps.rss_caps == 0 || dev->data->nb_rx_queues == 0) {
+ PMD_DRV_LOG(DEBUG, "RSS is not supported");
+ return -ENOTSUP;
+ }
+
+ if (!rss_conf->rss_key || rss_conf->rss_key_len == 0) {
+ PMD_DRV_LOG(DEBUG, "No key to be configured");
+ goto skip_rss_key;
+ } else if (rss_conf->rss_key_len != vport->rss_key_size) {
+ PMD_DRV_LOG(ERR, "The size of hash key configured "
+ "(%d) doesn't match the size of hardware can "
+ "support (%d)",
+ rss_conf->rss_key_len,
+ vport->rss_key_size);
+ return -EINVAL;
+ }
+
+ rte_memcpy(vport->rss_key, rss_conf->rss_key,
+ vport->rss_key_size);
+ ret = idpf_vc_set_rss_key(vport);
+ if (ret != 0) {
+ PMD_INIT_LOG(ERR, "Failed to configure RSS key");
+ return ret;
+ }
+
+skip_rss_key:
+ ret = idpf_config_rss_hf(vport, rss_conf->rss_hf);
+ if (ret != 0) {
+ PMD_INIT_LOG(ERR, "Failed to configure RSS hash");
+ return ret;
+ }
+
+ return 0;
+}
+
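+/* Map device (hena) bits back to RTE_ETH_RSS_* flags, restoring the
+ * RTE_ETH_RSS_IPV4/IPV6 generalizations last requested by the user.
+ */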
+static uint64_t
+idpf_map_general_rss_hf(uint64_t config_rss_hf, uint64_t last_general_rss_hf)
+{
+ uint64_t valid_rss_hf = 0;
+ uint16_t i;
+
+ for (i = 0; i < RTE_DIM(idpf_map_hena_rss); i++) {
+ uint64_t bit = BIT_ULL(i);
+
+ if (bit & config_rss_hf)
+ valid_rss_hf |= idpf_map_hena_rss[i];
+ }
+
+ if (valid_rss_hf & idpf_ipv4_rss)
+ valid_rss_hf |= last_general_rss_hf & RTE_ETH_RSS_IPV4;
+
+ if (valid_rss_hf & idpf_ipv6_rss)
+ valid_rss_hf |= last_general_rss_hf & RTE_ETH_RSS_IPV6;
+
+ return valid_rss_hf;
+}
+
+static int
+idpf_rss_hash_conf_get(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct idpf_vport *vport = dev->data->dev_private;
+ struct idpf_adapter *adapter = vport->adapter;
+ int ret = 0;
+
+ if (adapter->caps.rss_caps == 0 || dev->data->nb_rx_queues == 0) {
+ PMD_DRV_LOG(DEBUG, "RSS is not supported");
+ return -ENOTSUP;
+ }
+
+ ret = idpf_vc_get_rss_hash(vport);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to get RSS hf");
+ return ret;
+ }
+
+ rss_conf->rss_hf = idpf_map_general_rss_hf(vport->rss_hf,
+ vport->last_general_rss_hf);
+
+ if (!rss_conf->rss_key)
+ return 0;
+
+ ret = idpf_vc_get_rss_key(vport);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to get RSS key");
+ return ret;
+ }
+
+ if (rss_conf->rss_key_len > vport->rss_key_size)
+ rss_conf->rss_key_len = vport->rss_key_size;
+
+ rte_memcpy(rss_conf->rss_key, vport->rss_key, rss_conf->rss_key_len);
+
+ return 0;
+}
+
static int
idpf_dev_configure(struct rte_eth_dev *dev)
{
@@ -684,6 +983,10 @@ static const struct eth_dev_ops idpf_eth_dev_ops = {
.dev_supported_ptypes_get = idpf_dev_supported_ptypes_get,
.stats_get = idpf_dev_stats_get,
.stats_reset = idpf_dev_stats_reset,
+ .reta_update = idpf_rss_reta_update,
+ .reta_query = idpf_rss_reta_query,
+ .rss_hash_update = idpf_rss_hash_update,
+ .rss_hash_conf_get = idpf_rss_hash_conf_get,
};
static uint16_t
diff --git a/drivers/net/idpf/idpf_ethdev.h b/drivers/net/idpf/idpf_ethdev.h
index 133589cf98..f3e5d4cbd4 100644
--- a/drivers/net/idpf/idpf_ethdev.h
+++ b/drivers/net/idpf/idpf_ethdev.h
@@ -47,7 +47,10 @@
RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
- RTE_ETH_RSS_NONFRAG_IPV6_OTHER)
+ RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \
+ RTE_ETH_RSS_L2_PAYLOAD)
+
+#define IDPF_RSS_KEY_LEN 52
#define IDPF_ADAPTER_NAME_LEN (PCI_PRI_STR_SIZE + 1)
--
2.25.1