From: Mingxia Liu <mingxia.liu@intel.com>
To: dev@dpdk.org
Cc: jingjing.wu@intel.com, beilei.xing@intel.com,
	Mingxia Liu <mingxia.liu@intel.com>
Subject: [PATCH v2 2/6] common/idpf: add RSS set/get ops
Date: Wed, 11 Jan 2023 07:15:41 +0000
Message-Id: <20230111071545.504706-3-mingxia.liu@intel.com>
X-Mailer: git-send-email 2.25.1
In-Reply-To: <20230111071545.504706-1-mingxia.liu@intel.com>
References: <20221216093706.2453812-1-mingxia.liu@intel.com>
	<20230111071545.504706-1-mingxia.liu@intel.com>

Add support for these device ops:
- rss_reta_update
- rss_reta_query
- rss_hash_update
- rss_hash_conf_get

Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
---
 drivers/common/idpf/idpf_common_device.h   |   1 +
 drivers/common/idpf/idpf_common_virtchnl.c | 119 ++++++++
 drivers/common/idpf/idpf_common_virtchnl.h |  15 +-
 drivers/common/idpf/version.map            |   6 +
 drivers/net/idpf/idpf_ethdev.c             | 303 +++++++++++++++++++++
 drivers/net/idpf/idpf_ethdev.h             |   5 +-
 6 files changed, 445 insertions(+), 4 deletions(-)

diff --git a/drivers/common/idpf/idpf_common_device.h b/drivers/common/idpf/idpf_common_device.h
index 5184dcee9f..d7d4cd5363 100644
--- a/drivers/common/idpf/idpf_common_device.h
+++ b/drivers/common/idpf/idpf_common_device.h
@@ -95,6 +95,7 @@ struct idpf_vport {
 	uint32_t *rss_lut;
 	uint8_t *rss_key;
 	uint64_t rss_hf;
+	uint64_t last_general_rss_hf;
 
 	/* MSIX info*/
 	struct virtchnl2_queue_vector *qv_map;	/* queue vector mapping */
diff --git a/drivers/common/idpf/idpf_common_virtchnl.c b/drivers/common/idpf/idpf_common_virtchnl.c
index 80351d15de..ae5a983836 100644
--- a/drivers/common/idpf/idpf_common_virtchnl.c
+++ b/drivers/common/idpf/idpf_common_virtchnl.c
@@ -218,6 +218,9 @@ idpf_execute_vc_cmd(struct idpf_adapter *adapter, struct idpf_cmd_info *args)
 	case VIRTCHNL2_OP_ALLOC_VECTORS:
 	case VIRTCHNL2_OP_DEALLOC_VECTORS:
 	case VIRTCHNL2_OP_GET_STATS:
+	case VIRTCHNL2_OP_GET_RSS_KEY:
+	case VIRTCHNL2_OP_GET_RSS_HASH:
+	case VIRTCHNL2_OP_GET_RSS_LUT:
 		/* for init virtchnl ops, need to poll the response */
 		err = idpf_read_one_msg(adapter, args->ops, args->out_size, args->out_buffer);
 		clear_cmd(adapter);
@@ -448,6 +451,48 @@ idpf_vc_set_rss_key(struct idpf_vport *vport)
 	return err;
 }
 
+int idpf_vc_get_rss_key(struct idpf_vport *vport)
+{
+	struct idpf_adapter *adapter = vport->adapter;
+	struct virtchnl2_rss_key *rss_key_ret;
+	struct virtchnl2_rss_key rss_key;
+	struct idpf_cmd_info args;
+	int err;
+
+	memset(&rss_key, 0, sizeof(rss_key));
+	rss_key.vport_id = vport->vport_id;
+
+	memset(&args, 0, sizeof(args));
+	args.ops = VIRTCHNL2_OP_GET_RSS_KEY;
+	args.in_args = (uint8_t *)&rss_key;
+	args.in_args_size = sizeof(rss_key);
+	args.out_buffer = adapter->mbx_resp;
+	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+	err = idpf_execute_vc_cmd(adapter, &args);
+
+	if (!err) {
+		rss_key_ret = (struct virtchnl2_rss_key *)args.out_buffer;
+		if (rss_key_ret->key_len != vport->rss_key_size) {
+			rte_free(vport->rss_key);
+			vport->rss_key = NULL;
+			vport->rss_key_size = RTE_MIN(IDPF_RSS_KEY_LEN,
+						      rss_key_ret->key_len);
+			vport->rss_key = rte_zmalloc("rss_key", vport->rss_key_size, 0);
+			if (!vport->rss_key) {
+				vport->rss_key_size = 0;
+				DRV_LOG(ERR, "Failed to allocate RSS key");
+				return -ENOMEM;
+			}
+		}
+		rte_memcpy(vport->rss_key, rss_key_ret->key, vport->rss_key_size);
+	} else {
+		DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_GET_RSS_KEY");
+	}
+
+	return err;
+}
+
 int
 idpf_vc_set_rss_lut(struct idpf_vport *vport)
 {
@@ -482,6 +527,48 @@ idpf_vc_set_rss_lut(struct idpf_vport *vport)
 	return err;
 }
 
+int
+idpf_vc_get_rss_lut(struct idpf_vport *vport)
+{
+	struct idpf_adapter *adapter = vport->adapter;
+	struct virtchnl2_rss_lut *rss_lut_ret;
+	struct virtchnl2_rss_lut rss_lut;
+	struct idpf_cmd_info args;
+	int err;
+
+	memset(&rss_lut, 0, sizeof(rss_lut));
+	rss_lut.vport_id = vport->vport_id;
+
+	memset(&args, 0, sizeof(args));
+	args.ops = VIRTCHNL2_OP_GET_RSS_LUT;
+	args.in_args = (uint8_t *)&rss_lut;
+	args.in_args_size = sizeof(rss_lut);
+	args.out_buffer = adapter->mbx_resp;
+	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+	err = idpf_execute_vc_cmd(adapter, &args);
+
+	if (!err) {
+		rss_lut_ret = (struct virtchnl2_rss_lut *)args.out_buffer;
+		if (rss_lut_ret->lut_entries != vport->rss_lut_size) {
+			rte_free(vport->rss_lut);
+			vport->rss_lut = NULL;
+			vport->rss_lut = rte_zmalloc("rss_lut",
+					sizeof(uint32_t) * rss_lut_ret->lut_entries, 0);
+			if (vport->rss_lut == NULL) {
+				DRV_LOG(ERR, "Failed to allocate RSS lut");
+				return -ENOMEM;
+			}
+		}
+		rte_memcpy(vport->rss_lut, rss_lut_ret->lut, rss_lut_ret->lut_entries);
+		vport->rss_lut_size = rss_lut_ret->lut_entries;
+	} else {
+		DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_GET_RSS_LUT");
+	}
+
+	return err;
+}
+
 int
 idpf_vc_set_rss_hash(struct idpf_vport *vport)
 {
@@ -508,6 +595,38 @@ idpf_vc_set_rss_hash(struct idpf_vport *vport)
 	return err;
 }
 
+int
+idpf_vc_get_rss_hash(struct idpf_vport *vport)
+{
+	struct idpf_adapter *adapter = vport->adapter;
+	struct virtchnl2_rss_hash *rss_hash_ret;
+	struct virtchnl2_rss_hash rss_hash;
+	struct idpf_cmd_info args;
+	int err;
+
+	memset(&rss_hash, 0, sizeof(rss_hash));
+	rss_hash.ptype_groups = vport->rss_hf;
+	rss_hash.vport_id = vport->vport_id;
+
+	memset(&args, 0, sizeof(args));
+	args.ops = VIRTCHNL2_OP_GET_RSS_HASH;
+	args.in_args = (uint8_t *)&rss_hash;
+	args.in_args_size = sizeof(rss_hash);
+	args.out_buffer = adapter->mbx_resp;
+	args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+	err = idpf_execute_vc_cmd(adapter, &args);
+
+	if (!err) {
+		rss_hash_ret = (struct virtchnl2_rss_hash *)args.out_buffer;
+		vport->rss_hf = rss_hash_ret->ptype_groups;
+	} else {
+		DRV_LOG(ERR, "Failed to execute command of OP_GET_RSS_HASH");
+	}
+
+	return err;
+}
+
 int
 idpf_vc_config_irq_map_unmap(struct idpf_vport *vport, uint16_t nb_rxq, bool map)
 {
diff --git a/drivers/common/idpf/idpf_common_virtchnl.h b/drivers/common/idpf/idpf_common_virtchnl.h
index 60347fe571..b5d245a64f 100644
--- a/drivers/common/idpf/idpf_common_virtchnl.h
+++ b/drivers/common/idpf/idpf_common_virtchnl.h
@@ -13,9 +13,6 @@ int idpf_vc_get_caps(struct idpf_adapter *adapter);
 int idpf_vc_create_vport(struct idpf_vport *vport,
			 struct virtchnl2_create_vport *vport_info);
 int idpf_vc_destroy_vport(struct idpf_vport *vport);
-int idpf_vc_set_rss_key(struct idpf_vport *vport);
-int idpf_vc_set_rss_lut(struct idpf_vport *vport);
-int idpf_vc_set_rss_hash(struct idpf_vport *vport);
 int idpf_vc_config_irq_map_unmap(struct idpf_vport *vport,
				 uint16_t nb_rxq, bool map);
 int idpf_vc_query_ptype_info(struct idpf_adapter *adapter);
@@ -41,4 +38,16 @@ int idpf_vc_config_txq(struct idpf_vport *vport, struct idpf_tx_queue *txq);
 __rte_internal
 int idpf_query_stats(struct idpf_vport *vport,
		     struct virtchnl2_vport_stats **pstats);
+__rte_internal
+int idpf_vc_set_rss_key(struct idpf_vport *vport);
+__rte_internal
+int idpf_vc_get_rss_key(struct idpf_vport *vport);
+__rte_internal
+int idpf_vc_set_rss_lut(struct idpf_vport *vport);
+__rte_internal
+int idpf_vc_get_rss_lut(struct idpf_vport *vport);
+__rte_internal
+int idpf_vc_set_rss_hash(struct idpf_vport *vport);
+__rte_internal
+int idpf_vc_get_rss_hash(struct idpf_vport *vport);
 #endif /* _IDPF_COMMON_VIRTCHNL_H_ */
diff --git a/drivers/common/idpf/version.map b/drivers/common/idpf/version.map
index 6a1dc13302..cba08c6b4a 100644
--- a/drivers/common/idpf/version.map
+++ b/drivers/common/idpf/version.map
@@ -52,6 +52,12 @@ INTERNAL {
	idpf_splitq_xmit_pkts_avx512;
	idpf_update_stats;
	idpf_query_stats;
+	idpf_vc_set_rss_key;
+	idpf_vc_get_rss_key;
+	idpf_vc_set_rss_lut;
+	idpf_vc_get_rss_lut;
+	idpf_vc_set_rss_hash;
+	idpf_vc_get_rss_hash;
 
	local: *;
 };
diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c
index e8bb097c78..037cabb04e 100644
--- a/drivers/net/idpf/idpf_ethdev.c
+++ b/drivers/net/idpf/idpf_ethdev.c
@@ -29,6 +29,56 @@ static const char * const idpf_valid_args[] = {
	NULL
 };
 
+static const uint64_t idpf_map_hena_rss[] = {
+	[IDPF_HASH_NONF_UNICAST_IPV4_UDP] =
+			RTE_ETH_RSS_NONFRAG_IPV4_UDP,
+	[IDPF_HASH_NONF_MULTICAST_IPV4_UDP] =
+			RTE_ETH_RSS_NONFRAG_IPV4_UDP,
+	[IDPF_HASH_NONF_IPV4_UDP] =
+			RTE_ETH_RSS_NONFRAG_IPV4_UDP,
+	[IDPF_HASH_NONF_IPV4_TCP_SYN_NO_ACK] =
+			RTE_ETH_RSS_NONFRAG_IPV4_TCP,
+	[IDPF_HASH_NONF_IPV4_TCP] =
+			RTE_ETH_RSS_NONFRAG_IPV4_TCP,
+	[IDPF_HASH_NONF_IPV4_SCTP] =
+			RTE_ETH_RSS_NONFRAG_IPV4_SCTP,
+	[IDPF_HASH_NONF_IPV4_OTHER] =
+			RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
+	[IDPF_HASH_FRAG_IPV4] = RTE_ETH_RSS_FRAG_IPV4,
+
+	/* IPv6 */
+	[IDPF_HASH_NONF_UNICAST_IPV6_UDP] =
+			RTE_ETH_RSS_NONFRAG_IPV6_UDP,
+	[IDPF_HASH_NONF_MULTICAST_IPV6_UDP] =
+			RTE_ETH_RSS_NONFRAG_IPV6_UDP,
+	[IDPF_HASH_NONF_IPV6_UDP] =
+			RTE_ETH_RSS_NONFRAG_IPV6_UDP,
+	[IDPF_HASH_NONF_IPV6_TCP_SYN_NO_ACK] =
+			RTE_ETH_RSS_NONFRAG_IPV6_TCP,
+	[IDPF_HASH_NONF_IPV6_TCP] =
+			RTE_ETH_RSS_NONFRAG_IPV6_TCP,
+	[IDPF_HASH_NONF_IPV6_SCTP] =
+			RTE_ETH_RSS_NONFRAG_IPV6_SCTP,
+	[IDPF_HASH_NONF_IPV6_OTHER] =
+			RTE_ETH_RSS_NONFRAG_IPV6_OTHER,
+	[IDPF_HASH_FRAG_IPV6] = RTE_ETH_RSS_FRAG_IPV6,
+
+	/* L2 Payload */
+	[IDPF_HASH_L2_PAYLOAD] = RTE_ETH_RSS_L2_PAYLOAD
+};
+
+static const uint64_t idpf_ipv4_rss = RTE_ETH_RSS_NONFRAG_IPV4_UDP |
+			RTE_ETH_RSS_NONFRAG_IPV4_TCP |
+			RTE_ETH_RSS_NONFRAG_IPV4_SCTP |
+			RTE_ETH_RSS_NONFRAG_IPV4_OTHER |
+			RTE_ETH_RSS_FRAG_IPV4;
+
+static const uint64_t idpf_ipv6_rss = RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+			RTE_ETH_RSS_NONFRAG_IPV6_TCP |
+			RTE_ETH_RSS_NONFRAG_IPV6_SCTP |
+			RTE_ETH_RSS_NONFRAG_IPV6_OTHER |
+			RTE_ETH_RSS_FRAG_IPV6;
+
 static int
 idpf_dev_link_update(struct rte_eth_dev *dev,
		     __rte_unused int wait_to_complete)
@@ -59,6 +109,9 @@ idpf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
	dev_info->max_mtu = vport->max_mtu;
	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
 
+	dev_info->hash_key_size = vport->rss_key_size;
+	dev_info->reta_size = vport->rss_lut_size;
+
	dev_info->flow_type_rss_offloads = IDPF_RSS_OFFLOAD_ALL;
 
	dev_info->rx_offload_capa =
@@ -220,6 +273,54 @@ idpf_dev_stats_reset(struct rte_eth_dev *dev)
	return 0;
 }
 
+static int idpf_config_rss_hf(struct idpf_vport *vport, uint64_t rss_hf)
+{
+	uint64_t hena = 0, valid_rss_hf = 0;
+	int ret = 0;
+	uint16_t i;
+
+	/**
+	 * RTE_ETH_RSS_IPV4 and RTE_ETH_RSS_IPV6 can be considered as 2
+	 * generalizations of all other IPv4 and IPv6 RSS types.
+	 */
+	if (rss_hf & RTE_ETH_RSS_IPV4)
+		rss_hf |= idpf_ipv4_rss;
+
+	if (rss_hf & RTE_ETH_RSS_IPV6)
+		rss_hf |= idpf_ipv6_rss;
+
+	for (i = 0; i < RTE_DIM(idpf_map_hena_rss); i++) {
+		uint64_t bit = BIT_ULL(i);
+
+		if (idpf_map_hena_rss[i] & rss_hf) {
+			valid_rss_hf |= idpf_map_hena_rss[i];
+			hena |= bit;
+		}
+	}
+
+	vport->rss_hf = hena;
+
+	ret = idpf_vc_set_rss_hash(vport);
+	if (ret != 0) {
+		PMD_DRV_LOG(WARNING,
+			    "fail to set RSS offload types, ret: %d", ret);
+		return ret;
+	}
+
+	if (valid_rss_hf & idpf_ipv4_rss)
+		valid_rss_hf |= rss_hf & RTE_ETH_RSS_IPV4;
+
+	if (valid_rss_hf & idpf_ipv6_rss)
+		valid_rss_hf |= rss_hf & RTE_ETH_RSS_IPV6;
+
+	if (rss_hf & ~valid_rss_hf)
+		PMD_DRV_LOG(WARNING, "Unsupported rss_hf 0x%" PRIx64,
+			    rss_hf & ~valid_rss_hf);
+	vport->last_general_rss_hf = valid_rss_hf;
+
+	return ret;
+}
+
 static int
 idpf_init_rss(struct idpf_vport *vport)
 {
@@ -256,6 +357,204 @@ idpf_init_rss(struct idpf_vport *vport)
	return ret;
 }
 
+static int
+idpf_rss_reta_update(struct rte_eth_dev *dev,
+		     struct rte_eth_rss_reta_entry64 *reta_conf,
+		     uint16_t reta_size)
+{
+	struct idpf_vport *vport = dev->data->dev_private;
+	struct idpf_adapter *adapter = vport->adapter;
+	uint16_t idx, shift;
+	uint32_t *lut;
+	int ret = 0;
+	uint16_t i;
+
+	if (adapter->caps.rss_caps == 0 || dev->data->nb_rx_queues == 0) {
+		PMD_DRV_LOG(DEBUG, "RSS is not supported");
+		return -ENOTSUP;
+	}
+
+	if (reta_size != vport->rss_lut_size) {
+		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
+			    "(%d) doesn't match the number hardware can "
+			    "support (%d)",
+			    reta_size, vport->rss_lut_size);
+		return -EINVAL;
+	}
+
+	/* It MUST use the current LUT size to get the RSS lookup table,
+	 * otherwise it will fail with -100 error code.
+	 */
+	lut = rte_zmalloc(NULL, reta_size * sizeof(uint32_t), 0);
+	if (!lut) {
+		PMD_DRV_LOG(ERR, "No memory can be allocated");
+		return -ENOMEM;
+	}
+	/* store the old lut table temporarily */
+	rte_memcpy(lut, vport->rss_lut, reta_size * sizeof(uint32_t));
+
+	for (i = 0; i < reta_size; i++) {
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
+		if (reta_conf[idx].mask & (1ULL << shift))
+			lut[i] = reta_conf[idx].reta[shift];
+	}
+
+	rte_memcpy(vport->rss_lut, lut, reta_size * sizeof(uint32_t));
+	/* send virtchnl ops to configure RSS */
+	ret = idpf_vc_set_rss_lut(vport);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Failed to configure RSS lut");
+		goto out;
+	}
+out:
+	rte_free(lut);
+
+	return ret;
+}
+
+static int
+idpf_rss_reta_query(struct rte_eth_dev *dev,
+		    struct rte_eth_rss_reta_entry64 *reta_conf,
+		    uint16_t reta_size)
+{
+	struct idpf_vport *vport = dev->data->dev_private;
+	struct idpf_adapter *adapter = vport->adapter;
+	uint16_t idx, shift;
+	int ret = 0;
+	uint16_t i;
+
+	if (adapter->caps.rss_caps == 0 || dev->data->nb_rx_queues == 0) {
+		PMD_DRV_LOG(DEBUG, "RSS is not supported");
+		return -ENOTSUP;
+	}
+
+	if (reta_size != vport->rss_lut_size) {
+		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
+			    "(%d) doesn't match the number hardware can "
+			    "support (%d)", reta_size, vport->rss_lut_size);
+		return -EINVAL;
+	}
+
+	ret = idpf_vc_get_rss_lut(vport);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to get RSS LUT");
+		return ret;
+	}
+
+	for (i = 0; i < reta_size; i++) {
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
+		if (reta_conf[idx].mask & (1ULL << shift))
+			reta_conf[idx].reta[shift] = vport->rss_lut[i];
+	}
+
+	return 0;
+}
+
+static int
+idpf_rss_hash_update(struct rte_eth_dev *dev,
+		     struct rte_eth_rss_conf *rss_conf)
+{
+	struct idpf_vport *vport = dev->data->dev_private;
+	struct idpf_adapter *adapter = vport->adapter;
+	int ret = 0;
+
+	if (adapter->caps.rss_caps == 0 || dev->data->nb_rx_queues == 0) {
+		PMD_DRV_LOG(DEBUG, "RSS is not supported");
+		return -ENOTSUP;
+	}
+
+	if (!rss_conf->rss_key || rss_conf->rss_key_len == 0) {
+		PMD_DRV_LOG(DEBUG, "No key to be configured");
+		goto skip_rss_key;
+	} else if (rss_conf->rss_key_len != vport->rss_key_size) {
+		PMD_DRV_LOG(ERR, "The size of hash key configured "
+			    "(%d) doesn't match the size hardware can "
+			    "support (%d)",
+			    rss_conf->rss_key_len,
+			    vport->rss_key_size);
+		return -EINVAL;
+	}
+
+	rte_memcpy(vport->rss_key, rss_conf->rss_key,
+		   vport->rss_key_size);
+	ret = idpf_vc_set_rss_key(vport);
+	if (ret != 0) {
+		PMD_INIT_LOG(ERR, "Failed to configure RSS key");
+		return ret;
+	}
+
+skip_rss_key:
+	ret = idpf_config_rss_hf(vport, rss_conf->rss_hf);
+	if (ret != 0) {
+		PMD_INIT_LOG(ERR, "Failed to configure RSS hash");
+		return ret;
+	}
+
+	return 0;
+}
+
+static uint64_t
+idpf_map_general_rss_hf(uint64_t config_rss_hf, uint64_t last_general_rss_hf)
+{
+	uint64_t valid_rss_hf = 0;
+	uint16_t i;
+
+	for (i = 0; i < RTE_DIM(idpf_map_hena_rss); i++) {
+		uint64_t bit = BIT_ULL(i);
+
+		if (bit & config_rss_hf)
+			valid_rss_hf |= idpf_map_hena_rss[i];
+	}
+
+	if (valid_rss_hf & idpf_ipv4_rss)
+		valid_rss_hf |= last_general_rss_hf & RTE_ETH_RSS_IPV4;
+
+	if (valid_rss_hf & idpf_ipv6_rss)
+		valid_rss_hf |= last_general_rss_hf & RTE_ETH_RSS_IPV6;
+
+	return valid_rss_hf;
+}
+
+static int
+idpf_rss_hash_conf_get(struct rte_eth_dev *dev,
+		       struct rte_eth_rss_conf *rss_conf)
+{
+	struct idpf_vport *vport = dev->data->dev_private;
+	struct idpf_adapter *adapter = vport->adapter;
+	int ret = 0;
+
+	if (adapter->caps.rss_caps == 0 || dev->data->nb_rx_queues == 0) {
+		PMD_DRV_LOG(DEBUG, "RSS is not supported");
+		return -ENOTSUP;
+	}
+
+	ret = idpf_vc_get_rss_hash(vport);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to get RSS hf");
+		return ret;
+	}
+
+	rss_conf->rss_hf = idpf_map_general_rss_hf(vport->rss_hf, vport->last_general_rss_hf);
+
+	if (!rss_conf->rss_key)
+		return 0;
+
+	ret = idpf_vc_get_rss_key(vport);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to get RSS key");
+		return ret;
+	}
+
+	if (rss_conf->rss_key_len > vport->rss_key_size)
+		rss_conf->rss_key_len = vport->rss_key_size;
+
+	rte_memcpy(rss_conf->rss_key, vport->rss_key, rss_conf->rss_key_len);
+
+	return 0;
+}
+
 static int
 idpf_dev_configure(struct rte_eth_dev *dev)
 {
@@ -693,6 +992,10 @@ static const struct eth_dev_ops idpf_eth_dev_ops = {
	.dev_supported_ptypes_get	= idpf_dev_supported_ptypes_get,
	.stats_get			= idpf_dev_stats_get,
	.stats_reset			= idpf_dev_stats_reset,
+	.reta_update			= idpf_rss_reta_update,
+	.reta_query			= idpf_rss_reta_query,
+	.rss_hash_update		= idpf_rss_hash_update,
+	.rss_hash_conf_get		= idpf_rss_hash_conf_get,
 };
 
 static uint16_t
diff --git a/drivers/net/idpf/idpf_ethdev.h b/drivers/net/idpf/idpf_ethdev.h
index d791d402fb..5bd1b441ea 100644
--- a/drivers/net/idpf/idpf_ethdev.h
+++ b/drivers/net/idpf/idpf_ethdev.h
@@ -48,7 +48,10 @@
	RTE_ETH_RSS_NONFRAG_IPV6_TCP |		\
	RTE_ETH_RSS_NONFRAG_IPV6_UDP |		\
	RTE_ETH_RSS_NONFRAG_IPV6_SCTP |		\
-	RTE_ETH_RSS_NONFRAG_IPV6_OTHER)
+	RTE_ETH_RSS_NONFRAG_IPV6_OTHER |	\
+	RTE_ETH_RSS_L2_PAYLOAD)
+
+#define IDPF_RSS_KEY_LEN	52
 
 #define IDPF_ADAPTER_NAME_LEN	(PCI_PRI_STR_SIZE + 1)
 
-- 
2.25.1
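
For reference, a minimal application-side sketch of how the new ops are reached
through the generic ethdev API (rte_eth_dev_rss_hash_update,
rte_eth_dev_rss_reta_update, rte_eth_dev_rss_hash_conf_get). It is not part of
the patch; the helper name, port id, four-queue spread and 0x6d key pattern are
illustrative assumptions only.

    #include <errno.h>
    #include <string.h>
    #include <rte_ethdev.h>

    /* Illustrative only: assumes port_id is a configured idpf port whose
     * packets should be spread over 4 Rx queues. Key length and LUT size
     * come from dev_info, which this patch now fills from the vport.
     */
    static int
    example_rss_config(uint16_t port_id)
    {
            struct rte_eth_rss_reta_entry64 reta_conf[4]; /* up to 256 LUT entries */
            struct rte_eth_dev_info dev_info;
            struct rte_eth_rss_conf rss_conf;
            uint8_t key[64];
            uint16_t i;
            int ret;

            ret = rte_eth_dev_info_get(port_id, &dev_info);
            if (ret != 0)
                    return ret;
            if (dev_info.hash_key_size > sizeof(key) ||
                dev_info.reta_size > RTE_DIM(reta_conf) * RTE_ETH_RETA_GROUP_SIZE)
                    return -EINVAL;

            /* rss_hash_update: key length must match dev_info.hash_key_size,
             * see the check in idpf_rss_hash_update().
             */
            memset(key, 0x6d, sizeof(key));
            rss_conf.rss_key = key;
            rss_conf.rss_key_len = dev_info.hash_key_size;
            rss_conf.rss_hf = RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_NONFRAG_IPV6_TCP;
            ret = rte_eth_dev_rss_hash_update(port_id, &rss_conf);
            if (ret != 0)
                    return ret;

            /* reta_update: reta_size must equal the vport LUT size reported
             * in dev_info.reta_size; spread entries round-robin over 4 queues.
             */
            memset(reta_conf, 0, sizeof(reta_conf));
            for (i = 0; i < dev_info.reta_size; i++) {
                    reta_conf[i / RTE_ETH_RETA_GROUP_SIZE].mask |=
                            1ULL << (i % RTE_ETH_RETA_GROUP_SIZE);
                    reta_conf[i / RTE_ETH_RETA_GROUP_SIZE].reta[i % RTE_ETH_RETA_GROUP_SIZE] =
                            i % 4;
            }
            ret = rte_eth_dev_rss_reta_update(port_id, reta_conf, dev_info.reta_size);
            if (ret != 0)
                    return ret;

            /* rss_hash_conf_get: read back the key and the hash types the
             * device actually enabled.
             */
            rss_conf.rss_key = key;
            rss_conf.rss_key_len = sizeof(key);
            return rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
    }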