From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id F2C4DA0032; Fri, 21 Oct 2022 10:38:03 +0200 (CEST) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id D9A7E42B91; Fri, 21 Oct 2022 10:38:03 +0200 (CEST) Received: from shelob.oktetlabs.ru (shelob.oktetlabs.ru [91.220.146.113]) by mails.dpdk.org (Postfix) with ESMTP id 1CD2D4281C for ; Fri, 21 Oct 2022 10:38:02 +0200 (CEST) Received: from [192.168.38.17] (aros.oktetlabs.ru [192.168.38.17]) (using TLSv1.3 with cipher TLS_AES_128_GCM_SHA256 (128/128 bits) key-exchange X25519 server-signature RSA-PSS (4096 bits)) (No client certificate requested) by shelob.oktetlabs.ru (Postfix) with ESMTPSA id A1D0B7B; Fri, 21 Oct 2022 11:38:01 +0300 (MSK) DKIM-Filter: OpenDKIM Filter v2.11.0 shelob.oktetlabs.ru A1D0B7B DKIM-Signature: v=1; a=rsa-sha256; c=simple/simple; d=oktetlabs.ru; s=default; t=1666341481; bh=NyF00MuOwJ0nqLv1v72GXzrGRMGruXujYVCt9eIc13s=; h=Date:Subject:To:Cc:References:From:In-Reply-To:From; b=oud5fmll9yl/46xEVNCIv+H62t6XU/wTA3FC5wxrccST1HO4ZT+DfG5EXpmrf1bEx KU9NVSn+krZWtlk2mWJofiiQxzLXSDWXclvIq5dsyBOIQNjP3r5b121UYBonoWxUHm vx6c0ZzOrcoUMLKxPdb4SBdZV6ua6Rv2mK/B2m8g= Message-ID: <61341c0b-0b3a-5b29-a661-03136eef6515@oktetlabs.ru> Date: Fri, 21 Oct 2022 11:38:01 +0300 MIME-Version: 1.0 User-Agent: Mozilla/5.0 (X11; Linux x86_64; rv:102.0) Gecko/20100101 Thunderbird/102.3.0 Subject: Re: [PATCH v9 10/14] net/idpf: add support for RSS Content-Language: en-US To: Junfeng Guo , qi.z.zhang@intel.com, jingjing.wu@intel.com, beilei.xing@intel.com Cc: dev@dpdk.org References: <20221020062951.645121-2-junfeng.guo@intel.com> <20221021051821.2164939-1-junfeng.guo@intel.com> <20221021051821.2164939-11-junfeng.guo@intel.com> From: Andrew Rybchenko Organization: OKTET Labs In-Reply-To: <20221021051821.2164939-11-junfeng.guo@intel.com> Content-Type: text/plain; charset=UTF-8; 
format=flowed Content-Transfer-Encoding: 7bit X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org On 10/21/22 08:18, Junfeng Guo wrote: > Add RSS support. > > Signed-off-by: Beilei Xing > Signed-off-by: Junfeng Guo > --- > drivers/net/idpf/idpf_ethdev.c | 123 ++++++++++++++++++++++++++++++++- > drivers/net/idpf/idpf_ethdev.h | 26 +++++++ > drivers/net/idpf/idpf_rxtx.c | 27 ++++++++ > drivers/net/idpf/idpf_vchnl.c | 96 +++++++++++++++++++++++++ > 4 files changed, 269 insertions(+), 3 deletions(-) > > diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c > index ffde2ce7d1..cc1cfe402b 100644 > --- a/drivers/net/idpf/idpf_ethdev.c > +++ b/drivers/net/idpf/idpf_ethdev.c > @@ -83,6 +83,7 @@ idpf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) > dev_info->min_rx_bufsize = IDPF_MIN_BUF_SIZE; > dev_info->max_rx_pktlen = IDPF_MAX_FRAME_SIZE; > > + dev_info->flow_type_rss_offloads = IDPF_RSS_OFFLOAD_ALL; > dev_info->max_mac_addrs = IDPF_NUM_MACADDR_MAX; > dev_info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP | > RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP; > @@ -90,7 +91,8 @@ idpf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) > RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | > RTE_ETH_RX_OFFLOAD_UDP_CKSUM | > RTE_ETH_RX_OFFLOAD_TCP_CKSUM | > - RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM; > + RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM | > + RTE_ETH_RX_OFFLOAD_RSS_HASH; > > dev_info->tx_offload_capa = > RTE_ETH_TX_OFFLOAD_TCP_TSO | > @@ -197,6 +199,10 @@ idpf_parse_devarg_id(char *name) > return val; > } > > +#ifndef IDPF_RSS_KEY_LEN Why do we need ifndef here? 
> +#define IDPF_RSS_KEY_LEN 52 > +#endif > + > static int > idpf_init_vport(struct rte_eth_dev *dev) > { > @@ -217,6 +223,10 @@ idpf_init_vport(struct rte_eth_dev *dev) > vport->max_mtu = vport_info->max_mtu; > rte_memcpy(vport->default_mac_addr, > vport_info->default_mac_addr, ETH_ALEN); > + vport->rss_algorithm = vport_info->rss_algorithm; > + vport->rss_key_size = RTE_MIN(IDPF_RSS_KEY_LEN, > + vport_info->rss_key_size); > + vport->rss_lut_size = vport_info->rss_lut_size; > vport->sw_idx = idx; > > for (i = 0; i < vport_info->chunks.num_chunks; i++) { > @@ -265,15 +275,116 @@ idpf_init_vport(struct rte_eth_dev *dev) > } > > static int > -idpf_dev_configure(__rte_unused struct rte_eth_dev *dev) > +idpf_config_rss(struct idpf_vport *vport) > +{ > + int ret; > + > + ret = idpf_vc_set_rss_key(vport); > + if (ret) { > + PMD_INIT_LOG(ERR, "Failed to configure RSS key"); > + return ret; > + } > + > + ret = idpf_vc_set_rss_lut(vport); > + if (ret) { > + PMD_INIT_LOG(ERR, "Failed to configure RSS lut"); > + return ret; > + } > + > + ret = idpf_vc_set_rss_hash(vport); > + if (ret) { > + PMD_INIT_LOG(ERR, "Failed to configure RSS hash"); > + return ret; > + } > + > + return ret; > +} > + > +static int > +idpf_init_rss(struct idpf_vport *vport) > +{ > + struct rte_eth_rss_conf *rss_conf; > + uint16_t i, nb_q, lut_size; > + int ret = 0; > + > + rss_conf = &vport->dev_data->dev_conf.rx_adv_conf.rss_conf; > + nb_q = vport->dev_data->nb_rx_queues; > + > + vport->rss_key = (uint8_t *)rte_zmalloc("rss_key", > + vport->rss_key_size, 0); > + if (!vport->rss_key) { > + PMD_INIT_LOG(ERR, "Failed to allocate RSS key"); > + ret = -ENOMEM; > + goto err_key; > + } > + > + lut_size = vport->rss_lut_size; > + vport->rss_lut = (uint32_t *)rte_zmalloc("rss_lut", > + sizeof(uint32_t) * lut_size, 0); > + if (!vport->rss_lut) { > + PMD_INIT_LOG(ERR, "Failed to allocate RSS lut"); > + ret = -ENOMEM; > + goto err_lut; > + } > + > + if (!rss_conf->rss_key) { > + for (i = 0; i < vport->rss_key_size; 
i++) > + vport->rss_key[i] = (uint8_t)rte_rand(); > + } else { > + rte_memcpy(vport->rss_key, rss_conf->rss_key, > + RTE_MIN(rss_conf->rss_key_len, > + vport->rss_key_size)); If the provided length is smaller than required, shouldn't we fill in the rest using rte_rand() as above? Since the RSS key length is reported in device info, IMHO it is better to require the user to provide an RSS key with the correct length. > + } > + > + for (i = 0; i < lut_size; i++) > + vport->rss_lut[i] = i % nb_q; Are you sure that nb_q is not 0? In theory it is possible to configure and start with Tx queues only. > + > + vport->rss_hf = IDPF_DEFAULT_RSS_HASH_EXPANDED; > + > + ret = idpf_config_rss(vport); > + if (ret) { > + PMD_INIT_LOG(ERR, "Failed to configure RSS"); > + goto err_cfg; > + } > + > + return ret; > + > +err_cfg: > + rte_free(vport->rss_lut); > + vport->rss_lut = NULL; > +err_lut: > + rte_free(vport->rss_key); > + vport->rss_key = NULL; > +err_key: > + return ret; > +} > + > +static int > +idpf_dev_configure(struct rte_eth_dev *dev) > { > + struct idpf_vport *vport = dev->data->dev_private; > + struct idpf_adapter *adapter = vport->adapter; > + int ret = 0; > + > if (dev->data->nb_tx_queues > IDPF_DEFAULT_TXQ_NUM || > dev->data->nb_rx_queues > IDPF_DEFAULT_RXQ_NUM) { > PMD_INIT_LOG(ERR, "Invalid queue number."); > return -EINVAL; > } > > - return 0; > + if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) > + dev->data->dev_conf.rxmode.offloads |= > + RTE_ETH_RX_OFFLOAD_RSS_HASH; Why? User controls offloads. > + > + if (adapter->caps->rss_caps) { > + ret = idpf_init_rss(vport); > + if (ret) { > + PMD_INIT_LOG(ERR, "Failed to init rss"); > + return ret; > + } If RSS is not supported but requested, it must be an error, not simply ignoring the RSS request. 
> + } > + > + return ret; > } > > static int > @@ -372,6 +483,12 @@ idpf_dev_close(struct rte_eth_dev *dev) > idpf_dev_stop(dev); > idpf_vc_destroy_vport(vport); > > + rte_free(vport->rss_lut); > + vport->rss_lut = NULL; > + > + rte_free(vport->rss_key); > + vport->rss_key = NULL; > + > adapter->cur_vports &= ~BIT(vport->devarg_id); > > rte_free(vport); > diff --git a/drivers/net/idpf/idpf_ethdev.h b/drivers/net/idpf/idpf_ethdev.h > index d414efc917..c25fa2e3ce 100644 > --- a/drivers/net/idpf/idpf_ethdev.h > +++ b/drivers/net/idpf/idpf_ethdev.h > @@ -56,6 +56,20 @@ > #define IDPF_ETH_OVERHEAD \ > (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + IDPF_VLAN_TAG_SIZE * 2) > > +#define IDPF_RSS_OFFLOAD_ALL ( \ > + RTE_ETH_RSS_IPV4 | \ > + RTE_ETH_RSS_FRAG_IPV4 | \ > + RTE_ETH_RSS_NONFRAG_IPV4_TCP | \ > + RTE_ETH_RSS_NONFRAG_IPV4_UDP | \ > + RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \ > + RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \ > + RTE_ETH_RSS_IPV6 | \ > + RTE_ETH_RSS_FRAG_IPV6 | \ > + RTE_ETH_RSS_NONFRAG_IPV6_TCP | \ > + RTE_ETH_RSS_NONFRAG_IPV6_UDP | \ > + RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \ > + RTE_ETH_RSS_NONFRAG_IPV6_OTHER) > + > #ifndef ETH_ADDR_LEN > #define ETH_ADDR_LEN 6 > #endif > @@ -102,11 +116,20 @@ struct idpf_vport { > uint16_t max_mtu; > uint8_t default_mac_addr[VIRTCHNL_ETH_LENGTH_OF_ADDRESS]; > > + enum virtchnl_rss_algorithm rss_algorithm; > + uint16_t rss_key_size; > + uint16_t rss_lut_size; > + > uint16_t sw_idx; /* SW idx */ > > struct rte_eth_dev_data *dev_data; /* Pointer to the device data */ > uint16_t max_pkt_len; /* Maximum packet length */ > > + /* RSS info */ > + uint32_t *rss_lut; > + uint8_t *rss_key; > + uint64_t rss_hf; > + > /* Chunk info */ > struct idpf_chunks_info chunks_info; > > @@ -216,6 +239,9 @@ int idpf_get_pkt_type(struct idpf_adapter *adapter); > int idpf_vc_get_caps(struct idpf_adapter *adapter); > int idpf_vc_create_vport(struct rte_eth_dev *dev); > int idpf_vc_destroy_vport(struct idpf_vport *vport); > +int idpf_vc_set_rss_key(struct idpf_vport 
*vport); > +int idpf_vc_set_rss_lut(struct idpf_vport *vport); > +int idpf_vc_set_rss_hash(struct idpf_vport *vport); > int idpf_vc_config_rxqs(struct idpf_vport *vport); > int idpf_vc_config_rxq(struct idpf_vport *vport, uint16_t rxq_id); > int idpf_vc_config_txqs(struct idpf_vport *vport); > diff --git a/drivers/net/idpf/idpf_rxtx.c b/drivers/net/idpf/idpf_rxtx.c > index 31e266bbec..05976acf7f 100644 > --- a/drivers/net/idpf/idpf_rxtx.c > +++ b/drivers/net/idpf/idpf_rxtx.c > @@ -1310,6 +1310,32 @@ idpf_splitq_rx_csum_offload(uint8_t err) > return flags; > } > > +#define IDPF_RX_FLEX_DESC_ADV_HASH1_S 0 > +#define IDPF_RX_FLEX_DESC_ADV_HASH2_S 16 > +#define IDPF_RX_FLEX_DESC_ADV_HASH3_S 24 > + > +static inline uint64_t > +idpf_splitq_rx_rss_offload(struct rte_mbuf *mb, > + volatile struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc) > +{ > + uint8_t status_err0_qw0; > + uint64_t flags = 0; > + > + status_err0_qw0 = rx_desc->status_err0_qw0; > + > + if (status_err0_qw0 & BIT(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_RSS_VALID_S)) { > + flags |= RTE_MBUF_F_RX_RSS_HASH; > + mb->hash.rss = (rte_le_to_cpu_16(rx_desc->hash1) << > + IDPF_RX_FLEX_DESC_ADV_HASH1_S) | > + ((uint32_t)(rx_desc->ff2_mirrid_hash2.hash2) << > + IDPF_RX_FLEX_DESC_ADV_HASH2_S) | > + ((uint32_t)(rx_desc->hash3) << > + IDPF_RX_FLEX_DESC_ADV_HASH3_S); > + } > + > + return flags; > +} > + > static void > idpf_split_rx_bufq_refill(struct idpf_rx_queue *rx_bufq) > { > @@ -1465,6 +1491,7 @@ idpf_splitq_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, > > status_err0_qw1 = rx_desc->status_err0_qw1; > pkt_flags = idpf_splitq_rx_csum_offload(status_err0_qw1); > + pkt_flags |= idpf_splitq_rx_rss_offload(rxm, rx_desc); > > rxm->ol_flags |= pkt_flags; > > diff --git a/drivers/net/idpf/idpf_vchnl.c b/drivers/net/idpf/idpf_vchnl.c > index 7389128712..8f447046b1 100644 > --- a/drivers/net/idpf/idpf_vchnl.c > +++ b/drivers/net/idpf/idpf_vchnl.c > @@ -679,6 +679,102 @@ idpf_vc_destroy_vport(struct idpf_vport *vport) > 
return err; > } > > +int > +idpf_vc_set_rss_key(struct idpf_vport *vport) > +{ > + struct idpf_adapter *adapter = vport->adapter; > + struct virtchnl2_rss_key *rss_key; > + struct idpf_cmd_info args; > + int len, err; > + > + len = sizeof(*rss_key) + sizeof(rss_key->key[0]) * > + (vport->rss_key_size - 1); > + rss_key = rte_zmalloc("rss_key", len, 0); > + if (!rss_key) > + return -ENOMEM; > + > + rss_key->vport_id = vport->vport_id; > + rss_key->key_len = vport->rss_key_size; > + rte_memcpy(rss_key->key, vport->rss_key, > + sizeof(rss_key->key[0]) * vport->rss_key_size); > + > + memset(&args, 0, sizeof(args)); > + args.ops = VIRTCHNL2_OP_SET_RSS_KEY; > + args.in_args = (uint8_t *)rss_key; > + args.in_args_size = len; > + args.out_buffer = adapter->mbx_resp; > + args.out_size = IDPF_DFLT_MBX_BUF_SIZE; > + > + err = idpf_execute_vc_cmd(adapter, &args); > + if (err) { > + PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_SET_RSS_KEY"); > + return err; > + } > + > + rte_free(rss_key); > + return err; > +} > + > +int > +idpf_vc_set_rss_lut(struct idpf_vport *vport) > +{ > + struct idpf_adapter *adapter = vport->adapter; > + struct virtchnl2_rss_lut *rss_lut; > + struct idpf_cmd_info args; > + int len, err; > + > + len = sizeof(*rss_lut) + sizeof(rss_lut->lut[0]) * > + (vport->rss_lut_size - 1); > + rss_lut = rte_zmalloc("rss_lut", len, 0); > + if (!rss_lut) > + return -ENOMEM; > + > + rss_lut->vport_id = vport->vport_id; > + rss_lut->lut_entries = vport->rss_lut_size; > + rte_memcpy(rss_lut->lut, vport->rss_lut, > + sizeof(rss_lut->lut[0]) * vport->rss_lut_size); > + > + memset(&args, 0, sizeof(args)); > + args.ops = VIRTCHNL2_OP_SET_RSS_LUT; > + args.in_args = (uint8_t *)rss_lut; > + args.in_args_size = len; > + args.out_buffer = adapter->mbx_resp; > + args.out_size = IDPF_DFLT_MBX_BUF_SIZE; > + > + err = idpf_execute_vc_cmd(adapter, &args); > + if (err) > + PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_SET_RSS_LUT"); > + > + rte_free(rss_lut); 
> + return err; > +} > + > +int > +idpf_vc_set_rss_hash(struct idpf_vport *vport) > +{ > + struct idpf_adapter *adapter = vport->adapter; > + struct virtchnl2_rss_hash rss_hash; > + struct idpf_cmd_info args; > + int err; > + > + memset(&rss_hash, 0, sizeof(rss_hash)); > + rss_hash.ptype_groups = vport->rss_hf; > + rss_hash.vport_id = vport->vport_id; > + > + memset(&args, 0, sizeof(args)); > + args.ops = VIRTCHNL2_OP_SET_RSS_HASH; > + args.in_args = (uint8_t *)&rss_hash; > + args.in_args_size = sizeof(rss_hash); > + args.out_buffer = adapter->mbx_resp; > + args.out_size = IDPF_DFLT_MBX_BUF_SIZE; > + > + err = idpf_execute_vc_cmd(adapter, &args); > + if (err) > + PMD_DRV_LOG(ERR, "Failed to execute command of OP_SET_RSS_HASH"); > + > + return err; > +} > + > #define IDPF_RX_BUF_STRIDE 64 > int > idpf_vc_config_rxqs(struct idpf_vport *vport)