DPDK patches and discussions
From: Wenbo Cao <caowenbo@mucse.com>
To: thomas@monjalon.net, Wenbo Cao <caowenbo@mucse.com>
Cc: stephen@networkplumber.org, dev@dpdk.org, ferruh.yigit@amd.com,
	andrew.rybchenko@oktetlabs.ru, yaojun@mucse.com
Subject: [PATCH v7 11/28] net/rnp: add RSS support operations
Date: Sat,  8 Feb 2025 10:43:48 +0800	[thread overview]
Message-ID: <1738982645-34550-12-git-send-email-caowenbo@mucse.com> (raw)
In-Reply-To: <1738982645-34550-1-git-send-email-caowenbo@mucse.com>

Add support for RSS reta update/query and RSS hash update/get.
Add an RSS configuration check to dev_configure.

Signed-off-by: Wenbo Cao <caowenbo@mucse.com>
---
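Illustrative note (not part of the patch): the new ops are exercised through the
generic ethdev API. Below is a minimal application-side sketch, assuming the port
is already configured with RTE_ETH_MQ_RX_RSS and nb_rxq Rx queues; the hash types,
the 512-entry table bound and the round-robin queue spread are arbitrary examples,
not values mandated by this driver.

#include <errno.h>
#include <string.h>

#include <rte_common.h>
#include <rte_ethdev.h>

static int
example_rss_setup(uint16_t port_id, uint16_t nb_rxq)
{
	/* enough groups for up to a 512-entry indirection table (assumption) */
	struct rte_eth_rss_reta_entry64 reta[512 / RTE_ETH_RETA_GROUP_SIZE];
	struct rte_eth_rss_conf rss = {
		.rss_key = NULL,	/* leave the programmed hash key unchanged */
		.rss_hf = RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP | RTE_ETH_RSS_UDP,
	};
	struct rte_eth_dev_info info;
	uint16_t i;
	int ret;

	ret = rte_eth_dev_info_get(port_id, &info);
	if (ret != 0)
		return ret;
	if (info.reta_size > RTE_DIM(reta) * RTE_ETH_RETA_GROUP_SIZE)
		return -EINVAL;
	/* spread the redirection table round-robin over the Rx queues */
	memset(reta, 0, sizeof(reta));
	for (i = 0; i < info.reta_size; i++) {
		reta[i / RTE_ETH_RETA_GROUP_SIZE].mask |=
			1ULL << (i % RTE_ETH_RETA_GROUP_SIZE);
		reta[i / RTE_ETH_RETA_GROUP_SIZE].reta[i % RTE_ETH_RETA_GROUP_SIZE] =
			i % nb_rxq;
	}
	ret = rte_eth_dev_rss_reta_update(port_id, reta, info.reta_size);
	if (ret != 0)
		return ret;
	/* select hash types; the PMD programs MRQC and the key table */
	return rte_eth_dev_rss_hash_update(port_id, &rss);
}

Passing a NULL rss_key leaves the currently programmed key untouched; the PMD
falls back to its built-in default key only during dev_start RSS configuration.
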
 doc/guides/nics/features/rnp.ini    |   4 +
 doc/guides/nics/rnp.rst             |   3 +
 drivers/net/rnp/base/rnp_eth_regs.h |  16 ++
 drivers/net/rnp/meson.build         |   1 +
 drivers/net/rnp/rnp.h               |   7 +
 drivers/net/rnp/rnp_ethdev.c        |  23 +++
 drivers/net/rnp/rnp_rss.c           | 367 ++++++++++++++++++++++++++++++++++++
 drivers/net/rnp/rnp_rss.h           |  43 +++++
 8 files changed, 464 insertions(+)
 create mode 100644 drivers/net/rnp/rnp_rss.c
 create mode 100644 drivers/net/rnp/rnp_rss.h

diff --git a/doc/guides/nics/features/rnp.ini b/doc/guides/nics/features/rnp.ini
index fd7d4b9..2fc94825f 100644
--- a/doc/guides/nics/features/rnp.ini
+++ b/doc/guides/nics/features/rnp.ini
@@ -8,5 +8,9 @@ Speed capabilities   = Y
 Queue start/stop     = Y
 Promiscuous mode     = Y
 Allmulticast mode    = Y
+RSS hash             = Y
+RSS key update       = Y
+RSS reta update      = Y
+Inner RSS            = Y
 Linux                = Y
 x86-64               = Y
diff --git a/doc/guides/nics/rnp.rst b/doc/guides/nics/rnp.rst
index 5417593..8f9d38d 100644
--- a/doc/guides/nics/rnp.rst
+++ b/doc/guides/nics/rnp.rst
@@ -11,6 +11,9 @@ Features
 --------
 
 - Multiple queues for TX and RX
+- Receive Side Scaling (RSS)
+  RSS on IPv4, IPv6, IPv4-TCP/UDP/SCTP and IPv6-TCP/UDP/SCTP
+  Inner RSS is only supported for VXLAN/NVGRE
 - Promiscuous mode
 
 Prerequisites
diff --git a/drivers/net/rnp/base/rnp_eth_regs.h b/drivers/net/rnp/base/rnp_eth_regs.h
index 60766d2..be7ed5b 100644
--- a/drivers/net/rnp/base/rnp_eth_regs.h
+++ b/drivers/net/rnp/base/rnp_eth_regs.h
@@ -32,7 +32,23 @@
 #define RNP_MAC_MULTICASE_TBL_EN	RTE_BIT32(2)
 #define RNP_MAC_UNICASE_TBL_EN		RTE_BIT32(3)
 /* rss function ctrl */
+#define RNP_RSS_INNER_CTRL	_ETH_(0x805c)
+#define RNP_INNER_RSS_EN	(1)
+#define RNP_INNER_RSS_DIS	(0)
 #define RNP_RSS_REDIR_TB(n, id) _ETH_(0xe000 + ((n) * 0x200) + ((id) * 0x4))
+#define RNP_RSS_MRQC_ADDR	_ETH_(0x92a0)
+/* RSS policy */
+#define RNP_RSS_HASH_CFG_MASK	(0x3F30000)
+#define RNP_RSS_HASH_IPV4_TCP	RTE_BIT32(16)
+#define RNP_RSS_HASH_IPV4	RTE_BIT32(17)
+#define RNP_RSS_HASH_IPV6	RTE_BIT32(20)
+#define RNP_RSS_HASH_IPV6_TCP	RTE_BIT32(21)
+#define RNP_RSS_HASH_IPV4_UDP	RTE_BIT32(22)
+#define RNP_RSS_HASH_IPV6_UDP   RTE_BIT32(23)
+#define RNP_RSS_HASH_IPV4_SCTP  RTE_BIT32(24)
+#define RNP_RSS_HASH_IPV6_SCTP  RTE_BIT32(25)
+/* rss hash key */
+#define RNP_RSS_KEY_TABLE(idx)	_ETH_(0x92d0 + ((idx) * 0x4))
 
 #define RNP_TC_PORT_OFFSET(lane)	_ETH_(0xe840 + 0x04 * (lane))
 
diff --git a/drivers/net/rnp/meson.build b/drivers/net/rnp/meson.build
index ff3dc41..40b0139 100644
--- a/drivers/net/rnp/meson.build
+++ b/drivers/net/rnp/meson.build
@@ -15,4 +15,5 @@ includes += include_directories('base')
 sources = files(
 		'rnp_ethdev.c',
 		'rnp_rxtx.c',
+		'rnp_rss.c',
 )
diff --git a/drivers/net/rnp/rnp.h b/drivers/net/rnp/rnp.h
index 086135a..e02de85 100644
--- a/drivers/net/rnp/rnp.h
+++ b/drivers/net/rnp/rnp.h
@@ -108,6 +108,13 @@ struct rnp_eth_port {
 	struct rnp_tx_queue *tx_queues[RNP_MAX_RX_QUEUE_NUM];
 	struct rnp_hw *hw;
 
+	struct rte_eth_rss_conf rss_conf;
+	uint16_t last_rx_num;
+	bool rxq_num_changed;
+	bool reta_has_cfg;
+	bool hw_rss_en;
+	uint32_t indirtbl[RNP_RSS_INDIR_SIZE];
+
 	rte_spinlock_t rx_mac_lock;
 	bool port_stopped;
 };
diff --git a/drivers/net/rnp/rnp_ethdev.c b/drivers/net/rnp/rnp_ethdev.c
index 7b7ed8c..bd22034 100644
--- a/drivers/net/rnp/rnp_ethdev.c
+++ b/drivers/net/rnp/rnp_ethdev.c
@@ -17,6 +17,7 @@
 #include "base/rnp_dma_regs.h"
 #include "base/rnp_mac_regs.h"
 #include "rnp_rxtx.h"
+#include "rnp_rss.h"
 
 static struct rte_eth_dev *
 rnp_alloc_eth_port(struct rte_pci_device *pci, char *name)
@@ -234,6 +235,9 @@ static int rnp_dev_start(struct rte_eth_dev *eth_dev)
 	}
 	/* disable eth rx flow */
 	RNP_RX_ETH_DISABLE(hw, lane);
+	ret = rnp_dev_rss_configure(eth_dev);
+	if (ret)
+		return ret;
 	ret = rnp_rx_scattered_setup(eth_dev);
 	if (ret)
 		return ret;
@@ -301,6 +305,19 @@ static int rnp_disable_all_tx_queue(struct rte_eth_dev *dev)
 	return ret;
 }
 
+static int rnp_dev_configure(struct rte_eth_dev *eth_dev)
+{
+	struct rnp_eth_port *port = RNP_DEV_TO_PORT(eth_dev);
+
+	if (port->last_rx_num != eth_dev->data->nb_rx_queues)
+		port->rxq_num_changed = true;
+	else
+		port->rxq_num_changed = false;
+	port->last_rx_num = eth_dev->data->nb_rx_queues;
+
+	return 0;
+}
+
 static int rnp_dev_stop(struct rte_eth_dev *eth_dev)
 {
 	struct rnp_eth_port *port = RNP_DEV_TO_PORT(eth_dev);
@@ -497,6 +514,7 @@ static int rnp_allmulticast_disable(struct rte_eth_dev *eth_dev)
 
 /* Features supported by this driver */
 static const struct eth_dev_ops rnp_eth_dev_ops = {
+	.dev_configure                = rnp_dev_configure,
 	.dev_close                    = rnp_dev_close,
 	.dev_start                    = rnp_dev_start,
 	.dev_stop                     = rnp_dev_stop,
@@ -512,6 +530,11 @@ static int rnp_allmulticast_disable(struct rte_eth_dev *eth_dev)
 	.rx_queue_release             = rnp_dev_rx_queue_release,
 	.tx_queue_setup               = rnp_tx_queue_setup,
 	.tx_queue_release             = rnp_dev_tx_queue_release,
+	/* rss impl */
+	.reta_update                  = rnp_dev_rss_reta_update,
+	.reta_query                   = rnp_dev_rss_reta_query,
+	.rss_hash_update              = rnp_dev_rss_hash_update,
+	.rss_hash_conf_get            = rnp_dev_rss_hash_conf_get,
 };
 
 static void
diff --git a/drivers/net/rnp/rnp_rss.c b/drivers/net/rnp/rnp_rss.c
new file mode 100644
index 0000000..ebbc887
--- /dev/null
+++ b/drivers/net/rnp/rnp_rss.c
@@ -0,0 +1,367 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2023 Mucse IC Design Ltd.
+ */
+
+#include <stdint.h>
+
+#include "base/rnp_bdq_if.h"
+#include "base/rnp_eth_regs.h"
+
+#include "rnp.h"
+#include "rnp_rxtx.h"
+#include "rnp_rss.h"
+
+static const struct rnp_rss_hash_cfg rnp_rss_cfg[] = {
+	{RNP_RSS_IPV4, RNP_RSS_HASH_IPV4, RTE_ETH_RSS_IPV4},
+	{RNP_RSS_IPV4, RNP_RSS_HASH_IPV4, RTE_ETH_RSS_FRAG_IPV4},
+	{RNP_RSS_IPV4, RNP_RSS_HASH_IPV4, RTE_ETH_RSS_NONFRAG_IPV4_OTHER},
+	{RNP_RSS_IPV6, RNP_RSS_HASH_IPV6, RTE_ETH_RSS_IPV6},
+	{RNP_RSS_IPV6, RNP_RSS_HASH_IPV6, RTE_ETH_RSS_FRAG_IPV6},
+	{RNP_RSS_IPV6, RNP_RSS_HASH_IPV6, RTE_ETH_RSS_NONFRAG_IPV6_OTHER},
+	{RNP_RSS_IPV4_TCP, RNP_RSS_HASH_IPV4_TCP, RTE_ETH_RSS_NONFRAG_IPV4_TCP},
+	{RNP_RSS_IPV4_UDP, RNP_RSS_HASH_IPV4_UDP, RTE_ETH_RSS_NONFRAG_IPV4_UDP},
+	{RNP_RSS_IPV4_SCTP, RNP_RSS_HASH_IPV4_SCTP, RTE_ETH_RSS_NONFRAG_IPV4_SCTP},
+	{RNP_RSS_IPV6_TCP, RNP_RSS_HASH_IPV6_TCP, RTE_ETH_RSS_NONFRAG_IPV6_TCP},
+	{RNP_RSS_IPV6_UDP, RNP_RSS_HASH_IPV6_UDP, RTE_ETH_RSS_NONFRAG_IPV6_UDP},
+	{RNP_RSS_IPV6_SCTP, RNP_RSS_HASH_IPV6_SCTP, RTE_ETH_RSS_NONFRAG_IPV6_SCTP}
+};
+
+static uint8_t rnp_rss_default_key[40] = {
+	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
+	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
+	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
+	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
+	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
+};
+
+int
+rnp_dev_rss_reta_update(struct rte_eth_dev *dev,
+			struct rte_eth_rss_reta_entry64 *reta_conf,
+			uint16_t reta_size)
+{
+	struct rnp_eth_port *port = RNP_DEV_TO_PORT(dev);
+	uint16_t port_offset = port->attr.port_offset;
+	uint32_t *indirtbl = &port->indirtbl[0];
+	uint16_t lane = port->attr.nr_lane;
+	struct rnp_hw *hw = port->hw;
+	struct rnp_rx_queue *rxq;
+	uint16_t i, idx, shift;
+	uint16_t hwrid;
+	uint16_t qid = 0;
+
+	if (reta_size > RNP_RSS_INDIR_SIZE) {
+		RNP_PMD_ERR("Invalid reta size, reta_size:%d", reta_size);
+		return -EINVAL;
+	}
+	for (i = 0; i < reta_size; i++) {
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
+		if (reta_conf[idx].mask & (1ULL << shift))
+			indirtbl[i] = reta_conf[idx].reta[shift];
+	}
+	for (i = 0; i < RNP_RSS_INDIR_SIZE; i++) {
+		qid = indirtbl[i];
+		if (qid < dev->data->nb_rx_queues) {
+			rxq = dev->data->rx_queues[qid];
+			hwrid = rxq->attr.index - port_offset;
+			RNP_E_REG_WR(hw, RNP_RSS_REDIR_TB(lane, i), hwrid);
+			rxq->rx_offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
+		} else {
+			RNP_PMD_WARN("port[%d] reta[%d]-queue=%d "
+					"rx queue is out of range of the current setup\n",
+					dev->data->port_id, i, qid);
+		}
+	}
+	port->reta_has_cfg = true;
+
+	return 0;
+}
+
+static uint16_t
+rnp_hwrid_to_queue_id(struct rte_eth_dev *dev, uint16_t hwrid)
+{
+	struct rnp_rx_queue *rxq;
+	bool find = false;
+	uint16_t idx;
+
+	for (idx = 0; idx < dev->data->nb_rx_queues; idx++) {
+		rxq = dev->data->rx_queues[idx];
+		if (!rxq)
+			continue;
+		if (rxq->attr.index == hwrid) {
+			find = true;
+			break;
+		}
+	}
+	if (find)
+		return rxq->attr.queue_id;
+
+	return UINT16_MAX;
+}
+
+int
+rnp_dev_rss_reta_query(struct rte_eth_dev *dev,
+		       struct rte_eth_rss_reta_entry64 *reta_conf,
+		       uint16_t reta_size)
+{
+	struct rnp_eth_port *port = RNP_DEV_TO_PORT(dev);
+	uint16_t port_offset = port->attr.port_offset;
+	struct rnp_hw *hw = port->hw;
+	uint32_t *indirtbl = &port->indirtbl[0];
+	uint16_t lane = port->attr.nr_lane;
+	uint16_t i, idx, shift;
+	uint16_t hwrid;
+	uint16_t queue_id;
+
+	if (reta_size > RNP_RSS_INDIR_SIZE) {
+		RNP_PMD_ERR("Invalid reta size, reta_size:%d", reta_size);
+		return -EINVAL;
+	}
+	for (i = 0; i < reta_size; i++) {
+		hwrid = RNP_E_REG_RD(hw, RNP_RSS_REDIR_TB(lane, i));
+		hwrid = hwrid + port_offset;
+		queue_id = rnp_hwrid_to_queue_id(dev, hwrid);
+		if (queue_id == UINT16_MAX) {
+			RNP_PMD_ERR("Invalid RSS table value, "
+					"sw queue doesn't match hardware\n");
+			return -EINVAL;
+		}
+		indirtbl[i] = queue_id;
+	}
+	for (i = 0; i < reta_size; i++) {
+		idx = i / RTE_ETH_RETA_GROUP_SIZE;
+		shift = i % RTE_ETH_RETA_GROUP_SIZE;
+		if (reta_conf[idx].mask & (1ULL << shift))
+			reta_conf[idx].reta[shift] = (uint16_t)indirtbl[i];
+	}
+
+	return 0;
+}
+
+static void rnp_disable_rss(struct rte_eth_dev *dev)
+{
+	struct rnp_eth_adapter *adapter = RNP_DEV_TO_ADAPTER(dev);
+	struct rnp_eth_port *port = RNP_DEV_TO_PORT(dev);
+	struct rte_eth_rss_conf *conf = &port->rss_conf;
+	struct rnp_rx_queue *rxq = NULL;
+	struct rnp_hw *hw = port->hw;
+	uint8_t rss_disable = 0;
+	uint32_t mrqc_reg = 0;
+	uint16_t lane, index;
+	uint16_t idx;
+
+	memset(conf, 0, sizeof(*conf));
+	lane = port->attr.nr_lane;
+	for (idx = 0; idx < hw->max_port_num; idx++) {
+		if (adapter->ports[idx] == NULL) {
+			rss_disable++;
+			continue;
+		}
+		if (!adapter->ports[idx]->rss_conf.rss_hf)
+			rss_disable++;
+	}
+
+	for (idx = 0; idx < dev->data->nb_rx_queues; idx++) {
+		rxq = dev->data->rx_queues[idx];
+		if (!rxq)
+			continue;
+		rxq->rx_offloads &= ~RTE_ETH_RX_OFFLOAD_RSS_HASH;
+	}
+	/* Multiple port mode is handled in software: RSS is disabled
+	 * by pointing the whole RSS table at the default ring.
+	 * When RSS is re-enabled, the RSS reta table must be restored
+	 * to the last state set by the user.
+	 */
+	rxq = dev->data->rx_queues[0];
+	index = rxq->attr.index - port->attr.port_offset;
+	for (idx = 0; idx < RNP_RSS_INDIR_SIZE; idx++)
+		RNP_E_REG_WR(hw, RNP_RSS_REDIR_TB(lane, idx), index);
+	if (rss_disable == hw->max_port_num) {
+		mrqc_reg = RNP_E_REG_RD(hw, RNP_RSS_MRQC_ADDR);
+		mrqc_reg &= ~RNP_RSS_HASH_CFG_MASK;
+		RNP_E_REG_WR(hw, RNP_RSS_MRQC_ADDR, mrqc_reg);
+	}
+}
+
+static void
+rnp_rss_hash_set(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf)
+{
+	uint64_t rss_hash_level = RTE_ETH_RSS_LEVEL(rss_conf->rss_hf);
+	struct rnp_eth_port *port = RNP_DEV_TO_PORT(dev);
+	struct rnp_rx_queue *rxq = NULL;
+	struct rnp_hw *hw = port->hw;
+	uint8_t *hash_key;
+	uint32_t mrqc_reg = 0;
+	uint32_t rss_key;
+	uint64_t rss_hf;
+	uint16_t i;
+
+	rss_hf = rss_conf->rss_hf;
+	hash_key = rss_conf->rss_key;
+	if (hash_key != NULL) {
+		for (i = 0; i < RNP_MAX_HASH_KEY_SIZE; i++) {
+			rss_key  = hash_key[(i * 4)];
+			rss_key |= hash_key[(i * 4) + 1] << 8;
+			rss_key |= hash_key[(i * 4) + 2] << 16;
+			rss_key |= hash_key[(i * 4) + 3] << 24;
+			rss_key = rte_cpu_to_be_32(rss_key);
+			RNP_E_REG_WR(hw, RNP_RSS_KEY_TABLE(9 - i), rss_key);
+		}
+	}
+	if (rss_hf) {
+		for (i = 0; i < RTE_DIM(rnp_rss_cfg); i++)
+			if (rnp_rss_cfg[i].rss_flag & rss_hf)
+				mrqc_reg |= rnp_rss_cfg[i].reg_val;
+		/* Enable inner RSS mode.
+		 * If enabled, outer (VXLAN/NVGRE) RSS won't be calculated.
+		 */
+		if (rss_hash_level == RTE_ETH_RSS_LEVEL_INNERMOST)
+			RNP_E_REG_WR(hw, RNP_RSS_INNER_CTRL, RNP_INNER_RSS_EN);
+		else
+			RNP_E_REG_WR(hw, RNP_RSS_INNER_CTRL, RNP_INNER_RSS_DIS);
+		RNP_E_REG_WR(hw, RNP_RSS_MRQC_ADDR, mrqc_reg);
+		for (i = 0; i < dev->data->nb_rx_queues; i++) {
+			rxq = dev->data->rx_queues[i];
+			if (!rxq)
+				continue;
+			rxq->rx_offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
+		}
+	}
+}
+
+static void
+rnp_reta_table_update(struct rte_eth_dev *dev)
+{
+	struct rnp_eth_port *port = RNP_DEV_TO_PORT(dev);
+	uint16_t port_offset = port->attr.port_offset;
+	uint32_t *indirtbl = &port->indirtbl[0];
+	struct rnp_hw *hw = port->hw;
+	struct rnp_rx_queue *rxq;
+	int i = 0, qid = 0, p_id;
+	uint16_t hwrid;
+
+	p_id = port->attr.nr_lane;
+	for (i = 0; i < RNP_RSS_INDIR_SIZE; i++) {
+		qid = indirtbl[i];
+		if (qid < dev->data->nb_rx_queues) {
+			rxq = dev->data->rx_queues[qid];
+			hwrid = rxq->attr.index - port_offset;
+			RNP_E_REG_WR(hw, RNP_RSS_REDIR_TB(p_id, i), hwrid);
+			rxq->rx_offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
+		} else {
+			RNP_PMD_LOG(WARNING, "port[%d] reta[%d]-queue=%d "
+					"rx queue is out of range of the current setup\n",
+					dev->data->port_id, i, qid);
+		}
+	}
+}
+
+int
+rnp_dev_rss_hash_update(struct rte_eth_dev *dev,
+			struct rte_eth_rss_conf *rss_conf)
+{
+	struct rnp_eth_port *port = RNP_DEV_TO_PORT(dev);
+
+	if (rss_conf->rss_key &&
+			rss_conf->rss_key_len > RNP_MAX_HASH_KEY_SIZE) {
+		RNP_PMD_ERR("Invalid rss key, rss_key_len:%d",
+				rss_conf->rss_key_len);
+		return -EINVAL;
+	}
+	if (rss_conf->rss_hf &&
+			(!(rss_conf->rss_hf & RNP_SUPPORT_RSS_OFFLOAD_ALL))) {
+		RNP_PMD_ERR("RSS type 0x%.2lx is not supported", rss_conf->rss_hf);
+		return -EINVAL;
+	}
+	if (!rss_conf->rss_hf) {
+		rnp_disable_rss(dev);
+	} else {
+		rnp_rss_hash_set(dev, rss_conf);
+		rnp_reta_table_update(dev);
+	}
+	port->rss_conf = *rss_conf;
+
+	return 0;
+}
+
+int
+rnp_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
+			  struct rte_eth_rss_conf *rss_conf)
+{
+	struct rnp_eth_port *port = RNP_DEV_TO_PORT(dev);
+	struct rnp_hw *hw = port->hw;
+	uint8_t *hash_key;
+	uint32_t rss_key;
+	uint64_t rss_hf;
+	uint32_t mrqc;
+	uint16_t i;
+
+	hash_key = rss_conf->rss_key;
+	if (hash_key != NULL) {
+		for (i = 0; i < 10; i++) {
+			rss_key = RNP_E_REG_RD(hw, RNP_RSS_KEY_TABLE(9 - i));
+			rss_key = rte_be_to_cpu_32(rss_key);
+			hash_key[(i * 4)] = rss_key & 0x000000FF;
+			hash_key[(i * 4) + 1] = (rss_key >> 8) & 0x000000FF;
+			hash_key[(i * 4) + 2] = (rss_key >> 16) & 0x000000FF;
+			hash_key[(i * 4) + 3] = (rss_key >> 24) & 0x000000FF;
+		}
+	}
+	rss_hf = 0;
+	mrqc = RNP_E_REG_RD(hw, RNP_RSS_MRQC_ADDR) & RNP_RSS_HASH_CFG_MASK;
+	if (mrqc == 0) {
+		rss_conf->rss_hf = 0;
+		return 0;
+	}
+	for (i = 0; i < RTE_DIM(rnp_rss_cfg); i++)
+		if (rnp_rss_cfg[i].reg_val & mrqc)
+			rss_hf |= rnp_rss_cfg[i].rss_flag;
+
+	rss_conf->rss_hf = rss_hf;
+
+	return 0;
+}
+
+int rnp_dev_rss_configure(struct rte_eth_dev *dev)
+{
+	struct rnp_eth_port *port = RNP_DEV_TO_PORT(dev);
+	uint32_t *indirtbl = port->indirtbl;
+	enum rte_eth_rx_mq_mode mq_mode = 0;
+	struct rte_eth_rss_conf rss_conf;
+	struct rnp_rx_queue *rxq;
+	int i, j;
+
+	mq_mode = dev->data->dev_conf.rxmode.mq_mode;
+	if (dev->data->rx_queues == NULL) {
+		RNP_PMD_ERR("rx queues are not set up, skip RSS setup");
+		return -EINVAL;
+	}
+	rss_conf = dev->data->dev_conf.rx_adv_conf.rss_conf;
+	if (!(rss_conf.rss_hf & RNP_SUPPORT_RSS_OFFLOAD_ALL) ||
+			!(mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)) {
+		rnp_disable_rss(dev);
+
+		return 0;
+	}
+	if (rss_conf.rss_key == NULL)
+		rss_conf.rss_key = rnp_rss_default_key;
+
+	if (port->rxq_num_changed || !port->reta_has_cfg) {
+		/* set default reta policy */
+		for (i = 0; i < RNP_RSS_INDIR_SIZE; i++) {
+			j = i % dev->data->nb_rx_queues;
+			rxq = dev->data->rx_queues[j];
+			if (!rxq) {
+				RNP_PMD_ERR("RSS reta config: rxq %d is NULL\n", i);
+				return -EINVAL;
+			}
+			indirtbl[i] = rxq->attr.queue_id;
+		}
+	}
+	rnp_reta_table_update(dev);
+	port->rss_conf = rss_conf;
+	/* setup rss key and hash func */
+	rnp_rss_hash_set(dev, &rss_conf);
+
+	return 0;
+}
diff --git a/drivers/net/rnp/rnp_rss.h b/drivers/net/rnp/rnp_rss.h
new file mode 100644
index 0000000..73f895d
--- /dev/null
+++ b/drivers/net/rnp/rnp_rss.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2023 Mucse IC Design Ltd.
+ */
+
+#ifndef _RNP_RSS_H_
+#define _RNP_RSS_H_
+
+#include "rnp.h"
+
+struct rnp_rss_hash_cfg {
+	uint32_t func_id;
+	uint32_t reg_val;
+	uint64_t rss_flag;
+};
+
+enum rnp_rss_hash_type {
+	RNP_RSS_IPV4,
+	RNP_RSS_IPV6,
+	RNP_RSS_IPV4_TCP,
+	RNP_RSS_IPV4_UDP,
+	RNP_RSS_IPV4_SCTP,
+	RNP_RSS_IPV6_TCP,
+	RNP_RSS_IPV6_UDP,
+	RNP_RSS_IPV6_SCTP,
+};
+
+int
+rnp_dev_rss_reta_update(struct rte_eth_dev *dev,
+			struct rte_eth_rss_reta_entry64 *reta_conf,
+			uint16_t reta_size);
+int
+rnp_dev_rss_reta_query(struct rte_eth_dev *dev,
+		       struct rte_eth_rss_reta_entry64 *reta_conf,
+		       uint16_t reta_size);
+int
+rnp_dev_rss_hash_update(struct rte_eth_dev *dev,
+			struct rte_eth_rss_conf *rss_conf);
+int
+rnp_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
+			  struct rte_eth_rss_conf *rss_conf);
+int rnp_dev_rss_configure(struct rte_eth_dev *dev);
+
+#endif /* _RNP_RSS_H_ */
-- 
1.8.3.1



Thread overview: 29+ messages
2025-02-08  2:43 [PATCH v7 00/28] [v6]drivers/net Add Support mucse N10 Pmd Driver Wenbo Cao
2025-02-08  2:43 ` [PATCH v7 01/28] net/rnp: add skeleton Wenbo Cao
2025-02-08  2:43 ` [PATCH v7 02/28] net/rnp: add ethdev probe and remove Wenbo Cao
2025-02-08  2:43 ` [PATCH v7 03/28] net/rnp: add log Wenbo Cao
2025-02-08  2:43 ` [PATCH v7 04/28] net/rnp: support mailbox basic operate Wenbo Cao
2025-02-08  2:43 ` [PATCH v7 05/28] net/rnp: add device init and uninit Wenbo Cao
2025-02-08  2:43 ` [PATCH v7 06/28] net/rnp: add get device information operation Wenbo Cao
2025-02-08  2:43 ` [PATCH v7 07/28] net/rnp: add support mac promisc mode Wenbo Cao
2025-02-08  2:43 ` [PATCH v7 08/28] net/rnp: add queue setup and release operations Wenbo Cao
2025-02-08  2:43 ` [PATCH v7 09/28] net/rnp: add queue stop and start operations Wenbo Cao
2025-02-08  2:43 ` [PATCH v7 10/28] net/rnp: add support device start stop operations Wenbo Cao
2025-02-08  2:43 ` Wenbo Cao [this message]
2025-02-08  2:43 ` [PATCH v7 12/28] net/rnp: add support link update operations Wenbo Cao
2025-02-08  2:43 ` [PATCH v7 13/28] net/rnp: add support link setup operations Wenbo Cao
2025-02-08  2:43 ` [PATCH v7 14/28] net/rnp: add Rx burst simple support Wenbo Cao
2025-02-08  2:43 ` [PATCH v7 15/28] net/rnp: add Tx " Wenbo Cao
2025-02-08  2:43 ` [PATCH v7 16/28] net/rnp: add MTU set operation Wenbo Cao
2025-02-08  2:43 ` [PATCH v7 17/28] net/rnp: add Rx scatter segment version Wenbo Cao
2025-02-08  2:43 ` [PATCH v7 18/28] net/rnp: add Tx multiple " Wenbo Cao
2025-02-08  2:43 ` [PATCH v7 19/28] net/rnp: add support basic stats operation Wenbo Cao
2025-02-08  2:43 ` [PATCH v7 20/28] net/rnp: add support xstats operation Wenbo Cao
2025-02-08  2:43 ` [PATCH v7 21/28] net/rnp: add unicast MAC filter operation Wenbo Cao
2025-02-08  2:43 ` [PATCH v7 22/28] net/rnp: add supported packet types Wenbo Cao
2025-02-08  2:44 ` [PATCH v7 23/28] net/rnp: add support Rx checksum offload Wenbo Cao
2025-02-08  2:44 ` [PATCH v7 24/28] net/rnp: add support Tx TSO offload Wenbo Cao
2025-02-08  2:44 ` [PATCH v7 25/28] net/rnp: support VLAN offloads Wenbo Cao
2025-02-08  2:44 ` [PATCH v7 26/28] net/rnp: add support VLAN filters operations Wenbo Cao
2025-02-08  2:44 ` [PATCH v7 27/28] net/rnp: add queue info operation Wenbo Cao
2025-02-08  2:44 ` [PATCH v7 28/28] net/rnp: support Rx/Tx burst mode info Wenbo Cao
