From: Ivan Malov <ivan.malov@oktetlabs.ru>
To: dev@dpdk.org
Cc: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>,
	Andy Moreton <amoreton@xilinx.com>
Subject: [PATCH 1/8] net/sfc: rework flow action RSS support
Date: Tue,  1 Feb 2022 11:49:55 +0300
Message-ID: <20220201085002.320102-2-ivan.malov@oktetlabs.ru>
In-Reply-To: <20220201085002.320102-1-ivan.malov@oktetlabs.ru>

Currently, the driver always allocates a dedicated NIC RSS context
for every separate flow rule with action RSS, which is not optimal.

First of all, multiple rules with the same RSS specification can
share a context: hardware filters simply reference an RSS context
by its handle, so nothing requires a separate context per rule.

Secondly, entries in a context's indirection table are not absolute
queue IDs but offsets relative to the base queue ID of a filter.
Thus, for example, queue arrays "0, 1, 2" and "3, 4, 5" in two
otherwise identical RSS specifications allow the driver to use the
same context, since both yield the same table of queue offsets.
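
To illustrate (a standalone sketch, not part of this patch; the
helper name is invented for the example), reducing a queue array to
the offsets actually stored in the indirection table looks like so:

	#include <stdint.h>

	/* "0, 1, 2" and "3, 4, 5" both reduce to offsets "0, 1, 2". */
	static void
	sw_qids_to_offsets(const uint16_t *queues, unsigned int nb_queues,
			   uint16_t *offsets)
	{
		uint16_t base = queues[0];
		unsigned int i;

		/* Find the smallest (base) queue ID in the array. */
		for (i = 1; i < nb_queues; ++i) {
			if (queues[i] < base)
				base = queues[i];
		}

		/* Entries are stored relative to the base queue ID. */
		for (i = 0; i < nb_queues; ++i)
			offsets[i] = queues[i] - base;
	}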

Rework flow action RSS support in order to use these optimisations.
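
In outline, the lifecycle of the new context objects is as follows
(a simplified pseudo-flow of the helpers added by this patch; error
handling, locking and the dummy context case are elided):

	/* Flow parse: reuse a matching context or create a new one. */
	ctx = sfc_flow_rss_ctx_reuse(sa, &conf, sw_qid_min, queues);
	if (ctx == NULL)
		rc = sfc_flow_rss_ctx_add(sa, &conf, sw_qid_min, queues, &ctx);

	/* Flow insert: allocate and program the NIC context on first use. */
	rc = sfc_flow_rss_ctx_program(sa, ctx);

	/* Flow remove: free the NIC context when its last user is gone. */
	sfc_flow_rss_ctx_terminate(sa, ctx);

	/* Flow destroy: drop the software reference to the context. */
	sfc_flow_rss_ctx_del(sa, ctx);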

Signed-off-by: Ivan Malov <ivan.malov@oktetlabs.ru>
Reviewed-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
Reviewed-by: Andy Moreton <amoreton@xilinx.com>
---
 drivers/net/sfc/meson.build    |   1 +
 drivers/net/sfc/sfc.c          |  12 +-
 drivers/net/sfc/sfc.h          |   4 +-
 drivers/net/sfc/sfc_ethdev.c   |   8 +-
 drivers/net/sfc/sfc_flow.c     | 249 ++++----------------
 drivers/net/sfc/sfc_flow.h     |  19 +-
 drivers/net/sfc/sfc_flow_rss.c | 409 +++++++++++++++++++++++++++++++++
 drivers/net/sfc/sfc_flow_rss.h |  81 +++++++
 8 files changed, 562 insertions(+), 221 deletions(-)
 create mode 100644 drivers/net/sfc/sfc_flow_rss.c
 create mode 100644 drivers/net/sfc/sfc_flow_rss.h

diff --git a/drivers/net/sfc/meson.build b/drivers/net/sfc/meson.build
index 46d94184b8..547cb8db8c 100644
--- a/drivers/net/sfc/meson.build
+++ b/drivers/net/sfc/meson.build
@@ -90,6 +90,7 @@ sources = files(
         'sfc_mae.c',
         'sfc_mae_counter.c',
         'sfc_flow.c',
+        'sfc_flow_rss.c',
         'sfc_flow_tunnel.c',
         'sfc_dp.c',
         'sfc_ef10_rx.c',
diff --git a/drivers/net/sfc/sfc.c b/drivers/net/sfc/sfc.c
index 2cead4e045..51726d229b 100644
--- a/drivers/net/sfc/sfc.c
+++ b/drivers/net/sfc/sfc.c
@@ -848,7 +848,9 @@ sfc_rss_attach(struct sfc_adapter *sa)
 	efx_intr_fini(sa->nic);
 
 	rte_memcpy(rss->key, default_rss_key, sizeof(rss->key));
-	rss->dummy_rss_context = EFX_RSS_CONTEXT_DEFAULT;
+	memset(&rss->dummy_ctx, 0, sizeof(rss->dummy_ctx));
+	rss->dummy_ctx.conf.qid_span = 1;
+	rss->dummy_ctx.dummy = true;
 
 	return 0;
 
@@ -970,6 +972,10 @@ sfc_attach(struct sfc_adapter *sa)
 	if (rc != 0)
 		goto fail_rss_attach;
 
+	rc = sfc_flow_rss_attach(sa);
+	if (rc != 0)
+		goto fail_flow_rss_attach;
+
 	rc = sfc_filter_attach(sa);
 	if (rc != 0)
 		goto fail_filter_attach;
@@ -1033,6 +1039,9 @@ sfc_attach(struct sfc_adapter *sa)
 	sfc_filter_detach(sa);
 
 fail_filter_attach:
+	sfc_flow_rss_detach(sa);
+
+fail_flow_rss_attach:
 	sfc_rss_detach(sa);
 
 fail_rss_attach:
@@ -1087,6 +1096,7 @@ sfc_detach(struct sfc_adapter *sa)
 	sfc_mae_detach(sa);
 	sfc_mae_counter_rxq_detach(sa);
 	sfc_filter_detach(sa);
+	sfc_flow_rss_detach(sa);
 	sfc_rss_detach(sa);
 	sfc_port_detach(sa);
 	sfc_ev_detach(sa);
diff --git a/drivers/net/sfc/sfc.h b/drivers/net/sfc/sfc.h
index 3337cb57e3..c075c01883 100644
--- a/drivers/net/sfc/sfc.h
+++ b/drivers/net/sfc/sfc.h
@@ -27,6 +27,7 @@
 #include "sfc_debug.h"
 #include "sfc_log.h"
 #include "sfc_filter.h"
+#include "sfc_flow_rss.h"
 #include "sfc_flow_tunnel.h"
 #include "sfc_sriov.h"
 #include "sfc_mae.h"
@@ -118,7 +119,7 @@ struct sfc_rss {
 	unsigned int			tbl[EFX_RSS_TBL_SIZE];
 	uint8_t				key[EFX_RSS_KEY_SIZE];
 
-	uint32_t			dummy_rss_context;
+	struct sfc_flow_rss_ctx		dummy_ctx;
 };
 
 /* Adapter private data shared by primary and secondary processes */
@@ -238,6 +239,7 @@ struct sfc_adapter {
 	struct sfc_intr			intr;
 	struct sfc_port			port;
 	struct sfc_sw_stats		sw_stats;
+	struct sfc_flow_rss		flow_rss;
 	/* Registry of tunnel offload contexts */
 	struct sfc_flow_tunnel		flow_tunnels[SFC_FT_MAX_NTUNNELS];
 	struct sfc_filter		filter;
diff --git a/drivers/net/sfc/sfc_ethdev.c b/drivers/net/sfc/sfc_ethdev.c
index d4210b63dd..abf7b8d287 100644
--- a/drivers/net/sfc/sfc_ethdev.c
+++ b/drivers/net/sfc/sfc_ethdev.c
@@ -1674,15 +1674,13 @@ sfc_dev_rss_hash_update(struct rte_eth_dev *dev,
 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
 	struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;
 	unsigned int efx_hash_types;
-	uint32_t contexts[] = {EFX_RSS_CONTEXT_DEFAULT, rss->dummy_rss_context};
 	unsigned int n_contexts;
 	unsigned int mode_i = 0;
 	unsigned int key_i = 0;
+	uint32_t contexts[2];
 	unsigned int i = 0;
 	int rc = 0;
 
-	n_contexts = rss->dummy_rss_context == EFX_RSS_CONTEXT_DEFAULT ? 1 : 2;
-
 	if (sfc_sa2shared(sa)->isolated)
 		return -ENOTSUP;
 
@@ -1709,6 +1707,10 @@ sfc_dev_rss_hash_update(struct rte_eth_dev *dev,
 	if (rc != 0)
 		goto fail_rx_hf_rte_to_efx;
 
+	contexts[0] = EFX_RSS_CONTEXT_DEFAULT;
+	contexts[1] = rss->dummy_ctx.nic_handle;
+	n_contexts = (rss->dummy_ctx.nic_handle_refcnt == 0) ? 1 : 2;
+
 	for (mode_i = 0; mode_i < n_contexts; mode_i++) {
 		rc = efx_rx_scale_mode_set(sa->nic, contexts[mode_i],
 					   rss->hash_alg, efx_hash_types,
diff --git a/drivers/net/sfc/sfc_flow.c b/drivers/net/sfc/sfc_flow.c
index 509fde4a86..bbb40a3b38 100644
--- a/drivers/net/sfc/sfc_flow.c
+++ b/drivers/net/sfc/sfc_flow.c
@@ -22,6 +22,7 @@
 #include "sfc_rx.h"
 #include "sfc_filter.h"
 #include "sfc_flow.h"
+#include "sfc_flow_rss.h"
 #include "sfc_flow_tunnel.h"
 #include "sfc_log.h"
 #include "sfc_dp_rx.h"
@@ -41,11 +42,12 @@ static sfc_flow_parse_cb_t sfc_flow_parse_rte_to_filter;
 static sfc_flow_parse_cb_t sfc_flow_parse_rte_to_mae;
 static sfc_flow_insert_cb_t sfc_flow_filter_insert;
 static sfc_flow_remove_cb_t sfc_flow_filter_remove;
+static sfc_flow_cleanup_cb_t sfc_flow_cleanup;
 
 static const struct sfc_flow_ops_by_spec sfc_flow_ops_filter = {
 	.parse = sfc_flow_parse_rte_to_filter,
 	.verify = NULL,
-	.cleanup = NULL,
+	.cleanup = sfc_flow_cleanup,
 	.insert = sfc_flow_filter_insert,
 	.remove = sfc_flow_filter_remove,
 	.query = NULL,
@@ -1429,8 +1431,14 @@ sfc_flow_parse_queue(struct sfc_adapter *sa,
 	spec_filter->template.efs_dmaq_id = (uint16_t)rxq->hw_index;
 
 	rxq_info = &sfc_sa2shared(sa)->rxq_info[queue->index];
-	spec_filter->rss_hash_required = !!(rxq_info->rxq_flags &
-					    SFC_RXQ_FLAG_RSS_HASH);
+
+	if ((rxq_info->rxq_flags & SFC_RXQ_FLAG_RSS_HASH) != 0) {
+		struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
+		struct sfc_rss *ethdev_rss = &sas->rss;
+
+		spec_filter->template.efs_flags |= EFX_FILTER_FLAG_RX_RSS;
+		spec_filter->rss_ctx = &ethdev_rss->dummy_ctx;
+	}
 
 	return 0;
 }
@@ -1440,107 +1448,30 @@ sfc_flow_parse_rss(struct sfc_adapter *sa,
 		   const struct rte_flow_action_rss *action_rss,
 		   struct rte_flow *flow)
 {
-	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
-	struct sfc_rss *rss = &sas->rss;
-	sfc_ethdev_qid_t ethdev_qid;
+	struct sfc_flow_spec_filter *spec_filter = &flow->spec.filter;
+	struct sfc_flow_rss_conf conf;
+	uint16_t sw_qid_min;
 	struct sfc_rxq *rxq;
-	unsigned int rxq_hw_index_min;
-	unsigned int rxq_hw_index_max;
-	efx_rx_hash_type_t efx_hash_types;
-	const uint8_t *rss_key;
-	struct sfc_flow_spec *spec = &flow->spec;
-	struct sfc_flow_spec_filter *spec_filter = &spec->filter;
-	struct sfc_flow_rss *sfc_rss_conf = &spec_filter->rss_conf;
-	unsigned int i;
-
-	if (action_rss->queue_num == 0)
-		return -EINVAL;
-
-	ethdev_qid = sfc_sa2shared(sa)->ethdev_rxq_count - 1;
-	rxq = sfc_rxq_ctrl_by_ethdev_qid(sa, ethdev_qid);
-	rxq_hw_index_min = rxq->hw_index;
-	rxq_hw_index_max = 0;
-
-	for (i = 0; i < action_rss->queue_num; ++i) {
-		ethdev_qid = action_rss->queue[i];
-
-		if ((unsigned int)ethdev_qid >=
-		    sfc_sa2shared(sa)->ethdev_rxq_count)
-			return -EINVAL;
-
-		rxq = sfc_rxq_ctrl_by_ethdev_qid(sa, ethdev_qid);
-
-		if (rxq->hw_index < rxq_hw_index_min)
-			rxq_hw_index_min = rxq->hw_index;
-
-		if (rxq->hw_index > rxq_hw_index_max)
-			rxq_hw_index_max = rxq->hw_index;
-	}
+	int rc;
 
-	if (rxq_hw_index_max - rxq_hw_index_min + 1 > EFX_MAXRSS)
-		return -EINVAL;
+	spec_filter->template.efs_flags |= EFX_FILTER_FLAG_RX_RSS;
 
-	switch (action_rss->func) {
-	case RTE_ETH_HASH_FUNCTION_DEFAULT:
-	case RTE_ETH_HASH_FUNCTION_TOEPLITZ:
-		break;
-	default:
-		return -EINVAL;
-	}
+	rc = sfc_flow_rss_parse_conf(sa, action_rss, &conf, &sw_qid_min);
+	if (rc != 0)
+		return -rc;
 
-	if (action_rss->level)
-		return -EINVAL;
+	rxq = sfc_rxq_ctrl_by_ethdev_qid(sa, sw_qid_min);
+	spec_filter->template.efs_dmaq_id = rxq->hw_index;
 
-	/*
-	 * Dummy RSS action with only one queue and no specific settings
-	 * for hash types and key does not require dedicated RSS context
-	 * and may be simplified to single queue action.
-	 */
-	if (action_rss->queue_num == 1 && action_rss->types == 0 &&
-	    action_rss->key_len == 0) {
-		spec_filter->template.efs_dmaq_id = rxq_hw_index_min;
+	spec_filter->rss_ctx = sfc_flow_rss_ctx_reuse(sa, &conf, sw_qid_min,
+						      action_rss->queue);
+	if (spec_filter->rss_ctx != NULL)
 		return 0;
-	}
-
-	if (action_rss->types) {
-		int rc;
-
-		rc = sfc_rx_hf_rte_to_efx(sa, action_rss->types,
-					  &efx_hash_types);
-		if (rc != 0)
-			return -rc;
-	} else {
-		unsigned int i;
-
-		efx_hash_types = 0;
-		for (i = 0; i < rss->hf_map_nb_entries; ++i)
-			efx_hash_types |= rss->hf_map[i].efx;
-	}
-
-	if (action_rss->key_len) {
-		if (action_rss->key_len != sizeof(rss->key))
-			return -EINVAL;
-
-		rss_key = action_rss->key;
-	} else {
-		rss_key = rss->key;
-	}
-
-	spec_filter->rss = B_TRUE;
-
-	sfc_rss_conf->rxq_hw_index_min = rxq_hw_index_min;
-	sfc_rss_conf->rxq_hw_index_max = rxq_hw_index_max;
-	sfc_rss_conf->rss_hash_types = efx_hash_types;
-	rte_memcpy(sfc_rss_conf->rss_key, rss_key, sizeof(rss->key));
 
-	for (i = 0; i < RTE_DIM(sfc_rss_conf->rss_tbl); ++i) {
-		unsigned int nb_queues = action_rss->queue_num;
-		struct sfc_rxq *rxq;
-
-		ethdev_qid = action_rss->queue[i % nb_queues];
-		rxq = sfc_rxq_ctrl_by_ethdev_qid(sa, ethdev_qid);
-		sfc_rss_conf->rss_tbl[i] = rxq->hw_index - rxq_hw_index_min;
-	}
+	rc = sfc_flow_rss_ctx_add(sa, &conf, sw_qid_min, action_rss->queue,
+				  &spec_filter->rss_ctx);
+	if (rc != 0)
+		return -rc;
 
 	return 0;
 }
@@ -1597,61 +1528,17 @@ static int
 sfc_flow_filter_insert(struct sfc_adapter *sa,
 		       struct rte_flow *flow)
 {
-	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
-	struct sfc_rss *rss = &sas->rss;
 	struct sfc_flow_spec_filter *spec_filter = &flow->spec.filter;
-	struct sfc_flow_rss *flow_rss = &spec_filter->rss_conf;
-	uint32_t efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;
-	boolean_t create_context;
-	unsigned int i;
+	struct sfc_flow_rss_ctx *rss_ctx = spec_filter->rss_ctx;
 	int rc = 0;
 
-	create_context = spec_filter->rss || (spec_filter->rss_hash_required &&
-			rss->dummy_rss_context == EFX_RSS_CONTEXT_DEFAULT);
-
-	if (create_context) {
-		unsigned int rss_spread;
-		unsigned int rss_hash_types;
-		uint8_t *rss_key;
-
-		if (spec_filter->rss) {
-			rss_spread = flow_rss->rxq_hw_index_max -
-				     flow_rss->rxq_hw_index_min + 1;
-			rss_hash_types = flow_rss->rss_hash_types;
-			rss_key = flow_rss->rss_key;
-		} else {
-			/*
-			 * Initialize dummy RSS context parameters to have
-			 * valid RSS hash. Use default RSS hash function and
-			 * key.
-			 */
-			rss_spread = 1;
-			rss_hash_types = rss->hash_types;
-			rss_key = rss->key;
-		}
-
-		rc = efx_rx_scale_context_alloc(sa->nic,
-						EFX_RX_SCALE_EXCLUSIVE,
-						rss_spread,
-						&efs_rss_context);
-		if (rc != 0)
-			goto fail_scale_context_alloc;
-
-		rc = efx_rx_scale_mode_set(sa->nic, efs_rss_context,
-					   rss->hash_alg,
-					   rss_hash_types, B_TRUE);
-		if (rc != 0)
-			goto fail_scale_mode_set;
+	rc = sfc_flow_rss_ctx_program(sa, rss_ctx);
+	if (rc != 0)
+		goto fail_rss_ctx_program;
 
-		rc = efx_rx_scale_key_set(sa->nic, efs_rss_context,
-					  rss_key, sizeof(rss->key));
-		if (rc != 0)
-			goto fail_scale_key_set;
-	} else {
-		efs_rss_context = rss->dummy_rss_context;
-	}
+	if (rss_ctx != NULL) {
+		unsigned int i;
 
-	if (spec_filter->rss || spec_filter->rss_hash_required) {
 		/*
 		 * At this point, fully elaborated filter specifications
 		 * have been produced from the template. To make sure that
@@ -1661,10 +1548,7 @@ sfc_flow_filter_insert(struct sfc_adapter *sa,
 		for (i = 0; i < spec_filter->count; i++) {
 			efx_filter_spec_t *spec = &spec_filter->filters[i];
 
-			spec->efs_rss_context = efs_rss_context;
-			spec->efs_flags |= EFX_FILTER_FLAG_RX_RSS;
-			if (spec_filter->rss)
-				spec->efs_dmaq_id = flow_rss->rxq_hw_index_min;
+			spec->efs_rss_context = rss_ctx->nic_handle;
 		}
 	}
 
@@ -1672,42 +1556,12 @@ sfc_flow_filter_insert(struct sfc_adapter *sa,
 	if (rc != 0)
 		goto fail_filter_insert;
 
-	if (create_context) {
-		unsigned int dummy_tbl[RTE_DIM(flow_rss->rss_tbl)] = {0};
-		unsigned int *tbl;
-
-		tbl = spec_filter->rss ? flow_rss->rss_tbl : dummy_tbl;
-
-		/*
-		 * Scale table is set after filter insertion because
-		 * the table entries are relative to the base RxQ ID
-		 * and the latter is submitted to the HW by means of
-		 * inserting a filter, so by the time of the request
-		 * the HW knows all the information needed to verify
-		 * the table entries, and the operation will succeed
-		 */
-		rc = efx_rx_scale_tbl_set(sa->nic, efs_rss_context,
-					  tbl, RTE_DIM(flow_rss->rss_tbl));
-		if (rc != 0)
-			goto fail_scale_tbl_set;
-
-		/* Remember created dummy RSS context */
-		if (!spec_filter->rss)
-			rss->dummy_rss_context = efs_rss_context;
-	}
-
 	return 0;
 
-fail_scale_tbl_set:
-	sfc_flow_spec_remove(sa, &flow->spec);
-
 fail_filter_insert:
-fail_scale_key_set:
-fail_scale_mode_set:
-	if (create_context)
-		efx_rx_scale_context_free(sa->nic, efs_rss_context);
+	sfc_flow_rss_ctx_terminate(sa, rss_ctx);
 
-fail_scale_context_alloc:
+fail_rss_ctx_program:
 	return rc;
 }
 
@@ -1722,18 +1576,9 @@ sfc_flow_filter_remove(struct sfc_adapter *sa,
 	if (rc != 0)
 		return rc;
 
-	if (spec_filter->rss) {
-		/*
-		 * All specifications for a given flow rule have the same RSS
-		 * context, so that RSS context value is taken from the first
-		 * filter specification
-		 */
-		efx_filter_spec_t *spec = &spec_filter->filters[0];
-
-		rc = efx_rx_scale_context_free(sa->nic, spec->efs_rss_context);
-	}
+	sfc_flow_rss_ctx_terminate(sa, spec_filter->rss_ctx);
 
-	return rc;
+	return 0;
 }
 
 static int
@@ -2985,8 +2830,6 @@ sfc_flow_fini(struct sfc_adapter *sa)
 void
 sfc_flow_stop(struct sfc_adapter *sa)
 {
-	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
-	struct sfc_rss *rss = &sas->rss;
 	struct rte_flow *flow;
 
 	SFC_ASSERT(sfc_adapter_is_locked(sa));
@@ -2994,11 +2837,6 @@ sfc_flow_stop(struct sfc_adapter *sa)
 	TAILQ_FOREACH(flow, &sa->flow_list, entries)
 		sfc_flow_remove(sa, flow, NULL);
 
-	if (rss->dummy_rss_context != EFX_RSS_CONTEXT_DEFAULT) {
-		efx_rx_scale_context_free(sa->nic, rss->dummy_rss_context);
-		rss->dummy_rss_context = EFX_RSS_CONTEXT_DEFAULT;
-	}
-
 	/*
 	 * MAE counter service is not stopped on flow rule remove to avoid
 	 * extra work. Make sure that it is stopped here.
@@ -3029,3 +2867,12 @@ sfc_flow_start(struct sfc_adapter *sa)
 fail_bad_flow:
 	return rc;
 }
+
+static void
+sfc_flow_cleanup(struct sfc_adapter *sa, struct rte_flow *flow)
+{
+	if (flow == NULL)
+		return;
+
+	sfc_flow_rss_ctx_del(sa, flow->spec.filter.rss_ctx);
+}
diff --git a/drivers/net/sfc/sfc_flow.h b/drivers/net/sfc/sfc_flow.h
index efdecc97ab..545e2267d4 100644
--- a/drivers/net/sfc/sfc_flow.h
+++ b/drivers/net/sfc/sfc_flow.h
@@ -15,6 +15,8 @@
 
 #include "efx.h"
 
+#include "sfc_flow_rss.h"
+
 #ifdef __cplusplus
 extern "C" {
 #endif
@@ -30,15 +32,6 @@ extern "C" {
 #define SFC_BUILD_SET_OVERFLOW(_action, _set) \
 	RTE_BUILD_BUG_ON((_action) >= sizeof(_set) * CHAR_BIT)
 
-/* RSS configuration storage */
-struct sfc_flow_rss {
-	unsigned int	rxq_hw_index_min;
-	unsigned int	rxq_hw_index_max;
-	unsigned int	rss_hash_types;
-	uint8_t		rss_key[EFX_RSS_KEY_SIZE];
-	unsigned int	rss_tbl[EFX_RSS_TBL_SIZE];
-};
-
 /* Flow engines supported by the implementation */
 enum sfc_flow_spec_type {
 	SFC_FLOW_SPEC_FILTER = 0,
@@ -55,12 +48,8 @@ struct sfc_flow_spec_filter {
 	efx_filter_spec_t filters[SF_FLOW_SPEC_NB_FILTERS_MAX];
 	/* number of complete specifications */
 	unsigned int count;
-	/* RSS toggle */
-	boolean_t rss;
-	/* RSS hash toggle */
-	boolean_t rss_hash_required;
-	/* RSS configuration */
-	struct sfc_flow_rss rss_conf;
+	/* RSS context (or NULL) */
+	struct sfc_flow_rss_ctx *rss_ctx;
 };
 
 /* Indicates the role of a given flow in tunnel offload */
diff --git a/drivers/net/sfc/sfc_flow_rss.c b/drivers/net/sfc/sfc_flow_rss.c
new file mode 100644
index 0000000000..17876f11c1
--- /dev/null
+++ b/drivers/net/sfc/sfc_flow_rss.c
@@ -0,0 +1,409 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright(c) 2022 Xilinx, Inc.
+ */
+
+#include <stdbool.h>
+#include <stdint.h>
+
+#include <rte_common.h>
+#include <rte_flow.h>
+#include <rte_malloc.h>
+#include <rte_tailq.h>
+
+#include "efx.h"
+
+#include "sfc.h"
+#include "sfc_debug.h"
+#include "sfc_flow_rss.h"
+#include "sfc_log.h"
+#include "sfc_rx.h"
+
+int
+sfc_flow_rss_attach(struct sfc_adapter *sa)
+{
+	struct sfc_flow_rss *flow_rss = &sa->flow_rss;
+
+	sfc_log_init(sa, "entry");
+
+	TAILQ_INIT(&flow_rss->ctx_list);
+
+	sfc_log_init(sa, "done");
+
+	return 0;
+}
+
+void
+sfc_flow_rss_detach(struct sfc_adapter *sa)
+{
+	sfc_log_init(sa, "entry");
+
+	sfc_log_init(sa, "done");
+}
+
+int
+sfc_flow_rss_parse_conf(struct sfc_adapter *sa,
+			const struct rte_flow_action_rss *in,
+			struct sfc_flow_rss_conf *out, uint16_t *sw_qid_minp)
+{
+	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
+	const struct sfc_rss *ethdev_rss = &sas->rss;
+	uint16_t sw_qid_min;
+	uint16_t sw_qid_max;
+	const uint8_t *key;
+	unsigned int i;
+	int rc;
+
+	if (in->level) {
+		/*
+		 * The caller demands that RSS hash be computed
+		 * within the given encapsulation frame / level.
+		 * Per flow control for that is not implemented.
+		 */
+		sfc_err(sa, "flow-rss: parse: 'level' must be 0");
+		return EINVAL;
+	}
+
+	if (in->types != 0) {
+		rc = sfc_rx_hf_rte_to_efx(sa, in->types,
+					  &out->efx_hash_types);
+		if (rc != 0) {
+			sfc_err(sa, "flow-rss: parse: failed to process 'types'");
+			return rc;
+		}
+	} else {
+		sfc_dbg(sa, "flow-rss: parse: 'types' is 0; proceeding with ethdev setting");
+		out->efx_hash_types = ethdev_rss->hash_types;
+	}
+
+	if (in->key_len != 0) {
+		if (in->key_len != sizeof(out->key)) {
+			sfc_err(sa, "flow-rss: parse: 'key_len' must be either %zu or 0",
+				sizeof(out->key));
+			return EINVAL;
+		}
+
+		if (in->key == NULL) {
+			sfc_err(sa, "flow-rss: parse: 'key' is NULL");
+			return EINVAL;
+		}
+
+		key = in->key;
+	} else {
+		sfc_dbg(sa, "flow-rss: parse: 'key_len' is 0; proceeding with ethdev key");
+		key = ethdev_rss->key;
+	}
+
+	rte_memcpy(out->key, key, sizeof(out->key));
+
+	switch (in->func) {
+	case RTE_ETH_HASH_FUNCTION_DEFAULT:
+		/*
+		 * DEFAULT means that conformance to a specific
+		 * hash algorithm is a don't care to the caller.
+		 * The driver can pick the one it deems optimal.
+		 */
+		break;
+	case RTE_ETH_HASH_FUNCTION_TOEPLITZ:
+		if (ethdev_rss->hash_alg != EFX_RX_HASHALG_TOEPLITZ) {
+			sfc_err(sa, "flow-rss: parse: 'func' TOEPLITZ is unavailable; use DEFAULT");
+			return EINVAL;
+		}
+		break;
+	default:
+		sfc_err(sa, "flow-rss: parse: 'func' #%d is unsupported", in->func);
+		return EINVAL;
+	}
+
+	if (in->queue_num == 0) {
+		sfc_err(sa, "flow-rss: parse: 'queue_num' is 0; MIN=1");
+		return EINVAL;
+	}
+
+	if (in->queue_num > EFX_RSS_TBL_SIZE) {
+		sfc_err(sa, "flow-rss: parse: 'queue_num' is too large; MAX=%u",
+			EFX_RSS_TBL_SIZE);
+		return EINVAL;
+	}
+
+	if (in->queue == NULL) {
+		sfc_err(sa, "flow-rss: parse: 'queue' is NULL");
+		return EINVAL;
+	}
+
+	sw_qid_min = sas->ethdev_rxq_count - 1;
+	sw_qid_max = 0;
+
+	out->nb_qid_offsets = 0;
+
+	for (i = 0; i < in->queue_num; ++i) {
+		uint16_t sw_qid = in->queue[i];
+
+		if (sw_qid >= sas->ethdev_rxq_count) {
+			sfc_err(sa, "flow-rss: parse: queue=%u does not exist",
+				sw_qid);
+			return EINVAL;
+		}
+
+		if (sw_qid < sw_qid_min)
+			sw_qid_min = sw_qid;
+
+		if (sw_qid > sw_qid_max)
+			sw_qid_max = sw_qid;
+
+		if (sw_qid != in->queue[0] + i)
+			out->nb_qid_offsets = in->queue_num;
+	}
+
+	out->qid_span = sw_qid_max - sw_qid_min + 1;
+
+	if (out->qid_span > EFX_MAXRSS) {
+		sfc_err(sa, "flow-rss: parse: queue ID span %u is too large; MAX=%u",
+			out->qid_span, EFX_MAXRSS);
+		return EINVAL;
+	}
+
+	if (sw_qid_minp != NULL)
+		*sw_qid_minp = sw_qid_min;
+
+	return 0;
+}
+
+struct sfc_flow_rss_ctx *
+sfc_flow_rss_ctx_reuse(struct sfc_adapter *sa,
+		       const struct sfc_flow_rss_conf *conf,
+		       uint16_t sw_qid_min, const uint16_t *sw_qids)
+{
+	struct sfc_flow_rss *flow_rss = &sa->flow_rss;
+	struct sfc_flow_rss_ctx *ctx;
+
+	SFC_ASSERT(sfc_adapter_is_locked(sa));
+
+	TAILQ_FOREACH(ctx, &flow_rss->ctx_list, entries) {
+		if (memcmp(&ctx->conf, conf, sizeof(*conf)) != 0)
+			continue;
+
+		if (conf->nb_qid_offsets != 0) {
+			bool match_confirmed = true;
+			unsigned int i;
+
+			for (i = 0; i < conf->nb_qid_offsets; ++i) {
+				uint16_t qid_offset = sw_qids[i] - sw_qid_min;
+
+				if (ctx->qid_offsets[i] != qid_offset) {
+					match_confirmed = false;
+					break;
+				}
+			}
+
+			if (!match_confirmed)
+				continue;
+		}
+
+		sfc_dbg(sa, "flow-rss: reusing ctx=%p", ctx);
+		++(ctx->refcnt);
+		return ctx;
+	}
+
+	return NULL;
+}
+
+int
+sfc_flow_rss_ctx_add(struct sfc_adapter *sa,
+		     const struct sfc_flow_rss_conf *conf, uint16_t sw_qid_min,
+		     const uint16_t *sw_qids, struct sfc_flow_rss_ctx **ctxp)
+{
+	struct sfc_flow_rss *flow_rss = &sa->flow_rss;
+	struct sfc_flow_rss_ctx *ctx;
+
+	SFC_ASSERT(sfc_adapter_is_locked(sa));
+
+	ctx = rte_zmalloc("sfc_flow_rss_ctx", sizeof(*ctx), 0);
+	if (ctx == NULL)
+		return ENOMEM;
+
+	if (conf->nb_qid_offsets != 0) {
+		unsigned int i;
+
+		ctx->qid_offsets = rte_calloc("sfc_flow_rss_ctx_qid_offsets",
+					      conf->nb_qid_offsets,
+					      sizeof(*ctx->qid_offsets), 0);
+		if (ctx->qid_offsets == NULL) {
+			rte_free(ctx);
+			return ENOMEM;
+		}
+
+		for (i = 0; i < conf->nb_qid_offsets; ++i)
+			ctx->qid_offsets[i] = sw_qids[i] - sw_qid_min;
+	}
+
+	ctx->conf = *conf;
+	ctx->refcnt = 1;
+
+	TAILQ_INSERT_TAIL(&flow_rss->ctx_list, ctx, entries);
+
+	*ctxp = ctx;
+
+	sfc_dbg(sa, "flow-rss: added ctx=%p", ctx);
+
+	return 0;
+}
+
+void
+sfc_flow_rss_ctx_del(struct sfc_adapter *sa, struct sfc_flow_rss_ctx *ctx)
+{
+	struct sfc_flow_rss *flow_rss = &sa->flow_rss;
+
+	if (ctx == NULL)
+		return;
+
+	SFC_ASSERT(sfc_adapter_is_locked(sa));
+
+	if (ctx->dummy)
+		return;
+
+	SFC_ASSERT(ctx->refcnt != 0);
+
+	--(ctx->refcnt);
+
+	if (ctx->refcnt != 0)
+		return;
+
+	if (ctx->nic_handle_refcnt != 0) {
+		sfc_err(sa, "flow-rss: deleting ctx=%p abandons its NIC resource: handle=0x%08x, refcnt=%u",
+			ctx, ctx->nic_handle, ctx->nic_handle_refcnt);
+	}
+
+	TAILQ_REMOVE(&flow_rss->ctx_list, ctx, entries);
+	rte_free(ctx->qid_offsets);
+	rte_free(ctx);
+
+	sfc_dbg(sa, "flow-rss: deleted ctx=%p", ctx);
+}
+
+static int
+sfc_flow_rss_ctx_program_tbl(struct sfc_adapter *sa,
+			     const struct sfc_flow_rss_ctx *ctx)
+{
+	const struct sfc_flow_rss_conf *conf = &ctx->conf;
+	unsigned int *tbl = sa->flow_rss.bounce_tbl;
+	unsigned int i;
+
+	SFC_ASSERT(sfc_adapter_is_locked(sa));
+
+	if (conf->nb_qid_offsets != 0) {
+		SFC_ASSERT(ctx->qid_offsets != NULL);
+
+		for (i = 0; i < EFX_RSS_TBL_SIZE; ++i)
+			tbl[i] = ctx->qid_offsets[i % conf->nb_qid_offsets];
+	} else {
+		for (i = 0; i < EFX_RSS_TBL_SIZE; ++i)
+			tbl[i] = i % conf->qid_span;
+	}
+
+	return efx_rx_scale_tbl_set(sa->nic, ctx->nic_handle,
+				    tbl, EFX_RSS_TBL_SIZE);
+}
+
+int
+sfc_flow_rss_ctx_program(struct sfc_adapter *sa, struct sfc_flow_rss_ctx *ctx)
+{
+	efx_rx_scale_context_type_t ctx_type = EFX_RX_SCALE_EXCLUSIVE;
+	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
+	struct sfc_rss *ethdev_rss = &sas->rss;
+	struct sfc_flow_rss_conf *conf;
+	bool allocation_done = B_FALSE;
+	int rc;
+
+	if (ctx == NULL)
+		return 0;
+
+	conf = &ctx->conf;
+
+	SFC_ASSERT(sfc_adapter_is_locked(sa));
+
+	if (ctx->nic_handle_refcnt == 0) {
+		rc = efx_rx_scale_context_alloc(sa->nic, ctx_type,
+						conf->qid_span,
+						&ctx->nic_handle);
+		if (rc != 0) {
+			sfc_err(sa, "flow-rss: failed to allocate NIC resource for ctx=%p: type=%d, qid_span=%u, rc=%d",
+				ctx, ctx_type, conf->qid_span, rc);
+			goto fail;
+		}
+
+		sfc_dbg(sa, "flow-rss: allocated NIC resource for ctx=%p: type=%d, qid_span=%u; handle=0x%08x",
+			ctx, ctx_type, conf->qid_span,
+			ctx->nic_handle);
+
+		++(ctx->nic_handle_refcnt);
+		allocation_done = B_TRUE;
+	} else {
+		++(ctx->nic_handle_refcnt);
+		return 0;
+	}
+
+	rc = efx_rx_scale_mode_set(sa->nic, ctx->nic_handle,
+				   ethdev_rss->hash_alg,
+				   (ctx->dummy) ? ethdev_rss->hash_types :
+						  conf->efx_hash_types,
+				   B_TRUE);
+	if (rc != 0) {
+		sfc_err(sa, "flow-rss: failed to configure hash for ctx=%p: efx_hash_alg=%d, efx_hash_types=0x%08x; rc=%d",
+			ctx, ethdev_rss->hash_alg,
+			(ctx->dummy) ? ethdev_rss->hash_types :
+				       conf->efx_hash_types,
+			rc);
+		goto fail;
+	}
+
+	rc = efx_rx_scale_key_set(sa->nic, ctx->nic_handle,
+				  (ctx->dummy) ? ethdev_rss->key : conf->key,
+				  RTE_DIM(conf->key));
+	if (rc != 0) {
+		sfc_err(sa, "flow-rss: failed to set key for ctx=%p; rc=%d",
+			ctx, rc);
+		goto fail;
+	}
+
+	rc = sfc_flow_rss_ctx_program_tbl(sa, ctx);
+	if (rc != 0) {
+		sfc_err(sa, "flow-rss: failed to program table for ctx=%p; rc=%d",
+			ctx, rc);
+		goto fail;
+	}
+
+	return 0;
+
+fail:
+	if (allocation_done)
+		sfc_flow_rss_ctx_terminate(sa, ctx);
+
+	return rc;
+}
+
+void
+sfc_flow_rss_ctx_terminate(struct sfc_adapter *sa, struct sfc_flow_rss_ctx *ctx)
+{
+	if (ctx == NULL)
+		return;
+
+	SFC_ASSERT(sfc_adapter_is_locked(sa));
+
+	SFC_ASSERT(ctx->nic_handle_refcnt != 0);
+	--(ctx->nic_handle_refcnt);
+
+	if (ctx->nic_handle_refcnt == 0) {
+		int rc;
+
+		rc = efx_rx_scale_context_free(sa->nic, ctx->nic_handle);
+		if (rc != 0) {
+			sfc_err(sa, "flow-rss: failed to release NIC resource for ctx=%p: handle=0x%08x; rc=%d",
+				ctx, ctx->nic_handle, rc);
+
+			sfc_warn(sa, "flow-rss: proceeding despite the prior error");
+		}
+
+		sfc_dbg(sa, "flow-rss: released NIC resource for ctx=%p; rc=%d",
+			ctx, rc);
+	}
+}
diff --git a/drivers/net/sfc/sfc_flow_rss.h b/drivers/net/sfc/sfc_flow_rss.h
new file mode 100644
index 0000000000..cb2355ab67
--- /dev/null
+++ b/drivers/net/sfc/sfc_flow_rss.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright(c) 2022 Xilinx, Inc.
+ */
+
+#ifndef _SFC_FLOW_RSS_H
+#define _SFC_FLOW_RSS_H
+
+#include <stdbool.h>
+#include <stdint.h>
+
+#include <rte_flow.h>
+#include <rte_tailq.h>
+
+#include "efx.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct sfc_flow_rss_conf {
+	uint8_t				key[EFX_RSS_KEY_SIZE];
+	efx_rx_hash_type_t		efx_hash_types;
+	unsigned int			nb_qid_offsets;
+	unsigned int			qid_span;
+};
+
+struct sfc_flow_rss_ctx {
+	TAILQ_ENTRY(sfc_flow_rss_ctx)	entries;
+
+	unsigned int			refcnt;
+	bool				dummy;
+
+	unsigned int			nic_handle_refcnt;
+	uint32_t			nic_handle;
+
+	struct sfc_flow_rss_conf	conf;
+
+	uint16_t			*qid_offsets;
+};
+
+TAILQ_HEAD(sfc_flow_rss_ctx_list, sfc_flow_rss_ctx);
+
+struct sfc_flow_rss {
+	unsigned int			bounce_tbl[EFX_RSS_TBL_SIZE];
+
+	struct sfc_flow_rss_ctx_list	ctx_list;
+};
+
+struct sfc_adapter;
+
+int sfc_flow_rss_attach(struct sfc_adapter *sa);
+
+void sfc_flow_rss_detach(struct sfc_adapter *sa);
+
+int sfc_flow_rss_parse_conf(struct sfc_adapter *sa,
+			    const struct rte_flow_action_rss *in,
+			    struct sfc_flow_rss_conf *out,
+			    uint16_t *sw_qid_minp);
+
+struct sfc_flow_rss_ctx *sfc_flow_rss_ctx_reuse(struct sfc_adapter *sa,
+				const struct sfc_flow_rss_conf *conf,
+				uint16_t sw_qid_min, const uint16_t *sw_qids);
+
+int sfc_flow_rss_ctx_add(struct sfc_adapter *sa,
+			 const struct sfc_flow_rss_conf *conf,
+			 uint16_t sw_qid_min, const uint16_t *sw_qids,
+			 struct sfc_flow_rss_ctx **ctxp);
+
+void sfc_flow_rss_ctx_del(struct sfc_adapter *sa, struct sfc_flow_rss_ctx *ctx);
+
+int sfc_flow_rss_ctx_program(struct sfc_adapter *sa,
+			     struct sfc_flow_rss_ctx *ctx);
+
+void sfc_flow_rss_ctx_terminate(struct sfc_adapter *sa,
+				struct sfc_flow_rss_ctx *ctx);
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* _SFC_FLOW_RSS_H */
-- 
2.30.2


Thread overview: 12+ messages
2022-02-01  8:49 [PATCH 0/8] net/sfc: improve flow action RSS support on EF100 boards Ivan Malov
2022-02-01  8:49 ` [PATCH 1/8] net/sfc: rework flow action RSS support Ivan Malov [this message]
2022-02-01  8:49 ` [PATCH 2/8] common/sfc_efx/base: query RSS queue span limit on Riverhead Ivan Malov
2022-02-01  8:49 ` [PATCH 3/8] net/sfc: use non-static queue span limit in flow action RSS Ivan Malov
2022-02-01  8:49 ` [PATCH 4/8] common/sfc_efx/base: revise name of RSS table entry count Ivan Malov
2022-02-01  8:49 ` [PATCH 5/8] common/sfc_efx/base: support selecting RSS table entry count Ivan Malov
2022-02-02 11:51   ` Ray Kinsella
2022-02-02 12:24     ` Ivan Malov
2022-02-01  8:50 ` [PATCH 6/8] net/sfc: use adaptive table entry count in flow action RSS Ivan Malov
2022-02-01  8:50 ` [PATCH 7/8] common/sfc_efx/base: support the even spread RSS mode Ivan Malov
2022-02-01  8:50 ` [PATCH 8/8] net/sfc: use the even spread mode in flow action RSS Ivan Malov
2022-02-02 17:41 ` [PATCH 0/8] net/sfc: improve flow action RSS support on EF100 boards Ferruh Yigit
