From: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
To: dev@dpdk.org
Cc: Igor Romanov <igor.romanov@oktetlabs.ru>,
	Andy Moreton <amoreton@xilinx.com>,
	Ivan Malov <ivan.malov@oktetlabs.ru>
Subject: [dpdk-dev] [PATCH v2 12/20] net/sfc: reserve RxQ for counters
Date: Fri,  4 Jun 2021 17:24:06 +0300	[thread overview]
Message-ID: <20210604142414.283611-13-andrew.rybchenko@oktetlabs.ru> (raw)
In-Reply-To: <20210604142414.283611-1-andrew.rybchenko@oktetlabs.ru>

From: Igor Romanov <igor.romanov@oktetlabs.ru>

MAE delivers counter data as special packets via a dedicated Rx queue.
Reserve an RxQ so that it does not interfere with ethdev Rx queues.
A routine will be added later to handle these packets.

There is no point in reserving the queue if no service cores are
available, since counters cannot be used without them.

Signed-off-by: Igor Romanov <igor.romanov@oktetlabs.ru>
Signed-off-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
Reviewed-by: Andy Moreton <amoreton@xilinx.com>
Reviewed-by: Ivan Malov <ivan.malov@oktetlabs.ru>
---
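Note for reviewers: the EvQ/RxQ/TxQ accounting added to
sfc_estimate_resource_limits() can be summarised by the standalone C
sketch below. It is illustrative only: plain integers stand in for the
adapter state, the struct and function names are not driver symbols,
and the final split uses an explicit intermediate so the unsigned
arithmetic cannot wrap (the patch itself subtracts the full rxq_max).

    #include <stdbool.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    struct qpools {
            unsigned int evq;       /* EvQs granted by firmware */
            unsigned int rxq;       /* RxQs granted by firmware */
            unsigned int txq;       /* TxQs granted by firmware */
    };

    static bool
    estimate(struct qpools p, bool want_counters, unsigned int *rxq_max,
             unsigned int *txq_max, bool *counters_rxq)
    {
            unsigned int rx_extra;

            *rxq_max = *txq_max = 0;
            *counters_rxq = false;

            if (p.evq == 0)
                    return false;   /* not even a management EvQ */
            p.evq--;                /* one EvQ for management */

            /* Absolutely required minimum: one ethdev RxQ and one TxQ */
            if (p.rxq > 0 && p.evq > 0) {
                    *rxq_max = 1;
                    p.rxq--;
                    p.evq--;
            }
            if (p.txq > 0 && p.evq > 0) {
                    *txq_max = 1;
                    p.txq--;
                    p.evq--;
            }

            /* Optional MAE counters RxQ costs one RxQ plus one EvQ */
            if (want_counters && p.rxq > 0 && p.evq > 0) {
                    p.rxq--;
                    p.evq--;
                    *counters_rxq = true;
            }

            /* Split the remainder; each Rx or Tx queue needs its own EvQ */
            rx_extra = MIN(p.rxq, p.evq / 2);
            *rxq_max += rx_extra;
            *txq_max += MIN(p.txq, p.evq - rx_extra);

            return true;
    }
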
 drivers/net/sfc/meson.build       |   1 +
 drivers/net/sfc/sfc.c             |  68 ++++++++--
 drivers/net/sfc/sfc.h             |  19 +++
 drivers/net/sfc/sfc_dp.h          |   2 +
 drivers/net/sfc/sfc_ev.h          |  72 ++++++++--
 drivers/net/sfc/sfc_mae.c         |   1 +
 drivers/net/sfc/sfc_mae_counter.c | 217 ++++++++++++++++++++++++++++++
 drivers/net/sfc/sfc_mae_counter.h |  44 ++++++
 drivers/net/sfc/sfc_rx.c          |  43 ++++--
 9 files changed, 438 insertions(+), 29 deletions(-)
 create mode 100644 drivers/net/sfc/sfc_mae_counter.c
 create mode 100644 drivers/net/sfc/sfc_mae_counter.h
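
Also for reference, the queue numbering implemented by the sfc_ev.h
helpers below, written out as a small standalone sketch (the names are
illustrative, not driver symbols). With one reserved counters RxQ the
layout is: EvQ 0 for management, EvQ 1 for the counters RxQ (sw_index 0),
EvQ 1 + nb_reserved + i for ethdev RxQ i (sw_index nb_reserved + i), and
Tx EvQs following all Rx EvQs.

    #include <stdint.h>

    #define ETHDEV_QID_INVALID      ((int32_t)-1)

    /* ethdev Rx queue ID of an Rx queue software index, if any */
    static int32_t
    ethdev_rx_qid(unsigned int nb_reserved_rxq, unsigned int rxq_sw_index)
    {
            if (rxq_sw_index < nb_reserved_rxq)
                    return ETHDEV_QID_INVALID;  /* reserved for the driver */
            return (int32_t)(rxq_sw_index - nb_reserved_rxq);
    }

    /* Event queue software index serving a given Rx queue */
    static unsigned int
    rx_evq_sw_index(unsigned int rxq_sw_index)
    {
            /* EvQ 0 is management; Rx EvQs (reserved first) follow in order */
            return 1 + rxq_sw_index;
    }

    /* Event queue software index serving a given Tx queue */
    static unsigned int
    tx_evq_sw_index(unsigned int nb_reserved_rxq, unsigned int nb_rx_queues,
                    unsigned int txq_sw_index)
    {
            return 1 + nb_reserved_rxq + nb_rx_queues + txq_sw_index;
    }
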

diff --git a/drivers/net/sfc/meson.build b/drivers/net/sfc/meson.build
index 4ac97e8d43..f8880f740a 100644
--- a/drivers/net/sfc/meson.build
+++ b/drivers/net/sfc/meson.build
@@ -55,6 +55,7 @@ sources = files(
         'sfc_filter.c',
         'sfc_switch.c',
         'sfc_mae.c',
+        'sfc_mae_counter.c',
         'sfc_flow.c',
         'sfc_dp.c',
         'sfc_ef10_rx.c',
diff --git a/drivers/net/sfc/sfc.c b/drivers/net/sfc/sfc.c
index 3477c7530b..4097cf39de 100644
--- a/drivers/net/sfc/sfc.c
+++ b/drivers/net/sfc/sfc.c
@@ -20,6 +20,7 @@
 #include "sfc_log.h"
 #include "sfc_ev.h"
 #include "sfc_rx.h"
+#include "sfc_mae_counter.h"
 #include "sfc_tx.h"
 #include "sfc_kvargs.h"
 #include "sfc_tweak.h"
@@ -174,6 +175,7 @@ static int
 sfc_estimate_resource_limits(struct sfc_adapter *sa)
 {
 	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
+	struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
 	efx_drv_limits_t limits;
 	int rc;
 	uint32_t evq_allocated;
@@ -235,17 +237,53 @@ sfc_estimate_resource_limits(struct sfc_adapter *sa)
 	rxq_allocated = MIN(rxq_allocated, limits.edl_max_rxq_count);
 	txq_allocated = MIN(txq_allocated, limits.edl_max_txq_count);
 
-	/* Subtract management EVQ not used for traffic */
-	SFC_ASSERT(evq_allocated > 0);
+	/*
+	 * Subtract the management EVQ, which is not used for traffic.
+	 * The resource allocation strategy is as follows:
+	 * - one EVQ for management
+	 * - one EVQ for each ethdev RXQ
+	 * - one EVQ for each ethdev TXQ
+	 * - one EVQ and one RXQ for optional MAE counters.
+	 */
+	if (evq_allocated == 0) {
+		sfc_err(sa, "count of allocated EvQ is 0");
+		rc = ENOMEM;
+		goto fail_allocate_evq;
+	}
 	evq_allocated--;
 
-	/* Right now we use separate EVQ for Rx and Tx */
-	sa->rxq_max = MIN(rxq_allocated, evq_allocated / 2);
-	sa->txq_max = MIN(txq_allocated, evq_allocated - sa->rxq_max);
+	/*
+	 * Reserve absolutely required minimum.
+	 * Right now we use separate EVQ for Rx and Tx.
+	 */
+	if (rxq_allocated > 0 && evq_allocated > 0) {
+		sa->rxq_max = 1;
+		rxq_allocated--;
+		evq_allocated--;
+	}
+	if (txq_allocated > 0 && evq_allocated > 0) {
+		sa->txq_max = 1;
+		txq_allocated--;
+		evq_allocated--;
+	}
+
+	if (sfc_mae_counter_rxq_required(sa) &&
+	    rxq_allocated > 0 && evq_allocated > 0) {
+		rxq_allocated--;
+		evq_allocated--;
+		sas->counters_rxq_allocated = true;
+	} else {
+		sas->counters_rxq_allocated = false;
+	}
+
+	/* Add remaining allocated queues */
+	sa->rxq_max += MIN(rxq_allocated, evq_allocated / 2);
+	sa->txq_max += MIN(txq_allocated, evq_allocated - sa->rxq_max);
 
 	/* Keep NIC initialized */
 	return 0;
 
+fail_allocate_evq:
 fail_get_vi_pool:
 	efx_nic_fini(sa->nic);
 fail_nic_init:
@@ -256,14 +294,20 @@ static int
 sfc_set_drv_limits(struct sfc_adapter *sa)
 {
 	const struct rte_eth_dev_data *data = sa->eth_dev->data;
+	uint32_t rxq_reserved = sfc_nb_reserved_rxq(sfc_sa2shared(sa));
 	efx_drv_limits_t lim;
 
 	memset(&lim, 0, sizeof(lim));
 
-	/* Limits are strict since take into account initial estimation */
+	/*
+	 * Limits are strict since they take into account the initial
+	 * estimation. Resource allocation strategy is described in
+	 * sfc_estimate_resource_limits().
+	 */
 	lim.edl_min_evq_count = lim.edl_max_evq_count =
-		1 + data->nb_rx_queues + data->nb_tx_queues;
-	lim.edl_min_rxq_count = lim.edl_max_rxq_count = data->nb_rx_queues;
+		1 + data->nb_rx_queues + data->nb_tx_queues + rxq_reserved;
+	lim.edl_min_rxq_count = lim.edl_max_rxq_count =
+		data->nb_rx_queues + rxq_reserved;
 	lim.edl_min_txq_count = lim.edl_max_txq_count = data->nb_tx_queues;
 
 	return efx_nic_set_drv_limits(sa->nic, &lim);
@@ -834,6 +878,10 @@ sfc_attach(struct sfc_adapter *sa)
 	if (rc != 0)
 		goto fail_filter_attach;
 
+	rc = sfc_mae_counter_rxq_attach(sa);
+	if (rc != 0)
+		goto fail_mae_counter_rxq_attach;
+
 	rc = sfc_mae_attach(sa);
 	if (rc != 0)
 		goto fail_mae_attach;
@@ -862,6 +910,9 @@ sfc_attach(struct sfc_adapter *sa)
 	sfc_mae_detach(sa);
 
 fail_mae_attach:
+	sfc_mae_counter_rxq_detach(sa);
+
+fail_mae_counter_rxq_attach:
 	sfc_filter_detach(sa);
 
 fail_filter_attach:
@@ -903,6 +954,7 @@ sfc_detach(struct sfc_adapter *sa)
 	sfc_flow_fini(sa);
 
 	sfc_mae_detach(sa);
+	sfc_mae_counter_rxq_detach(sa);
 	sfc_filter_detach(sa);
 	sfc_rss_detach(sa);
 	sfc_port_detach(sa);
diff --git a/drivers/net/sfc/sfc.h b/drivers/net/sfc/sfc.h
index 00fc26cf0e..546739bd4a 100644
--- a/drivers/net/sfc/sfc.h
+++ b/drivers/net/sfc/sfc.h
@@ -186,6 +186,8 @@ struct sfc_adapter_shared {
 
 	char				*dp_rx_name;
 	char				*dp_tx_name;
+
+	bool				counters_rxq_allocated;
 };
 
 /* Adapter process private data */
@@ -205,6 +207,15 @@ sfc_adapter_priv_by_eth_dev(struct rte_eth_dev *eth_dev)
 	return sap;
 }
 
+/* RxQ dedicated for counters (counter only RxQ) data */
+struct sfc_counter_rxq {
+	unsigned int			state;
+#define SFC_COUNTER_RXQ_ATTACHED		0x1
+#define SFC_COUNTER_RXQ_INITIALIZED		0x2
+	sfc_sw_index_t			sw_index;
+	struct rte_mempool		*mp;
+};
+
 /* Adapter private data */
 struct sfc_adapter {
 	/*
@@ -283,6 +294,8 @@ struct sfc_adapter {
 	bool				mgmt_evq_running;
 	struct sfc_evq			*mgmt_evq;
 
+	struct sfc_counter_rxq		counter_rxq;
+
 	struct sfc_rxq			*rxq_ctrl;
 	struct sfc_txq			*txq_ctrl;
 
@@ -357,6 +370,12 @@ sfc_adapter_lock_fini(__rte_unused struct sfc_adapter *sa)
 	/* Just for symmetry of the API */
 }
 
+static inline unsigned int
+sfc_nb_counter_rxq(const struct sfc_adapter_shared *sas)
+{
+	return sas->counters_rxq_allocated ? 1 : 0;
+}
+
 /** Get the number of milliseconds since boot from the default timer */
 static inline uint64_t
 sfc_get_system_msecs(void)
diff --git a/drivers/net/sfc/sfc_dp.h b/drivers/net/sfc/sfc_dp.h
index 76065483d4..61c1a3fbac 100644
--- a/drivers/net/sfc/sfc_dp.h
+++ b/drivers/net/sfc/sfc_dp.h
@@ -97,6 +97,8 @@ struct sfc_dp {
 TAILQ_HEAD(sfc_dp_list, sfc_dp);
 
 typedef unsigned int sfc_sw_index_t;
+#define SFC_SW_INDEX_INVALID	((sfc_sw_index_t)(UINT_MAX))
+
 typedef int32_t	sfc_ethdev_qid_t;
 #define SFC_ETHDEV_QID_INVALID	((sfc_ethdev_qid_t)(-1))
 
diff --git a/drivers/net/sfc/sfc_ev.h b/drivers/net/sfc/sfc_ev.h
index 3f3c4b5b9a..b2a0380205 100644
--- a/drivers/net/sfc/sfc_ev.h
+++ b/drivers/net/sfc/sfc_ev.h
@@ -66,36 +66,87 @@ sfc_mgmt_evq_sw_index(__rte_unused const struct sfc_adapter_shared *sas)
 	return 0;
 }
 
+/* Return the number of Rx queues reserved for driver's internal use */
+static inline unsigned int
+sfc_nb_reserved_rxq(const struct sfc_adapter_shared *sas)
+{
+	return sfc_nb_counter_rxq(sas);
+}
+
+static inline unsigned int
+sfc_nb_reserved_evq(const struct sfc_adapter_shared *sas)
+{
+	/* One EvQ for management plus an EvQ for each reserved RxQ */
+	return 1 + sfc_nb_reserved_rxq(sas);
+}
+
+/*
+ * The mapping functions that return SW index of a specific reserved
+ * queue rely on the relative order of reserved queues. Some reserved
+ * queues are optional, and if they are disabled or not supported, then
+ * the function for that specific reserved queue will return previous
+ * valid index of a reserved queue in the dependency chain or
+ * SFC_SW_INDEX_INVALID if it is the first reserved queue in the chain.
+ * If at least one of the reserved queues in the chain is enabled, then
+ * the corresponding function will give valid SW index, even if previous
+ * functions in the chain returned SFC_SW_INDEX_INVALID, since this value
+ * is one less than the first valid SW index.
+ *
+ * The dependency mechanism is utilized to avoid rigid defines for SW indices
+ * for reserved queues and to allow these indices to shrink and make space
+ * for ethdev queue indices when some of the reserved queues are disabled.
+ */
+
+static inline sfc_sw_index_t
+sfc_counters_rxq_sw_index(const struct sfc_adapter_shared *sas)
+{
+	return sas->counters_rxq_allocated ? 0 : SFC_SW_INDEX_INVALID;
+}
+
 /*
  * Functions below define event queue to transmit/receive queue and vice
  * versa mapping.
+ * SFC_ETHDEV_QID_INVALID is returned when sw_index is converted to
+ * ethdev_qid, but sw_index represents a reserved queue for driver's
+ * internal use.
  * Own event queue is allocated for management, each Rx and each Tx queue.
  * Zero event queue is used for management events.
- * Rx event queues from 1 to RxQ number follow management event queue.
+ * When counters are supported, one Rx event queue is reserved.
+ * Rx event queues follow reserved event queues.
  * Tx event queues follow Rx event queues.
  */
 
 static inline sfc_ethdev_qid_t
-sfc_ethdev_rx_qid_by_rxq_sw_index(__rte_unused struct sfc_adapter_shared *sas,
+sfc_ethdev_rx_qid_by_rxq_sw_index(struct sfc_adapter_shared *sas,
 				  sfc_sw_index_t rxq_sw_index)
 {
-	/* Only ethdev queues are present for now */
-	return rxq_sw_index;
+	if (rxq_sw_index < sfc_nb_reserved_rxq(sas))
+		return SFC_ETHDEV_QID_INVALID;
+
+	return rxq_sw_index - sfc_nb_reserved_rxq(sas);
 }
 
 static inline sfc_sw_index_t
-sfc_rxq_sw_index_by_ethdev_rx_qid(__rte_unused struct sfc_adapter_shared *sas,
+sfc_rxq_sw_index_by_ethdev_rx_qid(struct sfc_adapter_shared *sas,
 				  sfc_ethdev_qid_t ethdev_qid)
 {
-	/* Only ethdev queues are present for now */
-	return ethdev_qid;
+	return sfc_nb_reserved_rxq(sas) + ethdev_qid;
 }
 
 static inline sfc_sw_index_t
-sfc_evq_sw_index_by_rxq_sw_index(__rte_unused struct sfc_adapter *sa,
+sfc_evq_sw_index_by_rxq_sw_index(struct sfc_adapter *sa,
 				 sfc_sw_index_t rxq_sw_index)
 {
-	return 1 + rxq_sw_index;
+	struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
+	sfc_ethdev_qid_t ethdev_qid;
+
+	ethdev_qid = sfc_ethdev_rx_qid_by_rxq_sw_index(sas, rxq_sw_index);
+	if (ethdev_qid == SFC_ETHDEV_QID_INVALID) {
+		/* One EvQ is reserved for management */
+		return 1 + rxq_sw_index;
+	}
+
+	return sfc_nb_reserved_evq(sas) + ethdev_qid;
 }
 
 static inline sfc_ethdev_qid_t
@@ -118,7 +169,8 @@ static inline sfc_sw_index_t
 sfc_evq_sw_index_by_txq_sw_index(struct sfc_adapter *sa,
 				 sfc_sw_index_t txq_sw_index)
 {
-	return 1 + sa->eth_dev->data->nb_rx_queues + txq_sw_index;
+	return sfc_nb_reserved_evq(sfc_sa2shared(sa)) +
+		sa->eth_dev->data->nb_rx_queues + txq_sw_index;
 }
 
 int sfc_ev_attach(struct sfc_adapter *sa);
diff --git a/drivers/net/sfc/sfc_mae.c b/drivers/net/sfc/sfc_mae.c
index d8c662503f..e603ffbdb4 100644
--- a/drivers/net/sfc/sfc_mae.c
+++ b/drivers/net/sfc/sfc_mae.c
@@ -16,6 +16,7 @@
 #include "efx.h"
 
 #include "sfc.h"
+#include "sfc_mae_counter.h"
 #include "sfc_log.h"
 #include "sfc_switch.h"
 
diff --git a/drivers/net/sfc/sfc_mae_counter.c b/drivers/net/sfc/sfc_mae_counter.c
new file mode 100644
index 0000000000..c7646cf7b1
--- /dev/null
+++ b/drivers/net/sfc/sfc_mae_counter.c
@@ -0,0 +1,217 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright(c) 2020-2021 Xilinx, Inc.
+ */
+
+#include <rte_common.h>
+
+#include "efx.h"
+
+#include "sfc_ev.h"
+#include "sfc.h"
+#include "sfc_rx.h"
+#include "sfc_mae_counter.h"
+#include "sfc_service.h"
+
+static uint32_t
+sfc_mae_counter_get_service_lcore(struct sfc_adapter *sa)
+{
+	uint32_t cid;
+
+	cid = sfc_get_service_lcore(sa->socket_id);
+	if (cid != RTE_MAX_LCORE)
+		return cid;
+
+	if (sa->socket_id != SOCKET_ID_ANY)
+		cid = sfc_get_service_lcore(SOCKET_ID_ANY);
+
+	if (cid == RTE_MAX_LCORE) {
+		sfc_warn(sa, "failed to get service lcore for counter service");
+	} else if (sa->socket_id != SOCKET_ID_ANY) {
+		sfc_warn(sa,
+			"failed to get service lcore for counter service at socket %d, but got at socket %u",
+			sa->socket_id, rte_lcore_to_socket_id(cid));
+	}
+	return cid;
+}
+
+bool
+sfc_mae_counter_rxq_required(struct sfc_adapter *sa)
+{
+	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
+
+	if (encp->enc_mae_supported == B_FALSE)
+		return false;
+
+	if (sfc_mae_counter_get_service_lcore(sa) == RTE_MAX_LCORE)
+		return false;
+
+	return true;
+}
+
+int
+sfc_mae_counter_rxq_attach(struct sfc_adapter *sa)
+{
+	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
+	char name[RTE_MEMPOOL_NAMESIZE];
+	struct rte_mempool *mp;
+	unsigned int n_elements;
+	unsigned int cache_size;
+	/* The mempool is internal and the private area is not required */
+	const uint16_t priv_size = 0;
+	const uint16_t data_room_size = RTE_PKTMBUF_HEADROOM +
+		SFC_MAE_COUNTER_STREAM_PACKET_SIZE;
+	int rc;
+
+	sfc_log_init(sa, "entry");
+
+	if (!sas->counters_rxq_allocated) {
+		sfc_log_init(sa, "counter queue is not supported - skip");
+		return 0;
+	}
+
+	/*
+	 * At least one element in the ring is always unused to distinguish
+	 * between empty and full ring cases.
+	 */
+	n_elements = SFC_COUNTER_RXQ_RX_DESC_COUNT - 1;
+
+	/*
+	 * The cache must have sufficient space to put received buckets
+	 * before they're reused on refill.
+	 */
+	cache_size = rte_align32pow2(SFC_COUNTER_RXQ_REFILL_LEVEL +
+				     SFC_MAE_COUNTER_RX_BURST - 1);
+
+	if (snprintf(name, sizeof(name), "counter_rxq-pool-%u", sas->port_id) >=
+	    (int)sizeof(name)) {
+		sfc_err(sa, "failed: counter RxQ mempool name is too long");
+		rc = ENAMETOOLONG;
+		goto fail_long_name;
+	}
+
+	/*
+	 * It could be a single-producer single-consumer ring mempool which
+	 * requires minimal barriers. However, cache size and refill/burst
+	 * policy are aligned, therefore it does not matter which
+	 * mempool backend is chosen since the backend is unused.
+	 */
+	mp = rte_pktmbuf_pool_create(name, n_elements, cache_size,
+				     priv_size, data_room_size, sa->socket_id);
+	if (mp == NULL) {
+		sfc_err(sa, "failed to create counter RxQ mempool");
+		rc = rte_errno;
+		goto fail_mp_create;
+	}
+
+	sa->counter_rxq.sw_index = sfc_counters_rxq_sw_index(sas);
+	sa->counter_rxq.mp = mp;
+	sa->counter_rxq.state |= SFC_COUNTER_RXQ_ATTACHED;
+
+	sfc_log_init(sa, "done");
+
+	return 0;
+
+fail_mp_create:
+fail_long_name:
+	sfc_log_init(sa, "failed: %s", rte_strerror(rc));
+
+	return rc;
+}
+
+void
+sfc_mae_counter_rxq_detach(struct sfc_adapter *sa)
+{
+	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
+
+	sfc_log_init(sa, "entry");
+
+	if (!sas->counters_rxq_allocated) {
+		sfc_log_init(sa, "counter queue is not supported - skip");
+		return;
+	}
+
+	if ((sa->counter_rxq.state & SFC_COUNTER_RXQ_ATTACHED) == 0) {
+		sfc_log_init(sa, "counter queue is not attached - skip");
+		return;
+	}
+
+	rte_mempool_free(sa->counter_rxq.mp);
+	sa->counter_rxq.mp = NULL;
+	sa->counter_rxq.state &= ~SFC_COUNTER_RXQ_ATTACHED;
+
+	sfc_log_init(sa, "done");
+}
+
+int
+sfc_mae_counter_rxq_init(struct sfc_adapter *sa)
+{
+	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
+	const struct rte_eth_rxconf rxconf = {
+		.rx_free_thresh = SFC_COUNTER_RXQ_REFILL_LEVEL,
+		.rx_drop_en = 1,
+	};
+	uint16_t nb_rx_desc = SFC_COUNTER_RXQ_RX_DESC_COUNT;
+	int rc;
+
+	sfc_log_init(sa, "entry");
+
+	if (!sas->counters_rxq_allocated) {
+		sfc_log_init(sa, "counter queue is not supported - skip");
+		return 0;
+	}
+
+	if ((sa->counter_rxq.state & SFC_COUNTER_RXQ_ATTACHED) == 0) {
+		sfc_log_init(sa, "counter queue is not attached - skip");
+		return 0;
+	}
+
+	nb_rx_desc = RTE_MIN(nb_rx_desc, sa->rxq_max_entries);
+	nb_rx_desc = RTE_MAX(nb_rx_desc, sa->rxq_min_entries);
+
+	rc = sfc_rx_qinit_info(sa, sa->counter_rxq.sw_index,
+			       EFX_RXQ_FLAG_USER_MARK);
+	if (rc != 0)
+		goto fail_counter_rxq_init_info;
+
+	rc = sfc_rx_qinit(sa, sa->counter_rxq.sw_index, nb_rx_desc,
+			  sa->socket_id, &rxconf, sa->counter_rxq.mp);
+	if (rc != 0) {
+		sfc_err(sa, "failed to init counter RxQ");
+		goto fail_counter_rxq_init;
+	}
+
+	sa->counter_rxq.state |= SFC_COUNTER_RXQ_INITIALIZED;
+
+	sfc_log_init(sa, "done");
+
+	return 0;
+
+fail_counter_rxq_init:
+fail_counter_rxq_init_info:
+	sfc_log_init(sa, "failed: %s", rte_strerror(rc));
+
+	return rc;
+}
+
+void
+sfc_mae_counter_rxq_fini(struct sfc_adapter *sa)
+{
+	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
+
+	sfc_log_init(sa, "entry");
+
+	if (!sas->counters_rxq_allocated) {
+		sfc_log_init(sa, "counter queue is not supported - skip");
+		return;
+	}
+
+	if ((sa->counter_rxq.state & SFC_COUNTER_RXQ_INITIALIZED) == 0) {
+		sfc_log_init(sa, "counter queue is not initialized - skip");
+		return;
+	}
+
+	sfc_rx_qfini(sa, sa->counter_rxq.sw_index);
+
+	sfc_log_init(sa, "done");
+}
diff --git a/drivers/net/sfc/sfc_mae_counter.h b/drivers/net/sfc/sfc_mae_counter.h
new file mode 100644
index 0000000000..f16d64a999
--- /dev/null
+++ b/drivers/net/sfc/sfc_mae_counter.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright(c) 2020-2021 Xilinx, Inc.
+ */
+
+#ifndef _SFC_MAE_COUNTER_H
+#define _SFC_MAE_COUNTER_H
+
+#include "sfc.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Default values for a user of counter RxQ */
+#define SFC_MAE_COUNTER_RX_BURST 32
+#define SFC_COUNTER_RXQ_RX_DESC_COUNT 256
+
+/*
+ * The refill level is chosen based on the requirement to keep the
+ * number of give-credits operations low.
+ */
+#define SFC_COUNTER_RXQ_REFILL_LEVEL (SFC_COUNTER_RXQ_RX_DESC_COUNT / 4)
+
+/*
+ * SF-122415-TC states that the packetiser that generates packets for
+ * the counter stream must support 9k frames. Set it to the maximum
+ * supported size since, with a huge flow of counters, fewer and larger
+ * packets per counter update are preferable.
+ */
+#define SFC_MAE_COUNTER_STREAM_PACKET_SIZE 9216
+
+bool sfc_mae_counter_rxq_required(struct sfc_adapter *sa);
+
+int sfc_mae_counter_rxq_attach(struct sfc_adapter *sa);
+void sfc_mae_counter_rxq_detach(struct sfc_adapter *sa);
+
+int sfc_mae_counter_rxq_init(struct sfc_adapter *sa);
+void sfc_mae_counter_rxq_fini(struct sfc_adapter *sa);
+
+#ifdef __cplusplus
+}
+#endif
+#endif  /* _SFC_MAE_COUNTER_H */
diff --git a/drivers/net/sfc/sfc_rx.c b/drivers/net/sfc/sfc_rx.c
index c7a7bd66ef..0532f77082 100644
--- a/drivers/net/sfc/sfc_rx.c
+++ b/drivers/net/sfc/sfc_rx.c
@@ -16,6 +16,7 @@
 #include "sfc_log.h"
 #include "sfc_ev.h"
 #include "sfc_rx.h"
+#include "sfc_mae_counter.h"
 #include "sfc_kvargs.h"
 #include "sfc_tweak.h"
 
@@ -1705,6 +1706,9 @@ sfc_rx_configure(struct sfc_adapter *sa)
 	struct sfc_rss *rss = &sas->rss;
 	struct rte_eth_conf *dev_conf = &sa->eth_dev->data->dev_conf;
 	const unsigned int nb_rx_queues = sa->eth_dev->data->nb_rx_queues;
+	const unsigned int nb_rsrv_rx_queues = sfc_nb_reserved_rxq(sas);
+	const unsigned int nb_rxq_total = nb_rx_queues + nb_rsrv_rx_queues;
+	bool reconfigure;
 	int rc;
 
 	sfc_log_init(sa, "nb_rx_queues=%u (old %u)",
@@ -1714,12 +1718,15 @@ sfc_rx_configure(struct sfc_adapter *sa)
 	if (rc != 0)
 		goto fail_check_mode;
 
-	if (nb_rx_queues == sas->rxq_count)
+	if (nb_rxq_total == sas->rxq_count) {
+		reconfigure = true;
 		goto configure_rss;
+	}
 
 	if (sas->rxq_info == NULL) {
+		reconfigure = false;
 		rc = ENOMEM;
-		sas->rxq_info = rte_calloc_socket("sfc-rxqs", nb_rx_queues,
+		sas->rxq_info = rte_calloc_socket("sfc-rxqs", nb_rxq_total,
 						  sizeof(sas->rxq_info[0]), 0,
 						  sa->socket_id);
 		if (sas->rxq_info == NULL)
@@ -1730,39 +1737,42 @@ sfc_rx_configure(struct sfc_adapter *sa)
 		 * since it should not be shared.
 		 */
 		rc = ENOMEM;
-		sa->rxq_ctrl = calloc(nb_rx_queues, sizeof(sa->rxq_ctrl[0]));
+		sa->rxq_ctrl = calloc(nb_rxq_total, sizeof(sa->rxq_ctrl[0]));
 		if (sa->rxq_ctrl == NULL)
 			goto fail_rxqs_ctrl_alloc;
 	} else {
 		struct sfc_rxq_info *new_rxq_info;
 		struct sfc_rxq *new_rxq_ctrl;
 
+		reconfigure = true;
+
+		/* Do not uninitialize reserved queues */
 		if (nb_rx_queues < sas->ethdev_rxq_count)
 			sfc_rx_fini_queues(sa, nb_rx_queues);
 
 		rc = ENOMEM;
 		new_rxq_info =
 			rte_realloc(sas->rxq_info,
-				    nb_rx_queues * sizeof(sas->rxq_info[0]), 0);
-		if (new_rxq_info == NULL && nb_rx_queues > 0)
+				    nb_rxq_total * sizeof(sas->rxq_info[0]), 0);
+		if (new_rxq_info == NULL && nb_rxq_total > 0)
 			goto fail_rxqs_realloc;
 
 		rc = ENOMEM;
 		new_rxq_ctrl = realloc(sa->rxq_ctrl,
-				       nb_rx_queues * sizeof(sa->rxq_ctrl[0]));
-		if (new_rxq_ctrl == NULL && nb_rx_queues > 0)
+				       nb_rxq_total * sizeof(sa->rxq_ctrl[0]));
+		if (new_rxq_ctrl == NULL && nb_rxq_total > 0)
 			goto fail_rxqs_ctrl_realloc;
 
 		sas->rxq_info = new_rxq_info;
 		sa->rxq_ctrl = new_rxq_ctrl;
-		if (nb_rx_queues > sas->rxq_count) {
+		if (nb_rxq_total > sas->rxq_count) {
 			unsigned int rxq_count = sas->rxq_count;
 
 			memset(&sas->rxq_info[rxq_count], 0,
-			       (nb_rx_queues - rxq_count) *
+			       (nb_rxq_total - rxq_count) *
 			       sizeof(sas->rxq_info[0]));
 			memset(&sa->rxq_ctrl[rxq_count], 0,
-			       (nb_rx_queues - rxq_count) *
+			       (nb_rxq_total - rxq_count) *
 			       sizeof(sa->rxq_ctrl[0]));
 		}
 	}
@@ -1779,7 +1789,13 @@ sfc_rx_configure(struct sfc_adapter *sa)
 		sas->ethdev_rxq_count++;
 	}
 
-	sas->rxq_count = sas->ethdev_rxq_count;
+	sas->rxq_count = sas->ethdev_rxq_count + nb_rsrv_rx_queues;
+
+	if (!reconfigure) {
+		rc = sfc_mae_counter_rxq_init(sa);
+		if (rc != 0)
+			goto fail_count_rxq_init;
+	}
 
 configure_rss:
 	rss->channels = (dev_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) ?
@@ -1801,6 +1817,10 @@ sfc_rx_configure(struct sfc_adapter *sa)
 	return 0;
 
 fail_rx_process_adv_conf_rss:
+	if (!reconfigure)
+		sfc_mae_counter_rxq_fini(sa);
+
+fail_count_rxq_init:
 fail_rx_qinit_info:
 fail_rxqs_ctrl_realloc:
 fail_rxqs_realloc:
@@ -1824,6 +1844,7 @@ sfc_rx_close(struct sfc_adapter *sa)
 	struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;
 
 	sfc_rx_fini_queues(sa, 0);
+	sfc_mae_counter_rxq_fini(sa);
 
 	rss->channels = 0;
 
-- 
2.30.2


