From: Ivan Malov <ivan.malov@oktetlabs.ru>
To: dev@dpdk.org
Cc: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>,
Andy Moreton <amoreton@xilinx.com>
Subject: [PATCH 6/8] net/sfc: use adaptive table entry count in flow action RSS
Date: Tue, 1 Feb 2022 11:50:00 +0300
Message-ID: <20220201085002.320102-7-ivan.malov@oktetlabs.ru>
In-Reply-To: <20220201085002.320102-1-ivan.malov@oktetlabs.ru>
Currently, every RSS context uses 128 indirection entries in
the hardware. That is not always optimal because the entries
come from a pool shared among all PCI functions of the board,
while an action RSS configuration may specify far fewer queue
IDs. On EF100 boards, the driver can choose how many entries
to allocate for the indirection table of a context. Make use
of that to optimise resource usage in RSS scenarios.
Signed-off-by: Ivan Malov <ivan.malov@oktetlabs.ru>
Reviewed-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
Reviewed-by: Andy Moreton <amoreton@xilinx.com>
---
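A note for reviewers (not part of the commit message): the adaptive
entry count selection done in sfc_flow_rss_ctx_program() below boils
down to the following sketch. The standalone helper name is made up
for this illustration; the fields are those referenced in the diff
(struct sfc_flow_rss and struct sfc_flow_rss_conf).

static unsigned int
sfc_flow_rss_pick_nb_tbl_entries(const struct sfc_flow_rss *flow_rss,
				 const struct sfc_flow_rss_conf *conf)
{
	unsigned int nb_qid_offsets;

	/* Explicit queue ID offsets take precedence over the queue span. */
	if (conf->nb_qid_offsets != 0)
		nb_qid_offsets = conf->nb_qid_offsets;
	else
		nb_qid_offsets = conf->qid_span;

	/*
	 * A queue count which is not a power of 2 cannot be spread
	 * evenly over a small table; fall back to the maximum entry
	 * count so that distribution quality does not degrade.
	 */
	if (!RTE_IS_POWER_OF_2(nb_qid_offsets))
		nb_qid_offsets = flow_rss->nb_tbl_entries_max;

	/* Never go below the minimum table size reported by the NIC. */
	return RTE_MAX(flow_rss->nb_tbl_entries_min, nb_qid_offsets);
}

For example, assuming the NIC reports nb_tbl_entries_min = 64 (the
actual value depends on the firmware), an action RSS spanning 4 queues
would consume only 64 indirection entries from the shared pool instead
of the fixed 128, while one spanning 3 queues (not a power of 2) would
still get the full-size table for even spreading.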
drivers/net/sfc/sfc_flow_rss.c | 72 +++++++++++++++++++++++++++-------
drivers/net/sfc/sfc_flow_rss.h |  4 +-
2 files changed, 60 insertions(+), 16 deletions(-)
diff --git a/drivers/net/sfc/sfc_flow_rss.c b/drivers/net/sfc/sfc_flow_rss.c
index 1c94333b62..4bf3002164 100644
--- a/drivers/net/sfc/sfc_flow_rss.c
+++ b/drivers/net/sfc/sfc_flow_rss.c
@@ -23,23 +23,45 @@ sfc_flow_rss_attach(struct sfc_adapter *sa)
{
const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
struct sfc_flow_rss *flow_rss = &sa->flow_rss;
+ int rc;
sfc_log_init(sa, "entry");
flow_rss->qid_span_max = encp->enc_rx_scale_indirection_max_nqueues;
+ flow_rss->nb_tbl_entries_min = encp->enc_rx_scale_tbl_min_nentries;
+ flow_rss->nb_tbl_entries_max = encp->enc_rx_scale_tbl_max_nentries;
+
+ sfc_log_init(sa, "allocate the bounce buffer for indirection entries");
+ flow_rss->bounce_tbl = rte_calloc("sfc_flow_rss_bounce_tbl",
+ flow_rss->nb_tbl_entries_max,
+ sizeof(*flow_rss->bounce_tbl), 0);
+ if (flow_rss->bounce_tbl == NULL) {
+ rc = ENOMEM;
+ goto fail;
+ }
TAILQ_INIT(&flow_rss->ctx_list);
sfc_log_init(sa, "done");
return 0;
+
+fail:
+ sfc_log_init(sa, "failed %d", rc);
+
+ return rc;
}
void
sfc_flow_rss_detach(struct sfc_adapter *sa)
{
+ struct sfc_flow_rss *flow_rss = &sa->flow_rss;
+
sfc_log_init(sa, "entry");
+ sfc_log_init(sa, "free the bounce buffer for indirection entries");
+ rte_free(flow_rss->bounce_tbl);
+
sfc_log_init(sa, "done");
}
@@ -123,9 +145,9 @@ sfc_flow_rss_parse_conf(struct sfc_adapter *sa,
return EINVAL;
}
- if (in->queue_num > EFX_RSS_TBL_SIZE) {
+ if (in->queue_num > flow_rss->nb_tbl_entries_max) {
sfc_err(sa, "flow-rss: parse: 'queue_num' is too large; MAX=%u",
- EFX_RSS_TBL_SIZE);
+ flow_rss->nb_tbl_entries_max);
return EINVAL;
}
@@ -286,6 +308,7 @@ sfc_flow_rss_ctx_del(struct sfc_adapter *sa, struct sfc_flow_rss_ctx *ctx)
static int
sfc_flow_rss_ctx_program_tbl(struct sfc_adapter *sa,
+ unsigned int nb_tbl_entries,
const struct sfc_flow_rss_ctx *ctx)
{
const struct sfc_flow_rss_conf *conf = &ctx->conf;
@@ -297,15 +320,15 @@ sfc_flow_rss_ctx_program_tbl(struct sfc_adapter *sa,
if (conf->nb_qid_offsets != 0) {
SFC_ASSERT(ctx->qid_offsets != NULL);
- for (i = 0; i < EFX_RSS_TBL_SIZE; ++i)
+ for (i = 0; i < nb_tbl_entries; ++i)
tbl[i] = ctx->qid_offsets[i % conf->nb_qid_offsets];
} else {
- for (i = 0; i < EFX_RSS_TBL_SIZE; ++i)
+ for (i = 0; i < nb_tbl_entries; ++i)
tbl[i] = i % conf->qid_span;
}
return efx_rx_scale_tbl_set(sa->nic, ctx->nic_handle,
- tbl, EFX_RSS_TBL_SIZE);
+ tbl, nb_tbl_entries);
}
int
@@ -313,9 +336,12 @@ sfc_flow_rss_ctx_program(struct sfc_adapter *sa, struct sfc_flow_rss_ctx *ctx)
{
efx_rx_scale_context_type_t ctx_type = EFX_RX_SCALE_EXCLUSIVE;
struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
+ const struct sfc_flow_rss *flow_rss = &sa->flow_rss;
struct sfc_rss *ethdev_rss = &sas->rss;
struct sfc_flow_rss_conf *conf;
bool allocation_done = B_FALSE;
+ unsigned int nb_qid_offsets;
+ unsigned int nb_tbl_entries;
int rc;
if (ctx == NULL)
@@ -325,18 +351,34 @@ sfc_flow_rss_ctx_program(struct sfc_adapter *sa, struct sfc_flow_rss_ctx *ctx)
SFC_ASSERT(sfc_adapter_is_locked(sa));
+ if (conf->nb_qid_offsets != 0)
+ nb_qid_offsets = conf->nb_qid_offsets;
+ else
+ nb_qid_offsets = conf->qid_span;
+
+ if (!RTE_IS_POWER_OF_2(nb_qid_offsets)) {
+ /*
+ * Most likely, it pays to enlarge the indirection
+ * table to facilitate better distribution quality.
+ */
+ nb_qid_offsets = flow_rss->nb_tbl_entries_max;
+ }
+
+ nb_tbl_entries = RTE_MAX(flow_rss->nb_tbl_entries_min, nb_qid_offsets);
+
if (ctx->nic_handle_refcnt == 0) {
- rc = efx_rx_scale_context_alloc(sa->nic, ctx_type,
- conf->qid_span,
- &ctx->nic_handle);
+ rc = efx_rx_scale_context_alloc_v2(sa->nic, ctx_type,
+ conf->qid_span,
+ nb_tbl_entries,
+ &ctx->nic_handle);
if (rc != 0) {
- sfc_err(sa, "flow-rss: failed to allocate NIC resource for ctx=%p: type=%d, qid_span=%u, rc=%d",
- ctx, ctx_type, conf->qid_span, rc);
+ sfc_err(sa, "flow-rss: failed to allocate NIC resource for ctx=%p: type=%d, qid_span=%u, nb_tbl_entries=%u; rc=%d",
+ ctx, ctx_type, conf->qid_span, nb_tbl_entries, rc);
goto fail;
}
- sfc_dbg(sa, "flow-rss: allocated NIC resource for ctx=%p: type=%d, qid_span=%u; handle=0x%08x",
- ctx, ctx_type, conf->qid_span,
+ sfc_dbg(sa, "flow-rss: allocated NIC resource for ctx=%p: type=%d, qid_span=%u, nb_tbl_entries=%u; handle=0x%08x",
+ ctx, ctx_type, conf->qid_span, nb_tbl_entries,
ctx->nic_handle);
++(ctx->nic_handle_refcnt);
@@ -369,10 +411,10 @@ sfc_flow_rss_ctx_program(struct sfc_adapter *sa, struct sfc_flow_rss_ctx *ctx)
goto fail;
}
- rc = sfc_flow_rss_ctx_program_tbl(sa, ctx);
+ rc = sfc_flow_rss_ctx_program_tbl(sa, nb_tbl_entries, ctx);
if (rc != 0) {
- sfc_err(sa, "flow-rss: failed to program table for ctx=%p; rc=%d",
- ctx, rc);
+ sfc_err(sa, "flow-rss: failed to program table for ctx=%p: nb_tbl_entries=%u; rc=%d",
+ ctx, nb_tbl_entries, rc);
goto fail;
}
diff --git a/drivers/net/sfc/sfc_flow_rss.h b/drivers/net/sfc/sfc_flow_rss.h
index e9f798a8f3..3341d06cf4 100644
--- a/drivers/net/sfc/sfc_flow_rss.h
+++ b/drivers/net/sfc/sfc_flow_rss.h
@@ -42,9 +42,11 @@ struct sfc_flow_rss_ctx {
TAILQ_HEAD(sfc_flow_rss_ctx_list, sfc_flow_rss_ctx);
struct sfc_flow_rss {
+ unsigned int nb_tbl_entries_min;
+ unsigned int nb_tbl_entries_max;
unsigned int qid_span_max;
- unsigned int bounce_tbl[EFX_RSS_TBL_SIZE];
+ unsigned int *bounce_tbl; /* MAX */
struct sfc_flow_rss_ctx_list ctx_list;
};
--
2.30.2
Thread overview: 12+ messages
2022-02-01 8:49 [PATCH 0/8] net/sfc: improve flow action RSS support on EF100 boards Ivan Malov
2022-02-01 8:49 ` [PATCH 1/8] net/sfc: rework flow action RSS support Ivan Malov
2022-02-01 8:49 ` [PATCH 2/8] common/sfc_efx/base: query RSS queue span limit on Riverhead Ivan Malov
2022-02-01 8:49 ` [PATCH 3/8] net/sfc: use non-static queue span limit in flow action RSS Ivan Malov
2022-02-01 8:49 ` [PATCH 4/8] common/sfc_efx/base: revise name of RSS table entry count Ivan Malov
2022-02-01 8:49 ` [PATCH 5/8] common/sfc_efx/base: support selecting " Ivan Malov
2022-02-02 11:51 ` Ray Kinsella
2022-02-02 12:24 ` Ivan Malov
2022-02-01 8:50 ` Ivan Malov [this message]
2022-02-01 8:50 ` [PATCH 7/8] common/sfc_efx/base: support the even spread RSS mode Ivan Malov
2022-02-01 8:50 ` [PATCH 8/8] net/sfc: use the even spread mode in flow action RSS Ivan Malov
2022-02-02 17:41 ` [PATCH 0/8] net/sfc: improve flow action RSS support on EF100 boards Ferruh Yigit