From: Ivan Malov <ivan.malov@oktetlabs.ru>
To: dev@dpdk.org
Cc: Andrew Rybchenko, Andy Moreton
Subject: [PATCH 6/8] net/sfc: use adaptive table entry count in flow action RSS
Date: Tue, 1 Feb 2022 11:50:00 +0300
Message-Id: <20220201085002.320102-7-ivan.malov@oktetlabs.ru>
X-Mailer: git-send-email 2.30.2
In-Reply-To: <20220201085002.320102-1-ivan.malov@oktetlabs.ru>
References: <20220201085002.320102-1-ivan.malov@oktetlabs.ru>

Currently, every RSS context uses 128 indirection entries in the
hardware. That is not always optimal because the entries come from
a pool shared among all PCI functions of the board, while the format
of action RSS allows passing fewer queue IDs.

With EF100 boards, it is possible to decide how many entries to
allocate for the indirection table of a context. Make use of that
in order to optimise resource usage in RSS scenarios.

Signed-off-by: Ivan Malov
Reviewed-by: Andrew Rybchenko
Reviewed-by: Andy Moreton
---
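Reviewer note (not part of the patch): the sizing rule introduced in
sfc_flow_rss_ctx_program() below can be summarised by the following
standalone sketch. The helper name pick_nb_tbl_entries() and the
64..512 bounds are made up for illustration; in the patch the logic
is inline and the bounds come from the adapter's
enc_rx_scale_tbl_min_nentries and enc_rx_scale_tbl_max_nentries.

#include <stdio.h>

/* Local stand-ins for the RTE_IS_POWER_OF_2()/RTE_MAX() helpers
 * from rte_common.h, so that the sketch compiles standalone. */
#define IS_POWER_OF_2(n) ((n) != 0 && (((n) & ((n) - 1)) == 0))
#define MAX_UINT(a, b) ((a) > (b) ? (a) : (b))

static unsigned int
pick_nb_tbl_entries(unsigned int nb_qid_offsets, unsigned int qid_span,
		    unsigned int tbl_min, unsigned int tbl_max)
{
	/* Prefer the explicit queue ID count; fall back to the span. */
	unsigned int nb = (nb_qid_offsets != 0) ? nb_qid_offsets : qid_span;

	/*
	 * A power-of-2 count divides any table size evenly; any other
	 * count cannot, so take the largest table to keep the modulo
	 * round-robin fill as close to uniform as possible.
	 */
	if (!IS_POWER_OF_2(nb))
		nb = tbl_max;

	/*
	 * Respect the minimum table size the NIC reports. No upper
	 * clamp is needed here: sfc_flow_rss_parse_conf() already
	 * rejects queue counts above the maximum.
	 */
	return MAX_UINT(tbl_min, nb);
}

int
main(void)
{
	/* Hypothetical device bounds (64..512) for demonstration. */
	printf("4 queues -> %u entries\n", pick_nb_tbl_entries(4, 4, 64, 512));
	printf("6 queues -> %u entries\n", pick_nb_tbl_entries(6, 6, 64, 512));
	return 0;
}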
 drivers/net/sfc/sfc_flow_rss.c | 72 +++++++++++++++++++++++++++-------
 drivers/net/sfc/sfc_flow_rss.h |  4 +-
 2 files changed, 60 insertions(+), 16 deletions(-)

diff --git a/drivers/net/sfc/sfc_flow_rss.c b/drivers/net/sfc/sfc_flow_rss.c
index 1c94333b62..4bf3002164 100644
--- a/drivers/net/sfc/sfc_flow_rss.c
+++ b/drivers/net/sfc/sfc_flow_rss.c
@@ -23,23 +23,45 @@ sfc_flow_rss_attach(struct sfc_adapter *sa)
 {
 	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
 	struct sfc_flow_rss *flow_rss = &sa->flow_rss;
+	int rc;
 
 	sfc_log_init(sa, "entry");
 
 	flow_rss->qid_span_max = encp->enc_rx_scale_indirection_max_nqueues;
+	flow_rss->nb_tbl_entries_min = encp->enc_rx_scale_tbl_min_nentries;
+	flow_rss->nb_tbl_entries_max = encp->enc_rx_scale_tbl_max_nentries;
+
+	sfc_log_init(sa, "allocate the bounce buffer for indirection entries");
+	flow_rss->bounce_tbl = rte_calloc("sfc_flow_rss_bounce_tbl",
+					  flow_rss->nb_tbl_entries_max,
+					  sizeof(*flow_rss->bounce_tbl), 0);
+	if (flow_rss->bounce_tbl == NULL) {
+		rc = ENOMEM;
+		goto fail;
+	}
 
 	TAILQ_INIT(&flow_rss->ctx_list);
 
 	sfc_log_init(sa, "done");
 
 	return 0;
+
+fail:
+	sfc_log_init(sa, "failed %d", rc);
+
+	return rc;
 }
 
 void
 sfc_flow_rss_detach(struct sfc_adapter *sa)
 {
+	struct sfc_flow_rss *flow_rss = &sa->flow_rss;
+
 	sfc_log_init(sa, "entry");
 
+	sfc_log_init(sa, "free the bounce buffer for indirection entries");
+	rte_free(flow_rss->bounce_tbl);
+
 	sfc_log_init(sa, "done");
 }
 
@@ -123,9 +145,9 @@ sfc_flow_rss_parse_conf(struct sfc_adapter *sa,
 		return EINVAL;
 	}
 
-	if (in->queue_num > EFX_RSS_TBL_SIZE) {
+	if (in->queue_num > flow_rss->nb_tbl_entries_max) {
 		sfc_err(sa, "flow-rss: parse: 'queue_num' is too large; MAX=%u",
-			EFX_RSS_TBL_SIZE);
+			flow_rss->nb_tbl_entries_max);
 		return EINVAL;
 	}
 
@@ -286,6 +308,7 @@ sfc_flow_rss_ctx_del(struct sfc_adapter *sa, struct sfc_flow_rss_ctx *ctx)
 
 static int
 sfc_flow_rss_ctx_program_tbl(struct sfc_adapter *sa,
+			     unsigned int nb_tbl_entries,
 			     const struct sfc_flow_rss_ctx *ctx)
 {
 	const struct sfc_flow_rss_conf *conf = &ctx->conf;
@@ -297,15 +320,15 @@ sfc_flow_rss_ctx_program_tbl(struct sfc_adapter *sa,
 	if (conf->nb_qid_offsets != 0) {
 		SFC_ASSERT(ctx->qid_offsets != NULL);
 
-		for (i = 0; i < EFX_RSS_TBL_SIZE; ++i)
+		for (i = 0; i < nb_tbl_entries; ++i)
 			tbl[i] = ctx->qid_offsets[i % conf->nb_qid_offsets];
 	} else {
-		for (i = 0; i < EFX_RSS_TBL_SIZE; ++i)
+		for (i = 0; i < nb_tbl_entries; ++i)
 			tbl[i] = i % conf->qid_span;
 	}
 
 	return efx_rx_scale_tbl_set(sa->nic, ctx->nic_handle,
-				    tbl, EFX_RSS_TBL_SIZE);
+				    tbl, nb_tbl_entries);
 }
 
 int
@@ -313,9 +336,12 @@ sfc_flow_rss_ctx_program(struct sfc_adapter *sa, struct sfc_flow_rss_ctx *ctx)
 {
 	efx_rx_scale_context_type_t ctx_type = EFX_RX_SCALE_EXCLUSIVE;
 	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
+	const struct sfc_flow_rss *flow_rss = &sa->flow_rss;
 	struct sfc_rss *ethdev_rss = &sas->rss;
 	struct sfc_flow_rss_conf *conf;
 	bool allocation_done = B_FALSE;
+	unsigned int nb_qid_offsets;
+	unsigned int nb_tbl_entries;
 	int rc;
 
 	if (ctx == NULL)
@@ -325,18 +351,34 @@ sfc_flow_rss_ctx_program(struct sfc_adapter *sa, struct sfc_flow_rss_ctx *ctx)
 
 	SFC_ASSERT(sfc_adapter_is_locked(sa));
 
+	if (conf->nb_qid_offsets != 0)
+		nb_qid_offsets = conf->nb_qid_offsets;
+	else
+		nb_qid_offsets = conf->qid_span;
+
+	if (!RTE_IS_POWER_OF_2(nb_qid_offsets)) {
+		/*
+		 * Most likely, it pays to enlarge the indirection
+		 * table to facilitate better distribution quality.
+		 */
+		nb_qid_offsets = flow_rss->nb_tbl_entries_max;
+	}
+
+	nb_tbl_entries = RTE_MAX(flow_rss->nb_tbl_entries_min, nb_qid_offsets);
+
 	if (ctx->nic_handle_refcnt == 0) {
-		rc = efx_rx_scale_context_alloc(sa->nic, ctx_type,
-						conf->qid_span,
-						&ctx->nic_handle);
+		rc = efx_rx_scale_context_alloc_v2(sa->nic, ctx_type,
+						   conf->qid_span,
+						   nb_tbl_entries,
+						   &ctx->nic_handle);
 		if (rc != 0) {
-			sfc_err(sa, "flow-rss: failed to allocate NIC resource for ctx=%p: type=%d, qid_span=%u, rc=%d",
-				ctx, ctx_type, conf->qid_span, rc);
+			sfc_err(sa, "flow-rss: failed to allocate NIC resource for ctx=%p: type=%d, qid_span=%u, nb_tbl_entries=%u; rc=%d",
+				ctx, ctx_type, conf->qid_span, nb_tbl_entries, rc);
 			goto fail;
 		}
 
-		sfc_dbg(sa, "flow-rss: allocated NIC resource for ctx=%p: type=%d, qid_span=%u; handle=0x%08x",
-			ctx, ctx_type, conf->qid_span,
+		sfc_dbg(sa, "flow-rss: allocated NIC resource for ctx=%p: type=%d, qid_span=%u, nb_tbl_entries=%u; handle=0x%08x",
+			ctx, ctx_type, conf->qid_span, nb_tbl_entries,
 			ctx->nic_handle);
 
 		++(ctx->nic_handle_refcnt);
@@ -369,10 +411,10 @@ sfc_flow_rss_ctx_program(struct sfc_adapter *sa, struct sfc_flow_rss_ctx *ctx)
 		goto fail;
 	}
 
-	rc = sfc_flow_rss_ctx_program_tbl(sa, ctx);
+	rc = sfc_flow_rss_ctx_program_tbl(sa, nb_tbl_entries, ctx);
 	if (rc != 0) {
-		sfc_err(sa, "flow-rss: failed to program table for ctx=%p; rc=%d",
-			ctx, rc);
+		sfc_err(sa, "flow-rss: failed to program table for ctx=%p: nb_tbl_entries=%u; rc=%d",
+			ctx, nb_tbl_entries, rc);
 		goto fail;
 	}
 
diff --git a/drivers/net/sfc/sfc_flow_rss.h b/drivers/net/sfc/sfc_flow_rss.h
index e9f798a8f3..3341d06cf4 100644
--- a/drivers/net/sfc/sfc_flow_rss.h
+++ b/drivers/net/sfc/sfc_flow_rss.h
@@ -42,9 +42,11 @@ struct sfc_flow_rss_ctx {
 TAILQ_HEAD(sfc_flow_rss_ctx_list, sfc_flow_rss_ctx);
 
 struct sfc_flow_rss {
+	unsigned int nb_tbl_entries_min;
+	unsigned int nb_tbl_entries_max;
 	unsigned int qid_span_max;
 
-	unsigned int bounce_tbl[EFX_RSS_TBL_SIZE];
+	unsigned int *bounce_tbl; /* MAX */
 
 	struct sfc_flow_rss_ctx_list ctx_list;
 };
-- 
2.30.2
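Reviewer note (not part of the patch): to see why the in-code comment
above speaks of "distribution quality", the self-contained program
below reproduces the tbl[i] = i % nb_queues round-robin fill from
sfc_flow_rss_ctx_program_tbl() and reports the per-queue entry
counts. The 128-entry size is the former fixed table size from the
commit message; 512 is a hypothetical stand-in for the device maximum.

#include <stdio.h>

/* Report how evenly nb_tbl_entries indirection entries spread over
 * nb_queues queues when filled as tbl[i] = i % nb_queues. */
static void
show_spread(unsigned int nb_queues, unsigned int nb_tbl_entries)
{
	unsigned int base = nb_tbl_entries / nb_queues;
	unsigned int extra = nb_tbl_entries % nb_queues;

	printf("%u queues, %u entries: %u queue(s) get %u entries, %u get %u\n",
	       nb_queues, nb_tbl_entries,
	       extra, base + 1, nb_queues - extra, base);
}

int
main(void)
{
	show_spread(6, 128);	/* former fixed size: 22 vs 21 entries */
	show_spread(6, 512);	/* enlarged table: 86 vs 85 entries */
	return 0;
}

With six queues and the old fixed 128-entry table, two queues receive
22 entries and four receive 21, so two queues see roughly 4.8% more
traffic; with a 512-entry table the worst-case excess drops to about
1.2%. A power-of-2 queue count divides either size evenly, which is
why such counts skip the enlargement.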