From: Bing Zhao <bingz@nvidia.com>
To: <matan@nvidia.com>, <viacheslavo@nvidia.com>, <orika@nvidia.com>,
<suanmingm@nvidia.com>, <rasland@nvidia.com>
Cc: <dev@dpdk.org>, Gregory Etelson <getelson@nvidia.com>,
<stable@dpdk.org>, David Marchand <david.marchand@redhat.com>
Subject: [PATCH v3] net/mlx5: fix flow workspace destruction
Date: Mon, 3 Jul 2023 12:50:52 +0300 [thread overview]
Message-ID: <20230703095052.449945-1-bingz@nvidia.com> (raw)
In-Reply-To: <20230701145116.441135-1-bingz@nvidia.com>
From: Gregory Etelson <getelson@nvidia.com>
The PMD uses a pthread key to allocate and access per-thread flow
workspace memory buffers.
PMD registered a key destructor function to clean up flow workspace
buffers. However, the key destructor was not called by the pthread
library.
The patch keeps track of per-thread flow workspaces in the PMD.
Flow workspace memory release is triggered from the PMD destructor.
In the meantime, with this patch, the workspace buffer and the RSS
queue array are allocated in a single memory chunk. The maximum
number of queues, RTE_ETH_RSS_RETA_SIZE_512, is chosen. The
workspace adjustment can then be removed to reduce the software
hiccup caused by:
1. realloc and content copy
2. spinlock acquire and release
Bugzilla ID: 1255
Fixes: 5d55a494f4e6 ("net/mlx5: split multi-thread flow handling per OS")
Cc: stable@dpdk.org
Reported-by: David Marchand <david.marchand@redhat.com>
Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Signed-off-by: Bing Zhao <bingz@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
---
v2: fix typo and code style
v3: add bugzilla information
---
drivers/net/mlx5/linux/mlx5_flow_os.c | 2 +-
drivers/net/mlx5/mlx5.c | 1 +
drivers/net/mlx5/mlx5_flow.c | 76 +++++++++++----------------
drivers/net/mlx5/mlx5_flow.h | 4 +-
4 files changed, 36 insertions(+), 47 deletions(-)
diff --git a/drivers/net/mlx5/linux/mlx5_flow_os.c b/drivers/net/mlx5/linux/mlx5_flow_os.c
index 3c9a823edf..b139bb75b9 100644
--- a/drivers/net/mlx5/linux/mlx5_flow_os.c
+++ b/drivers/net/mlx5/linux/mlx5_flow_os.c
@@ -51,7 +51,7 @@ mlx5_flow_os_validate_item_esp(const struct rte_flow_item *item,
int
mlx5_flow_os_init_workspace_once(void)
{
- if (rte_thread_key_create(&key_workspace, flow_release_workspace)) {
+ if (rte_thread_key_create(&key_workspace, NULL)) {
DRV_LOG(ERR, "Can't create flow workspace data thread key.");
rte_errno = ENOMEM;
return -rte_errno;
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 5f0aa296ba..fd9b76027d 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -1838,6 +1838,7 @@ mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh)
if (LIST_EMPTY(&mlx5_dev_ctx_list)) {
mlx5_os_net_cleanup();
mlx5_flow_os_release_workspace();
+ mlx5_flow_workspace_gc_release();
}
pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
if (sh->flex_parsers_dv) {
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index cf83db7b60..d3b1252ad6 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -7155,36 +7155,6 @@ flow_tunnel_from_rule(const struct mlx5_flow *flow)
return tunnel;
}
-/**
- * Adjust flow RSS workspace if needed.
- *
- * @param wks
- * Pointer to thread flow work space.
- * @param rss_desc
- * Pointer to RSS descriptor.
- * @param[in] nrssq_num
- * New RSS queue number.
- *
- * @return
- * 0 on success, -1 otherwise and rte_errno is set.
- */
-static int
-flow_rss_workspace_adjust(struct mlx5_flow_workspace *wks,
- struct mlx5_flow_rss_desc *rss_desc,
- uint32_t nrssq_num)
-{
- if (likely(nrssq_num <= wks->rssq_num))
- return 0;
- rss_desc->queue = realloc(rss_desc->queue,
- sizeof(*rss_desc->queue) * RTE_ALIGN(nrssq_num, 2));
- if (!rss_desc->queue) {
- rte_errno = ENOMEM;
- return -1;
- }
- wks->rssq_num = RTE_ALIGN(nrssq_num, 2);
- return 0;
-}
-
/**
* Create a flow and add it to @p list.
*
@@ -7303,8 +7273,7 @@ flow_list_create(struct rte_eth_dev *dev, enum mlx5_flow_type type,
if (attr->ingress)
rss = flow_get_rss_action(dev, p_actions_rx);
if (rss) {
- if (flow_rss_workspace_adjust(wks, rss_desc, rss->queue_num))
- return 0;
+ MLX5_ASSERT(rss->queue_num <= RTE_ETH_RSS_RETA_SIZE_512);
/*
* The following information is required by
* mlx5_flow_hashfields_adjust() in advance.
@@ -8072,12 +8041,34 @@ flow_release_workspace(void *data)
while (wks) {
next = wks->next;
- free(wks->rss_desc.queue);
free(wks);
wks = next;
}
}
+static struct mlx5_flow_workspace *gc_head;
+static rte_spinlock_t mlx5_flow_workspace_lock = RTE_SPINLOCK_INITIALIZER;
+
+static void
+mlx5_flow_workspace_gc_add(struct mlx5_flow_workspace *ws)
+{
+ rte_spinlock_lock(&mlx5_flow_workspace_lock);
+ ws->gc = gc_head;
+ gc_head = ws;
+ rte_spinlock_unlock(&mlx5_flow_workspace_lock);
+}
+
+void
+mlx5_flow_workspace_gc_release(void)
+{
+ while (gc_head) {
+ struct mlx5_flow_workspace *wks = gc_head;
+
+ gc_head = wks->gc;
+ flow_release_workspace(wks);
+ }
+}
+
/**
* Get thread specific current flow workspace.
*
@@ -8103,23 +8094,17 @@ mlx5_flow_get_thread_workspace(void)
static struct mlx5_flow_workspace*
flow_alloc_thread_workspace(void)
{
- struct mlx5_flow_workspace *data = calloc(1, sizeof(*data));
+ size_t data_size = RTE_ALIGN(sizeof(struct mlx5_flow_workspace), sizeof(long));
+ size_t rss_queue_array_size = sizeof(uint16_t) * RTE_ETH_RSS_RETA_SIZE_512;
+ struct mlx5_flow_workspace *data = calloc(1, data_size +
+ rss_queue_array_size);
if (!data) {
- DRV_LOG(ERR, "Failed to allocate flow workspace "
- "memory.");
+ DRV_LOG(ERR, "Failed to allocate flow workspace memory.");
return NULL;
}
- data->rss_desc.queue = calloc(1,
- sizeof(uint16_t) * MLX5_RSSQ_DEFAULT_NUM);
- if (!data->rss_desc.queue)
- goto err;
- data->rssq_num = MLX5_RSSQ_DEFAULT_NUM;
+ data->rss_desc.queue = RTE_PTR_ADD(data, data_size);
return data;
-err:
- free(data->rss_desc.queue);
- free(data);
- return NULL;
}
/**
@@ -8140,6 +8125,7 @@ mlx5_flow_push_thread_workspace(void)
data = flow_alloc_thread_workspace();
if (!data)
return NULL;
+ mlx5_flow_workspace_gc_add(data);
} else if (!curr->inuse) {
data = curr;
} else if (curr->next) {
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 003e7da3a6..62789853ab 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -1496,10 +1496,10 @@ struct mlx5_flow_workspace {
/* If creating another flow in same thread, push new as stack. */
struct mlx5_flow_workspace *prev;
struct mlx5_flow_workspace *next;
+ struct mlx5_flow_workspace *gc;
uint32_t inuse; /* can't create new flow with current. */
struct mlx5_flow flows[MLX5_NUM_MAX_DEV_FLOWS];
struct mlx5_flow_rss_desc rss_desc;
- uint32_t rssq_num; /* Allocated queue num in rss_desc. */
uint32_t flow_idx; /* Intermediate device flow index. */
struct mlx5_flow_meter_info *fm; /* Pointer to the meter in flow. */
struct mlx5_flow_meter_policy *policy;
@@ -2022,6 +2022,8 @@ struct mlx5_flow_driver_ops {
struct mlx5_flow_workspace *mlx5_flow_push_thread_workspace(void);
void mlx5_flow_pop_thread_workspace(void);
struct mlx5_flow_workspace *mlx5_flow_get_thread_workspace(void);
+void mlx5_flow_workspace_gc_release(void);
+
__extension__
struct flow_grp_info {
uint64_t external:1;
--
2.34.1
next prev parent reply other threads:[~2023-07-03 9:51 UTC|newest]
Thread overview: 5+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-07-01 14:43 [PATCH] " Bing Zhao
2023-07-01 14:51 ` [PATCH v2] " Bing Zhao
2023-07-03 8:59 ` Matan Azrad
2023-07-03 9:50 ` Bing Zhao [this message]
2023-07-03 14:03 ` [PATCH v3] " Raslan Darawsheh
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20230703095052.449945-1-bingz@nvidia.com \
--to=bingz@nvidia.com \
--cc=david.marchand@redhat.com \
--cc=dev@dpdk.org \
--cc=getelson@nvidia.com \
--cc=matan@nvidia.com \
--cc=orika@nvidia.com \
--cc=rasland@nvidia.com \
--cc=stable@dpdk.org \
--cc=suanmingm@nvidia.com \
--cc=viacheslavo@nvidia.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).