From: Dariusz Sosnowski <dsosnowski@nvidia.com>
To: Viacheslav Ovsiienko <viacheslavo@nvidia.com>,
Ori Kam <orika@nvidia.com>, Suanming Mou <suanmingm@nvidia.com>,
Matan Azrad <matan@nvidia.com>
Cc: <dev@dpdk.org>, Raslan Darawsheh <rasland@nvidia.com>,
Bing Zhao <bingz@nvidia.com>
Subject: [PATCH v2 06/11] net/mlx5: remove flow pattern from job
Date: Thu, 29 Feb 2024 12:51:51 +0100
Message-ID: <20240229115157.201671-7-dsosnowski@nvidia.com>
In-Reply-To: <20240229115157.201671-1-dsosnowski@nvidia.com>
The mlx5_hw_q_job struct held a reference to a temporary flow rule
pattern and contained temporary REPRESENTED_PORT and TAG item structs.
They are used whenever the flow rule pattern provided by the
application must be prepended with one of these items.
If prepending is required, the flow rule pattern is copied to a
temporary buffer and a new item is added internally by the PMD.
The resulting buffer is passed to the HWS layer when the flow create
operation is enqueued.
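For illustration, the prepend step amounts to the following (a minimal
sketch condensed from flow_hw_get_rule_items() in the diff below, with
bounds checking omitted):

	/* The implicit item goes first; the application pattern follows. */
	pp->items[0] = (struct rte_flow_item){
		.type = RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT,
		.spec = &pp->port_spec,
	};
	rte_memcpy(&pp->items[1], items, sizeof(*items) * pt->orig_item_nb);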
Once the operation is enqueued, the temporary flow pattern can be
safely discarded, so there is no need to store it for the whole
lifecycle of mlx5_hw_q_job.
This patch removes all references to the flow rule pattern and items
stored inside mlx5_hw_q_job, along with the related allocations, to
reduce the job memory footprint.
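As a rough estimate (assuming a 64-bit build, where
sizeof(struct rte_flow_item) is 32 bytes, and taking MLX5_HW_MAX_ITEMS
as 8 purely for illustration), this saves 256 bytes of item storage
plus the 8-byte items pointer per job, i.e. roughly 264 bytes per
queue entry, scaling with the configured queue sizes.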
The temporary pattern and items previously stored per job are replaced
with stack-allocated ones, contained in the new
mlx5_flow_hw_pattern_params struct.
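A minimal sketch of the resulting usage on the enqueue path (condensed
from flow_hw_async_flow_create() in the diff below; unrelated logic
omitted):

	struct mlx5_flow_hw_pattern_params pp; /* stack-allocated per call */
	const struct rte_flow_item *rule_items;

	rule_items = flow_hw_get_rule_items(dev, table, items,
					    pattern_template_index, &pp);
	if (!rule_items)
		goto error;
	/* pp only needs to stay valid until the rule is enqueued to HWS. */

As a side effect, flow_hw_calc_table_hash() switches from static
storage to a stack-allocated mlx5_flow_hw_pattern_params as well.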
Signed-off-by: Dariusz Sosnowski <dsosnowski@nvidia.com>
Acked-by: Ori Kam <orika@nvidia.com>
---
drivers/net/mlx5/mlx5.h | 17 ++++-------
drivers/net/mlx5/mlx5_flow.h | 10 +++++++
drivers/net/mlx5/mlx5_flow_hw.c | 51 ++++++++++++++-------------------
3 files changed, 37 insertions(+), 41 deletions(-)
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 42dc312a87..1ca6223f95 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -401,17 +401,12 @@ struct mlx5_hw_q_job {
const void *action; /* Indirect action attached to the job. */
};
void *user_data; /* Job user data. */
- struct rte_flow_item *items;
- union {
- struct {
- /* User memory for query output */
- void *user;
- /* Data extracted from hardware */
- void *hw;
- } __rte_packed query;
- struct rte_flow_item_ethdev port_spec;
- struct rte_flow_item_tag tag_spec;
- } __rte_packed;
+ struct {
+ /* User memory for query output */
+ void *user;
+ /* Data extracted from hardware */
+ void *hw;
+ } query;
struct rte_flow_hw *upd_flow; /* Flow with updated values. */
};
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 9ed356e1c2..436d1391bc 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -1316,6 +1316,16 @@ struct mlx5_flow_hw_action_params {
uint8_t ipv6_push_data[MLX5_PUSH_MAX_LEN];
};
+/** Container for dynamically generated flow items used during flow rule creation. */
+struct mlx5_flow_hw_pattern_params {
+ /** Array of dynamically generated flow items. */
+ struct rte_flow_item items[MLX5_HW_MAX_ITEMS];
+ /** Temporary REPRESENTED_PORT item generated by PMD. */
+ struct rte_flow_item_ethdev port_spec;
+ /** Temporary TAG item generated by PMD. */
+ struct rte_flow_item_tag tag_spec;
+};
+
/* rte flow action translate to DR action struct. */
struct mlx5_action_construct_data {
LIST_ENTRY(mlx5_action_construct_data) next;
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index a87fe4d07a..ab67dc139e 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -3272,44 +3272,44 @@ flow_hw_get_rule_items(struct rte_eth_dev *dev,
const struct rte_flow_template_table *table,
const struct rte_flow_item items[],
uint8_t pattern_template_index,
- struct mlx5_hw_q_job *job)
+ struct mlx5_flow_hw_pattern_params *pp)
{
struct rte_flow_pattern_template *pt = table->its[pattern_template_index];
/* Only one implicit item can be added to flow rule pattern. */
MLX5_ASSERT(!pt->implicit_port || !pt->implicit_tag);
- /* At least one item was allocated in job descriptor for items. */
+ /* At least one item was allocated in pattern params for items. */
MLX5_ASSERT(MLX5_HW_MAX_ITEMS >= 1);
if (pt->implicit_port) {
if (pt->orig_item_nb + 1 > MLX5_HW_MAX_ITEMS) {
rte_errno = ENOMEM;
return NULL;
}
- /* Set up represented port item in job descriptor. */
- job->port_spec = (struct rte_flow_item_ethdev){
+ /* Set up represented port item in pattern params. */
+ pp->port_spec = (struct rte_flow_item_ethdev){
.port_id = dev->data->port_id,
};
- job->items[0] = (struct rte_flow_item){
+ pp->items[0] = (struct rte_flow_item){
.type = RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT,
- .spec = &job->port_spec,
+ .spec = &pp->port_spec,
};
- rte_memcpy(&job->items[1], items, sizeof(*items) * pt->orig_item_nb);
- return job->items;
+ rte_memcpy(&pp->items[1], items, sizeof(*items) * pt->orig_item_nb);
+ return pp->items;
} else if (pt->implicit_tag) {
if (pt->orig_item_nb + 1 > MLX5_HW_MAX_ITEMS) {
rte_errno = ENOMEM;
return NULL;
}
- /* Set up tag item in job descriptor. */
- job->tag_spec = (struct rte_flow_item_tag){
+ /* Set up tag item in pattern params. */
+ pp->tag_spec = (struct rte_flow_item_tag){
.data = flow_hw_tx_tag_regc_value(dev),
};
- job->items[0] = (struct rte_flow_item){
+ pp->items[0] = (struct rte_flow_item){
.type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_TAG,
- .spec = &job->tag_spec,
+ .spec = &pp->tag_spec,
};
- rte_memcpy(&job->items[1], items, sizeof(*items) * pt->orig_item_nb);
- return job->items;
+ rte_memcpy(&pp->items[1], items, sizeof(*items) * pt->orig_item_nb);
+ return pp->items;
} else {
return items;
}
@@ -3364,6 +3364,7 @@ flow_hw_async_flow_create(struct rte_eth_dev *dev,
};
struct mlx5dr_rule_action *rule_acts;
struct mlx5_flow_hw_action_params ap;
+ struct mlx5_flow_hw_pattern_params pp;
struct rte_flow_hw *flow = NULL;
struct mlx5_hw_q_job *job = NULL;
const struct rte_flow_item *rule_items;
@@ -3428,7 +3429,7 @@ flow_hw_async_flow_create(struct rte_eth_dev *dev,
goto error;
}
rule_items = flow_hw_get_rule_items(dev, table, items,
- pattern_template_index, job);
+ pattern_template_index, &pp);
if (!rule_items)
goto error;
if (likely(!rte_flow_template_table_resizable(dev->data->port_id, &table->cfg.attr))) {
@@ -10121,11 +10122,8 @@ flow_hw_configure(struct rte_eth_dev *dev,
goto err;
}
mem_size += (sizeof(struct mlx5_hw_q_job *) +
- sizeof(struct mlx5_hw_q_job) +
- sizeof(struct rte_flow_item) *
- MLX5_HW_MAX_ITEMS +
- sizeof(struct rte_flow_hw)) *
- _queue_attr[i]->size;
+ sizeof(struct mlx5_hw_q_job) +
+ sizeof(struct rte_flow_hw)) * _queue_attr[i]->size;
}
priv->hw_q = mlx5_malloc(MLX5_MEM_ZERO, mem_size,
64, SOCKET_ID_ANY);
@@ -10134,7 +10132,6 @@ flow_hw_configure(struct rte_eth_dev *dev,
goto err;
}
for (i = 0; i < nb_q_updated; i++) {
- struct rte_flow_item *items = NULL;
struct rte_flow_hw *upd_flow = NULL;
priv->hw_q[i].job_idx = _queue_attr[i]->size;
@@ -10147,12 +10144,8 @@ flow_hw_configure(struct rte_eth_dev *dev,
&job[_queue_attr[i - 1]->size - 1].upd_flow[1];
job = (struct mlx5_hw_q_job *)
&priv->hw_q[i].job[_queue_attr[i]->size];
- items = (struct rte_flow_item *)
- &job[_queue_attr[i]->size];
- upd_flow = (struct rte_flow_hw *)
- &items[_queue_attr[i]->size * MLX5_HW_MAX_ITEMS];
+ upd_flow = (struct rte_flow_hw *)&job[_queue_attr[i]->size];
for (j = 0; j < _queue_attr[i]->size; j++) {
- job[j].items = &items[j * MLX5_HW_MAX_ITEMS];
job[j].upd_flow = &upd_flow[j];
priv->hw_q[i].job[j] = &job[j];
}
@@ -12329,14 +12322,12 @@ flow_hw_calc_table_hash(struct rte_eth_dev *dev,
uint32_t *hash, struct rte_flow_error *error)
{
const struct rte_flow_item *items;
- /* Temp job to allow adding missing items */
- static struct rte_flow_item tmp_items[MLX5_HW_MAX_ITEMS];
- static struct mlx5_hw_q_job job = {.items = tmp_items};
+ struct mlx5_flow_hw_pattern_params pp;
int res;
items = flow_hw_get_rule_items(dev, table, pattern,
pattern_template_index,
- &job);
+ &pp);
res = mlx5dr_rule_hash_calculate(mlx5_table_matcher(table), items,
pattern_template_index,
MLX5DR_RULE_HASH_CALC_MODE_RAW,
--
2.39.2