From: Slava Ovsiienko <viacheslavo@mellanox.com>
To: Suanming Mou <suanmingm@mellanox.com>, Matan Azrad <matan@mellanox.com>
Cc: Raslan Darawsheh <rasland@mellanox.com>, "dev@dpdk.org" <dev@dpdk.org>
Subject: Re: [dpdk-dev] [PATCH] net/mlx5: fix RSS description corrupt issue
Date: Tue, 21 Apr 2020 07:44:50 +0000
Message-ID: <AM4PR05MB32652B3E838965E2C07F8C6ED2D50@AM4PR05MB3265.eurprd05.prod.outlook.com>
In-Reply-To: <1587348545-57200-1-git-send-email-suanmingm@mellanox.com>
> -----Original Message-----
> From: Suanming Mou <suanmingm@mellanox.com>
> Sent: Monday, April 20, 2020 5:09
> To: Slava Ovsiienko <viacheslavo@mellanox.com>; Matan Azrad
> <matan@mellanox.com>
> Cc: Raslan Darawsheh <rasland@mellanox.com>; dev@dpdk.org
> Subject: [PATCH] net/mlx5: fix RSS description corrupt issue
>
> Currently, the RSS description data is saved in the mlx5 private data. It
> does not support nested flow_list_create() calls.
>
> In the mark register copy case, flow_list_create() is called recursively,
> and the nested call clears the RSS description data of the outer flow.
>
> Allocate one more RSS description so that the nested call uses its own
> copy and the outer one is preserved.
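
Side note for readers: a rough sketch of the two-slot idea below, with
simplified, made-up names (not the actual mlx5 definitions) - the outer flow
and the nested flow each get their own descriptor slot, selected by the
nesting index, so the nested call can no longer clobber the outer descriptor:

	#include <stdint.h>

	/* Hypothetical stand-ins for the driver structures. */
	struct rss_desc_sketch {
		uint32_t queue_num;
		uint8_t key[40];
	};

	struct priv_sketch {
		uint32_t flow_idx;        /* non-zero while the outer flow is in progress */
		uint32_t flow_nested_idx; /* non-zero only inside the nested call */
		struct rss_desc_sketch rss_desc[2]; /* [0] outer flow, [1] nested flow */
	};

	static inline struct rss_desc_sketch *
	rss_desc_get(struct priv_sketch *priv)
	{
		/* !! folds any non-zero nesting index to slot 1 */
		return &priv->rss_desc[!!priv->flow_nested_idx];
	}
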
>
> Fixes: bb7e7801704e ("net/mlx5: optimize mlx5 flow RSS struct")
>
> Signed-off-by: Suanming Mou <suanmingm@mellanox.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
> ---
> drivers/net/mlx5/mlx5_flow.c | 10 ++++++----
> drivers/net/mlx5/mlx5_flow_dv.c | 8 +++++---
> drivers/net/mlx5/mlx5_flow_verbs.c | 8 +++++---
> 3 files changed, 16 insertions(+), 10 deletions(-)
>
> diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
> index c529aa3..109d71e 100644
> --- a/drivers/net/mlx5/mlx5_flow.c
> +++ b/drivers/net/mlx5/mlx5_flow.c
> @@ -4266,7 +4266,8 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
>  		uint8_t buffer[2048];
>  	} items_tx;
>  	struct rte_flow_expand_rss *buf = &expand_buffer.buf;
> -	struct mlx5_flow_rss_desc *rss_desc = priv->rss_desc;
> +	struct mlx5_flow_rss_desc *rss_desc = &((struct mlx5_flow_rss_desc *)
> +					      priv->rss_desc)[!!priv->flow_idx];
>  	const struct rte_flow_action *p_actions_rx = actions;
>  	uint32_t i;
>  	uint32_t idx = 0;
> @@ -4684,10 +4685,11 @@ struct rte_flow *
>  	struct mlx5_priv *priv = dev->data->dev_private;
>
>  	if (!priv->inter_flows) {
> -		priv->inter_flows = rte_calloc(__func__, MLX5_NUM_MAX_DEV_FLOWS,
> +		priv->inter_flows = rte_calloc(__func__, 1,
> +					       MLX5_NUM_MAX_DEV_FLOWS *
>  					       sizeof(struct mlx5_flow) +
> -					       sizeof(struct mlx5_flow_rss_desc) +
> -					       sizeof(uint16_t) * UINT16_MAX, 0);
> +					       (sizeof(struct mlx5_flow_rss_desc) +
> +						sizeof(uint16_t) * UINT16_MAX) * 2, 0);
>  		if (!priv->inter_flows) {
>  			DRV_LOG(ERR, "can't allocate intermediate memory.");
>  			return;
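
Side note: the doubled term above leaves room for two descriptor/queue-array
pairs in the single intermediate allocation. A rough, self-contained sketch of
that layout (invented names and sizes, not the driver's real code):

	#include <stdint.h>
	#include <stdlib.h>

	/* Hypothetical sizes/types, only for the layout illustration. */
	struct flow_sketch { uint8_t pad[64]; };
	struct rss_desc_sketch { uint32_t queue_num; uint8_t key[40]; };
	#define MAX_DEV_FLOWS 32

	static void *
	alloc_intermediate_sketch(void)
	{
		/* flow slots first, then two (descriptor + queue array) regions */
		size_t size = MAX_DEV_FLOWS * sizeof(struct flow_sketch) +
			      2 * (sizeof(struct rss_desc_sketch) +
				   sizeof(uint16_t) * UINT16_MAX);
		return calloc(1, size);
	}
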
> diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
> index cfc911c..fd8f936 100644
> --- a/drivers/net/mlx5/mlx5_flow_dv.c
> +++ b/drivers/net/mlx5/mlx5_flow_dv.c
> @@ -7377,8 +7377,9 @@ struct field_modify_info modify_tcp[] = {
>  	struct mlx5_dev_config *dev_conf = &priv->config;
>  	struct rte_flow *flow = dev_flow->flow;
>  	struct mlx5_flow_handle *handle = dev_flow->handle;
> -	struct mlx5_flow_rss_desc *rss_desc = (struct mlx5_flow_rss_desc *)
> -					      priv->rss_desc;
> +	struct mlx5_flow_rss_desc *rss_desc = &((struct mlx5_flow_rss_desc *)
> +					      priv->rss_desc)
> +					      [!!priv->flow_nested_idx];
>  	uint64_t item_flags = 0;
>  	uint64_t last_item = 0;
>  	uint64_t action_flags = 0;
> @@ -8148,7 +8149,8 @@ struct field_modify_info modify_tcp[] = {
>  			struct mlx5_hrxq *hrxq;
>  			uint32_t hrxq_idx;
>  			struct mlx5_flow_rss_desc *rss_desc =
> -				(struct mlx5_flow_rss_desc *)priv->rss_desc;
> +				&((struct mlx5_flow_rss_desc *)priv->rss_desc)
> +				[!!priv->flow_nested_idx];
>
>  			MLX5_ASSERT(rss_desc->queue_num);
>  			hrxq_idx = mlx5_hrxq_get(dev, rss_desc->key,
> diff --git a/drivers/net/mlx5/mlx5_flow_verbs.c b/drivers/net/mlx5/mlx5_flow_verbs.c
> index a246917..722220c 100644
> --- a/drivers/net/mlx5/mlx5_flow_verbs.c
> +++ b/drivers/net/mlx5/mlx5_flow_verbs.c
> @@ -1570,8 +1570,9 @@
>  	uint64_t priority = attr->priority;
>  	uint32_t subpriority = 0;
>  	struct mlx5_priv *priv = dev->data->dev_private;
> -	struct mlx5_flow_rss_desc *rss_desc = (struct mlx5_flow_rss_desc *)
> -					      priv->rss_desc;
> +	struct mlx5_flow_rss_desc *rss_desc = &((struct mlx5_flow_rss_desc *)
> +					      priv->rss_desc)
> +					      [!!priv->flow_nested_idx];
>
>  	if (priority == MLX5_FLOW_PRIO_RSVD)
>  		priority = priv->config.flow_prio - 1;
> @@ -1844,7 +1845,8 @@
>  	} else {
>  		uint32_t hrxq_idx;
>  		struct mlx5_flow_rss_desc *rss_desc =
> -			(struct mlx5_flow_rss_desc *)priv->rss_desc;
> +			&((struct mlx5_flow_rss_desc *)priv->rss_desc)
> +			[!!priv->flow_nested_idx];
>
>  		MLX5_ASSERT(rss_desc->queue_num);
>  		hrxq_idx = mlx5_hrxq_get(dev, rss_desc->key,
> --
> 1.8.3.1