From: Xiaoyu Min <jackmin@nvidia.com>
To: Matan Azrad <matan@nvidia.com>,
Shahaf Shuler <shahafs@nvidia.com>,
Viacheslav Ovsiienko <viacheslavo@nvidia.com>,
Andrey Vesnovaty <andreyv@nvidia.com>
Cc: dev@dpdk.org, stable@dpdk.org
Subject: [dpdk-stable] [PATCH 1/2] net/mlx5: fix shared inner RSS
Date: Fri, 26 Mar 2021 13:20:20 +0800
Message-ID: <22e41984b2b2aa63eafe160503c35308fadce731.1616724524.git.jackmin@nvidia.com>
In-Reply-To: <cover.1616724524.git.jackmin@nvidia.com>
The shared RSS action uses the _tunnel_ information derived from the
flow items to decide whether to perform inner RSS or not.
However, inner RSS should be selected by the RSS level (>1) given in
the action configuration, and the TIR should then be created with the
'IBV_RX_HASH_INNER' hash bit set.

Also, one shared RSS action holds only a single set of TIRs, either
outer or inner, so the now unnecessary second set of TIRs is removed
in order to save resources.
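
With this change, the hrxq setup derives the tunnel flag from the
configured RSS level instead of iterating over both the outer and the
inner variants. A minimal sketch of that decision, simplified from the
__flow_dv_action_rss_setup() hunk below (error handling omitted):

    uint64_t hash_fields = mlx5_rss_hash_fields[i];
    int tunnel = 0;

    if (shared_rss->origin.level > 1) {
            /* Inner RSS requested: hash on the inner packet headers. */
            hash_fields |= IBV_RX_HASH_INNER;
            tunnel = 1;
    }
    rss_desc.tunnel = tunnel;
    rss_desc.hash_fields = hash_fields;
    hrxq_idx = mlx5_hrxq_get(dev, &rss_desc);
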
Fixes: d2046c09aa64 ("net/mlx5: support shared action for RSS")
Cc: stable@dpdk.org
Signed-off-by: Xiaoyu Min <jackmin@nvidia.com>
---
drivers/net/mlx5/mlx5_flow.h | 2 --
drivers/net/mlx5/mlx5_flow_dv.c | 48 +++++++++++++++------------------
2 files changed, 22 insertions(+), 28 deletions(-)
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 8324e188e1..00b6cd97b9 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -1129,8 +1129,6 @@ struct mlx5_shared_action_rss {
/**< Hash RX queues (hrxq, hrxq_tunnel fields) indirection table. */
uint32_t hrxq[MLX5_RSS_HASH_FIELDS_LEN];
/**< Hash RX queue indexes mapped to mlx5_rss_hash_fields */
- uint32_t hrxq_tunnel[MLX5_RSS_HASH_FIELDS_LEN];
- /**< Hash RX queue indexes for tunneled RSS */
rte_spinlock_t action_rss_sl; /**< Shared RSS action spinlock. */
};
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 23e5849783..5037b7feac 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -11884,10 +11884,9 @@ flow_dv_translate(struct rte_eth_dev *dev,
static int
__flow_dv_action_rss_hrxq_set(struct mlx5_shared_action_rss *action,
const uint64_t hash_fields,
- const int tunnel,
uint32_t hrxq_idx)
{
- uint32_t *hrxqs = tunnel ? action->hrxq : action->hrxq_tunnel;
+ uint32_t *hrxqs = action->hrxq;
switch (hash_fields & ~IBV_RX_HASH_INNER) {
case MLX5_RSS_HASH_IPV4:
@@ -11934,14 +11933,12 @@ __flow_dv_action_rss_hrxq_set(struct mlx5_shared_action_rss *action,
*/
static uint32_t
__flow_dv_action_rss_hrxq_lookup(struct rte_eth_dev *dev, uint32_t idx,
- const uint64_t hash_fields,
- const int tunnel)
+ const uint64_t hash_fields)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_shared_action_rss *shared_rss =
mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
- const uint32_t *hrxqs = tunnel ? shared_rss->hrxq :
- shared_rss->hrxq_tunnel;
+ const uint32_t *hrxqs = shared_rss->hrxq;
switch (hash_fields & ~IBV_RX_HASH_INNER) {
case MLX5_RSS_HASH_IPV4:
@@ -12030,9 +12027,7 @@ flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
hrxq_idx = __flow_dv_action_rss_hrxq_lookup(dev,
rss_desc->shared_rss,
- dev_flow->hash_fields,
- !!(dh->layers &
- MLX5_FLOW_LAYER_TUNNEL));
+ dev_flow->hash_fields);
if (hrxq_idx)
hrxq = mlx5_ipool_get
(priv->sh->ipool[MLX5_IPOOL_HRXQ],
@@ -12643,8 +12638,7 @@ static int
__flow_dv_action_rss_hrxqs_release(struct rte_eth_dev *dev,
struct mlx5_shared_action_rss *shared_rss)
{
- return __flow_dv_hrxqs_release(dev, &shared_rss->hrxq) +
- __flow_dv_hrxqs_release(dev, &shared_rss->hrxq_tunnel);
+ return __flow_dv_hrxqs_release(dev, &shared_rss->hrxq);
}
/**
@@ -12690,23 +12684,25 @@ __flow_dv_action_rss_setup(struct rte_eth_dev *dev,
for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) {
uint32_t hrxq_idx;
uint64_t hash_fields = mlx5_rss_hash_fields[i];
- int tunnel;
+ int tunnel = 0;
- for (tunnel = 0; tunnel < 2; tunnel++) {
- rss_desc.tunnel = tunnel;
- rss_desc.hash_fields = hash_fields;
- hrxq_idx = mlx5_hrxq_get(dev, &rss_desc);
- if (!hrxq_idx) {
- rte_flow_error_set
- (error, rte_errno,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
- "cannot get hash queue");
- goto error_hrxq_new;
- }
- err = __flow_dv_action_rss_hrxq_set
- (shared_rss, hash_fields, tunnel, hrxq_idx);
- MLX5_ASSERT(!err);
+ if (shared_rss->origin.level > 1) {
+ hash_fields |= IBV_RX_HASH_INNER;
+ tunnel = 1;
+ }
+ rss_desc.tunnel = tunnel;
+ rss_desc.hash_fields = hash_fields;
+ hrxq_idx = mlx5_hrxq_get(dev, &rss_desc);
+ if (!hrxq_idx) {
+ rte_flow_error_set
+ (error, rte_errno,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "cannot get hash queue");
+ goto error_hrxq_new;
}
+ err = __flow_dv_action_rss_hrxq_set
+ (shared_rss, hash_fields, hrxq_idx);
+ MLX5_ASSERT(!err);
}
return 0;
error_hrxq_new:
--
2.31.0