DPDK patches and discussions
From: Li Zhang <lizh@nvidia.com>
To: dekelp@nvidia.com, orika@nvidia.com, viacheslavo@nvidia.com,
	matan@nvidia.com, shahafs@nvidia.com
Cc: dev@dpdk.org, thomas@monjalon.net, rasland@nvidia.com, roniba@nvidia.com
Subject: [dpdk-dev] [PATCH v5 3/4] net/mlx5: prepare sub-policy for a flow with meter
Date: Thu, 15 Apr 2021 08:05:04 +0300	[thread overview]
Message-ID: <20210415050505.2082663-4-lizh@nvidia.com> (raw)
In-Reply-To: <20210415050505.2082663-1-lizh@nvidia.com>

When a flow has an RSS action, the driver splits it into
sub-flows, and each sub-flow is finally configured with
a different HW TIR action.

Any RSS action configured in a meter policy may therefore cause
such a split in the flow configuration.
To preserve performance, each TIR action is configured
in a different flow table, so the policy can be split into
sub-policies per TIR at flow creation time.

Add a function to prepare the policy and
its sub-policies for a flow configured with a meter.

Signed-off-by: Li Zhang <lizh@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
---
 drivers/net/mlx5/mlx5_flow.h    |  10 +++
 drivers/net/mlx5/mlx5_flow_dv.c | 144 ++++++++++++++++++++++++++++++++
 2 files changed, 154 insertions(+)
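
As background for the function added below, here is a minimal,
self-contained C sketch of the lookup-or-create behaviour described
in the commit message: a sub-policy is reused when its per-color
queue indexes match the requested ones, otherwise a new entry is
taken, up to a fixed cap. The names used here (struct sub_policy,
NUM_COLORS, MAX_SUB_POLICIES, lookup_or_create_sub_policy) are
illustrative only and are not part of the mlx5 driver API.

/* Illustrative sketch only; names and sizes are hypothetical. */
#include <stdint.h>
#include <stddef.h>

#define NUM_COLORS 2          /* green and yellow, as in MLX5_MTR_RTE_COLORS */
#define MAX_SUB_POLICIES 8    /* arbitrary cap for this sketch */

struct sub_policy {
	uint32_t hrxq[NUM_COLORS];   /* TIR/RSS queue index per color, 0 = unused */
};

struct policy {
	struct sub_policy subs[MAX_SUB_POLICIES];
	uint16_t num;                /* sub-policies currently in use */
};

/*
 * Return an existing sub-policy whose per-color queues match the
 * requested ones, or create a new one; NULL when the table is full.
 */
struct sub_policy *
lookup_or_create_sub_policy(struct policy *pol,
			    const uint32_t want[NUM_COLORS])
{
	uint16_t i, c;

	for (i = 0; i < pol->num; i++) {
		for (c = 0; c < NUM_COLORS; c++)
			if (want[c] && want[c] != pol->subs[i].hrxq[c])
				break;
		if (c == NUM_COLORS)
			return &pol->subs[i];  /* same queue per color: reuse */
	}
	if (pol->num >= MAX_SUB_POLICIES)
		return NULL;                   /* no room for a new entry */
	for (c = 0; c < NUM_COLORS; c++)
		pol->subs[pol->num].hrxq[c] = want[c];
	return &pol->subs[pol->num++];
}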

diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 89e43f2de6..cc9b37b9eb 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -1095,6 +1095,11 @@ typedef int (*mlx5_flow_create_mtr_tbls_t)(struct rte_eth_dev *dev,
 typedef void (*mlx5_flow_destroy_mtr_tbls_t)(struct rte_eth_dev *dev,
 				struct mlx5_flow_meter_info *fm);
 typedef void (*mlx5_flow_destroy_mtr_drop_tbls_t)(struct rte_eth_dev *dev);
+typedef struct mlx5_flow_meter_sub_policy *
+	(*mlx5_flow_meter_sub_policy_rss_prepare_t)
+		(struct rte_eth_dev *dev,
+		struct mlx5_flow_meter_policy *mtr_policy,
+		struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS]);
 typedef uint32_t (*mlx5_flow_mtr_alloc_t)
 					    (struct rte_eth_dev *dev);
 typedef void (*mlx5_flow_mtr_free_t)(struct rte_eth_dev *dev,
@@ -1187,6 +1192,7 @@ struct mlx5_flow_driver_ops {
 	mlx5_flow_destroy_policy_rules_t destroy_policy_rules;
 	mlx5_flow_create_def_policy_t create_def_policy;
 	mlx5_flow_destroy_def_policy_t destroy_def_policy;
+	mlx5_flow_meter_sub_policy_rss_prepare_t meter_sub_policy_rss_prepare;
 	mlx5_flow_counter_alloc_t counter_alloc;
 	mlx5_flow_counter_free_t counter_free;
 	mlx5_flow_counter_query_t counter_query;
@@ -1418,6 +1424,10 @@ int mlx5_flow_create_mtr_tbls(struct rte_eth_dev *dev,
 void mlx5_flow_destroy_mtr_tbls(struct rte_eth_dev *dev,
 			       struct mlx5_flow_meter_info *fm);
 void mlx5_flow_destroy_mtr_drop_tbls(struct rte_eth_dev *dev);
+struct mlx5_flow_meter_sub_policy *mlx5_flow_meter_sub_policy_rss_prepare
+		(struct rte_eth_dev *dev,
+		struct mlx5_flow_meter_policy *mtr_policy,
+		struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS]);
 int mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev);
 int mlx5_shared_action_flush(struct rte_eth_dev *dev);
 void mlx5_release_tunnel_hub(struct mlx5_dev_ctx_shared *sh, uint16_t port_id);
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index f789f2454e..ed17bd903f 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -14857,6 +14857,149 @@ flow_dv_create_mtr_tbls(struct rte_eth_dev *dev,
 	return -1;
 }
 
+/**
+ * Find the sub-policy table for a prefix table with RSS.
+ *
+ * @param[in] dev
+ *   Pointer to Ethernet device.
+ * @param[in] mtr_policy
+ *   Pointer to the meter policy.
+ * @param[in] rss_desc
+ *   Pointer to the RSS descriptors per color.
+ * @return
+ *   Pointer to table set on success, NULL otherwise and rte_errno is set.
+ */
+static struct mlx5_flow_meter_sub_policy *
+flow_dv_meter_sub_policy_rss_prepare(struct rte_eth_dev *dev,
+		struct mlx5_flow_meter_policy *mtr_policy,
+		struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS])
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_flow_meter_sub_policy *sub_policy = NULL;
+	uint32_t sub_policy_idx = 0;
+	uint32_t hrxq_idx[MLX5_MTR_RTE_COLORS] = {0};
+	uint32_t i, j;
+	struct mlx5_hrxq *hrxq;
+	struct mlx5_flow_handle dh;
+	struct mlx5_meter_policy_action_container *act_cnt;
+	uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
+	uint16_t sub_policy_num;
+
+	rte_spinlock_lock(&mtr_policy->sl);
+	for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
+		if (!rss_desc[i])
+			continue;
+		hrxq_idx[i] = mlx5_hrxq_get(dev, rss_desc[i]);
+		if (!hrxq_idx[i]) {
+			rte_spinlock_unlock(&mtr_policy->sl);
+			return NULL;
+		}
+	}
+	sub_policy_num = (mtr_policy->sub_policy_num >>
+			(MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
+			MLX5_MTR_SUB_POLICY_NUM_MASK;
+	for (i = 0; i < sub_policy_num;
+		i++) {
+		for (j = 0; j < MLX5_MTR_RTE_COLORS; j++) {
+			if (rss_desc[j] &&
+				hrxq_idx[j] !=
+			mtr_policy->sub_policys[domain][i]->rix_hrxq[j])
+				break;
+		}
+		if (j >= MLX5_MTR_RTE_COLORS) {
+			/*
+			 * Found the sub policy table with
+			 * the same queue per color
+			 */
+			rte_spinlock_unlock(&mtr_policy->sl);
+			for (j = 0; j < MLX5_MTR_RTE_COLORS; j++)
+				mlx5_hrxq_release(dev, hrxq_idx[j]);
+			return mtr_policy->sub_policys[domain][i];
+		}
+	}
+	/* Create sub policy. */
+	if (!mtr_policy->sub_policys[domain][0]->rix_hrxq[0]) {
+		/* Reuse the first dummy sub_policy. */
+		sub_policy = mtr_policy->sub_policys[domain][0];
+		sub_policy_idx = sub_policy->idx;
+	} else {
+		sub_policy = mlx5_ipool_zmalloc
+				(priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
+				&sub_policy_idx);
+		if (!sub_policy ||
+			sub_policy_idx > MLX5_MAX_SUB_POLICY_TBL_NUM)
+			goto rss_sub_policy_error;
+		sub_policy->idx = sub_policy_idx;
+		sub_policy->main_policy = mtr_policy;
+	}
+	for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
+		if (!rss_desc[i])
+			continue;
+		sub_policy->rix_hrxq[i] = hrxq_idx[i];
+		/*
+		 * Overwrite the last action from
+		 * RSS action to Queue action.
+		 */
+		hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
+			      hrxq_idx[i]);
+		if (!hrxq) {
+			DRV_LOG(ERR, "Failed to create policy hrxq");
+			goto rss_sub_policy_error;
+		}
+		act_cnt = &mtr_policy->act_cnt[i];
+		if (act_cnt->rix_mark || act_cnt->modify_hdr) {
+			memset(&dh, 0, sizeof(struct mlx5_flow_handle));
+			if (act_cnt->rix_mark)
+				dh.mark = 1;
+			dh.fate_action = MLX5_FLOW_FATE_QUEUE;
+			dh.rix_hrxq = hrxq_idx[i];
+			flow_drv_rxq_flags_set(dev, &dh);
+		}
+	}
+	if (__flow_dv_create_policy_acts_rules(dev, mtr_policy,
+		sub_policy, domain)) {
+		DRV_LOG(ERR, "Failed to create policy "
+			"rules per domain.");
+		goto rss_sub_policy_error;
+	}
+	if (sub_policy != mtr_policy->sub_policys[domain][0]) {
+		i = (mtr_policy->sub_policy_num >>
+			(MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
+			MLX5_MTR_SUB_POLICY_NUM_MASK;
+		mtr_policy->sub_policys[domain][i] = sub_policy;
+		i++;
+		if (i > MLX5_MTR_RSS_MAX_SUB_POLICY)
+			goto rss_sub_policy_error;
+		mtr_policy->sub_policy_num &= ~(MLX5_MTR_SUB_POLICY_NUM_MASK <<
+			(MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain));
+		mtr_policy->sub_policy_num |=
+			(i & MLX5_MTR_SUB_POLICY_NUM_MASK) <<
+			(MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain);
+	}
+	rte_spinlock_unlock(&mtr_policy->sl);
+	return sub_policy;
+rss_sub_policy_error:
+	if (sub_policy) {
+		__flow_dv_destroy_sub_policy_rules(dev, sub_policy);
+		if (sub_policy != mtr_policy->sub_policys[domain][0]) {
+			i = (mtr_policy->sub_policy_num >>
+			(MLX5_MTR_SUB_POLICY_NUM_SHIFT * domain)) &
+			MLX5_MTR_SUB_POLICY_NUM_MASK;
+			mtr_policy->sub_policys[domain][i] = NULL;
+			mlx5_ipool_free
+			(priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
+					sub_policy->idx);
+		}
+	}
+	for (i = 0; i < MLX5_MTR_RTE_COLORS; i++)
+		mlx5_hrxq_release(dev, hrxq_idx[i]);
+	if (sub_policy_idx)
+		mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MTR_POLICY],
+			sub_policy_idx);
+	rte_spinlock_unlock(&mtr_policy->sl);
+	return NULL;
+}
+
 /**
  * Validate the batch counter support in root table.
  *
@@ -15447,6 +15590,7 @@ const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
 	.destroy_policy_rules = flow_dv_destroy_policy_rules,
 	.create_def_policy = flow_dv_create_def_policy,
 	.destroy_def_policy = flow_dv_destroy_def_policy,
+	.meter_sub_policy_rss_prepare = flow_dv_meter_sub_policy_rss_prepare,
 	.counter_alloc = flow_dv_counter_allocate,
 	.counter_free = flow_dv_counter_free,
 	.counter_query = flow_dv_counter_query,
-- 
2.27.0
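
For reference, the sub_policy_num arithmetic used several times in
flow_dv_meter_sub_policy_rss_prepare() above packs a per-domain
sub-policy count into a single 32-bit word. The following is a
minimal sketch of that encoding; the constant values and helper
names here are hypothetical, the real MLX5_MTR_SUB_POLICY_NUM_SHIFT
and MLX5_MTR_SUB_POLICY_NUM_MASK definitions live in the driver
headers.

/* Illustrative sketch only; constant values are assumptions. */
#include <stdint.h>

#define SUB_POLICY_NUM_SHIFT 3   /* bits reserved per domain (assumption) */
#define SUB_POLICY_NUM_MASK  0x7 /* (1 << SUB_POLICY_NUM_SHIFT) - 1 */

/* Read the sub-policy count stored for a given domain. */
static inline uint16_t
sub_policy_num_get(uint32_t packed, uint32_t domain)
{
	return (packed >> (SUB_POLICY_NUM_SHIFT * domain)) &
	       SUB_POLICY_NUM_MASK;
}

/* Store a new count for the domain, leaving the other domains intact. */
static inline uint32_t
sub_policy_num_set(uint32_t packed, uint32_t domain, uint16_t num)
{
	packed &= ~((uint32_t)SUB_POLICY_NUM_MASK <<
		    (SUB_POLICY_NUM_SHIFT * domain));
	packed |= ((uint32_t)num & SUB_POLICY_NUM_MASK) <<
		  (SUB_POLICY_NUM_SHIFT * domain);
	return packed;
}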


Thread overview: 42+ messages
2021-04-01  8:16 [dpdk-dev] [PATCH 0/4] net/mlx5: support meter policy operations Li Zhang
2021-04-01  8:16 ` [dpdk-dev] [PATCH 1/4] " Li Zhang
2021-04-01  8:16 ` [dpdk-dev] [PATCH 2/4] net/mlx5: support meter creation with policy Li Zhang
2021-04-01  8:16 ` [dpdk-dev] [PATCH 3/4] net/mlx5: prepare sub-policy for a flow with meter Li Zhang
2021-04-01  8:16 ` [dpdk-dev] [PATCH 4/4] net/mlx5: connect meter policy to created flows Li Zhang
2021-04-02 15:56 ` [dpdk-dev] [PATCH v2 0/4] net/mlx5: support meter policy operations Li Zhang
2021-04-02 15:56   ` [dpdk-dev] [PATCH v2 1/4] " Li Zhang
2021-04-02 15:56   ` [dpdk-dev] [PATCH v2 2/4] net/mlx5: support meter creation with policy Li Zhang
2021-04-02 15:56   ` [dpdk-dev] [PATCH v2 3/4] net/mlx5: prepare sub-policy for a flow with meter Li Zhang
2021-04-02 15:56   ` [dpdk-dev] [PATCH v2 4/4] net/mlx5: connect meter policy to created flows Li Zhang
2021-04-13  0:19 ` [dpdk-dev] [PATCH v3 0/4] net/mlx5: support meter policy operations Li Zhang
2021-04-13  0:19   ` [dpdk-dev] [PATCH v3 1/4] " Li Zhang
2021-04-13  0:19   ` [dpdk-dev] [PATCH v3 2/4] net/mlx5: support meter creation with policy Li Zhang
2021-04-13  0:19   ` [dpdk-dev] [PATCH v3 3/4] net/mlx5: prepare sub-policy for a flow with meter Li Zhang
2021-04-13  0:19   ` [dpdk-dev] [PATCH v3 4/4] net/mlx5: connect meter policy to created flows Li Zhang
2021-04-14  6:40 ` [dpdk-dev] [PATCH v4 0/4] net/mlx5: support meter policy operations Li Zhang
2021-04-14  6:40   ` [dpdk-dev] [PATCH v4 1/4] " Li Zhang
2021-04-14  6:40   ` [dpdk-dev] [PATCH v4 2/4] net/mlx5: support meter creation with policy Li Zhang
2021-04-14  6:40   ` [dpdk-dev] [PATCH v4 3/4] net/mlx5: prepare sub-policy for a flow with meter Li Zhang
2021-04-14  6:40   ` [dpdk-dev] [PATCH v4 4/4] net/mlx5: connect meter policy to created flows Li Zhang
2021-04-15  5:05 ` [dpdk-dev] [PATCH v5 0/4] net/mlx5: support meter policy operations Li Zhang
2021-04-15  5:05   ` [dpdk-dev] [PATCH v5 1/4] " Li Zhang
2021-04-15  5:05   ` [dpdk-dev] [PATCH v5 2/4] net/mlx5: support meter creation with policy Li Zhang
2021-04-15  5:05   ` Li Zhang [this message]
2021-04-15  5:05   ` [dpdk-dev] [PATCH v5 4/4] net/mlx5: connect meter policy to created flows Li Zhang
2021-04-15  5:09 ` [dpdk-dev] [PATCH v6 0/4] net/mlx5: support meter policy operations Li Zhang
2021-04-15  5:09   ` [dpdk-dev] [PATCH v6 1/4] " Li Zhang
2021-04-15  5:09   ` [dpdk-dev] [PATCH v6 2/4] net/mlx5: support meter creation with policy Li Zhang
2021-04-15  5:09   ` [dpdk-dev] [PATCH v6 3/4] net/mlx5: prepare sub-policy for a flow with meter Li Zhang
2021-04-15  5:09   ` [dpdk-dev] [PATCH v6 4/4] net/mlx5: connect meter policy to created flows Li Zhang
2021-04-21  3:11 ` [dpdk-dev] [PATCH v7 0/4] net/mlx5: support meter policy operations Li Zhang
2021-04-21  3:11   ` [dpdk-dev] [PATCH v7 1/4] " Li Zhang
2021-04-21  3:11   ` [dpdk-dev] [PATCH v7 2/4] net/mlx5: support meter creation with policy Li Zhang
2021-04-21  3:11   ` [dpdk-dev] [PATCH v7 3/4] net/mlx5: prepare sub-policy for a flow with meter Li Zhang
2021-04-21  3:11   ` [dpdk-dev] [PATCH v7 4/4] net/mlx5: connect meter policy to created flows Li Zhang
2021-04-22 10:45   ` [dpdk-dev] [PATCH v7 0/4] net/mlx5: support meter policy operations Raslan Darawsheh
2021-04-27 10:43 ` [dpdk-dev] [PATCH v8 " Li Zhang
2021-04-27 10:43   ` [dpdk-dev] [PATCH v8 1/4] " Li Zhang
2021-04-27 10:43   ` [dpdk-dev] [PATCH v8 2/4] net/mlx5: support meter creation with policy Li Zhang
2021-04-27 10:43   ` [dpdk-dev] [PATCH v8 3/4] net/mlx5: prepare sub-policy for a flow with meter Li Zhang
2021-04-27 10:43   ` [dpdk-dev] [PATCH v8 4/4] net/mlx5: connect meter policy to created flows Li Zhang
2021-04-27 12:14   ` [dpdk-dev] [PATCH v8 0/4] net/mlx5: support meter policy operations Raslan Darawsheh
