DPDK patches and discussions
 help / color / mirror / Atom feed
* [PATCH] net/mlx5: set correct priority for meter policy
@ 2024-03-01  8:46 Shun Hao
  2024-03-01 16:14 ` Patrick Robb
  2024-03-12  8:07 ` Raslan Darawsheh
  0 siblings, 2 replies; 3+ messages in thread
From: Shun Hao @ 2024-03-01  8:46 UTC (permalink / raw)
  To: viacheslavo, matan, orika, Suanming Mou, Bing Zhao; +Cc: dev, rasland, stable

Currently a meter policy's flows all use the same priority, but the red
color flow uses a different matcher mask (it does not match on the color
register), so the red flow might be placed before the green/yellow ones.
This will impact performance because green/yellow packets will check the
red flow first and get a miss, then match the green/yellow flows,
introducing more hops.

This patch fixes this by using the same matcher mask, and thus
effectively the same priority, for the flows of all colors.

Fixes: 363db9b00f ("net/mlx5: handle yellow case in default meter policy")
CC: stable@dpdk.org

Signed-off-by: Shun Hao <shunh@nvidia.com>
Acked-by: Bing Zhao <bingz@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
---
 drivers/net/mlx5/mlx5_flow_dv.c | 41 +++++++++++++++++++--------------
 1 file changed, 24 insertions(+), 17 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 18f09b22be..f1584ed6e0 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -17922,9 +17922,8 @@ __flow_dv_create_policy_matcher(struct rte_eth_dev *dev,
 		}
 	}
 	tbl_data = container_of(tbl_rsc, struct mlx5_flow_tbl_data_entry, tbl);
-	if (priority < RTE_COLOR_RED)
-		flow_dv_match_meta_reg(matcher.mask.buf,
-			(enum modify_reg)color_reg_c_idx, color_mask, color_mask);
+	flow_dv_match_meta_reg(matcher.mask.buf,
+		(enum modify_reg)color_reg_c_idx, color_mask, color_mask);
 	matcher.priority = priority;
 	matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
 				    matcher.mask.size);
@@ -17975,7 +17974,6 @@ __flow_dv_create_domain_policy_rules(struct rte_eth_dev *dev,
 	int i;
 	int ret = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, &flow_err);
 	struct mlx5_sub_policy_color_rule *color_rule;
-	bool svport_match;
 	struct mlx5_sub_policy_color_rule *tmp_rules[RTE_COLORS] = {NULL};
 
 	if (ret < 0)
@@ -18011,10 +18009,9 @@ __flow_dv_create_domain_policy_rules(struct rte_eth_dev *dev,
 		/* No use. */
 		attr.priority = i;
 		/* Create matchers for colors. */
-		svport_match = (i != RTE_COLOR_RED) ? match_src_port : false;
 		if (__flow_dv_create_policy_matcher(dev, color_reg_c_idx,
 				MLX5_MTR_POLICY_MATCHER_PRIO, sub_policy,
-				&attr, svport_match, NULL,
+				&attr, match_src_port, NULL,
 				&color_rule->matcher, &flow_err)) {
 			DRV_LOG(ERR, "Failed to create color%u matcher.", i);
 			goto err_exit;
@@ -18024,7 +18021,7 @@ __flow_dv_create_domain_policy_rules(struct rte_eth_dev *dev,
 				color_reg_c_idx, (enum rte_color)i,
 				color_rule->matcher,
 				acts[i].actions_n, acts[i].dv_actions,
-				svport_match, NULL, &color_rule->rule,
+				match_src_port, NULL, &color_rule->rule,
 				&attr)) {
 			DRV_LOG(ERR, "Failed to create color%u rule.", i);
 			goto err_exit;
@@ -18907,7 +18904,7 @@ flow_dv_meter_hierarchy_rule_create(struct rte_eth_dev *dev,
 	struct {
 		struct mlx5_flow_meter_policy *fm_policy;
 		struct mlx5_flow_meter_info *next_fm;
-		struct mlx5_sub_policy_color_rule *tag_rule[MLX5_MTR_RTE_COLORS];
+		struct mlx5_sub_policy_color_rule *tag_rule[RTE_COLORS];
 	} fm_info[MLX5_MTR_CHAIN_MAX_NUM] = { {0} };
 	uint32_t fm_cnt = 0;
 	uint32_t i, j;
@@ -18941,14 +18938,22 @@ flow_dv_meter_hierarchy_rule_create(struct rte_eth_dev *dev,
 		mtr_policy = fm_info[i].fm_policy;
 		rte_spinlock_lock(&mtr_policy->sl);
 		sub_policy = mtr_policy->sub_policys[domain][0];
-		for (j = 0; j < MLX5_MTR_RTE_COLORS; j++) {
+		for (j = 0; j < RTE_COLORS; j++) {
 			uint8_t act_n = 0;
-			struct mlx5_flow_dv_modify_hdr_resource *modify_hdr;
+			struct mlx5_flow_dv_modify_hdr_resource *modify_hdr = NULL;
 			struct mlx5_flow_dv_port_id_action_resource *port_action;
+			uint8_t fate_action;
 
-			if (mtr_policy->act_cnt[j].fate_action != MLX5_FLOW_FATE_MTR &&
-			    mtr_policy->act_cnt[j].fate_action != MLX5_FLOW_FATE_PORT_ID)
-				continue;
+			if (j == RTE_COLOR_RED) {
+				fate_action = MLX5_FLOW_FATE_DROP;
+			} else {
+				fate_action = mtr_policy->act_cnt[j].fate_action;
+				modify_hdr = mtr_policy->act_cnt[j].modify_hdr;
+				if (fate_action != MLX5_FLOW_FATE_MTR &&
+				    fate_action != MLX5_FLOW_FATE_PORT_ID &&
+				    fate_action != MLX5_FLOW_FATE_DROP)
+					continue;
+			}
 			color_rule = mlx5_malloc(MLX5_MEM_ZERO,
 						 sizeof(struct mlx5_sub_policy_color_rule),
 						 0, SOCKET_ID_ANY);
@@ -18960,9 +18965,8 @@ flow_dv_meter_hierarchy_rule_create(struct rte_eth_dev *dev,
 				goto err_exit;
 			}
 			color_rule->src_port = src_port;
-			modify_hdr = mtr_policy->act_cnt[j].modify_hdr;
 			/* Prepare to create color rule. */
-			if (mtr_policy->act_cnt[j].fate_action == MLX5_FLOW_FATE_MTR) {
+			if (fate_action == MLX5_FLOW_FATE_MTR) {
 				next_fm = fm_info[i].next_fm;
 				if (mlx5_flow_meter_attach(priv, next_fm, &attr, error)) {
 					mlx5_free(color_rule);
@@ -18989,7 +18993,7 @@ flow_dv_meter_hierarchy_rule_create(struct rte_eth_dev *dev,
 				}
 				acts.dv_actions[act_n++] = tbl_data->jump.action;
 				acts.actions_n = act_n;
-			} else {
+			} else if (fate_action == MLX5_FLOW_FATE_PORT_ID) {
 				port_action =
 					mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PORT_ID],
 						       mtr_policy->act_cnt[j].rix_port_id_action);
@@ -19002,6 +19006,9 @@ flow_dv_meter_hierarchy_rule_create(struct rte_eth_dev *dev,
 					acts.dv_actions[act_n++] = modify_hdr->action;
 				acts.dv_actions[act_n++] = port_action->action;
 				acts.actions_n = act_n;
+			} else {
+				acts.dv_actions[act_n++] = mtr_policy->dr_drop_action[domain];
+				acts.actions_n = act_n;
 			}
 			fm_info[i].tag_rule[j] = color_rule;
 			TAILQ_INSERT_TAIL(&sub_policy->color_rules[j], color_rule, next_port);
@@ -19033,7 +19040,7 @@ flow_dv_meter_hierarchy_rule_create(struct rte_eth_dev *dev,
 		mtr_policy = fm_info[i].fm_policy;
 		rte_spinlock_lock(&mtr_policy->sl);
 		sub_policy = mtr_policy->sub_policys[domain][0];
-		for (j = 0; j < MLX5_MTR_RTE_COLORS; j++) {
+		for (j = 0; j < RTE_COLORS; j++) {
 			color_rule = fm_info[i].tag_rule[j];
 			if (!color_rule)
 				continue;
-- 
2.20.0


^ permalink raw reply	[flat|nested] 3+ messages in thread

* Re: [PATCH] net/mlx5: set correct priority for meter policy
  2024-03-01  8:46 [PATCH] net/mlx5: set correct priority for meter policy Shun Hao
@ 2024-03-01 16:14 ` Patrick Robb
  2024-03-12  8:07 ` Raslan Darawsheh
  1 sibling, 0 replies; 3+ messages in thread
From: Patrick Robb @ 2024-03-01 16:14 UTC (permalink / raw)
  To: Shun Hao
  Cc: viacheslavo, matan, orika, Suanming Mou, Bing Zhao, dev, rasland, stable

[-- Attachment #1: Type: text/plain, Size: 8709 bytes --]

The Community CI Testing Lab had an infra failure this morning and some
patches including yours were affected with false failures. The issue is now
resolved and we are rerunning the tests in question for all patches
submitted today.

On Fri, Mar 1, 2024 at 3:46 AM Shun Hao <shunh@nvidia.com> wrote:

> Currently a meter policy's flows are always using the same priority for
> all colors, so the red color flow might be before green/yellow ones.
> This will impact the performance cause green/yellow packets will check
> red flow first and got miss, then match green/yellow flows, introducing
> more hops.
>
> This patch fixes this by giving the same priority to flows for all
> colors.
>
> Fixes: 363db9b00f ("net/mlx5: handle yellow case in default meter policy")
> CC: stable@dpdk.org
>
> Signed-off-by: Shun Hao <shunh@nvidia.com>
> Acked-by: Bing Zhao <bingz@nvidia.com>
> Acked-by: Matan Azrad <matan@nvidia.com>
> ---
>  drivers/net/mlx5/mlx5_flow_dv.c | 41 +++++++++++++++++++--------------
>  1 file changed, 24 insertions(+), 17 deletions(-)
>
> diff --git a/drivers/net/mlx5/mlx5_flow_dv.c
> b/drivers/net/mlx5/mlx5_flow_dv.c
> index 18f09b22be..f1584ed6e0 100644
> --- a/drivers/net/mlx5/mlx5_flow_dv.c
> +++ b/drivers/net/mlx5/mlx5_flow_dv.c
> @@ -17922,9 +17922,8 @@ __flow_dv_create_policy_matcher(struct rte_eth_dev
> *dev,
>                 }
>         }
>         tbl_data = container_of(tbl_rsc, struct mlx5_flow_tbl_data_entry,
> tbl);
> -       if (priority < RTE_COLOR_RED)
> -               flow_dv_match_meta_reg(matcher.mask.buf,
> -                       (enum modify_reg)color_reg_c_idx, color_mask,
> color_mask);
> +       flow_dv_match_meta_reg(matcher.mask.buf,
> +               (enum modify_reg)color_reg_c_idx, color_mask, color_mask);
>         matcher.priority = priority;
>         matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
>                                     matcher.mask.size);
> @@ -17975,7 +17974,6 @@ __flow_dv_create_domain_policy_rules(struct
> rte_eth_dev *dev,
>         int i;
>         int ret = mlx5_flow_get_reg_id(dev, MLX5_MTR_COLOR, 0, &flow_err);
>         struct mlx5_sub_policy_color_rule *color_rule;
> -       bool svport_match;
>         struct mlx5_sub_policy_color_rule *tmp_rules[RTE_COLORS] = {NULL};
>
>         if (ret < 0)
> @@ -18011,10 +18009,9 @@ __flow_dv_create_domain_policy_rules(struct
> rte_eth_dev *dev,
>                 /* No use. */
>                 attr.priority = i;
>                 /* Create matchers for colors. */
> -               svport_match = (i != RTE_COLOR_RED) ? match_src_port :
> false;
>                 if (__flow_dv_create_policy_matcher(dev, color_reg_c_idx,
>                                 MLX5_MTR_POLICY_MATCHER_PRIO, sub_policy,
> -                               &attr, svport_match, NULL,
> +                               &attr, match_src_port, NULL,
>                                 &color_rule->matcher, &flow_err)) {
>                         DRV_LOG(ERR, "Failed to create color%u matcher.",
> i);
>                         goto err_exit;
> @@ -18024,7 +18021,7 @@ __flow_dv_create_domain_policy_rules(struct
> rte_eth_dev *dev,
>                                 color_reg_c_idx, (enum rte_color)i,
>                                 color_rule->matcher,
>                                 acts[i].actions_n, acts[i].dv_actions,
> -                               svport_match, NULL, &color_rule->rule,
> +                               match_src_port, NULL, &color_rule->rule,
>                                 &attr)) {
>                         DRV_LOG(ERR, "Failed to create color%u rule.", i);
>                         goto err_exit;
> @@ -18907,7 +18904,7 @@ flow_dv_meter_hierarchy_rule_create(struct
> rte_eth_dev *dev,
>         struct {
>                 struct mlx5_flow_meter_policy *fm_policy;
>                 struct mlx5_flow_meter_info *next_fm;
> -               struct mlx5_sub_policy_color_rule
> *tag_rule[MLX5_MTR_RTE_COLORS];
> +               struct mlx5_sub_policy_color_rule *tag_rule[RTE_COLORS];
>         } fm_info[MLX5_MTR_CHAIN_MAX_NUM] = { {0} };
>         uint32_t fm_cnt = 0;
>         uint32_t i, j;
> @@ -18941,14 +18938,22 @@ flow_dv_meter_hierarchy_rule_create(struct
> rte_eth_dev *dev,
>                 mtr_policy = fm_info[i].fm_policy;
>                 rte_spinlock_lock(&mtr_policy->sl);
>                 sub_policy = mtr_policy->sub_policys[domain][0];
> -               for (j = 0; j < MLX5_MTR_RTE_COLORS; j++) {
> +               for (j = 0; j < RTE_COLORS; j++) {
>                         uint8_t act_n = 0;
> -                       struct mlx5_flow_dv_modify_hdr_resource
> *modify_hdr;
> +                       struct mlx5_flow_dv_modify_hdr_resource
> *modify_hdr = NULL;
>                         struct mlx5_flow_dv_port_id_action_resource
> *port_action;
> +                       uint8_t fate_action;
>
> -                       if (mtr_policy->act_cnt[j].fate_action !=
> MLX5_FLOW_FATE_MTR &&
> -                           mtr_policy->act_cnt[j].fate_action !=
> MLX5_FLOW_FATE_PORT_ID)
> -                               continue;
> +                       if (j == RTE_COLOR_RED) {
> +                               fate_action = MLX5_FLOW_FATE_DROP;
> +                       } else {
> +                               fate_action =
> mtr_policy->act_cnt[j].fate_action;
> +                               modify_hdr =
> mtr_policy->act_cnt[j].modify_hdr;
> +                               if (fate_action != MLX5_FLOW_FATE_MTR &&
> +                                   fate_action != MLX5_FLOW_FATE_PORT_ID
> &&
> +                                   fate_action != MLX5_FLOW_FATE_DROP)
> +                                       continue;
> +                       }
>                         color_rule = mlx5_malloc(MLX5_MEM_ZERO,
>                                                  sizeof(struct
> mlx5_sub_policy_color_rule),
>                                                  0, SOCKET_ID_ANY);
> @@ -18960,9 +18965,8 @@ flow_dv_meter_hierarchy_rule_create(struct
> rte_eth_dev *dev,
>                                 goto err_exit;
>                         }
>                         color_rule->src_port = src_port;
> -                       modify_hdr = mtr_policy->act_cnt[j].modify_hdr;
>                         /* Prepare to create color rule. */
> -                       if (mtr_policy->act_cnt[j].fate_action ==
> MLX5_FLOW_FATE_MTR) {
> +                       if (fate_action == MLX5_FLOW_FATE_MTR) {
>                                 next_fm = fm_info[i].next_fm;
>                                 if (mlx5_flow_meter_attach(priv, next_fm,
> &attr, error)) {
>                                         mlx5_free(color_rule);
> @@ -18989,7 +18993,7 @@ flow_dv_meter_hierarchy_rule_create(struct
> rte_eth_dev *dev,
>                                 }
>                                 acts.dv_actions[act_n++] =
> tbl_data->jump.action;
>                                 acts.actions_n = act_n;
> -                       } else {
> +                       } else if (fate_action == MLX5_FLOW_FATE_PORT_ID) {
>                                 port_action =
>
> mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PORT_ID],
>
>  mtr_policy->act_cnt[j].rix_port_id_action);
> @@ -19002,6 +19006,9 @@ flow_dv_meter_hierarchy_rule_create(struct
> rte_eth_dev *dev,
>                                         acts.dv_actions[act_n++] =
> modify_hdr->action;
>                                 acts.dv_actions[act_n++] =
> port_action->action;
>                                 acts.actions_n = act_n;
> +                       } else {
> +                               acts.dv_actions[act_n++] =
> mtr_policy->dr_drop_action[domain];
> +                               acts.actions_n = act_n;
>                         }
>                         fm_info[i].tag_rule[j] = color_rule;
>                         TAILQ_INSERT_TAIL(&sub_policy->color_rules[j],
> color_rule, next_port);
> @@ -19033,7 +19040,7 @@ flow_dv_meter_hierarchy_rule_create(struct
> rte_eth_dev *dev,
>                 mtr_policy = fm_info[i].fm_policy;
>                 rte_spinlock_lock(&mtr_policy->sl);
>                 sub_policy = mtr_policy->sub_policys[domain][0];
> -               for (j = 0; j < MLX5_MTR_RTE_COLORS; j++) {
> +               for (j = 0; j < RTE_COLORS; j++) {
>                         color_rule = fm_info[i].tag_rule[j];
>                         if (!color_rule)
>                                 continue;
> --
> 2.20.0
>
>

[-- Attachment #2: Type: text/html, Size: 10993 bytes --]

^ permalink raw reply	[flat|nested] 3+ messages in thread

* RE: [PATCH] net/mlx5: set correct priority for meter policy
  2024-03-01  8:46 [PATCH] net/mlx5: set correct priority for meter policy Shun Hao
  2024-03-01 16:14 ` Patrick Robb
@ 2024-03-12  8:07 ` Raslan Darawsheh
  1 sibling, 0 replies; 3+ messages in thread
From: Raslan Darawsheh @ 2024-03-12  8:07 UTC (permalink / raw)
  To: Shun Hao, Slava Ovsiienko, Matan Azrad, Ori Kam, Suanming Mou, Bing Zhao
  Cc: dev, stable

Hi,

> -----Original Message-----
> From: Shun Hao <shunh@nvidia.com>
> Sent: Friday, March 1, 2024 10:46 AM
> To: Slava Ovsiienko <viacheslavo@nvidia.com>; Matan Azrad
> <matan@nvidia.com>; Ori Kam <orika@nvidia.com>; Suanming Mou
> <suanmingm@nvidia.com>; Bing Zhao <bingz@nvidia.com>
> Cc: dev@dpdk.org; Raslan Darawsheh <rasland@nvidia.com>;
> stable@dpdk.org
> Subject: [PATCH] net/mlx5: set correct priority for meter policy
> 
> Currently a meter policy's flows are always using the same priority for all
> colors, so the red color flow might be before green/yellow ones.
> This will impact the performance cause green/yellow packets will check red
> flow first and got miss, then match green/yellow flows, introducing more
> hops.
> 
> This patch fixes this by giving the same priority to flows for all colors.
> 
> Fixes: 363db9b00f ("net/mlx5: handle yellow case in default meter policy")
> CC: stable@dpdk.org
> 
> Signed-off-by: Shun Hao <shunh@nvidia.com>
> Acked-by: Bing Zhao <bingz@nvidia.com>
> Acked-by: Matan Azrad <matan@nvidia.com>
Patch applied to next-net-mlx,

Kindest regards
Raslan Darawsheh

^ permalink raw reply	[flat|nested] 3+ messages in thread

end of thread, other threads:[~2024-03-12  8:07 UTC | newest]

Thread overview: 3+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2024-03-01  8:46 [PATCH] net/mlx5: set correct priority for meter policy Shun Hao
2024-03-01 16:14 ` Patrick Robb
2024-03-12  8:07 ` Raslan Darawsheh

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).