automatic DPDK test reports
Subject: |WARNING| pw133798 [PATCH] [21.11, v1] net/mlx5: fix matcher layout size calculation
From: dpdklab @ 2023-11-03  3:31 UTC
  To: test-report; +Cc: dpdk-test-reports

Test-Label: iol-testing
Test-Status: WARNING
http://dpdk.org/patch/133798

_apply patch failure_

Submitter: Rongwei Liu <rongweil@nvidia.com>
Date: Friday, November 03 2023 03:07:21 
Applied on: CommitID:ee70470b08ce3abf13849b310cfdac84b4eae1d2
Apply patch set 133798 failed:

Checking patch drivers/net/mlx5/mlx5_flow_dv.c...
error: while searching for:
			}
			dv->actions[n++] = priv->sh->default_miss_action;
		}
		misc_mask = flow_dv_matcher_enable(dv->value.buf);
		__flow_dv_adjust_buf_size(&dv->value.size, misc_mask);
		err = mlx5_flow_os_create_flow(dv_h->matcher->matcher_object,
					       (void *)&dv->value, n,

error: patch failed: drivers/net/mlx5/mlx5_flow_dv.c:14067
error: while searching for:
static int
__flow_dv_create_policy_flow(struct rte_eth_dev *dev,
			uint32_t color_reg_c_idx,
			enum rte_color color, void *matcher_object,
			int actions_n, void *actions,
			bool match_src_port, const struct rte_flow_item *item,
			void **rule, const struct rte_flow_attr *attr)

error: patch failed: drivers/net/mlx5/mlx5_flow_dv.c:16236
error: while searching for:
	flow_dv_match_meta_reg(matcher.buf, value.buf,
			       (enum modify_reg)color_reg_c_idx,
			       rte_col_2_mlx5_col(color), UINT32_MAX);
	misc_mask = flow_dv_matcher_enable(value.buf);
	__flow_dv_adjust_buf_size(&value.size, misc_mask);
	ret = mlx5_flow_os_create_flow(matcher_object, (void *)&value,
				       actions_n, actions, rule);
	if (ret) {
		DRV_LOG(ERR, "Failed to create meter policy%d flow.", color);

error: patch failed: drivers/net/mlx5/mlx5_flow_dv.c:16262
error: while searching for:
		/* Create flow, matching color. */
		if (__flow_dv_create_policy_flow(dev,
				color_reg_c_idx, (enum rte_color)i,
				color_rule->matcher->matcher_object,
				acts[i].actions_n, acts[i].dv_actions,
				svport_match, NULL, &color_rule->rule,
				&attr)) {

error: patch failed: drivers/net/mlx5/mlx5_flow_dv.c:16412
error: while searching for:
			actions[i++] = priv->sh->dr_drop_action;
			flow_dv_match_meta_reg(matcher_para.buf, value.buf,
				(enum modify_reg)mtr_id_reg_c, 0, 0);
			misc_mask = flow_dv_matcher_enable(value.buf);
			__flow_dv_adjust_buf_size(&value.size, misc_mask);
			ret = mlx5_flow_os_create_flow
				(mtrmng->def_matcher[domain]->matcher_object,

error: patch failed: drivers/net/mlx5/mlx5_flow_dv.c:16872
error: while searching for:
					fm->drop_cnt, NULL);
		actions[i++] = cnt->action;
		actions[i++] = priv->sh->dr_drop_action;
		misc_mask = flow_dv_matcher_enable(value.buf);
		__flow_dv_adjust_buf_size(&value.size, misc_mask);
		ret = mlx5_flow_os_create_flow(drop_matcher->matcher_object,
					       (void *)&value, i, actions,

error: patch failed: drivers/net/mlx5/mlx5_flow_dv.c:16917
error: while searching for:
		}
		if (__flow_dv_create_policy_flow(dev, color_reg_c_idx,
					(enum rte_color)i,
					color_rule->matcher->matcher_object,
					acts.actions_n, acts.dv_actions,
					true, item,
					&color_rule->rule, &attr)) {

error: patch failed: drivers/net/mlx5/mlx5_flow_dv.c:17292
error: while searching for:
			break;
		}
		/* Try to apply the flow to HW. */
		misc_mask = flow_dv_matcher_enable(flow.dv.value.buf);
		__flow_dv_adjust_buf_size(&flow.dv.value.size, misc_mask);
		err = mlx5_flow_os_create_flow
				(flow.handle->dvh.matcher->matcher_object,

error: patch failed: drivers/net/mlx5/mlx5_flow_dv.c:18309
Applying patch drivers/net/mlx5/mlx5_flow_dv.c with 8 rejects...
Rejected hunk #1.
Rejected hunk #2.
Rejected hunk #3.
Rejected hunk #4.
Rejected hunk #5.
Rejected hunk #6.
Rejected hunk #7.
Rejected hunk #8.
hint: Use 'git am --show-current-patch' to see the failed patch
diff a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c	(rejected hunks)
@@ -14067,7 +14067,7 @@ flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
 			}
 			dv->actions[n++] = priv->sh->default_miss_action;
 		}
-		misc_mask = flow_dv_matcher_enable(dv->value.buf);
+		misc_mask = flow_dv_matcher_enable(dv_h->matcher->mask.buf);
 		__flow_dv_adjust_buf_size(&dv->value.size, misc_mask);
 		err = mlx5_flow_os_create_flow(dv_h->matcher->matcher_object,
 					       (void *)&dv->value, n,
@@ -16236,7 +16236,7 @@ flow_dv_destroy_def_policy(struct rte_eth_dev *dev)
 static int
 __flow_dv_create_policy_flow(struct rte_eth_dev *dev,
 			uint32_t color_reg_c_idx,
-			enum rte_color color, void *matcher_object,
+			enum rte_color color, struct mlx5_flow_dv_matcher *dv_matcher,
 			int actions_n, void *actions,
 			bool match_src_port, const struct rte_flow_item *item,
 			void **rule, const struct rte_flow_attr *attr)
@@ -16262,9 +16262,9 @@ __flow_dv_create_policy_flow(struct rte_eth_dev *dev,
 	flow_dv_match_meta_reg(matcher.buf, value.buf,
 			       (enum modify_reg)color_reg_c_idx,
 			       rte_col_2_mlx5_col(color), UINT32_MAX);
-	misc_mask = flow_dv_matcher_enable(value.buf);
+	misc_mask = flow_dv_matcher_enable(dv_matcher->mask.buf);
 	__flow_dv_adjust_buf_size(&value.size, misc_mask);
-	ret = mlx5_flow_os_create_flow(matcher_object, (void *)&value,
+	ret = mlx5_flow_os_create_flow(dv_matcher->matcher_object, (void *)&value,
 				       actions_n, actions, rule);
 	if (ret) {
 		DRV_LOG(ERR, "Failed to create meter policy%d flow.", color);
@@ -16412,7 +16412,7 @@ __flow_dv_create_domain_policy_rules(struct rte_eth_dev *dev,
 		/* Create flow, matching color. */
 		if (__flow_dv_create_policy_flow(dev,
 				color_reg_c_idx, (enum rte_color)i,
-				color_rule->matcher->matcher_object,
+				color_rule->matcher,
 				acts[i].actions_n, acts[i].dv_actions,
 				svport_match, NULL, &color_rule->rule,
 				&attr)) {
@@ -16872,7 +16872,7 @@ flow_dv_create_mtr_tbls(struct rte_eth_dev *dev,
 			actions[i++] = priv->sh->dr_drop_action;
 			flow_dv_match_meta_reg(matcher_para.buf, value.buf,
 				(enum modify_reg)mtr_id_reg_c, 0, 0);
-			misc_mask = flow_dv_matcher_enable(value.buf);
+			misc_mask = flow_dv_matcher_enable(mtrmng->def_matcher[domain]->mask.buf);
 			__flow_dv_adjust_buf_size(&value.size, misc_mask);
 			ret = mlx5_flow_os_create_flow
 				(mtrmng->def_matcher[domain]->matcher_object,
@@ -16917,7 +16917,7 @@ flow_dv_create_mtr_tbls(struct rte_eth_dev *dev,
 					fm->drop_cnt, NULL);
 		actions[i++] = cnt->action;
 		actions[i++] = priv->sh->dr_drop_action;
-		misc_mask = flow_dv_matcher_enable(value.buf);
+		misc_mask = flow_dv_matcher_enable(drop_matcher->mask.buf);
 		__flow_dv_adjust_buf_size(&value.size, misc_mask);
 		ret = mlx5_flow_os_create_flow(drop_matcher->matcher_object,
 					       (void *)&value, i, actions,
@@ -17292,7 +17292,7 @@ flow_dv_meter_hierarchy_rule_create(struct rte_eth_dev *dev,
 		}
 		if (__flow_dv_create_policy_flow(dev, color_reg_c_idx,
 					(enum rte_color)i,
-					color_rule->matcher->matcher_object,
+					color_rule->matcher,
 					acts.actions_n, acts.dv_actions,
 					true, item,
 					&color_rule->rule, &attr)) {
@@ -18309,7 +18309,7 @@ flow_dv_discover_priorities(struct rte_eth_dev *dev,
 			break;
 		}
 		/* Try to apply the flow to HW. */
-		misc_mask = flow_dv_matcher_enable(flow.dv.value.buf);
+		misc_mask = flow_dv_matcher_enable(flow.handle->dvh.matcher->mask.buf);
 		__flow_dv_adjust_buf_size(&flow.dv.value.size, misc_mask);
 		err = mlx5_flow_os_create_flow
 				(flow.handle->dvh.matcher->matcher_object,
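
To reproduce the apply failure locally, a minimal sketch (assuming the
patch has been saved as 133798.mbox via the patchwork link above; the
file name is illustrative):

    # Check out the exact 21.11 baseline the lab applied against.
    git checkout ee70470b08ce3abf13849b310cfdac84b4eae1d2

    # Attempt the apply; -3 requests a three-way merge, which can
    # absorb pure context drift when the pre-image blobs are available.
    git am -3 133798.mbox

    # If it still fails, inspect the patch git was trying to apply.
    git am --show-current-patch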

https://lab.dpdk.org/results/dashboard/patchsets/28212/

UNH-IOL DPDK Community Lab
