patches for DPDK stable branches

* [PATCH] net/mlx5: fix mark enabling for Rx datapath
@ 2022-01-16 15:23 Raja Zidane
  2022-01-26 15:26 ` Raslan Darawsheh
  0 siblings, 1 reply; 2+ messages in thread
From: Raja Zidane @ 2022-01-16 15:23 UTC (permalink / raw)
  To: dev; +Cc: Matan Azrad, stable

To optimize the datapath, the mlx5 PMD checked for the mark action on
flow creation and flagged the possible destination Rx queues (through
queue/RSS actions), then enabled the mark action logic only for the
flagged Rx queues.

The mark action did not work if no queue/RSS action was in the same
flow, even when the user used multi-group logic to manage the flows.
So, if the mark action was performed in group X and the packet moved
to group Y > X, where it was forwarded to the Rx queues, the SW did
not get the mark ID in the mbuf.
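
For illustration, a minimal sketch of the failing two-group setup using
the rte_flow API (the function name, port ID, mark ID, and queue index
are hypothetical, not taken from this report):

#include <stdint.h>
#include <rte_flow.h>

/* Group 0 marks the packet and jumps; group 1 holds the fate action,
 * so no queue/RSS action sits in the same flow as the mark action.
 */
static void
mark_across_groups(uint16_t port_id)
{
	struct rte_flow_error err;
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_attr attr0 = { .group = 0, .ingress = 1 };
	struct rte_flow_action_mark mark = { .id = 0xbeef };
	struct rte_flow_action_jump jump = { .group = 1 };
	struct rte_flow_action acts0[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
		{ .type = RTE_FLOW_ACTION_TYPE_JUMP, .conf = &jump },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_attr attr1 = { .group = 1, .ingress = 1 };
	struct rte_flow_action_queue queue = { .index = 0 };
	struct rte_flow_action acts1[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	rte_flow_create(port_id, &attr0, pattern, acts0, &err);
	rte_flow_create(port_id, &attr1, pattern, acts1, &err);
}

Before this fix, packets hitting these rules reached queue 0 without
the mark ID being reported in the mbuf.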

Flag the Rx datapath to report the mark action for any queue when the
driver detects the first mark action after the dev_start operation.
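
Once the first mark action is installed, every Rx queue reports the
mark. A hedged sketch of how an application then reads the mark ID
from a received mbuf (flag and field names as in DPDK 21.11+; the
helper name is hypothetical):

#include <inttypes.h>
#include <stdio.h>
#include <rte_mbuf.h>

/* Print the mark ID if the Rx datapath reported one for this mbuf. */
static void
print_mark(const struct rte_mbuf *m)
{
	if (m->ol_flags & RTE_MBUF_F_RX_FDIR_ID)
		printf("mark id: %" PRIu32 "\n", m->hash.fdir.hi);
}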

Fixes: 8e61555657b2 ("net/mlx5: fix shared RSS and mark actions combination")
Cc: stable@dpdk.org

Signed-off-by: Raja Zidane <rzidane@nvidia.com>
---
Acked-by: Matan Azrad <matan@nvidia.com>
 drivers/net/mlx5/mlx5.h            |  1 +
 drivers/net/mlx5/mlx5_flow.c       | 53 ++++++++++++++++--------------
 drivers/net/mlx5/mlx5_flow.h       |  2 +-
 drivers/net/mlx5/mlx5_flow_dv.c    | 14 +++++---
 drivers/net/mlx5/mlx5_flow_verbs.c |  4 +--
 drivers/net/mlx5/mlx5_rx.h         |  1 -
 6 files changed, 41 insertions(+), 34 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 9413e3397c..737ad6895c 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -1413,6 +1413,7 @@ struct mlx5_priv {
 	unsigned int mtr_en:1; /* Whether support meter. */
 	unsigned int mtr_reg_share:1; /* Whether support meter REG_C share. */
 	unsigned int lb_used:1; /* Loopback queue is referred to. */
+	uint32_t mark_enabled:1; /* If mark action is enabled on rxqs. */
 	uint16_t domain_id; /* Switch domain identifier. */
 	uint16_t vport_id; /* Associated VF vport index (if any). */
 	uint32_t vport_meta_tag; /* Used for vport index match ove VF LAG. */
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 2cadf615ec..d7cb1eb89b 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -1234,7 +1234,6 @@ flow_drv_rxq_flags_set(struct rte_eth_dev *dev,
 		       struct mlx5_flow_handle *dev_handle)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	const int mark = dev_handle->mark;
 	const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL);
 	struct mlx5_ind_table_obj *ind_tbl = NULL;
 	unsigned int i;
@@ -1269,15 +1268,6 @@ flow_drv_rxq_flags_set(struct rte_eth_dev *dev,
 		 * this must be always enabled (metadata may arive
 		 * from other port - not from local flows only.
 		 */
-		if (priv->config.dv_flow_en &&
-		    priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
-		    mlx5_flow_ext_mreg_supported(dev)) {
-			rxq_ctrl->rxq.mark = 1;
-			rxq_ctrl->flow_mark_n = 1;
-		} else if (mark) {
-			rxq_ctrl->rxq.mark = 1;
-			rxq_ctrl->flow_mark_n++;
-		}
 		if (tunnel) {
 			unsigned int j;
 
@@ -1295,6 +1285,20 @@ flow_drv_rxq_flags_set(struct rte_eth_dev *dev,
 	}
 }
 
+static void
+flow_rxq_mark_flag_set(struct rte_eth_dev *dev)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_rxq_ctrl *rxq_ctrl;
+
+	if (priv->mark_enabled)
+		return;
+	LIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) {
+		rxq_ctrl->rxq.mark = 1;
+	}
+	priv->mark_enabled = 1;
+}
+
 /**
  * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) for a flow
  *
@@ -1309,7 +1313,11 @@ flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow)
 	struct mlx5_priv *priv = dev->data->dev_private;
 	uint32_t handle_idx;
 	struct mlx5_flow_handle *dev_handle;
+	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
 
+	MLX5_ASSERT(wks);
+	if (wks->mark)
+		flow_rxq_mark_flag_set(dev);
 	SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
 		       handle_idx, dev_handle, next)
 		flow_drv_rxq_flags_set(dev, dev_handle);
@@ -1329,7 +1337,6 @@ flow_drv_rxq_flags_trim(struct rte_eth_dev *dev,
 			struct mlx5_flow_handle *dev_handle)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	const int mark = dev_handle->mark;
 	const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL);
 	struct mlx5_ind_table_obj *ind_tbl = NULL;
 	unsigned int i;
@@ -1360,15 +1367,6 @@ flow_drv_rxq_flags_trim(struct rte_eth_dev *dev,
 		MLX5_ASSERT(rxq_ctrl != NULL);
 		if (rxq_ctrl == NULL)
 			continue;
-		if (priv->config.dv_flow_en &&
-		    priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
-		    mlx5_flow_ext_mreg_supported(dev)) {
-			rxq_ctrl->rxq.mark = 1;
-			rxq_ctrl->flow_mark_n = 1;
-		} else if (mark) {
-			rxq_ctrl->flow_mark_n--;
-			rxq_ctrl->rxq.mark = !!rxq_ctrl->flow_mark_n;
-		}
 		if (tunnel) {
 			unsigned int j;
 
@@ -1425,12 +1423,12 @@ flow_rxq_flags_clear(struct rte_eth_dev *dev)
 
 		if (rxq == NULL || rxq->ctrl == NULL)
 			continue;
-		rxq->ctrl->flow_mark_n = 0;
 		rxq->ctrl->rxq.mark = 0;
 		for (j = 0; j != MLX5_FLOW_TUNNEL; ++j)
 			rxq->ctrl->flow_tunnels_n[j] = 0;
 		rxq->ctrl->rxq.tunnel = 0;
 	}
+	priv->mark_enabled = 0;
 }
 
 /**
@@ -4811,6 +4809,7 @@ flow_create_split_inner(struct rte_eth_dev *dev,
 			struct rte_flow_error *error)
 {
 	struct mlx5_flow *dev_flow;
+	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
 
 	dev_flow = flow_drv_prepare(dev, flow, attr, items, actions,
 				    flow_split_info->flow_idx, error);
@@ -4829,8 +4828,10 @@ flow_create_split_inner(struct rte_eth_dev *dev,
 	 */
 	if (flow_split_info->prefix_layers)
 		dev_flow->handle->layers = flow_split_info->prefix_layers;
-	if (flow_split_info->prefix_mark)
-		dev_flow->handle->mark = 1;
+	if (flow_split_info->prefix_mark) {
+		MLX5_ASSERT(wks);
+		wks->mark = 1;
+	}
 	if (sub_flow)
 		*sub_flow = dev_flow;
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
@@ -6143,7 +6144,7 @@ flow_create_split_meter(struct rte_eth_dev *dev,
 				 MLX5_FLOW_TABLE_LEVEL_METER;
 		flow_split_info->prefix_layers =
 				flow_get_prefix_layer_flags(dev_flow);
-		flow_split_info->prefix_mark |= dev_flow->handle->mark;
+		flow_split_info->prefix_mark |= wks->mark;
 		flow_split_info->table_id = MLX5_MTR_TABLE_ID_SUFFIX;
 	}
 	/* Add the prefix subflow. */
@@ -6209,6 +6210,7 @@ flow_create_split_sample(struct rte_eth_dev *dev,
 	struct mlx5_flow_dv_sample_resource *sample_res;
 	struct mlx5_flow_tbl_data_entry *sfx_tbl_data;
 	struct mlx5_flow_tbl_resource *sfx_tbl;
+	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
 #endif
 	size_t act_size;
 	size_t item_size;
@@ -6295,7 +6297,8 @@ flow_create_split_sample(struct rte_eth_dev *dev,
 		}
 		flow_split_info->prefix_layers =
 				flow_get_prefix_layer_flags(dev_flow);
-		flow_split_info->prefix_mark |= dev_flow->handle->mark;
+		MLX5_ASSERT(wks);
+		flow_split_info->prefix_mark |= wks->mark;
 		/* Suffix group level already be scaled with factor, set
 		 * MLX5_SCALE_FLOW_GROUP_BIT of skip_scale to 1 to avoid scale
 		 * again in translation.
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 125d85899c..7fec79afb3 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -697,7 +697,6 @@ struct mlx5_flow_handle {
 	void *drv_flow; /**< pointer to driver flow object. */
 	uint32_t split_flow_id:27; /**< Sub flow unique match flow id. */
 	uint32_t is_meter_flow_id:1; /**< Indicate if flow_id is for meter. */
-	uint32_t mark:1; /**< Metadata rxq mark flag. */
 	uint32_t fate_action:3; /**< Fate action type. */
 	uint32_t flex_item; /**< referenced Flex Item bitmask. */
 	union {
@@ -1108,6 +1107,7 @@ struct mlx5_flow_workspace {
 	/* The final policy when meter policy is hierarchy. */
 	uint32_t skip_matcher_reg:1;
 	/* Indicates if need to skip matcher register in translate. */
+	uint32_t mark:1; /* Indicates if flow contains mark action. */
 };
 
 struct mlx5_flow_split_info {
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 2dad86064b..4fda6597cc 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -11645,7 +11645,7 @@ flow_dv_translate_action_sample(struct rte_eth_dev *dev,
 				(((const struct rte_flow_action_mark *)
 				(sub_actions->conf))->id);
 
-			dev_flow->handle->mark = 1;
+			wks->mark = 1;
 			pre_rix = dev_flow->handle->dvh.rix_tag;
 			/* Save the mark resource before sample */
 			pre_r = dev_flow->dv.tag_resource;
@@ -12805,7 +12805,7 @@ flow_dv_translate(struct rte_eth_dev *dev,
 			break;
 		case RTE_FLOW_ACTION_TYPE_FLAG:
 			action_flags |= MLX5_FLOW_ACTION_FLAG;
-			dev_flow->handle->mark = 1;
+			wks->mark = 1;
 			if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
 				struct rte_flow_action_mark mark = {
 					.id = MLX5_FLOW_MARK_DEFAULT,
@@ -12834,7 +12834,7 @@ flow_dv_translate(struct rte_eth_dev *dev,
 			break;
 		case RTE_FLOW_ACTION_TYPE_MARK:
 			action_flags |= MLX5_FLOW_ACTION_MARK;
-			dev_flow->handle->mark = 1;
+			wks->mark = 1;
 			if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
 				const struct rte_flow_action_mark *mark =
 					(const struct rte_flow_action_mark *)
@@ -15402,7 +15402,9 @@ __flow_dv_create_domain_policy_acts(struct rte_eth_dev *dev,
 			    (MLX5_MAX_MODIFY_NUM + 1)];
 	} mhdr_dummy;
 	struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
+	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
 
+	MLX5_ASSERT(wks);
 	egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
 	transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
 	memset(&dh, 0, sizeof(struct mlx5_flow_handle));
@@ -15440,7 +15442,7 @@ __flow_dv_create_domain_policy_acts(struct rte_eth_dev *dev,
 					  NULL,
 					  "cannot create policy "
 					  "mark action for this color");
-				dev_flow.handle->mark = 1;
+				wks->mark = 1;
 				if (flow_dv_tag_resource_register(dev, tag_be,
 						  &dev_flow, &flow_err))
 					return -rte_mtr_error_set(error,
@@ -16865,7 +16867,9 @@ __flow_dv_meter_get_rss_sub_policy(struct rte_eth_dev *dev,
 	struct mlx5_meter_policy_action_container *act_cnt;
 	uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
 	uint16_t sub_policy_num;
+	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
 
+	MLX5_ASSERT(wks);
 	rte_spinlock_lock(&mtr_policy->sl);
 	for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
 		if (!rss_desc[i])
@@ -16939,7 +16943,7 @@ __flow_dv_meter_get_rss_sub_policy(struct rte_eth_dev *dev,
 			if (act_cnt->rix_mark || act_cnt->modify_hdr) {
 				memset(&dh, 0, sizeof(struct mlx5_flow_handle));
 				if (act_cnt->rix_mark)
-					dh.mark = 1;
+					wks->mark = 1;
 				dh.fate_action = MLX5_FLOW_FATE_QUEUE;
 				dh.rix_hrxq = hrxq_idx[i];
 				flow_drv_rxq_flags_set(dev, &dh);
diff --git a/drivers/net/mlx5/mlx5_flow_verbs.c b/drivers/net/mlx5/mlx5_flow_verbs.c
index 192a00d4fd..90ccb9aaff 100644
--- a/drivers/net/mlx5/mlx5_flow_verbs.c
+++ b/drivers/net/mlx5/mlx5_flow_verbs.c
@@ -1693,12 +1693,12 @@ flow_verbs_translate(struct rte_eth_dev *dev,
 		case RTE_FLOW_ACTION_TYPE_FLAG:
 			flow_verbs_translate_action_flag(dev_flow, actions);
 			action_flags |= MLX5_FLOW_ACTION_FLAG;
-			dev_flow->handle->mark = 1;
+			wks->mark = 1;
 			break;
 		case RTE_FLOW_ACTION_TYPE_MARK:
 			flow_verbs_translate_action_mark(dev_flow, actions);
 			action_flags |= MLX5_FLOW_ACTION_MARK;
-			dev_flow->handle->mark = 1;
+			wks->mark = 1;
 			break;
 		case RTE_FLOW_ACTION_TYPE_DROP:
 			flow_verbs_translate_action_drop(dev_flow, actions);
diff --git a/drivers/net/mlx5/mlx5_rx.h b/drivers/net/mlx5/mlx5_rx.h
index c178f9a24b..cb5d51340d 100644
--- a/drivers/net/mlx5/mlx5_rx.h
+++ b/drivers/net/mlx5/mlx5_rx.h
@@ -161,7 +161,6 @@ struct mlx5_rxq_ctrl {
 	uint16_t share_qid; /* Shared RxQ ID in group. */
 	unsigned int started:1; /* Whether (shared) RXQ has been started. */
 	unsigned int irq:1; /* Whether IRQ is enabled. */
-	uint32_t flow_mark_n; /* Number of Mark/Flag flows using this Queue. */
 	uint32_t flow_tunnels_n[MLX5_FLOW_TUNNEL]; /* Tunnels counters. */
 	uint32_t wqn; /* WQ number. */
 	uint32_t rxseg_n; /* Number of split segment descriptions. */
-- 
2.17.1


* RE: [PATCH] net/mlx5: fix mark enabling for Rx datapath
  2022-01-16 15:23 [PATCH] net/mlx5: fix mark enabling for Rx datapath Raja Zidane
@ 2022-01-26 15:26 ` Raslan Darawsheh
  0 siblings, 0 replies; 2+ messages in thread
From: Raslan Darawsheh @ 2022-01-26 15:26 UTC (permalink / raw)
  To: Raja Zidane, dev; +Cc: Matan Azrad, stable

Hi,

> -----Original Message-----
> From: Raja Zidane <rzidane@nvidia.com>
> Sent: Sunday, January 16, 2022 5:24 PM
> To: dev@dpdk.org
> Cc: Matan Azrad <matan@nvidia.com>; stable@dpdk.org
> Subject: [PATCH] net/mlx5: fix mark enabling for Rx datapath
> 
> To optimize the datapath, the mlx5 PMD checked for the mark action on
> flow creation and flagged the possible destination Rx queues (through
> queue/RSS actions), then enabled the mark action logic only for the
> flagged Rx queues.
> 
> The mark action did not work if no queue/RSS action was in the same
> flow, even when the user used multi-group logic to manage the flows.
> So, if the mark action was performed in group X and the packet moved
> to group Y > X, where it was forwarded to the Rx queues, the SW did
> not get the mark ID in the mbuf.
> 
> Flag the Rx datapath to report the mark action for any queue when the
> driver detects the first mark action after the dev_start operation.
> 
> Fixes: 8e61555657b2 ("net/mlx5: fix shared RSS and mark actions combination")
> Cc: stable@dpdk.org
> 
> Signed-off-by: Raja Zidane <rzidane@nvidia.com>
> ---
> Acked-by: Matan Azrad <matan@nvidia.com>

Patch applied to next-net-mlx,

Kindest regards,
Raslan Darawsheh
