DPDK patches and discussions
 help / color / mirror / Atom feed
* [dpdk-dev] [PATCH] net/mlx5: fix shared RSS action release
@ 2020-11-06  3:08 Suanming Mou
  2020-11-09  8:22 ` Raslan Darawsheh
                   ` (2 more replies)
  0 siblings, 3 replies; 6+ messages in thread
From: Suanming Mou @ 2020-11-06  3:08 UTC (permalink / raw)
  To: viacheslavo, matan; +Cc: rasland, dev

As the shared RSS action is a global standalone action, it
should be maintained only by the shared action management functions.

Currently, the shared RSS action hrxq uses the same fate action type
as the existing general queues, and implicitly relies on the deprecated
refcnt increment to indicate that the hrxq is in use, which leads to the
hrxq being released when the flow is destroyed.

This commit adds a new fate action type for shared RSS action to
handle the shared RSS action release correctly.

Fixes: e1592b6c4dea ("net/mlx5: make Rx queue thread safe")

Signed-off-by: Suanming Mou <suanmingm@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
---
 drivers/net/mlx5/mlx5.h            |  3 +-
 drivers/net/mlx5/mlx5_flow.c       |  7 ++-
 drivers/net/mlx5/mlx5_flow.h       |  4 +-
 drivers/net/mlx5/mlx5_flow_dv.c    | 88 +++++++++++++++++++++++++-------------
 drivers/net/mlx5/mlx5_flow_verbs.c |  2 +-
 drivers/net/mlx5/mlx5_rxq.c        | 36 ++++++++--------
 6 files changed, 86 insertions(+), 54 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index b43a8c9..80fffc3 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -789,11 +789,11 @@ struct mlx5_flow_rss_desc {
 	uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */
 	uint32_t key_len; /**< RSS hash key len. */
 	uint32_t tunnel; /**< Queue in tunnel. */
+	uint32_t shared_rss; /**< Shared RSS index. */
 	union {
 		uint16_t *queue; /**< Destination queues. */
 		const uint16_t *const_q; /**< Const pointer convert. */
 	};
-	bool standalone; /**< Queue is standalone or not. */
 };
 
 #define MLX5_PROC_PRIV(port_id) \
@@ -836,7 +836,6 @@ struct mlx5_ind_table_obj {
 __extension__
 struct mlx5_hrxq {
 	struct mlx5_cache_entry entry; /* Cache entry. */
-	uint32_t refcnt; /* Reference counter. */
 	uint32_t standalone:1; /* This object used in shared action. */
 	struct mlx5_ind_table_obj *ind_table; /* Indirection table. */
 	RTE_STD_C11
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index e9c0ddd..f1d5f2d 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -5622,7 +5622,7 @@ struct tunnel_default_miss_ctx {
 		buf->entries = 1;
 		buf->entry[0].pattern = (void *)(uintptr_t)items;
 	}
-	flow->shared_rss = flow_get_shared_rss_action(dev, shared_actions,
+	rss_desc->shared_rss = flow_get_shared_rss_action(dev, shared_actions,
 						      shared_actions_n);
 	/*
 	 * Record the start index when there is a nested call. All sub-flows
@@ -5729,6 +5729,11 @@ struct tunnel_default_miss_ctx {
 	ret = rte_errno; /* Save rte_errno before cleanup. */
 	flow_mreg_del_copy_action(dev, flow);
 	flow_drv_destroy(dev, flow);
+	if (rss_desc->shared_rss)
+		__atomic_sub_fetch(&((struct mlx5_shared_action_rss *)
+			mlx5_ipool_get
+			(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
+			rss_desc->shared_rss))->refcnt, 1, __ATOMIC_RELAXED);
 	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], idx);
 	rte_errno = ret; /* Restore rte_errno. */
 	ret = rte_errno;
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 58185fb..5fab1bd 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -388,6 +388,7 @@ enum mlx5_flow_fate_type {
 	MLX5_FLOW_FATE_PORT_ID,
 	MLX5_FLOW_FATE_DROP,
 	MLX5_FLOW_FATE_DEFAULT_MISS,
+	MLX5_FLOW_FATE_SHARED_RSS,
 	MLX5_FLOW_FATE_MAX,
 };
 
@@ -651,6 +652,8 @@ struct mlx5_flow_handle {
 		/**< Generic value indicates the fate action. */
 		uint32_t rix_default_fate;
 		/**< Indicates default miss fate action. */
+		uint32_t rix_srss;
+		/**< Indicates shared RSS fate action. */
 	};
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
 	struct mlx5_flow_handle_dv dvh;
@@ -1024,7 +1027,6 @@ struct tunnel_tbl_entry {
 /* Flow structure. */
 struct rte_flow {
 	ILIST_ENTRY(uint32_t)next; /**< Index to the next flow structure. */
-	uint32_t shared_rss; /** < Shared RSS action ID. */
 	uint32_t dev_handles;
 	/**< Device flow handles that are part of the flow. */
 	uint32_t drv_type:2; /**< Driver type. */
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index b1928a0..576aee6 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -81,6 +81,8 @@
 static int
 flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
 					uint32_t port_id);
+static void
+flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss);
 
 /**
  * Initialize flow attributes structure according to flow items' types.
@@ -8558,7 +8560,7 @@ struct mlx5_hlist_entry *
 	rss_desc->key_len = MLX5_RSS_HASH_KEY_LEN;
 	rss_desc->hash_fields = dev_flow->hash_fields;
 	rss_desc->tunnel = !!(dh->layers & MLX5_FLOW_LAYER_TUNNEL);
-	rss_desc->standalone = false;
+	rss_desc->shared_rss = 0;
 	*hrxq_idx = mlx5_hrxq_get(dev, rss_desc);
 	if (!*hrxq_idx)
 		return NULL;
@@ -9779,7 +9781,9 @@ struct mlx5_cache_entry *
 			 * when expanding items for RSS.
 			 */
 			action_flags |= MLX5_FLOW_ACTION_RSS;
-			dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
+			dev_flow->handle->fate_action = rss_desc->shared_rss ?
+				MLX5_FLOW_FATE_SHARED_RSS :
+				MLX5_FLOW_FATE_QUEUE;
 			break;
 		case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
 			flow->age = (uint32_t)(uintptr_t)(action->conf);
@@ -10572,10 +10576,10 @@ struct mlx5_cache_entry *
  *
  * @param[in] dev
  *   Pointer to the Ethernet device structure.
- * @param[in] flow
- *   Shred RSS action holding hash RX queue objects.
  * @param[in] dev_flow
  *   Pointer to the sub flow.
+ * @param[in] rss_desc
+ *   Pointer to the RSS descriptor.
  * @param[out] hrxq
  *   Pointer to retrieved hash RX queue object.
  *
@@ -10583,29 +10587,23 @@ struct mlx5_cache_entry *
  *   Valid hash RX queue index, otherwise 0 and rte_errno is set.
  */
 static uint32_t
-__flow_dv_rss_get_hrxq(struct rte_eth_dev *dev, struct rte_flow *flow,
-			   struct mlx5_flow *dev_flow,
-			   struct mlx5_hrxq **hrxq)
+__flow_dv_rss_get_hrxq(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow,
+		       struct mlx5_flow_rss_desc *rss_desc,
+		       struct mlx5_hrxq **hrxq)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
 	uint32_t hrxq_idx;
 
-	if (flow->shared_rss) {
+	if (rss_desc->shared_rss) {
 		hrxq_idx = __flow_dv_action_rss_hrxq_lookup
-				(dev, flow->shared_rss, dev_flow->hash_fields,
+				(dev, rss_desc->shared_rss,
+				 dev_flow->hash_fields,
 				 !!(dev_flow->handle->layers &
 				    MLX5_FLOW_LAYER_TUNNEL));
-		if (hrxq_idx) {
+		if (hrxq_idx)
 			*hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
 					       hrxq_idx);
-			__atomic_fetch_add(&(*hrxq)->refcnt, 1,
-					   __ATOMIC_RELAXED);
-		}
 	} else {
-		struct mlx5_flow_rss_desc *rss_desc =
-				&wks->rss_desc[!!wks->flow_nested_idx];
-
 		*hrxq = flow_dv_hrxq_prepare(dev, dev_flow, rss_desc,
 					     &hrxq_idx);
 	}
@@ -10640,8 +10638,15 @@ struct mlx5_cache_entry *
 	int err;
 	int idx;
 	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
+	struct mlx5_flow_rss_desc *rss_desc =
+				&wks->rss_desc[!!wks->flow_nested_idx];
 
 	MLX5_ASSERT(wks);
+	if (rss_desc->shared_rss) {
+		dh = wks->flows[wks->flow_idx - 1].handle;
+		MLX5_ASSERT(dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS);
+		dh->rix_srss = rss_desc->shared_rss;
+	}
 	for (idx = wks->flow_idx - 1; idx >= wks->flow_nested_idx; idx--) {
 		dev_flow = &wks->flows[idx];
 		dv = &dev_flow->dv;
@@ -10656,11 +10661,12 @@ struct mlx5_cache_entry *
 				dv->actions[n++] =
 						priv->drop_queue.hrxq->action;
 			}
-		} else if (dh->fate_action == MLX5_FLOW_FATE_QUEUE &&
-			   !dv_h->rix_sample && !dv_h->rix_dest_array) {
+		} else if ((dh->fate_action == MLX5_FLOW_FATE_QUEUE &&
+			   !dv_h->rix_sample && !dv_h->rix_dest_array) ||
+			    (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS)) {
 			struct mlx5_hrxq *hrxq = NULL;
 			uint32_t hrxq_idx = __flow_dv_rss_get_hrxq
-						(dev, flow, dev_flow, &hrxq);
+					(dev, dev_flow, rss_desc, &hrxq);
 			if (!hrxq) {
 				rte_flow_error_set
 					(error, rte_errno,
@@ -10668,7 +10674,8 @@ struct mlx5_cache_entry *
 					 "cannot get hash queue");
 				goto error;
 			}
-			dh->rix_hrxq = hrxq_idx;
+			if (dh->fate_action == MLX5_FLOW_FATE_QUEUE)
+				dh->rix_hrxq = hrxq_idx;
 			dv->actions[n++] = hrxq->action;
 		} else if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS) {
 			if (!priv->sh->default_miss_action) {
@@ -10714,6 +10721,8 @@ struct mlx5_cache_entry *
 		if (dh->vf_vlan.tag && dh->vf_vlan.created)
 			mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
 	}
+	if (rss_desc->shared_rss)
+		wks->flows[wks->flow_idx - 1].handle->rix_srss = 0;
 	rte_errno = err; /* Restore rte_errno. */
 	return -rte_errno;
 }
@@ -10898,6 +10907,25 @@ struct mlx5_cache_entry *
 				     &cache->entry);
 }
 
+/**
+ * Release shared RSS action resource.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param srss
+ *   Shared RSS action index.
+ */
+static void
+flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_shared_action_rss *shared_rss;
+
+	shared_rss = mlx5_ipool_get
+			(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], srss);
+	__atomic_sub_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
+}
+
 void
 flow_dv_push_vlan_remove_cb(struct mlx5_cache_list *list,
 			    struct mlx5_cache_entry *entry)
@@ -10962,6 +10990,9 @@ struct mlx5_cache_entry *
 		flow_dv_port_id_action_resource_release(dev,
 				handle->rix_port_id_action);
 		break;
+	case MLX5_FLOW_FATE_SHARED_RSS:
+		flow_dv_shared_rss_action_release(dev, handle->rix_srss);
+		break;
 	default:
 		DRV_LOG(DEBUG, "Incorrect fate action:%d", handle->fate_action);
 		break;
@@ -11128,13 +11159,6 @@ struct mlx5_cache_entry *
 	if (!flow)
 		return;
 	flow_dv_remove(dev, flow);
-	if (flow->shared_rss) {
-		struct mlx5_shared_action_rss *shared_rss = mlx5_ipool_get
-				(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
-							      flow->shared_rss);
-
-		__atomic_sub_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
-	}
 	if (flow->counter) {
 		flow_dv_counter_free(dev, flow->counter);
 		flow->counter = 0;
@@ -11239,6 +11263,8 @@ struct mlx5_cache_entry *
  *
  * @param[in] dev
  *   Pointer to the Ethernet device structure.
+ * @param[in] action_idx
+ *   Shared RSS action ipool index.
  * @param[in, out] action
  *   Partially initialized shared RSS action.
  * @param[out] error
@@ -11250,6 +11276,7 @@ struct mlx5_cache_entry *
  */
 static int
 __flow_dv_action_rss_setup(struct rte_eth_dev *dev,
+			uint32_t action_idx,
 			struct mlx5_shared_action_rss *action,
 			struct rte_flow_error *error)
 {
@@ -11261,7 +11288,8 @@ struct mlx5_cache_entry *
 	rss_desc.key_len = MLX5_RSS_HASH_KEY_LEN;
 	rss_desc.const_q = action->origin.queue;
 	rss_desc.queue_num = action->origin.queue_num;
-	rss_desc.standalone = true;
+	/* Set non-zero value to indicate a shared RSS. */
+	rss_desc.shared_rss = action_idx;
 	for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) {
 		uint32_t hrxq_idx;
 		uint64_t hash_fields = mlx5_rss_hash_fields[i];
@@ -11353,7 +11381,7 @@ struct mlx5_cache_entry *
 	memcpy(shared_action->queue, rss->queue, queue_size);
 	origin->queue = shared_action->queue;
 	origin->queue_num = rss->queue_num;
-	if (__flow_dv_action_rss_setup(dev, shared_action, error))
+	if (__flow_dv_action_rss_setup(dev, idx, shared_action, error))
 		goto error_rss_init;
 	__atomic_add_fetch(&shared_action->refcnt, 1, __ATOMIC_RELAXED);
 	rte_spinlock_lock(&priv->shared_act_sl);
diff --git a/drivers/net/mlx5/mlx5_flow_verbs.c b/drivers/net/mlx5/mlx5_flow_verbs.c
index 28fd120..ac30080 100644
--- a/drivers/net/mlx5/mlx5_flow_verbs.c
+++ b/drivers/net/mlx5/mlx5_flow_verbs.c
@@ -1972,7 +1972,7 @@
 			rss_desc->hash_fields = dev_flow->hash_fields;
 			rss_desc->tunnel = !!(handle->layers &
 					      MLX5_FLOW_LAYER_TUNNEL);
-			rss_desc->standalone = false;
+			rss_desc->shared_rss = 0;
 			hrxq_idx = mlx5_hrxq_get(dev, rss_desc);
 			hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
 					      hrxq_idx);
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index d95a573..a0edbb9 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -2211,8 +2211,9 @@ struct mlx5_ind_table_obj *
 	struct mlx5_priv *priv = dev->data->dev_private;
 	const uint8_t *rss_key = rss_desc->key;
 	uint32_t rss_key_len =  rss_desc->key_len;
+	bool standalone = !!rss_desc->shared_rss;
 	const uint16_t *queues =
-		rss_desc->standalone ? rss_desc->const_q : rss_desc->queue;
+		standalone ? rss_desc->const_q : rss_desc->queue;
 	uint32_t queues_n = rss_desc->queue_num;
 	struct mlx5_hrxq *hrxq = NULL;
 	uint32_t hrxq_idx = 0;
@@ -2220,16 +2221,17 @@ struct mlx5_ind_table_obj *
 	int ret;
 
 	queues_n = rss_desc->hash_fields ? queues_n : 1;
-	ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
+	ind_tbl = standalone ? NULL :
+		  mlx5_ind_table_obj_get(dev, queues, queues_n);
 	if (!ind_tbl)
 		ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n,
-						 rss_desc->standalone);
+						 standalone);
 	if (!ind_tbl)
 		return NULL;
 	hrxq = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_HRXQ], &hrxq_idx);
 	if (!hrxq)
 		goto error;
-	hrxq->standalone = rss_desc->standalone;
+	hrxq->standalone = standalone;
 	hrxq->idx = hrxq_idx;
 	hrxq->ind_table = ind_tbl;
 	hrxq->rss_key_len = rss_key_len;
@@ -2240,7 +2242,7 @@ struct mlx5_ind_table_obj *
 		goto error;
 	return hrxq;
 error:
-	mlx5_ind_table_obj_release(dev, ind_tbl, rss_desc->standalone);
+	mlx5_ind_table_obj_release(dev, ind_tbl, standalone);
 	if (hrxq)
 		mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
 	return NULL;
@@ -2294,7 +2296,7 @@ uint32_t mlx5_hrxq_get(struct rte_eth_dev *dev,
 		.data = rss_desc,
 	};
 
-	if (rss_desc->standalone) {
+	if (rss_desc->shared_rss) {
 		hrxq = __mlx5_hrxq_create(dev, rss_desc);
 	} else {
 		entry = mlx5_cache_register(&priv->hrxqs, &ctx);
@@ -2344,11 +2346,8 @@ struct mlx5_hrxq *
 	struct mlx5_hrxq *hrxq = NULL;
 	int ret;
 
-	if (priv->drop_queue.hrxq) {
-		__atomic_fetch_add(&priv->drop_queue.hrxq->refcnt, 1,
-				   __ATOMIC_RELAXED);
+	if (priv->drop_queue.hrxq)
 		return priv->drop_queue.hrxq;
-	}
 	hrxq = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*hrxq), 0, SOCKET_ID_ANY);
 	if (!hrxq) {
 		DRV_LOG(WARNING,
@@ -2367,7 +2366,6 @@ struct mlx5_hrxq *
 	ret = priv->obj_ops.drop_action_create(dev);
 	if (ret < 0)
 		goto error;
-	__atomic_store_n(&hrxq->refcnt, 1, __ATOMIC_RELAXED);
 	return hrxq;
 error:
 	if (hrxq) {
@@ -2391,14 +2389,14 @@ struct mlx5_hrxq *
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq;
 
-	if (__atomic_sub_fetch(&hrxq->refcnt, 1, __ATOMIC_RELAXED) == 0) {
-		priv->obj_ops.drop_action_destroy(dev);
-		mlx5_free(priv->drop_queue.rxq);
-		mlx5_free(hrxq->ind_table);
-		mlx5_free(hrxq);
-		priv->drop_queue.rxq = NULL;
-		priv->drop_queue.hrxq = NULL;
-	}
+	if (!priv->drop_queue.hrxq)
+		return;
+	priv->obj_ops.drop_action_destroy(dev);
+	mlx5_free(priv->drop_queue.rxq);
+	mlx5_free(hrxq->ind_table);
+	mlx5_free(hrxq);
+	priv->drop_queue.rxq = NULL;
+	priv->drop_queue.hrxq = NULL;
 }
 
 /**
-- 
1.8.3.1


^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [dpdk-dev] [PATCH] net/mlx5: fix shared RSS action release
  2020-11-06  3:08 [dpdk-dev] [PATCH] net/mlx5: fix shared RSS action release Suanming Mou
@ 2020-11-09  8:22 ` Raslan Darawsheh
  2020-11-09 18:16 ` Ferruh Yigit
  2020-11-10  3:28 ` [dpdk-dev] [PATCH v2] " Suanming Mou
  2 siblings, 0 replies; 6+ messages in thread
From: Raslan Darawsheh @ 2020-11-09  8:22 UTC (permalink / raw)
  To: Suanming Mou, Slava Ovsiienko, Matan Azrad; +Cc: dev

Hi,
> -----Original Message-----
> From: Suanming Mou <suanmingm@nvidia.com>
> Sent: Friday, November 6, 2020 5:08 AM
> To: Slava Ovsiienko <viacheslavo@nvidia.com>; Matan Azrad
> <matan@nvidia.com>
> Cc: Raslan Darawsheh <rasland@nvidia.com>; dev@dpdk.org
> Subject: [PATCH] net/mlx5: fix shared RSS action release
> 
> As shared RSS action is global standalone action, the shared action
> should be maintained only by the shared action management functions.
> 
> Currently, the shared RSS action hrxq uses the same fate action type
> with existed general queues, and implicitly uses the deprecate refcnt
> increment to indicate the hrxq is in using which leads the hrxq be
> released when flow is destroyed.
> 
> This commit adds a new fate action type for shared RSS action to
> handle the shared RSS action release correctly.
> 
> Fixes: e1592b6c4dea ("net/mlx5: make Rx queue thread safe")
> 
> Signed-off-by: Suanming Mou <suanmingm@nvidia.com>
> Acked-by: Matan Azrad <matan@nvidia.com>
> ---
Patch applied to next-net-mlx,

Kindest regards,
Raslan Darawsheh

^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [dpdk-dev] [PATCH] net/mlx5: fix shared RSS action release
  2020-11-06  3:08 [dpdk-dev] [PATCH] net/mlx5: fix shared RSS action release Suanming Mou
  2020-11-09  8:22 ` Raslan Darawsheh
@ 2020-11-09 18:16 ` Ferruh Yigit
  2020-11-10  1:44   ` Suanming Mou
  2020-11-10  3:28 ` [dpdk-dev] [PATCH v2] " Suanming Mou
  2 siblings, 1 reply; 6+ messages in thread
From: Ferruh Yigit @ 2020-11-09 18:16 UTC (permalink / raw)
  To: Suanming Mou, viacheslavo, matan; +Cc: rasland, dev

On 11/6/2020 3:08 AM, Suanming Mou wrote:
> As shared RSS action is global standalone action, the shared action
> should be maintained only by the shared action management functions.
> 
> Currently, the shared RSS action hrxq uses the same fate action type
> with existed general queues, and implicitly uses the deprecate refcnt
> increment to indicate the hrxq is in using which leads the hrxq be
> released when flow is destroyed.
> 

Can you please re-word above paragraph to make it easier to understand?

Thanks,
ferruh

> This commit adds a new fate action type for shared RSS action to
> handle the shared RSS action release correctly.
> 
> Fixes: e1592b6c4dea ("net/mlx5: make Rx queue thread safe")
> 
> Signed-off-by: Suanming Mou <suanmingm@nvidia.com>
> Acked-by: Matan Azrad <matan@nvidia.com>

<...>


^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [dpdk-dev] [PATCH] net/mlx5: fix shared RSS action release
  2020-11-09 18:16 ` Ferruh Yigit
@ 2020-11-10  1:44   ` Suanming Mou
  0 siblings, 0 replies; 6+ messages in thread
From: Suanming Mou @ 2020-11-10  1:44 UTC (permalink / raw)
  To: Ferruh Yigit, Slava Ovsiienko, Matan Azrad; +Cc: Raslan Darawsheh, dev



> -----Original Message-----
> From: Ferruh Yigit <ferruh.yigit@intel.com>
> Sent: Tuesday, November 10, 2020 2:17 AM
> To: Suanming Mou <suanmingm@nvidia.com>; Slava Ovsiienko
> <viacheslavo@nvidia.com>; Matan Azrad <matan@nvidia.com>
> Cc: Raslan Darawsheh <rasland@nvidia.com>; dev@dpdk.org
> Subject: Re: [dpdk-dev] [PATCH] net/mlx5: fix shared RSS action release
> 
> On 11/6/2020 3:08 AM, Suanming Mou wrote:
> > As shared RSS action is global standalone action, the shared action
> > should be maintained only by the shared action management functions.
> >
> > Currently, the shared RSS action hrxq uses the same fate action type
> > with existed general queues, and implicitly uses the deprecate refcnt
> > increment to indicate the hrxq is in using which leads the hrxq be
> > released when flow is destroyed.
> >
> 
> Can you please re-word above paragraph to make it easier to understand?

OK, will send V2 with the update. 

> 
> Thanks,
> ferruh
> 
> > This commit adds a new fate action type for shared RSS action to
> > handle the shared RSS action release correctly.
> >
> > Fixes: e1592b6c4dea ("net/mlx5: make Rx queue thread safe")
> >
> > Signed-off-by: Suanming Mou <suanmingm@nvidia.com>
> > Acked-by: Matan Azrad <matan@nvidia.com>
> 
> <...>


^ permalink raw reply	[flat|nested] 6+ messages in thread

* [dpdk-dev] [PATCH v2] net/mlx5: fix shared RSS action release
  2020-11-06  3:08 [dpdk-dev] [PATCH] net/mlx5: fix shared RSS action release Suanming Mou
  2020-11-09  8:22 ` Raslan Darawsheh
  2020-11-09 18:16 ` Ferruh Yigit
@ 2020-11-10  3:28 ` Suanming Mou
  2020-11-11 12:39   ` Ferruh Yigit
  2 siblings, 1 reply; 6+ messages in thread
From: Suanming Mou @ 2020-11-10  3:28 UTC (permalink / raw)
  To: ferruh.yigit, rasland; +Cc: viacheslavo, matan, dev

As shared RSS action will be shared by multiple flows, the action
is created as global standalone action and managed only by the
relevant shared action management functions.

Currently, hrxqs will be created by shared RSS action or general
queue action. For hrxqs created by shared RSS action, they should
also only be released with shared RSS action. It's not correct to
release the shared RSS action hrxqs as general queue actions do
in flow destroy.

This commit adds a new fate action type for shared RSS action to
handle the shared RSS action hrxq release correctly.

Fixes: e1592b6c4dea ("net/mlx5: make Rx queue thread safe")

Signed-off-by: Suanming Mou <suanmingm@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
---

v2:
 - commit log update.

---
 drivers/net/mlx5/mlx5.h            |  3 +-
 drivers/net/mlx5/mlx5_flow.c       |  7 ++-
 drivers/net/mlx5/mlx5_flow.h       |  4 +-
 drivers/net/mlx5/mlx5_flow_dv.c    | 88 +++++++++++++++++++++++++-------------
 drivers/net/mlx5/mlx5_flow_verbs.c |  2 +-
 drivers/net/mlx5/mlx5_rxq.c        | 36 ++++++++--------
 6 files changed, 86 insertions(+), 54 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index b43a8c9..80fffc3 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -789,11 +789,11 @@ struct mlx5_flow_rss_desc {
 	uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */
 	uint32_t key_len; /**< RSS hash key len. */
 	uint32_t tunnel; /**< Queue in tunnel. */
+	uint32_t shared_rss; /**< Shared RSS index. */
 	union {
 		uint16_t *queue; /**< Destination queues. */
 		const uint16_t *const_q; /**< Const pointer convert. */
 	};
-	bool standalone; /**< Queue is standalone or not. */
 };
 
 #define MLX5_PROC_PRIV(port_id) \
@@ -836,7 +836,6 @@ struct mlx5_ind_table_obj {
 __extension__
 struct mlx5_hrxq {
 	struct mlx5_cache_entry entry; /* Cache entry. */
-	uint32_t refcnt; /* Reference counter. */
 	uint32_t standalone:1; /* This object used in shared action. */
 	struct mlx5_ind_table_obj *ind_table; /* Indirection table. */
 	RTE_STD_C11
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index e9c0ddd..f1d5f2d 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -5622,7 +5622,7 @@ struct tunnel_default_miss_ctx {
 		buf->entries = 1;
 		buf->entry[0].pattern = (void *)(uintptr_t)items;
 	}
-	flow->shared_rss = flow_get_shared_rss_action(dev, shared_actions,
+	rss_desc->shared_rss = flow_get_shared_rss_action(dev, shared_actions,
 						      shared_actions_n);
 	/*
 	 * Record the start index when there is a nested call. All sub-flows
@@ -5729,6 +5729,11 @@ struct tunnel_default_miss_ctx {
 	ret = rte_errno; /* Save rte_errno before cleanup. */
 	flow_mreg_del_copy_action(dev, flow);
 	flow_drv_destroy(dev, flow);
+	if (rss_desc->shared_rss)
+		__atomic_sub_fetch(&((struct mlx5_shared_action_rss *)
+			mlx5_ipool_get
+			(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
+			rss_desc->shared_rss))->refcnt, 1, __ATOMIC_RELAXED);
 	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], idx);
 	rte_errno = ret; /* Restore rte_errno. */
 	ret = rte_errno;
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 58185fb..5fab1bd 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -388,6 +388,7 @@ enum mlx5_flow_fate_type {
 	MLX5_FLOW_FATE_PORT_ID,
 	MLX5_FLOW_FATE_DROP,
 	MLX5_FLOW_FATE_DEFAULT_MISS,
+	MLX5_FLOW_FATE_SHARED_RSS,
 	MLX5_FLOW_FATE_MAX,
 };
 
@@ -651,6 +652,8 @@ struct mlx5_flow_handle {
 		/**< Generic value indicates the fate action. */
 		uint32_t rix_default_fate;
 		/**< Indicates default miss fate action. */
+		uint32_t rix_srss;
+		/**< Indicates shared RSS fate action. */
 	};
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
 	struct mlx5_flow_handle_dv dvh;
@@ -1024,7 +1027,6 @@ struct tunnel_tbl_entry {
 /* Flow structure. */
 struct rte_flow {
 	ILIST_ENTRY(uint32_t)next; /**< Index to the next flow structure. */
-	uint32_t shared_rss; /** < Shared RSS action ID. */
 	uint32_t dev_handles;
 	/**< Device flow handles that are part of the flow. */
 	uint32_t drv_type:2; /**< Driver type. */
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index b1928a0..576aee6 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -81,6 +81,8 @@
 static int
 flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
 					uint32_t port_id);
+static void
+flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss);
 
 /**
  * Initialize flow attributes structure according to flow items' types.
@@ -8558,7 +8560,7 @@ struct mlx5_hlist_entry *
 	rss_desc->key_len = MLX5_RSS_HASH_KEY_LEN;
 	rss_desc->hash_fields = dev_flow->hash_fields;
 	rss_desc->tunnel = !!(dh->layers & MLX5_FLOW_LAYER_TUNNEL);
-	rss_desc->standalone = false;
+	rss_desc->shared_rss = 0;
 	*hrxq_idx = mlx5_hrxq_get(dev, rss_desc);
 	if (!*hrxq_idx)
 		return NULL;
@@ -9779,7 +9781,9 @@ struct mlx5_cache_entry *
 			 * when expanding items for RSS.
 			 */
 			action_flags |= MLX5_FLOW_ACTION_RSS;
-			dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
+			dev_flow->handle->fate_action = rss_desc->shared_rss ?
+				MLX5_FLOW_FATE_SHARED_RSS :
+				MLX5_FLOW_FATE_QUEUE;
 			break;
 		case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
 			flow->age = (uint32_t)(uintptr_t)(action->conf);
@@ -10572,10 +10576,10 @@ struct mlx5_cache_entry *
  *
  * @param[in] dev
  *   Pointer to the Ethernet device structure.
- * @param[in] flow
- *   Shred RSS action holding hash RX queue objects.
  * @param[in] dev_flow
  *   Pointer to the sub flow.
+ * @param[in] rss_desc
+ *   Pointer to the RSS descriptor.
  * @param[out] hrxq
  *   Pointer to retrieved hash RX queue object.
  *
@@ -10583,29 +10587,23 @@ struct mlx5_cache_entry *
  *   Valid hash RX queue index, otherwise 0 and rte_errno is set.
  */
 static uint32_t
-__flow_dv_rss_get_hrxq(struct rte_eth_dev *dev, struct rte_flow *flow,
-			   struct mlx5_flow *dev_flow,
-			   struct mlx5_hrxq **hrxq)
+__flow_dv_rss_get_hrxq(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow,
+		       struct mlx5_flow_rss_desc *rss_desc,
+		       struct mlx5_hrxq **hrxq)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
 	uint32_t hrxq_idx;
 
-	if (flow->shared_rss) {
+	if (rss_desc->shared_rss) {
 		hrxq_idx = __flow_dv_action_rss_hrxq_lookup
-				(dev, flow->shared_rss, dev_flow->hash_fields,
+				(dev, rss_desc->shared_rss,
+				 dev_flow->hash_fields,
 				 !!(dev_flow->handle->layers &
 				    MLX5_FLOW_LAYER_TUNNEL));
-		if (hrxq_idx) {
+		if (hrxq_idx)
 			*hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
 					       hrxq_idx);
-			__atomic_fetch_add(&(*hrxq)->refcnt, 1,
-					   __ATOMIC_RELAXED);
-		}
 	} else {
-		struct mlx5_flow_rss_desc *rss_desc =
-				&wks->rss_desc[!!wks->flow_nested_idx];
-
 		*hrxq = flow_dv_hrxq_prepare(dev, dev_flow, rss_desc,
 					     &hrxq_idx);
 	}
@@ -10640,8 +10638,15 @@ struct mlx5_cache_entry *
 	int err;
 	int idx;
 	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
+	struct mlx5_flow_rss_desc *rss_desc =
+				&wks->rss_desc[!!wks->flow_nested_idx];
 
 	MLX5_ASSERT(wks);
+	if (rss_desc->shared_rss) {
+		dh = wks->flows[wks->flow_idx - 1].handle;
+		MLX5_ASSERT(dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS);
+		dh->rix_srss = rss_desc->shared_rss;
+	}
 	for (idx = wks->flow_idx - 1; idx >= wks->flow_nested_idx; idx--) {
 		dev_flow = &wks->flows[idx];
 		dv = &dev_flow->dv;
@@ -10656,11 +10661,12 @@ struct mlx5_cache_entry *
 				dv->actions[n++] =
 						priv->drop_queue.hrxq->action;
 			}
-		} else if (dh->fate_action == MLX5_FLOW_FATE_QUEUE &&
-			   !dv_h->rix_sample && !dv_h->rix_dest_array) {
+		} else if ((dh->fate_action == MLX5_FLOW_FATE_QUEUE &&
+			   !dv_h->rix_sample && !dv_h->rix_dest_array) ||
+			    (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS)) {
 			struct mlx5_hrxq *hrxq = NULL;
 			uint32_t hrxq_idx = __flow_dv_rss_get_hrxq
-						(dev, flow, dev_flow, &hrxq);
+					(dev, dev_flow, rss_desc, &hrxq);
 			if (!hrxq) {
 				rte_flow_error_set
 					(error, rte_errno,
@@ -10668,7 +10674,8 @@ struct mlx5_cache_entry *
 					 "cannot get hash queue");
 				goto error;
 			}
-			dh->rix_hrxq = hrxq_idx;
+			if (dh->fate_action == MLX5_FLOW_FATE_QUEUE)
+				dh->rix_hrxq = hrxq_idx;
 			dv->actions[n++] = hrxq->action;
 		} else if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS) {
 			if (!priv->sh->default_miss_action) {
@@ -10714,6 +10721,8 @@ struct mlx5_cache_entry *
 		if (dh->vf_vlan.tag && dh->vf_vlan.created)
 			mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
 	}
+	if (rss_desc->shared_rss)
+		wks->flows[wks->flow_idx - 1].handle->rix_srss = 0;
 	rte_errno = err; /* Restore rte_errno. */
 	return -rte_errno;
 }
@@ -10898,6 +10907,25 @@ struct mlx5_cache_entry *
 				     &cache->entry);
 }
 
+/**
+ * Release shared RSS action resource.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param srss
+ *   Shared RSS action index.
+ */
+static void
+flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_shared_action_rss *shared_rss;
+
+	shared_rss = mlx5_ipool_get
+			(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], srss);
+	__atomic_sub_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
+}
+
 void
 flow_dv_push_vlan_remove_cb(struct mlx5_cache_list *list,
 			    struct mlx5_cache_entry *entry)
@@ -10962,6 +10990,9 @@ struct mlx5_cache_entry *
 		flow_dv_port_id_action_resource_release(dev,
 				handle->rix_port_id_action);
 		break;
+	case MLX5_FLOW_FATE_SHARED_RSS:
+		flow_dv_shared_rss_action_release(dev, handle->rix_srss);
+		break;
 	default:
 		DRV_LOG(DEBUG, "Incorrect fate action:%d", handle->fate_action);
 		break;
@@ -11128,13 +11159,6 @@ struct mlx5_cache_entry *
 	if (!flow)
 		return;
 	flow_dv_remove(dev, flow);
-	if (flow->shared_rss) {
-		struct mlx5_shared_action_rss *shared_rss = mlx5_ipool_get
-				(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
-							      flow->shared_rss);
-
-		__atomic_sub_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
-	}
 	if (flow->counter) {
 		flow_dv_counter_free(dev, flow->counter);
 		flow->counter = 0;
@@ -11239,6 +11263,8 @@ struct mlx5_cache_entry *
  *
  * @param[in] dev
  *   Pointer to the Ethernet device structure.
+ * @param[in] action_idx
+ *   Shared RSS action ipool index.
  * @param[in, out] action
  *   Partially initialized shared RSS action.
  * @param[out] error
@@ -11250,6 +11276,7 @@ struct mlx5_cache_entry *
  */
 static int
 __flow_dv_action_rss_setup(struct rte_eth_dev *dev,
+			uint32_t action_idx,
 			struct mlx5_shared_action_rss *action,
 			struct rte_flow_error *error)
 {
@@ -11261,7 +11288,8 @@ struct mlx5_cache_entry *
 	rss_desc.key_len = MLX5_RSS_HASH_KEY_LEN;
 	rss_desc.const_q = action->origin.queue;
 	rss_desc.queue_num = action->origin.queue_num;
-	rss_desc.standalone = true;
+	/* Set non-zero value to indicate a shared RSS. */
+	rss_desc.shared_rss = action_idx;
 	for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) {
 		uint32_t hrxq_idx;
 		uint64_t hash_fields = mlx5_rss_hash_fields[i];
@@ -11353,7 +11381,7 @@ struct mlx5_cache_entry *
 	memcpy(shared_action->queue, rss->queue, queue_size);
 	origin->queue = shared_action->queue;
 	origin->queue_num = rss->queue_num;
-	if (__flow_dv_action_rss_setup(dev, shared_action, error))
+	if (__flow_dv_action_rss_setup(dev, idx, shared_action, error))
 		goto error_rss_init;
 	__atomic_add_fetch(&shared_action->refcnt, 1, __ATOMIC_RELAXED);
 	rte_spinlock_lock(&priv->shared_act_sl);
diff --git a/drivers/net/mlx5/mlx5_flow_verbs.c b/drivers/net/mlx5/mlx5_flow_verbs.c
index 28fd120..ac30080 100644
--- a/drivers/net/mlx5/mlx5_flow_verbs.c
+++ b/drivers/net/mlx5/mlx5_flow_verbs.c
@@ -1972,7 +1972,7 @@
 			rss_desc->hash_fields = dev_flow->hash_fields;
 			rss_desc->tunnel = !!(handle->layers &
 					      MLX5_FLOW_LAYER_TUNNEL);
-			rss_desc->standalone = false;
+			rss_desc->shared_rss = 0;
 			hrxq_idx = mlx5_hrxq_get(dev, rss_desc);
 			hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
 					      hrxq_idx);
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index d95a573..a0edbb9 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -2211,8 +2211,9 @@ struct mlx5_ind_table_obj *
 	struct mlx5_priv *priv = dev->data->dev_private;
 	const uint8_t *rss_key = rss_desc->key;
 	uint32_t rss_key_len =  rss_desc->key_len;
+	bool standalone = !!rss_desc->shared_rss;
 	const uint16_t *queues =
-		rss_desc->standalone ? rss_desc->const_q : rss_desc->queue;
+		standalone ? rss_desc->const_q : rss_desc->queue;
 	uint32_t queues_n = rss_desc->queue_num;
 	struct mlx5_hrxq *hrxq = NULL;
 	uint32_t hrxq_idx = 0;
@@ -2220,16 +2221,17 @@ struct mlx5_ind_table_obj *
 	int ret;
 
 	queues_n = rss_desc->hash_fields ? queues_n : 1;
-	ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
+	ind_tbl = standalone ? NULL :
+		  mlx5_ind_table_obj_get(dev, queues, queues_n);
 	if (!ind_tbl)
 		ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n,
-						 rss_desc->standalone);
+						 standalone);
 	if (!ind_tbl)
 		return NULL;
 	hrxq = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_HRXQ], &hrxq_idx);
 	if (!hrxq)
 		goto error;
-	hrxq->standalone = rss_desc->standalone;
+	hrxq->standalone = standalone;
 	hrxq->idx = hrxq_idx;
 	hrxq->ind_table = ind_tbl;
 	hrxq->rss_key_len = rss_key_len;
@@ -2240,7 +2242,7 @@ struct mlx5_ind_table_obj *
 		goto error;
 	return hrxq;
 error:
-	mlx5_ind_table_obj_release(dev, ind_tbl, rss_desc->standalone);
+	mlx5_ind_table_obj_release(dev, ind_tbl, standalone);
 	if (hrxq)
 		mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
 	return NULL;
@@ -2294,7 +2296,7 @@ uint32_t mlx5_hrxq_get(struct rte_eth_dev *dev,
 		.data = rss_desc,
 	};
 
-	if (rss_desc->standalone) {
+	if (rss_desc->shared_rss) {
 		hrxq = __mlx5_hrxq_create(dev, rss_desc);
 	} else {
 		entry = mlx5_cache_register(&priv->hrxqs, &ctx);
@@ -2344,11 +2346,8 @@ struct mlx5_hrxq *
 	struct mlx5_hrxq *hrxq = NULL;
 	int ret;
 
-	if (priv->drop_queue.hrxq) {
-		__atomic_fetch_add(&priv->drop_queue.hrxq->refcnt, 1,
-				   __ATOMIC_RELAXED);
+	if (priv->drop_queue.hrxq)
 		return priv->drop_queue.hrxq;
-	}
 	hrxq = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*hrxq), 0, SOCKET_ID_ANY);
 	if (!hrxq) {
 		DRV_LOG(WARNING,
@@ -2367,7 +2366,6 @@ struct mlx5_hrxq *
 	ret = priv->obj_ops.drop_action_create(dev);
 	if (ret < 0)
 		goto error;
-	__atomic_store_n(&hrxq->refcnt, 1, __ATOMIC_RELAXED);
 	return hrxq;
 error:
 	if (hrxq) {
@@ -2391,14 +2389,14 @@ struct mlx5_hrxq *
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq;
 
-	if (__atomic_sub_fetch(&hrxq->refcnt, 1, __ATOMIC_RELAXED) == 0) {
-		priv->obj_ops.drop_action_destroy(dev);
-		mlx5_free(priv->drop_queue.rxq);
-		mlx5_free(hrxq->ind_table);
-		mlx5_free(hrxq);
-		priv->drop_queue.rxq = NULL;
-		priv->drop_queue.hrxq = NULL;
-	}
+	if (!priv->drop_queue.hrxq)
+		return;
+	priv->obj_ops.drop_action_destroy(dev);
+	mlx5_free(priv->drop_queue.rxq);
+	mlx5_free(hrxq->ind_table);
+	mlx5_free(hrxq);
+	priv->drop_queue.rxq = NULL;
+	priv->drop_queue.hrxq = NULL;
 }
 
 /**
-- 
1.8.3.1


^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [dpdk-dev] [PATCH v2] net/mlx5: fix shared RSS action release
  2020-11-10  3:28 ` [dpdk-dev] [PATCH v2] " Suanming Mou
@ 2020-11-11 12:39   ` Ferruh Yigit
  0 siblings, 0 replies; 6+ messages in thread
From: Ferruh Yigit @ 2020-11-11 12:39 UTC (permalink / raw)
  To: Suanming Mou, rasland; +Cc: viacheslavo, matan, dev

On 11/10/2020 3:28 AM, Suanming Mou wrote:
> As the shared RSS action will be shared by multiple flows, the action
> is created as a global standalone action and managed only by the
> relevant shared action management functions.
> 
> Currently, hrxqs can be created either by the shared RSS action or by
> a general queue action. Hrxqs created by the shared RSS action should
> also be released only through the shared RSS action. It is not correct
> to release the shared RSS action hrxqs the way general queue action
> hrxqs are released in flow destroy.
> 
> This commit adds a new fate action type for shared RSS action to
> handle the shared RSS action hrxq release correctly.
> 
> Fixes: e1592b6c4dea ("net/mlx5: make Rx queue thread safe")
> 
> Signed-off-by: Suanming Mou <suanmingm@nvidia.com>
> Acked-by: Matan Azrad <matan@nvidia.com>

Applied to dpdk-next-net/main, thanks.


^ permalink raw reply	[flat|nested] 6+ messages in thread

end of thread, other threads:[~2020-11-11 12:39 UTC | newest]

Thread overview: 6+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-11-06  3:08 [dpdk-dev] [PATCH] net/mlx5: fix shared RSS action release Suanming Mou
2020-11-09  8:22 ` Raslan Darawsheh
2020-11-09 18:16 ` Ferruh Yigit
2020-11-10  1:44   ` Suanming Mou
2020-11-10  3:28 ` [dpdk-dev] [PATCH v2] " Suanming Mou
2020-11-11 12:39   ` Ferruh Yigit

DPDK patches and discussions

This inbox may be cloned and mirrored by anyone:

	git clone --mirror https://inbox.dpdk.org/dev/0 dev/git/0.git

	# If you have public-inbox 1.1+ installed, you may
	# initialize and index your mirror using the following commands:
	public-inbox-init -V2 dev dev/ https://inbox.dpdk.org/dev \
		dev@dpdk.org
	public-inbox-index dev

Example config snippet for mirrors.
Newsgroup available over NNTP:
	nntp://inbox.dpdk.org/inbox.dpdk.dev


AGPL code for this site: git clone https://public-inbox.org/public-inbox.git