DPDK patches and discussions
 help / color / mirror / Atom feed
From: Gregory Etelson <getelson@nvidia.com>
To: <dev@dpdk.org>, <getelson@nvidia.com>, <viacheslavo@nvidia.com>
Cc: <matan@nvidia.com>
Subject: [dpdk-dev] [PATCH 6/9] net/mlx5: add flex parser DevX object management
Date: Mon, 1 Nov 2021 11:15:11 +0200	[thread overview]
Message-ID: <20211101091514.3891-7-getelson@nvidia.com> (raw)
In-Reply-To: <20211101091514.3891-1-getelson@nvidia.com>

The DevX flex parsers can be shared between representors
within the same IB context. We should put the flex parser
objects into the shared list and engage the standard
mlx5_list_xxx API to manage them.

Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Reviewed-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/linux/mlx5_os.c  |  10 +++
 drivers/net/mlx5/mlx5.c           |   4 +
 drivers/net/mlx5/mlx5.h           |  20 +++++
 drivers/net/mlx5/mlx5_flow_flex.c | 121 +++++++++++++++++++++++++++++-
 4 files changed, 154 insertions(+), 1 deletion(-)

diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c
index cf5c5b9722..b800ddd01a 100644
--- a/drivers/net/mlx5/linux/mlx5_os.c
+++ b/drivers/net/mlx5/linux/mlx5_os.c
@@ -337,6 +337,16 @@ mlx5_alloc_shared_dr(struct mlx5_priv *priv)
 					      flow_dv_dest_array_clone_free_cb);
 	if (!sh->dest_array_list)
 		goto error;
+	/* Init shared flex parsers list; no lcore sharing is needed. */
+	snprintf(s, sizeof(s), "%s_flex_parsers_list", sh->ibdev_name);
+	sh->flex_parsers_dv = mlx5_list_create(s, sh, false,
+					       mlx5_flex_parser_create_cb,
+					       mlx5_flex_parser_match_cb,
+					       mlx5_flex_parser_remove_cb,
+					       mlx5_flex_parser_clone_cb,
+					       mlx5_flex_parser_clone_free_cb);
+	if (!sh->flex_parsers_dv)
+		goto error;
 #endif
 #ifdef HAVE_MLX5DV_DR
 	void *domain;
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 8166d6272c..2c1e6b6637 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -1428,6 +1428,10 @@ mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh)
 		mlx5_flow_os_release_workspace();
 	}
 	pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
+	if (sh->flex_parsers_dv) {
+		mlx5_list_destroy(sh->flex_parsers_dv);
+		sh->flex_parsers_dv = NULL;
+	}
 	/*
 	 *  Ensure there is no async event handler installed.
 	 *  Only primary process handles async device events.
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 75906da2c0..244f45bea2 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -1101,6 +1101,15 @@ struct mlx5_lag {
 	uint8_t affinity_mode; /* TIS or hash based affinity */
 };
 
+/* DevX flex parser context. */
+struct mlx5_flex_parser_devx {
+	struct mlx5_list_entry entry;  /* List element at the beginning. */
+	uint32_t num_samples;
+	void *devx_obj;
+	struct mlx5_devx_graph_node_attr devx_conf;
+	uint32_t sample_ids[MLX5_GRAPH_NODE_SAMPLE_NUM];
+};
+
 /* Port flex item context. */
 struct mlx5_flex_item {
 	struct mlx5_flex_parser_devx *devx_fp; /* DevX flex parser object. */
@@ -1157,6 +1166,7 @@ struct mlx5_dev_ctx_shared {
 	struct mlx5_list *push_vlan_action_list; /* Push VLAN actions. */
 	struct mlx5_list *sample_action_list; /* List of sample actions. */
 	struct mlx5_list *dest_array_list;
+	struct mlx5_list *flex_parsers_dv; /* Flex Item parsers. */
 	/* List of destination array actions. */
 	struct mlx5_flow_counter_mng cmng; /* Counters management structure. */
 	void *default_miss_action; /* Default miss action. */
@@ -1823,4 +1833,14 @@ int flow_dv_item_release(struct rte_eth_dev *dev,
 		    struct rte_flow_error *error);
 int mlx5_flex_item_port_init(struct rte_eth_dev *dev);
 void mlx5_flex_item_port_cleanup(struct rte_eth_dev *dev);
+/* Flex parser list callbacks. */
+struct mlx5_list_entry *mlx5_flex_parser_create_cb(void *list_ctx, void *ctx);
+int mlx5_flex_parser_match_cb(void *list_ctx,
+			      struct mlx5_list_entry *iter, void *ctx);
+void mlx5_flex_parser_remove_cb(void *list_ctx, struct mlx5_list_entry *entry);
+struct mlx5_list_entry *mlx5_flex_parser_clone_cb(void *list_ctx,
+						  struct mlx5_list_entry *entry,
+						  void *ctx);
+void mlx5_flex_parser_clone_free_cb(void *tool_ctx,
+				    struct mlx5_list_entry *entry);
 #endif /* RTE_PMD_MLX5_H_ */
diff --git a/drivers/net/mlx5/mlx5_flow_flex.c b/drivers/net/mlx5/mlx5_flow_flex.c
index b7bc4af6fb..2f87073e97 100644
--- a/drivers/net/mlx5/mlx5_flow_flex.c
+++ b/drivers/net/mlx5/mlx5_flow_flex.c
@@ -45,7 +45,13 @@ mlx5_flex_item_port_cleanup(struct rte_eth_dev *dev)
 
 	for (i = 0; i < MLX5_PORT_FLEX_ITEM_NUM && priv->flex_item_map ; i++) {
 		if (priv->flex_item_map & (1 << i)) {
-			/* DevX object dereferencing should be provided here. */
+			struct mlx5_flex_item *flex = &priv->flex_item[i];
+
+			claim_zero(mlx5_list_unregister
+					(priv->sh->flex_parsers_dv,
+					 &flex->devx_fp->entry));
+			flex->devx_fp = NULL;
+			flex->refcnt = 0;
 			priv->flex_item_map &= ~(1 << i);
 		}
 	}
@@ -127,7 +133,9 @@ flow_dv_item_create(struct rte_eth_dev *dev,
 		    struct rte_flow_error *error)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_flex_parser_devx devx_config = { .devx_obj = NULL };
 	struct mlx5_flex_item *flex;
+	struct mlx5_list_entry *ent;
 
 	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
 	flex = mlx5_flex_alloc(priv);
@@ -137,10 +145,22 @@ flow_dv_item_create(struct rte_eth_dev *dev,
 				   "too many flex items created on the port");
 		return NULL;
 	}
+	ent = mlx5_list_register(priv->sh->flex_parsers_dv, &devx_config);
+	if (!ent) {
+		rte_flow_error_set(error, ENOMEM,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				   "flex item creation failure");
+		goto error;
+	}
+	flex->devx_fp = container_of(ent, struct mlx5_flex_parser_devx, entry);
 	RTE_SET_USED(conf);
 	/* Mark initialized flex item valid. */
 	__atomic_add_fetch(&flex->refcnt, 1, __ATOMIC_RELEASE);
 	return (struct rte_flow_item_flex_handle *)flex;
+
+error:
+	mlx5_flex_free(priv, flex);
+	return NULL;
 }
 
 /**
@@ -166,6 +186,7 @@ flow_dv_item_release(struct rte_eth_dev *dev,
 	struct mlx5_flex_item *flex =
 		(struct mlx5_flex_item *)(uintptr_t)handle;
 	uint32_t old_refcnt = 1;
+	int rc;
 
 	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
 	rte_spinlock_lock(&priv->flex_item_sl);
@@ -184,6 +205,104 @@ flow_dv_item_release(struct rte_eth_dev *dev,
 	}
 	/* Flex item is marked as invalid, we can leave locked section. */
 	rte_spinlock_unlock(&priv->flex_item_sl);
+	MLX5_ASSERT(flex->devx_fp);
+	rc = mlx5_list_unregister(priv->sh->flex_parsers_dv,
+				  &flex->devx_fp->entry);
+	flex->devx_fp = NULL;
 	mlx5_flex_free(priv, flex);
+	if (rc < 0)
+		return rte_flow_error_set(error, EBUSY,
+					  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+					  "flex item release failure");
 	return 0;
 }
+
+/* DevX flex parser list callbacks. */
+struct mlx5_list_entry *
+mlx5_flex_parser_create_cb(void *list_ctx, void *ctx)
+{
+	struct mlx5_dev_ctx_shared *sh = list_ctx;
+	struct mlx5_flex_parser_devx *fp, *conf = ctx;
+	int ret;
+
+	fp = mlx5_malloc(MLX5_MEM_ZERO, sizeof(struct mlx5_flex_parser_devx),
+			 0, SOCKET_ID_ANY);
+	if (!fp)
+		return NULL;
+	/* Copy the requested configurations. */
+	fp->num_samples = conf->num_samples;
+	memcpy(&fp->devx_conf, &conf->devx_conf, sizeof(fp->devx_conf));
+	/* Create DevX flex parser. */
+	fp->devx_obj = mlx5_devx_cmd_create_flex_parser(sh->cdev->ctx,
+							&fp->devx_conf);
+	if (!fp->devx_obj)
+		goto error;
+	/* Query the firmware assigned sample ids. */
+	ret = mlx5_devx_cmd_query_parse_samples(fp->devx_obj,
+						fp->sample_ids,
+						fp->num_samples);
+	if (ret)
+		goto error;
+	DRV_LOG(DEBUG, "DEVx flex parser %p created, samples num: %u",
+		(const void *)fp, fp->num_samples);
+	return &fp->entry;
+error:
+	if (fp->devx_obj)
+		mlx5_devx_cmd_destroy((void *)(uintptr_t)fp->devx_obj);
+	if (fp)
+		mlx5_free(fp);
+	return NULL;
+}
+
+int
+mlx5_flex_parser_match_cb(void *list_ctx,
+			  struct mlx5_list_entry *iter, void *ctx)
+{
+	struct mlx5_flex_parser_devx *fp =
+		container_of(iter, struct mlx5_flex_parser_devx, entry);
+	struct mlx5_flex_parser_devx *org =
+		container_of(ctx, struct mlx5_flex_parser_devx, entry);
+
+	RTE_SET_USED(list_ctx);
+	return !iter || !ctx || memcmp(&fp->devx_conf,
+				       &org->devx_conf,
+				       sizeof(fp->devx_conf));
+}
+
+void
+mlx5_flex_parser_remove_cb(void *list_ctx, struct mlx5_list_entry *entry)
+{
+	struct mlx5_flex_parser_devx *fp =
+		container_of(entry, struct mlx5_flex_parser_devx, entry);
+
+	RTE_SET_USED(list_ctx);
+	MLX5_ASSERT(fp->devx_obj);
+	claim_zero(mlx5_devx_cmd_destroy(fp->devx_obj));
+	DRV_LOG(DEBUG, "DEVx flex parser %p destroyed", (const void *)fp);
+	mlx5_free(entry);
+}
+
+struct mlx5_list_entry *
+mlx5_flex_parser_clone_cb(void *list_ctx,
+			  struct mlx5_list_entry *entry, void *ctx)
+{
+	struct mlx5_flex_parser_devx *fp;
+
+	RTE_SET_USED(list_ctx);
+	RTE_SET_USED(entry);
+	fp = mlx5_malloc(0, sizeof(struct mlx5_flex_parser_devx),
+			 0, SOCKET_ID_ANY);
+	if (!fp)
+		return NULL;
+	memcpy(fp, ctx, sizeof(struct mlx5_flex_parser_devx));
+	return &fp->entry;
+}
+
+void
+mlx5_flex_parser_clone_free_cb(void *list_ctx, struct mlx5_list_entry *entry)
+{
+	struct mlx5_flex_parser_devx *fp =
+		container_of(entry, struct mlx5_flex_parser_devx, entry);
+	RTE_SET_USED(list_ctx);
+	mlx5_free(fp);
+}
-- 
2.33.1


  parent reply	other threads:[~2021-11-01  9:16 UTC|newest]

Thread overview: 22+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2021-11-01  9:15 [dpdk-dev] [PATCH 0/9] net/mlx5: add flex item support Gregory Etelson
2021-11-01  9:15 ` [dpdk-dev] [PATCH 1/9] common/mlx5: refactor HCA attributes query Gregory Etelson
2021-11-01  9:15 ` [dpdk-dev] [PATCH 2/9] common/mlx5: extend flex parser capabilities Gregory Etelson
2021-11-01  9:15 ` [dpdk-dev] [PATCH 3/9] common/mlx5: fix flex parser DevX creation routine Gregory Etelson
2021-11-01  9:15 ` [dpdk-dev] [PATCH 4/9] net/mlx5: update eCPRI flex parser structures Gregory Etelson
2021-11-01  9:15 ` [dpdk-dev] [PATCH 5/9] net/mlx5: add flex item API Gregory Etelson
2021-11-01  9:15 ` Gregory Etelson [this message]
2021-11-01  9:15 ` [dpdk-dev] [PATCH 7/9] net/mlx5: translate flex item configuration Gregory Etelson
2021-11-01  9:15 ` [dpdk-dev] [PATCH 8/9] net/mlx5: translate flex item pattern into matcher Gregory Etelson
2021-11-01  9:15 ` [dpdk-dev] [PATCH 9/9] net/mlx5: handle flex item in flows Gregory Etelson
2021-11-02  8:53 ` [dpdk-dev] [PATCH v2 0/9] net/mlx5: add flex item support Gregory Etelson
2021-11-02  8:53   ` [dpdk-dev] [PATCH v2 1/9] common/mlx5: refactor HCA attributes query Gregory Etelson
2021-11-02  8:53   ` [dpdk-dev] [PATCH v2 2/9] common/mlx5: extend flex parser capabilities Gregory Etelson
2021-11-02  8:53   ` [dpdk-dev] [PATCH v2 3/9] common/mlx5: fix flex parser DevX creation routine Gregory Etelson
2021-11-02  8:53   ` [dpdk-dev] [PATCH v2 4/9] net/mlx5: update eCPRI flex parser structures Gregory Etelson
2021-11-02  8:53   ` [dpdk-dev] [PATCH v2 5/9] net/mlx5: add flex item API Gregory Etelson
2021-11-02  8:53   ` [dpdk-dev] [PATCH v2 6/9] net/mlx5: add flex parser DevX object management Gregory Etelson
2021-11-02  8:53   ` [dpdk-dev] [PATCH v2 7/9] net/mlx5: translate flex item configuration Gregory Etelson
2021-11-02  8:53   ` [dpdk-dev] [PATCH v2 8/9] net/mlx5: translate flex item pattern into matcher Gregory Etelson
2021-11-02  8:53   ` [dpdk-dev] [PATCH v2 9/9] net/mlx5: handle flex item in flows Gregory Etelson
2021-11-03 12:57   ` [dpdk-dev] [PATCH v2 0/9] net/mlx5: add flex item support Raslan Darawsheh
2021-11-03 18:24   ` Ferruh Yigit

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20211101091514.3891-7-getelson@nvidia.com \
    --to=getelson@nvidia.com \
    --cc=dev@dpdk.org \
    --cc=matan@nvidia.com \
    --cc=viacheslavo@nvidia.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).