DPDK patches and discussions
From: Ori Kam <orika@mellanox.com>
To: Matan Azrad <matan@mellanox.com>,
	Yongseok Koh <yskoh@mellanox.com>,
	Shahaf Shuler <shahafs@mellanox.com>
Cc: "dev@dpdk.org" <dev@dpdk.org>, Ori Kam <orika@mellanox.com>,
	Slava Ovsiienko <viacheslavo@mellanox.com>
Subject: [dpdk-dev] [PATCH v3 2/3] net/mlx5: add Direct Rules API
Date: Wed, 3 Apr 2019 13:21:57 +0000	[thread overview]
Message-ID: <1554297697-13525-3-git-send-email-orika@mellanox.com> (raw)
In-Reply-To: <1554297697-13525-1-git-send-email-orika@mellanox.com>

Add calls to the Direct Rules API inside the glue functions.
Due to differences in parameters between the Direct Rules and the
Direct Verbs APIs, some of the glue function prototypes were updated.

Signed-off-by: Ori Kam <orika@mellanox.com>
Acked-by: Shahaf Shuler <shahafs@mellanox.com>
---
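Note (for reviewers, not part of the commit message): every new Direct
Rules call in mlx5_glue.c is guarded by HAVE_MLX5DV_DR, which the new
Makefile autoconf check below defines when rdma-core exposes
MLX5DV_DR_NS_TYPE_TERMINATING. A minimal sketch of the wrapper pattern
used throughout the glue changes, taken from the diff below with
comments added here only for illustration:

static void *
mlx5_glue_dr_create_ns(struct ibv_context *ctx,
		       enum mlx5dv_dr_ns_domain domain)
{
#ifdef HAVE_MLX5DV_DR
	/* rdma-core provides Direct Rules - create the name space. */
	return mlx5dv_dr_create_ns(ctx, domain);
#else
	/* No Direct Rules support - keep the symbol, report failure. */
	(void)ctx;
	(void)domain;
	return NULL;
#endif
}

For the wrappers that have a Direct Verbs equivalent (matcher, flow,
actions), the #else branch keeps the existing mlx5dv_* calls, so builds
against an rdma-core without Direct Rules continue to work.
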
 drivers/net/mlx5/Makefile       |   5 ++
 drivers/net/mlx5/mlx5.c         |  16 ++++
 drivers/net/mlx5/mlx5.h         |  15 ++++
 drivers/net/mlx5/mlx5_flow.c    |   1 +
 drivers/net/mlx5/mlx5_flow.h    |   6 +-
 drivers/net/mlx5/mlx5_flow_dv.c | 103 ++++++++++++++++++++----
 drivers/net/mlx5/mlx5_glue.c    | 170 ++++++++++++++++++++++++++++++++++------
 drivers/net/mlx5/mlx5_glue.h    |  31 +++++---
 drivers/net/mlx5/mlx5_prm.h     |  24 +++++-
 9 files changed, 318 insertions(+), 53 deletions(-)

diff --git a/drivers/net/mlx5/Makefile b/drivers/net/mlx5/Makefile
index 0d20f0f..93bc869 100644
--- a/drivers/net/mlx5/Makefile
+++ b/drivers/net/mlx5/Makefile
@@ -156,6 +156,11 @@ mlx5_autoconf.h.new: $(RTE_SDK)/buildtools/auto-config-h.sh
 		func mlx5dv_create_flow_action_packet_reformat \
 		$(AUTOCONF_OUTPUT)
 	$Q sh -- '$<' '$@' \
+		HAVE_MLX5DV_DR \
+		infiniband/mlx5dv.h \
+		enum MLX5DV_DR_NS_TYPE_TERMINATING \
+		$(AUTOCONF_OUTPUT)
+	$Q sh -- '$<' '$@' \
 		HAVE_IBV_DEVX_OBJ \
 		infiniband/mlx5dv.h \
 		func mlx5dv_devx_obj_create \
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 4044505..65aa9cf 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -1483,6 +1483,22 @@ struct mlx5_dev_spawn_data {
 			priv->tcf_context = NULL;
 		}
 	}
+#ifdef HAVE_MLX5DV_DR
+	priv->rx_ns = mlx5dv_dr_create_ns
+		(sh->ctx, MLX5DV_DR_NS_DOMAIN_INGRESS_BYPASS);
+	if (priv->rx_ns == NULL) {
+		DRV_LOG(ERR, "mlx5dv_dr_create_ns failed");
+		err = errno;
+		goto error;
+	}
+	priv->tx_ns = mlx5dv_dr_create_ns(sh->ctx,
+					  MLX5DV_DR_NS_DOMAIN_EGRESS_BYPASS);
+	if (priv->tx_ns == NULL) {
+		DRV_LOG(ERR, "mlx5dv_dr_create_ns failed");
+		err = errno;
+		goto error;
+	}
+#endif
 	TAILQ_INIT(&priv->flows);
 	TAILQ_INIT(&priv->ctrl_flows);
 	/* Hint libmlx5 to use PMD allocator for data plane resources */
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 784bf9b..d4963cb 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -259,6 +259,15 @@ struct mlx5_ibv_shared {
 	struct mlx5_ibv_shared_port port[]; /* per device port data array. */
 };
 
+/* Table structure. */
+struct mlx5_flow_tbl_resource {
+	void *obj; /**< Pointer to DR table object. */
+	rte_atomic32_t refcnt; /**< Reference counter. */
+};
+
+#define MLX5_MAX_TABLES 1024
+#define MLX5_GROUP_FACTOR 1
+
 struct mlx5_priv {
 	LIST_ENTRY(mlx5_priv) mem_event_cb;
 	/**< Called by memory event callback. */
@@ -326,6 +335,12 @@ struct mlx5_priv {
 	/* UAR same-page access control required in 32bit implementations. */
 #endif
 	struct mlx5_flow_tcf_context *tcf_context; /* TC flower context. */
+	void *rx_ns; /* RX Direct Rules name space handle. */
+	struct mlx5_flow_tbl_resource rx_tbl[MLX5_MAX_TABLES];
+	/* RX Direct Rules tables. */
+	void *tx_ns; /* TX Direct Rules name space handle. */
+	struct mlx5_flow_tbl_resource tx_tbl[MLX5_MAX_TABLES];
+	/* TX Direct Rules tables. */
 };
 
 #define PORT_ID(priv) ((priv)->dev_data->port_id)
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index bc6a7c1..9dc492a 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -2084,6 +2084,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 		flow_size += RTE_ALIGN_CEIL(sizeof(uint16_t), sizeof(void *));
 	flow = rte_calloc(__func__, 1, flow_size, 0);
 	flow->drv_type = flow_get_drv_type(dev, attr);
+	flow->ingress = attr->ingress;
 	assert(flow->drv_type > MLX5_FLOW_TYPE_MIN &&
 	       flow->drv_type < MLX5_FLOW_TYPE_MAX);
 	flow->queue = (void *)(flow + 1);
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 4f69ae2..8ba37a0 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -204,6 +204,7 @@ struct mlx5_flow_dv_matcher {
 	uint16_t crc; /**< CRC of key. */
 	uint16_t priority; /**< Priority of matcher. */
 	uint8_t egress; /**< Egress matcher. */
+	uint32_t group; /**< The matcher group. */
 	struct mlx5_flow_dv_match_params mask; /**< Matcher mask. */
 };
 
@@ -220,6 +221,7 @@ struct mlx5_flow_dv_encap_decap_resource {
 	size_t size;
 	uint8_t reformat_type;
 	uint8_t ft_type;
+	uint64_t flags; /**< Flags for RDMA API. */
 };
 
 /* Tag resource structure. */
@@ -348,7 +350,7 @@ struct mlx5_flow_counter {
 /* Flow structure. */
 struct rte_flow {
 	TAILQ_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
-	enum mlx5_flow_drv_type drv_type; /**< Drvier type. */
+	enum mlx5_flow_drv_type drv_type; /**< Driver type. */
 	struct mlx5_flow_counter *counter; /**< Holds flow counter. */
 	struct mlx5_flow_dv_tag_resource *tag_resource;
 	/**< pointer to the tag action. */
@@ -360,6 +362,8 @@ struct rte_flow {
 	uint64_t actions;
 	/**< Bit-fields of detected actions, see MLX5_FLOW_ACTION_*. */
 	struct mlx5_fdir *fdir; /**< Pointer to associated FDIR if any. */
+	uint8_t ingress; /**< 1 if the flow is ingress. */
+	uint32_t group; /**< The group index. */
 };
 
 typedef int (*mlx5_flow_validate_t)(struct rte_eth_dev *dev,
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index d9e2ac9..dd5b541 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -809,11 +809,20 @@ struct field_modify_info modify_tcp[] = {
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_flow_dv_encap_decap_resource *cache_resource;
+	struct rte_flow *flow = dev_flow->flow;
+	struct mlx5dv_dr_ns *ns;
+
+	resource->flags = flow->group ? 0 : 1;
+	if (flow->ingress)
+		ns = priv->rx_ns;
+	else
+		ns = priv->tx_ns;
 
 	/* Lookup a matching resource from cache. */
 	LIST_FOREACH(cache_resource, &priv->encaps_decaps, next) {
 		if (resource->reformat_type == cache_resource->reformat_type &&
 		    resource->ft_type == cache_resource->ft_type &&
+		    resource->flags == cache_resource->flags &&
 		    resource->size == cache_resource->size &&
 		    !memcmp((const void *)resource->buf,
 			    (const void *)cache_resource->buf,
@@ -835,10 +844,10 @@ struct field_modify_info modify_tcp[] = {
 	*cache_resource = *resource;
 	cache_resource->verbs_action =
 		mlx5_glue->dv_create_flow_action_packet_reformat
-			(priv->sh->ctx, cache_resource->size,
-			 (cache_resource->size ? cache_resource->buf : NULL),
-			 cache_resource->reformat_type,
-			 cache_resource->ft_type);
+			(priv->sh->ctx, cache_resource->reformat_type,
+			 cache_resource->ft_type, ns, cache_resource->flags,
+			 cache_resource->size,
+			 (cache_resource->size ? cache_resource->buf : NULL));
 	if (!cache_resource->verbs_action) {
 		rte_free(cache_resource);
 		return rte_flow_error_set(error, ENOMEM,
@@ -1442,6 +1451,10 @@ struct field_modify_info modify_tcp[] = {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_flow_dv_modify_hdr_resource *cache_resource;
 
+	struct mlx5dv_dr_ns *ns =
+		resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX  ?
+		priv->tx_ns : priv->rx_ns;
+
 	/* Lookup a matching resource from cache. */
 	LIST_FOREACH(cache_resource, &priv->modify_cmds, next) {
 		if (resource->ft_type == cache_resource->ft_type &&
@@ -1467,11 +1480,11 @@ struct field_modify_info modify_tcp[] = {
 	*cache_resource = *resource;
 	cache_resource->verbs_action =
 		mlx5_glue->dv_create_flow_action_modify_header
-					(priv->sh->ctx,
+					(priv->sh->ctx, cache_resource->ft_type,
+					 ns, 0,
 					 cache_resource->actions_num *
 					 sizeof(cache_resource->actions[0]),
-					 (uint64_t *)cache_resource->actions,
-					 cache_resource->ft_type);
+					 (uint64_t *)cache_resource->actions);
 	if (!cache_resource->verbs_action) {
 		rte_free(cache_resource);
 		return rte_flow_error_set(error, ENOMEM,
@@ -1596,11 +1609,13 @@ struct field_modify_info modify_tcp[] = {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	uint32_t priority_max = priv->config.flow_prio - 1;
 
+#ifdef HAVE_MLX5DV_DR
 	if (attributes->group)
 		return rte_flow_error_set(error, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
 					  NULL,
 					  "groups is not supported");
+#endif
 	if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
 	    attributes->priority >= priority_max)
 		return rte_flow_error_set(error, ENOTSUP,
@@ -2173,11 +2188,13 @@ struct field_modify_info modify_tcp[] = {
  *   Flow pattern to translate.
  * @param[in] inner
  *   Item is inner pattern.
+ * @param[in] group
+ *   The group to insert the rule into.
  */
 static void
 flow_dv_translate_item_ipv4(void *matcher, void *key,
 			    const struct rte_flow_item *item,
-			    int inner)
+			    int inner, uint32_t group)
 {
 	const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
 	const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
@@ -2204,7 +2221,10 @@ struct field_modify_info modify_tcp[] = {
 					 outer_headers);
 		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
 	}
-	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
+	if (group == 0)
+		MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
+	else
+		MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x4);
 	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 4);
 	if (!ipv4_v)
 		return;
@@ -2246,11 +2266,13 @@ struct field_modify_info modify_tcp[] = {
  *   Flow pattern to translate.
  * @param[in] inner
  *   Item is inner pattern.
+ * @param[in] group
+ *   The group to insert the rule into.
  */
 static void
 flow_dv_translate_item_ipv6(void *matcher, void *key,
 			    const struct rte_flow_item *item,
-			    int inner)
+			    int inner, uint32_t group)
 {
 	const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
 	const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
@@ -2287,7 +2309,10 @@ struct field_modify_info modify_tcp[] = {
 					 outer_headers);
 		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
 	}
-	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
+	if (group == 0)
+		MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
+	else
+		MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x6);
 	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 6);
 	if (!ipv6_v)
 		return;
@@ -2727,7 +2752,11 @@ struct field_modify_info modify_tcp[] = {
 	match_criteria_enable |=
 		(!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
 		MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
-
+#ifdef HAVE_MLX5DV_DR
+	match_criteria_enable |=
+		(!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
+		MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
+#endif
 	return match_criteria_enable;
 }
 
@@ -2758,12 +2787,14 @@ struct field_modify_info modify_tcp[] = {
 		.type = IBV_FLOW_ATTR_NORMAL,
 		.match_mask = (void *)&matcher->mask,
 	};
+	struct mlx5_flow_tbl_resource *tbl = NULL;
 
 	/* Lookup from cache. */
 	LIST_FOREACH(cache_matcher, &priv->matchers, next) {
 		if (matcher->crc == cache_matcher->crc &&
 		    matcher->priority == cache_matcher->priority &&
 		    matcher->egress == cache_matcher->egress &&
+		    matcher->group == cache_matcher->group &&
 		    !memcmp((const void *)matcher->mask.buf,
 			    (const void *)cache_matcher->mask.buf,
 			    cache_matcher->mask.size)) {
@@ -2778,6 +2809,27 @@ struct field_modify_info modify_tcp[] = {
 			return 0;
 		}
 	}
+#ifdef HAVE_MLX5DV_DR
+	if (matcher->egress) {
+		tbl = &priv->tx_tbl[matcher->group];
+		if (!tbl->obj)
+			tbl->obj = mlx5_glue->dr_create_flow_tbl
+				(priv->tx_ns,
+				 matcher->group * MLX5_GROUP_FACTOR);
+	} else {
+		tbl = &priv->rx_tbl[matcher->group];
+		if (!tbl->obj)
+			tbl->obj = mlx5_glue->dr_create_flow_tbl
+				(priv->rx_ns,
+				 matcher->group * MLX5_GROUP_FACTOR);
+	}
+	if (!tbl->obj)
+		return rte_flow_error_set(error, ENOMEM,
+					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+					  NULL, "cannot create table");
+
+	rte_atomic32_inc(&tbl->refcnt);
+#endif
 	/* Register new matcher. */
 	cache_matcher = rte_calloc(__func__, 1, sizeof(*cache_matcher), 0);
 	if (!cache_matcher)
@@ -2791,9 +2843,16 @@ struct field_modify_info modify_tcp[] = {
 	if (matcher->egress)
 		dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
 	cache_matcher->matcher_object =
-		mlx5_glue->dv_create_flow_matcher(priv->sh->ctx, &dv_attr);
+		mlx5_glue->dv_create_flow_matcher(priv->sh->ctx, &dv_attr,
+						  tbl->obj);
 	if (!cache_matcher->matcher_object) {
 		rte_free(cache_matcher);
+#ifdef HAVE_MLX5DV_DR
+		if (rte_atomic32_dec_and_test(&tbl->refcnt)) {
+			mlx5_glue->dr_destroy_flow_tbl(tbl->obj);
+			tbl->obj = NULL;
+		}
+#endif
 		return rte_flow_error_set(error, ENOMEM,
 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 					  NULL, "cannot create matcher");
@@ -2805,6 +2864,7 @@ struct field_modify_info modify_tcp[] = {
 		cache_matcher->priority,
 		cache_matcher->egress ? "tx" : "rx", (void *)cache_matcher,
 		rte_atomic32_read(&cache_matcher->refcnt));
+	rte_atomic32_inc(&tbl->refcnt);
 	return 0;
 }
 
@@ -3226,7 +3286,7 @@ struct field_modify_info modify_tcp[] = {
 			break;
 		case RTE_FLOW_ITEM_TYPE_IPV4:
 			flow_dv_translate_item_ipv4(match_mask, match_value,
-						    items, tunnel);
+						    items, tunnel, attr->group);
 			matcher.priority = MLX5_PRIORITY_MAP_L3;
 			dev_flow->dv.hash_fields |=
 				mlx5_flow_hashfields_adjust
@@ -3238,7 +3298,7 @@ struct field_modify_info modify_tcp[] = {
 			break;
 		case RTE_FLOW_ITEM_TYPE_IPV6:
 			flow_dv_translate_item_ipv6(match_mask, match_value,
-						    items, tunnel);
+						    items, tunnel, attr->group);
 			matcher.priority = MLX5_PRIORITY_MAP_L3;
 			dev_flow->dv.hash_fields |=
 				mlx5_flow_hashfields_adjust
@@ -3316,6 +3376,7 @@ struct field_modify_info modify_tcp[] = {
 	matcher.priority = mlx5_flow_adjust_priority(dev, priority,
 						     matcher.priority);
 	matcher.egress = attr->egress;
+	matcher.group = attr->group;
 	if (flow_dv_matcher_register(dev, &matcher, dev_flow, error))
 		return -rte_errno;
 	return 0;
@@ -3431,6 +3492,8 @@ struct field_modify_info modify_tcp[] = {
 			struct mlx5_flow *flow)
 {
 	struct mlx5_flow_dv_matcher *matcher = flow->dv.matcher;
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_flow_tbl_resource *tbl;
 
 	assert(matcher->matcher_object);
 	DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
@@ -3440,6 +3503,14 @@ struct field_modify_info modify_tcp[] = {
 		claim_zero(mlx5_glue->dv_destroy_flow_matcher
 			   (matcher->matcher_object));
 		LIST_REMOVE(matcher, next);
+		if (matcher->egress)
+			tbl = &priv->tx_tbl[matcher->group];
+		else
+			tbl = &priv->rx_tbl[matcher->group];
+		if (rte_atomic32_dec_and_test(&tbl->refcnt)) {
+			mlx5_glue->dr_destroy_flow_tbl(tbl->obj);
+			tbl->obj = NULL;
+		}
 		rte_free(matcher);
 		DRV_LOG(DEBUG, "port %u matcher %p: removed",
 			dev->data->port_id, (void *)matcher);
@@ -3529,7 +3600,7 @@ struct field_modify_info modify_tcp[] = {
 	LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
 		dv = &dev_flow->dv;
 		if (dv->flow) {
-			claim_zero(mlx5_glue->destroy_flow(dv->flow));
+			claim_zero(mlx5_glue->dv_destroy_flow(dv->flow));
 			dv->flow = NULL;
 		}
 		if (dv->hrxq) {
diff --git a/drivers/net/mlx5/mlx5_glue.c b/drivers/net/mlx5/mlx5_glue.c
index 4b5aade..b0b144c 100644
--- a/drivers/net/mlx5/mlx5_glue.c
+++ b/drivers/net/mlx5/mlx5_glue.c
@@ -178,6 +178,9 @@
 mlx5_glue_destroy_flow_action(void *action)
 {
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
+#ifdef HAVE_MLX5DV_DR
+	return mlx5dv_dr_destroy_action(action);
+#else
 	struct mlx5dv_flow_action_attr *attr = action;
 	int res = 0;
 	switch (attr->type) {
@@ -189,6 +192,7 @@
 	}
 	free(action);
 	return res;
+#endif
 #else
 	(void)action;
 	return ENOTSUP;
@@ -365,6 +369,53 @@
 	return ibv_cq_ex_to_cq(cq);
 }
 
+static void *
+mlx5_glue_dr_create_flow_tbl(void *ns, uint32_t level)
+{
+#ifdef HAVE_MLX5DV_DR
+	return mlx5dv_dr_create_ft(ns, level);
+#else
+	(void)ns;
+	(void)level;
+	return NULL;
+#endif
+}
+
+static int
+mlx5_glue_dr_destroy_flow_tbl(void *tbl)
+{
+#ifdef HAVE_MLX5DV_DR
+	return mlx5dv_dr_destroy_ft(tbl);
+#else
+	(void)tbl;
+	return 0;
+#endif
+}
+
+static void *
+mlx5_glue_dr_create_ns(struct ibv_context *ctx,
+		       enum  mlx5dv_dr_ns_domain domain)
+{
+#ifdef HAVE_MLX5DV_DR
+	return mlx5dv_dr_create_ns(ctx, domain);
+#else
+	(void)ctx;
+	(void)domain;
+	return NULL;
+#endif
+}
+
+static int
+mlx5_glue_dr_destroy_ns(void *ns)
+{
+#ifdef HAVE_MLX5DV_DR
+	return mlx5dv_dr_destroy_ns(ns);
+#else
+	(void)ns;
+	return 0;
+#endif
+}
+
 static struct ibv_cq_ex *
 mlx5_glue_dv_create_cq(struct ibv_context *context,
 		       struct ibv_cq_init_attr_ex *cq_attr,
@@ -423,26 +474,40 @@
 #endif
 }
 
-static struct mlx5dv_flow_matcher *
+static void *
 mlx5_glue_dv_create_flow_matcher(struct ibv_context *context,
-				 struct mlx5dv_flow_matcher_attr *matcher_attr)
+				 struct mlx5dv_flow_matcher_attr *matcher_attr,
+				 void *tbl)
 {
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
+#ifdef HAVE_MLX5DV_DR
+	(void)context;
+	return mlx5dv_dr_create_matcher(tbl, matcher_attr->priority,
+				       matcher_attr->match_criteria_enable,
+				       matcher_attr->match_mask);
+#else
+	(void)tbl;
 	return mlx5dv_create_flow_matcher(context, matcher_attr);
+#endif
 #else
 	(void)context;
 	(void)matcher_attr;
+	(void)tbl;
 	return NULL;
 #endif
 }
 
-static struct ibv_flow *
-mlx5_glue_dv_create_flow(struct mlx5dv_flow_matcher *matcher,
-			 struct mlx5dv_flow_match_parameters *match_value,
+static void *
+mlx5_glue_dv_create_flow(void *matcher,
+			 void *match_value,
 			 size_t num_actions,
 			 void *actions[])
 {
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
+#ifdef HAVE_MLX5DV_DR
+	return mlx5dv_dr_create_rule(matcher, match_value, num_actions,
+				     (struct mlx5dv_dr_action **)actions);
+#else
 	struct mlx5dv_flow_action_attr actions_attr[8];
 
 	if (num_actions > 8)
@@ -452,6 +517,7 @@
 			*((struct mlx5dv_flow_action_attr *)(actions[i]));
 	return mlx5dv_create_flow(matcher, match_value,
 				  num_actions, actions_attr);
+#endif
 #else
 	(void)matcher;
 	(void)match_value;
@@ -461,21 +527,13 @@
 #endif
 }
 
-static int
-mlx5_glue_dv_destroy_flow_matcher(struct mlx5dv_flow_matcher *matcher)
-{
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
-	return mlx5dv_destroy_flow_matcher(matcher);
-#else
-	(void)matcher;
-	return 0;
-#endif
-}
-
 static void *
 mlx5_glue_dv_create_flow_action_counter(void *counter_obj, uint32_t offset)
 {
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
+#ifdef HAVE_MLX5DV_DR
+	return mlx5dv_dr_create_action_devx_counter(counter_obj, offset);
+#else
 	struct mlx5dv_flow_action_attr *action;
 
 	(void)offset;
@@ -485,6 +543,7 @@
 	action->type = MLX5DV_FLOW_ACTION_COUNTERS_DEVX;
 	action->obj = counter_obj;
 	return action;
+#endif
 #else
 	(void)counter_obj;
 	(void)offset;
@@ -496,6 +555,9 @@
 mlx5_glue_dv_create_flow_action_dest_ibv_qp(void *qp)
 {
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
+#ifdef HAVE_MLX5DV_DR
+	return mlx5dv_dr_create_action_dest_ibv_qp(qp);
+#else
 	struct mlx5dv_flow_action_attr *action;
 
 	action = malloc(sizeof(*action));
@@ -504,6 +566,7 @@
 	action->type = MLX5DV_FLOW_ACTION_DEST_IBV_QP;
 	action->obj = qp;
 	return action;
+#endif
 #else
 	(void)qp;
 	return NULL;
@@ -513,13 +576,22 @@
 static void *
 mlx5_glue_dv_create_flow_action_modify_header
 					(struct ibv_context *ctx,
+					 enum mlx5dv_flow_table_type ft_type,
+					 void *ns, uint64_t flags,
 					 size_t actions_sz,
-					 uint64_t actions[],
-					 enum mlx5dv_flow_table_type ft_type)
+					 uint64_t actions[])
 {
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
+#ifdef HAVE_MLX5DV_DR
+	(void)ctx;
+	(void)ft_type;
+	return mlx5dv_dr_create_action_modify_header(ns, flags, actions_sz,
+						    actions);
+#else
 	struct mlx5dv_flow_action_attr *action;
 
+	(void)ns;
+	(void)flags;
 	action = malloc(sizeof(*action));
 	if (!action)
 		return NULL;
@@ -527,11 +599,14 @@
 	action->action = mlx5dv_create_flow_action_modify_header
 		(ctx, actions_sz, actions, ft_type);
 	return action;
+#endif
 #else
 	(void)ctx;
+	(void)ft_type;
+	(void)ns;
+	(void)flags;
 	(void)actions_sz;
 	(void)actions;
-	(void)ft_type;
 	return NULL;
 #endif
 }
@@ -539,12 +614,20 @@
 static void *
 mlx5_glue_dv_create_flow_action_packet_reformat
 		(struct ibv_context *ctx,
-		 size_t data_sz,
-		 void *data,
 		 enum mlx5dv_flow_action_packet_reformat_type reformat_type,
-		 enum mlx5dv_flow_table_type ft_type)
+		 enum mlx5dv_flow_table_type ft_type, struct mlx5dv_dr_ns *ns,
+		 uint32_t flags, size_t data_sz, void *data)
 {
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
+#ifdef HAVE_MLX5DV_DR
+	(void)ctx;
+	(void)ft_type;
+	return mlx5dv_dr_create_action_packet_reformat(ns, flags,
+						       reformat_type, data_sz,
+						       data);
+#else
+	(void)ns;
+	(void)flags;
 	struct mlx5dv_flow_action_attr *action;
 
 	action = malloc(sizeof(*action));
@@ -554,12 +637,15 @@
 	action->action = mlx5dv_create_flow_action_packet_reformat
 		(ctx, data_sz, data, reformat_type, ft_type);
 	return action;
+#endif
 #else
 	(void)ctx;
-	(void)data_sz;
-	(void)data;
 	(void)reformat_type;
 	(void)ft_type;
+	(void)ns;
+	(void)flags;
+	(void)data_sz;
+	(void)data;
 	return NULL;
 #endif
 }
@@ -568,6 +654,9 @@
 mlx5_glue_dv_create_flow_action_tag(uint32_t tag)
 {
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
+#ifdef HAVE_MLX5DV_DR
+	return mlx5dv_dr_create_action_tag(tag);
+#else
 	struct mlx5dv_flow_action_attr *action;
 	action = malloc(sizeof(*action));
 	if (!action)
@@ -576,10 +665,36 @@
 	action->tag_value = tag;
 	return action;
 #endif
+#endif
 	(void)tag;
 	return NULL;
 }
 
+static int
+mlx5_glue_dv_destroy_flow(void *flow_id)
+{
+#ifdef HAVE_MLX5DV_DR
+	return mlx5dv_dr_destroy_rule(flow_id);
+#else
+	return ibv_destroy_flow(flow_id);
+#endif
+}
+
+static int
+mlx5_glue_dv_destroy_flow_matcher(void *matcher)
+{
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+#ifdef HAVE_MLX5DV_DR
+	return mlx5dv_dr_destroy_matcher(matcher);
+#else
+	return mlx5dv_destroy_flow_matcher(matcher);
+#endif
+#else
+	(void)matcher;
+	return 0;
+#endif
+}
+
 static struct ibv_context *
 mlx5_glue_dv_open_device(struct ibv_device *device)
 {
@@ -718,6 +833,10 @@
 	.get_async_event = mlx5_glue_get_async_event,
 	.port_state_str = mlx5_glue_port_state_str,
 	.cq_ex_to_cq = mlx5_glue_cq_ex_to_cq,
+	.dr_create_flow_tbl = mlx5_glue_dr_create_flow_tbl,
+	.dr_destroy_flow_tbl = mlx5_glue_dr_destroy_flow_tbl,
+	.dr_create_ns = mlx5_glue_dr_create_ns,
+	.dr_destroy_ns = mlx5_glue_dr_destroy_ns,
 	.dv_create_cq = mlx5_glue_dv_create_cq,
 	.dv_create_wq = mlx5_glue_dv_create_wq,
 	.dv_query_device = mlx5_glue_dv_query_device,
@@ -725,7 +844,6 @@
 	.dv_init_obj = mlx5_glue_dv_init_obj,
 	.dv_create_qp = mlx5_glue_dv_create_qp,
 	.dv_create_flow_matcher = mlx5_glue_dv_create_flow_matcher,
-	.dv_destroy_flow_matcher = mlx5_glue_dv_destroy_flow_matcher,
 	.dv_create_flow = mlx5_glue_dv_create_flow,
 	.dv_create_flow_action_counter =
 		mlx5_glue_dv_create_flow_action_counter,
@@ -736,6 +854,8 @@
 	.dv_create_flow_action_packet_reformat =
 		mlx5_glue_dv_create_flow_action_packet_reformat,
 	.dv_create_flow_action_tag =  mlx5_glue_dv_create_flow_action_tag,
+	.dv_destroy_flow = mlx5_glue_dv_destroy_flow,
+	.dv_destroy_flow_matcher = mlx5_glue_dv_destroy_flow_matcher,
 	.dv_open_device = mlx5_glue_dv_open_device,
 	.devx_obj_create = mlx5_glue_devx_obj_create,
 	.devx_obj_destroy = mlx5_glue_devx_obj_destroy,
diff --git a/drivers/net/mlx5/mlx5_glue.h b/drivers/net/mlx5/mlx5_glue.h
index 32487ea..eb29ffa 100644
--- a/drivers/net/mlx5/mlx5_glue.h
+++ b/drivers/net/mlx5/mlx5_glue.h
@@ -63,6 +63,11 @@
 struct mlx5dv_devx_obj;
 #endif
 
+#ifndef HAVE_MLX5DV_DR
+struct mlx5dv_dr_ns;
+enum  mlx5dv_dr_ns_domain { unused, };
+#endif
+
 /* LIB_GLUE_VERSION must be updated every time this structure is modified. */
 struct mlx5_glue {
 	const char *version;
@@ -140,6 +145,11 @@ struct mlx5_glue {
 			       struct ibv_async_event *event);
 	const char *(*port_state_str)(enum ibv_port_state port_state);
 	struct ibv_cq *(*cq_ex_to_cq)(struct ibv_cq_ex *cq);
+	void *(*dr_create_flow_tbl)(void *ns, uint32_t level);
+	int (*dr_destroy_flow_tbl)(void *tbl);
+	void *(*dr_create_ns)(struct ibv_context *ctx,
+			      enum mlx5dv_dr_ns_domain domain);
+	int (*dr_destroy_ns)(void *ns);
 	struct ibv_cq_ex *(*dv_create_cq)
 		(struct ibv_context *context,
 		 struct ibv_cq_init_attr_ex *cq_attr,
@@ -158,23 +168,26 @@ struct mlx5_glue {
 		(struct ibv_context *context,
 		 struct ibv_qp_init_attr_ex *qp_init_attr_ex,
 		 struct mlx5dv_qp_init_attr *dv_qp_init_attr);
-	struct mlx5dv_flow_matcher *(*dv_create_flow_matcher)
+	void *(*dv_create_flow_matcher)
 		(struct ibv_context *context,
-		 struct mlx5dv_flow_matcher_attr *matcher_attr);
-	int (*dv_destroy_flow_matcher)(struct mlx5dv_flow_matcher *matcher);
-	struct ibv_flow *(*dv_create_flow)(struct mlx5dv_flow_matcher *matcher,
-			  struct mlx5dv_flow_match_parameters *match_value,
+		 struct mlx5dv_flow_matcher_attr *matcher_attr,
+		 void *tbl);
+	void *(*dv_create_flow)(void *matcher, void *match_value,
 			  size_t num_actions, void *actions[]);
 	void *(*dv_create_flow_action_counter)(void *obj, uint32_t  offset);
 	void *(*dv_create_flow_action_dest_ibv_qp)(void *qp);
 	void *(*dv_create_flow_action_modify_header)
-		(struct ibv_context *ctx, size_t actions_sz, uint64_t actions[],
-		 enum mlx5dv_flow_table_type ft_type);
+		(struct ibv_context *ctx, enum mlx5dv_flow_table_type ft_type,
+		 void *ns, uint64_t flags, size_t actions_sz,
+		 uint64_t actions[]);
 	void *(*dv_create_flow_action_packet_reformat)
-		(struct ibv_context *ctx, size_t data_sz, void *data,
+		(struct ibv_context *ctx,
 		 enum mlx5dv_flow_action_packet_reformat_type reformat_type,
-		 enum mlx5dv_flow_table_type ft_type);
+		 enum mlx5dv_flow_table_type ft_type, struct mlx5dv_dr_ns *ns,
+		 uint32_t flags, size_t data_sz, void *data);
 	void *(*dv_create_flow_action_tag)(uint32_t tag);
+	int (*dv_destroy_flow)(void *flow);
+	int (*dv_destroy_flow_matcher)(void *matcher);
 	struct ibv_context *(*dv_open_device)(struct ibv_device *device);
 	struct mlx5dv_devx_obj *(*devx_obj_create)
 					(struct ibv_context *ctx,
diff --git a/drivers/net/mlx5/mlx5_prm.h b/drivers/net/mlx5/mlx5_prm.h
index da1219e..b15266f 100644
--- a/drivers/net/mlx5/mlx5_prm.h
+++ b/drivers/net/mlx5/mlx5_prm.h
@@ -492,20 +492,40 @@ struct mlx5_ifc_fte_match_set_misc2_bits {
 	u8 reserved_at_1a0[0x60];
 };
 
+struct mlx5_ifc_fte_match_set_misc3_bits {
+	u8 inner_tcp_seq_num[0x20];
+	u8 outer_tcp_seq_num[0x20];
+	u8 inner_tcp_ack_num[0x20];
+	u8 outer_tcp_ack_num[0x20];
+	u8 reserved_at_auto1[0x8];
+	u8 outer_vxlan_gpe_vni[0x18];
+	u8 outer_vxlan_gpe_next_protocol[0x8];
+	u8 outer_vxlan_gpe_flags[0x8];
+	u8 reserved_at_a8[0x10];
+	u8 icmp_header_data[0x20];
+	u8 icmpv6_header_data[0x20];
+	u8 icmp_type[0x8];
+	u8 icmp_code[0x8];
+	u8 icmpv6_type[0x8];
+	u8 icmpv6_code[0x8];
+	u8 reserved_at_1a0[0xe0];
+};
+
 /* Flow matcher. */
 struct mlx5_ifc_fte_match_param_bits {
 	struct mlx5_ifc_fte_match_set_lyr_2_4_bits outer_headers;
 	struct mlx5_ifc_fte_match_set_misc_bits misc_parameters;
 	struct mlx5_ifc_fte_match_set_lyr_2_4_bits inner_headers;
 	struct mlx5_ifc_fte_match_set_misc2_bits misc_parameters_2;
-	u8 reserved_at_800[0x800];
+	struct mlx5_ifc_fte_match_set_misc3_bits misc_parameters_3;
 };
 
 enum {
 	MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT,
 	MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT,
 	MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT,
-	MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT
+	MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT,
+	MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT
 };
 
 enum {
-- 
1.8.3.1



Thread overview: 52+ messages
2019-03-20 15:38 [dpdk-dev] [PATCH 0/3]net/mlx5: Add Direct Rule support Ori Kam
2019-03-20 15:39 ` [dpdk-dev] [PATCH 1/3] net/mlx5: prepare Direct Verbs for Direct Rule Ori Kam
2019-03-20 15:39 ` [dpdk-dev] [PATCH 2/3] net/mlx5: add Direct Rules API Ori Kam
2019-03-20 15:39 ` [dpdk-dev] [PATCH 3/3] net/mlx5: add jump action support for NIC Ori Kam
2019-03-28 16:32 ` [dpdk-dev] [PATCH v2 0/3] net/mlx5: Add Direct Rule support Ori Kam
2019-03-28 16:32   ` [dpdk-dev] [PATCH v2 1/3] net/mlx5: prepare Direct Verbs for Direct Rule Ori Kam
2019-04-01 14:38     ` Slava Ovsiienko
2019-04-03 10:15     ` Shahaf Shuler
2019-03-28 16:32   ` [dpdk-dev] [PATCH v2 2/3] net/mlx5: add Direct Rules API Ori Kam
2019-04-01 14:38     ` Slava Ovsiienko
2019-03-28 16:32   ` [dpdk-dev] [PATCH v2 3/3] net/mlx5: add jump action support for NIC Ori Kam
2019-04-01 14:38     ` Slava Ovsiienko
2019-04-03 10:16     ` Shahaf Shuler
2019-04-03 10:17   ` [dpdk-dev] [PATCH v2 0/3] net/mlx5: Add Direct Rule support Shahaf Shuler
2019-04-03 13:21   ` [dpdk-dev] [PATCH v3 " Ori Kam
2019-04-03 13:21     ` [dpdk-dev] [PATCH v3 1/3] net/mlx5: prepare Direct Verbs for Direct Rule Ori Kam
2019-04-03 13:21     ` [dpdk-dev] [PATCH v3 2/3] net/mlx5: add Direct Rules API Ori Kam [this message]
2019-04-03 13:21     ` [dpdk-dev] [PATCH v3 3/3] net/mlx5: add jump action support for NIC Ori Kam
2019-04-04  5:26       ` Shahaf Shuler
2019-04-04  9:54   ` [dpdk-dev] [PATCH v4 0/3] net/mlx5: Add Direct Rule support Ori Kam
2019-04-04  9:54     ` [dpdk-dev] [PATCH v4 1/3] net/mlx5: prepare Direct Verbs for Direct Rule Ori Kam
2019-04-12 23:51       ` dwilder
2019-04-13  0:16         ` Yongseok Koh
2019-04-04  9:54     ` [dpdk-dev] [PATCH v4 2/3] net/mlx5: add Direct Rules API Ori Kam
2019-04-04  9:54     ` [dpdk-dev] [PATCH v4 3/3] net/mlx5: add jump action support for NIC Ori Kam
2019-04-04 11:01     ` [dpdk-dev] [PATCH v4 0/3] net/mlx5: Add Direct Rule support Shahaf Shuler
