DPDK patches and discussions
From: Xueming Li <xuemingl@mellanox.com>
To: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>,
	Shahaf Shuler <shahafs@mellanox.com>
Cc: Xueming Li <xuemingl@mellanox.com>, dev@dpdk.org
Subject: [dpdk-dev] [PATCH v2 11/15] net/mlx5: support MPLS-in-GRE and MPLS-in-UDP
Date: Tue, 10 Apr 2018 21:34:11 +0800	[thread overview]
Message-ID: <20180410133415.189905-12-xuemingl@mellanox.com> (raw)
In-Reply-To: <20180410133415.189905-1-xuemingl@mellanox.com>

This patch adds support for the new tunnel types MPLS-in-GRE and
MPLS-in-UDP.
Flow pattern examples:
  ipv4 proto is 47 / gre proto is 0x8847 / mpls / end
  ipv4 / udp dst is 6635 / mpls / end
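
For reference only (illustrative, not part of this patch), below is a
minimal sketch of how an application could install the MPLS-in-UDP rule
above through the rte_flow API; port_id, attr and actions are assumed to
be supplied by the caller, and error handling is omitted:

  #include <stdint.h>
  #include <rte_byteorder.h>
  #include <rte_flow.h>

  /* Match outer IPv4 / UDP dst 6635 / MPLS, as in the example above. */
  static struct rte_flow *
  create_mpls_in_udp_rule(uint16_t port_id, const struct rte_flow_attr *attr,
                          const struct rte_flow_action actions[],
                          struct rte_flow_error *error)
  {
          struct rte_flow_item_udp udp_spec = {
                  .hdr = { .dst_port = rte_cpu_to_be_16(6635) },
          };
          struct rte_flow_item_udp udp_mask = {
                  .hdr = { .dst_port = rte_cpu_to_be_16(0xffff) },
          };
          struct rte_flow_item pattern[] = {
                  { .type = RTE_FLOW_ITEM_TYPE_ETH },
                  { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
                  { .type = RTE_FLOW_ITEM_TYPE_UDP,
                    .spec = &udp_spec, .mask = &udp_mask },
                  { .type = RTE_FLOW_ITEM_TYPE_MPLS },
                  { .type = RTE_FLOW_ITEM_TYPE_END },
          };

          return rte_flow_create(port_id, attr, pattern, actions, error);
  }

The MPLS-in-GRE case is analogous: replace the UDP item with a GRE item
whose protocol field matches 0x8847.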

Signed-off-by: Xueming Li <xuemingl@mellanox.com>
---
 drivers/net/mlx5/Makefile    |   5 ++
 drivers/net/mlx5/mlx5.c      |  15 +++++
 drivers/net/mlx5/mlx5.h      |   1 +
 drivers/net/mlx5/mlx5_flow.c | 148 ++++++++++++++++++++++++++++++++++++++++++-
 4 files changed, 166 insertions(+), 3 deletions(-)

diff --git a/drivers/net/mlx5/Makefile b/drivers/net/mlx5/Makefile
index f9a6c460b..33553483e 100644
--- a/drivers/net/mlx5/Makefile
+++ b/drivers/net/mlx5/Makefile
@@ -131,6 +131,11 @@ mlx5_autoconf.h.new: $(RTE_SDK)/buildtools/auto-config-h.sh
 		enum MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS \
 		$(AUTOCONF_OUTPUT)
 	$Q sh -- '$<' '$@' \
+		HAVE_IBV_DEVICE_MPLS_SUPPORT \
+		infiniband/verbs.h \
+		enum IBV_FLOW_SPEC_MPLS \
+		$(AUTOCONF_OUTPUT)
+	$Q sh -- '$<' '$@' \
 		HAVE_IBV_WQ_FLAG_RX_END_PADDING \
 		infiniband/verbs.h \
 		enum IBV_WQ_FLAG_RX_END_PADDING \
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index a1f2799e5..2124439b3 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -613,6 +613,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 	unsigned int mps;
 	unsigned int cqe_comp;
 	unsigned int tunnel_en = 0;
+	unsigned int mpls_en = 0;
 	int idx;
 	int i;
 	struct mlx5dv_context attrs_out = {0};
@@ -719,12 +720,25 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 			      MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_VXLAN) &&
 			     (attrs_out.tunnel_offloads_caps &
 			      MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GRE));
+#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
+		mpls_en = ((attrs_out.tunnel_offloads_caps &
+			    MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_MPLS_GRE) &&
+			   (attrs_out.tunnel_offloads_caps &
+			    MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_MPLS_UDP) &&
+			   (attrs_out.tunnel_offloads_caps &
+			  MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CTRL_DW_MPLS));
+#endif
 	}
 	DRV_LOG(DEBUG, "tunnel offloading is %ssupported",
 		tunnel_en ? "" : "not ");
+	DRV_LOG(DEBUG, "MPLS over GRE/UDP offloading is %ssupported",
+		mpls_en ? "" : "not ");
 #else
 	DRV_LOG(WARNING,
 		"tunnel offloading disabled due to old OFED/rdma-core version");
+	DRV_LOG(WARNING,
+		"MPLS over GRE/UDP offloading disabled due to old"
+		" OFED/rdma-core version or firmware configuration");
 #endif
 	if (mlx5_glue->query_device_ex(attr_ctx, NULL, &device_attr)) {
 		err = errno;
@@ -748,6 +762,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 			.cqe_comp = cqe_comp,
 			.mps = mps,
 			.tunnel_en = tunnel_en,
+			.mpls_en = mpls_en,
 			.tx_vec_en = 1,
 			.rx_vec_en = 1,
 			.mpw_hdr_dseg = 0,
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 708272f6d..1868abd8d 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -81,6 +81,7 @@ struct mlx5_dev_config {
 	unsigned int vf:1; /* This is a VF. */
 	unsigned int mps:2; /* Multi-packet send supported mode. */
 	unsigned int tunnel_en:1;
+	unsigned int mpls_en:1; /* MPLS over GRE/UDP is enabled. */
 	/* Whether tunnel stateless offloads are supported. */
 	unsigned int flow_counter_en:1; /* Whether flow counter is supported. */
 	unsigned int cqe_comp:1; /* CQE compression is enabled. */
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 58d437308..5784f2ee0 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -97,6 +97,11 @@ mlx5_flow_create_gre(const struct rte_flow_item *item,
 		       const void *default_mask,
 		       struct mlx5_flow_data *data);
 
+static int
+mlx5_flow_create_mpls(const struct rte_flow_item *item,
+		      const void *default_mask,
+		      struct mlx5_flow_data *data);
+
 struct mlx5_flow_parse;
 
 static void
@@ -244,12 +249,14 @@ struct rte_flow {
 #define IS_TUNNEL(type) ( \
 	(type) == RTE_FLOW_ITEM_TYPE_VXLAN || \
 	(type) == RTE_FLOW_ITEM_TYPE_VXLAN_GPE || \
+	(type) == RTE_FLOW_ITEM_TYPE_MPLS || \
 	(type) == RTE_FLOW_ITEM_TYPE_GRE)
 
 const uint32_t flow_ptype[] = {
 	[RTE_FLOW_ITEM_TYPE_VXLAN] = RTE_PTYPE_TUNNEL_VXLAN,
 	[RTE_FLOW_ITEM_TYPE_VXLAN_GPE] = RTE_PTYPE_TUNNEL_VXLAN_GPE,
 	[RTE_FLOW_ITEM_TYPE_GRE] = RTE_PTYPE_TUNNEL_GRE,
+	[RTE_FLOW_ITEM_TYPE_MPLS] = RTE_PTYPE_TUNNEL_MPLS_IN_GRE,
 };
 
 #define PTYPE_IDX(t) ((RTE_PTYPE_TUNNEL_MASK & (t)) >> 12)
@@ -260,6 +267,10 @@ const uint32_t ptype_ext[] = {
 	[PTYPE_IDX(RTE_PTYPE_TUNNEL_VXLAN_GPE)]	= RTE_PTYPE_TUNNEL_VXLAN_GPE |
 						  RTE_PTYPE_L4_UDP,
 	[PTYPE_IDX(RTE_PTYPE_TUNNEL_GRE)] = RTE_PTYPE_TUNNEL_GRE,
+	[PTYPE_IDX(RTE_PTYPE_TUNNEL_MPLS_IN_GRE)] =
+		RTE_PTYPE_TUNNEL_MPLS_IN_GRE,
+	[PTYPE_IDX(RTE_PTYPE_TUNNEL_MPLS_IN_UDP)] =
+		RTE_PTYPE_TUNNEL_MPLS_IN_GRE | RTE_PTYPE_L4_UDP,
 };
 
 /** Structure to generate a simple graph of layers supported by the NIC. */
@@ -396,7 +407,8 @@ static const struct mlx5_flow_items mlx5_flow_items[] = {
 	},
 	[RTE_FLOW_ITEM_TYPE_UDP] = {
 		.items = ITEMS(RTE_FLOW_ITEM_TYPE_VXLAN,
-			       RTE_FLOW_ITEM_TYPE_VXLAN_GPE),
+			       RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
+			       RTE_FLOW_ITEM_TYPE_MPLS),
 		.actions = valid_actions,
 		.mask = &(const struct rte_flow_item_udp){
 			.hdr = {
@@ -425,7 +437,8 @@ static const struct mlx5_flow_items mlx5_flow_items[] = {
 	[RTE_FLOW_ITEM_TYPE_GRE] = {
 		.items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH,
 			       RTE_FLOW_ITEM_TYPE_IPV4,
-			       RTE_FLOW_ITEM_TYPE_IPV6),
+			       RTE_FLOW_ITEM_TYPE_IPV6,
+			       RTE_FLOW_ITEM_TYPE_MPLS),
 		.actions = valid_actions,
 		.mask = &(const struct rte_flow_item_gre){
 			.protocol = -1,
@@ -433,7 +446,11 @@ static const struct mlx5_flow_items mlx5_flow_items[] = {
 		.default_mask = &rte_flow_item_gre_mask,
 		.mask_sz = sizeof(struct rte_flow_item_gre),
 		.convert = mlx5_flow_create_gre,
+#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
+		.dst_sz = sizeof(struct ibv_flow_spec_gre),
+#else
 		.dst_sz = sizeof(struct ibv_flow_spec_tunnel),
+#endif
 	},
 	[RTE_FLOW_ITEM_TYPE_VXLAN] = {
 		.items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH,
@@ -461,6 +478,21 @@ static const struct mlx5_flow_items mlx5_flow_items[] = {
 		.convert = mlx5_flow_create_vxlan_gpe,
 		.dst_sz = sizeof(struct ibv_flow_spec_tunnel),
 	},
+	[RTE_FLOW_ITEM_TYPE_MPLS] = {
+		.items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH,
+			       RTE_FLOW_ITEM_TYPE_IPV4,
+			       RTE_FLOW_ITEM_TYPE_IPV6),
+		.actions = valid_actions,
+		.mask = &(const struct rte_flow_item_mpls){
+			.label_tc_s = "\xff\xff\xf0",
+		},
+		.default_mask = &rte_flow_item_mpls_mask,
+		.mask_sz = sizeof(struct rte_flow_item_mpls),
+		.convert = mlx5_flow_create_mpls,
+#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
+		.dst_sz = sizeof(struct ibv_flow_spec_mpls),
+#endif
+	},
 };
 
 /** Structure to pass to the conversion function. */
@@ -916,7 +948,9 @@ mlx5_flow_convert_items_validate(struct rte_eth_dev *dev,
 		if (ret)
 			goto exit_item_not_supported;
 		if (IS_TUNNEL(items->type)) {
-			if (parser->tunnel) {
+			if (parser->tunnel &&
+			   !(parser->tunnel == RTE_PTYPE_TUNNEL_GRE &&
+			     items->type == RTE_FLOW_ITEM_TYPE_MPLS)) {
 				rte_flow_error_set(error, ENOTSUP,
 						   RTE_FLOW_ERROR_TYPE_ITEM,
 						   items,
@@ -924,6 +958,16 @@ mlx5_flow_convert_items_validate(struct rte_eth_dev *dev,
 						   " tunnel encapsulations.");
 				return -rte_errno;
 			}
+			if (items->type == RTE_FLOW_ITEM_TYPE_MPLS &&
+			    !priv->config.mpls_en) {
+				rte_flow_error_set(error, ENOTSUP,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   items,
+						   "MPLS not supported or"
+						   " disabled in firmware"
+						   " configuration.");
+				return -rte_errno;
+			}
 			if (!priv->config.tunnel_en &&
 			    parser->rss_conf.level) {
 				rte_flow_error_set(error, ENOTSUP,
@@ -1883,6 +1927,80 @@ mlx5_flow_create_vxlan_gpe(const struct rte_flow_item *item,
 }
 
 /**
+ * Convert MPLS item to Verbs specification.
+ * Tunnel types currently supported are MPLS-in-GRE and MPLS-in-UDP.
+ *
+ * @param item[in]
+ *   Item specification.
+ * @param default_mask[in]
+ *   Default bit-masks to use when item->mask is not provided.
+ * @param data[in, out]
+ *   User structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_flow_create_mpls(const struct rte_flow_item *item __rte_unused,
+		      const void *default_mask __rte_unused,
+		      struct mlx5_flow_data *data __rte_unused)
+{
+#ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT
+	return rte_flow_error_set(data->error, EINVAL,
+				  RTE_FLOW_ERROR_TYPE_ITEM,
+				  item,
+				  "MPLS not supported by driver");
+#else
+	unsigned int i;
+	const struct rte_flow_item_mpls *spec = item->spec;
+	const struct rte_flow_item_mpls *mask = item->mask;
+	struct mlx5_flow_parse *parser = data->parser;
+	unsigned int size = sizeof(struct ibv_flow_spec_mpls);
+	struct ibv_flow_spec_mpls mpls = {
+		.type = IBV_FLOW_SPEC_MPLS,
+		.size = size,
+	};
+	union tag {
+		uint32_t tag;
+		uint8_t label[4];
+	} id;
+
+	id.tag = 0;
+	parser->inner = IBV_FLOW_SPEC_INNER;
+	if (parser->layer == HASH_RXQ_UDPV4 ||
+	    parser->layer == HASH_RXQ_UDPV6) {
+		parser->tunnel =
+			ptype_ext[PTYPE_IDX(RTE_PTYPE_TUNNEL_MPLS_IN_UDP)];
+		parser->out_layer = parser->layer;
+	} else {
+		parser->tunnel =
+			ptype_ext[PTYPE_IDX(RTE_PTYPE_TUNNEL_MPLS_IN_GRE)];
+	}
+	parser->layer = HASH_RXQ_TUNNEL;
+	if (spec) {
+		if (!mask)
+			mask = default_mask;
+		memcpy(&id.label[1], spec->label_tc_s, 3);
+		id.label[0] = spec->ttl;
+		mpls.val.tag = id.tag;
+		memcpy(&id.label[1], mask->label_tc_s, 3);
+		id.label[0] = mask->ttl;
+		mpls.mask.tag = id.tag;
+		/* Remove unwanted bits from values. */
+		mpls.val.tag &= mpls.mask.tag;
+	}
+	mlx5_flow_create_copy(parser, &mpls, size);
+	for (i = 0; i != hash_rxq_init_n; ++i) {
+		if (!parser->queue[i].ibv_attr)
+			continue;
+		parser->queue[i].ibv_attr->flags |=
+			IBV_FLOW_ATTR_FLAGS_ORDERED_SPEC_LIST;
+	}
+	return 0;
+#endif
+}
+
+/**
  * Convert GRE item to Verbs specification.
  *
  * @param item[in]
@@ -1901,16 +2019,40 @@ mlx5_flow_create_gre(const struct rte_flow_item *item __rte_unused,
 		     struct mlx5_flow_data *data)
 {
 	struct mlx5_flow_parse *parser = data->parser;
+#ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT
 	unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
 	struct ibv_flow_spec_tunnel tunnel = {
 		.type = parser->inner | IBV_FLOW_SPEC_VXLAN_TUNNEL,
 		.size = size,
 	};
+#else
+	const struct rte_flow_item_gre *spec = item->spec;
+	const struct rte_flow_item_gre *mask = item->mask;
+	unsigned int size = sizeof(struct ibv_flow_spec_gre);
+	struct ibv_flow_spec_gre tunnel = {
+		.type = parser->inner | IBV_FLOW_SPEC_GRE,
+		.size = size,
+	};
+#endif
 
 	parser->inner = IBV_FLOW_SPEC_INNER;
 	parser->tunnel = ptype_ext[PTYPE_IDX(RTE_PTYPE_TUNNEL_GRE)];
 	parser->out_layer = parser->layer;
 	parser->layer = HASH_RXQ_TUNNEL;
+#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
+	if (spec) {
+		if (!mask)
+			mask = default_mask;
+		tunnel.val.c_ks_res0_ver = spec->c_rsvd0_ver;
+		tunnel.val.protocol = spec->protocol;
+		tunnel.mask.c_ks_res0_ver = mask->c_rsvd0_ver;
+		tunnel.mask.protocol = mask->protocol;
+		/* Remove unwanted bits from values. */
+		tunnel.val.c_ks_res0_ver &= tunnel.mask.c_ks_res0_ver;
+		tunnel.val.protocol &= tunnel.mask.protocol;
+		tunnel.val.key &= tunnel.mask.key;
+	}
+#endif
 	mlx5_flow_create_copy(parser, &tunnel, size);
 	return 0;
 }
-- 
2.13.3


Thread overview: 44+ messages
2018-04-10 13:34 [dpdk-dev] [PATCH v2 00/15] mlx5 Rx tunnel offloading Xueming Li
2018-04-10 13:34 ` [dpdk-dev] [PATCH v2 01/15] net/mlx5: support 16 hardware priorities Xueming Li
2018-04-10 14:41   ` Nélio Laranjeiro
2018-04-10 15:22     ` Xueming(Steven) Li
2018-04-12  9:09       ` Nélio Laranjeiro
2018-04-12 13:43         ` Xueming(Steven) Li
2018-04-12 14:02           ` Nélio Laranjeiro
2018-04-12 14:46             ` Xueming(Steven) Li
2018-04-10 13:34 ` [dpdk-dev] [PATCH v2 02/15] net/mlx5: support GRE tunnel flow Xueming Li
2018-04-10 13:34 ` [dpdk-dev] [PATCH v2 03/15] net/mlx5: support L3 vxlan flow Xueming Li
2018-04-10 14:53   ` Nélio Laranjeiro
2018-04-10 13:34 ` [dpdk-dev] [PATCH v2 04/15] net/mlx5: support Rx tunnel type identification Xueming Li
2018-04-10 15:17   ` Nélio Laranjeiro
2018-04-11  8:11     ` Xueming(Steven) Li
2018-04-12  9:50       ` Nélio Laranjeiro
2018-04-12 14:27         ` Xueming(Steven) Li
2018-04-13  8:37           ` Nélio Laranjeiro
2018-04-13 12:09             ` Xueming(Steven) Li
2018-04-10 13:34 ` [dpdk-dev] [PATCH v2 05/15] net/mlx5: support tunnel inner checksum offloads Xueming Li
2018-04-10 15:27   ` Nélio Laranjeiro
2018-04-11  8:46     ` Xueming(Steven) Li
2018-04-10 13:34 ` [dpdk-dev] [PATCH v2 06/15] net/mlx5: split flow RSS handling logic Xueming Li
2018-04-10 15:28   ` Nélio Laranjeiro
2018-04-10 13:34 ` [dpdk-dev] [PATCH v2 07/15] net/mlx5: support tunnel RSS level Xueming Li
2018-04-11  8:55   ` Nélio Laranjeiro
2018-04-14 12:25     ` Xueming(Steven) Li
2018-04-16  7:14       ` Nélio Laranjeiro
2018-04-16  7:46         ` Xueming(Steven) Li
2018-04-16  8:09           ` Nélio Laranjeiro
2018-04-16 10:06             ` Xueming(Steven) Li
2018-04-16 12:27               ` Nélio Laranjeiro
2018-04-10 13:34 ` [dpdk-dev] [PATCH v2 08/15] net/mlx5: add hardware flow debug dump Xueming Li
2018-04-10 13:34 ` [dpdk-dev] [PATCH v2 09/15] net/mlx5: introduce VXLAN-GPE tunnel type Xueming Li
2018-04-10 13:34 ` [dpdk-dev] [PATCH v2 10/15] net/mlx5: allow flow tunnel ID 0 with outer pattern Xueming Li
2018-04-11 12:25   ` Nélio Laranjeiro
2018-04-10 13:34 ` Xueming Li [this message]
2018-04-10 13:34 ` [dpdk-dev] [PATCH v2 12/15] doc: update mlx5 guide on tunnel offloading Xueming Li
2018-04-11 12:32   ` Nélio Laranjeiro
2018-04-11 12:43     ` Thomas Monjalon
2018-04-10 13:34 ` [dpdk-dev] [PATCH v2 13/15] net/mlx5: setup RSS flow regardless of queue count Xueming Li
2018-04-11 12:37   ` Nélio Laranjeiro
2018-04-11 13:01     ` Xueming(Steven) Li
2018-04-10 13:34 ` [dpdk-dev] [PATCH v2 14/15] net/mlx5: fix invalid flow item check Xueming Li
2018-04-10 13:34 ` [dpdk-dev] [PATCH v2 15/15] net/mlx5: support RSS configuration in isolated mode Xueming Li
