DPDK patches and discussions
* [dpdk-dev] [PATCH] net/mlx5: support matching on ICMP/ICMP6
@ 2019-05-28  2:14 Xiaoyu Min
  2019-07-02  6:26 ` [dpdk-dev] [PATCH v2] " Xiaoyu Min
  2019-07-03  7:22 ` [dpdk-dev] [PATCH v3] " Xiaoyu Min
  0 siblings, 2 replies; 7+ messages in thread
From: Xiaoyu Min @ 2019-05-28  2:14 UTC (permalink / raw)
  To: Shahaf Shuler, Yongseok Koh, John McNamara, Marko Kovacevic; +Cc: dev, jackmin

With the DV/DR flow engine, MLX5 can match on the ICMP/ICMP6 code and
type fields via the FLEX Parser, which is enabled by configuring the
firmware with FLEX Parser profile 2:

mlxconfig -d <mst device> -y set FLEX_PARSER_PROFILE_ENABLE=2
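
For reference (this snippet is not part of the patch), a minimal sketch
of how an application could request such matching through the generic
rte_flow API; the port id, Rx queue index and the echo-request
type/code values are illustrative placeholders:

    /*
     * Sketch only: match ICMPv4 echo request (type 8, code 0) on a port
     * and steer it to Rx queue 1. Error handling is omitted.
     */
    #include <stdint.h>
    #include <rte_flow.h>

    static struct rte_flow *
    create_icmp_echo_rule(uint16_t port_id)
    {
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_item_icmp icmp_spec = {
            .hdr = { .icmp_type = 8, .icmp_code = 0 },
        };
        struct rte_flow_item_icmp icmp_mask = {
            .hdr = { .icmp_type = 0xff, .icmp_code = 0xff },
        };
        struct rte_flow_item pattern[] = {
            { .type = RTE_FLOW_ITEM_TYPE_ETH },
            { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
            { .type = RTE_FLOW_ITEM_TYPE_ICMP,
              .spec = &icmp_spec, .mask = &icmp_mask },
            { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action_queue queue = { .index = 1 };
        struct rte_flow_action actions[] = {
            { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
            { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        struct rte_flow_error err;

        return rte_flow_create(port_id, &attr, pattern, actions, &err);
    }

On mlx5 such a pattern is then translated into the FLEX Parser
(misc_parameters_3) ICMP type/code match fields added by this patch.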

Signed-off-by: Xiaoyu Min <jackmin@mellanox.com>
---
 doc/guides/nics/mlx5.rst        |  15 ++++
 drivers/net/mlx5/mlx5_flow.c    | 102 ++++++++++++++++++++++++++
 drivers/net/mlx5/mlx5_flow.h    |  12 ++++
 drivers/net/mlx5/mlx5_flow_dv.c | 122 ++++++++++++++++++++++++++++++++
 4 files changed, 251 insertions(+)

diff --git a/doc/guides/nics/mlx5.rst b/doc/guides/nics/mlx5.rst
index 5176aa845c..5538973209 100644
--- a/doc/guides/nics/mlx5.rst
+++ b/doc/guides/nics/mlx5.rst
@@ -160,6 +160,8 @@ Limitations
   - can be applied to VF ports only.
   - must specify PF port action (packet redirection from VF to PF).
 
+- ICMP/ICMP6 code/type matching cannot be supported together with IP-in-IP tunnel.
+
 Statistics
 ----------
 
@@ -518,6 +520,19 @@ Firmware configuration
      IP_OVER_VXLAN_EN                    True(1)
      IP_OVER_VXLAN_PORT                  <udp dport>
 
+- enable ICMP/ICMP6 code/type field matching
+
+   .. code-block:: console
+
+     mlxconfig -d <mst device> set FLEX_PARSER_PROFILE_ENABLE=2
+
+  Verify the configuration is set:
+
+   .. code-block:: console
+
+     mlxconfig -d <mst device> query | grep FLEX_PARSER_PROFILE_ENABLE
+     FLEX_PARSER_PROFILE_ENABLE         2
+
 Prerequisites
 -------------
 
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 98870184d0..8890ffe281 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -1049,6 +1049,108 @@ mlx5_flow_validate_attributes(struct rte_eth_dev *dev,
 	return 0;
 }
 
+/**
+ * Validate ICMP6 item.
+ *
+ * @param[in] item
+ *   Item specification.
+ * @param[in] item_flags
+ *   Bit-fields that hold the items detected until now.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_validate_item_icmp6(const struct rte_flow_item *item,
+			       uint64_t item_flags,
+			       uint8_t target_protocol,
+			       struct rte_flow_error *error)
+{
+	const struct rte_flow_item_icmp6 *mask = item->mask;
+	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
+	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
+				      MLX5_FLOW_LAYER_OUTER_L3;
+	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
+				      MLX5_FLOW_LAYER_OUTER_L4;
+	int ret;
+
+	if (target_protocol != 0xFF && target_protocol != IPPROTO_ICMPV6)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ITEM, item,
+					  "protocol filtering not compatible"
+					  " with ICMP6 layer");
+	if (!(item_flags & l3m))
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ITEM, item,
+					  "L3 is mandatory to filter on ICMP6");
+	if (item_flags & l4m)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ITEM, item,
+					  "multiple L4 layers not supported");
+	if (!mask)
+		mask = &rte_flow_item_icmp6_mask;
+	ret = mlx5_flow_item_acceptable
+		(item, (const uint8_t *)mask,
+		 (const uint8_t *)&rte_flow_item_icmp6_mask,
+		 sizeof(struct rte_flow_item_icmp6), error);
+	if (ret < 0)
+		return ret;
+	return 0;
+}
+
+/**
+ * Validate ICMP item.
+ *
+ * @param[in] item
+ *   Item specification.
+ * @param[in] item_flags
+ *   Bit-fields that hold the items detected until now.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_validate_item_icmp(const struct rte_flow_item *item,
+			     uint64_t item_flags,
+			     uint8_t target_protocol,
+			     struct rte_flow_error *error)
+{
+	const struct rte_flow_item_icmp *mask = item->mask;
+	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
+	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
+				      MLX5_FLOW_LAYER_OUTER_L3;
+	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
+				      MLX5_FLOW_LAYER_OUTER_L4;
+	int ret;
+
+	if (target_protocol != 0xFF && target_protocol != IPPROTO_ICMP)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ITEM, item,
+					  "protocol filtering not compatible"
+					  " with ICMP layer");
+	if (!(item_flags & l3m))
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ITEM, item,
+					  "L3 is mandatory to filter on ICMP");
+	if (item_flags & l4m)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ITEM, item,
+					  "multiple L4 layers not supported");
+	if (!mask)
+		mask = &rte_flow_item_icmp_mask;
+	ret = mlx5_flow_item_acceptable
+		(item, (const uint8_t *)mask,
+		 (const uint8_t *)&rte_flow_item_icmp_mask,
+		 sizeof(struct rte_flow_item_icmp), error);
+	if (ret < 0)
+		return ret;
+	return 0;
+}
+
 /**
  * Validate Ethernet item.
  *
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index b6654200cb..cb192aac2c 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -50,6 +50,10 @@
 #define MLX5_FLOW_ITEM_METADATA (1u << 16)
 #define MLX5_FLOW_ITEM_PORT_ID (1u << 17)
 
+/* Pattern MISC bits. */
+#define MLX5_FLOW_LAYER_ICMP (1u << 18)
+#define MLX5_FLOW_LAYER_ICMP6 (1u << 19)
+
 /* Outer Masks. */
 #define MLX5_FLOW_LAYER_OUTER_L3 \
 	(MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_OUTER_L3_IPV6)
@@ -512,6 +516,14 @@ int mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item,
 				      uint64_t item_flags,
 				      struct rte_eth_dev *dev,
 				      struct rte_flow_error *error);
+int mlx5_flow_validate_item_icmp(const struct rte_flow_item *item,
+				 uint64_t item_flags,
+				 uint8_t target_protocol,
+				 struct rte_flow_error *error);
+int mlx5_flow_validate_item_icmp6(const struct rte_flow_item *item,
+				   uint64_t item_flags,
+				   uint8_t target_protocol,
+				   struct rte_flow_error *error);
 
 /* mlx5_flow_tcf.c */
 
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index d096b02bba..565589251a 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -2207,6 +2207,22 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 				return ret;
 			last_item = MLX5_FLOW_ITEM_METADATA;
 			break;
+		case RTE_FLOW_ITEM_TYPE_ICMP:
+			ret = mlx5_flow_validate_item_icmp(items, item_flags,
+							   next_protocol,
+							   error);
+			if (ret < 0)
+				return ret;
+			last_item = MLX5_FLOW_LAYER_ICMP;
+			break;
+		case RTE_FLOW_ITEM_TYPE_ICMP6:
+			ret = mlx5_flow_validate_item_icmp6(items, item_flags,
+							    next_protocol,
+							    error);
+			if (ret < 0)
+				return ret;
+			last_item = MLX5_FLOW_LAYER_ICMP6;
+			break;
 		default:
 			return rte_flow_error_set(error, ENOTSUP,
 						  RTE_FLOW_ERROR_TYPE_ITEM,
@@ -3245,6 +3261,102 @@ flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
 	return 0;
 }
 
+/**
+ * Add ICMP6 item to matcher and to the value.
+ *
+ * @param[in, out] matcher
+ *   Flow matcher.
+ * @param[in, out] key
+ *   Flow matcher value.
+ * @param[in] item
+ *   Flow pattern to translate.
+ * @param[in] inner
+ *   Item is inner pattern.
+ */
+static void
+flow_dv_translate_item_icmp6(void *matcher, void *key,
+			      const struct rte_flow_item *item,
+			      int inner)
+{
+	const struct rte_flow_item_icmp6 *icmp6_m = item->mask;
+	const struct rte_flow_item_icmp6 *icmp6_v = item->spec;
+	void *headers_m;
+	void *headers_v;
+	void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
+				     misc_parameters_3);
+	void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
+	if (inner) {
+		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+					 inner_headers);
+		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
+	} else {
+		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+					 outer_headers);
+		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
+	}
+	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
+	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMPV6);
+	if (!icmp6_v)
+		return;
+	if (!icmp6_m)
+		icmp6_m = &rte_flow_item_icmp6_mask;
+	MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_type, icmp6_m->type);
+	MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_type,
+		 icmp6_v->type & icmp6_m->type);
+	MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_code, icmp6_m->code);
+	MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_code,
+		 icmp6_v->code & icmp6_m->code);
+}
+
+/**
+ * Add ICMP item to matcher and to the value.
+ *
+ * @param[in, out] matcher
+ *   Flow matcher.
+ * @param[in, out] key
+ *   Flow matcher value.
+ * @param[in] item
+ *   Flow pattern to translate.
+ * @param[in] inner
+ *   Item is inner pattern.
+ */
+static void
+flow_dv_translate_item_icmp(void *matcher, void *key,
+			    const struct rte_flow_item *item,
+			    int inner)
+{
+	const struct rte_flow_item_icmp *icmp_m = item->mask;
+	const struct rte_flow_item_icmp *icmp_v = item->spec;
+	void *headers_m;
+	void *headers_v;
+	void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
+				     misc_parameters_3);
+	void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
+	if (inner) {
+		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+					 inner_headers);
+		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
+	} else {
+		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+					 outer_headers);
+		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
+	}
+	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
+	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMP);
+	if (!icmp_v)
+		return;
+	if (!icmp_m)
+		icmp_m = &rte_flow_item_icmp_mask;
+	MLX5_SET(fte_match_set_misc3, misc3_m, icmp_type,
+		 icmp_m->hdr.icmp_type);
+	MLX5_SET(fte_match_set_misc3, misc3_v, icmp_type,
+		 icmp_v->hdr.icmp_type & icmp_m->hdr.icmp_type);
+	MLX5_SET(fte_match_set_misc3, misc3_m, icmp_code,
+		 icmp_m->hdr.icmp_code);
+	MLX5_SET(fte_match_set_misc3, misc3_v, icmp_code,
+		 icmp_v->hdr.icmp_code & icmp_m->hdr.icmp_code);
+}
+
 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
 
 #define HEADER_IS_ZERO(match_criteria, headers)				     \
@@ -4020,6 +4132,16 @@ flow_dv_translate(struct rte_eth_dev *dev,
 						    items);
 			last_item = MLX5_FLOW_ITEM_METADATA;
 			break;
+		case RTE_FLOW_ITEM_TYPE_ICMP:
+			flow_dv_translate_item_icmp(match_mask, match_value,
+						    items, tunnel);
+			last_item = MLX5_FLOW_LAYER_ICMP;
+			break;
+		case RTE_FLOW_ITEM_TYPE_ICMP6:
+			flow_dv_translate_item_icmp6(match_mask, match_value,
+						      items, tunnel);
+			last_item = MLX5_FLOW_LAYER_ICMP6;
+			break;
 		default:
 			break;
 		}
-- 
2.21.0



Thread overview: 7+ messages in thread
2019-05-28  2:14 [dpdk-dev] [PATCH] net/mlx5: support matching on ICMP/ICMP6 Xiaoyu Min
2019-07-02  6:26 ` [dpdk-dev] [PATCH v2] " Xiaoyu Min
2019-07-03  6:14   ` Slava Ovsiienko
2019-07-03  6:31     ` Jack Min
2019-07-03  7:22 ` [dpdk-dev] [PATCH v3] " Xiaoyu Min
2019-07-03  7:46   ` [dpdk-dev] [Suspected-Phishing][PATCH " Slava Ovsiienko
2019-07-08 11:42   ` Raslan Darawsheh
