From: Alex Vesker <valex@nvidia.com>
To: <valex@nvidia.com>, <viacheslavo@nvidia.com>,
	<thomas@monjalon.net>, "Matan Azrad" <matan@nvidia.com>
Cc: <dev@dpdk.org>, <orika@nvidia.com>
Subject: [v2 09/16] net/mlx5/hws: support range match
Date: Wed, 1 Feb 2023 09:28:08 +0200	[thread overview]
Message-ID: <20230201072815.1329101-10-valex@nvidia.com> (raw)
In-Reply-To: <20230201072815.1329101-1-valex@nvidia.com>

Support range matching over selected items; ranges are not
supported for all items. A range match is described using three
item fields:
item->last.field - maximum value
item->mask.field - bitmask
item->spec.field - minimum value

When items are processed, range matching is applied to any field
whose last and mask values are both non-zero. There are two
field setters: field copy (fc) and field copy range (fcr).
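
For example (an illustrative sketch, not part of this patch),
matching TCP destination ports 1000-2000 is expressed through the
standard rte_flow spec/last/mask convention; the port values here
are arbitrary:

	/* Sketch only; needs rte_flow.h and rte_byteorder.h. */
	struct rte_flow_item_tcp spec = {
		.hdr.dst_port = RTE_BE16(1000),	/* minimum value */
	};
	struct rte_flow_item_tcp last = {
		.hdr.dst_port = RTE_BE16(2000),	/* maximum value */
	};
	struct rte_flow_item_tcp mask = {
		.hdr.dst_port = RTE_BE16(0xffff),	/* bitmask: full field */
	};
	struct rte_flow_item item = {
		.type = RTE_FLOW_ITEM_TYPE_TCP,
		.spec = &spec,
		.last = &last,
		.mask = &mask,
	};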

Signed-off-by: Alex Vesker <valex@nvidia.com>
---
 drivers/net/mlx5/hws/mlx5dr_definer.c | 73 +++++++++++++++++++++++++--
 drivers/net/mlx5/hws/mlx5dr_definer.h |  5 +-
 2 files changed, 72 insertions(+), 6 deletions(-)

diff --git a/drivers/net/mlx5/hws/mlx5dr_definer.c b/drivers/net/mlx5/hws/mlx5dr_definer.c
index 6b98eb8c96..c268f94ad3 100644
--- a/drivers/net/mlx5/hws/mlx5dr_definer.c
+++ b/drivers/net/mlx5/hws/mlx5dr_definer.c
@@ -123,6 +123,7 @@ struct mlx5dr_definer_conv_data {
 	X(SET,		ipv4_next_proto,	v->next_proto_id,	rte_ipv4_hdr) \
 	X(SET,		ipv4_version,		STE_IPV4,		rte_ipv4_hdr) \
 	X(SET_BE16,	ipv4_frag,		v->fragment_offset,	rte_ipv4_hdr) \
+	X(SET_BE16,	ipv4_len,		v->total_length,	rte_ipv4_hdr) \
 	X(SET_BE16,	ipv6_payload_len,	v->hdr.payload_len,	rte_flow_item_ipv6) \
 	X(SET,		ipv6_proto,		v->hdr.proto,		rte_flow_item_ipv6) \
 	X(SET,		ipv6_hop_limits,	v->hdr.hop_limits,	rte_flow_item_ipv6) \
@@ -516,6 +517,7 @@ mlx5dr_definer_conv_item_ipv4(struct mlx5dr_definer_conv_data *cd,
 			      int item_idx)
 {
 	const struct rte_ipv4_hdr *m = item->mask;
+	const struct rte_ipv4_hdr *l = item->last;
 	struct mlx5dr_definer_fc *fc;
 	bool inner = cd->tunnel;
 
@@ -533,8 +535,8 @@ mlx5dr_definer_conv_item_ipv4(struct mlx5dr_definer_conv_data *cd,
 	if (!m)
 		return 0;
 
-	if (m->total_length || m->packet_id ||
-	    m->hdr_checksum) {
+	if (m->packet_id || m->hdr_checksum ||
+	    (l && (l->next_proto_id || l->type_of_service))) {
 		rte_errno = ENOTSUP;
 		return rte_errno;
 	}
@@ -553,9 +555,18 @@ mlx5dr_definer_conv_item_ipv4(struct mlx5dr_definer_conv_data *cd,
 		DR_CALC_SET(fc, eth_l3, protocol_next_header, inner);
 	}
 
+	if (m->total_length) {
+		fc = &cd->fc[DR_CALC_FNAME(IP_LEN, inner)];
+		fc->item_idx = item_idx;
+		fc->is_range = l && l->total_length;
+		fc->tag_set = &mlx5dr_definer_ipv4_len_set;
+		DR_CALC_SET(fc, eth_l3, ipv4_total_length, inner);
+	}
+
 	if (m->dst_addr) {
 		fc = &cd->fc[DR_CALC_FNAME(IPV4_DST, inner)];
 		fc->item_idx = item_idx;
+		fc->is_range = l && l->dst_addr;
 		fc->tag_set = &mlx5dr_definer_ipv4_dst_addr_set;
 		DR_CALC_SET(fc, ipv4_src_dest, destination_address, inner);
 	}
@@ -563,6 +574,7 @@ mlx5dr_definer_conv_item_ipv4(struct mlx5dr_definer_conv_data *cd,
 	if (m->src_addr) {
 		fc = &cd->fc[DR_CALC_FNAME(IPV4_SRC, inner)];
 		fc->item_idx = item_idx;
+		fc->is_range = l && l->src_addr;
 		fc->tag_set = &mlx5dr_definer_ipv4_src_addr_set;
 		DR_CALC_SET(fc, ipv4_src_dest, source_address, inner);
 	}
@@ -570,6 +582,7 @@ mlx5dr_definer_conv_item_ipv4(struct mlx5dr_definer_conv_data *cd,
 	if (m->ihl) {
 		fc = &cd->fc[DR_CALC_FNAME(IPV4_IHL, inner)];
 		fc->item_idx = item_idx;
+		fc->is_range = l && l->ihl;
 		fc->tag_set = &mlx5dr_definer_ipv4_ihl_set;
 		DR_CALC_SET(fc, eth_l3, ihl, inner);
 	}
@@ -577,6 +590,7 @@ mlx5dr_definer_conv_item_ipv4(struct mlx5dr_definer_conv_data *cd,
 	if (m->time_to_live) {
 		fc = &cd->fc[DR_CALC_FNAME(IP_TTL, inner)];
 		fc->item_idx = item_idx;
+		fc->is_range = l && l->time_to_live;
 		fc->tag_set = &mlx5dr_definer_ipv4_time_to_live_set;
 		DR_CALC_SET(fc, eth_l3, time_to_live_hop_limit, inner);
 	}
@@ -597,6 +611,7 @@ mlx5dr_definer_conv_item_ipv6(struct mlx5dr_definer_conv_data *cd,
 			      int item_idx)
 {
 	const struct rte_flow_item_ipv6 *m = item->mask;
+	const struct rte_flow_item_ipv6 *l = item->last;
 	struct mlx5dr_definer_fc *fc;
 	bool inner = cd->tunnel;
 
@@ -616,7 +631,10 @@ mlx5dr_definer_conv_item_ipv6(struct mlx5dr_definer_conv_data *cd,
 
 	if (m->has_hop_ext || m->has_route_ext || m->has_auth_ext ||
 	    m->has_esp_ext || m->has_dest_ext || m->has_mobil_ext ||
-	    m->has_hip_ext || m->has_shim6_ext) {
+	    m->has_hip_ext || m->has_shim6_ext ||
+	    (l && (l->has_frag_ext || l->hdr.vtc_flow || l->hdr.proto ||
+		   !is_mem_zero(l->hdr.src_addr, 16) ||
+		   !is_mem_zero(l->hdr.dst_addr, 16)))) {
 		rte_errno = ENOTSUP;
 		return rte_errno;
 	}
@@ -643,8 +661,9 @@ mlx5dr_definer_conv_item_ipv6(struct mlx5dr_definer_conv_data *cd,
 	}
 
 	if (m->hdr.payload_len) {
-		fc = &cd->fc[DR_CALC_FNAME(IPV6_PAYLOAD_LEN, inner)];
+		fc = &cd->fc[DR_CALC_FNAME(IP_LEN, inner)];
 		fc->item_idx = item_idx;
+		fc->is_range = l && l->hdr.payload_len;
 		fc->tag_set = &mlx5dr_definer_ipv6_payload_len_set;
 		DR_CALC_SET(fc, eth_l3, ipv6_payload_length, inner);
 	}
@@ -659,6 +678,7 @@ mlx5dr_definer_conv_item_ipv6(struct mlx5dr_definer_conv_data *cd,
 	if (m->hdr.hop_limits) {
 		fc = &cd->fc[DR_CALC_FNAME(IP_TTL, inner)];
 		fc->item_idx = item_idx;
+		fc->is_range = l && l->hdr.hop_limits;
 		fc->tag_set = &mlx5dr_definer_ipv6_hop_limits_set;
 		DR_CALC_SET(fc, eth_l3, time_to_live_hop_limit, inner);
 	}
@@ -728,6 +748,7 @@ mlx5dr_definer_conv_item_udp(struct mlx5dr_definer_conv_data *cd,
 			     int item_idx)
 {
 	const struct rte_flow_item_udp *m = item->mask;
+	const struct rte_flow_item_udp *l = item->last;
 	struct mlx5dr_definer_fc *fc;
 	bool inner = cd->tunnel;
 
@@ -751,6 +772,7 @@ mlx5dr_definer_conv_item_udp(struct mlx5dr_definer_conv_data *cd,
 	if (m->hdr.src_port) {
 		fc = &cd->fc[DR_CALC_FNAME(L4_SPORT, inner)];
 		fc->item_idx = item_idx;
+		fc->is_range = l && l->hdr.src_port;
 		fc->tag_set = &mlx5dr_definer_udp_src_port_set;
 		DR_CALC_SET(fc, eth_l4, source_port, inner);
 	}
@@ -758,6 +780,7 @@ mlx5dr_definer_conv_item_udp(struct mlx5dr_definer_conv_data *cd,
 	if (m->hdr.dst_port) {
 		fc = &cd->fc[DR_CALC_FNAME(L4_DPORT, inner)];
 		fc->item_idx = item_idx;
+		fc->is_range = l && l->hdr.dst_port;
 		fc->tag_set = &mlx5dr_definer_udp_dst_port_set;
 		DR_CALC_SET(fc, eth_l4, destination_port, inner);
 	}
@@ -771,6 +794,7 @@ mlx5dr_definer_conv_item_tcp(struct mlx5dr_definer_conv_data *cd,
 			     int item_idx)
 {
 	const struct rte_flow_item_tcp *m = item->mask;
+	const struct rte_flow_item_tcp *l = item->last;
 	struct mlx5dr_definer_fc *fc;
 	bool inner = cd->tunnel;
 
@@ -786,9 +810,16 @@ mlx5dr_definer_conv_item_tcp(struct mlx5dr_definer_conv_data *cd,
 	if (!m)
 		return 0;
 
+	if (m->hdr.sent_seq || m->hdr.recv_ack || m->hdr.data_off ||
+	    m->hdr.rx_win || m->hdr.cksum || m->hdr.tcp_urp) {
+		rte_errno = ENOTSUP;
+		return rte_errno;
+	}
+
 	if (m->hdr.tcp_flags) {
 		fc = &cd->fc[DR_CALC_FNAME(TCP_FLAGS, inner)];
 		fc->item_idx = item_idx;
+		fc->is_range = l && l->hdr.tcp_flags;
 		fc->tag_set = &mlx5dr_definer_tcp_flags_set;
 		DR_CALC_SET(fc, eth_l4, tcp_flags, inner);
 	}
@@ -796,6 +827,7 @@ mlx5dr_definer_conv_item_tcp(struct mlx5dr_definer_conv_data *cd,
 	if (m->hdr.src_port) {
 		fc = &cd->fc[DR_CALC_FNAME(L4_SPORT, inner)];
 		fc->item_idx = item_idx;
+		fc->is_range = l && l->hdr.src_port;
 		fc->tag_set = &mlx5dr_definer_tcp_src_port_set;
 		DR_CALC_SET(fc, eth_l4, source_port, inner);
 	}
@@ -803,6 +835,7 @@ mlx5dr_definer_conv_item_tcp(struct mlx5dr_definer_conv_data *cd,
 	if (m->hdr.dst_port) {
 		fc = &cd->fc[DR_CALC_FNAME(L4_DPORT, inner)];
 		fc->item_idx = item_idx;
+		fc->is_range = l && l->hdr.dst_port;
 		fc->tag_set = &mlx5dr_definer_tcp_dst_port_set;
 		DR_CALC_SET(fc, eth_l4, destination_port, inner);
 	}
@@ -1108,6 +1141,7 @@ mlx5dr_definer_conv_item_tag(struct mlx5dr_definer_conv_data *cd,
 {
 	const struct rte_flow_item_tag *m = item->mask;
 	const struct rte_flow_item_tag *v = item->spec;
+	const struct rte_flow_item_tag *l = item->last;
 	struct mlx5dr_definer_fc *fc;
 	int reg;
 
@@ -1130,7 +1164,9 @@ mlx5dr_definer_conv_item_tag(struct mlx5dr_definer_conv_data *cd,
 		return rte_errno;
 
 	fc->item_idx = item_idx;
+	fc->is_range = l && l->index;
 	fc->tag_set = &mlx5dr_definer_tag_set;
+
 	return 0;
 }
 
@@ -1140,6 +1176,7 @@ mlx5dr_definer_conv_item_metadata(struct mlx5dr_definer_conv_data *cd,
 				  int item_idx)
 {
 	const struct rte_flow_item_meta *m = item->mask;
+	const struct rte_flow_item_meta *l = item->last;
 	struct mlx5dr_definer_fc *fc;
 	int reg;
 
@@ -1158,7 +1195,9 @@ mlx5dr_definer_conv_item_metadata(struct mlx5dr_definer_conv_data *cd,
 		return rte_errno;
 
 	fc->item_idx = item_idx;
+	fc->is_range = l && l->data;
 	fc->tag_set = &mlx5dr_definer_metadata_set;
+
 	return 0;
 }
 
@@ -1465,6 +1504,28 @@ mlx5dr_definer_conv_item_meter_color(struct mlx5dr_definer_conv_data *cd,
 	return 0;
 }
 
+static int
+mlx5dr_definer_check_item_range_supp(struct rte_flow_item *item)
+{
+	if (!item->last)
+		return 0;
+
+	switch ((int)item->type) {
+	case RTE_FLOW_ITEM_TYPE_IPV4:
+	case RTE_FLOW_ITEM_TYPE_IPV6:
+	case RTE_FLOW_ITEM_TYPE_UDP:
+	case RTE_FLOW_ITEM_TYPE_TCP:
+	case RTE_FLOW_ITEM_TYPE_TAG:
+	case RTE_FLOW_ITEM_TYPE_META:
+	case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
+		return 0;
+	default:
+		DR_LOG(ERR, "Range not supported over item type %d", item->type);
+		rte_errno = ENOTSUP;
+		return rte_errno;
+	}
+}
+
 static int
 mlx5dr_definer_conv_items_to_hl(struct mlx5dr_context *ctx,
 				struct mlx5dr_match_template *mt,
@@ -1487,6 +1548,10 @@ mlx5dr_definer_conv_items_to_hl(struct mlx5dr_context *ctx,
 	for (i = 0; items->type != RTE_FLOW_ITEM_TYPE_END; i++, items++) {
 		cd.tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
 
+		ret = mlx5dr_definer_check_item_range_supp(items);
+		if (ret)
+			return ret;
+
 		switch ((int)items->type) {
 		case RTE_FLOW_ITEM_TYPE_ETH:
 			ret = mlx5dr_definer_conv_item_eth(&cd, items, i);
diff --git a/drivers/net/mlx5/hws/mlx5dr_definer.h b/drivers/net/mlx5/hws/mlx5dr_definer.h
index d52c6b0627..bab4baae4a 100644
--- a/drivers/net/mlx5/hws/mlx5dr_definer.h
+++ b/drivers/net/mlx5/hws/mlx5dr_definer.h
@@ -38,8 +38,8 @@ enum mlx5dr_definer_fname {
 	MLX5DR_DEFINER_FNAME_IP_VERSION_I,
 	MLX5DR_DEFINER_FNAME_IP_FRAG_O,
 	MLX5DR_DEFINER_FNAME_IP_FRAG_I,
-	MLX5DR_DEFINER_FNAME_IPV6_PAYLOAD_LEN_O,
-	MLX5DR_DEFINER_FNAME_IPV6_PAYLOAD_LEN_I,
+	MLX5DR_DEFINER_FNAME_IP_LEN_O,
+	MLX5DR_DEFINER_FNAME_IP_LEN_I,
 	MLX5DR_DEFINER_FNAME_IP_TOS_O,
 	MLX5DR_DEFINER_FNAME_IP_TOS_I,
 	MLX5DR_DEFINER_FNAME_IPV6_FLOW_LABEL_O,
@@ -116,6 +116,7 @@ enum mlx5dr_definer_type {
 
 struct mlx5dr_definer_fc {
 	uint8_t item_idx;
+	uint8_t is_range;
 	uint32_t byte_off;
 	int bit_off;
 	uint32_t bit_mask;
-- 
2.18.1


Thread overview: 36+ messages
2023-01-31  9:33 [v1 00/16] net/mlx5/hws: support range and partial hash matching Alex Vesker
2023-01-31  9:33 ` [v1 01/16] net/mlx5/hws: support synchronous drain Alex Vesker
2023-01-31  9:33 ` [v1 02/16] net/mlx5/hws: matcher remove AT and MT limitation Alex Vesker
2023-01-31  9:33 ` [v1 03/16] net/mlx5/hws: support GTA WQE write using FW command Alex Vesker
2023-01-31  9:33 ` [v1 04/16] net/mlx5/hws: add capability query for gen wqe command Alex Vesker
2023-01-31  9:33 ` [v1 05/16] net/mlx5/hws: align RTC create command with PRM format Alex Vesker
2023-01-31  9:33 ` [v1 06/16] net/mlx5/hws: add send FW match STE using gen WQE Alex Vesker
2023-01-31  9:33 ` [v1 07/16] net/mlx5/hws: add send FW range STE WQE Alex Vesker
2023-01-31  9:33 ` [v1 08/16] net/mlx5/hws: move matcher size check to function Alex Vesker
2023-01-31  9:33 ` [v1 09/16] net/mlx5/hws: support range match Alex Vesker
2023-01-31  9:33 ` [v1 10/16] net/mlx5/hws: redesign definer create Alex Vesker
2023-01-31  9:33 ` [v1 11/16] net/mlx5/hws: support partial hash Alex Vesker
2023-01-31  9:33 ` [v1 12/16] net/mlx5/hws: add range definer creation support Alex Vesker
2023-01-31  9:33 ` [v1 13/16] net/mlx5/hws: add FW WQE rule creation logic Alex Vesker
2023-01-31  9:33 ` [v1 14/16] net/mlx5/hws: add debug dump support for range and hash Alex Vesker
2023-01-31  9:33 ` [v1 15/16] net/mlx5/hws: rename pattern cache object Alex Vesker
2023-01-31  9:33 ` [v1 16/16] net/mlx5/hws: cache definer for reuse Alex Vesker
2023-02-01  7:27 ` [v2 00/16] net/mlx5/hws: support range and partial hash matching Alex Vesker
2023-02-01  7:28   ` [v2 01/16] net/mlx5/hws: support synchronous drain Alex Vesker
2023-02-01  7:28   ` [v2 02/16] net/mlx5/hws: matcher remove AT and MT limitation Alex Vesker
2023-02-01  7:28   ` [v2 03/16] net/mlx5/hws: support GTA WQE write using FW command Alex Vesker
2023-02-01  7:28   ` [v2 04/16] net/mlx5/hws: add capability query for gen wqe command Alex Vesker
2023-02-01  7:28   ` [v2 05/16] net/mlx5/hws: align RTC create command with PRM format Alex Vesker
2023-02-01  7:28   ` [v2 06/16] net/mlx5/hws: add send FW match STE using gen WQE Alex Vesker
2023-02-01  7:28   ` [v2 07/16] net/mlx5/hws: add send FW range STE WQE Alex Vesker
2023-02-01  7:28   ` [v2 08/16] net/mlx5/hws: move matcher size check to function Alex Vesker
2023-02-01  7:28   ` Alex Vesker [this message]
2023-02-01  7:28   ` [v2 10/16] net/mlx5/hws: redesign definer create Alex Vesker
2023-02-01  7:28   ` [v2 11/16] net/mlx5/hws: support partial hash Alex Vesker
2023-02-01  7:28   ` [v2 12/16] net/mlx5/hws: add range definer creation support Alex Vesker
2023-02-01  7:28   ` [v2 13/16] net/mlx5/hws: add FW WQE rule creation logic Alex Vesker
2023-02-01  7:28   ` [v2 14/16] net/mlx5/hws: add debug dump support for range and hash Alex Vesker
2023-02-01  7:28   ` [v2 15/16] net/mlx5/hws: rename pattern cache object Alex Vesker
2023-02-01  7:28   ` [v2 16/16] net/mlx5/hws: cache definer for reuse Alex Vesker
2023-02-06 15:07   ` [v2 00/16] net/mlx5/hws: support range and partial hash matching Matan Azrad
2023-02-13  8:27   ` Raslan Darawsheh
