DPDK patches and discussions
* [PATCH] net/iavf: support raw packet for flow subscription
@ 2022-10-09  9:00 Jie Wang
  2022-10-13  6:03 ` Zhang, Qi Z
  0 siblings, 1 reply; 2+ messages in thread
From: Jie Wang @ 2022-10-09  9:00 UTC (permalink / raw)
  To: dev
  Cc: stevex.yang, qi.z.zhang, qiming.yang, jingjing.wu, beilei.xing, Jie Wang

Add Protocol Agnostic Flow (raw flow) support for flow subscription
in AVF.

For example, testpmd creates a flow subscription raw packet rule:
rule: eth + ipv4 src is 1.1.1.1 dst is 2.2.2.2

cmd: flow create 0 ingress pattern raw pattern spec \
00000000000000000000000008004500001400000000000000000101010102020202 \
pattern mask \
0000000000000000000000000000000000000000000000000000FFFFFFFFFFFFFFFF \
/ end actions port_representor port_id 0 / passthru / end
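
Note for reference: the raw "pattern spec" and "pattern mask" above are
ASCII hex strings; the driver converts each pair of hex characters into
one byte of the packet spec/mask before the filter is passed down in the
virtchnl proto_hdrs structure. A minimal standalone sketch of that
conversion (hypothetical helper names, not the driver's code) could look
like:

#include <stdint.h>

/* Convert one ASCII hex digit to its value; -1 if invalid. */
static int
hex_nibble(char c)
{
	if (c >= '0' && c <= '9')
		return c - '0';
	if (c >= 'a' && c <= 'f')
		return c - 'a' + 10;
	if (c >= 'A' && c <= 'F')
		return c - 'A' + 10;
	return -1;
}

/* Convert 'len' hex characters (len must be even) into len / 2 bytes
 * written to 'out'. Returns the byte count, or -1 on malformed input.
 */
static int
hex_str_to_bytes(const uint8_t *hex, uint16_t len, uint8_t *out)
{
	uint16_t i;

	if (len & 1)
		return -1;

	for (i = 0; i < len; i += 2) {
		int hi = hex_nibble(hex[i]);
		int lo = hex_nibble(hex[i + 1]);

		if (hi < 0 || lo < 0)
			return -1;
		out[i / 2] = (uint8_t)((hi << 4) | lo);
	}
	return len / 2;
}

With the spec/mask strings from the example rule, such a helper would
yield the 34-byte packet template and mask that select the IPv4 source
and destination addresses.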

Signed-off-by: Jie Wang <jie1x.wang@intel.com>
---
 drivers/net/iavf/iavf_fsub.c | 99 +++++++++++++++++++++++++++++++-----
 1 file changed, 87 insertions(+), 12 deletions(-)

diff --git a/drivers/net/iavf/iavf_fsub.c b/drivers/net/iavf/iavf_fsub.c
index 3be75923a5..ee162f575f 100644
--- a/drivers/net/iavf/iavf_fsub.c
+++ b/drivers/net/iavf/iavf_fsub.c
@@ -57,6 +57,7 @@ static struct iavf_flow_parser iavf_fsub_parser;
 
 static struct
 iavf_pattern_match_item iavf_fsub_pattern_list[] = {
+	{iavf_pattern_raw,				IAVF_INSET_NONE,			IAVF_INSET_NONE},
 	{iavf_pattern_ethertype,			IAVF_SW_INSET_ETHER,			IAVF_INSET_NONE},
 	{iavf_pattern_eth_ipv4,				IAVF_SW_INSET_MAC_IPV4,			IAVF_INSET_NONE},
 	{iavf_pattern_eth_vlan_ipv4,			IAVF_SW_INSET_MAC_VLAN_IPV4,		IAVF_INSET_NONE},
@@ -153,6 +154,7 @@ iavf_fsub_parse_pattern(const struct rte_flow_item pattern[],
 {
 	struct virtchnl_proto_hdrs *hdrs = &filter->sub_fltr.proto_hdrs;
 	enum rte_flow_item_type item_type;
+	const struct rte_flow_item_raw *raw_spec, *raw_mask;
 	const struct rte_flow_item_eth *eth_spec, *eth_mask;
 	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
 	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
@@ -164,20 +166,83 @@ iavf_fsub_parse_pattern(const struct rte_flow_item pattern[],
 	uint64_t outer_input_set = IAVF_INSET_NONE;
 	uint64_t *input = NULL;
 	uint16_t input_set_byte = 0;
-	uint16_t j;
+	uint8_t item_num = 0;
 	uint32_t layer = 0;
 
-	for (item = pattern; item->type !=
-			RTE_FLOW_ITEM_TYPE_END; item++) {
+	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
 		if (item->last) {
 			rte_flow_error_set(error, EINVAL,
 					   RTE_FLOW_ERROR_TYPE_ITEM,
 					   item, "Not support range");
-			return false;
+			return -rte_errno;
 		}
+
 		item_type = item->type;
+		item_num++;
 
 		switch (item_type) {
+		case RTE_FLOW_ITEM_TYPE_RAW: {
+			raw_spec = item->spec;
+			raw_mask = item->mask;
+
+			if (item_num != 1)
+				return -rte_errno;
+
+			if (raw_spec->length != raw_mask->length)
+				return -rte_errno;
+
+			uint16_t pkt_len = 0;
+			uint16_t tmp_val = 0;
+			uint8_t tmp = 0;
+			int i, j;
+
+			pkt_len = raw_spec->length;
+
+			for (i = 0, j = 0; i < pkt_len; i += 2, j++) {
+				tmp = raw_spec->pattern[i];
+				if (tmp >= 'a' && tmp <= 'f')
+					tmp_val = tmp - 'a' + 10;
+				if (tmp >= 'A' && tmp <= 'F')
+					tmp_val = tmp - 'A' + 10;
+				if (tmp >= '0' && tmp <= '9')
+					tmp_val = tmp - '0';
+
+				tmp_val *= 16;
+				tmp = raw_spec->pattern[i + 1];
+				if (tmp >= 'a' && tmp <= 'f')
+					tmp_val += (tmp - 'a' + 10);
+				if (tmp >= 'A' && tmp <= 'F')
+					tmp_val += (tmp - 'A' + 10);
+				if (tmp >= '0' && tmp <= '9')
+					tmp_val += (tmp - '0');
+
+				hdrs->raw.spec[j] = tmp_val;
+
+				tmp = raw_mask->pattern[i];
+				if (tmp >= 'a' && tmp <= 'f')
+					tmp_val = tmp - 'a' + 10;
+				if (tmp >= 'A' && tmp <= 'F')
+					tmp_val = tmp - 'A' + 10;
+				if (tmp >= '0' && tmp <= '9')
+					tmp_val = tmp - '0';
+
+				tmp_val *= 16;
+				tmp = raw_mask->pattern[i + 1];
+				if (tmp >= 'a' && tmp <= 'f')
+					tmp_val += (tmp - 'a' + 10);
+				if (tmp >= 'A' && tmp <= 'F')
+					tmp_val += (tmp - 'A' + 10);
+				if (tmp >= '0' && tmp <= '9')
+					tmp_val += (tmp - '0');
+
+				hdrs->raw.mask[j] = tmp_val;
+			}
+
+			hdrs->raw.pkt_len = pkt_len / 2;
+			hdrs->tunnel_level = 0;
+			hdrs->count = 0;
+			return 0;
+		}
 		case RTE_FLOW_ITEM_TYPE_ETH:
 			eth_spec = item->spec;
 			eth_mask = item->mask;
@@ -236,7 +301,7 @@ iavf_fsub_parse_pattern(const struct rte_flow_item pattern[],
 					rte_flow_error_set(error, EINVAL,
 						RTE_FLOW_ERROR_TYPE_ITEM,
 						item, "Invalid IPv4 mask.");
-					return false;
+					return -rte_errno;
 				}
 
 				if (ipv4_mask->hdr.src_addr) {
@@ -268,7 +333,9 @@ iavf_fsub_parse_pattern(const struct rte_flow_item pattern[],
 
 			hdrs->count = ++layer;
 			break;
-		case RTE_FLOW_ITEM_TYPE_IPV6:
+		case RTE_FLOW_ITEM_TYPE_IPV6: {
+			int j;
+
 			ipv6_spec = item->spec;
 			ipv6_mask = item->mask;
 
@@ -283,7 +350,7 @@ iavf_fsub_parse_pattern(const struct rte_flow_item pattern[],
 					rte_flow_error_set(error, EINVAL,
 						RTE_FLOW_ERROR_TYPE_ITEM,
 						item, "Invalid IPv6 mask");
-					return false;
+					return -rte_errno;
 				}
 
 				for (j = 0; j < IAVF_IPV6_ADDR_LENGTH; j++) {
@@ -329,6 +396,7 @@ iavf_fsub_parse_pattern(const struct rte_flow_item pattern[],
 
 			hdrs->count = ++layer;
 			break;
+		}
 		case RTE_FLOW_ITEM_TYPE_UDP:
 			udp_spec = item->spec;
 			udp_mask = item->mask;
@@ -345,7 +413,7 @@ iavf_fsub_parse_pattern(const struct rte_flow_item pattern[],
 					rte_flow_error_set(error, EINVAL,
 						RTE_FLOW_ERROR_TYPE_ITEM,
 						item, "Invalid UDP mask");
-					return false;
+					return -rte_errno;
 				}
 
 				if (udp_mask->hdr.src_port) {
@@ -386,7 +454,7 @@ iavf_fsub_parse_pattern(const struct rte_flow_item pattern[],
 					rte_flow_error_set(error, EINVAL,
 						RTE_FLOW_ERROR_TYPE_ITEM,
 						item, "Invalid TCP mask");
-					return false;
+					return -rte_errno;
 				}
 
 				if (tcp_mask->hdr.src_port) {
@@ -425,9 +493,8 @@ iavf_fsub_parse_pattern(const struct rte_flow_item pattern[],
 				if (vlan_mask->inner_type) {
 					rte_flow_error_set(error, EINVAL,
 						RTE_FLOW_ERROR_TYPE_ITEM,
-						item,
-						"Invalid VLAN input set.");
-					return false;
+						item, "Invalid VLAN input set.");
+					return -rte_errno;
 				}
 
 				rte_memcpy(hdr->buffer_spec, &vlan_spec->hdr,
@@ -494,6 +561,13 @@ iavf_fsub_parse_action(struct iavf_adapter *ad,
 			if (rule_port_id != act_ethdev->port_id)
 				goto error1;
 
+			filter->sub_fltr.actions.count = ++num;
+			break;
+		case RTE_FLOW_ACTION_TYPE_PASSTHRU:
+			dest_num++;
+			filter_action = &filter->sub_fltr.actions.actions[num];
+
+			filter_action->type = VIRTCHNL_ACTION_PASSTHRU;
 			filter->sub_fltr.actions.count = ++num;
 			break;
 		case RTE_FLOW_ACTION_TYPE_QUEUE:
@@ -615,6 +689,7 @@ iavf_fsub_check_action(const struct rte_flow_action *actions,
 			vf_valid = true;
 			actions_num++;
 			break;
+		case RTE_FLOW_ACTION_TYPE_PASSTHRU:
 		case RTE_FLOW_ACTION_TYPE_RSS:
 		case RTE_FLOW_ACTION_TYPE_QUEUE:
 			queue_valid = true;
-- 
2.25.1



* RE: [PATCH] net/iavf: support raw packet for flow subscription
  2022-10-09  9:00 [PATCH] net/iavf: support raw packet for flow subscription Jie Wang
@ 2022-10-13  6:03 ` Zhang, Qi Z
  0 siblings, 0 replies; 2+ messages in thread
From: Zhang, Qi Z @ 2022-10-13  6:03 UTC (permalink / raw)
  To: Wang, Jie1X, dev; +Cc: Yang, SteveX, Yang, Qiming, Wu, Jingjing, Xing, Beilei



> -----Original Message-----
> From: Wang, Jie1X <jie1x.wang@intel.com>
> Sent: Sunday, October 9, 2022 5:01 PM
> To: dev@dpdk.org
> Cc: Yang, SteveX <stevex.yang@intel.com>; Zhang, Qi Z
> <qi.z.zhang@intel.com>; Yang, Qiming <qiming.yang@intel.com>; Wu,
> Jingjing <jingjing.wu@intel.com>; Xing, Beilei <beilei.xing@intel.com>; Wang,
> Jie1X <jie1x.wang@intel.com>
> Subject: [PATCH] net/iavf: support raw packet for flow subscription
> 
> Add Protocol Agnostic Flow (raw flow) support for flow subscription in AVF.
> 
> For example, testpmd creates a flow subscription raw packet rule:
> rule: eth + ipv4 src is 1.1.1.1 dst is 2.2.2.2
> 
> cmd: flow create 0 ingress pattern raw pattern spec \
> 0000000000000000000000000800450000140000000000000000010101010202
> 0202 \ pattern mask \
> 0000000000000000000000000000000000000000000000000000FFFFFFFFFFFFFF
> FF \ / end actions port_representor port_id 0 / passthru / end
> 
> Signed-off-by: Jie Wang <jie1x.wang@intel.com>

Acked-by: Qi Zhang <qi.z.zhang@intel.com>

Applied to dpdk-next-net-intel.

Thanks
Qi

