DPDK patches and discussions
* [PATCH 0/2] support flow subscription
@ 2022-08-09  6:21 Jie Wang
  2022-08-09  6:21 ` [PATCH 1/2] common/iavf: " Jie Wang
                   ` (6 more replies)
  0 siblings, 7 replies; 40+ messages in thread
From: Jie Wang @ 2022-08-09  6:21 UTC (permalink / raw)
  To: dev
  Cc: qiming.yang, qi.z.zhang, jingjing.wu, beilei.xing, stevex.yang, Jie Wang

Add support for the AVF driver to subscribe to a flow from the PF.

Signed-off-by: Jie Wang <jie1x.wang@intel.com>

Jie Wang (2):
  common/iavf: support flow subscription
  net/iavf: enable flow subscription rule support for AVF

 doc/guides/rel_notes/release_22_11.rst |   4 +
 drivers/common/iavf/virtchnl.h         | 104 +++-
 drivers/net/iavf/iavf.h                |  10 +
 drivers/net/iavf/iavf_fdir.c           |   4 +
 drivers/net/iavf/iavf_fsub.c           | 746 +++++++++++++++++++++++++
 drivers/net/iavf/iavf_generic_flow.c   |  40 +-
 drivers/net/iavf/iavf_generic_flow.h   |   2 +
 drivers/net/iavf/iavf_hash.c           |   5 +
 drivers/net/iavf/iavf_ipsec_crypto.c   |  16 +-
 drivers/net/iavf/iavf_vchnl.c          | 133 +++++
 drivers/net/iavf/meson.build           |   1 +
 11 files changed, 1044 insertions(+), 21 deletions(-)
 create mode 100644 drivers/net/iavf/iavf_fsub.c

-- 
2.25.1



* [PATCH 1/2] common/iavf: support flow subscription
  2022-08-09  6:21 [PATCH 0/2] support flow subscription Jie Wang
@ 2022-08-09  6:21 ` Jie Wang
  2022-08-09  6:21 ` [PATCH 2/2] net/iavf: enable flow subscription rule support for AVF Jie Wang
                   ` (5 subsequent siblings)
  6 siblings, 0 replies; 40+ messages in thread
From: Jie Wang @ 2022-08-09  6:21 UTC (permalink / raw)
  To: dev
  Cc: qiming.yang, qi.z.zhang, jingjing.wu, beilei.xing, stevex.yang, Jie Wang

A VF can subscribe to a flow from the PF via VIRTCHNL_FLOW_SUBSCRIBE.

The PF is expected to offload a rule to hardware that redirects
packets matching the required pattern to this VF.

Only a flow whose destination MAC address is the PF's MAC address
can be subscribed to.

VIRTCHNL_VF_OFFLOAD_FSUB_PF is used for flow subscription capability
negotiation, and only a trusted VF can be granted this capability.

A flow can be unsubscribed via VIRTCHNL_FLOW_UNSUBSCRIBE.
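
For reference, here is a minimal VF-side sketch of how the new messages
compose. It is illustrative only: it assumes the virtchnl definitions
added by this patch, and send_to_pf() is a hypothetical transport helper
that sends the message to the PF and copies the PF's reply (status and
flow_id) back into the same buffer.

static int example_subscribe(u16 vsi_id, u32 *flow_id)
{
	struct virtchnl_flow_sub msg = {0};
	struct virtchnl_proto_hdr_w_msk *eth, *ip4;

	msg.vsi_id = vsi_id;
	msg.validate_only = 0;	/* 1 = only validate, don't program the rule */
	msg.priority = 0;

	/* Masked headers go in proto_hdr_w_msk[]; count is offset by
	 * VIRTCHNL_MAX_NUM_PROTO_HDRS to signal that this array is in use.
	 */
	eth = &msg.proto_hdrs.proto_hdr_w_msk[0];
	VIRTCHNL_SET_PROTO_HDR_TYPE(eth, ETH);
	ip4 = &msg.proto_hdrs.proto_hdr_w_msk[1];
	VIRTCHNL_SET_PROTO_HDR_TYPE(ip4, IPV4);
	/* Fill buffer_spec/buffer_mask of both layers with network-order
	 * header bytes and their bit-masks before sending.
	 */
	msg.proto_hdrs.count = 2 + VIRTCHNL_MAX_NUM_PROTO_HDRS;

	msg.actions.count = 1;
	msg.actions.actions[0].type = VIRTCHNL_ACTION_QUEUE;
	msg.actions.actions[0].act_conf.queue.index = 0;

	if (send_to_pf(VIRTCHNL_OP_FLOW_SUBSCRIBE, &msg, sizeof(msg)))
		return -1;
	if (msg.status != VIRTCHNL_FSUB_SUCCESS)
		return -1;

	*flow_id = msg.flow_id;	/* needed later for VIRTCHNL_OP_FLOW_UNSUBSCRIBE */
	return 0;
}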

Signed-off-by: Jie Wang <jie1x.wang@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
 drivers/common/iavf/virtchnl.h | 104 +++++++++++++++++++++++++++++++--
 1 file changed, 100 insertions(+), 4 deletions(-)

diff --git a/drivers/common/iavf/virtchnl.h b/drivers/common/iavf/virtchnl.h
index f123daec8e..e02eec4935 100644
--- a/drivers/common/iavf/virtchnl.h
+++ b/drivers/common/iavf/virtchnl.h
@@ -168,6 +168,8 @@ enum virtchnl_ops {
 	VIRTCHNL_OP_MAP_QUEUE_VECTOR = 111,
 	VIRTCHNL_OP_CONFIG_QUEUE_BW = 112,
 	VIRTCHNL_OP_CONFIG_QUANTA = 113,
+	VIRTCHNL_OP_FLOW_SUBSCRIBE = 114,
+	VIRTCHNL_OP_FLOW_UNSUBSCRIBE = 115,
 	VIRTCHNL_OP_MAX,
 };
 
@@ -282,6 +284,10 @@ static inline const char *virtchnl_op_str(enum virtchnl_ops v_opcode)
 		return "VIRTCHNL_OP_1588_PTP_GET_CAPS";
 	case VIRTCHNL_OP_1588_PTP_GET_TIME:
 		return "VIRTCHNL_OP_1588_PTP_GET_TIME";
+	case VIRTCHNL_OP_FLOW_SUBSCRIBE:
+		return "VIRTCHNL_OP_FLOW_SUBSCRIBE";
+	case VIRTCHNL_OP_FLOW_UNSUBSCRIBE:
+		return "VIRTCHNL_OP_FLOW_UNSUBSCRIBE";
 	case VIRTCHNL_OP_MAX:
 		return "VIRTCHNL_OP_MAX";
 	default:
@@ -401,6 +407,7 @@ VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
 #define VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO	BIT(8)
 #define VIRTCHNL_VF_LARGE_NUM_QPAIRS		BIT(9)
 #define VIRTCHNL_VF_OFFLOAD_CRC			BIT(10)
+#define VIRTCHNL_VF_OFFLOAD_FSUB_PF		BIT(14)
 #define VIRTCHNL_VF_OFFLOAD_VLAN_V2		BIT(15)
 #define VIRTCHNL_VF_OFFLOAD_VLAN		BIT(16)
 #define VIRTCHNL_VF_OFFLOAD_RX_POLLING		BIT(17)
@@ -1503,6 +1510,7 @@ enum virtchnl_vfr_states {
 };
 
 #define VIRTCHNL_MAX_NUM_PROTO_HDRS	32
+#define VIRTCHNL_MAX_NUM_PROTO_HDRS_W_MSK	16
 #define VIRTCHNL_MAX_SIZE_RAW_PACKET	1024
 #define PROTO_HDR_SHIFT			5
 #define PROTO_HDR_FIELD_START(proto_hdr_type) \
@@ -1695,6 +1703,22 @@ struct virtchnl_proto_hdr {
 
 VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_proto_hdr);
 
+struct virtchnl_proto_hdr_w_msk {
+	/* see enum virtchnl_proto_hdr_type */
+	s32 type;
+	u32 pad;
+	/**
+	 * binary buffer in network order for specific header type.
+	 * For example, if type = VIRTCHNL_PROTO_HDR_IPV4, an IPv4
+	 * header is expected to be copied into the buffer.
+	 */
+	u8 buffer_spec[64];
+	/* binary buffer for bit-mask applied to specific header type */
+	u8 buffer_mask[64];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(136, virtchnl_proto_hdr_w_msk);
+
 struct virtchnl_proto_hdrs {
 	u8 tunnel_level;
 	/**
@@ -1706,11 +1730,18 @@ struct virtchnl_proto_hdrs {
 	 */
 	int count;
 	/**
-	 * number of proto layers, must < VIRTCHNL_MAX_NUM_PROTO_HDRS
-	 * must be 0 for a raw packet request.
+	 * count must be <=
+	 * VIRTCHNL_MAX_NUM_PROTO_HDRS + VIRTCHNL_MAX_NUM_PROTO_HDRS_W_MSK
+	 * count = 0 :					select raw
+	 * 0 < count <= VIRTCHNL_MAX_NUM_PROTO_HDRS :	select proto_hdr
+	 * count > VIRTCHNL_MAX_NUM_PROTO_HDRS :	select proto_hdr_w_msk
+	 * last valid index = count - VIRTCHNL_MAX_NUM_PROTO_HDRS
 	 */
 	union {
-		struct virtchnl_proto_hdr proto_hdr[VIRTCHNL_MAX_NUM_PROTO_HDRS];
+		struct virtchnl_proto_hdr
+			proto_hdr[VIRTCHNL_MAX_NUM_PROTO_HDRS];
+		struct virtchnl_proto_hdr_w_msk
+			proto_hdr_w_msk[VIRTCHNL_MAX_NUM_PROTO_HDRS_W_MSK];
 		struct {
 			u16 pkt_len;
 			u8 spec[VIRTCHNL_MAX_SIZE_RAW_PACKET];
@@ -1731,7 +1762,7 @@ struct virtchnl_rss_cfg {
 
 VIRTCHNL_CHECK_STRUCT_LEN(2444, virtchnl_rss_cfg);
 
-/* action configuration for FDIR */
+/* action configuration for FDIR and FSUB */
 struct virtchnl_filter_action {
 	/* see enum virtchnl_action type */
 	s32 type;
@@ -1849,6 +1880,65 @@ struct virtchnl_fdir_del {
 
 VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_fdir_del);
 
+/* Status returned to VF after VF requests FSUB commands
+ * VIRTCHNL_FSUB_SUCCESS
+ * VF FLOW related request was successfully done by PF.
+ * The request can be OP_FLOW_SUBSCRIBE/UNSUBSCRIBE.
+ *
+ * VIRTCHNL_FSUB_FAILURE_RULE_NORESOURCE
+ * OP_FLOW_SUBSCRIBE request failed due to lack of hardware resources.
+ *
+ * VIRTCHNL_FSUB_FAILURE_RULE_EXIST
+ * OP_FLOW_SUBSCRIBE request failed because the rule already exists.
+ *
+ * VIRTCHNL_FSUB_FAILURE_RULE_NONEXIST
+ * OP_FLOW_UNSUBSCRIBE request failed because the rule doesn't exist.
+ *
+ * VIRTCHNL_FSUB_FAILURE_RULE_INVALID
+ * OP_FLOW_SUBSCRIBE request failed due to parameter validation
+ * failure or unsupported HW.
+ */
+enum virtchnl_fsub_prgm_status {
+	VIRTCHNL_FSUB_SUCCESS = 0,
+	VIRTCHNL_FSUB_FAILURE_RULE_NORESOURCE,
+	VIRTCHNL_FSUB_FAILURE_RULE_EXIST,
+	VIRTCHNL_FSUB_FAILURE_RULE_NONEXIST,
+	VIRTCHNL_FSUB_FAILURE_RULE_INVALID,
+};
+
+/* VIRTCHNL_OP_FLOW_SUBSCRIBE
+ * VF sends this request to PF by filling out vsi_id,
+ * validate_only, priority, proto_hdrs and actions.
+ * PF will return flow_id
+ * if the request is successfully done and return status to VF.
+ */
+struct virtchnl_flow_sub {
+	u16 vsi_id; /* INPUT */
+	u8 validate_only; /* INPUT */
+	u8 priority; /* INPUT */
+	u32 flow_id; /* OUTPUT */
+	struct virtchnl_proto_hdrs proto_hdrs; /* INPUT */
+	struct virtchnl_filter_action_set actions; /* INPUT */
+	/* see enum virtchnl_fsub_prgm_status; OUTPUT */
+	s32 status;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(2616, virtchnl_flow_sub);
+
+/* VIRTCHNL_OP_FLOW_UNSUBSCRIBE
+ * VF sends this request to PF by filling out vsi_id
+ * and flow_id. PF will return status to VF.
+ */
+struct virtchnl_flow_unsub {
+	u16 vsi_id; /* INPUT */
+	u16 pad;
+	u32 flow_id; /* INPUT */
+	/* see enum virtchnl_fsub_prgm_status; OUTPUT */
+	s32 status;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_flow_unsub);
+
 /* VIRTCHNL_OP_GET_QOS_CAPS
  * VF sends this message to get its QoS Caps, such as
  * TC number, Arbiter and Bandwidth.
@@ -2318,6 +2408,12 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
 	case VIRTCHNL_OP_DEL_FDIR_FILTER:
 		valid_len = sizeof(struct virtchnl_fdir_del);
 		break;
+	case VIRTCHNL_OP_FLOW_SUBSCRIBE:
+		valid_len = sizeof(struct virtchnl_flow_sub);
+		break;
+	case VIRTCHNL_OP_FLOW_UNSUBSCRIBE:
+		valid_len = sizeof(struct virtchnl_flow_unsub);
+		break;
 	case VIRTCHNL_OP_GET_QOS_CAPS:
 		break;
 	case VIRTCHNL_OP_CONFIG_QUEUE_TC_MAP:
-- 
2.25.1



* [PATCH 2/2] net/iavf: enable flow subscription rule support for AVF
  2022-08-09  6:21 [PATCH 0/2] support flow subscription Jie Wang
  2022-08-09  6:21 ` [PATCH 1/2] common/iavf: " Jie Wang
@ 2022-08-09  6:21 ` Jie Wang
  2022-08-12 17:04 ` [PATCH v2 0/5] support flow subscription Jie Wang
                   ` (4 subsequent siblings)
  6 siblings, 0 replies; 40+ messages in thread
From: Jie Wang @ 2022-08-09  6:21 UTC (permalink / raw)
  To: dev
  Cc: qiming.yang, qi.z.zhang, jingjing.wu, beilei.xing, stevex.yang, Jie Wang

Add support for the AVF driver to subscribe to a flow from the PF.

The supported patterns are listed below:
eth/vlan/ipv4
eth/ipv4(6)
eth/ipv4(6)/udp
eth/ipv4(6)/tcp
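
For illustration, here is a minimal sketch of how one of these patterns
could be requested through the generic rte_flow API. It is not taken from
the series: the UDP destination port, queue index and port id are
placeholder values, and error handling is trimmed. Note that this version
of the parser expects a VF action alongside the queue action.

#include <rte_flow.h>

static struct rte_flow *
example_subscribe_udp(uint16_t port_id, struct rte_flow_error *err)
{
	struct rte_flow_attr attr = { .ingress = 1, .priority = 0 };
	struct rte_flow_item_udp udp_spec = { .hdr.dst_port = RTE_BE16(4789) };
	struct rte_flow_item_udp udp_mask = { .hdr.dst_port = RTE_BE16(0xffff) };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },	/* dst mac is added by the PF */
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
		  .spec = &udp_spec, .mask = &udp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_vf vf = { .original = 1 };
	struct rte_flow_action_queue queue = { .index = 0 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_VF, .conf = &vf },
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, err);
}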

Signed-off-by: Jie Wang <jie1x.wang@intel.com>
---
 doc/guides/rel_notes/release_22_11.rst |   4 +
 drivers/net/iavf/iavf.h                |  10 +
 drivers/net/iavf/iavf_fdir.c           |   4 +
 drivers/net/iavf/iavf_fsub.c           | 746 +++++++++++++++++++++++++
 drivers/net/iavf/iavf_generic_flow.c   |  40 +-
 drivers/net/iavf/iavf_generic_flow.h   |   2 +
 drivers/net/iavf/iavf_hash.c           |   5 +
 drivers/net/iavf/iavf_ipsec_crypto.c   |  16 +-
 drivers/net/iavf/iavf_vchnl.c          | 133 +++++
 drivers/net/iavf/meson.build           |   1 +
 10 files changed, 944 insertions(+), 17 deletions(-)
 create mode 100644 drivers/net/iavf/iavf_fsub.c

diff --git a/doc/guides/rel_notes/release_22_11.rst b/doc/guides/rel_notes/release_22_11.rst
index 8c021cf050..bb77a03e24 100644
--- a/doc/guides/rel_notes/release_22_11.rst
+++ b/doc/guides/rel_notes/release_22_11.rst
@@ -55,6 +55,10 @@ New Features
      Also, make sure to start the actual text at the margin.
      =======================================================
 
+* **Updated Intel iavf driver.**
+
+  * Added flow subscription support.
+
 
 Removed Items
 -------------
diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 025ab3ff60..6792c1de74 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -148,6 +148,13 @@ struct iavf_fdir_info {
 	struct iavf_fdir_conf conf;
 };
 
+struct iavf_fsub_conf {
+	struct virtchnl_flow_sub sub_fltr;
+	struct virtchnl_flow_unsub unsub_fltr;
+	uint64_t input_set;
+	uint32_t flow_id;
+};
+
 struct iavf_qv_map {
 	uint16_t queue_id;
 	uint16_t vector_id;
@@ -482,4 +489,7 @@ int iavf_ipsec_crypto_request(struct iavf_adapter *adapter,
 extern const struct rte_tm_ops iavf_tm_ops;
 int iavf_get_ptp_cap(struct iavf_adapter *adapter);
 int iavf_get_phc_time(struct iavf_rx_queue *rxq);
+int iavf_flow_sub(struct iavf_adapter *adapter, struct iavf_fsub_conf *filter);
+int iavf_flow_unsub(struct iavf_adapter *adapter, struct iavf_fsub_conf *filter);
+int iavf_flow_sub_check(struct iavf_adapter *adapter, struct iavf_fsub_conf *filter);
 #endif /* _IAVF_ETHDEV_H_ */
diff --git a/drivers/net/iavf/iavf_fdir.c b/drivers/net/iavf/iavf_fdir.c
index 2e6b3a9097..f5e23da5bc 100644
--- a/drivers/net/iavf/iavf_fdir.c
+++ b/drivers/net/iavf/iavf_fdir.c
@@ -1551,6 +1551,7 @@ iavf_fdir_parse(struct iavf_adapter *ad,
 		uint32_t array_len,
 		const struct rte_flow_item pattern[],
 		const struct rte_flow_action actions[],
+		uint32_t priority,
 		void **meta,
 		struct rte_flow_error *error)
 {
@@ -1561,6 +1562,9 @@ iavf_fdir_parse(struct iavf_adapter *ad,
 
 	memset(filter, 0, sizeof(*filter));
 
+	if (priority >= 1)
+		return -rte_errno;
+
 	item = iavf_search_pattern_match_item(pattern, array, array_len, error);
 	if (!item)
 		return -rte_errno;
diff --git a/drivers/net/iavf/iavf_fsub.c b/drivers/net/iavf/iavf_fsub.c
new file mode 100644
index 0000000000..910e519cc1
--- /dev/null
+++ b/drivers/net/iavf/iavf_fsub.c
@@ -0,0 +1,746 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2022 Intel Corporation
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_eth_ctrl.h>
+#include <rte_tailq.h>
+#include <rte_flow_driver.h>
+#include <rte_flow.h>
+#include <iavf.h>
+#include "iavf_generic_flow.h"
+
+#define MAX_QGRP_NUM_TYPE	7
+#define IAVF_IPV6_ADDR_LENGTH	16
+#define MAX_INPUT_SET_BYTE	32
+
+#define IAVF_FLTR_RX		BIT(0)
+#define IAVF_FLTR_TX		BIT(1)
+#define IAVF_FLTR_TX_RX		(IAVF_FLTR_RX | IAVF_FLTR_TX)
+
+#define IAVF_SW_INSET_ETHER ( \
+	IAVF_INSET_DMAC | IAVF_INSET_SMAC | IAVF_INSET_ETHERTYPE)
+#define IAVF_SW_INSET_MAC_IPV4 ( \
+	IAVF_INSET_DMAC | IAVF_INSET_IPV4_DST | IAVF_INSET_IPV4_SRC | \
+	IAVF_INSET_IPV4_PROTO | IAVF_INSET_IPV4_TTL | IAVF_INSET_IPV4_TOS)
+#define IAVF_SW_INSET_MAC_VLAN_IPV4 ( \
+	IAVF_SW_INSET_MAC_IPV4 | IAVF_INSET_VLAN_OUTER)
+#define IAVF_SW_INSET_MAC_IPV4_TCP ( \
+	IAVF_INSET_DMAC | IAVF_INSET_IPV4_DST | IAVF_INSET_IPV4_SRC | \
+	IAVF_INSET_IPV4_TTL | IAVF_INSET_IPV4_TOS | \
+	IAVF_INSET_TCP_DST_PORT | IAVF_INSET_TCP_SRC_PORT)
+#define IAVF_SW_INSET_MAC_IPV4_UDP ( \
+	IAVF_INSET_DMAC | IAVF_INSET_IPV4_DST | IAVF_INSET_IPV4_SRC | \
+	IAVF_INSET_IPV4_TTL | IAVF_INSET_IPV4_TOS | \
+	IAVF_INSET_UDP_DST_PORT | IAVF_INSET_UDP_SRC_PORT)
+#define IAVF_SW_INSET_MAC_IPV6 ( \
+	IAVF_INSET_DMAC | IAVF_INSET_IPV6_DST | IAVF_INSET_IPV6_SRC | \
+	IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
+	IAVF_INSET_IPV6_NEXT_HDR)
+#define IAVF_SW_INSET_MAC_IPV6_TCP ( \
+	IAVF_INSET_DMAC | IAVF_INSET_IPV6_DST | IAVF_INSET_IPV6_SRC | \
+	IAVF_INSET_IPV6_HOP_LIMIT | IAVF_INSET_IPV6_TC | \
+	IAVF_INSET_TCP_DST_PORT | IAVF_INSET_TCP_SRC_PORT)
+#define IAVF_SW_INSET_MAC_IPV6_UDP ( \
+	IAVF_INSET_DMAC | IAVF_INSET_IPV6_DST | IAVF_INSET_IPV6_SRC | \
+	IAVF_INSET_IPV6_HOP_LIMIT | IAVF_INSET_IPV6_TC | \
+	IAVF_INSET_UDP_DST_PORT | IAVF_INSET_UDP_SRC_PORT)
+
+
+static struct iavf_flow_parser iavf_fsub_parser;
+
+static struct
+iavf_pattern_match_item iavf_fsub_pattern_list[] = {
+	{iavf_pattern_ethertype,			IAVF_SW_INSET_ETHER,			IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4,				IAVF_SW_INSET_MAC_IPV4,			IAVF_INSET_NONE},
+	{iavf_pattern_eth_vlan_ipv4,			IAVF_SW_INSET_MAC_VLAN_IPV4,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_udp,			IAVF_SW_INSET_MAC_IPV4_UDP,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_tcp,			IAVF_SW_INSET_MAC_IPV4_TCP,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv6,				IAVF_SW_INSET_MAC_IPV6,			IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv6_udp,			IAVF_SW_INSET_MAC_IPV6_UDP,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv6_tcp,			IAVF_SW_INSET_MAC_IPV6_TCP,		IAVF_INSET_NONE},
+};
+
+static int
+iavf_fsub_create(struct iavf_adapter *ad, struct rte_flow *flow,
+		 void *meta, struct rte_flow_error *error)
+{
+	struct iavf_fsub_conf *filter = meta;
+	struct iavf_fsub_conf *rule;
+	int ret;
+
+	rule = rte_zmalloc("fsub_entry", sizeof(*rule), 0);
+	if (!rule) {
+		rte_flow_error_set(error, ENOMEM,
+				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				"Failed to allocate memory for fsub rule");
+		return -rte_errno;
+	}
+
+	ret = iavf_flow_sub(ad, filter);
+	if (ret) {
+		rte_flow_error_set(error, -ret,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Failed to subscribe flow rule.");
+		goto free_entry;
+	}
+
+	rte_memcpy(rule, filter, sizeof(*rule));
+	flow->rule = rule;
+
+	return 0;
+
+free_entry:
+	rte_free(rule);
+	return -rte_errno;
+}
+
+static int
+iavf_fsub_destroy(struct iavf_adapter *ad, struct rte_flow *flow,
+		  struct rte_flow_error *error)
+{
+	struct iavf_fsub_conf *filter;
+	int ret;
+
+	filter = (struct iavf_fsub_conf *)flow->rule;
+
+	ret = iavf_flow_unsub(ad, filter);
+	if (ret) {
+		rte_flow_error_set(error, -ret,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Failed to unsubscribe flow rule.");
+		return -rte_errno;
+	}
+
+	flow->rule = NULL;
+	rte_free(filter);
+
+	return ret;
+}
+
+static int
+iavf_fsub_validation(struct iavf_adapter *ad,
+		     __rte_unused struct rte_flow *flow,
+		     void *meta,
+		     struct rte_flow_error *error)
+{
+	struct iavf_fsub_conf *filter = meta;
+	int ret;
+
+	ret = iavf_flow_sub_check(ad, filter);
+	if (ret) {
+		rte_flow_error_set(error, -ret,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Failed to validate filter rule.");
+		return -rte_errno;
+	}
+
+	return 0;
+};
+
+static int
+iavf_fsub_parse_pattern(const struct rte_flow_item pattern[],
+			const uint64_t input_set_mask,
+			struct rte_flow_error *error,
+			struct iavf_fsub_conf *filter)
+{
+	struct virtchnl_proto_hdrs *hdrs = &filter->sub_fltr.proto_hdrs;
+	enum rte_flow_item_type item_type;
+	const struct rte_flow_item_eth *eth_spec, *eth_mask;
+	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
+	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
+	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
+	const struct rte_flow_item_udp *udp_spec, *udp_mask;
+	const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
+	const struct rte_flow_item *item = pattern;
+	struct virtchnl_proto_hdr_w_msk *hdr, *hdr1 = NULL;
+	uint64_t outer_input_set = IAVF_INSET_NONE;
+	uint64_t *input = NULL;
+	uint16_t input_set_byte = 0;
+	uint16_t j;
+	uint32_t layer = 0;
+
+	for (item = pattern; item->type !=
+			RTE_FLOW_ITEM_TYPE_END; item++) {
+		if (item->last) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item, "Not support range");
+			return -rte_errno;
+		}
+		item_type = item->type;
+
+		switch (item_type) {
+		case RTE_FLOW_ITEM_TYPE_ETH:
+			eth_spec = item->spec;
+			eth_mask = item->mask;
+
+			hdr1 = &hdrs->proto_hdr_w_msk[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr1, ETH);
+
+			if (eth_spec && eth_mask) {
+				input = &outer_input_set;
+
+				if (!rte_is_zero_ether_addr(&eth_mask->dst)) {
+					*input |= IAVF_INSET_DMAC;
+					input_set_byte += 6;
+				} else {
+					/* flow subscribe filter will add dst mac in kernel */
+					input_set_byte += 6;
+				}
+
+				if (!rte_is_zero_ether_addr(&eth_mask->src)) {
+					*input |= IAVF_INSET_SMAC;
+					input_set_byte += 6;
+				}
+
+				if (eth_mask->type) {
+					*input |= IAVF_INSET_ETHERTYPE;
+					input_set_byte += 2;
+				}
+
+				rte_memcpy(hdr1->buffer_spec, eth_spec,
+					   sizeof(struct rte_ether_hdr));
+				rte_memcpy(hdr1->buffer_mask, eth_mask,
+					   sizeof(struct rte_ether_hdr));
+			} else {
+				/* flow subscribe filter will add dst mac in kernel */
+				input_set_byte += 6;
+			}
+
+			hdrs->count = ++layer;
+			break;
+		case RTE_FLOW_ITEM_TYPE_IPV4:
+			ipv4_spec = item->spec;
+			ipv4_mask = item->mask;
+
+			hdr = &hdrs->proto_hdr_w_msk[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4);
+
+			if (ipv4_spec && ipv4_mask) {
+				input = &outer_input_set;
+				/* Check IPv4 mask and update input set */
+				if (ipv4_mask->hdr.version_ihl ||
+					ipv4_mask->hdr.total_length ||
+					ipv4_mask->hdr.packet_id ||
+					ipv4_mask->hdr.hdr_checksum) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item, "Invalid IPv4 mask.");
+					return -rte_errno;
+				}
+
+				if (ipv4_mask->hdr.src_addr) {
+					*input |= IAVF_INSET_IPV4_SRC;
+					input_set_byte += 2;
+				}
+				if (ipv4_mask->hdr.dst_addr) {
+					*input |= IAVF_INSET_IPV4_DST;
+					input_set_byte += 2;
+				}
+				if (ipv4_mask->hdr.time_to_live) {
+					*input |= IAVF_INSET_IPV4_TTL;
+					input_set_byte++;
+				}
+				if (ipv4_mask->hdr.next_proto_id) {
+					*input |= IAVF_INSET_IPV4_PROTO;
+					input_set_byte++;
+				}
+				if (ipv4_mask->hdr.type_of_service) {
+					*input |= IAVF_INSET_IPV4_TOS;
+					input_set_byte++;
+				}
+
+				rte_memcpy(hdr->buffer_spec, &ipv4_spec->hdr,
+					   sizeof(ipv4_spec->hdr));
+				rte_memcpy(hdr->buffer_mask, &ipv4_mask->hdr,
+					   sizeof(ipv4_spec->hdr));
+			}
+
+			hdrs->count = ++layer;
+			break;
+		case RTE_FLOW_ITEM_TYPE_IPV6:
+			ipv6_spec = item->spec;
+			ipv6_mask = item->mask;
+
+			hdr = &hdrs->proto_hdr_w_msk[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV6);
+
+			if (ipv6_spec && ipv6_mask) {
+				input = &outer_input_set;
+
+				if (ipv6_mask->hdr.payload_len) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item, "Invalid IPv6 mask");
+					return -rte_errno;
+				}
+
+				for (j = 0; j < IAVF_IPV6_ADDR_LENGTH; j++) {
+					if (ipv6_mask->hdr.src_addr[j]) {
+						*input |= IAVF_INSET_IPV6_SRC;
+						break;
+					}
+				}
+				for (j = 0; j < IAVF_IPV6_ADDR_LENGTH; j++) {
+					if (ipv6_mask->hdr.dst_addr[j]) {
+						*input |= IAVF_INSET_IPV6_DST;
+						break;
+					}
+				}
+
+				for (j = 0; j < IAVF_IPV6_ADDR_LENGTH; j++) {
+					if (ipv6_mask->hdr.src_addr[j])
+						input_set_byte++;
+
+					if (ipv6_mask->hdr.dst_addr[j])
+						input_set_byte++;
+				}
+
+				if (ipv6_mask->hdr.proto) {
+					*input |= IAVF_INSET_IPV6_NEXT_HDR;
+					input_set_byte++;
+				}
+				if (ipv6_mask->hdr.hop_limits) {
+					*input |= IAVF_INSET_IPV6_HOP_LIMIT;
+					input_set_byte++;
+				}
+				if (ipv6_mask->hdr.vtc_flow &
+				    rte_cpu_to_be_32(RTE_IPV6_HDR_TC_MASK)) {
+					*input |= IAVF_INSET_IPV6_TC;
+					input_set_byte += 4;
+				}
+
+				rte_memcpy(hdr->buffer_spec, &ipv6_spec->hdr,
+					   sizeof(ipv6_spec->hdr));
+				rte_memcpy(hdr->buffer_mask, &ipv6_mask->hdr,
+					   sizeof(ipv6_spec->hdr));
+			}
+
+			hdrs->count = ++layer;
+			break;
+		case RTE_FLOW_ITEM_TYPE_UDP:
+			udp_spec = item->spec;
+			udp_mask = item->mask;
+
+			hdr = &hdrs->proto_hdr_w_msk[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, UDP);
+
+			if (udp_spec && udp_mask) {
+				input = &outer_input_set;
+				/* Check UDP mask and update input set*/
+				if (udp_mask->hdr.dgram_len ||
+				    udp_mask->hdr.dgram_cksum) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item, "Invalid UDP mask");
+					return -rte_errno;
+				}
+
+				if (udp_mask->hdr.src_port) {
+					*input |= IAVF_INSET_UDP_SRC_PORT;
+					input_set_byte += 2;
+				}
+				if (udp_mask->hdr.dst_port) {
+					*input |= IAVF_INSET_UDP_DST_PORT;
+					input_set_byte += 2;
+				}
+
+				rte_memcpy(hdr->buffer_spec, &udp_spec->hdr,
+					   sizeof(udp_spec->hdr));
+				rte_memcpy(hdr->buffer_mask, &udp_mask->hdr,
+					   sizeof(udp_mask->hdr));
+			}
+
+			hdrs->count = ++layer;
+			break;
+		case RTE_FLOW_ITEM_TYPE_TCP:
+			tcp_spec = item->spec;
+			tcp_mask = item->mask;
+
+			hdr = &hdrs->proto_hdr_w_msk[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, TCP);
+
+			if (tcp_spec && tcp_mask) {
+				input = &outer_input_set;
+				/* Check TCP mask and update input set */
+				if (tcp_mask->hdr.sent_seq ||
+					tcp_mask->hdr.recv_ack ||
+					tcp_mask->hdr.data_off ||
+					tcp_mask->hdr.tcp_flags ||
+					tcp_mask->hdr.rx_win ||
+					tcp_mask->hdr.cksum ||
+					tcp_mask->hdr.tcp_urp) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item, "Invalid TCP mask");
+					return -rte_errno;
+				}
+
+				if (tcp_mask->hdr.src_port) {
+					*input |= IAVF_INSET_TCP_SRC_PORT;
+					input_set_byte += 2;
+				}
+				if (tcp_mask->hdr.dst_port) {
+					*input |= IAVF_INSET_TCP_DST_PORT;
+					input_set_byte += 2;
+				}
+
+				rte_memcpy(hdr->buffer_spec, &tcp_spec->hdr,
+					   sizeof(tcp_spec->hdr));
+				rte_memcpy(hdr->buffer_mask, &tcp_mask->hdr,
+					   sizeof(tcp_mask->hdr));
+			}
+
+			hdrs->count = ++layer;
+			break;
+		case RTE_FLOW_ITEM_TYPE_VLAN:
+			vlan_spec = item->spec;
+			vlan_mask = item->mask;
+
+			hdr = &hdrs->proto_hdr_w_msk[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, S_VLAN);
+
+			if (vlan_spec && vlan_mask) {
+				input = &outer_input_set;
+
+				*input |= IAVF_INSET_VLAN_OUTER;
+
+				if (vlan_mask->tci)
+					input_set_byte += 2;
+
+				if (vlan_mask->inner_type) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item,
+						"Invalid VLAN input set.");
+					return -rte_errno;
+				}
+
+				rte_memcpy(hdr->buffer_spec, &vlan_spec->hdr,
+					   sizeof(vlan_spec->hdr));
+				rte_memcpy(hdr->buffer_mask, &vlan_mask->hdr,
+					   sizeof(vlan_mask->hdr));
+			}
+
+			hdrs->count = ++layer;
+			break;
+		case RTE_FLOW_ITEM_TYPE_VOID:
+			break;
+		default:
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM, pattern,
+					   "Invalid pattern item.");
+			return -rte_errno;
+		}
+	}
+
+	hdrs->count += VIRTCHNL_MAX_NUM_PROTO_HDRS;
+
+	if (input_set_byte > MAX_INPUT_SET_BYTE) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+				   item, "too much input set");
+		return -rte_errno;
+	}
+
+	if (!outer_input_set || (outer_input_set & ~input_set_mask))
+		return -rte_errno;
+
+	return 0;
+}
+
+static int
+iavf_fsub_parse_action(struct iavf_adapter *ad,
+		       const struct rte_flow_action *actions,
+		       uint32_t priority,
+		       struct rte_flow_error *error,
+		       struct iavf_fsub_conf *filter)
+{
+	const struct rte_flow_action *action;
+	const struct rte_flow_action_vf *act_vf;
+	const struct rte_flow_action_queue *act_q;
+	const struct rte_flow_action_rss *act_qgrop;
+	struct virtchnl_filter_action *filter_action;
+	uint16_t valid_qgrop_number[MAX_QGRP_NUM_TYPE] = {
+		2, 4, 8, 16, 32, 64, 128};
+	uint16_t i, num = 0, dest_num = 0, vf_num = 0;
+
+	for (action = actions; action->type !=
+				RTE_FLOW_ACTION_TYPE_END; action++) {
+		switch (action->type) {
+		case RTE_FLOW_ACTION_TYPE_VOID:
+			break;
+
+		case RTE_FLOW_ACTION_TYPE_VF:
+			vf_num++;
+			filter_action = &filter->sub_fltr.actions.actions[num];
+
+			act_vf = action->conf;
+			if (act_vf->id != ad->hw.device_id &&
+				!act_vf->original) {
+				rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ACTION,
+					actions, "Invalid vf id");
+				return -rte_errno;
+			}
+
+			filter->sub_fltr.actions.count = ++num;
+			break;
+		case RTE_FLOW_ACTION_TYPE_QUEUE:
+			dest_num++;
+			filter_action = &filter->sub_fltr.actions.actions[num];
+
+			act_q = action->conf;
+			if (act_q->index >= ad->dev_data->nb_rx_queues)
+				goto error;
+
+			filter_action->type = VIRTCHNL_ACTION_QUEUE;
+			filter_action->act_conf.queue.index = act_q->index;
+			filter->sub_fltr.actions.count = ++num;
+			break;
+		case RTE_FLOW_ACTION_TYPE_RSS:
+			dest_num++;
+			filter_action = &filter->sub_fltr.actions.actions[num];
+
+			act_qgrop = action->conf;
+			if (act_qgrop->queue_num <= 1)
+				goto error;
+
+			filter_action->type = VIRTCHNL_ACTION_Q_REGION;
+			filter_action->act_conf.queue.index =
+							act_qgrop->queue[0];
+			for (i = 0; i < MAX_QGRP_NUM_TYPE; i++) {
+				if (act_qgrop->queue_num ==
+				    valid_qgrop_number[i])
+					break;
+			}
+
+			if (i == MAX_QGRP_NUM_TYPE)
+				goto error;
+
+			if ((act_qgrop->queue[0] + act_qgrop->queue_num) >
+			    ad->dev_data->nb_rx_queues)
+				goto error1;
+
+			for (i = 0; i < act_qgrop->queue_num - 1; i++)
+				if (act_qgrop->queue[i + 1] !=
+				    act_qgrop->queue[i] + 1)
+					goto error2;
+
+			filter_action->act_conf.queue.region = act_qgrop->queue_num;
+			filter->sub_fltr.actions.count = ++num;
+			break;
+		default:
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ACTION,
+					   actions, "Invalid action type");
+			return -rte_errno;
+		}
+	}
+
+	/* 0 denotes lowest priority of recipe and highest priority
+	 * of rte_flow. Change rte_flow priority into recipe priority.
+	 */
+	filter->sub_fltr.priority = priority;
+
+	if (num > VIRTCHNL_MAX_NUM_ACTIONS) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ACTION, actions,
+				   "Action numbers exceed the maximum value");
+		return -rte_errno;
+	}
+
+	if (vf_num == 0) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ACTION, actions,
+				   "Invalid action, vf action must be added");
+		return -rte_errno;
+	}
+
+	if (dest_num >= 2) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ACTION, actions,
+				   "Unsupported action combination");
+		return -rte_errno;
+	}
+
+	return 0;
+error:
+	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, actions,
+			   "Invalid action type or queue number");
+	return -rte_errno;
+
+error1:
+	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, actions,
+			   "Invalid queue region indexes");
+	return -rte_errno;
+
+error2:
+	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, actions,
+			   "Discontinuous queue region");
+	return -rte_errno;
+}
+
+static int
+iavf_fsub_check_action(const struct rte_flow_action *actions,
+		       struct rte_flow_error *error)
+{
+	const struct rte_flow_action *action;
+	enum rte_flow_action_type action_type;
+	uint16_t actions_num = 0;
+	bool vf_valid = false;
+	bool queue_valid = false;
+
+	for (action = actions; action->type !=
+				RTE_FLOW_ACTION_TYPE_END; action++) {
+		action_type = action->type;
+		switch (action_type) {
+		case RTE_FLOW_ACTION_TYPE_VF:
+			vf_valid = true;
+			actions_num++;
+			break;
+		case RTE_FLOW_ACTION_TYPE_RSS:
+		case RTE_FLOW_ACTION_TYPE_QUEUE:
+			queue_valid = true;
+			actions_num++;
+			break;
+		case RTE_FLOW_ACTION_TYPE_DROP:
+			actions_num++;
+			break;
+		case RTE_FLOW_ACTION_TYPE_VOID:
+			continue;
+		default:
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ACTION,
+					   actions, "Invalid action type");
+			return -rte_errno;
+		}
+	}
+
+	if (!((actions_num == 1 && !queue_valid) ||
+	      (actions_num == 2 && vf_valid && queue_valid))) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+				   actions, "Invalid action number");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+static int
+iavf_fsub_parse(struct iavf_adapter *ad,
+		struct iavf_pattern_match_item *array,
+		uint32_t array_len,
+		const struct rte_flow_item pattern[],
+		const struct rte_flow_action actions[],
+		uint32_t priority,
+		void **meta,
+		struct rte_flow_error *error)
+{
+	struct iavf_fsub_conf *filter;
+	struct iavf_pattern_match_item *pattern_match_item = NULL;
+	int ret = 0;
+
+	filter = rte_zmalloc(NULL, sizeof(*filter), 0);
+	if (!filter) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "No memory for iavf_fsub_conf_ptr");
+		return -rte_errno;
+	}
+
+	/* search flow subscribe pattern */
+	pattern_match_item = iavf_search_pattern_match_item(pattern, array,
+							    array_len, error);
+	if (!pattern_match_item)
+		return -rte_errno;
+
+	/* parse flow subscribe pattern */
+	ret = iavf_fsub_parse_pattern(pattern,
+				      pattern_match_item->input_set_mask,
+				      error, filter);
+	if (ret)
+		goto error;
+
+	/* check flow subscribe pattern action */
+	ret = iavf_fsub_check_action(actions, error);
+	if (ret)
+		goto error;
+
+	/* parse flow subscribe pattern action */
+	ret = iavf_fsub_parse_action((void *)ad, actions, priority,
+				     error, filter);
+	if (ret)
+		goto error;
+
+	if (meta)
+		*meta = filter;
+
+error:
+	rte_free(pattern_match_item);
+	return ret;
+}
+
+static int
+iavf_fsub_init(struct iavf_adapter *ad)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
+	struct iavf_flow_parser *parser;
+
+	if (!vf->vf_res)
+		return -EINVAL;
+
+	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_FSUB_PF)
+		parser = &iavf_fsub_parser;
+	else
+		return -ENOTSUP;
+
+	return iavf_register_parser(parser, ad);
+}
+
+static void
+iavf_fsub_uninit(struct iavf_adapter *ad)
+{
+	iavf_unregister_parser(&iavf_fsub_parser, ad);
+}
+
+static struct
+iavf_flow_engine iavf_fsub_engine = {
+	.init = iavf_fsub_init,
+	.uninit = iavf_fsub_uninit,
+	.create = iavf_fsub_create,
+	.destroy = iavf_fsub_destroy,
+	.validation = iavf_fsub_validation,
+	.type = IAVF_FLOW_ENGINE_FSUB,
+};
+
+static struct
+iavf_flow_parser iavf_fsub_parser = {
+	.engine = &iavf_fsub_engine,
+	.array = iavf_fsub_pattern_list,
+	.array_len = RTE_DIM(iavf_fsub_pattern_list),
+	.parse_pattern_action = iavf_fsub_parse,
+	.stage = IAVF_FLOW_STAGE_DISTRIBUTOR,
+};
+
+RTE_INIT(iavf_fsub_engine_init)
+{
+	iavf_register_flow_engine(&iavf_fsub_engine);
+}
diff --git a/drivers/net/iavf/iavf_generic_flow.c b/drivers/net/iavf/iavf_generic_flow.c
index e1a611e319..f33c764764 100644
--- a/drivers/net/iavf/iavf_generic_flow.c
+++ b/drivers/net/iavf/iavf_generic_flow.c
@@ -1785,6 +1785,7 @@ enum rte_flow_item_type iavf_pattern_eth_ipv6_udp_l2tpv2_ppp_ipv6_tcp[] = {
 typedef struct iavf_flow_engine * (*parse_engine_t)(struct iavf_adapter *ad,
 		struct rte_flow *flow,
 		struct iavf_parser_list *parser_list,
+		uint32_t priority,
 		const struct rte_flow_item pattern[],
 		const struct rte_flow_action actions[],
 		struct rte_flow_error *error);
@@ -1866,6 +1867,8 @@ iavf_register_parser(struct iavf_flow_parser *parser,
 {
 	struct iavf_parser_list *list = NULL;
 	struct iavf_flow_parser_node *parser_node;
+	struct iavf_flow_parser_node *existing_node;
+	void *temp;
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
 
 	parser_node = rte_zmalloc("iavf_parser", sizeof(*parser_node), 0);
@@ -1880,14 +1883,26 @@ iavf_register_parser(struct iavf_flow_parser *parser,
 		TAILQ_INSERT_TAIL(list, parser_node, node);
 	} else if (parser->engine->type == IAVF_FLOW_ENGINE_FDIR) {
 		list = &vf->dist_parser_list;
+		RTE_TAILQ_FOREACH_SAFE(existing_node, list, node, temp) {
+			if (existing_node->parser->engine->type ==
+			    IAVF_FLOW_ENGINE_FSUB) {
+				TAILQ_INSERT_AFTER(list, existing_node,
+						   parser_node, node);
+				goto DONE;
+			}
+		}
 		TAILQ_INSERT_HEAD(list, parser_node, node);
 	} else if (parser->engine->type == IAVF_FLOW_ENGINE_IPSEC_CRYPTO) {
 		list = &vf->ipsec_crypto_parser_list;
 		TAILQ_INSERT_HEAD(list, parser_node, node);
+	} else if (parser->engine->type == IAVF_FLOW_ENGINE_FSUB) {
+		list = &vf->dist_parser_list;
+		TAILQ_INSERT_HEAD(list, parser_node, node);
 	} else {
 		return -EINVAL;
 	}
 
+DONE:
 	return 0;
 }
 
@@ -1902,7 +1917,8 @@ iavf_unregister_parser(struct iavf_flow_parser *parser,
 
 	if (parser->engine->type == IAVF_FLOW_ENGINE_HASH)
 		list = &vf->rss_parser_list;
-	else if (parser->engine->type == IAVF_FLOW_ENGINE_FDIR)
+	else if ((parser->engine->type == IAVF_FLOW_ENGINE_FDIR) ||
+		 (parser->engine->type == IAVF_FLOW_ENGINE_FSUB))
 		list = &vf->dist_parser_list;
 
 	if (list == NULL)
@@ -1936,11 +1952,11 @@ iavf_flow_valid_attr(const struct rte_flow_attr *attr,
 		return -rte_errno;
 	}
 
-	/* Not supported */
-	if (attr->priority) {
+	/* support priority for flow subscribe */
+	if (attr->priority > 1) {
 		rte_flow_error_set(error, EINVAL,
 				RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
-				attr, "Not support priority.");
+				attr, "Only support priority 0 and 1.");
 		return -rte_errno;
 	}
 
@@ -2083,6 +2099,7 @@ static struct iavf_flow_engine *
 iavf_parse_engine_create(struct iavf_adapter *ad,
 		struct rte_flow *flow,
 		struct iavf_parser_list *parser_list,
+		uint32_t priority,
 		const struct rte_flow_item pattern[],
 		const struct rte_flow_action actions[],
 		struct rte_flow_error *error)
@@ -2096,7 +2113,7 @@ iavf_parse_engine_create(struct iavf_adapter *ad,
 		if (parser_node->parser->parse_pattern_action(ad,
 				parser_node->parser->array,
 				parser_node->parser->array_len,
-				pattern, actions, &meta, error) < 0)
+				pattern, actions, priority, &meta, error) < 0)
 			continue;
 
 		engine = parser_node->parser->engine;
@@ -2112,6 +2129,7 @@ static struct iavf_flow_engine *
 iavf_parse_engine_validate(struct iavf_adapter *ad,
 		struct rte_flow *flow,
 		struct iavf_parser_list *parser_list,
+		uint32_t priority,
 		const struct rte_flow_item pattern[],
 		const struct rte_flow_action actions[],
 		struct rte_flow_error *error)
@@ -2125,7 +2143,7 @@ iavf_parse_engine_validate(struct iavf_adapter *ad,
 		if (parser_node->parser->parse_pattern_action(ad,
 				parser_node->parser->array,
 				parser_node->parser->array_len,
-				pattern, actions, &meta,  error) < 0)
+				pattern, actions, priority, &meta, error) < 0)
 			continue;
 
 		engine = parser_node->parser->engine;
@@ -2186,18 +2204,18 @@ iavf_flow_process_filter(struct rte_eth_dev *dev,
 	if (ret)
 		return ret;
 
-	*engine = iavf_parse_engine(ad, flow, &vf->rss_parser_list, pattern,
-				    actions, error);
+	*engine = iavf_parse_engine(ad, flow, &vf->rss_parser_list,
+				    attr->priority, pattern, actions, error);
 	if (*engine)
 		return 0;
 
-	*engine = iavf_parse_engine(ad, flow, &vf->dist_parser_list, pattern,
-				    actions, error);
+	*engine = iavf_parse_engine(ad, flow, &vf->dist_parser_list,
+				    attr->priority, pattern, actions, error);
 	if (*engine)
 		return 0;
 
 	*engine = iavf_parse_engine(ad, flow, &vf->ipsec_crypto_parser_list,
-			pattern, actions, error);
+				    attr->priority, pattern, actions, error);
 	if (*engine)
 		return 0;
 
diff --git a/drivers/net/iavf/iavf_generic_flow.h b/drivers/net/iavf/iavf_generic_flow.h
index 52eb1caf29..60d8ab02b4 100644
--- a/drivers/net/iavf/iavf_generic_flow.h
+++ b/drivers/net/iavf/iavf_generic_flow.h
@@ -471,6 +471,7 @@ typedef int (*parse_pattern_action_t)(struct iavf_adapter *ad,
 		uint32_t array_len,
 		const struct rte_flow_item pattern[],
 		const struct rte_flow_action actions[],
+		uint32_t priority,
 		void **meta,
 		struct rte_flow_error *error);
 
@@ -480,6 +481,7 @@ enum iavf_flow_engine_type {
 	IAVF_FLOW_ENGINE_IPSEC_CRYPTO,
 	IAVF_FLOW_ENGINE_FDIR,
 	IAVF_FLOW_ENGINE_HASH,
+	IAVF_FLOW_ENGINE_FSUB,
 	IAVF_FLOW_ENGINE_MAX,
 };
 
diff --git a/drivers/net/iavf/iavf_hash.c b/drivers/net/iavf/iavf_hash.c
index 42df7c4e48..dea4e0aa0a 100644
--- a/drivers/net/iavf/iavf_hash.c
+++ b/drivers/net/iavf/iavf_hash.c
@@ -86,6 +86,7 @@ iavf_hash_parse_pattern_action(struct iavf_adapter *ad,
 			       uint32_t array_len,
 			       const struct rte_flow_item pattern[],
 			       const struct rte_flow_action actions[],
+			       uint32_t priority,
 			       void **meta,
 			       struct rte_flow_error *error);
 
@@ -1509,6 +1510,7 @@ iavf_hash_parse_pattern_action(__rte_unused struct iavf_adapter *ad,
 			       uint32_t array_len,
 			       const struct rte_flow_item pattern[],
 			       const struct rte_flow_action actions[],
+			       uint32_t priority,
 			       void **meta,
 			       struct rte_flow_error *error)
 {
@@ -1517,6 +1519,9 @@ iavf_hash_parse_pattern_action(__rte_unused struct iavf_adapter *ad,
 	uint64_t phint = IAVF_PHINT_NONE;
 	int ret = 0;
 
+	if (priority >= 1)
+		return -rte_errno;
+
 	rss_meta_ptr = rte_zmalloc(NULL, sizeof(*rss_meta_ptr), 0);
 	if (!rss_meta_ptr) {
 		rte_flow_error_set(error, EINVAL,
diff --git a/drivers/net/iavf/iavf_ipsec_crypto.c b/drivers/net/iavf/iavf_ipsec_crypto.c
index 75f05ee558..322934b5f6 100644
--- a/drivers/net/iavf/iavf_ipsec_crypto.c
+++ b/drivers/net/iavf/iavf_ipsec_crypto.c
@@ -1932,16 +1932,20 @@ static struct iavf_flow_engine iavf_ipsec_flow_engine = {
 
 static int
 iavf_ipsec_flow_parse(struct iavf_adapter *ad,
-		       struct iavf_pattern_match_item *array,
-		       uint32_t array_len,
-		       const struct rte_flow_item pattern[],
-		       const struct rte_flow_action actions[],
-		       void **meta,
-		       struct rte_flow_error *error)
+		      struct iavf_pattern_match_item *array,
+		      uint32_t array_len,
+		      const struct rte_flow_item pattern[],
+		      const struct rte_flow_action actions[],
+		      uint32_t priority,
+		      void **meta,
+		      struct rte_flow_error *error)
 {
 	struct iavf_pattern_match_item *item = NULL;
 	int ret = -1;
 
+	if (priority >= 1)
+		return -rte_errno;
+
 	item = iavf_search_pattern_match_item(pattern, array, array_len, error);
 	if (item && item->meta) {
 		uint32_t type = (uint64_t)(item->meta);
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 21bd1e2193..ea3e1e8bb2 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -502,6 +502,7 @@ iavf_get_vf_resource(struct iavf_adapter *adapter)
 		VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC |
 		VIRTCHNL_VF_OFFLOAD_FDIR_PF |
 		VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF |
+		VIRTCHNL_VF_OFFLOAD_FSUB_PF |
 		VIRTCHNL_VF_OFFLOAD_REQ_QUEUES |
 		VIRTCHNL_VF_OFFLOAD_CRC |
 		VIRTCHNL_VF_OFFLOAD_VLAN_V2 |
@@ -1533,6 +1534,138 @@ iavf_fdir_check(struct iavf_adapter *adapter,
 	return 0;
 }
 
+int
+iavf_flow_sub(struct iavf_adapter *adapter, struct iavf_fsub_conf *filter)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct virtchnl_flow_sub *fsub_cfg;
+	struct iavf_cmd_info args;
+	int err;
+
+	filter->sub_fltr.vsi_id = vf->vsi_res->vsi_id;
+	filter->sub_fltr.validate_only = 0;
+
+	memset(&args, 0, sizeof(args));
+	args.ops = VIRTCHNL_OP_FLOW_SUBSCRIBE;
+	args.in_args = (uint8_t *)(&filter->sub_fltr);
+	args.in_args_size = sizeof(filter->sub_fltr);
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
+	if (err)
+		PMD_DRV_LOG(ERR, "Failed to execute command of "
+				 "OP_FLOW_SUBSCRIBE");
+
+	fsub_cfg = (struct virtchnl_flow_sub *)args.out_buffer;
+	filter->flow_id = fsub_cfg->flow_id;
+
+	if (fsub_cfg->status == VIRTCHNL_FSUB_SUCCESS) {
+		PMD_DRV_LOG(INFO, "Succeed in adding rule request by PF");
+	} else if (fsub_cfg->status == VIRTCHNL_FSUB_FAILURE_RULE_NORESOURCE) {
+		PMD_DRV_LOG(ERR, "Failed to add rule request due to lack of "
+				 "hw resources");
+		err = -1;
+	} else if (fsub_cfg->status == VIRTCHNL_FSUB_FAILURE_RULE_EXIST) {
+		PMD_DRV_LOG(ERR, "Failed to add rule request because the "
+				 "rule already exists");
+		err = -1;
+	} else if (fsub_cfg->status == VIRTCHNL_FSUB_FAILURE_RULE_INVALID) {
+		PMD_DRV_LOG(ERR, "Failed to add rule request because the hw "
+				 "doesn't support it");
+		err = -1;
+	} else {
+		PMD_DRV_LOG(ERR, "Failed to add rule request due to other "
+				 "reasons");
+		err = -1;
+	}
+
+	return err;
+}
+
+int
+iavf_flow_unsub(struct iavf_adapter *adapter, struct iavf_fsub_conf *filter)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct virtchnl_flow_unsub *unsub_cfg;
+	struct iavf_cmd_info args;
+	int err;
+
+	filter->unsub_fltr.vsi_id = vf->vsi_res->vsi_id;
+	filter->unsub_fltr.flow_id = filter->flow_id;
+
+	memset(&args, 0, sizeof(args));
+	args.ops = VIRTCHNL_OP_FLOW_UNSUBSCRIBE;
+	args.in_args = (uint8_t *)(&filter->unsub_fltr);
+	args.in_args_size = sizeof(filter->unsub_fltr);
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
+	if (err)
+		PMD_DRV_LOG(ERR, "Failed to execute command of "
+				 "OP_FLOW_UNSUBSCRIBE");
+
+	unsub_cfg = (struct virtchnl_flow_unsub *)args.out_buffer;
+
+	if (unsub_cfg->status == VIRTCHNL_FSUB_SUCCESS) {
+		PMD_DRV_LOG(INFO, "Succeed in deleting rule request by PF");
+	} else if (unsub_cfg->status == VIRTCHNL_FSUB_FAILURE_RULE_NONEXIST) {
+		PMD_DRV_LOG(ERR, "Failed to delete rule request because the "
+				 "rule doesn't exist");
+		err = -1;
+	} else {
+		PMD_DRV_LOG(ERR, "Failed to delete rule request due to other "
+				 "reasons");
+		err = -1;
+	}
+
+	return err;
+}
+
+int
+iavf_flow_sub_check(struct iavf_adapter *adapter,
+		    struct iavf_fsub_conf *filter)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct virtchnl_flow_sub *fsub_cfg;
+
+	struct iavf_cmd_info args;
+	int err;
+
+	filter->sub_fltr.vsi_id = vf->vsi_res->vsi_id;
+	filter->sub_fltr.validate_only = 1;
+
+	args.ops = VIRTCHNL_OP_FLOW_SUBSCRIBE;
+	args.in_args = (uint8_t *)(&filter->sub_fltr);
+	args.in_args_size = sizeof(filter->sub_fltr);
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
+	if (err) {
+		PMD_DRV_LOG(ERR, "Failed to check flow subscription rule");
+		return err;
+	}
+
+	fsub_cfg = (struct virtchnl_flow_sub *)args.out_buffer;
+
+	if (fsub_cfg->status == VIRTCHNL_FSUB_SUCCESS) {
+		PMD_DRV_LOG(INFO, "Succeed in checking rule request by PF");
+	} else if (fsub_cfg->status == VIRTCHNL_FSUB_FAILURE_RULE_INVALID) {
+		PMD_DRV_LOG(ERR, "Failed to check rule request due to "
+				 "parameter validation failure or "
+				 "unsupported HW");
+		err = -1;
+	} else {
+		PMD_DRV_LOG(ERR, "Failed to check rule request due to other "
+				 "reasons");
+		err = -1;
+	}
+
+	return err;
+}
+
 int
 iavf_add_del_rss_cfg(struct iavf_adapter *adapter,
 		     struct virtchnl_rss_cfg *rss_cfg, bool add)
diff --git a/drivers/net/iavf/meson.build b/drivers/net/iavf/meson.build
index 2da37de662..6df771f917 100644
--- a/drivers/net/iavf/meson.build
+++ b/drivers/net/iavf/meson.build
@@ -16,6 +16,7 @@ sources = files(
         'iavf_hash.c',
         'iavf_tm.c',
         'iavf_ipsec_crypto.c',
+        'iavf_fsub.c',
 )
 
 if arch_subdir == 'x86'
-- 
2.25.1



* [PATCH v2 0/5] support flow subscription
  2022-08-09  6:21 [PATCH 0/2] support flow subscription Jie Wang
  2022-08-09  6:21 ` [PATCH 1/2] common/iavf: " Jie Wang
  2022-08-09  6:21 ` [PATCH 2/2] net/iavf: enable flow subscription rule support for AVF Jie Wang
@ 2022-08-12 17:04 ` Jie Wang
  2022-08-12 17:04   ` [PATCH v2 1/5] common/iavf: " Jie Wang
                     ` (4 more replies)
  2022-08-30 18:05 ` [PATCH v3 0/5] support flow subscription Jie Wang
                   ` (3 subsequent siblings)
  6 siblings, 5 replies; 40+ messages in thread
From: Jie Wang @ 2022-08-12 17:04 UTC (permalink / raw)
  To: dev
  Cc: qiming.yang, qi.z.zhang, jingjing.wu, beilei.xing, stevex.yang, Jie Wang

Add support for the AVF driver to subscribe to a flow from the PF.

--
v2:
 * split v1 patch 2/2 into 4 smaller patches.
 * remove rule action RTE_FLOW_ACTION_TYPE_VF and add
   RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT (see the sketch below).
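
For reference, a minimal sketch of the v2 action list (illustrative only;
the represented ethdev port id and queue index are placeholder values):

	struct rte_flow_action_ethdev rep_port = { .port_id = 0 /* placeholder */ };
	struct rte_flow_action_queue queue = { .index = 0 /* placeholder */ };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT, .conf = &rep_port },
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};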

Jie Wang (5):
  common/iavf: support flow subscription
  net/iavf: add flow subscription to AVF
  net/iavf: support flow subscription pattern
  net/iavf: support flow subscription rule
  net/iavf: support priority of flow rule

 doc/guides/rel_notes/release_22_11.rst |   4 +
 drivers/common/iavf/virtchnl.h         | 104 +++-
 drivers/net/iavf/iavf.h                |  13 +
 drivers/net/iavf/iavf_fdir.c           |   4 +
 drivers/net/iavf/iavf_fsub.c           | 745 +++++++++++++++++++++++++
 drivers/net/iavf/iavf_generic_flow.c   |  40 +-
 drivers/net/iavf/iavf_generic_flow.h   |   2 +
 drivers/net/iavf/iavf_hash.c           |   5 +
 drivers/net/iavf/iavf_ipsec_crypto.c   |  16 +-
 drivers/net/iavf/iavf_vchnl.c          | 133 +++++
 drivers/net/iavf/meson.build           |   1 +
 11 files changed, 1046 insertions(+), 21 deletions(-)
 create mode 100644 drivers/net/iavf/iavf_fsub.c

-- 
2.25.1



* [PATCH v2 1/5] common/iavf: support flow subscription
  2022-08-12 17:04 ` [PATCH v2 0/5] support flow subscription Jie Wang
@ 2022-08-12 17:04   ` Jie Wang
  2022-08-12 17:04   ` [PATCH v2 2/5] net/iavf: add flow subscription to AVF Jie Wang
                     ` (3 subsequent siblings)
  4 siblings, 0 replies; 40+ messages in thread
From: Jie Wang @ 2022-08-12 17:04 UTC (permalink / raw)
  To: dev
  Cc: qiming.yang, qi.z.zhang, jingjing.wu, beilei.xing, stevex.yang, Jie Wang

A VF can subscribe to a flow from the PF via VIRTCHNL_FLOW_SUBSCRIBE.

The PF is expected to offload a rule to hardware that redirects
packets matching the required pattern to this VF.

Only a flow whose destination MAC address is the PF's MAC address
can be subscribed to.

VIRTCHNL_VF_OFFLOAD_FSUB_PF is used for flow subscription capability
negotiation, and only a trusted VF can be granted this capability.

A flow can be unsubscribed via VIRTCHNL_FLOW_UNSUBSCRIBE.
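
For reference, the matching unsubscribe is a much smaller message. A
minimal sketch (again assuming the virtchnl definitions added by this
patch and a hypothetical send_to_pf() transport that copies the PF's
reply back into the buffer):

static int example_unsubscribe(u16 vsi_id, u32 flow_id)
{
	struct virtchnl_flow_unsub msg = {0};

	msg.vsi_id = vsi_id;
	msg.flow_id = flow_id;	/* id returned by VIRTCHNL_OP_FLOW_SUBSCRIBE */

	if (send_to_pf(VIRTCHNL_OP_FLOW_UNSUBSCRIBE, &msg, sizeof(msg)))
		return -1;

	/* PF reports the result in msg.status */
	return msg.status == VIRTCHNL_FSUB_SUCCESS ? 0 : -1;
}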

Signed-off-by: Jie Wang <jie1x.wang@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
 drivers/common/iavf/virtchnl.h | 104 +++++++++++++++++++++++++++++++--
 1 file changed, 100 insertions(+), 4 deletions(-)

diff --git a/drivers/common/iavf/virtchnl.h b/drivers/common/iavf/virtchnl.h
index f123daec8e..e02eec4935 100644
--- a/drivers/common/iavf/virtchnl.h
+++ b/drivers/common/iavf/virtchnl.h
@@ -168,6 +168,8 @@ enum virtchnl_ops {
 	VIRTCHNL_OP_MAP_QUEUE_VECTOR = 111,
 	VIRTCHNL_OP_CONFIG_QUEUE_BW = 112,
 	VIRTCHNL_OP_CONFIG_QUANTA = 113,
+	VIRTCHNL_OP_FLOW_SUBSCRIBE = 114,
+	VIRTCHNL_OP_FLOW_UNSUBSCRIBE = 115,
 	VIRTCHNL_OP_MAX,
 };
 
@@ -282,6 +284,10 @@ static inline const char *virtchnl_op_str(enum virtchnl_ops v_opcode)
 		return "VIRTCHNL_OP_1588_PTP_GET_CAPS";
 	case VIRTCHNL_OP_1588_PTP_GET_TIME:
 		return "VIRTCHNL_OP_1588_PTP_GET_TIME";
+	case VIRTCHNL_OP_FLOW_SUBSCRIBE:
+		return "VIRTCHNL_OP_FLOW_SUBSCRIBE";
+	case VIRTCHNL_OP_FLOW_UNSUBSCRIBE:
+		return "VIRTCHNL_OP_FLOW_UNSUBSCRIBE";
 	case VIRTCHNL_OP_MAX:
 		return "VIRTCHNL_OP_MAX";
 	default:
@@ -401,6 +407,7 @@ VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
 #define VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO	BIT(8)
 #define VIRTCHNL_VF_LARGE_NUM_QPAIRS		BIT(9)
 #define VIRTCHNL_VF_OFFLOAD_CRC			BIT(10)
+#define VIRTCHNL_VF_OFFLOAD_FSUB_PF		BIT(14)
 #define VIRTCHNL_VF_OFFLOAD_VLAN_V2		BIT(15)
 #define VIRTCHNL_VF_OFFLOAD_VLAN		BIT(16)
 #define VIRTCHNL_VF_OFFLOAD_RX_POLLING		BIT(17)
@@ -1503,6 +1510,7 @@ enum virtchnl_vfr_states {
 };
 
 #define VIRTCHNL_MAX_NUM_PROTO_HDRS	32
+#define VIRTCHNL_MAX_NUM_PROTO_HDRS_W_MSK	16
 #define VIRTCHNL_MAX_SIZE_RAW_PACKET	1024
 #define PROTO_HDR_SHIFT			5
 #define PROTO_HDR_FIELD_START(proto_hdr_type) \
@@ -1695,6 +1703,22 @@ struct virtchnl_proto_hdr {
 
 VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_proto_hdr);
 
+struct virtchnl_proto_hdr_w_msk {
+	/* see enum virtchnl_proto_hdr_type */
+	s32 type;
+	u32 pad;
+	/**
+	 * binary buffer in network order for specific header type.
+	 * For example, if type = VIRTCHNL_PROTO_HDR_IPV4, an IPv4
+	 * header is expected to be copied into the buffer.
+	 */
+	u8 buffer_spec[64];
+	/* binary buffer for bit-mask applied to specific header type */
+	u8 buffer_mask[64];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(136, virtchnl_proto_hdr_w_msk);
+
 struct virtchnl_proto_hdrs {
 	u8 tunnel_level;
 	/**
@@ -1706,11 +1730,18 @@ struct virtchnl_proto_hdrs {
 	 */
 	int count;
 	/**
-	 * number of proto layers, must < VIRTCHNL_MAX_NUM_PROTO_HDRS
-	 * must be 0 for a raw packet request.
+	 * count must be <=
+	 * VIRTCHNL_MAX_NUM_PROTO_HDRS + VIRTCHNL_MAX_NUM_PROTO_HDRS_W_MSK
+	 * count = 0 :					select raw
+	 * 0 < count <= VIRTCHNL_MAX_NUM_PROTO_HDRS :	select proto_hdr
+	 * count > VIRTCHNL_MAX_NUM_PROTO_HDRS :	select proto_hdr_w_msk
+	 * last valid index = count - VIRTCHNL_MAX_NUM_PROTO_HDRS
 	 */
 	union {
-		struct virtchnl_proto_hdr proto_hdr[VIRTCHNL_MAX_NUM_PROTO_HDRS];
+		struct virtchnl_proto_hdr
+			proto_hdr[VIRTCHNL_MAX_NUM_PROTO_HDRS];
+		struct virtchnl_proto_hdr_w_msk
+			proto_hdr_w_msk[VIRTCHNL_MAX_NUM_PROTO_HDRS_W_MSK];
 		struct {
 			u16 pkt_len;
 			u8 spec[VIRTCHNL_MAX_SIZE_RAW_PACKET];
@@ -1731,7 +1762,7 @@ struct virtchnl_rss_cfg {
 
 VIRTCHNL_CHECK_STRUCT_LEN(2444, virtchnl_rss_cfg);
 
-/* action configuration for FDIR */
+/* action configuration for FDIR and FSUB */
 struct virtchnl_filter_action {
 	/* see enum virtchnl_action type */
 	s32 type;
@@ -1849,6 +1880,65 @@ struct virtchnl_fdir_del {
 
 VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_fdir_del);
 
+/* Status returned to VF after VF requests FSUB commands
+ * VIRTCHNL_FSUB_SUCCESS
+ * VF FLOW related request was successfully done by PF.
+ * The request can be OP_FLOW_SUBSCRIBE/UNSUBSCRIBE.
+ *
+ * VIRTCHNL_FSUB_FAILURE_RULE_NORESOURCE
+ * OP_FLOW_SUBSCRIBE request failed due to lack of hardware resources.
+ *
+ * VIRTCHNL_FSUB_FAILURE_RULE_EXIST
+ * OP_FLOW_SUBSCRIBE request failed because the rule already exists.
+ *
+ * VIRTCHNL_FSUB_FAILURE_RULE_NONEXIST
+ * OP_FLOW_UNSUBSCRIBE request failed because the rule doesn't exist.
+ *
+ * VIRTCHNL_FSUB_FAILURE_RULE_INVALID
+ * OP_FLOW_SUBSCRIBE request failed due to parameter validation
+ * failure or unsupported HW.
+ */
+enum virtchnl_fsub_prgm_status {
+	VIRTCHNL_FSUB_SUCCESS = 0,
+	VIRTCHNL_FSUB_FAILURE_RULE_NORESOURCE,
+	VIRTCHNL_FSUB_FAILURE_RULE_EXIST,
+	VIRTCHNL_FSUB_FAILURE_RULE_NONEXIST,
+	VIRTCHNL_FSUB_FAILURE_RULE_INVALID,
+};
+
+/* VIRTCHNL_OP_FLOW_SUBSCRIBE
+ * VF sends this request to PF by filling out vsi_id,
+ * validate_only, priority, proto_hdrs and actions.
+ * PF will return flow_id
+ * if the request is successfully done and return status to VF.
+ */
+struct virtchnl_flow_sub {
+	u16 vsi_id; /* INPUT */
+	u8 validate_only; /* INPUT */
+	u8 priority; /* INPUT */
+	u32 flow_id; /* OUTPUT */
+	struct virtchnl_proto_hdrs proto_hdrs; /* INPUT */
+	struct virtchnl_filter_action_set actions; /* INPUT */
+	/* see enum virtchnl_fsub_prgm_status; OUTPUT */
+	s32 status;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(2616, virtchnl_flow_sub);
+
+/* VIRTCHNL_OP_FLOW_UNSUBSCRIBE
+ * VF sends this request to PF by filling out vsi_id
+ * and flow_id. PF will return status to VF.
+ */
+struct virtchnl_flow_unsub {
+	u16 vsi_id; /* INPUT */
+	u16 pad;
+	u32 flow_id; /* INPUT */
+	/* see enum virtchnl_fsub_prgm_status; OUTPUT */
+	s32 status;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_flow_unsub);
+
 /* VIRTCHNL_OP_GET_QOS_CAPS
  * VF sends this message to get its QoS Caps, such as
  * TC number, Arbiter and Bandwidth.
@@ -2318,6 +2408,12 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
 	case VIRTCHNL_OP_DEL_FDIR_FILTER:
 		valid_len = sizeof(struct virtchnl_fdir_del);
 		break;
+	case VIRTCHNL_OP_FLOW_SUBSCRIBE:
+		valid_len = sizeof(struct virtchnl_flow_sub);
+		break;
+	case VIRTCHNL_OP_FLOW_UNSUBSCRIBE:
+		valid_len = sizeof(struct virtchnl_flow_unsub);
+		break;
 	case VIRTCHNL_OP_GET_QOS_CAPS:
 		break;
 	case VIRTCHNL_OP_CONFIG_QUEUE_TC_MAP:
-- 
2.25.1



* [PATCH v2 2/5] net/iavf: add flow subscription to AVF
  2022-08-12 17:04 ` [PATCH v2 0/5] support flow subscription Jie Wang
  2022-08-12 17:04   ` [PATCH v2 1/5] common/iavf: " Jie Wang
@ 2022-08-12 17:04   ` Jie Wang
  2022-08-12 17:04   ` [PATCH v2 3/5] net/iavf: support flow subscription pattern Jie Wang
                     ` (2 subsequent siblings)
  4 siblings, 0 replies; 40+ messages in thread
From: Jie Wang @ 2022-08-12 17:04 UTC (permalink / raw)
  To: dev
  Cc: qiming.yang, qi.z.zhang, jingjing.wu, beilei.xing, stevex.yang, Jie Wang

Add the skeleton code of flow subscription to the AVF driver.

Signed-off-by: Jie Wang <jie1x.wang@intel.com>
---
 doc/guides/rel_notes/release_22_11.rst |   4 +
 drivers/net/iavf/iavf_fsub.c           | 112 +++++++++++++++++++++++++
 drivers/net/iavf/iavf_generic_flow.c   |  17 +++-
 drivers/net/iavf/iavf_generic_flow.h   |   1 +
 drivers/net/iavf/iavf_vchnl.c          |   1 +
 drivers/net/iavf/meson.build           |   1 +
 6 files changed, 135 insertions(+), 1 deletion(-)
 create mode 100644 drivers/net/iavf/iavf_fsub.c

diff --git a/doc/guides/rel_notes/release_22_11.rst b/doc/guides/rel_notes/release_22_11.rst
index 8c021cf050..bb77a03e24 100644
--- a/doc/guides/rel_notes/release_22_11.rst
+++ b/doc/guides/rel_notes/release_22_11.rst
@@ -55,6 +55,10 @@ New Features
      Also, make sure to start the actual text at the margin.
      =======================================================
 
+* **Updated Intel iavf driver.**
+
+  * Added flow subscription support.
+
 
 Removed Items
 -------------
diff --git a/drivers/net/iavf/iavf_fsub.c b/drivers/net/iavf/iavf_fsub.c
new file mode 100644
index 0000000000..17f9bb2976
--- /dev/null
+++ b/drivers/net/iavf/iavf_fsub.c
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2022 Intel Corporation
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_eth_ctrl.h>
+#include <rte_tailq.h>
+#include <rte_flow_driver.h>
+#include <rte_flow.h>
+#include <iavf.h>
+#include "iavf_generic_flow.h"
+
+
+static struct iavf_flow_parser iavf_fsub_parser;
+
+static struct iavf_pattern_match_item iavf_fsub_pattern_list[] = {};
+
+static int
+iavf_fsub_create(__rte_unused struct iavf_adapter *ad,
+		 __rte_unused struct rte_flow *flow,
+		 __rte_unused void *meta,
+		 __rte_unused struct rte_flow_error *error)
+{
+	return -rte_errno;
+}
+
+static int
+iavf_fsub_destroy(__rte_unused struct iavf_adapter *ad,
+		  __rte_unused struct rte_flow *flow,
+		  __rte_unused struct rte_flow_error *error)
+{
+	return -rte_errno;
+}
+
+static int
+iavf_fsub_validation(__rte_unused struct iavf_adapter *ad,
+		     __rte_unused struct rte_flow *flow,
+		     __rte_unused void *meta,
+		     __rte_unused struct rte_flow_error *error)
+{
+	return -rte_errno;
+};
+
+static int
+iavf_fsub_parse(__rte_unused struct iavf_adapter *ad,
+		__rte_unused struct iavf_pattern_match_item *array,
+		__rte_unused uint32_t array_len,
+		__rte_unused const struct rte_flow_item pattern[],
+		__rte_unused const struct rte_flow_action actions[],
+		__rte_unused void **meta,
+		__rte_unused struct rte_flow_error *error)
+{
+	return -rte_errno;
+}
+
+static int
+iavf_fsub_init(struct iavf_adapter *ad)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
+	struct iavf_flow_parser *parser;
+
+	if (!vf->vf_res)
+		return -EINVAL;
+
+	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_FSUB_PF)
+		parser = &iavf_fsub_parser;
+	else
+		return -ENOTSUP;
+
+	return iavf_register_parser(parser, ad);
+}
+
+static void
+iavf_fsub_uninit(struct iavf_adapter *ad)
+{
+	iavf_unregister_parser(&iavf_fsub_parser, ad);
+}
+
+static struct
+iavf_flow_engine iavf_fsub_engine = {
+	.init = iavf_fsub_init,
+	.uninit = iavf_fsub_uninit,
+	.create = iavf_fsub_create,
+	.destroy = iavf_fsub_destroy,
+	.validation = iavf_fsub_validation,
+	.type = IAVF_FLOW_ENGINE_FSUB,
+};
+
+static struct
+iavf_flow_parser iavf_fsub_parser = {
+	.engine = &iavf_fsub_engine,
+	.array = iavf_fsub_pattern_list,
+	.array_len = RTE_DIM(iavf_fsub_pattern_list),
+	.parse_pattern_action = iavf_fsub_parse,
+	.stage = IAVF_FLOW_STAGE_DISTRIBUTOR,
+};
+
+RTE_INIT(iavf_fsub_engine_init)
+{
+	iavf_register_flow_engine(&iavf_fsub_engine);
+}
diff --git a/drivers/net/iavf/iavf_generic_flow.c b/drivers/net/iavf/iavf_generic_flow.c
index e1a611e319..b04614ba6e 100644
--- a/drivers/net/iavf/iavf_generic_flow.c
+++ b/drivers/net/iavf/iavf_generic_flow.c
@@ -1866,6 +1866,8 @@ iavf_register_parser(struct iavf_flow_parser *parser,
 {
 	struct iavf_parser_list *list = NULL;
 	struct iavf_flow_parser_node *parser_node;
+	struct iavf_flow_parser_node *existing_node;
+	void *temp;
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
 
 	parser_node = rte_zmalloc("iavf_parser", sizeof(*parser_node), 0);
@@ -1880,14 +1882,26 @@ iavf_register_parser(struct iavf_flow_parser *parser,
 		TAILQ_INSERT_TAIL(list, parser_node, node);
 	} else if (parser->engine->type == IAVF_FLOW_ENGINE_FDIR) {
 		list = &vf->dist_parser_list;
+		RTE_TAILQ_FOREACH_SAFE(existing_node, list, node, temp) {
+			if (existing_node->parser->engine->type ==
+			    IAVF_FLOW_ENGINE_FSUB) {
+				TAILQ_INSERT_AFTER(list, existing_node,
+						   parser_node, node);
+				goto DONE;
+			}
+		}
 		TAILQ_INSERT_HEAD(list, parser_node, node);
 	} else if (parser->engine->type == IAVF_FLOW_ENGINE_IPSEC_CRYPTO) {
 		list = &vf->ipsec_crypto_parser_list;
 		TAILQ_INSERT_HEAD(list, parser_node, node);
+	} else if (parser->engine->type == IAVF_FLOW_ENGINE_FSUB) {
+		list = &vf->dist_parser_list;
+		TAILQ_INSERT_HEAD(list, parser_node, node);
 	} else {
 		return -EINVAL;
 	}
 
+DONE:
 	return 0;
 }
 
@@ -1902,7 +1916,8 @@ iavf_unregister_parser(struct iavf_flow_parser *parser,
 
 	if (parser->engine->type == IAVF_FLOW_ENGINE_HASH)
 		list = &vf->rss_parser_list;
-	else if (parser->engine->type == IAVF_FLOW_ENGINE_FDIR)
+	else if ((parser->engine->type == IAVF_FLOW_ENGINE_FDIR) ||
+		 (parser->engine->type == IAVF_FLOW_ENGINE_FSUB))
 		list = &vf->dist_parser_list;
 
 	if (list == NULL)
diff --git a/drivers/net/iavf/iavf_generic_flow.h b/drivers/net/iavf/iavf_generic_flow.h
index 52eb1caf29..448facffa5 100644
--- a/drivers/net/iavf/iavf_generic_flow.h
+++ b/drivers/net/iavf/iavf_generic_flow.h
@@ -480,6 +480,7 @@ enum iavf_flow_engine_type {
 	IAVF_FLOW_ENGINE_IPSEC_CRYPTO,
 	IAVF_FLOW_ENGINE_FDIR,
 	IAVF_FLOW_ENGINE_HASH,
+	IAVF_FLOW_ENGINE_FSUB,
 	IAVF_FLOW_ENGINE_MAX,
 };
 
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 21bd1e2193..6d84add423 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -502,6 +502,7 @@ iavf_get_vf_resource(struct iavf_adapter *adapter)
 		VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC |
 		VIRTCHNL_VF_OFFLOAD_FDIR_PF |
 		VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF |
+		VIRTCHNL_VF_OFFLOAD_FSUB_PF |
 		VIRTCHNL_VF_OFFLOAD_REQ_QUEUES |
 		VIRTCHNL_VF_OFFLOAD_CRC |
 		VIRTCHNL_VF_OFFLOAD_VLAN_V2 |
diff --git a/drivers/net/iavf/meson.build b/drivers/net/iavf/meson.build
index 2da37de662..6df771f917 100644
--- a/drivers/net/iavf/meson.build
+++ b/drivers/net/iavf/meson.build
@@ -16,6 +16,7 @@ sources = files(
         'iavf_hash.c',
         'iavf_tm.c',
         'iavf_ipsec_crypto.c',
+        'iavf_fsub.c',
 )
 
 if arch_subdir == 'x86'
-- 
2.25.1


^ permalink raw reply	[flat|nested] 40+ messages in thread

* [PATCH v2 3/5] net/iavf: support flow subscription pattern
  2022-08-12 17:04 ` [PATCH v2 0/5] support flow subscription Jie Wang
  2022-08-12 17:04   ` [PATCH v2 1/5] common/iavf: " Jie Wang
  2022-08-12 17:04   ` [PATCH v2 2/5] net/iavf: add flow subscription to AVF Jie Wang
@ 2022-08-12 17:04   ` Jie Wang
  2022-08-12 17:04   ` [PATCH v2 4/5] net/iavf: support flow subscription rule Jie Wang
  2022-08-12 17:04   ` [PATCH v2 5/5] net/iavf: support priority of flow rule Jie Wang
  4 siblings, 0 replies; 40+ messages in thread
From: Jie Wang @ 2022-08-12 17:04 UTC (permalink / raw)
  To: dev
  Cc: qiming.yang, qi.z.zhang, jingjing.wu, beilei.xing, stevex.yang, Jie Wang

Add flow subscription pattern support for AVF.

The supported patterns are listed below:
eth/vlan/ipv4
eth/ipv4(6)
eth/ipv4(6)/udp
eth/ipv4(6)/tcp
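
As an illustrative sketch (assumed port ids: VF at testpmd port 0, its
representor at port 1; rule programming arrives in a later patch of
this series), the eth/vlan/ipv4 pattern maps to a testpmd rule such as:

testpmd> flow create 0 ingress pattern eth / vlan tci is 100
          / ipv4 dst is 192.168.0.1 / end
          actions represented_port ethdev_port_id 1 / end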

Signed-off-by: Jie Wang <jie1x.wang@intel.com>
---
 drivers/net/iavf/iavf.h      |   7 +
 drivers/net/iavf/iavf_fsub.c | 598 ++++++++++++++++++++++++++++++++++-
 2 files changed, 597 insertions(+), 8 deletions(-)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 025ab3ff60..f79c7f9f6e 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -148,6 +148,13 @@ struct iavf_fdir_info {
 	struct iavf_fdir_conf conf;
 };
 
+struct iavf_fsub_conf {
+	struct virtchnl_flow_sub sub_fltr;
+	struct virtchnl_flow_unsub unsub_fltr;
+	uint64_t input_set;
+	uint32_t flow_id;
+};
+
 struct iavf_qv_map {
 	uint16_t queue_id;
 	uint16_t vector_id;
diff --git a/drivers/net/iavf/iavf_fsub.c b/drivers/net/iavf/iavf_fsub.c
index 17f9bb2976..89e60c5d57 100644
--- a/drivers/net/iavf/iavf_fsub.c
+++ b/drivers/net/iavf/iavf_fsub.c
@@ -22,9 +22,51 @@
 #include "iavf_generic_flow.h"
 
 
+#define MAX_QGRP_NUM_TYPE      7
+#define IAVF_IPV6_ADDR_LENGTH  16
+#define MAX_INPUT_SET_BYTE     32
+
+#define IAVF_SW_INSET_ETHER ( \
+	IAVF_INSET_DMAC | IAVF_INSET_SMAC | IAVF_INSET_ETHERTYPE)
+#define IAVF_SW_INSET_MAC_IPV4 ( \
+	IAVF_INSET_DMAC | IAVF_INSET_IPV4_DST | IAVF_INSET_IPV4_SRC | \
+	IAVF_INSET_IPV4_PROTO | IAVF_INSET_IPV4_TTL | IAVF_INSET_IPV4_TOS)
+#define IAVF_SW_INSET_MAC_VLAN_IPV4 ( \
+	IAVF_SW_INSET_MAC_IPV4 | IAVF_INSET_VLAN_OUTER)
+#define IAVF_SW_INSET_MAC_IPV4_TCP ( \
+	IAVF_INSET_DMAC | IAVF_INSET_IPV4_DST | IAVF_INSET_IPV4_SRC | \
+	IAVF_INSET_IPV4_TTL | IAVF_INSET_IPV4_TOS | \
+	IAVF_INSET_TCP_DST_PORT | IAVF_INSET_TCP_SRC_PORT)
+#define IAVF_SW_INSET_MAC_IPV4_UDP ( \
+	IAVF_INSET_DMAC | IAVF_INSET_IPV4_DST | IAVF_INSET_IPV4_SRC | \
+	IAVF_INSET_IPV4_TTL | IAVF_INSET_IPV4_TOS | \
+	IAVF_INSET_UDP_DST_PORT | IAVF_INSET_UDP_SRC_PORT)
+#define IAVF_SW_INSET_MAC_IPV6 ( \
+	IAVF_INSET_DMAC | IAVF_INSET_IPV6_DST | IAVF_INSET_IPV6_SRC | \
+	IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
+	IAVF_INSET_IPV6_NEXT_HDR)
+#define IAVF_SW_INSET_MAC_IPV6_TCP ( \
+	IAVF_INSET_DMAC | IAVF_INSET_IPV6_DST | IAVF_INSET_IPV6_SRC | \
+	IAVF_INSET_IPV6_HOP_LIMIT | IAVF_INSET_IPV6_TC | \
+	IAVF_INSET_TCP_DST_PORT | IAVF_INSET_TCP_SRC_PORT)
+#define IAVF_SW_INSET_MAC_IPV6_UDP ( \
+	IAVF_INSET_DMAC | IAVF_INSET_IPV6_DST | IAVF_INSET_IPV6_SRC | \
+	IAVF_INSET_IPV6_HOP_LIMIT | IAVF_INSET_IPV6_TC | \
+	IAVF_INSET_UDP_DST_PORT | IAVF_INSET_UDP_SRC_PORT)
+
 static struct iavf_flow_parser iavf_fsub_parser;
 
-static struct iavf_pattern_match_item iavf_fsub_pattern_list[] = {};
+static struct
+iavf_pattern_match_item iavf_fsub_pattern_list[] = {
+	{iavf_pattern_ethertype,			IAVF_SW_INSET_ETHER,			IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4,				IAVF_SW_INSET_MAC_IPV4,			IAVF_INSET_NONE},
+	{iavf_pattern_eth_vlan_ipv4,			IAVF_SW_INSET_MAC_VLAN_IPV4,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_udp,			IAVF_SW_INSET_MAC_IPV4_UDP,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_tcp,			IAVF_SW_INSET_MAC_IPV4_TCP,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv6,				IAVF_SW_INSET_MAC_IPV6,			IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv6_udp,			IAVF_SW_INSET_MAC_IPV6_UDP,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv6_tcp,			IAVF_SW_INSET_MAC_IPV6_TCP,		IAVF_INSET_NONE},
+};
 
 static int
 iavf_fsub_create(__rte_unused struct iavf_adapter *ad,
@@ -53,17 +95,557 @@ iavf_fsub_validation(__rte_unused struct iavf_adapter *ad,
 };
 
 static int
-iavf_fsub_parse(__rte_unused struct iavf_adapter *ad,
-		__rte_unused struct iavf_pattern_match_item *array,
-		__rte_unused uint32_t array_len,
-		__rte_unused const struct rte_flow_item pattern[],
-		__rte_unused const struct rte_flow_action actions[],
-		__rte_unused void **meta,
-		__rte_unused struct rte_flow_error *error)
+iavf_fsub_parse_pattern(const struct rte_flow_item pattern[],
+			const uint64_t input_set_mask,
+			struct rte_flow_error *error,
+			struct iavf_fsub_conf *filter)
+{
+	struct virtchnl_proto_hdrs *hdrs = &filter->sub_fltr.proto_hdrs;
+	enum rte_flow_item_type item_type;
+	const struct rte_flow_item_eth *eth_spec, *eth_mask;
+	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
+	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
+	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
+	const struct rte_flow_item_udp *udp_spec, *udp_mask;
+	const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
+	const struct rte_flow_item *item = pattern;
+	struct virtchnl_proto_hdr_w_msk *hdr, *hdr1 = NULL;
+	uint64_t outer_input_set = IAVF_INSET_NONE;
+	uint64_t *input = NULL;
+	uint16_t input_set_byte = 0;
+	uint16_t j;
+	uint32_t layer = 0;
+
+	for (item = pattern; item->type !=
+			RTE_FLOW_ITEM_TYPE_END; item++) {
+		if (item->last) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item, "Not support range");
+			return -rte_errno;
+		}
+		item_type = item->type;
+
+		switch (item_type) {
+		case RTE_FLOW_ITEM_TYPE_ETH:
+			eth_spec = item->spec;
+			eth_mask = item->mask;
+
+			hdr1 = &hdrs->proto_hdr_w_msk[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr1, ETH);
+
+			if (eth_spec && eth_mask) {
+				input = &outer_input_set;
+
+				if (!rte_is_zero_ether_addr(&eth_mask->dst)) {
+					*input |= IAVF_INSET_DMAC;
+					input_set_byte += 6;
+				} else {
+					/* flow subscribe filter will add dst mac in kernel */
+					input_set_byte += 6;
+				}
+
+				if (!rte_is_zero_ether_addr(&eth_mask->src)) {
+					*input |= IAVF_INSET_SMAC;
+					input_set_byte += 6;
+				}
+
+				if (eth_mask->type) {
+					*input |= IAVF_INSET_ETHERTYPE;
+					input_set_byte += 2;
+				}
+
+				rte_memcpy(hdr1->buffer_spec, eth_spec,
+					   sizeof(struct rte_ether_hdr));
+				rte_memcpy(hdr1->buffer_mask, eth_mask,
+					   sizeof(struct rte_ether_hdr));
+			} else {
+				/* flow subscribe filter will add dst mac in kernel */
+				input_set_byte += 6;
+			}
+
+			hdrs->count = ++layer;
+			break;
+		case RTE_FLOW_ITEM_TYPE_IPV4:
+			ipv4_spec = item->spec;
+			ipv4_mask = item->mask;
+
+			hdr = &hdrs->proto_hdr_w_msk[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4);
+
+			if (ipv4_spec && ipv4_mask) {
+				input = &outer_input_set;
+				/* Check IPv4 mask and update input set */
+				if (ipv4_mask->hdr.version_ihl ||
+					ipv4_mask->hdr.total_length ||
+					ipv4_mask->hdr.packet_id ||
+					ipv4_mask->hdr.hdr_checksum) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item, "Invalid IPv4 mask.");
+					return -rte_errno;
+				}
+
+				if (ipv4_mask->hdr.src_addr) {
+					*input |= IAVF_INSET_IPV4_SRC;
+					input_set_byte += 2;
+				}
+				if (ipv4_mask->hdr.dst_addr) {
+					*input |= IAVF_INSET_IPV4_DST;
+					input_set_byte += 2;
+				}
+				if (ipv4_mask->hdr.time_to_live) {
+					*input |= IAVF_INSET_IPV4_TTL;
+					input_set_byte++;
+				}
+				if (ipv4_mask->hdr.next_proto_id) {
+					*input |= IAVF_INSET_IPV4_PROTO;
+					input_set_byte++;
+				}
+				if (ipv4_mask->hdr.type_of_service) {
+					*input |= IAVF_INSET_IPV4_TOS;
+					input_set_byte++;
+				}
+
+				rte_memcpy(hdr->buffer_spec, &ipv4_spec->hdr,
+					   sizeof(ipv4_spec->hdr));
+				rte_memcpy(hdr->buffer_mask, &ipv4_mask->hdr,
+					   sizeof(ipv4_spec->hdr));
+			}
+
+			hdrs->count = ++layer;
+			break;
+		case RTE_FLOW_ITEM_TYPE_IPV6:
+			ipv6_spec = item->spec;
+			ipv6_mask = item->mask;
+
+			hdr = &hdrs->proto_hdr_w_msk[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV6);
+
+			if (ipv6_spec && ipv6_mask) {
+				input = &outer_input_set;
+
+				if (ipv6_mask->hdr.payload_len) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item, "Invalid IPv6 mask");
+					return -rte_errno;
+				}
+
+				for (j = 0; j < IAVF_IPV6_ADDR_LENGTH; j++) {
+					if (ipv6_mask->hdr.src_addr[j]) {
+						*input |= IAVF_INSET_IPV6_SRC;
+						break;
+					}
+				}
+				for (j = 0; j < IAVF_IPV6_ADDR_LENGTH; j++) {
+					if (ipv6_mask->hdr.dst_addr[j]) {
+						*input |= IAVF_INSET_IPV6_DST;
+						break;
+					}
+				}
+
+				for (j = 0; j < IAVF_IPV6_ADDR_LENGTH; j++) {
+					if (ipv6_mask->hdr.src_addr[j])
+						input_set_byte++;
+
+					if (ipv6_mask->hdr.dst_addr[j])
+						input_set_byte++;
+				}
+
+				if (ipv6_mask->hdr.proto) {
+					*input |= IAVF_INSET_IPV6_NEXT_HDR;
+					input_set_byte++;
+				}
+				if (ipv6_mask->hdr.hop_limits) {
+					*input |= IAVF_INSET_IPV6_HOP_LIMIT;
+					input_set_byte++;
+				}
+				if (ipv6_mask->hdr.vtc_flow &
+				    rte_cpu_to_be_32(RTE_IPV6_HDR_TC_MASK)) {
+					*input |= IAVF_INSET_IPV6_TC;
+					input_set_byte += 4;
+				}
+
+				rte_memcpy(hdr->buffer_spec, &ipv6_spec->hdr,
+					   sizeof(ipv6_spec->hdr));
+				rte_memcpy(hdr->buffer_mask, &ipv6_mask->hdr,
+					   sizeof(ipv6_spec->hdr));
+			}
+
+			hdrs->count = ++layer;
+			break;
+		case RTE_FLOW_ITEM_TYPE_UDP:
+			udp_spec = item->spec;
+			udp_mask = item->mask;
+
+			hdr = &hdrs->proto_hdr_w_msk[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, UDP);
+
+			if (udp_spec && udp_mask) {
+				input = &outer_input_set;
+				/* Check UDP mask and update input set*/
+				if (udp_mask->hdr.dgram_len ||
+				    udp_mask->hdr.dgram_cksum) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item, "Invalid UDP mask");
+					return -rte_errno;
+				}
+
+				if (udp_mask->hdr.src_port) {
+					*input |= IAVF_INSET_UDP_SRC_PORT;
+					input_set_byte += 2;
+				}
+				if (udp_mask->hdr.dst_port) {
+					*input |= IAVF_INSET_UDP_DST_PORT;
+					input_set_byte += 2;
+				}
+
+				rte_memcpy(hdr->buffer_spec, &udp_spec->hdr,
+					   sizeof(udp_spec->hdr));
+				rte_memcpy(hdr->buffer_mask, &udp_mask->hdr,
+					   sizeof(udp_mask->hdr));
+			}
+
+			hdrs->count = ++layer;
+			break;
+		case RTE_FLOW_ITEM_TYPE_TCP:
+			tcp_spec = item->spec;
+			tcp_mask = item->mask;
+
+			hdr = &hdrs->proto_hdr_w_msk[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, TCP);
+
+			if (tcp_spec && tcp_mask) {
+				input = &outer_input_set;
+				/* Check TCP mask and update input set */
+				if (tcp_mask->hdr.sent_seq ||
+					tcp_mask->hdr.recv_ack ||
+					tcp_mask->hdr.data_off ||
+					tcp_mask->hdr.tcp_flags ||
+					tcp_mask->hdr.rx_win ||
+					tcp_mask->hdr.cksum ||
+					tcp_mask->hdr.tcp_urp) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item, "Invalid TCP mask");
+					return -rte_errno;
+				}
+
+				if (tcp_mask->hdr.src_port) {
+					*input |= IAVF_INSET_TCP_SRC_PORT;
+					input_set_byte += 2;
+				}
+				if (tcp_mask->hdr.dst_port) {
+					*input |= IAVF_INSET_TCP_DST_PORT;
+					input_set_byte += 2;
+				}
+
+				rte_memcpy(hdr->buffer_spec, &tcp_spec->hdr,
+					   sizeof(tcp_spec->hdr));
+				rte_memcpy(hdr->buffer_mask, &tcp_mask->hdr,
+					   sizeof(tcp_mask->hdr));
+			}
+
+			hdrs->count = ++layer;
+			break;
+		case RTE_FLOW_ITEM_TYPE_VLAN:
+			vlan_spec = item->spec;
+			vlan_mask = item->mask;
+
+			hdr = &hdrs->proto_hdr_w_msk[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, S_VLAN);
+
+			if (vlan_spec && vlan_mask) {
+				input = &outer_input_set;
+
+				*input |= IAVF_INSET_VLAN_OUTER;
+
+				if (vlan_mask->tci)
+					input_set_byte += 2;
+
+				if (vlan_mask->inner_type) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item,
+						"Invalid VLAN input set.");
+					return -rte_errno;
+				}
+
+				rte_memcpy(hdr->buffer_spec, &vlan_spec->hdr,
+					   sizeof(vlan_spec->hdr));
+				rte_memcpy(hdr->buffer_mask, &vlan_mask->hdr,
+					   sizeof(vlan_mask->hdr));
+			}
+
+			hdrs->count = ++layer;
+			break;
+		case RTE_FLOW_ITEM_TYPE_VOID:
+			break;
+		default:
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM, pattern,
+					   "Invalid pattern item.");
+			return -rte_errno;
+		}
+	}
+
+	hdrs->count += VIRTCHNL_MAX_NUM_PROTO_HDRS;
+
+	if (input_set_byte > MAX_INPUT_SET_BYTE) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+				   item, "too much input set");
+		return -rte_errno;
+	}
+
+	if (!outer_input_set || (outer_input_set & ~input_set_mask))
+		return -EINVAL;
+
+	return 0;
+}
+
+static int
+iavf_fsub_parse_action(struct iavf_adapter *ad,
+		       const struct rte_flow_action *actions,
+		       uint32_t priority,
+		       struct rte_flow_error *error,
+		       struct iavf_fsub_conf *filter)
 {
+	const struct rte_flow_action *action;
+	const struct rte_flow_action_ethdev *act_ethdev;
+	const struct rte_flow_action_queue *act_q;
+	const struct rte_flow_action_rss *act_qgrop;
+	struct virtchnl_filter_action *filter_action;
+	uint16_t valid_qgrop_number[MAX_QGRP_NUM_TYPE] = {
+		2, 4, 8, 16, 32, 64, 128};
+	uint16_t i, num = 0, dest_num = 0, vf_num = 0;
+	uint16_t rule_port_id;
+
+	for (action = actions; action->type !=
+				RTE_FLOW_ACTION_TYPE_END; action++) {
+		switch (action->type) {
+		case RTE_FLOW_ACTION_TYPE_VOID:
+			break;
+
+		case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
+			vf_num++;
+			filter_action = &filter->sub_fltr.actions.actions[num];
+
+			act_ethdev = action->conf;
+			rule_port_id = ad->dev_data->port_id;
+			if (rule_port_id != act_ethdev->port_id)
+				goto error1;
+
+			filter->sub_fltr.actions.count = ++num;
+			break;
+		case RTE_FLOW_ACTION_TYPE_QUEUE:
+			dest_num++;
+			filter_action = &filter->sub_fltr.actions.actions[num];
+
+			act_q = action->conf;
+			if (act_q->index >= ad->dev_data->nb_rx_queues)
+				goto error2;
+
+			filter_action->type = VIRTCHNL_ACTION_QUEUE;
+			filter_action->act_conf.queue.index = act_q->index;
+			filter->sub_fltr.actions.count = ++num;
+			break;
+		case RTE_FLOW_ACTION_TYPE_RSS:
+			dest_num++;
+			filter_action = &filter->sub_fltr.actions.actions[num];
+
+			act_qgrop = action->conf;
+			if (act_qgrop->queue_num <= 1)
+				goto error2;
+
+			filter_action->type = VIRTCHNL_ACTION_Q_REGION;
+			filter_action->act_conf.queue.index =
+							act_qgrop->queue[0];
+			for (i = 0; i < MAX_QGRP_NUM_TYPE; i++) {
+				if (act_qgrop->queue_num ==
+				    valid_qgrop_number[i])
+					break;
+			}
+
+			if (i == MAX_QGRP_NUM_TYPE)
+				goto error2;
+
+			if ((act_qgrop->queue[0] + act_qgrop->queue_num) >
+			    ad->dev_data->nb_rx_queues)
+				goto error3;
+
+			for (i = 0; i < act_qgrop->queue_num - 1; i++)
+				if (act_qgrop->queue[i + 1] !=
+				    act_qgrop->queue[i] + 1)
+					goto error4;
+
+			filter_action->act_conf.queue.region = act_qgrop->queue_num;
+			filter->sub_fltr.actions.count = ++num;
+			break;
+		default:
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ACTION,
+					   actions, "Invalid action type");
+			return -rte_errno;
+		}
+	}
+
+	/* 0 denotes lowest priority of recipe and highest priority
+	 * of rte_flow. Change rte_flow priority into recipe priority.
+	 */
+	filter->sub_fltr.priority = priority;
+
+	if (num > VIRTCHNL_MAX_NUM_ACTIONS) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ACTION, actions,
+				   "Action numbers exceed the maximum value");
+		return -rte_errno;
+	}
+
+	if (vf_num == 0) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ACTION, actions,
+				   "Invalid action, vf action must be added");
+		return -rte_errno;
+	}
+
+	if (dest_num >= 2) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ACTION, actions,
+				   "Unsupported action combination");
+		return -rte_errno;
+	}
+
+	return 0;
+
+error1:
+	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, actions,
+			   "Invalid ethdev_port_id");
+	return -rte_errno;
+
+error2:
+	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, actions,
+			   "Invalid action type or queue number");
+	return -rte_errno;
+
+error3:
+	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, actions,
+			   "Invalid queue region indexes");
+	return -rte_errno;
+
+error4:
+	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, actions,
+			   "Discontinuous queue region");
 	return -rte_errno;
 }
 
+static int
+iavf_fsub_check_action(const struct rte_flow_action *actions,
+		       struct rte_flow_error *error)
+{
+	const struct rte_flow_action *action;
+	enum rte_flow_action_type action_type;
+	uint16_t actions_num = 0;
+	bool vf_valid = false;
+	bool queue_valid = false;
+
+	for (action = actions; action->type !=
+				RTE_FLOW_ACTION_TYPE_END; action++) {
+		action_type = action->type;
+		switch (action_type) {
+		case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
+			vf_valid = true;
+			actions_num++;
+			break;
+		case RTE_FLOW_ACTION_TYPE_RSS:
+		case RTE_FLOW_ACTION_TYPE_QUEUE:
+			queue_valid = true;
+			actions_num++;
+			break;
+		case RTE_FLOW_ACTION_TYPE_DROP:
+			actions_num++;
+			break;
+		case RTE_FLOW_ACTION_TYPE_VOID:
+			continue;
+		default:
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ACTION,
+					   actions, "Invalid action type");
+			return -rte_errno;
+		}
+	}
+
+	if (!((actions_num == 1 && !queue_valid) ||
+	      (actions_num == 2 && vf_valid && queue_valid))) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+				   actions, "Invalid action number");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+static int
+iavf_fsub_parse(struct iavf_adapter *ad,
+		struct iavf_pattern_match_item *array,
+		uint32_t array_len,
+		const struct rte_flow_item pattern[],
+		const struct rte_flow_action actions[],
+		void **meta,
+		struct rte_flow_error *error)
+{
+	struct iavf_fsub_conf *filter;
+	struct iavf_pattern_match_item *pattern_match_item = NULL;
+	int ret = 0;
+	uint32_t priority = 0;
+
+	filter = rte_zmalloc(NULL, sizeof(*filter), 0);
+	if (!filter) {
+		rte_flow_error_set(error, ENOMEM,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "No memory for iavf_fsub_conf_ptr");
+		return -rte_errno;
+	}
+
+	/* search flow subscribe pattern */
+	pattern_match_item = iavf_search_pattern_match_item(pattern, array,
+							    array_len, error);
+	if (!pattern_match_item)
+		return -rte_errno;
+
+	/* parse flow subscribe pattern */
+	ret = iavf_fsub_parse_pattern(pattern,
+				      pattern_match_item->input_set_mask,
+				      error, filter);
+	if (ret)
+		goto error;
+
+	/* check flow subscribe pattern action */
+	ret = iavf_fsub_check_action(actions, error);
+	if (ret)
+		goto error;
+
+	/* parse flow subscribe pattern action */
+	ret = iavf_fsub_parse_action((void *)ad, actions, priority,
+				     error, filter);
+	if (ret)
+		goto error;
+
+	if (meta)
+		*meta = filter;
+
+error:
+	rte_free(pattern_match_item);
+	return ret;
+}
+
 static int
 iavf_fsub_init(struct iavf_adapter *ad)
 {
-- 
2.25.1


^ permalink raw reply	[flat|nested] 40+ messages in thread

* [PATCH v2 4/5] net/iavf: support flow subscription rule
  2022-08-12 17:04 ` [PATCH v2 0/5] support flow subscription Jie Wang
                     ` (2 preceding siblings ...)
  2022-08-12 17:04   ` [PATCH v2 3/5] net/iavf: support flow subscription pattern Jie Wang
@ 2022-08-12 17:04   ` Jie Wang
  2022-08-12 17:04   ` [PATCH v2 5/5] net/iavf: support priority of flow rule Jie Wang
  4 siblings, 0 replies; 40+ messages in thread
From: Jie Wang @ 2022-08-12 17:04 UTC (permalink / raw)
  To: dev
  Cc: qiming.yang, qi.z.zhang, jingjing.wu, beilei.xing, stevex.yang, Jie Wang

Support create/destroy/validate operations for flow subscription
rules on AVF.

For example:
testpmd> flow create 0 ingress pattern eth / ipv4 / udp src is 11
          / end actions represented_port ethdev_port_id 1 / end
testpmd> flow validate 1 ingress pattern eth / ipv4 / tcp src is 22
          / end actions represented_port ethdev_port_id 1 / end
testpmd> flow destroy 1 rule 0
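
The same operations map onto the generic rte_flow API; a minimal C
sketch is below (hedged: "vf_port"/"repr_port" and the helper name are
illustrative, and error handling is trimmed; validate lands in
iavf_fsub_validation() and create in iavf_fsub_create()):

#include <rte_flow.h>
#include <rte_byteorder.h>

static struct rte_flow *
subscribe_udp_flow(uint16_t vf_port, uint16_t repr_port)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	/* eth / ipv4 / udp src is 11, as in the testpmd example */
	struct rte_flow_item_udp udp_spec = {
		.hdr.src_port = RTE_BE16(11) };
	struct rte_flow_item_udp udp_mask = {
		.hdr.src_port = RTE_BE16(0xffff) };
	struct rte_flow_action_ethdev port = { .port_id = repr_port };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
		  .spec = &udp_spec, .mask = &udp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT,
		  .conf = &port },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;

	if (rte_flow_validate(vf_port, &attr, pattern, actions, &err))
		return NULL;
	return rte_flow_create(vf_port, &attr, pattern, actions, &err);
}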

Signed-off-by: Jie Wang <jie1x.wang@intel.com>
---
 drivers/net/iavf/iavf.h       |   6 ++
 drivers/net/iavf/iavf_fsub.c  |  75 +++++++++++++++----
 drivers/net/iavf/iavf_vchnl.c | 132 ++++++++++++++++++++++++++++++++++
 3 files changed, 201 insertions(+), 12 deletions(-)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index f79c7f9f6e..26b858f6f0 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -489,4 +489,10 @@ int iavf_ipsec_crypto_request(struct iavf_adapter *adapter,
 extern const struct rte_tm_ops iavf_tm_ops;
 int iavf_get_ptp_cap(struct iavf_adapter *adapter);
 int iavf_get_phc_time(struct iavf_rx_queue *rxq);
+int iavf_flow_sub(struct iavf_adapter *adapter,
+		  struct iavf_fsub_conf *filter);
+int iavf_flow_unsub(struct iavf_adapter *adapter,
+		    struct iavf_fsub_conf *filter);
+int iavf_flow_sub_check(struct iavf_adapter *adapter,
+			struct iavf_fsub_conf *filter);
 #endif /* _IAVF_ETHDEV_H_ */
diff --git a/drivers/net/iavf/iavf_fsub.c b/drivers/net/iavf/iavf_fsub.c
index 89e60c5d57..441bf478be 100644
--- a/drivers/net/iavf/iavf_fsub.c
+++ b/drivers/net/iavf/iavf_fsub.c
@@ -69,29 +69,80 @@ iavf_pattern_match_item iavf_fsub_pattern_list[] = {
 };
 
 static int
-iavf_fsub_create(__rte_unused struct iavf_adapter *ad,
-		 __rte_unused struct rte_flow *flow,
-		 __rte_unused void *meta,
-		 __rte_unused struct rte_flow_error *error)
+iavf_fsub_create(struct iavf_adapter *ad, struct rte_flow *flow,
+		 void *meta, struct rte_flow_error *error)
 {
+	struct iavf_fsub_conf *filter = meta;
+	struct iavf_fsub_conf *rule;
+	int ret;
+
+	rule = rte_zmalloc("fsub_entry", sizeof(*rule), 0);
+	if (!rule) {
+		rte_flow_error_set(error, ENOMEM,
+				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				"Failed to allocate memory for fsub rule");
+		return -rte_errno;
+	}
+
+	ret = iavf_flow_sub(ad, filter);
+	if (ret) {
+		rte_flow_error_set(error, -ret,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Failed to subscribe flow rule.");
+		goto free_entry;
+	}
+
+	rte_memcpy(rule, filter, sizeof(*rule));
+	flow->rule = rule;
+
+	return ret;
+
+free_entry:
+	rte_free(rule);
 	return -rte_errno;
 }
 
 static int
-iavf_fsub_destroy(__rte_unused struct iavf_adapter *ad,
-		  __rte_unused struct rte_flow *flow,
-		  __rte_unused struct rte_flow_error *error)
+iavf_fsub_destroy(struct iavf_adapter *ad, struct rte_flow *flow,
+		  struct rte_flow_error *error)
 {
-	return -rte_errno;
+	struct iavf_fsub_conf *filter;
+	int ret;
+
+	filter = (struct iavf_fsub_conf *)flow->rule;
+
+	ret = iavf_flow_unsub(ad, filter);
+	if (ret) {
+		rte_flow_error_set(error, -ret,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Failed to unsubscribe flow rule.");
+		return -rte_errno;
+	}
+
+	flow->rule = NULL;
+	rte_free(filter);
+
+	return ret;
 }
 
 static int
-iavf_fsub_validation(__rte_unused struct iavf_adapter *ad,
+iavf_fsub_validation(struct iavf_adapter *ad,
 		     __rte_unused struct rte_flow *flow,
-		     __rte_unused void *meta,
-		     __rte_unused struct rte_flow_error *error)
+		     void *meta,
+		     struct rte_flow_error *error)
 {
-	return -rte_errno;
+	struct iavf_fsub_conf *filter = meta;
+	int ret;
+
+	ret = iavf_flow_sub_check(ad, filter);
+	if (ret) {
+		rte_flow_error_set(error, -ret,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Failed to validate filter rule.");
+		return -rte_errno;
+	}
+
+	return ret;
 };
 
 static int
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 6d84add423..cc0db8d093 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -1534,6 +1534,138 @@ iavf_fdir_check(struct iavf_adapter *adapter,
 	return 0;
 }
 
+int
+iavf_flow_sub(struct iavf_adapter *adapter, struct iavf_fsub_conf *filter)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct virtchnl_flow_sub *fsub_cfg;
+	struct iavf_cmd_info args;
+	int err;
+
+	filter->sub_fltr.vsi_id = vf->vsi_res->vsi_id;
+	filter->sub_fltr.validate_only = 0;
+
+	memset(&args, 0, sizeof(args));
+	args.ops = VIRTCHNL_OP_FLOW_SUBSCRIBE;
+	args.in_args = (uint8_t *)(&filter->sub_fltr);
+	args.in_args_size = sizeof(filter->sub_fltr);
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
+	if (err)
+		PMD_DRV_LOG(ERR, "Failed to execute command of "
+				 "OP_FLOW_SUBSCRIBE");
+
+	fsub_cfg = (struct virtchnl_flow_sub *)args.out_buffer;
+	filter->flow_id = fsub_cfg->flow_id;
+
+	if (fsub_cfg->status == VIRTCHNL_FSUB_SUCCESS) {
+		PMD_DRV_LOG(INFO, "Succeed in adding rule request by PF");
+	} else if (fsub_cfg->status == VIRTCHNL_FSUB_FAILURE_RULE_NORESOURCE) {
+		PMD_DRV_LOG(ERR, "Failed to add rule request due to no hw "
+				 "resource");
+		err = -1;
+	} else if (fsub_cfg->status == VIRTCHNL_FSUB_FAILURE_RULE_EXIST) {
+		PMD_DRV_LOG(ERR, "Failed to add rule request because the "
+				 "rule already exists");
+		err = -1;
+	} else if (fsub_cfg->status == VIRTCHNL_FSUB_FAILURE_RULE_INVALID) {
+		PMD_DRV_LOG(ERR, "Failed to add rule request because the hw "
+				 "doesn't support it");
+		err = -1;
+	} else {
+		PMD_DRV_LOG(ERR, "Failed to add rule request due to other "
+				 "reasons");
+		err = -1;
+	}
+
+	return err;
+}
+
+int
+iavf_flow_unsub(struct iavf_adapter *adapter, struct iavf_fsub_conf *filter)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct virtchnl_flow_unsub *unsub_cfg;
+	struct iavf_cmd_info args;
+	int err;
+
+	filter->unsub_fltr.vsi_id = vf->vsi_res->vsi_id;
+	filter->unsub_fltr.flow_id = filter->flow_id;
+
+	memset(&args, 0, sizeof(args));
+	args.ops = VIRTCHNL_OP_FLOW_UNSUBSCRIBE;
+	args.in_args = (uint8_t *)(&filter->unsub_fltr);
+	args.in_args_size = sizeof(filter->unsub_fltr);
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
+	if (err)
+		PMD_DRV_LOG(ERR, "Failed to execute command of "
+				 "OP_FLOW_UNSUBSCRIBE");
+
+	unsub_cfg = (struct virtchnl_flow_unsub *)args.out_buffer;
+
+	if (unsub_cfg->status == VIRTCHNL_FSUB_SUCCESS) {
+		PMD_DRV_LOG(INFO, "Succeed in deleting rule request by PF");
+	} else if (unsub_cfg->status == VIRTCHNL_FSUB_FAILURE_RULE_NONEXIST) {
+		PMD_DRV_LOG(ERR, "Failed to delete rule request because "
+				 "this rule doesn't exist");
+		err = -1;
+	} else {
+		PMD_DRV_LOG(ERR, "Failed to delete rule request due to other "
+				 "reasons");
+		err = -1;
+	}
+
+	return err;
+}
+
+int
+iavf_flow_sub_check(struct iavf_adapter *adapter,
+		    struct iavf_fsub_conf *filter)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct virtchnl_flow_sub *fsub_cfg;
+
+	struct iavf_cmd_info args;
+	int err;
+
+	filter->sub_fltr.vsi_id = vf->vsi_res->vsi_id;
+	filter->sub_fltr.validate_only = 1;
+
+	args.ops = VIRTCHNL_OP_FLOW_SUBSCRIBE;
+	args.in_args = (uint8_t *)(&filter->sub_fltr);
+	args.in_args_size = sizeof(filter->sub_fltr);
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
+	if (err) {
+		PMD_DRV_LOG(ERR, "Failed to check flow subscription rule");
+		return err;
+	}
+
+	fsub_cfg = (struct virtchnl_flow_sub *)args.out_buffer;
+
+	if (fsub_cfg->status == VIRTCHNL_FSUB_SUCCESS) {
+		PMD_DRV_LOG(INFO, "Succeed in checking rule request by PF");
+	} else if (fsub_cfg->status == VIRTCHNL_FSUB_FAILURE_RULE_INVALID) {
+		PMD_DRV_LOG(ERR, "Failed to check rule request due to "
+				 "parameter validation failure or missing "
+				 "HW support");
+		err = -1;
+	} else {
+		PMD_DRV_LOG(ERR, "Failed to check rule request due to other "
+				 "reasons");
+		err = -1;
+	}
+
+	return err;
+}
+
 int
 iavf_add_del_rss_cfg(struct iavf_adapter *adapter,
 		     struct virtchnl_rss_cfg *rss_cfg, bool add)
-- 
2.25.1


^ permalink raw reply	[flat|nested] 40+ messages in thread

* [PATCH v2 5/5] net/iavf: support priority of flow rule
  2022-08-12 17:04 ` [PATCH v2 0/5] support flow subscription Jie Wang
                     ` (3 preceding siblings ...)
  2022-08-12 17:04   ` [PATCH v2 4/5] net/iavf: support flow subscription rule Jie Wang
@ 2022-08-12 17:04   ` Jie Wang
  4 siblings, 0 replies; 40+ messages in thread
From: Jie Wang @ 2022-08-12 17:04 UTC (permalink / raw)
  To: dev
  Cc: qiming.yang, qi.z.zhang, jingjing.wu, beilei.xing, stevex.yang, Jie Wang

Add flow rule attribute "priority" support for AVF.

Lower values denote higher priority; the highest priority for
a flow rule is 0.
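
A hedged testpmd sketch (assumed port ids, as in the earlier examples):
after this patch, a rule with priority 1 can only be taken by the flow
subscription parser, since the FDIR, RSS hash and ipsec parsers reject
any non-zero priority:

testpmd> flow create 0 priority 1 ingress pattern eth / ipv4 / end
          actions represented_port ethdev_port_id 1 / end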

Signed-off-by: Jie Wang <jie1x.wang@intel.com>
---
 drivers/net/iavf/iavf_fdir.c         |  4 ++++
 drivers/net/iavf/iavf_fsub.c         |  2 +-
 drivers/net/iavf/iavf_generic_flow.c | 23 +++++++++++++----------
 drivers/net/iavf/iavf_generic_flow.h |  1 +
 drivers/net/iavf/iavf_hash.c         |  5 +++++
 drivers/net/iavf/iavf_ipsec_crypto.c | 16 ++++++++++------
 6 files changed, 34 insertions(+), 17 deletions(-)

diff --git a/drivers/net/iavf/iavf_fdir.c b/drivers/net/iavf/iavf_fdir.c
index 2e6b3a9097..f5e23da5bc 100644
--- a/drivers/net/iavf/iavf_fdir.c
+++ b/drivers/net/iavf/iavf_fdir.c
@@ -1551,6 +1551,7 @@ iavf_fdir_parse(struct iavf_adapter *ad,
 		uint32_t array_len,
 		const struct rte_flow_item pattern[],
 		const struct rte_flow_action actions[],
+		uint32_t priority,
 		void **meta,
 		struct rte_flow_error *error)
 {
@@ -1561,6 +1562,9 @@ iavf_fdir_parse(struct iavf_adapter *ad,
 
 	memset(filter, 0, sizeof(*filter));
 
+	if (priority >= 1)
+		return -EINVAL;
+
 	item = iavf_search_pattern_match_item(pattern, array, array_len, error);
 	if (!item)
 		return -rte_errno;
diff --git a/drivers/net/iavf/iavf_fsub.c b/drivers/net/iavf/iavf_fsub.c
index 441bf478be..91193bb010 100644
--- a/drivers/net/iavf/iavf_fsub.c
+++ b/drivers/net/iavf/iavf_fsub.c
@@ -649,13 +649,13 @@ iavf_fsub_parse(struct iavf_adapter *ad,
 		uint32_t array_len,
 		const struct rte_flow_item pattern[],
 		const struct rte_flow_action actions[],
+		uint32_t priority,
 		void **meta,
 		struct rte_flow_error *error)
 {
 	struct iavf_fsub_conf *filter;
 	struct iavf_pattern_match_item *pattern_match_item = NULL;
 	int ret = 0;
-	uint32_t priority = 0;
 
 	filter = rte_zmalloc(NULL, sizeof(*filter), 0);
 	if (!filter) {
diff --git a/drivers/net/iavf/iavf_generic_flow.c b/drivers/net/iavf/iavf_generic_flow.c
index b04614ba6e..f33c764764 100644
--- a/drivers/net/iavf/iavf_generic_flow.c
+++ b/drivers/net/iavf/iavf_generic_flow.c
@@ -1785,6 +1785,7 @@ enum rte_flow_item_type iavf_pattern_eth_ipv6_udp_l2tpv2_ppp_ipv6_tcp[] = {
 typedef struct iavf_flow_engine * (*parse_engine_t)(struct iavf_adapter *ad,
 		struct rte_flow *flow,
 		struct iavf_parser_list *parser_list,
+		uint32_t priority,
 		const struct rte_flow_item pattern[],
 		const struct rte_flow_action actions[],
 		struct rte_flow_error *error);
@@ -1951,11 +1952,11 @@ iavf_flow_valid_attr(const struct rte_flow_attr *attr,
 		return -rte_errno;
 	}
 
-	/* Not supported */
-	if (attr->priority) {
+	/* support priority for flow subscribe */
+	if (attr->priority > 1) {
 		rte_flow_error_set(error, EINVAL,
 				RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
-				attr, "Not support priority.");
+				attr, "Only support priority 0 and 1.");
 		return -rte_errno;
 	}
 
@@ -2098,6 +2099,7 @@ static struct iavf_flow_engine *
 iavf_parse_engine_create(struct iavf_adapter *ad,
 		struct rte_flow *flow,
 		struct iavf_parser_list *parser_list,
+		uint32_t priority,
 		const struct rte_flow_item pattern[],
 		const struct rte_flow_action actions[],
 		struct rte_flow_error *error)
@@ -2111,7 +2113,7 @@ iavf_parse_engine_create(struct iavf_adapter *ad,
 		if (parser_node->parser->parse_pattern_action(ad,
 				parser_node->parser->array,
 				parser_node->parser->array_len,
-				pattern, actions, &meta, error) < 0)
+				pattern, actions, priority, &meta, error) < 0)
 			continue;
 
 		engine = parser_node->parser->engine;
@@ -2127,6 +2129,7 @@ static struct iavf_flow_engine *
 iavf_parse_engine_validate(struct iavf_adapter *ad,
 		struct rte_flow *flow,
 		struct iavf_parser_list *parser_list,
+		uint32_t priority,
 		const struct rte_flow_item pattern[],
 		const struct rte_flow_action actions[],
 		struct rte_flow_error *error)
@@ -2140,7 +2143,7 @@ iavf_parse_engine_validate(struct iavf_adapter *ad,
 		if (parser_node->parser->parse_pattern_action(ad,
 				parser_node->parser->array,
 				parser_node->parser->array_len,
-				pattern, actions, &meta,  error) < 0)
+				pattern, actions, priority, &meta, error) < 0)
 			continue;
 
 		engine = parser_node->parser->engine;
@@ -2201,18 +2204,18 @@ iavf_flow_process_filter(struct rte_eth_dev *dev,
 	if (ret)
 		return ret;
 
-	*engine = iavf_parse_engine(ad, flow, &vf->rss_parser_list, pattern,
-				    actions, error);
+	*engine = iavf_parse_engine(ad, flow, &vf->rss_parser_list,
+				    attr->priority, pattern, actions, error);
 	if (*engine)
 		return 0;
 
-	*engine = iavf_parse_engine(ad, flow, &vf->dist_parser_list, pattern,
-				    actions, error);
+	*engine = iavf_parse_engine(ad, flow, &vf->dist_parser_list,
+				    attr->priority, pattern, actions, error);
 	if (*engine)
 		return 0;
 
 	*engine = iavf_parse_engine(ad, flow, &vf->ipsec_crypto_parser_list,
-			pattern, actions, error);
+				    attr->priority, pattern, actions, error);
 	if (*engine)
 		return 0;
 
diff --git a/drivers/net/iavf/iavf_generic_flow.h b/drivers/net/iavf/iavf_generic_flow.h
index 448facffa5..60d8ab02b4 100644
--- a/drivers/net/iavf/iavf_generic_flow.h
+++ b/drivers/net/iavf/iavf_generic_flow.h
@@ -471,6 +471,7 @@ typedef int (*parse_pattern_action_t)(struct iavf_adapter *ad,
 		uint32_t array_len,
 		const struct rte_flow_item pattern[],
 		const struct rte_flow_action actions[],
+		uint32_t priority,
 		void **meta,
 		struct rte_flow_error *error);
 
diff --git a/drivers/net/iavf/iavf_hash.c b/drivers/net/iavf/iavf_hash.c
index 42df7c4e48..dea4e0aa0a 100644
--- a/drivers/net/iavf/iavf_hash.c
+++ b/drivers/net/iavf/iavf_hash.c
@@ -86,6 +86,7 @@ iavf_hash_parse_pattern_action(struct iavf_adapter *ad,
 			       uint32_t array_len,
 			       const struct rte_flow_item pattern[],
 			       const struct rte_flow_action actions[],
+			       uint32_t priority,
 			       void **meta,
 			       struct rte_flow_error *error);
 
@@ -1509,6 +1510,7 @@ iavf_hash_parse_pattern_action(__rte_unused struct iavf_adapter *ad,
 			       uint32_t array_len,
 			       const struct rte_flow_item pattern[],
 			       const struct rte_flow_action actions[],
+			       uint32_t priority,
 			       void **meta,
 			       struct rte_flow_error *error)
 {
@@ -1517,6 +1519,9 @@ iavf_hash_parse_pattern_action(__rte_unused struct iavf_adapter *ad,
 	uint64_t phint = IAVF_PHINT_NONE;
 	int ret = 0;
 
+	if (priority >= 1)
+		return -EINVAL;
+
 	rss_meta_ptr = rte_zmalloc(NULL, sizeof(*rss_meta_ptr), 0);
 	if (!rss_meta_ptr) {
 		rte_flow_error_set(error, EINVAL,
diff --git a/drivers/net/iavf/iavf_ipsec_crypto.c b/drivers/net/iavf/iavf_ipsec_crypto.c
index 75f05ee558..322934b5f6 100644
--- a/drivers/net/iavf/iavf_ipsec_crypto.c
+++ b/drivers/net/iavf/iavf_ipsec_crypto.c
@@ -1932,16 +1932,20 @@ static struct iavf_flow_engine iavf_ipsec_flow_engine = {
 
 static int
 iavf_ipsec_flow_parse(struct iavf_adapter *ad,
-		       struct iavf_pattern_match_item *array,
-		       uint32_t array_len,
-		       const struct rte_flow_item pattern[],
-		       const struct rte_flow_action actions[],
-		       void **meta,
-		       struct rte_flow_error *error)
+		      struct iavf_pattern_match_item *array,
+		      uint32_t array_len,
+		      const struct rte_flow_item pattern[],
+		      const struct rte_flow_action actions[],
+		      uint32_t priority,
+		      void **meta,
+		      struct rte_flow_error *error)
 {
 	struct iavf_pattern_match_item *item = NULL;
 	int ret = -1;
 
+	if (priority >= 1)
+		return -EINVAL;
+
 	item = iavf_search_pattern_match_item(pattern, array, array_len, error);
 	if (item && item->meta) {
 		uint32_t type = (uint64_t)(item->meta);
-- 
2.25.1


^ permalink raw reply	[flat|nested] 40+ messages in thread

* [PATCH v3 0/5] support flow subscription
  2022-08-09  6:21 [PATCH 0/2] support flow subscription Jie Wang
                   ` (2 preceding siblings ...)
  2022-08-12 17:04 ` [PATCH v2 0/5] support flow subscription Jie Wang
@ 2022-08-30 18:05 ` Jie Wang
  2022-08-30 18:05   ` [PATCH v3 1/5] common/iavf: " Jie Wang
                     ` (5 more replies)
  2022-09-07  3:35 ` [PATCH v4 " Jie Wang
                   ` (2 subsequent siblings)
  6 siblings, 6 replies; 40+ messages in thread
From: Jie Wang @ 2022-08-30 18:05 UTC (permalink / raw)
  To: dev
  Cc: qiming.yang, qi.z.zhang, jingjing.wu, beilei.xing, stevex.yang, Jie Wang

Add support for AVF to subscribe a flow from PF.

--
v3:
 * fix eth layer inputset.
 * rebase.
v2:
 * split v1 patch 2/2 into 4 small patches.
 * remove rule action RTE_FLOW_ACTION_TYPE_VF and add
   RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT.

Jie Wang (5):
  common/iavf: support flow subscription
  net/iavf: add flow subscription to AVF
  net/iavf: support flow subscription pattern
  net/iavf: support flow subscription rule
  net/iavf: support priority of flow rule

 doc/guides/rel_notes/release_22_11.rst |   4 +
 drivers/common/iavf/virtchnl.h         | 104 +++-
 drivers/net/iavf/iavf.h                |  13 +
 drivers/net/iavf/iavf_fdir.c           |   4 +
 drivers/net/iavf/iavf_fsub.c           | 745 +++++++++++++++++++++++++
 drivers/net/iavf/iavf_generic_flow.c   |  40 +-
 drivers/net/iavf/iavf_generic_flow.h   |   2 +
 drivers/net/iavf/iavf_hash.c           |   5 +
 drivers/net/iavf/iavf_ipsec_crypto.c   |  16 +-
 drivers/net/iavf/iavf_vchnl.c          | 133 +++++
 drivers/net/iavf/meson.build           |   1 +
 11 files changed, 1046 insertions(+), 21 deletions(-)
 create mode 100644 drivers/net/iavf/iavf_fsub.c

-- 
2.25.1


^ permalink raw reply	[flat|nested] 40+ messages in thread

* [PATCH v3 1/5] common/iavf: support flow subscription
  2022-08-30 18:05 ` [PATCH v3 0/5] support flow subscription Jie Wang
@ 2022-08-30 18:05   ` Jie Wang
  2022-08-30 18:05   ` [PATCH v3 2/5] net/iavf: add flow subscription to AVF Jie Wang
                     ` (4 subsequent siblings)
  5 siblings, 0 replies; 40+ messages in thread
From: Jie Wang @ 2022-08-30 18:05 UTC (permalink / raw)
  To: dev
  Cc: qiming.yang, qi.z.zhang, jingjing.wu, beilei.xing, stevex.yang, Jie Wang

VF is able to subscribe a flow from PF by VIRTCHNL_FLOW_SUBSCRIBE.

PF is expected to offload a rule to hardware which will redirect
packets that match the required pattern to this VF.

Only a flow whose dst MAC address is the PF's MAC address can be subscribed.

VIRTCHNL_VF_OFFLOAD_FSUB_PF is used for flow subscription capability
negotiation, and only a trusted VF can be granted this capability.

A flow can be unsubscribed by VIRTCHNL_FLOW_UNSUBSCRIBE.
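
A minimal usage sketch from the VF side (field names are taken from
the structures added below; the "vsi_id" value and the send step are
assumptions, not part of this patch):

	struct virtchnl_flow_sub msg = {0};

	msg.vsi_id = vsi_id;        /* INPUT: target VSI (assumed id) */
	msg.validate_only = 0;      /* INPUT: 1 = validate, don't program */
	msg.priority = 0;           /* INPUT */
	/* Fill msg.proto_hdrs and msg.actions, then send
	 * VIRTCHNL_OP_FLOW_SUBSCRIBE.  On success the PF returns
	 * msg.flow_id and sets msg.status; flow_id is later placed in
	 * struct virtchnl_flow_unsub for VIRTCHNL_OP_FLOW_UNSUBSCRIBE.
	 */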

Signed-off-by: Jie Wang <jie1x.wang@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
 drivers/common/iavf/virtchnl.h | 104 +++++++++++++++++++++++++++++++--
 1 file changed, 100 insertions(+), 4 deletions(-)

diff --git a/drivers/common/iavf/virtchnl.h b/drivers/common/iavf/virtchnl.h
index f123daec8e..e02eec4935 100644
--- a/drivers/common/iavf/virtchnl.h
+++ b/drivers/common/iavf/virtchnl.h
@@ -168,6 +168,8 @@ enum virtchnl_ops {
 	VIRTCHNL_OP_MAP_QUEUE_VECTOR = 111,
 	VIRTCHNL_OP_CONFIG_QUEUE_BW = 112,
 	VIRTCHNL_OP_CONFIG_QUANTA = 113,
+	VIRTCHNL_OP_FLOW_SUBSCRIBE = 114,
+	VIRTCHNL_OP_FLOW_UNSUBSCRIBE = 115,
 	VIRTCHNL_OP_MAX,
 };
 
@@ -282,6 +284,10 @@ static inline const char *virtchnl_op_str(enum virtchnl_ops v_opcode)
 		return "VIRTCHNL_OP_1588_PTP_GET_CAPS";
 	case VIRTCHNL_OP_1588_PTP_GET_TIME:
 		return "VIRTCHNL_OP_1588_PTP_GET_TIME";
+	case VIRTCHNL_OP_FLOW_SUBSCRIBE:
+		return "VIRTCHNL_OP_FLOW_SUBSCRIBE";
+	case VIRTCHNL_OP_FLOW_UNSUBSCRIBE:
+		return "VIRTCHNL_OP_FLOW_UNSUBSCRIBE";
 	case VIRTCHNL_OP_MAX:
 		return "VIRTCHNL_OP_MAX";
 	default:
@@ -401,6 +407,7 @@ VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
 #define VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO	BIT(8)
 #define VIRTCHNL_VF_LARGE_NUM_QPAIRS		BIT(9)
 #define VIRTCHNL_VF_OFFLOAD_CRC			BIT(10)
+#define VIRTCHNL_VF_OFFLOAD_FSUB_PF		BIT(14)
 #define VIRTCHNL_VF_OFFLOAD_VLAN_V2		BIT(15)
 #define VIRTCHNL_VF_OFFLOAD_VLAN		BIT(16)
 #define VIRTCHNL_VF_OFFLOAD_RX_POLLING		BIT(17)
@@ -1503,6 +1510,7 @@ enum virtchnl_vfr_states {
 };
 
 #define VIRTCHNL_MAX_NUM_PROTO_HDRS	32
+#define VIRTCHNL_MAX_NUM_PROTO_HDRS_W_MSK	16
 #define VIRTCHNL_MAX_SIZE_RAW_PACKET	1024
 #define PROTO_HDR_SHIFT			5
 #define PROTO_HDR_FIELD_START(proto_hdr_type) \
@@ -1695,6 +1703,22 @@ struct virtchnl_proto_hdr {
 
 VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_proto_hdr);
 
+struct virtchnl_proto_hdr_w_msk {
+	/* see enum virtchnl_proto_hdr_type */
+	s32 type;
+	u32 pad;
+	/**
+	 * binary buffer in network order for specific header type.
+	 * For example, if type = VIRTCHNL_PROTO_HDR_IPV4, a IPv4
+	 * header is expected to be copied into the buffer.
+	 */
+	u8 buffer_spec[64];
+	/* binary buffer for bit-mask applied to specific header type */
+	u8 buffer_mask[64];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(136, virtchnl_proto_hdr_w_msk);
+
 struct virtchnl_proto_hdrs {
 	u8 tunnel_level;
 	/**
@@ -1706,11 +1730,18 @@ struct virtchnl_proto_hdrs {
 	 */
 	int count;
 	/**
-	 * number of proto layers, must < VIRTCHNL_MAX_NUM_PROTO_HDRS
-	 * must be 0 for a raw packet request.
+	 * count must be <=
+	 * VIRTCHNL_MAX_NUM_PROTO_HDRS + VIRTCHNL_MAX_NUM_PROTO_HDRS_W_MSK
+	 * count = 0 :					select raw
+	 * 0 < count <= VIRTCHNL_MAX_NUM_PROTO_HDRS :	select proto_hdr
+	 * count > VIRTCHNL_MAX_NUM_PROTO_HDRS :	select proto_hdr_w_msk
+	 * last valid index = count - VIRTCHNL_MAX_NUM_PROTO_HDRS
 	 */
 	union {
-		struct virtchnl_proto_hdr proto_hdr[VIRTCHNL_MAX_NUM_PROTO_HDRS];
+		struct virtchnl_proto_hdr
+			proto_hdr[VIRTCHNL_MAX_NUM_PROTO_HDRS];
+		struct virtchnl_proto_hdr_w_msk
+			proto_hdr_w_msk[VIRTCHNL_MAX_NUM_PROTO_HDRS_W_MSK];
 		struct {
 			u16 pkt_len;
 			u8 spec[VIRTCHNL_MAX_SIZE_RAW_PACKET];
@@ -1731,7 +1762,7 @@ struct virtchnl_rss_cfg {
 
 VIRTCHNL_CHECK_STRUCT_LEN(2444, virtchnl_rss_cfg);
 
-/* action configuration for FDIR */
+/* action configuration for FDIR and FSUB */
 struct virtchnl_filter_action {
 	/* see enum virtchnl_action type */
 	s32 type;
@@ -1849,6 +1880,65 @@ struct virtchnl_fdir_del {
 
 VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_fdir_del);
 
+/* Status returned to VF after VF requests FSUB commands
+ * VIRTCHNL_FSUB_SUCCESS
+ * VF FLOW related request was successfully done by PF.
+ * The request can be OP_FLOW_SUBSCRIBE/UNSUBSCRIBE.
+ *
+ * VIRTCHNL_FSUB_FAILURE_RULE_NORESOURCE
+ * OP_FLOW_SUBSCRIBE request failed due to lack of hardware resources.
+ *
+ * VIRTCHNL_FSUB_FAILURE_RULE_EXIST
+ * OP_FLOW_SUBSCRIBE request failed because the rule already exists.
+ *
+ * VIRTCHNL_FSUB_FAILURE_RULE_NONEXIST
+ * OP_FLOW_UNSUBSCRIBE request failed because this rule doesn't exist.
+ *
+ * VIRTCHNL_FSUB_FAILURE_RULE_INVALID
+ * OP_FLOW_SUBSCRIBE request failed due to parameter validation
+ * or missing HW support.
+ */
+enum virtchnl_fsub_prgm_status {
+	VIRTCHNL_FSUB_SUCCESS = 0,
+	VIRTCHNL_FSUB_FAILURE_RULE_NORESOURCE,
+	VIRTCHNL_FSUB_FAILURE_RULE_EXIST,
+	VIRTCHNL_FSUB_FAILURE_RULE_NONEXIST,
+	VIRTCHNL_FSUB_FAILURE_RULE_INVALID,
+};
+
+/* VIRTCHNL_OP_FLOW_SUBSCRIBE
+ * VF sends this request to PF by filling out vsi_id,
+ * validate_only, priority, proto_hdrs and actions.
+ * PF will return flow_id
+ * if the request is successfully done and return status to VF.
+ */
+struct virtchnl_flow_sub {
+	u16 vsi_id; /* INPUT */
+	u8 validate_only; /* INPUT */
+	u8 priority; /* INPUT */
+	u32 flow_id; /* OUTPUT */
+	struct virtchnl_proto_hdrs proto_hdrs; /* INPUT */
+	struct virtchnl_filter_action_set actions; /* INPUT */
+	/* see enum virtchnl_fsub_prgm_status; OUTPUT */
+	s32 status;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(2616, virtchnl_flow_sub);
+
+/* VIRTCHNL_OP_FLOW_UNSUBSCRIBE
+ * VF sends this request to PF by filling out vsi_id
+ * and flow_id. PF will return status to VF.
+ */
+struct virtchnl_flow_unsub {
+	u16 vsi_id; /* INPUT */
+	u16 pad;
+	u32 flow_id; /* INPUT */
+	/* see enum virtchnl_fsub_prgm_status; OUTPUT */
+	s32 status;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_flow_unsub);
+
 /* VIRTCHNL_OP_GET_QOS_CAPS
  * VF sends this message to get its QoS Caps, such as
  * TC number, Arbiter and Bandwidth.
@@ -2318,6 +2408,12 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
 	case VIRTCHNL_OP_DEL_FDIR_FILTER:
 		valid_len = sizeof(struct virtchnl_fdir_del);
 		break;
+	case VIRTCHNL_OP_FLOW_SUBSCRIBE:
+		valid_len = sizeof(struct virtchnl_flow_sub);
+		break;
+	case VIRTCHNL_OP_FLOW_UNSUBSCRIBE:
+		valid_len = sizeof(struct virtchnl_flow_unsub);
+		break;
 	case VIRTCHNL_OP_GET_QOS_CAPS:
 		break;
 	case VIRTCHNL_OP_CONFIG_QUEUE_TC_MAP:
-- 
2.25.1


^ permalink raw reply	[flat|nested] 40+ messages in thread

* [PATCH v3 2/5] net/iavf: add flow subscription to AVF
  2022-08-30 18:05 ` [PATCH v3 0/5] support flow subscription Jie Wang
  2022-08-30 18:05   ` [PATCH v3 1/5] common/iavf: " Jie Wang
@ 2022-08-30 18:05   ` Jie Wang
  2022-08-30 18:05   ` [PATCH v3 3/5] net/iavf: support flow subscription pattern Jie Wang
                     ` (3 subsequent siblings)
  5 siblings, 0 replies; 40+ messages in thread
From: Jie Wang @ 2022-08-30 18:05 UTC (permalink / raw)
  To: dev
  Cc: qiming.yang, qi.z.zhang, jingjing.wu, beilei.xing, stevex.yang, Jie Wang

Add the skeletal code of flow subscription to the AVF driver.

Signed-off-by: Jie Wang <jie1x.wang@intel.com>
---
 doc/guides/rel_notes/release_22_11.rst |   4 +
 drivers/net/iavf/iavf_fsub.c           | 112 +++++++++++++++++++++++++
 drivers/net/iavf/iavf_generic_flow.c   |  17 +++-
 drivers/net/iavf/iavf_generic_flow.h   |   1 +
 drivers/net/iavf/iavf_vchnl.c          |   1 +
 drivers/net/iavf/meson.build           |   1 +
 6 files changed, 135 insertions(+), 1 deletion(-)
 create mode 100644 drivers/net/iavf/iavf_fsub.c

diff --git a/doc/guides/rel_notes/release_22_11.rst b/doc/guides/rel_notes/release_22_11.rst
index 8c021cf050..bb77a03e24 100644
--- a/doc/guides/rel_notes/release_22_11.rst
+++ b/doc/guides/rel_notes/release_22_11.rst
@@ -55,6 +55,10 @@ New Features
      Also, make sure to start the actual text at the margin.
      =======================================================
 
+* **Updated Intel iavf driver.**
+
+  * Added flow subscription support.
+
 
 Removed Items
 -------------
diff --git a/drivers/net/iavf/iavf_fsub.c b/drivers/net/iavf/iavf_fsub.c
new file mode 100644
index 0000000000..17f9bb2976
--- /dev/null
+++ b/drivers/net/iavf/iavf_fsub.c
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2022 Intel Corporation
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_eth_ctrl.h>
+#include <rte_tailq.h>
+#include <rte_flow_driver.h>
+#include <rte_flow.h>
+#include <iavf.h>
+#include "iavf_generic_flow.h"
+
+
+static struct iavf_flow_parser iavf_fsub_parser;
+
+static struct iavf_pattern_match_item iavf_fsub_pattern_list[] = {};
+
+static int
+iavf_fsub_create(__rte_unused struct iavf_adapter *ad,
+		 __rte_unused struct rte_flow *flow,
+		 __rte_unused void *meta,
+		 __rte_unused struct rte_flow_error *error)
+{
+	return -rte_errno;
+}
+
+static int
+iavf_fsub_destroy(__rte_unused struct iavf_adapter *ad,
+		  __rte_unused struct rte_flow *flow,
+		  __rte_unused struct rte_flow_error *error)
+{
+	return -rte_errno;
+}
+
+static int
+iavf_fsub_validation(__rte_unused struct iavf_adapter *ad,
+		     __rte_unused struct rte_flow *flow,
+		     __rte_unused void *meta,
+		     __rte_unused struct rte_flow_error *error)
+{
+	return -rte_errno;
+};
+
+static int
+iavf_fsub_parse(__rte_unused struct iavf_adapter *ad,
+		__rte_unused struct iavf_pattern_match_item *array,
+		__rte_unused uint32_t array_len,
+		__rte_unused const struct rte_flow_item pattern[],
+		__rte_unused const struct rte_flow_action actions[],
+		__rte_unused void **meta,
+		__rte_unused struct rte_flow_error *error)
+{
+	return -rte_errno;
+}
+
+static int
+iavf_fsub_init(struct iavf_adapter *ad)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
+	struct iavf_flow_parser *parser;
+
+	if (!vf->vf_res)
+		return -EINVAL;
+
+	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_FSUB_PF)
+		parser = &iavf_fsub_parser;
+	else
+		return -ENOTSUP;
+
+	return iavf_register_parser(parser, ad);
+}
+
+static void
+iavf_fsub_uninit(struct iavf_adapter *ad)
+{
+	iavf_unregister_parser(&iavf_fsub_parser, ad);
+}
+
+static struct
+iavf_flow_engine iavf_fsub_engine = {
+	.init = iavf_fsub_init,
+	.uninit = iavf_fsub_uninit,
+	.create = iavf_fsub_create,
+	.destroy = iavf_fsub_destroy,
+	.validation = iavf_fsub_validation,
+	.type = IAVF_FLOW_ENGINE_FSUB,
+};
+
+static struct
+iavf_flow_parser iavf_fsub_parser = {
+	.engine = &iavf_fsub_engine,
+	.array = iavf_fsub_pattern_list,
+	.array_len = RTE_DIM(iavf_fsub_pattern_list),
+	.parse_pattern_action = iavf_fsub_parse,
+	.stage = IAVF_FLOW_STAGE_DISTRIBUTOR,
+};
+
+RTE_INIT(iavf_fsub_engine_init)
+{
+	iavf_register_flow_engine(&iavf_fsub_engine);
+}
diff --git a/drivers/net/iavf/iavf_generic_flow.c b/drivers/net/iavf/iavf_generic_flow.c
index e1a611e319..b04614ba6e 100644
--- a/drivers/net/iavf/iavf_generic_flow.c
+++ b/drivers/net/iavf/iavf_generic_flow.c
@@ -1866,6 +1866,8 @@ iavf_register_parser(struct iavf_flow_parser *parser,
 {
 	struct iavf_parser_list *list = NULL;
 	struct iavf_flow_parser_node *parser_node;
+	struct iavf_flow_parser_node *existing_node;
+	void *temp;
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
 
 	parser_node = rte_zmalloc("iavf_parser", sizeof(*parser_node), 0);
@@ -1880,14 +1882,26 @@ iavf_register_parser(struct iavf_flow_parser *parser,
 		TAILQ_INSERT_TAIL(list, parser_node, node);
 	} else if (parser->engine->type == IAVF_FLOW_ENGINE_FDIR) {
 		list = &vf->dist_parser_list;
+		RTE_TAILQ_FOREACH_SAFE(existing_node, list, node, temp) {
+			if (existing_node->parser->engine->type ==
+			    IAVF_FLOW_ENGINE_FSUB) {
+				TAILQ_INSERT_AFTER(list, existing_node,
+						   parser_node, node);
+				goto DONE;
+			}
+		}
 		TAILQ_INSERT_HEAD(list, parser_node, node);
 	} else if (parser->engine->type == IAVF_FLOW_ENGINE_IPSEC_CRYPTO) {
 		list = &vf->ipsec_crypto_parser_list;
 		TAILQ_INSERT_HEAD(list, parser_node, node);
+	} else if (parser->engine->type == IAVF_FLOW_ENGINE_FSUB) {
+		list = &vf->dist_parser_list;
+		TAILQ_INSERT_HEAD(list, parser_node, node);
 	} else {
 		return -EINVAL;
 	}
 
+DONE:
 	return 0;
 }
 
@@ -1902,7 +1916,8 @@ iavf_unregister_parser(struct iavf_flow_parser *parser,
 
 	if (parser->engine->type == IAVF_FLOW_ENGINE_HASH)
 		list = &vf->rss_parser_list;
-	else if (parser->engine->type == IAVF_FLOW_ENGINE_FDIR)
+	else if ((parser->engine->type == IAVF_FLOW_ENGINE_FDIR) ||
+		 (parser->engine->type == IAVF_FLOW_ENGINE_FSUB))
 		list = &vf->dist_parser_list;
 
 	if (list == NULL)
diff --git a/drivers/net/iavf/iavf_generic_flow.h b/drivers/net/iavf/iavf_generic_flow.h
index 52eb1caf29..448facffa5 100644
--- a/drivers/net/iavf/iavf_generic_flow.h
+++ b/drivers/net/iavf/iavf_generic_flow.h
@@ -480,6 +480,7 @@ enum iavf_flow_engine_type {
 	IAVF_FLOW_ENGINE_IPSEC_CRYPTO,
 	IAVF_FLOW_ENGINE_FDIR,
 	IAVF_FLOW_ENGINE_HASH,
+	IAVF_FLOW_ENGINE_FSUB,
 	IAVF_FLOW_ENGINE_MAX,
 };
 
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 21bd1e2193..6d84add423 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -502,6 +502,7 @@ iavf_get_vf_resource(struct iavf_adapter *adapter)
 		VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC |
 		VIRTCHNL_VF_OFFLOAD_FDIR_PF |
 		VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF |
+		VIRTCHNL_VF_OFFLOAD_FSUB_PF |
 		VIRTCHNL_VF_OFFLOAD_REQ_QUEUES |
 		VIRTCHNL_VF_OFFLOAD_CRC |
 		VIRTCHNL_VF_OFFLOAD_VLAN_V2 |
diff --git a/drivers/net/iavf/meson.build b/drivers/net/iavf/meson.build
index 2da37de662..6df771f917 100644
--- a/drivers/net/iavf/meson.build
+++ b/drivers/net/iavf/meson.build
@@ -16,6 +16,7 @@ sources = files(
         'iavf_hash.c',
         'iavf_tm.c',
         'iavf_ipsec_crypto.c',
+        'iavf_fsub.c',
 )
 
 if arch_subdir == 'x86'
-- 
2.25.1


^ permalink raw reply	[flat|nested] 40+ messages in thread

* [PATCH v3 3/5] net/iavf: support flow subscription pattern
  2022-08-30 18:05 ` [PATCH v3 0/5] support flow subscription Jie Wang
  2022-08-30 18:05   ` [PATCH v3 1/5] common/iavf: " Jie Wang
  2022-08-30 18:05   ` [PATCH v3 2/5] net/iavf: add flow subscription to AVF Jie Wang
@ 2022-08-30 18:05   ` Jie Wang
  2022-09-06  7:30     ` Zhang, Qi Z
  2022-08-30 18:05   ` [PATCH v3 4/5] net/iavf: support flow subscription rule Jie Wang
                     ` (2 subsequent siblings)
  5 siblings, 1 reply; 40+ messages in thread
From: Jie Wang @ 2022-08-30 18:05 UTC (permalink / raw)
  To: dev
  Cc: qiming.yang, qi.z.zhang, jingjing.wu, beilei.xing, stevex.yang, Jie Wang

Add flow subscription pattern support for AVF.

The supported patterns are listed below:
eth/vlan/ipv4
eth/ipv4(6)
eth/ipv4(6)/udp
eth/ipv4(6)/tcp
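
For example, the eth/vlan/ipv4 pattern maps to a rule like the one
below (illustrative testpmd syntax only; the tci value and the port
ids are assumptions, and rule creation itself is enabled by a later
patch in this series):

testpmd> flow create 0 ingress pattern eth / vlan tci is 100 / ipv4
          / end actions represented_port port_id 1 / end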

Signed-off-by: Jie Wang <jie1x.wang@intel.com>
---
 drivers/net/iavf/iavf.h      |   7 +
 drivers/net/iavf/iavf_fsub.c | 598 ++++++++++++++++++++++++++++++++++-
 2 files changed, 597 insertions(+), 8 deletions(-)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 025ab3ff60..f79c7f9f6e 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -148,6 +148,13 @@ struct iavf_fdir_info {
 	struct iavf_fdir_conf conf;
 };
 
+struct iavf_fsub_conf {
+	struct virtchnl_flow_sub sub_fltr;
+	struct virtchnl_flow_unsub unsub_fltr;
+	uint64_t input_set;
+	uint32_t flow_id;
+};
+
 struct iavf_qv_map {
 	uint16_t queue_id;
 	uint16_t vector_id;
diff --git a/drivers/net/iavf/iavf_fsub.c b/drivers/net/iavf/iavf_fsub.c
index 17f9bb2976..4600d52b91 100644
--- a/drivers/net/iavf/iavf_fsub.c
+++ b/drivers/net/iavf/iavf_fsub.c
@@ -22,9 +22,51 @@
 #include "iavf_generic_flow.h"
 
 
+#define MAX_QGRP_NUM_TYPE      7
+#define IAVF_IPV6_ADDR_LENGTH  16
+#define MAX_INPUT_SET_BYTE     32
+
+#define IAVF_SW_INSET_ETHER ( \
+	IAVF_INSET_DMAC | IAVF_INSET_SMAC | IAVF_INSET_ETHERTYPE)
+#define IAVF_SW_INSET_MAC_IPV4 ( \
+	IAVF_INSET_DMAC | IAVF_INSET_IPV4_DST | IAVF_INSET_IPV4_SRC | \
+	IAVF_INSET_IPV4_PROTO | IAVF_INSET_IPV4_TTL | IAVF_INSET_IPV4_TOS)
+#define IAVF_SW_INSET_MAC_VLAN_IPV4 ( \
+	IAVF_SW_INSET_MAC_IPV4 | IAVF_INSET_VLAN_OUTER)
+#define IAVF_SW_INSET_MAC_IPV4_TCP ( \
+	IAVF_INSET_DMAC | IAVF_INSET_IPV4_DST | IAVF_INSET_IPV4_SRC | \
+	IAVF_INSET_IPV4_TTL | IAVF_INSET_IPV4_TOS | \
+	IAVF_INSET_TCP_DST_PORT | IAVF_INSET_TCP_SRC_PORT)
+#define IAVF_SW_INSET_MAC_IPV4_UDP ( \
+	IAVF_INSET_DMAC | IAVF_INSET_IPV4_DST | IAVF_INSET_IPV4_SRC | \
+	IAVF_INSET_IPV4_TTL | IAVF_INSET_IPV4_TOS | \
+	IAVF_INSET_UDP_DST_PORT | IAVF_INSET_UDP_SRC_PORT)
+#define IAVF_SW_INSET_MAC_IPV6 ( \
+	IAVF_INSET_DMAC | IAVF_INSET_IPV6_DST | IAVF_INSET_IPV6_SRC | \
+	IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
+	IAVF_INSET_IPV6_NEXT_HDR)
+#define IAVF_SW_INSET_MAC_IPV6_TCP ( \
+	IAVF_INSET_DMAC | IAVF_INSET_IPV6_DST | IAVF_INSET_IPV6_SRC | \
+	IAVF_INSET_IPV6_HOP_LIMIT | IAVF_INSET_IPV6_TC | \
+	IAVF_INSET_TCP_DST_PORT | IAVF_INSET_TCP_SRC_PORT)
+#define IAVF_SW_INSET_MAC_IPV6_UDP ( \
+	IAVF_INSET_DMAC | IAVF_INSET_IPV6_DST | IAVF_INSET_IPV6_SRC | \
+	IAVF_INSET_IPV6_HOP_LIMIT | IAVF_INSET_IPV6_TC | \
+	IAVF_INSET_UDP_DST_PORT | IAVF_INSET_UDP_SRC_PORT)
+
 static struct iavf_flow_parser iavf_fsub_parser;
 
-static struct iavf_pattern_match_item iavf_fsub_pattern_list[] = {};
+static struct
+iavf_pattern_match_item iavf_fsub_pattern_list[] = {
+	{iavf_pattern_ethertype,			IAVF_SW_INSET_ETHER,			IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4,				IAVF_SW_INSET_MAC_IPV4,			IAVF_INSET_NONE},
+	{iavf_pattern_eth_vlan_ipv4,			IAVF_SW_INSET_MAC_VLAN_IPV4,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_udp,			IAVF_SW_INSET_MAC_IPV4_UDP,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_tcp,			IAVF_SW_INSET_MAC_IPV4_TCP,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv6,				IAVF_SW_INSET_MAC_IPV6,			IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv6_udp,			IAVF_SW_INSET_MAC_IPV6_UDP,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv6_tcp,			IAVF_SW_INSET_MAC_IPV6_TCP,		IAVF_INSET_NONE},
+};
 
 static int
 iavf_fsub_create(__rte_unused struct iavf_adapter *ad,
@@ -53,17 +95,557 @@ iavf_fsub_validation(__rte_unused struct iavf_adapter *ad,
 };
 
 static int
-iavf_fsub_parse(__rte_unused struct iavf_adapter *ad,
-		__rte_unused struct iavf_pattern_match_item *array,
-		__rte_unused uint32_t array_len,
-		__rte_unused const struct rte_flow_item pattern[],
-		__rte_unused const struct rte_flow_action actions[],
-		__rte_unused void **meta,
-		__rte_unused struct rte_flow_error *error)
+iavf_fsub_parse_pattern(const struct rte_flow_item pattern[],
+			const uint64_t input_set_mask,
+			struct rte_flow_error *error,
+			struct iavf_fsub_conf *filter)
+{
+	struct virtchnl_proto_hdrs *hdrs = &filter->sub_fltr.proto_hdrs;
+	enum rte_flow_item_type item_type;
+	const struct rte_flow_item_eth *eth_spec, *eth_mask;
+	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
+	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
+	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
+	const struct rte_flow_item_udp *udp_spec, *udp_mask;
+	const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
+	const struct rte_flow_item *item = pattern;
+	struct virtchnl_proto_hdr_w_msk *hdr, *hdr1 = NULL;
+	uint64_t outer_input_set = IAVF_INSET_NONE;
+	uint64_t *input = NULL;
+	uint16_t input_set_byte = 0;
+	uint16_t j;
+	uint32_t layer = 0;
+
+	for (item = pattern; item->type !=
+			RTE_FLOW_ITEM_TYPE_END; item++) {
+		if (item->last) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item, "Not support range");
+			return -rte_errno;
+		}
+		item_type = item->type;
+
+		switch (item_type) {
+		case RTE_FLOW_ITEM_TYPE_ETH:
+			eth_spec = item->spec;
+			eth_mask = item->mask;
+
+			hdr1 = &hdrs->proto_hdr_w_msk[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr1, ETH);
+
+			if (eth_spec && eth_mask) {
+				input = &outer_input_set;
+
+				if (!rte_is_zero_ether_addr(&eth_mask->dst)) {
+					*input |= IAVF_INSET_DMAC;
+					input_set_byte += 6;
+				} else {
+					/* flow subscribe filter will add dst mac in kernel */
+					input_set_byte += 6;
+				}
+
+				if (!rte_is_zero_ether_addr(&eth_mask->src)) {
+					*input |= IAVF_INSET_SMAC;
+					input_set_byte += 6;
+				}
+
+				if (eth_mask->type) {
+					*input |= IAVF_INSET_ETHERTYPE;
+					input_set_byte += 2;
+				}
+
+				rte_memcpy(hdr1->buffer_spec, eth_spec,
+					   sizeof(struct rte_ether_hdr));
+				rte_memcpy(hdr1->buffer_mask, eth_mask,
+					   sizeof(struct rte_ether_hdr));
+			} else {
+				/* flow subscribe filter will add dst mac in kernel */
+				input_set_byte += 6;
+			}
+
+			hdrs->count = ++layer;
+			break;
+		case RTE_FLOW_ITEM_TYPE_IPV4:
+			ipv4_spec = item->spec;
+			ipv4_mask = item->mask;
+
+			hdr = &hdrs->proto_hdr_w_msk[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4);
+
+			if (ipv4_spec && ipv4_mask) {
+				input = &outer_input_set;
+				/* Check IPv4 mask and update input set */
+				if (ipv4_mask->hdr.version_ihl ||
+					ipv4_mask->hdr.total_length ||
+					ipv4_mask->hdr.packet_id ||
+					ipv4_mask->hdr.hdr_checksum) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item, "Invalid IPv4 mask.");
+					return -rte_errno;
+				}
+
+				if (ipv4_mask->hdr.src_addr) {
+					*input |= IAVF_INSET_IPV4_SRC;
+					input_set_byte += 2;
+				}
+				if (ipv4_mask->hdr.dst_addr) {
+					*input |= IAVF_INSET_IPV4_DST;
+					input_set_byte += 2;
+				}
+				if (ipv4_mask->hdr.time_to_live) {
+					*input |= IAVF_INSET_IPV4_TTL;
+					input_set_byte++;
+				}
+				if (ipv4_mask->hdr.next_proto_id) {
+					*input |= IAVF_INSET_IPV4_PROTO;
+					input_set_byte++;
+				}
+				if (ipv4_mask->hdr.type_of_service) {
+					*input |= IAVF_INSET_IPV4_TOS;
+					input_set_byte++;
+				}
+
+				rte_memcpy(hdr->buffer_spec, &ipv4_spec->hdr,
+					   sizeof(ipv4_spec->hdr));
+				rte_memcpy(hdr->buffer_mask, &ipv4_mask->hdr,
+					   sizeof(ipv4_spec->hdr));
+			}
+
+			hdrs->count = ++layer;
+			break;
+		case RTE_FLOW_ITEM_TYPE_IPV6:
+			ipv6_spec = item->spec;
+			ipv6_mask = item->mask;
+
+			hdr = &hdrs->proto_hdr_w_msk[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV6);
+
+			if (ipv6_spec && ipv6_mask) {
+				input = &outer_input_set;
+
+				if (ipv6_mask->hdr.payload_len) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item, "Invalid IPv6 mask");
+					return -rte_errno;
+				}
+
+				for (j = 0; j < IAVF_IPV6_ADDR_LENGTH; j++) {
+					if (ipv6_mask->hdr.src_addr[j]) {
+						*input |= IAVF_INSET_IPV6_SRC;
+						break;
+					}
+				}
+				for (j = 0; j < IAVF_IPV6_ADDR_LENGTH; j++) {
+					if (ipv6_mask->hdr.dst_addr[j]) {
+						*input |= IAVF_INSET_IPV6_DST;
+						break;
+					}
+				}
+
+				for (j = 0; j < IAVF_IPV6_ADDR_LENGTH; j++) {
+					if (ipv6_mask->hdr.src_addr[j])
+						input_set_byte++;
+
+					if (ipv6_mask->hdr.dst_addr[j])
+						input_set_byte++;
+				}
+
+				if (ipv6_mask->hdr.proto) {
+					*input |= IAVF_INSET_IPV6_NEXT_HDR;
+					input_set_byte++;
+				}
+				if (ipv6_mask->hdr.hop_limits) {
+					*input |= IAVF_INSET_IPV6_HOP_LIMIT;
+					input_set_byte++;
+				}
+				if (ipv6_mask->hdr.vtc_flow &
+				    rte_cpu_to_be_32(RTE_IPV6_HDR_TC_MASK)) {
+					*input |= IAVF_INSET_IPV6_TC;
+					input_set_byte += 4;
+				}
+
+				rte_memcpy(hdr->buffer_spec, &ipv6_spec->hdr,
+					   sizeof(ipv6_spec->hdr));
+				rte_memcpy(hdr->buffer_mask, &ipv6_mask->hdr,
+					   sizeof(ipv6_spec->hdr));
+			}
+
+			hdrs->count = ++layer;
+			break;
+		case RTE_FLOW_ITEM_TYPE_UDP:
+			udp_spec = item->spec;
+			udp_mask = item->mask;
+
+			hdr = &hdrs->proto_hdr_w_msk[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, UDP);
+
+			if (udp_spec && udp_mask) {
+				input = &outer_input_set;
+				/* Check UDP mask and update input set*/
+				if (udp_mask->hdr.dgram_len ||
+				    udp_mask->hdr.dgram_cksum) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item, "Invalid UDP mask");
+					return -rte_errno;
+				}
+
+				if (udp_mask->hdr.src_port) {
+					*input |= IAVF_INSET_UDP_SRC_PORT;
+					input_set_byte += 2;
+				}
+				if (udp_mask->hdr.dst_port) {
+					*input |= IAVF_INSET_UDP_DST_PORT;
+					input_set_byte += 2;
+				}
+
+				rte_memcpy(hdr->buffer_spec, &udp_spec->hdr,
+					   sizeof(udp_spec->hdr));
+				rte_memcpy(hdr->buffer_mask, &udp_mask->hdr,
+					   sizeof(udp_mask->hdr));
+			}
+
+			hdrs->count = ++layer;
+			break;
+		case RTE_FLOW_ITEM_TYPE_TCP:
+			tcp_spec = item->spec;
+			tcp_mask = item->mask;
+
+			hdr = &hdrs->proto_hdr_w_msk[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, TCP);
+
+			if (tcp_spec && tcp_mask) {
+				input = &outer_input_set;
+				/* Check TCP mask and update input set */
+				if (tcp_mask->hdr.sent_seq ||
+					tcp_mask->hdr.recv_ack ||
+					tcp_mask->hdr.data_off ||
+					tcp_mask->hdr.tcp_flags ||
+					tcp_mask->hdr.rx_win ||
+					tcp_mask->hdr.cksum ||
+					tcp_mask->hdr.tcp_urp) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item, "Invalid TCP mask");
+					return -rte_errno;
+				}
+
+				if (tcp_mask->hdr.src_port) {
+					*input |= IAVF_INSET_TCP_SRC_PORT;
+					input_set_byte += 2;
+				}
+				if (tcp_mask->hdr.dst_port) {
+					*input |= IAVF_INSET_TCP_DST_PORT;
+					input_set_byte += 2;
+				}
+
+				rte_memcpy(hdr->buffer_spec, &tcp_spec->hdr,
+					   sizeof(tcp_spec->hdr));
+				rte_memcpy(hdr->buffer_mask, &tcp_mask->hdr,
+					   sizeof(tcp_mask->hdr));
+			}
+
+			hdrs->count = ++layer;
+			break;
+		case RTE_FLOW_ITEM_TYPE_VLAN:
+			vlan_spec = item->spec;
+			vlan_mask = item->mask;
+
+			hdr = &hdrs->proto_hdr_w_msk[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, S_VLAN);
+
+			if (vlan_spec && vlan_mask) {
+				input = &outer_input_set;
+
+				*input |= IAVF_INSET_VLAN_OUTER;
+
+				if (vlan_mask->tci)
+					input_set_byte += 2;
+
+				if (vlan_mask->inner_type) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item,
+						"Invalid VLAN input set.");
+					return -rte_errno;
+				}
+
+				rte_memcpy(hdr->buffer_spec, &vlan_spec->hdr,
+					   sizeof(vlan_spec->hdr));
+				rte_memcpy(hdr->buffer_mask, &vlan_mask->hdr,
+					   sizeof(vlan_mask->hdr));
+			}
+
+			hdrs->count = ++layer;
+			break;
+		case RTE_FLOW_ITEM_TYPE_VOID:
+			break;
+		default:
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM, pattern,
+					   "Invalid pattern item.");
+			return -rte_errno;
+		}
+	}
+
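+	/* a count above VIRTCHNL_MAX_NUM_PROTO_HDRS tells the PF to read
+	 * the proto_hdr_w_msk[] union member instead of proto_hdr[]
+	 */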
+	hdrs->count += VIRTCHNL_MAX_NUM_PROTO_HDRS;
+
+	if (input_set_byte > MAX_INPUT_SET_BYTE) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+				   item, "too much input set");
+		return -rte_errno;
+	}
+
+	if (!outer_input_set || (outer_input_set & ~input_set_mask)) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+				   pattern, "Invalid input set");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+static int
+iavf_fsub_parse_action(struct iavf_adapter *ad,
+		       const struct rte_flow_action *actions,
+		       uint32_t priority,
+		       struct rte_flow_error *error,
+		       struct iavf_fsub_conf *filter)
 {
+	const struct rte_flow_action *action;
+	const struct rte_flow_action_ethdev *act_ethdev;
+	const struct rte_flow_action_queue *act_q;
+	const struct rte_flow_action_rss *act_qgrop;
+	struct virtchnl_filter_action *filter_action;
+	uint16_t valid_qgrop_number[MAX_QGRP_NUM_TYPE] = {
+		2, 4, 8, 16, 32, 64, 128};
+	uint16_t i, num = 0, dest_num = 0, vf_num = 0;
+	uint16_t rule_port_id;
+
+	for (action = actions; action->type !=
+				RTE_FLOW_ACTION_TYPE_END; action++) {
+		switch (action->type) {
+		case RTE_FLOW_ACTION_TYPE_VOID:
+			break;
+
+		case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
+			vf_num++;
+			filter_action = &filter->sub_fltr.actions.actions[num];
+
+			act_ethdev = action->conf;
+			rule_port_id = ad->dev_data->port_id;
+			if (rule_port_id != act_ethdev->port_id)
+				goto error1;
+
+			filter->sub_fltr.actions.count = ++num;
+			break;
+		case RTE_FLOW_ACTION_TYPE_QUEUE:
+			dest_num++;
+			filter_action = &filter->sub_fltr.actions.actions[num];
+
+			act_q = action->conf;
+			if (act_q->index >= ad->dev_data->nb_rx_queues)
+				goto error2;
+
+			filter_action->type = VIRTCHNL_ACTION_QUEUE;
+			filter_action->act_conf.queue.index = act_q->index;
+			filter->sub_fltr.actions.count = ++num;
+			break;
+		case RTE_FLOW_ACTION_TYPE_RSS:
+			dest_num++;
+			filter_action = &filter->sub_fltr.actions.actions[num];
+
+			act_qgrop = action->conf;
+			if (act_qgrop->queue_num <= 1)
+				goto error2;
+
+			filter_action->type = VIRTCHNL_ACTION_Q_REGION;
+			filter_action->act_conf.queue.index =
+							act_qgrop->queue[0];
+			for (i = 0; i < MAX_QGRP_NUM_TYPE; i++) {
+				if (act_qgrop->queue_num ==
+				    valid_qgrop_number[i])
+					break;
+			}
+
+			if (i == MAX_QGRP_NUM_TYPE)
+				goto error2;
+
+			if ((act_qgrop->queue[0] + act_qgrop->queue_num) >
+			    ad->dev_data->nb_rx_queues)
+				goto error3;
+
+			for (i = 0; i < act_qgrop->queue_num - 1; i++)
+				if (act_qgrop->queue[i + 1] !=
+				    act_qgrop->queue[i] + 1)
+					goto error4;
+
+			filter_action->act_conf.queue.region = act_qgrop->queue_num;
+			filter->sub_fltr.actions.count = ++num;
+			break;
+		default:
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ACTION,
+					   actions, "Invalid action type");
+			return -rte_errno;
+		}
+	}
+
+	/* 0 denotes lowest priority of recipe and highest priority
+	 * of rte_flow. Change rte_flow priority into recipe priority.
+	 */
+	filter->sub_fltr.priority = priority;
+
+	if (num > VIRTCHNL_MAX_NUM_ACTIONS) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ACTION, actions,
+				   "Action numbers exceed the maximum value");
+		return -rte_errno;
+	}
+
+	if (vf_num == 0) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ACTION, actions,
+				   "Invalid action, vf action must be added");
+		return -rte_errno;
+	}
+
+	if (dest_num >= 2) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ACTION, actions,
+				   "Unsupported action combination");
+		return -rte_errno;
+	}
+
+	return 0;
+
+error1:
+	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, actions,
+			   "Invalid ethdev_port_id");
+	return -rte_errno;
+
+error2:
+	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, actions,
+			   "Invalid action type or queue number");
+	return -rte_errno;
+
+error3:
+	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, actions,
+			   "Invalid queue region indexes");
+	return -rte_errno;
+
+error4:
+	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, actions,
+			   "Discontinuous queue region");
 	return -rte_errno;
 }
 
+static int
+iavf_fsub_check_action(const struct rte_flow_action *actions,
+		       struct rte_flow_error *error)
+{
+	const struct rte_flow_action *action;
+	enum rte_flow_action_type action_type;
+	uint16_t actions_num = 0;
+	bool vf_valid = false;
+	bool queue_valid = false;
+
+	for (action = actions; action->type !=
+				RTE_FLOW_ACTION_TYPE_END; action++) {
+		action_type = action->type;
+		switch (action_type) {
+		case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
+			vf_valid = true;
+			actions_num++;
+			break;
+		case RTE_FLOW_ACTION_TYPE_RSS:
+		case RTE_FLOW_ACTION_TYPE_QUEUE:
+			queue_valid = true;
+			actions_num++;
+			break;
+		case RTE_FLOW_ACTION_TYPE_DROP:
+			actions_num++;
+			break;
+		case RTE_FLOW_ACTION_TYPE_VOID:
+			continue;
+		default:
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ACTION,
+					   actions, "Invalid action type");
+			return -rte_errno;
+		}
+	}
+
+	if (!((actions_num == 1 && !queue_valid) ||
+	      (actions_num == 2 && vf_valid && queue_valid))) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+				   actions, "Invalid action number");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+static int
+iavf_fsub_parse(struct iavf_adapter *ad,
+		struct iavf_pattern_match_item *array,
+		uint32_t array_len,
+		const struct rte_flow_item pattern[],
+		const struct rte_flow_action actions[],
+		void **meta,
+		struct rte_flow_error *error)
+{
+	struct iavf_fsub_conf *filter;
+	struct iavf_pattern_match_item *pattern_match_item = NULL;
+	int ret = 0;
+	uint32_t priority = 0;
+
+	filter = rte_zmalloc(NULL, sizeof(*filter), 0);
+	if (!filter) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "No memory for iavf_fsub_conf_ptr");
+		ret = -rte_errno;
+		goto error;
+	}
+
+	/* search flow subscribe pattern */
+	pattern_match_item = iavf_search_pattern_match_item(pattern, array,
+							    array_len, error);
+	if (!pattern_match_item) {
+		ret = -rte_errno;
+		goto error;
+	}
+
+	/* parse flow subscribe pattern */
+	ret = iavf_fsub_parse_pattern(pattern,
+				      pattern_match_item->input_set_mask,
+				      error, filter);
+	if (ret)
+		goto error;
+
+	/* check flow subscribe pattern action */
+	ret = iavf_fsub_check_action(actions, error);
+	if (ret)
+		goto error;
+
+	/* parse flow subscribe pattern action */
+	ret = iavf_fsub_parse_action((void *)ad, actions, priority,
+				     error, filter);
+	if (ret)
+		goto error;
+
+	if (meta)
+		*meta = filter;
+
+error:
+	if (ret)
+		rte_free(filter);
+	rte_free(pattern_match_item);
+	return ret;
+}
+
 static int
 iavf_fsub_init(struct iavf_adapter *ad)
 {
-- 
2.25.1


^ permalink raw reply	[flat|nested] 40+ messages in thread

* [PATCH v3 4/5] net/iavf: support flow subscription rule
  2022-08-30 18:05 ` [PATCH v3 0/5] support flow subscription Jie Wang
                     ` (2 preceding siblings ...)
  2022-08-30 18:05   ` [PATCH v3 3/5] net/iavf: support flow subscription pattern Jie Wang
@ 2022-08-30 18:05   ` Jie Wang
  2022-08-30 18:05   ` [PATCH v3 5/5] net/iavf: support priority of flow rule Jie Wang
  2022-08-31 10:56   ` [PATCH v3 0/5] support flow subscription Ferruh Yigit
  5 siblings, 0 replies; 40+ messages in thread
From: Jie Wang @ 2022-08-30 18:05 UTC (permalink / raw)
  To: dev
  Cc: qiming.yang, qi.z.zhang, jingjing.wu, beilei.xing, stevex.yang, Jie Wang

Support create, destroy and validate operations for flow
subscription rules in AVF.

For example:
testpmd> flow create 0 ingress pattern eth / ipv4 / udp src is 11
          / end actions represented_port port_id 1 / end
testpmd> flow validate 1 ingress pattern eth / ipv4 / tcp src is 22
          / end actions represented_port port_id 1 / end
testpmd> flow destroy 1 rule 0

Signed-off-by: Jie Wang <jie1x.wang@intel.com>
---
 drivers/net/iavf/iavf.h       |   6 ++
 drivers/net/iavf/iavf_fsub.c  |  75 +++++++++++++++----
 drivers/net/iavf/iavf_vchnl.c | 132 ++++++++++++++++++++++++++++++++++
 3 files changed, 201 insertions(+), 12 deletions(-)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index f79c7f9f6e..26b858f6f0 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -489,4 +489,10 @@ int iavf_ipsec_crypto_request(struct iavf_adapter *adapter,
 extern const struct rte_tm_ops iavf_tm_ops;
 int iavf_get_ptp_cap(struct iavf_adapter *adapter);
 int iavf_get_phc_time(struct iavf_rx_queue *rxq);
+int iavf_flow_sub(struct iavf_adapter *adapter,
+		  struct iavf_fsub_conf *filter);
+int iavf_flow_unsub(struct iavf_adapter *adapter,
+		    struct iavf_fsub_conf *filter);
+int iavf_flow_sub_check(struct iavf_adapter *adapter,
+			struct iavf_fsub_conf *filter);
 #endif /* _IAVF_ETHDEV_H_ */
diff --git a/drivers/net/iavf/iavf_fsub.c b/drivers/net/iavf/iavf_fsub.c
index 4600d52b91..b9ad3531ff 100644
--- a/drivers/net/iavf/iavf_fsub.c
+++ b/drivers/net/iavf/iavf_fsub.c
@@ -69,29 +69,80 @@ iavf_pattern_match_item iavf_fsub_pattern_list[] = {
 };
 
 static int
-iavf_fsub_create(__rte_unused struct iavf_adapter *ad,
-		 __rte_unused struct rte_flow *flow,
-		 __rte_unused void *meta,
-		 __rte_unused struct rte_flow_error *error)
+iavf_fsub_create(struct iavf_adapter *ad, struct rte_flow *flow,
+		 void *meta, struct rte_flow_error *error)
 {
+	struct iavf_fsub_conf *filter = meta;
+	struct iavf_fsub_conf *rule;
+	int ret;
+
+	rule = rte_zmalloc("fsub_entry", sizeof(*rule), 0);
+	if (!rule) {
+		rte_flow_error_set(error, ENOMEM,
+				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				"Failed to allocate memory for fsub rule");
+		return -rte_errno;
+	}
+
+	ret = iavf_flow_sub(ad, filter);
+	if (ret) {
+		rte_flow_error_set(error, -ret,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Failed to subscribe flow rule.");
+		goto free_entry;
+	}
+
+	rte_memcpy(rule, filter, sizeof(*rule));
+	flow->rule = rule;
+
+	return ret;
+
+free_entry:
+	rte_free(rule);
 	return -rte_errno;
 }
 
 static int
-iavf_fsub_destroy(__rte_unused struct iavf_adapter *ad,
-		  __rte_unused struct rte_flow *flow,
-		  __rte_unused struct rte_flow_error *error)
+iavf_fsub_destroy(struct iavf_adapter *ad, struct rte_flow *flow,
+		  struct rte_flow_error *error)
 {
-	return -rte_errno;
+	struct iavf_fsub_conf *filter;
+	int ret;
+
+	filter = (struct iavf_fsub_conf *)flow->rule;
+
+	ret = iavf_flow_unsub(ad, filter);
+	if (ret) {
+		rte_flow_error_set(error, -ret,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Failed to unsubscribe flow rule.");
+		return -rte_errno;
+	}
+
+	flow->rule = NULL;
+	rte_free(filter);
+
+	return ret;
 }
 
 static int
-iavf_fsub_validation(__rte_unused struct iavf_adapter *ad,
+iavf_fsub_validation(struct iavf_adapter *ad,
 		     __rte_unused struct rte_flow *flow,
-		     __rte_unused void *meta,
-		     __rte_unused struct rte_flow_error *error)
+		     void *meta,
+		     struct rte_flow_error *error)
 {
-	return -rte_errno;
+	struct iavf_fsub_conf *filter = meta;
+	int ret;
+
+	ret = iavf_flow_sub_check(ad, filter);
+	if (ret) {
+		rte_flow_error_set(error, -ret,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Failed to validate filter rule.");
+		return -rte_errno;
+	}
+
+	return ret;
 };
 
 static int
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 6d84add423..cc0db8d093 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -1534,6 +1534,138 @@ iavf_fdir_check(struct iavf_adapter *adapter,
 	return 0;
 }
 
+int
+iavf_flow_sub(struct iavf_adapter *adapter, struct iavf_fsub_conf *filter)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct virtchnl_flow_sub *fsub_cfg;
+	struct iavf_cmd_info args;
+	int err;
+
+	filter->sub_fltr.vsi_id = vf->vsi_res->vsi_id;
+	filter->sub_fltr.validate_only = 0;
+
+	memset(&args, 0, sizeof(args));
+	args.ops = VIRTCHNL_OP_FLOW_SUBSCRIBE;
+	args.in_args = (uint8_t *)(&filter->sub_fltr);
+	args.in_args_size = sizeof(filter->sub_fltr);
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
+	if (err)
+		PMD_DRV_LOG(ERR, "Failed to execute command of "
+				 "OP_FLOW_SUBSCRIBE");
+
+	fsub_cfg = (struct virtchnl_flow_sub *)args.out_buffer;
+	filter->flow_id = fsub_cfg->flow_id;
+
+	if (fsub_cfg->status == VIRTCHNL_FSUB_SUCCESS) {
+		PMD_DRV_LOG(INFO, "Succeed in adding rule request by PF");
+	} else if (fsub_cfg->status == VIRTCHNL_FSUB_FAILURE_RULE_NORESOURCE) {
+		PMD_DRV_LOG(ERR, "Failed to add rule request due to no hw "
+				 "resource");
+		err = -1;
+	} else if (fsub_cfg->status == VIRTCHNL_FSUB_FAILURE_RULE_EXIST) {
+		PMD_DRV_LOG(ERR, "Failed to add rule request because the "
+				 "rule already exists");
+		err = -1;
+	} else if (fsub_cfg->status == VIRTCHNL_FSUB_FAILURE_RULE_INVALID) {
+		PMD_DRV_LOG(ERR, "Failed to add rule request because the "
+				 "hardware doesn't support it");
+		err = -1;
+	} else {
+		PMD_DRV_LOG(ERR, "Failed to add rule request due to other "
+				 "reasons");
+		err = -1;
+	}
+
+	return err;
+}
+
+int
+iavf_flow_unsub(struct iavf_adapter *adapter, struct iavf_fsub_conf *filter)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct virtchnl_flow_unsub *unsub_cfg;
+	struct iavf_cmd_info args;
+	int err;
+
+	filter->unsub_fltr.vsi_id = vf->vsi_res->vsi_id;
+	filter->unsub_fltr.flow_id = filter->flow_id;
+
+	memset(&args, 0, sizeof(args));
+	args.ops = VIRTCHNL_OP_FLOW_UNSUBSCRIBE;
+	args.in_args = (uint8_t *)(&filter->unsub_fltr);
+	args.in_args_size = sizeof(filter->unsub_fltr);
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
+	if (err)
+		PMD_DRV_LOG(ERR, "Failed to execute command of "
+				 "OP_FLOW_UNSUBSCRIBE");
+
+	unsub_cfg = (struct virtchnl_flow_unsub *)args.out_buffer;
+
+	if (unsub_cfg->status == VIRTCHNL_FSUB_SUCCESS) {
+		PMD_DRV_LOG(INFO, "Succeed in deleting rule request by PF");
+	} else if (unsub_cfg->status == VIRTCHNL_FSUB_FAILURE_RULE_NONEXIST) {
+		PMD_DRV_LOG(ERR, "Failed to delete rule request because "
+				 "this rule doesn't exist");
+		err = -1;
+	} else {
+		PMD_DRV_LOG(ERR, "Failed to delete rule request due to other "
+				 "reasons");
+		err = -1;
+	}
+
+	return err;
+}
+
+int
+iavf_flow_sub_check(struct iavf_adapter *adapter,
+		    struct iavf_fsub_conf *filter)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct virtchnl_flow_sub *fsub_cfg;
+
+	struct iavf_cmd_info args;
+	int err;
+
+	filter->sub_fltr.vsi_id = vf->vsi_res->vsi_id;
+	filter->sub_fltr.validate_only = 1;
+
+	args.ops = VIRTCHNL_OP_FLOW_SUBSCRIBE;
+	args.in_args = (uint8_t *)(&filter->sub_fltr);
+	args.in_args_size = sizeof(filter->sub_fltr);
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
+	if (err) {
+		PMD_DRV_LOG(ERR, "Failed to check flow subscription rule");
+		return err;
+	}
+
+	fsub_cfg = (struct virtchnl_flow_sub *)args.out_buffer;
+
+	if (fsub_cfg->status == VIRTCHNL_FSUB_SUCCESS) {
+		PMD_DRV_LOG(INFO, "Succeed in checking rule request by PF");
+	} else if (fsub_cfg->status == VIRTCHNL_FSUB_FAILURE_RULE_INVALID) {
+		PMD_DRV_LOG(ERR, "Failed to check rule request because "
+				 "parameter validation failed or the "
+				 "hardware doesn't support it");
+		err = -1;
+	} else {
+		PMD_DRV_LOG(ERR, "Failed to check rule request due to other "
+				 "reasons");
+		err = -1;
+	}
+
+	return err;
+}
+
 int
 iavf_add_del_rss_cfg(struct iavf_adapter *adapter,
 		     struct virtchnl_rss_cfg *rss_cfg, bool add)
-- 
2.25.1


^ permalink raw reply	[flat|nested] 40+ messages in thread

* [PATCH v3 5/5] net/iavf: support priority of flow rule
  2022-08-30 18:05 ` [PATCH v3 0/5] support flow subscription Jie Wang
                     ` (3 preceding siblings ...)
  2022-08-30 18:05   ` [PATCH v3 4/5] net/iavf: support flow subscription rule Jie Wang
@ 2022-08-30 18:05   ` Jie Wang
  2022-08-31 10:56   ` [PATCH v3 0/5] support flow subscription Ferruh Yigit
  5 siblings, 0 replies; 40+ messages in thread
From: Jie Wang @ 2022-08-30 18:05 UTC (permalink / raw)
  To: dev
  Cc: qiming.yang, qi.z.zhang, jingjing.wu, beilei.xing, stevex.yang, Jie Wang

Add flow rule attribute "priority" support for AVF.

Lower values denote higher priority; the highest priority for
a flow rule is 0.
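
For example, a rule with priority 1 can only be taken by the flow
subscription parser, since the other parsers reject any non-zero
priority (illustrative testpmd syntax; the port ids are assumptions):

testpmd> flow create 0 priority 1 ingress pattern eth / ipv4
          / end actions represented_port port_id 1 / end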

Signed-off-by: Jie Wang <jie1x.wang@intel.com>
---
 drivers/net/iavf/iavf_fdir.c         |  4 ++++
 drivers/net/iavf/iavf_fsub.c         |  2 +-
 drivers/net/iavf/iavf_generic_flow.c | 23 +++++++++++++----------
 drivers/net/iavf/iavf_generic_flow.h |  1 +
 drivers/net/iavf/iavf_hash.c         |  5 +++++
 drivers/net/iavf/iavf_ipsec_crypto.c | 16 ++++++++++------
 6 files changed, 34 insertions(+), 17 deletions(-)

diff --git a/drivers/net/iavf/iavf_fdir.c b/drivers/net/iavf/iavf_fdir.c
index a397047fdb..8f80873925 100644
--- a/drivers/net/iavf/iavf_fdir.c
+++ b/drivers/net/iavf/iavf_fdir.c
@@ -1583,6 +1583,7 @@ iavf_fdir_parse(struct iavf_adapter *ad,
 		uint32_t array_len,
 		const struct rte_flow_item pattern[],
 		const struct rte_flow_action actions[],
+		uint32_t priority,
 		void **meta,
 		struct rte_flow_error *error)
 {
@@ -1593,6 +1594,9 @@ iavf_fdir_parse(struct iavf_adapter *ad,
 
 	memset(filter, 0, sizeof(*filter));
 
+	if (priority >= 1)
+		return -rte_errno;
+
 	item = iavf_search_pattern_match_item(pattern, array, array_len, error);
 	if (!item)
 		return -rte_errno;
diff --git a/drivers/net/iavf/iavf_fsub.c b/drivers/net/iavf/iavf_fsub.c
index b9ad3531ff..46effda9a0 100644
--- a/drivers/net/iavf/iavf_fsub.c
+++ b/drivers/net/iavf/iavf_fsub.c
@@ -649,13 +649,13 @@ iavf_fsub_parse(struct iavf_adapter *ad,
 		uint32_t array_len,
 		const struct rte_flow_item pattern[],
 		const struct rte_flow_action actions[],
+		uint32_t priority,
 		void **meta,
 		struct rte_flow_error *error)
 {
 	struct iavf_fsub_conf *filter;
 	struct iavf_pattern_match_item *pattern_match_item = NULL;
 	int ret = 0;
-	uint32_t priority = 0;
 
 	filter = rte_zmalloc(NULL, sizeof(*filter), 0);
 	if (!filter) {
diff --git a/drivers/net/iavf/iavf_generic_flow.c b/drivers/net/iavf/iavf_generic_flow.c
index b04614ba6e..f33c764764 100644
--- a/drivers/net/iavf/iavf_generic_flow.c
+++ b/drivers/net/iavf/iavf_generic_flow.c
@@ -1785,6 +1785,7 @@ enum rte_flow_item_type iavf_pattern_eth_ipv6_udp_l2tpv2_ppp_ipv6_tcp[] = {
 typedef struct iavf_flow_engine * (*parse_engine_t)(struct iavf_adapter *ad,
 		struct rte_flow *flow,
 		struct iavf_parser_list *parser_list,
+		uint32_t priority,
 		const struct rte_flow_item pattern[],
 		const struct rte_flow_action actions[],
 		struct rte_flow_error *error);
@@ -1951,11 +1952,11 @@ iavf_flow_valid_attr(const struct rte_flow_attr *attr,
 		return -rte_errno;
 	}
 
-	/* Not supported */
-	if (attr->priority) {
+	/* support priority for flow subscribe */
+	if (attr->priority > 1) {
 		rte_flow_error_set(error, EINVAL,
 				RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
-				attr, "Not support priority.");
+				attr, "Only support priority 0 and 1.");
 		return -rte_errno;
 	}
 
@@ -2098,6 +2099,7 @@ static struct iavf_flow_engine *
 iavf_parse_engine_create(struct iavf_adapter *ad,
 		struct rte_flow *flow,
 		struct iavf_parser_list *parser_list,
+		uint32_t priority,
 		const struct rte_flow_item pattern[],
 		const struct rte_flow_action actions[],
 		struct rte_flow_error *error)
@@ -2111,7 +2113,7 @@ iavf_parse_engine_create(struct iavf_adapter *ad,
 		if (parser_node->parser->parse_pattern_action(ad,
 				parser_node->parser->array,
 				parser_node->parser->array_len,
-				pattern, actions, &meta, error) < 0)
+				pattern, actions, priority, &meta, error) < 0)
 			continue;
 
 		engine = parser_node->parser->engine;
@@ -2127,6 +2129,7 @@ static struct iavf_flow_engine *
 iavf_parse_engine_validate(struct iavf_adapter *ad,
 		struct rte_flow *flow,
 		struct iavf_parser_list *parser_list,
+		uint32_t priority,
 		const struct rte_flow_item pattern[],
 		const struct rte_flow_action actions[],
 		struct rte_flow_error *error)
@@ -2140,7 +2143,7 @@ iavf_parse_engine_validate(struct iavf_adapter *ad,
 		if (parser_node->parser->parse_pattern_action(ad,
 				parser_node->parser->array,
 				parser_node->parser->array_len,
-				pattern, actions, &meta,  error) < 0)
+				pattern, actions, priority, &meta, error) < 0)
 			continue;
 
 		engine = parser_node->parser->engine;
@@ -2201,18 +2204,18 @@ iavf_flow_process_filter(struct rte_eth_dev *dev,
 	if (ret)
 		return ret;
 
-	*engine = iavf_parse_engine(ad, flow, &vf->rss_parser_list, pattern,
-				    actions, error);
+	*engine = iavf_parse_engine(ad, flow, &vf->rss_parser_list,
+				    attr->priority, pattern, actions, error);
 	if (*engine)
 		return 0;
 
-	*engine = iavf_parse_engine(ad, flow, &vf->dist_parser_list, pattern,
-				    actions, error);
+	*engine = iavf_parse_engine(ad, flow, &vf->dist_parser_list,
+				    attr->priority, pattern, actions, error);
 	if (*engine)
 		return 0;
 
 	*engine = iavf_parse_engine(ad, flow, &vf->ipsec_crypto_parser_list,
-			pattern, actions, error);
+				    attr->priority, pattern, actions, error);
 	if (*engine)
 		return 0;
 
diff --git a/drivers/net/iavf/iavf_generic_flow.h b/drivers/net/iavf/iavf_generic_flow.h
index 448facffa5..60d8ab02b4 100644
--- a/drivers/net/iavf/iavf_generic_flow.h
+++ b/drivers/net/iavf/iavf_generic_flow.h
@@ -471,6 +471,7 @@ typedef int (*parse_pattern_action_t)(struct iavf_adapter *ad,
 		uint32_t array_len,
 		const struct rte_flow_item pattern[],
 		const struct rte_flow_action actions[],
+		uint32_t priority,
 		void **meta,
 		struct rte_flow_error *error);
 
diff --git a/drivers/net/iavf/iavf_hash.c b/drivers/net/iavf/iavf_hash.c
index 42df7c4e48..dea4e0aa0a 100644
--- a/drivers/net/iavf/iavf_hash.c
+++ b/drivers/net/iavf/iavf_hash.c
@@ -86,6 +86,7 @@ iavf_hash_parse_pattern_action(struct iavf_adapter *ad,
 			       uint32_t array_len,
 			       const struct rte_flow_item pattern[],
 			       const struct rte_flow_action actions[],
+			       uint32_t priority,
 			       void **meta,
 			       struct rte_flow_error *error);
 
@@ -1509,6 +1510,7 @@ iavf_hash_parse_pattern_action(__rte_unused struct iavf_adapter *ad,
 			       uint32_t array_len,
 			       const struct rte_flow_item pattern[],
 			       const struct rte_flow_action actions[],
+			       uint32_t priority,
 			       void **meta,
 			       struct rte_flow_error *error)
 {
@@ -1517,6 +1519,9 @@ iavf_hash_parse_pattern_action(__rte_unused struct iavf_adapter *ad,
 	uint64_t phint = IAVF_PHINT_NONE;
 	int ret = 0;
 
+	if (priority >= 1)
+		return -rte_errno;
+
 	rss_meta_ptr = rte_zmalloc(NULL, sizeof(*rss_meta_ptr), 0);
 	if (!rss_meta_ptr) {
 		rte_flow_error_set(error, EINVAL,
diff --git a/drivers/net/iavf/iavf_ipsec_crypto.c b/drivers/net/iavf/iavf_ipsec_crypto.c
index 1d465b4419..cec1b968fe 100644
--- a/drivers/net/iavf/iavf_ipsec_crypto.c
+++ b/drivers/net/iavf/iavf_ipsec_crypto.c
@@ -1933,16 +1933,20 @@ static struct iavf_flow_engine iavf_ipsec_flow_engine = {
 
 static int
 iavf_ipsec_flow_parse(struct iavf_adapter *ad,
-		       struct iavf_pattern_match_item *array,
-		       uint32_t array_len,
-		       const struct rte_flow_item pattern[],
-		       const struct rte_flow_action actions[],
-		       void **meta,
-		       struct rte_flow_error *error)
+		      struct iavf_pattern_match_item *array,
+		      uint32_t array_len,
+		      const struct rte_flow_item pattern[],
+		      const struct rte_flow_action actions[],
+		      uint32_t priority,
+		      void **meta,
+		      struct rte_flow_error *error)
 {
 	struct iavf_pattern_match_item *item = NULL;
 	int ret = -1;
 
+	if (priority >= 1)
+		return -rte_errno;
+
 	item = iavf_search_pattern_match_item(pattern, array, array_len, error);
 	if (item && item->meta) {
 		uint32_t type = (uint64_t)(item->meta);
-- 
2.25.1


^ permalink raw reply	[flat|nested] 40+ messages in thread

* Re: [PATCH v3 0/5] support flow subscription
  2022-08-30 18:05 ` [PATCH v3 0/5] support flow subscription Jie Wang
                     ` (4 preceding siblings ...)
  2022-08-30 18:05   ` [PATCH v3 5/5] net/iavf: support priority of flow rule Jie Wang
@ 2022-08-31 10:56   ` Ferruh Yigit
  2022-08-31 12:28     ` Zhang, Qi Z
  5 siblings, 1 reply; 40+ messages in thread
From: Ferruh Yigit @ 2022-08-31 10:56 UTC (permalink / raw)
  To: Jie Wang, dev
  Cc: qiming.yang, qi.z.zhang, jingjing.wu, beilei.xing, stevex.yang

On 8/30/2022 7:05 PM, Jie Wang wrote:
> Add support AVF can be able to subscribe a flow from PF.
> 

Hi Jie,

Can you please provide more details, what does subscribing a flow from 
PF mean?

Thanks,
ferruh

> --
> v3:
>   * fix eth layer inputset.
>   * rebase.
> v2:
>   * split v1 patch 2/2 to 4 small patches.
>   * remove rule action RTE_FLOW_ACTION_TYPE_VF and add
>     RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT.
> 
> Jie Wang (5):
>    common/iavf: support flow subscription
>    net/iavf: add flow subscription to AVF
> >    net/iavf: support flow subscription pattern
>    net/iavf: support flow subscription rule
>    net/iavf: support priority of flow rule
> 
>   doc/guides/rel_notes/release_22_11.rst |   4 +
>   drivers/common/iavf/virtchnl.h         | 104 +++-
>   drivers/net/iavf/iavf.h                |  13 +
>   drivers/net/iavf/iavf_fdir.c           |   4 +
>   drivers/net/iavf/iavf_fsub.c           | 745 +++++++++++++++++++++++++
>   drivers/net/iavf/iavf_generic_flow.c   |  40 +-
>   drivers/net/iavf/iavf_generic_flow.h   |   2 +
>   drivers/net/iavf/iavf_hash.c           |   5 +
>   drivers/net/iavf/iavf_ipsec_crypto.c   |  16 +-
>   drivers/net/iavf/iavf_vchnl.c          | 133 +++++
>   drivers/net/iavf/meson.build           |   1 +
>   11 files changed, 1046 insertions(+), 21 deletions(-)
>   create mode 100644 drivers/net/iavf/iavf_fsub.c
> 


^ permalink raw reply	[flat|nested] 40+ messages in thread

* RE: [PATCH v3 0/5] support flow subscription
  2022-08-31 10:56   ` [PATCH v3 0/5] support flow subscription Ferruh Yigit
@ 2022-08-31 12:28     ` Zhang, Qi Z
  2022-08-31 12:53       ` Ferruh Yigit
  0 siblings, 1 reply; 40+ messages in thread
From: Zhang, Qi Z @ 2022-08-31 12:28 UTC (permalink / raw)
  To: Ferruh Yigit, Wang, Jie1X, dev
  Cc: Yang, Qiming, Wu, Jingjing, Xing, Beilei, Yang, SteveX



> -----Original Message-----
> From: Ferruh Yigit <ferruh.yigit@xilinx.com>
> Sent: Wednesday, August 31, 2022 6:57 PM
> To: Wang, Jie1X <jie1x.wang@intel.com>; dev@dpdk.org
> Cc: Yang, Qiming <qiming.yang@intel.com>; Zhang, Qi Z
> <qi.z.zhang@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>; Xing, Beilei
> <beilei.xing@intel.com>; Yang, SteveX <stevex.yang@intel.com>
> Subject: Re: [PATCH v3 0/5] support flow subscription
> 
> On 8/30/2022 7:05 PM, Jie Wang wrote:
> > Add support AVF can be able to subscribe a flow from PF.
> >
> 
> Hi Jie,
> 
> Can you please provide more details, what does subscribing a flow from PF
> mean?

The answer is in patch 1/5's commit log which would be better in cover letter also :)

> 
> Thanks,
> ferruh
> 
> > --
> > v3:
> >   * fix eth layer inputset.
> >   * rebase.
> > v2:
> >   * split v1 patch 2/2 to 4 small patches.
> >   * remove rule action RTE_FLOW_ACTION_TYPE_VF and add
> >     RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT.
> >
> > Jie Wang (5):
> >    common/iavf: support flow subscription
> >    net/iavf: add flow subscription to AVF
> > >    net/iavf: support flow subscription pattern
> >    net/iavf: support flow subscription rule
> >    net/iavf: support priority of flow rule
> >
> >   doc/guides/rel_notes/release_22_11.rst |   4 +
> >   drivers/common/iavf/virtchnl.h         | 104 +++-
> >   drivers/net/iavf/iavf.h                |  13 +
> >   drivers/net/iavf/iavf_fdir.c           |   4 +
> >   drivers/net/iavf/iavf_fsub.c           | 745 +++++++++++++++++++++++++
> >   drivers/net/iavf/iavf_generic_flow.c   |  40 +-
> >   drivers/net/iavf/iavf_generic_flow.h   |   2 +
> >   drivers/net/iavf/iavf_hash.c           |   5 +
> >   drivers/net/iavf/iavf_ipsec_crypto.c   |  16 +-
> >   drivers/net/iavf/iavf_vchnl.c          | 133 +++++
> >   drivers/net/iavf/meson.build           |   1 +
> >   11 files changed, 1046 insertions(+), 21 deletions(-)
> >   create mode 100644 drivers/net/iavf/iavf_fsub.c
> >


^ permalink raw reply	[flat|nested] 40+ messages in thread

* Re: [PATCH v3 0/5] support flow subscription
  2022-08-31 12:28     ` Zhang, Qi Z
@ 2022-08-31 12:53       ` Ferruh Yigit
  2022-09-01  0:59         ` Zhang, Qi Z
  0 siblings, 1 reply; 40+ messages in thread
From: Ferruh Yigit @ 2022-08-31 12:53 UTC (permalink / raw)
  To: Zhang, Qi Z, Wang, Jie1X, dev
  Cc: Yang, Qiming, Wu, Jingjing, Xing, Beilei, Yang, SteveX

On 8/31/2022 1:28 PM, Zhang, Qi Z wrote:
> 
> 
>> -----Original Message-----
>> From: Ferruh Yigit <ferruh.yigit@xilinx.com>
>> Sent: Wednesday, August 31, 2022 6:57 PM
>> To: Wang, Jie1X <jie1x.wang@intel.com>; dev@dpdk.org
>> Cc: Yang, Qiming <qiming.yang@intel.com>; Zhang, Qi Z
>> <qi.z.zhang@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>; Xing, Beilei
>> <beilei.xing@intel.com>; Yang, SteveX <stevex.yang@intel.com>
>> Subject: Re: [PATCH v3 0/5] support flow subscription
>>
>> On 8/30/2022 7:05 PM, Jie Wang wrote:
>>> Add support AVF can be able to subscribe a flow from PF.
>>>
>>
>> Hi Jie,
>>
>> Can you please provide more details, what does subscribing a flow from PF
>> mean?
> 
> The answer is in patch 1/5's commit log which would be better in cover letter also :)
> 

Hi Qi,

I checked all the commit logs, but the feature wasn't clear to me; it
would be useful to elaborate more.

Is it the case that:
- PF create a flow rule
- VF subscribes to this rule, which means
   - Packets will be sent to VF instead of PF

If the above is correct, will both PF and VF receive packets, or only the VF?
Can multiple VF subscribe to same rule?
Can PF destroy the rule while VF is subscribed?
How can the PF prevent subscription to some flows from an untrusted VF,
as a security concern?
...

>>
>> Thanks,
>> ferruh
>>
>>> --
>>> v3:
>>>    * fix eth layer inputset.
>>>    * rebase.
>>> v2:
>>>    * split v1 patch 2/2 to 4 small patches.
>>>    * remove rule action RTE_FLOW_ACTION_TYPE_VF and add
>>>      RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT.
>>>
>>> Jie Wang (5):
>>>     common/iavf: support flow subscription
>>>     net/iavf: add flow subscription to AVF
>>>     net/iavf: support flow subscription pattern
>>>     net/iavf: support flow subscription rule
>>>     net/iavf: support priority of flow rule
>>>
>>>    doc/guides/rel_notes/release_22_11.rst |   4 +
>>>    drivers/common/iavf/virtchnl.h         | 104 +++-
>>>    drivers/net/iavf/iavf.h                |  13 +
>>>    drivers/net/iavf/iavf_fdir.c           |   4 +
>>>    drivers/net/iavf/iavf_fsub.c           | 745 +++++++++++++++++++++++++
>>>    drivers/net/iavf/iavf_generic_flow.c   |  40 +-
>>>    drivers/net/iavf/iavf_generic_flow.h   |   2 +
>>>    drivers/net/iavf/iavf_hash.c           |   5 +
>>>    drivers/net/iavf/iavf_ipsec_crypto.c   |  16 +-
>>>    drivers/net/iavf/iavf_vchnl.c          | 133 +++++
>>>    drivers/net/iavf/meson.build           |   1 +
>>>    11 files changed, 1046 insertions(+), 21 deletions(-)
>>>    create mode 100644 drivers/net/iavf/iavf_fsub.c
>>>
> 


^ permalink raw reply	[flat|nested] 40+ messages in thread

* RE: [PATCH v3 0/5] support flow subscription
  2022-08-31 12:53       ` Ferruh Yigit
@ 2022-09-01  0:59         ` Zhang, Qi Z
  0 siblings, 0 replies; 40+ messages in thread
From: Zhang, Qi Z @ 2022-09-01  0:59 UTC (permalink / raw)
  To: Ferruh Yigit, Wang, Jie1X, dev
  Cc: Yang, Qiming, Wu, Jingjing, Xing, Beilei, Yang, SteveX



> -----Original Message-----
> From: Ferruh Yigit <ferruh.yigit@xilinx.com>
> Sent: Wednesday, August 31, 2022 8:54 PM
> To: Zhang, Qi Z <qi.z.zhang@intel.com>; Wang, Jie1X <jie1x.wang@intel.com>;
> dev@dpdk.org
> Cc: Yang, Qiming <qiming.yang@intel.com>; Wu, Jingjing
> <jingjing.wu@intel.com>; Xing, Beilei <beilei.xing@intel.com>; Yang, SteveX
> <stevex.yang@intel.com>
> Subject: Re: [PATCH v3 0/5] support flow subscription
> 
> On 8/31/2022 1:28 PM, Zhang, Qi Z wrote:
> >
> >
> >> -----Original Message-----
> >> From: Ferruh Yigit <ferruh.yigit@xilinx.com>
> >> Sent: Wednesday, August 31, 2022 6:57 PM
> >> To: Wang, Jie1X <jie1x.wang@intel.com>; dev@dpdk.org
> >> Cc: Yang, Qiming <qiming.yang@intel.com>; Zhang, Qi Z
> >> <qi.z.zhang@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>; Xing,
> >> Beilei <beilei.xing@intel.com>; Yang, SteveX <stevex.yang@intel.com>
> >> Subject: Re: [PATCH v3 0/5] support flow subscription
> >>
> >> On 8/30/2022 7:05 PM, Jie Wang wrote:
> >>> Add support AVF can be able to subscribe a flow from PF.
> >>>
> >>
> >> Hi Jie,
> >>
> >> Can you please provide more details, what does subscribing a flow
> >> from PF mean?
> >
> > The answer is in patch 1/5's commit log which would be better in cover
> > letter also :)
> >
> 
> Hi Qi,
> 
> I checked all commit logs, but the feature wasn't clear to me, it can be useful
> to elaborate more.
> 
> Is it the case that:
> - PF create a flow rule
> - VF subscribes to this rule, which means
>    - Packets will be sent to VF instead of PF

Correct.
> 
> If above is correct, will both PF and VF receive packets, or only VF?

Only the VF will receive the packet.

> Can multiple VF subscribe to same rule?

Yes, it is allowed and the packet should be replicated, though we haven't seen real use cases.

> Can PF destroy the rule while VF is subscribed

The PF will destroy all subscriptions during VF reset.

> How PF can prevent subscription to some flows from not trusted VF for
> security concern?

Good question, you almost made me go through the internal security review a second time. :)

All subscription rules will have a lower priority than the rules created by the host (e.g. rules created by switchdev or DCF).

I will suggest the author put all this information in the commit log.

Thanks
Qi

> ...
> 
> >>
> >> Thanks,
> >> ferruh
> >>
> >>> --
> >>> v3:
> >>>    * fix eth layer inputset.
> >>>    * rebase.
> >>> v2:
> >>>    * split v1 patch 2/2 to 4 small patches.
> >>>    * remove rule action RTE_FLOW_ACTION_TYPE_VF and add
> >>>      RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT.
> >>>
> >>> Jie Wang (5):
> >>>     common/iavf: support flow subscription
> >>>     net/iavf: add flow subscription to AVF
> >>>     net/iavf: support flow subscrption pattern
> >>>     net/iavf: support flow subscription pattern
> >>>     net/iavf: support priority of flow rule
> >>>
> >>>    doc/guides/rel_notes/release_22_11.rst |   4 +
> >>>    drivers/common/iavf/virtchnl.h         | 104 +++-
> >>>    drivers/net/iavf/iavf.h                |  13 +
> >>>    drivers/net/iavf/iavf_fdir.c           |   4 +
> >>>    drivers/net/iavf/iavf_fsub.c           | 745 +++++++++++++++++++++++++
> >>>    drivers/net/iavf/iavf_generic_flow.c   |  40 +-
> >>>    drivers/net/iavf/iavf_generic_flow.h   |   2 +
> >>>    drivers/net/iavf/iavf_hash.c           |   5 +
> >>>    drivers/net/iavf/iavf_ipsec_crypto.c   |  16 +-
> >>>    drivers/net/iavf/iavf_vchnl.c          | 133 +++++
> >>>    drivers/net/iavf/meson.build           |   1 +
> >>>    11 files changed, 1046 insertions(+), 21 deletions(-)
> >>>    create mode 100644 drivers/net/iavf/iavf_fsub.c
> >>>
> >


^ permalink raw reply	[flat|nested] 40+ messages in thread

* RE: [PATCH v3 3/5] net/iavf: support flow subscription pattern
  2022-08-30 18:05   ` [PATCH v3 3/5] net/iavf: support flow subscrption pattern Jie Wang
@ 2022-09-06  7:30     ` Zhang, Qi Z
  0 siblings, 0 replies; 40+ messages in thread
From: Zhang, Qi Z @ 2022-09-06  7:30 UTC (permalink / raw)
  To: Wang, Jie1X, dev; +Cc: Yang, Qiming, Wu, Jingjing, Xing, Beilei, Yang, SteveX



> -----Original Message-----
> From: Wang, Jie1X <jie1x.wang@intel.com>
> Sent: Wednesday, August 31, 2022 2:05 AM
> To: dev@dpdk.org
> Cc: Yang, Qiming <qiming.yang@intel.com>; Zhang, Qi Z
> <qi.z.zhang@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>; Xing, Beilei
> <beilei.xing@intel.com>; Yang, SteveX <stevex.yang@intel.com>; Wang, Jie1X
> <jie1x.wang@intel.com>
> Subject: [PATCH v3 3/5] net/iavf: support flow subscription pattern
> 
...
> +static int
> +iavf_fsub_parse_action(struct iavf_adapter *ad,
> +		       const struct rte_flow_action *actions,
> +		       uint32_t priority,
> +		       struct rte_flow_error *error,
> +		       struct iavf_fsub_conf *filter)
>  {
> +	const struct rte_flow_action *action;
> +	const struct rte_flow_action_ethdev *act_ethdev;
> +	const struct rte_flow_action_queue *act_q;
> +	const struct rte_flow_action_rss *act_qgrop;
> +	struct virtchnl_filter_action *filter_action;
> +	uint16_t valid_qgrop_number[MAX_QGRP_NUM_TYPE] = {
> +		2, 4, 8, 16, 32, 64, 128};
> +	uint16_t i, num = 0, dest_num = 0, vf_num = 0;
> +	uint16_t rule_port_id;
> +
> +	for (action = actions; action->type !=
> +				RTE_FLOW_ACTION_TYPE_END; action++) {
> +		switch (action->type) {
> +		case RTE_FLOW_ACTION_TYPE_VOID:
> +			break;
> +
> +		case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:

Should be RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR, as the traffic is expected to be sent to the given ethdev.



^ permalink raw reply	[flat|nested] 40+ messages in thread

* [PATCH v4 0/5] support flow subscription
  2022-08-09  6:21 [PATCH 0/2] support flow subscription Jie Wang
                   ` (3 preceding siblings ...)
  2022-08-30 18:05 ` [PATCH v3 0/5] support flow subscription Jie Wang
@ 2022-09-07  3:35 ` Jie Wang
  2022-09-07  3:35   ` [PATCH v4 1/5] common/iavf: " Jie Wang
                     ` (4 more replies)
  2022-09-07  4:38 ` [PATCH v4 0/5] support flow subscription Jie Wang
  2022-09-07  5:10 ` [PATCH v5 0/5] support flow subscription Jie Wang
  6 siblings, 5 replies; 40+ messages in thread
From: Jie Wang @ 2022-09-07  3:35 UTC (permalink / raw)
  To: dev
  Cc: qiming.yang, qi.z.zhang, jingjing.wu, beilei.xing, stevex.yang, Jie Wang

Add support for AVF to subscribe a flow from the PF.

--
v4: update commit log and rebase.
v3:
 * fix eth layer inputset.
 * rebase.
v2:
 * split v1 patch 2/2 to 4 small patches.
 * remove rule action RTE_FLOW_ACTION_TYPE_VF and add
   RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT.

Jie Wang (5):
  common/iavf: support flow subscription
  net/iavf: add flow subscription to AVF
  net/iavf: support flow subscription pattern
  net/iavf: support flow subscription rule
  net/iavf: support priority of flow rule

 doc/guides/rel_notes/release_22_11.rst |   4 +
 drivers/common/iavf/virtchnl.h         | 104 +++-
 drivers/net/iavf/iavf.h                |  13 +
 drivers/net/iavf/iavf_fdir.c           |   4 +
 drivers/net/iavf/iavf_fsub.c           | 745 +++++++++++++++++++++++++
 drivers/net/iavf/iavf_generic_flow.c   |  40 +-
 drivers/net/iavf/iavf_generic_flow.h   |   2 +
 drivers/net/iavf/iavf_hash.c           |   5 +
 drivers/net/iavf/iavf_ipsec_crypto.c   |  16 +-
 drivers/net/iavf/iavf_vchnl.c          | 133 +++++
 drivers/net/iavf/meson.build           |   1 +
 11 files changed, 1046 insertions(+), 21 deletions(-)
 create mode 100644 drivers/net/iavf/iavf_fsub.c

-- 
2.25.1


^ permalink raw reply	[flat|nested] 40+ messages in thread

* [PATCH v4 1/5] common/iavf: support flow subscription
  2022-09-07  3:35 ` [PATCH v4 " Jie Wang
@ 2022-09-07  3:35   ` Jie Wang
  2022-09-07  3:35   ` [PATCH v4 2/5] net/iavf: add flow subscription to AVF Jie Wang
                     ` (3 subsequent siblings)
  4 siblings, 0 replies; 40+ messages in thread
From: Jie Wang @ 2022-09-07  3:35 UTC (permalink / raw)
  To: dev
  Cc: qiming.yang, qi.z.zhang, jingjing.wu, beilei.xing, stevex.yang, Jie Wang

VF is able to subscribe a flow from PF by VIRTCHNL_FLOW_SUBSCRIBE.

PF is expected to offload a rule to hardware which will redirect
the packets that match the required pattern to this VF.

Only a flow whose destination MAC address is the PF's MAC address can be subscribed.

VIRTCHNL_VF_OFFLOAD_FSUB_PF is used for flow subscription capability
negotiation, and only a trusted VF can be granted this capability.

A flow can be unsubscribed by VIRTCHNL_FLOW_UNSUBSCRIBE.
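
The expected VF-side usage is sketched below (a minimal illustration
only; the vsi_id variable and the send_to_pf() helper are assumptions,
not part of virtchnl):

	struct virtchnl_flow_sub sub = {0};

	sub.vsi_id = vsi_id;    /* the VF's VSI id */
	sub.validate_only = 0;  /* 1 = only validate, don't program */
	sub.priority = 0;       /* 0 is the highest priority */
	/* fill sub.proto_hdrs and sub.actions with the match pattern
	 * and the target queue(s), then send the message to the PF
	 */
	send_to_pf(VIRTCHNL_OP_FLOW_SUBSCRIBE, (u8 *)&sub, sizeof(sub));
	/* the PF's reply echoes the struct with sub.flow_id filled in
	 * and sub.status set to a virtchnl_fsub_prgm_status value
	 */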

Signed-off-by: Jie Wang <jie1x.wang@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
 drivers/common/iavf/virtchnl.h | 104 +++++++++++++++++++++++++++++++--
 1 file changed, 100 insertions(+), 4 deletions(-)

diff --git a/drivers/common/iavf/virtchnl.h b/drivers/common/iavf/virtchnl.h
index f123daec8e..e02eec4935 100644
--- a/drivers/common/iavf/virtchnl.h
+++ b/drivers/common/iavf/virtchnl.h
@@ -168,6 +168,8 @@ enum virtchnl_ops {
 	VIRTCHNL_OP_MAP_QUEUE_VECTOR = 111,
 	VIRTCHNL_OP_CONFIG_QUEUE_BW = 112,
 	VIRTCHNL_OP_CONFIG_QUANTA = 113,
+	VIRTCHNL_OP_FLOW_SUBSCRIBE = 114,
+	VIRTCHNL_OP_FLOW_UNSUBSCRIBE = 115,
 	VIRTCHNL_OP_MAX,
 };
 
@@ -282,6 +284,10 @@ static inline const char *virtchnl_op_str(enum virtchnl_ops v_opcode)
 		return "VIRTCHNL_OP_1588_PTP_GET_CAPS";
 	case VIRTCHNL_OP_1588_PTP_GET_TIME:
 		return "VIRTCHNL_OP_1588_PTP_GET_TIME";
+	case VIRTCHNL_OP_FLOW_SUBSCRIBE:
+		return "VIRTCHNL_OP_FLOW_SUBSCRIBE";
+	case VIRTCHNL_OP_FLOW_UNSUBSCRIBE:
+		return "VIRTCHNL_OP_FLOW_UNSUBSCRIBE";
 	case VIRTCHNL_OP_MAX:
 		return "VIRTCHNL_OP_MAX";
 	default:
@@ -401,6 +407,7 @@ VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
 #define VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO	BIT(8)
 #define VIRTCHNL_VF_LARGE_NUM_QPAIRS		BIT(9)
 #define VIRTCHNL_VF_OFFLOAD_CRC			BIT(10)
+#define VIRTCHNL_VF_OFFLOAD_FSUB_PF		BIT(14)
 #define VIRTCHNL_VF_OFFLOAD_VLAN_V2		BIT(15)
 #define VIRTCHNL_VF_OFFLOAD_VLAN		BIT(16)
 #define VIRTCHNL_VF_OFFLOAD_RX_POLLING		BIT(17)
@@ -1503,6 +1510,7 @@ enum virtchnl_vfr_states {
 };
 
 #define VIRTCHNL_MAX_NUM_PROTO_HDRS	32
+#define VIRTCHNL_MAX_NUM_PROTO_HDRS_W_MSK	16
 #define VIRTCHNL_MAX_SIZE_RAW_PACKET	1024
 #define PROTO_HDR_SHIFT			5
 #define PROTO_HDR_FIELD_START(proto_hdr_type) \
@@ -1695,6 +1703,22 @@ struct virtchnl_proto_hdr {
 
 VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_proto_hdr);
 
+struct virtchnl_proto_hdr_w_msk {
+	/* see enum virtchnl_proto_hdr_type */
+	s32 type;
+	u32 pad;
+	/**
+	 * binary buffer in network order for specific header type.
+	 * For example, if type = VIRTCHNL_PROTO_HDR_IPV4, a IPv4
+	 * header is expected to be copied into the buffer.
+	 */
+	u8 buffer_spec[64];
+	/* binary buffer for bit-mask applied to specific header type */
+	u8 buffer_mask[64];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(136, virtchnl_proto_hdr_w_msk);
+
 struct virtchnl_proto_hdrs {
 	u8 tunnel_level;
 	/**
@@ -1706,11 +1730,18 @@ struct virtchnl_proto_hdrs {
 	 */
 	int count;
 	/**
-	 * number of proto layers, must < VIRTCHNL_MAX_NUM_PROTO_HDRS
-	 * must be 0 for a raw packet request.
+	 * count must be <=
+	 * VIRTCHNL_MAX_NUM_PROTO_HDRS + VIRTCHNL_MAX_NUM_PROTO_HDRS_W_MSK
+	 * count = 0 :					select raw
+	 * 1 <= count <= VIRTCHNL_MAX_NUM_PROTO_HDRS :	select proto_hdr
+	 * count > VIRTCHNL_MAX_NUM_PROTO_HDRS :	select proto_hdr_w_msk
+	 * last valid index = count - VIRTCHNL_MAX_NUM_PROTO_HDRS
 	 */
 	union {
-		struct virtchnl_proto_hdr proto_hdr[VIRTCHNL_MAX_NUM_PROTO_HDRS];
+		struct virtchnl_proto_hdr
+			proto_hdr[VIRTCHNL_MAX_NUM_PROTO_HDRS];
+		struct virtchnl_proto_hdr_w_msk
+			proto_hdr_w_msk[VIRTCHNL_MAX_NUM_PROTO_HDRS_W_MSK];
 		struct {
 			u16 pkt_len;
 			u8 spec[VIRTCHNL_MAX_SIZE_RAW_PACKET];
@@ -1731,7 +1762,7 @@ struct virtchnl_rss_cfg {
 
 VIRTCHNL_CHECK_STRUCT_LEN(2444, virtchnl_rss_cfg);
 
-/* action configuration for FDIR */
+/* action configuration for FDIR and FSUB */
 struct virtchnl_filter_action {
 	/* see enum virtchnl_action type */
 	s32 type;
@@ -1849,6 +1880,65 @@ struct virtchnl_fdir_del {
 
 VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_fdir_del);
 
+/* Status returned to VF after VF requests FSUB commands
+ * VIRTCHNL_FSUB_SUCCESS
+ * VF FLOW related request was successfully done by PF.
+ * The request can be OP_FLOW_SUBSCRIBE/UNSUBSCRIBE.
+ *
+ * VIRTCHNL_FSUB_FAILURE_RULE_NORESOURCE
+ * OP_FLOW_SUBSCRIBE request failed due to no hardware resource.
+ *
+ * VIRTCHNL_FSUB_FAILURE_RULE_EXIST
+ * OP_FLOW_SUBSCRIBE request failed because the rule already exists.
+ *
+ * VIRTCHNL_FSUB_FAILURE_RULE_NONEXIST
+ * OP_FLOW_UNSUBSCRIBE request failed because this rule doesn't exist.
+ *
+ * VIRTCHNL_FSUB_FAILURE_RULE_INVALID
+ * OP_FLOW_SUBSCRIBE request failed because parameter validation failed
+ * or the HW doesn't support it.
+ */
+enum virtchnl_fsub_prgm_status {
+	VIRTCHNL_FSUB_SUCCESS = 0,
+	VIRTCHNL_FSUB_FAILURE_RULE_NORESOURCE,
+	VIRTCHNL_FSUB_FAILURE_RULE_EXIST,
+	VIRTCHNL_FSUB_FAILURE_RULE_NONEXIST,
+	VIRTCHNL_FSUB_FAILURE_RULE_INVALID,
+};
+
+/* VIRTCHNL_OP_FLOW_SUBSCRIBE
+ * VF sends this request to PF by filling out vsi_id,
+ * validate_only, priority, proto_hdrs and actions.
+ * PF will return the assigned flow_id
+ * if the request succeeds, and always returns status to VF.
+ */
+struct virtchnl_flow_sub {
+	u16 vsi_id; /* INPUT */
+	u8 validate_only; /* INPUT */
+	u8 priority; /* INPUT */
+	u32 flow_id; /* OUTPUT */
+	struct virtchnl_proto_hdrs proto_hdrs; /* INPUT */
+	struct virtchnl_filter_action_set actions; /* INPUT */
+	/* see enum virtchnl_fsub_prgm_status; OUTPUT */
+	s32 status;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(2616, virtchnl_flow_sub);
+
+/* VIRTCHNL_OP_FLOW_UNSUBSCRIBE
+ * VF sends this request to PF by filling out vsi_id
+ * and flow_id. PF will return status to VF.
+ */
+struct virtchnl_flow_unsub {
+	u16 vsi_id; /* INPUT */
+	u16 pad;
+	u32 flow_id; /* INPUT */
+	/* see enum virtchnl_fsub_prgm_status; OUTPUT */
+	s32 status;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_flow_unsub);
+
 /* VIRTCHNL_OP_GET_QOS_CAPS
  * VF sends this message to get its QoS Caps, such as
  * TC number, Arbiter and Bandwidth.
@@ -2318,6 +2408,12 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
 	case VIRTCHNL_OP_DEL_FDIR_FILTER:
 		valid_len = sizeof(struct virtchnl_fdir_del);
 		break;
+	case VIRTCHNL_OP_FLOW_SUBSCRIBE:
+		valid_len = sizeof(struct virtchnl_flow_sub);
+		break;
+	case VIRTCHNL_OP_FLOW_UNSUBSCRIBE:
+		valid_len = sizeof(struct virtchnl_flow_unsub);
+		break;
 	case VIRTCHNL_OP_GET_QOS_CAPS:
 		break;
 	case VIRTCHNL_OP_CONFIG_QUEUE_TC_MAP:
-- 
2.25.1


^ permalink raw reply	[flat|nested] 40+ messages in thread

* [PATCH v4 2/5] net/iavf: add flow subscription to AVF
  2022-09-07  3:35 ` [PATCH v4 " Jie Wang
  2022-09-07  3:35   ` [PATCH v4 1/5] common/iavf: " Jie Wang
@ 2022-09-07  3:35   ` Jie Wang
  2022-09-07  3:35   ` [PATCH v4 3/5] net/iavf: support flow subscription pattern Jie Wang
                     ` (2 subsequent siblings)
  4 siblings, 0 replies; 40+ messages in thread
From: Jie Wang @ 2022-09-07  3:35 UTC (permalink / raw)
  To: dev
  Cc: qiming.yang, qi.z.zhang, jingjing.wu, beilei.xing, stevex.yang, Jie Wang

Add the skeletal code of flow subscription to the AVF driver.

Signed-off-by: Jie Wang <jie1x.wang@intel.com>
---
 doc/guides/rel_notes/release_22_11.rst |   4 +
 drivers/net/iavf/iavf_fsub.c           | 112 +++++++++++++++++++++++++
 drivers/net/iavf/iavf_generic_flow.c   |  17 +++-
 drivers/net/iavf/iavf_generic_flow.h   |   1 +
 drivers/net/iavf/iavf_vchnl.c          |   1 +
 drivers/net/iavf/meson.build           |   1 +
 6 files changed, 135 insertions(+), 1 deletion(-)
 create mode 100644 drivers/net/iavf/iavf_fsub.c

diff --git a/doc/guides/rel_notes/release_22_11.rst b/doc/guides/rel_notes/release_22_11.rst
index 8c021cf050..bb77a03e24 100644
--- a/doc/guides/rel_notes/release_22_11.rst
+++ b/doc/guides/rel_notes/release_22_11.rst
@@ -55,6 +55,10 @@ New Features
      Also, make sure to start the actual text at the margin.
      =======================================================
 
+* **Updated Intel iavf driver.**
+
+  * Added flow subscription support.
+
 
 Removed Items
 -------------
diff --git a/drivers/net/iavf/iavf_fsub.c b/drivers/net/iavf/iavf_fsub.c
new file mode 100644
index 0000000000..17f9bb2976
--- /dev/null
+++ b/drivers/net/iavf/iavf_fsub.c
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2022 Intel Corporation
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_eth_ctrl.h>
+#include <rte_tailq.h>
+#include <rte_flow_driver.h>
+#include <rte_flow.h>
+#include <iavf.h>
+#include "iavf_generic_flow.h"
+
+
+static struct iavf_flow_parser iavf_fsub_parser;
+
+static struct iavf_pattern_match_item iavf_fsub_pattern_list[] = {};
+
+static int
+iavf_fsub_create(__rte_unused struct iavf_adapter *ad,
+		 __rte_unused struct rte_flow *flow,
+		 __rte_unused void *meta,
+		 __rte_unused struct rte_flow_error *error)
+{
+	return -rte_errno;
+}
+
+static int
+iavf_fsub_destroy(__rte_unused struct iavf_adapter *ad,
+		  __rte_unused struct rte_flow *flow,
+		  __rte_unused struct rte_flow_error *error)
+{
+	return -rte_errno;
+}
+
+static int
+iavf_fsub_validation(__rte_unused struct iavf_adapter *ad,
+		     __rte_unused struct rte_flow *flow,
+		     __rte_unused void *meta,
+		     __rte_unused struct rte_flow_error *error)
+{
+	return -rte_errno;
+};
+
+static int
+iavf_fsub_parse(__rte_unused struct iavf_adapter *ad,
+		__rte_unused struct iavf_pattern_match_item *array,
+		__rte_unused uint32_t array_len,
+		__rte_unused const struct rte_flow_item pattern[],
+		__rte_unused const struct rte_flow_action actions[],
+		__rte_unused void **meta,
+		__rte_unused struct rte_flow_error *error)
+{
+	return -rte_errno;
+}
+
+static int
+iavf_fsub_init(struct iavf_adapter *ad)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
+	struct iavf_flow_parser *parser;
+
+	if (!vf->vf_res)
+		return -EINVAL;
+
+	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_FSUB_PF)
+		parser = &iavf_fsub_parser;
+	else
+		return -ENOTSUP;
+
+	return iavf_register_parser(parser, ad);
+}
+
+static void
+iavf_fsub_uninit(struct iavf_adapter *ad)
+{
+	iavf_unregister_parser(&iavf_fsub_parser, ad);
+}
+
+static struct
+iavf_flow_engine iavf_fsub_engine = {
+	.init = iavf_fsub_init,
+	.uninit = iavf_fsub_uninit,
+	.create = iavf_fsub_create,
+	.destroy = iavf_fsub_destroy,
+	.validation = iavf_fsub_validation,
+	.type = IAVF_FLOW_ENGINE_FSUB,
+};
+
+static struct
+iavf_flow_parser iavf_fsub_parser = {
+	.engine = &iavf_fsub_engine,
+	.array = iavf_fsub_pattern_list,
+	.array_len = RTE_DIM(iavf_fsub_pattern_list),
+	.parse_pattern_action = iavf_fsub_parse,
+	.stage = IAVF_FLOW_STAGE_DISTRIBUTOR,
+};
+
+RTE_INIT(iavf_fsub_engine_init)
+{
+	iavf_register_flow_engine(&iavf_fsub_engine);
+}
diff --git a/drivers/net/iavf/iavf_generic_flow.c b/drivers/net/iavf/iavf_generic_flow.c
index e1a611e319..b04614ba6e 100644
--- a/drivers/net/iavf/iavf_generic_flow.c
+++ b/drivers/net/iavf/iavf_generic_flow.c
@@ -1866,6 +1866,8 @@ iavf_register_parser(struct iavf_flow_parser *parser,
 {
 	struct iavf_parser_list *list = NULL;
 	struct iavf_flow_parser_node *parser_node;
+	struct iavf_flow_parser_node *existing_node;
+	void *temp;
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
 
 	parser_node = rte_zmalloc("iavf_parser", sizeof(*parser_node), 0);
@@ -1880,14 +1882,26 @@ iavf_register_parser(struct iavf_flow_parser *parser,
 		TAILQ_INSERT_TAIL(list, parser_node, node);
 	} else if (parser->engine->type == IAVF_FLOW_ENGINE_FDIR) {
 		list = &vf->dist_parser_list;
+		RTE_TAILQ_FOREACH_SAFE(existing_node, list, node, temp) {
+			if (existing_node->parser->engine->type ==
+			    IAVF_FLOW_ENGINE_FSUB) {
+				TAILQ_INSERT_AFTER(list, existing_node,
+						   parser_node, node);
+				goto DONE;
+			}
+		}
 		TAILQ_INSERT_HEAD(list, parser_node, node);
 	} else if (parser->engine->type == IAVF_FLOW_ENGINE_IPSEC_CRYPTO) {
 		list = &vf->ipsec_crypto_parser_list;
 		TAILQ_INSERT_HEAD(list, parser_node, node);
+	} else if (parser->engine->type == IAVF_FLOW_ENGINE_FSUB) {
+		list = &vf->dist_parser_list;
+		TAILQ_INSERT_HEAD(list, parser_node, node);
 	} else {
 		return -EINVAL;
 	}
 
+DONE:
 	return 0;
 }
 
@@ -1902,7 +1916,8 @@ iavf_unregister_parser(struct iavf_flow_parser *parser,
 
 	if (parser->engine->type == IAVF_FLOW_ENGINE_HASH)
 		list = &vf->rss_parser_list;
-	else if (parser->engine->type == IAVF_FLOW_ENGINE_FDIR)
+	else if ((parser->engine->type == IAVF_FLOW_ENGINE_FDIR) ||
+		 (parser->engine->type == IAVF_FLOW_ENGINE_FSUB))
 		list = &vf->dist_parser_list;
 
 	if (list == NULL)
diff --git a/drivers/net/iavf/iavf_generic_flow.h b/drivers/net/iavf/iavf_generic_flow.h
index 52eb1caf29..448facffa5 100644
--- a/drivers/net/iavf/iavf_generic_flow.h
+++ b/drivers/net/iavf/iavf_generic_flow.h
@@ -480,6 +480,7 @@ enum iavf_flow_engine_type {
 	IAVF_FLOW_ENGINE_IPSEC_CRYPTO,
 	IAVF_FLOW_ENGINE_FDIR,
 	IAVF_FLOW_ENGINE_HASH,
+	IAVF_FLOW_ENGINE_FSUB,
 	IAVF_FLOW_ENGINE_MAX,
 };
 
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 21bd1e2193..6d84add423 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -502,6 +502,7 @@ iavf_get_vf_resource(struct iavf_adapter *adapter)
 		VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC |
 		VIRTCHNL_VF_OFFLOAD_FDIR_PF |
 		VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF |
+		VIRTCHNL_VF_OFFLOAD_FSUB_PF |
 		VIRTCHNL_VF_OFFLOAD_REQ_QUEUES |
 		VIRTCHNL_VF_OFFLOAD_CRC |
 		VIRTCHNL_VF_OFFLOAD_VLAN_V2 |
diff --git a/drivers/net/iavf/meson.build b/drivers/net/iavf/meson.build
index 2da37de662..6df771f917 100644
--- a/drivers/net/iavf/meson.build
+++ b/drivers/net/iavf/meson.build
@@ -16,6 +16,7 @@ sources = files(
         'iavf_hash.c',
         'iavf_tm.c',
         'iavf_ipsec_crypto.c',
+        'iavf_fsub.c',
 )
 
 if arch_subdir == 'x86'
-- 
2.25.1


^ permalink raw reply	[flat|nested] 40+ messages in thread

* [PATCH v4 3/5] net/iavf: support flow subscription pattern
  2022-09-07  3:35 ` [PATCH v4 " Jie Wang
  2022-09-07  3:35   ` [PATCH v4 1/5] common/iavf: " Jie Wang
  2022-09-07  3:35   ` [PATCH v4 2/5] net/iavf: add flow subscription to AVF Jie Wang
@ 2022-09-07  3:35   ` Jie Wang
  2022-09-07  3:35   ` [PATCH v4 4/5] net/iavf: support flow subscription rule Jie Wang
  2022-09-07  3:35   ` [PATCH v4 5/5] net/iavf: support priority of flow rule Jie Wang
  4 siblings, 0 replies; 40+ messages in thread
From: Jie Wang @ 2022-09-07  3:35 UTC (permalink / raw)
  To: dev
  Cc: qiming.yang, qi.z.zhang, jingjing.wu, beilei.xing, stevex.yang, Jie Wang

Add flow subscription pattern support for AVF.

The supported patterns are listed below:
eth/vlan/ipv4
eth/ipv4(6)
eth/ipv4(6)/udp
eth/ipv4(6)/tcp
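
For illustration, a minimal sketch of how the eth/ipv4/udp pattern is
expressed through the generic rte_flow API (the matched UDP source
port value here is an arbitrary assumption, not part of this patch):

	#include <rte_byteorder.h>
	#include <rte_flow.h>

	/* match eth / ipv4 / udp with source port 11; zeroed
	 * spec/mask fields act as wildcards
	 */
	static struct rte_flow_item_udp udp_spec = {
		.hdr = { .src_port = RTE_BE16(11) },
	};
	static struct rte_flow_item_udp udp_mask = {
		.hdr = { .src_port = RTE_BE16(0xffff) },
	};
	static struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
		  .spec = &udp_spec, .mask = &udp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};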

Signed-off-by: Jie Wang <jie1x.wang@intel.com>
---
 drivers/net/iavf/iavf.h      |   7 +
 drivers/net/iavf/iavf_fsub.c | 598 ++++++++++++++++++++++++++++++++++-
 2 files changed, 597 insertions(+), 8 deletions(-)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 025ab3ff60..f79c7f9f6e 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -148,6 +148,13 @@ struct iavf_fdir_info {
 	struct iavf_fdir_conf conf;
 };
 
+struct iavf_fsub_conf {
+	struct virtchnl_flow_sub sub_fltr;
+	struct virtchnl_flow_unsub unsub_fltr;
+	uint64_t input_set;
+	uint32_t flow_id;
+};
+
 struct iavf_qv_map {
 	uint16_t queue_id;
 	uint16_t vector_id;
diff --git a/drivers/net/iavf/iavf_fsub.c b/drivers/net/iavf/iavf_fsub.c
index 17f9bb2976..4600d52b91 100644
--- a/drivers/net/iavf/iavf_fsub.c
+++ b/drivers/net/iavf/iavf_fsub.c
@@ -22,9 +22,51 @@
 #include "iavf_generic_flow.h"
 
 
+#define MAX_QGRP_NUM_TYPE      7
+#define IAVF_IPV6_ADDR_LENGTH  16
+#define MAX_INPUT_SET_BYTE     32
+
+#define IAVF_SW_INSET_ETHER ( \
+	IAVF_INSET_DMAC | IAVF_INSET_SMAC | IAVF_INSET_ETHERTYPE)
+#define IAVF_SW_INSET_MAC_IPV4 ( \
+	IAVF_INSET_DMAC | IAVF_INSET_IPV4_DST | IAVF_INSET_IPV4_SRC | \
+	IAVF_INSET_IPV4_PROTO | IAVF_INSET_IPV4_TTL | IAVF_INSET_IPV4_TOS)
+#define IAVF_SW_INSET_MAC_VLAN_IPV4 ( \
+	IAVF_SW_INSET_MAC_IPV4 | IAVF_INSET_VLAN_OUTER)
+#define IAVF_SW_INSET_MAC_IPV4_TCP ( \
+	IAVF_INSET_DMAC | IAVF_INSET_IPV4_DST | IAVF_INSET_IPV4_SRC | \
+	IAVF_INSET_IPV4_TTL | IAVF_INSET_IPV4_TOS | \
+	IAVF_INSET_TCP_DST_PORT | IAVF_INSET_TCP_SRC_PORT)
+#define IAVF_SW_INSET_MAC_IPV4_UDP ( \
+	IAVF_INSET_DMAC | IAVF_INSET_IPV4_DST | IAVF_INSET_IPV4_SRC | \
+	IAVF_INSET_IPV4_TTL | IAVF_INSET_IPV4_TOS | \
+	IAVF_INSET_UDP_DST_PORT | IAVF_INSET_UDP_SRC_PORT)
+#define IAVF_SW_INSET_MAC_IPV6 ( \
+	IAVF_INSET_DMAC | IAVF_INSET_IPV6_DST | IAVF_INSET_IPV6_SRC | \
+	IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
+	IAVF_INSET_IPV6_NEXT_HDR)
+#define IAVF_SW_INSET_MAC_IPV6_TCP ( \
+	IAVF_INSET_DMAC | IAVF_INSET_IPV6_DST | IAVF_INSET_IPV6_SRC | \
+	IAVF_INSET_IPV6_HOP_LIMIT | IAVF_INSET_IPV6_TC | \
+	IAVF_INSET_TCP_DST_PORT | IAVF_INSET_TCP_SRC_PORT)
+#define IAVF_SW_INSET_MAC_IPV6_UDP ( \
+	IAVF_INSET_DMAC | IAVF_INSET_IPV6_DST | IAVF_INSET_IPV6_SRC | \
+	IAVF_INSET_IPV6_HOP_LIMIT | IAVF_INSET_IPV6_TC | \
+	IAVF_INSET_UDP_DST_PORT | IAVF_INSET_UDP_SRC_PORT)
+
 static struct iavf_flow_parser iavf_fsub_parser;
 
-static struct iavf_pattern_match_item iavf_fsub_pattern_list[] = {};
+static struct
+iavf_pattern_match_item iavf_fsub_pattern_list[] = {
+	{iavf_pattern_ethertype,			IAVF_SW_INSET_ETHER,			IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4,				IAVF_SW_INSET_MAC_IPV4,			IAVF_INSET_NONE},
+	{iavf_pattern_eth_vlan_ipv4,			IAVF_SW_INSET_MAC_VLAN_IPV4,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_udp,			IAVF_SW_INSET_MAC_IPV4_UDP,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_tcp,			IAVF_SW_INSET_MAC_IPV4_TCP,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv6,				IAVF_SW_INSET_MAC_IPV6,			IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv6_udp,			IAVF_SW_INSET_MAC_IPV6_UDP,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv6_tcp,			IAVF_SW_INSET_MAC_IPV6_TCP,		IAVF_INSET_NONE},
+};
 
 static int
 iavf_fsub_create(__rte_unused struct iavf_adapter *ad,
@@ -53,17 +95,557 @@ iavf_fsub_validation(__rte_unused struct iavf_adapter *ad,
 };
 
 static int
-iavf_fsub_parse(__rte_unused struct iavf_adapter *ad,
-		__rte_unused struct iavf_pattern_match_item *array,
-		__rte_unused uint32_t array_len,
-		__rte_unused const struct rte_flow_item pattern[],
-		__rte_unused const struct rte_flow_action actions[],
-		__rte_unused void **meta,
-		__rte_unused struct rte_flow_error *error)
+iavf_fsub_parse_pattern(const struct rte_flow_item pattern[],
+			const uint64_t input_set_mask,
+			struct rte_flow_error *error,
+			struct iavf_fsub_conf *filter)
+{
+	struct virtchnl_proto_hdrs *hdrs = &filter->sub_fltr.proto_hdrs;
+	enum rte_flow_item_type item_type;
+	const struct rte_flow_item_eth *eth_spec, *eth_mask;
+	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
+	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
+	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
+	const struct rte_flow_item_udp *udp_spec, *udp_mask;
+	const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
+	const struct rte_flow_item *item = pattern;
+	struct virtchnl_proto_hdr_w_msk *hdr, *hdr1 = NULL;
+	uint64_t outer_input_set = IAVF_INSET_NONE;
+	uint64_t *input = NULL;
+	uint16_t input_set_byte = 0;
+	uint16_t j;
+	uint32_t layer = 0;
+
+	for (item = pattern; item->type !=
+			RTE_FLOW_ITEM_TYPE_END; item++) {
+		if (item->last) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item, "Range not supported");
+			return -rte_errno;
+		}
+		item_type = item->type;
+
+		switch (item_type) {
+		case RTE_FLOW_ITEM_TYPE_ETH:
+			eth_spec = item->spec;
+			eth_mask = item->mask;
+
+			hdr1 = &hdrs->proto_hdr_w_msk[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr1, ETH);
+
+			if (eth_spec && eth_mask) {
+				input = &outer_input_set;
+
+				if (!rte_is_zero_ether_addr(&eth_mask->dst)) {
+					*input |= IAVF_INSET_DMAC;
+					input_set_byte += 6;
+				} else {
+					/* flow subscribe filter will add dst mac in kernel */
+					input_set_byte += 6;
+				}
+
+				if (!rte_is_zero_ether_addr(&eth_mask->src)) {
+					*input |= IAVF_INSET_SMAC;
+					input_set_byte += 6;
+				}
+
+				if (eth_mask->type) {
+					*input |= IAVF_INSET_ETHERTYPE;
+					input_set_byte += 2;
+				}
+
+				rte_memcpy(hdr1->buffer_spec, eth_spec,
+					   sizeof(struct rte_ether_hdr));
+				rte_memcpy(hdr1->buffer_mask, eth_mask,
+					   sizeof(struct rte_ether_hdr));
+			} else {
+				/* flow subscribe filter will add dst mac in kernel */
+				input_set_byte += 6;
+			}
+
+			hdrs->count = ++layer;
+			break;
+		case RTE_FLOW_ITEM_TYPE_IPV4:
+			ipv4_spec = item->spec;
+			ipv4_mask = item->mask;
+
+			hdr = &hdrs->proto_hdr_w_msk[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4);
+
+			if (ipv4_spec && ipv4_mask) {
+				input = &outer_input_set;
+				/* Check IPv4 mask and update input set */
+				if (ipv4_mask->hdr.version_ihl ||
+					ipv4_mask->hdr.total_length ||
+					ipv4_mask->hdr.packet_id ||
+					ipv4_mask->hdr.hdr_checksum) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item, "Invalid IPv4 mask.");
+					return -rte_errno;
+				}
+
+				if (ipv4_mask->hdr.src_addr) {
+					*input |= IAVF_INSET_IPV4_SRC;
+					input_set_byte += 2;
+				}
+				if (ipv4_mask->hdr.dst_addr) {
+					*input |= IAVF_INSET_IPV4_DST;
+					input_set_byte += 2;
+				}
+				if (ipv4_mask->hdr.time_to_live) {
+					*input |= IAVF_INSET_IPV4_TTL;
+					input_set_byte++;
+				}
+				if (ipv4_mask->hdr.next_proto_id) {
+					*input |= IAVF_INSET_IPV4_PROTO;
+					input_set_byte++;
+				}
+				if (ipv4_mask->hdr.type_of_service) {
+					*input |= IAVF_INSET_IPV4_TOS;
+					input_set_byte++;
+				}
+
+				rte_memcpy(hdr->buffer_spec, &ipv4_spec->hdr,
+					   sizeof(ipv4_spec->hdr));
+				rte_memcpy(hdr->buffer_mask, &ipv4_mask->hdr,
+					   sizeof(ipv4_spec->hdr));
+			}
+
+			hdrs->count = ++layer;
+			break;
+		case RTE_FLOW_ITEM_TYPE_IPV6:
+			ipv6_spec = item->spec;
+			ipv6_mask = item->mask;
+
+			hdr = &hdrs->proto_hdr_w_msk[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV6);
+
+			if (ipv6_spec && ipv6_mask) {
+				input = &outer_input_set;
+
+				if (ipv6_mask->hdr.payload_len) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item, "Invalid IPv6 mask");
+					return -rte_errno;
+				}
+
+				for (j = 0; j < IAVF_IPV6_ADDR_LENGTH; j++) {
+					if (ipv6_mask->hdr.src_addr[j]) {
+						*input |= IAVF_INSET_IPV6_SRC;
+						break;
+					}
+				}
+				for (j = 0; j < IAVF_IPV6_ADDR_LENGTH; j++) {
+					if (ipv6_mask->hdr.dst_addr[j]) {
+						*input |= IAVF_INSET_IPV6_DST;
+						break;
+					}
+				}
+
+				for (j = 0; j < IAVF_IPV6_ADDR_LENGTH; j++) {
+					if (ipv6_mask->hdr.src_addr[j])
+						input_set_byte++;
+
+					if (ipv6_mask->hdr.dst_addr[j])
+						input_set_byte++;
+				}
+
+				if (ipv6_mask->hdr.proto) {
+					*input |= IAVF_INSET_IPV6_NEXT_HDR;
+					input_set_byte++;
+				}
+				if (ipv6_mask->hdr.hop_limits) {
+					*input |= IAVF_INSET_IPV6_HOP_LIMIT;
+					input_set_byte++;
+				}
+				if (ipv6_mask->hdr.vtc_flow &
+				    rte_cpu_to_be_32(RTE_IPV6_HDR_TC_MASK)) {
+					*input |= IAVF_INSET_IPV6_TC;
+					input_set_byte += 4;
+				}
+
+				rte_memcpy(hdr->buffer_spec, &ipv6_spec->hdr,
+					   sizeof(ipv6_spec->hdr));
+				rte_memcpy(hdr->buffer_mask, &ipv6_mask->hdr,
+					   sizeof(ipv6_spec->hdr));
+			}
+
+			hdrs->count = ++layer;
+			break;
+		case RTE_FLOW_ITEM_TYPE_UDP:
+			udp_spec = item->spec;
+			udp_mask = item->mask;
+
+			hdr = &hdrs->proto_hdr_w_msk[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, UDP);
+
+			if (udp_spec && udp_mask) {
+				input = &outer_input_set;
+				/* Check UDP mask and update input set */
+				if (udp_mask->hdr.dgram_len ||
+				    udp_mask->hdr.dgram_cksum) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item, "Invalid UDP mask");
+					return -rte_errno;
+				}
+
+				if (udp_mask->hdr.src_port) {
+					*input |= IAVF_INSET_UDP_SRC_PORT;
+					input_set_byte += 2;
+				}
+				if (udp_mask->hdr.dst_port) {
+					*input |= IAVF_INSET_UDP_DST_PORT;
+					input_set_byte += 2;
+				}
+
+				rte_memcpy(hdr->buffer_spec, &udp_spec->hdr,
+					   sizeof(udp_spec->hdr));
+				rte_memcpy(hdr->buffer_mask, &udp_mask->hdr,
+					   sizeof(udp_mask->hdr));
+			}
+
+			hdrs->count = ++layer;
+			break;
+		case RTE_FLOW_ITEM_TYPE_TCP:
+			tcp_spec = item->spec;
+			tcp_mask = item->mask;
+
+			hdr = &hdrs->proto_hdr_w_msk[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, TCP);
+
+			if (tcp_spec && tcp_mask) {
+				input = &outer_input_set;
+				/* Check TCP mask and update input set */
+				if (tcp_mask->hdr.sent_seq ||
+					tcp_mask->hdr.recv_ack ||
+					tcp_mask->hdr.data_off ||
+					tcp_mask->hdr.tcp_flags ||
+					tcp_mask->hdr.rx_win ||
+					tcp_mask->hdr.cksum ||
+					tcp_mask->hdr.tcp_urp) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item, "Invalid TCP mask");
+					return -rte_errno;
+				}
+
+				if (tcp_mask->hdr.src_port) {
+					*input |= IAVF_INSET_TCP_SRC_PORT;
+					input_set_byte += 2;
+				}
+				if (tcp_mask->hdr.dst_port) {
+					*input |= IAVF_INSET_TCP_DST_PORT;
+					input_set_byte += 2;
+				}
+
+				rte_memcpy(hdr->buffer_spec, &tcp_spec->hdr,
+					   sizeof(tcp_spec->hdr));
+				rte_memcpy(hdr->buffer_mask, &tcp_mask->hdr,
+					   sizeof(tcp_mask->hdr));
+			}
+
+			hdrs->count = ++layer;
+			break;
+		case RTE_FLOW_ITEM_TYPE_VLAN:
+			vlan_spec = item->spec;
+			vlan_mask = item->mask;
+
+			hdr = &hdrs->proto_hdr_w_msk[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, S_VLAN);
+
+			if (vlan_spec && vlan_mask) {
+				input = &outer_input_set;
+
+				*input |= IAVF_INSET_VLAN_OUTER;
+
+				if (vlan_mask->tci)
+					input_set_byte += 2;
+
+				if (vlan_mask->inner_type) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item,
+						"Invalid VLAN input set.");
+					return -rte_errno;
+				}
+
+				rte_memcpy(hdr->buffer_spec, &vlan_spec->hdr,
+					   sizeof(vlan_spec->hdr));
+				rte_memcpy(hdr->buffer_mask, &vlan_mask->hdr,
+					   sizeof(vlan_mask->hdr));
+			}
+
+			hdrs->count = ++layer;
+			break;
+		case RTE_FLOW_ITEM_TYPE_VOID:
+			break;
+		default:
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM, pattern,
+					   "Invalid pattern item.");
+			return -rte_errno;
+		}
+	}
+
+	hdrs->count += VIRTCHNL_MAX_NUM_PROTO_HDRS;
+
+	if (input_set_byte > MAX_INPUT_SET_BYTE) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+				   item, "too many input set bytes");
+		return -rte_errno;
+	}
+
+	if (!outer_input_set || (outer_input_set & ~input_set_mask))
+		return -rte_errno;
+
+	return 0;
+}
+
+static int
+iavf_fsub_parse_action(struct iavf_adapter *ad,
+		       const struct rte_flow_action *actions,
+		       uint32_t priority,
+		       struct rte_flow_error *error,
+		       struct iavf_fsub_conf *filter)
 {
+	const struct rte_flow_action *action;
+	const struct rte_flow_action_ethdev *act_ethdev;
+	const struct rte_flow_action_queue *act_q;
+	const struct rte_flow_action_rss *act_qgrop;
+	struct virtchnl_filter_action *filter_action;
+	uint16_t valid_qgrop_number[MAX_QGRP_NUM_TYPE] = {
+		2, 4, 8, 16, 32, 64, 128};
+	uint16_t i, num = 0, dest_num = 0, vf_num = 0;
+	uint16_t rule_port_id;
+
+	for (action = actions; action->type !=
+				RTE_FLOW_ACTION_TYPE_END; action++) {
+		switch (action->type) {
+		case RTE_FLOW_ACTION_TYPE_VOID:
+			break;
+
+		case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
+			vf_num++;
+			filter_action = &filter->sub_fltr.actions.actions[num];
+
+			act_ethdev = action->conf;
+			rule_port_id = ad->dev_data->port_id;
+			if (rule_port_id != act_ethdev->port_id)
+				goto error1;
+
+			filter->sub_fltr.actions.count = ++num;
+			break;
+		case RTE_FLOW_ACTION_TYPE_QUEUE:
+			dest_num++;
+			filter_action = &filter->sub_fltr.actions.actions[num];
+
+			act_q = action->conf;
+			if (act_q->index >= ad->dev_data->nb_rx_queues)
+				goto error2;
+
+			filter_action->type = VIRTCHNL_ACTION_QUEUE;
+			filter_action->act_conf.queue.index = act_q->index;
+			filter->sub_fltr.actions.count = ++num;
+			break;
+		case RTE_FLOW_ACTION_TYPE_RSS:
+			dest_num++;
+			filter_action = &filter->sub_fltr.actions.actions[num];
+
+			act_qgrop = action->conf;
+			if (act_qgrop->queue_num <= 1)
+				goto error2;
+
+			filter_action->type = VIRTCHNL_ACTION_Q_REGION;
+			filter_action->act_conf.queue.index =
+							act_qgrop->queue[0];
+			for (i = 0; i < MAX_QGRP_NUM_TYPE; i++) {
+				if (act_qgrop->queue_num ==
+				    valid_qgrop_number[i])
+					break;
+			}
+
+			if (i == MAX_QGRP_NUM_TYPE)
+				goto error2;
+
+			if ((act_qgrop->queue[0] + act_qgrop->queue_num) >
+			    ad->dev_data->nb_rx_queues)
+				goto error3;
+
+			for (i = 0; i < act_qgrop->queue_num - 1; i++)
+				if (act_qgrop->queue[i + 1] !=
+				    act_qgrop->queue[i] + 1)
+					goto error4;
+
+			filter_action->act_conf.queue.region = act_qgrop->queue_num;
+			filter->sub_fltr.actions.count = ++num;
+			break;
+		default:
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ACTION,
+					   actions, "Invalid action type");
+			return -rte_errno;
+		}
+	}
+
+	/* 0 denotes lowest priority of recipe and highest priority
+	 * of rte_flow. Change rte_flow priority into recipe priority.
+	 */
+	filter->sub_fltr.priority = priority;
+
+	if (num > VIRTCHNL_MAX_NUM_ACTIONS) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ACTION, actions,
+				   "Action numbers exceed the maximum value");
+		return -rte_errno;
+	}
+
+	if (vf_num == 0) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ACTION, actions,
+				   "Invalid action, vf action must be added");
+		return -rte_errno;
+	}
+
+	if (dest_num >= 2) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ACTION, actions,
+				   "Unsupported action combination");
+		return -rte_errno;
+	}
+
+	return 0;
+
+error1:
+	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, actions,
+			   "Invalid ethdev_port_id");
+	return -rte_errno;
+
+error2:
+	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, actions,
+			   "Invalid action type or queue number");
+	return -rte_errno;
+
+error3:
+	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, actions,
+			   "Invalid queue region indexes");
+	return -rte_errno;
+
+error4:
+	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, actions,
+			   "Discontinuous queue region");
 	return -rte_errno;
 }
 
+static int
+iavf_fsub_check_action(const struct rte_flow_action *actions,
+		       struct rte_flow_error *error)
+{
+	const struct rte_flow_action *action;
+	enum rte_flow_action_type action_type;
+	uint16_t actions_num = 0;
+	bool vf_valid = false;
+	bool queue_valid = false;
+
+	for (action = actions; action->type !=
+				RTE_FLOW_ACTION_TYPE_END; action++) {
+		action_type = action->type;
+		switch (action_type) {
+		case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
+			vf_valid = true;
+			actions_num++;
+			break;
+		case RTE_FLOW_ACTION_TYPE_RSS:
+		case RTE_FLOW_ACTION_TYPE_QUEUE:
+			queue_valid = true;
+			actions_num++;
+			break;
+		case RTE_FLOW_ACTION_TYPE_DROP:
+			actions_num++;
+			break;
+		case RTE_FLOW_ACTION_TYPE_VOID:
+			continue;
+		default:
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ACTION,
+					   actions, "Invalid action type");
+			return -rte_errno;
+		}
+	}
+
+	if (!((actions_num == 1 && !queue_valid) ||
+	      (actions_num == 2 && vf_valid && queue_valid))) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+				   actions, "Invalid action number");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+static int
+iavf_fsub_parse(struct iavf_adapter *ad,
+		struct iavf_pattern_match_item *array,
+		uint32_t array_len,
+		const struct rte_flow_item pattern[],
+		const struct rte_flow_action actions[],
+		void **meta,
+		struct rte_flow_error *error)
+{
+	struct iavf_fsub_conf *filter;
+	struct iavf_pattern_match_item *pattern_match_item = NULL;
+	int ret = 0;
+	uint32_t priority = 0;
+
+	filter = rte_zmalloc(NULL, sizeof(*filter), 0);
+	if (!filter) {
+		rte_flow_error_set(error, ENOMEM,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "No memory for iavf_fsub_conf_ptr");
+		return -rte_errno;
+	}
+
+	/* search flow subscribe pattern */
+	pattern_match_item = iavf_search_pattern_match_item(pattern, array,
+							    array_len, error);
+	if (!pattern_match_item)
+		return -rte_errno;
+
+	/* parse flow subscribe pattern */
+	ret = iavf_fsub_parse_pattern(pattern,
+				      pattern_match_item->input_set_mask,
+				      error, filter);
+	if (ret)
+		goto error;
+
+	/* check flow subscribe pattern action */
+	ret = iavf_fsub_check_action(actions, error);
+	if (ret)
+		goto error;
+
+	/* parse flow subscribe pattern action */
+	ret = iavf_fsub_parse_action((void *)ad, actions, priority,
+				     error, filter);
+	if (ret)
+		goto error;
+
+	if (meta)
+		*meta = filter;
+
+error:
+	rte_free(pattern_match_item);
+	return ret;
+}
+
 static int
 iavf_fsub_init(struct iavf_adapter *ad)
 {
-- 
2.25.1


^ permalink raw reply	[flat|nested] 40+ messages in thread

* [PATCH v4 4/5] net/iavf: support flow subscription rule
  2022-09-07  3:35 ` [PATCH v4 " Jie Wang
                     ` (2 preceding siblings ...)
  2022-09-07  3:35   ` [PATCH v4 3/5] net/iavf: support flow subscrption pattern Jie Wang
@ 2022-09-07  3:35   ` Jie Wang
  2022-09-07  3:35   ` [PATCH v4 5/5] net/iavf: support priority of flow rule Jie Wang
  4 siblings, 0 replies; 40+ messages in thread
From: Jie Wang @ 2022-09-07  3:35 UTC (permalink / raw)
  To: dev
  Cc: qiming.yang, qi.z.zhang, jingjing.wu, beilei.xing, stevex.yang, Jie Wang

Support create/destroy/validate operations for flow subscription
rules in AVF.

For examples:
testpmd> flow create 0 ingress pattern eth / ipv4 / udp src is 11
          / end actions represented_port ethdev_port_id 1 / end
testpmd> flow validate 1 ingress pattern eth / ipv4 / tcp src is 22
          / end actions represented_port ethdev_port_id 1 / end
testpmd> flow destroy 1 rule 0

A VF subscribing to a rule means the matching packets will be sent to
the VF instead of the PF, and only the VF will receive the packets.

Multiple VFs are allowed to subscribe to the same rule; the packets
will be replicated and received by each of those VFs.

PF will destroy all subscriptions during VF reset.
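
For illustration, a rough rte_flow API equivalent of the first testpmd
command above, as a sketch under the assumption that the VF is ethdev
port 0 and its representor is port 1 (a wildcard eth/ipv4/udp match is
used here instead of the testpmd port match):

	#include <stdio.h>
	#include <rte_flow.h>

	static int
	subscribe_udp_flow(uint16_t vf_port, uint16_t repr_port)
	{
		struct rte_flow_action_ethdev rep = { .port_id = repr_port };
		struct rte_flow_action actions[] = {
			{ .type = RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT,
			  .conf = &rep },
			{ .type = RTE_FLOW_ACTION_TYPE_END },
		};
		struct rte_flow_item pattern[] = {
			{ .type = RTE_FLOW_ITEM_TYPE_ETH },
			{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
			{ .type = RTE_FLOW_ITEM_TYPE_UDP },
			{ .type = RTE_FLOW_ITEM_TYPE_END },
		};
		struct rte_flow_attr attr = { .ingress = 1 };
		struct rte_flow_error err;
		struct rte_flow *flow;

		/* subscribe: matching packets are steered to the VF */
		flow = rte_flow_create(vf_port, &attr, pattern, actions, &err);
		if (flow == NULL) {
			printf("subscribe failed: %s\n",
			       err.message ? err.message : "(none)");
			return -1;
		}
		/* unsubscribe again */
		return rte_flow_destroy(vf_port, flow, &err);
	}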

Signed-off-by: Jie Wang <jie1x.wang@intel.com>
---
 drivers/net/iavf/iavf.h       |   6 ++
 drivers/net/iavf/iavf_fsub.c  |  75 +++++++++++++++----
 drivers/net/iavf/iavf_vchnl.c | 132 ++++++++++++++++++++++++++++++++++
 3 files changed, 201 insertions(+), 12 deletions(-)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index f79c7f9f6e..26b858f6f0 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -489,4 +489,10 @@ int iavf_ipsec_crypto_request(struct iavf_adapter *adapter,
 extern const struct rte_tm_ops iavf_tm_ops;
 int iavf_get_ptp_cap(struct iavf_adapter *adapter);
 int iavf_get_phc_time(struct iavf_rx_queue *rxq);
+int iavf_flow_sub(struct iavf_adapter *adapter,
+		  struct iavf_fsub_conf *filter);
+int iavf_flow_unsub(struct iavf_adapter *adapter,
+		    struct iavf_fsub_conf *filter);
+int iavf_flow_sub_check(struct iavf_adapter *adapter,
+			struct iavf_fsub_conf *filter);
 #endif /* _IAVF_ETHDEV_H_ */
diff --git a/drivers/net/iavf/iavf_fsub.c b/drivers/net/iavf/iavf_fsub.c
index 4600d52b91..b9ad3531ff 100644
--- a/drivers/net/iavf/iavf_fsub.c
+++ b/drivers/net/iavf/iavf_fsub.c
@@ -69,29 +69,80 @@ iavf_pattern_match_item iavf_fsub_pattern_list[] = {
 };
 
 static int
-iavf_fsub_create(__rte_unused struct iavf_adapter *ad,
-		 __rte_unused struct rte_flow *flow,
-		 __rte_unused void *meta,
-		 __rte_unused struct rte_flow_error *error)
+iavf_fsub_create(struct iavf_adapter *ad, struct rte_flow *flow,
+		 void *meta, struct rte_flow_error *error)
 {
+	struct iavf_fsub_conf *filter = meta;
+	struct iavf_fsub_conf *rule;
+	int ret;
+
+	rule = rte_zmalloc("fsub_entry", sizeof(*rule), 0);
+	if (!rule) {
+		rte_flow_error_set(error, ENOMEM,
+				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				"Failed to allocate memory for fsub rule");
+		return -rte_errno;
+	}
+
+	ret = iavf_flow_sub(ad, filter);
+	if (ret) {
+		rte_flow_error_set(error, -ret,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Failed to subscribe flow rule.");
+		goto free_entry;
+	}
+
+	rte_memcpy(rule, filter, sizeof(*rule));
+	flow->rule = rule;
+
+	return ret;
+
+free_entry:
+	rte_free(rule);
 	return -rte_errno;
 }
 
 static int
-iavf_fsub_destroy(__rte_unused struct iavf_adapter *ad,
-		  __rte_unused struct rte_flow *flow,
-		  __rte_unused struct rte_flow_error *error)
+iavf_fsub_destroy(struct iavf_adapter *ad, struct rte_flow *flow,
+		  struct rte_flow_error *error)
 {
-	return -rte_errno;
+	struct iavf_fsub_conf *filter;
+	int ret;
+
+	filter = (struct iavf_fsub_conf *)flow->rule;
+
+	ret = iavf_flow_unsub(ad, filter);
+	if (ret) {
+		rte_flow_error_set(error, -ret,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Failed to unsubscribe flow rule.");
+		return -rte_errno;
+	}
+
+	flow->rule = NULL;
+	rte_free(filter);
+
+	return ret;
 }
 
 static int
-iavf_fsub_validation(__rte_unused struct iavf_adapter *ad,
+iavf_fsub_validation(struct iavf_adapter *ad,
 		     __rte_unused struct rte_flow *flow,
-		     __rte_unused void *meta,
-		     __rte_unused struct rte_flow_error *error)
+		     void *meta,
+		     struct rte_flow_error *error)
 {
-	return -rte_errno;
+	struct iavf_fsub_conf *filter = meta;
+	int ret;
+
+	ret = iavf_flow_sub_check(ad, filter);
+	if (ret) {
+		rte_flow_error_set(error, -ret,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Failed to validate filter rule.");
+		return -rte_errno;
+	}
+
+	return ret;
 };
 
 static int
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 6d84add423..cc0db8d093 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -1534,6 +1534,138 @@ iavf_fdir_check(struct iavf_adapter *adapter,
 	return 0;
 }
 
+int
+iavf_flow_sub(struct iavf_adapter *adapter, struct iavf_fsub_conf *filter)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct virtchnl_flow_sub *fsub_cfg;
+	struct iavf_cmd_info args;
+	int err;
+
+	filter->sub_fltr.vsi_id = vf->vsi_res->vsi_id;
+	filter->sub_fltr.validate_only = 0;
+
+	memset(&args, 0, sizeof(args));
+	args.ops = VIRTCHNL_OP_FLOW_SUBSCRIBE;
+	args.in_args = (uint8_t *)(&filter->sub_fltr);
+	args.in_args_size = sizeof(filter->sub_fltr);
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
+	if (err)
+		PMD_DRV_LOG(ERR, "Failed to execute command of "
+				 "OP_FLOW_SUBSCRIBE");
+
+	fsub_cfg = (struct virtchnl_flow_sub *)args.out_buffer;
+	filter->flow_id = fsub_cfg->flow_id;
+
+	if (fsub_cfg->status == VIRTCHNL_FSUB_SUCCESS) {
+		PMD_DRV_LOG(INFO, "Succeed in adding rule request by PF");
+	} else if (fsub_cfg->status == VIRTCHNL_FSUB_FAILURE_RULE_NORESOURCE) {
+		PMD_DRV_LOG(ERR, "Failed to add rule request due to no hw "
+				 "resource");
+		err = -1;
+	} else if (fsub_cfg->status == VIRTCHNL_FSUB_FAILURE_RULE_EXIST) {
+		PMD_DRV_LOG(ERR, "Failed to add rule request because the "
+				 "rule already exists");
+		err = -1;
+	} else if (fsub_cfg->status == VIRTCHNL_FSUB_FAILURE_RULE_INVALID) {
+		PMD_DRV_LOG(ERR, "Failed to add rule request because the HW "
+				 "doesn't support it");
+		err = -1;
+	} else {
+		PMD_DRV_LOG(ERR, "Failed to add rule request due to other "
+				 "reasons");
+		err = -1;
+	}
+
+	return err;
+}
+
+int
+iavf_flow_unsub(struct iavf_adapter *adapter, struct iavf_fsub_conf *filter)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct virtchnl_flow_unsub *unsub_cfg;
+	struct iavf_cmd_info args;
+	int err;
+
+	filter->unsub_fltr.vsi_id = vf->vsi_res->vsi_id;
+	filter->unsub_fltr.flow_id = filter->flow_id;
+
+	memset(&args, 0, sizeof(args));
+	args.ops = VIRTCHNL_OP_FLOW_UNSUBSCRIBE;
+	args.in_args = (uint8_t *)(&filter->unsub_fltr);
+	args.in_args_size = sizeof(filter->unsub_fltr);
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
+	if (err)
+		PMD_DRV_LOG(ERR, "Failed to execute command of "
+				 "OP_FLOW_UNSUBSCRIBE");
+
+	unsub_cfg = (struct virtchnl_flow_unsub *)args.out_buffer;
+
+	if (unsub_cfg->status == VIRTCHNL_FSUB_SUCCESS) {
+		PMD_DRV_LOG(INFO, "Succeed in deleting rule request by PF");
+	} else if (unsub_cfg->status == VIRTCHNL_FSUB_FAILURE_RULE_NONEXIST) {
+		PMD_DRV_LOG(ERR, "Failed to delete rule request because this "
+				 "rule doesn't exist");
+		err = -1;
+	} else {
+		PMD_DRV_LOG(ERR, "Failed to delete rule request due to other "
+				 "reasons");
+		err = -1;
+	}
+
+	return err;
+}
+
+int
+iavf_flow_sub_check(struct iavf_adapter *adapter,
+		    struct iavf_fsub_conf *filter)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct virtchnl_flow_sub *fsub_cfg;
+
+	struct iavf_cmd_info args;
+	int err;
+
+	filter->sub_fltr.vsi_id = vf->vsi_res->vsi_id;
+	filter->sub_fltr.validate_only = 1;
+
+	args.ops = VIRTCHNL_OP_FLOW_SUBSCRIBE;
+	args.in_args = (uint8_t *)(&filter->sub_fltr);
+	args.in_args_size = sizeof(filter->sub_fltr);
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
+	if (err) {
+		PMD_DRV_LOG(ERR, "Failed to check flow subscription rule");
+		return err;
+	}
+
+	fsub_cfg = (struct virtchnl_flow_sub *)args.out_buffer;
+
+	if (fsub_cfg->status == VIRTCHNL_FSUB_SUCCESS) {
+		PMD_DRV_LOG(INFO, "Succeed in checking rule request by PF");
+	} else if (fsub_cfg->status == VIRTCHNL_FSUB_FAILURE_RULE_INVALID) {
+		PMD_DRV_LOG(ERR, "Failed to check rule request because "
+				 "parameter validation failed or the HW "
+				 "doesn't support it");
+		err = -1;
+	} else {
+		PMD_DRV_LOG(ERR, "Failed to check rule request due to other "
+				 "reasons");
+		err = -1;
+	}
+
+	return err;
+}
+
 int
 iavf_add_del_rss_cfg(struct iavf_adapter *adapter,
 		     struct virtchnl_rss_cfg *rss_cfg, bool add)
-- 
2.25.1


^ permalink raw reply	[flat|nested] 40+ messages in thread

* [PATCH v4 5/5] net/iavf: support priority of flow rule
  2022-09-07  3:35 ` [PATCH v4 " Jie Wang
                     ` (3 preceding siblings ...)
  2022-09-07  3:35   ` [PATCH v4 4/5] net/iavf: support flow subscription rule Jie Wang
@ 2022-09-07  3:35   ` Jie Wang
  4 siblings, 0 replies; 40+ messages in thread
From: Jie Wang @ 2022-09-07  3:35 UTC (permalink / raw)
  To: dev
  Cc: qiming.yang, qi.z.zhang, jingjing.wu, beilei.xing, stevex.yang, Jie Wang

Add flow rule attribute "priority" support for AVF.

Lower values denote higher priority; the highest priority for
a flow rule is 0.

All subscription rules have a lower priority than the rules
created by the host.
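
A sketch of the resulting attribute usage on the application side (an
assumed illustration, not code from this patch): with this series,
iavf accepts priority 0 or 1, and a rule given priority 1 can only be
taken by the flow subscription parser, since the FDIR/RSS/ipsec
parsers reject any non-zero priority:

	struct rte_flow_attr attr = {
		.ingress = 1,
		/* 0 = highest priority (host-created rules);
		 * 1 = flow subscription priority
		 */
		.priority = 1,
	};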

Signed-off-by: Jie Wang <jie1x.wang@intel.com>
---
 drivers/net/iavf/iavf_fdir.c         |  4 ++++
 drivers/net/iavf/iavf_fsub.c         |  2 +-
 drivers/net/iavf/iavf_generic_flow.c | 23 +++++++++++++----------
 drivers/net/iavf/iavf_generic_flow.h |  1 +
 drivers/net/iavf/iavf_hash.c         |  5 +++++
 drivers/net/iavf/iavf_ipsec_crypto.c | 16 ++++++++++------
 6 files changed, 34 insertions(+), 17 deletions(-)

diff --git a/drivers/net/iavf/iavf_fdir.c b/drivers/net/iavf/iavf_fdir.c
index a397047fdb..8f80873925 100644
--- a/drivers/net/iavf/iavf_fdir.c
+++ b/drivers/net/iavf/iavf_fdir.c
@@ -1583,6 +1583,7 @@ iavf_fdir_parse(struct iavf_adapter *ad,
 		uint32_t array_len,
 		const struct rte_flow_item pattern[],
 		const struct rte_flow_action actions[],
+		uint32_t priority,
 		void **meta,
 		struct rte_flow_error *error)
 {
@@ -1593,6 +1594,9 @@ iavf_fdir_parse(struct iavf_adapter *ad,
 
 	memset(filter, 0, sizeof(*filter));
 
+	if (priority >= 1)
+		return -rte_errno;
+
 	item = iavf_search_pattern_match_item(pattern, array, array_len, error);
 	if (!item)
 		return -rte_errno;
diff --git a/drivers/net/iavf/iavf_fsub.c b/drivers/net/iavf/iavf_fsub.c
index b9ad3531ff..46effda9a0 100644
--- a/drivers/net/iavf/iavf_fsub.c
+++ b/drivers/net/iavf/iavf_fsub.c
@@ -649,13 +649,13 @@ iavf_fsub_parse(struct iavf_adapter *ad,
 		uint32_t array_len,
 		const struct rte_flow_item pattern[],
 		const struct rte_flow_action actions[],
+		uint32_t priority,
 		void **meta,
 		struct rte_flow_error *error)
 {
 	struct iavf_fsub_conf *filter;
 	struct iavf_pattern_match_item *pattern_match_item = NULL;
 	int ret = 0;
-	uint32_t priority = 0;
 
 	filter = rte_zmalloc(NULL, sizeof(*filter), 0);
 	if (!filter) {
diff --git a/drivers/net/iavf/iavf_generic_flow.c b/drivers/net/iavf/iavf_generic_flow.c
index b04614ba6e..f33c764764 100644
--- a/drivers/net/iavf/iavf_generic_flow.c
+++ b/drivers/net/iavf/iavf_generic_flow.c
@@ -1785,6 +1785,7 @@ enum rte_flow_item_type iavf_pattern_eth_ipv6_udp_l2tpv2_ppp_ipv6_tcp[] = {
 typedef struct iavf_flow_engine * (*parse_engine_t)(struct iavf_adapter *ad,
 		struct rte_flow *flow,
 		struct iavf_parser_list *parser_list,
+		uint32_t priority,
 		const struct rte_flow_item pattern[],
 		const struct rte_flow_action actions[],
 		struct rte_flow_error *error);
@@ -1951,11 +1952,11 @@ iavf_flow_valid_attr(const struct rte_flow_attr *attr,
 		return -rte_errno;
 	}
 
-	/* Not supported */
-	if (attr->priority) {
+	/* support priority for flow subscribe */
+	if (attr->priority > 1) {
 		rte_flow_error_set(error, EINVAL,
 				RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
-				attr, "Not support priority.");
+				attr, "Only support priority 0 and 1.");
 		return -rte_errno;
 	}
 
@@ -2098,6 +2099,7 @@ static struct iavf_flow_engine *
 iavf_parse_engine_create(struct iavf_adapter *ad,
 		struct rte_flow *flow,
 		struct iavf_parser_list *parser_list,
+		uint32_t priority,
 		const struct rte_flow_item pattern[],
 		const struct rte_flow_action actions[],
 		struct rte_flow_error *error)
@@ -2111,7 +2113,7 @@ iavf_parse_engine_create(struct iavf_adapter *ad,
 		if (parser_node->parser->parse_pattern_action(ad,
 				parser_node->parser->array,
 				parser_node->parser->array_len,
-				pattern, actions, &meta, error) < 0)
+				pattern, actions, priority, &meta, error) < 0)
 			continue;
 
 		engine = parser_node->parser->engine;
@@ -2127,6 +2129,7 @@ static struct iavf_flow_engine *
 iavf_parse_engine_validate(struct iavf_adapter *ad,
 		struct rte_flow *flow,
 		struct iavf_parser_list *parser_list,
+		uint32_t priority,
 		const struct rte_flow_item pattern[],
 		const struct rte_flow_action actions[],
 		struct rte_flow_error *error)
@@ -2140,7 +2143,7 @@ iavf_parse_engine_validate(struct iavf_adapter *ad,
 		if (parser_node->parser->parse_pattern_action(ad,
 				parser_node->parser->array,
 				parser_node->parser->array_len,
-				pattern, actions, &meta,  error) < 0)
+				pattern, actions, priority, &meta, error) < 0)
 			continue;
 
 		engine = parser_node->parser->engine;
@@ -2201,18 +2204,18 @@ iavf_flow_process_filter(struct rte_eth_dev *dev,
 	if (ret)
 		return ret;
 
-	*engine = iavf_parse_engine(ad, flow, &vf->rss_parser_list, pattern,
-				    actions, error);
+	*engine = iavf_parse_engine(ad, flow, &vf->rss_parser_list,
+				    attr->priority, pattern, actions, error);
 	if (*engine)
 		return 0;
 
-	*engine = iavf_parse_engine(ad, flow, &vf->dist_parser_list, pattern,
-				    actions, error);
+	*engine = iavf_parse_engine(ad, flow, &vf->dist_parser_list,
+				    attr->priority, pattern, actions, error);
 	if (*engine)
 		return 0;
 
 	*engine = iavf_parse_engine(ad, flow, &vf->ipsec_crypto_parser_list,
-			pattern, actions, error);
+				    attr->priority, pattern, actions, error);
 	if (*engine)
 		return 0;
 
diff --git a/drivers/net/iavf/iavf_generic_flow.h b/drivers/net/iavf/iavf_generic_flow.h
index 448facffa5..60d8ab02b4 100644
--- a/drivers/net/iavf/iavf_generic_flow.h
+++ b/drivers/net/iavf/iavf_generic_flow.h
@@ -471,6 +471,7 @@ typedef int (*parse_pattern_action_t)(struct iavf_adapter *ad,
 		uint32_t array_len,
 		const struct rte_flow_item pattern[],
 		const struct rte_flow_action actions[],
+		uint32_t priority,
 		void **meta,
 		struct rte_flow_error *error);
 
diff --git a/drivers/net/iavf/iavf_hash.c b/drivers/net/iavf/iavf_hash.c
index 42df7c4e48..dea4e0aa0a 100644
--- a/drivers/net/iavf/iavf_hash.c
+++ b/drivers/net/iavf/iavf_hash.c
@@ -86,6 +86,7 @@ iavf_hash_parse_pattern_action(struct iavf_adapter *ad,
 			       uint32_t array_len,
 			       const struct rte_flow_item pattern[],
 			       const struct rte_flow_action actions[],
+			       uint32_t priority,
 			       void **meta,
 			       struct rte_flow_error *error);
 
@@ -1509,6 +1510,7 @@ iavf_hash_parse_pattern_action(__rte_unused struct iavf_adapter *ad,
 			       uint32_t array_len,
 			       const struct rte_flow_item pattern[],
 			       const struct rte_flow_action actions[],
+			       uint32_t priority,
 			       void **meta,
 			       struct rte_flow_error *error)
 {
@@ -1517,6 +1519,9 @@ iavf_hash_parse_pattern_action(__rte_unused struct iavf_adapter *ad,
 	uint64_t phint = IAVF_PHINT_NONE;
 	int ret = 0;
 
+	if (priority >= 1)
+		return -rte_errno;
+
 	rss_meta_ptr = rte_zmalloc(NULL, sizeof(*rss_meta_ptr), 0);
 	if (!rss_meta_ptr) {
 		rte_flow_error_set(error, EINVAL,
diff --git a/drivers/net/iavf/iavf_ipsec_crypto.c b/drivers/net/iavf/iavf_ipsec_crypto.c
index 1d465b4419..cec1b968fe 100644
--- a/drivers/net/iavf/iavf_ipsec_crypto.c
+++ b/drivers/net/iavf/iavf_ipsec_crypto.c
@@ -1933,16 +1933,20 @@ static struct iavf_flow_engine iavf_ipsec_flow_engine = {
 
 static int
 iavf_ipsec_flow_parse(struct iavf_adapter *ad,
-		       struct iavf_pattern_match_item *array,
-		       uint32_t array_len,
-		       const struct rte_flow_item pattern[],
-		       const struct rte_flow_action actions[],
-		       void **meta,
-		       struct rte_flow_error *error)
+		      struct iavf_pattern_match_item *array,
+		      uint32_t array_len,
+		      const struct rte_flow_item pattern[],
+		      const struct rte_flow_action actions[],
+		      uint32_t priority,
+		      void **meta,
+		      struct rte_flow_error *error)
 {
 	struct iavf_pattern_match_item *item = NULL;
 	int ret = -1;
 
+	if (priority >= 1)
+		return -rte_errno;
+
 	item = iavf_search_pattern_match_item(pattern, array, array_len, error);
 	if (item && item->meta) {
 		uint32_t type = (uint64_t)(item->meta);
-- 
2.25.1


^ permalink raw reply	[flat|nested] 40+ messages in thread

* [PATCH v4 0/5] support flow subscription
  2022-08-09  6:21 [PATCH 0/2] support flow subscription Jie Wang
                   ` (4 preceding siblings ...)
  2022-09-07  3:35 ` [PATCH v4 " Jie Wang
@ 2022-09-07  4:38 ` Jie Wang
  2022-09-07  4:38   ` [PATCH v4 1/5] common/iavf: " Jie Wang
                     ` (4 more replies)
  2022-09-07  5:10 ` [PATCH v5 0/5] support flow subscription Jie Wang
  6 siblings, 5 replies; 40+ messages in thread
From: Jie Wang @ 2022-09-07  4:38 UTC (permalink / raw)
  To: dev
  Cc: qiming.yang, qi.z.zhang, jingjing.wu, beilei.xing, stevex.yang, Jie Wang

Add support for AVF to subscribe to a flow from the PF.

--
v4:
 * replace flow action represented_port with port_representor.
 * update commit log and rebase.
v3:
 * fix eth layer inputset.
 * rebase.
v2:
 * split v1 patch 2/2 to 4 small patches.
 * remove rule action RTE_FLOW_ACTION_TYPE_VF and add
   RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT.

Jie Wang (5):
  common/iavf: support flow subscription
  net/iavf: add flow subscription to AVF
  net/iavf: support flow subscription pattern
  net/iavf: support flow subscription rule
  net/iavf: support priority of flow rule

 doc/guides/rel_notes/release_22_11.rst |   4 +
 drivers/common/iavf/virtchnl.h         | 104 +++-
 drivers/net/iavf/iavf.h                |  13 +
 drivers/net/iavf/iavf_fdir.c           |   4 +
 drivers/net/iavf/iavf_fsub.c           | 745 +++++++++++++++++++++++++
 drivers/net/iavf/iavf_generic_flow.c   |  40 +-
 drivers/net/iavf/iavf_generic_flow.h   |   2 +
 drivers/net/iavf/iavf_hash.c           |   5 +
 drivers/net/iavf/iavf_ipsec_crypto.c   |  16 +-
 drivers/net/iavf/iavf_vchnl.c          | 133 +++++
 drivers/net/iavf/meson.build           |   1 +
 11 files changed, 1046 insertions(+), 21 deletions(-)
 create mode 100644 drivers/net/iavf/iavf_fsub.c

-- 
2.25.1


^ permalink raw reply	[flat|nested] 40+ messages in thread

* [PATCH v4 1/5] common/iavf: support flow subscription
  2022-09-07  4:38 ` [PATCH v4 0/5] support flow subscription Jie Wang
@ 2022-09-07  4:38   ` Jie Wang
  2022-09-07  4:38   ` [PATCH v4 2/5] net/iavf: add flow subscription to AVF Jie Wang
                     ` (3 subsequent siblings)
  4 siblings, 0 replies; 40+ messages in thread
From: Jie Wang @ 2022-09-07  4:38 UTC (permalink / raw)
  To: dev
  Cc: qiming.yang, qi.z.zhang, jingjing.wu, beilei.xing, stevex.yang, Jie Wang

A VF is able to subscribe to a flow from the PF via
VIRTCHNL_OP_FLOW_SUBSCRIBE.

The PF is expected to offload a rule to hardware which will redirect
packets matching the required pattern to this VF.

Only a flow whose destination MAC address is the PF's MAC address
can be subscribed.

VIRTCHNL_VF_OFFLOAD_FSUB_PF is used for flow subscription capability
negotiation, and only a trusted VF can be granted this capability.

A flow can be unsubscribed via VIRTCHNL_OP_FLOW_UNSUBSCRIBE.
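
As a rough sketch of the message flow on the VF side, assuming the
structures added below (vf_send_virtchnl_msg() is a placeholder for
the driver's admin-queue send/receive helper, not a real API):

	/* placeholder prototype for the admin-queue helper */
	void vf_send_virtchnl_msg(enum virtchnl_ops op, u8 *msg, u16 len);

	static void
	subscribe_then_unsubscribe(u16 vsi_id)
	{
		struct virtchnl_flow_sub sub = {0};
		struct virtchnl_flow_unsub unsub = {0};

		sub.vsi_id = vsi_id;
		sub.validate_only = 0;	/* 1 = dry-run validation only */
		sub.priority = 0;
		/* fill sub.proto_hdrs and sub.actions to describe the flow */
		vf_send_virtchnl_msg(VIRTCHNL_OP_FLOW_SUBSCRIBE,
				     (u8 *)&sub, sizeof(sub));
		/* on success the PF echoes the message back with
		 * status == VIRTCHNL_FSUB_SUCCESS and a PF-assigned
		 * flow_id to keep for the later unsubscribe
		 */

		unsub.vsi_id = vsi_id;
		unsub.flow_id = sub.flow_id;	/* id assigned at subscribe */
		vf_send_virtchnl_msg(VIRTCHNL_OP_FLOW_UNSUBSCRIBE,
				     (u8 *)&unsub, sizeof(unsub));
	}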

Signed-off-by: Jie Wang <jie1x.wang@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
 drivers/common/iavf/virtchnl.h | 104 +++++++++++++++++++++++++++++++--
 1 file changed, 100 insertions(+), 4 deletions(-)

diff --git a/drivers/common/iavf/virtchnl.h b/drivers/common/iavf/virtchnl.h
index f123daec8e..e02eec4935 100644
--- a/drivers/common/iavf/virtchnl.h
+++ b/drivers/common/iavf/virtchnl.h
@@ -168,6 +168,8 @@ enum virtchnl_ops {
 	VIRTCHNL_OP_MAP_QUEUE_VECTOR = 111,
 	VIRTCHNL_OP_CONFIG_QUEUE_BW = 112,
 	VIRTCHNL_OP_CONFIG_QUANTA = 113,
+	VIRTCHNL_OP_FLOW_SUBSCRIBE = 114,
+	VIRTCHNL_OP_FLOW_UNSUBSCRIBE = 115,
 	VIRTCHNL_OP_MAX,
 };
 
@@ -282,6 +284,10 @@ static inline const char *virtchnl_op_str(enum virtchnl_ops v_opcode)
 		return "VIRTCHNL_OP_1588_PTP_GET_CAPS";
 	case VIRTCHNL_OP_1588_PTP_GET_TIME:
 		return "VIRTCHNL_OP_1588_PTP_GET_TIME";
+	case VIRTCHNL_OP_FLOW_SUBSCRIBE:
+		return "VIRTCHNL_OP_FLOW_SUBSCRIBE";
+	case VIRTCHNL_OP_FLOW_UNSUBSCRIBE:
+		return "VIRTCHNL_OP_FLOW_UNSUBSCRIBE";
 	case VIRTCHNL_OP_MAX:
 		return "VIRTCHNL_OP_MAX";
 	default:
@@ -401,6 +407,7 @@ VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
 #define VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO	BIT(8)
 #define VIRTCHNL_VF_LARGE_NUM_QPAIRS		BIT(9)
 #define VIRTCHNL_VF_OFFLOAD_CRC			BIT(10)
+#define VIRTCHNL_VF_OFFLOAD_FSUB_PF		BIT(14)
 #define VIRTCHNL_VF_OFFLOAD_VLAN_V2		BIT(15)
 #define VIRTCHNL_VF_OFFLOAD_VLAN		BIT(16)
 #define VIRTCHNL_VF_OFFLOAD_RX_POLLING		BIT(17)
@@ -1503,6 +1510,7 @@ enum virtchnl_vfr_states {
 };
 
 #define VIRTCHNL_MAX_NUM_PROTO_HDRS	32
+#define VIRTCHNL_MAX_NUM_PROTO_HDRS_W_MSK	16
 #define VIRTCHNL_MAX_SIZE_RAW_PACKET	1024
 #define PROTO_HDR_SHIFT			5
 #define PROTO_HDR_FIELD_START(proto_hdr_type) \
@@ -1695,6 +1703,22 @@ struct virtchnl_proto_hdr {
 
 VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_proto_hdr);
 
+struct virtchnl_proto_hdr_w_msk {
+	/* see enum virtchnl_proto_hdr_type */
+	s32 type;
+	u32 pad;
+	/**
+	 * binary buffer in network order for specific header type.
+	 * For example, if type = VIRTCHNL_PROTO_HDR_IPV4, an IPv4
+	 * header is expected to be copied into the buffer.
+	 */
+	u8 buffer_spec[64];
+	/* binary buffer for bit-mask applied to specific header type */
+	u8 buffer_mask[64];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(136, virtchnl_proto_hdr_w_msk);
+
 struct virtchnl_proto_hdrs {
 	u8 tunnel_level;
 	/**
@@ -1706,11 +1730,18 @@ struct virtchnl_proto_hdrs {
 	 */
 	int count;
 	/**
-	 * number of proto layers, must < VIRTCHNL_MAX_NUM_PROTO_HDRS
-	 * must be 0 for a raw packet request.
+	 * count must be <=
+	 * VIRTCHNL_MAX_NUM_PROTO_HDRS + VIRTCHNL_MAX_NUM_PROTO_HDRS_W_MSK
+	 * count = 0 :					select raw
+	 * 0 < count <= VIRTCHNL_MAX_NUM_PROTO_HDRS :	select proto_hdr
+	 * count > VIRTCHNL_MAX_NUM_PROTO_HDRS :	select proto_hdr_w_msk
+	 * last valid index = count - VIRTCHNL_MAX_NUM_PROTO_HDRS
 	 */
 	union {
-		struct virtchnl_proto_hdr proto_hdr[VIRTCHNL_MAX_NUM_PROTO_HDRS];
+		struct virtchnl_proto_hdr
+			proto_hdr[VIRTCHNL_MAX_NUM_PROTO_HDRS];
+		struct virtchnl_proto_hdr_w_msk
+			proto_hdr_w_msk[VIRTCHNL_MAX_NUM_PROTO_HDRS_W_MSK];
 		struct {
 			u16 pkt_len;
 			u8 spec[VIRTCHNL_MAX_SIZE_RAW_PACKET];
@@ -1731,7 +1762,7 @@ struct virtchnl_rss_cfg {
 
 VIRTCHNL_CHECK_STRUCT_LEN(2444, virtchnl_rss_cfg);
 
-/* action configuration for FDIR */
+/* action configuration for FDIR and FSUB */
 struct virtchnl_filter_action {
 	/* see enum virtchnl_action type */
 	s32 type;
@@ -1849,6 +1880,65 @@ struct virtchnl_fdir_del {
 
 VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_fdir_del);
 
+/* Status returned to VF after VF requests FSUB commands
+ * VIRTCHNL_FSUB_SUCCESS
+ * VF FLOW related request was successfully done by PF.
+ * The request can be OP_FLOW_SUBSCRIBE/UNSUBSCRIBE.
+ *
+ * VIRTCHNL_FSUB_FAILURE_RULE_NORESOURCE
+ * OP_FLOW_SUBSCRIBE request failed due to no hardware resource.
+ *
+ * VIRTCHNL_FSUB_FAILURE_RULE_EXIST
+ * OP_FLOW_SUBSCRIBE request failed because the rule already exists.
+ *
+ * VIRTCHNL_FSUB_FAILURE_RULE_NONEXIST
+ * OP_FLOW_UNSUBSCRIBE request failed because this rule doesn't exist.
+ *
+ * VIRTCHNL_FSUB_FAILURE_RULE_INVALID
+ * OP_FLOW_SUBSCRIBE request failed because parameter validation failed
+ * or the HW doesn't support it.
+ */
+enum virtchnl_fsub_prgm_status {
+	VIRTCHNL_FSUB_SUCCESS = 0,
+	VIRTCHNL_FSUB_FAILURE_RULE_NORESOURCE,
+	VIRTCHNL_FSUB_FAILURE_RULE_EXIST,
+	VIRTCHNL_FSUB_FAILURE_RULE_NONEXIST,
+	VIRTCHNL_FSUB_FAILURE_RULE_INVALID,
+};
+
+/* VIRTCHNL_OP_FLOW_SUBSCRIBE
+ * VF sends this request to PF by filling out vsi_id,
+ * validate_only, priority, proto_hdrs and actions.
+ * PF will return the assigned flow_id
+ * if the request succeeds, and always returns status to VF.
+ */
+struct virtchnl_flow_sub {
+	u16 vsi_id; /* INPUT */
+	u8 validate_only; /* INPUT */
+	u8 priority; /* INPUT */
+	u32 flow_id; /* OUTPUT */
+	struct virtchnl_proto_hdrs proto_hdrs; /* INPUT */
+	struct virtchnl_filter_action_set actions; /* INPUT */
+	/* see enum virtchnl_fsub_prgm_status; OUTPUT */
+	s32 status;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(2616, virtchnl_flow_sub);
+
+/* VIRTCHNL_OP_FLOW_UNSUBSCRIBE
+ * VF sends this request to PF by filling out vsi_id
+ * and flow_id. PF will return status to VF.
+ */
+struct virtchnl_flow_unsub {
+	u16 vsi_id; /* INPUT */
+	u16 pad;
+	u32 flow_id; /* INPUT */
+	/* see enum virtchnl_fsub_prgm_status; OUTPUT */
+	s32 status;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_flow_unsub);
+
 /* VIRTCHNL_OP_GET_QOS_CAPS
  * VF sends this message to get its QoS Caps, such as
  * TC number, Arbiter and Bandwidth.
@@ -2318,6 +2408,12 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
 	case VIRTCHNL_OP_DEL_FDIR_FILTER:
 		valid_len = sizeof(struct virtchnl_fdir_del);
 		break;
+	case VIRTCHNL_OP_FLOW_SUBSCRIBE:
+		valid_len = sizeof(struct virtchnl_flow_sub);
+		break;
+	case VIRTCHNL_OP_FLOW_UNSUBSCRIBE:
+		valid_len = sizeof(struct virtchnl_flow_unsub);
+		break;
 	case VIRTCHNL_OP_GET_QOS_CAPS:
 		break;
 	case VIRTCHNL_OP_CONFIG_QUEUE_TC_MAP:
-- 
2.25.1


^ permalink raw reply	[flat|nested] 40+ messages in thread

* [PATCH v4 2/5] net/iavf: add flow subscription to AVF
  2022-09-07  4:38 ` [PATCH v4 0/5] support flow subscription Jie Wang
  2022-09-07  4:38   ` [PATCH v4 1/5] common/iavf: " Jie Wang
@ 2022-09-07  4:38   ` Jie Wang
  2022-09-07  4:38   ` [PATCH v4 3/5] net/iavf: support flow subscription pattern Jie Wang
                     ` (2 subsequent siblings)
  4 siblings, 0 replies; 40+ messages in thread
From: Jie Wang @ 2022-09-07  4:38 UTC (permalink / raw)
  To: dev
  Cc: qiming.yang, qi.z.zhang, jingjing.wu, beilei.xing, stevex.yang, Jie Wang

Add the skeletal code of flow subscription to the AVF driver.

Signed-off-by: Jie Wang <jie1x.wang@intel.com>
---
 doc/guides/rel_notes/release_22_11.rst |   4 +
 drivers/net/iavf/iavf_fsub.c           | 112 +++++++++++++++++++++++++
 drivers/net/iavf/iavf_generic_flow.c   |  17 +++-
 drivers/net/iavf/iavf_generic_flow.h   |   1 +
 drivers/net/iavf/iavf_vchnl.c          |   1 +
 drivers/net/iavf/meson.build           |   1 +
 6 files changed, 135 insertions(+), 1 deletion(-)
 create mode 100644 drivers/net/iavf/iavf_fsub.c

diff --git a/doc/guides/rel_notes/release_22_11.rst b/doc/guides/rel_notes/release_22_11.rst
index 8c021cf050..bb77a03e24 100644
--- a/doc/guides/rel_notes/release_22_11.rst
+++ b/doc/guides/rel_notes/release_22_11.rst
@@ -55,6 +55,10 @@ New Features
      Also, make sure to start the actual text at the margin.
      =======================================================
 
+* **Updated Intel iavf driver.**
+
+  * Added flow subscription support.
+
 
 Removed Items
 -------------
diff --git a/drivers/net/iavf/iavf_fsub.c b/drivers/net/iavf/iavf_fsub.c
new file mode 100644
index 0000000000..17f9bb2976
--- /dev/null
+++ b/drivers/net/iavf/iavf_fsub.c
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2022 Intel Corporation
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_eth_ctrl.h>
+#include <rte_tailq.h>
+#include <rte_flow_driver.h>
+#include <rte_flow.h>
+#include <iavf.h>
+#include "iavf_generic_flow.h"
+
+
+static struct iavf_flow_parser iavf_fsub_parser;
+
+static struct iavf_pattern_match_item iavf_fsub_pattern_list[] = {};
+
+static int
+iavf_fsub_create(__rte_unused struct iavf_adapter *ad,
+		 __rte_unused struct rte_flow *flow,
+		 __rte_unused void *meta,
+		 __rte_unused struct rte_flow_error *error)
+{
+	return -rte_errno;
+}
+
+static int
+iavf_fsub_destroy(__rte_unused struct iavf_adapter *ad,
+		  __rte_unused struct rte_flow *flow,
+		  __rte_unused struct rte_flow_error *error)
+{
+	return -rte_errno;
+}
+
+static int
+iavf_fsub_validation(__rte_unused struct iavf_adapter *ad,
+		     __rte_unused struct rte_flow *flow,
+		     __rte_unused void *meta,
+		     __rte_unused struct rte_flow_error *error)
+{
+	return -rte_errno;
+};
+
+static int
+iavf_fsub_parse(__rte_unused struct iavf_adapter *ad,
+		__rte_unused struct iavf_pattern_match_item *array,
+		__rte_unused uint32_t array_len,
+		__rte_unused const struct rte_flow_item pattern[],
+		__rte_unused const struct rte_flow_action actions[],
+		__rte_unused void **meta,
+		__rte_unused struct rte_flow_error *error)
+{
+	return -rte_errno;
+}
+
+static int
+iavf_fsub_init(struct iavf_adapter *ad)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
+	struct iavf_flow_parser *parser;
+
+	if (!vf->vf_res)
+		return -EINVAL;
+
+	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_FSUB_PF)
+		parser = &iavf_fsub_parser;
+	else
+		return -ENOTSUP;
+
+	return iavf_register_parser(parser, ad);
+}
+
+static void
+iavf_fsub_uninit(struct iavf_adapter *ad)
+{
+	iavf_unregister_parser(&iavf_fsub_parser, ad);
+}
+
+static struct
+iavf_flow_engine iavf_fsub_engine = {
+	.init = iavf_fsub_init,
+	.uninit = iavf_fsub_uninit,
+	.create = iavf_fsub_create,
+	.destroy = iavf_fsub_destroy,
+	.validation = iavf_fsub_validation,
+	.type = IAVF_FLOW_ENGINE_FSUB,
+};
+
+static struct
+iavf_flow_parser iavf_fsub_parser = {
+	.engine = &iavf_fsub_engine,
+	.array = iavf_fsub_pattern_list,
+	.array_len = RTE_DIM(iavf_fsub_pattern_list),
+	.parse_pattern_action = iavf_fsub_parse,
+	.stage = IAVF_FLOW_STAGE_DISTRIBUTOR,
+};
+
+RTE_INIT(iavf_fsub_engine_init)
+{
+	iavf_register_flow_engine(&iavf_fsub_engine);
+}
diff --git a/drivers/net/iavf/iavf_generic_flow.c b/drivers/net/iavf/iavf_generic_flow.c
index e1a611e319..b04614ba6e 100644
--- a/drivers/net/iavf/iavf_generic_flow.c
+++ b/drivers/net/iavf/iavf_generic_flow.c
@@ -1866,6 +1866,8 @@ iavf_register_parser(struct iavf_flow_parser *parser,
 {
 	struct iavf_parser_list *list = NULL;
 	struct iavf_flow_parser_node *parser_node;
+	struct iavf_flow_parser_node *existing_node;
+	void *temp;
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
 
 	parser_node = rte_zmalloc("iavf_parser", sizeof(*parser_node), 0);
@@ -1880,14 +1882,26 @@ iavf_register_parser(struct iavf_flow_parser *parser,
 		TAILQ_INSERT_TAIL(list, parser_node, node);
 	} else if (parser->engine->type == IAVF_FLOW_ENGINE_FDIR) {
 		list = &vf->dist_parser_list;
+		RTE_TAILQ_FOREACH_SAFE(existing_node, list, node, temp) {
+			if (existing_node->parser->engine->type ==
+			    IAVF_FLOW_ENGINE_FSUB) {
+				TAILQ_INSERT_AFTER(list, existing_node,
+						   parser_node, node);
+				goto DONE;
+			}
+		}
 		TAILQ_INSERT_HEAD(list, parser_node, node);
 	} else if (parser->engine->type == IAVF_FLOW_ENGINE_IPSEC_CRYPTO) {
 		list = &vf->ipsec_crypto_parser_list;
 		TAILQ_INSERT_HEAD(list, parser_node, node);
+	} else if (parser->engine->type == IAVF_FLOW_ENGINE_FSUB) {
+		list = &vf->dist_parser_list;
+		TAILQ_INSERT_HEAD(list, parser_node, node);
 	} else {
 		return -EINVAL;
 	}
 
+DONE:
 	return 0;
 }
 
@@ -1902,7 +1916,8 @@ iavf_unregister_parser(struct iavf_flow_parser *parser,
 
 	if (parser->engine->type == IAVF_FLOW_ENGINE_HASH)
 		list = &vf->rss_parser_list;
-	else if (parser->engine->type == IAVF_FLOW_ENGINE_FDIR)
+	else if ((parser->engine->type == IAVF_FLOW_ENGINE_FDIR) ||
+		 (parser->engine->type == IAVF_FLOW_ENGINE_FSUB))
 		list = &vf->dist_parser_list;
 
 	if (list == NULL)
diff --git a/drivers/net/iavf/iavf_generic_flow.h b/drivers/net/iavf/iavf_generic_flow.h
index 52eb1caf29..448facffa5 100644
--- a/drivers/net/iavf/iavf_generic_flow.h
+++ b/drivers/net/iavf/iavf_generic_flow.h
@@ -480,6 +480,7 @@ enum iavf_flow_engine_type {
 	IAVF_FLOW_ENGINE_IPSEC_CRYPTO,
 	IAVF_FLOW_ENGINE_FDIR,
 	IAVF_FLOW_ENGINE_HASH,
+	IAVF_FLOW_ENGINE_FSUB,
 	IAVF_FLOW_ENGINE_MAX,
 };
 
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 21bd1e2193..6d84add423 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -502,6 +502,7 @@ iavf_get_vf_resource(struct iavf_adapter *adapter)
 		VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC |
 		VIRTCHNL_VF_OFFLOAD_FDIR_PF |
 		VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF |
+		VIRTCHNL_VF_OFFLOAD_FSUB_PF |
 		VIRTCHNL_VF_OFFLOAD_REQ_QUEUES |
 		VIRTCHNL_VF_OFFLOAD_CRC |
 		VIRTCHNL_VF_OFFLOAD_VLAN_V2 |
diff --git a/drivers/net/iavf/meson.build b/drivers/net/iavf/meson.build
index 2da37de662..6df771f917 100644
--- a/drivers/net/iavf/meson.build
+++ b/drivers/net/iavf/meson.build
@@ -16,6 +16,7 @@ sources = files(
         'iavf_hash.c',
         'iavf_tm.c',
         'iavf_ipsec_crypto.c',
+        'iavf_fsub.c',
 )
 
 if arch_subdir == 'x86'
-- 
2.25.1


^ permalink raw reply	[flat|nested] 40+ messages in thread

* [PATCH v4 3/5] net/iavf: support flow subscription pattern
  2022-09-07  4:38 ` [PATCH v4 0/5] support flow subscription Jie Wang
  2022-09-07  4:38   ` [PATCH v4 1/5] common/iavf: " Jie Wang
  2022-09-07  4:38   ` [PATCH v4 2/5] net/iavf: add flow subscription to AVF Jie Wang
@ 2022-09-07  4:38   ` Jie Wang
  2022-09-07  4:38   ` [PATCH v4 4/5] net/iavf: support flow subscription rule Jie Wang
  2022-09-07  4:38   ` [PATCH v4 5/5] net/iavf: support priority of flow rule Jie Wang
  4 siblings, 0 replies; 40+ messages in thread
From: Jie Wang @ 2022-09-07  4:38 UTC (permalink / raw)
  To: dev
  Cc: qiming.yang, qi.z.zhang, jingjing.wu, beilei.xing, stevex.yang, Jie Wang

Add flow subscription pattern support for AVF.

The supported patterns are listed below:
eth/vlan/ipv4
eth/ipv4(6)
eth/ipv4(6)/udp
eth/ipv4(6)/tcp

Signed-off-by: Jie Wang <jie1x.wang@intel.com>
---
 drivers/net/iavf/iavf.h      |   7 +
 drivers/net/iavf/iavf_fsub.c | 598 ++++++++++++++++++++++++++++++++++-
 2 files changed, 597 insertions(+), 8 deletions(-)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 025ab3ff60..f79c7f9f6e 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -148,6 +148,13 @@ struct iavf_fdir_info {
 	struct iavf_fdir_conf conf;
 };
 
+struct iavf_fsub_conf {
+	struct virtchnl_flow_sub sub_fltr;
+	struct virtchnl_flow_unsub unsub_fltr;
+	uint64_t input_set;
+	uint32_t flow_id;
+};
+
 struct iavf_qv_map {
 	uint16_t queue_id;
 	uint16_t vector_id;
diff --git a/drivers/net/iavf/iavf_fsub.c b/drivers/net/iavf/iavf_fsub.c
index 17f9bb2976..66e403d585 100644
--- a/drivers/net/iavf/iavf_fsub.c
+++ b/drivers/net/iavf/iavf_fsub.c
@@ -22,9 +22,51 @@
 #include "iavf_generic_flow.h"
 
 
+#define MAX_QGRP_NUM_TYPE      7
+#define IAVF_IPV6_ADDR_LENGTH  16
+#define MAX_INPUT_SET_BYTE     32
+
+#define IAVF_SW_INSET_ETHER ( \
+	IAVF_INSET_DMAC | IAVF_INSET_SMAC | IAVF_INSET_ETHERTYPE)
+#define IAVF_SW_INSET_MAC_IPV4 ( \
+	IAVF_INSET_DMAC | IAVF_INSET_IPV4_DST | IAVF_INSET_IPV4_SRC | \
+	IAVF_INSET_IPV4_PROTO | IAVF_INSET_IPV4_TTL | IAVF_INSET_IPV4_TOS)
+#define IAVF_SW_INSET_MAC_VLAN_IPV4 ( \
+	IAVF_SW_INSET_MAC_IPV4 | IAVF_INSET_VLAN_OUTER)
+#define IAVF_SW_INSET_MAC_IPV4_TCP ( \
+	IAVF_INSET_DMAC | IAVF_INSET_IPV4_DST | IAVF_INSET_IPV4_SRC | \
+	IAVF_INSET_IPV4_TTL | IAVF_INSET_IPV4_TOS | \
+	IAVF_INSET_TCP_DST_PORT | IAVF_INSET_TCP_SRC_PORT)
+#define IAVF_SW_INSET_MAC_IPV4_UDP ( \
+	IAVF_INSET_DMAC | IAVF_INSET_IPV4_DST | IAVF_INSET_IPV4_SRC | \
+	IAVF_INSET_IPV4_TTL | IAVF_INSET_IPV4_TOS | \
+	IAVF_INSET_UDP_DST_PORT | IAVF_INSET_UDP_SRC_PORT)
+#define IAVF_SW_INSET_MAC_IPV6 ( \
+	IAVF_INSET_DMAC | IAVF_INSET_IPV6_DST | IAVF_INSET_IPV6_SRC | \
+	IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
+	IAVF_INSET_IPV6_NEXT_HDR)
+#define IAVF_SW_INSET_MAC_IPV6_TCP ( \
+	IAVF_INSET_DMAC | IAVF_INSET_IPV6_DST | IAVF_INSET_IPV6_SRC | \
+	IAVF_INSET_IPV6_HOP_LIMIT | IAVF_INSET_IPV6_TC | \
+	IAVF_INSET_TCP_DST_PORT | IAVF_INSET_TCP_SRC_PORT)
+#define IAVF_SW_INSET_MAC_IPV6_UDP ( \
+	IAVF_INSET_DMAC | IAVF_INSET_IPV6_DST | IAVF_INSET_IPV6_SRC | \
+	IAVF_INSET_IPV6_HOP_LIMIT | IAVF_INSET_IPV6_TC | \
+	IAVF_INSET_UDP_DST_PORT | IAVF_INSET_UDP_SRC_PORT)
+
 static struct iavf_flow_parser iavf_fsub_parser;
 
-static struct iavf_pattern_match_item iavf_fsub_pattern_list[] = {};
+static struct
+iavf_pattern_match_item iavf_fsub_pattern_list[] = {
+	{iavf_pattern_ethertype,			IAVF_SW_INSET_ETHER,			IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4,				IAVF_SW_INSET_MAC_IPV4,			IAVF_INSET_NONE},
+	{iavf_pattern_eth_vlan_ipv4,			IAVF_SW_INSET_MAC_VLAN_IPV4,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_udp,			IAVF_SW_INSET_MAC_IPV4_UDP,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_tcp,			IAVF_SW_INSET_MAC_IPV4_TCP,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv6,				IAVF_SW_INSET_MAC_IPV6,			IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv6_udp,			IAVF_SW_INSET_MAC_IPV6_UDP,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv6_tcp,			IAVF_SW_INSET_MAC_IPV6_TCP,		IAVF_INSET_NONE},
+};
 
 static int
 iavf_fsub_create(__rte_unused struct iavf_adapter *ad,
@@ -53,17 +95,557 @@ iavf_fsub_validation(__rte_unused struct iavf_adapter *ad,
 };
 
 static int
-iavf_fsub_parse(__rte_unused struct iavf_adapter *ad,
-		__rte_unused struct iavf_pattern_match_item *array,
-		__rte_unused uint32_t array_len,
-		__rte_unused const struct rte_flow_item pattern[],
-		__rte_unused const struct rte_flow_action actions[],
-		__rte_unused void **meta,
-		__rte_unused struct rte_flow_error *error)
+iavf_fsub_parse_pattern(const struct rte_flow_item pattern[],
+			const uint64_t input_set_mask,
+			struct rte_flow_error *error,
+			struct iavf_fsub_conf *filter)
+{
+	struct virtchnl_proto_hdrs *hdrs = &filter->sub_fltr.proto_hdrs;
+	enum rte_flow_item_type item_type;
+	const struct rte_flow_item_eth *eth_spec, *eth_mask;
+	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
+	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
+	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
+	const struct rte_flow_item_udp *udp_spec, *udp_mask;
+	const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
+	const struct rte_flow_item *item = pattern;
+	struct virtchnl_proto_hdr_w_msk *hdr, *hdr1 = NULL;
+	uint64_t outer_input_set = IAVF_INSET_NONE;
+	uint64_t *input = NULL;
+	uint16_t input_set_byte = 0;
+	uint16_t j;
+	uint32_t layer = 0;
+
+	for (item = pattern; item->type !=
+			RTE_FLOW_ITEM_TYPE_END; item++) {
+		if (item->last) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item, "Not support range");
+			return -rte_errno;
+		}
+		item_type = item->type;
+
+		switch (item_type) {
+		case RTE_FLOW_ITEM_TYPE_ETH:
+			eth_spec = item->spec;
+			eth_mask = item->mask;
+
+			hdr1 = &hdrs->proto_hdr_w_msk[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr1, ETH);
+
+			if (eth_spec && eth_mask) {
+				input = &outer_input_set;
+
+				if (!rte_is_zero_ether_addr(&eth_mask->dst)) {
+					*input |= IAVF_INSET_DMAC;
+					input_set_byte += 6;
+				} else {
+					/* flow subscribe filter will add dst mac in kernel */
+					input_set_byte += 6;
+				}
+
+				if (!rte_is_zero_ether_addr(&eth_mask->src)) {
+					*input |= IAVF_INSET_SMAC;
+					input_set_byte += 6;
+				}
+
+				if (eth_mask->type) {
+					*input |= IAVF_INSET_ETHERTYPE;
+					input_set_byte += 2;
+				}
+
+				rte_memcpy(hdr1->buffer_spec, eth_spec,
+					   sizeof(struct rte_ether_hdr));
+				rte_memcpy(hdr1->buffer_mask, eth_mask,
+					   sizeof(struct rte_ether_hdr));
+			} else {
+				/* flow subscribe filter will add dst mac in kernel */
+				input_set_byte += 6;
+			}
+
+			hdrs->count = ++layer;
+			break;
+		case RTE_FLOW_ITEM_TYPE_IPV4:
+			ipv4_spec = item->spec;
+			ipv4_mask = item->mask;
+
+			hdr = &hdrs->proto_hdr_w_msk[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4);
+
+			if (ipv4_spec && ipv4_mask) {
+				input = &outer_input_set;
+				/* Check IPv4 mask and update input set */
+				if (ipv4_mask->hdr.version_ihl ||
+					ipv4_mask->hdr.total_length ||
+					ipv4_mask->hdr.packet_id ||
+					ipv4_mask->hdr.hdr_checksum) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item, "Invalid IPv4 mask.");
+					return -rte_errno;
+				}
+
+				if (ipv4_mask->hdr.src_addr) {
+					*input |= IAVF_INSET_IPV4_SRC;
+					input_set_byte += 2;
+				}
+				if (ipv4_mask->hdr.dst_addr) {
+					*input |= IAVF_INSET_IPV4_DST;
+					input_set_byte += 2;
+				}
+				if (ipv4_mask->hdr.time_to_live) {
+					*input |= IAVF_INSET_IPV4_TTL;
+					input_set_byte++;
+				}
+				if (ipv4_mask->hdr.next_proto_id) {
+					*input |= IAVF_INSET_IPV4_PROTO;
+					input_set_byte++;
+				}
+				if (ipv4_mask->hdr.type_of_service) {
+					*input |= IAVF_INSET_IPV4_TOS;
+					input_set_byte++;
+				}
+
+				rte_memcpy(hdr->buffer_spec, &ipv4_spec->hdr,
+					   sizeof(ipv4_spec->hdr));
+				rte_memcpy(hdr->buffer_mask, &ipv4_mask->hdr,
+					   sizeof(ipv4_spec->hdr));
+			}
+
+			hdrs->count = ++layer;
+			break;
+		case RTE_FLOW_ITEM_TYPE_IPV6:
+			ipv6_spec = item->spec;
+			ipv6_mask = item->mask;
+
+			hdr = &hdrs->proto_hdr_w_msk[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV6);
+
+			if (ipv6_spec && ipv6_mask) {
+				input = &outer_input_set;
+
+				if (ipv6_mask->hdr.payload_len) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item, "Invalid IPv6 mask");
+					return -rte_errno;
+				}
+
+				for (j = 0; j < IAVF_IPV6_ADDR_LENGTH; j++) {
+					if (ipv6_mask->hdr.src_addr[j]) {
+						*input |= IAVF_INSET_IPV6_SRC;
+						break;
+					}
+				}
+				for (j = 0; j < IAVF_IPV6_ADDR_LENGTH; j++) {
+					if (ipv6_mask->hdr.dst_addr[j]) {
+						*input |= IAVF_INSET_IPV6_DST;
+						break;
+					}
+				}
+
+				for (j = 0; j < IAVF_IPV6_ADDR_LENGTH; j++) {
+					if (ipv6_mask->hdr.src_addr[j])
+						input_set_byte++;
+
+					if (ipv6_mask->hdr.dst_addr[j])
+						input_set_byte++;
+				}
+
+				if (ipv6_mask->hdr.proto) {
+					*input |= IAVF_INSET_IPV6_NEXT_HDR;
+					input_set_byte++;
+				}
+				if (ipv6_mask->hdr.hop_limits) {
+					*input |= IAVF_INSET_IPV6_HOP_LIMIT;
+					input_set_byte++;
+				}
+				if (ipv6_mask->hdr.vtc_flow &
+				    rte_cpu_to_be_32(RTE_IPV6_HDR_TC_MASK)) {
+					*input |= IAVF_INSET_IPV6_TC;
+					input_set_byte += 4;
+				}
+
+				rte_memcpy(hdr->buffer_spec, &ipv6_spec->hdr,
+					   sizeof(ipv6_spec->hdr));
+				rte_memcpy(hdr->buffer_mask, &ipv6_mask->hdr,
+					   sizeof(ipv6_spec->hdr));
+			}
+
+			hdrs->count = ++layer;
+			break;
+		case RTE_FLOW_ITEM_TYPE_UDP:
+			udp_spec = item->spec;
+			udp_mask = item->mask;
+
+			hdr = &hdrs->proto_hdr_w_msk[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, UDP);
+
+			if (udp_spec && udp_mask) {
+				input = &outer_input_set;
+				/* Check UDP mask and update input set */
+				if (udp_mask->hdr.dgram_len ||
+				    udp_mask->hdr.dgram_cksum) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item, "Invalid UDP mask");
+					return -rte_errno;
+				}
+
+				if (udp_mask->hdr.src_port) {
+					*input |= IAVF_INSET_UDP_SRC_PORT;
+					input_set_byte += 2;
+				}
+				if (udp_mask->hdr.dst_port) {
+					*input |= IAVF_INSET_UDP_DST_PORT;
+					input_set_byte += 2;
+				}
+
+				rte_memcpy(hdr->buffer_spec, &udp_spec->hdr,
+					   sizeof(udp_spec->hdr));
+				rte_memcpy(hdr->buffer_mask, &udp_mask->hdr,
+					   sizeof(udp_mask->hdr));
+			}
+
+			hdrs->count = ++layer;
+			break;
+		case RTE_FLOW_ITEM_TYPE_TCP:
+			tcp_spec = item->spec;
+			tcp_mask = item->mask;
+
+			hdr = &hdrs->proto_hdr_w_msk[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, TCP);
+
+			if (tcp_spec && tcp_mask) {
+				input = &outer_input_set;
+				/* Check TCP mask and update input set */
+				if (tcp_mask->hdr.sent_seq ||
+					tcp_mask->hdr.recv_ack ||
+					tcp_mask->hdr.data_off ||
+					tcp_mask->hdr.tcp_flags ||
+					tcp_mask->hdr.rx_win ||
+					tcp_mask->hdr.cksum ||
+					tcp_mask->hdr.tcp_urp) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item, "Invalid TCP mask");
+					return -rte_errno;
+				}
+
+				if (tcp_mask->hdr.src_port) {
+					*input |= IAVF_INSET_TCP_SRC_PORT;
+					input_set_byte += 2;
+				}
+				if (tcp_mask->hdr.dst_port) {
+					*input |= IAVF_INSET_TCP_DST_PORT;
+					input_set_byte += 2;
+				}
+
+				rte_memcpy(hdr->buffer_spec, &tcp_spec->hdr,
+					   sizeof(tcp_spec->hdr));
+				rte_memcpy(hdr->buffer_mask, &tcp_mask->hdr,
+					   sizeof(tcp_mask->hdr));
+			}
+
+			hdrs->count = ++layer;
+			break;
+		case RTE_FLOW_ITEM_TYPE_VLAN:
+			vlan_spec = item->spec;
+			vlan_mask = item->mask;
+
+			hdr = &hdrs->proto_hdr_w_msk[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, S_VLAN);
+
+			if (vlan_spec && vlan_mask) {
+				input = &outer_input_set;
+
+				*input |= IAVF_INSET_VLAN_OUTER;
+
+				if (vlan_mask->tci)
+					input_set_byte += 2;
+
+				if (vlan_mask->inner_type) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item,
+						"Invalid VLAN input set.");
+					return -rte_errno;
+				}
+
+				rte_memcpy(hdr->buffer_spec, &vlan_spec->hdr,
+					   sizeof(vlan_spec->hdr));
+				rte_memcpy(hdr->buffer_mask, &vlan_mask->hdr,
+					   sizeof(vlan_mask->hdr));
+			}
+
+			hdrs->count = ++layer;
+			break;
+		case RTE_FLOW_ITEM_TYPE_VOID:
+			break;
+		default:
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM, pattern,
+					   "Invalid pattern item.");
+			return -rte_errno;
+		}
+	}
+
+	hdrs->count += VIRTCHNL_MAX_NUM_PROTO_HDRS;
+
+	if (input_set_byte > MAX_INPUT_SET_BYTE) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+				   item, "too much input set");
+		return -rte_errno;
+	}
+
+	if (!outer_input_set || (outer_input_set & ~input_set_mask))
+		return -rte_errno;
+
+	return 0;
+}
+
+static int
+iavf_fsub_parse_action(struct iavf_adapter *ad,
+		       const struct rte_flow_action *actions,
+		       uint32_t priority,
+		       struct rte_flow_error *error,
+		       struct iavf_fsub_conf *filter)
 {
+	const struct rte_flow_action *action;
+	const struct rte_flow_action_ethdev *act_ethdev;
+	const struct rte_flow_action_queue *act_q;
+	const struct rte_flow_action_rss *act_qgrop;
+	struct virtchnl_filter_action *filter_action;
+	uint16_t valid_qgrop_number[MAX_QGRP_NUM_TYPE] = {
+		2, 4, 8, 16, 32, 64, 128};
+	uint16_t i, num = 0, dest_num = 0, vf_num = 0;
+	uint16_t rule_port_id;
+
+	for (action = actions; action->type !=
+				RTE_FLOW_ACTION_TYPE_END; action++) {
+		switch (action->type) {
+		case RTE_FLOW_ACTION_TYPE_VOID:
+			break;
+
+		case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
+			vf_num++;
+			filter_action = &filter->sub_fltr.actions.actions[num];
+
+			act_ethdev = action->conf;
+			rule_port_id = ad->dev_data->port_id;
+			if (rule_port_id != act_ethdev->port_id)
+				goto error1;
+
+			filter->sub_fltr.actions.count = ++num;
+			break;
+		case RTE_FLOW_ACTION_TYPE_QUEUE:
+			dest_num++;
+			filter_action = &filter->sub_fltr.actions.actions[num];
+
+			act_q = action->conf;
+			if (act_q->index >= ad->dev_data->nb_rx_queues)
+				goto error2;
+
+			filter_action->type = VIRTCHNL_ACTION_QUEUE;
+			filter_action->act_conf.queue.index = act_q->index;
+			filter->sub_fltr.actions.count = ++num;
+			break;
+		case RTE_FLOW_ACTION_TYPE_RSS:
+			dest_num++;
+			filter_action = &filter->sub_fltr.actions.actions[num];
+
+			act_qgrop = action->conf;
+			if (act_qgrop->queue_num <= 1)
+				goto error2;
+
+			filter_action->type = VIRTCHNL_ACTION_Q_REGION;
+			filter_action->act_conf.queue.index =
+							act_qgrop->queue[0];
+			for (i = 0; i < MAX_QGRP_NUM_TYPE; i++) {
+				if (act_qgrop->queue_num ==
+				    valid_qgrop_number[i])
+					break;
+			}
+
+			if (i == MAX_QGRP_NUM_TYPE)
+				goto error2;
+
+			if ((act_qgrop->queue[0] + act_qgrop->queue_num) >
+			    ad->dev_data->nb_rx_queues)
+				goto error3;
+
+			for (i = 0; i < act_qgrop->queue_num - 1; i++)
+				if (act_qgrop->queue[i + 1] !=
+				    act_qgrop->queue[i] + 1)
+					goto error4;
+
+			filter_action->act_conf.queue.region = act_qgrop->queue_num;
+			filter->sub_fltr.actions.count = ++num;
+			break;
+		default:
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ACTION,
+					   actions, "Invalid action type");
+			return -rte_errno;
+		}
+	}
+
+	/* 0 denotes lowest priority of recipe and highest priority
+	 * of rte_flow. Change rte_flow priority into recipe priority.
+	 */
+	filter->sub_fltr.priority = priority;
+
+	if (num > VIRTCHNL_MAX_NUM_ACTIONS) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ACTION, actions,
+				   "Action numbers exceed the maximum value");
+		return -rte_errno;
+	}
+
+	if (vf_num == 0) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ACTION, actions,
+				   "Invalid action, vf action must be added");
+		return -rte_errno;
+	}
+
+	if (dest_num >= 2) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ACTION, actions,
+				   "Unsupported action combination");
+		return -rte_errno;
+	}
+
+	return 0;
+
+error1:
+	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, actions,
+			   "Invalid ethdev_port_id");
+	return -rte_errno;
+
+error2:
+	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, actions,
+			   "Invalid action type or queue number");
+	return -rte_errno;
+
+error3:
+	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, actions,
+			   "Invalid queue region indexes");
+	return -rte_errno;
+
+error4:
+	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, actions,
+			   "Discontinuous queue region");
 	return -rte_errno;
 }
 
+static int
+iavf_fsub_check_action(const struct rte_flow_action *actions,
+		       struct rte_flow_error *error)
+{
+	const struct rte_flow_action *action;
+	enum rte_flow_action_type action_type;
+	uint16_t actions_num = 0;
+	bool vf_valid = false;
+	bool queue_valid = false;
+
+	for (action = actions; action->type !=
+				RTE_FLOW_ACTION_TYPE_END; action++) {
+		action_type = action->type;
+		switch (action_type) {
+		case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
+			vf_valid = true;
+			actions_num++;
+			break;
+		case RTE_FLOW_ACTION_TYPE_RSS:
+		case RTE_FLOW_ACTION_TYPE_QUEUE:
+			queue_valid = true;
+			actions_num++;
+			break;
+		case RTE_FLOW_ACTION_TYPE_DROP:
+			actions_num++;
+			break;
+		case RTE_FLOW_ACTION_TYPE_VOID:
+			continue;
+		default:
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ACTION,
+					   actions, "Invalid action type");
+			return -rte_errno;
+		}
+	}
+
+	if (!((actions_num == 1 && !queue_valid) ||
+	      (actions_num == 2 && vf_valid && queue_valid))) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+				   actions, "Invalid action number");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+static int
+iavf_fsub_parse(struct iavf_adapter *ad,
+		struct iavf_pattern_match_item *array,
+		uint32_t array_len,
+		const struct rte_flow_item pattern[],
+		const struct rte_flow_action actions[],
+		void **meta,
+		struct rte_flow_error *error)
+{
+	struct iavf_fsub_conf *filter;
+	struct iavf_pattern_match_item *pattern_match_item = NULL;
+	int ret = 0;
+	uint32_t priority = 0;
+
+	filter = rte_zmalloc(NULL, sizeof(*filter), 0);
+	if (!filter) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "No memory for iavf_fsub_conf_ptr");
+		return -rte_errno;
+	}
+
+	/* search flow subscribe pattern */
+	pattern_match_item = iavf_search_pattern_match_item(pattern, array,
+							    array_len, error);
+	if (!pattern_match_item)
+		return -rte_errno;
+
+	/* parse flow subscribe pattern */
+	ret = iavf_fsub_parse_pattern(pattern,
+				      pattern_match_item->input_set_mask,
+				      error, filter);
+	if (ret)
+		goto error;
+
+	/* check flow subscribe pattern action */
+	ret = iavf_fsub_check_action(actions, error);
+	if (ret)
+		goto error;
+
+	/* parse flow subscribe pattern action */
+	ret = iavf_fsub_parse_action((void *)ad, actions, priority,
+				     error, filter);
+	if (ret)
+		goto error;
+
+	if (meta)
+		*meta = filter;
+
+error:
+	rte_free(pattern_match_item);
+	return ret;
+}
+
 static int
 iavf_fsub_init(struct iavf_adapter *ad)
 {
-- 
2.25.1


^ permalink raw reply	[flat|nested] 40+ messages in thread
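
For reference, this is what one of the supported patterns looks like when
built directly against the rte_flow API instead of through testpmd; a
minimal sketch of eth / ipv4 / udp with a source-port match, which
exercises the IAVF_SW_INSET_MAC_IPV4_UDP input set from the table above.
Items carrying no spec/mask (ETH and IPV4 here) match any value, and the
destination MAC is supplied on the PF/kernel side, as the parser comments
note.

#include <rte_byteorder.h>
#include <rte_flow.h>

/* eth / ipv4 / udp src is 11 -- sketch only */
static const struct rte_flow_item_udp udp_spec = {
	.hdr = { .src_port = RTE_BE16(11) },
};
static const struct rte_flow_item_udp udp_mask = {
	.hdr = { .src_port = RTE_BE16(0xffff) },
};
static const struct rte_flow_item fsub_udp_pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },	/* dst MAC added by the PF */
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
	{ .type = RTE_FLOW_ITEM_TYPE_UDP,
	  .spec = &udp_spec, .mask = &udp_mask },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};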

* [PATCH v4 4/5] net/iavf: support flow subscription rule
  2022-09-07  4:38 ` [PATCH v4 0/5] support flow subscription Jie Wang
                     ` (2 preceding siblings ...)
  2022-09-07  4:38   ` [PATCH v4 3/5] net/iavf: support flow subscription pattern Jie Wang
@ 2022-09-07  4:38   ` Jie Wang
  2022-09-07  4:38   ` [PATCH v4 5/5] net/iavf: support priority of flow rule Jie Wang
  4 siblings, 0 replies; 40+ messages in thread
From: Jie Wang @ 2022-09-07  4:38 UTC (permalink / raw)
  To: dev
  Cc: qiming.yang, qi.z.zhang, jingjing.wu, beilei.xing, stevex.yang, Jie Wang

Support creating, destroying and validating flow subscription
rules for AVF.

For examples:
testpmd> flow create 0 ingress pattern eth / ipv4 / udp src is 11
          / end actions port_representor port_id 1 / end
testpmd> flow validate 1 ingress pattern eth / ipv4 / tcp src is 22
          / end actions port_representor port_id 1 / end
testpmd> flow destroy 1 rule 0

When a VF subscribes to a rule, the matching packets will be sent to
the VF instead of the PF, and only the VF will receive the packets.

Multiple VFs are allowed to subscribe to the same rule; the packets
will be replicated and received by each subscribing VF.

PF will destroy all subscriptions during VF reset.

Signed-off-by: Jie Wang <jie1x.wang@intel.com>
---
 drivers/net/iavf/iavf.h       |   6 ++
 drivers/net/iavf/iavf_fsub.c  |  75 +++++++++++++++----
 drivers/net/iavf/iavf_vchnl.c | 132 ++++++++++++++++++++++++++++++++++
 3 files changed, 201 insertions(+), 12 deletions(-)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index f79c7f9f6e..26b858f6f0 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -489,4 +489,10 @@ int iavf_ipsec_crypto_request(struct iavf_adapter *adapter,
 extern const struct rte_tm_ops iavf_tm_ops;
 int iavf_get_ptp_cap(struct iavf_adapter *adapter);
 int iavf_get_phc_time(struct iavf_rx_queue *rxq);
+int iavf_flow_sub(struct iavf_adapter *adapter,
+		  struct iavf_fsub_conf *filter);
+int iavf_flow_unsub(struct iavf_adapter *adapter,
+		    struct iavf_fsub_conf *filter);
+int iavf_flow_sub_check(struct iavf_adapter *adapter,
+			struct iavf_fsub_conf *filter);
 #endif /* _IAVF_ETHDEV_H_ */
diff --git a/drivers/net/iavf/iavf_fsub.c b/drivers/net/iavf/iavf_fsub.c
index 66e403d585..28857d7577 100644
--- a/drivers/net/iavf/iavf_fsub.c
+++ b/drivers/net/iavf/iavf_fsub.c
@@ -69,29 +69,80 @@ iavf_pattern_match_item iavf_fsub_pattern_list[] = {
 };
 
 static int
-iavf_fsub_create(__rte_unused struct iavf_adapter *ad,
-		 __rte_unused struct rte_flow *flow,
-		 __rte_unused void *meta,
-		 __rte_unused struct rte_flow_error *error)
+iavf_fsub_create(struct iavf_adapter *ad, struct rte_flow *flow,
+		 void *meta, struct rte_flow_error *error)
 {
+	struct iavf_fsub_conf *filter = meta;
+	struct iavf_fsub_conf *rule;
+	int ret;
+
+	rule = rte_zmalloc("fsub_entry", sizeof(*rule), 0);
+	if (!rule) {
+		rte_flow_error_set(error, ENOMEM,
+				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				"Failed to allocate memory for fsub rule");
+		return -rte_errno;
+	}
+
+	ret = iavf_flow_sub(ad, filter);
+	if (ret) {
+		rte_flow_error_set(error, -ret,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Failed to subscribe flow rule.");
+		goto free_entry;
+	}
+
+	rte_memcpy(rule, filter, sizeof(*rule));
+	flow->rule = rule;
+
+	return ret;
+
+free_entry:
+	rte_free(rule);
 	return -rte_errno;
 }
 
 static int
-iavf_fsub_destroy(__rte_unused struct iavf_adapter *ad,
-		  __rte_unused struct rte_flow *flow,
-		  __rte_unused struct rte_flow_error *error)
+iavf_fsub_destroy(struct iavf_adapter *ad, struct rte_flow *flow,
+		  struct rte_flow_error *error)
 {
-	return -rte_errno;
+	struct iavf_fsub_conf *filter;
+	int ret;
+
+	filter = (struct iavf_fsub_conf *)flow->rule;
+
+	ret = iavf_flow_unsub(ad, filter);
+	if (ret) {
+		rte_flow_error_set(error, -ret,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Failed to unsubscribe flow rule.");
+		return -rte_errno;
+	}
+
+	flow->rule = NULL;
+	rte_free(filter);
+
+	return ret;
 }
 
 static int
-iavf_fsub_validation(__rte_unused struct iavf_adapter *ad,
+iavf_fsub_validation(struct iavf_adapter *ad,
 		     __rte_unused struct rte_flow *flow,
-		     __rte_unused void *meta,
-		     __rte_unused struct rte_flow_error *error)
+		     void *meta,
+		     struct rte_flow_error *error)
 {
-	return -rte_errno;
+	struct iavf_fsub_conf *filter = meta;
+	int ret;
+
+	ret = iavf_flow_sub_check(ad, filter);
+	if (ret) {
+		rte_flow_error_set(error, -ret,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Failed to validate filter rule.");
+		return -rte_errno;
+	}
+
+	return ret;
 };
 
 static int
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 6d84add423..cc0db8d093 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -1534,6 +1534,138 @@ iavf_fdir_check(struct iavf_adapter *adapter,
 	return 0;
 }
 
+int
+iavf_flow_sub(struct iavf_adapter *adapter, struct iavf_fsub_conf *filter)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct virtchnl_flow_sub *fsub_cfg;
+	struct iavf_cmd_info args;
+	int err;
+
+	filter->sub_fltr.vsi_id = vf->vsi_res->vsi_id;
+	filter->sub_fltr.validate_only = 0;
+
+	memset(&args, 0, sizeof(args));
+	args.ops = VIRTCHNL_OP_FLOW_SUBSCRIBE;
+	args.in_args = (uint8_t *)(&filter->sub_fltr);
+	args.in_args_size = sizeof(filter->sub_fltr);
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
+	if (err)
+		PMD_DRV_LOG(ERR, "Failed to execute command of "
+				 "OP_FLOW_SUBSCRIBE");
+
+	fsub_cfg = (struct virtchnl_flow_sub *)args.out_buffer;
+	filter->flow_id = fsub_cfg->flow_id;
+
+	if (fsub_cfg->status == VIRTCHNL_FSUB_SUCCESS) {
+		PMD_DRV_LOG(INFO, "Succeed in adding rule request by PF");
+	} else if (fsub_cfg->status == VIRTCHNL_FSUB_FAILURE_RULE_NORESOURCE) {
+		PMD_DRV_LOG(ERR, "Failed to add rule request due to no hw "
+				 "resource");
+		err = -1;
+	} else if (fsub_cfg->status == VIRTCHNL_FSUB_FAILURE_RULE_EXIST) {
+		PMD_DRV_LOG(ERR, "Failed to add rule request because the "
+				 "rule already exists");
+		err = -1;
+	} else if (fsub_cfg->status == VIRTCHNL_FSUB_FAILURE_RULE_INVALID) {
+		PMD_DRV_LOG(ERR, "Failed to add rule request because the hw "
+				 "doesn't support it");
+		err = -1;
+	} else {
+		PMD_DRV_LOG(ERR, "Failed to add rule request due to other "
+				 "reasons");
+		err = -1;
+	}
+
+	return err;
+}
+
+int
+iavf_flow_unsub(struct iavf_adapter *adapter, struct iavf_fsub_conf *filter)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct virtchnl_flow_unsub *unsub_cfg;
+	struct iavf_cmd_info args;
+	int err;
+
+	filter->unsub_fltr.vsi_id = vf->vsi_res->vsi_id;
+	filter->unsub_fltr.flow_id = filter->flow_id;
+
+	memset(&args, 0, sizeof(args));
+	args.ops = VIRTCHNL_OP_FLOW_UNSUBSCRIBE;
+	args.in_args = (uint8_t *)(&filter->unsub_fltr);
+	args.in_args_size = sizeof(filter->unsub_fltr);
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
+	if (err)
+		PMD_DRV_LOG(ERR, "Failed to execute command of "
+				 "OP_FLOW_UNSUBSCRIBE");
+
+	unsub_cfg = (struct virtchnl_flow_unsub *)args.out_buffer;
+
+	if (unsub_cfg->status == VIRTCHNL_FSUB_SUCCESS) {
+		PMD_DRV_LOG(INFO, "Succeed in deleting rule request by PF");
+	} else if (unsub_cfg->status == VIRTCHNL_FSUB_FAILURE_RULE_NONEXIST) {
+		PMD_DRV_LOG(ERR, "Failed to delete rule request because this "
+				 "rule doesn't exist");
+		err = -1;
+	} else {
+		PMD_DRV_LOG(ERR, "Failed to delete rule request due to other "
+				 "reasons");
+		err = -1;
+	}
+
+	return err;
+}
+
+int
+iavf_flow_sub_check(struct iavf_adapter *adapter,
+		    struct iavf_fsub_conf *filter)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct virtchnl_flow_sub *fsub_cfg;
+
+	struct iavf_cmd_info args;
+	int err;
+
+	filter->sub_fltr.vsi_id = vf->vsi_res->vsi_id;
+	filter->sub_fltr.validate_only = 1;
+
+	args.ops = VIRTCHNL_OP_FLOW_SUBSCRIBE;
+	args.in_args = (uint8_t *)(&filter->sub_fltr);
+	args.in_args_size = sizeof(filter->sub_fltr);
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
+	if (err) {
+		PMD_DRV_LOG(ERR, "Failed to check flow subscription rule");
+		return err;
+	}
+
+	fsub_cfg = (struct virtchnl_flow_sub *)args.out_buffer;
+
+	if (fsub_cfg->status == VIRTCHNL_FSUB_SUCCESS) {
+		PMD_DRV_LOG(INFO, "Succeed in checking rule request by PF");
+	} else if (fsub_cfg->status == VIRTCHNL_FSUB_FAILURE_RULE_INVALID) {
+		PMD_DRV_LOG(ERR, "Failed to check rule request because "
+				 "parameter validation failed or the HW "
+				 "doesn't support it");
+		err = -1;
+	} else {
+		PMD_DRV_LOG(ERR, "Failed to check rule request due to other "
+				 "reasons");
+		err = -1;
+	}
+
+	return err;
+}
+
 int
 iavf_add_del_rss_cfg(struct iavf_adapter *adapter,
 		     struct virtchnl_rss_cfg *rss_cfg, bool add)
-- 
2.25.1


^ permalink raw reply	[flat|nested] 40+ messages in thread
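
The testpmd commands above map onto the rte_flow C API roughly as in the
sketch below, assuming port_id is the subscribing VF's own ethdev port and
pattern is an item array such as the fsub_udp_pattern sketched earlier.
Since iavf_fsub_parse_action() rejects any ethdev port id other than the
VF's own, the same id appears in both the call and the PORT_REPRESENTOR
action.

static struct rte_flow *
subscribe_flow(uint16_t port_id, const struct rte_flow_item *pattern)
{
	struct rte_flow_action_ethdev act_port = { .port_id = port_id };
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR,
		  .conf = &act_port },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	const struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_error err;

	/* maps to a validate_only = 1 request on the virtchnl side */
	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) != 0)
		return NULL;
	/* maps to validate_only = 0; the PF hands back the flow_id */
	return rte_flow_create(port_id, &attr, pattern, actions, &err);
}

The rule is removed again with rte_flow_destroy(port_id, flow, &err),
which ends up issuing VIRTCHNL_OP_FLOW_UNSUBSCRIBE with the stored
flow_id.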

* [PATCH v4 5/5] net/iavf: support priority of flow rule
  2022-09-07  4:38 ` [PATCH v4 0/5] support flow subscription Jie Wang
                     ` (3 preceding siblings ...)
  2022-09-07  4:38   ` [PATCH v4 4/5] net/iavf: support flow subscription rule Jie Wang
@ 2022-09-07  4:38   ` Jie Wang
  4 siblings, 0 replies; 40+ messages in thread
From: Jie Wang @ 2022-09-07  4:38 UTC (permalink / raw)
  To: dev
  Cc: qiming.yang, qi.z.zhang, jingjing.wu, beilei.xing, stevex.yang, Jie Wang

Add flow rule attribute "priority" support for AVF.

Lower values denote higher priority; the highest priority for
a flow rule is 0.

All subscription rules will have a lower priority than the rules
created by the host.

Signed-off-by: Jie Wang <jie1x.wang@intel.com>
---
 drivers/net/iavf/iavf_fdir.c         |  4 ++++
 drivers/net/iavf/iavf_fsub.c         |  2 +-
 drivers/net/iavf/iavf_generic_flow.c | 23 +++++++++++++----------
 drivers/net/iavf/iavf_generic_flow.h |  1 +
 drivers/net/iavf/iavf_hash.c         |  5 +++++
 drivers/net/iavf/iavf_ipsec_crypto.c | 16 ++++++++++------
 6 files changed, 34 insertions(+), 17 deletions(-)

diff --git a/drivers/net/iavf/iavf_fdir.c b/drivers/net/iavf/iavf_fdir.c
index a397047fdb..8f80873925 100644
--- a/drivers/net/iavf/iavf_fdir.c
+++ b/drivers/net/iavf/iavf_fdir.c
@@ -1583,6 +1583,7 @@ iavf_fdir_parse(struct iavf_adapter *ad,
 		uint32_t array_len,
 		const struct rte_flow_item pattern[],
 		const struct rte_flow_action actions[],
+		uint32_t priority,
 		void **meta,
 		struct rte_flow_error *error)
 {
@@ -1593,6 +1594,9 @@ iavf_fdir_parse(struct iavf_adapter *ad,
 
 	memset(filter, 0, sizeof(*filter));
 
+	if (priority >= 1)
+		return -rte_errno;
+
 	item = iavf_search_pattern_match_item(pattern, array, array_len, error);
 	if (!item)
 		return -rte_errno;
diff --git a/drivers/net/iavf/iavf_fsub.c b/drivers/net/iavf/iavf_fsub.c
index 28857d7577..3bb6c30d3c 100644
--- a/drivers/net/iavf/iavf_fsub.c
+++ b/drivers/net/iavf/iavf_fsub.c
@@ -649,13 +649,13 @@ iavf_fsub_parse(struct iavf_adapter *ad,
 		uint32_t array_len,
 		const struct rte_flow_item pattern[],
 		const struct rte_flow_action actions[],
+		uint32_t priority,
 		void **meta,
 		struct rte_flow_error *error)
 {
 	struct iavf_fsub_conf *filter;
 	struct iavf_pattern_match_item *pattern_match_item = NULL;
 	int ret = 0;
-	uint32_t priority = 0;
 
 	filter = rte_zmalloc(NULL, sizeof(*filter), 0);
 	if (!filter) {
diff --git a/drivers/net/iavf/iavf_generic_flow.c b/drivers/net/iavf/iavf_generic_flow.c
index b04614ba6e..f33c764764 100644
--- a/drivers/net/iavf/iavf_generic_flow.c
+++ b/drivers/net/iavf/iavf_generic_flow.c
@@ -1785,6 +1785,7 @@ enum rte_flow_item_type iavf_pattern_eth_ipv6_udp_l2tpv2_ppp_ipv6_tcp[] = {
 typedef struct iavf_flow_engine * (*parse_engine_t)(struct iavf_adapter *ad,
 		struct rte_flow *flow,
 		struct iavf_parser_list *parser_list,
+		uint32_t priority,
 		const struct rte_flow_item pattern[],
 		const struct rte_flow_action actions[],
 		struct rte_flow_error *error);
@@ -1951,11 +1952,11 @@ iavf_flow_valid_attr(const struct rte_flow_attr *attr,
 		return -rte_errno;
 	}
 
-	/* Not supported */
-	if (attr->priority) {
+	/* support priority for flow subscribe */
+	if (attr->priority > 1) {
 		rte_flow_error_set(error, EINVAL,
 				RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
-				attr, "Not support priority.");
+				attr, "Only support priority 0 and 1.");
 		return -rte_errno;
 	}
 
@@ -2098,6 +2099,7 @@ static struct iavf_flow_engine *
 iavf_parse_engine_create(struct iavf_adapter *ad,
 		struct rte_flow *flow,
 		struct iavf_parser_list *parser_list,
+		uint32_t priority,
 		const struct rte_flow_item pattern[],
 		const struct rte_flow_action actions[],
 		struct rte_flow_error *error)
@@ -2111,7 +2113,7 @@ iavf_parse_engine_create(struct iavf_adapter *ad,
 		if (parser_node->parser->parse_pattern_action(ad,
 				parser_node->parser->array,
 				parser_node->parser->array_len,
-				pattern, actions, &meta, error) < 0)
+				pattern, actions, priority, &meta, error) < 0)
 			continue;
 
 		engine = parser_node->parser->engine;
@@ -2127,6 +2129,7 @@ static struct iavf_flow_engine *
 iavf_parse_engine_validate(struct iavf_adapter *ad,
 		struct rte_flow *flow,
 		struct iavf_parser_list *parser_list,
+		uint32_t priority,
 		const struct rte_flow_item pattern[],
 		const struct rte_flow_action actions[],
 		struct rte_flow_error *error)
@@ -2140,7 +2143,7 @@ iavf_parse_engine_validate(struct iavf_adapter *ad,
 		if (parser_node->parser->parse_pattern_action(ad,
 				parser_node->parser->array,
 				parser_node->parser->array_len,
-				pattern, actions, &meta,  error) < 0)
+				pattern, actions, priority, &meta, error) < 0)
 			continue;
 
 		engine = parser_node->parser->engine;
@@ -2201,18 +2204,18 @@ iavf_flow_process_filter(struct rte_eth_dev *dev,
 	if (ret)
 		return ret;
 
-	*engine = iavf_parse_engine(ad, flow, &vf->rss_parser_list, pattern,
-				    actions, error);
+	*engine = iavf_parse_engine(ad, flow, &vf->rss_parser_list,
+				    attr->priority, pattern, actions, error);
 	if (*engine)
 		return 0;
 
-	*engine = iavf_parse_engine(ad, flow, &vf->dist_parser_list, pattern,
-				    actions, error);
+	*engine = iavf_parse_engine(ad, flow, &vf->dist_parser_list,
+				    attr->priority, pattern, actions, error);
 	if (*engine)
 		return 0;
 
 	*engine = iavf_parse_engine(ad, flow, &vf->ipsec_crypto_parser_list,
-			pattern, actions, error);
+				    attr->priority, pattern, actions, error);
 	if (*engine)
 		return 0;
 
diff --git a/drivers/net/iavf/iavf_generic_flow.h b/drivers/net/iavf/iavf_generic_flow.h
index 448facffa5..60d8ab02b4 100644
--- a/drivers/net/iavf/iavf_generic_flow.h
+++ b/drivers/net/iavf/iavf_generic_flow.h
@@ -471,6 +471,7 @@ typedef int (*parse_pattern_action_t)(struct iavf_adapter *ad,
 		uint32_t array_len,
 		const struct rte_flow_item pattern[],
 		const struct rte_flow_action actions[],
+		uint32_t priority,
 		void **meta,
 		struct rte_flow_error *error);
 
diff --git a/drivers/net/iavf/iavf_hash.c b/drivers/net/iavf/iavf_hash.c
index 42df7c4e48..dea4e0aa0a 100644
--- a/drivers/net/iavf/iavf_hash.c
+++ b/drivers/net/iavf/iavf_hash.c
@@ -86,6 +86,7 @@ iavf_hash_parse_pattern_action(struct iavf_adapter *ad,
 			       uint32_t array_len,
 			       const struct rte_flow_item pattern[],
 			       const struct rte_flow_action actions[],
+			       uint32_t priority,
 			       void **meta,
 			       struct rte_flow_error *error);
 
@@ -1509,6 +1510,7 @@ iavf_hash_parse_pattern_action(__rte_unused struct iavf_adapter *ad,
 			       uint32_t array_len,
 			       const struct rte_flow_item pattern[],
 			       const struct rte_flow_action actions[],
+			       uint32_t priority,
 			       void **meta,
 			       struct rte_flow_error *error)
 {
@@ -1517,6 +1519,9 @@ iavf_hash_parse_pattern_action(__rte_unused struct iavf_adapter *ad,
 	uint64_t phint = IAVF_PHINT_NONE;
 	int ret = 0;
 
+	if (priority >= 1)
+		return -rte_errno;
+
 	rss_meta_ptr = rte_zmalloc(NULL, sizeof(*rss_meta_ptr), 0);
 	if (!rss_meta_ptr) {
 		rte_flow_error_set(error, EINVAL,
diff --git a/drivers/net/iavf/iavf_ipsec_crypto.c b/drivers/net/iavf/iavf_ipsec_crypto.c
index 1d465b4419..cec1b968fe 100644
--- a/drivers/net/iavf/iavf_ipsec_crypto.c
+++ b/drivers/net/iavf/iavf_ipsec_crypto.c
@@ -1933,16 +1933,20 @@ static struct iavf_flow_engine iavf_ipsec_flow_engine = {
 
 static int
 iavf_ipsec_flow_parse(struct iavf_adapter *ad,
-		       struct iavf_pattern_match_item *array,
-		       uint32_t array_len,
-		       const struct rte_flow_item pattern[],
-		       const struct rte_flow_action actions[],
-		       void **meta,
-		       struct rte_flow_error *error)
+		      struct iavf_pattern_match_item *array,
+		      uint32_t array_len,
+		      const struct rte_flow_item pattern[],
+		      const struct rte_flow_action actions[],
+		      uint32_t priority,
+		      void **meta,
+		      struct rte_flow_error *error)
 {
 	struct iavf_pattern_match_item *item = NULL;
 	int ret = -1;
 
+	if (priority >= 1)
+		return -rte_errno;
+
 	item = iavf_search_pattern_match_item(pattern, array, array_len, error);
 	if (item && item->meta) {
 		uint32_t type = (uint64_t)(item->meta);
-- 
2.25.1


^ permalink raw reply	[flat|nested] 40+ messages in thread
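
On the application side the new attribute is simply the priority field of
struct rte_flow_attr; a minimal sketch: priority 0 keeps the pre-existing
FDIR/RSS/ipsec behavior, while priority 1 is accepted only by the flow
subscription parser, matching the checks added in this patch.

/* subscription rule at the lower priority level; sketch only */
const struct rte_flow_attr attr = {
	.ingress = 1,
	.priority = 1,	/* 0 is highest; only the FSUB parser accepts 1 */
};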

* [PATCH v5 0/5] support flow subscription
  2022-08-09  6:21 [PATCH 0/2] support flow subscription Jie Wang
                   ` (5 preceding siblings ...)
  2022-09-07  4:38 ` [PATCH v4 0/5] support flow subscription Jie Wang
@ 2022-09-07  5:10 ` Jie Wang
  2022-09-07  5:10   ` [PATCH v5 1/5] common/iavf: " Jie Wang
                     ` (5 more replies)
  6 siblings, 6 replies; 40+ messages in thread
From: Jie Wang @ 2022-09-07  5:10 UTC (permalink / raw)
  To: dev
  Cc: qiming.yang, qi.z.zhang, jingjing.wu, beilei.xing, stevex.yang, Jie Wang

Add support for AVF to subscribe to a flow from the PF.

--
v4:
 * replace flow action represented_port with port_representor.
 * update commit log and rebase.
v3:
 * fix eth layer inputset.
 * rebase.
v2:
 * split v1 patch 2/2 to 4 small patches.
 * remove rule action RTE_FLOW_ACTION_TYPE_VF and add
   RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT.

Jie Wang (5):
  common/iavf: support flow subscription
  net/iavf: add flow subscription to AVF
  net/iavf: support flow subscription pattern
  net/iavf: support flow subscription rule
  net/iavf: support priority of flow rule

 doc/guides/rel_notes/release_22_11.rst |   4 +
 drivers/common/iavf/virtchnl.h         | 104 +++-
 drivers/net/iavf/iavf.h                |  13 +
 drivers/net/iavf/iavf_fdir.c           |   4 +
 drivers/net/iavf/iavf_fsub.c           | 745 +++++++++++++++++++++++++
 drivers/net/iavf/iavf_generic_flow.c   |  40 +-
 drivers/net/iavf/iavf_generic_flow.h   |   2 +
 drivers/net/iavf/iavf_hash.c           |   5 +
 drivers/net/iavf/iavf_ipsec_crypto.c   |  16 +-
 drivers/net/iavf/iavf_vchnl.c          | 133 +++++
 drivers/net/iavf/meson.build           |   1 +
 11 files changed, 1046 insertions(+), 21 deletions(-)
 create mode 100644 drivers/net/iavf/iavf_fsub.c

-- 
2.25.1


^ permalink raw reply	[flat|nested] 40+ messages in thread

* [PATCH v5 1/5] common/iavf: support flow subscription
  2022-09-07  5:10 ` [PATCH v5 0/5] support flow subscription Jie Wang
@ 2022-09-07  5:10   ` Jie Wang
  2022-09-07  5:10   ` [PATCH v5 2/5] net/iavf: add flow subscription to AVF Jie Wang
                     ` (4 subsequent siblings)
  5 siblings, 0 replies; 40+ messages in thread
From: Jie Wang @ 2022-09-07  5:10 UTC (permalink / raw)
  To: dev
  Cc: qiming.yang, qi.z.zhang, jingjing.wu, beilei.xing, stevex.yang, Jie Wang

VF is able to subscribe to a flow from the PF by VIRTCHNL_FLOW_SUBSCRIBE.

PF is expected to offload a rule to hardware which will redirect
the packets that match the required pattern to this VF.

Only a flow whose dst mac address is the PF's mac address can be subscribed.

VIRTCHNL_VF_OFFLOAD_FSUB_PF is used for flow subscription capability
negotiation, and only a trusted VF can be granted this capability.

A flow can be unsubscribed by VIRTCHNL_FLOW_UNSUBSCRIBE.

Signed-off-by: Jie Wang <jie1x.wang@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
 drivers/common/iavf/virtchnl.h | 104 +++++++++++++++++++++++++++++++--
 1 file changed, 100 insertions(+), 4 deletions(-)

diff --git a/drivers/common/iavf/virtchnl.h b/drivers/common/iavf/virtchnl.h
index f123daec8e..e02eec4935 100644
--- a/drivers/common/iavf/virtchnl.h
+++ b/drivers/common/iavf/virtchnl.h
@@ -168,6 +168,8 @@ enum virtchnl_ops {
 	VIRTCHNL_OP_MAP_QUEUE_VECTOR = 111,
 	VIRTCHNL_OP_CONFIG_QUEUE_BW = 112,
 	VIRTCHNL_OP_CONFIG_QUANTA = 113,
+	VIRTCHNL_OP_FLOW_SUBSCRIBE = 114,
+	VIRTCHNL_OP_FLOW_UNSUBSCRIBE = 115,
 	VIRTCHNL_OP_MAX,
 };
 
@@ -282,6 +284,10 @@ static inline const char *virtchnl_op_str(enum virtchnl_ops v_opcode)
 		return "VIRTCHNL_OP_1588_PTP_GET_CAPS";
 	case VIRTCHNL_OP_1588_PTP_GET_TIME:
 		return "VIRTCHNL_OP_1588_PTP_GET_TIME";
+	case VIRTCHNL_OP_FLOW_SUBSCRIBE:
+		return "VIRTCHNL_OP_FLOW_SUBSCRIBE";
+	case VIRTCHNL_OP_FLOW_UNSUBSCRIBE:
+		return "VIRTCHNL_OP_FLOW_UNSUBSCRIBE";
 	case VIRTCHNL_OP_MAX:
 		return "VIRTCHNL_OP_MAX";
 	default:
@@ -401,6 +407,7 @@ VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
 #define VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO	BIT(8)
 #define VIRTCHNL_VF_LARGE_NUM_QPAIRS		BIT(9)
 #define VIRTCHNL_VF_OFFLOAD_CRC			BIT(10)
+#define VIRTCHNL_VF_OFFLOAD_FSUB_PF		BIT(14)
 #define VIRTCHNL_VF_OFFLOAD_VLAN_V2		BIT(15)
 #define VIRTCHNL_VF_OFFLOAD_VLAN		BIT(16)
 #define VIRTCHNL_VF_OFFLOAD_RX_POLLING		BIT(17)
@@ -1503,6 +1510,7 @@ enum virtchnl_vfr_states {
 };
 
 #define VIRTCHNL_MAX_NUM_PROTO_HDRS	32
+#define VIRTCHNL_MAX_NUM_PROTO_HDRS_W_MSK	16
 #define VIRTCHNL_MAX_SIZE_RAW_PACKET	1024
 #define PROTO_HDR_SHIFT			5
 #define PROTO_HDR_FIELD_START(proto_hdr_type) \
@@ -1695,6 +1703,22 @@ struct virtchnl_proto_hdr {
 
 VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_proto_hdr);
 
+struct virtchnl_proto_hdr_w_msk {
+	/* see enum virtchnl_proto_hdr_type */
+	s32 type;
+	u32 pad;
+	/**
+	 * binary buffer in network order for specific header type.
+	 * For example, if type = VIRTCHNL_PROTO_HDR_IPV4, a IPv4
+	 * For example, if type = VIRTCHNL_PROTO_HDR_IPV4, an IPv4
+	 */
+	u8 buffer_spec[64];
+	/* binary buffer for bit-mask applied to specific header type */
+	u8 buffer_mask[64];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(136, virtchnl_proto_hdr_w_msk);
+
 struct virtchnl_proto_hdrs {
 	u8 tunnel_level;
 	/**
@@ -1706,11 +1730,18 @@ struct virtchnl_proto_hdrs {
 	 */
 	int count;
 	/**
-	 * number of proto layers, must < VIRTCHNL_MAX_NUM_PROTO_HDRS
-	 * must be 0 for a raw packet request.
+	 * count must be <=
+	 * VIRTCHNL_MAX_NUM_PROTO_HDRS + VIRTCHNL_MAX_NUM_PROTO_HDRS_W_MSK
+	 * count = 0 :					select raw
+	 * 1 <= count <= VIRTCHNL_MAX_NUM_PROTO_HDRS :	select proto_hdr
+	 * count > VIRTCHNL_MAX_NUM_PROTO_HDRS :	select proto_hdr_w_msk
+	 * number of proto_hdr_w_msk entries = count - VIRTCHNL_MAX_NUM_PROTO_HDRS
 	 */
 	union {
-		struct virtchnl_proto_hdr proto_hdr[VIRTCHNL_MAX_NUM_PROTO_HDRS];
+		struct virtchnl_proto_hdr
+			proto_hdr[VIRTCHNL_MAX_NUM_PROTO_HDRS];
+		struct virtchnl_proto_hdr_w_msk
+			proto_hdr_w_msk[VIRTCHNL_MAX_NUM_PROTO_HDRS_W_MSK];
 		struct {
 			u16 pkt_len;
 			u8 spec[VIRTCHNL_MAX_SIZE_RAW_PACKET];
@@ -1731,7 +1762,7 @@ struct virtchnl_rss_cfg {
 
 VIRTCHNL_CHECK_STRUCT_LEN(2444, virtchnl_rss_cfg);
 
-/* action configuration for FDIR */
+/* action configuration for FDIR and FSUB */
 struct virtchnl_filter_action {
 	/* see enum virtchnl_action type */
 	s32 type;
@@ -1849,6 +1880,65 @@ struct virtchnl_fdir_del {
 
 VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_fdir_del);
 
+/* Status returned to VF after VF requests FSUB commands
+ * VIRTCHNL_FSUB_SUCCESS
+ * A VF FLOW-related request was successfully executed by the PF.
+ * The request can be OP_FLOW_SUBSCRIBE/UNSUBSCRIBE.
+ *
+ * VIRTCHNL_FSUB_FAILURE_RULE_NORESOURCE
+ * OP_FLOW_SUBSCRIBE request failed due to lack of hardware resources.
+ *
+ * VIRTCHNL_FSUB_FAILURE_RULE_EXIST
+ * OP_FLOW_SUBSCRIBE request failed because the rule already exists.
+ *
+ * VIRTCHNL_FSUB_FAILURE_RULE_NONEXIST
+ * OP_FLOW_UNSUBSCRIBE request failed because this rule doesn't exist.
+ *
+ * VIRTCHNL_FSUB_FAILURE_RULE_INVALID
+ * OP_FLOW_SUBSCRIBE request failed because parameter validation failed
+ * or the HW doesn't support it.
+ */
+enum virtchnl_fsub_prgm_status {
+	VIRTCHNL_FSUB_SUCCESS = 0,
+	VIRTCHNL_FSUB_FAILURE_RULE_NORESOURCE,
+	VIRTCHNL_FSUB_FAILURE_RULE_EXIST,
+	VIRTCHNL_FSUB_FAILURE_RULE_NONEXIST,
+	VIRTCHNL_FSUB_FAILURE_RULE_INVALID,
+};
+
+/* VIRTCHNL_OP_FLOW_SUBSCRIBE
+ * VF sends this request to PF by filling out vsi_id,
+ * validate_only, priority, proto_hdrs and actions.
+ * PF will return the flow_id if the request is successfully
+ * executed and will return its status to the VF.
+ */
+struct virtchnl_flow_sub {
+	u16 vsi_id; /* INPUT */
+	u8 validate_only; /* INPUT */
+	u8 priority; /* INPUT */
+	u32 flow_id; /* OUTPUT */
+	struct virtchnl_proto_hdrs proto_hdrs; /* INPUT */
+	struct virtchnl_filter_action_set actions; /* INPUT */
+	/* see enum virtchnl_fsub_prgm_status; OUTPUT */
+	s32 status;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(2616, virtchnl_flow_sub);
+
+/* VIRTCHNL_OP_FLOW_UNSUBSCRIBE
+ * VF sends this request to PF by filling out vsi_id
+ * and flow_id. PF will return status to VF.
+ */
+struct virtchnl_flow_unsub {
+	u16 vsi_id; /* INPUT */
+	u16 pad;
+	u32 flow_id; /* INPUT */
+	/* see enum virtchnl_fsub_prgm_status; OUTPUT */
+	s32 status;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_flow_unsub);
+
 /* VIRTCHNL_OP_GET_QOS_CAPS
  * VF sends this message to get its QoS Caps, such as
  * TC number, Arbiter and Bandwidth.
@@ -2318,6 +2408,12 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
 	case VIRTCHNL_OP_DEL_FDIR_FILTER:
 		valid_len = sizeof(struct virtchnl_fdir_del);
 		break;
+	case VIRTCHNL_OP_FLOW_SUBSCRIBE:
+		valid_len = sizeof(struct virtchnl_flow_sub);
+		break;
+	case VIRTCHNL_OP_FLOW_UNSUBSCRIBE:
+		valid_len = sizeof(struct virtchnl_flow_unsub);
+		break;
 	case VIRTCHNL_OP_GET_QOS_CAPS:
 		break;
 	case VIRTCHNL_OP_CONFIG_QUEUE_TC_MAP:
-- 
2.25.1


^ permalink raw reply	[flat|nested] 40+ messages in thread
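
The count encoding in struct virtchnl_proto_hdrs above is easy to misread,
so a worked example: to carry three masked headers (say eth/ipv4/udp) in
proto_hdr_w_msk[], the sender stores them at indexes 0..2 and then sets
count = 3 + VIRTCHNL_MAX_NUM_PROTO_HDRS, which is exactly what the
driver's hdrs->count += VIRTCHNL_MAX_NUM_PROTO_HDRS does after parsing; a
sketch:

struct virtchnl_proto_hdrs hdrs = {0};

/* fill hdrs.proto_hdr_w_msk[0..2] with ETH, IPV4, UDP ... */
hdrs.count = 3;				/* three masked layers */
hdrs.count += VIRTCHNL_MAX_NUM_PROTO_HDRS;	/* select proto_hdr_w_msk[] */

/* receiver side: number of masked headers */
int nb_w_msk = hdrs.count - VIRTCHNL_MAX_NUM_PROTO_HDRS;	/* == 3 */

Note also that the PF grants VIRTCHNL_VF_OFFLOAD_FSUB_PF only to a trusted
VF, so on a Linux kernel PF the VF typically has to be marked trusted
first, e.g. with "ip link set <pf-netdev> vf <n> trust on".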

* [PATCH v5 2/5] net/iavf: add flow subscription to AVF
  2022-09-07  5:10 ` [PATCH v5 0/5] support flow subscription Jie Wang
  2022-09-07  5:10   ` [PATCH v5 1/5] common/iavf: " Jie Wang
@ 2022-09-07  5:10   ` Jie Wang
  2022-09-07  5:10   ` [PATCH v5 3/5] net/iavf: support flow subscription pattern Jie Wang
                     ` (3 subsequent siblings)
  5 siblings, 0 replies; 40+ messages in thread
From: Jie Wang @ 2022-09-07  5:10 UTC (permalink / raw)
  To: dev
  Cc: qiming.yang, qi.z.zhang, jingjing.wu, beilei.xing, stevex.yang, Jie Wang

Add the skeletal code of flow subscription to the AVF driver.

Signed-off-by: Jie Wang <jie1x.wang@intel.com>
---
 doc/guides/rel_notes/release_22_11.rst |   4 +
 drivers/net/iavf/iavf_fsub.c           | 112 +++++++++++++++++++++++++
 drivers/net/iavf/iavf_generic_flow.c   |  17 +++-
 drivers/net/iavf/iavf_generic_flow.h   |   1 +
 drivers/net/iavf/iavf_vchnl.c          |   1 +
 drivers/net/iavf/meson.build           |   1 +
 6 files changed, 135 insertions(+), 1 deletion(-)
 create mode 100644 drivers/net/iavf/iavf_fsub.c

diff --git a/doc/guides/rel_notes/release_22_11.rst b/doc/guides/rel_notes/release_22_11.rst
index 8c021cf050..bb77a03e24 100644
--- a/doc/guides/rel_notes/release_22_11.rst
+++ b/doc/guides/rel_notes/release_22_11.rst
@@ -55,6 +55,10 @@ New Features
      Also, make sure to start the actual text at the margin.
      =======================================================
 
+* **Updated Intel iavf driver.**
+
+  * Added flow subscription support.
+
 
 Removed Items
 -------------
diff --git a/drivers/net/iavf/iavf_fsub.c b/drivers/net/iavf/iavf_fsub.c
new file mode 100644
index 0000000000..17f9bb2976
--- /dev/null
+++ b/drivers/net/iavf/iavf_fsub.c
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2022 Intel Corporation
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_eth_ctrl.h>
+#include <rte_tailq.h>
+#include <rte_flow_driver.h>
+#include <rte_flow.h>
+#include <iavf.h>
+#include "iavf_generic_flow.h"
+
+
+static struct iavf_flow_parser iavf_fsub_parser;
+
+static struct iavf_pattern_match_item iavf_fsub_pattern_list[] = {};
+
+static int
+iavf_fsub_create(__rte_unused struct iavf_adapter *ad,
+		 __rte_unused struct rte_flow *flow,
+		 __rte_unused void *meta,
+		 __rte_unused struct rte_flow_error *error)
+{
+	return -rte_errno;
+}
+
+static int
+iavf_fsub_destroy(__rte_unused struct iavf_adapter *ad,
+		  __rte_unused struct rte_flow *flow,
+		  __rte_unused struct rte_flow_error *error)
+{
+	return -rte_errno;
+}
+
+static int
+iavf_fsub_validation(__rte_unused struct iavf_adapter *ad,
+		     __rte_unused struct rte_flow *flow,
+		     __rte_unused void *meta,
+		     __rte_unused struct rte_flow_error *error)
+{
+	return -rte_errno;
+};
+
+static int
+iavf_fsub_parse(__rte_unused struct iavf_adapter *ad,
+		__rte_unused struct iavf_pattern_match_item *array,
+		__rte_unused uint32_t array_len,
+		__rte_unused const struct rte_flow_item pattern[],
+		__rte_unused const struct rte_flow_action actions[],
+		__rte_unused void **meta,
+		__rte_unused struct rte_flow_error *error)
+{
+	return -rte_errno;
+}
+
+static int
+iavf_fsub_init(struct iavf_adapter *ad)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
+	struct iavf_flow_parser *parser;
+
+	if (!vf->vf_res)
+		return -EINVAL;
+
+	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_FSUB_PF)
+		parser = &iavf_fsub_parser;
+	else
+		return -ENOTSUP;
+
+	return iavf_register_parser(parser, ad);
+}
+
+static void
+iavf_fsub_uninit(struct iavf_adapter *ad)
+{
+	iavf_unregister_parser(&iavf_fsub_parser, ad);
+}
+
+static struct
+iavf_flow_engine iavf_fsub_engine = {
+	.init = iavf_fsub_init,
+	.uninit = iavf_fsub_uninit,
+	.create = iavf_fsub_create,
+	.destroy = iavf_fsub_destroy,
+	.validation = iavf_fsub_validation,
+	.type = IAVF_FLOW_ENGINE_FSUB,
+};
+
+static struct
+iavf_flow_parser iavf_fsub_parser = {
+	.engine = &iavf_fsub_engine,
+	.array = iavf_fsub_pattern_list,
+	.array_len = RTE_DIM(iavf_fsub_pattern_list),
+	.parse_pattern_action = iavf_fsub_parse,
+	.stage = IAVF_FLOW_STAGE_DISTRIBUTOR,
+};
+
+RTE_INIT(iavf_fsub_engine_init)
+{
+	iavf_register_flow_engine(&iavf_fsub_engine);
+}
diff --git a/drivers/net/iavf/iavf_generic_flow.c b/drivers/net/iavf/iavf_generic_flow.c
index e1a611e319..b04614ba6e 100644
--- a/drivers/net/iavf/iavf_generic_flow.c
+++ b/drivers/net/iavf/iavf_generic_flow.c
@@ -1866,6 +1866,8 @@ iavf_register_parser(struct iavf_flow_parser *parser,
 {
 	struct iavf_parser_list *list = NULL;
 	struct iavf_flow_parser_node *parser_node;
+	struct iavf_flow_parser_node *existing_node;
+	void *temp;
 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
 
 	parser_node = rte_zmalloc("iavf_parser", sizeof(*parser_node), 0);
@@ -1880,14 +1882,26 @@ iavf_register_parser(struct iavf_flow_parser *parser,
 		TAILQ_INSERT_TAIL(list, parser_node, node);
 	} else if (parser->engine->type == IAVF_FLOW_ENGINE_FDIR) {
 		list = &vf->dist_parser_list;
+		RTE_TAILQ_FOREACH_SAFE(existing_node, list, node, temp) {
+			if (existing_node->parser->engine->type ==
+			    IAVF_FLOW_ENGINE_FSUB) {
+				TAILQ_INSERT_AFTER(list, existing_node,
+						   parser_node, node);
+				goto DONE;
+			}
+		}
 		TAILQ_INSERT_HEAD(list, parser_node, node);
 	} else if (parser->engine->type == IAVF_FLOW_ENGINE_IPSEC_CRYPTO) {
 		list = &vf->ipsec_crypto_parser_list;
 		TAILQ_INSERT_HEAD(list, parser_node, node);
+	} else if (parser->engine->type == IAVF_FLOW_ENGINE_FSUB) {
+		list = &vf->dist_parser_list;
+		TAILQ_INSERT_HEAD(list, parser_node, node);
 	} else {
 		return -EINVAL;
 	}
 
+DONE:
 	return 0;
 }
 
@@ -1902,7 +1916,8 @@ iavf_unregister_parser(struct iavf_flow_parser *parser,
 
 	if (parser->engine->type == IAVF_FLOW_ENGINE_HASH)
 		list = &vf->rss_parser_list;
-	else if (parser->engine->type == IAVF_FLOW_ENGINE_FDIR)
+	else if ((parser->engine->type == IAVF_FLOW_ENGINE_FDIR) ||
+		 (parser->engine->type == IAVF_FLOW_ENGINE_FSUB))
 		list = &vf->dist_parser_list;
 
 	if (list == NULL)
diff --git a/drivers/net/iavf/iavf_generic_flow.h b/drivers/net/iavf/iavf_generic_flow.h
index 52eb1caf29..448facffa5 100644
--- a/drivers/net/iavf/iavf_generic_flow.h
+++ b/drivers/net/iavf/iavf_generic_flow.h
@@ -480,6 +480,7 @@ enum iavf_flow_engine_type {
 	IAVF_FLOW_ENGINE_IPSEC_CRYPTO,
 	IAVF_FLOW_ENGINE_FDIR,
 	IAVF_FLOW_ENGINE_HASH,
+	IAVF_FLOW_ENGINE_FSUB,
 	IAVF_FLOW_ENGINE_MAX,
 };
 
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 21bd1e2193..6d84add423 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -502,6 +502,7 @@ iavf_get_vf_resource(struct iavf_adapter *adapter)
 		VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC |
 		VIRTCHNL_VF_OFFLOAD_FDIR_PF |
 		VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF |
+		VIRTCHNL_VF_OFFLOAD_FSUB_PF |
 		VIRTCHNL_VF_OFFLOAD_REQ_QUEUES |
 		VIRTCHNL_VF_OFFLOAD_CRC |
 		VIRTCHNL_VF_OFFLOAD_VLAN_V2 |
diff --git a/drivers/net/iavf/meson.build b/drivers/net/iavf/meson.build
index 2da37de662..6df771f917 100644
--- a/drivers/net/iavf/meson.build
+++ b/drivers/net/iavf/meson.build
@@ -16,6 +16,7 @@ sources = files(
         'iavf_hash.c',
         'iavf_tm.c',
         'iavf_ipsec_crypto.c',
+        'iavf_fsub.c',
 )
 
 if arch_subdir == 'x86'
-- 
2.25.1


^ permalink raw reply	[flat|nested] 40+ messages in thread

* [PATCH v5 3/5] net/iavf: support flow subscription pattern
  2022-09-07  5:10 ` [PATCH v5 0/5] support flow subscription Jie Wang
  2022-09-07  5:10   ` [PATCH v5 1/5] common/iavf: " Jie Wang
  2022-09-07  5:10   ` [PATCH v5 2/5] net/iavf: add flow subscription to AVF Jie Wang
@ 2022-09-07  5:10   ` Jie Wang
  2022-09-07  5:27     ` Zhang, Qi Z
  2022-09-07  5:10   ` [PATCH v5 4/5] net/iavf: support flow subscription rule Jie Wang
                     ` (2 subsequent siblings)
  5 siblings, 1 reply; 40+ messages in thread
From: Jie Wang @ 2022-09-07  5:10 UTC (permalink / raw)
  To: dev
  Cc: qiming.yang, qi.z.zhang, jingjing.wu, beilei.xing, stevex.yang, Jie Wang

Add flow subscription pattern support for AVF.

The supported patterns are listed below:
eth/vlan/ipv4
eth/ipv4(6)
eth/ipv4(6)/udp
eth/ipv4(6)/tcp
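
Below (for illustration only, not part of this patch) is a minimal sketch
of how an application might express one of the supported patterns
(eth/ipv4/udp) through the rte_flow API; the helper name and the matched
UDP source port are made up for the example:

#include <string.h>
#include <rte_byteorder.h>
#include <rte_flow.h>

/* Build the item array for an eth/ipv4/udp pattern. Only fields with a
 * non-zero mask (here: the UDP source port) become part of the rule's
 * input set; the eth and ipv4 items without spec/mask only select the
 * header layout.
 */
static void
build_eth_ipv4_udp_pattern(struct rte_flow_item items[4],
			   struct rte_flow_item_udp *udp_spec,
			   struct rte_flow_item_udp *udp_mask)
{
	memset(udp_spec, 0, sizeof(*udp_spec));
	memset(udp_mask, 0, sizeof(*udp_mask));
	udp_spec->hdr.src_port = RTE_BE16(11);
	udp_mask->hdr.src_port = RTE_BE16(0xffff);

	items[0] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_ETH };
	items[1] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_IPV4 };
	items[2] = (struct rte_flow_item){
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.spec = udp_spec,
		.mask = udp_mask,
	};
	items[3] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_END };
}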

Signed-off-by: Jie Wang <jie1x.wang@intel.com>
---
 drivers/net/iavf/iavf.h      |   7 +
 drivers/net/iavf/iavf_fsub.c | 598 ++++++++++++++++++++++++++++++++++-
 2 files changed, 597 insertions(+), 8 deletions(-)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 025ab3ff60..f79c7f9f6e 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -148,6 +148,13 @@ struct iavf_fdir_info {
 	struct iavf_fdir_conf conf;
 };
 
+struct iavf_fsub_conf {
+	struct virtchnl_flow_sub sub_fltr;
+	struct virtchnl_flow_unsub unsub_fltr;
+	uint64_t input_set;
+	uint32_t flow_id;
+};
+
 struct iavf_qv_map {
 	uint16_t queue_id;
 	uint16_t vector_id;
diff --git a/drivers/net/iavf/iavf_fsub.c b/drivers/net/iavf/iavf_fsub.c
index 17f9bb2976..66e403d585 100644
--- a/drivers/net/iavf/iavf_fsub.c
+++ b/drivers/net/iavf/iavf_fsub.c
@@ -22,9 +22,51 @@
 #include "iavf_generic_flow.h"
 
 
+#define MAX_QGRP_NUM_TYPE      7
+#define IAVF_IPV6_ADDR_LENGTH  16
+#define MAX_INPUT_SET_BYTE     32
+
+#define IAVF_SW_INSET_ETHER ( \
+	IAVF_INSET_DMAC | IAVF_INSET_SMAC | IAVF_INSET_ETHERTYPE)
+#define IAVF_SW_INSET_MAC_IPV4 ( \
+	IAVF_INSET_DMAC | IAVF_INSET_IPV4_DST | IAVF_INSET_IPV4_SRC | \
+	IAVF_INSET_IPV4_PROTO | IAVF_INSET_IPV4_TTL | IAVF_INSET_IPV4_TOS)
+#define IAVF_SW_INSET_MAC_VLAN_IPV4 ( \
+	IAVF_SW_INSET_MAC_IPV4 | IAVF_INSET_VLAN_OUTER)
+#define IAVF_SW_INSET_MAC_IPV4_TCP ( \
+	IAVF_INSET_DMAC | IAVF_INSET_IPV4_DST | IAVF_INSET_IPV4_SRC | \
+	IAVF_INSET_IPV4_TTL | IAVF_INSET_IPV4_TOS | \
+	IAVF_INSET_TCP_DST_PORT | IAVF_INSET_TCP_SRC_PORT)
+#define IAVF_SW_INSET_MAC_IPV4_UDP ( \
+	IAVF_INSET_DMAC | IAVF_INSET_IPV4_DST | IAVF_INSET_IPV4_SRC | \
+	IAVF_INSET_IPV4_TTL | IAVF_INSET_IPV4_TOS | \
+	IAVF_INSET_UDP_DST_PORT | IAVF_INSET_UDP_SRC_PORT)
+#define IAVF_SW_INSET_MAC_IPV6 ( \
+	IAVF_INSET_DMAC | IAVF_INSET_IPV6_DST | IAVF_INSET_IPV6_SRC | \
+	IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
+	IAVF_INSET_IPV6_NEXT_HDR)
+#define IAVF_SW_INSET_MAC_IPV6_TCP ( \
+	IAVF_INSET_DMAC | IAVF_INSET_IPV6_DST | IAVF_INSET_IPV6_SRC | \
+	IAVF_INSET_IPV6_HOP_LIMIT | IAVF_INSET_IPV6_TC | \
+	IAVF_INSET_TCP_DST_PORT | IAVF_INSET_TCP_SRC_PORT)
+#define IAVF_SW_INSET_MAC_IPV6_UDP ( \
+	IAVF_INSET_DMAC | IAVF_INSET_IPV6_DST | IAVF_INSET_IPV6_SRC | \
+	IAVF_INSET_IPV6_HOP_LIMIT | IAVF_INSET_IPV6_TC | \
+	IAVF_INSET_UDP_DST_PORT | IAVF_INSET_UDP_SRC_PORT)
+
 static struct iavf_flow_parser iavf_fsub_parser;
 
-static struct iavf_pattern_match_item iavf_fsub_pattern_list[] = {};
+static struct
+iavf_pattern_match_item iavf_fsub_pattern_list[] = {
+	{iavf_pattern_ethertype,			IAVF_SW_INSET_ETHER,			IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4,				IAVF_SW_INSET_MAC_IPV4,			IAVF_INSET_NONE},
+	{iavf_pattern_eth_vlan_ipv4,			IAVF_SW_INSET_MAC_VLAN_IPV4,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_udp,			IAVF_SW_INSET_MAC_IPV4_UDP,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv4_tcp,			IAVF_SW_INSET_MAC_IPV4_TCP,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv6,				IAVF_SW_INSET_MAC_IPV6,			IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv6_udp,			IAVF_SW_INSET_MAC_IPV6_UDP,		IAVF_INSET_NONE},
+	{iavf_pattern_eth_ipv6_tcp,			IAVF_SW_INSET_MAC_IPV6_TCP,		IAVF_INSET_NONE},
+};
 
 static int
 iavf_fsub_create(__rte_unused struct iavf_adapter *ad,
@@ -53,17 +95,557 @@ iavf_fsub_validation(__rte_unused struct iavf_adapter *ad,
 };
 
 static int
-iavf_fsub_parse(__rte_unused struct iavf_adapter *ad,
-		__rte_unused struct iavf_pattern_match_item *array,
-		__rte_unused uint32_t array_len,
-		__rte_unused const struct rte_flow_item pattern[],
-		__rte_unused const struct rte_flow_action actions[],
-		__rte_unused void **meta,
-		__rte_unused struct rte_flow_error *error)
+iavf_fsub_parse_pattern(const struct rte_flow_item pattern[],
+			const uint64_t input_set_mask,
+			struct rte_flow_error *error,
+			struct iavf_fsub_conf *filter)
+{
+	struct virtchnl_proto_hdrs *hdrs = &filter->sub_fltr.proto_hdrs;
+	enum rte_flow_item_type item_type;
+	const struct rte_flow_item_eth *eth_spec, *eth_mask;
+	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
+	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
+	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
+	const struct rte_flow_item_udp *udp_spec, *udp_mask;
+	const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
+	const struct rte_flow_item *item = pattern;
+	struct virtchnl_proto_hdr_w_msk *hdr, *hdr1 = NULL;
+	uint64_t outer_input_set = IAVF_INSET_NONE;
+	uint64_t *input = NULL;
+	uint16_t input_set_byte = 0;
+	uint16_t j;
+	uint32_t layer = 0;
+
+	for (item = pattern; item->type !=
+			RTE_FLOW_ITEM_TYPE_END; item++) {
+		if (item->last) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item, "Range not supported");
+			return -rte_errno;
+		}
+		item_type = item->type;
+
+		switch (item_type) {
+		case RTE_FLOW_ITEM_TYPE_ETH:
+			eth_spec = item->spec;
+			eth_mask = item->mask;
+
+			hdr1 = &hdrs->proto_hdr_w_msk[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr1, ETH);
+
+			if (eth_spec && eth_mask) {
+				input = &outer_input_set;
+
+				if (!rte_is_zero_ether_addr(&eth_mask->dst)) {
+					*input |= IAVF_INSET_DMAC;
+					input_set_byte += 6;
+				} else {
+					/* flow subscribe filter will add dst mac in kernel */
+					input_set_byte += 6;
+				}
+
+				if (!rte_is_zero_ether_addr(&eth_mask->src)) {
+					*input |= IAVF_INSET_SMAC;
+					input_set_byte += 6;
+				}
+
+				if (eth_mask->type) {
+					*input |= IAVF_INSET_ETHERTYPE;
+					input_set_byte += 2;
+				}
+
+				rte_memcpy(hdr1->buffer_spec, eth_spec,
+					   sizeof(struct rte_ether_hdr));
+				rte_memcpy(hdr1->buffer_mask, eth_mask,
+					   sizeof(struct rte_ether_hdr));
+			} else {
+				/* flow subscribe filter will add dst mac in kernel */
+				input_set_byte += 6;
+			}
+
+			hdrs->count = ++layer;
+			break;
+		case RTE_FLOW_ITEM_TYPE_IPV4:
+			ipv4_spec = item->spec;
+			ipv4_mask = item->mask;
+
+			hdr = &hdrs->proto_hdr_w_msk[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4);
+
+			if (ipv4_spec && ipv4_mask) {
+				input = &outer_input_set;
+				/* Check IPv4 mask and update input set */
+				if (ipv4_mask->hdr.version_ihl ||
+					ipv4_mask->hdr.total_length ||
+					ipv4_mask->hdr.packet_id ||
+					ipv4_mask->hdr.hdr_checksum) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item, "Invalid IPv4 mask.");
+					return -rte_errno;
+				}
+
+				if (ipv4_mask->hdr.src_addr) {
+					*input |= IAVF_INSET_IPV4_SRC;
+					input_set_byte += 2;
+				}
+				if (ipv4_mask->hdr.dst_addr) {
+					*input |= IAVF_INSET_IPV4_DST;
+					input_set_byte += 2;
+				}
+				if (ipv4_mask->hdr.time_to_live) {
+					*input |= IAVF_INSET_IPV4_TTL;
+					input_set_byte++;
+				}
+				if (ipv4_mask->hdr.next_proto_id) {
+					*input |= IAVF_INSET_IPV4_PROTO;
+					input_set_byte++;
+				}
+				if (ipv4_mask->hdr.type_of_service) {
+					*input |= IAVF_INSET_IPV4_TOS;
+					input_set_byte++;
+				}
+
+				rte_memcpy(hdr->buffer_spec, &ipv4_spec->hdr,
+					   sizeof(ipv4_spec->hdr));
+				rte_memcpy(hdr->buffer_mask, &ipv4_mask->hdr,
+					   sizeof(ipv4_spec->hdr));
+			}
+
+			hdrs->count = ++layer;
+			break;
+		case RTE_FLOW_ITEM_TYPE_IPV6:
+			ipv6_spec = item->spec;
+			ipv6_mask = item->mask;
+
+			hdr = &hdrs->proto_hdr_w_msk[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV6);
+
+			if (ipv6_spec && ipv6_mask) {
+				input = &outer_input_set;
+
+				if (ipv6_mask->hdr.payload_len) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item, "Invalid IPv6 mask");
+					return -rte_errno;
+				}
+
+				for (j = 0; j < IAVF_IPV6_ADDR_LENGTH; j++) {
+					if (ipv6_mask->hdr.src_addr[j]) {
+						*input |= IAVF_INSET_IPV6_SRC;
+						break;
+					}
+				}
+				for (j = 0; j < IAVF_IPV6_ADDR_LENGTH; j++) {
+					if (ipv6_mask->hdr.dst_addr[j]) {
+						*input |= IAVF_INSET_IPV6_DST;
+						break;
+					}
+				}
+
+				for (j = 0; j < IAVF_IPV6_ADDR_LENGTH; j++) {
+					if (ipv6_mask->hdr.src_addr[j])
+						input_set_byte++;
+
+					if (ipv6_mask->hdr.dst_addr[j])
+						input_set_byte++;
+				}
+
+				if (ipv6_mask->hdr.proto) {
+					*input |= IAVF_INSET_IPV6_NEXT_HDR;
+					input_set_byte++;
+				}
+				if (ipv6_mask->hdr.hop_limits) {
+					*input |= IAVF_INSET_IPV6_HOP_LIMIT;
+					input_set_byte++;
+				}
+				if (ipv6_mask->hdr.vtc_flow &
+				    rte_cpu_to_be_32(RTE_IPV6_HDR_TC_MASK)) {
+					*input |= IAVF_INSET_IPV6_TC;
+					input_set_byte += 4;
+				}
+
+				rte_memcpy(hdr->buffer_spec, &ipv6_spec->hdr,
+					   sizeof(ipv6_spec->hdr));
+				rte_memcpy(hdr->buffer_mask, &ipv6_mask->hdr,
+					   sizeof(ipv6_spec->hdr));
+			}
+
+			hdrs->count = ++layer;
+			break;
+		case RTE_FLOW_ITEM_TYPE_UDP:
+			udp_spec = item->spec;
+			udp_mask = item->mask;
+
+			hdr = &hdrs->proto_hdr_w_msk[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, UDP);
+
+			if (udp_spec && udp_mask) {
+				input = &outer_input_set;
+				/* Check UDP mask and update input set*/
+				if (udp_mask->hdr.dgram_len ||
+				    udp_mask->hdr.dgram_cksum) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item, "Invalid UDP mask");
+					return -rte_errno;
+				}
+
+				if (udp_mask->hdr.src_port) {
+					*input |= IAVF_INSET_UDP_SRC_PORT;
+					input_set_byte += 2;
+				}
+				if (udp_mask->hdr.dst_port) {
+					*input |= IAVF_INSET_UDP_DST_PORT;
+					input_set_byte += 2;
+				}
+
+				rte_memcpy(hdr->buffer_spec, &udp_spec->hdr,
+					   sizeof(udp_spec->hdr));
+				rte_memcpy(hdr->buffer_mask, &udp_mask->hdr,
+					   sizeof(udp_mask->hdr));
+			}
+
+			hdrs->count = ++layer;
+			break;
+		case RTE_FLOW_ITEM_TYPE_TCP:
+			tcp_spec = item->spec;
+			tcp_mask = item->mask;
+
+			hdr = &hdrs->proto_hdr_w_msk[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, TCP);
+
+			if (tcp_spec && tcp_mask) {
+				input = &outer_input_set;
+				/* Check TCP mask and update input set */
+				if (tcp_mask->hdr.sent_seq ||
+					tcp_mask->hdr.recv_ack ||
+					tcp_mask->hdr.data_off ||
+					tcp_mask->hdr.tcp_flags ||
+					tcp_mask->hdr.rx_win ||
+					tcp_mask->hdr.cksum ||
+					tcp_mask->hdr.tcp_urp) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item, "Invalid TCP mask");
+					return -rte_errno;
+				}
+
+				if (tcp_mask->hdr.src_port) {
+					*input |= IAVF_INSET_TCP_SRC_PORT;
+					input_set_byte += 2;
+				}
+				if (tcp_mask->hdr.dst_port) {
+					*input |= IAVF_INSET_TCP_DST_PORT;
+					input_set_byte += 2;
+				}
+
+				rte_memcpy(hdr->buffer_spec, &tcp_spec->hdr,
+					   sizeof(tcp_spec->hdr));
+				rte_memcpy(hdr->buffer_mask, &tcp_mask->hdr,
+					   sizeof(tcp_mask->hdr));
+			}
+
+			hdrs->count = ++layer;
+			break;
+		case RTE_FLOW_ITEM_TYPE_VLAN:
+			vlan_spec = item->spec;
+			vlan_mask = item->mask;
+
+			hdr = &hdrs->proto_hdr_w_msk[layer];
+
+			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, S_VLAN);
+
+			if (vlan_spec && vlan_mask) {
+				input = &outer_input_set;
+
+				*input |= IAVF_INSET_VLAN_OUTER;
+
+				if (vlan_mask->tci)
+					input_set_byte += 2;
+
+				if (vlan_mask->inner_type) {
+					rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item,
+						"Invalid VLAN input set.");
+					return -rte_errno;
+				}
+
+				rte_memcpy(hdr->buffer_spec, &vlan_spec->hdr,
+					   sizeof(vlan_spec->hdr));
+				rte_memcpy(hdr->buffer_mask, &vlan_mask->hdr,
+					   sizeof(vlan_mask->hdr));
+			}
+
+			hdrs->count = ++layer;
+			break;
+		case RTE_FLOW_ITEM_TYPE_VOID:
+			break;
+		default:
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM, pattern,
+					   "Invalid pattern item.");
+			return -rte_errno;
+		}
+	}
+
+	hdrs->count += VIRTCHNL_MAX_NUM_PROTO_HDRS;
+
+	if (input_set_byte > MAX_INPUT_SET_BYTE) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+				   item, "too many input set bytes");
+		return -rte_errno;
+	}
+
+	if (!outer_input_set || (outer_input_set & ~input_set_mask))
+		return -rte_errno;
+
+	return 0;
+}
+
+static int
+iavf_fsub_parse_action(struct iavf_adapter *ad,
+		       const struct rte_flow_action *actions,
+		       uint32_t priority,
+		       struct rte_flow_error *error,
+		       struct iavf_fsub_conf *filter)
 {
+	const struct rte_flow_action *action;
+	const struct rte_flow_action_ethdev *act_ethdev;
+	const struct rte_flow_action_queue *act_q;
+	const struct rte_flow_action_rss *act_qgrop;
+	struct virtchnl_filter_action *filter_action;
+	uint16_t valid_qgrop_number[MAX_QGRP_NUM_TYPE] = {
+		2, 4, 8, 16, 32, 64, 128};
+	uint16_t i, num = 0, dest_num = 0, vf_num = 0;
+	uint16_t rule_port_id;
+
+	for (action = actions; action->type !=
+				RTE_FLOW_ACTION_TYPE_END; action++) {
+		switch (action->type) {
+		case RTE_FLOW_ACTION_TYPE_VOID:
+			break;
+
+		case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
+			vf_num++;
+			filter_action = &filter->sub_fltr.actions.actions[num];
+
+			act_ethdev = action->conf;
+			rule_port_id = ad->dev_data->port_id;
+			if (rule_port_id != act_ethdev->port_id)
+				goto error1;
+
+			filter->sub_fltr.actions.count = ++num;
+			break;
+		case RTE_FLOW_ACTION_TYPE_QUEUE:
+			dest_num++;
+			filter_action = &filter->sub_fltr.actions.actions[num];
+
+			act_q = action->conf;
+			if (act_q->index >= ad->dev_data->nb_rx_queues)
+				goto error2;
+
+			filter_action->type = VIRTCHNL_ACTION_QUEUE;
+			filter_action->act_conf.queue.index = act_q->index;
+			filter->sub_fltr.actions.count = ++num;
+			break;
+		case RTE_FLOW_ACTION_TYPE_RSS:
+			dest_num++;
+			filter_action = &filter->sub_fltr.actions.actions[num];
+
+			act_qgrop = action->conf;
+			if (act_qgrop->queue_num <= 1)
+				goto error2;
+
+			filter_action->type = VIRTCHNL_ACTION_Q_REGION;
+			filter_action->act_conf.queue.index =
+							act_qgrop->queue[0];
+			for (i = 0; i < MAX_QGRP_NUM_TYPE; i++) {
+				if (act_qgrop->queue_num ==
+				    valid_qgrop_number[i])
+					break;
+			}
+
+			if (i == MAX_QGRP_NUM_TYPE)
+				goto error2;
+
+			if ((act_qgrop->queue[0] + act_qgrop->queue_num) >
+			    ad->dev_data->nb_rx_queues)
+				goto error3;
+
+			for (i = 0; i < act_qgrop->queue_num - 1; i++)
+				if (act_qgrop->queue[i + 1] !=
+				    act_qgrop->queue[i] + 1)
+					goto error4;
+
+			filter_action->act_conf.queue.region = act_qgrop->queue_num;
+			filter->sub_fltr.actions.count = ++num;
+			break;
+		default:
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ACTION,
+					   actions, "Invalid action type");
+			return -rte_errno;
+		}
+	}
+
+	/* 0 denotes lowest priority of recipe and highest priority
+	 * of rte_flow. Change rte_flow priority into recipe priority.
+	 */
+	filter->sub_fltr.priority = priority;
+
+	if (num > VIRTCHNL_MAX_NUM_ACTIONS) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ACTION, actions,
+				   "Action numbers exceed the maximum value");
+		return -rte_errno;
+	}
+
+	if (vf_num == 0) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ACTION, actions,
+				   "Invalid action, vf action must be added");
+		return -rte_errno;
+	}
+
+	if (dest_num >= 2) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ACTION, actions,
+				   "Unsupported action combination");
+		return -rte_errno;
+	}
+
+	return 0;
+
+error1:
+	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, actions,
+			   "Invalid ethdev_port_id");
+	return -rte_errno;
+
+error2:
+	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, actions,
+			   "Invalid action type or queue number");
+	return -rte_errno;
+
+error3:
+	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, actions,
+			   "Invalid queue region indexes");
+	return -rte_errno;
+
+error4:
+	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, actions,
+			   "Discontinuous queue region");
 	return -rte_errno;
 }
 
+static int
+iavf_fsub_check_action(const struct rte_flow_action *actions,
+		       struct rte_flow_error *error)
+{
+	const struct rte_flow_action *action;
+	enum rte_flow_action_type action_type;
+	uint16_t actions_num = 0;
+	bool vf_valid = false;
+	bool queue_valid = false;
+
+	for (action = actions; action->type !=
+				RTE_FLOW_ACTION_TYPE_END; action++) {
+		action_type = action->type;
+		switch (action_type) {
+		case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
+			vf_valid = true;
+			actions_num++;
+			break;
+		case RTE_FLOW_ACTION_TYPE_RSS:
+		case RTE_FLOW_ACTION_TYPE_QUEUE:
+			queue_valid = true;
+			actions_num++;
+			break;
+		case RTE_FLOW_ACTION_TYPE_DROP:
+			actions_num++;
+			break;
+		case RTE_FLOW_ACTION_TYPE_VOID:
+			continue;
+		default:
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ACTION,
+					   actions, "Invalid action type");
+			return -rte_errno;
+		}
+	}
+
+	if (!((actions_num == 1 && !queue_valid) ||
+	      (actions_num == 2 && vf_valid && queue_valid))) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+				   actions, "Invalid action number");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+static int
+iavf_fsub_parse(struct iavf_adapter *ad,
+		struct iavf_pattern_match_item *array,
+		uint32_t array_len,
+		const struct rte_flow_item pattern[],
+		const struct rte_flow_action actions[],
+		void **meta,
+		struct rte_flow_error *error)
+{
+	struct iavf_fsub_conf *filter;
+	struct iavf_pattern_match_item *pattern_match_item = NULL;
+	int ret = 0;
+	uint32_t priority = 0;
+
+	filter = rte_zmalloc(NULL, sizeof(*filter), 0);
+	if (!filter) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "No memory for iavf_fsub_conf_ptr");
+		return -rte_errno;
+	}
+
+	/* search flow subscribe pattern */
+	pattern_match_item = iavf_search_pattern_match_item(pattern, array,
+							    array_len, error);
+	if (!pattern_match_item)
+		return -rte_errno;
+
+	/* parse flow subscribe pattern */
+	ret = iavf_fsub_parse_pattern(pattern,
+				      pattern_match_item->input_set_mask,
+				      error, filter);
+	if (ret)
+		goto error;
+
+	/* check flow subscribe pattern action */
+	ret = iavf_fsub_check_action(actions, error);
+	if (ret)
+		goto error;
+
+	/* parse flow subscribe pattern action */
+	ret = iavf_fsub_parse_action((void *)ad, actions, priority,
+				     error, filter);
+	if (ret)
+		goto error;
+
+	if (meta)
+		*meta = filter;
+
+error:
+	rte_free(pattern_match_item);
+	return ret;
+}
+
 static int
 iavf_fsub_init(struct iavf_adapter *ad)
 {
-- 
2.25.1


^ permalink raw reply	[flat|nested] 40+ messages in thread

* [PATCH v5 4/5] net/iavf: support flow subscription rule
  2022-09-07  5:10 ` [PATCH v5 0/5] support flow subscription Jie Wang
                     ` (2 preceding siblings ...)
  2022-09-07  5:10   ` [PATCH v5 3/5] net/iavf: support flow subscrption pattern Jie Wang
@ 2022-09-07  5:10   ` Jie Wang
  2022-09-07  5:10   ` [PATCH v5 5/5] net/iavf: support priority of flow rule Jie Wang
  2022-09-07  5:28   ` [PATCH v5 0/5] support flow subscription Zhang, Qi Z
  5 siblings, 0 replies; 40+ messages in thread
From: Jie Wang @ 2022-09-07  5:10 UTC (permalink / raw)
  To: dev
  Cc: qiming.yang, qi.z.zhang, jingjing.wu, beilei.xing, stevex.yang, Jie Wang

Support create, destroy and validate operations for flow subscription
rules in AVF.

For example:
testpmd> flow create 0 ingress pattern eth / ipv4 / udp src is 11
          / end actions port_representor port_id 1 / end
testpmd> flow validate 1 ingress pattern eth / ipv4 / tcp src is 22
          / end actions port_representor port_id 1 / end
testpmd> flow destroy 1 rule 0

A VF subscribes to a rule, which means the matched packets will be
sent to the VF instead of the PF, and only the VF will receive them.

Multiple VFs are allowed to subscribe to the same rule; the packets
will be replicated and received by each subscribed VF.

PF will destroy all subscriptions during VF reset.
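
For reference (not part of this patch), a sketch of the equivalent
rte_flow API sequence, assuming the pattern and action arrays are built
to match the testpmd example above:

#include <rte_flow.h>

static int
subscribe_and_release(uint16_t port_id,
		      const struct rte_flow_item pattern[],
		      const struct rte_flow_action actions[])
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_error err;
	struct rte_flow *flow;

	/* Maps to iavf_fsub_validation(): VIRTCHNL_OP_FLOW_SUBSCRIBE is
	 * sent with validate_only = 1. */
	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) != 0)
		return -1;

	/* Maps to iavf_fsub_create(): the PF installs the rule and returns
	 * a flow_id that is kept for later unsubscription. */
	flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
	if (flow == NULL)
		return -1;

	/* Maps to iavf_fsub_destroy(): VIRTCHNL_OP_FLOW_UNSUBSCRIBE. */
	return rte_flow_destroy(port_id, flow, &err);
}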

Signed-off-by: Jie Wang <jie1x.wang@intel.com>
---
 drivers/net/iavf/iavf.h       |   6 ++
 drivers/net/iavf/iavf_fsub.c  |  75 +++++++++++++++----
 drivers/net/iavf/iavf_vchnl.c | 132 ++++++++++++++++++++++++++++++++++
 3 files changed, 201 insertions(+), 12 deletions(-)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index f79c7f9f6e..26b858f6f0 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -489,4 +489,10 @@ int iavf_ipsec_crypto_request(struct iavf_adapter *adapter,
 extern const struct rte_tm_ops iavf_tm_ops;
 int iavf_get_ptp_cap(struct iavf_adapter *adapter);
 int iavf_get_phc_time(struct iavf_rx_queue *rxq);
+int iavf_flow_sub(struct iavf_adapter *adapter,
+		  struct iavf_fsub_conf *filter);
+int iavf_flow_unsub(struct iavf_adapter *adapter,
+		    struct iavf_fsub_conf *filter);
+int iavf_flow_sub_check(struct iavf_adapter *adapter,
+			struct iavf_fsub_conf *filter);
 #endif /* _IAVF_ETHDEV_H_ */
diff --git a/drivers/net/iavf/iavf_fsub.c b/drivers/net/iavf/iavf_fsub.c
index 66e403d585..28857d7577 100644
--- a/drivers/net/iavf/iavf_fsub.c
+++ b/drivers/net/iavf/iavf_fsub.c
@@ -69,29 +69,80 @@ iavf_pattern_match_item iavf_fsub_pattern_list[] = {
 };
 
 static int
-iavf_fsub_create(__rte_unused struct iavf_adapter *ad,
-		 __rte_unused struct rte_flow *flow,
-		 __rte_unused void *meta,
-		 __rte_unused struct rte_flow_error *error)
+iavf_fsub_create(struct iavf_adapter *ad, struct rte_flow *flow,
+		 void *meta, struct rte_flow_error *error)
 {
+	struct iavf_fsub_conf *filter = meta;
+	struct iavf_fsub_conf *rule;
+	int ret;
+
+	rule = rte_zmalloc("fsub_entry", sizeof(*rule), 0);
+	if (!rule) {
+		rte_flow_error_set(error, ENOMEM,
+				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				"Failed to allocate memory for fsub rule");
+		return -rte_errno;
+	}
+
+	ret = iavf_flow_sub(ad, filter);
+	if (ret) {
+		rte_flow_error_set(error, -ret,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Failed to subscribe flow rule.");
+		goto free_entry;
+	}
+
+	rte_memcpy(rule, filter, sizeof(*rule));
+	flow->rule = rule;
+
+	return ret;
+
+free_entry:
+	rte_free(rule);
 	return -rte_errno;
 }
 
 static int
-iavf_fsub_destroy(__rte_unused struct iavf_adapter *ad,
-		  __rte_unused struct rte_flow *flow,
-		  __rte_unused struct rte_flow_error *error)
+iavf_fsub_destroy(struct iavf_adapter *ad, struct rte_flow *flow,
+		  struct rte_flow_error *error)
 {
-	return -rte_errno;
+	struct iavf_fsub_conf *filter;
+	int ret;
+
+	filter = (struct iavf_fsub_conf *)flow->rule;
+
+	ret = iavf_flow_unsub(ad, filter);
+	if (ret) {
+		rte_flow_error_set(error, -ret,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Failed to unsubscribe flow rule.");
+		return -rte_errno;
+	}
+
+	flow->rule = NULL;
+	rte_free(filter);
+
+	return ret;
 }
 
 static int
-iavf_fsub_validation(__rte_unused struct iavf_adapter *ad,
+iavf_fsub_validation(struct iavf_adapter *ad,
 		     __rte_unused struct rte_flow *flow,
-		     __rte_unused void *meta,
-		     __rte_unused struct rte_flow_error *error)
+		     void *meta,
+		     struct rte_flow_error *error)
 {
-	return -rte_errno;
+	struct iavf_fsub_conf *filter = meta;
+	int ret;
+
+	ret = iavf_flow_sub_check(ad, filter);
+	if (ret) {
+		rte_flow_error_set(error, -ret,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Failed to validate filter rule.");
+		return -rte_errno;
+	}
+
+	return ret;
 };
 
 static int
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 6d84add423..cc0db8d093 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -1534,6 +1534,138 @@ iavf_fdir_check(struct iavf_adapter *adapter,
 	return 0;
 }
 
+int
+iavf_flow_sub(struct iavf_adapter *adapter, struct iavf_fsub_conf *filter)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct virtchnl_flow_sub *fsub_cfg;
+	struct iavf_cmd_info args;
+	int err;
+
+	filter->sub_fltr.vsi_id = vf->vsi_res->vsi_id;
+	filter->sub_fltr.validate_only = 0;
+
+	memset(&args, 0, sizeof(args));
+	args.ops = VIRTCHNL_OP_FLOW_SUBSCRIBE;
+	args.in_args = (uint8_t *)(&filter->sub_fltr);
+	args.in_args_size = sizeof(filter->sub_fltr);
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
+	if (err)
+		PMD_DRV_LOG(ERR, "Failed to execute command of "
+				 "OP_FLOW_SUBSCRIBE");
+
+	fsub_cfg = (struct virtchnl_flow_sub *)args.out_buffer;
+	filter->flow_id = fsub_cfg->flow_id;
+
+	if (fsub_cfg->status == VIRTCHNL_FSUB_SUCCESS) {
+		PMD_DRV_LOG(INFO, "Succeed in adding rule request by PF");
+	} else if (fsub_cfg->status == VIRTCHNL_FSUB_FAILURE_RULE_NORESOURCE) {
+		PMD_DRV_LOG(ERR, "Failed to add rule request due to no hw "
+				 "resource");
+		err = -1;
+	} else if (fsub_cfg->status == VIRTCHNL_FSUB_FAILURE_RULE_EXIST) {
+		PMD_DRV_LOG(ERR, "Failed to add rule request due to the rule "
+				 "is already existed");
+		err = -1;
+	} else if (fsub_cfg->status == VIRTCHNL_FSUB_FAILURE_RULE_INVALID) {
+		PMD_DRV_LOG(ERR, "Failed to add rule request due to the hw "
+				 "doesn't support");
+		err = -1;
+	} else {
+		PMD_DRV_LOG(ERR, "Failed to add rule request due to other "
+				 "reasons");
+		err = -1;
+	}
+
+	return err;
+}
+
+int
+iavf_flow_unsub(struct iavf_adapter *adapter, struct iavf_fsub_conf *filter)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct virtchnl_flow_unsub *unsub_cfg;
+	struct iavf_cmd_info args;
+	int err;
+
+	filter->unsub_fltr.vsi_id = vf->vsi_res->vsi_id;
+	filter->unsub_fltr.flow_id = filter->flow_id;
+
+	memset(&args, 0, sizeof(args));
+	args.ops = VIRTCHNL_OP_FLOW_UNSUBSCRIBE;
+	args.in_args = (uint8_t *)(&filter->unsub_fltr);
+	args.in_args_size = sizeof(filter->unsub_fltr);
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
+	if (err)
+		PMD_DRV_LOG(ERR, "Failed to execute command of "
+				 "OP_FLOW_UNSUBSCRIBE");
+
+	unsub_cfg = (struct virtchnl_flow_unsub *)args.out_buffer;
+
+	if (unsub_cfg->status == VIRTCHNL_FSUB_SUCCESS) {
+		PMD_DRV_LOG(INFO, "Succeed in deleting rule request by PF");
+	} else if (unsub_cfg->status == VIRTCHNL_FSUB_FAILURE_RULE_NONEXIST) {
+		PMD_DRV_LOG(ERR, "Failed to delete rule request due to this "
+				 "rule doesn't exist");
+		err = -1;
+	} else {
+		PMD_DRV_LOG(ERR, "Failed to delete rule request due to other "
+				 "reasons");
+		err = -1;
+	}
+
+	return err;
+}
+
+int
+iavf_flow_sub_check(struct iavf_adapter *adapter,
+		    struct iavf_fsub_conf *filter)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct virtchnl_flow_sub *fsub_cfg;
+
+	struct iavf_cmd_info args;
+	int err;
+
+	filter->sub_fltr.vsi_id = vf->vsi_res->vsi_id;
+	filter->sub_fltr.validate_only = 1;
+
+	args.ops = VIRTCHNL_OP_FLOW_SUBSCRIBE;
+	args.in_args = (uint8_t *)(&filter->sub_fltr);
+	args.in_args_size = sizeof(filter->sub_fltr);
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+
+	err = iavf_execute_vf_cmd(adapter, &args, 0);
+	if (err) {
+		PMD_DRV_LOG(ERR, "fail to check flow director rule");
+		return err;
+	}
+
+	fsub_cfg = (struct virtchnl_flow_sub *)args.out_buffer;
+
+	if (fsub_cfg->status == VIRTCHNL_FSUB_SUCCESS) {
+		PMD_DRV_LOG(INFO, "Succeed in checking rule request by PF");
+	} else if (fsub_cfg->status == VIRTCHNL_FSUB_FAILURE_RULE_INVALID) {
+		PMD_DRV_LOG(ERR, "Failed to check rule request due to "
+				 "parameters validation or HW doesn't "
+				 "support");
+		err = -1;
+	} else {
+		PMD_DRV_LOG(ERR, "Failed to check rule request due to other "
+				 "reasons");
+		err = -1;
+	}
+
+	return err;
+}
+
 int
 iavf_add_del_rss_cfg(struct iavf_adapter *adapter,
 		     struct virtchnl_rss_cfg *rss_cfg, bool add)
-- 
2.25.1


^ permalink raw reply	[flat|nested] 40+ messages in thread

* [PATCH v5 5/5] net/iavf: support priority of flow rule
  2022-09-07  5:10 ` [PATCH v5 0/5] support flow subscription Jie Wang
                     ` (3 preceding siblings ...)
  2022-09-07  5:10   ` [PATCH v5 4/5] net/iavf: support flow subscription rule Jie Wang
@ 2022-09-07  5:10   ` Jie Wang
  2022-09-07  5:28   ` [PATCH v5 0/5] support flow subscription Zhang, Qi Z
  5 siblings, 0 replies; 40+ messages in thread
From: Jie Wang @ 2022-09-07  5:10 UTC (permalink / raw)
  To: dev
  Cc: qiming.yang, qi.z.zhang, jingjing.wu, beilei.xing, stevex.yang, Jie Wang

Add flow rule attribute "priority" support for AVF.

Lower values denote higher priority; the highest priority for
a flow rule is 0.

All subscription rules will have a lower priority than the rules
created by the host.
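
As a sketch (not part of this patch), the attribute block an application
would pass to rte_flow_create() to request the lower subscription
priority; priority-0 rules still go to the existing FDIR/RSS/ipsec
parsers, which now reject priority >= 1:

#include <rte_flow.h>

static const struct rte_flow_attr fsub_attr = {
	.ingress = 1,
	/* 0 = highest priority, kept for host-created rules;
	 * 1 = lower priority, accepted by the flow subscription parser. */
	.priority = 1,
};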

Signed-off-by: Jie Wang <jie1x.wang@intel.com>
---
 drivers/net/iavf/iavf_fdir.c         |  4 ++++
 drivers/net/iavf/iavf_fsub.c         |  2 +-
 drivers/net/iavf/iavf_generic_flow.c | 23 +++++++++++++----------
 drivers/net/iavf/iavf_generic_flow.h |  1 +
 drivers/net/iavf/iavf_hash.c         |  5 +++++
 drivers/net/iavf/iavf_ipsec_crypto.c | 16 ++++++++++------
 6 files changed, 34 insertions(+), 17 deletions(-)

diff --git a/drivers/net/iavf/iavf_fdir.c b/drivers/net/iavf/iavf_fdir.c
index a397047fdb..8f80873925 100644
--- a/drivers/net/iavf/iavf_fdir.c
+++ b/drivers/net/iavf/iavf_fdir.c
@@ -1583,6 +1583,7 @@ iavf_fdir_parse(struct iavf_adapter *ad,
 		uint32_t array_len,
 		const struct rte_flow_item pattern[],
 		const struct rte_flow_action actions[],
+		uint32_t priority,
 		void **meta,
 		struct rte_flow_error *error)
 {
@@ -1593,6 +1594,9 @@ iavf_fdir_parse(struct iavf_adapter *ad,
 
 	memset(filter, 0, sizeof(*filter));
 
+	if (priority >= 1)
+		return -rte_errno;
+
 	item = iavf_search_pattern_match_item(pattern, array, array_len, error);
 	if (!item)
 		return -rte_errno;
diff --git a/drivers/net/iavf/iavf_fsub.c b/drivers/net/iavf/iavf_fsub.c
index 28857d7577..3bb6c30d3c 100644
--- a/drivers/net/iavf/iavf_fsub.c
+++ b/drivers/net/iavf/iavf_fsub.c
@@ -649,13 +649,13 @@ iavf_fsub_parse(struct iavf_adapter *ad,
 		uint32_t array_len,
 		const struct rte_flow_item pattern[],
 		const struct rte_flow_action actions[],
+		uint32_t priority,
 		void **meta,
 		struct rte_flow_error *error)
 {
 	struct iavf_fsub_conf *filter;
 	struct iavf_pattern_match_item *pattern_match_item = NULL;
 	int ret = 0;
-	uint32_t priority = 0;
 
 	filter = rte_zmalloc(NULL, sizeof(*filter), 0);
 	if (!filter) {
diff --git a/drivers/net/iavf/iavf_generic_flow.c b/drivers/net/iavf/iavf_generic_flow.c
index b04614ba6e..f33c764764 100644
--- a/drivers/net/iavf/iavf_generic_flow.c
+++ b/drivers/net/iavf/iavf_generic_flow.c
@@ -1785,6 +1785,7 @@ enum rte_flow_item_type iavf_pattern_eth_ipv6_udp_l2tpv2_ppp_ipv6_tcp[] = {
 typedef struct iavf_flow_engine * (*parse_engine_t)(struct iavf_adapter *ad,
 		struct rte_flow *flow,
 		struct iavf_parser_list *parser_list,
+		uint32_t priority,
 		const struct rte_flow_item pattern[],
 		const struct rte_flow_action actions[],
 		struct rte_flow_error *error);
@@ -1951,11 +1952,11 @@ iavf_flow_valid_attr(const struct rte_flow_attr *attr,
 		return -rte_errno;
 	}
 
-	/* Not supported */
-	if (attr->priority) {
+	/* support priority for flow subscribe */
+	if (attr->priority > 1) {
 		rte_flow_error_set(error, EINVAL,
 				RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
-				attr, "Not support priority.");
+				attr, "Only support priority 0 and 1.");
 		return -rte_errno;
 	}
 
@@ -2098,6 +2099,7 @@ static struct iavf_flow_engine *
 iavf_parse_engine_create(struct iavf_adapter *ad,
 		struct rte_flow *flow,
 		struct iavf_parser_list *parser_list,
+		uint32_t priority,
 		const struct rte_flow_item pattern[],
 		const struct rte_flow_action actions[],
 		struct rte_flow_error *error)
@@ -2111,7 +2113,7 @@ iavf_parse_engine_create(struct iavf_adapter *ad,
 		if (parser_node->parser->parse_pattern_action(ad,
 				parser_node->parser->array,
 				parser_node->parser->array_len,
-				pattern, actions, &meta, error) < 0)
+				pattern, actions, priority, &meta, error) < 0)
 			continue;
 
 		engine = parser_node->parser->engine;
@@ -2127,6 +2129,7 @@ static struct iavf_flow_engine *
 iavf_parse_engine_validate(struct iavf_adapter *ad,
 		struct rte_flow *flow,
 		struct iavf_parser_list *parser_list,
+		uint32_t priority,
 		const struct rte_flow_item pattern[],
 		const struct rte_flow_action actions[],
 		struct rte_flow_error *error)
@@ -2140,7 +2143,7 @@ iavf_parse_engine_validate(struct iavf_adapter *ad,
 		if (parser_node->parser->parse_pattern_action(ad,
 				parser_node->parser->array,
 				parser_node->parser->array_len,
-				pattern, actions, &meta,  error) < 0)
+				pattern, actions, priority, &meta, error) < 0)
 			continue;
 
 		engine = parser_node->parser->engine;
@@ -2201,18 +2204,18 @@ iavf_flow_process_filter(struct rte_eth_dev *dev,
 	if (ret)
 		return ret;
 
-	*engine = iavf_parse_engine(ad, flow, &vf->rss_parser_list, pattern,
-				    actions, error);
+	*engine = iavf_parse_engine(ad, flow, &vf->rss_parser_list,
+				    attr->priority, pattern, actions, error);
 	if (*engine)
 		return 0;
 
-	*engine = iavf_parse_engine(ad, flow, &vf->dist_parser_list, pattern,
-				    actions, error);
+	*engine = iavf_parse_engine(ad, flow, &vf->dist_parser_list,
+				    attr->priority, pattern, actions, error);
 	if (*engine)
 		return 0;
 
 	*engine = iavf_parse_engine(ad, flow, &vf->ipsec_crypto_parser_list,
-			pattern, actions, error);
+				    attr->priority, pattern, actions, error);
 	if (*engine)
 		return 0;
 
diff --git a/drivers/net/iavf/iavf_generic_flow.h b/drivers/net/iavf/iavf_generic_flow.h
index 448facffa5..60d8ab02b4 100644
--- a/drivers/net/iavf/iavf_generic_flow.h
+++ b/drivers/net/iavf/iavf_generic_flow.h
@@ -471,6 +471,7 @@ typedef int (*parse_pattern_action_t)(struct iavf_adapter *ad,
 		uint32_t array_len,
 		const struct rte_flow_item pattern[],
 		const struct rte_flow_action actions[],
+		uint32_t priority,
 		void **meta,
 		struct rte_flow_error *error);
 
diff --git a/drivers/net/iavf/iavf_hash.c b/drivers/net/iavf/iavf_hash.c
index 42df7c4e48..dea4e0aa0a 100644
--- a/drivers/net/iavf/iavf_hash.c
+++ b/drivers/net/iavf/iavf_hash.c
@@ -86,6 +86,7 @@ iavf_hash_parse_pattern_action(struct iavf_adapter *ad,
 			       uint32_t array_len,
 			       const struct rte_flow_item pattern[],
 			       const struct rte_flow_action actions[],
+			       uint32_t priority,
 			       void **meta,
 			       struct rte_flow_error *error);
 
@@ -1509,6 +1510,7 @@ iavf_hash_parse_pattern_action(__rte_unused struct iavf_adapter *ad,
 			       uint32_t array_len,
 			       const struct rte_flow_item pattern[],
 			       const struct rte_flow_action actions[],
+			       uint32_t priority,
 			       void **meta,
 			       struct rte_flow_error *error)
 {
@@ -1517,6 +1519,9 @@ iavf_hash_parse_pattern_action(__rte_unused struct iavf_adapter *ad,
 	uint64_t phint = IAVF_PHINT_NONE;
 	int ret = 0;
 
+	if (priority >= 1)
+		return -rte_errno;
+
 	rss_meta_ptr = rte_zmalloc(NULL, sizeof(*rss_meta_ptr), 0);
 	if (!rss_meta_ptr) {
 		rte_flow_error_set(error, EINVAL,
diff --git a/drivers/net/iavf/iavf_ipsec_crypto.c b/drivers/net/iavf/iavf_ipsec_crypto.c
index 1d465b4419..cec1b968fe 100644
--- a/drivers/net/iavf/iavf_ipsec_crypto.c
+++ b/drivers/net/iavf/iavf_ipsec_crypto.c
@@ -1933,16 +1933,20 @@ static struct iavf_flow_engine iavf_ipsec_flow_engine = {
 
 static int
 iavf_ipsec_flow_parse(struct iavf_adapter *ad,
-		       struct iavf_pattern_match_item *array,
-		       uint32_t array_len,
-		       const struct rte_flow_item pattern[],
-		       const struct rte_flow_action actions[],
-		       void **meta,
-		       struct rte_flow_error *error)
+		      struct iavf_pattern_match_item *array,
+		      uint32_t array_len,
+		      const struct rte_flow_item pattern[],
+		      const struct rte_flow_action actions[],
+		      uint32_t priority,
+		      void **meta,
+		      struct rte_flow_error *error)
 {
 	struct iavf_pattern_match_item *item = NULL;
 	int ret = -1;
 
+	if (priority >= 1)
+		return -rte_errno;
+
 	item = iavf_search_pattern_match_item(pattern, array, array_len, error);
 	if (item && item->meta) {
 		uint32_t type = (uint64_t)(item->meta);
-- 
2.25.1


^ permalink raw reply	[flat|nested] 40+ messages in thread

* RE: [PATCH v5 3/5] net/iavf: support flow subscription pattern
  2022-09-07  5:10   ` [PATCH v5 3/5] net/iavf: support flow subscription pattern Jie Wang
@ 2022-09-07  5:27     ` Zhang, Qi Z
  0 siblings, 0 replies; 40+ messages in thread
From: Zhang, Qi Z @ 2022-09-07  5:27 UTC (permalink / raw)
  To: Wang, Jie1X, dev; +Cc: Yang, Qiming, Wu, Jingjing, Xing, Beilei, Yang, SteveX



> -----Original Message-----
> From: Wang, Jie1X <jie1x.wang@intel.com>
> Sent: Wednesday, September 7, 2022 1:11 PM
> To: dev@dpdk.org
> Cc: Yang, Qiming <qiming.yang@intel.com>; Zhang, Qi Z
> <qi.z.zhang@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>; Xing, Beilei
> <beilei.xing@intel.com>; Yang, SteveX <stevex.yang@intel.com>; Wang, Jie1X
> <jie1x.wang@intel.com>
> Subject: [PATCH v5 3/5] net/iavf: support flow subscription pattern
> 
> Add flow subscription pattern support for AVF.
> 
> The supported patterns are listed below:
> eth/vlan/ipv4
> eth/ipv4(6)
> eth/ipv4(6)/udp
> eth/ipv4(6)/tcp
> 
> Signed-off-by: Jie Wang <jie1x.wang@intel.com>
> ---
> 
> +static int
> +iavf_fsub_check_action(const struct rte_flow_action *actions,
> +		       struct rte_flow_error *error)
> +{
> +	const struct rte_flow_action *action;
> +	enum rte_flow_action_type action_type;
> +	uint16_t actions_num = 0;
> +	bool vf_valid = false;
> +	bool queue_valid = false;
> +
> +	for (action = actions; action->type !=
> +				RTE_FLOW_ACTION_TYPE_END; action++) {
> +		action_type = action->type;
> +		switch (action_type) {
> +		case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:

The documentation in iavf.ini needs to be synced:

[rte_flow actions]
....
port_representor     = Y

This will be fixed during code merge.


^ permalink raw reply	[flat|nested] 40+ messages in thread

* RE: [PATCH v5 0/5] support flow subscription
  2022-09-07  5:10 ` [PATCH v5 0/5] support flow subscription Jie Wang
                     ` (4 preceding siblings ...)
  2022-09-07  5:10   ` [PATCH v5 5/5] net/iavf: support priority of flow rule Jie Wang
@ 2022-09-07  5:28   ` Zhang, Qi Z
  5 siblings, 0 replies; 40+ messages in thread
From: Zhang, Qi Z @ 2022-09-07  5:28 UTC (permalink / raw)
  To: Wang, Jie1X, dev; +Cc: Yang, Qiming, Wu, Jingjing, Xing, Beilei, Yang, SteveX



> -----Original Message-----
> From: Wang, Jie1X <jie1x.wang@intel.com>
> Sent: Wednesday, September 7, 2022 1:11 PM
> To: dev@dpdk.org
> Cc: Yang, Qiming <qiming.yang@intel.com>; Zhang, Qi Z
> <qi.z.zhang@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>; Xing, Beilei
> <beilei.xing@intel.com>; Yang, SteveX <stevex.yang@intel.com>; Wang, Jie1X
> <jie1x.wang@intel.com>
> Subject: [PATCH v5 0/5] support flow subscription
> 
> Add support AVF can be able to subscribe a flow from PF.
> 
> --
> v4:
>  * replace flow action represented_port with port_representor.
>  * update commit log and rebase.
> v3:
>  * fix eth layer inputset.
>  * rebase.
> v2:
>  * split v1 patch 2/2 to 4 small patches.
>  * remove rule action RTE_FLOW_ACTION_TYPE_VF and add
>    RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT.
> 
> Jie Wang (5):
>   common/iavf: support flow subscription
>   net/iavf: add flow subscription to AVF
>   net/iavf: support flow subscrption pattern
>   net/iavf: support flow subscription rule
>   net/iavf: support priority of flow rule
> 
>  doc/guides/rel_notes/release_22_11.rst |   4 +
>  drivers/common/iavf/virtchnl.h         | 104 +++-
>  drivers/net/iavf/iavf.h                |  13 +
>  drivers/net/iavf/iavf_fdir.c           |   4 +
>  drivers/net/iavf/iavf_fsub.c           | 745 +++++++++++++++++++++++++
>  drivers/net/iavf/iavf_generic_flow.c   |  40 +-
>  drivers/net/iavf/iavf_generic_flow.h   |   2 +
>  drivers/net/iavf/iavf_hash.c           |   5 +
>  drivers/net/iavf/iavf_ipsec_crypto.c   |  16 +-
>  drivers/net/iavf/iavf_vchnl.c          | 133 +++++
>  drivers/net/iavf/meson.build           |   1 +
>  11 files changed, 1046 insertions(+), 21 deletions(-)  create mode 100644
> drivers/net/iavf/iavf_fsub.c
> 
> --
> 2.25.1

Acked-by: Qi Zhang <qi.z.zhang@intel.com>

Applied to dpdk-next-net-intel.

Thanks
Qi


^ permalink raw reply	[flat|nested] 40+ messages in thread

end of thread, other threads:[~2022-09-07  5:28 UTC | newest]

Thread overview: 40+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2022-08-09  6:21 [PATCH 0/2] support flow subscription Jie Wang
2022-08-09  6:21 ` [PATCH 1/2] common/iavf: " Jie Wang
2022-08-09  6:21 ` [PATCH 2/2] net/iavf: enable flow subscription rule support for AVF Jie Wang
2022-08-12 17:04 ` [PATCH v2 0/5] support flow subscription Jie Wang
2022-08-12 17:04   ` [PATCH v2 1/5] common/iavf: " Jie Wang
2022-08-12 17:04   ` [PATCH v2 2/5] net/iavf: add flow subscription to AVF Jie Wang
2022-08-12 17:04   ` [PATCH v2 3/5] net/iavf: support flow subscription pattern Jie Wang
2022-08-12 17:04   ` [PATCH v2 4/5] net/iavf: support flow subscription rule Jie Wang
2022-08-12 17:04   ` [PATCH v2 5/5] net/iavf: support priority of flow rule Jie Wang
2022-08-30 18:05 ` [PATCH v3 0/5] support flow subscription Jie Wang
2022-08-30 18:05   ` [PATCH v3 1/5] common/iavf: " Jie Wang
2022-08-30 18:05   ` [PATCH v3 2/5] net/iavf: add flow subscription to AVF Jie Wang
2022-08-30 18:05   ` [PATCH v3 3/5] net/iavf: support flow subscription pattern Jie Wang
2022-09-06  7:30     ` Zhang, Qi Z
2022-08-30 18:05   ` [PATCH v3 4/5] net/iavf: support flow subscription rule Jie Wang
2022-08-30 18:05   ` [PATCH v3 5/5] net/iavf: support priority of flow rule Jie Wang
2022-08-31 10:56   ` [PATCH v3 0/5] support flow subscription Ferruh Yigit
2022-08-31 12:28     ` Zhang, Qi Z
2022-08-31 12:53       ` Ferruh Yigit
2022-09-01  0:59         ` Zhang, Qi Z
2022-09-07  3:35 ` [PATCH v4 " Jie Wang
2022-09-07  3:35   ` [PATCH v4 1/5] common/iavf: " Jie Wang
2022-09-07  3:35   ` [PATCH v4 2/5] net/iavf: add flow subscription to AVF Jie Wang
2022-09-07  3:35   ` [PATCH v4 3/5] net/iavf: support flow subscription pattern Jie Wang
2022-09-07  3:35   ` [PATCH v4 4/5] net/iavf: support flow subscription rule Jie Wang
2022-09-07  3:35   ` [PATCH v4 5/5] net/iavf: support priority of flow rule Jie Wang
2022-09-07  4:38 ` [PATCH v4 0/5] support flow subscription Jie Wang
2022-09-07  4:38   ` [PATCH v4 1/5] common/iavf: " Jie Wang
2022-09-07  4:38   ` [PATCH v4 2/5] net/iavf: add flow subscription to AVF Jie Wang
2022-09-07  4:38   ` [PATCH v4 3/5] net/iavf: support flow subscription pattern Jie Wang
2022-09-07  4:38   ` [PATCH v4 4/5] net/iavf: support flow subscription rule Jie Wang
2022-09-07  4:38   ` [PATCH v4 5/5] net/iavf: support priority of flow rule Jie Wang
2022-09-07  5:10 ` [PATCH v5 0/5] support flow subscription Jie Wang
2022-09-07  5:10   ` [PATCH v5 1/5] common/iavf: " Jie Wang
2022-09-07  5:10   ` [PATCH v5 2/5] net/iavf: add flow subscription to AVF Jie Wang
2022-09-07  5:10   ` [PATCH v5 3/5] net/iavf: support flow subscription pattern Jie Wang
2022-09-07  5:27     ` Zhang, Qi Z
2022-09-07  5:10   ` [PATCH v5 4/5] net/iavf: support flow subscription rule Jie Wang
2022-09-07  5:10   ` [PATCH v5 5/5] net/iavf: support priority of flow rule Jie Wang
2022-09-07  5:28   ` [PATCH v5 0/5] support flow subscription Zhang, Qi Z
