DPDK patches and discussions
* [dpdk-dev] [PATCH 0/2] Enable rte_flow API in ice driver
@ 2019-06-03  9:05 Qiming Yang
  2019-06-03  9:05 ` [dpdk-dev] [PATCH 1/2] net/ice: enable switch filter Qiming Yang
                   ` (10 more replies)
  0 siblings, 11 replies; 73+ messages in thread
From: Qiming Yang @ 2019-06-03  9:05 UTC (permalink / raw)
  To: qi.z.zhang; +Cc: dev

This patch set enables the backend of rte_flow and the
generic filter-related functions in the ice driver.
Supported flows include IPv4, IPv4 TCP, IPv4 UDP, IPv6,
IPv6 TCP, IPv6 UDP, tunnels, etc.
This patch set depends on the shared code update.
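
For reference, below is a minimal sketch of how an application could
exercise these filters through the generic rte_flow API once this
series is applied. The port ID, address and queue index are
illustrative placeholders, not values taken from this series:

#include <rte_flow.h>
#include <rte_byteorder.h>

/* Example only: steer ingress IPv4 traffic from one source address
 * to Rx queue 1. All values are placeholders.
 */
static struct rte_flow *
example_ipv4_to_queue(uint16_t port_id, struct rte_flow_error *err)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_ipv4 ip_spec = {
		.hdr.src_addr = RTE_BE32(0xC0A80101), /* 192.168.1.1 */
	};
	struct rte_flow_item_ipv4 ip_mask = {
		.hdr.src_addr = RTE_BE32(0xFFFFFFFF),
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ip_spec, .mask = &ip_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 1 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	/* Validate first, then create; both calls are dispatched to the
	 * ice PMD backend added by this series.
	 */
	if (rte_flow_validate(port_id, &attr, pattern, actions, err))
		return NULL;
	return rte_flow_create(port_id, &attr, pattern, actions, err);
}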

Qiming Yang (1):
  net/ice: add generic flow API

wei zhao (1):
  net/ice: enable switch filter

 drivers/net/ice/Makefile            |   2 +
 drivers/net/ice/ice_ethdev.c        |  44 +++
 drivers/net/ice/ice_ethdev.h        |  11 +
 drivers/net/ice/ice_generic_flow.c  | 569 ++++++++++++++++++++++++++++++++++++
 drivers/net/ice/ice_generic_flow.h  | 402 +++++++++++++++++++++++++
 drivers/net/ice/ice_switch_filter.c | 502 +++++++++++++++++++++++++++++++
 drivers/net/ice/ice_switch_filter.h |  28 ++
 drivers/net/ice/meson.build         |   1 +
 8 files changed, 1559 insertions(+)
 create mode 100644 drivers/net/ice/ice_generic_flow.c
 create mode 100644 drivers/net/ice/ice_generic_flow.h
 create mode 100644 drivers/net/ice/ice_switch_filter.c
 create mode 100644 drivers/net/ice/ice_switch_filter.h

-- 
2.5.5



* [dpdk-dev] [PATCH 1/2] net/ice: enable switch filter
  2019-06-03  9:05 [dpdk-dev] [PATCH 0/2] Enable rte_flow API in ice driver Qiming Yang
@ 2019-06-03  9:05 ` Qiming Yang
  2019-06-03 17:07   ` Aaron Conole
  2019-06-03  9:05 ` [dpdk-dev] [PATCH 2/2] net/ice: add generic flow API Qiming Yang
                   ` (9 subsequent siblings)
  10 siblings, 1 reply; 73+ messages in thread
From: Qiming Yang @ 2019-06-03  9:05 UTC (permalink / raw)
  To: qi.z.zhang; +Cc: dev, wei zhao

From: wei zhao <wei.zhao1@intel.com>

This patch enables the backend of rte_flow. It translates
rte_flow_xxx structures into device-specific data structures
and configures the packet processing engine's binary
classifier (switch) accordingly.
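
As an illustration (not part of this patch), a rule like the one
below is what this backend translates into an ice_adv_lkup_elem list
plus ice_adv_rule_info and then programs through ice_add_adv_rule().
The MAC address and port ID are placeholders:

#include <rte_flow.h>

/* Example only: drop all ingress packets from one source MAC.
 * QUEUE and DROP are the only actions this backend handles so far.
 */
static struct rte_flow *
example_drop_by_smac(uint16_t port_id, struct rte_flow_error *err)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_eth eth_spec = {
		.src.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
	};
	struct rte_flow_item_eth eth_mask = {
		.src.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
		  .spec = &eth_spec, .mask = &eth_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_DROP },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, err);
}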

Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
---
 drivers/net/ice/Makefile            |   1 +
 drivers/net/ice/ice_ethdev.h        |   6 +
 drivers/net/ice/ice_switch_filter.c | 502 ++++++++++++++++++++++++++++++++++++
 drivers/net/ice/ice_switch_filter.h |  28 ++
 drivers/net/ice/meson.build         |   1 +
 5 files changed, 538 insertions(+)
 create mode 100644 drivers/net/ice/ice_switch_filter.c
 create mode 100644 drivers/net/ice/ice_switch_filter.h

diff --git a/drivers/net/ice/Makefile b/drivers/net/ice/Makefile
index 0e5c55e..b10d826 100644
--- a/drivers/net/ice/Makefile
+++ b/drivers/net/ice/Makefile
@@ -60,6 +60,7 @@ ifeq ($(CONFIG_RTE_ARCH_X86), y)
 SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_rxtx_vec_sse.c
 endif
 
+SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_switch_filter.c
 ifeq ($(findstring RTE_MACHINE_CPUFLAG_AVX2,$(CFLAGS)),RTE_MACHINE_CPUFLAG_AVX2)
 	CC_AVX2_SUPPORT=1
 else
diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index 1385afa..67a358a 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -234,6 +234,12 @@ struct ice_vsi {
 	bool offset_loaded;
 };
 
+/* Struct to store flow created. */
+struct rte_flow {
+	TAILQ_ENTRY(rte_flow) node;
+void *rule;
+};
+
 struct ice_pf {
 	struct ice_adapter *adapter; /* The adapter this PF associate to */
 	struct ice_vsi *main_vsi; /* pointer to main VSI structure */
diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
new file mode 100644
index 0000000..f2e7751
--- /dev/null
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -0,0 +1,502 @@
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_eth_ctrl.h>
+#include <rte_tailq.h>
+#include <rte_flow_driver.h>
+
+#include "ice_logs.h"
+#include "base/ice_type.h"
+#include "ice_switch_filter.h"
+
+static int
+ice_parse_switch_filter(
+			const struct rte_flow_item pattern[],
+			const struct rte_flow_action actions[],
+			struct rte_flow_error *error,
+			struct ice_adv_rule_info *rule_info,
+			struct ice_adv_lkup_elem **lkup_list,
+			uint16_t *lkups_num)
+{
+	const struct rte_flow_item *item = pattern;
+	enum rte_flow_item_type item_type;
+	const struct rte_flow_item_eth *eth_spec, *eth_mask;
+	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
+	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
+	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
+	const struct rte_flow_item_udp *udp_spec, *udp_mask;
+	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
+	const struct rte_flow_item_nvgre  *nvgre_spec, *nvgre_mask;
+	const struct rte_flow_item_vxlan  *vxlan_spec, *vxlan_mask;
+	struct ice_adv_lkup_elem *list = *lkup_list;
+	uint16_t i, j, lkups_cnt = 0;
+	uint16_t item_num = 0;
+	enum ice_sw_tunnel_type tun_type = ICE_NON_TUN;
+
+	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+		if (item->type == RTE_FLOW_ITEM_TYPE_ETH ||
+			item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
+			item->type == RTE_FLOW_ITEM_TYPE_IPV6 ||
+			item->type == RTE_FLOW_ITEM_TYPE_UDP ||
+			item->type == RTE_FLOW_ITEM_TYPE_TCP ||
+			item->type == RTE_FLOW_ITEM_TYPE_SCTP ||
+			item->type == RTE_FLOW_ITEM_TYPE_VXLAN ||
+			item->type == RTE_FLOW_ITEM_TYPE_NVGRE)
+			item_num++;
+	}
+
+	list = rte_zmalloc(NULL, item_num * sizeof(*list), 0);
+	if (!list) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ITEM, actions,
+				   "no memory malloc");
+		goto out;
+	}
+
+	for (item = pattern, i = 0; item->type !=
+			RTE_FLOW_ITEM_TYPE_END; item++, i++) {
+		item_type = item->type;
+
+		switch (item_type) {
+		case RTE_FLOW_ITEM_TYPE_ETH:
+			eth_spec = item->spec;
+			eth_mask = item->mask;
+			if (eth_spec && eth_mask) {
+				list[i].type = (tun_type == ICE_NON_TUN) ?
+					ICE_MAC_OFOS : ICE_MAC_IL;
+				for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
+					if (eth_mask->src.addr_bytes[j] ==
+								UINT8_MAX) {
+						list[i].h_u.eth_hdr.
+							src_addr[j] =
+						eth_spec->src.addr_bytes[j];
+						list[i].m_u.eth_hdr.
+							src_addr[j] =
+						eth_mask->src.addr_bytes[j];
+					}
+					if (eth_mask->dst.addr_bytes[j] ==
+								UINT8_MAX) {
+						list[i].h_u.eth_hdr.
+							dst_addr[j] =
+						eth_spec->dst.addr_bytes[j];
+						list[i].m_u.eth_hdr.
+							dst_addr[j] =
+						eth_mask->dst.addr_bytes[j];
+					}
+				}
+				if (eth_mask->type == UINT16_MAX) {
+					list[i].h_u.eth_hdr.ethtype_id =
+					rte_be_to_cpu_16(eth_spec->type);
+					list[i].m_u.eth_hdr.ethtype_id =
+						UINT16_MAX;
+				}
+				lkups_cnt++;
+			} else if (!eth_spec && !eth_mask) {
+				list[i].type = (tun_type == ICE_NON_TUN) ?
+					ICE_MAC_OFOS : ICE_MAC_IL;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_IPV4:
+			ipv4_spec = item->spec;
+			ipv4_mask = item->mask;
+			if (ipv4_spec && ipv4_mask) {
+				list[i].type = (tun_type == ICE_NON_TUN) ?
+					ICE_IPV4_OFOS : ICE_IPV4_IL;
+				if (ipv4_mask->hdr.src_addr == UINT32_MAX) {
+					list[i].h_u.ipv4_hdr.src_addr =
+						ipv4_spec->hdr.src_addr;
+					list[i].m_u.ipv4_hdr.src_addr =
+						UINT32_MAX;
+				}
+				if (ipv4_mask->hdr.dst_addr == UINT32_MAX) {
+					list[i].h_u.ipv4_hdr.dst_addr =
+						ipv4_spec->hdr.dst_addr;
+					list[i].m_u.ipv4_hdr.dst_addr =
+						UINT32_MAX;
+				}
+				if (ipv4_mask->hdr.time_to_live == UINT8_MAX) {
+					list[i].h_u.ipv4_hdr.time_to_live =
+						ipv4_spec->hdr.time_to_live;
+					list[i].m_u.ipv4_hdr.time_to_live =
+						UINT8_MAX;
+				}
+				if (ipv4_mask->hdr.next_proto_id == UINT8_MAX) {
+					list[i].h_u.ipv4_hdr.protocol =
+						ipv4_spec->hdr.next_proto_id;
+					list[i].m_u.ipv4_hdr.protocol =
+						UINT8_MAX;
+				}
+				if (ipv4_mask->hdr.type_of_service ==
+						UINT8_MAX) {
+					list[i].h_u.ipv4_hdr.tos =
+						ipv4_spec->hdr.type_of_service;
+					list[i].m_u.ipv4_hdr.tos = UINT8_MAX;
+				}
+				lkups_cnt++;
+			} else if (!ipv4_spec && !ipv4_mask) {
+				list[i].type = (tun_type == ICE_NON_TUN) ?
+					ICE_IPV4_OFOS : ICE_IPV4_IL;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_IPV6:
+			ipv6_spec = item->spec;
+			ipv6_mask = item->mask;
+			if (ipv6_spec && ipv6_mask) {
+				list[i].type = (tun_type == ICE_NON_TUN) ?
+					ICE_IPV6_OFOS : ICE_IPV6_IL;
+				for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
+					if (ipv6_mask->hdr.src_addr[j] ==
+								UINT8_MAX) {
+						list[i].h_u.ice_ipv6_ofos_hdr.
+							src_addr[j] =
+						ipv6_spec->hdr.src_addr[j];
+						list[i].m_u.ice_ipv6_ofos_hdr.
+							src_addr[j] =
+						ipv6_mask->hdr.src_addr[j];
+					}
+					if (ipv6_mask->hdr.dst_addr[j] ==
+								UINT8_MAX) {
+						list[i].h_u.ice_ipv6_ofos_hdr.
+							dst_addr[j] =
+						ipv6_spec->hdr.dst_addr[j];
+						list[i].m_u.ice_ipv6_ofos_hdr.
+							dst_addr[j] =
+						ipv6_mask->hdr.dst_addr[j];
+					}
+				}
+				if (ipv6_mask->hdr.proto == UINT8_MAX) {
+					list[i].h_u.ice_ipv6_ofos_hdr.next_hdr =
+						ipv6_spec->hdr.proto;
+					list[i].m_u.ice_ipv6_ofos_hdr.next_hdr =
+						UINT8_MAX;
+				}
+				if (ipv6_mask->hdr.hop_limits == UINT8_MAX) {
+					list[i].h_u.ice_ipv6_ofos_hdr.
+					hop_limit = ipv6_spec->hdr.hop_limits;
+					list[i].m_u.ice_ipv6_ofos_hdr.
+						hop_limit  = UINT8_MAX;
+				}
+				lkups_cnt++;
+			} else if (!ipv6_spec && !ipv6_mask) {
+				list[i].type = (tun_type == ICE_NON_TUN) ?
+					ICE_IPV6_OFOS : ICE_IPV6_IL;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_UDP:
+			udp_spec = item->spec;
+			udp_mask = item->mask;
+			if (udp_spec && udp_mask) {
+				list[i].type = ICE_UDP_ILOS;
+				if (udp_mask->hdr.src_port == UINT16_MAX) {
+					list[i].h_u.l4_hdr.src_port =
+						udp_spec->hdr.src_port;
+					list[i].m_u.l4_hdr.src_port =
+						udp_mask->hdr.src_port;
+				}
+				if (udp_mask->hdr.dst_port == UINT16_MAX) {
+					list[i].h_u.l4_hdr.dst_port =
+						udp_spec->hdr.dst_port;
+					list[i].m_u.l4_hdr.dst_port =
+						udp_mask->hdr.dst_port;
+				}
+				lkups_cnt++;
+			} else if (!udp_spec && !udp_mask) {
+				list[i].type = ICE_UDP_ILOS;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_TCP:
+			tcp_spec = item->spec;
+			tcp_mask = item->mask;
+			if (tcp_spec && tcp_mask) {
+				list[i].type = ICE_TCP_IL;
+				if (tcp_mask->hdr.src_port == UINT16_MAX) {
+					list[i].h_u.l4_hdr.src_port =
+						tcp_spec->hdr.src_port;
+					list[i].m_u.l4_hdr.src_port =
+						tcp_mask->hdr.src_port;
+				}
+				if (tcp_mask->hdr.dst_port == UINT16_MAX) {
+					list[i].h_u.l4_hdr.dst_port =
+						tcp_spec->hdr.dst_port;
+					list[i].m_u.l4_hdr.dst_port =
+						tcp_mask->hdr.dst_port;
+				}
+				lkups_cnt++;
+			} else if (!tcp_spec && !tcp_mask) {
+				list[i].type = ICE_TCP_IL;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_SCTP:
+			sctp_spec = item->spec;
+			sctp_mask = item->mask;
+			if (sctp_spec && sctp_mask) {
+				list[i].type = ICE_SCTP_IL;
+				if (sctp_mask->hdr.src_port == UINT16_MAX) {
+					list[i].h_u.sctp_hdr.src_port =
+						sctp_spec->hdr.src_port;
+					list[i].m_u.sctp_hdr.src_port =
+						sctp_mask->hdr.src_port;
+				}
+				if (sctp_mask->hdr.dst_port == UINT16_MAX) {
+					list[i].h_u.sctp_hdr.dst_port =
+						sctp_spec->hdr.dst_port;
+					list[i].m_u.sctp_hdr.dst_port =
+						sctp_mask->hdr.dst_port;
+				}
+				lkups_cnt++;
+			} else if (!sctp_spec && !sctp_mask) {
+				list[i].type = ICE_SCTP_IL;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_VXLAN:
+			vxlan_spec = item->spec;
+			vxlan_mask = item->mask;
+			tun_type = ICE_SW_TUN_VXLAN;
+			if (vxlan_spec && vxlan_mask) {
+				list[i].type = ICE_VXLAN;
+				if (vxlan_mask->vni[0] == UINT8_MAX &&
+					vxlan_mask->vni[1] == UINT8_MAX &&
+					vxlan_mask->vni[2] == UINT8_MAX) {
+					list[i].h_u.tnl_hdr.vni =
+						(vxlan_spec->vni[1] << 8) |
+						vxlan_spec->vni[0];
+					list[i].m_u.tnl_hdr.vni =
+						UINT16_MAX;
+				}
+				lkups_cnt++;
+			} else if (!vxlan_spec && !vxlan_mask) {
+				list[i].type = ICE_VXLAN;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_NVGRE:
+			nvgre_spec = item->spec;
+			nvgre_mask = item->mask;
+			tun_type = ICE_SW_TUN_NVGRE;
+			if (nvgre_spec && nvgre_mask) {
+				list[i].type = ICE_NVGRE;
+				if (nvgre_mask->tni[0] == UINT8_MAX &&
+					nvgre_mask->tni[1] == UINT8_MAX &&
+					nvgre_mask->tni[2] == UINT8_MAX) {
+					list[i].h_u.nvgre_hdr.tni =
+						(nvgre_spec->tni[1] << 8) |
+						nvgre_spec->tni[0];
+					list[i].m_u.nvgre_hdr.tni =
+						UINT16_MAX;
+				}
+				lkups_cnt++;
+			} else if (!nvgre_spec && !nvgre_mask) {
+				list[i].type = ICE_NVGRE;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_VOID:
+		case RTE_FLOW_ITEM_TYPE_END:
+			break;
+
+		default:
+			rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ITEM, actions,
+				   "Invalid pattern item.");
+			goto out;
+		}
+	}
+
+	rule_info->tun_type = tun_type;
+	*lkups_num = lkups_cnt;
+	*lkup_list = list; /* return the allocated lookup list to the caller */
+	return 0;
+out:
+	return -rte_errno;
+}
+
+/* For now, the ice switch filter action code only
+ * supports QUEUE or DROP.
+ */
+static int
+ice_parse_switch_action(struct ice_pf *pf,
+				 const struct rte_flow_action *actions,
+				 struct rte_flow_error *error,
+				 struct ice_adv_rule_info *rule_info)
+{
+	struct ice_hw *hw = ICE_PF_TO_HW(pf);
+	struct ice_vsi *vsi = pf->main_vsi;
+	const struct rte_flow_action *act;
+	const struct rte_flow_action_queue *act_q;
+	uint16_t base_queue, index = 0;
+	uint32_t reg;
+
+	/* Check if the first non-void action is QUEUE or DROP. */
+	NEXT_ITEM_OF_ACTION(act, actions, index);
+	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
+	    act->type != RTE_FLOW_ACTION_TYPE_DROP) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+				   act, "Not supported action.");
+		return -rte_errno;
+	}
+	reg = ICE_READ_REG(hw, PFLAN_RX_QALLOC);
+	if (reg & PFLAN_RX_QALLOC_VALID_M) {
+		base_queue = reg & PFLAN_RX_QALLOC_FIRSTQ_M;
+	} else {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ACTION,
+			act, "Invalid queue register");
+		return -rte_errno;
+	}
+	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
+		act_q = act->conf;
+		rule_info->sw_act.fltr_act = ICE_FWD_TO_Q;
+		rule_info->sw_act.fwd_id.q_id = base_queue + act_q->index;
+		if (act_q->index >= pf->dev_data->nb_rx_queues) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ACTION,
+				act, "Invalid queue ID for"
+				" switch filter.");
+			return -rte_errno;
+		}
+	} else {
+		rule_info->sw_act.fltr_act = ICE_DROP_PACKET;
+	}
+
+	rule_info->sw_act.vsi_handle = vsi->idx;
+	rule_info->rx = 1;
+	rule_info->sw_act.src = vsi->idx;
+	rule_info->sw_act.flag = ICE_FLTR_RX;
+
+	/* Check if the next non-void item is END */
+	index++;
+	NEXT_ITEM_OF_ACTION(act, actions, index);
+	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+				   act, "Not supported action.");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+static int
+ice_switch_rule_set(struct ice_pf *pf,
+			struct ice_adv_lkup_elem *list,
+			uint16_t lkups_cnt,
+			struct ice_adv_rule_info *rule_info,
+			struct rte_flow *flow)
+{
+	struct ice_hw *hw = ICE_PF_TO_HW(pf);
+	int ret;
+	struct ice_rule_query_data rule_added = {0};
+	struct ice_rule_query_data *filter_ptr;
+
+	if (lkups_cnt > ICE_MAX_CHAIN_WORDS) {
+		PMD_DRV_LOG(ERR, "item number too large for rule");
+		return -ENOTSUP;
+	}
+	if (!list) {
+		PMD_DRV_LOG(ERR, "lookup list should not be NULL");
+		return -ENOTSUP;
+	}
+
+	ret = ice_add_adv_rule(hw, list, lkups_cnt, rule_info, &rule_added);
+
+	if (!ret) {
+		filter_ptr = rte_zmalloc("ice_switch_filter",
+			sizeof(struct ice_rule_query_data), 0);
+		if (!filter_ptr) {
+			PMD_DRV_LOG(ERR, "failed to allocate memory");
+			return -EINVAL;
+		}
+		flow->rule = filter_ptr;
+		rte_memcpy(filter_ptr,
+			&rule_added,
+			sizeof(struct ice_rule_query_data));
+	}
+
+	return ret;
+}
+
+int
+ice_create_switch_filter(struct ice_pf *pf,
+			const struct rte_flow_item pattern[],
+			const struct rte_flow_action actions[],
+			struct rte_flow *flow,
+			struct rte_flow_error *error)
+{
+	int ret = 0;
+	struct ice_adv_rule_info rule_info = {0};
+	struct ice_adv_lkup_elem *list = NULL;
+	uint16_t lkups_num = 0;
+
+	ret = ice_parse_switch_filter(pattern, actions, error,
+			&rule_info, &list, &lkups_num);
+	if (ret)
+		goto out;
+
+	ret = ice_parse_switch_action(pf, actions, error, &rule_info);
+	if (ret)
+		goto out;
+
+	ret = ice_switch_rule_set(pf, list, lkups_num, &rule_info, flow);
+	if (ret)
+		goto out;
+
+	rte_free(list);
+	return 0;
+
+out:
+	rte_free(list);
+
+	return -rte_errno;
+}
+
+int
+ice_destroy_switch_filter(struct ice_pf *pf,
+			struct rte_flow *flow)
+{
+	struct ice_hw *hw = ICE_PF_TO_HW(pf);
+	int ret;
+	struct ice_rule_query_data *filter_ptr;
+	struct ice_rule_query_data rule_added;
+
+	filter_ptr = (struct ice_rule_query_data *)
+			flow->rule;
+	if (!filter_ptr) {
+		PMD_DRV_LOG(ERR, "no such flow"
+			    " created by switch filter");
+		return -EINVAL;
+	}
+
+	rte_memcpy(&rule_added, filter_ptr,
+		sizeof(struct ice_rule_query_data));
+
+	ret = ice_rem_adv_rule_by_id(hw, &rule_added);
+
+	rte_free(filter_ptr);
+
+	return ret;
+}
+
+void
+ice_free_switch_filter_rule(void *rule)
+{
+	struct ice_rule_query_data *filter_ptr;
+
+	filter_ptr = (struct ice_rule_query_data *)rule;
+
+	rte_free(filter_ptr);
+}
diff --git a/drivers/net/ice/ice_switch_filter.h b/drivers/net/ice/ice_switch_filter.h
new file mode 100644
index 0000000..957d0d1
--- /dev/null
+++ b/drivers/net/ice/ice_switch_filter.h
@@ -0,0 +1,28 @@
+#ifndef _ICE_SWITCH_FILTER_H_
+#define _ICE_SWITCH_FILTER_H_
+
+#include "base/ice_switch.h"
+#include "base/ice_type.h"
+#include "ice_ethdev.h"
+
+#define NEXT_ITEM_OF_ACTION(act, actions, index)                        \
+	do {                                                            \
+		act = actions + index;                                  \
+		while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {        \
+			index++;                                        \
+			act = actions + index;                          \
+		}                                                       \
+	} while (0)
+
+int
+ice_create_switch_filter(struct ice_pf *pf,
+			const struct rte_flow_item pattern[],
+			const struct rte_flow_action actions[],
+			struct rte_flow *flow,
+			struct rte_flow_error *error);
+int
+ice_destroy_switch_filter(struct ice_pf *pf,
+			struct rte_flow *flow);
+void
+ice_free_switch_filter_rule(void *rule);
+#endif /* _ICE_SWITCH_FILTER_H_ */
diff --git a/drivers/net/ice/meson.build b/drivers/net/ice/meson.build
index 2bec688..c6bcba3 100644
--- a/drivers/net/ice/meson.build
+++ b/drivers/net/ice/meson.build
@@ -7,6 +7,7 @@ objs = [base_objs]
 sources = files(
 	'ice_ethdev.c',
 	'ice_rxtx.c'
+	'ice_switch_filter.c'
 	)
 
 deps += ['hash']
-- 
2.5.5



* [dpdk-dev] [PATCH 2/2] net/ice: add generic flow API
  2019-06-03  9:05 [dpdk-dev] [PATCH 0/2] Enable rte_flow API in ice driver Qiming Yang
  2019-06-03  9:05 ` [dpdk-dev] [PATCH 1/2] net/ice: enable switch filter Qiming Yang
@ 2019-06-03  9:05 ` Qiming Yang
  2019-06-12  7:50 ` [dpdk-dev] [PATCH v2 0/3] Enable rte_flow API in ice driver Qiming Yang
                   ` (8 subsequent siblings)
  10 siblings, 0 replies; 73+ messages in thread
From: Qiming Yang @ 2019-06-03  9:05 UTC (permalink / raw)
  To: qi.z.zhang; +Cc: dev

This patch adds ice_flow_create, ice_flow_destroy,
ice_flow_flush and ice_flow_validate support; these
are used to handle all the generic filters.
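
For context, a rough sketch of how these ops are reached from an
application; the rte_flow_* calls are routed to ice_flow_* through
the RTE_ETH_FILTER_GENERIC filter_ctrl path added below. The port ID
and EtherType are placeholders:

#include <rte_flow.h>
#include <rte_byteorder.h>

/* Example only: create a rule matching one EtherType, then remove it,
 * and finally flush whatever rules remain on the port.
 */
static void
example_flow_lifecycle(uint16_t port_id)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_eth eth_spec = { .type = RTE_BE16(0x0800) };
	struct rte_flow_item_eth eth_mask = { .type = RTE_BE16(0xffff) };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
		  .spec = &eth_spec, .mask = &eth_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 0 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;
	struct rte_flow *flow = NULL;

	/* .validate and .create */
	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
		flow = rte_flow_create(port_id, &attr, pattern, actions, &err);

	/* .destroy removes a single rule ... */
	if (flow)
		rte_flow_destroy(port_id, flow, &err);

	/* ... and .flush removes every rule still attached to the port. */
	rte_flow_flush(port_id, &err);
}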

Signed-off-by: Qiming Yang <qiming.yang@intel.com>
---
 drivers/net/ice/Makefile           |   1 +
 drivers/net/ice/ice_ethdev.c       |  44 +++
 drivers/net/ice/ice_ethdev.h       |   7 +-
 drivers/net/ice/ice_generic_flow.c | 569 +++++++++++++++++++++++++++++++++++++
 drivers/net/ice/ice_generic_flow.h | 402 ++++++++++++++++++++++++++
 5 files changed, 1022 insertions(+), 1 deletion(-)
 create mode 100644 drivers/net/ice/ice_generic_flow.c
 create mode 100644 drivers/net/ice/ice_generic_flow.h

diff --git a/drivers/net/ice/Makefile b/drivers/net/ice/Makefile
index b10d826..32abeb6 100644
--- a/drivers/net/ice/Makefile
+++ b/drivers/net/ice/Makefile
@@ -79,5 +79,6 @@ endif
 ifeq ($(CC_AVX2_SUPPORT), 1)
 	SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_rxtx_vec_avx2.c
 endif
+SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_generic_flow.c
 
 include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index bdbceb4..cf6bb1d 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -15,6 +15,7 @@
 #include "base/ice_dcb.h"
 #include "ice_ethdev.h"
 #include "ice_rxtx.h"
+#include "ice_switch_filter.h"
 
 #define ICE_MAX_QP_NUM "max_queue_pair_num"
 #define ICE_DFLT_OUTER_TAG_TYPE ICE_AQ_VSI_OUTER_TAG_VLAN_9100
@@ -83,6 +84,10 @@ static int ice_xstats_get(struct rte_eth_dev *dev,
 static int ice_xstats_get_names(struct rte_eth_dev *dev,
 				struct rte_eth_xstat_name *xstats_names,
 				unsigned int limit);
+static int ice_dev_filter_ctrl(struct rte_eth_dev *dev,
+			enum rte_filter_type filter_type,
+			enum rte_filter_op filter_op,
+			void *arg);
 
 static const struct rte_pci_id pci_id_ice_map[] = {
 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_BACKPLANE) },
@@ -141,6 +146,7 @@ static const struct eth_dev_ops ice_eth_dev_ops = {
 	.xstats_get                   = ice_xstats_get,
 	.xstats_get_names             = ice_xstats_get_names,
 	.xstats_reset                 = ice_stats_reset,
+	.filter_ctrl                  = ice_dev_filter_ctrl,
 };
 
 /* store statistics names and its offset in stats structure */
@@ -1460,6 +1466,8 @@ ice_dev_init(struct rte_eth_dev *dev)
 	/* enable uio intr after callback register */
 	rte_intr_enable(intr_handle);
 
+	TAILQ_INIT(&pf->flow_list);
+
 	return 0;
 
 err_pf_setup:
@@ -1602,6 +1610,8 @@ ice_dev_uninit(struct rte_eth_dev *dev)
 {
 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	struct rte_flow *p_flow;
 
 	ice_dev_close(dev);
 
@@ -1619,6 +1629,13 @@ ice_dev_uninit(struct rte_eth_dev *dev)
 	rte_intr_callback_unregister(intr_handle,
 				     ice_interrupt_handler, dev);
 
+	/* Remove all flows */
+	while ((p_flow = TAILQ_FIRST(&pf->flow_list))) {
+		TAILQ_REMOVE(&pf->flow_list, p_flow, node);
+		ice_free_switch_filter_rule(p_flow->rule);
+		rte_free(p_flow);
+	}
+
 	return 0;
 }
 
@@ -3603,6 +3620,33 @@ static int ice_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
 }
 
 static int
+ice_dev_filter_ctrl(struct rte_eth_dev *dev,
+		     enum rte_filter_type filter_type,
+		     enum rte_filter_op filter_op,
+		     void *arg)
+{
+	int ret = 0;
+
+	if (!dev)
+		return -EINVAL;
+
+	switch (filter_type) {
+	case RTE_ETH_FILTER_GENERIC:
+		if (filter_op != RTE_ETH_FILTER_GET)
+			return -EINVAL;
+		*(const void **)arg = &ice_flow_ops;
+		break;
+	default:
+		PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
+					filter_type);
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+static int
 ice_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 	      struct rte_pci_device *pci_dev)
 {
diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index 67a358a..0905ff9 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -234,12 +234,16 @@ struct ice_vsi {
 	bool offset_loaded;
 };
 
+extern const struct rte_flow_ops ice_flow_ops;
+
 /* Struct to store flow created. */
 struct rte_flow {
 	TAILQ_ENTRY(rte_flow) node;
-void *rule;
+	void *rule;
 };
 
+TAILQ_HEAD(ice_flow_list, rte_flow);
+
 struct ice_pf {
 	struct ice_adapter *adapter; /* The adapter this PF associate to */
 	struct ice_vsi *main_vsi; /* pointer to main VSI structure */
@@ -265,6 +269,7 @@ struct ice_pf {
 	struct ice_eth_stats internal_stats;
 	bool offset_loaded;
 	bool adapter_stopped;
+	struct ice_flow_list flow_list;
 };
 
 /**
diff --git a/drivers/net/ice/ice_generic_flow.c b/drivers/net/ice/ice_generic_flow.c
new file mode 100644
index 0000000..e8717b5
--- /dev/null
+++ b/drivers/net/ice/ice_generic_flow.c
@@ -0,0 +1,569 @@
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_malloc.h>
+
+#include "ice_ethdev.h"
+#include "ice_generic_flow.h"
+#include "ice_switch_filter.h"
+
+static int ice_flow_validate(struct rte_eth_dev *dev,
+		const struct rte_flow_attr *attr,
+		const struct rte_flow_item pattern[],
+		const struct rte_flow_action actions[],
+		struct rte_flow_error *error);
+static struct rte_flow *ice_flow_create(struct rte_eth_dev *dev,
+		const struct rte_flow_attr *attr,
+		const struct rte_flow_item pattern[],
+		const struct rte_flow_action actions[],
+		struct rte_flow_error *error);
+static int ice_flow_destroy(struct rte_eth_dev *dev,
+		struct rte_flow *flow,
+		struct rte_flow_error *error);
+static int ice_flow_flush(struct rte_eth_dev *dev,
+		struct rte_flow *flow,
+		struct rte_flow_error *error);
+
+const struct rte_flow_ops ice_flow_ops = {
+	.validate = ice_flow_validate,
+	.create = ice_flow_create,
+	.destroy = ice_flow_destroy,
+	.flush = ice_flow_flush,
+};
+
+static int
+ice_flow_valid_attr(const struct rte_flow_attr *attr,
+		     struct rte_flow_error *error)
+{
+	/* Must be input direction */
+	if (!attr->ingress) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+				   attr, "Only support ingress.");
+		return -rte_errno;
+	}
+
+	/* Not supported */
+	if (attr->egress) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+				   attr, "Not support egress.");
+		return -rte_errno;
+	}
+
+	/* Not supported */
+	if (attr->priority) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+				   attr, "Not support priority.");
+		return -rte_errno;
+	}
+
+	/* Not supported */
+	if (attr->group) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+				   attr, "Not support group.");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+/* Check if the pattern matches a supported item type array */
+static bool
+ice_match_pattern(enum rte_flow_item_type *item_array,
+		const struct rte_flow_item *pattern)
+{
+	const struct rte_flow_item *item = pattern;
+
+	while ((*item_array == item->type) &&
+	       (*item_array != RTE_FLOW_ITEM_TYPE_END)) {
+		item_array++;
+		item++;
+	}
+
+	return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
+		item->type == RTE_FLOW_ITEM_TYPE_END);
+}
+
+static uint64_t ice_flow_valid_pattern(const struct rte_flow_item pattern[],
+		struct rte_flow_error *error)
+{
+	uint16_t i = 0;
+	uint64_t inset;
+
+	for (; i < RTE_DIM(ice_supported_patterns); i++)
+		if (ice_match_pattern(ice_supported_patterns[i].items,
+				      pattern)) {
+			inset = ice_supported_patterns[i].sw_fields;
+			return inset;
+		}
+	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+			   pattern, "Unsupported pattern");
+
+	return 0;
+}
+
+static uint64_t ice_get_flow_field(const struct rte_flow_item pattern[],
+			struct rte_flow_error *error)
+{
+	const struct rte_flow_item *item = pattern;
+	const struct rte_flow_item_eth *eth_spec, *eth_mask;
+	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
+	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
+	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
+	const struct rte_flow_item_udp *udp_spec, *udp_mask;
+	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
+	const struct rte_flow_item_icmp *icmp_mask;
+	const struct rte_flow_item_icmp6 *icmp6_mask;
+	enum rte_flow_item_type item_type;
+	uint8_t  ipv6_addr_mask[16] = {
+		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
+	uint64_t input_set = ICE_INSET_NONE;
+	bool outer_ip = true;
+	bool outer_l4 = true;
+
+	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+		if (item->last) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Not support range");
+			return 0;
+		}
+		item_type = item->type;
+		switch (item_type) {
+		case RTE_FLOW_ITEM_TYPE_ETH:
+			eth_spec = item->spec;
+			eth_mask = item->mask;
+
+			if (eth_spec && eth_mask) {
+				if (is_broadcast_ether_addr(&eth_mask->src))
+					input_set |= ICE_INSET_SMAC;
+				if (is_broadcast_ether_addr(&eth_mask->dst))
+					input_set |= ICE_INSET_DMAC;
+				if (eth_mask->type == RTE_BE16(0xffff))
+					input_set |= ICE_INSET_ETHERTYPE;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_IPV4:
+			ipv4_spec = item->spec;
+			ipv4_mask = item->mask;
+
+			if (!(ipv4_spec && ipv4_mask)) {
+				rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Invalid IPv4 spec or mask.");
+				return 0;
+			}
+
+			/* Check IPv4 mask and update input set */
+			if (ipv4_mask->hdr.version_ihl ||
+			    ipv4_mask->hdr.total_length ||
+			    ipv4_mask->hdr.packet_id ||
+			    ipv4_mask->hdr.hdr_checksum) {
+				rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Invalid IPv4 mask.");
+				return 0;
+			}
+
+			if (outer_ip) {
+				if (ipv4_mask->hdr.src_addr == UINT32_MAX)
+					input_set |= ICE_INSET_IPV4_SRC;
+				if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
+					input_set |= ICE_INSET_IPV4_DST;
+				if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
+					input_set |= ICE_INSET_IPV4_TOS;
+				if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
+					input_set |= ICE_INSET_IPV4_TTL;
+				if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
+					input_set |= ICE_INSET_IPV4_PROTO;
+				outer_ip = false;
+			} else {
+				if (ipv4_mask->hdr.src_addr == UINT32_MAX)
+					input_set |= ICE_INSET_TUN_IPV4_SRC;
+				if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
+					input_set |= ICE_INSET_TUN_IPV4_DST;
+				if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
+					input_set |= ICE_INSET_TUN_IPV4_TTL;
+				if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
+					input_set |= ICE_INSET_TUN_IPV4_PROTO;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_IPV6:
+			ipv6_spec = item->spec;
+			ipv6_mask = item->mask;
+
+			if (!(ipv6_spec && ipv6_mask)) {
+				rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ITEM,
+					item, "Invalid IPv6 spec or mask");
+				return 0;
+			}
+
+			if (ipv6_mask->hdr.payload_len ||
+			    ipv6_mask->hdr.vtc_flow) {
+				rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Invalid IPv6 mask");
+				return 0;
+			}
+
+			if (outer_ip) {
+				if (!memcmp(ipv6_mask->hdr.src_addr,
+					    ipv6_addr_mask,
+					    RTE_DIM(ipv6_mask->hdr.src_addr)))
+					input_set |= ICE_INSET_IPV6_SRC;
+				if (!memcmp(ipv6_mask->hdr.dst_addr,
+					    ipv6_addr_mask,
+					    RTE_DIM(ipv6_mask->hdr.dst_addr)))
+					input_set |= ICE_INSET_IPV6_DST;
+				if (ipv6_mask->hdr.proto == UINT8_MAX)
+					input_set |= ICE_INSET_IPV6_NEXT_HDR;
+				if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
+					input_set |= ICE_INSET_IPV6_HOP_LIMIT;
+				outer_ip = false;
+			} else {
+				if (!memcmp(ipv6_mask->hdr.src_addr,
+					    ipv6_addr_mask,
+					    RTE_DIM(ipv6_mask->hdr.src_addr)))
+					input_set |= ICE_INSET_TUN_IPV6_SRC;
+				if (!memcmp(ipv6_mask->hdr.dst_addr,
+					    ipv6_addr_mask,
+					    RTE_DIM(ipv6_mask->hdr.dst_addr)))
+					input_set |= ICE_INSET_TUN_IPV6_DST;
+				if (ipv6_mask->hdr.proto == UINT8_MAX)
+					input_set |= ICE_INSET_TUN_IPV6_PROTO;
+				if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
+					input_set |= ICE_INSET_TUN_IPV6_TTL;
+			}
+
+			break;
+		case RTE_FLOW_ITEM_TYPE_UDP:
+			udp_spec = item->spec;
+			udp_mask = item->mask;
+
+			if (!(udp_spec && udp_mask)) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item, "Invalid UDP mask");
+				return 0;
+			}
+
+			/* Check UDP mask and update input set*/
+			if (udp_mask->hdr.dgram_len ||
+			    udp_mask->hdr.dgram_cksum) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid UDP mask");
+				return 0;
+			}
+
+			if (outer_l4) {
+				if (udp_mask->hdr.src_port == UINT16_MAX)
+					input_set |= ICE_INSET_SRC_PORT;
+				if (udp_mask->hdr.dst_port == UINT16_MAX)
+					input_set |= ICE_INSET_DST_PORT;
+				outer_l4 = false;
+			} else {
+				if (udp_mask->hdr.src_port == UINT16_MAX)
+					input_set |= ICE_INSET_TUN_SRC_PORT;
+				if (udp_mask->hdr.dst_port == UINT16_MAX)
+					input_set |= ICE_INSET_TUN_DST_PORT;
+			}
+
+			break;
+		case RTE_FLOW_ITEM_TYPE_TCP:
+			tcp_spec = item->spec;
+			tcp_mask = item->mask;
+
+			if (!(tcp_spec && tcp_mask)) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item, "Invalid TCP mask");
+				return 0;
+			}
+
+			/* Check TCP mask and update input set */
+			if (tcp_mask->hdr.sent_seq ||
+			    tcp_mask->hdr.recv_ack ||
+			    tcp_mask->hdr.data_off ||
+			    tcp_mask->hdr.tcp_flags ||
+			    tcp_mask->hdr.rx_win ||
+			    tcp_mask->hdr.cksum ||
+			    tcp_mask->hdr.tcp_urp) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid TCP mask");
+				return 0;
+			}
+
+			if (outer_l4) {
+				if (tcp_mask->hdr.src_port == UINT16_MAX)
+					input_set |= ICE_INSET_SRC_PORT;
+				if (tcp_mask->hdr.dst_port == UINT16_MAX)
+					input_set |= ICE_INSET_DST_PORT;
+				outer_l4 = false;
+			} else {
+				if (tcp_mask->hdr.src_port == UINT16_MAX)
+					input_set |= ICE_INSET_TUN_SRC_PORT;
+				if (tcp_mask->hdr.dst_port == UINT16_MAX)
+					input_set |= ICE_INSET_TUN_DST_PORT;
+			}
+
+			break;
+		case RTE_FLOW_ITEM_TYPE_SCTP:
+			sctp_spec = item->spec;
+			sctp_mask = item->mask;
+
+			if (!(sctp_spec && sctp_mask)) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item, "Invalid SCTP mask");
+				return 0;
+			}
+
+			/* Check SCTP mask and update input set */
+			if (sctp_mask->hdr.cksum) {
+				rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Invalid SCTP mask");
+				return 0;
+			}
+
+			if (outer_l4) {
+				if (sctp_mask->hdr.src_port == UINT16_MAX)
+					input_set |= ICE_INSET_SRC_PORT;
+				if (sctp_mask->hdr.dst_port == UINT16_MAX)
+					input_set |= ICE_INSET_DST_PORT;
+				outer_l4 = false;
+			} else {
+				if (sctp_mask->hdr.src_port == UINT16_MAX)
+					input_set |= ICE_INSET_TUN_SRC_PORT;
+				if (sctp_mask->hdr.dst_port == UINT16_MAX)
+					input_set |= ICE_INSET_TUN_DST_PORT;
+			}
+
+			break;
+		case RTE_FLOW_ITEM_TYPE_ICMP:
+			icmp_mask = item->mask;
+			if (icmp_mask->hdr.icmp_code ||
+			    icmp_mask->hdr.icmp_cksum ||
+			    icmp_mask->hdr.icmp_ident ||
+			    icmp_mask->hdr.icmp_seq_nb) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid ICMP mask");
+				return 0;
+			}
+
+			if (icmp_mask->hdr.icmp_type == UINT8_MAX)
+				input_set |= ICE_INSET_ICMP;
+			break;
+		case RTE_FLOW_ITEM_TYPE_ICMP6:
+			icmp6_mask = item->mask;
+			if (icmp6_mask->code ||
+			    icmp6_mask->checksum) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid ICMP6 mask");
+				return 0;
+			}
+
+			if (icmp6_mask->type == UINT8_MAX)
+				input_set |= ICE_INSET_ICMP6;
+			break;
+		default:
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Invalid pattern item.");
+			break;
+		}
+	}
+	return input_set;
+}
+
+static int ice_flow_valid_inset(const struct rte_flow_item pattern[],
+			uint64_t inset, struct rte_flow_error *error)
+{
+	uint64_t fields;
+
+	/* get valid field */
+	fields = ice_get_flow_field(pattern, error);
+	if (!fields || !inset) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
+				   pattern,
+				   "Invalid input set");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+static int ice_flow_valid_action(const struct rte_flow_action *actions,
+				       struct rte_flow_error *error)
+{
+	switch (actions->type) {
+	case RTE_FLOW_ACTION_TYPE_QUEUE:
+		break;
+	case RTE_FLOW_ACTION_TYPE_DROP:
+		break;
+	default:
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ACTION, actions,
+				   "Invalid action.");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+static int
+ice_flow_validate(__rte_unused struct rte_eth_dev *dev,
+		   const struct rte_flow_attr *attr,
+		   const struct rte_flow_item pattern[],
+		   const struct rte_flow_action actions[],
+		   struct rte_flow_error *error)
+{
+	uint64_t inset = 0;
+	int ret = ICE_ERR_NOT_SUPPORTED;
+
+	if (!pattern) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+				   NULL, "NULL pattern.");
+		return -rte_errno;
+	}
+
+	if (!actions) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+				   NULL, "NULL action.");
+		return -rte_errno;
+	}
+
+	if (!attr) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR,
+				   NULL, "NULL attribute.");
+		return -rte_errno;
+	}
+
+	ret = ice_flow_valid_attr(attr, error);
+	if (ret)
+		return ret;
+
+	inset = ice_flow_valid_pattern(pattern, error);
+	if (!inset)
+		return -rte_errno;
+
+	ret = ice_flow_valid_inset(pattern, inset, error);
+	if (ret)
+		return ret;
+
+	ret = ice_flow_valid_action(actions, error);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static struct rte_flow *
+ice_flow_create(struct rte_eth_dev *dev,
+		 const struct rte_flow_attr *attr,
+		 const struct rte_flow_item pattern[],
+		 const struct rte_flow_action actions[],
+		 struct rte_flow_error *error)
+{
+	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	struct rte_flow *flow = NULL;
+	int ret;
+
+	flow = rte_zmalloc("ice_flow", sizeof(struct rte_flow), 0);
+	if (!flow) {
+		rte_flow_error_set(error, ENOMEM,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Failed to allocate memory");
+		return flow;
+	}
+
+	ret = ice_flow_validate(dev, attr, pattern, actions, error);
+	if (ret < 0)
+		goto free_flow;
+
+	ret = ice_create_switch_filter(pf, pattern, actions, flow, error);
+	if (ret)
+		goto free_flow;
+
+	TAILQ_INSERT_TAIL(&pf->flow_list, flow, node);
+	return flow;
+
+free_flow:
+	rte_flow_error_set(error, -ret,
+			   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+			   "Failed to create flow.");
+	rte_free(flow);
+	return NULL;
+}
+
+static int
+ice_flow_destroy(struct rte_eth_dev *dev,
+		 struct rte_flow *flow,
+		 struct rte_flow_error *error)
+{
+	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	int ret = 0;
+
+	ret = ice_destroy_switch_filter(pf, flow);
+
+	if (!ret) {
+		TAILQ_REMOVE(&pf->flow_list, flow, node);
+		rte_free(flow);
+	} else
+		rte_flow_error_set(error, -ret,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Failed to destroy flow.");
+
+	return ret;
+}
+
+static int
+ice_flow_flush(struct rte_eth_dev *dev,
+	       __rte_unused struct rte_flow *flow,
+	       struct rte_flow_error *error)
+{
+	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	struct rte_flow *p_flow;
+	int ret = 0;
+
+	while ((p_flow = TAILQ_FIRST(&pf->flow_list))) {
+		ret = ice_flow_destroy(dev, p_flow, error);
+		if (ret) {
+			rte_flow_error_set(error, -ret,
+					   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+					   "Failed to flush SW flows.");
+			return -rte_errno;
+		}
+	}
+
+	return ret;
+}
diff --git a/drivers/net/ice/ice_generic_flow.h b/drivers/net/ice/ice_generic_flow.h
new file mode 100644
index 0000000..d3bdce3
--- /dev/null
+++ b/drivers/net/ice/ice_generic_flow.h
@@ -0,0 +1,402 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#ifndef _ICE_GENERIC_FLOW_H_
+#define _ICE_GENERIC_FLOW_H_
+
+#include <rte_flow_driver.h>
+
+struct ice_flow_pattern {
+	enum rte_flow_item_type *items;
+	uint64_t sw_fields;
+};
+
+#define ICE_INSET_NONE            0x0000000000000000ULL
+
+/* bit0 ~ bit 7 */
+#define ICE_INSET_SMAC            0x0000000000000001ULL
+#define ICE_INSET_DMAC            0x0000000000000002ULL
+#define ICE_INSET_ETHERTYPE       0x0000000000000020ULL
+
+/* bit 8 ~ bit 15 */
+#define ICE_INSET_IPV4_SRC        0x0000000000000100ULL
+#define ICE_INSET_IPV4_DST        0x0000000000000200ULL
+#define ICE_INSET_IPV6_SRC        0x0000000000000400ULL
+#define ICE_INSET_IPV6_DST        0x0000000000000800ULL
+#define ICE_INSET_SRC_PORT        0x0000000000001000ULL
+#define ICE_INSET_DST_PORT        0x0000000000002000ULL
+#define ICE_INSET_ARP             0x0000000000004000ULL
+
+/* bit 16 ~ bit 31 */
+#define ICE_INSET_IPV4_TOS        0x0000000000010000ULL
+#define ICE_INSET_IPV4_PROTO      0x0000000000020000ULL
+#define ICE_INSET_IPV4_TTL        0x0000000000040000ULL
+#define ICE_INSET_IPV6_NEXT_HDR   0x0000000000200000ULL
+#define ICE_INSET_IPV6_HOP_LIMIT  0x0000000000400000ULL
+#define ICE_INSET_ICMP            0x0000000001000000ULL
+#define ICE_INSET_ICMP6           0x0000000002000000ULL
+
+/* bit 32 ~ bit 47, tunnel fields */
+#define ICE_INSET_TUN_SMAC           0x0000000100000000ULL
+#define ICE_INSET_TUN_DMAC           0x0000000200000000ULL
+#define ICE_INSET_TUN_IPV4_SRC       0x0000000400000000ULL
+#define ICE_INSET_TUN_IPV4_DST       0x0000000800000000ULL
+#define ICE_INSET_TUN_IPV4_TTL       0x0000001000000000ULL
+#define ICE_INSET_TUN_IPV4_PROTO     0x0000002000000000ULL
+#define ICE_INSET_TUN_IPV6_SRC       0x0000004000000000ULL
+#define ICE_INSET_TUN_IPV6_DST       0x0000008000000000ULL
+#define ICE_INSET_TUN_IPV6_TTL       0x0000010000000000ULL
+#define ICE_INSET_TUN_IPV6_PROTO     0x0000020000000000ULL
+#define ICE_INSET_TUN_SRC_PORT       0x0000040000000000ULL
+#define ICE_INSET_TUN_DST_PORT       0x0000080000000000ULL
+#define ICE_INSET_TUN_ID             0x0000100000000000ULL
+
+/* bit 48 ~ bit 55 */
+#define ICE_INSET_LAST_ETHER_TYPE 0x0001000000000000ULL
+
+#define ICE_FLAG_VLAN_INNER  0x00000001ULL
+#define ICE_FLAG_VLAN_OUTER  0x00000002ULL
+
+#define INSET_ETHER ( \
+	ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)
+#define INSET_MAC_IPV4 ( \
+	ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
+	ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_TOS)
+#define INSET_MAC_IPV4_L4 ( \
+	ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
+	ICE_INSET_IPV4_TOS | ICE_INSET_DST_PORT | \
+	ICE_INSET_SRC_PORT)
+#define INSET_MAC_IPV4_ICMP ( \
+	ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
+	ICE_INSET_IPV4_TOS | ICE_INSET_ICMP)
+#define INSET_MAC_IPV6 ( \
+	ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
+	ICE_INSET_IPV6_NEXT_HDR | ICE_INSET_IPV6_HOP_LIMIT)
+#define INSET_MAC_IPV6_L4 ( \
+	ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
+	ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_DST_PORT | \
+	ICE_INSET_SRC_PORT)
+#define INSET_MAC_IPV6_ICMP ( \
+	ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
+	ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_ICMP6)
+#define INSET_TUNNEL_IPV4_TYPE1 ( \
+	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
+	ICE_INSET_TUN_IPV4_TTL | ICE_INSET_TUN_IPV4_PROTO)
+#define INSET_TUNNEL_IPV4_TYPE2 ( \
+	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
+	ICE_INSET_TUN_IPV4_TTL | ICE_INSET_TUN_IPV4_PROTO | \
+	ICE_INSET_TUN_SRC_PORT | ICE_INSET_TUN_DST_PORT)
+#define INSET_TUNNEL_IPV4_TYPE3 ( \
+	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
+	ICE_INSET_TUN_IPV4_TTL | ICE_INSET_ICMP)
+#define INSET_TUNNEL_IPV6_TYPE1 ( \
+	ICE_INSET_TUN_IPV6_SRC | ICE_INSET_TUN_IPV6_DST | \
+	ICE_INSET_TUN_IPV6_TTL | ICE_INSET_TUN_IPV6_PROTO)
+#define INSET_TUNNEL_IPV6_TYPE2 ( \
+	ICE_INSET_TUN_IPV6_SRC | ICE_INSET_TUN_IPV6_DST | \
+	ICE_INSET_TUN_IPV6_TTL | ICE_INSET_TUN_IPV6_PROTO | \
+	ICE_INSET_TUN_SRC_PORT | ICE_INSET_TUN_DST_PORT)
+#define INSET_TUNNEL_IPV6_TYPE3 ( \
+	ICE_INSET_TUN_IPV6_SRC | ICE_INSET_TUN_IPV6_DST | \
+	ICE_INSET_TUN_IPV6_TTL | ICE_INSET_ICMP6)
+
+/* L2 */
+static enum rte_flow_item_type pattern_ethertype[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* non-tunnel IPv4 */
+static enum rte_flow_item_type pattern_ipv4[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_icmp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* non-tunnel IPv6 */
+static enum rte_flow_item_type pattern_ipv6[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv6_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv6_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv6_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv6_icmp6[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_ICMP6,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* IPv4 tunnel IPv4 */
+static enum rte_flow_item_type pattern_ipv4_tunnel_ipv4[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_tunnel_ipv4_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_tunnel_ipv4_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_tunnel_ipv4_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_tunnel_ipv4_icmp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* IPv4 tunnel MAC IPv4 */
+static enum rte_flow_item_type pattern_ipv4_tunnel_eth_ipv4[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_tunnel_eth_ipv4_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_tunnel_eth_ipv4_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_tunnel_eth_ipv4_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_tunnel_eth_ipv4_icmp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* IPv4 tunnel IPv6 */
+static enum rte_flow_item_type pattern_ipv4_tunnel_ipv6[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_tunnel_ipv6_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_tunnel_ipv6_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_tunnel_ipv6_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_tunnel_ipv6_icmp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* IPv4 tunnel MAC IPv6 */
+static enum rte_flow_item_type pattern_ipv4_tunnel_eth_ipv6[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_tunnel_eth_ipv6_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_tunnel_eth_ipv6_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_tunnel_eth_ipv6_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_tunnel_eth_ipv6_icmp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static struct ice_flow_pattern ice_supported_patterns[] = {
+	{pattern_ethertype, INSET_ETHER},
+	{pattern_ipv4, INSET_MAC_IPV4},
+	{pattern_ipv4_udp, INSET_MAC_IPV4_L4},
+	{pattern_ipv4_sctp, INSET_MAC_IPV4_L4},
+	{pattern_ipv4_tcp, INSET_MAC_IPV4_L4},
+	{pattern_ipv4_icmp, INSET_MAC_IPV4_ICMP},
+	{pattern_ipv6, INSET_MAC_IPV6},
+	{pattern_ipv6_udp, INSET_MAC_IPV6_L4},
+	{pattern_ipv6_sctp, INSET_MAC_IPV6_L4},
+	{pattern_ipv6_tcp, INSET_MAC_IPV6_L4},
+	{pattern_ipv6_icmp6, INSET_MAC_IPV6_ICMP},
+	{pattern_ipv4_tunnel_ipv4, INSET_TUNNEL_IPV4_TYPE1},
+	{pattern_ipv4_tunnel_ipv4_udp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_tunnel_ipv4_tcp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_tunnel_ipv4_sctp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_tunnel_ipv4_icmp, INSET_TUNNEL_IPV4_TYPE3},
+	{pattern_ipv4_tunnel_eth_ipv4, INSET_TUNNEL_IPV4_TYPE1},
+	{pattern_ipv4_tunnel_eth_ipv4_udp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_tunnel_eth_ipv4_tcp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_tunnel_eth_ipv4_sctp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_tunnel_eth_ipv4_icmp, INSET_TUNNEL_IPV4_TYPE3},
+	{pattern_ipv4_tunnel_ipv6, INSET_TUNNEL_IPV6_TYPE1},
+	{pattern_ipv4_tunnel_ipv6_udp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_tunnel_ipv6_tcp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_tunnel_ipv6_sctp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_tunnel_ipv6_icmp, INSET_TUNNEL_IPV6_TYPE3},
+	{pattern_ipv4_tunnel_eth_ipv6, INSET_TUNNEL_IPV6_TYPE1},
+	{pattern_ipv4_tunnel_eth_ipv6_udp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_tunnel_eth_ipv6_tcp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_tunnel_eth_ipv6_sctp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_tunnel_eth_ipv6_icmp, INSET_TUNNEL_IPV6_TYPE3},
+};
-- 
2.5.5


^ permalink raw reply	[flat|nested] 73+ messages in thread

* Re: [dpdk-dev] [PATCH 1/2] net/ice: enable switch filter
  2019-06-03  9:05 ` [dpdk-dev] [PATCH 1/2] net/ice: enable switch filter Qiming Yang
@ 2019-06-03 17:07   ` Aaron Conole
  2019-06-04  2:02     ` Zhao1, Wei
  0 siblings, 1 reply; 73+ messages in thread
From: Aaron Conole @ 2019-06-03 17:07 UTC (permalink / raw)
  To: Qiming Yang; +Cc: qi.z.zhang, dev, wei zhao

Qiming Yang <qiming.yang@intel.com> writes:

> From: wei zhao <wei.zhao1@intel.com>
>
> This patch enables the backend of rte_flow. It translates
> rte_flow_xxx structures into device-specific data structures
> and configures the packet processing engine's binary
> classifier (switch) accordingly.
>
> Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
> [...]
> diff --git a/drivers/net/ice/meson.build b/drivers/net/ice/meson.build
> index 2bec688..c6bcba3 100644
> --- a/drivers/net/ice/meson.build
> +++ b/drivers/net/ice/meson.build
> @@ -7,6 +7,7 @@ objs = [base_objs]
>  sources = files(
>  	'ice_ethdev.c',
>  	'ice_rxtx.c'
> +	'ice_switch_filter.c'
>  	)

This is in need of a comma between 'ice_rxtx.c' and 'ice_switch_filter.c'.

See:

https://travis-ci.com/ovsrobot/dpdk/jobs/204909867 for example.

>  deps += ['hash']

^ permalink raw reply	[flat|nested] 73+ messages in thread
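
For readers following along, a minimal sketch (not part of this patch) of the
kind of rule the switch filter path under review is meant to accept: an
exact-match IPv4 destination steered to an Rx queue.  The port id, address
and queue index are made-up values and error handling is trimmed.

#include <stdint.h>
#include <rte_flow.h>
#include <rte_byteorder.h>
#include <rte_errno.h>

/* Match IPv4 dst 192.168.0.1 (the parser only programs fields whose
 * mask is all-ones) and steer the traffic to Rx queue 3. */
static int
example_switch_rule(uint16_t port_id)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_ipv4 ip_spec = {
		.hdr.dst_addr = RTE_BE32(0xc0a80001),
	};
	struct rte_flow_item_ipv4 ip_mask = {
		.hdr.dst_addr = UINT32_MAX,
	};
	struct rte_flow_action_queue queue = { .index = 3 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ip_spec, .mask = &ip_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;

	return rte_flow_create(port_id, &attr, pattern, actions, &err) ?
		0 : -rte_errno;
}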

* Re: [dpdk-dev] [PATCH 1/2] net/ice: enable switch filter
  2019-06-03 17:07   ` Aaron Conole
@ 2019-06-04  2:02     ` Zhao1, Wei
  0 siblings, 0 replies; 73+ messages in thread
From: Zhao1, Wei @ 2019-06-04  2:02 UTC (permalink / raw)
  To: Aaron Conole, Yang, Qiming; +Cc: Zhang, Qi Z, dev

Hi, Qiming

> -----Original Message-----
> From: Aaron Conole [mailto:aconole@redhat.com]
> Sent: Tuesday, June 4, 2019 1:08 AM
> To: Yang, Qiming <qiming.yang@intel.com>
> Cc: Zhang, Qi Z <qi.z.zhang@intel.com>; dev@dpdk.org; Zhao1, Wei
> <wei.zhao1@intel.com>
> Subject: Re: [dpdk-dev] [PATCH 1/2] net/ice: enable switch filter
> 
> Qiming Yang <qiming.yang@intel.com> writes:
> 
> > [...]
> > diff --git a/drivers/net/ice/meson.build b/drivers/net/ice/meson.build
> > index 2bec688..c6bcba3 100644
> > --- a/drivers/net/ice/meson.build
> > +++ b/drivers/net/ice/meson.build
> > @@ -7,6 +7,7 @@ objs = [base_objs]
> >  sources = files(
> >  	'ice_ethdev.c',
> >  	'ice_rxtx.c'
> > +	'ice_switch_filter.c'
> >  	)
> 
> This is in need of a comma between 'ice_rxtx.c' and 'ice_switch_filter.c'.
> 

Ok, I will update in v2.

> See:
> 
> https://travis-ci.com/ovsrobot/dpdk/jobs/204909867 for example.
> 
> >  deps += ['hash']

^ permalink raw reply	[flat|nested] 73+ messages in thread

* [dpdk-dev] [PATCH v2 0/3] Enable rte_flow API in ice driver
  2019-06-03  9:05 [dpdk-dev] [PATCH 0/2] Enable rte_flow API in ice driver Qiming Yang
  2019-06-03  9:05 ` [dpdk-dev] [PATCH 1/2] net/ice: enable switch filter Qiming Yang
  2019-06-03  9:05 ` [dpdk-dev] [PATCH 2/2] net/ice: add generic flow API Qiming Yang
@ 2019-06-12  7:50 ` Qiming Yang
  2019-06-12  7:50   ` [dpdk-dev] [PATCH v2 1/3] net/ice: enable switch filter Qiming Yang
                     ` (2 more replies)
  2019-06-20  5:34 ` [dpdk-dev] [PATCH v3 0/3] Enable rte_flow API in ice driver Qiming Yang
                   ` (7 subsequent siblings)
  10 siblings, 3 replies; 73+ messages in thread
From: Qiming Yang @ 2019-06-12  7:50 UTC (permalink / raw)
  To: dev; +Cc: Qiming Yang

This patch set enables the backend of rte_flow and the generic filter-related functions in the ice driver.
Supported flows include ipv4, tcpv4, udpv4, ipv6, tcpv6, udpv6, tunnel, etc.
This patch set depends on shared code update.

---
v2 changes:
 - added UDP tunnel port support.
 - fixed compile issue.
 - added documentation update.

Qiming Yang (2):
  net/ice: add generic flow API
  net/ice: add UDP tunnel port support

wei zhao (1):
  net/ice: enable switch filter

 drivers/net/ice/Makefile            |   2 +
 drivers/net/ice/ice_ethdev.c        |  98 +++++++
 drivers/net/ice/ice_ethdev.h        |  11 +
 drivers/net/ice/ice_generic_flow.c  | 567 ++++++++++++++++++++++++++++++++++++
 drivers/net/ice/ice_generic_flow.h  | 404 +++++++++++++++++++++++++
 drivers/net/ice/ice_switch_filter.c | 502 +++++++++++++++++++++++++++++++
 drivers/net/ice/ice_switch_filter.h |  28 ++
 drivers/net/ice/meson.build         |   3 +-
 8 files changed, 1614 insertions(+), 1 deletion(-)
 create mode 100644 drivers/net/ice/ice_generic_flow.c
 create mode 100644 drivers/net/ice/ice_generic_flow.h
 create mode 100644 drivers/net/ice/ice_switch_filter.c
 create mode 100644 drivers/net/ice/ice_switch_filter.h

-- 
2.9.5


^ permalink raw reply	[flat|nested] 73+ messages in thread
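
As a reference for the generic filter functions this series hooks up
(validate, create, destroy, flush), a hedged sketch of the application-side
call sequence.  The attr/pattern/actions arguments are assumed to be built
as in the earlier example; nothing below is taken verbatim from the patches.

#include <stdint.h>
#include <rte_flow.h>
#include <rte_errno.h>

/* Validate a rule first, create it if the port accepts it, then remove
 * it again.  The ice PMD plugs into this path through the
 * RTE_ETH_FILTER_GENERIC filter_ctrl hook added by this series. */
static int
example_flow_lifecycle(uint16_t port_id,
		       const struct rte_flow_attr *attr,
		       const struct rte_flow_item pattern[],
		       const struct rte_flow_action actions[])
{
	struct rte_flow_error err;
	struct rte_flow *flow;
	int ret;

	ret = rte_flow_validate(port_id, attr, pattern, actions, &err);
	if (ret)
		return ret;	/* rule not supported on this port */

	flow = rte_flow_create(port_id, attr, pattern, actions, &err);
	if (!flow)
		return -rte_errno;

	/* Tear the single rule down again. */
	ret = rte_flow_destroy(port_id, flow, &err);
	if (ret)
		return ret;

	/* Flush removes any rules still attached to the port. */
	return rte_flow_flush(port_id, &err);
}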

* [dpdk-dev] [PATCH v2 1/3] net/ice: enable switch filter
  2019-06-12  7:50 ` [dpdk-dev] [PATCH v2 0/3] Enable rte_flow API in ice driver Qiming Yang
@ 2019-06-12  7:50   ` Qiming Yang
  2019-06-13  8:23     ` Wang, Xiao W
                       ` (2 more replies)
  2019-06-12  7:50   ` [dpdk-dev] [PATCH v2 2/3] net/ice: add generic flow API Qiming Yang
  2019-06-12  7:50   ` [dpdk-dev] [PATCH v2 3/3] net/ice: add UDP tunnel port support Qiming Yang
  2 siblings, 3 replies; 73+ messages in thread
From: Qiming Yang @ 2019-06-12  7:50 UTC (permalink / raw)
  To: dev; +Cc: wei zhao

From: wei zhao <wei.zhao1@intel.com>

The patch enables the backend of rte_flow. It transfers
rte_flow_xxx to device specific data structure and
configures packet process engine's binary classifier
(switch) properly.

Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
---
 drivers/net/ice/Makefile            |   1 +
 drivers/net/ice/ice_ethdev.h        |   6 +
 drivers/net/ice/ice_switch_filter.c | 502 ++++++++++++++++++++++++++++++++++++
 drivers/net/ice/ice_switch_filter.h |  28 ++
 drivers/net/ice/meson.build         |   3 +-
 5 files changed, 539 insertions(+), 1 deletion(-)
 create mode 100644 drivers/net/ice/ice_switch_filter.c
 create mode 100644 drivers/net/ice/ice_switch_filter.h

diff --git a/drivers/net/ice/Makefile b/drivers/net/ice/Makefile
index 0e5c55e..b10d826 100644
--- a/drivers/net/ice/Makefile
+++ b/drivers/net/ice/Makefile
@@ -60,6 +60,7 @@ ifeq ($(CONFIG_RTE_ARCH_X86), y)
 SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_rxtx_vec_sse.c
 endif
 
+SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_switch_filter.c
 ifeq ($(findstring RTE_MACHINE_CPUFLAG_AVX2,$(CFLAGS)),RTE_MACHINE_CPUFLAG_AVX2)
 	CC_AVX2_SUPPORT=1
 else
diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index 1385afa..67a358a 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -234,6 +234,12 @@ struct ice_vsi {
 	bool offset_loaded;
 };
 
+/* Struct to store flow created. */
+struct rte_flow {
+	TAILQ_ENTRY(rte_flow) node;
+void *rule;
+};
+
 struct ice_pf {
 	struct ice_adapter *adapter; /* The adapter this PF associate to */
 	struct ice_vsi *main_vsi; /* pointer to main VSI structure */
diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
new file mode 100644
index 0000000..e679675
--- /dev/null
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -0,0 +1,502 @@
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_eth_ctrl.h>
+#include <rte_tailq.h>
+#include <rte_flow_driver.h>
+
+#include "ice_logs.h"
+#include "base/ice_type.h"
+#include "ice_switch_filter.h"
+
+static int
+ice_parse_switch_filter(
+			const struct rte_flow_item pattern[],
+			const struct rte_flow_action actions[],
+			struct rte_flow_error *error,
+			struct ice_adv_rule_info *rule_info,
+			struct ice_adv_lkup_elem **lkup_list,
+			uint16_t *lkups_num)
+{
+	const struct rte_flow_item *item = pattern;
+	enum rte_flow_item_type item_type;
+	const struct rte_flow_item_eth *eth_spec, *eth_mask;
+	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
+	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
+	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
+	const struct rte_flow_item_udp *udp_spec, *udp_mask;
+	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
+	const struct rte_flow_item_nvgre  *nvgre_spec, *nvgre_mask;
+	const struct rte_flow_item_vxlan  *vxlan_spec, *vxlan_mask;
+	struct ice_adv_lkup_elem *list;
+	uint16_t i, j, t = 0;
+	uint16_t item_num = 0;
+	enum ice_sw_tunnel_type tun_type = ICE_NON_TUN;
+
+	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+		if (item->type == RTE_FLOW_ITEM_TYPE_ETH ||
+			item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
+			item->type == RTE_FLOW_ITEM_TYPE_IPV6 ||
+			item->type == RTE_FLOW_ITEM_TYPE_UDP ||
+			item->type == RTE_FLOW_ITEM_TYPE_TCP ||
+			item->type == RTE_FLOW_ITEM_TYPE_SCTP ||
+			item->type == RTE_FLOW_ITEM_TYPE_VXLAN ||
+			item->type == RTE_FLOW_ITEM_TYPE_NVGRE)
+			item_num++;
+	}
+
+	list = rte_zmalloc(NULL, item_num * sizeof(*list), 0);
+	if (!list) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ITEM, actions,
+				   "no memory malloc");
+		goto out;
+	}
+	*lkup_list = list;
+
+	for (item = pattern, i = 0; item->type !=
+			RTE_FLOW_ITEM_TYPE_END; item++, i++) {
+		item_type = item->type;
+
+		switch (item_type) {
+		case RTE_FLOW_ITEM_TYPE_ETH:
+			eth_spec = item->spec;
+			eth_mask = item->mask;
+			if (eth_spec && eth_mask) {
+				list[t].type = (tun_type == ICE_NON_TUN) ?
+					ICE_MAC_OFOS : ICE_MAC_IL;
+				for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
+					if (eth_mask->src.addr_bytes[j] ==
+								UINT8_MAX) {
+						list[t].h_u.eth_hdr.
+							src_addr[j] =
+						eth_spec->src.addr_bytes[j];
+						list[t].m_u.eth_hdr.
+							src_addr[j] =
+						eth_mask->src.addr_bytes[j];
+					}
+					if (eth_mask->dst.addr_bytes[j] ==
+								UINT8_MAX) {
+						list[t].h_u.eth_hdr.
+							dst_addr[j] =
+						eth_spec->dst.addr_bytes[j];
+						list[t].m_u.eth_hdr.
+							dst_addr[j] =
+						eth_mask->dst.addr_bytes[j];
+					}
+				}
+				if (eth_mask->type == UINT16_MAX) {
+					list[t].h_u.eth_hdr.ethtype_id =
+					rte_be_to_cpu_16(eth_spec->type);
+					list[t].m_u.eth_hdr.ethtype_id =
+						UINT16_MAX;
+				}
+				t++;
+			} else if (!eth_spec && !eth_mask) {
+				list[t].type = (tun_type == ICE_NON_TUN) ?
+					ICE_MAC_OFOS : ICE_MAC_IL;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_IPV4:
+			ipv4_spec = item->spec;
+			ipv4_mask = item->mask;
+			if (ipv4_spec && ipv4_mask) {
+				list[t].type = (tun_type == ICE_NON_TUN) ?
+					ICE_IPV4_OFOS : ICE_IPV4_IL;
+				if (ipv4_mask->hdr.src_addr == UINT32_MAX) {
+					list[t].h_u.ipv4_hdr.src_addr =
+						ipv4_spec->hdr.src_addr;
+					list[t].m_u.ipv4_hdr.src_addr =
+						UINT32_MAX;
+				}
+				if (ipv4_mask->hdr.dst_addr == UINT32_MAX) {
+					list[t].h_u.ipv4_hdr.dst_addr =
+						ipv4_spec->hdr.dst_addr;
+					list[t].m_u.ipv4_hdr.dst_addr =
+						UINT32_MAX;
+				}
+				if (ipv4_mask->hdr.time_to_live == UINT8_MAX) {
+					list[t].h_u.ipv4_hdr.time_to_live =
+						ipv4_spec->hdr.time_to_live;
+					list[t].m_u.ipv4_hdr.time_to_live =
+						UINT8_MAX;
+				}
+				if (ipv4_mask->hdr.next_proto_id == UINT8_MAX) {
+					list[t].h_u.ipv4_hdr.protocol =
+						ipv4_spec->hdr.next_proto_id;
+					list[t].m_u.ipv4_hdr.protocol =
+						UINT8_MAX;
+				}
+				if (ipv4_mask->hdr.type_of_service ==
+						UINT8_MAX) {
+					list[t].h_u.ipv4_hdr.tos =
+						ipv4_spec->hdr.type_of_service;
+					list[t].m_u.ipv4_hdr.tos = UINT8_MAX;
+				}
+				t++;
+			} else if (!ipv4_spec && !ipv4_mask) {
+				list[t].type = (tun_type == ICE_NON_TUN) ?
+					ICE_IPV4_OFOS : ICE_IPV4_IL;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_IPV6:
+			ipv6_spec = item->spec;
+			ipv6_mask = item->mask;
+			if (ipv6_spec && ipv6_mask) {
+				list[t].type = (tun_type == ICE_NON_TUN) ?
+					ICE_IPV6_OFOS : ICE_IPV6_IL;
+				for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
+					if (ipv6_mask->hdr.src_addr[j] ==
+								UINT8_MAX) {
+						list[t].h_u.ice_ipv6_ofos_hdr.
+							src_addr[j] =
+						ipv6_spec->hdr.src_addr[j];
+						list[t].m_u.ice_ipv6_ofos_hdr.
+							src_addr[j] =
+						ipv6_mask->hdr.src_addr[j];
+					}
+					if (ipv6_mask->hdr.dst_addr[j] ==
+								UINT8_MAX) {
+						list[t].h_u.ice_ipv6_ofos_hdr.
+							dst_addr[j] =
+						ipv6_spec->hdr.dst_addr[j];
+						list[t].m_u.ice_ipv6_ofos_hdr.
+							dst_addr[j] =
+						ipv6_mask->hdr.dst_addr[j];
+					}
+				}
+				if (ipv6_mask->hdr.proto == UINT8_MAX) {
+					list[t].h_u.ice_ipv6_ofos_hdr.next_hdr =
+						ipv6_spec->hdr.proto;
+					list[t].m_u.ice_ipv6_ofos_hdr.next_hdr =
+						UINT8_MAX;
+				}
+				if (ipv6_mask->hdr.hop_limits == UINT8_MAX) {
+					list[t].h_u.ice_ipv6_ofos_hdr.
+					hop_limit = ipv6_spec->hdr.hop_limits;
+					list[t].m_u.ice_ipv6_ofos_hdr.
+						hop_limit  = UINT8_MAX;
+				}
+				t++;
+			} else if (!ipv6_spec && !ipv6_mask) {
+				list[t].type = (tun_type == ICE_NON_TUN) ?
+					ICE_IPV4_OFOS : ICE_IPV4_IL;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_UDP:
+			udp_spec = item->spec;
+			udp_mask = item->mask;
+			if (udp_spec && udp_mask) {
+				list[t].type = ICE_UDP_ILOS;
+				if (udp_mask->hdr.src_port == UINT16_MAX) {
+					list[t].h_u.l4_hdr.src_port =
+						udp_spec->hdr.src_port;
+					list[t].m_u.l4_hdr.src_port =
+						udp_mask->hdr.src_port;
+				}
+				if (udp_mask->hdr.dst_port == UINT16_MAX) {
+					list[t].h_u.l4_hdr.dst_port =
+						udp_spec->hdr.dst_port;
+					list[t].m_u.l4_hdr.dst_port =
+						udp_mask->hdr.dst_port;
+				}
+				t++;
+			} else if (!udp_spec && !udp_mask) {
+				list[t].type = ICE_UDP_ILOS;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_TCP:
+			tcp_spec = item->spec;
+			tcp_mask = item->mask;
+			if (tcp_spec && tcp_mask) {
+				list[t].type = ICE_TCP_IL;
+				if (tcp_mask->hdr.src_port == UINT16_MAX) {
+					list[t].h_u.l4_hdr.src_port =
+						tcp_spec->hdr.src_port;
+					list[t].m_u.l4_hdr.src_port =
+						tcp_mask->hdr.src_port;
+				}
+				if (tcp_mask->hdr.dst_port == UINT16_MAX) {
+					list[t].h_u.l4_hdr.dst_port =
+						tcp_spec->hdr.dst_port;
+					list[t].m_u.l4_hdr.dst_port =
+						tcp_mask->hdr.dst_port;
+				}
+				t++;
+			} else if (!tcp_spec && !tcp_mask) {
+				list[t].type = ICE_TCP_IL;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_SCTP:
+			sctp_spec = item->spec;
+			sctp_mask = item->mask;
+			if (sctp_spec && sctp_mask) {
+				list[t].type = ICE_SCTP_IL;
+				if (sctp_mask->hdr.src_port == UINT16_MAX) {
+					list[t].h_u.sctp_hdr.src_port =
+						sctp_spec->hdr.src_port;
+					list[t].m_u.sctp_hdr.src_port =
+						sctp_mask->hdr.src_port;
+				}
+				if (sctp_mask->hdr.dst_port == UINT16_MAX) {
+					list[t].h_u.sctp_hdr.dst_port =
+						sctp_spec->hdr.dst_port;
+					list[t].m_u.sctp_hdr.dst_port =
+						sctp_mask->hdr.dst_port;
+				}
+				t++;
+			} else if (!sctp_spec && !sctp_mask) {
+				list[t].type = ICE_SCTP_IL;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_VXLAN:
+			vxlan_spec = item->spec;
+			vxlan_mask = item->mask;
+			tun_type = ICE_SW_TUN_VXLAN;
+			if (vxlan_spec && vxlan_mask) {
+				list[t].type = ICE_VXLAN;
+				if (vxlan_mask->vni[0] == UINT8_MAX &&
+					vxlan_mask->vni[1] == UINT8_MAX &&
+					vxlan_mask->vni[2] == UINT8_MAX) {
+					list[t].h_u.tnl_hdr.vni =
+						(vxlan_spec->vni[1] << 8) |
+						vxlan_spec->vni[0];
+					list[t].m_u.tnl_hdr.vni =
+						UINT16_MAX;
+				}
+				t++;
+			} else if (!vxlan_spec && !vxlan_mask) {
+				list[t].type = ICE_VXLAN;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_NVGRE:
+			nvgre_spec = item->spec;
+			nvgre_mask = item->mask;
+			tun_type = ICE_SW_TUN_NVGRE;
+			if (nvgre_spec && nvgre_mask) {
+				list[t].type = ICE_NVGRE;
+				if (nvgre_mask->tni[0] == UINT8_MAX &&
+					nvgre_mask->tni[1] == UINT8_MAX &&
+					nvgre_mask->tni[2] == UINT8_MAX) {
+					list[t].h_u.nvgre_hdr.tni =
+						(nvgre_spec->tni[1] << 8) |
+						nvgre_spec->tni[0];
+					list[t].m_u.nvgre_hdr.tni =
+						UINT16_MAX;
+				}
+				t++;
+			} else if (!nvgre_spec && !nvgre_mask) {
+				list[t].type = ICE_NVGRE;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_VOID:
+		case RTE_FLOW_ITEM_TYPE_END:
+			break;
+
+		default:
+			rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ITEM, actions,
+				   "Invalid pattern item.");
+			goto out;
+		}
+	}
+
+	rule_info->tun_type = tun_type;
+	*lkups_num = t;
+
+	return 0;
+out:
+	return -rte_errno;
+}
+
+/* By now ice switch filter action code implement only
+* supports QUEUE or DROP.
+*/
+static int
+ice_parse_switch_action(struct ice_pf *pf,
+				 const struct rte_flow_action *actions,
+				 struct rte_flow_error *error,
+				 struct ice_adv_rule_info *rule_info)
+{
+	struct ice_hw *hw = ICE_PF_TO_HW(pf);
+	struct ice_vsi *vsi = pf->main_vsi;
+	const struct rte_flow_action *act;
+	const struct rte_flow_action_queue *act_q;
+	uint16_t base_queue, index = 0;
+	uint32_t reg;
+
+	/* Check if the first non-void action is QUEUE or DROP. */
+	NEXT_ITEM_OF_ACTION(act, actions, index);
+	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
+	    act->type != RTE_FLOW_ACTION_TYPE_DROP) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+				   act, "Not supported action.");
+		return -rte_errno;
+	}
+	reg = ICE_READ_REG(hw, PFLAN_RX_QALLOC);
+	if (reg & PFLAN_RX_QALLOC_VALID_M) {
+		base_queue = reg & PFLAN_RX_QALLOC_FIRSTQ_M;
+	} else {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ACTION,
+			act, "Invalid queue register");
+		return -rte_errno;
+	}
+	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
+		act_q = act->conf;
+		rule_info->sw_act.fltr_act = ICE_FWD_TO_Q;
+		rule_info->sw_act.fwd_id.q_id = base_queue + act_q->index;
+		if (act_q->index >= pf->dev_data->nb_rx_queues) {
+			rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ACTION,
+				act, "Invalid queue ID for"
+				" switch filter.");
+			return -rte_errno;
+		}
+	} else {
+		rule_info->sw_act.fltr_act = ICE_DROP_PACKET;
+	}
+
+	rule_info->sw_act.vsi_handle = vsi->idx;
+	rule_info->rx = 1;
+	rule_info->sw_act.src = vsi->idx;
+
+	/* Check if the next non-void item is END */
+	index++;
+	NEXT_ITEM_OF_ACTION(act, actions, index);
+	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+				   act, "Not supported action.");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+static int
+ice_switch_rule_set(struct ice_pf *pf,
+			struct ice_adv_lkup_elem *list,
+			uint16_t lkups_cnt,
+			struct ice_adv_rule_info *rule_info,
+			struct rte_flow *flow)
+{
+	struct ice_hw *hw = ICE_PF_TO_HW(pf);
+	int ret;
+	struct ice_rule_query_data rule_added = {0};
+	struct ice_rule_query_data *filter_ptr;
+
+	if (lkups_cnt > ICE_MAX_CHAIN_WORDS) {
+		PMD_DRV_LOG(ERR, "item number too large for rule");
+		return -ENOTSUP;
+	}
+	if (!list) {
+		PMD_DRV_LOG(ERR, "lookup list should not be NULL");
+		return -ENOTSUP;
+	}
+
+	ret = ice_add_adv_rule(hw, list, lkups_cnt, rule_info, &rule_added);
+
+	if (!ret) {
+		filter_ptr = rte_zmalloc("ice_switch_filter",
+			sizeof(struct ice_rule_query_data), 0);
+		if (!filter_ptr) {
+			PMD_DRV_LOG(ERR, "failed to allocate memory");
+			return -EINVAL;
+		}
+		flow->rule = filter_ptr;
+		rte_memcpy(filter_ptr,
+			&rule_added,
+			sizeof(struct ice_rule_query_data));
+	}
+
+	return ret;
+}
+
+int
+ice_create_switch_filter(struct ice_pf *pf,
+			const struct rte_flow_item pattern[],
+			const struct rte_flow_action actions[],
+			struct rte_flow *flow,
+			struct rte_flow_error *error)
+{
+	int ret = 0;
+	struct ice_adv_rule_info rule_info = {0};
+	struct ice_adv_lkup_elem *list = NULL;
+	uint16_t lkups_num = 0;
+
+	ret = ice_parse_switch_filter(pattern, actions, error,
+			&rule_info, &list, &lkups_num);
+	if (ret)
+		goto out;
+
+	ret = ice_parse_switch_action(pf, actions, error, &rule_info);
+	if (ret)
+		goto out;
+
+	ret = ice_switch_rule_set(pf, list, lkups_num, &rule_info, flow);
+	if (ret)
+		goto out;
+
+	rte_free(list);
+	return 0;
+
+out:
+	rte_free(list);
+
+	return -rte_errno;
+}
+
+int
+ice_destroy_switch_filter(struct ice_pf *pf,
+			struct rte_flow *flow)
+{
+	struct ice_hw *hw = ICE_PF_TO_HW(pf);
+	int ret;
+	struct ice_rule_query_data *filter_ptr;
+	struct ice_rule_query_data rule_added;
+
+	filter_ptr = (struct ice_rule_query_data *)
+			flow->rule;
+	rte_memcpy(&rule_added, filter_ptr,
+		sizeof(struct ice_rule_query_data));
+
+	if (!filter_ptr) {
+		PMD_DRV_LOG(ERR, "no such flow"
+			    " create by switch filter");
+		return -EINVAL;
+	}
+
+	ret = ice_rem_adv_rule_by_id(hw, &rule_added);
+
+	rte_free(filter_ptr);
+
+	return ret;
+}
+
+void
+ice_free_switch_filter_rule(void *rule)
+{
+	struct ice_rule_query_data *filter_ptr;
+
+	filter_ptr = (struct ice_rule_query_data *)rule;
+
+	rte_free(filter_ptr);
+}
diff --git a/drivers/net/ice/ice_switch_filter.h b/drivers/net/ice/ice_switch_filter.h
new file mode 100644
index 0000000..957d0d1
--- /dev/null
+++ b/drivers/net/ice/ice_switch_filter.h
@@ -0,0 +1,28 @@
+#ifndef _ICE_SWITCH_FILTER_H_
+#define _ICE_SWITCH_FILTER_H_
+
+#include "base/ice_switch.h"
+#include "base/ice_type.h"
+#include "ice_ethdev.h"
+
+#define NEXT_ITEM_OF_ACTION(act, actions, index)                        \
+	do {                                                            \
+		act = actions + index;                                  \
+		while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {        \
+			index++;                                        \
+			act = actions + index;                          \
+		}                                                       \
+	} while (0)
+
+int
+ice_create_switch_filter(struct ice_pf *pf,
+			const struct rte_flow_item pattern[],
+			const struct rte_flow_action actions[],
+			struct rte_flow *flow,
+			struct rte_flow_error *error);
+int
+ice_destroy_switch_filter(struct ice_pf *pf,
+			struct rte_flow *flow);
+void
+ice_free_switch_filter_rule(void *rule);
+#endif /* _ICE_SWITCH_FILTER_H_ */
diff --git a/drivers/net/ice/meson.build b/drivers/net/ice/meson.build
index 2bec688..8697676 100644
--- a/drivers/net/ice/meson.build
+++ b/drivers/net/ice/meson.build
@@ -6,7 +6,8 @@ objs = [base_objs]
 
 sources = files(
 	'ice_ethdev.c',
-	'ice_rxtx.c'
+	'ice_rxtx.c',
+	'ice_switch_filter.c'
 	)
 
 deps += ['hash']
-- 
2.9.5


^ permalink raw reply	[flat|nested] 73+ messages in thread
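
For illustration, a sketch (again not from the patch) of a tunnel pattern the
v2 parser above classifies as ICE_SW_TUN_VXLAN: items before the VXLAN item
keep the outer lookup types, while items after it are treated as inner.  The
VNI and queue index are made-up values.

#include <stdint.h>
#include <rte_flow.h>
#include <rte_errno.h>

/* Steer one VXLAN VNI (42) to Rx queue 5.  Only the VXLAN item carries a
 * spec/mask here; the other items just describe the header layout. */
static int
example_vxlan_rule(uint16_t port_id)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_vxlan vxlan_spec = { .vni = { 0x00, 0x00, 0x2a } };
	struct rte_flow_item_vxlan vxlan_mask = { .vni = { 0xff, 0xff, 0xff } };
	struct rte_flow_action_queue queue = { .index = 5 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },	/* outer */
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN,
		  .spec = &vxlan_spec, .mask = &vxlan_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },	/* inner */
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;

	return rte_flow_create(port_id, &attr, pattern, actions, &err) ?
		0 : -rte_errno;
}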

* [dpdk-dev] [PATCH v2 2/3] net/ice: add generic flow API
  2019-06-12  7:50 ` [dpdk-dev] [PATCH v2 0/3] Enable rte_flow API in ice driver Qiming Yang
  2019-06-12  7:50   ` [dpdk-dev] [PATCH v2 1/3] net/ice: enable switch filter Qiming Yang
@ 2019-06-12  7:50   ` Qiming Yang
  2019-06-17  5:50     ` Xing, Beilei
                       ` (2 more replies)
  2019-06-12  7:50   ` [dpdk-dev] [PATCH v2 3/3] net/ice: add UDP tunnel port support Qiming Yang
  2 siblings, 3 replies; 73+ messages in thread
From: Qiming Yang @ 2019-06-12  7:50 UTC (permalink / raw)
  To: dev; +Cc: Qiming Yang

This patch adds ice_flow_create, ice_flow_destroy,
ice_flow_flush and ice_flow_validate support;
these are going to be used to handle all the generic filters.

Signed-off-by: Qiming Yang <qiming.yang@intel.com>
---
 drivers/net/ice/Makefile           |   1 +
 drivers/net/ice/ice_ethdev.c       |  44 +++
 drivers/net/ice/ice_ethdev.h       |   7 +-
 drivers/net/ice/ice_generic_flow.c | 567 +++++++++++++++++++++++++++++++++++++
 drivers/net/ice/ice_generic_flow.h | 404 ++++++++++++++++++++++++++
 5 files changed, 1022 insertions(+), 1 deletion(-)
 create mode 100644 drivers/net/ice/ice_generic_flow.c
 create mode 100644 drivers/net/ice/ice_generic_flow.h

diff --git a/drivers/net/ice/Makefile b/drivers/net/ice/Makefile
index b10d826..32abeb6 100644
--- a/drivers/net/ice/Makefile
+++ b/drivers/net/ice/Makefile
@@ -79,5 +79,6 @@ endif
 ifeq ($(CC_AVX2_SUPPORT), 1)
 	SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_rxtx_vec_avx2.c
 endif
+SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_generic_flow.c
 
 include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index bdbceb4..cf6bb1d 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -15,6 +15,7 @@
 #include "base/ice_dcb.h"
 #include "ice_ethdev.h"
 #include "ice_rxtx.h"
+#include "ice_switch_filter.h"
 
 #define ICE_MAX_QP_NUM "max_queue_pair_num"
 #define ICE_DFLT_OUTER_TAG_TYPE ICE_AQ_VSI_OUTER_TAG_VLAN_9100
@@ -83,6 +84,10 @@ static int ice_xstats_get(struct rte_eth_dev *dev,
 static int ice_xstats_get_names(struct rte_eth_dev *dev,
 				struct rte_eth_xstat_name *xstats_names,
 				unsigned int limit);
+static int ice_dev_filter_ctrl(struct rte_eth_dev *dev,
+			enum rte_filter_type filter_type,
+			enum rte_filter_op filter_op,
+			void *arg);
 
 static const struct rte_pci_id pci_id_ice_map[] = {
 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_BACKPLANE) },
@@ -141,6 +146,7 @@ static const struct eth_dev_ops ice_eth_dev_ops = {
 	.xstats_get                   = ice_xstats_get,
 	.xstats_get_names             = ice_xstats_get_names,
 	.xstats_reset                 = ice_stats_reset,
+	.filter_ctrl                  = ice_dev_filter_ctrl,
 };
 
 /* store statistics names and its offset in stats structure */
@@ -1460,6 +1466,8 @@ ice_dev_init(struct rte_eth_dev *dev)
 	/* enable uio intr after callback register */
 	rte_intr_enable(intr_handle);
 
+	TAILQ_INIT(&pf->flow_list);
+
 	return 0;
 
 err_pf_setup:
@@ -1602,6 +1610,8 @@ ice_dev_uninit(struct rte_eth_dev *dev)
 {
 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	struct rte_flow *p_flow;
 
 	ice_dev_close(dev);
 
@@ -1619,6 +1629,13 @@ ice_dev_uninit(struct rte_eth_dev *dev)
 	rte_intr_callback_unregister(intr_handle,
 				     ice_interrupt_handler, dev);
 
+	/* Remove all flows */
+	while ((p_flow = TAILQ_FIRST(&pf->flow_list))) {
+		TAILQ_REMOVE(&pf->flow_list, p_flow, node);
+		ice_free_switch_filter_rule(p_flow->rule);
+		rte_free(p_flow);
+	}
+
 	return 0;
 }
 
@@ -3603,6 +3620,33 @@ static int ice_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
 }
 
 static int
+ice_dev_filter_ctrl(struct rte_eth_dev *dev,
+		     enum rte_filter_type filter_type,
+		     enum rte_filter_op filter_op,
+		     void *arg)
+{
+	int ret = 0;
+
+	if (!dev)
+		return -EINVAL;
+
+	switch (filter_type) {
+	case RTE_ETH_FILTER_GENERIC:
+		if (filter_op != RTE_ETH_FILTER_GET)
+			return -EINVAL;
+		*(const void **)arg = &ice_flow_ops;
+		break;
+	default:
+		PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
+					filter_type);
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+static int
 ice_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 	      struct rte_pci_device *pci_dev)
 {
diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index 67a358a..0905ff9 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -234,12 +234,16 @@ struct ice_vsi {
 	bool offset_loaded;
 };
 
+extern const struct rte_flow_ops ice_flow_ops;
+
 /* Struct to store flow created. */
 struct rte_flow {
 	TAILQ_ENTRY(rte_flow) node;
-void *rule;
+	void *rule;
 };
 
+TAILQ_HEAD(ice_flow_list, rte_flow);
+
 struct ice_pf {
 	struct ice_adapter *adapter; /* The adapter this PF associate to */
 	struct ice_vsi *main_vsi; /* pointer to main VSI structure */
@@ -265,6 +269,7 @@ struct ice_pf {
 	struct ice_eth_stats internal_stats;
 	bool offset_loaded;
 	bool adapter_stopped;
+	struct ice_flow_list flow_list;
 };
 
 /**
diff --git a/drivers/net/ice/ice_generic_flow.c b/drivers/net/ice/ice_generic_flow.c
new file mode 100644
index 0000000..4fb50b2
--- /dev/null
+++ b/drivers/net/ice/ice_generic_flow.c
@@ -0,0 +1,567 @@
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_malloc.h>
+
+#include "ice_ethdev.h"
+#include "ice_generic_flow.h"
+#include "ice_switch_filter.h"
+
+static int ice_flow_validate(struct rte_eth_dev *dev,
+		const struct rte_flow_attr *attr,
+		const struct rte_flow_item pattern[],
+		const struct rte_flow_action actions[],
+		struct rte_flow_error *error);
+static struct rte_flow *ice_flow_create(struct rte_eth_dev *dev,
+		const struct rte_flow_attr *attr,
+		const struct rte_flow_item pattern[],
+		const struct rte_flow_action actions[],
+		struct rte_flow_error *error);
+static int ice_flow_destroy(struct rte_eth_dev *dev,
+		struct rte_flow *flow,
+		struct rte_flow_error *error);
+static int ice_flow_flush(struct rte_eth_dev *dev,
+		struct rte_flow_error *error);
+
+const struct rte_flow_ops ice_flow_ops = {
+	.validate = ice_flow_validate,
+	.create = ice_flow_create,
+	.destroy = ice_flow_destroy,
+	.flush = ice_flow_flush,
+};
+
+static int
+ice_flow_valid_attr(const struct rte_flow_attr *attr,
+		     struct rte_flow_error *error)
+{
+	/* Must be input direction */
+	if (!attr->ingress) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+				   attr, "Only support ingress.");
+		return -rte_errno;
+	}
+
+	/* Not supported */
+	if (attr->egress) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+				   attr, "Not support egress.");
+		return -rte_errno;
+	}
+
+	/* Not supported */
+	if (attr->priority) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+				   attr, "Not support priority.");
+		return -rte_errno;
+	}
+
+	/* Not supported */
+	if (attr->group) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+				   attr, "Not support group.");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+/* Check if the pattern matches a supported item type array */
+static bool
+ice_match_pattern(enum rte_flow_item_type *item_array,
+		const struct rte_flow_item *pattern)
+{
+	const struct rte_flow_item *item = pattern;
+
+	while ((*item_array == item->type) &&
+	       (*item_array != RTE_FLOW_ITEM_TYPE_END)) {
+		item_array++;
+		item++;
+	}
+
+	return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
+		item->type == RTE_FLOW_ITEM_TYPE_END);
+}
+
+static uint64_t ice_flow_valid_pattern(const struct rte_flow_item pattern[],
+		struct rte_flow_error *error)
+{
+	uint16_t i = 0;
+	uint64_t inset;
+
+	for (; i < RTE_DIM(ice_supported_patterns); i++)
+		if (ice_match_pattern(ice_supported_patterns[i].items,
+				      pattern)) {
+			inset = ice_supported_patterns[i].sw_fields;
+			return inset;
+		}
+	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+			   pattern, "Unsupported pattern");
+
+	return 0;
+}
+
+static uint64_t ice_get_flow_field(const struct rte_flow_item pattern[],
+			struct rte_flow_error *error)
+{
+	const struct rte_flow_item *item = pattern;
+	const struct rte_flow_item_eth *eth_spec, *eth_mask;
+	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
+	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
+	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
+	const struct rte_flow_item_udp *udp_spec, *udp_mask;
+	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
+	const struct rte_flow_item_icmp *icmp_mask;
+	const struct rte_flow_item_icmp6 *icmp6_mask;
+	enum rte_flow_item_type item_type;
+	uint8_t  ipv6_addr_mask[16] = {
+		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
+	uint64_t input_set = ICE_INSET_NONE;
+	bool outer_ip = true;
+	bool outer_l4 = true;
+
+	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+		if (item->last) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Not support range");
+			return 0;
+		}
+		item_type = item->type;
+		switch (item_type) {
+		case RTE_FLOW_ITEM_TYPE_ETH:
+			eth_spec = item->spec;
+			eth_mask = item->mask;
+
+			if (eth_spec && eth_mask) {
+				if (rte_is_broadcast_ether_addr(&eth_mask->src))
+					input_set |= ICE_INSET_SMAC;
+				if (rte_is_broadcast_ether_addr(&eth_mask->dst))
+					input_set |= ICE_INSET_DMAC;
+				if (eth_mask->type == RTE_BE16(0xffff))
+					input_set |= ICE_INSET_ETHERTYPE;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_IPV4:
+			ipv4_spec = item->spec;
+			ipv4_mask = item->mask;
+
+			if (!(ipv4_spec && ipv4_mask)) {
+				rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Invalid IPv4 spec or mask.");
+				return 0;
+			}
+
+			/* Check IPv4 mask and update input set */
+			if (ipv4_mask->hdr.version_ihl ||
+			    ipv4_mask->hdr.total_length ||
+			    ipv4_mask->hdr.packet_id ||
+			    ipv4_mask->hdr.hdr_checksum) {
+				rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Invalid IPv4 mask.");
+				return 0;
+			}
+
+			if (outer_ip) {
+				if (ipv4_mask->hdr.src_addr == UINT32_MAX)
+					input_set |= ICE_INSET_IPV4_SRC;
+				if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
+					input_set |= ICE_INSET_IPV4_DST;
+				if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
+					input_set |= ICE_INSET_IPV4_TOS;
+				if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
+					input_set |= ICE_INSET_IPV4_TTL;
+				if (ipv4_mask->hdr.fragment_offset == 0)
+					input_set |= ICE_INSET_IPV4_PROTO;
+				outer_ip = false;
+			} else {
+				if (ipv4_mask->hdr.src_addr == UINT32_MAX)
+					input_set |= ICE_INSET_TUN_IPV4_SRC;
+				if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
+					input_set |= ICE_INSET_TUN_IPV4_DST;
+				if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
+					input_set |= ICE_INSET_TUN_IPV4_TTL;
+				if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
+					input_set |= ICE_INSET_TUN_IPV4_PROTO;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_IPV6:
+			ipv6_spec = item->spec;
+			ipv6_mask = item->mask;
+
+			if (!(ipv6_spec && ipv6_mask)) {
+				rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ITEM,
+					item, "Invalid IPv6 spec or mask");
+				return 0;
+			}
+
+			if (ipv6_mask->hdr.payload_len ||
+			    ipv6_mask->hdr.vtc_flow) {
+				rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Invalid IPv6 mask");
+				return 0;
+			}
+
+			if (outer_ip) {
+				if (!memcmp(ipv6_mask->hdr.src_addr,
+					    ipv6_addr_mask,
+					    RTE_DIM(ipv6_mask->hdr.src_addr)))
+					input_set |= ICE_INSET_IPV6_SRC;
+				if (!memcmp(ipv6_mask->hdr.dst_addr,
+					    ipv6_addr_mask,
+					    RTE_DIM(ipv6_mask->hdr.dst_addr)))
+					input_set |= ICE_INSET_IPV6_DST;
+				if (ipv6_mask->hdr.proto == UINT8_MAX)
+					input_set |= ICE_INSET_IPV6_NEXT_HDR;
+				if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
+					input_set |= ICE_INSET_IPV6_HOP_LIMIT;
+				outer_ip = false;
+			} else {
+				if (!memcmp(ipv6_mask->hdr.src_addr,
+					    ipv6_addr_mask,
+					    RTE_DIM(ipv6_mask->hdr.src_addr)))
+					input_set |= ICE_INSET_TUN_IPV6_SRC;
+				if (!memcmp(ipv6_mask->hdr.dst_addr,
+					    ipv6_addr_mask,
+					    RTE_DIM(ipv6_mask->hdr.dst_addr)))
+					input_set |= ICE_INSET_TUN_IPV6_DST;
+				if (ipv6_mask->hdr.proto == UINT8_MAX)
+					input_set |= ICE_INSET_TUN_IPV6_PROTO;
+				if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
+					input_set |= ICE_INSET_TUN_IPV6_TTL;
+			}
+
+			break;
+		case RTE_FLOW_ITEM_TYPE_UDP:
+			udp_spec = item->spec;
+			udp_mask = item->mask;
+
+			if (!(udp_spec && udp_mask)) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item, "Invalid UDP mask");
+				return 0;
+			}
+
+			/* Check UDP mask and update input set*/
+			if (udp_mask->hdr.dgram_len ||
+			    udp_mask->hdr.dgram_cksum) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid UDP mask");
+				return 0;
+			}
+
+			if (outer_l4) {
+				if (udp_mask->hdr.src_port == UINT16_MAX)
+					input_set |= ICE_INSET_SRC_PORT;
+				if (udp_mask->hdr.dst_port == UINT16_MAX)
+					input_set |= ICE_INSET_DST_PORT;
+				outer_l4 = false;
+			} else {
+				if (udp_mask->hdr.src_port == UINT16_MAX)
+					input_set |= ICE_INSET_TUN_SRC_PORT;
+				if (udp_mask->hdr.dst_port == UINT16_MAX)
+					input_set |= ICE_INSET_TUN_DST_PORT;
+			}
+
+			break;
+		case RTE_FLOW_ITEM_TYPE_TCP:
+			tcp_spec = item->spec;
+			tcp_mask = item->mask;
+
+			if (!(tcp_spec && tcp_mask)) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item, "Invalid TCP mask");
+				return 0;
+			}
+
+			/* Check TCP mask and update input set */
+			if (tcp_mask->hdr.sent_seq ||
+			    tcp_mask->hdr.recv_ack ||
+			    tcp_mask->hdr.data_off ||
+			    tcp_mask->hdr.tcp_flags ||
+			    tcp_mask->hdr.rx_win ||
+			    tcp_mask->hdr.cksum ||
+			    tcp_mask->hdr.tcp_urp) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid TCP mask");
+				return 0;
+			}
+
+			if (outer_l4) {
+				if (tcp_mask->hdr.src_port == UINT16_MAX)
+					input_set |= ICE_INSET_SRC_PORT;
+				if (tcp_mask->hdr.dst_port == UINT16_MAX)
+					input_set |= ICE_INSET_DST_PORT;
+				outer_l4 = false;
+			} else {
+				if (tcp_mask->hdr.src_port == UINT16_MAX)
+					input_set |= ICE_INSET_TUN_SRC_PORT;
+				if (tcp_mask->hdr.dst_port == UINT16_MAX)
+					input_set |= ICE_INSET_TUN_DST_PORT;
+			}
+
+			break;
+		case RTE_FLOW_ITEM_TYPE_SCTP:
+			sctp_spec = item->spec;
+			sctp_mask = item->mask;
+
+			if (!(sctp_spec && sctp_mask)) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item, "Invalid SCTP mask");
+				return 0;
+			}
+
+			/* Check SCTP mask and update input set */
+			if (sctp_mask->hdr.cksum) {
+				rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Invalid SCTP mask");
+				return 0;
+			}
+
+			if (outer_l4) {
+				if (sctp_mask->hdr.src_port == UINT16_MAX)
+					input_set |= ICE_INSET_SRC_PORT;
+				if (sctp_mask->hdr.dst_port == UINT16_MAX)
+					input_set |= ICE_INSET_DST_PORT;
+				outer_l4 = false;
+			} else {
+				if (sctp_mask->hdr.src_port == UINT16_MAX)
+					input_set |= ICE_INSET_TUN_SRC_PORT;
+				if (sctp_mask->hdr.dst_port == UINT16_MAX)
+					input_set |= ICE_INSET_TUN_DST_PORT;
+			}
+
+			break;
+		case RTE_FLOW_ITEM_TYPE_ICMP:
+			icmp_mask = item->mask;
+			if (icmp_mask->hdr.icmp_code ||
+			    icmp_mask->hdr.icmp_cksum ||
+			    icmp_mask->hdr.icmp_ident ||
+			    icmp_mask->hdr.icmp_seq_nb) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid ICMP mask");
+				return 0;
+			}
+
+			if (icmp_mask->hdr.icmp_type == UINT8_MAX)
+				input_set |= ICE_INSET_ICMP;
+			break;
+		case RTE_FLOW_ITEM_TYPE_ICMP6:
+			icmp6_mask = item->mask;
+			if (icmp6_mask->code ||
+			    icmp6_mask->checksum) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid ICMP6 mask");
+				return 0;
+			}
+
+			if (icmp6_mask->type == UINT8_MAX)
+			input_set |= ICE_INSET_ICMP6;
+			break;
+		default:
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Invalid mask no exist");
+			break;
+		}
+	}
+	return input_set;
+}
+
+static int ice_flow_valid_inset(const struct rte_flow_item pattern[],
+			uint64_t inset, struct rte_flow_error *error)
+{
+	uint64_t fields;
+
+	/* get valid field */
+	fields = ice_get_flow_field(pattern, error);
+	if ((!fields) || (fields && (!inset))) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
+				   pattern,
+				   "Invalid input set");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+static int ice_flow_valid_action(const struct rte_flow_action *actions,
+				       struct rte_flow_error *error)
+{
+	switch (actions->type) {
+	case RTE_FLOW_ACTION_TYPE_QUEUE:
+		break;
+	case RTE_FLOW_ACTION_TYPE_DROP:
+		break;
+	default:
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ACTION, actions,
+				   "Invalid action.");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+static int
+ice_flow_validate(__rte_unused struct rte_eth_dev *dev,
+		   const struct rte_flow_attr *attr,
+		   const struct rte_flow_item pattern[],
+		   const struct rte_flow_action actions[],
+		   struct rte_flow_error *error)
+{
+	uint64_t inset = 0;
+	int ret = ICE_ERR_NOT_SUPPORTED;
+
+	if (!pattern) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+				   NULL, "NULL pattern.");
+		return -rte_errno;
+	}
+
+	if (!actions) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+				   NULL, "NULL action.");
+		return -rte_errno;
+	}
+
+	if (!attr) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR,
+				   NULL, "NULL attribute.");
+		return -rte_errno;
+	}
+
+	ret = ice_flow_valid_attr(attr, error);
+	if (!ret)
+		return ret;
+
+	inset = ice_flow_valid_pattern(pattern, error);
+	if (!inset)
+		return -rte_errno;
+
+	ret = ice_flow_valid_inset(pattern, inset, error);
+	if (ret)
+		return ret;
+
+	ret = ice_flow_valid_action(actions, error);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static struct rte_flow *
+ice_flow_create(struct rte_eth_dev *dev,
+		 const struct rte_flow_attr *attr,
+		 const struct rte_flow_item pattern[],
+		 const struct rte_flow_action actions[],
+		 struct rte_flow_error *error)
+{
+	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	struct rte_flow *flow = NULL;
+	int ret;
+
+	flow = rte_zmalloc("ice_flow", sizeof(struct rte_flow), 0);
+	if (!flow) {
+		rte_flow_error_set(error, ENOMEM,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Failed to allocate memory");
+		return flow;
+	}
+
+	ret = ice_flow_validate(dev, attr, pattern, actions, error);
+	if (ret < 0)
+		return NULL;
+
+	ret = ice_create_switch_filter(pf, pattern, actions, flow, error);
+	if (ret)
+		goto free_flow;
+
+	TAILQ_INSERT_TAIL(&pf->flow_list, flow, node);
+	return flow;
+
+free_flow:
+	rte_flow_error_set(error, -ret,
+			   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+			   "Failed to create flow.");
+	rte_free(flow);
+	return NULL;
+}
+
+static int
+ice_flow_destroy(struct rte_eth_dev *dev,
+		 struct rte_flow *flow,
+		 struct rte_flow_error *error)
+{
+	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	int ret = 0;
+
+	ret = ice_destroy_switch_filter(pf, flow);
+
+	if (!ret) {
+		TAILQ_REMOVE(&pf->flow_list, flow, node);
+		rte_free(flow);
+	} else
+		rte_flow_error_set(error, -ret,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Failed to destroy flow.");
+
+	return ret;
+}
+
+static int
+ice_flow_flush(struct rte_eth_dev *dev,
+	       struct rte_flow_error *error)
+{
+	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	struct rte_flow *p_flow;
+	int ret;
+
+	TAILQ_FOREACH(p_flow, &pf->flow_list, node) {
+		ret = ice_flow_destroy(dev, p_flow, error);
+		if (ret) {
+			rte_flow_error_set(error, -ret,
+					   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+					   "Failed to flush SW flows.");
+			return -rte_errno;
+		}
+	}
+
+	return ret;
+}
diff --git a/drivers/net/ice/ice_generic_flow.h b/drivers/net/ice/ice_generic_flow.h
new file mode 100644
index 0000000..46c3461
--- /dev/null
+++ b/drivers/net/ice/ice_generic_flow.h
@@ -0,0 +1,404 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#ifndef _ICE_GENERIC_FLOW_H_
+#define _ICE_GENERIC_FLOW_H_
+
+#include <rte_flow_driver.h>
+
+struct ice_flow_pattern {
+	enum rte_flow_item_type *items;
+	uint64_t sw_fields;
+};
+
+#define ICE_INSET_NONE            0x00000000000000000ULL
+
+/* bit0 ~ bit 7 */
+#define ICE_INSET_SMAC            0x0000000000000001ULL
+#define ICE_INSET_DMAC            0x0000000000000002ULL
+#define ICE_INSET_ETHERTYPE       0x0000000000000020ULL
+
+/* bit 8 ~ bit 15 */
+#define ICE_INSET_IPV4_SRC        0x0000000000000100ULL
+#define ICE_INSET_IPV4_DST        0x0000000000000200ULL
+#define ICE_INSET_IPV6_SRC        0x0000000000000400ULL
+#define ICE_INSET_IPV6_DST        0x0000000000000800ULL
+#define ICE_INSET_SRC_PORT        0x0000000000001000ULL
+#define ICE_INSET_DST_PORT        0x0000000000002000ULL
+#define ICE_INSET_ARP             0x0000000000004000ULL
+
+/* bit 16 ~ bit 31 */
+#define ICE_INSET_IPV4_TOS        0x0000000000010000ULL
+#define ICE_INSET_IPV4_PROTO      0x0000000000020000ULL
+#define ICE_INSET_IPV4_TTL        0x0000000000040000ULL
+#define ICE_INSET_IPV6_NEXT_HDR   0x0000000000200000ULL
+#define ICE_INSET_IPV6_HOP_LIMIT  0x0000000000400000ULL
+#define ICE_INSET_ICMP            0x0000000001000000ULL
+#define ICE_INSET_ICMP6           0x0000000002000000ULL
+
+/* bit 32 ~ bit 47, tunnel fields */
+#define ICE_INSET_TUN_SMAC           0x0000000100000000ULL
+#define ICE_INSET_TUN_DMAC           0x0000000200000000ULL
+#define ICE_INSET_TUN_IPV4_SRC       0x0000000400000000ULL
+#define ICE_INSET_TUN_IPV4_DST       0x0000000800000000ULL
+#define ICE_INSET_TUN_IPV4_TTL       0x0000001000000000ULL
+#define ICE_INSET_TUN_IPV4_PROTO     0x0000002000000000ULL
+#define ICE_INSET_TUN_IPV6_SRC       0x0000004000000000ULL
+#define ICE_INSET_TUN_IPV6_DST       0x0000008000000000ULL
+#define ICE_INSET_TUN_IPV6_TTL       0x0000010000000000ULL
+#define ICE_INSET_TUN_IPV6_PROTO     0x0000020000000000ULL
+#define ICE_INSET_TUN_SRC_PORT       0x0000040000000000ULL
+#define ICE_INSET_TUN_DST_PORT       0x0000080000000000ULL
+#define ICE_INSET_TUN_ID             0x0000100000000000ULL
+
+/* bit 48 ~ bit 55 */
+#define ICE_INSET_LAST_ETHER_TYPE 0x0001000000000000ULL
+
+#define ICE_FLAG_VLAN_INNER  0x00000001ULL
+#define ICE_FLAG_VLAN_OUTER  0x00000002ULL
+
+#define INSET_ETHER ( \
+	ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)
+#define INSET_MAC_IPV4 ( \
+	ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
+	ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_TOS)
+#define INSET_MAC_IPV4_L4 ( \
+	ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
+	ICE_INSET_IPV4_TOS | ICE_INSET_DST_PORT | \
+	ICE_INSET_SRC_PORT)
+#define INSET_MAC_IPV4_ICMP ( \
+	ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
+	ICE_INSET_IPV4_TOS | ICE_INSET_ICMP)
+#define INSET_MAC_IPV6 ( \
+	ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
+	ICE_INSET_IPV6_NEXT_HDR | ICE_INSET_IPV6_HOP_LIMIT)
+#define INSET_MAC_IPV6_L4 ( \
+	ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
+	ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_DST_PORT | \
+	ICE_INSET_SRC_PORT)
+#define INSET_MAC_IPV6_ICMP ( \
+	ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
+	ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_ICMP6)
+#define INSET_TUNNEL_IPV4_TYPE1 ( \
+	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
+	ICE_INSET_TUN_IPV4_TTL | ICE_INSET_TUN_IPV4_PROTO)
+#define INSET_TUNNEL_IPV4_TYPE2 ( \
+	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
+	ICE_INSET_TUN_IPV4_TTL | ICE_INSET_TUN_IPV4_PROTO | \
+	ICE_INSET_TUN_SRC_PORT | ICE_INSET_TUN_DST_PORT)
+#define INSET_TUNNEL_IPV4_TYPE3 ( \
+	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
+	ICE_INSET_TUN_IPV4_TTL | ICE_INSET_ICMP)
+#define INSET_TUNNEL_IPV6_TYPE1 ( \
+	ICE_INSET_TUN_IPV6_SRC | ICE_INSET_TUN_IPV6_DST | \
+	ICE_INSET_TUN_IPV6_TTL | ICE_INSET_TUN_IPV6_PROTO)
+#define INSET_TUNNEL_IPV6_TYPE2 ( \
+	ICE_INSET_TUN_IPV6_SRC | ICE_INSET_TUN_IPV6_DST | \
+	ICE_INSET_TUN_IPV6_TTL | ICE_INSET_TUN_IPV6_PROTO | \
+	ICE_INSET_TUN_SRC_PORT | ICE_INSET_TUN_DST_PORT)
+#define INSET_TUNNEL_IPV6_TYPE3 ( \
+	ICE_INSET_TUN_IPV6_SRC | ICE_INSET_TUN_IPV6_DST | \
+	ICE_INSET_TUN_IPV6_TTL | ICE_INSET_ICMP6)
+
+/* L2 */
+static enum rte_flow_item_type pattern_ethertype[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* non-tunnel IPv4 */
+static enum rte_flow_item_type pattern_ipv4[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_icmp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* non-tunnel IPv6 */
+static enum rte_flow_item_type pattern_ipv6[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv6_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv6_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv6_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv6_icmp6[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_ICMP6,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* IPv4 tunnel IPv4 */
+static enum rte_flow_item_type pattern_ipv4_tunnel_ipv4[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_tunnel_ipv4_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_tunnel_ipv4_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_tunnel_ipv4_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_tunnel_ipv4_icmp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* IPv4 tunnel MAC IPv4 */
+static enum rte_flow_item_type pattern_ipv4_tunnel_eth_ipv4[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_tunnel_eth_ipv4_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_tunnel_eth_ipv4_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_tunnel_eth_ipv4_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_tunnel_eth_ipv4_icmp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* IPv4 tunnel IPv6 */
+static enum rte_flow_item_type pattern_ipv4_tunnel_ipv6[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_tunnel_ipv6_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_tunnel_ipv6_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_tunnel_ipv6_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_tunnel_ipv6_icmp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* IPv4 tunnel MAC IPv6 */
+static enum rte_flow_item_type pattern_ipv4_tunnel_eth_ipv6[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_tunnel_eth_ipv6_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_tunnel_eth_ipv6_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_tunnel_eth_ipv6_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_tunnel_eth_ipv6_icmp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static struct ice_flow_pattern ice_supported_patterns[] = {
+	{pattern_ethertype, INSET_ETHER},
+	{pattern_ipv4, INSET_MAC_IPV4},
+	{pattern_ipv4_udp, INSET_MAC_IPV4_L4},
+	{pattern_ipv4_sctp, INSET_MAC_IPV4_L4},
+	{pattern_ipv4_tcp, INSET_MAC_IPV4_L4},
+	{pattern_ipv4_icmp, INSET_MAC_IPV4_ICMP},
+	{pattern_ipv6, INSET_MAC_IPV6},
+	{pattern_ipv6_udp, INSET_MAC_IPV6_L4},
+	{pattern_ipv6_sctp, INSET_MAC_IPV6_L4},
+	{pattern_ipv6_tcp, INSET_MAC_IPV6_L4},
+	{pattern_ipv6_icmp6, INSET_MAC_IPV6_ICMP},
+	{pattern_ipv4_tunnel_ipv4, INSET_TUNNEL_IPV4_TYPE1},
+	{pattern_ipv4_tunnel_ipv4_udp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_tunnel_ipv4_tcp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_tunnel_ipv4_sctp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_tunnel_ipv4_icmp, INSET_TUNNEL_IPV4_TYPE3},
+	{pattern_ipv4_tunnel_eth_ipv4, INSET_TUNNEL_IPV4_TYPE1},
+	{pattern_ipv4_tunnel_eth_ipv4_udp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_tunnel_eth_ipv4_tcp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_tunnel_eth_ipv4_sctp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_tunnel_eth_ipv4_icmp, INSET_TUNNEL_IPV4_TYPE3},
+	{pattern_ipv4_tunnel_ipv6, INSET_TUNNEL_IPV6_TYPE1},
+	{pattern_ipv4_tunnel_ipv6_udp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_tunnel_ipv6_tcp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_tunnel_ipv6_sctp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_tunnel_ipv6_icmp, INSET_TUNNEL_IPV6_TYPE3},
+	{pattern_ipv4_tunnel_eth_ipv6, INSET_TUNNEL_IPV6_TYPE1},
+	{pattern_ipv4_tunnel_eth_ipv6_udp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_tunnel_eth_ipv6_tcp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_tunnel_eth_ipv6_sctp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_tunnel_eth_ipv6_icmp, INSET_TUNNEL_IPV6_TYPE3},
+};
+
+#endif
-- 
2.9.5


^ permalink raw reply	[flat|nested] 73+ messages in thread

* [dpdk-dev] [PATCH v2 3/3] net/ice: add UDP tunnel port support
  2019-06-12  7:50 ` [dpdk-dev] [PATCH v2 0/3] Enable rte_flow API in ice driver Qiming Yang
  2019-06-12  7:50   ` [dpdk-dev] [PATCH v2 1/3] net/ice: enable switch filter Qiming Yang
  2019-06-12  7:50   ` [dpdk-dev] [PATCH v2 2/3] net/ice: add generic flow API Qiming Yang
@ 2019-06-12  7:50   ` Qiming Yang
  2 siblings, 0 replies; 73+ messages in thread
From: Qiming Yang @ 2019-06-12  7:50 UTC (permalink / raw)
  To: dev; +Cc: Qiming Yang

Enabled UDP tunnel port add and delete functions.
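
For reference (not part of this patch), the new ops are driven through the
standard ethdev calls; a minimal sketch with a made-up helper name:

#include <rte_ethdev.h>

/* Register the IANA VXLAN port on a device, then remove it again. */
static int
example_vxlan_port(uint16_t port_id)
{
	struct rte_eth_udp_tunnel tunnel = {
		.udp_port = 4789,
		.prot_type = RTE_TUNNEL_TYPE_VXLAN,
	};
	int ret;

	ret = rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
	if (ret < 0)
		return ret;
	return rte_eth_dev_udp_tunnel_port_delete(port_id, &tunnel);
}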

Signed-off-by: Qiming Yang <qiming.yang@intel.com>
---
 drivers/net/ice/ice_ethdev.c | 54 ++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 54 insertions(+)

diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index cf6bb1d..833b724 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -88,6 +88,10 @@ static int ice_dev_filter_ctrl(struct rte_eth_dev *dev,
 			enum rte_filter_type filter_type,
 			enum rte_filter_op filter_op,
 			void *arg);
+static int ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
+			struct rte_eth_udp_tunnel *udp_tunnel);
+static int ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
+			struct rte_eth_udp_tunnel *udp_tunnel);
 
 static const struct rte_pci_id pci_id_ice_map[] = {
 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_BACKPLANE) },
@@ -147,6 +151,8 @@ static const struct eth_dev_ops ice_eth_dev_ops = {
 	.xstats_get_names             = ice_xstats_get_names,
 	.xstats_reset                 = ice_stats_reset,
 	.filter_ctrl                  = ice_dev_filter_ctrl,
+	.udp_tunnel_port_add          = ice_dev_udp_tunnel_port_add,
+	.udp_tunnel_port_del          = ice_dev_udp_tunnel_port_del,
 };
 
 /* store statistics names and its offset in stats structure */
@@ -3646,6 +3652,54 @@ ice_dev_filter_ctrl(struct rte_eth_dev *dev,
 	return ret;
 }
 
+/* Add UDP tunneling port */
+static int
+ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
+			     struct rte_eth_udp_tunnel *udp_tunnel)
+{
+	int ret = 0;
+	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	if (udp_tunnel == NULL)
+		return -EINVAL;
+
+	switch (udp_tunnel->prot_type) {
+	case RTE_TUNNEL_TYPE_VXLAN:
+		ret = ice_create_tunnel(hw, TNL_VXLAN, udp_tunnel->udp_port);
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "Invalid tunnel type");
+		ret = -1;
+		break;
+	}
+
+	return ret;
+}
+
+/* Delete UDP tunneling port */
+static int
+ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
+			     struct rte_eth_udp_tunnel *udp_tunnel)
+{
+	int ret = 0;
+	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	if (udp_tunnel == NULL)
+		return -EINVAL;
+
+	switch (udp_tunnel->prot_type) {
+	case RTE_TUNNEL_TYPE_VXLAN:
+		ret = ice_destroy_tunnel(hw, udp_tunnel->udp_port, 0);
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "Invalid tunnel type");
+		ret = -1;
+		break;
+	}
+
+	return ret;
+}
+
 static int
 ice_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 	      struct rte_pci_device *pci_dev)
-- 
2.9.5


^ permalink raw reply	[flat|nested] 73+ messages in thread

* Re: [dpdk-dev] [PATCH v2 1/3] net/ice: enable switch filter
  2019-06-12  7:50   ` [dpdk-dev] [PATCH v2 1/3] net/ice: enable switch filter Qiming Yang
@ 2019-06-13  8:23     ` Wang, Xiao W
  2019-06-14  9:46       ` Zhao1, Wei
  2019-06-17  5:27     ` Xing, Beilei
  2019-06-18  9:40     ` Ye Xiaolong
  2 siblings, 1 reply; 73+ messages in thread
From: Wang, Xiao W @ 2019-06-13  8:23 UTC (permalink / raw)
  To: Yang, Qiming, dev; +Cc: Zhao1, Wei

Hi,

> -----Original Message-----
> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Qiming Yang
> Sent: Wednesday, June 12, 2019 3:50 PM
> To: dev@dpdk.org
> Cc: Zhao1, Wei <wei.zhao1@intel.com>
> Subject: [dpdk-dev] [PATCH v2 1/3] net/ice: enable switch filter
> 
> From: wei zhao <wei.zhao1@intel.com>
> 
> The patch enables the backend of rte_flow. It transfers
> rte_flow_xxx to device specific data structure and
> configures packet process engine's binary classifier
> (switch) properly.
> 
> Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
> ---
>  drivers/net/ice/Makefile            |   1 +
>  drivers/net/ice/ice_ethdev.h        |   6 +
>  drivers/net/ice/ice_switch_filter.c | 502
> ++++++++++++++++++++++++++++++++++++
>  drivers/net/ice/ice_switch_filter.h |  28 ++
>  drivers/net/ice/meson.build         |   3 +-
>  5 files changed, 539 insertions(+), 1 deletion(-)
>  create mode 100644 drivers/net/ice/ice_switch_filter.c
>  create mode 100644 drivers/net/ice/ice_switch_filter.h
> 
> diff --git a/drivers/net/ice/Makefile b/drivers/net/ice/Makefile
> index 0e5c55e..b10d826 100644
> --- a/drivers/net/ice/Makefile
> +++ b/drivers/net/ice/Makefile
> @@ -60,6 +60,7 @@ ifeq ($(CONFIG_RTE_ARCH_X86), y)
>  SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_rxtx_vec_sse.c
>  endif
> 
> +SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_switch_filter.c
>  ifeq ($(findstring
> RTE_MACHINE_CPUFLAG_AVX2,$(CFLAGS)),RTE_MACHINE_CPUFLAG_AVX2)
>  	CC_AVX2_SUPPORT=1
>  else
> diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
> index 1385afa..67a358a 100644
> --- a/drivers/net/ice/ice_ethdev.h
> +++ b/drivers/net/ice/ice_ethdev.h
> @@ -234,6 +234,12 @@ struct ice_vsi {
>  	bool offset_loaded;
>  };
> 
> +/* Struct to store flow created. */
> +struct rte_flow {
> +	TAILQ_ENTRY(rte_flow) node;
> +void *rule;
> +};
> +
>  struct ice_pf {
>  	struct ice_adapter *adapter; /* The adapter this PF associate to */
>  	struct ice_vsi *main_vsi; /* pointer to main VSI structure */
> diff --git a/drivers/net/ice/ice_switch_filter.c
> b/drivers/net/ice/ice_switch_filter.c
> new file mode 100644
> index 0000000..e679675
> --- /dev/null
> +++ b/drivers/net/ice/ice_switch_filter.c
> @@ -0,0 +1,502 @@

SPDX-License-Identifier missing.
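
For reference, the header style already used elsewhere in this series would
do, e.g. (year as appropriate):

/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */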

> +#include <sys/queue.h>
> +#include <stdio.h>
> +#include <errno.h>
> +#include <stdint.h>
> +#include <string.h>
> +#include <unistd.h>
> +#include <stdarg.h>
> +
> +#include <rte_debug.h>
> +#include <rte_ether.h>
> +#include <rte_ethdev_driver.h>
> +#include <rte_log.h>
> +#include <rte_malloc.h>
> +#include <rte_eth_ctrl.h>
> +#include <rte_tailq.h>
> +#include <rte_flow_driver.h>
> +
> +#include "ice_logs.h"
> +#include "base/ice_type.h"
> +#include "ice_switch_filter.h"
> +
> +static int
> +ice_parse_switch_filter(
> +			const struct rte_flow_item pattern[],
> +			const struct rte_flow_action actions[],
> +			struct rte_flow_error *error,
> +			struct ice_adv_rule_info *rule_info,
> +			struct ice_adv_lkup_elem **lkup_list,
> +			uint16_t *lkups_num)
> +{
> +	const struct rte_flow_item *item = pattern;
> +	enum rte_flow_item_type item_type;
> +	const struct rte_flow_item_eth *eth_spec, *eth_mask;
> +	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
> +	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
> +	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
> +	const struct rte_flow_item_udp *udp_spec, *udp_mask;
> +	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
> +	const struct rte_flow_item_nvgre  *nvgre_spec, *nvgre_mask;
> +	const struct rte_flow_item_vxlan  *vxlan_spec, *vxlan_mask;
> +	struct ice_adv_lkup_elem *list;
> +	uint16_t i, j, t = 0;
> +	uint16_t item_num = 0;
> +	enum ice_sw_tunnel_type tun_type = ICE_NON_TUN;
> +
> +	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
> +		if (item->type == RTE_FLOW_ITEM_TYPE_ETH ||
> +			item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
> +			item->type == RTE_FLOW_ITEM_TYPE_IPV6 ||
> +			item->type == RTE_FLOW_ITEM_TYPE_UDP ||
> +			item->type == RTE_FLOW_ITEM_TYPE_TCP ||
> +			item->type == RTE_FLOW_ITEM_TYPE_SCTP ||
> +			item->type == RTE_FLOW_ITEM_TYPE_VXLAN ||
> +			item->type == RTE_FLOW_ITEM_TYPE_NVGRE)
> +			item_num++;
> +	}
> +
> +	list = rte_zmalloc(NULL, item_num * sizeof(*list), 0);
> +	if (!list) {
> +		rte_flow_error_set(error, EINVAL,
> +				   RTE_FLOW_ERROR_TYPE_ITEM, actions,
> +				   "no memory malloc");

{RTE_FLOW_ERROR_TYPE_ITEM_NUM, item, "No memory for PMD internal items"} is more appropriate.
Refer to i40e implementation.
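
I.e. roughly, in the context of this hunk (untested):

		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM_NUM, item,
				   "No memory for PMD internal items");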

> +		goto out;
> +	}
> +	*lkup_list = list;
> +
> +	for (item = pattern, i = 0; item->type !=
> +			RTE_FLOW_ITEM_TYPE_END; item++, i++) {

It seems we don't need the "i" variable.

> +		item_type = item->type;
> +
> +		switch (item_type) {
> +		case RTE_FLOW_ITEM_TYPE_ETH:
> +			eth_spec = item->spec;
> +			eth_mask = item->mask;
> +			if (eth_spec && eth_mask) {
> +				list[t].type = (tun_type == ICE_NON_TUN) ?
> +					ICE_MAC_OFOS : ICE_MAC_IL;
> +				for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
> +					if (eth_mask->src.addr_bytes[j] ==
> +								UINT8_MAX) {
> +						list[t].h_u.eth_hdr.
> +							src_addr[j] =
> +						eth_spec->src.addr_bytes[j];
> +						list[t].m_u.eth_hdr.
> +							src_addr[j] =
> +						eth_mask->src.addr_bytes[j];
> +					}
> +					if (eth_mask->dst.addr_bytes[j] ==
> +								UINT8_MAX) {
> +						list[t].h_u.eth_hdr.
> +							dst_addr[j] =
> +						eth_spec->dst.addr_bytes[j];
> +						list[t].m_u.eth_hdr.
> +							dst_addr[j] =
> +						eth_mask->dst.addr_bytes[j];
> +					}
> +				}
> +				if (eth_mask->type == UINT16_MAX) {
> +					list[t].h_u.eth_hdr.ethtype_id =
> +					rte_be_to_cpu_16(eth_spec->type);
> +					list[t].m_u.eth_hdr.ethtype_id =
> +						UINT16_MAX;
> +				}
> +				t++;

A lot of "t++" below, can we move it outside the switch{ } to have only one "t++"?

> +			} else if (!eth_spec && !eth_mask) {
> +				list[t].type = (tun_type == ICE_NON_TUN) ?
> +					ICE_MAC_OFOS : ICE_MAC_IL;
> +			}
> +			break;
> +
> +		case RTE_FLOW_ITEM_TYPE_IPV4:
> +			ipv4_spec = item->spec;
> +			ipv4_mask = item->mask;
> +			if (ipv4_spec && ipv4_mask) {
> +				list[t].type = (tun_type == ICE_NON_TUN) ?
> +					ICE_IPV4_OFOS : ICE_IPV4_IL;
> +				if (ipv4_mask->hdr.src_addr == UINT32_MAX)
> {
> +					list[t].h_u.ipv4_hdr.src_addr =
> +						ipv4_spec->hdr.src_addr;
> +					list[t].m_u.ipv4_hdr.src_addr =
> +						UINT32_MAX;
> +				}
> +				if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
> {
> +					list[t].h_u.ipv4_hdr.dst_addr =
> +						ipv4_spec->hdr.dst_addr;
> +					list[t].m_u.ipv4_hdr.dst_addr =
> +						UINT32_MAX;
> +				}
> +				if (ipv4_mask->hdr.time_to_live ==
> UINT8_MAX) {
> +					list[t].h_u.ipv4_hdr.time_to_live =
> +						ipv4_spec->hdr.time_to_live;
> +					list[t].m_u.ipv4_hdr.time_to_live =
> +						UINT8_MAX;
> +				}
> +				if (ipv4_mask->hdr.next_proto_id ==
> UINT8_MAX) {
> +					list[t].h_u.ipv4_hdr.protocol =
> +						ipv4_spec-
> >hdr.next_proto_id;
> +					list[t].m_u.ipv4_hdr.protocol =
> +						UINT8_MAX;
> +				}
> +				if (ipv4_mask->hdr.type_of_service ==
> +						UINT8_MAX) {
> +					list[t].h_u.ipv4_hdr.tos =
> +						ipv4_spec-
> >hdr.type_of_service;
> +					list[t].m_u.ipv4_hdr.tos = UINT8_MAX;
> +				}
> +				t++;
> +			} else if (!ipv4_spec && !ipv4_mask) {
> +				list[t].type = (tun_type == ICE_NON_TUN) ?
> +					ICE_IPV4_OFOS : ICE_IPV4_IL;
> +			}
> +			break;
> +
> +		case RTE_FLOW_ITEM_TYPE_IPV6:
> +			ipv6_spec = item->spec;
> +			ipv6_mask = item->mask;
> +			if (ipv6_spec && ipv6_mask) {
> +				list[t].type = (tun_type == ICE_NON_TUN) ?
> +					ICE_IPV6_OFOS : ICE_IPV6_IL;
> +				for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
> +					if (ipv6_mask->hdr.src_addr[j] ==
> +								UINT8_MAX) {
> +						list[t].h_u.ice_ipv6_ofos_hdr.
> +							src_addr[j] =
> +						ipv6_spec->hdr.src_addr[j];
> +						list[t].m_u.ice_ipv6_ofos_hdr.
> +							src_addr[j] =
> +						ipv6_mask->hdr.src_addr[j];
> +					}
> +					if (ipv6_mask->hdr.dst_addr[j] ==
> +								UINT8_MAX) {
> +						list[t].h_u.ice_ipv6_ofos_hdr.
> +							dst_addr[j] =
> +						ipv6_spec->hdr.dst_addr[j];
> +						list[t].m_u.ice_ipv6_ofos_hdr.
> +							dst_addr[j] =
> +						ipv6_mask->hdr.dst_addr[j];
> +					}
> +				}
> +				if (ipv6_mask->hdr.proto == UINT8_MAX) {
> +
> 	list[t].h_u.ice_ipv6_ofos_hdr.next_hdr =
> +						ipv6_spec->hdr.proto;
> +
> 	list[t].m_u.ice_ipv6_ofos_hdr.next_hdr =
> +						UINT8_MAX;
> +				}
> +				if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
> {
> +					list[t].h_u.ice_ipv6_ofos_hdr.
> +					hop_limit = ipv6_spec-
> >hdr.hop_limits;
> +					list[t].m_u.ice_ipv6_ofos_hdr.
> +						hop_limit  = UINT8_MAX;
> +				}
> +				t++;
> +			} else if (!ipv6_spec && !ipv6_mask) {
> +				list[t].type = (tun_type == ICE_NON_TUN) ?
> +					ICE_IPV4_OFOS : ICE_IPV4_IL;
> +			}
> +			break;
> +
> +		case RTE_FLOW_ITEM_TYPE_UDP:
> +			udp_spec = item->spec;
> +			udp_mask = item->mask;
> +			if (udp_spec && udp_mask) {
> +				list[t].type = ICE_UDP_ILOS;
> +				if (udp_mask->hdr.src_port == UINT16_MAX)
> {
> +					list[t].h_u.l4_hdr.src_port =
> +						udp_spec->hdr.src_port;
> +					list[t].m_u.l4_hdr.src_port =
> +						udp_mask->hdr.src_port;
> +				}
> +				if (udp_mask->hdr.dst_port == UINT16_MAX)
> {
> +					list[t].h_u.l4_hdr.dst_port =
> +						udp_spec->hdr.dst_port;
> +					list[t].m_u.l4_hdr.dst_port =
> +						udp_mask->hdr.dst_port;
> +				}
> +				t++;
> +			} else if (!udp_spec && !udp_mask) {
> +				list[t].type = ICE_UDP_ILOS;
> +			}
> +			break;
> +
> +		case RTE_FLOW_ITEM_TYPE_TCP:
> +			tcp_spec = item->spec;
> +			tcp_mask = item->mask;
> +			if (tcp_spec && tcp_mask) {
> +				list[t].type = ICE_TCP_IL;
> +				if (tcp_mask->hdr.src_port == UINT16_MAX) {
> +					list[t].h_u.l4_hdr.src_port =
> +						tcp_spec->hdr.src_port;
> +					list[t].m_u.l4_hdr.src_port =
> +						tcp_mask->hdr.src_port;
> +				}
> +				if (tcp_mask->hdr.dst_port == UINT16_MAX) {
> +					list[t].h_u.l4_hdr.dst_port =
> +						tcp_spec->hdr.dst_port;
> +					list[t].m_u.l4_hdr.dst_port =
> +						tcp_mask->hdr.dst_port;
> +				}
> +				t++;
> +			} else if (!tcp_spec && !tcp_mask) {
> +				list[t].type = ICE_TCP_IL;
> +			}
> +			break;
> +
> +		case RTE_FLOW_ITEM_TYPE_SCTP:
> +			sctp_spec = item->spec;
> +			sctp_mask = item->mask;
> +			if (sctp_spec && sctp_mask) {
> +				list[t].type = ICE_SCTP_IL;
> +				if (sctp_mask->hdr.src_port == UINT16_MAX)
> {
> +					list[t].h_u.sctp_hdr.src_port =
> +						sctp_spec->hdr.src_port;
> +					list[t].m_u.sctp_hdr.src_port =
> +						sctp_mask->hdr.src_port;
> +				}
> +				if (sctp_mask->hdr.dst_port == UINT16_MAX)
> {
> +					list[t].h_u.sctp_hdr.dst_port =
> +						sctp_spec->hdr.dst_port;
> +					list[t].m_u.sctp_hdr.dst_port =
> +						sctp_mask->hdr.dst_port;
> +				}
> +				t++;
> +			} else if (!sctp_spec && !sctp_mask) {
> +				list[t].type = ICE_SCTP_IL;
> +			}
> +			break;
> +
> +		case RTE_FLOW_ITEM_TYPE_VXLAN:
> +			vxlan_spec = item->spec;
> +			vxlan_mask = item->mask;
> +			tun_type = ICE_SW_TUN_VXLAN;
> +			if (vxlan_spec && vxlan_mask) {
> +				list[t].type = ICE_VXLAN;
> +				if (vxlan_mask->vni[0] == UINT8_MAX &&
> +					vxlan_mask->vni[1] == UINT8_MAX
> &&
> +					vxlan_mask->vni[2] == UINT8_MAX) {
> +					list[t].h_u.tnl_hdr.vni =
> +						(vxlan_spec->vni[1] << 8) |
> +						vxlan_spec->vni[0];
> +					list[t].m_u.tnl_hdr.vni =
> +						UINT16_MAX;

vxlan_spec->vni[2] does not need to be put into the list?

> +				}
> +				t++;
> +			} else if (!vxlan_spec && !vxlan_mask) {
> +				list[t].type = ICE_VXLAN;
> +			}
> +			break;
> +
> +		case RTE_FLOW_ITEM_TYPE_NVGRE:
> +			nvgre_spec = item->spec;
> +			nvgre_mask = item->mask;
> +			tun_type = ICE_SW_TUN_NVGRE;
> +			if (nvgre_spec && nvgre_mask) {
> +				list[t].type = ICE_NVGRE;
> +				if (nvgre_mask->tni[0] == UINT8_MAX &&
> +					nvgre_mask->tni[1] == UINT8_MAX
> &&
> +					nvgre_mask->tni[2] == UINT8_MAX) {
> +					list[t].h_u.nvgre_hdr.tni =
> +						(nvgre_spec->tni[1] << 8) |
> +						nvgre_spec->tni[0];
> +					list[t].m_u.nvgre_hdr.tni =
> +						UINT16_MAX;
> +				}
> +				t++;
> +			} else if (!nvgre_spec && !nvgre_mask) {
> +				list[t].type = ICE_NVGRE;
> +			}
> +			break;
> +
> +		case RTE_FLOW_ITEM_TYPE_VOID:
> +		case RTE_FLOW_ITEM_TYPE_END:
> +			break;
> +
> +		default:
> +			rte_flow_error_set(error, EINVAL,
> +				   RTE_FLOW_ERROR_TYPE_ITEM, actions,
> +				   "Invalid pattern item.");
> +			goto out;
> +		}
> +	}
> +
> +	rule_info->tun_type = tun_type;
> +	*lkups_num = t;
> +
> +	return 0;
> +out:

We may need to free the allocated list before return.
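
E.g. freeing at the label would cover both error paths (rte_free(NULL) is a
no-op); untested sketch:

out:
	rte_free(list);
	*lkup_list = NULL;
	return -rte_errno;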

> +	return -rte_errno;
> +}
> +
> +/* By now ice switch filter action code implement only
> +* supports QUEUE or DROP.
> +*/
> +static int
> +ice_parse_switch_action(struct ice_pf *pf,
> +				 const struct rte_flow_action *actions,
> +				 struct rte_flow_error *error,
> +				 struct ice_adv_rule_info *rule_info)
> +{
> +	struct ice_hw *hw = ICE_PF_TO_HW(pf);
> +	struct ice_vsi *vsi = pf->main_vsi;
> +	const struct rte_flow_action *act;
> +	const struct rte_flow_action_queue *act_q;
> +	uint16_t base_queue, index = 0;
> +	uint32_t reg;
> +
> +	/* Check if the first non-void action is QUEUE or DROP. */
> +	NEXT_ITEM_OF_ACTION(act, actions, index);
> +	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
> +	    act->type != RTE_FLOW_ACTION_TYPE_DROP) {
> +		rte_flow_error_set(error, EINVAL,
> RTE_FLOW_ERROR_TYPE_ACTION,
> +				   act, "Not supported action.");
> +		return -rte_errno;
> +	}
> +	reg = ICE_READ_REG(hw, PFLAN_RX_QALLOC);
> +	if (reg & PFLAN_RX_QALLOC_VALID_M) {
> +		base_queue = reg & PFLAN_RX_QALLOC_FIRSTQ_M;

Can we get this register info earlier in dev_start() or somewhere else? Then we can use the base_queue directly.
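
E.g. read it once at start time and keep it in the PF structure; rough
sketch, the pf->base_queue field is made up:

	/* e.g. in ice_dev_start() */
	reg = ICE_READ_REG(hw, PFLAN_RX_QALLOC);
	if (reg & PFLAN_RX_QALLOC_VALID_M)
		pf->base_queue = reg & PFLAN_RX_QALLOC_FIRSTQ_M;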

> +	} else {
> +		rte_flow_error_set(error, EINVAL,
> +			RTE_FLOW_ERROR_TYPE_ACTION,
> +			act, "Invalid queue register");
> +		return -rte_errno;
> +	}
> +	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
> +		act_q = act->conf;
> +		rule_info->sw_act.fltr_act = ICE_FWD_TO_Q;
> +		rule_info->sw_act.fwd_id.q_id = base_queue + act_q->index;
> +		if (act_q->index >= pf->dev_data->nb_rx_queues) {
> +			rte_flow_error_set(error, EINVAL,
> +				RTE_FLOW_ERROR_TYPE_ACTION,
> +				act, "Invalid queue ID for"
> +				" switch filter.");
> +			return -rte_errno;
> +		}
> +	} else {
> +		rule_info->sw_act.fltr_act = ICE_DROP_PACKET;
> +	}
> +
> +	rule_info->sw_act.vsi_handle = vsi->idx;
> +	rule_info->rx = 1;
> +	rule_info->sw_act.src = vsi->idx;
> +
> +	/* Check if the next non-void item is END */
> +	index++;
> +	NEXT_ITEM_OF_ACTION(act, actions, index);
> +	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
> +		rte_flow_error_set(error, EINVAL,
> RTE_FLOW_ERROR_TYPE_ACTION,
> +				   act, "Not supported action.");
> +		return -rte_errno;
> +	}
> +
> +	return 0;
> +}
> +
> +static int
> +ice_switch_rule_set(struct ice_pf *pf,
> +			struct ice_adv_lkup_elem *list,
> +			uint16_t lkups_cnt,
> +			struct ice_adv_rule_info *rule_info,
> +			struct rte_flow *flow)
> +{
> +	struct ice_hw *hw = ICE_PF_TO_HW(pf);
> +	int ret;
> +	struct ice_rule_query_data rule_added = {0};
> +	struct ice_rule_query_data *filter_ptr;
> +
> +	if (lkups_cnt > ICE_MAX_CHAIN_WORDS) {
> +		PMD_DRV_LOG(ERR, "item number too large for rule");

Why not rte_flow_error_set() to report error?
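
I.e., assuming the rte_flow_error pointer gets passed down into
ice_switch_rule_set(), something like:

		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				   "item number too large for rule");
		return -ENOTSUP;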

> +		return -ENOTSUP;
> +	}
> +	if (!list) {
> +		PMD_DRV_LOG(ERR, "lookup list should not be NULL");

Ditto.

> +		return -ENOTSUP;
> +	}
> +
> +	ret = ice_add_adv_rule(hw, list, lkups_cnt, rule_info, &rule_added);
> +
> +	if (!ret) {
> +		filter_ptr = rte_zmalloc("ice_switch_filter",
> +			sizeof(struct ice_rule_query_data), 0);
> +		if (!filter_ptr) {
> +			PMD_DRV_LOG(ERR, "failed to allocate memory");
> +			return -EINVAL;
> +		}
> +		flow->rule = filter_ptr;
> +		rte_memcpy(filter_ptr,
> +			&rule_added,
> +			sizeof(struct ice_rule_query_data));
> +	}
> +
> +	return ret;
> +}
> +
> +int
> +ice_create_switch_filter(struct ice_pf *pf,
> +			const struct rte_flow_item pattern[],
> +			const struct rte_flow_action actions[],
> +			struct rte_flow *flow,
> +			struct rte_flow_error *error)
> +{
> +	int ret = 0;
> +	struct ice_adv_rule_info rule_info = {0};
> +	struct ice_adv_lkup_elem *list = NULL;
> +	uint16_t lkups_num = 0;
> +
> +	ret = ice_parse_switch_filter(pattern, actions, error,
> +			&rule_info, &list, &lkups_num);
> +	if (ret)
> +		goto out;
> +
> +	ret = ice_parse_switch_action(pf, actions, error, &rule_info);
> +	if (ret)
> +		goto out;
> +
> +	ret = ice_switch_rule_set(pf, list, lkups_num, &rule_info, flow);
> +	if (ret)
> +		goto out;
> +
> +	rte_free(list);
> +	return 0;
> +
> +out:
> +	rte_free(list);
> +
> +	return -rte_errno;
> +}
> +
> +int
> +ice_destroy_switch_filter(struct ice_pf *pf,
> +			struct rte_flow *flow)
> +{
> +	struct ice_hw *hw = ICE_PF_TO_HW(pf);
> +	int ret;
> +	struct ice_rule_query_data *filter_ptr;
> +	struct ice_rule_query_data rule_added;
> +
> +	filter_ptr = (struct ice_rule_query_data *)
> +			flow->rule;
> +	rte_memcpy(&rule_added, filter_ptr,
> +		sizeof(struct ice_rule_query_data));
> +
> +	if (!filter_ptr) {
> +		PMD_DRV_LOG(ERR, "no such flow"
> +			    " create by switch filter");
> +		return -EINVAL;
> +	}

We should do this check at least before rte_memcpy.

> +
> +	ret = ice_rem_adv_rule_by_id(hw, &rule_added);

We can use filter_ptr directly for the switch rule delete.
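
Combined with the NULL check above, the tail of ice_destroy_switch_filter()
could shrink to roughly (untested):

	filter_ptr = (struct ice_rule_query_data *)flow->rule;
	if (!filter_ptr) {
		PMD_DRV_LOG(ERR, "no such flow created by switch filter");
		return -EINVAL;
	}

	ret = ice_rem_adv_rule_by_id(hw, filter_ptr);
	rte_free(filter_ptr);

	return ret;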

> +
> +	rte_free(filter_ptr);
> +
> +	return ret;
> +}
> +
> +void
> +ice_free_switch_filter_rule(void *rule)
> +{
> +	struct ice_rule_query_data *filter_ptr;
> +
> +	filter_ptr = (struct ice_rule_query_data *)rule;
> +
> +	rte_free(filter_ptr);
> +}
> diff --git a/drivers/net/ice/ice_switch_filter.h
> b/drivers/net/ice/ice_switch_filter.h
> new file mode 100644
> index 0000000..957d0d1
> --- /dev/null
> +++ b/drivers/net/ice/ice_switch_filter.h
> @@ -0,0 +1,28 @@

Also a license is needed in new file.

BRs,
Xiao

> +#ifndef _ICE_SWITCH_FILTER_H_
> +#define _ICE_SWITCH_FILTER_H_


^ permalink raw reply	[flat|nested] 73+ messages in thread

* Re: [dpdk-dev] [PATCH v2 1/3] net/ice: enable switch filter
  2019-06-13  8:23     ` Wang, Xiao W
@ 2019-06-14  9:46       ` Zhao1, Wei
  2019-06-17  8:28         ` Wang, Xiao W
  0 siblings, 1 reply; 73+ messages in thread
From: Zhao1, Wei @ 2019-06-14  9:46 UTC (permalink / raw)
  To: Wang, Xiao W, Yang, Qiming, dev

Hi, xiao

> -----Original Message-----
> From: Wang, Xiao W
> Sent: Thursday, June 13, 2019 4:24 PM
> To: Yang, Qiming <qiming.yang@intel.com>; dev@dpdk.org
> Cc: Zhao1, Wei <wei.zhao1@intel.com>
> Subject: RE: [dpdk-dev] [PATCH v2 1/3] net/ice: enable switch filter
> 
> Hi,
> 
> > -----Original Message-----
> > From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Qiming Yang
> > Sent: Wednesday, June 12, 2019 3:50 PM
> > To: dev@dpdk.org
> > Cc: Zhao1, Wei <wei.zhao1@intel.com>
> > Subject: [dpdk-dev] [PATCH v2 1/3] net/ice: enable switch filter
> >
> > From: wei zhao <wei.zhao1@intel.com>
> >
> > The patch enables the backend of rte_flow. It transfers rte_flow_xxx
> > to device specific data structure and configures packet process
> > engine's binary classifier
> > (switch) properly.
> >
> > Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
> > ---
> >  drivers/net/ice/Makefile            |   1 +
> >  drivers/net/ice/ice_ethdev.h        |   6 +
> >  drivers/net/ice/ice_switch_filter.c | 502
> > ++++++++++++++++++++++++++++++++++++
> >  drivers/net/ice/ice_switch_filter.h |  28 ++
> >  drivers/net/ice/meson.build         |   3 +-
> >  5 files changed, 539 insertions(+), 1 deletion(-)  create mode 100644
> > drivers/net/ice/ice_switch_filter.c
> >  create mode 100644 drivers/net/ice/ice_switch_filter.h
> >
> > diff --git a/drivers/net/ice/Makefile b/drivers/net/ice/Makefile index
> > 0e5c55e..b10d826 100644
> > --- a/drivers/net/ice/Makefile
> > +++ b/drivers/net/ice/Makefile
> > @@ -60,6 +60,7 @@ ifeq ($(CONFIG_RTE_ARCH_X86), y)
> >  SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_rxtx_vec_sse.c  endif
> >
> > +SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_switch_filter.c
> >  ifeq ($(findstring
> > RTE_MACHINE_CPUFLAG_AVX2,$(CFLAGS)),RTE_MACHINE_CPUFLAG_AVX2)
> >  	CC_AVX2_SUPPORT=1
> >  else
> > diff --git a/drivers/net/ice/ice_ethdev.h
> > b/drivers/net/ice/ice_ethdev.h index 1385afa..67a358a 100644
> > --- a/drivers/net/ice/ice_ethdev.h
> > +++ b/drivers/net/ice/ice_ethdev.h
> > @@ -234,6 +234,12 @@ struct ice_vsi {
> >  	bool offset_loaded;
> >  };
> >
> > +/* Struct to store flow created. */
> > +struct rte_flow {
> > +	TAILQ_ENTRY(rte_flow) node;
> > +void *rule;
> > +};
> > +
> >  struct ice_pf {
> >  	struct ice_adapter *adapter; /* The adapter this PF associate to */
> >  	struct ice_vsi *main_vsi; /* pointer to main VSI structure */ diff
> > --git a/drivers/net/ice/ice_switch_filter.c
> > b/drivers/net/ice/ice_switch_filter.c
> > new file mode 100644
> > index 0000000..e679675
> > --- /dev/null
> > +++ b/drivers/net/ice/ice_switch_filter.c
> > @@ -0,0 +1,502 @@
> 
> SPDX-License-Identifier missing.

Ok, Updated in v3
> 
> > +#include <sys/queue.h>
> > +#include <stdio.h>
> > +#include <errno.h>
> > +#include <stdint.h>
> > +#include <string.h>
> > +#include <unistd.h>
> > +#include <stdarg.h>
> > +
> > +#include <rte_debug.h>
> > +#include <rte_ether.h>
> > +#include <rte_ethdev_driver.h>
> > +#include <rte_log.h>
> > +#include <rte_malloc.h>
> > +#include <rte_eth_ctrl.h>
> > +#include <rte_tailq.h>
> > +#include <rte_flow_driver.h>
> > +
> > +#include "ice_logs.h"
> > +#include "base/ice_type.h"
> > +#include "ice_switch_filter.h"
> > +
> > +static int
> > +ice_parse_switch_filter(
> > +			const struct rte_flow_item pattern[],
> > +			const struct rte_flow_action actions[],
> > +			struct rte_flow_error *error,
> > +			struct ice_adv_rule_info *rule_info,
> > +			struct ice_adv_lkup_elem **lkup_list,
> > +			uint16_t *lkups_num)
> > +{
> > +	const struct rte_flow_item *item = pattern;
> > +	enum rte_flow_item_type item_type;
> > +	const struct rte_flow_item_eth *eth_spec, *eth_mask;
> > +	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
> > +	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
> > +	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
> > +	const struct rte_flow_item_udp *udp_spec, *udp_mask;
> > +	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
> > +	const struct rte_flow_item_nvgre  *nvgre_spec, *nvgre_mask;
> > +	const struct rte_flow_item_vxlan  *vxlan_spec, *vxlan_mask;
> > +	struct ice_adv_lkup_elem *list;
> > +	uint16_t i, j, t = 0;
> > +	uint16_t item_num = 0;
> > +	enum ice_sw_tunnel_type tun_type = ICE_NON_TUN;
> > +
> > +	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
> > +		if (item->type == RTE_FLOW_ITEM_TYPE_ETH ||
> > +			item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
> > +			item->type == RTE_FLOW_ITEM_TYPE_IPV6 ||
> > +			item->type == RTE_FLOW_ITEM_TYPE_UDP ||
> > +			item->type == RTE_FLOW_ITEM_TYPE_TCP ||
> > +			item->type == RTE_FLOW_ITEM_TYPE_SCTP ||
> > +			item->type == RTE_FLOW_ITEM_TYPE_VXLAN ||
> > +			item->type == RTE_FLOW_ITEM_TYPE_NVGRE)
> > +			item_num++;
> > +	}
> > +
> > +	list = rte_zmalloc(NULL, item_num * sizeof(*list), 0);
> > +	if (!list) {
> > +		rte_flow_error_set(error, EINVAL,
> > +				   RTE_FLOW_ERROR_TYPE_ITEM, actions,
> > +				   "no memory malloc");
> 
> {RTE_FLOW_ERROR_TYPE_ITEM_NUM, item, "No memory for PMD internal
> items"} is more appropriate.
> Refer to i40e implementation.

OK, updated in v3.

> 
> > +		goto out;
> > +	}
> > +	*lkup_list = list;
> > +
> > +	for (item = pattern, i = 0; item->type !=
> > +			RTE_FLOW_ITEM_TYPE_END; item++, i++) {
> 
> It seems we don't need the "i" variable.

OK, updated in v3.

> 
> > +		item_type = item->type;
> > +
> > +		switch (item_type) {
> > +		case RTE_FLOW_ITEM_TYPE_ETH:
> > +			eth_spec = item->spec;
> > +			eth_mask = item->mask;
> > +			if (eth_spec && eth_mask) {
> > +				list[t].type = (tun_type == ICE_NON_TUN) ?
> > +					ICE_MAC_OFOS : ICE_MAC_IL;
> > +				for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
> > +					if (eth_mask->src.addr_bytes[j] ==
> > +								UINT8_MAX) {
> > +						list[t].h_u.eth_hdr.
> > +							src_addr[j] =
> > +						eth_spec->src.addr_bytes[j];
> > +						list[t].m_u.eth_hdr.
> > +							src_addr[j] =
> > +						eth_mask->src.addr_bytes[j];
> > +					}
> > +					if (eth_mask->dst.addr_bytes[j] ==
> > +								UINT8_MAX) {
> > +						list[t].h_u.eth_hdr.
> > +							dst_addr[j] =
> > +						eth_spec->dst.addr_bytes[j];
> > +						list[t].m_u.eth_hdr.
> > +							dst_addr[j] =
> > +						eth_mask->dst.addr_bytes[j];
> > +					}
> > +				}
> > +				if (eth_mask->type == UINT16_MAX) {
> > +					list[t].h_u.eth_hdr.ethtype_id =
> > +					rte_be_to_cpu_16(eth_spec->type);
> > +					list[t].m_u.eth_hdr.ethtype_id =
> > +						UINT16_MAX;
> > +				}
> > +				t++;
> 
> A lot of "t++" below, can we move it outside the switch{ } to have only one "t++"?

For now we cannot, because the shared code cannot handle the (!eth_spec && !eth_mask) case if we always do t++.
In that case the item would still occupy list[t], and the shared code would report an error.
> 
> > +			} else if (!eth_spec && !eth_mask) {
> > +				list[t].type = (tun_type == ICE_NON_TUN) ?
> > +					ICE_MAC_OFOS : ICE_MAC_IL;
> > +			}
> > +			break;
> > +
> > +		case RTE_FLOW_ITEM_TYPE_IPV4:
> > +			ipv4_spec = item->spec;
> > +			ipv4_mask = item->mask;
> > +			if (ipv4_spec && ipv4_mask) {
> > +				list[t].type = (tun_type == ICE_NON_TUN) ?
> > +					ICE_IPV4_OFOS : ICE_IPV4_IL;
> > +				if (ipv4_mask->hdr.src_addr == UINT32_MAX)
> > {
> > +					list[t].h_u.ipv4_hdr.src_addr =
> > +						ipv4_spec->hdr.src_addr;
> > +					list[t].m_u.ipv4_hdr.src_addr =
> > +						UINT32_MAX;
> > +				}
> > +				if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
> > {
> > +					list[t].h_u.ipv4_hdr.dst_addr =
> > +						ipv4_spec->hdr.dst_addr;
> > +					list[t].m_u.ipv4_hdr.dst_addr =
> > +						UINT32_MAX;
> > +				}
> > +				if (ipv4_mask->hdr.time_to_live ==
> > UINT8_MAX) {
> > +					list[t].h_u.ipv4_hdr.time_to_live =
> > +						ipv4_spec->hdr.time_to_live;
> > +					list[t].m_u.ipv4_hdr.time_to_live =
> > +						UINT8_MAX;
> > +				}
> > +				if (ipv4_mask->hdr.next_proto_id ==
> > UINT8_MAX) {
> > +					list[t].h_u.ipv4_hdr.protocol =
> > +						ipv4_spec-
> > >hdr.next_proto_id;
> > +					list[t].m_u.ipv4_hdr.protocol =
> > +						UINT8_MAX;
> > +				}
> > +				if (ipv4_mask->hdr.type_of_service ==
> > +						UINT8_MAX) {
> > +					list[t].h_u.ipv4_hdr.tos =
> > +						ipv4_spec-
> > >hdr.type_of_service;
> > +					list[t].m_u.ipv4_hdr.tos = UINT8_MAX;
> > +				}
> > +				t++;
> > +			} else if (!ipv4_spec && !ipv4_mask) {
> > +				list[t].type = (tun_type == ICE_NON_TUN) ?
> > +					ICE_IPV4_OFOS : ICE_IPV4_IL;
> > +			}
> > +			break;
> > +
> > +		case RTE_FLOW_ITEM_TYPE_IPV6:
> > +			ipv6_spec = item->spec;
> > +			ipv6_mask = item->mask;
> > +			if (ipv6_spec && ipv6_mask) {
> > +				list[t].type = (tun_type == ICE_NON_TUN) ?
> > +					ICE_IPV6_OFOS : ICE_IPV6_IL;
> > +				for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
> > +					if (ipv6_mask->hdr.src_addr[j] ==
> > +								UINT8_MAX) {
> > +						list[t].h_u.ice_ipv6_ofos_hdr.
> > +							src_addr[j] =
> > +						ipv6_spec->hdr.src_addr[j];
> > +						list[t].m_u.ice_ipv6_ofos_hdr.
> > +							src_addr[j] =
> > +						ipv6_mask->hdr.src_addr[j];
> > +					}
> > +					if (ipv6_mask->hdr.dst_addr[j] ==
> > +								UINT8_MAX) {
> > +						list[t].h_u.ice_ipv6_ofos_hdr.
> > +							dst_addr[j] =
> > +						ipv6_spec->hdr.dst_addr[j];
> > +						list[t].m_u.ice_ipv6_ofos_hdr.
> > +							dst_addr[j] =
> > +						ipv6_mask->hdr.dst_addr[j];
> > +					}
> > +				}
> > +				if (ipv6_mask->hdr.proto == UINT8_MAX) {
> > +
> > 	list[t].h_u.ice_ipv6_ofos_hdr.next_hdr =
> > +						ipv6_spec->hdr.proto;
> > +
> > 	list[t].m_u.ice_ipv6_ofos_hdr.next_hdr =
> > +						UINT8_MAX;
> > +				}
> > +				if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
> > {
> > +					list[t].h_u.ice_ipv6_ofos_hdr.
> > +					hop_limit = ipv6_spec-
> > >hdr.hop_limits;
> > +					list[t].m_u.ice_ipv6_ofos_hdr.
> > +						hop_limit  = UINT8_MAX;
> > +				}
> > +				t++;
> > +			} else if (!ipv6_spec && !ipv6_mask) {
> > +				list[t].type = (tun_type == ICE_NON_TUN) ?
> > +					ICE_IPV4_OFOS : ICE_IPV4_IL;
> > +			}
> > +			break;
> > +
> > +		case RTE_FLOW_ITEM_TYPE_UDP:
> > +			udp_spec = item->spec;
> > +			udp_mask = item->mask;
> > +			if (udp_spec && udp_mask) {
> > +				list[t].type = ICE_UDP_ILOS;
> > +				if (udp_mask->hdr.src_port == UINT16_MAX)
> > {
> > +					list[t].h_u.l4_hdr.src_port =
> > +						udp_spec->hdr.src_port;
> > +					list[t].m_u.l4_hdr.src_port =
> > +						udp_mask->hdr.src_port;
> > +				}
> > +				if (udp_mask->hdr.dst_port == UINT16_MAX)
> > {
> > +					list[t].h_u.l4_hdr.dst_port =
> > +						udp_spec->hdr.dst_port;
> > +					list[t].m_u.l4_hdr.dst_port =
> > +						udp_mask->hdr.dst_port;
> > +				}
> > +				t++;
> > +			} else if (!udp_spec && !udp_mask) {
> > +				list[t].type = ICE_UDP_ILOS;
> > +			}
> > +			break;
> > +
> > +		case RTE_FLOW_ITEM_TYPE_TCP:
> > +			tcp_spec = item->spec;
> > +			tcp_mask = item->mask;
> > +			if (tcp_spec && tcp_mask) {
> > +				list[t].type = ICE_TCP_IL;
> > +				if (tcp_mask->hdr.src_port == UINT16_MAX) {
> > +					list[t].h_u.l4_hdr.src_port =
> > +						tcp_spec->hdr.src_port;
> > +					list[t].m_u.l4_hdr.src_port =
> > +						tcp_mask->hdr.src_port;
> > +				}
> > +				if (tcp_mask->hdr.dst_port == UINT16_MAX) {
> > +					list[t].h_u.l4_hdr.dst_port =
> > +						tcp_spec->hdr.dst_port;
> > +					list[t].m_u.l4_hdr.dst_port =
> > +						tcp_mask->hdr.dst_port;
> > +				}
> > +				t++;
> > +			} else if (!tcp_spec && !tcp_mask) {
> > +				list[t].type = ICE_TCP_IL;
> > +			}
> > +			break;
> > +
> > +		case RTE_FLOW_ITEM_TYPE_SCTP:
> > +			sctp_spec = item->spec;
> > +			sctp_mask = item->mask;
> > +			if (sctp_spec && sctp_mask) {
> > +				list[t].type = ICE_SCTP_IL;
> > +				if (sctp_mask->hdr.src_port == UINT16_MAX)
> > {
> > +					list[t].h_u.sctp_hdr.src_port =
> > +						sctp_spec->hdr.src_port;
> > +					list[t].m_u.sctp_hdr.src_port =
> > +						sctp_mask->hdr.src_port;
> > +				}
> > +				if (sctp_mask->hdr.dst_port == UINT16_MAX)
> > {
> > +					list[t].h_u.sctp_hdr.dst_port =
> > +						sctp_spec->hdr.dst_port;
> > +					list[t].m_u.sctp_hdr.dst_port =
> > +						sctp_mask->hdr.dst_port;
> > +				}
> > +				t++;
> > +			} else if (!sctp_spec && !sctp_mask) {
> > +				list[t].type = ICE_SCTP_IL;
> > +			}
> > +			break;
> > +
> > +		case RTE_FLOW_ITEM_TYPE_VXLAN:
> > +			vxlan_spec = item->spec;
> > +			vxlan_mask = item->mask;
> > +			tun_type = ICE_SW_TUN_VXLAN;
> > +			if (vxlan_spec && vxlan_mask) {
> > +				list[t].type = ICE_VXLAN;
> > +				if (vxlan_mask->vni[0] == UINT8_MAX &&
> > +					vxlan_mask->vni[1] == UINT8_MAX
> > &&
> > +					vxlan_mask->vni[2] == UINT8_MAX) {
> > +					list[t].h_u.tnl_hdr.vni =
> > +						(vxlan_spec->vni[1] << 8) |
> > +						vxlan_spec->vni[0];
> > +					list[t].m_u.tnl_hdr.vni =
> > +						UINT16_MAX;
> 
> vxlan_spec->vni[2] does not need to be put into the list?

The old shared code only supports a 16-bit VNI, not 24 bits; that is a shared code bug.
We will update it to 24 bits in v3.
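Roughly something like the following, assuming the shared code's tnl_hdr VNI field is widened to hold the full 24 bits (the byte ordering below just extends the v2 code's convention and is not final):

	if (vxlan_mask->vni[0] == UINT8_MAX &&
		vxlan_mask->vni[1] == UINT8_MAX &&
		vxlan_mask->vni[2] == UINT8_MAX) {
		/* extend the v2 byte order to the third VNI byte */
		list[t].h_u.tnl_hdr.vni =
			((uint32_t)vxlan_spec->vni[2] << 16) |
			((uint32_t)vxlan_spec->vni[1] << 8) |
			vxlan_spec->vni[0];
		list[t].m_u.tnl_hdr.vni = 0x00FFFFFF;
	}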


> 
> > +				}
> > +				t++;
> > +			} else if (!vxlan_spec && !vxlan_mask) {
> > +				list[t].type = ICE_VXLAN;
> > +			}
> > +			break;
> > +
> > +		case RTE_FLOW_ITEM_TYPE_NVGRE:
> > +			nvgre_spec = item->spec;
> > +			nvgre_mask = item->mask;
> > +			tun_type = ICE_SW_TUN_NVGRE;
> > +			if (nvgre_spec && nvgre_mask) {
> > +				list[t].type = ICE_NVGRE;
> > +				if (nvgre_mask->tni[0] == UINT8_MAX &&
> > +					nvgre_mask->tni[1] == UINT8_MAX
> > &&
> > +					nvgre_mask->tni[2] == UINT8_MAX) {
> > +					list[t].h_u.nvgre_hdr.tni =
> > +						(nvgre_spec->tni[1] << 8) |
> > +						nvgre_spec->tni[0];
> > +					list[t].m_u.nvgre_hdr.tni =
> > +						UINT16_MAX;
> > +				}
> > +				t++;
> > +			} else if (!nvgre_spec && !nvgre_mask) {
> > +				list[t].type = ICE_NVGRE;
> > +			}
> > +			break;
> > +
> > +		case RTE_FLOW_ITEM_TYPE_VOID:
> > +		case RTE_FLOW_ITEM_TYPE_END:
> > +			break;
> > +
> > +		default:
> > +			rte_flow_error_set(error, EINVAL,
> > +				   RTE_FLOW_ERROR_TYPE_ITEM, actions,
> > +				   "Invalid pattern item.");
> > +			goto out;
> > +		}
> > +	}
> > +
> > +	rule_info->tun_type = tun_type;
> > +	*lkups_num = t;
> > +
> > +	return 0;
> > +out:
> 
> We may need to free the allocated list before return.

No, the list[] memory is still used by a later function, so we cannot free it here.

> 
> > +	return -rte_errno;
> > +}
> > +
> > +/* By now ice switch filter action code implement only
> > +* supports QUEUE or DROP.
> > +*/
> > +static int
> > +ice_parse_switch_action(struct ice_pf *pf,
> > +				 const struct rte_flow_action *actions,
> > +				 struct rte_flow_error *error,
> > +				 struct ice_adv_rule_info *rule_info) {
> > +	struct ice_hw *hw = ICE_PF_TO_HW(pf);
> > +	struct ice_vsi *vsi = pf->main_vsi;
> > +	const struct rte_flow_action *act;
> > +	const struct rte_flow_action_queue *act_q;
> > +	uint16_t base_queue, index = 0;
> > +	uint32_t reg;
> > +
> > +	/* Check if the first non-void action is QUEUE or DROP. */
> > +	NEXT_ITEM_OF_ACTION(act, actions, index);
> > +	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
> > +	    act->type != RTE_FLOW_ACTION_TYPE_DROP) {
> > +		rte_flow_error_set(error, EINVAL,
> > RTE_FLOW_ERROR_TYPE_ACTION,
> > +				   act, "Not supported action.");
> > +		return -rte_errno;
> > +	}
> > +	reg = ICE_READ_REG(hw, PFLAN_RX_QALLOC);
> > +	if (reg & PFLAN_RX_QALLOC_VALID_M) {
> > +		base_queue = reg & PFLAN_RX_QALLOC_FIRSTQ_M;
> 
> Can we get this register info earlier in dev_start() or somewhere else? Then we
> can use the base_queue directly.

Maybe; the value could also be reused for other cases.
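If we do that, a rough sketch could look like the code below (assuming a new base_queue field is added to struct ice_pf and this runs once, e.g. at device start):

static int
ice_cache_base_queue(struct ice_pf *pf)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	uint32_t reg = ICE_READ_REG(hw, PFLAN_RX_QALLOC);

	if (!(reg & PFLAN_RX_QALLOC_VALID_M))
		return -EIO;

	/* pf->base_queue is a hypothetical new field, read once and reused */
	pf->base_queue = reg & PFLAN_RX_QALLOC_FIRSTQ_M;
	return 0;
}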


> 
> > +	} else {
> > +		rte_flow_error_set(error, EINVAL,
> > +			RTE_FLOW_ERROR_TYPE_ACTION,
> > +			act, "Invalid queue register");
> > +		return -rte_errno;
> > +	}
> > +	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
> > +		act_q = act->conf;
> > +		rule_info->sw_act.fltr_act = ICE_FWD_TO_Q;
> > +		rule_info->sw_act.fwd_id.q_id = base_queue + act_q->index;
> > +		if (act_q->index >= pf->dev_data->nb_rx_queues) {
> > +			rte_flow_error_set(error, EINVAL,
> > +				RTE_FLOW_ERROR_TYPE_ACTION,
> > +				act, "Invalid queue ID for"
> > +				" switch filter.");
> > +			return -rte_errno;
> > +		}
> > +	} else {
> > +		rule_info->sw_act.fltr_act = ICE_DROP_PACKET;
> > +	}
> > +
> > +	rule_info->sw_act.vsi_handle = vsi->idx;
> > +	rule_info->rx = 1;
> > +	rule_info->sw_act.src = vsi->idx;
> > +
> > +	/* Check if the next non-void item is END */
> > +	index++;
> > +	NEXT_ITEM_OF_ACTION(act, actions, index);
> > +	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
> > +		rte_flow_error_set(error, EINVAL,
> > RTE_FLOW_ERROR_TYPE_ACTION,
> > +				   act, "Not supported action.");
> > +		return -rte_errno;
> > +	}
> > +
> > +	return 0;
> > +}
> > +
> > +static int
> > +ice_switch_rule_set(struct ice_pf *pf,
> > +			struct ice_adv_lkup_elem *list,
> > +			uint16_t lkups_cnt,
> > +			struct ice_adv_rule_info *rule_info,
> > +			struct rte_flow *flow)
> > +{
> > +	struct ice_hw *hw = ICE_PF_TO_HW(pf);
> > +	int ret;
> > +	struct ice_rule_query_data rule_added = {0};
> > +	struct ice_rule_query_data *filter_ptr;
> > +
> > +	if (lkups_cnt > ICE_MAX_CHAIN_WORDS) {
> > +		PMD_DRV_LOG(ERR, "item number too large for rule");
> 
> Why not rte_flow_error_set() to report error?

OK, updated in v3.
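For example, assuming an error parameter is threaded through to ice_switch_rule_set() in v3, the check could become something like:

	if (lkups_cnt > ICE_MAX_CHAIN_WORDS)
		return rte_flow_error_set(error, ENOTSUP,
				RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				"item number too large for rule");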

> 
> > +		return -ENOTSUP;
> > +	}
> > +	if (!list) {
> > +		PMD_DRV_LOG(ERR, "lookup list should not be NULL");
> 
> Ditto.
> 
> > +		return -ENOTSUP;
> > +	}
> > +
> > +	ret = ice_add_adv_rule(hw, list, lkups_cnt, rule_info, &rule_added);
> > +
> > +	if (!ret) {
> > +		filter_ptr = rte_zmalloc("ice_switch_filter",
> > +			sizeof(struct ice_rule_query_data), 0);
> > +		if (!filter_ptr) {
> > +			PMD_DRV_LOG(ERR, "failed to allocate memory");
> > +			return -EINVAL;
> > +		}
> > +		flow->rule = filter_ptr;
> > +		rte_memcpy(filter_ptr,
> > +			&rule_added,
> > +			sizeof(struct ice_rule_query_data));
> > +	}
> > +
> > +	return ret;
> > +}
> > +
> > +int
> > +ice_create_switch_filter(struct ice_pf *pf,
> > +			const struct rte_flow_item pattern[],
> > +			const struct rte_flow_action actions[],
> > +			struct rte_flow *flow,
> > +			struct rte_flow_error *error)
> > +{
> > +	int ret = 0;
> > +	struct ice_adv_rule_info rule_info = {0};
> > +	struct ice_adv_lkup_elem *list = NULL;
> > +	uint16_t lkups_num = 0;
> > +
> > +	ret = ice_parse_switch_filter(pattern, actions, error,
> > +			&rule_info, &list, &lkups_num);
> > +	if (ret)
> > +		goto out;
> > +
> > +	ret = ice_parse_switch_action(pf, actions, error, &rule_info);
> > +	if (ret)
> > +		goto out;
> > +
> > +	ret = ice_switch_rule_set(pf, list, lkups_num, &rule_info, flow);
> > +	if (ret)
> > +		goto out;
> > +
> > +	rte_free(list);
> > +	return 0;
> > +
> > +out:
> > +	rte_free(list);
> > +
> > +	return -rte_errno;
> > +}
> > +
> > +int
> > +ice_destroy_switch_filter(struct ice_pf *pf,
> > +			struct rte_flow *flow)
> > +{
> > +	struct ice_hw *hw = ICE_PF_TO_HW(pf);
> > +	int ret;
> > +	struct ice_rule_query_data *filter_ptr;
> > +	struct ice_rule_query_data rule_added;
> > +
> > +	filter_ptr = (struct ice_rule_query_data *)
> > +			flow->rule;
> > +	rte_memcpy(&rule_added, filter_ptr,
> > +		sizeof(struct ice_rule_query_data));
> > +
> > +	if (!filter_ptr) {
> > +		PMD_DRV_LOG(ERR, "no such flow"
> > +			    " create by switch filter");
> > +		return -EINVAL;
> > +	}
> 
> We should do this check at least before rte_memcpy.

OK, updated in v3.

> 
> > +
> > +	ret = ice_rem_adv_rule_by_id(hw, &rule_added);
> 
> We can use filter_ptr directly for the switch rule delete.


OK, updated in v3.
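Combining both comments, a rough sketch of the reworked function (names kept from v2, not the final v3 code) could be:

int
ice_destroy_switch_filter(struct ice_pf *pf,
			struct rte_flow *flow)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_rule_query_data *filter_ptr = flow->rule;
	int ret;

	/* validate the pointer before touching its contents */
	if (!filter_ptr) {
		PMD_DRV_LOG(ERR, "no such flow created by switch filter");
		return -EINVAL;
	}

	/* pass the stored rule data directly, no intermediate copy needed */
	ret = ice_rem_adv_rule_by_id(hw, filter_ptr);

	rte_free(filter_ptr);
	return ret;
}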

> 
> > +
> > +	rte_free(filter_ptr);
> > +
> > +	return ret;
> > +}
> > +
> > +void
> > +ice_free_switch_filter_rule(void *rule) {
> > +	struct ice_rule_query_data *filter_ptr;
> > +
> > +	filter_ptr = (struct ice_rule_query_data *)rule;
> > +
> > +	rte_free(filter_ptr);
> > +}
> > diff --git a/drivers/net/ice/ice_switch_filter.h
> > b/drivers/net/ice/ice_switch_filter.h
> > new file mode 100644
> > index 0000000..957d0d1
> > --- /dev/null
> > +++ b/drivers/net/ice/ice_switch_filter.h
> > @@ -0,0 +1,28 @@
> 
> Also a license is needed in new file.


OK, updated in v3.

> 
> BRs,
> Xiao
> 
> > +#ifndef _ICE_SWITCH_FILTER_H_
> > +#define _ICE_SWITCH_FILTER_H_


^ permalink raw reply	[flat|nested] 73+ messages in thread

* Re: [dpdk-dev] [PATCH v2 1/3] net/ice: enable switch filter
  2019-06-12  7:50   ` [dpdk-dev] [PATCH v2 1/3] net/ice: enable switch filter Qiming Yang
  2019-06-13  8:23     ` Wang, Xiao W
@ 2019-06-17  5:27     ` Xing, Beilei
  2019-06-17  8:23       ` Zhao1, Wei
  2019-06-17  8:51       ` Zhao1, Wei
  2019-06-18  9:40     ` Ye Xiaolong
  2 siblings, 2 replies; 73+ messages in thread
From: Xing, Beilei @ 2019-06-17  5:27 UTC (permalink / raw)
  To: Yang, Qiming, dev; +Cc: Zhao1, Wei



> -----Original Message-----
> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Qiming Yang
> Sent: Wednesday, June 12, 2019 3:50 PM
> To: dev@dpdk.org
> Cc: Zhao1, Wei <wei.zhao1@intel.com>
> Subject: [dpdk-dev] [PATCH v2 1/3] net/ice: enable switch filter
> 
> From: wei zhao <wei.zhao1@intel.com>
> 
> The patch enables the backend of rte_flow. It transfers rte_flow_xxx to
> device specific data structure and configures packet process engine's binary
> classifier
> (switch) properly.
> 
> Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
> ---
>  drivers/net/ice/Makefile            |   1 +
>  drivers/net/ice/ice_ethdev.h        |   6 +
>  drivers/net/ice/ice_switch_filter.c | 502
> ++++++++++++++++++++++++++++++++++++
>  drivers/net/ice/ice_switch_filter.h |  28 ++
>  drivers/net/ice/meson.build         |   3 +-
>  5 files changed, 539 insertions(+), 1 deletion(-)  create mode 100644
> drivers/net/ice/ice_switch_filter.c
>  create mode 100644 drivers/net/ice/ice_switch_filter.h
> 
> diff --git a/drivers/net/ice/Makefile b/drivers/net/ice/Makefile index
> 0e5c55e..b10d826 100644
> --- a/drivers/net/ice/Makefile
> +++ b/drivers/net/ice/Makefile
> @@ -60,6 +60,7 @@ ifeq ($(CONFIG_RTE_ARCH_X86), y)
>  SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_rxtx_vec_sse.c  endif
> 
> +SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_switch_filter.c
>  ifeq ($(findstring
> RTE_MACHINE_CPUFLAG_AVX2,$(CFLAGS)),RTE_MACHINE_CPUFLAG_AVX2)
>  	CC_AVX2_SUPPORT=1
>  else
> diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h index
> 1385afa..67a358a 100644
> --- a/drivers/net/ice/ice_ethdev.h
> +++ b/drivers/net/ice/ice_ethdev.h
> @@ -234,6 +234,12 @@ struct ice_vsi {
>  	bool offset_loaded;
>  };
> 
> +/* Struct to store flow created. */
> +struct rte_flow {
> +	TAILQ_ENTRY(rte_flow) node;
> +void *rule;
> +};
> +
>  struct ice_pf {
>  	struct ice_adapter *adapter; /* The adapter this PF associate to */
>  	struct ice_vsi *main_vsi; /* pointer to main VSI structure */ diff --git
> a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
> new file mode 100644
> index 0000000..e679675
> --- /dev/null
> +++ b/drivers/net/ice/ice_switch_filter.c
> @@ -0,0 +1,502 @@
> +#include <sys/queue.h>
> +#include <stdio.h>
> +#include <errno.h>
> +#include <stdint.h>
> +#include <string.h>
> +#include <unistd.h>
> +#include <stdarg.h>
> +
> +#include <rte_debug.h>
> +#include <rte_ether.h>
> +#include <rte_ethdev_driver.h>
> +#include <rte_log.h>
> +#include <rte_malloc.h>
> +#include <rte_eth_ctrl.h>
> +#include <rte_tailq.h>
> +#include <rte_flow_driver.h>
> +
> +#include "ice_logs.h"
> +#include "base/ice_type.h"
> +#include "ice_switch_filter.h"
> +
> +static int
> +ice_parse_switch_filter(
> +			const struct rte_flow_item pattern[],
> +			const struct rte_flow_action actions[],
> +			struct rte_flow_error *error,
> +			struct ice_adv_rule_info *rule_info,
> +			struct ice_adv_lkup_elem **lkup_list,
> +			uint16_t *lkups_num)
> +{
> +	const struct rte_flow_item *item = pattern;
> +	enum rte_flow_item_type item_type;
> +	const struct rte_flow_item_eth *eth_spec, *eth_mask;
> +	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
> +	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
> +	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
> +	const struct rte_flow_item_udp *udp_spec, *udp_mask;
> +	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
> +	const struct rte_flow_item_nvgre  *nvgre_spec, *nvgre_mask;
> +	const struct rte_flow_item_vxlan  *vxlan_spec, *vxlan_mask;
> +	struct ice_adv_lkup_elem *list;
> +	uint16_t i, j, t = 0;
> +	uint16_t item_num = 0;
> +	enum ice_sw_tunnel_type tun_type = ICE_NON_TUN;
> +
> +	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
> +		if (item->type == RTE_FLOW_ITEM_TYPE_ETH ||
> +			item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
> +			item->type == RTE_FLOW_ITEM_TYPE_IPV6 ||
> +			item->type == RTE_FLOW_ITEM_TYPE_UDP ||
> +			item->type == RTE_FLOW_ITEM_TYPE_TCP ||
> +			item->type == RTE_FLOW_ITEM_TYPE_SCTP ||
> +			item->type == RTE_FLOW_ITEM_TYPE_VXLAN ||
> +			item->type == RTE_FLOW_ITEM_TYPE_NVGRE)
> +			item_num++;
> +	}

I think the pattern is already verified in the generic flow framework, so how about removing the condition here?

...

> +
> +/* By now ice switch filter action code implement only
> +* supports QUEUE or DROP.
> +*/
> +static int
> +ice_parse_switch_action(struct ice_pf *pf,
> +				 const struct rte_flow_action *actions,
> +				 struct rte_flow_error *error,
> +				 struct ice_adv_rule_info *rule_info) {
> +	struct ice_hw *hw = ICE_PF_TO_HW(pf);
> +	struct ice_vsi *vsi = pf->main_vsi;
> +	const struct rte_flow_action *act;
> +	const struct rte_flow_action_queue *act_q;
> +	uint16_t base_queue, index = 0;
> +	uint32_t reg;
> +
> +	/* Check if the first non-void action is QUEUE or DROP. */
> +	NEXT_ITEM_OF_ACTION(act, actions, index);
> +	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
> +	    act->type != RTE_FLOW_ACTION_TYPE_DROP) {
> +		rte_flow_error_set(error, EINVAL,
> RTE_FLOW_ERROR_TYPE_ACTION,
> +				   act, "Not supported action.");
> +		return -rte_errno;
> +	}
> +	reg = ICE_READ_REG(hw, PFLAN_RX_QALLOC);
> +	if (reg & PFLAN_RX_QALLOC_VALID_M) {
> +		base_queue = reg & PFLAN_RX_QALLOC_FIRSTQ_M;
> +	} else {
> +		rte_flow_error_set(error, EINVAL,
> +			RTE_FLOW_ERROR_TYPE_ACTION,
> +			act, "Invalid queue register");
> +		return -rte_errno;
> +	}
> +	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
> +		act_q = act->conf;
> +		rule_info->sw_act.fltr_act = ICE_FWD_TO_Q;
> +		rule_info->sw_act.fwd_id.q_id = base_queue + act_q->index;
> +		if (act_q->index >= pf->dev_data->nb_rx_queues) {
> +			rte_flow_error_set(error, EINVAL,
> +				RTE_FLOW_ERROR_TYPE_ACTION,
> +				act, "Invalid queue ID for"
> +				" switch filter.");
> +			return -rte_errno;
> +		}
> +	} else {
> +		rule_info->sw_act.fltr_act = ICE_DROP_PACKET;
> +	}
> +
> +	rule_info->sw_act.vsi_handle = vsi->idx;
> +	rule_info->rx = 1;
> +	rule_info->sw_act.src = vsi->idx;
> +
> +	/* Check if the next non-void item is END */
> +	index++;
> +	NEXT_ITEM_OF_ACTION(act, actions, index);
> +	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
> +		rte_flow_error_set(error, EINVAL,
> RTE_FLOW_ERROR_TYPE_ACTION,
> +				   act, "Not supported action.");
> +		return -rte_errno;
> +	}
> +
> +	return 0;
> +}


How about using a supported-actions array to replace NEXT_ITEM_OF_ACTION, just like the pattern?
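Something along these lines (illustrative names only, not required to be the final form):

static const enum rte_flow_action_type ice_supported_actions[] = {
	RTE_FLOW_ACTION_TYPE_QUEUE,
	RTE_FLOW_ACTION_TYPE_DROP,
	RTE_FLOW_ACTION_TYPE_END,
};

static int
ice_switch_action_supported(enum rte_flow_action_type type)
{
	uint32_t i;

	/* walk the whitelist instead of skipping VOID actions by hand */
	for (i = 0; i < RTE_DIM(ice_supported_actions); i++) {
		if (type == ice_supported_actions[i])
			return 1;
	}
	return 0;
}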


> +
> +static int
> +ice_switch_rule_set(struct ice_pf *pf,
> +			struct ice_adv_lkup_elem *list,
> +			uint16_t lkups_cnt,
> +			struct ice_adv_rule_info *rule_info,
> +			struct rte_flow *flow)
> +{
> +	struct ice_hw *hw = ICE_PF_TO_HW(pf);
> +	int ret;
> +	struct ice_rule_query_data rule_added = {0};
> +	struct ice_rule_query_data *filter_ptr;
> +
> +	if (lkups_cnt > ICE_MAX_CHAIN_WORDS) {
> +		PMD_DRV_LOG(ERR, "item number too large for rule");
> +		return -ENOTSUP;
> +	}
> +	if (!list) {
> +		PMD_DRV_LOG(ERR, "lookup list should not be NULL");
> +		return -ENOTSUP;
> +	}
> +
> +	ret = ice_add_adv_rule(hw, list, lkups_cnt, rule_info, &rule_added);
> +
> +	if (!ret) {
> +		filter_ptr = rte_zmalloc("ice_switch_filter",
> +			sizeof(struct ice_rule_query_data), 0);
> +		if (!filter_ptr) {
> +			PMD_DRV_LOG(ERR, "failed to allocate memory");
> +			return -EINVAL;
> +		}
> +		flow->rule = filter_ptr;
> +		rte_memcpy(filter_ptr,
> +			&rule_added,
> +			sizeof(struct ice_rule_query_data));
> +	}
> +
> +	return ret;
> +}
> +
> +int
> +ice_create_switch_filter(struct ice_pf *pf,
> +			const struct rte_flow_item pattern[],
> +			const struct rte_flow_action actions[],
> +			struct rte_flow *flow,
> +			struct rte_flow_error *error)
> +{
> +	int ret = 0;
> +	struct ice_adv_rule_info rule_info = {0};
> +	struct ice_adv_lkup_elem *list = NULL;
> +	uint16_t lkups_num = 0;
> +
> +	ret = ice_parse_switch_filter(pattern, actions, error,
> +			&rule_info, &list, &lkups_num);
> +	if (ret)
> +		goto out;
> +
> +	ret = ice_parse_switch_action(pf, actions, error, &rule_info);
> +	if (ret)
> +		goto out;
> +
> +	ret = ice_switch_rule_set(pf, list, lkups_num, &rule_info, flow);
> +	if (ret)
> +		goto out;
> +
> +	rte_free(list);

Why is the list not allocated inside this function?

> +	return 0;
> +
> +out:
> +	rte_free(list);
> +
> +	return -rte_errno;
> +}
> +
> +int
> +ice_destroy_switch_filter(struct ice_pf *pf,
> +			struct rte_flow *flow)
> +{
> +	struct ice_hw *hw = ICE_PF_TO_HW(pf);
> +	int ret;
> +	struct ice_rule_query_data *filter_ptr;
> +	struct ice_rule_query_data rule_added;
> +
> +	filter_ptr = (struct ice_rule_query_data *)
> +			flow->rule;
> +	rte_memcpy(&rule_added, filter_ptr,
> +		sizeof(struct ice_rule_query_data));
> +
> +	if (!filter_ptr) {
> +		PMD_DRV_LOG(ERR, "no such flow"
> +			    " create by switch filter");
> +		return -EINVAL;
> +	}

How about adding struct rte_flow_error *error as a parameter and using rte_flow_error_set() here?

> +
> +	ret = ice_rem_adv_rule_by_id(hw, &rule_added);
> +
> +	rte_free(filter_ptr);
> +
> +	return ret;
> +}
> +
> +void
> +ice_free_switch_filter_rule(void *rule) {
> +	struct ice_rule_query_data *filter_ptr;
> +
> +	filter_ptr = (struct ice_rule_query_data *)rule;
> +
> +	rte_free(filter_ptr);
> +}
> diff --git a/drivers/net/ice/ice_switch_filter.h
> b/drivers/net/ice/ice_switch_filter.h
> new file mode 100644
> index 0000000..957d0d1
> --- /dev/null
> +++ b/drivers/net/ice/ice_switch_filter.h
> @@ -0,0 +1,28 @@
> +#ifndef _ICE_SWITCH_FILTER_H_
> +#define _ICE_SWITCH_FILTER_H_
> +
> +#include "base/ice_switch.h"
> +#include "base/ice_type.h"
> +#include "ice_ethdev.h"
> +
> +#define NEXT_ITEM_OF_ACTION(act, actions, index)                        \
> +	do {                                                            \
> +		act = actions + index;                                  \
> +		while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {        \
> +			index++;                                        \
> +			act = actions + index;                          \
> +		}                                                       \
> +	} while (0)
> +
> +int
> +ice_create_switch_filter(struct ice_pf *pf,
> +			const struct rte_flow_item pattern[],
> +			const struct rte_flow_action actions[],
> +			struct rte_flow *flow,
> +			struct rte_flow_error *error);
> +int
> +ice_destroy_switch_filter(struct ice_pf *pf,
> +			struct rte_flow *flow);
> +void
> +ice_free_switch_filter_rule(void *rule); #endif /*
> +_ICE_SWITCH_FILTER_H_ */
> diff --git a/drivers/net/ice/meson.build b/drivers/net/ice/meson.build index
> 2bec688..8697676 100644
> --- a/drivers/net/ice/meson.build
> +++ b/drivers/net/ice/meson.build
> @@ -6,7 +6,8 @@ objs = [base_objs]
> 
>  sources = files(
>  	'ice_ethdev.c',
> -	'ice_rxtx.c'
> +	'ice_rxtx.c',
> +	'ice_switch_filter.c'
>  	)
> 
>  deps += ['hash']
> --
> 2.9.5


^ permalink raw reply	[flat|nested] 73+ messages in thread

* Re: [dpdk-dev] [PATCH v2 2/3] net/ice: add generic flow API
  2019-06-12  7:50   ` [dpdk-dev] [PATCH v2 2/3] net/ice: add generic flow API Qiming Yang
@ 2019-06-17  5:50     ` Xing, Beilei
  2019-06-17  6:02     ` Xing, Beilei
  2019-06-17  9:19     ` Wang, Xiao W
  2 siblings, 0 replies; 73+ messages in thread
From: Xing, Beilei @ 2019-06-17  5:50 UTC (permalink / raw)
  To: Yang, Qiming, dev; +Cc: Yang, Qiming



> -----Original Message-----
> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Qiming Yang
> Sent: Wednesday, June 12, 2019 3:50 PM
> To: dev@dpdk.org
> Cc: Yang, Qiming <qiming.yang@intel.com>
> Subject: [dpdk-dev] [PATCH v2 2/3] net/ice: add generic flow API
> 
> This patch adds ice_flow_create, ice_flow_destroy, ice_flow_flush and
> ice_flow_validate support, these are going to used to handle all the generic
> filters.
> 

'going to' can be removed.

> Signed-off-by: Qiming Yang <qiming.yang@intel.com>
> ---
>  drivers/net/ice/Makefile           |   1 +
>  drivers/net/ice/ice_ethdev.c       |  44 +++
>  drivers/net/ice/ice_ethdev.h       |   7 +-
>  drivers/net/ice/ice_generic_flow.c | 567
> +++++++++++++++++++++++++++++++++++++
>  drivers/net/ice/ice_generic_flow.h | 404 ++++++++++++++++++++++++++
>  5 files changed, 1022 insertions(+), 1 deletion(-)  create mode 100644
> drivers/net/ice/ice_generic_flow.c
>  create mode 100644 drivers/net/ice/ice_generic_flow.h
> 
> diff --git a/drivers/net/ice/Makefile b/drivers/net/ice/Makefile index
> b10d826..32abeb6 100644
> --- a/drivers/net/ice/Makefile
> +++ b/drivers/net/ice/Makefile
> @@ -79,5 +79,6 @@ endif
>  ifeq ($(CC_AVX2_SUPPORT), 1)
>  	SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_rxtx_vec_avx2.c  endif
> +SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_generic_flow.c
> 

It should be added in meson.build, too.

>  include $(RTE_SDK)/mk/rte.lib.mk
> diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c index
> bdbceb4..cf6bb1d 100644
> --- a/drivers/net/ice/ice_ethdev.c
> +++ b/drivers/net/ice/ice_ethdev.c
> @@ -15,6 +15,7 @@

...

> 
> +extern const struct rte_flow_ops ice_flow_ops;
> +
>  /* Struct to store flow created. */
>  struct rte_flow {
>  	TAILQ_ENTRY(rte_flow) node;
> -void *rule;
> +	void *rule;
>  };

Why not change it in patch 1?

> 
> +TAILQ_HEAD(ice_flow_list, rte_flow);
> +
>  struct ice_pf {
>  	struct ice_adapter *adapter; /* The adapter this PF associate to */
>  	struct ice_vsi *main_vsi; /* pointer to main VSI structure */ @@ -
> 265,6 +269,7 @@ struct ice_pf {
>  	struct ice_eth_stats internal_stats;
>  	bool offset_loaded;
>  	bool adapter_stopped;
> +	struct ice_flow_list flow_list;
>  };
> 
>  /**
> diff --git a/drivers/net/ice/ice_generic_flow.c
> b/drivers/net/ice/ice_generic_flow.c
> new file mode 100644
> index 0000000..4fb50b2
> --- /dev/null
> +++ b/drivers/net/ice/ice_generic_flow.c

...

> +
> +static int ice_flow_valid_action(const struct rte_flow_action *actions,
> +				       struct rte_flow_error *error) {
> +	switch (actions->type) {
> +	case RTE_FLOW_ACTION_TYPE_QUEUE:
> +		break;
> +	case RTE_FLOW_ACTION_TYPE_DROP:
> +		break;
> +	default:
> +		rte_flow_error_set(error, EINVAL,
> +				   RTE_FLOW_ERROR_TYPE_ACTION, actions,
> +				   "Invalid action.");
> +		return -rte_errno;
> +	}
> +
> +	return 0;
> +}

Do we need a supported array for actions?

> +
> +static int
> +ice_flow_validate(__rte_unused struct rte_eth_dev *dev,

...

^ permalink raw reply	[flat|nested] 73+ messages in thread

* Re: [dpdk-dev] [PATCH v2 2/3] net/ice: add generic flow API
  2019-06-12  7:50   ` [dpdk-dev] [PATCH v2 2/3] net/ice: add generic flow API Qiming Yang
  2019-06-17  5:50     ` Xing, Beilei
@ 2019-06-17  6:02     ` Xing, Beilei
  2019-06-17  9:19     ` Wang, Xiao W
  2 siblings, 0 replies; 73+ messages in thread
From: Xing, Beilei @ 2019-06-17  6:02 UTC (permalink / raw)
  To: Yang, Qiming, dev; +Cc: Yang, Qiming


...

> +
> +static int
> +ice_flow_validate(__rte_unused struct rte_eth_dev *dev,
> +		   const struct rte_flow_attr *attr,
> +		   const struct rte_flow_item pattern[],
> +		   const struct rte_flow_action actions[],
> +		   struct rte_flow_error *error)
> +{
> +	uint64_t inset = 0;
> +	int ret = ICE_ERR_NOT_SUPPORTED;
> +
> +	if (!pattern) {
> +		rte_flow_error_set(error, EINVAL,
> RTE_FLOW_ERROR_TYPE_ITEM_NUM,
> +				   NULL, "NULL pattern.");
> +		return -rte_errno;
> +	}
> +
> +	if (!actions) {
> +		rte_flow_error_set(error, EINVAL,
> +				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
> +				   NULL, "NULL action.");
> +		return -rte_errno;
> +	}
> +
> +	if (!attr) {
> +		rte_flow_error_set(error, EINVAL,
> +				   RTE_FLOW_ERROR_TYPE_ATTR,
> +				   NULL, "NULL attribute.");
> +		return -rte_errno;
> +	}
> +
> +	ret = ice_flow_valid_attr(attr, error);
> +	if (!ret)
> +		return ret;
> +
> +	inset = ice_flow_valid_pattern(pattern, error);
> +	if (!inset)
> +		return -rte_errno;
> +
> +	ret = ice_flow_valid_inset(pattern, inset, error);
> +	if (ret)
> +		return ret;
> +
> +	ret = ice_flow_valid_action(actions, error);
> +	if (ret)
> +		return ret;

There is some work here that duplicates patch 1 (such as action validation); it would be better to optimize that.

> +
> +	return 0;
> +}
> +
> 


^ permalink raw reply	[flat|nested] 73+ messages in thread

* Re: [dpdk-dev] [PATCH v2 1/3] net/ice: enable switch filter
  2019-06-17  5:27     ` Xing, Beilei
@ 2019-06-17  8:23       ` Zhao1, Wei
  2019-06-17  8:51       ` Zhao1, Wei
  1 sibling, 0 replies; 73+ messages in thread
From: Zhao1, Wei @ 2019-06-17  8:23 UTC (permalink / raw)
  To: Xing, Beilei, Yang, Qiming, dev

Hi, Beilei

> -----Original Message-----
> From: Xing, Beilei
> Sent: Monday, June 17, 2019 1:27 PM
> To: Yang, Qiming <qiming.yang@intel.com>; dev@dpdk.org
> Cc: Zhao1, Wei <wei.zhao1@intel.com>
> Subject: RE: [dpdk-dev] [PATCH v2 1/3] net/ice: enable switch filter
> 
> 
> 
> > -----Original Message-----
> > From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Qiming Yang
> > Sent: Wednesday, June 12, 2019 3:50 PM
> > To: dev@dpdk.org
> > Cc: Zhao1, Wei <wei.zhao1@intel.com>
> > Subject: [dpdk-dev] [PATCH v2 1/3] net/ice: enable switch filter
> >
> > From: wei zhao <wei.zhao1@intel.com>
> >
> > The patch enables the backend of rte_flow. It transfers rte_flow_xxx
> > to device specific data structure and configures packet process
> > engine's binary classifier
> > (switch) properly.
> >
> > Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
> > ---
> >  drivers/net/ice/Makefile            |   1 +
> >  drivers/net/ice/ice_ethdev.h        |   6 +
> >  drivers/net/ice/ice_switch_filter.c | 502
> > ++++++++++++++++++++++++++++++++++++
> >  drivers/net/ice/ice_switch_filter.h |  28 ++
> >  drivers/net/ice/meson.build         |   3 +-
> >  5 files changed, 539 insertions(+), 1 deletion(-)  create mode 100644
> > drivers/net/ice/ice_switch_filter.c
> >  create mode 100644 drivers/net/ice/ice_switch_filter.h
> >
> > diff --git a/drivers/net/ice/Makefile b/drivers/net/ice/Makefile index
> > 0e5c55e..b10d826 100644
> > --- a/drivers/net/ice/Makefile
> > +++ b/drivers/net/ice/Makefile
> > @@ -60,6 +60,7 @@ ifeq ($(CONFIG_RTE_ARCH_X86), y)
> >  SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_rxtx_vec_sse.c  endif
> >
> > +SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_switch_filter.c
> >  ifeq ($(findstring
> > RTE_MACHINE_CPUFLAG_AVX2,$(CFLAGS)),RTE_MACHINE_CPUFLAG_AVX2)
> >  	CC_AVX2_SUPPORT=1
> >  else
> > diff --git a/drivers/net/ice/ice_ethdev.h
> > b/drivers/net/ice/ice_ethdev.h index 1385afa..67a358a 100644
> > --- a/drivers/net/ice/ice_ethdev.h
> > +++ b/drivers/net/ice/ice_ethdev.h
> > @@ -234,6 +234,12 @@ struct ice_vsi {
> >  	bool offset_loaded;
> >  };
> >
> > +/* Struct to store flow created. */
> > +struct rte_flow {
> > +	TAILQ_ENTRY(rte_flow) node;
> > +void *rule;
> > +};
> > +
> >  struct ice_pf {
> >  	struct ice_adapter *adapter; /* The adapter this PF associate to */
> >  	struct ice_vsi *main_vsi; /* pointer to main VSI structure */ diff
> > --git a/drivers/net/ice/ice_switch_filter.c
> > b/drivers/net/ice/ice_switch_filter.c
> > new file mode 100644
> > index 0000000..e679675
> > --- /dev/null
> > +++ b/drivers/net/ice/ice_switch_filter.c
> > @@ -0,0 +1,502 @@
> > +#include <sys/queue.h>
> > +#include <stdio.h>
> > +#include <errno.h>
> > +#include <stdint.h>
> > +#include <string.h>
> > +#include <unistd.h>
> > +#include <stdarg.h>
> > +
> > +#include <rte_debug.h>
> > +#include <rte_ether.h>
> > +#include <rte_ethdev_driver.h>
> > +#include <rte_log.h>
> > +#include <rte_malloc.h>
> > +#include <rte_eth_ctrl.h>
> > +#include <rte_tailq.h>
> > +#include <rte_flow_driver.h>
> > +
> > +#include "ice_logs.h"
> > +#include "base/ice_type.h"
> > +#include "ice_switch_filter.h"
> > +
> > +static int
> > +ice_parse_switch_filter(
> > +			const struct rte_flow_item pattern[],
> > +			const struct rte_flow_action actions[],
> > +			struct rte_flow_error *error,
> > +			struct ice_adv_rule_info *rule_info,
> > +			struct ice_adv_lkup_elem **lkup_list,
> > +			uint16_t *lkups_num)
> > +{
> > +	const struct rte_flow_item *item = pattern;
> > +	enum rte_flow_item_type item_type;
> > +	const struct rte_flow_item_eth *eth_spec, *eth_mask;
> > +	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
> > +	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
> > +	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
> > +	const struct rte_flow_item_udp *udp_spec, *udp_mask;
> > +	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
> > +	const struct rte_flow_item_nvgre  *nvgre_spec, *nvgre_mask;
> > +	const struct rte_flow_item_vxlan  *vxlan_spec, *vxlan_mask;
> > +	struct ice_adv_lkup_elem *list;
> > +	uint16_t i, j, t = 0;
> > +	uint16_t item_num = 0;
> > +	enum ice_sw_tunnel_type tun_type = ICE_NON_TUN;
> > +
> > +	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
> > +		if (item->type == RTE_FLOW_ITEM_TYPE_ETH ||
> > +			item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
> > +			item->type == RTE_FLOW_ITEM_TYPE_IPV6 ||
> > +			item->type == RTE_FLOW_ITEM_TYPE_UDP ||
> > +			item->type == RTE_FLOW_ITEM_TYPE_TCP ||
> > +			item->type == RTE_FLOW_ITEM_TYPE_SCTP ||
> > +			item->type == RTE_FLOW_ITEM_TYPE_VXLAN ||
> > +			item->type == RTE_FLOW_ITEM_TYPE_NVGRE)
> > +			item_num++;
> > +	}
> 
> I think the pattern is already verified in the generic flow framework,
> so how about removing the condition here?

This is not verification; it just counts item_num so that we can malloc memory for the list.

> 
> ...
> 
> > +
> > +/* By now ice switch filter action code implement only
> > +* supports QUEUE or DROP.
> > +*/
> > +static int
> > +ice_parse_switch_action(struct ice_pf *pf,
> > +				 const struct rte_flow_action *actions,
> > +				 struct rte_flow_error *error,
> > +				 struct ice_adv_rule_info *rule_info) {
> > +	struct ice_hw *hw = ICE_PF_TO_HW(pf);
> > +	struct ice_vsi *vsi = pf->main_vsi;
> > +	const struct rte_flow_action *act;
> > +	const struct rte_flow_action_queue *act_q;
> > +	uint16_t base_queue, index = 0;
> > +	uint32_t reg;
> > +
> > +	/* Check if the first non-void action is QUEUE or DROP. */
> > +	NEXT_ITEM_OF_ACTION(act, actions, index);
> > +	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
> > +	    act->type != RTE_FLOW_ACTION_TYPE_DROP) {
> > +		rte_flow_error_set(error, EINVAL,
> > RTE_FLOW_ERROR_TYPE_ACTION,
> > +				   act, "Not supported action.");
> > +		return -rte_errno;
> > +	}
> > +	reg = ICE_READ_REG(hw, PFLAN_RX_QALLOC);
> > +	if (reg & PFLAN_RX_QALLOC_VALID_M) {
> > +		base_queue = reg & PFLAN_RX_QALLOC_FIRSTQ_M;
> > +	} else {
> > +		rte_flow_error_set(error, EINVAL,
> > +			RTE_FLOW_ERROR_TYPE_ACTION,
> > +			act, "Invalid queue register");
> > +		return -rte_errno;
> > +	}
> > +	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
> > +		act_q = act->conf;
> > +		rule_info->sw_act.fltr_act = ICE_FWD_TO_Q;
> > +		rule_info->sw_act.fwd_id.q_id = base_queue + act_q->index;
> > +		if (act_q->index >= pf->dev_data->nb_rx_queues) {
> > +			rte_flow_error_set(error, EINVAL,
> > +				RTE_FLOW_ERROR_TYPE_ACTION,
> > +				act, "Invalid queue ID for"
> > +				" switch filter.");
> > +			return -rte_errno;
> > +		}
> > +	} else {
> > +		rule_info->sw_act.fltr_act = ICE_DROP_PACKET;
> > +	}
> > +
> > +	rule_info->sw_act.vsi_handle = vsi->idx;
> > +	rule_info->rx = 1;
> > +	rule_info->sw_act.src = vsi->idx;
> > +
> > +	/* Check if the next non-void item is END */
> > +	index++;
> > +	NEXT_ITEM_OF_ACTION(act, actions, index);
> > +	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
> > +		rte_flow_error_set(error, EINVAL,
> > RTE_FLOW_ERROR_TYPE_ACTION,
> > +				   act, "Not supported action.");
> > +		return -rte_errno;
> > +	}
> > +
> > +	return 0;
> > +}
> 
> 
> How about using a supported-actions array to replace NEXT_ITEM_OF_ACTION,
> just like the pattern?

Ok, updated in v3

> 
> 
> > +
> > +static int
> > +ice_switch_rule_set(struct ice_pf *pf,
> > +			struct ice_adv_lkup_elem *list,
> > +			uint16_t lkups_cnt,
> > +			struct ice_adv_rule_info *rule_info,
> > +			struct rte_flow *flow)
> > +{
> > +	struct ice_hw *hw = ICE_PF_TO_HW(pf);
> > +	int ret;
> > +	struct ice_rule_query_data rule_added = {0};
> > +	struct ice_rule_query_data *filter_ptr;
> > +
> > +	if (lkups_cnt > ICE_MAX_CHAIN_WORDS) {
> > +		PMD_DRV_LOG(ERR, "item number too large for rule");
> > +		return -ENOTSUP;
> > +	}
> > +	if (!list) {
> > +		PMD_DRV_LOG(ERR, "lookup list should not be NULL");
> > +		return -ENOTSUP;
> > +	}
> > +
> > +	ret = ice_add_adv_rule(hw, list, lkups_cnt, rule_info, &rule_added);
> > +
> > +	if (!ret) {
> > +		filter_ptr = rte_zmalloc("ice_switch_filter",
> > +			sizeof(struct ice_rule_query_data), 0);
> > +		if (!filter_ptr) {
> > +			PMD_DRV_LOG(ERR, "failed to allocate memory");
> > +			return -EINVAL;
> > +		}
> > +		flow->rule = filter_ptr;
> > +		rte_memcpy(filter_ptr,
> > +			&rule_added,
> > +			sizeof(struct ice_rule_query_data));
> > +	}
> > +
> > +	return ret;
> > +}
> > +
> > +int
> > +ice_create_switch_filter(struct ice_pf *pf,
> > +			const struct rte_flow_item pattern[],
> > +			const struct rte_flow_action actions[],
> > +			struct rte_flow *flow,
> > +			struct rte_flow_error *error)
> > +{
> > +	int ret = 0;
> > +	struct ice_adv_rule_info rule_info = {0};
> > +	struct ice_adv_lkup_elem *list = NULL;
> > +	uint16_t lkups_num = 0;
> > +
> > +	ret = ice_parse_switch_filter(pattern, actions, error,
> > +			&rule_info, &list, &lkups_num);
> > +	if (ret)
> > +		goto out;
> > +
> > +	ret = ice_parse_switch_action(pf, actions, error, &rule_info);
> > +	if (ret)
> > +		goto out;
> > +
> > +	ret = ice_switch_rule_set(pf, list, lkups_num, &rule_info, flow);
> > +	if (ret)
> > +		goto out;
> > +
> > +	rte_free(list);
> 
> Why is the list not allocated inside this function?


Because the size is not known at that point, as explained in the comment above.

> 
> > +	return 0;
> > +
> > +out:
> > +	rte_free(list);
> > +
> > +	return -rte_errno;
> > +}
> > +
> > +int
> > +ice_destroy_switch_filter(struct ice_pf *pf,
> > +			struct rte_flow *flow)
> > +{
> > +	struct ice_hw *hw = ICE_PF_TO_HW(pf);
> > +	int ret;
> > +	struct ice_rule_query_data *filter_ptr;
> > +	struct ice_rule_query_data rule_added;
> > +
> > +	filter_ptr = (struct ice_rule_query_data *)
> > +			flow->rule;
> > +	rte_memcpy(&rule_added, filter_ptr,
> > +		sizeof(struct ice_rule_query_data));
> > +
> > +	if (!filter_ptr) {
> > +		PMD_DRV_LOG(ERR, "no such flow"
> > +			    " create by switch filter");
> > +		return -EINVAL;
> > +	}
> 
> How about adding struct rte_flow_error *error as a parameter and using
> rte_flow_error_set() here?

Ok, updated in v3

> 
> > +
> > +	ret = ice_rem_adv_rule_by_id(hw, &rule_added);
> > +
> > +	rte_free(filter_ptr);
> > +
> > +	return ret;
> > +}
> > +
> > +void
> > +ice_free_switch_filter_rule(void *rule) {
> > +	struct ice_rule_query_data *filter_ptr;
> > +
> > +	filter_ptr = (struct ice_rule_query_data *)rule;
> > +
> > +	rte_free(filter_ptr);
> > +}
> > diff --git a/drivers/net/ice/ice_switch_filter.h
> > b/drivers/net/ice/ice_switch_filter.h
> > new file mode 100644
> > index 0000000..957d0d1
> > --- /dev/null
> > +++ b/drivers/net/ice/ice_switch_filter.h
> > @@ -0,0 +1,28 @@
> > +#ifndef _ICE_SWITCH_FILTER_H_
> > +#define _ICE_SWITCH_FILTER_H_
> > +
> > +#include "base/ice_switch.h"
> > +#include "base/ice_type.h"
> > +#include "ice_ethdev.h"
> > +
> > +#define NEXT_ITEM_OF_ACTION(act, actions, index)                        \
> > +	do {                                                            \
> > +		act = actions + index;                                  \
> > +		while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {        \
> > +			index++;                                        \
> > +			act = actions + index;                          \
> > +		}                                                       \
> > +	} while (0)
> > +
> > +int
> > +ice_create_switch_filter(struct ice_pf *pf,
> > +			const struct rte_flow_item pattern[],
> > +			const struct rte_flow_action actions[],
> > +			struct rte_flow *flow,
> > +			struct rte_flow_error *error);
> > +int
> > +ice_destroy_switch_filter(struct ice_pf *pf,
> > +			struct rte_flow *flow);
> > +void
> > +ice_free_switch_filter_rule(void *rule); #endif /*
> > +_ICE_SWITCH_FILTER_H_ */
> > diff --git a/drivers/net/ice/meson.build b/drivers/net/ice/meson.build
> > index
> > 2bec688..8697676 100644
> > --- a/drivers/net/ice/meson.build
> > +++ b/drivers/net/ice/meson.build
> > @@ -6,7 +6,8 @@ objs = [base_objs]
> >
> >  sources = files(
> >  	'ice_ethdev.c',
> > -	'ice_rxtx.c'
> > +	'ice_rxtx.c',
> > +	'ice_switch_filter.c'
> >  	)
> >
> >  deps += ['hash']
> > --
> > 2.9.5


^ permalink raw reply	[flat|nested] 73+ messages in thread

* Re: [dpdk-dev] [PATCH v2 1/3] net/ice: enable switch filter
  2019-06-14  9:46       ` Zhao1, Wei
@ 2019-06-17  8:28         ` Wang, Xiao W
  2019-06-18  1:57           ` Zhao1, Wei
  0 siblings, 1 reply; 73+ messages in thread
From: Wang, Xiao W @ 2019-06-17  8:28 UTC (permalink / raw)
  To: Zhao1, Wei, Yang, Qiming, dev

Hi Wei,

> -----Original Message-----
> From: Zhao1, Wei
> Sent: Friday, June 14, 2019 5:47 PM
> To: Wang, Xiao W <xiao.w.wang@intel.com>; Yang, Qiming
> <qiming.yang@intel.com>; dev@dpdk.org
> Subject: RE: [dpdk-dev] [PATCH v2 1/3] net/ice: enable switch filter
> 
> Hi, xiao
> 
> > -----Original Message-----
> > From: Wang, Xiao W
> > Sent: Thursday, June 13, 2019 4:24 PM
> > To: Yang, Qiming <qiming.yang@intel.com>; dev@dpdk.org
> > Cc: Zhao1, Wei <wei.zhao1@intel.com>
> > Subject: RE: [dpdk-dev] [PATCH v2 1/3] net/ice: enable switch filter
> >
> > Hi,
> >
> > > -----Original Message-----
> > > From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Qiming Yang
> > > Sent: Wednesday, June 12, 2019 3:50 PM
> > > To: dev@dpdk.org
> > > Cc: Zhao1, Wei <wei.zhao1@intel.com>
> > > Subject: [dpdk-dev] [PATCH v2 1/3] net/ice: enable switch filter
> > >
> > > From: wei zhao <wei.zhao1@intel.com>
> > >
> > > The patch enables the backend of rte_flow. It transfers rte_flow_xxx
> > > to device specific data structure and configures packet process
> > > engine's binary classifier
> > > (switch) properly.
> > >
> > > Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
> > > ---
> > >  drivers/net/ice/Makefile            |   1 +
> > >  drivers/net/ice/ice_ethdev.h        |   6 +
> > >  drivers/net/ice/ice_switch_filter.c | 502
> > > ++++++++++++++++++++++++++++++++++++
> > >  drivers/net/ice/ice_switch_filter.h |  28 ++
> > >  drivers/net/ice/meson.build         |   3 +-
> > >  5 files changed, 539 insertions(+), 1 deletion(-)  create mode 100644
> > > drivers/net/ice/ice_switch_filter.c
> > >  create mode 100644 drivers/net/ice/ice_switch_filter.h
> > >
> > > diff --git a/drivers/net/ice/Makefile b/drivers/net/ice/Makefile index
> > > 0e5c55e..b10d826 100644
> > > --- a/drivers/net/ice/Makefile
> > > +++ b/drivers/net/ice/Makefile
> > > @@ -60,6 +60,7 @@ ifeq ($(CONFIG_RTE_ARCH_X86), y)
> > >  SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_rxtx_vec_sse.c  endif
> > >
> > > +SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_switch_filter.c
> > >  ifeq ($(findstring
> > >
> RTE_MACHINE_CPUFLAG_AVX2,$(CFLAGS)),RTE_MACHINE_CPUFLAG_AVX2)
> > >  	CC_AVX2_SUPPORT=1
> > >  else
> > > diff --git a/drivers/net/ice/ice_ethdev.h
> > > b/drivers/net/ice/ice_ethdev.h index 1385afa..67a358a 100644
> > > --- a/drivers/net/ice/ice_ethdev.h
> > > +++ b/drivers/net/ice/ice_ethdev.h
> > > @@ -234,6 +234,12 @@ struct ice_vsi {
> > >  	bool offset_loaded;
> > >  };
> > >
> > > +/* Struct to store flow created. */
> > > +struct rte_flow {
> > > +	TAILQ_ENTRY(rte_flow) node;
> > > +void *rule;
> > > +};
> > > +
> > >  struct ice_pf {
> > >  	struct ice_adapter *adapter; /* The adapter this PF associate to */
> > >  	struct ice_vsi *main_vsi; /* pointer to main VSI structure */ diff
> > > --git a/drivers/net/ice/ice_switch_filter.c
> > > b/drivers/net/ice/ice_switch_filter.c
> > > new file mode 100644
> > > index 0000000..e679675
> > > --- /dev/null
> > > +++ b/drivers/net/ice/ice_switch_filter.c
[...]

> > > +			RTE_FLOW_ITEM_TYPE_END; item++, i++) {
> >
> > It seems we don't need the "i" variable.
> 
OK, updated in v3.
> 
> >
> > > +		item_type = item->type;
> > > +
> > > +		switch (item_type) {
> > > +		case RTE_FLOW_ITEM_TYPE_ETH:
> > > +			eth_spec = item->spec;
> > > +			eth_mask = item->mask;
> > > +			if (eth_spec && eth_mask) {
> > > +				list[t].type = (tun_type == ICE_NON_TUN) ?
> > > +					ICE_MAC_OFOS : ICE_MAC_IL;
> > > +				for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
> > > +					if (eth_mask->src.addr_bytes[j] ==
> > > +								UINT8_MAX) {
> > > +						list[t].h_u.eth_hdr.
> > > +							src_addr[j] =
> > > +						eth_spec->src.addr_bytes[j];
> > > +						list[t].m_u.eth_hdr.
> > > +							src_addr[j] =
> > > +						eth_mask->src.addr_bytes[j];
> > > +					}
> > > +					if (eth_mask->dst.addr_bytes[j] ==
> > > +								UINT8_MAX) {
> > > +						list[t].h_u.eth_hdr.
> > > +							dst_addr[j] =
> > > +						eth_spec->dst.addr_bytes[j];
> > > +						list[t].m_u.eth_hdr.
> > > +							dst_addr[j] =
> > > +						eth_mask->dst.addr_bytes[j];
> > > +					}
> > > +				}
> > > +				if (eth_mask->type == UINT16_MAX) {
> > > +					list[t].h_u.eth_hdr.ethtype_id =
> > > +					rte_be_to_cpu_16(eth_spec->type);
> > > +					list[t].m_u.eth_hdr.ethtype_id =
> > > +						UINT16_MAX;
> > > +				}
> > > +				t++;
> >
> > A lot of "t++" below, can we move it outside the switch{ } to have only one
> "t++"?
> 
> For now we cannot, because the shared code cannot handle the
> (!eth_spec && !eth_mask) case if we always do t++.
> In that case the item would still occupy list[t], and the shared code
> would report an error.

The blow "else if" branch has no effect at all, we can just remove it.

BRs,
Xiao

> >
> > > +			} else if (!eth_spec && !eth_mask) {
> > > +				list[t].type = (tun_type == ICE_NON_TUN) ?
> > > +					ICE_MAC_OFOS : ICE_MAC_IL;
> > > +			}
> > > +			break;
[...]

^ permalink raw reply	[flat|nested] 73+ messages in thread

* Re: [dpdk-dev] [PATCH v2 1/3] net/ice: enable switch filter
  2019-06-17  5:27     ` Xing, Beilei
  2019-06-17  8:23       ` Zhao1, Wei
@ 2019-06-17  8:51       ` Zhao1, Wei
  2019-06-18  1:50         ` Xing, Beilei
  1 sibling, 1 reply; 73+ messages in thread
From: Zhao1, Wei @ 2019-06-17  8:51 UTC (permalink / raw)
  To: Xing, Beilei, Yang, Qiming, dev

Hi, Beilei


> -----Original Message-----
> From: Xing, Beilei
> Sent: Monday, June 17, 2019 1:27 PM
> To: Yang, Qiming <qiming.yang@intel.com>; dev@dpdk.org
> Cc: Zhao1, Wei <wei.zhao1@intel.com>
> Subject: RE: [dpdk-dev] [PATCH v2 1/3] net/ice: enable switch filter
> 
> 
> 
> ...
> 
> > +
> > +/* By now ice switch filter action code implement only
> > +* supports QUEUE or DROP.
> > +*/
> > +static int
> > +ice_parse_switch_action(struct ice_pf *pf,
> > +				 const struct rte_flow_action *actions,
> > +				 struct rte_flow_error *error,
> > +				 struct ice_adv_rule_info *rule_info) {
> > +	struct ice_hw *hw = ICE_PF_TO_HW(pf);
> > +	struct ice_vsi *vsi = pf->main_vsi;
> > +	const struct rte_flow_action *act;
> > +	const struct rte_flow_action_queue *act_q;
> > +	uint16_t base_queue, index = 0;
> > +	uint32_t reg;
> > +
> > +	/* Check if the first non-void action is QUEUE or DROP. */
> > +	NEXT_ITEM_OF_ACTION(act, actions, index);
> > +	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
> > +	    act->type != RTE_FLOW_ACTION_TYPE_DROP) {
> > +		rte_flow_error_set(error, EINVAL,
> > RTE_FLOW_ERROR_TYPE_ACTION,
> > +				   act, "Not supported action.");
> > +		return -rte_errno;
> > +	}
> > +	reg = ICE_READ_REG(hw, PFLAN_RX_QALLOC);
> > +	if (reg & PFLAN_RX_QALLOC_VALID_M) {
> > +		base_queue = reg & PFLAN_RX_QALLOC_FIRSTQ_M;
> > +	} else {
> > +		rte_flow_error_set(error, EINVAL,
> > +			RTE_FLOW_ERROR_TYPE_ACTION,
> > +			act, "Invalid queue register");
> > +		return -rte_errno;
> > +	}
> > +	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
> > +		act_q = act->conf;
> > +		rule_info->sw_act.fltr_act = ICE_FWD_TO_Q;
> > +		rule_info->sw_act.fwd_id.q_id = base_queue + act_q->index;
> > +		if (act_q->index >= pf->dev_data->nb_rx_queues) {
> > +			rte_flow_error_set(error, EINVAL,
> > +				RTE_FLOW_ERROR_TYPE_ACTION,
> > +				act, "Invalid queue ID for"
> > +				" switch filter.");
> > +			return -rte_errno;
> > +		}
> > +	} else {
> > +		rule_info->sw_act.fltr_act = ICE_DROP_PACKET;
> > +	}
> > +
> > +	rule_info->sw_act.vsi_handle = vsi->idx;
> > +	rule_info->rx = 1;
> > +	rule_info->sw_act.src = vsi->idx;
> > +
> > +	/* Check if the next non-void item is END */
> > +	index++;
> > +	NEXT_ITEM_OF_ACTION(act, actions, index);
> > +	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
> > +		rte_flow_error_set(error, EINVAL,
> > RTE_FLOW_ERROR_TYPE_ACTION,
> > +				   act, "Not supported action.");
> > +		return -rte_errno;
> > +	}
> > +
> > +	return 0;
> > +}
> 
> 
> How about using a supported-actions array to replace NEXT_ITEM_OF_ACTION,
> just like the pattern?

This does not seem to need changing; i40e is also implemented this way.


^ permalink raw reply	[flat|nested] 73+ messages in thread

* Re: [dpdk-dev] [PATCH v2 2/3] net/ice: add generic flow API
  2019-06-12  7:50   ` [dpdk-dev] [PATCH v2 2/3] net/ice: add generic flow API Qiming Yang
  2019-06-17  5:50     ` Xing, Beilei
  2019-06-17  6:02     ` Xing, Beilei
@ 2019-06-17  9:19     ` Wang, Xiao W
  2 siblings, 0 replies; 73+ messages in thread
From: Wang, Xiao W @ 2019-06-17  9:19 UTC (permalink / raw)
  To: Yang, Qiming, dev; +Cc: Xing, Beilei

Hi Qiming,

> -----Original Message-----
> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Qiming Yang
> Sent: Wednesday, June 12, 2019 3:50 PM
> To: dev@dpdk.org
> Cc: Yang, Qiming <qiming.yang@intel.com>
> Subject: [dpdk-dev] [PATCH v2 2/3] net/ice: add generic flow API
> 
> This patch adds ice_flow_create, ice_flow_destroy,
> ice_flow_flush and ice_flow_validate support,
> these are going to used to handle all the generic filters.
> 
> Signed-off-by: Qiming Yang <qiming.yang@intel.com>
> ---
>  drivers/net/ice/Makefile           |   1 +
>  drivers/net/ice/ice_ethdev.c       |  44 +++
>  drivers/net/ice/ice_ethdev.h       |   7 +-
>  drivers/net/ice/ice_generic_flow.c | 567
> +++++++++++++++++++++++++++++++++++++
>  drivers/net/ice/ice_generic_flow.h | 404 ++++++++++++++++++++++++++
>  5 files changed, 1022 insertions(+), 1 deletion(-)
>  create mode 100644 drivers/net/ice/ice_generic_flow.c
>  create mode 100644 drivers/net/ice/ice_generic_flow.h
> 
> diff --git a/drivers/net/ice/Makefile b/drivers/net/ice/Makefile
> index b10d826..32abeb6 100644
> --- a/drivers/net/ice/Makefile
[...]

>  	bool offset_loaded;
>  	bool adapter_stopped;
> +	struct ice_flow_list flow_list;
>  };
> 
>  /**
> diff --git a/drivers/net/ice/ice_generic_flow.c
> b/drivers/net/ice/ice_generic_flow.c
> new file mode 100644
> index 0000000..4fb50b2
> --- /dev/null
> +++ b/drivers/net/ice/ice_generic_flow.c
> @@ -0,0 +1,567 @@

License header is missing.

> +#include <sys/queue.h>
> +#include <stdio.h>
> +#include <errno.h>
> +#include <stdint.h>
> +#include <string.h>
> +#include <unistd.h>
> +#include <stdarg.h>
> +
> +#include <rte_ether.h>
> +#include <rte_ethdev_driver.h>
> +#include <rte_malloc.h>
> +
> +#include "ice_ethdev.h"
> +#include "ice_generic_flow.h"
> +#include "ice_switch_filter.h"
> +

[...]
> +				if (eth_mask->type == RTE_BE16(0xffff))
> +					input_set |= ICE_INSET_ETHERTYPE;
> +			}
> +			break;
> +		case RTE_FLOW_ITEM_TYPE_IPV4:
> +			ipv4_spec = item->spec;
> +			ipv4_mask = item->mask;
> +
> +			if (!(ipv4_spec && ipv4_mask)) {
> +				rte_flow_error_set(error, EINVAL,
> +					   RTE_FLOW_ERROR_TYPE_ITEM,
> +					   item,
> +					   "Invalid IPv4 spec or mask.");
> +				return 0;
> +			}

There are a lot of checks of this kind in this function; could we just check "item->spec && item->mask" once before the switch {}?
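For example, a single up-front pass could look like the rough sketch below (illustrative only; item types that legitimately allow an empty spec/mask would need to be skipped):

static int
ice_check_spec_mask(const struct rte_flow_item pattern[],
		    struct rte_flow_error *error)
{
	const struct rte_flow_item *item;

	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		if (item->type == RTE_FLOW_ITEM_TYPE_VOID)
			continue;
		/* every remaining item must carry both spec and mask */
		if (!item->spec || !item->mask)
			return rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM, item,
					"Invalid spec or mask.");
	}
	return 0;
}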

> +
> +			/* Check IPv4 mask and update input set */
> +			if (ipv4_mask->hdr.version_ihl ||
> +			    ipv4_mask->hdr.total_length ||
> +			    ipv4_mask->hdr.packet_id ||
> +			    ipv4_mask->hdr.hdr_checksum) {
> +				rte_flow_error_set(error, EINVAL,
> +					   RTE_FLOW_ERROR_TYPE_ITEM,
> +					   item,
> +					   "Invalid IPv4 mask.");
> +				return 0;
> +			}
> +
> +			if (outer_ip) {
> +				if (ipv4_mask->hdr.src_addr == UINT32_MAX)
> +					input_set |= ICE_INSET_IPV4_SRC;
> +				if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
> +					input_set |= ICE_INSET_IPV4_DST;
> +				if (ipv4_mask->hdr.type_of_service ==
> UINT8_MAX)
> +					input_set |= ICE_INSET_IPV4_TOS;
> +				if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
> +					input_set |= ICE_INSET_IPV4_TTL;
> +				if (ipv4_mask->hdr.fragment_offset == 0)
> +					input_set |= ICE_INSET_IPV4_PROTO;
> +				outer_ip = false;
> +			} else {
> +				if (ipv4_mask->hdr.src_addr == UINT32_MAX)
> +					input_set |=
> ICE_INSET_TUN_IPV4_SRC;
> +				if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
> +					input_set |=
> ICE_INSET_TUN_IPV4_DST;
> +				if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
> +					input_set |= ICE_INSET_TUN_IPV4_TTL;
> +				if (ipv4_mask->hdr.next_proto_id ==
> UINT8_MAX)
> +					input_set |=
> ICE_INSET_TUN_IPV4_PROTO;
> +			}
> +			break;
> +		case RTE_FLOW_ITEM_TYPE_IPV6:
> +			ipv6_spec = item->spec;
> +			ipv6_mask = item->mask;
> +
> +			if (!(ipv6_spec && ipv6_mask)) {
> +				rte_flow_error_set(error, EINVAL,
> +					RTE_FLOW_ERROR_TYPE_ITEM,
> +					item, "Invalid IPv6 spec or mask");
> +				return 0;
> +			}
> +
> +			if (ipv6_mask->hdr.payload_len ||
> +			    ipv6_mask->hdr.vtc_flow) {
> +				rte_flow_error_set(error, EINVAL,
> +					   RTE_FLOW_ERROR_TYPE_ITEM,
> +					   item,
> +					   "Invalid IPv6 mask");
> +				return 0;
> +			}
> +
> +			if (outer_ip) {
> +				if (!memcmp(ipv6_mask->hdr.src_addr,

[...]

> +						   item,
> +						   "Invalid ICMP mask");
> +				return 0;
> +			}
> +
> +			if (icmp_mask->hdr.icmp_type == UINT8_MAX)
> +				input_set |= ICE_INSET_ICMP;
> +			break;
> +		case RTE_FLOW_ITEM_TYPE_ICMP6:
> +			icmp6_mask = item->mask;
> +			if (icmp6_mask->code ||
> +			    icmp6_mask->checksum) {
> +				rte_flow_error_set(error, EINVAL,
> +
> RTE_FLOW_ERROR_TYPE_ITEM,
> +						   item,
> +						   "Invalid ICMP6 mask");
> +				return 0;
> +			}
> +
> +			if (icmp6_mask->type == UINT8_MAX)
> +			input_set |= ICE_INSET_ICMP6;

Add a '\t' for indentation.

> +			break;
> +		default:
> +			rte_flow_error_set(error, EINVAL,
> +					   RTE_FLOW_ERROR_TYPE_ITEM,
> +					   item,
> +					   "Invalid mask no exist");
> +			break;
> +		}
> +	}
> +	return input_set;
> +}
> +
> +static int ice_flow_valid_inset(const struct rte_flow_item pattern[],
> +			uint64_t inset, struct rte_flow_error *error)
> +{
> +	uint64_t fields;
> +
> +	/* get valid field */
> +	fields = ice_get_flow_field(pattern, error);
> +	if ((!fields) || (fields && (!inset))) {
> +		rte_flow_error_set(error, EINVAL,
> +				   RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
> +				   pattern,
> +				   "Invalid input set");
> +		return -rte_errno;
> +	}
> +
> +	return 0;
> +}
> +
> +static int ice_flow_valid_action(const struct rte_flow_action *actions,
> +				       struct rte_flow_error *error)
> +{
> +	switch (actions->type) {
> +	case RTE_FLOW_ACTION_TYPE_QUEUE:
> +		break;
> +	case RTE_FLOW_ACTION_TYPE_DROP:
> +		break;
> +	default:
> +		rte_flow_error_set(error, EINVAL,
> +				   RTE_FLOW_ERROR_TYPE_ACTION, actions,
> +				   "Invalid action.");
> +		return -rte_errno;
> +	}
> +
> +	return 0;
> +}
> +
> +static int
> +ice_flow_validate(__rte_unused struct rte_eth_dev *dev,
> +		   const struct rte_flow_attr *attr,
> +		   const struct rte_flow_item pattern[],
> +		   const struct rte_flow_action actions[],
> +		   struct rte_flow_error *error)
> +{
> +	uint64_t inset = 0;
> +	int ret = ICE_ERR_NOT_SUPPORTED;
> +
> +	if (!pattern) {
> +		rte_flow_error_set(error, EINVAL,
> RTE_FLOW_ERROR_TYPE_ITEM_NUM,
> +				   NULL, "NULL pattern.");
> +		return -rte_errno;
> +	}
> +
> +	if (!actions) {
> +		rte_flow_error_set(error, EINVAL,
> +				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
> +				   NULL, "NULL action.");
> +		return -rte_errno;
> +	}
> +
> +	if (!attr) {
> +		rte_flow_error_set(error, EINVAL,
> +				   RTE_FLOW_ERROR_TYPE_ATTR,
> +				   NULL, "NULL attribute.");
> +		return -rte_errno;
> +	}
> +
> +	ret = ice_flow_valid_attr(attr, error);
> +	if (!ret)
> +		return ret;
> +
> +	inset = ice_flow_valid_pattern(pattern, error);
> +	if (!inset)
> +		return -rte_errno;
> +
> +	ret = ice_flow_valid_inset(pattern, inset, error);
> +	if (ret)
> +		return ret;
> +
> +	ret = ice_flow_valid_action(actions, error);
> +	if (ret)
> +		return ret;
> +
> +	return 0;
> +}
> +
> +static struct rte_flow *
> +ice_flow_create(struct rte_eth_dev *dev,
> +		 const struct rte_flow_attr *attr,
> +		 const struct rte_flow_item pattern[],
> +		 const struct rte_flow_action actions[],
> +		 struct rte_flow_error *error)
> +{
> +	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
> +	struct rte_flow *flow = NULL;
> +	int ret;
> +
> +	flow = rte_zmalloc("ice_flow", sizeof(struct rte_flow), 0);
> +	if (!flow) {
> +		rte_flow_error_set(error, ENOMEM,
> +				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
> +				   "Failed to allocate memory");
> +		return flow;
> +	}
> +
> +	ret = ice_flow_validate(dev, attr, pattern, actions, error);
> +	if (ret < 0)
> +		return NULL;
> +
> +	ret = ice_create_switch_filter(pf, pattern, actions, flow, error);
> +	if (ret)
> +		goto free_flow;
> +
> +	TAILQ_INSERT_TAIL(&pf->flow_list, flow, node);
> +	return flow;
> +
> +free_flow:
> +	rte_flow_error_set(error, -ret,
> +			   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
> +			   "Failed to create flow.");
> +	rte_free(flow);
> +	return NULL;
> +}
> +
> +static int
> +ice_flow_destroy(struct rte_eth_dev *dev,
> +		 struct rte_flow *flow,
> +		 struct rte_flow_error *error)
> +{
> +	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
> +	int ret = 0;
> +
> +	ret = ice_destroy_switch_filter(pf, flow);
> +
> +	if (!ret) {
> +		TAILQ_REMOVE(&pf->flow_list, flow, node);
> +		rte_free(flow);
> +	} else
> +		rte_flow_error_set(error, -ret,
> +				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
> +				   "Failed to destroy flow.");
> +
> +	return ret;
> +}
> +
> +static int
> +ice_flow_flush(struct rte_eth_dev *dev,
> +	       struct rte_flow_error *error)
> +{
> +	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
> +	struct rte_flow *p_flow;
> +	int ret;
> +
> +	TAILQ_FOREACH(p_flow, &pf->flow_list, node) {
> +		ret = ice_flow_destroy(dev, p_flow, error);
> +		if (ret) {
> +			rte_flow_error_set(error, -ret,
> +					   RTE_FLOW_ERROR_TYPE_HANDLE,
> NULL,
> +					   "Failed to flush SW flows.");
> +			return -rte_errno;
> +		}
> +	}
> +
> +	return ret;
> +}
> diff --git a/drivers/net/ice/ice_generic_flow.h
> b/drivers/net/ice/ice_generic_flow.h
> new file mode 100644
> index 0000000..46c3461
> --- /dev/null
> +++ b/drivers/net/ice/ice_generic_flow.h
> @@ -0,0 +1,404 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2018 Intel Corporation

s/2018/2019/g

> + */
> +
> +#ifndef _ICE_GENERIC_FLOW_H_
> +#define _ICE_GENERIC_FLOW_H_
> +
> +#include <rte_flow_driver.h>
> +
> +struct ice_flow_pattern {
[...]

> +	RTE_FLOW_ITEM_TYPE_IPV4,
> +	RTE_FLOW_ITEM_TYPE_UDP,
> +	RTE_FLOW_ITEM_TYPE_ETH,
> +	RTE_FLOW_ITEM_TYPE_IPV6,
> +	RTE_FLOW_ITEM_TYPE_SCTP,
> +	RTE_FLOW_ITEM_TYPE_END,
> +};
> +
> +static enum rte_flow_item_type pattern_ipv4_tunnel_eth_ipv6_icmp[] = {
> +	RTE_FLOW_ITEM_TYPE_ETH,
> +	RTE_FLOW_ITEM_TYPE_IPV4,
> +	RTE_FLOW_ITEM_TYPE_UDP,
> +	RTE_FLOW_ITEM_TYPE_ETH,
> +	RTE_FLOW_ITEM_TYPE_IPV6,
> +	RTE_FLOW_ITEM_TYPE_ICMP,
> +	RTE_FLOW_ITEM_TYPE_END,
> +};
> +
> +static struct ice_flow_pattern ice_supported_patterns[] = {
> +	{pattern_ethertype, INSET_ETHER},
> +	{pattern_ipv4, INSET_MAC_IPV4},
> +	{pattern_ipv4_udp, INSET_MAC_IPV4_L4},
> +	{pattern_ipv4_sctp, INSET_MAC_IPV4_L4},
> +	{pattern_ipv4_tcp, INSET_MAC_IPV4_L4},
> +	{pattern_ipv4_icmp, INSET_MAC_IPV4_ICMP},
> +	{pattern_ipv6, INSET_MAC_IPV6},
> +	{pattern_ipv6_udp, INSET_MAC_IPV6_L4},
> +	{pattern_ipv6_sctp, INSET_MAC_IPV6_L4},
> +	{pattern_ipv6_tcp, INSET_MAC_IPV6_L4},
> +	{pattern_ipv6_icmp6, INSET_MAC_IPV6_ICMP},
> +	{pattern_ipv4_tunnel_ipv4, INSET_TUNNEL_IPV4_TYPE1},
> +	{pattern_ipv4_tunnel_ipv4_udp, INSET_TUNNEL_IPV4_TYPE2},
> +	{pattern_ipv4_tunnel_ipv4_tcp, INSET_TUNNEL_IPV4_TYPE2},
> +	{pattern_ipv4_tunnel_ipv4_sctp, INSET_TUNNEL_IPV4_TYPE2},
> +	{pattern_ipv4_tunnel_ipv4_icmp, INSET_TUNNEL_IPV4_TYPE3},
> +	{pattern_ipv4_tunnel_eth_ipv4, INSET_TUNNEL_IPV4_TYPE1},
> +	{pattern_ipv4_tunnel_eth_ipv4_udp, INSET_TUNNEL_IPV4_TYPE2},
> +	{pattern_ipv4_tunnel_eth_ipv4_tcp, INSET_TUNNEL_IPV4_TYPE2},
> +	{pattern_ipv4_tunnel_eth_ipv4_sctp, INSET_TUNNEL_IPV4_TYPE2},
> +	{pattern_ipv4_tunnel_eth_ipv4_icmp, INSET_TUNNEL_IPV4_TYPE3},
> +	{pattern_ipv4_tunnel_ipv6, INSET_TUNNEL_IPV6_TYPE1},
> +	{pattern_ipv4_tunnel_ipv6_udp, INSET_TUNNEL_IPV6_TYPE2},
> +	{pattern_ipv4_tunnel_ipv6_tcp, INSET_TUNNEL_IPV6_TYPE2},
> +	{pattern_ipv4_tunnel_ipv6_sctp, INSET_TUNNEL_IPV6_TYPE2},
> +	{pattern_ipv4_tunnel_ipv6_icmp, INSET_TUNNEL_IPV6_TYPE3},
> +	{pattern_ipv4_tunnel_eth_ipv6, INSET_TUNNEL_IPV6_TYPE1},
> +	{pattern_ipv4_tunnel_eth_ipv6_udp, INSET_TUNNEL_IPV6_TYPE2},
> +	{pattern_ipv4_tunnel_eth_ipv6_tcp, INSET_TUNNEL_IPV6_TYPE2},
> +	{pattern_ipv4_tunnel_eth_ipv6_sctp, INSET_TUNNEL_IPV6_TYPE2},
> +	{pattern_ipv4_tunnel_eth_ipv6_icmp, INSET_TUNNEL_IPV6_TYPE3},
> +};
> +
> +#endif
> --
> 2.9.5

I share Beilei's concern about the duplication: some flow validation work is done in this patch, while the switch filter part (1/3) also implements some of the same checks.

BRs,
Xiao


^ permalink raw reply	[flat|nested] 73+ messages in thread

* Re: [dpdk-dev] [PATCH v2 1/3] net/ice: enable switch filter
  2019-06-17  8:51       ` Zhao1, Wei
@ 2019-06-18  1:50         ` Xing, Beilei
  0 siblings, 0 replies; 73+ messages in thread
From: Xing, Beilei @ 2019-06-18  1:50 UTC (permalink / raw)
  To: Zhao1, Wei, Yang, Qiming, dev

Hi Wei,

> -----Original Message-----
> From: Zhao1, Wei
> Sent: Monday, June 17, 2019 4:52 PM
> To: Xing, Beilei <beilei.xing@intel.com>; Yang, Qiming
> <qiming.yang@intel.com>; dev@dpdk.org
> Subject: RE: [dpdk-dev] [PATCH v2 1/3] net/ice: enable switch filter
> 
> Hi, Beilei
> 
> 
> > -----Original Message-----
> > From: Xing, Beilei
> > Sent: Monday, June 17, 2019 1:27 PM
> > To: Yang, Qiming <qiming.yang@intel.com>; dev@dpdk.org
> > Cc: Zhao1, Wei <wei.zhao1@intel.com>
> > Subject: RE: [dpdk-dev] [PATCH v2 1/3] net/ice: enable switch filter
> >
> >
> >
> > ...
> >
> > > +
> > > +/* By now ice switch filter action code implement only
> > > +* supports QUEUE or DROP.
> > > +*/
> > > +static int
> > > +ice_parse_switch_action(struct ice_pf *pf,
> > > +				 const struct rte_flow_action *actions,
> > > +				 struct rte_flow_error *error,
> > > +				 struct ice_adv_rule_info *rule_info) {
> > > +	struct ice_hw *hw = ICE_PF_TO_HW(pf);
> > > +	struct ice_vsi *vsi = pf->main_vsi;
> > > +	const struct rte_flow_action *act;
> > > +	const struct rte_flow_action_queue *act_q;
> > > +	uint16_t base_queue, index = 0;
> > > +	uint32_t reg;
> > > +
> > > +	/* Check if the first non-void action is QUEUE or DROP. */
> > > +	NEXT_ITEM_OF_ACTION(act, actions, index);
> > > +	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
> > > +	    act->type != RTE_FLOW_ACTION_TYPE_DROP) {
> > > +		rte_flow_error_set(error, EINVAL,
> > > RTE_FLOW_ERROR_TYPE_ACTION,
> > > +				   act, "Not supported action.");
> > > +		return -rte_errno;
> > > +	}
> > > +	reg = ICE_READ_REG(hw, PFLAN_RX_QALLOC);
> > > +	if (reg & PFLAN_RX_QALLOC_VALID_M) {
> > > +		base_queue = reg & PFLAN_RX_QALLOC_FIRSTQ_M;
> > > +	} else {
> > > +		rte_flow_error_set(error, EINVAL,
> > > +			RTE_FLOW_ERROR_TYPE_ACTION,
> > > +			act, "Invalid queue register");
> > > +		return -rte_errno;
> > > +	}
> > > +	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
> > > +		act_q = act->conf;
> > > +		rule_info->sw_act.fltr_act = ICE_FWD_TO_Q;
> > > +		rule_info->sw_act.fwd_id.q_id = base_queue + act_q->index;
> > > +		if (act_q->index >= pf->dev_data->nb_rx_queues) {
> > > +			rte_flow_error_set(error, EINVAL,
> > > +				RTE_FLOW_ERROR_TYPE_ACTION,
> > > +				act, "Invalid queue ID for"
> > > +				" switch filter.");
> > > +			return -rte_errno;
> > > +		}
> > > +	} else {
> > > +		rule_info->sw_act.fltr_act = ICE_DROP_PACKET;
> > > +	}
> > > +
> > > +	rule_info->sw_act.vsi_handle = vsi->idx;
> > > +	rule_info->rx = 1;
> > > +	rule_info->sw_act.src = vsi->idx;
> > > +
> > > +	/* Check if the next non-void item is END */
> > > +	index++;
> > > +	NEXT_ITEM_OF_ACTION(act, actions, index);
> > > +	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
> > > +		rte_flow_error_set(error, EINVAL,
> > > RTE_FLOW_ERROR_TYPE_ACTION,
> > > +				   act, "Not supported action.");
> > > +		return -rte_errno;
> > > +	}
> > > +
> > > +	return 0;
> > > +}
> >
> >
> > How about using a supported array to replace NEXT_ITEM_OF_ACTION? Just
> > like the pattern.
> 
> This doesn't seem to need changing; i40e is also implemented this way.

The code in i40e is not perfect; we can try to improve our PMD in the new driver.
I think a supported-actions array is clearer and friendlier, what do you think?
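Something along these lines, for example (rough sketch only; the names below
are made up for illustration):

	static const enum rte_flow_action_type ice_sw_supported_actions[] = {
		RTE_FLOW_ACTION_TYPE_VOID,
		RTE_FLOW_ACTION_TYPE_QUEUE,
		RTE_FLOW_ACTION_TYPE_DROP,
	};

	static bool
	ice_sw_action_is_supported(enum rte_flow_action_type type)
	{
		uint32_t i;

		for (i = 0; i < RTE_DIM(ice_sw_supported_actions); i++)
			if (ice_sw_supported_actions[i] == type)
				return true;
		return false;
	}

Then the action parser could simply walk the actions array, skip VOID, and
reject anything not in the table, instead of tracking an index with
NEXT_ITEM_OF_ACTION.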


^ permalink raw reply	[flat|nested] 73+ messages in thread

* Re: [dpdk-dev] [PATCH v2 1/3] net/ice: enable switch filter
  2019-06-17  8:28         ` Wang, Xiao W
@ 2019-06-18  1:57           ` Zhao1, Wei
  0 siblings, 0 replies; 73+ messages in thread
From: Zhao1, Wei @ 2019-06-18  1:57 UTC (permalink / raw)
  To: Wang, Xiao W, Yang, Qiming, dev



> -----Original Message-----
> From: Wang, Xiao W
> Sent: Monday, June 17, 2019 4:28 PM
> To: Zhao1, Wei <wei.zhao1@intel.com>; Yang, Qiming
> <qiming.yang@intel.com>; dev@dpdk.org
> Subject: RE: [dpdk-dev] [PATCH v2 1/3] net/ice: enable switch filter
> 
> Hi Wei,
> 
> > -----Original Message-----
> > From: Zhao1, Wei
> > Sent: Friday, June 14, 2019 5:47 PM
> > To: Wang, Xiao W <xiao.w.wang@intel.com>; Yang, Qiming
> > <qiming.yang@intel.com>; dev@dpdk.org
> > Subject: RE: [dpdk-dev] [PATCH v2 1/3] net/ice: enable switch filter
> >
> > Hi, xiao
> >
> > > -----Original Message-----
> > > From: Wang, Xiao W
> > > Sent: Thursday, June 13, 2019 4:24 PM
> > > To: Yang, Qiming <qiming.yang@intel.com>; dev@dpdk.org
> > > Cc: Zhao1, Wei <wei.zhao1@intel.com>
> > > Subject: RE: [dpdk-dev] [PATCH v2 1/3] net/ice: enable switch filter
> > >
> > > Hi,
> > >
> > > > -----Original Message-----
> > > > From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Qiming Yang
> > > > Sent: Wednesday, June 12, 2019 3:50 PM
> > > > To: dev@dpdk.org
> > > > Cc: Zhao1, Wei <wei.zhao1@intel.com>
> > > > Subject: [dpdk-dev] [PATCH v2 1/3] net/ice: enable switch filter
> > > >
> > > > From: wei zhao <wei.zhao1@intel.com>
> > > >
> > > > The patch enables the backend of rte_flow. It transfers
> > > > rte_flow_xxx to device specific data structure and configures
> > > > packet process engine's binary classifier
> > > > (switch) properly.
> > > >
> > > > Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
> > > > ---
> > > >  drivers/net/ice/Makefile            |   1 +
> > > >  drivers/net/ice/ice_ethdev.h        |   6 +
> > > >  drivers/net/ice/ice_switch_filter.c | 502
> > > > ++++++++++++++++++++++++++++++++++++
> > > >  drivers/net/ice/ice_switch_filter.h |  28 ++
> > > >  drivers/net/ice/meson.build         |   3 +-
> > > >  5 files changed, 539 insertions(+), 1 deletion(-)  create mode
> > > > 100644 drivers/net/ice/ice_switch_filter.c
> > > >  create mode 100644 drivers/net/ice/ice_switch_filter.h
> > > >
> > > > diff --git a/drivers/net/ice/Makefile b/drivers/net/ice/Makefile
> > > > index
> > > > 0e5c55e..b10d826 100644
> > > > --- a/drivers/net/ice/Makefile
> > > > +++ b/drivers/net/ice/Makefile
> > > > @@ -60,6 +60,7 @@ ifeq ($(CONFIG_RTE_ARCH_X86), y)
> > > >  SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_rxtx_vec_sse.c  endif
> > > >
> > > > +SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_switch_filter.c
> > > >  ifeq ($(findstring
> > > >
> > RTE_MACHINE_CPUFLAG_AVX2,$(CFLAGS)),RTE_MACHINE_CPUFLAG_AVX2)
> > > >  	CC_AVX2_SUPPORT=1
> > > >  else
> > > > diff --git a/drivers/net/ice/ice_ethdev.h
> > > > b/drivers/net/ice/ice_ethdev.h index 1385afa..67a358a 100644
> > > > --- a/drivers/net/ice/ice_ethdev.h
> > > > +++ b/drivers/net/ice/ice_ethdev.h
> > > > @@ -234,6 +234,12 @@ struct ice_vsi {
> > > >  	bool offset_loaded;
> > > >  };
> > > >
> > > > +/* Struct to store flow created. */ struct rte_flow {
> > > > +	TAILQ_ENTRY(rte_flow) node;
> > > > +void *rule;
> > > > +};
> > > > +
> > > >  struct ice_pf {
> > > >  	struct ice_adapter *adapter; /* The adapter this PF associate to */
> > > >  	struct ice_vsi *main_vsi; /* pointer to main VSI structure */
> > > > diff --git a/drivers/net/ice/ice_switch_filter.c
> > > > b/drivers/net/ice/ice_switch_filter.c
> > > > new file mode 100644
> > > > index 0000000..e679675
> > > > --- /dev/null
> > > > +++ b/drivers/net/ice/ice_switch_filter.c
> [...]
> 
> > > > +			RTE_FLOW_ITEM_TYPE_END; item++, i++) {
> > >
> > > It seems we don't need the "i" variable.
> >
> > Ok, Updated in v3
> >
> > >
> > > > +		item_type = item->type;
> > > > +
> > > > +		switch (item_type) {
> > > > +		case RTE_FLOW_ITEM_TYPE_ETH:
> > > > +			eth_spec = item->spec;
> > > > +			eth_mask = item->mask;
> > > > +			if (eth_spec && eth_mask) {
> > > > +				list[t].type = (tun_type == ICE_NON_TUN) ?
> > > > +					ICE_MAC_OFOS : ICE_MAC_IL;
> > > > +				for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
> > > > +					if (eth_mask->src.addr_bytes[j] ==
> > > > +								UINT8_MAX) {
> > > > +						list[t].h_u.eth_hdr.
> > > > +							src_addr[j] =
> > > > +						eth_spec->src.addr_bytes[j];
> > > > +						list[t].m_u.eth_hdr.
> > > > +							src_addr[j] =
> > > > +						eth_mask->src.addr_bytes[j];
> > > > +					}
> > > > +					if (eth_mask->dst.addr_bytes[j] ==
> > > > +								UINT8_MAX) {
> > > > +						list[t].h_u.eth_hdr.
> > > > +							dst_addr[j] =
> > > > +						eth_spec->dst.addr_bytes[j];
> > > > +						list[t].m_u.eth_hdr.
> > > > +							dst_addr[j] =
> > > > +						eth_mask->dst.addr_bytes[j];
> > > > +					}
> > > > +				}
> > > > +				if (eth_mask->type == UINT16_MAX) {
> > > > +					list[t].h_u.eth_hdr.ethtype_id =
> > > > +					rte_be_to_cpu_16(eth_spec->type);
> > > > +					list[t].m_u.eth_hdr.ethtype_id =
> > > > +						UINT16_MAX;
> > > > +				}
> > > > +				t++;
> > >
> > > A lot of "t++" below, can we move it outside the switch{ } to have
> > > only one
> > "t++"?
> >
> > For now we can not, because the shared code can not handle the
> > (!eth_spec && !eth_mask) case; if we do t++ for that case, that item will
> > be put into list[t], and the shared code will report an error.
> 
> The blow "else if" branch has no effect at all, we can just remove it.

I suggest not, because ND is fixing this issue now; the updated shared code will support this soon in this release.

  
> 
> BRs,
> Xiao
> 
> > >
> > > > +			} else if (!eth_spec && !eth_mask) {
> > > > +				list[t].type = (tun_type == ICE_NON_TUN) ?
> > > > +					ICE_MAC_OFOS : ICE_MAC_IL;
> > > > +			}
> > > > +			break;
> [...]

^ permalink raw reply	[flat|nested] 73+ messages in thread

* Re: [dpdk-dev] [PATCH v2 1/3] net/ice: enable switch filter
  2019-06-12  7:50   ` [dpdk-dev] [PATCH v2 1/3] net/ice: enable switch filter Qiming Yang
  2019-06-13  8:23     ` Wang, Xiao W
  2019-06-17  5:27     ` Xing, Beilei
@ 2019-06-18  9:40     ` Ye Xiaolong
  2019-06-19  3:06       ` Zhao1, Wei
  2 siblings, 1 reply; 73+ messages in thread
From: Ye Xiaolong @ 2019-06-18  9:40 UTC (permalink / raw)
  To: Qiming Yang; +Cc: dev, wei zhao

On 06/12, Qiming Yang wrote:
>From: wei zhao <wei.zhao1@intel.com>
>
>The patch enables the backend of rte_flow. It transfers
>rte_flow_xxx to device specific data structure and
>configures packet process engine's binary classifier
>(switch) properly.
>
>Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
>---
> drivers/net/ice/Makefile            |   1 +
> drivers/net/ice/ice_ethdev.h        |   6 +
> drivers/net/ice/ice_switch_filter.c | 502 ++++++++++++++++++++++++++++++++++++
> drivers/net/ice/ice_switch_filter.h |  28 ++
> drivers/net/ice/meson.build         |   3 +-
> 5 files changed, 539 insertions(+), 1 deletion(-)
> create mode 100644 drivers/net/ice/ice_switch_filter.c
> create mode 100644 drivers/net/ice/ice_switch_filter.h
>
>diff --git a/drivers/net/ice/Makefile b/drivers/net/ice/Makefile
>index 0e5c55e..b10d826 100644
>--- a/drivers/net/ice/Makefile
>+++ b/drivers/net/ice/Makefile
>@@ -60,6 +60,7 @@ ifeq ($(CONFIG_RTE_ARCH_X86), y)
> SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_rxtx_vec_sse.c
> endif
> 
>+SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_switch_filter.c
> ifeq ($(findstring RTE_MACHINE_CPUFLAG_AVX2,$(CFLAGS)),RTE_MACHINE_CPUFLAG_AVX2)
> 	CC_AVX2_SUPPORT=1
> else
>diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
>index 1385afa..67a358a 100644
>--- a/drivers/net/ice/ice_ethdev.h
>+++ b/drivers/net/ice/ice_ethdev.h
>@@ -234,6 +234,12 @@ struct ice_vsi {
> 	bool offset_loaded;
> };
> 
>+/* Struct to store flow created. */
>+struct rte_flow {
>+	TAILQ_ENTRY(rte_flow) node;
>+void *rule;

Minor nit: An indentation is needed before void.

Thanks,
Xiaolong

^ permalink raw reply	[flat|nested] 73+ messages in thread

* Re: [dpdk-dev] [PATCH v2 1/3] net/ice: enable switch filter
  2019-06-18  9:40     ` Ye Xiaolong
@ 2019-06-19  3:06       ` Zhao1, Wei
  0 siblings, 0 replies; 73+ messages in thread
From: Zhao1, Wei @ 2019-06-19  3:06 UTC (permalink / raw)
  To: Ye, Xiaolong, Yang, Qiming; +Cc: dev


Hi, Xiaolong

> -----Original Message-----
> From: Ye, Xiaolong
> Sent: Tuesday, June 18, 2019 5:41 PM
> To: Yang, Qiming <qiming.yang@intel.com>
> Cc: dev@dpdk.org; Zhao1, Wei <wei.zhao1@intel.com>
> Subject: Re: [dpdk-dev] [PATCH v2 1/3] net/ice: enable switch filter
> 
> On 06/12, Qiming Yang wrote:
> >From: wei zhao <wei.zhao1@intel.com>
> >
> >The patch enables the backend of rte_flow. It transfers rte_flow_xxx to
> >device specific data structure and configures packet process engine's
> >binary classifier
> >(switch) properly.
> >
> >Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
> >---
> > drivers/net/ice/Makefile            |   1 +
> > drivers/net/ice/ice_ethdev.h        |   6 +
> > drivers/net/ice/ice_switch_filter.c | 502
> >++++++++++++++++++++++++++++++++++++
> > drivers/net/ice/ice_switch_filter.h |  28 ++
> > drivers/net/ice/meson.build         |   3 +-
> > 5 files changed, 539 insertions(+), 1 deletion(-)  create mode 100644
> >drivers/net/ice/ice_switch_filter.c
> > create mode 100644 drivers/net/ice/ice_switch_filter.h
> >
> >diff --git a/drivers/net/ice/Makefile b/drivers/net/ice/Makefile index
> >0e5c55e..b10d826 100644
> >--- a/drivers/net/ice/Makefile
> >+++ b/drivers/net/ice/Makefile
> >@@ -60,6 +60,7 @@ ifeq ($(CONFIG_RTE_ARCH_X86), y)
> > SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_rxtx_vec_sse.c  endif
> >
> >+SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_switch_filter.c
> > ifeq ($(findstring
> RTE_MACHINE_CPUFLAG_AVX2,$(CFLAGS)),RTE_MACHINE_CPUFLAG_AVX2)
> > 	CC_AVX2_SUPPORT=1
> > else
> >diff --git a/drivers/net/ice/ice_ethdev.h
> >b/drivers/net/ice/ice_ethdev.h index 1385afa..67a358a 100644
> >--- a/drivers/net/ice/ice_ethdev.h
> >+++ b/drivers/net/ice/ice_ethdev.h
> >@@ -234,6 +234,12 @@ struct ice_vsi {
> > 	bool offset_loaded;
> > };
> >
> >+/* Struct to store flow created. */
> >+struct rte_flow {
> >+	TAILQ_ENTRY(rte_flow) node;
> >+void *rule;
> 
> Minor nit: An indentation is needed before void.

OK, will update in v3.
> 
> Thanks,
> Xiaolong

^ permalink raw reply	[flat|nested] 73+ messages in thread

* [dpdk-dev] [PATCH v3 0/3] Enable rte_flow API in ice driver
  2019-06-03  9:05 [dpdk-dev] [PATCH 0/2] Enable rte_flow API in ice driver Qiming Yang
                   ` (2 preceding siblings ...)
  2019-06-12  7:50 ` [dpdk-dev] [PATCH v2 0/3] Enable rte_flow API in ice driver Qiming Yang
@ 2019-06-20  5:34 ` Qiming Yang
  2019-06-20  5:34   ` [dpdk-dev] [PATCH v3 1/3] net/ice: enable switch filter Qiming Yang
                     ` (2 more replies)
  2019-06-21  6:13 ` [dpdk-dev] [PATCH v4 0/3] Enable rte_flow API in ice driver Qiming Yang
                   ` (6 subsequent siblings)
  10 siblings, 3 replies; 73+ messages in thread
From: Qiming Yang @ 2019-06-20  5:34 UTC (permalink / raw)
  To: dev; +Cc: Qiming Yang

This patch set enables the backend of rte_flow, and the generic filter related functions in ice driver.
Supported flows include ipv4, tcpv4, udpv4, ipv6, tcpv6, udpv6, tunnel, etc.
This patch set depends on shared code update.

---
v2 changes:
 - added UDP tunnel port support.
 - fixed compile issue.
 - added document update.
v3 changes:
 - removed redundancy parser.
 - added License.
 - added VXLAN and NVGRE item support.

Qiming Yang (2):
  net/ice: add generic flow API
  net/ice: add UDP tunnel port support

wei zhao (1):
  net/ice: enable switch filter

 drivers/net/ice/Makefile            |   2 +
 drivers/net/ice/ice_ethdev.c        | 116 ++++++
 drivers/net/ice/ice_ethdev.h        |  12 +
 drivers/net/ice/ice_generic_flow.c  | 682 ++++++++++++++++++++++++++++++++++++
 drivers/net/ice/ice_generic_flow.h  | 654 ++++++++++++++++++++++++++++++++++
 drivers/net/ice/ice_switch_filter.c | 525 +++++++++++++++++++++++++++
 drivers/net/ice/ice_switch_filter.h |  24 ++
 drivers/net/ice/meson.build         |   4 +-
 8 files changed, 2018 insertions(+), 1 deletion(-)
 create mode 100644 drivers/net/ice/ice_generic_flow.c
 create mode 100644 drivers/net/ice/ice_generic_flow.h
 create mode 100644 drivers/net/ice/ice_switch_filter.c
 create mode 100644 drivers/net/ice/ice_switch_filter.h

-- 
2.9.5


^ permalink raw reply	[flat|nested] 73+ messages in thread

* [dpdk-dev] [PATCH v3 1/3] net/ice: enable switch filter
  2019-06-20  5:34 ` [dpdk-dev] [PATCH v3 0/3] Enable rte_flow API in ice driver Qiming Yang
@ 2019-06-20  5:34   ` Qiming Yang
  2019-06-20  9:01     ` Wang, Xiao W
  2019-06-20  5:34   ` [dpdk-dev] [PATCH v3 2/3] net/ice: add generic flow API Qiming Yang
  2019-06-20  5:34   ` [dpdk-dev] [PATCH v3 3/3] net/ice: add UDP tunnel port support Qiming Yang
  2 siblings, 1 reply; 73+ messages in thread
From: Qiming Yang @ 2019-06-20  5:34 UTC (permalink / raw)
  To: dev; +Cc: wei zhao

From: wei zhao <wei.zhao1@intel.com>

The patch enables the backend of rte_flow. It transfers
rte_flow_xxx to device specific data structure and
configures packet process engine's binary classifier
(switch) properly.

Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
---
 drivers/net/ice/Makefile            |   1 +
 drivers/net/ice/ice_ethdev.c        |  18 ++
 drivers/net/ice/ice_ethdev.h        |   7 +
 drivers/net/ice/ice_switch_filter.c | 525 ++++++++++++++++++++++++++++++++++++
 drivers/net/ice/ice_switch_filter.h |  24 ++
 drivers/net/ice/meson.build         |   3 +-
 6 files changed, 577 insertions(+), 1 deletion(-)
 create mode 100644 drivers/net/ice/ice_switch_filter.c
 create mode 100644 drivers/net/ice/ice_switch_filter.h

diff --git a/drivers/net/ice/Makefile b/drivers/net/ice/Makefile
index 0e5c55e..b10d826 100644
--- a/drivers/net/ice/Makefile
+++ b/drivers/net/ice/Makefile
@@ -60,6 +60,7 @@ ifeq ($(CONFIG_RTE_ARCH_X86), y)
 SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_rxtx_vec_sse.c
 endif
 
+SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_switch_filter.c
 ifeq ($(findstring RTE_MACHINE_CPUFLAG_AVX2,$(CFLAGS)),RTE_MACHINE_CPUFLAG_AVX2)
 	CC_AVX2_SUPPORT=1
 else
diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index 203d0a9..a94aa7e 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -1364,6 +1364,21 @@ static int ice_load_pkg(struct rte_eth_dev *dev)
 	return err;
 }
 
+static void
+ice_base_queue_get(struct ice_pf *pf)
+{
+	uint32_t reg;
+	struct ice_hw *hw = ICE_PF_TO_HW(pf);
+
+	reg = ICE_READ_REG(hw, PFLAN_RX_QALLOC);
+	if (reg & PFLAN_RX_QALLOC_VALID_M) {
+		pf->base_queue = reg & PFLAN_RX_QALLOC_FIRSTQ_M;
+	} else {
+		PMD_INIT_LOG(WARNING, "Failed to get Rx base queue"
+					" index");
+	}
+}
+
 static int
 ice_dev_init(struct rte_eth_dev *dev)
 {
@@ -1460,6 +1475,9 @@ ice_dev_init(struct rte_eth_dev *dev)
 	/* enable uio intr after callback register */
 	rte_intr_enable(intr_handle);
 
+	/* get base queue pairs index  in the device */
+	ice_base_queue_get(pf);
+
 	return 0;
 
 err_pf_setup:
diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index 1385afa..50b966c 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -234,6 +234,12 @@ struct ice_vsi {
 	bool offset_loaded;
 };
 
+/* Struct to store flow created. */
+struct rte_flow {
+	TAILQ_ENTRY(rte_flow) node;
+	void *rule;
+};
+
 struct ice_pf {
 	struct ice_adapter *adapter; /* The adapter this PF associate to */
 	struct ice_vsi *main_vsi; /* pointer to main VSI structure */
@@ -252,6 +258,7 @@ struct ice_pf {
 	uint16_t hash_lut_size; /* The size of hash lookup table */
 	uint16_t lan_nb_qp_max;
 	uint16_t lan_nb_qps; /* The number of queue pairs of LAN */
+	uint16_t base_queue; /* The base queue pairs index  in the device */
 	struct ice_hw_port_stats stats_offset;
 	struct ice_hw_port_stats stats;
 	/* internal packet statistics, it should be excluded from the total */
diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
new file mode 100644
index 0000000..48fe3d5
--- /dev/null
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -0,0 +1,525 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_eth_ctrl.h>
+#include <rte_tailq.h>
+#include <rte_flow_driver.h>
+
+#include "ice_logs.h"
+#include "base/ice_type.h"
+#include "ice_switch_filter.h"
+
+static int
+ice_parse_switch_filter(
+			const struct rte_flow_item pattern[],
+			const struct rte_flow_action actions[],
+			struct rte_flow_error *error,
+			struct ice_adv_rule_info *rule_info,
+			struct ice_adv_lkup_elem **lkup_list,
+			uint16_t *lkups_num)
+{
+	const struct rte_flow_item *item = pattern;
+	enum rte_flow_item_type item_type;
+	const struct rte_flow_item_eth *eth_spec, *eth_mask;
+	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
+	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
+	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
+	const struct rte_flow_item_udp *udp_spec, *udp_mask;
+	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
+	const struct rte_flow_item_nvgre  *nvgre_spec, *nvgre_mask;
+	const struct rte_flow_item_vxlan  *vxlan_spec, *vxlan_mask;
+	struct ice_adv_lkup_elem *list;
+	uint16_t j, t = 0;
+	uint16_t item_num = 0;
+	enum ice_sw_tunnel_type tun_type = ICE_NON_TUN;
+	uint16_t tunnel_valid = 0;
+
+	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+		if (item->type == RTE_FLOW_ITEM_TYPE_ETH ||
+			item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
+			item->type == RTE_FLOW_ITEM_TYPE_IPV6 ||
+			item->type == RTE_FLOW_ITEM_TYPE_UDP ||
+			item->type == RTE_FLOW_ITEM_TYPE_TCP ||
+			item->type == RTE_FLOW_ITEM_TYPE_SCTP ||
+			item->type == RTE_FLOW_ITEM_TYPE_VXLAN ||
+			item->type == RTE_FLOW_ITEM_TYPE_NVGRE)
+			item_num++;
+		if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN)
+			tun_type = ICE_SW_TUN_VXLAN;
+		if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE)
+			tun_type = ICE_SW_TUN_NVGRE;
+	}
+
+	list = rte_zmalloc(NULL, item_num * sizeof(*list), 0);
+	if (!list) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ITEM, actions,
+				   "No memory for PMD internal items");
+		goto out;
+	}
+	*lkup_list = list;
+
+	for (item = pattern; item->type !=
+			RTE_FLOW_ITEM_TYPE_END; item++) {
+		item_type = item->type;
+
+		switch (item_type) {
+		case RTE_FLOW_ITEM_TYPE_ETH:
+			eth_spec = item->spec;
+			eth_mask = item->mask;
+			if (eth_spec && eth_mask) {
+				list[t].type = (tun_type == ICE_NON_TUN) ?
+					ICE_MAC_OFOS : ICE_MAC_IL;
+				for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
+					if (eth_mask->src.addr_bytes[j] ==
+								UINT8_MAX) {
+						list[t].h_u.eth_hdr.
+							src_addr[j] =
+						eth_spec->src.addr_bytes[j];
+						list[t].m_u.eth_hdr.
+							src_addr[j] =
+						eth_mask->src.addr_bytes[j];
+					}
+					if (eth_mask->dst.addr_bytes[j] ==
+								UINT8_MAX) {
+						list[t].h_u.eth_hdr.
+							dst_addr[j] =
+						eth_spec->dst.addr_bytes[j];
+						list[t].m_u.eth_hdr.
+							dst_addr[j] =
+						eth_mask->dst.addr_bytes[j];
+					}
+				}
+				if (eth_mask->type == UINT16_MAX) {
+					list[t].h_u.eth_hdr.ethtype_id =
+					rte_be_to_cpu_16(eth_spec->type);
+					list[t].m_u.eth_hdr.ethtype_id =
+						UINT16_MAX;
+				}
+				t++;
+			} else if (!eth_spec && !eth_mask) {
+				list[t].type = (tun_type == ICE_NON_TUN) ?
+					ICE_MAC_OFOS : ICE_MAC_IL;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_IPV4:
+			ipv4_spec = item->spec;
+			ipv4_mask = item->mask;
+			if (ipv4_spec && ipv4_mask) {
+				list[t].type = (tun_type == ICE_NON_TUN) ?
+					ICE_IPV4_OFOS : ICE_IPV4_IL;
+				if (ipv4_mask->hdr.src_addr == UINT32_MAX) {
+					list[t].h_u.ipv4_hdr.src_addr =
+						ipv4_spec->hdr.src_addr;
+					list[t].m_u.ipv4_hdr.src_addr =
+						UINT32_MAX;
+				}
+				if (ipv4_mask->hdr.dst_addr == UINT32_MAX) {
+					list[t].h_u.ipv4_hdr.dst_addr =
+						ipv4_spec->hdr.dst_addr;
+					list[t].m_u.ipv4_hdr.dst_addr =
+						UINT32_MAX;
+				}
+				if (ipv4_mask->hdr.time_to_live == UINT8_MAX) {
+					list[t].h_u.ipv4_hdr.time_to_live =
+						ipv4_spec->hdr.time_to_live;
+					list[t].m_u.ipv4_hdr.time_to_live =
+						UINT8_MAX;
+				}
+				if (ipv4_mask->hdr.next_proto_id == UINT8_MAX) {
+					list[t].h_u.ipv4_hdr.protocol =
+						ipv4_spec->hdr.next_proto_id;
+					list[t].m_u.ipv4_hdr.protocol =
+						UINT8_MAX;
+				}
+				if (ipv4_mask->hdr.type_of_service ==
+						UINT8_MAX) {
+					list[t].h_u.ipv4_hdr.tos =
+						ipv4_spec->hdr.type_of_service;
+					list[t].m_u.ipv4_hdr.tos = UINT8_MAX;
+				}
+				t++;
+			} else if (!ipv4_spec && !ipv4_mask) {
+				list[t].type = (tun_type == ICE_NON_TUN) ?
+					ICE_IPV4_OFOS : ICE_IPV4_IL;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_IPV6:
+			ipv6_spec = item->spec;
+			ipv6_mask = item->mask;
+			if (ipv6_spec && ipv6_mask) {
+				list[t].type = (tun_type == ICE_NON_TUN) ?
+					ICE_IPV6_OFOS : ICE_IPV6_IL;
+				for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
+					if (ipv6_mask->hdr.src_addr[j] ==
+								UINT8_MAX) {
+						list[t].h_u.ice_ipv6_ofos_hdr.
+							src_addr[j] =
+						ipv6_spec->hdr.src_addr[j];
+						list[t].m_u.ice_ipv6_ofos_hdr.
+							src_addr[j] =
+						ipv6_mask->hdr.src_addr[j];
+					}
+					if (ipv6_mask->hdr.dst_addr[j] ==
+								UINT8_MAX) {
+						list[t].h_u.ice_ipv6_ofos_hdr.
+							dst_addr[j] =
+						ipv6_spec->hdr.dst_addr[j];
+						list[t].m_u.ice_ipv6_ofos_hdr.
+							dst_addr[j] =
+						ipv6_mask->hdr.dst_addr[j];
+					}
+				}
+				if (ipv6_mask->hdr.proto == UINT8_MAX) {
+					list[t].h_u.ice_ipv6_ofos_hdr.next_hdr =
+						ipv6_spec->hdr.proto;
+					list[t].m_u.ice_ipv6_ofos_hdr.next_hdr =
+						UINT8_MAX;
+				}
+				if (ipv6_mask->hdr.hop_limits == UINT8_MAX) {
+					list[t].h_u.ice_ipv6_ofos_hdr.
+					hop_limit = ipv6_spec->hdr.hop_limits;
+					list[t].m_u.ice_ipv6_ofos_hdr.
+						hop_limit  = UINT8_MAX;
+				}
+				t++;
+			} else if (!ipv6_spec && !ipv6_mask) {
+				list[t].type = (tun_type == ICE_NON_TUN) ?
+					ICE_IPV6_OFOS : ICE_IPV6_IL;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_UDP:
+			udp_spec = item->spec;
+			udp_mask = item->mask;
+			if (udp_spec && udp_mask) {
+				if (tun_type == ICE_SW_TUN_VXLAN &&
+						tunnel_valid == 0)
+					list[t].type = ICE_UDP_OF;
+				else
+					list[t].type = ICE_UDP_ILOS;
+				if (udp_mask->hdr.src_port == UINT16_MAX) {
+					list[t].h_u.l4_hdr.src_port =
+						udp_spec->hdr.src_port;
+					list[t].m_u.l4_hdr.src_port =
+						udp_mask->hdr.src_port;
+				}
+				if (udp_mask->hdr.dst_port == UINT16_MAX) {
+					list[t].h_u.l4_hdr.dst_port =
+						udp_spec->hdr.dst_port;
+					list[t].m_u.l4_hdr.dst_port =
+						udp_mask->hdr.dst_port;
+				}
+				t++;
+			} else if (!udp_spec && !udp_mask) {
+				list[t].type = ICE_UDP_ILOS;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_TCP:
+			tcp_spec = item->spec;
+			tcp_mask = item->mask;
+			if (tcp_spec && tcp_mask) {
+				list[t].type = ICE_TCP_IL;
+				if (tcp_mask->hdr.src_port == UINT16_MAX) {
+					list[t].h_u.l4_hdr.src_port =
+						tcp_spec->hdr.src_port;
+					list[t].m_u.l4_hdr.src_port =
+						tcp_mask->hdr.src_port;
+				}
+				if (tcp_mask->hdr.dst_port == UINT16_MAX) {
+					list[t].h_u.l4_hdr.dst_port =
+						tcp_spec->hdr.dst_port;
+					list[t].m_u.l4_hdr.dst_port =
+						tcp_mask->hdr.dst_port;
+				}
+				t++;
+			} else if (!tcp_spec && !tcp_mask) {
+				list[t].type = ICE_TCP_IL;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_SCTP:
+			sctp_spec = item->spec;
+			sctp_mask = item->mask;
+			if (sctp_spec && sctp_mask) {
+				list[t].type = ICE_SCTP_IL;
+				if (sctp_mask->hdr.src_port == UINT16_MAX) {
+					list[t].h_u.sctp_hdr.src_port =
+						sctp_spec->hdr.src_port;
+					list[t].m_u.sctp_hdr.src_port =
+						sctp_mask->hdr.src_port;
+				}
+				if (sctp_mask->hdr.dst_port == UINT16_MAX) {
+					list[t].h_u.sctp_hdr.dst_port =
+						sctp_spec->hdr.dst_port;
+					list[t].m_u.sctp_hdr.dst_port =
+						sctp_mask->hdr.dst_port;
+				}
+				t++;
+			} else if (!sctp_spec && !sctp_mask) {
+				list[t].type = ICE_SCTP_IL;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_VXLAN:
+			vxlan_spec = item->spec;
+			vxlan_mask = item->mask;
+			tunnel_valid = 1;
+			if (vxlan_spec && vxlan_mask) {
+				list[t].type = ICE_VXLAN;
+				if (vxlan_mask->vni[0] == UINT8_MAX &&
+					vxlan_mask->vni[1] == UINT8_MAX &&
+					vxlan_mask->vni[2] == UINT8_MAX) {
+					list[t].h_u.tnl_hdr.vni =
+						(vxlan_spec->vni[2] << 16) |
+						(vxlan_spec->vni[1] << 8) |
+						vxlan_spec->vni[0];
+					list[t].m_u.tnl_hdr.vni =
+						UINT32_MAX;
+				}
+				t++;
+			} else if (!vxlan_spec && !vxlan_mask) {
+				list[t].type = ICE_VXLAN;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_NVGRE:
+			nvgre_spec = item->spec;
+			nvgre_mask = item->mask;
+			tunnel_valid = 1;
+			if (nvgre_spec && nvgre_mask) {
+				list[t].type = ICE_NVGRE;
+				if (nvgre_mask->tni[0] == UINT8_MAX &&
+					nvgre_mask->tni[1] == UINT8_MAX &&
+					nvgre_mask->tni[2] == UINT8_MAX) {
+					list[t].h_u.nvgre_hdr.tni_flow =
+						(nvgre_spec->tni[2] << 16) |
+						(nvgre_spec->tni[1] << 8) |
+						nvgre_spec->tni[0];
+					list[t].m_u.nvgre_hdr.tni_flow =
+						UINT32_MAX;
+				}
+				t++;
+			} else if (!nvgre_spec && !nvgre_mask) {
+				list[t].type = ICE_NVGRE;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_VOID:
+		case RTE_FLOW_ITEM_TYPE_END:
+			break;
+
+		default:
+			rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ITEM, actions,
+				   "Invalid pattern item.");
+			goto out;
+		}
+	}
+
+	rule_info->tun_type = tun_type;
+	*lkups_num = t;
+
+	return 0;
+out:
+	return -rte_errno;
+}
+
+/* For now the ice switch filter action code only
+ * supports QUEUE or DROP.
+ */
+static int
+ice_parse_switch_action(struct ice_pf *pf,
+				 const struct rte_flow_action *actions,
+				 struct rte_flow_error *error,
+				 struct ice_adv_rule_info *rule_info)
+{
+	struct ice_vsi *vsi = pf->main_vsi;
+	const struct rte_flow_action_queue *act_q;
+	uint16_t base_queue;
+	const struct rte_flow_action *action;
+	enum rte_flow_action_type action_type;
+
+	base_queue = pf->base_queue;
+	for (action = actions; action->type !=
+			RTE_FLOW_ACTION_TYPE_END; action++) {
+		action_type = action->type;
+		switch (action_type) {
+		case RTE_FLOW_ACTION_TYPE_QUEUE:
+			act_q = action->conf;
+			rule_info->sw_act.fltr_act =
+				ICE_FWD_TO_Q;
+			rule_info->sw_act.fwd_id.q_id =
+				base_queue + act_q->index;
+			if (act_q->index >=
+				pf->dev_data->nb_rx_queues) {
+				rte_flow_error_set(error,
+					EINVAL,
+					RTE_FLOW_ERROR_TYPE_ACTION,
+					actions, "Invalid queue ID"
+					" for switch filter.");
+				return -rte_errno;
+			}
+			break;
+
+		case RTE_FLOW_ACTION_TYPE_DROP:
+			rule_info->sw_act.fltr_act =
+				ICE_DROP_PACKET;
+			break;
+
+		case RTE_FLOW_ACTION_TYPE_VOID:
+			break;
+
+		default:
+			rte_flow_error_set(error,
+				EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				actions,
+				"Invalid action type");
+			return -rte_errno;
+		}
+	}
+
+	rule_info->sw_act.vsi_handle = vsi->idx;
+	rule_info->rx = 1;
+	rule_info->sw_act.src = vsi->idx;
+
+	return 0;
+}
+
+static int
+ice_switch_rule_set(struct ice_pf *pf,
+			struct ice_adv_lkup_elem *list,
+			uint16_t lkups_cnt,
+			struct ice_adv_rule_info *rule_info,
+			struct rte_flow *flow,
+			struct rte_flow_error *error)
+{
+	struct ice_hw *hw = ICE_PF_TO_HW(pf);
+	int ret;
+	struct ice_rule_query_data rule_added = {0};
+	struct ice_rule_query_data *filter_ptr;
+
+	if (lkups_cnt > ICE_MAX_CHAIN_WORDS) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+			"item number too large for rule");
+		return -rte_errno;
+	}
+	if (!list) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+			"lookup list should not be NULL");
+		return -rte_errno;
+	}
+
+	ret = ice_add_adv_rule(hw, list, lkups_cnt, rule_info, &rule_added);
+
+	if (!ret) {
+		filter_ptr = rte_zmalloc("ice_switch_filter",
+			sizeof(struct ice_rule_query_data), 0);
+		if (!filter_ptr) {
+			PMD_DRV_LOG(ERR, "failed to allocate memory");
+			return -EINVAL;
+		}
+		flow->rule = filter_ptr;
+		rte_memcpy(filter_ptr,
+			&rule_added,
+			sizeof(struct ice_rule_query_data));
+	}
+
+	return ret;
+}
+
+int
+ice_create_switch_filter(struct ice_pf *pf,
+			const struct rte_flow_item pattern[],
+			const struct rte_flow_action actions[],
+			struct rte_flow *flow,
+			struct rte_flow_error *error)
+{
+	int ret = 0;
+	struct ice_adv_rule_info rule_info = {0};
+	struct ice_adv_lkup_elem *list = NULL;
+	uint16_t lkups_num = 0;
+
+	ret = ice_parse_switch_filter(pattern, actions, error,
+			&rule_info, &list, &lkups_num);
+	if (ret)
+		goto out;
+
+	ret = ice_parse_switch_action(pf, actions, error, &rule_info);
+	if (ret)
+		goto out;
+
+	ret = ice_switch_rule_set(pf, list, lkups_num, &rule_info, flow, error);
+	if (ret)
+		goto out;
+
+	rte_free(list);
+	return 0;
+
+out:
+	rte_free(list);
+
+	return -rte_errno;
+}
+
+int
+ice_destroy_switch_filter(struct ice_pf *pf,
+			struct rte_flow *flow,
+			struct rte_flow_error *error)
+{
+	struct ice_hw *hw = ICE_PF_TO_HW(pf);
+	int ret;
+	struct ice_rule_query_data *filter_ptr;
+
+	filter_ptr = (struct ice_rule_query_data *)
+			flow->rule;
+
+	if (!filter_ptr) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+			"no such flow"
+			" created by switch filter");
+		return -rte_errno;
+	}
+
+	ret = ice_rem_adv_rule_by_id(hw, filter_ptr);
+	if (ret) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+			"fail to destroy switch filter rule");
+		return -rte_errno;
+	}
+
+	rte_free(filter_ptr);
+	return ret;
+}
+
+void
+ice_free_switch_filter_rule(void *rule)
+{
+	struct ice_rule_query_data *filter_ptr;
+
+	filter_ptr = (struct ice_rule_query_data *)rule;
+
+	rte_free(filter_ptr);
+}
diff --git a/drivers/net/ice/ice_switch_filter.h b/drivers/net/ice/ice_switch_filter.h
new file mode 100644
index 0000000..cea4799
--- /dev/null
+++ b/drivers/net/ice/ice_switch_filter.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+#ifndef _ICE_SWITCH_FILTER_H_
+#define _ICE_SWITCH_FILTER_H_
+
+#include "base/ice_switch.h"
+#include "base/ice_type.h"
+#include "ice_ethdev.h"
+
+int
+ice_create_switch_filter(struct ice_pf *pf,
+			const struct rte_flow_item pattern[],
+			const struct rte_flow_action actions[],
+			struct rte_flow *flow,
+			struct rte_flow_error *error);
+int
+ice_destroy_switch_filter(struct ice_pf *pf,
+			struct rte_flow *flow,
+			struct rte_flow_error *error);
+void
+ice_free_switch_filter_rule(void *rule);
+#endif /* _ICE_SWITCH_FILTER_H_ */
diff --git a/drivers/net/ice/meson.build b/drivers/net/ice/meson.build
index 2bec688..8697676 100644
--- a/drivers/net/ice/meson.build
+++ b/drivers/net/ice/meson.build
@@ -6,7 +6,8 @@ objs = [base_objs]
 
 sources = files(
 	'ice_ethdev.c',
-	'ice_rxtx.c'
+	'ice_rxtx.c',
+	'ice_switch_filter.c'
 	)
 
 deps += ['hash']
-- 
2.9.5


^ permalink raw reply	[flat|nested] 73+ messages in thread

* [dpdk-dev] [PATCH v3 2/3] net/ice: add generic flow API
  2019-06-20  5:34 ` [dpdk-dev] [PATCH v3 0/3] Enable rte_flow API in ice driver Qiming Yang
  2019-06-20  5:34   ` [dpdk-dev] [PATCH v3 1/3] net/ice: enable switch filter Qiming Yang
@ 2019-06-20  5:34   ` Qiming Yang
  2019-06-20  9:32     ` Wang, Xiao W
                       ` (2 more replies)
  2019-06-20  5:34   ` [dpdk-dev] [PATCH v3 3/3] net/ice: add UDP tunnel port support Qiming Yang
  2 siblings, 3 replies; 73+ messages in thread
From: Qiming Yang @ 2019-06-20  5:34 UTC (permalink / raw)
  To: dev; +Cc: Qiming Yang

This patch adds ice_flow_create, ice_flow_destroy,
ice_flow_flush and ice_flow_validate support;
these are used to handle all the generic filters.
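
From the application side this plugs into the standard rte_flow calls; a
minimal sketch of the intended usage (port id, queue index and match values
below are placeholders, and the pattern has to be one of the combinations
listed in ice_supported_patterns):

	/* needs rte_flow.h and rte_byteorder.h */
	uint16_t port_id = 0;                          /* placeholder */
	struct rte_flow_error err;
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_ipv4 ip_spec = {
		.hdr.dst_addr = RTE_BE32(0xc0a80001),  /* 192.168.0.1 */
	};
	struct rte_flow_item_ipv4 ip_mask = {
		.hdr.dst_addr = RTE_BE32(0xffffffff),
	};
	struct rte_flow_item_udp udp_spec = {
		.hdr.dst_port = RTE_BE16(4789),
	};
	struct rte_flow_item_udp udp_mask = {
		.hdr.dst_port = RTE_BE16(0xffff),
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ip_spec, .mask = &ip_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
		  .spec = &udp_spec, .mask = &udp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 4 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow *flow = NULL;

	if (!rte_flow_validate(port_id, &attr, pattern, actions, &err))
		flow = rte_flow_create(port_id, &attr, pattern, actions, &err);

	/* later: drop a single rule, or rte_flow_flush(port_id, &err)
	 * to remove every rule on the port.
	 */
	if (flow)
		rte_flow_destroy(port_id, flow, &err);

These entry points reach ice_flow_validate/ice_flow_create/ice_flow_destroy/
ice_flow_flush through the ice_flow_ops table registered below.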

Signed-off-by: Qiming Yang <qiming.yang@intel.com>
---
 drivers/net/ice/Makefile           |   1 +
 drivers/net/ice/ice_ethdev.c       |  44 +++
 drivers/net/ice/ice_ethdev.h       |   5 +
 drivers/net/ice/ice_generic_flow.c | 682 +++++++++++++++++++++++++++++++++++++
 drivers/net/ice/ice_generic_flow.h | 654 +++++++++++++++++++++++++++++++++++
 drivers/net/ice/meson.build        |   1 +
 6 files changed, 1387 insertions(+)
 create mode 100644 drivers/net/ice/ice_generic_flow.c
 create mode 100644 drivers/net/ice/ice_generic_flow.h

diff --git a/drivers/net/ice/Makefile b/drivers/net/ice/Makefile
index b10d826..32abeb6 100644
--- a/drivers/net/ice/Makefile
+++ b/drivers/net/ice/Makefile
@@ -79,5 +79,6 @@ endif
 ifeq ($(CC_AVX2_SUPPORT), 1)
 	SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_rxtx_vec_avx2.c
 endif
+SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_generic_flow.c
 
 include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index a94aa7e..8ee06d1 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -15,6 +15,7 @@
 #include "base/ice_dcb.h"
 #include "ice_ethdev.h"
 #include "ice_rxtx.h"
+#include "ice_switch_filter.h"
 
 #define ICE_MAX_QP_NUM "max_queue_pair_num"
 #define ICE_DFLT_OUTER_TAG_TYPE ICE_AQ_VSI_OUTER_TAG_VLAN_9100
@@ -83,6 +84,10 @@ static int ice_xstats_get(struct rte_eth_dev *dev,
 static int ice_xstats_get_names(struct rte_eth_dev *dev,
 				struct rte_eth_xstat_name *xstats_names,
 				unsigned int limit);
+static int ice_dev_filter_ctrl(struct rte_eth_dev *dev,
+			enum rte_filter_type filter_type,
+			enum rte_filter_op filter_op,
+			void *arg);
 
 static const struct rte_pci_id pci_id_ice_map[] = {
 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_BACKPLANE) },
@@ -141,6 +146,7 @@ static const struct eth_dev_ops ice_eth_dev_ops = {
 	.xstats_get                   = ice_xstats_get,
 	.xstats_get_names             = ice_xstats_get_names,
 	.xstats_reset                 = ice_stats_reset,
+	.filter_ctrl                  = ice_dev_filter_ctrl,
 };
 
 /* store statistics names and its offset in stats structure */
@@ -1478,6 +1484,8 @@ ice_dev_init(struct rte_eth_dev *dev)
 	/* get base queue pairs index  in the device */
 	ice_base_queue_get(pf);
 
+	TAILQ_INIT(&pf->flow_list);
+
 	return 0;
 
 err_pf_setup:
@@ -1620,6 +1628,8 @@ ice_dev_uninit(struct rte_eth_dev *dev)
 {
 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	struct rte_flow *p_flow;
 
 	ice_dev_close(dev);
 
@@ -1637,6 +1647,13 @@ ice_dev_uninit(struct rte_eth_dev *dev)
 	rte_intr_callback_unregister(intr_handle,
 				     ice_interrupt_handler, dev);
 
+	/* Remove all flows */
+	while ((p_flow = TAILQ_FIRST(&pf->flow_list))) {
+		TAILQ_REMOVE(&pf->flow_list, p_flow, node);
+		ice_free_switch_filter_rule(p_flow->rule);
+		rte_free(p_flow);
+	}
+
 	return 0;
 }
 
@@ -3622,6 +3639,33 @@ static int ice_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
 }
 
 static int
+ice_dev_filter_ctrl(struct rte_eth_dev *dev,
+		     enum rte_filter_type filter_type,
+		     enum rte_filter_op filter_op,
+		     void *arg)
+{
+	int ret = 0;
+
+	if (!dev)
+		return -EINVAL;
+
+	switch (filter_type) {
+	case RTE_ETH_FILTER_GENERIC:
+		if (filter_op != RTE_ETH_FILTER_GET)
+			return -EINVAL;
+		*(const void **)arg = &ice_flow_ops;
+		break;
+	default:
+		PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
+					filter_type);
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+static int
 ice_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 	      struct rte_pci_device *pci_dev)
 {
diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index 50b966c..8a52239 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -234,12 +234,16 @@ struct ice_vsi {
 	bool offset_loaded;
 };
 
+extern const struct rte_flow_ops ice_flow_ops;
+
 /* Struct to store flow created. */
 struct rte_flow {
 	TAILQ_ENTRY(rte_flow) node;
 	void *rule;
 };
 
+TAILQ_HEAD(ice_flow_list, rte_flow);
+
 struct ice_pf {
 	struct ice_adapter *adapter; /* The adapter this PF associate to */
 	struct ice_vsi *main_vsi; /* pointer to main VSI structure */
@@ -266,6 +270,7 @@ struct ice_pf {
 	struct ice_eth_stats internal_stats;
 	bool offset_loaded;
 	bool adapter_stopped;
+	struct ice_flow_list flow_list;
 };
 
 /**
diff --git a/drivers/net/ice/ice_generic_flow.c b/drivers/net/ice/ice_generic_flow.c
new file mode 100644
index 0000000..c6fce88
--- /dev/null
+++ b/drivers/net/ice/ice_generic_flow.c
@@ -0,0 +1,682 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_malloc.h>
+
+#include "ice_ethdev.h"
+#include "ice_generic_flow.h"
+#include "ice_switch_filter.h"
+
+static int ice_flow_validate(struct rte_eth_dev *dev,
+		const struct rte_flow_attr *attr,
+		const struct rte_flow_item pattern[],
+		const struct rte_flow_action actions[],
+		struct rte_flow_error *error);
+static struct rte_flow *ice_flow_create(struct rte_eth_dev *dev,
+		const struct rte_flow_attr *attr,
+		const struct rte_flow_item pattern[],
+		const struct rte_flow_action actions[],
+		struct rte_flow_error *error);
+static int ice_flow_destroy(struct rte_eth_dev *dev,
+		struct rte_flow *flow,
+		struct rte_flow_error *error);
+static int ice_flow_flush(struct rte_eth_dev *dev,
+		struct rte_flow_error *error);
+
+const struct rte_flow_ops ice_flow_ops = {
+	.validate = ice_flow_validate,
+	.create = ice_flow_create,
+	.destroy = ice_flow_destroy,
+	.flush = ice_flow_flush,
+};
+
+static int
+ice_flow_valid_attr(const struct rte_flow_attr *attr,
+		     struct rte_flow_error *error)
+{
+	/* Must be input direction */
+	if (!attr->ingress) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+				   attr, "Only support ingress.");
+		return -rte_errno;
+	}
+
+	/* Not supported */
+	if (attr->egress) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+				   attr, "Not support egress.");
+		return -rte_errno;
+	}
+
+	/* Not supported */
+	if (attr->priority) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+				   attr, "Not support priority.");
+		return -rte_errno;
+	}
+
+	/* Not supported */
+	if (attr->group) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+				   attr, "Not support group.");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+/* Find the first VOID or non-VOID item pointer */
+static const struct rte_flow_item *
+ice_find_first_item(const struct rte_flow_item *item, bool is_void)
+{
+	bool is_find;
+
+	while (item->type != RTE_FLOW_ITEM_TYPE_END) {
+		if (is_void)
+			is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
+		else
+			is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
+		if (is_find)
+			break;
+		item++;
+	}
+	return item;
+}
+
+/* Skip all VOID items of the pattern */
+static void
+ice_pattern_skip_void_item(struct rte_flow_item *items,
+			    const struct rte_flow_item *pattern)
+{
+	uint32_t cpy_count = 0;
+	const struct rte_flow_item *pb = pattern, *pe = pattern;
+
+	for (;;) {
+		/* Find a non-void item first */
+		pb = ice_find_first_item(pb, false);
+		if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
+			pe = pb;
+			break;
+		}
+
+		/* Find a void item */
+		pe = ice_find_first_item(pb + 1, true);
+
+		cpy_count = pe - pb;
+		rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);
+
+		items += cpy_count;
+
+		if (pe->type == RTE_FLOW_ITEM_TYPE_END) {
+			pb = pe;
+			break;
+		}
+
+		pb = pe + 1;
+	}
+	/* Copy the END item. */
+	rte_memcpy(items, pe, sizeof(struct rte_flow_item));
+}
+
+/* Check if the pattern matches a supported item type array */
+static bool
+ice_match_pattern(enum rte_flow_item_type *item_array,
+		const struct rte_flow_item *pattern)
+{
+	const struct rte_flow_item *item = pattern;
+
+	while ((*item_array == item->type) &&
+	       (*item_array != RTE_FLOW_ITEM_TYPE_END)) {
+		item_array++;
+		item++;
+	}
+
+	return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
+		item->type == RTE_FLOW_ITEM_TYPE_END);
+}
+
+static uint64_t ice_flow_valid_pattern(const struct rte_flow_item pattern[],
+		struct rte_flow_error *error)
+{
+	uint16_t i = 0;
+	uint64_t inset;
+	struct rte_flow_item *items; /* used for pattern without VOID items */
+	uint32_t item_num = 0; /* non-void item number */
+
+	/* Get the non-void item number of pattern */
+	while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
+		if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
+			item_num++;
+		i++;
+	}
+	item_num++;
+
+	items = rte_zmalloc("ice_pattern",
+			    item_num * sizeof(struct rte_flow_item), 0);
+	if (!items) {
+		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+				   NULL, "No memory for PMD internal items.");
+		return -ENOMEM;
+	}
+
+	ice_pattern_skip_void_item(items, pattern);
+
+	for (i = 0; i < RTE_DIM(ice_supported_patterns); i++)
+		if (ice_match_pattern(ice_supported_patterns[i].items,
+				      items)) {
+			inset = ice_supported_patterns[i].sw_fields;
+			rte_free(items);
+			return inset;
+		}
+	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+			   pattern, "Unsupported pattern");
+
+	rte_free(items);
+	return 0;
+}
+
+static uint64_t ice_get_flow_field(const struct rte_flow_item pattern[],
+			struct rte_flow_error *error)
+{
+	const struct rte_flow_item *item = pattern;
+	const struct rte_flow_item_eth *eth_spec, *eth_mask;
+	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
+	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
+	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
+	const struct rte_flow_item_udp *udp_spec, *udp_mask;
+	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
+	const struct rte_flow_item_icmp *icmp_mask;
+	const struct rte_flow_item_icmp6 *icmp6_mask;
+	const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
+	const struct rte_flow_item_nvgre *nvgre_spec, *nvgre_mask;
+	enum rte_flow_item_type item_type;
+	uint8_t  ipv6_addr_mask[16] = {
+		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
+	uint64_t input_set = ICE_INSET_NONE;
+	bool outer_ip = true;
+	bool outer_l4 = true;
+
+	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+		if (item->last) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Not support range");
+			return 0;
+		}
+		item_type = item->type;
+		switch (item_type) {
+		case RTE_FLOW_ITEM_TYPE_ETH:
+			eth_spec = item->spec;
+			eth_mask = item->mask;
+
+			if (eth_spec && eth_mask) {
+				if (rte_is_broadcast_ether_addr(&eth_mask->src))
+					input_set |= ICE_INSET_SMAC;
+				if (rte_is_broadcast_ether_addr(&eth_mask->dst))
+					input_set |= ICE_INSET_DMAC;
+				if (eth_mask->type == RTE_BE16(0xffff))
+					input_set |= ICE_INSET_ETHERTYPE;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_IPV4:
+			ipv4_spec = item->spec;
+			ipv4_mask = item->mask;
+
+			if (!(ipv4_spec && ipv4_mask)) {
+				rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Invalid IPv4 spec or mask.");
+				return 0;
+			}
+
+			/* Check IPv4 mask and update input set */
+			if (ipv4_mask->hdr.version_ihl ||
+			    ipv4_mask->hdr.total_length ||
+			    ipv4_mask->hdr.packet_id ||
+			    ipv4_mask->hdr.hdr_checksum) {
+				rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Invalid IPv4 mask.");
+				return 0;
+			}
+
+			if (outer_ip) {
+				if (ipv4_mask->hdr.src_addr == UINT32_MAX)
+					input_set |= ICE_INSET_IPV4_SRC;
+				if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
+					input_set |= ICE_INSET_IPV4_DST;
+				if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
+					input_set |= ICE_INSET_IPV4_TOS;
+				if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
+					input_set |= ICE_INSET_IPV4_TTL;
+				if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
+					input_set |= ICE_INSET_IPV4_PROTO;
+				outer_ip = false;
+			} else {
+				if (ipv4_mask->hdr.src_addr == UINT32_MAX)
+					input_set |= ICE_INSET_TUN_IPV4_SRC;
+				if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
+					input_set |= ICE_INSET_TUN_IPV4_DST;
+				if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
+					input_set |= ICE_INSET_TUN_IPV4_TTL;
+				if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
+					input_set |= ICE_INSET_TUN_IPV4_PROTO;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_IPV6:
+			ipv6_spec = item->spec;
+			ipv6_mask = item->mask;
+
+			if (!(ipv6_spec && ipv6_mask)) {
+				rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ITEM,
+					item, "Invalid IPv6 spec or mask");
+				return 0;
+			}
+
+			if (ipv6_mask->hdr.payload_len ||
+			    ipv6_mask->hdr.vtc_flow) {
+				rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Invalid IPv6 mask");
+				return 0;
+			}
+
+			if (outer_ip) {
+				if (!memcmp(ipv6_mask->hdr.src_addr,
+					    ipv6_addr_mask,
+					    RTE_DIM(ipv6_mask->hdr.src_addr)))
+					input_set |= ICE_INSET_IPV6_SRC;
+				if (!memcmp(ipv6_mask->hdr.dst_addr,
+					    ipv6_addr_mask,
+					    RTE_DIM(ipv6_mask->hdr.dst_addr)))
+					input_set |= ICE_INSET_IPV6_DST;
+				if (ipv6_mask->hdr.proto == UINT8_MAX)
+					input_set |= ICE_INSET_IPV6_NEXT_HDR;
+				if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
+					input_set |= ICE_INSET_IPV6_HOP_LIMIT;
+				outer_ip = false;
+			} else {
+				if (!memcmp(ipv6_mask->hdr.src_addr,
+					    ipv6_addr_mask,
+					    RTE_DIM(ipv6_mask->hdr.src_addr)))
+					input_set |= ICE_INSET_TUN_IPV6_SRC;
+				if (!memcmp(ipv6_mask->hdr.dst_addr,
+					    ipv6_addr_mask,
+					    RTE_DIM(ipv6_mask->hdr.dst_addr)))
+					input_set |= ICE_INSET_TUN_IPV6_DST;
+				if (ipv6_mask->hdr.proto == UINT8_MAX)
+					input_set |= ICE_INSET_TUN_IPV6_PROTO;
+				if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
+					input_set |= ICE_INSET_TUN_IPV6_TTL;
+			}
+
+			break;
+		case RTE_FLOW_ITEM_TYPE_UDP:
+			udp_spec = item->spec;
+			udp_mask = item->mask;
+
+			if (!(udp_spec && udp_mask)) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item, "Invalid UDP mask");
+				return 0;
+			}
+
+			/* Check UDP mask and update input set*/
+			if (udp_mask->hdr.dgram_len ||
+			    udp_mask->hdr.dgram_cksum) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid UDP mask");
+				return 0;
+			}
+
+			if (outer_l4) {
+				if (udp_mask->hdr.src_port == UINT16_MAX)
+					input_set |= ICE_INSET_SRC_PORT;
+				if (udp_mask->hdr.dst_port == UINT16_MAX)
+					input_set |= ICE_INSET_DST_PORT;
+				outer_l4 = false;
+			} else {
+				if (udp_mask->hdr.src_port == UINT16_MAX)
+					input_set |= ICE_INSET_TUN_SRC_PORT;
+				if (udp_mask->hdr.dst_port == UINT16_MAX)
+					input_set |= ICE_INSET_TUN_DST_PORT;
+			}
+
+			break;
+		case RTE_FLOW_ITEM_TYPE_TCP:
+			tcp_spec = item->spec;
+			tcp_mask = item->mask;
+
+			if (!(tcp_spec && tcp_mask)) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item, "Invalid TCP mask");
+				return 0;
+			}
+
+			/* Check TCP mask and update input set */
+			if (tcp_mask->hdr.sent_seq ||
+			    tcp_mask->hdr.recv_ack ||
+			    tcp_mask->hdr.data_off ||
+			    tcp_mask->hdr.tcp_flags ||
+			    tcp_mask->hdr.rx_win ||
+			    tcp_mask->hdr.cksum ||
+			    tcp_mask->hdr.tcp_urp) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid TCP mask");
+				return 0;
+			}
+
+			if (outer_l4) {
+				if (tcp_mask->hdr.src_port == UINT16_MAX)
+					input_set |= ICE_INSET_SRC_PORT;
+				if (tcp_mask->hdr.dst_port == UINT16_MAX)
+					input_set |= ICE_INSET_DST_PORT;
+				outer_l4 = false;
+			} else {
+				if (tcp_mask->hdr.src_port == UINT16_MAX)
+					input_set |= ICE_INSET_TUN_SRC_PORT;
+				if (tcp_mask->hdr.dst_port == UINT16_MAX)
+					input_set |= ICE_INSET_TUN_DST_PORT;
+			}
+
+			break;
+		case RTE_FLOW_ITEM_TYPE_SCTP:
+			sctp_spec = item->spec;
+			sctp_mask = item->mask;
+
+			if (!(sctp_spec && sctp_mask)) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item, "Invalid SCTP mask");
+				return 0;
+			}
+
+			/* Check SCTP mask and update input set */
+			if (sctp_mask->hdr.cksum) {
+				rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Invalid SCTP mask");
+				return 0;
+			}
+
+			if (outer_l4) {
+				if (sctp_mask->hdr.src_port == UINT16_MAX)
+					input_set |= ICE_INSET_SRC_PORT;
+				if (sctp_mask->hdr.dst_port == UINT16_MAX)
+					input_set |= ICE_INSET_DST_PORT;
+				outer_l4 = false;
+			} else {
+				if (sctp_mask->hdr.src_port == UINT16_MAX)
+					input_set |= ICE_INSET_TUN_SRC_PORT;
+				if (sctp_mask->hdr.dst_port == UINT16_MAX)
+					input_set |= ICE_INSET_TUN_DST_PORT;
+			}
+
+			break;
+		case RTE_FLOW_ITEM_TYPE_ICMP:
+			icmp_mask = item->mask;
+			if (icmp_mask->hdr.icmp_code ||
+			    icmp_mask->hdr.icmp_cksum ||
+			    icmp_mask->hdr.icmp_ident ||
+			    icmp_mask->hdr.icmp_seq_nb) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid ICMP mask");
+				return 0;
+			}
+
+			if (icmp_mask->hdr.icmp_type == UINT8_MAX)
+				input_set |= ICE_INSET_ICMP;
+			break;
+		case RTE_FLOW_ITEM_TYPE_ICMP6:
+			icmp6_mask = item->mask;
+			if (icmp6_mask->code ||
+			    icmp6_mask->checksum) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid ICMP6 mask");
+				return 0;
+			}
+
+			if (icmp6_mask->type == UINT8_MAX)
+				input_set |= ICE_INSET_ICMP6;
+			break;
+		case RTE_FLOW_ITEM_TYPE_VXLAN:
+			vxlan_spec = item->spec;
+			vxlan_mask = item->mask;
+			/* Check if VXLAN item is used to describe protocol.
+			 * If yes, both spec and mask should be NULL.
+			 * If no, both spec and mask shouldn't be NULL.
+			 */
+			if ((!vxlan_spec && vxlan_mask) ||
+			    (vxlan_spec && !vxlan_mask)) {
+				rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Invalid VXLAN item");
+				return 0;
+			}
+
+			break;
+		case RTE_FLOW_ITEM_TYPE_NVGRE:
+			nvgre_spec = item->spec;
+			nvgre_mask = item->mask;
+			/* Check if NVGRE item is used to describe protocol.
+			 * If yes, both spec and mask should be NULL.
+			 * If no, both spec and mask shouldn't be NULL.
+			 */
+			if ((!nvgre_spec && nvgre_mask) ||
+			    (nvgre_spec && !nvgre_mask)) {
+				rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Invalid NVGRE item");
+				return 0;
+			}
+
+			break;
+		default:
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Invalid pattern item");
+			break;
+		}
+	}
+	return input_set;
+}
+
+static int ice_flow_valid_inset(const struct rte_flow_item pattern[],
+			uint64_t inset, struct rte_flow_error *error)
+{
+	uint64_t fields;
+
+	/* get valid field */
+	fields = ice_get_flow_field(pattern, error);
+	if ((!fields) || (fields && (!inset))) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
+				   pattern,
+				   "Invalid input set");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+static int ice_flow_valid_action(const struct rte_flow_action *actions,
+				       struct rte_flow_error *error)
+{
+	switch (actions->type) {
+	case RTE_FLOW_ACTION_TYPE_QUEUE:
+		break;
+	case RTE_FLOW_ACTION_TYPE_DROP:
+		break;
+	default:
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ACTION, actions,
+				   "Invalid action.");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+static int
+ice_flow_validate(__rte_unused struct rte_eth_dev *dev,
+		   const struct rte_flow_attr *attr,
+		   const struct rte_flow_item pattern[],
+		   const struct rte_flow_action actions[],
+		   struct rte_flow_error *error)
+{
+	uint64_t inset = 0;
+	int ret = ICE_ERR_NOT_SUPPORTED;
+
+	if (!pattern) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+				   NULL, "NULL pattern.");
+		return -rte_errno;
+	}
+
+	if (!actions) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+				   NULL, "NULL action.");
+		return -rte_errno;
+	}
+
+	if (!attr) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR,
+				   NULL, "NULL attribute.");
+		return -rte_errno;
+	}
+
+	ret = ice_flow_valid_attr(attr, error);
+	if (ret)
+		return ret;
+
+	inset = ice_flow_valid_pattern(pattern, error);
+	if (!inset)
+		return -rte_errno;
+
+	ret = ice_flow_valid_inset(pattern, inset, error);
+	if (ret)
+		return ret;
+
+	ret = ice_flow_valid_action(actions, error);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static struct rte_flow *
+ice_flow_create(struct rte_eth_dev *dev,
+		 const struct rte_flow_attr *attr,
+		 const struct rte_flow_item pattern[],
+		 const struct rte_flow_action actions[],
+		 struct rte_flow_error *error)
+{
+	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	struct rte_flow *flow = NULL;
+	int ret;
+
+	flow = rte_zmalloc("ice_flow", sizeof(struct rte_flow), 0);
+	if (!flow) {
+		rte_flow_error_set(error, ENOMEM,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Failed to allocate memory");
+		return flow;
+	}
+
+	ret = ice_flow_validate(dev, attr, pattern, actions, error);
+	if (ret < 0)
+		goto free_flow;
+
+	ret = ice_create_switch_filter(pf, pattern, actions, flow, error);
+	if (ret)
+		goto free_flow;
+
+	TAILQ_INSERT_TAIL(&pf->flow_list, flow, node);
+	return flow;
+
+free_flow:
+	rte_flow_error_set(error, -ret,
+			   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+			   "Failed to create flow.");
+	rte_free(flow);
+	return NULL;
+}
+
+static int
+ice_flow_destroy(struct rte_eth_dev *dev,
+		 struct rte_flow *flow,
+		 struct rte_flow_error *error)
+{
+	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	int ret = 0;
+
+	ret = ice_destroy_switch_filter(pf, flow, error);
+
+	if (!ret) {
+		TAILQ_REMOVE(&pf->flow_list, flow, node);
+		rte_free(flow);
+	} else
+		rte_flow_error_set(error, -ret,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Failed to destroy flow.");
+
+	return ret;
+}
+
+static int
+ice_flow_flush(struct rte_eth_dev *dev,
+	       struct rte_flow_error *error)
+{
+	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	struct rte_flow *p_flow;
+	int ret = 0;
+
+	TAILQ_FOREACH(p_flow, &pf->flow_list, node) {
+		ret = ice_flow_destroy(dev, p_flow, error);
+		if (ret) {
+			rte_flow_error_set(error, -ret,
+					   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+					   "Failed to flush SW flows.");
+			return -rte_errno;
+		}
+	}
+
+	return ret;
+}
diff --git a/drivers/net/ice/ice_generic_flow.h b/drivers/net/ice/ice_generic_flow.h
new file mode 100644
index 0000000..ed7f3fe
--- /dev/null
+++ b/drivers/net/ice/ice_generic_flow.h
@@ -0,0 +1,654 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+#ifndef _ICE_GENERIC_FLOW_H_
+#define _ICE_GENERIC_FLOW_H_
+
+#include <rte_flow_driver.h>
+
+struct ice_flow_pattern {
+	enum rte_flow_item_type *items;
+	uint64_t sw_fields;
+};
+
+#define ICE_INSET_NONE            0x0000000000000000ULL
+
+/* bit0 ~ bit 7 */
+#define ICE_INSET_SMAC            0x0000000000000001ULL
+#define ICE_INSET_DMAC            0x0000000000000002ULL
+#define ICE_INSET_ETHERTYPE       0x0000000000000020ULL
+
+/* bit 8 ~ bit 15 */
+#define ICE_INSET_IPV4_SRC        0x0000000000000100ULL
+#define ICE_INSET_IPV4_DST        0x0000000000000200ULL
+#define ICE_INSET_IPV6_SRC        0x0000000000000400ULL
+#define ICE_INSET_IPV6_DST        0x0000000000000800ULL
+#define ICE_INSET_SRC_PORT        0x0000000000001000ULL
+#define ICE_INSET_DST_PORT        0x0000000000002000ULL
+#define ICE_INSET_ARP             0x0000000000004000ULL
+
+/* bit 16 ~ bit 31 */
+#define ICE_INSET_IPV4_TOS        0x0000000000010000ULL
+#define ICE_INSET_IPV4_PROTO      0x0000000000020000ULL
+#define ICE_INSET_IPV4_TTL        0x0000000000040000ULL
+#define ICE_INSET_IPV6_NEXT_HDR   0x0000000000200000ULL
+#define ICE_INSET_IPV6_HOP_LIMIT  0x0000000000400000ULL
+#define ICE_INSET_ICMP            0x0000000001000000ULL
+#define ICE_INSET_ICMP6           0x0000000002000000ULL
+
+/* bit 32 ~ bit 47, tunnel fields */
+#define ICE_INSET_TUN_SMAC           0x0000000100000000ULL
+#define ICE_INSET_TUN_DMAC           0x0000000200000000ULL
+#define ICE_INSET_TUN_IPV4_SRC       0x0000000400000000ULL
+#define ICE_INSET_TUN_IPV4_DST       0x0000000800000000ULL
+#define ICE_INSET_TUN_IPV4_TTL       0x0000001000000000ULL
+#define ICE_INSET_TUN_IPV4_PROTO     0x0000002000000000ULL
+#define ICE_INSET_TUN_IPV6_SRC       0x0000004000000000ULL
+#define ICE_INSET_TUN_IPV6_DST       0x0000008000000000ULL
+#define ICE_INSET_TUN_IPV6_TTL       0x0000010000000000ULL
+#define ICE_INSET_TUN_IPV6_PROTO     0x0000020000000000ULL
+#define ICE_INSET_TUN_SRC_PORT       0x0000040000000000ULL
+#define ICE_INSET_TUN_DST_PORT       0x0000080000000000ULL
+#define ICE_INSET_TUN_ID             0x0000100000000000ULL
+
+/* bit 48 ~ bit 55 */
+#define ICE_INSET_LAST_ETHER_TYPE 0x0001000000000000ULL
+
+#define ICE_FLAG_VLAN_INNER  0x00000001ULL
+#define ICE_FLAG_VLAN_OUTER  0x00000002ULL
+
+#define INSET_ETHER ( \
+	ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)
+#define INSET_MAC_IPV4 ( \
+	ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
+	ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_TOS)
+#define INSET_MAC_IPV4_L4 ( \
+	ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
+	ICE_INSET_IPV4_TOS | ICE_INSET_DST_PORT | \
+	ICE_INSET_SRC_PORT)
+#define INSET_MAC_IPV4_ICMP ( \
+	ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
+	ICE_INSET_IPV4_TOS | ICE_INSET_ICMP)
+#define INSET_MAC_IPV6 ( \
+	ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
+	ICE_INSET_IPV6_NEXT_HDR | ICE_INSET_IPV6_HOP_LIMIT)
+#define INSET_MAC_IPV6_L4 ( \
+	ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
+	ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_DST_PORT | \
+	ICE_INSET_SRC_PORT)
+#define INSET_MAC_IPV6_ICMP ( \
+	ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
+	ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_ICMP6)
+#define INSET_TUNNEL_IPV4_TYPE1 ( \
+	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
+	ICE_INSET_TUN_IPV4_TTL | ICE_INSET_TUN_IPV4_PROTO)
+#define INSET_TUNNEL_IPV4_TYPE2 ( \
+	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
+	ICE_INSET_TUN_IPV4_TTL | ICE_INSET_TUN_IPV4_PROTO | \
+	ICE_INSET_TUN_SRC_PORT | ICE_INSET_TUN_DST_PORT)
+#define INSET_TUNNEL_IPV4_TYPE3 ( \
+	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
+	ICE_INSET_TUN_IPV4_TTL | ICE_INSET_ICMP)
+#define INSET_TUNNEL_IPV6_TYPE1 ( \
+	ICE_INSET_TUN_IPV6_SRC | ICE_INSET_TUN_IPV6_DST | \
+	ICE_INSET_TUN_IPV6_TTL | ICE_INSET_TUN_IPV6_PROTO)
+#define INSET_TUNNEL_IPV6_TYPE2 ( \
+	ICE_INSET_TUN_IPV6_SRC | ICE_INSET_TUN_IPV6_DST | \
+	ICE_INSET_TUN_IPV6_TTL | ICE_INSET_TUN_IPV6_PROTO | \
+	ICE_INSET_TUN_SRC_PORT | ICE_INSET_TUN_DST_PORT)
+#define INSET_TUNNEL_IPV6_TYPE3 ( \
+	ICE_INSET_TUN_IPV6_SRC | ICE_INSET_TUN_IPV6_DST | \
+	ICE_INSET_TUN_IPV6_TTL | ICE_INSET_ICMP6)
+
+/* L2 */
+static enum rte_flow_item_type pattern_ethertype[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* non-tunnel IPv4 */
+static enum rte_flow_item_type pattern_ipv4[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_icmp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* non-tunnel IPv6 */
+static enum rte_flow_item_type pattern_ipv6[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv6_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv6_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv6_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv6_icmp6[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_ICMP6,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* IPv4 VXLAN IPv4 */
+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv4[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv4_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv4_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv4_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv4_icmp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* IPv4 VXLAN MAC IPv4 */
+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv4[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv4_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv4_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv4_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv4_icmp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* IPv4 VXLAN IPv6 */
+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv6[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv6_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv6_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv6_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv6_icmp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* IPv4 VXLAN MAC IPv6 */
+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv6[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv6_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv6_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv6_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv6_icmp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* IPv4 NVGRE IPv4 */
+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv4[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv4_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv4_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv4_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv4_icmp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* IPv4 NVGRE MAC IPv4 */
+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv4[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv4_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv4_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv4_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv4_icmp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* IPv4 NVGRE IPv6 */
+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv6[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv6_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv6_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv6_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv6_icmp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* IPv4 NVGRE MAC IPv6 */
+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv6[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv6_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv6_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv6_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv6_icmp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static struct ice_flow_pattern ice_supported_patterns[] = {
+	{pattern_ethertype, INSET_ETHER},
+	{pattern_ipv4, INSET_MAC_IPV4},
+	{pattern_ipv4_udp, INSET_MAC_IPV4_L4},
+	{pattern_ipv4_sctp, INSET_MAC_IPV4_L4},
+	{pattern_ipv4_tcp, INSET_MAC_IPV4_L4},
+	{pattern_ipv4_icmp, INSET_MAC_IPV4_ICMP},
+	{pattern_ipv6, INSET_MAC_IPV6},
+	{pattern_ipv6_udp, INSET_MAC_IPV6_L4},
+	{pattern_ipv6_sctp, INSET_MAC_IPV6_L4},
+	{pattern_ipv6_tcp, INSET_MAC_IPV6_L4},
+	{pattern_ipv6_icmp6, INSET_MAC_IPV6_ICMP},
+	{pattern_ipv4_vxlan_ipv4, INSET_TUNNEL_IPV4_TYPE1},
+	{pattern_ipv4_vxlan_ipv4_udp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_vxlan_ipv4_tcp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_vxlan_ipv4_sctp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_vxlan_ipv4_icmp, INSET_TUNNEL_IPV4_TYPE3},
+	{pattern_ipv4_vxlan_eth_ipv4, INSET_TUNNEL_IPV4_TYPE1},
+	{pattern_ipv4_vxlan_eth_ipv4_udp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_vxlan_eth_ipv4_tcp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_vxlan_eth_ipv4_sctp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_vxlan_eth_ipv4_icmp, INSET_TUNNEL_IPV4_TYPE3},
+	{pattern_ipv4_vxlan_ipv6, INSET_TUNNEL_IPV6_TYPE1},
+	{pattern_ipv4_vxlan_ipv6_udp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_vxlan_ipv6_tcp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_vxlan_ipv6_sctp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_vxlan_ipv6_icmp, INSET_TUNNEL_IPV6_TYPE3},
+	{pattern_ipv4_vxlan_eth_ipv6, INSET_TUNNEL_IPV6_TYPE1},
+	{pattern_ipv4_vxlan_eth_ipv6_udp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_vxlan_eth_ipv6_tcp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_vxlan_eth_ipv6_sctp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_vxlan_eth_ipv6_icmp, INSET_TUNNEL_IPV6_TYPE3},
+	{pattern_ipv4_nvgre_ipv4, INSET_TUNNEL_IPV4_TYPE1},
+	{pattern_ipv4_nvgre_ipv4_udp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_nvgre_ipv4_tcp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_nvgre_ipv4_sctp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_nvgre_ipv4_icmp, INSET_TUNNEL_IPV4_TYPE3},
+	{pattern_ipv4_nvgre_eth_ipv4, INSET_TUNNEL_IPV4_TYPE1},
+	{pattern_ipv4_nvgre_eth_ipv4_udp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_nvgre_eth_ipv4_tcp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_nvgre_eth_ipv4_sctp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_nvgre_eth_ipv4_icmp, INSET_TUNNEL_IPV4_TYPE3},
+	{pattern_ipv4_nvgre_ipv6, INSET_TUNNEL_IPV6_TYPE1},
+	{pattern_ipv4_nvgre_ipv6_udp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_nvgre_ipv6_tcp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_nvgre_ipv6_sctp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_nvgre_ipv6_icmp, INSET_TUNNEL_IPV6_TYPE3},
+	{pattern_ipv4_nvgre_eth_ipv6, INSET_TUNNEL_IPV6_TYPE1},
+	{pattern_ipv4_nvgre_eth_ipv6_udp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_nvgre_eth_ipv6_tcp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_nvgre_eth_ipv6_sctp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_nvgre_eth_ipv6_icmp, INSET_TUNNEL_IPV6_TYPE3},
+};
+
+#endif
diff --git a/drivers/net/ice/meson.build b/drivers/net/ice/meson.build
index 8697676..18180b9 100644
--- a/drivers/net/ice/meson.build
+++ b/drivers/net/ice/meson.build
@@ -8,6 +8,7 @@ sources = files(
 	'ice_ethdev.c',
 	'ice_rxtx.c',
-	'ice_switch_filter.c'
+	'ice_switch_filter.c',
+	'ice_generic_flow.c'
 	)
 
 deps += ['hash']
-- 
2.9.5


^ permalink raw reply	[flat|nested] 73+ messages in thread

* [dpdk-dev] [PATCH v3 3/3] net/ice: add UDP tunnel port support
  2019-06-20  5:34 ` [dpdk-dev] [PATCH v3 0/3] Enable rte_flow API in ice driver Qiming Yang
  2019-06-20  5:34   ` [dpdk-dev] [PATCH v3 1/3] net/ice: enable switch filter Qiming Yang
  2019-06-20  5:34   ` [dpdk-dev] [PATCH v3 2/3] net/ice: add generic flow API Qiming Yang
@ 2019-06-20  5:34   ` Qiming Yang
  2 siblings, 0 replies; 73+ messages in thread
From: Qiming Yang @ 2019-06-20  5:34 UTC (permalink / raw)
  To: dev; +Cc: Qiming Yang

Enabled UDP tunnel port add and delete functions.

Signed-off-by: Qiming Yang <qiming.yang@intel.com>
---
 drivers/net/ice/ice_ethdev.c | 54 ++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 54 insertions(+)
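
For reference, a minimal sketch (not part of the patch) of how an application is
expected to exercise these new callbacks through the generic ethdev API; the
helper names, the port id handling and the UDP port number below are only
example values:

#include <rte_ethdev.h>

/* 4789 is the IANA-assigned VXLAN port, used here only as an example. */
#define EXAMPLE_VXLAN_UDP_PORT 4789

static int
example_add_vxlan_port(uint16_t port_id)
{
	struct rte_eth_udp_tunnel tunnel = {
		.udp_port = EXAMPLE_VXLAN_UDP_PORT,
		.prot_type = RTE_TUNNEL_TYPE_VXLAN,
	};

	/* Dispatched to the driver's .udp_tunnel_port_add callback,
	 * i.e. ice_dev_udp_tunnel_port_add() in this patch.
	 */
	return rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
}

static int
example_del_vxlan_port(uint16_t port_id)
{
	struct rte_eth_udp_tunnel tunnel = {
		.udp_port = EXAMPLE_VXLAN_UDP_PORT,
		.prot_type = RTE_TUNNEL_TYPE_VXLAN,
	};

	/* Dispatched to the driver's .udp_tunnel_port_del callback,
	 * i.e. ice_dev_udp_tunnel_port_del() in this patch.
	 */
	return rte_eth_dev_udp_tunnel_port_delete(port_id, &tunnel);
}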

diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index 8ee06d1..949d293 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -88,6 +88,10 @@ static int ice_dev_filter_ctrl(struct rte_eth_dev *dev,
 			enum rte_filter_type filter_type,
 			enum rte_filter_op filter_op,
 			void *arg);
+static int ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
+			struct rte_eth_udp_tunnel *udp_tunnel);
+static int ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
+			struct rte_eth_udp_tunnel *udp_tunnel);
 
 static const struct rte_pci_id pci_id_ice_map[] = {
 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_BACKPLANE) },
@@ -147,6 +151,8 @@ static const struct eth_dev_ops ice_eth_dev_ops = {
 	.xstats_get_names             = ice_xstats_get_names,
 	.xstats_reset                 = ice_stats_reset,
 	.filter_ctrl                  = ice_dev_filter_ctrl,
+	.udp_tunnel_port_add          = ice_dev_udp_tunnel_port_add,
+	.udp_tunnel_port_del          = ice_dev_udp_tunnel_port_del,
 };
 
 /* store statistics names and its offset in stats structure */
@@ -3665,6 +3671,54 @@ ice_dev_filter_ctrl(struct rte_eth_dev *dev,
 	return ret;
 }
 
+/* Add UDP tunneling port */
+static int
+ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
+			     struct rte_eth_udp_tunnel *udp_tunnel)
+{
+	int ret = 0;
+	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	if (udp_tunnel == NULL)
+		return -EINVAL;
+
+	switch (udp_tunnel->prot_type) {
+	case RTE_TUNNEL_TYPE_VXLAN:
+		ret = ice_create_tunnel(hw, TNL_VXLAN, udp_tunnel->udp_port);
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "Invalid tunnel type");
+		ret = -1;
+		break;
+	}
+
+	return ret;
+}
+
+/* Delete UDP tunneling port */
+static int
+ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
+			     struct rte_eth_udp_tunnel *udp_tunnel)
+{
+	int ret = 0;
+	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	if (udp_tunnel == NULL)
+		return -EINVAL;
+
+	switch (udp_tunnel->prot_type) {
+	case RTE_TUNNEL_TYPE_VXLAN:
+		ret = ice_destroy_tunnel(hw, udp_tunnel->udp_port, 0);
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "Invalid tunnel type");
+		ret = -1;
+		break;
+	}
+
+	return ret;
+}
+
 static int
 ice_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 	      struct rte_pci_device *pci_dev)
-- 
2.9.5


^ permalink raw reply	[flat|nested] 73+ messages in thread

* Re: [dpdk-dev] [PATCH v3 1/3] net/ice: enable switch filter
  2019-06-20  5:34   ` [dpdk-dev] [PATCH v3 1/3] net/ice: enable switch filter Qiming Yang
@ 2019-06-20  9:01     ` Wang, Xiao W
  2019-06-20  9:12       ` Zhao1, Wei
  0 siblings, 1 reply; 73+ messages in thread
From: Wang, Xiao W @ 2019-06-20  9:01 UTC (permalink / raw)
  To: Yang, Qiming, dev; +Cc: Zhao1, Wei, Zhang, Qi Z, Xing, Beilei

Hi,

> -----Original Message-----
> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Qiming Yang
> Sent: Thursday, June 20, 2019 1:35 PM
> To: dev@dpdk.org
> Cc: Zhao1, Wei <wei.zhao1@intel.com>
> Subject: [dpdk-dev] [PATCH v3 1/3] net/ice: enable switch filter
> 
> From: wei zhao <wei.zhao1@intel.com>
> 
> The patch enables the backend of rte_flow. It transfers
> rte_flow_xxx to device specific data structure and
> configures packet process engine's binary classifier
> (switch) properly.
> 
> Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
> ---
>  drivers/net/ice/Makefile            |   1 +
>  drivers/net/ice/ice_ethdev.c        |  18 ++
>  drivers/net/ice/ice_ethdev.h        |   7 +
>  drivers/net/ice/ice_switch_filter.c | 525
> ++++++++++++++++++++++++++++++++++++
>  drivers/net/ice/ice_switch_filter.h |  24 ++
>  drivers/net/ice/meson.build         |   3 +-
>  6 files changed, 577 insertions(+), 1 deletion(-)
>  create mode 100644 drivers/net/ice/ice_switch_filter.c
>  create mode 100644 drivers/net/ice/ice_switch_filter.h
> 
> diff --git a/drivers/net/ice/Makefile b/drivers/net/ice/Makefile
> index 0e5c55e..b10d826 100644
> --- a/drivers/net/ice/Makefile
> +++ b/drivers/net/ice/Makefile
> @@ -60,6 +60,7 @@ ifeq ($(CONFIG_RTE_ARCH_X86), y)
>  SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_rxtx_vec_sse.c
>  endif
> 
[...]

> +	struct ice_adv_lkup_elem *list = NULL;
> +	uint16_t lkups_num = 0;
> +
> +	ret = ice_parse_switch_filter(pattern, actions, error,
> +			&rule_info, &list, &lkups_num);
> +	if (ret)
> +		goto out;
> +
> +	ret = ice_parse_switch_action(pf, actions, error, &rule_info);
> +	if (ret)
> +		goto out;
> +
> +	ret = ice_switch_rule_set(pf, list, lkups_num, &rule_info, flow, error);
> +	if (ret)
> +		goto out;
> +
> +	rte_free(list);
> +	return 0;
> +

It's better to change "goto out" to "goto error".

BRs,
Xiao

> +out:
> +	rte_free(list);
> +
> +	return -rte_errno;
> +}
> +


^ permalink raw reply	[flat|nested] 73+ messages in thread

* Re: [dpdk-dev] [PATCH v3 1/3] net/ice: enable switch filter
  2019-06-20  9:01     ` Wang, Xiao W
@ 2019-06-20  9:12       ` Zhao1, Wei
  0 siblings, 0 replies; 73+ messages in thread
From: Zhao1, Wei @ 2019-06-20  9:12 UTC (permalink / raw)
  To: Wang, Xiao W, Yang, Qiming, dev; +Cc: Zhang, Qi Z, Xing, Beilei

Hi, Xiao

> -----Original Message-----
> From: Wang, Xiao W
> Sent: Thursday, June 20, 2019 5:01 PM
> To: Yang, Qiming <qiming.yang@intel.com>; dev@dpdk.org
> Cc: Zhao1, Wei <wei.zhao1@intel.com>; Zhang, Qi Z <qi.z.zhang@intel.com>;
> Xing, Beilei <beilei.xing@intel.com>
> Subject: RE: [dpdk-dev] [PATCH v3 1/3] net/ice: enable switch filter
> 
> Hi,
> 
> > -----Original Message-----
> > From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Qiming Yang
> > Sent: Thursday, June 20, 2019 1:35 PM
> > To: dev@dpdk.org
> > Cc: Zhao1, Wei <wei.zhao1@intel.com>
> > Subject: [dpdk-dev] [PATCH v3 1/3] net/ice: enable switch filter
> >
> > From: wei zhao <wei.zhao1@intel.com>
> >
> > The patch enables the backend of rte_flow. It transfers rte_flow_xxx
> > to device specific data structure and configures packet process
> > engine's binary classifier
> > (switch) properly.
> >
> > Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
> > ---
> >  drivers/net/ice/Makefile            |   1 +
> >  drivers/net/ice/ice_ethdev.c        |  18 ++
> >  drivers/net/ice/ice_ethdev.h        |   7 +
> >  drivers/net/ice/ice_switch_filter.c | 525
> > ++++++++++++++++++++++++++++++++++++
> >  drivers/net/ice/ice_switch_filter.h |  24 ++
> >  drivers/net/ice/meson.build         |   3 +-
> >  6 files changed, 577 insertions(+), 1 deletion(-)  create mode 100644
> > drivers/net/ice/ice_switch_filter.c
> >  create mode 100644 drivers/net/ice/ice_switch_filter.h
> >
> > diff --git a/drivers/net/ice/Makefile b/drivers/net/ice/Makefile index
> > 0e5c55e..b10d826 100644
> > --- a/drivers/net/ice/Makefile
> > +++ b/drivers/net/ice/Makefile
> > @@ -60,6 +60,7 @@ ifeq ($(CONFIG_RTE_ARCH_X86), y)
> >  SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_rxtx_vec_sse.c  endif
> >
> [...]
> 
> > +	struct ice_adv_lkup_elem *list = NULL;
> > +	uint16_t lkups_num = 0;
> > +
> > +	ret = ice_parse_switch_filter(pattern, actions, error,
> > +			&rule_info, &list, &lkups_num);
> > +	if (ret)
> > +		goto out;
> > +
> > +	ret = ice_parse_switch_action(pf, actions, error, &rule_info);
> > +	if (ret)
> > +		goto out;
> > +
> > +	ret = ice_switch_rule_set(pf, list, lkups_num, &rule_info, flow, error);
> > +	if (ret)
> > +		goto out;
> > +
> > +	rte_free(list);
> > +	return 0;
> > +
> 
> It's better to change "goto out" to "goto error".

OK, will update in v4.
> 
> BRs,
> Xiao
> 
> > +out:
> > +	rte_free(list);
> > +
> > +	return -rte_errno;
> > +}
> > +


^ permalink raw reply	[flat|nested] 73+ messages in thread

* Re: [dpdk-dev] [PATCH v3 2/3] net/ice: add generic flow API
  2019-06-20  5:34   ` [dpdk-dev] [PATCH v3 2/3] net/ice: add generic flow API Qiming Yang
@ 2019-06-20  9:32     ` Wang, Xiao W
  2019-06-21  5:47       ` Yang, Qiming
  2019-06-20 10:21     ` Wang, Xiao W
  2019-06-20 13:33     ` Aaron Conole
  2 siblings, 1 reply; 73+ messages in thread
From: Wang, Xiao W @ 2019-06-20  9:32 UTC (permalink / raw)
  To: Yang, Qiming, dev; +Cc: Zhang, Qi Z, Xing, Beilei

Hi,

> -----Original Message-----
> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Qiming Yang
> Sent: Thursday, June 20, 2019 1:35 PM
> To: dev@dpdk.org
> Cc: Yang, Qiming <qiming.yang@intel.com>
> Subject: [dpdk-dev] [PATCH v3 2/3] net/ice: add generic flow API
> 
> This patch adds ice_flow_create, ice_flow_destroy,
> ice_flow_flush and ice_flow_validate support,
> these are used to handle all the generic filters.
> 
> Signed-off-by: Qiming Yang <qiming.yang@intel.com>
> ---
>  drivers/net/ice/Makefile           |   1 +
>  drivers/net/ice/ice_ethdev.c       |  44 +++
>  drivers/net/ice/ice_ethdev.h       |   5 +
>  drivers/net/ice/ice_generic_flow.c | 682
> +++++++++++++++++++++++++++++++++++++
>  drivers/net/ice/ice_generic_flow.h | 654
> +++++++++++++++++++++++++++++++++++
>  drivers/net/ice/meson.build        |   1 +
>  6 files changed, 1387 insertions(+)
>  create mode 100644 drivers/net/ice/ice_generic_flow.c
>  create mode 100644 drivers/net/ice/ice_generic_flow.h
> 
> diff --git a/drivers/net/ice/Makefile b/drivers/net/ice/Makefile
> index b10d826..32abeb6 100644
> --- a/drivers/net/ice/Makefile
> +++ b/drivers/net/ice/Makefile
> @@ -79,5 +79,6 @@ endif
>  ifeq ($(CC_AVX2_SUPPORT), 1)
>  	SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_rxtx_vec_avx2.c
>  endif
> +SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_generic_flow.c
> 
>  include $(RTE_SDK)/mk/rte.lib.mk
> diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
> index a94aa7e..8ee06d1 100644
> --- a/drivers/net/ice/ice_ethdev.c
> +++ b/drivers/net/ice/ice_ethdev.c
> @@ -15,6 +15,7 @@
>  #include "base/ice_dcb.h"
>  #include "ice_ethdev.h"
>  #include "ice_rxtx.h"
> +#include "ice_switch_filter.h"
> 
>  #define ICE_MAX_QP_NUM "max_queue_pair_num"
>  #define ICE_DFLT_OUTER_TAG_TYPE ICE_AQ_VSI_OUTER_TAG_VLAN_9100
> @@ -83,6 +84,10 @@ static int ice_xstats_get(struct rte_eth_dev *dev,
>  static int ice_xstats_get_names(struct rte_eth_dev *dev,
>  				struct rte_eth_xstat_name *xstats_names,
>  				unsigned int limit);
> +static int ice_dev_filter_ctrl(struct rte_eth_dev *dev,
> +			enum rte_filter_type filter_type,
> +			enum rte_filter_op filter_op,
> +			void *arg);
> 
>  static const struct rte_pci_id pci_id_ice_map[] = {
>  	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID,
> ICE_DEV_ID_E810C_BACKPLANE) },
> @@ -141,6 +146,7 @@ static const struct eth_dev_ops ice_eth_dev_ops = {
>  	.xstats_get                   = ice_xstats_get,
>  	.xstats_get_names             = ice_xstats_get_names,
>  	.xstats_reset                 = ice_stats_reset,
> +	.filter_ctrl                  = ice_dev_filter_ctrl,
>  };
> 
>  /* store statistics names and its offset in stats structure */
> @@ -1478,6 +1484,8 @@ ice_dev_init(struct rte_eth_dev *dev)
>  	/* get base queue pairs index  in the device */
>  	ice_base_queue_get(pf);
> 
> +	TAILQ_INIT(&pf->flow_list);
> +
>  	return 0;
> 
>  err_pf_setup:
> @@ -1620,6 +1628,8 @@ ice_dev_uninit(struct rte_eth_dev *dev)
>  {
>  	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
>  	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
> +	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
> +	struct rte_flow *p_flow;
> 
>  	ice_dev_close(dev);
> 
> @@ -1637,6 +1647,13 @@ ice_dev_uninit(struct rte_eth_dev *dev)
>  	rte_intr_callback_unregister(intr_handle,
>  				     ice_interrupt_handler, dev);
> 
> +	/* Remove all flows */
> +	while ((p_flow = TAILQ_FIRST(&pf->flow_list))) {
> +		TAILQ_REMOVE(&pf->flow_list, p_flow, node);
> +		ice_free_switch_filter_rule(p_flow->rule);
> +		rte_free(p_flow);
> +	}
> +
>  	return 0;
>  }
> 
> @@ -3622,6 +3639,33 @@ static int ice_xstats_get_names(__rte_unused
> struct rte_eth_dev *dev,
>  }
> 
>  static int
> +ice_dev_filter_ctrl(struct rte_eth_dev *dev,
> +		     enum rte_filter_type filter_type,
> +		     enum rte_filter_op filter_op,
> +		     void *arg)
> +{
> +	int ret = 0;
> +
> +	if (!dev)
> +		return -EINVAL;
> +
> +	switch (filter_type) {
> +	case RTE_ETH_FILTER_GENERIC:
> +		if (filter_op != RTE_ETH_FILTER_GET)
> +			return -EINVAL;
> +		*(const void **)arg = &ice_flow_ops;
> +		break;
> +	default:
> +		PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
> +					filter_type);
> +		ret = -EINVAL;
> +		break;
> +	}
> +
> +	return ret;
> +}
> +
> +static int
>  ice_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
>  	      struct rte_pci_device *pci_dev)
>  {
> diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
> index 50b966c..8a52239 100644
> --- a/drivers/net/ice/ice_ethdev.h
> +++ b/drivers/net/ice/ice_ethdev.h
> @@ -234,12 +234,16 @@ struct ice_vsi {
>  	bool offset_loaded;
>  };
> 
> +extern const struct rte_flow_ops ice_flow_ops;
> +
>  /* Struct to store flow created. */
>  struct rte_flow {
>  	TAILQ_ENTRY(rte_flow) node;
>  	void *rule;
>  };
> 
> +TAILQ_HEAD(ice_flow_list, rte_flow);
> +
>  struct ice_pf {
>  	struct ice_adapter *adapter; /* The adapter this PF associate to */
>  	struct ice_vsi *main_vsi; /* pointer to main VSI structure */
> @@ -266,6 +270,7 @@ struct ice_pf {
>  	struct ice_eth_stats internal_stats;
>  	bool offset_loaded;
>  	bool adapter_stopped;
> +	struct ice_flow_list flow_list;
>  };
> 
>  /**
> diff --git a/drivers/net/ice/ice_generic_flow.c
> b/drivers/net/ice/ice_generic_flow.c
> new file mode 100644
> index 0000000..c6fce88
> --- /dev/null
> +++ b/drivers/net/ice/ice_generic_flow.c
> @@ -0,0 +1,682 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2019 Intel Corporation
> + */
> +
> +#include <sys/queue.h>
> +#include <stdio.h>
> +#include <errno.h>
> +#include <stdint.h>
> +#include <string.h>

[...]

> +		/* Find a void item */
> +		pe = ice_find_first_item(pb + 1, true);
> +
> +		cpy_count = pe - pb;
> +		rte_memcpy(items, pb, sizeof(struct rte_flow_item) *
> cpy_count);
> +
> +		items += cpy_count;
> +
> +		if (pe->type == RTE_FLOW_ITEM_TYPE_END) {
> +			pb = pe;

No need to "pb = pe".
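
For illustration, the tail of the loop could then simply read (untested sketch of
the quoted code with the redundant assignment dropped):

		if (pe->type == RTE_FLOW_ITEM_TYPE_END)
			break;

		pb = pe + 1;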

> +			break;
> +		}
> +
> +		pb = pe + 1;
> +	}
> +	/* Copy the END item. */
> +	rte_memcpy(items, pe, sizeof(struct rte_flow_item));
> +}
> +
> +/* Check if the pattern matches a supported item type array */
> +static bool
> +ice_match_pattern(enum rte_flow_item_type *item_array,
> +		const struct rte_flow_item *pattern)
> +{
> +	const struct rte_flow_item *item = pattern;
> +
> +	while ((*item_array == item->type) &&

[...]

> +			if (icmp6_mask->code ||
> +			    icmp6_mask->checksum) {
> +				rte_flow_error_set(error, EINVAL,
> +
> RTE_FLOW_ERROR_TYPE_ITEM,
> +						   item,
> +						   "Invalid ICMP6 mask");
> +				return 0;
> +			}
> +
> +			if (icmp6_mask->type == UINT8_MAX)
> +			input_set |= ICE_INSET_ICMP6;

Indent.

> +			break;
> +		case RTE_FLOW_ITEM_TYPE_VXLAN:
> +			vxlan_spec = item->spec;
> +			vxlan_mask = item->mask;
> +			/* Check if VXLAN item is used to describe protocol.
> +			 * If yes, both spec and mask should be NULL.
> +			 * If no, both spec and mask shouldn't be NULL.
> +			 */
> +			if ((!vxlan_spec && vxlan_mask) ||
> +			    (vxlan_spec && !vxlan_mask)) {
> +				rte_flow_error_set(error, EINVAL,
> +					   RTE_FLOW_ERROR_TYPE_ITEM,
> +					   item,
> +					   "Invalid VXLAN item");
> +				return -rte_errno;

I think you need to return 0 as above, since the caller will check (!fields).
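
Something along these lines, keeping the error set so the caller can still
report it (untested sketch of the quoted branch):

			if ((!vxlan_spec && vxlan_mask) ||
			    (vxlan_spec && !vxlan_mask)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid VXLAN item");
				return 0;
			}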

> +			}
> +
> +			break;
> +		case RTE_FLOW_ITEM_TYPE_NVGRE:
> +			nvgre_spec = item->spec;
> +			nvgre_mask = item->mask;
> +			/* Check if VXLAN item is used to describe protocol.

Typo: VXLAN->NVGRE

> +			 * If yes, both spec and mask should be NULL.
> +			 * If no, both spec and mask shouldn't be NULL.
> +			 */
> +			if ((!nvgre_spec && nvgre_mask) ||
> +			    (nvgre_spec && !nvgre_mask)) {
> +				rte_flow_error_set(error, EINVAL,
> +					   RTE_FLOW_ERROR_TYPE_ITEM,
> +					   item,
> +					   "Invalid VXLAN item");
> +				return -rte_errno;

Ditto.

> +			}
> +
> +			break;
> +		default:
> +			rte_flow_error_set(error, EINVAL,
> +					   RTE_FLOW_ERROR_TYPE_ITEM,
> +					   item,
> +					   "Invalid mask no exist");
> +			break;
> +		}
> +	}
> +	return input_set;
> +}
> +
> +static int ice_flow_valid_inset(const struct rte_flow_item pattern[],
> +			uint64_t inset, struct rte_flow_error *error)
> +{
> +	uint64_t fields;
> +
> +	/* get valid field */
> +	fields = ice_get_flow_field(pattern, error);
> +	if ((!fields) || (fields && (!inset))) {

Maybe the intention is: fields & (~inset), i.e. checking whether the user's input set exceeds the supported scope.
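
I.e. something along these lines (untested sketch of the suggested check):

	fields = ice_get_flow_field(pattern, error);
	if (!fields || fields & (~inset)) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
				   pattern,
				   "Invalid input set");
		return -rte_errno;
	}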

> +		rte_flow_error_set(error, EINVAL,
> +				   RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
> +				   pattern,
> +				   "Invalid input set");
> +		return -rte_errno;
> +	}
> +
> +	return 0;
> +}
> +
> +static int ice_flow_valid_action(const struct rte_flow_action *actions,
> +				       struct rte_flow_error *error)
> +{
> +	switch (actions->type) {
> +	case RTE_FLOW_ACTION_TYPE_QUEUE:
> +		break;
> +	case RTE_FLOW_ACTION_TYPE_DROP:
> +		break;
> +	default:

[...]
> +				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
> +				   "Failed to allocate memory");
> +		return flow;
> +	}
> +
> +	ret = ice_flow_validate(dev, attr, pattern, actions, error);
> +	if (ret < 0)
> +		return NULL;

Goto free_flow
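
I.e. roughly (sketch of the quoted code only):

	ret = ice_flow_validate(dev, attr, pattern, actions, error);
	if (ret < 0)
		goto free_flow;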

BRs,
Xiao

> +
> +	ret = ice_create_switch_filter(pf, pattern, actions, flow, error);
> +	if (ret)
> +		goto free_flow;
> +
> +	TAILQ_INSERT_TAIL(&pf->flow_list, flow, node);
> +	return flow;
> +
> +free_flow:
> +	rte_flow_error_set(error, -ret,
> +			   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
> +			   "Failed to create flow.");
> +	rte_free(flow);
> +	return NULL;
> +}
> +
> +static int
> +ice_flow_destroy(struct rte_eth_dev *dev,
> +		 struct rte_flow *flow,
> +		 struct rte_flow_error *error)
> +{
> +	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
> +	int ret = 0;
> +
> +	ret = ice_destroy_switch_filter(pf, flow, error);
> +
> +	if (!ret) {
> +		TAILQ_REMOVE(&pf->flow_list, flow, node);
> +		rte_free(flow);
> +	} else
> +		rte_flow_error_set(error, -ret,
> +				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
> +				   "Failed to destroy flow.");
> +
> +	return ret;
> +}
> +
> +static int
> +ice_flow_flush(struct rte_eth_dev *dev,
> +	       struct rte_flow_error *error)
> +{
> +	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
> +	struct rte_flow *p_flow;
> +	int ret;
> +
> +	TAILQ_FOREACH(p_flow, &pf->flow_list, node) {
> +		ret = ice_flow_destroy(dev, p_flow, error);
> +		if (ret) {
> +			rte_flow_error_set(error, -ret,
> +					   RTE_FLOW_ERROR_TYPE_HANDLE,
> NULL,
> +					   "Failed to flush SW flows.");
> +			return -rte_errno;
> +		}
> +	}
> +
> +	return ret;
> +}
> diff --git a/drivers/net/ice/ice_generic_flow.h
> b/drivers/net/ice/ice_generic_flow.h
> new file mode 100644
> index 0000000..ed7f3fe

^ permalink raw reply	[flat|nested] 73+ messages in thread

* Re: [dpdk-dev] [PATCH v3 2/3] net/ice: add generic flow API
  2019-06-20  5:34   ` [dpdk-dev] [PATCH v3 2/3] net/ice: add generic flow API Qiming Yang
  2019-06-20  9:32     ` Wang, Xiao W
@ 2019-06-20 10:21     ` Wang, Xiao W
  2019-06-20 13:33     ` Aaron Conole
  2 siblings, 0 replies; 73+ messages in thread
From: Wang, Xiao W @ 2019-06-20 10:21 UTC (permalink / raw)
  To: Yang, Qiming, dev; +Cc: Zhang, Qi Z, Xing, Beilei

Hi,

> -----Original Message-----
> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Qiming Yang
> Sent: Thursday, June 20, 2019 1:35 PM
> To: dev@dpdk.org
> Cc: Yang, Qiming <qiming.yang@intel.com>
> Subject: [dpdk-dev] [PATCH v3 2/3] net/ice: add generic flow API
> 
> This patch adds ice_flow_create, ice_flow_destroy,
> ice_flow_flush and ice_flow_validate support,
> these are used to handle all the generic filters.
> 
> Signed-off-by: Qiming Yang <qiming.yang@intel.com>
> ---
>  drivers/net/ice/Makefile           |   1 +
>  drivers/net/ice/ice_ethdev.c       |  44 +++
>  drivers/net/ice/ice_ethdev.h       |   5 +
>  drivers/net/ice/ice_generic_flow.c | 682
> +++++++++++++++++++++++++++++++++++++
>  drivers/net/ice/ice_generic_flow.h | 654
> +++++++++++++++++++++++++++++++++++
>  drivers/net/ice/meson.build        |   1 +
>  6 files changed, 1387 insertions(+)
>  create mode 100644 drivers/net/ice/ice_generic_flow.c
>  create mode 100644 drivers/net/ice/ice_generic_flow.h
> 

[...]
> +		case RTE_FLOW_ITEM_TYPE_ETH:
> +			eth_spec = item->spec;
> +			eth_mask = item->mask;
> +
> +			if (eth_spec && eth_mask) {
> +				if (rte_is_broadcast_ether_addr(&eth_mask-
> >src))
> +					input_set |= ICE_INSET_SMAC;
> +				if (rte_is_broadcast_ether_addr(&eth_mask-
> >dst))
> +					input_set |= ICE_INSET_DMAC;
> +				if (eth_mask->type == RTE_BE16(0xffff))
> +					input_set |= ICE_INSET_ETHERTYPE;
> +			}
> +			break;
> +		case RTE_FLOW_ITEM_TYPE_IPV4:
> +			ipv4_spec = item->spec;
> +			ipv4_mask = item->mask;
> +
> +			if (!(ipv4_spec && ipv4_mask)) {
> +				rte_flow_error_set(error, EINVAL,
> +					   RTE_FLOW_ERROR_TYPE_ITEM,
> +					   item,
> +					   "Invalid IPv4 spec or mask.");
> +				return 0;
> +			}
> +
> +			/* Check IPv4 mask and update input set */
> +			if (ipv4_mask->hdr.version_ihl ||
> +			    ipv4_mask->hdr.total_length ||
> +			    ipv4_mask->hdr.packet_id ||
> +			    ipv4_mask->hdr.hdr_checksum) {
> +				rte_flow_error_set(error, EINVAL,
> +					   RTE_FLOW_ERROR_TYPE_ITEM,
> +					   item,
> +					   "Invalid IPv4 mask.");
> +				return 0;
> +			}
> +
> +			if (outer_ip) {
> +				if (ipv4_mask->hdr.src_addr == UINT32_MAX)
> +					input_set |= ICE_INSET_IPV4_SRC;
> +				if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
> +					input_set |= ICE_INSET_IPV4_DST;
> +				if (ipv4_mask->hdr.type_of_service ==
> UINT8_MAX)
> +					input_set |= ICE_INSET_IPV4_TOS;
> +				if (ipv4_mask->hdr.time_to_live ==
> UINT8_MAX)
> +					input_set |= ICE_INSET_IPV4_TTL;
> +				if (ipv4_mask->hdr.fragment_offset == 0)

Seems a typo. fragment_offset --> next_proto_id == UINT8_MAX.
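
I.e. the outer-IP branch would presumably become (untested sketch):

				if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
					input_set |= ICE_INSET_IPV4_PROTO;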

> +					input_set |= ICE_INSET_IPV4_PROTO;
> +				outer_ip = false;
> +			} else {
> +				if (ipv4_mask->hdr.src_addr == UINT32_MAX)
> +					input_set |=
> ICE_INSET_TUN_IPV4_SRC;
> +				if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
> +					input_set |=
> ICE_INSET_TUN_IPV4_DST;
> +				if (ipv4_mask->hdr.time_to_live ==
> UINT8_MAX)
> +					input_set |=
> ICE_INSET_TUN_IPV4_TTL;
> +				if (ipv4_mask->hdr.next_proto_id ==
> UINT8_MAX)
> +					input_set |=
> ICE_INSET_TUN_IPV4_PROTO;
> +			}
> +			break;
> +		case RTE_FLOW_ITEM_TYPE_IPV6:
> +			ipv6_spec = item->spec;
> +			ipv6_mask = item->mask;
> +
> +			if (!(ipv6_spec && ipv6_mask)) {
> +				rte_flow_error_set(error, EINVAL,
> +					RTE_FLOW_ERROR_TYPE_ITEM,
> +					item, "Invalid IPv6 spec or mask");
> +				return 0;
> +			}

[...]

> +/* bit 16 ~ bit 31 */
> +#define ICE_INSET_IPV4_TOS        0x0000000000010000ULL
> +#define ICE_INSET_IPV4_PROTO      0x0000000000020000ULL
> +#define ICE_INSET_IPV4_TTL        0x0000000000040000ULL
> +#define ICE_INSET_IPV6_NEXT_HDR   0x0000000000200000ULL

It's better to align the name to "ICE_INSET_IPV6_PROTO".

> +#define ICE_INSET_IPV6_HOP_LIMIT  0x0000000000400000ULL
> +#define ICE_INSET_ICMP            0x0000000001000000ULL
> +#define ICE_INSET_ICMP6           0x0000000002000000ULL
> +

BRs,
Xiao

^ permalink raw reply	[flat|nested] 73+ messages in thread

* Re: [dpdk-dev] [PATCH v3 2/3] net/ice: add generic flow API
  2019-06-20  5:34   ` [dpdk-dev] [PATCH v3 2/3] net/ice: add generic flow API Qiming Yang
  2019-06-20  9:32     ` Wang, Xiao W
  2019-06-20 10:21     ` Wang, Xiao W
@ 2019-06-20 13:33     ` Aaron Conole
  2019-06-21  2:18       ` Yang, Qiming
  2 siblings, 1 reply; 73+ messages in thread
From: Aaron Conole @ 2019-06-20 13:33 UTC (permalink / raw)
  To: Qiming Yang; +Cc: dev

Qiming Yang <qiming.yang@intel.com> writes:

> This patch adds ice_flow_create, ice_flow_destroy,
> ice_flow_flush and ice_flow_validate support,
> these are used to handle all the generic filters.
>
> Signed-off-by: Qiming Yang <qiming.yang@intel.com>
> ---
>  drivers/net/ice/Makefile           |   1 +
>  drivers/net/ice/ice_ethdev.c       |  44 +++
>  drivers/net/ice/ice_ethdev.h       |   5 +
>  drivers/net/ice/ice_generic_flow.c | 682 +++++++++++++++++++++++++++++++++++++
>  drivers/net/ice/ice_generic_flow.h | 654 +++++++++++++++++++++++++++++++++++
>  drivers/net/ice/meson.build        |   1 +
>  6 files changed, 1387 insertions(+)
>  create mode 100644 drivers/net/ice/ice_generic_flow.c
>  create mode 100644 drivers/net/ice/ice_generic_flow.h
>
> diff --git a/drivers/net/ice/Makefile b/drivers/net/ice/Makefile
> index b10d826..32abeb6 100644
> --- a/drivers/net/ice/Makefile
> +++ b/drivers/net/ice/Makefile
> @@ -79,5 +79,6 @@ endif
>  ifeq ($(CC_AVX2_SUPPORT), 1)
>  	SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_rxtx_vec_avx2.c
>  endif
> +SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_generic_flow.c
>  
>  include $(RTE_SDK)/mk/rte.lib.mk
> diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
> index a94aa7e..8ee06d1 100644
> --- a/drivers/net/ice/ice_ethdev.c
> +++ b/drivers/net/ice/ice_ethdev.c
> @@ -15,6 +15,7 @@
>  #include "base/ice_dcb.h"
>  #include "ice_ethdev.h"
>  #include "ice_rxtx.h"
> +#include "ice_switch_filter.h"
>  
>  #define ICE_MAX_QP_NUM "max_queue_pair_num"
>  #define ICE_DFLT_OUTER_TAG_TYPE ICE_AQ_VSI_OUTER_TAG_VLAN_9100
> @@ -83,6 +84,10 @@ static int ice_xstats_get(struct rte_eth_dev *dev,
>  static int ice_xstats_get_names(struct rte_eth_dev *dev,
>  				struct rte_eth_xstat_name *xstats_names,
>  				unsigned int limit);
> +static int ice_dev_filter_ctrl(struct rte_eth_dev *dev,
> +			enum rte_filter_type filter_type,
> +			enum rte_filter_op filter_op,
> +			void *arg);
>  
>  static const struct rte_pci_id pci_id_ice_map[] = {
>  	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_BACKPLANE) },
> @@ -141,6 +146,7 @@ static const struct eth_dev_ops ice_eth_dev_ops = {
>  	.xstats_get                   = ice_xstats_get,
>  	.xstats_get_names             = ice_xstats_get_names,
>  	.xstats_reset                 = ice_stats_reset,
> +	.filter_ctrl                  = ice_dev_filter_ctrl,
>  };
>  
>  /* store statistics names and its offset in stats structure */
> @@ -1478,6 +1484,8 @@ ice_dev_init(struct rte_eth_dev *dev)
>  	/* get base queue pairs index  in the device */
>  	ice_base_queue_get(pf);
>  
> +	TAILQ_INIT(&pf->flow_list);
> +
>  	return 0;
>  
>  err_pf_setup:
> @@ -1620,6 +1628,8 @@ ice_dev_uninit(struct rte_eth_dev *dev)
>  {
>  	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
>  	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
> +	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
> +	struct rte_flow *p_flow;
>  
>  	ice_dev_close(dev);
>  
> @@ -1637,6 +1647,13 @@ ice_dev_uninit(struct rte_eth_dev *dev)
>  	rte_intr_callback_unregister(intr_handle,
>  				     ice_interrupt_handler, dev);
>  
> +	/* Remove all flows */
> +	while ((p_flow = TAILQ_FIRST(&pf->flow_list))) {
> +		TAILQ_REMOVE(&pf->flow_list, p_flow, node);
> +		ice_free_switch_filter_rule(p_flow->rule);
> +		rte_free(p_flow);
> +	}
> +
>  	return 0;
>  }
>  
> @@ -3622,6 +3639,33 @@ static int ice_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
>  }
>  
>  static int
> +ice_dev_filter_ctrl(struct rte_eth_dev *dev,
> +		     enum rte_filter_type filter_type,
> +		     enum rte_filter_op filter_op,
> +		     void *arg)
> +{
> +	int ret = 0;
> +
> +	if (!dev)
> +		return -EINVAL;
> +
> +	switch (filter_type) {
> +	case RTE_ETH_FILTER_GENERIC:
> +		if (filter_op != RTE_ETH_FILTER_GET)
> +			return -EINVAL;
> +		*(const void **)arg = &ice_flow_ops;
> +		break;
> +	default:
> +		PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
> +					filter_type);
> +		ret = -EINVAL;
> +		break;
> +	}
> +
> +	return ret;
> +}
> +
> +static int
>  ice_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
>  	      struct rte_pci_device *pci_dev)
>  {
> diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
> index 50b966c..8a52239 100644
> --- a/drivers/net/ice/ice_ethdev.h
> +++ b/drivers/net/ice/ice_ethdev.h
> @@ -234,12 +234,16 @@ struct ice_vsi {
>  	bool offset_loaded;
>  };
>  
> +extern const struct rte_flow_ops ice_flow_ops;
> +
>  /* Struct to store flow created. */
>  struct rte_flow {
>  	TAILQ_ENTRY(rte_flow) node;
>  	void *rule;
>  };
>  
> +TAILQ_HEAD(ice_flow_list, rte_flow);
> +
>  struct ice_pf {
>  	struct ice_adapter *adapter; /* The adapter this PF associate to */
>  	struct ice_vsi *main_vsi; /* pointer to main VSI structure */
> @@ -266,6 +270,7 @@ struct ice_pf {
>  	struct ice_eth_stats internal_stats;
>  	bool offset_loaded;
>  	bool adapter_stopped;
> +	struct ice_flow_list flow_list;
>  };
>  
>  /**
> diff --git a/drivers/net/ice/ice_generic_flow.c b/drivers/net/ice/ice_generic_flow.c
> new file mode 100644
> index 0000000..c6fce88
> --- /dev/null
> +++ b/drivers/net/ice/ice_generic_flow.c
> @@ -0,0 +1,682 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2019 Intel Corporation
> + */
> +
> +#include <sys/queue.h>
> +#include <stdio.h>
> +#include <errno.h>
> +#include <stdint.h>
> +#include <string.h>
> +#include <unistd.h>
> +#include <stdarg.h>
> +
> +#include <rte_ether.h>
> +#include <rte_ethdev_driver.h>
> +#include <rte_malloc.h>
> +
> +#include "ice_ethdev.h"
> +#include "ice_generic_flow.h"
> +#include "ice_switch_filter.h"
> +
> +static int ice_flow_validate(struct rte_eth_dev *dev,
> +		const struct rte_flow_attr *attr,
> +		const struct rte_flow_item pattern[],
> +		const struct rte_flow_action actions[],
> +		struct rte_flow_error *error);
> +static struct rte_flow *ice_flow_create(struct rte_eth_dev *dev,
> +		const struct rte_flow_attr *attr,
> +		const struct rte_flow_item pattern[],
> +		const struct rte_flow_action actions[],
> +		struct rte_flow_error *error);
> +static int ice_flow_destroy(struct rte_eth_dev *dev,
> +		struct rte_flow *flow,
> +		struct rte_flow_error *error);
> +static int ice_flow_flush(struct rte_eth_dev *dev,
> +		struct rte_flow_error *error);
> +
> +const struct rte_flow_ops ice_flow_ops = {
> +	.validate = ice_flow_validate,
> +	.create = ice_flow_create,
> +	.destroy = ice_flow_destroy,
> +	.flush = ice_flow_flush,
> +};
> +
> +static int
> +ice_flow_valid_attr(const struct rte_flow_attr *attr,
> +		     struct rte_flow_error *error)
> +{
> +	/* Must be input direction */
> +	if (!attr->ingress) {
> +		rte_flow_error_set(error, EINVAL,
> +				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
> +				   attr, "Only support ingress.");
> +		return -rte_errno;
> +	}
> +
> +	/* Not supported */
> +	if (attr->egress) {
> +		rte_flow_error_set(error, EINVAL,
> +				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
> +				   attr, "Not support egress.");
> +		return -rte_errno;
> +	}
> +
> +	/* Not supported */
> +	if (attr->priority) {
> +		rte_flow_error_set(error, EINVAL,
> +				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
> +				   attr, "Not support priority.");
> +		return -rte_errno;
> +	}
> +
> +	/* Not supported */
> +	if (attr->group) {
> +		rte_flow_error_set(error, EINVAL,
> +				   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
> +				   attr, "Not support group.");
> +		return -rte_errno;
> +	}
> +
> +	return 0;
> +}
> +
> +/* Find the first VOID or non-VOID item pointer */
> +static const struct rte_flow_item *
> +ice_find_first_item(const struct rte_flow_item *item, bool is_void)
> +{
> +	bool is_find;
> +
> +	while (item->type != RTE_FLOW_ITEM_TYPE_END) {
> +		if (is_void)
> +			is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
> +		else
> +			is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
> +		if (is_find)
> +			break;
> +		item++;
> +	}
> +	return item;
> +}
> +
> +/* Skip all VOID items of the pattern */
> +static void
> +ice_pattern_skip_void_item(struct rte_flow_item *items,
> +			    const struct rte_flow_item *pattern)
> +{
> +	uint32_t cpy_count = 0;
> +	const struct rte_flow_item *pb = pattern, *pe = pattern;
> +
> +	for (;;) {
> +		/* Find a non-void item first */
> +		pb = ice_find_first_item(pb, false);
> +		if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
> +			pe = pb;
> +			break;
> +		}
> +
> +		/* Find a void item */
> +		pe = ice_find_first_item(pb + 1, true);
> +
> +		cpy_count = pe - pb;
> +		rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);
> +
> +		items += cpy_count;
> +
> +		if (pe->type == RTE_FLOW_ITEM_TYPE_END) {
> +			pb = pe;
> +			break;
> +		}
> +
> +		pb = pe + 1;
> +	}
> +	/* Copy the END item. */
> +	rte_memcpy(items, pe, sizeof(struct rte_flow_item));
> +}
> +
> +/* Check if the pattern matches a supported item type array */
> +static bool
> +ice_match_pattern(enum rte_flow_item_type *item_array,
> +		const struct rte_flow_item *pattern)
> +{
> +	const struct rte_flow_item *item = pattern;
> +
> +	while ((*item_array == item->type) &&
> +	       (*item_array != RTE_FLOW_ITEM_TYPE_END)) {
> +		item_array++;
> +		item++;
> +	}
> +
> +	return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
> +		item->type == RTE_FLOW_ITEM_TYPE_END);
> +}
> +
> +static uint64_t ice_flow_valid_pattern(const struct rte_flow_item pattern[],
> +		struct rte_flow_error *error)
> +{
> +	uint16_t i = 0;
> +	uint64_t inset;
> +	struct rte_flow_item *items; /* used for pattern without VOID items */
> +	uint32_t item_num = 0; /* non-void item number */
> +
> +	/* Get the non-void item number of pattern */
> +	while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
> +		if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
> +			item_num++;
> +		i++;
> +	}
> +	item_num++;
> +
> +	items = rte_zmalloc("ice_pattern",
> +			    item_num * sizeof(struct rte_flow_item), 0);
> +	if (!items) {
> +		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
> +				   NULL, "No memory for PMD internal items.");
> +		return -ENOMEM;
> +	}
> +
> +	ice_pattern_skip_void_item(items, pattern);
> +
> +	for (i = 0; i < RTE_DIM(ice_supported_patterns); i++)
> +		if (ice_match_pattern(ice_supported_patterns[i].items,
> +				      items)) {
> +			inset = ice_supported_patterns[i].sw_fields;
> +			rte_free(items);
> +			return inset;
> +		}
> +	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
> +			   pattern, "Unsupported pattern");
> +
> +	rte_free(items);
> +	return 0;
> +}
> +
> +static uint64_t ice_get_flow_field(const struct rte_flow_item pattern[],
> +			struct rte_flow_error *error)
> +{
> +	const struct rte_flow_item *item = pattern;
> +	const struct rte_flow_item_eth *eth_spec, *eth_mask;
> +	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
> +	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
> +	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
> +	const struct rte_flow_item_udp *udp_spec, *udp_mask;
> +	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
> +	const struct rte_flow_item_icmp *icmp_mask;
> +	const struct rte_flow_item_icmp6 *icmp6_mask;
> +	const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
> +	const struct rte_flow_item_nvgre *nvgre_spec, *nvgre_mask;
> +	enum rte_flow_item_type item_type;
> +	uint8_t  ipv6_addr_mask[16] = {
> +		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
> +		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
> +	uint64_t input_set = ICE_INSET_NONE;
> +	bool outer_ip = true;
> +	bool outer_l4 = true;
> +
> +	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
> +		if (item->last) {
> +			rte_flow_error_set(error, EINVAL,
> +					   RTE_FLOW_ERROR_TYPE_ITEM,
> +					   item,
> +					   "Not support range");
> +			return 0;
> +		}
> +		item_type = item->type;
> +		switch (item_type) {
> +		case RTE_FLOW_ITEM_TYPE_ETH:
> +			eth_spec = item->spec;
> +			eth_mask = item->mask;
> +
> +			if (eth_spec && eth_mask) {
> +				if (rte_is_broadcast_ether_addr(&eth_mask->src))
> +					input_set |= ICE_INSET_SMAC;
> +				if (rte_is_broadcast_ether_addr(&eth_mask->dst))
> +					input_set |= ICE_INSET_DMAC;
> +				if (eth_mask->type == RTE_BE16(0xffff))
> +					input_set |= ICE_INSET_ETHERTYPE;
> +			}
> +			break;
> +		case RTE_FLOW_ITEM_TYPE_IPV4:
> +			ipv4_spec = item->spec;
> +			ipv4_mask = item->mask;
> +
> +			if (!(ipv4_spec && ipv4_mask)) {
> +				rte_flow_error_set(error, EINVAL,
> +					   RTE_FLOW_ERROR_TYPE_ITEM,
> +					   item,
> +					   "Invalid IPv4 spec or mask.");
> +				return 0;
> +			}
> +
> +			/* Check IPv4 mask and update input set */
> +			if (ipv4_mask->hdr.version_ihl ||
> +			    ipv4_mask->hdr.total_length ||
> +			    ipv4_mask->hdr.packet_id ||
> +			    ipv4_mask->hdr.hdr_checksum) {
> +				rte_flow_error_set(error, EINVAL,
> +					   RTE_FLOW_ERROR_TYPE_ITEM,
> +					   item,
> +					   "Invalid IPv4 mask.");
> +				return 0;
> +			}
> +
> +			if (outer_ip) {
> +				if (ipv4_mask->hdr.src_addr == UINT32_MAX)
> +					input_set |= ICE_INSET_IPV4_SRC;
> +				if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
> +					input_set |= ICE_INSET_IPV4_DST;
> +				if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
> +					input_set |= ICE_INSET_IPV4_TOS;
> +				if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
> +					input_set |= ICE_INSET_IPV4_TTL;
> +				if (ipv4_mask->hdr.fragment_offset == 0)
> +					input_set |= ICE_INSET_IPV4_PROTO;
> +				outer_ip = false;
> +			} else {
> +				if (ipv4_mask->hdr.src_addr == UINT32_MAX)
> +					input_set |= ICE_INSET_TUN_IPV4_SRC;
> +				if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
> +					input_set |= ICE_INSET_TUN_IPV4_DST;
> +				if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
> +					input_set |= ICE_INSET_TUN_IPV4_TTL;
> +				if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
> +					input_set |= ICE_INSET_TUN_IPV4_PROTO;
> +			}
> +			break;
> +		case RTE_FLOW_ITEM_TYPE_IPV6:
> +			ipv6_spec = item->spec;
> +			ipv6_mask = item->mask;
> +
> +			if (!(ipv6_spec && ipv6_mask)) {
> +				rte_flow_error_set(error, EINVAL,
> +					RTE_FLOW_ERROR_TYPE_ITEM,
> +					item, "Invalid IPv6 spec or mask");
> +				return 0;
> +			}
> +
> +			if (ipv6_mask->hdr.payload_len ||
> +			    ipv6_mask->hdr.vtc_flow) {
> +				rte_flow_error_set(error, EINVAL,
> +					   RTE_FLOW_ERROR_TYPE_ITEM,
> +					   item,
> +					   "Invalid IPv6 mask");
> +				return 0;
> +			}
> +
> +			if (outer_ip) {
> +				if (!memcmp(ipv6_mask->hdr.src_addr,
> +					    ipv6_addr_mask,
> +					    RTE_DIM(ipv6_mask->hdr.src_addr)))
> +					input_set |= ICE_INSET_IPV6_SRC;
> +				if (!memcmp(ipv6_mask->hdr.dst_addr,
> +					    ipv6_addr_mask,
> +					    RTE_DIM(ipv6_mask->hdr.dst_addr)))
> +					input_set |= ICE_INSET_IPV6_DST;
> +				if (ipv6_mask->hdr.proto == UINT8_MAX)
> +					input_set |= ICE_INSET_IPV6_NEXT_HDR;
> +				if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
> +					input_set |= ICE_INSET_IPV6_HOP_LIMIT;
> +				outer_ip = false;
> +			} else {
> +				if (!memcmp(ipv6_mask->hdr.src_addr,
> +					    ipv6_addr_mask,
> +					    RTE_DIM(ipv6_mask->hdr.src_addr)))
> +					input_set |= ICE_INSET_TUN_IPV6_SRC;
> +				if (!memcmp(ipv6_mask->hdr.dst_addr,
> +					    ipv6_addr_mask,
> +					    RTE_DIM(ipv6_mask->hdr.dst_addr)))
> +					input_set |= ICE_INSET_TUN_IPV6_DST;
> +				if (ipv6_mask->hdr.proto == UINT8_MAX)
> +					input_set |= ICE_INSET_TUN_IPV6_PROTO;
> +				if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
> +					input_set |= ICE_INSET_TUN_IPV6_TTL;
> +			}
> +
> +			break;
> +		case RTE_FLOW_ITEM_TYPE_UDP:
> +			udp_spec = item->spec;
> +			udp_mask = item->mask;
> +
> +			if (!(udp_spec && udp_mask)) {
> +				rte_flow_error_set(error, EINVAL,
> +						   RTE_FLOW_ERROR_TYPE_ITEM,
> +						   item, "Invalid UDP mask");
> +				return 0;
> +			}
> +
> +			/* Check UDP mask and update input set*/
> +			if (udp_mask->hdr.dgram_len ||
> +			    udp_mask->hdr.dgram_cksum) {
> +				rte_flow_error_set(error, EINVAL,
> +						   RTE_FLOW_ERROR_TYPE_ITEM,
> +						   item,
> +						   "Invalid UDP mask");
> +				return 0;
> +			}
> +
> +			if (outer_l4) {
> +				if (udp_mask->hdr.src_port == UINT16_MAX)
> +					input_set |= ICE_INSET_SRC_PORT;
> +				if (udp_mask->hdr.dst_port == UINT16_MAX)
> +					input_set |= ICE_INSET_DST_PORT;
> +				outer_l4 = false;
> +			} else {
> +				if (udp_mask->hdr.src_port == UINT16_MAX)
> +					input_set |= ICE_INSET_TUN_SRC_PORT;
> +				if (udp_mask->hdr.dst_port == UINT16_MAX)
> +					input_set |= ICE_INSET_TUN_DST_PORT;
> +			}
> +
> +			break;
> +		case RTE_FLOW_ITEM_TYPE_TCP:
> +			tcp_spec = item->spec;
> +			tcp_mask = item->mask;
> +
> +			if (!(tcp_spec && tcp_mask)) {
> +				rte_flow_error_set(error, EINVAL,
> +						   RTE_FLOW_ERROR_TYPE_ITEM,
> +						   item, "Invalid TCP mask");
> +				return 0;
> +			}
> +
> +			/* Check TCP mask and update input set */
> +			if (tcp_mask->hdr.sent_seq ||
> +			    tcp_mask->hdr.recv_ack ||
> +			    tcp_mask->hdr.data_off ||
> +			    tcp_mask->hdr.tcp_flags ||
> +			    tcp_mask->hdr.rx_win ||
> +			    tcp_mask->hdr.cksum ||
> +			    tcp_mask->hdr.tcp_urp) {
> +				rte_flow_error_set(error, EINVAL,
> +						   RTE_FLOW_ERROR_TYPE_ITEM,
> +						   item,
> +						   "Invalid TCP mask");
> +				return 0;
> +			}
> +
> +			if (outer_l4) {
> +				if (tcp_mask->hdr.src_port == UINT16_MAX)
> +					input_set |= ICE_INSET_SRC_PORT;
> +				if (tcp_mask->hdr.dst_port == UINT16_MAX)
> +					input_set |= ICE_INSET_DST_PORT;
> +				outer_l4 = false;
> +			} else {
> +				if (tcp_mask->hdr.src_port == UINT16_MAX)
> +					input_set |= ICE_INSET_TUN_SRC_PORT;
> +				if (tcp_mask->hdr.dst_port == UINT16_MAX)
> +					input_set |= ICE_INSET_TUN_DST_PORT;
> +			}
> +
> +			break;
> +		case RTE_FLOW_ITEM_TYPE_SCTP:
> +			sctp_spec = item->spec;
> +			sctp_mask = item->mask;
> +
> +			if (!(sctp_spec && sctp_mask)) {
> +				rte_flow_error_set(error, EINVAL,
> +						   RTE_FLOW_ERROR_TYPE_ITEM,
> +						   item, "Invalid SCTP mask");
> +				return 0;
> +			}
> +
> +			/* Check SCTP mask and update input set */
> +			if (sctp_mask->hdr.cksum) {
> +				rte_flow_error_set(error, EINVAL,
> +					   RTE_FLOW_ERROR_TYPE_ITEM,
> +					   item,
> +					   "Invalid SCTP mask");
> +				return 0;
> +			}
> +
> +			if (outer_l4) {
> +				if (sctp_mask->hdr.src_port == UINT16_MAX)
> +					input_set |= ICE_INSET_SRC_PORT;
> +				if (sctp_mask->hdr.dst_port == UINT16_MAX)
> +					input_set |= ICE_INSET_DST_PORT;
> +				outer_l4 = false;
> +			} else {
> +				if (sctp_mask->hdr.src_port == UINT16_MAX)
> +					input_set |= ICE_INSET_TUN_SRC_PORT;
> +				if (sctp_mask->hdr.dst_port == UINT16_MAX)
> +					input_set |= ICE_INSET_TUN_DST_PORT;
> +			}
> +
> +			break;
> +		case RTE_FLOW_ITEM_TYPE_ICMP:
> +			icmp_mask = item->mask;
> +			if (icmp_mask->hdr.icmp_code ||
> +			    icmp_mask->hdr.icmp_cksum ||
> +			    icmp_mask->hdr.icmp_ident ||
> +			    icmp_mask->hdr.icmp_seq_nb) {
> +				rte_flow_error_set(error, EINVAL,
> +						   RTE_FLOW_ERROR_TYPE_ITEM,
> +						   item,
> +						   "Invalid ICMP mask");
> +				return 0;
> +			}
> +
> +			if (icmp_mask->hdr.icmp_type == UINT8_MAX)
> +				input_set |= ICE_INSET_ICMP;
> +			break;
> +		case RTE_FLOW_ITEM_TYPE_ICMP6:
> +			icmp6_mask = item->mask;
> +			if (icmp6_mask->code ||
> +			    icmp6_mask->checksum) {
> +				rte_flow_error_set(error, EINVAL,
> +						   RTE_FLOW_ERROR_TYPE_ITEM,
> +						   item,
> +						   "Invalid ICMP6 mask");
> +				return 0;
> +			}
> +
> +			if (icmp6_mask->type == UINT8_MAX)
> +			input_set |= ICE_INSET_ICMP6;
> +			break;
> +		case RTE_FLOW_ITEM_TYPE_VXLAN:
> +			vxlan_spec = item->spec;
> +			vxlan_mask = item->mask;
> +			/* Check if VXLAN item is used to describe protocol.
> +			 * If yes, both spec and mask should be NULL.
> +			 * If no, both spec and mask shouldn't be NULL.
> +			 */
> +			if ((!vxlan_spec && vxlan_mask) ||
> +			    (vxlan_spec && !vxlan_mask)) {
> +				rte_flow_error_set(error, EINVAL,
> +					   RTE_FLOW_ERROR_TYPE_ITEM,
> +					   item,
> +					   "Invalid VXLAN item");
> +				return -rte_errno;
> +			}
> +
> +			break;
> +		case RTE_FLOW_ITEM_TYPE_NVGRE:
> +			nvgre_spec = item->spec;
> +			nvgre_mask = item->mask;
> +			/* Check if VXLAN item is used to describe protocol.
> +			 * If yes, both spec and mask should be NULL.
> +			 * If no, both spec and mask shouldn't be NULL.
> +			 */
> +			if ((!nvgre_spec && nvgre_mask) ||
> +			    (nvgre_spec && !nvgre_mask)) {
> +				rte_flow_error_set(error, EINVAL,
> +					   RTE_FLOW_ERROR_TYPE_ITEM,
> +					   item,
> +					   "Invalid VXLAN item");
> +				return -rte_errno;
> +			}
> +
> +			break;
> +		default:
> +			rte_flow_error_set(error, EINVAL,
> +					   RTE_FLOW_ERROR_TYPE_ITEM,
> +					   item,
> +					   "Invalid mask no exist");
> +			break;
> +		}
> +	}
> +	return input_set;
> +}
> +
> +static int ice_flow_valid_inset(const struct rte_flow_item pattern[],
> +			uint64_t inset, struct rte_flow_error *error)
> +{
> +	uint64_t fields;
> +
> +	/* get valid field */
> +	fields = ice_get_flow_field(pattern, error);
> +	if ((!fields) || (fields && (!inset))) {
> +		rte_flow_error_set(error, EINVAL,
> +				   RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
> +				   pattern,
> +				   "Invalid input set");
> +		return -rte_errno;
> +	}
> +
> +	return 0;
> +}
> +
> +static int ice_flow_valid_action(const struct rte_flow_action *actions,
> +				       struct rte_flow_error *error)
> +{
> +	switch (actions->type) {
> +	case RTE_FLOW_ACTION_TYPE_QUEUE:
> +		break;
> +	case RTE_FLOW_ACTION_TYPE_DROP:
> +		break;
> +	default:
> +		rte_flow_error_set(error, EINVAL,
> +				   RTE_FLOW_ERROR_TYPE_ACTION, actions,
> +				   "Invalid action.");
> +		return -rte_errno;
> +	}
> +
> +	return 0;
> +}
> +
> +static int
> +ice_flow_validate(__rte_unused struct rte_eth_dev *dev,
> +		   const struct rte_flow_attr *attr,
> +		   const struct rte_flow_item pattern[],
> +		   const struct rte_flow_action actions[],
> +		   struct rte_flow_error *error)
> +{
> +	uint64_t inset = 0;
> +	int ret = ICE_ERR_NOT_SUPPORTED;
> +
> +	if (!pattern) {
> +		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
> +				   NULL, "NULL pattern.");
> +		return -rte_errno;
> +	}
> +
> +	if (!actions) {
> +		rte_flow_error_set(error, EINVAL,
> +				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
> +				   NULL, "NULL action.");
> +		return -rte_errno;
> +	}
> +
> +	if (!attr) {
> +		rte_flow_error_set(error, EINVAL,
> +				   RTE_FLOW_ERROR_TYPE_ATTR,
> +				   NULL, "NULL attribute.");
> +		return -rte_errno;
> +	}
> +
> +	ret = ice_flow_valid_attr(attr, error);
> +	if (!ret)
> +		return ret;
> +
> +	inset = ice_flow_valid_pattern(pattern, error);
> +	if (!inset)
> +		return -rte_errno;
> +
> +	ret = ice_flow_valid_inset(pattern, inset, error);
> +	if (ret)
> +		return ret;
> +
> +	ret = ice_flow_valid_action(actions, error);
> +	if (ret)
> +		return ret;
> +
> +	return 0;
> +}
> +
> +static struct rte_flow *
> +ice_flow_create(struct rte_eth_dev *dev,
> +		 const struct rte_flow_attr *attr,
> +		 const struct rte_flow_item pattern[],
> +		 const struct rte_flow_action actions[],
> +		 struct rte_flow_error *error)
> +{
> +	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
> +	struct rte_flow *flow = NULL;
> +	int ret;
> +
> +	flow = rte_zmalloc("ice_flow", sizeof(struct rte_flow), 0);
> +	if (!flow) {
> +		rte_flow_error_set(error, ENOMEM,
> +				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
> +				   "Failed to allocate memory");
> +		return flow;
> +	}
> +
> +	ret = ice_flow_validate(dev, attr, pattern, actions, error);
> +	if (ret < 0)
> +		return NULL;
> +
> +	ret = ice_create_switch_filter(pf, pattern, actions, flow, error);
> +	if (ret)
> +		goto free_flow;
> +
> +	TAILQ_INSERT_TAIL(&pf->flow_list, flow, node);
> +	return flow;
> +
> +free_flow:
> +	rte_flow_error_set(error, -ret,
> +			   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
> +			   "Failed to create flow.");
> +	rte_free(flow);
> +	return NULL;
> +}
> +
> +static int
> +ice_flow_destroy(struct rte_eth_dev *dev,
> +		 struct rte_flow *flow,
> +		 struct rte_flow_error *error)
> +{
> +	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
> +	int ret = 0;
> +
> +	ret = ice_destroy_switch_filter(pf, flow, error);
> +
> +	if (!ret) {
> +		TAILQ_REMOVE(&pf->flow_list, flow, node);
> +		rte_free(flow);
> +	} else
> +		rte_flow_error_set(error, -ret,
> +				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
> +				   "Failed to destroy flow.");
> +
> +	return ret;
> +}
> +
> +static int
> +ice_flow_flush(struct rte_eth_dev *dev,
> +	       struct rte_flow_error *error)
> +{
> +	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
> +	struct rte_flow *p_flow;
> +	int ret;
> +
> +	TAILQ_FOREACH(p_flow, &pf->flow_list, node) {
> +		ret = ice_flow_destroy(dev, p_flow, error);
> +		if (ret) {
> +			rte_flow_error_set(error, -ret,
> +					   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
> +					   "Failed to flush SW flows.");
> +			return -rte_errno;
> +		}
> +	}
> +
> +	return ret;
> +}
> diff --git a/drivers/net/ice/ice_generic_flow.h b/drivers/net/ice/ice_generic_flow.h
> new file mode 100644
> index 0000000..ed7f3fe
> --- /dev/null
> +++ b/drivers/net/ice/ice_generic_flow.h
> @@ -0,0 +1,654 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2019 Intel Corporation
> + */
> +
> +#ifndef _ICE_GENERIC_FLOW_H_
> +#define _ICE_GENERIC_FLOW_H_
> +
> +#include <rte_flow_driver.h>
> +
> +struct ice_flow_pattern {
> +	enum rte_flow_item_type *items;
> +	uint64_t sw_fields;
> +};
> +
> +#define ICE_INSET_NONE            0x00000000000000000ULL
> +
> +/* bit0 ~ bit 7 */
> +#define ICE_INSET_SMAC            0x0000000000000001ULL
> +#define ICE_INSET_DMAC            0x0000000000000002ULL
> +#define ICE_INSET_ETHERTYPE       0x0000000000000020ULL
> +
> +/* bit 8 ~ bit 15 */
> +#define ICE_INSET_IPV4_SRC        0x0000000000000100ULL
> +#define ICE_INSET_IPV4_DST        0x0000000000000200ULL
> +#define ICE_INSET_IPV6_SRC        0x0000000000000400ULL
> +#define ICE_INSET_IPV6_DST        0x0000000000000800ULL
> +#define ICE_INSET_SRC_PORT        0x0000000000001000ULL
> +#define ICE_INSET_DST_PORT        0x0000000000002000ULL
> +#define ICE_INSET_ARP             0x0000000000004000ULL
> +
> +/* bit 16 ~ bit 31 */
> +#define ICE_INSET_IPV4_TOS        0x0000000000010000ULL
> +#define ICE_INSET_IPV4_PROTO      0x0000000000020000ULL
> +#define ICE_INSET_IPV4_TTL        0x0000000000040000ULL
> +#define ICE_INSET_IPV6_NEXT_HDR   0x0000000000200000ULL
> +#define ICE_INSET_IPV6_HOP_LIMIT  0x0000000000400000ULL
> +#define ICE_INSET_ICMP            0x0000000001000000ULL
> +#define ICE_INSET_ICMP6           0x0000000002000000ULL
> +
> +/* bit 32 ~ bit 47, tunnel fields */
> +#define ICE_INSET_TUN_SMAC           0x0000000100000000ULL
> +#define ICE_INSET_TUN_DMAC           0x0000000200000000ULL
> +#define ICE_INSET_TUN_IPV4_SRC       0x0000000400000000ULL
> +#define ICE_INSET_TUN_IPV4_DST       0x0000000800000000ULL
> +#define ICE_INSET_TUN_IPV4_TTL       0x0000001000000000ULL
> +#define ICE_INSET_TUN_IPV4_PROTO     0x0000002000000000ULL
> +#define ICE_INSET_TUN_IPV6_SRC       0x0000004000000000ULL
> +#define ICE_INSET_TUN_IPV6_DST       0x0000008000000000ULL
> +#define ICE_INSET_TUN_IPV6_TTL       0x0000010000000000ULL
> +#define ICE_INSET_TUN_IPV6_PROTO     0x0000020000000000ULL
> +#define ICE_INSET_TUN_SRC_PORT       0x0000040000000000ULL
> +#define ICE_INSET_TUN_DST_PORT       0x0000080000000000ULL
> +#define ICE_INSET_TUN_ID             0x0000100000000000ULL
> +
> +/* bit 48 ~ bit 55 */
> +#define ICE_INSET_LAST_ETHER_TYPE 0x0001000000000000ULL
> +
> +#define ICE_FLAG_VLAN_INNER  0x00000001ULL
> +#define ICE_FLAG_VLAN_OUTER  0x00000002ULL
> +
> +#define INSET_ETHER ( \
> +	ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)
> +#define INSET_MAC_IPV4 ( \
> +	ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
> +	ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_TOS)
> +#define INSET_MAC_IPV4_L4 ( \
> +	ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
> +	ICE_INSET_IPV4_TOS | ICE_INSET_DST_PORT | \
> +	ICE_INSET_SRC_PORT)
> +#define INSET_MAC_IPV4_ICMP ( \
> +	ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
> +	ICE_INSET_IPV4_TOS | ICE_INSET_ICMP)
> +#define INSET_MAC_IPV6 ( \
> +	ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
> +	ICE_INSET_IPV6_NEXT_HDR | ICE_INSET_IPV6_HOP_LIMIT)
> +#define INSET_MAC_IPV6_L4 ( \
> +	ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
> +	ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_DST_PORT | \
> +	ICE_INSET_SRC_PORT)
> +#define INSET_MAC_IPV6_ICMP ( \
> +	ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
> +	ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_ICMP6)
> +#define INSET_TUNNEL_IPV4_TYPE1 ( \
> +	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
> +	ICE_INSET_TUN_IPV4_TTL | ICE_INSET_TUN_IPV4_PROTO)
> +#define INSET_TUNNEL_IPV4_TYPE2 ( \
> +	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
> +	ICE_INSET_TUN_IPV4_TTL | ICE_INSET_TUN_IPV4_PROTO | \
> +	ICE_INSET_TUN_SRC_PORT | ICE_INSET_TUN_DST_PORT)
> +#define INSET_TUNNEL_IPV4_TYPE3 ( \
> +	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
> +	ICE_INSET_TUN_IPV4_TTL | ICE_INSET_ICMP)
> +#define INSET_TUNNEL_IPV6_TYPE1 ( \
> +	ICE_INSET_TUN_IPV6_SRC | ICE_INSET_TUN_IPV6_DST | \
> +	ICE_INSET_TUN_IPV6_TTL | ICE_INSET_TUN_IPV6_PROTO)
> +#define INSET_TUNNEL_IPV6_TYPE2 ( \
> +	ICE_INSET_TUN_IPV6_SRC | ICE_INSET_TUN_IPV6_DST | \
> +	ICE_INSET_TUN_IPV6_TTL | ICE_INSET_TUN_IPV6_PROTO | \
> +	ICE_INSET_TUN_SRC_PORT | ICE_INSET_TUN_DST_PORT)
> +#define INSET_TUNNEL_IPV6_TYPE3 ( \
> +	ICE_INSET_TUN_IPV6_SRC | ICE_INSET_TUN_IPV6_DST | \
> +	ICE_INSET_TUN_IPV6_TTL | ICE_INSET_ICMP6)
> +
> +/* L2 */
> +static enum rte_flow_item_type pattern_ethertype[] = {
> +	RTE_FLOW_ITEM_TYPE_ETH,
> +	RTE_FLOW_ITEM_TYPE_END,
> +};
> +
> +/* non-tunnel IPv4 */
> +static enum rte_flow_item_type pattern_ipv4[] = {
> +	RTE_FLOW_ITEM_TYPE_ETH,
> +	RTE_FLOW_ITEM_TYPE_IPV4,
> +	RTE_FLOW_ITEM_TYPE_END,
> +};
> +
> +static enum rte_flow_item_type pattern_ipv4_udp[] = {
> +	RTE_FLOW_ITEM_TYPE_ETH,
> +	RTE_FLOW_ITEM_TYPE_IPV4,
> +	RTE_FLOW_ITEM_TYPE_UDP,
> +	RTE_FLOW_ITEM_TYPE_END,
> +};
> +
> +static enum rte_flow_item_type pattern_ipv4_tcp[] = {
> +	RTE_FLOW_ITEM_TYPE_ETH,
> +	RTE_FLOW_ITEM_TYPE_IPV4,
> +	RTE_FLOW_ITEM_TYPE_TCP,
> +	RTE_FLOW_ITEM_TYPE_END,
> +};
> +
> +static enum rte_flow_item_type pattern_ipv4_sctp[] = {
> +	RTE_FLOW_ITEM_TYPE_ETH,
> +	RTE_FLOW_ITEM_TYPE_IPV4,
> +	RTE_FLOW_ITEM_TYPE_SCTP,
> +	RTE_FLOW_ITEM_TYPE_END,
> +};
> +
> +static enum rte_flow_item_type pattern_ipv4_icmp[] = {
> +	RTE_FLOW_ITEM_TYPE_ETH,
> +	RTE_FLOW_ITEM_TYPE_IPV4,
> +	RTE_FLOW_ITEM_TYPE_ICMP,
> +	RTE_FLOW_ITEM_TYPE_END,
> +};
> +
> +/* non-tunnel IPv6 */
> +static enum rte_flow_item_type pattern_ipv6[] = {
> +	RTE_FLOW_ITEM_TYPE_ETH,
> +	RTE_FLOW_ITEM_TYPE_IPV6,
> +	RTE_FLOW_ITEM_TYPE_END,
> +};
> +
> +static enum rte_flow_item_type pattern_ipv6_udp[] = {
> +	RTE_FLOW_ITEM_TYPE_ETH,
> +	RTE_FLOW_ITEM_TYPE_IPV6,
> +	RTE_FLOW_ITEM_TYPE_UDP,
> +	RTE_FLOW_ITEM_TYPE_END,
> +};
> +
> +static enum rte_flow_item_type pattern_ipv6_tcp[] = {
> +	RTE_FLOW_ITEM_TYPE_ETH,
> +	RTE_FLOW_ITEM_TYPE_IPV6,
> +	RTE_FLOW_ITEM_TYPE_TCP,
> +	RTE_FLOW_ITEM_TYPE_END,
> +};
> +
> +static enum rte_flow_item_type pattern_ipv6_sctp[] = {
> +	RTE_FLOW_ITEM_TYPE_ETH,
> +	RTE_FLOW_ITEM_TYPE_IPV6,
> +	RTE_FLOW_ITEM_TYPE_SCTP,
> +	RTE_FLOW_ITEM_TYPE_END,
> +};
> +
> +static enum rte_flow_item_type pattern_ipv6_icmp6[] = {
> +	RTE_FLOW_ITEM_TYPE_ETH,
> +	RTE_FLOW_ITEM_TYPE_IPV6,
> +	RTE_FLOW_ITEM_TYPE_ICMP6,
> +	RTE_FLOW_ITEM_TYPE_END,
> +};
> +
> +/* IPv4 VXLAN IPv4 */
> +static enum rte_flow_item_type pattern_ipv4_vxlan_ipv4[] = {
> +	RTE_FLOW_ITEM_TYPE_ETH,
> +	RTE_FLOW_ITEM_TYPE_IPV4,
> +	RTE_FLOW_ITEM_TYPE_UDP,
> +	RTE_FLOW_ITEM_TYPE_VXLAN,
> +	RTE_FLOW_ITEM_TYPE_IPV4,
> +	RTE_FLOW_ITEM_TYPE_END,
> +};
> +
> +static enum rte_flow_item_type pattern_ipv4_vxlan_ipv4_udp[] = {
> +	RTE_FLOW_ITEM_TYPE_ETH,
> +	RTE_FLOW_ITEM_TYPE_IPV4,
> +	RTE_FLOW_ITEM_TYPE_UDP,
> +	RTE_FLOW_ITEM_TYPE_VXLAN,
> +	RTE_FLOW_ITEM_TYPE_IPV4,
> +	RTE_FLOW_ITEM_TYPE_UDP,
> +	RTE_FLOW_ITEM_TYPE_END,
> +};
> +
> +static enum rte_flow_item_type pattern_ipv4_vxlan_ipv4_tcp[] = {
> +	RTE_FLOW_ITEM_TYPE_ETH,
> +	RTE_FLOW_ITEM_TYPE_IPV4,
> +	RTE_FLOW_ITEM_TYPE_UDP,
> +	RTE_FLOW_ITEM_TYPE_VXLAN,
> +	RTE_FLOW_ITEM_TYPE_IPV4,
> +	RTE_FLOW_ITEM_TYPE_TCP,
> +	RTE_FLOW_ITEM_TYPE_END,
> +};
> +
> +static enum rte_flow_item_type pattern_ipv4_vxlan_ipv4_sctp[] = {
> +	RTE_FLOW_ITEM_TYPE_ETH,
> +	RTE_FLOW_ITEM_TYPE_IPV4,
> +	RTE_FLOW_ITEM_TYPE_UDP,
> +	RTE_FLOW_ITEM_TYPE_VXLAN,
> +	RTE_FLOW_ITEM_TYPE_IPV4,
> +	RTE_FLOW_ITEM_TYPE_SCTP,
> +	RTE_FLOW_ITEM_TYPE_END,
> +};
> +
> +static enum rte_flow_item_type pattern_ipv4_vxlan_ipv4_icmp[] = {
> +	RTE_FLOW_ITEM_TYPE_ETH,
> +	RTE_FLOW_ITEM_TYPE_IPV4,
> +	RTE_FLOW_ITEM_TYPE_UDP,
> +	RTE_FLOW_ITEM_TYPE_VXLAN,
> +	RTE_FLOW_ITEM_TYPE_IPV4,
> +	RTE_FLOW_ITEM_TYPE_ICMP,
> +	RTE_FLOW_ITEM_TYPE_END,
> +};
> +
> +/* IPv4 VXLAN MAC IPv4 */
> +static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv4[] = {
> +	RTE_FLOW_ITEM_TYPE_ETH,
> +	RTE_FLOW_ITEM_TYPE_IPV4,
> +	RTE_FLOW_ITEM_TYPE_UDP,
> +	RTE_FLOW_ITEM_TYPE_VXLAN,
> +	RTE_FLOW_ITEM_TYPE_ETH,
> +	RTE_FLOW_ITEM_TYPE_IPV4,
> +	RTE_FLOW_ITEM_TYPE_END,
> +};
> +
> +static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv4_udp[] = {
> +	RTE_FLOW_ITEM_TYPE_ETH,
> +	RTE_FLOW_ITEM_TYPE_IPV4,
> +	RTE_FLOW_ITEM_TYPE_UDP,
> +	RTE_FLOW_ITEM_TYPE_VXLAN,
> +	RTE_FLOW_ITEM_TYPE_ETH,
> +	RTE_FLOW_ITEM_TYPE_IPV4,
> +	RTE_FLOW_ITEM_TYPE_UDP,
> +	RTE_FLOW_ITEM_TYPE_END,
> +};
> +
> +static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv4_tcp[] = {
> +	RTE_FLOW_ITEM_TYPE_ETH,
> +	RTE_FLOW_ITEM_TYPE_IPV4,
> +	RTE_FLOW_ITEM_TYPE_UDP,
> +	RTE_FLOW_ITEM_TYPE_VXLAN,
> +	RTE_FLOW_ITEM_TYPE_ETH,
> +	RTE_FLOW_ITEM_TYPE_IPV4,
> +	RTE_FLOW_ITEM_TYPE_TCP,
> +	RTE_FLOW_ITEM_TYPE_END,
> +};
> +
> +static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv4_sctp[] = {
> +	RTE_FLOW_ITEM_TYPE_ETH,
> +	RTE_FLOW_ITEM_TYPE_IPV4,
> +	RTE_FLOW_ITEM_TYPE_UDP,
> +	RTE_FLOW_ITEM_TYPE_VXLAN,
> +	RTE_FLOW_ITEM_TYPE_ETH,
> +	RTE_FLOW_ITEM_TYPE_IPV4,
> +	RTE_FLOW_ITEM_TYPE_SCTP,
> +	RTE_FLOW_ITEM_TYPE_END,
> +};
> +
> +static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv4_icmp[] = {
> +	RTE_FLOW_ITEM_TYPE_ETH,
> +	RTE_FLOW_ITEM_TYPE_IPV4,
> +	RTE_FLOW_ITEM_TYPE_UDP,
> +	RTE_FLOW_ITEM_TYPE_VXLAN,
> +	RTE_FLOW_ITEM_TYPE_ETH,
> +	RTE_FLOW_ITEM_TYPE_IPV4,
> +	RTE_FLOW_ITEM_TYPE_ICMP,
> +	RTE_FLOW_ITEM_TYPE_END,
> +};
> +
> +/* IPv4 VXLAN IPv6 */
> +static enum rte_flow_item_type pattern_ipv4_vxlan_ipv6[] = {
> +	RTE_FLOW_ITEM_TYPE_ETH,
> +	RTE_FLOW_ITEM_TYPE_IPV4,
> +	RTE_FLOW_ITEM_TYPE_UDP,
> +	RTE_FLOW_ITEM_TYPE_VXLAN,
> +	RTE_FLOW_ITEM_TYPE_IPV6,
> +	RTE_FLOW_ITEM_TYPE_END,
> +};
> +
> +static enum rte_flow_item_type pattern_ipv4_vxlan_ipv6_udp[] = {
> +	RTE_FLOW_ITEM_TYPE_ETH,
> +	RTE_FLOW_ITEM_TYPE_IPV4,
> +	RTE_FLOW_ITEM_TYPE_UDP,
> +	RTE_FLOW_ITEM_TYPE_VXLAN,
> +	RTE_FLOW_ITEM_TYPE_IPV6,
> +	RTE_FLOW_ITEM_TYPE_UDP,
> +	RTE_FLOW_ITEM_TYPE_END,
> +};
> +
> +static enum rte_flow_item_type pattern_ipv4_vxlan_ipv6_tcp[] = {
> +	RTE_FLOW_ITEM_TYPE_ETH,
> +	RTE_FLOW_ITEM_TYPE_IPV4,
> +	RTE_FLOW_ITEM_TYPE_UDP,
> +	RTE_FLOW_ITEM_TYPE_VXLAN,
> +	RTE_FLOW_ITEM_TYPE_IPV6,
> +	RTE_FLOW_ITEM_TYPE_TCP,
> +	RTE_FLOW_ITEM_TYPE_END,
> +};
> +
> +static enum rte_flow_item_type pattern_ipv4_vxlan_ipv6_sctp[] = {
> +	RTE_FLOW_ITEM_TYPE_ETH,
> +	RTE_FLOW_ITEM_TYPE_IPV4,
> +	RTE_FLOW_ITEM_TYPE_UDP,
> +	RTE_FLOW_ITEM_TYPE_VXLAN,
> +	RTE_FLOW_ITEM_TYPE_IPV6,
> +	RTE_FLOW_ITEM_TYPE_SCTP,
> +	RTE_FLOW_ITEM_TYPE_END,
> +};
> +
> +static enum rte_flow_item_type pattern_ipv4_vxlan_ipv6_icmp[] = {
> +	RTE_FLOW_ITEM_TYPE_ETH,
> +	RTE_FLOW_ITEM_TYPE_IPV4,
> +	RTE_FLOW_ITEM_TYPE_UDP,
> +	RTE_FLOW_ITEM_TYPE_VXLAN,
> +	RTE_FLOW_ITEM_TYPE_IPV6,
> +	RTE_FLOW_ITEM_TYPE_ICMP,
> +	RTE_FLOW_ITEM_TYPE_END,
> +};
> +
> +/* IPv4 VXLAN MAC IPv6 */
> +static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv6[] = {
> +	RTE_FLOW_ITEM_TYPE_ETH,
> +	RTE_FLOW_ITEM_TYPE_IPV4,
> +	RTE_FLOW_ITEM_TYPE_UDP,
> +	RTE_FLOW_ITEM_TYPE_VXLAN,
> +	RTE_FLOW_ITEM_TYPE_ETH,
> +	RTE_FLOW_ITEM_TYPE_IPV6,
> +	RTE_FLOW_ITEM_TYPE_END,
> +};
> +
> +static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv6_udp[] = {
> +	RTE_FLOW_ITEM_TYPE_ETH,
> +	RTE_FLOW_ITEM_TYPE_IPV4,
> +	RTE_FLOW_ITEM_TYPE_UDP,
> +	RTE_FLOW_ITEM_TYPE_VXLAN,
> +	RTE_FLOW_ITEM_TYPE_ETH,
> +	RTE_FLOW_ITEM_TYPE_IPV6,
> +	RTE_FLOW_ITEM_TYPE_UDP,
> +	RTE_FLOW_ITEM_TYPE_END,
> +};
> +
> +static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv6_tcp[] = {
> +	RTE_FLOW_ITEM_TYPE_ETH,
> +	RTE_FLOW_ITEM_TYPE_IPV4,
> +	RTE_FLOW_ITEM_TYPE_UDP,
> +	RTE_FLOW_ITEM_TYPE_VXLAN,
> +	RTE_FLOW_ITEM_TYPE_ETH,
> +	RTE_FLOW_ITEM_TYPE_IPV6,
> +	RTE_FLOW_ITEM_TYPE_TCP,
> +	RTE_FLOW_ITEM_TYPE_END,
> +};
> +
> +static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv6_sctp[] = {
> +	RTE_FLOW_ITEM_TYPE_ETH,
> +	RTE_FLOW_ITEM_TYPE_IPV4,
> +	RTE_FLOW_ITEM_TYPE_UDP,
> +	RTE_FLOW_ITEM_TYPE_VXLAN,
> +	RTE_FLOW_ITEM_TYPE_ETH,
> +	RTE_FLOW_ITEM_TYPE_IPV6,
> +	RTE_FLOW_ITEM_TYPE_SCTP,
> +	RTE_FLOW_ITEM_TYPE_END,
> +};
> +
> +static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv6_icmp[] = {
> +	RTE_FLOW_ITEM_TYPE_ETH,
> +	RTE_FLOW_ITEM_TYPE_IPV4,
> +	RTE_FLOW_ITEM_TYPE_UDP,
> +	RTE_FLOW_ITEM_TYPE_VXLAN,
> +	RTE_FLOW_ITEM_TYPE_ETH,
> +	RTE_FLOW_ITEM_TYPE_IPV6,
> +	RTE_FLOW_ITEM_TYPE_ICMP,
> +	RTE_FLOW_ITEM_TYPE_END,
> +};
> +
> +/* IPv4 NVGRE IPv4 */
> +static enum rte_flow_item_type pattern_ipv4_nvgre_ipv4[] = {
> +	RTE_FLOW_ITEM_TYPE_ETH,
> +	RTE_FLOW_ITEM_TYPE_IPV4,
> +	RTE_FLOW_ITEM_TYPE_UDP,
> +	RTE_FLOW_ITEM_TYPE_VXLAN,
> +	RTE_FLOW_ITEM_TYPE_IPV4,
> +	RTE_FLOW_ITEM_TYPE_END,
> +};
> +
> +static enum rte_flow_item_type pattern_ipv4_nvgre_ipv4_udp[] = {
> +	RTE_FLOW_ITEM_TYPE_ETH,
> +	RTE_FLOW_ITEM_TYPE_IPV4,
> +	RTE_FLOW_ITEM_TYPE_UDP,
> +	RTE_FLOW_ITEM_TYPE_VXLAN,
> +	RTE_FLOW_ITEM_TYPE_IPV4,
> +	RTE_FLOW_ITEM_TYPE_UDP,
> +	RTE_FLOW_ITEM_TYPE_END,
> +};
> +
> +static enum rte_flow_item_type pattern_ipv4_nvgre_ipv4_tcp[] = {
> +	RTE_FLOW_ITEM_TYPE_ETH,
> +	RTE_FLOW_ITEM_TYPE_IPV4,
> +	RTE_FLOW_ITEM_TYPE_UDP,
> +	RTE_FLOW_ITEM_TYPE_VXLAN,
> +	RTE_FLOW_ITEM_TYPE_IPV4,
> +	RTE_FLOW_ITEM_TYPE_TCP,
> +	RTE_FLOW_ITEM_TYPE_END,
> +};
> +
> +static enum rte_flow_item_type pattern_ipv4_nvgre_ipv4_sctp[] = {
> +	RTE_FLOW_ITEM_TYPE_ETH,
> +	RTE_FLOW_ITEM_TYPE_IPV4,
> +	RTE_FLOW_ITEM_TYPE_UDP,
> +	RTE_FLOW_ITEM_TYPE_VXLAN,
> +	RTE_FLOW_ITEM_TYPE_IPV4,
> +	RTE_FLOW_ITEM_TYPE_SCTP,
> +	RTE_FLOW_ITEM_TYPE_END,
> +};
> +
> +static enum rte_flow_item_type pattern_ipv4_nvgre_ipv4_icmp[] = {
> +	RTE_FLOW_ITEM_TYPE_ETH,
> +	RTE_FLOW_ITEM_TYPE_IPV4,
> +	RTE_FLOW_ITEM_TYPE_UDP,
> +	RTE_FLOW_ITEM_TYPE_VXLAN,
> +	RTE_FLOW_ITEM_TYPE_IPV4,
> +	RTE_FLOW_ITEM_TYPE_ICMP,
> +	RTE_FLOW_ITEM_TYPE_END,
> +};
> +
> +/* IPv4 NVGRE MAC IPv4 */
> +static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv4[] = {
> +	RTE_FLOW_ITEM_TYPE_ETH,
> +	RTE_FLOW_ITEM_TYPE_IPV4,
> +	RTE_FLOW_ITEM_TYPE_UDP,
> +	RTE_FLOW_ITEM_TYPE_VXLAN,
> +	RTE_FLOW_ITEM_TYPE_ETH,
> +	RTE_FLOW_ITEM_TYPE_IPV4,
> +	RTE_FLOW_ITEM_TYPE_END,
> +};
> +
> +static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv4_udp[] = {
> +	RTE_FLOW_ITEM_TYPE_ETH,
> +	RTE_FLOW_ITEM_TYPE_IPV4,
> +	RTE_FLOW_ITEM_TYPE_UDP,
> +	RTE_FLOW_ITEM_TYPE_VXLAN,
> +	RTE_FLOW_ITEM_TYPE_ETH,
> +	RTE_FLOW_ITEM_TYPE_IPV4,
> +	RTE_FLOW_ITEM_TYPE_UDP,
> +	RTE_FLOW_ITEM_TYPE_END,
> +};
> +
> +static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv4_tcp[] = {
> +	RTE_FLOW_ITEM_TYPE_ETH,
> +	RTE_FLOW_ITEM_TYPE_IPV4,
> +	RTE_FLOW_ITEM_TYPE_UDP,
> +	RTE_FLOW_ITEM_TYPE_VXLAN,
> +	RTE_FLOW_ITEM_TYPE_ETH,
> +	RTE_FLOW_ITEM_TYPE_IPV4,
> +	RTE_FLOW_ITEM_TYPE_TCP,
> +	RTE_FLOW_ITEM_TYPE_END,
> +};
> +
> +static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv4_sctp[] = {
> +	RTE_FLOW_ITEM_TYPE_ETH,
> +	RTE_FLOW_ITEM_TYPE_IPV4,
> +	RTE_FLOW_ITEM_TYPE_UDP,
> +	RTE_FLOW_ITEM_TYPE_VXLAN,
> +	RTE_FLOW_ITEM_TYPE_ETH,
> +	RTE_FLOW_ITEM_TYPE_IPV4,
> +	RTE_FLOW_ITEM_TYPE_SCTP,
> +	RTE_FLOW_ITEM_TYPE_END,
> +};
> +
> +static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv4_icmp[] = {
> +	RTE_FLOW_ITEM_TYPE_ETH,
> +	RTE_FLOW_ITEM_TYPE_IPV4,
> +	RTE_FLOW_ITEM_TYPE_UDP,
> +	RTE_FLOW_ITEM_TYPE_VXLAN,
> +	RTE_FLOW_ITEM_TYPE_ETH,
> +	RTE_FLOW_ITEM_TYPE_IPV4,
> +	RTE_FLOW_ITEM_TYPE_ICMP,
> +	RTE_FLOW_ITEM_TYPE_END,
> +};
> +
> +/* IPv4 NVGRE IPv6 */
> +static enum rte_flow_item_type pattern_ipv4_nvgre_ipv6[] = {
> +	RTE_FLOW_ITEM_TYPE_ETH,
> +	RTE_FLOW_ITEM_TYPE_IPV4,
> +	RTE_FLOW_ITEM_TYPE_UDP,
> +	RTE_FLOW_ITEM_TYPE_VXLAN,
> +	RTE_FLOW_ITEM_TYPE_IPV6,
> +	RTE_FLOW_ITEM_TYPE_END,
> +};
> +
> +static enum rte_flow_item_type pattern_ipv4_nvgre_ipv6_udp[] = {
> +	RTE_FLOW_ITEM_TYPE_ETH,
> +	RTE_FLOW_ITEM_TYPE_IPV4,
> +	RTE_FLOW_ITEM_TYPE_UDP,
> +	RTE_FLOW_ITEM_TYPE_VXLAN,
> +	RTE_FLOW_ITEM_TYPE_IPV6,
> +	RTE_FLOW_ITEM_TYPE_UDP,
> +	RTE_FLOW_ITEM_TYPE_END,
> +};
> +
> +static enum rte_flow_item_type pattern_ipv4_nvgre_ipv6_tcp[] = {
> +	RTE_FLOW_ITEM_TYPE_ETH,
> +	RTE_FLOW_ITEM_TYPE_IPV4,
> +	RTE_FLOW_ITEM_TYPE_UDP,
> +	RTE_FLOW_ITEM_TYPE_VXLAN,
> +	RTE_FLOW_ITEM_TYPE_IPV6,
> +	RTE_FLOW_ITEM_TYPE_TCP,
> +	RTE_FLOW_ITEM_TYPE_END,
> +};
> +
> +static enum rte_flow_item_type pattern_ipv4_nvgre_ipv6_sctp[] = {
> +	RTE_FLOW_ITEM_TYPE_ETH,
> +	RTE_FLOW_ITEM_TYPE_IPV4,
> +	RTE_FLOW_ITEM_TYPE_UDP,
> +	RTE_FLOW_ITEM_TYPE_VXLAN,
> +	RTE_FLOW_ITEM_TYPE_IPV6,
> +	RTE_FLOW_ITEM_TYPE_SCTP,
> +	RTE_FLOW_ITEM_TYPE_END,
> +};
> +
> +static enum rte_flow_item_type pattern_ipv4_nvgre_ipv6_icmp[] = {
> +	RTE_FLOW_ITEM_TYPE_ETH,
> +	RTE_FLOW_ITEM_TYPE_IPV4,
> +	RTE_FLOW_ITEM_TYPE_UDP,
> +	RTE_FLOW_ITEM_TYPE_VXLAN,
> +	RTE_FLOW_ITEM_TYPE_IPV6,
> +	RTE_FLOW_ITEM_TYPE_ICMP,
> +	RTE_FLOW_ITEM_TYPE_END,
> +};
> +
> +/* IPv4 NVGRE MAC IPv6 */
> +static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv6[] = {
> +	RTE_FLOW_ITEM_TYPE_ETH,
> +	RTE_FLOW_ITEM_TYPE_IPV4,
> +	RTE_FLOW_ITEM_TYPE_UDP,
> +	RTE_FLOW_ITEM_TYPE_VXLAN,
> +	RTE_FLOW_ITEM_TYPE_ETH,
> +	RTE_FLOW_ITEM_TYPE_IPV6,
> +	RTE_FLOW_ITEM_TYPE_END,
> +};
> +
> +static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv6_udp[] = {
> +	RTE_FLOW_ITEM_TYPE_ETH,
> +	RTE_FLOW_ITEM_TYPE_IPV4,
> +	RTE_FLOW_ITEM_TYPE_UDP,
> +	RTE_FLOW_ITEM_TYPE_VXLAN,
> +	RTE_FLOW_ITEM_TYPE_ETH,
> +	RTE_FLOW_ITEM_TYPE_IPV6,
> +	RTE_FLOW_ITEM_TYPE_UDP,
> +	RTE_FLOW_ITEM_TYPE_END,
> +};
> +
> +static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv6_tcp[] = {
> +	RTE_FLOW_ITEM_TYPE_ETH,
> +	RTE_FLOW_ITEM_TYPE_IPV4,
> +	RTE_FLOW_ITEM_TYPE_UDP,
> +	RTE_FLOW_ITEM_TYPE_VXLAN,
> +	RTE_FLOW_ITEM_TYPE_ETH,
> +	RTE_FLOW_ITEM_TYPE_IPV6,
> +	RTE_FLOW_ITEM_TYPE_TCP,
> +	RTE_FLOW_ITEM_TYPE_END,
> +};
> +
> +static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv6_sctp[] = {
> +	RTE_FLOW_ITEM_TYPE_ETH,
> +	RTE_FLOW_ITEM_TYPE_IPV4,
> +	RTE_FLOW_ITEM_TYPE_UDP,
> +	RTE_FLOW_ITEM_TYPE_VXLAN,
> +	RTE_FLOW_ITEM_TYPE_ETH,
> +	RTE_FLOW_ITEM_TYPE_IPV6,
> +	RTE_FLOW_ITEM_TYPE_SCTP,
> +	RTE_FLOW_ITEM_TYPE_END,
> +};
> +
> +static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv6_icmp[] = {
> +	RTE_FLOW_ITEM_TYPE_ETH,
> +	RTE_FLOW_ITEM_TYPE_IPV4,
> +	RTE_FLOW_ITEM_TYPE_UDP,
> +	RTE_FLOW_ITEM_TYPE_VXLAN,
> +	RTE_FLOW_ITEM_TYPE_ETH,
> +	RTE_FLOW_ITEM_TYPE_IPV6,
> +	RTE_FLOW_ITEM_TYPE_ICMP,
> +	RTE_FLOW_ITEM_TYPE_END,
> +};
> +
> +static struct ice_flow_pattern ice_supported_patterns[] = {
> +	{pattern_ethertype, INSET_ETHER},
> +	{pattern_ipv4, INSET_MAC_IPV4},
> +	{pattern_ipv4_udp, INSET_MAC_IPV4_L4},
> +	{pattern_ipv4_sctp, INSET_MAC_IPV4_L4},
> +	{pattern_ipv4_tcp, INSET_MAC_IPV4_L4},
> +	{pattern_ipv4_icmp, INSET_MAC_IPV4_ICMP},
> +	{pattern_ipv6, INSET_MAC_IPV6},
> +	{pattern_ipv6_udp, INSET_MAC_IPV6_L4},
> +	{pattern_ipv6_sctp, INSET_MAC_IPV6_L4},
> +	{pattern_ipv6_tcp, INSET_MAC_IPV6_L4},
> +	{pattern_ipv6_icmp6, INSET_MAC_IPV6_ICMP},
> +	{pattern_ipv4_vxlan_ipv4, INSET_TUNNEL_IPV4_TYPE1},
> +	{pattern_ipv4_vxlan_ipv4_udp, INSET_TUNNEL_IPV4_TYPE2},
> +	{pattern_ipv4_vxlan_ipv4_tcp, INSET_TUNNEL_IPV4_TYPE2},
> +	{pattern_ipv4_vxlan_ipv4_sctp, INSET_TUNNEL_IPV4_TYPE2},
> +	{pattern_ipv4_vxlan_ipv4_icmp, INSET_TUNNEL_IPV4_TYPE3},
> +	{pattern_ipv4_vxlan_eth_ipv4, INSET_TUNNEL_IPV4_TYPE1},
> +	{pattern_ipv4_vxlan_eth_ipv4_udp, INSET_TUNNEL_IPV4_TYPE2},
> +	{pattern_ipv4_vxlan_eth_ipv4_tcp, INSET_TUNNEL_IPV4_TYPE2},
> +	{pattern_ipv4_vxlan_eth_ipv4_sctp, INSET_TUNNEL_IPV4_TYPE2},
> +	{pattern_ipv4_vxlan_eth_ipv4_icmp, INSET_TUNNEL_IPV4_TYPE3},
> +	{pattern_ipv4_vxlan_ipv6, INSET_TUNNEL_IPV6_TYPE1},
> +	{pattern_ipv4_vxlan_ipv6_udp, INSET_TUNNEL_IPV6_TYPE2},
> +	{pattern_ipv4_vxlan_ipv6_tcp, INSET_TUNNEL_IPV6_TYPE2},
> +	{pattern_ipv4_vxlan_ipv6_sctp, INSET_TUNNEL_IPV6_TYPE2},
> +	{pattern_ipv4_vxlan_ipv6_icmp, INSET_TUNNEL_IPV6_TYPE3},
> +	{pattern_ipv4_vxlan_eth_ipv6, INSET_TUNNEL_IPV6_TYPE1},
> +	{pattern_ipv4_vxlan_eth_ipv6_udp, INSET_TUNNEL_IPV6_TYPE2},
> +	{pattern_ipv4_vxlan_eth_ipv6_tcp, INSET_TUNNEL_IPV6_TYPE2},
> +	{pattern_ipv4_vxlan_eth_ipv6_sctp, INSET_TUNNEL_IPV6_TYPE2},
> +	{pattern_ipv4_vxlan_eth_ipv6_icmp, INSET_TUNNEL_IPV6_TYPE3},
> +	{pattern_ipv4_nvgre_ipv4, INSET_TUNNEL_IPV4_TYPE1},
> +	{pattern_ipv4_nvgre_ipv4_udp, INSET_TUNNEL_IPV4_TYPE2},
> +	{pattern_ipv4_nvgre_ipv4_tcp, INSET_TUNNEL_IPV4_TYPE2},
> +	{pattern_ipv4_nvgre_ipv4_sctp, INSET_TUNNEL_IPV4_TYPE2},
> +	{pattern_ipv4_nvgre_ipv4_icmp, INSET_TUNNEL_IPV4_TYPE3},
> +	{pattern_ipv4_nvgre_eth_ipv4, INSET_TUNNEL_IPV4_TYPE1},
> +	{pattern_ipv4_nvgre_eth_ipv4_udp, INSET_TUNNEL_IPV4_TYPE2},
> +	{pattern_ipv4_nvgre_eth_ipv4_tcp, INSET_TUNNEL_IPV4_TYPE2},
> +	{pattern_ipv4_nvgre_eth_ipv4_sctp, INSET_TUNNEL_IPV4_TYPE2},
> +	{pattern_ipv4_nvgre_eth_ipv4_icmp, INSET_TUNNEL_IPV4_TYPE3},
> +	{pattern_ipv4_nvgre_ipv6, INSET_TUNNEL_IPV6_TYPE1},
> +	{pattern_ipv4_nvgre_ipv6_udp, INSET_TUNNEL_IPV6_TYPE2},
> +	{pattern_ipv4_nvgre_ipv6_tcp, INSET_TUNNEL_IPV6_TYPE2},
> +	{pattern_ipv4_nvgre_ipv6_sctp, INSET_TUNNEL_IPV6_TYPE2},
> +	{pattern_ipv4_nvgre_ipv6_icmp, INSET_TUNNEL_IPV6_TYPE3},
> +	{pattern_ipv4_nvgre_eth_ipv6, INSET_TUNNEL_IPV6_TYPE1},
> +	{pattern_ipv4_nvgre_eth_ipv6_udp, INSET_TUNNEL_IPV6_TYPE2},
> +	{pattern_ipv4_nvgre_eth_ipv6_tcp, INSET_TUNNEL_IPV6_TYPE2},
> +	{pattern_ipv4_nvgre_eth_ipv6_sctp, INSET_TUNNEL_IPV6_TYPE2},
> +	{pattern_ipv4_nvgre_eth_ipv6_icmp, INSET_TUNNEL_IPV6_TYPE3},
> +};
> +
> +#endif
> diff --git a/drivers/net/ice/meson.build b/drivers/net/ice/meson.build
> index 8697676..18180b9 100644
> --- a/drivers/net/ice/meson.build
> +++ b/drivers/net/ice/meson.build
> @@ -8,6 +8,7 @@ sources = files(
>  	'ice_ethdev.c',
>  	'ice_rxtx.c',
>  	'ice_switch_filter.c'

Missing a comma here.

> +	'ice_generic_flow.c'
>  	)
>  
>  deps += ['hash']

^ permalink raw reply	[flat|nested] 73+ messages in thread

* Re: [dpdk-dev] [PATCH v3 2/3] net/ice: add generic flow API
  2019-06-20 13:33     ` Aaron Conole
@ 2019-06-21  2:18       ` Yang, Qiming
  0 siblings, 0 replies; 73+ messages in thread
From: Yang, Qiming @ 2019-06-21  2:18 UTC (permalink / raw)
  To: Aaron Conole; +Cc: dev

Thanks for your comments. I'll add it in v4.

> -----Original Message-----
> From: Aaron Conole [mailto:aconole@redhat.com]
> Sent: Thursday, June 20, 2019 9:34 PM
> To: Yang, Qiming <qiming.yang@intel.com>
> Cc: dev@dpdk.org
> Subject: Re: [dpdk-dev] [PATCH v3 2/3] net/ice: add generic flow API
> 
> Qiming Yang <qiming.yang@intel.com> writes:
> 
> > This patch adds ice_flow_create, ice_flow_destroy, ice_flow_flush and
> > ice_flow_validate support, these are used to handle all the generic
> > filters.
> >
> > Signed-off-by: Qiming Yang <qiming.yang@intel.com>
> > ---
> >  drivers/net/ice/Makefile           |   1 +
> >  drivers/net/ice/ice_ethdev.c       |  44 +++
> >  drivers/net/ice/ice_ethdev.h       |   5 +
> >  drivers/net/ice/ice_generic_flow.c | 682 +++++++++++++++++++++++++++++++++++++
> >  drivers/net/ice/ice_generic_flow.h | 654 +++++++++++++++++++++++++++++++++++
> >  drivers/net/ice/meson.build        |   1 +
> >  6 files changed, 1387 insertions(+)
> >  create mode 100644 drivers/net/ice/ice_generic_flow.c
> >  create mode 100644 drivers/net/ice/ice_generic_flow.h
> >
> > diff --git a/drivers/net/ice/Makefile b/drivers/net/ice/Makefile
> > index b10d826..32abeb6 100644
> > --- a/drivers/net/ice/Makefile
> > +++ b/drivers/net/ice/Makefile
> > @@ -79,5 +79,6 @@ endif
> >  ifeq ($(CC_AVX2_SUPPORT), 1)
> >  	SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_rxtx_vec_avx2.c
> >  endif
> > +SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_generic_flow.c
> >
[...]

> > +#endif
> > diff --git a/drivers/net/ice/meson.build b/drivers/net/ice/meson.build
> > index 8697676..18180b9 100644
> > --- a/drivers/net/ice/meson.build
> > +++ b/drivers/net/ice/meson.build
> > @@ -8,6 +8,7 @@ sources = files(
> >  	'ice_ethdev.c',
> >  	'ice_rxtx.c',
> >  	'ice_switch_filter.c'
> 
> Missing a comma here.
> 
> > +	'ice_generic_flow.c'
> >  	)
> >
> >  deps += ['hash']

^ permalink raw reply	[flat|nested] 73+ messages in thread

* Re: [dpdk-dev] [PATCH v3 2/3] net/ice: add generic flow API
  2019-06-20  9:32     ` Wang, Xiao W
@ 2019-06-21  5:47       ` Yang, Qiming
  0 siblings, 0 replies; 73+ messages in thread
From: Yang, Qiming @ 2019-06-21  5:47 UTC (permalink / raw)
  To: Wang, Xiao W, dev; +Cc: Zhang, Qi Z, Xing, Beilei

Hi, Xiao
All the other comments are fixed. Only this one is left:
"fields & (~inset)" can't replace the original check.

Qiming
-----Original Message-----
From: Wang, Xiao W 
Sent: Thursday, June 20, 2019 5:32 PM
To: Yang, Qiming <qiming.yang@intel.com>; dev@dpdk.org
Cc: Zhang, Qi Z <qi.z.zhang@intel.com>; Xing, Beilei <beilei.xing@intel.com>
Subject: RE: [dpdk-dev] [PATCH v3 2/3] net/ice: add generic flow API

Hi,

> -----Original Message-----
> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Qiming Yang
> Sent: Thursday, June 20, 2019 1:35 PM
> To: dev@dpdk.org
> Cc: Yang, Qiming <qiming.yang@intel.com>
> Subject: [dpdk-dev] [PATCH v3 2/3] net/ice: add generic flow API
> 
> This patch adds ice_flow_create, ice_flow_destroy, ice_flow_flush and 
> ice_flow_validate support, these are used to handle all the generic 
> filters.
> 
> Signed-off-by: Qiming Yang <qiming.yang@intel.com>
> ---
>  drivers/net/ice/Makefile           |   1 +
>  drivers/net/ice/ice_ethdev.c       |  44 +++
>  drivers/net/ice/ice_ethdev.h       |   5 +
>  drivers/net/ice/ice_generic_flow.c | 682 +++++++++++++++++++++++++++++++++++++
>  drivers/net/ice/ice_generic_flow.h | 654 +++++++++++++++++++++++++++++++++++
>  drivers/net/ice/meson.build        |   1 +
>  6 files changed, 1387 insertions(+)
>  create mode 100644 drivers/net/ice/ice_generic_flow.c
>  create mode 100644 drivers/net/ice/ice_generic_flow.h
> 

[...]

> +
> +static int ice_flow_valid_inset(const struct rte_flow_item pattern[],
> +			uint64_t inset, struct rte_flow_error *error)
> +{
> +	uint64_t fields;
> +
> +	/* get valid field */
> +	fields = ice_get_flow_field(pattern, error);
> +	if ((!fields) || (fields && (!inset))) {

Maybe the intention is: fields & (~inset), i.e. checking whether the user's input set exceeds the supported scope.

> +		rte_flow_error_set(error, EINVAL,
> +				   RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
> +				   pattern,
> +				   "Invalid input set");
> +		return -rte_errno;
> +	}
> +
> +	return 0;
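
For reference, a minimal sketch of the exceed-scope check described above; as noted at the top of this message, "fields & (~inset)" cannot simply replace the original condition, so treat this only as an illustration of the idea (names follow the quoted ice_flow_valid_inset()):

	/* Reject an empty input set, or any field outside the set
	 * supported by the matched pattern ("inset").
	 */
	fields = ice_get_flow_field(pattern, error);
	if (!fields || (fields & ~inset)) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
				   pattern, "Invalid input set");
		return -rte_errno;
	}

	return 0;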


^ permalink raw reply	[flat|nested] 73+ messages in thread

* [dpdk-dev] [PATCH v4 0/3] Enable rte_flow API in ice driver
  2019-06-03  9:05 [dpdk-dev] [PATCH 0/2] Enable rte_flow API in ice driver Qiming Yang
                   ` (3 preceding siblings ...)
  2019-06-20  5:34 ` [dpdk-dev] [PATCH v3 0/3] Enable rte_flow API in ice driver Qiming Yang
@ 2019-06-21  6:13 ` Qiming Yang
  2019-06-21  6:13   ` [dpdk-dev] [PATCH v4 1/3] net/ice: enable switch filter Qiming Yang
                     ` (2 more replies)
  2019-06-21  9:21 ` [dpdk-dev] [PATCH v5 0/3] Enable rte_flow API in ice driver Qiming Yang
                   ` (5 subsequent siblings)
  10 siblings, 3 replies; 73+ messages in thread
From: Qiming Yang @ 2019-06-21  6:13 UTC (permalink / raw)
  To: dev; +Cc: Qiming Yang

This patch set enables the backend of rte_flow, and the generic
filter related functions in ice driver. Supported flows include
ipv4, tcpv4, udpv4, ipv6, tcpv6, udpv6, tunnel, etc. This patch
set depends on shared code update.

---
v2 changes:
 - added UDP tunnel port support.
 - fixed compile issue.
 - added document update.
v3 changes:
 - removed redundancy parser.
 - added License.
 - added VXLAN and NVGRE item support.
v4 changes:
 - fixed some typos.

Qiming Yang (2):
  net/ice: add generic flow API
  net/ice: add UDP tunnel port support

wei zhao (1):
  net/ice: enable switch filter

 drivers/net/ice/Makefile            |   2 +
 drivers/net/ice/ice_ethdev.c        | 116 ++++++
 drivers/net/ice/ice_ethdev.h        |  12 +
 drivers/net/ice/ice_generic_flow.c  | 682 ++++++++++++++++++++++++++++++++++++
 drivers/net/ice/ice_generic_flow.h  | 654 ++++++++++++++++++++++++++++++++++
 drivers/net/ice/ice_switch_filter.c | 525 +++++++++++++++++++++++++++
 drivers/net/ice/ice_switch_filter.h |  24 ++
 drivers/net/ice/meson.build         |   4 +-
 8 files changed, 2018 insertions(+), 1 deletion(-)
 create mode 100644 drivers/net/ice/ice_generic_flow.c
 create mode 100644 drivers/net/ice/ice_generic_flow.h
 create mode 100644 drivers/net/ice/ice_switch_filter.c
 create mode 100644 drivers/net/ice/ice_switch_filter.h

-- 
2.9.5


^ permalink raw reply	[flat|nested] 73+ messages in thread

* [dpdk-dev] [PATCH v4 1/3] net/ice: enable switch filter
  2019-06-21  6:13 ` [dpdk-dev] [PATCH v4 0/3] Enable rte_flow API in ice driver Qiming Yang
@ 2019-06-21  6:13   ` Qiming Yang
  2019-06-21  6:13   ` [dpdk-dev] [PATCH v4 2/3] net/ice: add generic flow API Qiming Yang
  2019-06-21  6:13   ` [dpdk-dev] [PATCH v4 3/3] net/ice: add UDP tunnel port support Qiming Yang
  2 siblings, 0 replies; 73+ messages in thread
From: Qiming Yang @ 2019-06-21  6:13 UTC (permalink / raw)
  To: dev; +Cc: wei zhao

From: wei zhao <wei.zhao1@intel.com>

The patch enables the backend of rte_flow. It transfers
rte_flow_xxx to device specific data structure and
configures packet process engine's binary classifier
(switch) properly.

Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
---
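For context, a minimal sketch of how an application could exercise this backend through the generic rte_flow API once the flow ops are registered by the follow-up generic flow patch; the port id, destination address and queue index below are illustrative only (requires <rte_flow.h>):

	/* Sketch only: steer IPv4 traffic to 192.168.0.1 to Rx queue 3. */
	uint16_t port_id = 0;
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_ipv4 ip_spec = {
		.hdr.dst_addr = RTE_BE32(0xC0A80001), /* 192.168.0.1 */
	};
	struct rte_flow_item_ipv4 ip_mask = {
		.hdr.dst_addr = RTE_BE32(0xFFFFFFFF),
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ip_spec, .mask = &ip_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 3 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error flow_err;
	struct rte_flow *flow = NULL;

	if (rte_flow_validate(port_id, &attr, pattern, actions, &flow_err) == 0)
		flow = rte_flow_create(port_id, &attr, pattern, actions, &flow_err);

With the generic flow patch applied, rte_flow_validate() lands in the driver's pattern/action checks and rte_flow_create() reaches ice_create_switch_filter() through the registered flow ops.
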
 drivers/net/ice/Makefile            |   1 +
 drivers/net/ice/ice_ethdev.c        |  18 ++
 drivers/net/ice/ice_ethdev.h        |   7 +
 drivers/net/ice/ice_switch_filter.c | 525 ++++++++++++++++++++++++++++++++++++
 drivers/net/ice/ice_switch_filter.h |  24 ++
 drivers/net/ice/meson.build         |   3 +-
 6 files changed, 577 insertions(+), 1 deletion(-)
 create mode 100644 drivers/net/ice/ice_switch_filter.c
 create mode 100644 drivers/net/ice/ice_switch_filter.h

diff --git a/drivers/net/ice/Makefile b/drivers/net/ice/Makefile
index 0e5c55e..b10d826 100644
--- a/drivers/net/ice/Makefile
+++ b/drivers/net/ice/Makefile
@@ -60,6 +60,7 @@ ifeq ($(CONFIG_RTE_ARCH_X86), y)
 SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_rxtx_vec_sse.c
 endif
 
+SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_switch_filter.c
 ifeq ($(findstring RTE_MACHINE_CPUFLAG_AVX2,$(CFLAGS)),RTE_MACHINE_CPUFLAG_AVX2)
 	CC_AVX2_SUPPORT=1
 else
diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index 203d0a9..a94aa7e 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -1364,6 +1364,21 @@ static int ice_load_pkg(struct rte_eth_dev *dev)
 	return err;
 }
 
+static void
+ice_base_queue_get(struct ice_pf *pf)
+{
+	uint32_t reg;
+	struct ice_hw *hw = ICE_PF_TO_HW(pf);
+
+	reg = ICE_READ_REG(hw, PFLAN_RX_QALLOC);
+	if (reg & PFLAN_RX_QALLOC_VALID_M) {
+		pf->base_queue = reg & PFLAN_RX_QALLOC_FIRSTQ_M;
+	} else {
+		PMD_INIT_LOG(WARNING, "Failed to get Rx base queue"
+					" index");
+	}
+}
+
 static int
 ice_dev_init(struct rte_eth_dev *dev)
 {
@@ -1460,6 +1475,9 @@ ice_dev_init(struct rte_eth_dev *dev)
 	/* enable uio intr after callback register */
 	rte_intr_enable(intr_handle);
 
+	/* get base queue pairs index  in the device */
+	ice_base_queue_get(pf);
+
 	return 0;
 
 err_pf_setup:
diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index 1385afa..50b966c 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -234,6 +234,12 @@ struct ice_vsi {
 	bool offset_loaded;
 };
 
+/* Struct to store flow created. */
+struct rte_flow {
+	TAILQ_ENTRY(rte_flow) node;
+	void *rule;
+};
+
 struct ice_pf {
 	struct ice_adapter *adapter; /* The adapter this PF associate to */
 	struct ice_vsi *main_vsi; /* pointer to main VSI structure */
@@ -252,6 +258,7 @@ struct ice_pf {
 	uint16_t hash_lut_size; /* The size of hash lookup table */
 	uint16_t lan_nb_qp_max;
 	uint16_t lan_nb_qps; /* The number of queue pairs of LAN */
+	uint16_t base_queue; /* The base queue pairs index  in the device */
 	struct ice_hw_port_stats stats_offset;
 	struct ice_hw_port_stats stats;
 	/* internal packet statistics, it should be excluded from the total */
diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
new file mode 100644
index 0000000..c1b6c47
--- /dev/null
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -0,0 +1,525 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_eth_ctrl.h>
+#include <rte_tailq.h>
+#include <rte_flow_driver.h>
+
+#include "ice_logs.h"
+#include "base/ice_type.h"
+#include "ice_switch_filter.h"
+
+static int
+ice_parse_switch_filter(
+			const struct rte_flow_item pattern[],
+			const struct rte_flow_action actions[],
+			struct rte_flow_error *error,
+			struct ice_adv_rule_info *rule_info,
+			struct ice_adv_lkup_elem **lkup_list,
+			uint16_t *lkups_num)
+{
+	const struct rte_flow_item *item = pattern;
+	enum rte_flow_item_type item_type;
+	const struct rte_flow_item_eth *eth_spec, *eth_mask;
+	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
+	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
+	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
+	const struct rte_flow_item_udp *udp_spec, *udp_mask;
+	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
+	const struct rte_flow_item_nvgre  *nvgre_spec, *nvgre_mask;
+	const struct rte_flow_item_vxlan  *vxlan_spec, *vxlan_mask;
+	struct ice_adv_lkup_elem *list;
+	uint16_t j, t = 0;
+	uint16_t item_num = 0;
+	enum ice_sw_tunnel_type tun_type = ICE_NON_TUN;
+	uint16_t tunnel_valid = 0;
+
+	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+		if (item->type == RTE_FLOW_ITEM_TYPE_ETH ||
+			item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
+			item->type == RTE_FLOW_ITEM_TYPE_IPV6 ||
+			item->type == RTE_FLOW_ITEM_TYPE_UDP ||
+			item->type == RTE_FLOW_ITEM_TYPE_TCP ||
+			item->type == RTE_FLOW_ITEM_TYPE_SCTP ||
+			item->type == RTE_FLOW_ITEM_TYPE_VXLAN ||
+			item->type == RTE_FLOW_ITEM_TYPE_NVGRE)
+			item_num++;
+		if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN)
+			tun_type = ICE_SW_TUN_VXLAN;
+		if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE)
+			tun_type = ICE_SW_TUN_NVGRE;
+	}
+
+	list = rte_zmalloc(NULL, item_num * sizeof(*list), 0);
+	if (!list) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ITEM, actions,
+				   "No memory for PMD internal items");
+		goto out;
+	}
+	*lkup_list = list;
+
+	for (item = pattern; item->type !=
+			RTE_FLOW_ITEM_TYPE_END; item++) {
+		item_type = item->type;
+
+		switch (item_type) {
+		case RTE_FLOW_ITEM_TYPE_ETH:
+			eth_spec = item->spec;
+			eth_mask = item->mask;
+			if (eth_spec && eth_mask) {
+				list[t].type = (tun_type == ICE_NON_TUN) ?
+					ICE_MAC_OFOS : ICE_MAC_IL;
+				for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
+					if (eth_mask->src.addr_bytes[j] ==
+								UINT8_MAX) {
+						list[t].h_u.eth_hdr.
+							src_addr[j] =
+						eth_spec->src.addr_bytes[j];
+						list[t].m_u.eth_hdr.
+							src_addr[j] =
+						eth_mask->src.addr_bytes[j];
+					}
+					if (eth_mask->dst.addr_bytes[j] ==
+								UINT8_MAX) {
+						list[t].h_u.eth_hdr.
+							dst_addr[j] =
+						eth_spec->dst.addr_bytes[j];
+						list[t].m_u.eth_hdr.
+							dst_addr[j] =
+						eth_mask->dst.addr_bytes[j];
+					}
+				}
+				if (eth_mask->type == UINT16_MAX) {
+					list[t].h_u.eth_hdr.ethtype_id =
+					rte_be_to_cpu_16(eth_spec->type);
+					list[t].m_u.eth_hdr.ethtype_id =
+						UINT16_MAX;
+				}
+				t++;
+			} else if (!eth_spec && !eth_mask) {
+				list[t].type = (tun_type == ICE_NON_TUN) ?
+					ICE_MAC_OFOS : ICE_MAC_IL;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_IPV4:
+			ipv4_spec = item->spec;
+			ipv4_mask = item->mask;
+			if (ipv4_spec && ipv4_mask) {
+				list[t].type = (tun_type == ICE_NON_TUN) ?
+					ICE_IPV4_OFOS : ICE_IPV4_IL;
+				if (ipv4_mask->hdr.src_addr == UINT32_MAX) {
+					list[t].h_u.ipv4_hdr.src_addr =
+						ipv4_spec->hdr.src_addr;
+					list[t].m_u.ipv4_hdr.src_addr =
+						UINT32_MAX;
+				}
+				if (ipv4_mask->hdr.dst_addr == UINT32_MAX) {
+					list[t].h_u.ipv4_hdr.dst_addr =
+						ipv4_spec->hdr.dst_addr;
+					list[t].m_u.ipv4_hdr.dst_addr =
+						UINT32_MAX;
+				}
+				if (ipv4_mask->hdr.time_to_live == UINT8_MAX) {
+					list[t].h_u.ipv4_hdr.time_to_live =
+						ipv4_spec->hdr.time_to_live;
+					list[t].m_u.ipv4_hdr.time_to_live =
+						UINT8_MAX;
+				}
+				if (ipv4_mask->hdr.next_proto_id == UINT8_MAX) {
+					list[t].h_u.ipv4_hdr.protocol =
+						ipv4_spec->hdr.next_proto_id;
+					list[t].m_u.ipv4_hdr.protocol =
+						UINT8_MAX;
+				}
+				if (ipv4_mask->hdr.type_of_service ==
+						UINT8_MAX) {
+					list[t].h_u.ipv4_hdr.tos =
+						ipv4_spec->hdr.type_of_service;
+					list[t].m_u.ipv4_hdr.tos = UINT8_MAX;
+				}
+				t++;
+			} else if (!ipv4_spec && !ipv4_mask) {
+				list[t].type = (tun_type == ICE_NON_TUN) ?
+					ICE_IPV4_OFOS : ICE_IPV4_IL;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_IPV6:
+			ipv6_spec = item->spec;
+			ipv6_mask = item->mask;
+			if (ipv6_spec && ipv6_mask) {
+				list[t].type = (tun_type == ICE_NON_TUN) ?
+					ICE_IPV6_OFOS : ICE_IPV6_IL;
+				for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
+					if (ipv6_mask->hdr.src_addr[j] ==
+								UINT8_MAX) {
+						list[t].h_u.ice_ipv6_ofos_hdr.
+							src_addr[j] =
+						ipv6_spec->hdr.src_addr[j];
+						list[t].m_u.ice_ipv6_ofos_hdr.
+							src_addr[j] =
+						ipv6_mask->hdr.src_addr[j];
+					}
+					if (ipv6_mask->hdr.dst_addr[j] ==
+								UINT8_MAX) {
+						list[t].h_u.ice_ipv6_ofos_hdr.
+							dst_addr[j] =
+						ipv6_spec->hdr.dst_addr[j];
+						list[t].m_u.ice_ipv6_ofos_hdr.
+							dst_addr[j] =
+						ipv6_mask->hdr.dst_addr[j];
+					}
+				}
+				if (ipv6_mask->hdr.proto == UINT8_MAX) {
+					list[t].h_u.ice_ipv6_ofos_hdr.next_hdr =
+						ipv6_spec->hdr.proto;
+					list[t].m_u.ice_ipv6_ofos_hdr.next_hdr =
+						UINT8_MAX;
+				}
+				if (ipv6_mask->hdr.hop_limits == UINT8_MAX) {
+					list[t].h_u.ice_ipv6_ofos_hdr.
+					hop_limit = ipv6_spec->hdr.hop_limits;
+					list[t].m_u.ice_ipv6_ofos_hdr.
+						hop_limit  = UINT8_MAX;
+				}
+				t++;
+			} else if (!ipv6_spec && !ipv6_mask) {
+				list[t].type = (tun_type == ICE_NON_TUN) ?
+					ICE_IPV6_OFOS : ICE_IPV6_IL;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_UDP:
+			udp_spec = item->spec;
+			udp_mask = item->mask;
+			if (udp_spec && udp_mask) {
+				if (tun_type == ICE_SW_TUN_VXLAN &&
+						tunnel_valid == 0)
+					list[t].type = ICE_UDP_OF;
+				else
+					list[t].type = ICE_UDP_ILOS;
+				if (udp_mask->hdr.src_port == UINT16_MAX) {
+					list[t].h_u.l4_hdr.src_port =
+						udp_spec->hdr.src_port;
+					list[t].m_u.l4_hdr.src_port =
+						udp_mask->hdr.src_port;
+				}
+				if (udp_mask->hdr.dst_port == UINT16_MAX) {
+					list[t].h_u.l4_hdr.dst_port =
+						udp_spec->hdr.dst_port;
+					list[t].m_u.l4_hdr.dst_port =
+						udp_mask->hdr.dst_port;
+				}
+				t++;
+			} else if (!udp_spec && !udp_mask) {
+				list[t].type = ICE_UDP_ILOS;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_TCP:
+			tcp_spec = item->spec;
+			tcp_mask = item->mask;
+			if (tcp_spec && tcp_mask) {
+				list[t].type = ICE_TCP_IL;
+				if (tcp_mask->hdr.src_port == UINT16_MAX) {
+					list[t].h_u.l4_hdr.src_port =
+						tcp_spec->hdr.src_port;
+					list[t].m_u.l4_hdr.src_port =
+						tcp_mask->hdr.src_port;
+				}
+				if (tcp_mask->hdr.dst_port == UINT16_MAX) {
+					list[t].h_u.l4_hdr.dst_port =
+						tcp_spec->hdr.dst_port;
+					list[t].m_u.l4_hdr.dst_port =
+						tcp_mask->hdr.dst_port;
+				}
+				t++;
+			} else if (!tcp_spec && !tcp_mask) {
+				list[t].type = ICE_TCP_IL;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_SCTP:
+			sctp_spec = item->spec;
+			sctp_mask = item->mask;
+			if (sctp_spec && sctp_mask) {
+				list[t].type = ICE_SCTP_IL;
+				if (sctp_mask->hdr.src_port == UINT16_MAX) {
+					list[t].h_u.sctp_hdr.src_port =
+						sctp_spec->hdr.src_port;
+					list[t].m_u.sctp_hdr.src_port =
+						sctp_mask->hdr.src_port;
+				}
+				if (sctp_mask->hdr.dst_port == UINT16_MAX) {
+					list[t].h_u.sctp_hdr.dst_port =
+						sctp_spec->hdr.dst_port;
+					list[t].m_u.sctp_hdr.dst_port =
+						sctp_mask->hdr.dst_port;
+				}
+				t++;
+			} else if (!sctp_spec && !sctp_mask) {
+				list[t].type = ICE_SCTP_IL;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_VXLAN:
+			vxlan_spec = item->spec;
+			vxlan_mask = item->mask;
+			tunnel_valid = 1;
+			if (vxlan_spec && vxlan_mask) {
+				list[t].type = ICE_VXLAN;
+				if (vxlan_mask->vni[0] == UINT8_MAX &&
+					vxlan_mask->vni[1] == UINT8_MAX &&
+					vxlan_mask->vni[2] == UINT8_MAX) {
+					list[t].h_u.tnl_hdr.vni =
+						(vxlan_spec->vni[2] << 16) |
+						(vxlan_spec->vni[1] << 8) |
+						vxlan_spec->vni[0];
+					list[t].m_u.tnl_hdr.vni =
+						UINT32_MAX;
+				}
+				t++;
+			} else if (!vxlan_spec && !vxlan_mask) {
+				list[t].type = ICE_VXLAN;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_NVGRE:
+			nvgre_spec = item->spec;
+			nvgre_mask = item->mask;
+			tunnel_valid = 1;
+			if (nvgre_spec && nvgre_mask) {
+				list[t].type = ICE_NVGRE;
+				if (nvgre_mask->tni[0] == UINT8_MAX &&
+					nvgre_mask->tni[1] == UINT8_MAX &&
+					nvgre_mask->tni[2] == UINT8_MAX) {
+					list[t].h_u.nvgre_hdr.tni_flow =
+						(nvgre_spec->tni[2] << 16) |
+						(nvgre_spec->tni[1] << 8) |
+						nvgre_spec->tni[0];
+					list[t].m_u.nvgre_hdr.tni_flow =
+						UINT32_MAX;
+				}
+				t++;
+			} else if (!nvgre_spec && !nvgre_mask) {
+				list[t].type = ICE_NVGRE;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_VOID:
+		case RTE_FLOW_ITEM_TYPE_END:
+			break;
+
+		default:
+			rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ITEM, item,
+				   "Invalid pattern item.");
+			goto out;
+		}
+	}
+
+	rule_info->tun_type = tun_type;
+	*lkups_num = t;
+
+	return 0;
+out:
+	return -rte_errno;
+}
+
+/* For now the ice switch filter action code only
+ * supports QUEUE or DROP actions.
+ */
+static int
+ice_parse_switch_action(struct ice_pf *pf,
+				 const struct rte_flow_action *actions,
+				 struct rte_flow_error *error,
+				 struct ice_adv_rule_info *rule_info)
+{
+	struct ice_vsi *vsi = pf->main_vsi;
+	const struct rte_flow_action_queue *act_q;
+	uint16_t base_queue;
+	const struct rte_flow_action *action;
+	enum rte_flow_action_type action_type;
+
+	base_queue = pf->base_queue;
+	for (action = actions; action->type !=
+			RTE_FLOW_ACTION_TYPE_END; action++) {
+		action_type = action->type;
+		switch (action_type) {
+		case RTE_FLOW_ACTION_TYPE_QUEUE:
+			act_q = action->conf;
+			rule_info->sw_act.fltr_act =
+				ICE_FWD_TO_Q;
+			rule_info->sw_act.fwd_id.q_id =
+				base_queue + act_q->index;
+			if (act_q->index >=
+				pf->dev_data->nb_rx_queues) {
+				rte_flow_error_set(error,
+					EINVAL,
+					RTE_FLOW_ERROR_TYPE_ACTION,
+					actions, "Invalid queue ID"
+					" for switch filter.");
+				return -rte_errno;
+			}
+			break;
+
+		case RTE_FLOW_ACTION_TYPE_DROP:
+			rule_info->sw_act.fltr_act =
+				ICE_DROP_PACKET;
+			break;
+
+		case RTE_FLOW_ACTION_TYPE_VOID:
+			break;
+
+		default:
+			rte_flow_error_set(error,
+				EINVAL,
+				RTE_FLOW_ERROR_TYPE_ACTION,
+				actions,
+				"Invalid action type");
+			return -rte_errno;
+		}
+	}
+
+	rule_info->sw_act.vsi_handle = vsi->idx;
+	rule_info->rx = 1;
+	rule_info->sw_act.src = vsi->idx;
+
+	return 0;
+}
+
+static int
+ice_switch_rule_set(struct ice_pf *pf,
+			struct ice_adv_lkup_elem *list,
+			uint16_t lkups_cnt,
+			struct ice_adv_rule_info *rule_info,
+			struct rte_flow *flow,
+			struct rte_flow_error *error)
+{
+	struct ice_hw *hw = ICE_PF_TO_HW(pf);
+	int ret;
+	struct ice_rule_query_data rule_added = {0};
+	struct ice_rule_query_data *filter_ptr;
+
+	if (lkups_cnt > ICE_MAX_CHAIN_WORDS) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+			"item number too large for rule");
+		return -rte_errno;
+	}
+	if (!list) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+			"lookup list should not be NULL");
+		return -rte_errno;
+	}
+
+	ret = ice_add_adv_rule(hw, list, lkups_cnt, rule_info, &rule_added);
+
+	if (!ret) {
+		filter_ptr = rte_zmalloc("ice_switch_filter",
+			sizeof(struct ice_rule_query_data), 0);
+		if (!filter_ptr) {
+			PMD_DRV_LOG(ERR, "failed to allocate memory");
+			return -EINVAL;
+		}
+		flow->rule = filter_ptr;
+		rte_memcpy(filter_ptr,
+			&rule_added,
+			sizeof(struct ice_rule_query_data));
+	}
+
+	return ret;
+}
+
+int
+ice_create_switch_filter(struct ice_pf *pf,
+			const struct rte_flow_item pattern[],
+			const struct rte_flow_action actions[],
+			struct rte_flow *flow,
+			struct rte_flow_error *error)
+{
+	int ret = 0;
+	struct ice_adv_rule_info rule_info = {0};
+	struct ice_adv_lkup_elem *list = NULL;
+	uint16_t lkups_num = 0;
+
+	ret = ice_parse_switch_filter(pattern, actions, error,
+			&rule_info, &list, &lkups_num);
+	if (ret)
+		goto error;
+
+	ret = ice_parse_switch_action(pf, actions, error, &rule_info);
+	if (ret)
+		goto error;
+
+	ret = ice_switch_rule_set(pf, list, lkups_num, &rule_info, flow, error);
+	if (ret)
+		goto error;
+
+	rte_free(list);
+	return 0;
+
+error:
+	rte_free(list);
+
+	return -rte_errno;
+}
+
+int
+ice_destroy_switch_filter(struct ice_pf *pf,
+			struct rte_flow *flow,
+			struct rte_flow_error *error)
+{
+	struct ice_hw *hw = ICE_PF_TO_HW(pf);
+	int ret;
+	struct ice_rule_query_data *filter_ptr;
+
+	filter_ptr = (struct ice_rule_query_data *)
+			flow->rule;
+
+	if (!filter_ptr) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+			"no such flow"
+			" created by switch filter");
+		return -rte_errno;
+	}
+
+	ret = ice_rem_adv_rule_by_id(hw, filter_ptr);
+	if (ret) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+			"fail to destroy switch filter rule");
+		return -rte_errno;
+	}
+
+	rte_free(filter_ptr);
+	return ret;
+}
+
+void
+ice_free_switch_filter_rule(void *rule)
+{
+	struct ice_rule_query_data *filter_ptr;
+
+	filter_ptr = (struct ice_rule_query_data *)rule;
+
+	rte_free(filter_ptr);
+}
diff --git a/drivers/net/ice/ice_switch_filter.h b/drivers/net/ice/ice_switch_filter.h
new file mode 100644
index 0000000..cea4799
--- /dev/null
+++ b/drivers/net/ice/ice_switch_filter.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+#ifndef _ICE_SWITCH_FILTER_H_
+#define _ICE_SWITCH_FILTER_H_
+
+#include "base/ice_switch.h"
+#include "base/ice_type.h"
+#include "ice_ethdev.h"
+
+int
+ice_create_switch_filter(struct ice_pf *pf,
+			const struct rte_flow_item pattern[],
+			const struct rte_flow_action actions[],
+			struct rte_flow *flow,
+			struct rte_flow_error *error);
+int
+ice_destroy_switch_filter(struct ice_pf *pf,
+			struct rte_flow *flow,
+			struct rte_flow_error *error);
+void
+ice_free_switch_filter_rule(void *rule);
+#endif /* _ICE_SWITCH_FILTER_H_ */
diff --git a/drivers/net/ice/meson.build b/drivers/net/ice/meson.build
index 2bec688..8697676 100644
--- a/drivers/net/ice/meson.build
+++ b/drivers/net/ice/meson.build
@@ -6,7 +6,8 @@ objs = [base_objs]
 
 sources = files(
 	'ice_ethdev.c',
-	'ice_rxtx.c'
+	'ice_rxtx.c',
+	'ice_switch_filter.c'
 	)
 
 deps += ['hash']
-- 
2.9.5


^ permalink raw reply	[flat|nested] 73+ messages in thread

* [dpdk-dev] [PATCH v4 2/3] net/ice: add generic flow API
  2019-06-21  6:13 ` [dpdk-dev] [PATCH v4 0/3] Enable rte_flow API in ice driver Qiming Yang
  2019-06-21  6:13   ` [dpdk-dev] [PATCH v4 1/3] net/ice: enable switch filter Qiming Yang
@ 2019-06-21  6:13   ` Qiming Yang
  2019-06-21  6:13   ` [dpdk-dev] [PATCH v4 3/3] net/ice: add UDP tunnel port support Qiming Yang
  2 siblings, 0 replies; 73+ messages in thread
From: Qiming Yang @ 2019-06-21  6:13 UTC (permalink / raw)
  To: dev; +Cc: Qiming Yang

This patch adds support for ice_flow_create, ice_flow_destroy,
ice_flow_flush and ice_flow_validate; these entry points
handle all the generic filters.
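
(Usage sketch, not part of the patch: the four new entry points are reached
through the standard rte_flow calls. The function name exercise_flow_api and
the port_id/attr/pattern/actions arguments below are placeholders supplied
by the application.)

  #include <rte_flow.h>

  /* Sketch: full validate/create/destroy/flush lifecycle on one port. */
  static int
  exercise_flow_api(uint16_t port_id,
                    const struct rte_flow_attr *attr,
                    const struct rte_flow_item pattern[],
                    const struct rte_flow_action actions[])
  {
      struct rte_flow_error err;
      struct rte_flow *flow;

      if (rte_flow_validate(port_id, attr, pattern, actions, &err))
          return -1;                           /* ends up in ice_flow_validate */

      flow = rte_flow_create(port_id, attr, pattern, actions, &err);
      if (flow == NULL)
          return -1;                           /* ends up in ice_flow_create */

      rte_flow_destroy(port_id, flow, &err);   /* ends up in ice_flow_destroy */
      return rte_flow_flush(port_id, &err);    /* ends up in ice_flow_flush */
  }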

Signed-off-by: Qiming Yang <qiming.yang@intel.com>
---
 drivers/net/ice/Makefile           |   1 +
 drivers/net/ice/ice_ethdev.c       |  44 +++
 drivers/net/ice/ice_ethdev.h       |   5 +
 drivers/net/ice/ice_generic_flow.c | 682 +++++++++++++++++++++++++++++++++++++
 drivers/net/ice/ice_generic_flow.h | 654 +++++++++++++++++++++++++++++++++++
 drivers/net/ice/meson.build        |   3 +-
 6 files changed, 1388 insertions(+), 1 deletion(-)
 create mode 100644 drivers/net/ice/ice_generic_flow.c
 create mode 100644 drivers/net/ice/ice_generic_flow.h

diff --git a/drivers/net/ice/Makefile b/drivers/net/ice/Makefile
index b10d826..32abeb6 100644
--- a/drivers/net/ice/Makefile
+++ b/drivers/net/ice/Makefile
@@ -79,5 +79,6 @@ endif
 ifeq ($(CC_AVX2_SUPPORT), 1)
 	SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_rxtx_vec_avx2.c
 endif
+SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_generic_flow.c
 
 include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index a94aa7e..8ee06d1 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -15,6 +15,7 @@
 #include "base/ice_dcb.h"
 #include "ice_ethdev.h"
 #include "ice_rxtx.h"
+#include "ice_switch_filter.h"
 
 #define ICE_MAX_QP_NUM "max_queue_pair_num"
 #define ICE_DFLT_OUTER_TAG_TYPE ICE_AQ_VSI_OUTER_TAG_VLAN_9100
@@ -83,6 +84,10 @@ static int ice_xstats_get(struct rte_eth_dev *dev,
 static int ice_xstats_get_names(struct rte_eth_dev *dev,
 				struct rte_eth_xstat_name *xstats_names,
 				unsigned int limit);
+static int ice_dev_filter_ctrl(struct rte_eth_dev *dev,
+			enum rte_filter_type filter_type,
+			enum rte_filter_op filter_op,
+			void *arg);
 
 static const struct rte_pci_id pci_id_ice_map[] = {
 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_BACKPLANE) },
@@ -141,6 +146,7 @@ static const struct eth_dev_ops ice_eth_dev_ops = {
 	.xstats_get                   = ice_xstats_get,
 	.xstats_get_names             = ice_xstats_get_names,
 	.xstats_reset                 = ice_stats_reset,
+	.filter_ctrl                  = ice_dev_filter_ctrl,
 };
 
 /* store statistics names and its offset in stats structure */
@@ -1478,6 +1484,8 @@ ice_dev_init(struct rte_eth_dev *dev)
 	/* get base queue pairs index  in the device */
 	ice_base_queue_get(pf);
 
+	TAILQ_INIT(&pf->flow_list);
+
 	return 0;
 
 err_pf_setup:
@@ -1620,6 +1628,8 @@ ice_dev_uninit(struct rte_eth_dev *dev)
 {
 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	struct rte_flow *p_flow;
 
 	ice_dev_close(dev);
 
@@ -1637,6 +1647,13 @@ ice_dev_uninit(struct rte_eth_dev *dev)
 	rte_intr_callback_unregister(intr_handle,
 				     ice_interrupt_handler, dev);
 
+	/* Remove all flows */
+	while ((p_flow = TAILQ_FIRST(&pf->flow_list))) {
+		TAILQ_REMOVE(&pf->flow_list, p_flow, node);
+		ice_free_switch_filter_rule(p_flow->rule);
+		rte_free(p_flow);
+	}
+
 	return 0;
 }
 
@@ -3622,6 +3639,33 @@ static int ice_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
 }
 
 static int
+ice_dev_filter_ctrl(struct rte_eth_dev *dev,
+		     enum rte_filter_type filter_type,
+		     enum rte_filter_op filter_op,
+		     void *arg)
+{
+	int ret = 0;
+
+	if (!dev)
+		return -EINVAL;
+
+	switch (filter_type) {
+	case RTE_ETH_FILTER_GENERIC:
+		if (filter_op != RTE_ETH_FILTER_GET)
+			return -EINVAL;
+		*(const void **)arg = &ice_flow_ops;
+		break;
+	default:
+		PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
+					filter_type);
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+static int
 ice_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 	      struct rte_pci_device *pci_dev)
 {
diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index 50b966c..8a52239 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -234,12 +234,16 @@ struct ice_vsi {
 	bool offset_loaded;
 };
 
+extern const struct rte_flow_ops ice_flow_ops;
+
 /* Struct to store flow created. */
 struct rte_flow {
 	TAILQ_ENTRY(rte_flow) node;
 	void *rule;
 };
 
+TAILQ_HEAD(ice_flow_list, rte_flow);
+
 struct ice_pf {
 	struct ice_adapter *adapter; /* The adapter this PF associate to */
 	struct ice_vsi *main_vsi; /* pointer to main VSI structure */
@@ -266,6 +270,7 @@ struct ice_pf {
 	struct ice_eth_stats internal_stats;
 	bool offset_loaded;
 	bool adapter_stopped;
+	struct ice_flow_list flow_list;
 };
 
 /**
diff --git a/drivers/net/ice/ice_generic_flow.c b/drivers/net/ice/ice_generic_flow.c
new file mode 100644
index 0000000..eeb776e
--- /dev/null
+++ b/drivers/net/ice/ice_generic_flow.c
@@ -0,0 +1,682 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_malloc.h>
+
+#include "ice_ethdev.h"
+#include "ice_generic_flow.h"
+#include "ice_switch_filter.h"
+
+static int ice_flow_validate(struct rte_eth_dev *dev,
+		const struct rte_flow_attr *attr,
+		const struct rte_flow_item pattern[],
+		const struct rte_flow_action actions[],
+		struct rte_flow_error *error);
+static struct rte_flow *ice_flow_create(struct rte_eth_dev *dev,
+		const struct rte_flow_attr *attr,
+		const struct rte_flow_item pattern[],
+		const struct rte_flow_action actions[],
+		struct rte_flow_error *error);
+static int ice_flow_destroy(struct rte_eth_dev *dev,
+		struct rte_flow *flow,
+		struct rte_flow_error *error);
+static int ice_flow_flush(struct rte_eth_dev *dev,
+		struct rte_flow_error *error);
+
+const struct rte_flow_ops ice_flow_ops = {
+	.validate = ice_flow_validate,
+	.create = ice_flow_create,
+	.destroy = ice_flow_destroy,
+	.flush = ice_flow_flush,
+};
+
+static int
+ice_flow_valid_attr(const struct rte_flow_attr *attr,
+		     struct rte_flow_error *error)
+{
+	/* Must be input direction */
+	if (!attr->ingress) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+				   attr, "Only ingress is supported.");
+		return -rte_errno;
+	}
+
+	/* Not supported */
+	if (attr->egress) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+				   attr, "Egress is not supported.");
+		return -rte_errno;
+	}
+
+	/* Not supported */
+	if (attr->priority) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+				   attr, "Priority is not supported.");
+		return -rte_errno;
+	}
+
+	/* Not supported */
+	if (attr->group) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+				   attr, "Group is not supported.");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+/* Find the first VOID or non-VOID item pointer */
+static const struct rte_flow_item *
+ice_find_first_item(const struct rte_flow_item *item, bool is_void)
+{
+	bool is_find;
+
+	while (item->type != RTE_FLOW_ITEM_TYPE_END) {
+		if (is_void)
+			is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
+		else
+			is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
+		if (is_find)
+			break;
+		item++;
+	}
+	return item;
+}
+
+/* Skip all VOID items of the pattern */
+static void
+ice_pattern_skip_void_item(struct rte_flow_item *items,
+			    const struct rte_flow_item *pattern)
+{
+	uint32_t cpy_count = 0;
+	const struct rte_flow_item *pb = pattern, *pe = pattern;
+
+	for (;;) {
+		/* Find a non-void item first */
+		pb = ice_find_first_item(pb, false);
+		if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
+			pe = pb;
+			break;
+		}
+
+		/* Find a void item */
+		pe = ice_find_first_item(pb + 1, true);
+
+		cpy_count = pe - pb;
+		rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);
+
+		items += cpy_count;
+
+		if (pe->type == RTE_FLOW_ITEM_TYPE_END) {
+			pb = pe;
+			break;
+		}
+
+		pb = pe + 1;
+	}
+	/* Copy the END item. */
+	rte_memcpy(items, pe, sizeof(struct rte_flow_item));
+}
+
+/* Check if the pattern matches a supported item type array */
+static bool
+ice_match_pattern(enum rte_flow_item_type *item_array,
+		const struct rte_flow_item *pattern)
+{
+	const struct rte_flow_item *item = pattern;
+
+	while ((*item_array == item->type) &&
+	       (*item_array != RTE_FLOW_ITEM_TYPE_END)) {
+		item_array++;
+		item++;
+	}
+
+	return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
+		item->type == RTE_FLOW_ITEM_TYPE_END);
+}
+
+static uint64_t ice_flow_valid_pattern(const struct rte_flow_item pattern[],
+		struct rte_flow_error *error)
+{
+	uint16_t i = 0;
+	uint64_t inset;
+	struct rte_flow_item *items; /* used for pattern without VOID items */
+	uint32_t item_num = 0; /* non-void item number */
+
+	/* Get the non-void item number of pattern */
+	while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
+		if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
+			item_num++;
+		i++;
+	}
+	item_num++;
+
+	items = rte_zmalloc("ice_pattern",
+			    item_num * sizeof(struct rte_flow_item), 0);
+	if (!items) {
+		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+				   NULL, "No memory for PMD internal items.");
+		return ICE_INSET_NONE;
+	}
+
+	ice_pattern_skip_void_item(items, pattern);
+
+	for (i = 0; i < RTE_DIM(ice_supported_patterns); i++)
+		if (ice_match_pattern(ice_supported_patterns[i].items,
+				      items)) {
+			inset = ice_supported_patterns[i].sw_fields;
+			rte_free(items);
+			return inset;
+		}
+	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+			   pattern, "Unsupported pattern");
+
+	rte_free(items);
+	return 0;
+}
+
+static uint64_t ice_get_flow_field(const struct rte_flow_item pattern[],
+			struct rte_flow_error *error)
+{
+	const struct rte_flow_item *item = pattern;
+	const struct rte_flow_item_eth *eth_spec, *eth_mask;
+	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
+	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
+	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
+	const struct rte_flow_item_udp *udp_spec, *udp_mask;
+	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
+	const struct rte_flow_item_icmp *icmp_mask;
+	const struct rte_flow_item_icmp6 *icmp6_mask;
+	const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
+	const struct rte_flow_item_nvgre *nvgre_spec, *nvgre_mask;
+	enum rte_flow_item_type item_type;
+	uint8_t  ipv6_addr_mask[16] = {
+		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
+	uint64_t input_set = ICE_INSET_NONE;
+	bool outer_ip = true;
+	bool outer_l4 = true;
+
+	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+		if (item->last) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Range is not supported");
+			return 0;
+		}
+		item_type = item->type;
+		switch (item_type) {
+		case RTE_FLOW_ITEM_TYPE_ETH:
+			eth_spec = item->spec;
+			eth_mask = item->mask;
+
+			if (eth_spec && eth_mask) {
+				if (rte_is_broadcast_ether_addr(&eth_mask->src))
+					input_set |= ICE_INSET_SMAC;
+				if (rte_is_broadcast_ether_addr(&eth_mask->dst))
+					input_set |= ICE_INSET_DMAC;
+				if (eth_mask->type == RTE_BE16(0xffff))
+					input_set |= ICE_INSET_ETHERTYPE;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_IPV4:
+			ipv4_spec = item->spec;
+			ipv4_mask = item->mask;
+
+			if (!(ipv4_spec && ipv4_mask)) {
+				rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Invalid IPv4 spec or mask.");
+				return 0;
+			}
+
+			/* Check IPv4 mask and update input set */
+			if (ipv4_mask->hdr.version_ihl ||
+			    ipv4_mask->hdr.total_length ||
+			    ipv4_mask->hdr.packet_id ||
+			    ipv4_mask->hdr.hdr_checksum) {
+				rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Invalid IPv4 mask.");
+				return 0;
+			}
+
+			if (outer_ip) {
+				if (ipv4_mask->hdr.src_addr == UINT32_MAX)
+					input_set |= ICE_INSET_IPV4_SRC;
+				if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
+					input_set |= ICE_INSET_IPV4_DST;
+				if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
+					input_set |= ICE_INSET_IPV4_TOS;
+				if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
+					input_set |= ICE_INSET_IPV4_TTL;
+				if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
+					input_set |= ICE_INSET_IPV4_PROTO;
+				outer_ip = false;
+			} else {
+				if (ipv4_mask->hdr.src_addr == UINT32_MAX)
+					input_set |= ICE_INSET_TUN_IPV4_SRC;
+				if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
+					input_set |= ICE_INSET_TUN_IPV4_DST;
+				if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
+					input_set |= ICE_INSET_TUN_IPV4_TTL;
+				if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
+					input_set |= ICE_INSET_TUN_IPV4_PROTO;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_IPV6:
+			ipv6_spec = item->spec;
+			ipv6_mask = item->mask;
+
+			if (!(ipv6_spec && ipv6_mask)) {
+				rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ITEM,
+					item, "Invalid IPv6 spec or mask");
+				return 0;
+			}
+
+			if (ipv6_mask->hdr.payload_len ||
+			    ipv6_mask->hdr.vtc_flow) {
+				rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Invalid IPv6 mask");
+				return 0;
+			}
+
+			if (outer_ip) {
+				if (!memcmp(ipv6_mask->hdr.src_addr,
+					    ipv6_addr_mask,
+					    RTE_DIM(ipv6_mask->hdr.src_addr)))
+					input_set |= ICE_INSET_IPV6_SRC;
+				if (!memcmp(ipv6_mask->hdr.dst_addr,
+					    ipv6_addr_mask,
+					    RTE_DIM(ipv6_mask->hdr.dst_addr)))
+					input_set |= ICE_INSET_IPV6_DST;
+				if (ipv6_mask->hdr.proto == UINT8_MAX)
+					input_set |= ICE_INSET_IPV6_PROTO;
+				if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
+					input_set |= ICE_INSET_IPV6_HOP_LIMIT;
+				outer_ip = false;
+			} else {
+				if (!memcmp(ipv6_mask->hdr.src_addr,
+					    ipv6_addr_mask,
+					    RTE_DIM(ipv6_mask->hdr.src_addr)))
+					input_set |= ICE_INSET_TUN_IPV6_SRC;
+				if (!memcmp(ipv6_mask->hdr.dst_addr,
+					    ipv6_addr_mask,
+					    RTE_DIM(ipv6_mask->hdr.dst_addr)))
+					input_set |= ICE_INSET_TUN_IPV6_DST;
+				if (ipv6_mask->hdr.proto == UINT8_MAX)
+					input_set |= ICE_INSET_TUN_IPV6_PROTO;
+				if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
+					input_set |= ICE_INSET_TUN_IPV6_TTL;
+			}
+
+			break;
+		case RTE_FLOW_ITEM_TYPE_UDP:
+			udp_spec = item->spec;
+			udp_mask = item->mask;
+
+			if (!(udp_spec && udp_mask)) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item, "Invalid UDP mask");
+				return 0;
+			}
+
+			/* Check UDP mask and update input set*/
+			if (udp_mask->hdr.dgram_len ||
+			    udp_mask->hdr.dgram_cksum) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid UDP mask");
+				return 0;
+			}
+
+			if (outer_l4) {
+				if (udp_mask->hdr.src_port == UINT16_MAX)
+					input_set |= ICE_INSET_SRC_PORT;
+				if (udp_mask->hdr.dst_port == UINT16_MAX)
+					input_set |= ICE_INSET_DST_PORT;
+				outer_l4 = false;
+			} else {
+				if (udp_mask->hdr.src_port == UINT16_MAX)
+					input_set |= ICE_INSET_TUN_SRC_PORT;
+				if (udp_mask->hdr.dst_port == UINT16_MAX)
+					input_set |= ICE_INSET_TUN_DST_PORT;
+			}
+
+			break;
+		case RTE_FLOW_ITEM_TYPE_TCP:
+			tcp_spec = item->spec;
+			tcp_mask = item->mask;
+
+			if (!(tcp_spec && tcp_mask)) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item, "Invalid TCP mask");
+				return 0;
+			}
+
+			/* Check TCP mask and update input set */
+			if (tcp_mask->hdr.sent_seq ||
+			    tcp_mask->hdr.recv_ack ||
+			    tcp_mask->hdr.data_off ||
+			    tcp_mask->hdr.tcp_flags ||
+			    tcp_mask->hdr.rx_win ||
+			    tcp_mask->hdr.cksum ||
+			    tcp_mask->hdr.tcp_urp) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid TCP mask");
+				return 0;
+			}
+
+			if (outer_l4) {
+				if (tcp_mask->hdr.src_port == UINT16_MAX)
+					input_set |= ICE_INSET_SRC_PORT;
+				if (tcp_mask->hdr.dst_port == UINT16_MAX)
+					input_set |= ICE_INSET_DST_PORT;
+				outer_l4 = false;
+			} else {
+				if (tcp_mask->hdr.src_port == UINT16_MAX)
+					input_set |= ICE_INSET_TUN_SRC_PORT;
+				if (tcp_mask->hdr.dst_port == UINT16_MAX)
+					input_set |= ICE_INSET_TUN_DST_PORT;
+			}
+
+			break;
+		case RTE_FLOW_ITEM_TYPE_SCTP:
+			sctp_spec = item->spec;
+			sctp_mask = item->mask;
+
+			if (!(sctp_spec && sctp_mask)) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item, "Invalid SCTP mask");
+				return 0;
+			}
+
+			/* Check SCTP mask and update input set */
+			if (sctp_mask->hdr.cksum) {
+				rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Invalid SCTP mask");
+				return 0;
+			}
+
+			if (outer_l4) {
+				if (sctp_mask->hdr.src_port == UINT16_MAX)
+					input_set |= ICE_INSET_SRC_PORT;
+				if (sctp_mask->hdr.dst_port == UINT16_MAX)
+					input_set |= ICE_INSET_DST_PORT;
+				outer_l4 = false;
+			} else {
+				if (sctp_mask->hdr.src_port == UINT16_MAX)
+					input_set |= ICE_INSET_TUN_SRC_PORT;
+				if (sctp_mask->hdr.dst_port == UINT16_MAX)
+					input_set |= ICE_INSET_TUN_DST_PORT;
+			}
+
+			break;
+		case RTE_FLOW_ITEM_TYPE_ICMP:
+			icmp_mask = item->mask;
+			if (icmp_mask->hdr.icmp_code ||
+			    icmp_mask->hdr.icmp_cksum ||
+			    icmp_mask->hdr.icmp_ident ||
+			    icmp_mask->hdr.icmp_seq_nb) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid ICMP mask");
+				return 0;
+			}
+
+			if (icmp_mask->hdr.icmp_type == UINT8_MAX)
+				input_set |= ICE_INSET_ICMP;
+			break;
+		case RTE_FLOW_ITEM_TYPE_ICMP6:
+			icmp6_mask = item->mask;
+			if (icmp6_mask->code ||
+			    icmp6_mask->checksum) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid ICMP6 mask");
+				return 0;
+			}
+
+			if (icmp6_mask->type == UINT8_MAX)
+				input_set |= ICE_INSET_ICMP6;
+			break;
+		case RTE_FLOW_ITEM_TYPE_VXLAN:
+			vxlan_spec = item->spec;
+			vxlan_mask = item->mask;
+			/* Check if VXLAN item is used to describe protocol.
+			 * If yes, both spec and mask should be NULL.
+			 * If no, both spec and mask shouldn't be NULL.
+			 */
+			if ((!vxlan_spec && vxlan_mask) ||
+			    (vxlan_spec && !vxlan_mask)) {
+				rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Invalid VXLAN item");
+				return 0;
+			}
+
+			break;
+		case RTE_FLOW_ITEM_TYPE_NVGRE:
+			nvgre_spec = item->spec;
+			nvgre_mask = item->mask;
+			/* Check if NVGRE item is used to describe protocol.
+			 * If yes, both spec and mask should be NULL.
+			 * If no, both spec and mask shouldn't be NULL.
+			 */
+			if ((!nvgre_spec && nvgre_mask) ||
+			    (nvgre_spec && !nvgre_mask)) {
+				rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Invalid NVGRE item");
+				return 0;
+			}
+
+			break;
+		default:
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Unsupported pattern item");
+			break;
+		}
+	}
+	return input_set;
+}
+
+static int ice_flow_valid_inset(const struct rte_flow_item pattern[],
+			uint64_t inset, struct rte_flow_error *error)
+{
+	uint64_t fields;
+
+	/* get valid field */
+	fields = ice_get_flow_field(pattern, error);
+	if (!fields || (fields & ~inset)) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
+				   pattern,
+				   "Invalid input set");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+static int ice_flow_valid_action(const struct rte_flow_action *actions,
+				       struct rte_flow_error *error)
+{
+	switch (actions->type) {
+	case RTE_FLOW_ACTION_TYPE_QUEUE:
+		break;
+	case RTE_FLOW_ACTION_TYPE_DROP:
+		break;
+	default:
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ACTION, actions,
+				   "Invalid action.");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+static int
+ice_flow_validate(__rte_unused struct rte_eth_dev *dev,
+		   const struct rte_flow_attr *attr,
+		   const struct rte_flow_item pattern[],
+		   const struct rte_flow_action actions[],
+		   struct rte_flow_error *error)
+{
+	uint64_t inset = 0;
+	int ret = ICE_ERR_NOT_SUPPORTED;
+
+	if (!pattern) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+				   NULL, "NULL pattern.");
+		return -rte_errno;
+	}
+
+	if (!actions) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+				   NULL, "NULL action.");
+		return -rte_errno;
+	}
+
+	if (!attr) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR,
+				   NULL, "NULL attribute.");
+		return -rte_errno;
+	}
+
+	ret = ice_flow_valid_attr(attr, error);
+	if (ret)
+		return ret;
+
+	inset = ice_flow_valid_pattern(pattern, error);
+	if (!inset)
+		return -rte_errno;
+
+	ret = ice_flow_valid_inset(pattern, inset, error);
+	if (ret)
+		return ret;
+
+	ret = ice_flow_valid_action(actions, error);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static struct rte_flow *
+ice_flow_create(struct rte_eth_dev *dev,
+		 const struct rte_flow_attr *attr,
+		 const struct rte_flow_item pattern[],
+		 const struct rte_flow_action actions[],
+		 struct rte_flow_error *error)
+{
+	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	struct rte_flow *flow = NULL;
+	int ret;
+
+	flow = rte_zmalloc("ice_flow", sizeof(struct rte_flow), 0);
+	if (!flow) {
+		rte_flow_error_set(error, ENOMEM,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Failed to allocate memory");
+		return flow;
+	}
+
+	ret = ice_flow_validate(dev, attr, pattern, actions, error);
+	if (ret < 0)
+		goto free_flow;
+
+	ret = ice_create_switch_filter(pf, pattern, actions, flow, error);
+	if (ret)
+		goto free_flow;
+
+	TAILQ_INSERT_TAIL(&pf->flow_list, flow, node);
+	return flow;
+
+free_flow:
+	rte_flow_error_set(error, -ret,
+			   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+			   "Failed to create flow.");
+	rte_free(flow);
+	return NULL;
+}
+
+static int
+ice_flow_destroy(struct rte_eth_dev *dev,
+		 struct rte_flow *flow,
+		 struct rte_flow_error *error)
+{
+	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	int ret = 0;
+
+	ret = ice_destroy_switch_filter(pf, flow, error);
+
+	if (!ret) {
+		TAILQ_REMOVE(&pf->flow_list, flow, node);
+		rte_free(flow);
+	} else
+		rte_flow_error_set(error, -ret,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Failed to destroy flow.");
+
+	return ret;
+}
+
+static int
+ice_flow_flush(struct rte_eth_dev *dev,
+	       struct rte_flow_error *error)
+{
+	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	struct rte_flow *p_flow;
+	int ret = 0;
+
+	while ((p_flow = TAILQ_FIRST(&pf->flow_list))) {
+		ret = ice_flow_destroy(dev, p_flow, error);
+		if (ret) {
+			rte_flow_error_set(error, -ret,
+					   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+					   "Failed to flush SW flows.");
+			return -rte_errno;
+		}
+	}
+
+	return ret;
+}
diff --git a/drivers/net/ice/ice_generic_flow.h b/drivers/net/ice/ice_generic_flow.h
new file mode 100644
index 0000000..3264c41
--- /dev/null
+++ b/drivers/net/ice/ice_generic_flow.h
@@ -0,0 +1,654 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+#ifndef _ICE_GENERIC_FLOW_H_
+#define _ICE_GENERIC_FLOW_H_
+
+#include <rte_flow_driver.h>
+
+struct ice_flow_pattern {
+	enum rte_flow_item_type *items;
+	uint64_t sw_fields;
+};
+
+#define ICE_INSET_NONE            0x0000000000000000ULL
+
+/* bit0 ~ bit 7 */
+#define ICE_INSET_SMAC            0x0000000000000001ULL
+#define ICE_INSET_DMAC            0x0000000000000002ULL
+#define ICE_INSET_ETHERTYPE       0x0000000000000020ULL
+
+/* bit 8 ~ bit 15 */
+#define ICE_INSET_IPV4_SRC        0x0000000000000100ULL
+#define ICE_INSET_IPV4_DST        0x0000000000000200ULL
+#define ICE_INSET_IPV6_SRC        0x0000000000000400ULL
+#define ICE_INSET_IPV6_DST        0x0000000000000800ULL
+#define ICE_INSET_SRC_PORT        0x0000000000001000ULL
+#define ICE_INSET_DST_PORT        0x0000000000002000ULL
+#define ICE_INSET_ARP             0x0000000000004000ULL
+
+/* bit 16 ~ bit 31 */
+#define ICE_INSET_IPV4_TOS        0x0000000000010000ULL
+#define ICE_INSET_IPV4_PROTO      0x0000000000020000ULL
+#define ICE_INSET_IPV4_TTL        0x0000000000040000ULL
+#define ICE_INSET_IPV6_PROTO      0x0000000000200000ULL
+#define ICE_INSET_IPV6_HOP_LIMIT  0x0000000000400000ULL
+#define ICE_INSET_ICMP            0x0000000001000000ULL
+#define ICE_INSET_ICMP6           0x0000000002000000ULL
+
+/* bit 32 ~ bit 47, tunnel fields */
+#define ICE_INSET_TUN_SMAC           0x0000000100000000ULL
+#define ICE_INSET_TUN_DMAC           0x0000000200000000ULL
+#define ICE_INSET_TUN_IPV4_SRC       0x0000000400000000ULL
+#define ICE_INSET_TUN_IPV4_DST       0x0000000800000000ULL
+#define ICE_INSET_TUN_IPV4_TTL       0x0000001000000000ULL
+#define ICE_INSET_TUN_IPV4_PROTO     0x0000002000000000ULL
+#define ICE_INSET_TUN_IPV6_SRC       0x0000004000000000ULL
+#define ICE_INSET_TUN_IPV6_DST       0x0000008000000000ULL
+#define ICE_INSET_TUN_IPV6_TTL       0x0000010000000000ULL
+#define ICE_INSET_TUN_IPV6_PROTO     0x0000020000000000ULL
+#define ICE_INSET_TUN_SRC_PORT       0x0000040000000000ULL
+#define ICE_INSET_TUN_DST_PORT       0x0000080000000000ULL
+#define ICE_INSET_TUN_ID             0x0000100000000000ULL
+
+/* bit 48 ~ bit 55 */
+#define ICE_INSET_LAST_ETHER_TYPE 0x0001000000000000ULL
+
+#define ICE_FLAG_VLAN_INNER  0x00000001ULL
+#define ICE_FLAG_VLAN_OUTER  0x00000002ULL
+
+#define INSET_ETHER ( \
+	ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)
+#define INSET_MAC_IPV4 ( \
+	ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
+	ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_TOS)
+#define INSET_MAC_IPV4_L4 ( \
+	ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
+	ICE_INSET_IPV4_TOS | ICE_INSET_DST_PORT | \
+	ICE_INSET_SRC_PORT)
+#define INSET_MAC_IPV4_ICMP ( \
+	ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
+	ICE_INSET_IPV4_TOS | ICE_INSET_ICMP)
+#define INSET_MAC_IPV6 ( \
+	ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
+	ICE_INSET_IPV6_PROTO | ICE_INSET_IPV6_HOP_LIMIT)
+#define INSET_MAC_IPV6_L4 ( \
+	ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
+	ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_DST_PORT | \
+	ICE_INSET_SRC_PORT)
+#define INSET_MAC_IPV6_ICMP ( \
+	ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
+	ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_ICMP6)
+#define INSET_TUNNEL_IPV4_TYPE1 ( \
+	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
+	ICE_INSET_TUN_IPV4_TTL | ICE_INSET_TUN_IPV4_PROTO)
+#define INSET_TUNNEL_IPV4_TYPE2 ( \
+	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
+	ICE_INSET_TUN_IPV4_TTL | ICE_INSET_TUN_IPV4_PROTO | \
+	ICE_INSET_TUN_SRC_PORT | ICE_INSET_TUN_DST_PORT)
+#define INSET_TUNNEL_IPV4_TYPE3 ( \
+	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
+	ICE_INSET_TUN_IPV4_TTL | ICE_INSET_ICMP)
+#define INSET_TUNNEL_IPV6_TYPE1 ( \
+	ICE_INSET_TUN_IPV6_SRC | ICE_INSET_TUN_IPV6_DST | \
+	ICE_INSET_TUN_IPV6_TTL | ICE_INSET_TUN_IPV6_PROTO)
+#define INSET_TUNNEL_IPV6_TYPE2 ( \
+	ICE_INSET_TUN_IPV6_SRC | ICE_INSET_TUN_IPV6_DST | \
+	ICE_INSET_TUN_IPV6_TTL | ICE_INSET_TUN_IPV6_PROTO | \
+	ICE_INSET_TUN_SRC_PORT | ICE_INSET_TUN_DST_PORT)
+#define INSET_TUNNEL_IPV6_TYPE3 ( \
+	ICE_INSET_TUN_IPV6_SRC | ICE_INSET_TUN_IPV6_DST | \
+	ICE_INSET_TUN_IPV6_TTL | ICE_INSET_ICMP6)
+
+/* L2 */
+static enum rte_flow_item_type pattern_ethertype[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* non-tunnel IPv4 */
+static enum rte_flow_item_type pattern_ipv4[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_icmp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* non-tunnel IPv6 */
+static enum rte_flow_item_type pattern_ipv6[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv6_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv6_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv6_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv6_icmp6[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_ICMP6,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* IPv4 VXLAN IPv4 */
+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv4[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv4_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv4_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv4_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv4_icmp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* IPv4 VXLAN MAC IPv4 */
+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv4[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv4_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv4_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv4_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv4_icmp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* IPv4 VXLAN IPv6 */
+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv6[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv6_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv6_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv6_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv6_icmp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* IPv4 VXLAN MAC IPv6 */
+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv6[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv6_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv6_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv6_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv6_icmp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* IPv4 NVGRE IPv4 */
+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv4[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv4_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv4_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv4_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv4_icmp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* IPv4 NVGRE MAC IPv4 */
+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv4[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv4_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv4_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv4_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv4_icmp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* IPv4 NVGRE IPv6 */
+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv6[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv6_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv6_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv6_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv6_icmp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* IPv4 NVGRE MAC IPv6 */
+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv6[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv6_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv6_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv6_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv6_icmp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static struct ice_flow_pattern ice_supported_patterns[] = {
+	{pattern_ethertype, INSET_ETHER},
+	{pattern_ipv4, INSET_MAC_IPV4},
+	{pattern_ipv4_udp, INSET_MAC_IPV4_L4},
+	{pattern_ipv4_sctp, INSET_MAC_IPV4_L4},
+	{pattern_ipv4_tcp, INSET_MAC_IPV4_L4},
+	{pattern_ipv4_icmp, INSET_MAC_IPV4_ICMP},
+	{pattern_ipv6, INSET_MAC_IPV6},
+	{pattern_ipv6_udp, INSET_MAC_IPV6_L4},
+	{pattern_ipv6_sctp, INSET_MAC_IPV6_L4},
+	{pattern_ipv6_tcp, INSET_MAC_IPV6_L4},
+	{pattern_ipv6_icmp6, INSET_MAC_IPV6_ICMP},
+	{pattern_ipv4_vxlan_ipv4, INSET_TUNNEL_IPV4_TYPE1},
+	{pattern_ipv4_vxlan_ipv4_udp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_vxlan_ipv4_tcp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_vxlan_ipv4_sctp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_vxlan_ipv4_icmp, INSET_TUNNEL_IPV4_TYPE3},
+	{pattern_ipv4_vxlan_eth_ipv4, INSET_TUNNEL_IPV4_TYPE1},
+	{pattern_ipv4_vxlan_eth_ipv4_udp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_vxlan_eth_ipv4_tcp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_vxlan_eth_ipv4_sctp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_vxlan_eth_ipv4_icmp, INSET_TUNNEL_IPV4_TYPE3},
+	{pattern_ipv4_vxlan_ipv6, INSET_TUNNEL_IPV6_TYPE1},
+	{pattern_ipv4_vxlan_ipv6_udp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_vxlan_ipv6_tcp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_vxlan_ipv6_sctp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_vxlan_ipv6_icmp, INSET_TUNNEL_IPV6_TYPE3},
+	{pattern_ipv4_vxlan_eth_ipv6, INSET_TUNNEL_IPV6_TYPE1},
+	{pattern_ipv4_vxlan_eth_ipv6_udp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_vxlan_eth_ipv6_tcp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_vxlan_eth_ipv6_sctp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_vxlan_eth_ipv6_icmp, INSET_TUNNEL_IPV6_TYPE3},
+	{pattern_ipv4_nvgre_ipv4, INSET_TUNNEL_IPV4_TYPE1},
+	{pattern_ipv4_nvgre_ipv4_udp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_nvgre_ipv4_tcp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_nvgre_ipv4_sctp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_nvgre_ipv4_icmp, INSET_TUNNEL_IPV4_TYPE3},
+	{pattern_ipv4_nvgre_eth_ipv4, INSET_TUNNEL_IPV4_TYPE1},
+	{pattern_ipv4_nvgre_eth_ipv4_udp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_nvgre_eth_ipv4_tcp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_nvgre_eth_ipv4_sctp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_nvgre_eth_ipv4_icmp, INSET_TUNNEL_IPV4_TYPE3},
+	{pattern_ipv4_nvgre_ipv6, INSET_TUNNEL_IPV6_TYPE1},
+	{pattern_ipv4_nvgre_ipv6_udp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_nvgre_ipv6_tcp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_nvgre_ipv6_sctp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_nvgre_ipv6_icmp, INSET_TUNNEL_IPV6_TYPE3},
+	{pattern_ipv4_nvgre_eth_ipv6, INSET_TUNNEL_IPV6_TYPE1},
+	{pattern_ipv4_nvgre_eth_ipv6_udp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_nvgre_eth_ipv6_tcp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_nvgre_eth_ipv6_sctp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_nvgre_eth_ipv6_icmp, INSET_TUNNEL_IPV6_TYPE3},
+};
+
+#endif
diff --git a/drivers/net/ice/meson.build b/drivers/net/ice/meson.build
index 8697676..7f16647 100644
--- a/drivers/net/ice/meson.build
+++ b/drivers/net/ice/meson.build
@@ -7,7 +7,8 @@ objs = [base_objs]
 sources = files(
 	'ice_ethdev.c',
 	'ice_rxtx.c',
-	'ice_switch_filter.c'
+	'ice_switch_filter.c',
+	'ice_generic_flow.c'
 	)
 
 deps += ['hash']
-- 
2.9.5


^ permalink raw reply	[flat|nested] 73+ messages in thread

* [dpdk-dev] [PATCH v4 3/3] net/ice: add UDP tunnel port support
  2019-06-21  6:13 ` [dpdk-dev] [PATCH v4 0/3] Enable rte_flow API in ice driver Qiming Yang
  2019-06-21  6:13   ` [dpdk-dev] [PATCH v4 1/3] net/ice: enable switch filter Qiming Yang
  2019-06-21  6:13   ` [dpdk-dev] [PATCH v4 2/3] net/ice: add generic flow API Qiming Yang
@ 2019-06-21  6:13   ` Qiming Yang
  2 siblings, 0 replies; 73+ messages in thread
From: Qiming Yang @ 2019-06-21  6:13 UTC (permalink / raw)
  To: dev; +Cc: Qiming Yang

Enabled UDP tunnel port add and delete functions.
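
For reference, an application drives the new ops through the generic
ethdev API; a minimal sketch is below (the port id and the UDP port
number 4789 are assumptions for illustration, not part of this patch):

#include <rte_ethdev.h>

/* Illustrative only: register a VXLAN UDP port on an ethdev port. */
static int
register_vxlan_port(uint16_t port_id)
{
	struct rte_eth_udp_tunnel tunnel = {
		.udp_port = 4789,	/* assumed VXLAN port */
		.prot_type = RTE_TUNNEL_TYPE_VXLAN,
	};

	/* dispatched to ice_dev_udp_tunnel_port_add() below */
	return rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
}

The delete path is symmetric through rte_eth_dev_udp_tunnel_port_delete()
with the same structure.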

Signed-off-by: Qiming Yang <qiming.yang@intel.com>
---
 drivers/net/ice/ice_ethdev.c | 54 ++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 54 insertions(+)

diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index 8ee06d1..949d293 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -88,6 +88,10 @@ static int ice_dev_filter_ctrl(struct rte_eth_dev *dev,
 			enum rte_filter_type filter_type,
 			enum rte_filter_op filter_op,
 			void *arg);
+static int ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
+			struct rte_eth_udp_tunnel *udp_tunnel);
+static int ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
+			struct rte_eth_udp_tunnel *udp_tunnel);
 
 static const struct rte_pci_id pci_id_ice_map[] = {
 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_BACKPLANE) },
@@ -147,6 +151,8 @@ static const struct eth_dev_ops ice_eth_dev_ops = {
 	.xstats_get_names             = ice_xstats_get_names,
 	.xstats_reset                 = ice_stats_reset,
 	.filter_ctrl                  = ice_dev_filter_ctrl,
+	.udp_tunnel_port_add          = ice_dev_udp_tunnel_port_add,
+	.udp_tunnel_port_del          = ice_dev_udp_tunnel_port_del,
 };
 
 /* store statistics names and its offset in stats structure */
@@ -3665,6 +3671,54 @@ ice_dev_filter_ctrl(struct rte_eth_dev *dev,
 	return ret;
 }
 
+/* Add UDP tunneling port */
+static int
+ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
+			     struct rte_eth_udp_tunnel *udp_tunnel)
+{
+	int ret = 0;
+	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	if (udp_tunnel == NULL)
+		return -EINVAL;
+
+	switch (udp_tunnel->prot_type) {
+	case RTE_TUNNEL_TYPE_VXLAN:
+		ret = ice_create_tunnel(hw, TNL_VXLAN, udp_tunnel->udp_port);
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "Invalid tunnel type");
+		ret = -1;
+		break;
+	}
+
+	return ret;
+}
+
+/* Delete UDP tunneling port */
+static int
+ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
+			     struct rte_eth_udp_tunnel *udp_tunnel)
+{
+	int ret = 0;
+	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	if (udp_tunnel == NULL)
+		return -EINVAL;
+
+	switch (udp_tunnel->prot_type) {
+	case RTE_TUNNEL_TYPE_VXLAN:
+		ret = ice_destroy_tunnel(hw, udp_tunnel->udp_port, 0);
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "Invalid tunnel type");
+		ret = -1;
+		break;
+	}
+
+	return ret;
+}
+
 static int
 ice_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 	      struct rte_pci_device *pci_dev)
-- 
2.9.5


^ permalink raw reply	[flat|nested] 73+ messages in thread

* [dpdk-dev] [PATCH v5 0/3] Enable rte_flow API in ice driver
  2019-06-03  9:05 [dpdk-dev] [PATCH 0/2] Enable rte_flow API in ice driver Qiming Yang
                   ` (4 preceding siblings ...)
  2019-06-21  6:13 ` [dpdk-dev] [PATCH v4 0/3] Enable rte_flow API in ice driver Qiming Yang
@ 2019-06-21  9:21 ` Qiming Yang
  2019-06-21  9:21   ` [dpdk-dev] [PATCH v5 1/3] net/ice: enable switch filter Qiming Yang
                     ` (3 more replies)
  2019-06-24  6:15 ` [dpdk-dev] [PATCH v6 " Qiming Yang
                   ` (4 subsequent siblings)
  10 siblings, 4 replies; 73+ messages in thread
From: Qiming Yang @ 2019-06-21  9:21 UTC (permalink / raw)
  To: dev; +Cc: Qiming Yang

This patch set enables the backend of rte_flow and the generic
filter related functions in the ice driver. Supported flows include
IPv4, TCPv4, UDPv4, IPv6, TCPv6, UDPv6, tunnel, etc. This patch
set depends on the shared code update.
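
As a usage illustration (all values are assumptions, not part of the
series), a rule steering IPv4/UDP traffic to a given Rx queue can be
built through the public rte_flow API once these patches are in place:

#include <rte_flow.h>
#include <rte_byteorder.h>

/* Sketch: match IPv4 src 192.168.0.1 + UDP dst port 4000, send to queue 2.
 * Port id, addresses, ports and queue index are all assumed values.
 */
static struct rte_flow *
create_sample_flow(uint16_t port_id, struct rte_flow_error *err)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_ipv4 ip_spec = {
		.hdr.src_addr = RTE_BE32(0xc0a80001),	/* 192.168.0.1 */
	};
	struct rte_flow_item_ipv4 ip_mask = {
		.hdr.src_addr = RTE_BE32(0xffffffff),
	};
	struct rte_flow_item_udp udp_spec = {
		.hdr.dst_port = RTE_BE16(4000),
	};
	struct rte_flow_item_udp udp_mask = {
		.hdr.dst_port = RTE_BE16(0xffff),
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ip_spec, .mask = &ip_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
		  .spec = &udp_spec, .mask = &udp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 2 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, err);
}

Roughly the same rule can be created interactively from testpmd with
"flow create 0 ingress pattern eth / ipv4 src is 192.168.0.1 / udp dst
is 4000 / end actions queue index 2 / end".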

---
v2 changes:
 - added UDP tunnel port support.
 - fixed compile issue.
 - added documentation update.
v3 changes:
 - removed redundant parser.
 - added License.
 - added VXLAN and NVGRE item support.
v4 changes:
 - fixed some typos.
v5 changes:
 - fixed checkpatch issues.

Qiming Yang (2):
  net/ice: add generic flow API
  net/ice: add UDP tunnel port support

wei zhao (1):
  net/ice: enable switch filter

 drivers/net/ice/Makefile            |   2 +
 drivers/net/ice/ice_ethdev.c        | 116 ++++++
 drivers/net/ice/ice_ethdev.h        |  12 +
 drivers/net/ice/ice_generic_flow.c  | 683 ++++++++++++++++++++++++++++++++++++
 drivers/net/ice/ice_generic_flow.h  | 654 ++++++++++++++++++++++++++++++++++
 drivers/net/ice/ice_switch_filter.c | 521 +++++++++++++++++++++++++++
 drivers/net/ice/ice_switch_filter.h |  24 ++
 drivers/net/ice/meson.build         |   4 +-
 8 files changed, 2015 insertions(+), 1 deletion(-)
 create mode 100644 drivers/net/ice/ice_generic_flow.c
 create mode 100644 drivers/net/ice/ice_generic_flow.h
 create mode 100644 drivers/net/ice/ice_switch_filter.c
 create mode 100644 drivers/net/ice/ice_switch_filter.h

-- 
2.9.5


^ permalink raw reply	[flat|nested] 73+ messages in thread

* [dpdk-dev] [PATCH v5 1/3] net/ice: enable switch filter
  2019-06-21  9:21 ` [dpdk-dev] [PATCH v5 0/3] Enable rte_flow API in ice driver Qiming Yang
@ 2019-06-21  9:21   ` Qiming Yang
  2019-06-21  9:21   ` [dpdk-dev] [PATCH v5 2/3] net/ice: add generic flow API Qiming Yang
                     ` (2 subsequent siblings)
  3 siblings, 0 replies; 73+ messages in thread
From: Qiming Yang @ 2019-06-21  9:21 UTC (permalink / raw)
  To: dev; +Cc: wei zhao

From: wei zhao <wei.zhao1@intel.com>

The patch enables the backend of rte_flow. It translates
rte_flow_xxx into a device-specific data structure and
configures the packet processing engine's binary classifier
(switch) properly.
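
As an illustration of the kind of rule this backend accepts (values are
assumptions, not taken from the patch), a VXLAN-encapsulated flow whose
inner IPv4 destination matches can be dropped by the switch; depending
on the PMD's input-set checks the outer headers may also need an
explicit spec/mask:

#include <rte_flow.h>
#include <rte_byteorder.h>

/* Sketch only: drop VXLAN traffic with inner IPv4 dst 10.0.0.2. */
static const struct rte_flow_item_ipv4 inner_spec = {
	.hdr.dst_addr = RTE_BE32(0x0a000002),	/* 10.0.0.2 */
};
static const struct rte_flow_item_ipv4 inner_mask = {
	.hdr.dst_addr = RTE_BE32(0xffffffff),
};
static const struct rte_flow_item vxlan_pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
	{ .type = RTE_FLOW_ITEM_TYPE_UDP },
	{ .type = RTE_FLOW_ITEM_TYPE_VXLAN },
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
	  .spec = &inner_spec, .mask = &inner_mask },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};
static const struct rte_flow_action drop_actions[] = {
	{ .type = RTE_FLOW_ACTION_TYPE_DROP },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};

The parser below turns each item into an ice_adv_lkup_elem and maps the
action onto rule_info.sw_act (ICE_FWD_TO_Q or ICE_DROP_PACKET) before
handing the rule to ice_add_adv_rule().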

Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
---
 drivers/net/ice/Makefile            |   1 +
 drivers/net/ice/ice_ethdev.c        |  18 ++
 drivers/net/ice/ice_ethdev.h        |   7 +
 drivers/net/ice/ice_switch_filter.c | 521 ++++++++++++++++++++++++++++++++++++
 drivers/net/ice/ice_switch_filter.h |  24 ++
 drivers/net/ice/meson.build         |   3 +-
 6 files changed, 573 insertions(+), 1 deletion(-)
 create mode 100644 drivers/net/ice/ice_switch_filter.c
 create mode 100644 drivers/net/ice/ice_switch_filter.h

diff --git a/drivers/net/ice/Makefile b/drivers/net/ice/Makefile
index 0e5c55e..b10d826 100644
--- a/drivers/net/ice/Makefile
+++ b/drivers/net/ice/Makefile
@@ -60,6 +60,7 @@ ifeq ($(CONFIG_RTE_ARCH_X86), y)
 SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_rxtx_vec_sse.c
 endif
 
+SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_switch_filter.c
 ifeq ($(findstring RTE_MACHINE_CPUFLAG_AVX2,$(CFLAGS)),RTE_MACHINE_CPUFLAG_AVX2)
 	CC_AVX2_SUPPORT=1
 else
diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index 203d0a9..a94aa7e 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -1364,6 +1364,21 @@ static int ice_load_pkg(struct rte_eth_dev *dev)
 	return err;
 }
 
+static void
+ice_base_queue_get(struct ice_pf *pf)
+{
+	uint32_t reg;
+	struct ice_hw *hw = ICE_PF_TO_HW(pf);
+
+	reg = ICE_READ_REG(hw, PFLAN_RX_QALLOC);
+	if (reg & PFLAN_RX_QALLOC_VALID_M) {
+		pf->base_queue = reg & PFLAN_RX_QALLOC_FIRSTQ_M;
+	} else {
+		PMD_INIT_LOG(WARNING, "Failed to get Rx base queue"
+					" index");
+	}
+}
+
 static int
 ice_dev_init(struct rte_eth_dev *dev)
 {
@@ -1460,6 +1475,9 @@ ice_dev_init(struct rte_eth_dev *dev)
 	/* enable uio intr after callback register */
 	rte_intr_enable(intr_handle);
 
+	/* get base queue pairs index  in the device */
+	ice_base_queue_get(pf);
+
 	return 0;
 
 err_pf_setup:
diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index 1385afa..50b966c 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -234,6 +234,12 @@ struct ice_vsi {
 	bool offset_loaded;
 };
 
+/* Struct to store flow created. */
+struct rte_flow {
+	TAILQ_ENTRY(rte_flow) node;
+	void *rule;
+};
+
 struct ice_pf {
 	struct ice_adapter *adapter; /* The adapter this PF associate to */
 	struct ice_vsi *main_vsi; /* pointer to main VSI structure */
@@ -252,6 +258,7 @@ struct ice_pf {
 	uint16_t hash_lut_size; /* The size of hash lookup table */
 	uint16_t lan_nb_qp_max;
 	uint16_t lan_nb_qps; /* The number of queue pairs of LAN */
+	uint16_t base_queue; /* The base queue pairs index  in the device */
 	struct ice_hw_port_stats stats_offset;
 	struct ice_hw_port_stats stats;
 	/* internal packet statistics, it should be excluded from the total */
diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
new file mode 100644
index 0000000..adfc154
--- /dev/null
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -0,0 +1,521 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_eth_ctrl.h>
+#include <rte_tailq.h>
+#include <rte_flow_driver.h>
+
+#include "ice_logs.h"
+#include "base/ice_type.h"
+#include "ice_switch_filter.h"
+
+static int
+ice_parse_switch_filter(const struct rte_flow_item pattern[],
+			const struct rte_flow_action actions[],
+			struct rte_flow_error *error,
+			struct ice_adv_rule_info *rule_info,
+			struct ice_adv_lkup_elem **lkup_list,
+			uint16_t *lkups_num)
+{
+	const struct rte_flow_item *item = pattern;
+	enum rte_flow_item_type item_type;
+	const struct rte_flow_item_eth *eth_spec, *eth_mask;
+	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
+	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
+	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
+	const struct rte_flow_item_udp *udp_spec, *udp_mask;
+	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
+	const struct rte_flow_item_nvgre  *nvgre_spec, *nvgre_mask;
+	const struct rte_flow_item_vxlan  *vxlan_spec, *vxlan_mask;
+	struct ice_adv_lkup_elem *list;
+	uint16_t j, t = 0;
+	uint16_t item_num = 0;
+	enum ice_sw_tunnel_type tun_type = ICE_NON_TUN;
+	uint16_t tunnel_valid = 0;
+
+	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+		if (item->type == RTE_FLOW_ITEM_TYPE_ETH ||
+			item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
+			item->type == RTE_FLOW_ITEM_TYPE_IPV6 ||
+			item->type == RTE_FLOW_ITEM_TYPE_UDP ||
+			item->type == RTE_FLOW_ITEM_TYPE_TCP ||
+			item->type == RTE_FLOW_ITEM_TYPE_SCTP ||
+			item->type == RTE_FLOW_ITEM_TYPE_VXLAN ||
+			item->type == RTE_FLOW_ITEM_TYPE_NVGRE)
+			item_num++;
+		if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN)
+			tun_type = ICE_SW_TUN_VXLAN;
+		if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE)
+			tun_type = ICE_SW_TUN_NVGRE;
+	}
+
+	list = rte_zmalloc(NULL, item_num * sizeof(*list), 0);
+	if (!list) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ITEM, actions,
+				   "No memory for PMD internal items");
+		goto out;
+	}
+	*lkup_list = list;
+
+	for (item = pattern; item->type !=
+			RTE_FLOW_ITEM_TYPE_END; item++) {
+		item_type = item->type;
+
+		switch (item_type) {
+		case RTE_FLOW_ITEM_TYPE_ETH:
+			eth_spec = item->spec;
+			eth_mask = item->mask;
+			if (eth_spec && eth_mask) {
+				list[t].type = (tun_type == ICE_NON_TUN) ?
+					ICE_MAC_OFOS : ICE_MAC_IL;
+				struct ice_ether_hdr *h;
+				struct ice_ether_hdr *m;
+				h = &list[t].h_u.eth_hdr;
+				m = &list[t].m_u.eth_hdr;
+				for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
+					if (eth_mask->src.addr_bytes[j] ==
+								UINT8_MAX) {
+						h->src_addr[j] =
+						eth_spec->src.addr_bytes[j];
+						m->src_addr[j] =
+						eth_mask->src.addr_bytes[j];
+					}
+					if (eth_mask->dst.addr_bytes[j] ==
+								UINT8_MAX) {
+						h->dst_addr[j] =
+						eth_spec->dst.addr_bytes[j];
+						m->dst_addr[j] =
+						eth_mask->dst.addr_bytes[j];
+					}
+				}
+				if (eth_mask->type == UINT16_MAX) {
+					h->ethtype_id =
+					rte_be_to_cpu_16(eth_spec->type);
+					m->ethtype_id = UINT16_MAX;
+				}
+				t++;
+			} else if (!eth_spec && !eth_mask) {
+				list[t].type = (tun_type == ICE_NON_TUN) ?
+					ICE_MAC_OFOS : ICE_MAC_IL;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_IPV4:
+			ipv4_spec = item->spec;
+			ipv4_mask = item->mask;
+			if (ipv4_spec && ipv4_mask) {
+				list[t].type = (tun_type == ICE_NON_TUN) ?
+					ICE_IPV4_OFOS : ICE_IPV4_IL;
+				if (ipv4_mask->hdr.src_addr == UINT32_MAX) {
+					list[t].h_u.ipv4_hdr.src_addr =
+						ipv4_spec->hdr.src_addr;
+					list[t].m_u.ipv4_hdr.src_addr =
+						UINT32_MAX;
+				}
+				if (ipv4_mask->hdr.dst_addr == UINT32_MAX) {
+					list[t].h_u.ipv4_hdr.dst_addr =
+						ipv4_spec->hdr.dst_addr;
+					list[t].m_u.ipv4_hdr.dst_addr =
+						UINT32_MAX;
+				}
+				if (ipv4_mask->hdr.time_to_live == UINT8_MAX) {
+					list[t].h_u.ipv4_hdr.time_to_live =
+						ipv4_spec->hdr.time_to_live;
+					list[t].m_u.ipv4_hdr.time_to_live =
+						UINT8_MAX;
+				}
+				if (ipv4_mask->hdr.next_proto_id == UINT8_MAX) {
+					list[t].h_u.ipv4_hdr.protocol =
+						ipv4_spec->hdr.next_proto_id;
+					list[t].m_u.ipv4_hdr.protocol =
+						UINT8_MAX;
+				}
+				if (ipv4_mask->hdr.type_of_service ==
+						UINT8_MAX) {
+					list[t].h_u.ipv4_hdr.tos =
+						ipv4_spec->hdr.type_of_service;
+					list[t].m_u.ipv4_hdr.tos = UINT8_MAX;
+				}
+				t++;
+			} else if (!ipv4_spec && !ipv4_mask) {
+				list[t].type = (tun_type == ICE_NON_TUN) ?
+					ICE_IPV4_OFOS : ICE_IPV4_IL;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_IPV6:
+			ipv6_spec = item->spec;
+			ipv6_mask = item->mask;
+			if (ipv6_spec && ipv6_mask) {
+				list[t].type = (tun_type == ICE_NON_TUN) ?
+					ICE_IPV6_OFOS : ICE_IPV6_IL;
+				struct ice_ipv6_hdr *f;
+				struct ice_ipv6_hdr *s;
+				f = &list[t].h_u.ice_ipv6_ofos_hdr;
+				s = &list[t].m_u.ice_ipv6_ofos_hdr;
+				for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
+					if (ipv6_mask->hdr.src_addr[j] ==
+								UINT8_MAX) {
+						f->src_addr[j] =
+						ipv6_spec->hdr.src_addr[j];
+						s->src_addr[j] =
+						ipv6_mask->hdr.src_addr[j];
+					}
+					if (ipv6_mask->hdr.dst_addr[j] ==
+								UINT8_MAX) {
+						f->dst_addr[j] =
+						ipv6_spec->hdr.dst_addr[j];
+						s->dst_addr[j] =
+						ipv6_mask->hdr.dst_addr[j];
+					}
+				}
+				if (ipv6_mask->hdr.proto == UINT8_MAX) {
+					f->next_hdr =
+						ipv6_spec->hdr.proto;
+					s->next_hdr = UINT8_MAX;
+				}
+				if (ipv6_mask->hdr.hop_limits == UINT8_MAX) {
+					f->hop_limit =
+						ipv6_spec->hdr.hop_limits;
+					s->hop_limit = UINT8_MAX;
+				}
+				t++;
+			} else if (!ipv6_spec && !ipv6_mask) {
+				list[t].type = (tun_type == ICE_NON_TUN) ?
+					ICE_IPV6_OFOS : ICE_IPV6_IL;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_UDP:
+			udp_spec = item->spec;
+			udp_mask = item->mask;
+			if (udp_spec && udp_mask) {
+				if (tun_type == ICE_SW_TUN_VXLAN &&
+						tunnel_valid == 0)
+					list[t].type = ICE_UDP_OF;
+				else
+					list[t].type = ICE_UDP_ILOS;
+				if (udp_mask->hdr.src_port == UINT16_MAX) {
+					list[t].h_u.l4_hdr.src_port =
+						udp_spec->hdr.src_port;
+					list[t].m_u.l4_hdr.src_port =
+						udp_mask->hdr.src_port;
+				}
+				if (udp_mask->hdr.dst_port == UINT16_MAX) {
+					list[t].h_u.l4_hdr.dst_port =
+						udp_spec->hdr.dst_port;
+					list[t].m_u.l4_hdr.dst_port =
+						udp_mask->hdr.dst_port;
+				}
+				t++;
+			} else if (!udp_spec && !udp_mask) {
+				list[t].type = ICE_UDP_ILOS;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_TCP:
+			tcp_spec = item->spec;
+			tcp_mask = item->mask;
+			if (tcp_spec && tcp_mask) {
+				list[t].type = ICE_TCP_IL;
+				if (tcp_mask->hdr.src_port == UINT16_MAX) {
+					list[t].h_u.l4_hdr.src_port =
+						tcp_spec->hdr.src_port;
+					list[t].m_u.l4_hdr.src_port =
+						tcp_mask->hdr.src_port;
+				}
+				if (tcp_mask->hdr.dst_port == UINT16_MAX) {
+					list[t].h_u.l4_hdr.dst_port =
+						tcp_spec->hdr.dst_port;
+					list[t].m_u.l4_hdr.dst_port =
+						tcp_mask->hdr.dst_port;
+				}
+				t++;
+			} else if (!tcp_spec && !tcp_mask) {
+				list[t].type = ICE_TCP_IL;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_SCTP:
+			sctp_spec = item->spec;
+			sctp_mask = item->mask;
+			if (sctp_spec && sctp_mask) {
+				list[t].type = ICE_SCTP_IL;
+				if (sctp_mask->hdr.src_port == UINT16_MAX) {
+					list[t].h_u.sctp_hdr.src_port =
+						sctp_spec->hdr.src_port;
+					list[t].m_u.sctp_hdr.src_port =
+						sctp_mask->hdr.src_port;
+				}
+				if (sctp_mask->hdr.dst_port == UINT16_MAX) {
+					list[t].h_u.sctp_hdr.dst_port =
+						sctp_spec->hdr.dst_port;
+					list[t].m_u.sctp_hdr.dst_port =
+						sctp_mask->hdr.dst_port;
+				}
+				t++;
+			} else if (!sctp_spec && !sctp_mask) {
+				list[t].type = ICE_SCTP_IL;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_VXLAN:
+			vxlan_spec = item->spec;
+			vxlan_mask = item->mask;
+			tunnel_valid = 1;
+			if (vxlan_spec && vxlan_mask) {
+				list[t].type = ICE_VXLAN;
+				if (vxlan_mask->vni[0] == UINT8_MAX &&
+					vxlan_mask->vni[1] == UINT8_MAX &&
+					vxlan_mask->vni[2] == UINT8_MAX) {
+					list[t].h_u.tnl_hdr.vni =
+						(vxlan_spec->vni[2] << 16) |
+						(vxlan_spec->vni[1] << 8) |
+						vxlan_spec->vni[0];
+					list[t].m_u.tnl_hdr.vni =
+						UINT32_MAX;
+				}
+				t++;
+			} else if (!vxlan_spec && !vxlan_mask) {
+				list[t].type = ICE_VXLAN;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_NVGRE:
+			nvgre_spec = item->spec;
+			nvgre_mask = item->mask;
+			tunnel_valid = 1;
+			if (nvgre_spec && nvgre_mask) {
+				list[t].type = ICE_NVGRE;
+				if (nvgre_mask->tni[0] == UINT8_MAX &&
+					nvgre_mask->tni[1] == UINT8_MAX &&
+					nvgre_mask->tni[2] == UINT8_MAX) {
+					list[t].h_u.nvgre_hdr.tni_flow =
+						(nvgre_spec->tni[2] << 16) |
+						(nvgre_spec->tni[1] << 8) |
+						nvgre_spec->tni[0];
+					list[t].m_u.nvgre_hdr.tni_flow =
+						UINT32_MAX;
+				}
+				t++;
+			} else if (!nvgre_spec && !nvgre_mask) {
+				list[t].type = ICE_NVGRE;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_VOID:
+		case RTE_FLOW_ITEM_TYPE_END:
+			break;
+
+		default:
+			rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ITEM, actions,
+				   "Invalid pattern item.");
+			goto out;
+		}
+	}
+
+	rule_info->tun_type = tun_type;
+	*lkups_num = t;
+
+	return 0;
+out:
+	return -rte_errno;
+}
+
+/* For now the ice switch filter action code only
+ * supports QUEUE or DROP actions.
+ */
+static int
+ice_parse_switch_action(struct ice_pf *pf,
+				 const struct rte_flow_action *actions,
+				 struct rte_flow_error *error,
+				 struct ice_adv_rule_info *rule_info)
+{
+	struct ice_vsi *vsi = pf->main_vsi;
+	const struct rte_flow_action_queue *act_q;
+	uint16_t base_queue;
+	const struct rte_flow_action *action;
+	enum rte_flow_action_type action_type;
+
+	base_queue = pf->base_queue;
+	for (action = actions; action->type !=
+			RTE_FLOW_ACTION_TYPE_END; action++) {
+		action_type = action->type;
+		switch (action_type) {
+		case RTE_FLOW_ACTION_TYPE_QUEUE:
+			act_q = action->conf;
+			rule_info->sw_act.fltr_act =
+				ICE_FWD_TO_Q;
+			rule_info->sw_act.fwd_id.q_id =
+				base_queue + act_q->index;
+			if (act_q->index >=
+				pf->dev_data->nb_rx_queues) {
+				rte_flow_error_set(error,
+					EINVAL,
+					RTE_FLOW_ERROR_TYPE_ACTION,
+					actions, "Invalid queue ID"
+					" for switch filter.");
+				return -rte_errno;
+			}
+			break;
+
+		case RTE_FLOW_ACTION_TYPE_DROP:
+			rule_info->sw_act.fltr_act =
+				ICE_DROP_PACKET;
+			break;
+
+		case RTE_FLOW_ACTION_TYPE_VOID:
+			break;
+
+		default:
+			rte_flow_error_set(error,
+				EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				actions,
+				"Invalid action type");
+			return -rte_errno;
+		}
+	}
+
+	rule_info->sw_act.vsi_handle = vsi->idx;
+	rule_info->rx = 1;
+	rule_info->sw_act.src = vsi->idx;
+
+	return 0;
+}
+
+static int
+ice_switch_rule_set(struct ice_pf *pf,
+			struct ice_adv_lkup_elem *list,
+			uint16_t lkups_cnt,
+			struct ice_adv_rule_info *rule_info,
+			struct rte_flow *flow,
+			struct rte_flow_error *error)
+{
+	struct ice_hw *hw = ICE_PF_TO_HW(pf);
+	int ret;
+	struct ice_rule_query_data rule_added = {0};
+	struct ice_rule_query_data *filter_ptr;
+
+	if (lkups_cnt > ICE_MAX_CHAIN_WORDS) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+			"item number too large for rule");
+		return -rte_errno;
+	}
+	if (!list) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+			"lookup list should not be NULL");
+		return -rte_errno;
+	}
+
+	ret = ice_add_adv_rule(hw, list, lkups_cnt, rule_info, &rule_added);
+
+	if (!ret) {
+		filter_ptr = rte_zmalloc("ice_switch_filter",
+			sizeof(struct ice_rule_query_data), 0);
+		if (!filter_ptr) {
+			PMD_DRV_LOG(ERR, "failed to allocate memory");
+			return -EINVAL;
+		}
+		flow->rule = filter_ptr;
+		rte_memcpy(filter_ptr,
+			&rule_added,
+			sizeof(struct ice_rule_query_data));
+	}
+
+	return ret;
+}
+
+int
+ice_create_switch_filter(struct ice_pf *pf,
+			const struct rte_flow_item pattern[],
+			const struct rte_flow_action actions[],
+			struct rte_flow *flow,
+			struct rte_flow_error *error)
+{
+	int ret = 0;
+	struct ice_adv_rule_info rule_info = {0};
+	struct ice_adv_lkup_elem *list = NULL;
+	uint16_t lkups_num = 0;
+
+	ret = ice_parse_switch_filter(pattern, actions, error,
+			&rule_info, &list, &lkups_num);
+	if (ret)
+		goto error;
+
+	ret = ice_parse_switch_action(pf, actions, error, &rule_info);
+	if (ret)
+		goto error;
+
+	ret = ice_switch_rule_set(pf, list, lkups_num, &rule_info, flow, error);
+	if (ret)
+		goto error;
+
+	rte_free(list);
+	return 0;
+
+error:
+	rte_free(list);
+
+	return -rte_errno;
+}
+
+int
+ice_destroy_switch_filter(struct ice_pf *pf,
+			struct rte_flow *flow,
+			struct rte_flow_error *error)
+{
+	struct ice_hw *hw = ICE_PF_TO_HW(pf);
+	int ret;
+	struct ice_rule_query_data *filter_ptr;
+
+	filter_ptr = (struct ice_rule_query_data *)
+			flow->rule;
+
+	if (!filter_ptr) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+			"no such flow"
+			" created by switch filter");
+		return -rte_errno;
+	}
+
+	ret = ice_rem_adv_rule_by_id(hw, filter_ptr);
+	if (ret) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+			"fail to destroy switch filter rule");
+		return -rte_errno;
+	}
+
+	rte_free(filter_ptr);
+	return ret;
+}
+
+void
+ice_free_switch_filter_rule(void *rule)
+{
+	struct ice_rule_query_data *filter_ptr;
+
+	filter_ptr = (struct ice_rule_query_data *)rule;
+
+	rte_free(filter_ptr);
+}
diff --git a/drivers/net/ice/ice_switch_filter.h b/drivers/net/ice/ice_switch_filter.h
new file mode 100644
index 0000000..cea4799
--- /dev/null
+++ b/drivers/net/ice/ice_switch_filter.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+#ifndef _ICE_SWITCH_FILTER_H_
+#define _ICE_SWITCH_FILTER_H_
+
+#include "base/ice_switch.h"
+#include "base/ice_type.h"
+#include "ice_ethdev.h"
+
+int
+ice_create_switch_filter(struct ice_pf *pf,
+			const struct rte_flow_item pattern[],
+			const struct rte_flow_action actions[],
+			struct rte_flow *flow,
+			struct rte_flow_error *error);
+int
+ice_destroy_switch_filter(struct ice_pf *pf,
+			struct rte_flow *flow,
+			struct rte_flow_error *error);
+void
+ice_free_switch_filter_rule(void *rule);
+#endif /* _ICE_SWITCH_FILTER_H_ */
diff --git a/drivers/net/ice/meson.build b/drivers/net/ice/meson.build
index 2bec688..8697676 100644
--- a/drivers/net/ice/meson.build
+++ b/drivers/net/ice/meson.build
@@ -6,7 +6,8 @@ objs = [base_objs]
 
 sources = files(
 	'ice_ethdev.c',
-	'ice_rxtx.c'
+	'ice_rxtx.c',
+	'ice_switch_filter.c'
 	)
 
 deps += ['hash']
-- 
2.9.5


^ permalink raw reply	[flat|nested] 73+ messages in thread

* [dpdk-dev] [PATCH v5 2/3] net/ice: add generic flow API
  2019-06-21  9:21 ` [dpdk-dev] [PATCH v5 0/3] Enable rte_flow API in ice driver Qiming Yang
  2019-06-21  9:21   ` [dpdk-dev] [PATCH v5 1/3] net/ice: enable switch filter Qiming Yang
@ 2019-06-21  9:21   ` Qiming Yang
  2019-06-21  9:21   ` [dpdk-dev] [PATCH v5 3/3] net/ice: add UDP tunnel port support Qiming Yang
  2019-06-21 14:46   ` [dpdk-dev] [PATCH v5 0/3] Enable rte_flow API in ice driver Aaron Conole
  3 siblings, 0 replies; 73+ messages in thread
From: Qiming Yang @ 2019-06-21  9:21 UTC (permalink / raw)
  To: dev; +Cc: Qiming Yang

This patch adds ice_flow_create, ice_flow_destroy,
ice_flow_flush and ice_flow_validate support;
these are used to handle all the generic filters.
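
For context (not part of the patch), an application exercises these
handlers through the public rte_flow calls; "attr", "pattern" and
"actions" below are assumed to have been built by the caller (for
instance a simple eth/ipv4/udp pattern with a queue action), and the
port id is an assumption as well:

#include <stdio.h>
#include <rte_flow.h>

/* Validate first, then create; this ends up in ice_flow_validate()/
 * ice_flow_create() via the RTE_ETH_FILTER_GENERIC filter_ctrl hook.
 */
static struct rte_flow *
install_flow(uint16_t port_id, const struct rte_flow_attr *attr,
	     const struct rte_flow_item pattern[],
	     const struct rte_flow_action actions[])
{
	struct rte_flow_error err = { 0 };

	if (rte_flow_validate(port_id, attr, pattern, actions, &err) != 0) {
		printf("flow not supported: %s\n",
		       err.message ? err.message : "unknown");
		return NULL;
	}
	return rte_flow_create(port_id, attr, pattern, actions, &err);
}

/* On teardown, remove every rule owned by the port (ice_flow_flush()). */
static void
remove_all_flows(uint16_t port_id)
{
	struct rte_flow_error err = { 0 };

	rte_flow_flush(port_id, &err);
}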

Signed-off-by: Qiming Yang <qiming.yang@intel.com>
---
 drivers/net/ice/Makefile           |   1 +
 drivers/net/ice/ice_ethdev.c       |  44 +++
 drivers/net/ice/ice_ethdev.h       |   5 +
 drivers/net/ice/ice_generic_flow.c | 683 +++++++++++++++++++++++++++++++++++++
 drivers/net/ice/ice_generic_flow.h | 654 +++++++++++++++++++++++++++++++++++
 drivers/net/ice/meson.build        |   3 +-
 6 files changed, 1389 insertions(+), 1 deletion(-)
 create mode 100644 drivers/net/ice/ice_generic_flow.c
 create mode 100644 drivers/net/ice/ice_generic_flow.h

diff --git a/drivers/net/ice/Makefile b/drivers/net/ice/Makefile
index b10d826..32abeb6 100644
--- a/drivers/net/ice/Makefile
+++ b/drivers/net/ice/Makefile
@@ -79,5 +79,6 @@ endif
 ifeq ($(CC_AVX2_SUPPORT), 1)
 	SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_rxtx_vec_avx2.c
 endif
+SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_generic_flow.c
 
 include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index a94aa7e..8ee06d1 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -15,6 +15,7 @@
 #include "base/ice_dcb.h"
 #include "ice_ethdev.h"
 #include "ice_rxtx.h"
+#include "ice_switch_filter.h"
 
 #define ICE_MAX_QP_NUM "max_queue_pair_num"
 #define ICE_DFLT_OUTER_TAG_TYPE ICE_AQ_VSI_OUTER_TAG_VLAN_9100
@@ -83,6 +84,10 @@ static int ice_xstats_get(struct rte_eth_dev *dev,
 static int ice_xstats_get_names(struct rte_eth_dev *dev,
 				struct rte_eth_xstat_name *xstats_names,
 				unsigned int limit);
+static int ice_dev_filter_ctrl(struct rte_eth_dev *dev,
+			enum rte_filter_type filter_type,
+			enum rte_filter_op filter_op,
+			void *arg);
 
 static const struct rte_pci_id pci_id_ice_map[] = {
 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_BACKPLANE) },
@@ -141,6 +146,7 @@ static const struct eth_dev_ops ice_eth_dev_ops = {
 	.xstats_get                   = ice_xstats_get,
 	.xstats_get_names             = ice_xstats_get_names,
 	.xstats_reset                 = ice_stats_reset,
+	.filter_ctrl                  = ice_dev_filter_ctrl,
 };
 
 /* store statistics names and its offset in stats structure */
@@ -1478,6 +1484,8 @@ ice_dev_init(struct rte_eth_dev *dev)
 	/* get base queue pairs index  in the device */
 	ice_base_queue_get(pf);
 
+	TAILQ_INIT(&pf->flow_list);
+
 	return 0;
 
 err_pf_setup:
@@ -1620,6 +1628,8 @@ ice_dev_uninit(struct rte_eth_dev *dev)
 {
 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	struct rte_flow *p_flow;
 
 	ice_dev_close(dev);
 
@@ -1637,6 +1647,13 @@ ice_dev_uninit(struct rte_eth_dev *dev)
 	rte_intr_callback_unregister(intr_handle,
 				     ice_interrupt_handler, dev);
 
+	/* Remove all flows */
+	while ((p_flow = TAILQ_FIRST(&pf->flow_list))) {
+		TAILQ_REMOVE(&pf->flow_list, p_flow, node);
+		ice_free_switch_filter_rule(p_flow->rule);
+		rte_free(p_flow);
+	}
+
 	return 0;
 }
 
@@ -3622,6 +3639,33 @@ static int ice_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
 }
 
 static int
+ice_dev_filter_ctrl(struct rte_eth_dev *dev,
+		     enum rte_filter_type filter_type,
+		     enum rte_filter_op filter_op,
+		     void *arg)
+{
+	int ret = 0;
+
+	if (!dev)
+		return -EINVAL;
+
+	switch (filter_type) {
+	case RTE_ETH_FILTER_GENERIC:
+		if (filter_op != RTE_ETH_FILTER_GET)
+			return -EINVAL;
+		*(const void **)arg = &ice_flow_ops;
+		break;
+	default:
+		PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
+					filter_type);
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+static int
 ice_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 	      struct rte_pci_device *pci_dev)
 {
diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index 50b966c..8a52239 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -234,12 +234,16 @@ struct ice_vsi {
 	bool offset_loaded;
 };
 
+extern const struct rte_flow_ops ice_flow_ops;
+
 /* Struct to store flow created. */
 struct rte_flow {
 	TAILQ_ENTRY(rte_flow) node;
 	void *rule;
 };
 
+TAILQ_HEAD(ice_flow_list, rte_flow);
+
 struct ice_pf {
 	struct ice_adapter *adapter; /* The adapter this PF associate to */
 	struct ice_vsi *main_vsi; /* pointer to main VSI structure */
@@ -266,6 +270,7 @@ struct ice_pf {
 	struct ice_eth_stats internal_stats;
 	bool offset_loaded;
 	bool adapter_stopped;
+	struct ice_flow_list flow_list;
 };
 
 /**
diff --git a/drivers/net/ice/ice_generic_flow.c b/drivers/net/ice/ice_generic_flow.c
new file mode 100644
index 0000000..3d838a8
--- /dev/null
+++ b/drivers/net/ice/ice_generic_flow.c
@@ -0,0 +1,683 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_malloc.h>
+
+#include "ice_ethdev.h"
+#include "ice_generic_flow.h"
+#include "ice_switch_filter.h"
+
+static int ice_flow_validate(struct rte_eth_dev *dev,
+		const struct rte_flow_attr *attr,
+		const struct rte_flow_item pattern[],
+		const struct rte_flow_action actions[],
+		struct rte_flow_error *error);
+static struct rte_flow *ice_flow_create(struct rte_eth_dev *dev,
+		const struct rte_flow_attr *attr,
+		const struct rte_flow_item pattern[],
+		const struct rte_flow_action actions[],
+		struct rte_flow_error *error);
+static int ice_flow_destroy(struct rte_eth_dev *dev,
+		struct rte_flow *flow,
+		struct rte_flow_error *error);
+static int ice_flow_flush(struct rte_eth_dev *dev,
+		struct rte_flow_error *error);
+
+const struct rte_flow_ops ice_flow_ops = {
+	.validate = ice_flow_validate,
+	.create = ice_flow_create,
+	.destroy = ice_flow_destroy,
+	.flush = ice_flow_flush,
+};
+
+static int
+ice_flow_valid_attr(const struct rte_flow_attr *attr,
+		     struct rte_flow_error *error)
+{
+	/* Must be input direction */
+	if (!attr->ingress) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+				   attr, "Only support ingress.");
+		return -rte_errno;
+	}
+
+	/* Not supported */
+	if (attr->egress) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+				   attr, "Not support egress.");
+		return -rte_errno;
+	}
+
+	/* Not supported */
+	if (attr->priority) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+				   attr, "Not support priority.");
+		return -rte_errno;
+	}
+
+	/* Not supported */
+	if (attr->group) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+				   attr, "Not support group.");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+/* Find the first VOID or non-VOID item pointer */
+static const struct rte_flow_item *
+ice_find_first_item(const struct rte_flow_item *item, bool is_void)
+{
+	bool is_find;
+
+	while (item->type != RTE_FLOW_ITEM_TYPE_END) {
+		if (is_void)
+			is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
+		else
+			is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
+		if (is_find)
+			break;
+		item++;
+	}
+	return item;
+}
+
+/* Skip all VOID items of the pattern */
+static void
+ice_pattern_skip_void_item(struct rte_flow_item *items,
+			    const struct rte_flow_item *pattern)
+{
+	uint32_t cpy_count = 0;
+	const struct rte_flow_item *pb = pattern, *pe = pattern;
+
+	for (;;) {
+		/* Find a non-void item first */
+		pb = ice_find_first_item(pb, false);
+		if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
+			pe = pb;
+			break;
+		}
+
+		/* Find a void item */
+		pe = ice_find_first_item(pb + 1, true);
+
+		cpy_count = pe - pb;
+		rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);
+
+		items += cpy_count;
+
+		if (pe->type == RTE_FLOW_ITEM_TYPE_END) {
+			pb = pe;
+			break;
+		}
+
+		pb = pe + 1;
+	}
+	/* Copy the END item. */
+	rte_memcpy(items, pe, sizeof(struct rte_flow_item));
+}
+
+/* Check if the pattern matches a supported item type array */
+static bool
+ice_match_pattern(enum rte_flow_item_type *item_array,
+		const struct rte_flow_item *pattern)
+{
+	const struct rte_flow_item *item = pattern;
+
+	while ((*item_array == item->type) &&
+	       (*item_array != RTE_FLOW_ITEM_TYPE_END)) {
+		item_array++;
+		item++;
+	}
+
+	return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
+		item->type == RTE_FLOW_ITEM_TYPE_END);
+}
+
+static uint64_t ice_flow_valid_pattern(const struct rte_flow_item pattern[],
+		struct rte_flow_error *error)
+{
+	uint16_t i = 0;
+	uint64_t inset;
+	struct rte_flow_item *items; /* used for pattern without VOID items */
+	uint32_t item_num = 0; /* non-void item number */
+
+	/* Get the non-void item number of pattern */
+	while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
+		if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
+			item_num++;
+		i++;
+	}
+	item_num++;
+
+	items = rte_zmalloc("ice_pattern",
+			    item_num * sizeof(struct rte_flow_item), 0);
+	if (!items) {
+		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+				   NULL, "No memory for PMD internal items.");
+		return -ENOMEM;
+	}
+
+	ice_pattern_skip_void_item(items, pattern);
+
+	for (i = 0; i < RTE_DIM(ice_supported_patterns); i++)
+		if (ice_match_pattern(ice_supported_patterns[i].items,
+				      items)) {
+			inset = ice_supported_patterns[i].sw_fields;
+			rte_free(items);
+			return inset;
+		}
+	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+			   pattern, "Unsupported pattern");
+
+	rte_free(items);
+	return 0;
+}
+
+static uint64_t ice_get_flow_field(const struct rte_flow_item pattern[],
+			struct rte_flow_error *error)
+{
+	const struct rte_flow_item *item = pattern;
+	const struct rte_flow_item_eth *eth_spec, *eth_mask;
+	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
+	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
+	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
+	const struct rte_flow_item_udp *udp_spec, *udp_mask;
+	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
+	const struct rte_flow_item_icmp *icmp_mask;
+	const struct rte_flow_item_icmp6 *icmp6_mask;
+	const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
+	const struct rte_flow_item_nvgre *nvgre_spec, *nvgre_mask;
+	enum rte_flow_item_type item_type;
+	uint8_t  ipv6_addr_mask[16] = {
+		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
+	uint64_t input_set = ICE_INSET_NONE;
+	bool outer_ip = true;
+	bool outer_l4 = true;
+
+	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+		if (item->last) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Not support range");
+			return 0;
+		}
+		item_type = item->type;
+		switch (item_type) {
+		case RTE_FLOW_ITEM_TYPE_ETH:
+			eth_spec = item->spec;
+			eth_mask = item->mask;
+
+			if (eth_spec && eth_mask) {
+				if (rte_is_broadcast_ether_addr(&eth_mask->src))
+					input_set |= ICE_INSET_SMAC;
+				if (rte_is_broadcast_ether_addr(&eth_mask->dst))
+					input_set |= ICE_INSET_DMAC;
+				if (eth_mask->type == RTE_BE16(0xffff))
+					input_set |= ICE_INSET_ETHERTYPE;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_IPV4:
+			ipv4_spec = item->spec;
+			ipv4_mask = item->mask;
+
+			if (!(ipv4_spec && ipv4_mask)) {
+				rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Invalid IPv4 spec or mask.");
+				return 0;
+			}
+
+			/* Check IPv4 mask and update input set */
+			if (ipv4_mask->hdr.version_ihl ||
+			    ipv4_mask->hdr.total_length ||
+			    ipv4_mask->hdr.packet_id ||
+			    ipv4_mask->hdr.hdr_checksum) {
+				rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Invalid IPv4 mask.");
+				return 0;
+			}
+
+			if (outer_ip) {
+				if (ipv4_mask->hdr.src_addr == UINT32_MAX)
+					input_set |= ICE_INSET_IPV4_SRC;
+				if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
+					input_set |= ICE_INSET_IPV4_DST;
+				if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
+					input_set |= ICE_INSET_IPV4_TOS;
+				if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
+					input_set |= ICE_INSET_IPV4_TTL;
+				if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
+					input_set |= ICE_INSET_IPV4_PROTO;
+				outer_ip = false;
+			} else {
+				if (ipv4_mask->hdr.src_addr == UINT32_MAX)
+					input_set |= ICE_INSET_TUN_IPV4_SRC;
+				if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
+					input_set |= ICE_INSET_TUN_IPV4_DST;
+				if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
+					input_set |= ICE_INSET_TUN_IPV4_TTL;
+				if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
+					input_set |= ICE_INSET_TUN_IPV4_PROTO;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_IPV6:
+			ipv6_spec = item->spec;
+			ipv6_mask = item->mask;
+
+			if (!(ipv6_spec && ipv6_mask)) {
+				rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ITEM,
+					item, "Invalid IPv6 spec or mask");
+				return 0;
+			}
+
+			if (ipv6_mask->hdr.payload_len ||
+			    ipv6_mask->hdr.vtc_flow) {
+				rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Invalid IPv6 mask");
+				return 0;
+			}
+
+			if (outer_ip) {
+				if (!memcmp(ipv6_mask->hdr.src_addr,
+					    ipv6_addr_mask,
+					    RTE_DIM(ipv6_mask->hdr.src_addr)))
+					input_set |= ICE_INSET_IPV6_SRC;
+				if (!memcmp(ipv6_mask->hdr.dst_addr,
+					    ipv6_addr_mask,
+					    RTE_DIM(ipv6_mask->hdr.dst_addr)))
+					input_set |= ICE_INSET_IPV6_DST;
+				if (ipv6_mask->hdr.proto == UINT8_MAX)
+					input_set |= ICE_INSET_IPV6_PROTO;
+				if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
+					input_set |= ICE_INSET_IPV6_HOP_LIMIT;
+				outer_ip = false;
+			} else {
+				if (!memcmp(ipv6_mask->hdr.src_addr,
+					    ipv6_addr_mask,
+					    RTE_DIM(ipv6_mask->hdr.src_addr)))
+					input_set |= ICE_INSET_TUN_IPV6_SRC;
+				if (!memcmp(ipv6_mask->hdr.dst_addr,
+					    ipv6_addr_mask,
+					    RTE_DIM(ipv6_mask->hdr.dst_addr)))
+					input_set |= ICE_INSET_TUN_IPV6_DST;
+				if (ipv6_mask->hdr.proto == UINT8_MAX)
+					input_set |= ICE_INSET_TUN_IPV6_PROTO;
+				if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
+					input_set |= ICE_INSET_TUN_IPV6_TTL;
+			}
+
+			break;
+		case RTE_FLOW_ITEM_TYPE_UDP:
+			udp_spec = item->spec;
+			udp_mask = item->mask;
+
+			if (!(udp_spec && udp_mask)) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item, "Invalid UDP mask");
+				return 0;
+			}
+
+			/* Check UDP mask and update input set*/
+			if (udp_mask->hdr.dgram_len ||
+			    udp_mask->hdr.dgram_cksum) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid UDP mask");
+				return 0;
+			}
+
+			if (outer_l4) {
+				if (udp_mask->hdr.src_port == UINT16_MAX)
+					input_set |= ICE_INSET_SRC_PORT;
+				if (udp_mask->hdr.dst_port == UINT16_MAX)
+					input_set |= ICE_INSET_DST_PORT;
+				outer_l4 = false;
+			} else {
+				if (udp_mask->hdr.src_port == UINT16_MAX)
+					input_set |= ICE_INSET_TUN_SRC_PORT;
+				if (udp_mask->hdr.dst_port == UINT16_MAX)
+					input_set |= ICE_INSET_TUN_DST_PORT;
+			}
+
+			break;
+		case RTE_FLOW_ITEM_TYPE_TCP:
+			tcp_spec = item->spec;
+			tcp_mask = item->mask;
+
+			if (!(tcp_spec && tcp_mask)) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item, "Invalid TCP mask");
+				return 0;
+			}
+
+			/* Check TCP mask and update input set */
+			if (tcp_mask->hdr.sent_seq ||
+			    tcp_mask->hdr.recv_ack ||
+			    tcp_mask->hdr.data_off ||
+			    tcp_mask->hdr.tcp_flags ||
+			    tcp_mask->hdr.rx_win ||
+			    tcp_mask->hdr.cksum ||
+			    tcp_mask->hdr.tcp_urp) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid TCP mask");
+				return 0;
+			}
+
+			if (outer_l4) {
+				if (tcp_mask->hdr.src_port == UINT16_MAX)
+					input_set |= ICE_INSET_SRC_PORT;
+				if (tcp_mask->hdr.dst_port == UINT16_MAX)
+					input_set |= ICE_INSET_DST_PORT;
+				outer_l4 = false;
+			} else {
+				if (tcp_mask->hdr.src_port == UINT16_MAX)
+					input_set |= ICE_INSET_TUN_SRC_PORT;
+				if (tcp_mask->hdr.dst_port == UINT16_MAX)
+					input_set |= ICE_INSET_TUN_DST_PORT;
+			}
+
+			break;
+		case RTE_FLOW_ITEM_TYPE_SCTP:
+			sctp_spec = item->spec;
+			sctp_mask = item->mask;
+
+			if (!(sctp_spec && sctp_mask)) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item, "Invalid SCTP mask");
+				return 0;
+			}
+
+			/* Check SCTP mask and update input set */
+			if (sctp_mask->hdr.cksum) {
+				rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Invalid SCTP mask");
+				return 0;
+			}
+
+			if (outer_l4) {
+				if (sctp_mask->hdr.src_port == UINT16_MAX)
+					input_set |= ICE_INSET_SRC_PORT;
+				if (sctp_mask->hdr.dst_port == UINT16_MAX)
+					input_set |= ICE_INSET_DST_PORT;
+				outer_l4 = false;
+			} else {
+				if (sctp_mask->hdr.src_port == UINT16_MAX)
+					input_set |= ICE_INSET_TUN_SRC_PORT;
+				if (sctp_mask->hdr.dst_port == UINT16_MAX)
+					input_set |= ICE_INSET_TUN_DST_PORT;
+			}
+
+			break;
+		case RTE_FLOW_ITEM_TYPE_ICMP:
+			icmp_mask = item->mask;
+			if (icmp_mask->hdr.icmp_code ||
+			    icmp_mask->hdr.icmp_cksum ||
+			    icmp_mask->hdr.icmp_ident ||
+			    icmp_mask->hdr.icmp_seq_nb) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid ICMP mask");
+				return 0;
+			}
+
+			if (icmp_mask->hdr.icmp_type == UINT8_MAX)
+				input_set |= ICE_INSET_ICMP;
+			break;
+		case RTE_FLOW_ITEM_TYPE_ICMP6:
+			icmp6_mask = item->mask;
+			if (icmp6_mask->code ||
+			    icmp6_mask->checksum) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid ICMP6 mask");
+				return 0;
+			}
+
+			if (icmp6_mask->type == UINT8_MAX)
+				input_set |= ICE_INSET_ICMP6;
+			break;
+		case RTE_FLOW_ITEM_TYPE_VXLAN:
+			vxlan_spec = item->spec;
+			vxlan_mask = item->mask;
+			/* Check if VXLAN item is used to describe protocol.
+			 * If yes, both spec and mask should be NULL.
+			 * If no, both spec and mask shouldn't be NULL.
+			 */
+			if ((!vxlan_spec && vxlan_mask) ||
+			    (vxlan_spec && !vxlan_mask)) {
+				rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Invalid VXLAN item");
+				return 0;
+			}
+
+			break;
+		case RTE_FLOW_ITEM_TYPE_NVGRE:
+			nvgre_spec = item->spec;
+			nvgre_mask = item->mask;
+			/* Check if NVGRE item is used to describe protocol.
+			 * If yes, both spec and mask should be NULL.
+			 * If no, both spec and mask shouldn't be NULL.
+			 */
+			if ((!nvgre_spec && nvgre_mask) ||
+			    (nvgre_spec && !nvgre_mask)) {
+				rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Invalid NVGRE item");
+				return 0;
+			}
+
+			break;
+		default:
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Invalid pattern item");
+			break;
+		}
+	}
+	return input_set;
+}
+
+static int ice_flow_valid_inset(const struct rte_flow_item pattern[],
+			uint64_t inset, struct rte_flow_error *error)
+{
+	uint64_t fields;
+
+	/* get valid field */
+	fields = ice_get_flow_field(pattern, error);
+	if (!fields || fields & (~inset)) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
+				   pattern,
+				   "Invalid input set");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+static int ice_flow_valid_action(const struct rte_flow_action *actions,
+				       struct rte_flow_error *error)
+{
+	switch (actions->type) {
+	case RTE_FLOW_ACTION_TYPE_QUEUE:
+		break;
+	case RTE_FLOW_ACTION_TYPE_DROP:
+		break;
+	default:
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ACTION, actions,
+				   "Invalid action.");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+static int
+ice_flow_validate(__rte_unused struct rte_eth_dev *dev,
+		   const struct rte_flow_attr *attr,
+		   const struct rte_flow_item pattern[],
+		   const struct rte_flow_action actions[],
+		   struct rte_flow_error *error)
+{
+	uint64_t inset = 0;
+	int ret = ICE_ERR_NOT_SUPPORTED;
+
+	if (!pattern) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+				   NULL, "NULL pattern.");
+		return -rte_errno;
+	}
+
+	if (!actions) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+				   NULL, "NULL action.");
+		return -rte_errno;
+	}
+
+	if (!attr) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR,
+				   NULL, "NULL attribute.");
+		return -rte_errno;
+	}
+
+	ret = ice_flow_valid_attr(attr, error);
+	if (ret)
+		return ret;
+
+	inset = ice_flow_valid_pattern(pattern, error);
+	if (!inset)
+		return -rte_errno;
+
+	ret = ice_flow_valid_inset(pattern, inset, error);
+	if (ret)
+		return ret;
+
+	ret = ice_flow_valid_action(actions, error);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static struct rte_flow *
+ice_flow_create(struct rte_eth_dev *dev,
+		 const struct rte_flow_attr *attr,
+		 const struct rte_flow_item pattern[],
+		 const struct rte_flow_action actions[],
+		 struct rte_flow_error *error)
+{
+	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	struct rte_flow *flow = NULL;
+	int ret;
+
+	flow = rte_zmalloc("ice_flow", sizeof(struct rte_flow), 0);
+	if (!flow) {
+		rte_flow_error_set(error, ENOMEM,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Failed to allocate memory");
+		return flow;
+	}
+
+	ret = ice_flow_validate(dev, attr, pattern, actions, error);
+	if (ret < 0)
+		goto free_flow;
+
+	ret = ice_create_switch_filter(pf, pattern, actions, flow, error);
+	if (ret)
+		goto free_flow;
+
+	TAILQ_INSERT_TAIL(&pf->flow_list, flow, node);
+	return flow;
+
+free_flow:
+	rte_flow_error_set(error, -ret,
+			   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+			   "Failed to create flow.");
+	rte_free(flow);
+	return NULL;
+}
+
+static int
+ice_flow_destroy(struct rte_eth_dev *dev,
+		 struct rte_flow *flow,
+		 struct rte_flow_error *error)
+{
+	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	int ret = 0;
+
+	ret = ice_destroy_switch_filter(pf, flow, error);
+
+	if (!ret) {
+		TAILQ_REMOVE(&pf->flow_list, flow, node);
+		rte_free(flow);
+	} else {
+		rte_flow_error_set(error, -ret,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Failed to destroy flow.");
+	}
+
+	return ret;
+}
+
+static int
+ice_flow_flush(struct rte_eth_dev *dev,
+	       struct rte_flow_error *error)
+{
+	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	struct rte_flow *p_flow;
+	int ret = 0;
+
+	TAILQ_FOREACH(p_flow, &pf->flow_list, node) {
+		ret = ice_flow_destroy(dev, p_flow, error);
+		if (ret) {
+			rte_flow_error_set(error, -ret,
+					   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+					   "Failed to flush SW flows.");
+			return -rte_errno;
+		}
+	}
+
+	return ret;
+}
diff --git a/drivers/net/ice/ice_generic_flow.h b/drivers/net/ice/ice_generic_flow.h
new file mode 100644
index 0000000..3264c41
--- /dev/null
+++ b/drivers/net/ice/ice_generic_flow.h
@@ -0,0 +1,654 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+#ifndef _ICE_GENERIC_FLOW_H_
+#define _ICE_GENERIC_FLOW_H_
+
+#include <rte_flow_driver.h>
+
+struct ice_flow_pattern {
+	enum rte_flow_item_type *items;
+	uint64_t sw_fields;
+};
+
+#define ICE_INSET_NONE            0x00000000000000000ULL
+
+/* bit0 ~ bit 7 */
+#define ICE_INSET_SMAC            0x0000000000000001ULL
+#define ICE_INSET_DMAC            0x0000000000000002ULL
+#define ICE_INSET_ETHERTYPE       0x0000000000000020ULL
+
+/* bit 8 ~ bit 15 */
+#define ICE_INSET_IPV4_SRC        0x0000000000000100ULL
+#define ICE_INSET_IPV4_DST        0x0000000000000200ULL
+#define ICE_INSET_IPV6_SRC        0x0000000000000400ULL
+#define ICE_INSET_IPV6_DST        0x0000000000000800ULL
+#define ICE_INSET_SRC_PORT        0x0000000000001000ULL
+#define ICE_INSET_DST_PORT        0x0000000000002000ULL
+#define ICE_INSET_ARP             0x0000000000004000ULL
+
+/* bit 16 ~ bit 31 */
+#define ICE_INSET_IPV4_TOS        0x0000000000010000ULL
+#define ICE_INSET_IPV4_PROTO      0x0000000000020000ULL
+#define ICE_INSET_IPV4_TTL        0x0000000000040000ULL
+#define ICE_INSET_IPV6_PROTO      0x0000000000200000ULL
+#define ICE_INSET_IPV6_HOP_LIMIT  0x0000000000400000ULL
+#define ICE_INSET_ICMP            0x0000000001000000ULL
+#define ICE_INSET_ICMP6           0x0000000002000000ULL
+
+/* bit 32 ~ bit 47, tunnel fields */
+#define ICE_INSET_TUN_SMAC           0x0000000100000000ULL
+#define ICE_INSET_TUN_DMAC           0x0000000200000000ULL
+#define ICE_INSET_TUN_IPV4_SRC       0x0000000400000000ULL
+#define ICE_INSET_TUN_IPV4_DST       0x0000000800000000ULL
+#define ICE_INSET_TUN_IPV4_TTL       0x0000001000000000ULL
+#define ICE_INSET_TUN_IPV4_PROTO     0x0000002000000000ULL
+#define ICE_INSET_TUN_IPV6_SRC       0x0000004000000000ULL
+#define ICE_INSET_TUN_IPV6_DST       0x0000008000000000ULL
+#define ICE_INSET_TUN_IPV6_TTL       0x0000010000000000ULL
+#define ICE_INSET_TUN_IPV6_PROTO     0x0000020000000000ULL
+#define ICE_INSET_TUN_SRC_PORT       0x0000040000000000ULL
+#define ICE_INSET_TUN_DST_PORT       0x0000080000000000ULL
+#define ICE_INSET_TUN_ID             0x0000100000000000ULL
+
+/* bit 48 ~ bit 55 */
+#define ICE_INSET_LAST_ETHER_TYPE 0x0001000000000000ULL
+
+#define ICE_FLAG_VLAN_INNER  0x00000001ULL
+#define ICE_FLAG_VLAN_OUTER  0x00000002ULL
+
+#define INSET_ETHER ( \
+	ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)
+#define INSET_MAC_IPV4 ( \
+	ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
+	ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_TOS)
+#define INSET_MAC_IPV4_L4 ( \
+	ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
+	ICE_INSET_IPV4_TOS | ICE_INSET_DST_PORT | \
+	ICE_INSET_SRC_PORT)
+#define INSET_MAC_IPV4_ICMP ( \
+	ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
+	ICE_INSET_IPV4_TOS | ICE_INSET_ICMP)
+#define INSET_MAC_IPV6 ( \
+	ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
+	ICE_INSET_IPV6_PROTO | ICE_INSET_IPV6_HOP_LIMIT)
+#define INSET_MAC_IPV6_L4 ( \
+	ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
+	ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_DST_PORT | \
+	ICE_INSET_SRC_PORT)
+#define INSET_MAC_IPV6_ICMP ( \
+	ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
+	ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_ICMP6)
+#define INSET_TUNNEL_IPV4_TYPE1 ( \
+	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
+	ICE_INSET_TUN_IPV4_TTL | ICE_INSET_TUN_IPV4_PROTO)
+#define INSET_TUNNEL_IPV4_TYPE2 ( \
+	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
+	ICE_INSET_TUN_IPV4_TTL | ICE_INSET_TUN_IPV4_PROTO | \
+	ICE_INSET_TUN_SRC_PORT | ICE_INSET_TUN_DST_PORT)
+#define INSET_TUNNEL_IPV4_TYPE3 ( \
+	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
+	ICE_INSET_TUN_IPV4_TTL | ICE_INSET_ICMP)
+#define INSET_TUNNEL_IPV6_TYPE1 ( \
+	ICE_INSET_TUN_IPV6_SRC | ICE_INSET_TUN_IPV6_DST | \
+	ICE_INSET_TUN_IPV6_TTL | ICE_INSET_TUN_IPV6_PROTO)
+#define INSET_TUNNEL_IPV6_TYPE2 ( \
+	ICE_INSET_TUN_IPV6_SRC | ICE_INSET_TUN_IPV6_DST | \
+	ICE_INSET_TUN_IPV6_TTL | ICE_INSET_TUN_IPV6_PROTO | \
+	ICE_INSET_TUN_SRC_PORT | ICE_INSET_TUN_DST_PORT)
+#define INSET_TUNNEL_IPV6_TYPE3 ( \
+	ICE_INSET_TUN_IPV6_SRC | ICE_INSET_TUN_IPV6_DST | \
+	ICE_INSET_TUN_IPV6_TTL | ICE_INSET_ICMP6)
+
+/* L2 */
+static enum rte_flow_item_type pattern_ethertype[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* non-tunnel IPv4 */
+static enum rte_flow_item_type pattern_ipv4[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_icmp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* non-tunnel IPv6 */
+static enum rte_flow_item_type pattern_ipv6[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv6_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv6_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv6_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv6_icmp6[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_ICMP6,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* IPv4 VXLAN IPv4 */
+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv4[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv4_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv4_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv4_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv4_icmp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* IPv4 VXLAN MAC IPv4 */
+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv4[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv4_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv4_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv4_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv4_icmp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* IPv4 VXLAN IPv6 */
+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv6[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv6_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv6_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv6_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv6_icmp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* IPv4 VXLAN MAC IPv6 */
+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv6[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv6_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv6_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv6_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv6_icmp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* IPv4 NVGRE IPv4 */
+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv4[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv4_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv4_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv4_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv4_icmp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* IPv4 NVGRE MAC IPv4 */
+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv4[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv4_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv4_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv4_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv4_icmp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* IPv4 NVGRE IPv6 */
+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv6[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv6_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv6_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv6_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv6_icmp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* IPv4 NVGRE MAC IPv6 */
+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv6[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv6_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv6_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv6_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv6_icmp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static struct ice_flow_pattern ice_supported_patterns[] = {
+	{pattern_ethertype, INSET_ETHER},
+	{pattern_ipv4, INSET_MAC_IPV4},
+	{pattern_ipv4_udp, INSET_MAC_IPV4_L4},
+	{pattern_ipv4_sctp, INSET_MAC_IPV4_L4},
+	{pattern_ipv4_tcp, INSET_MAC_IPV4_L4},
+	{pattern_ipv4_icmp, INSET_MAC_IPV4_ICMP},
+	{pattern_ipv6, INSET_MAC_IPV6},
+	{pattern_ipv6_udp, INSET_MAC_IPV6_L4},
+	{pattern_ipv6_sctp, INSET_MAC_IPV6_L4},
+	{pattern_ipv6_tcp, INSET_MAC_IPV6_L4},
+	{pattern_ipv6_icmp6, INSET_MAC_IPV6_ICMP},
+	{pattern_ipv4_vxlan_ipv4, INSET_TUNNEL_IPV4_TYPE1},
+	{pattern_ipv4_vxlan_ipv4_udp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_vxlan_ipv4_tcp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_vxlan_ipv4_sctp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_vxlan_ipv4_icmp, INSET_TUNNEL_IPV4_TYPE3},
+	{pattern_ipv4_vxlan_eth_ipv4, INSET_TUNNEL_IPV4_TYPE1},
+	{pattern_ipv4_vxlan_eth_ipv4_udp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_vxlan_eth_ipv4_tcp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_vxlan_eth_ipv4_sctp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_vxlan_eth_ipv4_icmp, INSET_TUNNEL_IPV4_TYPE3},
+	{pattern_ipv4_vxlan_ipv6, INSET_TUNNEL_IPV6_TYPE1},
+	{pattern_ipv4_vxlan_ipv6_udp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_vxlan_ipv6_tcp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_vxlan_ipv6_sctp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_vxlan_ipv6_icmp, INSET_TUNNEL_IPV6_TYPE3},
+	{pattern_ipv4_vxlan_eth_ipv6, INSET_TUNNEL_IPV6_TYPE1},
+	{pattern_ipv4_vxlan_eth_ipv6_udp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_vxlan_eth_ipv6_tcp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_vxlan_eth_ipv6_sctp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_vxlan_eth_ipv6_icmp, INSET_TUNNEL_IPV6_TYPE3},
+	{pattern_ipv4_nvgre_ipv4, INSET_TUNNEL_IPV4_TYPE1},
+	{pattern_ipv4_nvgre_ipv4_udp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_nvgre_ipv4_tcp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_nvgre_ipv4_sctp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_nvgre_ipv4_icmp, INSET_TUNNEL_IPV4_TYPE3},
+	{pattern_ipv4_nvgre_eth_ipv4, INSET_TUNNEL_IPV4_TYPE1},
+	{pattern_ipv4_nvgre_eth_ipv4_udp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_nvgre_eth_ipv4_tcp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_nvgre_eth_ipv4_sctp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_nvgre_eth_ipv4_icmp, INSET_TUNNEL_IPV4_TYPE3},
+	{pattern_ipv4_nvgre_ipv6, INSET_TUNNEL_IPV6_TYPE1},
+	{pattern_ipv4_nvgre_ipv6_udp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_nvgre_ipv6_tcp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_nvgre_ipv6_sctp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_nvgre_ipv6_icmp, INSET_TUNNEL_IPV6_TYPE3},
+	{pattern_ipv4_nvgre_eth_ipv6, INSET_TUNNEL_IPV6_TYPE1},
+	{pattern_ipv4_nvgre_eth_ipv6_udp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_nvgre_eth_ipv6_tcp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_nvgre_eth_ipv6_sctp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_nvgre_eth_ipv6_icmp, INSET_TUNNEL_IPV6_TYPE3},
+};
+
+#endif
diff --git a/drivers/net/ice/meson.build b/drivers/net/ice/meson.build
index 8697676..7f16647 100644
--- a/drivers/net/ice/meson.build
+++ b/drivers/net/ice/meson.build
@@ -7,7 +7,8 @@ objs = [base_objs]
 sources = files(
 	'ice_ethdev.c',
 	'ice_rxtx.c',
-	'ice_switch_filter.c'
+	'ice_switch_filter.c',
+	'ice_generic_flow.c'
 	)
 
 deps += ['hash']
-- 
2.9.5


^ permalink raw reply	[flat|nested] 73+ messages in thread

* [dpdk-dev] [PATCH v5 3/3] net/ice: add UDP tunnel port support
  2019-06-21  9:21 ` [dpdk-dev] [PATCH v5 0/3] Enable rte_flow API in ice driver Qiming Yang
  2019-06-21  9:21   ` [dpdk-dev] [PATCH v5 1/3] net/ice: enable switch filter Qiming Yang
  2019-06-21  9:21   ` [dpdk-dev] [PATCH v5 2/3] net/ice: add generic flow API Qiming Yang
@ 2019-06-21  9:21   ` Qiming Yang
  2019-06-21 14:46   ` [dpdk-dev] [PATCH v5 0/3] Enable rte_flow API in ice driver Aaron Conole
  3 siblings, 0 replies; 73+ messages in thread
From: Qiming Yang @ 2019-06-21  9:21 UTC (permalink / raw)
  To: dev; +Cc: Qiming Yang

Enabled UDP tunnel port add and delete functions.
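
For reference, applications reach these callbacks through the generic
ethdev API. A minimal sketch (not part of this patch; the helper name,
port id and the IANA VXLAN port 4789 are made up for illustration):

#include <rte_ethdev.h>

/* Illustrative helper: register the VXLAN UDP port on an ice port so
 * that VXLAN-tunnelled packets can be recognized by the switch filter.
 */
static int
register_vxlan_port(uint16_t port_id)
{
	struct rte_eth_udp_tunnel tunnel = {
		.udp_port = 4789,
		.prot_type = RTE_TUNNEL_TYPE_VXLAN,
	};

	/* Ends up in ice_dev_udp_tunnel_port_add() added below. */
	return rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
}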

Signed-off-by: Qiming Yang <qiming.yang@intel.com>
---
 drivers/net/ice/ice_ethdev.c | 54 ++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 54 insertions(+)

diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index 8ee06d1..949d293 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -88,6 +88,10 @@ static int ice_dev_filter_ctrl(struct rte_eth_dev *dev,
 			enum rte_filter_type filter_type,
 			enum rte_filter_op filter_op,
 			void *arg);
+static int ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
+			struct rte_eth_udp_tunnel *udp_tunnel);
+static int ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
+			struct rte_eth_udp_tunnel *udp_tunnel);
 
 static const struct rte_pci_id pci_id_ice_map[] = {
 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_BACKPLANE) },
@@ -147,6 +151,8 @@ static const struct eth_dev_ops ice_eth_dev_ops = {
 	.xstats_get_names             = ice_xstats_get_names,
 	.xstats_reset                 = ice_stats_reset,
 	.filter_ctrl                  = ice_dev_filter_ctrl,
+	.udp_tunnel_port_add          = ice_dev_udp_tunnel_port_add,
+	.udp_tunnel_port_del          = ice_dev_udp_tunnel_port_del,
 };
 
 /* store statistics names and its offset in stats structure */
@@ -3665,6 +3671,54 @@ ice_dev_filter_ctrl(struct rte_eth_dev *dev,
 	return ret;
 }
 
+/* Add UDP tunneling port */
+static int
+ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
+			     struct rte_eth_udp_tunnel *udp_tunnel)
+{
+	int ret = 0;
+	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	if (udp_tunnel == NULL)
+		return -EINVAL;
+
+	switch (udp_tunnel->prot_type) {
+	case RTE_TUNNEL_TYPE_VXLAN:
+		ret = ice_create_tunnel(hw, TNL_VXLAN, udp_tunnel->udp_port);
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "Invalid tunnel type");
+		ret = -1;
+		break;
+	}
+
+	return ret;
+}
+
+/* Delete UDP tunneling port */
+static int
+ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
+			     struct rte_eth_udp_tunnel *udp_tunnel)
+{
+	int ret = 0;
+	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	if (udp_tunnel == NULL)
+		return -EINVAL;
+
+	switch (udp_tunnel->prot_type) {
+	case RTE_TUNNEL_TYPE_VXLAN:
+		ret = ice_destroy_tunnel(hw, udp_tunnel->udp_port, 0);
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "Invalid tunnel type");
+		ret = -1;
+		break;
+	}
+
+	return ret;
+}
+
 static int
 ice_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 	      struct rte_pci_device *pci_dev)
-- 
2.9.5


^ permalink raw reply	[flat|nested] 73+ messages in thread

* Re: [dpdk-dev] [PATCH v5 0/3] Enable rte_flow API in ice driver
  2019-06-21  9:21 ` [dpdk-dev] [PATCH v5 0/3] Enable rte_flow API in ice driver Qiming Yang
                     ` (2 preceding siblings ...)
  2019-06-21  9:21   ` [dpdk-dev] [PATCH v5 3/3] net/ice: add UDP tunnel port support Qiming Yang
@ 2019-06-21 14:46   ` Aaron Conole
  3 siblings, 0 replies; 73+ messages in thread
From: Aaron Conole @ 2019-06-21 14:46 UTC (permalink / raw)
  To: Qiming Yang; +Cc: dev

Qiming Yang <qiming.yang@intel.com> writes:

> This patch set enables the backend of rte_flow, and the generic
> filter related functions in ice driver. Supported flows include
> ipv4, tcpv4, udpv4, ipv6, tcpv6, udpv6, tunnel, etc. This patch
> set depends on shared code update.
>
> ---
> v2 changes:
>  - added UDP tunnel port support.
>  - fixed compile issue.
>  - added document update.
> v3 changes:
>  - removed redundancy parser.
>  - added License.
>  - added VXLAN and NVGRE item support.
> v4 changes:
>  - fixed some typos.
> v5 changes:
>  - fixed checkpatch issues.
>

Thanks for fixing up the meson errors.  I notice some build issues from
the Travis server.  Can you take a look?

https://travis-ci.com/ovsrobot/dpdk/builds/116409374

I think the errors are introduced with the first patch, actually.

It might help to use github as an intermediate repository, sign up for
travis, push your changes to your github, and then watch the builds to
ensure that they are compiling properly.  That can help to isolate
environmental issues (for instance, accidentally omitted header files,
etc).

Some of this is documented at doc/guides/contributing/patches.rst, but if
something is unclear or needs more detail, let's improve the docs.

^ permalink raw reply	[flat|nested] 73+ messages in thread

* [dpdk-dev] [PATCH v6 0/3] Enable rte_flow API in ice driver
  2019-06-03  9:05 [dpdk-dev] [PATCH 0/2] Enable rte_flow API in ice driver Qiming Yang
                   ` (5 preceding siblings ...)
  2019-06-21  9:21 ` [dpdk-dev] [PATCH v5 0/3] Enable rte_flow API in ice driver Qiming Yang
@ 2019-06-24  6:15 ` Qiming Yang
  2019-06-24  6:15   ` [dpdk-dev] [PATCH v6 1/3] net/ice: enable switch filter Qiming Yang
                     ` (2 more replies)
  2019-06-25  6:48 ` [dpdk-dev] [PATCH v7 0/3] Enable rte_flow API in ice driver Qiming Yang
                   ` (3 subsequent siblings)
  10 siblings, 3 replies; 73+ messages in thread
From: Qiming Yang @ 2019-06-24  6:15 UTC (permalink / raw)
  To: dev; +Cc: Qiming Yang

This patch set enables the backend of rte_flow, and the generic
filter related functions in ice driver. Supported flows include
ipv4, tcpv4, udpv4, ipv6, tcpv6, udpv6, tunnel, etc. This patch
set depends on shared code update.
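
As a quick illustration of the tunnelled case (a hedged sketch, not part
of the patch set; only item types are shown, and the array name is made
up), an application-side VXLAN pattern matching on the inner IPv4 header
could be laid out like this:

#include <rte_flow.h>

/* Pattern skeleton for an outer-IPv4 VXLAN flow matching the inner IPv4
 * header; spec/mask values would still have to be filled in with
 * fully-masked fields, as the switch filter expects.
 */
static const struct rte_flow_item vxlan_inner_ipv4_pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
	{ .type = RTE_FLOW_ITEM_TYPE_UDP },
	{ .type = RTE_FLOW_ITEM_TYPE_VXLAN },
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },	/* inner header */
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};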

---
v2 changes:
 - added UDP tunnel port support.
 - fixed compile issue.
 - added document update.
v3 changes:
 - removed redundancy parser.
 - added License.
 - added VXLAN and NVGRE item support.
v4 changes:
 - fixed some typos.
v5 changes:
 - fixed checkpatch issues.
v6 changes:
 - fixed an uninitialized variable issue.

Qiming Yang (2):
  net/ice: add generic flow API
  net/ice: add UDP tunnel port support

wei zhao (1):
  net/ice: enable switch filter

 drivers/net/ice/Makefile            |   2 +
 drivers/net/ice/ice_ethdev.c        | 116 ++++++
 drivers/net/ice/ice_ethdev.h        |  12 +
 drivers/net/ice/ice_generic_flow.c  | 683 ++++++++++++++++++++++++++++++++++++
 drivers/net/ice/ice_generic_flow.h  | 654 ++++++++++++++++++++++++++++++++++
 drivers/net/ice/ice_switch_filter.c | 521 +++++++++++++++++++++++++++
 drivers/net/ice/ice_switch_filter.h |  24 ++
 drivers/net/ice/meson.build         |   4 +-
 8 files changed, 2015 insertions(+), 1 deletion(-)
 create mode 100644 drivers/net/ice/ice_generic_flow.c
 create mode 100644 drivers/net/ice/ice_generic_flow.h
 create mode 100644 drivers/net/ice/ice_switch_filter.c
 create mode 100644 drivers/net/ice/ice_switch_filter.h

-- 
2.9.5


^ permalink raw reply	[flat|nested] 73+ messages in thread

* [dpdk-dev] [PATCH v6 1/3] net/ice: enable switch filter
  2019-06-24  6:15 ` [dpdk-dev] [PATCH v6 " Qiming Yang
@ 2019-06-24  6:15   ` Qiming Yang
  2019-06-24  6:15   ` [dpdk-dev] [PATCH v6 2/3] net/ice: add generic flow API Qiming Yang
  2019-06-24  6:15   ` [dpdk-dev] [PATCH v6 3/3] net/ice: add UDP tunnel port support Qiming Yang
  2 siblings, 0 replies; 73+ messages in thread
From: Qiming Yang @ 2019-06-24  6:15 UTC (permalink / raw)
  To: dev; +Cc: wei zhao

From: wei zhao <wei.zhao1@intel.com>

The patch enables the backend of rte_flow. It transfers
rte_flow_xxx to device specific data structure and
configures packet process engine's binary classifier
(switch) properly.
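
As a rough, application-side illustration (not part of the patch): the
parser below expects fully-masked fields and a QUEUE or DROP action, so a
rule it can translate might be built as in the following sketch (the
helper name, port id, address and queue index are made-up values):

#include <stdint.h>
#include <rte_byteorder.h>
#include <rte_flow.h>

/* Steer IPv4 packets destined to 192.168.0.1 (full /32 mask, as the
 * switch filter requires) to Rx queue 3.
 */
static struct rte_flow *
create_example_rule(uint16_t port_id, struct rte_flow_error *err)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_ipv4 ip_spec = {
		.hdr.dst_addr = rte_cpu_to_be_32(0xC0A80001), /* 192.168.0.1 */
	};
	struct rte_flow_item_ipv4 ip_mask = {
		.hdr.dst_addr = UINT32_MAX,
	};
	struct rte_flow_action_queue queue = { .index = 3 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ip_spec, .mask = &ip_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, err);
}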

Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
---
 drivers/net/ice/Makefile            |   1 +
 drivers/net/ice/ice_ethdev.c        |  18 ++
 drivers/net/ice/ice_ethdev.h        |   7 +
 drivers/net/ice/ice_switch_filter.c | 521 ++++++++++++++++++++++++++++++++++++
 drivers/net/ice/ice_switch_filter.h |  24 ++
 drivers/net/ice/meson.build         |   3 +-
 6 files changed, 573 insertions(+), 1 deletion(-)
 create mode 100644 drivers/net/ice/ice_switch_filter.c
 create mode 100644 drivers/net/ice/ice_switch_filter.h

diff --git a/drivers/net/ice/Makefile b/drivers/net/ice/Makefile
index 0e5c55e..b10d826 100644
--- a/drivers/net/ice/Makefile
+++ b/drivers/net/ice/Makefile
@@ -60,6 +60,7 @@ ifeq ($(CONFIG_RTE_ARCH_X86), y)
 SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_rxtx_vec_sse.c
 endif
 
+SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_switch_filter.c
 ifeq ($(findstring RTE_MACHINE_CPUFLAG_AVX2,$(CFLAGS)),RTE_MACHINE_CPUFLAG_AVX2)
 	CC_AVX2_SUPPORT=1
 else
diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index 203d0a9..a94aa7e 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -1364,6 +1364,21 @@ static int ice_load_pkg(struct rte_eth_dev *dev)
 	return err;
 }
 
+static void
+ice_base_queue_get(struct ice_pf *pf)
+{
+	uint32_t reg;
+	struct ice_hw *hw = ICE_PF_TO_HW(pf);
+
+	reg = ICE_READ_REG(hw, PFLAN_RX_QALLOC);
+	if (reg & PFLAN_RX_QALLOC_VALID_M) {
+		pf->base_queue = reg & PFLAN_RX_QALLOC_FIRSTQ_M;
+	} else {
+		PMD_INIT_LOG(WARNING, "Failed to get Rx base queue"
+					" index");
+	}
+}
+
 static int
 ice_dev_init(struct rte_eth_dev *dev)
 {
@@ -1460,6 +1475,9 @@ ice_dev_init(struct rte_eth_dev *dev)
 	/* enable uio intr after callback register */
 	rte_intr_enable(intr_handle);
 
+	/* get the base queue pair index in the device */
+	ice_base_queue_get(pf);
+
 	return 0;
 
 err_pf_setup:
diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index 1385afa..50b966c 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -234,6 +234,12 @@ struct ice_vsi {
 	bool offset_loaded;
 };
 
+/* Struct to store flow created. */
+struct rte_flow {
+	TAILQ_ENTRY(rte_flow) node;
+	void *rule;
+};
+
 struct ice_pf {
 	struct ice_adapter *adapter; /* The adapter this PF associate to */
 	struct ice_vsi *main_vsi; /* pointer to main VSI structure */
@@ -252,6 +258,7 @@ struct ice_pf {
 	uint16_t hash_lut_size; /* The size of hash lookup table */
 	uint16_t lan_nb_qp_max;
 	uint16_t lan_nb_qps; /* The number of queue pairs of LAN */
+	uint16_t base_queue; /* The base queue pair index in the device */
 	struct ice_hw_port_stats stats_offset;
 	struct ice_hw_port_stats stats;
 	/* internal packet statistics, it should be excluded from the total */
diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
new file mode 100644
index 0000000..adfc154
--- /dev/null
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -0,0 +1,521 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_eth_ctrl.h>
+#include <rte_tailq.h>
+#include <rte_flow_driver.h>
+
+#include "ice_logs.h"
+#include "base/ice_type.h"
+#include "ice_switch_filter.h"
+
+static int
+ice_parse_switch_filter(const struct rte_flow_item pattern[],
+			const struct rte_flow_action actions[],
+			struct rte_flow_error *error,
+			struct ice_adv_rule_info *rule_info,
+			struct ice_adv_lkup_elem **lkup_list,
+			uint16_t *lkups_num)
+{
+	const struct rte_flow_item *item = pattern;
+	enum rte_flow_item_type item_type;
+	const struct rte_flow_item_eth *eth_spec, *eth_mask;
+	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
+	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
+	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
+	const struct rte_flow_item_udp *udp_spec, *udp_mask;
+	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
+	const struct rte_flow_item_nvgre  *nvgre_spec, *nvgre_mask;
+	const struct rte_flow_item_vxlan  *vxlan_spec, *vxlan_mask;
+	struct ice_adv_lkup_elem *list;
+	uint16_t j, t = 0;
+	uint16_t item_num = 0;
+	enum ice_sw_tunnel_type tun_type = ICE_NON_TUN;
+	uint16_t tunnel_valid = 0;
+
+	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+		if (item->type == RTE_FLOW_ITEM_TYPE_ETH ||
+			item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
+			item->type == RTE_FLOW_ITEM_TYPE_IPV6 ||
+			item->type == RTE_FLOW_ITEM_TYPE_UDP ||
+			item->type == RTE_FLOW_ITEM_TYPE_TCP ||
+			item->type == RTE_FLOW_ITEM_TYPE_SCTP ||
+			item->type == RTE_FLOW_ITEM_TYPE_VXLAN ||
+			item->type == RTE_FLOW_ITEM_TYPE_NVGRE)
+			item_num++;
+		if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN)
+			tun_type = ICE_SW_TUN_VXLAN;
+		if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE)
+			tun_type = ICE_SW_TUN_NVGRE;
+	}
+
+	list = rte_zmalloc(NULL, item_num * sizeof(*list), 0);
+	if (!list) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ITEM, actions,
+				   "No memory for PMD internal items");
+		goto out;
+	}
+	*lkup_list = list;
+
+	for (item = pattern; item->type !=
+			RTE_FLOW_ITEM_TYPE_END; item++) {
+		item_type = item->type;
+
+		switch (item_type) {
+		case RTE_FLOW_ITEM_TYPE_ETH:
+			eth_spec = item->spec;
+			eth_mask = item->mask;
+			if (eth_spec && eth_mask) {
+				list[t].type = (tun_type == ICE_NON_TUN) ?
+					ICE_MAC_OFOS : ICE_MAC_IL;
+				struct ice_ether_hdr *h;
+				struct ice_ether_hdr *m;
+				h = &list[t].h_u.eth_hdr;
+				m = &list[t].m_u.eth_hdr;
+				for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
+					if (eth_mask->src.addr_bytes[j] ==
+								UINT8_MAX) {
+						h->src_addr[j] =
+						eth_spec->src.addr_bytes[j];
+						m->src_addr[j] =
+						eth_mask->src.addr_bytes[j];
+					}
+					if (eth_mask->dst.addr_bytes[j] ==
+								UINT8_MAX) {
+						h->dst_addr[j] =
+						eth_spec->dst.addr_bytes[j];
+						m->dst_addr[j] =
+						eth_mask->dst.addr_bytes[j];
+					}
+				}
+				if (eth_mask->type == UINT16_MAX) {
+					h->ethtype_id =
+					rte_be_to_cpu_16(eth_spec->type);
+					m->ethtype_id = UINT16_MAX;
+				}
+				t++;
+			} else if (!eth_spec && !eth_mask) {
+				list[t].type = (tun_type == ICE_NON_TUN) ?
+					ICE_MAC_OFOS : ICE_MAC_IL;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_IPV4:
+			ipv4_spec = item->spec;
+			ipv4_mask = item->mask;
+			if (ipv4_spec && ipv4_mask) {
+				list[t].type = (tun_type == ICE_NON_TUN) ?
+					ICE_IPV4_OFOS : ICE_IPV4_IL;
+				if (ipv4_mask->hdr.src_addr == UINT32_MAX) {
+					list[t].h_u.ipv4_hdr.src_addr =
+						ipv4_spec->hdr.src_addr;
+					list[t].m_u.ipv4_hdr.src_addr =
+						UINT32_MAX;
+				}
+				if (ipv4_mask->hdr.dst_addr == UINT32_MAX) {
+					list[t].h_u.ipv4_hdr.dst_addr =
+						ipv4_spec->hdr.dst_addr;
+					list[t].m_u.ipv4_hdr.dst_addr =
+						UINT32_MAX;
+				}
+				if (ipv4_mask->hdr.time_to_live == UINT8_MAX) {
+					list[t].h_u.ipv4_hdr.time_to_live =
+						ipv4_spec->hdr.time_to_live;
+					list[t].m_u.ipv4_hdr.time_to_live =
+						UINT8_MAX;
+				}
+				if (ipv4_mask->hdr.next_proto_id == UINT8_MAX) {
+					list[t].h_u.ipv4_hdr.protocol =
+						ipv4_spec->hdr.next_proto_id;
+					list[t].m_u.ipv4_hdr.protocol =
+						UINT8_MAX;
+				}
+				if (ipv4_mask->hdr.type_of_service ==
+						UINT8_MAX) {
+					list[t].h_u.ipv4_hdr.tos =
+						ipv4_spec->hdr.type_of_service;
+					list[t].m_u.ipv4_hdr.tos = UINT8_MAX;
+				}
+				t++;
+			} else if (!ipv4_spec && !ipv4_mask) {
+				list[t].type = (tun_type == ICE_NON_TUN) ?
+					ICE_IPV4_OFOS : ICE_IPV4_IL;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_IPV6:
+			ipv6_spec = item->spec;
+			ipv6_mask = item->mask;
+			if (ipv6_spec && ipv6_mask) {
+				list[t].type = (tun_type == ICE_NON_TUN) ?
+					ICE_IPV6_OFOS : ICE_IPV6_IL;
+				struct ice_ipv6_hdr *f;
+				struct ice_ipv6_hdr *s;
+				f = &list[t].h_u.ice_ipv6_ofos_hdr;
+				s = &list[t].m_u.ice_ipv6_ofos_hdr;
+				for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
+					if (ipv6_mask->hdr.src_addr[j] ==
+								UINT8_MAX) {
+						f->src_addr[j] =
+						ipv6_spec->hdr.src_addr[j];
+						s->src_addr[j] =
+						ipv6_mask->hdr.src_addr[j];
+					}
+					if (ipv6_mask->hdr.dst_addr[j] ==
+								UINT8_MAX) {
+						f->dst_addr[j] =
+						ipv6_spec->hdr.dst_addr[j];
+						s->dst_addr[j] =
+						ipv6_mask->hdr.dst_addr[j];
+					}
+				}
+				if (ipv6_mask->hdr.proto == UINT8_MAX) {
+					f->next_hdr =
+						ipv6_spec->hdr.proto;
+					s->next_hdr = UINT8_MAX;
+				}
+				if (ipv6_mask->hdr.hop_limits == UINT8_MAX) {
+					f->hop_limit =
+						ipv6_spec->hdr.hop_limits;
+					s->hop_limit = UINT8_MAX;
+				}
+				t++;
+			} else if (!ipv6_spec && !ipv6_mask) {
+				list[t].type = (tun_type == ICE_NON_TUN) ?
+					ICE_IPV6_OFOS : ICE_IPV6_IL;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_UDP:
+			udp_spec = item->spec;
+			udp_mask = item->mask;
+			if (udp_spec && udp_mask) {
+				if (tun_type == ICE_SW_TUN_VXLAN &&
+						tunnel_valid == 0)
+					list[t].type = ICE_UDP_OF;
+				else
+					list[t].type = ICE_UDP_ILOS;
+				if (udp_mask->hdr.src_port == UINT16_MAX) {
+					list[t].h_u.l4_hdr.src_port =
+						udp_spec->hdr.src_port;
+					list[t].m_u.l4_hdr.src_port =
+						udp_mask->hdr.src_port;
+				}
+				if (udp_mask->hdr.dst_port == UINT16_MAX) {
+					list[t].h_u.l4_hdr.dst_port =
+						udp_spec->hdr.dst_port;
+					list[t].m_u.l4_hdr.dst_port =
+						udp_mask->hdr.dst_port;
+				}
+				t++;
+			} else if (!udp_spec && !udp_mask) {
+				list[t].type = ICE_UDP_ILOS;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_TCP:
+			tcp_spec = item->spec;
+			tcp_mask = item->mask;
+			if (tcp_spec && tcp_mask) {
+				list[t].type = ICE_TCP_IL;
+				if (tcp_mask->hdr.src_port == UINT16_MAX) {
+					list[t].h_u.l4_hdr.src_port =
+						tcp_spec->hdr.src_port;
+					list[t].m_u.l4_hdr.src_port =
+						tcp_mask->hdr.src_port;
+				}
+				if (tcp_mask->hdr.dst_port == UINT16_MAX) {
+					list[t].h_u.l4_hdr.dst_port =
+						tcp_spec->hdr.dst_port;
+					list[t].m_u.l4_hdr.dst_port =
+						tcp_mask->hdr.dst_port;
+				}
+				t++;
+			} else if (!tcp_spec && !tcp_mask) {
+				list[t].type = ICE_TCP_IL;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_SCTP:
+			sctp_spec = item->spec;
+			sctp_mask = item->mask;
+			if (sctp_spec && sctp_mask) {
+				list[t].type = ICE_SCTP_IL;
+				if (sctp_mask->hdr.src_port == UINT16_MAX) {
+					list[t].h_u.sctp_hdr.src_port =
+						sctp_spec->hdr.src_port;
+					list[t].m_u.sctp_hdr.src_port =
+						sctp_mask->hdr.src_port;
+				}
+				if (sctp_mask->hdr.dst_port == UINT16_MAX) {
+					list[t].h_u.sctp_hdr.dst_port =
+						sctp_spec->hdr.dst_port;
+					list[t].m_u.sctp_hdr.dst_port =
+						sctp_mask->hdr.dst_port;
+				}
+				t++;
+			} else if (!sctp_spec && !sctp_mask) {
+				list[t].type = ICE_SCTP_IL;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_VXLAN:
+			vxlan_spec = item->spec;
+			vxlan_mask = item->mask;
+			tunnel_valid = 1;
+			if (vxlan_spec && vxlan_mask) {
+				list[t].type = ICE_VXLAN;
+				if (vxlan_mask->vni[0] == UINT8_MAX &&
+					vxlan_mask->vni[1] == UINT8_MAX &&
+					vxlan_mask->vni[2] == UINT8_MAX) {
+					list[t].h_u.tnl_hdr.vni =
+						(vxlan_spec->vni[2] << 16) |
+						(vxlan_spec->vni[1] << 8) |
+						vxlan_spec->vni[0];
+					list[t].m_u.tnl_hdr.vni =
+						UINT32_MAX;
+				}
+				t++;
+			} else if (!vxlan_spec && !vxlan_mask) {
+				list[t].type = ICE_VXLAN;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_NVGRE:
+			nvgre_spec = item->spec;
+			nvgre_mask = item->mask;
+			tunnel_valid = 1;
+			if (nvgre_spec && nvgre_mask) {
+				list[t].type = ICE_NVGRE;
+				if (nvgre_mask->tni[0] == UINT8_MAX &&
+					nvgre_mask->tni[1] == UINT8_MAX &&
+					nvgre_mask->tni[2] == UINT8_MAX) {
+					list[t].h_u.nvgre_hdr.tni_flow =
+						(nvgre_spec->tni[2] << 16) |
+						(nvgre_spec->tni[1] << 8) |
+						nvgre_spec->tni[0];
+					list[t].m_u.nvgre_hdr.tni_flow =
+						UINT32_MAX;
+				}
+				t++;
+			} else if (!nvgre_spec && !nvgre_mask) {
+				list[t].type = ICE_NVGRE;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_VOID:
+		case RTE_FLOW_ITEM_TYPE_END:
+			break;
+
+		default:
+			rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ITEM, item,
+				   "Invalid pattern item.");
+			goto out;
+		}
+	}
+
+	rule_info->tun_type = tun_type;
+	*lkups_num = t;
+
+	return 0;
+out:
+	return -rte_errno;
+}
+
+/* For now, the ice switch filter action code only
+ * supports the QUEUE and DROP actions.
+ */
+static int
+ice_parse_switch_action(struct ice_pf *pf,
+				 const struct rte_flow_action *actions,
+				 struct rte_flow_error *error,
+				 struct ice_adv_rule_info *rule_info)
+{
+	struct ice_vsi *vsi = pf->main_vsi;
+	const struct rte_flow_action_queue *act_q;
+	uint16_t base_queue;
+	const struct rte_flow_action *action;
+	enum rte_flow_action_type action_type;
+
+	base_queue = pf->base_queue;
+	for (action = actions; action->type !=
+			RTE_FLOW_ACTION_TYPE_END; action++) {
+		action_type = action->type;
+		switch (action_type) {
+		case RTE_FLOW_ACTION_TYPE_QUEUE:
+			act_q = action->conf;
+			rule_info->sw_act.fltr_act =
+				ICE_FWD_TO_Q;
+			rule_info->sw_act.fwd_id.q_id =
+				base_queue + act_q->index;
+			if (act_q->index >=
+				pf->dev_data->nb_rx_queues) {
+				rte_flow_error_set(error,
+					EINVAL,
+					RTE_FLOW_ERROR_TYPE_ACTION,
+					actions, "Invalid queue ID"
+					" for switch filter.");
+				return -rte_errno;
+			}
+			break;
+
+		case RTE_FLOW_ACTION_TYPE_DROP:
+			rule_info->sw_act.fltr_act =
+				ICE_DROP_PACKET;
+			break;
+
+		case RTE_FLOW_ACTION_TYPE_VOID:
+			break;
+
+		default:
+			rte_flow_error_set(error,
+				EINVAL,
+				RTE_FLOW_ERROR_TYPE_ACTION,
+				actions,
+				"Invalid action type");
+			return -rte_errno;
+		}
+	}
+
+	rule_info->sw_act.vsi_handle = vsi->idx;
+	rule_info->rx = 1;
+	rule_info->sw_act.src = vsi->idx;
+
+	return 0;
+}
+
+static int
+ice_switch_rule_set(struct ice_pf *pf,
+			struct ice_adv_lkup_elem *list,
+			uint16_t lkups_cnt,
+			struct ice_adv_rule_info *rule_info,
+			struct rte_flow *flow,
+			struct rte_flow_error *error)
+{
+	struct ice_hw *hw = ICE_PF_TO_HW(pf);
+	int ret;
+	struct ice_rule_query_data rule_added = {0};
+	struct ice_rule_query_data *filter_ptr;
+
+	if (lkups_cnt > ICE_MAX_CHAIN_WORDS) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+			"item number too large for rule");
+		return -rte_errno;
+	}
+	if (!list) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+			"lookup list should not be NULL");
+		return -rte_errno;
+	}
+
+	ret = ice_add_adv_rule(hw, list, lkups_cnt, rule_info, &rule_added);
+
+	if (!ret) {
+		filter_ptr = rte_zmalloc("ice_switch_filter",
+			sizeof(struct ice_rule_query_data), 0);
+		if (!filter_ptr) {
+			PMD_DRV_LOG(ERR, "failed to allocate memory");
+			return -EINVAL;
+		}
+		flow->rule = filter_ptr;
+		rte_memcpy(filter_ptr,
+			&rule_added,
+			sizeof(struct ice_rule_query_data));
+	}
+
+	return ret;
+}
+
+int
+ice_create_switch_filter(struct ice_pf *pf,
+			const struct rte_flow_item pattern[],
+			const struct rte_flow_action actions[],
+			struct rte_flow *flow,
+			struct rte_flow_error *error)
+{
+	int ret = 0;
+	struct ice_adv_rule_info rule_info = {0};
+	struct ice_adv_lkup_elem *list = NULL;
+	uint16_t lkups_num = 0;
+
+	ret = ice_parse_switch_filter(pattern, actions, error,
+			&rule_info, &list, &lkups_num);
+	if (ret)
+		goto error;
+
+	ret = ice_parse_switch_action(pf, actions, error, &rule_info);
+	if (ret)
+		goto error;
+
+	ret = ice_switch_rule_set(pf, list, lkups_num, &rule_info, flow, error);
+	if (ret)
+		goto error;
+
+	rte_free(list);
+	return 0;
+
+error:
+	rte_free(list);
+
+	return -rte_errno;
+}
+
+int
+ice_destroy_switch_filter(struct ice_pf *pf,
+			struct rte_flow *flow,
+			struct rte_flow_error *error)
+{
+	struct ice_hw *hw = ICE_PF_TO_HW(pf);
+	int ret;
+	struct ice_rule_query_data *filter_ptr;
+
+	filter_ptr = (struct ice_rule_query_data *)
+			flow->rule;
+
+	if (!filter_ptr) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+			"no such flow"
+			" created by switch filter");
+		return -rte_errno;
+	}
+
+	ret = ice_rem_adv_rule_by_id(hw, filter_ptr);
+	if (ret) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+			"fail to destroy switch filter rule");
+		return -rte_errno;
+	}
+
+	rte_free(filter_ptr);
+	return ret;
+}
+
+void
+ice_free_switch_filter_rule(void *rule)
+{
+	struct ice_rule_query_data *filter_ptr;
+
+	filter_ptr = (struct ice_rule_query_data *)rule;
+
+	rte_free(filter_ptr);
+}
diff --git a/drivers/net/ice/ice_switch_filter.h b/drivers/net/ice/ice_switch_filter.h
new file mode 100644
index 0000000..cea4799
--- /dev/null
+++ b/drivers/net/ice/ice_switch_filter.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+#ifndef _ICE_SWITCH_FILTER_H_
+#define _ICE_SWITCH_FILTER_H_
+
+#include "base/ice_switch.h"
+#include "base/ice_type.h"
+#include "ice_ethdev.h"
+
+int
+ice_create_switch_filter(struct ice_pf *pf,
+			const struct rte_flow_item pattern[],
+			const struct rte_flow_action actions[],
+			struct rte_flow *flow,
+			struct rte_flow_error *error);
+int
+ice_destroy_switch_filter(struct ice_pf *pf,
+			struct rte_flow *flow,
+			struct rte_flow_error *error);
+void
+ice_free_switch_filter_rule(void *rule);
+#endif /* _ICE_SWITCH_FILTER_H_ */
diff --git a/drivers/net/ice/meson.build b/drivers/net/ice/meson.build
index 2bec688..8697676 100644
--- a/drivers/net/ice/meson.build
+++ b/drivers/net/ice/meson.build
@@ -6,7 +6,8 @@ objs = [base_objs]
 
 sources = files(
 	'ice_ethdev.c',
-	'ice_rxtx.c'
+	'ice_rxtx.c',
+	'ice_switch_filter.c'
 	)
 
 deps += ['hash']
-- 
2.9.5


^ permalink raw reply	[flat|nested] 73+ messages in thread

* [dpdk-dev] [PATCH v6 2/3] net/ice: add generic flow API
  2019-06-24  6:15 ` [dpdk-dev] [PATCH v6 " Qiming Yang
  2019-06-24  6:15   ` [dpdk-dev] [PATCH v6 1/3] net/ice: enable switch filter Qiming Yang
@ 2019-06-24  6:15   ` Qiming Yang
  2019-06-24  6:15   ` [dpdk-dev] [PATCH v6 3/3] net/ice: add UDP tunnel port support Qiming Yang
  2 siblings, 0 replies; 73+ messages in thread
From: Qiming Yang @ 2019-06-24  6:15 UTC (permalink / raw)
  To: dev; +Cc: Qiming Yang

This patch adds ice_flow_create, ice_flow_destroy,
ice_flow_flush and ice_flow_validate support;
these are used to handle all the generic filters.
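
For reference, these callbacks are exposed to applications through the
standard rte_flow entry points; a minimal, hypothetical usage sequence
(the helper name is made up, and pattern/actions are assumed to be built
elsewhere) could be:

#include <rte_flow.h>

/* Validate a rule, create it only if the PMD accepts it, and finally
 * flush every rule owned by the port.
 */
static int
exercise_flow_api(uint16_t port_id,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item pattern[],
		  const struct rte_flow_action actions[])
{
	struct rte_flow_error err;
	struct rte_flow *flow;

	if (rte_flow_validate(port_id, attr, pattern, actions, &err) != 0)
		return -1;		/* reported via ice_flow_validate() */

	flow = rte_flow_create(port_id, attr, pattern, actions, &err);
	if (!flow)
		return -1;		/* reported via ice_flow_create() */

	/* ... traffic runs ... */

	return rte_flow_flush(port_id, &err);	/* ice_flow_flush() */
}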

Signed-off-by: Qiming Yang <qiming.yang@intel.com>
---
 drivers/net/ice/Makefile           |   1 +
 drivers/net/ice/ice_ethdev.c       |  44 +++
 drivers/net/ice/ice_ethdev.h       |   5 +
 drivers/net/ice/ice_generic_flow.c | 683 +++++++++++++++++++++++++++++++++++++
 drivers/net/ice/ice_generic_flow.h | 654 +++++++++++++++++++++++++++++++++++
 drivers/net/ice/meson.build        |   3 +-
 6 files changed, 1389 insertions(+), 1 deletion(-)
 create mode 100644 drivers/net/ice/ice_generic_flow.c
 create mode 100644 drivers/net/ice/ice_generic_flow.h

diff --git a/drivers/net/ice/Makefile b/drivers/net/ice/Makefile
index b10d826..32abeb6 100644
--- a/drivers/net/ice/Makefile
+++ b/drivers/net/ice/Makefile
@@ -79,5 +79,6 @@ endif
 ifeq ($(CC_AVX2_SUPPORT), 1)
 	SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_rxtx_vec_avx2.c
 endif
+SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_generic_flow.c
 
 include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index a94aa7e..8ee06d1 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -15,6 +15,7 @@
 #include "base/ice_dcb.h"
 #include "ice_ethdev.h"
 #include "ice_rxtx.h"
+#include "ice_switch_filter.h"
 
 #define ICE_MAX_QP_NUM "max_queue_pair_num"
 #define ICE_DFLT_OUTER_TAG_TYPE ICE_AQ_VSI_OUTER_TAG_VLAN_9100
@@ -83,6 +84,10 @@ static int ice_xstats_get(struct rte_eth_dev *dev,
 static int ice_xstats_get_names(struct rte_eth_dev *dev,
 				struct rte_eth_xstat_name *xstats_names,
 				unsigned int limit);
+static int ice_dev_filter_ctrl(struct rte_eth_dev *dev,
+			enum rte_filter_type filter_type,
+			enum rte_filter_op filter_op,
+			void *arg);
 
 static const struct rte_pci_id pci_id_ice_map[] = {
 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_BACKPLANE) },
@@ -141,6 +146,7 @@ static const struct eth_dev_ops ice_eth_dev_ops = {
 	.xstats_get                   = ice_xstats_get,
 	.xstats_get_names             = ice_xstats_get_names,
 	.xstats_reset                 = ice_stats_reset,
+	.filter_ctrl                  = ice_dev_filter_ctrl,
 };
 
 /* store statistics names and its offset in stats structure */
@@ -1478,6 +1484,8 @@ ice_dev_init(struct rte_eth_dev *dev)
 	/* get base queue pairs index  in the device */
 	ice_base_queue_get(pf);
 
+	TAILQ_INIT(&pf->flow_list);
+
 	return 0;
 
 err_pf_setup:
@@ -1620,6 +1628,8 @@ ice_dev_uninit(struct rte_eth_dev *dev)
 {
 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	struct rte_flow *p_flow;
 
 	ice_dev_close(dev);
 
@@ -1637,6 +1647,13 @@ ice_dev_uninit(struct rte_eth_dev *dev)
 	rte_intr_callback_unregister(intr_handle,
 				     ice_interrupt_handler, dev);
 
+	/* Remove all flows */
+	while ((p_flow = TAILQ_FIRST(&pf->flow_list))) {
+		TAILQ_REMOVE(&pf->flow_list, p_flow, node);
+		ice_free_switch_filter_rule(p_flow->rule);
+		rte_free(p_flow);
+	}
+
 	return 0;
 }
 
@@ -3622,6 +3639,33 @@ static int ice_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
 }
 
 static int
+ice_dev_filter_ctrl(struct rte_eth_dev *dev,
+		     enum rte_filter_type filter_type,
+		     enum rte_filter_op filter_op,
+		     void *arg)
+{
+	int ret = 0;
+
+	if (!dev)
+		return -EINVAL;
+
+	switch (filter_type) {
+	case RTE_ETH_FILTER_GENERIC:
+		if (filter_op != RTE_ETH_FILTER_GET)
+			return -EINVAL;
+		*(const void **)arg = &ice_flow_ops;
+		break;
+	default:
+		PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
+					filter_type);
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+static int
 ice_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 	      struct rte_pci_device *pci_dev)
 {
diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index 50b966c..8a52239 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -234,12 +234,16 @@ struct ice_vsi {
 	bool offset_loaded;
 };
 
+extern const struct rte_flow_ops ice_flow_ops;
+
 /* Struct to store flow created. */
 struct rte_flow {
 	TAILQ_ENTRY(rte_flow) node;
 	void *rule;
 };
 
+TAILQ_HEAD(ice_flow_list, rte_flow);
+
 struct ice_pf {
 	struct ice_adapter *adapter; /* The adapter this PF associate to */
 	struct ice_vsi *main_vsi; /* pointer to main VSI structure */
@@ -266,6 +270,7 @@ struct ice_pf {
 	struct ice_eth_stats internal_stats;
 	bool offset_loaded;
 	bool adapter_stopped;
+	struct ice_flow_list flow_list;
 };
 
 /**
diff --git a/drivers/net/ice/ice_generic_flow.c b/drivers/net/ice/ice_generic_flow.c
new file mode 100644
index 0000000..b26e5ca
--- /dev/null
+++ b/drivers/net/ice/ice_generic_flow.c
@@ -0,0 +1,683 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_malloc.h>
+
+#include "ice_ethdev.h"
+#include "ice_generic_flow.h"
+#include "ice_switch_filter.h"
+
+static int ice_flow_validate(struct rte_eth_dev *dev,
+		const struct rte_flow_attr *attr,
+		const struct rte_flow_item pattern[],
+		const struct rte_flow_action actions[],
+		struct rte_flow_error *error);
+static struct rte_flow *ice_flow_create(struct rte_eth_dev *dev,
+		const struct rte_flow_attr *attr,
+		const struct rte_flow_item pattern[],
+		const struct rte_flow_action actions[],
+		struct rte_flow_error *error);
+static int ice_flow_destroy(struct rte_eth_dev *dev,
+		struct rte_flow *flow,
+		struct rte_flow_error *error);
+static int ice_flow_flush(struct rte_eth_dev *dev,
+		struct rte_flow_error *error);
+
+const struct rte_flow_ops ice_flow_ops = {
+	.validate = ice_flow_validate,
+	.create = ice_flow_create,
+	.destroy = ice_flow_destroy,
+	.flush = ice_flow_flush,
+};
+
+static int
+ice_flow_valid_attr(const struct rte_flow_attr *attr,
+		     struct rte_flow_error *error)
+{
+	/* Must be input direction */
+	if (!attr->ingress) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+				   attr, "Only support ingress.");
+		return -rte_errno;
+	}
+
+	/* Not supported */
+	if (attr->egress) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+				   attr, "Not support egress.");
+		return -rte_errno;
+	}
+
+	/* Not supported */
+	if (attr->priority) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+				   attr, "Not support priority.");
+		return -rte_errno;
+	}
+
+	/* Not supported */
+	if (attr->group) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+				   attr, "Not support group.");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+/* Find the first VOID or non-VOID item pointer */
+static const struct rte_flow_item *
+ice_find_first_item(const struct rte_flow_item *item, bool is_void)
+{
+	bool is_find;
+
+	while (item->type != RTE_FLOW_ITEM_TYPE_END) {
+		if (is_void)
+			is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
+		else
+			is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
+		if (is_find)
+			break;
+		item++;
+	}
+	return item;
+}
+
+/* Skip all VOID items of the pattern */
+static void
+ice_pattern_skip_void_item(struct rte_flow_item *items,
+			    const struct rte_flow_item *pattern)
+{
+	uint32_t cpy_count = 0;
+	const struct rte_flow_item *pb = pattern, *pe = pattern;
+
+	for (;;) {
+		/* Find a non-void item first */
+		pb = ice_find_first_item(pb, false);
+		if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
+			pe = pb;
+			break;
+		}
+
+		/* Find a void item */
+		pe = ice_find_first_item(pb + 1, true);
+
+		cpy_count = pe - pb;
+		rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);
+
+		items += cpy_count;
+
+		if (pe->type == RTE_FLOW_ITEM_TYPE_END) {
+			pb = pe;
+			break;
+		}
+
+		pb = pe + 1;
+	}
+	/* Copy the END item. */
+	rte_memcpy(items, pe, sizeof(struct rte_flow_item));
+}
+
+/* Check if the pattern matches a supported item type array */
+static bool
+ice_match_pattern(enum rte_flow_item_type *item_array,
+		const struct rte_flow_item *pattern)
+{
+	const struct rte_flow_item *item = pattern;
+
+	while ((*item_array == item->type) &&
+	       (*item_array != RTE_FLOW_ITEM_TYPE_END)) {
+		item_array++;
+		item++;
+	}
+
+	return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
+		item->type == RTE_FLOW_ITEM_TYPE_END);
+}
+
+static uint64_t ice_flow_valid_pattern(const struct rte_flow_item pattern[],
+		struct rte_flow_error *error)
+{
+	uint16_t i = 0;
+	uint64_t inset;
+	struct rte_flow_item *items; /* used for pattern without VOID items */
+	uint32_t item_num = 0; /* non-void item number */
+
+	/* Get the non-void item number of pattern */
+	while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
+		if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
+			item_num++;
+		i++;
+	}
+	item_num++;
+
+	items = rte_zmalloc("ice_pattern",
+			    item_num * sizeof(struct rte_flow_item), 0);
+	if (!items) {
+		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+				   NULL, "No memory for PMD internal items.");
+		return -ENOMEM;
+	}
+
+	ice_pattern_skip_void_item(items, pattern);
+
+	for (i = 0; i < RTE_DIM(ice_supported_patterns); i++)
+		if (ice_match_pattern(ice_supported_patterns[i].items,
+				      items)) {
+			inset = ice_supported_patterns[i].sw_fields;
+			rte_free(items);
+			return inset;
+		}
+	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+			   pattern, "Unsupported pattern");
+
+	rte_free(items);
+	return 0;
+}
+
+static uint64_t ice_get_flow_field(const struct rte_flow_item pattern[],
+			struct rte_flow_error *error)
+{
+	const struct rte_flow_item *item = pattern;
+	const struct rte_flow_item_eth *eth_spec, *eth_mask;
+	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
+	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
+	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
+	const struct rte_flow_item_udp *udp_spec, *udp_mask;
+	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
+	const struct rte_flow_item_icmp *icmp_mask;
+	const struct rte_flow_item_icmp6 *icmp6_mask;
+	const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
+	const struct rte_flow_item_nvgre *nvgre_spec, *nvgre_mask;
+	enum rte_flow_item_type item_type;
+	uint8_t  ipv6_addr_mask[16] = {
+		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
+	uint64_t input_set = ICE_INSET_NONE;
+	bool outer_ip = true;
+	bool outer_l4 = true;
+
+	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+		if (item->last) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Not support range");
+			return 0;
+		}
+		item_type = item->type;
+		switch (item_type) {
+		case RTE_FLOW_ITEM_TYPE_ETH:
+			eth_spec = item->spec;
+			eth_mask = item->mask;
+
+			if (eth_spec && eth_mask) {
+				if (rte_is_broadcast_ether_addr(&eth_mask->src))
+					input_set |= ICE_INSET_SMAC;
+				if (rte_is_broadcast_ether_addr(&eth_mask->dst))
+					input_set |= ICE_INSET_DMAC;
+				if (eth_mask->type == RTE_BE16(0xffff))
+					input_set |= ICE_INSET_ETHERTYPE;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_IPV4:
+			ipv4_spec = item->spec;
+			ipv4_mask = item->mask;
+
+			if (!(ipv4_spec && ipv4_mask)) {
+				rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Invalid IPv4 spec or mask.");
+				return 0;
+			}
+
+			/* Check IPv4 mask and update input set */
+			if (ipv4_mask->hdr.version_ihl ||
+			    ipv4_mask->hdr.total_length ||
+			    ipv4_mask->hdr.packet_id ||
+			    ipv4_mask->hdr.hdr_checksum) {
+				rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Invalid IPv4 mask.");
+				return 0;
+			}
+
+			if (outer_ip) {
+				if (ipv4_mask->hdr.src_addr == UINT32_MAX)
+					input_set |= ICE_INSET_IPV4_SRC;
+				if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
+					input_set |= ICE_INSET_IPV4_DST;
+				if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
+					input_set |= ICE_INSET_IPV4_TOS;
+				if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
+					input_set |= ICE_INSET_IPV4_TTL;
+				if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
+					input_set |= ICE_INSET_IPV4_PROTO;
+				outer_ip = false;
+			} else {
+				if (ipv4_mask->hdr.src_addr == UINT32_MAX)
+					input_set |= ICE_INSET_TUN_IPV4_SRC;
+				if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
+					input_set |= ICE_INSET_TUN_IPV4_DST;
+				if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
+					input_set |= ICE_INSET_TUN_IPV4_TTL;
+				if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
+					input_set |= ICE_INSET_TUN_IPV4_PROTO;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_IPV6:
+			ipv6_spec = item->spec;
+			ipv6_mask = item->mask;
+
+			if (!(ipv6_spec && ipv6_mask)) {
+				rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ITEM,
+					item, "Invalid IPv6 spec or mask");
+				return 0;
+			}
+
+			if (ipv6_mask->hdr.payload_len ||
+			    ipv6_mask->hdr.vtc_flow) {
+				rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Invalid IPv6 mask");
+				return 0;
+			}
+
+			if (outer_ip) {
+				if (!memcmp(ipv6_mask->hdr.src_addr,
+					    ipv6_addr_mask,
+					    RTE_DIM(ipv6_mask->hdr.src_addr)))
+					input_set |= ICE_INSET_IPV6_SRC;
+				if (!memcmp(ipv6_mask->hdr.dst_addr,
+					    ipv6_addr_mask,
+					    RTE_DIM(ipv6_mask->hdr.dst_addr)))
+					input_set |= ICE_INSET_IPV6_DST;
+				if (ipv6_mask->hdr.proto == UINT8_MAX)
+					input_set |= ICE_INSET_IPV6_PROTO;
+				if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
+					input_set |= ICE_INSET_IPV6_HOP_LIMIT;
+				outer_ip = false;
+			} else {
+				if (!memcmp(ipv6_mask->hdr.src_addr,
+					    ipv6_addr_mask,
+					    RTE_DIM(ipv6_mask->hdr.src_addr)))
+					input_set |= ICE_INSET_TUN_IPV6_SRC;
+				if (!memcmp(ipv6_mask->hdr.dst_addr,
+					    ipv6_addr_mask,
+					    RTE_DIM(ipv6_mask->hdr.dst_addr)))
+					input_set |= ICE_INSET_TUN_IPV6_DST;
+				if (ipv6_mask->hdr.proto == UINT8_MAX)
+					input_set |= ICE_INSET_TUN_IPV6_PROTO;
+				if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
+					input_set |= ICE_INSET_TUN_IPV6_TTL;
+			}
+
+			break;
+		case RTE_FLOW_ITEM_TYPE_UDP:
+			udp_spec = item->spec;
+			udp_mask = item->mask;
+
+			if (!(udp_spec && udp_mask)) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item, "Invalid UDP mask");
+				return 0;
+			}
+
+			/* Check UDP mask and update input set*/
+			if (udp_mask->hdr.dgram_len ||
+			    udp_mask->hdr.dgram_cksum) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid UDP mask");
+				return 0;
+			}
+
+			if (outer_l4) {
+				if (udp_mask->hdr.src_port == UINT16_MAX)
+					input_set |= ICE_INSET_SRC_PORT;
+				if (udp_mask->hdr.dst_port == UINT16_MAX)
+					input_set |= ICE_INSET_DST_PORT;
+				outer_l4 = false;
+			} else {
+				if (udp_mask->hdr.src_port == UINT16_MAX)
+					input_set |= ICE_INSET_TUN_SRC_PORT;
+				if (udp_mask->hdr.dst_port == UINT16_MAX)
+					input_set |= ICE_INSET_TUN_DST_PORT;
+			}
+
+			break;
+		case RTE_FLOW_ITEM_TYPE_TCP:
+			tcp_spec = item->spec;
+			tcp_mask = item->mask;
+
+			if (!(tcp_spec && tcp_mask)) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item, "Invalid TCP mask");
+				return 0;
+			}
+
+			/* Check TCP mask and update input set */
+			if (tcp_mask->hdr.sent_seq ||
+			    tcp_mask->hdr.recv_ack ||
+			    tcp_mask->hdr.data_off ||
+			    tcp_mask->hdr.tcp_flags ||
+			    tcp_mask->hdr.rx_win ||
+			    tcp_mask->hdr.cksum ||
+			    tcp_mask->hdr.tcp_urp) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid TCP mask");
+				return 0;
+			}
+
+			if (outer_l4) {
+				if (tcp_mask->hdr.src_port == UINT16_MAX)
+					input_set |= ICE_INSET_SRC_PORT;
+				if (tcp_mask->hdr.dst_port == UINT16_MAX)
+					input_set |= ICE_INSET_DST_PORT;
+				outer_l4 = false;
+			} else {
+				if (tcp_mask->hdr.src_port == UINT16_MAX)
+					input_set |= ICE_INSET_TUN_SRC_PORT;
+				if (tcp_mask->hdr.dst_port == UINT16_MAX)
+					input_set |= ICE_INSET_TUN_DST_PORT;
+			}
+
+			break;
+		case RTE_FLOW_ITEM_TYPE_SCTP:
+			sctp_spec = item->spec;
+			sctp_mask = item->mask;
+
+			if (!(sctp_spec && sctp_mask)) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item, "Invalid SCTP mask");
+				return 0;
+			}
+
+			/* Check SCTP mask and update input set */
+			if (sctp_mask->hdr.cksum) {
+				rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Invalid SCTP mask");
+				return 0;
+			}
+
+			if (outer_l4) {
+				if (sctp_mask->hdr.src_port == UINT16_MAX)
+					input_set |= ICE_INSET_SRC_PORT;
+				if (sctp_mask->hdr.dst_port == UINT16_MAX)
+					input_set |= ICE_INSET_DST_PORT;
+				outer_l4 = false;
+			} else {
+				if (sctp_mask->hdr.src_port == UINT16_MAX)
+					input_set |= ICE_INSET_TUN_SRC_PORT;
+				if (sctp_mask->hdr.dst_port == UINT16_MAX)
+					input_set |= ICE_INSET_TUN_DST_PORT;
+			}
+
+			break;
+		case RTE_FLOW_ITEM_TYPE_ICMP:
+			icmp_mask = item->mask;
+			if (icmp_mask->hdr.icmp_code ||
+			    icmp_mask->hdr.icmp_cksum ||
+			    icmp_mask->hdr.icmp_ident ||
+			    icmp_mask->hdr.icmp_seq_nb) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid ICMP mask");
+				return 0;
+			}
+
+			if (icmp_mask->hdr.icmp_type == UINT8_MAX)
+				input_set |= ICE_INSET_ICMP;
+			break;
+		case RTE_FLOW_ITEM_TYPE_ICMP6:
+			icmp6_mask = item->mask;
+			if (icmp6_mask->code ||
+			    icmp6_mask->checksum) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid ICMP6 mask");
+				return 0;
+			}
+
+			if (icmp6_mask->type == UINT8_MAX)
+				input_set |= ICE_INSET_ICMP6;
+			break;
+		case RTE_FLOW_ITEM_TYPE_VXLAN:
+			vxlan_spec = item->spec;
+			vxlan_mask = item->mask;
+			/* Check if VXLAN item is used to describe protocol.
+			 * If yes, both spec and mask should be NULL.
+			 * If no, both spec and mask shouldn't be NULL.
+			 */
+			if ((!vxlan_spec && vxlan_mask) ||
+			    (vxlan_spec && !vxlan_mask)) {
+				rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Invalid VXLAN item");
+				return 0;
+			}
+
+			break;
+		case RTE_FLOW_ITEM_TYPE_NVGRE:
+			nvgre_spec = item->spec;
+			nvgre_mask = item->mask;
+			/* Check if NVGRE item is used to describe protocol.
+			 * If yes, both spec and mask should be NULL.
+			 * If no, both spec and mask shouldn't be NULL.
+			 */
+			if ((!nvgre_spec && nvgre_mask) ||
+			    (nvgre_spec && !nvgre_mask)) {
+				rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Invalid NVGRE item");
+				return 0;
+			}
+
+			break;
+		default:
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Unsupported pattern item");
+			break;
+		}
+	}
+	return input_set;
+}
+
+static int ice_flow_valid_inset(const struct rte_flow_item pattern[],
+			uint64_t inset, struct rte_flow_error *error)
+{
+	uint64_t fields;
+
+	/* get valid field */
+	fields = ice_get_flow_field(pattern, error);
+	if (!fields || fields & (~inset)) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
+				   pattern,
+				   "Invalid input set");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+static int ice_flow_valid_action(const struct rte_flow_action *actions,
+				       struct rte_flow_error *error)
+{
+	switch (actions->type) {
+	case RTE_FLOW_ACTION_TYPE_QUEUE:
+		break;
+	case RTE_FLOW_ACTION_TYPE_DROP:
+		break;
+	default:
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ACTION, actions,
+				   "Invalid action.");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+static int
+ice_flow_validate(__rte_unused struct rte_eth_dev *dev,
+		   const struct rte_flow_attr *attr,
+		   const struct rte_flow_item pattern[],
+		   const struct rte_flow_action actions[],
+		   struct rte_flow_error *error)
+{
+	uint64_t inset = 0;
+	int ret = ICE_ERR_NOT_SUPPORTED;
+
+	if (!pattern) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+				   NULL, "NULL pattern.");
+		return -rte_errno;
+	}
+
+	if (!actions) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+				   NULL, "NULL action.");
+		return -rte_errno;
+	}
+
+	if (!attr) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR,
+				   NULL, "NULL attribute.");
+		return -rte_errno;
+	}
+
+	ret = ice_flow_valid_attr(attr, error);
+	if (ret)
+		return ret;
+
+	inset = ice_flow_valid_pattern(pattern, error);
+	if (!inset)
+		return -rte_errno;
+
+	ret = ice_flow_valid_inset(pattern, inset, error);
+	if (ret)
+		return ret;
+
+	ret = ice_flow_valid_action(actions, error);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static struct rte_flow *
+ice_flow_create(struct rte_eth_dev *dev,
+		 const struct rte_flow_attr *attr,
+		 const struct rte_flow_item pattern[],
+		 const struct rte_flow_action actions[],
+		 struct rte_flow_error *error)
+{
+	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	struct rte_flow *flow = NULL;
+	int ret;
+
+	flow = rte_zmalloc("ice_flow", sizeof(struct rte_flow), 0);
+	if (!flow) {
+		rte_flow_error_set(error, ENOMEM,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Failed to allocate memory");
+		return flow;
+	}
+
+	ret = ice_flow_validate(dev, attr, pattern, actions, error);
+	if (ret < 0)
+		goto free_flow;
+
+	ret = ice_create_switch_filter(pf, pattern, actions, flow, error);
+	if (ret)
+		goto free_flow;
+
+	TAILQ_INSERT_TAIL(&pf->flow_list, flow, node);
+	return flow;
+
+free_flow:
+	rte_flow_error_set(error, -ret,
+			   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+			   "Failed to create flow.");
+	rte_free(flow);
+	return NULL;
+}
+
+static int
+ice_flow_destroy(struct rte_eth_dev *dev,
+		 struct rte_flow *flow,
+		 struct rte_flow_error *error)
+{
+	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	int ret = 0;
+
+	ret = ice_destroy_switch_filter(pf, flow, error);
+
+	if (!ret) {
+		TAILQ_REMOVE(&pf->flow_list, flow, node);
+		rte_free(flow);
+	} else {
+		rte_flow_error_set(error, -ret,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Failed to destroy flow.");
+	}
+
+	return ret;
+}
+
+static int
+ice_flow_flush(struct rte_eth_dev *dev,
+	       struct rte_flow_error *error)
+{
+	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	struct rte_flow *p_flow;
+	int ret = 0;
+
+	while ((p_flow = TAILQ_FIRST(&pf->flow_list))) {
+		ret = ice_flow_destroy(dev, p_flow, error);
+		if (ret) {
+			rte_flow_error_set(error, -ret,
+					   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+					   "Failed to flush SW flows.");
+			return -rte_errno;
+		}
+	}
+
+	return ret;
+}
diff --git a/drivers/net/ice/ice_generic_flow.h b/drivers/net/ice/ice_generic_flow.h
new file mode 100644
index 0000000..3264c41
--- /dev/null
+++ b/drivers/net/ice/ice_generic_flow.h
@@ -0,0 +1,654 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+#ifndef _ICE_GENERIC_FLOW_H_
+#define _ICE_GENERIC_FLOW_H_
+
+#include <rte_flow_driver.h>
+
+struct ice_flow_pattern {
+	enum rte_flow_item_type *items;
+	uint64_t sw_fields;
+};
+
+#define ICE_INSET_NONE            0x0000000000000000ULL
+
+/* bit0 ~ bit 7 */
+#define ICE_INSET_SMAC            0x0000000000000001ULL
+#define ICE_INSET_DMAC            0x0000000000000002ULL
+#define ICE_INSET_ETHERTYPE       0x0000000000000020ULL
+
+/* bit 8 ~ bit 15 */
+#define ICE_INSET_IPV4_SRC        0x0000000000000100ULL
+#define ICE_INSET_IPV4_DST        0x0000000000000200ULL
+#define ICE_INSET_IPV6_SRC        0x0000000000000400ULL
+#define ICE_INSET_IPV6_DST        0x0000000000000800ULL
+#define ICE_INSET_SRC_PORT        0x0000000000001000ULL
+#define ICE_INSET_DST_PORT        0x0000000000002000ULL
+#define ICE_INSET_ARP             0x0000000000004000ULL
+
+/* bit 16 ~ bit 31 */
+#define ICE_INSET_IPV4_TOS        0x0000000000010000ULL
+#define ICE_INSET_IPV4_PROTO      0x0000000000020000ULL
+#define ICE_INSET_IPV4_TTL        0x0000000000040000ULL
+#define ICE_INSET_IPV6_PROTO      0x0000000000200000ULL
+#define ICE_INSET_IPV6_HOP_LIMIT  0x0000000000400000ULL
+#define ICE_INSET_ICMP            0x0000000001000000ULL
+#define ICE_INSET_ICMP6           0x0000000002000000ULL
+
+/* bit 32 ~ bit 47, tunnel fields */
+#define ICE_INSET_TUN_SMAC           0x0000000100000000ULL
+#define ICE_INSET_TUN_DMAC           0x0000000200000000ULL
+#define ICE_INSET_TUN_IPV4_SRC       0x0000000400000000ULL
+#define ICE_INSET_TUN_IPV4_DST       0x0000000800000000ULL
+#define ICE_INSET_TUN_IPV4_TTL       0x0000001000000000ULL
+#define ICE_INSET_TUN_IPV4_PROTO     0x0000002000000000ULL
+#define ICE_INSET_TUN_IPV6_SRC       0x0000004000000000ULL
+#define ICE_INSET_TUN_IPV6_DST       0x0000008000000000ULL
+#define ICE_INSET_TUN_IPV6_TTL       0x0000010000000000ULL
+#define ICE_INSET_TUN_IPV6_PROTO     0x0000020000000000ULL
+#define ICE_INSET_TUN_SRC_PORT       0x0000040000000000ULL
+#define ICE_INSET_TUN_DST_PORT       0x0000080000000000ULL
+#define ICE_INSET_TUN_ID             0x0000100000000000ULL
+
+/* bit 48 ~ bit 55 */
+#define ICE_INSET_LAST_ETHER_TYPE 0x0001000000000000ULL
+
+#define ICE_FLAG_VLAN_INNER  0x00000001ULL
+#define ICE_FLAG_VLAN_OUTER  0x00000002ULL
+
+#define INSET_ETHER ( \
+	ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)
+#define INSET_MAC_IPV4 ( \
+	ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
+	ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_TOS)
+#define INSET_MAC_IPV4_L4 ( \
+	ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
+	ICE_INSET_IPV4_TOS | ICE_INSET_DST_PORT | \
+	ICE_INSET_SRC_PORT)
+#define INSET_MAC_IPV4_ICMP ( \
+	ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
+	ICE_INSET_IPV4_TOS | ICE_INSET_ICMP)
+#define INSET_MAC_IPV6 ( \
+	ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
+	ICE_INSET_IPV6_PROTO | ICE_INSET_IPV6_HOP_LIMIT)
+#define INSET_MAC_IPV6_L4 ( \
+	ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
+	ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_DST_PORT | \
+	ICE_INSET_SRC_PORT)
+#define INSET_MAC_IPV6_ICMP ( \
+	ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
+	ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_ICMP6)
+#define INSET_TUNNEL_IPV4_TYPE1 ( \
+	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
+	ICE_INSET_TUN_IPV4_TTL | ICE_INSET_TUN_IPV4_PROTO)
+#define INSET_TUNNEL_IPV4_TYPE2 ( \
+	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
+	ICE_INSET_TUN_IPV4_TTL | ICE_INSET_TUN_IPV4_PROTO | \
+	ICE_INSET_TUN_SRC_PORT | ICE_INSET_TUN_DST_PORT)
+#define INSET_TUNNEL_IPV4_TYPE3 ( \
+	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
+	ICE_INSET_TUN_IPV4_TTL | ICE_INSET_ICMP)
+#define INSET_TUNNEL_IPV6_TYPE1 ( \
+	ICE_INSET_TUN_IPV6_SRC | ICE_INSET_TUN_IPV6_DST | \
+	ICE_INSET_TUN_IPV6_TTL | ICE_INSET_TUN_IPV6_PROTO)
+#define INSET_TUNNEL_IPV6_TYPE2 ( \
+	ICE_INSET_TUN_IPV6_SRC | ICE_INSET_TUN_IPV6_DST | \
+	ICE_INSET_TUN_IPV6_TTL | ICE_INSET_TUN_IPV6_PROTO | \
+	ICE_INSET_TUN_SRC_PORT | ICE_INSET_TUN_DST_PORT)
+#define INSET_TUNNEL_IPV6_TYPE3 ( \
+	ICE_INSET_TUN_IPV6_SRC | ICE_INSET_TUN_IPV6_DST | \
+	ICE_INSET_TUN_IPV6_TTL | ICE_INSET_ICMP6)
+
+/* L2 */
+static enum rte_flow_item_type pattern_ethertype[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* non-tunnel IPv4 */
+static enum rte_flow_item_type pattern_ipv4[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_icmp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* non-tunnel IPv6 */
+static enum rte_flow_item_type pattern_ipv6[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv6_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv6_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv6_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv6_icmp6[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_ICMP6,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* IPv4 VXLAN IPv4 */
+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv4[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv4_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv4_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv4_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv4_icmp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* IPv4 VXLAN MAC IPv4 */
+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv4[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv4_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv4_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv4_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv4_icmp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* IPv4 VXLAN IPv6 */
+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv6[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv6_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv6_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv6_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv6_icmp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* IPv4 VXLAN MAC IPv6 */
+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv6[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv6_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv6_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv6_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv6_icmp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* IPv4 NVGRE IPv4 */
+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv4[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv4_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv4_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv4_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv4_icmp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* IPv4 NVGRE MAC IPv4 */
+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv4[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv4_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv4_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv4_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv4_icmp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* IPv4 NVGRE IPv6 */
+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv6[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv6_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv6_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv6_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv6_icmp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* IPv4 NVGRE MAC IPv6 */
+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv6[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv6_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv6_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv6_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv6_icmp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static struct ice_flow_pattern ice_supported_patterns[] = {
+	{pattern_ethertype, INSET_ETHER},
+	{pattern_ipv4, INSET_MAC_IPV4},
+	{pattern_ipv4_udp, INSET_MAC_IPV4_L4},
+	{pattern_ipv4_sctp, INSET_MAC_IPV4_L4},
+	{pattern_ipv4_tcp, INSET_MAC_IPV4_L4},
+	{pattern_ipv4_icmp, INSET_MAC_IPV4_ICMP},
+	{pattern_ipv6, INSET_MAC_IPV6},
+	{pattern_ipv6_udp, INSET_MAC_IPV6_L4},
+	{pattern_ipv6_sctp, INSET_MAC_IPV6_L4},
+	{pattern_ipv6_tcp, INSET_MAC_IPV6_L4},
+	{pattern_ipv6_icmp6, INSET_MAC_IPV6_ICMP},
+	{pattern_ipv4_vxlan_ipv4, INSET_TUNNEL_IPV4_TYPE1},
+	{pattern_ipv4_vxlan_ipv4_udp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_vxlan_ipv4_tcp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_vxlan_ipv4_sctp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_vxlan_ipv4_icmp, INSET_TUNNEL_IPV4_TYPE3},
+	{pattern_ipv4_vxlan_eth_ipv4, INSET_TUNNEL_IPV4_TYPE1},
+	{pattern_ipv4_vxlan_eth_ipv4_udp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_vxlan_eth_ipv4_tcp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_vxlan_eth_ipv4_sctp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_vxlan_eth_ipv4_icmp, INSET_TUNNEL_IPV4_TYPE3},
+	{pattern_ipv4_vxlan_ipv6, INSET_TUNNEL_IPV6_TYPE1},
+	{pattern_ipv4_vxlan_ipv6_udp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_vxlan_ipv6_tcp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_vxlan_ipv6_sctp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_vxlan_ipv6_icmp, INSET_TUNNEL_IPV6_TYPE3},
+	{pattern_ipv4_vxlan_eth_ipv6, INSET_TUNNEL_IPV6_TYPE1},
+	{pattern_ipv4_vxlan_eth_ipv6_udp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_vxlan_eth_ipv6_tcp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_vxlan_eth_ipv6_sctp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_vxlan_eth_ipv6_icmp, INSET_TUNNEL_IPV6_TYPE3},
+	{pattern_ipv4_nvgre_ipv4, INSET_TUNNEL_IPV4_TYPE1},
+	{pattern_ipv4_nvgre_ipv4_udp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_nvgre_ipv4_tcp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_nvgre_ipv4_sctp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_nvgre_ipv4_icmp, INSET_TUNNEL_IPV4_TYPE3},
+	{pattern_ipv4_nvgre_eth_ipv4, INSET_TUNNEL_IPV4_TYPE1},
+	{pattern_ipv4_nvgre_eth_ipv4_udp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_nvgre_eth_ipv4_tcp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_nvgre_eth_ipv4_sctp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_nvgre_eth_ipv4_icmp, INSET_TUNNEL_IPV4_TYPE3},
+	{pattern_ipv4_nvgre_ipv6, INSET_TUNNEL_IPV6_TYPE1},
+	{pattern_ipv4_nvgre_ipv6_udp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_nvgre_ipv6_tcp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_nvgre_ipv6_sctp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_nvgre_ipv6_icmp, INSET_TUNNEL_IPV6_TYPE3},
+	{pattern_ipv4_nvgre_eth_ipv6, INSET_TUNNEL_IPV6_TYPE1},
+	{pattern_ipv4_nvgre_eth_ipv6_udp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_nvgre_eth_ipv6_tcp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_nvgre_eth_ipv6_sctp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_nvgre_eth_ipv6_icmp, INSET_TUNNEL_IPV6_TYPE3},
+};
+
+#endif
diff --git a/drivers/net/ice/meson.build b/drivers/net/ice/meson.build
index 8697676..7f16647 100644
--- a/drivers/net/ice/meson.build
+++ b/drivers/net/ice/meson.build
@@ -7,7 +7,8 @@ objs = [base_objs]
 sources = files(
 	'ice_ethdev.c',
 	'ice_rxtx.c',
-	'ice_switch_filter.c'
+	'ice_switch_filter.c',
+	'ice_generic_flow.c'
 	)
 
 deps += ['hash']
-- 
2.9.5


^ permalink raw reply	[flat|nested] 73+ messages in thread

* [dpdk-dev] [PATCH v6 3/3] net/ice: add UDP tunnel port support
  2019-06-24  6:15 ` [dpdk-dev] [PATCH v6 " Qiming Yang
  2019-06-24  6:15   ` [dpdk-dev] [PATCH v6 1/3] net/ice: enable switch filter Qiming Yang
  2019-06-24  6:15   ` [dpdk-dev] [PATCH v6 2/3] net/ice: add generic flow API Qiming Yang
@ 2019-06-24  6:15   ` Qiming Yang
  2 siblings, 0 replies; 73+ messages in thread
From: Qiming Yang @ 2019-06-24  6:15 UTC (permalink / raw)
  To: dev; +Cc: Qiming Yang

Enable the UDP tunnel port add and delete functions in the ice driver.

Signed-off-by: Qiming Yang <qiming.yang@intel.com>
---
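Note (not part of the patch): a minimal sketch of how an application could
exercise these new ops through the generic ethdev API. The port id and the
use of the standard VXLAN port 4789 are illustrative assumptions only.

#include <rte_ethdev.h>

/* Sketch only: register a VXLAN UDP port so the hardware parser can
 * recognize VXLAN-tunnelled packets; this dispatches to the
 * ice_dev_udp_tunnel_port_add() callback added below.
 */
static int
register_vxlan_port(uint16_t port_id)
{
	struct rte_eth_udp_tunnel tunnel = {
		.udp_port = 4789,
		.prot_type = RTE_TUNNEL_TYPE_VXLAN,
	};

	return rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
}
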
 drivers/net/ice/ice_ethdev.c | 54 ++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 54 insertions(+)

diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index 8ee06d1..949d293 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -88,6 +88,10 @@ static int ice_dev_filter_ctrl(struct rte_eth_dev *dev,
 			enum rte_filter_type filter_type,
 			enum rte_filter_op filter_op,
 			void *arg);
+static int ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
+			struct rte_eth_udp_tunnel *udp_tunnel);
+static int ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
+			struct rte_eth_udp_tunnel *udp_tunnel);
 
 static const struct rte_pci_id pci_id_ice_map[] = {
 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_BACKPLANE) },
@@ -147,6 +151,8 @@ static const struct eth_dev_ops ice_eth_dev_ops = {
 	.xstats_get_names             = ice_xstats_get_names,
 	.xstats_reset                 = ice_stats_reset,
 	.filter_ctrl                  = ice_dev_filter_ctrl,
+	.udp_tunnel_port_add          = ice_dev_udp_tunnel_port_add,
+	.udp_tunnel_port_del          = ice_dev_udp_tunnel_port_del,
 };
 
 /* store statistics names and its offset in stats structure */
@@ -3665,6 +3671,54 @@ ice_dev_filter_ctrl(struct rte_eth_dev *dev,
 	return ret;
 }
 
+/* Add UDP tunneling port */
+static int
+ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
+			     struct rte_eth_udp_tunnel *udp_tunnel)
+{
+	int ret = 0;
+	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	if (udp_tunnel == NULL)
+		return -EINVAL;
+
+	switch (udp_tunnel->prot_type) {
+	case RTE_TUNNEL_TYPE_VXLAN:
+		ret = ice_create_tunnel(hw, TNL_VXLAN, udp_tunnel->udp_port);
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "Invalid tunnel type");
+		ret = -1;
+		break;
+	}
+
+	return ret;
+}
+
+/* Delete UDP tunneling port */
+static int
+ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
+			     struct rte_eth_udp_tunnel *udp_tunnel)
+{
+	int ret = 0;
+	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	if (udp_tunnel == NULL)
+		return -EINVAL;
+
+	switch (udp_tunnel->prot_type) {
+	case RTE_TUNNEL_TYPE_VXLAN:
+		ret = ice_destroy_tunnel(hw, udp_tunnel->udp_port, 0);
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "Invalid tunnel type");
+		ret = -1;
+		break;
+	}
+
+	return ret;
+}
+
 static int
 ice_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 	      struct rte_pci_device *pci_dev)
-- 
2.9.5


^ permalink raw reply	[flat|nested] 73+ messages in thread

* [dpdk-dev] [PATCH v7 0/3] Enable rte_flow API in ice driver
  2019-06-03  9:05 [dpdk-dev] [PATCH 0/2] Enable rte_flow API in ice driver Qiming Yang
                   ` (6 preceding siblings ...)
  2019-06-24  6:15 ` [dpdk-dev] [PATCH v6 " Qiming Yang
@ 2019-06-25  6:48 ` Qiming Yang
  2019-06-25  6:48   ` [dpdk-dev] [PATCH v7 1/3] net/ice: enable switch filter Qiming Yang
                     ` (4 more replies)
  2019-06-26  8:03 ` [dpdk-dev] [PATCH v8 " Qiming Yang
                   ` (2 subsequent siblings)
  10 siblings, 5 replies; 73+ messages in thread
From: Qiming Yang @ 2019-06-25  6:48 UTC (permalink / raw)
  To: dev; +Cc: Qiming Yang

This patch set enables the backend of rte_flow, and the generic
filter related functions in ice driver. Supported flows include
ipv4, tcpv4, udpv4, ipv6, tcpv6, udpv6, tunnel, etc. This patch
set depends on shared code update.

---
v2 changes:
 - added UDP tunnel port support.
 - fixed compile issue.
 - added document update.
v3 changes:
 - removed redundancy parser.
 - added License.
 - added VXLAN and NVGRE item support.
v4 changes:
 - fixed some typos.
v5 changes:
 - fixed checkpatch issues.
v6 changes:
 - fixed one uninitialize issue.
v7 changes:
 - fixed queue action validation.

Qiming Yang (2):
  net/ice: add generic flow API
  net/ice: add UDP tunnel port support

wei zhao (1):
  net/ice: enable switch filter

 drivers/net/ice/Makefile            |   2 +
 drivers/net/ice/ice_ethdev.c        | 116 ++++++
 drivers/net/ice/ice_ethdev.h        |  12 +
 drivers/net/ice/ice_generic_flow.c  | 696 ++++++++++++++++++++++++++++++++++++
 drivers/net/ice/ice_generic_flow.h  | 614 +++++++++++++++++++++++++++++++
 drivers/net/ice/ice_switch_filter.c | 512 ++++++++++++++++++++++++++
 drivers/net/ice/ice_switch_filter.h |  24 ++
 drivers/net/ice/meson.build         |   4 +-
 8 files changed, 1979 insertions(+), 1 deletion(-)
 create mode 100644 drivers/net/ice/ice_generic_flow.c
 create mode 100644 drivers/net/ice/ice_generic_flow.h
 create mode 100644 drivers/net/ice/ice_switch_filter.c
 create mode 100644 drivers/net/ice/ice_switch_filter.h

-- 
2.9.5


^ permalink raw reply	[flat|nested] 73+ messages in thread

* [dpdk-dev] [PATCH v7 1/3] net/ice: enable switch filter
  2019-06-25  6:48 ` [dpdk-dev] [PATCH v7 0/3] Enable rte_flow API in ice driver Qiming Yang
@ 2019-06-25  6:48   ` Qiming Yang
  2019-06-25  6:48   ` [dpdk-dev] [PATCH v7 2/3] net/ice: add generic flow API Qiming Yang
                     ` (3 subsequent siblings)
  4 siblings, 0 replies; 73+ messages in thread
From: Qiming Yang @ 2019-06-25  6:48 UTC (permalink / raw)
  To: dev; +Cc: wei zhao

From: wei zhao <wei.zhao1@intel.com>

This patch enables the backend of rte_flow. It translates
rte_flow rules into the device-specific data structures and
configures the packet processing engine's binary classifier
(switch) accordingly.

Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
---
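Note (not part of the patch): a minimal sketch of the kind of rule this
backend is expected to translate, assuming a generic rte_flow application.
The port id, address and queue index below are illustrative only.

#include <rte_byteorder.h>
#include <rte_flow.h>

/* Sketch only: match a fully-masked IPv4 destination address and steer
 * matching packets to Rx queue 3.  The PMD turns the pattern items into
 * ice_adv_lkup_elem entries and the QUEUE action into an ICE_FWD_TO_Q
 * switch rule.
 */
static struct rte_flow *
add_ipv4_dst_rule(uint16_t port_id)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_ipv4 ip_spec = {
		.hdr.dst_addr = RTE_BE32(0xc0a80001), /* 192.168.0.1 */
	};
	struct rte_flow_item_ipv4 ip_mask = {
		.hdr.dst_addr = RTE_BE32(UINT32_MAX),
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ip_spec, .mask = &ip_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 3 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;

	return rte_flow_create(port_id, &attr, pattern, actions, &err);
}
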
 drivers/net/ice/Makefile            |   1 +
 drivers/net/ice/ice_ethdev.c        |  18 ++
 drivers/net/ice/ice_ethdev.h        |   7 +
 drivers/net/ice/ice_switch_filter.c | 512 ++++++++++++++++++++++++++++++++++++
 drivers/net/ice/ice_switch_filter.h |  24 ++
 drivers/net/ice/meson.build         |   3 +-
 6 files changed, 564 insertions(+), 1 deletion(-)
 create mode 100644 drivers/net/ice/ice_switch_filter.c
 create mode 100644 drivers/net/ice/ice_switch_filter.h

diff --git a/drivers/net/ice/Makefile b/drivers/net/ice/Makefile
index 0e5c55e..b10d826 100644
--- a/drivers/net/ice/Makefile
+++ b/drivers/net/ice/Makefile
@@ -60,6 +60,7 @@ ifeq ($(CONFIG_RTE_ARCH_X86), y)
 SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_rxtx_vec_sse.c
 endif
 
+SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_switch_filter.c
 ifeq ($(findstring RTE_MACHINE_CPUFLAG_AVX2,$(CFLAGS)),RTE_MACHINE_CPUFLAG_AVX2)
 	CC_AVX2_SUPPORT=1
 else
diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index 203d0a9..a94aa7e 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -1364,6 +1364,21 @@ static int ice_load_pkg(struct rte_eth_dev *dev)
 	return err;
 }
 
+static void
+ice_base_queue_get(struct ice_pf *pf)
+{
+	uint32_t reg;
+	struct ice_hw *hw = ICE_PF_TO_HW(pf);
+
+	reg = ICE_READ_REG(hw, PFLAN_RX_QALLOC);
+	if (reg & PFLAN_RX_QALLOC_VALID_M) {
+		pf->base_queue = reg & PFLAN_RX_QALLOC_FIRSTQ_M;
+	} else {
+		PMD_INIT_LOG(WARNING, "Failed to get Rx base queue"
+					" index");
+	}
+}
+
 static int
 ice_dev_init(struct rte_eth_dev *dev)
 {
@@ -1460,6 +1475,9 @@ ice_dev_init(struct rte_eth_dev *dev)
 	/* enable uio intr after callback register */
 	rte_intr_enable(intr_handle);
 
+	/* get the base queue pair index in the device */
+	ice_base_queue_get(pf);
+
 	return 0;
 
 err_pf_setup:
diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index 1385afa..50b966c 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -234,6 +234,12 @@ struct ice_vsi {
 	bool offset_loaded;
 };
 
+/* Struct to store flow created. */
+struct rte_flow {
+	TAILQ_ENTRY(rte_flow) node;
+	void *rule;
+};
+
 struct ice_pf {
 	struct ice_adapter *adapter; /* The adapter this PF associate to */
 	struct ice_vsi *main_vsi; /* pointer to main VSI structure */
@@ -252,6 +258,7 @@ struct ice_pf {
 	uint16_t hash_lut_size; /* The size of hash lookup table */
 	uint16_t lan_nb_qp_max;
 	uint16_t lan_nb_qps; /* The number of queue pairs of LAN */
+	uint16_t base_queue; /* The base queue pair index in the device */
 	struct ice_hw_port_stats stats_offset;
 	struct ice_hw_port_stats stats;
 	/* internal packet statistics, it should be excluded from the total */
diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
new file mode 100644
index 0000000..28a76ab
--- /dev/null
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -0,0 +1,512 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_eth_ctrl.h>
+#include <rte_tailq.h>
+#include <rte_flow_driver.h>
+
+#include "ice_logs.h"
+#include "base/ice_type.h"
+#include "ice_switch_filter.h"
+
+static int
+ice_parse_switch_filter(const struct rte_flow_item pattern[],
+			const struct rte_flow_action actions[],
+			struct rte_flow_error *error,
+			struct ice_adv_rule_info *rule_info,
+			struct ice_adv_lkup_elem **lkup_list,
+			uint16_t *lkups_num)
+{
+	const struct rte_flow_item *item = pattern;
+	enum rte_flow_item_type item_type;
+	const struct rte_flow_item_eth *eth_spec, *eth_mask;
+	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
+	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
+	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
+	const struct rte_flow_item_udp *udp_spec, *udp_mask;
+	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
+	const struct rte_flow_item_nvgre  *nvgre_spec, *nvgre_mask;
+	const struct rte_flow_item_vxlan  *vxlan_spec, *vxlan_mask;
+	struct ice_adv_lkup_elem *list;
+	uint16_t j, t = 0;
+	uint16_t item_num = 0;
+	enum ice_sw_tunnel_type tun_type = ICE_NON_TUN;
+	uint16_t tunnel_valid = 0;
+
+	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+		if (item->type == RTE_FLOW_ITEM_TYPE_ETH ||
+			item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
+			item->type == RTE_FLOW_ITEM_TYPE_IPV6 ||
+			item->type == RTE_FLOW_ITEM_TYPE_UDP ||
+			item->type == RTE_FLOW_ITEM_TYPE_TCP ||
+			item->type == RTE_FLOW_ITEM_TYPE_SCTP ||
+			item->type == RTE_FLOW_ITEM_TYPE_VXLAN ||
+			item->type == RTE_FLOW_ITEM_TYPE_NVGRE)
+			item_num++;
+		if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN)
+			tun_type = ICE_SW_TUN_VXLAN;
+		if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE)
+			tun_type = ICE_SW_TUN_NVGRE;
+	}
+
+	list = rte_zmalloc(NULL, item_num * sizeof(*list), 0);
+	if (!list) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ITEM, actions,
+				   "No memory for PMD internal items");
+		goto out;
+	}
+	*lkup_list = list;
+
+	for (item = pattern; item->type !=
+			RTE_FLOW_ITEM_TYPE_END; item++) {
+		item_type = item->type;
+
+		switch (item_type) {
+		case RTE_FLOW_ITEM_TYPE_ETH:
+			eth_spec = item->spec;
+			eth_mask = item->mask;
+			if (eth_spec && eth_mask) {
+				list[t].type = (tun_type == ICE_NON_TUN) ?
+					ICE_MAC_OFOS : ICE_MAC_IL;
+				struct ice_ether_hdr *h;
+				struct ice_ether_hdr *m;
+				h = &list[t].h_u.eth_hdr;
+				m = &list[t].m_u.eth_hdr;
+				for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
+					if (eth_mask->src.addr_bytes[j] ==
+								UINT8_MAX) {
+						h->src_addr[j] =
+						eth_spec->src.addr_bytes[j];
+						m->src_addr[j] =
+						eth_mask->src.addr_bytes[j];
+					}
+					if (eth_mask->dst.addr_bytes[j] ==
+								UINT8_MAX) {
+						h->dst_addr[j] =
+						eth_spec->dst.addr_bytes[j];
+						m->dst_addr[j] =
+						eth_mask->dst.addr_bytes[j];
+					}
+				}
+				if (eth_mask->type == UINT16_MAX) {
+					h->ethtype_id =
+					rte_be_to_cpu_16(eth_spec->type);
+					m->ethtype_id = UINT16_MAX;
+				}
+				t++;
+			} else if (!eth_spec && !eth_mask) {
+				list[t].type = (tun_type == ICE_NON_TUN) ?
+					ICE_MAC_OFOS : ICE_MAC_IL;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_IPV4:
+			ipv4_spec = item->spec;
+			ipv4_mask = item->mask;
+			if (ipv4_spec && ipv4_mask) {
+				list[t].type = (tun_type == ICE_NON_TUN) ?
+					ICE_IPV4_OFOS : ICE_IPV4_IL;
+				if (ipv4_mask->hdr.src_addr == UINT32_MAX) {
+					list[t].h_u.ipv4_hdr.src_addr =
+						ipv4_spec->hdr.src_addr;
+					list[t].m_u.ipv4_hdr.src_addr =
+						UINT32_MAX;
+				}
+				if (ipv4_mask->hdr.dst_addr == UINT32_MAX) {
+					list[t].h_u.ipv4_hdr.dst_addr =
+						ipv4_spec->hdr.dst_addr;
+					list[t].m_u.ipv4_hdr.dst_addr =
+						UINT32_MAX;
+				}
+				if (ipv4_mask->hdr.time_to_live == UINT8_MAX) {
+					list[t].h_u.ipv4_hdr.time_to_live =
+						ipv4_spec->hdr.time_to_live;
+					list[t].m_u.ipv4_hdr.time_to_live =
+						UINT8_MAX;
+				}
+				if (ipv4_mask->hdr.next_proto_id == UINT8_MAX) {
+					list[t].h_u.ipv4_hdr.protocol =
+						ipv4_spec->hdr.next_proto_id;
+					list[t].m_u.ipv4_hdr.protocol =
+						UINT8_MAX;
+				}
+				if (ipv4_mask->hdr.type_of_service ==
+						UINT8_MAX) {
+					list[t].h_u.ipv4_hdr.tos =
+						ipv4_spec->hdr.type_of_service;
+					list[t].m_u.ipv4_hdr.tos = UINT8_MAX;
+				}
+				t++;
+			} else if (!ipv4_spec && !ipv4_mask) {
+				list[t].type = (tun_type == ICE_NON_TUN) ?
+					ICE_IPV4_OFOS : ICE_IPV4_IL;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_IPV6:
+			ipv6_spec = item->spec;
+			ipv6_mask = item->mask;
+			if (ipv6_spec && ipv6_mask) {
+				list[t].type = (tun_type == ICE_NON_TUN) ?
+					ICE_IPV6_OFOS : ICE_IPV6_IL;
+				struct ice_ipv6_hdr *f;
+				struct ice_ipv6_hdr *s;
+				f = &list[t].h_u.ice_ipv6_ofos_hdr;
+				s = &list[t].m_u.ice_ipv6_ofos_hdr;
+				for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
+					if (ipv6_mask->hdr.src_addr[j] ==
+								UINT8_MAX) {
+						f->src_addr[j] =
+						ipv6_spec->hdr.src_addr[j];
+						s->src_addr[j] =
+						ipv6_mask->hdr.src_addr[j];
+					}
+					if (ipv6_mask->hdr.dst_addr[j] ==
+								UINT8_MAX) {
+						f->dst_addr[j] =
+						ipv6_spec->hdr.dst_addr[j];
+						s->dst_addr[j] =
+						ipv6_mask->hdr.dst_addr[j];
+					}
+				}
+				if (ipv6_mask->hdr.proto == UINT8_MAX) {
+					f->next_hdr =
+						ipv6_spec->hdr.proto;
+					s->next_hdr = UINT8_MAX;
+				}
+				if (ipv6_mask->hdr.hop_limits == UINT8_MAX) {
+					f->hop_limit =
+						ipv6_spec->hdr.hop_limits;
+					s->hop_limit = UINT8_MAX;
+				}
+				t++;
+			} else if (!ipv6_spec && !ipv6_mask) {
+				list[t].type = (tun_type == ICE_NON_TUN) ?
+					ICE_IPV6_OFOS : ICE_IPV6_IL;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_UDP:
+			udp_spec = item->spec;
+			udp_mask = item->mask;
+			if (udp_spec && udp_mask) {
+				if (tun_type == ICE_SW_TUN_VXLAN &&
+						tunnel_valid == 0)
+					list[t].type = ICE_UDP_OF;
+				else
+					list[t].type = ICE_UDP_ILOS;
+				if (udp_mask->hdr.src_port == UINT16_MAX) {
+					list[t].h_u.l4_hdr.src_port =
+						udp_spec->hdr.src_port;
+					list[t].m_u.l4_hdr.src_port =
+						udp_mask->hdr.src_port;
+				}
+				if (udp_mask->hdr.dst_port == UINT16_MAX) {
+					list[t].h_u.l4_hdr.dst_port =
+						udp_spec->hdr.dst_port;
+					list[t].m_u.l4_hdr.dst_port =
+						udp_mask->hdr.dst_port;
+				}
+				t++;
+			} else if (!udp_spec && !udp_mask) {
+				list[t].type = ICE_UDP_ILOS;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_TCP:
+			tcp_spec = item->spec;
+			tcp_mask = item->mask;
+			if (tcp_spec && tcp_mask) {
+				list[t].type = ICE_TCP_IL;
+				if (tcp_mask->hdr.src_port == UINT16_MAX) {
+					list[t].h_u.l4_hdr.src_port =
+						tcp_spec->hdr.src_port;
+					list[t].m_u.l4_hdr.src_port =
+						tcp_mask->hdr.src_port;
+				}
+				if (tcp_mask->hdr.dst_port == UINT16_MAX) {
+					list[t].h_u.l4_hdr.dst_port =
+						tcp_spec->hdr.dst_port;
+					list[t].m_u.l4_hdr.dst_port =
+						tcp_mask->hdr.dst_port;
+				}
+				t++;
+			} else if (!tcp_spec && !tcp_mask) {
+				list[t].type = ICE_TCP_IL;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_SCTP:
+			sctp_spec = item->spec;
+			sctp_mask = item->mask;
+			if (sctp_spec && sctp_mask) {
+				list[t].type = ICE_SCTP_IL;
+				if (sctp_mask->hdr.src_port == UINT16_MAX) {
+					list[t].h_u.sctp_hdr.src_port =
+						sctp_spec->hdr.src_port;
+					list[t].m_u.sctp_hdr.src_port =
+						sctp_mask->hdr.src_port;
+				}
+				if (sctp_mask->hdr.dst_port == UINT16_MAX) {
+					list[t].h_u.sctp_hdr.dst_port =
+						sctp_spec->hdr.dst_port;
+					list[t].m_u.sctp_hdr.dst_port =
+						sctp_mask->hdr.dst_port;
+				}
+				t++;
+			} else if (!sctp_spec && !sctp_mask) {
+				list[t].type = ICE_SCTP_IL;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_VXLAN:
+			vxlan_spec = item->spec;
+			vxlan_mask = item->mask;
+			tunnel_valid = 1;
+			if (vxlan_spec && vxlan_mask) {
+				list[t].type = ICE_VXLAN;
+				if (vxlan_mask->vni[0] == UINT8_MAX &&
+					vxlan_mask->vni[1] == UINT8_MAX &&
+					vxlan_mask->vni[2] == UINT8_MAX) {
+					list[t].h_u.tnl_hdr.vni =
+						(vxlan_spec->vni[2] << 16) |
+						(vxlan_spec->vni[1] << 8) |
+						vxlan_spec->vni[0];
+					list[t].m_u.tnl_hdr.vni =
+						UINT32_MAX;
+				}
+				t++;
+			} else if (!vxlan_spec && !vxlan_mask) {
+				list[t].type = ICE_VXLAN;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_NVGRE:
+			nvgre_spec = item->spec;
+			nvgre_mask = item->mask;
+			tunnel_valid = 1;
+			if (nvgre_spec && nvgre_mask) {
+				list[t].type = ICE_NVGRE;
+				if (nvgre_mask->tni[0] == UINT8_MAX &&
+					nvgre_mask->tni[1] == UINT8_MAX &&
+					nvgre_mask->tni[2] == UINT8_MAX) {
+					list[t].h_u.nvgre_hdr.tni_flow =
+						(nvgre_spec->tni[2] << 16) |
+						(nvgre_spec->tni[1] << 8) |
+						nvgre_spec->tni[0];
+					list[t].m_u.nvgre_hdr.tni_flow =
+						UINT32_MAX;
+				}
+				t++;
+			} else if (!nvgre_spec && !nvgre_mask) {
+				list[t].type = ICE_NVGRE;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_VOID:
+		case RTE_FLOW_ITEM_TYPE_END:
+			break;
+
+		default:
+			rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ITEM, item,
+				   "Invalid pattern item.");
+			goto out;
+		}
+	}
+
+	rule_info->tun_type = tun_type;
+	*lkups_num = t;
+
+	return 0;
+out:
+	return -rte_errno;
+}
+
+/* For now the ice switch filter action code only
+ * supports the QUEUE and DROP actions.
+ */
+static int
+ice_parse_switch_action(struct ice_pf *pf,
+				 const struct rte_flow_action *actions,
+				 struct rte_flow_error *error,
+				 struct ice_adv_rule_info *rule_info)
+{
+	struct ice_vsi *vsi = pf->main_vsi;
+	const struct rte_flow_action_queue *act_q;
+	uint16_t base_queue;
+	const struct rte_flow_action *action;
+	enum rte_flow_action_type action_type;
+
+	base_queue = pf->base_queue;
+	for (action = actions; action->type !=
+			RTE_FLOW_ACTION_TYPE_END; action++) {
+		action_type = action->type;
+		switch (action_type) {
+		case RTE_FLOW_ACTION_TYPE_QUEUE:
+			act_q = action->conf;
+			rule_info->sw_act.fltr_act =
+				ICE_FWD_TO_Q;
+			rule_info->sw_act.fwd_id.q_id =
+				base_queue + act_q->index;
+			break;
+
+		case RTE_FLOW_ACTION_TYPE_DROP:
+			rule_info->sw_act.fltr_act =
+				ICE_DROP_PACKET;
+			break;
+
+		case RTE_FLOW_ACTION_TYPE_VOID:
+			break;
+
+		default:
+			rte_flow_error_set(error,
+				EINVAL,
+				RTE_FLOW_ERROR_TYPE_ACTION,
+				actions,
+				"Invalid action type");
+			return -rte_errno;
+		}
+	}
+
+	rule_info->sw_act.vsi_handle = vsi->idx;
+	rule_info->rx = 1;
+	rule_info->sw_act.src = vsi->idx;
+
+	return 0;
+}
+
+static int
+ice_switch_rule_set(struct ice_pf *pf,
+			struct ice_adv_lkup_elem *list,
+			uint16_t lkups_cnt,
+			struct ice_adv_rule_info *rule_info,
+			struct rte_flow *flow,
+			struct rte_flow_error *error)
+{
+	struct ice_hw *hw = ICE_PF_TO_HW(pf);
+	int ret;
+	struct ice_rule_query_data rule_added = {0};
+	struct ice_rule_query_data *filter_ptr;
+
+	if (lkups_cnt > ICE_MAX_CHAIN_WORDS) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+			"item number too large for rule");
+		return -rte_errno;
+	}
+	if (!list) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+			"lookup list should not be NULL");
+		return -rte_errno;
+	}
+
+	ret = ice_add_adv_rule(hw, list, lkups_cnt, rule_info, &rule_added);
+
+	if (!ret) {
+		filter_ptr = rte_zmalloc("ice_switch_filter",
+			sizeof(struct ice_rule_query_data), 0);
+		if (!filter_ptr) {
+			PMD_DRV_LOG(ERR, "failed to allocate memory");
+			return -EINVAL;
+		}
+		flow->rule = filter_ptr;
+		rte_memcpy(filter_ptr,
+			&rule_added,
+			sizeof(struct ice_rule_query_data));
+	}
+
+	return ret;
+}
+
+int
+ice_create_switch_filter(struct ice_pf *pf,
+			const struct rte_flow_item pattern[],
+			const struct rte_flow_action actions[],
+			struct rte_flow *flow,
+			struct rte_flow_error *error)
+{
+	int ret = 0;
+	struct ice_adv_rule_info rule_info = {0};
+	struct ice_adv_lkup_elem *list = NULL;
+	uint16_t lkups_num = 0;
+
+	ret = ice_parse_switch_filter(pattern, actions, error,
+			&rule_info, &list, &lkups_num);
+	if (ret)
+		goto error;
+
+	ret = ice_parse_switch_action(pf, actions, error, &rule_info);
+	if (ret)
+		goto error;
+
+	ret = ice_switch_rule_set(pf, list, lkups_num, &rule_info, flow, error);
+	if (ret)
+		goto error;
+
+	rte_free(list);
+	return 0;
+
+error:
+	rte_free(list);
+
+	return -rte_errno;
+}
+
+int
+ice_destroy_switch_filter(struct ice_pf *pf,
+			struct rte_flow *flow,
+			struct rte_flow_error *error)
+{
+	struct ice_hw *hw = ICE_PF_TO_HW(pf);
+	int ret;
+	struct ice_rule_query_data *filter_ptr;
+
+	filter_ptr = (struct ice_rule_query_data *)
+			flow->rule;
+
+	if (!filter_ptr) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+			"no such flow"
+			" created by switch filter");
+		return -rte_errno;
+	}
+
+	ret = ice_rem_adv_rule_by_id(hw, filter_ptr);
+	if (ret) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+			"fail to destroy switch filter rule");
+		return -rte_errno;
+	}
+
+	rte_free(filter_ptr);
+	return ret;
+}
+
+void
+ice_free_switch_filter_rule(void *rule)
+{
+	struct ice_rule_query_data *filter_ptr;
+
+	filter_ptr = (struct ice_rule_query_data *)rule;
+
+	rte_free(filter_ptr);
+}
diff --git a/drivers/net/ice/ice_switch_filter.h b/drivers/net/ice/ice_switch_filter.h
new file mode 100644
index 0000000..cea4799
--- /dev/null
+++ b/drivers/net/ice/ice_switch_filter.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+#ifndef _ICE_SWITCH_FILTER_H_
+#define _ICE_SWITCH_FILTER_H_
+
+#include "base/ice_switch.h"
+#include "base/ice_type.h"
+#include "ice_ethdev.h"
+
+int
+ice_create_switch_filter(struct ice_pf *pf,
+			const struct rte_flow_item pattern[],
+			const struct rte_flow_action actions[],
+			struct rte_flow *flow,
+			struct rte_flow_error *error);
+int
+ice_destroy_switch_filter(struct ice_pf *pf,
+			struct rte_flow *flow,
+			struct rte_flow_error *error);
+void
+ice_free_switch_filter_rule(void *rule);
+#endif /* _ICE_SWITCH_FILTER_H_ */
diff --git a/drivers/net/ice/meson.build b/drivers/net/ice/meson.build
index 2bec688..8697676 100644
--- a/drivers/net/ice/meson.build
+++ b/drivers/net/ice/meson.build
@@ -6,7 +6,8 @@ objs = [base_objs]
 
 sources = files(
 	'ice_ethdev.c',
-	'ice_rxtx.c'
+	'ice_rxtx.c',
+	'ice_switch_filter.c'
 	)
 
 deps += ['hash']
-- 
2.9.5


^ permalink raw reply	[flat|nested] 73+ messages in thread

* [dpdk-dev] [PATCH v7 2/3] net/ice: add generic flow API
  2019-06-25  6:48 ` [dpdk-dev] [PATCH v7 0/3] Enable rte_flow API in ice driver Qiming Yang
  2019-06-25  6:48   ` [dpdk-dev] [PATCH v7 1/3] net/ice: enable switch filter Qiming Yang
@ 2019-06-25  6:48   ` Qiming Yang
  2019-06-25  6:48   ` [dpdk-dev] [PATCH v7 3/3] net/ice: add UDP tunnel port support Qiming Yang
                     ` (2 subsequent siblings)
  4 siblings, 0 replies; 73+ messages in thread
From: Qiming Yang @ 2019-06-25  6:48 UTC (permalink / raw)
  To: dev; +Cc: Qiming Yang

This patch adds support for ice_flow_create, ice_flow_destroy,
ice_flow_flush and ice_flow_validate; these ops are used to
handle all the generic filters.

Signed-off-by: Qiming Yang <qiming.yang@intel.com>
---
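Note (not part of the patch): a minimal usage sketch of the generic entry
points this patch wires up. The pattern/actions arrays are assumed to be
built by the caller (for example as in the previous patch), and the port id
is illustrative.

#include <rte_flow.h>

/* Sketch only: validate a rule before creating it, and flush all rules at
 * teardown.  rte_flow_validate() lands in ice_flow_validate() and
 * rte_flow_flush() in ice_flow_flush(), which walks pf->flow_list.
 */
static int
validate_then_flush(uint16_t port_id,
		    const struct rte_flow_attr *attr,
		    const struct rte_flow_item pattern[],
		    const struct rte_flow_action actions[])
{
	struct rte_flow_error err;
	int ret;

	ret = rte_flow_validate(port_id, attr, pattern, actions, &err);
	if (ret)
		return ret;

	/* ... rules would be created and used here ... */

	return rte_flow_flush(port_id, &err);
}
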
 drivers/net/ice/Makefile           |   1 +
 drivers/net/ice/ice_ethdev.c       |  44 +++
 drivers/net/ice/ice_ethdev.h       |   5 +
 drivers/net/ice/ice_generic_flow.c | 696 +++++++++++++++++++++++++++++++++++++
 drivers/net/ice/ice_generic_flow.h | 614 ++++++++++++++++++++++++++++++++
 drivers/net/ice/meson.build        |   3 +-
 6 files changed, 1362 insertions(+), 1 deletion(-)
 create mode 100644 drivers/net/ice/ice_generic_flow.c
 create mode 100644 drivers/net/ice/ice_generic_flow.h

diff --git a/drivers/net/ice/Makefile b/drivers/net/ice/Makefile
index b10d826..32abeb6 100644
--- a/drivers/net/ice/Makefile
+++ b/drivers/net/ice/Makefile
@@ -79,5 +79,6 @@ endif
 ifeq ($(CC_AVX2_SUPPORT), 1)
 	SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_rxtx_vec_avx2.c
 endif
+SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_generic_flow.c
 
 include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index a94aa7e..8ee06d1 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -15,6 +15,7 @@
 #include "base/ice_dcb.h"
 #include "ice_ethdev.h"
 #include "ice_rxtx.h"
+#include "ice_switch_filter.h"
 
 #define ICE_MAX_QP_NUM "max_queue_pair_num"
 #define ICE_DFLT_OUTER_TAG_TYPE ICE_AQ_VSI_OUTER_TAG_VLAN_9100
@@ -83,6 +84,10 @@ static int ice_xstats_get(struct rte_eth_dev *dev,
 static int ice_xstats_get_names(struct rte_eth_dev *dev,
 				struct rte_eth_xstat_name *xstats_names,
 				unsigned int limit);
+static int ice_dev_filter_ctrl(struct rte_eth_dev *dev,
+			enum rte_filter_type filter_type,
+			enum rte_filter_op filter_op,
+			void *arg);
 
 static const struct rte_pci_id pci_id_ice_map[] = {
 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_BACKPLANE) },
@@ -141,6 +146,7 @@ static const struct eth_dev_ops ice_eth_dev_ops = {
 	.xstats_get                   = ice_xstats_get,
 	.xstats_get_names             = ice_xstats_get_names,
 	.xstats_reset                 = ice_stats_reset,
+	.filter_ctrl                  = ice_dev_filter_ctrl,
 };
 
 /* store statistics names and its offset in stats structure */
@@ -1478,6 +1484,8 @@ ice_dev_init(struct rte_eth_dev *dev)
 	/* get base queue pairs index  in the device */
 	ice_base_queue_get(pf);
 
+	TAILQ_INIT(&pf->flow_list);
+
 	return 0;
 
 err_pf_setup:
@@ -1620,6 +1628,8 @@ ice_dev_uninit(struct rte_eth_dev *dev)
 {
 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	struct rte_flow *p_flow;
 
 	ice_dev_close(dev);
 
@@ -1637,6 +1647,13 @@ ice_dev_uninit(struct rte_eth_dev *dev)
 	rte_intr_callback_unregister(intr_handle,
 				     ice_interrupt_handler, dev);
 
+	/* Remove all flows */
+	while ((p_flow = TAILQ_FIRST(&pf->flow_list))) {
+		TAILQ_REMOVE(&pf->flow_list, p_flow, node);
+		ice_free_switch_filter_rule(p_flow->rule);
+		rte_free(p_flow);
+	}
+
 	return 0;
 }
 
@@ -3622,6 +3639,33 @@ static int ice_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
 }
 
 static int
+ice_dev_filter_ctrl(struct rte_eth_dev *dev,
+		     enum rte_filter_type filter_type,
+		     enum rte_filter_op filter_op,
+		     void *arg)
+{
+	int ret = 0;
+
+	if (!dev)
+		return -EINVAL;
+
+	switch (filter_type) {
+	case RTE_ETH_FILTER_GENERIC:
+		if (filter_op != RTE_ETH_FILTER_GET)
+			return -EINVAL;
+		*(const void **)arg = &ice_flow_ops;
+		break;
+	default:
+		PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
+					filter_type);
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+static int
 ice_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 	      struct rte_pci_device *pci_dev)
 {
diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index 50b966c..8a52239 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -234,12 +234,16 @@ struct ice_vsi {
 	bool offset_loaded;
 };
 
+extern const struct rte_flow_ops ice_flow_ops;
+
 /* Struct to store flow created. */
 struct rte_flow {
 	TAILQ_ENTRY(rte_flow) node;
 	void *rule;
 };
 
+TAILQ_HEAD(ice_flow_list, rte_flow);
+
 struct ice_pf {
 	struct ice_adapter *adapter; /* The adapter this PF associate to */
 	struct ice_vsi *main_vsi; /* pointer to main VSI structure */
@@ -266,6 +270,7 @@ struct ice_pf {
 	struct ice_eth_stats internal_stats;
 	bool offset_loaded;
 	bool adapter_stopped;
+	struct ice_flow_list flow_list;
 };
 
 /**
diff --git a/drivers/net/ice/ice_generic_flow.c b/drivers/net/ice/ice_generic_flow.c
new file mode 100644
index 0000000..d5ff278
--- /dev/null
+++ b/drivers/net/ice/ice_generic_flow.c
@@ -0,0 +1,696 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_malloc.h>
+
+#include "ice_ethdev.h"
+#include "ice_generic_flow.h"
+#include "ice_switch_filter.h"
+
+static int ice_flow_validate(struct rte_eth_dev *dev,
+		const struct rte_flow_attr *attr,
+		const struct rte_flow_item pattern[],
+		const struct rte_flow_action actions[],
+		struct rte_flow_error *error);
+static struct rte_flow *ice_flow_create(struct rte_eth_dev *dev,
+		const struct rte_flow_attr *attr,
+		const struct rte_flow_item pattern[],
+		const struct rte_flow_action actions[],
+		struct rte_flow_error *error);
+static int ice_flow_destroy(struct rte_eth_dev *dev,
+		struct rte_flow *flow,
+		struct rte_flow_error *error);
+static int ice_flow_flush(struct rte_eth_dev *dev,
+		struct rte_flow_error *error);
+
+const struct rte_flow_ops ice_flow_ops = {
+	.validate = ice_flow_validate,
+	.create = ice_flow_create,
+	.destroy = ice_flow_destroy,
+	.flush = ice_flow_flush,
+};
+
+static int
+ice_flow_valid_attr(const struct rte_flow_attr *attr,
+		     struct rte_flow_error *error)
+{
+	/* Must be input direction */
+	if (!attr->ingress) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+				   attr, "Only support ingress.");
+		return -rte_errno;
+	}
+
+	/* Not supported */
+	if (attr->egress) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+				   attr, "Not support egress.");
+		return -rte_errno;
+	}
+
+	/* Not supported */
+	if (attr->priority) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+				   attr, "Not support priority.");
+		return -rte_errno;
+	}
+
+	/* Not supported */
+	if (attr->group) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+				   attr, "Not support group.");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+/* Find the first VOID or non-VOID item pointer */
+static const struct rte_flow_item *
+ice_find_first_item(const struct rte_flow_item *item, bool is_void)
+{
+	bool is_find;
+
+	while (item->type != RTE_FLOW_ITEM_TYPE_END) {
+		if (is_void)
+			is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
+		else
+			is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
+		if (is_find)
+			break;
+		item++;
+	}
+	return item;
+}
+
+/* Skip all VOID items of the pattern */
+static void
+ice_pattern_skip_void_item(struct rte_flow_item *items,
+			    const struct rte_flow_item *pattern)
+{
+	uint32_t cpy_count = 0;
+	const struct rte_flow_item *pb = pattern, *pe = pattern;
+
+	for (;;) {
+		/* Find a non-void item first */
+		pb = ice_find_first_item(pb, false);
+		if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
+			pe = pb;
+			break;
+		}
+
+		/* Find a void item */
+		pe = ice_find_first_item(pb + 1, true);
+
+		cpy_count = pe - pb;
+		rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);
+
+		items += cpy_count;
+
+		if (pe->type == RTE_FLOW_ITEM_TYPE_END) {
+			pb = pe;
+			break;
+		}
+
+		pb = pe + 1;
+	}
+	/* Copy the END item. */
+	rte_memcpy(items, pe, sizeof(struct rte_flow_item));
+}
+
+/* Check if the pattern matches a supported item type array */
+static bool
+ice_match_pattern(enum rte_flow_item_type *item_array,
+		const struct rte_flow_item *pattern)
+{
+	const struct rte_flow_item *item = pattern;
+
+	while ((*item_array == item->type) &&
+	       (*item_array != RTE_FLOW_ITEM_TYPE_END)) {
+		item_array++;
+		item++;
+	}
+
+	return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
+		item->type == RTE_FLOW_ITEM_TYPE_END);
+}
+
+static uint64_t ice_flow_valid_pattern(const struct rte_flow_item pattern[],
+		struct rte_flow_error *error)
+{
+	uint16_t i = 0;
+	uint64_t inset;
+	struct rte_flow_item *items; /* used for pattern without VOID items */
+	uint32_t item_num = 0; /* non-void item number */
+
+	/* Get the non-void item number of pattern */
+	while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
+		if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
+			item_num++;
+		i++;
+	}
+	item_num++;
+
+	items = rte_zmalloc("ice_pattern",
+			    item_num * sizeof(struct rte_flow_item), 0);
+	if (!items) {
+		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+				   NULL, "No memory for PMD internal items.");
+		return -ENOMEM;
+	}
+
+	ice_pattern_skip_void_item(items, pattern);
+
+	for (i = 0; i < RTE_DIM(ice_supported_patterns); i++)
+		if (ice_match_pattern(ice_supported_patterns[i].items,
+				      items)) {
+			inset = ice_supported_patterns[i].sw_fields;
+			rte_free(items);
+			return inset;
+		}
+	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+			   pattern, "Unsupported pattern");
+
+	rte_free(items);
+	return 0;
+}
+
+static uint64_t ice_get_flow_field(const struct rte_flow_item pattern[],
+			struct rte_flow_error *error)
+{
+	const struct rte_flow_item *item = pattern;
+	const struct rte_flow_item_eth *eth_spec, *eth_mask;
+	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
+	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
+	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
+	const struct rte_flow_item_udp *udp_spec, *udp_mask;
+	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
+	const struct rte_flow_item_icmp *icmp_mask;
+	const struct rte_flow_item_icmp6 *icmp6_mask;
+	const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
+	const struct rte_flow_item_nvgre *nvgre_spec, *nvgre_mask;
+	enum rte_flow_item_type item_type;
+	uint8_t  ipv6_addr_mask[16] = {
+		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
+	uint64_t input_set = ICE_INSET_NONE;
+	bool outer_ip = true;
+	bool outer_l4 = true;
+
+	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+		if (item->last) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Not support range");
+			return 0;
+		}
+		item_type = item->type;
+		switch (item_type) {
+		case RTE_FLOW_ITEM_TYPE_ETH:
+			eth_spec = item->spec;
+			eth_mask = item->mask;
+
+			if (eth_spec && eth_mask) {
+				if (rte_is_broadcast_ether_addr(&eth_mask->src))
+					input_set |= ICE_INSET_SMAC;
+				if (rte_is_broadcast_ether_addr(&eth_mask->dst))
+					input_set |= ICE_INSET_DMAC;
+				if (eth_mask->type == RTE_BE16(0xffff))
+					input_set |= ICE_INSET_ETHERTYPE;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_IPV4:
+			ipv4_spec = item->spec;
+			ipv4_mask = item->mask;
+
+			if (!(ipv4_spec && ipv4_mask)) {
+				rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Invalid IPv4 spec or mask.");
+				return 0;
+			}
+
+			/* Check IPv4 mask and update input set */
+			if (ipv4_mask->hdr.version_ihl ||
+			    ipv4_mask->hdr.total_length ||
+			    ipv4_mask->hdr.packet_id ||
+			    ipv4_mask->hdr.hdr_checksum) {
+				rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Invalid IPv4 mask.");
+				return 0;
+			}
+
+			if (outer_ip) {
+				if (ipv4_mask->hdr.src_addr == UINT32_MAX)
+					input_set |= ICE_INSET_IPV4_SRC;
+				if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
+					input_set |= ICE_INSET_IPV4_DST;
+				if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
+					input_set |= ICE_INSET_IPV4_TOS;
+				if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
+					input_set |= ICE_INSET_IPV4_TTL;
+				if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
+					input_set |= ICE_INSET_IPV4_PROTO;
+				outer_ip = false;
+			} else {
+				if (ipv4_mask->hdr.src_addr == UINT32_MAX)
+					input_set |= ICE_INSET_TUN_IPV4_SRC;
+				if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
+					input_set |= ICE_INSET_TUN_IPV4_DST;
+				if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
+					input_set |= ICE_INSET_TUN_IPV4_TTL;
+				if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
+					input_set |= ICE_INSET_TUN_IPV4_PROTO;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_IPV6:
+			ipv6_spec = item->spec;
+			ipv6_mask = item->mask;
+
+			if (!(ipv6_spec && ipv6_mask)) {
+				rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ITEM,
+					item, "Invalid IPv6 spec or mask");
+				return 0;
+			}
+
+			if (ipv6_mask->hdr.payload_len ||
+			    ipv6_mask->hdr.vtc_flow) {
+				rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Invalid IPv6 mask");
+				return 0;
+			}
+
+			if (outer_ip) {
+				if (!memcmp(ipv6_mask->hdr.src_addr,
+					    ipv6_addr_mask,
+					    RTE_DIM(ipv6_mask->hdr.src_addr)))
+					input_set |= ICE_INSET_IPV6_SRC;
+				if (!memcmp(ipv6_mask->hdr.dst_addr,
+					    ipv6_addr_mask,
+					    RTE_DIM(ipv6_mask->hdr.dst_addr)))
+					input_set |= ICE_INSET_IPV6_DST;
+				if (ipv6_mask->hdr.proto == UINT8_MAX)
+					input_set |= ICE_INSET_IPV6_PROTO;
+				if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
+					input_set |= ICE_INSET_IPV6_HOP_LIMIT;
+				outer_ip = false;
+			} else {
+				if (!memcmp(ipv6_mask->hdr.src_addr,
+					    ipv6_addr_mask,
+					    RTE_DIM(ipv6_mask->hdr.src_addr)))
+					input_set |= ICE_INSET_TUN_IPV6_SRC;
+				if (!memcmp(ipv6_mask->hdr.dst_addr,
+					    ipv6_addr_mask,
+					    RTE_DIM(ipv6_mask->hdr.dst_addr)))
+					input_set |= ICE_INSET_TUN_IPV6_DST;
+				if (ipv6_mask->hdr.proto == UINT8_MAX)
+					input_set |= ICE_INSET_TUN_IPV6_PROTO;
+				if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
+					input_set |= ICE_INSET_TUN_IPV6_TTL;
+			}
+
+			break;
+		case RTE_FLOW_ITEM_TYPE_UDP:
+			udp_spec = item->spec;
+			udp_mask = item->mask;
+
+			if (!(udp_spec && udp_mask)) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item, "Invalid UDP mask");
+				return 0;
+			}
+
+			/* Check UDP mask and update input set*/
+			if (udp_mask->hdr.dgram_len ||
+			    udp_mask->hdr.dgram_cksum) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid UDP mask");
+				return 0;
+			}
+
+			if (outer_l4) {
+				if (udp_mask->hdr.src_port == UINT16_MAX)
+					input_set |= ICE_INSET_SRC_PORT;
+				if (udp_mask->hdr.dst_port == UINT16_MAX)
+					input_set |= ICE_INSET_DST_PORT;
+				outer_l4 = false;
+			} else {
+				if (udp_mask->hdr.src_port == UINT16_MAX)
+					input_set |= ICE_INSET_TUN_SRC_PORT;
+				if (udp_mask->hdr.dst_port == UINT16_MAX)
+					input_set |= ICE_INSET_TUN_DST_PORT;
+			}
+
+			break;
+		case RTE_FLOW_ITEM_TYPE_TCP:
+			tcp_spec = item->spec;
+			tcp_mask = item->mask;
+
+			if (!(tcp_spec && tcp_mask)) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item, "Invalid TCP mask");
+				return 0;
+			}
+
+			/* Check TCP mask and update input set */
+			if (tcp_mask->hdr.sent_seq ||
+			    tcp_mask->hdr.recv_ack ||
+			    tcp_mask->hdr.data_off ||
+			    tcp_mask->hdr.tcp_flags ||
+			    tcp_mask->hdr.rx_win ||
+			    tcp_mask->hdr.cksum ||
+			    tcp_mask->hdr.tcp_urp) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid TCP mask");
+				return 0;
+			}
+
+			if (outer_l4) {
+				if (tcp_mask->hdr.src_port == UINT16_MAX)
+					input_set |= ICE_INSET_SRC_PORT;
+				if (tcp_mask->hdr.dst_port == UINT16_MAX)
+					input_set |= ICE_INSET_DST_PORT;
+				outer_l4 = false;
+			} else {
+				if (tcp_mask->hdr.src_port == UINT16_MAX)
+					input_set |= ICE_INSET_TUN_SRC_PORT;
+				if (tcp_mask->hdr.dst_port == UINT16_MAX)
+					input_set |= ICE_INSET_TUN_DST_PORT;
+			}
+
+			break;
+		case RTE_FLOW_ITEM_TYPE_SCTP:
+			sctp_spec = item->spec;
+			sctp_mask = item->mask;
+
+			if (!(sctp_spec && sctp_mask)) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item, "Invalid SCTP mask");
+				return 0;
+			}
+
+			/* Check SCTP mask and update input set */
+			if (sctp_mask->hdr.cksum) {
+				rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Invalid SCTP mask");
+				return 0;
+			}
+
+			if (outer_l4) {
+				if (sctp_mask->hdr.src_port == UINT16_MAX)
+					input_set |= ICE_INSET_SRC_PORT;
+				if (sctp_mask->hdr.dst_port == UINT16_MAX)
+					input_set |= ICE_INSET_DST_PORT;
+				outer_l4 = false;
+			} else {
+				if (sctp_mask->hdr.src_port == UINT16_MAX)
+					input_set |= ICE_INSET_TUN_SRC_PORT;
+				if (sctp_mask->hdr.dst_port == UINT16_MAX)
+					input_set |= ICE_INSET_TUN_DST_PORT;
+			}
+
+			break;
+		case RTE_FLOW_ITEM_TYPE_ICMP:
+			icmp_mask = item->mask;
+			if (icmp_mask->hdr.icmp_code ||
+			    icmp_mask->hdr.icmp_cksum ||
+			    icmp_mask->hdr.icmp_ident ||
+			    icmp_mask->hdr.icmp_seq_nb) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid ICMP mask");
+				return 0;
+			}
+
+			if (icmp_mask->hdr.icmp_type == UINT8_MAX)
+				input_set |= ICE_INSET_ICMP;
+			break;
+		case RTE_FLOW_ITEM_TYPE_ICMP6:
+			icmp6_mask = item->mask;
+			if (icmp6_mask->code ||
+			    icmp6_mask->checksum) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid ICMP6 mask");
+				return 0;
+			}
+
+			if (icmp6_mask->type == UINT8_MAX)
+				input_set |= ICE_INSET_ICMP6;
+			break;
+		case RTE_FLOW_ITEM_TYPE_VXLAN:
+			vxlan_spec = item->spec;
+			vxlan_mask = item->mask;
+			/* Check if VXLAN item is used to describe protocol.
+			 * If yes, both spec and mask should be NULL.
+			 * If no, both spec and mask shouldn't be NULL.
+			 */
+			if ((!vxlan_spec && vxlan_mask) ||
+			    (vxlan_spec && !vxlan_mask)) {
+				rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Invalid VXLAN item");
+				return 0;
+			}
+
+			break;
+		case RTE_FLOW_ITEM_TYPE_NVGRE:
+			nvgre_spec = item->spec;
+			nvgre_mask = item->mask;
+			/* Check if NVGRE item is used to describe protocol.
+			 * If yes, both spec and mask should be NULL.
+			 * If no, both spec and mask shouldn't be NULL.
+			 */
+			if ((!nvgre_spec && nvgre_mask) ||
+			    (nvgre_spec && !nvgre_mask)) {
+				rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Invalid NVGRE item");
+				return 0;
+			}
+
+			break;
+		default:
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Invalid pattern item.");
+			break;
+		}
+	}
+	return input_set;
+}
+
+static int ice_flow_valid_inset(const struct rte_flow_item pattern[],
+			uint64_t inset, struct rte_flow_error *error)
+{
+	uint64_t fields;
+
+	/* get valid field */
+	fields = ice_get_flow_field(pattern, error);
+	if (!fields || fields & (~inset)) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
+				   pattern,
+				   "Invalid input set");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+static int ice_flow_valid_action(struct rte_eth_dev *dev,
+				const struct rte_flow_action *actions,
+				struct rte_flow_error *error)
+{
+	const struct rte_flow_action_queue *act_q;
+	uint16_t queue;
+
+	switch (actions->type) {
+	case RTE_FLOW_ACTION_TYPE_QUEUE:
+		act_q = actions->conf;
+		queue = act_q->index;
+		if (queue >= dev->data->nb_rx_queues) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ACTION,
+					   actions, "Invalid queue ID for"
+					   " ethertype_filter.");
+			return -rte_errno;
+		}
+		break;
+	case RTE_FLOW_ACTION_TYPE_DROP:
+		break;
+	default:
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ACTION, actions,
+				   "Invalid action.");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+static int
+ice_flow_validate(struct rte_eth_dev *dev,
+		   const struct rte_flow_attr *attr,
+		   const struct rte_flow_item pattern[],
+		   const struct rte_flow_action actions[],
+		   struct rte_flow_error *error)
+{
+	uint64_t inset = 0;
+	int ret = ICE_ERR_NOT_SUPPORTED;
+
+	if (!pattern) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+				   NULL, "NULL pattern.");
+		return -rte_errno;
+	}
+
+	if (!actions) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+				   NULL, "NULL action.");
+		return -rte_errno;
+	}
+
+	if (!attr) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR,
+				   NULL, "NULL attribute.");
+		return -rte_errno;
+	}
+
+	ret = ice_flow_valid_attr(attr, error);
+	if (ret)
+		return ret;
+
+	inset = ice_flow_valid_pattern(pattern, error);
+	if (!inset)
+		return -rte_errno;
+
+	ret = ice_flow_valid_inset(pattern, inset, error);
+	if (ret)
+		return ret;
+
+	ret = ice_flow_valid_action(dev, actions, error);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static struct rte_flow *
+ice_flow_create(struct rte_eth_dev *dev,
+		 const struct rte_flow_attr *attr,
+		 const struct rte_flow_item pattern[],
+		 const struct rte_flow_action actions[],
+		 struct rte_flow_error *error)
+{
+	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	struct rte_flow *flow = NULL;
+	int ret;
+
+	flow = rte_zmalloc("ice_flow", sizeof(struct rte_flow), 0);
+	if (!flow) {
+		rte_flow_error_set(error, ENOMEM,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Failed to allocate memory");
+		return flow;
+	}
+
+	ret = ice_flow_validate(dev, attr, pattern, actions, error);
+	if (ret < 0)
+		goto free_flow;
+
+	ret = ice_create_switch_filter(pf, pattern, actions, flow, error);
+	if (ret)
+		goto free_flow;
+
+	TAILQ_INSERT_TAIL(&pf->flow_list, flow, node);
+	return flow;
+
+free_flow:
+	rte_flow_error_set(error, -ret,
+			   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+			   "Failed to create flow.");
+	rte_free(flow);
+	return NULL;
+}
+
+static int
+ice_flow_destroy(struct rte_eth_dev *dev,
+		 struct rte_flow *flow,
+		 struct rte_flow_error *error)
+{
+	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	int ret = 0;
+
+	ret = ice_destroy_switch_filter(pf, flow, error);
+
+	if (!ret) {
+		TAILQ_REMOVE(&pf->flow_list, flow, node);
+		rte_free(flow);
+	} else {
+		rte_flow_error_set(error, -ret,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Failed to destroy flow.");
+	}
+
+	return ret;
+}
+
+static int
+ice_flow_flush(struct rte_eth_dev *dev,
+	       struct rte_flow_error *error)
+{
+	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	struct rte_flow *p_flow;
+	int ret = 0;
+
+	TAILQ_FOREACH(p_flow, &pf->flow_list, node) {
+		ret = ice_flow_destroy(dev, p_flow, error);
+		if (ret) {
+			rte_flow_error_set(error, -ret,
+					   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+					   "Failed to flush SW flows.");
+			return -rte_errno;
+		}
+	}
+
+	return ret;
+}
diff --git a/drivers/net/ice/ice_generic_flow.h b/drivers/net/ice/ice_generic_flow.h
new file mode 100644
index 0000000..2e43a29
--- /dev/null
+++ b/drivers/net/ice/ice_generic_flow.h
@@ -0,0 +1,614 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+#ifndef _ICE_GENERIC_FLOW_H_
+#define _ICE_GENERIC_FLOW_H_
+
+#include <rte_flow_driver.h>
+
+struct ice_flow_pattern {
+	enum rte_flow_item_type *items;
+	uint64_t sw_fields;
+};
+
+#define ICE_INSET_NONE            0x00000000000000000ULL
+
+/* bit0 ~ bit 7 */
+#define ICE_INSET_SMAC            0x0000000000000001ULL
+#define ICE_INSET_DMAC            0x0000000000000002ULL
+#define ICE_INSET_ETHERTYPE       0x0000000000000020ULL
+
+/* bit 8 ~ bit 15 */
+#define ICE_INSET_IPV4_SRC        0x0000000000000100ULL
+#define ICE_INSET_IPV4_DST        0x0000000000000200ULL
+#define ICE_INSET_IPV6_SRC        0x0000000000000400ULL
+#define ICE_INSET_IPV6_DST        0x0000000000000800ULL
+#define ICE_INSET_SRC_PORT        0x0000000000001000ULL
+#define ICE_INSET_DST_PORT        0x0000000000002000ULL
+#define ICE_INSET_ARP             0x0000000000004000ULL
+
+/* bit 16 ~ bit 31 */
+#define ICE_INSET_IPV4_TOS        0x0000000000010000ULL
+#define ICE_INSET_IPV4_PROTO      0x0000000000020000ULL
+#define ICE_INSET_IPV4_TTL        0x0000000000040000ULL
+#define ICE_INSET_IPV6_PROTO      0x0000000000200000ULL
+#define ICE_INSET_IPV6_HOP_LIMIT  0x0000000000400000ULL
+#define ICE_INSET_ICMP            0x0000000001000000ULL
+#define ICE_INSET_ICMP6           0x0000000002000000ULL
+
+/* bit 32 ~ bit 47, tunnel fields */
+#define ICE_INSET_TUN_SMAC           0x0000000100000000ULL
+#define ICE_INSET_TUN_DMAC           0x0000000200000000ULL
+#define ICE_INSET_TUN_IPV4_SRC       0x0000000400000000ULL
+#define ICE_INSET_TUN_IPV4_DST       0x0000000800000000ULL
+#define ICE_INSET_TUN_IPV4_TTL       0x0000001000000000ULL
+#define ICE_INSET_TUN_IPV4_PROTO     0x0000002000000000ULL
+#define ICE_INSET_TUN_IPV6_SRC       0x0000004000000000ULL
+#define ICE_INSET_TUN_IPV6_DST       0x0000008000000000ULL
+#define ICE_INSET_TUN_IPV6_TTL       0x0000010000000000ULL
+#define ICE_INSET_TUN_IPV6_PROTO     0x0000020000000000ULL
+#define ICE_INSET_TUN_SRC_PORT       0x0000040000000000ULL
+#define ICE_INSET_TUN_DST_PORT       0x0000080000000000ULL
+#define ICE_INSET_TUN_ID             0x0000100000000000ULL
+
+/* bit 48 ~ bit 55 */
+#define ICE_INSET_LAST_ETHER_TYPE 0x0001000000000000ULL
+
+#define ICE_FLAG_VLAN_INNER  0x00000001ULL
+#define ICE_FLAG_VLAN_OUTER  0x00000002ULL
+
+#define INSET_ETHER ( \
+	ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)
+#define INSET_MAC_IPV4 ( \
+	ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
+	ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_TOS)
+#define INSET_MAC_IPV4_L4 ( \
+	ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
+	ICE_INSET_IPV4_TOS | ICE_INSET_DST_PORT | \
+	ICE_INSET_SRC_PORT)
+#define INSET_MAC_IPV4_ICMP ( \
+	ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
+	ICE_INSET_IPV4_TOS | ICE_INSET_ICMP)
+#define INSET_MAC_IPV6 ( \
+	ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
+	ICE_INSET_IPV6_PROTO | ICE_INSET_IPV6_HOP_LIMIT)
+#define INSET_MAC_IPV6_L4 ( \
+	ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
+	ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_DST_PORT | \
+	ICE_INSET_SRC_PORT)
+#define INSET_MAC_IPV6_ICMP ( \
+	ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
+	ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_ICMP6)
+#define INSET_TUNNEL_IPV4_TYPE1 ( \
+	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
+	ICE_INSET_TUN_IPV4_TTL | ICE_INSET_TUN_IPV4_PROTO)
+#define INSET_TUNNEL_IPV4_TYPE2 ( \
+	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
+	ICE_INSET_TUN_IPV4_TTL | ICE_INSET_TUN_IPV4_PROTO | \
+	ICE_INSET_TUN_SRC_PORT | ICE_INSET_TUN_DST_PORT)
+#define INSET_TUNNEL_IPV4_TYPE3 ( \
+	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
+	ICE_INSET_TUN_IPV4_TTL | ICE_INSET_ICMP)
+#define INSET_TUNNEL_IPV6_TYPE1 ( \
+	ICE_INSET_TUN_IPV6_SRC | ICE_INSET_TUN_IPV6_DST | \
+	ICE_INSET_TUN_IPV6_TTL | ICE_INSET_TUN_IPV6_PROTO)
+#define INSET_TUNNEL_IPV6_TYPE2 ( \
+	ICE_INSET_TUN_IPV6_SRC | ICE_INSET_TUN_IPV6_DST | \
+	ICE_INSET_TUN_IPV6_TTL | ICE_INSET_TUN_IPV6_PROTO | \
+	ICE_INSET_TUN_SRC_PORT | ICE_INSET_TUN_DST_PORT)
+#define INSET_TUNNEL_IPV6_TYPE3 ( \
+	ICE_INSET_TUN_IPV6_SRC | ICE_INSET_TUN_IPV6_DST | \
+	ICE_INSET_TUN_IPV6_TTL | ICE_INSET_ICMP6)
+
+/* L2 */
+static enum rte_flow_item_type pattern_ethertype[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* non-tunnel IPv4 */
+static enum rte_flow_item_type pattern_ipv4[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_icmp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* non-tunnel IPv6 */
+static enum rte_flow_item_type pattern_ipv6[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv6_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv6_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv6_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv6_icmp6[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_ICMP6,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* IPv4 VXLAN IPv4 */
+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv4[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv4_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv4_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv4_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv4_icmp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* IPv4 VXLAN MAC IPv4 */
+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv4[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv4_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv4_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv4_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv4_icmp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* IPv4 VXLAN IPv6 */
+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv6[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv6_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv6_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv6_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv6_icmp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* IPv4 VXLAN MAC IPv6 */
+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv6[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv6_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv6_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv6_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv6_icmp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* IPv4 NVGRE IPv4 */
+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv4[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv4_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv4_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv4_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv4_icmp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* IPv4 NVGRE MAC IPv4 */
+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv4[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv4_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv4_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv4_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv4_icmp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* IPv4 NVGRE IPv6 */
+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv6[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv6_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv6_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv6_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+
+/* IPv4 NVGRE MAC IPv6 */
+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv6[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv6_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv6_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv6_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static struct ice_flow_pattern ice_supported_patterns[] = {
+	{pattern_ethertype, INSET_ETHER},
+	{pattern_ipv4, INSET_MAC_IPV4},
+	{pattern_ipv4_udp, INSET_MAC_IPV4_L4},
+	{pattern_ipv4_sctp, INSET_MAC_IPV4_L4},
+	{pattern_ipv4_tcp, INSET_MAC_IPV4_L4},
+	{pattern_ipv4_icmp, INSET_MAC_IPV4_ICMP},
+	{pattern_ipv6, INSET_MAC_IPV6},
+	{pattern_ipv6_udp, INSET_MAC_IPV6_L4},
+	{pattern_ipv6_sctp, INSET_MAC_IPV6_L4},
+	{pattern_ipv6_tcp, INSET_MAC_IPV6_L4},
+	{pattern_ipv6_icmp6, INSET_MAC_IPV6_ICMP},
+	{pattern_ipv4_vxlan_ipv4, INSET_TUNNEL_IPV4_TYPE1},
+	{pattern_ipv4_vxlan_ipv4_udp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_vxlan_ipv4_tcp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_vxlan_ipv4_sctp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_vxlan_ipv4_icmp, INSET_TUNNEL_IPV4_TYPE3},
+	{pattern_ipv4_vxlan_eth_ipv4, INSET_TUNNEL_IPV4_TYPE1},
+	{pattern_ipv4_vxlan_eth_ipv4_udp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_vxlan_eth_ipv4_tcp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_vxlan_eth_ipv4_sctp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_vxlan_eth_ipv4_icmp, INSET_TUNNEL_IPV4_TYPE3},
+	{pattern_ipv4_vxlan_ipv6, INSET_TUNNEL_IPV6_TYPE1},
+	{pattern_ipv4_vxlan_ipv6_udp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_vxlan_ipv6_tcp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_vxlan_ipv6_sctp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_vxlan_ipv6_icmp, INSET_TUNNEL_IPV6_TYPE3},
+	{pattern_ipv4_vxlan_eth_ipv6, INSET_TUNNEL_IPV6_TYPE1},
+	{pattern_ipv4_vxlan_eth_ipv6_udp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_vxlan_eth_ipv6_tcp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_vxlan_eth_ipv6_sctp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_vxlan_eth_ipv6_icmp, INSET_TUNNEL_IPV6_TYPE3},
+	{pattern_ipv4_nvgre_ipv4, INSET_TUNNEL_IPV4_TYPE1},
+	{pattern_ipv4_nvgre_ipv4_udp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_nvgre_ipv4_tcp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_nvgre_ipv4_sctp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_nvgre_ipv4_icmp, INSET_TUNNEL_IPV4_TYPE3},
+	{pattern_ipv4_nvgre_eth_ipv4, INSET_TUNNEL_IPV4_TYPE1},
+	{pattern_ipv4_nvgre_eth_ipv4_udp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_nvgre_eth_ipv4_tcp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_nvgre_eth_ipv4_sctp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_nvgre_eth_ipv4_icmp, INSET_TUNNEL_IPV4_TYPE3},
+	{pattern_ipv4_nvgre_ipv6, INSET_TUNNEL_IPV6_TYPE1},
+	{pattern_ipv4_nvgre_ipv6_udp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_nvgre_ipv6_tcp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_nvgre_ipv6_sctp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_nvgre_eth_ipv6, INSET_TUNNEL_IPV6_TYPE1},
+	{pattern_ipv4_nvgre_eth_ipv6_udp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_nvgre_eth_ipv6_tcp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_nvgre_eth_ipv6_sctp, INSET_TUNNEL_IPV6_TYPE2},
+};
+
+#endif
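
The input-set table above drives the validation done in ice_flow_valid_inset():
ice_get_flow_field() collects one ICE_INSET_* bit per fully masked field, and
the rule is rejected when any collected bit falls outside the set allowed for
the matched pattern. A small worked example, reusing only the macros defined
in this header (the helper is illustrative, not part of the patch):

#include <stdbool.h>
#include <stdint.h>

/* Worked example of the input-set check. For pattern eth/ipv4/udp the
 * allowed set is INSET_MAC_IPV4_L4; additionally masking the IPv4 TTL
 * asks for a field outside that set, so such a rule is rejected.
 */
static bool
inset_allows(uint64_t allowed, uint64_t wanted)
{
	/* mirrors "if (!fields || fields & (~inset))" in ice_flow_valid_inset() */
	return wanted != 0 && (wanted & ~allowed) == 0;
}

/* inset_allows(INSET_MAC_IPV4_L4,
 *              ICE_INSET_IPV4_DST | ICE_INSET_DST_PORT)  -> true
 * inset_allows(INSET_MAC_IPV4_L4,
 *              ICE_INSET_IPV4_DST | ICE_INSET_IPV4_TTL)  -> false
 */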
diff --git a/drivers/net/ice/meson.build b/drivers/net/ice/meson.build
index 8697676..7f16647 100644
--- a/drivers/net/ice/meson.build
+++ b/drivers/net/ice/meson.build
@@ -7,7 +7,8 @@ objs = [base_objs]
 sources = files(
 	'ice_ethdev.c',
 	'ice_rxtx.c',
-	'ice_switch_filter.c'
+	'ice_switch_filter.c',
+	'ice_generic_flow.c'
 	)
 
 deps += ['hash']
-- 
2.9.5
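
With the ops above wired in, an application reaches them through the regular
rte_flow calls. A minimal, illustrative sketch (addresses, the UDP port and
the queue index are invented for the example, and error handling is reduced
to the bare minimum):

#include <rte_byteorder.h>
#include <rte_flow.h>

/* Illustrative only: steer ingress IPv4/UDP packets with a fully masked
 * destination address and destination port to Rx queue 1. This goes
 * through ice_flow_validate()/ice_flow_create() via the generic API.
 */
static struct rte_flow *
create_udp_queue_flow(uint16_t port_id)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_ipv4 ip_spec = {
		.hdr.dst_addr = RTE_BE32(0xC0A80001), /* 192.168.0.1 */
	};
	struct rte_flow_item_ipv4 ip_mask = {
		.hdr.dst_addr = RTE_BE32(0xFFFFFFFF),
	};
	struct rte_flow_item_udp udp_spec = {
		.hdr.dst_port = RTE_BE16(4789),
	};
	struct rte_flow_item_udp udp_mask = {
		.hdr.dst_port = RTE_BE16(0xFFFF),
	};
	struct rte_flow_action_queue queue = { .index = 1 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ip_spec, .mask = &ip_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
		  .spec = &udp_spec, .mask = &udp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;

	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) < 0)
		return NULL;
	return rte_flow_create(port_id, &attr, pattern, actions, &err);
}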


^ permalink raw reply	[flat|nested] 73+ messages in thread

* [dpdk-dev] [PATCH v7 3/3] net/ice: add UDP tunnel port support
  2019-06-25  6:48 ` [dpdk-dev] [PATCH v7 0/3] Enable rte_flow API in ice driver Qiming Yang
  2019-06-25  6:48   ` [dpdk-dev] [PATCH v7 1/3] net/ice: enable switch filter Qiming Yang
  2019-06-25  6:48   ` [dpdk-dev] [PATCH v7 2/3] net/ice: add generic flow API Qiming Yang
@ 2019-06-25  6:48   ` Qiming Yang
  2019-06-26  7:07     ` Xing, Beilei
  2019-06-25 14:58   ` [dpdk-dev] [PATCH v7 0/3] Enable rte_flow API in ice driver Aaron Conole
  2019-06-26 15:52   ` Ye Xiaolong
  4 siblings, 1 reply; 73+ messages in thread
From: Qiming Yang @ 2019-06-25  6:48 UTC (permalink / raw)
  To: dev; +Cc: Qiming Yang

Enabled UDP tunnel port add and delete functions.

Signed-off-by: Qiming Yang <qiming.yang@intel.com>
---
 drivers/net/ice/ice_ethdev.c | 54 ++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 54 insertions(+)

diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index 8ee06d1..949d293 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -88,6 +88,10 @@ static int ice_dev_filter_ctrl(struct rte_eth_dev *dev,
 			enum rte_filter_type filter_type,
 			enum rte_filter_op filter_op,
 			void *arg);
+static int ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
+			struct rte_eth_udp_tunnel *udp_tunnel);
+static int ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
+			struct rte_eth_udp_tunnel *udp_tunnel);
 
 static const struct rte_pci_id pci_id_ice_map[] = {
 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_BACKPLANE) },
@@ -147,6 +151,8 @@ static const struct eth_dev_ops ice_eth_dev_ops = {
 	.xstats_get_names             = ice_xstats_get_names,
 	.xstats_reset                 = ice_stats_reset,
 	.filter_ctrl                  = ice_dev_filter_ctrl,
+	.udp_tunnel_port_add          = ice_dev_udp_tunnel_port_add,
+	.udp_tunnel_port_del          = ice_dev_udp_tunnel_port_del,
 };
 
 /* store statistics names and its offset in stats structure */
@@ -3665,6 +3671,54 @@ ice_dev_filter_ctrl(struct rte_eth_dev *dev,
 	return ret;
 }
 
+/* Add UDP tunneling port */
+static int
+ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
+			     struct rte_eth_udp_tunnel *udp_tunnel)
+{
+	int ret = 0;
+	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	if (udp_tunnel == NULL)
+		return -EINVAL;
+
+	switch (udp_tunnel->prot_type) {
+	case RTE_TUNNEL_TYPE_VXLAN:
+		ret = ice_create_tunnel(hw, TNL_VXLAN, udp_tunnel->udp_port);
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "Invalid tunnel type");
+		ret = -1;
+		break;
+	}
+
+	return ret;
+}
+
+/* Delete UDP tunneling port */
+static int
+ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
+			     struct rte_eth_udp_tunnel *udp_tunnel)
+{
+	int ret = 0;
+	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	if (udp_tunnel == NULL)
+		return -EINVAL;
+
+	switch (udp_tunnel->prot_type) {
+	case RTE_TUNNEL_TYPE_VXLAN:
+		ret = ice_destroy_tunnel(hw, udp_tunnel->udp_port, 0);
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "Invalid tunnel type");
+		ret = -1;
+		break;
+	}
+
+	return ret;
+}
+
 static int
 ice_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 	      struct rte_pci_device *pci_dev)
-- 
2.9.5
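
From the application side the new callbacks are reached through the ethdev
tunnel-port API. An illustrative sketch (port id and error handling are
placeholders):

#include <rte_ethdev.h>

/* Illustrative sketch: register the well-known VXLAN UDP port (4789) so
 * the device recognizes VXLAN-encapsulated packets. The call lands in
 * ice_dev_udp_tunnel_port_add() through the .udp_tunnel_port_add callback
 * added above.
 */
static int
add_vxlan_port(uint16_t port_id)
{
	struct rte_eth_udp_tunnel tunnel = {
		.udp_port = 4789,
		.prot_type = RTE_TUNNEL_TYPE_VXLAN,
	};

	return rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
}

The matching rte_eth_dev_udp_tunnel_port_delete() call reaches the delete
callback in the same way.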


^ permalink raw reply	[flat|nested] 73+ messages in thread

* Re: [dpdk-dev] [PATCH v7 0/3] Enable rte_flow API in ice driver
  2019-06-25  6:48 ` [dpdk-dev] [PATCH v7 0/3] Enable rte_flow API in ice driver Qiming Yang
                     ` (2 preceding siblings ...)
  2019-06-25  6:48   ` [dpdk-dev] [PATCH v7 3/3] net/ice: add UDP tunnel port support Qiming Yang
@ 2019-06-25 14:58   ` Aaron Conole
  2019-06-26  1:52     ` Yang, Qiming
  2019-06-26 15:52   ` Ye Xiaolong
  4 siblings, 1 reply; 73+ messages in thread
From: Aaron Conole @ 2019-06-25 14:58 UTC (permalink / raw)
  To: Qiming Yang; +Cc: dev

Qiming Yang <qiming.yang@intel.com> writes:

> This patch set enables the backend of rte_flow, and the generic
> filter related functions in ice driver. Supported flows include
> ipv4, tcpv4, udpv4, ipv6, tcpv6, udpv6, tunnel, etc. This patch
> set depends on shared code update.
>
> ---
> v2 changes:
>  - added UDP tunnel port support.
>  - fixed compile issue.
>  - added document update.
> v3 changes:
>  - removed redundant parser.
>  - added License.
>  - added VXLAN and NVGRE item support.
> v4 changes:
>  - fixed some typos.
> v5 changes:
>  - fixed checkpatch issues.
> v6 changes:
>  - fixed one uninitialized variable issue.
> v7 changes:
>  - fixed queue action validation.

Seems there are still compilation problems - is some dependency not
correctly updated?

https://travis-ci.com/ovsrobot/dpdk/jobs/210680563

> Qiming Yang (2):
>   net/ice: add generic flow API
>   net/ice: add UDP tunnel port support
>
> wei zhao (1):
>   net/ice: enable switch filter
>
>  drivers/net/ice/Makefile            |   2 +
>  drivers/net/ice/ice_ethdev.c        | 116 ++++++
>  drivers/net/ice/ice_ethdev.h        |  12 +
>  drivers/net/ice/ice_generic_flow.c  | 696 ++++++++++++++++++++++++++++++++++++
>  drivers/net/ice/ice_generic_flow.h  | 614 +++++++++++++++++++++++++++++++
>  drivers/net/ice/ice_switch_filter.c | 512 ++++++++++++++++++++++++++
>  drivers/net/ice/ice_switch_filter.h |  24 ++
>  drivers/net/ice/meson.build         |   4 +-
>  8 files changed, 1979 insertions(+), 1 deletion(-)
>  create mode 100644 drivers/net/ice/ice_generic_flow.c
>  create mode 100644 drivers/net/ice/ice_generic_flow.h
>  create mode 100644 drivers/net/ice/ice_switch_filter.c
>  create mode 100644 drivers/net/ice/ice_switch_filter.h

^ permalink raw reply	[flat|nested] 73+ messages in thread

* Re: [dpdk-dev] [PATCH v7 0/3] Enable rte_flow API in ice driver
  2019-06-25 14:58   ` [dpdk-dev] [PATCH v7 0/3] Enable rte_flow API in ice driver Aaron Conole
@ 2019-06-26  1:52     ` Yang, Qiming
  2019-06-26  7:42       ` Ferruh Yigit
  0 siblings, 1 reply; 73+ messages in thread
From: Yang, Qiming @ 2019-06-26  1:52 UTC (permalink / raw)
  To: Aaron Conole; +Cc: dev

Hi,
It seems these errors are induced by the lack of some base code that is merged in the dpdk-next-net-intel branch.
It's not this patch set's issue.

Qiming
-----Original Message-----
From: Aaron Conole [mailto:aconole@redhat.com] 
Sent: Tuesday, June 25, 2019 10:58 PM
To: Yang, Qiming <qiming.yang@intel.com>
Cc: dev@dpdk.org
Subject: Re: [dpdk-dev] [PATCH v7 0/3] Enable rte_flow API in ice driver

Qiming Yang <qiming.yang@intel.com> writes:

> This patch set enables the backend of rte_flow, and the generic filter 
> related functions in ice driver. Supported flows include ipv4, tcpv4, 
> udpv4, ipv6, tcpv6, udpv6, tunnel, etc. This patch set depends on 
> shared code update.
>
> ---
> v2 changes:
>  - added UDP tunnel port support.
>  - fixed compile issue.
>  - added document update.
> v3 changes:
>  - removed redundant parser.
>  - added License.
>  - added VXLAN and NVGRE item support.
> v4 changes:
>  - fixed some typos.
> v5 changes:
>  - fixed checkpatch issues.
> v6 changes:
>  - fixed one uninitialized variable issue.
> v7 changes:
>  - fixed queue action validation.

Seems there are still compilation problems - is some dependency not correctly updated?

https://travis-ci.com/ovsrobot/dpdk/jobs/210680563

> Qiming Yang (2):
>   net/ice: add generic flow API
>   net/ice: add UDP tunnel port support
>
> wei zhao (1):
>   net/ice: enable switch filter
>
>  drivers/net/ice/Makefile            |   2 +
>  drivers/net/ice/ice_ethdev.c        | 116 ++++++
>  drivers/net/ice/ice_ethdev.h        |  12 +
>  drivers/net/ice/ice_generic_flow.c  | 696 
> ++++++++++++++++++++++++++++++++++++
>  drivers/net/ice/ice_generic_flow.h  | 614 
> +++++++++++++++++++++++++++++++  drivers/net/ice/ice_switch_filter.c | 
> 512 ++++++++++++++++++++++++++  drivers/net/ice/ice_switch_filter.h |  24 ++
>  drivers/net/ice/meson.build         |   4 +-
>  8 files changed, 1979 insertions(+), 1 deletion(-)  create mode 
> 100644 drivers/net/ice/ice_generic_flow.c
>  create mode 100644 drivers/net/ice/ice_generic_flow.h
>  create mode 100644 drivers/net/ice/ice_switch_filter.c
>  create mode 100644 drivers/net/ice/ice_switch_filter.h

^ permalink raw reply	[flat|nested] 73+ messages in thread

* Re: [dpdk-dev] [PATCH v7 3/3] net/ice: add UDP tunnel port support
  2019-06-25  6:48   ` [dpdk-dev] [PATCH v7 3/3] net/ice: add UDP tunnel port support Qiming Yang
@ 2019-06-26  7:07     ` Xing, Beilei
  0 siblings, 0 replies; 73+ messages in thread
From: Xing, Beilei @ 2019-06-26  7:07 UTC (permalink / raw)
  To: Yang, Qiming, dev; +Cc: Yang, Qiming



> -----Original Message-----
> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Qiming Yang
> Sent: Tuesday, June 25, 2019 2:48 PM
> To: dev@dpdk.org
> Cc: Yang, Qiming <qiming.yang@intel.com>
> Subject: [dpdk-dev] [PATCH v7 3/3] net/ice: add UDP tunnel port support
> 
> Enabled UDP tunnel port add and delete functions.
> 
> Signed-off-by: Qiming Yang <qiming.yang@intel.com>
> ---
>  drivers/net/ice/ice_ethdev.c | 54
> ++++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 54 insertions(+)
> 
> diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c index
> 8ee06d1..949d293 100644
> --- a/drivers/net/ice/ice_ethdev.c
> +++ b/drivers/net/ice/ice_ethdev.c
> @@ -88,6 +88,10 @@ static int ice_dev_filter_ctrl(struct rte_eth_dev *dev,
>  			enum rte_filter_type filter_type,
>  			enum rte_filter_op filter_op,
>  			void *arg);
> +static int ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
> +			struct rte_eth_udp_tunnel *udp_tunnel); static int
> +ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
> +			struct rte_eth_udp_tunnel *udp_tunnel);
> 
>  static const struct rte_pci_id pci_id_ice_map[] = {
>  	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID,
> ICE_DEV_ID_E810C_BACKPLANE) }, @@ -147,6 +151,8 @@ static const struct
> eth_dev_ops ice_eth_dev_ops = {
>  	.xstats_get_names             = ice_xstats_get_names,
>  	.xstats_reset                 = ice_stats_reset,
>  	.filter_ctrl                  = ice_dev_filter_ctrl,
> +	.udp_tunnel_port_add          = ice_dev_udp_tunnel_port_add,
> +	.udp_tunnel_port_del          = ice_dev_udp_tunnel_port_del,
>  };
> 
>  /* store statistics names and its offset in stats structure */ @@ -3665,6
> +3671,54 @@ ice_dev_filter_ctrl(struct rte_eth_dev *dev,
>  	return ret;
>  }
> 
> +/* Add UDP tunneling port */
> +static int
> +ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
> +			     struct rte_eth_udp_tunnel *udp_tunnel) {
> +	int ret = 0;
> +	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data-
> >dev_private);
> +
> +	if (udp_tunnel == NULL)
> +		return -EINVAL;
> +
> +	switch (udp_tunnel->prot_type) {
> +	case RTE_TUNNEL_TYPE_VXLAN:
> +		ret = ice_create_tunnel(hw, TNL_VXLAN, udp_tunnel-
> >udp_port);
> +		break;
> +	default:
> +		PMD_DRV_LOG(ERR, "Invalid tunnel type");
> +		ret = -1;

How about returning -EINVAL here?

> +		break;
> +	}
> +
> +	return ret;
> +}
> +
> +/* Delete UDP tunneling port */
> +static int
> +ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
> +			     struct rte_eth_udp_tunnel *udp_tunnel) {
> +	int ret = 0;
> +	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data-
> >dev_private);
> +
> +	if (udp_tunnel == NULL)
> +		return -EINVAL;
> +
> +	switch (udp_tunnel->prot_type) {
> +	case RTE_TUNNEL_TYPE_VXLAN:
> +		ret = ice_destroy_tunnel(hw, udp_tunnel->udp_port, 0);
> +		break;
> +	default:
> +		PMD_DRV_LOG(ERR, "Invalid tunnel type");
> +		ret = -1;
Ditto.

> +		break;
> +	}
> +
> +	return ret;
> +}
> +
>  static int
>  ice_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
>  	      struct rte_pci_device *pci_dev)
> --
> 2.9.5


^ permalink raw reply	[flat|nested] 73+ messages in thread

* Re: [dpdk-dev] [PATCH v7 0/3] Enable rte_flow API in ice driver
  2019-06-26  1:52     ` Yang, Qiming
@ 2019-06-26  7:42       ` Ferruh Yigit
  2019-06-26  8:26         ` Yang, Qiming
  0 siblings, 1 reply; 73+ messages in thread
From: Ferruh Yigit @ 2019-06-26  7:42 UTC (permalink / raw)
  To: Yang, Qiming, Aaron Conole; +Cc: dev

On 6/26/2019 2:52 AM, Yang, Qiming wrote:
> Hi,
> It seems these errors are induced by the lack of some base code that is merged in the dpdk-next-net-intel branch.
> It's not this patch set's issue.

If there is a dependency on another patch set, can you please put this
information into the cover letter?

> 
> Qiming
> -----Original Message-----
> From: Aaron Conole [mailto:aconole@redhat.com] 
> Sent: Tuesday, June 25, 2019 10:58 PM
> To: Yang, Qiming <qiming.yang@intel.com>
> Cc: dev@dpdk.org
> Subject: Re: [dpdk-dev] [PATCH v7 0/3] Enable rte_flow API in ice driver
> 
> Qiming Yang <qiming.yang@intel.com> writes:
> 
>> This patch set enables the backend of rte_flow, and the generic filter 
>> related functions in ice driver. Supported flows include ipv4, tcpv4, 
>> udpv4, ipv6, tcpv6, udpv6, tunnel, etc. This patch set depends on 
>> shared code update.
>>
>> ---
>> v2 changes:
>>  - added UDP tunnel port support.
>>  - fixed compile issue.
>>  - added document update.
>> v3 changes:
>>  - removed redundant parser.
>>  - added License.
>>  - added VXLAN and NVGRE item support.
>> v4 changes:
>>  - fixed some typos.
>> v5 changes:
>>  - fixed checkpatch issues.
>> v6 changes:
>>  - fixed one uninitialized variable issue.
>> v7 changes:
>>  - fixed queue action validation.
> 
> Seems there are still compilation problems - is some dependency not correctly updated?
> 
> https://travis-ci.com/ovsrobot/dpdk/jobs/210680563
> 
>> Qiming Yang (2):
>>   net/ice: add generic flow API
>>   net/ice: add UDP tunnel port support
>>
>> wei zhao (1):
>>   net/ice: enable switch filter
>>
>>  drivers/net/ice/Makefile            |   2 +
>>  drivers/net/ice/ice_ethdev.c        | 116 ++++++
>>  drivers/net/ice/ice_ethdev.h        |  12 +
>>  drivers/net/ice/ice_generic_flow.c  | 696 
>> ++++++++++++++++++++++++++++++++++++
>>  drivers/net/ice/ice_generic_flow.h  | 614 
>> +++++++++++++++++++++++++++++++  drivers/net/ice/ice_switch_filter.c | 
>> 512 ++++++++++++++++++++++++++  drivers/net/ice/ice_switch_filter.h |  24 ++
>>  drivers/net/ice/meson.build         |   4 +-
>>  8 files changed, 1979 insertions(+), 1 deletion(-)  create mode 
>> 100644 drivers/net/ice/ice_generic_flow.c
>>  create mode 100644 drivers/net/ice/ice_generic_flow.h
>>  create mode 100644 drivers/net/ice/ice_switch_filter.c
>>  create mode 100644 drivers/net/ice/ice_switch_filter.h


^ permalink raw reply	[flat|nested] 73+ messages in thread

* [dpdk-dev] [PATCH v8 0/3] Enable rte_flow API in ice driver
  2019-06-03  9:05 [dpdk-dev] [PATCH 0/2] Enable rte_flow API in ice driver Qiming Yang
                   ` (7 preceding siblings ...)
  2019-06-25  6:48 ` [dpdk-dev] [PATCH v7 0/3] Enable rte_flow API in ice driver Qiming Yang
@ 2019-06-26  8:03 ` Qiming Yang
  2019-06-26  8:03   ` [dpdk-dev] [PATCH v8 1/3] net/ice: enable switch filter Qiming Yang
                     ` (2 more replies)
  2019-06-26  8:58 ` [dpdk-dev] [PATCH v8 0/4] Enable rte_flow API in ice driver Qiming Yang
  2019-07-01  8:32 ` [dpdk-dev] [PATCH v9 0/3] " Qiming Yang
  10 siblings, 3 replies; 73+ messages in thread
From: Qiming Yang @ 2019-06-26  8:03 UTC (permalink / raw)
  To: dev; +Cc: Qiming Yang

This patch set enables the backend of rte_flow, and the generic
filter related functions in ice driver. Supported flows include
ipv4, tcpv4, udpv4, ipv6, tcpv6, udpv6, tunnel, etc.

This patch set depends on patch set:
net/ice: shared code update.

---
v2 changes:
 - added UDP tunnel port support.
 - fixed compile issue.
 - added document update.
v3 changes:
 - removed redundant parser.
 - added License.
 - added VXLAN and NVGRE item support.
v4 changes:
 - fixed some typos.
v5 changes:
 - fixed checkpatch issues.
v6 changes:
 - fixed one uninitialized variable issue.
v7 changes:
 - fixed queue action validation.
v8 changes:
 - optimized some return values.
 - code reorganization.

Qiming Yang (2):
  net/ice: add generic flow API
  net/ice: add UDP tunnel port support

wei zhao (1):
  net/ice: enable switch filter

 drivers/net/ice/Makefile            |   2 +
 drivers/net/ice/ice_ethdev.c        | 116 ++++++
 drivers/net/ice/ice_ethdev.h        |  12 +
 drivers/net/ice/ice_generic_flow.c  | 696 ++++++++++++++++++++++++++++++++++++
 drivers/net/ice/ice_generic_flow.h  | 614 +++++++++++++++++++++++++++++++
 drivers/net/ice/ice_switch_filter.c | 503 ++++++++++++++++++++++++++
 drivers/net/ice/ice_switch_filter.h |  24 ++
 drivers/net/ice/meson.build         |   4 +-
 8 files changed, 1970 insertions(+), 1 deletion(-)
 create mode 100644 drivers/net/ice/ice_generic_flow.c
 create mode 100644 drivers/net/ice/ice_generic_flow.h
 create mode 100644 drivers/net/ice/ice_switch_filter.c
 create mode 100644 drivers/net/ice/ice_switch_filter.h

-- 
2.9.5


^ permalink raw reply	[flat|nested] 73+ messages in thread

* [dpdk-dev] [PATCH v8 1/3] net/ice: enable switch filter
  2019-06-26  8:03 ` [dpdk-dev] [PATCH v8 " Qiming Yang
@ 2019-06-26  8:03   ` Qiming Yang
  2019-06-26  8:03   ` [dpdk-dev] [PATCH v8 2/3] net/ice: add generic flow API Qiming Yang
  2019-06-26  8:03   ` [dpdk-dev] [PATCH v8 3/3] net/ice: add UDP tunnel port support Qiming Yang
  2 siblings, 0 replies; 73+ messages in thread
From: Qiming Yang @ 2019-06-26  8:03 UTC (permalink / raw)
  To: dev; +Cc: wei zhao

From: wei zhao <wei.zhao1@intel.com>

The patch enables the backend of rte_flow. It transfers
rte_flow_xxx to device specific data structure and
configures packet process engine's binary classifier
(switch) properly.

Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
---
 drivers/net/ice/Makefile            |   1 +
 drivers/net/ice/ice_ethdev.c        |  18 ++
 drivers/net/ice/ice_ethdev.h        |   7 +
 drivers/net/ice/ice_switch_filter.c | 503 ++++++++++++++++++++++++++++++++++++
 drivers/net/ice/ice_switch_filter.h |  24 ++
 drivers/net/ice/meson.build         |   3 +-
 6 files changed, 555 insertions(+), 1 deletion(-)
 create mode 100644 drivers/net/ice/ice_switch_filter.c
 create mode 100644 drivers/net/ice/ice_switch_filter.h

diff --git a/drivers/net/ice/Makefile b/drivers/net/ice/Makefile
index 0e5c55e..b10d826 100644
--- a/drivers/net/ice/Makefile
+++ b/drivers/net/ice/Makefile
@@ -60,6 +60,7 @@ ifeq ($(CONFIG_RTE_ARCH_X86), y)
 SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_rxtx_vec_sse.c
 endif
 
+SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_switch_filter.c
 ifeq ($(findstring RTE_MACHINE_CPUFLAG_AVX2,$(CFLAGS)),RTE_MACHINE_CPUFLAG_AVX2)
 	CC_AVX2_SUPPORT=1
 else
diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index 203d0a9..a94aa7e 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -1364,6 +1364,21 @@ static int ice_load_pkg(struct rte_eth_dev *dev)
 	return err;
 }
 
+static void
+ice_base_queue_get(struct ice_pf *pf)
+{
+	uint32_t reg;
+	struct ice_hw *hw = ICE_PF_TO_HW(pf);
+
+	reg = ICE_READ_REG(hw, PFLAN_RX_QALLOC);
+	if (reg & PFLAN_RX_QALLOC_VALID_M) {
+		pf->base_queue = reg & PFLAN_RX_QALLOC_FIRSTQ_M;
+	} else {
+		PMD_INIT_LOG(WARNING, "Failed to get Rx base queue"
+					" index");
+	}
+}
+
 static int
 ice_dev_init(struct rte_eth_dev *dev)
 {
@@ -1460,6 +1475,9 @@ ice_dev_init(struct rte_eth_dev *dev)
 	/* enable uio intr after callback register */
 	rte_intr_enable(intr_handle);
 
+	/* get base queue pairs index  in the device */
+	ice_base_queue_get(pf);
+
 	return 0;
 
 err_pf_setup:
diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index 1385afa..50b966c 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -234,6 +234,12 @@ struct ice_vsi {
 	bool offset_loaded;
 };
 
+/* Struct to store flow created. */
+struct rte_flow {
+	TAILQ_ENTRY(rte_flow) node;
+	void *rule;
+};
+
 struct ice_pf {
 	struct ice_adapter *adapter; /* The adapter this PF associate to */
 	struct ice_vsi *main_vsi; /* pointer to main VSI structure */
@@ -252,6 +258,7 @@ struct ice_pf {
 	uint16_t hash_lut_size; /* The size of hash lookup table */
 	uint16_t lan_nb_qp_max;
 	uint16_t lan_nb_qps; /* The number of queue pairs of LAN */
+	uint16_t base_queue; /* The base queue pairs index  in the device */
 	struct ice_hw_port_stats stats_offset;
 	struct ice_hw_port_stats stats;
 	/* internal packet statistics, it should be excluded from the total */
diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
new file mode 100644
index 0000000..e88a555
--- /dev/null
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -0,0 +1,503 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_eth_ctrl.h>
+#include <rte_tailq.h>
+#include <rte_flow_driver.h>
+
+#include "ice_logs.h"
+#include "base/ice_type.h"
+#include "ice_switch_filter.h"
+
+static int
+ice_parse_switch_filter(const struct rte_flow_item pattern[],
+			const struct rte_flow_action actions[],
+			struct rte_flow_error *error,
+			struct ice_adv_lkup_elem *list,
+			uint16_t *lkups_num,
+			enum ice_sw_tunnel_type tun_type)
+{
+	const struct rte_flow_item *item = pattern;
+	enum rte_flow_item_type item_type;
+	const struct rte_flow_item_eth *eth_spec, *eth_mask;
+	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
+	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
+	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
+	const struct rte_flow_item_udp *udp_spec, *udp_mask;
+	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
+	const struct rte_flow_item_nvgre  *nvgre_spec, *nvgre_mask;
+	const struct rte_flow_item_vxlan  *vxlan_spec, *vxlan_mask;
+	uint16_t j, t = 0;
+	uint16_t tunnel_valid = 0;
+
+	for (item = pattern; item->type !=
+			RTE_FLOW_ITEM_TYPE_END; item++) {
+		item_type = item->type;
+
+		switch (item_type) {
+		case RTE_FLOW_ITEM_TYPE_ETH:
+			eth_spec = item->spec;
+			eth_mask = item->mask;
+			if (eth_spec && eth_mask) {
+				list[t].type = (tun_type == ICE_NON_TUN) ?
+					ICE_MAC_OFOS : ICE_MAC_IL;
+				struct ice_ether_hdr *h;
+				struct ice_ether_hdr *m;
+				h = &list[t].h_u.eth_hdr;
+				m = &list[t].m_u.eth_hdr;
+				for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
+					if (eth_mask->src.addr_bytes[j] ==
+								UINT8_MAX) {
+						h->src_addr[j] =
+						eth_spec->src.addr_bytes[j];
+						m->src_addr[j] =
+						eth_mask->src.addr_bytes[j];
+					}
+					if (eth_mask->dst.addr_bytes[j] ==
+								UINT8_MAX) {
+						h->dst_addr[j] =
+						eth_spec->dst.addr_bytes[j];
+						m->dst_addr[j] =
+						eth_mask->dst.addr_bytes[j];
+					}
+				}
+				if (eth_mask->type == UINT16_MAX) {
+					h->ethtype_id =
+					rte_be_to_cpu_16(eth_spec->type);
+					m->ethtype_id = UINT16_MAX;
+				}
+				t++;
+			} else if (!eth_spec && !eth_mask) {
+				list[t].type = (tun_type == ICE_NON_TUN) ?
+					ICE_MAC_OFOS : ICE_MAC_IL;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_IPV4:
+			ipv4_spec = item->spec;
+			ipv4_mask = item->mask;
+			if (ipv4_spec && ipv4_mask) {
+				list[t].type = (tun_type == ICE_NON_TUN) ?
+					ICE_IPV4_OFOS : ICE_IPV4_IL;
+				if (ipv4_mask->hdr.src_addr == UINT32_MAX) {
+					list[t].h_u.ipv4_hdr.src_addr =
+						ipv4_spec->hdr.src_addr;
+					list[t].m_u.ipv4_hdr.src_addr =
+						UINT32_MAX;
+				}
+				if (ipv4_mask->hdr.dst_addr == UINT32_MAX) {
+					list[t].h_u.ipv4_hdr.dst_addr =
+						ipv4_spec->hdr.dst_addr;
+					list[t].m_u.ipv4_hdr.dst_addr =
+						UINT32_MAX;
+				}
+				if (ipv4_mask->hdr.time_to_live == UINT8_MAX) {
+					list[t].h_u.ipv4_hdr.time_to_live =
+						ipv4_spec->hdr.time_to_live;
+					list[t].m_u.ipv4_hdr.time_to_live =
+						UINT8_MAX;
+				}
+				if (ipv4_mask->hdr.next_proto_id == UINT8_MAX) {
+					list[t].h_u.ipv4_hdr.protocol =
+						ipv4_spec->hdr.next_proto_id;
+					list[t].m_u.ipv4_hdr.protocol =
+						UINT8_MAX;
+				}
+				if (ipv4_mask->hdr.type_of_service ==
+						UINT8_MAX) {
+					list[t].h_u.ipv4_hdr.tos =
+						ipv4_spec->hdr.type_of_service;
+					list[t].m_u.ipv4_hdr.tos = UINT8_MAX;
+				}
+				t++;
+			} else if (!ipv4_spec && !ipv4_mask) {
+				list[t].type = (tun_type == ICE_NON_TUN) ?
+					ICE_IPV4_OFOS : ICE_IPV4_IL;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_IPV6:
+			ipv6_spec = item->spec;
+			ipv6_mask = item->mask;
+			if (ipv6_spec && ipv6_mask) {
+				list[t].type = (tun_type == ICE_NON_TUN) ?
+					ICE_IPV6_OFOS : ICE_IPV6_IL;
+				struct ice_ipv6_hdr *f;
+				struct ice_ipv6_hdr *s;
+				f = &list[t].h_u.ice_ipv6_ofos_hdr;
+				s = &list[t].m_u.ice_ipv6_ofos_hdr;
+				for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
+					if (ipv6_mask->hdr.src_addr[j] ==
+								UINT8_MAX) {
+						f->src_addr[j] =
+						ipv6_spec->hdr.src_addr[j];
+						s->src_addr[j] =
+						ipv6_mask->hdr.src_addr[j];
+					}
+					if (ipv6_mask->hdr.dst_addr[j] ==
+								UINT8_MAX) {
+						f->dst_addr[j] =
+						ipv6_spec->hdr.dst_addr[j];
+						s->dst_addr[j] =
+						ipv6_mask->hdr.dst_addr[j];
+					}
+				}
+				if (ipv6_mask->hdr.proto == UINT8_MAX) {
+					f->next_hdr =
+						ipv6_spec->hdr.proto;
+					s->next_hdr = UINT8_MAX;
+				}
+				if (ipv6_mask->hdr.hop_limits == UINT8_MAX) {
+					f->hop_limit =
+						ipv6_spec->hdr.hop_limits;
+					s->hop_limit = UINT8_MAX;
+				}
+				t++;
+			} else if (!ipv6_spec && !ipv6_mask) {
+				list[t].type = (tun_type == ICE_NON_TUN) ?
+					ICE_IPV6_OFOS : ICE_IPV6_IL;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_UDP:
+			udp_spec = item->spec;
+			udp_mask = item->mask;
+			if (udp_spec && udp_mask) {
+				if (tun_type == ICE_SW_TUN_VXLAN &&
+						tunnel_valid == 0)
+					list[t].type = ICE_UDP_OF;
+				else
+					list[t].type = ICE_UDP_ILOS;
+				if (udp_mask->hdr.src_port == UINT16_MAX) {
+					list[t].h_u.l4_hdr.src_port =
+						udp_spec->hdr.src_port;
+					list[t].m_u.l4_hdr.src_port =
+						udp_mask->hdr.src_port;
+				}
+				if (udp_mask->hdr.dst_port == UINT16_MAX) {
+					list[t].h_u.l4_hdr.dst_port =
+						udp_spec->hdr.dst_port;
+					list[t].m_u.l4_hdr.dst_port =
+						udp_mask->hdr.dst_port;
+				}
+				t++;
+			} else if (!udp_spec && !udp_mask) {
+				list[t].type = ICE_UDP_ILOS;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_TCP:
+			tcp_spec = item->spec;
+			tcp_mask = item->mask;
+			if (tcp_spec && tcp_mask) {
+				list[t].type = ICE_TCP_IL;
+				if (tcp_mask->hdr.src_port == UINT16_MAX) {
+					list[t].h_u.l4_hdr.src_port =
+						tcp_spec->hdr.src_port;
+					list[t].m_u.l4_hdr.src_port =
+						tcp_mask->hdr.src_port;
+				}
+				if (tcp_mask->hdr.dst_port == UINT16_MAX) {
+					list[t].h_u.l4_hdr.dst_port =
+						tcp_spec->hdr.dst_port;
+					list[t].m_u.l4_hdr.dst_port =
+						tcp_mask->hdr.dst_port;
+				}
+				t++;
+			} else if (!tcp_spec && !tcp_mask) {
+				list[t].type = ICE_TCP_IL;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_SCTP:
+			sctp_spec = item->spec;
+			sctp_mask = item->mask;
+			if (sctp_spec && sctp_mask) {
+				list[t].type = ICE_SCTP_IL;
+				if (sctp_mask->hdr.src_port == UINT16_MAX) {
+					list[t].h_u.sctp_hdr.src_port =
+						sctp_spec->hdr.src_port;
+					list[t].m_u.sctp_hdr.src_port =
+						sctp_mask->hdr.src_port;
+				}
+				if (sctp_mask->hdr.dst_port == UINT16_MAX) {
+					list[t].h_u.sctp_hdr.dst_port =
+						sctp_spec->hdr.dst_port;
+					list[t].m_u.sctp_hdr.dst_port =
+						sctp_mask->hdr.dst_port;
+				}
+				t++;
+			} else if (!sctp_spec && !sctp_mask) {
+				list[t].type = ICE_SCTP_IL;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_VXLAN:
+			vxlan_spec = item->spec;
+			vxlan_mask = item->mask;
+			tunnel_valid = 1;
+			if (vxlan_spec && vxlan_mask) {
+				list[t].type = ICE_VXLAN;
+				if (vxlan_mask->vni[0] == UINT8_MAX &&
+					vxlan_mask->vni[1] == UINT8_MAX &&
+					vxlan_mask->vni[2] == UINT8_MAX) {
+					list[t].h_u.tnl_hdr.vni =
+						(vxlan_spec->vni[2] << 16) |
+						(vxlan_spec->vni[1] << 8) |
+						vxlan_spec->vni[0];
+					list[t].m_u.tnl_hdr.vni =
+						UINT32_MAX;
+				}
+				t++;
+			} else if (!vxlan_spec && !vxlan_mask) {
+				list[t].type = ICE_VXLAN;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_NVGRE:
+			nvgre_spec = item->spec;
+			nvgre_mask = item->mask;
+			tunnel_valid = 1;
+			if (nvgre_spec && nvgre_mask) {
+				list[t].type = ICE_NVGRE;
+				if (nvgre_mask->tni[0] == UINT8_MAX &&
+					nvgre_mask->tni[1] == UINT8_MAX &&
+					nvgre_mask->tni[2] == UINT8_MAX) {
+					list[t].h_u.nvgre_hdr.tni_flow =
+						(nvgre_spec->tni[2] << 16) |
+						(nvgre_spec->tni[1] << 8) |
+						nvgre_spec->tni[0];
+					list[t].m_u.nvgre_hdr.tni_flow =
+						UINT32_MAX;
+				}
+				t++;
+			} else if (!nvgre_spec && !nvgre_mask) {
+				list[t].type = ICE_NVGRE;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_VOID:
+		case RTE_FLOW_ITEM_TYPE_END:
+			break;
+
+		default:
+			rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ITEM, item,
+				   "Invalid pattern item.");
+			goto out;
+		}
+	}
+
+	*lkups_num = t;
+
+	return 0;
+out:
+	return -rte_errno;
+}
+
+/* For now the ice switch filter action code only supports
+ * QUEUE or DROP actions.
+ */
+static int
+ice_parse_switch_action(struct ice_pf *pf,
+				 const struct rte_flow_action *actions,
+				 struct rte_flow_error *error,
+				 struct ice_adv_rule_info *rule_info)
+{
+	struct ice_vsi *vsi = pf->main_vsi;
+	const struct rte_flow_action_queue *act_q;
+	uint16_t base_queue;
+	const struct rte_flow_action *action;
+	enum rte_flow_action_type action_type;
+
+	base_queue = pf->base_queue;
+	for (action = actions; action->type !=
+			RTE_FLOW_ACTION_TYPE_END; action++) {
+		action_type = action->type;
+		switch (action_type) {
+		case RTE_FLOW_ACTION_TYPE_QUEUE:
+			act_q = action->conf;
+			rule_info->sw_act.fltr_act =
+				ICE_FWD_TO_Q;
+			rule_info->sw_act.fwd_id.q_id =
+				base_queue + act_q->index;
+			break;
+
+		case RTE_FLOW_ACTION_TYPE_DROP:
+			rule_info->sw_act.fltr_act =
+				ICE_DROP_PACKET;
+			break;
+
+		case RTE_FLOW_ACTION_TYPE_VOID:
+			break;
+
+		default:
+			rte_flow_error_set(error,
+				EINVAL,
+				RTE_FLOW_ERROR_TYPE_ACTION,
+				actions,
+				"Invalid action type");
+			return -rte_errno;
+		}
+	}
+
+	rule_info->sw_act.vsi_handle = vsi->idx;
+	rule_info->rx = 1;
+	rule_info->sw_act.src = vsi->idx;
+
+	return 0;
+}
+
+static int
+ice_switch_rule_set(struct ice_pf *pf,
+			struct ice_adv_lkup_elem *list,
+			uint16_t lkups_cnt,
+			struct ice_adv_rule_info *rule_info,
+			struct rte_flow *flow,
+			struct rte_flow_error *error)
+{
+	struct ice_hw *hw = ICE_PF_TO_HW(pf);
+	int ret;
+	struct ice_rule_query_data rule_added = {0};
+	struct ice_rule_query_data *filter_ptr;
+
+	if (lkups_cnt > ICE_MAX_CHAIN_WORDS) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+			"item number too large for rule");
+		return -rte_errno;
+	}
+	if (!list) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+			"lookup list should not be NULL");
+		return -rte_errno;
+	}
+
+	ret = ice_add_adv_rule(hw, list, lkups_cnt, rule_info, &rule_added);
+
+	if (!ret) {
+		filter_ptr = rte_zmalloc("ice_switch_filter",
+			sizeof(struct ice_rule_query_data), 0);
+		if (!filter_ptr) {
+			PMD_DRV_LOG(ERR, "failed to allocate memory");
+			return -EINVAL;
+		}
+		flow->rule = filter_ptr;
+		rte_memcpy(filter_ptr,
+			&rule_added,
+			sizeof(struct ice_rule_query_data));
+	}
+
+	return ret;
+}
+
+int
+ice_create_switch_filter(struct ice_pf *pf,
+			const struct rte_flow_item pattern[],
+			const struct rte_flow_action actions[],
+			struct rte_flow *flow,
+			struct rte_flow_error *error)
+{
+	int ret = 0;
+	struct ice_adv_rule_info rule_info = {0};
+	struct ice_adv_lkup_elem *list = NULL;
+	uint16_t lkups_num = 0;
+	const struct rte_flow_item *item = pattern;
+	uint16_t item_num = 0;
+	enum ice_sw_tunnel_type tun_type = ICE_NON_TUN;
+
+	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+		item_num++;
+		if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN)
+			tun_type = ICE_SW_TUN_VXLAN;
+		if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE)
+			tun_type = ICE_SW_TUN_NVGRE;
+	}
+	rule_info.tun_type = tun_type;
+
+	list = rte_zmalloc(NULL, item_num * sizeof(*list), 0);
+	if (!list) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "No memory for PMD internal items");
+		return -rte_errno;
+	}
+
+	ret = ice_parse_switch_filter(pattern, actions, error,
+			list, &lkups_num, tun_type);
+	if (ret)
+		goto error;
+
+	ret = ice_parse_switch_action(pf, actions, error, &rule_info);
+	if (ret)
+		goto error;
+
+	ret = ice_switch_rule_set(pf, list, lkups_num, &rule_info, flow, error);
+	if (ret)
+		goto error;
+
+	rte_free(list);
+	return 0;
+
+error:
+	rte_free(list);
+
+	return -rte_errno;
+}
+
+int
+ice_destroy_switch_filter(struct ice_pf *pf,
+			struct rte_flow *flow,
+			struct rte_flow_error *error)
+{
+	struct ice_hw *hw = ICE_PF_TO_HW(pf);
+	int ret;
+	struct ice_rule_query_data *filter_ptr;
+
+	filter_ptr = (struct ice_rule_query_data *)
+			flow->rule;
+
+	if (!filter_ptr) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+			"no such flow"
+			" created by switch filter");
+		return -rte_errno;
+	}
+
+	ret = ice_rem_adv_rule_by_id(hw, filter_ptr);
+	if (ret) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+			"fail to destroy switch filter rule");
+		return -rte_errno;
+	}
+
+	rte_free(filter_ptr);
+	return ret;
+}
+
+void
+ice_free_switch_filter_rule(void *rule)
+{
+	struct ice_rule_query_data *filter_ptr;
+
+	filter_ptr = (struct ice_rule_query_data *)rule;
+
+	rte_free(filter_ptr);
+}
diff --git a/drivers/net/ice/ice_switch_filter.h b/drivers/net/ice/ice_switch_filter.h
new file mode 100644
index 0000000..cea4799
--- /dev/null
+++ b/drivers/net/ice/ice_switch_filter.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+#ifndef _ICE_SWITCH_FILTER_H_
+#define _ICE_SWITCH_FILTER_H_
+
+#include "base/ice_switch.h"
+#include "base/ice_type.h"
+#include "ice_ethdev.h"
+
+int
+ice_create_switch_filter(struct ice_pf *pf,
+			const struct rte_flow_item pattern[],
+			const struct rte_flow_action actions[],
+			struct rte_flow *flow,
+			struct rte_flow_error *error);
+int
+ice_destroy_switch_filter(struct ice_pf *pf,
+			struct rte_flow *flow,
+			struct rte_flow_error *error);
+void
+ice_free_switch_filter_rule(void *rule);
+#endif /* _ICE_SWITCH_FILTER_H_ */
diff --git a/drivers/net/ice/meson.build b/drivers/net/ice/meson.build
index 2bec688..8697676 100644
--- a/drivers/net/ice/meson.build
+++ b/drivers/net/ice/meson.build
@@ -6,7 +6,8 @@ objs = [base_objs]
 
 sources = files(
 	'ice_ethdev.c',
-	'ice_rxtx.c'
+	'ice_rxtx.c',
+	'ice_switch_filter.c'
 	)
 
 deps += ['hash']
-- 
2.9.5


^ permalink raw reply	[flat|nested] 73+ messages in thread

* [dpdk-dev] [PATCH v8 2/3] net/ice: add generic flow API
  2019-06-26  8:03 ` [dpdk-dev] [PATCH v8 " Qiming Yang
  2019-06-26  8:03   ` [dpdk-dev] [PATCH v8 1/3] net/ice: enable switch filter Qiming Yang
@ 2019-06-26  8:03   ` Qiming Yang
  2019-06-26  8:03   ` [dpdk-dev] [PATCH v8 3/3] net/ice: add UDP tunnel port support Qiming Yang
  2 siblings, 0 replies; 73+ messages in thread
From: Qiming Yang @ 2019-06-26  8:03 UTC (permalink / raw)
  To: dev; +Cc: Qiming Yang

This patch adds ice_flow_create, ice_flow_destroy,
ice_flow_flush and ice_flow_validate support;
these callbacks handle all the generic filters.
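
As a rough, illustrative sketch (not part of this patch), the expected call
sequence on the application side is validate, create, then destroy or flush.
The function name and port id 0 below are assumptions; the attr/pattern/actions
triple is assumed to be built as in the switch-filter sketch of patch 1/3:

#include <rte_errno.h>
#include <rte_flow.h>

static int
flow_lifecycle_demo(const struct rte_flow_attr *attr,
		    const struct rte_flow_item pattern[],
		    const struct rte_flow_action actions[])
{
	struct rte_flow_error err;
	struct rte_flow *flow;
	int ret;

	/* Maps to ice_flow_validate(): checks attr, pattern and actions
	 * without touching the hardware.
	 */
	ret = rte_flow_validate(0, attr, pattern, actions, &err);
	if (ret)
		return ret;

	/* Maps to ice_flow_create(): validates again, programs the switch
	 * filter and links the flow into pf->flow_list.
	 */
	flow = rte_flow_create(0, attr, pattern, actions, &err);
	if (!flow)
		return -rte_errno;

	/* Maps to ice_flow_destroy(); rte_flow_flush(0, &err) would instead
	 * remove every flow owned by the port.
	 */
	return rte_flow_destroy(0, flow, &err);
}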

Signed-off-by: Qiming Yang <qiming.yang@intel.com>
---
 drivers/net/ice/Makefile           |   1 +
 drivers/net/ice/ice_ethdev.c       |  44 +++
 drivers/net/ice/ice_ethdev.h       |   5 +
 drivers/net/ice/ice_generic_flow.c | 696 +++++++++++++++++++++++++++++++++++++
 drivers/net/ice/ice_generic_flow.h | 614 ++++++++++++++++++++++++++++++++
 drivers/net/ice/meson.build        |   3 +-
 6 files changed, 1362 insertions(+), 1 deletion(-)
 create mode 100644 drivers/net/ice/ice_generic_flow.c
 create mode 100644 drivers/net/ice/ice_generic_flow.h

diff --git a/drivers/net/ice/Makefile b/drivers/net/ice/Makefile
index b10d826..32abeb6 100644
--- a/drivers/net/ice/Makefile
+++ b/drivers/net/ice/Makefile
@@ -79,5 +79,6 @@ endif
 ifeq ($(CC_AVX2_SUPPORT), 1)
 	SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_rxtx_vec_avx2.c
 endif
+SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_generic_flow.c
 
 include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index a94aa7e..8ee06d1 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -15,6 +15,7 @@
 #include "base/ice_dcb.h"
 #include "ice_ethdev.h"
 #include "ice_rxtx.h"
+#include "ice_switch_filter.h"
 
 #define ICE_MAX_QP_NUM "max_queue_pair_num"
 #define ICE_DFLT_OUTER_TAG_TYPE ICE_AQ_VSI_OUTER_TAG_VLAN_9100
@@ -83,6 +84,10 @@ static int ice_xstats_get(struct rte_eth_dev *dev,
 static int ice_xstats_get_names(struct rte_eth_dev *dev,
 				struct rte_eth_xstat_name *xstats_names,
 				unsigned int limit);
+static int ice_dev_filter_ctrl(struct rte_eth_dev *dev,
+			enum rte_filter_type filter_type,
+			enum rte_filter_op filter_op,
+			void *arg);
 
 static const struct rte_pci_id pci_id_ice_map[] = {
 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_BACKPLANE) },
@@ -141,6 +146,7 @@ static const struct eth_dev_ops ice_eth_dev_ops = {
 	.xstats_get                   = ice_xstats_get,
 	.xstats_get_names             = ice_xstats_get_names,
 	.xstats_reset                 = ice_stats_reset,
+	.filter_ctrl                  = ice_dev_filter_ctrl,
 };
 
 /* store statistics names and its offset in stats structure */
@@ -1478,6 +1484,8 @@ ice_dev_init(struct rte_eth_dev *dev)
 	/* get base queue pairs index  in the device */
 	ice_base_queue_get(pf);
 
+	TAILQ_INIT(&pf->flow_list);
+
 	return 0;
 
 err_pf_setup:
@@ -1620,6 +1628,8 @@ ice_dev_uninit(struct rte_eth_dev *dev)
 {
 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	struct rte_flow *p_flow;
 
 	ice_dev_close(dev);
 
@@ -1637,6 +1647,13 @@ ice_dev_uninit(struct rte_eth_dev *dev)
 	rte_intr_callback_unregister(intr_handle,
 				     ice_interrupt_handler, dev);
 
+	/* Remove all flows */
+	while ((p_flow = TAILQ_FIRST(&pf->flow_list))) {
+		TAILQ_REMOVE(&pf->flow_list, p_flow, node);
+		ice_free_switch_filter_rule(p_flow->rule);
+		rte_free(p_flow);
+	}
+
 	return 0;
 }
 
@@ -3622,6 +3639,33 @@ static int ice_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
 }
 
 static int
+ice_dev_filter_ctrl(struct rte_eth_dev *dev,
+		     enum rte_filter_type filter_type,
+		     enum rte_filter_op filter_op,
+		     void *arg)
+{
+	int ret = 0;
+
+	if (!dev)
+		return -EINVAL;
+
+	switch (filter_type) {
+	case RTE_ETH_FILTER_GENERIC:
+		if (filter_op != RTE_ETH_FILTER_GET)
+			return -EINVAL;
+		*(const void **)arg = &ice_flow_ops;
+		break;
+	default:
+		PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
+					filter_type);
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+static int
 ice_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 	      struct rte_pci_device *pci_dev)
 {
diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index 50b966c..8a52239 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -234,12 +234,16 @@ struct ice_vsi {
 	bool offset_loaded;
 };
 
+extern const struct rte_flow_ops ice_flow_ops;
+
 /* Struct to store flow created. */
 struct rte_flow {
 	TAILQ_ENTRY(rte_flow) node;
 	void *rule;
 };
 
+TAILQ_HEAD(ice_flow_list, rte_flow);
+
 struct ice_pf {
 	struct ice_adapter *adapter; /* The adapter this PF associate to */
 	struct ice_vsi *main_vsi; /* pointer to main VSI structure */
@@ -266,6 +270,7 @@ struct ice_pf {
 	struct ice_eth_stats internal_stats;
 	bool offset_loaded;
 	bool adapter_stopped;
+	struct ice_flow_list flow_list;
 };
 
 /**
diff --git a/drivers/net/ice/ice_generic_flow.c b/drivers/net/ice/ice_generic_flow.c
new file mode 100644
index 0000000..d5ff278
--- /dev/null
+++ b/drivers/net/ice/ice_generic_flow.c
@@ -0,0 +1,696 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_malloc.h>
+
+#include "ice_ethdev.h"
+#include "ice_generic_flow.h"
+#include "ice_switch_filter.h"
+
+static int ice_flow_validate(struct rte_eth_dev *dev,
+		const struct rte_flow_attr *attr,
+		const struct rte_flow_item pattern[],
+		const struct rte_flow_action actions[],
+		struct rte_flow_error *error);
+static struct rte_flow *ice_flow_create(struct rte_eth_dev *dev,
+		const struct rte_flow_attr *attr,
+		const struct rte_flow_item pattern[],
+		const struct rte_flow_action actions[],
+		struct rte_flow_error *error);
+static int ice_flow_destroy(struct rte_eth_dev *dev,
+		struct rte_flow *flow,
+		struct rte_flow_error *error);
+static int ice_flow_flush(struct rte_eth_dev *dev,
+		struct rte_flow_error *error);
+
+const struct rte_flow_ops ice_flow_ops = {
+	.validate = ice_flow_validate,
+	.create = ice_flow_create,
+	.destroy = ice_flow_destroy,
+	.flush = ice_flow_flush,
+};
+
+static int
+ice_flow_valid_attr(const struct rte_flow_attr *attr,
+		     struct rte_flow_error *error)
+{
+	/* Must be input direction */
+	if (!attr->ingress) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+				   attr, "Only support ingress.");
+		return -rte_errno;
+	}
+
+	/* Not supported */
+	if (attr->egress) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+				   attr, "Not support egress.");
+		return -rte_errno;
+	}
+
+	/* Not supported */
+	if (attr->priority) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+				   attr, "Not support priority.");
+		return -rte_errno;
+	}
+
+	/* Not supported */
+	if (attr->group) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+				   attr, "Not support group.");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+/* Find the first VOID or non-VOID item pointer */
+static const struct rte_flow_item *
+ice_find_first_item(const struct rte_flow_item *item, bool is_void)
+{
+	bool is_find;
+
+	while (item->type != RTE_FLOW_ITEM_TYPE_END) {
+		if (is_void)
+			is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
+		else
+			is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
+		if (is_find)
+			break;
+		item++;
+	}
+	return item;
+}
+
+/* Skip all VOID items of the pattern */
+static void
+ice_pattern_skip_void_item(struct rte_flow_item *items,
+			    const struct rte_flow_item *pattern)
+{
+	uint32_t cpy_count = 0;
+	const struct rte_flow_item *pb = pattern, *pe = pattern;
+
+	for (;;) {
+		/* Find a non-void item first */
+		pb = ice_find_first_item(pb, false);
+		if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
+			pe = pb;
+			break;
+		}
+
+		/* Find a void item */
+		pe = ice_find_first_item(pb + 1, true);
+
+		cpy_count = pe - pb;
+		rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);
+
+		items += cpy_count;
+
+		if (pe->type == RTE_FLOW_ITEM_TYPE_END) {
+			pb = pe;
+			break;
+		}
+
+		pb = pe + 1;
+	}
+	/* Copy the END item. */
+	rte_memcpy(items, pe, sizeof(struct rte_flow_item));
+}
+
+/* Check if the pattern matches a supported item type array */
+static bool
+ice_match_pattern(enum rte_flow_item_type *item_array,
+		const struct rte_flow_item *pattern)
+{
+	const struct rte_flow_item *item = pattern;
+
+	while ((*item_array == item->type) &&
+	       (*item_array != RTE_FLOW_ITEM_TYPE_END)) {
+		item_array++;
+		item++;
+	}
+
+	return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
+		item->type == RTE_FLOW_ITEM_TYPE_END);
+}
+
+static uint64_t ice_flow_valid_pattern(const struct rte_flow_item pattern[],
+		struct rte_flow_error *error)
+{
+	uint16_t i = 0;
+	uint64_t inset;
+	struct rte_flow_item *items; /* used for pattern without VOID items */
+	uint32_t item_num = 0; /* non-void item number */
+
+	/* Get the non-void item number of pattern */
+	while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
+		if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
+			item_num++;
+		i++;
+	}
+	item_num++;
+
+	items = rte_zmalloc("ice_pattern",
+			    item_num * sizeof(struct rte_flow_item), 0);
+	if (!items) {
+		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+				   NULL, "No memory for PMD internal items.");
+		return ICE_INSET_NONE;
+	}
+
+	ice_pattern_skip_void_item(items, pattern);
+
+	for (i = 0; i < RTE_DIM(ice_supported_patterns); i++)
+		if (ice_match_pattern(ice_supported_patterns[i].items,
+				      items)) {
+			inset = ice_supported_patterns[i].sw_fields;
+			rte_free(items);
+			return inset;
+		}
+	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+			   pattern, "Unsupported pattern");
+
+	rte_free(items);
+	return 0;
+}
+
+static uint64_t ice_get_flow_field(const struct rte_flow_item pattern[],
+			struct rte_flow_error *error)
+{
+	const struct rte_flow_item *item = pattern;
+	const struct rte_flow_item_eth *eth_spec, *eth_mask;
+	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
+	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
+	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
+	const struct rte_flow_item_udp *udp_spec, *udp_mask;
+	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
+	const struct rte_flow_item_icmp *icmp_mask;
+	const struct rte_flow_item_icmp6 *icmp6_mask;
+	const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
+	const struct rte_flow_item_nvgre *nvgre_spec, *nvgre_mask;
+	enum rte_flow_item_type item_type;
+	uint8_t  ipv6_addr_mask[16] = {
+		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
+	uint64_t input_set = ICE_INSET_NONE;
+	bool outer_ip = true;
+	bool outer_l4 = true;
+
+	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+		if (item->last) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Not support range");
+			return 0;
+		}
+		item_type = item->type;
+		switch (item_type) {
+		case RTE_FLOW_ITEM_TYPE_ETH:
+			eth_spec = item->spec;
+			eth_mask = item->mask;
+
+			if (eth_spec && eth_mask) {
+				if (rte_is_broadcast_ether_addr(&eth_mask->src))
+					input_set |= ICE_INSET_SMAC;
+				if (rte_is_broadcast_ether_addr(&eth_mask->dst))
+					input_set |= ICE_INSET_DMAC;
+				if (eth_mask->type == RTE_BE16(0xffff))
+					input_set |= ICE_INSET_ETHERTYPE;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_IPV4:
+			ipv4_spec = item->spec;
+			ipv4_mask = item->mask;
+
+			if (!(ipv4_spec && ipv4_mask)) {
+				rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Invalid IPv4 spec or mask.");
+				return 0;
+			}
+
+			/* Check IPv4 mask and update input set */
+			if (ipv4_mask->hdr.version_ihl ||
+			    ipv4_mask->hdr.total_length ||
+			    ipv4_mask->hdr.packet_id ||
+			    ipv4_mask->hdr.hdr_checksum) {
+				rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Invalid IPv4 mask.");
+				return 0;
+			}
+
+			if (outer_ip) {
+				if (ipv4_mask->hdr.src_addr == UINT32_MAX)
+					input_set |= ICE_INSET_IPV4_SRC;
+				if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
+					input_set |= ICE_INSET_IPV4_DST;
+				if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
+					input_set |= ICE_INSET_IPV4_TOS;
+				if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
+					input_set |= ICE_INSET_IPV4_TTL;
+				if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
+					input_set |= ICE_INSET_IPV4_PROTO;
+				outer_ip = false;
+			} else {
+				if (ipv4_mask->hdr.src_addr == UINT32_MAX)
+					input_set |= ICE_INSET_TUN_IPV4_SRC;
+				if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
+					input_set |= ICE_INSET_TUN_IPV4_DST;
+				if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
+					input_set |= ICE_INSET_TUN_IPV4_TTL;
+				if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
+					input_set |= ICE_INSET_TUN_IPV4_PROTO;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_IPV6:
+			ipv6_spec = item->spec;
+			ipv6_mask = item->mask;
+
+			if (!(ipv6_spec && ipv6_mask)) {
+				rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ITEM,
+					item, "Invalid IPv6 spec or mask");
+				return 0;
+			}
+
+			if (ipv6_mask->hdr.payload_len ||
+			    ipv6_mask->hdr.vtc_flow) {
+				rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Invalid IPv6 mask");
+				return 0;
+			}
+
+			if (outer_ip) {
+				if (!memcmp(ipv6_mask->hdr.src_addr,
+					    ipv6_addr_mask,
+					    RTE_DIM(ipv6_mask->hdr.src_addr)))
+					input_set |= ICE_INSET_IPV6_SRC;
+				if (!memcmp(ipv6_mask->hdr.dst_addr,
+					    ipv6_addr_mask,
+					    RTE_DIM(ipv6_mask->hdr.dst_addr)))
+					input_set |= ICE_INSET_IPV6_DST;
+				if (ipv6_mask->hdr.proto == UINT8_MAX)
+					input_set |= ICE_INSET_IPV6_PROTO;
+				if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
+					input_set |= ICE_INSET_IPV6_HOP_LIMIT;
+				outer_ip = false;
+			} else {
+				if (!memcmp(ipv6_mask->hdr.src_addr,
+					    ipv6_addr_mask,
+					    RTE_DIM(ipv6_mask->hdr.src_addr)))
+					input_set |= ICE_INSET_TUN_IPV6_SRC;
+				if (!memcmp(ipv6_mask->hdr.dst_addr,
+					    ipv6_addr_mask,
+					    RTE_DIM(ipv6_mask->hdr.dst_addr)))
+					input_set |= ICE_INSET_TUN_IPV6_DST;
+				if (ipv6_mask->hdr.proto == UINT8_MAX)
+					input_set |= ICE_INSET_TUN_IPV6_PROTO;
+				if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
+					input_set |= ICE_INSET_TUN_IPV6_TTL;
+			}
+
+			break;
+		case RTE_FLOW_ITEM_TYPE_UDP:
+			udp_spec = item->spec;
+			udp_mask = item->mask;
+
+			if (!(udp_spec && udp_mask)) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item, "Invalid UDP mask");
+				return 0;
+			}
+
+			/* Check UDP mask and update input set*/
+			if (udp_mask->hdr.dgram_len ||
+			    udp_mask->hdr.dgram_cksum) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid UDP mask");
+				return 0;
+			}
+
+			if (outer_l4) {
+				if (udp_mask->hdr.src_port == UINT16_MAX)
+					input_set |= ICE_INSET_SRC_PORT;
+				if (udp_mask->hdr.dst_port == UINT16_MAX)
+					input_set |= ICE_INSET_DST_PORT;
+				outer_l4 = false;
+			} else {
+				if (udp_mask->hdr.src_port == UINT16_MAX)
+					input_set |= ICE_INSET_TUN_SRC_PORT;
+				if (udp_mask->hdr.dst_port == UINT16_MAX)
+					input_set |= ICE_INSET_TUN_DST_PORT;
+			}
+
+			break;
+		case RTE_FLOW_ITEM_TYPE_TCP:
+			tcp_spec = item->spec;
+			tcp_mask = item->mask;
+
+			if (!(tcp_spec && tcp_mask)) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item, "Invalid TCP mask");
+				return 0;
+			}
+
+			/* Check TCP mask and update input set */
+			if (tcp_mask->hdr.sent_seq ||
+			    tcp_mask->hdr.recv_ack ||
+			    tcp_mask->hdr.data_off ||
+			    tcp_mask->hdr.tcp_flags ||
+			    tcp_mask->hdr.rx_win ||
+			    tcp_mask->hdr.cksum ||
+			    tcp_mask->hdr.tcp_urp) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid TCP mask");
+				return 0;
+			}
+
+			if (outer_l4) {
+				if (tcp_mask->hdr.src_port == UINT16_MAX)
+					input_set |= ICE_INSET_SRC_PORT;
+				if (tcp_mask->hdr.dst_port == UINT16_MAX)
+					input_set |= ICE_INSET_DST_PORT;
+				outer_l4 = false;
+			} else {
+				if (tcp_mask->hdr.src_port == UINT16_MAX)
+					input_set |= ICE_INSET_TUN_SRC_PORT;
+				if (tcp_mask->hdr.dst_port == UINT16_MAX)
+					input_set |= ICE_INSET_TUN_DST_PORT;
+			}
+
+			break;
+		case RTE_FLOW_ITEM_TYPE_SCTP:
+			sctp_spec = item->spec;
+			sctp_mask = item->mask;
+
+			if (!(sctp_spec && sctp_mask)) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item, "Invalid SCTP mask");
+				return 0;
+			}
+
+			/* Check SCTP mask and update input set */
+			if (sctp_mask->hdr.cksum) {
+				rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Invalid SCTP mask");
+				return 0;
+			}
+
+			if (outer_l4) {
+				if (sctp_mask->hdr.src_port == UINT16_MAX)
+					input_set |= ICE_INSET_SRC_PORT;
+				if (sctp_mask->hdr.dst_port == UINT16_MAX)
+					input_set |= ICE_INSET_DST_PORT;
+				outer_l4 = false;
+			} else {
+				if (sctp_mask->hdr.src_port == UINT16_MAX)
+					input_set |= ICE_INSET_TUN_SRC_PORT;
+				if (sctp_mask->hdr.dst_port == UINT16_MAX)
+					input_set |= ICE_INSET_TUN_DST_PORT;
+			}
+
+			break;
+		case RTE_FLOW_ITEM_TYPE_ICMP:
+			icmp_mask = item->mask;
+			if (icmp_mask->hdr.icmp_code ||
+			    icmp_mask->hdr.icmp_cksum ||
+			    icmp_mask->hdr.icmp_ident ||
+			    icmp_mask->hdr.icmp_seq_nb) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid ICMP mask");
+				return 0;
+			}
+
+			if (icmp_mask->hdr.icmp_type == UINT8_MAX)
+				input_set |= ICE_INSET_ICMP;
+			break;
+		case RTE_FLOW_ITEM_TYPE_ICMP6:
+			icmp6_mask = item->mask;
+			if (icmp6_mask->code ||
+			    icmp6_mask->checksum) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid ICMP6 mask");
+				return 0;
+			}
+
+			if (icmp6_mask->type == UINT8_MAX)
+				input_set |= ICE_INSET_ICMP6;
+			break;
+		case RTE_FLOW_ITEM_TYPE_VXLAN:
+			vxlan_spec = item->spec;
+			vxlan_mask = item->mask;
+			/* Check if VXLAN item is used to describe protocol.
+			 * If yes, both spec and mask should be NULL.
+			 * If no, both spec and mask shouldn't be NULL.
+			 */
+			if ((!vxlan_spec && vxlan_mask) ||
+			    (vxlan_spec && !vxlan_mask)) {
+				rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Invalid VXLAN item");
+				return 0;
+			}
+
+			break;
+		case RTE_FLOW_ITEM_TYPE_NVGRE:
+			nvgre_spec = item->spec;
+			nvgre_mask = item->mask;
+			/* Check if NVGRE item is used to describe protocol.
+			 * If yes, both spec and mask should be NULL.
+			 * If no, both spec and mask shouldn't be NULL.
+			 */
+			if ((!nvgre_spec && nvgre_mask) ||
+			    (nvgre_spec && !nvgre_mask)) {
+				rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Invalid NVGRE item");
+				return 0;
+			}
+
+			break;
+		default:
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Unsupported pattern item.");
+			break;
+		}
+	}
+	return input_set;
+}
+
+static int ice_flow_valid_inset(const struct rte_flow_item pattern[],
+			uint64_t inset, struct rte_flow_error *error)
+{
+	uint64_t fields;
+
+	/* get valid field */
+	fields = ice_get_flow_field(pattern, error);
+	if (!fields || fields & (~inset)) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
+				   pattern,
+				   "Invalid input set");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+static int ice_flow_valid_action(struct rte_eth_dev *dev,
+				const struct rte_flow_action *actions,
+				struct rte_flow_error *error)
+{
+	const struct rte_flow_action_queue *act_q;
+	uint16_t queue;
+
+	switch (actions->type) {
+	case RTE_FLOW_ACTION_TYPE_QUEUE:
+		act_q = actions->conf;
+		queue = act_q->index;
+		if (queue >= dev->data->nb_rx_queues) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ACTION,
+					   actions, "Invalid queue ID for"
+					   " ethertype_filter.");
+			return -rte_errno;
+		}
+		break;
+	case RTE_FLOW_ACTION_TYPE_DROP:
+		break;
+	default:
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ACTION, actions,
+				   "Invalid action.");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+static int
+ice_flow_validate(struct rte_eth_dev *dev,
+		   const struct rte_flow_attr *attr,
+		   const struct rte_flow_item pattern[],
+		   const struct rte_flow_action actions[],
+		   struct rte_flow_error *error)
+{
+	uint64_t inset = 0;
+	int ret = ICE_ERR_NOT_SUPPORTED;
+
+	if (!pattern) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+				   NULL, "NULL pattern.");
+		return -rte_errno;
+	}
+
+	if (!actions) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+				   NULL, "NULL action.");
+		return -rte_errno;
+	}
+
+	if (!attr) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR,
+				   NULL, "NULL attribute.");
+		return -rte_errno;
+	}
+
+	ret = ice_flow_valid_attr(attr, error);
+	if (ret)
+		return ret;
+
+	inset = ice_flow_valid_pattern(pattern, error);
+	if (!inset)
+		return -rte_errno;
+
+	ret = ice_flow_valid_inset(pattern, inset, error);
+	if (ret)
+		return ret;
+
+	ret = ice_flow_valid_action(dev, actions, error);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static struct rte_flow *
+ice_flow_create(struct rte_eth_dev *dev,
+		 const struct rte_flow_attr *attr,
+		 const struct rte_flow_item pattern[],
+		 const struct rte_flow_action actions[],
+		 struct rte_flow_error *error)
+{
+	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	struct rte_flow *flow = NULL;
+	int ret;
+
+	flow = rte_zmalloc("ice_flow", sizeof(struct rte_flow), 0);
+	if (!flow) {
+		rte_flow_error_set(error, ENOMEM,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Failed to allocate memory");
+		return flow;
+	}
+
+	ret = ice_flow_validate(dev, attr, pattern, actions, error);
+	if (ret < 0)
+		goto free_flow;
+
+	ret = ice_create_switch_filter(pf, pattern, actions, flow, error);
+	if (ret)
+		goto free_flow;
+
+	TAILQ_INSERT_TAIL(&pf->flow_list, flow, node);
+	return flow;
+
+free_flow:
+	rte_flow_error_set(error, -ret,
+			   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+			   "Failed to create flow.");
+	rte_free(flow);
+	return NULL;
+}
+
+static int
+ice_flow_destroy(struct rte_eth_dev *dev,
+		 struct rte_flow *flow,
+		 struct rte_flow_error *error)
+{
+	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	int ret = 0;
+
+	ret = ice_destroy_switch_filter(pf, flow, error);
+
+	if (!ret) {
+		TAILQ_REMOVE(&pf->flow_list, flow, node);
+		rte_free(flow);
+	} else {
+		rte_flow_error_set(error, -ret,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Failed to destroy flow.");
+	}
+
+	return ret;
+}
+
+static int
+ice_flow_flush(struct rte_eth_dev *dev,
+	       struct rte_flow_error *error)
+{
+	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	struct rte_flow *p_flow;
+	int ret = 0;
+
+	while ((p_flow = TAILQ_FIRST(&pf->flow_list))) {
+		ret = ice_flow_destroy(dev, p_flow, error);
+		if (ret) {
+			rte_flow_error_set(error, -ret,
+					   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+					   "Failed to flush SW flows.");
+			return -rte_errno;
+		}
+	}
+
+	return ret;
+}
diff --git a/drivers/net/ice/ice_generic_flow.h b/drivers/net/ice/ice_generic_flow.h
new file mode 100644
index 0000000..2e43a29
--- /dev/null
+++ b/drivers/net/ice/ice_generic_flow.h
@@ -0,0 +1,614 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+#ifndef _ICE_GENERIC_FLOW_H_
+#define _ICE_GENERIC_FLOW_H_
+
+#include <rte_flow_driver.h>
+
+struct ice_flow_pattern {
+	enum rte_flow_item_type *items;
+	uint64_t sw_fields;
+};
+
+#define ICE_INSET_NONE            0x0000000000000000ULL
+
+/* bit0 ~ bit 7 */
+#define ICE_INSET_SMAC            0x0000000000000001ULL
+#define ICE_INSET_DMAC            0x0000000000000002ULL
+#define ICE_INSET_ETHERTYPE       0x0000000000000020ULL
+
+/* bit 8 ~ bit 15 */
+#define ICE_INSET_IPV4_SRC        0x0000000000000100ULL
+#define ICE_INSET_IPV4_DST        0x0000000000000200ULL
+#define ICE_INSET_IPV6_SRC        0x0000000000000400ULL
+#define ICE_INSET_IPV6_DST        0x0000000000000800ULL
+#define ICE_INSET_SRC_PORT        0x0000000000001000ULL
+#define ICE_INSET_DST_PORT        0x0000000000002000ULL
+#define ICE_INSET_ARP             0x0000000000004000ULL
+
+/* bit 16 ~ bit 31 */
+#define ICE_INSET_IPV4_TOS        0x0000000000010000ULL
+#define ICE_INSET_IPV4_PROTO      0x0000000000020000ULL
+#define ICE_INSET_IPV4_TTL        0x0000000000040000ULL
+#define ICE_INSET_IPV6_PROTO      0x0000000000200000ULL
+#define ICE_INSET_IPV6_HOP_LIMIT  0x0000000000400000ULL
+#define ICE_INSET_ICMP            0x0000000001000000ULL
+#define ICE_INSET_ICMP6           0x0000000002000000ULL
+
+/* bit 32 ~ bit 47, tunnel fields */
+#define ICE_INSET_TUN_SMAC           0x0000000100000000ULL
+#define ICE_INSET_TUN_DMAC           0x0000000200000000ULL
+#define ICE_INSET_TUN_IPV4_SRC       0x0000000400000000ULL
+#define ICE_INSET_TUN_IPV4_DST       0x0000000800000000ULL
+#define ICE_INSET_TUN_IPV4_TTL       0x0000001000000000ULL
+#define ICE_INSET_TUN_IPV4_PROTO     0x0000002000000000ULL
+#define ICE_INSET_TUN_IPV6_SRC       0x0000004000000000ULL
+#define ICE_INSET_TUN_IPV6_DST       0x0000008000000000ULL
+#define ICE_INSET_TUN_IPV6_TTL       0x0000010000000000ULL
+#define ICE_INSET_TUN_IPV6_PROTO     0x0000020000000000ULL
+#define ICE_INSET_TUN_SRC_PORT       0x0000040000000000ULL
+#define ICE_INSET_TUN_DST_PORT       0x0000080000000000ULL
+#define ICE_INSET_TUN_ID             0x0000100000000000ULL
+
+/* bit 48 ~ bit 55 */
+#define ICE_INSET_LAST_ETHER_TYPE 0x0001000000000000ULL
+
+#define ICE_FLAG_VLAN_INNER  0x00000001ULL
+#define ICE_FLAG_VLAN_OUTER  0x00000002ULL
+
+#define INSET_ETHER ( \
+	ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)
+#define INSET_MAC_IPV4 ( \
+	ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
+	ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_TOS)
+#define INSET_MAC_IPV4_L4 ( \
+	ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
+	ICE_INSET_IPV4_TOS | ICE_INSET_DST_PORT | \
+	ICE_INSET_SRC_PORT)
+#define INSET_MAC_IPV4_ICMP ( \
+	ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
+	ICE_INSET_IPV4_TOS | ICE_INSET_ICMP)
+#define INSET_MAC_IPV6 ( \
+	ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
+	ICE_INSET_IPV6_PROTO | ICE_INSET_IPV6_HOP_LIMIT)
+#define INSET_MAC_IPV6_L4 ( \
+	ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
+	ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_DST_PORT | \
+	ICE_INSET_SRC_PORT)
+#define INSET_MAC_IPV6_ICMP ( \
+	ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
+	ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_ICMP6)
+#define INSET_TUNNEL_IPV4_TYPE1 ( \
+	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
+	ICE_INSET_TUN_IPV4_TTL | ICE_INSET_TUN_IPV4_PROTO)
+#define INSET_TUNNEL_IPV4_TYPE2 ( \
+	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
+	ICE_INSET_TUN_IPV4_TTL | ICE_INSET_TUN_IPV4_PROTO | \
+	ICE_INSET_TUN_SRC_PORT | ICE_INSET_TUN_DST_PORT)
+#define INSET_TUNNEL_IPV4_TYPE3 ( \
+	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
+	ICE_INSET_TUN_IPV4_TTL | ICE_INSET_ICMP)
+#define INSET_TUNNEL_IPV6_TYPE1 ( \
+	ICE_INSET_TUN_IPV6_SRC | ICE_INSET_TUN_IPV6_DST | \
+	ICE_INSET_TUN_IPV6_TTL | ICE_INSET_TUN_IPV6_PROTO)
+#define INSET_TUNNEL_IPV6_TYPE2 ( \
+	ICE_INSET_TUN_IPV6_SRC | ICE_INSET_TUN_IPV6_DST | \
+	ICE_INSET_TUN_IPV6_TTL | ICE_INSET_TUN_IPV6_PROTO | \
+	ICE_INSET_TUN_SRC_PORT | ICE_INSET_TUN_DST_PORT)
+#define INSET_TUNNEL_IPV6_TYPE3 ( \
+	ICE_INSET_TUN_IPV6_SRC | ICE_INSET_TUN_IPV6_DST | \
+	ICE_INSET_TUN_IPV6_TTL | ICE_INSET_ICMP6)
+
+/* L2 */
+static enum rte_flow_item_type pattern_ethertype[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* non-tunnel IPv4 */
+static enum rte_flow_item_type pattern_ipv4[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_icmp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* non-tunnel IPv6 */
+static enum rte_flow_item_type pattern_ipv6[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv6_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv6_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv6_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv6_icmp6[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_ICMP6,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* IPv4 VXLAN IPv4 */
+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv4[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv4_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv4_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv4_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv4_icmp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* IPv4 VXLAN MAC IPv4 */
+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv4[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv4_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv4_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv4_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv4_icmp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* IPv4 VXLAN IPv6 */
+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv6[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv6_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv6_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv6_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv6_icmp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* IPv4 VXLAN MAC IPv6 */
+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv6[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv6_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv6_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv6_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv6_icmp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* IPv4 NVGRE IPv4 */
+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv4[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv4_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv4_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv4_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv4_icmp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* IPv4 NVGRE MAC IPv4 */
+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv4[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv4_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv4_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv4_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv4_icmp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* IPv4 NVGRE IPv6 */
+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv6[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv6_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv6_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv6_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+
+/* IPv4 NVGRE MAC IPv6 */
+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv6[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv6_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv6_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv6_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static struct ice_flow_pattern ice_supported_patterns[] = {
+	{pattern_ethertype, INSET_ETHER},
+	{pattern_ipv4, INSET_MAC_IPV4},
+	{pattern_ipv4_udp, INSET_MAC_IPV4_L4},
+	{pattern_ipv4_sctp, INSET_MAC_IPV4_L4},
+	{pattern_ipv4_tcp, INSET_MAC_IPV4_L4},
+	{pattern_ipv4_icmp, INSET_MAC_IPV4_ICMP},
+	{pattern_ipv6, INSET_MAC_IPV6},
+	{pattern_ipv6_udp, INSET_MAC_IPV6_L4},
+	{pattern_ipv6_sctp, INSET_MAC_IPV6_L4},
+	{pattern_ipv6_tcp, INSET_MAC_IPV6_L4},
+	{pattern_ipv6_icmp6, INSET_MAC_IPV6_ICMP},
+	{pattern_ipv4_vxlan_ipv4, INSET_TUNNEL_IPV4_TYPE1},
+	{pattern_ipv4_vxlan_ipv4_udp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_vxlan_ipv4_tcp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_vxlan_ipv4_sctp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_vxlan_ipv4_icmp, INSET_TUNNEL_IPV4_TYPE3},
+	{pattern_ipv4_vxlan_eth_ipv4, INSET_TUNNEL_IPV4_TYPE1},
+	{pattern_ipv4_vxlan_eth_ipv4_udp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_vxlan_eth_ipv4_tcp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_vxlan_eth_ipv4_sctp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_vxlan_eth_ipv4_icmp, INSET_TUNNEL_IPV4_TYPE3},
+	{pattern_ipv4_vxlan_ipv6, INSET_TUNNEL_IPV6_TYPE1},
+	{pattern_ipv4_vxlan_ipv6_udp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_vxlan_ipv6_tcp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_vxlan_ipv6_sctp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_vxlan_ipv6_icmp, INSET_TUNNEL_IPV6_TYPE3},
+	{pattern_ipv4_vxlan_eth_ipv6, INSET_TUNNEL_IPV6_TYPE1},
+	{pattern_ipv4_vxlan_eth_ipv6_udp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_vxlan_eth_ipv6_tcp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_vxlan_eth_ipv6_sctp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_vxlan_eth_ipv6_icmp, INSET_TUNNEL_IPV6_TYPE3},
+	{pattern_ipv4_nvgre_ipv4, INSET_TUNNEL_IPV4_TYPE1},
+	{pattern_ipv4_nvgre_ipv4_udp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_nvgre_ipv4_tcp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_nvgre_ipv4_sctp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_nvgre_ipv4_icmp, INSET_TUNNEL_IPV4_TYPE3},
+	{pattern_ipv4_nvgre_eth_ipv4, INSET_TUNNEL_IPV4_TYPE1},
+	{pattern_ipv4_nvgre_eth_ipv4_udp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_nvgre_eth_ipv4_tcp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_nvgre_eth_ipv4_sctp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_nvgre_eth_ipv4_icmp, INSET_TUNNEL_IPV4_TYPE3},
+	{pattern_ipv4_nvgre_ipv6, INSET_TUNNEL_IPV6_TYPE1},
+	{pattern_ipv4_nvgre_ipv6_udp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_nvgre_ipv6_tcp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_nvgre_ipv6_sctp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_nvgre_eth_ipv6, INSET_TUNNEL_IPV6_TYPE1},
+	{pattern_ipv4_nvgre_eth_ipv6_udp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_nvgre_eth_ipv6_tcp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_nvgre_eth_ipv6_sctp, INSET_TUNNEL_IPV6_TYPE2},
+};
+
+#endif
diff --git a/drivers/net/ice/meson.build b/drivers/net/ice/meson.build
index 8697676..7f16647 100644
--- a/drivers/net/ice/meson.build
+++ b/drivers/net/ice/meson.build
@@ -7,7 +7,8 @@ objs = [base_objs]
 sources = files(
 	'ice_ethdev.c',
 	'ice_rxtx.c',
-	'ice_switch_filter.c'
+	'ice_switch_filter.c',
+	'ice_generic_flow.c'
 	)
 
 deps += ['hash']
-- 
2.9.5


^ permalink raw reply	[flat|nested] 73+ messages in thread

* [dpdk-dev] [PATCH v8 3/3] net/ice: add UDP tunnel port support
  2019-06-26  8:03 ` [dpdk-dev] [PATCH v8 " Qiming Yang
  2019-06-26  8:03   ` [dpdk-dev] [PATCH v8 1/3] net/ice: enable switch filter Qiming Yang
  2019-06-26  8:03   ` [dpdk-dev] [PATCH v8 2/3] net/ice: add generic flow API Qiming Yang
@ 2019-06-26  8:03   ` Qiming Yang
  2 siblings, 0 replies; 73+ messages in thread
From: Qiming Yang @ 2019-06-26  8:03 UTC (permalink / raw)
  To: dev; +Cc: Qiming Yang

Enabled UDP tunnel port add and delete functions.
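
For reference, a minimal, hypothetical usage sketch from the application
side (port_id stands for an already configured ice port, 4789 is simply
the IANA-assigned VXLAN port used as an example; the helpers below are
the standard ethdev entry points these new PMD ops plug into):

	#include <rte_ethdev.h>

	int ret;
	struct rte_eth_udp_tunnel tunnel_udp = {
		.udp_port = 4789,                   /* example VXLAN UDP port */
		.prot_type = RTE_TUNNEL_TYPE_VXLAN, /* only type handled here */
	};

	/* let the HW recognize this UDP port as VXLAN encapsulation */
	ret = rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel_udp);

	/* ... and remove it again when it is no longer needed */
	ret = rte_eth_dev_udp_tunnel_port_delete(port_id, &tunnel_udp);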

Signed-off-by: Qiming Yang <qiming.yang@intel.com>
---
 drivers/net/ice/ice_ethdev.c | 54 ++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 54 insertions(+)

diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index 8ee06d1..1533adb 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -88,6 +88,10 @@ static int ice_dev_filter_ctrl(struct rte_eth_dev *dev,
 			enum rte_filter_type filter_type,
 			enum rte_filter_op filter_op,
 			void *arg);
+static int ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
+			struct rte_eth_udp_tunnel *udp_tunnel);
+static int ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
+			struct rte_eth_udp_tunnel *udp_tunnel);
 
 static const struct rte_pci_id pci_id_ice_map[] = {
 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_BACKPLANE) },
@@ -147,6 +151,8 @@ static const struct eth_dev_ops ice_eth_dev_ops = {
 	.xstats_get_names             = ice_xstats_get_names,
 	.xstats_reset                 = ice_stats_reset,
 	.filter_ctrl                  = ice_dev_filter_ctrl,
+	.udp_tunnel_port_add          = ice_dev_udp_tunnel_port_add,
+	.udp_tunnel_port_del          = ice_dev_udp_tunnel_port_del,
 };
 
 /* store statistics names and its offset in stats structure */
@@ -3665,6 +3671,54 @@ ice_dev_filter_ctrl(struct rte_eth_dev *dev,
 	return ret;
 }
 
+/* Add UDP tunneling port */
+static int
+ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
+			     struct rte_eth_udp_tunnel *udp_tunnel)
+{
+	int ret = 0;
+	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	if (udp_tunnel == NULL)
+		return -EINVAL;
+
+	switch (udp_tunnel->prot_type) {
+	case RTE_TUNNEL_TYPE_VXLAN:
+		ret = ice_create_tunnel(hw, TNL_VXLAN, udp_tunnel->udp_port);
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "Invalid tunnel type");
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+/* Delete UDP tunneling port */
+static int
+ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
+			     struct rte_eth_udp_tunnel *udp_tunnel)
+{
+	int ret = 0;
+	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	if (udp_tunnel == NULL)
+		return -EINVAL;
+
+	switch (udp_tunnel->prot_type) {
+	case RTE_TUNNEL_TYPE_VXLAN:
+		ret = ice_destroy_tunnel(hw, udp_tunnel->udp_port, 0);
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "Invalid tunnel type");
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
 static int
 ice_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 	      struct rte_pci_device *pci_dev)
-- 
2.9.5


^ permalink raw reply	[flat|nested] 73+ messages in thread

* Re: [dpdk-dev] [PATCH v7 0/3] Enable rte_flow API in ice driver
  2019-06-26  7:42       ` Ferruh Yigit
@ 2019-06-26  8:26         ` Yang, Qiming
  0 siblings, 0 replies; 73+ messages in thread
From: Yang, Qiming @ 2019-06-26  8:26 UTC (permalink / raw)
  To: Yigit, Ferruh, Aaron Conole; +Cc: dev

Sure, I'll add the information in the cover letter.

> -----Original Message-----
> From: Yigit, Ferruh
> Sent: Wednesday, June 26, 2019 3:43 PM
> To: Yang, Qiming <qiming.yang@intel.com>; Aaron Conole
> <aconole@redhat.com>
> Cc: dev@dpdk.org
> Subject: Re: [dpdk-dev] [PATCH v7 0/3] Enable rte_flow API in ice driver
> 
> On 6/26/2019 2:52 AM, Yang, Qiming wrote:
> > Hi,
> > It seems these errors are caused by the lack of some base code that is
> > merged in the dpdk-next-net-intel branch.
> > It's not this patch set's issue.
> 
> If there is a dependency to another patchset, can you please put this
> information into cover letter?
> 
> >
> > Qiming
> > -----Original Message-----
> > From: Aaron Conole [mailto:aconole@redhat.com]
> > Sent: Tuesday, June 25, 2019 10:58 PM
> > To: Yang, Qiming <qiming.yang@intel.com>
> > Cc: dev@dpdk.org
> > Subject: Re: [dpdk-dev] [PATCH v7 0/3] Enable rte_flow API in ice
> > driver
> >
> > Qiming Yang <qiming.yang@intel.com> writes:
> >
> >> This patch set enables the backend of rte_flow, and the generic
> >> filter related functions in ice driver. Supported flows include ipv4,
> >> tcpv4, udpv4, ipv6, tcpv6, udpv6, tunnel, etc. This patch set depends
> >> on shared code update.
> >>
> >> ---
> >> v2 changes:
> >>  - added UDP tunnel port support.
> >>  - fixed compile issue.
> >>  - added document update.
> >> v3 changes:
> >>  - removed redundant parser.
> >>  - added License.
> >>  - added VXLAN and NVGRE item support.
> >> v4 changes:
> >>  - fixed some typos.
> >> v5 changes:
> >>  - fixed checkpatch issues.
> >> v6 changes:
> >>  - fixed an uninitialized variable issue.
> >> v7 changes:
> >>  - fixed queue action validation.
> >
> > Seems there are still compilation problems - is some dependency not
> > correctly updated?
> >
> > https://travis-ci.com/ovsrobot/dpdk/jobs/210680563
> >
> >> Qiming Yang (2):
> >>   net/ice: add generic flow API
> >>   net/ice: add UDP tunnel port support
> >>
> >> wei zhao (1):
> >>   net/ice: enable switch filter
> >>
> >>  drivers/net/ice/Makefile            |   2 +
> >>  drivers/net/ice/ice_ethdev.c        | 116 ++++++
> >>  drivers/net/ice/ice_ethdev.h        |  12 +
> >>  drivers/net/ice/ice_generic_flow.c  | 696 ++++++++++++++++++++++++++++++++++++
> >>  drivers/net/ice/ice_generic_flow.h  | 614 +++++++++++++++++++++++++++++++
> >>  drivers/net/ice/ice_switch_filter.c | 512 ++++++++++++++++++++++++++
> >>  drivers/net/ice/ice_switch_filter.h |  24 ++
> >>  drivers/net/ice/meson.build         |   4 +-
> >>  8 files changed, 1979 insertions(+), 1 deletion(-)  create mode
> >> 100644 drivers/net/ice/ice_generic_flow.c
> >>  create mode 100644 drivers/net/ice/ice_generic_flow.h
> >>  create mode 100644 drivers/net/ice/ice_switch_filter.c
> >>  create mode 100644 drivers/net/ice/ice_switch_filter.h


^ permalink raw reply	[flat|nested] 73+ messages in thread

* [dpdk-dev] [PATCH v8 0/4] Enable rte_flow API in ice driver
  2019-06-03  9:05 [dpdk-dev] [PATCH 0/2] Enable rte_flow API in ice driver Qiming Yang
                   ` (8 preceding siblings ...)
  2019-06-26  8:03 ` [dpdk-dev] [PATCH v8 " Qiming Yang
@ 2019-06-26  8:58 ` Qiming Yang
  2019-06-26  8:58   ` [dpdk-dev] [PATCH v8 1/4] net/ice: enable switch filter Qiming Yang
                     ` (4 more replies)
  2019-07-01  8:32 ` [dpdk-dev] [PATCH v9 0/3] " Qiming Yang
  10 siblings, 5 replies; 73+ messages in thread
From: Qiming Yang @ 2019-06-26  8:58 UTC (permalink / raw)
  To: dev; +Cc: Qiming Yang

This patch set enables the backend of rte_flow and the generic
filter related functions in the ice driver. Supported flows include
ipv4, tcpv4, udpv4, ipv6, tcpv6, udpv6, tunnel, etc.

This patch set depends on the patch set:
net/ice: shared code update.
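
As context for reviewers, the application-side usage is the standard
rte_flow sequence; attr, pattern and actions below are placeholders
(a fuller, hypothetical example is sketched under the generic flow API
patch later in this series):

	struct rte_flow_error err;
	struct rte_flow *flow;

	/* ask the PMD whether the rule is supported, then program it */
	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
		flow = rte_flow_create(port_id, &attr, pattern, actions, &err);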

---
v2 changes:
 - added UDP tunnel port support.
 - fixed compile issue.
 - added document update.
v3 changes:
 - removed redundant parser.
 - added License.
 - added VXLAN and NVGRE item support.
v4 changes:
 - fixed some typos.
v5 changes:
 - fixed checkpatch issues.
v6 changes:
 - fixed an uninitialized variable issue.
v7 changes:
 - fixed queue action validation.
v8 changes:
 - optimized some return values.
 - code reorganization.
 - added release note.


Qiming Yang (3):
  net/ice: add generic flow API
  net/ice: add UDP tunnel port support
  doc: add release note for generic flow

wei zhao (1):
  net/ice: enable switch filter

 doc/guides/rel_notes/release_19_08.rst |   1 +
 drivers/net/ice/Makefile               |   2 +
 drivers/net/ice/ice_ethdev.c           | 116 ++++++
 drivers/net/ice/ice_ethdev.h           |  12 +
 drivers/net/ice/ice_generic_flow.c     | 696 +++++++++++++++++++++++++++++++++
 drivers/net/ice/ice_generic_flow.h     | 614 +++++++++++++++++++++++++++++
 drivers/net/ice/ice_switch_filter.c    | 503 ++++++++++++++++++++++++
 drivers/net/ice/ice_switch_filter.h    |  24 ++
 drivers/net/ice/meson.build            |   4 +-
 9 files changed, 1971 insertions(+), 1 deletion(-)
 create mode 100644 drivers/net/ice/ice_generic_flow.c
 create mode 100644 drivers/net/ice/ice_generic_flow.h
 create mode 100644 drivers/net/ice/ice_switch_filter.c
 create mode 100644 drivers/net/ice/ice_switch_filter.h

-- 
2.9.5


^ permalink raw reply	[flat|nested] 73+ messages in thread

* [dpdk-dev] [PATCH v8 1/4] net/ice: enable switch filter
  2019-06-26  8:58 ` [dpdk-dev] [PATCH v8 0/4] Enable rte_flow API in ice driver Qiming Yang
@ 2019-06-26  8:58   ` Qiming Yang
  2019-06-26  8:58   ` [dpdk-dev] [PATCH v8 2/4] net/ice: add generic flow API Qiming Yang
                     ` (3 subsequent siblings)
  4 siblings, 0 replies; 73+ messages in thread
From: Qiming Yang @ 2019-06-26  8:58 UTC (permalink / raw)
  To: dev; +Cc: wei zhao

From: wei zhao <wei.zhao1@intel.com>

The patch enables the backend of rte_flow. It translates
the rte_flow_xxx structures into the device-specific data
structures and configures the packet processing engine's
binary classifier (switch) properly.
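
To illustrate the translation, here is a rough, illustrative sketch
(not part of the patch) of the lookup list ice_parse_switch_filter()
builds for an ETH / IPV4 pattern whose IPv4 destination 192.168.0.1 is
fully masked, before ice_add_adv_rule() programs the switch; the type
and field names follow the base-code structures this patch already
uses:

	struct ice_adv_lkup_elem list[2];

	memset(list, 0, sizeof(list));
	list[0].type = ICE_MAC_OFOS;                /* outer Ethernet */
	list[1].type = ICE_IPV4_OFOS;               /* outer IPv4 */
	/* rte_flow carries the address big-endian, it is copied as-is */
	list[1].h_u.ipv4_hdr.dst_addr = rte_cpu_to_be_32(0xC0A80001);
	list[1].m_u.ipv4_hdr.dst_addr = UINT32_MAX; /* fully masked field */

The action side (queue or drop plus the VSI handle) is filled into
struct ice_adv_rule_info by ice_parse_switch_action() in the same way.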

Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
---
 drivers/net/ice/Makefile            |   1 +
 drivers/net/ice/ice_ethdev.c        |  18 ++
 drivers/net/ice/ice_ethdev.h        |   7 +
 drivers/net/ice/ice_switch_filter.c | 503 ++++++++++++++++++++++++++++++++++++
 drivers/net/ice/ice_switch_filter.h |  24 ++
 drivers/net/ice/meson.build         |   3 +-
 6 files changed, 555 insertions(+), 1 deletion(-)
 create mode 100644 drivers/net/ice/ice_switch_filter.c
 create mode 100644 drivers/net/ice/ice_switch_filter.h

diff --git a/drivers/net/ice/Makefile b/drivers/net/ice/Makefile
index 0e5c55e..b10d826 100644
--- a/drivers/net/ice/Makefile
+++ b/drivers/net/ice/Makefile
@@ -60,6 +60,7 @@ ifeq ($(CONFIG_RTE_ARCH_X86), y)
 SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_rxtx_vec_sse.c
 endif
 
+SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_switch_filter.c
 ifeq ($(findstring RTE_MACHINE_CPUFLAG_AVX2,$(CFLAGS)),RTE_MACHINE_CPUFLAG_AVX2)
 	CC_AVX2_SUPPORT=1
 else
diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index 203d0a9..a94aa7e 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -1364,6 +1364,21 @@ static int ice_load_pkg(struct rte_eth_dev *dev)
 	return err;
 }
 
+static void
+ice_base_queue_get(struct ice_pf *pf)
+{
+	uint32_t reg;
+	struct ice_hw *hw = ICE_PF_TO_HW(pf);
+
+	reg = ICE_READ_REG(hw, PFLAN_RX_QALLOC);
+	if (reg & PFLAN_RX_QALLOC_VALID_M) {
+		pf->base_queue = reg & PFLAN_RX_QALLOC_FIRSTQ_M;
+	} else {
+		PMD_INIT_LOG(WARNING, "Failed to get Rx base queue"
+					" index");
+	}
+}
+
 static int
 ice_dev_init(struct rte_eth_dev *dev)
 {
@@ -1460,6 +1475,9 @@ ice_dev_init(struct rte_eth_dev *dev)
 	/* enable uio intr after callback register */
 	rte_intr_enable(intr_handle);
 
+	/* get base queue pairs index in the device */
+	ice_base_queue_get(pf);
+
 	return 0;
 
 err_pf_setup:
diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index 1385afa..50b966c 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -234,6 +234,12 @@ struct ice_vsi {
 	bool offset_loaded;
 };
 
+/* Struct to store flow created. */
+struct rte_flow {
+	TAILQ_ENTRY(rte_flow) node;
+	void *rule;
+};
+
 struct ice_pf {
 	struct ice_adapter *adapter; /* The adapter this PF associate to */
 	struct ice_vsi *main_vsi; /* pointer to main VSI structure */
@@ -252,6 +258,7 @@ struct ice_pf {
 	uint16_t hash_lut_size; /* The size of hash lookup table */
 	uint16_t lan_nb_qp_max;
 	uint16_t lan_nb_qps; /* The number of queue pairs of LAN */
+	uint16_t base_queue; /* The base queue pairs index in the device */
 	struct ice_hw_port_stats stats_offset;
 	struct ice_hw_port_stats stats;
 	/* internal packet statistics, it should be excluded from the total */
diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
new file mode 100644
index 0000000..e88a555
--- /dev/null
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -0,0 +1,503 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_eth_ctrl.h>
+#include <rte_tailq.h>
+#include <rte_flow_driver.h>
+
+#include "ice_logs.h"
+#include "base/ice_type.h"
+#include "ice_switch_filter.h"
+
+static int
+ice_parse_switch_filter(const struct rte_flow_item pattern[],
+			const struct rte_flow_action actions[],
+			struct rte_flow_error *error,
+			struct ice_adv_lkup_elem *list,
+			uint16_t *lkups_num,
+			enum ice_sw_tunnel_type tun_type)
+{
+	const struct rte_flow_item *item = pattern;
+	enum rte_flow_item_type item_type;
+	const struct rte_flow_item_eth *eth_spec, *eth_mask;
+	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
+	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
+	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
+	const struct rte_flow_item_udp *udp_spec, *udp_mask;
+	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
+	const struct rte_flow_item_nvgre  *nvgre_spec, *nvgre_mask;
+	const struct rte_flow_item_vxlan  *vxlan_spec, *vxlan_mask;
+	uint16_t j, t = 0;
+	uint16_t tunnel_valid = 0;
+
+	for (item = pattern; item->type !=
+			RTE_FLOW_ITEM_TYPE_END; item++) {
+		item_type = item->type;
+
+		switch (item_type) {
+		case RTE_FLOW_ITEM_TYPE_ETH:
+			eth_spec = item->spec;
+			eth_mask = item->mask;
+			if (eth_spec && eth_mask) {
+				list[t].type = (tun_type == ICE_NON_TUN) ?
+					ICE_MAC_OFOS : ICE_MAC_IL;
+				struct ice_ether_hdr *h;
+				struct ice_ether_hdr *m;
+				h = &list[t].h_u.eth_hdr;
+				m = &list[t].m_u.eth_hdr;
+				for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
+					if (eth_mask->src.addr_bytes[j] ==
+								UINT8_MAX) {
+						h->src_addr[j] =
+						eth_spec->src.addr_bytes[j];
+						m->src_addr[j] =
+						eth_mask->src.addr_bytes[j];
+					}
+					if (eth_mask->dst.addr_bytes[j] ==
+								UINT8_MAX) {
+						h->dst_addr[j] =
+						eth_spec->dst.addr_bytes[j];
+						m->dst_addr[j] =
+						eth_mask->dst.addr_bytes[j];
+					}
+				}
+				if (eth_mask->type == UINT16_MAX) {
+					h->ethtype_id =
+					rte_be_to_cpu_16(eth_spec->type);
+					m->ethtype_id = UINT16_MAX;
+				}
+				t++;
+			} else if (!eth_spec && !eth_mask) {
+				list[t].type = (tun_type == ICE_NON_TUN) ?
+					ICE_MAC_OFOS : ICE_MAC_IL;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_IPV4:
+			ipv4_spec = item->spec;
+			ipv4_mask = item->mask;
+			if (ipv4_spec && ipv4_mask) {
+				list[t].type = (tun_type == ICE_NON_TUN) ?
+					ICE_IPV4_OFOS : ICE_IPV4_IL;
+				if (ipv4_mask->hdr.src_addr == UINT32_MAX) {
+					list[t].h_u.ipv4_hdr.src_addr =
+						ipv4_spec->hdr.src_addr;
+					list[t].m_u.ipv4_hdr.src_addr =
+						UINT32_MAX;
+				}
+				if (ipv4_mask->hdr.dst_addr == UINT32_MAX) {
+					list[t].h_u.ipv4_hdr.dst_addr =
+						ipv4_spec->hdr.dst_addr;
+					list[t].m_u.ipv4_hdr.dst_addr =
+						UINT32_MAX;
+				}
+				if (ipv4_mask->hdr.time_to_live == UINT8_MAX) {
+					list[t].h_u.ipv4_hdr.time_to_live =
+						ipv4_spec->hdr.time_to_live;
+					list[t].m_u.ipv4_hdr.time_to_live =
+						UINT8_MAX;
+				}
+				if (ipv4_mask->hdr.next_proto_id == UINT8_MAX) {
+					list[t].h_u.ipv4_hdr.protocol =
+						ipv4_spec->hdr.next_proto_id;
+					list[t].m_u.ipv4_hdr.protocol =
+						UINT8_MAX;
+				}
+				if (ipv4_mask->hdr.type_of_service ==
+						UINT8_MAX) {
+					list[t].h_u.ipv4_hdr.tos =
+						ipv4_spec->hdr.type_of_service;
+					list[t].m_u.ipv4_hdr.tos = UINT8_MAX;
+				}
+				t++;
+			} else if (!ipv4_spec && !ipv4_mask) {
+				list[t].type = (tun_type == ICE_NON_TUN) ?
+					ICE_IPV4_OFOS : ICE_IPV4_IL;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_IPV6:
+			ipv6_spec = item->spec;
+			ipv6_mask = item->mask;
+			if (ipv6_spec && ipv6_mask) {
+				list[t].type = (tun_type == ICE_NON_TUN) ?
+					ICE_IPV6_OFOS : ICE_IPV6_IL;
+				struct ice_ipv6_hdr *f;
+				struct ice_ipv6_hdr *s;
+				f = &list[t].h_u.ice_ipv6_ofos_hdr;
+				s = &list[t].m_u.ice_ipv6_ofos_hdr;
+				for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
+					if (ipv6_mask->hdr.src_addr[j] ==
+								UINT8_MAX) {
+						f->src_addr[j] =
+						ipv6_spec->hdr.src_addr[j];
+						s->src_addr[j] =
+						ipv6_mask->hdr.src_addr[j];
+					}
+					if (ipv6_mask->hdr.dst_addr[j] ==
+								UINT8_MAX) {
+						f->dst_addr[j] =
+						ipv6_spec->hdr.dst_addr[j];
+						s->dst_addr[j] =
+						ipv6_mask->hdr.dst_addr[j];
+					}
+				}
+				if (ipv6_mask->hdr.proto == UINT8_MAX) {
+					f->next_hdr =
+						ipv6_spec->hdr.proto;
+					s->next_hdr = UINT8_MAX;
+				}
+				if (ipv6_mask->hdr.hop_limits == UINT8_MAX) {
+					f->hop_limit =
+						ipv6_spec->hdr.hop_limits;
+					s->hop_limit = UINT8_MAX;
+				}
+				t++;
+			} else if (!ipv6_spec && !ipv6_mask) {
+				list[t].type = (tun_type == ICE_NON_TUN) ?
+					ICE_IPV6_OFOS : ICE_IPV6_IL;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_UDP:
+			udp_spec = item->spec;
+			udp_mask = item->mask;
+			if (udp_spec && udp_mask) {
+				if (tun_type == ICE_SW_TUN_VXLAN &&
+						tunnel_valid == 0)
+					list[t].type = ICE_UDP_OF;
+				else
+					list[t].type = ICE_UDP_ILOS;
+				if (udp_mask->hdr.src_port == UINT16_MAX) {
+					list[t].h_u.l4_hdr.src_port =
+						udp_spec->hdr.src_port;
+					list[t].m_u.l4_hdr.src_port =
+						udp_mask->hdr.src_port;
+				}
+				if (udp_mask->hdr.dst_port == UINT16_MAX) {
+					list[t].h_u.l4_hdr.dst_port =
+						udp_spec->hdr.dst_port;
+					list[t].m_u.l4_hdr.dst_port =
+						udp_mask->hdr.dst_port;
+				}
+				t++;
+			} else if (!udp_spec && !udp_mask) {
+				list[t].type = ICE_UDP_ILOS;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_TCP:
+			tcp_spec = item->spec;
+			tcp_mask = item->mask;
+			if (tcp_spec && tcp_mask) {
+				list[t].type = ICE_TCP_IL;
+				if (tcp_mask->hdr.src_port == UINT16_MAX) {
+					list[t].h_u.l4_hdr.src_port =
+						tcp_spec->hdr.src_port;
+					list[t].m_u.l4_hdr.src_port =
+						tcp_mask->hdr.src_port;
+				}
+				if (tcp_mask->hdr.dst_port == UINT16_MAX) {
+					list[t].h_u.l4_hdr.dst_port =
+						tcp_spec->hdr.dst_port;
+					list[t].m_u.l4_hdr.dst_port =
+						tcp_mask->hdr.dst_port;
+				}
+				t++;
+			} else if (!tcp_spec && !tcp_mask) {
+				list[t].type = ICE_TCP_IL;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_SCTP:
+			sctp_spec = item->spec;
+			sctp_mask = item->mask;
+			if (sctp_spec && sctp_mask) {
+				list[t].type = ICE_SCTP_IL;
+				if (sctp_mask->hdr.src_port == UINT16_MAX) {
+					list[t].h_u.sctp_hdr.src_port =
+						sctp_spec->hdr.src_port;
+					list[t].m_u.sctp_hdr.src_port =
+						sctp_mask->hdr.src_port;
+				}
+				if (sctp_mask->hdr.dst_port == UINT16_MAX) {
+					list[t].h_u.sctp_hdr.dst_port =
+						sctp_spec->hdr.dst_port;
+					list[t].m_u.sctp_hdr.dst_port =
+						sctp_mask->hdr.dst_port;
+				}
+				t++;
+			} else if (!sctp_spec && !sctp_mask) {
+				list[t].type = ICE_SCTP_IL;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_VXLAN:
+			vxlan_spec = item->spec;
+			vxlan_mask = item->mask;
+			tunnel_valid = 1;
+			if (vxlan_spec && vxlan_mask) {
+				list[t].type = ICE_VXLAN;
+				if (vxlan_mask->vni[0] == UINT8_MAX &&
+					vxlan_mask->vni[1] == UINT8_MAX &&
+					vxlan_mask->vni[2] == UINT8_MAX) {
+					list[t].h_u.tnl_hdr.vni =
+						(vxlan_spec->vni[2] << 16) |
+						(vxlan_spec->vni[1] << 8) |
+						vxlan_spec->vni[0];
+					list[t].m_u.tnl_hdr.vni =
+						UINT32_MAX;
+				}
+				t++;
+			} else if (!vxlan_spec && !vxlan_mask) {
+				list[t].type = ICE_VXLAN;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_NVGRE:
+			nvgre_spec = item->spec;
+			nvgre_mask = item->mask;
+			tunnel_valid = 1;
+			if (nvgre_spec && nvgre_mask) {
+				list[t].type = ICE_NVGRE;
+				if (nvgre_mask->tni[0] == UINT8_MAX &&
+					nvgre_mask->tni[1] == UINT8_MAX &&
+					nvgre_mask->tni[2] == UINT8_MAX) {
+					list[t].h_u.nvgre_hdr.tni_flow =
+						(nvgre_spec->tni[2] << 16) |
+						(nvgre_spec->tni[1] << 8) |
+						nvgre_spec->tni[0];
+					list[t].m_u.nvgre_hdr.tni_flow =
+						UINT32_MAX;
+				}
+				t++;
+			} else if (!nvgre_spec && !nvgre_mask) {
+				list[t].type = ICE_NVGRE;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_VOID:
+		case RTE_FLOW_ITEM_TYPE_END:
+			break;
+
+		default:
+			rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ITEM, item,
+				   "Invalid pattern item.");
+			goto out;
+		}
+	}
+
+	*lkups_num = t;
+
+	return 0;
+out:
+	return -rte_errno;
+}
+
+/* For now the ice switch filter action code only
+ * supports the QUEUE and DROP actions.
+ */
+static int
+ice_parse_switch_action(struct ice_pf *pf,
+				 const struct rte_flow_action *actions,
+				 struct rte_flow_error *error,
+				 struct ice_adv_rule_info *rule_info)
+{
+	struct ice_vsi *vsi = pf->main_vsi;
+	const struct rte_flow_action_queue *act_q;
+	uint16_t base_queue;
+	const struct rte_flow_action *action;
+	enum rte_flow_action_type action_type;
+
+	base_queue = pf->base_queue;
+	for (action = actions; action->type !=
+			RTE_FLOW_ACTION_TYPE_END; action++) {
+		action_type = action->type;
+		switch (action_type) {
+		case RTE_FLOW_ACTION_TYPE_QUEUE:
+			act_q = action->conf;
+			rule_info->sw_act.fltr_act =
+				ICE_FWD_TO_Q;
+			rule_info->sw_act.fwd_id.q_id =
+				base_queue + act_q->index;
+			break;
+
+		case RTE_FLOW_ACTION_TYPE_DROP:
+			rule_info->sw_act.fltr_act =
+				ICE_DROP_PACKET;
+			break;
+
+		case RTE_FLOW_ACTION_TYPE_VOID:
+			break;
+
+		default:
+			rte_flow_error_set(error,
+				EINVAL,
+				RTE_FLOW_ERROR_TYPE_ACTION,
+				actions,
+				"Invalid action type");
+			return -rte_errno;
+		}
+	}
+
+	rule_info->sw_act.vsi_handle = vsi->idx;
+	rule_info->rx = 1;
+	rule_info->sw_act.src = vsi->idx;
+
+	return 0;
+}
+
+static int
+ice_switch_rule_set(struct ice_pf *pf,
+			struct ice_adv_lkup_elem *list,
+			uint16_t lkups_cnt,
+			struct ice_adv_rule_info *rule_info,
+			struct rte_flow *flow,
+			struct rte_flow_error *error)
+{
+	struct ice_hw *hw = ICE_PF_TO_HW(pf);
+	int ret;
+	struct ice_rule_query_data rule_added = {0};
+	struct ice_rule_query_data *filter_ptr;
+
+	if (lkups_cnt > ICE_MAX_CHAIN_WORDS) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+			"item number too large for rule");
+		return -rte_errno;
+	}
+	if (!list) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+			"lookup list should not be NULL");
+		return -rte_errno;
+	}
+
+	ret = ice_add_adv_rule(hw, list, lkups_cnt, rule_info, &rule_added);
+
+	if (!ret) {
+		filter_ptr = rte_zmalloc("ice_switch_filter",
+			sizeof(struct ice_rule_query_data), 0);
+		if (!filter_ptr) {
+			PMD_DRV_LOG(ERR, "failed to allocate memory");
+			return -EINVAL;
+		}
+		flow->rule = filter_ptr;
+		rte_memcpy(filter_ptr,
+			&rule_added,
+			sizeof(struct ice_rule_query_data));
+	}
+
+	return ret;
+}
+
+int
+ice_create_switch_filter(struct ice_pf *pf,
+			const struct rte_flow_item pattern[],
+			const struct rte_flow_action actions[],
+			struct rte_flow *flow,
+			struct rte_flow_error *error)
+{
+	int ret = 0;
+	struct ice_adv_rule_info rule_info = {0};
+	struct ice_adv_lkup_elem *list = NULL;
+	uint16_t lkups_num = 0;
+	const struct rte_flow_item *item = pattern;
+	uint16_t item_num = 0;
+	enum ice_sw_tunnel_type tun_type = ICE_NON_TUN;
+
+	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+		item_num++;
+		if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN)
+			tun_type = ICE_SW_TUN_VXLAN;
+		if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE)
+			tun_type = ICE_SW_TUN_NVGRE;
+	}
+	rule_info.tun_type = tun_type;
+
+	list = rte_zmalloc(NULL, item_num * sizeof(*list), 0);
+	if (!list) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "No memory for PMD internal items");
+		return -rte_errno;
+	}
+
+	ret = ice_parse_switch_filter(pattern, actions, error,
+			list, &lkups_num, tun_type);
+	if (ret)
+		goto error;
+
+	ret = ice_parse_switch_action(pf, actions, error, &rule_info);
+	if (ret)
+		goto error;
+
+	ret = ice_switch_rule_set(pf, list, lkups_num, &rule_info, flow, error);
+	if (ret)
+		goto error;
+
+	rte_free(list);
+	return 0;
+
+error:
+	rte_free(list);
+
+	return -rte_errno;
+}
+
+int
+ice_destroy_switch_filter(struct ice_pf *pf,
+			struct rte_flow *flow,
+			struct rte_flow_error *error)
+{
+	struct ice_hw *hw = ICE_PF_TO_HW(pf);
+	int ret;
+	struct ice_rule_query_data *filter_ptr;
+
+	filter_ptr = (struct ice_rule_query_data *)
+			flow->rule;
+
+	if (!filter_ptr) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+			"no such flow"
+			" created by switch filter");
+		return -rte_errno;
+	}
+
+	ret = ice_rem_adv_rule_by_id(hw, filter_ptr);
+	if (ret) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+			"fail to destroy switch filter rule");
+		return -rte_errno;
+	}
+
+	rte_free(filter_ptr);
+	return ret;
+}
+
+void
+ice_free_switch_filter_rule(void *rule)
+{
+	struct ice_rule_query_data *filter_ptr;
+
+	filter_ptr = (struct ice_rule_query_data *)rule;
+
+	rte_free(filter_ptr);
+}
diff --git a/drivers/net/ice/ice_switch_filter.h b/drivers/net/ice/ice_switch_filter.h
new file mode 100644
index 0000000..cea4799
--- /dev/null
+++ b/drivers/net/ice/ice_switch_filter.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+#ifndef _ICE_SWITCH_FILTER_H_
+#define _ICE_SWITCH_FILTER_H_
+
+#include "base/ice_switch.h"
+#include "base/ice_type.h"
+#include "ice_ethdev.h"
+
+int
+ice_create_switch_filter(struct ice_pf *pf,
+			const struct rte_flow_item pattern[],
+			const struct rte_flow_action actions[],
+			struct rte_flow *flow,
+			struct rte_flow_error *error);
+int
+ice_destroy_switch_filter(struct ice_pf *pf,
+			struct rte_flow *flow,
+			struct rte_flow_error *error);
+void
+ice_free_switch_filter_rule(void *rule);
+#endif /* _ICE_SWITCH_FILTER_H_ */
diff --git a/drivers/net/ice/meson.build b/drivers/net/ice/meson.build
index 2bec688..8697676 100644
--- a/drivers/net/ice/meson.build
+++ b/drivers/net/ice/meson.build
@@ -6,7 +6,8 @@ objs = [base_objs]
 
 sources = files(
 	'ice_ethdev.c',
-	'ice_rxtx.c'
+	'ice_rxtx.c',
+	'ice_switch_filter.c'
 	)
 
 deps += ['hash']
-- 
2.9.5


^ permalink raw reply	[flat|nested] 73+ messages in thread

* [dpdk-dev] [PATCH v8 2/4] net/ice: add generic flow API
  2019-06-26  8:58 ` [dpdk-dev] [PATCH v8 0/4] Enable rte_flow API in ice driver Qiming Yang
  2019-06-26  8:58   ` [dpdk-dev] [PATCH v8 1/4] net/ice: enable switch filter Qiming Yang
@ 2019-06-26  8:58   ` Qiming Yang
  2019-06-26  8:58   ` [dpdk-dev] [PATCH v8 3/4] net/ice: add UDP tunnel port support Qiming Yang
                     ` (2 subsequent siblings)
  4 siblings, 0 replies; 73+ messages in thread
From: Qiming Yang @ 2019-06-26  8:58 UTC (permalink / raw)
  To: dev; +Cc: Qiming Yang

This patch adds ice_flow_create, ice_flow_destroy,
ice_flow_flush and ice_flow_validate support;
these are used to handle all the generic filters.
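
For reviewers, a minimal, hypothetical usage sketch of the kind of rule
this series accepts (port_id is assumed to be a started ice port with
at least four Rx queues; the address, port and queue index are
arbitrary example values):

	#include <rte_flow.h>
	#include <rte_byteorder.h>

	/* match TCP packets to 192.168.0.1:80 and steer them to Rx queue 3 */
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_ipv4 ip_spec = {
		.hdr.dst_addr = rte_cpu_to_be_32(0xC0A80001),
	};
	struct rte_flow_item_ipv4 ip_mask = {
		.hdr.dst_addr = rte_cpu_to_be_32(0xFFFFFFFF),
	};
	struct rte_flow_item_tcp tcp_spec = {
		.hdr.dst_port = rte_cpu_to_be_16(80),
	};
	struct rte_flow_item_tcp tcp_mask = {
		.hdr.dst_port = rte_cpu_to_be_16(0xFFFF),
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ip_spec, .mask = &ip_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_TCP,
		  .spec = &tcp_spec, .mask = &tcp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 3 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;
	/* a NULL return means failure; err.message explains why */
	struct rte_flow *flow = rte_flow_create(port_id, &attr, pattern,
						actions, &err);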

Signed-off-by: Qiming Yang <qiming.yang@intel.com>
---
 drivers/net/ice/Makefile           |   1 +
 drivers/net/ice/ice_ethdev.c       |  44 +++
 drivers/net/ice/ice_ethdev.h       |   5 +
 drivers/net/ice/ice_generic_flow.c | 696 +++++++++++++++++++++++++++++++++++++
 drivers/net/ice/ice_generic_flow.h | 614 ++++++++++++++++++++++++++++++++
 drivers/net/ice/meson.build        |   3 +-
 6 files changed, 1362 insertions(+), 1 deletion(-)
 create mode 100644 drivers/net/ice/ice_generic_flow.c
 create mode 100644 drivers/net/ice/ice_generic_flow.h

diff --git a/drivers/net/ice/Makefile b/drivers/net/ice/Makefile
index b10d826..32abeb6 100644
--- a/drivers/net/ice/Makefile
+++ b/drivers/net/ice/Makefile
@@ -79,5 +79,6 @@ endif
 ifeq ($(CC_AVX2_SUPPORT), 1)
 	SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_rxtx_vec_avx2.c
 endif
+SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_generic_flow.c
 
 include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index a94aa7e..8ee06d1 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -15,6 +15,7 @@
 #include "base/ice_dcb.h"
 #include "ice_ethdev.h"
 #include "ice_rxtx.h"
+#include "ice_switch_filter.h"
 
 #define ICE_MAX_QP_NUM "max_queue_pair_num"
 #define ICE_DFLT_OUTER_TAG_TYPE ICE_AQ_VSI_OUTER_TAG_VLAN_9100
@@ -83,6 +84,10 @@ static int ice_xstats_get(struct rte_eth_dev *dev,
 static int ice_xstats_get_names(struct rte_eth_dev *dev,
 				struct rte_eth_xstat_name *xstats_names,
 				unsigned int limit);
+static int ice_dev_filter_ctrl(struct rte_eth_dev *dev,
+			enum rte_filter_type filter_type,
+			enum rte_filter_op filter_op,
+			void *arg);
 
 static const struct rte_pci_id pci_id_ice_map[] = {
 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_BACKPLANE) },
@@ -141,6 +146,7 @@ static const struct eth_dev_ops ice_eth_dev_ops = {
 	.xstats_get                   = ice_xstats_get,
 	.xstats_get_names             = ice_xstats_get_names,
 	.xstats_reset                 = ice_stats_reset,
+	.filter_ctrl                  = ice_dev_filter_ctrl,
 };
 
 /* store statistics names and its offset in stats structure */
@@ -1478,6 +1484,8 @@ ice_dev_init(struct rte_eth_dev *dev)
 	/* get base queue pairs index  in the device */
 	ice_base_queue_get(pf);
 
+	TAILQ_INIT(&pf->flow_list);
+
 	return 0;
 
 err_pf_setup:
@@ -1620,6 +1628,8 @@ ice_dev_uninit(struct rte_eth_dev *dev)
 {
 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	struct rte_flow *p_flow;
 
 	ice_dev_close(dev);
 
@@ -1637,6 +1647,13 @@ ice_dev_uninit(struct rte_eth_dev *dev)
 	rte_intr_callback_unregister(intr_handle,
 				     ice_interrupt_handler, dev);
 
+	/* Remove all flows */
+	while ((p_flow = TAILQ_FIRST(&pf->flow_list))) {
+		TAILQ_REMOVE(&pf->flow_list, p_flow, node);
+		ice_free_switch_filter_rule(p_flow->rule);
+		rte_free(p_flow);
+	}
+
 	return 0;
 }
 
@@ -3622,6 +3639,33 @@ static int ice_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
 }
 
 static int
+ice_dev_filter_ctrl(struct rte_eth_dev *dev,
+		     enum rte_filter_type filter_type,
+		     enum rte_filter_op filter_op,
+		     void *arg)
+{
+	int ret = 0;
+
+	if (!dev)
+		return -EINVAL;
+
+	switch (filter_type) {
+	case RTE_ETH_FILTER_GENERIC:
+		if (filter_op != RTE_ETH_FILTER_GET)
+			return -EINVAL;
+		*(const void **)arg = &ice_flow_ops;
+		break;
+	default:
+		PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
+					filter_type);
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+static int
 ice_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 	      struct rte_pci_device *pci_dev)
 {
diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index 50b966c..8a52239 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -234,12 +234,16 @@ struct ice_vsi {
 	bool offset_loaded;
 };
 
+extern const struct rte_flow_ops ice_flow_ops;
+
 /* Struct to store flow created. */
 struct rte_flow {
 	TAILQ_ENTRY(rte_flow) node;
 	void *rule;
 };
 
+TAILQ_HEAD(ice_flow_list, rte_flow);
+
 struct ice_pf {
 	struct ice_adapter *adapter; /* The adapter this PF associate to */
 	struct ice_vsi *main_vsi; /* pointer to main VSI structure */
@@ -266,6 +270,7 @@ struct ice_pf {
 	struct ice_eth_stats internal_stats;
 	bool offset_loaded;
 	bool adapter_stopped;
+	struct ice_flow_list flow_list;
 };
 
 /**
diff --git a/drivers/net/ice/ice_generic_flow.c b/drivers/net/ice/ice_generic_flow.c
new file mode 100644
index 0000000..d5ff278
--- /dev/null
+++ b/drivers/net/ice/ice_generic_flow.c
@@ -0,0 +1,696 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_malloc.h>
+
+#include "ice_ethdev.h"
+#include "ice_generic_flow.h"
+#include "ice_switch_filter.h"
+
+static int ice_flow_validate(struct rte_eth_dev *dev,
+		const struct rte_flow_attr *attr,
+		const struct rte_flow_item pattern[],
+		const struct rte_flow_action actions[],
+		struct rte_flow_error *error);
+static struct rte_flow *ice_flow_create(struct rte_eth_dev *dev,
+		const struct rte_flow_attr *attr,
+		const struct rte_flow_item pattern[],
+		const struct rte_flow_action actions[],
+		struct rte_flow_error *error);
+static int ice_flow_destroy(struct rte_eth_dev *dev,
+		struct rte_flow *flow,
+		struct rte_flow_error *error);
+static int ice_flow_flush(struct rte_eth_dev *dev,
+		struct rte_flow_error *error);
+
+const struct rte_flow_ops ice_flow_ops = {
+	.validate = ice_flow_validate,
+	.create = ice_flow_create,
+	.destroy = ice_flow_destroy,
+	.flush = ice_flow_flush,
+};
+
+static int
+ice_flow_valid_attr(const struct rte_flow_attr *attr,
+		     struct rte_flow_error *error)
+{
+	/* Must be input direction */
+	if (!attr->ingress) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+				   attr, "Only support ingress.");
+		return -rte_errno;
+	}
+
+	/* Not supported */
+	if (attr->egress) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+				   attr, "Not support egress.");
+		return -rte_errno;
+	}
+
+	/* Not supported */
+	if (attr->priority) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+				   attr, "Not support priority.");
+		return -rte_errno;
+	}
+
+	/* Not supported */
+	if (attr->group) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+				   attr, "Not support group.");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+/* Find the first VOID or non-VOID item pointer */
+static const struct rte_flow_item *
+ice_find_first_item(const struct rte_flow_item *item, bool is_void)
+{
+	bool is_find;
+
+	while (item->type != RTE_FLOW_ITEM_TYPE_END) {
+		if (is_void)
+			is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
+		else
+			is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
+		if (is_find)
+			break;
+		item++;
+	}
+	return item;
+}
+
+/* Skip all VOID items of the pattern */
+static void
+ice_pattern_skip_void_item(struct rte_flow_item *items,
+			    const struct rte_flow_item *pattern)
+{
+	uint32_t cpy_count = 0;
+	const struct rte_flow_item *pb = pattern, *pe = pattern;
+
+	for (;;) {
+		/* Find a non-void item first */
+		pb = ice_find_first_item(pb, false);
+		if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
+			pe = pb;
+			break;
+		}
+
+		/* Find a void item */
+		pe = ice_find_first_item(pb + 1, true);
+
+		cpy_count = pe - pb;
+		rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);
+
+		items += cpy_count;
+
+		if (pe->type == RTE_FLOW_ITEM_TYPE_END) {
+			pb = pe;
+			break;
+		}
+
+		pb = pe + 1;
+	}
+	/* Copy the END item. */
+	rte_memcpy(items, pe, sizeof(struct rte_flow_item));
+}
+
+/* Check if the pattern matches a supported item type array */
+static bool
+ice_match_pattern(enum rte_flow_item_type *item_array,
+		const struct rte_flow_item *pattern)
+{
+	const struct rte_flow_item *item = pattern;
+
+	while ((*item_array == item->type) &&
+	       (*item_array != RTE_FLOW_ITEM_TYPE_END)) {
+		item_array++;
+		item++;
+	}
+
+	return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
+		item->type == RTE_FLOW_ITEM_TYPE_END);
+}
+
+static uint64_t ice_flow_valid_pattern(const struct rte_flow_item pattern[],
+		struct rte_flow_error *error)
+{
+	uint16_t i = 0;
+	uint64_t inset;
+	struct rte_flow_item *items; /* used for pattern without VOID items */
+	uint32_t item_num = 0; /* non-void item number */
+
+	/* Get the non-void item number of pattern */
+	while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
+		if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
+			item_num++;
+		i++;
+	}
+	item_num++;
+
+	items = rte_zmalloc("ice_pattern",
+			    item_num * sizeof(struct rte_flow_item), 0);
+	if (!items) {
+		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+				   NULL, "No memory for PMD internal items.");
+		return -ENOMEM;
+	}
+
+	ice_pattern_skip_void_item(items, pattern);
+
+	for (i = 0; i < RTE_DIM(ice_supported_patterns); i++)
+		if (ice_match_pattern(ice_supported_patterns[i].items,
+				      items)) {
+			inset = ice_supported_patterns[i].sw_fields;
+			rte_free(items);
+			return inset;
+		}
+	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+			   pattern, "Unsupported pattern");
+
+	rte_free(items);
+	return 0;
+}
+
+static uint64_t ice_get_flow_field(const struct rte_flow_item pattern[],
+			struct rte_flow_error *error)
+{
+	const struct rte_flow_item *item = pattern;
+	const struct rte_flow_item_eth *eth_spec, *eth_mask;
+	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
+	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
+	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
+	const struct rte_flow_item_udp *udp_spec, *udp_mask;
+	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
+	const struct rte_flow_item_icmp *icmp_mask;
+	const struct rte_flow_item_icmp6 *icmp6_mask;
+	const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
+	const struct rte_flow_item_nvgre *nvgre_spec, *nvgre_mask;
+	enum rte_flow_item_type item_type;
+	uint8_t  ipv6_addr_mask[16] = {
+		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
+	uint64_t input_set = ICE_INSET_NONE;
+	bool outer_ip = true;
+	bool outer_l4 = true;
+
+	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+		if (item->last) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Not support range");
+			return 0;
+		}
+		item_type = item->type;
+		switch (item_type) {
+		case RTE_FLOW_ITEM_TYPE_ETH:
+			eth_spec = item->spec;
+			eth_mask = item->mask;
+
+			if (eth_spec && eth_mask) {
+				if (rte_is_broadcast_ether_addr(&eth_mask->src))
+					input_set |= ICE_INSET_SMAC;
+				if (rte_is_broadcast_ether_addr(&eth_mask->dst))
+					input_set |= ICE_INSET_DMAC;
+				if (eth_mask->type == RTE_BE16(0xffff))
+					input_set |= ICE_INSET_ETHERTYPE;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_IPV4:
+			ipv4_spec = item->spec;
+			ipv4_mask = item->mask;
+
+			if (!(ipv4_spec && ipv4_mask)) {
+				rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Invalid IPv4 spec or mask.");
+				return 0;
+			}
+
+			/* Check IPv4 mask and update input set */
+			if (ipv4_mask->hdr.version_ihl ||
+			    ipv4_mask->hdr.total_length ||
+			    ipv4_mask->hdr.packet_id ||
+			    ipv4_mask->hdr.hdr_checksum) {
+				rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Invalid IPv4 mask.");
+				return 0;
+			}
+
+			if (outer_ip) {
+				if (ipv4_mask->hdr.src_addr == UINT32_MAX)
+					input_set |= ICE_INSET_IPV4_SRC;
+				if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
+					input_set |= ICE_INSET_IPV4_DST;
+				if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
+					input_set |= ICE_INSET_IPV4_TOS;
+				if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
+					input_set |= ICE_INSET_IPV4_TTL;
+				if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
+					input_set |= ICE_INSET_IPV4_PROTO;
+				outer_ip = false;
+			} else {
+				if (ipv4_mask->hdr.src_addr == UINT32_MAX)
+					input_set |= ICE_INSET_TUN_IPV4_SRC;
+				if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
+					input_set |= ICE_INSET_TUN_IPV4_DST;
+				if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
+					input_set |= ICE_INSET_TUN_IPV4_TTL;
+				if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
+					input_set |= ICE_INSET_TUN_IPV4_PROTO;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_IPV6:
+			ipv6_spec = item->spec;
+			ipv6_mask = item->mask;
+
+			if (!(ipv6_spec && ipv6_mask)) {
+				rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ITEM,
+					item, "Invalid IPv6 spec or mask");
+				return 0;
+			}
+
+			if (ipv6_mask->hdr.payload_len ||
+			    ipv6_mask->hdr.vtc_flow) {
+				rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Invalid IPv6 mask");
+				return 0;
+			}
+
+			if (outer_ip) {
+				if (!memcmp(ipv6_mask->hdr.src_addr,
+					    ipv6_addr_mask,
+					    RTE_DIM(ipv6_mask->hdr.src_addr)))
+					input_set |= ICE_INSET_IPV6_SRC;
+				if (!memcmp(ipv6_mask->hdr.dst_addr,
+					    ipv6_addr_mask,
+					    RTE_DIM(ipv6_mask->hdr.dst_addr)))
+					input_set |= ICE_INSET_IPV6_DST;
+				if (ipv6_mask->hdr.proto == UINT8_MAX)
+					input_set |= ICE_INSET_IPV6_PROTO;
+				if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
+					input_set |= ICE_INSET_IPV6_HOP_LIMIT;
+				outer_ip = false;
+			} else {
+				if (!memcmp(ipv6_mask->hdr.src_addr,
+					    ipv6_addr_mask,
+					    RTE_DIM(ipv6_mask->hdr.src_addr)))
+					input_set |= ICE_INSET_TUN_IPV6_SRC;
+				if (!memcmp(ipv6_mask->hdr.dst_addr,
+					    ipv6_addr_mask,
+					    RTE_DIM(ipv6_mask->hdr.dst_addr)))
+					input_set |= ICE_INSET_TUN_IPV6_DST;
+				if (ipv6_mask->hdr.proto == UINT8_MAX)
+					input_set |= ICE_INSET_TUN_IPV6_PROTO;
+				if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
+					input_set |= ICE_INSET_TUN_IPV6_TTL;
+			}
+
+			break;
+		case RTE_FLOW_ITEM_TYPE_UDP:
+			udp_spec = item->spec;
+			udp_mask = item->mask;
+
+			if (!(udp_spec && udp_mask)) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item, "Invalid UDP mask");
+				return 0;
+			}
+
+			/* Check UDP mask and update input set*/
+			if (udp_mask->hdr.dgram_len ||
+			    udp_mask->hdr.dgram_cksum) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid UDP mask");
+				return 0;
+			}
+
+			if (outer_l4) {
+				if (udp_mask->hdr.src_port == UINT16_MAX)
+					input_set |= ICE_INSET_SRC_PORT;
+				if (udp_mask->hdr.dst_port == UINT16_MAX)
+					input_set |= ICE_INSET_DST_PORT;
+				outer_l4 = false;
+			} else {
+				if (udp_mask->hdr.src_port == UINT16_MAX)
+					input_set |= ICE_INSET_TUN_SRC_PORT;
+				if (udp_mask->hdr.dst_port == UINT16_MAX)
+					input_set |= ICE_INSET_TUN_DST_PORT;
+			}
+
+			break;
+		case RTE_FLOW_ITEM_TYPE_TCP:
+			tcp_spec = item->spec;
+			tcp_mask = item->mask;
+
+			if (!(tcp_spec && tcp_mask)) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item, "Invalid TCP mask");
+				return 0;
+			}
+
+			/* Check TCP mask and update input set */
+			if (tcp_mask->hdr.sent_seq ||
+			    tcp_mask->hdr.recv_ack ||
+			    tcp_mask->hdr.data_off ||
+			    tcp_mask->hdr.tcp_flags ||
+			    tcp_mask->hdr.rx_win ||
+			    tcp_mask->hdr.cksum ||
+			    tcp_mask->hdr.tcp_urp) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid TCP mask");
+				return 0;
+			}
+
+			if (outer_l4) {
+				if (tcp_mask->hdr.src_port == UINT16_MAX)
+					input_set |= ICE_INSET_SRC_PORT;
+				if (tcp_mask->hdr.dst_port == UINT16_MAX)
+					input_set |= ICE_INSET_DST_PORT;
+				outer_l4 = false;
+			} else {
+				if (tcp_mask->hdr.src_port == UINT16_MAX)
+					input_set |= ICE_INSET_TUN_SRC_PORT;
+				if (tcp_mask->hdr.dst_port == UINT16_MAX)
+					input_set |= ICE_INSET_TUN_DST_PORT;
+			}
+
+			break;
+		case RTE_FLOW_ITEM_TYPE_SCTP:
+			sctp_spec = item->spec;
+			sctp_mask = item->mask;
+
+			if (!(sctp_spec && sctp_mask)) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item, "Invalid SCTP mask");
+				return 0;
+			}
+
+			/* Check SCTP mask and update input set */
+			if (sctp_mask->hdr.cksum) {
+				rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Invalid SCTP mask");
+				return 0;
+			}
+
+			if (outer_l4) {
+				if (sctp_mask->hdr.src_port == UINT16_MAX)
+					input_set |= ICE_INSET_SRC_PORT;
+				if (sctp_mask->hdr.dst_port == UINT16_MAX)
+					input_set |= ICE_INSET_DST_PORT;
+				outer_l4 = false;
+			} else {
+				if (sctp_mask->hdr.src_port == UINT16_MAX)
+					input_set |= ICE_INSET_TUN_SRC_PORT;
+				if (sctp_mask->hdr.dst_port == UINT16_MAX)
+					input_set |= ICE_INSET_TUN_DST_PORT;
+			}
+
+			break;
+		case RTE_FLOW_ITEM_TYPE_ICMP:
+			icmp_mask = item->mask;
+			if (icmp_mask->hdr.icmp_code ||
+			    icmp_mask->hdr.icmp_cksum ||
+			    icmp_mask->hdr.icmp_ident ||
+			    icmp_mask->hdr.icmp_seq_nb) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid ICMP mask");
+				return 0;
+			}
+
+			if (icmp_mask->hdr.icmp_type == UINT8_MAX)
+				input_set |= ICE_INSET_ICMP;
+			break;
+		case RTE_FLOW_ITEM_TYPE_ICMP6:
+			icmp6_mask = item->mask;
+			if (icmp6_mask->code ||
+			    icmp6_mask->checksum) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid ICMP6 mask");
+				return 0;
+			}
+
+			if (icmp6_mask->type == UINT8_MAX)
+				input_set |= ICE_INSET_ICMP6;
+			break;
+		case RTE_FLOW_ITEM_TYPE_VXLAN:
+			vxlan_spec = item->spec;
+			vxlan_mask = item->mask;
+			/* Check if VXLAN item is used to describe protocol.
+			 * If yes, both spec and mask should be NULL.
+			 * If no, both spec and mask shouldn't be NULL.
+			 */
+			if ((!vxlan_spec && vxlan_mask) ||
+			    (vxlan_spec && !vxlan_mask)) {
+				rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Invalid VXLAN item");
+				return 0;
+			}
+
+			break;
+		case RTE_FLOW_ITEM_TYPE_NVGRE:
+			nvgre_spec = item->spec;
+			nvgre_mask = item->mask;
+			/* Check if NVGRE item is used to describe protocol.
+			 * If yes, both spec and mask should be NULL.
+			 * If no, both spec and mask shouldn't be NULL.
+			 */
+			if ((!nvgre_spec && nvgre_mask) ||
+			    (nvgre_spec && !nvgre_mask)) {
+				rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Invalid NVGRE item");
+				return 0;
+			}
+
+			break;
+		default:
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Unsupported pattern item");
+			break;
+		}
+	}
+	return input_set;
+}
+
+static int ice_flow_valid_inset(const struct rte_flow_item pattern[],
+			uint64_t inset, struct rte_flow_error *error)
+{
+	uint64_t fields;
+
+	/* get valid field */
+	fields = ice_get_flow_field(pattern, error);
+	if (!fields || fields & (~inset)) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
+				   pattern,
+				   "Invalid input set");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+static int ice_flow_valid_action(struct rte_eth_dev *dev,
+				const struct rte_flow_action *actions,
+				struct rte_flow_error *error)
+{
+	const struct rte_flow_action_queue *act_q;
+	uint16_t queue;
+
+	switch (actions->type) {
+	case RTE_FLOW_ACTION_TYPE_QUEUE:
+		act_q = actions->conf;
+		queue = act_q->index;
+		if (queue >= dev->data->nb_rx_queues) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ACTION,
+					   actions, "Invalid queue ID for"
+					   " this flow rule.");
+			return -rte_errno;
+		}
+		break;
+	case RTE_FLOW_ACTION_TYPE_DROP:
+		break;
+	default:
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ACTION, actions,
+				   "Invalid action.");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+static int
+ice_flow_validate(struct rte_eth_dev *dev,
+		   const struct rte_flow_attr *attr,
+		   const struct rte_flow_item pattern[],
+		   const struct rte_flow_action actions[],
+		   struct rte_flow_error *error)
+{
+	uint64_t inset = 0;
+	int ret = ICE_ERR_NOT_SUPPORTED;
+
+	if (!pattern) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+				   NULL, "NULL pattern.");
+		return -rte_errno;
+	}
+
+	if (!actions) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+				   NULL, "NULL action.");
+		return -rte_errno;
+	}
+
+	if (!attr) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR,
+				   NULL, "NULL attribute.");
+		return -rte_errno;
+	}
+
+	ret = ice_flow_valid_attr(attr, error);
+	if (ret)
+		return ret;
+
+	inset = ice_flow_valid_pattern(pattern, error);
+	if (!inset)
+		return -rte_errno;
+
+	ret = ice_flow_valid_inset(pattern, inset, error);
+	if (ret)
+		return ret;
+
+	ret = ice_flow_valid_action(dev, actions, error);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static struct rte_flow *
+ice_flow_create(struct rte_eth_dev *dev,
+		 const struct rte_flow_attr *attr,
+		 const struct rte_flow_item pattern[],
+		 const struct rte_flow_action actions[],
+		 struct rte_flow_error *error)
+{
+	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	struct rte_flow *flow = NULL;
+	int ret;
+
+	flow = rte_zmalloc("ice_flow", sizeof(struct rte_flow), 0);
+	if (!flow) {
+		rte_flow_error_set(error, ENOMEM,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Failed to allocate memory");
+		return flow;
+	}
+
+	ret = ice_flow_validate(dev, attr, pattern, actions, error);
+	if (ret < 0)
+		goto free_flow;
+
+	ret = ice_create_switch_filter(pf, pattern, actions, flow, error);
+	if (ret)
+		goto free_flow;
+
+	TAILQ_INSERT_TAIL(&pf->flow_list, flow, node);
+	return flow;
+
+free_flow:
+	rte_flow_error_set(error, -ret,
+			   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+			   "Failed to create flow.");
+	rte_free(flow);
+	return NULL;
+}
+
+static int
+ice_flow_destroy(struct rte_eth_dev *dev,
+		 struct rte_flow *flow,
+		 struct rte_flow_error *error)
+{
+	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	int ret = 0;
+
+	ret = ice_destroy_switch_filter(pf, flow, error);
+
+	if (!ret) {
+		TAILQ_REMOVE(&pf->flow_list, flow, node);
+		rte_free(flow);
+	} else {
+		rte_flow_error_set(error, -ret,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Failed to destroy flow.");
+	}
+
+	return ret;
+}
+
+static int
+ice_flow_flush(struct rte_eth_dev *dev,
+	       struct rte_flow_error *error)
+{
+	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	struct rte_flow *p_flow;
+	int ret = 0;
+
+	while ((p_flow = TAILQ_FIRST(&pf->flow_list))) {
+		ret = ice_flow_destroy(dev, p_flow, error);
+		if (ret) {
+			rte_flow_error_set(error, -ret,
+					   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+					   "Failed to flush SW flows.");
+			return -rte_errno;
+		}
+	}
+
+	return ret;
+}
diff --git a/drivers/net/ice/ice_generic_flow.h b/drivers/net/ice/ice_generic_flow.h
new file mode 100644
index 0000000..2e43a29
--- /dev/null
+++ b/drivers/net/ice/ice_generic_flow.h
@@ -0,0 +1,614 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+#ifndef _ICE_GENERIC_FLOW_H_
+#define _ICE_GENERIC_FLOW_H_
+
+#include <rte_flow_driver.h>
+
+struct ice_flow_pattern {
+	enum rte_flow_item_type *items;
+	uint64_t sw_fields;
+};
+
+#define ICE_INSET_NONE            0x0000000000000000ULL
+
+/* bit0 ~ bit 7 */
+#define ICE_INSET_SMAC            0x0000000000000001ULL
+#define ICE_INSET_DMAC            0x0000000000000002ULL
+#define ICE_INSET_ETHERTYPE       0x0000000000000020ULL
+
+/* bit 8 ~ bit 15 */
+#define ICE_INSET_IPV4_SRC        0x0000000000000100ULL
+#define ICE_INSET_IPV4_DST        0x0000000000000200ULL
+#define ICE_INSET_IPV6_SRC        0x0000000000000400ULL
+#define ICE_INSET_IPV6_DST        0x0000000000000800ULL
+#define ICE_INSET_SRC_PORT        0x0000000000001000ULL
+#define ICE_INSET_DST_PORT        0x0000000000002000ULL
+#define ICE_INSET_ARP             0x0000000000004000ULL
+
+/* bit 16 ~ bit 31 */
+#define ICE_INSET_IPV4_TOS        0x0000000000010000ULL
+#define ICE_INSET_IPV4_PROTO      0x0000000000020000ULL
+#define ICE_INSET_IPV4_TTL        0x0000000000040000ULL
+#define ICE_INSET_IPV6_PROTO      0x0000000000200000ULL
+#define ICE_INSET_IPV6_HOP_LIMIT  0x0000000000400000ULL
+#define ICE_INSET_ICMP            0x0000000001000000ULL
+#define ICE_INSET_ICMP6           0x0000000002000000ULL
+
+/* bit 32 ~ bit 47, tunnel fields */
+#define ICE_INSET_TUN_SMAC           0x0000000100000000ULL
+#define ICE_INSET_TUN_DMAC           0x0000000200000000ULL
+#define ICE_INSET_TUN_IPV4_SRC       0x0000000400000000ULL
+#define ICE_INSET_TUN_IPV4_DST       0x0000000800000000ULL
+#define ICE_INSET_TUN_IPV4_TTL       0x0000001000000000ULL
+#define ICE_INSET_TUN_IPV4_PROTO     0x0000002000000000ULL
+#define ICE_INSET_TUN_IPV6_SRC       0x0000004000000000ULL
+#define ICE_INSET_TUN_IPV6_DST       0x0000008000000000ULL
+#define ICE_INSET_TUN_IPV6_TTL       0x0000010000000000ULL
+#define ICE_INSET_TUN_IPV6_PROTO     0x0000020000000000ULL
+#define ICE_INSET_TUN_SRC_PORT       0x0000040000000000ULL
+#define ICE_INSET_TUN_DST_PORT       0x0000080000000000ULL
+#define ICE_INSET_TUN_ID             0x0000100000000000ULL
+
+/* bit 48 ~ bit 55 */
+#define ICE_INSET_LAST_ETHER_TYPE 0x0001000000000000ULL
+
+#define ICE_FLAG_VLAN_INNER  0x00000001ULL
+#define ICE_FLAG_VLAN_OUTER  0x00000002ULL
+
+#define INSET_ETHER ( \
+	ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)
+#define INSET_MAC_IPV4 ( \
+	ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
+	ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_TOS)
+#define INSET_MAC_IPV4_L4 ( \
+	ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
+	ICE_INSET_IPV4_TOS | ICE_INSET_DST_PORT | \
+	ICE_INSET_SRC_PORT)
+#define INSET_MAC_IPV4_ICMP ( \
+	ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
+	ICE_INSET_IPV4_TOS | ICE_INSET_ICMP)
+#define INSET_MAC_IPV6 ( \
+	ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
+	ICE_INSET_IPV6_PROTO | ICE_INSET_IPV6_HOP_LIMIT)
+#define INSET_MAC_IPV6_L4 ( \
+	ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
+	ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_DST_PORT | \
+	ICE_INSET_SRC_PORT)
+#define INSET_MAC_IPV6_ICMP ( \
+	ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
+	ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_ICMP6)
+#define INSET_TUNNEL_IPV4_TYPE1 ( \
+	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
+	ICE_INSET_TUN_IPV4_TTL | ICE_INSET_TUN_IPV4_PROTO)
+#define INSET_TUNNEL_IPV4_TYPE2 ( \
+	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
+	ICE_INSET_TUN_IPV4_TTL | ICE_INSET_TUN_IPV4_PROTO | \
+	ICE_INSET_TUN_SRC_PORT | ICE_INSET_TUN_DST_PORT)
+#define INSET_TUNNEL_IPV4_TYPE3 ( \
+	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
+	ICE_INSET_TUN_IPV4_TTL | ICE_INSET_ICMP)
+#define INSET_TUNNEL_IPV6_TYPE1 ( \
+	ICE_INSET_TUN_IPV6_SRC | ICE_INSET_TUN_IPV6_DST | \
+	ICE_INSET_TUN_IPV6_TTL | ICE_INSET_TUN_IPV6_PROTO)
+#define INSET_TUNNEL_IPV6_TYPE2 ( \
+	ICE_INSET_TUN_IPV6_SRC | ICE_INSET_TUN_IPV6_DST | \
+	ICE_INSET_TUN_IPV6_TTL | ICE_INSET_TUN_IPV6_PROTO | \
+	ICE_INSET_TUN_SRC_PORT | ICE_INSET_TUN_DST_PORT)
+#define INSET_TUNNEL_IPV6_TYPE3 ( \
+	ICE_INSET_TUN_IPV6_SRC | ICE_INSET_TUN_IPV6_DST | \
+	ICE_INSET_TUN_IPV6_TTL | ICE_INSET_ICMP6)
+
+/* L2 */
+static enum rte_flow_item_type pattern_ethertype[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* non-tunnel IPv4 */
+static enum rte_flow_item_type pattern_ipv4[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_icmp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* non-tunnel IPv6 */
+static enum rte_flow_item_type pattern_ipv6[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv6_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv6_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv6_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv6_icmp6[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_ICMP6,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* IPv4 VXLAN IPv4 */
+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv4[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv4_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv4_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv4_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv4_icmp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* IPv4 VXLAN MAC IPv4 */
+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv4[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv4_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv4_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv4_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv4_icmp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* IPv4 VXLAN IPv6 */
+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv6[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv6_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv6_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv6_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv6_icmp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* IPv4 VXLAN MAC IPv6 */
+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv6[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv6_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv6_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv6_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv6_icmp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* IPv4 NVGRE IPv4 */
+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv4[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv4_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv4_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv4_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv4_icmp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* IPv4 NVGRE MAC IPv4 */
+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv4[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv4_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv4_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv4_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv4_icmp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* IPv4 NVGRE IPv6 */
+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv6[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv6_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv6_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv6_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+
+/* IPv4 NVGRE MAC IPv6 */
+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv6[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv6_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv6_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv6_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static struct ice_flow_pattern ice_supported_patterns[] = {
+	{pattern_ethertype, INSET_ETHER},
+	{pattern_ipv4, INSET_MAC_IPV4},
+	{pattern_ipv4_udp, INSET_MAC_IPV4_L4},
+	{pattern_ipv4_sctp, INSET_MAC_IPV4_L4},
+	{pattern_ipv4_tcp, INSET_MAC_IPV4_L4},
+	{pattern_ipv4_icmp, INSET_MAC_IPV4_ICMP},
+	{pattern_ipv6, INSET_MAC_IPV6},
+	{pattern_ipv6_udp, INSET_MAC_IPV6_L4},
+	{pattern_ipv6_sctp, INSET_MAC_IPV6_L4},
+	{pattern_ipv6_tcp, INSET_MAC_IPV6_L4},
+	{pattern_ipv6_icmp6, INSET_MAC_IPV6_ICMP},
+	{pattern_ipv4_vxlan_ipv4, INSET_TUNNEL_IPV4_TYPE1},
+	{pattern_ipv4_vxlan_ipv4_udp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_vxlan_ipv4_tcp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_vxlan_ipv4_sctp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_vxlan_ipv4_icmp, INSET_TUNNEL_IPV4_TYPE3},
+	{pattern_ipv4_vxlan_eth_ipv4, INSET_TUNNEL_IPV4_TYPE1},
+	{pattern_ipv4_vxlan_eth_ipv4_udp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_vxlan_eth_ipv4_tcp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_vxlan_eth_ipv4_sctp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_vxlan_eth_ipv4_icmp, INSET_TUNNEL_IPV4_TYPE3},
+	{pattern_ipv4_vxlan_ipv6, INSET_TUNNEL_IPV6_TYPE1},
+	{pattern_ipv4_vxlan_ipv6_udp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_vxlan_ipv6_tcp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_vxlan_ipv6_sctp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_vxlan_ipv6_icmp, INSET_TUNNEL_IPV6_TYPE3},
+	{pattern_ipv4_vxlan_eth_ipv6, INSET_TUNNEL_IPV6_TYPE1},
+	{pattern_ipv4_vxlan_eth_ipv6_udp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_vxlan_eth_ipv6_tcp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_vxlan_eth_ipv6_sctp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_vxlan_eth_ipv6_icmp, INSET_TUNNEL_IPV6_TYPE3},
+	{pattern_ipv4_nvgre_ipv4, INSET_TUNNEL_IPV4_TYPE1},
+	{pattern_ipv4_nvgre_ipv4_udp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_nvgre_ipv4_tcp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_nvgre_ipv4_sctp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_nvgre_ipv4_icmp, INSET_TUNNEL_IPV4_TYPE3},
+	{pattern_ipv4_nvgre_eth_ipv4, INSET_TUNNEL_IPV4_TYPE1},
+	{pattern_ipv4_nvgre_eth_ipv4_udp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_nvgre_eth_ipv4_tcp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_nvgre_eth_ipv4_sctp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_nvgre_eth_ipv4_icmp, INSET_TUNNEL_IPV4_TYPE3},
+	{pattern_ipv4_nvgre_ipv6, INSET_TUNNEL_IPV6_TYPE1},
+	{pattern_ipv4_nvgre_ipv6_udp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_nvgre_ipv6_tcp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_nvgre_ipv6_sctp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_nvgre_eth_ipv6, INSET_TUNNEL_IPV6_TYPE1},
+	{pattern_ipv4_nvgre_eth_ipv6_udp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_nvgre_eth_ipv6_tcp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_nvgre_eth_ipv6_sctp, INSET_TUNNEL_IPV6_TYPE2},
+};
+
+#endif
diff --git a/drivers/net/ice/meson.build b/drivers/net/ice/meson.build
index 8697676..7f16647 100644
--- a/drivers/net/ice/meson.build
+++ b/drivers/net/ice/meson.build
@@ -7,7 +7,8 @@ objs = [base_objs]
 sources = files(
 	'ice_ethdev.c',
 	'ice_rxtx.c',
-	'ice_switch_filter.c'
+	'ice_switch_filter.c',
+	'ice_generic_flow.c'
 	)
 
 deps += ['hash']
-- 
2.9.5


^ permalink raw reply	[flat|nested] 73+ messages in thread

* [dpdk-dev] [PATCH v8 3/4] net/ice: add UDP tunnel port support
  2019-06-26  8:58 ` [dpdk-dev] [PATCH v8 0/4] Enable rte_flow API in ice driver Qiming Yang
  2019-06-26  8:58   ` [dpdk-dev] [PATCH v8 1/4] net/ice: enable switch filter Qiming Yang
  2019-06-26  8:58   ` [dpdk-dev] [PATCH v8 2/4] net/ice: add generic flow API Qiming Yang
@ 2019-06-26  8:58   ` Qiming Yang
  2019-06-26  8:58   ` [dpdk-dev] [PATCH v8 4/4] doc: add release note for generic flow Qiming Yang
  2019-06-26 13:25   ` [dpdk-dev] [PATCH v8 0/4] Enable rte_flow API in ice driver Xing, Beilei
  4 siblings, 0 replies; 73+ messages in thread
From: Qiming Yang @ 2019-06-26  8:58 UTC (permalink / raw)
  To: dev; +Cc: Qiming Yang

Enabled UDP tunnel port add and delete functions.
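
For illustration only (not part of the patch): a minimal sketch of how an
application could exercise the new callbacks through the generic ethdev
API. The helper name, the port_id and the VXLAN port number 4789 (the
IANA default) are assumptions made for the example.

	#include <rte_ethdev.h>

	static int
	example_vxlan_port(uint16_t port_id)
	{
		struct rte_eth_udp_tunnel tunnel = {
			.udp_port = 4789,		/* example value */
			.prot_type = RTE_TUNNEL_TYPE_VXLAN,
		};
		int ret;

		/* handled by ice_dev_udp_tunnel_port_add() below */
		ret = rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
		if (ret != 0)
			return ret;

		/* ... later, remove the mapping again */
		return rte_eth_dev_udp_tunnel_port_delete(port_id, &tunnel);
	}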

Signed-off-by: Qiming Yang <qiming.yang@intel.com>
---
 drivers/net/ice/ice_ethdev.c | 54 ++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 54 insertions(+)

diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index 8ee06d1..1533adb 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -88,6 +88,10 @@ static int ice_dev_filter_ctrl(struct rte_eth_dev *dev,
 			enum rte_filter_type filter_type,
 			enum rte_filter_op filter_op,
 			void *arg);
+static int ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
+			struct rte_eth_udp_tunnel *udp_tunnel);
+static int ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
+			struct rte_eth_udp_tunnel *udp_tunnel);
 
 static const struct rte_pci_id pci_id_ice_map[] = {
 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_BACKPLANE) },
@@ -147,6 +151,8 @@ static const struct eth_dev_ops ice_eth_dev_ops = {
 	.xstats_get_names             = ice_xstats_get_names,
 	.xstats_reset                 = ice_stats_reset,
 	.filter_ctrl                  = ice_dev_filter_ctrl,
+	.udp_tunnel_port_add          = ice_dev_udp_tunnel_port_add,
+	.udp_tunnel_port_del          = ice_dev_udp_tunnel_port_del,
 };
 
 /* store statistics names and its offset in stats structure */
@@ -3665,6 +3671,54 @@ ice_dev_filter_ctrl(struct rte_eth_dev *dev,
 	return ret;
 }
 
+/* Add UDP tunneling port */
+static int
+ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
+			     struct rte_eth_udp_tunnel *udp_tunnel)
+{
+	int ret = 0;
+	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	if (udp_tunnel == NULL)
+		return -EINVAL;
+
+	switch (udp_tunnel->prot_type) {
+	case RTE_TUNNEL_TYPE_VXLAN:
+		ret = ice_create_tunnel(hw, TNL_VXLAN, udp_tunnel->udp_port);
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "Invalid tunnel type");
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+/* Delete UDP tunneling port */
+static int
+ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
+			     struct rte_eth_udp_tunnel *udp_tunnel)
+{
+	int ret = 0;
+	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	if (udp_tunnel == NULL)
+		return -EINVAL;
+
+	switch (udp_tunnel->prot_type) {
+	case RTE_TUNNEL_TYPE_VXLAN:
+		ret = ice_destroy_tunnel(hw, udp_tunnel->udp_port, 0);
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "Invalid tunnel type");
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
 static int
 ice_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 	      struct rte_pci_device *pci_dev)
-- 
2.9.5


^ permalink raw reply	[flat|nested] 73+ messages in thread

* [dpdk-dev] [PATCH v8 4/4] doc: add release note for generic flow
  2019-06-26  8:58 ` [dpdk-dev] [PATCH v8 0/4] Enable rte_flow API in ice driver Qiming Yang
                     ` (2 preceding siblings ...)
  2019-06-26  8:58   ` [dpdk-dev] [PATCH v8 3/4] net/ice: add UDP tunnel port support Qiming Yang
@ 2019-06-26  8:58   ` Qiming Yang
  2019-06-26 21:27     ` Thomas Monjalon
  2019-06-26 13:25   ` [dpdk-dev] [PATCH v8 0/4] Enable rte_flow API in ice driver Xing, Beilei
  4 siblings, 1 reply; 73+ messages in thread
From: Qiming Yang @ 2019-06-26  8:58 UTC (permalink / raw)
  To: dev; +Cc: Qiming Yang

Signed-off-by: Qiming Yang <qiming.yang@intel.com>
---
 doc/guides/rel_notes/release_19_08.rst | 1 +
 1 file changed, 1 insertion(+)

diff --git a/doc/guides/rel_notes/release_19_08.rst b/doc/guides/rel_notes/release_19_08.rst
index 563999d..0f46b86 100644
--- a/doc/guides/rel_notes/release_19_08.rst
+++ b/doc/guides/rel_notes/release_19_08.rst
@@ -93,6 +93,7 @@ New Features
   Updated ice driver with new features and improvements, including:
 
   * Enabled Tx outer/inner L3/L4 checksum offload.
+  * Enabled generic filter framework and supported switch filter.
 
 
 Removed Items
-- 
2.9.5


^ permalink raw reply	[flat|nested] 73+ messages in thread

* Re: [dpdk-dev] [PATCH v8 0/4] Enable rte_flow API in ice driver
  2019-06-26  8:58 ` [dpdk-dev] [PATCH v8 0/4] Enable rte_flow API in ice driver Qiming Yang
                     ` (3 preceding siblings ...)
  2019-06-26  8:58   ` [dpdk-dev] [PATCH v8 4/4] doc: add release note for generic flow Qiming Yang
@ 2019-06-26 13:25   ` Xing, Beilei
  4 siblings, 0 replies; 73+ messages in thread
From: Xing, Beilei @ 2019-06-26 13:25 UTC (permalink / raw)
  To: Yang, Qiming, dev; +Cc: Yang, Qiming

> -----Original Message-----
> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Qiming Yang
> Sent: Wednesday, June 26, 2019 4:58 PM
> To: dev@dpdk.org
> Cc: Yang, Qiming <qiming.yang@intel.com>
> Subject: [dpdk-dev] [PATCH v8 0/4] Enable rte_flow API in ice driver
> 
> This patch set enables the backend of rte_flow, and the generic filter related
> functions in ice driver. Supported flows include ipv4, tcpv4, udpv4, ipv6,
> tcpv6, udpv6, tunnel, etc.
> 
> These patchs depends on patch set:
> net/ice: shared code update.
> 
> ---
> v2 changes:
>  - added UDP tunnel port support.
>  - fixed compile issue.
>  - added document update.
> v3 changes:
>  - removed redundancy parser.
>  - added License.
>  - added VXLAN and NVGRE item support.
> v4 changes:
>  - fixed some typos.
> v5 changes:
>  - fixed checkpatch issues.
> v6 changes:
>  - fixed one uninitialize issue.
> v7 changes:
>  - fixed queue action validation.
> v8 changes:
>  - optimized some return values.
>  - code reorgnization.
>  - added release note.
> 
> 
> Qiming Yang (3):
>   net/ice: add generic flow API
>   net/ice: add UDP tunnel port support
>   doc: add release note for generic flow
> 
> wei zhao (1):
>   net/ice: enable switch filter
> 
>  doc/guides/rel_notes/release_19_08.rst |   1 +
>  drivers/net/ice/Makefile               |   2 +
>  drivers/net/ice/ice_ethdev.c           | 116 ++++++
>  drivers/net/ice/ice_ethdev.h           |  12 +
>  drivers/net/ice/ice_generic_flow.c     | 696 +++++++++++++++++++++++++++++++++
>  drivers/net/ice/ice_generic_flow.h     | 614 +++++++++++++++++++++++++++++
>  drivers/net/ice/ice_switch_filter.c    | 503 ++++++++++++++++++++++++
>  drivers/net/ice/ice_switch_filter.h    |  24 ++
>  drivers/net/ice/meson.build            |   4 +-
>  9 files changed, 1971 insertions(+), 1 deletion(-)
>  create mode 100644 drivers/net/ice/ice_generic_flow.c
>  create mode 100644 drivers/net/ice/ice_generic_flow.h
>  create mode 100644 drivers/net/ice/ice_switch_filter.c
>  create mode 100644 drivers/net/ice/ice_switch_filter.h
> 
> --
> 2.9.5

Acked-by: Beilei Xing <beilei.xing@intel.com>

^ permalink raw reply	[flat|nested] 73+ messages in thread

* Re: [dpdk-dev] [PATCH v7 0/3] Enable rte_flow API in ice driver
  2019-06-25  6:48 ` [dpdk-dev] [PATCH v7 0/3] Enable rte_flow API in ice driver Qiming Yang
                     ` (3 preceding siblings ...)
  2019-06-25 14:58   ` [dpdk-dev] [PATCH v7 0/3] Enable rte_flow API in ice driver Aaron Conole
@ 2019-06-26 15:52   ` Ye Xiaolong
  4 siblings, 0 replies; 73+ messages in thread
From: Ye Xiaolong @ 2019-06-26 15:52 UTC (permalink / raw)
  To: Qiming Yang; +Cc: dev

Hi, Qiming

On 06/25, Qiming Yang wrote:
>This patch set enables the backend of rte_flow, and the generic
>filter related functions in ice driver. Supported flows include
>ipv4, tcpv4, udpv4, ipv6, tcpv6, udpv6, tunnel, etc. This patch
>set depends on shared code update.

Please also add doc and release notes for this patchset.

Thanks,
Xiaolong
>
>---
>v2 changes:
> - added UDP tunnel port support.
> - fixed compile issue.
> - added document update.
>v3 changes:
> - removed redundancy parser.
> - added License.
> - added VXLAN and NVGRE item support.
>v4 changes:
> - fixed some typos.
>v5 changes:
> - fixed checkpatch issues.
>v6 changes:
> - fixed one uninitialize issue.
>v7 changes:
> - fixed queue action validation.
>
>Qiming Yang (2):
>  net/ice: add generic flow API
>  net/ice: add UDP tunnel port support
>
>wei zhao (1):
>  net/ice: enable switch filter
>
> drivers/net/ice/Makefile            |   2 +
> drivers/net/ice/ice_ethdev.c        | 116 ++++++
> drivers/net/ice/ice_ethdev.h        |  12 +
> drivers/net/ice/ice_generic_flow.c  | 696 ++++++++++++++++++++++++++++++++++++
> drivers/net/ice/ice_generic_flow.h  | 614 +++++++++++++++++++++++++++++++
> drivers/net/ice/ice_switch_filter.c | 512 ++++++++++++++++++++++++++
> drivers/net/ice/ice_switch_filter.h |  24 ++
> drivers/net/ice/meson.build         |   4 +-
> 8 files changed, 1979 insertions(+), 1 deletion(-)
> create mode 100644 drivers/net/ice/ice_generic_flow.c
> create mode 100644 drivers/net/ice/ice_generic_flow.h
> create mode 100644 drivers/net/ice/ice_switch_filter.c
> create mode 100644 drivers/net/ice/ice_switch_filter.h
>
>-- 
>2.9.5
>

^ permalink raw reply	[flat|nested] 73+ messages in thread

* Re: [dpdk-dev] [PATCH v8 4/4] doc: add release note for generic flow
  2019-06-26  8:58   ` [dpdk-dev] [PATCH v8 4/4] doc: add release note for generic flow Qiming Yang
@ 2019-06-26 21:27     ` Thomas Monjalon
  2019-06-27  2:04       ` Yang, Qiming
  0 siblings, 1 reply; 73+ messages in thread
From: Thomas Monjalon @ 2019-06-26 21:27 UTC (permalink / raw)
  To: Qiming Yang; +Cc: dev

26/06/2019 10:58, Qiming Yang:
> --- a/doc/guides/rel_notes/release_19_08.rst
> +++ b/doc/guides/rel_notes/release_19_08.rst
> @@ -93,6 +93,7 @@ New Features
>    Updated ice driver with new features and improvements, including:
>  
>    * Enabled Tx outer/inner L3/L4 checksum offload.
> +  * Enabled generic filter framework and supported switch filter.

This should be in the patch adding this feature.



^ permalink raw reply	[flat|nested] 73+ messages in thread

* Re: [dpdk-dev] [PATCH v8 4/4] doc: add release note for generic flow
  2019-06-26 21:27     ` Thomas Monjalon
@ 2019-06-27  2:04       ` Yang, Qiming
  0 siblings, 0 replies; 73+ messages in thread
From: Yang, Qiming @ 2019-06-27  2:04 UTC (permalink / raw)
  To: 'Thomas Monjalon'; +Cc: dev

Hi, Thomas
OK, I'll move the release note additions into the corresponding patches.

-----Original Message-----
From: Thomas Monjalon [mailto:thomas@monjalon.net] 
Sent: Thursday, June 27, 2019 5:28 AM
To: Yang, Qiming <qiming.yang@intel.com>
Cc: dev@dpdk.org
Subject: Re: [dpdk-dev] [PATCH v8 4/4] doc: add release note for generic flow

26/06/2019 10:58, Qiming Yang:
> --- a/doc/guides/rel_notes/release_19_08.rst
> +++ b/doc/guides/rel_notes/release_19_08.rst
> @@ -93,6 +93,7 @@ New Features
>    Updated ice driver with new features and improvements, including:
>  
>    * Enabled Tx outer/inner L3/L4 checksum offload.
> +  * Enabled generic filter framework and supported switch filter.

This should be in the patch adding this feature.



^ permalink raw reply	[flat|nested] 73+ messages in thread

* [dpdk-dev] [PATCH v9 0/3] Enable rte_flow API in ice driver
  2019-06-03  9:05 [dpdk-dev] [PATCH 0/2] Enable rte_flow API in ice driver Qiming Yang
                   ` (9 preceding siblings ...)
  2019-06-26  8:58 ` [dpdk-dev] [PATCH v8 0/4] Enable rte_flow API in ice driver Qiming Yang
@ 2019-07-01  8:32 ` Qiming Yang
  2019-07-01  8:32   ` [dpdk-dev] [PATCH v9 1/3] net/ice: enable switch filter Qiming Yang
                     ` (3 more replies)
  10 siblings, 4 replies; 73+ messages in thread
From: Qiming Yang @ 2019-07-01  8:32 UTC (permalink / raw)
  To: dev; +Cc: qi.z.zhang, Qiming Yang

This patch set enables the backend of rte_flow, and the generic
filter related functions in ice driver. Supported flows include
ipv4, tcpv4, udpv4, ipv6, tcpv6, udpv6, tunnel, etc.
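
As a purely illustrative sketch (not part of the series), one of the
simplest rules covered above (steering UDP-over-IPv4 traffic for a given
destination address and port to a queue) could be built like this; the
address, port number and helper name are invented for the example:

	#include <rte_ethdev.h>
	#include <rte_flow.h>
	#include <rte_byteorder.h>

	static struct rte_flow *
	example_udpv4_to_queue(uint16_t port_id, uint16_t queue_idx,
			       struct rte_flow_error *err)
	{
		struct rte_flow_attr attr = { .ingress = 1 };
		/* match destination IPv4 address 192.168.0.1 */
		struct rte_flow_item_ipv4 ip_spec = {
			.hdr.dst_addr = RTE_BE32(0xc0a80001),
		};
		struct rte_flow_item_ipv4 ip_mask = {
			.hdr.dst_addr = RTE_BE32(0xffffffff),
		};
		/* match UDP destination port 4000 */
		struct rte_flow_item_udp udp_spec = {
			.hdr.dst_port = RTE_BE16(4000),
		};
		struct rte_flow_item_udp udp_mask = {
			.hdr.dst_port = RTE_BE16(0xffff),
		};
		struct rte_flow_item pattern[] = {
			{ .type = RTE_FLOW_ITEM_TYPE_ETH },
			{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
			  .spec = &ip_spec, .mask = &ip_mask },
			{ .type = RTE_FLOW_ITEM_TYPE_UDP,
			  .spec = &udp_spec, .mask = &udp_mask },
			{ .type = RTE_FLOW_ITEM_TYPE_END },
		};
		struct rte_flow_action_queue queue = { .index = queue_idx };
		struct rte_flow_action actions[] = {
			{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
			{ .type = RTE_FLOW_ACTION_TYPE_END },
		};

		return rte_flow_create(port_id, &attr, pattern, actions, err);
	}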

These patches depend on the patch set:
net/ice: shared code update.

---
v2 changes:
 - added UDP tunnel port support.
 - fixed compile issue.
 - added document update.
v3 changes:
 - removed redundant parser.
 - added License.
 - added VXLAN and NVGRE item support.
v4 changes:
 - fixed some typos.
v5 changes:
 - fixed checkpatch issues.
v6 changes:
 - fixed an uninitialized variable issue.
v7 changes:
 - fixed queue action validation.
v8 changes:
 - optimized some return values.
 - code reorganization.
 - added release note.
v9 changes:
 - fixed wrong input set.

Qiming Yang (2):
  net/ice: add generic flow API
  net/ice: add UDP tunnel port support

wei zhao (1):
  net/ice: enable switch filter

 doc/guides/rel_notes/release_19_08.rst |   2 +
 drivers/net/ice/Makefile               |   2 +
 drivers/net/ice/ice_ethdev.c           | 116 ++++++
 drivers/net/ice/ice_ethdev.h           |  12 +
 drivers/net/ice/ice_generic_flow.c     | 696 +++++++++++++++++++++++++++++++++
 drivers/net/ice/ice_generic_flow.h     | 615 +++++++++++++++++++++++++++++
 drivers/net/ice/ice_switch_filter.c    | 511 ++++++++++++++++++++++++
 drivers/net/ice/ice_switch_filter.h    |  24 ++
 drivers/net/ice/meson.build            |   4 +-
 9 files changed, 1981 insertions(+), 1 deletion(-)
 create mode 100644 drivers/net/ice/ice_generic_flow.c
 create mode 100644 drivers/net/ice/ice_generic_flow.h
 create mode 100644 drivers/net/ice/ice_switch_filter.c
 create mode 100644 drivers/net/ice/ice_switch_filter.h

-- 
2.9.5

Acked-by: Beilei Xing <beilei.xing@intel.com>

^ permalink raw reply	[flat|nested] 73+ messages in thread

* [dpdk-dev] [PATCH v9 1/3] net/ice: enable switch filter
  2019-07-01  8:32 ` [dpdk-dev] [PATCH v9 0/3] " Qiming Yang
@ 2019-07-01  8:32   ` Qiming Yang
  2019-07-01  8:32   ` [dpdk-dev] [PATCH v9 2/3] net/ice: add generic flow API Qiming Yang
                     ` (2 subsequent siblings)
  3 siblings, 0 replies; 73+ messages in thread
From: Qiming Yang @ 2019-07-01  8:32 UTC (permalink / raw)
  To: dev; +Cc: qi.z.zhang, wei zhao

From: wei zhao <wei.zhao1@intel.com>

The patch enables the backend of rte_flow. It transfers
rte_flow_xxx to device specific data structure and
configures packet process engine's binary classifier
(switch) properly.
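
Purely as an illustration of the kind of tunneled rule this backend can
program (not part of the patch): dropping traffic to one inner IPv4
destination inside any VXLAN tunnel. The address and helper name are
invented; the outer IPv4/UDP items are given all-zero spec/mask because
the validation code added later in this series appears to expect spec
and mask to be present for IP items, so they only select the header type
here.

	#include <rte_ethdev.h>
	#include <rte_flow.h>
	#include <rte_byteorder.h>

	static struct rte_flow *
	example_drop_inner_dst(uint16_t port_id, struct rte_flow_error *err)
	{
		struct rte_flow_attr attr = { .ingress = 1 };
		/* outer IPv4/UDP: no field is matched */
		struct rte_flow_item_ipv4 outer_ip = { .hdr = { 0 } };
		struct rte_flow_item_ipv4 outer_ip_mask = { .hdr = { 0 } };
		struct rte_flow_item_udp outer_udp = { .hdr = { 0 } };
		struct rte_flow_item_udp outer_udp_mask = { .hdr = { 0 } };
		/* match the tunneled (inner) destination 10.0.0.1 */
		struct rte_flow_item_ipv4 inner_ip = {
			.hdr.dst_addr = RTE_BE32(0x0a000001),
		};
		struct rte_flow_item_ipv4 inner_ip_mask = {
			.hdr.dst_addr = RTE_BE32(0xffffffff),
		};
		struct rte_flow_item pattern[] = {
			{ .type = RTE_FLOW_ITEM_TYPE_ETH },
			{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
			  .spec = &outer_ip, .mask = &outer_ip_mask },
			{ .type = RTE_FLOW_ITEM_TYPE_UDP,
			  .spec = &outer_udp, .mask = &outer_udp_mask },
			{ .type = RTE_FLOW_ITEM_TYPE_VXLAN },
			{ .type = RTE_FLOW_ITEM_TYPE_ETH },
			{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
			  .spec = &inner_ip, .mask = &inner_ip_mask },
			{ .type = RTE_FLOW_ITEM_TYPE_END },
		};
		struct rte_flow_action actions[] = {
			{ .type = RTE_FLOW_ACTION_TYPE_DROP },
			{ .type = RTE_FLOW_ACTION_TYPE_END },
		};

		return rte_flow_create(port_id, &attr, pattern, actions, err);
	}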

Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
---
 drivers/net/ice/Makefile            |   1 +
 drivers/net/ice/ice_ethdev.c        |  18 ++
 drivers/net/ice/ice_ethdev.h        |   7 +
 drivers/net/ice/ice_switch_filter.c | 511 ++++++++++++++++++++++++++++++++++++
 drivers/net/ice/ice_switch_filter.h |  24 ++
 drivers/net/ice/meson.build         |   3 +-
 6 files changed, 563 insertions(+), 1 deletion(-)
 create mode 100644 drivers/net/ice/ice_switch_filter.c
 create mode 100644 drivers/net/ice/ice_switch_filter.h

diff --git a/drivers/net/ice/Makefile b/drivers/net/ice/Makefile
index 0e5c55e..b10d826 100644
--- a/drivers/net/ice/Makefile
+++ b/drivers/net/ice/Makefile
@@ -60,6 +60,7 @@ ifeq ($(CONFIG_RTE_ARCH_X86), y)
 SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_rxtx_vec_sse.c
 endif
 
+SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_switch_filter.c
 ifeq ($(findstring RTE_MACHINE_CPUFLAG_AVX2,$(CFLAGS)),RTE_MACHINE_CPUFLAG_AVX2)
 	CC_AVX2_SUPPORT=1
 else
diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index 1e48bd0..b50ec50 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -1364,6 +1364,21 @@ static int ice_load_pkg(struct rte_eth_dev *dev)
 	return err;
 }
 
+static void
+ice_base_queue_get(struct ice_pf *pf)
+{
+	uint32_t reg;
+	struct ice_hw *hw = ICE_PF_TO_HW(pf);
+
+	reg = ICE_READ_REG(hw, PFLAN_RX_QALLOC);
+	if (reg & PFLAN_RX_QALLOC_VALID_M) {
+		pf->base_queue = reg & PFLAN_RX_QALLOC_FIRSTQ_M;
+	} else {
+		PMD_INIT_LOG(WARNING, "Failed to get Rx base queue"
+					" index");
+	}
+}
+
 static int
 ice_dev_init(struct rte_eth_dev *dev)
 {
@@ -1460,6 +1475,9 @@ ice_dev_init(struct rte_eth_dev *dev)
 	/* enable uio intr after callback register */
 	rte_intr_enable(intr_handle);
 
+	/* get base queue pairs index  in the device */
+	ice_base_queue_get(pf);
+
 	return 0;
 
 err_pf_setup:
diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index 1385afa..50b966c 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -234,6 +234,12 @@ struct ice_vsi {
 	bool offset_loaded;
 };
 
+/* Struct to store flow created. */
+struct rte_flow {
+	TAILQ_ENTRY(rte_flow) node;
+	void *rule;
+};
+
 struct ice_pf {
 	struct ice_adapter *adapter; /* The adapter this PF associate to */
 	struct ice_vsi *main_vsi; /* pointer to main VSI structure */
@@ -252,6 +258,7 @@ struct ice_pf {
 	uint16_t hash_lut_size; /* The size of hash lookup table */
 	uint16_t lan_nb_qp_max;
 	uint16_t lan_nb_qps; /* The number of queue pairs of LAN */
+	uint16_t base_queue; /* The base queue pairs index  in the device */
 	struct ice_hw_port_stats stats_offset;
 	struct ice_hw_port_stats stats;
 	/* internal packet statistics, it should be excluded from the total */
diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
new file mode 100644
index 0000000..5424223
--- /dev/null
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -0,0 +1,511 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_eth_ctrl.h>
+#include <rte_tailq.h>
+#include <rte_flow_driver.h>
+
+#include "ice_logs.h"
+#include "base/ice_type.h"
+#include "ice_switch_filter.h"
+
+static int
+ice_parse_switch_filter(const struct rte_flow_item pattern[],
+			const struct rte_flow_action actions[],
+			struct rte_flow_error *error,
+			struct ice_adv_lkup_elem *list,
+			uint16_t *lkups_num,
+			enum ice_sw_tunnel_type tun_type)
+{
+	const struct rte_flow_item *item = pattern;
+	enum rte_flow_item_type item_type;
+	const struct rte_flow_item_eth *eth_spec, *eth_mask;
+	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
+	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
+	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
+	const struct rte_flow_item_udp *udp_spec, *udp_mask;
+	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
+	const struct rte_flow_item_nvgre  *nvgre_spec, *nvgre_mask;
+	const struct rte_flow_item_vxlan  *vxlan_spec, *vxlan_mask;
+	uint16_t j, t = 0;
+	uint16_t tunnel_valid = 0;
+
+	for (item = pattern; item->type !=
+			RTE_FLOW_ITEM_TYPE_END; item++) {
+		item_type = item->type;
+
+		switch (item_type) {
+		case RTE_FLOW_ITEM_TYPE_ETH:
+			eth_spec = item->spec;
+			eth_mask = item->mask;
+			if (eth_spec && eth_mask) {
+				list[t].type = (tun_type == ICE_NON_TUN) ?
+					ICE_MAC_OFOS : ICE_MAC_IL;
+				struct ice_ether_hdr *h;
+				struct ice_ether_hdr *m;
+				uint16_t i = 0;
+				h = &list[t].h_u.eth_hdr;
+				m = &list[t].m_u.eth_hdr;
+				for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
+					if (eth_mask->src.addr_bytes[j] ==
+								UINT8_MAX) {
+						h->src_addr[j] =
+						eth_spec->src.addr_bytes[j];
+						m->src_addr[j] =
+						eth_mask->src.addr_bytes[j];
+						i = 1;
+					}
+					if (eth_mask->dst.addr_bytes[j] ==
+								UINT8_MAX) {
+						h->dst_addr[j] =
+						eth_spec->dst.addr_bytes[j];
+						m->dst_addr[j] =
+						eth_mask->dst.addr_bytes[j];
+						i = 1;
+					}
+				}
+				if (i)
+					t++;
+				if (eth_mask->type == UINT16_MAX) {
+					list[t].type = ICE_ETYPE_OL;
+					list[t].h_u.ethertype.ethtype_id =
+						eth_spec->type;
+					list[t].m_u.ethertype.ethtype_id =
+						UINT16_MAX;
+					t++;
+				}
+			} else if (!eth_spec && !eth_mask) {
+				list[t].type = (tun_type == ICE_NON_TUN) ?
+					ICE_MAC_OFOS : ICE_MAC_IL;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_IPV4:
+			ipv4_spec = item->spec;
+			ipv4_mask = item->mask;
+			if (ipv4_spec && ipv4_mask) {
+				list[t].type = (tun_type == ICE_NON_TUN) ?
+					ICE_IPV4_OFOS : ICE_IPV4_IL;
+				if (ipv4_mask->hdr.src_addr == UINT32_MAX) {
+					list[t].h_u.ipv4_hdr.src_addr =
+						ipv4_spec->hdr.src_addr;
+					list[t].m_u.ipv4_hdr.src_addr =
+						UINT32_MAX;
+				}
+				if (ipv4_mask->hdr.dst_addr == UINT32_MAX) {
+					list[t].h_u.ipv4_hdr.dst_addr =
+						ipv4_spec->hdr.dst_addr;
+					list[t].m_u.ipv4_hdr.dst_addr =
+						UINT32_MAX;
+				}
+				if (ipv4_mask->hdr.time_to_live == UINT8_MAX) {
+					list[t].h_u.ipv4_hdr.time_to_live =
+						ipv4_spec->hdr.time_to_live;
+					list[t].m_u.ipv4_hdr.time_to_live =
+						UINT8_MAX;
+				}
+				if (ipv4_mask->hdr.next_proto_id == UINT8_MAX) {
+					list[t].h_u.ipv4_hdr.protocol =
+						ipv4_spec->hdr.next_proto_id;
+					list[t].m_u.ipv4_hdr.protocol =
+						UINT8_MAX;
+				}
+				if (ipv4_mask->hdr.type_of_service ==
+						UINT8_MAX) {
+					list[t].h_u.ipv4_hdr.tos =
+						ipv4_spec->hdr.type_of_service;
+					list[t].m_u.ipv4_hdr.tos = UINT8_MAX;
+				}
+				t++;
+			} else if (!ipv4_spec && !ipv4_mask) {
+				list[t].type = (tun_type == ICE_NON_TUN) ?
+					ICE_IPV4_OFOS : ICE_IPV4_IL;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_IPV6:
+			ipv6_spec = item->spec;
+			ipv6_mask = item->mask;
+			if (ipv6_spec && ipv6_mask) {
+				list[t].type = (tun_type == ICE_NON_TUN) ?
+					ICE_IPV6_OFOS : ICE_IPV6_IL;
+				struct ice_ipv6_hdr *f;
+				struct ice_ipv6_hdr *s;
+				f = &list[t].h_u.ipv6_hdr;
+				s = &list[t].m_u.ipv6_hdr;
+				for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
+					if (ipv6_mask->hdr.src_addr[j] ==
+								UINT8_MAX) {
+						f->src_addr[j] =
+						ipv6_spec->hdr.src_addr[j];
+						s->src_addr[j] =
+						ipv6_mask->hdr.src_addr[j];
+					}
+					if (ipv6_mask->hdr.dst_addr[j] ==
+								UINT8_MAX) {
+						f->dst_addr[j] =
+						ipv6_spec->hdr.dst_addr[j];
+						s->dst_addr[j] =
+						ipv6_mask->hdr.dst_addr[j];
+					}
+				}
+				if (ipv6_mask->hdr.proto == UINT8_MAX) {
+					f->next_hdr =
+						ipv6_spec->hdr.proto;
+					s->next_hdr = UINT8_MAX;
+				}
+				if (ipv6_mask->hdr.hop_limits == UINT8_MAX) {
+					f->hop_limit =
+						ipv6_spec->hdr.hop_limits;
+					s->hop_limit = UINT8_MAX;
+				}
+				t++;
+			} else if (!ipv6_spec && !ipv6_mask) {
+				list[t].type = (tun_type == ICE_NON_TUN) ?
+					ICE_IPV6_OFOS : ICE_IPV6_IL;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_UDP:
+			udp_spec = item->spec;
+			udp_mask = item->mask;
+			if (udp_spec && udp_mask) {
+				if (tun_type == ICE_SW_TUN_VXLAN &&
+						tunnel_valid == 0)
+					list[t].type = ICE_UDP_OF;
+				else
+					list[t].type = ICE_UDP_ILOS;
+				if (udp_mask->hdr.src_port == UINT16_MAX) {
+					list[t].h_u.l4_hdr.src_port =
+						udp_spec->hdr.src_port;
+					list[t].m_u.l4_hdr.src_port =
+						udp_mask->hdr.src_port;
+				}
+				if (udp_mask->hdr.dst_port == UINT16_MAX) {
+					list[t].h_u.l4_hdr.dst_port =
+						udp_spec->hdr.dst_port;
+					list[t].m_u.l4_hdr.dst_port =
+						udp_mask->hdr.dst_port;
+				}
+				t++;
+			} else if (!udp_spec && !udp_mask) {
+				list[t].type = ICE_UDP_ILOS;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_TCP:
+			tcp_spec = item->spec;
+			tcp_mask = item->mask;
+			if (tcp_spec && tcp_mask) {
+				list[t].type = ICE_TCP_IL;
+				if (tcp_mask->hdr.src_port == UINT16_MAX) {
+					list[t].h_u.l4_hdr.src_port =
+						tcp_spec->hdr.src_port;
+					list[t].m_u.l4_hdr.src_port =
+						tcp_mask->hdr.src_port;
+				}
+				if (tcp_mask->hdr.dst_port == UINT16_MAX) {
+					list[t].h_u.l4_hdr.dst_port =
+						tcp_spec->hdr.dst_port;
+					list[t].m_u.l4_hdr.dst_port =
+						tcp_mask->hdr.dst_port;
+				}
+				t++;
+			} else if (!tcp_spec && !tcp_mask) {
+				list[t].type = ICE_TCP_IL;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_SCTP:
+			sctp_spec = item->spec;
+			sctp_mask = item->mask;
+			if (sctp_spec && sctp_mask) {
+				list[t].type = ICE_SCTP_IL;
+				if (sctp_mask->hdr.src_port == UINT16_MAX) {
+					list[t].h_u.sctp_hdr.src_port =
+						sctp_spec->hdr.src_port;
+					list[t].m_u.sctp_hdr.src_port =
+						sctp_mask->hdr.src_port;
+				}
+				if (sctp_mask->hdr.dst_port == UINT16_MAX) {
+					list[t].h_u.sctp_hdr.dst_port =
+						sctp_spec->hdr.dst_port;
+					list[t].m_u.sctp_hdr.dst_port =
+						sctp_mask->hdr.dst_port;
+				}
+				t++;
+			} else if (!sctp_spec && !sctp_mask) {
+				list[t].type = ICE_SCTP_IL;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_VXLAN:
+			vxlan_spec = item->spec;
+			vxlan_mask = item->mask;
+			tunnel_valid = 1;
+			if (vxlan_spec && vxlan_mask) {
+				list[t].type = ICE_VXLAN;
+				if (vxlan_mask->vni[0] == UINT8_MAX &&
+					vxlan_mask->vni[1] == UINT8_MAX &&
+					vxlan_mask->vni[2] == UINT8_MAX) {
+					list[t].h_u.tnl_hdr.vni =
+						(vxlan_spec->vni[2] << 16) |
+						(vxlan_spec->vni[1] << 8) |
+						vxlan_spec->vni[0];
+					list[t].m_u.tnl_hdr.vni =
+						UINT32_MAX;
+				}
+				t++;
+			} else if (!vxlan_spec && !vxlan_mask) {
+				list[t].type = ICE_VXLAN;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_NVGRE:
+			nvgre_spec = item->spec;
+			nvgre_mask = item->mask;
+			tunnel_valid = 1;
+			if (nvgre_spec && nvgre_mask) {
+				list[t].type = ICE_NVGRE;
+				if (nvgre_mask->tni[0] == UINT8_MAX &&
+					nvgre_mask->tni[1] == UINT8_MAX &&
+					nvgre_mask->tni[2] == UINT8_MAX) {
+					list[t].h_u.nvgre_hdr.tni_flow =
+						(nvgre_spec->tni[2] << 16) |
+						(nvgre_spec->tni[1] << 8) |
+						nvgre_spec->tni[0];
+					list[t].m_u.nvgre_hdr.tni_flow =
+						UINT32_MAX;
+				}
+				t++;
+			} else if (!nvgre_spec && !nvgre_mask) {
+				list[t].type = ICE_NVGRE;
+			}
+			break;
+
+		case RTE_FLOW_ITEM_TYPE_VOID:
+		case RTE_FLOW_ITEM_TYPE_END:
+			break;
+
+		default:
+			rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ITEM, actions,
+				   "Invalid pattern item.");
+			goto out;
+		}
+	}
+
+	*lkups_num = t;
+
+	return 0;
+out:
+	return -rte_errno;
+}
+
+/* For now the ice switch filter action code only
+ * supports the QUEUE and DROP actions.
+ */
+static int
+ice_parse_switch_action(struct ice_pf *pf,
+				 const struct rte_flow_action *actions,
+				 struct rte_flow_error *error,
+				 struct ice_adv_rule_info *rule_info)
+{
+	struct ice_vsi *vsi = pf->main_vsi;
+	const struct rte_flow_action_queue *act_q;
+	uint16_t base_queue;
+	const struct rte_flow_action *action;
+	enum rte_flow_action_type action_type;
+
+	base_queue = pf->base_queue;
+	for (action = actions; action->type !=
+			RTE_FLOW_ACTION_TYPE_END; action++) {
+		action_type = action->type;
+		switch (action_type) {
+		case RTE_FLOW_ACTION_TYPE_QUEUE:
+			act_q = action->conf;
+			rule_info->sw_act.fltr_act =
+				ICE_FWD_TO_Q;
+			rule_info->sw_act.fwd_id.q_id =
+				base_queue + act_q->index;
+			break;
+
+		case RTE_FLOW_ACTION_TYPE_DROP:
+			rule_info->sw_act.fltr_act =
+				ICE_DROP_PACKET;
+			break;
+
+		case RTE_FLOW_ACTION_TYPE_VOID:
+			break;
+
+		default:
+			rte_flow_error_set(error,
+				EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				actions,
+				"Invalid action type");
+			return -rte_errno;
+		}
+	}
+
+	rule_info->sw_act.vsi_handle = vsi->idx;
+	rule_info->rx = 1;
+	rule_info->sw_act.src = vsi->idx;
+	rule_info->priority = 5;
+
+	return 0;
+}
+
+static int
+ice_switch_rule_set(struct ice_pf *pf,
+			struct ice_adv_lkup_elem *list,
+			uint16_t lkups_cnt,
+			struct ice_adv_rule_info *rule_info,
+			struct rte_flow *flow,
+			struct rte_flow_error *error)
+{
+	struct ice_hw *hw = ICE_PF_TO_HW(pf);
+	int ret;
+	struct ice_rule_query_data rule_added = {0};
+	struct ice_rule_query_data *filter_ptr;
+
+	if (lkups_cnt > ICE_MAX_CHAIN_WORDS) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+			"item number too large for rule");
+		return -rte_errno;
+	}
+	if (!list) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+			"lookup list should not be NULL");
+		return -rte_errno;
+	}
+
+	ret = ice_add_adv_rule(hw, list, lkups_cnt, rule_info, &rule_added);
+
+	if (!ret) {
+		filter_ptr = rte_zmalloc("ice_switch_filter",
+			sizeof(struct ice_rule_query_data), 0);
+		if (!filter_ptr) {
+			PMD_DRV_LOG(ERR, "failed to allocate memory");
+			return -EINVAL;
+		}
+		flow->rule = filter_ptr;
+		rte_memcpy(filter_ptr,
+			&rule_added,
+			sizeof(struct ice_rule_query_data));
+	}
+
+	return ret;
+}
+
+int
+ice_create_switch_filter(struct ice_pf *pf,
+			const struct rte_flow_item pattern[],
+			const struct rte_flow_action actions[],
+			struct rte_flow *flow,
+			struct rte_flow_error *error)
+{
+	int ret = 0;
+	struct ice_adv_rule_info rule_info = {0};
+	struct ice_adv_lkup_elem *list = NULL;
+	uint16_t lkups_num = 0;
+	const struct rte_flow_item *item = pattern;
+	uint16_t item_num = 0;
+	enum ice_sw_tunnel_type tun_type = ICE_NON_TUN;
+
+	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+		item_num++;
+		if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN)
+			tun_type = ICE_SW_TUN_VXLAN;
+		if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE)
+			tun_type = ICE_SW_TUN_NVGRE;
+	}
+	rule_info.tun_type = tun_type;
+
+	list = rte_zmalloc(NULL, item_num * sizeof(*list), 0);
+	if (!list) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "No memory for PMD internal items");
+		return -rte_errno;
+	}
+
+	ret = ice_parse_switch_filter(pattern, actions, error,
+			list, &lkups_num, tun_type);
+	if (ret)
+		goto error;
+
+	ret = ice_parse_switch_action(pf, actions, error, &rule_info);
+	if (ret)
+		goto error;
+
+	ret = ice_switch_rule_set(pf, list, lkups_num, &rule_info, flow, error);
+	if (ret)
+		goto error;
+
+	rte_free(list);
+	return 0;
+
+error:
+	rte_free(list);
+
+	return -rte_errno;
+}
+
+int
+ice_destroy_switch_filter(struct ice_pf *pf,
+			struct rte_flow *flow,
+			struct rte_flow_error *error)
+{
+	struct ice_hw *hw = ICE_PF_TO_HW(pf);
+	int ret;
+	struct ice_rule_query_data *filter_ptr;
+
+	filter_ptr = (struct ice_rule_query_data *)
+			flow->rule;
+
+	if (!filter_ptr) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+			"no such flow"
+			" created by switch filter");
+		return -rte_errno;
+	}
+
+	ret = ice_rem_adv_rule_by_id(hw, filter_ptr);
+	if (ret) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+			"fail to destroy switch filter rule");
+		return -rte_errno;
+	}
+
+	rte_free(filter_ptr);
+	return ret;
+}
+
+void
+ice_free_switch_filter_rule(void *rule)
+{
+	struct ice_rule_query_data *filter_ptr;
+
+	filter_ptr = (struct ice_rule_query_data *)rule;
+
+	rte_free(filter_ptr);
+}
diff --git a/drivers/net/ice/ice_switch_filter.h b/drivers/net/ice/ice_switch_filter.h
new file mode 100644
index 0000000..cea4799
--- /dev/null
+++ b/drivers/net/ice/ice_switch_filter.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+#ifndef _ICE_SWITCH_FILTER_H_
+#define _ICE_SWITCH_FILTER_H_
+
+#include "base/ice_switch.h"
+#include "base/ice_type.h"
+#include "ice_ethdev.h"
+
+int
+ice_create_switch_filter(struct ice_pf *pf,
+			const struct rte_flow_item pattern[],
+			const struct rte_flow_action actions[],
+			struct rte_flow *flow,
+			struct rte_flow_error *error);
+int
+ice_destroy_switch_filter(struct ice_pf *pf,
+			struct rte_flow *flow,
+			struct rte_flow_error *error);
+void
+ice_free_switch_filter_rule(void *rule);
+#endif /* _ICE_SWITCH_FILTER_H_ */
diff --git a/drivers/net/ice/meson.build b/drivers/net/ice/meson.build
index 2bec688..8697676 100644
--- a/drivers/net/ice/meson.build
+++ b/drivers/net/ice/meson.build
@@ -6,7 +6,8 @@ objs = [base_objs]
 
 sources = files(
 	'ice_ethdev.c',
-	'ice_rxtx.c'
+	'ice_rxtx.c',
+	'ice_switch_filter.c'
 	)
 
 deps += ['hash']
-- 
2.9.5


^ permalink raw reply	[flat|nested] 73+ messages in thread

* [dpdk-dev] [PATCH v9 2/3] net/ice: add generic flow API
  2019-07-01  8:32 ` [dpdk-dev] [PATCH v9 0/3] " Qiming Yang
  2019-07-01  8:32   ` [dpdk-dev] [PATCH v9 1/3] net/ice: enable switch filter Qiming Yang
@ 2019-07-01  8:32   ` Qiming Yang
  2019-07-01  8:32   ` [dpdk-dev] [PATCH v9 3/3] net/ice: add UDP tunnel port support Qiming Yang
  2019-07-01 11:38   ` [dpdk-dev] [PATCH v9 0/3] Enable rte_flow API in ice driver Zhang, Qi Z
  3 siblings, 0 replies; 73+ messages in thread
From: Qiming Yang @ 2019-07-01  8:32 UTC (permalink / raw)
  To: dev; +Cc: qi.z.zhang, Qiming Yang

This patch adds ice_flow_create, ice_flow_destroy,
ice_flow_flush and ice_flow_validate support;
these are used to handle all the generic filters.
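
For illustration only (not part of the patch), the usual lifecycle seen
from an application: check a rule with rte_flow_validate() before
creating it, and flush everything at teardown. The helper names are
invented, and attr, pattern and actions are assumed to be built as in
the earlier sketches.

	#include <stdio.h>
	#include <rte_ethdev.h>
	#include <rte_flow.h>

	static struct rte_flow *
	example_validate_then_create(uint16_t port_id,
				     const struct rte_flow_attr *attr,
				     const struct rte_flow_item pattern[],
				     const struct rte_flow_action actions[])
	{
		struct rte_flow_error err;

		/* maps to ice_flow_validate() in this patch */
		if (rte_flow_validate(port_id, attr, pattern, actions, &err)) {
			printf("rule not supported: %s\n",
			       err.message ? err.message : "(no message)");
			return NULL;
		}
		/* maps to ice_flow_create() */
		return rte_flow_create(port_id, attr, pattern, actions, &err);
	}

	static void
	example_teardown(uint16_t port_id)
	{
		struct rte_flow_error err;

		/* maps to ice_flow_flush(): removes all rules on the port */
		rte_flow_flush(port_id, &err);
	}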

Signed-off-by: Qiming Yang <qiming.yang@intel.com>
---
 doc/guides/rel_notes/release_19_08.rst |   1 +
 drivers/net/ice/Makefile               |   1 +
 drivers/net/ice/ice_ethdev.c           |  44 +++
 drivers/net/ice/ice_ethdev.h           |   5 +
 drivers/net/ice/ice_generic_flow.c     | 696 +++++++++++++++++++++++++++++++++
 drivers/net/ice/ice_generic_flow.h     | 615 +++++++++++++++++++++++++++++
 drivers/net/ice/meson.build            |   3 +-
 7 files changed, 1364 insertions(+), 1 deletion(-)
 create mode 100644 drivers/net/ice/ice_generic_flow.c
 create mode 100644 drivers/net/ice/ice_generic_flow.h

diff --git a/doc/guides/rel_notes/release_19_08.rst b/doc/guides/rel_notes/release_19_08.rst
index 57364af..0f472da 100644
--- a/doc/guides/rel_notes/release_19_08.rst
+++ b/doc/guides/rel_notes/release_19_08.rst
@@ -87,6 +87,7 @@ New Features
   Updated ice driver with new features and improvements, including:
 
   * Enabled Tx outer/inner L3/L4 checksum offload.
+  * Enabled generic filter framework and supported switch filter.
 
 * **Updated Solarflare network PMD.**
 
diff --git a/drivers/net/ice/Makefile b/drivers/net/ice/Makefile
index b10d826..32abeb6 100644
--- a/drivers/net/ice/Makefile
+++ b/drivers/net/ice/Makefile
@@ -79,5 +79,6 @@ endif
 ifeq ($(CC_AVX2_SUPPORT), 1)
 	SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_rxtx_vec_avx2.c
 endif
+SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_generic_flow.c
 
 include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index b50ec50..28065eb 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -15,6 +15,7 @@
 #include "base/ice_dcb.h"
 #include "ice_ethdev.h"
 #include "ice_rxtx.h"
+#include "ice_switch_filter.h"
 
 #define ICE_MAX_QP_NUM "max_queue_pair_num"
 #define ICE_DFLT_OUTER_TAG_TYPE ICE_AQ_VSI_OUTER_TAG_VLAN_9100
@@ -83,6 +84,10 @@ static int ice_xstats_get(struct rte_eth_dev *dev,
 static int ice_xstats_get_names(struct rte_eth_dev *dev,
 				struct rte_eth_xstat_name *xstats_names,
 				unsigned int limit);
+static int ice_dev_filter_ctrl(struct rte_eth_dev *dev,
+			enum rte_filter_type filter_type,
+			enum rte_filter_op filter_op,
+			void *arg);
 
 static const struct rte_pci_id pci_id_ice_map[] = {
 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_BACKPLANE) },
@@ -141,6 +146,7 @@ static const struct eth_dev_ops ice_eth_dev_ops = {
 	.xstats_get                   = ice_xstats_get,
 	.xstats_get_names             = ice_xstats_get_names,
 	.xstats_reset                 = ice_stats_reset,
+	.filter_ctrl                  = ice_dev_filter_ctrl,
 };
 
 /* store statistics names and its offset in stats structure */
@@ -1478,6 +1484,8 @@ ice_dev_init(struct rte_eth_dev *dev)
 	/* get base queue pairs index  in the device */
 	ice_base_queue_get(pf);
 
+	TAILQ_INIT(&pf->flow_list);
+
 	return 0;
 
 err_pf_setup:
@@ -1621,6 +1629,8 @@ ice_dev_uninit(struct rte_eth_dev *dev)
 {
 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	struct rte_flow *p_flow;
 
 	ice_dev_close(dev);
 
@@ -1638,6 +1648,13 @@ ice_dev_uninit(struct rte_eth_dev *dev)
 	rte_intr_callback_unregister(intr_handle,
 				     ice_interrupt_handler, dev);
 
+	/* Remove all flows */
+	while ((p_flow = TAILQ_FIRST(&pf->flow_list))) {
+		TAILQ_REMOVE(&pf->flow_list, p_flow, node);
+		ice_free_switch_filter_rule(p_flow->rule);
+		rte_free(p_flow);
+	}
+
 	return 0;
 }
 
@@ -3623,6 +3640,33 @@ static int ice_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
 }
 
 static int
+ice_dev_filter_ctrl(struct rte_eth_dev *dev,
+		     enum rte_filter_type filter_type,
+		     enum rte_filter_op filter_op,
+		     void *arg)
+{
+	int ret = 0;
+
+	if (!dev)
+		return -EINVAL;
+
+	switch (filter_type) {
+	case RTE_ETH_FILTER_GENERIC:
+		if (filter_op != RTE_ETH_FILTER_GET)
+			return -EINVAL;
+		*(const void **)arg = &ice_flow_ops;
+		break;
+	default:
+		PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
+					filter_type);
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+static int
 ice_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 	      struct rte_pci_device *pci_dev)
 {
diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index 50b966c..8a52239 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -234,12 +234,16 @@ struct ice_vsi {
 	bool offset_loaded;
 };
 
+extern const struct rte_flow_ops ice_flow_ops;
+
 /* Struct to store flow created. */
 struct rte_flow {
 	TAILQ_ENTRY(rte_flow) node;
 	void *rule;
 };
 
+TAILQ_HEAD(ice_flow_list, rte_flow);
+
 struct ice_pf {
 	struct ice_adapter *adapter; /* The adapter this PF associate to */
 	struct ice_vsi *main_vsi; /* pointer to main VSI structure */
@@ -266,6 +270,7 @@ struct ice_pf {
 	struct ice_eth_stats internal_stats;
 	bool offset_loaded;
 	bool adapter_stopped;
+	struct ice_flow_list flow_list;
 };
 
 /**
diff --git a/drivers/net/ice/ice_generic_flow.c b/drivers/net/ice/ice_generic_flow.c
new file mode 100644
index 0000000..d5ff278
--- /dev/null
+++ b/drivers/net/ice/ice_generic_flow.c
@@ -0,0 +1,696 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_malloc.h>
+
+#include "ice_ethdev.h"
+#include "ice_generic_flow.h"
+#include "ice_switch_filter.h"
+
+static int ice_flow_validate(struct rte_eth_dev *dev,
+		const struct rte_flow_attr *attr,
+		const struct rte_flow_item pattern[],
+		const struct rte_flow_action actions[],
+		struct rte_flow_error *error);
+static struct rte_flow *ice_flow_create(struct rte_eth_dev *dev,
+		const struct rte_flow_attr *attr,
+		const struct rte_flow_item pattern[],
+		const struct rte_flow_action actions[],
+		struct rte_flow_error *error);
+static int ice_flow_destroy(struct rte_eth_dev *dev,
+		struct rte_flow *flow,
+		struct rte_flow_error *error);
+static int ice_flow_flush(struct rte_eth_dev *dev,
+		struct rte_flow_error *error);
+
+const struct rte_flow_ops ice_flow_ops = {
+	.validate = ice_flow_validate,
+	.create = ice_flow_create,
+	.destroy = ice_flow_destroy,
+	.flush = ice_flow_flush,
+};
+
+static int
+ice_flow_valid_attr(const struct rte_flow_attr *attr,
+		     struct rte_flow_error *error)
+{
+	/* Must be input direction */
+	if (!attr->ingress) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+				   attr, "Only support ingress.");
+		return -rte_errno;
+	}
+
+	/* Not supported */
+	if (attr->egress) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+				   attr, "Not support egress.");
+		return -rte_errno;
+	}
+
+	/* Not supported */
+	if (attr->priority) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+				   attr, "Not support priority.");
+		return -rte_errno;
+	}
+
+	/* Not supported */
+	if (attr->group) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+				   attr, "Not support group.");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+/* Find the first VOID or non-VOID item pointer */
+static const struct rte_flow_item *
+ice_find_first_item(const struct rte_flow_item *item, bool is_void)
+{
+	bool is_find;
+
+	while (item->type != RTE_FLOW_ITEM_TYPE_END) {
+		if (is_void)
+			is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
+		else
+			is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
+		if (is_find)
+			break;
+		item++;
+	}
+	return item;
+}
+
+/* Skip all VOID items of the pattern */
+static void
+ice_pattern_skip_void_item(struct rte_flow_item *items,
+			    const struct rte_flow_item *pattern)
+{
+	uint32_t cpy_count = 0;
+	const struct rte_flow_item *pb = pattern, *pe = pattern;
+
+	for (;;) {
+		/* Find a non-void item first */
+		pb = ice_find_first_item(pb, false);
+		if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
+			pe = pb;
+			break;
+		}
+
+		/* Find a void item */
+		pe = ice_find_first_item(pb + 1, true);
+
+		cpy_count = pe - pb;
+		rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);
+
+		items += cpy_count;
+
+		if (pe->type == RTE_FLOW_ITEM_TYPE_END) {
+			pb = pe;
+			break;
+		}
+
+		pb = pe + 1;
+	}
+	/* Copy the END item. */
+	rte_memcpy(items, pe, sizeof(struct rte_flow_item));
+}
+
+/* Check if the pattern matches a supported item type array */
+static bool
+ice_match_pattern(enum rte_flow_item_type *item_array,
+		const struct rte_flow_item *pattern)
+{
+	const struct rte_flow_item *item = pattern;
+
+	while ((*item_array == item->type) &&
+	       (*item_array != RTE_FLOW_ITEM_TYPE_END)) {
+		item_array++;
+		item++;
+	}
+
+	return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
+		item->type == RTE_FLOW_ITEM_TYPE_END);
+}
+
+static uint64_t ice_flow_valid_pattern(const struct rte_flow_item pattern[],
+		struct rte_flow_error *error)
+{
+	uint16_t i = 0;
+	uint64_t inset;
+	struct rte_flow_item *items; /* used for pattern without VOID items */
+	uint32_t item_num = 0; /* non-void item number */
+
+	/* Get the non-void item number of pattern */
+	while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
+		if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
+			item_num++;
+		i++;
+	}
+	item_num++;
+
+	items = rte_zmalloc("ice_pattern",
+			    item_num * sizeof(struct rte_flow_item), 0);
+	if (!items) {
+		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+				   NULL, "No memory for PMD internal items.");
+		return ICE_INSET_NONE;
+	}
+
+	ice_pattern_skip_void_item(items, pattern);
+
+	for (i = 0; i < RTE_DIM(ice_supported_patterns); i++)
+		if (ice_match_pattern(ice_supported_patterns[i].items,
+				      items)) {
+			inset = ice_supported_patterns[i].sw_fields;
+			rte_free(items);
+			return inset;
+		}
+	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+			   pattern, "Unsupported pattern");
+
+	rte_free(items);
+	return 0;
+}
+
+static uint64_t ice_get_flow_field(const struct rte_flow_item pattern[],
+			struct rte_flow_error *error)
+{
+	const struct rte_flow_item *item = pattern;
+	const struct rte_flow_item_eth *eth_spec, *eth_mask;
+	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
+	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
+	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
+	const struct rte_flow_item_udp *udp_spec, *udp_mask;
+	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
+	const struct rte_flow_item_icmp *icmp_mask;
+	const struct rte_flow_item_icmp6 *icmp6_mask;
+	const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
+	const struct rte_flow_item_nvgre *nvgre_spec, *nvgre_mask;
+	enum rte_flow_item_type item_type;
+	uint8_t  ipv6_addr_mask[16] = {
+		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
+	uint64_t input_set = ICE_INSET_NONE;
+	bool outer_ip = true;
+	bool outer_l4 = true;
+
+	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+		if (item->last) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Range is not supported");
+			return 0;
+		}
+		item_type = item->type;
+		switch (item_type) {
+		case RTE_FLOW_ITEM_TYPE_ETH:
+			eth_spec = item->spec;
+			eth_mask = item->mask;
+
+			if (eth_spec && eth_mask) {
+				if (rte_is_broadcast_ether_addr(&eth_mask->src))
+					input_set |= ICE_INSET_SMAC;
+				if (rte_is_broadcast_ether_addr(&eth_mask->dst))
+					input_set |= ICE_INSET_DMAC;
+				if (eth_mask->type == RTE_BE16(0xffff))
+					input_set |= ICE_INSET_ETHERTYPE;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_IPV4:
+			ipv4_spec = item->spec;
+			ipv4_mask = item->mask;
+
+			if (!(ipv4_spec && ipv4_mask)) {
+				rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Invalid IPv4 spec or mask.");
+				return 0;
+			}
+
+			/* Check IPv4 mask and update input set */
+			if (ipv4_mask->hdr.version_ihl ||
+			    ipv4_mask->hdr.total_length ||
+			    ipv4_mask->hdr.packet_id ||
+			    ipv4_mask->hdr.hdr_checksum) {
+				rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Invalid IPv4 mask.");
+				return 0;
+			}
+
+			if (outer_ip) {
+				if (ipv4_mask->hdr.src_addr == UINT32_MAX)
+					input_set |= ICE_INSET_IPV4_SRC;
+				if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
+					input_set |= ICE_INSET_IPV4_DST;
+				if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
+					input_set |= ICE_INSET_IPV4_TOS;
+				if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
+					input_set |= ICE_INSET_IPV4_TTL;
+				if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
+					input_set |= ICE_INSET_IPV4_PROTO;
+				outer_ip = false;
+			} else {
+				if (ipv4_mask->hdr.src_addr == UINT32_MAX)
+					input_set |= ICE_INSET_TUN_IPV4_SRC;
+				if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
+					input_set |= ICE_INSET_TUN_IPV4_DST;
+				if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
+					input_set |= ICE_INSET_TUN_IPV4_TTL;
+				if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
+					input_set |= ICE_INSET_TUN_IPV4_PROTO;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_IPV6:
+			ipv6_spec = item->spec;
+			ipv6_mask = item->mask;
+
+			if (!(ipv6_spec && ipv6_mask)) {
+				rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ITEM,
+					item, "Invalid IPv6 spec or mask");
+				return 0;
+			}
+
+			if (ipv6_mask->hdr.payload_len ||
+			    ipv6_mask->hdr.vtc_flow) {
+				rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Invalid IPv6 mask");
+				return 0;
+			}
+
+			if (outer_ip) {
+				if (!memcmp(ipv6_mask->hdr.src_addr,
+					    ipv6_addr_mask,
+					    RTE_DIM(ipv6_mask->hdr.src_addr)))
+					input_set |= ICE_INSET_IPV6_SRC;
+				if (!memcmp(ipv6_mask->hdr.dst_addr,
+					    ipv6_addr_mask,
+					    RTE_DIM(ipv6_mask->hdr.dst_addr)))
+					input_set |= ICE_INSET_IPV6_DST;
+				if (ipv6_mask->hdr.proto == UINT8_MAX)
+					input_set |= ICE_INSET_IPV6_PROTO;
+				if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
+					input_set |= ICE_INSET_IPV6_HOP_LIMIT;
+				outer_ip = false;
+			} else {
+				if (!memcmp(ipv6_mask->hdr.src_addr,
+					    ipv6_addr_mask,
+					    RTE_DIM(ipv6_mask->hdr.src_addr)))
+					input_set |= ICE_INSET_TUN_IPV6_SRC;
+				if (!memcmp(ipv6_mask->hdr.dst_addr,
+					    ipv6_addr_mask,
+					    RTE_DIM(ipv6_mask->hdr.dst_addr)))
+					input_set |= ICE_INSET_TUN_IPV6_DST;
+				if (ipv6_mask->hdr.proto == UINT8_MAX)
+					input_set |= ICE_INSET_TUN_IPV6_PROTO;
+				if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
+					input_set |= ICE_INSET_TUN_IPV6_TTL;
+			}
+
+			break;
+		case RTE_FLOW_ITEM_TYPE_UDP:
+			udp_spec = item->spec;
+			udp_mask = item->mask;
+
+			if (!(udp_spec && udp_mask)) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item, "Invalid UDP mask");
+				return 0;
+			}
+
+			/* Check UDP mask and update input set */
+			if (udp_mask->hdr.dgram_len ||
+			    udp_mask->hdr.dgram_cksum) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid UDP mask");
+				return 0;
+			}
+
+			if (outer_l4) {
+				if (udp_mask->hdr.src_port == UINT16_MAX)
+					input_set |= ICE_INSET_SRC_PORT;
+				if (udp_mask->hdr.dst_port == UINT16_MAX)
+					input_set |= ICE_INSET_DST_PORT;
+				outer_l4 = false;
+			} else {
+				if (udp_mask->hdr.src_port == UINT16_MAX)
+					input_set |= ICE_INSET_TUN_SRC_PORT;
+				if (udp_mask->hdr.dst_port == UINT16_MAX)
+					input_set |= ICE_INSET_TUN_DST_PORT;
+			}
+
+			break;
+		case RTE_FLOW_ITEM_TYPE_TCP:
+			tcp_spec = item->spec;
+			tcp_mask = item->mask;
+
+			if (!(tcp_spec && tcp_mask)) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item, "Invalid TCP mask");
+				return 0;
+			}
+
+			/* Check TCP mask and update input set */
+			if (tcp_mask->hdr.sent_seq ||
+			    tcp_mask->hdr.recv_ack ||
+			    tcp_mask->hdr.data_off ||
+			    tcp_mask->hdr.tcp_flags ||
+			    tcp_mask->hdr.rx_win ||
+			    tcp_mask->hdr.cksum ||
+			    tcp_mask->hdr.tcp_urp) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid TCP mask");
+				return 0;
+			}
+
+			if (outer_l4) {
+				if (tcp_mask->hdr.src_port == UINT16_MAX)
+					input_set |= ICE_INSET_SRC_PORT;
+				if (tcp_mask->hdr.dst_port == UINT16_MAX)
+					input_set |= ICE_INSET_DST_PORT;
+				outer_l4 = false;
+			} else {
+				if (tcp_mask->hdr.src_port == UINT16_MAX)
+					input_set |= ICE_INSET_TUN_SRC_PORT;
+				if (tcp_mask->hdr.dst_port == UINT16_MAX)
+					input_set |= ICE_INSET_TUN_DST_PORT;
+			}
+
+			break;
+		case RTE_FLOW_ITEM_TYPE_SCTP:
+			sctp_spec = item->spec;
+			sctp_mask = item->mask;
+
+			if (!(sctp_spec && sctp_mask)) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item, "Invalid SCTP mask");
+				return 0;
+			}
+
+			/* Check SCTP mask and update input set */
+			if (sctp_mask->hdr.cksum) {
+				rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Invalid SCTP mask");
+				return 0;
+			}
+
+			if (outer_l4) {
+				if (sctp_mask->hdr.src_port == UINT16_MAX)
+					input_set |= ICE_INSET_SRC_PORT;
+				if (sctp_mask->hdr.dst_port == UINT16_MAX)
+					input_set |= ICE_INSET_DST_PORT;
+				outer_l4 = false;
+			} else {
+				if (sctp_mask->hdr.src_port == UINT16_MAX)
+					input_set |= ICE_INSET_TUN_SRC_PORT;
+				if (sctp_mask->hdr.dst_port == UINT16_MAX)
+					input_set |= ICE_INSET_TUN_DST_PORT;
+			}
+
+			break;
+		case RTE_FLOW_ITEM_TYPE_ICMP:
+			icmp_mask = item->mask;
+			if (icmp_mask->hdr.icmp_code ||
+			    icmp_mask->hdr.icmp_cksum ||
+			    icmp_mask->hdr.icmp_ident ||
+			    icmp_mask->hdr.icmp_seq_nb) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid ICMP mask");
+				return 0;
+			}
+
+			if (icmp_mask->hdr.icmp_type == UINT8_MAX)
+				input_set |= ICE_INSET_ICMP;
+			break;
+		case RTE_FLOW_ITEM_TYPE_ICMP6:
+			icmp6_mask = item->mask;
+			if (icmp6_mask->code ||
+			    icmp6_mask->checksum) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid ICMP6 mask");
+				return 0;
+			}
+
+			if (icmp6_mask->type == UINT8_MAX)
+				input_set |= ICE_INSET_ICMP6;
+			break;
+		case RTE_FLOW_ITEM_TYPE_VXLAN:
+			vxlan_spec = item->spec;
+			vxlan_mask = item->mask;
+			/* Check if VXLAN item is used to describe protocol.
+			 * If yes, both spec and mask should be NULL.
+			 * If no, both spec and mask shouldn't be NULL.
+			 */
+			if ((!vxlan_spec && vxlan_mask) ||
+			    (vxlan_spec && !vxlan_mask)) {
+				rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Invalid VXLAN item");
+				return 0;
+			}
+
+			break;
+		case RTE_FLOW_ITEM_TYPE_NVGRE:
+			nvgre_spec = item->spec;
+			nvgre_mask = item->mask;
+			/* Check if NVGRE item is used to describe protocol.
+			 * If yes, both spec and mask should be NULL.
+			 * If no, both spec and mask shouldn't be NULL.
+			 */
+			if ((!nvgre_spec && nvgre_mask) ||
+			    (nvgre_spec && !nvgre_mask)) {
+				rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Invalid NVGRE item");
+				return 0;
+			}
+
+			break;
+		default:
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Invalid pattern item");
+			break;
+		}
+	}
+	return input_set;
+}
+
+static int ice_flow_valid_inset(const struct rte_flow_item pattern[],
+			uint64_t inset, struct rte_flow_error *error)
+{
+	uint64_t fields;
+
+	/* get valid field */
+	fields = ice_get_flow_field(pattern, error);
+	if (!fields || fields & (~inset)) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
+				   pattern,
+				   "Invalid input set");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+static int ice_flow_valid_action(struct rte_eth_dev *dev,
+				const struct rte_flow_action *actions,
+				struct rte_flow_error *error)
+{
+	const struct rte_flow_action_queue *act_q;
+	uint16_t queue;
+
+	switch (actions->type) {
+	case RTE_FLOW_ACTION_TYPE_QUEUE:
+		act_q = actions->conf;
+		queue = act_q->index;
+		if (queue >= dev->data->nb_rx_queues) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ACTION,
+					   actions, "Invalid queue ID for"
+					   " switch filter.");
+			return -rte_errno;
+		}
+		break;
+	case RTE_FLOW_ACTION_TYPE_DROP:
+		break;
+	default:
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ACTION, actions,
+				   "Invalid action.");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+static int
+ice_flow_validate(struct rte_eth_dev *dev,
+		   const struct rte_flow_attr *attr,
+		   const struct rte_flow_item pattern[],
+		   const struct rte_flow_action actions[],
+		   struct rte_flow_error *error)
+{
+	uint64_t inset = 0;
+	int ret = ICE_ERR_NOT_SUPPORTED;
+
+	if (!pattern) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+				   NULL, "NULL pattern.");
+		return -rte_errno;
+	}
+
+	if (!actions) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+				   NULL, "NULL action.");
+		return -rte_errno;
+	}
+
+	if (!attr) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR,
+				   NULL, "NULL attribute.");
+		return -rte_errno;
+	}
+
+	ret = ice_flow_valid_attr(attr, error);
+	if (ret)
+		return ret;
+
+	inset = ice_flow_valid_pattern(pattern, error);
+	if (!inset)
+		return -rte_errno;
+
+	ret = ice_flow_valid_inset(pattern, inset, error);
+	if (ret)
+		return ret;
+
+	ret = ice_flow_valid_action(dev, actions, error);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static struct rte_flow *
+ice_flow_create(struct rte_eth_dev *dev,
+		 const struct rte_flow_attr *attr,
+		 const struct rte_flow_item pattern[],
+		 const struct rte_flow_action actions[],
+		 struct rte_flow_error *error)
+{
+	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	struct rte_flow *flow = NULL;
+	int ret;
+
+	flow = rte_zmalloc("ice_flow", sizeof(struct rte_flow), 0);
+	if (!flow) {
+		rte_flow_error_set(error, ENOMEM,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Failed to allocate memory");
+		return flow;
+	}
+
+	ret = ice_flow_validate(dev, attr, pattern, actions, error);
+	if (ret < 0)
+		goto free_flow;
+
+	ret = ice_create_switch_filter(pf, pattern, actions, flow, error);
+	if (ret)
+		goto free_flow;
+
+	TAILQ_INSERT_TAIL(&pf->flow_list, flow, node);
+	return flow;
+
+free_flow:
+	rte_flow_error_set(error, -ret,
+			   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+			   "Failed to create flow.");
+	rte_free(flow);
+	return NULL;
+}
+
+static int
+ice_flow_destroy(struct rte_eth_dev *dev,
+		 struct rte_flow *flow,
+		 struct rte_flow_error *error)
+{
+	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	int ret = 0;
+
+	ret = ice_destroy_switch_filter(pf, flow, error);
+
+	if (!ret) {
+		TAILQ_REMOVE(&pf->flow_list, flow, node);
+		rte_free(flow);
+	} else {
+		rte_flow_error_set(error, -ret,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Failed to destroy flow.");
+	}
+
+	return ret;
+}
+
+static int
+ice_flow_flush(struct rte_eth_dev *dev,
+	       struct rte_flow_error *error)
+{
+	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	struct rte_flow *p_flow;
+	int ret = 0;
+
+	while ((p_flow = TAILQ_FIRST(&pf->flow_list))) {
+		ret = ice_flow_destroy(dev, p_flow, error);
+		if (ret) {
+			rte_flow_error_set(error, -ret,
+					   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+					   "Failed to flush SW flows.");
+			return -rte_errno;
+		}
+	}
+
+	return ret;
+}
diff --git a/drivers/net/ice/ice_generic_flow.h b/drivers/net/ice/ice_generic_flow.h
new file mode 100644
index 0000000..2aa79e0
--- /dev/null
+++ b/drivers/net/ice/ice_generic_flow.h
@@ -0,0 +1,615 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+#ifndef _ICE_GENERIC_FLOW_H_
+#define _ICE_GENERIC_FLOW_H_
+
+#include <rte_flow_driver.h>
+
+struct ice_flow_pattern {
+	enum rte_flow_item_type *items;
+	uint64_t sw_fields;
+};
+
+#define ICE_INSET_NONE            0x0000000000000000ULL
+
+/* bit0 ~ bit 7 */
+#define ICE_INSET_SMAC            0x0000000000000001ULL
+#define ICE_INSET_DMAC            0x0000000000000002ULL
+#define ICE_INSET_ETHERTYPE       0x0000000000000020ULL
+
+/* bit 8 ~ bit 15 */
+#define ICE_INSET_IPV4_SRC        0x0000000000000100ULL
+#define ICE_INSET_IPV4_DST        0x0000000000000200ULL
+#define ICE_INSET_IPV6_SRC        0x0000000000000400ULL
+#define ICE_INSET_IPV6_DST        0x0000000000000800ULL
+#define ICE_INSET_SRC_PORT        0x0000000000001000ULL
+#define ICE_INSET_DST_PORT        0x0000000000002000ULL
+#define ICE_INSET_ARP             0x0000000000004000ULL
+
+/* bit 16 ~ bit 31 */
+#define ICE_INSET_IPV4_TOS        0x0000000000010000ULL
+#define ICE_INSET_IPV4_PROTO      0x0000000000020000ULL
+#define ICE_INSET_IPV4_TTL        0x0000000000040000ULL
+#define ICE_INSET_IPV6_TOS        0x0000000000100000ULL
+#define ICE_INSET_IPV6_PROTO      0x0000000000200000ULL
+#define ICE_INSET_IPV6_HOP_LIMIT  0x0000000000400000ULL
+#define ICE_INSET_ICMP            0x0000000001000000ULL
+#define ICE_INSET_ICMP6           0x0000000002000000ULL
+
+/* bit 32 ~ bit 47, tunnel fields */
+#define ICE_INSET_TUN_SMAC           0x0000000100000000ULL
+#define ICE_INSET_TUN_DMAC           0x0000000200000000ULL
+#define ICE_INSET_TUN_IPV4_SRC       0x0000000400000000ULL
+#define ICE_INSET_TUN_IPV4_DST       0x0000000800000000ULL
+#define ICE_INSET_TUN_IPV4_TTL       0x0000001000000000ULL
+#define ICE_INSET_TUN_IPV4_PROTO     0x0000002000000000ULL
+#define ICE_INSET_TUN_IPV6_SRC       0x0000004000000000ULL
+#define ICE_INSET_TUN_IPV6_DST       0x0000008000000000ULL
+#define ICE_INSET_TUN_IPV6_TTL       0x0000010000000000ULL
+#define ICE_INSET_TUN_IPV6_PROTO     0x0000020000000000ULL
+#define ICE_INSET_TUN_SRC_PORT       0x0000040000000000ULL
+#define ICE_INSET_TUN_DST_PORT       0x0000080000000000ULL
+#define ICE_INSET_TUN_ID             0x0000100000000000ULL
+
+/* bit 48 ~ bit 55 */
+#define ICE_INSET_LAST_ETHER_TYPE 0x0001000000000000ULL
+
+#define ICE_FLAG_VLAN_INNER  0x00000001ULL
+#define ICE_FLAG_VLAN_OUTER  0x00000002ULL
+
+#define INSET_ETHER ( \
+	ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)
+#define INSET_MAC_IPV4 ( \
+	ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
+	ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS)
+#define INSET_MAC_IPV4_L4 ( \
+	ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
+	ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | ICE_INSET_DST_PORT | \
+	ICE_INSET_SRC_PORT)
+#define INSET_MAC_IPV4_ICMP ( \
+	ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \
+	ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | ICE_INSET_ICMP)
+#define INSET_MAC_IPV6 ( \
+	ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
+	ICE_INSET_IPV6_TOS | ICE_INSET_IPV6_HOP_LIMIT)
+#define INSET_MAC_IPV6_L4 ( \
+	ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
+	ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TOS | \
+	ICE_INSET_DST_PORT | ICE_INSET_SRC_PORT)
+#define INSET_MAC_IPV6_ICMP ( \
+	ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \
+	ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TOS | ICE_INSET_ICMP6)
+#define INSET_TUNNEL_IPV4_TYPE1 ( \
+	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
+	ICE_INSET_TUN_IPV4_TTL | ICE_INSET_TUN_IPV4_PROTO)
+#define INSET_TUNNEL_IPV4_TYPE2 ( \
+	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
+	ICE_INSET_TUN_IPV4_TTL | ICE_INSET_TUN_IPV4_PROTO | \
+	ICE_INSET_TUN_SRC_PORT | ICE_INSET_TUN_DST_PORT)
+#define INSET_TUNNEL_IPV4_TYPE3 ( \
+	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \
+	ICE_INSET_TUN_IPV4_TTL | ICE_INSET_ICMP)
+#define INSET_TUNNEL_IPV6_TYPE1 ( \
+	ICE_INSET_TUN_IPV6_SRC | ICE_INSET_TUN_IPV6_DST | \
+	ICE_INSET_TUN_IPV6_TTL | ICE_INSET_TUN_IPV6_PROTO)
+#define INSET_TUNNEL_IPV6_TYPE2 ( \
+	ICE_INSET_TUN_IPV6_SRC | ICE_INSET_TUN_IPV6_DST | \
+	ICE_INSET_TUN_IPV6_TTL | ICE_INSET_TUN_IPV6_PROTO | \
+	ICE_INSET_TUN_SRC_PORT | ICE_INSET_TUN_DST_PORT)
+#define INSET_TUNNEL_IPV6_TYPE3 ( \
+	ICE_INSET_TUN_IPV6_SRC | ICE_INSET_TUN_IPV6_DST | \
+	ICE_INSET_TUN_IPV6_TTL | ICE_INSET_ICMP6)
+
+/* L2 */
+static enum rte_flow_item_type pattern_ethertype[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* non-tunnel IPv4 */
+static enum rte_flow_item_type pattern_ipv4[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_icmp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* non-tunnel IPv6 */
+static enum rte_flow_item_type pattern_ipv6[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv6_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv6_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv6_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv6_icmp6[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_ICMP6,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* IPv4 VXLAN IPv4 */
+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv4[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv4_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv4_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv4_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv4_icmp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* IPv4 VXLAN MAC IPv4 */
+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv4[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv4_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv4_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv4_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv4_icmp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* IPv4 VXLAN IPv6 */
+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv6[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv6_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv6_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv6_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_ipv6_icmp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* IPv4 VXLAN MAC IPv6 */
+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv6[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv6_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv6_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv6_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_vxlan_eth_ipv6_icmp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_VXLAN,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* IPv4 NVGRE IPv4 */
+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv4[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv4_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv4_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv4_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv4_icmp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* IPv4 NVGRE MAC IPv4 */
+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv4[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv4_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv4_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv4_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv4_icmp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_ICMP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* IPv4 NVGRE IPv6 */
+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv6[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv6_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv6_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_ipv6_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+
+/* IPv4 NVGRE MAC IPv6 */
+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv6[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv6_udp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_UDP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv6_tcp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_TCP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_ipv4_nvgre_eth_ipv6_sctp[] = {
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV4,
+	RTE_FLOW_ITEM_TYPE_NVGRE,
+	RTE_FLOW_ITEM_TYPE_ETH,
+	RTE_FLOW_ITEM_TYPE_IPV6,
+	RTE_FLOW_ITEM_TYPE_SCTP,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
+static struct ice_flow_pattern ice_supported_patterns[] = {
+	{pattern_ethertype, INSET_ETHER},
+	{pattern_ipv4, INSET_MAC_IPV4},
+	{pattern_ipv4_udp, INSET_MAC_IPV4_L4},
+	{pattern_ipv4_sctp, INSET_MAC_IPV4_L4},
+	{pattern_ipv4_tcp, INSET_MAC_IPV4_L4},
+	{pattern_ipv4_icmp, INSET_MAC_IPV4_ICMP},
+	{pattern_ipv6, INSET_MAC_IPV6},
+	{pattern_ipv6_udp, INSET_MAC_IPV6_L4},
+	{pattern_ipv6_sctp, INSET_MAC_IPV6_L4},
+	{pattern_ipv6_tcp, INSET_MAC_IPV6_L4},
+	{pattern_ipv6_icmp6, INSET_MAC_IPV6_ICMP},
+	{pattern_ipv4_vxlan_ipv4, INSET_TUNNEL_IPV4_TYPE1},
+	{pattern_ipv4_vxlan_ipv4_udp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_vxlan_ipv4_tcp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_vxlan_ipv4_sctp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_vxlan_ipv4_icmp, INSET_TUNNEL_IPV4_TYPE3},
+	{pattern_ipv4_vxlan_eth_ipv4, INSET_TUNNEL_IPV4_TYPE1},
+	{pattern_ipv4_vxlan_eth_ipv4_udp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_vxlan_eth_ipv4_tcp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_vxlan_eth_ipv4_sctp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_vxlan_eth_ipv4_icmp, INSET_TUNNEL_IPV4_TYPE3},
+	{pattern_ipv4_vxlan_ipv6, INSET_TUNNEL_IPV6_TYPE1},
+	{pattern_ipv4_vxlan_ipv6_udp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_vxlan_ipv6_tcp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_vxlan_ipv6_sctp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_vxlan_ipv6_icmp, INSET_TUNNEL_IPV6_TYPE3},
+	{pattern_ipv4_vxlan_eth_ipv6, INSET_TUNNEL_IPV6_TYPE1},
+	{pattern_ipv4_vxlan_eth_ipv6_udp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_vxlan_eth_ipv6_tcp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_vxlan_eth_ipv6_sctp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_vxlan_eth_ipv6_icmp, INSET_TUNNEL_IPV6_TYPE3},
+	{pattern_ipv4_nvgre_ipv4, INSET_TUNNEL_IPV4_TYPE1},
+	{pattern_ipv4_nvgre_ipv4_udp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_nvgre_ipv4_tcp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_nvgre_ipv4_sctp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_nvgre_ipv4_icmp, INSET_TUNNEL_IPV4_TYPE3},
+	{pattern_ipv4_nvgre_eth_ipv4, INSET_TUNNEL_IPV4_TYPE1},
+	{pattern_ipv4_nvgre_eth_ipv4_udp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_nvgre_eth_ipv4_tcp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_nvgre_eth_ipv4_sctp, INSET_TUNNEL_IPV4_TYPE2},
+	{pattern_ipv4_nvgre_eth_ipv4_icmp, INSET_TUNNEL_IPV4_TYPE3},
+	{pattern_ipv4_nvgre_ipv6, INSET_TUNNEL_IPV6_TYPE1},
+	{pattern_ipv4_nvgre_ipv6_udp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_nvgre_ipv6_tcp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_nvgre_ipv6_sctp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_nvgre_eth_ipv6, INSET_TUNNEL_IPV6_TYPE1},
+	{pattern_ipv4_nvgre_eth_ipv6_udp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_nvgre_eth_ipv6_tcp, INSET_TUNNEL_IPV6_TYPE2},
+	{pattern_ipv4_nvgre_eth_ipv6_sctp, INSET_TUNNEL_IPV6_TYPE2},
+};
+
+#endif
diff --git a/drivers/net/ice/meson.build b/drivers/net/ice/meson.build
index 8697676..7f16647 100644
--- a/drivers/net/ice/meson.build
+++ b/drivers/net/ice/meson.build
@@ -7,7 +7,8 @@ objs = [base_objs]
 sources = files(
 	'ice_ethdev.c',
 	'ice_rxtx.c',
-	'ice_switch_filter.c'
+	'ice_switch_filter.c',
+	'ice_generic_flow.c'
 	)
 
 deps += ['hash']
-- 
2.9.5
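
For readers skimming the thread, the sketch below is not part of the submitted
patch; it is a minimal illustration of how an application could exercise the
validation path above through the public rte_flow API once the ice PMD is
bound to a port. The function name, port ID, IPv4 address, UDP port and queue
index are placeholder values.

#include <rte_flow.h>
#include <rte_byteorder.h>

/* Steer ingress UDP/IPv4 traffic for one destination to Rx queue 4. */
static struct rte_flow *
example_switch_rule(uint16_t port_id, struct rte_flow_error *err)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	/* Only fully masked fields end up in the driver's input set. */
	struct rte_flow_item_ipv4 ip_spec = {
		.hdr.dst_addr = RTE_BE32(0xC0A80001), /* 192.168.0.1 */
	};
	struct rte_flow_item_ipv4 ip_mask = {
		.hdr.dst_addr = RTE_BE32(0xFFFFFFFF),
	};
	struct rte_flow_item_udp udp_spec = {
		.hdr.dst_port = RTE_BE16(4789),
	};
	struct rte_flow_item_udp udp_mask = {
		.hdr.dst_port = RTE_BE16(0xFFFF),
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ip_spec, .mask = &ip_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
		  .spec = &udp_spec, .mask = &udp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 4 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	/* Same checks ice_flow_validate() runs before programming the HW. */
	if (rte_flow_validate(port_id, &attr, pattern, actions, err))
		return NULL;
	return rte_flow_create(port_id, &attr, pattern, actions, err);
}

The ETH item carries no spec/mask, so it only anchors the pattern; the fully
masked IPv4 destination address and UDP destination port fall within the
INSET_MAC_IPV4_L4 input set accepted by the switch filter.
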


^ permalink raw reply	[flat|nested] 73+ messages in thread

* [dpdk-dev] [PATCH v9 3/3] net/ice: add UDP tunnel port support
  2019-07-01  8:32 ` [dpdk-dev] [PATCH v9 0/3] " Qiming Yang
  2019-07-01  8:32   ` [dpdk-dev] [PATCH v9 1/3] net/ice: enable switch filter Qiming Yang
  2019-07-01  8:32   ` [dpdk-dev] [PATCH v9 2/3] net/ice: add generic flow API Qiming Yang
@ 2019-07-01  8:32   ` Qiming Yang
  2019-07-01 11:38   ` [dpdk-dev] [PATCH v9 0/3] Enable rte_flow API in ice driver Zhang, Qi Z
  3 siblings, 0 replies; 73+ messages in thread
From: Qiming Yang @ 2019-07-01  8:32 UTC (permalink / raw)
  To: dev; +Cc: qi.z.zhang, Qiming Yang

Enabled UDP tunnel port add and delete functions.

Signed-off-by: Qiming Yang <qiming.yang@intel.com>
---
 doc/guides/rel_notes/release_19_08.rst |  1 +
 drivers/net/ice/ice_ethdev.c           | 54 ++++++++++++++++++++++++++++++++++
 2 files changed, 55 insertions(+)

diff --git a/doc/guides/rel_notes/release_19_08.rst b/doc/guides/rel_notes/release_19_08.rst
index 0f472da..2a0e3aa 100644
--- a/doc/guides/rel_notes/release_19_08.rst
+++ b/doc/guides/rel_notes/release_19_08.rst
@@ -88,6 +88,7 @@ New Features
 
   * Enabled Tx outer/inner L3/L4 checksum offload.
   * Enabled generic filter framework and supported switch filter.
+  * Supported UDP tunnel port add.
 
 * **Updated Solarflare network PMD.**
 
diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index 28065eb..9ce730c 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -88,6 +88,10 @@ static int ice_dev_filter_ctrl(struct rte_eth_dev *dev,
 			enum rte_filter_type filter_type,
 			enum rte_filter_op filter_op,
 			void *arg);
+static int ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
+			struct rte_eth_udp_tunnel *udp_tunnel);
+static int ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
+			struct rte_eth_udp_tunnel *udp_tunnel);
 
 static const struct rte_pci_id pci_id_ice_map[] = {
 	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_BACKPLANE) },
@@ -147,6 +151,8 @@ static const struct eth_dev_ops ice_eth_dev_ops = {
 	.xstats_get_names             = ice_xstats_get_names,
 	.xstats_reset                 = ice_stats_reset,
 	.filter_ctrl                  = ice_dev_filter_ctrl,
+	.udp_tunnel_port_add          = ice_dev_udp_tunnel_port_add,
+	.udp_tunnel_port_del          = ice_dev_udp_tunnel_port_del,
 };
 
 /* store statistics names and its offset in stats structure */
@@ -3666,6 +3672,54 @@ ice_dev_filter_ctrl(struct rte_eth_dev *dev,
 	return ret;
 }
 
+/* Add UDP tunneling port */
+static int
+ice_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
+			     struct rte_eth_udp_tunnel *udp_tunnel)
+{
+	int ret = 0;
+	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	if (udp_tunnel == NULL)
+		return -EINVAL;
+
+	switch (udp_tunnel->prot_type) {
+	case RTE_TUNNEL_TYPE_VXLAN:
+		ret = ice_create_tunnel(hw, TNL_VXLAN, udp_tunnel->udp_port);
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "Invalid tunnel type");
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+/* Delete UDP tunneling port */
+static int
+ice_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
+			     struct rte_eth_udp_tunnel *udp_tunnel)
+{
+	int ret = 0;
+	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	if (udp_tunnel == NULL)
+		return -EINVAL;
+
+	switch (udp_tunnel->prot_type) {
+	case RTE_TUNNEL_TYPE_VXLAN:
+		ret = ice_destroy_tunnel(hw, udp_tunnel->udp_port, 0);
+		break;
+	default:
+		PMD_DRV_LOG(ERR, "Invalid tunnel type");
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
 static int
 ice_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 	      struct rte_pci_device *pci_dev)
-- 
2.9.5
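
A short usage note, not part of the patch: applications reach the two new
callbacks through the generic ethdev tunnel-port API. A minimal sketch,
assuming an already configured and started port; the function name, port ID
and VXLAN UDP port number are placeholders.

#include <rte_ethdev.h>

/* Register a VXLAN UDP port with the device, then remove it again. */
static int
example_vxlan_port(uint16_t port_id)
{
	struct rte_eth_udp_tunnel tunnel = {
		.udp_port = 4789,                  /* IANA-assigned VXLAN port */
		.prot_type = RTE_TUNNEL_TYPE_VXLAN,
	};
	int ret;

	ret = rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
	if (ret)
		return ret;
	/* ... create tunnel flows, run traffic ... */
	return rte_eth_dev_udp_tunnel_port_delete(port_id, &tunnel);
}

rte_eth_dev_udp_tunnel_port_add() and _delete() dispatch to the
udp_tunnel_port_add/udp_tunnel_port_del ops registered above, which in turn
call ice_create_tunnel()/ice_destroy_tunnel() in the shared code.
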


^ permalink raw reply	[flat|nested] 73+ messages in thread

* Re: [dpdk-dev] [PATCH v9 0/3] Enable rte_flow API in ice driver
  2019-07-01  8:32 ` [dpdk-dev] [PATCH v9 0/3] " Qiming Yang
                     ` (2 preceding siblings ...)
  2019-07-01  8:32   ` [dpdk-dev] [PATCH v9 3/3] net/ice: add UDP tunnel port support Qiming Yang
@ 2019-07-01 11:38   ` Zhang, Qi Z
  3 siblings, 0 replies; 73+ messages in thread
From: Zhang, Qi Z @ 2019-07-01 11:38 UTC (permalink / raw)
  To: Yang, Qiming, dev



> -----Original Message-----
> From: Yang, Qiming
> Sent: Monday, July 1, 2019 4:32 PM
> To: dev@dpdk.org
> Cc: Zhang, Qi Z <qi.z.zhang@intel.com>; Yang, Qiming
> <qiming.yang@intel.com>
> Subject: [PATCH v9 0/3] Enable rte_flow API in ice driver
> 
> This patch set enables the backend of rte_flow, and the generic filter related
> functions in ice driver. Supported flows include ipv4, tcpv4, udpv4, ipv6, tcpv6,
> udpv6, tunnel, etc.
> 
> These patchs depends on patch set:
> net/ice: shared code update.
> 
> ---
> v2 changes:
>  - added UDP tunnel port support.
>  - fixed compile issue.
>  - added document update.
> v3 changes:
>  - removed redundancy parser.
>  - added License.
>  - added VXLAN and NVGRE item support.
> v4 changes:
>  - fixed some typos.
> v5 changes:
>  - fixed checkpatch issues.
> v6 changes:
>  - fixed one uninitialize issue.
> v7 changes:
>  - fixed queue action validation.
> v8 changes:
>  - optimized some return values.
>  - code reorganization.
>  - added release note.
> v9 changes:
>  - fixed wrong input set.
> 
> Qiming Yang (2):
>   net/ice: add generic flow API
>   net/ice: add UDP tunnel port support
> 
> wei zhao (1):
>   net/ice: enable switch filter
> 
>  doc/guides/rel_notes/release_19_08.rst |   2 +
>  drivers/net/ice/Makefile               |   2 +
>  drivers/net/ice/ice_ethdev.c           | 116 ++++++
>  drivers/net/ice/ice_ethdev.h           |  12 +
>  drivers/net/ice/ice_generic_flow.c     | 696
> +++++++++++++++++++++++++++++++++
>  drivers/net/ice/ice_generic_flow.h     | 615
> +++++++++++++++++++++++++++++
>  drivers/net/ice/ice_switch_filter.c    | 511 ++++++++++++++++++++++++
>  drivers/net/ice/ice_switch_filter.h    |  24 ++
>  drivers/net/ice/meson.build            |   4 +-
>  9 files changed, 1981 insertions(+), 1 deletion(-)  create mode 100644
> drivers/net/ice/ice_generic_flow.c
>  create mode 100644 drivers/net/ice/ice_generic_flow.h
>  create mode 100644 drivers/net/ice/ice_switch_filter.c
>  create mode 100644 drivers/net/ice/ice_switch_filter.h
> 
> --
> 2.9.5
> 
> Acked-by: Beilei Xing <beilei.xing@intel.com>

Applied to dpdk-next-net-intel.

Thanks
Qi

^ permalink raw reply	[flat|nested] 73+ messages in thread

end of thread, other threads:[~2019-07-01 11:38 UTC | newest]

Thread overview: 73+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2019-06-03  9:05 [dpdk-dev] [PATCH 0/2] Enable rte_flow API in ice driver Qiming Yang
2019-06-03  9:05 ` [dpdk-dev] [PATCH 1/2] net/ice: enable switch filter Qiming Yang
2019-06-03 17:07   ` Aaron Conole
2019-06-04  2:02     ` Zhao1, Wei
2019-06-03  9:05 ` [dpdk-dev] [PATCH 2/2] net/ice: add generic flow API Qiming Yang
2019-06-12  7:50 ` [dpdk-dev] [PATCH v2 0/3] Enable rte_flow API in ice driver Qiming Yang
2019-06-12  7:50   ` [dpdk-dev] [PATCH v2 1/3] net/ice: enable switch filter Qiming Yang
2019-06-13  8:23     ` Wang, Xiao W
2019-06-14  9:46       ` Zhao1, Wei
2019-06-17  8:28         ` Wang, Xiao W
2019-06-18  1:57           ` Zhao1, Wei
2019-06-17  5:27     ` Xing, Beilei
2019-06-17  8:23       ` Zhao1, Wei
2019-06-17  8:51       ` Zhao1, Wei
2019-06-18  1:50         ` Xing, Beilei
2019-06-18  9:40     ` Ye Xiaolong
2019-06-19  3:06       ` Zhao1, Wei
2019-06-12  7:50   ` [dpdk-dev] [PATCH v2 2/3] net/ice: add generic flow API Qiming Yang
2019-06-17  5:50     ` Xing, Beilei
2019-06-17  6:02     ` Xing, Beilei
2019-06-17  9:19     ` Wang, Xiao W
2019-06-12  7:50   ` [dpdk-dev] [PATCH v2 3/3] net/ice: add UDP tunnel port support Qiming Yang
2019-06-20  5:34 ` [dpdk-dev] [PATCH v3 0/3] Enable rte_flow API in ice driver Qiming Yang
2019-06-20  5:34   ` [dpdk-dev] [PATCH v3 1/3] net/ice: enable switch filter Qiming Yang
2019-06-20  9:01     ` Wang, Xiao W
2019-06-20  9:12       ` Zhao1, Wei
2019-06-20  5:34   ` [dpdk-dev] [PATCH v3 2/3] net/ice: add generic flow API Qiming Yang
2019-06-20  9:32     ` Wang, Xiao W
2019-06-21  5:47       ` Yang, Qiming
2019-06-20 10:21     ` Wang, Xiao W
2019-06-20 13:33     ` Aaron Conole
2019-06-21  2:18       ` Yang, Qiming
2019-06-20  5:34   ` [dpdk-dev] [PATCH v3 3/3] net/ice: add UDP tunnel port support Qiming Yang
2019-06-21  6:13 ` [dpdk-dev] [PATCH v4 0/3] Enable rte_flow API in ice driver Qiming Yang
2019-06-21  6:13   ` [dpdk-dev] [PATCH v4 1/3] net/ice: enable switch filter Qiming Yang
2019-06-21  6:13   ` [dpdk-dev] [PATCH v4 2/3] net/ice: add generic flow API Qiming Yang
2019-06-21  6:13   ` [dpdk-dev] [PATCH v4 3/3] net/ice: add UDP tunnel port support Qiming Yang
2019-06-21  9:21 ` [dpdk-dev] [PATCH v5 0/3] Enable rte_flow API in ice driver Qiming Yang
2019-06-21  9:21   ` [dpdk-dev] [PATCH v5 1/3] net/ice: enable switch filter Qiming Yang
2019-06-21  9:21   ` [dpdk-dev] [PATCH v5 2/3] net/ice: add generic flow API Qiming Yang
2019-06-21  9:21   ` [dpdk-dev] [PATCH v5 3/3] net/ice: add UDP tunnel port support Qiming Yang
2019-06-21 14:46   ` [dpdk-dev] [PATCH v5 0/3] Enable rte_flow API in ice driver Aaron Conole
2019-06-24  6:15 ` [dpdk-dev] [PATCH v6 " Qiming Yang
2019-06-24  6:15   ` [dpdk-dev] [PATCH v6 1/3] net/ice: enable switch filter Qiming Yang
2019-06-24  6:15   ` [dpdk-dev] [PATCH v6 2/3] net/ice: add generic flow API Qiming Yang
2019-06-24  6:15   ` [dpdk-dev] [PATCH v6 3/3] net/ice: add UDP tunnel port support Qiming Yang
2019-06-25  6:48 ` [dpdk-dev] [PATCH v7 0/3] Enable rte_flow API in ice driver Qiming Yang
2019-06-25  6:48   ` [dpdk-dev] [PATCH v7 1/3] net/ice: enable switch filter Qiming Yang
2019-06-25  6:48   ` [dpdk-dev] [PATCH v7 2/3] net/ice: add generic flow API Qiming Yang
2019-06-25  6:48   ` [dpdk-dev] [PATCH v7 3/3] net/ice: add UDP tunnel port support Qiming Yang
2019-06-26  7:07     ` Xing, Beilei
2019-06-25 14:58   ` [dpdk-dev] [PATCH v7 0/3] Enable rte_flow API in ice driver Aaron Conole
2019-06-26  1:52     ` Yang, Qiming
2019-06-26  7:42       ` Ferruh Yigit
2019-06-26  8:26         ` Yang, Qiming
2019-06-26 15:52   ` Ye Xiaolong
2019-06-26  8:03 ` [dpdk-dev] [PATCH v8 " Qiming Yang
2019-06-26  8:03   ` [dpdk-dev] [PATCH v8 1/3] net/ice: enable switch filter Qiming Yang
2019-06-26  8:03   ` [dpdk-dev] [PATCH v8 2/3] net/ice: add generic flow API Qiming Yang
2019-06-26  8:03   ` [dpdk-dev] [PATCH v8 3/3] net/ice: add UDP tunnel port support Qiming Yang
2019-06-26  8:58 ` [dpdk-dev] [PATCH v8 0/4] Enable rte_flow API in ice driver Qiming Yang
2019-06-26  8:58   ` [dpdk-dev] [PATCH v8 1/4] net/ice: enable switch filter Qiming Yang
2019-06-26  8:58   ` [dpdk-dev] [PATCH v8 2/4] net/ice: add generic flow API Qiming Yang
2019-06-26  8:58   ` [dpdk-dev] [PATCH v8 3/4] net/ice: add UDP tunnel port support Qiming Yang
2019-06-26  8:58   ` [dpdk-dev] [PATCH v8 4/4] doc: add release note for generic flow Qiming Yang
2019-06-26 21:27     ` Thomas Monjalon
2019-06-27  2:04       ` Yang, Qiming
2019-06-26 13:25   ` [dpdk-dev] [PATCH v8 0/4] Enable rte_flow API in ice driver Xing, Beilei
2019-07-01  8:32 ` [dpdk-dev] [PATCH v9 0/3] " Qiming Yang
2019-07-01  8:32   ` [dpdk-dev] [PATCH v9 1/3] net/ice: enable switch filter Qiming Yang
2019-07-01  8:32   ` [dpdk-dev] [PATCH v9 2/3] net/ice: add generic flow API Qiming Yang
2019-07-01  8:32   ` [dpdk-dev] [PATCH v9 3/3] net/ice: add UDP tunnel port support Qiming Yang
2019-07-01 11:38   ` [dpdk-dev] [PATCH v9 0/3] Enable rte_flow API in ice driver Zhang, Qi Z
