DPDK patches and discussions

* [PATCH v2 1/2] net/ice/base: support ANY software type on switch filter
       [not found] <http://patchwork.dpdk.org/project/dpdk/patch/20220117105319.700421-1-yuying.zhang@intel.com/>
@ 2022-01-18 14:57 ` Yuying Zhang
  2022-01-18 14:57   ` [PATCH v2 2/2] net/ice: support drop any and steer all to queue Yuying Zhang
  0 siblings, 1 reply; 2+ messages in thread
From: Yuying Zhang @ 2022-01-18 14:57 UTC (permalink / raw)
  To: dev, qi.z.zhang; +Cc: Yuying Zhang

Add support for the ANY software type in the switch filter so that all
common packet types can be steered to a specific queue or dropped.
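
The following is an illustrative, standalone sketch (not part of the
patch) of how the new ICE_PROF_ANY bit is meant to compose with the
existing ice_prof_type flags. The matching helper is an assumption based
on profiles being selected by a bitwise AND of profile-type bits, as
ice_get_sw_fv_bitmap() does.

#include <stdbool.h>

/* Mirrors the enum values used by this patch. */
enum ice_prof_type {
	ICE_PROF_NON_TUN   = 0x1,
	ICE_PROF_TUN_UDP   = 0x2,
	ICE_PROF_TUN_GRE   = 0x4,
	ICE_PROF_TUN_PPPOE = 0x8,
	ICE_PROF_TUN_ALL   = 0xE,
	ICE_PROF_ANY       = 0x10, /* new: profile also usable by "any" rules */
	ICE_PROF_ALL       = 0xFF,
};

/* A field-vector profile tagged ICE_PROF_TUN_UDP | ICE_PROF_ANY is picked
 * up both by a UDP-tunnel request and by an ICE_PROF_ANY request, which is
 * what lets a single ANY rule cover every common packet profile.
 */
static bool prof_matches(unsigned int fv_prof_type, unsigned int req_profs)
{
	return (fv_prof_type & req_profs) != 0;
}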

Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
---
 drivers/net/ice/base/ice_flex_pipe.c     | 72 +++++++++++++++---------
 drivers/net/ice/base/ice_flex_pipe.h     |  5 +-
 drivers/net/ice/base/ice_flex_type.h     |  1 +
 drivers/net/ice/base/ice_protocol_type.h |  3 +-
 drivers/net/ice/base/ice_switch.c        | 39 +++++++------
 5 files changed, 72 insertions(+), 48 deletions(-)

diff --git a/drivers/net/ice/base/ice_flex_pipe.c b/drivers/net/ice/base/ice_flex_pipe.c
index 395787806b..708deb36d4 100644
--- a/drivers/net/ice/base/ice_flex_pipe.c
+++ b/drivers/net/ice/base/ice_flex_pipe.c
@@ -1785,23 +1785,30 @@ static enum ice_prof_type
 ice_get_sw_prof_type(struct ice_hw *hw, struct ice_fv *fv)
 {
 	u16 i;
+	bool is_any = false;
 
 	for (i = 0; i < hw->blk[ICE_BLK_SW].es.fvw; i++) {
+		if (fv->ew[i].off != ICE_NAN_OFFSET)
+			is_any = true;
+
 		/* UDP tunnel will have UDP_OF protocol ID and VNI offset */
 		if (fv->ew[i].prot_id == (u8)ICE_PROT_UDP_OF &&
 		    fv->ew[i].off == ICE_VNI_OFFSET)
-			return ICE_PROF_TUN_UDP;
+			return ICE_PROF_TUN_UDP | ICE_PROF_ANY;
 
 		/* GRE tunnel will have GRE protocol */
 		if (fv->ew[i].prot_id == (u8)ICE_PROT_GRE_OF)
-			return ICE_PROF_TUN_GRE;
+			return ICE_PROF_TUN_GRE | ICE_PROF_ANY;
 
 		/* PPPOE tunnel will have PPPOE protocol */
 		if (fv->ew[i].prot_id == (u8)ICE_PROT_PPPOE)
-			return ICE_PROF_TUN_PPPOE;
+			return ICE_PROF_TUN_PPPOE | ICE_PROF_ANY;
 	}
 
-	return ICE_PROF_NON_TUN;
+	if (is_any)
+		return ICE_PROF_NON_TUN | ICE_PROF_ANY;
+	else
+		return ICE_PROF_NON_TUN;
 }
 
 /**
@@ -1861,8 +1868,9 @@ ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type req_profs,
  * allocated for every list entry.
  */
 enum ice_status
-ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt,
-		   ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
+ice_get_sw_fv_list(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
+		   u8 *prot_ids, u16 ids_cnt, ice_bitmap_t *bm,
+		   struct LIST_HEAD_TYPE *fv_list)
 {
 	struct ice_sw_fv_list_entry *fvl;
 	struct ice_sw_fv_list_entry *tmp;
@@ -1873,7 +1881,7 @@ ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt,
 
 	ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
 
-	if (!ids_cnt || !hw->seg)
+	if (tun_type != ICE_SW_ANY && (!ids_cnt || !hw->seg))
 		return ICE_ERR_PARAM;
 
 	ice_seg = hw->seg;
@@ -1893,28 +1901,38 @@ ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt,
 		if (!ice_is_bit_set(bm, (u16)offset))
 			continue;
 
-		for (i = 0; i < ids_cnt; i++) {
-			int j;
+		if (tun_type == ICE_SW_ANY) {
+			fvl = (struct ice_sw_fv_list_entry *)
+				ice_malloc(hw, sizeof(*fvl));
+			if (!fvl)
+				goto err;
+			fvl->fv_ptr = fv;
+			fvl->profile_id = offset;
+			LIST_ADD(&fvl->list_entry, fv_list);
+		} else {
+			for (i = 0; i < ids_cnt; i++) {
+				int j;
 
-			/* This code assumes that if a switch field vector line
-			 * has a matching protocol, then this line will contain
-			 * the entries necessary to represent every field in
-			 * that protocol header.
-			 */
-			for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
-				if (fv->ew[j].prot_id == prot_ids[i])
+				/* This code assumes that if a switch field vector line
+				 * has a matching protocol, then this line will contain
+				 * the entries necessary to represent every field in
+				 * that protocol header.
+				 */
+				for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
+					if (fv->ew[j].prot_id == prot_ids[i])
+						break;
+				if (j >= hw->blk[ICE_BLK_SW].es.fvw)
 					break;
-			if (j >= hw->blk[ICE_BLK_SW].es.fvw)
-				break;
-			if (i + 1 == ids_cnt) {
-				fvl = (struct ice_sw_fv_list_entry *)
-					ice_malloc(hw, sizeof(*fvl));
-				if (!fvl)
-					goto err;
-				fvl->fv_ptr = fv;
-				fvl->profile_id = offset;
-				LIST_ADD(&fvl->list_entry, fv_list);
-				break;
+				if (i + 1 == ids_cnt) {
+					fvl = (struct ice_sw_fv_list_entry *)
+						ice_malloc(hw, sizeof(*fvl));
+					if (!fvl)
+						goto err;
+					fvl->fv_ptr = fv;
+					fvl->profile_id = offset;
+					LIST_ADD(&fvl->list_entry, fv_list);
+					break;
+				}
 			}
 		}
 	} while (fv);
diff --git a/drivers/net/ice/base/ice_flex_pipe.h b/drivers/net/ice/base/ice_flex_pipe.h
index 23ba45564a..63910d4422 100644
--- a/drivers/net/ice/base/ice_flex_pipe.h
+++ b/drivers/net/ice/base/ice_flex_pipe.h
@@ -36,8 +36,9 @@ ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type type,
 void
 ice_init_prof_result_bm(struct ice_hw *hw);
 enum ice_status
-ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt,
-		   ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list);
+ice_get_sw_fv_list(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
+		   u8 *prot_ids, u16 ids_cnt, ice_bitmap_t *bm,
+		   struct LIST_HEAD_TYPE *fv_list);
 enum ice_status
 ice_pkg_buf_unreserve_section(struct ice_buf_build *bld, u16 count);
 u16 ice_pkg_buf_get_free_space(struct ice_buf_build *bld);
diff --git a/drivers/net/ice/base/ice_flex_type.h b/drivers/net/ice/base/ice_flex_type.h
index 59eeca0a30..3628c9d951 100644
--- a/drivers/net/ice/base/ice_flex_type.h
+++ b/drivers/net/ice/base/ice_flex_type.h
@@ -1008,6 +1008,7 @@ enum ice_prof_type {
 	ICE_PROF_TUN_GRE = 0x4,
 	ICE_PROF_TUN_PPPOE = 0x8,
 	ICE_PROF_TUN_ALL = 0xE,
+	ICE_PROF_ANY = 0x10,
 	ICE_PROF_ALL = 0xFF,
 };
 
diff --git a/drivers/net/ice/base/ice_protocol_type.h b/drivers/net/ice/base/ice_protocol_type.h
index cef8354f77..818484cafb 100644
--- a/drivers/net/ice/base/ice_protocol_type.h
+++ b/drivers/net/ice/base/ice_protocol_type.h
@@ -69,6 +69,7 @@ enum ice_sw_tunnel_type {
 	ICE_SW_TUN_UDP, /* This means all "UDP" tunnel types: VXLAN-GPE, VXLAN
 			 * and GENEVE
 			 */
+	ICE_SW_ANY,
 	ICE_SW_IPV4_TCP,
 	ICE_SW_IPV4_UDP,
 	ICE_SW_IPV6_TCP,
@@ -190,7 +191,7 @@ enum ice_prot_id {
 };
 
 #define ICE_VNI_OFFSET		12 /* offset of VNI from ICE_PROT_UDP_OF */
-
+#define ICE_NAN_OFFSET		511
 #define ICE_MAC_OFOS_HW		1
 #define ICE_MAC_IL_HW		4
 #define ICE_ETYPE_OL_HW		9
diff --git a/drivers/net/ice/base/ice_switch.c b/drivers/net/ice/base/ice_switch.c
index 1fee790c25..344518e830 100644
--- a/drivers/net/ice/base/ice_switch.c
+++ b/drivers/net/ice/base/ice_switch.c
@@ -7457,19 +7457,22 @@ ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
  * @fv_list: pointer to a list that holds the returned field vectors
  */
 static enum ice_status
-ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
+ice_get_fv(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
+	   struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
 	   ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
 {
 	enum ice_status status;
-	u8 *prot_ids;
+	u8 *prot_ids = NULL;
 	u16 i;
 
-	if (!lkups_cnt)
+	if (!lkups_cnt && tun_type != ICE_SW_ANY)
 		return ICE_SUCCESS;
 
-	prot_ids = (u8 *)ice_calloc(hw, lkups_cnt, sizeof(*prot_ids));
-	if (!prot_ids)
-		return ICE_ERR_NO_MEMORY;
+	if (lkups_cnt) {
+		prot_ids = (u8 *)ice_calloc(hw, lkups_cnt, sizeof(*prot_ids));
+		if (!prot_ids)
+			return ICE_ERR_NO_MEMORY;
+	}
 
 	for (i = 0; i < lkups_cnt; i++)
 		if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
@@ -7478,10 +7481,11 @@ ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
 		}
 
 	/* Find field vectors that include all specified protocol types */
-	status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list);
+	status = ice_get_sw_fv_list(hw, tun_type, prot_ids, lkups_cnt, bm, fv_list);
 
 free_mem:
-	ice_free(hw, prot_ids);
+	if (lkups_cnt)
+		ice_free(hw, prot_ids);
 	return status;
 }
 
@@ -7562,6 +7566,9 @@ ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
 	ice_zero_bitmap(bm, ICE_MAX_NUM_PROFILES);
 
 	switch (rinfo->tun_type) {
+	case ICE_SW_ANY:
+		prof_type = ICE_PROF_ANY;
+		break;
 	case ICE_NON_TUN:
 	case ICE_NON_TUN_QINQ:
 		prof_type = ICE_PROF_NON_TUN;
@@ -7779,6 +7786,7 @@ ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
 bool ice_is_prof_rule(enum ice_sw_tunnel_type type)
 {
 	switch (type) {
+	case ICE_SW_ANY:
 	case ICE_SW_TUN_PROFID_IPV6_ESP:
 	case ICE_SW_TUN_PROFID_IPV6_AH:
 	case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
@@ -7863,7 +7871,7 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
 	 */
 	ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
 
-	status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list);
+	status = ice_get_fv(hw, rinfo->tun_type, lkups, lkups_cnt, fv_bitmap, &rm->fv_list);
 	if (status)
 		goto err_unroll;
 
@@ -8717,15 +8725,10 @@ ice_adv_add_update_vsi_list(struct ice_hw *hw,
 	enum ice_status status;
 	u16 vsi_list_id = 0;
 
-	if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
-	    cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
-	    cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
-		return ICE_ERR_NOT_IMPL;
-
-	if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
-	     new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
-	    (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
-	     cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
+	if ((new_fltr->sw_act.fltr_act != ICE_FWD_TO_VSI &&
+	     new_fltr->sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) ||
+	    (cur_fltr->sw_act.fltr_act != ICE_FWD_TO_VSI &&
+	     cur_fltr->sw_act.fltr_act != ICE_FWD_TO_VSI_LIST))
 		return ICE_ERR_NOT_IMPL;
 
 	if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
-- 
2.25.1



* [PATCH v2 2/2] net/ice: support drop any and steer all to queue
  2022-01-18 14:57 ` [PATCH v2 1/2] net/ice/base: support ANY software type on switch filter Yuying Zhang
@ 2022-01-18 14:57   ` Yuying Zhang
  0 siblings, 0 replies; 2+ messages in thread
From: Yuying Zhang @ 2022-01-18 14:57 UTC (permalink / raw)
  To: dev, qi.z.zhang; +Cc: Yuying Zhang

This patch adds support for "drop any" and "steer all to queue" in the
switch filter. The new rte_flow pattern ANY is supported to match all
packets. Usage is listed below.

1. drop any:
flow create 0 ingress pattern any / end actions drop / end
All packets received on port 0 will be dropped.

2. steer all to queue:
flow create 0 ingress pattern any / end actions queue index 3 / end
All packets received on port 0 will be steered to queue 3.
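
The same rules can also be installed programmatically. Below is a hedged
sketch using the generic rte_flow API (it is not code from this patch);
port 0 and queue index 3 mirror the testpmd examples above, and error
handling is kept minimal.

#include <stdint.h>
#include <rte_flow.h>

static struct rte_flow *
steer_all_to_queue(uint16_t port_id, uint16_t queue)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	/* A single ANY item matches every packet received on the port. */
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ANY },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue_conf = { .index = queue };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue_conf },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error error;

	/* Validate first, then create; both report details via rte_flow_error. */
	if (rte_flow_validate(port_id, &attr, pattern, actions, &error) != 0)
		return NULL;
	return rte_flow_create(port_id, &attr, pattern, actions, &error);
}

For the "drop any" case, replace the queue action with a single
RTE_FLOW_ACTION_TYPE_DROP action (it takes no conf).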

Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
---
 drivers/net/ice/ice_generic_flow.c  | 6 ++++++
 drivers/net/ice/ice_generic_flow.h  | 3 +++
 drivers/net/ice/ice_switch_filter.c | 6 ++++++
 3 files changed, 15 insertions(+)

diff --git a/drivers/net/ice/ice_generic_flow.c b/drivers/net/ice/ice_generic_flow.c
index 406a0a953f..53b1c0b69a 100644
--- a/drivers/net/ice/ice_generic_flow.c
+++ b/drivers/net/ice/ice_generic_flow.c
@@ -65,6 +65,11 @@ enum rte_flow_item_type pattern_empty[] = {
 	RTE_FLOW_ITEM_TYPE_END,
 };
 
+enum rte_flow_item_type pattern_any[] = {
+	RTE_FLOW_ITEM_TYPE_ANY,
+	RTE_FLOW_ITEM_TYPE_END,
+};
+
 /* raw */
 enum rte_flow_item_type pattern_raw[] = {
 	RTE_FLOW_ITEM_TYPE_RAW,
@@ -2111,6 +2116,7 @@ struct ice_ptype_match {
 
 static struct ice_ptype_match ice_ptype_map[] = {
 	{pattern_raw,					ICE_PTYPE_IPV4_PAY},
+	{pattern_any,					ICE_PTYPE_IPV4_PAY},
 	{pattern_eth_ipv4,				ICE_PTYPE_IPV4_PAY},
 	{pattern_eth_ipv4_udp,				ICE_PTYPE_IPV4_UDP_PAY},
 	{pattern_eth_ipv4_tcp,				ICE_PTYPE_IPV4_TCP_PAY},
diff --git a/drivers/net/ice/ice_generic_flow.h b/drivers/net/ice/ice_generic_flow.h
index 1b030c0466..11f51a5c15 100644
--- a/drivers/net/ice/ice_generic_flow.h
+++ b/drivers/net/ice/ice_generic_flow.h
@@ -124,6 +124,9 @@
 /* empty pattern */
 extern enum rte_flow_item_type pattern_empty[];
 
+/* any pattern */
+extern enum rte_flow_item_type pattern_any[];
+
 /* raw pattern */
 extern enum rte_flow_item_type pattern_raw[];
 
diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
index bd805d9606..60f9a201f7 100644
--- a/drivers/net/ice/ice_switch_filter.c
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -206,6 +206,7 @@ static struct ice_flow_parser ice_switch_perm_parser;
 
 static struct
 ice_pattern_match_item ice_switch_pattern_dist_list[] = {
+	{pattern_any,					ICE_INSET_NONE,				ICE_INSET_NONE,				ICE_INSET_NONE},
 	{pattern_ethertype,				ICE_SW_INSET_ETHER,			ICE_INSET_NONE,				ICE_INSET_NONE},
 	{pattern_ethertype_vlan,			ICE_SW_INSET_MAC_VLAN,			ICE_INSET_NONE,				ICE_INSET_NONE},
 	{pattern_ethertype_qinq,			ICE_SW_INSET_MAC_QINQ,			ICE_INSET_NONE,				ICE_INSET_NONE},
@@ -289,6 +290,7 @@ ice_pattern_match_item ice_switch_pattern_dist_list[] = {
 
 static struct
 ice_pattern_match_item ice_switch_pattern_perm_list[] = {
+	{pattern_any,					ICE_INSET_NONE,				ICE_INSET_NONE,				ICE_INSET_NONE},
 	{pattern_ethertype,				ICE_SW_INSET_ETHER,			ICE_INSET_NONE,				ICE_INSET_NONE},
 	{pattern_ethertype_vlan,			ICE_SW_INSET_MAC_VLAN,			ICE_INSET_NONE,				ICE_INSET_NONE},
 	{pattern_ethertype_qinq,			ICE_SW_INSET_MAC_QINQ,			ICE_INSET_NONE,				ICE_INSET_NONE},
@@ -582,6 +584,10 @@ ice_switch_parse_pattern(const struct rte_flow_item pattern[],
 		item_type = item->type;
 
 		switch (item_type) {
+		case RTE_FLOW_ITEM_TYPE_ANY:
+			*tun_type = ICE_SW_ANY;
+			break;
+
 		case RTE_FLOW_ITEM_TYPE_ETH:
 			eth_spec = item->spec;
 			eth_mask = item->mask;
-- 
2.25.1

