* [dpdk-dev] [PATCH RFC 1/2] net/ice/base: support drop any and steer all to queue
@ 2021-08-30 7:56 Yuying Zhang
2021-08-30 7:56 ` [dpdk-dev] [PATCH RFC 2/2] net/ice: " Yuying Zhang
0 siblings, 1 reply; 2+ messages in thread
From: Yuying Zhang @ 2021-08-30 7:56 UTC (permalink / raw)
To: dev, qi.z.zhang; +Cc: Yuying Zhang
This patch adds support for the "drop any" and "steer all to queue"
actions in the switch filter.
Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
---
drivers/net/ice/base/ice_flex_pipe.c | 73 +++++++++++++++---------
drivers/net/ice/base/ice_flex_pipe.h | 5 +-
drivers/net/ice/base/ice_flex_type.h | 1 +
drivers/net/ice/base/ice_protocol_type.h | 3 +-
drivers/net/ice/base/ice_switch.c | 39 +++++++------
5 files changed, 73 insertions(+), 48 deletions(-)
diff --git a/drivers/net/ice/base/ice_flex_pipe.c b/drivers/net/ice/base/ice_flex_pipe.c
index cf470bc4f0..2ebef279a0 100644
--- a/drivers/net/ice/base/ice_flex_pipe.c
+++ b/drivers/net/ice/base/ice_flex_pipe.c
@@ -1711,23 +1711,30 @@ static enum ice_prof_type
ice_get_sw_prof_type(struct ice_hw *hw, struct ice_fv *fv)
{
u16 i;
+ bool is_any = false;
for (i = 0; i < hw->blk[ICE_BLK_SW].es.fvw; i++) {
+ if (fv->ew[i].off != ICE_NAN_OFFSET)
+ is_any = true;
+
/* UDP tunnel will have UDP_OF protocol ID and VNI offset */
if (fv->ew[i].prot_id == (u8)ICE_PROT_UDP_OF &&
fv->ew[i].off == ICE_VNI_OFFSET)
- return ICE_PROF_TUN_UDP;
+ return ICE_PROF_TUN_UDP | ICE_PROF_ANY;
/* GRE tunnel will have GRE protocol */
if (fv->ew[i].prot_id == (u8)ICE_PROT_GRE_OF)
- return ICE_PROF_TUN_GRE;
+ return ICE_PROF_TUN_GRE | ICE_PROF_ANY;
/* PPPOE tunnel will have PPPOE protocol */
if (fv->ew[i].prot_id == (u8)ICE_PROT_PPPOE)
- return ICE_PROF_TUN_PPPOE;
+ return ICE_PROF_TUN_PPPOE | ICE_PROF_ANY;
}
- return ICE_PROF_NON_TUN;
+ if (is_any)
+ return ICE_PROF_NON_TUN | ICE_PROF_ANY;
+ else
+ return ICE_PROF_NON_TUN;
}
/**
@@ -1764,7 +1771,6 @@ ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type req_profs,
if (fv) {
/* Determine field vector type */
prof_type = ice_get_sw_prof_type(hw, fv);
-
if (req_profs & prof_type)
ice_set_bit((u16)offset, bm);
}
@@ -1787,8 +1793,9 @@ ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type req_profs,
* allocated for every list entry.
*/
enum ice_status
-ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt,
- ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
+ice_get_sw_fv_list(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
+ u8 *prot_ids, u16 ids_cnt, ice_bitmap_t *bm,
+ struct LIST_HEAD_TYPE *fv_list)
{
struct ice_sw_fv_list_entry *fvl;
struct ice_sw_fv_list_entry *tmp;
@@ -1799,7 +1806,7 @@ ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt,
ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
- if (!ids_cnt || !hw->seg)
+ if (tun_type != ICE_ANY && (!ids_cnt || !hw->seg))
return ICE_ERR_PARAM;
ice_seg = hw->seg;
@@ -1819,28 +1826,38 @@ ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt,
if (!ice_is_bit_set(bm, (u16)offset))
continue;
- for (i = 0; i < ids_cnt; i++) {
- int j;
+ if (tun_type == ICE_ANY) {
+ fvl = (struct ice_sw_fv_list_entry *)
+ ice_malloc(hw, sizeof(*fvl));
+ if (!fvl)
+ goto err;
+ fvl->fv_ptr = fv;
+ fvl->profile_id = offset;
+ LIST_ADD(&fvl->list_entry, fv_list);
+ } else {
+ for (i = 0; i < ids_cnt; i++) {
+ int j;
- /* This code assumes that if a switch field vector line
- * has a matching protocol, then this line will contain
- * the entries necessary to represent every field in
- * that protocol header.
- */
- for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
- if (fv->ew[j].prot_id == prot_ids[i])
+ /* This code assumes that if a switch field vector
+ * line has a matching protocol, then this line
+ * will contain the entries necessary to represent
+ * every field in that protocol header.
+ */
+ for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
+ if (fv->ew[j].prot_id == prot_ids[i])
+ break;
+ if (j >= hw->blk[ICE_BLK_SW].es.fvw)
break;
- if (j >= hw->blk[ICE_BLK_SW].es.fvw)
- break;
- if (i + 1 == ids_cnt) {
- fvl = (struct ice_sw_fv_list_entry *)
- ice_malloc(hw, sizeof(*fvl));
- if (!fvl)
- goto err;
- fvl->fv_ptr = fv;
- fvl->profile_id = offset;
- LIST_ADD(&fvl->list_entry, fv_list);
- break;
+ if (i + 1 == ids_cnt) {
+ fvl = (struct ice_sw_fv_list_entry *)
+ ice_malloc(hw, sizeof(*fvl));
+ if (!fvl)
+ goto err;
+ fvl->fv_ptr = fv;
+ fvl->profile_id = offset;
+ LIST_ADD(&fvl->list_entry, fv_list);
+ break;
+ }
}
}
} while (fv);
diff --git a/drivers/net/ice/base/ice_flex_pipe.h b/drivers/net/ice/base/ice_flex_pipe.h
index 58e3c1d1ec..ca9b216f69 100644
--- a/drivers/net/ice/base/ice_flex_pipe.h
+++ b/drivers/net/ice/base/ice_flex_pipe.h
@@ -36,8 +36,9 @@ ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type type,
void
ice_init_prof_result_bm(struct ice_hw *hw);
enum ice_status
-ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt,
- ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list);
+ice_get_sw_fv_list(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
+ u8 *prot_ids, u16 ids_cnt, ice_bitmap_t *bm,
+ struct LIST_HEAD_TYPE *fv_list);
enum ice_status
ice_pkg_buf_unreserve_section(struct ice_buf_build *bld, u16 count);
u16 ice_pkg_buf_get_free_space(struct ice_buf_build *bld);
diff --git a/drivers/net/ice/base/ice_flex_type.h b/drivers/net/ice/base/ice_flex_type.h
index c7f92b9150..b63b984688 100644
--- a/drivers/net/ice/base/ice_flex_type.h
+++ b/drivers/net/ice/base/ice_flex_type.h
@@ -916,6 +916,7 @@ enum ice_prof_type {
ICE_PROF_TUN_GRE = 0x4,
ICE_PROF_TUN_PPPOE = 0x8,
ICE_PROF_TUN_ALL = 0xE,
+ ICE_PROF_ANY = 0x10,
ICE_PROF_ALL = 0xFF,
};
diff --git a/drivers/net/ice/base/ice_protocol_type.h b/drivers/net/ice/base/ice_protocol_type.h
index d769ad0580..0a8c39b369 100644
--- a/drivers/net/ice/base/ice_protocol_type.h
+++ b/drivers/net/ice/base/ice_protocol_type.h
@@ -109,6 +109,7 @@ enum ice_sw_tunnel_type {
ICE_SW_TUN_PPPOE_PAY_QINQ,
ICE_SW_TUN_PPPOE_IPV4_QINQ,
ICE_SW_TUN_PPPOE_IPV6_QINQ,
+ ICE_ANY,
ICE_ALL_TUNNELS /* All tunnel types including NVGRE */
};
@@ -164,7 +165,7 @@ enum ice_prot_id {
};
#define ICE_VNI_OFFSET 12 /* offset of VNI from ICE_PROT_UDP_OF */
-
+#define ICE_NAN_OFFSET 511
#define ICE_MAC_OFOS_HW 1
#define ICE_MAC_IL_HW 4
#define ICE_ETYPE_OL_HW 9
diff --git a/drivers/net/ice/base/ice_switch.c b/drivers/net/ice/base/ice_switch.c
index 4568242c10..4bf9761909 100644
--- a/drivers/net/ice/base/ice_switch.c
+++ b/drivers/net/ice/base/ice_switch.c
@@ -7235,19 +7235,22 @@ ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
* @fv_list: pointer to a list that holds the returned field vectors
*/
static enum ice_status
-ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
+ice_get_fv(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
+ struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
{
enum ice_status status;
u8 *prot_ids;
u16 i;
- if (!lkups_cnt)
+ if (!lkups_cnt && tun_type != ICE_ANY)
return ICE_SUCCESS;
- prot_ids = (u8 *)ice_calloc(hw, lkups_cnt, sizeof(*prot_ids));
- if (!prot_ids)
- return ICE_ERR_NO_MEMORY;
+ if (lkups_cnt) {
+ prot_ids = (u8 *)ice_calloc(hw, lkups_cnt, sizeof(*prot_ids));
+ if (!prot_ids)
+ return ICE_ERR_NO_MEMORY;
+ }
for (i = 0; i < lkups_cnt; i++)
if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
@@ -7256,10 +7259,12 @@ ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
}
/* Find field vectors that include all specified protocol types */
- status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list);
+ status = ice_get_sw_fv_list(hw, tun_type, prot_ids,
+ lkups_cnt, bm, fv_list);
free_mem:
- ice_free(hw, prot_ids);
+ if (lkups_cnt)
+ ice_free(hw, prot_ids);
return status;
}
@@ -7340,6 +7345,9 @@ ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
ice_zero_bitmap(bm, ICE_MAX_NUM_PROFILES);
switch (rinfo->tun_type) {
+ case ICE_ANY:
+ prof_type = ICE_PROF_ANY;
+ break;
case ICE_NON_TUN:
case ICE_NON_TUN_QINQ:
prof_type = ICE_PROF_NON_TUN;
@@ -7495,6 +7503,7 @@ ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
bool ice_is_prof_rule(enum ice_sw_tunnel_type type)
{
switch (type) {
+ case ICE_ANY:
case ICE_SW_TUN_PROFID_IPV6_ESP:
case ICE_SW_TUN_PROFID_IPV6_AH:
case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
@@ -7579,7 +7588,8 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
*/
ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
- status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list);
+ status = ice_get_fv(hw, rinfo->tun_type, lkups, lkups_cnt,
+ fv_bitmap, &rm->fv_list);
if (status)
goto err_unroll;
@@ -8314,15 +8324,10 @@ ice_adv_add_update_vsi_list(struct ice_hw *hw,
enum ice_status status;
u16 vsi_list_id = 0;
- if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
- cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
- cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
- return ICE_ERR_NOT_IMPL;
-
- if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
- new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
- (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
- cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
+ if ((cur_fltr->sw_act.fltr_act != ICE_FWD_TO_VSI &&
+ cur_fltr->sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) ||
+ (new_fltr->sw_act.fltr_act != ICE_FWD_TO_VSI &&
+ new_fltr->sw_act.fltr_act != ICE_FWD_TO_VSI_LIST))
return ICE_ERR_NOT_IMPL;
if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
--
2.25.1
^ permalink raw reply [flat|nested] 2+ messages in thread
* [dpdk-dev] [PATCH RFC 2/2] net/ice: support drop any and steer all to queue
2021-08-30 7:56 [dpdk-dev] [PATCH RFC 1/2] net/ice/base: support drop any and steer all to queue Yuying Zhang
@ 2021-08-30 7:56 ` Yuying Zhang
0 siblings, 0 replies; 2+ messages in thread
From: Yuying Zhang @ 2021-08-30 7:56 UTC (permalink / raw)
To: dev, qi.z.zhang; +Cc: Yuying Zhang
This patch adds support for the "drop any" and "steer all to queue"
actions in the switch filter. It introduces the new rte_flow pattern
item "any", which matches all packets. Usage examples are listed below.
1. drop any:
flow create 0 ingress pattern any / end actions drop / end
All packets received in port 0 will be dropped.
2. steer all to queue:
flow create 0 ingress pattern any / end actions queue index 3 / end
All packets received in port 0 will be steered to queue 3.
Signed-off-by: Yuying Zhang <yuying.zhang@intel.com>
---
drivers/net/ice/ice_generic_flow.c | 7 +++++++
drivers/net/ice/ice_generic_flow.h | 3 +++
drivers/net/ice/ice_switch_filter.c | 9 +++++++++
3 files changed, 19 insertions(+)
diff --git a/drivers/net/ice/ice_generic_flow.c b/drivers/net/ice/ice_generic_flow.c
index 25d15a8adb..7ab0f4be24 100644
--- a/drivers/net/ice/ice_generic_flow.c
+++ b/drivers/net/ice/ice_generic_flow.c
@@ -65,6 +65,12 @@ enum rte_flow_item_type pattern_empty[] = {
RTE_FLOW_ITEM_TYPE_END,
};
+/* any */
+enum rte_flow_item_type pattern_any[] = {
+ RTE_FLOW_ITEM_TYPE_ANY,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
/* L2 */
enum rte_flow_item_type pattern_ethertype[] = {
RTE_FLOW_ITEM_TYPE_ETH,
@@ -2152,6 +2158,7 @@ static struct ice_ptype_match ice_ptype_map[] = {
{pattern_eth_ipv4_nvgre_eth_ipv4_udp, ICE_MAC_IPV4_TUN_IPV4_UDP_PAY},
{pattern_eth_ipv4_nvgre_eth_ipv4_tcp, ICE_MAC_IPV4_TUN_IPV4_TCP},
{pattern_empty, 0},
+ {pattern_any, 0},
};
static bool
diff --git a/drivers/net/ice/ice_generic_flow.h b/drivers/net/ice/ice_generic_flow.h
index 18918769d2..69d3698026 100644
--- a/drivers/net/ice/ice_generic_flow.h
+++ b/drivers/net/ice/ice_generic_flow.h
@@ -207,6 +207,9 @@
/* empty pattern */
extern enum rte_flow_item_type pattern_empty[];
+/* any */
+extern enum rte_flow_item_type pattern_any[];
+
/* L2 */
extern enum rte_flow_item_type pattern_ethertype[];
extern enum rte_flow_item_type pattern_ethertype_vlan[];
diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
index 6525e6c115..93399a0291 100644
--- a/drivers/net/ice/ice_switch_filter.c
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -247,6 +247,8 @@ ice_pattern_match_item ice_switch_pattern_dist_list[] = {
ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE},
{pattern_eth_qinq_pppoes_ipv6,
ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE},
+ {pattern_any,
+ ICE_INSET_NONE, ICE_INSET_NONE},
};
static struct
@@ -349,6 +351,8 @@ ice_pattern_match_item ice_switch_pattern_perm_list[] = {
ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE},
{pattern_eth_qinq_pppoes_ipv6,
ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE},
+ {pattern_any,
+ ICE_INSET_NONE, ICE_INSET_NONE},
};
static int
@@ -505,6 +509,10 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
item_type = item->type;
switch (item_type) {
+ case RTE_FLOW_ITEM_TYPE_ANY:
+ *tun_type = ICE_ANY;
+ break;
+
case RTE_FLOW_ITEM_TYPE_ETH:
eth_spec = item->spec;
eth_mask = item->mask;
@@ -1628,6 +1636,7 @@ static bool
ice_is_profile_rule(enum ice_sw_tunnel_type tun_type)
{
switch (tun_type) {
+ case ICE_ANY:
case ICE_SW_TUN_PROFID_IPV6_ESP:
case ICE_SW_TUN_PROFID_IPV6_AH:
case ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3:
--
2.25.1
^ permalink raw reply [flat|nested] 2+ messages in thread
end of thread, other threads:[~2021-08-30 8:13 UTC | newest]
Thread overview: 2+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2021-08-30 7:56 [dpdk-dev] [PATCH RFC 1/2] net/ice/base: support drop any and steer all to queue Yuying Zhang
2021-08-30 7:56 ` [dpdk-dev] [PATCH RFC 2/2] net/ice: " Yuying Zhang
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).