* [dpdk-stable] [DPDK 2/4] net/ice: fix tunnel type for switch rule
[not found] <20200628031422.70053-1-wei.zhao1@intel.com>
@ 2020-06-28 3:14 ` Wei Zhao
2020-06-28 3:14 ` [dpdk-stable] [DPDK 3/4] net/ice: support switch flow for specific L4 type Wei Zhao
2020-06-28 3:14 ` [dpdk-stable] [DPDK 4/4] net/ice: add input set byte number check Wei Zhao
2 siblings, 0 replies; 4+ messages in thread
From: Wei Zhao @ 2020-06-28 3:14 UTC (permalink / raw)
To: qabuild; +Cc: Wei Zhao, stable
This patch adds a check on the protocol type of IPv4 packets:
the tunnel type needs to be updated when NVGRE is in the payload.
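For illustration, a rule that exercises this path matches GRE
(IP protocol number 0x2F, the value the new ICE_IPV4_PROTO_NVGRE
macro tests for) in the IPv4 header. Below is a minimal sketch
using the generic rte_flow API; the function name, port id and
queue index are placeholders, not taken from this patch:

    #include <rte_flow.h>

    /* Illustrative only: match IPv4 packets whose next_proto_id is
     * GRE (0x2F); with this fix the driver selects the
     * ICE_SW_TUN_AND_NON_TUN tunnel type for such a rule. */
    static struct rte_flow *
    create_gre_rule(uint16_t port_id, struct rte_flow_error *err)
    {
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_item_ipv4 ip_spec = {
            .hdr = { .next_proto_id = 0x2F },
        };
        struct rte_flow_item_ipv4 ip_mask = {
            .hdr = { .next_proto_id = 0xFF },
        };
        struct rte_flow_item pattern[] = {
            { .type = RTE_FLOW_ITEM_TYPE_ETH },
            { .type = RTE_FLOW_ITEM_TYPE_IPV4,
              .spec = &ip_spec, .mask = &ip_mask },
            { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action_queue queue = { .index = 0 };
        struct rte_flow_action actions[] = {
            { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
            { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_create(port_id, &attr, pattern, actions, err);
    }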
Fixes: 6bc7628c5e0b ("net/ice: change default tunnel type")
Cc: stable@dpdk.org
Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
---
drivers/net/ice/ice_switch_filter.c | 7 ++++++-
1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
index 3c0c36bce..c607e8d17 100644
--- a/drivers/net/ice/ice_switch_filter.c
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -28,6 +28,7 @@
#define MAX_QGRP_NUM_TYPE 7
#define ICE_PPP_IPV4_PROTO 0x0021
#define ICE_PPP_IPV6_PROTO 0x0057
+#define ICE_IPV4_PROTO_NVGRE 0x002F
#define ICE_SW_INSET_ETHER ( \
ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)
@@ -632,6 +633,10 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
list[t].m_u.ipv4_hdr.protocol =
ipv4_mask->hdr.next_proto_id;
}
+ if ((ipv4_spec->hdr.next_proto_id &
+ ipv4_mask->hdr.next_proto_id) ==
+ ICE_IPV4_PROTO_NVGRE)
+ *tun_type = ICE_SW_TUN_AND_NON_TUN;
if (ipv4_mask->hdr.type_of_service) {
list[t].h_u.ipv4_hdr.tos =
ipv4_spec->hdr.type_of_service;
@@ -1526,7 +1531,7 @@ ice_switch_parse_pattern_action(struct ice_adapter *ad,
const struct rte_flow_item *item = pattern;
uint16_t item_num = 0;
enum ice_sw_tunnel_type tun_type =
- ICE_SW_TUN_AND_NON_TUN;
+ ICE_NON_TUN;
struct ice_pattern_match_item *pattern_match_item = NULL;
for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
--
2.19.1
* [dpdk-stable] [DPDK 3/4] net/ice: support switch flow for specific L4 type
[not found] <20200628031422.70053-1-wei.zhao1@intel.com>
2020-06-28 3:14 ` [dpdk-stable] [DPDK 2/4] net/ice: fix tunnel type for switch rule Wei Zhao
@ 2020-06-28 3:14 ` Wei Zhao
2020-06-28 3:14 ` [dpdk-stable] [DPDK 4/4] net/ice: add input set byte number check Wei Zhao
2 siblings, 0 replies; 4+ messages in thread
From: Wei Zhao @ 2020-06-28 3:14 UTC (permalink / raw)
To: qabuild; +Cc: Wei Zhao, stable
This patch adds more specific tunnel types for IPv4/IPv6 packets.
It enables switch filter rules that match the TCP/UDP layer of
IPv4/IPv6 as L4 payload without requiring the L4 dst/src port
numbers in the input set.
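For example, with this change a pattern such as eth / ipv4 / udp
with no UDP spec or mask can be programmed: the driver now maps it
to ICE_SW_IPV4_UDP instead of leaving the tunnel type unresolved.
A minimal sketch of such a pattern (illustrative only, not taken
from this patch):

    /* Any-UDP-over-IPv4 rule: the UDP item carries no spec/mask,
     * so no dst/src port bytes are added to the input set. */
    struct rte_flow_item pattern[] = {
        { .type = RTE_FLOW_ITEM_TYPE_ETH },
        { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
        { .type = RTE_FLOW_ITEM_TYPE_UDP },
        { .type = RTE_FLOW_ITEM_TYPE_END },
    };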
Fixes: 47d460d63233 ("net/ice: rework switch filter")
Cc: stable@dpdk.org
Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
---
drivers/net/ice/ice_switch_filter.c | 27 ++++++++++++++++++++-------
1 file changed, 20 insertions(+), 7 deletions(-)
diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
index c607e8d17..c1ea74c73 100644
--- a/drivers/net/ice/ice_switch_filter.c
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -29,6 +29,8 @@
#define ICE_PPP_IPV4_PROTO 0x0021
#define ICE_PPP_IPV6_PROTO 0x0057
#define ICE_IPV4_PROTO_NVGRE 0x002F
+#define ICE_TUN_VXLAN_VALID 0x0001
+#define ICE_TUN_NVGRE_VALID 0x0002
#define ICE_SW_INSET_ETHER ( \
ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)
@@ -471,11 +473,11 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
const struct rte_flow_item_l2tpv3oip *l2tp_spec, *l2tp_mask;
const struct rte_flow_item_pfcp *pfcp_spec, *pfcp_mask;
uint64_t input_set = ICE_INSET_NONE;
+ uint16_t tunnel_valid = 0;
bool pppoe_elem_valid = 0;
bool pppoe_patt_valid = 0;
bool pppoe_prot_valid = 0;
bool profile_rule = 0;
- bool tunnel_valid = 0;
bool ipv6_valiad = 0;
bool ipv4_valiad = 0;
bool udp_valiad = 0;
@@ -924,7 +926,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
return 0;
}
- tunnel_valid = 1;
+ tunnel_valid = ICE_TUN_VXLAN_VALID;
if (vxlan_spec && vxlan_mask) {
list[t].type = ICE_VXLAN;
if (vxlan_mask->vni[0] ||
@@ -960,7 +962,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
"Invalid NVGRE item");
return 0;
}
- tunnel_valid = 1;
+ tunnel_valid = ICE_TUN_NVGRE_VALID;
if (nvgre_spec && nvgre_mask) {
list[t].type = ICE_NVGRE;
if (nvgre_mask->tni[0] ||
@@ -1325,6 +1327,21 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
*tun_type = ICE_SW_TUN_PPPOE;
}
+ if (*tun_type == ICE_NON_TUN) {
+ if (tunnel_valid == ICE_TUN_VXLAN_VALID)
+ *tun_type = ICE_SW_TUN_VXLAN;
+ else if (tunnel_valid == ICE_TUN_NVGRE_VALID)
+ *tun_type = ICE_SW_TUN_NVGRE;
+ else if (ipv4_valiad && tcp_valiad)
+ *tun_type = ICE_SW_IPV4_TCP;
+ else if (ipv4_valiad && udp_valiad)
+ *tun_type = ICE_SW_IPV4_UDP;
+ else if (ipv6_valiad && tcp_valiad)
+ *tun_type = ICE_SW_IPV6_TCP;
+ else if (ipv6_valiad && udp_valiad)
+ *tun_type = ICE_SW_IPV6_UDP;
+ }
+
*lkups_num = t;
return input_set;
@@ -1536,10 +1553,6 @@ ice_switch_parse_pattern_action(struct ice_adapter *ad,
for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
item_num++;
- if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN)
- tun_type = ICE_SW_TUN_VXLAN;
- if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE)
- tun_type = ICE_SW_TUN_NVGRE;
if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
const struct rte_flow_item_eth *eth_mask;
if (item->mask)
--
2.19.1
* [dpdk-stable] [DPDK 4/4] net/ice: add input set byte number check
[not found] <20200628031422.70053-1-wei.zhao1@intel.com>
2020-06-28 3:14 ` [dpdk-stable] [DPDK 2/4] net/ice: fix tunnel type for switch rule Wei Zhao
2020-06-28 3:14 ` [dpdk-stable] [DPDK 3/4] net/ice: support switch flow for specific L4 type Wei Zhao
@ 2020-06-28 3:14 ` Wei Zhao
2 siblings, 0 replies; 4+ messages in thread
From: Wei Zhao @ 2020-06-28 3:14 UTC (permalink / raw)
To: qabuild; +Cc: Wei Zhao, stable
This patch adds a check on the total number of input set bytes,
as the hardware limits the input set of a switch rule to
32 bytes.
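As a worked example (a hypothetical rule, with the per-field byte
counts taken from the increments added below): matching the full
IPv6 source and destination addresses already contributes
16 + 16 = 32 mask bytes, so adding TCP src/dst ports (+2 bytes
each) yields 36 bytes and the rule is rejected:

    ipv6 src addr (16 mask bytes)  -> +16
    ipv6 dst addr (16 mask bytes)  -> +16
    tcp src + dst port             ->  +4
                             total     36 > 32, rule rejected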
Fixes: 47d460d63233 ("net/ice: rework switch filter")
Cc: stable@dpdk.org
Signed-off-by: Wei Zhao <wei.zhao1@intel.com>
---
drivers/net/ice/ice_switch_filter.c | 43 +++++++++++++++++++++++++++--
1 file changed, 40 insertions(+), 3 deletions(-)
diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
index c1ea74c73..a4d7fcb14 100644
--- a/drivers/net/ice/ice_switch_filter.c
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -25,7 +25,8 @@
#include "ice_generic_flow.h"
-#define MAX_QGRP_NUM_TYPE 7
+#define MAX_QGRP_NUM_TYPE 7
+#define MAX_INPUT_SET_BYTE 32
#define ICE_PPP_IPV4_PROTO 0x0021
#define ICE_PPP_IPV6_PROTO 0x0057
#define ICE_IPV4_PROTO_NVGRE 0x002F
@@ -473,6 +474,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
const struct rte_flow_item_l2tpv3oip *l2tp_spec, *l2tp_mask;
const struct rte_flow_item_pfcp *pfcp_spec, *pfcp_mask;
uint64_t input_set = ICE_INSET_NONE;
+ uint16_t field_vec_byte = 0;
uint16_t tunnel_valid = 0;
bool pppoe_elem_valid = 0;
bool pppoe_patt_valid = 0;
@@ -540,6 +542,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
m->src_addr[j] =
eth_mask->src.addr_bytes[j];
i = 1;
+ field_vec_byte++;
}
if (eth_mask->dst.addr_bytes[j]) {
h->dst_addr[j] =
@@ -547,6 +550,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
m->dst_addr[j] =
eth_mask->dst.addr_bytes[j];
i = 1;
+ field_vec_byte++;
}
}
if (i)
@@ -557,6 +561,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
eth_spec->type;
list[t].m_u.ethertype.ethtype_id =
eth_mask->type;
+ field_vec_byte += 2;
t++;
}
}
@@ -616,24 +621,28 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
ipv4_spec->hdr.src_addr;
list[t].m_u.ipv4_hdr.src_addr =
ipv4_mask->hdr.src_addr;
+ field_vec_byte += 2;
}
if (ipv4_mask->hdr.dst_addr) {
list[t].h_u.ipv4_hdr.dst_addr =
ipv4_spec->hdr.dst_addr;
list[t].m_u.ipv4_hdr.dst_addr =
ipv4_mask->hdr.dst_addr;
+ field_vec_byte += 2;
}
if (ipv4_mask->hdr.time_to_live) {
list[t].h_u.ipv4_hdr.time_to_live =
ipv4_spec->hdr.time_to_live;
list[t].m_u.ipv4_hdr.time_to_live =
ipv4_mask->hdr.time_to_live;
+ field_vec_byte++;
}
if (ipv4_mask->hdr.next_proto_id) {
list[t].h_u.ipv4_hdr.protocol =
ipv4_spec->hdr.next_proto_id;
list[t].m_u.ipv4_hdr.protocol =
ipv4_mask->hdr.next_proto_id;
+ field_vec_byte++;
}
if ((ipv4_spec->hdr.next_proto_id &
ipv4_mask->hdr.next_proto_id) ==
@@ -644,6 +653,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
ipv4_spec->hdr.type_of_service;
list[t].m_u.ipv4_hdr.tos =
ipv4_mask->hdr.type_of_service;
+ field_vec_byte++;
}
t++;
}
@@ -721,12 +731,14 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
ipv6_spec->hdr.src_addr[j];
s->src_addr[j] =
ipv6_mask->hdr.src_addr[j];
+ field_vec_byte++;
}
if (ipv6_mask->hdr.dst_addr[j]) {
f->dst_addr[j] =
ipv6_spec->hdr.dst_addr[j];
s->dst_addr[j] =
ipv6_mask->hdr.dst_addr[j];
+ field_vec_byte++;
}
}
if (ipv6_mask->hdr.proto) {
@@ -734,12 +746,14 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
ipv6_spec->hdr.proto;
s->next_hdr =
ipv6_mask->hdr.proto;
+ field_vec_byte++;
}
if (ipv6_mask->hdr.hop_limits) {
f->hop_limit =
ipv6_spec->hdr.hop_limits;
s->hop_limit =
ipv6_mask->hdr.hop_limits;
+ field_vec_byte++;
}
if (ipv6_mask->hdr.vtc_flow &
rte_cpu_to_be_32
@@ -757,6 +771,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
RTE_IPV6_HDR_TC_MASK) >>
RTE_IPV6_HDR_TC_SHIFT;
s->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
+ field_vec_byte += 4;
}
t++;
}
@@ -802,14 +817,16 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
udp_spec->hdr.src_port;
list[t].m_u.l4_hdr.src_port =
udp_mask->hdr.src_port;
+ field_vec_byte += 2;
}
if (udp_mask->hdr.dst_port) {
list[t].h_u.l4_hdr.dst_port =
udp_spec->hdr.dst_port;
list[t].m_u.l4_hdr.dst_port =
udp_mask->hdr.dst_port;
+ field_vec_byte += 2;
}
- t++;
+ t++;
}
break;
@@ -854,12 +871,14 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
tcp_spec->hdr.src_port;
list[t].m_u.l4_hdr.src_port =
tcp_mask->hdr.src_port;
+ field_vec_byte += 2;
}
if (tcp_mask->hdr.dst_port) {
list[t].h_u.l4_hdr.dst_port =
tcp_spec->hdr.dst_port;
list[t].m_u.l4_hdr.dst_port =
tcp_mask->hdr.dst_port;
+ field_vec_byte += 2;
}
t++;
}
@@ -899,12 +918,14 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
sctp_spec->hdr.src_port;
list[t].m_u.sctp_hdr.src_port =
sctp_mask->hdr.src_port;
+ field_vec_byte += 2;
}
if (sctp_mask->hdr.dst_port) {
list[t].h_u.sctp_hdr.dst_port =
sctp_spec->hdr.dst_port;
list[t].m_u.sctp_hdr.dst_port =
sctp_mask->hdr.dst_port;
+ field_vec_byte += 2;
}
t++;
}
@@ -942,6 +963,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
vxlan_mask->vni[0];
input_set |=
ICE_INSET_TUN_VXLAN_VNI;
+ field_vec_byte += 2;
}
t++;
}
@@ -978,6 +1000,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
nvgre_mask->tni[0];
input_set |=
ICE_INSET_TUN_NVGRE_TNI;
+ field_vec_byte += 2;
}
t++;
}
@@ -1006,6 +1029,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
list[t].m_u.vlan_hdr.vlan =
vlan_mask->tci;
input_set |= ICE_INSET_VLAN_OUTER;
+ field_vec_byte += 2;
}
if (vlan_mask->inner_type) {
list[t].h_u.vlan_hdr.type =
@@ -1013,6 +1037,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
list[t].m_u.vlan_hdr.type =
vlan_mask->inner_type;
input_set |= ICE_INSET_ETHERTYPE;
+ field_vec_byte += 2;
}
t++;
}
@@ -1053,6 +1078,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
list[t].m_u.pppoe_hdr.session_id =
pppoe_mask->session_id;
input_set |= ICE_INSET_PPPOE_SESSION;
+ field_vec_byte += 2;
}
t++;
pppoe_elem_valid = 1;
@@ -1085,7 +1111,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
list[t].m_u.pppoe_hdr.ppp_prot_id =
pppoe_proto_mask->proto_id;
input_set |= ICE_INSET_PPPOE_PROTO;
-
+ field_vec_byte += 2;
pppoe_prot_valid = 1;
}
if ((pppoe_proto_mask->proto_id &
@@ -1142,6 +1168,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
list[t].m_u.esp_hdr.spi =
esp_mask->hdr.spi;
input_set |= ICE_INSET_ESP_SPI;
+ field_vec_byte += 4;
t++;
}
@@ -1198,6 +1225,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
list[t].m_u.ah_hdr.spi =
ah_mask->spi;
input_set |= ICE_INSET_AH_SPI;
+ field_vec_byte += 4;
t++;
}
@@ -1237,6 +1265,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
list[t].m_u.l2tpv3_sess_hdr.session_id =
l2tp_mask->session_id;
input_set |= ICE_INSET_L2TPV3OIP_SESSION_ID;
+ field_vec_byte += 4;
t++;
}
@@ -1342,6 +1371,14 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
*tun_type = ICE_SW_IPV6_UDP;
}
+ if (field_vec_byte >= MAX_INPUT_SET_BYTE) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "too much input set");
+ return -ENOTSUP;
+ }
+
*lkups_num = t;
return input_set;
--
2.19.1