* [dpdk-dev] [PATCH 0/3] net/ixgbe: enable signature match and ipv6 for consistent API.
@ 2017-05-26 19:52 Qi Zhang
2017-05-26 19:52 ` [dpdk-dev] [PATCH 1/3] net/ixgbe: replace macro with inline function Qi Zhang
` (4 more replies)
0 siblings, 5 replies; 21+ messages in thread
From: Qi Zhang @ 2017-05-26 19:52 UTC (permalink / raw)
To: helin.zhang, wenzhuo.lu; +Cc: dev, Qi Zhang
The patchset is based on
http://dpdk.org/dev/patchwork/patch/24482/
http://dpdk.org/dev/patchwork/patch/24476/
Qi Zhang (3):
net/ixgbe: replace macro with inline function
net/ixgbe: enable signature match for consistent API
net/ixgbe: enable IPv6 for consistent API
drivers/net/ixgbe/ixgbe_flow.c | 371 +++++++++++++++++++++++++----------------
1 file changed, 226 insertions(+), 145 deletions(-)
--
2.7.4
* [dpdk-dev] [PATCH 1/3] net/ixgbe: replace macro with inline function
2017-05-26 19:52 [dpdk-dev] [PATCH 0/3] net/ixgbe: enable signature match and ipv6 for consistent API Qi Zhang
@ 2017-05-26 19:52 ` Qi Zhang
2017-06-01 5:09 ` Lu, Wenzhuo
2017-05-26 19:52 ` [dpdk-dev] [PATCH 2/3] net/ixgbe: enable signature match for consistent API Qi Zhang
` (3 subsequent siblings)
4 siblings, 1 reply; 21+ messages in thread
From: Qi Zhang @ 2017-05-26 19:52 UTC (permalink / raw)
To: helin.zhang, wenzhuo.lu; +Cc: dev, Qi Zhang
Code cleanup: replace the macros with inline functions for searching the
next no-void pattern item and the next no-void action.
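For illustration, a minimal sketch of the intended calling convention
(not part of this patch): NULL yields the first no-void item, the
current item yields the next one, and the pattern is assumed to be
END-terminated:

	const struct rte_flow_item *item;

	item = next_no_void_pattern(pattern, NULL);	/* first no-void item */
	while (item->type != RTE_FLOW_ITEM_TYPE_END) {
		/* ... consume the item ... */
		item = next_no_void_pattern(pattern, item);	/* next no-void item */
	}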
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
drivers/net/ixgbe/ixgbe_flow.c | 208 +++++++++++++++--------------------------
1 file changed, 73 insertions(+), 135 deletions(-)
diff --git a/drivers/net/ixgbe/ixgbe_flow.c b/drivers/net/ixgbe/ixgbe_flow.c
index 815496b..c6fb2f8 100644
--- a/drivers/net/ixgbe/ixgbe_flow.c
+++ b/drivers/net/ixgbe/ixgbe_flow.c
@@ -79,23 +79,39 @@
#define IXGBE_MIN_N_TUPLE_PRIO 1
#define IXGBE_MAX_N_TUPLE_PRIO 7
#define IXGBE_MAX_FLX_SOURCE_OFF 62
-#define NEXT_ITEM_OF_PATTERN(item, pattern, index)\
- do { \
- item = pattern + index;\
- while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {\
- index++; \
- item = pattern + index; \
- } \
- } while (0)
-
-#define NEXT_ITEM_OF_ACTION(act, actions, index)\
- do { \
- act = actions + index; \
- while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {\
- index++; \
- act = actions + index; \
- } \
- } while (0)
+
+/**
+ * An endless loop will never happen with the assumptions below:
+ * 1. there is at least one no-void item (END)
+ * 2. cur is before END.
+ */
+static inline
+const struct rte_flow_item *next_no_void_pattern(
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_item *cur)
+{
+ const struct rte_flow_item *next =
+ cur ? cur + 1 : &pattern[0];
+ while (1) {
+ if (next->type != RTE_FLOW_ITEM_TYPE_VOID)
+ return next;
+ next++;
+ }
+}
+
+static inline
+const struct rte_flow_action *next_no_void_action(
+ const struct rte_flow_action actions[],
+ const struct rte_flow_action *cur)
+{
+ const struct rte_flow_action *next =
+ cur ? cur + 1 : &actions[0];
+ while (1) {
+ if (next->type != RTE_FLOW_ACTION_TYPE_VOID)
+ return next;
+ next++;
+ }
+}
/**
 * Please be aware there's an assumption for all the parsers.
@@ -145,7 +161,6 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
const struct rte_flow_item_udp *udp_mask;
const struct rte_flow_item_sctp *sctp_spec;
const struct rte_flow_item_sctp *sctp_mask;
- uint32_t index;
if (!pattern) {
rte_flow_error_set(error,
@@ -167,11 +182,8 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
return -rte_errno;
}
- /* parse pattern */
- index = 0;
-
/* the first not void item can be MAC or IPv4 */
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, NULL);
if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
@@ -199,8 +211,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
return -rte_errno;
}
/* check if the next not void item is IPv4 */
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
rte_flow_error_set(error,
EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
@@ -253,8 +264,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
filter->proto = ipv4_spec->hdr.next_proto_id;
/* check if the next not void item is TCP or UDP */
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
item->type != RTE_FLOW_ITEM_TYPE_UDP &&
item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
@@ -372,8 +382,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
}
/* check if the next not void item is END */
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_END) {
memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
rte_flow_error_set(error, EINVAL,
@@ -382,14 +391,11 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
return -rte_errno;
}
- /* parse action */
- index = 0;
-
/**
* n-tuple only supports forwarding,
* check if the first not void action is QUEUE.
*/
- NEXT_ITEM_OF_ACTION(act, actions, index);
+ act = next_no_void_action(actions, NULL);
if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
rte_flow_error_set(error, EINVAL,
@@ -401,8 +407,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
((const struct rte_flow_action_queue *)act->conf)->index;
/* check if the next not void item is END */
- index++;
- NEXT_ITEM_OF_ACTION(act, actions, index);
+ act = next_no_void_action(actions, act);
if (act->type != RTE_FLOW_ACTION_TYPE_END) {
memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
rte_flow_error_set(error, EINVAL,
@@ -521,7 +526,6 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
const struct rte_flow_item_eth *eth_spec;
const struct rte_flow_item_eth *eth_mask;
const struct rte_flow_action_queue *act_q;
- uint32_t index;
if (!pattern) {
rte_flow_error_set(error, EINVAL,
@@ -544,15 +548,8 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
return -rte_errno;
}
- /* Parse pattern */
- index = 0;
-
+ item = next_no_void_pattern(pattern, NULL);
/* The first non-void item should be MAC. */
- item = pattern + index;
- while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {
- index++;
- item = pattern + index;
- }
if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
@@ -611,12 +608,7 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
/* Check if the next non-void item is END. */
- index++;
- item = pattern + index;
- while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {
- index++;
- item = pattern + index;
- }
+ item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_END) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
@@ -626,13 +618,7 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
/* Parse action */
- index = 0;
- /* Check if the first non-void action is QUEUE or DROP. */
- act = actions + index;
- while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {
- index++;
- act = actions + index;
- }
+ act = next_no_void_action(actions, NULL);
if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
act->type != RTE_FLOW_ACTION_TYPE_DROP) {
rte_flow_error_set(error, EINVAL,
@@ -649,12 +635,7 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
}
/* Check if the next non-void item is END */
- index++;
- act = actions + index;
- while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {
- index++;
- act = actions + index;
- }
+ act = next_no_void_action(actions, act);
if (act->type != RTE_FLOW_ACTION_TYPE_END) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
@@ -794,7 +775,6 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr,
const struct rte_flow_item_tcp *tcp_spec;
const struct rte_flow_item_tcp *tcp_mask;
const struct rte_flow_action_queue *act_q;
- uint32_t index;
if (!pattern) {
rte_flow_error_set(error, EINVAL,
@@ -817,11 +797,9 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr,
return -rte_errno;
}
- /* parse pattern */
- index = 0;
/* the first not void item should be MAC or IPv4 or IPv6 or TCP */
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, NULL);
if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
@@ -850,8 +828,7 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr,
}
/* check if the next not void item is IPv4 or IPv6 */
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
rte_flow_error_set(error, EINVAL,
@@ -873,8 +850,7 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr,
}
/* check if the next not void item is TCP */
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
@@ -918,8 +894,7 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr,
}
/* check if the next not void item is END */
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_END) {
memset(filter, 0, sizeof(struct rte_eth_syn_filter));
rte_flow_error_set(error, EINVAL,
@@ -928,11 +903,8 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr,
return -rte_errno;
}
- /* parse action */
- index = 0;
-
/* check if the first not void action is QUEUE. */
- NEXT_ITEM_OF_ACTION(act, actions, index);
+ act = next_no_void_action(actions, NULL);
if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
memset(filter, 0, sizeof(struct rte_eth_syn_filter));
rte_flow_error_set(error, EINVAL,
@@ -952,8 +924,7 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr,
}
/* check if the next not void item is END */
- index++;
- NEXT_ITEM_OF_ACTION(act, actions, index);
+ act = next_no_void_action(actions, act);
if (act->type != RTE_FLOW_ACTION_TYPE_END) {
memset(filter, 0, sizeof(struct rte_eth_syn_filter));
rte_flow_error_set(error, EINVAL,
@@ -1049,7 +1020,6 @@ cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
const struct rte_flow_item_e_tag *e_tag_mask;
const struct rte_flow_action *act;
const struct rte_flow_action_queue *act_q;
- uint32_t index;
if (!pattern) {
rte_flow_error_set(error, EINVAL,
@@ -1071,11 +1041,9 @@ cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
NULL, "NULL attribute.");
return -rte_errno;
}
- /* parse pattern */
- index = 0;
/* The first not void item should be e-tag. */
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, NULL);
if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
rte_flow_error_set(error, EINVAL,
@@ -1122,8 +1090,7 @@ cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b);
/* check if the next not void item is END */
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_END) {
memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
rte_flow_error_set(error, EINVAL,
@@ -1160,11 +1127,8 @@ cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
return -rte_errno;
}
- /* parse action */
- index = 0;
-
/* check if the first not void action is QUEUE. */
- NEXT_ITEM_OF_ACTION(act, actions, index);
+ act = next_no_void_action(actions, NULL);
if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
rte_flow_error_set(error, EINVAL,
@@ -1177,8 +1141,7 @@ cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
filter->pool = act_q->index;
/* check if the next not void item is END */
- index++;
- NEXT_ITEM_OF_ACTION(act, actions, index);
+ act = next_no_void_action(actions, act);
if (act->type != RTE_FLOW_ACTION_TYPE_END) {
memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
rte_flow_error_set(error, EINVAL,
@@ -1227,7 +1190,6 @@ ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
const struct rte_flow_action *act;
const struct rte_flow_action_queue *act_q;
const struct rte_flow_action_mark *mark;
- uint32_t index;
/* parse attr */
/* must be input direction */
@@ -1257,11 +1219,8 @@ ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
return -rte_errno;
}
- /* parse action */
- index = 0;
-
/* check if the first not void action is QUEUE or DROP. */
- NEXT_ITEM_OF_ACTION(act, actions, index);
+ act = next_no_void_action(actions, NULL);
if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
act->type != RTE_FLOW_ACTION_TYPE_DROP) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
@@ -1279,8 +1238,7 @@ ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
}
/* check if the next not void item is MARK */
- index++;
- NEXT_ITEM_OF_ACTION(act, actions, index);
+ act = next_no_void_action(actions, act);
if ((act->type != RTE_FLOW_ACTION_TYPE_MARK) &&
(act->type != RTE_FLOW_ACTION_TYPE_END)) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
@@ -1295,8 +1253,7 @@ ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
mark = (const struct rte_flow_action_mark *)act->conf;
rule->soft_id = mark->id;
- index++;
- NEXT_ITEM_OF_ACTION(act, actions, index);
+ act = next_no_void_action(actions, act);
}
/* check if the next not void item is END */
@@ -1378,7 +1335,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
const struct rte_flow_item_raw *raw_mask;
const struct rte_flow_item_raw *raw_spec;
- uint32_t index, j;
+ uint32_t j;
if (!pattern) {
rte_flow_error_set(error, EINVAL,
@@ -1410,14 +1367,11 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
rule->mask.vlan_tci_mask = 0;
rule->mask.flex_bytes_mask = 0;
- /* parse pattern */
- index = 0;
-
/**
* The first not void item should be
* MAC or IPv4 or TCP or UDP or SCTP.
*/
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, NULL);
if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
item->type != RTE_FLOW_ITEM_TYPE_TCP &&
@@ -1510,8 +1464,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
* Check if the next not void item is vlan or ipv4.
* IPv6 is not supported.
*/
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, item);
if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
@@ -1560,8 +1513,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
/**
* Check if the next not void item is not vlan.
*/
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_END) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
@@ -1631,8 +1583,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
* Check if the next not void item is
* TCP or UDP or SCTP or END.
*/
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
item->type != RTE_FLOW_ITEM_TYPE_UDP &&
item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
@@ -1699,8 +1650,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
tcp_spec->hdr.dst_port;
}
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
item->type != RTE_FLOW_ITEM_TYPE_END) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
@@ -1760,8 +1710,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
udp_spec->hdr.dst_port;
}
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
item->type != RTE_FLOW_ITEM_TYPE_END) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
@@ -1823,8 +1772,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
sctp_spec->hdr.dst_port;
}
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
item->type != RTE_FLOW_ITEM_TYPE_END) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
@@ -1912,8 +1860,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
if (item->type != RTE_FLOW_ITEM_TYPE_END) {
/* check if the next not void item is END */
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_END) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
@@ -1981,7 +1928,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
const struct rte_flow_item_eth *eth_mask;
const struct rte_flow_item_vlan *vlan_spec;
const struct rte_flow_item_vlan *vlan_mask;
- uint32_t index, j;
+ uint32_t j;
if (!pattern) {
rte_flow_error_set(error, EINVAL,
@@ -2012,14 +1959,11 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
rule->mask.vlan_tci_mask = 0;
- /* parse pattern */
- index = 0;
-
/**
* The first not void item should be
* MAC or IPv4 or IPv6 or UDP or VxLAN.
*/
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, NULL);
if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
@@ -2054,8 +1998,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
}
/* Check if the next not void item is IPv4 or IPv6. */
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
@@ -2086,8 +2029,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
}
/* Check if the next not void item is UDP or NVGRE. */
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_UDP &&
item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
@@ -2117,8 +2059,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
}
/* Check if the next not void item is VxLAN. */
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
@@ -2274,8 +2215,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
}
/* check if the next not void item is MAC */
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
@@ -2358,8 +2298,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
* Check if the next not void item is vlan or ipv4.
* IPv6 is not supported.
*/
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, item);
if ((item->type != RTE_FLOW_ITEM_TYPE_VLAN) &&
(item->type != RTE_FLOW_ITEM_TYPE_IPV4)) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
@@ -2395,8 +2334,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
/* More than one tags are not supported. */
/* check if the next not void item is END */
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_END) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
--
2.7.4
* [dpdk-dev] [PATCH 2/3] net/ixgbe: enable signature match for consistent API
2017-05-26 19:52 [dpdk-dev] [PATCH 0/3] net/ixgbe: enable signature match and ipv6 for consistent API Qi Zhang
2017-05-26 19:52 ` [dpdk-dev] [PATCH 1/3] net/ixgbe: replace macro with inline function Qi Zhang
@ 2017-05-26 19:52 ` Qi Zhang
2017-06-01 5:50 ` Lu, Wenzhuo
2017-05-26 19:52 ` [dpdk-dev] [PATCH 3/3] net/ixgbe: enable IPv6 " Qi Zhang
` (2 subsequent siblings)
4 siblings, 1 reply; 21+ messages in thread
From: Qi Zhang @ 2017-05-26 19:52 UTC (permalink / raw)
To: helin.zhang, wenzhuo.lu; +Cc: dev, Qi Zhang
Enable signature match for rte_flow API.
RTE_FLOW_ITEM_TYPE_ROUGHLY specifies a signature match.
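A hedged usage sketch, assuming the ROUGHLY item type and the threshold
field of struct rte_flow_item_roughly from the patches this series is
based on (ipv4_spec and ipv4_mask are hypothetical, defined elsewhere);
a non-zero threshold requests a signature match:

	struct rte_flow_item_roughly roughly = { .threshold = 1 };
	struct rte_flow_item pattern[] = {
		/* ROUGHLY may appear anywhere before END */
		{ .type = RTE_FLOW_ITEM_TYPE_ROUGHLY, .spec = &roughly },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ipv4_spec, .mask = &ipv4_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};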
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
drivers/net/ixgbe/ixgbe_flow.c | 71 +++++++++++++++++++++++++++++++++++-------
1 file changed, 59 insertions(+), 12 deletions(-)
diff --git a/drivers/net/ixgbe/ixgbe_flow.c b/drivers/net/ixgbe/ixgbe_flow.c
index c6fb2f8..0638c32 100644
--- a/drivers/net/ixgbe/ixgbe_flow.c
+++ b/drivers/net/ixgbe/ixgbe_flow.c
@@ -1268,6 +1268,47 @@ ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
return 0;
}
+/* Search the next no-void pattern item, skipping ROUGHLY items. */
+static inline
+const struct rte_flow_item *next_no_roughly_pattern(
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_item *cur)
+{
+ const struct rte_flow_item *next =
+ next_no_void_pattern(pattern, cur);
+ while (1) {
+ if (next->type != RTE_FLOW_ITEM_TYPE_ROUGHLY)
+ return next;
+ next = next_no_void_pattern(pattern, next);
+ }
+}
+
+static inline uint8_t signature_match(const struct rte_flow_item pattern[])
+{
+ const struct rte_flow_item_roughly *spec;
+ const struct rte_flow_item *item;
+ int i = 0;
+
+ while (1) {
+ item = pattern + i;
+ if (item->type == RTE_FLOW_ITEM_TYPE_END)
+ break;
+
+ if (item->type == RTE_FLOW_ITEM_TYPE_ROUGHLY) {
+ spec =
+ (const struct rte_flow_item_roughly *)item->spec;
+ if (spec->threshold)
+ return 1;
+ else
+ return 0;
+ }
+
+ i++;
+ }
+
+ return 0;
+}
+
/**
 * Parse the rule to see if it is an IP or MAC VLAN flow director rule.
* And get the flow director filter info BTW.
@@ -1277,6 +1318,7 @@ ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
* The next not void item could be UDP or TCP or SCTP (optional)
* The next not void item could be RAW (for flexbyte, optional)
* The next not void item must be END.
+ * A Roughly Match pattern can appear at any place before END (optional)
* MAC VLAN PATTERN:
* The first not void item must be ETH.
* The second not void item must be MAC VLAN.
@@ -1371,7 +1413,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
* The first not void item should be
* MAC or IPv4 or TCP or UDP or SCTP.
*/
- item = next_no_void_pattern(pattern, NULL);
+ item = next_no_roughly_pattern(pattern, NULL);
if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
item->type != RTE_FLOW_ITEM_TYPE_TCP &&
@@ -1384,7 +1426,10 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
return -rte_errno;
}
- rule->mode = RTE_FDIR_MODE_PERFECT;
+ if (signature_match(pattern))
+ rule->mode = RTE_FDIR_MODE_SIGNATURE;
+ else
+ rule->mode = RTE_FDIR_MODE_PERFECT;
/*Not supported last point for range*/
if (item->last) {
@@ -1421,14 +1466,13 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
if (item->mask) {
- /* If ethernet has meaning, it means MAC VLAN mode. */
- rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;
rule->b_mask = TRUE;
eth_mask = (const struct rte_flow_item_eth *)item->mask;
/* Ether type should be masked. */
- if (eth_mask->type) {
+ if (eth_mask->type ||
+ rule->mode == RTE_FDIR_MODE_SIGNATURE) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
@@ -1436,6 +1480,9 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
return -rte_errno;
}
+ /* If ethernet has meaning, it means MAC VLAN mode. */
+ rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;
+
/**
* src MAC address must be masked,
* and don't support dst MAC address mask.
@@ -1464,7 +1511,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
* Check if the next not void item is vlan or ipv4.
* IPv6 is not supported.
*/
- item = next_no_void_pattern(pattern, item);
+ item = next_no_roughly_pattern(pattern, item);
if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
@@ -1513,7 +1560,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
/**
* Check if the next not void item is not vlan.
*/
- item = next_no_void_pattern(pattern, item);
+ item = next_no_roughly_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_END) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
@@ -1583,7 +1630,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
* Check if the next not void item is
* TCP or UDP or SCTP or END.
*/
- item = next_no_void_pattern(pattern, item);
+ item = next_no_roughly_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
item->type != RTE_FLOW_ITEM_TYPE_UDP &&
item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
@@ -1650,7 +1697,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
tcp_spec->hdr.dst_port;
}
- item = next_no_void_pattern(pattern, item);
+ item = next_no_roughly_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
item->type != RTE_FLOW_ITEM_TYPE_END) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
@@ -1710,7 +1757,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
udp_spec->hdr.dst_port;
}
- item = next_no_void_pattern(pattern, item);
+ item = next_no_roughly_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
item->type != RTE_FLOW_ITEM_TYPE_END) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
@@ -1772,7 +1819,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
sctp_spec->hdr.dst_port;
}
- item = next_no_void_pattern(pattern, item);
+ item = next_no_roughly_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
item->type != RTE_FLOW_ITEM_TYPE_END) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
@@ -1860,7 +1907,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
if (item->type != RTE_FLOW_ITEM_TYPE_END) {
/* check if the next not void item is END */
- item = next_no_void_pattern(pattern, item);
+ item = next_no_roughly_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_END) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
--
2.7.4
* [dpdk-dev] [PATCH 3/3] net/ixgbe: enable IPv6 for consistent API
2017-05-26 19:52 [dpdk-dev] [PATCH 0/3] net/ixgbe: enable signature match and ipv6 for consistent API Qi Zhang
2017-05-26 19:52 ` [dpdk-dev] [PATCH 1/3] net/ixgbe: replace macro with inline function Qi Zhang
2017-05-26 19:52 ` [dpdk-dev] [PATCH 2/3] net/ixgbe: enable signature match for consistent API Qi Zhang
@ 2017-05-26 19:52 ` Qi Zhang
2017-06-01 6:15 ` Lu, Wenzhuo
2017-06-07 23:08 ` [dpdk-dev] [PATCH v2 0/3] net/ixgbe: enable signature match and ipv6 " Qi Zhang
2017-07-05 2:21 ` [dpdk-dev] [PATCH v4 " Qi Zhang
4 siblings, 1 reply; 21+ messages in thread
From: Qi Zhang @ 2017-05-26 19:52 UTC (permalink / raw)
To: helin.zhang, wenzhuo.lu; +Cc: dev, Qi Zhang
Enable IPv6 support with rte_flow API.
Only Signature Match is supported.
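A hedged sketch of the per-byte mask handling below (illustrative
values, not from the patch): each fully-masked byte of hdr.src_addr
sets one bit of rule->mask.src_ipv6_mask, so a /64 source prefix
becomes:

	struct rte_flow_item_ipv6 ipv6_mask = { 0 };
	int i;

	/* mask the upper 64 bits (bytes 0-7) of the source address */
	for (i = 0; i < 8; i++)
		ipv6_mask.hdr.src_addr[i] = UINT8_MAX;
	/* the parser then builds src_ipv6_mask = 0x00ff (bits 0-7 set) */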
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
drivers/net/ixgbe/ixgbe_flow.c | 112 ++++++++++++++++++++++++++++++++++++++---
1 file changed, 104 insertions(+), 8 deletions(-)
diff --git a/drivers/net/ixgbe/ixgbe_flow.c b/drivers/net/ixgbe/ixgbe_flow.c
index 0638c32..1746a9e 100644
--- a/drivers/net/ixgbe/ixgbe_flow.c
+++ b/drivers/net/ixgbe/ixgbe_flow.c
@@ -1313,12 +1313,13 @@ static inline uint8_t signature_match(const struct rte_flow_item pattern[])
 * Parse the rule to see if it is an IP or MAC VLAN flow director rule.
* And get the flow director filter info BTW.
* UDP/TCP/SCTP PATTERN:
- * The first not void item can be ETH or IPV4.
- * The second not void item must be IPV4 if the first one is ETH.
+ * The first not void item can be ETH or IPV4 or IPV6
+ * The second not void item must be IPV4 or IPV6 if the first one is ETH.
* The next not void item could be UDP or TCP or SCTP (optional)
* The next not void item could be RAW (for flexbyte, optional)
* The next not void item must be END.
- * A Roughly Match pattern can appear at any place before END (optional)
+ * A Roughly Match pattern can appear at any place before END.
+ * Roughly Match is optional for IPV4 but is required for IPV6.
* MAC VLAN PATTERN:
* The first not void item must be ETH.
* The second not void item must be MAC VLAN.
@@ -1366,6 +1367,8 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
const struct rte_flow_item_eth *eth_mask;
const struct rte_flow_item_ipv4 *ipv4_spec;
const struct rte_flow_item_ipv4 *ipv4_mask;
+ const struct rte_flow_item_ipv6 *ipv6_spec;
+ const struct rte_flow_item_ipv6 *ipv6_mask;
const struct rte_flow_item_tcp *tcp_spec;
const struct rte_flow_item_tcp *tcp_mask;
const struct rte_flow_item_udp *udp_spec;
@@ -1377,7 +1380,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
const struct rte_flow_item_raw *raw_mask;
const struct rte_flow_item_raw *raw_spec;
- uint32_t j;
+ uint8_t j;
if (!pattern) {
rte_flow_error_set(error, EINVAL,
@@ -1416,6 +1419,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
item = next_no_roughly_pattern(pattern, NULL);
if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
item->type != RTE_FLOW_ITEM_TYPE_TCP &&
item->type != RTE_FLOW_ITEM_TYPE_UDP &&
item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
@@ -1570,7 +1574,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
}
}
- /* Get the IP info. */
+ /* Get the IPV4 info. */
if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
/**
* Set the flow type even if there's no content
@@ -1644,13 +1648,105 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
}
}
+ /* Get the IPV6 info. */
+ if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
+ /**
+ * Set the flow type even if there's no content
+ * as we must have a flow type.
+ */
+ rule->ixgbe_fdir.formatted.flow_type =
+ IXGBE_ATR_FLOW_TYPE_IPV6;
+
+ /**
+ * 1. it must be a signature match
+ * 2. "last" is not supported
+ * 3. the mask must not be NULL
+ */
+ if (rule->mode != RTE_FDIR_MODE_SIGNATURE ||
+ item->last ||
+ !item->mask) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ rule->b_mask = TRUE;
+ ipv6_mask =
+ (const struct rte_flow_item_ipv6 *)item->mask;
+ if (ipv6_mask->hdr.vtc_flow ||
+ ipv6_mask->hdr.payload_len ||
+ ipv6_mask->hdr.proto ||
+ ipv6_mask->hdr.hop_limits) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
+ /* check src addr mask */
+ for (j = 0; j < 16; j++) {
+ if (ipv6_mask->hdr.src_addr[j] == UINT8_MAX) {
+ rule->mask.src_ipv6_mask |= 1 << j;
+ } else if (ipv6_mask->hdr.src_addr[j] != 0) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+
+ /* check dst addr mask */
+ for (j = 0; j < 16; j++) {
+ if (ipv6_mask->hdr.dst_addr[j] == UINT8_MAX) {
+ rule->mask.dst_ipv6_mask |= 1 << j;
+ } else if (ipv6_mask->hdr.dst_addr[j] != 0) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+
+ if (item->spec) {
+ rule->b_spec = TRUE;
+ ipv6_spec =
+ (const struct rte_flow_item_ipv6 *)item->spec;
+ rte_memcpy(rule->ixgbe_fdir.formatted.src_ip,
+ ipv6_spec->hdr.src_addr, 16);
+ rte_memcpy(rule->ixgbe_fdir.formatted.dst_ip,
+ ipv6_spec->hdr.dst_addr, 16);
+ }
+
+ /**
+ * Check if the next not void item is
+ * TCP or UDP or SCTP or END.
+ */
+ item = next_no_roughly_pattern(pattern, item);
+ if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
+ item->type != RTE_FLOW_ITEM_TYPE_UDP &&
+ item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
+ item->type != RTE_FLOW_ITEM_TYPE_END &&
+ item->type != RTE_FLOW_ITEM_TYPE_RAW) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+
/* Get the TCP info. */
if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
/**
* Set the flow type even if there's no content
* as we must have a flow type.
*/
- rule->ixgbe_fdir.formatted.flow_type =
+ rule->ixgbe_fdir.formatted.flow_type |=
IXGBE_ATR_FLOW_TYPE_TCPV4;
/*Not supported last point for range*/
if (item->last) {
@@ -1715,7 +1811,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
* Set the flow type even if there's no content
* as we must have a flow type.
*/
- rule->ixgbe_fdir.formatted.flow_type =
+ rule->ixgbe_fdir.formatted.flow_type |=
IXGBE_ATR_FLOW_TYPE_UDPV4;
/*Not supported last point for range*/
if (item->last) {
@@ -1775,7 +1871,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
* Set the flow type even if there's no content
* as we must have a flow type.
*/
- rule->ixgbe_fdir.formatted.flow_type =
+ rule->ixgbe_fdir.formatted.flow_type |=
IXGBE_ATR_FLOW_TYPE_SCTPV4;
/*Not supported last point for range*/
if (item->last) {
--
2.7.4
* Re: [dpdk-dev] [PATCH 1/3] net/ixgbe: replace macro with inline function
2017-05-26 19:52 ` [dpdk-dev] [PATCH 1/3] net/ixgbe: replace macro with inline function Qi Zhang
@ 2017-06-01 5:09 ` Lu, Wenzhuo
0 siblings, 0 replies; 21+ messages in thread
From: Lu, Wenzhuo @ 2017-06-01 5:09 UTC (permalink / raw)
To: Zhang, Qi Z, Zhang, Helin; +Cc: dev
Hi Qi,
> -----Original Message-----
> From: Zhang, Qi Z
> Sent: Saturday, May 27, 2017 3:52 AM
> To: Zhang, Helin; Lu, Wenzhuo
> Cc: dev@dpdk.org; Zhang, Qi Z
> Subject: [PATCH 1/3] net/ixgbe: replace macro with inline function
>
> Code cleanup: replace the macros with inline functions for searching the
> next no-void pattern item and the next no-void action.
>
> Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
> ---
> drivers/net/ixgbe/ixgbe_flow.c | 208 +++++++++++++++--------------------------
> 1 file changed, 73 insertions(+), 135 deletions(-)
This patch is good, but I don't understand the necessity of replacing the macro. Would you like to add more explanation in the commit log?
* Re: [dpdk-dev] [PATCH 2/3] net/ixgbe: enable signature match for consistent API
2017-05-26 19:52 ` [dpdk-dev] [PATCH 2/3] net/ixgbe: enable signature match for consistent API Qi Zhang
@ 2017-06-01 5:50 ` Lu, Wenzhuo
2017-06-01 5:56 ` Zhang, Qi Z
0 siblings, 1 reply; 21+ messages in thread
From: Lu, Wenzhuo @ 2017-06-01 5:50 UTC (permalink / raw)
To: Zhang, Qi Z, Zhang, Helin; +Cc: dev
Hi Qi,
> -----Original Message-----
> From: Zhang, Qi Z
> Sent: Saturday, May 27, 2017 3:52 AM
> To: Zhang, Helin; Lu, Wenzhuo
> Cc: dev@dpdk.org; Zhang, Qi Z
> Subject: [PATCH 2/3] net/ixgbe: enable signature match for consistent API
>
> Enable signature match for rte_flow API.
> RTE_FLOW_ITEM_TYPE_ROUGHLY specifies a signature match.
>
> Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
Acked-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
Now I understand why you want to change the macro to the function in the first patch. :)
* Re: [dpdk-dev] [PATCH 2/3] net/ixgbe: enable signature match for consistent API
2017-06-01 5:50 ` Lu, Wenzhuo
@ 2017-06-01 5:56 ` Zhang, Qi Z
2017-06-01 6:13 ` Lu, Wenzhuo
0 siblings, 1 reply; 21+ messages in thread
From: Zhang, Qi Z @ 2017-06-01 5:56 UTC (permalink / raw)
To: Lu, Wenzhuo, Zhang, Helin; +Cc: dev
> -----Original Message-----
> From: Lu, Wenzhuo
> Sent: Thursday, June 1, 2017 1:50 PM
> To: Zhang, Qi Z <qi.z.zhang@intel.com>; Zhang, Helin
> <helin.zhang@intel.com>
> Cc: dev@dpdk.org
> Subject: RE: [PATCH 2/3] net/ixgbe: enable signature match for consistent
> API
>
> Hi Qi,
>
> > -----Original Message-----
> > From: Zhang, Qi Z
> > Sent: Saturday, May 27, 2017 3:52 AM
> > To: Zhang, Helin; Lu, Wenzhuo
> > Cc: dev@dpdk.org; Zhang, Qi Z
> > Subject: [PATCH 2/3] net/ixgbe: enable signature match for consistent API
> >
> > Enable signature match for rte_flow API.
> > RTE_FLOW_ITEM_TYPE_ROUGHLY specifies a signature match.
> >
> > Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
> Acked-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
>
> Now I understand why you want to change the macro to the function in the
> first patch. :)
OK, I can merge the 2 patches into 1 in v2, if you think it's not necessary to separate.
Thanks
Qi
* Re: [dpdk-dev] [PATCH 2/3] net/ixgbe: enable signature match for consistent API
2017-06-01 5:56 ` Zhang, Qi Z
@ 2017-06-01 6:13 ` Lu, Wenzhuo
2017-06-01 6:14 ` Zhang, Qi Z
0 siblings, 1 reply; 21+ messages in thread
From: Lu, Wenzhuo @ 2017-06-01 6:13 UTC (permalink / raw)
To: Zhang, Qi Z, Zhang, Helin; +Cc: dev
Hi Qi,
> -----Original Message-----
> From: Zhang, Qi Z
> Sent: Thursday, June 1, 2017 1:56 PM
> To: Lu, Wenzhuo; Zhang, Helin
> Cc: dev@dpdk.org
> Subject: RE: [PATCH 2/3] net/ixgbe: enable signature match for consistent
> API
>
>
>
> > -----Original Message-----
> > From: Lu, Wenzhuo
> > Sent: Thursday, June 1, 2017 1:50 PM
> > To: Zhang, Qi Z <qi.z.zhang@intel.com>; Zhang, Helin
> > <helin.zhang@intel.com>
> > Cc: dev@dpdk.org
> > Subject: RE: [PATCH 2/3] net/ixgbe: enable signature match for
> > consistent API
> >
> > Hi Qi,
> >
> > > -----Original Message-----
> > > From: Zhang, Qi Z
> > > Sent: Saturday, May 27, 2017 3:52 AM
> > > To: Zhang, Helin; Lu, Wenzhuo
> > > Cc: dev@dpdk.org; Zhang, Qi Z
> > > Subject: [PATCH 2/3] net/ixgbe: enable signature match for
> > > consistent API
> > >
> > > Enable signature match for rte_flow API.
> > > RTE_FLOW_ITEM_TYPE_ROUGHLY specifies a signature match.
> > >
> > > Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
> > Acked-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
> >
> > Now I understand why you want to change the macro to the function in
> > the first patch. :)
>
> OK, I can merge the 2 patches into 1 in v2, if you think it's not necessary to
> separate.
It's better to have these 2 patches. Adding more info about the purpose in the commit log, like "removing the index ...", can make it easier to understand. :)
>
> Thanks
> Qi
* Re: [dpdk-dev] [PATCH 2/3] net/ixgbe: enable signature match for consistent API
2017-06-01 6:13 ` Lu, Wenzhuo
@ 2017-06-01 6:14 ` Zhang, Qi Z
0 siblings, 0 replies; 21+ messages in thread
From: Zhang, Qi Z @ 2017-06-01 6:14 UTC (permalink / raw)
To: Lu, Wenzhuo, Zhang, Helin; +Cc: dev
> -----Original Message-----
> From: Lu, Wenzhuo
> Sent: Thursday, June 1, 2017 2:13 PM
> To: Zhang, Qi Z <qi.z.zhang@intel.com>; Zhang, Helin
> <helin.zhang@intel.com>
> Cc: dev@dpdk.org
> Subject: RE: [PATCH 2/3] net/ixgbe: enable signature match for consistent
> API
>
> Hi Qi,
>
> > -----Original Message-----
> > From: Zhang, Qi Z
> > Sent: Thursday, June 1, 2017 1:56 PM
> > To: Lu, Wenzhuo; Zhang, Helin
> > Cc: dev@dpdk.org
> > Subject: RE: [PATCH 2/3] net/ixgbe: enable signature match for
> > consistent API
> >
> >
> >
> > > -----Original Message-----
> > > From: Lu, Wenzhuo
> > > Sent: Thursday, June 1, 2017 1:50 PM
> > > To: Zhang, Qi Z <qi.z.zhang@intel.com>; Zhang, Helin
> > > <helin.zhang@intel.com>
> > > Cc: dev@dpdk.org
> > > Subject: RE: [PATCH 2/3] net/ixgbe: enable signature match for
> > > consistent API
> > >
> > > Hi Qi,
> > >
> > > > -----Original Message-----
> > > > From: Zhang, Qi Z
> > > > Sent: Saturday, May 27, 2017 3:52 AM
> > > > To: Zhang, Helin; Lu, Wenzhuo
> > > > Cc: dev@dpdk.org; Zhang, Qi Z
> > > > Subject: [PATCH 2/3] net/ixgbe: enable signature match for
> > > > consistent API
> > > >
> > > > Enable signature match for rte_flow API.
> > > > RTE_FLOW_ITEM_TYPE_ROUGHLY specifies a signature match.
> > > >
> > > > Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
> > > Acked-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
> > >
> > > Now I understand why you want to change the macro to the function in
> > > the first patch. :)
> >
> > OK, I can merge the 2 patches into 1 in v2, if you think it's not
> > necessary to separate.
> It's better to have these 2 patches. Adding more info of the purpose in the
> commit log, like "removing the index ...", can make it easier to understand. :)
OK, that makes sense.
>
> >
> > Thanks
> > Qi
* Re: [dpdk-dev] [PATCH 3/3] net/ixgbe: enable IPv6 for consistent API
2017-05-26 19:52 ` [dpdk-dev] [PATCH 3/3] net/ixgbe: enable IPv6 " Qi Zhang
@ 2017-06-01 6:15 ` Lu, Wenzhuo
2017-06-01 6:19 ` Zhang, Qi Z
0 siblings, 1 reply; 21+ messages in thread
From: Lu, Wenzhuo @ 2017-06-01 6:15 UTC (permalink / raw)
To: Zhang, Qi Z, Zhang, Helin; +Cc: dev
Hi Qi,
> -----Original Message-----
> From: Zhang, Qi Z
> Sent: Saturday, May 27, 2017 3:53 AM
> To: Zhang, Helin; Lu, Wenzhuo
> Cc: dev@dpdk.org; Zhang, Qi Z
> Subject: [PATCH 3/3] net/ixgbe: enable IPv6 for consistent API
>
> Enable IPv6 support with rte_flow API.
> Only Signature Match is supported.
>
> Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
> ---
> drivers/net/ixgbe/ixgbe_flow.c | 112
> ++++++++++++++++++++++++++++++++++++++---
> 1 file changed, 104 insertions(+), 8 deletions(-)
> /* Get the TCP info. */
> if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
> /**
> * Set the flow type even if there's no content
> * as we must have a flow type.
> */
> - rule->ixgbe_fdir.formatted.flow_type =
> + rule->ixgbe_fdir.formatted.flow_type |=
> IXGBE_ATR_FLOW_TYPE_TCPV4;
This macro is misleading; better to change it to IXGBE_ATR_L4TYPE_TCP.
> /*Not supported last point for range*/
> if (item->last) {
> @@ -1715,7 +1811,7 @@ ixgbe_parse_fdir_filter_normal(const struct
> rte_flow_attr *attr,
> * Set the flow type even if there's no content
> * as we must have a flow type.
> */
> - rule->ixgbe_fdir.formatted.flow_type =
> + rule->ixgbe_fdir.formatted.flow_type |=
> IXGBE_ATR_FLOW_TYPE_UDPV4;
IXGBE_ATR_L4TYPE_UDP is better.
> /*Not supported last point for range*/
> if (item->last) {
> @@ -1775,7 +1871,7 @@ ixgbe_parse_fdir_filter_normal(const struct
> rte_flow_attr *attr,
> * Set the flow type even if there's no content
> * as we must have a flow type.
> */
> - rule->ixgbe_fdir.formatted.flow_type =
> + rule->ixgbe_fdir.formatted.flow_type |=
> IXGBE_ATR_FLOW_TYPE_SCTPV4;
IXGBE_ATR_L4TYPE_SCTP is better.
> /*Not supported last point for range*/
> if (item->last) {
> --
> 2.7.4
* Re: [dpdk-dev] [PATCH 3/3] net/ixgbe: enable IPv6 for consistent API
2017-06-01 6:15 ` Lu, Wenzhuo
@ 2017-06-01 6:19 ` Zhang, Qi Z
0 siblings, 0 replies; 21+ messages in thread
From: Zhang, Qi Z @ 2017-06-01 6:19 UTC (permalink / raw)
To: Lu, Wenzhuo, Zhang, Helin; +Cc: dev
> -----Original Message-----
> From: Lu, Wenzhuo
> Sent: Thursday, June 1, 2017 2:16 PM
> To: Zhang, Qi Z <qi.z.zhang@intel.com>; Zhang, Helin
> <helin.zhang@intel.com>
> Cc: dev@dpdk.org
> Subject: RE: [PATCH 3/3] net/ixgbe: enable IPv6 for consistent API
>
> Hi Qi,
>
> > -----Original Message-----
> > From: Zhang, Qi Z
> > Sent: Saturday, May 27, 2017 3:53 AM
> > To: Zhang, Helin; Lu, Wenzhuo
> > Cc: dev@dpdk.org; Zhang, Qi Z
> > Subject: [PATCH 3/3] net/ixgbe: enable IPv6 for consistent API
> >
> > Enable IPv6 support with rte_flow API.
> > Only Signature Match is supported.
> >
> > Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
> > ---
> > drivers/net/ixgbe/ixgbe_flow.c | 112
> > ++++++++++++++++++++++++++++++++++++++---
> > 1 file changed, 104 insertions(+), 8 deletions(-)
>
> > /* Get the TCP info. */
> > if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
> > /**
> > * Set the flow type even if there's no content
> > * as we must have a flow type.
> > */
> > - rule->ixgbe_fdir.formatted.flow_type =
> > + rule->ixgbe_fdir.formatted.flow_type |=
> > IXGBE_ATR_FLOW_TYPE_TCPV4;
> This macro is misleading, better change it to IXGBE_ATR_L4TYPE_TCP.
>
> > /*Not supported last point for range*/
> > if (item->last) {
> > @@ -1715,7 +1811,7 @@ ixgbe_parse_fdir_filter_normal(const struct
> > rte_flow_attr *attr,
> > * Set the flow type even if there's no content
> > * as we must have a flow type.
> > */
> > - rule->ixgbe_fdir.formatted.flow_type =
> > + rule->ixgbe_fdir.formatted.flow_type |=
> > IXGBE_ATR_FLOW_TYPE_UDPV4;
> IXGBE_ATR_L4TYPE_UDP is better.
>
> > /*Not supported last point for range*/
> > if (item->last) {
> > @@ -1775,7 +1871,7 @@ ixgbe_parse_fdir_filter_normal(const struct
> > rte_flow_attr *attr,
> > * Set the flow type even if there's no content
> > * as we must have a flow type.
> > */
> > - rule->ixgbe_fdir.formatted.flow_type =
> > + rule->ixgbe_fdir.formatted.flow_type |=
> > IXGBE_ATR_FLOW_TYPE_SCTPV4;
> IXGBE_ATR_L4TYPE_SCTP is better.
>
> > /*Not supported last point for range*/
> > if (item->last) {
> > --
> > 2.7.4
Yes, we should use the macro, not the enum, here.
Will change in v2
Thanks
Qi
* [dpdk-dev] [PATCH v2 0/3] net/ixgbe: enable signature match and ipv6 for consistent API.
2017-05-26 19:52 [dpdk-dev] [PATCH 0/3] net/ixgbe: enable signature match and ipv6 for consistent API Qi Zhang
` (2 preceding siblings ...)
2017-05-26 19:52 ` [dpdk-dev] [PATCH 3/3] net/ixgbe: enable IPv6 " Qi Zhang
@ 2017-06-07 23:08 ` Qi Zhang
2017-06-07 23:08 ` [dpdk-dev] [PATCH v2 1/3] net/ixgbe: replace macro with inline function Qi Zhang
` (3 more replies)
2017-07-05 2:21 ` [dpdk-dev] [PATCH v4 " Qi Zhang
4 siblings, 4 replies; 21+ messages in thread
From: Qi Zhang @ 2017-06-07 23:08 UTC (permalink / raw)
To: wenzhuo.lu, helin.zhang; +Cc: dev, Qi Zhang
The patchset is based on
http://dpdk.org/dev/patchwork/patch/25015
http://dpdk.org/dev/patchwork/patch/25185
v2:
- add comment to explain macro replacement
- fix wrong flow type assignment
Qi Zhang (3):
net/ixgbe: replace macro with inline function
net/ixgbe: enable signature match for consistent API
net/ixgbe: enable IPv6 for consistent API
drivers/net/ixgbe/ixgbe_flow.c | 377 +++++++++++++++++++++++++----------------
1 file changed, 229 insertions(+), 148 deletions(-)
--
2.7.4
* [dpdk-dev] [PATCH v2 1/3] net/ixgbe: replace macro with inline function
2017-06-07 23:08 ` [dpdk-dev] [PATCH v2 0/3] net/ixgbe: enable signature match and ipv6 " Qi Zhang
@ 2017-06-07 23:08 ` Qi Zhang
2017-06-07 23:08 ` [dpdk-dev] [PATCH v2 2/3] net/ixgbe: enable signature match for consistent API Qi Zhang
` (2 subsequent siblings)
3 siblings, 0 replies; 21+ messages in thread
From: Qi Zhang @ 2017-06-07 23:08 UTC (permalink / raw)
To: wenzhuo.lu, helin.zhang; +Cc: dev, Qi Zhang
Code cleanup with 2 purposes:
1. The "index" variable is no longer needed.
2. An inline function is easy and safe to use even when nested in a loop.
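To illustrate point 2 with a hedged sketch (not from the patch): the
old macro made the caller keep a shared index in sync by hand, which is
error-prone inside a loop, while the inline function's only state is
the previous item:

	/* old macro style: "index" must be advanced manually each round */
	uint32_t index = 0;

	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	while (item->type != RTE_FLOW_ITEM_TYPE_END) {
		index++;	/* forgetting this re-reads the same item */
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
	}

	/* inline function style: pass the previous item to get the next */
	item = next_no_void_pattern(pattern, NULL);
	while (item->type != RTE_FLOW_ITEM_TYPE_END)
		item = next_no_void_pattern(pattern, item);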
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
v2:
- Update the commit log
drivers/net/ixgbe/ixgbe_flow.c | 208 +++++++++++++++--------------------------
1 file changed, 73 insertions(+), 135 deletions(-)
diff --git a/drivers/net/ixgbe/ixgbe_flow.c b/drivers/net/ixgbe/ixgbe_flow.c
index cae123e..69829e8 100644
--- a/drivers/net/ixgbe/ixgbe_flow.c
+++ b/drivers/net/ixgbe/ixgbe_flow.c
@@ -79,23 +79,39 @@
#define IXGBE_MIN_N_TUPLE_PRIO 1
#define IXGBE_MAX_N_TUPLE_PRIO 7
#define IXGBE_MAX_FLX_SOURCE_OFF 62
-#define NEXT_ITEM_OF_PATTERN(item, pattern, index)\
- do { \
- item = pattern + index;\
- while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {\
- index++; \
- item = pattern + index; \
- } \
- } while (0)
-
-#define NEXT_ITEM_OF_ACTION(act, actions, index)\
- do { \
- act = actions + index; \
- while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {\
- index++; \
- act = actions + index; \
- } \
- } while (0)
+
+/**
+ * An endless loop will never happen with the assumptions below:
+ * 1. there is at least one no-void item (END)
+ * 2. cur is before END.
+ */
+static inline
+const struct rte_flow_item *next_no_void_pattern(
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_item *cur)
+{
+ const struct rte_flow_item *next =
+ cur ? cur + 1 : &pattern[0];
+ while (1) {
+ if (next->type != RTE_FLOW_ITEM_TYPE_VOID)
+ return next;
+ next++;
+ }
+}
+
+static inline
+const struct rte_flow_action *next_no_void_action(
+ const struct rte_flow_action actions[],
+ const struct rte_flow_action *cur)
+{
+ const struct rte_flow_action *next =
+ cur ? cur + 1 : &actions[0];
+ while (1) {
+ if (next->type != RTE_FLOW_ACTION_TYPE_VOID)
+ return next;
+ next++;
+ }
+}
/**
 * Please be aware there's an assumption for all the parsers.
@@ -145,7 +161,6 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
const struct rte_flow_item_udp *udp_mask;
const struct rte_flow_item_sctp *sctp_spec;
const struct rte_flow_item_sctp *sctp_mask;
- uint32_t index;
if (!pattern) {
rte_flow_error_set(error,
@@ -167,11 +182,8 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
return -rte_errno;
}
- /* parse pattern */
- index = 0;
-
/* the first not void item can be MAC or IPv4 */
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, NULL);
if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
@@ -199,8 +211,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
return -rte_errno;
}
/* check if the next not void item is IPv4 */
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
rte_flow_error_set(error,
EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
@@ -253,8 +264,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
filter->proto = ipv4_spec->hdr.next_proto_id;
/* check if the next not void item is TCP or UDP */
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
item->type != RTE_FLOW_ITEM_TYPE_UDP &&
item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
@@ -372,8 +382,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
}
/* check if the next not void item is END */
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_END) {
memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
rte_flow_error_set(error, EINVAL,
@@ -382,14 +391,11 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
return -rte_errno;
}
- /* parse action */
- index = 0;
-
/**
* n-tuple only supports forwarding,
* check if the first not void action is QUEUE.
*/
- NEXT_ITEM_OF_ACTION(act, actions, index);
+ act = next_no_void_action(actions, NULL);
if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
rte_flow_error_set(error, EINVAL,
@@ -401,8 +407,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
((const struct rte_flow_action_queue *)act->conf)->index;
/* check if the next not void item is END */
- index++;
- NEXT_ITEM_OF_ACTION(act, actions, index);
+ act = next_no_void_action(actions, act);
if (act->type != RTE_FLOW_ACTION_TYPE_END) {
memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
rte_flow_error_set(error, EINVAL,
@@ -521,7 +526,6 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
const struct rte_flow_item_eth *eth_spec;
const struct rte_flow_item_eth *eth_mask;
const struct rte_flow_action_queue *act_q;
- uint32_t index;
if (!pattern) {
rte_flow_error_set(error, EINVAL,
@@ -544,15 +548,8 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
return -rte_errno;
}
- /* Parse pattern */
- index = 0;
-
+ item = next_no_void_pattern(pattern, NULL);
/* The first non-void item should be MAC. */
- item = pattern + index;
- while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {
- index++;
- item = pattern + index;
- }
if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
@@ -611,12 +608,7 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
/* Check if the next non-void item is END. */
- index++;
- item = pattern + index;
- while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {
- index++;
- item = pattern + index;
- }
+ item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_END) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
@@ -626,13 +618,7 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
/* Parse action */
- index = 0;
- /* Check if the first non-void action is QUEUE or DROP. */
- act = actions + index;
- while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {
- index++;
- act = actions + index;
- }
+ act = next_no_void_action(actions, NULL);
if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
act->type != RTE_FLOW_ACTION_TYPE_DROP) {
rte_flow_error_set(error, EINVAL,
@@ -649,12 +635,7 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
}
/* Check if the next non-void item is END */
- index++;
- act = actions + index;
- while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {
- index++;
- act = actions + index;
- }
+ act = next_no_void_action(actions, act);
if (act->type != RTE_FLOW_ACTION_TYPE_END) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
@@ -794,7 +775,6 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr,
const struct rte_flow_item_tcp *tcp_spec;
const struct rte_flow_item_tcp *tcp_mask;
const struct rte_flow_action_queue *act_q;
- uint32_t index;
if (!pattern) {
rte_flow_error_set(error, EINVAL,
@@ -817,11 +797,9 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr,
return -rte_errno;
}
- /* parse pattern */
- index = 0;
/* the first not void item should be MAC or IPv4 or IPv6 or TCP */
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, NULL);
if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
@@ -850,8 +828,7 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr,
}
/* check if the next not void item is IPv4 or IPv6 */
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
rte_flow_error_set(error, EINVAL,
@@ -873,8 +850,7 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr,
}
/* check if the next not void item is TCP */
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
@@ -918,8 +894,7 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr,
}
/* check if the next not void item is END */
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_END) {
memset(filter, 0, sizeof(struct rte_eth_syn_filter));
rte_flow_error_set(error, EINVAL,
@@ -928,11 +903,8 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr,
return -rte_errno;
}
- /* parse action */
- index = 0;
-
/* check if the first not void action is QUEUE. */
- NEXT_ITEM_OF_ACTION(act, actions, index);
+ act = next_no_void_action(actions, NULL);
if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
memset(filter, 0, sizeof(struct rte_eth_syn_filter));
rte_flow_error_set(error, EINVAL,
@@ -952,8 +924,7 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr,
}
/* check if the next not void item is END */
- index++;
- NEXT_ITEM_OF_ACTION(act, actions, index);
+ act = next_no_void_action(actions, act);
if (act->type != RTE_FLOW_ACTION_TYPE_END) {
memset(filter, 0, sizeof(struct rte_eth_syn_filter));
rte_flow_error_set(error, EINVAL,
@@ -1049,7 +1020,6 @@ cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
const struct rte_flow_item_e_tag *e_tag_mask;
const struct rte_flow_action *act;
const struct rte_flow_action_queue *act_q;
- uint32_t index;
if (!pattern) {
rte_flow_error_set(error, EINVAL,
@@ -1071,11 +1041,9 @@ cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
NULL, "NULL attribute.");
return -rte_errno;
}
- /* parse pattern */
- index = 0;
/* The first not void item should be e-tag. */
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, NULL);
if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
rte_flow_error_set(error, EINVAL,
@@ -1122,8 +1090,7 @@ cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b);
/* check if the next not void item is END */
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_END) {
memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
rte_flow_error_set(error, EINVAL,
@@ -1160,11 +1127,8 @@ cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
return -rte_errno;
}
- /* parse action */
- index = 0;
-
/* check if the first not void action is QUEUE. */
- NEXT_ITEM_OF_ACTION(act, actions, index);
+ act = next_no_void_action(actions, NULL);
if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
rte_flow_error_set(error, EINVAL,
@@ -1177,8 +1141,7 @@ cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
filter->pool = act_q->index;
/* check if the next not void item is END */
- index++;
- NEXT_ITEM_OF_ACTION(act, actions, index);
+ act = next_no_void_action(actions, act);
if (act->type != RTE_FLOW_ACTION_TYPE_END) {
memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
rte_flow_error_set(error, EINVAL,
@@ -1227,7 +1190,6 @@ ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
const struct rte_flow_action *act;
const struct rte_flow_action_queue *act_q;
const struct rte_flow_action_mark *mark;
- uint32_t index;
/* parse attr */
/* must be input direction */
@@ -1257,11 +1219,8 @@ ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
return -rte_errno;
}
- /* parse action */
- index = 0;
-
/* check if the first not void action is QUEUE or DROP. */
- NEXT_ITEM_OF_ACTION(act, actions, index);
+ act = next_no_void_action(actions, NULL);
if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
act->type != RTE_FLOW_ACTION_TYPE_DROP) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
@@ -1279,8 +1238,7 @@ ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
}
/* check if the next not void item is MARK */
- index++;
- NEXT_ITEM_OF_ACTION(act, actions, index);
+ act = next_no_void_action(actions, act);
if ((act->type != RTE_FLOW_ACTION_TYPE_MARK) &&
(act->type != RTE_FLOW_ACTION_TYPE_END)) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
@@ -1295,8 +1253,7 @@ ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
mark = (const struct rte_flow_action_mark *)act->conf;
rule->soft_id = mark->id;
- index++;
- NEXT_ITEM_OF_ACTION(act, actions, index);
+ act = next_no_void_action(actions, act);
}
/* check if the next not void item is END */
@@ -1378,7 +1335,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
const struct rte_flow_item_raw *raw_mask;
const struct rte_flow_item_raw *raw_spec;
- uint32_t index, j;
+ uint32_t j;
if (!pattern) {
rte_flow_error_set(error, EINVAL,
@@ -1410,14 +1367,11 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
rule->mask.vlan_tci_mask = 0;
rule->mask.flex_bytes_mask = 0;
- /* parse pattern */
- index = 0;
-
/**
* The first not void item should be
* MAC or IPv4 or TCP or UDP or SCTP.
*/
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, NULL);
if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
item->type != RTE_FLOW_ITEM_TYPE_TCP &&
@@ -1510,8 +1464,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
* Check if the next not void item is vlan or ipv4.
* IPv6 is not supported.
*/
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, item);
if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
@@ -1558,8 +1511,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
/* More than one tags are not supported. */
/* Next not void item must be END */
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_END) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
@@ -1629,8 +1581,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
* Check if the next not void item is
* TCP or UDP or SCTP or END.
*/
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
item->type != RTE_FLOW_ITEM_TYPE_UDP &&
item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
@@ -1697,8 +1648,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
tcp_spec->hdr.dst_port;
}
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
item->type != RTE_FLOW_ITEM_TYPE_END) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
@@ -1758,8 +1708,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
udp_spec->hdr.dst_port;
}
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
item->type != RTE_FLOW_ITEM_TYPE_END) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
@@ -1821,8 +1770,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
sctp_spec->hdr.dst_port;
}
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
item->type != RTE_FLOW_ITEM_TYPE_END) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
@@ -1906,8 +1854,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
if (item->type != RTE_FLOW_ITEM_TYPE_END) {
/* check if the next not void item is END */
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_END) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
@@ -1975,7 +1922,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
const struct rte_flow_item_eth *eth_mask;
const struct rte_flow_item_vlan *vlan_spec;
const struct rte_flow_item_vlan *vlan_mask;
- uint32_t index, j;
+ uint32_t j;
if (!pattern) {
rte_flow_error_set(error, EINVAL,
@@ -2006,14 +1953,11 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
rule->mask.vlan_tci_mask = 0;
- /* parse pattern */
- index = 0;
-
/**
* The first not void item should be
* MAC or IPv4 or IPv6 or UDP or VxLAN.
*/
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, NULL);
if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
@@ -2048,8 +1992,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
}
/* Check if the next not void item is IPv4 or IPv6. */
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
@@ -2080,8 +2023,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
}
/* Check if the next not void item is UDP or NVGRE. */
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_UDP &&
item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
@@ -2111,8 +2053,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
}
/* Check if the next not void item is VxLAN. */
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
@@ -2268,8 +2209,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
}
/* check if the next not void item is MAC */
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
@@ -2352,8 +2292,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
* Check if the next not void item is vlan or ipv4.
* IPv6 is not supported.
*/
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, item);
if ((item->type != RTE_FLOW_ITEM_TYPE_VLAN) &&
(item->type != RTE_FLOW_ITEM_TYPE_IPV4)) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
@@ -2389,8 +2328,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
/* More than one tags are not supported. */
/* check if the next not void item is END */
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_END) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
--
2.7.4
^ permalink raw reply [flat|nested] 21+ messages in thread
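To make the termination contract of the new helper concrete, here is a minimal sketch of a caller; the pattern array below is invented for illustration, while the helper behaviour is exactly as in the patch (VOID items are skipped, and the walk stops at END):

#include <rte_flow.h>

/* Hypothetical pattern: VOID items may appear anywhere, but the array
 * is assumed to end with a non-void END item, which is what guarantees
 * next_no_void_pattern() terminates (assumption 1 in the patch).
 */
static const struct rte_flow_item example_pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_VOID },
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
	{ .type = RTE_FLOW_ITEM_TYPE_VOID },
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};

static void walk_example(void)
{
	/* Passing NULL for "cur" starts the scan at pattern[0]. */
	const struct rte_flow_item *it =
		next_no_void_pattern(example_pattern, NULL); /* -> ETH */

	it = next_no_void_pattern(example_pattern, it);      /* -> IPV4 */
	it = next_no_void_pattern(example_pattern, it);      /* -> END */
	(void)it; /* per assumption 2, never advance past END */
}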
* [dpdk-dev] [PATCH v2 2/3] net/ixgbe: enable signature match for consistent API
2017-06-07 23:08 ` [dpdk-dev] [PATCH v2 0/3] net/ixgbe: enable signature match and ipv6 " Qi Zhang
2017-06-07 23:08 ` [dpdk-dev] [PATCH v2 1/3] net/ixgbe: replace macro with inline function Qi Zhang
@ 2017-06-07 23:08 ` Qi Zhang
2017-06-07 23:08 ` [dpdk-dev] [PATCH v2 3/3] net/ixgbe: enable IPv6 " Qi Zhang
2017-06-08 6:57 ` [dpdk-dev] [PATCH v2 0/3] net/ixgbe: enable signature match and ipv6 " Lu, Wenzhuo
3 siblings, 0 replies; 21+ messages in thread
From: Qi Zhang @ 2017-06-07 23:08 UTC (permalink / raw)
To: wenzhuo.lu, helin.zhang; +Cc: dev, Qi Zhang
Enable signature match for the rte_flow API.
RTE_FLOW_ITEM_TYPE_FUZZY specifies a signature match.
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
drivers/net/ixgbe/ixgbe_flow.c | 71 +++++++++++++++++++++++++++++++++++-------
1 file changed, 59 insertions(+), 12 deletions(-)
diff --git a/drivers/net/ixgbe/ixgbe_flow.c b/drivers/net/ixgbe/ixgbe_flow.c
index 69829e8..bc42fac 100644
--- a/drivers/net/ixgbe/ixgbe_flow.c
+++ b/drivers/net/ixgbe/ixgbe_flow.c
@@ -1268,6 +1268,47 @@ ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
return 0;
}
+/* search the next not void pattern, skipping roughly-match items */
+static inline
+const struct rte_flow_item *next_no_roughly_pattern(
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_item *cur)
+{
+ const struct rte_flow_item *next =
+ next_no_void_pattern(pattern, cur);
+ while (1) {
+ if (next->type != RTE_FLOW_ITEM_TYPE_FUZZY)
+ return next;
+ next = next_no_void_pattern(pattern, next);
+ }
+}
+
+static inline uint8_t signature_match(const struct rte_flow_item pattern[])
+{
+ const struct rte_flow_item_fuzzy *spec;
+ const struct rte_flow_item *item;
+ int i = 0;
+
+ while (1) {
+ item = pattern + i;
+ if (item->type == RTE_FLOW_ITEM_TYPE_END)
+ break;
+
+ if (item->type == RTE_FLOW_ITEM_TYPE_FUZZY) {
+ spec =
+ (const struct rte_flow_item_fuzzy *)item->spec;
+ if (spec->threshold)
+ return 1;
+ else
+ return 0;
+ }
+
+ i++;
+ }
+
+ return 0;
+}
+
/**
* Parse the rule to see if it is a IP or MAC VLAN flow director rule.
* And get the flow director filter info BTW.
@@ -1277,6 +1318,7 @@ ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
* The next not void item could be UDP or TCP or SCTP (optional)
* The next not void item could be RAW (for flexbyte, optional)
* The next not void item must be END.
+ * A Roughly Match pattern can appear at any place before END (optional)
* MAC VLAN PATTERN:
* The first not void item must be ETH.
* The second not void item must be MAC VLAN.
@@ -1371,7 +1413,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
* The first not void item should be
* MAC or IPv4 or TCP or UDP or SCTP.
*/
- item = next_no_void_pattern(pattern, NULL);
+ item = next_no_roughly_pattern(pattern, NULL);
if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
item->type != RTE_FLOW_ITEM_TYPE_TCP &&
@@ -1384,7 +1426,10 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
return -rte_errno;
}
- rule->mode = RTE_FDIR_MODE_PERFECT;
+ if (signature_match(pattern))
+ rule->mode = RTE_FDIR_MODE_SIGNATURE;
+ else
+ rule->mode = RTE_FDIR_MODE_PERFECT;
/*Not supported last point for range*/
if (item->last) {
@@ -1421,14 +1466,13 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
if (item->mask) {
- /* If ethernet has meaning, it means MAC VLAN mode. */
- rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;
rule->b_mask = TRUE;
eth_mask = (const struct rte_flow_item_eth *)item->mask;
/* Ether type should be masked. */
- if (eth_mask->type) {
+ if (eth_mask->type ||
+ rule->mode == RTE_FDIR_MODE_SIGNATURE) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
@@ -1436,6 +1480,9 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
return -rte_errno;
}
+ /* If ethernet has meaning, it means MAC VLAN mode. */
+ rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;
+
/**
* src MAC address must be masked,
* and don't support dst MAC address mask.
@@ -1464,7 +1511,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
* Check if the next not void item is vlan or ipv4.
* IPv6 is not supported.
*/
- item = next_no_void_pattern(pattern, item);
+ item = next_no_roughly_pattern(pattern, item);
if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
@@ -1511,7 +1558,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
/* More than one tags are not supported. */
/* Next not void item must be END */
- item = next_no_void_pattern(pattern, item);
+ item = next_no_roughly_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_END) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
@@ -1581,7 +1628,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
* Check if the next not void item is
* TCP or UDP or SCTP or END.
*/
- item = next_no_void_pattern(pattern, item);
+ item = next_no_roughly_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
item->type != RTE_FLOW_ITEM_TYPE_UDP &&
item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
@@ -1648,7 +1695,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
tcp_spec->hdr.dst_port;
}
- item = next_no_void_pattern(pattern, item);
+ item = next_no_roughly_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
item->type != RTE_FLOW_ITEM_TYPE_END) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
@@ -1708,7 +1755,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
udp_spec->hdr.dst_port;
}
- item = next_no_void_pattern(pattern, item);
+ item = next_no_roughly_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
item->type != RTE_FLOW_ITEM_TYPE_END) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
@@ -1770,7 +1817,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
sctp_spec->hdr.dst_port;
}
- item = next_no_void_pattern(pattern, item);
+ item = next_no_roughly_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
item->type != RTE_FLOW_ITEM_TYPE_END) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
@@ -1854,7 +1901,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
if (item->type != RTE_FLOW_ITEM_TYPE_END) {
/* check if the next not void item is END */
- item = next_no_void_pattern(pattern, item);
+ item = next_no_roughly_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_END) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
--
2.7.4
^ permalink raw reply [flat|nested] 21+ messages in thread
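Condensing the reordered ETH-mask hunk above: signature_match() returns non-zero when the pattern holds a FUZZY item with a non-zero threshold, and an ETH mask is only meaningful for perfect rules. A hedged, standalone restatement of that control flow (the names are invented, not driver code):

enum example_fdir_mode {
	EXAMPLE_MODE_PERFECT,
	EXAMPLE_MODE_SIGNATURE,
	EXAMPLE_MODE_PERFECT_MAC_VLAN,
	EXAMPLE_MODE_INVALID
};

/* An ETH item with a mask upgrades a perfect rule to MAC VLAN mode,
 * but combined with signature mode the rule is rejected, which is why
 * the patch moves the MAC VLAN assignment below the error check.
 */
static enum example_fdir_mode
pick_mode(int fuzzy_threshold_set, int eth_has_mask)
{
	if (fuzzy_threshold_set)
		return eth_has_mask ? EXAMPLE_MODE_INVALID :
				EXAMPLE_MODE_SIGNATURE;
	return eth_has_mask ? EXAMPLE_MODE_PERFECT_MAC_VLAN :
			EXAMPLE_MODE_PERFECT;
}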
* [dpdk-dev] [PATCH v2 3/3] net/ixgbe: enable IPv6 for consistent API
2017-06-07 23:08 ` [dpdk-dev] [PATCH v2 0/3] net/ixgbe: enable signature match and ipv6 " Qi Zhang
2017-06-07 23:08 ` [dpdk-dev] [PATCH v2 1/3] net/ixgbe: replace macro with inline function Qi Zhang
2017-06-07 23:08 ` [dpdk-dev] [PATCH v2 2/3] net/ixgbe: enable signature match for consistent API Qi Zhang
@ 2017-06-07 23:08 ` Qi Zhang
2017-06-08 6:57 ` [dpdk-dev] [PATCH v2 0/3] net/ixgbe: enable signature match and ipv6 " Lu, Wenzhuo
3 siblings, 0 replies; 21+ messages in thread
From: Qi Zhang @ 2017-06-07 23:08 UTC (permalink / raw)
To: wenzhuo.lu, helin.zhang; +Cc: dev, Qi Zhang
Enable IPv6 support with the rte_flow API.
Only signature match is supported.
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
v2:
- fix flow type assignment.
drivers/net/ixgbe/ixgbe_flow.c | 118 +++++++++++++++++++++++++++++++++++++----
1 file changed, 107 insertions(+), 11 deletions(-)
diff --git a/drivers/net/ixgbe/ixgbe_flow.c b/drivers/net/ixgbe/ixgbe_flow.c
index bc42fac..4daf3ad 100644
--- a/drivers/net/ixgbe/ixgbe_flow.c
+++ b/drivers/net/ixgbe/ixgbe_flow.c
@@ -1313,12 +1313,13 @@ static inline uint8_t signature_match(const struct rte_flow_item pattern[])
* Parse the rule to see if it is a IP or MAC VLAN flow director rule.
* And get the flow director filter info BTW.
* UDP/TCP/SCTP PATTERN:
- * The first not void item can be ETH or IPV4.
- * The second not void item must be IPV4 if the first one is ETH.
+ * The first not void item can be ETH or IPV4 or IPV6
+ * The second not void item must be IPV4 or IPV6 if the first one is ETH.
* The next not void item could be UDP or TCP or SCTP (optional)
* The next not void item could be RAW (for flexbyte, optional)
* The next not void item must be END.
- * A Roughly Match pattern can appear at any place before END (optional)
+ * A Roughly Match pattern can appear at any place before END.
+ * Roughly Match is optional for IPV4 but is required for IPV6
* MAC VLAN PATTERN:
* The first not void item must be ETH.
* The second not void item must be MAC VLAN.
@@ -1366,6 +1367,8 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
const struct rte_flow_item_eth *eth_mask;
const struct rte_flow_item_ipv4 *ipv4_spec;
const struct rte_flow_item_ipv4 *ipv4_mask;
+ const struct rte_flow_item_ipv6 *ipv6_spec;
+ const struct rte_flow_item_ipv6 *ipv6_mask;
const struct rte_flow_item_tcp *tcp_spec;
const struct rte_flow_item_tcp *tcp_mask;
const struct rte_flow_item_udp *udp_spec;
@@ -1377,7 +1380,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
const struct rte_flow_item_raw *raw_mask;
const struct rte_flow_item_raw *raw_spec;
- uint32_t j;
+ uint8_t j;
if (!pattern) {
rte_flow_error_set(error, EINVAL,
@@ -1416,6 +1419,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
item = next_no_roughly_pattern(pattern, NULL);
if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
item->type != RTE_FLOW_ITEM_TYPE_TCP &&
item->type != RTE_FLOW_ITEM_TYPE_UDP &&
item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
@@ -1568,7 +1572,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
}
}
- /* Get the IP info. */
+ /* Get the IPV4 info. */
if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
/**
* Set the flow type even if there's no content
@@ -1642,14 +1646,106 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
}
}
+ /* Get the IPV6 info. */
+ if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
+ /**
+ * Set the flow type even if there's no content
+ * as we must have a flow type.
+ */
+ rule->ixgbe_fdir.formatted.flow_type =
+ IXGBE_ATR_FLOW_TYPE_IPV6;
+
+ /**
+	 * 1. must be a signature match
+	 * 2. last is not supported
+	 * 3. mask must not be NULL
+ */
+ if (rule->mode != RTE_FDIR_MODE_SIGNATURE ||
+ item->last ||
+ !item->mask) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ rule->b_mask = TRUE;
+ ipv6_mask =
+ (const struct rte_flow_item_ipv6 *)item->mask;
+ if (ipv6_mask->hdr.vtc_flow ||
+ ipv6_mask->hdr.payload_len ||
+ ipv6_mask->hdr.proto ||
+ ipv6_mask->hdr.hop_limits) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
+ /* check src addr mask */
+ for (j = 0; j < 16; j++) {
+ if (ipv6_mask->hdr.src_addr[j] == UINT8_MAX) {
+ rule->mask.src_ipv6_mask |= 1 << j;
+ } else if (ipv6_mask->hdr.src_addr[j] != 0) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+
+ /* check dst addr mask */
+ for (j = 0; j < 16; j++) {
+ if (ipv6_mask->hdr.dst_addr[j] == UINT8_MAX) {
+ rule->mask.dst_ipv6_mask |= 1 << j;
+ } else if (ipv6_mask->hdr.dst_addr[j] != 0) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+
+ if (item->spec) {
+ rule->b_spec = TRUE;
+ ipv6_spec =
+ (const struct rte_flow_item_ipv6 *)item->spec;
+ rte_memcpy(rule->ixgbe_fdir.formatted.src_ip,
+ ipv6_spec->hdr.src_addr, 16);
+ rte_memcpy(rule->ixgbe_fdir.formatted.dst_ip,
+ ipv6_spec->hdr.dst_addr, 16);
+ }
+
+ /**
+ * Check if the next not void item is
+ * TCP or UDP or SCTP or END.
+ */
+ item = next_no_roughly_pattern(pattern, item);
+ if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
+ item->type != RTE_FLOW_ITEM_TYPE_UDP &&
+ item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
+ item->type != RTE_FLOW_ITEM_TYPE_END &&
+ item->type != RTE_FLOW_ITEM_TYPE_RAW) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+
/* Get the TCP info. */
if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
/**
* Set the flow type even if there's no content
* as we must have a flow type.
*/
- rule->ixgbe_fdir.formatted.flow_type =
- IXGBE_ATR_FLOW_TYPE_TCPV4;
+ rule->ixgbe_fdir.formatted.flow_type |=
+ IXGBE_ATR_L4TYPE_TCP;
/*Not supported last point for range*/
if (item->last) {
rte_flow_error_set(error, EINVAL,
@@ -1713,8 +1809,8 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
* Set the flow type even if there's no content
* as we must have a flow type.
*/
- rule->ixgbe_fdir.formatted.flow_type =
- IXGBE_ATR_FLOW_TYPE_UDPV4;
+ rule->ixgbe_fdir.formatted.flow_type |=
+ IXGBE_ATR_L4TYPE_UDP;
/*Not supported last point for range*/
if (item->last) {
rte_flow_error_set(error, EINVAL,
@@ -1773,8 +1869,8 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
* Set the flow type even if there's no content
* as we must have a flow type.
*/
- rule->ixgbe_fdir.formatted.flow_type =
- IXGBE_ATR_FLOW_TYPE_SCTPV4;
+ rule->ixgbe_fdir.formatted.flow_type |=
+ IXGBE_ATR_L4TYPE_SCTP;
/*Not supported last point for range*/
if (item->last) {
rte_flow_error_set(error, EINVAL,
--
2.7.4
^ permalink raw reply [flat|nested] 21+ messages in thread
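The two per-byte loops above reduce each 16-byte IPv6 address mask to a 16-bit bitmap, accepting only all-zeros or all-ones bytes. A standalone sketch of that reduction (the function name is invented for illustration):

#include <stdint.h>

/* Returns the 16-bit bitmap on success, or -1 when any mask byte is
 * partial; the filter only supports per-byte all-or-nothing masks.
 */
static int ipv6_mask_to_bitmap(const uint8_t addr_mask[16])
{
	uint16_t bitmap = 0;
	int j;

	for (j = 0; j < 16; j++) {
		if (addr_mask[j] == UINT8_MAX)
			bitmap |= 1 << j;
		else if (addr_mask[j] != 0)
			return -1; /* the patch rejects this with EINVAL */
	}
	return bitmap;
}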
* Re: [dpdk-dev] [PATCH v2 0/3] net/ixgbe: enable signature match and ipv6 for consistent API.
2017-06-07 23:08 ` [dpdk-dev] [PATCH v2 0/3] net/ixgbe: enable signature match and ipv6 " Qi Zhang
` (2 preceding siblings ...)
2017-06-07 23:08 ` [dpdk-dev] [PATCH v2 3/3] net/ixgbe: enable IPv6 " Qi Zhang
@ 2017-06-08 6:57 ` Lu, Wenzhuo
3 siblings, 0 replies; 21+ messages in thread
From: Lu, Wenzhuo @ 2017-06-08 6:57 UTC (permalink / raw)
To: Zhang, Qi Z, Zhang, Helin; +Cc: dev
Hi,
> -----Original Message-----
> From: Zhang, Qi Z
> Sent: Thursday, June 8, 2017 7:09 AM
> To: Lu, Wenzhuo; Zhang, Helin
> Cc: dev@dpdk.org; Zhang, Qi Z
> Subject: [PATCH v2 0/3] net/ixgbe: enable signature match and ipv6 for
> consistent API.
>
> The patchset is based on
> http://dpdk.org/dev/patchwork/patch/25015
> http://dpdk.org/dev/patchwork/patch/25185
>
> v2:
> - add comment to explain macro replacement
> - fix wrong flow type assignment
>
> Qi Zhang (3):
> net/ixgbe: replace macro with inline function
> net/ixgbe: enable signature match for consistent API
> net/ixgbe: enable IPv6 for consistent API
>
> drivers/net/ixgbe/ixgbe_flow.c | 377 +++++++++++++++++++++++++-------------
> ---
> 1 file changed, 229 insertions(+), 148 deletions(-)
>
> --
> 2.7.4
Series- Acked-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
^ permalink raw reply [flat|nested] 21+ messages in thread
* [dpdk-dev] [PATCH v4 0/3] net/ixgbe: enable signature match and ipv6 for consistent API.
2017-05-26 19:52 [dpdk-dev] [PATCH 0/3] net/ixgbe: enable signature match and ipv6 for consistent API Qi Zhang
` (3 preceding siblings ...)
2017-06-07 23:08 ` [dpdk-dev] [PATCH v2 0/3] net/ixgbe: enable signature match and ipv6 " Qi Zhang
@ 2017-07-05 2:21 ` Qi Zhang
2017-07-05 2:21 ` [dpdk-dev] [PATCH v4 1/3] net/ixgbe: replace macro with inline function Qi Zhang
` (3 more replies)
4 siblings, 4 replies; 21+ messages in thread
From: Qi Zhang @ 2017-07-05 2:21 UTC (permalink / raw)
To: wenzhuo.lu, helin.zhang; +Cc: dev, Qi Zhang
The patchset is based on
http://dpdk.org/dev/patchwork/patch/25292/
v4:
- fix missing rename from roughly to fuzzy
v3:
- will check fuzzy match's last and mask value.
v2:
- add comment to explain macro replacement
- fix wrong flow type assignment
Qi Zhang (3):
net/ixgbe: replace macro with inline function
net/ixgbe: enable signature match for consistent API
net/ixgbe: enable IPv6 for consistent API
drivers/net/ixgbe/ixgbe_flow.c | 397 ++++++++++++++++++++++++++---------------
1 file changed, 249 insertions(+), 148 deletions(-)
--
2.7.4
^ permalink raw reply [flat|nested] 21+ messages in thread
* [dpdk-dev] [PATCH v4 1/3] net/ixgbe: replace macro with inline function
2017-07-05 2:21 ` [dpdk-dev] [PATCH v4 " Qi Zhang
@ 2017-07-05 2:21 ` Qi Zhang
2017-07-05 2:21 ` [dpdk-dev] [PATCH v4 2/3] net/ixgbe: enable signature match for consistent API Qi Zhang
` (2 subsequent siblings)
3 siblings, 0 replies; 21+ messages in thread
From: Qi Zhang @ 2017-07-05 2:21 UTC (permalink / raw)
To: wenzhuo.lu, helin.zhang; +Cc: dev, Qi Zhang
Code cleanup with two purposes:
1. No "index" variable is needed.
2. An inline function is easy and safe to nest inside a loop.
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
v2:
- Update the commit log
drivers/net/ixgbe/ixgbe_flow.c | 208 +++++++++++++++--------------------------
1 file changed, 73 insertions(+), 135 deletions(-)
diff --git a/drivers/net/ixgbe/ixgbe_flow.c b/drivers/net/ixgbe/ixgbe_flow.c
index ccc73fa..067252a 100644
--- a/drivers/net/ixgbe/ixgbe_flow.c
+++ b/drivers/net/ixgbe/ixgbe_flow.c
@@ -79,23 +79,39 @@
#define IXGBE_MIN_N_TUPLE_PRIO 1
#define IXGBE_MAX_N_TUPLE_PRIO 7
#define IXGBE_MAX_FLX_SOURCE_OFF 62
-#define NEXT_ITEM_OF_PATTERN(item, pattern, index)\
- do { \
- item = pattern + index;\
- while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {\
- index++; \
- item = pattern + index; \
- } \
- } while (0)
-
-#define NEXT_ITEM_OF_ACTION(act, actions, index)\
- do { \
- act = actions + index; \
- while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {\
- index++; \
- act = actions + index; \
- } \
- } while (0)
+
+/**
+ * An endless loop can never happen given the assumptions below:
+ * 1. there is at least one no-void item (END)
+ * 2. cur is before END.
+ */
+static inline
+const struct rte_flow_item *next_no_void_pattern(
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_item *cur)
+{
+ const struct rte_flow_item *next =
+ cur ? cur + 1 : &pattern[0];
+ while (1) {
+ if (next->type != RTE_FLOW_ITEM_TYPE_VOID)
+ return next;
+ next++;
+ }
+}
+
+static inline
+const struct rte_flow_action *next_no_void_action(
+ const struct rte_flow_action actions[],
+ const struct rte_flow_action *cur)
+{
+ const struct rte_flow_action *next =
+ cur ? cur + 1 : &actions[0];
+ while (1) {
+ if (next->type != RTE_FLOW_ACTION_TYPE_VOID)
+ return next;
+ next++;
+ }
+}
/**
* Please be aware there's an assumption for all the parsers.
@@ -145,7 +161,6 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
const struct rte_flow_item_udp *udp_mask;
const struct rte_flow_item_sctp *sctp_spec;
const struct rte_flow_item_sctp *sctp_mask;
- uint32_t index;
if (!pattern) {
rte_flow_error_set(error,
@@ -167,11 +182,8 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
return -rte_errno;
}
- /* parse pattern */
- index = 0;
-
/* the first not void item can be MAC or IPv4 */
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, NULL);
if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
@@ -199,8 +211,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
return -rte_errno;
}
/* check if the next not void item is IPv4 */
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
rte_flow_error_set(error,
EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
@@ -253,8 +264,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
filter->proto = ipv4_spec->hdr.next_proto_id;
/* check if the next not void item is TCP or UDP */
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
item->type != RTE_FLOW_ITEM_TYPE_UDP &&
item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
@@ -372,8 +382,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
}
/* check if the next not void item is END */
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_END) {
memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
rte_flow_error_set(error, EINVAL,
@@ -382,14 +391,11 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
return -rte_errno;
}
- /* parse action */
- index = 0;
-
/**
* n-tuple only supports forwarding,
* check if the first not void action is QUEUE.
*/
- NEXT_ITEM_OF_ACTION(act, actions, index);
+ act = next_no_void_action(actions, NULL);
if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
rte_flow_error_set(error, EINVAL,
@@ -401,8 +407,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
((const struct rte_flow_action_queue *)act->conf)->index;
/* check if the next not void item is END */
- index++;
- NEXT_ITEM_OF_ACTION(act, actions, index);
+ act = next_no_void_action(actions, act);
if (act->type != RTE_FLOW_ACTION_TYPE_END) {
memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
rte_flow_error_set(error, EINVAL,
@@ -521,7 +526,6 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
const struct rte_flow_item_eth *eth_spec;
const struct rte_flow_item_eth *eth_mask;
const struct rte_flow_action_queue *act_q;
- uint32_t index;
if (!pattern) {
rte_flow_error_set(error, EINVAL,
@@ -544,15 +548,8 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
return -rte_errno;
}
- /* Parse pattern */
- index = 0;
-
+ item = next_no_void_pattern(pattern, NULL);
/* The first non-void item should be MAC. */
- item = pattern + index;
- while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {
- index++;
- item = pattern + index;
- }
if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
@@ -611,12 +608,7 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
/* Check if the next non-void item is END. */
- index++;
- item = pattern + index;
- while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {
- index++;
- item = pattern + index;
- }
+ item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_END) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
@@ -626,13 +618,7 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
/* Parse action */
- index = 0;
- /* Check if the first non-void action is QUEUE or DROP. */
- act = actions + index;
- while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {
- index++;
- act = actions + index;
- }
+ act = next_no_void_action(actions, NULL);
if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
act->type != RTE_FLOW_ACTION_TYPE_DROP) {
rte_flow_error_set(error, EINVAL,
@@ -649,12 +635,7 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
}
/* Check if the next non-void item is END */
- index++;
- act = actions + index;
- while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {
- index++;
- act = actions + index;
- }
+ act = next_no_void_action(actions, act);
if (act->type != RTE_FLOW_ACTION_TYPE_END) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
@@ -794,7 +775,6 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr,
const struct rte_flow_item_tcp *tcp_spec;
const struct rte_flow_item_tcp *tcp_mask;
const struct rte_flow_action_queue *act_q;
- uint32_t index;
if (!pattern) {
rte_flow_error_set(error, EINVAL,
@@ -817,11 +797,9 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr,
return -rte_errno;
}
- /* parse pattern */
- index = 0;
/* the first not void item should be MAC or IPv4 or IPv6 or TCP */
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, NULL);
if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
@@ -850,8 +828,7 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr,
}
/* check if the next not void item is IPv4 or IPv6 */
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
rte_flow_error_set(error, EINVAL,
@@ -873,8 +850,7 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr,
}
/* check if the next not void item is TCP */
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
@@ -918,8 +894,7 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr,
}
/* check if the next not void item is END */
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_END) {
memset(filter, 0, sizeof(struct rte_eth_syn_filter));
rte_flow_error_set(error, EINVAL,
@@ -928,11 +903,8 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr,
return -rte_errno;
}
- /* parse action */
- index = 0;
-
/* check if the first not void action is QUEUE. */
- NEXT_ITEM_OF_ACTION(act, actions, index);
+ act = next_no_void_action(actions, NULL);
if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
memset(filter, 0, sizeof(struct rte_eth_syn_filter));
rte_flow_error_set(error, EINVAL,
@@ -952,8 +924,7 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr,
}
/* check if the next not void item is END */
- index++;
- NEXT_ITEM_OF_ACTION(act, actions, index);
+ act = next_no_void_action(actions, act);
if (act->type != RTE_FLOW_ACTION_TYPE_END) {
memset(filter, 0, sizeof(struct rte_eth_syn_filter));
rte_flow_error_set(error, EINVAL,
@@ -1049,7 +1020,6 @@ cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
const struct rte_flow_item_e_tag *e_tag_mask;
const struct rte_flow_action *act;
const struct rte_flow_action_queue *act_q;
- uint32_t index;
if (!pattern) {
rte_flow_error_set(error, EINVAL,
@@ -1071,11 +1041,9 @@ cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
NULL, "NULL attribute.");
return -rte_errno;
}
- /* parse pattern */
- index = 0;
/* The first not void item should be e-tag. */
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, NULL);
if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
rte_flow_error_set(error, EINVAL,
@@ -1122,8 +1090,7 @@ cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b);
/* check if the next not void item is END */
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_END) {
memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
rte_flow_error_set(error, EINVAL,
@@ -1160,11 +1127,8 @@ cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
return -rte_errno;
}
- /* parse action */
- index = 0;
-
/* check if the first not void action is QUEUE. */
- NEXT_ITEM_OF_ACTION(act, actions, index);
+ act = next_no_void_action(actions, NULL);
if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
rte_flow_error_set(error, EINVAL,
@@ -1177,8 +1141,7 @@ cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
filter->pool = act_q->index;
/* check if the next not void item is END */
- index++;
- NEXT_ITEM_OF_ACTION(act, actions, index);
+ act = next_no_void_action(actions, act);
if (act->type != RTE_FLOW_ACTION_TYPE_END) {
memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
rte_flow_error_set(error, EINVAL,
@@ -1227,7 +1190,6 @@ ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
const struct rte_flow_action *act;
const struct rte_flow_action_queue *act_q;
const struct rte_flow_action_mark *mark;
- uint32_t index;
/* parse attr */
/* must be input direction */
@@ -1257,11 +1219,8 @@ ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
return -rte_errno;
}
- /* parse action */
- index = 0;
-
/* check if the first not void action is QUEUE or DROP. */
- NEXT_ITEM_OF_ACTION(act, actions, index);
+ act = next_no_void_action(actions, NULL);
if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
act->type != RTE_FLOW_ACTION_TYPE_DROP) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
@@ -1279,8 +1238,7 @@ ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
}
/* check if the next not void item is MARK */
- index++;
- NEXT_ITEM_OF_ACTION(act, actions, index);
+ act = next_no_void_action(actions, act);
if ((act->type != RTE_FLOW_ACTION_TYPE_MARK) &&
(act->type != RTE_FLOW_ACTION_TYPE_END)) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
@@ -1295,8 +1253,7 @@ ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
mark = (const struct rte_flow_action_mark *)act->conf;
rule->soft_id = mark->id;
- index++;
- NEXT_ITEM_OF_ACTION(act, actions, index);
+ act = next_no_void_action(actions, act);
}
/* check if the next not void item is END */
@@ -1378,7 +1335,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
const struct rte_flow_item_raw *raw_mask;
const struct rte_flow_item_raw *raw_spec;
- uint32_t index, j;
+ uint32_t j;
if (!pattern) {
rte_flow_error_set(error, EINVAL,
@@ -1410,14 +1367,11 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
rule->mask.vlan_tci_mask = 0;
rule->mask.flex_bytes_mask = 0;
- /* parse pattern */
- index = 0;
-
/**
* The first not void item should be
* MAC or IPv4 or TCP or UDP or SCTP.
*/
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, NULL);
if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
item->type != RTE_FLOW_ITEM_TYPE_TCP &&
@@ -1510,8 +1464,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
* Check if the next not void item is vlan or ipv4.
* IPv6 is not supported.
*/
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, item);
if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
@@ -1558,8 +1511,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
/* More than one tags are not supported. */
/* Next not void item must be END */
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_END) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
@@ -1629,8 +1581,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
* Check if the next not void item is
* TCP or UDP or SCTP or END.
*/
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
item->type != RTE_FLOW_ITEM_TYPE_UDP &&
item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
@@ -1697,8 +1648,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
tcp_spec->hdr.dst_port;
}
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
item->type != RTE_FLOW_ITEM_TYPE_END) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
@@ -1758,8 +1708,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
udp_spec->hdr.dst_port;
}
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
item->type != RTE_FLOW_ITEM_TYPE_END) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
@@ -1821,8 +1770,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
sctp_spec->hdr.dst_port;
}
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
item->type != RTE_FLOW_ITEM_TYPE_END) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
@@ -1906,8 +1854,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
if (item->type != RTE_FLOW_ITEM_TYPE_END) {
/* check if the next not void item is END */
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_END) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
@@ -1975,7 +1922,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
const struct rte_flow_item_eth *eth_mask;
const struct rte_flow_item_vlan *vlan_spec;
const struct rte_flow_item_vlan *vlan_mask;
- uint32_t index, j;
+ uint32_t j;
if (!pattern) {
rte_flow_error_set(error, EINVAL,
@@ -2006,14 +1953,11 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
rule->mask.vlan_tci_mask = 0;
- /* parse pattern */
- index = 0;
-
/**
* The first not void item should be
* MAC or IPv4 or IPv6 or UDP or VxLAN.
*/
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, NULL);
if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
@@ -2048,8 +1992,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
}
/* Check if the next not void item is IPv4 or IPv6. */
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
@@ -2080,8 +2023,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
}
/* Check if the next not void item is UDP or NVGRE. */
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_UDP &&
item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
@@ -2111,8 +2053,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
}
/* Check if the next not void item is VxLAN. */
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
@@ -2268,8 +2209,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
}
/* check if the next not void item is MAC */
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
@@ -2352,8 +2292,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
* Check if the next not void item is vlan or ipv4.
* IPv6 is not supported.
*/
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, item);
if ((item->type != RTE_FLOW_ITEM_TYPE_VLAN) &&
(item->type != RTE_FLOW_ITEM_TYPE_IPV4)) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
@@ -2389,8 +2328,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
/* More than one tags are not supported. */
/* check if the next not void item is END */
- index++;
- NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_END) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
--
2.7.4
^ permalink raw reply [flat|nested] 21+ messages in thread
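To illustrate point 2 of the commit message, compare the old macro's reliance on an external cursor with the new helper. The fragment below is a hypothetical caller, shown only to make the hazard concrete:

/* Old style: the macro advances a shared "index"; a caller that scans
 * the pattern twice (or scans inside a loop) must remember to reset
 * it, and forgetting simply starts the next scan mid-array.
 */
static void old_style(const struct rte_flow_item pattern[])
{
	const struct rte_flow_item *item;
	uint32_t index = 0;

	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	index = 0;             /* easy to forget before a second scan */
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	(void)item;
}

/* New style: the cursor is the returned pointer, so there is no hidden
 * state to reset and the call nests safely inside loops.
 */
static void new_style(const struct rte_flow_item pattern[])
{
	const struct rte_flow_item *item;

	item = next_no_void_pattern(pattern, NULL);
	item = next_no_void_pattern(pattern, item);
	(void)item;
}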
* [dpdk-dev] [PATCH v4 2/3] net/ixgbe: enable signature match for consistent API
2017-07-05 2:21 ` [dpdk-dev] [PATCH v4 " Qi Zhang
2017-07-05 2:21 ` [dpdk-dev] [PATCH v4 1/3] net/ixgbe: replace macro with inline function Qi Zhang
@ 2017-07-05 2:21 ` Qi Zhang
2017-07-05 2:21 ` [dpdk-dev] [PATCH v4 3/3] net/ixgbe: enable IPv6 " Qi Zhang
2017-07-05 18:30 ` [dpdk-dev] [PATCH v4 0/3] net/ixgbe: enable signature match and ipv6 " Ferruh Yigit
3 siblings, 0 replies; 21+ messages in thread
From: Qi Zhang @ 2017-07-05 2:21 UTC (permalink / raw)
To: wenzhuo.lu, helin.zhang; +Cc: dev, Qi Zhang
Enable signature match for the rte_flow API.
RTE_FLOW_ITEM_TYPE_FUZZY specifies a signature match.
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
v4:
- rename roughly to fuzzy
v2:
- will check fuzzy match's last and mask value.
drivers/net/ixgbe/ixgbe_flow.c | 91 ++++++++++++++++++++++++++++++++++++------
1 file changed, 79 insertions(+), 12 deletions(-)
diff --git a/drivers/net/ixgbe/ixgbe_flow.c b/drivers/net/ixgbe/ixgbe_flow.c
index 067252a..593f267 100644
--- a/drivers/net/ixgbe/ixgbe_flow.c
+++ b/drivers/net/ixgbe/ixgbe_flow.c
@@ -1268,6 +1268,67 @@ ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
return 0;
}
+/* search the next not void pattern, skipping fuzzy items */
+static inline
+const struct rte_flow_item *next_no_fuzzy_pattern(
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_item *cur)
+{
+ const struct rte_flow_item *next =
+ next_no_void_pattern(pattern, cur);
+ while (1) {
+ if (next->type != RTE_FLOW_ITEM_TYPE_FUZZY)
+ return next;
+ next = next_no_void_pattern(pattern, next);
+ }
+}
+
+static inline uint8_t signature_match(const struct rte_flow_item pattern[])
+{
+ const struct rte_flow_item_fuzzy *spec, *last, *mask;
+ const struct rte_flow_item *item;
+ uint32_t sh, lh, mh;
+ int i = 0;
+
+ while (1) {
+ item = pattern + i;
+ if (item->type == RTE_FLOW_ITEM_TYPE_END)
+ break;
+
+ if (item->type == RTE_FLOW_ITEM_TYPE_FUZZY) {
+ spec =
+ (const struct rte_flow_item_fuzzy *)item->spec;
+ last =
+ (const struct rte_flow_item_fuzzy *)item->last;
+ mask =
+ (const struct rte_flow_item_fuzzy *)item->mask;
+
+ if (!spec || !mask)
+ return 0;
+
+ sh = spec->thresh;
+
+ if (!last)
+ lh = sh;
+ else
+ lh = last->thresh;
+
+ mh = mask->thresh;
+ sh = sh & mh;
+ lh = lh & mh;
+
+ if (!sh || sh > lh)
+ return 0;
+
+ return 1;
+ }
+
+ i++;
+ }
+
+ return 0;
+}
+
/**
* Parse the rule to see if it is a IP or MAC VLAN flow director rule.
* And get the flow director filter info BTW.
@@ -1277,6 +1338,7 @@ ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
* The next not void item could be UDP or TCP or SCTP (optional)
* The next not void item could be RAW (for flexbyte, optional)
* The next not void item must be END.
+ * A Fuzzy Match pattern can appear at any place before END (optional)
* MAC VLAN PATTERN:
* The first not void item must be ETH.
* The second not void item must be MAC VLAN.
@@ -1371,7 +1433,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
* The first not void item should be
* MAC or IPv4 or TCP or UDP or SCTP.
*/
- item = next_no_void_pattern(pattern, NULL);
+ item = next_no_fuzzy_pattern(pattern, NULL);
if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
item->type != RTE_FLOW_ITEM_TYPE_TCP &&
@@ -1384,7 +1446,10 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
return -rte_errno;
}
- rule->mode = RTE_FDIR_MODE_PERFECT;
+ if (signature_match(pattern))
+ rule->mode = RTE_FDIR_MODE_SIGNATURE;
+ else
+ rule->mode = RTE_FDIR_MODE_PERFECT;
/*Not supported last point for range*/
if (item->last) {
@@ -1421,14 +1486,13 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
if (item->mask) {
- /* If ethernet has meaning, it means MAC VLAN mode. */
- rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;
rule->b_mask = TRUE;
eth_mask = (const struct rte_flow_item_eth *)item->mask;
/* Ether type should be masked. */
- if (eth_mask->type) {
+ if (eth_mask->type ||
+ rule->mode == RTE_FDIR_MODE_SIGNATURE) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
@@ -1436,6 +1500,9 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
return -rte_errno;
}
+ /* If ethernet has meaning, it means MAC VLAN mode. */
+ rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;
+
/**
* src MAC address must be masked,
* and don't support dst MAC address mask.
@@ -1464,7 +1531,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
* Check if the next not void item is vlan or ipv4.
* IPv6 is not supported.
*/
- item = next_no_void_pattern(pattern, item);
+ item = next_no_fuzzy_pattern(pattern, item);
if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
@@ -1511,7 +1578,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
/* More than one tags are not supported. */
/* Next not void item must be END */
- item = next_no_void_pattern(pattern, item);
+ item = next_no_fuzzy_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_END) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
@@ -1581,7 +1648,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
* Check if the next not void item is
* TCP or UDP or SCTP or END.
*/
- item = next_no_void_pattern(pattern, item);
+ item = next_no_fuzzy_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
item->type != RTE_FLOW_ITEM_TYPE_UDP &&
item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
@@ -1648,7 +1715,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
tcp_spec->hdr.dst_port;
}
- item = next_no_void_pattern(pattern, item);
+ item = next_no_fuzzy_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
item->type != RTE_FLOW_ITEM_TYPE_END) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
@@ -1708,7 +1775,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
udp_spec->hdr.dst_port;
}
- item = next_no_void_pattern(pattern, item);
+ item = next_no_fuzzy_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
item->type != RTE_FLOW_ITEM_TYPE_END) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
@@ -1770,7 +1837,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
sctp_spec->hdr.dst_port;
}
- item = next_no_void_pattern(pattern, item);
+ item = next_no_fuzzy_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
item->type != RTE_FLOW_ITEM_TYPE_END) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
@@ -1854,7 +1921,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
if (item->type != RTE_FLOW_ITEM_TYPE_END) {
/* check if the next not void item is END */
- item = next_no_void_pattern(pattern, item);
+ item = next_no_fuzzy_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_END) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
--
2.7.4
^ permalink raw reply [flat|nested] 21+ messages in thread
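From the application side, a pattern passes the stricter signature_match() above when the fuzzy item carries both a spec and a mask and the masked threshold is non-zero (with spec <= last if a range is given). A minimal sketch of such a pattern, with placeholder threshold values:

#include <rte_flow.h>

static const struct rte_flow_item_fuzzy fuzzy_spec = { .thresh = 1 };
static const struct rte_flow_item_fuzzy fuzzy_mask = { .thresh = 0xffffffff };

/* The fuzzy item may sit anywhere before END; next_no_fuzzy_pattern()
 * skips it while the rest of the pattern is parsed.
 */
static const struct rte_flow_item sig_pattern[] = {
	{
		.type = RTE_FLOW_ITEM_TYPE_FUZZY,
		.spec = &fuzzy_spec,
		.mask = &fuzzy_mask,
	},
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};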
* [dpdk-dev] [PATCH v4 3/3] net/ixgbe: enable IPv6 for consistent API
2017-07-05 2:21 ` [dpdk-dev] [PATCH v4 " Qi Zhang
2017-07-05 2:21 ` [dpdk-dev] [PATCH v4 1/3] net/ixgbe: replace macro with inline function Qi Zhang
2017-07-05 2:21 ` [dpdk-dev] [PATCH v4 2/3] net/ixgbe: enable signature match for consistent API Qi Zhang
@ 2017-07-05 2:21 ` Qi Zhang
2017-07-05 18:30 ` [dpdk-dev] [PATCH v4 0/3] net/ixgbe: enable signature match and ipv6 " Ferruh Yigit
3 siblings, 0 replies; 21+ messages in thread
From: Qi Zhang @ 2017-07-05 2:21 UTC (permalink / raw)
To: wenzhuo.lu, helin.zhang; +Cc: dev, Qi Zhang
Enable IPv6 support with the rte_flow API.
Only signature match is supported.
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
v4:
- rename roughly to fuzzy.
v2:
- fix flow type assignment.
drivers/net/ixgbe/ixgbe_flow.c | 118 +++++++++++++++++++++++++++++++++++++----
1 file changed, 107 insertions(+), 11 deletions(-)
diff --git a/drivers/net/ixgbe/ixgbe_flow.c b/drivers/net/ixgbe/ixgbe_flow.c
index 593f267..6885abd 100644
--- a/drivers/net/ixgbe/ixgbe_flow.c
+++ b/drivers/net/ixgbe/ixgbe_flow.c
@@ -1333,12 +1333,13 @@ static inline uint8_t signature_match(const struct rte_flow_item pattern[])
* Parse the rule to see if it is a IP or MAC VLAN flow director rule.
* And get the flow director filter info BTW.
* UDP/TCP/SCTP PATTERN:
- * The first not void item can be ETH or IPV4.
- * The second not void item must be IPV4 if the first one is ETH.
+ * The first not void item can be ETH or IPV4 or IPV6
+ * The second not void item must be IPV4 or IPV6 if the first one is ETH.
* The next not void item could be UDP or TCP or SCTP (optional)
* The next not void item could be RAW (for flexbyte, optional)
* The next not void item must be END.
- * A Fuzzy Match pattern can appear at any place before END (optional)
+ * A Fuzzy Match pattern can appear at any place before END.
+ * Fuzzy Match is optional for IPV4 but is required for IPV6
* MAC VLAN PATTERN:
* The first not void item must be ETH.
* The second not void item must be MAC VLAN.
@@ -1386,6 +1387,8 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
const struct rte_flow_item_eth *eth_mask;
const struct rte_flow_item_ipv4 *ipv4_spec;
const struct rte_flow_item_ipv4 *ipv4_mask;
+ const struct rte_flow_item_ipv6 *ipv6_spec;
+ const struct rte_flow_item_ipv6 *ipv6_mask;
const struct rte_flow_item_tcp *tcp_spec;
const struct rte_flow_item_tcp *tcp_mask;
const struct rte_flow_item_udp *udp_spec;
@@ -1397,7 +1400,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
const struct rte_flow_item_raw *raw_mask;
const struct rte_flow_item_raw *raw_spec;
- uint32_t j;
+ uint8_t j;
if (!pattern) {
rte_flow_error_set(error, EINVAL,
@@ -1436,6 +1439,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
item = next_no_fuzzy_pattern(pattern, NULL);
if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
item->type != RTE_FLOW_ITEM_TYPE_TCP &&
item->type != RTE_FLOW_ITEM_TYPE_UDP &&
item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
@@ -1588,7 +1592,7 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
}
}
- /* Get the IP info. */
+ /* Get the IPV4 info. */
if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
/**
* Set the flow type even if there's no content
@@ -1662,14 +1666,106 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
}
}
+ /* Get the IPV6 info. */
+ if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
+ /**
+ * Set the flow type even if there's no content
+ * as we must have a flow type.
+ */
+ rule->ixgbe_fdir.formatted.flow_type =
+ IXGBE_ATR_FLOW_TYPE_IPV6;
+
+ /**
+	 * 1. must be a signature match
+	 * 2. last is not supported
+	 * 3. mask must not be NULL
+ */
+ if (rule->mode != RTE_FDIR_MODE_SIGNATURE ||
+ item->last ||
+ !item->mask) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ rule->b_mask = TRUE;
+ ipv6_mask =
+ (const struct rte_flow_item_ipv6 *)item->mask;
+ if (ipv6_mask->hdr.vtc_flow ||
+ ipv6_mask->hdr.payload_len ||
+ ipv6_mask->hdr.proto ||
+ ipv6_mask->hdr.hop_limits) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
+ /* check src addr mask */
+ for (j = 0; j < 16; j++) {
+ if (ipv6_mask->hdr.src_addr[j] == UINT8_MAX) {
+ rule->mask.src_ipv6_mask |= 1 << j;
+ } else if (ipv6_mask->hdr.src_addr[j] != 0) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+
+ /* check dst addr mask */
+ for (j = 0; j < 16; j++) {
+ if (ipv6_mask->hdr.dst_addr[j] == UINT8_MAX) {
+ rule->mask.dst_ipv6_mask |= 1 << j;
+ } else if (ipv6_mask->hdr.dst_addr[j] != 0) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+
+ if (item->spec) {
+ rule->b_spec = TRUE;
+ ipv6_spec =
+ (const struct rte_flow_item_ipv6 *)item->spec;
+ rte_memcpy(rule->ixgbe_fdir.formatted.src_ip,
+ ipv6_spec->hdr.src_addr, 16);
+ rte_memcpy(rule->ixgbe_fdir.formatted.dst_ip,
+ ipv6_spec->hdr.dst_addr, 16);
+ }
+
+ /**
+ * Check if the next not void item is
+ * TCP or UDP or SCTP or END.
+ */
+ item = next_no_fuzzy_pattern(pattern, item);
+ if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
+ item->type != RTE_FLOW_ITEM_TYPE_UDP &&
+ item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
+ item->type != RTE_FLOW_ITEM_TYPE_END &&
+ item->type != RTE_FLOW_ITEM_TYPE_RAW) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+
/* Get the TCP info. */
if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
/**
* Set the flow type even if there's no content
* as we must have a flow type.
*/
- rule->ixgbe_fdir.formatted.flow_type =
- IXGBE_ATR_FLOW_TYPE_TCPV4;
+ rule->ixgbe_fdir.formatted.flow_type |=
+ IXGBE_ATR_L4TYPE_TCP;
/*Not supported last point for range*/
if (item->last) {
rte_flow_error_set(error, EINVAL,
@@ -1733,8 +1829,8 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
* Set the flow type even if there's no content
* as we must have a flow type.
*/
- rule->ixgbe_fdir.formatted.flow_type =
- IXGBE_ATR_FLOW_TYPE_UDPV4;
+ rule->ixgbe_fdir.formatted.flow_type |=
+ IXGBE_ATR_L4TYPE_UDP;
/*Not supported last point for range*/
if (item->last) {
rte_flow_error_set(error, EINVAL,
@@ -1793,8 +1889,8 @@ ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
* Set the flow type even if there's no content
* as we must have a flow type.
*/
- rule->ixgbe_fdir.formatted.flow_type =
- IXGBE_ATR_FLOW_TYPE_SCTPV4;
+ rule->ixgbe_fdir.formatted.flow_type |=
+ IXGBE_ATR_L4TYPE_SCTP;
/*Not supported last point for range*/
if (item->last) {
rte_flow_error_set(error, EINVAL,
--
2.7.4
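A note on why the L4 hunks above switch from plain assignment to OR-ing: the ATR flow type encodes the L3 and L4 parts in separate bits, so composing them lets one code path cover both the IPv4 and IPv6 cases. A hedged illustration, assuming the IXGBE_ATR_* encodings found in ixgbe_type.h:

/* Illustration only: values mirror the IXGBE_ATR_* encodings in
 * ixgbe_type.h (an assumption about this driver version). */
#define ATR_FLOW_TYPE_IPV4	0x0
#define ATR_FLOW_TYPE_IPV6	0x4
#define ATR_L4TYPE_UDP		0x1
#define ATR_L4TYPE_TCP		0x2
#define ATR_L4TYPE_SCTP		0x3

/* IPv4 + TCP: 0x0 | 0x2 == 0x2 == IXGBE_ATR_FLOW_TYPE_TCPV4 */
/* IPv6 + TCP: 0x4 | 0x2 == 0x6 == IXGBE_ATR_FLOW_TYPE_TCPV6 */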
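The IPv6 hunk folds the 16-byte address mask into a 16-bit bitmap, one bit per fully-masked byte, and rejects per-bit masks. A standalone sketch of that logic (the helper name is hypothetical):

#include <stdint.h>

/* Fold a 16-byte IPv6 address mask into a 16-bit bitmap: bit j is set
 * when byte j is fully masked (0xff). ixgbe's flow director cannot
 * express per-bit masks, so any partially-masked byte is rejected. */
static int
fold_ipv6_byte_mask(const uint8_t addr_mask[16], uint16_t *bitmap)
{
	uint16_t m = 0;
	int j;

	for (j = 0; j < 16; j++) {
		if (addr_mask[j] == UINT8_MAX)
			m |= (uint16_t)1 << j;
		else if (addr_mask[j] != 0)
			return -1;	/* per-bit masks unsupported */
	}
	*bitmap = m;
	return 0;
}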
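For context, a minimal application-side sketch of an IPv6 rule as this patch expects it: a FUZZY item (which selects signature filtering) plus an IPV6 item. This assumes a DPDK tree of this era with the FUZZY item merged; the port id, queue index, threshold and addresses are illustrative, not taken from the patch:

#include <string.h>
#include <rte_flow.h>

/* Create an IPv6 signature-match rule steering matches to queue 1. */
static struct rte_flow *
create_ipv6_sig_rule(uint8_t port_id, struct rte_flow_error *err)
{
	static const uint8_t src[16] = {	/* 2001:db8::1 */
		0x20, 0x01, 0x0d, 0xb8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1 };
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_fuzzy fz_spec = { .thresh = 1 };
	struct rte_flow_item_fuzzy fz_mask = { .thresh = 0xffffffff };
	struct rte_flow_item_ipv6 ip_spec, ip_mask;
	struct rte_flow_action_queue queue = { .index = 1 };

	memset(&ip_spec, 0, sizeof(ip_spec));
	memset(&ip_mask, 0, sizeof(ip_mask));
	memcpy(ip_spec.hdr.src_addr, src, 16);
	memset(ip_mask.hdr.src_addr, 0xff, 16);	/* all 16 bytes exact */

	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_FUZZY,
		  .spec = &fz_spec, .mask = &fz_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV6,
		  .spec = &ip_spec, .mask = &ip_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, err);
}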
^ permalink raw reply [flat|nested] 21+ messages in thread
* Re: [dpdk-dev] [PATCH v4 0/3] net/ixgbe: enable signature match and ipv6 for consistent API.
2017-07-05 2:21 ` [dpdk-dev] [PATCH v4 " Qi Zhang
` (2 preceding siblings ...)
2017-07-05 2:21 ` [dpdk-dev] [PATCH v4 3/3] net/ixgbe: enable IPv6 " Qi Zhang
@ 2017-07-05 18:30 ` Ferruh Yigit
3 siblings, 0 replies; 21+ messages in thread
From: Ferruh Yigit @ 2017-07-05 18:30 UTC (permalink / raw)
To: Qi Zhang, wenzhuo.lu, helin.zhang; +Cc: dev
On 7/5/2017 3:21 AM, Qi Zhang wrote:
> The patchset is based on
> http://dpdk.org/dev/patchwork/patch/25292/
>
> v4:
> - fix missing rename from roughly to fuzzy
>
> v3:
> - will check fuzzy match's last and mask value.
>
> v2:
> - add comment to explain macro replacement
> - fix wrong flow type assignment
>
> Qi Zhang (3):
> net/ixgbe: replace macro with inline function
> net/ixgbe: enable signature match for consistent API
> net/ixgbe: enable IPv6 for consistent API
Series applied to dpdk-next-net/master, thanks.
(Kept ack from Wenzhuo given for v2.)
^ permalink raw reply [flat|nested] 21+ messages in thread
end of thread
Thread overview: 21+ messages
2017-05-26 19:52 [dpdk-dev] [PATCH 0/3] net/ixgbe: enable signature match and ipv6 for consistent API Qi Zhang
2017-05-26 19:52 ` [dpdk-dev] [PATCH 1/3] net/ixgbe: replace macro with inline function Qi Zhang
2017-06-01 5:09 ` Lu, Wenzhuo
2017-05-26 19:52 ` [dpdk-dev] [PATCH 2/3] net/ixgbe: enable signature match for consistent API Qi Zhang
2017-06-01 5:50 ` Lu, Wenzhuo
2017-06-01 5:56 ` Zhang, Qi Z
2017-06-01 6:13 ` Lu, Wenzhuo
2017-06-01 6:14 ` Zhang, Qi Z
2017-05-26 19:52 ` [dpdk-dev] [PATCH 3/3] net/ixgbe: enable IPv6 " Qi Zhang
2017-06-01 6:15 ` Lu, Wenzhuo
2017-06-01 6:19 ` Zhang, Qi Z
2017-06-07 23:08 ` [dpdk-dev] [PATCH v2 0/3] net/ixgbe: enable signature match and ipv6 " Qi Zhang
2017-06-07 23:08 ` [dpdk-dev] [PATCH v2 1/3] net/ixgbe: replace macro with inline function Qi Zhang
2017-06-07 23:08 ` [dpdk-dev] [PATCH v2 2/3] net/ixgbe: enable signature match for consistent API Qi Zhang
2017-06-07 23:08 ` [dpdk-dev] [PATCH v2 3/3] net/ixgbe: enable IPv6 " Qi Zhang
2017-06-08 6:57 ` [dpdk-dev] [PATCH v2 0/3] net/ixgbe: enable signature match and ipv6 " Lu, Wenzhuo
2017-07-05 2:21 ` [dpdk-dev] [PATCH v4 " Qi Zhang
2017-07-05 2:21 ` [dpdk-dev] [PATCH v4 1/3] net/ixgbe: replace macro with inline function Qi Zhang
2017-07-05 2:21 ` [dpdk-dev] [PATCH v4 2/3] net/ixgbe: enable signature match for consistent API Qi Zhang
2017-07-05 2:21 ` [dpdk-dev] [PATCH v4 3/3] net/ixgbe: enable IPv6 " Qi Zhang
2017-07-05 18:30 ` [dpdk-dev] [PATCH v4 0/3] net/ixgbe: enable signature match and ipv6 " Ferruh Yigit