* [dpdk-dev] [PATCH] net/i40e: add NVGRE parsing function
@ 2017-05-18 9:20 Beilei Xing
2017-06-01 6:56 ` [dpdk-dev] [PATCH v2 0/2] net/i40e: extend tunnel filter support Beilei Xing
0 siblings, 1 reply; 16+ messages in thread
From: Beilei Xing @ 2017-05-18 9:20 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
This patch adds an NVGRE parsing function to support NVGRE
classification.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_flow.c | 338 ++++++++++++++++++++++++++++++++++++++++++-
1 file changed, 336 insertions(+), 2 deletions(-)
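A minimal sketch (not part of the patch) of how an application might
build a flow rule that this parser accepts, assuming the DPDK
17.05-era rte_flow API; the port id, VF id, MAC byte and TNI value
below are placeholders, and the action set accepted by i40e's tunnel
path is assumed to include a VF action (see
i40e_flow_parse_tunnel_action for the exact set):

    #include <string.h>
    #include <rte_ether.h>
    #include <rte_flow.h>

    static struct rte_flow *
    create_nvgre_rule(uint8_t port_id)
    {
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_item_eth i_eth_spec, i_eth_mask; /* inner ETH */
        struct rte_flow_item_nvgre nvgre_spec, nvgre_mask;
        struct rte_flow_action_vf vf = { .id = 0 }; /* placeholder VF */
        struct rte_flow_error err;

        memset(&i_eth_spec, 0, sizeof(i_eth_spec));
        memset(&i_eth_mask, 0, sizeof(i_eth_mask));
        memset(&nvgre_spec, 0, sizeof(nvgre_spec));
        memset(&nvgre_mask, 0, sizeof(nvgre_mask));

        /* Rules 3/4 below: inner DST MAC fully matched (mask all
         * ones); SRC MAC and EtherType ignored (mask left zero).
         */
        memset(&i_eth_mask.dst, 0xFF, ETHER_ADDR_LEN);
        i_eth_spec.dst.addr_bytes[5] = 0x01; /* placeholder MAC byte */

        /* TNI must be fully masked for the tenant id to be taken. */
        memset(nvgre_mask.tni, 0xFF, 3);
        nvgre_spec.tni[2] = 0x2A; /* placeholder tenant id 42 */

        struct rte_flow_item pattern[] = {
            { .type = RTE_FLOW_ITEM_TYPE_ETH },  /* outer ETH: place holder */
            { .type = RTE_FLOW_ITEM_TYPE_IPV4 }, /* protocol only */
            { .type = RTE_FLOW_ITEM_TYPE_NVGRE,
              .spec = &nvgre_spec, .mask = &nvgre_mask },
            { .type = RTE_FLOW_ITEM_TYPE_ETH,
              .spec = &i_eth_spec, .mask = &i_eth_mask },
            { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action actions[] = {
            { .type = RTE_FLOW_ACTION_TYPE_VF, .conf = &vf },
            { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_create(port_id, &attr, pattern, actions, &err);
    }

With these items the rule should land in the IMAC_TENID case of the
parser below: no outer ETH spec, an inner ETH spec, and a fully
masked TNI.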
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index 8d1fcde..6c97019 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -114,6 +114,12 @@ static int i40e_flow_parse_vxlan_filter(struct rte_eth_dev *dev,
const struct rte_flow_action actions[],
struct rte_flow_error *error,
union i40e_filter_t *filter);
+static int i40e_flow_parse_nvgre_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter);
static int i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
@@ -296,7 +302,40 @@ static enum rte_flow_item_type pattern_vxlan_4[] = {
RTE_FLOW_ITEM_TYPE_END,
};
-/* Pattern matched MPLS */
+static enum rte_flow_item_type pattern_nvgre_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_NVGRE,
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_nvgre_2[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_NVGRE,
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_nvgre_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_NVGRE,
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_nvgre_4[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_NVGRE,
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
static enum rte_flow_item_type pattern_mpls_1[] = {
RTE_FLOW_ITEM_TYPE_ETH,
RTE_FLOW_ITEM_TYPE_IPV4,
@@ -329,7 +368,6 @@ static enum rte_flow_item_type pattern_mpls_4[] = {
RTE_FLOW_ITEM_TYPE_END,
};
-/* Pattern matched QINQ */
static enum rte_flow_item_type pattern_qinq_1[] = {
RTE_FLOW_ITEM_TYPE_ETH,
RTE_FLOW_ITEM_TYPE_VLAN,
@@ -362,6 +400,11 @@ static struct i40e_valid_pattern i40e_supported_patterns[] = {
{ pattern_vxlan_2, i40e_flow_parse_vxlan_filter },
{ pattern_vxlan_3, i40e_flow_parse_vxlan_filter },
{ pattern_vxlan_4, i40e_flow_parse_vxlan_filter },
+ /* NVGRE */
+ { pattern_nvgre_1, i40e_flow_parse_nvgre_filter },
+ { pattern_nvgre_2, i40e_flow_parse_nvgre_filter },
+ { pattern_nvgre_3, i40e_flow_parse_nvgre_filter },
+ { pattern_nvgre_4, i40e_flow_parse_nvgre_filter },
/* MPLSoUDP & MPLSoGRE */
{ pattern_mpls_1, i40e_flow_parse_mpls_filter },
{ pattern_mpls_2, i40e_flow_parse_mpls_filter },
@@ -1589,6 +1632,297 @@ i40e_flow_parse_vxlan_filter(struct rte_eth_dev *dev,
}
/* 1. Last in item should be NULL as range is not supported.
+ * 2. Supported filter types: IMAC_IVLAN_TENID, IMAC_IVLAN,
+ * IMAC_TENID, OMAC_TENID_IMAC and IMAC.
+ * 3. Mask of fields which need to be matched should be
+ * filled with 1.
+ * 4. Mask of fields which needn't be matched should be
+ * filled with 0.
+ */
+static int
+i40e_flow_parse_nvgre_pattern(__rte_unused struct rte_eth_dev *dev,
+ const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ struct i40e_tunnel_filter_conf *filter)
+{
+ const struct rte_flow_item *item = pattern;
+ const struct rte_flow_item_eth *eth_spec;
+ const struct rte_flow_item_eth *eth_mask;
+ const struct rte_flow_item_eth *o_eth_spec = NULL;
+ const struct rte_flow_item_eth *o_eth_mask = NULL;
+ const struct rte_flow_item_nvgre *nvgre_spec = NULL;
+ const struct rte_flow_item_nvgre *nvgre_mask = NULL;
+ const struct rte_flow_item_eth *i_eth_spec = NULL;
+ const struct rte_flow_item_eth *i_eth_mask = NULL;
+ const struct rte_flow_item_vlan *vlan_spec = NULL;
+ const struct rte_flow_item_vlan *vlan_mask = NULL;
+ int is_tni_masked = 0;
+ enum rte_flow_item_type item_type;
+ bool nvgre_flag = 0;
+ uint32_t tenant_id_be = 0;
+
+ for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Not support range");
+ return -rte_errno;
+ }
+ item_type = item->type;
+ switch (item_type) {
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ eth_spec = (const struct rte_flow_item_eth *)item->spec;
+ eth_mask = (const struct rte_flow_item_eth *)item->mask;
+ if ((!eth_spec && eth_mask) ||
+ (eth_spec && !eth_mask)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid ether spec/mask");
+ return -rte_errno;
+ }
+
+ if (eth_spec && eth_mask) {
+ /* DST address of inner MAC shouldn't be masked.
+ * SRC address of Inner MAC should be masked.
+ */
+ if (!is_broadcast_ether_addr(&eth_mask->dst) ||
+ !is_zero_ether_addr(&eth_mask->src) ||
+ eth_mask->type) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid ether spec/mask");
+ return -rte_errno;
+ }
+
+ if (!nvgre_flag)
+ rte_memcpy(&filter->outer_mac,
+ &eth_spec->dst,
+ ETHER_ADDR_LEN);
+ else
+ rte_memcpy(&filter->inner_mac,
+ &eth_spec->dst,
+ ETHER_ADDR_LEN);
+ }
+
+ if (!nvgre_flag) {
+ o_eth_spec = eth_spec;
+ o_eth_mask = eth_mask;
+ } else {
+ i_eth_spec = eth_spec;
+ i_eth_mask = eth_mask;
+ }
+
+ break;
+ case RTE_FLOW_ITEM_TYPE_VLAN:
+ vlan_spec =
+ (const struct rte_flow_item_vlan *)item->spec;
+ vlan_mask =
+ (const struct rte_flow_item_vlan *)item->mask;
+ if (nvgre_flag) {
+ if (!(vlan_spec && vlan_mask)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid vlan item");
+ return -rte_errno;
+ }
+ } else {
+ if (vlan_spec || vlan_mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid vlan item");
+ return -rte_errno;
+ }
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
+ /* IPv4 is used to describe protocol,
+ * spec and mask should be NULL.
+ */
+ if (item->spec || item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid IPv4 item");
+ return -rte_errno;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
+ /* IPv6 is used to describe protocol,
+ * spec and mask should be NULL.
+ */
+ if (item->spec || item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid IPv6 item");
+ return -rte_errno;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_NVGRE:
+ nvgre_spec =
+ (const struct rte_flow_item_nvgre *)item->spec;
+ nvgre_mask =
+ (const struct rte_flow_item_nvgre *)item->mask;
+ /* Check if NVGRE item is used to describe protocol.
+ * If yes, both spec and mask should be NULL.
+ * If no, neither spec nor mask should be NULL.
+ */
+ if ((!nvgre_spec && nvgre_mask) ||
+ (nvgre_spec && !nvgre_mask)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid NVGRE item");
+ return -rte_errno;
+ }
+
+ /* Check if TNI is masked. */
+ if (nvgre_mask) {
+ is_tni_masked =
+ i40e_check_tenant_id_mask(nvgre_mask->tni);
+ if (is_tni_masked < 0) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid TNI mask");
+ return -rte_errno;
+ }
+ }
+ nvgre_flag = 1;
+ break;
+ default:
+ break;
+ }
+ }
+
+ /* Check specification and mask to get the filter type */
+ if (vlan_spec && vlan_mask &&
+ (vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK))) {
+ /* If there's inner vlan */
+ filter->inner_vlan = rte_be_to_cpu_16(vlan_spec->tci)
+ & I40E_TCI_MASK;
+ if (nvgre_spec && nvgre_mask && !is_tni_masked) {
+ /* If there's nvgre */
+ rte_memcpy(((uint8_t *)&tenant_id_be + 1),
+ nvgre_spec->tni, 3);
+ filter->tenant_id = rte_be_to_cpu_32(tenant_id_be);
+ if (!o_eth_spec && !o_eth_mask &&
+ i_eth_spec && i_eth_mask)
+ filter->filter_type =
+ RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID;
+ else {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL,
+ "Invalid filter type");
+ return -rte_errno;
+ }
+ } else if (!nvgre_spec && !nvgre_mask) {
+ /* If there's no nvgre */
+ if (!o_eth_spec && !o_eth_mask &&
+ i_eth_spec && i_eth_mask)
+ filter->filter_type =
+ RTE_TUNNEL_FILTER_IMAC_IVLAN;
+ else {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL,
+ "Invalid filter type");
+ return -rte_errno;
+ }
+ } else {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL,
+ "Invalid filter type");
+ return -rte_errno;
+ }
+ } else if ((!vlan_spec && !vlan_mask) ||
+ (vlan_spec && vlan_mask && vlan_mask->tci == 0x0)) {
+ /* If there's no inner vlan */
+ if (nvgre_spec && nvgre_mask && !is_tni_masked) {
+ /* If there's nvgre */
+ rte_memcpy(((uint8_t *)&tenant_id_be + 1),
+ nvgre_spec->tni, 3);
+ filter->tenant_id = rte_be_to_cpu_32(tenant_id_be);
+ if (!o_eth_spec && !o_eth_mask &&
+ i_eth_spec && i_eth_mask)
+ filter->filter_type =
+ RTE_TUNNEL_FILTER_IMAC_TENID;
+ else if (o_eth_spec && o_eth_mask &&
+ i_eth_spec && i_eth_mask)
+ filter->filter_type =
+ RTE_TUNNEL_FILTER_OMAC_TENID_IMAC;
+ } else if (!nvgre_spec && !nvgre_mask) {
+ /* If there's no nvgre */
+ if (!o_eth_spec && !o_eth_mask &&
+ i_eth_spec && i_eth_mask) {
+ filter->filter_type = ETH_TUNNEL_FILTER_IMAC;
+ } else {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "Invalid filter type");
+ return -rte_errno;
+ }
+ } else {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "Invalid filter type");
+ return -rte_errno;
+ }
+ } else {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "Not supported by tunnel filter.");
+ return -rte_errno;
+ }
+
+ filter->tunnel_type = RTE_TUNNEL_TYPE_NVGRE;
+
+ return 0;
+}
+
+static int
+i40e_flow_parse_nvgre_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter)
+{
+ struct i40e_tunnel_filter_conf *tunnel_filter =
+ &filter->consistent_tunnel_filter;
+ int ret;
+
+ ret = i40e_flow_parse_nvgre_pattern(dev, pattern,
+ error, tunnel_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_flow_parse_attr(attr, error);
+ if (ret)
+ return ret;
+
+ cons_filter_type = RTE_ETH_FILTER_TUNNEL;
+
+ return ret;
+}
+
+/* 1. Last in item should be NULL as range is not supported.
* 2. Supported filter types: MPLS label.
* 3. Mask of fields which need to be matched should be
* filled with 1.
--
2.5.5
* [dpdk-dev] [PATCH v2 0/2] net/i40e: extend tunnel filter support
2017-05-18 9:20 [dpdk-dev] [PATCH] net/i40e: add NVGRE parsing function Beilei Xing
@ 2017-06-01 6:56 ` Beilei Xing
2017-06-01 6:56 ` [dpdk-dev] [PATCH v2 1/2] net/i40e: optimize vxlan parsing function Beilei Xing
` (2 more replies)
0 siblings, 3 replies; 16+ messages in thread
From: Beilei Xing @ 2017-06-01 6:56 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev
This patchset extends tunnel filter support by optimizing the vxlan
parsing function and adding an NVGRE parsing function.
v2 changes:
- Add vxlan parsing function optimization.
- Optimize NVGRE parsing function.
Beilei Xing (2):
net/i40e: optimize vxlan parsing function
net/i40e: add NVGRE parsing function
drivers/net/i40e/i40e_flow.c | 429 +++++++++++++++++++++++++++++++------------
1 file changed, 315 insertions(+), 114 deletions(-)
--
2.5.5
* [dpdk-dev] [PATCH v2 1/2] net/i40e: optimize vxlan parsing function
2017-06-01 6:56 ` [dpdk-dev] [PATCH v2 0/2] net/i40e: extend tunnel filter support Beilei Xing
@ 2017-06-01 6:56 ` Beilei Xing
2017-06-07 3:27 ` Lu, Wenzhuo
2017-06-07 3:30 ` Yuanhan Liu
2017-06-01 6:56 ` [dpdk-dev] [PATCH v2 2/2] net/i40e: add NVGRE " Beilei Xing
2017-06-07 6:53 ` [dpdk-dev] [PATCH v3 0/2] net/i40e: extend tunnel filter support Beilei Xing
2 siblings, 2 replies; 16+ messages in thread
From: Beilei Xing @ 2017-06-01 6:56 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev
This commit optimizes the vxlan parsing function.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_flow.c | 176 ++++++++++++++-----------------------------
1 file changed, 55 insertions(+), 121 deletions(-)
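The core of the rework is visible in the diff below: the four
spec/mask pointer pairs and the nested if/else ladder that decided
the filter type are replaced by a filter_type bitmask that each
parsed item ORs into, validated once at the end against a table of
supported combinations. A self-contained sketch of that idea, using
stand-in flag values rather than the real ETH_TUNNEL_FILTER_*
macros:

    #include <stdint.h>
    #include <stddef.h>

    #define FILTER_OMAC  0x01 /* stand-ins for ETH_TUNNEL_FILTER_* */
    #define FILTER_IMAC  0x02
    #define FILTER_IVLAN 0x04
    #define FILTER_TENID 0x08

    static const uint8_t supported_types[] = {
        FILTER_IMAC | FILTER_TENID | FILTER_IVLAN,
        FILTER_IMAC | FILTER_IVLAN,
        FILTER_IMAC | FILTER_TENID,
        FILTER_OMAC | FILTER_TENID | FILTER_IMAC,
        FILTER_IMAC,
    };

    /* Return 0 if the accumulated bitmask is a supported combination. */
    static int
    check_filter_type(uint8_t filter_type)
    {
        size_t i;

        for (i = 0; i < sizeof(supported_types); i++)
            if (filter_type == supported_types[i])
                return 0;
        return -1;
    }

Supporting a new combination then becomes a one-line table entry
instead of another branch in the parser.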
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index 8d1fcde..3916584 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -1262,27 +1262,27 @@ i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
return 0;
}
+static uint16_t i40e_supported_tunnel_filter_types[] = {
+ ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_TENID |
+ ETH_TUNNEL_FILTER_IVLAN,
+ ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN,
+ ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_TENID,
+ ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID |
+ ETH_TUNNEL_FILTER_IMAC,
+ ETH_TUNNEL_FILTER_IMAC,
+};
+
static int
-i40e_check_tenant_id_mask(const uint8_t *mask)
+i40e_check_tunnel_filter_type(uint8_t filter_type)
{
- uint32_t j;
- int is_masked = 0;
-
- for (j = 0; j < I40E_TENANT_ARRAY_NUM; j++) {
- if (*(mask + j) == UINT8_MAX) {
- if (j > 0 && (*(mask + j) != *(mask + j - 1)))
- return -EINVAL;
- is_masked = 0;
- } else if (*(mask + j) == 0) {
- if (j > 0 && (*(mask + j) != *(mask + j - 1)))
- return -EINVAL;
- is_masked = 1;
- } else {
- return -EINVAL;
- }
+ uint8_t i;
+
+ for (i = 0; i < RTE_DIM(i40e_supported_tunnel_filter_types); i++) {
+ if (filter_type == i40e_supported_tunnel_filter_types[i])
+ return 0;
}
- return is_masked;
+ return -1;
}
/* 1. Last in item should be NULL as range is not supported.
@@ -1302,18 +1302,17 @@ i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
const struct rte_flow_item *item = pattern;
const struct rte_flow_item_eth *eth_spec;
const struct rte_flow_item_eth *eth_mask;
- const struct rte_flow_item_eth *o_eth_spec = NULL;
- const struct rte_flow_item_eth *o_eth_mask = NULL;
- const struct rte_flow_item_vxlan *vxlan_spec = NULL;
- const struct rte_flow_item_vxlan *vxlan_mask = NULL;
- const struct rte_flow_item_eth *i_eth_spec = NULL;
- const struct rte_flow_item_eth *i_eth_mask = NULL;
- const struct rte_flow_item_vlan *vlan_spec = NULL;
- const struct rte_flow_item_vlan *vlan_mask = NULL;
+ const struct rte_flow_item_vxlan *vxlan_spec;
+ const struct rte_flow_item_vxlan *vxlan_mask;
+ const struct rte_flow_item_vlan *vlan_spec;
+ const struct rte_flow_item_vlan *vlan_mask;
+ uint8_t filter_type = 0;
bool is_vni_masked = 0;
+ uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF};
enum rte_flow_item_type item_type;
bool vxlan_flag = 0;
uint32_t tenant_id_be = 0;
+ int ret;
for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
if (item->last) {
@@ -1351,24 +1350,18 @@ i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
return -rte_errno;
}
- if (!vxlan_flag)
+ if (!vxlan_flag) {
rte_memcpy(&filter->outer_mac,
&eth_spec->dst,
ETHER_ADDR_LEN);
- else
+ filter_type |= ETH_TUNNEL_FILTER_OMAC;
+ } else {
rte_memcpy(&filter->inner_mac,
&eth_spec->dst,
ETHER_ADDR_LEN);
+ filter_type |= ETH_TUNNEL_FILTER_IMAC;
+ }
}
-
- if (!vxlan_flag) {
- o_eth_spec = eth_spec;
- o_eth_mask = eth_mask;
- } else {
- i_eth_spec = eth_spec;
- i_eth_mask = eth_mask;
- }
-
break;
case RTE_FLOW_ITEM_TYPE_VLAN:
vlan_spec =
@@ -1376,10 +1369,6 @@ i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
vlan_mask =
(const struct rte_flow_item_vlan *)item->mask;
if (vxlan_flag) {
- vlan_spec =
- (const struct rte_flow_item_vlan *)item->spec;
- vlan_mask =
- (const struct rte_flow_item_vlan *)item->mask;
if (!(vlan_spec && vlan_mask)) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
@@ -1395,6 +1384,15 @@ i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
"Invalid vlan item");
return -rte_errno;
}
+
+ if (vlan_spec && vlan_mask) {
+ if (vlan_mask->tci ==
+ rte_cpu_to_be_16(I40E_TCI_MASK))
+ filter->inner_vlan =
+ rte_be_to_cpu_16(vlan_spec->tci) &
+ I40E_TCI_MASK;
+ filter_type |= ETH_TUNNEL_FILTER_IVLAN;
+ }
break;
case RTE_FLOW_ITEM_TYPE_IPV4:
filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
@@ -1453,17 +1451,25 @@ i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
}
/* Check if VNI is masked. */
- if (vxlan_mask) {
+ if (vxlan_spec && vxlan_mask) {
is_vni_masked =
- i40e_check_tenant_id_mask(vxlan_mask->vni);
- if (is_vni_masked < 0) {
+ !!memcmp(vxlan_mask->vni, vni_mask,
+ RTE_DIM(vni_mask));
+ if (is_vni_masked) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item,
"Invalid VNI mask");
return -rte_errno;
}
+
+ rte_memcpy(((uint8_t *)&tenant_id_be + 1),
+ vxlan_spec->vni, 3);
+ filter->tenant_id =
+ rte_be_to_cpu_32(tenant_id_be);
+ filter_type |= ETH_TUNNEL_FILTER_TENID;
}
+
vxlan_flag = 1;
break;
default:
@@ -1471,87 +1477,15 @@ i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
}
}
- /* Check specification and mask to get the filter type */
- if (vlan_spec && vlan_mask &&
- (vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK))) {
- /* If there's inner vlan */
- filter->inner_vlan = rte_be_to_cpu_16(vlan_spec->tci)
- & I40E_TCI_MASK;
- if (vxlan_spec && vxlan_mask && !is_vni_masked) {
- /* If there's vxlan */
- rte_memcpy(((uint8_t *)&tenant_id_be + 1),
- vxlan_spec->vni, 3);
- filter->tenant_id = rte_be_to_cpu_32(tenant_id_be);
- if (!o_eth_spec && !o_eth_mask &&
- i_eth_spec && i_eth_mask)
- filter->filter_type =
- RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID;
- else {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- NULL,
- "Invalid filter type");
- return -rte_errno;
- }
- } else if (!vxlan_spec && !vxlan_mask) {
- /* If there's no vxlan */
- if (!o_eth_spec && !o_eth_mask &&
- i_eth_spec && i_eth_mask)
- filter->filter_type =
- RTE_TUNNEL_FILTER_IMAC_IVLAN;
- else {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- NULL,
- "Invalid filter type");
- return -rte_errno;
- }
- } else {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- NULL,
- "Invalid filter type");
- return -rte_errno;
- }
- } else if ((!vlan_spec && !vlan_mask) ||
- (vlan_spec && vlan_mask && vlan_mask->tci == 0x0)) {
- /* If there's no inner vlan */
- if (vxlan_spec && vxlan_mask && !is_vni_masked) {
- /* If there's vxlan */
- rte_memcpy(((uint8_t *)&tenant_id_be + 1),
- vxlan_spec->vni, 3);
- filter->tenant_id = rte_be_to_cpu_32(tenant_id_be);
- if (!o_eth_spec && !o_eth_mask &&
- i_eth_spec && i_eth_mask)
- filter->filter_type =
- RTE_TUNNEL_FILTER_IMAC_TENID;
- else if (o_eth_spec && o_eth_mask &&
- i_eth_spec && i_eth_mask)
- filter->filter_type =
- RTE_TUNNEL_FILTER_OMAC_TENID_IMAC;
- } else if (!vxlan_spec && !vxlan_mask) {
- /* If there's no vxlan */
- if (!o_eth_spec && !o_eth_mask &&
- i_eth_spec && i_eth_mask) {
- filter->filter_type = ETH_TUNNEL_FILTER_IMAC;
- } else {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM, NULL,
- "Invalid filter type");
- return -rte_errno;
- }
- } else {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM, NULL,
- "Invalid filter type");
- return -rte_errno;
- }
- } else {
+ ret = i40e_check_tunnel_filter_type(filter_type);
+ if (ret < 0) {
rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM, NULL,
- "Not supported by tunnel filter.");
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL,
+ "Invalid filter type");
return -rte_errno;
}
+ filter->filter_type = filter_type;
filter->tunnel_type = I40E_TUNNEL_TYPE_VXLAN;
--
2.5.5
* [dpdk-dev] [PATCH v2 2/2] net/i40e: add NVGRE parsing function
2017-06-01 6:56 ` [dpdk-dev] [PATCH v2 0/2] net/i40e: extend tunnel filter support Beilei Xing
2017-06-01 6:56 ` [dpdk-dev] [PATCH v2 1/2] net/i40e: optimize vxlan parsing function Beilei Xing
@ 2017-06-01 6:56 ` Beilei Xing
2017-06-07 5:46 ` Lu, Wenzhuo
2017-06-07 6:53 ` [dpdk-dev] [PATCH v3 0/2] net/i40e: extend tunnel filter support Beilei Xing
2 siblings, 1 reply; 16+ messages in thread
From: Beilei Xing @ 2017-06-01 6:56 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev
This patch adds an NVGRE parsing function to support NVGRE
classification.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_flow.c | 271 ++++++++++++++++++++++++++++++++++++++++++-
1 file changed, 269 insertions(+), 2 deletions(-)
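One detail worth spelling out in the diff below is the TNI to tenant
id conversion. A sketch of why it works (the helper name is a
stand-in, not the driver's):

    #include <stdint.h>
    #include <string.h>
    #include <rte_byteorder.h>

    static uint32_t
    tni_to_tenant_id(const uint8_t tni[3])
    {
        uint32_t tenant_id_be = 0;

        /* Byte 0 stays zero; bytes 1..3 receive the 3-byte TNI, so
         * the word holds the 24-bit TNI read as a 32-bit big-endian
         * integer. For tni = {0x00, 0x00, 0x2A} (42), memory holds
         * 00 00 00 2A and rte_be_to_cpu_32() yields host-order 42.
         */
        memcpy((uint8_t *)&tenant_id_be + 1, tni, 3);
        return rte_be_to_cpu_32(tenant_id_be);
    }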
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index 3916584..913fd21 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -114,6 +114,12 @@ static int i40e_flow_parse_vxlan_filter(struct rte_eth_dev *dev,
const struct rte_flow_action actions[],
struct rte_flow_error *error,
union i40e_filter_t *filter);
+static int i40e_flow_parse_nvgre_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter);
static int i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
@@ -296,7 +302,40 @@ static enum rte_flow_item_type pattern_vxlan_4[] = {
RTE_FLOW_ITEM_TYPE_END,
};
-/* Pattern matched MPLS */
+static enum rte_flow_item_type pattern_nvgre_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_NVGRE,
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_nvgre_2[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_NVGRE,
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_nvgre_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_NVGRE,
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_nvgre_4[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_NVGRE,
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
static enum rte_flow_item_type pattern_mpls_1[] = {
RTE_FLOW_ITEM_TYPE_ETH,
RTE_FLOW_ITEM_TYPE_IPV4,
@@ -329,7 +368,6 @@ static enum rte_flow_item_type pattern_mpls_4[] = {
RTE_FLOW_ITEM_TYPE_END,
};
-/* Pattern matched QINQ */
static enum rte_flow_item_type pattern_qinq_1[] = {
RTE_FLOW_ITEM_TYPE_ETH,
RTE_FLOW_ITEM_TYPE_VLAN,
@@ -362,6 +400,11 @@ static struct i40e_valid_pattern i40e_supported_patterns[] = {
{ pattern_vxlan_2, i40e_flow_parse_vxlan_filter },
{ pattern_vxlan_3, i40e_flow_parse_vxlan_filter },
{ pattern_vxlan_4, i40e_flow_parse_vxlan_filter },
+ /* NVGRE */
+ { pattern_nvgre_1, i40e_flow_parse_nvgre_filter },
+ { pattern_nvgre_2, i40e_flow_parse_nvgre_filter },
+ { pattern_nvgre_3, i40e_flow_parse_nvgre_filter },
+ { pattern_nvgre_4, i40e_flow_parse_nvgre_filter },
/* MPLSoUDP & MPLSoGRE */
{ pattern_mpls_1, i40e_flow_parse_mpls_filter },
{ pattern_mpls_2, i40e_flow_parse_mpls_filter },
@@ -1523,6 +1566,230 @@ i40e_flow_parse_vxlan_filter(struct rte_eth_dev *dev,
}
/* 1. Last in item should be NULL as range is not supported.
+ * 2. Supported filter types: IMAC_IVLAN_TENID, IMAC_IVLAN,
+ * IMAC_TENID, OMAC_TENID_IMAC and IMAC.
+ * 3. Mask of fields which need to be matched should be
+ * filled with 1.
+ * 4. Mask of fields which needn't be matched should be
+ * filled with 0.
+ */
+static int
+i40e_flow_parse_nvgre_pattern(__rte_unused struct rte_eth_dev *dev,
+ const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ struct i40e_tunnel_filter_conf *filter)
+{
+ const struct rte_flow_item *item = pattern;
+ const struct rte_flow_item_eth *eth_spec;
+ const struct rte_flow_item_eth *eth_mask;
+ const struct rte_flow_item_nvgre *nvgre_spec;
+ const struct rte_flow_item_nvgre *nvgre_mask;
+ const struct rte_flow_item_vlan *vlan_spec;
+ const struct rte_flow_item_vlan *vlan_mask;
+ enum rte_flow_item_type item_type;
+ uint8_t filter_type = 0;
+ bool is_tni_masked = 0;
+ uint8_t tni_mask[] = {0xFF, 0xFF, 0xFF};
+ bool nvgre_flag = 0;
+ uint32_t tenant_id_be = 0;
+ int ret;
+
+ for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Not support range");
+ return -rte_errno;
+ }
+ item_type = item->type;
+ switch (item_type) {
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ eth_spec = (const struct rte_flow_item_eth *)item->spec;
+ eth_mask = (const struct rte_flow_item_eth *)item->mask;
+ if ((!eth_spec && eth_mask) ||
+ (eth_spec && !eth_mask)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid ether spec/mask");
+ return -rte_errno;
+ }
+
+ if (eth_spec && eth_mask) {
+ /* DST address of inner MAC shouldn't be masked.
+ * SRC address of Inner MAC should be masked.
+ */
+ if (!is_broadcast_ether_addr(&eth_mask->dst) ||
+ !is_zero_ether_addr(&eth_mask->src) ||
+ eth_mask->type) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid ether spec/mask");
+ return -rte_errno;
+ }
+
+ if (!nvgre_flag) {
+ rte_memcpy(&filter->outer_mac,
+ &eth_spec->dst,
+ ETHER_ADDR_LEN);
+ filter_type |= ETH_TUNNEL_FILTER_OMAC;
+ } else {
+ rte_memcpy(&filter->inner_mac,
+ &eth_spec->dst,
+ ETHER_ADDR_LEN);
+ filter_type |= ETH_TUNNEL_FILTER_IMAC;
+ }
+ }
+
+ break;
+ case RTE_FLOW_ITEM_TYPE_VLAN:
+ vlan_spec =
+ (const struct rte_flow_item_vlan *)item->spec;
+ vlan_mask =
+ (const struct rte_flow_item_vlan *)item->mask;
+ if (nvgre_flag) {
+ if (!(vlan_spec && vlan_mask)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid vlan item");
+ return -rte_errno;
+ }
+ } else {
+ if (vlan_spec || vlan_mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid vlan item");
+ return -rte_errno;
+ }
+ }
+
+ if (vlan_spec && vlan_mask) {
+ if (vlan_mask->tci ==
+ rte_cpu_to_be_16(I40E_TCI_MASK))
+ filter->inner_vlan =
+ rte_be_to_cpu_16(vlan_spec->tci) &
+ I40E_TCI_MASK;
+ filter_type |= ETH_TUNNEL_FILTER_IVLAN;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
+ /* IPv4 is used to describe protocol,
+ * spec and mask should be NULL.
+ */
+ if (item->spec || item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid IPv4 item");
+ return -rte_errno;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
+ /* IPv6 is used to describe protocol,
+ * spec and mask should be NULL.
+ */
+ if (item->spec || item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid IPv6 item");
+ return -rte_errno;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_NVGRE:
+ nvgre_spec =
+ (const struct rte_flow_item_nvgre *)item->spec;
+ nvgre_mask =
+ (const struct rte_flow_item_nvgre *)item->mask;
+ /* Check if NVGRE item is used to describe protocol.
+ * If yes, both spec and mask should be NULL.
+ * If no, neither spec nor mask should be NULL.
+ */
+ if ((!nvgre_spec && nvgre_mask) ||
+ (nvgre_spec && !nvgre_mask)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid NVGRE item");
+ return -rte_errno;
+ }
+
+ if (nvgre_spec && nvgre_mask) {
+ is_tni_masked =
+ !!memcmp(nvgre_mask->tni, tni_mask,
+ RTE_DIM(tni_mask));
+ if (is_tni_masked) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid TNI mask");
+ return -rte_errno;
+ }
+ rte_memcpy(((uint8_t *)&tenant_id_be + 1),
+ nvgre_spec->tni, 3);
+ filter->tenant_id =
+ rte_be_to_cpu_32(tenant_id_be);
+ filter_type |= ETH_TUNNEL_FILTER_TENID;
+ }
+
+ nvgre_flag = 1;
+ break;
+ default:
+ break;
+ }
+ }
+
+ ret = i40e_check_tunnel_filter_type(filter_type);
+ if (ret < 0) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL,
+ "Invalid filter type");
+ return -rte_errno;
+ }
+ filter->filter_type = filter_type;
+
+ filter->tunnel_type = I40E_TUNNEL_TYPE_NVGRE;
+
+ return 0;
+}
+
+static int
+i40e_flow_parse_nvgre_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter)
+{
+ struct i40e_tunnel_filter_conf *tunnel_filter =
+ &filter->consistent_tunnel_filter;
+ int ret;
+
+ ret = i40e_flow_parse_nvgre_pattern(dev, pattern,
+ error, tunnel_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_flow_parse_attr(attr, error);
+ if (ret)
+ return ret;
+
+ cons_filter_type = RTE_ETH_FILTER_TUNNEL;
+
+ return ret;
+}
+
+/* 1. Last in item should be NULL as range is not supported.
* 2. Supported filter types: MPLS label.
* 3. Mask of fields which need to be matched should be
* filled with 1.
--
2.5.5
* Re: [dpdk-dev] [PATCH v2 1/2] net/i40e: optimize vxlan parsing function
2017-06-01 6:56 ` [dpdk-dev] [PATCH v2 1/2] net/i40e: optimize vxlan parsing function Beilei Xing
@ 2017-06-07 3:27 ` Lu, Wenzhuo
2017-06-07 3:30 ` Yuanhan Liu
1 sibling, 0 replies; 16+ messages in thread
From: Lu, Wenzhuo @ 2017-06-07 3:27 UTC (permalink / raw)
To: Xing, Beilei, Wu, Jingjing; +Cc: dev
Hi,
> -----Original Message-----
> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Beilei Xing
> Sent: Thursday, June 1, 2017 2:57 PM
> To: Wu, Jingjing
> Cc: dev@dpdk.org
> Subject: [dpdk-dev] [PATCH v2 1/2] net/i40e: optimize vxlan parsing function
>
> This commit optimizes vxlan parsing function.
>
> Signed-off-by: Beilei Xing <beilei.xing@intel.com>
Acked-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
* Re: [dpdk-dev] [PATCH v2 1/2] net/i40e: optimize vxlan parsing function
2017-06-01 6:56 ` [dpdk-dev] [PATCH v2 1/2] net/i40e: optimize vxlan parsing function Beilei Xing
2017-06-07 3:27 ` Lu, Wenzhuo
@ 2017-06-07 3:30 ` Yuanhan Liu
2017-06-07 4:21 ` Xing, Beilei
1 sibling, 1 reply; 16+ messages in thread
From: Yuanhan Liu @ 2017-06-07 3:30 UTC (permalink / raw)
To: Beilei Xing; +Cc: jingjing.wu, dev
On Thu, Jun 01, 2017 at 02:56:30PM +0800, Beilei Xing wrote:
> This commit optimizes vxlan parsing function.
How?
--yliu
> Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> ---
> drivers/net/i40e/i40e_flow.c | 176 ++++++++++++++-----------------------------
> 1 file changed, 55 insertions(+), 121 deletions(-)
* Re: [dpdk-dev] [PATCH v2 1/2] net/i40e: optimize vxlan parsing function
2017-06-07 3:30 ` Yuanhan Liu
@ 2017-06-07 4:21 ` Xing, Beilei
0 siblings, 0 replies; 16+ messages in thread
From: Xing, Beilei @ 2017-06-07 4:21 UTC (permalink / raw)
To: Yuanhan Liu; +Cc: Wu, Jingjing, dev
> -----Original Message-----
> From: Yuanhan Liu [mailto:yliu@fridaylinux.org]
> Sent: Wednesday, June 7, 2017 11:31 AM
> To: Xing, Beilei <beilei.xing@intel.com>
> Cc: Wu, Jingjing <jingjing.wu@intel.com>; dev@dpdk.org
> Subject: Re: [dpdk-dev] [PATCH v2 1/2] net/i40e: optimize vxlan parsing
> function
>
> On Thu, Jun 01, 2017 at 02:56:30PM +0800, Beilei Xing wrote:
> > This commit optimizes vxlan parsing function.
>
> How?
>
> --yliu
The original parsing function is a little complex and not easy to read when parsing the filter type; this patch optimizes the function and makes it more readable.
>
> > Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> > ---
> > drivers/net/i40e/i40e_flow.c | 176 ++++++++++++++--------------------------
> ---
> > 1 file changed, 55 insertions(+), 121 deletions(-)
* Re: [dpdk-dev] [PATCH v2 2/2] net/i40e: add NVGRE parsing function
2017-06-01 6:56 ` [dpdk-dev] [PATCH v2 2/2] net/i40e: add NVGRE " Beilei Xing
@ 2017-06-07 5:46 ` Lu, Wenzhuo
2017-06-07 6:06 ` Xing, Beilei
0 siblings, 1 reply; 16+ messages in thread
From: Lu, Wenzhuo @ 2017-06-07 5:46 UTC (permalink / raw)
To: Xing, Beilei, Wu, Jingjing; +Cc: dev
Hi Beilei,
> -----Original Message-----
> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Beilei Xing
> Sent: Thursday, June 1, 2017 2:57 PM
> To: Wu, Jingjing
> Cc: dev@dpdk.org
> Subject: [dpdk-dev] [PATCH v2 2/2] net/i40e: add NVGRE parsing function
>
> This patch adds NVGRE parsing function to support NVGRE classification.
>
> Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> ---
> drivers/net/i40e/i40e_flow.c | 271
> ++++++++++++++++++++++++++++++++++++++++++-
> 1 file changed, 269 insertions(+), 2 deletions(-)
>
> /* 1. Last in item should be NULL as range is not supported.
> + * 2. Supported filter types: IMAC_IVLAN_TENID, IMAC_IVLAN,
> + * IMAC_TENID, OMAC_TENID_IMAC and IMAC.
> + * 3. Mask of fields which need to be matched should be
> + * filled with 1.
> + * 4. Mask of fields which needn't to be matched should be
> + * filled with 0.
> + */
> +static int
> +i40e_flow_parse_nvgre_pattern(__rte_unused struct rte_eth_dev *dev,
> + const struct rte_flow_item *pattern,
> + struct rte_flow_error *error,
> + struct i40e_tunnel_filter_conf *filter) {
> + const struct rte_flow_item *item = pattern;
> + const struct rte_flow_item_eth *eth_spec;
> + const struct rte_flow_item_eth *eth_mask;
> + const struct rte_flow_item_nvgre *nvgre_spec;
> + const struct rte_flow_item_nvgre *nvgre_mask;
> + const struct rte_flow_item_vlan *vlan_spec;
> + const struct rte_flow_item_vlan *vlan_mask;
> + enum rte_flow_item_type item_type;
> + uint8_t filter_type = 0;
> + bool is_tni_masked = 0;
> + uint8_t tni_mask[] = {0xFF, 0xFF, 0xFF};
> + bool nvgre_flag = 0;
> + uint32_t tenant_id_be = 0;
> + int ret;
> +
> + for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
> + if (item->last) {
> + rte_flow_error_set(error, EINVAL,
> + RTE_FLOW_ERROR_TYPE_ITEM,
> + item,
> + "Not support range");
> + return -rte_errno;
> + }
> + item_type = item->type;
> + switch (item_type) {
> + case RTE_FLOW_ITEM_TYPE_ETH:
> + eth_spec = (const struct rte_flow_item_eth *)item-
> >spec;
> + eth_mask = (const struct rte_flow_item_eth *)item-
> >mask;
> + if ((!eth_spec && eth_mask) ||
> + (eth_spec && !eth_mask)) {
> + rte_flow_error_set(error, EINVAL,
> +
> RTE_FLOW_ERROR_TYPE_ITEM,
> + item,
> + "Invalid ether spec/mask");
> + return -rte_errno;
> + }
> +
> + if (eth_spec && eth_mask) {
> + /* DST address of inner MAC shouldn't be
> masked.
> + * SRC address of Inner MAC should be
> masked.
> + */
> + if (!is_broadcast_ether_addr(&eth_mask->dst)
> ||
> + !is_zero_ether_addr(&eth_mask->src) ||
> + eth_mask->type) {
> + rte_flow_error_set(error, EINVAL,
> +
> RTE_FLOW_ERROR_TYPE_ITEM,
> + item,
> + "Invalid ether spec/mask");
> + return -rte_errno;
> + }
> +
> + if (!nvgre_flag) {
> + rte_memcpy(&filter->outer_mac,
> + &eth_spec->dst,
> + ETHER_ADDR_LEN);
> + filter_type |=
> ETH_TUNNEL_FILTER_OMAC;
> + } else {
> + rte_memcpy(&filter->inner_mac,
> + &eth_spec->dst,
> + ETHER_ADDR_LEN);
> + filter_type |=
> ETH_TUNNEL_FILTER_IMAC;
> + }
> + }
Nothing to do if both spec and mask are NULL, right? If so, would you like to add a comment here?
> +
> + break;
> + case RTE_FLOW_ITEM_TYPE_VLAN:
> + vlan_spec =
> + (const struct rte_flow_item_vlan *)item-
> >spec;
> + vlan_mask =
> + (const struct rte_flow_item_vlan *)item-
> >mask;
> + if (nvgre_flag) {
Why do we need to check nvgre_flag? It seems VLAN must come after NVGRE, so this flag is always 1.
> + if (!(vlan_spec && vlan_mask)) {
> + rte_flow_error_set(error, EINVAL,
> +
> RTE_FLOW_ERROR_TYPE_ITEM,
> + item,
> + "Invalid vlan item");
> + return -rte_errno;
> + }
> + } else {
> + if (vlan_spec || vlan_mask)
> + rte_flow_error_set(error, EINVAL,
> +
> RTE_FLOW_ERROR_TYPE_ITEM,
> + item,
> + "Invalid vlan item");
> + return -rte_errno;
> + }
> +
> + if (vlan_spec && vlan_mask) {
> + if (vlan_mask->tci ==
> + rte_cpu_to_be_16(I40E_TCI_MASK))
> + filter->inner_vlan =
> + rte_be_to_cpu_16(vlan_spec->tci)
> &
> + I40E_TCI_MASK;
> + filter_type |= ETH_TUNNEL_FILTER_IVLAN;
> + }
> + break;
> + case RTE_FLOW_ITEM_TYPE_IPV4:
> + filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
> + /* IPv4 is used to describe protocol,
> + * spec and mask should be NULL.
> + */
> + if (item->spec || item->mask) {
> + rte_flow_error_set(error, EINVAL,
> +
> RTE_FLOW_ERROR_TYPE_ITEM,
> + item,
> + "Invalid IPv4 item");
> + return -rte_errno;
> + }
> + break;
> + case RTE_FLOW_ITEM_TYPE_IPV6:
> + filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
> + /* IPv6 is used to describe protocol,
> + * spec and mask should be NULL.
> + */
> + if (item->spec || item->mask) {
> + rte_flow_error_set(error, EINVAL,
> +
> RTE_FLOW_ERROR_TYPE_ITEM,
> + item,
> + "Invalid IPv6 item");
> + return -rte_errno;
> + }
> + break;
> + case RTE_FLOW_ITEM_TYPE_NVGRE:
> + nvgre_spec =
> + (const struct rte_flow_item_nvgre *)item-
> >spec;
> + nvgre_mask =
> + (const struct rte_flow_item_nvgre *)item-
> >mask;
> + /* Check if NVGRE item is used to describe protocol.
> + * If yes, both spec and mask should be NULL.
> + * If no, either spec or mask shouldn't be NULL.
> + */
> + if ((!nvgre_spec && nvgre_mask) ||
> + (nvgre_spec && !nvgre_mask)) {
> + rte_flow_error_set(error, EINVAL,
> + RTE_FLOW_ERROR_TYPE_ITEM,
> + item,
> + "Invalid NVGRE item");
> + return -rte_errno;
> + }
> +
> + if (nvgre_spec && nvgre_mask) {
> + is_tni_masked =
> + !!memcmp(nvgre_mask->tni,
> tni_mask,
> + RTE_DIM(tni_mask));
> + if (is_tni_masked) {
> + rte_flow_error_set(error, EINVAL,
> +
> RTE_FLOW_ERROR_TYPE_ITEM,
> + item,
> + "Invalid TNI mask");
> + return -rte_errno;
> + }
> + rte_memcpy(((uint8_t *)&tenant_id_be + 1),
> + nvgre_spec->tni, 3);
> + filter->tenant_id =
> + rte_be_to_cpu_32(tenant_id_be);
> + filter_type |= ETH_TUNNEL_FILTER_TENID;
> + }
A similar concern: would a comment for the NULL spec and mask case be better here?
> +
> + nvgre_flag = 1;
> + break;
> + default:
> + break;
> + }
> + }
> +
> + ret = i40e_check_tunnel_filter_type(filter_type);
> + if (ret < 0) {
> + rte_flow_error_set(error, EINVAL,
> + RTE_FLOW_ERROR_TYPE_ITEM,
> + NULL,
> + "Invalid filter type");
> + return -rte_errno;
> + }
> + filter->filter_type = filter_type;
> +
> + filter->tunnel_type = I40E_TUNNEL_TYPE_NVGRE;
> +
> + return 0;
> +}
> +
> +static int
> +i40e_flow_parse_nvgre_filter(struct rte_eth_dev *dev,
> + const struct rte_flow_attr *attr,
> + const struct rte_flow_item pattern[],
> + const struct rte_flow_action actions[],
> + struct rte_flow_error *error,
> + union i40e_filter_t *filter)
> +{
> + struct i40e_tunnel_filter_conf *tunnel_filter =
> + &filter->consistent_tunnel_filter;
> + int ret;
> +
> + ret = i40e_flow_parse_nvgre_pattern(dev, pattern,
> + error, tunnel_filter);
> + if (ret)
> + return ret;
> +
> + ret = i40e_flow_parse_tunnel_action(dev, actions, error,
> tunnel_filter);
> + if (ret)
> + return ret;
> +
> + ret = i40e_flow_parse_attr(attr, error);
> + if (ret)
> + return ret;
> +
> + cons_filter_type = RTE_ETH_FILTER_TUNNEL;
> +
> + return ret;
> +}
> +
> +/* 1. Last in item should be NULL as range is not supported.
> * 2. Supported filter types: MPLS label.
> * 3. Mask of fields which need to be matched should be
> * filled with 1.
> --
> 2.5.5
* Re: [dpdk-dev] [PATCH v2 2/2] net/i40e: add NVGRE parsing function
2017-06-07 5:46 ` Lu, Wenzhuo
@ 2017-06-07 6:06 ` Xing, Beilei
2017-06-07 6:12 ` Lu, Wenzhuo
0 siblings, 1 reply; 16+ messages in thread
From: Xing, Beilei @ 2017-06-07 6:06 UTC (permalink / raw)
To: Lu, Wenzhuo, Wu, Jingjing; +Cc: dev
> -----Original Message-----
> From: Lu, Wenzhuo
> Sent: Wednesday, June 7, 2017 1:46 PM
> To: Xing, Beilei <beilei.xing@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>
> Cc: dev@dpdk.org
> Subject: RE: [dpdk-dev] [PATCH v2 2/2] net/i40e: add NVGRE parsing
> function
>
> Hi Beilei,
>
> > -----Original Message-----
> > From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Beilei Xing
> > Sent: Thursday, June 1, 2017 2:57 PM
> > To: Wu, Jingjing
> > Cc: dev@dpdk.org
> > Subject: [dpdk-dev] [PATCH v2 2/2] net/i40e: add NVGRE parsing function
> >
> > This patch adds NVGRE parsing function to support NVGRE classification.
> >
> > Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> > ---
> > drivers/net/i40e/i40e_flow.c | 271
> > ++++++++++++++++++++++++++++++++++++++++++-
> > 1 file changed, 269 insertions(+), 2 deletions(-)
>
> >
> > /* 1. Last in item should be NULL as range is not supported.
> > + * 2. Supported filter types: IMAC_IVLAN_TENID, IMAC_IVLAN,
> > + * IMAC_TENID, OMAC_TENID_IMAC and IMAC.
> > + * 3. Mask of fields which need to be matched should be
> > + * filled with 1.
> > + * 4. Mask of fields which needn't to be matched should be
> > + * filled with 0.
> > + */
> > +static int
> > +i40e_flow_parse_nvgre_pattern(__rte_unused struct rte_eth_dev *dev,
> > + const struct rte_flow_item *pattern,
> > + struct rte_flow_error *error,
> > + struct i40e_tunnel_filter_conf *filter) {
> > + const struct rte_flow_item *item = pattern;
> > + const struct rte_flow_item_eth *eth_spec;
> > + const struct rte_flow_item_eth *eth_mask;
> > + const struct rte_flow_item_nvgre *nvgre_spec;
> > + const struct rte_flow_item_nvgre *nvgre_mask;
> > + const struct rte_flow_item_vlan *vlan_spec;
> > + const struct rte_flow_item_vlan *vlan_mask;
> > + enum rte_flow_item_type item_type;
> > + uint8_t filter_type = 0;
> > + bool is_tni_masked = 0;
> > + uint8_t tni_mask[] = {0xFF, 0xFF, 0xFF};
> > + bool nvgre_flag = 0;
> > + uint32_t tenant_id_be = 0;
> > + int ret;
> > +
> > + for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
> > + if (item->last) {
> > + rte_flow_error_set(error, EINVAL,
> > + RTE_FLOW_ERROR_TYPE_ITEM,
> > + item,
> > + "Not support range");
> > + return -rte_errno;
> > + }
> > + item_type = item->type;
> > + switch (item_type) {
> > + case RTE_FLOW_ITEM_TYPE_ETH:
> > + eth_spec = (const struct rte_flow_item_eth *)item-
> > >spec;
> > + eth_mask = (const struct rte_flow_item_eth *)item-
> > >mask;
> > + if ((!eth_spec && eth_mask) ||
> > + (eth_spec && !eth_mask)) {
> > + rte_flow_error_set(error, EINVAL,
> > +
> > RTE_FLOW_ERROR_TYPE_ITEM,
> > + item,
> > + "Invalid ether spec/mask");
> > + return -rte_errno;
> > + }
> > +
> > + if (eth_spec && eth_mask) {
> > + /* DST address of inner MAC shouldn't be
> > masked.
> > + * SRC address of Inner MAC should be
> > masked.
> > + */
> > + if (!is_broadcast_ether_addr(&eth_mask-
> > >dst)
> > ||
> > + !is_zero_ether_addr(&eth_mask->src) ||
> > + eth_mask->type) {
> > + rte_flow_error_set(error, EINVAL,
> > +
> > RTE_FLOW_ERROR_TYPE_ITEM,
> > + item,
> > + "Invalid ether spec/mask");
> > + return -rte_errno;
> > + }
> > +
> > + if (!nvgre_flag) {
> > + rte_memcpy(&filter->outer_mac,
> > + &eth_spec->dst,
> > + ETHER_ADDR_LEN);
> > + filter_type |=
> > ETH_TUNNEL_FILTER_OMAC;
> > + } else {
> > + rte_memcpy(&filter->inner_mac,
> > + &eth_spec->dst,
> > + ETHER_ADDR_LEN);
> > + filter_type |=
> > ETH_TUNNEL_FILTER_IMAC;
> > + }
> > + }
> Nothing to do if both spec and mask are NULL, right? If so, would you like to
> add comments here?
OK. Will update in v3.
>
> > +
> > + break;
> > + case RTE_FLOW_ITEM_TYPE_VLAN:
> > + vlan_spec =
> > + (const struct rte_flow_item_vlan *)item-
> > >spec;
> > + vlan_mask =
> > + (const struct rte_flow_item_vlan *)item-
> > >mask;
> > + if (nvgre_flag) {
> Why need to check nvgre_flag? Seems VLAN must be after NVGRE, so this
> flag is always 1.
It's used to distinguish the outer MAC from the inner MAC.
>
> > + if (!(vlan_spec && vlan_mask)) {
> > + rte_flow_error_set(error, EINVAL,
> > +
> > RTE_FLOW_ERROR_TYPE_ITEM,
> > + item,
> > + "Invalid vlan item");
> > + return -rte_errno;
> > + }
> > + } else {
> > + if (vlan_spec || vlan_mask)
> > + rte_flow_error_set(error, EINVAL,
> > +
> > RTE_FLOW_ERROR_TYPE_ITEM,
> > + item,
> > + "Invalid vlan item");
> > + return -rte_errno;
> > + }
> > +
> > + if (vlan_spec && vlan_mask) {
> > + if (vlan_mask->tci ==
> > + rte_cpu_to_be_16(I40E_TCI_MASK))
> > + filter->inner_vlan =
> > + rte_be_to_cpu_16(vlan_spec->tci)
> > &
> > + I40E_TCI_MASK;
> > + filter_type |= ETH_TUNNEL_FILTER_IVLAN;
> > + }
> > + break;
> > + case RTE_FLOW_ITEM_TYPE_IPV4:
> > + filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
> > + /* IPv4 is used to describe protocol,
> > + * spec and mask should be NULL.
> > + */
> > + if (item->spec || item->mask) {
> > + rte_flow_error_set(error, EINVAL,
> > +
> > RTE_FLOW_ERROR_TYPE_ITEM,
> > + item,
> > + "Invalid IPv4 item");
> > + return -rte_errno;
> > + }
> > + break;
> > + case RTE_FLOW_ITEM_TYPE_IPV6:
> > + filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
> > + /* IPv6 is used to describe protocol,
> > + * spec and mask should be NULL.
> > + */
> > + if (item->spec || item->mask) {
> > + rte_flow_error_set(error, EINVAL,
> > +
> > RTE_FLOW_ERROR_TYPE_ITEM,
> > + item,
> > + "Invalid IPv6 item");
> > + return -rte_errno;
> > + }
> > + break;
> > + case RTE_FLOW_ITEM_TYPE_NVGRE:
> > + nvgre_spec =
> > + (const struct rte_flow_item_nvgre *)item-
> > >spec;
> > + nvgre_mask =
> > + (const struct rte_flow_item_nvgre *)item-
> > >mask;
> > + /* Check if NVGRE item is used to describe protocol.
> > + * If yes, both spec and mask should be NULL.
> > + * If no, either spec or mask shouldn't be NULL.
> > + */
> > + if ((!nvgre_spec && nvgre_mask) ||
> > + (nvgre_spec && !nvgre_mask)) {
> > + rte_flow_error_set(error, EINVAL,
> > + RTE_FLOW_ERROR_TYPE_ITEM,
> > + item,
> > + "Invalid NVGRE item");
> > + return -rte_errno;
> > + }
> > +
> > + if (nvgre_spec && nvgre_mask) {
> > + is_tni_masked =
> > + !!memcmp(nvgre_mask->tni,
> > tni_mask,
> > + RTE_DIM(tni_mask));
> > + if (is_tni_masked) {
> > + rte_flow_error_set(error, EINVAL,
> > +
> > RTE_FLOW_ERROR_TYPE_ITEM,
> > + item,
> > + "Invalid TNI mask");
> > + return -rte_errno;
> > + }
> > + rte_memcpy(((uint8_t *)&tenant_id_be + 1),
> > + nvgre_spec->tni, 3);
> > + filter->tenant_id =
> > + rte_be_to_cpu_32(tenant_id_be);
> > + filter_type |= ETH_TUNNEL_FILTER_TENID;
> > + }
> A similar concern. Is here a comments for NULL spec and mask better?
OK.
>
> > +
> > + nvgre_flag = 1;
> > + break;
> > + default:
> > + break;
> > + }
> > + }
> > +
> > + ret = i40e_check_tunnel_filter_type(filter_type);
> > + if (ret < 0) {
> > + rte_flow_error_set(error, EINVAL,
> > + RTE_FLOW_ERROR_TYPE_ITEM,
> > + NULL,
> > + "Invalid filter type");
> > + return -rte_errno;
> > + }
> > + filter->filter_type = filter_type;
> > +
> > + filter->tunnel_type = I40E_TUNNEL_TYPE_NVGRE;
> > +
> > + return 0;
> > +}
> > +
> > +static int
> > +i40e_flow_parse_nvgre_filter(struct rte_eth_dev *dev,
> > + const struct rte_flow_attr *attr,
> > + const struct rte_flow_item pattern[],
> > + const struct rte_flow_action actions[],
> > + struct rte_flow_error *error,
> > + union i40e_filter_t *filter)
> > +{
> > + struct i40e_tunnel_filter_conf *tunnel_filter =
> > + &filter->consistent_tunnel_filter;
> > + int ret;
> > +
> > + ret = i40e_flow_parse_nvgre_pattern(dev, pattern,
> > + error, tunnel_filter);
> > + if (ret)
> > + return ret;
> > +
> > + ret = i40e_flow_parse_tunnel_action(dev, actions, error,
> > tunnel_filter);
> > + if (ret)
> > + return ret;
> > +
> > + ret = i40e_flow_parse_attr(attr, error);
> > + if (ret)
> > + return ret;
> > +
> > + cons_filter_type = RTE_ETH_FILTER_TUNNEL;
> > +
> > + return ret;
> > +}
> > +
> > +/* 1. Last in item should be NULL as range is not supported.
> > * 2. Supported filter types: MPLS label.
> > * 3. Mask of fields which need to be matched should be
> > * filled with 1.
> > --
> > 2.5.5
* Re: [dpdk-dev] [PATCH v2 2/2] net/i40e: add NVGRE parsing function
2017-06-07 6:06 ` Xing, Beilei
@ 2017-06-07 6:12 ` Lu, Wenzhuo
2017-06-07 6:22 ` Xing, Beilei
0 siblings, 1 reply; 16+ messages in thread
From: Lu, Wenzhuo @ 2017-06-07 6:12 UTC (permalink / raw)
To: Xing, Beilei, Wu, Jingjing; +Cc: dev
> -----Original Message-----
> From: Xing, Beilei
> Sent: Wednesday, June 7, 2017 2:07 PM
> To: Lu, Wenzhuo; Wu, Jingjing
> Cc: dev@dpdk.org
> Subject: RE: [dpdk-dev] [PATCH v2 2/2] net/i40e: add NVGRE parsing function
>
>
>
> > -----Original Message-----
> > From: Lu, Wenzhuo
> > Sent: Wednesday, June 7, 2017 1:46 PM
> > To: Xing, Beilei <beilei.xing@intel.com>; Wu, Jingjing
> > <jingjing.wu@intel.com>
> > Cc: dev@dpdk.org
> > Subject: RE: [dpdk-dev] [PATCH v2 2/2] net/i40e: add NVGRE parsing
> > function
> >
> > Hi Beilei,
> >
> > > -----Original Message-----
> > > From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Beilei Xing
> > > Sent: Thursday, June 1, 2017 2:57 PM
> > > To: Wu, Jingjing
> > > Cc: dev@dpdk.org
> > > Subject: [dpdk-dev] [PATCH v2 2/2] net/i40e: add NVGRE parsing
> > > function
> > >
> > > This patch adds NVGRE parsing function to support NVGRE classification.
> > >
> > > Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> > > ---
> > > drivers/net/i40e/i40e_flow.c | 271
> > > ++++++++++++++++++++++++++++++++++++++++++-
> > > 1 file changed, 269 insertions(+), 2 deletions(-)
>
> >
> > > +
> > > + break;
> > > + case RTE_FLOW_ITEM_TYPE_VLAN:
> > > + vlan_spec =
> > > + (const struct rte_flow_item_vlan *)item-
> > > >spec;
> > > + vlan_mask =
> > > + (const struct rte_flow_item_vlan *)item-
> > > >mask;
> > > + if (nvgre_flag) {
> > Why need to check nvgre_flag? Seems VLAN must be after NVGRE, so this
> > flag is always 1.
>
> It's used to distinguish outer mac or inner mac.
I know you need to add this flag for MAC. But I'm talking about VLAN. There's only an inner VLAN, so the check seems useless here.
* Re: [dpdk-dev] [PATCH v2 2/2] net/i40e: add NVGRE parsing function
2017-06-07 6:12 ` Lu, Wenzhuo
@ 2017-06-07 6:22 ` Xing, Beilei
0 siblings, 0 replies; 16+ messages in thread
From: Xing, Beilei @ 2017-06-07 6:22 UTC (permalink / raw)
To: Lu, Wenzhuo, Wu, Jingjing; +Cc: dev
> -----Original Message-----
> From: Lu, Wenzhuo
> Sent: Wednesday, June 7, 2017 2:12 PM
> To: Xing, Beilei <beilei.xing@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>
> Cc: dev@dpdk.org
> Subject: RE: [dpdk-dev] [PATCH v2 2/2] net/i40e: add NVGRE parsing
> function
>
> > -----Original Message-----
> > From: Xing, Beilei
> > Sent: Wednesday, June 7, 2017 2:07 PM
> > To: Lu, Wenzhuo; Wu, Jingjing
> > Cc: dev@dpdk.org
> > Subject: RE: [dpdk-dev] [PATCH v2 2/2] net/i40e: add NVGRE parsing
> > function
> >
> >
> >
> > > -----Original Message-----
> > > From: Lu, Wenzhuo
> > > Sent: Wednesday, June 7, 2017 1:46 PM
> > > To: Xing, Beilei <beilei.xing@intel.com>; Wu, Jingjing
> > > <jingjing.wu@intel.com>
> > > Cc: dev@dpdk.org
> > > Subject: RE: [dpdk-dev] [PATCH v2 2/2] net/i40e: add NVGRE parsing
> > > function
> > >
> > > Hi Beilei,
> > >
> > > > -----Original Message-----
> > > > From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Beilei Xing
> > > > Sent: Thursday, June 1, 2017 2:57 PM
> > > > To: Wu, Jingjing
> > > > Cc: dev@dpdk.org
> > > > Subject: [dpdk-dev] [PATCH v2 2/2] net/i40e: add NVGRE parsing
> > > > function
> > > >
> > > > This patch adds NVGRE parsing function to support NVGRE classification.
> > > >
> > > > Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> > > > ---
> > > > drivers/net/i40e/i40e_flow.c | 271
> > > > ++++++++++++++++++++++++++++++++++++++++++-
> > > > 1 file changed, 269 insertions(+), 2 deletions(-)
>
> >
> > >
> > > > +
> > > > + break;
> > > > + case RTE_FLOW_ITEM_TYPE_VLAN:
> > > > + vlan_spec =
> > > > + (const struct rte_flow_item_vlan *)item-
> > > > >spec;
> > > > + vlan_mask =
> > > > + (const struct rte_flow_item_vlan *)item-
> > > > >mask;
> > > > + if (nvgre_flag) {
> > > Why need to check nvgre_flag? Seems VLAN must be after NVGRE, so
> > > this flag is always 1.
> >
> > It's used to distinguish outer mac or inner mac.
> I know you need to add this flag for MAC. But I'm talking about VLAN. There's
> only inner VLAN. So, seems it's useless here.
Oh yes, sorry for the misunderstanding. Outer VLAN is not supported here, so the check can be removed. Will update in the next version.
* [dpdk-dev] [PATCH v3 0/2] net/i40e: extend tunnel filter support
2017-06-01 6:56 ` [dpdk-dev] [PATCH v2 0/2] net/i40e: extend tunnel filter support Beilei Xing
2017-06-01 6:56 ` [dpdk-dev] [PATCH v2 1/2] net/i40e: optimize vxlan parsing function Beilei Xing
2017-06-01 6:56 ` [dpdk-dev] [PATCH v2 2/2] net/i40e: add NVGRE " Beilei Xing
@ 2017-06-07 6:53 ` Beilei Xing
2017-06-07 6:53 ` [dpdk-dev] [PATCH v3 1/2] net/i40e: optimize vxlan parsing function Beilei Xing
2017-06-07 6:53 ` [dpdk-dev] [PATCH v3 2/2] net/i40e: add NVGRE " Beilei Xing
2 siblings, 2 replies; 16+ messages in thread
From: Beilei Xing @ 2017-06-07 6:53 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev
This patchset extends tunnel filter support by optimizing the vxlan parsing function and adding an NVGRE parsing function.
v2 changes:
- Add vxlan parsing function optimization.
- Optimize NVGRE parsing function.
v3 changes:
- Polish commit log.
- Delete redundant if statements.
Beilei Xing (2):
net/i40e: optimize vxlan parsing function
net/i40e: add NVGRE parsing function
drivers/net/i40e/i40e_flow.c | 445 +++++++++++++++++++++++++++++++------------
1 file changed, 319 insertions(+), 126 deletions(-)
--
2.5.5
* [dpdk-dev] [PATCH v3 1/2] net/i40e: optimize vxlan parsing function
2017-06-07 6:53 ` [dpdk-dev] [PATCH v3 0/2] net/i40e: extend tunnel filter support Beilei Xing
@ 2017-06-07 6:53 ` Beilei Xing
2017-06-07 6:53 ` [dpdk-dev] [PATCH v3 2/2] net/i40e: add NVGRE " Beilei Xing
1 sibling, 0 replies; 16+ messages in thread
From: Beilei Xing @ 2017-06-07 6:53 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev
The current vxlan parsing function is not easy to read when parsing
the filter type; this patch optimizes the function and makes it more
readable.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_flow.c | 196 ++++++++++++++-----------------------------
1 file changed, 63 insertions(+), 133 deletions(-)
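The VNI mask handling also changes shape in the diff below: instead
of the byte-walking i40e_check_tenant_id_mask(), which also accepted
an all-zero mask, the code now only needs to know whether the 3-byte
VNI mask is exactly all ones, which a single memcmp() answers. A
sketch of the check in isolation (names are stand-ins, not the
driver's):

    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>

    static bool
    vni_fully_masked(const uint8_t vni_mask[3])
    {
        static const uint8_t full[3] = {0xFF, 0xFF, 0xFF};

        /* memcmp() == 0 means every byte of the mask is 0xFF. */
        return memcmp(vni_mask, full, sizeof(full)) == 0;
    }

In the patch the memcmp() result is kept directly (is_vni_masked is
true when the mask is *not* all ones), which is why a nonzero value
triggers the "Invalid VNI mask" error.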
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index 37b55e7..b4ba555 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -1268,27 +1268,27 @@ i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
return 0;
}
+static uint16_t i40e_supported_tunnel_filter_types[] = {
+ ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_TENID |
+ ETH_TUNNEL_FILTER_IVLAN,
+ ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN,
+ ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_TENID,
+ ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID |
+ ETH_TUNNEL_FILTER_IMAC,
+ ETH_TUNNEL_FILTER_IMAC,
+};
+
static int
-i40e_check_tenant_id_mask(const uint8_t *mask)
+i40e_check_tunnel_filter_type(uint8_t filter_type)
{
- uint32_t j;
- int is_masked = 0;
-
- for (j = 0; j < I40E_TENANT_ARRAY_NUM; j++) {
- if (*(mask + j) == UINT8_MAX) {
- if (j > 0 && (*(mask + j) != *(mask + j - 1)))
- return -EINVAL;
- is_masked = 0;
- } else if (*(mask + j) == 0) {
- if (j > 0 && (*(mask + j) != *(mask + j - 1)))
- return -EINVAL;
- is_masked = 1;
- } else {
- return -EINVAL;
- }
+ uint8_t i;
+
+ for (i = 0; i < RTE_DIM(i40e_supported_tunnel_filter_types); i++) {
+ if (filter_type == i40e_supported_tunnel_filter_types[i])
+ return 0;
}
- return is_masked;
+ return -1;
}
/* 1. Last in item should be NULL as range is not supported.
@@ -1308,18 +1308,17 @@ i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
const struct rte_flow_item *item = pattern;
const struct rte_flow_item_eth *eth_spec;
const struct rte_flow_item_eth *eth_mask;
- const struct rte_flow_item_eth *o_eth_spec = NULL;
- const struct rte_flow_item_eth *o_eth_mask = NULL;
- const struct rte_flow_item_vxlan *vxlan_spec = NULL;
- const struct rte_flow_item_vxlan *vxlan_mask = NULL;
- const struct rte_flow_item_eth *i_eth_spec = NULL;
- const struct rte_flow_item_eth *i_eth_mask = NULL;
- const struct rte_flow_item_vlan *vlan_spec = NULL;
- const struct rte_flow_item_vlan *vlan_mask = NULL;
+ const struct rte_flow_item_vxlan *vxlan_spec;
+ const struct rte_flow_item_vxlan *vxlan_mask;
+ const struct rte_flow_item_vlan *vlan_spec;
+ const struct rte_flow_item_vlan *vlan_mask;
+ uint8_t filter_type = 0;
bool is_vni_masked = 0;
+ uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF};
enum rte_flow_item_type item_type;
bool vxlan_flag = 0;
uint32_t tenant_id_be = 0;
+ int ret;
for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
if (item->last) {
@@ -1334,6 +1333,11 @@ i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
case RTE_FLOW_ITEM_TYPE_ETH:
eth_spec = (const struct rte_flow_item_eth *)item->spec;
eth_mask = (const struct rte_flow_item_eth *)item->mask;
+
+ /* Check if ETH item is used as a placeholder.
+ * If yes, both spec and mask should be NULL.
+ * If no, both spec and mask shouldn't be NULL.
+ */
if ((!eth_spec && eth_mask) ||
(eth_spec && !eth_mask)) {
rte_flow_error_set(error, EINVAL,
@@ -1357,50 +1361,40 @@ i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
return -rte_errno;
}
- if (!vxlan_flag)
+ if (!vxlan_flag) {
rte_memcpy(&filter->outer_mac,
&eth_spec->dst,
ETHER_ADDR_LEN);
- else
+ filter_type |= ETH_TUNNEL_FILTER_OMAC;
+ } else {
rte_memcpy(&filter->inner_mac,
&eth_spec->dst,
ETHER_ADDR_LEN);
+ filter_type |= ETH_TUNNEL_FILTER_IMAC;
+ }
}
-
- if (!vxlan_flag) {
- o_eth_spec = eth_spec;
- o_eth_mask = eth_mask;
- } else {
- i_eth_spec = eth_spec;
- i_eth_mask = eth_mask;
- }
-
break;
case RTE_FLOW_ITEM_TYPE_VLAN:
vlan_spec =
(const struct rte_flow_item_vlan *)item->spec;
vlan_mask =
(const struct rte_flow_item_vlan *)item->mask;
- if (vxlan_flag) {
- vlan_spec =
- (const struct rte_flow_item_vlan *)item->spec;
- vlan_mask =
- (const struct rte_flow_item_vlan *)item->mask;
- if (!(vlan_spec && vlan_mask)) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "Invalid vlan item");
- return -rte_errno;
- }
- } else {
- if (vlan_spec || vlan_mask)
- rte_flow_error_set(error, EINVAL,
+ if (!(vlan_spec && vlan_mask)) {
+ rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item,
"Invalid vlan item");
return -rte_errno;
}
+
+ if (vlan_spec && vlan_mask) {
+ if (vlan_mask->tci ==
+ rte_cpu_to_be_16(I40E_TCI_MASK))
+ filter->inner_vlan =
+ rte_be_to_cpu_16(vlan_spec->tci) &
+ I40E_TCI_MASK;
+ filter_type |= ETH_TUNNEL_FILTER_IVLAN;
+ }
break;
case RTE_FLOW_ITEM_TYPE_IPV4:
filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
@@ -1447,7 +1441,7 @@ i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
(const struct rte_flow_item_vxlan *)item->mask;
/* Check if VXLAN item is used to describe protocol.
* If yes, both spec and mask should be NULL.
- * If no, either spec or mask shouldn't be NULL.
+ * If no, both spec and mask shouldn't be NULL.
*/
if ((!vxlan_spec && vxlan_mask) ||
(vxlan_spec && !vxlan_mask)) {
@@ -1459,17 +1453,25 @@ i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
}
/* Check if VNI is masked. */
- if (vxlan_mask) {
+ if (vxlan_spec && vxlan_mask) {
is_vni_masked =
- i40e_check_tenant_id_mask(vxlan_mask->vni);
- if (is_vni_masked < 0) {
+ !!memcmp(vxlan_mask->vni, vni_mask,
+ RTE_DIM(vni_mask));
+ if (is_vni_masked) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item,
"Invalid VNI mask");
return -rte_errno;
}
+
+ rte_memcpy(((uint8_t *)&tenant_id_be + 1),
+ vxlan_spec->vni, 3);
+ filter->tenant_id =
+ rte_be_to_cpu_32(tenant_id_be);
+ filter_type |= ETH_TUNNEL_FILTER_TENID;
}
+
vxlan_flag = 1;
break;
default:
@@ -1477,87 +1479,15 @@ i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
}
}
- /* Check specification and mask to get the filter type */
- if (vlan_spec && vlan_mask &&
- (vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK))) {
- /* If there's inner vlan */
- filter->inner_vlan = rte_be_to_cpu_16(vlan_spec->tci)
- & I40E_TCI_MASK;
- if (vxlan_spec && vxlan_mask && !is_vni_masked) {
- /* If there's vxlan */
- rte_memcpy(((uint8_t *)&tenant_id_be + 1),
- vxlan_spec->vni, 3);
- filter->tenant_id = rte_be_to_cpu_32(tenant_id_be);
- if (!o_eth_spec && !o_eth_mask &&
- i_eth_spec && i_eth_mask)
- filter->filter_type =
- RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID;
- else {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- NULL,
- "Invalid filter type");
- return -rte_errno;
- }
- } else if (!vxlan_spec && !vxlan_mask) {
- /* If there's no vxlan */
- if (!o_eth_spec && !o_eth_mask &&
- i_eth_spec && i_eth_mask)
- filter->filter_type =
- RTE_TUNNEL_FILTER_IMAC_IVLAN;
- else {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- NULL,
- "Invalid filter type");
- return -rte_errno;
- }
- } else {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- NULL,
- "Invalid filter type");
- return -rte_errno;
- }
- } else if ((!vlan_spec && !vlan_mask) ||
- (vlan_spec && vlan_mask && vlan_mask->tci == 0x0)) {
- /* If there's no inner vlan */
- if (vxlan_spec && vxlan_mask && !is_vni_masked) {
- /* If there's vxlan */
- rte_memcpy(((uint8_t *)&tenant_id_be + 1),
- vxlan_spec->vni, 3);
- filter->tenant_id = rte_be_to_cpu_32(tenant_id_be);
- if (!o_eth_spec && !o_eth_mask &&
- i_eth_spec && i_eth_mask)
- filter->filter_type =
- RTE_TUNNEL_FILTER_IMAC_TENID;
- else if (o_eth_spec && o_eth_mask &&
- i_eth_spec && i_eth_mask)
- filter->filter_type =
- RTE_TUNNEL_FILTER_OMAC_TENID_IMAC;
- } else if (!vxlan_spec && !vxlan_mask) {
- /* If there's no vxlan */
- if (!o_eth_spec && !o_eth_mask &&
- i_eth_spec && i_eth_mask) {
- filter->filter_type = ETH_TUNNEL_FILTER_IMAC;
- } else {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM, NULL,
- "Invalid filter type");
- return -rte_errno;
- }
- } else {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM, NULL,
- "Invalid filter type");
- return -rte_errno;
- }
- } else {
+ ret = i40e_check_tunnel_filter_type(filter_type);
+ if (ret < 0) {
rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM, NULL,
- "Not supported by tunnel filter.");
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL,
+ "Invalid filter type");
return -rte_errno;
}
+ filter->filter_type = filter_type;
filter->tunnel_type = I40E_TUNNEL_TYPE_VXLAN;
--
2.5.5
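For reference, the refactor above (OR one flag bit per matched field while walking the pattern, then validate the combination with a single table lookup) reduces to a minimal standalone sketch. This is plain C with illustrative flag values, not the driver's ETH_TUNNEL_FILTER_* definitions:

/* Minimal standalone sketch of the table-driven filter-type check.
 * Flag values are illustrative stand-ins for the ETH_TUNNEL_FILTER_*
 * bits used by the driver.
 */
#include <stdint.h>
#include <stdio.h>

#define FILTER_OMAC  0x01 /* outer MAC matched */
#define FILTER_IMAC  0x02 /* inner MAC matched */
#define FILTER_IVLAN 0x04 /* inner VLAN matched */
#define FILTER_TENID 0x08 /* tenant id (VNI/TNI) matched */

/* Same five combinations as i40e_supported_tunnel_filter_types[]. */
static const uint8_t supported_types[] = {
	FILTER_IMAC | FILTER_IVLAN | FILTER_TENID,
	FILTER_IMAC | FILTER_IVLAN,
	FILTER_IMAC | FILTER_TENID,
	FILTER_OMAC | FILTER_TENID | FILTER_IMAC,
	FILTER_IMAC,
};

static int
check_filter_type(uint8_t filter_type)
{
	size_t i;

	for (i = 0; i < sizeof(supported_types); i++)
		if (filter_type == supported_types[i])
			return 0;
	return -1;
}

int
main(void)
{
	uint8_t ft = 0;

	/* A pattern walk would accumulate flags like this. */
	ft |= FILTER_IMAC;
	ft |= FILTER_TENID;

	printf("IMAC|TENID supported: %s\n",
	       check_filter_type(ft) == 0 ? "yes" : "no");
	return 0;
}

Compared with the removed chain of spec/mask comparisons, supporting a new combination now only requires one table entry.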
* [dpdk-dev] [PATCH v3 2/2] net/i40e: add NVGRE parsing function
2017-06-07 6:53 ` [dpdk-dev] [PATCH v3 0/2] net/i40e: extend tunnel filter support Beilei Xing
2017-06-07 6:53 ` [dpdk-dev] [PATCH v3 1/2] net/i40e: optimize vxlan parsing function Beilei Xing
@ 2017-06-07 6:53 ` Beilei Xing
2017-06-07 8:07 ` Lu, Wenzhuo
1 sibling, 1 reply; 16+ messages in thread
From: Beilei Xing @ 2017-06-07 6:53 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev
This patch adds an NVGRE parsing function to support NVGRE
classification.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_flow.c | 267 ++++++++++++++++++++++++++++++++++++++++++-
1 file changed, 265 insertions(+), 2 deletions(-)
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index b4ba555..fab4a0d 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -114,6 +114,12 @@ static int i40e_flow_parse_vxlan_filter(struct rte_eth_dev *dev,
const struct rte_flow_action actions[],
struct rte_flow_error *error,
union i40e_filter_t *filter);
+static int i40e_flow_parse_nvgre_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter);
static int i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
@@ -296,7 +302,40 @@ static enum rte_flow_item_type pattern_vxlan_4[] = {
RTE_FLOW_ITEM_TYPE_END,
};
-/* Pattern matched MPLS */
+static enum rte_flow_item_type pattern_nvgre_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_NVGRE,
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_nvgre_2[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_NVGRE,
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_nvgre_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_NVGRE,
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_nvgre_4[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_NVGRE,
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
static enum rte_flow_item_type pattern_mpls_1[] = {
RTE_FLOW_ITEM_TYPE_ETH,
RTE_FLOW_ITEM_TYPE_IPV4,
@@ -329,7 +368,6 @@ static enum rte_flow_item_type pattern_mpls_4[] = {
RTE_FLOW_ITEM_TYPE_END,
};
-/* Pattern matched QINQ */
static enum rte_flow_item_type pattern_qinq_1[] = {
RTE_FLOW_ITEM_TYPE_ETH,
RTE_FLOW_ITEM_TYPE_VLAN,
@@ -362,6 +400,11 @@ static struct i40e_valid_pattern i40e_supported_patterns[] = {
{ pattern_vxlan_2, i40e_flow_parse_vxlan_filter },
{ pattern_vxlan_3, i40e_flow_parse_vxlan_filter },
{ pattern_vxlan_4, i40e_flow_parse_vxlan_filter },
+ /* NVGRE */
+ { pattern_nvgre_1, i40e_flow_parse_nvgre_filter },
+ { pattern_nvgre_2, i40e_flow_parse_nvgre_filter },
+ { pattern_nvgre_3, i40e_flow_parse_nvgre_filter },
+ { pattern_nvgre_4, i40e_flow_parse_nvgre_filter },
/* MPLSoUDP & MPLSoGRE */
{ pattern_mpls_1, i40e_flow_parse_mpls_filter },
{ pattern_mpls_2, i40e_flow_parse_mpls_filter },
@@ -1525,6 +1568,226 @@ i40e_flow_parse_vxlan_filter(struct rte_eth_dev *dev,
}
/* 1. Last in item should be NULL as range is not supported.
+ * 2. Supported filter types: IMAC_IVLAN_TENID, IMAC_IVLAN,
+ * IMAC_TENID, OMAC_TENID_IMAC and IMAC.
+ * 3. Mask of fields which need to be matched should be
+ * filled with 1.
+ * 4. Mask of fields which needn't be matched should be
+ * filled with 0.
+ */
+static int
+i40e_flow_parse_nvgre_pattern(__rte_unused struct rte_eth_dev *dev,
+ const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ struct i40e_tunnel_filter_conf *filter)
+{
+ const struct rte_flow_item *item = pattern;
+ const struct rte_flow_item_eth *eth_spec;
+ const struct rte_flow_item_eth *eth_mask;
+ const struct rte_flow_item_nvgre *nvgre_spec;
+ const struct rte_flow_item_nvgre *nvgre_mask;
+ const struct rte_flow_item_vlan *vlan_spec;
+ const struct rte_flow_item_vlan *vlan_mask;
+ enum rte_flow_item_type item_type;
+ uint8_t filter_type = 0;
+ bool is_tni_masked = 0;
+ uint8_t tni_mask[] = {0xFF, 0xFF, 0xFF};
+ bool nvgre_flag = 0;
+ uint32_t tenant_id_be = 0;
+ int ret;
+
+ for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Not support range");
+ return -rte_errno;
+ }
+ item_type = item->type;
+ switch (item_type) {
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ eth_spec = (const struct rte_flow_item_eth *)item->spec;
+ eth_mask = (const struct rte_flow_item_eth *)item->mask;
+
+ /* Check if ETH item is used as a placeholder.
+ * If yes, both spec and mask should be NULL.
+ * If no, both spec and mask shouldn't be NULL.
+ */
+ if ((!eth_spec && eth_mask) ||
+ (eth_spec && !eth_mask)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid ether spec/mask");
+ return -rte_errno;
+ }
+
+ if (eth_spec && eth_mask) {
+ /* DST address of inner MAC shouldn't be masked.
+ * SRC address of Inner MAC should be masked.
+ */
+ if (!is_broadcast_ether_addr(&eth_mask->dst) ||
+ !is_zero_ether_addr(&eth_mask->src) ||
+ eth_mask->type) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid ether spec/mask");
+ return -rte_errno;
+ }
+
+ if (!nvgre_flag) {
+ rte_memcpy(&filter->outer_mac,
+ &eth_spec->dst,
+ ETHER_ADDR_LEN);
+ filter_type |= ETH_TUNNEL_FILTER_OMAC;
+ } else {
+ rte_memcpy(&filter->inner_mac,
+ &eth_spec->dst,
+ ETHER_ADDR_LEN);
+ filter_type |= ETH_TUNNEL_FILTER_IMAC;
+ }
+ }
+
+ break;
+ case RTE_FLOW_ITEM_TYPE_VLAN:
+ vlan_spec =
+ (const struct rte_flow_item_vlan *)item->spec;
+ vlan_mask =
+ (const struct rte_flow_item_vlan *)item->mask;
+ if (!(vlan_spec && vlan_mask)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid vlan item");
+ return -rte_errno;
+ }
+
+ if (vlan_spec && vlan_mask) {
+ if (vlan_mask->tci ==
+ rte_cpu_to_be_16(I40E_TCI_MASK))
+ filter->inner_vlan =
+ rte_be_to_cpu_16(vlan_spec->tci) &
+ I40E_TCI_MASK;
+ filter_type |= ETH_TUNNEL_FILTER_IVLAN;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
+ /* IPv4 is used to describe protocol,
+ * spec and mask should be NULL.
+ */
+ if (item->spec || item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid IPv4 item");
+ return -rte_errno;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
+ /* IPv6 is used to describe protocol,
+ * spec and mask should be NULL.
+ */
+ if (item->spec || item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid IPv6 item");
+ return -rte_errno;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_NVGRE:
+ nvgre_spec =
+ (const struct rte_flow_item_nvgre *)item->spec;
+ nvgre_mask =
+ (const struct rte_flow_item_nvgre *)item->mask;
+ /* Check if NVGRE item is used to describe protocol.
+ * If yes, both spec and mask should be NULL.
+ * If no, both spec and mask shouldn't be NULL.
+ */
+ if ((!nvgre_spec && nvgre_mask) ||
+ (nvgre_spec && !nvgre_mask)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid NVGRE item");
+ return -rte_errno;
+ }
+
+ if (nvgre_spec && nvgre_mask) {
+ is_tni_masked =
+ !!memcmp(nvgre_mask->tni, tni_mask,
+ RTE_DIM(tni_mask));
+ if (is_tni_masked) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid TNI mask");
+ return -rte_errno;
+ }
+ rte_memcpy(((uint8_t *)&tenant_id_be + 1),
+ nvgre_spec->tni, 3);
+ filter->tenant_id =
+ rte_be_to_cpu_32(tenant_id_be);
+ filter_type |= ETH_TUNNEL_FILTER_TENID;
+ }
+
+ nvgre_flag = 1;
+ break;
+ default:
+ break;
+ }
+ }
+
+ ret = i40e_check_tunnel_filter_type(filter_type);
+ if (ret < 0) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL,
+ "Invalid filter type");
+ return -rte_errno;
+ }
+ filter->filter_type = filter_type;
+
+ filter->tunnel_type = I40E_TUNNEL_TYPE_NVGRE;
+
+ return 0;
+}
+
+static int
+i40e_flow_parse_nvgre_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter)
+{
+ struct i40e_tunnel_filter_conf *tunnel_filter =
+ &filter->consistent_tunnel_filter;
+ int ret;
+
+ ret = i40e_flow_parse_nvgre_pattern(dev, pattern,
+ error, tunnel_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_flow_parse_attr(attr, error);
+ if (ret)
+ return ret;
+
+ cons_filter_type = RTE_ETH_FILTER_TUNNEL;
+
+ return ret;
+}
+
+/* 1. Last in item should be NULL as range is not supported.
* 2. Supported filter types: MPLS label.
* 3. Mask of fields which need to be matched should be
* filled with 1.
--
2.5.5
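For reference, the VNI/TNI handling shared by both parsers (copy the 3-byte network-order id into the low three bytes of a 32-bit big-endian value, then convert to host order) can be sketched without DPDK dependencies; ntohl() stands in for rte_be_to_cpu_32():

/* Standalone sketch of the 24-bit TNI/VNI to tenant id conversion
 * used by the VXLAN and NVGRE parsers; plain C, no DPDK needed.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h> /* ntohl(), in place of rte_be_to_cpu_32() */

static uint32_t
tni_to_tenant_id(const uint8_t tni[3])
{
	uint32_t tenant_id_be = 0;

	/* Skip the first byte so the 24-bit id lands in the low
	 * three bytes of the big-endian 32-bit value.
	 */
	memcpy((uint8_t *)&tenant_id_be + 1, tni, 3);
	return ntohl(tenant_id_be);
}

int
main(void)
{
	const uint8_t tni[3] = {0x01, 0x02, 0x03};

	/* Prints "tenant id: 0x010203". */
	printf("tenant id: 0x%06x\n", tni_to_tenant_id(tni));
	return 0;
}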
* Re: [dpdk-dev] [PATCH v3 2/2] net/i40e: add NVGRE parsing function
2017-06-07 6:53 ` [dpdk-dev] [PATCH v3 2/2] net/i40e: add NVGRE " Beilei Xing
@ 2017-06-07 8:07 ` Lu, Wenzhuo
2017-06-08 10:28 ` Ferruh Yigit
0 siblings, 1 reply; 16+ messages in thread
From: Lu, Wenzhuo @ 2017-06-07 8:07 UTC (permalink / raw)
To: Xing, Beilei, Wu, Jingjing; +Cc: dev
Hi,
> This patch adds NVGRE parsing function to support NVGRE classification.
>
> Signed-off-by: Beilei Xing <beilei.xing@intel.com>
Acked-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
* Re: [dpdk-dev] [PATCH v3 2/2] net/i40e: add NVGRE parsing function
2017-06-07 8:07 ` Lu, Wenzhuo
@ 2017-06-08 10:28 ` Ferruh Yigit
0 siblings, 0 replies; 16+ messages in thread
From: Ferruh Yigit @ 2017-06-08 10:28 UTC (permalink / raw)
To: Lu, Wenzhuo, Xing, Beilei, Wu, Jingjing; +Cc: dev
On 6/7/2017 9:07 AM, Lu, Wenzhuo wrote:
> Acked-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
Series applied to dpdk-next-net/master, thanks.
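With the series applied, an application can request NVGRE classification through rte_flow. The sketch below is not from the thread; it mirrors pattern_nvgre_1 and the masking rules the parser enforces (outer ETH and IPv4 as bare placeholders, TNI and inner destination MAC fully masked). The port id, MAC address, TNI value, and the PF action are assumed for illustration only; the accepted action set is decided by the driver's tunnel action parser, which is not part of this patch.

/* Hedged sketch: validate an NVGRE flow matching inner MAC + TNI
 * (the IMAC_TENID filter type). Values below are examples, not
 * taken from the patch.
 */
#include <rte_flow.h>
#include <rte_ether.h>

static int
validate_nvgre_filter(uint8_t port_id, struct rte_flow_error *err)
{
	static const struct rte_flow_attr attr = { .ingress = 1 };
	/* Inner destination MAC to match (example address). */
	struct rte_flow_item_eth inner_eth_spec = {
		.dst.addr_bytes = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
	};
	/* dst fully masked; src and type left zero as the parser requires. */
	struct rte_flow_item_eth inner_eth_mask = {
		.dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
	};
	/* 24-bit TNI, fully masked as the parser requires. */
	struct rte_flow_item_nvgre nvgre_spec = { .tni = { 0x00, 0x00, 0x01 } };
	struct rte_flow_item_nvgre nvgre_mask = { .tni = { 0xff, 0xff, 0xff } };

	/* Mirrors pattern_nvgre_1: ETH / IPV4 / NVGRE / ETH / END.
	 * Outer ETH and IPV4 are protocol placeholders (NULL spec/mask).
	 */
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_NVGRE,
		  .spec = &nvgre_spec, .mask = &nvgre_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
		  .spec = &inner_eth_spec, .mask = &inner_eth_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_PF }, /* assumed action */
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_validate(port_id, &attr, pattern, actions, err);
}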
Thread overview: 16+ messages
2017-05-18 9:20 [dpdk-dev] [PATCH] net/i40e: add NVGRE parsing function Beilei Xing
2017-06-01 6:56 ` [dpdk-dev] [PATCH v2 0/2] net/i40e: extend tunnel filter support Beilei Xing
2017-06-01 6:56 ` [dpdk-dev] [PATCH v2 1/2] net/i40e: optimize vxlan parsing function Beilei Xing
2017-06-07 3:27 ` Lu, Wenzhuo
2017-06-07 3:30 ` Yuanhan Liu
2017-06-07 4:21 ` Xing, Beilei
2017-06-01 6:56 ` [dpdk-dev] [PATCH v2 2/2] net/i40e: add NVGRE " Beilei Xing
2017-06-07 5:46 ` Lu, Wenzhuo
2017-06-07 6:06 ` Xing, Beilei
2017-06-07 6:12 ` Lu, Wenzhuo
2017-06-07 6:22 ` Xing, Beilei
2017-06-07 6:53 ` [dpdk-dev] [PATCH v3 0/2] net/i40e: extend tunnel filter support Beilei Xing
2017-06-07 6:53 ` [dpdk-dev] [PATCH v3 1/2] net/i40e: optimize vxlan parsing function Beilei Xing
2017-06-07 6:53 ` [dpdk-dev] [PATCH v3 2/2] net/i40e: add NVGRE " Beilei Xing
2017-06-07 8:07 ` Lu, Wenzhuo
2017-06-08 10:28 ` Ferruh Yigit