* [dpdk-dev] [PATCH 00/24] net/i40e: Consistent filter API
@ 2016-12-02 11:53 Beilei Xing
2016-12-02 11:53 ` [dpdk-dev] [PATCH 01/24] net/i40e: store ethertype filter Beilei Xing
` (24 more replies)
0 siblings, 25 replies; 175+ messages in thread
From: Beilei Xing @ 2016-12-02 11:53 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev, wenzhuo.lu
All patches depend on Adrien's Generic flow API.
The patches mainly finish following functions:
1) Store and restore all kinds of filters.
2) Parse all kinds of filters.
3) Add flow validate function.
4) Add flow create function.
5) Add flow destroy function.
6) Add flow flush function.
Beilei Xing (24):
net/i40e: store ethertype filter
net/i40e: store tunnel filter
net/i40e: store flow director filter
net/i40e: store RSS hash info
net/i40e: restore ethertype filter
net/i40e: restore macvlan filter
net/i40e: restore tunnel filter
net/i40e: restore flow director filter
net/i40e: restore RSS hash info
ethdev: parse ethertype filter
net/i40e: add flow validate function
net/i40e: parse macvlan filter
net/i40e: parse VXLAN filter
net/i40e: parse NVGRE filter
net/i40e: parse flow director filter
net/i40e: add flow create function
net/i40e: destroy ethertype filter
net/i40e: destroy macvlan filter
net/i40e: destroy tunnel filter
net/i40e: destroy flow directory filter
net/i40e: add flow flush function
net/i40e: flush ethertype filters
net/i40e: flush macvlan filters
net/i40e: flush tunnel filters
drivers/net/i40e/i40e_ethdev.c | 2180 +++++++++++++++++++++++++++++++++++-
drivers/net/i40e/i40e_ethdev.h | 83 ++
drivers/net/i40e/i40e_fdir.c | 111 +-
lib/librte_ether/rte_flow.c | 136 +++
lib/librte_ether/rte_flow.h | 23 +
lib/librte_ether/rte_flow_driver.h | 34 +
6 files changed, 2555 insertions(+), 12 deletions(-)
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH 01/24] net/i40e: store ethertype filter
2016-12-02 11:53 [dpdk-dev] [PATCH 00/24] net/i40e: Consistent filter API Beilei Xing
@ 2016-12-02 11:53 ` Beilei Xing
2016-12-02 11:53 ` [dpdk-dev] [PATCH 02/24] net/i40e: store tunnel filter Beilei Xing
` (23 subsequent siblings)
24 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2016-12-02 11:53 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev, wenzhuo.lu
Add support of storing ethertype filter in SW.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_ethdev.c | 160 ++++++++++++++++++++++++++++++++++++++++-
drivers/net/i40e/i40e_ethdev.h | 25 +++++++
2 files changed, 184 insertions(+), 1 deletion(-)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 67778ba..30822a0 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -51,6 +51,7 @@
#include <rte_dev.h>
#include <rte_eth_ctrl.h>
#include <rte_tailq.h>
+#include <rte_hash_crc.h>
#include "i40e_logs.h"
#include "base/i40e_prototype.h"
@@ -461,6 +462,17 @@ static void i40e_set_default_mac_addr(struct rte_eth_dev *dev,
static int i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
+static int i40e_ethertype_filter_convert(
+ const struct rte_eth_ethertype_filter *input,
+ struct i40e_ethertype_filter *filter);
+static struct i40e_ethertype_filter *
+i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_info *ethertype_info,
+ const struct i40e_ethertype_filter_input *input);
+static int i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
+ struct i40e_ethertype_filter *filter);
+static int i40e_sw_ethertype_filter_del(struct i40e_pf *pf,
+ struct i40e_ethertype_filter *filter);
+
static const struct rte_pci_id pci_id_i40e_map[] = {
{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710) },
{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QEMU) },
@@ -937,9 +949,18 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
int ret;
uint32_t len;
uint8_t aq_fail = 0;
+ struct i40e_ethertype_info *ethertype_info = &pf->ethertype;
PMD_INIT_FUNC_TRACE();
+ char ethertype_hash_name[RTE_HASH_NAMESIZE];
+ struct rte_hash_parameters ethertype_hash_params = {
+ .name = ethertype_hash_name,
+ .entries = I40E_MAX_ETHERTYPE_FILTER_NUM,
+ .key_len = sizeof(struct i40e_ethertype_filter_input),
+ .hash_func = rte_hash_crc,
+ };
+
dev->dev_ops = &i40e_eth_dev_ops;
dev->rx_pkt_burst = i40e_recv_pkts;
dev->tx_pkt_burst = i40e_xmit_pkts;
@@ -1179,8 +1200,33 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
pf->flags &= ~I40E_FLAG_DCB;
}
+ /* Initialize ethertype filter rule list and hash */
+ TAILQ_INIT(&ethertype_info->ethertype_list);
+ snprintf(ethertype_hash_name, RTE_HASH_NAMESIZE,
+ "ethertype_%s", dev->data->name);
+ ethertype_info->hash_table = rte_hash_create(&ethertype_hash_params);
+ if (!ethertype_info->hash_table) {
+ PMD_INIT_LOG(ERR, "Failed to create ethertype hash table!");
+ ret = -EINVAL;
+ goto err_ethertype_hash_table_create;
+ }
+ ethertype_info->hash_map = rte_zmalloc("i40e_ethertype_hash_map",
+ sizeof(struct i40e_ethertype_filter *) *
+ I40E_MAX_ETHERTYPE_FILTER_NUM,
+ 0);
+ if (!ethertype_info->hash_map) {
+ PMD_INIT_LOG(ERR,
+ "Failed to allocate memory for ethertype hash map!");
+ ret = -ENOMEM;
+ goto err_ethertype_hash_map_alloc;
+ }
+
return 0;
+err_ethertype_hash_map_alloc:
+ rte_hash_free(ethertype_info->hash_table);
+err_ethertype_hash_table_create:
+ rte_free(dev->data->mac_addrs);
err_mac_alloc:
i40e_vsi_release(pf->main_vsi);
err_setup_pf_switch:
@@ -1203,23 +1249,40 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
static int
eth_i40e_dev_uninit(struct rte_eth_dev *dev)
{
+ struct i40e_pf *pf;
struct rte_pci_device *pci_dev;
struct i40e_hw *hw;
struct i40e_filter_control_settings settings;
+ struct i40e_ethertype_filter *p_ethertype;
int ret;
uint8_t aq_fail = 0;
+ struct i40e_ethertype_info *ethertype_info;
PMD_INIT_FUNC_TRACE();
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
pci_dev = dev->pci_dev;
+ ethertype_info = &pf->ethertype;
if (hw->adapter_stopped == 0)
i40e_dev_close(dev);
+ /* Remove all ethertype director rules and hash */
+ if (ethertype_info->hash_map)
+ rte_free(ethertype_info->hash_map);
+ if (ethertype_info->hash_table)
+ rte_hash_free(ethertype_info->hash_table);
+
+ while ((p_ethertype = TAILQ_FIRST(&ethertype_info->ethertype_list))) {
+ TAILQ_REMOVE(&ethertype_info->ethertype_list,
+ p_ethertype, rules);
+ rte_free(p_ethertype);
+ }
+
dev->dev_ops = NULL;
dev->rx_pkt_burst = NULL;
dev->tx_pkt_burst = NULL;
@@ -7986,6 +8049,74 @@ i40e_hash_filter_ctrl(struct rte_eth_dev *dev,
return ret;
}
+static int
+i40e_ethertype_filter_convert(const struct rte_eth_ethertype_filter *input,
+ struct i40e_ethertype_filter *filter)
+{
+ rte_memcpy(&filter->input.mac_addr, &input->mac_addr, ETHER_ADDR_LEN);
+ filter->input.ether_type = input->ether_type;
+ filter->flags = input->flags;
+ filter->queue = input->queue;
+
+ return 0;
+}
+
+static struct i40e_ethertype_filter *
+i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_info *ethertype_info,
+ const struct i40e_ethertype_filter_input *input)
+{
+ int ret = 0;
+
+ ret = rte_hash_lookup(ethertype_info->hash_table, (const void *)input);
+ if (ret < 0)
+ return NULL;
+
+ return ethertype_info->hash_map[ret];
+}
+
+static int
+i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
+ struct i40e_ethertype_filter *filter)
+{
+ struct i40e_ethertype_info *ethertype_info = &pf->ethertype;
+ int ret = 0;
+
+ ret = rte_hash_add_key(ethertype_info->hash_table,
+ &filter->input);
+ if (ret < 0)
+ PMD_DRV_LOG(ERR,
+ "Failed to insert ethertype filter"
+ " to hash table %d!",
+ ret);
+ ethertype_info->hash_map[ret] = filter;
+
+ TAILQ_INSERT_TAIL(&ethertype_info->ethertype_list, filter, rules);
+
+ return 0;
+}
+
+static int
+i40e_sw_ethertype_filter_del(struct i40e_pf *pf,
+ struct i40e_ethertype_filter *filter)
+{
+ struct i40e_ethertype_info *ethertype_info = &pf->ethertype;
+ int ret = 0;
+
+ ret = rte_hash_del_key(ethertype_info->hash_table,
+ &filter->input);
+ if (ret < 0)
+ PMD_DRV_LOG(ERR,
+ "Failed to delete ethertype filter"
+ " to hash table %d!",
+ ret);
+ ethertype_info->hash_map[ret] = NULL;
+
+ TAILQ_REMOVE(&ethertype_info->ethertype_list, filter, rules);
+ rte_free(filter);
+
+ return 0;
+}
+
/*
* Configure ethertype filter, which can director packet by filtering
* with mac address and ether_type or only ether_type
@@ -7996,6 +8127,8 @@ i40e_ethertype_filter_set(struct i40e_pf *pf,
bool add)
{
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_ethertype_info *ethertype_info = &pf->ethertype;
+ struct i40e_ethertype_filter *ethertype_filter, *node;
struct i40e_control_filter_stats stats;
uint16_t flags = 0;
int ret;
@@ -8014,6 +8147,22 @@ i40e_ethertype_filter_set(struct i40e_pf *pf,
PMD_DRV_LOG(WARNING, "filter vlan ether_type in first tag is"
" not supported.");
+ /* Check if there is the filter in SW list */
+ ethertype_filter = rte_zmalloc("ethertype_filter",
+ sizeof(*ethertype_filter), 0);
+ i40e_ethertype_filter_convert(filter, ethertype_filter);
+ node = i40e_sw_ethertype_filter_lookup(ethertype_info,
+ &ethertype_filter->input);
+ if (add && node) {
+ PMD_DRV_LOG(ERR, "Conflict with existing ethertype rules!");
+ rte_free(ethertype_filter);
+ return -EINVAL;
+ } else if (!add && !node) {
+ PMD_DRV_LOG(ERR, "There's no corresponding ethertype filter!");
+ rte_free(ethertype_filter);
+ return -EINVAL;
+ }
+
if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
@@ -8034,7 +8183,16 @@ i40e_ethertype_filter_set(struct i40e_pf *pf,
stats.mac_etype_free, stats.etype_free);
if (ret < 0)
return -ENOSYS;
- return 0;
+
+ /* Add or delete a filter in SW list */
+ if (add)
+ ret = i40e_sw_ethertype_filter_insert(pf, ethertype_filter);
+ else {
+ ret = i40e_sw_ethertype_filter_del(pf, node);
+ rte_free(ethertype_filter);
+ }
+
+ return ret;
}
/*
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 298cef4..8604198 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -37,6 +37,7 @@
#include <rte_eth_ctrl.h>
#include <rte_time.h>
#include <rte_kvargs.h>
+#include <rte_hash.h>
#define I40E_VLAN_TAG_SIZE 4
@@ -396,6 +397,29 @@ struct i40e_fdir_info {
struct i40e_fdir_flex_mask flex_mask[I40E_FILTER_PCTYPE_MAX];
};
+#define I40E_MAX_ETHERTYPE_FILTER_NUM 768
+
+/* Ethertype filter struct */
+struct i40e_ethertype_filter_input {
+ struct ether_addr mac_addr; /**< Mac address to match. */
+ uint16_t ether_type; /**< Ether type to match */
+};
+
+struct i40e_ethertype_filter {
+ TAILQ_ENTRY(i40e_ethertype_filter) rules;
+ struct i40e_ethertype_filter_input input;
+ uint16_t flags;
+ uint16_t queue;
+};
+
+TAILQ_HEAD(i40e_ethertype_filter_list, i40e_ethertype_filter);
+
+struct i40e_ethertype_info {
+ struct i40e_ethertype_filter_list ethertype_list;
+ struct i40e_ethertype_filter **hash_map;
+ struct rte_hash *hash_table;
+};
+
#define I40E_MIRROR_MAX_ENTRIES_PER_RULE 64
#define I40E_MAX_MIRROR_RULES 64
/*
@@ -466,6 +490,7 @@ struct i40e_pf {
struct i40e_vmdq_info *vmdq;
struct i40e_fdir_info fdir; /* flow director info */
+ struct i40e_ethertype_info ethertype; /* Ethertype filter info */
struct i40e_fc_conf fc_conf; /* Flow control conf */
struct i40e_mirror_rule_list mirror_list;
uint16_t nb_mirror_rule; /* The number of mirror rules */
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH 02/24] net/i40e: store tunnel filter
2016-12-02 11:53 [dpdk-dev] [PATCH 00/24] net/i40e: Consistent filter API Beilei Xing
2016-12-02 11:53 ` [dpdk-dev] [PATCH 01/24] net/i40e: store ethertype filter Beilei Xing
@ 2016-12-02 11:53 ` Beilei Xing
2016-12-02 11:53 ` [dpdk-dev] [PATCH 03/24] net/i40e: store flow director filter Beilei Xing
` (22 subsequent siblings)
24 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2016-12-02 11:53 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev, wenzhuo.lu
Add support of storing tunnel filter in SW.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_ethdev.c | 161 ++++++++++++++++++++++++++++++++++++++++-
drivers/net/i40e/i40e_ethdev.h | 35 +++++++++
2 files changed, 193 insertions(+), 3 deletions(-)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 30822a0..b20a851 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -473,6 +473,17 @@ static int i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
static int i40e_sw_ethertype_filter_del(struct i40e_pf *pf,
struct i40e_ethertype_filter *filter);
+static int i40e_tunnel_filter_convert(
+ struct i40e_aqc_add_remove_cloud_filters_element_data *cld_filter,
+ struct i40e_tunnel_filter *tunnel_filter);
+static struct i40e_tunnel_filter *
+i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_info *tunnel_info,
+ const struct i40e_tunnel_filter_input *input);
+static int i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
+ struct i40e_tunnel_filter *tunnel_filter);
+static int i40e_sw_tunnel_filter_del(struct i40e_pf *pf,
+ struct i40e_tunnel_filter *tunnel_filter);
+
static const struct rte_pci_id pci_id_i40e_map[] = {
{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710) },
{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QEMU) },
@@ -950,6 +961,7 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
uint32_t len;
uint8_t aq_fail = 0;
struct i40e_ethertype_info *ethertype_info = &pf->ethertype;
+ struct i40e_tunnel_info *tunnel_info = &pf->tunnel;
PMD_INIT_FUNC_TRACE();
@@ -961,6 +973,14 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
.hash_func = rte_hash_crc,
};
+ char tunnel_hash_name[RTE_HASH_NAMESIZE];
+ struct rte_hash_parameters tunnel_hash_params = {
+ .name = tunnel_hash_name,
+ .entries = I40E_MAX_TUNNEL_FILTER_NUM,
+ .key_len = sizeof(struct i40e_tunnel_filter_input),
+ .hash_func = rte_hash_crc,
+ };
+
dev->dev_ops = &i40e_eth_dev_ops;
dev->rx_pkt_burst = i40e_recv_pkts;
dev->tx_pkt_burst = i40e_xmit_pkts;
@@ -1221,8 +1241,33 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
goto err_ethertype_hash_map_alloc;
}
+ /* Initialize tunnel filter rule list and hash */
+ TAILQ_INIT(&tunnel_info->tunnel_list);
+ snprintf(tunnel_hash_name, RTE_HASH_NAMESIZE,
+ "tunnel_%s", dev->data->name);
+ tunnel_info->hash_table = rte_hash_create(&tunnel_hash_params);
+ if (!tunnel_info->hash_table) {
+ PMD_INIT_LOG(ERR, "Failed to create tunnel hash table!");
+ ret = -EINVAL;
+ goto err_tunnel_hash_table_create;
+ }
+ tunnel_info->hash_map = rte_zmalloc("i40e_tunnel_hash_map",
+ sizeof(struct i40e_tunnel_filter *) *
+ I40E_MAX_TUNNEL_FILTER_NUM,
+ 0);
+ if (!tunnel_info->hash_map) {
+ PMD_INIT_LOG(ERR,
+ "Failed to allocate memory for tunnel hash map!");
+ ret = -ENOMEM;
+ goto err_tunnel_hash_map_alloc;
+ }
+
return 0;
+err_tunnel_hash_map_alloc:
+ rte_hash_free(tunnel_info->hash_table);
+err_tunnel_hash_table_create:
+ rte_free(ethertype_info->hash_map);
err_ethertype_hash_map_alloc:
rte_hash_free(ethertype_info->hash_table);
err_ethertype_hash_table_create:
@@ -1254,9 +1299,11 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
struct i40e_hw *hw;
struct i40e_filter_control_settings settings;
struct i40e_ethertype_filter *p_ethertype;
+ struct i40e_tunnel_filter *p_tunnel;
int ret;
uint8_t aq_fail = 0;
struct i40e_ethertype_info *ethertype_info;
+ struct i40e_tunnel_info *tunnel_info;
PMD_INIT_FUNC_TRACE();
@@ -1267,6 +1314,7 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
pci_dev = dev->pci_dev;
ethertype_info = &pf->ethertype;
+ tunnel_info = &pf->tunnel;
if (hw->adapter_stopped == 0)
i40e_dev_close(dev);
@@ -1283,6 +1331,17 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
rte_free(p_ethertype);
}
+ /* Remove all tunnel director rules and hash */
+ if (tunnel_info->hash_map)
+ rte_free(tunnel_info->hash_map);
+ if (tunnel_info->hash_table)
+ rte_hash_free(tunnel_info->hash_table);
+
+ while ((p_tunnel = TAILQ_FIRST(&tunnel_info->tunnel_list))) {
+ TAILQ_REMOVE(&tunnel_info->tunnel_list, p_tunnel, rules);
+ rte_free(p_tunnel);
+ }
+
dev->dev_ops = NULL;
dev->rx_pkt_burst = NULL;
dev->tx_pkt_burst = NULL;
@@ -6491,6 +6550,79 @@ i40e_dev_get_filter_type(uint16_t filter_type, uint16_t *flag)
}
static int
+i40e_tunnel_filter_convert(struct i40e_aqc_add_remove_cloud_filters_element_data
+ *cld_filter,
+ struct i40e_tunnel_filter *tunnel_filter)
+{
+ ether_addr_copy((struct ether_addr *)&cld_filter->outer_mac,
+ (struct ether_addr *)&tunnel_filter->input.outer_mac);
+ ether_addr_copy((struct ether_addr *)&cld_filter->inner_mac,
+ (struct ether_addr *)&tunnel_filter->input.inner_mac);
+ tunnel_filter->input.inner_vlan = cld_filter->inner_vlan;
+ tunnel_filter->input.flags = cld_filter->flags;
+ tunnel_filter->input.tenant_id = cld_filter->tenant_id;
+ rte_memcpy(&tunnel_filter->input.ipaddr, &cld_filter->ipaddr,
+ sizeof(tunnel_filter->input.ipaddr));
+ tunnel_filter->queue = cld_filter->queue_number;
+
+ return 0;
+}
+
+static struct i40e_tunnel_filter *
+i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_info *tunnel_info,
+ const struct i40e_tunnel_filter_input *input)
+{
+ int ret = 0;
+
+ ret = rte_hash_lookup(tunnel_info->hash_table, (const void *)input);
+ if (ret < 0)
+ return NULL;
+
+ return tunnel_info->hash_map[ret];
+}
+
+static int
+i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
+ struct i40e_tunnel_filter *tunnel_filter)
+{
+ struct i40e_tunnel_info *tunnel_info = &pf->tunnel;
+ int ret = 0;
+
+ ret = rte_hash_add_key(tunnel_info->hash_table,
+ &tunnel_filter->input);
+ if (ret < 0)
+ PMD_DRV_LOG(ERR,
+ "Failed to insert tunnel filter to hash table %d!",
+ ret);
+ tunnel_info->hash_map[ret] = tunnel_filter;
+
+ TAILQ_INSERT_TAIL(&tunnel_info->tunnel_list, tunnel_filter, rules);
+
+ return 0;
+}
+
+static int
+i40e_sw_tunnel_filter_del(struct i40e_pf *pf,
+ struct i40e_tunnel_filter *tunnel_filter)
+{
+ struct i40e_tunnel_info *tunnel_info = &pf->tunnel;
+ int ret = 0;
+
+ ret = rte_hash_del_key(tunnel_info->hash_table,
+ &tunnel_filter->input);
+ if (ret < 0)
+ PMD_DRV_LOG(ERR,
+ "Failed to delete tunnel filter to hash table %d!",
+ ret);
+ tunnel_info->hash_map[ret] = NULL;
+
+ TAILQ_REMOVE(&tunnel_info->tunnel_list, tunnel_filter, rules);
+ rte_free(tunnel_filter);
+
+ return 0;
+}
+
+static int
i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
struct rte_eth_tunnel_filter_conf *tunnel_filter,
uint8_t add)
@@ -6505,6 +6637,8 @@ i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
struct i40e_vsi *vsi = pf->main_vsi;
struct i40e_aqc_add_remove_cloud_filters_element_data *cld_filter;
struct i40e_aqc_add_remove_cloud_filters_element_data *pfilter;
+ struct i40e_tunnel_info *tunnel_info = &pf->tunnel;
+ struct i40e_tunnel_filter *tunnel, *node;
cld_filter = rte_zmalloc("tunnel_filter",
sizeof(struct i40e_aqc_add_remove_cloud_filters_element_data),
@@ -6567,11 +6701,32 @@ i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
pfilter->tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id);
pfilter->queue_number = rte_cpu_to_le_16(tunnel_filter->queue_id);
- if (add)
+ tunnel = rte_zmalloc("tunnel_filter", sizeof(*tunnel), 0);
+ i40e_tunnel_filter_convert(cld_filter, tunnel);
+ node = i40e_sw_tunnel_filter_lookup(tunnel_info, &tunnel->input);
+ if (add && node) {
+ PMD_DRV_LOG(ERR, "Conflict with existing tunnel rules!");
+ rte_free(tunnel);
+ return -EINVAL;
+ } else if (!add && !node) {
+ PMD_DRV_LOG(ERR, "There's no corresponding tunnel filter!");
+ rte_free(tunnel);
+ return -EINVAL;
+ }
+
+ if (add) {
ret = i40e_aq_add_cloud_filters(hw, vsi->seid, cld_filter, 1);
- else
+ if (ret < 0)
+ return ret;
+ ret = i40e_sw_tunnel_filter_insert(pf, tunnel);
+ } else {
ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
- cld_filter, 1);
+ cld_filter, 1);
+ if (ret < 0)
+ return ret;
+ ret = i40e_sw_tunnel_filter_del(pf, node);
+ rte_free(tunnel);
+ }
rte_free(cld_filter);
return ret;
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 8604198..5f9cddd 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -420,6 +420,40 @@ struct i40e_ethertype_info {
struct rte_hash *hash_table;
};
+#define I40E_MAX_TUNNEL_FILTER_NUM 400
+
+/* Tunnel filter struct */
+struct i40e_tunnel_filter_input {
+ u8 outer_mac[6];
+ u8 inner_mac[6];
+ __le16 inner_vlan;
+ union {
+ struct {
+ u8 reserved[12];
+ u8 data[4];
+ } v4;
+ struct {
+ u8 data[16];
+ } v6;
+ } ipaddr;
+ __le16 flags;
+ __le32 tenant_id;
+};
+
+struct i40e_tunnel_filter {
+ TAILQ_ENTRY(i40e_tunnel_filter) rules;
+ struct i40e_tunnel_filter_input input;
+ uint16_t queue;
+};
+
+TAILQ_HEAD(i40e_tunnel_filter_list, i40e_tunnel_filter);
+
+struct i40e_tunnel_info {
+ struct i40e_tunnel_filter_list tunnel_list;
+ struct i40e_tunnel_filter **hash_map;
+ struct rte_hash *hash_table;
+};
+
#define I40E_MIRROR_MAX_ENTRIES_PER_RULE 64
#define I40E_MAX_MIRROR_RULES 64
/*
@@ -491,6 +525,7 @@ struct i40e_pf {
struct i40e_fdir_info fdir; /* flow director info */
struct i40e_ethertype_info ethertype; /* Ethertype filter info */
+ struct i40e_tunnel_info tunnel; /* Tunnel filter info */
struct i40e_fc_conf fc_conf; /* Flow control conf */
struct i40e_mirror_rule_list mirror_list;
uint16_t nb_mirror_rule; /* The number of mirror rules */
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH 03/24] net/i40e: store flow director filter
2016-12-02 11:53 [dpdk-dev] [PATCH 00/24] net/i40e: Consistent filter API Beilei Xing
2016-12-02 11:53 ` [dpdk-dev] [PATCH 01/24] net/i40e: store ethertype filter Beilei Xing
2016-12-02 11:53 ` [dpdk-dev] [PATCH 02/24] net/i40e: store tunnel filter Beilei Xing
@ 2016-12-02 11:53 ` Beilei Xing
2016-12-02 11:53 ` [dpdk-dev] [PATCH 04/24] net/i40e: store RSS hash info Beilei Xing
` (21 subsequent siblings)
24 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2016-12-02 11:53 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev, wenzhuo.lu
Add support for storing flow director filter in SW.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_ethdev.c | 48 +++++++++++++++++++++
drivers/net/i40e/i40e_ethdev.h | 12 ++++++
drivers/net/i40e/i40e_fdir.c | 95 ++++++++++++++++++++++++++++++++++++++++++
3 files changed, 155 insertions(+)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index b20a851..c38536f 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -962,6 +962,7 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
uint8_t aq_fail = 0;
struct i40e_ethertype_info *ethertype_info = &pf->ethertype;
struct i40e_tunnel_info *tunnel_info = &pf->tunnel;
+ struct i40e_fdir_info *fdir_info = &pf->fdir;
PMD_INIT_FUNC_TRACE();
@@ -981,6 +982,14 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
.hash_func = rte_hash_crc,
};
+ char fdir_hash_name[RTE_HASH_NAMESIZE];
+ struct rte_hash_parameters fdir_hash_params = {
+ .name = fdir_hash_name,
+ .entries = I40E_MAX_FDIR_FILTER_NUM,
+ .key_len = sizeof(struct rte_eth_fdir_input),
+ .hash_func = rte_hash_crc,
+ };
+
dev->dev_ops = &i40e_eth_dev_ops;
dev->rx_pkt_burst = i40e_recv_pkts;
dev->tx_pkt_burst = i40e_xmit_pkts;
@@ -1262,8 +1271,33 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
goto err_tunnel_hash_map_alloc;
}
+ /* Initialize flow director filter rule list and hash */
+ TAILQ_INIT(&fdir_info->fdir_list);
+ snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
+ "fdir_%s", dev->data->name);
+ fdir_info->hash_table = rte_hash_create(&fdir_hash_params);
+ if (!fdir_info->hash_table) {
+ PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
+ ret = -EINVAL;
+ goto err_fdir_hash_table_create;
+ }
+ fdir_info->hash_map = rte_zmalloc("i40e_fdir_hash_map",
+ sizeof(struct i40e_fdir_filter *) *
+ I40E_MAX_FDIR_FILTER_NUM,
+ 0);
+ if (!fdir_info->hash_map) {
+ PMD_INIT_LOG(ERR,
+ "Failed to allocate memory for fdir hash map!");
+ ret = -ENOMEM;
+ goto err_fdir_hash_map_alloc;
+ }
+
return 0;
+err_fdir_hash_map_alloc:
+ rte_hash_free(fdir_info->hash_table);
+err_fdir_hash_table_create:
+ rte_free(tunnel_info->hash_map);
err_tunnel_hash_map_alloc:
rte_hash_free(tunnel_info->hash_table);
err_tunnel_hash_table_create:
@@ -1300,10 +1334,12 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
struct i40e_filter_control_settings settings;
struct i40e_ethertype_filter *p_ethertype;
struct i40e_tunnel_filter *p_tunnel;
+ struct i40e_fdir_filter *p_fdir;
int ret;
uint8_t aq_fail = 0;
struct i40e_ethertype_info *ethertype_info;
struct i40e_tunnel_info *tunnel_info;
+ struct i40e_fdir_info *fdir_info;
PMD_INIT_FUNC_TRACE();
@@ -1315,6 +1351,7 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
pci_dev = dev->pci_dev;
ethertype_info = &pf->ethertype;
tunnel_info = &pf->tunnel;
+ fdir_info = &pf->fdir;
if (hw->adapter_stopped == 0)
i40e_dev_close(dev);
@@ -1342,6 +1379,17 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
rte_free(p_tunnel);
}
+ /* Remove all flow director rules and hash */
+ if (fdir_info->hash_map)
+ rte_free(fdir_info->hash_map);
+ if (fdir_info->hash_table)
+ rte_hash_free(fdir_info->hash_table);
+
+ while ((p_fdir = TAILQ_FIRST(&fdir_info->fdir_list))) {
+ TAILQ_REMOVE(&fdir_info->fdir_list, p_fdir, rules);
+ rte_free(p_fdir);
+ }
+
dev->dev_ops = NULL;
dev->rx_pkt_burst = NULL;
dev->tx_pkt_burst = NULL;
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 5f9cddd..b6eed6a 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -377,6 +377,14 @@ struct i40e_fdir_flex_mask {
};
#define I40E_FILTER_PCTYPE_MAX 64
+#define I40E_MAX_FDIR_FILTER_NUM (1024 * 8)
+
+struct i40e_fdir_filter {
+ TAILQ_ENTRY(i40e_fdir_filter) rules;
+ struct rte_eth_fdir_filter fdir;
+};
+
+TAILQ_HEAD(i40e_fdir_filter_list, i40e_fdir_filter);
/*
* A structure used to define fields of a FDIR related info.
*/
@@ -395,6 +403,10 @@ struct i40e_fdir_info {
*/
struct i40e_fdir_flex_pit flex_set[I40E_MAX_FLXPLD_LAYER * I40E_MAX_FLXPLD_FIED];
struct i40e_fdir_flex_mask flex_mask[I40E_FILTER_PCTYPE_MAX];
+
+ struct i40e_fdir_filter_list fdir_list;
+ struct i40e_fdir_filter **hash_map;
+ struct rte_hash *hash_table;
};
#define I40E_MAX_ETHERTYPE_FILTER_NUM 768
diff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c
index 335bf15..1913fe1 100644
--- a/drivers/net/i40e/i40e_fdir.c
+++ b/drivers/net/i40e/i40e_fdir.c
@@ -121,6 +121,16 @@ static int i40e_fdir_filter_programming(struct i40e_pf *pf,
bool add);
static int i40e_fdir_flush(struct rte_eth_dev *dev);
+static int i40e_fdir_filter_convert(const struct rte_eth_fdir_filter *input,
+ struct i40e_fdir_filter *filter);
+static struct i40e_fdir_filter *
+i40e_sw_fdir_filter_lookup(struct i40e_fdir_info *fdir_info,
+ const struct rte_eth_fdir_input *input);
+static int i40e_sw_fdir_filter_insert(struct i40e_pf *pf,
+ struct i40e_fdir_filter *filter);
+static int i40e_sw_fdir_filter_del(struct i40e_pf *pf,
+ struct i40e_fdir_filter *filter);
+
static int
i40e_fdir_rx_queue_init(struct i40e_rx_queue *rxq)
{
@@ -1017,6 +1027,66 @@ i40e_check_fdir_programming_status(struct i40e_rx_queue *rxq)
return ret;
}
+static int
+i40e_fdir_filter_convert(const struct rte_eth_fdir_filter *input,
+ struct i40e_fdir_filter *filter)
+{
+ rte_memcpy(&filter->fdir, input, sizeof(struct rte_eth_fdir_filter));
+ return 0;
+}
+
+static struct i40e_fdir_filter *
+i40e_sw_fdir_filter_lookup(struct i40e_fdir_info *fdir_info,
+ const struct rte_eth_fdir_input *input)
+{
+ int ret = 0;
+
+ ret = rte_hash_lookup(fdir_info->hash_table, (const void *)input);
+ if (ret < 0)
+ return NULL;
+
+ return fdir_info->hash_map[ret];
+}
+
+static int
+i40e_sw_fdir_filter_insert(struct i40e_pf *pf, struct i40e_fdir_filter *filter)
+{
+ struct i40e_fdir_info *fdir_info = &pf->fdir;
+ int ret = 0;
+
+ ret = rte_hash_add_key(fdir_info->hash_table,
+ &filter->fdir.input);
+ if (ret < 0)
+ PMD_DRV_LOG(ERR,
+ "Failed to insert fdir filter to hash table %d!",
+ ret);
+ fdir_info->hash_map[ret] = filter;
+
+ TAILQ_INSERT_TAIL(&fdir_info->fdir_list, filter, rules);
+
+ return 0;
+}
+
+static int
+i40e_sw_fdir_filter_del(struct i40e_pf *pf, struct i40e_fdir_filter *filter)
+{
+ struct i40e_fdir_info *fdir_info = &pf->fdir;
+ int ret = 0;
+
+ ret = rte_hash_del_key(fdir_info->hash_table,
+ &filter->fdir.input);
+ if (ret < 0)
+ PMD_DRV_LOG(ERR,
+ "Failed to delete fdir filter to hash table %d!",
+ ret);
+ fdir_info->hash_map[ret] = NULL;
+
+ TAILQ_REMOVE(&fdir_info->fdir_list, filter, rules);
+ rte_free(filter);
+
+ return 0;
+}
+
/*
* i40e_add_del_fdir_filter - add or remove a flow director filter.
* @pf: board private structure
@@ -1032,6 +1102,8 @@ i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
enum i40e_filter_pctype pctype;
+ struct i40e_fdir_info *fdir_info = &pf->fdir;
+ struct i40e_fdir_filter *fdir_filter, *node;
int ret = 0;
if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
@@ -1054,6 +1126,21 @@ i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
return -EINVAL;
}
+ fdir_filter = rte_zmalloc("fdir_filter", sizeof(*fdir_filter), 0);
+ i40e_fdir_filter_convert(filter, fdir_filter);
+ node = i40e_sw_fdir_filter_lookup(fdir_info, &fdir_filter->fdir.input);
+ if (add && node) {
+ PMD_DRV_LOG(ERR,
+ "Conflict with existing flow director rules!");
+ rte_free(fdir_filter);
+ return -EINVAL;
+ } else if (!add && !node) {
+ PMD_DRV_LOG(ERR,
+ "There's no corresponding flow firector filter!");
+ rte_free(fdir_filter);
+ return -EINVAL;
+ }
+
memset(pkt, 0, I40E_FDIR_PKT_LEN);
ret = i40e_fdir_construct_pkt(pf, &filter->input, pkt);
@@ -1077,6 +1164,14 @@ i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
pctype);
return ret;
}
+
+ if (add)
+ ret = i40e_sw_fdir_filter_insert(pf, fdir_filter);
+ else {
+ ret = i40e_sw_fdir_filter_del(pf, node);
+ rte_free(fdir_filter);
+ }
+
return ret;
}
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH 04/24] net/i40e: store RSS hash info
2016-12-02 11:53 [dpdk-dev] [PATCH 00/24] net/i40e: Consistent filter API Beilei Xing
` (2 preceding siblings ...)
2016-12-02 11:53 ` [dpdk-dev] [PATCH 03/24] net/i40e: store flow director filter Beilei Xing
@ 2016-12-02 11:53 ` Beilei Xing
2016-12-02 11:53 ` [dpdk-dev] [PATCH 05/24] net/i40e: restore ethertype filter Beilei Xing
` (20 subsequent siblings)
24 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2016-12-02 11:53 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev, wenzhuo.lu
Add support of storing lookup table and RSS
configuration in SW.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_ethdev.c | 39 ++++++++++++++++++++++++++++++++++-----
drivers/net/i40e/i40e_ethdev.h | 6 ++++++
2 files changed, 40 insertions(+), 5 deletions(-)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index c38536f..521e7bb 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -1356,6 +1356,9 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
if (hw->adapter_stopped == 0)
i40e_dev_close(dev);
+ if (pf->hash.reta)
+ rte_free(pf->hash.reta);
+
/* Remove all ethertype director rules and hash */
if (ethertype_info->hash_map)
rte_free(ethertype_info->hash_map);
@@ -3453,6 +3456,8 @@ i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
}
ret = i40e_set_rss_lut(pf->main_vsi, lut, reta_size);
+ /* Store updated lut */
+ rte_memcpy(pf->hash.reta, lut, sizeof(*lut) * reta_size);
out:
rte_free(lut);
@@ -6959,6 +6964,8 @@ i40e_pf_config_rss(struct i40e_pf *pf)
struct rte_eth_rss_conf rss_conf;
uint32_t i, lut = 0;
uint16_t j, num;
+ uint16_t reta_size = hw->func_caps.rss_table_size;
+ int ret = -EINVAL;
/*
* If both VMDQ and RSS enabled, not all of PF queues are configured.
@@ -6978,7 +6985,7 @@ i40e_pf_config_rss(struct i40e_pf *pf)
return -ENOTSUP;
}
- for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
+ for (i = 0, j = 0; i < reta_size; i++, j++) {
if (j == num)
j = 0;
lut = (lut << 8) | (j & ((0x1 <<
@@ -6987,6 +6994,19 @@ i40e_pf_config_rss(struct i40e_pf *pf)
I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
}
+ /* Store lut into SW */
+ uint8_t *reta;
+
+ reta = rte_zmalloc("i40e_rss_reta", reta_size, 0);
+ if (!reta) {
+ PMD_DRV_LOG(ERR, "No memory can be allocated");
+ return -ENOMEM;
+ }
+ pf->hash.reta = reta;
+ ret = i40e_get_rss_lut(pf->main_vsi, reta, reta_size);
+ if (ret < 0)
+ return ret;
+
rss_conf = pf->dev_data->dev_conf.rx_adv_conf.rss_conf;
if ((rss_conf.rss_hf & I40E_RSS_OFFLOAD_ALL) == 0) {
i40e_pf_disable_rss(pf);
@@ -7005,7 +7025,15 @@ i40e_pf_config_rss(struct i40e_pf *pf)
sizeof(uint32_t);
}
- return i40e_hw_rss_hash_set(pf, &rss_conf);
+ ret = i40e_hw_rss_hash_set(pf, &rss_conf);
+ if (ret < 0)
+ return ret;
+
+ /* store rss configuration into SW */
+ ret = i40e_dev_rss_hash_conf_get(
+ I40E_VSI_TO_ETH_DEV(pf->main_vsi), &pf->hash.rss_conf);
+
+ return ret;
}
static int
@@ -7158,9 +7186,10 @@ i40e_pf_config_mq_rx(struct i40e_pf *pf)
enum rte_eth_rx_mq_mode mq_mode = pf->dev_data->dev_conf.rxmode.mq_mode;
/* RSS setup */
- if (mq_mode & ETH_MQ_RX_RSS_FLAG)
- ret = i40e_pf_config_rss(pf);
- else
+ if (mq_mode & ETH_MQ_RX_RSS_FLAG) {
+ if (!pf->hash.reta)
+ ret = i40e_pf_config_rss(pf);
+ } else
i40e_pf_disable_rss(pf);
return ret;
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index b6eed6a..d40010a 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -466,6 +466,11 @@ struct i40e_tunnel_info {
struct rte_hash *hash_table;
};
+struct i40e_hash_info {
+ uint8_t *reta;
+ struct rte_eth_rss_conf rss_conf;
+};
+
#define I40E_MIRROR_MAX_ENTRIES_PER_RULE 64
#define I40E_MAX_MIRROR_RULES 64
/*
@@ -538,6 +543,7 @@ struct i40e_pf {
struct i40e_fdir_info fdir; /* flow director info */
struct i40e_ethertype_info ethertype; /* Ethertype filter info */
struct i40e_tunnel_info tunnel; /* Tunnel filter info */
+ struct i40e_hash_info hash; /* Hash filter info */
struct i40e_fc_conf fc_conf; /* Flow control conf */
struct i40e_mirror_rule_list mirror_list;
uint16_t nb_mirror_rule; /* The number of mirror rules */
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH 05/24] net/i40e: restore ethertype filter
2016-12-02 11:53 [dpdk-dev] [PATCH 00/24] net/i40e: Consistent filter API Beilei Xing
` (3 preceding siblings ...)
2016-12-02 11:53 ` [dpdk-dev] [PATCH 04/24] net/i40e: store RSS hash info Beilei Xing
@ 2016-12-02 11:53 ` Beilei Xing
2016-12-02 11:53 ` [dpdk-dev] [PATCH 06/24] net/i40e: restore macvlan filter Beilei Xing
` (19 subsequent siblings)
24 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2016-12-02 11:53 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev, wenzhuo.lu
Add support of restoring ethertype filter.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_ethdev.c | 39 +++++++++++++++++++++++++++++++++++++++
1 file changed, 39 insertions(+)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 521e7bb..11c4c64 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -484,6 +484,9 @@ static int i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
static int i40e_sw_tunnel_filter_del(struct i40e_pf *pf,
struct i40e_tunnel_filter *tunnel_filter);
+static void i40e_ethertype_filter_restore(struct i40e_pf *pf);
+static void i40e_filter_restore(struct i40e_pf *pf);
+
static const struct rte_pci_id pci_id_i40e_map[] = {
{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710) },
{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QEMU) },
@@ -1966,6 +1969,8 @@ i40e_dev_start(struct rte_eth_dev *dev)
/* enable uio intr after callback register */
rte_intr_enable(intr_handle);
+ i40e_filter_restore(pf);
+
return I40E_SUCCESS;
err_up:
@@ -10125,3 +10130,37 @@ i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
return ret;
}
+
+/* Restore ethertype filter */
+static void
+i40e_ethertype_filter_restore(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_ethertype_filter_list
+ *ethertype_list = &pf->ethertype.ethertype_list;
+ struct i40e_ethertype_filter *f;
+ struct i40e_control_filter_stats stats;
+ uint16_t flags;
+
+ TAILQ_FOREACH(f, ethertype_list, rules) {
+ flags = 0;
+ if (!(f->flags & RTE_ETHTYPE_FLAGS_MAC))
+ flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
+ if (f->flags & RTE_ETHTYPE_FLAGS_DROP)
+ flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
+ flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
+
+ memset(&stats, 0, sizeof(stats));
+ i40e_aq_add_rem_control_packet_filter(hw,
+ f->input.mac_addr.addr_bytes,
+ f->input.ether_type,
+ flags, pf->main_vsi->seid,
+ f->queue, 1, &stats, NULL);
+ }
+}
+
+static void
+i40e_filter_restore(struct i40e_pf *pf)
+{
+ i40e_ethertype_filter_restore(pf);
+}
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH 06/24] net/i40e: restore macvlan filter
2016-12-02 11:53 [dpdk-dev] [PATCH 00/24] net/i40e: Consistent filter API Beilei Xing
` (4 preceding siblings ...)
2016-12-02 11:53 ` [dpdk-dev] [PATCH 05/24] net/i40e: restore ethertype filter Beilei Xing
@ 2016-12-02 11:53 ` Beilei Xing
2016-12-02 11:53 ` [dpdk-dev] [PATCH 07/24] net/i40e: restore tunnel filter Beilei Xing
` (18 subsequent siblings)
24 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2016-12-02 11:53 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev, wenzhuo.lu
Add support of restoring macvlan filter.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_ethdev.c | 20 ++++++++++++++++++++
1 file changed, 20 insertions(+)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 11c4c64..119ff94 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -485,6 +485,7 @@ static int i40e_sw_tunnel_filter_del(struct i40e_pf *pf,
struct i40e_tunnel_filter *tunnel_filter);
static void i40e_ethertype_filter_restore(struct i40e_pf *pf);
+static void i40e_macvlan_filter_restore(struct i40e_pf *pf);
static void i40e_filter_restore(struct i40e_pf *pf);
static const struct rte_pci_id pci_id_i40e_map[] = {
@@ -10159,8 +10160,27 @@ i40e_ethertype_filter_restore(struct i40e_pf *pf)
}
}
+/* Restore macvlan filter */
+static void
+i40e_macvlan_filter_restore(struct i40e_pf *pf)
+{
+ struct i40e_mac_filter *f;
+ struct i40e_mac_filter_info *mac_filter;
+ struct i40e_vsi *vsi;
+ int i;
+
+ for (i = 0; i < pf->vf_num; i++) {
+ vsi = pf->vfs[i].vsi;
+ TAILQ_FOREACH(f, &vsi->mac_list, next) {
+ mac_filter = &f->mac_info;
+ i40e_vsi_add_mac(vsi, mac_filter);
+ }
+ }
+}
+
static void
i40e_filter_restore(struct i40e_pf *pf)
{
i40e_ethertype_filter_restore(pf);
+ i40e_macvlan_filter_restore(pf);
}
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH 07/24] net/i40e: restore tunnel filter
2016-12-02 11:53 [dpdk-dev] [PATCH 00/24] net/i40e: Consistent filter API Beilei Xing
` (5 preceding siblings ...)
2016-12-02 11:53 ` [dpdk-dev] [PATCH 06/24] net/i40e: restore macvlan filter Beilei Xing
@ 2016-12-02 11:53 ` Beilei Xing
2016-12-02 11:53 ` [dpdk-dev] [PATCH 08/24] net/i40e: restore flow director filter Beilei Xing
` (17 subsequent siblings)
24 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2016-12-02 11:53 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev, wenzhuo.lu
Add support of restoring tunnel filter.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_ethdev.c | 20 ++++++++++++++++++++
1 file changed, 20 insertions(+)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 119ff94..8ca69f2 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -486,6 +486,7 @@ static int i40e_sw_tunnel_filter_del(struct i40e_pf *pf,
static void i40e_ethertype_filter_restore(struct i40e_pf *pf);
static void i40e_macvlan_filter_restore(struct i40e_pf *pf);
+static void i40e_tunnel_filter_restore(struct i40e_pf *pf);
static void i40e_filter_restore(struct i40e_pf *pf);
static const struct rte_pci_id pci_id_i40e_map[] = {
@@ -10178,9 +10179,28 @@ i40e_macvlan_filter_restore(struct i40e_pf *pf)
}
}
+/* Restore tunnel filter */
+static void
+i40e_tunnel_filter_restore(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_vsi *vsi = pf->main_vsi;
+ struct i40e_tunnel_filter_list
+ *tunnel_list = &pf->tunnel.tunnel_list;
+ struct i40e_tunnel_filter *f;
+ struct i40e_aqc_add_remove_cloud_filters_element_data cld_filter;
+
+ TAILQ_FOREACH(f, tunnel_list, rules) {
+ rte_memcpy(&cld_filter, &f->input, sizeof(f->input));
+ cld_filter.queue_number = f->queue;
+ i40e_aq_add_cloud_filters(hw, vsi->seid, &cld_filter, 1);
+ }
+}
+
static void
i40e_filter_restore(struct i40e_pf *pf)
{
i40e_ethertype_filter_restore(pf);
i40e_macvlan_filter_restore(pf);
+ i40e_tunnel_filter_restore(pf);
}
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH 08/24] net/i40e: restore flow director filter
2016-12-02 11:53 [dpdk-dev] [PATCH 00/24] net/i40e: Consistent filter API Beilei Xing
` (6 preceding siblings ...)
2016-12-02 11:53 ` [dpdk-dev] [PATCH 07/24] net/i40e: restore tunnel filter Beilei Xing
@ 2016-12-02 11:53 ` Beilei Xing
2016-12-02 11:53 ` [dpdk-dev] [PATCH 09/24] net/i40e: restore RSS hash info Beilei Xing
` (16 subsequent siblings)
24 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2016-12-02 11:53 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev, wenzhuo.lu
Add support of restoring flow director filter.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_ethdev.c | 1 +
drivers/net/i40e/i40e_ethdev.h | 1 +
drivers/net/i40e/i40e_fdir.c | 11 +++++++++++
3 files changed, 13 insertions(+)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 8ca69f2..a47d141 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -10203,4 +10203,5 @@ i40e_filter_restore(struct i40e_pf *pf)
i40e_ethertype_filter_restore(pf);
i40e_macvlan_filter_restore(pf);
i40e_tunnel_filter_restore(pf);
+ i40e_fdir_filter_restore(pf);
}
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index d40010a..35ac6d6 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -683,6 +683,7 @@ int i40e_fdir_ctrl_func(struct rte_eth_dev *dev,
int i40e_select_filter_input_set(struct i40e_hw *hw,
struct rte_eth_input_set_conf *conf,
enum rte_filter_type filter);
+void i40e_fdir_filter_restore(struct i40e_pf *pf);
int i40e_hash_filter_inset_select(struct i40e_hw *hw,
struct rte_eth_input_set_conf *conf);
int i40e_fdir_filter_inset_select(struct i40e_pf *pf,
diff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c
index 1913fe1..e47a949 100644
--- a/drivers/net/i40e/i40e_fdir.c
+++ b/drivers/net/i40e/i40e_fdir.c
@@ -1576,3 +1576,14 @@ i40e_fdir_ctrl_func(struct rte_eth_dev *dev,
}
return ret;
}
+
+void
+i40e_fdir_filter_restore(struct i40e_pf *pf)
+{
+ struct i40e_fdir_filter_list *fdir_list = &pf->fdir.fdir_list;
+ struct i40e_fdir_filter *f;
+ struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(pf->main_vsi);
+
+ TAILQ_FOREACH(f, fdir_list, rules)
+ i40e_add_del_fdir_filter(dev, &f->fdir, TRUE);
+}
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH 09/24] net/i40e: restore RSS hash info
2016-12-02 11:53 [dpdk-dev] [PATCH 00/24] net/i40e: Consistent filter API Beilei Xing
` (7 preceding siblings ...)
2016-12-02 11:53 ` [dpdk-dev] [PATCH 08/24] net/i40e: restore flow director filter Beilei Xing
@ 2016-12-02 11:53 ` Beilei Xing
2016-12-02 11:53 ` [dpdk-dev] [PATCH 10/24] ethdev: parse ethertype filter Beilei Xing
` (15 subsequent siblings)
24 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2016-12-02 11:53 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev, wenzhuo.lu
Add support of restoring RSS hash info, including lookup table
and RSS configuration.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_ethdev.c | 18 ++++++++++++++++++
1 file changed, 18 insertions(+)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index a47d141..997e2fe 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -487,6 +487,7 @@ static int i40e_sw_tunnel_filter_del(struct i40e_pf *pf,
static void i40e_ethertype_filter_restore(struct i40e_pf *pf);
static void i40e_macvlan_filter_restore(struct i40e_pf *pf);
static void i40e_tunnel_filter_restore(struct i40e_pf *pf);
+static void i40e_rss_hash_restore(struct i40e_pf *pf);
static void i40e_filter_restore(struct i40e_pf *pf);
static const struct rte_pci_id pci_id_i40e_map[] = {
@@ -10197,6 +10198,22 @@ i40e_tunnel_filter_restore(struct i40e_pf *pf)
}
}
+/* Restore hash filter */
+static void
+i40e_rss_hash_restore(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ uint16_t reta_size = hw->func_caps.rss_table_size;
+
+ if (pf->hash.reta) {
+ /* Restore lut to HW */
+ i40e_set_rss_lut(pf->main_vsi, pf->hash.reta, reta_size);
+
+ /* Restore RSS configuration to HW */
+ i40e_hw_rss_hash_set(pf, &pf->hash.rss_conf);
+ }
+}
+
static void
i40e_filter_restore(struct i40e_pf *pf)
{
@@ -10204,4 +10221,5 @@ i40e_filter_restore(struct i40e_pf *pf)
i40e_macvlan_filter_restore(pf);
i40e_tunnel_filter_restore(pf);
i40e_fdir_filter_restore(pf);
+ i40e_rss_hash_restore(pf);
}
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH 10/24] ethdev: parse ethertype filter
2016-12-02 11:53 [dpdk-dev] [PATCH 00/24] net/i40e: Consistent filter API Beilei Xing
` (8 preceding siblings ...)
2016-12-02 11:53 ` [dpdk-dev] [PATCH 09/24] net/i40e: restore RSS hash info Beilei Xing
@ 2016-12-02 11:53 ` Beilei Xing
2016-12-20 18:12 ` Ferruh Yigit
2016-12-02 11:53 ` [dpdk-dev] [PATCH 11/24] net/i40e: add flow validate function Beilei Xing
` (14 subsequent siblings)
24 siblings, 1 reply; 175+ messages in thread
From: Beilei Xing @ 2016-12-02 11:53 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev, wenzhuo.lu
Check if the rule is a ethertype rule, and get the ethertype
info BTW.
Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
lib/librte_ether/rte_flow.c | 136 +++++++++++++++++++++++++++++++++++++
lib/librte_ether/rte_flow_driver.h | 34 ++++++++++
2 files changed, 170 insertions(+)
diff --git a/lib/librte_ether/rte_flow.c b/lib/librte_ether/rte_flow.c
index 064963d..acc9057 100644
--- a/lib/librte_ether/rte_flow.c
+++ b/lib/librte_ether/rte_flow.c
@@ -157,3 +157,139 @@ rte_flow_query(uint8_t port_id,
NULL, rte_strerror(ENOTSUP));
return -rte_errno;
}
+
+/**
+ * Parse the rule to see if it is an ethertype rule.
+ * And get the ethertype filter info BTW.
+ */
+int
+cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
+ const struct rte_flow_item *pattern,
+ const struct rte_flow_action *actions,
+ struct rte_eth_ethertype_filter *filter,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item *item;
+ const struct rte_flow_action *act;
+ const struct rte_flow_item_eth *eth_spec;
+ const struct rte_flow_item_eth *eth_mask;
+ const struct rte_flow_action_queue *act_q;
+ uint32_t i, j;
+
+ /************************************************
+ * parse pattern
+ ************************************************/
+ i = 0;
+
+ /* The first not void item should be MAC. */
+ PATTERN_SKIP_VOID(filter, struct rte_eth_ethertype_filter,
+ RTE_FLOW_ERROR_TYPE_ITEM_NUM);
+ if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
+ error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+ return -EINVAL;
+ }
+
+ /* Get the MAC info. */
+ if (!item->spec || !item->mask) {
+ error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+ return -EINVAL;
+ }
+
+ eth_spec = (const struct rte_flow_item_eth *)item->spec;
+ eth_mask = (const struct rte_flow_item_eth *)item->mask;
+ /**
+ * Source MAC address must be masked.
+ * Destination MAC address must be totally masked or not.
+ */
+ if (eth_mask->src.addr_bytes[0] ||
+ (eth_mask->dst.addr_bytes[0] != 0xFF &&
+ eth_mask->dst.addr_bytes[0])) {
+ error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+ return -EINVAL;
+ }
+
+ for (j = 1; j < ETHER_ADDR_LEN; j++) {
+ if (eth_mask->src.addr_bytes[j] !=
+ eth_mask->src.addr_bytes[0] ||
+ eth_mask->dst.addr_bytes[j] !=
+ eth_mask->dst.addr_bytes[0]) {
+ error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+ return -EINVAL;
+ }
+ }
+
+ if ((eth_mask->type & 0xFFFF) != 0xFFFF) {
+ error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+ return -EINVAL;
+ }
+
+ if (eth_mask->dst.addr_bytes[0]) {
+ filter->mac_addr = eth_spec->dst;
+ filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
+ } else {
+ filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
+ }
+ filter->ether_type = (uint16_t)eth_spec->type;
+
+ /* Check if the next not void item is END. */
+ i++;
+ PATTERN_SKIP_VOID(filter, struct rte_eth_ethertype_filter,
+ RTE_FLOW_ERROR_TYPE_ITEM_NUM);
+ if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+ return -EINVAL;
+ }
+
+ /************************************************
+ * parse action
+ ************************************************/
+ i = 0;
+
+ /* Check if the first not void action is QUEUE or DROP. */
+ ACTION_SKIP_VOID(filter, struct rte_eth_ethertype_filter,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM);
+ if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
+ act->type != RTE_FLOW_ACTION_TYPE_DROP) {
+ error->type = RTE_FLOW_ERROR_TYPE_ACTION;
+ return -EINVAL;
+ }
+
+ if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
+ act_q = (const struct rte_flow_action_queue *)act->conf;
+ filter->queue = act_q->index;
+ } else {
+ filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
+ }
+
+ /* Check if the next not void item is END */
+ i++;
+ ACTION_SKIP_VOID(filter, struct rte_eth_ethertype_filter,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ error->type = RTE_FLOW_ERROR_TYPE_ACTION;
+ return -EINVAL;
+ }
+
+ /************************************************
+ * parse attr
+ ************************************************/
+ /* Must be input direction */
+ if (!attr->ingress) {
+ error->type = RTE_FLOW_ERROR_TYPE_ATTR_INGRESS;
+ return -EINVAL;
+ }
+
+ /* Not supported */
+ if (attr->egress) {
+ error->type = RTE_FLOW_ERROR_TYPE_ATTR_EGRESS;
+ return -EINVAL;
+ }
+
+ /* Not supported */
+ if (attr->priority) {
+ error->type = RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY;
+ return -EINVAL;
+ }
+
+ return 0;
+}
diff --git a/lib/librte_ether/rte_flow_driver.h b/lib/librte_ether/rte_flow_driver.h
index a88c621..2760c74 100644
--- a/lib/librte_ether/rte_flow_driver.h
+++ b/lib/librte_ether/rte_flow_driver.h
@@ -170,6 +170,40 @@ rte_flow_error_set(struct rte_flow_error *error,
const struct rte_flow_ops *
rte_flow_ops_get(uint8_t port_id, struct rte_flow_error *error);
+int cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
+ const struct rte_flow_item *pattern,
+ const struct rte_flow_action *actions,
+ struct rte_eth_ethertype_filter *filter,
+ struct rte_flow_error *error);
+
+#define PATTERN_SKIP_VOID(filter, filter_struct, error_type) \
+ do { \
+ if (!pattern) { \
+ memset(filter, 0, sizeof(filter_struct)); \
+ error->type = error_type; \
+ return -EINVAL; \
+ } \
+ item = pattern + i; \
+ while (item->type == RTE_FLOW_ITEM_TYPE_VOID) { \
+ i++; \
+ item = pattern + i; \
+ } \
+ } while (0)
+
+#define ACTION_SKIP_VOID(filter, filter_struct, error_type) \
+ do { \
+ if (!actions) { \
+ memset(filter, 0, sizeof(filter_struct)); \
+ error->type = error_type; \
+ return -EINVAL; \
+ } \
+ act = actions + i; \
+ while (act->type == RTE_FLOW_ACTION_TYPE_VOID) { \
+ i++; \
+ act = actions + i; \
+ } \
+ } while (0)
+
#ifdef __cplusplus
}
#endif
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH 11/24] net/i40e: add flow validate function
2016-12-02 11:53 [dpdk-dev] [PATCH 00/24] net/i40e: Consistent filter API Beilei Xing
` (9 preceding siblings ...)
2016-12-02 11:53 ` [dpdk-dev] [PATCH 10/24] ethdev: parse ethertype filter Beilei Xing
@ 2016-12-02 11:53 ` Beilei Xing
2016-12-02 11:53 ` [dpdk-dev] [PATCH 12/24] net/i40e: parse macvlan filter Beilei Xing
` (13 subsequent siblings)
24 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2016-12-02 11:53 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev, wenzhuo.lu
This patch adds handling RTE_ETH_FILTER_GENERIC filter type in
.filter_ctrl function, and result in a pointer to i40e_flow_ops.
This patch also adds flow validate ops.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_ethdev.c | 34 ++++++++++++++++++++++++++++++++++
1 file changed, 34 insertions(+)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 997e2fe..c1623c4 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -52,6 +52,7 @@
#include <rte_eth_ctrl.h>
#include <rte_tailq.h>
#include <rte_hash_crc.h>
+#include <rte_flow_driver.h>
#include "i40e_logs.h"
#include "base/i40e_prototype.h"
@@ -490,6 +491,12 @@ static void i40e_tunnel_filter_restore(struct i40e_pf *pf);
static void i40e_rss_hash_restore(struct i40e_pf *pf);
static void i40e_filter_restore(struct i40e_pf *pf);
+static int i40e_flow_validate(__rte_unused struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item *pattern,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error);
+
static const struct rte_pci_id pci_id_i40e_map[] = {
{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710) },
{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QEMU) },
@@ -584,6 +591,10 @@ static const struct eth_dev_ops i40e_eth_dev_ops = {
.mtu_set = i40e_dev_mtu_set,
};
+static const struct rte_flow_ops i40e_flow_ops = {
+ .validate = i40e_flow_validate,
+};
+
/* store statistics names and its offset in stats structure */
struct rte_i40e_xstats_name_off {
char name[RTE_ETH_XSTATS_NAME_SIZE];
@@ -8505,6 +8516,11 @@ i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
case RTE_ETH_FILTER_FDIR:
ret = i40e_fdir_ctrl_func(dev, filter_op, arg);
break;
+ case RTE_ETH_FILTER_GENERIC:
+ if (filter_op != RTE_ETH_FILTER_GET)
+ return -EINVAL;
+ *(const void **)arg = &i40e_flow_ops;
+ break;
default:
PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
filter_type);
@@ -10223,3 +10239,21 @@ i40e_filter_restore(struct i40e_pf *pf)
i40e_fdir_filter_restore(pf);
i40e_rss_hash_restore(pf);
}
+
+static int
+i40e_flow_validate(__rte_unused struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item *pattern,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error)
+{
+ struct rte_eth_ethertype_filter ethertype_filter;
+ int ret;
+
+ ret = cons_parse_ethertype_filter(attr, pattern, actions,
+ &ethertype_filter, error);
+ if (!ret)
+ return 0;
+
+ return ret;
+}
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH 12/24] net/i40e: parse macvlan filter
2016-12-02 11:53 [dpdk-dev] [PATCH 00/24] net/i40e: Consistent filter API Beilei Xing
` (10 preceding siblings ...)
2016-12-02 11:53 ` [dpdk-dev] [PATCH 11/24] net/i40e: add flow validate function Beilei Xing
@ 2016-12-02 11:53 ` Beilei Xing
2016-12-02 11:53 ` [dpdk-dev] [PATCH 13/24] net/i40e: parse VXLAN filter Beilei Xing
` (12 subsequent siblings)
24 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2016-12-02 11:53 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev, wenzhuo.lu
Check if the rule is a macvlan rule, and get the macvlan
info BTW.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_ethdev.c | 160 +++++++++++++++++++++++++++++++++++++++++
1 file changed, 160 insertions(+)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index c1623c4..18247c0 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -53,6 +53,7 @@
#include <rte_tailq.h>
#include <rte_hash_crc.h>
#include <rte_flow_driver.h>
+#include <rte_flow.h>
#include "i40e_logs.h"
#include "base/i40e_prototype.h"
@@ -491,6 +492,11 @@ static void i40e_tunnel_filter_restore(struct i40e_pf *pf);
static void i40e_rss_hash_restore(struct i40e_pf *pf);
static void i40e_filter_restore(struct i40e_pf *pf);
+static int i40e_parse_macvlan_filter(const struct rte_flow_attr *attr,
+ const struct rte_flow_item *pattern,
+ const struct rte_flow_action *actions,
+ struct rte_eth_mac_filter *filter,
+ struct rte_flow_error *error);
static int i40e_flow_validate(__rte_unused struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
const struct rte_flow_item *pattern,
@@ -10241,6 +10247,154 @@ i40e_filter_restore(struct i40e_pf *pf)
}
static int
+i40e_parse_attr(const struct rte_flow_attr *attr,
+ struct rte_flow_error *error)
+{
+ /* Must be input direction */
+ if (!attr->ingress) {
+ error->type = RTE_FLOW_ERROR_TYPE_ATTR_INGRESS;
+ return -EINVAL;
+ }
+
+ /* Not supported */
+ if (attr->egress) {
+ error->type = RTE_FLOW_ERROR_TYPE_ATTR_EGRESS;
+ return -EINVAL;
+ }
+
+ /* Not supported */
+ if (attr->priority) {
+ error->type = RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY;
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * Parse the rule to see if it is a macvlan rule.
+ * And get the macvlan filter info BTW.
+ */
+static int
+i40e_parse_macvlan_filter(const struct rte_flow_attr *attr,
+ const struct rte_flow_item *pattern,
+ const struct rte_flow_action *actions,
+ struct rte_eth_mac_filter *filter,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item *item;
+ const struct rte_flow_action *act;
+ const struct rte_flow_item_eth *eth_spec, *eth_mask;
+ const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
+ const struct rte_flow_action_vf *act_vf;
+ struct ether_addr macaddr_unmasked = {
+ .addr_bytes = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}
+ };
+ struct ether_addr macaddr_masked = {
+ .addr_bytes = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0}
+ };
+ uint32_t i;
+
+ /* Parse pattern */
+ i = 0;
+
+ /* the first not void item should be ETH */
+ PATTERN_SKIP_VOID(filter, struct rte_eth_mac_filter,
+ RTE_FLOW_ERROR_TYPE_ITEM_NUM);
+ if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
+ error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+ return -EINVAL;
+ }
+
+ eth_spec = (const struct rte_flow_item_eth *)item->spec;
+ eth_mask = (const struct rte_flow_item_eth *)item->mask;
+ if (!eth_spec || !eth_mask) {
+ error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+ return -EINVAL;
+ }
+
+ /**
+ * SRC MAC address should be masked.
+ * DST MAC address shouldn't be masked.
+ */
+ if (!is_same_ether_addr(&eth_mask->src, &macaddr_masked) ||
+ !is_same_ether_addr(&eth_mask->dst, &macaddr_unmasked)) {
+ error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+ return -EINVAL;
+ }
+
+ /* Ethertype should be masked. */
+ if (eth_mask->type) {
+ error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+ return -EINVAL;
+ }
+
+ rte_memcpy(&filter->mac_addr, ð_spec->dst, ETHER_ADDR_LEN);
+ filter->filter_type = RTE_MAC_PERFECT_MATCH;
+
+ i++;
+ PATTERN_SKIP_VOID(filter, struct rte_eth_mac_filter,
+ RTE_FLOW_ERROR_TYPE_ITEM_NUM);
+
+ if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
+ vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
+ vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
+
+ if (!(vlan_spec && vlan_mask)) {
+ error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+ return -EINVAL;
+ }
+
+ /* PRI and CFI should be masked. */
+ if (vlan_mask->tci == rte_cpu_to_be_16(0x0FFF))
+ filter->filter_type = RTE_MACVLAN_PERFECT_MATCH;
+ else if (vlan_mask->tci == 0x0)
+ filter->filter_type = RTE_MAC_PERFECT_MATCH;
+ else {
+ error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+ return -EINVAL;
+ }
+
+ i++;
+ PATTERN_SKIP_VOID(filter, struct rte_eth_mac_filter,
+ RTE_FLOW_ERROR_TYPE_ITEM_NUM);
+ }
+
+ /* Check if the next not void item is END. */
+ if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+ return -EINVAL;
+ }
+
+ /* Parse action */
+ i = 0;
+
+ /* Check if the next not void item is VF. */
+ ACTION_SKIP_VOID(filter, struct rte_eth_mac_filter,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM);
+ if (act->type != RTE_FLOW_ACTION_TYPE_VF) {
+ error->type = RTE_FLOW_ERROR_TYPE_ACTION;
+ return -EINVAL;
+ }
+
+ filter->is_vf = 1;
+
+ act_vf = (const struct rte_flow_action_vf *)act->conf;
+ filter->dst_id = act_vf->id;
+
+ /* Check if the next not void item is END. */
+ i++;
+ ACTION_SKIP_VOID(filter, struct rte_eth_mac_filter,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ error->type = RTE_FLOW_ERROR_TYPE_ACTION;
+ return -EINVAL;
+ }
+
+ return i40e_parse_attr(attr, error);
+}
+
+static int
i40e_flow_validate(__rte_unused struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
const struct rte_flow_item *pattern,
@@ -10248,6 +10402,7 @@ i40e_flow_validate(__rte_unused struct rte_eth_dev *dev,
struct rte_flow_error *error)
{
struct rte_eth_ethertype_filter ethertype_filter;
+ struct rte_eth_mac_filter macvlan_filter;
int ret;
ret = cons_parse_ethertype_filter(attr, pattern, actions,
@@ -10255,5 +10410,10 @@ i40e_flow_validate(__rte_unused struct rte_eth_dev *dev,
if (!ret)
return 0;
+ ret = i40e_parse_macvlan_filter(attr, pattern, actions,
+ &macvlan_filter, error);
+ if (!ret)
+ return 0;
+
return ret;
}
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH 13/24] net/i40e: parse VXLAN filter
2016-12-02 11:53 [dpdk-dev] [PATCH 00/24] net/i40e: Consistent filter API Beilei Xing
` (11 preceding siblings ...)
2016-12-02 11:53 ` [dpdk-dev] [PATCH 12/24] net/i40e: parse macvlan filter Beilei Xing
@ 2016-12-02 11:53 ` Beilei Xing
2016-12-02 11:53 ` [dpdk-dev] [PATCH 14/24] net/i40e: parse NVGRE filter Beilei Xing
` (11 subsequent siblings)
24 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2016-12-02 11:53 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev, wenzhuo.lu
Check if the rule is a VXLAN rule, and get the VXLAN
info.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_ethdev.c | 349 +++++++++++++++++++++++++++++++++++++++++
1 file changed, 349 insertions(+)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 18247c0..3bdef8e 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -497,6 +497,11 @@ static int i40e_parse_macvlan_filter(const struct rte_flow_attr *attr,
const struct rte_flow_action *actions,
struct rte_eth_mac_filter *filter,
struct rte_flow_error *error);
+static int i40e_parse_tunnel_filter(const struct rte_flow_attr *attr,
+ const struct rte_flow_item *pattern,
+ const struct rte_flow_action *actions,
+ struct rte_eth_tunnel_filter_conf *filter,
+ struct rte_flow_error *error);
static int i40e_flow_validate(__rte_unused struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
const struct rte_flow_item *pattern,
@@ -10394,6 +10399,344 @@ i40e_parse_macvlan_filter(const struct rte_flow_attr *attr,
return i40e_parse_attr(attr, error);
}
+/* Parse to get the action and attr info of a tunnel filter */
+static int
+i40e_parse_tunnel_act_attr(const struct rte_flow_attr *attr,
+ const struct rte_flow_action *actions,
+ struct rte_eth_tunnel_filter_conf *filter,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_action *act;
+ const struct rte_flow_action_queue *act_q;
+ uint32_t i;
+
+ /* parse action */
+ i = 0;
+
+ /* Check if the first not void action is QUEUE. */
+ ACTION_SKIP_VOID(filter, struct rte_eth_tunnel_filter_conf,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM);
+ if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
+ error->type = RTE_FLOW_ERROR_TYPE_ACTION;
+ return -EINVAL;
+ }
+
+ act_q = (const struct rte_flow_action_queue *)act->conf;
+ filter->queue_id = act_q->index;
+
+ /* Check if the next not void item is END */
+ i++;
+ ACTION_SKIP_VOID(filter, struct rte_eth_tunnel_filter_conf,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ error->type = RTE_FLOW_ERROR_TYPE_ACTION;
+ return -EINVAL;
+ }
+
+ return i40e_parse_attr(attr, error);
+}
+
+/**
+ * Parse the rule to see if it is a vxlan rule.
+ * And get the tunnel filter info if it is.
+ */
+static int
+i40e_parse_vxlan_tunnel_filter(const struct rte_flow_attr *attr,
+ const struct rte_flow_item *pattern,
+ const struct rte_flow_action *actions,
+ struct rte_eth_tunnel_filter_conf *filter,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item *item;
+ const struct rte_flow_item_eth *o_eth_spec = NULL;
+ const struct rte_flow_item_eth *o_eth_mask = NULL;
+ const struct rte_flow_item_vxlan *vxlan_spec = NULL;
+ const struct rte_flow_item_vxlan *vxlan_mask = NULL;
+ const struct rte_flow_item_eth *i_eth_spec, *i_eth_mask;
+ const struct rte_flow_item_vlan *vlan_spec = NULL;
+ const struct rte_flow_item_vlan *vlan_mask = NULL;
+ struct ether_addr macaddr_unmasked = {
+ .addr_bytes = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}
+ };
+ struct ether_addr macaddr_masked = {
+ .addr_bytes = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0}
+ };
+ bool is_vni_masked = 0;
+ uint32_t i;
+
+ /* parse pattern */
+ i = 0;
+
+ /* The first not void item should be ETH or IP or UDP or VXLAN */
+ PATTERN_SKIP_VOID(filter, struct rte_eth_tunnel_filter_conf,
+ RTE_FLOW_ERROR_TYPE_ITEM_NUM);
+ if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
+ item->type != RTE_FLOW_ITEM_TYPE_UDP &&
+ item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
+ error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+ return -EINVAL;
+ }
+
+ if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
+ o_eth_spec = (const struct rte_flow_item_eth *)item->spec;
+ o_eth_mask = (const struct rte_flow_item_eth *)item->mask;
+
+ if ((!o_eth_spec && o_eth_mask) ||
+ (o_eth_spec && !o_eth_mask)) {
+ error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+ return -EINVAL;
+ }
+
+ if (o_eth_spec)
+ rte_memcpy(&filter->outer_mac, &o_eth_spec->dst,
+ ETHER_ADDR_LEN);
+
+ if (o_eth_mask) {
+ /**
+ * DST MAC address shouldn't be masked.
+ * SRC MAC address should be masked.
+ * Ethertype should be masked.
+ */
+ if (!is_same_ether_addr(&o_eth_mask->dst,
+ &macaddr_unmasked) ||
+ !is_same_ether_addr(&o_eth_mask->src,
+ &macaddr_masked) ||
+ o_eth_mask->type) {
+ error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+ return -EINVAL;
+ }
+ }
+
+ i++;
+ PATTERN_SKIP_VOID(filter, struct rte_eth_tunnel_filter_conf,
+ RTE_FLOW_ERROR_TYPE_ITEM_NUM);
+ if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
+ error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+ return -EINVAL;
+ }
+ }
+
+ if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
+ item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
+ /**
+ * If the item is IP, the content should be NULL.
+ * Only used to describe the protocol stack.
+ */
+ if (item->spec || item->mask) {
+ error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+ return -EINVAL;
+ }
+
+ /* Check if the next not void item is UDP */
+ i++;
+ PATTERN_SKIP_VOID(filter, struct rte_eth_tunnel_filter_conf,
+ RTE_FLOW_ERROR_TYPE_ITEM_NUM);
+ if (item->type != RTE_FLOW_ITEM_TYPE_UDP) {
+ error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+ return -EINVAL;
+ }
+ }
+
+ if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
+ /**
+ * If the item is UDP, the content should be NULL
+ * Only used to describe the protocol stack.
+ */
+ if (item->spec || item->mask) {
+ error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+ return -EINVAL;
+ }
+
+ /* Check if the next not void item is VXLAN */
+ i++;
+ PATTERN_SKIP_VOID(filter, struct rte_eth_tunnel_filter_conf,
+ RTE_FLOW_ERROR_TYPE_ITEM_NUM);
+ }
+
+ if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
+ error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+ return -EINVAL;
+ }
+
+ vxlan_spec = (const struct rte_flow_item_vxlan *)item->spec;
+ vxlan_mask = (const struct rte_flow_item_vxlan *)item->mask;
+
+ /**
+ * Check if VXLAN item is used to describe the protocol stack.
+ * If yes, both vxlan_spec and vxlan_mask should be NULL.
+ * If no, both vxlan_spec and vxlan_mask should be non-NULL.
+ */
+ if ((!vxlan_spec && vxlan_mask) ||
+ (vxlan_spec && !vxlan_mask)) {
+ error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+ return -EINVAL;
+ }
+
+ /* Check if VNI is masked. */
+ if (vxlan_mask) {
+ for (uint32_t j = 0; j < RTE_DIM(vxlan_mask->vni); j++) {
+ if (vxlan_mask->vni[j] == 0xFF) {
+ if (j > 0 &&
+ (vxlan_mask->vni[j] !=
+ vxlan_mask->vni[j - 1])) {
+ error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+ return -EINVAL;
+ }
+ is_vni_masked = 0;
+ } else if (vxlan_mask->vni[j] == 0) {
+ if (j > 0 &&
+ (vxlan_mask->vni[j] !=
+ vxlan_mask->vni[j - 1])) {
+ error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+ return -EINVAL;
+ }
+ is_vni_masked = 1;
+ } else {
+ error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+ return -EINVAL;
+ }
+ }
+ }
+
+ /* Check if the next not void item is ETH. */
+ i++;
+ PATTERN_SKIP_VOID(filter, struct rte_eth_tunnel_filter_conf,
+ RTE_FLOW_ERROR_TYPE_ITEM_NUM);
+ if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
+ error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+ return -EINVAL;
+ }
+
+ if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
+ i_eth_spec = (const struct rte_flow_item_eth *)item->spec;
+ i_eth_mask = (const struct rte_flow_item_eth *)item->mask;
+
+ if (!i_eth_spec || !i_eth_mask) {
+ error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+ return -EINVAL;
+ }
+
+ /**
+ * DST address of inner MAC shouldn't be masked.
+ * SRC address of Inner MAC should be masked.
+ */
+ if (!is_same_ether_addr(&i_eth_mask->dst, &macaddr_unmasked) ||
+ !is_same_ether_addr(&i_eth_mask->src, &macaddr_masked) ||
+ i_eth_mask->type) {
+ error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+ return -EINVAL;
+ }
+
+ rte_memcpy(&filter->inner_mac, &i_eth_spec->dst,
+ ETHER_ADDR_LEN);
+
+ /* Check if the next not void item is VLAN or END. */
+ i++;
+ PATTERN_SKIP_VOID(filter, struct rte_eth_tunnel_filter_conf,
+ RTE_FLOW_ERROR_TYPE_ITEM_NUM);
+ if (item->type != RTE_FLOW_ITEM_TYPE_VLAN &&
+ item->type != RTE_FLOW_ITEM_TYPE_END) {
+ error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+ return -EINVAL;
+ }
+ }
+
+ if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
+ vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
+ vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
+
+ if (!(vlan_spec && vlan_mask)) {
+ error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+ return -EINVAL;
+ }
+
+ /* Check if the next not void item is END. */
+ i++;
+ PATTERN_SKIP_VOID(filter, struct rte_eth_tunnel_filter_conf,
+ RTE_FLOW_ERROR_TYPE_ITEM_NUM);
+ if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+ return -EINVAL;
+ }
+ }
+
+ if (vlan_spec && vlan_mask &&
+ (vlan_mask->tci == rte_cpu_to_be_16(0x0FFF))) {
+ filter->inner_vlan = rte_be_to_cpu_16(vlan_spec->tci) & 0xFFF;
+ if (vxlan_spec && vxlan_mask && !is_vni_masked) {
+ rte_memcpy(&filter->tenant_id, vxlan_spec->vni,
+ RTE_DIM(vxlan_spec->vni));
+ if (!o_eth_spec && !o_eth_mask)
+ filter->filter_type =
+ RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID;
+ else {
+ error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+ return -EINVAL;
+ }
+ } else if (!vxlan_spec && !vxlan_mask) {
+ if (!o_eth_spec && !o_eth_mask)
+ filter->filter_type =
+ RTE_TUNNEL_FILTER_IMAC_IVLAN;
+ else {
+ error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+ return -EINVAL;
+ }
+ } else {
+ error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+ return -EINVAL;
+ }
+ } else if ((!vlan_spec && !vlan_mask) ||
+ (vlan_spec && vlan_mask && vlan_mask->tci == 0x0)) {
+ if (vxlan_spec && vxlan_mask && !is_vni_masked) {
+ rte_memcpy(&filter->tenant_id, vxlan_spec->vni,
+ RTE_DIM(vxlan_spec->vni));
+ if (!o_eth_spec && !o_eth_mask)
+ filter->filter_type =
+ RTE_TUNNEL_FILTER_IMAC_TENID;
+ else
+ filter->filter_type =
+ RTE_TUNNEL_FILTER_OMAC_TENID_IMAC;
+ } else if (!vxlan_spec && !vxlan_mask) {
+ if (!o_eth_spec && !o_eth_mask)
+ filter->filter_type = ETH_TUNNEL_FILTER_IMAC;
+ else {
+ error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+ return -EINVAL;
+ }
+ } else {
+ error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+ return -EINVAL;
+ }
+ } else {
+ error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+ return -EINVAL;
+ }
+
+ filter->tunnel_type = RTE_TUNNEL_TYPE_VXLAN;
+
+ return i40e_parse_tunnel_act_attr(attr, actions, filter, error);
+}
+
+static int
+i40e_parse_tunnel_filter(const struct rte_flow_attr *attr,
+ const struct rte_flow_item *pattern,
+ const struct rte_flow_action *actions,
+ struct rte_eth_tunnel_filter_conf *rule,
+ struct rte_flow_error *error)
+{
+ int ret;
+
+ ret = i40e_parse_vxlan_tunnel_filter(attr, pattern,
+ actions, rule, error);
+ if (!ret)
+ return 0;
+
+ return ret;
+}
+
static int
i40e_flow_validate(__rte_unused struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
@@ -10403,6 +10746,7 @@ i40e_flow_validate(__rte_unused struct rte_eth_dev *dev,
{
struct rte_eth_ethertype_filter ethertype_filter;
struct rte_eth_mac_filter macvlan_filter;
+ struct rte_eth_tunnel_filter_conf tunnel_filter;
int ret;
ret = cons_parse_ethertype_filter(attr, pattern, actions,
@@ -10415,5 +10759,10 @@ i40e_flow_validate(__rte_unused struct rte_eth_dev *dev,
if (!ret)
return 0;
+ ret = i40e_parse_tunnel_filter(attr, pattern, actions,
+ &tunnel_filter, error);
+ if (!ret)
+ return 0;
+
return ret;
}
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH 14/24] net/i40e: parse NVGRE filter
2016-12-02 11:53 [dpdk-dev] [PATCH 00/24] net/i40e: Consistent filter API Beilei Xing
` (12 preceding siblings ...)
2016-12-02 11:53 ` [dpdk-dev] [PATCH 13/24] net/i40e: parse VXLAN filter Beilei Xing
@ 2016-12-02 11:53 ` Beilei Xing
2016-12-02 11:53 ` [dpdk-dev] [PATCH 15/24] net/i40e: parse flow director filter Beilei Xing
` (10 subsequent siblings)
24 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2016-12-02 11:53 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev, wenzhuo.lu
Check if the rule is a NVGRE rule, and get the NVGRE
info.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_ethdev.c | 269 +++++++++++++++++++++++++++++++++++++++++
lib/librte_ether/rte_flow.h | 23 ++++
2 files changed, 292 insertions(+)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 3bdef8e..1ffafa0 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -10720,6 +10720,270 @@ i40e_parse_vxlan_tunnel_filter(const struct rte_flow_attr *attr,
return i40e_parse_tunnel_act_attr(attr, actions, filter, error);
}
+/* whether it is NVGRE tunnel rule */
+static int
+i40e_parse_nvgre_tunnel_filter(const struct rte_flow_attr *attr,
+ const struct rte_flow_item *pattern,
+ const struct rte_flow_action *actions,
+ struct rte_eth_tunnel_filter_conf *filter,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item *item;
+ const struct rte_flow_item_eth *o_eth_spec = NULL;
+ const struct rte_flow_item_eth *o_eth_mask = NULL;
+ const struct rte_flow_item_nvgre *nvgre_spec = NULL;
+ const struct rte_flow_item_nvgre *nvgre_mask = NULL;
+ const struct rte_flow_item_eth *i_eth_spec, *i_eth_mask;
+ const struct rte_flow_item_vlan *vlan_spec = NULL;
+ const struct rte_flow_item_vlan *vlan_mask = NULL;
+ struct ether_addr macaddr_unmasked = {
+ .addr_bytes = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}
+ };
+ struct ether_addr macaddr_masked = {
+ .addr_bytes = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0}
+ };
+ bool is_tni_masked = 0;
+ uint32_t i;
+
+ /* parse pattern */
+ i = 0;
+
+ /* The first not void item should be ETH or IP or NVGRE. */
+ PATTERN_SKIP_VOID(filter, struct rte_eth_tunnel_filter_conf,
+ RTE_FLOW_ERROR_TYPE_ITEM_NUM);
+ if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
+ item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
+ error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+ return -EINVAL;
+ }
+
+ if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
+ o_eth_spec = (const struct rte_flow_item_eth *)item->spec;
+ o_eth_mask = (const struct rte_flow_item_eth *)item->mask;
+
+ if ((!o_eth_spec && o_eth_mask) ||
+ (o_eth_spec && !o_eth_mask)) {
+ error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+ return -EINVAL;
+ }
+
+ if (o_eth_spec)
+ rte_memcpy(&filter->outer_mac, &o_eth_spec->dst,
+ ETHER_ADDR_LEN);
+
+ if (o_eth_mask) {
+ /**
+ * DST MAC address shouldn't be masked.
+ * SRC MAC address should be masked.
+ * Ethertype should be masked.
+ */
+ if (!is_same_ether_addr(&o_eth_mask->dst,
+ &macaddr_unmasked) ||
+ !is_same_ether_addr(&o_eth_mask->src,
+ &macaddr_masked) ||
+ o_eth_mask->type) {
+ error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+ return -EINVAL;
+ }
+ }
+
+ i++;
+ PATTERN_SKIP_VOID(filter, struct rte_eth_tunnel_filter_conf,
+ RTE_FLOW_ERROR_TYPE_ITEM_NUM);
+ if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
+ error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+ return -EINVAL;
+ }
+ }
+
+ if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
+ item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
+ /**
+ * If the item is IP, the content should be NULL.
+ * Only used to describe the protocol stack.
+ */
+ if (item->spec || item->mask) {
+ error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+ return -EINVAL;
+ }
+
+ /* Check if the next not void item is NVGRE. */
+ i++;
+ PATTERN_SKIP_VOID(filter, struct rte_eth_tunnel_filter_conf,
+ RTE_FLOW_ERROR_TYPE_ITEM_NUM);
+ if (item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
+ error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+ return -EINVAL;
+ }
+ }
+
+ if (item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
+ error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+ return -EINVAL;
+ }
+
+ nvgre_spec = (const struct rte_flow_item_nvgre *)item->spec;
+ nvgre_mask = (const struct rte_flow_item_nvgre *)item->mask;
+
+ /**
+ * Check if NVGRE item is used to describe the protocol stack.
+ * If yes, both nvgre_spec and nvgre_mask should be NULL.
+ * If no, both nvgre_spec and nvgre_mask should be non-NULL.
+ */
+ if ((!nvgre_spec && nvgre_mask) ||
+ (nvgre_spec && !nvgre_mask)) {
+ error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+ return -EINVAL;
+ }
+
+ /* Check if TNI is masked. */
+ if (nvgre_mask) {
+ for (uint32_t j = 0; j < RTE_DIM(nvgre_mask->tni); j++) {
+ if (nvgre_mask->tni[j] == 0xFF) {
+ if (j > 0 &&
+ (nvgre_mask->tni[j] !=
+ nvgre_mask->tni[j - 1])) {
+ error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+ return -EINVAL;
+ }
+ is_tni_masked = 0;
+ } else if (nvgre_mask->tni[j] == 0) {
+ if (j > 0 &&
+ (nvgre_mask->tni[j] !=
+ nvgre_mask->tni[j - 1])) {
+ error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+ return -EINVAL;
+ }
+ is_tni_masked = 1;
+ } else {
+ error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+ return -EINVAL;
+ }
+ }
+ }
+
+ /* Check if the next not void item is ETH. */
+ i++;
+ PATTERN_SKIP_VOID(filter, struct rte_eth_tunnel_filter_conf,
+ RTE_FLOW_ERROR_TYPE_ITEM_NUM);
+ if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
+ error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+ return -EINVAL;
+ }
+
+ if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
+ i_eth_spec = (const struct rte_flow_item_eth *)item->spec;
+ i_eth_mask = (const struct rte_flow_item_eth *)item->mask;
+
+ if (!i_eth_spec || !i_eth_mask) {
+ error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+ return -EINVAL;
+ }
+
+ rte_memcpy(&filter->inner_mac, &i_eth_spec->dst,
+ ETHER_ADDR_LEN);
+
+ /**
+ * DST address of inner MAC shouldn't be masked.
+ * SRC address of Inner MAC should be masked.
+ */
+ if (!is_same_ether_addr(&i_eth_mask->dst, &macaddr_unmasked) ||
+ !is_same_ether_addr(&i_eth_mask->src, &macaddr_masked) ||
+ i_eth_mask->type) {
+ error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+ return -EINVAL;
+ }
+
+ /* Check if the next not void item is VLAN or END. */
+ i++;
+ PATTERN_SKIP_VOID(filter, struct rte_eth_tunnel_filter_conf,
+ RTE_FLOW_ERROR_TYPE_ITEM_NUM);
+ if (item->type != RTE_FLOW_ITEM_TYPE_VLAN &&
+ item->type != RTE_FLOW_ITEM_TYPE_END) {
+ error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+ return -EINVAL;
+ }
+ }
+
+ if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
+ vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
+ vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
+
+ if (!(vlan_spec && vlan_mask)) {
+ error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+ return -EINVAL;
+ }
+
+ /* Check if the next not void item is END. */
+ i++;
+ PATTERN_SKIP_VOID(filter, struct rte_eth_tunnel_filter_conf,
+ RTE_FLOW_ERROR_TYPE_ITEM_NUM);
+ if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+ return -EINVAL;
+ }
+ }
+
+ if (vlan_spec && vlan_mask &&
+ (vlan_mask->tci == rte_cpu_to_be_16(0x0FFF))) {
+ filter->inner_vlan = rte_be_to_cpu_16(vlan_spec->tci) & 0xFFF;
+ if (nvgre_spec && nvgre_mask && !is_tni_masked) {
+ rte_memcpy(&filter->tenant_id, nvgre_spec->tni,
+ RTE_DIM(nvgre_spec->tni));
+ if (!o_eth_spec && !o_eth_mask)
+ filter->filter_type =
+ RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID;
+ else {
+ error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+ return -EINVAL;
+ }
+ } else if (!nvgre_spec && !nvgre_mask) {
+ if (!o_eth_spec && !o_eth_mask)
+ filter->filter_type =
+ RTE_TUNNEL_FILTER_IMAC_IVLAN;
+ else {
+ error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+ return -EINVAL;
+ }
+ } else {
+ error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+ return -EINVAL;
+ }
+ } else if ((!vlan_spec && !vlan_mask) ||
+ (vlan_spec && vlan_mask && vlan_mask->tci == 0x0)) {
+ if (nvgre_spec && nvgre_mask && !is_tni_masked) {
+ rte_memcpy(&filter->tenant_id, nvgre_spec->tni,
+ RTE_DIM(nvgre_spec->tni));
+ if (!o_eth_spec && !o_eth_mask)
+ filter->filter_type =
+ RTE_TUNNEL_FILTER_IMAC_TENID;
+ else
+ filter->filter_type =
+ RTE_TUNNEL_FILTER_OMAC_TENID_IMAC;
+ } else if (!nvgre_spec && !nvgre_mask) {
+ if (!o_eth_spec && !o_eth_mask)
+ filter->filter_type = ETH_TUNNEL_FILTER_IMAC;
+ else {
+ error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+ return -EINVAL;
+ }
+ } else {
+ error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+ return -EINVAL;
+ }
+ } else {
+ error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+ return -EINVAL;
+ }
+
+ filter->tunnel_type = RTE_TUNNEL_TYPE_NVGRE;
+
+ return i40e_parse_tunnel_act_attr(attr, actions, filter, error);
+}
+
static int
i40e_parse_tunnel_filter(const struct rte_flow_attr *attr,
const struct rte_flow_item *pattern,
@@ -10734,6 +10998,11 @@ i40e_parse_tunnel_filter(const struct rte_flow_attr *attr,
if (!ret)
return 0;
+ ret = i40e_parse_nvgre_tunnel_filter(attr, pattern,
+ actions, rule, error);
+ if (!ret)
+ return 0;
+
return ret;
}
diff --git a/lib/librte_ether/rte_flow.h b/lib/librte_ether/rte_flow.h
index 211f307..6bdbba1 100644
--- a/lib/librte_ether/rte_flow.h
+++ b/lib/librte_ether/rte_flow.h
@@ -270,6 +270,13 @@ enum rte_flow_item_type {
* See struct rte_flow_item_vxlan.
*/
RTE_FLOW_ITEM_TYPE_VXLAN,
+
+ /**
+ * Matches a NVGRE header.
+ *
+ * See struct rte_flow_item_nvgre.
+ */
+ RTE_FLOW_ITEM_TYPE_NVGRE,
};
/**
@@ -461,6 +468,22 @@ struct rte_flow_item_vxlan {
};
/**
+ * RTE_FLOW_ITEM_TYPE_NVGRE.
+ *
+ * Matches a NVGRE header.
+ */
+struct rte_flow_item_nvgre {
+ uint32_t flags0:1; /**< 0 */
+ uint32_t rsvd1:1; /**< 1 bit not defined */
+ uint32_t flags1:2; /**< 2 bits, 1 0 */
+ uint32_t rsvd0:9; /**< Reserved0 */
+ uint32_t ver:3; /**< version */
+ uint32_t protocol:16; /**< protocol type, 0x6558 */
+ uint8_t tni[3]; /**< tenant network ID or virtual subnet ID */
+ uint8_t flow_id; /**< flow ID or Reserved */
+};
+
+/**
* Matching pattern item definition.
*
* A pattern is formed by stacking items starting from the lowest protocol
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH 15/24] net/i40e: parse flow director filter
2016-12-02 11:53 [dpdk-dev] [PATCH 00/24] net/i40e: Consistent filter API Beilei Xing
` (13 preceding siblings ...)
2016-12-02 11:53 ` [dpdk-dev] [PATCH 14/24] net/i40e: parse NVGRE filter Beilei Xing
@ 2016-12-02 11:53 ` Beilei Xing
2016-12-02 11:53 ` [dpdk-dev] [PATCH 16/24] net/i40e: add flow create function Beilei Xing
` (9 subsequent siblings)
24 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2016-12-02 11:53 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev, wenzhuo.lu
Check if the rule is a flow director rule, and get the
flow director info.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_ethdev.c | 537 +++++++++++++++++++++++++++++++++++++++++
1 file changed, 537 insertions(+)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 1ffafa0..12255fa 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -502,6 +502,11 @@ static int i40e_parse_tunnel_filter(const struct rte_flow_attr *attr,
const struct rte_flow_action *actions,
struct rte_eth_tunnel_filter_conf *filter,
struct rte_flow_error *error);
+static int i40e_parse_fdir_filter(const struct rte_flow_attr *attr,
+ const struct rte_flow_item *pattern,
+ const struct rte_flow_action *actions,
+ struct rte_eth_fdir_filter *filter,
+ struct rte_flow_error *error);
static int i40e_flow_validate(__rte_unused struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
const struct rte_flow_item *pattern,
@@ -11006,6 +11011,532 @@ i40e_parse_tunnel_filter(const struct rte_flow_attr *attr,
return ret;
}
+/**
+ * Parse the rule to see if it is a flow director rule.
+ * And get the flow director filter info.
+ */
+static int
+i40e_parse_fdir_filter(const struct rte_flow_attr *attr,
+ const struct rte_flow_item *pattern,
+ const struct rte_flow_action *actions,
+ struct rte_eth_fdir_filter *filter,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item *item;
+ const struct rte_flow_action *act;
+ const struct rte_flow_item_eth *eth_spec, *eth_mask;
+ const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
+ const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
+ const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
+ const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
+ const struct rte_flow_item_udp *udp_spec, *udp_mask;
+ const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
+ const struct rte_flow_item_vf *vf_spec;
+ const struct rte_flow_action_mark *mark_spec;
+ const struct rte_flow_action_queue *act_q;
+ struct ether_addr macaddr_masked = {
+ .addr_bytes = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0}
+ };
+ uint32_t i;
+ uint32_t flow_type = RTE_ETH_FLOW_UNKNOWN;
+ enum i40e_filter_pctype pctype;
+ uint64_t input_set = I40E_INSET_NONE;
+ uint16_t l3 = 0;
+ uint16_t flag_offset;
+
+ /* parse pattern */
+ i = 0;
+
+ /* The first not void item should be ETH or IPv4 or IPv6 */
+ PATTERN_SKIP_VOID(filter, struct rte_eth_fdir_filter,
+ RTE_FLOW_ERROR_TYPE_ITEM_NUM);
+ if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
+ item->type != RTE_FLOW_ITEM_TYPE_VLAN &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
+ error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+ return -EINVAL;
+ }
+
+ /* Check if the not void item is ETH. */
+ if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
+ eth_spec = (const struct rte_flow_item_eth *)item->spec;
+ eth_mask = (const struct rte_flow_item_eth *)item->mask;
+
+ if ((!eth_spec && eth_mask) || (eth_spec && !eth_mask)) {
+ error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+ return -EINVAL;
+ }
+
+ if (eth_spec) {
+ filter->input.flow.l2_flow.ether_type =
+ (uint16_t)eth_spec->type;
+ }
+
+ if (eth_mask) {
+ /* SRC address and DST address should be masked. */
+ if (!is_same_ether_addr(ð_mask->src,
+ &macaddr_masked) ||
+ !is_same_ether_addr(ð_mask->dst,
+ &macaddr_masked)) {
+ error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+ return -EINVAL;
+ }
+
+ if (eth_mask->type && eth_mask->type != 0xFFFF) {
+ error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+ return -EINVAL;
+ }
+
+ flow_type = RTE_ETH_FLOW_L2_PAYLOAD;
+ input_set |= I40E_INSET_LAST_ETHER_TYPE;
+ }
+
+ i++;
+ PATTERN_SKIP_VOID(filter, struct rte_eth_fdir_filter,
+ RTE_FLOW_ERROR_TYPE_ITEM);
+ if (item->type != RTE_FLOW_ITEM_TYPE_VLAN &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
+ item->type != RTE_FLOW_ITEM_TYPE_VF &&
+ item->type != RTE_FLOW_ITEM_TYPE_END) {
+ error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+ return -EINVAL;
+ }
+ }
+
+ /* Check if the not void item is VLAN. */
+ if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
+ vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
+ vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
+
+ if (i == 1) {
+ if ((vlan_spec && !vlan_mask) ||
+ (!vlan_spec && vlan_mask)) {
+ error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+ return -EINVAL;
+ }
+ } else if (i == 2) {
+ if (!vlan_spec || !vlan_mask) {
+ error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+ return -EINVAL;
+ }
+ }
+
+ if (vlan_spec && vlan_mask) {
+ filter->input.flow_ext.vlan_tci =
+ rte_be_to_cpu_16(vlan_spec->tci) & 0x0FFF;
+ if (vlan_mask->tpid ||
+ (vlan_mask->tci && vlan_mask->tci !=
+ rte_cpu_to_be_16(0x0FFF))) {
+ error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+ return -EINVAL;
+ }
+
+ input_set |= I40E_INSET_VLAN_INNER;
+ }
+
+ /* Check if the next not void item is IPV4 or IPV6. */
+ i++;
+ PATTERN_SKIP_VOID(filter, struct rte_eth_fdir_filter,
+ RTE_FLOW_ERROR_TYPE_ITEM);
+ if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
+ error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+ return -EINVAL;
+ }
+ }
+
+ /* Check if the not void item is IPV4. */
+ if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
+ l3 = RTE_FLOW_ITEM_TYPE_IPV4;
+ ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
+ ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
+
+ /* Check if it is fragment. */
+ flag_offset = rte_be_to_cpu_16(ipv4_spec->hdr.fragment_offset);
+ if (flag_offset & !IPV4_HDR_MF_FLAG)
+ flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_OTHER;
+ if (flag_offset & IPV4_HDR_OFFSET_MASK ||
+ flag_offset & IPV4_HDR_MF_FLAG)
+ flow_type = RTE_ETH_FLOW_FRAG_IPV4;
+
+ if (ipv4_spec) {
+ filter->input.flow.ip4_flow.proto =
+ ipv4_spec->hdr.next_proto_id;
+ filter->input.flow.ip4_flow.tos =
+ ipv4_spec->hdr.type_of_service;
+ filter->input.flow.ip4_flow.ttl =
+ ipv4_spec->hdr.time_to_live;
+ filter->input.flow.ip4_flow.src_ip =
+ ipv4_spec->hdr.src_addr;
+ filter->input.flow.ip4_flow.dst_ip =
+ ipv4_spec->hdr.dst_addr;
+ }
+
+ if (ipv4_mask) {
+ if (ipv4_mask->hdr.version_ihl ||
+ ipv4_mask->hdr.total_length ||
+ ipv4_mask->hdr.packet_id ||
+ ipv4_mask->hdr.fragment_offset ||
+ ipv4_mask->hdr.hdr_checksum) {
+ error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+ return -EINVAL;
+ }
+
+ if (ipv4_mask->hdr.src_addr == 0xFFFFFFFF)
+ input_set |= I40E_INSET_IPV4_SRC;
+ if (ipv4_mask->hdr.dst_addr == 0xFFFFFFFF)
+ input_set |= I40E_INSET_IPV4_DST;
+ if (ipv4_mask->hdr.type_of_service == 0xFF)
+ input_set |= I40E_INSET_IPV4_TOS;
+ if (ipv4_mask->hdr.time_to_live == 0xFF)
+ input_set |= I40E_INSET_IPV4_TTL;
+ if (ipv4_mask->hdr.next_proto_id == 0xFF)
+ input_set |= I40E_INSET_IPV4_PROTO;
+ }
+ }
+
+ /* Check if the not void item is IPV6. */
+ if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
+ l3 = RTE_FLOW_ITEM_TYPE_IPV6;
+ ipv6_spec = (const struct rte_flow_item_ipv6 *)item->spec;
+ ipv6_mask = (const struct rte_flow_item_ipv6 *)item->mask;
+
+ /* Check if it is fragment. */
+ if (ipv6_spec->hdr.proto == 44)
+ flow_type = RTE_ETH_FLOW_FRAG_IPV6;
+ else
+ flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_OTHER;
+
+ uint32_t j;
+
+ if (ipv6_spec) {
+ filter->input.flow.ipv6_flow.tc =
+ (uint8_t)(ipv6_spec->hdr.vtc_flow << 4);
+ filter->input.flow.ipv6_flow.proto =
+ ipv6_spec->hdr.proto;
+ filter->input.flow.ipv6_flow.hop_limits =
+ ipv6_spec->hdr.hop_limits;
+
+ rte_memcpy(filter->input.flow.ipv6_flow.src_ip,
+ ipv6_spec->hdr.src_addr, 16);
+ rte_memcpy(filter->input.flow.ipv6_flow.dst_ip,
+ ipv6_spec->hdr.dst_addr, 16);
+ }
+
+ if (ipv6_mask) {
+ if (ipv6_mask->hdr.payload_len) {
+ error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+ return -EINVAL;
+ }
+
+ for (j = 0; j < RTE_DIM(ipv6_mask->hdr.src_addr); j++) {
+ if (ipv6_mask->hdr.src_addr[j] != 0xFF ||
+ ipv6_mask->hdr.dst_addr[j] != 0xFF) {
+ error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+ return -EINVAL;
+ }
+ }
+
+ input_set |= I40E_INSET_IPV6_SRC;
+ input_set |= I40E_INSET_IPV6_DST;
+
+ if ((ipv6_mask->hdr.vtc_flow & rte_cpu_to_be_16(0xFF0))
+ == rte_cpu_to_be_16(0xFF0))
+ input_set |= I40E_INSET_IPV6_TC;
+ if (ipv6_mask->hdr.proto == 0xFF)
+ input_set |= I40E_INSET_IPV6_NEXT_HDR;
+ if (ipv6_mask->hdr.hop_limits == 0xFF)
+ input_set |= I40E_INSET_IPV6_HOP_LIMIT;
+ }
+ }
+
+ if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
+ item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
+ /* Check the next not void item */
+ i++;
+ PATTERN_SKIP_VOID(filter, struct rte_eth_fdir_filter,
+ RTE_FLOW_ERROR_TYPE_ITEM);
+ if ((flow_type == RTE_ETH_FLOW_FRAG_IPV4) ||
+ (flow_type == RTE_ETH_FLOW_FRAG_IPV6)) {
+ if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+ return -EINVAL;
+ }
+ } else {
+ if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
+ item->type != RTE_FLOW_ITEM_TYPE_UDP &&
+ item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
+ item->type != RTE_FLOW_ITEM_TYPE_VF &&
+ item->type != RTE_FLOW_ITEM_TYPE_END) {
+ error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+ return -EINVAL;
+ }
+ }
+ }
+
+ /* Check if the next not void item is TCP. */
+ if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
+ tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
+ tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
+
+ if (!tcp_spec || !tcp_mask) {
+ error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+ return -EINVAL;
+ }
+
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+ flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
+ else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+ flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_TCP;
+
+ if (tcp_spec) {
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
+ filter->input.flow.tcp4_flow.src_port =
+ tcp_spec->hdr.src_port;
+ filter->input.flow.tcp4_flow.dst_port =
+ tcp_spec->hdr.dst_port;
+ } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
+ filter->input.flow.tcp6_flow.src_port =
+ tcp_spec->hdr.src_port;
+ filter->input.flow.tcp6_flow.dst_port =
+ tcp_spec->hdr.dst_port;
+ }
+ }
+
+ if (tcp_mask) {
+ if (tcp_mask->hdr.sent_seq ||
+ tcp_mask->hdr.recv_ack ||
+ tcp_mask->hdr.data_off ||
+ tcp_mask->hdr.tcp_flags ||
+ tcp_mask->hdr.rx_win ||
+ tcp_mask->hdr.cksum ||
+ tcp_mask->hdr.tcp_urp) {
+ error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+ return -EINVAL;
+ }
+
+ if ((tcp_mask->hdr.src_port != 0xFFFF &&
+ tcp_mask->hdr.src_port != 0) ||
+ (tcp_mask->hdr.dst_port != 0xFFFF &&
+ tcp_mask->hdr.dst_port != 0)) {
+ error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+ return -EINVAL;
+ }
+
+ if (tcp_mask->hdr.src_port == 0xFFFF)
+ input_set |= I40E_INSET_SRC_PORT;
+ if (tcp_mask->hdr.dst_port == 0xFFFF)
+ input_set |= I40E_INSET_DST_PORT;
+ }
+ }
+
+ /* Check if the not void item is UDP. */
+ if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
+ udp_spec = (const struct rte_flow_item_udp *)item->spec;
+ udp_mask = (const struct rte_flow_item_udp *)item->mask;
+
+ if (!udp_spec || !udp_mask) {
+ error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+ return -EINVAL;
+ }
+
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+ flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
+ else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+ flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_UDP;
+
+ if (udp_spec) {
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
+ filter->input.flow.udp4_flow.src_port =
+ udp_spec->hdr.src_port;
+ filter->input.flow.udp4_flow.dst_port =
+ udp_spec->hdr.dst_port;
+ } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
+ filter->input.flow.udp6_flow.src_port =
+ udp_spec->hdr.src_port;
+ filter->input.flow.udp6_flow.dst_port =
+ udp_spec->hdr.dst_port;
+ }
+ }
+
+ if (udp_mask) {
+ if (udp_mask->hdr.dgram_len ||
+ udp_mask->hdr.dgram_cksum) {
+ error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+ return -EINVAL;
+ }
+
+ if ((udp_mask->hdr.src_port != 0xFFFF &&
+ udp_mask->hdr.src_port != 0) ||
+ (udp_mask->hdr.dst_port != 0xFFFF &&
+ udp_mask->hdr.dst_port != 0)) {
+ error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+ return -EINVAL;
+ }
+
+ if (udp_mask->hdr.src_port == 0xFFFF)
+ input_set |= I40E_INSET_SRC_PORT;
+ if (udp_mask->hdr.dst_port == 0xFFFF)
+ input_set |= I40E_INSET_DST_PORT;
+ }
+ }
+
+ /* Check if the not void item is SCTP. */
+ if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
+ sctp_spec = (const struct rte_flow_item_sctp *)item->spec;
+ sctp_mask = (const struct rte_flow_item_sctp *)item->mask;
+
+ if (!sctp_spec || !sctp_mask) {
+ error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+ return -EINVAL;
+ }
+
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+ flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_SCTP;
+ else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+ flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_SCTP;
+
+ if (sctp_spec) {
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
+ filter->input.flow.sctp4_flow.src_port =
+ sctp_spec->hdr.src_port;
+ filter->input.flow.sctp4_flow.dst_port =
+ sctp_spec->hdr.dst_port;
+ filter->input.flow.sctp4_flow.verify_tag =
+ sctp_spec->hdr.tag;
+ } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
+ filter->input.flow.sctp6_flow.src_port =
+ sctp_spec->hdr.src_port;
+ filter->input.flow.sctp6_flow.dst_port =
+ sctp_spec->hdr.dst_port;
+ filter->input.flow.sctp6_flow.verify_tag =
+ sctp_spec->hdr.tag;
+ }
+ }
+
+ if (sctp_mask) {
+ if (sctp_mask->hdr.cksum) {
+ error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+ return -EINVAL;
+ }
+
+ if ((sctp_mask->hdr.src_port != 0xFFFF &&
+ sctp_mask->hdr.src_port != 0) ||
+ (sctp_mask->hdr.dst_port != 0xFFFF &&
+ sctp_mask->hdr.dst_port != 0) ||
+ (sctp_mask->hdr.tag != 0xFFFFFFFF &&
+ sctp_mask->hdr.tag != 0)) {
+ error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+ return -EINVAL;
+ }
+
+ if (sctp_mask->hdr.src_port == 0xFFFF)
+ input_set |= I40E_INSET_SRC_PORT;
+ if (sctp_mask->hdr.dst_port == 0xFFFF)
+ input_set |= I40E_INSET_DST_PORT;
+ if (sctp_mask->hdr.tag == 0xFFFFFFFF)
+ input_set |= I40E_INSET_SCTP_VT;
+ }
+ }
+
+ if (item->type == RTE_FLOW_ITEM_TYPE_TCP ||
+ item->type == RTE_FLOW_ITEM_TYPE_UDP ||
+ item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
+ i++;
+ PATTERN_SKIP_VOID(filter, struct rte_eth_fdir_filter,
+ RTE_FLOW_ERROR_TYPE_ITEM);
+ if (item->type != RTE_FLOW_ITEM_TYPE_VF &&
+ item->type != RTE_FLOW_ITEM_TYPE_END) {
+ error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+ return -EINVAL;
+ }
+ }
+
+ /* Check if the next not void item is VF. */
+ if (item->type == RTE_FLOW_ITEM_TYPE_VF) {
+ vf_spec = (const struct rte_flow_item_vf *)item->spec;
+ filter->input.flow_ext.is_vf = 1;
+ filter->input.flow_ext.dst_id = vf_spec->id;
+ }
+
+ if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ /* Check if the next not void item is END. */
+ i++;
+ PATTERN_SKIP_VOID(filter, struct rte_eth_fdir_filter,
+ RTE_FLOW_ERROR_TYPE_ITEM);
+ if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+ return -EINVAL;
+ }
+ }
+
+ pctype = i40e_flowtype_to_pctype(flow_type);
+ if (pctype == 0 || pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD) {
+ PMD_DRV_LOG(ERR, "Not supported flow type (%u)",
+ conf->flow_type);
+ error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+ return -EINVAL;
+ }
+
+ if (input_set != i40e_get_default_input_set(pctype)) {
+ PMD_DRV_LOG(ERR, "Invalid input set");
+ error->type = RTE_FLOW_ERROR_TYPE_ITEM;
+ return -EINVAL;
+ }
+
+ filter->input.flow_type = flow_type;
+
+ /* parse action */
+ i = 0;
+
+ /* Check if the first not void action is QUEUE or DROP. */
+ ACTION_SKIP_VOID(filter, struct rte_eth_fdir_filter,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM);
+ if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
+ act->type != RTE_FLOW_ACTION_TYPE_DROP) {
+ error->type = RTE_FLOW_ERROR_TYPE_ACTION;
+ return -EINVAL;
+ }
+
+ act_q = (const struct rte_flow_action_queue *)act->conf;
+ filter->action.flex_off = 0;
+ if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE)
+ filter->action.behavior = RTE_ETH_FDIR_ACCEPT;
+ else
+ filter->action.behavior = RTE_ETH_FDIR_REJECT;
+
+ filter->action.report_status = RTE_ETH_FDIR_REPORT_ID;
+ filter->action.rx_queue = act_q->index;
+
+ /* Check if the next not void item is MARK or END. */
+ i++;
+ ACTION_SKIP_VOID(filter, struct rte_eth_fdir_filter,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM);
+ if (act->type != RTE_FLOW_ACTION_TYPE_MARK &&
+ act->type != RTE_FLOW_ACTION_TYPE_END) {
+ error->type = RTE_FLOW_ERROR_TYPE_ACTION;
+ return -EINVAL;
+ }
+
+ if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
+ mark_spec = (const struct rte_flow_action_mark *)act->conf;
+ filter->soft_id = mark_spec->id;
+
+ /* Check if the next not void item is END. */
+ i++;
+ ACTION_SKIP_VOID(filter, struct rte_eth_fdir_filter,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ error->type = RTE_FLOW_ERROR_TYPE_ACTION;
+ return -EINVAL;
+ }
+ }
+
+ return i40e_parse_attr(attr, error);
+}
+
static int
i40e_flow_validate(__rte_unused struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
@@ -11014,6 +11545,7 @@ i40e_flow_validate(__rte_unused struct rte_eth_dev *dev,
struct rte_flow_error *error)
{
struct rte_eth_ethertype_filter ethertype_filter;
+ struct rte_eth_fdir_filter fdir_filter;
struct rte_eth_mac_filter macvlan_filter;
struct rte_eth_tunnel_filter_conf tunnel_filter;
int ret;
@@ -11023,6 +11555,11 @@ i40e_flow_validate(__rte_unused struct rte_eth_dev *dev,
if (!ret)
return 0;
+ ret = i40e_parse_fdir_filter(attr, pattern, actions,
+ &fdir_filter, error);
+ if (!ret)
+ return 0;
+
ret = i40e_parse_macvlan_filter(attr, pattern, actions,
&macvlan_filter, error);
if (!ret)
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH 16/24] net/i40e: add flow create function
2016-12-02 11:53 [dpdk-dev] [PATCH 00/24] net/i40e: Consistent filter API Beilei Xing
` (14 preceding siblings ...)
2016-12-02 11:53 ` [dpdk-dev] [PATCH 15/24] net/i40e: parse flow director filter Beilei Xing
@ 2016-12-02 11:53 ` Beilei Xing
2016-12-02 11:53 ` [dpdk-dev] [PATCH 17/24] net/i40e: destroy ethertype filter Beilei Xing
` (8 subsequent siblings)
24 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2016-12-02 11:53 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev, wenzhuo.lu
This patch adds flow create ops.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_ethdev.c | 93 ++++++++++++++++++++++++++++++++++++++++++
drivers/net/i40e/i40e_ethdev.h | 3 ++
drivers/net/i40e/i40e_fdir.c | 2 +-
3 files changed, 97 insertions(+), 1 deletion(-)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 12255fa..ac93489 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -512,6 +512,17 @@ static int i40e_flow_validate(__rte_unused struct rte_eth_dev *dev,
const struct rte_flow_item *pattern,
const struct rte_flow_action *actions,
struct rte_flow_error *error);
+static struct i40e_flow *i40e_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item *pattern,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error);
+
+struct i40e_flow {
+ enum rte_filter_type filter_type;
+ void *rule;
+ struct i40e_vsi *vsi;
+};
static const struct rte_pci_id pci_id_i40e_map[] = {
{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710) },
@@ -609,6 +620,7 @@ static const struct eth_dev_ops i40e_eth_dev_ops = {
static const struct rte_flow_ops i40e_flow_ops = {
.validate = i40e_flow_validate,
+ .create = (void *)i40e_flow_create,
};
/* store statistics names and its offset in stats structure */
@@ -11572,3 +11584,84 @@ i40e_flow_validate(__rte_unused struct rte_eth_dev *dev,
return ret;
}
+
+/**
+ * Create a flow rule: try each filter parser in turn, program the first
+ * one that accepts the pattern into hardware, and remember the tail of
+ * the matching software list as the rule handle for later destroy.
+ *
+ * Returns the new flow handle, or NULL with error->type set on failure.
+ */
+static struct i40e_flow *
+i40e_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item *pattern,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct rte_eth_ethertype_filter ethertype_filter;
+ struct rte_eth_fdir_filter fdir_filter;
+ struct rte_eth_mac_filter macvlan_filter;
+ struct rte_eth_tunnel_filter_conf tunnel_filter;
+ struct i40e_flow *flow = NULL;
+ int ret;
+
+ flow = rte_zmalloc("i40e_flow", sizeof(struct i40e_flow), 0);
+ if (!flow) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ /* Report the failure to the caller instead of a bare NULL. */
+ error->type = RTE_FLOW_ERROR_TYPE_HANDLE;
+ return NULL;
+ }
+
+ memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
+ ret = cons_parse_ethertype_filter(attr, pattern, actions,
+ &ethertype_filter, error);
+ if (!ret) {
+ ret = i40e_ethertype_filter_set(pf, &ethertype_filter, 1);
+ if (ret)
+ goto free;
+ flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
+ /* filter_set appends to the list tail; that node is the rule. */
+ flow->rule = TAILQ_LAST(&pf->ethertype.ethertype_list,
+ i40e_ethertype_filter_list);
+ return flow;
+ }
+
+ memset(&fdir_filter, 0, sizeof(struct rte_eth_fdir_filter));
+ ret = i40e_parse_fdir_filter(attr, pattern, actions,
+ &fdir_filter, error);
+ if (!ret) {
+ ret = i40e_add_del_fdir_filter(dev, &fdir_filter, 1);
+ if (ret)
+ goto free;
+ flow->filter_type = RTE_ETH_FILTER_FDIR;
+ flow->rule = TAILQ_LAST(&pf->fdir.fdir_list,
+ i40e_fdir_filter_list);
+ return flow;
+ }
+
+ memset(&macvlan_filter, 0, sizeof(struct rte_eth_mac_filter));
+ ret = i40e_parse_macvlan_filter(attr, pattern, actions,
+ &macvlan_filter, error);
+ if (!ret) {
+ struct i40e_vsi *vsi;
+
+ /* dst_id indexes pf->vfs[]; reject out-of-range VF ids before
+  * programming hardware or dereferencing the VSI.
+  */
+ if (macvlan_filter.dst_id >= pf->vf_num)
+ goto free;
+ ret = i40e_vf_mac_filter_set(pf, &macvlan_filter, 1);
+ if (ret)
+ goto free;
+ flow->filter_type = RTE_ETH_FILTER_MACVLAN;
+ vsi = pf->vfs[macvlan_filter.dst_id].vsi;
+ flow->vsi = vsi;
+ flow->rule = TAILQ_LAST(&vsi->mac_list, i40e_mac_filter_list);
+ return flow;
+ }
+
+ memset(&tunnel_filter, 0, sizeof(struct rte_eth_tunnel_filter_conf));
+ ret = i40e_parse_tunnel_filter(attr, pattern, actions,
+ &tunnel_filter, error);
+ if (!ret) {
+ ret = i40e_dev_tunnel_filter_set(pf, &tunnel_filter, 1);
+ if (ret)
+ goto free;
+ flow->filter_type = RTE_ETH_FILTER_TUNNEL;
+ flow->rule = TAILQ_LAST(&pf->tunnel.tunnel_list,
+ i40e_tunnel_filter_list);
+ return flow;
+ }
+
+ /* No parser accepted the rule; error info was filled in by the
+  * last parser that rejected it.
+  */
+ rte_free(flow);
+ return NULL;
+
+free:
+ /* Parsing succeeded but programming (or validation) failed. */
+ error->type = RTE_FLOW_ERROR_TYPE_HANDLE;
+ rte_free(flow);
+ return NULL;
+}
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 35ac6d6..9b60651 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -684,6 +684,9 @@ int i40e_select_filter_input_set(struct i40e_hw *hw,
struct rte_eth_input_set_conf *conf,
enum rte_filter_type filter);
void i40e_fdir_filter_restore(struct i40e_pf *pf);
+int i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
+ const struct rte_eth_fdir_filter *filter,
+ bool add);
int i40e_hash_filter_inset_select(struct i40e_hw *hw,
struct rte_eth_input_set_conf *conf);
int i40e_fdir_filter_inset_select(struct i40e_pf *pf,
diff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c
index e47a949..30fcd5c 100644
--- a/drivers/net/i40e/i40e_fdir.c
+++ b/drivers/net/i40e/i40e_fdir.c
@@ -1093,7 +1093,7 @@ i40e_sw_fdir_filter_del(struct i40e_pf *pf, struct i40e_fdir_filter *filter)
* @filter: fdir filter entry
* @add: 0 - delete, 1 - add
*/
-static int
+int
i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
const struct rte_eth_fdir_filter *filter,
bool add)
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH 17/24] net/i40e: destroy ethertype filter
2016-12-02 11:53 [dpdk-dev] [PATCH 00/24] net/i40e: Consistent filter API Beilei Xing
` (15 preceding siblings ...)
2016-12-02 11:53 ` [dpdk-dev] [PATCH 16/24] net/i40e: add flow create function Beilei Xing
@ 2016-12-02 11:53 ` Beilei Xing
2016-12-02 11:53 ` [dpdk-dev] [PATCH 18/24] net/i40e: destroy macvlan filter Beilei Xing
` (7 subsequent siblings)
24 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2016-12-02 11:53 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev, wenzhuo.lu
This patch adds a function to destroy the ethertype filter.
And this patch also adds flow destroy function.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_ethdev.c | 69 ++++++++++++++++++++++++++++++++++++++++++
1 file changed, 69 insertions(+)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index ac93489..a3ed1f0 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -517,6 +517,11 @@ static struct i40e_flow *i40e_flow_create(struct rte_eth_dev *dev,
const struct rte_flow_item *pattern,
const struct rte_flow_action *actions,
struct rte_flow_error *error);
+static int i40e_dev_destroy_ethertype_filter(struct i40e_pf *pf,
+ struct i40e_ethertype_filter *filter);
+static int i40e_flow_destroy(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ struct rte_flow_error *error);
struct i40e_flow {
enum rte_filter_type filter_type;
@@ -621,6 +626,7 @@ static const struct eth_dev_ops i40e_eth_dev_ops = {
static const struct rte_flow_ops i40e_flow_ops = {
.validate = i40e_flow_validate,
.create = (void *)i40e_flow_create,
+ .destroy = i40e_flow_destroy,
};
/* store statistics names and its offset in stats structure */
@@ -11665,3 +11671,66 @@ i40e_flow_create(struct rte_eth_dev *dev,
rte_free(flow);
return NULL;
}
+
+/**
+ * Destroy one ethertype filter: remove it from hardware via the admin
+ * queue and delete the matching node from the software list.
+ *
+ * Returns 0 on success, -EINVAL if the filter is not in the software
+ * list, or a negative AQ error code.
+ */
+static int
+i40e_dev_destroy_ethertype_filter(struct i40e_pf *pf,
+ struct i40e_ethertype_filter *filter)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_ethertype_info *ethertype_info = &pf->ethertype;
+ struct i40e_ethertype_filter *node;
+ struct i40e_control_filter_stats stats;
+ uint16_t flags = 0;
+ int ret = 0;
+
+ /* Look up the software node first: if it is absent we must not
+  * touch hardware, otherwise HW and SW state would diverge.
+  */
+ node = i40e_sw_ethertype_filter_lookup(ethertype_info, &filter->input);
+ if (!node)
+ return -EINVAL;
+
+ /* Rebuild the AQ flags the filter was originally added with. */
+ if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
+ flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
+ if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
+ flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
+ flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
+
+ memset(&stats, 0, sizeof(stats));
+ ret = i40e_aq_add_rem_control_packet_filter(hw,
+ filter->input.mac_addr.addr_bytes,
+ filter->input.ether_type,
+ flags, pf->main_vsi->seid,
+ filter->queue, 0, &stats, NULL);
+ if (ret < 0)
+ return ret;
+
+ return i40e_sw_ethertype_filter_del(pf, node);
+}
+
+/**
+ * Destroy a flow rule previously returned by i40e_flow_create.
+ * Dispatches on the stored filter type; frees the flow handle itself
+ * once the underlying filter has been removed.
+ *
+ * Returns 0 on success; on failure sets error->type and returns a
+ * negative value.
+ */
+static int
+i40e_flow_destroy(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_flow *pmd_flow = (struct i40e_flow *)flow;
+ enum rte_filter_type filter_type = pmd_flow->filter_type;
+ int ret;
+
+ switch (filter_type) {
+ case RTE_ETH_FILTER_ETHERTYPE:
+ ret = i40e_dev_destroy_ethertype_filter(pf,
+ (struct i40e_ethertype_filter *)pmd_flow->rule);
+ break;
+ default:
+ PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
+ filter_type);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (!ret) {
+ /* The handle was allocated in i40e_flow_create; release it
+  * here, otherwise every destroyed rule leaks an i40e_flow.
+  */
+ rte_free(pmd_flow);
+ } else {
+ error->type = RTE_FLOW_ERROR_TYPE_HANDLE;
+ }
+
+ return ret;
+}
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH 18/24] net/i40e: destroy macvlan filter
2016-12-02 11:53 [dpdk-dev] [PATCH 00/24] net/i40e: Consistent filter API Beilei Xing
` (16 preceding siblings ...)
2016-12-02 11:53 ` [dpdk-dev] [PATCH 17/24] net/i40e: destroy ethertype filter Beilei Xing
@ 2016-12-02 11:53 ` Beilei Xing
2016-12-02 11:53 ` [dpdk-dev] [PATCH 19/24] net/i40e: destroy tunnel filter Beilei Xing
` (6 subsequent siblings)
24 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2016-12-02 11:53 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev, wenzhuo.lu
This patch adds a function to destroy the macvlan filter.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_ethdev.c | 30 ++++++++++++++++++++++++++++++
1 file changed, 30 insertions(+)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index a3ed1f0..fddd46d 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -519,6 +519,9 @@ static struct i40e_flow *i40e_flow_create(struct rte_eth_dev *dev,
struct rte_flow_error *error);
static int i40e_dev_destroy_ethertype_filter(struct i40e_pf *pf,
struct i40e_ethertype_filter *filter);
+static int i40e_dev_destroy_macvlan_filter(struct i40e_pf *pf,
+ struct i40e_vsi *vsi,
+ struct i40e_mac_filter *filter);
static int i40e_flow_destroy(struct rte_eth_dev *dev,
struct rte_flow *flow,
struct rte_flow_error *error);
@@ -11708,6 +11711,29 @@ i40e_dev_destroy_ethertype_filter(struct i40e_pf *pf,
}
static int
+/* Destroy one macvlan filter on the given VSI: remove the MAC address
+ * from the VSI and clear the cached device address if it was the one
+ * removed. Returns 0 on success, -1 on failure.
+ */
+i40e_dev_destroy_macvlan_filter(struct i40e_pf *pf,
+ struct i40e_vsi *vsi,
+ struct i40e_mac_filter *filter)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ int ret;
+
+ /* NOTE(review): restores hw->mac.addr from the permanent MAC before
+  * deleting the filter — presumably so the port falls back to its
+  * factory address; confirm this side effect is intended here.
+  */
+ (void)rte_memcpy(hw->mac.addr, hw->mac.perm_addr,
+ ETHER_ADDR_LEN);
+ ret = i40e_vsi_delete_mac(vsi, &filter->mac_info.mac_addr);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to delete MAC filter.");
+ return -1;
+ }
+
+ /* Clear device address as it has been removed. */
+ if (is_same_ether_addr(&pf->dev_addr, &filter->mac_info.mac_addr))
+ memset(&pf->dev_addr, 0, sizeof(struct ether_addr));
+
+ return 0;
+}
+
+static int
i40e_flow_destroy(struct rte_eth_dev *dev,
struct rte_flow *flow,
struct rte_flow_error *error)
@@ -11722,6 +11748,10 @@ i40e_flow_destroy(struct rte_eth_dev *dev,
ret = i40e_dev_destroy_ethertype_filter(pf,
(struct i40e_ethertype_filter *)pmd_flow->rule);
break;
+ case RTE_ETH_FILTER_MACVLAN:
+ ret = i40e_dev_destroy_macvlan_filter(pf,
+ pmd_flow->vsi, (struct i40e_mac_filter *)pmd_flow->rule);
+ break;
default:
PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
filter_type);
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH 19/24] net/i40e: destroy tunnel filter
2016-12-02 11:53 [dpdk-dev] [PATCH 00/24] net/i40e: Consistent filter API Beilei Xing
` (17 preceding siblings ...)
2016-12-02 11:53 ` [dpdk-dev] [PATCH 18/24] net/i40e: destroy macvlan filter Beilei Xing
@ 2016-12-02 11:53 ` Beilei Xing
2016-12-02 11:53 ` [dpdk-dev] [PATCH 20/24] net/i40e: destroy flow directory filter Beilei Xing
` (5 subsequent siblings)
24 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2016-12-02 11:53 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev, wenzhuo.lu
This patch adds a function to destroy the tunnel filter.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_ethdev.c | 43 ++++++++++++++++++++++++++++++++++++++++++
1 file changed, 43 insertions(+)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index fddd46d..4847c04 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -522,6 +522,8 @@ static int i40e_dev_destroy_ethertype_filter(struct i40e_pf *pf,
static int i40e_dev_destroy_macvlan_filter(struct i40e_pf *pf,
struct i40e_vsi *vsi,
struct i40e_mac_filter *filter);
+static int i40e_dev_destroy_tunnel_filter(struct i40e_pf *pf,
+ struct i40e_tunnel_filter *filter);
static int i40e_flow_destroy(struct rte_eth_dev *dev,
struct rte_flow *flow,
struct rte_flow_error *error);
@@ -11734,6 +11736,43 @@ i40e_dev_destroy_macvlan_filter(struct i40e_pf *pf,
}
static int
+/* Destroy one tunnel (cloud) filter: remove it from hardware via the
+ * admin queue and delete the matching node from the software list.
+ *
+ * Returns 0 on success, -EINVAL if the filter is not in the software
+ * list, or a negative AQ error code.
+ */
+i40e_dev_destroy_tunnel_filter(struct i40e_pf *pf,
+ struct i40e_tunnel_filter *filter)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_vsi *vsi = pf->main_vsi;
+ struct i40e_aqc_add_remove_cloud_filters_element_data cld_filter;
+ struct i40e_tunnel_info *tunnel_info = &pf->tunnel;
+ struct i40e_tunnel_filter *node;
+ int ret = 0;
+
+ /* Look up the software node first: if it is absent we must not
+  * touch hardware, otherwise HW and SW state would diverge.
+  */
+ node = i40e_sw_tunnel_filter_lookup(tunnel_info, &filter->input);
+ if (!node)
+ return -EINVAL;
+
+ /* Rebuild the AQ cloud-filter element from the stored rule. */
+ memset(&cld_filter, 0, sizeof(cld_filter));
+ ether_addr_copy((struct ether_addr *)&filter->input.outer_mac,
+ (struct ether_addr *)&cld_filter.outer_mac);
+ ether_addr_copy((struct ether_addr *)&filter->input.inner_mac,
+ (struct ether_addr *)&cld_filter.inner_mac);
+ cld_filter.inner_vlan = filter->input.inner_vlan;
+ cld_filter.flags = filter->input.flags;
+ cld_filter.tenant_id = filter->input.tenant_id;
+ rte_memcpy(&cld_filter.ipaddr, &filter->input.ipaddr,
+ sizeof(cld_filter.ipaddr));
+ cld_filter.queue_number = filter->queue;
+
+ ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
+ &cld_filter, 1);
+ if (ret < 0)
+ return ret;
+
+ return i40e_sw_tunnel_filter_del(pf, node);
+}
+
+static int
i40e_flow_destroy(struct rte_eth_dev *dev,
struct rte_flow *flow,
struct rte_flow_error *error)
@@ -11752,6 +11791,10 @@ i40e_flow_destroy(struct rte_eth_dev *dev,
ret = i40e_dev_destroy_macvlan_filter(pf,
pmd_flow->vsi, (struct i40e_mac_filter *)pmd_flow->rule);
break;
+ case RTE_ETH_FILTER_TUNNEL:
+ ret = i40e_dev_destroy_tunnel_filter(pf,
+ (struct i40e_tunnel_filter *)pmd_flow->rule);
+ break;
default:
PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
filter_type);
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH 20/24] net/i40e: destroy flow directory filter
2016-12-02 11:53 [dpdk-dev] [PATCH 00/24] net/i40e: Consistent filter API Beilei Xing
` (18 preceding siblings ...)
2016-12-02 11:53 ` [dpdk-dev] [PATCH 19/24] net/i40e: destroy tunnel filter Beilei Xing
@ 2016-12-02 11:53 ` Beilei Xing
2016-12-02 11:53 ` [dpdk-dev] [PATCH 21/24] net/i40e: add flow flush function Beilei Xing
` (4 subsequent siblings)
24 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2016-12-02 11:53 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev, wenzhuo.lu
This patch adds a function to destroy the flow director filter.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_ethdev.c | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 4847c04..34e431b 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -11795,6 +11795,10 @@ i40e_flow_destroy(struct rte_eth_dev *dev,
ret = i40e_dev_destroy_tunnel_filter(pf,
(struct i40e_tunnel_filter *)pmd_flow->rule);
break;
+ case RTE_ETH_FILTER_FDIR:
+ ret = i40e_add_del_fdir_filter(dev,
+ &((struct i40e_fdir_filter *)pmd_flow->rule)->fdir, 0);
+ break;
default:
PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
filter_type);
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH 21/24] net/i40e: add flow flush function
2016-12-02 11:53 [dpdk-dev] [PATCH 00/24] net/i40e: Consistent filter API Beilei Xing
` (19 preceding siblings ...)
2016-12-02 11:53 ` [dpdk-dev] [PATCH 20/24] net/i40e: destroy flow directory filter Beilei Xing
@ 2016-12-02 11:53 ` Beilei Xing
2016-12-02 11:53 ` [dpdk-dev] [PATCH 22/24] net/i40e: flush ethertype filters Beilei Xing
` (3 subsequent siblings)
24 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2016-12-02 11:53 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev, wenzhuo.lu
This patch adds flow flush ops.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_ethdev.c | 17 +++++++++++++++++
drivers/net/i40e/i40e_ethdev.h | 1 +
drivers/net/i40e/i40e_fdir.c | 3 +--
3 files changed, 19 insertions(+), 2 deletions(-)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 34e431b..1a5b767 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -527,6 +527,8 @@ static int i40e_dev_destroy_tunnel_filter(struct i40e_pf *pf,
static int i40e_flow_destroy(struct rte_eth_dev *dev,
struct rte_flow *flow,
struct rte_flow_error *error);
+static int i40e_flow_flush(struct rte_eth_dev *dev,
+ struct rte_flow_error *error);
struct i40e_flow {
enum rte_filter_type filter_type;
@@ -632,6 +634,7 @@ static const struct rte_flow_ops i40e_flow_ops = {
.validate = i40e_flow_validate,
.create = (void *)i40e_flow_create,
.destroy = i40e_flow_destroy,
+ .flush = i40e_flow_flush,
};
/* store statistics names and its offset in stats structure */
@@ -11811,3 +11814,17 @@ i40e_flow_destroy(struct rte_eth_dev *dev,
return ret;
}
+
+/* Flush all flow rules. In this version only flow director filters are
+ * flushed (via i40e_fdir_flush); sets error->type on failure.
+ */
+static int
+i40e_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
+{
+ int ret = 0;
+
+ ret = i40e_fdir_flush(dev);
+ if (ret) {
+ error->type = RTE_FLOW_ERROR_TYPE_HANDLE;
+ return ret;
+ }
+
+ return ret;
+}
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 9b60651..8e6cc51 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -684,6 +684,7 @@ int i40e_select_filter_input_set(struct i40e_hw *hw,
struct rte_eth_input_set_conf *conf,
enum rte_filter_type filter);
void i40e_fdir_filter_restore(struct i40e_pf *pf);
+int i40e_fdir_flush(struct rte_eth_dev *dev);
int i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
const struct rte_eth_fdir_filter *filter,
bool add);
diff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c
index 30fcd5c..8329987 100644
--- a/drivers/net/i40e/i40e_fdir.c
+++ b/drivers/net/i40e/i40e_fdir.c
@@ -119,7 +119,6 @@ static int i40e_fdir_filter_programming(struct i40e_pf *pf,
enum i40e_filter_pctype pctype,
const struct rte_eth_fdir_filter *filter,
bool add);
-static int i40e_fdir_flush(struct rte_eth_dev *dev);
static int i40e_fdir_filter_convert(const struct rte_eth_fdir_filter *input,
struct i40e_fdir_filter *filter);
@@ -1315,7 +1314,7 @@ i40e_fdir_filter_programming(struct i40e_pf *pf,
* i40e_fdir_flush - clear all filters of Flow Director table
* @pf: board private structure
*/
-static int
+int
i40e_fdir_flush(struct rte_eth_dev *dev)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH 22/24] net/i40e: flush ethertype filters
2016-12-02 11:53 [dpdk-dev] [PATCH 00/24] net/i40e: Consistent filter API Beilei Xing
` (20 preceding siblings ...)
2016-12-02 11:53 ` [dpdk-dev] [PATCH 21/24] net/i40e: add flow flush function Beilei Xing
@ 2016-12-02 11:53 ` Beilei Xing
2016-12-02 11:53 ` [dpdk-dev] [PATCH 23/24] net/i40e: flush macvlan filters Beilei Xing
` (2 subsequent siblings)
24 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2016-12-02 11:53 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev, wenzhuo.lu
This patch is to flush all ethertype filters.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_ethdev.c | 23 +++++++++++++++++++++++
1 file changed, 23 insertions(+)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 1a5b767..e685c99 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -527,6 +527,7 @@ static int i40e_dev_destroy_tunnel_filter(struct i40e_pf *pf,
static int i40e_flow_destroy(struct rte_eth_dev *dev,
struct rte_flow *flow,
struct rte_flow_error *error);
+static int i40e_ethertype_filter_flush(struct i40e_pf *pf);
static int i40e_flow_flush(struct rte_eth_dev *dev,
struct rte_flow_error *error);
@@ -11815,9 +11816,25 @@ i40e_flow_destroy(struct rte_eth_dev *dev,
return ret;
}
+/* Flush all ethertype filters.
+ *
+ * Each successful destroy unlinks the list head, so the loop advances.
+ * Bail out on the first failure: retrying the same (still-linked) head
+ * node would spin forever.
+ */
+static int
+i40e_ethertype_filter_flush(struct i40e_pf *pf)
+{
+ struct i40e_ethertype_filter_list
+ *ethertype_list = &pf->ethertype.ethertype_list;
+ struct i40e_ethertype_filter *f;
+ int ret = 0;
+
+ while ((f = TAILQ_FIRST(ethertype_list))) {
+ ret = i40e_dev_destroy_ethertype_filter(pf, f);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
static int
i40e_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
int ret = 0;
ret = i40e_fdir_flush(dev);
@@ -11826,5 +11843,11 @@ i40e_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
return ret;
}
+ ret = i40e_ethertype_filter_flush(pf);
+ if (ret) {
+ error->type = RTE_FLOW_ERROR_TYPE_HANDLE;
+ return ret;
+ }
+
return ret;
}
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH 23/24] net/i40e: flush macvlan filters
2016-12-02 11:53 [dpdk-dev] [PATCH 00/24] net/i40e: Consistent filter API Beilei Xing
` (21 preceding siblings ...)
2016-12-02 11:53 ` [dpdk-dev] [PATCH 22/24] net/i40e: flush ethertype filters Beilei Xing
@ 2016-12-02 11:53 ` Beilei Xing
2016-12-02 11:53 ` [dpdk-dev] [PATCH 24/24] net/i40e: flush tunnel filters Beilei Xing
2016-12-27 6:26 ` [dpdk-dev] [PATCH v2 00/17] net/i40e: Consistent filter API Beilei Xing
24 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2016-12-02 11:53 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev, wenzhuo.lu
This patch is to flush all macvlan filters.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_ethdev.c | 24 ++++++++++++++++++++++++
1 file changed, 24 insertions(+)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index e685c99..26a8c5a 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -528,6 +528,7 @@ static int i40e_flow_destroy(struct rte_eth_dev *dev,
struct rte_flow *flow,
struct rte_flow_error *error);
static int i40e_ethertype_filter_flush(struct i40e_pf *pf);
+static int i40e_macvlan_filter_flush(struct i40e_pf *pf);
static int i40e_flow_flush(struct rte_eth_dev *dev,
struct rte_flow_error *error);
@@ -11831,6 +11832,23 @@ i40e_ethertype_filter_flush(struct i40e_pf *pf)
return ret;
}
+/* Flush all macvlan filters on every VF VSI.
+ *
+ * Each successful destroy unlinks the list head, so the inner loop
+ * advances. Bail out on the first failure: retrying the same
+ * (still-linked) head node would spin forever.
+ */
+static int
+i40e_macvlan_filter_flush(struct i40e_pf *pf)
+{
+ struct i40e_mac_filter *f;
+ struct i40e_vsi *vsi;
+ int i, ret = 0;
+
+ for (i = 0; i < pf->vf_num; i++) {
+ vsi = pf->vfs[i].vsi;
+ while ((f = TAILQ_FIRST(&vsi->mac_list))) {
+ ret = i40e_dev_destroy_macvlan_filter(pf, vsi, f);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
static int
i40e_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
@@ -11849,5 +11867,11 @@ i40e_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
return ret;
}
+ ret = i40e_macvlan_filter_flush(pf);
+ if (ret) {
+ error->type = RTE_FLOW_ERROR_TYPE_HANDLE;
+ return ret;
+ }
+
return ret;
}
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH 24/24] net/i40e: flush tunnel filters
2016-12-02 11:53 [dpdk-dev] [PATCH 00/24] net/i40e: Consistent filter API Beilei Xing
` (22 preceding siblings ...)
2016-12-02 11:53 ` [dpdk-dev] [PATCH 23/24] net/i40e: flush macvlan filters Beilei Xing
@ 2016-12-02 11:53 ` Beilei Xing
2016-12-27 6:26 ` [dpdk-dev] [PATCH v2 00/17] net/i40e: Consistent filter API Beilei Xing
24 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2016-12-02 11:53 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev, wenzhuo.lu
This patch is to flush all tunnel filters.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_ethdev.c | 22 ++++++++++++++++++++++
1 file changed, 22 insertions(+)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 26a8c5a..71d1f37 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -529,6 +529,7 @@ static int i40e_flow_destroy(struct rte_eth_dev *dev,
struct rte_flow_error *error);
static int i40e_ethertype_filter_flush(struct i40e_pf *pf);
static int i40e_macvlan_filter_flush(struct i40e_pf *pf);
+static int i40e_tunnel_filter_flush(struct i40e_pf *pf);
static int i40e_flow_flush(struct rte_eth_dev *dev,
struct rte_flow_error *error);
@@ -11849,6 +11850,21 @@ i40e_macvlan_filter_flush(struct i40e_pf *pf)
return ret;
}
+/* Flush all tunnel filters.
+ *
+ * Each successful destroy unlinks the list head, so the loop advances.
+ * Bail out on the first failure: retrying the same (still-linked) head
+ * node would spin forever.
+ */
+static int
+i40e_tunnel_filter_flush(struct i40e_pf *pf)
+{
+ struct i40e_tunnel_filter_list
+ *tunnel_list = &pf->tunnel.tunnel_list;
+ struct i40e_tunnel_filter *f;
+ int ret = 0;
+
+ while ((f = TAILQ_FIRST(tunnel_list))) {
+ ret = i40e_dev_destroy_tunnel_filter(pf, f);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
static int
i40e_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
@@ -11873,5 +11889,11 @@ i40e_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
return ret;
}
+ ret = i40e_tunnel_filter_flush(pf);
+ if (ret) {
+ error->type = RTE_FLOW_ERROR_TYPE_HANDLE;
+ return ret;
+ }
+
return ret;
}
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* Re: [dpdk-dev] [PATCH 10/24] ethdev: parse ethertype filter
2016-12-02 11:53 ` [dpdk-dev] [PATCH 10/24] ethdev: parse ethertype filter Beilei Xing
@ 2016-12-20 18:12 ` Ferruh Yigit
2016-12-21 3:54 ` Xing, Beilei
0 siblings, 1 reply; 175+ messages in thread
From: Ferruh Yigit @ 2016-12-20 18:12 UTC (permalink / raw)
To: Beilei Xing, jingjing.wu, helin.zhang; +Cc: dev, wenzhuo.lu, Adrien Mazarguil
On 12/2/2016 11:53 AM, Beilei Xing wrote:
> Check if the rule is a ethertype rule, and get the ethertype
> info BTW.
>
> Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
> Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> ---
CC: Adrien Mazarguil <adrien.mazarguil@6wind.com>
> lib/librte_ether/rte_flow.c | 136 +++++++++++++++++++++++++++++++++++++
> lib/librte_ether/rte_flow_driver.h | 34 ++++++++++
<...>
> diff --git a/lib/librte_ether/rte_flow_driver.h b/lib/librte_ether/rte_flow_driver.h
> index a88c621..2760c74 100644
> --- a/lib/librte_ether/rte_flow_driver.h
> +++ b/lib/librte_ether/rte_flow_driver.h
> @@ -170,6 +170,40 @@ rte_flow_error_set(struct rte_flow_error *error,
> const struct rte_flow_ops *
> rte_flow_ops_get(uint8_t port_id, struct rte_flow_error *error);
>
> +int cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
> + const struct rte_flow_item *pattern,
> + const struct rte_flow_action *actions,
> + struct rte_eth_ethertype_filter *filter,
> + struct rte_flow_error *error);
Although this is helper function, it may be good if it follows the
rte_follow namespace.
> +
> +#define PATTERN_SKIP_VOID(filter, filter_struct, error_type) \
> + do { \
> + if (!pattern) { \
> + memset(filter, 0, sizeof(filter_struct)); \
> + error->type = error_type; \
> + return -EINVAL; \
> + } \
> + item = pattern + i; \
I believe macros that relies on variables that not passed as argument is
not good idea.
> + while (item->type == RTE_FLOW_ITEM_TYPE_VOID) { \
> + i++; \
> + item = pattern + i; \
> + } \
> + } while (0)
> +
> +#define ACTION_SKIP_VOID(filter, filter_struct, error_type) \
> + do { \
> + if (!actions) { \
> + memset(filter, 0, sizeof(filter_struct)); \
> + error->type = error_type; \
> + return -EINVAL; \
> + } \
> + act = actions + i; \
> + while (act->type == RTE_FLOW_ACTION_TYPE_VOID) { \
> + i++; \
> + act = actions + i; \
> + } \
> + } while (0)
Are these macros generic enough for all rte_flow consumers?
What do you think separate this patch, and use these after applied,
meanwhile keeping function and MACROS PMD internal?
> +
> #ifdef __cplusplus
> }
> #endif
>
^ permalink raw reply [flat|nested] 175+ messages in thread
* Re: [dpdk-dev] [PATCH 10/24] ethdev: parse ethertype filter
2016-12-20 18:12 ` Ferruh Yigit
@ 2016-12-21 3:54 ` Xing, Beilei
2016-12-23 8:43 ` Adrien Mazarguil
0 siblings, 1 reply; 175+ messages in thread
From: Xing, Beilei @ 2016-12-21 3:54 UTC (permalink / raw)
To: Yigit, Ferruh, Wu, Jingjing, Zhang, Helin
Cc: dev, Lu, Wenzhuo, Adrien Mazarguil
Hi Ferruh,
> -----Original Message-----
> From: Yigit, Ferruh
> Sent: Wednesday, December 21, 2016 2:12 AM
> To: Xing, Beilei <beilei.xing@intel.com>; Wu, Jingjing
> <jingjing.wu@intel.com>; Zhang, Helin <helin.zhang@intel.com>
> Cc: dev@dpdk.org; Lu, Wenzhuo <wenzhuo.lu@intel.com>; Adrien Mazarguil
> <adrien.mazarguil@6wind.com>
> Subject: Re: [dpdk-dev] [PATCH 10/24] ethdev: parse ethertype filter
>
> On 12/2/2016 11:53 AM, Beilei Xing wrote:
> > Check if the rule is a ethertype rule, and get the ethertype info BTW.
> >
> > Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
> > Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> > ---
>
> CC: Adrien Mazarguil <adrien.mazarguil@6wind.com>
>
> > lib/librte_ether/rte_flow.c | 136
> +++++++++++++++++++++++++++++++++++++
> > lib/librte_ether/rte_flow_driver.h | 34 ++++++++++
>
> <...>
>
> > diff --git a/lib/librte_ether/rte_flow_driver.h
> > b/lib/librte_ether/rte_flow_driver.h
> > index a88c621..2760c74 100644
> > --- a/lib/librte_ether/rte_flow_driver.h
> > +++ b/lib/librte_ether/rte_flow_driver.h
> > @@ -170,6 +170,40 @@ rte_flow_error_set(struct rte_flow_error *error,
> > const struct rte_flow_ops * rte_flow_ops_get(uint8_t port_id, struct
> > rte_flow_error *error);
> >
> > +int cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
> > + const struct rte_flow_item *pattern,
> > + const struct rte_flow_action *actions,
> > + struct rte_eth_ethertype_filter *filter,
> > + struct rte_flow_error *error);
>
> Although this is helper function, it may be good if it follows the rte_follow
> namespace.
OK, I will rename it in the next version, thanks very much.
>
> > +
> > +#define PATTERN_SKIP_VOID(filter, filter_struct, error_type)
> \
> > + do { \
> > + if (!pattern) { \
> > + memset(filter, 0, sizeof(filter_struct)); \
> > + error->type = error_type; \
> > + return -EINVAL;
> \
> > + } \
> > + item = pattern + i; \
>
> I believe macros that relies on variables that not passed as argument is not
> good idea.
Yes, I'm reworking the macros, and it will be changed in v2.
>
> > + while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {
> \
> > + i++; \
> > + item = pattern + i; \
> > + } \
> > + } while (0)
> > +
> > +#define ACTION_SKIP_VOID(filter, filter_struct, error_type)
> \
> > + do { \
> > + if (!actions) { \
> > + memset(filter, 0, sizeof(filter_struct)); \
> > + error->type = error_type; \
> > + return -EINVAL;
> \
> > + } \
> > + act = actions + i; \
> > + while (act->type == RTE_FLOW_ACTION_TYPE_VOID) { \
> > + i++; \
> > + act = actions + i; \
> > + } \
> > + } while (0)
>
> Are these macros generic enough for all rte_flow consumers?
>
> What do you think separate this patch, and use these after applied,
> meanwhile keeping function and MACROS PMD internal?
The main purpose of the macros is to reduce the code in PMD, otherwise there'll be many such codes to get the next non-void item in all parse functions, including the parse_ethertype_filter function in rte_flow.c. But actually I'm not very sure if it's generic enough for all consumers, although I think it's general at present:)
Thanks for your advice, I'll move the macros to PMD currently, then there'll be no macros used in parse_ethertype_filter function, and optimize it after applied.
BTW, I plan to send out V2 patch set in this week.
Best Regards,
Beilei
>
> > +
> > #ifdef __cplusplus
> > }
> > #endif
> >
^ permalink raw reply [flat|nested] 175+ messages in thread
* Re: [dpdk-dev] [PATCH 10/24] ethdev: parse ethertype filter
2016-12-21 3:54 ` Xing, Beilei
@ 2016-12-23 8:43 ` Adrien Mazarguil
2016-12-27 6:36 ` Xing, Beilei
0 siblings, 1 reply; 175+ messages in thread
From: Adrien Mazarguil @ 2016-12-23 8:43 UTC (permalink / raw)
To: Xing, Beilei; +Cc: Yigit, Ferruh, Wu, Jingjing, Zhang, Helin, dev, Lu, Wenzhuo
Hi all,
On Wed, Dec 21, 2016 at 03:54:50AM +0000, Xing, Beilei wrote:
> Hi Ferruh,
>
> > -----Original Message-----
> > From: Yigit, Ferruh
> > Sent: Wednesday, December 21, 2016 2:12 AM
> > To: Xing, Beilei <beilei.xing@intel.com>; Wu, Jingjing
> > <jingjing.wu@intel.com>; Zhang, Helin <helin.zhang@intel.com>
> > Cc: dev@dpdk.org; Lu, Wenzhuo <wenzhuo.lu@intel.com>; Adrien Mazarguil
> > <adrien.mazarguil@6wind.com>
> > Subject: Re: [dpdk-dev] [PATCH 10/24] ethdev: parse ethertype filter
> >
> > On 12/2/2016 11:53 AM, Beilei Xing wrote:
> > > Check if the rule is a ethertype rule, and get the ethertype info BTW.
> > >
> > > Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
> > > Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> > > ---
> >
> > CC: Adrien Mazarguil <adrien.mazarguil@6wind.com>
Thanks again for CC'ing me.
> > > lib/librte_ether/rte_flow.c | 136
> > +++++++++++++++++++++++++++++++++++++
> > > lib/librte_ether/rte_flow_driver.h | 34 ++++++++++
> >
> > <...>
> >
> > > diff --git a/lib/librte_ether/rte_flow_driver.h
> > > b/lib/librte_ether/rte_flow_driver.h
> > > index a88c621..2760c74 100644
> > > --- a/lib/librte_ether/rte_flow_driver.h
> > > +++ b/lib/librte_ether/rte_flow_driver.h
> > > @@ -170,6 +170,40 @@ rte_flow_error_set(struct rte_flow_error *error,
> > > const struct rte_flow_ops * rte_flow_ops_get(uint8_t port_id, struct
> > > rte_flow_error *error);
> > >
> > > +int cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
> > > + const struct rte_flow_item *pattern,
> > > + const struct rte_flow_action *actions,
> > > + struct rte_eth_ethertype_filter *filter,
> > > + struct rte_flow_error *error);
> >
> > Although this is helper function, it may be good if it follows the rte_follow
> > namespace.
>
> OK, I will rename it in the next version, thanks very much.
Agreed, all public symbols exposed by headers must be prefixed with
rte_flow.
Now I'm not so sure about the need to convert a rte_flow rule to a
rte_eth_ethertype_filter. This definition basically makes rte_flow depend on
rte_eth_ctrl.h (related #include is missing by the way).
I understand that both ixgbe and i40e would benefit from it, and considering
rte_flow_driver.h is free from ABI versioning I guess it's acceptable, but
remember we'll gradually remove existing filter types so we should avoid new
dependencies on them. Just keep in mind this will be temporary.
Please add full documentation as well in Doxygen style like for existing
symbols. We have to maintain this API properly documented.
> > > +
> > > +#define PATTERN_SKIP_VOID(filter, filter_struct, error_type)
> > \
> > > + do { \
> > > + if (!pattern) { \
> > > + memset(filter, 0, sizeof(filter_struct)); \
> > > + error->type = error_type; \
> > > + return -EINVAL;
> > \
> > > + } \
> > > + item = pattern + i; \
> >
> > I believe macros that relies on variables that not passed as argument is not
> > good idea.
>
> Yes, I'm reworking the macros, and it will be changed in v2.
>
> >
> > > + while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {
> > \
> > > + i++; \
> > > + item = pattern + i; \
> > > + } \
> > > + } while (0)
> > > +
> > > +#define ACTION_SKIP_VOID(filter, filter_struct, error_type)
> > \
> > > + do { \
> > > + if (!actions) { \
> > > + memset(filter, 0, sizeof(filter_struct)); \
> > > + error->type = error_type; \
> > > + return -EINVAL;
> > \
> > > + } \
> > > + act = actions + i; \
> > > + while (act->type == RTE_FLOW_ACTION_TYPE_VOID) { \
> > > + i++; \
> > > + act = actions + i; \
> > > + } \
> > > + } while (0)
> >
> > Are these macros generic enough for all rte_flow consumers?
> >
> > What do you think separate this patch, and use these after applied,
> > meanwhile keeping function and MACROS PMD internal?
>
> The main purpose of the macros is to reduce the code in PMD, otherwise there'll be many such codes to get the next non-void item in all parse functions, including the parse_ethertype_filter function in rte_flow.c. But actually I'm not very sure if it's generic enough for all consumers, although I think it's general at present:)
I'll concede skipping VOIDs can be tedious depending on the parser
implementation, but I do not think these macros need to be exposed
either. PMDs can duplicate some code such as this.
I think ixgbe and i40e share a fair amount of code already, and factoring it
should be part of larger task to create a common Intel-specific library
instead.
> Thanks for your advice, I'll move the macros to PMD currently, then there'll be no macros used in parse_ethertype_filter function, and optimize it after applied.
>
> BTW, I plan to send out V2 patch set in this week.
>
> Best Regards,
> Beilei
>
> >
> > > +
> > > #ifdef __cplusplus
> > > }
> > > #endif
> > >
>
--
Adrien Mazarguil
6WIND
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v2 00/17] net/i40e: Consistent filter API
2016-12-02 11:53 [dpdk-dev] [PATCH 00/24] net/i40e: Consistent filter API Beilei Xing
` (23 preceding siblings ...)
2016-12-02 11:53 ` [dpdk-dev] [PATCH 24/24] net/i40e: flush tunnel filters Beilei Xing
@ 2016-12-27 6:26 ` Beilei Xing
2016-12-27 6:26 ` [dpdk-dev] [PATCH v2 01/17] net/i40e: store ethertype filter Beilei Xing
` (17 more replies)
24 siblings, 18 replies; 175+ messages in thread
From: Beilei Xing @ 2016-12-27 6:26 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
All patches depend on Adrien's Generic flow API (rte_flow).
The patches mainly finish following functions:
1) Store and restore all kinds of filters.
2) Parse all kinds of filters.
3) Add flow validate function.
4) Add flow create function.
5) Add flow destroy function.
6) Add flow flush function.
v2 changes:
Add i40e_flow.c, all flow ops are implemented in the file.
Change the whole implementation of all parse flow functions.
Update error info for all flow ops.
Add flow_list to store flows created.
Beilei Xing (17):
net/i40e: store ethertype filter
net/i40e: store tunnel filter
net/i40e: store flow director filter
net/i40e: restore ethertype filter
net/i40e: restore tunnel filter
net/i40e: restore flow director filter
net/i40e: add flow validate function
net/i40e: parse flow director filter
net/i40e: parse tunnel filter
net/i40e: add flow create function
net/i40e: add flow destroy function
net/i40e: destroy ethertype filter
net/i40e: destroy tunnel filter
net/i40e: destroy flow directory filter
net/i40e: add flow flush function
net/i40e: flush ethertype filters
net/i40e: flush tunnel filters
drivers/net/i40e/Makefile | 2 +
drivers/net/i40e/i40e_ethdev.c | 496 ++++++++++--
drivers/net/i40e/i40e_ethdev.h | 175 ++++
drivers/net/i40e/i40e_fdir.c | 112 ++-
drivers/net/i40e/i40e_flow.c | 1723 ++++++++++++++++++++++++++++++++++++++++
5 files changed, 2442 insertions(+), 66 deletions(-)
create mode 100644 drivers/net/i40e/i40e_flow.c
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v2 01/17] net/i40e: store ethertype filter
2016-12-27 6:26 ` [dpdk-dev] [PATCH v2 00/17] net/i40e: Consistent filter API Beilei Xing
@ 2016-12-27 6:26 ` Beilei Xing
2016-12-28 2:22 ` Wu, Jingjing
2016-12-28 3:22 ` Tiwei Bie
2016-12-27 6:26 ` [dpdk-dev] [PATCH v2 02/17] net/i40e: store tunnel filter Beilei Xing
` (16 subsequent siblings)
17 siblings, 2 replies; 175+ messages in thread
From: Beilei Xing @ 2016-12-27 6:26 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
Currently there's no ethertype filter stored in SW.
This patch stores ethertype filter with cuckoo hash
in SW, also adds protection if an ethertype filter
has been added.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/Makefile | 1 +
drivers/net/i40e/i40e_ethdev.c | 164 ++++++++++++++++++++++++++++++++++++++++-
drivers/net/i40e/i40e_ethdev.h | 26 +++++++
3 files changed, 190 insertions(+), 1 deletion(-)
diff --git a/drivers/net/i40e/Makefile b/drivers/net/i40e/Makefile
index 66997b6..11175c4 100644
--- a/drivers/net/i40e/Makefile
+++ b/drivers/net/i40e/Makefile
@@ -117,5 +117,6 @@ DEPDIRS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += lib/librte_eal lib/librte_ether
DEPDIRS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += lib/librte_mempool lib/librte_mbuf
DEPDIRS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += lib/librte_net
DEPDIRS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += lib/librte_kvargs
+DEPDIRS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += lib/librte_hash
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index f42f4ba..80dd8d7 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -51,6 +51,7 @@
#include <rte_dev.h>
#include <rte_eth_ctrl.h>
#include <rte_tailq.h>
+#include <rte_hash_crc.h>
#include "i40e_logs.h"
#include "base/i40e_prototype.h"
@@ -461,6 +462,17 @@ static void i40e_set_default_mac_addr(struct rte_eth_dev *dev,
static int i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
+static int i40e_ethertype_filter_convert(
+ const struct rte_eth_ethertype_filter *input,
+ struct i40e_ethertype_filter *filter);
+static struct i40e_ethertype_filter *
+i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_rule *ethertype_rule,
+ const struct i40e_ethertype_filter_input *input);
+static int i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
+ struct i40e_ethertype_filter *filter);
+static int i40e_sw_ethertype_filter_del(struct i40e_pf *pf,
+ struct i40e_ethertype_filter *filter);
+
static const struct rte_pci_id pci_id_i40e_map[] = {
{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710) },
{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QEMU) },
@@ -937,9 +949,18 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
int ret;
uint32_t len;
uint8_t aq_fail = 0;
+ struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
PMD_INIT_FUNC_TRACE();
+ char ethertype_hash_name[RTE_HASH_NAMESIZE];
+ struct rte_hash_parameters ethertype_hash_params = {
+ .name = ethertype_hash_name,
+ .entries = I40E_MAX_ETHERTYPE_FILTER_NUM,
+ .key_len = sizeof(struct i40e_ethertype_filter_input),
+ .hash_func = rte_hash_crc,
+ };
+
dev->dev_ops = &i40e_eth_dev_ops;
dev->rx_pkt_burst = i40e_recv_pkts;
dev->tx_pkt_burst = i40e_xmit_pkts;
@@ -1179,8 +1200,33 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
pf->flags &= ~I40E_FLAG_DCB;
}
+ /* Initialize ethertype filter rule list and hash */
+ TAILQ_INIT(ðertype_rule->ethertype_list);
+ snprintf(ethertype_hash_name, RTE_HASH_NAMESIZE,
+ "ethertype_%s", dev->data->name);
+ ethertype_rule->hash_table = rte_hash_create(ðertype_hash_params);
+ if (!ethertype_rule->hash_table) {
+ PMD_INIT_LOG(ERR, "Failed to create ethertype hash table!");
+ ret = -EINVAL;
+ goto err_ethertype_hash_table_create;
+ }
+ ethertype_rule->hash_map = rte_zmalloc("i40e_ethertype_hash_map",
+ sizeof(struct i40e_ethertype_filter *) *
+ I40E_MAX_ETHERTYPE_FILTER_NUM,
+ 0);
+ if (!ethertype_rule->hash_map) {
+ PMD_INIT_LOG(ERR,
+ "Failed to allocate memory for ethertype hash map!");
+ ret = -ENOMEM;
+ goto err_ethertype_hash_map_alloc;
+ }
+
return 0;
+err_ethertype_hash_map_alloc:
+ rte_hash_free(ethertype_rule->hash_table);
+err_ethertype_hash_table_create:
+ rte_free(dev->data->mac_addrs);
err_mac_alloc:
i40e_vsi_release(pf->main_vsi);
err_setup_pf_switch:
@@ -1203,23 +1249,40 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
static int
eth_i40e_dev_uninit(struct rte_eth_dev *dev)
{
+ struct i40e_pf *pf;
struct rte_pci_device *pci_dev;
struct i40e_hw *hw;
struct i40e_filter_control_settings settings;
+ struct i40e_ethertype_filter *p_ethertype;
int ret;
uint8_t aq_fail = 0;
+ struct i40e_ethertype_rule *ethertype_rule;
PMD_INIT_FUNC_TRACE();
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
pci_dev = dev->pci_dev;
+ ethertype_rule = &pf->ethertype;
if (hw->adapter_stopped == 0)
i40e_dev_close(dev);
+ /* Remove all ethertype filter rules and hash */
+ if (ethertype_rule->hash_map)
+ rte_free(ethertype_rule->hash_map);
+ if (ethertype_rule->hash_table)
+ rte_hash_free(ethertype_rule->hash_table);
+
+ while ((p_ethertype = TAILQ_FIRST(ðertype_rule->ethertype_list))) {
+ TAILQ_REMOVE(ðertype_rule->ethertype_list,
+ p_ethertype, rules);
+ rte_free(p_ethertype);
+ }
+
dev->dev_ops = NULL;
dev->rx_pkt_burst = NULL;
dev->tx_pkt_burst = NULL;
@@ -7954,6 +8017,78 @@ i40e_hash_filter_ctrl(struct rte_eth_dev *dev,
return ret;
}
+/* Convert ethertype filter structure */
+static int
+i40e_ethertype_filter_convert(const struct rte_eth_ethertype_filter *input,
+ struct i40e_ethertype_filter *filter)
+{
+ rte_memcpy(&filter->input.mac_addr, &input->mac_addr, ETHER_ADDR_LEN);
+ filter->input.ether_type = input->ether_type;
+ filter->flags = input->flags;
+ filter->queue = input->queue;
+
+ return 0;
+}
+
+/* Check if there exists the ethertype filter */
+static struct i40e_ethertype_filter *
+i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_rule *ethertype_rule,
+ const struct i40e_ethertype_filter_input *input)
+{
+ int ret = 0;
+
+ ret = rte_hash_lookup(ethertype_rule->hash_table, (const void *)input);
+ if (ret < 0)
+ return NULL;
+
+ return ethertype_rule->hash_map[ret];
+}
+
+/* Add ethertype filter in SW list */
+static int
+i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
+ struct i40e_ethertype_filter *filter)
+{
+ struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
+ int ret = 0;
+
+ ret = rte_hash_add_key(ethertype_rule->hash_table,
+ &filter->input);
+ if (ret < 0)
+ PMD_DRV_LOG(ERR,
+ "Failed to insert ethertype filter"
+ " to hash table %d!",
+ ret);
+ ethertype_rule->hash_map[ret] = filter;
+
+ TAILQ_INSERT_TAIL(ðertype_rule->ethertype_list, filter, rules);
+
+ return 0;
+}
+
+/* Delete ethertype filter in SW list */
+static int
+i40e_sw_ethertype_filter_del(struct i40e_pf *pf,
+ struct i40e_ethertype_filter *filter)
+{
+ struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
+ int ret = 0;
+
+ ret = rte_hash_del_key(ethertype_rule->hash_table,
+ &filter->input);
+ if (ret < 0)
+ PMD_DRV_LOG(ERR,
+ "Failed to delete ethertype filter"
+ " to hash table %d!",
+ ret);
+ ethertype_rule->hash_map[ret] = NULL;
+
+ TAILQ_REMOVE(ðertype_rule->ethertype_list, filter, rules);
+ rte_free(filter);
+
+ return 0;
+}
+
/*
* Configure ethertype filter, which can director packet by filtering
* with mac address and ether_type or only ether_type
@@ -7964,6 +8099,8 @@ i40e_ethertype_filter_set(struct i40e_pf *pf,
bool add)
{
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
+ struct i40e_ethertype_filter *ethertype_filter, *node;
struct i40e_control_filter_stats stats;
uint16_t flags = 0;
int ret;
@@ -7982,6 +8119,22 @@ i40e_ethertype_filter_set(struct i40e_pf *pf,
PMD_DRV_LOG(WARNING, "filter vlan ether_type in first tag is"
" not supported.");
+ /* Check if there is the filter in SW list */
+ ethertype_filter = rte_zmalloc("ethertype_filter",
+ sizeof(*ethertype_filter), 0);
+ i40e_ethertype_filter_convert(filter, ethertype_filter);
+ node = i40e_sw_ethertype_filter_lookup(ethertype_rule,
+ ðertype_filter->input);
+ if (add && node) {
+ PMD_DRV_LOG(ERR, "Conflict with existing ethertype rules!");
+ rte_free(ethertype_filter);
+ return -EINVAL;
+ } else if (!add && !node) {
+ PMD_DRV_LOG(ERR, "There's no corresponding ethertype filter!");
+ rte_free(ethertype_filter);
+ return -EINVAL;
+ }
+
if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
@@ -8002,7 +8155,16 @@ i40e_ethertype_filter_set(struct i40e_pf *pf,
stats.mac_etype_free, stats.etype_free);
if (ret < 0)
return -ENOSYS;
- return 0;
+
+ /* Add or delete a filter in SW list */
+ if (add) {
+ ret = i40e_sw_ethertype_filter_insert(pf, ethertype_filter);
+ } else {
+ ret = i40e_sw_ethertype_filter_del(pf, node);
+ rte_free(ethertype_filter);
+ }
+
+ return ret;
}
/*
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 298cef4..316af80 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -37,6 +37,7 @@
#include <rte_eth_ctrl.h>
#include <rte_time.h>
#include <rte_kvargs.h>
+#include <rte_hash.h>
#define I40E_VLAN_TAG_SIZE 4
@@ -396,6 +397,30 @@ struct i40e_fdir_info {
struct i40e_fdir_flex_mask flex_mask[I40E_FILTER_PCTYPE_MAX];
};
+/* Ethertype filter number HW supports */
+#define I40E_MAX_ETHERTYPE_FILTER_NUM 768
+
+/* Ethertype filter struct */
+struct i40e_ethertype_filter_input {
+ struct ether_addr mac_addr; /* Mac address to match */
+ uint16_t ether_type; /* Ether type to match */
+};
+
+struct i40e_ethertype_filter {
+ TAILQ_ENTRY(i40e_ethertype_filter) rules;
+ struct i40e_ethertype_filter_input input;
+ uint16_t flags; /* Flags from RTE_ETHTYPE_FLAGS_* */
+ uint16_t queue; /* Queue assigned to when match */
+};
+
+TAILQ_HEAD(i40e_ethertype_filter_list, i40e_ethertype_filter);
+
+struct i40e_ethertype_rule {
+ struct i40e_ethertype_filter_list ethertype_list;
+ struct i40e_ethertype_filter **hash_map;
+ struct rte_hash *hash_table;
+};
+
#define I40E_MIRROR_MAX_ENTRIES_PER_RULE 64
#define I40E_MAX_MIRROR_RULES 64
/*
@@ -466,6 +491,7 @@ struct i40e_pf {
struct i40e_vmdq_info *vmdq;
struct i40e_fdir_info fdir; /* flow director info */
+ struct i40e_ethertype_rule ethertype; /* Ethertype filter rule */
struct i40e_fc_conf fc_conf; /* Flow control conf */
struct i40e_mirror_rule_list mirror_list;
uint16_t nb_mirror_rule; /* The number of mirror rules */
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v2 02/17] net/i40e: store tunnel filter
2016-12-27 6:26 ` [dpdk-dev] [PATCH v2 00/17] net/i40e: Consistent filter API Beilei Xing
2016-12-27 6:26 ` [dpdk-dev] [PATCH v2 01/17] net/i40e: store ethertype filter Beilei Xing
@ 2016-12-27 6:26 ` Beilei Xing
2016-12-28 3:27 ` Tiwei Bie
2016-12-27 6:26 ` [dpdk-dev] [PATCH v2 03/17] net/i40e: store flow director filter Beilei Xing
` (15 subsequent siblings)
17 siblings, 1 reply; 175+ messages in thread
From: Beilei Xing @ 2016-12-27 6:26 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
Currently there's no tunnel filter stored in SW.
This patch stores tunnel filter in SW with cuckoo
hash, also adds protection if a tunnel filter has
been added.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_ethdev.c | 167 ++++++++++++++++++++++++++++++++++++++++-
drivers/net/i40e/i40e_ethdev.h | 27 +++++++
2 files changed, 191 insertions(+), 3 deletions(-)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 80dd8d7..c012d5d 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -473,6 +473,17 @@ static int i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
static int i40e_sw_ethertype_filter_del(struct i40e_pf *pf,
struct i40e_ethertype_filter *filter);
+static int i40e_tunnel_filter_convert(
+ struct i40e_aqc_add_remove_cloud_filters_element_data *cld_filter,
+ struct i40e_tunnel_filter *tunnel_filter);
+static struct i40e_tunnel_filter *
+i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_rule *tunnel_rule,
+ const struct i40e_tunnel_filter_input *input);
+static int i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
+ struct i40e_tunnel_filter *tunnel_filter);
+static int i40e_sw_tunnel_filter_del(struct i40e_pf *pf,
+ struct i40e_tunnel_filter *tunnel_filter);
+
static const struct rte_pci_id pci_id_i40e_map[] = {
{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710) },
{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QEMU) },
@@ -950,6 +961,7 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
uint32_t len;
uint8_t aq_fail = 0;
struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
+ struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
PMD_INIT_FUNC_TRACE();
@@ -961,6 +973,14 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
.hash_func = rte_hash_crc,
};
+ char tunnel_hash_name[RTE_HASH_NAMESIZE];
+ struct rte_hash_parameters tunnel_hash_params = {
+ .name = tunnel_hash_name,
+ .entries = I40E_MAX_TUNNEL_FILTER_NUM,
+ .key_len = sizeof(struct i40e_tunnel_filter_input),
+ .hash_func = rte_hash_crc,
+ };
+
dev->dev_ops = &i40e_eth_dev_ops;
dev->rx_pkt_burst = i40e_recv_pkts;
dev->tx_pkt_burst = i40e_xmit_pkts;
@@ -1221,8 +1241,33 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
goto err_ethertype_hash_map_alloc;
}
+ /* Initialize tunnel filter rule list and hash */
+ TAILQ_INIT(&tunnel_rule->tunnel_list);
+ snprintf(tunnel_hash_name, RTE_HASH_NAMESIZE,
+ "tunnel_%s", dev->data->name);
+ tunnel_rule->hash_table = rte_hash_create(&tunnel_hash_params);
+ if (!tunnel_rule->hash_table) {
+ PMD_INIT_LOG(ERR, "Failed to create tunnel hash table!");
+ ret = -EINVAL;
+ goto err_tunnel_hash_table_create;
+ }
+ tunnel_rule->hash_map = rte_zmalloc("i40e_tunnel_hash_map",
+ sizeof(struct i40e_tunnel_filter *) *
+ I40E_MAX_TUNNEL_FILTER_NUM,
+ 0);
+ if (!tunnel_rule->hash_map) {
+ PMD_INIT_LOG(ERR,
+ "Failed to allocate memory for tunnel hash map!");
+ ret = -ENOMEM;
+ goto err_tunnel_hash_map_alloc;
+ }
+
return 0;
+err_tunnel_hash_map_alloc:
+ rte_hash_free(tunnel_rule->hash_table);
+err_tunnel_hash_table_create:
+ rte_free(ethertype_rule->hash_map);
err_ethertype_hash_map_alloc:
rte_hash_free(ethertype_rule->hash_table);
err_ethertype_hash_table_create:
@@ -1254,9 +1299,11 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
struct i40e_hw *hw;
struct i40e_filter_control_settings settings;
struct i40e_ethertype_filter *p_ethertype;
+ struct i40e_tunnel_filter *p_tunnel;
int ret;
uint8_t aq_fail = 0;
struct i40e_ethertype_rule *ethertype_rule;
+ struct i40e_tunnel_rule *tunnel_rule;
PMD_INIT_FUNC_TRACE();
@@ -1267,6 +1314,7 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
pci_dev = dev->pci_dev;
ethertype_rule = &pf->ethertype;
+ tunnel_rule = &pf->tunnel;
if (hw->adapter_stopped == 0)
i40e_dev_close(dev);
@@ -1283,6 +1331,17 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
rte_free(p_ethertype);
}
+ /* Remove all tunnel filter rules and hash */
+ if (tunnel_rule->hash_map)
+ rte_free(tunnel_rule->hash_map);
+ if (tunnel_rule->hash_table)
+ rte_hash_free(tunnel_rule->hash_table);
+
+ while ((p_tunnel = TAILQ_FIRST(&tunnel_rule->tunnel_list))) {
+ TAILQ_REMOVE(&tunnel_rule->tunnel_list, p_tunnel, rules);
+ rte_free(p_tunnel);
+ }
+
dev->dev_ops = NULL;
dev->rx_pkt_burst = NULL;
dev->tx_pkt_burst = NULL;
@@ -6482,6 +6541,81 @@ i40e_dev_get_filter_type(uint16_t filter_type, uint16_t *flag)
return 0;
}
+/* Convert tunnel filter structure */
+static int
+i40e_tunnel_filter_convert(struct i40e_aqc_add_remove_cloud_filters_element_data
+ *cld_filter,
+ struct i40e_tunnel_filter *tunnel_filter)
+{
+ ether_addr_copy((struct ether_addr *)&cld_filter->outer_mac,
+ (struct ether_addr *)&tunnel_filter->input.outer_mac);
+ ether_addr_copy((struct ether_addr *)&cld_filter->inner_mac,
+ (struct ether_addr *)&tunnel_filter->input.inner_mac);
+ tunnel_filter->input.inner_vlan = cld_filter->inner_vlan;
+ tunnel_filter->input.flags = cld_filter->flags;
+ tunnel_filter->input.tenant_id = cld_filter->tenant_id;
+ tunnel_filter->queue = cld_filter->queue_number;
+
+ return 0;
+}
+
+/* Check if there exists the tunnel filter */
+static struct i40e_tunnel_filter *
+i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_rule *tunnel_rule,
+ const struct i40e_tunnel_filter_input *input)
+{
+ int ret = 0;
+
+ ret = rte_hash_lookup(tunnel_rule->hash_table, (const void *)input);
+ if (ret < 0)
+ return NULL;
+
+ return tunnel_rule->hash_map[ret];
+}
+
+/* Add a tunnel filter into the SW list */
+static int
+i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
+ struct i40e_tunnel_filter *tunnel_filter)
+{
+ struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
+ int ret = 0;
+
+ ret = rte_hash_add_key(tunnel_rule->hash_table,
+ &tunnel_filter->input);
+ if (ret < 0)
+ PMD_DRV_LOG(ERR,
+ "Failed to insert tunnel filter to hash table %d!",
+ ret);
+ tunnel_rule->hash_map[ret] = tunnel_filter;
+
+ TAILQ_INSERT_TAIL(&tunnel_rule->tunnel_list, tunnel_filter, rules);
+
+ return 0;
+}
+
+/* Delete a tunnel filter from the SW list */
+static int
+i40e_sw_tunnel_filter_del(struct i40e_pf *pf,
+ struct i40e_tunnel_filter *tunnel_filter)
+{
+ struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
+ int ret = 0;
+
+ ret = rte_hash_del_key(tunnel_rule->hash_table,
+ &tunnel_filter->input);
+ if (ret < 0)
+ PMD_DRV_LOG(ERR,
+ "Failed to delete tunnel filter to hash table %d!",
+ ret);
+ tunnel_rule->hash_map[ret] = NULL;
+
+ TAILQ_REMOVE(&tunnel_rule->tunnel_list, tunnel_filter, rules);
+ rte_free(tunnel_filter);
+
+ return 0;
+}
+
static int
i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
struct rte_eth_tunnel_filter_conf *tunnel_filter,
@@ -6497,6 +6631,8 @@ i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
struct i40e_vsi *vsi = pf->main_vsi;
struct i40e_aqc_add_remove_cloud_filters_element_data *cld_filter;
struct i40e_aqc_add_remove_cloud_filters_element_data *pfilter;
+ struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
+ struct i40e_tunnel_filter *tunnel, *node;
cld_filter = rte_zmalloc("tunnel_filter",
sizeof(struct i40e_aqc_add_remove_cloud_filters_element_data),
@@ -6559,11 +6695,36 @@ i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
pfilter->tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id);
pfilter->queue_number = rte_cpu_to_le_16(tunnel_filter->queue_id);
- if (add)
+ tunnel = rte_zmalloc("tunnel_filter", sizeof(*tunnel), 0);
+ i40e_tunnel_filter_convert(cld_filter, tunnel);
+ node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &tunnel->input);
+ if (add && node) {
+ PMD_DRV_LOG(ERR, "Conflict with existing tunnel rules!");
+ rte_free(tunnel);
+ return -EINVAL;
+ } else if (!add && !node) {
+ PMD_DRV_LOG(ERR, "There's no corresponding tunnel filter!");
+ rte_free(tunnel);
+ return -EINVAL;
+ }
+
+ if (add) {
ret = i40e_aq_add_cloud_filters(hw, vsi->seid, cld_filter, 1);
- else
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to add a tunnel filter.");
+ return ret;
+ }
+ ret = i40e_sw_tunnel_filter_insert(pf, tunnel);
+ } else {
ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
- cld_filter, 1);
+ cld_filter, 1);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter.");
+ return ret;
+ }
+ ret = i40e_sw_tunnel_filter_del(pf, node);
+ rte_free(tunnel);
+ }
rte_free(cld_filter);
return ret;
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 316af80..c05436c 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -421,6 +421,32 @@ struct i40e_ethertype_rule {
struct rte_hash *hash_table;
};
+/* Tunnel filter number HW supports */
+#define I40E_MAX_TUNNEL_FILTER_NUM 400
+
+/* Tunnel filter struct */
+struct i40e_tunnel_filter_input {
+ uint8_t outer_mac[6]; /* Outer mac address to match */
+ uint8_t inner_mac[6]; /* Inner mac address to match */
+ uint16_t inner_vlan; /* Inner vlan address to match */
+ uint16_t flags; /* Filter type flag */
+ uint32_t tenant_id; /* Tenant id to match */
+};
+
+struct i40e_tunnel_filter {
+ TAILQ_ENTRY(i40e_tunnel_filter) rules;
+ struct i40e_tunnel_filter_input input;
+ uint16_t queue; /* Queue assigned to when match */
+};
+
+TAILQ_HEAD(i40e_tunnel_filter_list, i40e_tunnel_filter);
+
+struct i40e_tunnel_rule {
+ struct i40e_tunnel_filter_list tunnel_list;
+ struct i40e_tunnel_filter **hash_map;
+ struct rte_hash *hash_table;
+};
+
#define I40E_MIRROR_MAX_ENTRIES_PER_RULE 64
#define I40E_MAX_MIRROR_RULES 64
/*
@@ -492,6 +518,7 @@ struct i40e_pf {
struct i40e_fdir_info fdir; /* flow director info */
struct i40e_ethertype_rule ethertype; /* Ethertype filter rule */
+ struct i40e_tunnel_rule tunnel; /* Tunnel filter rule */
struct i40e_fc_conf fc_conf; /* Flow control conf */
struct i40e_mirror_rule_list mirror_list;
uint16_t nb_mirror_rule; /* The number of mirror rules */
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v2 03/17] net/i40e: store flow director filter
2016-12-27 6:26 ` [dpdk-dev] [PATCH v2 00/17] net/i40e: Consistent filter API Beilei Xing
2016-12-27 6:26 ` [dpdk-dev] [PATCH v2 01/17] net/i40e: store ethertype filter Beilei Xing
2016-12-27 6:26 ` [dpdk-dev] [PATCH v2 02/17] net/i40e: store tunnel filter Beilei Xing
@ 2016-12-27 6:26 ` Beilei Xing
2016-12-28 3:38 ` Tiwei Bie
2016-12-27 6:26 ` [dpdk-dev] [PATCH v2 04/17] net/i40e: restore ethertype filter Beilei Xing
` (14 subsequent siblings)
17 siblings, 1 reply; 175+ messages in thread
From: Beilei Xing @ 2016-12-27 6:26 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
Currently no flow director filters are stored in SW. This
patch stores flow director filters in SW using a cuckoo hash,
and adds protection against adding a filter that already exists.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_ethdev.c | 48 +++++++++++++++++++++
drivers/net/i40e/i40e_ethdev.h | 12 ++++++
drivers/net/i40e/i40e_fdir.c | 98 ++++++++++++++++++++++++++++++++++++++++++
3 files changed, 158 insertions(+)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index c012d5d..427ebdc 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -962,6 +962,7 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
uint8_t aq_fail = 0;
struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
+ struct i40e_fdir_info *fdir_info = &pf->fdir;
PMD_INIT_FUNC_TRACE();
@@ -981,6 +982,14 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
.hash_func = rte_hash_crc,
};
+ char fdir_hash_name[RTE_HASH_NAMESIZE];
+ struct rte_hash_parameters fdir_hash_params = {
+ .name = fdir_hash_name,
+ .entries = I40E_MAX_FDIR_FILTER_NUM,
+ .key_len = sizeof(struct rte_eth_fdir_input),
+ .hash_func = rte_hash_crc,
+ };
+
dev->dev_ops = &i40e_eth_dev_ops;
dev->rx_pkt_burst = i40e_recv_pkts;
dev->tx_pkt_burst = i40e_xmit_pkts;
@@ -1262,8 +1271,33 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
goto err_tunnel_hash_map_alloc;
}
+ /* Initialize flow director filter rule list and hash */
+ TAILQ_INIT(&fdir_info->fdir_list);
+ snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
+ "fdir_%s", dev->data->name);
+ fdir_info->hash_table = rte_hash_create(&fdir_hash_params);
+ if (!fdir_info->hash_table) {
+ PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
+ ret = -EINVAL;
+ goto err_fdir_hash_table_create;
+ }
+ fdir_info->hash_map = rte_zmalloc("i40e_fdir_hash_map",
+ sizeof(struct i40e_fdir_filter *) *
+ I40E_MAX_FDIR_FILTER_NUM,
+ 0);
+ if (!fdir_info->hash_map) {
+ PMD_INIT_LOG(ERR,
+ "Failed to allocate memory for fdir hash map!");
+ ret = -ENOMEM;
+ goto err_fdir_hash_map_alloc;
+ }
+
return 0;
+err_fdir_hash_map_alloc:
+ rte_hash_free(fdir_info->hash_table);
+err_fdir_hash_table_create:
+ rte_free(tunnel_rule->hash_map);
err_tunnel_hash_map_alloc:
rte_hash_free(tunnel_rule->hash_table);
err_tunnel_hash_table_create:
@@ -1300,10 +1334,12 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
struct i40e_filter_control_settings settings;
struct i40e_ethertype_filter *p_ethertype;
struct i40e_tunnel_filter *p_tunnel;
+ struct i40e_fdir_filter *p_fdir;
int ret;
uint8_t aq_fail = 0;
struct i40e_ethertype_rule *ethertype_rule;
struct i40e_tunnel_rule *tunnel_rule;
+ struct i40e_fdir_info *fdir_info;
PMD_INIT_FUNC_TRACE();
@@ -1315,6 +1351,7 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
pci_dev = dev->pci_dev;
ethertype_rule = &pf->ethertype;
tunnel_rule = &pf->tunnel;
+ fdir_info = &pf->fdir;
if (hw->adapter_stopped == 0)
i40e_dev_close(dev);
@@ -1342,6 +1379,17 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
rte_free(p_tunnel);
}
+ /* Remove all flow director rules and hash */
+ if (fdir_info->hash_map)
+ rte_free(fdir_info->hash_map);
+ if (fdir_info->hash_table)
+ rte_hash_free(fdir_info->hash_table);
+
+ while ((p_fdir = TAILQ_FIRST(&fdir_info->fdir_list))) {
+ TAILQ_REMOVE(&fdir_info->fdir_list, p_fdir, rules);
+ rte_free(p_fdir);
+ }
+
dev->dev_ops = NULL;
dev->rx_pkt_burst = NULL;
dev->tx_pkt_burst = NULL;
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index c05436c..71756ae 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -377,6 +377,14 @@ struct i40e_fdir_flex_mask {
};
#define I40E_FILTER_PCTYPE_MAX 64
+#define I40E_MAX_FDIR_FILTER_NUM (1024 * 8)
+
+struct i40e_fdir_filter {
+ TAILQ_ENTRY(i40e_fdir_filter) rules;
+ struct rte_eth_fdir_filter fdir;
+};
+
+TAILQ_HEAD(i40e_fdir_filter_list, i40e_fdir_filter);
/*
* A structure used to define fields of a FDIR related info.
*/
@@ -395,6 +403,10 @@ struct i40e_fdir_info {
*/
struct i40e_fdir_flex_pit flex_set[I40E_MAX_FLXPLD_LAYER * I40E_MAX_FLXPLD_FIED];
struct i40e_fdir_flex_mask flex_mask[I40E_FILTER_PCTYPE_MAX];
+
+ struct i40e_fdir_filter_list fdir_list;
+ struct i40e_fdir_filter **hash_map;
+ struct rte_hash *hash_table;
};
/* Ethertype filter number HW supports */
diff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c
index 335bf15..faa2495 100644
--- a/drivers/net/i40e/i40e_fdir.c
+++ b/drivers/net/i40e/i40e_fdir.c
@@ -121,6 +121,16 @@ static int i40e_fdir_filter_programming(struct i40e_pf *pf,
bool add);
static int i40e_fdir_flush(struct rte_eth_dev *dev);
+static int i40e_fdir_filter_convert(const struct rte_eth_fdir_filter *input,
+ struct i40e_fdir_filter *filter);
+static struct i40e_fdir_filter *
+i40e_sw_fdir_filter_lookup(struct i40e_fdir_info *fdir_info,
+ const struct rte_eth_fdir_input *input);
+static int i40e_sw_fdir_filter_insert(struct i40e_pf *pf,
+ struct i40e_fdir_filter *filter);
+static int i40e_sw_fdir_filter_del(struct i40e_pf *pf,
+ struct i40e_fdir_filter *filter);
+
static int
i40e_fdir_rx_queue_init(struct i40e_rx_queue *rxq)
{
@@ -1017,6 +1027,69 @@ i40e_check_fdir_programming_status(struct i40e_rx_queue *rxq)
return ret;
}
+static int
+i40e_fdir_filter_convert(const struct rte_eth_fdir_filter *input,
+ struct i40e_fdir_filter *filter)
+{
+ rte_memcpy(&filter->fdir, input, sizeof(struct rte_eth_fdir_filter));
+ return 0;
+}
+
+/* Check if there exists the flow director filter */
+static struct i40e_fdir_filter *
+i40e_sw_fdir_filter_lookup(struct i40e_fdir_info *fdir_info,
+ const struct rte_eth_fdir_input *input)
+{
+ int ret = 0;
+
+ ret = rte_hash_lookup(fdir_info->hash_table, (const void *)input);
+ if (ret < 0)
+ return NULL;
+
+ return fdir_info->hash_map[ret];
+}
+
+/* Add a flow director filter into the SW list */
+static int
+i40e_sw_fdir_filter_insert(struct i40e_pf *pf, struct i40e_fdir_filter *filter)
+{
+ struct i40e_fdir_info *fdir_info = &pf->fdir;
+ int ret = 0;
+
+ ret = rte_hash_add_key(fdir_info->hash_table,
+ &filter->fdir.input);
+ if (ret < 0)
+ PMD_DRV_LOG(ERR,
+ "Failed to insert fdir filter to hash table %d!",
+ ret);
+ fdir_info->hash_map[ret] = filter;
+
+ TAILQ_INSERT_TAIL(&fdir_info->fdir_list, filter, rules);
+
+ return 0;
+}
+
+/* Delete a flow director filter from the SW list */
+static int
+i40e_sw_fdir_filter_del(struct i40e_pf *pf, struct i40e_fdir_filter *filter)
+{
+ struct i40e_fdir_info *fdir_info = &pf->fdir;
+ int ret = 0;
+
+ ret = rte_hash_del_key(fdir_info->hash_table,
+ &filter->fdir.input);
+ if (ret < 0)
+ PMD_DRV_LOG(ERR,
+ "Failed to delete fdir filter to hash table %d!",
+ ret);
+ fdir_info->hash_map[ret] = NULL;
+
+ TAILQ_REMOVE(&fdir_info->fdir_list, filter, rules);
+ rte_free(filter);
+
+ return 0;
+}
+
/*
* i40e_add_del_fdir_filter - add or remove a flow director filter.
* @pf: board private structure
@@ -1032,6 +1105,8 @@ i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
enum i40e_filter_pctype pctype;
+ struct i40e_fdir_info *fdir_info = &pf->fdir;
+ struct i40e_fdir_filter *fdir_filter, *node;
int ret = 0;
if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
@@ -1054,6 +1129,21 @@ i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
return -EINVAL;
}
+ fdir_filter = rte_zmalloc("fdir_filter", sizeof(*fdir_filter), 0);
+ i40e_fdir_filter_convert(filter, fdir_filter);
+ node = i40e_sw_fdir_filter_lookup(fdir_info, &fdir_filter->fdir.input);
+ if (add && node) {
+ PMD_DRV_LOG(ERR,
+ "Conflict with existing flow director rules!");
+ rte_free(fdir_filter);
+ return -EINVAL;
+ } else if (!add && !node) {
+ PMD_DRV_LOG(ERR,
+ "There's no corresponding flow firector filter!");
+ rte_free(fdir_filter);
+ return -EINVAL;
+ }
+
memset(pkt, 0, I40E_FDIR_PKT_LEN);
ret = i40e_fdir_construct_pkt(pf, &filter->input, pkt);
@@ -1077,6 +1167,14 @@ i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
pctype);
return ret;
}
+
+ if (add) {
+ ret = i40e_sw_fdir_filter_insert(pf, fdir_filter);
+ } else {
+ ret = i40e_sw_fdir_filter_del(pf, node);
+ rte_free(fdir_filter);
+ }
+
return ret;
}
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v2 04/17] net/i40e: restore ethertype filter
2016-12-27 6:26 ` [dpdk-dev] [PATCH v2 00/17] net/i40e: Consistent filter API Beilei Xing
` (2 preceding siblings ...)
2016-12-27 6:26 ` [dpdk-dev] [PATCH v2 03/17] net/i40e: store flow director filter Beilei Xing
@ 2016-12-27 6:26 ` Beilei Xing
2016-12-28 2:25 ` Wu, Jingjing
2016-12-27 6:26 ` [dpdk-dev] [PATCH v2 05/17] net/i40e: restore tunnel filter Beilei Xing
` (13 subsequent siblings)
17 siblings, 1 reply; 175+ messages in thread
From: Beilei Xing @ 2016-12-27 6:26 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
Add support for restoring the ethertype filter in case the
filter is dropped accidentally, since with the generic filter
API all filters must be explicitly added and removed by the user.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_ethdev.c | 39 +++++++++++++++++++++++++++++++++++++++
1 file changed, 39 insertions(+)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 427ebdc..cd7c309 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -484,6 +484,9 @@ static int i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
static int i40e_sw_tunnel_filter_del(struct i40e_pf *pf,
struct i40e_tunnel_filter *tunnel_filter);
+static void i40e_ethertype_filter_restore(struct i40e_pf *pf);
+static void i40e_filter_restore(struct i40e_pf *pf);
+
static const struct rte_pci_id pci_id_i40e_map[] = {
{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710) },
{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QEMU) },
@@ -1964,6 +1967,8 @@ i40e_dev_start(struct rte_eth_dev *dev)
/* enable uio intr after callback register */
rte_intr_enable(intr_handle);
+ i40e_filter_restore(pf);
+
return I40E_SUCCESS;
err_up:
@@ -10066,3 +10071,37 @@ i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
return ret;
}
+
+/* Restore ethertype filter */
+static void
+i40e_ethertype_filter_restore(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_ethertype_filter_list
+ *ethertype_list = &pf->ethertype.ethertype_list;
+ struct i40e_ethertype_filter *f;
+ struct i40e_control_filter_stats stats;
+ uint16_t flags;
+
+ TAILQ_FOREACH(f, ethertype_list, rules) {
+ flags = 0;
+ if (!(f->flags & RTE_ETHTYPE_FLAGS_MAC))
+ flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
+ if (f->flags & RTE_ETHTYPE_FLAGS_DROP)
+ flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
+ flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
+
+ memset(&stats, 0, sizeof(stats));
+ i40e_aq_add_rem_control_packet_filter(hw,
+ f->input.mac_addr.addr_bytes,
+ f->input.ether_type,
+ flags, pf->main_vsi->seid,
+ f->queue, 1, &stats, NULL);
+ }
+}
+
+static void
+i40e_filter_restore(struct i40e_pf *pf)
+{
+ i40e_ethertype_filter_restore(pf);
+}
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v2 05/17] net/i40e: restore tunnel filter
2016-12-27 6:26 ` [dpdk-dev] [PATCH v2 00/17] net/i40e: Consistent filter API Beilei Xing
` (3 preceding siblings ...)
2016-12-27 6:26 ` [dpdk-dev] [PATCH v2 04/17] net/i40e: restore ethertype filter Beilei Xing
@ 2016-12-27 6:26 ` Beilei Xing
2016-12-27 6:26 ` [dpdk-dev] [PATCH v2 06/17] net/i40e: restore flow director filter Beilei Xing
` (12 subsequent siblings)
17 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2016-12-27 6:26 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
Add support for restoring the tunnel filter.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_ethdev.c | 21 +++++++++++++++++++++
1 file changed, 21 insertions(+)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index cd7c309..7b4c426 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -485,6 +485,7 @@ static int i40e_sw_tunnel_filter_del(struct i40e_pf *pf,
struct i40e_tunnel_filter *tunnel_filter);
static void i40e_ethertype_filter_restore(struct i40e_pf *pf);
+static void i40e_tunnel_filter_restore(struct i40e_pf *pf);
static void i40e_filter_restore(struct i40e_pf *pf);
static const struct rte_pci_id pci_id_i40e_map[] = {
@@ -10100,8 +10101,28 @@ i40e_ethertype_filter_restore(struct i40e_pf *pf)
}
}
+/* Restore tunnel filter */
+static void
+i40e_tunnel_filter_restore(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_vsi *vsi = pf->main_vsi;
+ struct i40e_tunnel_filter_list
+ *tunnel_list = &pf->tunnel.tunnel_list;
+ struct i40e_tunnel_filter *f;
+ struct i40e_aqc_add_remove_cloud_filters_element_data cld_filter;
+
+ TAILQ_FOREACH(f, tunnel_list, rules) {
+ memset(&cld_filter, 0, sizeof(cld_filter));
+ rte_memcpy(&cld_filter, &f->input, sizeof(f->input));
+ cld_filter.queue_number = f->queue;
+ i40e_aq_add_cloud_filters(hw, vsi->seid, &cld_filter, 1);
+ }
+}
+
static void
i40e_filter_restore(struct i40e_pf *pf)
{
i40e_ethertype_filter_restore(pf);
+ i40e_tunnel_filter_restore(pf);
}
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v2 06/17] net/i40e: restore flow director filter
2016-12-27 6:26 ` [dpdk-dev] [PATCH v2 00/17] net/i40e: Consistent filter API Beilei Xing
` (4 preceding siblings ...)
2016-12-27 6:26 ` [dpdk-dev] [PATCH v2 05/17] net/i40e: restore tunnel filter Beilei Xing
@ 2016-12-27 6:26 ` Beilei Xing
2016-12-27 6:26 ` [dpdk-dev] [PATCH v2 07/17] net/i40e: add flow validate function Beilei Xing
` (11 subsequent siblings)
17 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2016-12-27 6:26 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
Add support for restoring the flow director filter.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_ethdev.c | 1 +
drivers/net/i40e/i40e_ethdev.h | 1 +
drivers/net/i40e/i40e_fdir.c | 12 ++++++++++++
3 files changed, 14 insertions(+)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 7b4c426..7f98b79 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -10125,4 +10125,5 @@ i40e_filter_restore(struct i40e_pf *pf)
{
i40e_ethertype_filter_restore(pf);
i40e_tunnel_filter_restore(pf);
+ i40e_fdir_filter_restore(pf);
}
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 71756ae..6089895 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -670,6 +670,7 @@ int i40e_fdir_ctrl_func(struct rte_eth_dev *dev,
int i40e_select_filter_input_set(struct i40e_hw *hw,
struct rte_eth_input_set_conf *conf,
enum rte_filter_type filter);
+void i40e_fdir_filter_restore(struct i40e_pf *pf);
int i40e_hash_filter_inset_select(struct i40e_hw *hw,
struct rte_eth_input_set_conf *conf);
int i40e_fdir_filter_inset_select(struct i40e_pf *pf,
diff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c
index faa2495..0bed525 100644
--- a/drivers/net/i40e/i40e_fdir.c
+++ b/drivers/net/i40e/i40e_fdir.c
@@ -1579,3 +1579,15 @@ i40e_fdir_ctrl_func(struct rte_eth_dev *dev,
}
return ret;
}
+
+/* Restore flow director filter */
+void
+i40e_fdir_filter_restore(struct i40e_pf *pf)
+{
+ struct i40e_fdir_filter_list *fdir_list = &pf->fdir.fdir_list;
+ struct i40e_fdir_filter *f;
+ struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(pf->main_vsi);
+
+ TAILQ_FOREACH(f, fdir_list, rules)
+ i40e_add_del_fdir_filter(dev, &f->fdir, TRUE);
+}
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v2 07/17] net/i40e: add flow validate function
2016-12-27 6:26 ` [dpdk-dev] [PATCH v2 00/17] net/i40e: Consistent filter API Beilei Xing
` (5 preceding siblings ...)
2016-12-27 6:26 ` [dpdk-dev] [PATCH v2 06/17] net/i40e: restore flow director filter Beilei Xing
@ 2016-12-27 6:26 ` Beilei Xing
2016-12-27 12:40 ` Adrien Mazarguil
` (2 more replies)
2016-12-27 6:26 ` [dpdk-dev] [PATCH v2 08/17] net/i40e: parse flow director filter Beilei Xing
` (10 subsequent siblings)
17 siblings, 3 replies; 175+ messages in thread
From: Beilei Xing @ 2016-12-27 6:26 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
This patch adds the i40e_flow_validate function to check if
a flow is valid according to the flow pattern.
i40e_parse_ethertype_filter is added first; it also retrieves
the ethertype info.
i40e_flow.c is added to handle all generic filter events.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/Makefile | 1 +
drivers/net/i40e/i40e_ethdev.c | 5 +
drivers/net/i40e/i40e_ethdev.h | 20 ++
drivers/net/i40e/i40e_flow.c | 431 +++++++++++++++++++++++++++++++++++++++++
4 files changed, 457 insertions(+)
create mode 100644 drivers/net/i40e/i40e_flow.c
diff --git a/drivers/net/i40e/Makefile b/drivers/net/i40e/Makefile
index 11175c4..89bd85a 100644
--- a/drivers/net/i40e/Makefile
+++ b/drivers/net/i40e/Makefile
@@ -105,6 +105,7 @@ endif
SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_ethdev_vf.c
SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_pf.c
SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_fdir.c
+SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_flow.c
# vector PMD driver needs SSE4.1 support
ifeq ($(findstring RTE_MACHINE_CPUFLAG_SSE4_1,$(CFLAGS)),)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 7f98b79..80024ed 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -8452,6 +8452,11 @@ i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
case RTE_ETH_FILTER_FDIR:
ret = i40e_fdir_ctrl_func(dev, filter_op, arg);
break;
+ case RTE_ETH_FILTER_GENERIC:
+ if (filter_op != RTE_ETH_FILTER_GET)
+ return -EINVAL;
+ *(const void **)arg = &i40e_flow_ops;
+ break;
default:
PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
filter_type);
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 6089895..bbe52f0 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -38,6 +38,7 @@
#include <rte_time.h>
#include <rte_kvargs.h>
#include <rte_hash.h>
+#include <rte_flow_driver.h>
#define I40E_VLAN_TAG_SIZE 4
@@ -629,6 +630,23 @@ struct i40e_adapter {
struct rte_timecounter tx_tstamp_tc;
};
+union i40e_filter_t {
+ struct rte_eth_ethertype_filter ethertype_filter;
+ struct rte_eth_fdir_filter fdir_filter;
+ struct rte_eth_tunnel_filter_conf tunnel_filter;
+} cons_filter;
+
+typedef int (*parse_filter_t)(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter);
+struct i40e_valid_pattern {
+ enum rte_flow_item_type *items;
+ parse_filter_t parse_filter;
+};
+
int i40e_dev_switch_queues(struct i40e_pf *pf, bool on);
int i40e_vsi_release(struct i40e_vsi *vsi);
struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf,
@@ -823,4 +841,6 @@ i40e_calc_itr_interval(int16_t interval)
((phy_type) & I40E_CAP_PHY_TYPE_25GBASE_SR) || \
((phy_type) & I40E_CAP_PHY_TYPE_25GBASE_LR))
+const struct rte_flow_ops i40e_flow_ops;
+
#endif /* _I40E_ETHDEV_H_ */
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
new file mode 100644
index 0000000..bf451ef
--- /dev/null
+++ b/drivers/net/i40e/i40e_flow.c
@@ -0,0 +1,431 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_log.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_eth_ctrl.h>
+#include <rte_tailq.h>
+#include <rte_flow_driver.h>
+
+#include "i40e_logs.h"
+#include "base/i40e_type.h"
+#include "i40e_ethdev.h"
+
+static int i40e_flow_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error);
+static int i40e_parse_ethertype_pattern(const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ struct rte_eth_ethertype_filter *filter);
+static int i40e_parse_ethertype_act(const struct rte_flow_action *actions,
+ struct rte_flow_error *error,
+ struct rte_eth_ethertype_filter *filter);
+static int i40e_parse_attr(const struct rte_flow_attr *attr,
+ struct rte_flow_error *error);
+
+const struct rte_flow_ops i40e_flow_ops = {
+ .validate = i40e_flow_validate,
+};
+
+/* Pattern matched ethertype filter */
+static enum rte_flow_item_type pattern_ethertype[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static int
+i40e_parse_ethertype_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct rte_eth_ethertype_filter *ethertype_filter =
+ &filter->ethertype_filter;
+ int ret;
+
+ ret = i40e_parse_ethertype_pattern(pattern, error, ethertype_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_parse_ethertype_act(actions, error, ethertype_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_parse_attr(attr, error);
+ if (ret)
+ return ret;
+
+ if (ethertype_filter->queue >= pf->dev_data->nb_rx_queues) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "Invalid queue ID for"
+ " ethertype_filter.");
+ return -EINVAL;
+ }
+ if (ethertype_filter->ether_type == ETHER_TYPE_IPv4 ||
+ ethertype_filter->ether_type == ETHER_TYPE_IPv6) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "Unsupported ether_type in"
+ " control packet filter.");
+ return -ENOTSUP;
+ }
+ if (ethertype_filter->ether_type == ETHER_TYPE_VLAN)
+ PMD_DRV_LOG(WARNING, "filter vlan ether_type in"
+ " first tag is not supported.");
+
+ return ret;
+}
+
+static struct i40e_valid_pattern i40e_supported_patterns[] = {
+ /* Ethertype */
+ { pattern_ethertype, i40e_parse_ethertype_filter },
+};
+
+#define NEXT_ITEM_OF_ACTION(act, actions, index) \
+ do { \
+ act = actions + index; \
+ while (act->type == RTE_FLOW_ACTION_TYPE_VOID) { \
+ index++; \
+ act = actions + index; \
+ } \
+ } while (0)
+
+/* Find the first VOID or non-VOID item pointer */
+static const struct rte_flow_item *
+i40e_find_first_item(const struct rte_flow_item *item, bool is_void)
+{
+ bool is_find;
+
+ while (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ if (is_void)
+ is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
+ else
+ is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
+ if (is_find)
+ break;
+ item++;
+ }
+ return item;
+}
+
+/* Skip all VOID items of the pattern */
+static void
+i40e_pattern_skip_void_item(struct rte_flow_item *items,
+ const struct rte_flow_item *pattern)
+{
+ uint32_t cpy_count = 0;
+ const struct rte_flow_item *pb = pattern, *pe = pattern;
+
+ for (;;) {
+ /* Find a non-void item first */
+ pb = i40e_find_first_item(pb, false);
+ if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
+ pe = pb;
+ break;
+ }
+
+ /* Find a void item */
+ pe = i40e_find_first_item(pb + 1, true);
+
+ cpy_count = pe - pb;
+ rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);
+
+ items += cpy_count;
+
+ if (pe->type == RTE_FLOW_ITEM_TYPE_END) {
+ pb = pe;
+ break;
+ }
+
+ pb = pe + 1;
+ }
+ /* Copy the END item. */
+ rte_memcpy(items, pe, sizeof(struct rte_flow_item));
+}
+
+/* Check if the pattern matches a supported item type array */
+static bool
+i40e_match_pattern(enum rte_flow_item_type *item_array,
+ struct rte_flow_item *pattern)
+{
+ struct rte_flow_item *item = pattern;
+
+ while ((*item_array == item->type) &&
+ (*item_array != RTE_FLOW_ITEM_TYPE_END)) {
+ item_array++;
+ item++;
+ }
+
+ return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
+ item->type == RTE_FLOW_ITEM_TYPE_END);
+}
+
+/* Find if there's parse filter function matched */
+static parse_filter_t
+i40e_find_parse_filter_func(struct rte_flow_item *pattern)
+{
+ parse_filter_t parse_filter = NULL;
+ uint8_t i = 0;
+
+ for (; i < RTE_DIM(i40e_supported_patterns); i++) {
+ if (i40e_match_pattern(i40e_supported_patterns[i].items,
+ pattern)) {
+ parse_filter = i40e_supported_patterns[i].parse_filter;
+ break;
+ }
+ }
+
+ return parse_filter;
+}
+
+/* Parse attributes */
+static int
+i40e_parse_attr(const struct rte_flow_attr *attr,
+ struct rte_flow_error *error)
+{
+ /* Must be input direction */
+ if (!attr->ingress) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ NULL, "Only support ingress.");
+ return -EINVAL;
+ }
+
+ /* Not supported */
+ if (attr->egress) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ NULL, "Not support egress.");
+ return -EINVAL;
+ }
+
+ /* Not supported */
+ if (attr->priority) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ NULL, "Not support priority.");
+ return -EINVAL;
+ }
+
+ /* Not supported */
+ if (attr->group) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+ NULL, "Not support group.");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+i40e_parse_ethertype_pattern(const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ struct rte_eth_ethertype_filter *filter)
+{
+ const struct rte_flow_item *item = pattern;
+ const struct rte_flow_item_eth *eth_spec;
+ const struct rte_flow_item_eth *eth_mask;
+ enum rte_flow_item_type item_type;
+
+ for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ item_type = item->type;
+ switch (item_type) {
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ eth_spec = (const struct rte_flow_item_eth *)item->spec;
+ eth_mask = (const struct rte_flow_item_eth *)item->mask;
+ /* Get the MAC info. */
+ if (!eth_spec || !eth_mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL,
+ "NULL ETH spec/mask");
+ return -EINVAL;
+ }
+
+ /* Mask bits of source MAC address must be full of 0.
+ * Mask bits of destination MAC address must be full
+ * of 1 or full of 0.
+ */
+ if (!is_zero_ether_addr(ð_mask->src) ||
+ (!is_zero_ether_addr(ð_mask->dst) &&
+ !is_broadcast_ether_addr(ð_mask->dst))) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL,
+ "Invalid MAC_addr mask");
+ return -EINVAL;
+ }
+
+ if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL,
+ "Invalid ethertype mask");
+ return -EINVAL;
+ }
+
+ /* If mask bits of destination MAC address
+ * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
+ */
+ if (is_broadcast_ether_addr(ð_mask->dst)) {
+ filter->mac_addr = eth_spec->dst;
+ filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
+ } else {
+ filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
+ }
+ filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
+
+ break;
+ default:
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int
+i40e_parse_ethertype_act(const struct rte_flow_action *actions,
+ struct rte_flow_error *error,
+ struct rte_eth_ethertype_filter *filter)
+{
+ const struct rte_flow_action *act;
+ const struct rte_flow_action_queue *act_q;
+ uint32_t index = 0;
+
+ /* Check if the first non-void action is QUEUE or DROP. */
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
+ act->type != RTE_FLOW_ACTION_TYPE_DROP) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "Not supported action.");
+ return -EINVAL;
+ }
+
+ if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
+ act_q = (const struct rte_flow_action_queue *)act->conf;
+ filter->queue = act_q->index;
+ } else {
+ filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
+ }
+
+ /* Check if the next non-void item is END */
+ index++;
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "Not supported action.");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+i40e_flow_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct rte_flow_item *items;
+ parse_filter_t parse_filter;
+ uint32_t item_num = 0; /* non-void item number of pattern*/
+ uint32_t i = 0;
+ int ret;
+
+ if (!pattern) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL, "NULL pattern.");
+ return -EINVAL;
+ }
+
+ if (!actions) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+ NULL, "NULL action.");
+ return -EINVAL;
+ }
+
+ memset(&cons_filter, 0, sizeof(cons_filter));
+
+ /* Get the non-void item number of pattern */
+ while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
+ if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
+ item_num++;
+ i++;
+ }
+ item_num++;
+
+ items = rte_zmalloc("i40e_pattern",
+ item_num * sizeof(struct rte_flow_item), 0);
+ if (!items) {
+ rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL, "No memory for PMD internal items.");
+ return -ENOMEM;
+ }
+
+ i40e_pattern_skip_void_item(items, pattern);
+
+ /* Find if there's matched parse filter function */
+ parse_filter = i40e_find_parse_filter_func(items);
+ if (!parse_filter) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "Unsupported pattern");
+ return -EINVAL;
+ }
+
+ ret = parse_filter(dev, attr, items, actions, error, &cons_filter);
+
+ rte_free(items);
+
+ return ret;
+}
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v2 08/17] net/i40e: parse flow director filter
2016-12-27 6:26 ` [dpdk-dev] [PATCH v2 00/17] net/i40e: Consistent filter API Beilei Xing
` (6 preceding siblings ...)
2016-12-27 6:26 ` [dpdk-dev] [PATCH v2 07/17] net/i40e: add flow validate function Beilei Xing
@ 2016-12-27 6:26 ` Beilei Xing
2016-12-27 6:26 ` [dpdk-dev] [PATCH v2 09/17] net/i40e: parse tunnel filter Beilei Xing
` (9 subsequent siblings)
17 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2016-12-27 6:26 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
This patch adds i40e_parse_fdir_filter to check if a rule
is a flow director rule according to the flow pattern,
and the function also gets the flow director info.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_ethdev.c | 56 +---
drivers/net/i40e/i40e_ethdev.h | 55 ++++
drivers/net/i40e/i40e_flow.c | 587 +++++++++++++++++++++++++++++++++++++++++
3 files changed, 643 insertions(+), 55 deletions(-)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 80024ed..6607390 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -139,60 +139,6 @@
#define I40E_DEFAULT_DCB_APP_NUM 1
#define I40E_DEFAULT_DCB_APP_PRIO 3
-#define I40E_INSET_NONE 0x00000000000000000ULL
-
-/* bit0 ~ bit 7 */
-#define I40E_INSET_DMAC 0x0000000000000001ULL
-#define I40E_INSET_SMAC 0x0000000000000002ULL
-#define I40E_INSET_VLAN_OUTER 0x0000000000000004ULL
-#define I40E_INSET_VLAN_INNER 0x0000000000000008ULL
-#define I40E_INSET_VLAN_TUNNEL 0x0000000000000010ULL
-
-/* bit 8 ~ bit 15 */
-#define I40E_INSET_IPV4_SRC 0x0000000000000100ULL
-#define I40E_INSET_IPV4_DST 0x0000000000000200ULL
-#define I40E_INSET_IPV6_SRC 0x0000000000000400ULL
-#define I40E_INSET_IPV6_DST 0x0000000000000800ULL
-#define I40E_INSET_SRC_PORT 0x0000000000001000ULL
-#define I40E_INSET_DST_PORT 0x0000000000002000ULL
-#define I40E_INSET_SCTP_VT 0x0000000000004000ULL
-
-/* bit 16 ~ bit 31 */
-#define I40E_INSET_IPV4_TOS 0x0000000000010000ULL
-#define I40E_INSET_IPV4_PROTO 0x0000000000020000ULL
-#define I40E_INSET_IPV4_TTL 0x0000000000040000ULL
-#define I40E_INSET_IPV6_TC 0x0000000000080000ULL
-#define I40E_INSET_IPV6_FLOW 0x0000000000100000ULL
-#define I40E_INSET_IPV6_NEXT_HDR 0x0000000000200000ULL
-#define I40E_INSET_IPV6_HOP_LIMIT 0x0000000000400000ULL
-#define I40E_INSET_TCP_FLAGS 0x0000000000800000ULL
-
-/* bit 32 ~ bit 47, tunnel fields */
-#define I40E_INSET_TUNNEL_IPV4_DST 0x0000000100000000ULL
-#define I40E_INSET_TUNNEL_IPV6_DST 0x0000000200000000ULL
-#define I40E_INSET_TUNNEL_DMAC 0x0000000400000000ULL
-#define I40E_INSET_TUNNEL_SRC_PORT 0x0000000800000000ULL
-#define I40E_INSET_TUNNEL_DST_PORT 0x0000001000000000ULL
-#define I40E_INSET_TUNNEL_ID 0x0000002000000000ULL
-
-/* bit 48 ~ bit 55 */
-#define I40E_INSET_LAST_ETHER_TYPE 0x0001000000000000ULL
-
-/* bit 56 ~ bit 63, Flex Payload */
-#define I40E_INSET_FLEX_PAYLOAD_W1 0x0100000000000000ULL
-#define I40E_INSET_FLEX_PAYLOAD_W2 0x0200000000000000ULL
-#define I40E_INSET_FLEX_PAYLOAD_W3 0x0400000000000000ULL
-#define I40E_INSET_FLEX_PAYLOAD_W4 0x0800000000000000ULL
-#define I40E_INSET_FLEX_PAYLOAD_W5 0x1000000000000000ULL
-#define I40E_INSET_FLEX_PAYLOAD_W6 0x2000000000000000ULL
-#define I40E_INSET_FLEX_PAYLOAD_W7 0x4000000000000000ULL
-#define I40E_INSET_FLEX_PAYLOAD_W8 0x8000000000000000ULL
-#define I40E_INSET_FLEX_PAYLOAD \
- (I40E_INSET_FLEX_PAYLOAD_W1 | I40E_INSET_FLEX_PAYLOAD_W2 | \
- I40E_INSET_FLEX_PAYLOAD_W3 | I40E_INSET_FLEX_PAYLOAD_W4 | \
- I40E_INSET_FLEX_PAYLOAD_W5 | I40E_INSET_FLEX_PAYLOAD_W6 | \
- I40E_INSET_FLEX_PAYLOAD_W7 | I40E_INSET_FLEX_PAYLOAD_W8)
-
/**
* Below are values for writing un-exposed registers suggested
* by silicon experts
@@ -7619,7 +7565,7 @@ i40e_validate_input_set(enum i40e_filter_pctype pctype,
}
/* default input set fields combination per pctype */
-static uint64_t
+uint64_t
i40e_get_default_input_set(uint16_t pctype)
{
static const uint64_t default_inset_table[] = {
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index bbe52f0..c9cea02 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -190,6 +190,60 @@ enum i40e_flxpld_layer_idx {
#define FLOATING_VEB_SUPPORTED_FW_MAJ 5
#define FLOATING_VEB_SUPPORTED_FW_MIN 0
+#define I40E_INSET_NONE 0x00000000000000000ULL
+
+/* bit0 ~ bit 7 */
+#define I40E_INSET_DMAC 0x0000000000000001ULL
+#define I40E_INSET_SMAC 0x0000000000000002ULL
+#define I40E_INSET_VLAN_OUTER 0x0000000000000004ULL
+#define I40E_INSET_VLAN_INNER 0x0000000000000008ULL
+#define I40E_INSET_VLAN_TUNNEL 0x0000000000000010ULL
+
+/* bit 8 ~ bit 15 */
+#define I40E_INSET_IPV4_SRC 0x0000000000000100ULL
+#define I40E_INSET_IPV4_DST 0x0000000000000200ULL
+#define I40E_INSET_IPV6_SRC 0x0000000000000400ULL
+#define I40E_INSET_IPV6_DST 0x0000000000000800ULL
+#define I40E_INSET_SRC_PORT 0x0000000000001000ULL
+#define I40E_INSET_DST_PORT 0x0000000000002000ULL
+#define I40E_INSET_SCTP_VT 0x0000000000004000ULL
+
+/* bit 16 ~ bit 31 */
+#define I40E_INSET_IPV4_TOS 0x0000000000010000ULL
+#define I40E_INSET_IPV4_PROTO 0x0000000000020000ULL
+#define I40E_INSET_IPV4_TTL 0x0000000000040000ULL
+#define I40E_INSET_IPV6_TC 0x0000000000080000ULL
+#define I40E_INSET_IPV6_FLOW 0x0000000000100000ULL
+#define I40E_INSET_IPV6_NEXT_HDR 0x0000000000200000ULL
+#define I40E_INSET_IPV6_HOP_LIMIT 0x0000000000400000ULL
+#define I40E_INSET_TCP_FLAGS 0x0000000000800000ULL
+
+/* bit 32 ~ bit 47, tunnel fields */
+#define I40E_INSET_TUNNEL_IPV4_DST 0x0000000100000000ULL
+#define I40E_INSET_TUNNEL_IPV6_DST 0x0000000200000000ULL
+#define I40E_INSET_TUNNEL_DMAC 0x0000000400000000ULL
+#define I40E_INSET_TUNNEL_SRC_PORT 0x0000000800000000ULL
+#define I40E_INSET_TUNNEL_DST_PORT 0x0000001000000000ULL
+#define I40E_INSET_TUNNEL_ID 0x0000002000000000ULL
+
+/* bit 48 ~ bit 55 */
+#define I40E_INSET_LAST_ETHER_TYPE 0x0001000000000000ULL
+
+/* bit 56 ~ bit 63, Flex Payload */
+#define I40E_INSET_FLEX_PAYLOAD_W1 0x0100000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W2 0x0200000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W3 0x0400000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W4 0x0800000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W5 0x1000000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W6 0x2000000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W7 0x4000000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W8 0x8000000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD \
+ (I40E_INSET_FLEX_PAYLOAD_W1 | I40E_INSET_FLEX_PAYLOAD_W2 | \
+ I40E_INSET_FLEX_PAYLOAD_W3 | I40E_INSET_FLEX_PAYLOAD_W4 | \
+ I40E_INSET_FLEX_PAYLOAD_W5 | I40E_INSET_FLEX_PAYLOAD_W6 | \
+ I40E_INSET_FLEX_PAYLOAD_W7 | I40E_INSET_FLEX_PAYLOAD_W8)
+
struct i40e_adapter;
/**
@@ -700,6 +754,7 @@ void i40e_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
struct rte_eth_rxq_info *qinfo);
void i40e_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
struct rte_eth_txq_info *qinfo);
+uint64_t i40e_get_default_input_set(uint16_t pctype);
/* I40E_DEV_PRIVATE_TO */
#define I40E_DEV_PRIVATE_TO_PF(adapter) \
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index bf451ef..6281de6 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -51,6 +51,10 @@
#include "base/i40e_type.h"
#include "i40e_ethdev.h"
+#define I40E_IPV4_TC_SHIFT 4
+#define I40E_IPV6_TC_MASK (0x00FF << I40E_IPV4_TC_SHIFT)
+#define I40E_IPV6_FRAG_HEADER 44
+
static int i40e_flow_validate(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
@@ -62,6 +66,12 @@ static int i40e_parse_ethertype_pattern(const struct rte_flow_item *pattern,
static int i40e_parse_ethertype_act(const struct rte_flow_action *actions,
struct rte_flow_error *error,
struct rte_eth_ethertype_filter *filter);
+static int i40e_parse_fdir_pattern(const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ struct rte_eth_fdir_filter *filter);
+static int i40e_parse_fdir_act(const struct rte_flow_action *actions,
+ struct rte_flow_error *error,
+ struct rte_eth_fdir_filter *filter);
static int i40e_parse_attr(const struct rte_flow_attr *attr,
struct rte_flow_error *error);
@@ -75,6 +85,107 @@ static enum rte_flow_item_type pattern_ethertype[] = {
RTE_FLOW_ITEM_TYPE_END,
};
+/* Pattern matched flow director filter */
+static enum rte_flow_item_type pattern_fdir_ipv4[] = {
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_ext[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_udp[] = {
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_udp_ext[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_tcp[] = {
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_tcp_ext[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_sctp[] = {
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_sctp_ext[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6[] = {
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_ext[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_udp[] = {
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_udp_ext[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_tcp[] = {
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_tcp_ext[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_sctp[] = {
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_sctp_ext[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
static int
i40e_parse_ethertype_filter(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
@@ -122,9 +233,76 @@ i40e_parse_ethertype_filter(struct rte_eth_dev *dev,
return ret;
}
+static int
+i40e_parse_fdir_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct rte_eth_fdir_filter *fdir_filter =
+ &filter->fdir_filter;
+ int ret;
+
+ ret = i40e_parse_fdir_pattern(pattern, error, fdir_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_parse_fdir_act(actions, error, fdir_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_parse_attr(attr, error);
+ if (ret)
+ return ret;
+
+ if (dev->data->dev_conf.fdir_conf.mode !=
+ RTE_FDIR_MODE_PERFECT) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "Check the mode in fdir_conf.");
+ return -ENOTSUP;
+ }
+
+ if (fdir_filter->action.rx_queue >= pf->dev_data->nb_rx_queues) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "Invalid queue ID for FDIR.");
+ return -EINVAL;
+ }
+ if (fdir_filter->input.flow_ext.is_vf &&
+ fdir_filter->input.flow_ext.dst_id >= pf->vf_num) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "Invalid VF ID for FDIR.");
+ return -EINVAL;
+ }
+ return 0;
+}
+
static struct i40e_valid_pattern i40e_supported_patterns[] = {
/* Ethertype */
{ pattern_ethertype, i40e_parse_ethertype_filter },
+ /* FDIR */
+ { pattern_fdir_ipv4, i40e_parse_fdir_filter },
+ { pattern_fdir_ipv4_ext, i40e_parse_fdir_filter },
+ { pattern_fdir_ipv4_udp, i40e_parse_fdir_filter },
+ { pattern_fdir_ipv4_udp_ext, i40e_parse_fdir_filter },
+ { pattern_fdir_ipv4_tcp, i40e_parse_fdir_filter },
+ { pattern_fdir_ipv4_tcp_ext, i40e_parse_fdir_filter },
+ { pattern_fdir_ipv4_sctp, i40e_parse_fdir_filter },
+ { pattern_fdir_ipv4_sctp_ext, i40e_parse_fdir_filter },
+ { pattern_fdir_ipv6, i40e_parse_fdir_filter },
+ { pattern_fdir_ipv6_ext, i40e_parse_fdir_filter },
+ { pattern_fdir_ipv6_udp, i40e_parse_fdir_filter },
+ { pattern_fdir_ipv6_udp_ext, i40e_parse_fdir_filter },
+ { pattern_fdir_ipv6_tcp, i40e_parse_fdir_filter },
+ { pattern_fdir_ipv6_tcp_ext, i40e_parse_fdir_filter },
+ { pattern_fdir_ipv6_sctp, i40e_parse_fdir_filter },
+ { pattern_fdir_ipv6_sctp_ext, i40e_parse_fdir_filter },
};
#define NEXT_ITEM_OF_ACTION(act, actions, index) \
@@ -369,6 +547,415 @@ i40e_parse_ethertype_act(const struct rte_flow_action *actions,
}
static int
+i40e_parse_fdir_pattern(const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ struct rte_eth_fdir_filter *filter)
+{
+ const struct rte_flow_item *item = pattern;
+ const struct rte_flow_item_eth *eth_spec, *eth_mask;
+ const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
+ const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
+ const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
+ const struct rte_flow_item_udp *udp_spec, *udp_mask;
+ const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
+ const struct rte_flow_item_vf *vf_spec;
+ uint32_t flow_type = RTE_ETH_FLOW_UNKNOWN;
+ enum i40e_filter_pctype pctype;
+ uint64_t input_set = I40E_INSET_NONE;
+ uint16_t flag_offset;
+ enum rte_flow_item_type item_type;
+ enum rte_flow_item_type l3;
+ uint32_t j;
+
+ for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ item_type = item->type;
+ switch (item_type) {
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ eth_spec = (const struct rte_flow_item_eth *)item->spec;
+ eth_mask = (const struct rte_flow_item_eth *)item->mask;
+ if (eth_spec || eth_mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL,
+ "Invalid ETH spec/mask");
+ return -EINVAL;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ l3 = RTE_FLOW_ITEM_TYPE_IPV4;
+ ipv4_spec =
+ (const struct rte_flow_item_ipv4 *)item->spec;
+ ipv4_mask =
+ (const struct rte_flow_item_ipv4 *)item->mask;
+ if (!ipv4_spec || !ipv4_mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL,
+ "NULL IPv4 spec/mask");
+ return -EINVAL;
+ }
+
+ /* Check IPv4 mask and update input set */
+ if (ipv4_mask->hdr.version_ihl ||
+ ipv4_mask->hdr.total_length ||
+ ipv4_mask->hdr.packet_id ||
+ ipv4_mask->hdr.fragment_offset ||
+ ipv4_mask->hdr.hdr_checksum) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL,
+ "Invalid IPv4 mask.");
+ return -EINVAL;
+ }
+
+ if (ipv4_mask->hdr.src_addr == UINT32_MAX)
+ input_set |= I40E_INSET_IPV4_SRC;
+ if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
+ input_set |= I40E_INSET_IPV4_DST;
+ if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
+ input_set |= I40E_INSET_IPV4_TOS;
+ if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
+ input_set |= I40E_INSET_IPV4_TTL;
+ if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
+ input_set |= I40E_INSET_IPV4_PROTO;
+
+ /* Get filter info */
+ flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_OTHER;
+ /* Check if it is fragment. */
+ flag_offset =
+ rte_be_to_cpu_16(ipv4_spec->hdr.fragment_offset);
+ if (flag_offset & IPV4_HDR_OFFSET_MASK ||
+ flag_offset & IPV4_HDR_MF_FLAG)
+ flow_type = RTE_ETH_FLOW_FRAG_IPV4;
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ l3 = RTE_FLOW_ITEM_TYPE_IPV6;
+ ipv6_spec =
+ (const struct rte_flow_item_ipv6 *)item->spec;
+ ipv6_mask =
+ (const struct rte_flow_item_ipv6 *)item->mask;
+ if (!ipv6_spec || !ipv6_mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL,
+ "NULL IPv6 spec/mask");
+ return -EINVAL;
+ }
+
+ /* Check IPv6 mask and update input set */
+ if (ipv6_mask->hdr.payload_len) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL,
+ "Invalid IPv6 mask");
+ return -EINVAL;
+ }
+
+ /* SRC and DST addresses of IPv6 shouldn't be masked */
+ for (j = 0; j < RTE_DIM(ipv6_mask->hdr.src_addr); j++) {
+ if (ipv6_mask->hdr.src_addr[j] != UINT8_MAX ||
+ ipv6_mask->hdr.dst_addr[j] != UINT8_MAX) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL,
+ "Invalid IPv6 mask");
+ return -EINVAL;
+ }
+ }
+
+ input_set |= I40E_INSET_IPV6_SRC;
+ input_set |= I40E_INSET_IPV6_DST;
+
+ if ((ipv6_mask->hdr.vtc_flow &
+ rte_cpu_to_be_16(I40E_IPV6_TC_MASK))
+ == rte_cpu_to_be_16(I40E_IPV6_TC_MASK))
+ input_set |= I40E_INSET_IPV6_TC;
+ if (ipv6_mask->hdr.proto == UINT8_MAX)
+ input_set |= I40E_INSET_IPV6_NEXT_HDR;
+ if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
+ input_set |= I40E_INSET_IPV6_HOP_LIMIT;
+
+ /* Get filter info */
+ filter->input.flow.ipv6_flow.tc =
+ (uint8_t)(ipv6_spec->hdr.vtc_flow <<
+ I40E_IPV4_TC_SHIFT);
+ filter->input.flow.ipv6_flow.proto =
+ ipv6_spec->hdr.proto;
+ filter->input.flow.ipv6_flow.hop_limits =
+ ipv6_spec->hdr.hop_limits;
+
+ rte_memcpy(filter->input.flow.ipv6_flow.src_ip,
+ ipv6_spec->hdr.src_addr, 16);
+ rte_memcpy(filter->input.flow.ipv6_flow.dst_ip,
+ ipv6_spec->hdr.dst_addr, 16);
+
+ /* Check if it is fragment. */
+ if (ipv6_spec->hdr.proto == I40E_IPV6_FRAG_HEADER)
+ flow_type = RTE_ETH_FLOW_FRAG_IPV6;
+ else
+ flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_OTHER;
+ break;
+ case RTE_FLOW_ITEM_TYPE_TCP:
+ tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
+ tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
+ if (!tcp_spec || !tcp_mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL,
+ "NULL TCP spec/mask");
+ return -EINVAL;
+ }
+
+ /* Check TCP mask and update input set */
+ if (tcp_mask->hdr.sent_seq ||
+ tcp_mask->hdr.recv_ack ||
+ tcp_mask->hdr.data_off ||
+ tcp_mask->hdr.tcp_flags ||
+ tcp_mask->hdr.rx_win ||
+ tcp_mask->hdr.cksum ||
+ tcp_mask->hdr.tcp_urp) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL,
+ "Invalid TCP mask");
+ return -EINVAL;
+ }
+
+ if (tcp_mask->hdr.src_port != UINT16_MAX ||
+ tcp_mask->hdr.dst_port != UINT16_MAX) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL,
+ "Invalid TCP mask");
+ return -EINVAL;
+ }
+
+ input_set |= I40E_INSET_SRC_PORT;
+ input_set |= I40E_INSET_DST_PORT;
+
+ /* Get filter info */
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+ flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
+ else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+ flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_TCP;
+
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
+ filter->input.flow.tcp4_flow.src_port =
+ tcp_spec->hdr.src_port;
+ filter->input.flow.tcp4_flow.dst_port =
+ tcp_spec->hdr.dst_port;
+ } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
+ filter->input.flow.tcp6_flow.src_port =
+ tcp_spec->hdr.src_port;
+ filter->input.flow.tcp6_flow.dst_port =
+ tcp_spec->hdr.dst_port;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ udp_spec = (const struct rte_flow_item_udp *)item->spec;
+ udp_mask = (const struct rte_flow_item_udp *)item->mask;
+ if (!udp_spec || !udp_mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL,
+ "NULL UDP spec/mask");
+ return -EINVAL;
+ }
+
+ /* Check UDP mask and update input set */
+ if (udp_mask->hdr.dgram_len ||
+ udp_mask->hdr.dgram_cksum) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL,
+ "Invalid UDP mask");
+ return -EINVAL;
+ }
+
+ if (udp_mask->hdr.src_port != UINT16_MAX ||
+ udp_mask->hdr.dst_port != UINT16_MAX) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL,
+ "Invalid UDP mask");
+ return -EINVAL;
+ }
+
+ input_set |= I40E_INSET_SRC_PORT;
+ input_set |= I40E_INSET_DST_PORT;
+
+ /* Get filter info */
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+ flow_type =
+ RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
+ else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+ flow_type =
+ RTE_ETH_FLOW_NONFRAG_IPV6_UDP;
+
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
+ filter->input.flow.udp4_flow.src_port =
+ udp_spec->hdr.src_port;
+ filter->input.flow.udp4_flow.dst_port =
+ udp_spec->hdr.dst_port;
+ } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
+ filter->input.flow.udp6_flow.src_port =
+ udp_spec->hdr.src_port;
+ filter->input.flow.udp6_flow.dst_port =
+ udp_spec->hdr.dst_port;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_SCTP:
+ sctp_spec =
+ (const struct rte_flow_item_sctp *)item->spec;
+ sctp_mask =
+ (const struct rte_flow_item_sctp *)item->mask;
+ if (!sctp_spec || !sctp_mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL,
+ "NULL SCTP spec/mask");
+ return -EINVAL;
+ }
+
+ /* Check SCTP mask and update input set */
+ if (sctp_mask->hdr.cksum) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL,
+ "Invalid UDP mask");
+ return -EINVAL;
+ }
+
+ if (sctp_mask->hdr.src_port != UINT16_MAX ||
+ sctp_mask->hdr.dst_port != UINT16_MAX ||
+ sctp_mask->hdr.tag != UINT32_MAX) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL,
+ "Invalid UDP mask");
+ return -EINVAL;
+ }
+ input_set |= I40E_INSET_SRC_PORT;
+ input_set |= I40E_INSET_DST_PORT;
+ input_set |= I40E_INSET_SCTP_VT;
+
+ /* Get filter info */
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+ flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_SCTP;
+ else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+ flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_SCTP;
+
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
+ filter->input.flow.sctp4_flow.src_port =
+ sctp_spec->hdr.src_port;
+ filter->input.flow.sctp4_flow.dst_port =
+ sctp_spec->hdr.dst_port;
+ filter->input.flow.sctp4_flow.verify_tag =
+ sctp_spec->hdr.tag;
+ } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
+ filter->input.flow.sctp6_flow.src_port =
+ sctp_spec->hdr.src_port;
+ filter->input.flow.sctp6_flow.dst_port =
+ sctp_spec->hdr.dst_port;
+ filter->input.flow.sctp6_flow.verify_tag =
+ sctp_spec->hdr.tag;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_VF:
+ vf_spec = (const struct rte_flow_item_vf *)item->spec;
+ filter->input.flow_ext.is_vf = 1;
+ filter->input.flow_ext.dst_id = vf_spec->id;
+ break;
+ default:
+ break;
+ }
+ }
+
+ pctype = i40e_flowtype_to_pctype(flow_type);
+ if (pctype == 0 || pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "Unsupported flow type");
+ return -EINVAL;
+ }
+
+ if (input_set != i40e_get_default_input_set(pctype)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "Invalid input set.");
+ return -EINVAL;
+ }
+ filter->input.flow_type = flow_type;
+
+ return 0;
+}
+
+/* Parse to get the action info of a FDIR filter.
+ * The first non-void action must be QUEUE or DROP; an optional MARK
+ * may follow, and the list must then terminate with END.
+ */
+static int
+i40e_parse_fdir_act(const struct rte_flow_action *actions,
+ struct rte_flow_error *error,
+ struct rte_eth_fdir_filter *filter)
+{
+ const struct rte_flow_action *act;
+ const struct rte_flow_action_queue *act_q;
+ const struct rte_flow_action_mark *mark_spec;
+ uint32_t index = 0;
+
+ if (!actions) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+ NULL, "NULL action.");
+ return -EINVAL;
+ }
+
+ /* Check if the first non-void action is QUEUE or DROP. */
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
+ act->type != RTE_FLOW_ACTION_TYPE_DROP) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "Invalid action.");
+ return -EINVAL;
+ }
+
+ filter->action.flex_off = 0;
+ if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
+ /* Fix: only dereference act->conf for QUEUE; DROP carries a
+ * NULL conf pointer and reading act_q->index crashed here.
+ */
+ act_q = (const struct rte_flow_action_queue *)act->conf;
+ filter->action.behavior = RTE_ETH_FDIR_ACCEPT;
+ filter->action.rx_queue = act_q->index;
+ } else {
+ filter->action.behavior = RTE_ETH_FDIR_REJECT;
+ filter->action.rx_queue = 0;
+ }
+
+ filter->action.report_status = RTE_ETH_FDIR_REPORT_ID;
+
+ /* Check if the next non-void item is MARK or END. */
+ index++;
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_MARK &&
+ act->type != RTE_FLOW_ACTION_TYPE_END) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "Invalid action.");
+ return -EINVAL;
+ }
+
+ if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
+ mark_spec = (const struct rte_flow_action_mark *)act->conf;
+ filter->soft_id = mark_spec->id;
+
+ /* Check if the next non-void item is END */
+ index++;
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "Invalid action.");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int
i40e_flow_validate(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v2 09/17] net/i40e: parse tunnel filter
2016-12-27 6:26 ` [dpdk-dev] [PATCH v2 00/17] net/i40e: Consistent filter API Beilei Xing
` (7 preceding siblings ...)
2016-12-27 6:26 ` [dpdk-dev] [PATCH v2 08/17] net/i40e: parse flow director filter Beilei Xing
@ 2016-12-27 6:26 ` Beilei Xing
2016-12-27 6:26 ` [dpdk-dev] [PATCH v2 10/17] net/i40e: add flow create function Beilei Xing
` (8 subsequent siblings)
17 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2016-12-27 6:26 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
This patch adds i40e_parse_tunnel_filter to check if
a rule is a tunnel rule according to items of the flow
pattern, and the function also gets the tunnel info.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_flow.c | 390 +++++++++++++++++++++++++++++++++++++++++++
1 file changed, 390 insertions(+)
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index 6281de6..88b6613 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -54,6 +54,8 @@
#define I40E_IPV4_TC_SHIFT 4
#define I40E_IPV6_TC_MASK (0x00FF << I40E_IPV4_TC_SHIFT)
#define I40E_IPV6_FRAG_HEADER 44
+#define I40E_TENANT_ARRAY_NUM 3
+#define I40E_TCI_MASK 0x0FFF
static int i40e_flow_validate(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
@@ -72,6 +74,12 @@ static int i40e_parse_fdir_pattern(const struct rte_flow_item *pattern,
static int i40e_parse_fdir_act(const struct rte_flow_action *actions,
struct rte_flow_error *error,
struct rte_eth_fdir_filter *filter);
+static int i40e_parse_tunnel_pattern(const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ struct rte_eth_tunnel_filter_conf *filter);
+static int i40e_parse_tunnel_act(const struct rte_flow_action *actions,
+ struct rte_flow_error *error,
+ struct rte_eth_tunnel_filter_conf *filter);
static int i40e_parse_attr(const struct rte_flow_attr *attr,
struct rte_flow_error *error);
@@ -186,6 +194,45 @@ static enum rte_flow_item_type pattern_fdir_ipv6_sctp_ext[] = {
RTE_FLOW_ITEM_TYPE_END,
};
+/* Pattern matched tunnel filter */
+static enum rte_flow_item_type pattern_vxlan_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_VXLAN,
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_vxlan_2[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_VXLAN,
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_vxlan_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_VXLAN,
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_vxlan_4[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_VXLAN,
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
static int
i40e_parse_ethertype_filter(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
@@ -283,6 +330,41 @@ i40e_parse_fdir_filter(struct rte_eth_dev *dev,
return 0;
}
+static int
+i40e_parse_tunnel_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct rte_eth_tunnel_filter_conf *tunnel_filter =
+ &filter->tunnel_filter;
+ int ret;
+
+ ret = i40e_parse_tunnel_pattern(pattern, error, tunnel_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_parse_tunnel_act(actions, error, tunnel_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_parse_attr(attr, error);
+ if (ret)
+ return ret;
+
+ if (tunnel_filter->queue_id >= pf->dev_data->nb_rx_queues) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "Invalid queue ID for tunnel filter");
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
static struct i40e_valid_pattern i40e_supported_patterns[] = {
/* Ethertype */
{ pattern_ethertype, i40e_parse_ethertype_filter },
@@ -303,6 +385,11 @@ static struct i40e_valid_pattern i40e_supported_patterns[] = {
{ pattern_fdir_ipv6_tcp_ext, i40e_parse_fdir_filter },
{ pattern_fdir_ipv6_sctp, i40e_parse_fdir_filter },
{ pattern_fdir_ipv6_sctp_ext, i40e_parse_fdir_filter },
+ /* tunnel */
+ { pattern_vxlan_1, i40e_parse_tunnel_filter },
+ { pattern_vxlan_2, i40e_parse_tunnel_filter },
+ { pattern_vxlan_3, i40e_parse_tunnel_filter },
+ { pattern_vxlan_4, i40e_parse_tunnel_filter },
};
#define NEXT_ITEM_OF_ACTION(act, actions, index) \
@@ -955,6 +1042,309 @@ i40e_parse_fdir_act(const struct rte_flow_action *actions,
return 0;
}
+/* Parse to get the action info of a tunnel filter.
+ * The action list must be exactly one QUEUE action followed by END
+ * (void actions are skipped by NEXT_ITEM_OF_ACTION); the queue index
+ * is recorded in filter->queue_id.
+ * Returns 0 on success, -EINVAL (with @error set) otherwise.
+ */
+static int i40e_parse_tunnel_act(const struct rte_flow_action *actions,
+ struct rte_flow_error *error,
+ struct rte_eth_tunnel_filter_conf *filter)
+{
+ const struct rte_flow_action *act;
+ const struct rte_flow_action_queue *act_q;
+ uint32_t index = 0;
+
+ if (!actions) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+ NULL, "NULL action.");
+ return -EINVAL;
+ }
+
+ /* Check if the first non-void action is QUEUE. */
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "Not supported action.");
+ return -EINVAL;
+ }
+
+ act_q = (const struct rte_flow_action_queue *)act->conf;
+ filter->queue_id = act_q->index;
+
+ /* Check if the next non-void item is END */
+ index++;
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "Not supported action.");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+
+/* Classify a VNI (tenant id) mask.  Every byte must be either 0xFF
+ * (field not masked out) or 0x00 (field fully masked out), with no
+ * mixing of the two.
+ * Returns 0 for an all-ones mask, 1 for an all-zero mask, and
+ * -EINVAL for any other combination.
+ */
+static int
+i40e_check_tenant_id_mask(const uint8_t *mask)
+{
+	uint32_t i;
+
+	/* First byte decides the expected value for the whole array. */
+	if (mask[0] != UINT8_MAX && mask[0] != 0)
+		return -EINVAL;
+
+	for (i = 1; i < I40E_TENANT_ARRAY_NUM; i++) {
+		if (mask[i] != mask[0])
+			return -EINVAL;
+	}
+
+	return mask[0] == 0;
+}
+
+
+/* Parse a VXLAN tunnel pattern
+ * (outer ETH / IPv4|IPv6 / UDP / VXLAN / inner ETH [/ VLAN])
+ * and fill @filter accordingly.
+ * Returns 0 on success, -EINVAL (with @error set) when the pattern is
+ * not supported by the tunnel filter.
+ */
+static int
+i40e_parse_vxlan_pattern(const struct rte_flow_item *pattern,
+			 struct rte_flow_error *error,
+			 struct rte_eth_tunnel_filter_conf *filter)
+{
+	const struct rte_flow_item *item = pattern;
+	const struct rte_flow_item_eth *eth_spec;
+	const struct rte_flow_item_eth *eth_mask;
+	/* Initialize to NULL: these are read after the loop but are only
+	 * assigned when the corresponding item appears in the pattern;
+	 * the original code read them uninitialized (UB) for patterns
+	 * missing those items.
+	 */
+	const struct rte_flow_item_eth *o_eth_spec = NULL;
+	const struct rte_flow_item_eth *o_eth_mask = NULL;
+	const struct rte_flow_item_vxlan *vxlan_spec = NULL;
+	const struct rte_flow_item_vxlan *vxlan_mask = NULL;
+	const struct rte_flow_item_eth *i_eth_spec = NULL;
+	const struct rte_flow_item_eth *i_eth_mask = NULL;
+	const struct rte_flow_item_vlan *vlan_spec = NULL;
+	const struct rte_flow_item_vlan *vlan_mask = NULL;
+	bool is_vni_masked = 0;
+	int vni_mask_ret;
+	enum rte_flow_item_type item_type;
+	bool vxlan_flag = 0; /* true once the VXLAN item has been seen */
+
+	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+		item_type = item->type;
+		switch (item_type) {
+		case RTE_FLOW_ITEM_TYPE_ETH:
+			eth_spec = (const struct rte_flow_item_eth *)item->spec;
+			eth_mask = (const struct rte_flow_item_eth *)item->mask;
+			/* spec and mask must be given together or not at all */
+			if ((!eth_spec && eth_mask) ||
+			    (eth_spec && !eth_mask)) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   NULL,
+						   "Invalid ether spec/mask");
+				return -EINVAL;
+			}
+
+			if (eth_spec && eth_mask) {
+				/* DST address of inner MAC shouldn't be masked.
+				 * SRC address of Inner MAC should be masked.
+				 */
+				if (!is_broadcast_ether_addr(&eth_mask->dst) ||
+				    !is_zero_ether_addr(&eth_mask->src) ||
+				    eth_mask->type) {
+					rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   NULL,
+						   "Invalid ether spec/mask");
+					return -EINVAL;
+				}
+
+				/* Before the VXLAN item this is the outer
+				 * MAC, after it the inner MAC.
+				 */
+				if (!vxlan_flag)
+					rte_memcpy(&filter->outer_mac,
+						   &eth_spec->dst,
+						   ETHER_ADDR_LEN);
+				else
+					rte_memcpy(&filter->inner_mac,
+						   &eth_spec->dst,
+						   ETHER_ADDR_LEN);
+			}
+
+			if (!vxlan_flag) {
+				o_eth_spec = eth_spec;
+				o_eth_mask = eth_mask;
+			} else {
+				i_eth_spec = eth_spec;
+				i_eth_mask = eth_mask;
+			}
+
+			break;
+		case RTE_FLOW_ITEM_TYPE_VLAN:
+			/* Only an inner VLAN (i.e. after the VXLAN item)
+			 * is supported.  Fix of the original code, which
+			 * had a missing-braces bug here: it always
+			 * returned -EINVAL in this branch but only set
+			 * @error when spec/mask were non-NULL.
+			 */
+			if (!vxlan_flag) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   NULL,
+						   "Invalid vlan item");
+				return -EINVAL;
+			}
+			vlan_spec =
+				(const struct rte_flow_item_vlan *)item->spec;
+			vlan_mask =
+				(const struct rte_flow_item_vlan *)item->mask;
+			if (!(vlan_spec && vlan_mask)) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   NULL,
+						   "Invalid vlan item");
+				return -EINVAL;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_IPV4:
+		case RTE_FLOW_ITEM_TYPE_IPV6:
+		case RTE_FLOW_ITEM_TYPE_UDP:
+			/* IPv4/IPv6/UDP are used to describe protocol,
+			 * spec and mask should be NULL.
+			 */
+			if (item->spec || item->mask) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   NULL,
+						   "Invalid protocol item");
+				return -EINVAL;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_VXLAN:
+			vxlan_spec =
+				(const struct rte_flow_item_vxlan *)item->spec;
+			vxlan_mask =
+				(const struct rte_flow_item_vxlan *)item->mask;
+			/* Check if VXLAN item is used to describe protocol.
+			 * If yes, both spec and mask should be NULL.
+			 * If no, either spec or mask shouldn't be NULL.
+			 */
+			if ((!vxlan_spec && vxlan_mask) ||
+			    (vxlan_spec && !vxlan_mask)) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   NULL,
+						   "Invalid VXLAN item");
+				return -EINVAL;
+			}
+
+			/* Check if VNI is masked.  Use an int temporary:
+			 * assigning the helper's return straight into the
+			 * bool (as the original did) turns -EINVAL into
+			 * 'true', so the error check below never fired.
+			 */
+			if (vxlan_mask) {
+				vni_mask_ret =
+					i40e_check_tenant_id_mask(
+							vxlan_mask->vni);
+				if (vni_mask_ret < 0) {
+					rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   NULL,
+						   "Invalid VNI mask");
+					return -EINVAL;
+				}
+				is_vni_masked = vni_mask_ret;
+			}
+			vxlan_flag = 1;
+			break;
+		default:
+			break;
+		}
+	}
+
+	/* Combine what was seen into a tunnel filter type. */
+	if (vlan_spec && vlan_mask &&
+	    (vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK))) {
+		/* If there's inner vlan */
+		filter->inner_vlan = rte_be_to_cpu_16(vlan_spec->tci)
+			& I40E_TCI_MASK;
+		if (vxlan_spec && vxlan_mask && !is_vni_masked) {
+			/* If there's vxlan */
+			rte_memcpy(&filter->tenant_id, vxlan_spec->vni,
+				   RTE_DIM(vxlan_spec->vni));
+			if (!o_eth_spec && !o_eth_mask &&
+			    i_eth_spec && i_eth_mask) {
+				filter->filter_type =
+					RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID;
+			} else {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   NULL,
+						   "Invalid filter type");
+				return -EINVAL;
+			}
+		} else if (!vxlan_spec && !vxlan_mask) {
+			/* If there's no vxlan */
+			if (!o_eth_spec && !o_eth_mask &&
+			    i_eth_spec && i_eth_mask) {
+				filter->filter_type =
+					RTE_TUNNEL_FILTER_IMAC_IVLAN;
+			} else {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   NULL,
+						   "Invalid filter type");
+				return -EINVAL;
+			}
+		} else {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   NULL,
+					   "Invalid filter type");
+			return -EINVAL;
+		}
+	} else if ((!vlan_spec && !vlan_mask) ||
+		   (vlan_spec && vlan_mask && vlan_mask->tci == 0x0)) {
+		/* If there's no inner vlan */
+		if (vxlan_spec && vxlan_mask && !is_vni_masked) {
+			/* If there's vxlan */
+			rte_memcpy(&filter->tenant_id, vxlan_spec->vni,
+				   RTE_DIM(vxlan_spec->vni));
+			if (!o_eth_spec && !o_eth_mask &&
+			    i_eth_spec && i_eth_mask) {
+				filter->filter_type =
+					RTE_TUNNEL_FILTER_IMAC_TENID;
+			} else if (o_eth_spec && o_eth_mask &&
+				   i_eth_spec && i_eth_mask) {
+				filter->filter_type =
+					RTE_TUNNEL_FILTER_OMAC_TENID_IMAC;
+			} else {
+				/* Fix: the original fell through here and
+				 * returned 0 with filter_type unset.
+				 */
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   NULL,
+						   "Invalid filter type");
+				return -EINVAL;
+			}
+		} else if (!vxlan_spec && !vxlan_mask) {
+			/* If there's no vxlan */
+			if (!o_eth_spec && !o_eth_mask &&
+			    i_eth_spec && i_eth_mask) {
+				filter->filter_type = ETH_TUNNEL_FILTER_IMAC;
+			} else {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+						   "Invalid filter type");
+				return -EINVAL;
+			}
+		} else {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+					   "Invalid filter type");
+			return -EINVAL;
+		}
+	} else {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+				   "Not supported by tunnel filter.");
+		return -EINVAL;
+	}
+
+	filter->tunnel_type = RTE_TUNNEL_TYPE_VXLAN;
+
+	return 0;
+}
+
+
+/* Dispatch tunnel pattern parsing to the per-tunnel-type parser.
+ * Only VXLAN tunnels are recognized for now, so this simply forwards
+ * to i40e_parse_vxlan_pattern().
+ */
+static int
+i40e_parse_tunnel_pattern(const struct rte_flow_item *pattern,
+			  struct rte_flow_error *error,
+			  struct rte_eth_tunnel_filter_conf *filter)
+{
+	return i40e_parse_vxlan_pattern(pattern, error, filter);
+}
+
+
static int
i40e_flow_validate(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v2 10/17] net/i40e: add flow create function
2016-12-27 6:26 ` [dpdk-dev] [PATCH v2 00/17] net/i40e: Consistent filter API Beilei Xing
` (8 preceding siblings ...)
2016-12-27 6:26 ` [dpdk-dev] [PATCH v2 09/17] net/i40e: parse tunnel filter Beilei Xing
@ 2016-12-27 6:26 ` Beilei Xing
2016-12-27 6:26 ` [dpdk-dev] [PATCH v2 11/17] net/i40e: add flow destroy function Beilei Xing
` (7 subsequent siblings)
17 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2016-12-27 6:26 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
This patch adds i40e_flow_create function to create a
rule. It will check if a flow matches ethertype filter
or flow director filter or tunnel filter. If the flow
matches some kind of filter, then set the filter to HW.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_ethdev.c | 9 +++--
drivers/net/i40e/i40e_ethdev.h | 21 ++++++++++++
drivers/net/i40e/i40e_fdir.c | 2 +-
drivers/net/i40e/i40e_flow.c | 76 ++++++++++++++++++++++++++++++++++++++++++
4 files changed, 102 insertions(+), 6 deletions(-)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 6607390..f8d41b4 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -353,9 +353,6 @@ static int i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
static int i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
struct rte_eth_udp_tunnel *udp_tunnel);
static void i40e_filter_input_set_init(struct i40e_pf *pf);
-static int i40e_ethertype_filter_set(struct i40e_pf *pf,
- struct rte_eth_ethertype_filter *filter,
- bool add);
static int i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
enum rte_filter_op filter_op,
void *arg);
@@ -1242,6 +1239,8 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
goto err_fdir_hash_map_alloc;
}
+ TAILQ_INIT(&pf->flow_list);
+
return 0;
err_fdir_hash_map_alloc:
@@ -6616,7 +6615,7 @@ i40e_sw_tunnel_filter_del(struct i40e_pf *pf,
return 0;
}
-static int
+int
i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
struct rte_eth_tunnel_filter_conf *tunnel_filter,
uint8_t add)
@@ -8254,7 +8253,7 @@ i40e_sw_ethertype_filter_del(struct i40e_pf *pf,
* Configure ethertype filter, which can director packet by filtering
* with mac address and ether_type or only ether_type
*/
-static int
+int
i40e_ethertype_filter_set(struct i40e_pf *pf,
struct rte_eth_ethertype_filter *filter,
bool add)
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index c9cea02..6b6858f 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -536,6 +536,17 @@ struct i40e_mirror_rule {
TAILQ_HEAD(i40e_mirror_rule_list, i40e_mirror_rule);
/*
+ * Struct to store flow created.
+ */
+struct i40e_flow {
+ TAILQ_ENTRY(i40e_flow) node;
+ enum rte_filter_type filter_type;
+ void *rule;
+};
+
+TAILQ_HEAD(i40e_flow_list, i40e_flow);
+
+/*
* Structure to store private data specific for PF instance.
*/
struct i40e_pf {
@@ -592,6 +603,7 @@ struct i40e_pf {
bool floating_veb; /* The flag to use the floating VEB */
/* The floating enable flag for the specific VF */
bool floating_veb_list[I40E_MAX_VF];
+ struct i40e_flow_list flow_list;
};
enum pending_msg {
@@ -755,6 +767,15 @@ void i40e_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
void i40e_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
struct rte_eth_txq_info *qinfo);
uint64_t i40e_get_default_input_set(uint16_t pctype);
+int i40e_ethertype_filter_set(struct i40e_pf *pf,
+ struct rte_eth_ethertype_filter *filter,
+ bool add);
+int i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
+ const struct rte_eth_fdir_filter *filter,
+ bool add);
+int i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
+ struct rte_eth_tunnel_filter_conf *tunnel_filter,
+ uint8_t add);
/* I40E_DEV_PRIVATE_TO */
#define I40E_DEV_PRIVATE_TO_PF(adapter) \
diff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c
index 0bed525..6c1bb18 100644
--- a/drivers/net/i40e/i40e_fdir.c
+++ b/drivers/net/i40e/i40e_fdir.c
@@ -1096,7 +1096,7 @@ i40e_sw_fdir_filter_del(struct i40e_pf *pf, struct i40e_fdir_filter *filter)
* @filter: fdir filter entry
* @add: 0 - delete, 1 - add
*/
-static int
+int
i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
const struct rte_eth_fdir_filter *filter,
bool add)
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index 88b6613..6a8c3a7 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -62,6 +62,11 @@ static int i40e_flow_validate(struct rte_eth_dev *dev,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
struct rte_flow_error *error);
+static struct rte_flow *i40e_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error);
static int i40e_parse_ethertype_pattern(const struct rte_flow_item *pattern,
struct rte_flow_error *error,
struct rte_eth_ethertype_filter *filter);
@@ -85,8 +90,11 @@ static int i40e_parse_attr(const struct rte_flow_attr *attr,
const struct rte_flow_ops i40e_flow_ops = {
.validate = i40e_flow_validate,
+ .create = i40e_flow_create,
};
+enum rte_filter_type cons_filter_type = RTE_ETH_FILTER_NONE;
+
/* Pattern matched ethertype filter */
static enum rte_flow_item_type pattern_ethertype[] = {
RTE_FLOW_ITEM_TYPE_ETH,
@@ -258,6 +266,8 @@ i40e_parse_ethertype_filter(struct rte_eth_dev *dev,
if (ret)
return ret;
+ cons_filter_type = RTE_ETH_FILTER_ETHERTYPE;
+
if (ethertype_filter->queue >= pf->dev_data->nb_rx_queues) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
@@ -305,6 +315,8 @@ i40e_parse_fdir_filter(struct rte_eth_dev *dev,
if (ret)
return ret;
+ cons_filter_type = RTE_ETH_FILTER_FDIR;
+
if (dev->data->dev_conf.fdir_conf.mode !=
RTE_FDIR_MODE_PERFECT) {
rte_flow_error_set(error, ENOTSUP,
@@ -355,6 +367,8 @@ i40e_parse_tunnel_filter(struct rte_eth_dev *dev,
if (ret)
return ret;
+ cons_filter_type = RTE_ETH_FILTER_TUNNEL;
+
if (tunnel_filter->queue_id >= pf->dev_data->nb_rx_queues) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
@@ -1406,3 +1420,65 @@ i40e_flow_validate(struct rte_eth_dev *dev,
return ret;
}
+
+/* Create a flow rule and program the matching filter into HW.
+ * The pattern/actions are validated first; on success the new flow is
+ * appended to pf->flow_list and returned, with flow->rule pointing at
+ * the SW filter entry just inserted by the corresponding set function
+ * (the most recently appended list element).
+ * Returns NULL with @error set on any failure.
+ */
+static struct rte_flow *
+i40e_flow_create(struct rte_eth_dev *dev,
+		 const struct rte_flow_attr *attr,
+		 const struct rte_flow_item pattern[],
+		 const struct rte_flow_action actions[],
+		 struct rte_flow_error *error)
+{
+	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	struct i40e_flow *flow;
+	int ret;
+
+	/* Validate before allocating: the original code allocated first
+	 * and leaked the allocation when validation failed.
+	 */
+	ret = i40e_flow_validate(dev, attr, pattern, actions, error);
+	if (ret < 0)
+		return NULL;
+
+	flow = rte_zmalloc("i40e_flow", sizeof(struct i40e_flow), 0);
+	if (!flow) {
+		rte_flow_error_set(error, ENOMEM,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Failed to allocate memory");
+		return NULL;
+	}
+
+	/* cons_filter_type and cons_filter were filled by the parse
+	 * functions invoked from i40e_flow_validate() above.
+	 */
+	switch (cons_filter_type) {
+	case RTE_ETH_FILTER_ETHERTYPE:
+		ret = i40e_ethertype_filter_set(pf,
+				&cons_filter.ethertype_filter, 1);
+		if (ret)
+			goto free_flow;
+		flow->rule = TAILQ_LAST(&pf->ethertype.ethertype_list,
+					i40e_ethertype_filter_list);
+		break;
+	case RTE_ETH_FILTER_FDIR:
+		ret = i40e_add_del_fdir_filter(dev,
+				&cons_filter.fdir_filter, 1);
+		if (ret)
+			goto free_flow;
+		flow->rule = TAILQ_LAST(&pf->fdir.fdir_list,
+					i40e_fdir_filter_list);
+		break;
+	case RTE_ETH_FILTER_TUNNEL:
+		ret = i40e_dev_tunnel_filter_set(pf,
+				&cons_filter.tunnel_filter, 1);
+		if (ret)
+			goto free_flow;
+		flow->rule = TAILQ_LAST(&pf->tunnel.tunnel_list,
+					i40e_tunnel_filter_list);
+		break;
+	default:
+		goto free_flow;
+	}
+
+	flow->filter_type = cons_filter_type;
+	TAILQ_INSERT_TAIL(&pf->flow_list, flow, node);
+	return (struct rte_flow *)flow;
+
+free_flow:
+	/* The rte_flow API requires @error to be set whenever NULL is
+	 * returned; the original code left it untouched on this path.
+	 */
+	rte_flow_error_set(error, EINVAL,
+			   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+			   "Failed to create flow.");
+	rte_free(flow);
+	return NULL;
+}
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v2 11/17] net/i40e: add flow destroy function
2016-12-27 6:26 ` [dpdk-dev] [PATCH v2 00/17] net/i40e: Consistent filter API Beilei Xing
` (9 preceding siblings ...)
2016-12-27 6:26 ` [dpdk-dev] [PATCH v2 10/17] net/i40e: add flow create function Beilei Xing
@ 2016-12-27 6:26 ` Beilei Xing
2016-12-27 6:26 ` [dpdk-dev] [PATCH v2 12/17] net/i40e: destroy ethertype filter Beilei Xing
` (6 subsequent siblings)
17 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2016-12-27 6:26 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
This patch adds i40e_flow_destroy function to destroy
a flow for users.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_flow.c | 29 +++++++++++++++++++++++++++++
1 file changed, 29 insertions(+)
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index 6a8c3a7..2a61c4f 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -67,6 +67,9 @@ static struct rte_flow *i40e_flow_create(struct rte_eth_dev *dev,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
struct rte_flow_error *error);
+static int i40e_flow_destroy(__rte_unused struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ struct rte_flow_error *error);
static int i40e_parse_ethertype_pattern(const struct rte_flow_item *pattern,
struct rte_flow_error *error,
struct rte_eth_ethertype_filter *filter);
@@ -91,6 +94,7 @@ static int i40e_parse_attr(const struct rte_flow_attr *attr,
const struct rte_flow_ops i40e_flow_ops = {
.validate = i40e_flow_validate,
.create = i40e_flow_create,
+ .destroy = i40e_flow_destroy,
};
enum rte_filter_type cons_filter_type = RTE_ETH_FILTER_NONE;
@@ -1482,3 +1486,28 @@ i40e_flow_create(struct rte_eth_dev *dev,
rte_free(flow);
return NULL;
}
+
+/* Destroy a flow previously returned by i40e_flow_create().
+ * No filter types are handled yet, so every request currently fails
+ * with -EINVAL; per-type destroy support is added by later patches.
+ */
+static int
+i40e_flow_destroy(__rte_unused struct rte_eth_dev *dev,
+		  struct rte_flow *flow,
+		  struct rte_flow_error *error)
+{
+	struct i40e_flow *pmd_flow = (struct i40e_flow *)flow;
+	enum rte_filter_type filter_type = pmd_flow->filter_type;
+	int ret = -EINVAL;
+
+	/* Equivalent to the original switch whose only arm was the
+	 * default case: log, fail, and report through @error.
+	 */
+	PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
+		    filter_type);
+
+	rte_flow_error_set(error, EINVAL,
+			   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+			   "Failed to destroy flow.");
+
+	return ret;
+}
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v2 12/17] net/i40e: destroy ethertype filter
2016-12-27 6:26 ` [dpdk-dev] [PATCH v2 00/17] net/i40e: Consistent filter API Beilei Xing
` (10 preceding siblings ...)
2016-12-27 6:26 ` [dpdk-dev] [PATCH v2 11/17] net/i40e: add flow destroy function Beilei Xing
@ 2016-12-27 6:26 ` Beilei Xing
2016-12-28 3:30 ` Wu, Jingjing
2016-12-28 4:56 ` Tiwei Bie
2016-12-27 6:26 ` [dpdk-dev] [PATCH v2 13/17] net/i40e: destroy tunnel filter Beilei Xing
` (5 subsequent siblings)
17 siblings, 2 replies; 175+ messages in thread
From: Beilei Xing @ 2016-12-27 6:26 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
This patch adds i40e_dev_destroy_ethertype_filter function
to destroy an ethertype filter for users.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_ethdev.c | 10 ++-------
drivers/net/i40e/i40e_ethdev.h | 5 +++++
drivers/net/i40e/i40e_flow.c | 51 ++++++++++++++++++++++++++++++++++++++++--
3 files changed, 56 insertions(+), 10 deletions(-)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index f8d41b4..fbab2a1 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -408,14 +408,8 @@ static int i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int i40e_ethertype_filter_convert(
const struct rte_eth_ethertype_filter *input,
struct i40e_ethertype_filter *filter);
-static struct i40e_ethertype_filter *
-i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_rule *ethertype_rule,
- const struct i40e_ethertype_filter_input *input);
static int i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
struct i40e_ethertype_filter *filter);
-static int i40e_sw_ethertype_filter_del(struct i40e_pf *pf,
- struct i40e_ethertype_filter *filter);
-
static int i40e_tunnel_filter_convert(
struct i40e_aqc_add_remove_cloud_filters_element_data *cld_filter,
struct i40e_tunnel_filter *tunnel_filter);
@@ -8191,7 +8185,7 @@ i40e_ethertype_filter_convert(const struct rte_eth_ethertype_filter *input,
}
/* Check if there exists the ehtertype filter */
-static struct i40e_ethertype_filter *
+struct i40e_ethertype_filter *
i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_rule *ethertype_rule,
const struct i40e_ethertype_filter_input *input)
{
@@ -8227,7 +8221,7 @@ i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
}
/* Delete ethertype filter in SW list */
-static int
+int
i40e_sw_ethertype_filter_del(struct i40e_pf *pf,
struct i40e_ethertype_filter *filter)
{
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 6b6858f..997527a 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -776,6 +776,11 @@ int i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
int i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
struct rte_eth_tunnel_filter_conf *tunnel_filter,
uint8_t add);
+struct i40e_ethertype_filter *
+i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_rule *ethertype_rule,
+ const struct i40e_ethertype_filter_input *input);
+int i40e_sw_ethertype_filter_del(struct i40e_pf *pf,
+ struct i40e_ethertype_filter *filter);
/* I40E_DEV_PRIVATE_TO */
#define I40E_DEV_PRIVATE_TO_PF(adapter) \
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index 2a61c4f..732c411 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -49,6 +49,7 @@
#include "i40e_logs.h"
#include "base/i40e_type.h"
+#include "base/i40e_prototype.h"
#include "i40e_ethdev.h"
#define I40E_IPV4_TC_SHIFT 4
@@ -67,7 +68,7 @@ static struct rte_flow *i40e_flow_create(struct rte_eth_dev *dev,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
struct rte_flow_error *error);
-static int i40e_flow_destroy(__rte_unused struct rte_eth_dev *dev,
+static int i40e_flow_destroy(struct rte_eth_dev *dev,
struct rte_flow *flow,
struct rte_flow_error *error);
static int i40e_parse_ethertype_pattern(const struct rte_flow_item *pattern,
@@ -90,6 +91,8 @@ static int i40e_parse_tunnel_act(const struct rte_flow_action *actions,
struct rte_eth_tunnel_filter_conf *filter);
static int i40e_parse_attr(const struct rte_flow_attr *attr,
struct rte_flow_error *error);
+static int i40e_dev_destroy_ethertype_filter(struct i40e_pf *pf,
+ struct i40e_ethertype_filter *filter);
const struct rte_flow_ops i40e_flow_ops = {
.validate = i40e_flow_validate,
@@ -1492,11 +1495,16 @@ i40e_flow_destroy(__rte_unused struct rte_eth_dev *dev,
struct rte_flow *flow,
struct rte_flow_error *error)
{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_flow *pmd_flow = (struct i40e_flow *)flow;
enum rte_filter_type filter_type = pmd_flow->filter_type;
int ret;
switch (filter_type) {
+ case RTE_ETH_FILTER_ETHERTYPE:
+ ret = i40e_dev_destroy_ethertype_filter(pf,
+ (struct i40e_ethertype_filter *)pmd_flow->rule);
+ break;
default:
PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
filter_type);
@@ -1504,10 +1512,49 @@ i40e_flow_destroy(__rte_unused struct rte_eth_dev *dev,
break;
}
- if (ret)
+ if (!ret) {
+ TAILQ_REMOVE(&pf->flow_list, pmd_flow, node);
+ free(pmd_flow);
+ } else {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
"Failed to destroy flow.");
+ }
+
+ return ret;
+}
+
+/* Remove an ethertype filter from HW (control packet filter) and from
+ * the driver's SW ethertype list.
+ * Returns 0 on success, -EINVAL if the filter is not tracked in SW,
+ * or the negative AQ status on HW failure.
+ */
+static int
+i40e_dev_destroy_ethertype_filter(struct i40e_pf *pf,
+				  struct i40e_ethertype_filter *filter)
+{
+	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+	struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
+	struct i40e_ethertype_filter *node;
+	struct i40e_control_filter_stats stats;
+	uint16_t flags = 0;
+	int ret;
+
+	/* Look up the SW entry before touching HW: the original removed
+	 * the HW filter first and only then discovered a missing SW
+	 * entry, leaving HW and SW state inconsistent on -EINVAL.
+	 */
+	node = i40e_sw_ethertype_filter_lookup(ethertype_rule,
+					       &filter->input);
+	if (!node)
+		return -EINVAL;
+
+	/* Rebuild the AQ flags the filter was installed with. */
+	if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
+		flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
+	if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
+		flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
+	flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
+
+	memset(&stats, 0, sizeof(stats));
+	ret = i40e_aq_add_rem_control_packet_filter(hw,
+				filter->input.mac_addr.addr_bytes,
+				filter->input.ether_type,
+				flags, pf->main_vsi->seid,
+				filter->queue, 0, &stats, NULL);
+	if (ret < 0)
+		return ret;
+
+	return i40e_sw_ethertype_filter_del(pf, node);
+}
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v2 13/17] net/i40e: destroy tunnel filter
2016-12-27 6:26 ` [dpdk-dev] [PATCH v2 00/17] net/i40e: Consistent filter API Beilei Xing
` (11 preceding siblings ...)
2016-12-27 6:26 ` [dpdk-dev] [PATCH v2 12/17] net/i40e: destroy ethertype filter Beilei Xing
@ 2016-12-27 6:26 ` Beilei Xing
2016-12-27 6:26 ` [dpdk-dev] [PATCH v2 14/17] net/i40e: destroy flow directory filter Beilei Xing
` (4 subsequent siblings)
17 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2016-12-27 6:26 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
This patch adds i40e_dev_destroy_tunnel_filter function
to destroy a tunnel filter for users.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_ethdev.c | 10 ++--------
drivers/net/i40e/i40e_ethdev.h | 5 +++++
drivers/net/i40e/i40e_flow.c | 41 +++++++++++++++++++++++++++++++++++++++++
3 files changed, 48 insertions(+), 8 deletions(-)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index fbab2a1..38e0713 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -413,14 +413,8 @@ static int i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
static int i40e_tunnel_filter_convert(
struct i40e_aqc_add_remove_cloud_filters_element_data *cld_filter,
struct i40e_tunnel_filter *tunnel_filter);
-static struct i40e_tunnel_filter *
-i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_rule *tunnel_rule,
- const struct i40e_tunnel_filter_input *input);
static int i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
struct i40e_tunnel_filter *tunnel_filter);
-static int i40e_sw_tunnel_filter_del(struct i40e_pf *pf,
- struct i40e_tunnel_filter *tunnel_filter);
-
static void i40e_ethertype_filter_restore(struct i40e_pf *pf);
static void i40e_tunnel_filter_restore(struct i40e_pf *pf);
static void i40e_filter_restore(struct i40e_pf *pf);
@@ -6553,7 +6547,7 @@ i40e_tunnel_filter_convert(struct i40e_aqc_add_remove_cloud_filters_element_data
}
/* Check if there exists the tunnel filter */
-static struct i40e_tunnel_filter *
+struct i40e_tunnel_filter *
i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_rule *tunnel_rule,
const struct i40e_tunnel_filter_input *input)
{
@@ -6588,7 +6582,7 @@ i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
}
/* Delete a tunnel filter from the SW list */
-static int
+int
i40e_sw_tunnel_filter_del(struct i40e_pf *pf,
struct i40e_tunnel_filter *tunnel_filter)
{
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 997527a..b8c7d41 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -781,6 +781,11 @@ i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_rule *ethertype_rule,
const struct i40e_ethertype_filter_input *input);
int i40e_sw_ethertype_filter_del(struct i40e_pf *pf,
struct i40e_ethertype_filter *filter);
+struct i40e_tunnel_filter *
+i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_rule *tunnel_rule,
+ const struct i40e_tunnel_filter_input *input);
+int i40e_sw_tunnel_filter_del(struct i40e_pf *pf,
+ struct i40e_tunnel_filter *tunnel_filter);
/* I40E_DEV_PRIVATE_TO */
#define I40E_DEV_PRIVATE_TO_PF(adapter) \
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index 732c411..6a22deb 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -93,6 +93,8 @@ static int i40e_parse_attr(const struct rte_flow_attr *attr,
struct rte_flow_error *error);
static int i40e_dev_destroy_ethertype_filter(struct i40e_pf *pf,
struct i40e_ethertype_filter *filter);
+static int i40e_dev_destroy_tunnel_filter(struct i40e_pf *pf,
+ struct i40e_tunnel_filter *filter);
const struct rte_flow_ops i40e_flow_ops = {
.validate = i40e_flow_validate,
@@ -1505,6 +1507,10 @@ i40e_flow_destroy(__rte_unused struct rte_eth_dev *dev,
ret = i40e_dev_destroy_ethertype_filter(pf,
(struct i40e_ethertype_filter *)pmd_flow->rule);
break;
+ case RTE_ETH_FILTER_TUNNEL:
+ ret = i40e_dev_destroy_tunnel_filter(pf,
+ (struct i40e_tunnel_filter *)pmd_flow->rule);
+ break;
default:
PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
filter_type);
@@ -1558,3 +1564,38 @@ i40e_dev_destroy_ethertype_filter(struct i40e_pf *pf,
return ret;
}
+
+/* Remove a tunnel (cloud) filter from HW and from the driver's SW
+ * tunnel list.
+ * Returns 0 on success, -EINVAL if the filter is not tracked in SW,
+ * or the negative AQ status on HW failure.
+ */
+static int
+i40e_dev_destroy_tunnel_filter(struct i40e_pf *pf,
+			       struct i40e_tunnel_filter *filter)
+{
+	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+	struct i40e_vsi *vsi = pf->main_vsi;
+	struct i40e_aqc_add_remove_cloud_filters_element_data cld_filter;
+	struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
+	struct i40e_tunnel_filter *node;
+	int ret;
+
+	/* Look up the SW entry before touching HW: the original removed
+	 * the HW cloud filter first and only then discovered a missing
+	 * SW entry, mutating HW state on what ends as an -EINVAL error.
+	 */
+	node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &filter->input);
+	if (!node)
+		return -EINVAL;
+
+	/* Rebuild the AQ element describing the installed filter. */
+	memset(&cld_filter, 0, sizeof(cld_filter));
+	ether_addr_copy((struct ether_addr *)&filter->input.outer_mac,
+			(struct ether_addr *)&cld_filter.outer_mac);
+	ether_addr_copy((struct ether_addr *)&filter->input.inner_mac,
+			(struct ether_addr *)&cld_filter.inner_mac);
+	cld_filter.inner_vlan = filter->input.inner_vlan;
+	cld_filter.flags = filter->input.flags;
+	cld_filter.tenant_id = filter->input.tenant_id;
+	cld_filter.queue_number = filter->queue;
+
+	ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
+					   &cld_filter, 1);
+	if (ret < 0)
+		return ret;
+
+	return i40e_sw_tunnel_filter_del(pf, node);
+}
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v2 14/17] net/i40e: destroy flow directory filter
2016-12-27 6:26 ` [dpdk-dev] [PATCH v2 00/17] net/i40e: Consistent filter API Beilei Xing
` (12 preceding siblings ...)
2016-12-27 6:26 ` [dpdk-dev] [PATCH v2 13/17] net/i40e: destroy tunnel filter Beilei Xing
@ 2016-12-27 6:26 ` Beilei Xing
2016-12-27 6:26 ` [dpdk-dev] [PATCH v2 15/17] net/i40e: add flow flush function Beilei Xing
` (3 subsequent siblings)
17 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2016-12-27 6:26 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
This patch supports destroying a flow directory filter
for users.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_flow.c | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index 6a22deb..4c7856c 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -1511,6 +1511,10 @@ i40e_flow_destroy(__rte_unused struct rte_eth_dev *dev,
ret = i40e_dev_destroy_tunnel_filter(pf,
(struct i40e_tunnel_filter *)pmd_flow->rule);
break;
+ case RTE_ETH_FILTER_FDIR:
+ ret = i40e_add_del_fdir_filter(dev,
+ &((struct i40e_fdir_filter *)pmd_flow->rule)->fdir, 0);
+ break;
default:
PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
filter_type);
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v2 15/17] net/i40e: add flow flush function
2016-12-27 6:26 ` [dpdk-dev] [PATCH v2 00/17] net/i40e: Consistent filter API Beilei Xing
` (13 preceding siblings ...)
2016-12-27 6:26 ` [dpdk-dev] [PATCH v2 14/17] net/i40e: destroy flow directory filter Beilei Xing
@ 2016-12-27 6:26 ` Beilei Xing
2016-12-27 12:40 ` Adrien Mazarguil
2016-12-28 5:35 ` Tiwei Bie
2016-12-27 6:26 ` [dpdk-dev] [PATCH v2 16/17] net/i40e: flush ethertype filters Beilei Xing
` (2 subsequent siblings)
17 siblings, 2 replies; 175+ messages in thread
From: Beilei Xing @ 2016-12-27 6:26 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
This patch adds i40e_flow_flush function to flush all
filters for users. And flow director flush function
is involved first.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_ethdev.h | 3 +++
drivers/net/i40e/i40e_fdir.c | 8 ++------
drivers/net/i40e/i40e_flow.c | 46 ++++++++++++++++++++++++++++++++++++++++++
3 files changed, 51 insertions(+), 6 deletions(-)
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index b8c7d41..0b736d5 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -786,6 +786,9 @@ i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_rule *tunnel_rule,
const struct i40e_tunnel_filter_input *input);
int i40e_sw_tunnel_filter_del(struct i40e_pf *pf,
struct i40e_tunnel_filter *tunnel_filter);
+int i40e_sw_fdir_filter_del(struct i40e_pf *pf,
+ struct i40e_fdir_filter *filter);
+int i40e_fdir_flush(struct rte_eth_dev *dev);
/* I40E_DEV_PRIVATE_TO */
#define I40E_DEV_PRIVATE_TO_PF(adapter) \
diff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c
index 6c1bb18..f10aeee 100644
--- a/drivers/net/i40e/i40e_fdir.c
+++ b/drivers/net/i40e/i40e_fdir.c
@@ -119,8 +119,6 @@ static int i40e_fdir_filter_programming(struct i40e_pf *pf,
enum i40e_filter_pctype pctype,
const struct rte_eth_fdir_filter *filter,
bool add);
-static int i40e_fdir_flush(struct rte_eth_dev *dev);
-
static int i40e_fdir_filter_convert(const struct rte_eth_fdir_filter *input,
struct i40e_fdir_filter *filter);
static struct i40e_fdir_filter *
@@ -128,8 +126,6 @@ i40e_sw_fdir_filter_lookup(struct i40e_fdir_info *fdir_info,
const struct rte_eth_fdir_input *input);
static int i40e_sw_fdir_filter_insert(struct i40e_pf *pf,
struct i40e_fdir_filter *filter);
-static int i40e_sw_fdir_filter_del(struct i40e_pf *pf,
- struct i40e_fdir_filter *filter);
static int
i40e_fdir_rx_queue_init(struct i40e_rx_queue *rxq)
@@ -1070,7 +1066,7 @@ i40e_sw_fdir_filter_insert(struct i40e_pf *pf, struct i40e_fdir_filter *filter)
}
/* Delete a flow director filter from the SW list */
-static int
+int
i40e_sw_fdir_filter_del(struct i40e_pf *pf, struct i40e_fdir_filter *filter)
{
struct i40e_fdir_info *fdir_info = &pf->fdir;
@@ -1318,7 +1314,7 @@ i40e_fdir_filter_programming(struct i40e_pf *pf,
* i40e_fdir_flush - clear all filters of Flow Director table
* @pf: board private structure
*/
-static int
+int
i40e_fdir_flush(struct rte_eth_dev *dev)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index 4c7856c..1d9f603 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -68,6 +68,8 @@ static struct rte_flow *i40e_flow_create(struct rte_eth_dev *dev,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
struct rte_flow_error *error);
+static int i40e_flow_flush(struct rte_eth_dev *dev,
+ struct rte_flow_error *error);
static int i40e_flow_destroy(struct rte_eth_dev *dev,
struct rte_flow *flow,
struct rte_flow_error *error);
@@ -95,11 +97,13 @@ static int i40e_dev_destroy_ethertype_filter(struct i40e_pf *pf,
struct i40e_ethertype_filter *filter);
static int i40e_dev_destroy_tunnel_filter(struct i40e_pf *pf,
struct i40e_tunnel_filter *filter);
+static int i40e_fdir_filter_flush(struct i40e_pf *pf);
const struct rte_flow_ops i40e_flow_ops = {
.validate = i40e_flow_validate,
.create = i40e_flow_create,
.destroy = i40e_flow_destroy,
+ .flush = i40e_flow_flush,
};
enum rte_filter_type cons_filter_type = RTE_ETH_FILTER_NONE;
@@ -1603,3 +1607,45 @@ i40e_dev_destroy_tunnel_filter(struct i40e_pf *pf,
return ret;
}
+
+static int
+i40e_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ int ret = 0;
+
+ ret = i40e_fdir_filter_flush(pf);
+ if (!ret)
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to flush FDIR flows.");
+
+ return ret;
+}
+
+static int
+i40e_fdir_filter_flush(struct i40e_pf *pf)
+{
+ struct rte_eth_dev *dev = pf->adapter->eth_dev;
+ struct i40e_fdir_info *fdir_info = &pf->fdir;
+ struct i40e_fdir_filter *fdir_filter;
+ struct i40e_flow *flow;
+ int ret = 0;
+
+ ret = i40e_fdir_flush(dev);
+ if (!ret) {
+ /* Delete FDIR filters in FDIR list. */
+ while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list)))
+ i40e_sw_fdir_filter_del(pf, fdir_filter);
+
+ /* Delete FDIR flows in flow list. */
+ TAILQ_FOREACH(flow, &pf->flow_list, node) {
+ if (flow->filter_type == RTE_ETH_FILTER_FDIR) {
+ TAILQ_REMOVE(&pf->flow_list, flow, node);
+ rte_free(flow);
+ }
+ }
+ }
+
+ return ret;
+}
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v2 16/17] net/i40e: flush ethertype filters
2016-12-27 6:26 ` [dpdk-dev] [PATCH v2 00/17] net/i40e: Consistent filter API Beilei Xing
` (14 preceding siblings ...)
2016-12-27 6:26 ` [dpdk-dev] [PATCH v2 15/17] net/i40e: add flow flush function Beilei Xing
@ 2016-12-27 6:26 ` Beilei Xing
2016-12-27 6:26 ` [dpdk-dev] [PATCH v2 17/17] net/i40e: flush tunnel filters Beilei Xing
2016-12-29 16:04 ` [dpdk-dev] [PATCH v3 00/17] net/i40e: consistent filter API Beilei Xing
17 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2016-12-27 6:26 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
This patch adds i40e_ethertype_filter_flush function
to flush all ethertype filters, including filters in
SW and HW.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_flow.c | 36 ++++++++++++++++++++++++++++++++++++
1 file changed, 36 insertions(+)
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index 1d9f603..c9b338e 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -98,6 +98,7 @@ static int i40e_dev_destroy_ethertype_filter(struct i40e_pf *pf,
static int i40e_dev_destroy_tunnel_filter(struct i40e_pf *pf,
struct i40e_tunnel_filter *filter);
static int i40e_fdir_filter_flush(struct i40e_pf *pf);
+static int i40e_ethertype_filter_flush(struct i40e_pf *pf);
const struct rte_flow_ops i40e_flow_ops = {
.validate = i40e_flow_validate,
@@ -1620,6 +1621,14 @@ i40e_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
"Failed to flush FDIR flows.");
+ ret = i40e_ethertype_filter_flush(pf);
+ if (ret) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to ethertype flush flows.");
+ return ret;
+ }
+
return ret;
}
@@ -1649,3 +1658,30 @@ i40e_fdir_filter_flush(struct i40e_pf *pf)
return ret;
}
+
+/* Flush all ethertype filters */
+static int
+i40e_ethertype_filter_flush(struct i40e_pf *pf)
+{
+ struct i40e_ethertype_filter_list
+ *ethertype_list = &pf->ethertype.ethertype_list;
+ struct i40e_ethertype_filter *f;
+ struct i40e_flow *flow;
+ int ret = 0;
+
+ while ((f = TAILQ_FIRST(ethertype_list))) {
+ ret = i40e_dev_destroy_ethertype_filter(pf, f);
+ if (ret)
+ return ret;
+ }
+
+ /* Delete ethertype flows in flow list. */
+ TAILQ_FOREACH(flow, &pf->flow_list, node) {
+ if (flow->filter_type == RTE_ETH_FILTER_ETHERTYPE) {
+ TAILQ_REMOVE(&pf->flow_list, flow, node);
+ rte_free(flow);
+ }
+ }
+
+ return ret;
+}
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v2 17/17] net/i40e: flush tunnel filters
2016-12-27 6:26 ` [dpdk-dev] [PATCH v2 00/17] net/i40e: Consistent filter API Beilei Xing
` (15 preceding siblings ...)
2016-12-27 6:26 ` [dpdk-dev] [PATCH v2 16/17] net/i40e: flush ethertype filters Beilei Xing
@ 2016-12-27 6:26 ` Beilei Xing
2016-12-29 16:04 ` [dpdk-dev] [PATCH v3 00/17] net/i40e: consistent filter API Beilei Xing
17 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2016-12-27 6:26 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
This patch adds i40e_tunnel_filter_flush function
to flush all tunnel filters, including filters in
SW and HW.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_flow.c | 36 ++++++++++++++++++++++++++++++++++++
1 file changed, 36 insertions(+)
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index c9b338e..bb23e31 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -99,6 +99,7 @@ static int i40e_dev_destroy_tunnel_filter(struct i40e_pf *pf,
struct i40e_tunnel_filter *filter);
static int i40e_fdir_filter_flush(struct i40e_pf *pf);
static int i40e_ethertype_filter_flush(struct i40e_pf *pf);
+static int i40e_tunnel_filter_flush(struct i40e_pf *pf);
const struct rte_flow_ops i40e_flow_ops = {
.validate = i40e_flow_validate,
@@ -1629,6 +1630,14 @@ i40e_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
return ret;
}
+ ret = i40e_tunnel_filter_flush(pf);
+ if (ret) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to flush tunnel flows.");
+ return ret;
+ }
+
return ret;
}
@@ -1685,3 +1694,30 @@ i40e_ethertype_filter_flush(struct i40e_pf *pf)
return ret;
}
+
+/* Flush all tunnel filters */
+static int
+i40e_tunnel_filter_flush(struct i40e_pf *pf)
+{
+ struct i40e_tunnel_filter_list
+ *tunnel_list = &pf->tunnel.tunnel_list;
+ struct i40e_tunnel_filter *f;
+ struct i40e_flow *flow;
+ int ret = 0;
+
+ while ((f = TAILQ_FIRST(tunnel_list))) {
+ ret = i40e_dev_destroy_tunnel_filter(pf, f);
+ if (ret)
+ return ret;
+ }
+
+ /* Delete tunnel flows in flow list. */
+ TAILQ_FOREACH(flow, &pf->flow_list, node) {
+ if (flow->filter_type == RTE_ETH_FILTER_TUNNEL) {
+ TAILQ_REMOVE(&pf->flow_list, flow, node);
+ rte_free(flow);
+ }
+ }
+
+ return ret;
+}
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* Re: [dpdk-dev] [PATCH 10/24] ethdev: parse ethertype filter
2016-12-23 8:43 ` Adrien Mazarguil
@ 2016-12-27 6:36 ` Xing, Beilei
0 siblings, 0 replies; 175+ messages in thread
From: Xing, Beilei @ 2016-12-27 6:36 UTC (permalink / raw)
To: Adrien Mazarguil
Cc: Yigit, Ferruh, Wu, Jingjing, Zhang, Helin, dev, Lu, Wenzhuo
> -----Original Message-----
> From: Adrien Mazarguil [mailto:adrien.mazarguil@6wind.com]
> Sent: Friday, December 23, 2016 4:43 PM
> To: Xing, Beilei <beilei.xing@intel.com>
> Cc: Yigit, Ferruh <ferruh.yigit@intel.com>; Wu, Jingjing
> <jingjing.wu@intel.com>; Zhang, Helin <helin.zhang@intel.com>;
> dev@dpdk.org; Lu, Wenzhuo <wenzhuo.lu@intel.com>
> Subject: Re: [dpdk-dev] [PATCH 10/24] ethdev: parse ethertype filter
>
> Hi all,
>
> On Wed, Dec 21, 2016 at 03:54:50AM +0000, Xing, Beilei wrote:
> > Hi Ferruh,
> >
> > > -----Original Message-----
> > > From: Yigit, Ferruh
> > > Sent: Wednesday, December 21, 2016 2:12 AM
> > > To: Xing, Beilei <beilei.xing@intel.com>; Wu, Jingjing
> > > <jingjing.wu@intel.com>; Zhang, Helin <helin.zhang@intel.com>
> > > Cc: dev@dpdk.org; Lu, Wenzhuo <wenzhuo.lu@intel.com>; Adrien
> > > Mazarguil <adrien.mazarguil@6wind.com>
> > > Subject: Re: [dpdk-dev] [PATCH 10/24] ethdev: parse ethertype filter
> > >
> > > On 12/2/2016 11:53 AM, Beilei Xing wrote:
> > > > Check if the rule is an ethertype rule, and get the ethertype info along the way.
> > > >
> > > > Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
> > > > Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> > > > ---
> > >
> > > CC: Adrien Mazarguil <adrien.mazarguil@6wind.com>
>
> Thanks again for CC'ing me.
>
> > > > lib/librte_ether/rte_flow.c | 136
> > > +++++++++++++++++++++++++++++++++++++
> > > > lib/librte_ether/rte_flow_driver.h | 34 ++++++++++
> > >
> > > <...>
> > >
> > > > diff --git a/lib/librte_ether/rte_flow_driver.h
> > > > b/lib/librte_ether/rte_flow_driver.h
> > > > index a88c621..2760c74 100644
> > > > --- a/lib/librte_ether/rte_flow_driver.h
> > > > +++ b/lib/librte_ether/rte_flow_driver.h
> > > > @@ -170,6 +170,40 @@ rte_flow_error_set(struct rte_flow_error
> > > > *error, const struct rte_flow_ops * rte_flow_ops_get(uint8_t
> > > > port_id, struct rte_flow_error *error);
> > > >
> > > > +int cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
> > > > + const struct rte_flow_item *pattern,
> > > > + const struct rte_flow_action *actions,
> > > > + struct rte_eth_ethertype_filter *filter,
> > > > + struct rte_flow_error *error);
> > >
> > > Although this is helper function, it may be good if it follows the
> > > rte_follow namespace.
> >
> > OK, I will rename it in the next version, thanks very much.
>
> Agreed, all public symbols exposed by headers must be prefixed with
> rte_flow.
>
> Now I'm not so sure about the need to convert a rte_flow rule to a
> rte_eth_ethertype_filter. This definition basically makes rte_flow depend on
> rte_eth_ctrl.h (related #include is missing by the way).
>
Since the whole implementation of parse function is modified, there'll be no common rte_eth_ethertype_filter here temporarily.
> I understand that both ixgbe and i40e would benefit from it, and considering
> rte_flow_driver.h is free from ABI versioning I guess it's acceptable, but
> remember we'll gradually remove existing filter types so we should avoid
> new dependencies on them. Just keep in mind this will be temporary.
>
i40e and ixgbe both use existing filter types in rte_flow_driver.h. If all existing filter types are removed, we will need to change the filter info after the patches are applied.
> Please add full documentation as well in Doxygen style like for existing
> symbols. We have to maintain this API properly documented.
>
> > > > +
> > > > +#define PATTERN_SKIP_VOID(filter, filter_struct, error_type)
> > > \
> > > > + do { \
> > > > + if (!pattern) { \
> > > > + memset(filter, 0, sizeof(filter_struct)); \
> > > > + error->type = error_type; \
> > > > + return -EINVAL;
> > > \
> > > > + } \
> > > > + item = pattern + i; \
> > >
> > > I believe macros that relies on variables that not passed as
> > > argument is not good idea.
> >
> > Yes, I'm reworking the macros, and it will be changed in v2.
> >
> > >
> > > > + while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {
> > > \
> > > > + i++; \
> > > > + item = pattern + i; \
> > > > + } \
> > > > + } while (0)
> > > > +
> > > > +#define ACTION_SKIP_VOID(filter, filter_struct, error_type)
> > > \
> > > > + do { \
> > > > + if (!actions) { \
> > > > + memset(filter, 0, sizeof(filter_struct)); \
> > > > + error->type = error_type; \
> > > > + return -EINVAL;
> > > \
> > > > + } \
> > > > + act = actions + i; \
> > > > + while (act->type == RTE_FLOW_ACTION_TYPE_VOID) { \
> > > > + i++; \
> > > > + act = actions + i; \
> > > > + } \
> > > > + } while (0)
> > >
> > > Are these macros generic enough for all rte_flow consumers?
> > >
> > > What do you think separate this patch, and use these after applied,
> > > meanwhile keeping function and MACROS PMD internal?
> >
> > The main purpose of the macros is to reduce the code in PMD, otherwise
> > there'll be many such codes to get the next non-void item in all parse
> > functions, including the parse_ethertype_filter function in
> > rte_flow.c. But actually I'm not very sure if it's generic enough for
> > all consumers, although I think it's general at present:)
>
> I'll concede skipping VOIDs can be tedious depending on the parser
> implementation, but I do not think these macros need to be exposed either.
> PMDs can duplicate some code such as this.
>
> I think ixgbe and i40e share a fair amount of code already, and factoring it
> should be part of larger task to create a common Intel-specific library instead.
Good point. Thanks. We'll consider related implementation for the common code.
In V2 patch set, there'll be no common code temporarily since the implementation of parsing functions is different between ixgbe and i40e.
>
> > Thanks for your advice, I'll move the macros to PMD currently, then there'll
> be no macros used in parse_ethertype_filter function, and optimize it after
> applied.
> >
> > BTW, I plan to send out V2 patch set in this week.
> >
> > Best Regards,
> > Beilei
> >
> > >
> > > > +
> > > > #ifdef __cplusplus
> > > > }
> > > > #endif
> > > >
> >
>
> --
> Adrien Mazarguil
> 6WIND
^ permalink raw reply [flat|nested] 175+ messages in thread
* Re: [dpdk-dev] [PATCH v2 07/17] net/i40e: add flow validate function
2016-12-27 6:26 ` [dpdk-dev] [PATCH v2 07/17] net/i40e: add flow validate function Beilei Xing
@ 2016-12-27 12:40 ` Adrien Mazarguil
2016-12-28 9:00 ` Xing, Beilei
2016-12-28 2:52 ` Wu, Jingjing
2016-12-28 4:08 ` Tiwei Bie
2 siblings, 1 reply; 175+ messages in thread
From: Adrien Mazarguil @ 2016-12-27 12:40 UTC (permalink / raw)
To: Beilei Xing; +Cc: jingjing.wu, helin.zhang, dev
Hi Beilei,
A few comments below.
On Tue, Dec 27, 2016 at 02:26:14PM +0800, Beilei Xing wrote:
> This patch adds i40e_flow_validation function to check if
> a flow is valid according to the flow pattern.
> i40e_parse_ethertype_filter is added first, it also gets
> the ethertype info.
> i40e_flow.c is added to handle all generic filter events.
>
> Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> ---
> drivers/net/i40e/Makefile | 1 +
> drivers/net/i40e/i40e_ethdev.c | 5 +
> drivers/net/i40e/i40e_ethdev.h | 20 ++
> drivers/net/i40e/i40e_flow.c | 431 +++++++++++++++++++++++++++++++++++++++++
> 4 files changed, 457 insertions(+)
> create mode 100644 drivers/net/i40e/i40e_flow.c
[...]
> diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
> new file mode 100644
> index 0000000..bf451ef
> --- /dev/null
> +++ b/drivers/net/i40e/i40e_flow.c
[...]
> + if (ethertype_filter->queue >= pf->dev_data->nb_rx_queues) {
> + rte_flow_error_set(error, EINVAL,
> + RTE_FLOW_ERROR_TYPE_ACTION,
> + NULL, "Invalid queue ID for"
> + " ethertype_filter.");
When setting an error type related to an existing object provided by the
application, you should set the related cause pointer to a non-NULL
value. In this particular case, retrieving the action object seems difficult
so it can remain that way, however there are many places in this series
where it can be done.
> + return -EINVAL;
While this is perfectly valid, you could also return -rte_errno to avoid
duplicating EINVAL.
[...]
> + }
> + if (ethertype_filter->ether_type == ETHER_TYPE_IPv4 ||
> + ethertype_filter->ether_type == ETHER_TYPE_IPv6) {
> + rte_flow_error_set(error, ENOTSUP,
> + RTE_FLOW_ERROR_TYPE_ITEM,
> + NULL, "Unsupported ether_type in"
> + " control packet filter.");
> + return -ENOTSUP;
> + }
> + if (ethertype_filter->ether_type == ETHER_TYPE_VLAN)
> + PMD_DRV_LOG(WARNING, "filter vlan ether_type in"
> + " first tag is not supported.");
> +
> + return ret;
> +}
[...]
> +/* Parse attributes */
> +static int
> +i40e_parse_attr(const struct rte_flow_attr *attr,
> + struct rte_flow_error *error)
> +{
> + /* Must be input direction */
> + if (!attr->ingress) {
> + rte_flow_error_set(error, EINVAL,
> + RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
> + NULL, "Only support ingress.");
Regarding my previous comment, &attr could replace NULL here as well as in
subsequent calls to rte_flow_error_set().
> + return -EINVAL;
> + }
> +
> + /* Not supported */
> + if (attr->egress) {
> + rte_flow_error_set(error, EINVAL,
> + RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
> + NULL, "Not support egress.");
> + return -EINVAL;
> + }
> +
> + /* Not supported */
> + if (attr->priority) {
> + rte_flow_error_set(error, EINVAL,
> + RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
> + NULL, "Not support priority.");
> + return -EINVAL;
> + }
> +
> + /* Not supported */
> + if (attr->group) {
> + rte_flow_error_set(error, EINVAL,
> + RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
> + NULL, "Not support group.");
> + return -EINVAL;
> + }
> +
> + return 0;
> +}
> +
> +static int
> +i40e_parse_ethertype_pattern(const struct rte_flow_item *pattern,
> + struct rte_flow_error *error,
> + struct rte_eth_ethertype_filter *filter)
> +{
> + const struct rte_flow_item *item = pattern;
> + const struct rte_flow_item_eth *eth_spec;
> + const struct rte_flow_item_eth *eth_mask;
> + enum rte_flow_item_type item_type;
> +
> + for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
> + item_type = item->type;
> + switch (item_type) {
> + case RTE_FLOW_ITEM_TYPE_ETH:
> + eth_spec = (const struct rte_flow_item_eth *)item->spec;
> + eth_mask = (const struct rte_flow_item_eth *)item->mask;
> + /* Get the MAC info. */
> + if (!eth_spec || !eth_mask) {
> + rte_flow_error_set(error, EINVAL,
> + RTE_FLOW_ERROR_TYPE_ITEM,
> + NULL,
> + "NULL ETH spec/mask");
> + return -EINVAL;
> + }
While optional, I think you should allow eth_spec and eth_mask to be NULL
here as described in [1]:
- If eth_spec is NULL, you can match anything that looks like a valid
Ethernet header.
- If eth_mask is NULL, you should assume a default mask (for Ethernet it
usually means matching source/destination MACs perfectly).
- You must check the "last" field as well, if non-NULL it may probably be
supported as long as the following condition is satisfied:
(spec & mask) == (last & mask)
[1] http://dpdk.org/doc/guides/prog_guide/rte_flow.html#pattern-item
[...]
> + const struct rte_flow_action_queue *act_q;
> + uint32_t index = 0;
> +
> + /* Check if the first non-void action is QUEUE or DROP. */
> + NEXT_ITEM_OF_ACTION(act, actions, index);
> + if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
> + act->type != RTE_FLOW_ACTION_TYPE_DROP) {
> + rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
> + NULL, "Not supported action.");
Again, you could report &act instead of NULL here (please check all
remaining calls to rte_flow_error_set()).
[...]
--
Adrien Mazarguil
6WIND
^ permalink raw reply [flat|nested] 175+ messages in thread
* Re: [dpdk-dev] [PATCH v2 15/17] net/i40e: add flow flush function
2016-12-27 6:26 ` [dpdk-dev] [PATCH v2 15/17] net/i40e: add flow flush function Beilei Xing
@ 2016-12-27 12:40 ` Adrien Mazarguil
2016-12-28 8:02 ` Xing, Beilei
2016-12-28 5:35 ` Tiwei Bie
1 sibling, 1 reply; 175+ messages in thread
From: Adrien Mazarguil @ 2016-12-27 12:40 UTC (permalink / raw)
To: Beilei Xing; +Cc: jingjing.wu, helin.zhang, dev
Hi Beilei,
On Tue, Dec 27, 2016 at 02:26:22PM +0800, Beilei Xing wrote:
> This patch adds i40e_flow_flush function to flush all
> filters for users. And flow director flush function
> is involved first.
>
> Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> ---
> drivers/net/i40e/i40e_ethdev.h | 3 +++
> drivers/net/i40e/i40e_fdir.c | 8 ++------
> drivers/net/i40e/i40e_flow.c | 46 ++++++++++++++++++++++++++++++++++++++++++
> 3 files changed, 51 insertions(+), 6 deletions(-)
[...]
> diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
[...]
> +static int
> +i40e_fdir_filter_flush(struct i40e_pf *pf)
> +{
> + struct rte_eth_dev *dev = pf->adapter->eth_dev;
> + struct i40e_fdir_info *fdir_info = &pf->fdir;
> + struct i40e_fdir_filter *fdir_filter;
> + struct i40e_flow *flow;
> + int ret = 0;
> +
> + ret = i40e_fdir_flush(dev);
> + if (!ret) {
> + /* Delete FDIR filters in FDIR list. */
> + while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list)))
> + i40e_sw_fdir_filter_del(pf, fdir_filter);
> +
> + /* Delete FDIR flows in flow list. */
> + TAILQ_FOREACH(flow, &pf->flow_list, node) {
> + if (flow->filter_type == RTE_ETH_FILTER_FDIR) {
> + TAILQ_REMOVE(&pf->flow_list, flow, node);
> + rte_free(flow);
> + }
> + }
Be careful, I'm not sure calling TAILQ_REMOVE() followed by rte_free()
inside a TAILQ_FOREACH() is safe. BSD has the _SAFE() variant for this
purpose but Linux does not.
> + }
> +
> + return ret;
> +}
--
Adrien Mazarguil
6WIND
^ permalink raw reply [flat|nested] 175+ messages in thread
* Re: [dpdk-dev] [PATCH v2 01/17] net/i40e: store ethertype filter
2016-12-27 6:26 ` [dpdk-dev] [PATCH v2 01/17] net/i40e: store ethertype filter Beilei Xing
@ 2016-12-28 2:22 ` Wu, Jingjing
2016-12-29 4:03 ` Xing, Beilei
2016-12-29 4:36 ` Xing, Beilei
2016-12-28 3:22 ` Tiwei Bie
1 sibling, 2 replies; 175+ messages in thread
From: Wu, Jingjing @ 2016-12-28 2:22 UTC (permalink / raw)
To: Xing, Beilei, Zhang, Helin; +Cc: dev
> +
> +/* Delete ethertype filter in SW list */ static int
> +i40e_sw_ethertype_filter_del(struct i40e_pf *pf,
> + struct i40e_ethertype_filter *filter) {
> + struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
> + int ret = 0;
> +
> + ret = rte_hash_del_key(ethertype_rule->hash_table,
> + &filter->input);
> + if (ret < 0)
> + PMD_DRV_LOG(ERR,
> + "Failed to delete ethertype filter"
> + " to hash table %d!",
> + ret);
> + ethertype_rule->hash_map[ret] = NULL;
> +
> + TAILQ_REMOVE(ðertype_rule->ethertype_list, filter, rules);
> + rte_free(filter);
It's better to free the filter outside the del function, because the filter is also an input argument.
Alternatively, you could define this function to take the key as its argument instead of the filter.
> /*
> * Configure ethertype filter, which can director packet by filtering
> * with mac address and ether_type or only ether_type @@ -7964,6 +8099,8
> @@ i40e_ethertype_filter_set(struct i40e_pf *pf,
> bool add)
> {
> struct i40e_hw *hw = I40E_PF_TO_HW(pf);
> + struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
> + struct i40e_ethertype_filter *ethertype_filter, *node;
> struct i40e_control_filter_stats stats;
> uint16_t flags = 0;
> int ret;
> @@ -7982,6 +8119,22 @@ i40e_ethertype_filter_set(struct i40e_pf *pf,
> PMD_DRV_LOG(WARNING, "filter vlan ether_type in first tag is"
> " not supported.");
>
> + /* Check if there is the filter in SW list */
> + ethertype_filter = rte_zmalloc("ethertype_filter",
> + sizeof(*ethertype_filter), 0);
> + i40e_ethertype_filter_convert(filter, ethertype_filter);
> + node = i40e_sw_ethertype_filter_lookup(ethertype_rule,
> + ðertype_filter->input);
> + if (add && node) {
> + PMD_DRV_LOG(ERR, "Conflict with existing ethertype rules!");
> + rte_free(ethertype_filter);
> + return -EINVAL;
> + } else if (!add && !node) {
> + PMD_DRV_LOG(ERR, "There's no corresponding ethertype
> filter!");
> + rte_free(ethertype_filter);
> + return -EINVAL;
> + }
How about allocating ethertype_filter after the check? In particular, there is no need to allocate it when deleting a filter.
Thanks
Jingjing
^ permalink raw reply [flat|nested] 175+ messages in thread
* Re: [dpdk-dev] [PATCH v2 04/17] net/i40e: restore ethertype filter
2016-12-27 6:26 ` [dpdk-dev] [PATCH v2 04/17] net/i40e: restore ethertype filter Beilei Xing
@ 2016-12-28 2:25 ` Wu, Jingjing
0 siblings, 0 replies; 175+ messages in thread
From: Wu, Jingjing @ 2016-12-28 2:25 UTC (permalink / raw)
To: Xing, Beilei, Zhang, Helin; +Cc: dev
> -----Original Message-----
> From: Xing, Beilei
> Sent: Tuesday, December 27, 2016 2:26 PM
> To: Wu, Jingjing <jingjing.wu@intel.com>; Zhang, Helin <helin.zhang@intel.com>
> Cc: dev@dpdk.org
> Subject: [PATCH v2 04/17] net/i40e: restore ethertype filter
>
> Add support of restoring ethertype filter in case filter dropped accidentally, as all
> filters need to be added and removed by user obviously for generic filter API.
>
> Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> ---
> drivers/net/i40e/i40e_ethdev.c | 39
> +++++++++++++++++++++++++++++++++++++++
> 1 file changed, 39 insertions(+)
>
> diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
> index 427ebdc..cd7c309 100644
> --- a/drivers/net/i40e/i40e_ethdev.c
> +++ b/drivers/net/i40e/i40e_ethdev.c
> @@ -484,6 +484,9 @@ static int i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
> static int i40e_sw_tunnel_filter_del(struct i40e_pf *pf,
> struct i40e_tunnel_filter *tunnel_filter);
>
> +static void i40e_ethertype_filter_restore(struct i40e_pf *pf); static
> +void i40e_filter_restore(struct i40e_pf *pf);
> +
> static const struct rte_pci_id pci_id_i40e_map[] = {
> { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710) },
> { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QEMU) },
> @@ -1964,6 +1967,8 @@ i40e_dev_start(struct rte_eth_dev *dev)
> /* enable uio intr after callback register */
> rte_intr_enable(intr_handle);
>
> + i40e_filter_restore(pf);
> +
> return I40E_SUCCESS;
>
> err_up:
> @@ -10066,3 +10071,37 @@ i40e_dev_mtu_set(struct rte_eth_dev *dev,
> uint16_t mtu)
>
> return ret;
> }
> +
> +/* Restore ethertype filter */
> +static void
> +i40e_ethertype_filter_restore(struct i40e_pf *pf) {
> + struct i40e_hw *hw = I40E_PF_TO_HW(pf);
> + struct i40e_ethertype_filter_list
> + *ethertype_list = &pf->ethertype.ethertype_list;
> + struct i40e_ethertype_filter *f;
> + struct i40e_control_filter_stats stats;
> + uint16_t flags;
> +
> + TAILQ_FOREACH(f, ethertype_list, rules) {
> + flags = 0;
> + if (!(f->flags & RTE_ETHTYPE_FLAGS_MAC))
> + flags |=
> I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
> + if (f->flags & RTE_ETHTYPE_FLAGS_DROP)
> + flags |=
> I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
> + flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
> +
> + memset(&stats, 0, sizeof(stats));
> + i40e_aq_add_rem_control_packet_filter(hw,
> + f->input.mac_addr.addr_bytes,
> + f->input.ether_type,
> + flags, pf->main_vsi->seid,
> + f->queue, 1, &stats, NULL);
> + }
How about logging the stats to show how many filters are restored?
Thanks
Jingjing
^ permalink raw reply [flat|nested] 175+ messages in thread
* Re: [dpdk-dev] [PATCH v2 07/17] net/i40e: add flow validate function
2016-12-27 6:26 ` [dpdk-dev] [PATCH v2 07/17] net/i40e: add flow validate function Beilei Xing
2016-12-27 12:40 ` Adrien Mazarguil
@ 2016-12-28 2:52 ` Wu, Jingjing
2016-12-28 7:44 ` Xing, Beilei
2016-12-28 4:08 ` Tiwei Bie
2 siblings, 1 reply; 175+ messages in thread
From: Wu, Jingjing @ 2016-12-28 2:52 UTC (permalink / raw)
To: Xing, Beilei, Zhang, Helin; +Cc: dev
>
> +union i40e_filter_t {
> + struct rte_eth_ethertype_filter ethertype_filter;
> + struct rte_eth_fdir_filter fdir_filter;
> + struct rte_eth_tunnel_filter_conf tunnel_filter; } cons_filter;
> +
> +typedef int (*parse_filter_t)(struct rte_eth_dev *dev,
> + const struct rte_flow_attr *attr,
> + const struct rte_flow_item pattern[],
> + const struct rte_flow_action actions[],
> + struct rte_flow_error *error,
> + union i40e_filter_t *filter);
You can use void* instead of define union i40e_filter_t.
> +struct i40e_valid_pattern {
> + enum rte_flow_item_type *items;
What does the item point to? Please add a few comments.
> +
> + ret = parse_filter(dev, attr, items, actions, error, &cons_filter);
Will you use cons_filter later? If not, it looks like we don't need the argument at all.
> +
> + rte_free(items);
> +
> + return ret;
> +}
> --
> 2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* Re: [dpdk-dev] [PATCH v2 01/17] net/i40e: store ethertype filter
2016-12-27 6:26 ` [dpdk-dev] [PATCH v2 01/17] net/i40e: store ethertype filter Beilei Xing
2016-12-28 2:22 ` Wu, Jingjing
@ 2016-12-28 3:22 ` Tiwei Bie
1 sibling, 0 replies; 175+ messages in thread
From: Tiwei Bie @ 2016-12-28 3:22 UTC (permalink / raw)
To: Beilei Xing; +Cc: jingjing.wu, helin.zhang, dev
On Tue, Dec 27, 2016 at 02:26:08PM +0800, Beilei Xing wrote:
> Currently there's no ethertype filter stored in SW.
> This patch stores ethertype filter with cuckoo hash
> in SW, also adds protection if an ethertype filter
> has been added.
>
> Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> ---
> drivers/net/i40e/Makefile | 1 +
> drivers/net/i40e/i40e_ethdev.c | 164 ++++++++++++++++++++++++++++++++++++++++-
> drivers/net/i40e/i40e_ethdev.h | 26 +++++++
> 3 files changed, 190 insertions(+), 1 deletion(-)
>
> diff --git a/drivers/net/i40e/Makefile b/drivers/net/i40e/Makefile
> index 66997b6..11175c4 100644
> --- a/drivers/net/i40e/Makefile
> +++ b/drivers/net/i40e/Makefile
> @@ -117,5 +117,6 @@ DEPDIRS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += lib/librte_eal lib/librte_ether
> DEPDIRS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += lib/librte_mempool lib/librte_mbuf
> DEPDIRS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += lib/librte_net
> DEPDIRS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += lib/librte_kvargs
> +DEPDIRS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += lib/librte_hash
>
> include $(RTE_SDK)/mk/rte.lib.mk
> diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
> index f42f4ba..80dd8d7 100644
> --- a/drivers/net/i40e/i40e_ethdev.c
> +++ b/drivers/net/i40e/i40e_ethdev.c
[...]
> @@ -1203,23 +1249,40 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
> static int
> eth_i40e_dev_uninit(struct rte_eth_dev *dev)
> {
> + struct i40e_pf *pf;
> struct rte_pci_device *pci_dev;
> struct i40e_hw *hw;
> struct i40e_filter_control_settings settings;
> + struct i40e_ethertype_filter *p_ethertype;
> int ret;
> uint8_t aq_fail = 0;
> + struct i40e_ethertype_rule *ethertype_rule;
>
> PMD_INIT_FUNC_TRACE();
>
> if (rte_eal_process_type() != RTE_PROC_PRIMARY)
> return 0;
>
> + pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
> hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> pci_dev = dev->pci_dev;
> + ethertype_rule = &pf->ethertype;
>
> if (hw->adapter_stopped == 0)
> i40e_dev_close(dev);
>
> + /* Remove all ethertype director rules and hash */
> + if (ethertype_rule->hash_map)
> + rte_free(ethertype_rule->hash_map);
> + if (ethertype_rule->hash_table)
> + rte_hash_free(ethertype_rule->hash_table);
> +
> + while ((p_ethertype = TAILQ_FIRST(ðertype_rule->ethertype_list))) {
There is a redundant pair of parentheses, or you should compare with
NULL.
> + TAILQ_REMOVE(ðertype_rule->ethertype_list,
> + p_ethertype, rules);
> + rte_free(p_ethertype);
> + }
> +
> dev->dev_ops = NULL;
> dev->rx_pkt_burst = NULL;
> dev->tx_pkt_burst = NULL;
> @@ -7954,6 +8017,78 @@ i40e_hash_filter_ctrl(struct rte_eth_dev *dev,
> return ret;
> }
>
> +/* Convert ethertype filter structure */
> +static int
> +i40e_ethertype_filter_convert(const struct rte_eth_ethertype_filter *input,
> + struct i40e_ethertype_filter *filter)
> +{
> + rte_memcpy(&filter->input.mac_addr, &input->mac_addr, ETHER_ADDR_LEN);
> + filter->input.ether_type = input->ether_type;
> + filter->flags = input->flags;
> + filter->queue = input->queue;
> +
> + return 0;
> +}
> +
> +/* Check if there exists the ehtertype filter */
> +static struct i40e_ethertype_filter *
> +i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_rule *ethertype_rule,
> + const struct i40e_ethertype_filter_input *input)
> +{
> + int ret = 0;
> +
The initialization is meaningless, as it will be written by the below
assignment unconditionally.
> + ret = rte_hash_lookup(ethertype_rule->hash_table, (const void *)input);
> + if (ret < 0)
> + return NULL;
> +
> + return ethertype_rule->hash_map[ret];
> +}
> +
> +/* Add ethertype filter in SW list */
> +static int
> +i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
> + struct i40e_ethertype_filter *filter)
> +{
> + struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
> + int ret = 0;
> +
Same here.
> + ret = rte_hash_add_key(ethertype_rule->hash_table,
> + &filter->input);
> + if (ret < 0)
> + PMD_DRV_LOG(ERR,
> + "Failed to insert ethertype filter"
> + " to hash table %d!",
> + ret);
Function should return when ret < 0.
> + ethertype_rule->hash_map[ret] = filter;
> +
> + TAILQ_INSERT_TAIL(ðertype_rule->ethertype_list, filter, rules);
> +
> + return 0;
> +}
> +
> +/* Delete ethertype filter in SW list */
> +static int
> +i40e_sw_ethertype_filter_del(struct i40e_pf *pf,
> + struct i40e_ethertype_filter *filter)
> +{
> + struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
> + int ret = 0;
> +
Same here.
> + ret = rte_hash_del_key(ethertype_rule->hash_table,
> + &filter->input);
> + if (ret < 0)
> + PMD_DRV_LOG(ERR,
> + "Failed to delete ethertype filter"
> + " to hash table %d!",
> + ret);
Function should return when ret < 0.
> + ethertype_rule->hash_map[ret] = NULL;
> +
> + TAILQ_REMOVE(ðertype_rule->ethertype_list, filter, rules);
> + rte_free(filter);
> +
> + return 0;
> +}
> +
> /*
> * Configure ethertype filter, which can director packet by filtering
> * with mac address and ether_type or only ether_type
> @@ -7964,6 +8099,8 @@ i40e_ethertype_filter_set(struct i40e_pf *pf,
> bool add)
> {
> struct i40e_hw *hw = I40E_PF_TO_HW(pf);
> + struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
> + struct i40e_ethertype_filter *ethertype_filter, *node;
> struct i40e_control_filter_stats stats;
> uint16_t flags = 0;
> int ret;
> @@ -7982,6 +8119,22 @@ i40e_ethertype_filter_set(struct i40e_pf *pf,
> PMD_DRV_LOG(WARNING, "filter vlan ether_type in first tag is"
> " not supported.");
>
> + /* Check if there is the filter in SW list */
> + ethertype_filter = rte_zmalloc("ethertype_filter",
> + sizeof(*ethertype_filter), 0);
> + i40e_ethertype_filter_convert(filter, ethertype_filter);
> + node = i40e_sw_ethertype_filter_lookup(ethertype_rule,
> + ðertype_filter->input);
> + if (add && node) {
> + PMD_DRV_LOG(ERR, "Conflict with existing ethertype rules!");
> + rte_free(ethertype_filter);
> + return -EINVAL;
> + } else if (!add && !node) {
When `if (add && node)' is true, function will return. There is no need
to use `else' here.
Best regards,
Tiwei Bie
^ permalink raw reply [flat|nested] 175+ messages in thread
* Re: [dpdk-dev] [PATCH v2 02/17] net/i40e: store tunnel filter
2016-12-27 6:26 ` [dpdk-dev] [PATCH v2 02/17] net/i40e: store tunnel filter Beilei Xing
@ 2016-12-28 3:27 ` Tiwei Bie
0 siblings, 0 replies; 175+ messages in thread
From: Tiwei Bie @ 2016-12-28 3:27 UTC (permalink / raw)
To: Beilei Xing; +Cc: jingjing.wu, helin.zhang, dev
On Tue, Dec 27, 2016 at 02:26:09PM +0800, Beilei Xing wrote:
> Currently there's no tunnel filter stored in SW.
> This patch stores tunnel filter in SW with cuckoo
> hash, also adds protection if a tunnel filter has
> been added.
>
> Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> ---
> drivers/net/i40e/i40e_ethdev.c | 167 ++++++++++++++++++++++++++++++++++++++++-
> drivers/net/i40e/i40e_ethdev.h | 27 +++++++
> 2 files changed, 191 insertions(+), 3 deletions(-)
>
> diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
> index 80dd8d7..c012d5d 100644
> --- a/drivers/net/i40e/i40e_ethdev.c
> +++ b/drivers/net/i40e/i40e_ethdev.c
[...]
> @@ -1267,6 +1314,7 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
> hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> pci_dev = dev->pci_dev;
> ethertype_rule = &pf->ethertype;
> + tunnel_rule = &pf->tunnel;
>
> if (hw->adapter_stopped == 0)
> i40e_dev_close(dev);
> @@ -1283,6 +1331,17 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
> rte_free(p_ethertype);
> }
>
> + /* Remove all tunnel director rules and hash */
> + if (tunnel_rule->hash_map)
> + rte_free(tunnel_rule->hash_map);
> + if (tunnel_rule->hash_table)
> + rte_hash_free(tunnel_rule->hash_table);
> +
> + while ((p_tunnel = TAILQ_FIRST(&tunnel_rule->tunnel_list))) {
There is a redundant pair of parentheses, or you should compare with
NULL.
> + TAILQ_REMOVE(&tunnel_rule->tunnel_list, p_tunnel, rules);
> + rte_free(p_tunnel);
> + }
> +
> dev->dev_ops = NULL;
> dev->rx_pkt_burst = NULL;
> dev->tx_pkt_burst = NULL;
> @@ -6482,6 +6541,81 @@ i40e_dev_get_filter_type(uint16_t filter_type, uint16_t *flag)
> return 0;
> }
>
> +/* Convert tunnel filter structure */
> +static int
> +i40e_tunnel_filter_convert(struct i40e_aqc_add_remove_cloud_filters_element_data
> + *cld_filter,
> + struct i40e_tunnel_filter *tunnel_filter)
> +{
> + ether_addr_copy((struct ether_addr *)&cld_filter->outer_mac,
> + (struct ether_addr *)&tunnel_filter->input.outer_mac);
> + ether_addr_copy((struct ether_addr *)&cld_filter->inner_mac,
> + (struct ether_addr *)&tunnel_filter->input.inner_mac);
> + tunnel_filter->input.inner_vlan = cld_filter->inner_vlan;
> + tunnel_filter->input.flags = cld_filter->flags;
> + tunnel_filter->input.tenant_id = cld_filter->tenant_id;
> + tunnel_filter->queue = cld_filter->queue_number;
> +
> + return 0;
> +}
> +
> +/* Check if there exists the tunnel filter */
> +static struct i40e_tunnel_filter *
> +i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_rule *tunnel_rule,
> + const struct i40e_tunnel_filter_input *input)
> +{
> + int ret = 0;
> +
The initialization is meaningless, as it will be written by the below
assignment unconditionally.
> + ret = rte_hash_lookup(tunnel_rule->hash_table, (const void *)input);
> + if (ret < 0)
> + return NULL;
> +
> + return tunnel_rule->hash_map[ret];
> +}
> +
> +/* Add a tunnel filter into the SW list */
> +static int
> +i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
> + struct i40e_tunnel_filter *tunnel_filter)
> +{
> + struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
> + int ret = 0;
> +
Same here.
> + ret = rte_hash_add_key(tunnel_rule->hash_table,
> + &tunnel_filter->input);
> + if (ret < 0)
> + PMD_DRV_LOG(ERR,
> + "Failed to insert tunnel filter to hash table %d!",
> + ret);
Function should return when ret < 0.
> + tunnel_rule->hash_map[ret] = tunnel_filter;
> +
> + TAILQ_INSERT_TAIL(&tunnel_rule->tunnel_list, tunnel_filter, rules);
> +
> + return 0;
> +}
> +
> +/* Delete a tunnel filter from the SW list */
> +static int
> +i40e_sw_tunnel_filter_del(struct i40e_pf *pf,
> + struct i40e_tunnel_filter *tunnel_filter)
> +{
> + struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
> + int ret = 0;
> +
Same here.
> + ret = rte_hash_del_key(tunnel_rule->hash_table,
> + &tunnel_filter->input);
> + if (ret < 0)
> + PMD_DRV_LOG(ERR,
> + "Failed to delete tunnel filter to hash table %d!",
> + ret);
Function should return when ret < 0.
> + tunnel_rule->hash_map[ret] = NULL;
> +
> + TAILQ_REMOVE(&tunnel_rule->tunnel_list, tunnel_filter, rules);
> + rte_free(tunnel_filter);
> +
> + return 0;
> +}
> +
> static int
> i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
> struct rte_eth_tunnel_filter_conf *tunnel_filter,
> @@ -6497,6 +6631,8 @@ i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
> struct i40e_vsi *vsi = pf->main_vsi;
> struct i40e_aqc_add_remove_cloud_filters_element_data *cld_filter;
> struct i40e_aqc_add_remove_cloud_filters_element_data *pfilter;
> + struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
> + struct i40e_tunnel_filter *tunnel, *node;
>
> cld_filter = rte_zmalloc("tunnel_filter",
> sizeof(struct i40e_aqc_add_remove_cloud_filters_element_data),
> @@ -6559,11 +6695,36 @@ i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
> pfilter->tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id);
> pfilter->queue_number = rte_cpu_to_le_16(tunnel_filter->queue_id);
>
> - if (add)
> + tunnel = rte_zmalloc("tunnel_filter", sizeof(*tunnel), 0);
> + i40e_tunnel_filter_convert(cld_filter, tunnel);
> + node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &tunnel->input);
> + if (add && node) {
> + PMD_DRV_LOG(ERR, "Conflict with existing tunnel rules!");
> + rte_free(tunnel);
> + return -EINVAL;
> + } else if (!add && !node) {
When `if (add && node)' is true, function will return. There is no need
to use `else' here.
Best regards,
Tiwei Bie
^ permalink raw reply [flat|nested] 175+ messages in thread
* Re: [dpdk-dev] [PATCH v2 12/17] net/i40e: destroy ethertype filter
2016-12-27 6:26 ` [dpdk-dev] [PATCH v2 12/17] net/i40e: destroy ethertype filter Beilei Xing
@ 2016-12-28 3:30 ` Wu, Jingjing
2016-12-28 7:29 ` Xing, Beilei
2016-12-28 4:56 ` Tiwei Bie
1 sibling, 1 reply; 175+ messages in thread
From: Wu, Jingjing @ 2016-12-28 3:30 UTC (permalink / raw)
To: Xing, Beilei, Zhang, Helin; +Cc: dev
>
> const struct rte_flow_ops i40e_flow_ops = {
> .validate = i40e_flow_validate,
> @@ -1492,11 +1495,16 @@ i40e_flow_destroy(__rte_unused struct
> rte_eth_dev *dev,
> struct rte_flow *flow,
> struct rte_flow_error *error)
> {
> + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data-
> >dev_private);
> struct i40e_flow *pmd_flow = (struct i40e_flow *)flow;
> enum rte_filter_type filter_type = pmd_flow->filter_type;
> int ret;
>
> switch (filter_type) {
> + case RTE_ETH_FILTER_ETHERTYPE:
> + ret = i40e_dev_destroy_ethertype_filter(pf,
> + (struct i40e_ethertype_filter *)pmd_flow->rule);
> + break;
> default:
> PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
> filter_type);
> @@ -1504,10 +1512,49 @@ i40e_flow_destroy(__rte_unused struct
> rte_eth_dev *dev,
> break;
> }
>
> - if (ret)
> + if (!ret) {
> + TAILQ_REMOVE(&pf->flow_list, pmd_flow, node);
> + free(pmd_flow);
Should it be freed inside the function? Is the API definition like that?
^ permalink raw reply [flat|nested] 175+ messages in thread
* Re: [dpdk-dev] [PATCH v2 03/17] net/i40e: store flow director filter
2016-12-27 6:26 ` [dpdk-dev] [PATCH v2 03/17] net/i40e: store flow director filter Beilei Xing
@ 2016-12-28 3:38 ` Tiwei Bie
2016-12-28 7:10 ` Xing, Beilei
0 siblings, 1 reply; 175+ messages in thread
From: Tiwei Bie @ 2016-12-28 3:38 UTC (permalink / raw)
To: Beilei Xing; +Cc: jingjing.wu, helin.zhang, dev
On Tue, Dec 27, 2016 at 02:26:10PM +0800, Beilei Xing wrote:
> Currently there's no flow director filter stored in SW. This
> patch stores flow director filters in SW with cuckoo hash,
> also adds protection if a flow director filter has been added.
>
> Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> ---
> drivers/net/i40e/i40e_ethdev.c | 48 +++++++++++++++++++++
> drivers/net/i40e/i40e_ethdev.h | 12 ++++++
> drivers/net/i40e/i40e_fdir.c | 98 ++++++++++++++++++++++++++++++++++++++++++
> 3 files changed, 158 insertions(+)
>
> diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
> index c012d5d..427ebdc 100644
> --- a/drivers/net/i40e/i40e_ethdev.c
> +++ b/drivers/net/i40e/i40e_ethdev.c
[...]
> @@ -1342,6 +1379,17 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
> rte_free(p_tunnel);
> }
>
> + /* Remove all flow director rules and hash */
> + if (fdir_info->hash_map)
> + rte_free(fdir_info->hash_map);
> + if (fdir_info->hash_table)
> + rte_hash_free(fdir_info->hash_table);
> +
> + while ((p_fdir = TAILQ_FIRST(&fdir_info->fdir_list))) {
There is a redundant pair of parentheses, or you should compare with
NULL.
> + TAILQ_REMOVE(&fdir_info->fdir_list, p_fdir, rules);
> + rte_free(p_fdir);
> + }
> +
> dev->dev_ops = NULL;
> dev->rx_pkt_burst = NULL;
> dev->tx_pkt_burst = NULL;
[...]
> diff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c
> index 335bf15..faa2495 100644
> --- a/drivers/net/i40e/i40e_fdir.c
> +++ b/drivers/net/i40e/i40e_fdir.c
[...]
> +/* Check if there exists the flow director filter */
> +static struct i40e_fdir_filter *
> +i40e_sw_fdir_filter_lookup(struct i40e_fdir_info *fdir_info,
> + const struct rte_eth_fdir_input *input)
> +{
> + int ret = 0;
> +
The initialization is meaningless, as it will be written by the below
assignment unconditionally.
> + ret = rte_hash_lookup(fdir_info->hash_table, (const void *)input);
> + if (ret < 0)
> + return NULL;
> +
> + return fdir_info->hash_map[ret];
> +}
> +
> +/* Add a flow director filter into the SW list */
> +static int
> +i40e_sw_fdir_filter_insert(struct i40e_pf *pf, struct i40e_fdir_filter *filter)
> +{
> + struct i40e_fdir_info *fdir_info = &pf->fdir;
> + int ret = 0;
> +
Same here.
> + ret = rte_hash_add_key(fdir_info->hash_table,
> + &filter->fdir.input);
> + if (ret < 0)
> + PMD_DRV_LOG(ERR,
> + "Failed to insert fdir filter to hash table %d!",
> + ret);
Function should return when ret < 0.
> + fdir_info->hash_map[ret] = filter;
> +
> + TAILQ_INSERT_TAIL(&fdir_info->fdir_list, filter, rules);
> +
> + return 0;
> +}
> +
> +/* Delete a flow director filter from the SW list */
> +static int
> +i40e_sw_fdir_filter_del(struct i40e_pf *pf, struct i40e_fdir_filter *filter)
> +{
> + struct i40e_fdir_info *fdir_info = &pf->fdir;
> + int ret = 0;
> +
Same here.
> + ret = rte_hash_del_key(fdir_info->hash_table,
> + &filter->fdir.input);
> + if (ret < 0)
> + PMD_DRV_LOG(ERR,
> + "Failed to delete fdir filter to hash table %d!",
> + ret);
Function should return when ret < 0.
> + fdir_info->hash_map[ret] = NULL;
> +
> + TAILQ_REMOVE(&fdir_info->fdir_list, filter, rules);
> + rte_free(filter);
> +
> + return 0;
> +}
> +
> /*
> * i40e_add_del_fdir_filter - add or remove a flow director filter.
> * @pf: board private structure
> @@ -1032,6 +1105,8 @@ i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
> struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
> unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
> enum i40e_filter_pctype pctype;
> + struct i40e_fdir_info *fdir_info = &pf->fdir;
> + struct i40e_fdir_filter *fdir_filter, *node;
> int ret = 0;
>
> if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
> @@ -1054,6 +1129,21 @@ i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
> return -EINVAL;
> }
>
> + fdir_filter = rte_zmalloc("fdir_filter", sizeof(*fdir_filter), 0);
> + i40e_fdir_filter_convert(filter, fdir_filter);
> + node = i40e_sw_fdir_filter_lookup(fdir_info, &fdir_filter->fdir.input);
> + if (add && node) {
> + PMD_DRV_LOG(ERR,
> + "Conflict with existing flow director rules!");
> + rte_free(fdir_filter);
> + return -EINVAL;
> + } else if (!add && !node) {
When `if (add && node)' is true, function will return. There is no need
to use `else' here.
Best regards,
Tiwei Bie
^ permalink raw reply [flat|nested] 175+ messages in thread
* Re: [dpdk-dev] [PATCH v2 07/17] net/i40e: add flow validate function
2016-12-27 6:26 ` [dpdk-dev] [PATCH v2 07/17] net/i40e: add flow validate function Beilei Xing
2016-12-27 12:40 ` Adrien Mazarguil
2016-12-28 2:52 ` Wu, Jingjing
@ 2016-12-28 4:08 ` Tiwei Bie
2 siblings, 0 replies; 175+ messages in thread
From: Tiwei Bie @ 2016-12-28 4:08 UTC (permalink / raw)
To: Beilei Xing; +Cc: jingjing.wu, helin.zhang, dev
On Tue, Dec 27, 2016 at 02:26:14PM +0800, Beilei Xing wrote:
> This patch adds i40e_flow_validation function to check if
> a flow is valid according to the flow pattern.
> i40e_parse_ethertype_filter is added first, it also gets
> the ethertype info.
> i40e_flow.c is added to handle all generic filter events.
>
> Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> ---
> drivers/net/i40e/Makefile | 1 +
> drivers/net/i40e/i40e_ethdev.c | 5 +
> drivers/net/i40e/i40e_ethdev.h | 20 ++
> drivers/net/i40e/i40e_flow.c | 431 +++++++++++++++++++++++++++++++++++++++++
> 4 files changed, 457 insertions(+)
> create mode 100644 drivers/net/i40e/i40e_flow.c
>
> diff --git a/drivers/net/i40e/Makefile b/drivers/net/i40e/Makefile
> index 11175c4..89bd85a 100644
> --- a/drivers/net/i40e/Makefile
> +++ b/drivers/net/i40e/Makefile
> @@ -105,6 +105,7 @@ endif
> SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_ethdev_vf.c
> SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_pf.c
> SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_fdir.c
> +SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_flow.c
>
> # vector PMD driver needs SSE4.1 support
> ifeq ($(findstring RTE_MACHINE_CPUFLAG_SSE4_1,$(CFLAGS)),)
> diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
> index 7f98b79..80024ed 100644
> --- a/drivers/net/i40e/i40e_ethdev.c
> +++ b/drivers/net/i40e/i40e_ethdev.c
> @@ -8452,6 +8452,11 @@ i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
> case RTE_ETH_FILTER_FDIR:
> ret = i40e_fdir_ctrl_func(dev, filter_op, arg);
> break;
> + case RTE_ETH_FILTER_GENERIC:
> + if (filter_op != RTE_ETH_FILTER_GET)
> + return -EINVAL;
> + *(const void **)arg = &i40e_flow_ops;
> + break;
> default:
> PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
> filter_type);
> diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
> index 6089895..bbe52f0 100644
> --- a/drivers/net/i40e/i40e_ethdev.h
> +++ b/drivers/net/i40e/i40e_ethdev.h
> @@ -38,6 +38,7 @@
> #include <rte_time.h>
> #include <rte_kvargs.h>
> #include <rte_hash.h>
> +#include <rte_flow_driver.h>
>
> #define I40E_VLAN_TAG_SIZE 4
>
> @@ -629,6 +630,23 @@ struct i40e_adapter {
> struct rte_timecounter tx_tstamp_tc;
> };
>
> +union i40e_filter_t {
> + struct rte_eth_ethertype_filter ethertype_filter;
> + struct rte_eth_fdir_filter fdir_filter;
> + struct rte_eth_tunnel_filter_conf tunnel_filter;
> +} cons_filter;
> +
Are you sure that you want to define a variable in i40e_ethdev.h?
> +typedef int (*parse_filter_t)(struct rte_eth_dev *dev,
> + const struct rte_flow_attr *attr,
> + const struct rte_flow_item pattern[],
> + const struct rte_flow_action actions[],
> + struct rte_flow_error *error,
> + union i40e_filter_t *filter);
> +struct i40e_valid_pattern {
> + enum rte_flow_item_type *items;
> + parse_filter_t parse_filter;
> +};
> +
> int i40e_dev_switch_queues(struct i40e_pf *pf, bool on);
> int i40e_vsi_release(struct i40e_vsi *vsi);
> struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf,
> @@ -823,4 +841,6 @@ i40e_calc_itr_interval(int16_t interval)
> ((phy_type) & I40E_CAP_PHY_TYPE_25GBASE_SR) || \
> ((phy_type) & I40E_CAP_PHY_TYPE_25GBASE_LR))
>
> +const struct rte_flow_ops i40e_flow_ops;
> +
Same here. Are you sure that you want to define a variable in i40e_ethdev.h?
Maybe you should add the `extern' qualifier.
Best regards,
Tiwei Bie
^ permalink raw reply [flat|nested] 175+ messages in thread
* Re: [dpdk-dev] [PATCH v2 12/17] net/i40e: destroy ethertype filter
2016-12-27 6:26 ` [dpdk-dev] [PATCH v2 12/17] net/i40e: destroy ethertype filter Beilei Xing
2016-12-28 3:30 ` Wu, Jingjing
@ 2016-12-28 4:56 ` Tiwei Bie
2016-12-28 6:57 ` Xing, Beilei
1 sibling, 1 reply; 175+ messages in thread
From: Tiwei Bie @ 2016-12-28 4:56 UTC (permalink / raw)
To: Beilei Xing; +Cc: jingjing.wu, helin.zhang, dev
On Tue, Dec 27, 2016 at 02:26:19PM +0800, Beilei Xing wrote:
> This patch adds i40e_dev_destroy_ethertype_filter function
> to destroy a ethertype filter for users.
>
> Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> ---
> drivers/net/i40e/i40e_ethdev.c | 10 ++-------
> drivers/net/i40e/i40e_ethdev.h | 5 +++++
> drivers/net/i40e/i40e_flow.c | 51 ++++++++++++++++++++++++++++++++++++++++--
> 3 files changed, 56 insertions(+), 10 deletions(-)
>
[...]
> diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
> index 2a61c4f..732c411 100644
> --- a/drivers/net/i40e/i40e_flow.c
> +++ b/drivers/net/i40e/i40e_flow.c
[...]
> @@ -1492,11 +1495,16 @@ i40e_flow_destroy(__rte_unused struct rte_eth_dev *dev,
The `__rte_unused' qualifier should be removed.
> struct rte_flow *flow,
> struct rte_flow_error *error)
> {
> + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
> struct i40e_flow *pmd_flow = (struct i40e_flow *)flow;
> enum rte_filter_type filter_type = pmd_flow->filter_type;
> int ret;
>
> switch (filter_type) {
> + case RTE_ETH_FILTER_ETHERTYPE:
> + ret = i40e_dev_destroy_ethertype_filter(pf,
> + (struct i40e_ethertype_filter *)pmd_flow->rule);
> + break;
> default:
> PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
> filter_type);
> @@ -1504,10 +1512,49 @@ i40e_flow_destroy(__rte_unused struct rte_eth_dev *dev,
> break;
> }
>
> - if (ret)
> + if (!ret) {
> + TAILQ_REMOVE(&pf->flow_list, pmd_flow, node);
> + free(pmd_flow);
> + } else {
> rte_flow_error_set(error, EINVAL,
> RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
> "Failed to destroy flow.");
> + }
Probably you should introduce the pf related code when introducing
i40e_flow_destroy() in the below patch:
[PATCH v2 11/17] net/i40e: add flow destroy function
> +
> + return ret;
> +}
> +
> +static int
> +i40e_dev_destroy_ethertype_filter(struct i40e_pf *pf,
> + struct i40e_ethertype_filter *filter)
> +{
> + struct i40e_hw *hw = I40E_PF_TO_HW(pf);
> + struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
> + struct i40e_ethertype_filter *node;
> + struct i40e_control_filter_stats stats;
> + uint16_t flags = 0;
> + int ret = 0;
> +
> + if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
> + flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
> + if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
> + flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
> + flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
> +
> + memset(&stats, 0, sizeof(stats));
> + ret = i40e_aq_add_rem_control_packet_filter(hw,
> + filter->input.mac_addr.addr_bytes,
> + filter->input.ether_type,
> + flags, pf->main_vsi->seid,
> + filter->queue, 0, &stats, NULL);
> + if (ret < 0)
> + return ret;
> +
> + node = i40e_sw_ethertype_filter_lookup(ethertype_rule, &filter->input);
> + if (node)
> + ret = i40e_sw_ethertype_filter_del(pf, node);
> + else
> + return -EINVAL;
It would be more readable to check whether node equals NULL and return
when it's true, and call i40e_sw_ethertype_filter_del(pf, node) outside
the `if' statement:
node = i40e_sw_ethertype_filter_lookup(ethertype_rule, &filter->input);
if (node == NULL)
return -EINVAL;
ret = i40e_sw_ethertype_filter_del(pf, node);
Best regards,
Tiwei Bie
^ permalink raw reply [flat|nested] 175+ messages in thread
* Re: [dpdk-dev] [PATCH v2 15/17] net/i40e: add flow flush function
2016-12-27 6:26 ` [dpdk-dev] [PATCH v2 15/17] net/i40e: add flow flush function Beilei Xing
2016-12-27 12:40 ` Adrien Mazarguil
@ 2016-12-28 5:35 ` Tiwei Bie
2016-12-28 6:48 ` Xing, Beilei
1 sibling, 1 reply; 175+ messages in thread
From: Tiwei Bie @ 2016-12-28 5:35 UTC (permalink / raw)
To: Beilei Xing; +Cc: jingjing.wu, helin.zhang, dev
On Tue, Dec 27, 2016 at 02:26:22PM +0800, Beilei Xing wrote:
> This patch adds i40e_flow_flush function to flush all
> filters for users. And flow director flush function
> is involved first.
>
> Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> ---
> drivers/net/i40e/i40e_ethdev.h | 3 +++
> drivers/net/i40e/i40e_fdir.c | 8 ++------
> drivers/net/i40e/i40e_flow.c | 46 ++++++++++++++++++++++++++++++++++++++++++
> 3 files changed, 51 insertions(+), 6 deletions(-)
>
> diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
> index b8c7d41..0b736d5 100644
> --- a/drivers/net/i40e/i40e_ethdev.h
> +++ b/drivers/net/i40e/i40e_ethdev.h
> @@ -786,6 +786,9 @@ i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_rule *tunnel_rule,
> const struct i40e_tunnel_filter_input *input);
> int i40e_sw_tunnel_filter_del(struct i40e_pf *pf,
> struct i40e_tunnel_filter *tunnel_filter);
> +int i40e_sw_fdir_filter_del(struct i40e_pf *pf,
> + struct i40e_fdir_filter *filter);
> +int i40e_fdir_flush(struct rte_eth_dev *dev);
>
Why not declare them as global functions from the beginning?
> /* I40E_DEV_PRIVATE_TO */
> #define I40E_DEV_PRIVATE_TO_PF(adapter) \
> diff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c
> index 6c1bb18..f10aeee 100644
> --- a/drivers/net/i40e/i40e_fdir.c
> +++ b/drivers/net/i40e/i40e_fdir.c
> @@ -119,8 +119,6 @@ static int i40e_fdir_filter_programming(struct i40e_pf *pf,
> enum i40e_filter_pctype pctype,
> const struct rte_eth_fdir_filter *filter,
> bool add);
> -static int i40e_fdir_flush(struct rte_eth_dev *dev);
> -
> static int i40e_fdir_filter_convert(const struct rte_eth_fdir_filter *input,
> struct i40e_fdir_filter *filter);
> static struct i40e_fdir_filter *
> @@ -128,8 +126,6 @@ i40e_sw_fdir_filter_lookup(struct i40e_fdir_info *fdir_info,
> const struct rte_eth_fdir_input *input);
> static int i40e_sw_fdir_filter_insert(struct i40e_pf *pf,
> struct i40e_fdir_filter *filter);
> -static int i40e_sw_fdir_filter_del(struct i40e_pf *pf,
> - struct i40e_fdir_filter *filter);
>
> static int
> i40e_fdir_rx_queue_init(struct i40e_rx_queue *rxq)
> @@ -1070,7 +1066,7 @@ i40e_sw_fdir_filter_insert(struct i40e_pf *pf, struct i40e_fdir_filter *filter)
> }
>
> /* Delete a flow director filter from the SW list */
> -static int
> +int
> i40e_sw_fdir_filter_del(struct i40e_pf *pf, struct i40e_fdir_filter *filter)
> {
> struct i40e_fdir_info *fdir_info = &pf->fdir;
> @@ -1318,7 +1314,7 @@ i40e_fdir_filter_programming(struct i40e_pf *pf,
> * i40e_fdir_flush - clear all filters of Flow Director table
> * @pf: board private structure
> */
> -static int
> +int
> i40e_fdir_flush(struct rte_eth_dev *dev)
> {
> struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
> diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
> index 4c7856c..1d9f603 100644
> --- a/drivers/net/i40e/i40e_flow.c
> +++ b/drivers/net/i40e/i40e_flow.c
> @@ -68,6 +68,8 @@ static struct rte_flow *i40e_flow_create(struct rte_eth_dev *dev,
> const struct rte_flow_item pattern[],
> const struct rte_flow_action actions[],
> struct rte_flow_error *error);
> +static int i40e_flow_flush(struct rte_eth_dev *dev,
> + struct rte_flow_error *error);
> static int i40e_flow_destroy(struct rte_eth_dev *dev,
> struct rte_flow *flow,
> struct rte_flow_error *error);
> @@ -95,11 +97,13 @@ static int i40e_dev_destroy_ethertype_filter(struct i40e_pf *pf,
> struct i40e_ethertype_filter *filter);
> static int i40e_dev_destroy_tunnel_filter(struct i40e_pf *pf,
> struct i40e_tunnel_filter *filter);
> +static int i40e_fdir_filter_flush(struct i40e_pf *pf);
>
> const struct rte_flow_ops i40e_flow_ops = {
> .validate = i40e_flow_validate,
> .create = i40e_flow_create,
> .destroy = i40e_flow_destroy,
> + .flush = i40e_flow_flush,
> };
>
> enum rte_filter_type cons_filter_type = RTE_ETH_FILTER_NONE;
> @@ -1603,3 +1607,45 @@ i40e_dev_destroy_tunnel_filter(struct i40e_pf *pf,
>
> return ret;
> }
> +
> +static int
> +i40e_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
> +{
> + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
> + int ret = 0;
> +
Meaningless initialization.
> + ret = i40e_fdir_filter_flush(pf);
> + if (!ret)
> + rte_flow_error_set(error, EINVAL,
> + RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
> + "Failed to flush FDIR flows.");
Just curious. What's the relationship between `ret' and the code (EINVAL)
passed to rte_flow_error_set()? Is `-ret' acceptable as the parameter?
Because the `-ret' which is actually returned by i40e_fdir_flush() is also
some standard UNIX errno. When error occurs, user should use which one to
figure out the failure reason? `-ret' or `rte_errno'?
> +
> + return ret;
> +}
> +
> +static int
> +i40e_fdir_filter_flush(struct i40e_pf *pf)
> +{
> + struct rte_eth_dev *dev = pf->adapter->eth_dev;
> + struct i40e_fdir_info *fdir_info = &pf->fdir;
> + struct i40e_fdir_filter *fdir_filter;
> + struct i40e_flow *flow;
> + int ret = 0;
> +
Meaningless initialization.
> + ret = i40e_fdir_flush(dev);
> + if (!ret) {
> + /* Delete FDIR filters in FDIR list. */
> + while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list)))
> + i40e_sw_fdir_filter_del(pf, fdir_filter);
> +
The i40e_sw_fdir_filter_del() may fail, in which case fdir_filter won't
be removed from fdir_info->fdir_list. Will it lead to an infinite loop?
Should you check the retval of i40e_sw_fdir_filter_del() and break the
loop when it fails?
Best regards,
Tiwei Bie
^ permalink raw reply [flat|nested] 175+ messages in thread
* Re: [dpdk-dev] [PATCH v2 15/17] net/i40e: add flow flush function
2016-12-28 5:35 ` Tiwei Bie
@ 2016-12-28 6:48 ` Xing, Beilei
2016-12-28 7:00 ` Tiwei Bie
0 siblings, 1 reply; 175+ messages in thread
From: Xing, Beilei @ 2016-12-28 6:48 UTC (permalink / raw)
To: Bie, Tiwei; +Cc: Wu, Jingjing, Zhang, Helin, dev
> -----Original Message-----
> From: Bie, Tiwei
> Sent: Wednesday, December 28, 2016 1:36 PM
> To: Xing, Beilei <beilei.xing@intel.com>
> Cc: Wu, Jingjing <jingjing.wu@intel.com>; Zhang, Helin
> <helin.zhang@intel.com>; dev@dpdk.org
> Subject: Re: [dpdk-dev] [PATCH v2 15/17] net/i40e: add flow flush function
>
> On Tue, Dec 27, 2016 at 02:26:22PM +0800, Beilei Xing wrote:
> > This patch adds i40e_flow_flush function to flush all filters for
> > users. And flow director flush function is involved first.
> >
> > Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> > ---
> > drivers/net/i40e/i40e_ethdev.h | 3 +++
> > drivers/net/i40e/i40e_fdir.c | 8 ++------
> > drivers/net/i40e/i40e_flow.c | 46
> ++++++++++++++++++++++++++++++++++++++++++
> > 3 files changed, 51 insertions(+), 6 deletions(-)
> >
> > diff --git a/drivers/net/i40e/i40e_ethdev.h
> > b/drivers/net/i40e/i40e_ethdev.h index b8c7d41..0b736d5 100644
> > --- a/drivers/net/i40e/i40e_ethdev.h
> > +++ b/drivers/net/i40e/i40e_ethdev.h
> > @@ -786,6 +786,9 @@ i40e_sw_tunnel_filter_lookup(struct
> i40e_tunnel_rule *tunnel_rule,
> > const struct i40e_tunnel_filter_input *input); int
> > i40e_sw_tunnel_filter_del(struct i40e_pf *pf,
> > struct i40e_tunnel_filter *tunnel_filter);
> > +int i40e_sw_fdir_filter_del(struct i40e_pf *pf,
> > + struct i40e_fdir_filter *filter); int
> i40e_fdir_flush(struct
> > +rte_eth_dev *dev);
> >
>
> Why don't declare them as the global functions at the beginning?
When I implemented the store/restore function, I planned for this function to be used only in i40e_ethdev.c.
I changed them to global functions when I added i40e_flow.c to rework all the flow ops.
>
> > /* I40E_DEV_PRIVATE_TO */
> > #define I40E_DEV_PRIVATE_TO_PF(adapter) \ diff --git
> > a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c index
> > 6c1bb18..f10aeee 100644
> > --- a/drivers/net/i40e/i40e_fdir.c
> > +++ b/drivers/net/i40e/i40e_fdir.c
> > @@ -119,8 +119,6 @@ static int i40e_fdir_filter_programming(struct
> i40e_pf *pf,
> > enum i40e_filter_pctype pctype,
> > const struct rte_eth_fdir_filter *filter,
> > bool add);
> > -static int i40e_fdir_flush(struct rte_eth_dev *dev);
> > -
> > static int i40e_fdir_filter_convert(const struct rte_eth_fdir_filter *input,
> > struct i40e_fdir_filter *filter); static struct
> i40e_fdir_filter
> > * @@ -128,8 +126,6 @@ i40e_sw_fdir_filter_lookup(struct i40e_fdir_info
> > *fdir_info,
> > const struct rte_eth_fdir_input *input); static int
> > i40e_sw_fdir_filter_insert(struct i40e_pf *pf,
> > struct i40e_fdir_filter *filter); -static int
> > i40e_sw_fdir_filter_del(struct i40e_pf *pf,
> > - struct i40e_fdir_filter *filter);
> >
> > static int
> > i40e_fdir_rx_queue_init(struct i40e_rx_queue *rxq) @@ -1070,7 +1066,7
> > @@ i40e_sw_fdir_filter_insert(struct i40e_pf *pf, struct
> > i40e_fdir_filter *filter) }
> >
> > /* Delete a flow director filter from the SW list */ -static int
> > +int
> > i40e_sw_fdir_filter_del(struct i40e_pf *pf, struct i40e_fdir_filter
> > *filter) {
> > struct i40e_fdir_info *fdir_info = &pf->fdir; @@ -1318,7 +1314,7 @@
> > i40e_fdir_filter_programming(struct i40e_pf *pf,
> > * i40e_fdir_flush - clear all filters of Flow Director table
> > * @pf: board private structure
> > */
> > -static int
> > +int
> > i40e_fdir_flush(struct rte_eth_dev *dev) {
> > struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data-
> >dev_private);
> > diff --git a/drivers/net/i40e/i40e_flow.c
> > b/drivers/net/i40e/i40e_flow.c index 4c7856c..1d9f603 100644
> > --- a/drivers/net/i40e/i40e_flow.c
> > +++ b/drivers/net/i40e/i40e_flow.c
> > @@ -68,6 +68,8 @@ static struct rte_flow *i40e_flow_create(struct
> rte_eth_dev *dev,
> > const struct rte_flow_item pattern[],
> > const struct rte_flow_action
> actions[],
> > struct rte_flow_error *error);
> > +static int i40e_flow_flush(struct rte_eth_dev *dev,
> > + struct rte_flow_error *error);
> > static int i40e_flow_destroy(struct rte_eth_dev *dev,
> > struct rte_flow *flow,
> > struct rte_flow_error *error); @@ -95,11 +97,13
> @@ static int
> > i40e_dev_destroy_ethertype_filter(struct i40e_pf *pf,
> > struct i40e_ethertype_filter *filter); static
> int
> > i40e_dev_destroy_tunnel_filter(struct i40e_pf *pf,
> > struct i40e_tunnel_filter *filter);
> > +static int i40e_fdir_filter_flush(struct i40e_pf *pf);
> >
> > const struct rte_flow_ops i40e_flow_ops = {
> > .validate = i40e_flow_validate,
> > .create = i40e_flow_create,
> > .destroy = i40e_flow_destroy,
> > + .flush = i40e_flow_flush,
> > };
> >
> > enum rte_filter_type cons_filter_type = RTE_ETH_FILTER_NONE; @@
> > -1603,3 +1607,45 @@ i40e_dev_destroy_tunnel_filter(struct i40e_pf *pf,
> >
> > return ret;
> > }
> > +
> > +static int
> > +i40e_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error
> > +*error) {
> > + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data-
> >dev_private);
> > + int ret = 0;
> > +
>
> Meaningless initialization.
Yes, you're right, will change in next version. Thanks.
>
> > + ret = i40e_fdir_filter_flush(pf);
> > + if (!ret)
> > + rte_flow_error_set(error, EINVAL,
> > + RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
> > + "Failed to flush FDIR flows.");
>
> Just curious. What's the relationship between `ret' and the code (EINVAL)
> passed to rte_flow_error_set()? Is `-ret' acceptable as the parameter?
> Because the `-ret' which is actually returned by i40e_fdir_flush() is also some
> standard UNIX errno. When error occurs, user should use which one to figure
> out the failure reason? `-ret' or `rte_errno'?
rte_errno. I need to rework all rte_flow_error_set() according to Adrien's comments.
>
> > +
> > + return ret;
> > +}
> > +
> > +static int
> > +i40e_fdir_filter_flush(struct i40e_pf *pf) {
> > + struct rte_eth_dev *dev = pf->adapter->eth_dev;
> > + struct i40e_fdir_info *fdir_info = &pf->fdir;
> > + struct i40e_fdir_filter *fdir_filter;
> > + struct i40e_flow *flow;
> > + int ret = 0;
> > +
>
> Meaningless initialization.
>
> > + ret = i40e_fdir_flush(dev);
> > + if (!ret) {
> > + /* Delete FDIR filters in FDIR list. */
> > + while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list)))
> > + i40e_sw_fdir_filter_del(pf, fdir_filter);
> > +
>
> The i40e_sw_fdir_filter_del() may fail, in which case fdir_filter won't be
> removed from fdir_info->fdir_list. Will it lead to an infinite loop?
> Should you check the retval of i40e_sw_fdir_filter_del() and break the loop
> when it fails?
Yes, thanks for catching this.
>
> Best regards,
> Tiwei Bie
^ permalink raw reply [flat|nested] 175+ messages in thread
* Re: [dpdk-dev] [PATCH v2 12/17] net/i40e: destroy ethertype filter
2016-12-28 4:56 ` Tiwei Bie
@ 2016-12-28 6:57 ` Xing, Beilei
0 siblings, 0 replies; 175+ messages in thread
From: Xing, Beilei @ 2016-12-28 6:57 UTC (permalink / raw)
To: Bie, Tiwei; +Cc: Wu, Jingjing, Zhang, Helin, dev
> -----Original Message-----
> From: Bie, Tiwei
> Sent: Wednesday, December 28, 2016 12:56 PM
> To: Xing, Beilei <beilei.xing@intel.com>
> Cc: Wu, Jingjing <jingjing.wu@intel.com>; Zhang, Helin
> <helin.zhang@intel.com>; dev@dpdk.org
> Subject: Re: [dpdk-dev] [PATCH v2 12/17] net/i40e: destroy ethertype filter
>
> On Tue, Dec 27, 2016 at 02:26:19PM +0800, Beilei Xing wrote:
> > This patch adds i40e_dev_destroy_ethertype_filter function to destroy
> > a ethertype filter for users.
> >
> > Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> > ---
> > drivers/net/i40e/i40e_ethdev.c | 10 ++-------
> > drivers/net/i40e/i40e_ethdev.h | 5 +++++
> > drivers/net/i40e/i40e_flow.c | 51
> ++++++++++++++++++++++++++++++++++++++++--
> > 3 files changed, 56 insertions(+), 10 deletions(-)
> >
> [...]
> > diff --git a/drivers/net/i40e/i40e_flow.c
> > b/drivers/net/i40e/i40e_flow.c index 2a61c4f..732c411 100644
> > --- a/drivers/net/i40e/i40e_flow.c
> > +++ b/drivers/net/i40e/i40e_flow.c
> [...]
> > @@ -1492,11 +1495,16 @@ i40e_flow_destroy(__rte_unused struct
> > rte_eth_dev *dev,
>
> The `__rte_unused' qualifier should be removed.
Yes :)
>
> > struct rte_flow *flow,
> > struct rte_flow_error *error)
> > {
> > + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data-
> >dev_private);
> > struct i40e_flow *pmd_flow = (struct i40e_flow *)flow;
> > enum rte_filter_type filter_type = pmd_flow->filter_type;
> > int ret;
> >
> > switch (filter_type) {
> > + case RTE_ETH_FILTER_ETHERTYPE:
> > + ret = i40e_dev_destroy_ethertype_filter(pf,
> > + (struct i40e_ethertype_filter *)pmd_flow-
> >rule);
> > + break;
> > default:
> > PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
> > filter_type);
> > @@ -1504,10 +1512,49 @@ i40e_flow_destroy(__rte_unused struct
> rte_eth_dev *dev,
> > break;
> > }
> >
> > - if (ret)
> > + if (!ret) {
> > + TAILQ_REMOVE(&pf->flow_list, pmd_flow, node);
> > + free(pmd_flow);
> > + } else {
> > rte_flow_error_set(error, EINVAL,
> > RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
> > "Failed to destroy flow.");
> > + }
>
> Probably you should introduce the pf related code when introducing
> i40e_flow_destroy() in the below patch:
>
> [PATCH v2 11/17] net/i40e: add flow destroy function
Good point, thanks.
>
> > +
> > + return ret;
> > +}
> > +
> > +static int
> > +i40e_dev_destroy_ethertype_filter(struct i40e_pf *pf,
> > + struct i40e_ethertype_filter *filter) {
> > + struct i40e_hw *hw = I40E_PF_TO_HW(pf);
> > + struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
> > + struct i40e_ethertype_filter *node;
> > + struct i40e_control_filter_stats stats;
> > + uint16_t flags = 0;
> > + int ret = 0;
> > +
> > + if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
> > + flags |=
> I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
> > + if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
> > + flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
> > + flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
> > +
> > + memset(&stats, 0, sizeof(stats));
> > + ret = i40e_aq_add_rem_control_packet_filter(hw,
> > + filter->input.mac_addr.addr_bytes,
> > + filter->input.ether_type,
> > + flags, pf->main_vsi->seid,
> > + filter->queue, 0, &stats, NULL);
> > + if (ret < 0)
> > + return ret;
> > +
> > + node = i40e_sw_ethertype_filter_lookup(ethertype_rule, &filter-
> >input);
> > + if (node)
> > + ret = i40e_sw_ethertype_filter_del(pf, node);
> > + else
> > + return -EINVAL;
>
> It would be more readable to check whether node equals NULL and return
> when it's true, and call i40e_sw_ethertype_filter_del(pf, node) outside the
> `if' statement:
>
> node = i40e_sw_ethertype_filter_lookup(ethertype_rule, &filter-
> >input);
> if (node == NULL)
> return -EINVAL;
>
> ret = i40e_sw_ethertype_filter_del(pf, node);
Make sense, got it:)
>
> Best regards,
> Tiwei Bie
^ permalink raw reply [flat|nested] 175+ messages in thread
* Re: [dpdk-dev] [PATCH v2 15/17] net/i40e: add flow flush function
2016-12-28 6:48 ` Xing, Beilei
@ 2016-12-28 7:00 ` Tiwei Bie
2016-12-28 7:20 ` Xing, Beilei
0 siblings, 1 reply; 175+ messages in thread
From: Tiwei Bie @ 2016-12-28 7:00 UTC (permalink / raw)
To: Xing, Beilei; +Cc: Wu, Jingjing, Zhang, Helin, dev
On Wed, Dec 28, 2016 at 02:48:02PM +0800, Xing, Beilei wrote:
> > -----Original Message-----
> > From: Bie, Tiwei
> > Sent: Wednesday, December 28, 2016 1:36 PM
> > To: Xing, Beilei <beilei.xing@intel.com>
> > Cc: Wu, Jingjing <jingjing.wu@intel.com>; Zhang, Helin
> > <helin.zhang@intel.com>; dev@dpdk.org
> > Subject: Re: [dpdk-dev] [PATCH v2 15/17] net/i40e: add flow flush function
> >
> > On Tue, Dec 27, 2016 at 02:26:22PM +0800, Beilei Xing wrote:
> > > This patch adds i40e_flow_flush function to flush all filters for
> > > users. And flow director flush function is involved first.
> > >
> > > Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> > > ---
> > > drivers/net/i40e/i40e_ethdev.h | 3 +++
> > > drivers/net/i40e/i40e_fdir.c | 8 ++------
> > > drivers/net/i40e/i40e_flow.c | 46
> > ++++++++++++++++++++++++++++++++++++++++++
> > > 3 files changed, 51 insertions(+), 6 deletions(-)
> > >
> > > diff --git a/drivers/net/i40e/i40e_ethdev.h
> > > b/drivers/net/i40e/i40e_ethdev.h index b8c7d41..0b736d5 100644
> > > --- a/drivers/net/i40e/i40e_ethdev.h
> > > +++ b/drivers/net/i40e/i40e_ethdev.h
> > > @@ -786,6 +786,9 @@ i40e_sw_tunnel_filter_lookup(struct
> > i40e_tunnel_rule *tunnel_rule,
> > > const struct i40e_tunnel_filter_input *input); int
> > > i40e_sw_tunnel_filter_del(struct i40e_pf *pf,
> > > struct i40e_tunnel_filter *tunnel_filter);
> > > +int i40e_sw_fdir_filter_del(struct i40e_pf *pf,
> > > + struct i40e_fdir_filter *filter); int
> > i40e_fdir_flush(struct
> > > +rte_eth_dev *dev);
> > >
> >
> > Why don't declare them as the global functions at the beginning?
>
> When I implement the store/restore function, I plan this function is only used in i40e_ethdev.c.
> I change them to the global functions since I add i40e_flow.c to rework all the flow ops.
>
These functions are also introduced in this patch set. There is no
particular reason to mark them as static at first and then turn them
into the global functions in the later patches. So it would be better
to declare them as the global ones when introducing them.
Best regards,
Tiwei Bie
^ permalink raw reply [flat|nested] 175+ messages in thread
* Re: [dpdk-dev] [PATCH v2 03/17] net/i40e: store flow director filter
2016-12-28 3:38 ` Tiwei Bie
@ 2016-12-28 7:10 ` Xing, Beilei
2016-12-28 7:14 ` Tiwei Bie
0 siblings, 1 reply; 175+ messages in thread
From: Xing, Beilei @ 2016-12-28 7:10 UTC (permalink / raw)
To: Bie, Tiwei; +Cc: Wu, Jingjing, Zhang, Helin, dev
> -----Original Message-----
> From: Bie, Tiwei
> Sent: Wednesday, December 28, 2016 11:39 AM
> To: Xing, Beilei <beilei.xing@intel.com>
> Cc: Wu, Jingjing <jingjing.wu@intel.com>; Zhang, Helin
> <helin.zhang@intel.com>; dev@dpdk.org
> Subject: Re: [dpdk-dev] [PATCH v2 03/17] net/i40e: store flow director filter
>
> On Tue, Dec 27, 2016 at 02:26:10PM +0800, Beilei Xing wrote:
> > Currently there's no flow director filter stored in SW. This patch
> > stores flow director filters in SW with cuckoo hash, also adds
> > protection if a flow director filter has been added.
> >
> > Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> > ---
> > drivers/net/i40e/i40e_ethdev.c | 48 +++++++++++++++++++++
> > drivers/net/i40e/i40e_ethdev.h | 12 ++++++
> > drivers/net/i40e/i40e_fdir.c | 98
> ++++++++++++++++++++++++++++++++++++++++++
> > 3 files changed, 158 insertions(+)
> >
> > diff --git a/drivers/net/i40e/i40e_ethdev.c
> > b/drivers/net/i40e/i40e_ethdev.c index c012d5d..427ebdc 100644
> > --- a/drivers/net/i40e/i40e_ethdev.c
> > +++ b/drivers/net/i40e/i40e_ethdev.c
> [...]
> > @@ -1342,6 +1379,17 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
> > rte_free(p_tunnel);
> > }
> >
> > + /* Remove all flow director rules and hash */
> > + if (fdir_info->hash_map)
> > + rte_free(fdir_info->hash_map);
> > + if (fdir_info->hash_table)
> > + rte_hash_free(fdir_info->hash_table);
> > +
> > + while ((p_fdir = TAILQ_FIRST(&fdir_info->fdir_list))) {
>
> There is a redundant pair of parentheses, or you should compare with NULL.
I think the other pair of parentheses is used for the comparison with NULL. In fact there's similar usage in the PMD.
>
> > + TAILQ_REMOVE(&fdir_info->fdir_list, p_fdir, rules);
> > + rte_free(p_fdir);
> > + }
> > +
> > dev->dev_ops = NULL;
> > dev->rx_pkt_burst = NULL;
> > dev->tx_pkt_burst = NULL;
> [...]
> > diff --git a/drivers/net/i40e/i40e_fdir.c
> > b/drivers/net/i40e/i40e_fdir.c index 335bf15..faa2495 100644
> > --- a/drivers/net/i40e/i40e_fdir.c
> > +++ b/drivers/net/i40e/i40e_fdir.c
> [...]
> > +/* Check if there exists the flow director filter */ static struct
> > +i40e_fdir_filter * i40e_sw_fdir_filter_lookup(struct i40e_fdir_info
> > +*fdir_info,
> > + const struct rte_eth_fdir_input *input) {
> > + int ret = 0;
> > +
>
> The initialization is meaningless, as it will be written by the below
> assignment unconditionally.
Yes, you're right.
>
> > + ret = rte_hash_lookup(fdir_info->hash_table, (const void *)input);
> > + if (ret < 0)
> > + return NULL;
> > +
> > + return fdir_info->hash_map[ret];
> > +}
> > +
> > +/* Add a flow director filter into the SW list */
> > +static int
> > +i40e_sw_fdir_filter_insert(struct i40e_pf *pf, struct i40e_fdir_filter *filter)
> > +{
> > + struct i40e_fdir_info *fdir_info = &pf->fdir;
> > + int ret = 0;
> > +
>
> Same here.
>
> > + ret = rte_hash_add_key(fdir_info->hash_table,
> > + &filter->fdir.input);
> > + if (ret < 0)
> > + PMD_DRV_LOG(ERR,
> > + "Failed to insert fdir filter to hash table %d!",
> > + ret);
>
> Function should return when ret < 0.
Thanks for catching it.
>
> > + fdir_info->hash_map[ret] = filter;
> > +
> > + TAILQ_INSERT_TAIL(&fdir_info->fdir_list, filter, rules);
> > +
> > + return 0;
> > +}
> > +
> > +/* Delete a flow director filter from the SW list */
> > +static int
> > +i40e_sw_fdir_filter_del(struct i40e_pf *pf, struct i40e_fdir_filter *filter)
> > +{
> > + struct i40e_fdir_info *fdir_info = &pf->fdir;
> > + int ret = 0;
> > +
>
> Same here.
>
> > + ret = rte_hash_del_key(fdir_info->hash_table,
> > + &filter->fdir.input);
> > + if (ret < 0)
> > + PMD_DRV_LOG(ERR,
> > + "Failed to delete fdir filter to hash table %d!",
> > + ret);
>
> Function should return when ret < 0.
>
> > + fdir_info->hash_map[ret] = NULL;
> > +
> > + TAILQ_REMOVE(&fdir_info->fdir_list, filter, rules);
> > + rte_free(filter);
> > +
> > + return 0;
> > +}
> > +
> > /*
> > * i40e_add_del_fdir_filter - add or remove a flow director filter.
> > * @pf: board private structure
> > @@ -1032,6 +1105,8 @@ i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
> > struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data-
> >dev_private);
> > unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
> > enum i40e_filter_pctype pctype;
> > + struct i40e_fdir_info *fdir_info = &pf->fdir;
> > + struct i40e_fdir_filter *fdir_filter, *node;
> > int ret = 0;
> >
> > if (dev->data->dev_conf.fdir_conf.mode !=
> RTE_FDIR_MODE_PERFECT) {
> > @@ -1054,6 +1129,21 @@ i40e_add_del_fdir_filter(struct rte_eth_dev
> *dev,
> > return -EINVAL;
> > }
> >
> > + fdir_filter = rte_zmalloc("fdir_filter", sizeof(*fdir_filter), 0);
> > + i40e_fdir_filter_convert(filter, fdir_filter);
> > + node = i40e_sw_fdir_filter_lookup(fdir_info, &fdir_filter->fdir.input);
> > + if (add && node) {
> > + PMD_DRV_LOG(ERR,
> > + "Conflict with existing flow director rules!");
> > + rte_free(fdir_filter);
> > + return -EINVAL;
> > + } else if (!add && !node) {
>
> When `if (add && node)' is true, function will return. There is no need
> to use `else' here.
>
> Best regards,
> Tiwei Bie
^ permalink raw reply [flat|nested] 175+ messages in thread
* Re: [dpdk-dev] [PATCH v2 03/17] net/i40e: store flow director filter
2016-12-28 7:10 ` Xing, Beilei
@ 2016-12-28 7:14 ` Tiwei Bie
2016-12-28 7:36 ` Tiwei Bie
0 siblings, 1 reply; 175+ messages in thread
From: Tiwei Bie @ 2016-12-28 7:14 UTC (permalink / raw)
To: Xing, Beilei; +Cc: Wu, Jingjing, Zhang, Helin, dev
On Wed, Dec 28, 2016 at 03:10:39PM +0800, Xing, Beilei wrote:
>
>
> > -----Original Message-----
> > From: Bie, Tiwei
> > Sent: Wednesday, December 28, 2016 11:39 AM
> > To: Xing, Beilei <beilei.xing@intel.com>
> > Cc: Wu, Jingjing <jingjing.wu@intel.com>; Zhang, Helin
> > <helin.zhang@intel.com>; dev@dpdk.org
> > Subject: Re: [dpdk-dev] [PATCH v2 03/17] net/i40e: store flow director filter
> >
> > On Tue, Dec 27, 2016 at 02:26:10PM +0800, Beilei Xing wrote:
> > > Currently there's no flow director filter stored in SW. This patch
> > > stores flow director filters in SW with cuckoo hash, also adds
> > > protection if a flow director filter has been added.
> > >
> > > Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> > > ---
> > > drivers/net/i40e/i40e_ethdev.c | 48 +++++++++++++++++++++
> > > drivers/net/i40e/i40e_ethdev.h | 12 ++++++
> > > drivers/net/i40e/i40e_fdir.c | 98
> > ++++++++++++++++++++++++++++++++++++++++++
> > > 3 files changed, 158 insertions(+)
> > >
> > > diff --git a/drivers/net/i40e/i40e_ethdev.c
> > > b/drivers/net/i40e/i40e_ethdev.c index c012d5d..427ebdc 100644
> > > --- a/drivers/net/i40e/i40e_ethdev.c
> > > +++ b/drivers/net/i40e/i40e_ethdev.c
> > [...]
> > > @@ -1342,6 +1379,17 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
> > > rte_free(p_tunnel);
> > > }
> > >
> > > + /* Remove all flow director rules and hash */
> > > + if (fdir_info->hash_map)
> > > + rte_free(fdir_info->hash_map);
> > > + if (fdir_info->hash_table)
> > > + rte_hash_free(fdir_info->hash_table);
> > > +
> > > + while ((p_fdir = TAILQ_FIRST(&fdir_info->fdir_list))) {
> >
> > There is a redundant pair of parentheses, or you should compare with NULL.
>
> I think the another parentheses is used to compare with NULL. In fact there's similar using in PMD.
>
The outer parentheses are redundant unless you compare it with NULL explicitly.
Anyway, you could just follow the existing coding style.
Best regards,
Tiwei Bie
^ permalink raw reply [flat|nested] 175+ messages in thread
* Re: [dpdk-dev] [PATCH v2 15/17] net/i40e: add flow flush function
2016-12-28 7:00 ` Tiwei Bie
@ 2016-12-28 7:20 ` Xing, Beilei
0 siblings, 0 replies; 175+ messages in thread
From: Xing, Beilei @ 2016-12-28 7:20 UTC (permalink / raw)
To: Bie, Tiwei; +Cc: Wu, Jingjing, Zhang, Helin, dev
> -----Original Message-----
> From: Bie, Tiwei
> Sent: Wednesday, December 28, 2016 3:01 PM
> To: Xing, Beilei <beilei.xing@intel.com>
> Cc: Wu, Jingjing <jingjing.wu@intel.com>; Zhang, Helin
> <helin.zhang@intel.com>; dev@dpdk.org
> Subject: Re: [dpdk-dev] [PATCH v2 15/17] net/i40e: add flow flush function
>
> On Wed, Dec 28, 2016 at 02:48:02PM +0800, Xing, Beilei wrote:
> > > -----Original Message-----
> > > From: Bie, Tiwei
> > > Sent: Wednesday, December 28, 2016 1:36 PM
> > > To: Xing, Beilei <beilei.xing@intel.com>
> > > Cc: Wu, Jingjing <jingjing.wu@intel.com>; Zhang, Helin
> > > <helin.zhang@intel.com>; dev@dpdk.org
> > > Subject: Re: [dpdk-dev] [PATCH v2 15/17] net/i40e: add flow flush
> > > function
> > >
> > > On Tue, Dec 27, 2016 at 02:26:22PM +0800, Beilei Xing wrote:
> > > > This patch adds i40e_flow_flush function to flush all filters for
> > > > users. And flow director flush function is involved first.
> > > >
> > > > Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> > > > ---
> > > > drivers/net/i40e/i40e_ethdev.h | 3 +++
> > > > drivers/net/i40e/i40e_fdir.c | 8 ++------
> > > > drivers/net/i40e/i40e_flow.c | 46
> > > ++++++++++++++++++++++++++++++++++++++++++
> > > > 3 files changed, 51 insertions(+), 6 deletions(-)
> > > >
> > > > diff --git a/drivers/net/i40e/i40e_ethdev.h
> > > > b/drivers/net/i40e/i40e_ethdev.h index b8c7d41..0b736d5 100644
> > > > --- a/drivers/net/i40e/i40e_ethdev.h
> > > > +++ b/drivers/net/i40e/i40e_ethdev.h
> > > > @@ -786,6 +786,9 @@ i40e_sw_tunnel_filter_lookup(struct
> > > i40e_tunnel_rule *tunnel_rule,
> > > > const struct i40e_tunnel_filter_input *input); int
> > > > i40e_sw_tunnel_filter_del(struct i40e_pf *pf,
> > > > struct i40e_tunnel_filter *tunnel_filter);
> > > > +int i40e_sw_fdir_filter_del(struct i40e_pf *pf,
> > > > + struct i40e_fdir_filter *filter); int
> > > i40e_fdir_flush(struct
> > > > +rte_eth_dev *dev);
> > > >
> > >
> > > Why don't declare them as the global functions at the beginning?
> >
> > When I implement the store/restore function, I plan this function is only
> used in i40e_ethdev.c.
> > I change them to the global functions since I add i40e_flow.c to rework all
> the flow ops.
> >
>
> These functions are also introduced in this patch set. There is no particular
> reason to mark them as static at first and then turn them into the global
> functions in the later patches. So it would be better to declare them as the
> global ones when introducing them.
Yes, make sense. Will update in next version.
>
> Best regards,
> Tiwei Bie
^ permalink raw reply [flat|nested] 175+ messages in thread
* Re: [dpdk-dev] [PATCH v2 12/17] net/i40e: destroy ethertype filter
2016-12-28 3:30 ` Wu, Jingjing
@ 2016-12-28 7:29 ` Xing, Beilei
0 siblings, 0 replies; 175+ messages in thread
From: Xing, Beilei @ 2016-12-28 7:29 UTC (permalink / raw)
To: Wu, Jingjing, Zhang, Helin; +Cc: dev
> -----Original Message-----
> From: Wu, Jingjing
> Sent: Wednesday, December 28, 2016 11:30 AM
> To: Xing, Beilei <beilei.xing@intel.com>; Zhang, Helin
> <helin.zhang@intel.com>
> Cc: dev@dpdk.org
> Subject: RE: [PATCH v2 12/17] net/i40e: destroy ethertype filter
>
> >
> > const struct rte_flow_ops i40e_flow_ops = {
> > .validate = i40e_flow_validate,
> > @@ -1492,11 +1495,16 @@ i40e_flow_destroy(__rte_unused struct
> > rte_eth_dev *dev,
> > struct rte_flow *flow,
> > struct rte_flow_error *error)
> > {
> > + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data-
> > >dev_private);
> > struct i40e_flow *pmd_flow = (struct i40e_flow *)flow;
> > enum rte_filter_type filter_type = pmd_flow->filter_type;
> > int ret;
> >
> > switch (filter_type) {
> > + case RTE_ETH_FILTER_ETHERTYPE:
> > + ret = i40e_dev_destroy_ethertype_filter(pf,
> > + (struct i40e_ethertype_filter *)pmd_flow-
> >rule);
> > + break;
> > default:
> > PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
> > filter_type);
> > @@ -1504,10 +1512,49 @@ i40e_flow_destroy(__rte_unused struct
> > rte_eth_dev *dev,
> > break;
> > }
> >
> > - if (ret)
> > + if (!ret) {
> > + TAILQ_REMOVE(&pf->flow_list, pmd_flow, node);
> > + free(pmd_flow);
> Should it be freed inside the function? Is the API definition like that?
Yes, since neither the API nor rte will free the flow allocated by the PMD. Please refer to the attached mail.
^ permalink raw reply [flat|nested] 175+ messages in thread
* Re: [dpdk-dev] [PATCH v2 03/17] net/i40e: store flow director filter
2016-12-28 7:14 ` Tiwei Bie
@ 2016-12-28 7:36 ` Tiwei Bie
0 siblings, 0 replies; 175+ messages in thread
From: Tiwei Bie @ 2016-12-28 7:36 UTC (permalink / raw)
To: Xing, Beilei; +Cc: Wu, Jingjing, Zhang, Helin, dev
On Wed, Dec 28, 2016 at 03:14:55PM +0800, Tiwei Bie wrote:
> On Wed, Dec 28, 2016 at 03:10:39PM +0800, Xing, Beilei wrote:
> >
> >
> > > -----Original Message-----
> > > From: Bie, Tiwei
> > > Sent: Wednesday, December 28, 2016 11:39 AM
> > > To: Xing, Beilei <beilei.xing@intel.com>
> > > Cc: Wu, Jingjing <jingjing.wu@intel.com>; Zhang, Helin
> > > <helin.zhang@intel.com>; dev@dpdk.org
> > > Subject: Re: [dpdk-dev] [PATCH v2 03/17] net/i40e: store flow director filter
> > >
> > > On Tue, Dec 27, 2016 at 02:26:10PM +0800, Beilei Xing wrote:
> > > > Currently there's no flow director filter stored in SW. This patch
> > > > stores flow director filters in SW with cuckoo hash, also adds
> > > > protection if a flow director filter has been added.
> > > >
> > > > Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> > > > ---
> > > > drivers/net/i40e/i40e_ethdev.c | 48 +++++++++++++++++++++
> > > > drivers/net/i40e/i40e_ethdev.h | 12 ++++++
> > > > drivers/net/i40e/i40e_fdir.c | 98
> > > ++++++++++++++++++++++++++++++++++++++++++
> > > > 3 files changed, 158 insertions(+)
> > > >
> > > > diff --git a/drivers/net/i40e/i40e_ethdev.c
> > > > b/drivers/net/i40e/i40e_ethdev.c index c012d5d..427ebdc 100644
> > > > --- a/drivers/net/i40e/i40e_ethdev.c
> > > > +++ b/drivers/net/i40e/i40e_ethdev.c
> > > [...]
> > > > @@ -1342,6 +1379,17 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
> > > > rte_free(p_tunnel);
> > > > }
> > > >
> > > > + /* Remove all flow director rules and hash */
> > > > + if (fdir_info->hash_map)
> > > > + rte_free(fdir_info->hash_map);
> > > > + if (fdir_info->hash_table)
> > > > + rte_hash_free(fdir_info->hash_table);
> > > > +
> > > > + while ((p_fdir = TAILQ_FIRST(&fdir_info->fdir_list))) {
> > >
> > > There is a redundant pair of parentheses, or you should compare with NULL.
> >
> > I think the another parentheses is used to compare with NULL. In fact there's similar using in PMD.
> >
>
> The outer parentheses are redundant unless you compare it with NULL explicitly.
> Any way, you could just follow the existing coding style.
>
Sorry, I was wrong here. I just did a quick check and noticed that DPDK
has enabled the below option:
-Werror=parentheses
The outer parentheses are NOT redundant even if you don't compare it with
NULL explicitly.
Best regards,
Tiwei Bie
^ permalink raw reply [flat|nested] 175+ messages in thread
* Re: [dpdk-dev] [PATCH v2 07/17] net/i40e: add flow validate function
2016-12-28 2:52 ` Wu, Jingjing
@ 2016-12-28 7:44 ` Xing, Beilei
0 siblings, 0 replies; 175+ messages in thread
From: Xing, Beilei @ 2016-12-28 7:44 UTC (permalink / raw)
To: Wu, Jingjing, Zhang, Helin; +Cc: dev
> -----Original Message-----
> From: Wu, Jingjing
> Sent: Wednesday, December 28, 2016 10:52 AM
> To: Xing, Beilei <beilei.xing@intel.com>; Zhang, Helin
> <helin.zhang@intel.com>
> Cc: dev@dpdk.org
> Subject: RE: [PATCH v2 07/17] net/i40e: add flow validate function
>
>
> >
> > +union i40e_filter_t {
> > + struct rte_eth_ethertype_filter ethertype_filter;
> > + struct rte_eth_fdir_filter fdir_filter;
> > + struct rte_eth_tunnel_filter_conf tunnel_filter; } cons_filter;
> > +
> > +typedef int (*parse_filter_t)(struct rte_eth_dev *dev,
> > + const struct rte_flow_attr *attr,
> > + const struct rte_flow_item pattern[],
> > + const struct rte_flow_action actions[],
> > + struct rte_flow_error *error,
> > + union i40e_filter_t *filter);
> You can use void* instead of define union i40e_filter_t.
I tried the void * before, but I should determine the filter type when creating a flow. If using void*, I can get the filter info but I don't know which filter type it belongs to.
>
> > +struct i40e_valid_pattern {
> > + enum rte_flow_item_type *items;
> What the item points to? Add few comments
It's the pattern without VOID items. I'll add comments in next version.
> > +
> > + ret = parse_filter(dev, attr, items, actions, error, &cons_filter);
>
> Will you use cons_filter later? If not, it looks like we don't need the argument
> at all.
Yes, it's used to create the flow. We use parse_filter to get the filter info. When creating a flow, flow_validate will be invoked first to get the filter info, then the filter is set according to that info.
> > +
> > + rte_free(items);
> > +
> > + return ret;
> > +}
> > --
> > 2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* Re: [dpdk-dev] [PATCH v2 15/17] net/i40e: add flow flush function
2016-12-27 12:40 ` Adrien Mazarguil
@ 2016-12-28 8:02 ` Xing, Beilei
0 siblings, 0 replies; 175+ messages in thread
From: Xing, Beilei @ 2016-12-28 8:02 UTC (permalink / raw)
To: Adrien Mazarguil; +Cc: Wu, Jingjing, Zhang, Helin, dev
> -----Original Message-----
> From: Adrien Mazarguil [mailto:adrien.mazarguil@6wind.com]
> Sent: Tuesday, December 27, 2016 8:40 PM
> To: Xing, Beilei <beilei.xing@intel.com>
> Cc: Wu, Jingjing <jingjing.wu@intel.com>; Zhang, Helin
> <helin.zhang@intel.com>; dev@dpdk.org
> Subject: Re: [dpdk-dev] [PATCH v2 15/17] net/i40e: add flow flush function
>
> Hi Beilei,
>
> On Tue, Dec 27, 2016 at 02:26:22PM +0800, Beilei Xing wrote:
> > This patch adds i40e_flow_flush function to flush all filters for
> > users. And flow director flush function is involved first.
> >
> > Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> > ---
> > drivers/net/i40e/i40e_ethdev.h | 3 +++
> > drivers/net/i40e/i40e_fdir.c | 8 ++------
> > drivers/net/i40e/i40e_flow.c | 46
> ++++++++++++++++++++++++++++++++++++++++++
> > 3 files changed, 51 insertions(+), 6 deletions(-)
> [...]
> > diff --git a/drivers/net/i40e/i40e_flow.c
> > b/drivers/net/i40e/i40e_flow.c
> [...]
> > +static int
> > +i40e_fdir_filter_flush(struct i40e_pf *pf) {
> > + struct rte_eth_dev *dev = pf->adapter->eth_dev;
> > + struct i40e_fdir_info *fdir_info = &pf->fdir;
> > + struct i40e_fdir_filter *fdir_filter;
> > + struct i40e_flow *flow;
> > + int ret = 0;
> > +
> > + ret = i40e_fdir_flush(dev);
> > + if (!ret) {
> > + /* Delete FDIR filters in FDIR list. */
> > + while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list)))
> > + i40e_sw_fdir_filter_del(pf, fdir_filter);
> > +
> > + /* Delete FDIR flows in flow list. */
> > + TAILQ_FOREACH(flow, &pf->flow_list, node) {
> > + if (flow->filter_type == RTE_ETH_FILTER_FDIR) {
> > + TAILQ_REMOVE(&pf->flow_list, flow, node);
> > + rte_free(flow);
> > + }
> > + }
>
> Be careful, I'm not sure calling TAILQ_REMOVE() followed by rte_free()
> inside a TAILQ_FOREACH() is safe. BSD has the _SAFE() variant for this
> purpose but Linux does not.
>
Yes, thanks for reminder, I'll check it later:)
> > + }
> > +
> > + return ret;
> > +}
>
> --
> Adrien Mazarguil
> 6WIND
^ permalink raw reply [flat|nested] 175+ messages in thread
* Re: [dpdk-dev] [PATCH v2 07/17] net/i40e: add flow validate function
2016-12-27 12:40 ` Adrien Mazarguil
@ 2016-12-28 9:00 ` Xing, Beilei
2016-12-28 9:29 ` Adrien Mazarguil
0 siblings, 1 reply; 175+ messages in thread
From: Xing, Beilei @ 2016-12-28 9:00 UTC (permalink / raw)
To: Adrien Mazarguil; +Cc: Wu, Jingjing, Zhang, Helin, dev, Lu, Wenzhuo
> -----Original Message-----
> From: Adrien Mazarguil [mailto:adrien.mazarguil@6wind.com]
> Sent: Tuesday, December 27, 2016 8:40 PM
> To: Xing, Beilei <beilei.xing@intel.com>
> Cc: Wu, Jingjing <jingjing.wu@intel.com>; Zhang, Helin
> <helin.zhang@intel.com>; dev@dpdk.org
> Subject: Re: [dpdk-dev] [PATCH v2 07/17] net/i40e: add flow validate
> function
>
> Hi Beilei,
>
> A few comments below.
>
> On Tue, Dec 27, 2016 at 02:26:14PM +0800, Beilei Xing wrote:
> > This patch adds i40e_flow_validation function to check if a flow is
> > valid according to the flow pattern.
> > i40e_parse_ethertype_filter is added first, it also gets the ethertype
> > info.
> > i40e_flow.c is added to handle all generic filter events.
> >
> > Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> > ---
> > drivers/net/i40e/Makefile | 1 +
> > drivers/net/i40e/i40e_ethdev.c | 5 +
> > drivers/net/i40e/i40e_ethdev.h | 20 ++
> > drivers/net/i40e/i40e_flow.c | 431
> +++++++++++++++++++++++++++++++++++++++++
> > 4 files changed, 457 insertions(+)
> > create mode 100644 drivers/net/i40e/i40e_flow.c
> [...]
> > diff --git a/drivers/net/i40e/i40e_flow.c
> > b/drivers/net/i40e/i40e_flow.c new file mode 100644 index
> > 0000000..bf451ef
> > --- /dev/null
> > +++ b/drivers/net/i40e/i40e_flow.c
> [...]
> > + if (ethertype_filter->queue >= pf->dev_data->nb_rx_queues) {
> > + rte_flow_error_set(error, EINVAL,
> > + RTE_FLOW_ERROR_TYPE_ACTION,
> > + NULL, "Invalid queue ID for"
> > + " ethertype_filter.");
>
> When setting an error type related to an existing object provided by the
> application, you should set the related cause pointer to a non-NULL value. In
> this particular case, retrieving the action object seems difficult so it can
> remain that way, however there are many places in this series where it can
> be done.
OK, I got the meaning and usage of cause pointer now. Thanks for the explanation.
>
> > + return -EINVAL;
>
> While this is perfectly valid, you could also return -rte_errno to avoid
> duplicating EINVAL.
Yes, agree.
>
> [...]
> > + }
> > + if (ethertype_filter->ether_type == ETHER_TYPE_IPv4 ||
> > + ethertype_filter->ether_type == ETHER_TYPE_IPv6) {
> > + rte_flow_error_set(error, ENOTSUP,
> > + RTE_FLOW_ERROR_TYPE_ITEM,
> > + NULL, "Unsupported ether_type in"
> > + " control packet filter.");
> > + return -ENOTSUP;
> > + }
> > + if (ethertype_filter->ether_type == ETHER_TYPE_VLAN)
> > + PMD_DRV_LOG(WARNING, "filter vlan ether_type in"
> > + " first tag is not supported.");
> > +
> > + return ret;
> > +}
> [...]
> > +/* Parse attributes */
> > +static int
> > +i40e_parse_attr(const struct rte_flow_attr *attr,
> > + struct rte_flow_error *error)
> > +{
> > + /* Must be input direction */
> > + if (!attr->ingress) {
> > + rte_flow_error_set(error, EINVAL,
> > + RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
> > + NULL, "Only support ingress.");
>
> Regarding my previous comment, &attr could replace NULL here as well as in
> subsequent calls to rte_flow_error_set().
Got it, thanks.
>
> > + return -EINVAL;
> > + }
> > +
> > + /* Not supported */
> > + if (attr->egress) {
> > + rte_flow_error_set(error, EINVAL,
> > + RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
> > + NULL, "Not support egress.");
> > + return -EINVAL;
> > + }
> > +
> > + /* Not supported */
> > + if (attr->priority) {
> > + rte_flow_error_set(error, EINVAL,
> > + RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
> > + NULL, "Not support priority.");
> > + return -EINVAL;
> > + }
> > +
> > + /* Not supported */
> > + if (attr->group) {
> > + rte_flow_error_set(error, EINVAL,
> > + RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
> > + NULL, "Not support group.");
> > + return -EINVAL;
> > + }
> > +
> > + return 0;
> > +}
> > +
> > +static int
> > +i40e_parse_ethertype_pattern(const struct rte_flow_item *pattern,
> > + struct rte_flow_error *error,
> > + struct rte_eth_ethertype_filter *filter) {
> > + const struct rte_flow_item *item = pattern;
> > + const struct rte_flow_item_eth *eth_spec;
> > + const struct rte_flow_item_eth *eth_mask;
> > + enum rte_flow_item_type item_type;
> > +
> > + for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
> > + item_type = item->type;
> > + switch (item_type) {
> > + case RTE_FLOW_ITEM_TYPE_ETH:
> > + eth_spec = (const struct rte_flow_item_eth *)item-
> >spec;
> > + eth_mask = (const struct rte_flow_item_eth *)item-
> >mask;
> > + /* Get the MAC info. */
> > + if (!eth_spec || !eth_mask) {
> > + rte_flow_error_set(error, EINVAL,
> > +
> RTE_FLOW_ERROR_TYPE_ITEM,
> > + NULL,
> > + "NULL ETH spec/mask");
> > + return -EINVAL;
> > + }
>
> While optional, I think you should allow eth_spec and eth_mask to be NULL
> here as described in [1]:
>
> - If eth_spec is NULL, you can match anything that looks like a valid
> Ethernet header.
>
> - If eth_mask is NULL, you should assume a default mask (for Ethernet it
> usually means matching source/destination MACs perfectly).
>
> - You must check the "last" field as well, if non-NULL it may probably be
> supported as long as the following condition is satisfied:
>
> (spec & mask) == (last & mask)
>
> [1] http://dpdk.org/doc/guides/prog_guide/rte_flow.html#pattern-item
>
Thanks for the specification. In fact, we don't support the "last" for both ixgbe and i40e currently according to the original design, so we only support perfect match till now. We will support it in the future, as the deadline is coming, what do you think?
> [...]
> > + const struct rte_flow_action_queue *act_q;
> > + uint32_t index = 0;
> > +
> > + /* Check if the first non-void action is QUEUE or DROP. */
> > + NEXT_ITEM_OF_ACTION(act, actions, index);
> > + if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
> > + act->type != RTE_FLOW_ACTION_TYPE_DROP) {
> > + rte_flow_error_set(error, EINVAL,
> RTE_FLOW_ERROR_TYPE_ACTION,
> > + NULL, "Not supported action.");
>
> Again, you could report &act instead of NULL here (please check all remaining
> calls to rte_flow_error_set()).
>
> [...]
>
> --
> Adrien Mazarguil
> 6WIND
^ permalink raw reply [flat|nested] 175+ messages in thread
* Re: [dpdk-dev] [PATCH v2 07/17] net/i40e: add flow validate function
2016-12-28 9:00 ` Xing, Beilei
@ 2016-12-28 9:29 ` Adrien Mazarguil
2016-12-28 10:03 ` Xing, Beilei
0 siblings, 1 reply; 175+ messages in thread
From: Adrien Mazarguil @ 2016-12-28 9:29 UTC (permalink / raw)
To: Xing, Beilei; +Cc: Wu, Jingjing, Zhang, Helin, dev, Lu, Wenzhuo
Hi Beilei,
On Wed, Dec 28, 2016 at 09:00:03AM +0000, Xing, Beilei wrote:
>
>
> > -----Original Message-----
> > From: Adrien Mazarguil [mailto:adrien.mazarguil@6wind.com]
> > Sent: Tuesday, December 27, 2016 8:40 PM
> > To: Xing, Beilei <beilei.xing@intel.com>
> > Cc: Wu, Jingjing <jingjing.wu@intel.com>; Zhang, Helin
> > <helin.zhang@intel.com>; dev@dpdk.org
> > Subject: Re: [dpdk-dev] [PATCH v2 07/17] net/i40e: add flow validate
> > function
> >
> > Hi Beilei,
> >
> > A few comments below.
> >
> > On Tue, Dec 27, 2016 at 02:26:14PM +0800, Beilei Xing wrote:
> > > This patch adds i40e_flow_validation function to check if a flow is
> > > valid according to the flow pattern.
> > > i40e_parse_ethertype_filter is added first, it also gets the ethertype
> > > info.
> > > i40e_flow.c is added to handle all generic filter events.
> > >
> > > Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> > > ---
> > > drivers/net/i40e/Makefile | 1 +
> > > drivers/net/i40e/i40e_ethdev.c | 5 +
> > > drivers/net/i40e/i40e_ethdev.h | 20 ++
> > > drivers/net/i40e/i40e_flow.c | 431
> > +++++++++++++++++++++++++++++++++++++++++
> > > 4 files changed, 457 insertions(+)
> > > create mode 100644 drivers/net/i40e/i40e_flow.c
> > [...]
> > > diff --git a/drivers/net/i40e/i40e_flow.c
> > > b/drivers/net/i40e/i40e_flow.c new file mode 100644 index
> > > 0000000..bf451ef
> > > --- /dev/null
> > > +++ b/drivers/net/i40e/i40e_flow.c
> > [...]
> > > + if (ethertype_filter->queue >= pf->dev_data->nb_rx_queues) {
> > > + rte_flow_error_set(error, EINVAL,
> > > + RTE_FLOW_ERROR_TYPE_ACTION,
> > > + NULL, "Invalid queue ID for"
> > > + " ethertype_filter.");
> >
> > When setting an error type related to an existing object provided by the
> > application, you should set the related cause pointer to a non-NULL value. In
> > this particular case, retrieving the action object seems difficult so it can
> > remain that way, however there are many places in this series where it can
> > be done.
>
> OK, I got the meaning and usage of cause pointer now. Thanks for the explanation.
>
> >
> > > + return -EINVAL;
> >
> > While this is perfectly valid, you could also return -rte_errno to avoid
> > duplicating EINVAL.
>
> Yes, agree.
>
> >
> > [...]
> > > + }
> > > + if (ethertype_filter->ether_type == ETHER_TYPE_IPv4 ||
> > > + ethertype_filter->ether_type == ETHER_TYPE_IPv6) {
> > > + rte_flow_error_set(error, ENOTSUP,
> > > + RTE_FLOW_ERROR_TYPE_ITEM,
> > > + NULL, "Unsupported ether_type in"
> > > + " control packet filter.");
> > > + return -ENOTSUP;
> > > + }
> > > + if (ethertype_filter->ether_type == ETHER_TYPE_VLAN)
> > > + PMD_DRV_LOG(WARNING, "filter vlan ether_type in"
> > > + " first tag is not supported.");
> > > +
> > > + return ret;
> > > +}
> > [...]
> > > +/* Parse attributes */
> > > +static int
> > > +i40e_parse_attr(const struct rte_flow_attr *attr,
> > > + struct rte_flow_error *error)
> > > +{
> > > + /* Must be input direction */
> > > + if (!attr->ingress) {
> > > + rte_flow_error_set(error, EINVAL,
> > > + RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
> > > + NULL, "Only support ingress.");
> >
> > Regarding my previous comment, &attr could replace NULL here as well as in
> > subsequent calls to rte_flow_error_set().
>
> Got it, thanks.
>
> >
> > > + return -EINVAL;
> > > + }
> > > +
> > > + /* Not supported */
> > > + if (attr->egress) {
> > > + rte_flow_error_set(error, EINVAL,
> > > + RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
> > > + NULL, "Not support egress.");
> > > + return -EINVAL;
> > > + }
> > > +
> > > + /* Not supported */
> > > + if (attr->priority) {
> > > + rte_flow_error_set(error, EINVAL,
> > > + RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
> > > + NULL, "Not support priority.");
> > > + return -EINVAL;
> > > + }
> > > +
> > > + /* Not supported */
> > > + if (attr->group) {
> > > + rte_flow_error_set(error, EINVAL,
> > > + RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
> > > + NULL, "Not support group.");
> > > + return -EINVAL;
> > > + }
> > > +
> > > + return 0;
> > > +}
> > > +
> > > +static int
> > > +i40e_parse_ethertype_pattern(const struct rte_flow_item *pattern,
> > > + struct rte_flow_error *error,
> > > + struct rte_eth_ethertype_filter *filter) {
> > > + const struct rte_flow_item *item = pattern;
> > > + const struct rte_flow_item_eth *eth_spec;
> > > + const struct rte_flow_item_eth *eth_mask;
> > > + enum rte_flow_item_type item_type;
> > > +
> > > + for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
> > > + item_type = item->type;
> > > + switch (item_type) {
> > > + case RTE_FLOW_ITEM_TYPE_ETH:
> > > + eth_spec = (const struct rte_flow_item_eth *)item-
> > >spec;
> > > + eth_mask = (const struct rte_flow_item_eth *)item-
> > >mask;
> > > + /* Get the MAC info. */
> > > + if (!eth_spec || !eth_mask) {
> > > + rte_flow_error_set(error, EINVAL,
> > > +
> > RTE_FLOW_ERROR_TYPE_ITEM,
> > > + NULL,
> > > + "NULL ETH spec/mask");
> > > + return -EINVAL;
> > > + }
> >
> > While optional, I think you should allow eth_spec and eth_mask to be NULL
> > here as described in [1]:
> >
> > - If eth_spec is NULL, you can match anything that looks like a valid
> > Ethernet header.
> >
> > - If eth_mask is NULL, you should assume a default mask (for Ethernet it
> > usually means matching source/destination MACs perfectly).
> >
> > - You must check the "last" field as well, if non-NULL it may probably be
> > supported as long as the following condition is satisfied:
> >
> > (spec & mask) == (last & mask)
> >
> > [1] http://dpdk.org/doc/guides/prog_guide/rte_flow.html#pattern-item
> >
>
> Thanks for the specification. In fact, we don't support the "last" for both ixgbe and i40e currently according to the original design, so we only support perfect match till now. We will support it in the future, as the deadline is coming, what do you think?
If you want to handle it later it's fine, however in that case you need to
at least generate an error when last is non-NULL (I did not see such a check
in this code).
Note that supporting it properly as defined in the API could be relatively
easy by implementing the above condition, it's just a small step above
simply checking for a NULL value.
> > [...]
> > > + const struct rte_flow_action_queue *act_q;
> > > + uint32_t index = 0;
> > > +
> > > + /* Check if the first non-void action is QUEUE or DROP. */
> > > + NEXT_ITEM_OF_ACTION(act, actions, index);
> > > + if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
> > > + act->type != RTE_FLOW_ACTION_TYPE_DROP) {
> > > + rte_flow_error_set(error, EINVAL,
> > RTE_FLOW_ERROR_TYPE_ACTION,
> > > + NULL, "Not supported action.");
> >
> > Again, you could report &act instead of NULL here (please check all remaining
> > calls to rte_flow_error_set()).
> >
> > [...]
> >
> > --
> > Adrien Mazarguil
> > 6WIND
--
Adrien Mazarguil
6WIND
^ permalink raw reply [flat|nested] 175+ messages in thread
* Re: [dpdk-dev] [PATCH v2 07/17] net/i40e: add flow validate function
2016-12-28 9:29 ` Adrien Mazarguil
@ 2016-12-28 10:03 ` Xing, Beilei
0 siblings, 0 replies; 175+ messages in thread
From: Xing, Beilei @ 2016-12-28 10:03 UTC (permalink / raw)
To: Adrien Mazarguil; +Cc: Wu, Jingjing, Zhang, Helin, dev, Lu, Wenzhuo
> -----Original Message-----
> From: Adrien Mazarguil [mailto:adrien.mazarguil@6wind.com]
> Sent: Wednesday, December 28, 2016 5:30 PM
> To: Xing, Beilei <beilei.xing@intel.com>
> Cc: Wu, Jingjing <jingjing.wu@intel.com>; Zhang, Helin
> <helin.zhang@intel.com>; dev@dpdk.org; Lu, Wenzhuo
> <wenzhuo.lu@intel.com>
> Subject: Re: [dpdk-dev] [PATCH v2 07/17] net/i40e: add flow validate
> function
>
> Hi Beilei,
>
> On Wed, Dec 28, 2016 at 09:00:03AM +0000, Xing, Beilei wrote:
> >
> >
> > > -----Original Message-----
> > > From: Adrien Mazarguil [mailto:adrien.mazarguil@6wind.com]
> > > Sent: Tuesday, December 27, 2016 8:40 PM
> > > To: Xing, Beilei <beilei.xing@intel.com>
> > > Cc: Wu, Jingjing <jingjing.wu@intel.com>; Zhang, Helin
> > > <helin.zhang@intel.com>; dev@dpdk.org
> > > Subject: Re: [dpdk-dev] [PATCH v2 07/17] net/i40e: add flow validate
> > > function
> > >
> > > Hi Beilei,
> > >
> > > A few comments below.
> > >
> > > On Tue, Dec 27, 2016 at 02:26:14PM +0800, Beilei Xing wrote:
> > > > This patch adds i40e_flow_validation function to check if a flow
> > > > is valid according to the flow pattern.
> > > > i40e_parse_ethertype_filter is added first, it also gets the
> > > > ethertype info.
> > > > i40e_flow.c is added to handle all generic filter events.
> > > >
> > > > Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> > > > ---
> > > > drivers/net/i40e/Makefile | 1 +
> > > > drivers/net/i40e/i40e_ethdev.c | 5 +
> > > > drivers/net/i40e/i40e_ethdev.h | 20 ++
> > > > drivers/net/i40e/i40e_flow.c | 431
> > > +++++++++++++++++++++++++++++++++++++++++
> > > > 4 files changed, 457 insertions(+) create mode 100644
> > > > drivers/net/i40e/i40e_flow.c
> > > [...]
> > > > diff --git a/drivers/net/i40e/i40e_flow.c
> > > > b/drivers/net/i40e/i40e_flow.c new file mode 100644 index
> > > > 0000000..bf451ef
> > > > --- /dev/null
> > > > +++ b/drivers/net/i40e/i40e_flow.c
> > > [...]
> > > > + if (ethertype_filter->queue >= pf->dev_data->nb_rx_queues) {
> > > > + rte_flow_error_set(error, EINVAL,
> > > > + RTE_FLOW_ERROR_TYPE_ACTION,
> > > > + NULL, "Invalid queue ID for"
> > > > + " ethertype_filter.");
> > >
> > > When setting an error type related to an existing object provided by
> > > the application, you should set the related cause pointer to a
> > > non-NULL value. In this particular case, retrieving the action
> > > object seems difficult so it can remain that way, however there are
> > > many places in this series where it can be done.
> >
> > OK, I got the meaning and usage of cause pointer now. Thanks for the
> explanation.
> >
> > >
> > > > + return -EINVAL;
> > >
> > > While this is perfectly valid, you could also return -rte_errno to
> > > avoid duplicating EINVAL.
> >
> > Yes, agree.
> >
> > >
> > > [...]
> > > > + }
> > > > + if (ethertype_filter->ether_type == ETHER_TYPE_IPv4 ||
> > > > + ethertype_filter->ether_type == ETHER_TYPE_IPv6) {
> > > > + rte_flow_error_set(error, ENOTSUP,
> > > > + RTE_FLOW_ERROR_TYPE_ITEM,
> > > > + NULL, "Unsupported ether_type in"
> > > > + " control packet filter.");
> > > > + return -ENOTSUP;
> > > > + }
> > > > + if (ethertype_filter->ether_type == ETHER_TYPE_VLAN)
> > > > + PMD_DRV_LOG(WARNING, "filter vlan ether_type in"
> > > > + " first tag is not supported.");
> > > > +
> > > > + return ret;
> > > > +}
> > > [...]
> > > > +/* Parse attributes */
> > > > +static int
> > > > +i40e_parse_attr(const struct rte_flow_attr *attr,
> > > > + struct rte_flow_error *error)
> > > > +{
> > > > + /* Must be input direction */
> > > > + if (!attr->ingress) {
> > > > + rte_flow_error_set(error, EINVAL,
> > > > + RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
> > > > + NULL, "Only support ingress.");
> > >
> > > Regarding my previous comment, &attr could replace NULL here as well
> > > as in subsequent calls to rte_flow_error_set().
> >
> > Got it, thanks.
> >
> > >
> > > > + return -EINVAL;
> > > > + }
> > > > +
> > > > + /* Not supported */
> > > > + if (attr->egress) {
> > > > + rte_flow_error_set(error, EINVAL,
> > > > + RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
> > > > + NULL, "Not support egress.");
> > > > + return -EINVAL;
> > > > + }
> > > > +
> > > > + /* Not supported */
> > > > + if (attr->priority) {
> > > > + rte_flow_error_set(error, EINVAL,
> > > > + RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
> > > > + NULL, "Not support priority.");
> > > > + return -EINVAL;
> > > > + }
> > > > +
> > > > + /* Not supported */
> > > > + if (attr->group) {
> > > > + rte_flow_error_set(error, EINVAL,
> > > > + RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
> > > > + NULL, "Not support group.");
> > > > + return -EINVAL;
> > > > + }
> > > > +
> > > > + return 0;
> > > > +}
> > > > +
> > > > +static int
> > > > +i40e_parse_ethertype_pattern(const struct rte_flow_item *pattern,
> > > > + struct rte_flow_error *error,
> > > > + struct rte_eth_ethertype_filter *filter) {
> > > > + const struct rte_flow_item *item = pattern;
> > > > + const struct rte_flow_item_eth *eth_spec;
> > > > + const struct rte_flow_item_eth *eth_mask;
> > > > + enum rte_flow_item_type item_type;
> > > > +
> > > > + for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
> > > > + item_type = item->type;
> > > > + switch (item_type) {
> > > > + case RTE_FLOW_ITEM_TYPE_ETH:
> > > > + eth_spec = (const struct rte_flow_item_eth *)item-
> > > >spec;
> > > > + eth_mask = (const struct rte_flow_item_eth *)item-
> > > >mask;
> > > > + /* Get the MAC info. */
> > > > + if (!eth_spec || !eth_mask) {
> > > > + rte_flow_error_set(error, EINVAL,
> > > > +
> > > RTE_FLOW_ERROR_TYPE_ITEM,
> > > > + NULL,
> > > > + "NULL ETH spec/mask");
> > > > + return -EINVAL;
> > > > + }
> > >
> > > While optional, I think you should allow eth_spec and eth_mask to be
> > > NULL here as described in [1]:
> > >
> > > - If eth_spec is NULL, you can match anything that looks like a valid
> > > Ethernet header.
> > >
> > > - If eth_mask is NULL, you should assume a default mask (for Ethernet it
> > > usually means matching source/destination MACs perfectly).
> > >
> > > - You must check the "last" field as well, if non-NULL it may probably be
> > > supported as long as the following condition is satisfied:
> > >
> > > (spec & mask) == (last & mask)
> > >
> > > [1] http://dpdk.org/doc/guides/prog_guide/rte_flow.html#pattern-item
> > >
> >
> > Thanks for the specification. In fact, we don't support the "last" for both
> ixgbe and i40e currently according to the original design, so we only support
> perfect match till now. We will support it in the future, as the deadline is
> coming, what do you think?
>
> If you want to handle it later it's fine, however in that case you need to at
> least generate an error when last is non-NULL (I did not see such a check in
> this code).
OK, will update the non-NULL condition in next version.
And thanks for all your comments.
>
> Note that supporting it properly as defined in the API could be relatively easy
> by implementing the above condition, it's just a small step above simply
> checking for a NULL value.
>
> > > [...]
> > > > + const struct rte_flow_action_queue *act_q;
> > > > + uint32_t index = 0;
> > > > +
> > > > + /* Check if the first non-void action is QUEUE or DROP. */
> > > > + NEXT_ITEM_OF_ACTION(act, actions, index);
> > > > + if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
> > > > + act->type != RTE_FLOW_ACTION_TYPE_DROP) {
> > > > + rte_flow_error_set(error, EINVAL,
> > > RTE_FLOW_ERROR_TYPE_ACTION,
> > > > + NULL, "Not supported action.");
> > >
> > > Again, you could report &act instead of NULL here (please check all
> > > remaining calls to rte_flow_error_set()).
> > >
> > > [...]
> > >
> > > --
> > > Adrien Mazarguil
> > > 6WIND
>
> --
> Adrien Mazarguil
> 6WIND
^ permalink raw reply [flat|nested] 175+ messages in thread
* Re: [dpdk-dev] [PATCH v2 01/17] net/i40e: store ethertype filter
2016-12-28 2:22 ` Wu, Jingjing
@ 2016-12-29 4:03 ` Xing, Beilei
2016-12-29 4:36 ` Xing, Beilei
1 sibling, 0 replies; 175+ messages in thread
From: Xing, Beilei @ 2016-12-29 4:03 UTC (permalink / raw)
To: Wu, Jingjing, Zhang, Helin; +Cc: dev
> -----Original Message-----
> From: Wu, Jingjing
> Sent: Wednesday, December 28, 2016 10:22 AM
> To: Xing, Beilei <beilei.xing@intel.com>; Zhang, Helin
> <helin.zhang@intel.com>
> Cc: dev@dpdk.org
> Subject: RE: [PATCH v2 01/17] net/i40e: store ethertype filter
>
> > +
> > +/* Delete ethertype filter in SW list */ static int
> > +i40e_sw_ethertype_filter_del(struct i40e_pf *pf,
> > + struct i40e_ethertype_filter *filter) {
> > + struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
> > + int ret = 0;
> > +
> > + ret = rte_hash_del_key(ethertype_rule->hash_table,
> > + &filter->input);
> > + if (ret < 0)
> > + PMD_DRV_LOG(ERR,
> > + "Failed to delete ethertype filter"
> > + " to hash table %d!",
> > + ret);
> > + ethertype_rule->hash_map[ret] = NULL;
> > +
> > + TAILQ_REMOVE(ðertype_rule->ethertype_list, filter, rules);
> > + rte_free(filter);
>
> It's better to free filter out of del function because the filter is also the input
> argument.
> Or you can define this function to use key as argument but not filter.
Thanks for the suggestion, I'll use the key as parameter in the next version.
>
> > /*
> > * Configure ethertype filter, which can director packet by filtering
> > * with mac address and ether_type or only ether_type @@ -7964,6
> > +8099,8 @@ i40e_ethertype_filter_set(struct i40e_pf *pf,
> > bool add)
> > {
> > struct i40e_hw *hw = I40E_PF_TO_HW(pf);
> > + struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
> > + struct i40e_ethertype_filter *ethertype_filter, *node;
> > struct i40e_control_filter_stats stats;
> > uint16_t flags = 0;
> > int ret;
> > @@ -7982,6 +8119,22 @@ i40e_ethertype_filter_set(struct i40e_pf *pf,
> > PMD_DRV_LOG(WARNING, "filter vlan ether_type in first tag
> is"
> > " not supported.");
> >
> > + /* Check if there is the filter in SW list */
> > + ethertype_filter = rte_zmalloc("ethertype_filter",
> > + sizeof(*ethertype_filter), 0);
> > + i40e_ethertype_filter_convert(filter, ethertype_filter);
> > + node = i40e_sw_ethertype_filter_lookup(ethertype_rule,
> > + ðertype_filter->input);
> > + if (add && node) {
> > + PMD_DRV_LOG(ERR, "Conflict with existing ethertype
> rules!");
> > + rte_free(ethertype_filter);
> > + return -EINVAL;
> > + } else if (!add && !node) {
> > + PMD_DRV_LOG(ERR, "There's no corresponding ethertype
> > filter!");
> > + rte_free(ethertype_filter);
> > + return -EINVAL;
> > + }
> How about malloc ethertype_filter after check? Especially, no need to malloc
> it when delete a filter.
Malloc ethertype_filter because i40e_ethertype_filter_convert is involved first, and then check if a rule exists with ethertype_filter->input.
>
> Thanks
> Jingjing
^ permalink raw reply [flat|nested] 175+ messages in thread
* Re: [dpdk-dev] [PATCH v2 01/17] net/i40e: store ethertype filter
2016-12-28 2:22 ` Wu, Jingjing
2016-12-29 4:03 ` Xing, Beilei
@ 2016-12-29 4:36 ` Xing, Beilei
1 sibling, 0 replies; 175+ messages in thread
From: Xing, Beilei @ 2016-12-29 4:36 UTC (permalink / raw)
To: Wu, Jingjing, Zhang, Helin; +Cc: dev
> -----Original Message-----
> From: Xing, Beilei
> Sent: Thursday, December 29, 2016 12:03 PM
> To: Wu, Jingjing <jingjing.wu@intel.com>; Zhang, Helin
> <helin.zhang@intel.com>
> Cc: dev@dpdk.org
> Subject: RE: [PATCH v2 01/17] net/i40e: store ethertype filter
>
> > -----Original Message-----
> > From: Wu, Jingjing
> > Sent: Wednesday, December 28, 2016 10:22 AM
> > To: Xing, Beilei <beilei.xing@intel.com>; Zhang, Helin
> > <helin.zhang@intel.com>
> > Cc: dev@dpdk.org
> > Subject: RE: [PATCH v2 01/17] net/i40e: store ethertype filter
> >
> > > +
> > > +/* Delete ethertype filter in SW list */ static int
> > > +i40e_sw_ethertype_filter_del(struct i40e_pf *pf,
> > > + struct i40e_ethertype_filter *filter) {
> > > + struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
> > > + int ret = 0;
> > > +
> > > + ret = rte_hash_del_key(ethertype_rule->hash_table,
> > > + &filter->input);
> > > + if (ret < 0)
> > > + PMD_DRV_LOG(ERR,
> > > + "Failed to delete ethertype filter"
> > > + " to hash table %d!",
> > > + ret);
> > > + ethertype_rule->hash_map[ret] = NULL;
> > > +
> > > + TAILQ_REMOVE(ðertype_rule->ethertype_list, filter, rules);
> > > + rte_free(filter);
> >
> > It's better to free filter out of del function because the filter is
> > also the input argument.
> > Or you can define this function to use key as argument but not filter.
>
> Thanks for the suggestion, I'll use the key as parameter in the next version.
>
> >
> > > /*
> > > * Configure ethertype filter, which can director packet by filtering
> > > * with mac address and ether_type or only ether_type @@ -7964,6
> > > +8099,8 @@ i40e_ethertype_filter_set(struct i40e_pf *pf,
> > > bool add)
> > > {
> > > struct i40e_hw *hw = I40E_PF_TO_HW(pf);
> > > + struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
> > > + struct i40e_ethertype_filter *ethertype_filter, *node;
> > > struct i40e_control_filter_stats stats;
> > > uint16_t flags = 0;
> > > int ret;
> > > @@ -7982,6 +8119,22 @@ i40e_ethertype_filter_set(struct i40e_pf *pf,
> > > PMD_DRV_LOG(WARNING, "filter vlan ether_type in first tag
> > is"
> > > " not supported.");
> > >
> > > + /* Check if there is the filter in SW list */
> > > + ethertype_filter = rte_zmalloc("ethertype_filter",
> > > + sizeof(*ethertype_filter), 0);
> > > + i40e_ethertype_filter_convert(filter, ethertype_filter);
> > > + node = i40e_sw_ethertype_filter_lookup(ethertype_rule,
> > > + ðertype_filter->input);
> > > + if (add && node) {
> > > + PMD_DRV_LOG(ERR, "Conflict with existing ethertype
> > rules!");
> > > + rte_free(ethertype_filter);
> > > + return -EINVAL;
> > > + } else if (!add && !node) {
> > > + PMD_DRV_LOG(ERR, "There's no corresponding ethertype
> > > filter!");
> > > + rte_free(ethertype_filter);
> > > + return -EINVAL;
> > > + }
> > How about malloc ethertype_filter after check? Especially, no need to
> > malloc it when delete a filter.
>
> Malloc ethertype_filter because i40e_ethertype_filter_convert is involved
> first, and then check if a rule exists with ethertype_filter->input.
Sorry, you are right. In fact I don't need to malloc before converting. Will update it in the next version.
>
> >
> > Thanks
> > Jingjing
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v3 00/17] net/i40e: consistent filter API
2016-12-27 6:26 ` [dpdk-dev] [PATCH v2 00/17] net/i40e: Consistent filter API Beilei Xing
` (16 preceding siblings ...)
2016-12-27 6:26 ` [dpdk-dev] [PATCH v2 17/17] net/i40e: flush tunnel filters Beilei Xing
@ 2016-12-29 16:04 ` Beilei Xing
2016-12-29 16:04 ` [dpdk-dev] [PATCH v3 01/17] net/i40e: store ethertype filter Beilei Xing
` (17 more replies)
17 siblings, 18 replies; 175+ messages in thread
From: Beilei Xing @ 2016-12-29 16:04 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
All patches depend on Adrien's Generic flow API (rte_flow).
The patches mainly finish following functions:
1) Store and restore all kinds of filters.
2) Parse all kinds of filters.
3) Add flow validate function.
4) Add flow create function.
5) Add flow destroy function.
6) Add flow flush function.
v3 changes:
Set the related cause pointer to a non-NULL value when error happens.
Change return value when error happens.
Modify filter_del parameter with key.
Malloc filter after checking when delete a filter.
Delete meaningless initialization.
Add return value when there's error.
Change global variable definition.
Modify some function declaration.
v2 changes:
Add i40e_flow.c, all flow ops are implemented in the file.
Change the whole implementation of all parse flow functions.
Update error info for all flow ops.
Add flow_list to store flows created.
Beilei Xing (17):
net/i40e: store ethertype filter
net/i40e: store tunnel filter
net/i40e: store flow director filter
net/i40e: restore ethertype filter
net/i40e: restore tunnel filter
net/i40e: restore flow director filter
net/i40e: add flow validate function
net/i40e: parse flow director filter
net/i40e: parse tunnel filter
net/i40e: add flow create function
net/i40e: add flow destroy function
net/i40e: destroy ethertype filter
net/i40e: destroy tunnel filter
net/i40e: destroy flow directory filter
net/i40e: add flow flush function
net/i40e: flush ethertype filters
net/i40e: flush tunnel filters
drivers/net/i40e/Makefile | 2 +
drivers/net/i40e/i40e_ethdev.c | 519 ++++++++++--
drivers/net/i40e/i40e_ethdev.h | 173 ++++
drivers/net/i40e/i40e_fdir.c | 137 +++-
drivers/net/i40e/i40e_flow.c | 1772 ++++++++++++++++++++++++++++++++++++++++
5 files changed, 2537 insertions(+), 66 deletions(-)
create mode 100644 drivers/net/i40e/i40e_flow.c
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v3 01/17] net/i40e: store ethertype filter
2016-12-29 16:04 ` [dpdk-dev] [PATCH v3 00/17] net/i40e: consistent filter API Beilei Xing
@ 2016-12-29 16:04 ` Beilei Xing
2016-12-29 16:04 ` [dpdk-dev] [PATCH v3 02/17] net/i40e: store tunnel filter Beilei Xing
` (16 subsequent siblings)
17 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2016-12-29 16:04 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
Currently there's no ethertype filter stored in SW.
This patch stores ethertype filter with cuckoo hash
in SW, also adds protection if an ethertype filter
has been added.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/Makefile | 1 +
drivers/net/i40e/i40e_ethdev.c | 166 ++++++++++++++++++++++++++++++++++++++++-
drivers/net/i40e/i40e_ethdev.h | 31 ++++++++
3 files changed, 197 insertions(+), 1 deletion(-)
diff --git a/drivers/net/i40e/Makefile b/drivers/net/i40e/Makefile
index 66997b6..11175c4 100644
--- a/drivers/net/i40e/Makefile
+++ b/drivers/net/i40e/Makefile
@@ -117,5 +117,6 @@ DEPDIRS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += lib/librte_eal lib/librte_ether
DEPDIRS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += lib/librte_mempool lib/librte_mbuf
DEPDIRS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += lib/librte_net
DEPDIRS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += lib/librte_kvargs
+DEPDIRS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += lib/librte_hash
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 8033c35..e43b4d9 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -51,6 +51,7 @@
#include <rte_dev.h>
#include <rte_eth_ctrl.h>
#include <rte_tailq.h>
+#include <rte_hash_crc.h>
#include "i40e_logs.h"
#include "base/i40e_prototype.h"
@@ -461,6 +462,12 @@ static void i40e_set_default_mac_addr(struct rte_eth_dev *dev,
static int i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
+static int i40e_ethertype_filter_convert(
+ const struct rte_eth_ethertype_filter *input,
+ struct i40e_ethertype_filter *filter);
+static int i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
+ struct i40e_ethertype_filter *filter);
+
static const struct rte_pci_id pci_id_i40e_map[] = {
{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710) },
{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QEMU) },
@@ -938,9 +945,18 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
int ret;
uint32_t len;
uint8_t aq_fail = 0;
+ struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
PMD_INIT_FUNC_TRACE();
+ char ethertype_hash_name[RTE_HASH_NAMESIZE];
+ struct rte_hash_parameters ethertype_hash_params = {
+ .name = ethertype_hash_name,
+ .entries = I40E_MAX_ETHERTYPE_FILTER_NUM,
+ .key_len = sizeof(struct i40e_ethertype_filter_input),
+ .hash_func = rte_hash_crc,
+ };
+
dev->dev_ops = &i40e_eth_dev_ops;
dev->rx_pkt_burst = i40e_recv_pkts;
dev->tx_pkt_burst = i40e_xmit_pkts;
@@ -1180,8 +1196,33 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
pf->flags &= ~I40E_FLAG_DCB;
}
+ /* Initialize ethertype filter rule list and hash */
+ TAILQ_INIT(&ethertype_rule->ethertype_list);
+ snprintf(ethertype_hash_name, RTE_HASH_NAMESIZE,
+ "ethertype_%s", dev->data->name);
+ ethertype_rule->hash_table = rte_hash_create(&ethertype_hash_params);
+ if (!ethertype_rule->hash_table) {
+ PMD_INIT_LOG(ERR, "Failed to create ethertype hash table!");
+ ret = -EINVAL;
+ goto err_ethertype_hash_table_create;
+ }
+ ethertype_rule->hash_map = rte_zmalloc("i40e_ethertype_hash_map",
+ sizeof(struct i40e_ethertype_filter *) *
+ I40E_MAX_ETHERTYPE_FILTER_NUM,
+ 0);
+ if (!ethertype_rule->hash_map) {
+ PMD_INIT_LOG(ERR,
+ "Failed to allocate memory for ethertype hash map!");
+ ret = -ENOMEM;
+ goto err_ethertype_hash_map_alloc;
+ }
+
return 0;
+err_ethertype_hash_map_alloc:
+ rte_hash_free(ethertype_rule->hash_table);
+err_ethertype_hash_table_create:
+ rte_free(dev->data->mac_addrs);
err_mac_alloc:
i40e_vsi_release(pf->main_vsi);
err_setup_pf_switch:
@@ -1204,23 +1245,40 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
static int
eth_i40e_dev_uninit(struct rte_eth_dev *dev)
{
+ struct i40e_pf *pf;
struct rte_pci_device *pci_dev;
struct i40e_hw *hw;
struct i40e_filter_control_settings settings;
+ struct i40e_ethertype_filter *p_ethertype;
int ret;
uint8_t aq_fail = 0;
+ struct i40e_ethertype_rule *ethertype_rule;
PMD_INIT_FUNC_TRACE();
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
pci_dev = dev->pci_dev;
+ ethertype_rule = &pf->ethertype;
if (hw->adapter_stopped == 0)
i40e_dev_close(dev);
+ /* Remove all ethertype filter rules and hash */
+ if (ethertype_rule->hash_map)
+ rte_free(ethertype_rule->hash_map);
+ if (ethertype_rule->hash_table)
+ rte_hash_free(ethertype_rule->hash_table);
+
+ while ((p_ethertype = TAILQ_FIRST(&ethertype_rule->ethertype_list))) {
+ TAILQ_REMOVE(&ethertype_rule->ethertype_list,
+ p_ethertype, rules);
+ rte_free(p_ethertype);
+ }
+
dev->dev_ops = NULL;
dev->rx_pkt_burst = NULL;
dev->tx_pkt_burst = NULL;
@@ -7955,6 +8013,82 @@ i40e_hash_filter_ctrl(struct rte_eth_dev *dev,
return ret;
}
+/* Convert ethertype filter structure */
+static int
+i40e_ethertype_filter_convert(const struct rte_eth_ethertype_filter *input,
+ struct i40e_ethertype_filter *filter)
+{
+ rte_memcpy(&filter->input.mac_addr, &input->mac_addr, ETHER_ADDR_LEN);
+ filter->input.ether_type = input->ether_type;
+ filter->flags = input->flags;
+ filter->queue = input->queue;
+
+ return 0;
+}
+
+/* Check if there exists the ethertype filter */
+struct i40e_ethertype_filter *
+i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_rule *ethertype_rule,
+ const struct i40e_ethertype_filter_input *input)
+{
+ int ret;
+
+ ret = rte_hash_lookup(ethertype_rule->hash_table, (const void *)input);
+ if (ret < 0)
+ return NULL;
+
+ return ethertype_rule->hash_map[ret];
+}
+
+/* Add ethertype filter in SW list */
+static int
+i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
+ struct i40e_ethertype_filter *filter)
+{
+ struct i40e_ethertype_rule *rule = &pf->ethertype;
+ int ret;
+
+ ret = rte_hash_add_key(rule->hash_table, &filter->input);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "Failed to insert ethertype filter"
+ " to hash table %d!",
+ ret);
+ return ret;
+ }
+ rule->hash_map[ret] = filter;
+
+ TAILQ_INSERT_TAIL(&rule->ethertype_list, filter, rules);
+
+ return 0;
+}
+
+/* Delete ethertype filter in SW list */
+int
+i40e_sw_ethertype_filter_del(struct i40e_pf *pf,
+ struct i40e_ethertype_filter_input *input)
+{
+ struct i40e_ethertype_rule *rule = &pf->ethertype;
+ struct i40e_ethertype_filter *filter;
+ int ret;
+
+ ret = rte_hash_del_key(rule->hash_table, input);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "Failed to delete ethertype filter"
+ " to hash table %d!",
+ ret);
+ return ret;
+ }
+ filter = rule->hash_map[ret];
+ rule->hash_map[ret] = NULL;
+
+ TAILQ_REMOVE(&rule->ethertype_list, filter, rules);
+ rte_free(filter);
+
+ return 0;
+}
+
/*
* Configure ethertype filter, which can director packet by filtering
* with mac address and ether_type or only ether_type
@@ -7965,6 +8099,9 @@ i40e_ethertype_filter_set(struct i40e_pf *pf,
bool add)
{
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
+ struct i40e_ethertype_filter *ethertype_filter, *node;
+ struct i40e_ethertype_filter check_filter;
struct i40e_control_filter_stats stats;
uint16_t flags = 0;
int ret;
@@ -7983,6 +8120,21 @@ i40e_ethertype_filter_set(struct i40e_pf *pf,
PMD_DRV_LOG(WARNING, "filter vlan ether_type in first tag is"
" not supported.");
+ /* Check if there is the filter in SW list */
+ memset(&check_filter, 0, sizeof(check_filter));
+ i40e_ethertype_filter_convert(filter, &check_filter);
+ node = i40e_sw_ethertype_filter_lookup(ethertype_rule,
+ &check_filter.input);
+ if (add && node) {
+ PMD_DRV_LOG(ERR, "Conflict with existing ethertype rules!");
+ return -EINVAL;
+ }
+
+ if (!add && !node) {
+ PMD_DRV_LOG(ERR, "There's no corresponding ethertype filter!");
+ return -EINVAL;
+ }
+
if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
@@ -8003,7 +8155,19 @@ i40e_ethertype_filter_set(struct i40e_pf *pf,
stats.mac_etype_free, stats.etype_free);
if (ret < 0)
return -ENOSYS;
- return 0;
+
+ /* Add or delete a filter in SW list */
+ if (add) {
+ ethertype_filter = rte_zmalloc("ethertype_filter",
+ sizeof(*ethertype_filter), 0);
+ rte_memcpy(ethertype_filter, &check_filter,
+ sizeof(check_filter));
+ ret = i40e_sw_ethertype_filter_insert(pf, ethertype_filter);
+ } else {
+ ret = i40e_sw_ethertype_filter_del(pf, &node->input);
+ }
+
+ return ret;
}
/*
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 298cef4..3fb20ba 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -37,6 +37,7 @@
#include <rte_eth_ctrl.h>
#include <rte_time.h>
#include <rte_kvargs.h>
+#include <rte_hash.h>
#define I40E_VLAN_TAG_SIZE 4
@@ -396,6 +397,30 @@ struct i40e_fdir_info {
struct i40e_fdir_flex_mask flex_mask[I40E_FILTER_PCTYPE_MAX];
};
+/* Ethertype filter number HW supports */
+#define I40E_MAX_ETHERTYPE_FILTER_NUM 768
+
+/* Ethertype filter struct */
+struct i40e_ethertype_filter_input {
+ struct ether_addr mac_addr; /* Mac address to match */
+ uint16_t ether_type; /* Ether type to match */
+};
+
+struct i40e_ethertype_filter {
+ TAILQ_ENTRY(i40e_ethertype_filter) rules;
+ struct i40e_ethertype_filter_input input;
+ uint16_t flags; /* Flags from RTE_ETHTYPE_FLAGS_* */
+ uint16_t queue; /* Queue assigned to when match */
+};
+
+TAILQ_HEAD(i40e_ethertype_filter_list, i40e_ethertype_filter);
+
+struct i40e_ethertype_rule {
+ struct i40e_ethertype_filter_list ethertype_list;
+ struct i40e_ethertype_filter **hash_map;
+ struct rte_hash *hash_table;
+};
+
#define I40E_MIRROR_MAX_ENTRIES_PER_RULE 64
#define I40E_MAX_MIRROR_RULES 64
/*
@@ -466,6 +491,7 @@ struct i40e_pf {
struct i40e_vmdq_info *vmdq;
struct i40e_fdir_info fdir; /* flow director info */
+ struct i40e_ethertype_rule ethertype; /* Ethertype filter rule */
struct i40e_fc_conf fc_conf; /* Flow control conf */
struct i40e_mirror_rule_list mirror_list;
uint16_t nb_mirror_rule; /* The number of mirror rules */
@@ -616,6 +642,11 @@ void i40e_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
struct rte_eth_rxq_info *qinfo);
void i40e_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
struct rte_eth_txq_info *qinfo);
+struct i40e_ethertype_filter *
+i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_rule *ethertype_rule,
+ const struct i40e_ethertype_filter_input *input);
+int i40e_sw_ethertype_filter_del(struct i40e_pf *pf,
+ struct i40e_ethertype_filter_input *input);
/* I40E_DEV_PRIVATE_TO */
#define I40E_DEV_PRIVATE_TO_PF(adapter) \
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v3 02/17] net/i40e: store tunnel filter
2016-12-29 16:04 ` [dpdk-dev] [PATCH v3 00/17] net/i40e: consistent filter API Beilei Xing
2016-12-29 16:04 ` [dpdk-dev] [PATCH v3 01/17] net/i40e: store ethertype filter Beilei Xing
@ 2016-12-29 16:04 ` Beilei Xing
2016-12-29 16:04 ` [dpdk-dev] [PATCH v3 03/17] net/i40e: store flow director filter Beilei Xing
` (15 subsequent siblings)
17 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2016-12-29 16:04 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
Currently there's no tunnel filter stored in SW.
This patch stores tunnel filter in SW with cuckoo
hash, also adds protection if a tunnel filter has
been added.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_ethdev.c | 169 ++++++++++++++++++++++++++++++++++++++++-
drivers/net/i40e/i40e_ethdev.h | 32 ++++++++
2 files changed, 198 insertions(+), 3 deletions(-)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index e43b4d9..2bdb4d6 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -468,6 +468,12 @@ static int i40e_ethertype_filter_convert(
static int i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
struct i40e_ethertype_filter *filter);
+static int i40e_tunnel_filter_convert(
+ struct i40e_aqc_add_remove_cloud_filters_element_data *cld_filter,
+ struct i40e_tunnel_filter *tunnel_filter);
+static int i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
+ struct i40e_tunnel_filter *tunnel_filter);
+
static const struct rte_pci_id pci_id_i40e_map[] = {
{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710) },
{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QEMU) },
@@ -946,6 +952,7 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
uint32_t len;
uint8_t aq_fail = 0;
struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
+ struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
PMD_INIT_FUNC_TRACE();
@@ -957,6 +964,14 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
.hash_func = rte_hash_crc,
};
+ char tunnel_hash_name[RTE_HASH_NAMESIZE];
+ struct rte_hash_parameters tunnel_hash_params = {
+ .name = tunnel_hash_name,
+ .entries = I40E_MAX_TUNNEL_FILTER_NUM,
+ .key_len = sizeof(struct i40e_tunnel_filter_input),
+ .hash_func = rte_hash_crc,
+ };
+
dev->dev_ops = &i40e_eth_dev_ops;
dev->rx_pkt_burst = i40e_recv_pkts;
dev->tx_pkt_burst = i40e_xmit_pkts;
@@ -1217,8 +1232,33 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
goto err_ethertype_hash_map_alloc;
}
+ /* Initialize tunnel filter rule list and hash */
+ TAILQ_INIT(&tunnel_rule->tunnel_list);
+ snprintf(tunnel_hash_name, RTE_HASH_NAMESIZE,
+ "tunnel_%s", dev->data->name);
+ tunnel_rule->hash_table = rte_hash_create(&tunnel_hash_params);
+ if (!tunnel_rule->hash_table) {
+ PMD_INIT_LOG(ERR, "Failed to create tunnel hash table!");
+ ret = -EINVAL;
+ goto err_tunnel_hash_table_create;
+ }
+ tunnel_rule->hash_map = rte_zmalloc("i40e_tunnel_hash_map",
+ sizeof(struct i40e_tunnel_filter *) *
+ I40E_MAX_TUNNEL_FILTER_NUM,
+ 0);
+ if (!tunnel_rule->hash_map) {
+ PMD_INIT_LOG(ERR,
+ "Failed to allocate memory for tunnel hash map!");
+ ret = -ENOMEM;
+ goto err_tunnel_hash_map_alloc;
+ }
+
return 0;
+err_tunnel_hash_map_alloc:
+ rte_hash_free(tunnel_rule->hash_table);
+err_tunnel_hash_table_create:
+ rte_free(ethertype_rule->hash_map);
err_ethertype_hash_map_alloc:
rte_hash_free(ethertype_rule->hash_table);
err_ethertype_hash_table_create:
@@ -1250,9 +1290,11 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
struct i40e_hw *hw;
struct i40e_filter_control_settings settings;
struct i40e_ethertype_filter *p_ethertype;
+ struct i40e_tunnel_filter *p_tunnel;
int ret;
uint8_t aq_fail = 0;
struct i40e_ethertype_rule *ethertype_rule;
+ struct i40e_tunnel_rule *tunnel_rule;
PMD_INIT_FUNC_TRACE();
@@ -1263,6 +1305,7 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
pci_dev = dev->pci_dev;
ethertype_rule = &pf->ethertype;
+ tunnel_rule = &pf->tunnel;
if (hw->adapter_stopped == 0)
i40e_dev_close(dev);
@@ -1279,6 +1322,17 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
rte_free(p_ethertype);
}
+ /* Remove all tunnel filter rules and hash */
+ if (tunnel_rule->hash_map)
+ rte_free(tunnel_rule->hash_map);
+ if (tunnel_rule->hash_table)
+ rte_hash_free(tunnel_rule->hash_table);
+
+ while ((p_tunnel = TAILQ_FIRST(&tunnel_rule->tunnel_list))) {
+ TAILQ_REMOVE(&tunnel_rule->tunnel_list, p_tunnel, rules);
+ rte_free(p_tunnel);
+ }
+
dev->dev_ops = NULL;
dev->rx_pkt_burst = NULL;
dev->tx_pkt_burst = NULL;
@@ -6478,6 +6532,85 @@ i40e_dev_get_filter_type(uint16_t filter_type, uint16_t *flag)
return 0;
}
+/* Convert tunnel filter structure */
+static int
+i40e_tunnel_filter_convert(struct i40e_aqc_add_remove_cloud_filters_element_data
+ *cld_filter,
+ struct i40e_tunnel_filter *tunnel_filter)
+{
+ ether_addr_copy((struct ether_addr *)&cld_filter->outer_mac,
+ (struct ether_addr *)&tunnel_filter->input.outer_mac);
+ ether_addr_copy((struct ether_addr *)&cld_filter->inner_mac,
+ (struct ether_addr *)&tunnel_filter->input.inner_mac);
+ tunnel_filter->input.inner_vlan = cld_filter->inner_vlan;
+ tunnel_filter->input.flags = cld_filter->flags;
+ tunnel_filter->input.tenant_id = cld_filter->tenant_id;
+ tunnel_filter->queue = cld_filter->queue_number;
+
+ return 0;
+}
+
+/* Check if there exists the tunnel filter */
+struct i40e_tunnel_filter *
+i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_rule *tunnel_rule,
+ const struct i40e_tunnel_filter_input *input)
+{
+ int ret;
+
+ ret = rte_hash_lookup(tunnel_rule->hash_table, (const void *)input);
+ if (ret < 0)
+ return NULL;
+
+ return tunnel_rule->hash_map[ret];
+}
+
+/* Add a tunnel filter into the SW list */
+static int
+i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
+ struct i40e_tunnel_filter *tunnel_filter)
+{
+ struct i40e_tunnel_rule *rule = &pf->tunnel;
+ int ret;
+
+ ret = rte_hash_add_key(rule->hash_table, &tunnel_filter->input);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "Failed to insert tunnel filter to hash table %d!",
+ ret);
+ return ret;
+ }
+ rule->hash_map[ret] = tunnel_filter;
+
+ TAILQ_INSERT_TAIL(&rule->tunnel_list, tunnel_filter, rules);
+
+ return 0;
+}
+
+/* Delete a tunnel filter from the SW list */
+int
+i40e_sw_tunnel_filter_del(struct i40e_pf *pf,
+ struct i40e_tunnel_filter_input *input)
+{
+ struct i40e_tunnel_rule *rule = &pf->tunnel;
+ struct i40e_tunnel_filter *tunnel_filter;
+ int ret;
+
+ ret = rte_hash_del_key(rule->hash_table, input);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "Failed to delete tunnel filter to hash table %d!",
+ ret);
+ return ret;
+ }
+ tunnel_filter = rule->hash_map[ret];
+ rule->hash_map[ret] = NULL;
+
+ TAILQ_REMOVE(&rule->tunnel_list, tunnel_filter, rules);
+ rte_free(tunnel_filter);
+
+ return 0;
+}
+
static int
i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
struct rte_eth_tunnel_filter_conf *tunnel_filter,
@@ -6493,6 +6626,9 @@ i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
struct i40e_vsi *vsi = pf->main_vsi;
struct i40e_aqc_add_remove_cloud_filters_element_data *cld_filter;
struct i40e_aqc_add_remove_cloud_filters_element_data *pfilter;
+ struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
+ struct i40e_tunnel_filter *tunnel, *node;
+ struct i40e_tunnel_filter check_filter; /* Check if filter exists */
cld_filter = rte_zmalloc("tunnel_filter",
sizeof(struct i40e_aqc_add_remove_cloud_filters_element_data),
@@ -6555,11 +6691,38 @@ i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
pfilter->tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id);
pfilter->queue_number = rte_cpu_to_le_16(tunnel_filter->queue_id);
- if (add)
+ /* Check if there is the filter in SW list */
+ memset(&check_filter, 0, sizeof(check_filter));
+ i40e_tunnel_filter_convert(cld_filter, &check_filter);
+ node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &check_filter.input);
+ if (add && node) {
+ PMD_DRV_LOG(ERR, "Conflict with existing tunnel rules!");
+ return -EINVAL;
+ }
+
+ if (!add && !node) {
+ PMD_DRV_LOG(ERR, "There's no corresponding tunnel filter!");
+ return -EINVAL;
+ }
+
+ if (add) {
ret = i40e_aq_add_cloud_filters(hw, vsi->seid, cld_filter, 1);
- else
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to add a tunnel filter.");
+ return ret;
+ }
+ tunnel = rte_zmalloc("tunnel_filter", sizeof(*tunnel), 0);
+ rte_memcpy(tunnel, &check_filter, sizeof(check_filter));
+ ret = i40e_sw_tunnel_filter_insert(pf, tunnel);
+ } else {
ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
- cld_filter, 1);
+ cld_filter, 1);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter.");
+ return ret;
+ }
+ ret = i40e_sw_tunnel_filter_del(pf, &node->input);
+ }
rte_free(cld_filter);
return ret;
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 3fb20ba..83f3594 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -421,6 +421,32 @@ struct i40e_ethertype_rule {
struct rte_hash *hash_table;
};
+/* Tunnel filter number HW supports */
+#define I40E_MAX_TUNNEL_FILTER_NUM 400
+
+/* Tunnel filter struct */
+struct i40e_tunnel_filter_input {
+ uint8_t outer_mac[6]; /* Outer mac address to match */
+ uint8_t inner_mac[6]; /* Inner mac address to match */
+ uint16_t inner_vlan; /* Inner vlan address to match */
+ uint16_t flags; /* Filter type flag */
+ uint32_t tenant_id; /* Tenant id to match */
+};
+
+struct i40e_tunnel_filter {
+ TAILQ_ENTRY(i40e_tunnel_filter) rules;
+ struct i40e_tunnel_filter_input input;
+ uint16_t queue; /* Queue assigned to when match */
+};
+
+TAILQ_HEAD(i40e_tunnel_filter_list, i40e_tunnel_filter);
+
+struct i40e_tunnel_rule {
+ struct i40e_tunnel_filter_list tunnel_list;
+ struct i40e_tunnel_filter **hash_map;
+ struct rte_hash *hash_table;
+};
+
#define I40E_MIRROR_MAX_ENTRIES_PER_RULE 64
#define I40E_MAX_MIRROR_RULES 64
/*
@@ -492,6 +518,7 @@ struct i40e_pf {
struct i40e_fdir_info fdir; /* flow director info */
struct i40e_ethertype_rule ethertype; /* Ethertype filter rule */
+ struct i40e_tunnel_rule tunnel; /* Tunnel filter rule */
struct i40e_fc_conf fc_conf; /* Flow control conf */
struct i40e_mirror_rule_list mirror_list;
uint16_t nb_mirror_rule; /* The number of mirror rules */
@@ -647,6 +674,11 @@ i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_rule *ethertype_rule,
const struct i40e_ethertype_filter_input *input);
int i40e_sw_ethertype_filter_del(struct i40e_pf *pf,
struct i40e_ethertype_filter_input *input);
+struct i40e_tunnel_filter *
+i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_rule *tunnel_rule,
+ const struct i40e_tunnel_filter_input *input);
+int i40e_sw_tunnel_filter_del(struct i40e_pf *pf,
+ struct i40e_tunnel_filter_input *input);
/* I40E_DEV_PRIVATE_TO */
#define I40E_DEV_PRIVATE_TO_PF(adapter) \
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v3 03/17] net/i40e: store flow director filter
2016-12-29 16:04 ` [dpdk-dev] [PATCH v3 00/17] net/i40e: consistent filter API Beilei Xing
2016-12-29 16:04 ` [dpdk-dev] [PATCH v3 01/17] net/i40e: store ethertype filter Beilei Xing
2016-12-29 16:04 ` [dpdk-dev] [PATCH v3 02/17] net/i40e: store tunnel filter Beilei Xing
@ 2016-12-29 16:04 ` Beilei Xing
2016-12-29 16:04 ` [dpdk-dev] [PATCH v3 04/17] net/i40e: restore ethertype filter Beilei Xing
` (14 subsequent siblings)
17 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2016-12-29 16:04 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
Currently there's no flow director filter stored in SW. This
patch stores flow director filters in SW with cuckoo hash,
also adds protection if a flow director filter has been added.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_ethdev.c | 48 +++++++++++++++++++
drivers/net/i40e/i40e_ethdev.h | 14 ++++++
drivers/net/i40e/i40e_fdir.c | 105 +++++++++++++++++++++++++++++++++++++++++
3 files changed, 167 insertions(+)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 2bdb4d6..fb7d794 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -953,6 +953,7 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
uint8_t aq_fail = 0;
struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
+ struct i40e_fdir_info *fdir_info = &pf->fdir;
PMD_INIT_FUNC_TRACE();
@@ -972,6 +973,14 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
.hash_func = rte_hash_crc,
};
+ char fdir_hash_name[RTE_HASH_NAMESIZE];
+ struct rte_hash_parameters fdir_hash_params = {
+ .name = fdir_hash_name,
+ .entries = I40E_MAX_FDIR_FILTER_NUM,
+ .key_len = sizeof(struct rte_eth_fdir_input),
+ .hash_func = rte_hash_crc,
+ };
+
dev->dev_ops = &i40e_eth_dev_ops;
dev->rx_pkt_burst = i40e_recv_pkts;
dev->tx_pkt_burst = i40e_xmit_pkts;
@@ -1253,8 +1262,33 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
goto err_tunnel_hash_map_alloc;
}
+ /* Initialize flow director filter rule list and hash */
+ TAILQ_INIT(&fdir_info->fdir_list);
+ snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
+ "fdir_%s", dev->data->name);
+ fdir_info->hash_table = rte_hash_create(&fdir_hash_params);
+ if (!fdir_info->hash_table) {
+ PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
+ ret = -EINVAL;
+ goto err_fdir_hash_table_create;
+ }
+ fdir_info->hash_map = rte_zmalloc("i40e_fdir_hash_map",
+ sizeof(struct i40e_fdir_filter *) *
+ I40E_MAX_FDIR_FILTER_NUM,
+ 0);
+ if (!fdir_info->hash_map) {
+ PMD_INIT_LOG(ERR,
+ "Failed to allocate memory for fdir hash map!");
+ ret = -ENOMEM;
+ goto err_fdir_hash_map_alloc;
+ }
+
return 0;
+err_fdir_hash_map_alloc:
+ rte_hash_free(fdir_info->hash_table);
+err_fdir_hash_table_create:
+ rte_free(tunnel_rule->hash_map);
err_tunnel_hash_map_alloc:
rte_hash_free(tunnel_rule->hash_table);
err_tunnel_hash_table_create:
@@ -1291,10 +1325,12 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
struct i40e_filter_control_settings settings;
struct i40e_ethertype_filter *p_ethertype;
struct i40e_tunnel_filter *p_tunnel;
+ struct i40e_fdir_filter *p_fdir;
int ret;
uint8_t aq_fail = 0;
struct i40e_ethertype_rule *ethertype_rule;
struct i40e_tunnel_rule *tunnel_rule;
+ struct i40e_fdir_info *fdir_info;
PMD_INIT_FUNC_TRACE();
@@ -1306,6 +1342,7 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
pci_dev = dev->pci_dev;
ethertype_rule = &pf->ethertype;
tunnel_rule = &pf->tunnel;
+ fdir_info = &pf->fdir;
if (hw->adapter_stopped == 0)
i40e_dev_close(dev);
@@ -1333,6 +1370,17 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
rte_free(p_tunnel);
}
+ /* Remove all flow director rules and hash */
+ if (fdir_info->hash_map)
+ rte_free(fdir_info->hash_map);
+ if (fdir_info->hash_table)
+ rte_hash_free(fdir_info->hash_table);
+
+ while ((p_fdir = TAILQ_FIRST(&fdir_info->fdir_list))) {
+ TAILQ_REMOVE(&fdir_info->fdir_list, p_fdir, rules);
+ rte_free(p_fdir);
+ }
+
dev->dev_ops = NULL;
dev->rx_pkt_burst = NULL;
dev->tx_pkt_burst = NULL;
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 83f3594..b79fbd6 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -377,6 +377,14 @@ struct i40e_fdir_flex_mask {
};
#define I40E_FILTER_PCTYPE_MAX 64
+#define I40E_MAX_FDIR_FILTER_NUM (1024 * 8)
+
+struct i40e_fdir_filter {
+ TAILQ_ENTRY(i40e_fdir_filter) rules;
+ struct rte_eth_fdir_filter fdir;
+};
+
+TAILQ_HEAD(i40e_fdir_filter_list, i40e_fdir_filter);
/*
* A structure used to define fields of a FDIR related info.
*/
@@ -395,6 +403,10 @@ struct i40e_fdir_info {
*/
struct i40e_fdir_flex_pit flex_set[I40E_MAX_FLXPLD_LAYER * I40E_MAX_FLXPLD_FIED];
struct i40e_fdir_flex_mask flex_mask[I40E_FILTER_PCTYPE_MAX];
+
+ struct i40e_fdir_filter_list fdir_list;
+ struct i40e_fdir_filter **hash_map;
+ struct rte_hash *hash_table;
};
/* Ethertype filter number HW supports */
@@ -674,6 +686,8 @@ i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_rule *ethertype_rule,
const struct i40e_ethertype_filter_input *input);
int i40e_sw_ethertype_filter_del(struct i40e_pf *pf,
struct i40e_ethertype_filter_input *input);
+int i40e_sw_fdir_filter_del(struct i40e_pf *pf,
+ struct rte_eth_fdir_input *input);
struct i40e_tunnel_filter *
i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_rule *tunnel_rule,
const struct i40e_tunnel_filter_input *input);
diff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c
index 335bf15..4a29b37 100644
--- a/drivers/net/i40e/i40e_fdir.c
+++ b/drivers/net/i40e/i40e_fdir.c
@@ -121,6 +121,14 @@ static int i40e_fdir_filter_programming(struct i40e_pf *pf,
bool add);
static int i40e_fdir_flush(struct rte_eth_dev *dev);
+static int i40e_fdir_filter_convert(const struct rte_eth_fdir_filter *input,
+ struct i40e_fdir_filter *filter);
+static struct i40e_fdir_filter *
+i40e_sw_fdir_filter_lookup(struct i40e_fdir_info *fdir_info,
+ const struct rte_eth_fdir_input *input);
+static int i40e_sw_fdir_filter_insert(struct i40e_pf *pf,
+ struct i40e_fdir_filter *filter);
+
static int
i40e_fdir_rx_queue_init(struct i40e_rx_queue *rxq)
{
@@ -1017,6 +1025,74 @@ i40e_check_fdir_programming_status(struct i40e_rx_queue *rxq)
return ret;
}
+static int
+i40e_fdir_filter_convert(const struct rte_eth_fdir_filter *input,
+ struct i40e_fdir_filter *filter)
+{
+ rte_memcpy(&filter->fdir, input, sizeof(struct rte_eth_fdir_filter));
+ return 0;
+}
+
+/* Check if there exists the flow director filter */
+static struct i40e_fdir_filter *
+i40e_sw_fdir_filter_lookup(struct i40e_fdir_info *fdir_info,
+ const struct rte_eth_fdir_input *input)
+{
+ int ret;
+
+ ret = rte_hash_lookup(fdir_info->hash_table, (const void *)input);
+ if (ret < 0)
+ return NULL;
+
+ return fdir_info->hash_map[ret];
+}
+
+/* Add a flow director filter into the SW list */
+static int
+i40e_sw_fdir_filter_insert(struct i40e_pf *pf, struct i40e_fdir_filter *filter)
+{
+ struct i40e_fdir_info *fdir_info = &pf->fdir;
+ int ret;
+
+ ret = rte_hash_add_key(fdir_info->hash_table,
+ &filter->fdir.input);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "Failed to insert fdir filter to hash table %d!",
+ ret);
+ return ret;
+ }
+ fdir_info->hash_map[ret] = filter;
+
+ TAILQ_INSERT_TAIL(&fdir_info->fdir_list, filter, rules);
+
+ return 0;
+}
+
+/* Delete a flow director filter from the SW list */
+int
+i40e_sw_fdir_filter_del(struct i40e_pf *pf, struct rte_eth_fdir_input *input)
+{
+ struct i40e_fdir_info *fdir_info = &pf->fdir;
+ struct i40e_fdir_filter *filter;
+ int ret;
+
+ ret = rte_hash_del_key(fdir_info->hash_table, input);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "Failed to delete fdir filter to hash table %d!",
+ ret);
+ return ret;
+ }
+ filter = fdir_info->hash_map[ret];
+ fdir_info->hash_map[ret] = NULL;
+
+ TAILQ_REMOVE(&fdir_info->fdir_list, filter, rules);
+ rte_free(filter);
+
+ return 0;
+}
+
/*
* i40e_add_del_fdir_filter - add or remove a flow director filter.
* @pf: board private structure
@@ -1032,6 +1108,9 @@ i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
enum i40e_filter_pctype pctype;
+ struct i40e_fdir_info *fdir_info = &pf->fdir;
+ struct i40e_fdir_filter *fdir_filter, *node;
+ struct i40e_fdir_filter check_filter; /* Check if the filter exists */
int ret = 0;
if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
@@ -1054,6 +1133,22 @@ i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
return -EINVAL;
}
+ /* Check if there is the filter in SW list */
+ memset(&check_filter, 0, sizeof(check_filter));
+ i40e_fdir_filter_convert(filter, &check_filter);
+ node = i40e_sw_fdir_filter_lookup(fdir_info, &check_filter.fdir.input);
+ if (add && node) {
+ PMD_DRV_LOG(ERR,
+ "Conflict with existing flow director rules!");
+ return -EINVAL;
+ }
+
+ if (!add && !node) {
+ PMD_DRV_LOG(ERR,
+ "There's no corresponding flow firector filter!");
+ return -EINVAL;
+ }
+
memset(pkt, 0, I40E_FDIR_PKT_LEN);
ret = i40e_fdir_construct_pkt(pf, &filter->input, pkt);
@@ -1077,6 +1172,16 @@ i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
pctype);
return ret;
}
+
+ if (add) {
+ fdir_filter = rte_zmalloc("fdir_filter",
+ sizeof(*fdir_filter), 0);
+ rte_memcpy(fdir_filter, &check_filter, sizeof(check_filter));
+ ret = i40e_sw_fdir_filter_insert(pf, fdir_filter);
+ } else {
+ ret = i40e_sw_fdir_filter_del(pf, &node->fdir.input);
+ }
+
return ret;
}
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v3 04/17] net/i40e: restore ethertype filter
2016-12-29 16:04 ` [dpdk-dev] [PATCH v3 00/17] net/i40e: consistent filter API Beilei Xing
` (2 preceding siblings ...)
2016-12-29 16:04 ` [dpdk-dev] [PATCH v3 03/17] net/i40e: store flow director filter Beilei Xing
@ 2016-12-29 16:04 ` Beilei Xing
2016-12-29 16:04 ` [dpdk-dev] [PATCH v3 05/17] net/i40e: restore tunnel filter Beilei Xing
` (13 subsequent siblings)
17 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2016-12-29 16:04 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
Add support of restoring ethertype filter in case filter
dropped accidentally, as all filters need to be added and
removed by user obviously for generic filter API.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_ethdev.c | 44 ++++++++++++++++++++++++++++++++++++++++++
1 file changed, 44 insertions(+)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index fb7d794..6cd8c06 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -474,6 +474,9 @@ static int i40e_tunnel_filter_convert(
static int i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
struct i40e_tunnel_filter *tunnel_filter);
+static void i40e_ethertype_filter_restore(struct i40e_pf *pf);
+static void i40e_filter_restore(struct i40e_pf *pf);
+
static const struct rte_pci_id pci_id_i40e_map[] = {
{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710) },
{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QEMU) },
@@ -1955,6 +1958,8 @@ i40e_dev_start(struct rte_eth_dev *dev)
/* enable uio intr after callback register */
rte_intr_enable(intr_handle);
+ i40e_filter_restore(pf);
+
return I40E_SUCCESS;
err_up:
@@ -10071,3 +10076,42 @@ i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
return ret;
}
+
+/* Restore ethertype filter */
+static void
+i40e_ethertype_filter_restore(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_ethertype_filter_list
+ *ethertype_list = &pf->ethertype.ethertype_list;
+ struct i40e_ethertype_filter *f;
+ struct i40e_control_filter_stats stats;
+ uint16_t flags;
+
+ TAILQ_FOREACH(f, ethertype_list, rules) {
+ flags = 0;
+ if (!(f->flags & RTE_ETHTYPE_FLAGS_MAC))
+ flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
+ if (f->flags & RTE_ETHTYPE_FLAGS_DROP)
+ flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
+ flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
+
+ memset(&stats, 0, sizeof(stats));
+ i40e_aq_add_rem_control_packet_filter(hw,
+ f->input.mac_addr.addr_bytes,
+ f->input.ether_type,
+ flags, pf->main_vsi->seid,
+ f->queue, 1, &stats, NULL);
+ }
+ PMD_DRV_LOG(INFO, "add/rem control packet filter:"
+ " mac_etype_used = %u, etype_used = %u,"
+ " mac_etype_free = %u, etype_free = %u\n",
+ stats.mac_etype_used, stats.etype_used,
+ stats.mac_etype_free, stats.etype_free);
+}
+
+static void
+i40e_filter_restore(struct i40e_pf *pf)
+{
+ i40e_ethertype_filter_restore(pf);
+}
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v3 05/17] net/i40e: restore tunnel filter
2016-12-29 16:04 ` [dpdk-dev] [PATCH v3 00/17] net/i40e: consistent filter API Beilei Xing
` (3 preceding siblings ...)
2016-12-29 16:04 ` [dpdk-dev] [PATCH v3 04/17] net/i40e: restore ethertype filter Beilei Xing
@ 2016-12-29 16:04 ` Beilei Xing
2016-12-29 16:04 ` [dpdk-dev] [PATCH v3 06/17] net/i40e: restore flow director filter Beilei Xing
` (12 subsequent siblings)
17 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2016-12-29 16:04 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
Add support of restoring tunnel filter.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_ethdev.c | 21 +++++++++++++++++++++
1 file changed, 21 insertions(+)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 6cd8c06..0d53c4e 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -475,6 +475,7 @@ static int i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
struct i40e_tunnel_filter *tunnel_filter);
static void i40e_ethertype_filter_restore(struct i40e_pf *pf);
+static void i40e_tunnel_filter_restore(struct i40e_pf *pf);
static void i40e_filter_restore(struct i40e_pf *pf);
static const struct rte_pci_id pci_id_i40e_map[] = {
@@ -10110,8 +10111,28 @@ i40e_ethertype_filter_restore(struct i40e_pf *pf)
stats.mac_etype_free, stats.etype_free);
}
+/* Restore tunnel filter */
+static void
+i40e_tunnel_filter_restore(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_vsi *vsi = pf->main_vsi;
+ struct i40e_tunnel_filter_list
+ *tunnel_list = &pf->tunnel.tunnel_list;
+ struct i40e_tunnel_filter *f;
+ struct i40e_aqc_add_remove_cloud_filters_element_data cld_filter;
+
+ TAILQ_FOREACH(f, tunnel_list, rules) {
+ memset(&cld_filter, 0, sizeof(cld_filter));
+ rte_memcpy(&cld_filter, &f->input, sizeof(f->input));
+ cld_filter.queue_number = f->queue;
+ i40e_aq_add_cloud_filters(hw, vsi->seid, &cld_filter, 1);
+ }
+}
+
static void
i40e_filter_restore(struct i40e_pf *pf)
{
i40e_ethertype_filter_restore(pf);
+ i40e_tunnel_filter_restore(pf);
}
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v3 06/17] net/i40e: restore flow director filter
2016-12-29 16:04 ` [dpdk-dev] [PATCH v3 00/17] net/i40e: consistent filter API Beilei Xing
` (4 preceding siblings ...)
2016-12-29 16:04 ` [dpdk-dev] [PATCH v3 05/17] net/i40e: restore tunnel filter Beilei Xing
@ 2016-12-29 16:04 ` Beilei Xing
2016-12-29 16:04 ` [dpdk-dev] [PATCH v3 07/17] net/i40e: add flow validate function Beilei Xing
` (11 subsequent siblings)
17 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2016-12-29 16:04 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
Add support of restoring flow director filter.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_ethdev.c | 1 +
drivers/net/i40e/i40e_ethdev.h | 1 +
drivers/net/i40e/i40e_fdir.c | 28 ++++++++++++++++++++++++++++
3 files changed, 30 insertions(+)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 0d53c4e..5bfa2e4 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -10135,4 +10135,5 @@ i40e_filter_restore(struct i40e_pf *pf)
{
i40e_ethertype_filter_restore(pf);
i40e_tunnel_filter_restore(pf);
+ i40e_fdir_filter_restore(pf);
}
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index b79fbd6..92f6f55 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -670,6 +670,7 @@ int i40e_fdir_ctrl_func(struct rte_eth_dev *dev,
int i40e_select_filter_input_set(struct i40e_hw *hw,
struct rte_eth_input_set_conf *conf,
enum rte_filter_type filter);
+void i40e_fdir_filter_restore(struct i40e_pf *pf);
int i40e_hash_filter_inset_select(struct i40e_hw *hw,
struct rte_eth_input_set_conf *conf);
int i40e_fdir_filter_inset_select(struct i40e_pf *pf,
diff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c
index 4a29b37..78ce92c 100644
--- a/drivers/net/i40e/i40e_fdir.c
+++ b/drivers/net/i40e/i40e_fdir.c
@@ -1586,3 +1586,31 @@ i40e_fdir_ctrl_func(struct rte_eth_dev *dev,
}
return ret;
}
+
+/* Restore flow director filter */
+void
+i40e_fdir_filter_restore(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(pf->main_vsi);
+ struct i40e_fdir_filter_list *fdir_list = &pf->fdir.fdir_list;
+ struct i40e_fdir_filter *f;
+ uint32_t fdstat;
+ uint32_t guarant_cnt; /**< Number of filters in guaranteed spaces. */
+ uint32_t best_cnt; /**< Number of filters in best effort spaces. */
+
+ TAILQ_FOREACH(f, fdir_list, rules)
+ i40e_add_del_fdir_filter(dev, &f->fdir, TRUE);
+
+ fdstat = I40E_READ_REG(hw, I40E_PFQF_FDSTAT);
+ guarant_cnt =
+ (uint32_t)((fdstat & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) >>
+ I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT);
+ best_cnt =
+ (uint32_t)((fdstat & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
+ I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
+
+ PMD_DRV_LOG(INFO, "FDIR restore:"
+ "Guarant count: %d, Best count: %d\n",
+ guarant_cnt, best_cnt);
+}
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v3 07/17] net/i40e: add flow validate function
2016-12-29 16:04 ` [dpdk-dev] [PATCH v3 00/17] net/i40e: consistent filter API Beilei Xing
` (5 preceding siblings ...)
2016-12-29 16:04 ` [dpdk-dev] [PATCH v3 06/17] net/i40e: restore flow director filter Beilei Xing
@ 2016-12-29 16:04 ` Beilei Xing
2016-12-29 16:04 ` [dpdk-dev] [PATCH v3 08/17] net/i40e: parse flow director filter Beilei Xing
` (10 subsequent siblings)
17 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2016-12-29 16:04 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
This patch adds i40e_flow_validation function to check if
a flow is valid according to the flow pattern.
i40e_parse_ethertype_filter is added first, it also gets
the ethertype info.
i40e_flow.c is added to handle all generic filter events.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/Makefile | 1 +
drivers/net/i40e/i40e_ethdev.c | 7 +
drivers/net/i40e/i40e_ethdev.h | 18 ++
drivers/net/i40e/i40e_flow.c | 447 +++++++++++++++++++++++++++++++++++++++++
4 files changed, 473 insertions(+)
create mode 100644 drivers/net/i40e/i40e_flow.c
diff --git a/drivers/net/i40e/Makefile b/drivers/net/i40e/Makefile
index 11175c4..89bd85a 100644
--- a/drivers/net/i40e/Makefile
+++ b/drivers/net/i40e/Makefile
@@ -105,6 +105,7 @@ endif
SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_ethdev_vf.c
SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_pf.c
SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_fdir.c
+SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_flow.c
# vector PMD driver needs SSE4.1 support
ifeq ($(findstring RTE_MACHINE_CPUFLAG_SSE4_1,$(CFLAGS)),)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 5bfa2e4..710631f 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -8426,6 +8426,8 @@ i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
return ret;
}
+const struct rte_flow_ops i40e_flow_ops;
+
static int
i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
enum rte_filter_type filter_type,
@@ -8457,6 +8459,11 @@ i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
case RTE_ETH_FILTER_FDIR:
ret = i40e_fdir_ctrl_func(dev, filter_op, arg);
break;
+ case RTE_ETH_FILTER_GENERIC:
+ if (filter_op != RTE_ETH_FILTER_GET)
+ return -EINVAL;
+ *(const void **)arg = &i40e_flow_ops;
+ break;
default:
PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
filter_type);
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 92f6f55..23f360b 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -38,6 +38,7 @@
#include <rte_time.h>
#include <rte_kvargs.h>
#include <rte_hash.h>
+#include <rte_flow_driver.h>
#define I40E_VLAN_TAG_SIZE 4
@@ -629,6 +630,23 @@ struct i40e_adapter {
struct rte_timecounter tx_tstamp_tc;
};
+union i40e_filter_t {
+ struct rte_eth_ethertype_filter ethertype_filter;
+ struct rte_eth_fdir_filter fdir_filter;
+ struct rte_eth_tunnel_filter_conf tunnel_filter;
+};
+
+typedef int (*parse_filter_t)(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter);
+struct i40e_valid_pattern {
+ enum rte_flow_item_type *items;
+ parse_filter_t parse_filter;
+};
+
int i40e_dev_switch_queues(struct i40e_pf *pf, bool on);
int i40e_vsi_release(struct i40e_vsi *vsi);
struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf,
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
new file mode 100644
index 0000000..a9ff73f
--- /dev/null
+++ b/drivers/net/i40e/i40e_flow.c
@@ -0,0 +1,447 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_log.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_eth_ctrl.h>
+#include <rte_tailq.h>
+#include <rte_flow_driver.h>
+
+#include "i40e_logs.h"
+#include "base/i40e_type.h"
+#include "i40e_ethdev.h"
+
+static int i40e_flow_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error);
+static int i40e_parse_ethertype_pattern(__rte_unused struct rte_eth_dev *dev,
+ const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ struct rte_eth_ethertype_filter *filter);
+static int i40e_parse_ethertype_act(struct rte_eth_dev *dev,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error,
+ struct rte_eth_ethertype_filter *filter);
+static int i40e_parse_attr(const struct rte_flow_attr *attr,
+ struct rte_flow_error *error);
+
+const struct rte_flow_ops i40e_flow_ops = {
+ .validate = i40e_flow_validate,
+};
+
+union i40e_filter_t cons_filter;
+
+/* Pattern matched ethertype filter */
+static enum rte_flow_item_type pattern_ethertype[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static int
+i40e_parse_ethertype_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter)
+{
+ struct rte_eth_ethertype_filter *ethertype_filter =
+ &filter->ethertype_filter;
+ int ret;
+
+ ret = i40e_parse_ethertype_pattern(dev, pattern, error,
+ ethertype_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_parse_ethertype_act(dev, actions, error,
+ ethertype_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_parse_attr(attr, error);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
+static struct i40e_valid_pattern i40e_supported_patterns[] = {
+ /* Ethertype */
+ { pattern_ethertype, i40e_parse_ethertype_filter },
+};
+
+#define NEXT_ITEM_OF_ACTION(act, actions, index) \
+ do { \
+ act = actions + index; \
+ while (act->type == RTE_FLOW_ACTION_TYPE_VOID) { \
+ index++; \
+ act = actions + index; \
+ } \
+ } while (0)
+
+/* Find the first VOID or non-VOID item pointer */
+static const struct rte_flow_item *
+i40e_find_first_item(const struct rte_flow_item *item, bool is_void)
+{
+ bool is_find;
+
+ while (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ if (is_void)
+ is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
+ else
+ is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
+ if (is_find)
+ break;
+ item++;
+ }
+ return item;
+}
+
+/* Skip all VOID items of the pattern */
+static void
+i40e_pattern_skip_void_item(struct rte_flow_item *items,
+ const struct rte_flow_item *pattern)
+{
+ uint32_t cpy_count = 0;
+ const struct rte_flow_item *pb = pattern, *pe = pattern;
+
+ for (;;) {
+ /* Find a non-void item first */
+ pb = i40e_find_first_item(pb, false);
+ if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
+ pe = pb;
+ break;
+ }
+
+ /* Find a void item */
+ pe = i40e_find_first_item(pb + 1, true);
+
+ cpy_count = pe - pb;
+ rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);
+
+ items += cpy_count;
+
+ if (pe->type == RTE_FLOW_ITEM_TYPE_END) {
+ pb = pe;
+ break;
+ }
+
+ pb = pe + 1;
+ }
+ /* Copy the END item. */
+ rte_memcpy(items, pe, sizeof(struct rte_flow_item));
+}
+
+/* Check if the pattern matches a supported item type array */
+static bool
+i40e_match_pattern(enum rte_flow_item_type *item_array,
+ struct rte_flow_item *pattern)
+{
+ struct rte_flow_item *item = pattern;
+
+ while ((*item_array == item->type) &&
+ (*item_array != RTE_FLOW_ITEM_TYPE_END)) {
+ item_array++;
+ item++;
+ }
+
+ return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
+ item->type == RTE_FLOW_ITEM_TYPE_END);
+}
+
+/* Find if there's parse filter function matched */
+static parse_filter_t
+i40e_find_parse_filter_func(struct rte_flow_item *pattern)
+{
+ parse_filter_t parse_filter = NULL;
+ uint8_t i = 0;
+
+ for (; i < RTE_DIM(i40e_supported_patterns); i++) {
+ if (i40e_match_pattern(i40e_supported_patterns[i].items,
+ pattern)) {
+ parse_filter = i40e_supported_patterns[i].parse_filter;
+ break;
+ }
+ }
+
+ return parse_filter;
+}
+
+/* Parse attributes */
+static int
+i40e_parse_attr(const struct rte_flow_attr *attr,
+ struct rte_flow_error *error)
+{
+ /* Must be input direction */
+ if (!attr->ingress) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ attr, "Only support ingress.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
+ if (attr->egress) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ attr, "Not support egress.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
+ if (attr->priority) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ attr, "Not support priority.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
+ if (attr->group) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+ attr, "Not support group.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+static int
+i40e_parse_ethertype_pattern(__rte_unused struct rte_eth_dev *dev,
+ const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ struct rte_eth_ethertype_filter *filter)
+{
+ const struct rte_flow_item *item = pattern;
+ const struct rte_flow_item_eth *eth_spec;
+ const struct rte_flow_item_eth *eth_mask;
+ enum rte_flow_item_type item_type;
+
+ for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Not support range");
+ return -rte_errno;
+ }
+ item_type = item->type;
+ switch (item_type) {
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ eth_spec = (const struct rte_flow_item_eth *)item->spec;
+ eth_mask = (const struct rte_flow_item_eth *)item->mask;
+ /* Get the MAC info. */
+ if (!eth_spec || !eth_mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "NULL ETH spec/mask");
+ return -rte_errno;
+ }
+
+ /* Mask bits of source MAC address must be full of 0.
+ * Mask bits of destination MAC address must be full
+ * of 1 or full of 0.
+ */
+ if (!is_zero_ether_addr(&eth_mask->src) ||
+ (!is_zero_ether_addr(&eth_mask->dst) &&
+ !is_broadcast_ether_addr(&eth_mask->dst))) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid MAC_addr mask");
+ return -rte_errno;
+ }
+
+ if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid ethertype mask");
+ return -rte_errno;
+ }
+
+ /* If mask bits of destination MAC address
+ * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
+ */
+ if (is_broadcast_ether_addr(&eth_mask->dst)) {
+ filter->mac_addr = eth_spec->dst;
+ filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
+ } else {
+ filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
+ }
+ filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
+
+ if (filter->ether_type == ETHER_TYPE_IPv4 ||
+ filter->ether_type == ETHER_TYPE_IPv6) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Unsupported ether_type in"
+ " control packet filter.");
+ return -rte_errno;
+ }
+ if (filter->ether_type == ETHER_TYPE_VLAN)
+ PMD_DRV_LOG(WARNING, "filter vlan ether_type in"
+ " first tag is not supported.");
+
+ break;
+ default:
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int
+i40e_parse_ethertype_act(struct rte_eth_dev *dev,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error,
+ struct rte_eth_ethertype_filter *filter)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ const struct rte_flow_action *act;
+ const struct rte_flow_action_queue *act_q;
+ uint32_t index = 0;
+
+ /* Check if the first non-void action is QUEUE or DROP. */
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
+ act->type != RTE_FLOW_ACTION_TYPE_DROP) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
+ act_q = (const struct rte_flow_action_queue *)act->conf;
+ filter->queue = act_q->index;
+ if (filter->queue >= pf->dev_data->nb_rx_queues) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Invalid queue ID for"
+ " ethertype_filter.");
+ return -rte_errno;
+ }
+ } else {
+ filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
+ }
+
+ /* Check if the next non-void item is END */
+ index++;
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+static int
+i40e_flow_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct rte_flow_item *items; /* internal pattern w/o VOID items */
+ parse_filter_t parse_filter;
+ uint32_t item_num = 0; /* non-void item number of pattern*/
+ uint32_t i = 0;
+ int ret;
+
+ if (!pattern) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL, "NULL pattern.");
+ return -rte_errno;
+ }
+
+ if (!actions) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+ NULL, "NULL action.");
+ return -rte_errno;
+ }
+
+ memset(&cons_filter, 0, sizeof(cons_filter));
+
+ /* Get the non-void item number of pattern */
+ while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
+ if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
+ item_num++;
+ i++;
+ }
+ item_num++;
+
+ items = rte_zmalloc("i40e_pattern",
+ item_num * sizeof(struct rte_flow_item), 0);
+ if (!items) {
+ rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL, "No memory for PMD internal items.");
+ return -ENOMEM;
+ }
+
+ i40e_pattern_skip_void_item(items, pattern);
+
+ /* Find if there's matched parse filter function */
+ parse_filter = i40e_find_parse_filter_func(items);
+ if (!parse_filter) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ pattern, "Unsupported pattern");
+ return -rte_errno;
+ }
+
+ ret = parse_filter(dev, attr, items, actions, error, &cons_filter);
+
+ rte_free(items);
+
+ return ret;
+}
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v3 08/17] net/i40e: parse flow director filter
2016-12-29 16:04 ` [dpdk-dev] [PATCH v3 00/17] net/i40e: consistent filter API Beilei Xing
` (6 preceding siblings ...)
2016-12-29 16:04 ` [dpdk-dev] [PATCH v3 07/17] net/i40e: add flow validate function Beilei Xing
@ 2016-12-29 16:04 ` Beilei Xing
2016-12-29 16:04 ` [dpdk-dev] [PATCH v3 09/17] net/i40e: parse tunnel filter Beilei Xing
` (9 subsequent siblings)
17 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2016-12-29 16:04 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
This patch adds i40e_parse_fdir_filter to check if a rule
is a flow director rule according to the flow pattern,
and the function also gets the flow director info.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_ethdev.c | 56 +---
drivers/net/i40e/i40e_ethdev.h | 55 ++++
drivers/net/i40e/i40e_flow.c | 607 +++++++++++++++++++++++++++++++++++++++++
3 files changed, 663 insertions(+), 55 deletions(-)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 710631f..4a057b4 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -139,60 +139,6 @@
#define I40E_DEFAULT_DCB_APP_NUM 1
#define I40E_DEFAULT_DCB_APP_PRIO 3
-#define I40E_INSET_NONE 0x00000000000000000ULL
-
-/* bit0 ~ bit 7 */
-#define I40E_INSET_DMAC 0x0000000000000001ULL
-#define I40E_INSET_SMAC 0x0000000000000002ULL
-#define I40E_INSET_VLAN_OUTER 0x0000000000000004ULL
-#define I40E_INSET_VLAN_INNER 0x0000000000000008ULL
-#define I40E_INSET_VLAN_TUNNEL 0x0000000000000010ULL
-
-/* bit 8 ~ bit 15 */
-#define I40E_INSET_IPV4_SRC 0x0000000000000100ULL
-#define I40E_INSET_IPV4_DST 0x0000000000000200ULL
-#define I40E_INSET_IPV6_SRC 0x0000000000000400ULL
-#define I40E_INSET_IPV6_DST 0x0000000000000800ULL
-#define I40E_INSET_SRC_PORT 0x0000000000001000ULL
-#define I40E_INSET_DST_PORT 0x0000000000002000ULL
-#define I40E_INSET_SCTP_VT 0x0000000000004000ULL
-
-/* bit 16 ~ bit 31 */
-#define I40E_INSET_IPV4_TOS 0x0000000000010000ULL
-#define I40E_INSET_IPV4_PROTO 0x0000000000020000ULL
-#define I40E_INSET_IPV4_TTL 0x0000000000040000ULL
-#define I40E_INSET_IPV6_TC 0x0000000000080000ULL
-#define I40E_INSET_IPV6_FLOW 0x0000000000100000ULL
-#define I40E_INSET_IPV6_NEXT_HDR 0x0000000000200000ULL
-#define I40E_INSET_IPV6_HOP_LIMIT 0x0000000000400000ULL
-#define I40E_INSET_TCP_FLAGS 0x0000000000800000ULL
-
-/* bit 32 ~ bit 47, tunnel fields */
-#define I40E_INSET_TUNNEL_IPV4_DST 0x0000000100000000ULL
-#define I40E_INSET_TUNNEL_IPV6_DST 0x0000000200000000ULL
-#define I40E_INSET_TUNNEL_DMAC 0x0000000400000000ULL
-#define I40E_INSET_TUNNEL_SRC_PORT 0x0000000800000000ULL
-#define I40E_INSET_TUNNEL_DST_PORT 0x0000001000000000ULL
-#define I40E_INSET_TUNNEL_ID 0x0000002000000000ULL
-
-/* bit 48 ~ bit 55 */
-#define I40E_INSET_LAST_ETHER_TYPE 0x0001000000000000ULL
-
-/* bit 56 ~ bit 63, Flex Payload */
-#define I40E_INSET_FLEX_PAYLOAD_W1 0x0100000000000000ULL
-#define I40E_INSET_FLEX_PAYLOAD_W2 0x0200000000000000ULL
-#define I40E_INSET_FLEX_PAYLOAD_W3 0x0400000000000000ULL
-#define I40E_INSET_FLEX_PAYLOAD_W4 0x0800000000000000ULL
-#define I40E_INSET_FLEX_PAYLOAD_W5 0x1000000000000000ULL
-#define I40E_INSET_FLEX_PAYLOAD_W6 0x2000000000000000ULL
-#define I40E_INSET_FLEX_PAYLOAD_W7 0x4000000000000000ULL
-#define I40E_INSET_FLEX_PAYLOAD_W8 0x8000000000000000ULL
-#define I40E_INSET_FLEX_PAYLOAD \
- (I40E_INSET_FLEX_PAYLOAD_W1 | I40E_INSET_FLEX_PAYLOAD_W2 | \
- I40E_INSET_FLEX_PAYLOAD_W3 | I40E_INSET_FLEX_PAYLOAD_W4 | \
- I40E_INSET_FLEX_PAYLOAD_W5 | I40E_INSET_FLEX_PAYLOAD_W6 | \
- I40E_INSET_FLEX_PAYLOAD_W7 | I40E_INSET_FLEX_PAYLOAD_W8)
-
/**
* Below are values for writing un-exposed registers suggested
* by silicon experts
@@ -7617,7 +7563,7 @@ i40e_validate_input_set(enum i40e_filter_pctype pctype,
}
/* default input set fields combination per pctype */
-static uint64_t
+uint64_t
i40e_get_default_input_set(uint16_t pctype)
{
static const uint64_t default_inset_table[] = {
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 23f360b..9e3a48d 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -190,6 +190,60 @@ enum i40e_flxpld_layer_idx {
#define FLOATING_VEB_SUPPORTED_FW_MAJ 5
#define FLOATING_VEB_SUPPORTED_FW_MIN 0
+#define I40E_INSET_NONE 0x00000000000000000ULL
+
+/* bit0 ~ bit 7 */
+#define I40E_INSET_DMAC 0x0000000000000001ULL
+#define I40E_INSET_SMAC 0x0000000000000002ULL
+#define I40E_INSET_VLAN_OUTER 0x0000000000000004ULL
+#define I40E_INSET_VLAN_INNER 0x0000000000000008ULL
+#define I40E_INSET_VLAN_TUNNEL 0x0000000000000010ULL
+
+/* bit 8 ~ bit 15 */
+#define I40E_INSET_IPV4_SRC 0x0000000000000100ULL
+#define I40E_INSET_IPV4_DST 0x0000000000000200ULL
+#define I40E_INSET_IPV6_SRC 0x0000000000000400ULL
+#define I40E_INSET_IPV6_DST 0x0000000000000800ULL
+#define I40E_INSET_SRC_PORT 0x0000000000001000ULL
+#define I40E_INSET_DST_PORT 0x0000000000002000ULL
+#define I40E_INSET_SCTP_VT 0x0000000000004000ULL
+
+/* bit 16 ~ bit 31 */
+#define I40E_INSET_IPV4_TOS 0x0000000000010000ULL
+#define I40E_INSET_IPV4_PROTO 0x0000000000020000ULL
+#define I40E_INSET_IPV4_TTL 0x0000000000040000ULL
+#define I40E_INSET_IPV6_TC 0x0000000000080000ULL
+#define I40E_INSET_IPV6_FLOW 0x0000000000100000ULL
+#define I40E_INSET_IPV6_NEXT_HDR 0x0000000000200000ULL
+#define I40E_INSET_IPV6_HOP_LIMIT 0x0000000000400000ULL
+#define I40E_INSET_TCP_FLAGS 0x0000000000800000ULL
+
+/* bit 32 ~ bit 47, tunnel fields */
+#define I40E_INSET_TUNNEL_IPV4_DST 0x0000000100000000ULL
+#define I40E_INSET_TUNNEL_IPV6_DST 0x0000000200000000ULL
+#define I40E_INSET_TUNNEL_DMAC 0x0000000400000000ULL
+#define I40E_INSET_TUNNEL_SRC_PORT 0x0000000800000000ULL
+#define I40E_INSET_TUNNEL_DST_PORT 0x0000001000000000ULL
+#define I40E_INSET_TUNNEL_ID 0x0000002000000000ULL
+
+/* bit 48 ~ bit 55 */
+#define I40E_INSET_LAST_ETHER_TYPE 0x0001000000000000ULL
+
+/* bit 56 ~ bit 63, Flex Payload */
+#define I40E_INSET_FLEX_PAYLOAD_W1 0x0100000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W2 0x0200000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W3 0x0400000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W4 0x0800000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W5 0x1000000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W6 0x2000000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W7 0x4000000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W8 0x8000000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD \
+ (I40E_INSET_FLEX_PAYLOAD_W1 | I40E_INSET_FLEX_PAYLOAD_W2 | \
+ I40E_INSET_FLEX_PAYLOAD_W3 | I40E_INSET_FLEX_PAYLOAD_W4 | \
+ I40E_INSET_FLEX_PAYLOAD_W5 | I40E_INSET_FLEX_PAYLOAD_W6 | \
+ I40E_INSET_FLEX_PAYLOAD_W7 | I40E_INSET_FLEX_PAYLOAD_W8)
+
struct i40e_adapter;
/**
@@ -712,6 +766,7 @@ i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_rule *tunnel_rule,
const struct i40e_tunnel_filter_input *input);
int i40e_sw_tunnel_filter_del(struct i40e_pf *pf,
struct i40e_tunnel_filter_input *input);
+uint64_t i40e_get_default_input_set(uint16_t pctype);
/* I40E_DEV_PRIVATE_TO */
#define I40E_DEV_PRIVATE_TO_PF(adapter) \
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index a9ff73f..7b872c9 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -51,6 +51,10 @@
#include "base/i40e_type.h"
#include "i40e_ethdev.h"
+#define I40E_IPV4_TC_SHIFT 4
+#define I40E_IPV6_TC_MASK (0x00FF << I40E_IPV4_TC_SHIFT)
+#define I40E_IPV6_FRAG_HEADER 44
+
static int i40e_flow_validate(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
@@ -64,6 +68,14 @@ static int i40e_parse_ethertype_act(struct rte_eth_dev *dev,
const struct rte_flow_action *actions,
struct rte_flow_error *error,
struct rte_eth_ethertype_filter *filter);
+static int i40e_parse_fdir_pattern(struct rte_eth_dev *dev,
+ const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ struct rte_eth_fdir_filter *filter);
+static int i40e_parse_fdir_act(struct rte_eth_dev *dev,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error,
+ struct rte_eth_fdir_filter *filter);
static int i40e_parse_attr(const struct rte_flow_attr *attr,
struct rte_flow_error *error);
@@ -79,6 +91,107 @@ static enum rte_flow_item_type pattern_ethertype[] = {
RTE_FLOW_ITEM_TYPE_END,
};
+/* Pattern matched flow director filter */
+static enum rte_flow_item_type pattern_fdir_ipv4[] = {
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_ext[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_udp[] = {
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_udp_ext[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_tcp[] = {
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_tcp_ext[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_sctp[] = {
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_sctp_ext[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6[] = {
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_ext[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_udp[] = {
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_udp_ext[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_tcp[] = {
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_tcp_ext[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_sctp[] = {
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_sctp_ext[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
static int
i40e_parse_ethertype_filter(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
@@ -108,9 +221,62 @@ i40e_parse_ethertype_filter(struct rte_eth_dev *dev,
return ret;
}
+/* Top-level parser for flow-director rules: fills @filter->fdir_filter
+ * from the pattern, then the actions, then the attributes, and finally
+ * rejects ports that are not configured in perfect-match FDIR mode.
+ * Returns 0 on success, a negative errno value otherwise (rte_flow
+ * error set by the failing sub-parser).
+ */
+static int
+i40e_parse_fdir_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter)
+{
+ struct rte_eth_fdir_filter *fdir_filter =
+ &filter->fdir_filter;
+ int ret;
+
+ ret = i40e_parse_fdir_pattern(dev, pattern, error, fdir_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_parse_fdir_act(dev, actions, error, fdir_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_parse_attr(attr, error);
+ if (ret)
+ return ret;
+
+ /* FDIR flows can only be programmed in perfect-match mode. */
+ if (dev->data->dev_conf.fdir_conf.mode !=
+ RTE_FDIR_MODE_PERFECT) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "Check the mode in fdir_conf.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+/* Lookup table mapping every supported item sequence to the parse
+ * function that validates it and extracts the corresponding filter.
+ */
static struct i40e_valid_pattern i40e_supported_patterns[] = {
/* Ethertype */
{ pattern_ethertype, i40e_parse_ethertype_filter },
+ /* FDIR */
+ { pattern_fdir_ipv4, i40e_parse_fdir_filter },
+ { pattern_fdir_ipv4_ext, i40e_parse_fdir_filter },
+ { pattern_fdir_ipv4_udp, i40e_parse_fdir_filter },
+ { pattern_fdir_ipv4_udp_ext, i40e_parse_fdir_filter },
+ { pattern_fdir_ipv4_tcp, i40e_parse_fdir_filter },
+ { pattern_fdir_ipv4_tcp_ext, i40e_parse_fdir_filter },
+ { pattern_fdir_ipv4_sctp, i40e_parse_fdir_filter },
+ { pattern_fdir_ipv4_sctp_ext, i40e_parse_fdir_filter },
+ { pattern_fdir_ipv6, i40e_parse_fdir_filter },
+ { pattern_fdir_ipv6_ext, i40e_parse_fdir_filter },
+ { pattern_fdir_ipv6_udp, i40e_parse_fdir_filter },
+ { pattern_fdir_ipv6_udp_ext, i40e_parse_fdir_filter },
+ { pattern_fdir_ipv6_tcp, i40e_parse_fdir_filter },
+ { pattern_fdir_ipv6_tcp_ext, i40e_parse_fdir_filter },
+ { pattern_fdir_ipv6_sctp, i40e_parse_fdir_filter },
+ { pattern_fdir_ipv6_sctp_ext, i40e_parse_fdir_filter },
};
#define NEXT_ITEM_OF_ACTION(act, actions, index) \
@@ -385,6 +551,447 @@ i40e_parse_ethertype_act(struct rte_eth_dev *dev,
}
static int
+/* Parse a flow-director pattern (item list) into @filter.
+ * Each item's spec/mask pair is validated, matched fields are folded
+ * into @input_set, and the resulting flow type must map to a PCTYPE
+ * whose default input set equals @input_set exactly.
+ * Returns 0 on success, -rte_errno otherwise (rte_flow error set).
+ */
+i40e_parse_fdir_pattern(struct rte_eth_dev *dev,
+            const struct rte_flow_item *pattern,
+            struct rte_flow_error *error,
+            struct rte_eth_fdir_filter *filter)
+{
+    struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+    const struct rte_flow_item *item = pattern;
+    const struct rte_flow_item_eth *eth_spec, *eth_mask;
+    const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
+    const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
+    const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
+    const struct rte_flow_item_udp *udp_spec, *udp_mask;
+    const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
+    const struct rte_flow_item_vf *vf_spec;
+    uint32_t flow_type = RTE_ETH_FLOW_UNKNOWN;
+    enum i40e_filter_pctype pctype;
+    uint64_t input_set = I40E_INSET_NONE;
+    uint16_t flag_offset;
+    uint32_t vtc_flow_cpu;
+    enum rte_flow_item_type item_type;
+    /* Defensive init: every supported pattern places an L3 item before
+     * any L4 item, but do not rely on that here.
+     */
+    enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
+    uint32_t j;
+
+    for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+        if (item->last) {
+            rte_flow_error_set(error, EINVAL,
+                       RTE_FLOW_ERROR_TYPE_ITEM,
+                       item,
+                       "Not support range");
+            return -rte_errno;
+        }
+        item_type = item->type;
+        switch (item_type) {
+        case RTE_FLOW_ITEM_TYPE_ETH:
+            /* ETH is only a protocol marker for FDIR rules, so any
+             * spec or mask is rejected.
+             */
+            eth_spec = (const struct rte_flow_item_eth *)item->spec;
+            eth_mask = (const struct rte_flow_item_eth *)item->mask;
+            if (eth_spec || eth_mask) {
+                rte_flow_error_set(error, EINVAL,
+                           RTE_FLOW_ERROR_TYPE_ITEM,
+                           item,
+                           "Invalid ETH spec/mask");
+                return -rte_errno;
+            }
+            break;
+        case RTE_FLOW_ITEM_TYPE_IPV4:
+            l3 = RTE_FLOW_ITEM_TYPE_IPV4;
+            ipv4_spec =
+                (const struct rte_flow_item_ipv4 *)item->spec;
+            ipv4_mask =
+                (const struct rte_flow_item_ipv4 *)item->mask;
+            if (!ipv4_spec || !ipv4_mask) {
+                rte_flow_error_set(error, EINVAL,
+                           RTE_FLOW_ERROR_TYPE_ITEM,
+                           item,
+                           "NULL IPv4 spec/mask");
+                return -rte_errno;
+            }
+
+            /* Check IPv4 mask and update input set */
+            if (ipv4_mask->hdr.version_ihl ||
+                ipv4_mask->hdr.total_length ||
+                ipv4_mask->hdr.packet_id ||
+                ipv4_mask->hdr.fragment_offset ||
+                ipv4_mask->hdr.hdr_checksum) {
+                rte_flow_error_set(error, EINVAL,
+                           RTE_FLOW_ERROR_TYPE_ITEM,
+                           item,
+                           "Invalid IPv4 mask.");
+                return -rte_errno;
+            }
+
+            if (ipv4_mask->hdr.src_addr == UINT32_MAX)
+                input_set |= I40E_INSET_IPV4_SRC;
+            if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
+                input_set |= I40E_INSET_IPV4_DST;
+            if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
+                input_set |= I40E_INSET_IPV4_TOS;
+            if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
+                input_set |= I40E_INSET_IPV4_TTL;
+            if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
+                input_set |= I40E_INSET_IPV4_PROTO;
+
+            /* Classify as fragment when the fragment offset or the
+             * more-fragments flag is set.
+             */
+            flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_OTHER;
+            flag_offset =
+                rte_be_to_cpu_16(ipv4_spec->hdr.fragment_offset);
+            if (flag_offset & IPV4_HDR_OFFSET_MASK ||
+                flag_offset & IPV4_HDR_MF_FLAG)
+                flow_type = RTE_ETH_FLOW_FRAG_IPV4;
+
+            /* Get the filter info */
+            filter->input.flow.ip4_flow.proto =
+                ipv4_spec->hdr.next_proto_id;
+            filter->input.flow.ip4_flow.tos =
+                ipv4_spec->hdr.type_of_service;
+            filter->input.flow.ip4_flow.ttl =
+                ipv4_spec->hdr.time_to_live;
+            filter->input.flow.ip4_flow.src_ip =
+                ipv4_spec->hdr.src_addr;
+            filter->input.flow.ip4_flow.dst_ip =
+                ipv4_spec->hdr.dst_addr;
+
+            break;
+        case RTE_FLOW_ITEM_TYPE_IPV6:
+            l3 = RTE_FLOW_ITEM_TYPE_IPV6;
+            ipv6_spec =
+                (const struct rte_flow_item_ipv6 *)item->spec;
+            ipv6_mask =
+                (const struct rte_flow_item_ipv6 *)item->mask;
+            if (!ipv6_spec || !ipv6_mask) {
+                rte_flow_error_set(error, EINVAL,
+                           RTE_FLOW_ERROR_TYPE_ITEM,
+                           item,
+                           "NULL IPv6 spec/mask");
+                return -rte_errno;
+            }
+
+            /* Check IPv6 mask and update input set */
+            if (ipv6_mask->hdr.payload_len) {
+                rte_flow_error_set(error, EINVAL,
+                           RTE_FLOW_ERROR_TYPE_ITEM,
+                           item,
+                           "Invalid IPv6 mask");
+                return -rte_errno;
+            }
+
+            /* SRC and DST address of IPv6 shouldn't be masked */
+            for (j = 0; j < RTE_DIM(ipv6_mask->hdr.src_addr); j++) {
+                if (ipv6_mask->hdr.src_addr[j] != UINT8_MAX ||
+                    ipv6_mask->hdr.dst_addr[j] != UINT8_MAX) {
+                    rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ITEM,
+                               item,
+                               "Invalid IPv6 mask");
+                    return -rte_errno;
+                }
+            }
+
+            input_set |= I40E_INSET_IPV6_SRC;
+            input_set |= I40E_INSET_IPV6_DST;
+
+            /* vtc_flow is a 32-bit big-endian field laid out as
+             * version(4b) | traffic class(8b) | flow label(20b).
+             * Convert to CPU order before testing the TC bits; the
+             * previous 16-bit byte-swap silently truncated the field.
+             * I40E_IPV6_TC_MASK covers the TC bits of the upper
+             * 16 bits of the CPU-order value.
+             */
+            vtc_flow_cpu = rte_be_to_cpu_32(ipv6_mask->hdr.vtc_flow);
+            if (((vtc_flow_cpu >> 16) & I40E_IPV6_TC_MASK) ==
+                I40E_IPV6_TC_MASK)
+                input_set |= I40E_INSET_IPV6_TC;
+            if (ipv6_mask->hdr.proto == UINT8_MAX)
+                input_set |= I40E_INSET_IPV6_NEXT_HDR;
+            if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
+                input_set |= I40E_INSET_IPV6_HOP_LIMIT;
+
+            /* Extract the traffic class from the CPU-order value;
+             * the previous left-shift of the raw big-endian word
+             * yielded a wrong TC.
+             */
+            vtc_flow_cpu = rte_be_to_cpu_32(ipv6_spec->hdr.vtc_flow);
+            filter->input.flow.ipv6_flow.tc =
+                (uint8_t)(((vtc_flow_cpu >> 16) &
+                       I40E_IPV6_TC_MASK) >> I40E_IPV4_TC_SHIFT);
+            filter->input.flow.ipv6_flow.proto =
+                ipv6_spec->hdr.proto;
+            filter->input.flow.ipv6_flow.hop_limits =
+                ipv6_spec->hdr.hop_limits;
+
+            rte_memcpy(filter->input.flow.ipv6_flow.src_ip,
+                   ipv6_spec->hdr.src_addr, 16);
+            rte_memcpy(filter->input.flow.ipv6_flow.dst_ip,
+                   ipv6_spec->hdr.dst_addr, 16);
+
+            /* Check if it is fragment. */
+            if (ipv6_spec->hdr.proto == I40E_IPV6_FRAG_HEADER)
+                flow_type = RTE_ETH_FLOW_FRAG_IPV6;
+            else
+                flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_OTHER;
+            break;
+        case RTE_FLOW_ITEM_TYPE_TCP:
+            tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
+            tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
+            if (!tcp_spec || !tcp_mask) {
+                rte_flow_error_set(error, EINVAL,
+                           RTE_FLOW_ERROR_TYPE_ITEM,
+                           item,
+                           "NULL TCP spec/mask");
+                return -rte_errno;
+            }
+
+            /* Only src/dst port may (and must) be matched. */
+            if (tcp_mask->hdr.sent_seq ||
+                tcp_mask->hdr.recv_ack ||
+                tcp_mask->hdr.data_off ||
+                tcp_mask->hdr.tcp_flags ||
+                tcp_mask->hdr.rx_win ||
+                tcp_mask->hdr.cksum ||
+                tcp_mask->hdr.tcp_urp) {
+                rte_flow_error_set(error, EINVAL,
+                           RTE_FLOW_ERROR_TYPE_ITEM,
+                           item,
+                           "Invalid TCP mask");
+                return -rte_errno;
+            }
+
+            if (tcp_mask->hdr.src_port != UINT16_MAX ||
+                tcp_mask->hdr.dst_port != UINT16_MAX) {
+                rte_flow_error_set(error, EINVAL,
+                           RTE_FLOW_ERROR_TYPE_ITEM,
+                           item,
+                           "Invalid TCP mask");
+                return -rte_errno;
+            }
+
+            input_set |= I40E_INSET_SRC_PORT;
+            input_set |= I40E_INSET_DST_PORT;
+
+            /* Get filter info */
+            if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
+                flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
+                filter->input.flow.tcp4_flow.src_port =
+                    tcp_spec->hdr.src_port;
+                filter->input.flow.tcp4_flow.dst_port =
+                    tcp_spec->hdr.dst_port;
+            } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
+                flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_TCP;
+                filter->input.flow.tcp6_flow.src_port =
+                    tcp_spec->hdr.src_port;
+                filter->input.flow.tcp6_flow.dst_port =
+                    tcp_spec->hdr.dst_port;
+            }
+            break;
+        case RTE_FLOW_ITEM_TYPE_UDP:
+            udp_spec = (const struct rte_flow_item_udp *)item->spec;
+            udp_mask = (const struct rte_flow_item_udp *)item->mask;
+            if (!udp_spec || !udp_mask) {
+                rte_flow_error_set(error, EINVAL,
+                           RTE_FLOW_ERROR_TYPE_ITEM,
+                           item,
+                           "NULL UDP spec/mask");
+                return -rte_errno;
+            }
+
+            /* Only src/dst port may (and must) be matched. */
+            if (udp_mask->hdr.dgram_len ||
+                udp_mask->hdr.dgram_cksum) {
+                rte_flow_error_set(error, EINVAL,
+                           RTE_FLOW_ERROR_TYPE_ITEM,
+                           item,
+                           "Invalid UDP mask");
+                return -rte_errno;
+            }
+
+            if (udp_mask->hdr.src_port != UINT16_MAX ||
+                udp_mask->hdr.dst_port != UINT16_MAX) {
+                rte_flow_error_set(error, EINVAL,
+                           RTE_FLOW_ERROR_TYPE_ITEM,
+                           item,
+                           "Invalid UDP mask");
+                return -rte_errno;
+            }
+
+            input_set |= I40E_INSET_SRC_PORT;
+            input_set |= I40E_INSET_DST_PORT;
+
+            /* Get filter info */
+            if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
+                flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
+                filter->input.flow.udp4_flow.src_port =
+                    udp_spec->hdr.src_port;
+                filter->input.flow.udp4_flow.dst_port =
+                    udp_spec->hdr.dst_port;
+            } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
+                flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_UDP;
+                filter->input.flow.udp6_flow.src_port =
+                    udp_spec->hdr.src_port;
+                filter->input.flow.udp6_flow.dst_port =
+                    udp_spec->hdr.dst_port;
+            }
+            break;
+        case RTE_FLOW_ITEM_TYPE_SCTP:
+            sctp_spec =
+                (const struct rte_flow_item_sctp *)item->spec;
+            sctp_mask =
+                (const struct rte_flow_item_sctp *)item->mask;
+            if (!sctp_spec || !sctp_mask) {
+                rte_flow_error_set(error, EINVAL,
+                           RTE_FLOW_ERROR_TYPE_ITEM,
+                           item,
+                           "NULL SCTP spec/mask");
+                return -rte_errno;
+            }
+
+            /* src/dst port and verification tag must be fully
+             * matched; nothing else may be. (Error strings fixed:
+             * they previously said "UDP".)
+             */
+            if (sctp_mask->hdr.cksum) {
+                rte_flow_error_set(error, EINVAL,
+                           RTE_FLOW_ERROR_TYPE_ITEM,
+                           item,
+                           "Invalid SCTP mask");
+                return -rte_errno;
+            }
+
+            if (sctp_mask->hdr.src_port != UINT16_MAX ||
+                sctp_mask->hdr.dst_port != UINT16_MAX ||
+                sctp_mask->hdr.tag != UINT32_MAX) {
+                rte_flow_error_set(error, EINVAL,
+                           RTE_FLOW_ERROR_TYPE_ITEM,
+                           item,
+                           "Invalid SCTP mask");
+                return -rte_errno;
+            }
+            input_set |= I40E_INSET_SRC_PORT;
+            input_set |= I40E_INSET_DST_PORT;
+            input_set |= I40E_INSET_SCTP_VT;
+
+            /* Get filter info */
+            if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
+                flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_SCTP;
+                filter->input.flow.sctp4_flow.src_port =
+                    sctp_spec->hdr.src_port;
+                filter->input.flow.sctp4_flow.dst_port =
+                    sctp_spec->hdr.dst_port;
+                filter->input.flow.sctp4_flow.verify_tag =
+                    sctp_spec->hdr.tag;
+            } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
+                flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_SCTP;
+                filter->input.flow.sctp6_flow.src_port =
+                    sctp_spec->hdr.src_port;
+                filter->input.flow.sctp6_flow.dst_port =
+                    sctp_spec->hdr.dst_port;
+                filter->input.flow.sctp6_flow.verify_tag =
+                    sctp_spec->hdr.tag;
+            }
+            break;
+        case RTE_FLOW_ITEM_TYPE_VF:
+            vf_spec = (const struct rte_flow_item_vf *)item->spec;
+            /* Guard against a protocol-only VF item; the original
+             * dereferenced a possibly-NULL spec.
+             */
+            if (!vf_spec) {
+                rte_flow_error_set(error, EINVAL,
+                           RTE_FLOW_ERROR_TYPE_ITEM,
+                           item,
+                           "NULL VF spec for FDIR.");
+                return -rte_errno;
+            }
+            filter->input.flow_ext.is_vf = 1;
+            filter->input.flow_ext.dst_id = vf_spec->id;
+            if (filter->input.flow_ext.dst_id >= pf->vf_num) {
+                rte_flow_error_set(error, EINVAL,
+                           RTE_FLOW_ERROR_TYPE_ITEM,
+                           item,
+                           "Invalid VF ID for FDIR.");
+                return -rte_errno;
+            }
+            break;
+        default:
+            break;
+        }
+    }
+
+    pctype = i40e_flowtype_to_pctype(flow_type);
+    if (pctype == 0 || pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD) {
+        rte_flow_error_set(error, EINVAL,
+                   RTE_FLOW_ERROR_TYPE_ITEM, item,
+                   "Unsupported flow type");
+        return -rte_errno;
+    }
+
+    if (input_set != i40e_get_default_input_set(pctype)) {
+        rte_flow_error_set(error, EINVAL,
+                   RTE_FLOW_ERROR_TYPE_ITEM, item,
+                   "Invalid input set.");
+        return -rte_errno;
+    }
+    filter->input.flow_type = flow_type;
+
+    return 0;
+}
+
+/* Parse to get the action info of a FDIR filter.
+ * Accepted action lists: QUEUE [MARK] END or DROP [MARK] END.
+ * Returns 0 on success, -rte_errno otherwise (rte_flow error set).
+ */
+static int
+i40e_parse_fdir_act(struct rte_eth_dev *dev,
+            const struct rte_flow_action *actions,
+            struct rte_flow_error *error,
+            struct rte_eth_fdir_filter *filter)
+{
+    struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+    const struct rte_flow_action *act;
+    const struct rte_flow_action_queue *act_q;
+    const struct rte_flow_action_mark *mark_spec;
+    uint32_t index = 0;
+
+    /* Check if the first non-void action is QUEUE or DROP. */
+    NEXT_ITEM_OF_ACTION(act, actions, index);
+    if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
+        act->type != RTE_FLOW_ACTION_TYPE_DROP) {
+        rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+                   act, "Invalid action.");
+        return -rte_errno;
+    }
+
+    filter->action.flex_off = 0;
+    if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
+        /* Dereference act->conf only for QUEUE: a DROP action has
+         * no configuration and act->conf may be NULL (the original
+         * read act_q->index unconditionally and could crash here).
+         */
+        act_q = (const struct rte_flow_action_queue *)act->conf;
+        filter->action.behavior = RTE_ETH_FDIR_ACCEPT;
+        filter->action.rx_queue = act_q->index;
+        if (filter->action.rx_queue >= pf->dev_data->nb_rx_queues) {
+            rte_flow_error_set(error, EINVAL,
+                       RTE_FLOW_ERROR_TYPE_ACTION, act,
+                       "Invalid queue ID for FDIR.");
+            return -rte_errno;
+        }
+    } else {
+        filter->action.behavior = RTE_ETH_FDIR_REJECT;
+        filter->action.rx_queue = 0;
+    }
+
+    filter->action.report_status = RTE_ETH_FDIR_REPORT_ID;
+
+    /* Check if the next non-void item is MARK or END. */
+    index++;
+    NEXT_ITEM_OF_ACTION(act, actions, index);
+    if (act->type != RTE_FLOW_ACTION_TYPE_MARK &&
+        act->type != RTE_FLOW_ACTION_TYPE_END) {
+        rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+                   act, "Invalid action.");
+        return -rte_errno;
+    }
+
+    if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
+        mark_spec = (const struct rte_flow_action_mark *)act->conf;
+        filter->soft_id = mark_spec->id;
+
+        /* Check if the next non-void item is END */
+        index++;
+        NEXT_ITEM_OF_ACTION(act, actions, index);
+        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+            rte_flow_error_set(error, EINVAL,
+                       RTE_FLOW_ERROR_TYPE_ACTION,
+                       act, "Invalid action.");
+            return -rte_errno;
+        }
+    }
+
+    return 0;
+}
+
+static int
i40e_flow_validate(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v3 09/17] net/i40e: parse tunnel filter
2016-12-29 16:04 ` [dpdk-dev] [PATCH v3 00/17] net/i40e: consistent filter API Beilei Xing
` (7 preceding siblings ...)
2016-12-29 16:04 ` [dpdk-dev] [PATCH v3 08/17] net/i40e: parse flow director filter Beilei Xing
@ 2016-12-29 16:04 ` Beilei Xing
2016-12-29 16:04 ` [dpdk-dev] [PATCH v3 10/17] net/i40e: add flow create function Beilei Xing
` (8 subsequent siblings)
17 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2016-12-29 16:04 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
This patch adds i40e_parse_tunnel_filter to check if
a rule is a tunnel rule according to items of the flow
pattern, and the function also gets the tunnel info.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_flow.c | 394 +++++++++++++++++++++++++++++++++++++++++++
1 file changed, 394 insertions(+)
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index 7b872c9..53bfb2b 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -54,6 +54,8 @@
#define I40E_IPV4_TC_SHIFT 4
#define I40E_IPV6_TC_MASK (0x00FF << I40E_IPV4_TC_SHIFT)
#define I40E_IPV6_FRAG_HEADER 44
+#define I40E_TENANT_ARRAY_NUM 3
+#define I40E_TCI_MASK 0x0FFF
static int i40e_flow_validate(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
@@ -76,6 +78,14 @@ static int i40e_parse_fdir_act(struct rte_eth_dev *dev,
const struct rte_flow_action *actions,
struct rte_flow_error *error,
struct rte_eth_fdir_filter *filter);
+static int i40e_parse_tunnel_pattern(__rte_unused struct rte_eth_dev *dev,
+ const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ struct rte_eth_tunnel_filter_conf *filter);
+static int i40e_parse_tunnel_act(struct rte_eth_dev *dev,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error,
+ struct rte_eth_tunnel_filter_conf *filter);
static int i40e_parse_attr(const struct rte_flow_attr *attr,
struct rte_flow_error *error);
@@ -192,6 +202,45 @@ static enum rte_flow_item_type pattern_fdir_ipv6_sctp_ext[] = {
RTE_FLOW_ITEM_TYPE_END,
};
+/* Patterns matched by the tunnel (VXLAN) filter:
+ * _1/_2: outer IPv4/IPv6, inner ETH only.
+ * _3/_4: outer IPv4/IPv6, inner ETH followed by VLAN.
+ */
+static enum rte_flow_item_type pattern_vxlan_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_VXLAN,
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_vxlan_2[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_VXLAN,
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_vxlan_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_VXLAN,
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_vxlan_4[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_VXLAN,
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
static int
i40e_parse_ethertype_filter(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
@@ -257,6 +306,33 @@ i40e_parse_fdir_filter(struct rte_eth_dev *dev,
return 0;
}
+/* Top-level parser for tunnel rules: fills @filter->tunnel_filter from
+ * the pattern, then validates the actions and the attributes.
+ * Returns 0 on success, a negative errno otherwise (rte_flow error set
+ * by the failing sub-parser).
+ */
+static int
+i40e_parse_tunnel_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter)
+{
+ struct rte_eth_tunnel_filter_conf *tunnel_filter =
+ &filter->tunnel_filter;
+ int ret;
+
+ ret = i40e_parse_tunnel_pattern(dev, pattern, error, tunnel_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_parse_tunnel_act(dev, actions, error, tunnel_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_parse_attr(attr, error);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
static struct i40e_valid_pattern i40e_supported_patterns[] = {
/* Ethertype */
{ pattern_ethertype, i40e_parse_ethertype_filter },
@@ -277,6 +353,11 @@ static struct i40e_valid_pattern i40e_supported_patterns[] = {
{ pattern_fdir_ipv6_tcp_ext, i40e_parse_fdir_filter },
{ pattern_fdir_ipv6_sctp, i40e_parse_fdir_filter },
{ pattern_fdir_ipv6_sctp_ext, i40e_parse_fdir_filter },
+ /* tunnel */
+ { pattern_vxlan_1, i40e_parse_tunnel_filter },
+ { pattern_vxlan_2, i40e_parse_tunnel_filter },
+ { pattern_vxlan_3, i40e_parse_tunnel_filter },
+ { pattern_vxlan_4, i40e_parse_tunnel_filter },
};
#define NEXT_ITEM_OF_ACTION(act, actions, index) \
@@ -991,6 +1072,319 @@ i40e_parse_fdir_act(struct rte_eth_dev *dev,
return 0;
}
+/* Parse to get the action info of a tunnel filter.
+ * Only a single QUEUE action followed by END is accepted; the queue
+ * index must be below the port's Rx queue count.
+ * Returns 0 on success, -rte_errno otherwise (rte_flow error set).
+ */
+static int i40e_parse_tunnel_act(struct rte_eth_dev *dev,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error,
+ struct rte_eth_tunnel_filter_conf *filter)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ const struct rte_flow_action *act;
+ const struct rte_flow_action_queue *act_q;
+ uint32_t index = 0;
+
+ /* Check if the first non-void action is QUEUE. */
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ act_q = (const struct rte_flow_action_queue *)act->conf;
+ filter->queue_id = act_q->index;
+ if (filter->queue_id >= pf->dev_data->nb_rx_queues) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Invalid queue ID for tunnel filter");
+ return -rte_errno;
+ }
+
+ /* Check if the next non-void item is END */
+ index++;
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+/* Classify a VNI/TNI mask of I40E_TENANT_ARRAY_NUM bytes.
+ * Returns 0 when every byte is 0xFF (tenant id fully specified),
+ * 1 when every byte is 0x00 (tenant id wildcarded/masked out),
+ * -EINVAL for any mixed or partial mask.
+ */
+static int
+i40e_check_tenant_id_mask(const uint8_t *mask)
+{
+ uint32_t j;
+ int is_masked = 0;
+
+ for (j = 0; j < I40E_TENANT_ARRAY_NUM; j++) {
+ if (*(mask + j) == UINT8_MAX) {
+ /* comparing with the previous byte rejects mixed masks */
+ if (j > 0 && (*(mask + j) != *(mask + j - 1)))
+ return -EINVAL;
+ is_masked = 0;
+ } else if (*(mask + j) == 0) {
+ if (j > 0 && (*(mask + j) != *(mask + j - 1)))
+ return -EINVAL;
+ is_masked = 1;
+ } else {
+ return -EINVAL;
+ }
+ }
+
+ return is_masked;
+}
+
+/* Parse a VXLAN tunnel pattern into @filter.
+ * The item loop records the outer/inner MAC, optional inner VLAN and
+ * the VNI; the trailer derives the tunnel filter type from which of
+ * those were supplied.
+ * Returns 0 on success, -rte_errno otherwise (rte_flow error set).
+ */
+static int
+i40e_parse_vxlan_pattern(const struct rte_flow_item *pattern,
+             struct rte_flow_error *error,
+             struct rte_eth_tunnel_filter_conf *filter)
+{
+    const struct rte_flow_item *item = pattern;
+    const struct rte_flow_item_eth *eth_spec;
+    const struct rte_flow_item_eth *eth_mask;
+    /* All spec/mask pointers below start NULL so the filter-type
+     * classification at the end never reads indeterminate values
+     * (the original left them uninitialized).
+     */
+    const struct rte_flow_item_eth *o_eth_spec = NULL;
+    const struct rte_flow_item_eth *o_eth_mask = NULL;
+    const struct rte_flow_item_vxlan *vxlan_spec = NULL;
+    const struct rte_flow_item_vxlan *vxlan_mask = NULL;
+    const struct rte_flow_item_eth *i_eth_spec = NULL;
+    const struct rte_flow_item_eth *i_eth_mask = NULL;
+    const struct rte_flow_item_vlan *vlan_spec = NULL;
+    const struct rte_flow_item_vlan *vlan_mask = NULL;
+    bool is_vni_masked = 0;
+    int vni_mask_ret;
+    enum rte_flow_item_type item_type;
+    bool vxlan_flag = 0; /* set once the VXLAN item has been seen */
+
+    for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+        if (item->last) {
+            rte_flow_error_set(error, EINVAL,
+                       RTE_FLOW_ERROR_TYPE_ITEM,
+                       item,
+                       "Not support range");
+            return -rte_errno;
+        }
+        item_type = item->type;
+        switch (item_type) {
+        case RTE_FLOW_ITEM_TYPE_ETH:
+            eth_spec = (const struct rte_flow_item_eth *)item->spec;
+            eth_mask = (const struct rte_flow_item_eth *)item->mask;
+            /* spec and mask must be supplied together or not at all */
+            if ((!eth_spec && eth_mask) ||
+                (eth_spec && !eth_mask)) {
+                rte_flow_error_set(error, EINVAL,
+                           RTE_FLOW_ERROR_TYPE_ITEM,
+                           item,
+                           "Invalid ether spec/mask");
+                return -rte_errno;
+            }
+
+            if (eth_spec && eth_mask) {
+                /* DST address of inner MAC shouldn't be masked.
+                 * SRC address of Inner MAC should be masked.
+                 */
+                if (!is_broadcast_ether_addr(&eth_mask->dst) ||
+                    !is_zero_ether_addr(&eth_mask->src) ||
+                    eth_mask->type) {
+                    rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ITEM,
+                               item,
+                               "Invalid ether spec/mask");
+                    return -rte_errno;
+                }
+
+                /* Before the VXLAN item this is the outer MAC,
+                 * afterwards the inner MAC.
+                 */
+                if (!vxlan_flag)
+                    rte_memcpy(&filter->outer_mac,
+                           &eth_spec->dst,
+                           ETHER_ADDR_LEN);
+                else
+                    rte_memcpy(&filter->inner_mac,
+                           &eth_spec->dst,
+                           ETHER_ADDR_LEN);
+            }
+
+            if (!vxlan_flag) {
+                o_eth_spec = eth_spec;
+                o_eth_mask = eth_mask;
+            } else {
+                i_eth_spec = eth_spec;
+                i_eth_mask = eth_mask;
+            }
+
+            break;
+        case RTE_FLOW_ITEM_TYPE_VLAN:
+            /* Single assignment; the original assigned these twice. */
+            vlan_spec =
+                (const struct rte_flow_item_vlan *)item->spec;
+            vlan_mask =
+                (const struct rte_flow_item_vlan *)item->mask;
+            if (vxlan_flag) {
+                /* Inner VLAN needs both spec and mask. */
+                if (!(vlan_spec && vlan_mask)) {
+                    rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ITEM,
+                               item,
+                               "Invalid vlan item");
+                    return -rte_errno;
+                }
+            } else {
+                /* Outer VLAN matching is not supported. Braces
+                 * added: the original's return sat outside the
+                 * if body and executed unconditionally.
+                 */
+                if (vlan_spec || vlan_mask) {
+                    rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ITEM,
+                               item,
+                               "Invalid vlan item");
+                    return -rte_errno;
+                }
+            }
+            break;
+        case RTE_FLOW_ITEM_TYPE_IPV4:
+        case RTE_FLOW_ITEM_TYPE_IPV6:
+        case RTE_FLOW_ITEM_TYPE_UDP:
+            /* IPv4/IPv6/UDP are used to describe protocol,
+             * spec and mask should be NULL.
+             */
+            if (item->spec || item->mask) {
+                rte_flow_error_set(error, EINVAL,
+                           RTE_FLOW_ERROR_TYPE_ITEM,
+                           item,
+                           "Invalid IPv4/IPv6/UDP item");
+                return -rte_errno;
+            }
+            break;
+        case RTE_FLOW_ITEM_TYPE_VXLAN:
+            vxlan_spec =
+                (const struct rte_flow_item_vxlan *)item->spec;
+            vxlan_mask =
+                (const struct rte_flow_item_vxlan *)item->mask;
+            /* Check if VXLAN item is used to describe protocol.
+             * If yes, both spec and mask should be NULL.
+             * If no, either spec or mask shouldn't be NULL.
+             */
+            if ((!vxlan_spec && vxlan_mask) ||
+                (vxlan_spec && !vxlan_mask)) {
+                rte_flow_error_set(error, EINVAL,
+                           RTE_FLOW_ERROR_TYPE_ITEM,
+                           item,
+                           "Invalid VXLAN item");
+                return -rte_errno;
+            }
+
+            /* Check if VNI is masked. Keep the tri-state result in
+             * an int first: the original stored it straight into a
+             * bool, which turned -EINVAL into 'true' and made the
+             * error branch below unreachable.
+             */
+            if (vxlan_mask) {
+                vni_mask_ret =
+                    i40e_check_tenant_id_mask(vxlan_mask->vni);
+                if (vni_mask_ret < 0) {
+                    rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ITEM,
+                               item,
+                               "Invalid VNI mask");
+                    return -rte_errno;
+                }
+                is_vni_masked = vni_mask_ret;
+            }
+            vxlan_flag = 1;
+            break;
+        default:
+            break;
+        }
+    }
+
+    /* Check specification and mask to get the filter type */
+    if (vlan_spec && vlan_mask &&
+        (vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK))) {
+        /* If there's inner vlan */
+        filter->inner_vlan = rte_be_to_cpu_16(vlan_spec->tci)
+            & I40E_TCI_MASK;
+        if (vxlan_spec && vxlan_mask && !is_vni_masked) {
+            /* If there's vxlan */
+            rte_memcpy(&filter->tenant_id, vxlan_spec->vni,
+                   RTE_DIM(vxlan_spec->vni));
+            if (!o_eth_spec && !o_eth_mask &&
+                i_eth_spec && i_eth_mask)
+                filter->filter_type =
+                    RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID;
+            else {
+                rte_flow_error_set(error, EINVAL,
+                           RTE_FLOW_ERROR_TYPE_ITEM,
+                           NULL,
+                           "Invalid filter type");
+                return -rte_errno;
+            }
+        } else if (!vxlan_spec && !vxlan_mask) {
+            /* If there's no vxlan */
+            if (!o_eth_spec && !o_eth_mask &&
+                i_eth_spec && i_eth_mask)
+                filter->filter_type =
+                    RTE_TUNNEL_FILTER_IMAC_IVLAN;
+            else {
+                rte_flow_error_set(error, EINVAL,
+                           RTE_FLOW_ERROR_TYPE_ITEM,
+                           NULL,
+                           "Invalid filter type");
+                return -rte_errno;
+            }
+        } else {
+            rte_flow_error_set(error, EINVAL,
+                       RTE_FLOW_ERROR_TYPE_ITEM,
+                       NULL,
+                       "Invalid filter type");
+            return -rte_errno;
+        }
+    } else if ((!vlan_spec && !vlan_mask) ||
+           (vlan_spec && vlan_mask && vlan_mask->tci == 0x0)) {
+        /* If there's no inner vlan */
+        if (vxlan_spec && vxlan_mask && !is_vni_masked) {
+            /* If there's vxlan */
+            rte_memcpy(&filter->tenant_id, vxlan_spec->vni,
+                   RTE_DIM(vxlan_spec->vni));
+            if (!o_eth_spec && !o_eth_mask &&
+                i_eth_spec && i_eth_mask)
+                filter->filter_type =
+                    RTE_TUNNEL_FILTER_IMAC_TENID;
+            else if (o_eth_spec && o_eth_mask &&
+                 i_eth_spec && i_eth_mask)
+                filter->filter_type =
+                    RTE_TUNNEL_FILTER_OMAC_TENID_IMAC;
+        } else if (!vxlan_spec && !vxlan_mask) {
+            /* If there's no vxlan */
+            if (!o_eth_spec && !o_eth_mask &&
+                i_eth_spec && i_eth_mask) {
+                filter->filter_type = ETH_TUNNEL_FILTER_IMAC;
+            } else {
+                rte_flow_error_set(error, EINVAL,
+                           RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+                           "Invalid filter type");
+                return -rte_errno;
+            }
+        } else {
+            rte_flow_error_set(error, EINVAL,
+                       RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+                       "Invalid filter type");
+            return -rte_errno;
+        }
+    } else {
+        rte_flow_error_set(error, EINVAL,
+                   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+                   "Not supported by tunnel filter.");
+        return -rte_errno;
+    }
+
+    filter->tunnel_type = RTE_TUNNEL_TYPE_VXLAN;
+
+    return 0;
+}
+
+static int
+i40e_parse_tunnel_pattern(__rte_unused struct rte_eth_dev *dev,
+              const struct rte_flow_item *pattern,
+              struct rte_flow_error *error,
+              struct rte_eth_tunnel_filter_conf *filter)
+{
+    /* Only VXLAN tunnel patterns are supported at the moment, so
+     * delegate straight to the VXLAN parser and propagate its result.
+     */
+    return i40e_parse_vxlan_pattern(pattern, error, filter);
+}
+
static int
i40e_flow_validate(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v3 10/17] net/i40e: add flow create function
2016-12-29 16:04 ` [dpdk-dev] [PATCH v3 00/17] net/i40e: consistent filter API Beilei Xing
` (8 preceding siblings ...)
2016-12-29 16:04 ` [dpdk-dev] [PATCH v3 09/17] net/i40e: parse tunnel filter Beilei Xing
@ 2016-12-29 16:04 ` Beilei Xing
2016-12-29 16:04 ` [dpdk-dev] [PATCH v3 11/17] net/i40e: add flow destroy function Beilei Xing
` (7 subsequent siblings)
17 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2016-12-29 16:04 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
This patch adds the i40e_flow_create function to create a
rule. It checks whether a flow matches an ethertype filter,
a flow director filter, or a tunnel filter; if the flow
matches one of them, the corresponding filter is programmed
into the hardware.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_ethdev.c | 9 +++--
drivers/net/i40e/i40e_ethdev.h | 21 ++++++++++++
drivers/net/i40e/i40e_fdir.c | 2 +-
drivers/net/i40e/i40e_flow.c | 77 ++++++++++++++++++++++++++++++++++++++++++
4 files changed, 103 insertions(+), 6 deletions(-)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 4a057b4..fa59ee4 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -353,9 +353,6 @@ static int i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
static int i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
struct rte_eth_udp_tunnel *udp_tunnel);
static void i40e_filter_input_set_init(struct i40e_pf *pf);
-static int i40e_ethertype_filter_set(struct i40e_pf *pf,
- struct rte_eth_ethertype_filter *filter,
- bool add);
static int i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
enum rte_filter_op filter_op,
void *arg);
@@ -1233,6 +1230,8 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
goto err_fdir_hash_map_alloc;
}
+ TAILQ_INIT(&pf->flow_list);
+
return 0;
err_fdir_hash_map_alloc:
@@ -6611,7 +6610,7 @@ i40e_sw_tunnel_filter_del(struct i40e_pf *pf,
return 0;
}
-static int
+int
i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
struct rte_eth_tunnel_filter_conf *tunnel_filter,
uint8_t add)
@@ -8256,7 +8255,7 @@ i40e_sw_ethertype_filter_del(struct i40e_pf *pf,
* Configure ethertype filter, which can director packet by filtering
* with mac address and ether_type or only ether_type
*/
-static int
+int
i40e_ethertype_filter_set(struct i40e_pf *pf,
struct rte_eth_ethertype_filter *filter,
bool add)
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 9e3a48d..b33910d 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -536,6 +536,17 @@ struct i40e_mirror_rule {
TAILQ_HEAD(i40e_mirror_rule_list, i40e_mirror_rule);
/*
+ * Struct to store flow created.
+ */
+struct i40e_flow {
+ TAILQ_ENTRY(i40e_flow) node; /* linkage in pf->flow_list */
+ enum rte_filter_type filter_type; /* kind of filter @rule describes */
+ void *rule; /* filter-specific rule data; type depends on filter_type */
+};
+
+TAILQ_HEAD(i40e_flow_list, i40e_flow); /* list head type for pf->flow_list */
+
+/*
* Structure to store private data specific for PF instance.
*/
struct i40e_pf {
@@ -592,6 +603,7 @@ struct i40e_pf {
bool floating_veb; /* The flag to use the floating VEB */
/* The floating enable flag for the specific VF */
bool floating_veb_list[I40E_MAX_VF];
+ struct i40e_flow_list flow_list;
};
enum pending_msg {
@@ -767,6 +779,15 @@ i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_rule *tunnel_rule,
int i40e_sw_tunnel_filter_del(struct i40e_pf *pf,
struct i40e_tunnel_filter_input *input);
uint64_t i40e_get_default_input_set(uint16_t pctype);
+int i40e_ethertype_filter_set(struct i40e_pf *pf,
+ struct rte_eth_ethertype_filter *filter,
+ bool add);
+int i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
+ const struct rte_eth_fdir_filter *filter,
+ bool add);
+int i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
+ struct rte_eth_tunnel_filter_conf *tunnel_filter,
+ uint8_t add);
/* I40E_DEV_PRIVATE_TO */
#define I40E_DEV_PRIVATE_TO_PF(adapter) \
diff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c
index 78ce92c..e1f97bb 100644
--- a/drivers/net/i40e/i40e_fdir.c
+++ b/drivers/net/i40e/i40e_fdir.c
@@ -1099,7 +1099,7 @@ i40e_sw_fdir_filter_del(struct i40e_pf *pf, struct rte_eth_fdir_input *input)
* @filter: fdir filter entry
* @add: 0 - delete, 1 - add
*/
-static int
+int
i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
const struct rte_eth_fdir_filter *filter,
bool add)
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index 53bfb2b..8db8e0f 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -62,6 +62,11 @@ static int i40e_flow_validate(struct rte_eth_dev *dev,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
struct rte_flow_error *error);
+static struct rte_flow *i40e_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error);
static int i40e_parse_ethertype_pattern(__rte_unused struct rte_eth_dev *dev,
const struct rte_flow_item *pattern,
struct rte_flow_error *error,
@@ -91,9 +96,11 @@ static int i40e_parse_attr(const struct rte_flow_attr *attr,
const struct rte_flow_ops i40e_flow_ops = {
.validate = i40e_flow_validate,
+ .create = i40e_flow_create,
};
union i40e_filter_t cons_filter;
+enum rte_filter_type cons_filter_type = RTE_ETH_FILTER_NONE;
/* Pattern matched ethertype filter */
static enum rte_flow_item_type pattern_ethertype[] = {
@@ -267,6 +274,8 @@ i40e_parse_ethertype_filter(struct rte_eth_dev *dev,
if (ret)
return ret;
+ cons_filter_type = RTE_ETH_FILTER_ETHERTYPE;
+
return ret;
}
@@ -294,6 +303,8 @@ i40e_parse_fdir_filter(struct rte_eth_dev *dev,
if (ret)
return ret;
+ cons_filter_type = RTE_ETH_FILTER_FDIR;
+
if (dev->data->dev_conf.fdir_conf.mode !=
RTE_FDIR_MODE_PERFECT) {
rte_flow_error_set(error, ENOTSUP,
@@ -330,6 +341,8 @@ i40e_parse_tunnel_filter(struct rte_eth_dev *dev,
if (ret)
return ret;
+ cons_filter_type = RTE_ETH_FILTER_TUNNEL;
+
return ret;
}
@@ -1446,3 +1459,67 @@ i40e_flow_validate(struct rte_eth_dev *dev,
return ret;
}
+
+static struct rte_flow *
+i40e_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_flow *flow;
+ int ret;
+
+ ret = i40e_flow_validate(dev, attr, pattern, actions, error);
+ if (ret < 0)
+ return NULL;
+
+ flow = rte_zmalloc("i40e_flow", sizeof(struct i40e_flow), 0);
+ if (!flow) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to allocate memory");
+ return (struct rte_flow *)flow;
+ }
+
+ switch (cons_filter_type) {
+ case RTE_ETH_FILTER_ETHERTYPE:
+ ret = i40e_ethertype_filter_set(pf,
+ &cons_filter.ethertype_filter, 1);
+ if (ret)
+ goto free_flow;
+ flow->rule = TAILQ_LAST(&pf->ethertype.ethertype_list,
+ i40e_ethertype_filter_list);
+ break;
+ case RTE_ETH_FILTER_FDIR:
+ ret = i40e_add_del_fdir_filter(dev,
+ &cons_filter.fdir_filter, 1);
+ if (ret)
+ goto free_flow;
+ flow->rule = TAILQ_LAST(&pf->fdir.fdir_list,
+ i40e_fdir_filter_list);
+ break;
+ case RTE_ETH_FILTER_TUNNEL:
+ ret = i40e_dev_tunnel_filter_set(pf,
+ &cons_filter.tunnel_filter, 1);
+ if (ret)
+ goto free_flow;
+ flow->rule = TAILQ_LAST(&pf->tunnel.tunnel_list,
+ i40e_tunnel_filter_list);
+ break;
+ default:
+ goto free_flow;
+ }
+
+ flow->filter_type = cons_filter_type;
+ TAILQ_INSERT_TAIL(&pf->flow_list, flow, node);
+ return (struct rte_flow *)flow;
+
+free_flow:
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to create flow.");
+ rte_free(flow);
+ return NULL;
+}
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v3 11/17] net/i40e: add flow destroy function
2016-12-29 16:04 ` [dpdk-dev] [PATCH v3 00/17] net/i40e: consistent filter API Beilei Xing
` (9 preceding siblings ...)
2016-12-29 16:04 ` [dpdk-dev] [PATCH v3 10/17] net/i40e: add flow create function Beilei Xing
@ 2016-12-29 16:04 ` Beilei Xing
2016-12-29 16:04 ` [dpdk-dev] [PATCH v3 12/17] net/i40e: destroy ethertype filter Beilei Xing
` (6 subsequent siblings)
17 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2016-12-29 16:04 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
This patch adds i40e_flow_destroy function to destroy
a flow for users.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_flow.c | 33 +++++++++++++++++++++++++++++++++
1 file changed, 33 insertions(+)
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index 8db8e0f..a64ef0e 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -67,6 +67,9 @@ static struct rte_flow *i40e_flow_create(struct rte_eth_dev *dev,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
struct rte_flow_error *error);
+static int i40e_flow_destroy(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ struct rte_flow_error *error);
static int i40e_parse_ethertype_pattern(__rte_unused struct rte_eth_dev *dev,
const struct rte_flow_item *pattern,
struct rte_flow_error *error,
@@ -97,6 +100,7 @@ static int i40e_parse_attr(const struct rte_flow_attr *attr,
const struct rte_flow_ops i40e_flow_ops = {
.validate = i40e_flow_validate,
.create = i40e_flow_create,
+ .destroy = i40e_flow_destroy,
};
union i40e_filter_t cons_filter;
@@ -1523,3 +1527,32 @@ i40e_flow_create(struct rte_eth_dev *dev,
rte_free(flow);
return NULL;
}
+
+static int
+i40e_flow_destroy(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_flow *pmd_flow = (struct i40e_flow *)flow;
+ enum rte_filter_type filter_type = pmd_flow->filter_type;
+ int ret = 0;
+
+ switch (filter_type) {
+ default:
+ PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
+ filter_type);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (!ret) {
+ TAILQ_REMOVE(&pf->flow_list, pmd_flow, node);
+ rte_free(pmd_flow);
+ } else
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to destroy flow.");
+
+ return ret;
+}
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v3 12/17] net/i40e: destroy ethertype filter
2016-12-29 16:04 ` [dpdk-dev] [PATCH v3 00/17] net/i40e: consistent filter API Beilei Xing
` (10 preceding siblings ...)
2016-12-29 16:04 ` [dpdk-dev] [PATCH v3 11/17] net/i40e: add flow destroy function Beilei Xing
@ 2016-12-29 16:04 ` Beilei Xing
2016-12-29 16:04 ` [dpdk-dev] [PATCH v3 13/17] net/i40e: destroy tunnel filter Beilei Xing
` (5 subsequent siblings)
17 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2016-12-29 16:04 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
This patch adds i40e_dev_destroy_ethertype_filter function
to destroy an ethertype filter for users.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_flow.c | 42 ++++++++++++++++++++++++++++++++++++++++++
1 file changed, 42 insertions(+)
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index a64ef0e..a41d4c2 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -49,6 +49,7 @@
#include "i40e_logs.h"
#include "base/i40e_type.h"
+#include "base/i40e_prototype.h"
#include "i40e_ethdev.h"
#define I40E_IPV4_TC_SHIFT 4
@@ -96,6 +97,8 @@ static int i40e_parse_tunnel_act(struct rte_eth_dev *dev,
struct rte_eth_tunnel_filter_conf *filter);
static int i40e_parse_attr(const struct rte_flow_attr *attr,
struct rte_flow_error *error);
+static int i40e_dev_destroy_ethertype_filter(struct i40e_pf *pf,
+ struct i40e_ethertype_filter *filter);
const struct rte_flow_ops i40e_flow_ops = {
.validate = i40e_flow_validate,
@@ -1539,6 +1542,10 @@ i40e_flow_destroy(struct rte_eth_dev *dev,
int ret = 0;
switch (filter_type) {
+ case RTE_ETH_FILTER_ETHERTYPE:
+ ret = i40e_dev_destroy_ethertype_filter(pf,
+ (struct i40e_ethertype_filter *)pmd_flow->rule);
+ break;
default:
PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
filter_type);
@@ -1556,3 +1563,38 @@ i40e_flow_destroy(struct rte_eth_dev *dev,
return ret;
}
+
+static int
+i40e_dev_destroy_ethertype_filter(struct i40e_pf *pf,
+ struct i40e_ethertype_filter *filter)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
+ struct i40e_ethertype_filter *node;
+ struct i40e_control_filter_stats stats;
+ uint16_t flags = 0;
+ int ret = 0;
+
+ if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
+ flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
+ if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
+ flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
+ flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
+
+ memset(&stats, 0, sizeof(stats));
+ ret = i40e_aq_add_rem_control_packet_filter(hw,
+ filter->input.mac_addr.addr_bytes,
+ filter->input.ether_type,
+ flags, pf->main_vsi->seid,
+ filter->queue, 0, &stats, NULL);
+ if (ret < 0)
+ return ret;
+
+ node = i40e_sw_ethertype_filter_lookup(ethertype_rule, &filter->input);
+ if (!node)
+ return -EINVAL;
+
+ ret = i40e_sw_ethertype_filter_del(pf, &node->input);
+
+ return ret;
+}
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v3 13/17] net/i40e: destroy tunnel filter
2016-12-29 16:04 ` [dpdk-dev] [PATCH v3 00/17] net/i40e: consistent filter API Beilei Xing
` (11 preceding siblings ...)
2016-12-29 16:04 ` [dpdk-dev] [PATCH v3 12/17] net/i40e: destroy ethertype filter Beilei Xing
@ 2016-12-29 16:04 ` Beilei Xing
2016-12-29 16:04 ` [dpdk-dev] [PATCH v3 14/17] net/i40e: destroy flow directory filter Beilei Xing
` (4 subsequent siblings)
17 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2016-12-29 16:04 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
This patch adds i40e_dev_destroy_tunnel_filter function
to destroy a tunnel filter for users.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_flow.c | 41 +++++++++++++++++++++++++++++++++++++++++
1 file changed, 41 insertions(+)
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index a41d4c2..e442c5c 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -99,6 +99,8 @@ static int i40e_parse_attr(const struct rte_flow_attr *attr,
struct rte_flow_error *error);
static int i40e_dev_destroy_ethertype_filter(struct i40e_pf *pf,
struct i40e_ethertype_filter *filter);
+static int i40e_dev_destroy_tunnel_filter(struct i40e_pf *pf,
+ struct i40e_tunnel_filter *filter);
const struct rte_flow_ops i40e_flow_ops = {
.validate = i40e_flow_validate,
@@ -1546,6 +1548,10 @@ i40e_flow_destroy(struct rte_eth_dev *dev,
ret = i40e_dev_destroy_ethertype_filter(pf,
(struct i40e_ethertype_filter *)pmd_flow->rule);
break;
+ case RTE_ETH_FILTER_TUNNEL:
+ ret = i40e_dev_destroy_tunnel_filter(pf,
+ (struct i40e_tunnel_filter *)pmd_flow->rule);
+ break;
default:
PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
filter_type);
@@ -1598,3 +1604,38 @@ i40e_dev_destroy_ethertype_filter(struct i40e_pf *pf,
return ret;
}
+
+static int
+i40e_dev_destroy_tunnel_filter(struct i40e_pf *pf,
+ struct i40e_tunnel_filter *filter)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_vsi *vsi = pf->main_vsi;
+ struct i40e_aqc_add_remove_cloud_filters_element_data cld_filter;
+ struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
+ struct i40e_tunnel_filter *node;
+ int ret = 0;
+
+ memset(&cld_filter, 0, sizeof(cld_filter));
+ ether_addr_copy((struct ether_addr *)&filter->input.outer_mac,
+ (struct ether_addr *)&cld_filter.outer_mac);
+ ether_addr_copy((struct ether_addr *)&filter->input.inner_mac,
+ (struct ether_addr *)&cld_filter.inner_mac);
+ cld_filter.inner_vlan = filter->input.inner_vlan;
+ cld_filter.flags = filter->input.flags;
+ cld_filter.tenant_id = filter->input.tenant_id;
+ cld_filter.queue_number = filter->queue;
+
+ ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
+ &cld_filter, 1);
+ if (ret < 0)
+ return ret;
+
+ node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &filter->input);
+ if (!node)
+ return -EINVAL;
+
+ ret = i40e_sw_tunnel_filter_del(pf, &node->input);
+
+ return ret;
+}
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v3 14/17] net/i40e: destroy flow directory filter
2016-12-29 16:04 ` [dpdk-dev] [PATCH v3 00/17] net/i40e: consistent filter API Beilei Xing
` (12 preceding siblings ...)
2016-12-29 16:04 ` [dpdk-dev] [PATCH v3 13/17] net/i40e: destroy tunnel filter Beilei Xing
@ 2016-12-29 16:04 ` Beilei Xing
2016-12-29 16:04 ` [dpdk-dev] [PATCH v3 15/17] net/i40e: add flow flush function Beilei Xing
` (3 subsequent siblings)
17 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2016-12-29 16:04 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
This patch supports destroying a flow directory filter
for users.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_flow.c | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index e442c5c..605ee70 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -1552,6 +1552,10 @@ i40e_flow_destroy(struct rte_eth_dev *dev,
ret = i40e_dev_destroy_tunnel_filter(pf,
(struct i40e_tunnel_filter *)pmd_flow->rule);
break;
+ case RTE_ETH_FILTER_FDIR:
+ ret = i40e_add_del_fdir_filter(dev,
+ &((struct i40e_fdir_filter *)pmd_flow->rule)->fdir, 0);
+ break;
default:
PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
filter_type);
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v3 15/17] net/i40e: add flow flush function
2016-12-29 16:04 ` [dpdk-dev] [PATCH v3 00/17] net/i40e: consistent filter API Beilei Xing
` (13 preceding siblings ...)
2016-12-29 16:04 ` [dpdk-dev] [PATCH v3 14/17] net/i40e: destroy flow directory filter Beilei Xing
@ 2016-12-29 16:04 ` Beilei Xing
2016-12-29 16:04 ` [dpdk-dev] [PATCH v3 16/17] net/i40e: flush ethertype filters Beilei Xing
` (2 subsequent siblings)
17 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2016-12-29 16:04 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
This patch adds i40e_flow_flush function to flush all
filters for users. And flow director flush function
is involved first.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_ethdev.h | 1 +
drivers/net/i40e/i40e_fdir.c | 4 +---
drivers/net/i40e/i40e_flow.c | 51 ++++++++++++++++++++++++++++++++++++++++++
3 files changed, 53 insertions(+), 3 deletions(-)
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index b33910d..57fd796 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -788,6 +788,7 @@ int i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
int i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
struct rte_eth_tunnel_filter_conf *tunnel_filter,
uint8_t add);
+int i40e_fdir_flush(struct rte_eth_dev *dev);
/* I40E_DEV_PRIVATE_TO */
#define I40E_DEV_PRIVATE_TO_PF(adapter) \
diff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c
index e1f97bb..965e3b2 100644
--- a/drivers/net/i40e/i40e_fdir.c
+++ b/drivers/net/i40e/i40e_fdir.c
@@ -119,8 +119,6 @@ static int i40e_fdir_filter_programming(struct i40e_pf *pf,
enum i40e_filter_pctype pctype,
const struct rte_eth_fdir_filter *filter,
bool add);
-static int i40e_fdir_flush(struct rte_eth_dev *dev);
-
static int i40e_fdir_filter_convert(const struct rte_eth_fdir_filter *input,
struct i40e_fdir_filter *filter);
static struct i40e_fdir_filter *
@@ -1325,7 +1323,7 @@ i40e_fdir_filter_programming(struct i40e_pf *pf,
* i40e_fdir_flush - clear all filters of Flow Director table
* @pf: board private structure
*/
-static int
+int
i40e_fdir_flush(struct rte_eth_dev *dev)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index 605ee70..ee463c9 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -68,6 +68,8 @@ static struct rte_flow *i40e_flow_create(struct rte_eth_dev *dev,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
struct rte_flow_error *error);
+static int i40e_flow_flush(struct rte_eth_dev *dev,
+ struct rte_flow_error *error);
static int i40e_flow_destroy(struct rte_eth_dev *dev,
struct rte_flow *flow,
struct rte_flow_error *error);
@@ -101,11 +103,13 @@ static int i40e_dev_destroy_ethertype_filter(struct i40e_pf *pf,
struct i40e_ethertype_filter *filter);
static int i40e_dev_destroy_tunnel_filter(struct i40e_pf *pf,
struct i40e_tunnel_filter *filter);
+static int i40e_fdir_filter_flush(struct i40e_pf *pf);
const struct rte_flow_ops i40e_flow_ops = {
.validate = i40e_flow_validate,
.create = i40e_flow_create,
.destroy = i40e_flow_destroy,
+ .flush = i40e_flow_flush,
};
union i40e_filter_t cons_filter;
@@ -1643,3 +1647,50 @@ i40e_dev_destroy_tunnel_filter(struct i40e_pf *pf,
return ret;
}
+
+static int
+i40e_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ int ret;
+
+ ret = i40e_fdir_filter_flush(pf);
+ if (ret)
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to flush FDIR flows.");
+
+ return ret;
+}
+
+static int
+i40e_fdir_filter_flush(struct i40e_pf *pf)
+{
+ struct rte_eth_dev *dev = pf->adapter->eth_dev;
+ struct i40e_fdir_info *fdir_info = &pf->fdir;
+ struct i40e_fdir_filter *fdir_filter;
+ struct i40e_flow *flow;
+ void *temp;
+ int ret;
+
+ ret = i40e_fdir_flush(dev);
+ if (!ret) {
+ /* Delete FDIR filters in FDIR list. */
+ while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
+ ret = i40e_sw_fdir_filter_del(pf,
+ &fdir_filter->fdir.input);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Delete FDIR flows in flow list. */
+ TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
+ if (flow->filter_type == RTE_ETH_FILTER_FDIR) {
+ TAILQ_REMOVE(&pf->flow_list, flow, node);
+ rte_free(flow);
+ }
+ }
+ }
+
+ return ret;
+}
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v3 16/17] net/i40e: flush ethertype filters
2016-12-29 16:04 ` [dpdk-dev] [PATCH v3 00/17] net/i40e: consistent filter API Beilei Xing
` (14 preceding siblings ...)
2016-12-29 16:04 ` [dpdk-dev] [PATCH v3 15/17] net/i40e: add flow flush function Beilei Xing
@ 2016-12-29 16:04 ` Beilei Xing
2016-12-29 16:04 ` [dpdk-dev] [PATCH v3 17/17] net/i40e: flush tunnel filters Beilei Xing
2016-12-30 3:25 ` [dpdk-dev] [PATCH v4 00/17] net/i40e: consistent filter API Beilei Xing
17 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2016-12-29 16:04 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
This patch adds i40e_ethertype_filter_flush function
to flush all ethertype filters, including filters in
SW and HW.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_flow.c | 41 ++++++++++++++++++++++++++++++++++++++++-
1 file changed, 40 insertions(+), 1 deletion(-)
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index ee463c9..947199d 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -104,6 +104,7 @@ static int i40e_dev_destroy_ethertype_filter(struct i40e_pf *pf,
static int i40e_dev_destroy_tunnel_filter(struct i40e_pf *pf,
struct i40e_tunnel_filter *filter);
static int i40e_fdir_filter_flush(struct i40e_pf *pf);
+static int i40e_ethertype_filter_flush(struct i40e_pf *pf);
const struct rte_flow_ops i40e_flow_ops = {
.validate = i40e_flow_validate,
@@ -1655,10 +1656,20 @@ i40e_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
int ret;
ret = i40e_fdir_filter_flush(pf);
- if (ret)
+ if (ret) {
rte_flow_error_set(error, -ret,
RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
"Failed to flush FDIR flows.");
+ return -rte_errno;
+ }
+
+ ret = i40e_ethertype_filter_flush(pf);
+ if (ret) {
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to ethertype flush flows.");
+ return -rte_errno;
+ }
return ret;
}
@@ -1694,3 +1705,31 @@ i40e_fdir_filter_flush(struct i40e_pf *pf)
return ret;
}
+
+/* Flush all ethertype filters */
+static int
+i40e_ethertype_filter_flush(struct i40e_pf *pf)
+{
+ struct i40e_ethertype_filter_list
+ *ethertype_list = &pf->ethertype.ethertype_list;
+ struct i40e_ethertype_filter *f;
+ struct i40e_flow *flow;
+ void *temp;
+ int ret = 0;
+
+ while ((f = TAILQ_FIRST(ethertype_list))) {
+ ret = i40e_dev_destroy_ethertype_filter(pf, f);
+ if (ret)
+ return ret;
+ }
+
+ /* Delete ethertype flows in flow list. */
+ TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
+ if (flow->filter_type == RTE_ETH_FILTER_ETHERTYPE) {
+ TAILQ_REMOVE(&pf->flow_list, flow, node);
+ rte_free(flow);
+ }
+ }
+
+ return ret;
+}
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v3 17/17] net/i40e: flush tunnel filters
2016-12-29 16:04 ` [dpdk-dev] [PATCH v3 00/17] net/i40e: consistent filter API Beilei Xing
` (15 preceding siblings ...)
2016-12-29 16:04 ` [dpdk-dev] [PATCH v3 16/17] net/i40e: flush ethertype filters Beilei Xing
@ 2016-12-29 16:04 ` Beilei Xing
2016-12-30 3:25 ` [dpdk-dev] [PATCH v4 00/17] net/i40e: consistent filter API Beilei Xing
17 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2016-12-29 16:04 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
This patch adds i40e_tunnel_filter_flush function
to flush all tunnel filters, including filters in
SW and HW.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_flow.c | 37 +++++++++++++++++++++++++++++++++++++
1 file changed, 37 insertions(+)
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index 947199d..d19d952 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -105,6 +105,7 @@ static int i40e_dev_destroy_tunnel_filter(struct i40e_pf *pf,
struct i40e_tunnel_filter *filter);
static int i40e_fdir_filter_flush(struct i40e_pf *pf);
static int i40e_ethertype_filter_flush(struct i40e_pf *pf);
+static int i40e_tunnel_filter_flush(struct i40e_pf *pf);
const struct rte_flow_ops i40e_flow_ops = {
.validate = i40e_flow_validate,
@@ -1671,6 +1672,14 @@ i40e_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
return -rte_errno;
}
+ ret = i40e_tunnel_filter_flush(pf);
+ if (ret) {
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to flush tunnel flows.");
+ return -rte_errno;
+ }
+
return ret;
}
@@ -1733,3 +1742,31 @@ i40e_ethertype_filter_flush(struct i40e_pf *pf)
return ret;
}
+
+/* Flush all tunnel filters */
+static int
+i40e_tunnel_filter_flush(struct i40e_pf *pf)
+{
+ struct i40e_tunnel_filter_list
+ *tunnel_list = &pf->tunnel.tunnel_list;
+ struct i40e_tunnel_filter *f;
+ struct i40e_flow *flow;
+ void *temp;
+ int ret = 0;
+
+ while ((f = TAILQ_FIRST(tunnel_list))) {
+ ret = i40e_dev_destroy_tunnel_filter(pf, f);
+ if (ret)
+ return ret;
+ }
+
+ /* Delete tunnel flows in flow list. */
+ TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
+ if (flow->filter_type == RTE_ETH_FILTER_TUNNEL) {
+ TAILQ_REMOVE(&pf->flow_list, flow, node);
+ rte_free(flow);
+ }
+ }
+
+ return ret;
+}
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v4 00/17] net/i40e: consistent filter API
2016-12-29 16:04 ` [dpdk-dev] [PATCH v3 00/17] net/i40e: consistent filter API Beilei Xing
` (16 preceding siblings ...)
2016-12-29 16:04 ` [dpdk-dev] [PATCH v3 17/17] net/i40e: flush tunnel filters Beilei Xing
@ 2016-12-30 3:25 ` Beilei Xing
2016-12-30 3:25 ` [dpdk-dev] [PATCH v4 01/17] net/i40e: store ethertype filter Beilei Xing
` (17 more replies)
17 siblings, 18 replies; 175+ messages in thread
From: Beilei Xing @ 2016-12-30 3:25 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
The patch set depends on Adrien's Generic flow API(rte_flow).
The patches mainly finish following functions:
1) Store and restore all kinds of filters.
2) Parse all kinds of filters.
3) Add flow validate function.
4) Add flow create function.
5) Add flow destroy function.
6) Add flow flush function.
v4 changes:
Change I40E_TCI_MASK with 0xFFFF to align with testpmd.
Modify the stats shown when restoring filters.
v3 changes:
Set the related cause pointer to a non-NULL value when error happens.
Change return value when error happens.
Modify filter_del parameter with key.
Malloc filter after checking when delete a filter.
Delete meaningless initialization.
Add return value when there's error.
Change global variable definition.
Modify some function declaration.
v2 changes:
Add i40e_flow.c, all flow ops are implemented in the file.
Change the whole implementation of all parse flow functions.
Update error info for all flow ops.
Add flow_list to store flows created.
Beilei Xing (17):
net/i40e: store ethertype filter
net/i40e: store tunnel filter
net/i40e: store flow director filter
net/i40e: restore ethertype filter
net/i40e: restore tunnel filter
net/i40e: restore flow director filter
net/i40e: add flow validate function
net/i40e: parse flow director filter
net/i40e: parse tunnel filter
net/i40e: add flow create function
net/i40e: add flow destroy function
net/i40e: destroy ethertype filter
net/i40e: destroy tunnel filter
net/i40e: destroy flow directory filter
net/i40e: add flow flush function
net/i40e: flush ethertype filters
net/i40e: flush tunnel filters
drivers/net/i40e/Makefile | 2 +
drivers/net/i40e/i40e_ethdev.c | 519 ++++++++++--
drivers/net/i40e/i40e_ethdev.h | 173 ++++
drivers/net/i40e/i40e_fdir.c | 140 +++-
drivers/net/i40e/i40e_flow.c | 1772 ++++++++++++++++++++++++++++++++++++++++
5 files changed, 2540 insertions(+), 66 deletions(-)
create mode 100644 drivers/net/i40e/i40e_flow.c
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v4 01/17] net/i40e: store ethertype filter
2016-12-30 3:25 ` [dpdk-dev] [PATCH v4 00/17] net/i40e: consistent filter API Beilei Xing
@ 2016-12-30 3:25 ` Beilei Xing
2016-12-30 3:25 ` [dpdk-dev] [PATCH v4 02/17] net/i40e: store tunnel filter Beilei Xing
` (16 subsequent siblings)
17 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2016-12-30 3:25 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
Currently there's no ethertype filter stored in SW.
This patch stores ethertype filter with cuckoo hash
in SW, also adds protection if an ethertype filter
has been added.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/Makefile | 1 +
drivers/net/i40e/i40e_ethdev.c | 166 ++++++++++++++++++++++++++++++++++++++++-
drivers/net/i40e/i40e_ethdev.h | 31 ++++++++
3 files changed, 197 insertions(+), 1 deletion(-)
diff --git a/drivers/net/i40e/Makefile b/drivers/net/i40e/Makefile
index 66997b6..11175c4 100644
--- a/drivers/net/i40e/Makefile
+++ b/drivers/net/i40e/Makefile
@@ -117,5 +117,6 @@ DEPDIRS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += lib/librte_eal lib/librte_ether
DEPDIRS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += lib/librte_mempool lib/librte_mbuf
DEPDIRS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += lib/librte_net
DEPDIRS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += lib/librte_kvargs
+DEPDIRS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += lib/librte_hash
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 8033c35..e43b4d9 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -51,6 +51,7 @@
#include <rte_dev.h>
#include <rte_eth_ctrl.h>
#include <rte_tailq.h>
+#include <rte_hash_crc.h>
#include "i40e_logs.h"
#include "base/i40e_prototype.h"
@@ -461,6 +462,12 @@ static void i40e_set_default_mac_addr(struct rte_eth_dev *dev,
static int i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
+static int i40e_ethertype_filter_convert(
+ const struct rte_eth_ethertype_filter *input,
+ struct i40e_ethertype_filter *filter);
+static int i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
+ struct i40e_ethertype_filter *filter);
+
static const struct rte_pci_id pci_id_i40e_map[] = {
{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710) },
{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QEMU) },
@@ -938,9 +945,18 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
int ret;
uint32_t len;
uint8_t aq_fail = 0;
+ struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
PMD_INIT_FUNC_TRACE();
+ char ethertype_hash_name[RTE_HASH_NAMESIZE];
+ struct rte_hash_parameters ethertype_hash_params = {
+ .name = ethertype_hash_name,
+ .entries = I40E_MAX_ETHERTYPE_FILTER_NUM,
+ .key_len = sizeof(struct i40e_ethertype_filter_input),
+ .hash_func = rte_hash_crc,
+ };
+
dev->dev_ops = &i40e_eth_dev_ops;
dev->rx_pkt_burst = i40e_recv_pkts;
dev->tx_pkt_burst = i40e_xmit_pkts;
@@ -1180,8 +1196,33 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
pf->flags &= ~I40E_FLAG_DCB;
}
+ /* Initialize ethertype filter rule list and hash */
+ TAILQ_INIT(ðertype_rule->ethertype_list);
+ snprintf(ethertype_hash_name, RTE_HASH_NAMESIZE,
+ "ethertype_%s", dev->data->name);
+ ethertype_rule->hash_table = rte_hash_create(ðertype_hash_params);
+ if (!ethertype_rule->hash_table) {
+ PMD_INIT_LOG(ERR, "Failed to create ethertype hash table!");
+ ret = -EINVAL;
+ goto err_ethertype_hash_table_create;
+ }
+ ethertype_rule->hash_map = rte_zmalloc("i40e_ethertype_hash_map",
+ sizeof(struct i40e_ethertype_filter *) *
+ I40E_MAX_ETHERTYPE_FILTER_NUM,
+ 0);
+ if (!ethertype_rule->hash_map) {
+ PMD_INIT_LOG(ERR,
+ "Failed to allocate memory for ethertype hash map!");
+ ret = -ENOMEM;
+ goto err_ethertype_hash_map_alloc;
+ }
+
return 0;
+err_ethertype_hash_map_alloc:
+ rte_hash_free(ethertype_rule->hash_table);
+err_ethertype_hash_table_create:
+ rte_free(dev->data->mac_addrs);
err_mac_alloc:
i40e_vsi_release(pf->main_vsi);
err_setup_pf_switch:
@@ -1204,23 +1245,40 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
static int
eth_i40e_dev_uninit(struct rte_eth_dev *dev)
{
+ struct i40e_pf *pf;
struct rte_pci_device *pci_dev;
struct i40e_hw *hw;
struct i40e_filter_control_settings settings;
+ struct i40e_ethertype_filter *p_ethertype;
int ret;
uint8_t aq_fail = 0;
+ struct i40e_ethertype_rule *ethertype_rule;
PMD_INIT_FUNC_TRACE();
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
pci_dev = dev->pci_dev;
+ ethertype_rule = &pf->ethertype;
if (hw->adapter_stopped == 0)
i40e_dev_close(dev);
+ /* Remove all ethertype director rules and hash */
+ if (ethertype_rule->hash_map)
+ rte_free(ethertype_rule->hash_map);
+ if (ethertype_rule->hash_table)
+ rte_hash_free(ethertype_rule->hash_table);
+
+ while ((p_ethertype = TAILQ_FIRST(ðertype_rule->ethertype_list))) {
+ TAILQ_REMOVE(ðertype_rule->ethertype_list,
+ p_ethertype, rules);
+ rte_free(p_ethertype);
+ }
+
dev->dev_ops = NULL;
dev->rx_pkt_burst = NULL;
dev->tx_pkt_burst = NULL;
@@ -7955,6 +8013,82 @@ i40e_hash_filter_ctrl(struct rte_eth_dev *dev,
return ret;
}
+/* Convert ethertype filter structure */
+static int
+i40e_ethertype_filter_convert(const struct rte_eth_ethertype_filter *input,
+ struct i40e_ethertype_filter *filter)
+{
+ rte_memcpy(&filter->input.mac_addr, &input->mac_addr, ETHER_ADDR_LEN);
+ filter->input.ether_type = input->ether_type;
+ filter->flags = input->flags;
+ filter->queue = input->queue;
+
+ return 0;
+}
+
+/* Check if there exists the ethertype filter */
+struct i40e_ethertype_filter *
+i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_rule *ethertype_rule,
+ const struct i40e_ethertype_filter_input *input)
+{
+ int ret;
+
+ ret = rte_hash_lookup(ethertype_rule->hash_table, (const void *)input);
+ if (ret < 0)
+ return NULL;
+
+ return ethertype_rule->hash_map[ret];
+}
+
+/* Add ethertype filter in SW list */
+static int
+i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
+ struct i40e_ethertype_filter *filter)
+{
+ struct i40e_ethertype_rule *rule = &pf->ethertype;
+ int ret;
+
+ ret = rte_hash_add_key(rule->hash_table, &filter->input);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "Failed to insert ethertype filter"
+ " to hash table %d!",
+ ret);
+ return ret;
+ }
+ rule->hash_map[ret] = filter;
+
+ TAILQ_INSERT_TAIL(&rule->ethertype_list, filter, rules);
+
+ return 0;
+}
+
+/* Delete ethertype filter in SW list */
+int
+i40e_sw_ethertype_filter_del(struct i40e_pf *pf,
+ struct i40e_ethertype_filter_input *input)
+{
+ struct i40e_ethertype_rule *rule = &pf->ethertype;
+ struct i40e_ethertype_filter *filter;
+ int ret;
+
+ ret = rte_hash_del_key(rule->hash_table, input);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "Failed to delete ethertype filter"
+ " to hash table %d!",
+ ret);
+ return ret;
+ }
+ filter = rule->hash_map[ret];
+ rule->hash_map[ret] = NULL;
+
+ TAILQ_REMOVE(&rule->ethertype_list, filter, rules);
+ rte_free(filter);
+
+ return 0;
+}
+
/*
* Configure ethertype filter, which can direct packets by filtering
* with mac address and ether_type or only ether_type
@@ -7965,6 +8099,9 @@ i40e_ethertype_filter_set(struct i40e_pf *pf,
bool add)
{
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
+ struct i40e_ethertype_filter *ethertype_filter, *node;
+ struct i40e_ethertype_filter check_filter;
struct i40e_control_filter_stats stats;
uint16_t flags = 0;
int ret;
@@ -7983,6 +8120,21 @@ i40e_ethertype_filter_set(struct i40e_pf *pf,
PMD_DRV_LOG(WARNING, "filter vlan ether_type in first tag is"
" not supported.");
+ /* Check if there is the filter in SW list */
+ memset(&check_filter, 0, sizeof(check_filter));
+ i40e_ethertype_filter_convert(filter, &check_filter);
+ node = i40e_sw_ethertype_filter_lookup(ethertype_rule,
+ &check_filter.input);
+ if (add && node) {
+ PMD_DRV_LOG(ERR, "Conflict with existing ethertype rules!");
+ return -EINVAL;
+ }
+
+ if (!add && !node) {
+ PMD_DRV_LOG(ERR, "There's no corresponding ethertype filter!");
+ return -EINVAL;
+ }
+
if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
@@ -8003,7 +8155,19 @@ i40e_ethertype_filter_set(struct i40e_pf *pf,
stats.mac_etype_free, stats.etype_free);
if (ret < 0)
return -ENOSYS;
- return 0;
+
+ /* Add or delete a filter in SW list */
+ if (add) {
+ ethertype_filter = rte_zmalloc("ethertype_filter",
+ sizeof(*ethertype_filter), 0);
+ rte_memcpy(ethertype_filter, &check_filter,
+ sizeof(check_filter));
+ ret = i40e_sw_ethertype_filter_insert(pf, ethertype_filter);
+ } else {
+ ret = i40e_sw_ethertype_filter_del(pf, &node->input);
+ }
+
+ return ret;
}
/*
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 298cef4..3fb20ba 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -37,6 +37,7 @@
#include <rte_eth_ctrl.h>
#include <rte_time.h>
#include <rte_kvargs.h>
+#include <rte_hash.h>
#define I40E_VLAN_TAG_SIZE 4
@@ -396,6 +397,30 @@ struct i40e_fdir_info {
struct i40e_fdir_flex_mask flex_mask[I40E_FILTER_PCTYPE_MAX];
};
+/* Ethertype filter number HW supports */
+#define I40E_MAX_ETHERTYPE_FILTER_NUM 768
+
+/* Ethertype filter struct */
+struct i40e_ethertype_filter_input {
+ struct ether_addr mac_addr; /* Mac address to match */
+ uint16_t ether_type; /* Ether type to match */
+};
+
+struct i40e_ethertype_filter {
+ TAILQ_ENTRY(i40e_ethertype_filter) rules;
+ struct i40e_ethertype_filter_input input;
+ uint16_t flags; /* Flags from RTE_ETHTYPE_FLAGS_* */
+ uint16_t queue; /* Queue assigned to when match */
+};
+
+TAILQ_HEAD(i40e_ethertype_filter_list, i40e_ethertype_filter);
+
+struct i40e_ethertype_rule {
+ struct i40e_ethertype_filter_list ethertype_list;
+ struct i40e_ethertype_filter **hash_map;
+ struct rte_hash *hash_table;
+};
+
#define I40E_MIRROR_MAX_ENTRIES_PER_RULE 64
#define I40E_MAX_MIRROR_RULES 64
/*
@@ -466,6 +491,7 @@ struct i40e_pf {
struct i40e_vmdq_info *vmdq;
struct i40e_fdir_info fdir; /* flow director info */
+ struct i40e_ethertype_rule ethertype; /* Ethertype filter rule */
struct i40e_fc_conf fc_conf; /* Flow control conf */
struct i40e_mirror_rule_list mirror_list;
uint16_t nb_mirror_rule; /* The number of mirror rules */
@@ -616,6 +642,11 @@ void i40e_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
struct rte_eth_rxq_info *qinfo);
void i40e_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
struct rte_eth_txq_info *qinfo);
+struct i40e_ethertype_filter *
+i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_rule *ethertype_rule,
+ const struct i40e_ethertype_filter_input *input);
+int i40e_sw_ethertype_filter_del(struct i40e_pf *pf,
+ struct i40e_ethertype_filter_input *input);
/* I40E_DEV_PRIVATE_TO */
#define I40E_DEV_PRIVATE_TO_PF(adapter) \
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v4 02/17] net/i40e: store tunnel filter
2016-12-30 3:25 ` [dpdk-dev] [PATCH v4 00/17] net/i40e: consistent filter API Beilei Xing
2016-12-30 3:25 ` [dpdk-dev] [PATCH v4 01/17] net/i40e: store ethertype filter Beilei Xing
@ 2016-12-30 3:25 ` Beilei Xing
2016-12-30 3:25 ` [dpdk-dev] [PATCH v4 03/17] net/i40e: store flow director filter Beilei Xing
` (15 subsequent siblings)
17 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2016-12-30 3:25 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
Currently there's no tunnel filter stored in SW.
This patch stores tunnel filter in SW with cuckoo
hash, also adds protection if a tunnel filter has
been added.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_ethdev.c | 169 ++++++++++++++++++++++++++++++++++++++++-
drivers/net/i40e/i40e_ethdev.h | 32 ++++++++
2 files changed, 198 insertions(+), 3 deletions(-)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index e43b4d9..2bdb4d6 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -468,6 +468,12 @@ static int i40e_ethertype_filter_convert(
static int i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
struct i40e_ethertype_filter *filter);
+static int i40e_tunnel_filter_convert(
+ struct i40e_aqc_add_remove_cloud_filters_element_data *cld_filter,
+ struct i40e_tunnel_filter *tunnel_filter);
+static int i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
+ struct i40e_tunnel_filter *tunnel_filter);
+
static const struct rte_pci_id pci_id_i40e_map[] = {
{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710) },
{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QEMU) },
@@ -946,6 +952,7 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
uint32_t len;
uint8_t aq_fail = 0;
struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
+ struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
PMD_INIT_FUNC_TRACE();
@@ -957,6 +964,14 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
.hash_func = rte_hash_crc,
};
+ char tunnel_hash_name[RTE_HASH_NAMESIZE];
+ struct rte_hash_parameters tunnel_hash_params = {
+ .name = tunnel_hash_name,
+ .entries = I40E_MAX_TUNNEL_FILTER_NUM,
+ .key_len = sizeof(struct i40e_tunnel_filter_input),
+ .hash_func = rte_hash_crc,
+ };
+
dev->dev_ops = &i40e_eth_dev_ops;
dev->rx_pkt_burst = i40e_recv_pkts;
dev->tx_pkt_burst = i40e_xmit_pkts;
@@ -1217,8 +1232,33 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
goto err_ethertype_hash_map_alloc;
}
+ /* Initialize tunnel filter rule list and hash */
+ TAILQ_INIT(&tunnel_rule->tunnel_list);
+ snprintf(tunnel_hash_name, RTE_HASH_NAMESIZE,
+ "tunnel_%s", dev->data->name);
+ tunnel_rule->hash_table = rte_hash_create(&tunnel_hash_params);
+ if (!tunnel_rule->hash_table) {
+ PMD_INIT_LOG(ERR, "Failed to create tunnel hash table!");
+ ret = -EINVAL;
+ goto err_tunnel_hash_table_create;
+ }
+ tunnel_rule->hash_map = rte_zmalloc("i40e_tunnel_hash_map",
+ sizeof(struct i40e_tunnel_filter *) *
+ I40E_MAX_TUNNEL_FILTER_NUM,
+ 0);
+ if (!tunnel_rule->hash_map) {
+ PMD_INIT_LOG(ERR,
+ "Failed to allocate memory for tunnel hash map!");
+ ret = -ENOMEM;
+ goto err_tunnel_hash_map_alloc;
+ }
+
return 0;
+err_tunnel_hash_map_alloc:
+ rte_hash_free(tunnel_rule->hash_table);
+err_tunnel_hash_table_create:
+ rte_free(ethertype_rule->hash_map);
err_ethertype_hash_map_alloc:
rte_hash_free(ethertype_rule->hash_table);
err_ethertype_hash_table_create:
@@ -1250,9 +1290,11 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
struct i40e_hw *hw;
struct i40e_filter_control_settings settings;
struct i40e_ethertype_filter *p_ethertype;
+ struct i40e_tunnel_filter *p_tunnel;
int ret;
uint8_t aq_fail = 0;
struct i40e_ethertype_rule *ethertype_rule;
+ struct i40e_tunnel_rule *tunnel_rule;
PMD_INIT_FUNC_TRACE();
@@ -1263,6 +1305,7 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
pci_dev = dev->pci_dev;
ethertype_rule = &pf->ethertype;
+ tunnel_rule = &pf->tunnel;
if (hw->adapter_stopped == 0)
i40e_dev_close(dev);
@@ -1279,6 +1322,17 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
rte_free(p_ethertype);
}
+ /* Remove all tunnel filter rules and hash */
+ if (tunnel_rule->hash_map)
+ rte_free(tunnel_rule->hash_map);
+ if (tunnel_rule->hash_table)
+ rte_hash_free(tunnel_rule->hash_table);
+
+ while ((p_tunnel = TAILQ_FIRST(&tunnel_rule->tunnel_list))) {
+ TAILQ_REMOVE(&tunnel_rule->tunnel_list, p_tunnel, rules);
+ rte_free(p_tunnel);
+ }
+
dev->dev_ops = NULL;
dev->rx_pkt_burst = NULL;
dev->tx_pkt_burst = NULL;
@@ -6478,6 +6532,85 @@ i40e_dev_get_filter_type(uint16_t filter_type, uint16_t *flag)
return 0;
}
+/* Convert tunnel filter structure */
+static int
+i40e_tunnel_filter_convert(struct i40e_aqc_add_remove_cloud_filters_element_data
+ *cld_filter,
+ struct i40e_tunnel_filter *tunnel_filter)
+{
+ ether_addr_copy((struct ether_addr *)&cld_filter->outer_mac,
+ (struct ether_addr *)&tunnel_filter->input.outer_mac);
+ ether_addr_copy((struct ether_addr *)&cld_filter->inner_mac,
+ (struct ether_addr *)&tunnel_filter->input.inner_mac);
+ tunnel_filter->input.inner_vlan = cld_filter->inner_vlan;
+ tunnel_filter->input.flags = cld_filter->flags;
+ tunnel_filter->input.tenant_id = cld_filter->tenant_id;
+ tunnel_filter->queue = cld_filter->queue_number;
+
+ return 0;
+}
+
+/* Check if there exists the tunnel filter */
+struct i40e_tunnel_filter *
+i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_rule *tunnel_rule,
+ const struct i40e_tunnel_filter_input *input)
+{
+ int ret;
+
+ ret = rte_hash_lookup(tunnel_rule->hash_table, (const void *)input);
+ if (ret < 0)
+ return NULL;
+
+ return tunnel_rule->hash_map[ret];
+}
+
+/* Add a tunnel filter into the SW list */
+static int
+i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
+ struct i40e_tunnel_filter *tunnel_filter)
+{
+ struct i40e_tunnel_rule *rule = &pf->tunnel;
+ int ret;
+
+ ret = rte_hash_add_key(rule->hash_table, &tunnel_filter->input);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "Failed to insert tunnel filter to hash table %d!",
+ ret);
+ return ret;
+ }
+ rule->hash_map[ret] = tunnel_filter;
+
+ TAILQ_INSERT_TAIL(&rule->tunnel_list, tunnel_filter, rules);
+
+ return 0;
+}
+
+/* Delete a tunnel filter from the SW list */
+int
+i40e_sw_tunnel_filter_del(struct i40e_pf *pf,
+ struct i40e_tunnel_filter_input *input)
+{
+ struct i40e_tunnel_rule *rule = &pf->tunnel;
+ struct i40e_tunnel_filter *tunnel_filter;
+ int ret;
+
+ ret = rte_hash_del_key(rule->hash_table, input);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "Failed to delete tunnel filter to hash table %d!",
+ ret);
+ return ret;
+ }
+ tunnel_filter = rule->hash_map[ret];
+ rule->hash_map[ret] = NULL;
+
+ TAILQ_REMOVE(&rule->tunnel_list, tunnel_filter, rules);
+ rte_free(tunnel_filter);
+
+ return 0;
+}
+
static int
i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
struct rte_eth_tunnel_filter_conf *tunnel_filter,
@@ -6493,6 +6626,9 @@ i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
struct i40e_vsi *vsi = pf->main_vsi;
struct i40e_aqc_add_remove_cloud_filters_element_data *cld_filter;
struct i40e_aqc_add_remove_cloud_filters_element_data *pfilter;
+ struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
+ struct i40e_tunnel_filter *tunnel, *node;
+ struct i40e_tunnel_filter check_filter; /* Check if filter exists */
cld_filter = rte_zmalloc("tunnel_filter",
sizeof(struct i40e_aqc_add_remove_cloud_filters_element_data),
@@ -6555,11 +6691,38 @@ i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
pfilter->tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id);
pfilter->queue_number = rte_cpu_to_le_16(tunnel_filter->queue_id);
- if (add)
+ /* Check if there is the filter in SW list */
+ memset(&check_filter, 0, sizeof(check_filter));
+ i40e_tunnel_filter_convert(cld_filter, &check_filter);
+ node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &check_filter.input);
+ if (add && node) {
+ PMD_DRV_LOG(ERR, "Conflict with existing tunnel rules!");
+ return -EINVAL;
+ }
+
+ if (!add && !node) {
+ PMD_DRV_LOG(ERR, "There's no corresponding tunnel filter!");
+ return -EINVAL;
+ }
+
+ if (add) {
ret = i40e_aq_add_cloud_filters(hw, vsi->seid, cld_filter, 1);
- else
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to add a tunnel filter.");
+ return ret;
+ }
+ tunnel = rte_zmalloc("tunnel_filter", sizeof(*tunnel), 0);
+ rte_memcpy(tunnel, &check_filter, sizeof(check_filter));
+ ret = i40e_sw_tunnel_filter_insert(pf, tunnel);
+ } else {
ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
- cld_filter, 1);
+ cld_filter, 1);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter.");
+ return ret;
+ }
+ ret = i40e_sw_tunnel_filter_del(pf, &node->input);
+ }
rte_free(cld_filter);
return ret;
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 3fb20ba..83f3594 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -421,6 +421,32 @@ struct i40e_ethertype_rule {
struct rte_hash *hash_table;
};
+/* Tunnel filter number HW supports */
+#define I40E_MAX_TUNNEL_FILTER_NUM 400
+
+/* Tunnel filter struct */
+struct i40e_tunnel_filter_input {
+ uint8_t outer_mac[6]; /* Outer mac address to match */
+ uint8_t inner_mac[6]; /* Inner mac address to match */
+ uint16_t inner_vlan; /* Inner vlan address to match */
+ uint16_t flags; /* Filter type flag */
+ uint32_t tenant_id; /* Tenant id to match */
+};
+
+struct i40e_tunnel_filter {
+ TAILQ_ENTRY(i40e_tunnel_filter) rules;
+ struct i40e_tunnel_filter_input input;
+ uint16_t queue; /* Queue assigned to when match */
+};
+
+TAILQ_HEAD(i40e_tunnel_filter_list, i40e_tunnel_filter);
+
+struct i40e_tunnel_rule {
+ struct i40e_tunnel_filter_list tunnel_list;
+ struct i40e_tunnel_filter **hash_map;
+ struct rte_hash *hash_table;
+};
+
#define I40E_MIRROR_MAX_ENTRIES_PER_RULE 64
#define I40E_MAX_MIRROR_RULES 64
/*
@@ -492,6 +518,7 @@ struct i40e_pf {
struct i40e_fdir_info fdir; /* flow director info */
struct i40e_ethertype_rule ethertype; /* Ethertype filter rule */
+ struct i40e_tunnel_rule tunnel; /* Tunnel filter rule */
struct i40e_fc_conf fc_conf; /* Flow control conf */
struct i40e_mirror_rule_list mirror_list;
uint16_t nb_mirror_rule; /* The number of mirror rules */
@@ -647,6 +674,11 @@ i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_rule *ethertype_rule,
const struct i40e_ethertype_filter_input *input);
int i40e_sw_ethertype_filter_del(struct i40e_pf *pf,
struct i40e_ethertype_filter_input *input);
+struct i40e_tunnel_filter *
+i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_rule *tunnel_rule,
+ const struct i40e_tunnel_filter_input *input);
+int i40e_sw_tunnel_filter_del(struct i40e_pf *pf,
+ struct i40e_tunnel_filter_input *input);
/* I40E_DEV_PRIVATE_TO */
#define I40E_DEV_PRIVATE_TO_PF(adapter) \
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v4 03/17] net/i40e: store flow director filter
2016-12-30 3:25 ` [dpdk-dev] [PATCH v4 00/17] net/i40e: consistent filter API Beilei Xing
2016-12-30 3:25 ` [dpdk-dev] [PATCH v4 01/17] net/i40e: store ethertype filter Beilei Xing
2016-12-30 3:25 ` [dpdk-dev] [PATCH v4 02/17] net/i40e: store tunnel filter Beilei Xing
@ 2016-12-30 3:25 ` Beilei Xing
2016-12-30 3:25 ` [dpdk-dev] [PATCH v4 04/17] net/i40e: restore ethertype filter Beilei Xing
` (14 subsequent siblings)
17 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2016-12-30 3:25 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
Currently there's no flow director filter stored in SW. This
patch stores flow director filters in SW with cuckoo hash,
also adds protection if a flow director filter has been added.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_ethdev.c | 48 +++++++++++++++++++
drivers/net/i40e/i40e_ethdev.h | 14 ++++++
drivers/net/i40e/i40e_fdir.c | 105 +++++++++++++++++++++++++++++++++++++++++
3 files changed, 167 insertions(+)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 2bdb4d6..fb7d794 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -953,6 +953,7 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
uint8_t aq_fail = 0;
struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
+ struct i40e_fdir_info *fdir_info = &pf->fdir;
PMD_INIT_FUNC_TRACE();
@@ -972,6 +973,14 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
.hash_func = rte_hash_crc,
};
+ char fdir_hash_name[RTE_HASH_NAMESIZE];
+ struct rte_hash_parameters fdir_hash_params = {
+ .name = fdir_hash_name,
+ .entries = I40E_MAX_FDIR_FILTER_NUM,
+ .key_len = sizeof(struct rte_eth_fdir_input),
+ .hash_func = rte_hash_crc,
+ };
+
dev->dev_ops = &i40e_eth_dev_ops;
dev->rx_pkt_burst = i40e_recv_pkts;
dev->tx_pkt_burst = i40e_xmit_pkts;
@@ -1253,8 +1262,33 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
goto err_tunnel_hash_map_alloc;
}
+ /* Initialize flow director filter rule list and hash */
+ TAILQ_INIT(&fdir_info->fdir_list);
+ snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
+ "fdir_%s", dev->data->name);
+ fdir_info->hash_table = rte_hash_create(&fdir_hash_params);
+ if (!fdir_info->hash_table) {
+ PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
+ ret = -EINVAL;
+ goto err_fdir_hash_table_create;
+ }
+ fdir_info->hash_map = rte_zmalloc("i40e_fdir_hash_map",
+ sizeof(struct i40e_fdir_filter *) *
+ I40E_MAX_FDIR_FILTER_NUM,
+ 0);
+ if (!fdir_info->hash_map) {
+ PMD_INIT_LOG(ERR,
+ "Failed to allocate memory for fdir hash map!");
+ ret = -ENOMEM;
+ goto err_fdir_hash_map_alloc;
+ }
+
return 0;
+err_fdir_hash_map_alloc:
+ rte_hash_free(fdir_info->hash_table);
+err_fdir_hash_table_create:
+ rte_free(tunnel_rule->hash_map);
err_tunnel_hash_map_alloc:
rte_hash_free(tunnel_rule->hash_table);
err_tunnel_hash_table_create:
@@ -1291,10 +1325,12 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
struct i40e_filter_control_settings settings;
struct i40e_ethertype_filter *p_ethertype;
struct i40e_tunnel_filter *p_tunnel;
+ struct i40e_fdir_filter *p_fdir;
int ret;
uint8_t aq_fail = 0;
struct i40e_ethertype_rule *ethertype_rule;
struct i40e_tunnel_rule *tunnel_rule;
+ struct i40e_fdir_info *fdir_info;
PMD_INIT_FUNC_TRACE();
@@ -1306,6 +1342,7 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
pci_dev = dev->pci_dev;
ethertype_rule = &pf->ethertype;
tunnel_rule = &pf->tunnel;
+ fdir_info = &pf->fdir;
if (hw->adapter_stopped == 0)
i40e_dev_close(dev);
@@ -1333,6 +1370,17 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
rte_free(p_tunnel);
}
+ /* Remove all flow director rules and hash */
+ if (fdir_info->hash_map)
+ rte_free(fdir_info->hash_map);
+ if (fdir_info->hash_table)
+ rte_hash_free(fdir_info->hash_table);
+
+ while ((p_fdir = TAILQ_FIRST(&fdir_info->fdir_list))) {
+ TAILQ_REMOVE(&fdir_info->fdir_list, p_fdir, rules);
+ rte_free(p_fdir);
+ }
+
dev->dev_ops = NULL;
dev->rx_pkt_burst = NULL;
dev->tx_pkt_burst = NULL;
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 83f3594..b79fbd6 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -377,6 +377,14 @@ struct i40e_fdir_flex_mask {
};
#define I40E_FILTER_PCTYPE_MAX 64
+#define I40E_MAX_FDIR_FILTER_NUM (1024 * 8)
+
+struct i40e_fdir_filter {
+ TAILQ_ENTRY(i40e_fdir_filter) rules;
+ struct rte_eth_fdir_filter fdir;
+};
+
+TAILQ_HEAD(i40e_fdir_filter_list, i40e_fdir_filter);
/*
* A structure used to define fields of a FDIR related info.
*/
@@ -395,6 +403,10 @@ struct i40e_fdir_info {
*/
struct i40e_fdir_flex_pit flex_set[I40E_MAX_FLXPLD_LAYER * I40E_MAX_FLXPLD_FIED];
struct i40e_fdir_flex_mask flex_mask[I40E_FILTER_PCTYPE_MAX];
+
+ struct i40e_fdir_filter_list fdir_list;
+ struct i40e_fdir_filter **hash_map;
+ struct rte_hash *hash_table;
};
/* Ethertype filter number HW supports */
@@ -674,6 +686,8 @@ i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_rule *ethertype_rule,
const struct i40e_ethertype_filter_input *input);
int i40e_sw_ethertype_filter_del(struct i40e_pf *pf,
struct i40e_ethertype_filter_input *input);
+int i40e_sw_fdir_filter_del(struct i40e_pf *pf,
+ struct rte_eth_fdir_input *input);
struct i40e_tunnel_filter *
i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_rule *tunnel_rule,
const struct i40e_tunnel_filter_input *input);
diff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c
index 335bf15..4a29b37 100644
--- a/drivers/net/i40e/i40e_fdir.c
+++ b/drivers/net/i40e/i40e_fdir.c
@@ -121,6 +121,14 @@ static int i40e_fdir_filter_programming(struct i40e_pf *pf,
bool add);
static int i40e_fdir_flush(struct rte_eth_dev *dev);
+static int i40e_fdir_filter_convert(const struct rte_eth_fdir_filter *input,
+ struct i40e_fdir_filter *filter);
+static struct i40e_fdir_filter *
+i40e_sw_fdir_filter_lookup(struct i40e_fdir_info *fdir_info,
+ const struct rte_eth_fdir_input *input);
+static int i40e_sw_fdir_filter_insert(struct i40e_pf *pf,
+ struct i40e_fdir_filter *filter);
+
static int
i40e_fdir_rx_queue_init(struct i40e_rx_queue *rxq)
{
@@ -1017,6 +1025,74 @@ i40e_check_fdir_programming_status(struct i40e_rx_queue *rxq)
return ret;
}
+static int
+i40e_fdir_filter_convert(const struct rte_eth_fdir_filter *input,
+ struct i40e_fdir_filter *filter)
+{
+ rte_memcpy(&filter->fdir, input, sizeof(struct rte_eth_fdir_filter));
+ return 0;
+}
+
+/* Check if there exists the flow director filter */
+static struct i40e_fdir_filter *
+i40e_sw_fdir_filter_lookup(struct i40e_fdir_info *fdir_info,
+ const struct rte_eth_fdir_input *input)
+{
+ int ret;
+
+ ret = rte_hash_lookup(fdir_info->hash_table, (const void *)input);
+ if (ret < 0)
+ return NULL;
+
+ return fdir_info->hash_map[ret];
+}
+
+/* Add a flow director filter into the SW list */
+static int
+i40e_sw_fdir_filter_insert(struct i40e_pf *pf, struct i40e_fdir_filter *filter)
+{
+ struct i40e_fdir_info *fdir_info = &pf->fdir;
+ int ret;
+
+ ret = rte_hash_add_key(fdir_info->hash_table,
+ &filter->fdir.input);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "Failed to insert fdir filter to hash table %d!",
+ ret);
+ return ret;
+ }
+ fdir_info->hash_map[ret] = filter;
+
+ TAILQ_INSERT_TAIL(&fdir_info->fdir_list, filter, rules);
+
+ return 0;
+}
+
+/* Delete a flow director filter from the SW list */
+int
+i40e_sw_fdir_filter_del(struct i40e_pf *pf, struct rte_eth_fdir_input *input)
+{
+ struct i40e_fdir_info *fdir_info = &pf->fdir;
+ struct i40e_fdir_filter *filter;
+ int ret;
+
+ ret = rte_hash_del_key(fdir_info->hash_table, input);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "Failed to delete fdir filter to hash table %d!",
+ ret);
+ return ret;
+ }
+ filter = fdir_info->hash_map[ret];
+ fdir_info->hash_map[ret] = NULL;
+
+ TAILQ_REMOVE(&fdir_info->fdir_list, filter, rules);
+ rte_free(filter);
+
+ return 0;
+}
+
/*
* i40e_add_del_fdir_filter - add or remove a flow director filter.
* @pf: board private structure
@@ -1032,6 +1108,9 @@ i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
enum i40e_filter_pctype pctype;
+ struct i40e_fdir_info *fdir_info = &pf->fdir;
+ struct i40e_fdir_filter *fdir_filter, *node;
+ struct i40e_fdir_filter check_filter; /* Check if the filter exists */
int ret = 0;
if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
@@ -1054,6 +1133,22 @@ i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
return -EINVAL;
}
+ /* Check if there is the filter in SW list */
+ memset(&check_filter, 0, sizeof(check_filter));
+ i40e_fdir_filter_convert(filter, &check_filter);
+ node = i40e_sw_fdir_filter_lookup(fdir_info, &check_filter.fdir.input);
+ if (add && node) {
+ PMD_DRV_LOG(ERR,
+ "Conflict with existing flow director rules!");
+ return -EINVAL;
+ }
+
+ if (!add && !node) {
+ PMD_DRV_LOG(ERR,
+ "There's no corresponding flow firector filter!");
+ return -EINVAL;
+ }
+
memset(pkt, 0, I40E_FDIR_PKT_LEN);
ret = i40e_fdir_construct_pkt(pf, &filter->input, pkt);
@@ -1077,6 +1172,16 @@ i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
pctype);
return ret;
}
+
+ if (add) {
+ fdir_filter = rte_zmalloc("fdir_filter",
+ sizeof(*fdir_filter), 0);
+ rte_memcpy(fdir_filter, &check_filter, sizeof(check_filter));
+ ret = i40e_sw_fdir_filter_insert(pf, fdir_filter);
+ } else {
+ ret = i40e_sw_fdir_filter_del(pf, &node->fdir.input);
+ }
+
return ret;
}
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v4 04/17] net/i40e: restore ethertype filter
2016-12-30 3:25 ` [dpdk-dev] [PATCH v4 00/17] net/i40e: consistent filter API Beilei Xing
` (2 preceding siblings ...)
2016-12-30 3:25 ` [dpdk-dev] [PATCH v4 03/17] net/i40e: store flow director filter Beilei Xing
@ 2016-12-30 3:25 ` Beilei Xing
2016-12-30 3:25 ` [dpdk-dev] [PATCH v4 05/17] net/i40e: restore tunnel filter Beilei Xing
` (13 subsequent siblings)
17 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2016-12-30 3:25 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
Add support of restoring ethertype filter in case filter
dropped accidentally, as all filters need to be added and
removed by user obviously for generic filter API.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_ethdev.c | 44 ++++++++++++++++++++++++++++++++++++++++++
1 file changed, 44 insertions(+)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index fb7d794..189d110 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -474,6 +474,9 @@ static int i40e_tunnel_filter_convert(
static int i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
struct i40e_tunnel_filter *tunnel_filter);
+static void i40e_ethertype_filter_restore(struct i40e_pf *pf);
+static void i40e_filter_restore(struct i40e_pf *pf);
+
static const struct rte_pci_id pci_id_i40e_map[] = {
{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710) },
{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QEMU) },
@@ -1955,6 +1958,8 @@ i40e_dev_start(struct rte_eth_dev *dev)
/* enable uio intr after callback register */
rte_intr_enable(intr_handle);
+ i40e_filter_restore(pf);
+
return I40E_SUCCESS;
err_up:
@@ -10071,3 +10076,42 @@ i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
return ret;
}
+
+/* Restore ethertype filter */
+static void
+i40e_ethertype_filter_restore(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_ethertype_filter_list
+ *ethertype_list = &pf->ethertype.ethertype_list;
+ struct i40e_ethertype_filter *f;
+ struct i40e_control_filter_stats stats;
+ uint16_t flags;
+
+ TAILQ_FOREACH(f, ethertype_list, rules) {
+ flags = 0;
+ if (!(f->flags & RTE_ETHTYPE_FLAGS_MAC))
+ flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
+ if (f->flags & RTE_ETHTYPE_FLAGS_DROP)
+ flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
+ flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
+
+ memset(&stats, 0, sizeof(stats));
+ i40e_aq_add_rem_control_packet_filter(hw,
+ f->input.mac_addr.addr_bytes,
+ f->input.ether_type,
+ flags, pf->main_vsi->seid,
+ f->queue, 1, &stats, NULL);
+ }
+ PMD_DRV_LOG(INFO, "Ethertype filter:"
+ " mac_etype_used = %u, etype_used = %u,"
+ " mac_etype_free = %u, etype_free = %u\n",
+ stats.mac_etype_used, stats.etype_used,
+ stats.mac_etype_free, stats.etype_free);
+}
+
+static void
+i40e_filter_restore(struct i40e_pf *pf)
+{
+ i40e_ethertype_filter_restore(pf);
+}
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v4 05/17] net/i40e: restore tunnel filter
2016-12-30 3:25 ` [dpdk-dev] [PATCH v4 00/17] net/i40e: consistent filter API Beilei Xing
` (3 preceding siblings ...)
2016-12-30 3:25 ` [dpdk-dev] [PATCH v4 04/17] net/i40e: restore ethertype filter Beilei Xing
@ 2016-12-30 3:25 ` Beilei Xing
2016-12-30 3:25 ` [dpdk-dev] [PATCH v4 06/17] net/i40e: restore flow director filter Beilei Xing
` (12 subsequent siblings)
17 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2016-12-30 3:25 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
Add support of restoring tunnel filter.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_ethdev.c | 21 +++++++++++++++++++++
1 file changed, 21 insertions(+)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 189d110..67e1b37 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -475,6 +475,7 @@ static int i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
struct i40e_tunnel_filter *tunnel_filter);
static void i40e_ethertype_filter_restore(struct i40e_pf *pf);
+static void i40e_tunnel_filter_restore(struct i40e_pf *pf);
static void i40e_filter_restore(struct i40e_pf *pf);
static const struct rte_pci_id pci_id_i40e_map[] = {
@@ -10110,8 +10111,28 @@ i40e_ethertype_filter_restore(struct i40e_pf *pf)
stats.mac_etype_free, stats.etype_free);
}
+/* Restore tunnel filter */
+static void
+i40e_tunnel_filter_restore(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_vsi *vsi = pf->main_vsi;
+ struct i40e_tunnel_filter_list
+ *tunnel_list = &pf->tunnel.tunnel_list;
+ struct i40e_tunnel_filter *f;
+ struct i40e_aqc_add_remove_cloud_filters_element_data cld_filter;
+
+ TAILQ_FOREACH(f, tunnel_list, rules) {
+ memset(&cld_filter, 0, sizeof(cld_filter));
+ rte_memcpy(&cld_filter, &f->input, sizeof(f->input));
+ cld_filter.queue_number = f->queue;
+ i40e_aq_add_cloud_filters(hw, vsi->seid, &cld_filter, 1);
+ }
+}
+
static void
i40e_filter_restore(struct i40e_pf *pf)
{
i40e_ethertype_filter_restore(pf);
+ i40e_tunnel_filter_restore(pf);
}
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v4 06/17] net/i40e: restore flow director filter
2016-12-30 3:25 ` [dpdk-dev] [PATCH v4 00/17] net/i40e: consistent filter API Beilei Xing
` (4 preceding siblings ...)
2016-12-30 3:25 ` [dpdk-dev] [PATCH v4 05/17] net/i40e: restore tunnel filter Beilei Xing
@ 2016-12-30 3:25 ` Beilei Xing
2016-12-30 3:25 ` [dpdk-dev] [PATCH v4 07/17] net/i40e: add flow validate function Beilei Xing
` (11 subsequent siblings)
17 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2016-12-30 3:25 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
Add support for restoring flow director filters.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_ethdev.c | 1 +
drivers/net/i40e/i40e_ethdev.h | 1 +
drivers/net/i40e/i40e_fdir.c | 31 +++++++++++++++++++++++++++++++
3 files changed, 33 insertions(+)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 67e1b37..153322a 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -10135,4 +10135,5 @@ i40e_filter_restore(struct i40e_pf *pf)
{
i40e_ethertype_filter_restore(pf);
i40e_tunnel_filter_restore(pf);
+ i40e_fdir_filter_restore(pf);
}
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index b79fbd6..92f6f55 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -670,6 +670,7 @@ int i40e_fdir_ctrl_func(struct rte_eth_dev *dev,
int i40e_select_filter_input_set(struct i40e_hw *hw,
struct rte_eth_input_set_conf *conf,
enum rte_filter_type filter);
+void i40e_fdir_filter_restore(struct i40e_pf *pf);
int i40e_hash_filter_inset_select(struct i40e_hw *hw,
struct rte_eth_input_set_conf *conf);
int i40e_fdir_filter_inset_select(struct i40e_pf *pf,
diff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c
index 4a29b37..f89dbc9 100644
--- a/drivers/net/i40e/i40e_fdir.c
+++ b/drivers/net/i40e/i40e_fdir.c
@@ -1586,3 +1586,34 @@ i40e_fdir_ctrl_func(struct rte_eth_dev *dev,
}
return ret;
}
+
+/* Restore flow director filter */
+void
+i40e_fdir_filter_restore(struct i40e_pf *pf)
+{
+ struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(pf->main_vsi);
+ struct i40e_fdir_filter_list *fdir_list = &pf->fdir.fdir_list;
+ struct i40e_fdir_filter *f;
+#ifdef RTE_LIBRTE_I40E_DEBUG_DRIVER
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ uint32_t fdstat;
+ uint32_t guarant_cnt; /**< Number of filters in guaranteed spaces. */
+ uint32_t best_cnt; /**< Number of filters in best effort spaces. */
+#endif /* RTE_LIBRTE_I40E_DEBUG_DRIVER */
+
+ TAILQ_FOREACH(f, fdir_list, rules)
+ i40e_add_del_fdir_filter(dev, &f->fdir, TRUE);
+
+#ifdef RTE_LIBRTE_I40E_DEBUG_DRIVER
+ fdstat = I40E_READ_REG(hw, I40E_PFQF_FDSTAT);
+ guarant_cnt =
+ (uint32_t)((fdstat & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) >>
+ I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT);
+ best_cnt =
+ (uint32_t)((fdstat & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
+ I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
+#endif /* RTE_LIBRTE_I40E_DEBUG_DRIVER */
+
+ PMD_DRV_LOG(INFO, "FDIR: Guarant count: %d, Best count: %d\n",
+ guarant_cnt, best_cnt);
+}
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v4 07/17] net/i40e: add flow validate function
2016-12-30 3:25 ` [dpdk-dev] [PATCH v4 00/17] net/i40e: consistent filter API Beilei Xing
` (5 preceding siblings ...)
2016-12-30 3:25 ` [dpdk-dev] [PATCH v4 06/17] net/i40e: restore flow director filter Beilei Xing
@ 2016-12-30 3:25 ` Beilei Xing
2016-12-30 3:25 ` [dpdk-dev] [PATCH v4 08/17] net/i40e: parse flow director filter Beilei Xing
` (10 subsequent siblings)
17 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2016-12-30 3:25 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
This patch adds i40e_flow_validation function to check if
a flow is valid according to the flow pattern.
i40e_parse_ethertype_filter is added first, it also gets
the ethertype info.
i40e_flow.c is added to handle all generic filter events.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/Makefile | 1 +
drivers/net/i40e/i40e_ethdev.c | 7 +
drivers/net/i40e/i40e_ethdev.h | 18 ++
drivers/net/i40e/i40e_flow.c | 447 +++++++++++++++++++++++++++++++++++++++++
4 files changed, 473 insertions(+)
create mode 100644 drivers/net/i40e/i40e_flow.c
diff --git a/drivers/net/i40e/Makefile b/drivers/net/i40e/Makefile
index 11175c4..89bd85a 100644
--- a/drivers/net/i40e/Makefile
+++ b/drivers/net/i40e/Makefile
@@ -105,6 +105,7 @@ endif
SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_ethdev_vf.c
SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_pf.c
SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_fdir.c
+SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_flow.c
# vector PMD driver needs SSE4.1 support
ifeq ($(findstring RTE_MACHINE_CPUFLAG_SSE4_1,$(CFLAGS)),)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 153322a..edfd52b 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -8426,6 +8426,8 @@ i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
return ret;
}
+const struct rte_flow_ops i40e_flow_ops;
+
static int
i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
enum rte_filter_type filter_type,
@@ -8457,6 +8459,11 @@ i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
case RTE_ETH_FILTER_FDIR:
ret = i40e_fdir_ctrl_func(dev, filter_op, arg);
break;
+ case RTE_ETH_FILTER_GENERIC:
+ if (filter_op != RTE_ETH_FILTER_GET)
+ return -EINVAL;
+ *(const void **)arg = &i40e_flow_ops;
+ break;
default:
PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
filter_type);
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 92f6f55..23f360b 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -38,6 +38,7 @@
#include <rte_time.h>
#include <rte_kvargs.h>
#include <rte_hash.h>
+#include <rte_flow_driver.h>
#define I40E_VLAN_TAG_SIZE 4
@@ -629,6 +630,23 @@ struct i40e_adapter {
struct rte_timecounter tx_tstamp_tc;
};
+union i40e_filter_t {
+ struct rte_eth_ethertype_filter ethertype_filter;
+ struct rte_eth_fdir_filter fdir_filter;
+ struct rte_eth_tunnel_filter_conf tunnel_filter;
+};
+
+typedef int (*parse_filter_t)(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter);
+struct i40e_valid_pattern {
+ enum rte_flow_item_type *items;
+ parse_filter_t parse_filter;
+};
+
int i40e_dev_switch_queues(struct i40e_pf *pf, bool on);
int i40e_vsi_release(struct i40e_vsi *vsi);
struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf,
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
new file mode 100644
index 0000000..a9ff73f
--- /dev/null
+++ b/drivers/net/i40e/i40e_flow.c
@@ -0,0 +1,447 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_log.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_eth_ctrl.h>
+#include <rte_tailq.h>
+#include <rte_flow_driver.h>
+
+#include "i40e_logs.h"
+#include "base/i40e_type.h"
+#include "i40e_ethdev.h"
+
+static int i40e_flow_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error);
+static int i40e_parse_ethertype_pattern(__rte_unused struct rte_eth_dev *dev,
+ const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ struct rte_eth_ethertype_filter *filter);
+static int i40e_parse_ethertype_act(struct rte_eth_dev *dev,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error,
+ struct rte_eth_ethertype_filter *filter);
+static int i40e_parse_attr(const struct rte_flow_attr *attr,
+ struct rte_flow_error *error);
+
+const struct rte_flow_ops i40e_flow_ops = {
+ .validate = i40e_flow_validate,
+};
+
+union i40e_filter_t cons_filter;
+
+/* Pattern matched ethertype filter */
+static enum rte_flow_item_type pattern_ethertype[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static int
+i40e_parse_ethertype_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter)
+{
+ struct rte_eth_ethertype_filter *ethertype_filter =
+ &filter->ethertype_filter;
+ int ret;
+
+ ret = i40e_parse_ethertype_pattern(dev, pattern, error,
+ ethertype_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_parse_ethertype_act(dev, actions, error,
+ ethertype_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_parse_attr(attr, error);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
+static struct i40e_valid_pattern i40e_supported_patterns[] = {
+ /* Ethertype */
+ { pattern_ethertype, i40e_parse_ethertype_filter },
+};
+
+#define NEXT_ITEM_OF_ACTION(act, actions, index) \
+ do { \
+ act = actions + index; \
+ while (act->type == RTE_FLOW_ACTION_TYPE_VOID) { \
+ index++; \
+ act = actions + index; \
+ } \
+ } while (0)
+
+/* Find the first VOID or non-VOID item pointer */
+static const struct rte_flow_item *
+i40e_find_first_item(const struct rte_flow_item *item, bool is_void)
+{
+ bool is_find;
+
+ while (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ if (is_void)
+ is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
+ else
+ is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
+ if (is_find)
+ break;
+ item++;
+ }
+ return item;
+}
+
+/* Skip all VOID items of the pattern */
+static void
+i40e_pattern_skip_void_item(struct rte_flow_item *items,
+ const struct rte_flow_item *pattern)
+{
+ uint32_t cpy_count = 0;
+ const struct rte_flow_item *pb = pattern, *pe = pattern;
+
+ for (;;) {
+ /* Find a non-void item first */
+ pb = i40e_find_first_item(pb, false);
+ if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
+ pe = pb;
+ break;
+ }
+
+ /* Find a void item */
+ pe = i40e_find_first_item(pb + 1, true);
+
+ cpy_count = pe - pb;
+ rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);
+
+ items += cpy_count;
+
+ if (pe->type == RTE_FLOW_ITEM_TYPE_END) {
+ pb = pe;
+ break;
+ }
+
+ pb = pe + 1;
+ }
+ /* Copy the END item. */
+ rte_memcpy(items, pe, sizeof(struct rte_flow_item));
+}
+
+/* Check if the pattern matches a supported item type array */
+static bool
+i40e_match_pattern(enum rte_flow_item_type *item_array,
+ struct rte_flow_item *pattern)
+{
+ struct rte_flow_item *item = pattern;
+
+ while ((*item_array == item->type) &&
+ (*item_array != RTE_FLOW_ITEM_TYPE_END)) {
+ item_array++;
+ item++;
+ }
+
+ return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
+ item->type == RTE_FLOW_ITEM_TYPE_END);
+}
+
+/* Find if there's parse filter function matched */
+static parse_filter_t
+i40e_find_parse_filter_func(struct rte_flow_item *pattern)
+{
+ parse_filter_t parse_filter = NULL;
+ uint8_t i = 0;
+
+ for (; i < RTE_DIM(i40e_supported_patterns); i++) {
+ if (i40e_match_pattern(i40e_supported_patterns[i].items,
+ pattern)) {
+ parse_filter = i40e_supported_patterns[i].parse_filter;
+ break;
+ }
+ }
+
+ return parse_filter;
+}
+
+/* Parse attributes */
+static int
+i40e_parse_attr(const struct rte_flow_attr *attr,
+ struct rte_flow_error *error)
+{
+ /* Must be input direction */
+ if (!attr->ingress) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ attr, "Only support ingress.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
+ if (attr->egress) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ attr, "Not support egress.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
+ if (attr->priority) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ attr, "Not support priority.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
+ if (attr->group) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+ attr, "Not support group.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+static int
+i40e_parse_ethertype_pattern(__rte_unused struct rte_eth_dev *dev,
+ const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ struct rte_eth_ethertype_filter *filter)
+{
+ const struct rte_flow_item *item = pattern;
+ const struct rte_flow_item_eth *eth_spec;
+ const struct rte_flow_item_eth *eth_mask;
+ enum rte_flow_item_type item_type;
+
+ for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Not support range");
+ return -rte_errno;
+ }
+ item_type = item->type;
+ switch (item_type) {
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ eth_spec = (const struct rte_flow_item_eth *)item->spec;
+ eth_mask = (const struct rte_flow_item_eth *)item->mask;
+ /* Get the MAC info. */
+ if (!eth_spec || !eth_mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "NULL ETH spec/mask");
+ return -rte_errno;
+ }
+
+ /* Mask bits of source MAC address must be full of 0.
+ * Mask bits of destination MAC address must be full
+ * of 1 or full of 0.
+ */
+ if (!is_zero_ether_addr(&eth_mask->src) ||
+ (!is_zero_ether_addr(&eth_mask->dst) &&
+ !is_broadcast_ether_addr(&eth_mask->dst))) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid MAC_addr mask");
+ return -rte_errno;
+ }
+
+ if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid ethertype mask");
+ return -rte_errno;
+ }
+
+ /* If mask bits of destination MAC address
+ * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
+ */
+ if (is_broadcast_ether_addr(&eth_mask->dst)) {
+ filter->mac_addr = eth_spec->dst;
+ filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
+ } else {
+ filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
+ }
+ filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
+
+ if (filter->ether_type == ETHER_TYPE_IPv4 ||
+ filter->ether_type == ETHER_TYPE_IPv6) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Unsupported ether_type in"
+ " control packet filter.");
+ return -rte_errno;
+ }
+ if (filter->ether_type == ETHER_TYPE_VLAN)
+ PMD_DRV_LOG(WARNING, "filter vlan ether_type in"
+ " first tag is not supported.");
+
+ break;
+ default:
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int
+i40e_parse_ethertype_act(struct rte_eth_dev *dev,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error,
+ struct rte_eth_ethertype_filter *filter)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ const struct rte_flow_action *act;
+ const struct rte_flow_action_queue *act_q;
+ uint32_t index = 0;
+
+ /* Check if the first non-void action is QUEUE or DROP. */
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
+ act->type != RTE_FLOW_ACTION_TYPE_DROP) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
+ act_q = (const struct rte_flow_action_queue *)act->conf;
+ filter->queue = act_q->index;
+ if (filter->queue >= pf->dev_data->nb_rx_queues) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Invalid queue ID for"
+ " ethertype_filter.");
+ return -rte_errno;
+ }
+ } else {
+ filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
+ }
+
+ /* Check if the next non-void item is END */
+ index++;
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+static int
+i40e_flow_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct rte_flow_item *items; /* internal pattern w/o VOID items */
+ parse_filter_t parse_filter;
+ uint32_t item_num = 0; /* non-void item number of pattern*/
+ uint32_t i = 0;
+ int ret;
+
+ if (!pattern) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL, "NULL pattern.");
+ return -rte_errno;
+ }
+
+ if (!actions) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+ NULL, "NULL action.");
+ return -rte_errno;
+ }
+
+ memset(&cons_filter, 0, sizeof(cons_filter));
+
+ /* Get the non-void item number of pattern */
+ while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
+ if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
+ item_num++;
+ i++;
+ }
+ item_num++;
+
+ items = rte_zmalloc("i40e_pattern",
+ item_num * sizeof(struct rte_flow_item), 0);
+ if (!items) {
+ rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL, "No memory for PMD internal items.");
+ return -ENOMEM;
+ }
+
+ i40e_pattern_skip_void_item(items, pattern);
+
+ /* Find if there's matched parse filter function */
+ parse_filter = i40e_find_parse_filter_func(items);
+ if (!parse_filter) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ pattern, "Unsupported pattern");
+ return -rte_errno;
+ }
+
+ ret = parse_filter(dev, attr, items, actions, error, &cons_filter);
+
+ rte_free(items);
+
+ return ret;
+}
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v4 08/17] net/i40e: parse flow director filter
2016-12-30 3:25 ` [dpdk-dev] [PATCH v4 00/17] net/i40e: consistent filter API Beilei Xing
` (6 preceding siblings ...)
2016-12-30 3:25 ` [dpdk-dev] [PATCH v4 07/17] net/i40e: add flow validate function Beilei Xing
@ 2016-12-30 3:25 ` Beilei Xing
2016-12-30 3:25 ` [dpdk-dev] [PATCH v4 09/17] net/i40e: parse tunnel filter Beilei Xing
` (9 subsequent siblings)
17 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2016-12-30 3:25 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
This patch adds i40e_parse_fdir_filter to check if a rule
is a flow director rule according to the flow pattern,
and the function also gets the flow director info.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_ethdev.c | 56 +---
drivers/net/i40e/i40e_ethdev.h | 55 ++++
drivers/net/i40e/i40e_flow.c | 607 +++++++++++++++++++++++++++++++++++++++++
3 files changed, 663 insertions(+), 55 deletions(-)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index edfd52b..bcf28cf 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -139,60 +139,6 @@
#define I40E_DEFAULT_DCB_APP_NUM 1
#define I40E_DEFAULT_DCB_APP_PRIO 3
-#define I40E_INSET_NONE 0x00000000000000000ULL
-
-/* bit0 ~ bit 7 */
-#define I40E_INSET_DMAC 0x0000000000000001ULL
-#define I40E_INSET_SMAC 0x0000000000000002ULL
-#define I40E_INSET_VLAN_OUTER 0x0000000000000004ULL
-#define I40E_INSET_VLAN_INNER 0x0000000000000008ULL
-#define I40E_INSET_VLAN_TUNNEL 0x0000000000000010ULL
-
-/* bit 8 ~ bit 15 */
-#define I40E_INSET_IPV4_SRC 0x0000000000000100ULL
-#define I40E_INSET_IPV4_DST 0x0000000000000200ULL
-#define I40E_INSET_IPV6_SRC 0x0000000000000400ULL
-#define I40E_INSET_IPV6_DST 0x0000000000000800ULL
-#define I40E_INSET_SRC_PORT 0x0000000000001000ULL
-#define I40E_INSET_DST_PORT 0x0000000000002000ULL
-#define I40E_INSET_SCTP_VT 0x0000000000004000ULL
-
-/* bit 16 ~ bit 31 */
-#define I40E_INSET_IPV4_TOS 0x0000000000010000ULL
-#define I40E_INSET_IPV4_PROTO 0x0000000000020000ULL
-#define I40E_INSET_IPV4_TTL 0x0000000000040000ULL
-#define I40E_INSET_IPV6_TC 0x0000000000080000ULL
-#define I40E_INSET_IPV6_FLOW 0x0000000000100000ULL
-#define I40E_INSET_IPV6_NEXT_HDR 0x0000000000200000ULL
-#define I40E_INSET_IPV6_HOP_LIMIT 0x0000000000400000ULL
-#define I40E_INSET_TCP_FLAGS 0x0000000000800000ULL
-
-/* bit 32 ~ bit 47, tunnel fields */
-#define I40E_INSET_TUNNEL_IPV4_DST 0x0000000100000000ULL
-#define I40E_INSET_TUNNEL_IPV6_DST 0x0000000200000000ULL
-#define I40E_INSET_TUNNEL_DMAC 0x0000000400000000ULL
-#define I40E_INSET_TUNNEL_SRC_PORT 0x0000000800000000ULL
-#define I40E_INSET_TUNNEL_DST_PORT 0x0000001000000000ULL
-#define I40E_INSET_TUNNEL_ID 0x0000002000000000ULL
-
-/* bit 48 ~ bit 55 */
-#define I40E_INSET_LAST_ETHER_TYPE 0x0001000000000000ULL
-
-/* bit 56 ~ bit 63, Flex Payload */
-#define I40E_INSET_FLEX_PAYLOAD_W1 0x0100000000000000ULL
-#define I40E_INSET_FLEX_PAYLOAD_W2 0x0200000000000000ULL
-#define I40E_INSET_FLEX_PAYLOAD_W3 0x0400000000000000ULL
-#define I40E_INSET_FLEX_PAYLOAD_W4 0x0800000000000000ULL
-#define I40E_INSET_FLEX_PAYLOAD_W5 0x1000000000000000ULL
-#define I40E_INSET_FLEX_PAYLOAD_W6 0x2000000000000000ULL
-#define I40E_INSET_FLEX_PAYLOAD_W7 0x4000000000000000ULL
-#define I40E_INSET_FLEX_PAYLOAD_W8 0x8000000000000000ULL
-#define I40E_INSET_FLEX_PAYLOAD \
- (I40E_INSET_FLEX_PAYLOAD_W1 | I40E_INSET_FLEX_PAYLOAD_W2 | \
- I40E_INSET_FLEX_PAYLOAD_W3 | I40E_INSET_FLEX_PAYLOAD_W4 | \
- I40E_INSET_FLEX_PAYLOAD_W5 | I40E_INSET_FLEX_PAYLOAD_W6 | \
- I40E_INSET_FLEX_PAYLOAD_W7 | I40E_INSET_FLEX_PAYLOAD_W8)
-
/**
* Below are values for writing un-exposed registers suggested
* by silicon experts
@@ -7617,7 +7563,7 @@ i40e_validate_input_set(enum i40e_filter_pctype pctype,
}
/* default input set fields combination per pctype */
-static uint64_t
+uint64_t
i40e_get_default_input_set(uint16_t pctype)
{
static const uint64_t default_inset_table[] = {
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 23f360b..9e3a48d 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -190,6 +190,60 @@ enum i40e_flxpld_layer_idx {
#define FLOATING_VEB_SUPPORTED_FW_MAJ 5
#define FLOATING_VEB_SUPPORTED_FW_MIN 0
+#define I40E_INSET_NONE 0x00000000000000000ULL
+
+/* bit0 ~ bit 7 */
+#define I40E_INSET_DMAC 0x0000000000000001ULL
+#define I40E_INSET_SMAC 0x0000000000000002ULL
+#define I40E_INSET_VLAN_OUTER 0x0000000000000004ULL
+#define I40E_INSET_VLAN_INNER 0x0000000000000008ULL
+#define I40E_INSET_VLAN_TUNNEL 0x0000000000000010ULL
+
+/* bit 8 ~ bit 15 */
+#define I40E_INSET_IPV4_SRC 0x0000000000000100ULL
+#define I40E_INSET_IPV4_DST 0x0000000000000200ULL
+#define I40E_INSET_IPV6_SRC 0x0000000000000400ULL
+#define I40E_INSET_IPV6_DST 0x0000000000000800ULL
+#define I40E_INSET_SRC_PORT 0x0000000000001000ULL
+#define I40E_INSET_DST_PORT 0x0000000000002000ULL
+#define I40E_INSET_SCTP_VT 0x0000000000004000ULL
+
+/* bit 16 ~ bit 31 */
+#define I40E_INSET_IPV4_TOS 0x0000000000010000ULL
+#define I40E_INSET_IPV4_PROTO 0x0000000000020000ULL
+#define I40E_INSET_IPV4_TTL 0x0000000000040000ULL
+#define I40E_INSET_IPV6_TC 0x0000000000080000ULL
+#define I40E_INSET_IPV6_FLOW 0x0000000000100000ULL
+#define I40E_INSET_IPV6_NEXT_HDR 0x0000000000200000ULL
+#define I40E_INSET_IPV6_HOP_LIMIT 0x0000000000400000ULL
+#define I40E_INSET_TCP_FLAGS 0x0000000000800000ULL
+
+/* bit 32 ~ bit 47, tunnel fields */
+#define I40E_INSET_TUNNEL_IPV4_DST 0x0000000100000000ULL
+#define I40E_INSET_TUNNEL_IPV6_DST 0x0000000200000000ULL
+#define I40E_INSET_TUNNEL_DMAC 0x0000000400000000ULL
+#define I40E_INSET_TUNNEL_SRC_PORT 0x0000000800000000ULL
+#define I40E_INSET_TUNNEL_DST_PORT 0x0000001000000000ULL
+#define I40E_INSET_TUNNEL_ID 0x0000002000000000ULL
+
+/* bit 48 ~ bit 55 */
+#define I40E_INSET_LAST_ETHER_TYPE 0x0001000000000000ULL
+
+/* bit 56 ~ bit 63, Flex Payload */
+#define I40E_INSET_FLEX_PAYLOAD_W1 0x0100000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W2 0x0200000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W3 0x0400000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W4 0x0800000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W5 0x1000000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W6 0x2000000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W7 0x4000000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W8 0x8000000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD \
+ (I40E_INSET_FLEX_PAYLOAD_W1 | I40E_INSET_FLEX_PAYLOAD_W2 | \
+ I40E_INSET_FLEX_PAYLOAD_W3 | I40E_INSET_FLEX_PAYLOAD_W4 | \
+ I40E_INSET_FLEX_PAYLOAD_W5 | I40E_INSET_FLEX_PAYLOAD_W6 | \
+ I40E_INSET_FLEX_PAYLOAD_W7 | I40E_INSET_FLEX_PAYLOAD_W8)
+
struct i40e_adapter;
/**
@@ -712,6 +766,7 @@ i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_rule *tunnel_rule,
const struct i40e_tunnel_filter_input *input);
int i40e_sw_tunnel_filter_del(struct i40e_pf *pf,
struct i40e_tunnel_filter_input *input);
+uint64_t i40e_get_default_input_set(uint16_t pctype);
/* I40E_DEV_PRIVATE_TO */
#define I40E_DEV_PRIVATE_TO_PF(adapter) \
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index a9ff73f..7b872c9 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -51,6 +51,10 @@
#include "base/i40e_type.h"
#include "i40e_ethdev.h"
+#define I40E_IPV4_TC_SHIFT 4
+#define I40E_IPV6_TC_MASK (0x00FF << I40E_IPV4_TC_SHIFT)
+#define I40E_IPV6_FRAG_HEADER 44
+
static int i40e_flow_validate(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
@@ -64,6 +68,14 @@ static int i40e_parse_ethertype_act(struct rte_eth_dev *dev,
const struct rte_flow_action *actions,
struct rte_flow_error *error,
struct rte_eth_ethertype_filter *filter);
+static int i40e_parse_fdir_pattern(struct rte_eth_dev *dev,
+ const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ struct rte_eth_fdir_filter *filter);
+static int i40e_parse_fdir_act(struct rte_eth_dev *dev,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error,
+ struct rte_eth_fdir_filter *filter);
static int i40e_parse_attr(const struct rte_flow_attr *attr,
struct rte_flow_error *error);
@@ -79,6 +91,107 @@ static enum rte_flow_item_type pattern_ethertype[] = {
RTE_FLOW_ITEM_TYPE_END,
};
+/* Pattern matched flow director filter */
+static enum rte_flow_item_type pattern_fdir_ipv4[] = {
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_ext[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_udp[] = {
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_udp_ext[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_tcp[] = {
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_tcp_ext[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_sctp[] = {
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_sctp_ext[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6[] = {
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_ext[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_udp[] = {
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_udp_ext[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_tcp[] = {
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_tcp_ext[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_sctp[] = {
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_sctp_ext[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
static int
i40e_parse_ethertype_filter(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
@@ -108,9 +221,62 @@ i40e_parse_ethertype_filter(struct rte_eth_dev *dev,
return ret;
}
+static int
+i40e_parse_fdir_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter)
+{
+ struct rte_eth_fdir_filter *fdir_filter =
+ &filter->fdir_filter;
+ int ret;
+
+ ret = i40e_parse_fdir_pattern(dev, pattern, error, fdir_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_parse_fdir_act(dev, actions, error, fdir_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_parse_attr(attr, error);
+ if (ret)
+ return ret;
+
+ if (dev->data->dev_conf.fdir_conf.mode !=
+ RTE_FDIR_MODE_PERFECT) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "Check the mode in fdir_conf.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
static struct i40e_valid_pattern i40e_supported_patterns[] = {
/* Ethertype */
{ pattern_ethertype, i40e_parse_ethertype_filter },
+ /* FDIR */
+ { pattern_fdir_ipv4, i40e_parse_fdir_filter },
+ { pattern_fdir_ipv4_ext, i40e_parse_fdir_filter },
+ { pattern_fdir_ipv4_udp, i40e_parse_fdir_filter },
+ { pattern_fdir_ipv4_udp_ext, i40e_parse_fdir_filter },
+ { pattern_fdir_ipv4_tcp, i40e_parse_fdir_filter },
+ { pattern_fdir_ipv4_tcp_ext, i40e_parse_fdir_filter },
+ { pattern_fdir_ipv4_sctp, i40e_parse_fdir_filter },
+ { pattern_fdir_ipv4_sctp_ext, i40e_parse_fdir_filter },
+ { pattern_fdir_ipv6, i40e_parse_fdir_filter },
+ { pattern_fdir_ipv6_ext, i40e_parse_fdir_filter },
+ { pattern_fdir_ipv6_udp, i40e_parse_fdir_filter },
+ { pattern_fdir_ipv6_udp_ext, i40e_parse_fdir_filter },
+ { pattern_fdir_ipv6_tcp, i40e_parse_fdir_filter },
+ { pattern_fdir_ipv6_tcp_ext, i40e_parse_fdir_filter },
+ { pattern_fdir_ipv6_sctp, i40e_parse_fdir_filter },
+ { pattern_fdir_ipv6_sctp_ext, i40e_parse_fdir_filter },
};
#define NEXT_ITEM_OF_ACTION(act, actions, index) \
@@ -385,6 +551,447 @@ i40e_parse_ethertype_act(struct rte_eth_dev *dev,
}
static int
+i40e_parse_fdir_pattern(struct rte_eth_dev *dev,
+ const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ struct rte_eth_fdir_filter *filter)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ const struct rte_flow_item *item = pattern;
+ const struct rte_flow_item_eth *eth_spec, *eth_mask;
+ const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
+ const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
+ const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
+ const struct rte_flow_item_udp *udp_spec, *udp_mask;
+ const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
+ const struct rte_flow_item_vf *vf_spec;
+ uint32_t flow_type = RTE_ETH_FLOW_UNKNOWN;
+ enum i40e_filter_pctype pctype;
+ uint64_t input_set = I40E_INSET_NONE;
+ uint16_t flag_offset;
+ enum rte_flow_item_type item_type;
+ enum rte_flow_item_type l3;
+ uint32_t j;
+
+ for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Not support range");
+ return -rte_errno;
+ }
+ item_type = item->type;
+ switch (item_type) {
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ eth_spec = (const struct rte_flow_item_eth *)item->spec;
+ eth_mask = (const struct rte_flow_item_eth *)item->mask;
+ if (eth_spec || eth_mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid ETH spec/mask");
+ return -rte_errno;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ l3 = RTE_FLOW_ITEM_TYPE_IPV4;
+ ipv4_spec =
+ (const struct rte_flow_item_ipv4 *)item->spec;
+ ipv4_mask =
+ (const struct rte_flow_item_ipv4 *)item->mask;
+ if (!ipv4_spec || !ipv4_mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "NULL IPv4 spec/mask");
+ return -rte_errno;
+ }
+
+ /* Check IPv4 mask and update input set */
+ if (ipv4_mask->hdr.version_ihl ||
+ ipv4_mask->hdr.total_length ||
+ ipv4_mask->hdr.packet_id ||
+ ipv4_mask->hdr.fragment_offset ||
+ ipv4_mask->hdr.hdr_checksum) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid IPv4 mask.");
+ return -rte_errno;
+ }
+
+ if (ipv4_mask->hdr.src_addr == UINT32_MAX)
+ input_set |= I40E_INSET_IPV4_SRC;
+ if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
+ input_set |= I40E_INSET_IPV4_DST;
+ if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
+ input_set |= I40E_INSET_IPV4_TOS;
+ if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
+ input_set |= I40E_INSET_IPV4_TTL;
+ if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
+ input_set |= I40E_INSET_IPV4_PROTO;
+
+ /* Get filter info */
+ flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_OTHER;
+ /* Check if it is fragment. */
+ flag_offset =
+ rte_be_to_cpu_16(ipv4_spec->hdr.fragment_offset);
+ if (flag_offset & IPV4_HDR_OFFSET_MASK ||
+ flag_offset & IPV4_HDR_MF_FLAG)
+ flow_type = RTE_ETH_FLOW_FRAG_IPV4;
+
+ /* Get the filter info */
+ filter->input.flow.ip4_flow.proto =
+ ipv4_spec->hdr.next_proto_id;
+ filter->input.flow.ip4_flow.tos =
+ ipv4_spec->hdr.type_of_service;
+ filter->input.flow.ip4_flow.ttl =
+ ipv4_spec->hdr.time_to_live;
+ filter->input.flow.ip4_flow.src_ip =
+ ipv4_spec->hdr.src_addr;
+ filter->input.flow.ip4_flow.dst_ip =
+ ipv4_spec->hdr.dst_addr;
+
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ l3 = RTE_FLOW_ITEM_TYPE_IPV6;
+ ipv6_spec =
+ (const struct rte_flow_item_ipv6 *)item->spec;
+ ipv6_mask =
+ (const struct rte_flow_item_ipv6 *)item->mask;
+ if (!ipv6_spec || !ipv6_mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "NULL IPv6 spec/mask");
+ return -rte_errno;
+ }
+
+ /* Check IPv6 mask and update input set */
+ if (ipv6_mask->hdr.payload_len) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid IPv6 mask");
+ return -rte_errno;
+ }
+
+ /* SCR and DST address of IPv6 shouldn't be masked */
+ for (j = 0; j < RTE_DIM(ipv6_mask->hdr.src_addr); j++) {
+ if (ipv6_mask->hdr.src_addr[j] != UINT8_MAX ||
+ ipv6_mask->hdr.dst_addr[j] != UINT8_MAX) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid IPv6 mask");
+ return -rte_errno;
+ }
+ }
+
+ input_set |= I40E_INSET_IPV6_SRC;
+ input_set |= I40E_INSET_IPV6_DST;
+
+ if ((ipv6_mask->hdr.vtc_flow &
+ rte_cpu_to_be_16(I40E_IPV6_TC_MASK))
+ == rte_cpu_to_be_16(I40E_IPV6_TC_MASK))
+ input_set |= I40E_INSET_IPV6_TC;
+ if (ipv6_mask->hdr.proto == UINT8_MAX)
+ input_set |= I40E_INSET_IPV6_NEXT_HDR;
+ if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
+ input_set |= I40E_INSET_IPV6_HOP_LIMIT;
+
+ /* Get filter info */
+ filter->input.flow.ipv6_flow.tc =
+ (uint8_t)(ipv6_spec->hdr.vtc_flow <<
+ I40E_IPV4_TC_SHIFT);
+ filter->input.flow.ipv6_flow.proto =
+ ipv6_spec->hdr.proto;
+ filter->input.flow.ipv6_flow.hop_limits =
+ ipv6_spec->hdr.hop_limits;
+
+ rte_memcpy(filter->input.flow.ipv6_flow.src_ip,
+ ipv6_spec->hdr.src_addr, 16);
+ rte_memcpy(filter->input.flow.ipv6_flow.dst_ip,
+ ipv6_spec->hdr.dst_addr, 16);
+
+ /* Check if it is fragment. */
+ if (ipv6_spec->hdr.proto == I40E_IPV6_FRAG_HEADER)
+ flow_type = RTE_ETH_FLOW_FRAG_IPV6;
+ else
+ flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_OTHER;
+ break;
+ case RTE_FLOW_ITEM_TYPE_TCP:
+ tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
+ tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
+ if (!tcp_spec || !tcp_mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "NULL TCP spec/mask");
+ return -rte_errno;
+ }
+
+ /* Check TCP mask and update input set */
+ if (tcp_mask->hdr.sent_seq ||
+ tcp_mask->hdr.recv_ack ||
+ tcp_mask->hdr.data_off ||
+ tcp_mask->hdr.tcp_flags ||
+ tcp_mask->hdr.rx_win ||
+ tcp_mask->hdr.cksum ||
+ tcp_mask->hdr.tcp_urp) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid TCP mask");
+ return -rte_errno;
+ }
+
+ if (tcp_mask->hdr.src_port != UINT16_MAX ||
+ tcp_mask->hdr.dst_port != UINT16_MAX) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid TCP mask");
+ return -rte_errno;
+ }
+
+ input_set |= I40E_INSET_SRC_PORT;
+ input_set |= I40E_INSET_DST_PORT;
+
+ /* Get filter info */
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+ flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
+ else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+ flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_TCP;
+
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
+ filter->input.flow.tcp4_flow.src_port =
+ tcp_spec->hdr.src_port;
+ filter->input.flow.tcp4_flow.dst_port =
+ tcp_spec->hdr.dst_port;
+ } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
+ filter->input.flow.tcp6_flow.src_port =
+ tcp_spec->hdr.src_port;
+ filter->input.flow.tcp6_flow.dst_port =
+ tcp_spec->hdr.dst_port;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ udp_spec = (const struct rte_flow_item_udp *)item->spec;
+ udp_mask = (const struct rte_flow_item_udp *)item->mask;
+ if (!udp_spec || !udp_mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "NULL UDP spec/mask");
+ return -rte_errno;
+ }
+
+ /* Check UDP mask and update input set*/
+ if (udp_mask->hdr.dgram_len ||
+ udp_mask->hdr.dgram_cksum) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid UDP mask");
+ return -rte_errno;
+ }
+
+ if (udp_mask->hdr.src_port != UINT16_MAX ||
+ udp_mask->hdr.dst_port != UINT16_MAX) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid UDP mask");
+ return -rte_errno;
+ }
+
+ input_set |= I40E_INSET_SRC_PORT;
+ input_set |= I40E_INSET_DST_PORT;
+
+ /* Get filter info */
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+ flow_type =
+ RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
+ else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+ flow_type =
+ RTE_ETH_FLOW_NONFRAG_IPV6_UDP;
+
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
+ filter->input.flow.udp4_flow.src_port =
+ udp_spec->hdr.src_port;
+ filter->input.flow.udp4_flow.dst_port =
+ udp_spec->hdr.dst_port;
+ } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
+ filter->input.flow.udp6_flow.src_port =
+ udp_spec->hdr.src_port;
+ filter->input.flow.udp6_flow.dst_port =
+ udp_spec->hdr.dst_port;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_SCTP:
+ sctp_spec =
+ (const struct rte_flow_item_sctp *)item->spec;
+ sctp_mask =
+ (const struct rte_flow_item_sctp *)item->mask;
+ if (!sctp_spec || !sctp_mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "NULL SCTP spec/mask");
+ return -rte_errno;
+ }
+
+ /* Check SCTP mask and update input set */
+ if (sctp_mask->hdr.cksum) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid UDP mask");
+ return -rte_errno;
+ }
+
+ if (sctp_mask->hdr.src_port != UINT16_MAX ||
+ sctp_mask->hdr.dst_port != UINT16_MAX ||
+ sctp_mask->hdr.tag != UINT32_MAX) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid UDP mask");
+ return -rte_errno;
+ }
+ input_set |= I40E_INSET_SRC_PORT;
+ input_set |= I40E_INSET_DST_PORT;
+ input_set |= I40E_INSET_SCTP_VT;
+
+ /* Get filter info */
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+ flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_SCTP;
+ else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+ flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_SCTP;
+
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
+ filter->input.flow.sctp4_flow.src_port =
+ sctp_spec->hdr.src_port;
+ filter->input.flow.sctp4_flow.dst_port =
+ sctp_spec->hdr.dst_port;
+ filter->input.flow.sctp4_flow.verify_tag =
+ sctp_spec->hdr.tag;
+ } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
+ filter->input.flow.sctp6_flow.src_port =
+ sctp_spec->hdr.src_port;
+ filter->input.flow.sctp6_flow.dst_port =
+ sctp_spec->hdr.dst_port;
+ filter->input.flow.sctp6_flow.verify_tag =
+ sctp_spec->hdr.tag;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_VF:
+ vf_spec = (const struct rte_flow_item_vf *)item->spec;
+ filter->input.flow_ext.is_vf = 1;
+ filter->input.flow_ext.dst_id = vf_spec->id;
+ if (filter->input.flow_ext.is_vf &&
+ filter->input.flow_ext.dst_id >= pf->vf_num) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid VF ID for FDIR.");
+ return -rte_errno;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ pctype = i40e_flowtype_to_pctype(flow_type);
+ if (pctype == 0 || pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Unsupported flow type");
+ return -rte_errno;
+ }
+
+ if (input_set != i40e_get_default_input_set(pctype)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Invalid input set.");
+ return -rte_errno;
+ }
+ filter->input.flow_type = flow_type;
+
+ return 0;
+}
+
+/* Parse to get the action info of a FDIR filter */
+static int
+i40e_parse_fdir_act(struct rte_eth_dev *dev,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error,
+ struct rte_eth_fdir_filter *filter)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ const struct rte_flow_action *act;
+ const struct rte_flow_action_queue *act_q;
+ const struct rte_flow_action_mark *mark_spec;
+ uint32_t index = 0;
+
+ /* Check if the first non-void action is QUEUE or DROP. */
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
+ act->type != RTE_FLOW_ACTION_TYPE_DROP) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Invalid action.");
+ return -rte_errno;
+ }
+
+ act_q = (const struct rte_flow_action_queue *)act->conf;
+ filter->action.flex_off = 0;
+ if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE)
+ filter->action.behavior = RTE_ETH_FDIR_ACCEPT;
+ else
+ filter->action.behavior = RTE_ETH_FDIR_REJECT;
+
+ filter->action.report_status = RTE_ETH_FDIR_REPORT_ID;
+ filter->action.rx_queue = act_q->index;
+
+ if (filter->action.rx_queue >= pf->dev_data->nb_rx_queues) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "Invalid queue ID for FDIR.");
+ return -rte_errno;
+ }
+
+ /* Check if the next non-void item is MARK or END. */
+ index++;
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_MARK &&
+ act->type != RTE_FLOW_ACTION_TYPE_END) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Invalid action.");
+ return -rte_errno;
+ }
+
+ if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
+ mark_spec = (const struct rte_flow_action_mark *)act->conf;
+ filter->soft_id = mark_spec->id;
+
+ /* Check if the next non-void item is END */
+ index++;
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Invalid action.");
+ return -rte_errno;
+ }
+ }
+
+ return 0;
+}
+
+static int
i40e_flow_validate(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v4 09/17] net/i40e: parse tunnel filter
2016-12-30 3:25 ` [dpdk-dev] [PATCH v4 00/17] net/i40e: consistent filter API Beilei Xing
` (7 preceding siblings ...)
2016-12-30 3:25 ` [dpdk-dev] [PATCH v4 08/17] net/i40e: parse flow director filter Beilei Xing
@ 2016-12-30 3:25 ` Beilei Xing
2016-12-30 3:25 ` [dpdk-dev] [PATCH v4 10/17] net/i40e: add flow create function Beilei Xing
` (8 subsequent siblings)
17 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2016-12-30 3:25 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
This patch adds i40e_parse_tunnel_filter to check if
a rule is a tunnel rule according to items of the flow
pattern, and the function also gets the tunnel info.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_flow.c | 394 +++++++++++++++++++++++++++++++++++++++++++
1 file changed, 394 insertions(+)
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index 7b872c9..063f8e2 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -54,6 +54,8 @@
#define I40E_IPV4_TC_SHIFT 4
#define I40E_IPV6_TC_MASK (0x00FF << I40E_IPV4_TC_SHIFT)
#define I40E_IPV6_FRAG_HEADER 44
+#define I40E_TENANT_ARRAY_NUM 3
+#define I40E_TCI_MASK 0xFFFF
static int i40e_flow_validate(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
@@ -76,6 +78,14 @@ static int i40e_parse_fdir_act(struct rte_eth_dev *dev,
const struct rte_flow_action *actions,
struct rte_flow_error *error,
struct rte_eth_fdir_filter *filter);
+static int i40e_parse_tunnel_pattern(__rte_unused struct rte_eth_dev *dev,
+ const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ struct rte_eth_tunnel_filter_conf *filter);
+static int i40e_parse_tunnel_act(struct rte_eth_dev *dev,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error,
+ struct rte_eth_tunnel_filter_conf *filter);
static int i40e_parse_attr(const struct rte_flow_attr *attr,
struct rte_flow_error *error);
@@ -192,6 +202,45 @@ static enum rte_flow_item_type pattern_fdir_ipv6_sctp_ext[] = {
RTE_FLOW_ITEM_TYPE_END,
};
/* Pattern matched tunnel filter */

/* Outer IPv4, VXLAN, inner MAC - no inner VLAN */
static enum rte_flow_item_type pattern_vxlan_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_VXLAN,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_END,
};

/* Outer IPv6, VXLAN, inner MAC - no inner VLAN */
static enum rte_flow_item_type pattern_vxlan_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_VXLAN,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_END,
};

/* Outer IPv4, VXLAN, inner MAC + inner VLAN */
static enum rte_flow_item_type pattern_vxlan_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_VXLAN,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_END,
};

/* Outer IPv6, VXLAN, inner MAC + inner VLAN */
static enum rte_flow_item_type pattern_vxlan_4[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_VXLAN,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_END,
};
+
static int
i40e_parse_ethertype_filter(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
@@ -257,6 +306,33 @@ i40e_parse_fdir_filter(struct rte_eth_dev *dev,
return 0;
}
+static int
+i40e_parse_tunnel_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter)
+{
+ struct rte_eth_tunnel_filter_conf *tunnel_filter =
+ &filter->tunnel_filter;
+ int ret;
+
+ ret = i40e_parse_tunnel_pattern(dev, pattern, error, tunnel_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_parse_tunnel_act(dev, actions, error, tunnel_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_parse_attr(attr, error);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
static struct i40e_valid_pattern i40e_supported_patterns[] = {
/* Ethertype */
{ pattern_ethertype, i40e_parse_ethertype_filter },
@@ -277,6 +353,11 @@ static struct i40e_valid_pattern i40e_supported_patterns[] = {
{ pattern_fdir_ipv6_tcp_ext, i40e_parse_fdir_filter },
{ pattern_fdir_ipv6_sctp, i40e_parse_fdir_filter },
{ pattern_fdir_ipv6_sctp_ext, i40e_parse_fdir_filter },
+ /* tunnel */
+ { pattern_vxlan_1, i40e_parse_tunnel_filter },
+ { pattern_vxlan_2, i40e_parse_tunnel_filter },
+ { pattern_vxlan_3, i40e_parse_tunnel_filter },
+ { pattern_vxlan_4, i40e_parse_tunnel_filter },
};
#define NEXT_ITEM_OF_ACTION(act, actions, index) \
@@ -991,6 +1072,319 @@ i40e_parse_fdir_act(struct rte_eth_dev *dev,
return 0;
}
/* Parse to get the action info of a tunnel filter.
 * A tunnel rule must carry exactly one QUEUE action followed by END.
 * On success the target queue is stored in filter->queue_id; on
 * failure @error is filled in and -rte_errno is returned.
 */
static int i40e_parse_tunnel_act(struct rte_eth_dev *dev,
				 const struct rte_flow_action *actions,
				 struct rte_flow_error *error,
				 struct rte_eth_tunnel_filter_conf *filter)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	const struct rte_flow_action *act;
	const struct rte_flow_action_queue *act_q;
	uint32_t index = 0;

	/* Check if the first non-void action is QUEUE. */
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
				   act, "Not supported action.");
		return -rte_errno;
	}

	/* NOTE(review): act->conf is assumed non-NULL for a QUEUE
	 * action here - confirm callers cannot pass a NULL conf.
	 */
	act_q = (const struct rte_flow_action_queue *)act->conf;
	filter->queue_id = act_q->index;
	/* The target queue must exist on this port. */
	if (filter->queue_id >= pf->dev_data->nb_rx_queues) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION,
				   act, "Invalid queue ID for tunnel filter");
		return -rte_errno;
	}

	/* Check if the next non-void item is END */
	index++;
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
				   act, "Not supported action.");
		return -rte_errno;
	}

	return 0;
}
+
+static int
+i40e_check_tenant_id_mask(const uint8_t *mask)
+{
+ uint32_t j;
+ int is_masked = 0;
+
+ for (j = 0; j < I40E_TENANT_ARRAY_NUM; j++) {
+ if (*(mask + j) == UINT8_MAX) {
+ if (j > 0 && (*(mask + j) != *(mask + j - 1)))
+ return -EINVAL;
+ is_masked = 0;
+ } else if (*(mask + j) == 0) {
+ if (j > 0 && (*(mask + j) != *(mask + j - 1)))
+ return -EINVAL;
+ is_masked = 1;
+ } else {
+ return -EINVAL;
+ }
+ }
+
+ return is_masked;
+}
+
+static int
+i40e_parse_vxlan_pattern(const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ struct rte_eth_tunnel_filter_conf *filter)
+{
+ const struct rte_flow_item *item = pattern;
+ const struct rte_flow_item_eth *eth_spec;
+ const struct rte_flow_item_eth *eth_mask;
+ const struct rte_flow_item_eth *o_eth_spec;
+ const struct rte_flow_item_eth *o_eth_mask;
+ const struct rte_flow_item_vxlan *vxlan_spec;
+ const struct rte_flow_item_vxlan *vxlan_mask;
+ const struct rte_flow_item_eth *i_eth_spec;
+ const struct rte_flow_item_eth *i_eth_mask;
+ const struct rte_flow_item_vlan *vlan_spec = NULL;
+ const struct rte_flow_item_vlan *vlan_mask = NULL;
+ bool is_vni_masked = 0;
+ enum rte_flow_item_type item_type;
+ bool vxlan_flag = 0;
+
+ for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Not support range");
+ return -rte_errno;
+ }
+ item_type = item->type;
+ switch (item_type) {
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ eth_spec = (const struct rte_flow_item_eth *)item->spec;
+ eth_mask = (const struct rte_flow_item_eth *)item->mask;
+ if ((!eth_spec && eth_mask) ||
+ (eth_spec && !eth_mask)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid ether spec/mask");
+ return -rte_errno;
+ }
+
+ if (eth_spec && eth_mask) {
+ /* DST address of inner MAC shouldn't be masked.
+ * SRC address of Inner MAC should be masked.
+ */
+ if (!is_broadcast_ether_addr(ð_mask->dst) ||
+ !is_zero_ether_addr(ð_mask->src) ||
+ eth_mask->type) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid ether spec/mask");
+ return -rte_errno;
+ }
+
+ if (!vxlan_flag)
+ rte_memcpy(&filter->outer_mac,
+ ð_spec->dst,
+ ETHER_ADDR_LEN);
+ else
+ rte_memcpy(&filter->inner_mac,
+ ð_spec->dst,
+ ETHER_ADDR_LEN);
+ }
+
+ if (!vxlan_flag) {
+ o_eth_spec = eth_spec;
+ o_eth_mask = eth_mask;
+ } else {
+ i_eth_spec = eth_spec;
+ i_eth_mask = eth_mask;
+ }
+
+ break;
+ case RTE_FLOW_ITEM_TYPE_VLAN:
+ vlan_spec =
+ (const struct rte_flow_item_vlan *)item->spec;
+ vlan_mask =
+ (const struct rte_flow_item_vlan *)item->mask;
+ if (vxlan_flag) {
+ vlan_spec =
+ (const struct rte_flow_item_vlan *)item->spec;
+ vlan_mask =
+ (const struct rte_flow_item_vlan *)item->mask;
+ if (!(vlan_spec && vlan_mask)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid vlan item");
+ return -rte_errno;
+ }
+ } else {
+ if (vlan_spec || vlan_mask)
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid vlan item");
+ return -rte_errno;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ /* IPv4/IPv6/UDP are used to describe protocol,
+ * spec amd mask should be NULL.
+ */
+ if (item->spec || item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid IPv4 item");
+ return -rte_errno;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_VXLAN:
+ vxlan_spec =
+ (const struct rte_flow_item_vxlan *)item->spec;
+ vxlan_mask =
+ (const struct rte_flow_item_vxlan *)item->mask;
+ /* Check if VXLAN item is used to describe protocol.
+ * If yes, both spec and mask should be NULL.
+ * If no, either spec or mask shouldn't be NULL.
+ */
+ if ((!vxlan_spec && vxlan_mask) ||
+ (vxlan_spec && !vxlan_mask)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid VXLAN item");
+ return -rte_errno;
+ }
+
+ /* Check if VNI is masked. */
+ if (vxlan_mask) {
+ is_vni_masked =
+ i40e_check_tenant_id_mask(vxlan_mask->vni);
+ if (is_vni_masked < 0) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid VNI mask");
+ return -rte_errno;
+ }
+ }
+ vxlan_flag = 1;
+ break;
+ default:
+ break;
+ }
+ }
+
+ /* Check specification and mask to get the filter type */
+ if (vlan_spec && vlan_mask &&
+ (vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK))) {
+ /* If there's inner vlan */
+ filter->inner_vlan = rte_be_to_cpu_16(vlan_spec->tci)
+ & I40E_TCI_MASK;
+ if (vxlan_spec && vxlan_mask && !is_vni_masked) {
+ /* If there's vxlan */
+ rte_memcpy(&filter->tenant_id, vxlan_spec->vni,
+ RTE_DIM(vxlan_spec->vni));
+ if (!o_eth_spec && !o_eth_mask &&
+ i_eth_spec && i_eth_mask)
+ filter->filter_type =
+ RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID;
+ else {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL,
+ "Invalid filter type");
+ return -rte_errno;
+ }
+ } else if (!vxlan_spec && !vxlan_mask) {
+ /* If there's no vxlan */
+ if (!o_eth_spec && !o_eth_mask &&
+ i_eth_spec && i_eth_mask)
+ filter->filter_type =
+ RTE_TUNNEL_FILTER_IMAC_IVLAN;
+ else {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL,
+ "Invalid filter type");
+ return -rte_errno;
+ }
+ } else {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL,
+ "Invalid filter type");
+ return -rte_errno;
+ }
+ } else if ((!vlan_spec && !vlan_mask) ||
+ (vlan_spec && vlan_mask && vlan_mask->tci == 0x0)) {
+ /* If there's no inner vlan */
+ if (vxlan_spec && vxlan_mask && !is_vni_masked) {
+ /* If there's vxlan */
+ rte_memcpy(&filter->tenant_id, vxlan_spec->vni,
+ RTE_DIM(vxlan_spec->vni));
+ if (!o_eth_spec && !o_eth_mask &&
+ i_eth_spec && i_eth_mask)
+ filter->filter_type =
+ RTE_TUNNEL_FILTER_IMAC_TENID;
+ else if (o_eth_spec && o_eth_mask &&
+ i_eth_spec && i_eth_mask)
+ filter->filter_type =
+ RTE_TUNNEL_FILTER_OMAC_TENID_IMAC;
+ } else if (!vxlan_spec && !vxlan_mask) {
+ /* If there's no vxlan */
+ if (!o_eth_spec && !o_eth_mask &&
+ i_eth_spec && i_eth_mask) {
+ filter->filter_type = ETH_TUNNEL_FILTER_IMAC;
+ } else {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "Invalid filter type");
+ return -rte_errno;
+ }
+ } else {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "Invalid filter type");
+ return -rte_errno;
+ }
+ } else {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "Not supported by tunnel filter.");
+ return -rte_errno;
+ }
+
+ filter->tunnel_type = RTE_TUNNEL_TYPE_VXLAN;
+
+ return 0;
+}
+
+static int
+i40e_parse_tunnel_pattern(__rte_unused struct rte_eth_dev *dev,
+ const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ struct rte_eth_tunnel_filter_conf *filter)
+{
+ int ret;
+
+ ret = i40e_parse_vxlan_pattern(pattern, error, filter);
+
+ return ret;
+}
+
static int
i40e_flow_validate(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v4 10/17] net/i40e: add flow create function
2016-12-30 3:25 ` [dpdk-dev] [PATCH v4 00/17] net/i40e: consistent filter API Beilei Xing
` (8 preceding siblings ...)
2016-12-30 3:25 ` [dpdk-dev] [PATCH v4 09/17] net/i40e: parse tunnel filter Beilei Xing
@ 2016-12-30 3:25 ` Beilei Xing
2016-12-30 3:25 ` [dpdk-dev] [PATCH v4 11/17] net/i40e: add flow destroy function Beilei Xing
` (7 subsequent siblings)
17 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2016-12-30 3:25 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
This patch adds i40e_flow_create function to create a
rule. It checks whether a flow matches an ethertype filter,
a flow director filter or a tunnel filter; if the flow
matches one of these filter types, the filter is set to HW.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_ethdev.c | 9 +++--
drivers/net/i40e/i40e_ethdev.h | 21 ++++++++++++
drivers/net/i40e/i40e_fdir.c | 2 +-
drivers/net/i40e/i40e_flow.c | 77 ++++++++++++++++++++++++++++++++++++++++++
4 files changed, 103 insertions(+), 6 deletions(-)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index bcf28cf..e49045a 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -353,9 +353,6 @@ static int i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
static int i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
struct rte_eth_udp_tunnel *udp_tunnel);
static void i40e_filter_input_set_init(struct i40e_pf *pf);
-static int i40e_ethertype_filter_set(struct i40e_pf *pf,
- struct rte_eth_ethertype_filter *filter,
- bool add);
static int i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
enum rte_filter_op filter_op,
void *arg);
@@ -1233,6 +1230,8 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
goto err_fdir_hash_map_alloc;
}
+ TAILQ_INIT(&pf->flow_list);
+
return 0;
err_fdir_hash_map_alloc:
@@ -6611,7 +6610,7 @@ i40e_sw_tunnel_filter_del(struct i40e_pf *pf,
return 0;
}
-static int
+int
i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
struct rte_eth_tunnel_filter_conf *tunnel_filter,
uint8_t add)
@@ -8256,7 +8255,7 @@ i40e_sw_ethertype_filter_del(struct i40e_pf *pf,
* Configure ethertype filter, which can director packet by filtering
* with mac address and ether_type or only ether_type
*/
-static int
+int
i40e_ethertype_filter_set(struct i40e_pf *pf,
struct rte_eth_ethertype_filter *filter,
bool add)
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 9e3a48d..b33910d 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -536,6 +536,17 @@ struct i40e_mirror_rule {
TAILQ_HEAD(i40e_mirror_rule_list, i40e_mirror_rule);
/*
+ * Structure to store a created flow.
+ */
+struct i40e_flow {
+ TAILQ_ENTRY(i40e_flow) node;
+ enum rte_filter_type filter_type;
+ void *rule;
+};
+
+TAILQ_HEAD(i40e_flow_list, i40e_flow);
+
+/*
* Structure to store private data specific for PF instance.
*/
struct i40e_pf {
@@ -592,6 +603,7 @@ struct i40e_pf {
bool floating_veb; /* The flag to use the floating VEB */
/* The floating enable flag for the specific VF */
bool floating_veb_list[I40E_MAX_VF];
+ struct i40e_flow_list flow_list;
};
enum pending_msg {
@@ -767,6 +779,15 @@ i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_rule *tunnel_rule,
int i40e_sw_tunnel_filter_del(struct i40e_pf *pf,
struct i40e_tunnel_filter_input *input);
uint64_t i40e_get_default_input_set(uint16_t pctype);
+int i40e_ethertype_filter_set(struct i40e_pf *pf,
+ struct rte_eth_ethertype_filter *filter,
+ bool add);
+int i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
+ const struct rte_eth_fdir_filter *filter,
+ bool add);
+int i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
+ struct rte_eth_tunnel_filter_conf *tunnel_filter,
+ uint8_t add);
/* I40E_DEV_PRIVATE_TO */
#define I40E_DEV_PRIVATE_TO_PF(adapter) \
diff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c
index f89dbc9..91d91aa 100644
--- a/drivers/net/i40e/i40e_fdir.c
+++ b/drivers/net/i40e/i40e_fdir.c
@@ -1099,7 +1099,7 @@ i40e_sw_fdir_filter_del(struct i40e_pf *pf, struct rte_eth_fdir_input *input)
* @filter: fdir filter entry
* @add: 0 - delete, 1 - add
*/
-static int
+int
i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
const struct rte_eth_fdir_filter *filter,
bool add)
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index 063f8e2..67ea83d 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -62,6 +62,11 @@ static int i40e_flow_validate(struct rte_eth_dev *dev,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
struct rte_flow_error *error);
+static struct rte_flow *i40e_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error);
static int i40e_parse_ethertype_pattern(__rte_unused struct rte_eth_dev *dev,
const struct rte_flow_item *pattern,
struct rte_flow_error *error,
@@ -91,9 +96,11 @@ static int i40e_parse_attr(const struct rte_flow_attr *attr,
const struct rte_flow_ops i40e_flow_ops = {
.validate = i40e_flow_validate,
+ .create = i40e_flow_create,
};
union i40e_filter_t cons_filter;
+enum rte_filter_type cons_filter_type = RTE_ETH_FILTER_NONE;
/* Pattern matched ethertype filter */
static enum rte_flow_item_type pattern_ethertype[] = {
@@ -267,6 +274,8 @@ i40e_parse_ethertype_filter(struct rte_eth_dev *dev,
if (ret)
return ret;
+ cons_filter_type = RTE_ETH_FILTER_ETHERTYPE;
+
return ret;
}
@@ -294,6 +303,8 @@ i40e_parse_fdir_filter(struct rte_eth_dev *dev,
if (ret)
return ret;
+ cons_filter_type = RTE_ETH_FILTER_FDIR;
+
if (dev->data->dev_conf.fdir_conf.mode !=
RTE_FDIR_MODE_PERFECT) {
rte_flow_error_set(error, ENOTSUP,
@@ -330,6 +341,8 @@ i40e_parse_tunnel_filter(struct rte_eth_dev *dev,
if (ret)
return ret;
+ cons_filter_type = RTE_ETH_FILTER_TUNNEL;
+
return ret;
}
@@ -1446,3 +1459,67 @@ i40e_flow_validate(struct rte_eth_dev *dev,
return ret;
}
+
+/* rte_flow create callback: validate the rule, program the matching
+ * HW filter and track the flow on the PF flow list so it can later be
+ * destroyed or flushed.
+ */
+static struct rte_flow *
+i40e_flow_create(struct rte_eth_dev *dev,
+		 const struct rte_flow_attr *attr,
+		 const struct rte_flow_item pattern[],
+		 const struct rte_flow_action actions[],
+		 struct rte_flow_error *error)
+{
+	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	struct i40e_flow *flow;
+	int ret;
+
+	/* Validate before allocating: a validation failure then needs no
+	 * cleanup (the original order leaked @flow on this path) and the
+	 * more specific error set by the validator is preserved.
+	 */
+	ret = i40e_flow_validate(dev, attr, pattern, actions, error);
+	if (ret < 0)
+		return NULL;
+
+	flow = rte_zmalloc("i40e_flow", sizeof(struct i40e_flow), 0);
+	if (!flow) {
+		rte_flow_error_set(error, ENOMEM,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Failed to allocate memory");
+		return NULL;
+	}
+
+	/* cons_filter/cons_filter_type were filled in by the parse stage
+	 * of i40e_flow_validate above.
+	 */
+	switch (cons_filter_type) {
+	case RTE_ETH_FILTER_ETHERTYPE:
+		ret = i40e_ethertype_filter_set(pf,
+					&cons_filter.ethertype_filter, 1);
+		if (ret)
+			goto free_flow;
+		/* The new rule is appended at the tail of the SW list. */
+		flow->rule = TAILQ_LAST(&pf->ethertype.ethertype_list,
+					i40e_ethertype_filter_list);
+		break;
+	case RTE_ETH_FILTER_FDIR:
+		ret = i40e_add_del_fdir_filter(dev,
+					&cons_filter.fdir_filter, 1);
+		if (ret)
+			goto free_flow;
+		flow->rule = TAILQ_LAST(&pf->fdir.fdir_list,
+					i40e_fdir_filter_list);
+		break;
+	case RTE_ETH_FILTER_TUNNEL:
+		ret = i40e_dev_tunnel_filter_set(pf,
+					&cons_filter.tunnel_filter, 1);
+		if (ret)
+			goto free_flow;
+		flow->rule = TAILQ_LAST(&pf->tunnel.tunnel_list,
+					i40e_tunnel_filter_list);
+		break;
+	default:
+		/* Report a real error code: previously @ret could still be
+		 * 0 here, so rte_flow_error_set() was given error code 0.
+		 */
+		ret = -EINVAL;
+		goto free_flow;
+	}
+
+	flow->filter_type = cons_filter_type;
+	TAILQ_INSERT_TAIL(&pf->flow_list, flow, node);
+	return (struct rte_flow *)flow;
+
+free_flow:
+	rte_flow_error_set(error, -ret,
+			   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+			   "Failed to create flow.");
+	rte_free(flow);
+	return NULL;
+}
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v4 11/17] net/i40e: add flow destroy function
2016-12-30 3:25 ` [dpdk-dev] [PATCH v4 00/17] net/i40e: consistent filter API Beilei Xing
` (9 preceding siblings ...)
2016-12-30 3:25 ` [dpdk-dev] [PATCH v4 10/17] net/i40e: add flow create function Beilei Xing
@ 2016-12-30 3:25 ` Beilei Xing
2016-12-30 3:25 ` [dpdk-dev] [PATCH v4 12/17] net/i40e: destroy ethertype filter Beilei Xing
` (6 subsequent siblings)
17 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2016-12-30 3:25 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
This patch adds i40e_flow_destroy function to destroy
a flow for users.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_flow.c | 33 +++++++++++++++++++++++++++++++++
1 file changed, 33 insertions(+)
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index 67ea83d..cda308d 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -67,6 +67,9 @@ static struct rte_flow *i40e_flow_create(struct rte_eth_dev *dev,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
struct rte_flow_error *error);
+static int i40e_flow_destroy(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ struct rte_flow_error *error);
static int i40e_parse_ethertype_pattern(__rte_unused struct rte_eth_dev *dev,
const struct rte_flow_item *pattern,
struct rte_flow_error *error,
@@ -97,6 +100,7 @@ static int i40e_parse_attr(const struct rte_flow_attr *attr,
const struct rte_flow_ops i40e_flow_ops = {
.validate = i40e_flow_validate,
.create = i40e_flow_create,
+ .destroy = i40e_flow_destroy,
};
union i40e_filter_t cons_filter;
@@ -1523,3 +1527,32 @@ i40e_flow_create(struct rte_eth_dev *dev,
rte_free(flow);
return NULL;
}
+
+/* rte_flow destroy callback: remove the HW filter backing @flow, then
+ * drop the flow from the PF flow list and free it. Per-filter-type
+ * destroy handlers are introduced by later patches in this series; at
+ * this point every type falls through to -EINVAL.
+ */
+static int
+i40e_flow_destroy(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_flow *pmd_flow = (struct i40e_flow *)flow;
+ enum rte_filter_type filter_type = pmd_flow->filter_type;
+ int ret = 0;
+
+ switch (filter_type) {
+ default:
+ PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
+ filter_type);
+ ret = -EINVAL;
+ break;
+ }
+
+ /* Only untrack and free the SW state when HW removal succeeded. */
+ if (!ret) {
+ TAILQ_REMOVE(&pf->flow_list, pmd_flow, node);
+ rte_free(pmd_flow);
+ } else
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to destroy flow.");
+
+ return ret;
+}
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v4 12/17] net/i40e: destroy ethertype filter
2016-12-30 3:25 ` [dpdk-dev] [PATCH v4 00/17] net/i40e: consistent filter API Beilei Xing
` (10 preceding siblings ...)
2016-12-30 3:25 ` [dpdk-dev] [PATCH v4 11/17] net/i40e: add flow destroy function Beilei Xing
@ 2016-12-30 3:25 ` Beilei Xing
2016-12-30 3:25 ` [dpdk-dev] [PATCH v4 13/17] net/i40e: destroy tunnel filter Beilei Xing
` (5 subsequent siblings)
17 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2016-12-30 3:25 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
This patch adds the i40e_dev_destroy_ethertype_filter function
to destroy an ethertype filter for users.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_flow.c | 42 ++++++++++++++++++++++++++++++++++++++++++
1 file changed, 42 insertions(+)
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index cda308d..8d1e2ff 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -49,6 +49,7 @@
#include "i40e_logs.h"
#include "base/i40e_type.h"
+#include "base/i40e_prototype.h"
#include "i40e_ethdev.h"
#define I40E_IPV4_TC_SHIFT 4
@@ -96,6 +97,8 @@ static int i40e_parse_tunnel_act(struct rte_eth_dev *dev,
struct rte_eth_tunnel_filter_conf *filter);
static int i40e_parse_attr(const struct rte_flow_attr *attr,
struct rte_flow_error *error);
+static int i40e_dev_destroy_ethertype_filter(struct i40e_pf *pf,
+ struct i40e_ethertype_filter *filter);
const struct rte_flow_ops i40e_flow_ops = {
.validate = i40e_flow_validate,
@@ -1539,6 +1542,10 @@ i40e_flow_destroy(struct rte_eth_dev *dev,
int ret = 0;
switch (filter_type) {
+ case RTE_ETH_FILTER_ETHERTYPE:
+ ret = i40e_dev_destroy_ethertype_filter(pf,
+ (struct i40e_ethertype_filter *)pmd_flow->rule);
+ break;
default:
PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
filter_type);
@@ -1556,3 +1563,38 @@ i40e_flow_destroy(struct rte_eth_dev *dev,
return ret;
}
+
+/* Remove one ethertype filter from HW (admin-queue control packet
+ * filter on the main VSI) and from the driver's SW tracking list.
+ * Returns 0 on success, a negative value on AQ failure, or -EINVAL
+ * when the filter is not found in the SW list.
+ */
+static int
+i40e_dev_destroy_ethertype_filter(struct i40e_pf *pf,
+ struct i40e_ethertype_filter *filter)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
+ struct i40e_ethertype_filter *node;
+ struct i40e_control_filter_stats stats;
+ uint16_t flags = 0;
+ int ret = 0;
+
+ /* Rebuild the AQ flags from the rule's RTE flags — presumably the
+ * same construction as the add path; confirm against
+ * i40e_ethertype_filter_set.
+ */
+ if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
+ flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
+ if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
+ flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
+ flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
+
+ memset(&stats, 0, sizeof(stats));
+ /* is_add = 0: this AQ call removes the filter from HW. */
+ ret = i40e_aq_add_rem_control_packet_filter(hw,
+ filter->input.mac_addr.addr_bytes,
+ filter->input.ether_type,
+ flags, pf->main_vsi->seid,
+ filter->queue, 0, &stats, NULL);
+ if (ret < 0)
+ return ret;
+
+ /* Drop the matching SW entry so SW state stays in sync with HW. */
+ node = i40e_sw_ethertype_filter_lookup(ethertype_rule, &filter->input);
+ if (!node)
+ return -EINVAL;
+
+ ret = i40e_sw_ethertype_filter_del(pf, &node->input);
+
+ return ret;
+}
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v4 13/17] net/i40e: destroy tunnel filter
2016-12-30 3:25 ` [dpdk-dev] [PATCH v4 00/17] net/i40e: consistent filter API Beilei Xing
` (11 preceding siblings ...)
2016-12-30 3:25 ` [dpdk-dev] [PATCH v4 12/17] net/i40e: destroy ethertype filter Beilei Xing
@ 2016-12-30 3:25 ` Beilei Xing
2016-12-30 3:25 ` [dpdk-dev] [PATCH v4 14/17] net/i40e: destroy flow directory filter Beilei Xing
` (4 subsequent siblings)
17 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2016-12-30 3:25 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
This patch adds i40e_dev_destroy_tunnel_filter function
to destroy a tunnel filter for users.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_flow.c | 41 +++++++++++++++++++++++++++++++++++++++++
1 file changed, 41 insertions(+)
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index 8d1e2ff..9fc311d 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -99,6 +99,8 @@ static int i40e_parse_attr(const struct rte_flow_attr *attr,
struct rte_flow_error *error);
static int i40e_dev_destroy_ethertype_filter(struct i40e_pf *pf,
struct i40e_ethertype_filter *filter);
+static int i40e_dev_destroy_tunnel_filter(struct i40e_pf *pf,
+ struct i40e_tunnel_filter *filter);
const struct rte_flow_ops i40e_flow_ops = {
.validate = i40e_flow_validate,
@@ -1546,6 +1548,10 @@ i40e_flow_destroy(struct rte_eth_dev *dev,
ret = i40e_dev_destroy_ethertype_filter(pf,
(struct i40e_ethertype_filter *)pmd_flow->rule);
break;
+ case RTE_ETH_FILTER_TUNNEL:
+ ret = i40e_dev_destroy_tunnel_filter(pf,
+ (struct i40e_tunnel_filter *)pmd_flow->rule);
+ break;
default:
PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
filter_type);
@@ -1598,3 +1604,38 @@ i40e_dev_destroy_ethertype_filter(struct i40e_pf *pf,
return ret;
}
+
+/* Remove one cloud/tunnel filter from HW (admin queue, main VSI) and
+ * from the driver's SW tracking list. Returns 0 on success, a negative
+ * value on AQ failure, or -EINVAL when the filter is not found in the
+ * SW list.
+ */
+static int
+i40e_dev_destroy_tunnel_filter(struct i40e_pf *pf,
+ struct i40e_tunnel_filter *filter)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_vsi *vsi = pf->main_vsi;
+ struct i40e_aqc_add_remove_cloud_filters_element_data cld_filter;
+ struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
+ struct i40e_tunnel_filter *node;
+ int ret = 0;
+
+ /* Re-marshal the stored rule into the AQ element format so the
+ * firmware can match and remove the HW entry.
+ */
+ memset(&cld_filter, 0, sizeof(cld_filter));
+ ether_addr_copy((struct ether_addr *)&filter->input.outer_mac,
+ (struct ether_addr *)&cld_filter.outer_mac);
+ ether_addr_copy((struct ether_addr *)&filter->input.inner_mac,
+ (struct ether_addr *)&cld_filter.inner_mac);
+ cld_filter.inner_vlan = filter->input.inner_vlan;
+ cld_filter.flags = filter->input.flags;
+ cld_filter.tenant_id = filter->input.tenant_id;
+ cld_filter.queue_number = filter->queue;
+
+ ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
+ &cld_filter, 1);
+ if (ret < 0)
+ return ret;
+
+ /* Drop the matching SW entry so SW state stays in sync with HW. */
+ node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &filter->input);
+ if (!node)
+ return -EINVAL;
+
+ ret = i40e_sw_tunnel_filter_del(pf, &node->input);
+
+ return ret;
+}
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v4 14/17] net/i40e: destroy flow directory filter
2016-12-30 3:25 ` [dpdk-dev] [PATCH v4 00/17] net/i40e: consistent filter API Beilei Xing
` (12 preceding siblings ...)
2016-12-30 3:25 ` [dpdk-dev] [PATCH v4 13/17] net/i40e: destroy tunnel filter Beilei Xing
@ 2016-12-30 3:25 ` Beilei Xing
2016-12-30 3:25 ` [dpdk-dev] [PATCH v4 15/17] net/i40e: add flow flush function Beilei Xing
` (3 subsequent siblings)
17 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2016-12-30 3:25 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
This patch supports destroying a flow director filter
for users.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_flow.c | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index 9fc311d..e56e8b8 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -1552,6 +1552,10 @@ i40e_flow_destroy(struct rte_eth_dev *dev,
ret = i40e_dev_destroy_tunnel_filter(pf,
(struct i40e_tunnel_filter *)pmd_flow->rule);
break;
+ case RTE_ETH_FILTER_FDIR:
+ ret = i40e_add_del_fdir_filter(dev,
+ &((struct i40e_fdir_filter *)pmd_flow->rule)->fdir, 0);
+ break;
default:
PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
filter_type);
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v4 15/17] net/i40e: add flow flush function
2016-12-30 3:25 ` [dpdk-dev] [PATCH v4 00/17] net/i40e: consistent filter API Beilei Xing
` (13 preceding siblings ...)
2016-12-30 3:25 ` [dpdk-dev] [PATCH v4 14/17] net/i40e: destroy flow directory filter Beilei Xing
@ 2016-12-30 3:25 ` Beilei Xing
2016-12-30 3:25 ` [dpdk-dev] [PATCH v4 16/17] net/i40e: flush ethertype filters Beilei Xing
` (2 subsequent siblings)
17 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2016-12-30 3:25 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
This patch adds i40e_flow_flush function to flush all
filters for users. And flow director flush function
is involved first.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_ethdev.h | 1 +
drivers/net/i40e/i40e_fdir.c | 4 +---
drivers/net/i40e/i40e_flow.c | 51 ++++++++++++++++++++++++++++++++++++++++++
3 files changed, 53 insertions(+), 3 deletions(-)
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index b33910d..57fd796 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -788,6 +788,7 @@ int i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
int i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
struct rte_eth_tunnel_filter_conf *tunnel_filter,
uint8_t add);
+int i40e_fdir_flush(struct rte_eth_dev *dev);
/* I40E_DEV_PRIVATE_TO */
#define I40E_DEV_PRIVATE_TO_PF(adapter) \
diff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c
index 91d91aa..67d63ff 100644
--- a/drivers/net/i40e/i40e_fdir.c
+++ b/drivers/net/i40e/i40e_fdir.c
@@ -119,8 +119,6 @@ static int i40e_fdir_filter_programming(struct i40e_pf *pf,
enum i40e_filter_pctype pctype,
const struct rte_eth_fdir_filter *filter,
bool add);
-static int i40e_fdir_flush(struct rte_eth_dev *dev);
-
static int i40e_fdir_filter_convert(const struct rte_eth_fdir_filter *input,
struct i40e_fdir_filter *filter);
static struct i40e_fdir_filter *
@@ -1325,7 +1323,7 @@ i40e_fdir_filter_programming(struct i40e_pf *pf,
* i40e_fdir_flush - clear all filters of Flow Director table
* @pf: board private structure
*/
-static int
+int
i40e_fdir_flush(struct rte_eth_dev *dev)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index e56e8b8..eacea68 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -68,6 +68,8 @@ static struct rte_flow *i40e_flow_create(struct rte_eth_dev *dev,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
struct rte_flow_error *error);
+static int i40e_flow_flush(struct rte_eth_dev *dev,
+ struct rte_flow_error *error);
static int i40e_flow_destroy(struct rte_eth_dev *dev,
struct rte_flow *flow,
struct rte_flow_error *error);
@@ -101,11 +103,13 @@ static int i40e_dev_destroy_ethertype_filter(struct i40e_pf *pf,
struct i40e_ethertype_filter *filter);
static int i40e_dev_destroy_tunnel_filter(struct i40e_pf *pf,
struct i40e_tunnel_filter *filter);
+static int i40e_fdir_filter_flush(struct i40e_pf *pf);
const struct rte_flow_ops i40e_flow_ops = {
.validate = i40e_flow_validate,
.create = i40e_flow_create,
.destroy = i40e_flow_destroy,
+ .flush = i40e_flow_flush,
};
union i40e_filter_t cons_filter;
@@ -1643,3 +1647,50 @@ i40e_dev_destroy_tunnel_filter(struct i40e_pf *pf,
return ret;
}
+
+/* rte_flow flush callback: remove all flows created through the flow
+ * API. Only flow director filters are flushed at this point in the
+ * series; ethertype and tunnel flushing are added by later patches.
+ */
+static int
+i40e_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ int ret;
+
+ ret = i40e_fdir_filter_flush(pf);
+ if (ret)
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to flush FDIR flows.");
+
+ return ret;
+}
+
+/* Flush all flow director filters: clear the HW FDIR table in one
+ * shot, then empty the SW FDIR list and remove the corresponding
+ * entries from the PF flow list.
+ */
+static int
+i40e_fdir_filter_flush(struct i40e_pf *pf)
+{
+ struct rte_eth_dev *dev = pf->adapter->eth_dev;
+ struct i40e_fdir_info *fdir_info = &pf->fdir;
+ struct i40e_fdir_filter *fdir_filter;
+ struct i40e_flow *flow;
+ void *temp;
+ int ret;
+
+ /* HW flush first; SW state is only torn down if it succeeded. */
+ ret = i40e_fdir_flush(dev);
+ if (!ret) {
+ /* Delete FDIR filters in FDIR list. */
+ while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
+ ret = i40e_sw_fdir_filter_del(pf,
+ &fdir_filter->fdir.input);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Delete FDIR flows in flow list. */
+ TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
+ if (flow->filter_type == RTE_ETH_FILTER_FDIR) {
+ TAILQ_REMOVE(&pf->flow_list, flow, node);
+ rte_free(flow);
+ }
+ }
+ }
+
+ return ret;
+}
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v4 16/17] net/i40e: flush ethertype filters
2016-12-30 3:25 ` [dpdk-dev] [PATCH v4 00/17] net/i40e: consistent filter API Beilei Xing
` (14 preceding siblings ...)
2016-12-30 3:25 ` [dpdk-dev] [PATCH v4 15/17] net/i40e: add flow flush function Beilei Xing
@ 2016-12-30 3:25 ` Beilei Xing
2016-12-30 3:25 ` [dpdk-dev] [PATCH v4 17/17] net/i40e: flush tunnel filters Beilei Xing
2017-01-04 3:22 ` [dpdk-dev] [PATCH v5 00/17] net/i40e: consistent filter API Beilei Xing
17 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2016-12-30 3:25 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
This patch adds i40e_ethertype_filter_flush function
to flush all ethertype filters, including filters in
SW and HW.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_flow.c | 41 ++++++++++++++++++++++++++++++++++++++++-
1 file changed, 40 insertions(+), 1 deletion(-)
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index eacea68..5c16fb8 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -104,6 +104,7 @@ static int i40e_dev_destroy_ethertype_filter(struct i40e_pf *pf,
static int i40e_dev_destroy_tunnel_filter(struct i40e_pf *pf,
struct i40e_tunnel_filter *filter);
static int i40e_fdir_filter_flush(struct i40e_pf *pf);
+static int i40e_ethertype_filter_flush(struct i40e_pf *pf);
const struct rte_flow_ops i40e_flow_ops = {
.validate = i40e_flow_validate,
@@ -1655,10 +1656,20 @@ i40e_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
int ret;
ret = i40e_fdir_filter_flush(pf);
- if (ret)
+ if (ret) {
rte_flow_error_set(error, -ret,
RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
"Failed to flush FDIR flows.");
+ return -rte_errno;
+ }
+
+ ret = i40e_ethertype_filter_flush(pf);
+ if (ret) {
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to flush ethertype flows.");
+ return -rte_errno;
+ }
return ret;
}
@@ -1694,3 +1705,31 @@ i40e_fdir_filter_flush(struct i40e_pf *pf)
return ret;
}
+
+/* Flush all ethertype filters: pop each rule off the SW list and
+ * destroy it in HW, then drop every ethertype entry from the PF
+ * flow list.
+ */
+static int
+i40e_ethertype_filter_flush(struct i40e_pf *pf)
+{
+	struct i40e_ethertype_filter_list
+		*ethertype_list = &pf->ethertype.ethertype_list;
+	struct i40e_ethertype_filter *filter;
+	struct i40e_flow *flow;
+	void *temp;
+	int ret = 0;
+
+	while ((filter = TAILQ_FIRST(ethertype_list))) {
+		ret = i40e_dev_destroy_ethertype_filter(pf, filter);
+		if (ret)
+			return ret;
+	}
+
+	/* Delete ethertype flows in flow list. */
+	TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
+		if (flow->filter_type == RTE_ETH_FILTER_ETHERTYPE) {
+			TAILQ_REMOVE(&pf->flow_list, flow, node);
+			rte_free(flow);
+		}
+	}
+
+	return ret;
+}
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v4 17/17] net/i40e: flush tunnel filters
2016-12-30 3:25 ` [dpdk-dev] [PATCH v4 00/17] net/i40e: consistent filter API Beilei Xing
` (15 preceding siblings ...)
2016-12-30 3:25 ` [dpdk-dev] [PATCH v4 16/17] net/i40e: flush ethertype filters Beilei Xing
@ 2016-12-30 3:25 ` Beilei Xing
2017-01-03 3:25 ` Guo, Jia
2017-01-04 3:22 ` [dpdk-dev] [PATCH v5 00/17] net/i40e: consistent filter API Beilei Xing
17 siblings, 1 reply; 175+ messages in thread
From: Beilei Xing @ 2016-12-30 3:25 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
This patch adds i40e_tunnel_filter_flush function
to flush all tunnel filters, including filters in
SW and HW.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_flow.c | 37 +++++++++++++++++++++++++++++++++++++
1 file changed, 37 insertions(+)
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index 5c16fb8..fd0f0ec 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -105,6 +105,7 @@ static int i40e_dev_destroy_tunnel_filter(struct i40e_pf *pf,
struct i40e_tunnel_filter *filter);
static int i40e_fdir_filter_flush(struct i40e_pf *pf);
static int i40e_ethertype_filter_flush(struct i40e_pf *pf);
+static int i40e_tunnel_filter_flush(struct i40e_pf *pf);
const struct rte_flow_ops i40e_flow_ops = {
.validate = i40e_flow_validate,
@@ -1671,6 +1672,14 @@ i40e_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
return -rte_errno;
}
+ ret = i40e_tunnel_filter_flush(pf);
+ if (ret) {
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to flush tunnel flows.");
+ return -rte_errno;
+ }
+
return ret;
}
@@ -1733,3 +1742,31 @@ i40e_ethertype_filter_flush(struct i40e_pf *pf)
return ret;
}
+
+/* Flush all tunnel filters: pop each rule off the SW list and destroy
+ * it in HW, then drop every tunnel entry from the PF flow list.
+ */
+static int
+i40e_tunnel_filter_flush(struct i40e_pf *pf)
+{
+	struct i40e_tunnel_filter_list
+		*tunnel_list = &pf->tunnel.tunnel_list;
+	struct i40e_tunnel_filter *filter;
+	struct i40e_flow *flow;
+	void *temp;
+	int ret = 0;
+
+	while ((filter = TAILQ_FIRST(tunnel_list))) {
+		ret = i40e_dev_destroy_tunnel_filter(pf, filter);
+		if (ret)
+			return ret;
+	}
+
+	/* Delete tunnel flows in flow list. */
+	TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
+		if (flow->filter_type == RTE_ETH_FILTER_TUNNEL) {
+			TAILQ_REMOVE(&pf->flow_list, flow, node);
+			rte_free(flow);
+		}
+	}
+
+	return ret;
+}
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* Re: [dpdk-dev] [PATCH v4 17/17] net/i40e: flush tunnel filters
2016-12-30 3:25 ` [dpdk-dev] [PATCH v4 17/17] net/i40e: flush tunnel filters Beilei Xing
@ 2017-01-03 3:25 ` Guo, Jia
2017-01-03 4:49 ` Xing, Beilei
0 siblings, 1 reply; 175+ messages in thread
From: Guo, Jia @ 2017-01-03 3:25 UTC (permalink / raw)
To: Beilei Xing, jingjing.wu, helin.zhang; +Cc: dev
On 12/30/2016 11:25 AM, Beilei Xing wrote:
> This patch adds i40e_tunnel_filter_flush function
> to flush all tunnel filters, including filters in
> SW and HW.
>
> Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> ---
> drivers/net/i40e/i40e_flow.c | 37 +++++++++++++++++++++++++++++++++++++
> 1 file changed, 37 insertions(+)
>
> diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
> index 5c16fb8..fd0f0ec 100644
> --- a/drivers/net/i40e/i40e_flow.c
> +++ b/drivers/net/i40e/i40e_flow.c
> @@ -105,6 +105,7 @@ static int i40e_dev_destroy_tunnel_filter(struct i40e_pf *pf,
> struct i40e_tunnel_filter *filter);
> static int i40e_fdir_filter_flush(struct i40e_pf *pf);
> static int i40e_ethertype_filter_flush(struct i40e_pf *pf);
> +static int i40e_tunnel_filter_flush(struct i40e_pf *pf);
>
> const struct rte_flow_ops i40e_flow_ops = {
> .validate = i40e_flow_validate,
> @@ -1671,6 +1672,14 @@ i40e_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
> return -rte_errno;
> }
>
> + ret = i40e_tunnel_filter_flush(pf);
> + if (ret) {
> + rte_flow_error_set(error, -ret,
> + RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
> + "Failed to flush tunnel flows.");
> + return -rte_errno;
> + }
> +
> return ret;
> }
>
> @@ -1733,3 +1742,31 @@ i40e_ethertype_filter_flush(struct i40e_pf *pf)
>
> return ret;
> }
> +
> +/* Flush all tunnel filters */
> +static int
> +i40e_tunnel_filter_flush(struct i40e_pf *pf)
> +{
> + struct i40e_tunnel_filter_list
> + *tunnel_list = &pf->tunnel.tunnel_list;
> + struct i40e_tunnel_filter *f;
Just a mini comment, could you replace the variables name from "f" to
any other obvious name , to let code to be more readable.
>
> + struct i40e_flow *flow;
> + void *temp;
> + int ret = 0;
> +
> + while ((f = TAILQ_FIRST(tunnel_list))) {
> + ret = i40e_dev_destroy_tunnel_filter(pf, f);
> + if (ret)
> + return ret;
> + }
> +
> + /* Delete tunnel flows in flow list. */
> + TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
> + if (flow->filter_type == RTE_ETH_FILTER_TUNNEL) {
> + TAILQ_REMOVE(&pf->flow_list, flow, node);
> + rte_free(flow);
> + }
> + }
> +
> + return ret;
> +}
^ permalink raw reply [flat|nested] 175+ messages in thread
* Re: [dpdk-dev] [PATCH v4 17/17] net/i40e: flush tunnel filters
2017-01-03 3:25 ` Guo, Jia
@ 2017-01-03 4:49 ` Xing, Beilei
0 siblings, 0 replies; 175+ messages in thread
From: Xing, Beilei @ 2017-01-03 4:49 UTC (permalink / raw)
To: Guo, Jia, Wu, Jingjing, Zhang, Helin; +Cc: dev
Hi Jeff,
> -----Original Message-----
> From: Guo, Jia
> Sent: Tuesday, January 3, 2017 11:25 AM
> To: Xing, Beilei <beilei.xing@intel.com>; Wu, Jingjing
> <jingjing.wu@intel.com>; Zhang, Helin <helin.zhang@intel.com>
> Cc: dev@dpdk.org
> Subject: Re: [dpdk-dev] [PATCH v4 17/17] net/i40e: flush tunnel filters
>
>
>
> On 12/30/2016 11:25 AM, Beilei Xing wrote:
> > This patch adds i40e_tunnel_filter_flush function to flush all tunnel
> > filters, including filters in SW and HW.
> >
> > Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> > ---
> > drivers/net/i40e/i40e_flow.c | 37
> +++++++++++++++++++++++++++++++++++++
> > 1 file changed, 37 insertions(+)
> >
> > diff --git a/drivers/net/i40e/i40e_flow.c
> > b/drivers/net/i40e/i40e_flow.c index 5c16fb8..fd0f0ec 100644
> > --- a/drivers/net/i40e/i40e_flow.c
> > +++ b/drivers/net/i40e/i40e_flow.c
> > @@ -105,6 +105,7 @@ static int i40e_dev_destroy_tunnel_filter(struct
> i40e_pf *pf,
> > struct i40e_tunnel_filter *filter);
> > static int i40e_fdir_filter_flush(struct i40e_pf *pf);
> > static int i40e_ethertype_filter_flush(struct i40e_pf *pf);
> > +static int i40e_tunnel_filter_flush(struct i40e_pf *pf);
> >
> > const struct rte_flow_ops i40e_flow_ops = {
> > .validate = i40e_flow_validate,
> > @@ -1671,6 +1672,14 @@ i40e_flow_flush(struct rte_eth_dev *dev, struct
> rte_flow_error *error)
> > return -rte_errno;
> > }
> >
> > + ret = i40e_tunnel_filter_flush(pf);
> > + if (ret) {
> > + rte_flow_error_set(error, -ret,
> > + RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
> > + "Failed to flush tunnel flows.");
> > + return -rte_errno;
> > + }
> > +
> > return ret;
> > }
> >
> > @@ -1733,3 +1742,31 @@ i40e_ethertype_filter_flush(struct i40e_pf *pf)
> >
> > return ret;
> > }
> > +
> > +/* Flush all tunnel filters */
> > +static int
> > +i40e_tunnel_filter_flush(struct i40e_pf *pf) {
> > + struct i40e_tunnel_filter_list
> > + *tunnel_list = &pf->tunnel.tunnel_list;
> > + struct i40e_tunnel_filter *f;
> Just a mini comment, could you replace the variables name from "f" to any
> other obvious name , to let code to be more readable.
Thanks for comments, and will update in next version.
> >
> > + struct i40e_flow *flow;
> > + void *temp;
> > + int ret = 0;
> > +
> > + while ((f = TAILQ_FIRST(tunnel_list))) {
> > + ret = i40e_dev_destroy_tunnel_filter(pf, f);
> > + if (ret)
> > + return ret;
> > + }
> > +
> > + /* Delete tunnel flows in flow list. */
> > + TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
> > + if (flow->filter_type == RTE_ETH_FILTER_TUNNEL) {
> > + TAILQ_REMOVE(&pf->flow_list, flow, node);
> > + rte_free(flow);
> > + }
> > + }
> > +
> > + return ret;
> > +}
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v5 00/17] net/i40e: consistent filter API
2016-12-30 3:25 ` [dpdk-dev] [PATCH v4 00/17] net/i40e: consistent filter API Beilei Xing
` (16 preceding siblings ...)
2016-12-30 3:25 ` [dpdk-dev] [PATCH v4 17/17] net/i40e: flush tunnel filters Beilei Xing
@ 2017-01-04 3:22 ` Beilei Xing
2017-01-04 3:22 ` [dpdk-dev] [PATCH v5 01/17] net/i40e: store ethertype filter Beilei Xing
` (18 more replies)
17 siblings, 19 replies; 175+ messages in thread
From: Beilei Xing @ 2017-01-04 3:22 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
The patch set depends on Adrien's Generic flow API(rte_flow).
The patches mainly finish following functions:
1) Store and restore all kinds of filters.
2) Parse all kinds of filters.
3) Add flow validate function.
4) Add flow create function.
5) Add flow destroy function.
6) Add flow flush function.
v5 changes:
Change some local variable name.
Add removing i40e_flow_list during device uninit.
Fix compile error when gcc compile option isn't '-O0'.
v4 changes:
Change I40E_TCI_MASK with 0xFFFF to align with testpmd.
Modify the stats shown when restoring filters.
v3 changes:
Set the related cause pointer to a non-NULL value when error happens.
Change return value when error happens.
Modify filter_del parameter with key.
Malloc filter after checking when delete a filter.
Delete meaningless initialization.
Add return value when there's error.
Change global variable definition.
Modify some function declaration.
v2 changes:
Add i40e_flow.c, all flow ops are implemented in the file.
Change the whole implementation of all parse flow functions.
Update error info for all flow ops.
Add flow_list to store flows created.
Beilei Xing (17):
net/i40e: store ethertype filter
net/i40e: store tunnel filter
net/i40e: store flow director filter
net/i40e: restore ethertype filter
net/i40e: restore tunnel filter
net/i40e: restore flow director filter
net/i40e: add flow validate function
net/i40e: parse flow director filter
net/i40e: parse tunnel filter
net/i40e: add flow create function
net/i40e: add flow destroy function
net/i40e: destroy ethertype filter
net/i40e: destroy tunnel filter
net/i40e: destroy flow directory filter
net/i40e: add flow flush function
net/i40e: flush ethertype filters
net/i40e: flush tunnel filters
drivers/net/i40e/Makefile | 2 +
drivers/net/i40e/i40e_ethdev.c | 526 ++++++++++--
drivers/net/i40e/i40e_ethdev.h | 173 ++++
drivers/net/i40e/i40e_fdir.c | 140 +++-
drivers/net/i40e/i40e_flow.c | 1772 ++++++++++++++++++++++++++++++++++++++++
5 files changed, 2547 insertions(+), 66 deletions(-)
create mode 100644 drivers/net/i40e/i40e_flow.c
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v5 01/17] net/i40e: store ethertype filter
2017-01-04 3:22 ` [dpdk-dev] [PATCH v5 00/17] net/i40e: consistent filter API Beilei Xing
@ 2017-01-04 3:22 ` Beilei Xing
2017-01-04 3:22 ` [dpdk-dev] [PATCH v5 02/17] net/i40e: store tunnel filter Beilei Xing
` (17 subsequent siblings)
18 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2017-01-04 3:22 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
Currently there's no ethertype filter stored in SW.
This patch stores ethertype filter with cuckoo hash
in SW, also adds protection if an ethertype filter
has been added.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/Makefile | 1 +
drivers/net/i40e/i40e_ethdev.c | 166 ++++++++++++++++++++++++++++++++++++++++-
drivers/net/i40e/i40e_ethdev.h | 31 ++++++++
3 files changed, 197 insertions(+), 1 deletion(-)
diff --git a/drivers/net/i40e/Makefile b/drivers/net/i40e/Makefile
index 66997b6..11175c4 100644
--- a/drivers/net/i40e/Makefile
+++ b/drivers/net/i40e/Makefile
@@ -117,5 +117,6 @@ DEPDIRS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += lib/librte_eal lib/librte_ether
DEPDIRS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += lib/librte_mempool lib/librte_mbuf
DEPDIRS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += lib/librte_net
DEPDIRS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += lib/librte_kvargs
+DEPDIRS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += lib/librte_hash
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 8033c35..e43b4d9 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -51,6 +51,7 @@
#include <rte_dev.h>
#include <rte_eth_ctrl.h>
#include <rte_tailq.h>
+#include <rte_hash_crc.h>
#include "i40e_logs.h"
#include "base/i40e_prototype.h"
@@ -461,6 +462,12 @@ static void i40e_set_default_mac_addr(struct rte_eth_dev *dev,
static int i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
+static int i40e_ethertype_filter_convert(
+ const struct rte_eth_ethertype_filter *input,
+ struct i40e_ethertype_filter *filter);
+static int i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
+ struct i40e_ethertype_filter *filter);
+
static const struct rte_pci_id pci_id_i40e_map[] = {
{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710) },
{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QEMU) },
@@ -938,9 +945,18 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
int ret;
uint32_t len;
uint8_t aq_fail = 0;
+ struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
PMD_INIT_FUNC_TRACE();
+ char ethertype_hash_name[RTE_HASH_NAMESIZE];
+ struct rte_hash_parameters ethertype_hash_params = {
+ .name = ethertype_hash_name,
+ .entries = I40E_MAX_ETHERTYPE_FILTER_NUM,
+ .key_len = sizeof(struct i40e_ethertype_filter_input),
+ .hash_func = rte_hash_crc,
+ };
+
dev->dev_ops = &i40e_eth_dev_ops;
dev->rx_pkt_burst = i40e_recv_pkts;
dev->tx_pkt_burst = i40e_xmit_pkts;
@@ -1180,8 +1196,33 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
pf->flags &= ~I40E_FLAG_DCB;
}
+ /* Initialize ethertype filter rule list and hash */
+ TAILQ_INIT(ðertype_rule->ethertype_list);
+ snprintf(ethertype_hash_name, RTE_HASH_NAMESIZE,
+ "ethertype_%s", dev->data->name);
+ ethertype_rule->hash_table = rte_hash_create(ðertype_hash_params);
+ if (!ethertype_rule->hash_table) {
+ PMD_INIT_LOG(ERR, "Failed to create ethertype hash table!");
+ ret = -EINVAL;
+ goto err_ethertype_hash_table_create;
+ }
+ ethertype_rule->hash_map = rte_zmalloc("i40e_ethertype_hash_map",
+ sizeof(struct i40e_ethertype_filter *) *
+ I40E_MAX_ETHERTYPE_FILTER_NUM,
+ 0);
+ if (!ethertype_rule->hash_map) {
+ PMD_INIT_LOG(ERR,
+ "Failed to allocate memory for ethertype hash map!");
+ ret = -ENOMEM;
+ goto err_ethertype_hash_map_alloc;
+ }
+
return 0;
+err_ethertype_hash_map_alloc:
+ rte_hash_free(ethertype_rule->hash_table);
+err_ethertype_hash_table_create:
+ rte_free(dev->data->mac_addrs);
err_mac_alloc:
i40e_vsi_release(pf->main_vsi);
err_setup_pf_switch:
@@ -1204,23 +1245,40 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
static int
eth_i40e_dev_uninit(struct rte_eth_dev *dev)
{
+ struct i40e_pf *pf;
struct rte_pci_device *pci_dev;
struct i40e_hw *hw;
struct i40e_filter_control_settings settings;
+ struct i40e_ethertype_filter *p_ethertype;
int ret;
uint8_t aq_fail = 0;
+ struct i40e_ethertype_rule *ethertype_rule;
PMD_INIT_FUNC_TRACE();
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
pci_dev = dev->pci_dev;
+ ethertype_rule = &pf->ethertype;
if (hw->adapter_stopped == 0)
i40e_dev_close(dev);
+ /* Remove all ethertype director rules and hash */
+ if (ethertype_rule->hash_map)
+ rte_free(ethertype_rule->hash_map);
+ if (ethertype_rule->hash_table)
+ rte_hash_free(ethertype_rule->hash_table);
+
+ while ((p_ethertype = TAILQ_FIRST(ðertype_rule->ethertype_list))) {
+ TAILQ_REMOVE(ðertype_rule->ethertype_list,
+ p_ethertype, rules);
+ rte_free(p_ethertype);
+ }
+
dev->dev_ops = NULL;
dev->rx_pkt_burst = NULL;
dev->tx_pkt_burst = NULL;
@@ -7955,6 +8013,82 @@ i40e_hash_filter_ctrl(struct rte_eth_dev *dev,
return ret;
}
+/* Convert ethertype filter structure */
+static int
+i40e_ethertype_filter_convert(const struct rte_eth_ethertype_filter *input,
+ struct i40e_ethertype_filter *filter)
+{
+ rte_memcpy(&filter->input.mac_addr, &input->mac_addr, ETHER_ADDR_LEN);
+ filter->input.ether_type = input->ether_type;
+ filter->flags = input->flags;
+ filter->queue = input->queue;
+
+ return 0;
+}
+
+/* Check if there exists the ehtertype filter */
+struct i40e_ethertype_filter *
+i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_rule *ethertype_rule,
+ const struct i40e_ethertype_filter_input *input)
+{
+ int ret;
+
+ ret = rte_hash_lookup(ethertype_rule->hash_table, (const void *)input);
+ if (ret < 0)
+ return NULL;
+
+ return ethertype_rule->hash_map[ret];
+}
+
+/* Add ethertype filter in SW list */
+static int
+i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
+ struct i40e_ethertype_filter *filter)
+{
+ struct i40e_ethertype_rule *rule = &pf->ethertype;
+ int ret;
+
+ ret = rte_hash_add_key(rule->hash_table, &filter->input);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "Failed to insert ethertype filter"
+ " to hash table %d!",
+ ret);
+ return ret;
+ }
+ rule->hash_map[ret] = filter;
+
+ TAILQ_INSERT_TAIL(&rule->ethertype_list, filter, rules);
+
+ return 0;
+}
+
+/* Delete ethertype filter in SW list */
+int
+i40e_sw_ethertype_filter_del(struct i40e_pf *pf,
+ struct i40e_ethertype_filter_input *input)
+{
+ struct i40e_ethertype_rule *rule = &pf->ethertype;
+ struct i40e_ethertype_filter *filter;
+ int ret;
+
+ ret = rte_hash_del_key(rule->hash_table, input);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "Failed to delete ethertype filter"
+ " to hash table %d!",
+ ret);
+ return ret;
+ }
+ filter = rule->hash_map[ret];
+ rule->hash_map[ret] = NULL;
+
+ TAILQ_REMOVE(&rule->ethertype_list, filter, rules);
+ rte_free(filter);
+
+ return 0;
+}
+
/*
* Configure ethertype filter, which can director packet by filtering
* with mac address and ether_type or only ether_type
@@ -7965,6 +8099,9 @@ i40e_ethertype_filter_set(struct i40e_pf *pf,
bool add)
{
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
+ struct i40e_ethertype_filter *ethertype_filter, *node;
+ struct i40e_ethertype_filter check_filter;
struct i40e_control_filter_stats stats;
uint16_t flags = 0;
int ret;
@@ -7983,6 +8120,21 @@ i40e_ethertype_filter_set(struct i40e_pf *pf,
PMD_DRV_LOG(WARNING, "filter vlan ether_type in first tag is"
" not supported.");
+ /* Check if there is the filter in SW list */
+ memset(&check_filter, 0, sizeof(check_filter));
+ i40e_ethertype_filter_convert(filter, &check_filter);
+ node = i40e_sw_ethertype_filter_lookup(ethertype_rule,
+ &check_filter.input);
+ if (add && node) {
+ PMD_DRV_LOG(ERR, "Conflict with existing ethertype rules!");
+ return -EINVAL;
+ }
+
+ if (!add && !node) {
+ PMD_DRV_LOG(ERR, "There's no corresponding ethertype filter!");
+ return -EINVAL;
+ }
+
if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
@@ -8003,7 +8155,19 @@ i40e_ethertype_filter_set(struct i40e_pf *pf,
stats.mac_etype_free, stats.etype_free);
if (ret < 0)
return -ENOSYS;
- return 0;
+
+ /* Add or delete a filter in SW list */
+ if (add) {
+ ethertype_filter = rte_zmalloc("ethertype_filter",
+ sizeof(*ethertype_filter), 0);
+ rte_memcpy(ethertype_filter, &check_filter,
+ sizeof(check_filter));
+ ret = i40e_sw_ethertype_filter_insert(pf, ethertype_filter);
+ } else {
+ ret = i40e_sw_ethertype_filter_del(pf, &node->input);
+ }
+
+ return ret;
}
/*
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 298cef4..3fb20ba 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -37,6 +37,7 @@
#include <rte_eth_ctrl.h>
#include <rte_time.h>
#include <rte_kvargs.h>
+#include <rte_hash.h>
#define I40E_VLAN_TAG_SIZE 4
@@ -396,6 +397,30 @@ struct i40e_fdir_info {
struct i40e_fdir_flex_mask flex_mask[I40E_FILTER_PCTYPE_MAX];
};
+/* Ethertype filter number HW supports */
+#define I40E_MAX_ETHERTYPE_FILTER_NUM 768
+
+/* Ethertype filter struct */
+struct i40e_ethertype_filter_input {
+ struct ether_addr mac_addr; /* Mac address to match */
+ uint16_t ether_type; /* Ether type to match */
+};
+
+struct i40e_ethertype_filter {
+ TAILQ_ENTRY(i40e_ethertype_filter) rules;
+ struct i40e_ethertype_filter_input input;
+ uint16_t flags; /* Flags from RTE_ETHTYPE_FLAGS_* */
+ uint16_t queue; /* Queue assigned to when match */
+};
+
+TAILQ_HEAD(i40e_ethertype_filter_list, i40e_ethertype_filter);
+
+struct i40e_ethertype_rule {
+ struct i40e_ethertype_filter_list ethertype_list;
+ struct i40e_ethertype_filter **hash_map;
+ struct rte_hash *hash_table;
+};
+
#define I40E_MIRROR_MAX_ENTRIES_PER_RULE 64
#define I40E_MAX_MIRROR_RULES 64
/*
@@ -466,6 +491,7 @@ struct i40e_pf {
struct i40e_vmdq_info *vmdq;
struct i40e_fdir_info fdir; /* flow director info */
+ struct i40e_ethertype_rule ethertype; /* Ethertype filter rule */
struct i40e_fc_conf fc_conf; /* Flow control conf */
struct i40e_mirror_rule_list mirror_list;
uint16_t nb_mirror_rule; /* The number of mirror rules */
@@ -616,6 +642,11 @@ void i40e_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
struct rte_eth_rxq_info *qinfo);
void i40e_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
struct rte_eth_txq_info *qinfo);
+struct i40e_ethertype_filter *
+i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_rule *ethertype_rule,
+ const struct i40e_ethertype_filter_input *input);
+int i40e_sw_ethertype_filter_del(struct i40e_pf *pf,
+ struct i40e_ethertype_filter_input *input);
/* I40E_DEV_PRIVATE_TO */
#define I40E_DEV_PRIVATE_TO_PF(adapter) \
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v5 02/17] net/i40e: store tunnel filter
2017-01-04 3:22 ` [dpdk-dev] [PATCH v5 00/17] net/i40e: consistent filter API Beilei Xing
2017-01-04 3:22 ` [dpdk-dev] [PATCH v5 01/17] net/i40e: store ethertype filter Beilei Xing
@ 2017-01-04 3:22 ` Beilei Xing
2017-01-04 3:22 ` [dpdk-dev] [PATCH v5 03/17] net/i40e: store flow director filter Beilei Xing
` (16 subsequent siblings)
18 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2017-01-04 3:22 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
Currently there's no tunnel filter stored in SW.
This patch stores tunnel filters in SW using a
cuckoo hash, and also adds protection against
adding a duplicate tunnel filter.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_ethdev.c | 169 ++++++++++++++++++++++++++++++++++++++++-
drivers/net/i40e/i40e_ethdev.h | 32 ++++++++
2 files changed, 198 insertions(+), 3 deletions(-)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index e43b4d9..2bdb4d6 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -468,6 +468,12 @@ static int i40e_ethertype_filter_convert(
static int i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
struct i40e_ethertype_filter *filter);
+static int i40e_tunnel_filter_convert(
+ struct i40e_aqc_add_remove_cloud_filters_element_data *cld_filter,
+ struct i40e_tunnel_filter *tunnel_filter);
+static int i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
+ struct i40e_tunnel_filter *tunnel_filter);
+
static const struct rte_pci_id pci_id_i40e_map[] = {
{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710) },
{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QEMU) },
@@ -946,6 +952,7 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
uint32_t len;
uint8_t aq_fail = 0;
struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
+ struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
PMD_INIT_FUNC_TRACE();
@@ -957,6 +964,14 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
.hash_func = rte_hash_crc,
};
+ char tunnel_hash_name[RTE_HASH_NAMESIZE];
+ struct rte_hash_parameters tunnel_hash_params = {
+ .name = tunnel_hash_name,
+ .entries = I40E_MAX_TUNNEL_FILTER_NUM,
+ .key_len = sizeof(struct i40e_tunnel_filter_input),
+ .hash_func = rte_hash_crc,
+ };
+
dev->dev_ops = &i40e_eth_dev_ops;
dev->rx_pkt_burst = i40e_recv_pkts;
dev->tx_pkt_burst = i40e_xmit_pkts;
@@ -1217,8 +1232,33 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
goto err_ethertype_hash_map_alloc;
}
+ /* Initialize tunnel filter rule list and hash */
+ TAILQ_INIT(&tunnel_rule->tunnel_list);
+ snprintf(tunnel_hash_name, RTE_HASH_NAMESIZE,
+ "tunnel_%s", dev->data->name);
+ tunnel_rule->hash_table = rte_hash_create(&tunnel_hash_params);
+ if (!tunnel_rule->hash_table) {
+ PMD_INIT_LOG(ERR, "Failed to create tunnel hash table!");
+ ret = -EINVAL;
+ goto err_tunnel_hash_table_create;
+ }
+ tunnel_rule->hash_map = rte_zmalloc("i40e_tunnel_hash_map",
+ sizeof(struct i40e_tunnel_filter *) *
+ I40E_MAX_TUNNEL_FILTER_NUM,
+ 0);
+ if (!tunnel_rule->hash_map) {
+ PMD_INIT_LOG(ERR,
+ "Failed to allocate memory for tunnel hash map!");
+ ret = -ENOMEM;
+ goto err_tunnel_hash_map_alloc;
+ }
+
return 0;
+err_tunnel_hash_map_alloc:
+ rte_hash_free(tunnel_rule->hash_table);
+err_tunnel_hash_table_create:
+ rte_free(ethertype_rule->hash_map);
err_ethertype_hash_map_alloc:
rte_hash_free(ethertype_rule->hash_table);
err_ethertype_hash_table_create:
@@ -1250,9 +1290,11 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
struct i40e_hw *hw;
struct i40e_filter_control_settings settings;
struct i40e_ethertype_filter *p_ethertype;
+ struct i40e_tunnel_filter *p_tunnel;
int ret;
uint8_t aq_fail = 0;
struct i40e_ethertype_rule *ethertype_rule;
+ struct i40e_tunnel_rule *tunnel_rule;
PMD_INIT_FUNC_TRACE();
@@ -1263,6 +1305,7 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
pci_dev = dev->pci_dev;
ethertype_rule = &pf->ethertype;
+ tunnel_rule = &pf->tunnel;
if (hw->adapter_stopped == 0)
i40e_dev_close(dev);
@@ -1279,6 +1322,17 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
rte_free(p_ethertype);
}
+ /* Remove all tunnel director rules and hash */
+ if (tunnel_rule->hash_map)
+ rte_free(tunnel_rule->hash_map);
+ if (tunnel_rule->hash_table)
+ rte_hash_free(tunnel_rule->hash_table);
+
+ while ((p_tunnel = TAILQ_FIRST(&tunnel_rule->tunnel_list))) {
+ TAILQ_REMOVE(&tunnel_rule->tunnel_list, p_tunnel, rules);
+ rte_free(p_tunnel);
+ }
+
dev->dev_ops = NULL;
dev->rx_pkt_burst = NULL;
dev->tx_pkt_burst = NULL;
@@ -6478,6 +6532,85 @@ i40e_dev_get_filter_type(uint16_t filter_type, uint16_t *flag)
return 0;
}
+/* Convert tunnel filter structure */
+static int
+i40e_tunnel_filter_convert(struct i40e_aqc_add_remove_cloud_filters_element_data
+ *cld_filter,
+ struct i40e_tunnel_filter *tunnel_filter)
+{
+ ether_addr_copy((struct ether_addr *)&cld_filter->outer_mac,
+ (struct ether_addr *)&tunnel_filter->input.outer_mac);
+ ether_addr_copy((struct ether_addr *)&cld_filter->inner_mac,
+ (struct ether_addr *)&tunnel_filter->input.inner_mac);
+ tunnel_filter->input.inner_vlan = cld_filter->inner_vlan;
+ tunnel_filter->input.flags = cld_filter->flags;
+ tunnel_filter->input.tenant_id = cld_filter->tenant_id;
+ tunnel_filter->queue = cld_filter->queue_number;
+
+ return 0;
+}
+
+/* Check if there exists the tunnel filter */
+struct i40e_tunnel_filter *
+i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_rule *tunnel_rule,
+ const struct i40e_tunnel_filter_input *input)
+{
+ int ret;
+
+ ret = rte_hash_lookup(tunnel_rule->hash_table, (const void *)input);
+ if (ret < 0)
+ return NULL;
+
+ return tunnel_rule->hash_map[ret];
+}
+
+/* Add a tunnel filter into the SW list */
+static int
+i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
+ struct i40e_tunnel_filter *tunnel_filter)
+{
+ struct i40e_tunnel_rule *rule = &pf->tunnel;
+ int ret;
+
+ ret = rte_hash_add_key(rule->hash_table, &tunnel_filter->input);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "Failed to insert tunnel filter to hash table %d!",
+ ret);
+ return ret;
+ }
+ rule->hash_map[ret] = tunnel_filter;
+
+ TAILQ_INSERT_TAIL(&rule->tunnel_list, tunnel_filter, rules);
+
+ return 0;
+}
+
+/* Delete a tunnel filter from the SW list */
+int
+i40e_sw_tunnel_filter_del(struct i40e_pf *pf,
+ struct i40e_tunnel_filter_input *input)
+{
+ struct i40e_tunnel_rule *rule = &pf->tunnel;
+ struct i40e_tunnel_filter *tunnel_filter;
+ int ret;
+
+ ret = rte_hash_del_key(rule->hash_table, input);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "Failed to delete tunnel filter to hash table %d!",
+ ret);
+ return ret;
+ }
+ tunnel_filter = rule->hash_map[ret];
+ rule->hash_map[ret] = NULL;
+
+ TAILQ_REMOVE(&rule->tunnel_list, tunnel_filter, rules);
+ rte_free(tunnel_filter);
+
+ return 0;
+}
+
static int
i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
struct rte_eth_tunnel_filter_conf *tunnel_filter,
@@ -6493,6 +6626,9 @@ i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
struct i40e_vsi *vsi = pf->main_vsi;
struct i40e_aqc_add_remove_cloud_filters_element_data *cld_filter;
struct i40e_aqc_add_remove_cloud_filters_element_data *pfilter;
+ struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
+ struct i40e_tunnel_filter *tunnel, *node;
+ struct i40e_tunnel_filter check_filter; /* Check if filter exists */
cld_filter = rte_zmalloc("tunnel_filter",
sizeof(struct i40e_aqc_add_remove_cloud_filters_element_data),
@@ -6555,11 +6691,38 @@ i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
pfilter->tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id);
pfilter->queue_number = rte_cpu_to_le_16(tunnel_filter->queue_id);
- if (add)
+ /* Check if there is the filter in SW list */
+ memset(&check_filter, 0, sizeof(check_filter));
+ i40e_tunnel_filter_convert(cld_filter, &check_filter);
+ node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &check_filter.input);
+ if (add && node) {
+ PMD_DRV_LOG(ERR, "Conflict with existing tunnel rules!");
+ return -EINVAL;
+ }
+
+ if (!add && !node) {
+ PMD_DRV_LOG(ERR, "There's no corresponding tunnel filter!");
+ return -EINVAL;
+ }
+
+ if (add) {
ret = i40e_aq_add_cloud_filters(hw, vsi->seid, cld_filter, 1);
- else
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to add a tunnel filter.");
+ return ret;
+ }
+ tunnel = rte_zmalloc("tunnel_filter", sizeof(*tunnel), 0);
+ rte_memcpy(tunnel, &check_filter, sizeof(check_filter));
+ ret = i40e_sw_tunnel_filter_insert(pf, tunnel);
+ } else {
ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
- cld_filter, 1);
+ cld_filter, 1);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter.");
+ return ret;
+ }
+ ret = i40e_sw_tunnel_filter_del(pf, &node->input);
+ }
rte_free(cld_filter);
return ret;
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 3fb20ba..83f3594 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -421,6 +421,32 @@ struct i40e_ethertype_rule {
struct rte_hash *hash_table;
};
+/* Tunnel filter number HW supports */
+#define I40E_MAX_TUNNEL_FILTER_NUM 400
+
+/* Tunnel filter struct */
+struct i40e_tunnel_filter_input {
+ uint8_t outer_mac[6]; /* Outer mac address to match */
+ uint8_t inner_mac[6]; /* Inner mac address to match */
+ uint16_t inner_vlan; /* Inner vlan address to match */
+ uint16_t flags; /* Filter type flag */
+ uint32_t tenant_id; /* Tenant id to match */
+};
+
+struct i40e_tunnel_filter {
+ TAILQ_ENTRY(i40e_tunnel_filter) rules;
+ struct i40e_tunnel_filter_input input;
+ uint16_t queue; /* Queue assigned to when match */
+};
+
+TAILQ_HEAD(i40e_tunnel_filter_list, i40e_tunnel_filter);
+
+struct i40e_tunnel_rule {
+ struct i40e_tunnel_filter_list tunnel_list;
+ struct i40e_tunnel_filter **hash_map;
+ struct rte_hash *hash_table;
+};
+
#define I40E_MIRROR_MAX_ENTRIES_PER_RULE 64
#define I40E_MAX_MIRROR_RULES 64
/*
@@ -492,6 +518,7 @@ struct i40e_pf {
struct i40e_fdir_info fdir; /* flow director info */
struct i40e_ethertype_rule ethertype; /* Ethertype filter rule */
+ struct i40e_tunnel_rule tunnel; /* Tunnel filter rule */
struct i40e_fc_conf fc_conf; /* Flow control conf */
struct i40e_mirror_rule_list mirror_list;
uint16_t nb_mirror_rule; /* The number of mirror rules */
@@ -647,6 +674,11 @@ i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_rule *ethertype_rule,
const struct i40e_ethertype_filter_input *input);
int i40e_sw_ethertype_filter_del(struct i40e_pf *pf,
struct i40e_ethertype_filter_input *input);
+struct i40e_tunnel_filter *
+i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_rule *tunnel_rule,
+ const struct i40e_tunnel_filter_input *input);
+int i40e_sw_tunnel_filter_del(struct i40e_pf *pf,
+ struct i40e_tunnel_filter_input *input);
/* I40E_DEV_PRIVATE_TO */
#define I40E_DEV_PRIVATE_TO_PF(adapter) \
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v5 03/17] net/i40e: store flow director filter
2017-01-04 3:22 ` [dpdk-dev] [PATCH v5 00/17] net/i40e: consistent filter API Beilei Xing
2017-01-04 3:22 ` [dpdk-dev] [PATCH v5 01/17] net/i40e: store ethertype filter Beilei Xing
2017-01-04 3:22 ` [dpdk-dev] [PATCH v5 02/17] net/i40e: store tunnel filter Beilei Xing
@ 2017-01-04 3:22 ` Beilei Xing
2017-01-04 3:22 ` [dpdk-dev] [PATCH v5 04/17] net/i40e: restore ethertype filter Beilei Xing
` (15 subsequent siblings)
18 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2017-01-04 3:22 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
Currently there's no flow director filter stored in SW. This
patch stores flow director filters in SW with cuckoo hash,
also adds protection if a flow director filter has been added.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_ethdev.c | 48 +++++++++++++++++++
drivers/net/i40e/i40e_ethdev.h | 14 ++++++
drivers/net/i40e/i40e_fdir.c | 105 +++++++++++++++++++++++++++++++++++++++++
3 files changed, 167 insertions(+)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 2bdb4d6..fb7d794 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -953,6 +953,7 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
uint8_t aq_fail = 0;
struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
+ struct i40e_fdir_info *fdir_info = &pf->fdir;
PMD_INIT_FUNC_TRACE();
@@ -972,6 +973,14 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
.hash_func = rte_hash_crc,
};
+ char fdir_hash_name[RTE_HASH_NAMESIZE];
+ struct rte_hash_parameters fdir_hash_params = {
+ .name = fdir_hash_name,
+ .entries = I40E_MAX_FDIR_FILTER_NUM,
+ .key_len = sizeof(struct rte_eth_fdir_input),
+ .hash_func = rte_hash_crc,
+ };
+
dev->dev_ops = &i40e_eth_dev_ops;
dev->rx_pkt_burst = i40e_recv_pkts;
dev->tx_pkt_burst = i40e_xmit_pkts;
@@ -1253,8 +1262,33 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
goto err_tunnel_hash_map_alloc;
}
+ /* Initialize flow director filter rule list and hash */
+ TAILQ_INIT(&fdir_info->fdir_list);
+ snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
+ "fdir_%s", dev->data->name);
+ fdir_info->hash_table = rte_hash_create(&fdir_hash_params);
+ if (!fdir_info->hash_table) {
+ PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
+ ret = -EINVAL;
+ goto err_fdir_hash_table_create;
+ }
+ fdir_info->hash_map = rte_zmalloc("i40e_fdir_hash_map",
+ sizeof(struct i40e_fdir_filter *) *
+ I40E_MAX_FDIR_FILTER_NUM,
+ 0);
+ if (!fdir_info->hash_map) {
+ PMD_INIT_LOG(ERR,
+ "Failed to allocate memory for fdir hash map!");
+ ret = -ENOMEM;
+ goto err_fdir_hash_map_alloc;
+ }
+
return 0;
+err_fdir_hash_map_alloc:
+ rte_hash_free(fdir_info->hash_table);
+err_fdir_hash_table_create:
+ rte_free(tunnel_rule->hash_map);
err_tunnel_hash_map_alloc:
rte_hash_free(tunnel_rule->hash_table);
err_tunnel_hash_table_create:
@@ -1291,10 +1325,12 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
struct i40e_filter_control_settings settings;
struct i40e_ethertype_filter *p_ethertype;
struct i40e_tunnel_filter *p_tunnel;
+ struct i40e_fdir_filter *p_fdir;
int ret;
uint8_t aq_fail = 0;
struct i40e_ethertype_rule *ethertype_rule;
struct i40e_tunnel_rule *tunnel_rule;
+ struct i40e_fdir_info *fdir_info;
PMD_INIT_FUNC_TRACE();
@@ -1306,6 +1342,7 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
pci_dev = dev->pci_dev;
ethertype_rule = &pf->ethertype;
tunnel_rule = &pf->tunnel;
+ fdir_info = &pf->fdir;
if (hw->adapter_stopped == 0)
i40e_dev_close(dev);
@@ -1333,6 +1370,17 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
rte_free(p_tunnel);
}
+ /* Remove all flow director rules and hash */
+ if (fdir_info->hash_map)
+ rte_free(fdir_info->hash_map);
+ if (fdir_info->hash_table)
+ rte_hash_free(fdir_info->hash_table);
+
+ while ((p_fdir = TAILQ_FIRST(&fdir_info->fdir_list))) {
+ TAILQ_REMOVE(&fdir_info->fdir_list, p_fdir, rules);
+ rte_free(p_fdir);
+ }
+
dev->dev_ops = NULL;
dev->rx_pkt_burst = NULL;
dev->tx_pkt_burst = NULL;
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 83f3594..b79fbd6 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -377,6 +377,14 @@ struct i40e_fdir_flex_mask {
};
#define I40E_FILTER_PCTYPE_MAX 64
+#define I40E_MAX_FDIR_FILTER_NUM (1024 * 8)
+
+struct i40e_fdir_filter {
+ TAILQ_ENTRY(i40e_fdir_filter) rules;
+ struct rte_eth_fdir_filter fdir;
+};
+
+TAILQ_HEAD(i40e_fdir_filter_list, i40e_fdir_filter);
/*
* A structure used to define fields of a FDIR related info.
*/
@@ -395,6 +403,10 @@ struct i40e_fdir_info {
*/
struct i40e_fdir_flex_pit flex_set[I40E_MAX_FLXPLD_LAYER * I40E_MAX_FLXPLD_FIED];
struct i40e_fdir_flex_mask flex_mask[I40E_FILTER_PCTYPE_MAX];
+
+ struct i40e_fdir_filter_list fdir_list;
+ struct i40e_fdir_filter **hash_map;
+ struct rte_hash *hash_table;
};
/* Ethertype filter number HW supports */
@@ -674,6 +686,8 @@ i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_rule *ethertype_rule,
const struct i40e_ethertype_filter_input *input);
int i40e_sw_ethertype_filter_del(struct i40e_pf *pf,
struct i40e_ethertype_filter_input *input);
+int i40e_sw_fdir_filter_del(struct i40e_pf *pf,
+ struct rte_eth_fdir_input *input);
struct i40e_tunnel_filter *
i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_rule *tunnel_rule,
const struct i40e_tunnel_filter_input *input);
diff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c
index 335bf15..4a29b37 100644
--- a/drivers/net/i40e/i40e_fdir.c
+++ b/drivers/net/i40e/i40e_fdir.c
@@ -121,6 +121,14 @@ static int i40e_fdir_filter_programming(struct i40e_pf *pf,
bool add);
static int i40e_fdir_flush(struct rte_eth_dev *dev);
+static int i40e_fdir_filter_convert(const struct rte_eth_fdir_filter *input,
+ struct i40e_fdir_filter *filter);
+static struct i40e_fdir_filter *
+i40e_sw_fdir_filter_lookup(struct i40e_fdir_info *fdir_info,
+ const struct rte_eth_fdir_input *input);
+static int i40e_sw_fdir_filter_insert(struct i40e_pf *pf,
+ struct i40e_fdir_filter *filter);
+
static int
i40e_fdir_rx_queue_init(struct i40e_rx_queue *rxq)
{
@@ -1017,6 +1025,74 @@ i40e_check_fdir_programming_status(struct i40e_rx_queue *rxq)
return ret;
}
+static int
+i40e_fdir_filter_convert(const struct rte_eth_fdir_filter *input,
+ struct i40e_fdir_filter *filter)
+{
+ rte_memcpy(&filter->fdir, input, sizeof(struct rte_eth_fdir_filter));
+ return 0;
+}
+
+/* Check if there exists the flow director filter */
+static struct i40e_fdir_filter *
+i40e_sw_fdir_filter_lookup(struct i40e_fdir_info *fdir_info,
+ const struct rte_eth_fdir_input *input)
+{
+ int ret;
+
+ ret = rte_hash_lookup(fdir_info->hash_table, (const void *)input);
+ if (ret < 0)
+ return NULL;
+
+ return fdir_info->hash_map[ret];
+}
+
+/* Add a flow director filter into the SW list */
+static int
+i40e_sw_fdir_filter_insert(struct i40e_pf *pf, struct i40e_fdir_filter *filter)
+{
+ struct i40e_fdir_info *fdir_info = &pf->fdir;
+ int ret;
+
+ ret = rte_hash_add_key(fdir_info->hash_table,
+ &filter->fdir.input);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "Failed to insert fdir filter to hash table %d!",
+ ret);
+ return ret;
+ }
+ fdir_info->hash_map[ret] = filter;
+
+ TAILQ_INSERT_TAIL(&fdir_info->fdir_list, filter, rules);
+
+ return 0;
+}
+
+/* Delete a flow director filter from the SW list */
+int
+i40e_sw_fdir_filter_del(struct i40e_pf *pf, struct rte_eth_fdir_input *input)
+{
+ struct i40e_fdir_info *fdir_info = &pf->fdir;
+ struct i40e_fdir_filter *filter;
+ int ret;
+
+ ret = rte_hash_del_key(fdir_info->hash_table, input);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "Failed to delete fdir filter to hash table %d!",
+ ret);
+ return ret;
+ }
+ filter = fdir_info->hash_map[ret];
+ fdir_info->hash_map[ret] = NULL;
+
+ TAILQ_REMOVE(&fdir_info->fdir_list, filter, rules);
+ rte_free(filter);
+
+ return 0;
+}
+
/*
* i40e_add_del_fdir_filter - add or remove a flow director filter.
* @pf: board private structure
@@ -1032,6 +1108,9 @@ i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
enum i40e_filter_pctype pctype;
+ struct i40e_fdir_info *fdir_info = &pf->fdir;
+ struct i40e_fdir_filter *fdir_filter, *node;
+ struct i40e_fdir_filter check_filter; /* Check if the filter exists */
int ret = 0;
if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
@@ -1054,6 +1133,22 @@ i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
return -EINVAL;
}
+ /* Check if there is the filter in SW list */
+ memset(&check_filter, 0, sizeof(check_filter));
+ i40e_fdir_filter_convert(filter, &check_filter);
+ node = i40e_sw_fdir_filter_lookup(fdir_info, &check_filter.fdir.input);
+ if (add && node) {
+ PMD_DRV_LOG(ERR,
+ "Conflict with existing flow director rules!");
+ return -EINVAL;
+ }
+
+ if (!add && !node) {
+ PMD_DRV_LOG(ERR,
+ "There's no corresponding flow firector filter!");
+ return -EINVAL;
+ }
+
memset(pkt, 0, I40E_FDIR_PKT_LEN);
ret = i40e_fdir_construct_pkt(pf, &filter->input, pkt);
@@ -1077,6 +1172,16 @@ i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
pctype);
return ret;
}
+
+ if (add) {
+ fdir_filter = rte_zmalloc("fdir_filter",
+ sizeof(*fdir_filter), 0);
+ rte_memcpy(fdir_filter, &check_filter, sizeof(check_filter));
+ ret = i40e_sw_fdir_filter_insert(pf, fdir_filter);
+ } else {
+ ret = i40e_sw_fdir_filter_del(pf, &node->fdir.input);
+ }
+
return ret;
}
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v5 04/17] net/i40e: restore ethertype filter
2017-01-04 3:22 ` [dpdk-dev] [PATCH v5 00/17] net/i40e: consistent filter API Beilei Xing
` (2 preceding siblings ...)
2017-01-04 3:22 ` [dpdk-dev] [PATCH v5 03/17] net/i40e: store flow director filter Beilei Xing
@ 2017-01-04 3:22 ` Beilei Xing
2017-01-04 3:22 ` [dpdk-dev] [PATCH v5 05/17] net/i40e: restore tunnel filter Beilei Xing
` (14 subsequent siblings)
18 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2017-01-04 3:22 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
Add support for restoring ethertype filters in case they are
dropped accidentally, since with the generic filter API all
filters must be explicitly added and removed by the user.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_ethdev.c | 44 ++++++++++++++++++++++++++++++++++++++++++
1 file changed, 44 insertions(+)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index fb7d794..189d110 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -474,6 +474,9 @@ static int i40e_tunnel_filter_convert(
static int i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
struct i40e_tunnel_filter *tunnel_filter);
+static void i40e_ethertype_filter_restore(struct i40e_pf *pf);
+static void i40e_filter_restore(struct i40e_pf *pf);
+
static const struct rte_pci_id pci_id_i40e_map[] = {
{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710) },
{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QEMU) },
@@ -1955,6 +1958,8 @@ i40e_dev_start(struct rte_eth_dev *dev)
/* enable uio intr after callback register */
rte_intr_enable(intr_handle);
+ i40e_filter_restore(pf);
+
return I40E_SUCCESS;
err_up:
@@ -10071,3 +10076,42 @@ i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
return ret;
}
+
+/* Restore ethertype filter */
+static void
+i40e_ethertype_filter_restore(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_ethertype_filter_list
+ *ethertype_list = &pf->ethertype.ethertype_list;
+ struct i40e_ethertype_filter *f;
+ struct i40e_control_filter_stats stats;
+ uint16_t flags;
+
+ TAILQ_FOREACH(f, ethertype_list, rules) {
+ flags = 0;
+ if (!(f->flags & RTE_ETHTYPE_FLAGS_MAC))
+ flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
+ if (f->flags & RTE_ETHTYPE_FLAGS_DROP)
+ flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
+ flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
+
+ memset(&stats, 0, sizeof(stats));
+ i40e_aq_add_rem_control_packet_filter(hw,
+ f->input.mac_addr.addr_bytes,
+ f->input.ether_type,
+ flags, pf->main_vsi->seid,
+ f->queue, 1, &stats, NULL);
+ }
+ PMD_DRV_LOG(INFO, "Ethertype filter:"
+ " mac_etype_used = %u, etype_used = %u,"
+ " mac_etype_free = %u, etype_free = %u\n",
+ stats.mac_etype_used, stats.etype_used,
+ stats.mac_etype_free, stats.etype_free);
+}
+
+static void
+i40e_filter_restore(struct i40e_pf *pf)
+{
+ i40e_ethertype_filter_restore(pf);
+}
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v5 05/17] net/i40e: restore tunnel filter
2017-01-04 3:22 ` [dpdk-dev] [PATCH v5 00/17] net/i40e: consistent filter API Beilei Xing
` (3 preceding siblings ...)
2017-01-04 3:22 ` [dpdk-dev] [PATCH v5 04/17] net/i40e: restore ethertype filter Beilei Xing
@ 2017-01-04 3:22 ` Beilei Xing
2017-01-04 3:22 ` [dpdk-dev] [PATCH v5 06/17] net/i40e: restore flow director filter Beilei Xing
` (13 subsequent siblings)
18 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2017-01-04 3:22 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
Add support of restoring tunnel filter.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_ethdev.c | 21 +++++++++++++++++++++
1 file changed, 21 insertions(+)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 189d110..67e1b37 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -475,6 +475,7 @@ static int i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
struct i40e_tunnel_filter *tunnel_filter);
static void i40e_ethertype_filter_restore(struct i40e_pf *pf);
+static void i40e_tunnel_filter_restore(struct i40e_pf *pf);
static void i40e_filter_restore(struct i40e_pf *pf);
static const struct rte_pci_id pci_id_i40e_map[] = {
@@ -10110,8 +10111,28 @@ i40e_ethertype_filter_restore(struct i40e_pf *pf)
stats.mac_etype_free, stats.etype_free);
}
+/* Restore tunnel filter */
+static void
+i40e_tunnel_filter_restore(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_vsi *vsi = pf->main_vsi;
+ struct i40e_tunnel_filter_list
+ *tunnel_list = &pf->tunnel.tunnel_list;
+ struct i40e_tunnel_filter *f;
+ struct i40e_aqc_add_remove_cloud_filters_element_data cld_filter;
+
+ /* Replay each stored cloud filter to the admin queue.
+ * NOTE(review): the rte_memcpy assumes f->input matches the leading
+ * bytes of the AQ element layout -- confirm against the struct
+ * definitions. The i40e_aq_add_cloud_filters() return value is not
+ * checked here (best-effort restore).
+ */
+ TAILQ_FOREACH(f, tunnel_list, rules) {
+ memset(&cld_filter, 0, sizeof(cld_filter));
+ rte_memcpy(&cld_filter, &f->input, sizeof(f->input));
+ cld_filter.queue_number = f->queue;
+ i40e_aq_add_cloud_filters(hw, vsi->seid, &cld_filter, 1);
+ }
+}
+
static void
i40e_filter_restore(struct i40e_pf *pf)
{
i40e_ethertype_filter_restore(pf);
+ i40e_tunnel_filter_restore(pf);
}
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v5 06/17] net/i40e: restore flow director filter
2017-01-04 3:22 ` [dpdk-dev] [PATCH v5 00/17] net/i40e: consistent filter API Beilei Xing
` (4 preceding siblings ...)
2017-01-04 3:22 ` [dpdk-dev] [PATCH v5 05/17] net/i40e: restore tunnel filter Beilei Xing
@ 2017-01-04 3:22 ` Beilei Xing
2017-01-04 3:22 ` [dpdk-dev] [PATCH v5 07/17] net/i40e: add flow validate function Beilei Xing
` (12 subsequent siblings)
18 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2017-01-04 3:22 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
Add support of restoring flow director filter.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_ethdev.c | 1 +
drivers/net/i40e/i40e_ethdev.h | 1 +
drivers/net/i40e/i40e_fdir.c | 31 +++++++++++++++++++++++++++++++
3 files changed, 33 insertions(+)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 67e1b37..153322a 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -10135,4 +10135,5 @@ i40e_filter_restore(struct i40e_pf *pf)
{
i40e_ethertype_filter_restore(pf);
i40e_tunnel_filter_restore(pf);
+ i40e_fdir_filter_restore(pf);
}
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index b79fbd6..92f6f55 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -670,6 +670,7 @@ int i40e_fdir_ctrl_func(struct rte_eth_dev *dev,
int i40e_select_filter_input_set(struct i40e_hw *hw,
struct rte_eth_input_set_conf *conf,
enum rte_filter_type filter);
+void i40e_fdir_filter_restore(struct i40e_pf *pf);
int i40e_hash_filter_inset_select(struct i40e_hw *hw,
struct rte_eth_input_set_conf *conf);
int i40e_fdir_filter_inset_select(struct i40e_pf *pf,
diff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c
index 4a29b37..f89dbc9 100644
--- a/drivers/net/i40e/i40e_fdir.c
+++ b/drivers/net/i40e/i40e_fdir.c
@@ -1586,3 +1586,34 @@ i40e_fdir_ctrl_func(struct rte_eth_dev *dev,
}
return ret;
}
+
+/* Restore flow director filter */
+void
+i40e_fdir_filter_restore(struct i40e_pf *pf)
+{
+ struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(pf->main_vsi);
+ struct i40e_fdir_filter_list *fdir_list = &pf->fdir.fdir_list;
+ struct i40e_fdir_filter *f;
+#ifdef RTE_LIBRTE_I40E_DEBUG_DRIVER
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ uint32_t fdstat;
+ uint32_t guarant_cnt; /**< Number of filters in guaranteed spaces. */
+ uint32_t best_cnt; /**< Number of filters in best effort spaces. */
+#endif /* RTE_LIBRTE_I40E_DEBUG_DRIVER */
+
+ /* Replay every software-tracked flow director rule to hardware. */
+ TAILQ_FOREACH(f, fdir_list, rules)
+ i40e_add_del_fdir_filter(dev, &f->fdir, TRUE);
+
+#ifdef RTE_LIBRTE_I40E_DEBUG_DRIVER
+ fdstat = I40E_READ_REG(hw, I40E_PFQF_FDSTAT);
+ guarant_cnt =
+ (uint32_t)((fdstat & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) >>
+ I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT);
+ best_cnt =
+ (uint32_t)((fdstat & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
+ I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
+ /* Log inside the #ifdef: guarant_cnt/best_cnt exist only when
+ * RTE_LIBRTE_I40E_DEBUG_DRIVER is defined. PMD_DRV_LOG appends
+ * its own newline.
+ */
+ PMD_DRV_LOG(INFO, "FDIR: Guarant count: %d, Best count: %d",
+ guarant_cnt, best_cnt);
+#endif /* RTE_LIBRTE_I40E_DEBUG_DRIVER */
+}
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v5 07/17] net/i40e: add flow validate function
2017-01-04 3:22 ` [dpdk-dev] [PATCH v5 00/17] net/i40e: consistent filter API Beilei Xing
` (5 preceding siblings ...)
2017-01-04 3:22 ` [dpdk-dev] [PATCH v5 06/17] net/i40e: restore flow director filter Beilei Xing
@ 2017-01-04 3:22 ` Beilei Xing
2017-01-04 18:57 ` Ferruh Yigit
2017-01-04 3:22 ` [dpdk-dev] [PATCH v5 08/17] net/i40e: parse flow director filter Beilei Xing
` (11 subsequent siblings)
18 siblings, 1 reply; 175+ messages in thread
From: Beilei Xing @ 2017-01-04 3:22 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
This patch adds the i40e_flow_validate function to check whether
a flow is valid according to the flow pattern.
i40e_parse_ethertype_filter is added first; it also gets
the ethertype info.
i40e_flow.c is added to handle all generic filter events.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/Makefile | 1 +
drivers/net/i40e/i40e_ethdev.c | 7 +
drivers/net/i40e/i40e_ethdev.h | 18 ++
drivers/net/i40e/i40e_flow.c | 447 +++++++++++++++++++++++++++++++++++++++++
4 files changed, 473 insertions(+)
create mode 100644 drivers/net/i40e/i40e_flow.c
diff --git a/drivers/net/i40e/Makefile b/drivers/net/i40e/Makefile
index 11175c4..89bd85a 100644
--- a/drivers/net/i40e/Makefile
+++ b/drivers/net/i40e/Makefile
@@ -105,6 +105,7 @@ endif
SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_ethdev_vf.c
SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_pf.c
SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_fdir.c
+SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_flow.c
# vector PMD driver needs SSE4.1 support
ifeq ($(findstring RTE_MACHINE_CPUFLAG_SSE4_1,$(CFLAGS)),)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 153322a..edfd52b 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -8426,6 +8426,8 @@ i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
return ret;
}
+const struct rte_flow_ops i40e_flow_ops;
+
static int
i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
enum rte_filter_type filter_type,
@@ -8457,6 +8459,11 @@ i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
case RTE_ETH_FILTER_FDIR:
ret = i40e_fdir_ctrl_func(dev, filter_op, arg);
break;
+ case RTE_ETH_FILTER_GENERIC:
+ if (filter_op != RTE_ETH_FILTER_GET)
+ return -EINVAL;
+ *(const void **)arg = &i40e_flow_ops;
+ break;
default:
PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
filter_type);
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 92f6f55..23f360b 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -38,6 +38,7 @@
#include <rte_time.h>
#include <rte_kvargs.h>
#include <rte_hash.h>
+#include <rte_flow_driver.h>
#define I40E_VLAN_TAG_SIZE 4
@@ -629,6 +630,23 @@ struct i40e_adapter {
struct rte_timecounter tx_tstamp_tc;
};
+union i40e_filter_t {
+ struct rte_eth_ethertype_filter ethertype_filter;
+ struct rte_eth_fdir_filter fdir_filter;
+ struct rte_eth_tunnel_filter_conf tunnel_filter;
+};
+
+typedef int (*parse_filter_t)(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter);
+struct i40e_valid_pattern {
+ enum rte_flow_item_type *items;
+ parse_filter_t parse_filter;
+};
+
int i40e_dev_switch_queues(struct i40e_pf *pf, bool on);
int i40e_vsi_release(struct i40e_vsi *vsi);
struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf,
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
new file mode 100644
index 0000000..a9ff73f
--- /dev/null
+++ b/drivers/net/i40e/i40e_flow.c
@@ -0,0 +1,447 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_log.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_eth_ctrl.h>
+#include <rte_tailq.h>
+#include <rte_flow_driver.h>
+
+#include "i40e_logs.h"
+#include "base/i40e_type.h"
+#include "i40e_ethdev.h"
+
+static int i40e_flow_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error);
+static int i40e_parse_ethertype_pattern(__rte_unused struct rte_eth_dev *dev,
+ const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ struct rte_eth_ethertype_filter *filter);
+static int i40e_parse_ethertype_act(struct rte_eth_dev *dev,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error,
+ struct rte_eth_ethertype_filter *filter);
+static int i40e_parse_attr(const struct rte_flow_attr *attr,
+ struct rte_flow_error *error);
+
+const struct rte_flow_ops i40e_flow_ops = {
+ .validate = i40e_flow_validate,
+};
+
+union i40e_filter_t cons_filter;
+
+/* Pattern matched ethertype filter */
+static enum rte_flow_item_type pattern_ethertype[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* Parse an ethertype flow rule: fill filter->ethertype_filter from the
+ * pattern and actions, then validate the attributes.
+ * Returns 0 on success, a negative errno (reported through
+ * rte_flow_error_set() by the helpers) on failure.
+ */
+static int
+i40e_parse_ethertype_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter)
+{
+ struct rte_eth_ethertype_filter *ethertype_filter =
+ &filter->ethertype_filter;
+ int ret;
+
+ ret = i40e_parse_ethertype_pattern(dev, pattern, error,
+ ethertype_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_parse_ethertype_act(dev, actions, error,
+ ethertype_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_parse_attr(attr, error);
+ if (ret)
+ return ret;
+
+ /* ret is 0 at this point. */
+ return ret;
+}
+
+static struct i40e_valid_pattern i40e_supported_patterns[] = {
+ /* Ethertype */
+ { pattern_ethertype, i40e_parse_ethertype_filter },
+};
+
+#define NEXT_ITEM_OF_ACTION(act, actions, index) \
+ do { \
+ act = actions + index; \
+ while (act->type == RTE_FLOW_ACTION_TYPE_VOID) { \
+ index++; \
+ act = actions + index; \
+ } \
+ } while (0)
+
+/* Find the first VOID (is_void == true) or non-VOID (is_void == false)
+ * item starting at 'item'. Returns the END item when no match is found
+ * before the end of the pattern.
+ */
+static const struct rte_flow_item *
+i40e_find_first_item(const struct rte_flow_item *item, bool is_void)
+{
+ bool is_find;
+
+ while (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ if (is_void)
+ is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
+ else
+ is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
+ if (is_find)
+ break;
+ item++;
+ }
+ return item;
+}
+
+/* Skip all VOID items of the pattern: copy every non-VOID item (plus the
+ * terminating END item) from 'pattern' into 'items'. The caller must size
+ * 'items' for the non-VOID item count + 1 (see i40e_flow_validate).
+ */
+static void
+i40e_pattern_skip_void_item(struct rte_flow_item *items,
+ const struct rte_flow_item *pattern)
+{
+ uint32_t cpy_count = 0;
+ const struct rte_flow_item *pb = pattern, *pe = pattern;
+
+ for (;;) {
+ /* Find a non-void item first */
+ pb = i40e_find_first_item(pb, false);
+ if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
+ pe = pb;
+ break;
+ }
+
+ /* Find a void item */
+ pe = i40e_find_first_item(pb + 1, true);
+
+ /* Copy the run of non-VOID items [pb, pe) in one shot. */
+ cpy_count = pe - pb;
+ rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);
+
+ items += cpy_count;
+
+ if (pe->type == RTE_FLOW_ITEM_TYPE_END) {
+ pb = pe;
+ break;
+ }
+
+ pb = pe + 1;
+ }
+ /* Copy the END item. */
+ rte_memcpy(items, pe, sizeof(struct rte_flow_item));
+}
+
+/* Check if the pattern matches a supported item type array.
+ * True only when the two sequences agree element-wise and both end with
+ * RTE_FLOW_ITEM_TYPE_END (i.e. same length and same types).
+ */
+static bool
+i40e_match_pattern(enum rte_flow_item_type *item_array,
+ struct rte_flow_item *pattern)
+{
+ struct rte_flow_item *item = pattern;
+
+ while ((*item_array == item->type) &&
+ (*item_array != RTE_FLOW_ITEM_TYPE_END)) {
+ item_array++;
+ item++;
+ }
+
+ return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
+ item->type == RTE_FLOW_ITEM_TYPE_END);
+}
+
+/* Find if there's parse filter function matched: look the (VOID-stripped)
+ * pattern up in i40e_supported_patterns. Returns NULL when the pattern is
+ * not supported.
+ */
+static parse_filter_t
+i40e_find_parse_filter_func(struct rte_flow_item *pattern)
+{
+ parse_filter_t parse_filter = NULL;
+ uint8_t i = 0;
+
+ for (; i < RTE_DIM(i40e_supported_patterns); i++) {
+ if (i40e_match_pattern(i40e_supported_patterns[i].items,
+ pattern)) {
+ parse_filter = i40e_supported_patterns[i].parse_filter;
+ break;
+ }
+ }
+
+ return parse_filter;
+}
+
+/* Parse attributes: only plain ingress rules are accepted -- egress,
+ * priority and group are all rejected with -rte_errno via
+ * rte_flow_error_set(). Returns 0 on success.
+ */
+static int
+i40e_parse_attr(const struct rte_flow_attr *attr,
+ struct rte_flow_error *error)
+{
+ /* Must be input direction */
+ if (!attr->ingress) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ attr, "Only support ingress.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
+ if (attr->egress) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ attr, "Not support egress.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
+ if (attr->priority) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ attr, "Not support priority.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
+ if (attr->group) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+ attr, "Not support group.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+/* Parse the pattern of an ethertype rule (a single ETH item) and fill
+ * 'filter' with the MAC/ethertype info. Rules: src MAC mask must be all
+ * zeros; dst MAC mask must be all ones (match this MAC, sets
+ * RTE_ETHTYPE_FLAGS_MAC) or all zeros; ethertype mask must be full;
+ * IPv4/IPv6 ethertypes are rejected. Returns 0 or -rte_errno.
+ * Note: mangled "&eth_mask" references ("ð_mask") restored below.
+ */
+static int
+i40e_parse_ethertype_pattern(__rte_unused struct rte_eth_dev *dev,
+ const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ struct rte_eth_ethertype_filter *filter)
+{
+ const struct rte_flow_item *item = pattern;
+ const struct rte_flow_item_eth *eth_spec;
+ const struct rte_flow_item_eth *eth_mask;
+ enum rte_flow_item_type item_type;
+
+ for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Not support range");
+ return -rte_errno;
+ }
+ item_type = item->type;
+ switch (item_type) {
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ eth_spec = (const struct rte_flow_item_eth *)item->spec;
+ eth_mask = (const struct rte_flow_item_eth *)item->mask;
+ /* Get the MAC info. */
+ if (!eth_spec || !eth_mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "NULL ETH spec/mask");
+ return -rte_errno;
+ }
+
+ /* Mask bits of source MAC address must be full of 0.
+ * Mask bits of destination MAC address must be full
+ * of 1 or full of 0.
+ */
+ if (!is_zero_ether_addr(&eth_mask->src) ||
+ (!is_zero_ether_addr(&eth_mask->dst) &&
+ !is_broadcast_ether_addr(&eth_mask->dst))) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid MAC_addr mask");
+ return -rte_errno;
+ }
+
+ if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid ethertype mask");
+ return -rte_errno;
+ }
+
+ /* If mask bits of destination MAC address
+ * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
+ */
+ if (is_broadcast_ether_addr(&eth_mask->dst)) {
+ filter->mac_addr = eth_spec->dst;
+ filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
+ } else {
+ filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
+ }
+ filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
+
+ if (filter->ether_type == ETHER_TYPE_IPv4 ||
+ filter->ether_type == ETHER_TYPE_IPv6) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Unsupported ether_type in"
+ " control packet filter.");
+ return -rte_errno;
+ }
+ if (filter->ether_type == ETHER_TYPE_VLAN)
+ PMD_DRV_LOG(WARNING, "filter vlan ether_type in"
+ " first tag is not supported.");
+
+ break;
+ default:
+ break;
+ }
+ }
+
+ return 0;
+}
+
+/* Parse the action list of an ethertype rule: exactly one QUEUE or DROP
+ * action (VOIDs skipped) followed by END. Fills filter->queue or sets
+ * RTE_ETHTYPE_FLAGS_DROP. Returns 0 or -rte_errno.
+ */
+static int
+i40e_parse_ethertype_act(struct rte_eth_dev *dev,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error,
+ struct rte_eth_ethertype_filter *filter)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ const struct rte_flow_action *act;
+ const struct rte_flow_action_queue *act_q;
+ uint32_t index = 0;
+
+ /* Check if the first non-void action is QUEUE or DROP. */
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
+ act->type != RTE_FLOW_ACTION_TYPE_DROP) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
+ act_q = (const struct rte_flow_action_queue *)act->conf;
+ filter->queue = act_q->index;
+ /* Target queue must exist on the port. */
+ if (filter->queue >= pf->dev_data->nb_rx_queues) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Invalid queue ID for"
+ " ethertype_filter.");
+ return -rte_errno;
+ }
+ } else {
+ filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
+ }
+
+ /* Check if the next non-void item is END */
+ index++;
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+/* Validate a flow rule against what the i40e PMD supports; entry point
+ * for rte_flow_ops.validate. Builds a VOID-stripped copy of the pattern,
+ * looks up the matching parse function and runs it into cons_filter.
+ * Returns 0 on success, negative errno otherwise (error details reported
+ * through rte_flow_error_set()).
+ */
+static int
+i40e_flow_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct rte_flow_item *items; /* internal pattern w/o VOID items */
+ parse_filter_t parse_filter;
+ uint32_t item_num = 0; /* non-void item number of pattern*/
+ uint32_t i = 0;
+ int ret;
+
+ if (!pattern) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL, "NULL pattern.");
+ return -rte_errno;
+ }
+
+ if (!actions) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+ NULL, "NULL action.");
+ return -rte_errno;
+ }
+
+ /* attr is dereferenced by the parse functions; reject NULL here. */
+ if (!attr) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL, "NULL attribute.");
+ return -rte_errno;
+ }
+
+ memset(&cons_filter, 0, sizeof(cons_filter));
+
+ /* Get the non-void item number of pattern */
+ while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
+ if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
+ item_num++;
+ i++;
+ }
+ item_num++;
+
+ items = rte_zmalloc("i40e_pattern",
+ item_num * sizeof(struct rte_flow_item), 0);
+ if (!items) {
+ rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL, "No memory for PMD internal items.");
+ return -ENOMEM;
+ }
+
+ i40e_pattern_skip_void_item(items, pattern);
+
+ /* Find if there's matched parse filter function */
+ parse_filter = i40e_find_parse_filter_func(items);
+ if (!parse_filter) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ pattern, "Unsupported pattern");
+ /* Fix: free the internal copy on this error path too. */
+ rte_free(items);
+ return -rte_errno;
+ }
+
+ ret = parse_filter(dev, attr, items, actions, error, &cons_filter);
+
+ rte_free(items);
+
+ return ret;
+}
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v5 08/17] net/i40e: parse flow director filter
2017-01-04 3:22 ` [dpdk-dev] [PATCH v5 00/17] net/i40e: consistent filter API Beilei Xing
` (6 preceding siblings ...)
2017-01-04 3:22 ` [dpdk-dev] [PATCH v5 07/17] net/i40e: add flow validate function Beilei Xing
@ 2017-01-04 3:22 ` Beilei Xing
2017-01-04 3:22 ` [dpdk-dev] [PATCH v5 09/17] net/i40e: parse tunnel filter Beilei Xing
` (10 subsequent siblings)
18 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2017-01-04 3:22 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
This patch adds i40e_parse_fdir_filter to check if a rule
is a flow director rule according to the flow pattern,
and the function also gets the flow director info.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_ethdev.c | 56 +---
drivers/net/i40e/i40e_ethdev.h | 55 ++++
drivers/net/i40e/i40e_flow.c | 607 +++++++++++++++++++++++++++++++++++++++++
3 files changed, 663 insertions(+), 55 deletions(-)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index edfd52b..bcf28cf 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -139,60 +139,6 @@
#define I40E_DEFAULT_DCB_APP_NUM 1
#define I40E_DEFAULT_DCB_APP_PRIO 3
-#define I40E_INSET_NONE 0x00000000000000000ULL
-
-/* bit0 ~ bit 7 */
-#define I40E_INSET_DMAC 0x0000000000000001ULL
-#define I40E_INSET_SMAC 0x0000000000000002ULL
-#define I40E_INSET_VLAN_OUTER 0x0000000000000004ULL
-#define I40E_INSET_VLAN_INNER 0x0000000000000008ULL
-#define I40E_INSET_VLAN_TUNNEL 0x0000000000000010ULL
-
-/* bit 8 ~ bit 15 */
-#define I40E_INSET_IPV4_SRC 0x0000000000000100ULL
-#define I40E_INSET_IPV4_DST 0x0000000000000200ULL
-#define I40E_INSET_IPV6_SRC 0x0000000000000400ULL
-#define I40E_INSET_IPV6_DST 0x0000000000000800ULL
-#define I40E_INSET_SRC_PORT 0x0000000000001000ULL
-#define I40E_INSET_DST_PORT 0x0000000000002000ULL
-#define I40E_INSET_SCTP_VT 0x0000000000004000ULL
-
-/* bit 16 ~ bit 31 */
-#define I40E_INSET_IPV4_TOS 0x0000000000010000ULL
-#define I40E_INSET_IPV4_PROTO 0x0000000000020000ULL
-#define I40E_INSET_IPV4_TTL 0x0000000000040000ULL
-#define I40E_INSET_IPV6_TC 0x0000000000080000ULL
-#define I40E_INSET_IPV6_FLOW 0x0000000000100000ULL
-#define I40E_INSET_IPV6_NEXT_HDR 0x0000000000200000ULL
-#define I40E_INSET_IPV6_HOP_LIMIT 0x0000000000400000ULL
-#define I40E_INSET_TCP_FLAGS 0x0000000000800000ULL
-
-/* bit 32 ~ bit 47, tunnel fields */
-#define I40E_INSET_TUNNEL_IPV4_DST 0x0000000100000000ULL
-#define I40E_INSET_TUNNEL_IPV6_DST 0x0000000200000000ULL
-#define I40E_INSET_TUNNEL_DMAC 0x0000000400000000ULL
-#define I40E_INSET_TUNNEL_SRC_PORT 0x0000000800000000ULL
-#define I40E_INSET_TUNNEL_DST_PORT 0x0000001000000000ULL
-#define I40E_INSET_TUNNEL_ID 0x0000002000000000ULL
-
-/* bit 48 ~ bit 55 */
-#define I40E_INSET_LAST_ETHER_TYPE 0x0001000000000000ULL
-
-/* bit 56 ~ bit 63, Flex Payload */
-#define I40E_INSET_FLEX_PAYLOAD_W1 0x0100000000000000ULL
-#define I40E_INSET_FLEX_PAYLOAD_W2 0x0200000000000000ULL
-#define I40E_INSET_FLEX_PAYLOAD_W3 0x0400000000000000ULL
-#define I40E_INSET_FLEX_PAYLOAD_W4 0x0800000000000000ULL
-#define I40E_INSET_FLEX_PAYLOAD_W5 0x1000000000000000ULL
-#define I40E_INSET_FLEX_PAYLOAD_W6 0x2000000000000000ULL
-#define I40E_INSET_FLEX_PAYLOAD_W7 0x4000000000000000ULL
-#define I40E_INSET_FLEX_PAYLOAD_W8 0x8000000000000000ULL
-#define I40E_INSET_FLEX_PAYLOAD \
- (I40E_INSET_FLEX_PAYLOAD_W1 | I40E_INSET_FLEX_PAYLOAD_W2 | \
- I40E_INSET_FLEX_PAYLOAD_W3 | I40E_INSET_FLEX_PAYLOAD_W4 | \
- I40E_INSET_FLEX_PAYLOAD_W5 | I40E_INSET_FLEX_PAYLOAD_W6 | \
- I40E_INSET_FLEX_PAYLOAD_W7 | I40E_INSET_FLEX_PAYLOAD_W8)
-
/**
* Below are values for writing un-exposed registers suggested
* by silicon experts
@@ -7617,7 +7563,7 @@ i40e_validate_input_set(enum i40e_filter_pctype pctype,
}
/* default input set fields combination per pctype */
-static uint64_t
+uint64_t
i40e_get_default_input_set(uint16_t pctype)
{
static const uint64_t default_inset_table[] = {
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 23f360b..9e3a48d 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -190,6 +190,60 @@ enum i40e_flxpld_layer_idx {
#define FLOATING_VEB_SUPPORTED_FW_MAJ 5
#define FLOATING_VEB_SUPPORTED_FW_MIN 0
+#define I40E_INSET_NONE 0x00000000000000000ULL
+
+/* bit0 ~ bit 7 */
+#define I40E_INSET_DMAC 0x0000000000000001ULL
+#define I40E_INSET_SMAC 0x0000000000000002ULL
+#define I40E_INSET_VLAN_OUTER 0x0000000000000004ULL
+#define I40E_INSET_VLAN_INNER 0x0000000000000008ULL
+#define I40E_INSET_VLAN_TUNNEL 0x0000000000000010ULL
+
+/* bit 8 ~ bit 15 */
+#define I40E_INSET_IPV4_SRC 0x0000000000000100ULL
+#define I40E_INSET_IPV4_DST 0x0000000000000200ULL
+#define I40E_INSET_IPV6_SRC 0x0000000000000400ULL
+#define I40E_INSET_IPV6_DST 0x0000000000000800ULL
+#define I40E_INSET_SRC_PORT 0x0000000000001000ULL
+#define I40E_INSET_DST_PORT 0x0000000000002000ULL
+#define I40E_INSET_SCTP_VT 0x0000000000004000ULL
+
+/* bit 16 ~ bit 31 */
+#define I40E_INSET_IPV4_TOS 0x0000000000010000ULL
+#define I40E_INSET_IPV4_PROTO 0x0000000000020000ULL
+#define I40E_INSET_IPV4_TTL 0x0000000000040000ULL
+#define I40E_INSET_IPV6_TC 0x0000000000080000ULL
+#define I40E_INSET_IPV6_FLOW 0x0000000000100000ULL
+#define I40E_INSET_IPV6_NEXT_HDR 0x0000000000200000ULL
+#define I40E_INSET_IPV6_HOP_LIMIT 0x0000000000400000ULL
+#define I40E_INSET_TCP_FLAGS 0x0000000000800000ULL
+
+/* bit 32 ~ bit 47, tunnel fields */
+#define I40E_INSET_TUNNEL_IPV4_DST 0x0000000100000000ULL
+#define I40E_INSET_TUNNEL_IPV6_DST 0x0000000200000000ULL
+#define I40E_INSET_TUNNEL_DMAC 0x0000000400000000ULL
+#define I40E_INSET_TUNNEL_SRC_PORT 0x0000000800000000ULL
+#define I40E_INSET_TUNNEL_DST_PORT 0x0000001000000000ULL
+#define I40E_INSET_TUNNEL_ID 0x0000002000000000ULL
+
+/* bit 48 ~ bit 55 */
+#define I40E_INSET_LAST_ETHER_TYPE 0x0001000000000000ULL
+
+/* bit 56 ~ bit 63, Flex Payload */
+#define I40E_INSET_FLEX_PAYLOAD_W1 0x0100000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W2 0x0200000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W3 0x0400000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W4 0x0800000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W5 0x1000000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W6 0x2000000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W7 0x4000000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W8 0x8000000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD \
+ (I40E_INSET_FLEX_PAYLOAD_W1 | I40E_INSET_FLEX_PAYLOAD_W2 | \
+ I40E_INSET_FLEX_PAYLOAD_W3 | I40E_INSET_FLEX_PAYLOAD_W4 | \
+ I40E_INSET_FLEX_PAYLOAD_W5 | I40E_INSET_FLEX_PAYLOAD_W6 | \
+ I40E_INSET_FLEX_PAYLOAD_W7 | I40E_INSET_FLEX_PAYLOAD_W8)
+
struct i40e_adapter;
/**
@@ -712,6 +766,7 @@ i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_rule *tunnel_rule,
const struct i40e_tunnel_filter_input *input);
int i40e_sw_tunnel_filter_del(struct i40e_pf *pf,
struct i40e_tunnel_filter_input *input);
+uint64_t i40e_get_default_input_set(uint16_t pctype);
/* I40E_DEV_PRIVATE_TO */
#define I40E_DEV_PRIVATE_TO_PF(adapter) \
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index a9ff73f..64b4ab6 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -51,6 +51,10 @@
#include "base/i40e_type.h"
#include "i40e_ethdev.h"
+#define I40E_IPV4_TC_SHIFT 4
+#define I40E_IPV6_TC_MASK (0x00FF << I40E_IPV4_TC_SHIFT)
+#define I40E_IPV6_FRAG_HEADER 44
+
static int i40e_flow_validate(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
@@ -64,6 +68,14 @@ static int i40e_parse_ethertype_act(struct rte_eth_dev *dev,
const struct rte_flow_action *actions,
struct rte_flow_error *error,
struct rte_eth_ethertype_filter *filter);
+static int i40e_parse_fdir_pattern(struct rte_eth_dev *dev,
+ const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ struct rte_eth_fdir_filter *filter);
+static int i40e_parse_fdir_act(struct rte_eth_dev *dev,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error,
+ struct rte_eth_fdir_filter *filter);
static int i40e_parse_attr(const struct rte_flow_attr *attr,
struct rte_flow_error *error);
@@ -79,6 +91,107 @@ static enum rte_flow_item_type pattern_ethertype[] = {
RTE_FLOW_ITEM_TYPE_END,
};
+/* Pattern matched flow director filter */
+static enum rte_flow_item_type pattern_fdir_ipv4[] = {
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_ext[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_udp[] = {
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_udp_ext[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_tcp[] = {
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_tcp_ext[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_sctp[] = {
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_sctp_ext[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6[] = {
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_ext[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_udp[] = {
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_udp_ext[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_tcp[] = {
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_tcp_ext[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_sctp[] = {
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_sctp_ext[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
static int
i40e_parse_ethertype_filter(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
@@ -108,9 +221,62 @@ i40e_parse_ethertype_filter(struct rte_eth_dev *dev,
return ret;
}
+static int
+i40e_parse_fdir_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter)
+{
+ struct rte_eth_fdir_filter *fdir_filter =
+ &filter->fdir_filter;
+ int ret;
+
+ ret = i40e_parse_fdir_pattern(dev, pattern, error, fdir_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_parse_fdir_act(dev, actions, error, fdir_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_parse_attr(attr, error);
+ if (ret)
+ return ret;
+
+ if (dev->data->dev_conf.fdir_conf.mode !=
+ RTE_FDIR_MODE_PERFECT) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "Check the mode in fdir_conf.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
static struct i40e_valid_pattern i40e_supported_patterns[] = {
/* Ethertype */
{ pattern_ethertype, i40e_parse_ethertype_filter },
+ /* FDIR */
+ { pattern_fdir_ipv4, i40e_parse_fdir_filter },
+ { pattern_fdir_ipv4_ext, i40e_parse_fdir_filter },
+ { pattern_fdir_ipv4_udp, i40e_parse_fdir_filter },
+ { pattern_fdir_ipv4_udp_ext, i40e_parse_fdir_filter },
+ { pattern_fdir_ipv4_tcp, i40e_parse_fdir_filter },
+ { pattern_fdir_ipv4_tcp_ext, i40e_parse_fdir_filter },
+ { pattern_fdir_ipv4_sctp, i40e_parse_fdir_filter },
+ { pattern_fdir_ipv4_sctp_ext, i40e_parse_fdir_filter },
+ { pattern_fdir_ipv6, i40e_parse_fdir_filter },
+ { pattern_fdir_ipv6_ext, i40e_parse_fdir_filter },
+ { pattern_fdir_ipv6_udp, i40e_parse_fdir_filter },
+ { pattern_fdir_ipv6_udp_ext, i40e_parse_fdir_filter },
+ { pattern_fdir_ipv6_tcp, i40e_parse_fdir_filter },
+ { pattern_fdir_ipv6_tcp_ext, i40e_parse_fdir_filter },
+ { pattern_fdir_ipv6_sctp, i40e_parse_fdir_filter },
+ { pattern_fdir_ipv6_sctp_ext, i40e_parse_fdir_filter },
};
#define NEXT_ITEM_OF_ACTION(act, actions, index) \
@@ -385,6 +551,447 @@ i40e_parse_ethertype_act(struct rte_eth_dev *dev,
}
static int
+i40e_parse_fdir_pattern(struct rte_eth_dev *dev,
+ const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ struct rte_eth_fdir_filter *filter)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ const struct rte_flow_item *item = pattern;
+ const struct rte_flow_item_eth *eth_spec, *eth_mask;
+ const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
+ const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
+ const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
+ const struct rte_flow_item_udp *udp_spec, *udp_mask;
+ const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
+ const struct rte_flow_item_vf *vf_spec;
+ uint32_t flow_type = RTE_ETH_FLOW_UNKNOWN;
+ enum i40e_filter_pctype pctype;
+ uint64_t input_set = I40E_INSET_NONE;
+ uint16_t flag_offset;
+ enum rte_flow_item_type item_type;
+ enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
+ uint32_t j;
+
+ for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Not support range");
+ return -rte_errno;
+ }
+ item_type = item->type;
+ switch (item_type) {
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ eth_spec = (const struct rte_flow_item_eth *)item->spec;
+ eth_mask = (const struct rte_flow_item_eth *)item->mask;
+ if (eth_spec || eth_mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid ETH spec/mask");
+ return -rte_errno;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ l3 = RTE_FLOW_ITEM_TYPE_IPV4;
+ ipv4_spec =
+ (const struct rte_flow_item_ipv4 *)item->spec;
+ ipv4_mask =
+ (const struct rte_flow_item_ipv4 *)item->mask;
+ if (!ipv4_spec || !ipv4_mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "NULL IPv4 spec/mask");
+ return -rte_errno;
+ }
+
+ /* Check IPv4 mask and update input set */
+ if (ipv4_mask->hdr.version_ihl ||
+ ipv4_mask->hdr.total_length ||
+ ipv4_mask->hdr.packet_id ||
+ ipv4_mask->hdr.fragment_offset ||
+ ipv4_mask->hdr.hdr_checksum) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid IPv4 mask.");
+ return -rte_errno;
+ }
+
+ if (ipv4_mask->hdr.src_addr == UINT32_MAX)
+ input_set |= I40E_INSET_IPV4_SRC;
+ if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
+ input_set |= I40E_INSET_IPV4_DST;
+ if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
+ input_set |= I40E_INSET_IPV4_TOS;
+ if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
+ input_set |= I40E_INSET_IPV4_TTL;
+ if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
+ input_set |= I40E_INSET_IPV4_PROTO;
+
+ /* Get filter info */
+ flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_OTHER;
+ /* Check if it is fragment. */
+ flag_offset =
+ rte_be_to_cpu_16(ipv4_spec->hdr.fragment_offset);
+ if (flag_offset & IPV4_HDR_OFFSET_MASK ||
+ flag_offset & IPV4_HDR_MF_FLAG)
+ flow_type = RTE_ETH_FLOW_FRAG_IPV4;
+
+ /* Get the filter info */
+ filter->input.flow.ip4_flow.proto =
+ ipv4_spec->hdr.next_proto_id;
+ filter->input.flow.ip4_flow.tos =
+ ipv4_spec->hdr.type_of_service;
+ filter->input.flow.ip4_flow.ttl =
+ ipv4_spec->hdr.time_to_live;
+ filter->input.flow.ip4_flow.src_ip =
+ ipv4_spec->hdr.src_addr;
+ filter->input.flow.ip4_flow.dst_ip =
+ ipv4_spec->hdr.dst_addr;
+
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ l3 = RTE_FLOW_ITEM_TYPE_IPV6;
+ ipv6_spec =
+ (const struct rte_flow_item_ipv6 *)item->spec;
+ ipv6_mask =
+ (const struct rte_flow_item_ipv6 *)item->mask;
+ if (!ipv6_spec || !ipv6_mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "NULL IPv6 spec/mask");
+ return -rte_errno;
+ }
+
+ /* Check IPv6 mask and update input set */
+ if (ipv6_mask->hdr.payload_len) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid IPv6 mask");
+ return -rte_errno;
+ }
+
+ /* SCR and DST address of IPv6 shouldn't be masked */
+ for (j = 0; j < RTE_DIM(ipv6_mask->hdr.src_addr); j++) {
+ if (ipv6_mask->hdr.src_addr[j] != UINT8_MAX ||
+ ipv6_mask->hdr.dst_addr[j] != UINT8_MAX) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid IPv6 mask");
+ return -rte_errno;
+ }
+ }
+
+ input_set |= I40E_INSET_IPV6_SRC;
+ input_set |= I40E_INSET_IPV6_DST;
+
+ if ((ipv6_mask->hdr.vtc_flow &
+ rte_cpu_to_be_16(I40E_IPV6_TC_MASK))
+ == rte_cpu_to_be_16(I40E_IPV6_TC_MASK))
+ input_set |= I40E_INSET_IPV6_TC;
+ if (ipv6_mask->hdr.proto == UINT8_MAX)
+ input_set |= I40E_INSET_IPV6_NEXT_HDR;
+ if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
+ input_set |= I40E_INSET_IPV6_HOP_LIMIT;
+
+ /* Get filter info */
+ filter->input.flow.ipv6_flow.tc =
+ (uint8_t)(ipv6_spec->hdr.vtc_flow <<
+ I40E_IPV4_TC_SHIFT);
+ filter->input.flow.ipv6_flow.proto =
+ ipv6_spec->hdr.proto;
+ filter->input.flow.ipv6_flow.hop_limits =
+ ipv6_spec->hdr.hop_limits;
+
+ rte_memcpy(filter->input.flow.ipv6_flow.src_ip,
+ ipv6_spec->hdr.src_addr, 16);
+ rte_memcpy(filter->input.flow.ipv6_flow.dst_ip,
+ ipv6_spec->hdr.dst_addr, 16);
+
+ /* Check if it is fragment. */
+ if (ipv6_spec->hdr.proto == I40E_IPV6_FRAG_HEADER)
+ flow_type = RTE_ETH_FLOW_FRAG_IPV6;
+ else
+ flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_OTHER;
+ break;
+ case RTE_FLOW_ITEM_TYPE_TCP:
+ tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
+ tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
+ if (!tcp_spec || !tcp_mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "NULL TCP spec/mask");
+ return -rte_errno;
+ }
+
+ /* Check TCP mask and update input set */
+ if (tcp_mask->hdr.sent_seq ||
+ tcp_mask->hdr.recv_ack ||
+ tcp_mask->hdr.data_off ||
+ tcp_mask->hdr.tcp_flags ||
+ tcp_mask->hdr.rx_win ||
+ tcp_mask->hdr.cksum ||
+ tcp_mask->hdr.tcp_urp) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid TCP mask");
+ return -rte_errno;
+ }
+
+ if (tcp_mask->hdr.src_port != UINT16_MAX ||
+ tcp_mask->hdr.dst_port != UINT16_MAX) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid TCP mask");
+ return -rte_errno;
+ }
+
+ input_set |= I40E_INSET_SRC_PORT;
+ input_set |= I40E_INSET_DST_PORT;
+
+ /* Get filter info */
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+ flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
+ else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+ flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_TCP;
+
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
+ filter->input.flow.tcp4_flow.src_port =
+ tcp_spec->hdr.src_port;
+ filter->input.flow.tcp4_flow.dst_port =
+ tcp_spec->hdr.dst_port;
+ } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
+ filter->input.flow.tcp6_flow.src_port =
+ tcp_spec->hdr.src_port;
+ filter->input.flow.tcp6_flow.dst_port =
+ tcp_spec->hdr.dst_port;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ udp_spec = (const struct rte_flow_item_udp *)item->spec;
+ udp_mask = (const struct rte_flow_item_udp *)item->mask;
+ if (!udp_spec || !udp_mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "NULL UDP spec/mask");
+ return -rte_errno;
+ }
+
+ /* Check UDP mask and update input set*/
+ if (udp_mask->hdr.dgram_len ||
+ udp_mask->hdr.dgram_cksum) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid UDP mask");
+ return -rte_errno;
+ }
+
+ if (udp_mask->hdr.src_port != UINT16_MAX ||
+ udp_mask->hdr.dst_port != UINT16_MAX) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid UDP mask");
+ return -rte_errno;
+ }
+
+ input_set |= I40E_INSET_SRC_PORT;
+ input_set |= I40E_INSET_DST_PORT;
+
+ /* Get filter info */
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+ flow_type =
+ RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
+ else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+ flow_type =
+ RTE_ETH_FLOW_NONFRAG_IPV6_UDP;
+
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
+ filter->input.flow.udp4_flow.src_port =
+ udp_spec->hdr.src_port;
+ filter->input.flow.udp4_flow.dst_port =
+ udp_spec->hdr.dst_port;
+ } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
+ filter->input.flow.udp6_flow.src_port =
+ udp_spec->hdr.src_port;
+ filter->input.flow.udp6_flow.dst_port =
+ udp_spec->hdr.dst_port;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_SCTP:
+ sctp_spec =
+ (const struct rte_flow_item_sctp *)item->spec;
+ sctp_mask =
+ (const struct rte_flow_item_sctp *)item->mask;
+ if (!sctp_spec || !sctp_mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "NULL SCTP spec/mask");
+ return -rte_errno;
+ }
+
+ /* Check SCTP mask and update input set */
+ if (sctp_mask->hdr.cksum) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid UDP mask");
+ return -rte_errno;
+ }
+
+ if (sctp_mask->hdr.src_port != UINT16_MAX ||
+ sctp_mask->hdr.dst_port != UINT16_MAX ||
+ sctp_mask->hdr.tag != UINT32_MAX) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid UDP mask");
+ return -rte_errno;
+ }
+ input_set |= I40E_INSET_SRC_PORT;
+ input_set |= I40E_INSET_DST_PORT;
+ input_set |= I40E_INSET_SCTP_VT;
+
+ /* Get filter info */
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+ flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_SCTP;
+ else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+ flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_SCTP;
+
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
+ filter->input.flow.sctp4_flow.src_port =
+ sctp_spec->hdr.src_port;
+ filter->input.flow.sctp4_flow.dst_port =
+ sctp_spec->hdr.dst_port;
+ filter->input.flow.sctp4_flow.verify_tag =
+ sctp_spec->hdr.tag;
+ } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
+ filter->input.flow.sctp6_flow.src_port =
+ sctp_spec->hdr.src_port;
+ filter->input.flow.sctp6_flow.dst_port =
+ sctp_spec->hdr.dst_port;
+ filter->input.flow.sctp6_flow.verify_tag =
+ sctp_spec->hdr.tag;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_VF:
+ vf_spec = (const struct rte_flow_item_vf *)item->spec;
+ filter->input.flow_ext.is_vf = 1;
+ filter->input.flow_ext.dst_id = vf_spec->id;
+ if (filter->input.flow_ext.is_vf &&
+ filter->input.flow_ext.dst_id >= pf->vf_num) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid VF ID for FDIR.");
+ return -rte_errno;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ pctype = i40e_flowtype_to_pctype(flow_type);
+ if (pctype == 0 || pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Unsupported flow type");
+ return -rte_errno;
+ }
+
+ if (input_set != i40e_get_default_input_set(pctype)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Invalid input set.");
+ return -rte_errno;
+ }
+ filter->input.flow_type = flow_type;
+
+ return 0;
+}
+
+/* Parse to get the action info of a FDIR filter */
+static int
+i40e_parse_fdir_act(struct rte_eth_dev *dev,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error,
+ struct rte_eth_fdir_filter *filter)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ const struct rte_flow_action *act;
+ const struct rte_flow_action_queue *act_q;
+ const struct rte_flow_action_mark *mark_spec;
+ uint32_t index = 0;
+
+ /* Check if the first non-void action is QUEUE or DROP. */
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
+ act->type != RTE_FLOW_ACTION_TYPE_DROP) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Invalid action.");
+ return -rte_errno;
+ }
+
+ act_q = (const struct rte_flow_action_queue *)act->conf;
+ filter->action.flex_off = 0;
+ if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE)
+ filter->action.behavior = RTE_ETH_FDIR_ACCEPT;
+ else
+ filter->action.behavior = RTE_ETH_FDIR_REJECT;
+
+ filter->action.report_status = RTE_ETH_FDIR_REPORT_ID;
+ filter->action.rx_queue = act_q->index;
+
+ if (filter->action.rx_queue >= pf->dev_data->nb_rx_queues) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "Invalid queue ID for FDIR.");
+ return -rte_errno;
+ }
+
+ /* Check if the next non-void item is MARK or END. */
+ index++;
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_MARK &&
+ act->type != RTE_FLOW_ACTION_TYPE_END) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Invalid action.");
+ return -rte_errno;
+ }
+
+ if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
+ mark_spec = (const struct rte_flow_action_mark *)act->conf;
+ filter->soft_id = mark_spec->id;
+
+ /* Check if the next non-void item is END */
+ index++;
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Invalid action.");
+ return -rte_errno;
+ }
+ }
+
+ return 0;
+}
+
+static int
i40e_flow_validate(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v5 09/17] net/i40e: parse tunnel filter
2017-01-04 3:22 ` [dpdk-dev] [PATCH v5 00/17] net/i40e: consistent filter API Beilei Xing
` (7 preceding siblings ...)
2017-01-04 3:22 ` [dpdk-dev] [PATCH v5 08/17] net/i40e: parse flow director filter Beilei Xing
@ 2017-01-04 3:22 ` Beilei Xing
2017-01-04 3:23 ` [dpdk-dev] [PATCH v5 10/17] net/i40e: add flow create function Beilei Xing
` (9 subsequent siblings)
18 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2017-01-04 3:22 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
This patch adds i40e_parse_tunnel_filter to check whether
a rule is a tunnel rule according to the items of the flow
pattern; the function also extracts the tunnel info.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_flow.c | 394 +++++++++++++++++++++++++++++++++++++++++++
1 file changed, 394 insertions(+)
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index 64b4ab6..42ebe5e 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -54,6 +54,8 @@
#define I40E_IPV4_TC_SHIFT 4
#define I40E_IPV6_TC_MASK (0x00FF << I40E_IPV4_TC_SHIFT)
#define I40E_IPV6_FRAG_HEADER 44
+#define I40E_TENANT_ARRAY_NUM 3
+#define I40E_TCI_MASK 0xFFFF
static int i40e_flow_validate(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
@@ -76,6 +78,14 @@ static int i40e_parse_fdir_act(struct rte_eth_dev *dev,
const struct rte_flow_action *actions,
struct rte_flow_error *error,
struct rte_eth_fdir_filter *filter);
+static int i40e_parse_tunnel_pattern(__rte_unused struct rte_eth_dev *dev,
+ const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ struct rte_eth_tunnel_filter_conf *filter);
+static int i40e_parse_tunnel_act(struct rte_eth_dev *dev,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error,
+ struct rte_eth_tunnel_filter_conf *filter);
static int i40e_parse_attr(const struct rte_flow_attr *attr,
struct rte_flow_error *error);
@@ -192,6 +202,45 @@ static enum rte_flow_item_type pattern_fdir_ipv6_sctp_ext[] = {
RTE_FLOW_ITEM_TYPE_END,
};
+/* Tunnel (VXLAN) patterns.  All four share the stack
+ * ETH / outer L3 / UDP / VXLAN / inner ETH; _1 and _3 use outer IPv4,
+ * _2 and _4 use outer IPv6, and _3/_4 additionally accept an inner
+ * VLAN item after the inner ETH.
+ */
+static enum rte_flow_item_type pattern_vxlan_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_VXLAN,
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_vxlan_2[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_VXLAN,
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_vxlan_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_VXLAN,
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_vxlan_4[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_VXLAN,
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
static int
i40e_parse_ethertype_filter(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
@@ -257,6 +306,33 @@ i40e_parse_fdir_filter(struct rte_eth_dev *dev,
return 0;
}
+static int
+i40e_parse_tunnel_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter)
+{
+ struct rte_eth_tunnel_filter_conf *tunnel_filter =
+ &filter->tunnel_filter;
+ int ret;
+
+ ret = i40e_parse_tunnel_pattern(dev, pattern, error, tunnel_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_parse_tunnel_act(dev, actions, error, tunnel_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_parse_attr(attr, error);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
static struct i40e_valid_pattern i40e_supported_patterns[] = {
/* Ethertype */
{ pattern_ethertype, i40e_parse_ethertype_filter },
@@ -277,6 +353,11 @@ static struct i40e_valid_pattern i40e_supported_patterns[] = {
{ pattern_fdir_ipv6_tcp_ext, i40e_parse_fdir_filter },
{ pattern_fdir_ipv6_sctp, i40e_parse_fdir_filter },
{ pattern_fdir_ipv6_sctp_ext, i40e_parse_fdir_filter },
+ /* tunnel */
+ { pattern_vxlan_1, i40e_parse_tunnel_filter },
+ { pattern_vxlan_2, i40e_parse_tunnel_filter },
+ { pattern_vxlan_3, i40e_parse_tunnel_filter },
+ { pattern_vxlan_4, i40e_parse_tunnel_filter },
};
#define NEXT_ITEM_OF_ACTION(act, actions, index) \
@@ -991,6 +1072,319 @@ i40e_parse_fdir_act(struct rte_eth_dev *dev,
return 0;
}
+/* Parse to get the action info of a tunnle filter */
+static int i40e_parse_tunnel_act(struct rte_eth_dev *dev,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error,
+ struct rte_eth_tunnel_filter_conf *filter)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ const struct rte_flow_action *act;
+ const struct rte_flow_action_queue *act_q;
+ uint32_t index = 0;
+
+ /* Check if the first non-void action is QUEUE. */
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ act_q = (const struct rte_flow_action_queue *)act->conf;
+ filter->queue_id = act_q->index;
+ if (filter->queue_id >= pf->dev_data->nb_rx_queues) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Invalid queue ID for tunnel filter");
+ return -rte_errno;
+ }
+
+ /* Check if the next non-void item is END */
+ index++;
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+static int
+i40e_check_tenant_id_mask(const uint8_t *mask)
+{
+ uint32_t j;
+ int is_masked = 0;
+
+ for (j = 0; j < I40E_TENANT_ARRAY_NUM; j++) {
+ if (*(mask + j) == UINT8_MAX) {
+ if (j > 0 && (*(mask + j) != *(mask + j - 1)))
+ return -EINVAL;
+ is_masked = 0;
+ } else if (*(mask + j) == 0) {
+ if (j > 0 && (*(mask + j) != *(mask + j - 1)))
+ return -EINVAL;
+ is_masked = 1;
+ } else {
+ return -EINVAL;
+ }
+ }
+
+ return is_masked;
+}
+
+static int
+i40e_parse_vxlan_pattern(const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ struct rte_eth_tunnel_filter_conf *filter)
+{
+ const struct rte_flow_item *item = pattern;
+ const struct rte_flow_item_eth *eth_spec;
+ const struct rte_flow_item_eth *eth_mask;
+ const struct rte_flow_item_eth *o_eth_spec = NULL;
+ const struct rte_flow_item_eth *o_eth_mask = NULL;
+ const struct rte_flow_item_vxlan *vxlan_spec = NULL;
+ const struct rte_flow_item_vxlan *vxlan_mask = NULL;
+ const struct rte_flow_item_eth *i_eth_spec = NULL;
+ const struct rte_flow_item_eth *i_eth_mask = NULL;
+ const struct rte_flow_item_vlan *vlan_spec = NULL;
+ const struct rte_flow_item_vlan *vlan_mask = NULL;
+ bool is_vni_masked = 0;
+ enum rte_flow_item_type item_type;
+ bool vxlan_flag = 0;
+
+ for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Not support range");
+ return -rte_errno;
+ }
+ item_type = item->type;
+ switch (item_type) {
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ eth_spec = (const struct rte_flow_item_eth *)item->spec;
+ eth_mask = (const struct rte_flow_item_eth *)item->mask;
+ if ((!eth_spec && eth_mask) ||
+ (eth_spec && !eth_mask)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid ether spec/mask");
+ return -rte_errno;
+ }
+
+ if (eth_spec && eth_mask) {
+ /* DST address of inner MAC shouldn't be masked.
+ * SRC address of Inner MAC should be masked.
+ */
+ if (!is_broadcast_ether_addr(ð_mask->dst) ||
+ !is_zero_ether_addr(ð_mask->src) ||
+ eth_mask->type) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid ether spec/mask");
+ return -rte_errno;
+ }
+
+ if (!vxlan_flag)
+ rte_memcpy(&filter->outer_mac,
+ ð_spec->dst,
+ ETHER_ADDR_LEN);
+ else
+ rte_memcpy(&filter->inner_mac,
+ ð_spec->dst,
+ ETHER_ADDR_LEN);
+ }
+
+ if (!vxlan_flag) {
+ o_eth_spec = eth_spec;
+ o_eth_mask = eth_mask;
+ } else {
+ i_eth_spec = eth_spec;
+ i_eth_mask = eth_mask;
+ }
+
+ break;
+ case RTE_FLOW_ITEM_TYPE_VLAN:
+ vlan_spec =
+ (const struct rte_flow_item_vlan *)item->spec;
+ vlan_mask =
+ (const struct rte_flow_item_vlan *)item->mask;
+ if (vxlan_flag) {
+ vlan_spec =
+ (const struct rte_flow_item_vlan *)item->spec;
+ vlan_mask =
+ (const struct rte_flow_item_vlan *)item->mask;
+ if (!(vlan_spec && vlan_mask)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid vlan item");
+ return -rte_errno;
+ }
+ } else {
+ if (vlan_spec || vlan_mask)
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid vlan item");
+ return -rte_errno;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ /* IPv4/IPv6/UDP are used to describe protocol,
+ * spec amd mask should be NULL.
+ */
+ if (item->spec || item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid IPv4 item");
+ return -rte_errno;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_VXLAN:
+ vxlan_spec =
+ (const struct rte_flow_item_vxlan *)item->spec;
+ vxlan_mask =
+ (const struct rte_flow_item_vxlan *)item->mask;
+ /* Check if VXLAN item is used to describe protocol.
+ * If yes, both spec and mask should be NULL.
+ * If no, either spec or mask shouldn't be NULL.
+ */
+ if ((!vxlan_spec && vxlan_mask) ||
+ (vxlan_spec && !vxlan_mask)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid VXLAN item");
+ return -rte_errno;
+ }
+
+ /* Check if VNI is masked. */
+ if (vxlan_mask) {
+ is_vni_masked =
+ i40e_check_tenant_id_mask(vxlan_mask->vni);
+ if (is_vni_masked < 0) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid VNI mask");
+ return -rte_errno;
+ }
+ }
+ vxlan_flag = 1;
+ break;
+ default:
+ break;
+ }
+ }
+
+ /* Check specification and mask to get the filter type */
+ if (vlan_spec && vlan_mask &&
+ (vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK))) {
+ /* If there's inner vlan */
+ filter->inner_vlan = rte_be_to_cpu_16(vlan_spec->tci)
+ & I40E_TCI_MASK;
+ if (vxlan_spec && vxlan_mask && !is_vni_masked) {
+ /* If there's vxlan */
+ rte_memcpy(&filter->tenant_id, vxlan_spec->vni,
+ RTE_DIM(vxlan_spec->vni));
+ if (!o_eth_spec && !o_eth_mask &&
+ i_eth_spec && i_eth_mask)
+ filter->filter_type =
+ RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID;
+ else {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL,
+ "Invalid filter type");
+ return -rte_errno;
+ }
+ } else if (!vxlan_spec && !vxlan_mask) {
+ /* If there's no vxlan */
+ if (!o_eth_spec && !o_eth_mask &&
+ i_eth_spec && i_eth_mask)
+ filter->filter_type =
+ RTE_TUNNEL_FILTER_IMAC_IVLAN;
+ else {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL,
+ "Invalid filter type");
+ return -rte_errno;
+ }
+ } else {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL,
+ "Invalid filter type");
+ return -rte_errno;
+ }
+ } else if ((!vlan_spec && !vlan_mask) ||
+ (vlan_spec && vlan_mask && vlan_mask->tci == 0x0)) {
+ /* If there's no inner vlan */
+ if (vxlan_spec && vxlan_mask && !is_vni_masked) {
+ /* If there's vxlan */
+ rte_memcpy(&filter->tenant_id, vxlan_spec->vni,
+ RTE_DIM(vxlan_spec->vni));
+ if (!o_eth_spec && !o_eth_mask &&
+ i_eth_spec && i_eth_mask)
+ filter->filter_type =
+ RTE_TUNNEL_FILTER_IMAC_TENID;
+ else if (o_eth_spec && o_eth_mask &&
+ i_eth_spec && i_eth_mask)
+ filter->filter_type =
+ RTE_TUNNEL_FILTER_OMAC_TENID_IMAC;
+ } else if (!vxlan_spec && !vxlan_mask) {
+ /* If there's no vxlan */
+ if (!o_eth_spec && !o_eth_mask &&
+ i_eth_spec && i_eth_mask) {
+ filter->filter_type = ETH_TUNNEL_FILTER_IMAC;
+ } else {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "Invalid filter type");
+ return -rte_errno;
+ }
+ } else {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "Invalid filter type");
+ return -rte_errno;
+ }
+ } else {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "Not supported by tunnel filter.");
+ return -rte_errno;
+ }
+
+ filter->tunnel_type = RTE_TUNNEL_TYPE_VXLAN;
+
+ return 0;
+}
+
+static int
+i40e_parse_tunnel_pattern(__rte_unused struct rte_eth_dev *dev,
+ const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ struct rte_eth_tunnel_filter_conf *filter)
+{
+ int ret;
+
+ ret = i40e_parse_vxlan_pattern(pattern, error, filter);
+
+ return ret;
+}
+
static int
i40e_flow_validate(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v5 10/17] net/i40e: add flow create function
2017-01-04 3:22 ` [dpdk-dev] [PATCH v5 00/17] net/i40e: consistent filter API Beilei Xing
` (8 preceding siblings ...)
2017-01-04 3:22 ` [dpdk-dev] [PATCH v5 09/17] net/i40e: parse tunnel filter Beilei Xing
@ 2017-01-04 3:23 ` Beilei Xing
2017-01-04 3:23 ` [dpdk-dev] [PATCH v5 11/17] net/i40e: add flow destroy function Beilei Xing
` (8 subsequent siblings)
18 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2017-01-04 3:23 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
This patch adds the i40e_flow_create function to create a
rule. It checks whether a flow matches an ethertype filter,
a flow director filter, or a tunnel filter; if the flow
matches one of them, that filter is programmed to HW.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_ethdev.c | 16 ++++++---
drivers/net/i40e/i40e_ethdev.h | 21 ++++++++++++
drivers/net/i40e/i40e_fdir.c | 2 +-
drivers/net/i40e/i40e_flow.c | 77 ++++++++++++++++++++++++++++++++++++++++++
4 files changed, 110 insertions(+), 6 deletions(-)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index bcf28cf..bbc43dc 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -353,9 +353,6 @@ static int i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
static int i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
struct rte_eth_udp_tunnel *udp_tunnel);
static void i40e_filter_input_set_init(struct i40e_pf *pf);
-static int i40e_ethertype_filter_set(struct i40e_pf *pf,
- struct rte_eth_ethertype_filter *filter,
- bool add);
static int i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
enum rte_filter_op filter_op,
void *arg);
@@ -1233,6 +1230,8 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
goto err_fdir_hash_map_alloc;
}
+ TAILQ_INIT(&pf->flow_list);
+
return 0;
err_fdir_hash_map_alloc:
@@ -1273,6 +1272,7 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
struct rte_pci_device *pci_dev;
struct i40e_hw *hw;
struct i40e_filter_control_settings settings;
+ struct i40e_flow *p_flow;
struct i40e_ethertype_filter *p_ethertype;
struct i40e_tunnel_filter *p_tunnel;
struct i40e_fdir_filter *p_fdir;
@@ -1297,6 +1297,12 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
if (hw->adapter_stopped == 0)
i40e_dev_close(dev);
+ /* Remove all flows */
+ while ((p_flow = TAILQ_FIRST(&pf->flow_list))) {
+ TAILQ_REMOVE(&pf->flow_list, p_flow, node);
+ rte_free(p_flow);
+ }
+
/* Remove all ethertype director rules and hash */
if (ethertype_rule->hash_map)
rte_free(ethertype_rule->hash_map);
@@ -6611,7 +6617,7 @@ i40e_sw_tunnel_filter_del(struct i40e_pf *pf,
return 0;
}
-static int
+int
i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
struct rte_eth_tunnel_filter_conf *tunnel_filter,
uint8_t add)
@@ -8256,7 +8262,7 @@ i40e_sw_ethertype_filter_del(struct i40e_pf *pf,
* Configure ethertype filter, which can director packet by filtering
* with mac address and ether_type or only ether_type
*/
-static int
+int
i40e_ethertype_filter_set(struct i40e_pf *pf,
struct rte_eth_ethertype_filter *filter,
bool add)
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 9e3a48d..b33910d 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -536,6 +536,17 @@ struct i40e_mirror_rule {
TAILQ_HEAD(i40e_mirror_rule_list, i40e_mirror_rule);
/*
+ * Struct to store flow created.
+ */
+struct i40e_flow {
+ TAILQ_ENTRY(i40e_flow) node; /* linkage in the PF's flow_list */
+ enum rte_filter_type filter_type; /* kind of filter this flow maps to */
+ void *rule; /* driver's filter entry backing this flow -- NOTE(review): concrete type presumably depends on filter_type; confirm against the create path */
+};
+
+TAILQ_HEAD(i40e_flow_list, i40e_flow);
+
+/*
* Structure to store private data specific for PF instance.
*/
struct i40e_pf {
@@ -592,6 +603,7 @@ struct i40e_pf {
bool floating_veb; /* The flag to use the floating VEB */
/* The floating enable flag for the specific VF */
bool floating_veb_list[I40E_MAX_VF];
+ struct i40e_flow_list flow_list;
};
enum pending_msg {
@@ -767,6 +779,15 @@ i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_rule *tunnel_rule,
int i40e_sw_tunnel_filter_del(struct i40e_pf *pf,
struct i40e_tunnel_filter_input *input);
uint64_t i40e_get_default_input_set(uint16_t pctype);
+int i40e_ethertype_filter_set(struct i40e_pf *pf,
+ struct rte_eth_ethertype_filter *filter,
+ bool add);
+int i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
+ const struct rte_eth_fdir_filter *filter,
+ bool add);
+int i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
+ struct rte_eth_tunnel_filter_conf *tunnel_filter,
+ uint8_t add);
/* I40E_DEV_PRIVATE_TO */
#define I40E_DEV_PRIVATE_TO_PF(adapter) \
diff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c
index f89dbc9..91d91aa 100644
--- a/drivers/net/i40e/i40e_fdir.c
+++ b/drivers/net/i40e/i40e_fdir.c
@@ -1099,7 +1099,7 @@ i40e_sw_fdir_filter_del(struct i40e_pf *pf, struct rte_eth_fdir_input *input)
* @filter: fdir filter entry
* @add: 0 - delete, 1 - add
*/
-static int
+int
i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
const struct rte_eth_fdir_filter *filter,
bool add)
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index 42ebe5e..3114368 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -62,6 +62,11 @@ static int i40e_flow_validate(struct rte_eth_dev *dev,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
struct rte_flow_error *error);
+static struct rte_flow *i40e_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error);
static int i40e_parse_ethertype_pattern(__rte_unused struct rte_eth_dev *dev,
const struct rte_flow_item *pattern,
struct rte_flow_error *error,
@@ -91,9 +96,11 @@ static int i40e_parse_attr(const struct rte_flow_attr *attr,
const struct rte_flow_ops i40e_flow_ops = {
.validate = i40e_flow_validate,
+ .create = i40e_flow_create,
};
union i40e_filter_t cons_filter;
+enum rte_filter_type cons_filter_type = RTE_ETH_FILTER_NONE;
/* Pattern matched ethertype filter */
static enum rte_flow_item_type pattern_ethertype[] = {
@@ -267,6 +274,8 @@ i40e_parse_ethertype_filter(struct rte_eth_dev *dev,
if (ret)
return ret;
+ cons_filter_type = RTE_ETH_FILTER_ETHERTYPE;
+
return ret;
}
@@ -294,6 +303,8 @@ i40e_parse_fdir_filter(struct rte_eth_dev *dev,
if (ret)
return ret;
+ cons_filter_type = RTE_ETH_FILTER_FDIR;
+
if (dev->data->dev_conf.fdir_conf.mode !=
RTE_FDIR_MODE_PERFECT) {
rte_flow_error_set(error, ENOTSUP,
@@ -330,6 +341,8 @@ i40e_parse_tunnel_filter(struct rte_eth_dev *dev,
if (ret)
return ret;
+ cons_filter_type = RTE_ETH_FILTER_TUNNEL;
+
return ret;
}
@@ -1446,3 +1459,67 @@ i40e_flow_validate(struct rte_eth_dev *dev,
return ret;
}
+
+static struct rte_flow *
+i40e_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_flow *flow;
+ int ret;
+
+ flow = rte_zmalloc("i40e_flow", sizeof(struct i40e_flow), 0);
+ if (!flow) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to allocate memory");
+ return (struct rte_flow *)flow;
+ }
+
+ ret = i40e_flow_validate(dev, attr, pattern, actions, error);
+ if (ret < 0)
+ return NULL;
+
+ switch (cons_filter_type) {
+ case RTE_ETH_FILTER_ETHERTYPE:
+ ret = i40e_ethertype_filter_set(pf,
+ &cons_filter.ethertype_filter, 1);
+ if (ret)
+ goto free_flow;
+ flow->rule = TAILQ_LAST(&pf->ethertype.ethertype_list,
+ i40e_ethertype_filter_list);
+ break;
+ case RTE_ETH_FILTER_FDIR:
+ ret = i40e_add_del_fdir_filter(dev,
+ &cons_filter.fdir_filter, 1);
+ if (ret)
+ goto free_flow;
+ flow->rule = TAILQ_LAST(&pf->fdir.fdir_list,
+ i40e_fdir_filter_list);
+ break;
+ case RTE_ETH_FILTER_TUNNEL:
+ ret = i40e_dev_tunnel_filter_set(pf,
+ &cons_filter.tunnel_filter, 1);
+ if (ret)
+ goto free_flow;
+ flow->rule = TAILQ_LAST(&pf->tunnel.tunnel_list,
+ i40e_tunnel_filter_list);
+ break;
+ default:
+ goto free_flow;
+ }
+
+ flow->filter_type = cons_filter_type;
+ TAILQ_INSERT_TAIL(&pf->flow_list, flow, node);
+ return (struct rte_flow *)flow;
+
+free_flow:
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to create flow.");
+ rte_free(flow);
+ return NULL;
+}
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v5 11/17] net/i40e: add flow destroy function
2017-01-04 3:22 ` [dpdk-dev] [PATCH v5 00/17] net/i40e: consistent filter API Beilei Xing
` (9 preceding siblings ...)
2017-01-04 3:23 ` [dpdk-dev] [PATCH v5 10/17] net/i40e: add flow create function Beilei Xing
@ 2017-01-04 3:23 ` Beilei Xing
2017-01-04 3:23 ` [dpdk-dev] [PATCH v5 12/17] net/i40e: destroy ethertype filter Beilei Xing
` (7 subsequent siblings)
18 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2017-01-04 3:23 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
This patch adds i40e_flow_destroy function to destroy
a flow for users.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_flow.c | 33 +++++++++++++++++++++++++++++++++
1 file changed, 33 insertions(+)
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index 3114368..ece9f89 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -67,6 +67,9 @@ static struct rte_flow *i40e_flow_create(struct rte_eth_dev *dev,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
struct rte_flow_error *error);
+static int i40e_flow_destroy(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ struct rte_flow_error *error);
static int i40e_parse_ethertype_pattern(__rte_unused struct rte_eth_dev *dev,
const struct rte_flow_item *pattern,
struct rte_flow_error *error,
@@ -97,6 +100,7 @@ static int i40e_parse_attr(const struct rte_flow_attr *attr,
const struct rte_flow_ops i40e_flow_ops = {
.validate = i40e_flow_validate,
.create = i40e_flow_create,
+ .destroy = i40e_flow_destroy,
};
union i40e_filter_t cons_filter;
@@ -1523,3 +1527,32 @@ i40e_flow_create(struct rte_eth_dev *dev,
rte_free(flow);
return NULL;
}
+
+static int
+i40e_flow_destroy(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_flow *pmd_flow = (struct i40e_flow *)flow;
+ enum rte_filter_type filter_type = pmd_flow->filter_type;
+ int ret = 0;
+
+ switch (filter_type) {
+ default:
+ PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
+ filter_type);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (!ret) {
+ TAILQ_REMOVE(&pf->flow_list, pmd_flow, node);
+ rte_free(pmd_flow);
+ } else
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to destroy flow.");
+
+ return ret;
+}
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v5 12/17] net/i40e: destroy ethertype filter
2017-01-04 3:22 ` [dpdk-dev] [PATCH v5 00/17] net/i40e: consistent filter API Beilei Xing
` (10 preceding siblings ...)
2017-01-04 3:23 ` [dpdk-dev] [PATCH v5 11/17] net/i40e: add flow destroy function Beilei Xing
@ 2017-01-04 3:23 ` Beilei Xing
2017-01-04 3:23 ` [dpdk-dev] [PATCH v5 13/17] net/i40e: destroy tunnel filter Beilei Xing
` (6 subsequent siblings)
18 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2017-01-04 3:23 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
This patch adds i40e_dev_destroy_ethertype_filter function
to destroy an ethertype filter for users.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_flow.c | 42 ++++++++++++++++++++++++++++++++++++++++++
1 file changed, 42 insertions(+)
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index ece9f89..2940058 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -49,6 +49,7 @@
#include "i40e_logs.h"
#include "base/i40e_type.h"
+#include "base/i40e_prototype.h"
#include "i40e_ethdev.h"
#define I40E_IPV4_TC_SHIFT 4
@@ -96,6 +97,8 @@ static int i40e_parse_tunnel_act(struct rte_eth_dev *dev,
struct rte_eth_tunnel_filter_conf *filter);
static int i40e_parse_attr(const struct rte_flow_attr *attr,
struct rte_flow_error *error);
+static int i40e_dev_destroy_ethertype_filter(struct i40e_pf *pf,
+ struct i40e_ethertype_filter *filter);
const struct rte_flow_ops i40e_flow_ops = {
.validate = i40e_flow_validate,
@@ -1539,6 +1542,10 @@ i40e_flow_destroy(struct rte_eth_dev *dev,
int ret = 0;
switch (filter_type) {
+ case RTE_ETH_FILTER_ETHERTYPE:
+ ret = i40e_dev_destroy_ethertype_filter(pf,
+ (struct i40e_ethertype_filter *)pmd_flow->rule);
+ break;
default:
PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
filter_type);
@@ -1556,3 +1563,38 @@ i40e_flow_destroy(struct rte_eth_dev *dev,
return ret;
}
+
+static int
+i40e_dev_destroy_ethertype_filter(struct i40e_pf *pf,
+ struct i40e_ethertype_filter *filter)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
+ struct i40e_ethertype_filter *node;
+ struct i40e_control_filter_stats stats;
+ uint16_t flags = 0;
+ int ret = 0;
+
+ if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
+ flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
+ if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
+ flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
+ flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
+
+ memset(&stats, 0, sizeof(stats));
+ ret = i40e_aq_add_rem_control_packet_filter(hw,
+ filter->input.mac_addr.addr_bytes,
+ filter->input.ether_type,
+ flags, pf->main_vsi->seid,
+ filter->queue, 0, &stats, NULL);
+ if (ret < 0)
+ return ret;
+
+ node = i40e_sw_ethertype_filter_lookup(ethertype_rule, &filter->input);
+ if (!node)
+ return -EINVAL;
+
+ ret = i40e_sw_ethertype_filter_del(pf, &node->input);
+
+ return ret;
+}
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v5 13/17] net/i40e: destroy tunnel filter
2017-01-04 3:22 ` [dpdk-dev] [PATCH v5 00/17] net/i40e: consistent filter API Beilei Xing
` (11 preceding siblings ...)
2017-01-04 3:23 ` [dpdk-dev] [PATCH v5 12/17] net/i40e: destroy ethertype filter Beilei Xing
@ 2017-01-04 3:23 ` Beilei Xing
2017-01-04 3:23 ` [dpdk-dev] [PATCH v5 14/17] net/i40e: destroy flow directory filter Beilei Xing
` (5 subsequent siblings)
18 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2017-01-04 3:23 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
This patch adds i40e_dev_destroy_tunnel_filter function
to destroy a tunnel filter for users.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_flow.c | 41 +++++++++++++++++++++++++++++++++++++++++
1 file changed, 41 insertions(+)
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index 2940058..f334844 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -99,6 +99,8 @@ static int i40e_parse_attr(const struct rte_flow_attr *attr,
struct rte_flow_error *error);
static int i40e_dev_destroy_ethertype_filter(struct i40e_pf *pf,
struct i40e_ethertype_filter *filter);
+static int i40e_dev_destroy_tunnel_filter(struct i40e_pf *pf,
+ struct i40e_tunnel_filter *filter);
const struct rte_flow_ops i40e_flow_ops = {
.validate = i40e_flow_validate,
@@ -1546,6 +1548,10 @@ i40e_flow_destroy(struct rte_eth_dev *dev,
ret = i40e_dev_destroy_ethertype_filter(pf,
(struct i40e_ethertype_filter *)pmd_flow->rule);
break;
+ case RTE_ETH_FILTER_TUNNEL:
+ ret = i40e_dev_destroy_tunnel_filter(pf,
+ (struct i40e_tunnel_filter *)pmd_flow->rule);
+ break;
default:
PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
filter_type);
@@ -1598,3 +1604,38 @@ i40e_dev_destroy_ethertype_filter(struct i40e_pf *pf,
return ret;
}
+
+static int
+i40e_dev_destroy_tunnel_filter(struct i40e_pf *pf,
+ struct i40e_tunnel_filter *filter)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_vsi *vsi = pf->main_vsi;
+ struct i40e_aqc_add_remove_cloud_filters_element_data cld_filter;
+ struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
+ struct i40e_tunnel_filter *node;
+ int ret = 0;
+
+ memset(&cld_filter, 0, sizeof(cld_filter));
+ ether_addr_copy((struct ether_addr *)&filter->input.outer_mac,
+ (struct ether_addr *)&cld_filter.outer_mac);
+ ether_addr_copy((struct ether_addr *)&filter->input.inner_mac,
+ (struct ether_addr *)&cld_filter.inner_mac);
+ cld_filter.inner_vlan = filter->input.inner_vlan;
+ cld_filter.flags = filter->input.flags;
+ cld_filter.tenant_id = filter->input.tenant_id;
+ cld_filter.queue_number = filter->queue;
+
+ ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
+ &cld_filter, 1);
+ if (ret < 0)
+ return ret;
+
+ node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &filter->input);
+ if (!node)
+ return -EINVAL;
+
+ ret = i40e_sw_tunnel_filter_del(pf, &node->input);
+
+ return ret;
+}
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v5 14/17] net/i40e: destroy flow directory filter
2017-01-04 3:22 ` [dpdk-dev] [PATCH v5 00/17] net/i40e: consistent filter API Beilei Xing
` (12 preceding siblings ...)
2017-01-04 3:23 ` [dpdk-dev] [PATCH v5 13/17] net/i40e: destroy tunnel filter Beilei Xing
@ 2017-01-04 3:23 ` Beilei Xing
2017-01-04 3:23 ` [dpdk-dev] [PATCH v5 15/17] net/i40e: add flow flush function Beilei Xing
` (4 subsequent siblings)
18 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2017-01-04 3:23 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
This patch supports destroying a flow directory filter
for users.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_flow.c | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index f334844..2674c2c 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -1552,6 +1552,10 @@ i40e_flow_destroy(struct rte_eth_dev *dev,
ret = i40e_dev_destroy_tunnel_filter(pf,
(struct i40e_tunnel_filter *)pmd_flow->rule);
break;
+ case RTE_ETH_FILTER_FDIR:
+ ret = i40e_add_del_fdir_filter(dev,
+ &((struct i40e_fdir_filter *)pmd_flow->rule)->fdir, 0);
+ break;
default:
PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
filter_type);
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v5 15/17] net/i40e: add flow flush function
2017-01-04 3:22 ` [dpdk-dev] [PATCH v5 00/17] net/i40e: consistent filter API Beilei Xing
` (13 preceding siblings ...)
2017-01-04 3:23 ` [dpdk-dev] [PATCH v5 14/17] net/i40e: destroy flow directory filter Beilei Xing
@ 2017-01-04 3:23 ` Beilei Xing
2017-01-04 3:23 ` [dpdk-dev] [PATCH v5 16/17] net/i40e: flush ethertype filters Beilei Xing
` (3 subsequent siblings)
18 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2017-01-04 3:23 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
This patch adds i40e_flow_flush function to flush all
filters for users. The flow director flush function
is included first.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_ethdev.h | 1 +
drivers/net/i40e/i40e_fdir.c | 4 +---
drivers/net/i40e/i40e_flow.c | 51 ++++++++++++++++++++++++++++++++++++++++++
3 files changed, 53 insertions(+), 3 deletions(-)
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index b33910d..57fd796 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -788,6 +788,7 @@ int i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
int i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
struct rte_eth_tunnel_filter_conf *tunnel_filter,
uint8_t add);
+int i40e_fdir_flush(struct rte_eth_dev *dev);
/* I40E_DEV_PRIVATE_TO */
#define I40E_DEV_PRIVATE_TO_PF(adapter) \
diff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c
index 91d91aa..67d63ff 100644
--- a/drivers/net/i40e/i40e_fdir.c
+++ b/drivers/net/i40e/i40e_fdir.c
@@ -119,8 +119,6 @@ static int i40e_fdir_filter_programming(struct i40e_pf *pf,
enum i40e_filter_pctype pctype,
const struct rte_eth_fdir_filter *filter,
bool add);
-static int i40e_fdir_flush(struct rte_eth_dev *dev);
-
static int i40e_fdir_filter_convert(const struct rte_eth_fdir_filter *input,
struct i40e_fdir_filter *filter);
static struct i40e_fdir_filter *
@@ -1325,7 +1323,7 @@ i40e_fdir_filter_programming(struct i40e_pf *pf,
* i40e_fdir_flush - clear all filters of Flow Director table
* @pf: board private structure
*/
-static int
+int
i40e_fdir_flush(struct rte_eth_dev *dev)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index 2674c2c..bc8a76c 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -68,6 +68,8 @@ static struct rte_flow *i40e_flow_create(struct rte_eth_dev *dev,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
struct rte_flow_error *error);
+static int i40e_flow_flush(struct rte_eth_dev *dev,
+ struct rte_flow_error *error);
static int i40e_flow_destroy(struct rte_eth_dev *dev,
struct rte_flow *flow,
struct rte_flow_error *error);
@@ -101,11 +103,13 @@ static int i40e_dev_destroy_ethertype_filter(struct i40e_pf *pf,
struct i40e_ethertype_filter *filter);
static int i40e_dev_destroy_tunnel_filter(struct i40e_pf *pf,
struct i40e_tunnel_filter *filter);
+static int i40e_fdir_filter_flush(struct i40e_pf *pf);
const struct rte_flow_ops i40e_flow_ops = {
.validate = i40e_flow_validate,
.create = i40e_flow_create,
.destroy = i40e_flow_destroy,
+ .flush = i40e_flow_flush,
};
union i40e_filter_t cons_filter;
@@ -1643,3 +1647,50 @@ i40e_dev_destroy_tunnel_filter(struct i40e_pf *pf,
return ret;
}
+
+static int
+i40e_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ int ret;
+
+ ret = i40e_fdir_filter_flush(pf);
+ if (ret)
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to flush FDIR flows.");
+
+ return ret;
+}
+
+static int
+i40e_fdir_filter_flush(struct i40e_pf *pf)
+{
+ struct rte_eth_dev *dev = pf->adapter->eth_dev;
+ struct i40e_fdir_info *fdir_info = &pf->fdir;
+ struct i40e_fdir_filter *fdir_filter;
+ struct i40e_flow *flow;
+ void *temp;
+ int ret;
+
+ ret = i40e_fdir_flush(dev);
+ if (!ret) {
+ /* Delete FDIR filters in FDIR list. */
+ while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
+ ret = i40e_sw_fdir_filter_del(pf,
+ &fdir_filter->fdir.input);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Delete FDIR flows in flow list. */
+ TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
+ if (flow->filter_type == RTE_ETH_FILTER_FDIR) {
+ TAILQ_REMOVE(&pf->flow_list, flow, node);
+ rte_free(flow);
+ }
+ }
+ }
+
+ return ret;
+}
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v5 16/17] net/i40e: flush ethertype filters
2017-01-04 3:22 ` [dpdk-dev] [PATCH v5 00/17] net/i40e: consistent filter API Beilei Xing
` (14 preceding siblings ...)
2017-01-04 3:23 ` [dpdk-dev] [PATCH v5 15/17] net/i40e: add flow flush function Beilei Xing
@ 2017-01-04 3:23 ` Beilei Xing
2017-01-04 3:23 ` [dpdk-dev] [PATCH v5 17/17] net/i40e: flush tunnel filters Beilei Xing
` (2 subsequent siblings)
18 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2017-01-04 3:23 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
This patch adds i40e_ethertype_filter_flush function
to flush all ethertype filters, including filters in
SW and HW.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_flow.c | 41 ++++++++++++++++++++++++++++++++++++++++-
1 file changed, 40 insertions(+), 1 deletion(-)
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index bc8a76c..2e696d3 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -104,6 +104,7 @@ static int i40e_dev_destroy_ethertype_filter(struct i40e_pf *pf,
static int i40e_dev_destroy_tunnel_filter(struct i40e_pf *pf,
struct i40e_tunnel_filter *filter);
static int i40e_fdir_filter_flush(struct i40e_pf *pf);
+static int i40e_ethertype_filter_flush(struct i40e_pf *pf);
const struct rte_flow_ops i40e_flow_ops = {
.validate = i40e_flow_validate,
@@ -1655,10 +1656,20 @@ i40e_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
int ret;
ret = i40e_fdir_filter_flush(pf);
- if (ret)
+ if (ret) {
rte_flow_error_set(error, -ret,
RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
"Failed to flush FDIR flows.");
+ return -rte_errno;
+ }
+
+ ret = i40e_ethertype_filter_flush(pf);
+ if (ret) {
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to ethertype flush flows.");
+ return -rte_errno;
+ }
return ret;
}
@@ -1694,3 +1705,31 @@ i40e_fdir_filter_flush(struct i40e_pf *pf)
return ret;
}
+
+/* Flush all ethertype filters */
+static int
+i40e_ethertype_filter_flush(struct i40e_pf *pf)
+{
+ struct i40e_ethertype_filter_list
+ *ethertype_list = &pf->ethertype.ethertype_list;
+ struct i40e_ethertype_filter *filter;
+ struct i40e_flow *flow;
+ void *temp;
+ int ret = 0;
+
+ while ((filter = TAILQ_FIRST(ethertype_list))) {
+ ret = i40e_dev_destroy_ethertype_filter(pf, filter);
+ if (ret)
+ return ret;
+ }
+
+ /* Delete ethertype flows in flow list. */
+ TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
+ if (flow->filter_type == RTE_ETH_FILTER_ETHERTYPE) {
+ TAILQ_REMOVE(&pf->flow_list, flow, node);
+ rte_free(flow);
+ }
+ }
+
+ return ret;
+}
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v5 17/17] net/i40e: flush tunnel filters
2017-01-04 3:22 ` [dpdk-dev] [PATCH v5 00/17] net/i40e: consistent filter API Beilei Xing
` (15 preceding siblings ...)
2017-01-04 3:23 ` [dpdk-dev] [PATCH v5 16/17] net/i40e: flush ethertype filters Beilei Xing
@ 2017-01-04 3:23 ` Beilei Xing
2017-01-04 6:40 ` [dpdk-dev] [PATCH v5 00/17] net/i40e: consistent filter API Wu, Jingjing
2017-01-05 15:45 ` [dpdk-dev] [PATCH v6 " Beilei Xing
18 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2017-01-04 3:23 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
This patch adds i40e_tunnel_filter_flush function
to flush all tunnel filters, including filters in
SW and HW.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_flow.c | 37 +++++++++++++++++++++++++++++++++++++
1 file changed, 37 insertions(+)
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index 2e696d3..c8eae4f 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -105,6 +105,7 @@ static int i40e_dev_destroy_tunnel_filter(struct i40e_pf *pf,
struct i40e_tunnel_filter *filter);
static int i40e_fdir_filter_flush(struct i40e_pf *pf);
static int i40e_ethertype_filter_flush(struct i40e_pf *pf);
+static int i40e_tunnel_filter_flush(struct i40e_pf *pf);
const struct rte_flow_ops i40e_flow_ops = {
.validate = i40e_flow_validate,
@@ -1671,6 +1672,14 @@ i40e_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
return -rte_errno;
}
+ ret = i40e_tunnel_filter_flush(pf);
+ if (ret) {
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to flush tunnel flows.");
+ return -rte_errno;
+ }
+
return ret;
}
@@ -1733,3 +1742,31 @@ i40e_ethertype_filter_flush(struct i40e_pf *pf)
return ret;
}
+
+/* Flush all tunnel filters */
+static int
+i40e_tunnel_filter_flush(struct i40e_pf *pf)
+{
+ struct i40e_tunnel_filter_list
+ *tunnel_list = &pf->tunnel.tunnel_list;
+ struct i40e_tunnel_filter *filter;
+ struct i40e_flow *flow;
+ void *temp;
+ int ret = 0;
+
+ while ((filter = TAILQ_FIRST(tunnel_list))) {
+ ret = i40e_dev_destroy_tunnel_filter(pf, filter);
+ if (ret)
+ return ret;
+ }
+
+ /* Delete tunnel flows in flow list. */
+ TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
+ if (flow->filter_type == RTE_ETH_FILTER_TUNNEL) {
+ TAILQ_REMOVE(&pf->flow_list, flow, node);
+ rte_free(flow);
+ }
+ }
+
+ return ret;
+}
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* Re: [dpdk-dev] [PATCH v5 00/17] net/i40e: consistent filter API
2017-01-04 3:22 ` [dpdk-dev] [PATCH v5 00/17] net/i40e: consistent filter API Beilei Xing
` (16 preceding siblings ...)
2017-01-04 3:23 ` [dpdk-dev] [PATCH v5 17/17] net/i40e: flush tunnel filters Beilei Xing
@ 2017-01-04 6:40 ` Wu, Jingjing
2017-01-05 15:45 ` [dpdk-dev] [PATCH v6 " Beilei Xing
18 siblings, 0 replies; 175+ messages in thread
From: Wu, Jingjing @ 2017-01-04 6:40 UTC (permalink / raw)
To: Xing, Beilei, Zhang, Helin; +Cc: dev
> -----Original Message-----
> From: Xing, Beilei
> Sent: Wednesday, January 4, 2017 11:23 AM
> To: Wu, Jingjing <jingjing.wu@intel.com>; Zhang, Helin <helin.zhang@intel.com>
> Cc: dev@dpdk.org
> Subject: [PATCH v5 00/17] net/i40e: consistent filter API
>
> The patch set depends on Adrien's Generic flow API(rte_flow).
>
> The patches mainly finish following functions:
> 1) Store and restore all kinds of filters.
> 2) Parse all kinds of filters.
> 3) Add flow validate function.
> 4) Add flow create function.
> 5) Add flow destroy function.
> 6) Add flow flush function.
>
> v5 changes:
> Change some local variable name.
> Add removing i40e_flow_list during device unint.
> Fix compile error when gcc compile option isn't '-O0'.
>
> v4 changes:
> Change I40E_TCI_MASK with 0xFFFF to align with testpmd.
> Modify the stats display when restoring filters.
>
> v3 changes:
> Set the related cause pointer to a non-NULL value when error happens.
> Change return value when error happens.
> Modify filter_del parameter with key.
> Malloc filter after checking when delete a filter.
> Delete meaningless initialization.
> Add return value when there's error.
> Change global variable definition.
> Modify some function declaration.
>
> v2 changes:
> Add i40e_flow.c, all flow ops are implemented in the file.
> Change the whole implementation of all parse flow functions.
> Update error info for all flow ops.
> Add flow_list to store flows created.
>
> Beilei Xing (17):
> net/i40e: store ethertype filter
> net/i40e: store tunnel filter
> net/i40e: store flow director filter
> net/i40e: restore ethertype filter
> net/i40e: restore tunnel filter
> net/i40e: restore flow director filter
> net/i40e: add flow validate function
> net/i40e: parse flow director filter
> net/i40e: parse tunnel filter
> net/i40e: add flow create function
> net/i40e: add flow destroy function
> net/i40e: destroy ethertype filter
> net/i40e: destroy tunnel filter
> net/i40e: destroy flow directory filter
> net/i40e: add flow flush function
> net/i40e: flush ethertype filters
> net/i40e: flush tunnel filters
>
> drivers/net/i40e/Makefile | 2 +
> drivers/net/i40e/i40e_ethdev.c | 526 ++++++++++--
> drivers/net/i40e/i40e_ethdev.h | 173 ++++
> drivers/net/i40e/i40e_fdir.c | 140 +++-
> drivers/net/i40e/i40e_flow.c | 1772
> ++++++++++++++++++++++++++++++++++++++++
> 5 files changed, 2547 insertions(+), 66 deletions(-) create mode 100644
> drivers/net/i40e/i40e_flow.c
>
Acked-by: Jingjing Wu <jingjing.wu@intel.com>
Thanks
Jingjing
^ permalink raw reply [flat|nested] 175+ messages in thread
* Re: [dpdk-dev] [PATCH v5 07/17] net/i40e: add flow validate function
2017-01-04 3:22 ` [dpdk-dev] [PATCH v5 07/17] net/i40e: add flow validate function Beilei Xing
@ 2017-01-04 18:57 ` Ferruh Yigit
2017-01-05 6:08 ` Xing, Beilei
0 siblings, 1 reply; 175+ messages in thread
From: Ferruh Yigit @ 2017-01-04 18:57 UTC (permalink / raw)
To: Beilei Xing, jingjing.wu, helin.zhang; +Cc: dev
On 1/4/2017 3:22 AM, Beilei Xing wrote:
> This patch adds i40e_flow_validation function to check if
> a flow is valid according to the flow pattern.
> i40e_parse_ethertype_filter is added first, it also gets
> the ethertype info.
> i40e_flow.c is added to handle all generic filter events.
>
> Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> ---
<...>
> diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
> index 153322a..edfd52b 100644
> --- a/drivers/net/i40e/i40e_ethdev.c
> +++ b/drivers/net/i40e/i40e_ethdev.c
> @@ -8426,6 +8426,8 @@ i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
> return ret;
> }
>
> +const struct rte_flow_ops i40e_flow_ops;
Is this intentional (instead of using extern) ?
Because i40e_flow.c has a global variable definition with same name, it
looks like this is not causing a build error, but I think confusing.
<...>
> +static int i40e_parse_ethertype_act(struct rte_eth_dev *dev,
> + const struct rte_flow_action *actions,
> + struct rte_flow_error *error,
> + struct rte_eth_ethertype_filter *filter);
In API naming, I would prefer full "action" instead of shorten "act",
but it is your call.
<...>
> +
> +union i40e_filter_t cons_filter;
Why this cons_filter is required. I can see this is saving some state
related rule during validate function.
If the plan is to use this during rule creation, is user has to call
validate before each create?
<...>
> +
> +static int
> +i40e_parse_ethertype_filter(struct rte_eth_dev *dev,
> + const struct rte_flow_attr *attr,
> + const struct rte_flow_item pattern[],
> + const struct rte_flow_action actions[],
> + struct rte_flow_error *error,
> + union i40e_filter_t *filter)
> +{
> + struct rte_eth_ethertype_filter *ethertype_filter =
> + &filter->ethertype_filter;
> + int ret;
> +
> + ret = i40e_parse_ethertype_pattern(dev, pattern, error,
> + ethertype_filter);
> + if (ret)
> + return ret;
> +
> + ret = i40e_parse_ethertype_act(dev, actions, error,
> + ethertype_filter);
> + if (ret)
> + return ret;
> +
> + ret = i40e_parse_attr(attr, error);
It is your call, but I would suggest using a specific namespace for all
rte_flow related functions, something like "i40e_flow_".
In this context it is clear what this function is, but in whole driver
code, the function name is too generic to understand what it does.
> + if (ret)
> + return ret;
> +
> + return ret;
> +}
> +
<...>
> +
> +static int
> +i40e_parse_ethertype_pattern(__rte_unused struct rte_eth_dev *dev,
> + const struct rte_flow_item *pattern,
> + struct rte_flow_error *error,
> + struct rte_eth_ethertype_filter *filter)
I think it is good idea to comment what pattern is recognized in to
function comment, instead of reading code every time to figure out.
> +{
> + const struct rte_flow_item *item = pattern;
> + const struct rte_flow_item_eth *eth_spec;
> + const struct rte_flow_item_eth *eth_mask;
> + enum rte_flow_item_type item_type;
> +
> + for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
> + if (item->last) {
> + rte_flow_error_set(error, EINVAL,
> + RTE_FLOW_ERROR_TYPE_ITEM,
> + item,
> + "Not support range");
> + return -rte_errno;
> + }
> + item_type = item->type;
> + switch (item_type) {
> + case RTE_FLOW_ITEM_TYPE_ETH:
> + eth_spec = (const struct rte_flow_item_eth *)item->spec;
> + eth_mask = (const struct rte_flow_item_eth *)item->mask;
> + /* Get the MAC info. */
> + if (!eth_spec || !eth_mask) {
Why an eth_mask is required?
Can't driver support drop/queue packets from specific src to specific
dst with specific eth_type?
> + rte_flow_error_set(error, EINVAL,
> + RTE_FLOW_ERROR_TYPE_ITEM,
> + item,
> + "NULL ETH spec/mask");
> + return -rte_errno;
> + }
> +
> + /* Mask bits of source MAC address must be full of 0.
> + * Mask bits of destination MAC address must be full
> + * of 1 or full of 0.
> + */
> + if (!is_zero_ether_addr(ð_mask->src) ||
> + (!is_zero_ether_addr(ð_mask->dst) &&
> + !is_broadcast_ether_addr(ð_mask->dst))) {
> + rte_flow_error_set(error, EINVAL,
> + RTE_FLOW_ERROR_TYPE_ITEM,
> + item,
> + "Invalid MAC_addr mask");
> + return -rte_errno;
> + }
> +
> + if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
> + rte_flow_error_set(error, EINVAL,
> + RTE_FLOW_ERROR_TYPE_ITEM,
> + item,
> + "Invalid ethertype mask");
Why returning error here?
Can't we say drop packets to specific MAC address, independent from the
ether_type?
> + return -rte_errno;
> + }
> +
> + /* If mask bits of destination MAC address
> + * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
> + */
> + if (is_broadcast_ether_addr(ð_mask->dst)) {
> + filter->mac_addr = eth_spec->dst;
> + filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
> + } else {
> + filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
> + }
> + filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
> +
> + if (filter->ether_type == ETHER_TYPE_IPv4 ||
> + filter->ether_type == ETHER_TYPE_IPv6) {
> + rte_flow_error_set(error, EINVAL,
> + RTE_FLOW_ERROR_TYPE_ITEM,
> + item,
> + "Unsupported ether_type in"
> + " control packet filter.");
Can't we create a drop rule based on dst MAC address if eth_type is ip ?
> + return -rte_errno;
> + }
> + if (filter->ether_type == ETHER_TYPE_VLAN)
> + PMD_DRV_LOG(WARNING, "filter vlan ether_type in"
> + " first tag is not supported.");
Who is the target of this message?
To the caller, this API is responding as this is supported.
The end user, the user of the application, can see this message, how
this message will help to end user?
> +
> + break;
> + default:
> + break;
> + }
> + }
> +
> + return 0;
> +}
> +
> +static int
> +i40e_parse_ethertype_act(struct rte_eth_dev *dev,
> + const struct rte_flow_action *actions,
> + struct rte_flow_error *error,
> + struct rte_eth_ethertype_filter *filter)
I think it would be good to comment this functions to say only DROP and
QUEUE actions are supported.
<...>
> +
> +static int
> +i40e_flow_validate(struct rte_eth_dev *dev,
> + const struct rte_flow_attr *attr,
> + const struct rte_flow_item pattern[],
> + const struct rte_flow_action actions[],
> + struct rte_flow_error *error)
> +{
> + struct rte_flow_item *items; /* internal pattern w/o VOID items */
> + parse_filter_t parse_filter;
> + uint32_t item_num = 0; /* non-void item number of pattern*/
> + uint32_t i = 0;
> + int ret;
> +
> + if (!pattern) {
> + rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
> + NULL, "NULL pattern.");
> + return -rte_errno;
> + }
> +
> + if (!actions) {
> + rte_flow_error_set(error, EINVAL,
> + RTE_FLOW_ERROR_TYPE_ACTION_NUM,
> + NULL, "NULL action.");
> + return -rte_errno;
> + }
It may be good to validate attr too, if it is NULL or not. It is
accessed without check in later stages of the call stack.
<...>
^ permalink raw reply [flat|nested] 175+ messages in thread
* Re: [dpdk-dev] [PATCH v5 07/17] net/i40e: add flow validate function
2017-01-04 18:57 ` Ferruh Yigit
@ 2017-01-05 6:08 ` Xing, Beilei
2017-01-05 11:16 ` Ferruh Yigit
0 siblings, 1 reply; 175+ messages in thread
From: Xing, Beilei @ 2017-01-05 6:08 UTC (permalink / raw)
To: Yigit, Ferruh, Wu, Jingjing, Zhang, Helin; +Cc: dev, Zhao1, Wei
Hi Ferruh,
> -----Original Message-----
> From: Yigit, Ferruh
> Sent: Thursday, January 5, 2017 2:57 AM
> To: Xing, Beilei <beilei.xing@intel.com>; Wu, Jingjing
> <jingjing.wu@intel.com>; Zhang, Helin <helin.zhang@intel.com>
> Cc: dev@dpdk.org
> Subject: Re: [dpdk-dev] [PATCH v5 07/17] net/i40e: add flow validate
> function
>
> On 1/4/2017 3:22 AM, Beilei Xing wrote:
> > This patch adds i40e_flow_validation function to check if a flow is
> > valid according to the flow pattern.
> > i40e_parse_ethertype_filter is added first, it also gets the ethertype
> > info.
> > i40e_flow.c is added to handle all generic filter events.
> >
> > Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> > ---
>
> <...>
>
> > diff --git a/drivers/net/i40e/i40e_ethdev.c
> > b/drivers/net/i40e/i40e_ethdev.c index 153322a..edfd52b 100644
> > --- a/drivers/net/i40e/i40e_ethdev.c
> > +++ b/drivers/net/i40e/i40e_ethdev.c
> > @@ -8426,6 +8426,8 @@ i40e_ethertype_filter_handle(struct rte_eth_dev
> *dev,
> > return ret;
> > }
> >
> > +const struct rte_flow_ops i40e_flow_ops;
>
> Is this intentional (instead of using extern) ?
> Because i40e_flow.c has a global variable definition with same name, it looks
> like this is not causing a build error, but I think confusing.
>
Actually it's the global variable definition in i40e_flow.c. I thought gcc would add extern automatically during compiling, as I checked the address of the variable is the same in different files.
To avoid confusion, I will add extern in next version.
> <...>
>
> > +static int i40e_parse_ethertype_act(struct rte_eth_dev *dev,
> > + const struct rte_flow_action *actions,
> > + struct rte_flow_error *error,
> > + struct rte_eth_ethertype_filter *filter);
>
> In API naming, I would prefer full "action" instead of shorten "act", but it is
> your call.
I will change the API name in next version. Thanks.
>
> <...>
>
> > +
> > +union i40e_filter_t cons_filter;
>
> Why this cons_filter is required. I can see this is saving some state related
> rule during validate function.
> If the plan is to use this during rule creation, is user has to call validate before
> each create?
You are right, cons_filter will get filter info during validation, and it's for flow_create function.
User needn't call the flow_validate function, as validate function will be called in i40e_flow_create.
>
> <...>
>
> > +
> > +static int
> > +i40e_parse_ethertype_filter(struct rte_eth_dev *dev,
> > + const struct rte_flow_attr *attr,
> > + const struct rte_flow_item pattern[],
> > + const struct rte_flow_action actions[],
> > + struct rte_flow_error *error,
> > + union i40e_filter_t *filter)
> > +{
> > + struct rte_eth_ethertype_filter *ethertype_filter =
> > + &filter->ethertype_filter;
> > + int ret;
> > +
> > + ret = i40e_parse_ethertype_pattern(dev, pattern, error,
> > + ethertype_filter);
> > + if (ret)
> > + return ret;
> > +
> > + ret = i40e_parse_ethertype_act(dev, actions, error,
> > + ethertype_filter);
> > + if (ret)
> > + return ret;
> > +
> > + ret = i40e_parse_attr(attr, error);
>
> It is your call, but I would suggest using a specific namespace for all rte_flow
> related functions, something like "i40e_flow_".
> In this context it is clear what this function is, but in whole driver code, the
> function name is too generic to understand what it does.
Make sense. I'll update the function names.
>
> > + if (ret)
> > + return ret;
> > +
> > + return ret;
> > +}
> > +
>
> <...>
>
> > +
> > +static int
> > +i40e_parse_ethertype_pattern(__rte_unused struct rte_eth_dev *dev,
> > + const struct rte_flow_item *pattern,
> > + struct rte_flow_error *error,
> > + struct rte_eth_ethertype_filter *filter)
>
> I think it is good idea to comment what pattern is recognized in to function
> comment, instead of reading code every time to figure out.
In fact, the array of i40e_supported_patterns has listed all supported patterns for each filter type.
i40e_supported_patterns is also defined in this patch.
>
> > +{
> > + const struct rte_flow_item *item = pattern;
> > + const struct rte_flow_item_eth *eth_spec;
> > + const struct rte_flow_item_eth *eth_mask;
> > + enum rte_flow_item_type item_type;
> > +
> > + for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
> > + if (item->last) {
> > + rte_flow_error_set(error, EINVAL,
> > + RTE_FLOW_ERROR_TYPE_ITEM,
> > + item,
> > + "Not support range");
> > + return -rte_errno;
> > + }
> > + item_type = item->type;
> > + switch (item_type) {
> > + case RTE_FLOW_ITEM_TYPE_ETH:
> > + eth_spec = (const struct rte_flow_item_eth *)item-
> >spec;
> > + eth_mask = (const struct rte_flow_item_eth *)item-
> >mask;
> > + /* Get the MAC info. */
> > + if (!eth_spec || !eth_mask) {
>
> Why an eth_mask is required?
Yes, since eth_type mask in eth_mask should be UINT16_MAX.
> Can't driver support drop/queue packets from specific src to specific dst with
> specific eth_type?
No, we support specific dst with specific eth_type, or only specific eth_type. Perfect match.
>
> > + rte_flow_error_set(error, EINVAL,
> > +
> RTE_FLOW_ERROR_TYPE_ITEM,
> > + item,
> > + "NULL ETH spec/mask");
> > + return -rte_errno;
> > + }
> > +
> > + /* Mask bits of source MAC address must be full of 0.
> > + * Mask bits of destination MAC address must be full
> > + * of 1 or full of 0.
> > + */
> > + if (!is_zero_ether_addr(ð_mask->src) ||
> > + (!is_zero_ether_addr(ð_mask->dst) &&
> > + !is_broadcast_ether_addr(ð_mask->dst))) {
> > + rte_flow_error_set(error, EINVAL,
> > +
> RTE_FLOW_ERROR_TYPE_ITEM,
> > + item,
> > + "Invalid MAC_addr mask");
> > + return -rte_errno;
> > + }
> > +
> > + if ((eth_mask->type & UINT16_MAX) !=
> UINT16_MAX) {
> > + rte_flow_error_set(error, EINVAL,
> > +
> RTE_FLOW_ERROR_TYPE_ITEM,
> > + item,
> > + "Invalid ethertype mask");
>
> Why returning error here?
> Can't we say drop packets to specific MAC address, independent from the
> ether_type?
No. as I said above, we support specific dst with specific eth_type, or only specific eth_type for ethertype_filter.
>
> > + return -rte_errno;
> > + }
> > +
> > + /* If mask bits of destination MAC address
> > + * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
> > + */
> > + if (is_broadcast_ether_addr(ð_mask->dst)) {
> > + filter->mac_addr = eth_spec->dst;
> > + filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
> > + } else {
> > + filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
> > + }
> > + filter->ether_type = rte_be_to_cpu_16(eth_spec-
> >type);
> > +
> > + if (filter->ether_type == ETHER_TYPE_IPv4 ||
> > + filter->ether_type == ETHER_TYPE_IPv6) {
> > + rte_flow_error_set(error, EINVAL,
> > +
> RTE_FLOW_ERROR_TYPE_ITEM,
> > + item,
> > + "Unsupported ether_type
> in"
> > + " control packet filter.");
>
> Can't we create a drop rule based on dst MAC address if eth_type is ip ?
No, we don't support drop MAC_addr + eth_type_IP for ethertype filter.
>
> > + return -rte_errno;
> > + }
> > + if (filter->ether_type == ETHER_TYPE_VLAN)
> > + PMD_DRV_LOG(WARNING, "filter vlan
> ether_type in"
> > + " first tag is not supported.");
>
> Who is the target of this message?
> To the caller, this API is responding as this is supported.
> The end user, the user of the application, can see this message, how this
> message will help to end user?
Actually I add this warning according to the original processing in i40e_dev_ethertype_filter_set.
After checking datasheet, "The ethertype programmed by this command should not be one of the L2 tags ethertype (VLAN, E-tag, S-tag, etc.) and should not be IP or IPv6" is described.
But if QinQ is disabled, and inner vlan is ETHER_TYPE_VLAN, the filter works. So the message is "vlan ether_type in outer tag is not supported".
I want to simplify it in next version, don't support the situation above, and return error if (filter->ether_type == ETHER_TYPE_VLAN), because HW only recognizes ETH when QinQ is disabled. What do you think?
>
> > +
> > + break;
> > + default:
> > + break;
> > + }
> > + }
> > +
> > + return 0;
> > +}
> > +
> > +static int
> > +i40e_parse_ethertype_act(struct rte_eth_dev *dev,
> > + const struct rte_flow_action *actions,
> > + struct rte_flow_error *error,
> > + struct rte_eth_ethertype_filter *filter)
>
> I think it would be good to comment this functions to say only DROP and
> QUEUE actions are supported.
Yes, will update in next version.
>
> <...>
>
> > +
> > +static int
> > +i40e_flow_validate(struct rte_eth_dev *dev,
> > + const struct rte_flow_attr *attr,
> > + const struct rte_flow_item pattern[],
> > + const struct rte_flow_action actions[],
> > + struct rte_flow_error *error)
> > +{
> > + struct rte_flow_item *items; /* internal pattern w/o VOID items */
> > + parse_filter_t parse_filter;
> > + uint32_t item_num = 0; /* non-void item number of pattern*/
> > + uint32_t i = 0;
> > + int ret;
> > +
> > + if (!pattern) {
> > + rte_flow_error_set(error, EINVAL,
> RTE_FLOW_ERROR_TYPE_ITEM_NUM,
> > + NULL, "NULL pattern.");
> > + return -rte_errno;
> > + }
> > +
> > + if (!actions) {
> > + rte_flow_error_set(error, EINVAL,
> > + RTE_FLOW_ERROR_TYPE_ACTION_NUM,
> > + NULL, "NULL action.");
> > + return -rte_errno;
> > + }
>
> It may be good to validate attr too, if it is NULL or not. It is accessed without
> check in later stages of the call stack.
Yes. Thanks for reminder.
Best Regards,
Beilei
>
> <...>
>
^ permalink raw reply [flat|nested] 175+ messages in thread
* Re: [dpdk-dev] [PATCH v5 07/17] net/i40e: add flow validate function
2017-01-05 6:08 ` Xing, Beilei
@ 2017-01-05 11:16 ` Ferruh Yigit
2017-01-05 11:52 ` Xing, Beilei
0 siblings, 1 reply; 175+ messages in thread
From: Ferruh Yigit @ 2017-01-05 11:16 UTC (permalink / raw)
To: Xing, Beilei, Wu, Jingjing, Zhang, Helin; +Cc: dev, Zhao1, Wei
On 1/5/2017 6:08 AM, Xing, Beilei wrote:
> Hi Ferruh,
>
>> -----Original Message-----
>> From: Yigit, Ferruh
>> Sent: Thursday, January 5, 2017 2:57 AM
>> To: Xing, Beilei <beilei.xing@intel.com>; Wu, Jingjing
>> <jingjing.wu@intel.com>; Zhang, Helin <helin.zhang@intel.com>
>> Cc: dev@dpdk.org
>> Subject: Re: [dpdk-dev] [PATCH v5 07/17] net/i40e: add flow validate
>> function
>>
>> On 1/4/2017 3:22 AM, Beilei Xing wrote:
>>> This patch adds i40e_flow_validation function to check if a flow is
>>> valid according to the flow pattern.
>>> i40e_parse_ethertype_filter is added first, it also gets the ethertype
>>> info.
>>> i40e_flow.c is added to handle all generic filter events.
>>>
>>> Signed-off-by: Beilei Xing <beilei.xing@intel.com>
>>> ---
>>
>> <...>
>>
>>> diff --git a/drivers/net/i40e/i40e_ethdev.c
>>> b/drivers/net/i40e/i40e_ethdev.c index 153322a..edfd52b 100644
>>> --- a/drivers/net/i40e/i40e_ethdev.c
>>> +++ b/drivers/net/i40e/i40e_ethdev.c
>>> @@ -8426,6 +8426,8 @@ i40e_ethertype_filter_handle(struct rte_eth_dev
>> *dev,
>>> return ret;
>>> }
>>>
>>> +const struct rte_flow_ops i40e_flow_ops;
>>
>> Is this intentional (instead of using extern) ?
>> Because i40e_flow.c has a global variable definition with same name, it looks
>> like this is not causing a build error, but I think confusing.
>>
>
> Actually it's the global variable definition in i40e_flow.c. I thought gcc would add extern automatically during compiling, as I checked the address of the variable is the same in different files.
> To avoid confusion, I will add extern in next version.
>
>> <...>
>>
>>> +static int i40e_parse_ethertype_act(struct rte_eth_dev *dev,
>>> + const struct rte_flow_action *actions,
>>> + struct rte_flow_error *error,
>>> + struct rte_eth_ethertype_filter *filter);
>>
>> In API naming, I would prefer full "action" instead of shorten "act", but it is
>> your call.
>
> I will change the API name in next version. Thanks.
>
>>
>> <...>
>>
>>> +
>>> +union i40e_filter_t cons_filter;
>>
>> Why this cons_filter is required. I can see this is saving some state related
>> rule during validate function.
>> If the plan is to use this during rule creation, is user has to call validate before
>> each create?
>
> You are right, cons_filter will get filter info during validation, and it's for flow_create function.
> User needn't to call the flow_validate function, as validate function will be called in i40e_flow_create.
Ok then.
>
>>
>> <...>
>>
>>> +
>>> +static int
>>> +i40e_parse_ethertype_filter(struct rte_eth_dev *dev,
>>> + const struct rte_flow_attr *attr,
>>> + const struct rte_flow_item pattern[],
>>> + const struct rte_flow_action actions[],
>>> + struct rte_flow_error *error,
>>> + union i40e_filter_t *filter)
>>> +{
>>> + struct rte_eth_ethertype_filter *ethertype_filter =
>>> + &filter->ethertype_filter;
>>> + int ret;
>>> +
>>> + ret = i40e_parse_ethertype_pattern(dev, pattern, error,
>>> + ethertype_filter);
>>> + if (ret)
>>> + return ret;
>>> +
>>> + ret = i40e_parse_ethertype_act(dev, actions, error,
>>> + ethertype_filter);
>>> + if (ret)
>>> + return ret;
>>> +
>>> + ret = i40e_parse_attr(attr, error);
>>
>> It is your call, but I would suggest using a specific namespace for all rte_flow
>> related functions, something like "i40e_flow_".
>> In this context it is clear what this function is, but in whole driver code, the
>> function name is too generic to understand what it does.
>
> Make sense. I'll update the function names.
>
>>
>>> + if (ret)
>>> + return ret;
>>> +
>>> + return ret;
>>> +}
>>> +
>>
>> <...>
>>
>>> +
>>> +static int
>>> +i40e_parse_ethertype_pattern(__rte_unused struct rte_eth_dev *dev,
>>> + const struct rte_flow_item *pattern,
>>> + struct rte_flow_error *error,
>>> + struct rte_eth_ethertype_filter *filter)
>>
>> I think it is good idea to comment what pattern is recognized in to function
>> comment, instead of reading code every time to figure out.
>
> In fact, the array of i40e_supported_patterns has listed all supported patterns for each filter type.
> i40e_supported_patterns is also defined in this patch.
i40e_supported_patterns only shows item->type values, I think it is good
to document expected/valid mask (.dst, .src, .type) and last values for
this type.
>
>>
>>> +{
>>> + const struct rte_flow_item *item = pattern;
>>> + const struct rte_flow_item_eth *eth_spec;
>>> + const struct rte_flow_item_eth *eth_mask;
>>> + enum rte_flow_item_type item_type;
>>> +
>>> + for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
>>> + if (item->last) {
>>> + rte_flow_error_set(error, EINVAL,
>>> + RTE_FLOW_ERROR_TYPE_ITEM,
>>> + item,
>>> + "Not support range");
>>> + return -rte_errno;
>>> + }
>>> + item_type = item->type;
>>> + switch (item_type) {
>>> + case RTE_FLOW_ITEM_TYPE_ETH:
>>> + eth_spec = (const struct rte_flow_item_eth *)item-
>>> spec;
>>> + eth_mask = (const struct rte_flow_item_eth *)item-
>>> mask;
>>> + /* Get the MAC info. */
>>> + if (!eth_spec || !eth_mask) {
>>
>> Why an eth_mask is required?
> Yes, since eth_type mask in eth_mask should be UINT16_MAX.
>
>> Can't driver support drop/queue packets from specific src to specific dst with
>> specific eth_type?
> No, we support specific dst with specific eth_type, or only specific eth_type. Perfect match.
Thanks for clarification.
>
>>
>>> + rte_flow_error_set(error, EINVAL,
>>> +
>> RTE_FLOW_ERROR_TYPE_ITEM,
>>> + item,
>>> + "NULL ETH spec/mask");
>>> + return -rte_errno;
>>> + }
>>> +
>>> + /* Mask bits of source MAC address must be full of 0.
>>> + * Mask bits of destination MAC address must be full
>>> + * of 1 or full of 0.
>>> + */
>>> + if (!is_zero_ether_addr(ð_mask->src) ||
>>> + (!is_zero_ether_addr(ð_mask->dst) &&
>>> + !is_broadcast_ether_addr(ð_mask->dst))) {
>>> + rte_flow_error_set(error, EINVAL,
>>> +
>> RTE_FLOW_ERROR_TYPE_ITEM,
>>> + item,
>>> + "Invalid MAC_addr mask");
>>> + return -rte_errno;
>>> + }
>>> +
>>> + if ((eth_mask->type & UINT16_MAX) !=
>> UINT16_MAX) {
>>> + rte_flow_error_set(error, EINVAL,
>>> +
>> RTE_FLOW_ERROR_TYPE_ITEM,
>>> + item,
>>> + "Invalid ethertype mask");
>>
>> Why returning error here?
>> Can't we say drop packets to specific MAC address, independent from the
>> ether_type?
>
> No. as I said above, we support specific dst with specific eth_type, or only specific eth_type for ethertype_filter.
>
>>
>>> + return -rte_errno;
>>> + }
>>> +
>>> + /* If mask bits of destination MAC address
>>> + * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
>>> + */
>>> + if (is_broadcast_ether_addr(ð_mask->dst)) {
>>> + filter->mac_addr = eth_spec->dst;
>>> + filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
>>> + } else {
>>> + filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
>>> + }
>>> + filter->ether_type = rte_be_to_cpu_16(eth_spec-
>>> type);
>>> +
>>> + if (filter->ether_type == ETHER_TYPE_IPv4 ||
>>> + filter->ether_type == ETHER_TYPE_IPv6) {
>>> + rte_flow_error_set(error, EINVAL,
>>> +
>> RTE_FLOW_ERROR_TYPE_ITEM,
>>> + item,
>>> + "Unsupported ether_type
>> in"
>>> + " control packet filter.");
>>
>> Can't we create a drop rule based on dst MAC address if eth_type is ip ?
>
> No, we don't support drop MAC_addr + eth_type_IP for ethertype filter.
>
>>
>>> + return -rte_errno;
>>> + }
>>> + if (filter->ether_type == ETHER_TYPE_VLAN)
>>> + PMD_DRV_LOG(WARNING, "filter vlan
>> ether_type in"
>>> + " first tag is not supported.");
>>
>> Who is the target of this message?
>> To the caller, this API is responding as this is supported.
>> The end user, the user of the application, can see this message, how this
>> message will help to end user?
>
> Actually I add this warning according to the original processing in i40e_dev_ethertype_filter_set.
> After checking datasheet, "The ethertype programmed by this command should not be one of the L2 tags ethertype (VLAN, E-tag, S-tag, etc.) and should not be IP or IPv6" is described.
> But if QinQ is disabled, and inner vlan is ETHER_TYPE_VLAN, the filter works. So the message is "vlan ether_type in outer tag is not supported".
> I want to simplify it in next version, don't support the situation above, and return error if (filter->ether_type == ETHER_TYPE_VLAN), because HW only recognizes ETH when QinQ is disabled. What do you think?
I think it is better.
And this can be fine tuned in the future to check QinQ and return
accordingly.
>
>>
>>> +
>>> + break;
>>> + default:
>>> + break;
>>> + }
>>> + }
>>> +
>>> + return 0;
>>> +}
>>> +
>>> +static int
>>> +i40e_parse_ethertype_act(struct rte_eth_dev *dev,
>>> + const struct rte_flow_action *actions,
>>> + struct rte_flow_error *error,
>>> + struct rte_eth_ethertype_filter *filter)
>>
>> I think it would be good to comment this functions to say only DROP and
>> QUEUE actions are supported.
>
> Yes, will update in next version.
>
>>
>> <...>
>>
>>> +
>>> +static int
>>> +i40e_flow_validate(struct rte_eth_dev *dev,
>>> + const struct rte_flow_attr *attr,
>>> + const struct rte_flow_item pattern[],
>>> + const struct rte_flow_action actions[],
>>> + struct rte_flow_error *error)
>>> +{
>>> + struct rte_flow_item *items; /* internal pattern w/o VOID items */
>>> + parse_filter_t parse_filter;
>>> + uint32_t item_num = 0; /* non-void item number of pattern*/
>>> + uint32_t i = 0;
>>> + int ret;
>>> +
>>> + if (!pattern) {
>>> + rte_flow_error_set(error, EINVAL,
>> RTE_FLOW_ERROR_TYPE_ITEM_NUM,
>>> + NULL, "NULL pattern.");
>>> + return -rte_errno;
>>> + }
>>> +
>>> + if (!actions) {
>>> + rte_flow_error_set(error, EINVAL,
>>> + RTE_FLOW_ERROR_TYPE_ACTION_NUM,
>>> + NULL, "NULL action.");
>>> + return -rte_errno;
>>> + }
>>
>> It may be good to validate attr too, if it is NULL or not. It is accessed without
>> check in later stages of the call stack.
>
> Yes. Thanks for reminder.
>
> Best Regards,
> Beilei
>
>>
>> <...>
>>
>
^ permalink raw reply [flat|nested] 175+ messages in thread
* Re: [dpdk-dev] [PATCH v5 07/17] net/i40e: add flow validate function
2017-01-05 11:16 ` Ferruh Yigit
@ 2017-01-05 11:52 ` Xing, Beilei
0 siblings, 0 replies; 175+ messages in thread
From: Xing, Beilei @ 2017-01-05 11:52 UTC (permalink / raw)
To: Yigit, Ferruh, Wu, Jingjing, Zhang, Helin; +Cc: dev, Zhao1, Wei
> -----Original Message-----
> From: Yigit, Ferruh
> Sent: Thursday, January 5, 2017 7:16 PM
> To: Xing, Beilei <beilei.xing@intel.com>; Wu, Jingjing
> <jingjing.wu@intel.com>; Zhang, Helin <helin.zhang@intel.com>
> Cc: dev@dpdk.org; Zhao1, Wei <wei.zhao1@intel.com>
> Subject: Re: [dpdk-dev] [PATCH v5 07/17] net/i40e: add flow validate
> function
>
> On 1/5/2017 6:08 AM, Xing, Beilei wrote:
> > Hi Ferruh,
> >
> >> -----Original Message-----
> >> From: Yigit, Ferruh
> >> Sent: Thursday, January 5, 2017 2:57 AM
> >> To: Xing, Beilei <beilei.xing@intel.com>; Wu, Jingjing
> >> <jingjing.wu@intel.com>; Zhang, Helin <helin.zhang@intel.com>
> >> Cc: dev@dpdk.org
> >> Subject: Re: [dpdk-dev] [PATCH v5 07/17] net/i40e: add flow validate
> >> function
> >>
> >> On 1/4/2017 3:22 AM, Beilei Xing wrote:
> >>> This patch adds i40e_flow_validation function to check if a flow is
> >>> valid according to the flow pattern.
> >>> i40e_parse_ethertype_filter is added first, it also gets the
> >>> ethertype info.
> >>> i40e_flow.c is added to handle all generic filter events.
> >>>
> >>> Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> >>> ---
> >>
> >> <...>
> >>
> >>> diff --git a/drivers/net/i40e/i40e_ethdev.c
> >>> b/drivers/net/i40e/i40e_ethdev.c index 153322a..edfd52b 100644
> >>> --- a/drivers/net/i40e/i40e_ethdev.c
> >>> +++ b/drivers/net/i40e/i40e_ethdev.c
> >>> @@ -8426,6 +8426,8 @@ i40e_ethertype_filter_handle(struct
> >>> rte_eth_dev
> >> *dev,
> >>> return ret;
> >>> }
> >>>
> >>> +const struct rte_flow_ops i40e_flow_ops;
> >>
> >> Is this intentional (instead of using extern) ?
> >> Because i40e_flow.c has a global variable definition with same name,
> >> it looks like this is not causing a build error, but I think confusing.
> >>
> >
> > Actually it's the global variable definition in i40e_flow.c. I thought gcc
> would add extern automatically during compiling, as I checked the address of
> the variable is the same in different files.
> > To avoid confusion, I will add extern in next version.
> >
> >> <...>
> >>
> >>> +static int i40e_parse_ethertype_act(struct rte_eth_dev *dev,
> >>> + const struct rte_flow_action *actions,
> >>> + struct rte_flow_error *error,
> >>> + struct rte_eth_ethertype_filter *filter);
> >>
> >> In API naming, I would prefer full "action" instead of shorten "act",
> >> but it is your call.
> >
> > I will change the API name in next version. Thanks.
> >
> >>
> >> <...>
> >>
> >>> +
> >>> +union i40e_filter_t cons_filter;
> >>
> >> Why this cons_filter is required. I can see this is saving some state
> >> related rule during validate function.
> >> If the plan is to use this during rule creation, is user has to call
> >> validate before each create?
> >
> > You are right, cons_filter will get filter info during validation, and it's for
> flow_create function.
> > User needn't to call the flow_validate function, as validate function will be
> called in i40e_flow_create.
>
> Ok then.
>
> >
> >>
> >> <...>
> >>
> >>> +
> >>> +static int
> >>> +i40e_parse_ethertype_filter(struct rte_eth_dev *dev,
> >>> + const struct rte_flow_attr *attr,
> >>> + const struct rte_flow_item pattern[],
> >>> + const struct rte_flow_action actions[],
> >>> + struct rte_flow_error *error,
> >>> + union i40e_filter_t *filter) {
> >>> + struct rte_eth_ethertype_filter *ethertype_filter =
> >>> + &filter->ethertype_filter;
> >>> + int ret;
> >>> +
> >>> + ret = i40e_parse_ethertype_pattern(dev, pattern, error,
> >>> + ethertype_filter);
> >>> + if (ret)
> >>> + return ret;
> >>> +
> >>> + ret = i40e_parse_ethertype_act(dev, actions, error,
> >>> + ethertype_filter);
> >>> + if (ret)
> >>> + return ret;
> >>> +
> >>> + ret = i40e_parse_attr(attr, error);
> >>
> >> It is your call, but I would suggest using a specific namespace for
> >> all rte_flow related functions, something like "i40e_flow_".
> >> In this context it is clear what this function is, but in whole
> >> driver code, the function name is too generic to understand what it does.
> >
> > Make sense. I'll update the function names.
> >
> >>
> >>> + if (ret)
> >>> + return ret;
> >>> +
> >>> + return ret;
> >>> +}
> >>> +
> >>
> >> <...>
> >>
> >>> +
> >>> +static int
> >>> +i40e_parse_ethertype_pattern(__rte_unused struct rte_eth_dev
> *dev,
> >>> + const struct rte_flow_item *pattern,
> >>> + struct rte_flow_error *error,
> >>> + struct rte_eth_ethertype_filter *filter)
> >>
> >> I think it is good idea to comment what pattern is recognized in to
> >> function comment, instead of reading code every time to figure out.
> >
> > In fact, the array of i40e_supported_patterns has listed all supported
> patterns for each filter type.
> > i40e_supported_patterns is also defined in this patch.
>
> i40e_supported_patterns only shows item->type values, I think it is good to
> document expected/valid mask (.dst, .src, .type) and last values for this
> type.
OK, I see, will add the comments in the function.
>
> >
> >>
> >>> +{
> >>> + const struct rte_flow_item *item = pattern;
> >>> + const struct rte_flow_item_eth *eth_spec;
> >>> + const struct rte_flow_item_eth *eth_mask;
> >>> + enum rte_flow_item_type item_type;
> >>> +
> >>> + for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
> >>> + if (item->last) {
> >>> + rte_flow_error_set(error, EINVAL,
> >>> + RTE_FLOW_ERROR_TYPE_ITEM,
> >>> + item,
> >>> + "Not support range");
> >>> + return -rte_errno;
> >>> + }
> >>> + item_type = item->type;
> >>> + switch (item_type) {
> >>> + case RTE_FLOW_ITEM_TYPE_ETH:
> >>> + eth_spec = (const struct rte_flow_item_eth *)item-
> >>> spec;
> >>> + eth_mask = (const struct rte_flow_item_eth *)item-
> >>> mask;
> >>> + /* Get the MAC info. */
> >>> + if (!eth_spec || !eth_mask) {
> >>
> >> Why an eth_mask is required?
> > Yes, since eth_type mask in eth_mask should be UINT16_MAX.
> >
> >> Can't driver support drop/queue packets from specific src to specific
> >> dst with specific eth_type?
> > No, we support specific dst with specific eth_type, or only specific
> eth_type. Perfect match.
>
> Thanks for clarification.
>
> >
> >>
> >>> + rte_flow_error_set(error, EINVAL,
> >>> +
> >> RTE_FLOW_ERROR_TYPE_ITEM,
> >>> + item,
> >>> + "NULL ETH spec/mask");
> >>> + return -rte_errno;
> >>> + }
> >>> +
> >>> + /* Mask bits of source MAC address must be full of 0.
> >>> + * Mask bits of destination MAC address must be full
> >>> + * of 1 or full of 0.
> >>> + */
> >>> + if (!is_zero_ether_addr(ð_mask->src) ||
> >>> + (!is_zero_ether_addr(ð_mask->dst) &&
> >>> + !is_broadcast_ether_addr(ð_mask->dst))) {
> >>> + rte_flow_error_set(error, EINVAL,
> >>> +
> >> RTE_FLOW_ERROR_TYPE_ITEM,
> >>> + item,
> >>> + "Invalid MAC_addr mask");
> >>> + return -rte_errno;
> >>> + }
> >>> +
> >>> + if ((eth_mask->type & UINT16_MAX) !=
> >> UINT16_MAX) {
> >>> + rte_flow_error_set(error, EINVAL,
> >>> +
> >> RTE_FLOW_ERROR_TYPE_ITEM,
> >>> + item,
> >>> + "Invalid ethertype mask");
> >>
> >> Why returning error here?
> >> Can't we say drop packets to specific MAC address, independent from
> >> the ether_type?
> >
> > No. as I said above, we support specific dst with specific eth_type, or only
> specific eth_type for ethertype_filter.
> >
> >>
> >>> + return -rte_errno;
> >>> + }
> >>> +
> >>> + /* If mask bits of destination MAC address
> >>> + * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
> >>> + */
> >>> + if (is_broadcast_ether_addr(ð_mask->dst)) {
> >>> + filter->mac_addr = eth_spec->dst;
> >>> + filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
> >>> + } else {
> >>> + filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
> >>> + }
> >>> + filter->ether_type = rte_be_to_cpu_16(eth_spec-
> >>> type);
> >>> +
> >>> + if (filter->ether_type == ETHER_TYPE_IPv4 ||
> >>> + filter->ether_type == ETHER_TYPE_IPv6) {
> >>> + rte_flow_error_set(error, EINVAL,
> >>> +
> >> RTE_FLOW_ERROR_TYPE_ITEM,
> >>> + item,
> >>> + "Unsupported ether_type
> >> in"
> >>> + " control packet filter.");
> >>
> >> Can't we create a drop rule based on dst MAC address if eth_type is ip ?
> >
> > No, we don't support drop MAC_addr + eth_type_IP for ethertype filter.
> >
> >>
> >>> + return -rte_errno;
> >>> + }
> >>> + if (filter->ether_type == ETHER_TYPE_VLAN)
> >>> + PMD_DRV_LOG(WARNING, "filter vlan
> >> ether_type in"
> >>> + " first tag is not supported.");
> >>
> >> Who is the target of this message?
> >> To the caller, this API is responding as this is supported.
> >> The end user, the user of the application, can see this message, how
> >> this message will help to end user?
> >
> > Actually I add this warning according to the original processing in
> i40e_dev_ethertype_filter_set.
> > After checking datasheet, "The ethertype programmed by this command
> should not be one of the L2 tags ethertype (VLAN, E-tag, S-tag, etc.) and
> should not be IP or IPv6" is described.
> > But if QinQ is disabled, and inner vlan is ETHER_TYPE_VLAN, the filter works.
> So the message is "vlan ether_type in outer tag is not supported".
> > I want to simplify it in next version, don't support the situation above, and
> return error if (filter->ether_type == ETHER_TYPE_VLAN), because HW only
> recognizes ETH when QinQ is disabled. What do you think?
>
> I think it is better.
> And this can be fine tuned in the future to check QinQ and return accordingly.
I have tuned QinQ, and it will not work when ether_type is equal to the outer vlan. So I will update.
>
> >
> >>
> >>> +
> >>> + break;
> >>> + default:
> >>> + break;
> >>> + }
> >>> + }
> >>> +
> >>> + return 0;
> >>> +}
> >>> +
> >>> +static int
> >>> +i40e_parse_ethertype_act(struct rte_eth_dev *dev,
> >>> + const struct rte_flow_action *actions,
> >>> + struct rte_flow_error *error,
> >>> + struct rte_eth_ethertype_filter *filter)
> >>
> >> I think it would be good to comment this functions to say only DROP
> >> and QUEUE actions are supported.
> >
> > Yes, will update in next version.
> >
> >>
> >> <...>
> >>
> >>> +
> >>> +static int
> >>> +i40e_flow_validate(struct rte_eth_dev *dev,
> >>> + const struct rte_flow_attr *attr,
> >>> + const struct rte_flow_item pattern[],
> >>> + const struct rte_flow_action actions[],
> >>> + struct rte_flow_error *error)
> >>> +{
> >>> + struct rte_flow_item *items; /* internal pattern w/o VOID items */
> >>> + parse_filter_t parse_filter;
> >>> + uint32_t item_num = 0; /* non-void item number of pattern*/
> >>> + uint32_t i = 0;
> >>> + int ret;
> >>> +
> >>> + if (!pattern) {
> >>> + rte_flow_error_set(error, EINVAL,
> >> RTE_FLOW_ERROR_TYPE_ITEM_NUM,
> >>> + NULL, "NULL pattern.");
> >>> + return -rte_errno;
> >>> + }
> >>> +
> >>> + if (!actions) {
> >>> + rte_flow_error_set(error, EINVAL,
> >>> + RTE_FLOW_ERROR_TYPE_ACTION_NUM,
> >>> + NULL, "NULL action.");
> >>> + return -rte_errno;
> >>> + }
> >>
> >> It may be good to validate attr too, if it is NULL or not. It is
> >> accessed without check in later stages of the call stack.
> >
> > Yes. Thanks for reminder.
> >
> > Best Regards,
> > Beilei
> >
> >>
> >> <...>
> >>
> >
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v6 00/17] net/i40e: consistent filter API
2017-01-04 3:22 ` [dpdk-dev] [PATCH v5 00/17] net/i40e: consistent filter API Beilei Xing
` (17 preceding siblings ...)
2017-01-04 6:40 ` [dpdk-dev] [PATCH v5 00/17] net/i40e: consistent filter API Wu, Jingjing
@ 2017-01-05 15:45 ` Beilei Xing
2017-01-05 15:45 ` [dpdk-dev] [PATCH v6 01/17] net/i40e: store ethertype filter Beilei Xing
` (18 more replies)
18 siblings, 19 replies; 175+ messages in thread
From: Beilei Xing @ 2017-01-05 15:45 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
The patch set depends on Adrien's Generic flow API(rte_flow).
The patches mainly finish following functions:
1) Store and restore all kinds of filters.
2) Parse all kinds of filters.
3) Add flow validate function.
4) Add flow create function.
5) Add flow destroy function.
6) Add flow flush function.
v6 changes:
Change functions' name to be more readable.
Add comments for parse_pattern functions to list supported rules.
Add comments for parse_action functions to list supported actions.
Add ETHTYPE check when parsing ethertype pattern.
v5 changes:
Change some local variable name.
Add removing i40e_flow_list during device uninit.
Fix compile error when gcc compile option isn't '-O0'.
v4 changes:
Change I40E_TCI_MASK with 0xFFFF to align with testpmd.
Modify the stats display when restoring filters.
v3 changes:
Set the related cause pointer to a non-NULL value when error happens.
Change return value when error happens.
Modify filter_del parameter with key.
Malloc filter after checking when deleting a filter.
Delete meaningless initialization.
Add return value when there's error.
Change global variable definition.
Modify some function declaration.
v2 changes:
Add i40e_flow.c, all flow ops are implemented in the file.
Change the whole implementation of all parse flow functions.
Update error info for all flow ops.
Add flow_list to store flows created.
Beilei Xing (17):
net/i40e: store ethertype filter
net/i40e: store tunnel filter
net/i40e: store flow director filter
net/i40e: restore ethertype filter
net/i40e: restore tunnel filter
net/i40e: restore flow director filter
net/i40e: add flow validate function
net/i40e: parse flow director filter
net/i40e: parse tunnel filter
net/i40e: add flow create function
net/i40e: add flow destroy function
net/i40e: destroy ethertype filter
net/i40e: destroy tunnel filter
net/i40e: destroy flow director filter
net/i40e: add flow flush function
net/i40e: flush ethertype filters
net/i40e: flush tunnel filters
drivers/net/i40e/Makefile | 2 +
drivers/net/i40e/i40e_ethdev.c | 531 ++++++++++--
drivers/net/i40e/i40e_ethdev.h | 178 ++++
drivers/net/i40e/i40e_fdir.c | 140 ++-
drivers/net/i40e/i40e_flow.c | 1850 ++++++++++++++++++++++++++++++++++++++++
5 files changed, 2630 insertions(+), 71 deletions(-)
create mode 100644 drivers/net/i40e/i40e_flow.c
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v6 01/17] net/i40e: store ethertype filter
2017-01-05 15:45 ` [dpdk-dev] [PATCH v6 " Beilei Xing
@ 2017-01-05 15:45 ` Beilei Xing
2017-01-05 17:46 ` Ferruh Yigit
2017-01-05 15:45 ` [dpdk-dev] [PATCH v6 02/17] net/i40e: store tunnel filter Beilei Xing
` (17 subsequent siblings)
18 siblings, 1 reply; 175+ messages in thread
From: Beilei Xing @ 2017-01-05 15:45 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
Currently there's no ethertype filter stored in SW.
This patch stores ethertype filter with cuckoo hash
in SW, also adds protection if an ethertype filter
has been added.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/Makefile | 1 +
drivers/net/i40e/i40e_ethdev.c | 166 ++++++++++++++++++++++++++++++++++++++++-
drivers/net/i40e/i40e_ethdev.h | 31 ++++++++
3 files changed, 197 insertions(+), 1 deletion(-)
diff --git a/drivers/net/i40e/Makefile b/drivers/net/i40e/Makefile
index 66997b6..11175c4 100644
--- a/drivers/net/i40e/Makefile
+++ b/drivers/net/i40e/Makefile
@@ -117,5 +117,6 @@ DEPDIRS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += lib/librte_eal lib/librte_ether
DEPDIRS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += lib/librte_mempool lib/librte_mbuf
DEPDIRS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += lib/librte_net
DEPDIRS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += lib/librte_kvargs
+DEPDIRS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += lib/librte_hash
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 7b8e6c0..bd13780 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -51,6 +51,7 @@
#include <rte_dev.h>
#include <rte_eth_ctrl.h>
#include <rte_tailq.h>
+#include <rte_hash_crc.h>
#include "i40e_logs.h"
#include "base/i40e_prototype.h"
@@ -461,6 +462,12 @@ static void i40e_set_default_mac_addr(struct rte_eth_dev *dev,
static int i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
+static int i40e_ethertype_filter_convert(
+ const struct rte_eth_ethertype_filter *input,
+ struct i40e_ethertype_filter *filter);
+static int i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
+ struct i40e_ethertype_filter *filter);
+
static const struct rte_pci_id pci_id_i40e_map[] = {
{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710) },
{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QEMU) },
@@ -939,9 +946,18 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
int ret;
uint32_t len;
uint8_t aq_fail = 0;
+ struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
PMD_INIT_FUNC_TRACE();
+ char ethertype_hash_name[RTE_HASH_NAMESIZE];
+ struct rte_hash_parameters ethertype_hash_params = {
+ .name = ethertype_hash_name,
+ .entries = I40E_MAX_ETHERTYPE_FILTER_NUM,
+ .key_len = sizeof(struct i40e_ethertype_filter_input),
+ .hash_func = rte_hash_crc,
+ };
+
dev->dev_ops = &i40e_eth_dev_ops;
dev->rx_pkt_burst = i40e_recv_pkts;
dev->tx_pkt_burst = i40e_xmit_pkts;
@@ -1182,8 +1198,33 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
pf->flags &= ~I40E_FLAG_DCB;
}
+ /* Initialize ethertype filter rule list and hash */
+ TAILQ_INIT(ðertype_rule->ethertype_list);
+ snprintf(ethertype_hash_name, RTE_HASH_NAMESIZE,
+ "ethertype_%s", dev->data->name);
+ ethertype_rule->hash_table = rte_hash_create(ðertype_hash_params);
+ if (!ethertype_rule->hash_table) {
+ PMD_INIT_LOG(ERR, "Failed to create ethertype hash table!");
+ ret = -EINVAL;
+ goto err_ethertype_hash_table_create;
+ }
+ ethertype_rule->hash_map = rte_zmalloc("i40e_ethertype_hash_map",
+ sizeof(struct i40e_ethertype_filter *) *
+ I40E_MAX_ETHERTYPE_FILTER_NUM,
+ 0);
+ if (!ethertype_rule->hash_map) {
+ PMD_INIT_LOG(ERR,
+ "Failed to allocate memory for ethertype hash map!");
+ ret = -ENOMEM;
+ goto err_ethertype_hash_map_alloc;
+ }
+
return 0;
+err_ethertype_hash_map_alloc:
+ rte_hash_free(ethertype_rule->hash_table);
+err_ethertype_hash_table_create:
+ rte_free(dev->data->mac_addrs);
err_mac_alloc:
i40e_vsi_release(pf->main_vsi);
err_setup_pf_switch:
@@ -1206,25 +1247,42 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
static int
eth_i40e_dev_uninit(struct rte_eth_dev *dev)
{
+ struct i40e_pf *pf;
struct rte_pci_device *pci_dev;
struct rte_intr_handle *intr_handle;
struct i40e_hw *hw;
struct i40e_filter_control_settings settings;
+ struct i40e_ethertype_filter *p_ethertype;
int ret;
uint8_t aq_fail = 0;
+ struct i40e_ethertype_rule *ethertype_rule;
PMD_INIT_FUNC_TRACE();
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
pci_dev = I40E_DEV_TO_PCI(dev);
intr_handle = &pci_dev->intr_handle;
+ ethertype_rule = &pf->ethertype;
if (hw->adapter_stopped == 0)
i40e_dev_close(dev);
+ /* Remove all ethertype filter rules and hash */
+ if (ethertype_rule->hash_map)
+ rte_free(ethertype_rule->hash_map);
+ if (ethertype_rule->hash_table)
+ rte_hash_free(ethertype_rule->hash_table);
+
+ while ((p_ethertype = TAILQ_FIRST(ðertype_rule->ethertype_list))) {
+ TAILQ_REMOVE(ðertype_rule->ethertype_list,
+ p_ethertype, rules);
+ rte_free(p_ethertype);
+ }
+
dev->dev_ops = NULL;
dev->rx_pkt_burst = NULL;
dev->tx_pkt_burst = NULL;
@@ -7970,6 +8028,82 @@ i40e_hash_filter_ctrl(struct rte_eth_dev *dev,
return ret;
}
+/* Convert ethertype filter structure */
+static int
+i40e_ethertype_filter_convert(const struct rte_eth_ethertype_filter *input,
+ struct i40e_ethertype_filter *filter)
+{
+ rte_memcpy(&filter->input.mac_addr, &input->mac_addr, ETHER_ADDR_LEN);
+ filter->input.ether_type = input->ether_type;
+ filter->flags = input->flags;
+ filter->queue = input->queue;
+
+ return 0;
+}
+
+/* Check if there exists the ethertype filter */
+struct i40e_ethertype_filter *
+i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_rule *ethertype_rule,
+ const struct i40e_ethertype_filter_input *input)
+{
+ int ret;
+
+ ret = rte_hash_lookup(ethertype_rule->hash_table, (const void *)input);
+ if (ret < 0)
+ return NULL;
+
+ return ethertype_rule->hash_map[ret];
+}
+
+/* Add ethertype filter in SW list */
+static int
+i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
+ struct i40e_ethertype_filter *filter)
+{
+ struct i40e_ethertype_rule *rule = &pf->ethertype;
+ int ret;
+
+ ret = rte_hash_add_key(rule->hash_table, &filter->input);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "Failed to insert ethertype filter"
+ " to hash table %d!",
+ ret);
+ return ret;
+ }
+ rule->hash_map[ret] = filter;
+
+ TAILQ_INSERT_TAIL(&rule->ethertype_list, filter, rules);
+
+ return 0;
+}
+
+/* Delete ethertype filter in SW list */
+int
+i40e_sw_ethertype_filter_del(struct i40e_pf *pf,
+ struct i40e_ethertype_filter_input *input)
+{
+ struct i40e_ethertype_rule *rule = &pf->ethertype;
+ struct i40e_ethertype_filter *filter;
+ int ret;
+
+ ret = rte_hash_del_key(rule->hash_table, input);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "Failed to delete ethertype filter"
+ " to hash table %d!",
+ ret);
+ return ret;
+ }
+ filter = rule->hash_map[ret];
+ rule->hash_map[ret] = NULL;
+
+ TAILQ_REMOVE(&rule->ethertype_list, filter, rules);
+ rte_free(filter);
+
+ return 0;
+}
+
/*
* Configure ethertype filter, which can director packet by filtering
* with mac address and ether_type or only ether_type
@@ -7980,6 +8114,9 @@ i40e_ethertype_filter_set(struct i40e_pf *pf,
bool add)
{
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
+ struct i40e_ethertype_filter *ethertype_filter, *node;
+ struct i40e_ethertype_filter check_filter;
struct i40e_control_filter_stats stats;
uint16_t flags = 0;
int ret;
@@ -7998,6 +8135,21 @@ i40e_ethertype_filter_set(struct i40e_pf *pf,
PMD_DRV_LOG(WARNING, "filter vlan ether_type in first tag is"
" not supported.");
+ /* Check if there is the filter in SW list */
+ memset(&check_filter, 0, sizeof(check_filter));
+ i40e_ethertype_filter_convert(filter, &check_filter);
+ node = i40e_sw_ethertype_filter_lookup(ethertype_rule,
+ &check_filter.input);
+ if (add && node) {
+ PMD_DRV_LOG(ERR, "Conflict with existing ethertype rules!");
+ return -EINVAL;
+ }
+
+ if (!add && !node) {
+ PMD_DRV_LOG(ERR, "There's no corresponding ethertype filter!");
+ return -EINVAL;
+ }
+
if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
@@ -8018,7 +8170,19 @@ i40e_ethertype_filter_set(struct i40e_pf *pf,
stats.mac_etype_free, stats.etype_free);
if (ret < 0)
return -ENOSYS;
- return 0;
+
+ /* Add or delete a filter in SW list */
+ if (add) {
+ ethertype_filter = rte_zmalloc("ethertype_filter",
+ sizeof(*ethertype_filter), 0);
+ rte_memcpy(ethertype_filter, &check_filter,
+ sizeof(check_filter));
+ ret = i40e_sw_ethertype_filter_insert(pf, ethertype_filter);
+ } else {
+ ret = i40e_sw_ethertype_filter_del(pf, &node->input);
+ }
+
+ return ret;
}
/*
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index b0363f9..dbfcf9f 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -37,6 +37,7 @@
#include <rte_eth_ctrl.h>
#include <rte_time.h>
#include <rte_kvargs.h>
+#include <rte_hash.h>
#define I40E_VLAN_TAG_SIZE 4
@@ -396,6 +397,30 @@ struct i40e_fdir_info {
struct i40e_fdir_flex_mask flex_mask[I40E_FILTER_PCTYPE_MAX];
};
+/* Ethertype filter number HW supports */
+#define I40E_MAX_ETHERTYPE_FILTER_NUM 768
+
+/* Ethertype filter struct */
+struct i40e_ethertype_filter_input {
+ struct ether_addr mac_addr; /* Mac address to match */
+ uint16_t ether_type; /* Ether type to match */
+};
+
+struct i40e_ethertype_filter {
+ TAILQ_ENTRY(i40e_ethertype_filter) rules;
+ struct i40e_ethertype_filter_input input;
+ uint16_t flags; /* Flags from RTE_ETHTYPE_FLAGS_* */
+ uint16_t queue; /* Queue assigned to when match */
+};
+
+TAILQ_HEAD(i40e_ethertype_filter_list, i40e_ethertype_filter);
+
+struct i40e_ethertype_rule {
+ struct i40e_ethertype_filter_list ethertype_list;
+ struct i40e_ethertype_filter **hash_map;
+ struct rte_hash *hash_table;
+};
+
#define I40E_MIRROR_MAX_ENTRIES_PER_RULE 64
#define I40E_MAX_MIRROR_RULES 64
/*
@@ -466,6 +491,7 @@ struct i40e_pf {
struct i40e_vmdq_info *vmdq;
struct i40e_fdir_info fdir; /* flow director info */
+ struct i40e_ethertype_rule ethertype; /* Ethertype filter rule */
struct i40e_fc_conf fc_conf; /* Flow control conf */
struct i40e_mirror_rule_list mirror_list;
uint16_t nb_mirror_rule; /* The number of mirror rules */
@@ -616,6 +642,11 @@ void i40e_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
struct rte_eth_rxq_info *qinfo);
void i40e_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
struct rte_eth_txq_info *qinfo);
+struct i40e_ethertype_filter *
+i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_rule *ethertype_rule,
+ const struct i40e_ethertype_filter_input *input);
+int i40e_sw_ethertype_filter_del(struct i40e_pf *pf,
+ struct i40e_ethertype_filter_input *input);
#define I40E_DEV_TO_PCI(eth_dev) \
RTE_DEV_TO_PCI((eth_dev)->device)
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v6 02/17] net/i40e: store tunnel filter
2017-01-05 15:45 ` [dpdk-dev] [PATCH v6 " Beilei Xing
2017-01-05 15:45 ` [dpdk-dev] [PATCH v6 01/17] net/i40e: store ethertype filter Beilei Xing
@ 2017-01-05 15:45 ` Beilei Xing
2017-01-05 15:45 ` [dpdk-dev] [PATCH v6 03/17] net/i40e: store flow director filter Beilei Xing
` (16 subsequent siblings)
18 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2017-01-05 15:45 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
Currently there's no tunnel filter stored in SW.
This patch stores tunnel filter in SW with cuckoo
hash, also adds protection if a tunnel filter has
been added.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_ethdev.c | 169 ++++++++++++++++++++++++++++++++++++++++-
drivers/net/i40e/i40e_ethdev.h | 32 ++++++++
2 files changed, 198 insertions(+), 3 deletions(-)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index bd13780..fc2d98a 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -468,6 +468,12 @@ static int i40e_ethertype_filter_convert(
static int i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
struct i40e_ethertype_filter *filter);
+static int i40e_tunnel_filter_convert(
+ struct i40e_aqc_add_remove_cloud_filters_element_data *cld_filter,
+ struct i40e_tunnel_filter *tunnel_filter);
+static int i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
+ struct i40e_tunnel_filter *tunnel_filter);
+
static const struct rte_pci_id pci_id_i40e_map[] = {
{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710) },
{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QEMU) },
@@ -947,6 +953,7 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
uint32_t len;
uint8_t aq_fail = 0;
struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
+ struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
PMD_INIT_FUNC_TRACE();
@@ -958,6 +965,14 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
.hash_func = rte_hash_crc,
};
+ char tunnel_hash_name[RTE_HASH_NAMESIZE];
+ struct rte_hash_parameters tunnel_hash_params = {
+ .name = tunnel_hash_name,
+ .entries = I40E_MAX_TUNNEL_FILTER_NUM,
+ .key_len = sizeof(struct i40e_tunnel_filter_input),
+ .hash_func = rte_hash_crc,
+ };
+
dev->dev_ops = &i40e_eth_dev_ops;
dev->rx_pkt_burst = i40e_recv_pkts;
dev->tx_pkt_burst = i40e_xmit_pkts;
@@ -1219,8 +1234,33 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
goto err_ethertype_hash_map_alloc;
}
+ /* Initialize tunnel filter rule list and hash */
+ TAILQ_INIT(&tunnel_rule->tunnel_list);
+ snprintf(tunnel_hash_name, RTE_HASH_NAMESIZE,
+ "tunnel_%s", dev->data->name);
+ tunnel_rule->hash_table = rte_hash_create(&tunnel_hash_params);
+ if (!tunnel_rule->hash_table) {
+ PMD_INIT_LOG(ERR, "Failed to create tunnel hash table!");
+ ret = -EINVAL;
+ goto err_tunnel_hash_table_create;
+ }
+ tunnel_rule->hash_map = rte_zmalloc("i40e_tunnel_hash_map",
+ sizeof(struct i40e_tunnel_filter *) *
+ I40E_MAX_TUNNEL_FILTER_NUM,
+ 0);
+ if (!tunnel_rule->hash_map) {
+ PMD_INIT_LOG(ERR,
+ "Failed to allocate memory for tunnel hash map!");
+ ret = -ENOMEM;
+ goto err_tunnel_hash_map_alloc;
+ }
+
return 0;
+err_tunnel_hash_map_alloc:
+ rte_hash_free(tunnel_rule->hash_table);
+err_tunnel_hash_table_create:
+ rte_free(ethertype_rule->hash_map);
err_ethertype_hash_map_alloc:
rte_hash_free(ethertype_rule->hash_table);
err_ethertype_hash_table_create:
@@ -1253,9 +1293,11 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
struct i40e_hw *hw;
struct i40e_filter_control_settings settings;
struct i40e_ethertype_filter *p_ethertype;
+ struct i40e_tunnel_filter *p_tunnel;
int ret;
uint8_t aq_fail = 0;
struct i40e_ethertype_rule *ethertype_rule;
+ struct i40e_tunnel_rule *tunnel_rule;
PMD_INIT_FUNC_TRACE();
@@ -1267,6 +1309,7 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
pci_dev = I40E_DEV_TO_PCI(dev);
intr_handle = &pci_dev->intr_handle;
ethertype_rule = &pf->ethertype;
+ tunnel_rule = &pf->tunnel;
if (hw->adapter_stopped == 0)
i40e_dev_close(dev);
@@ -1283,6 +1326,17 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
rte_free(p_ethertype);
}
+ /* Remove all tunnel filter rules and hash */
+ if (tunnel_rule->hash_map)
+ rte_free(tunnel_rule->hash_map);
+ if (tunnel_rule->hash_table)
+ rte_hash_free(tunnel_rule->hash_table);
+
+ while ((p_tunnel = TAILQ_FIRST(&tunnel_rule->tunnel_list))) {
+ TAILQ_REMOVE(&tunnel_rule->tunnel_list, p_tunnel, rules);
+ rte_free(p_tunnel);
+ }
+
dev->dev_ops = NULL;
dev->rx_pkt_burst = NULL;
dev->tx_pkt_burst = NULL;
@@ -6493,6 +6547,85 @@ i40e_dev_get_filter_type(uint16_t filter_type, uint16_t *flag)
return 0;
}
+/* Convert tunnel filter structure */
+static int
+i40e_tunnel_filter_convert(struct i40e_aqc_add_remove_cloud_filters_element_data
+ *cld_filter,
+ struct i40e_tunnel_filter *tunnel_filter)
+{
+ ether_addr_copy((struct ether_addr *)&cld_filter->outer_mac,
+ (struct ether_addr *)&tunnel_filter->input.outer_mac);
+ ether_addr_copy((struct ether_addr *)&cld_filter->inner_mac,
+ (struct ether_addr *)&tunnel_filter->input.inner_mac);
+ tunnel_filter->input.inner_vlan = cld_filter->inner_vlan;
+ tunnel_filter->input.flags = cld_filter->flags;
+ tunnel_filter->input.tenant_id = cld_filter->tenant_id;
+ tunnel_filter->queue = cld_filter->queue_number;
+
+ return 0;
+}
+
+/* Check if there exists the tunnel filter */
+struct i40e_tunnel_filter *
+i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_rule *tunnel_rule,
+ const struct i40e_tunnel_filter_input *input)
+{
+ int ret;
+
+ ret = rte_hash_lookup(tunnel_rule->hash_table, (const void *)input);
+ if (ret < 0)
+ return NULL;
+
+ return tunnel_rule->hash_map[ret];
+}
+
+/* Add a tunnel filter into the SW list */
+static int
+i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
+ struct i40e_tunnel_filter *tunnel_filter)
+{
+ struct i40e_tunnel_rule *rule = &pf->tunnel;
+ int ret;
+
+ ret = rte_hash_add_key(rule->hash_table, &tunnel_filter->input);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "Failed to insert tunnel filter to hash table %d!",
+ ret);
+ return ret;
+ }
+ rule->hash_map[ret] = tunnel_filter;
+
+ TAILQ_INSERT_TAIL(&rule->tunnel_list, tunnel_filter, rules);
+
+ return 0;
+}
+
+/* Delete a tunnel filter from the SW list */
+int
+i40e_sw_tunnel_filter_del(struct i40e_pf *pf,
+ struct i40e_tunnel_filter_input *input)
+{
+ struct i40e_tunnel_rule *rule = &pf->tunnel;
+ struct i40e_tunnel_filter *tunnel_filter;
+ int ret;
+
+ ret = rte_hash_del_key(rule->hash_table, input);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "Failed to delete tunnel filter to hash table %d!",
+ ret);
+ return ret;
+ }
+ tunnel_filter = rule->hash_map[ret];
+ rule->hash_map[ret] = NULL;
+
+ TAILQ_REMOVE(&rule->tunnel_list, tunnel_filter, rules);
+ rte_free(tunnel_filter);
+
+ return 0;
+}
+
static int
i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
struct rte_eth_tunnel_filter_conf *tunnel_filter,
@@ -6508,6 +6641,9 @@ i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
struct i40e_vsi *vsi = pf->main_vsi;
struct i40e_aqc_add_remove_cloud_filters_element_data *cld_filter;
struct i40e_aqc_add_remove_cloud_filters_element_data *pfilter;
+ struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
+ struct i40e_tunnel_filter *tunnel, *node;
+ struct i40e_tunnel_filter check_filter; /* Check if filter exists */
cld_filter = rte_zmalloc("tunnel_filter",
sizeof(struct i40e_aqc_add_remove_cloud_filters_element_data),
@@ -6570,11 +6706,38 @@ i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
pfilter->tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id);
pfilter->queue_number = rte_cpu_to_le_16(tunnel_filter->queue_id);
- if (add)
+ /* Check if there is the filter in SW list */
+ memset(&check_filter, 0, sizeof(check_filter));
+ i40e_tunnel_filter_convert(cld_filter, &check_filter);
+ node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &check_filter.input);
+ if (add && node) {
+ PMD_DRV_LOG(ERR, "Conflict with existing tunnel rules!");
+ return -EINVAL;
+ }
+
+ if (!add && !node) {
+ PMD_DRV_LOG(ERR, "There's no corresponding tunnel filter!");
+ return -EINVAL;
+ }
+
+ if (add) {
ret = i40e_aq_add_cloud_filters(hw, vsi->seid, cld_filter, 1);
- else
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to add a tunnel filter.");
+ return ret;
+ }
+ tunnel = rte_zmalloc("tunnel_filter", sizeof(*tunnel), 0);
+ rte_memcpy(tunnel, &check_filter, sizeof(check_filter));
+ ret = i40e_sw_tunnel_filter_insert(pf, tunnel);
+ } else {
ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
- cld_filter, 1);
+ cld_filter, 1);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter.");
+ return ret;
+ }
+ ret = i40e_sw_tunnel_filter_del(pf, &node->input);
+ }
rte_free(cld_filter);
return ret;
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index dbfcf9f..349e865 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -421,6 +421,32 @@ struct i40e_ethertype_rule {
struct rte_hash *hash_table;
};
+/* Tunnel filter number HW supports */
+#define I40E_MAX_TUNNEL_FILTER_NUM 400
+
+/* Tunnel filter struct */
+struct i40e_tunnel_filter_input {
+ uint8_t outer_mac[6]; /* Outer mac address to match */
+ uint8_t inner_mac[6]; /* Inner mac address to match */
+ uint16_t inner_vlan; /* Inner vlan address to match */
+ uint16_t flags; /* Filter type flag */
+ uint32_t tenant_id; /* Tenant id to match */
+};
+
+struct i40e_tunnel_filter {
+ TAILQ_ENTRY(i40e_tunnel_filter) rules;
+ struct i40e_tunnel_filter_input input;
+ uint16_t queue; /* Queue assigned to when match */
+};
+
+TAILQ_HEAD(i40e_tunnel_filter_list, i40e_tunnel_filter);
+
+struct i40e_tunnel_rule {
+ struct i40e_tunnel_filter_list tunnel_list;
+ struct i40e_tunnel_filter **hash_map;
+ struct rte_hash *hash_table;
+};
+
#define I40E_MIRROR_MAX_ENTRIES_PER_RULE 64
#define I40E_MAX_MIRROR_RULES 64
/*
@@ -492,6 +518,7 @@ struct i40e_pf {
struct i40e_fdir_info fdir; /* flow director info */
struct i40e_ethertype_rule ethertype; /* Ethertype filter rule */
+ struct i40e_tunnel_rule tunnel; /* Tunnel filter rule */
struct i40e_fc_conf fc_conf; /* Flow control conf */
struct i40e_mirror_rule_list mirror_list;
uint16_t nb_mirror_rule; /* The number of mirror rules */
@@ -647,6 +674,11 @@ i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_rule *ethertype_rule,
const struct i40e_ethertype_filter_input *input);
int i40e_sw_ethertype_filter_del(struct i40e_pf *pf,
struct i40e_ethertype_filter_input *input);
+struct i40e_tunnel_filter *
+i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_rule *tunnel_rule,
+ const struct i40e_tunnel_filter_input *input);
+int i40e_sw_tunnel_filter_del(struct i40e_pf *pf,
+ struct i40e_tunnel_filter_input *input);
#define I40E_DEV_TO_PCI(eth_dev) \
RTE_DEV_TO_PCI((eth_dev)->device)
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v6 03/17] net/i40e: store flow director filter
2017-01-05 15:45 ` [dpdk-dev] [PATCH v6 " Beilei Xing
2017-01-05 15:45 ` [dpdk-dev] [PATCH v6 01/17] net/i40e: store ethertype filter Beilei Xing
2017-01-05 15:45 ` [dpdk-dev] [PATCH v6 02/17] net/i40e: store tunnel filter Beilei Xing
@ 2017-01-05 15:45 ` Beilei Xing
2017-01-05 15:45 ` [dpdk-dev] [PATCH v6 04/17] net/i40e: restore ethertype filter Beilei Xing
` (15 subsequent siblings)
18 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2017-01-05 15:45 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
Currently there's no flow director filter stored in SW. This
patch stores flow director filters in SW with cuckoo hash,
also adds protection if a flow director filter has been added.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_ethdev.c | 48 +++++++++++++++++++
drivers/net/i40e/i40e_ethdev.h | 14 ++++++
drivers/net/i40e/i40e_fdir.c | 105 +++++++++++++++++++++++++++++++++++++++++
3 files changed, 167 insertions(+)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index fc2d98a..3212c76 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -954,6 +954,7 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
uint8_t aq_fail = 0;
struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
+ struct i40e_fdir_info *fdir_info = &pf->fdir;
PMD_INIT_FUNC_TRACE();
@@ -973,6 +974,14 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
.hash_func = rte_hash_crc,
};
+ char fdir_hash_name[RTE_HASH_NAMESIZE];
+ struct rte_hash_parameters fdir_hash_params = {
+ .name = fdir_hash_name,
+ .entries = I40E_MAX_FDIR_FILTER_NUM,
+ .key_len = sizeof(struct rte_eth_fdir_input),
+ .hash_func = rte_hash_crc,
+ };
+
dev->dev_ops = &i40e_eth_dev_ops;
dev->rx_pkt_burst = i40e_recv_pkts;
dev->tx_pkt_burst = i40e_xmit_pkts;
@@ -1255,8 +1264,33 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
goto err_tunnel_hash_map_alloc;
}
+ /* Initialize flow director filter rule list and hash */
+ TAILQ_INIT(&fdir_info->fdir_list);
+ snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
+ "fdir_%s", dev->data->name);
+ fdir_info->hash_table = rte_hash_create(&fdir_hash_params);
+ if (!fdir_info->hash_table) {
+ PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
+ ret = -EINVAL;
+ goto err_fdir_hash_table_create;
+ }
+ fdir_info->hash_map = rte_zmalloc("i40e_fdir_hash_map",
+ sizeof(struct i40e_fdir_filter *) *
+ I40E_MAX_FDIR_FILTER_NUM,
+ 0);
+ if (!fdir_info->hash_map) {
+ PMD_INIT_LOG(ERR,
+ "Failed to allocate memory for fdir hash map!");
+ ret = -ENOMEM;
+ goto err_fdir_hash_map_alloc;
+ }
+
return 0;
+err_fdir_hash_map_alloc:
+ rte_hash_free(fdir_info->hash_table);
+err_fdir_hash_table_create:
+ rte_free(tunnel_rule->hash_map);
err_tunnel_hash_map_alloc:
rte_hash_free(tunnel_rule->hash_table);
err_tunnel_hash_table_create:
@@ -1294,10 +1328,12 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
struct i40e_filter_control_settings settings;
struct i40e_ethertype_filter *p_ethertype;
struct i40e_tunnel_filter *p_tunnel;
+ struct i40e_fdir_filter *p_fdir;
int ret;
uint8_t aq_fail = 0;
struct i40e_ethertype_rule *ethertype_rule;
struct i40e_tunnel_rule *tunnel_rule;
+ struct i40e_fdir_info *fdir_info;
PMD_INIT_FUNC_TRACE();
@@ -1310,6 +1346,7 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
intr_handle = &pci_dev->intr_handle;
ethertype_rule = &pf->ethertype;
tunnel_rule = &pf->tunnel;
+ fdir_info = &pf->fdir;
if (hw->adapter_stopped == 0)
i40e_dev_close(dev);
@@ -1337,6 +1374,17 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
rte_free(p_tunnel);
}
+ /* Remove all flow director rules and hash */
+ if (fdir_info->hash_map)
+ rte_free(fdir_info->hash_map);
+ if (fdir_info->hash_table)
+ rte_hash_free(fdir_info->hash_table);
+
+ while ((p_fdir = TAILQ_FIRST(&fdir_info->fdir_list))) {
+ TAILQ_REMOVE(&fdir_info->fdir_list, p_fdir, rules);
+ rte_free(p_fdir);
+ }
+
dev->dev_ops = NULL;
dev->rx_pkt_burst = NULL;
dev->tx_pkt_burst = NULL;
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 349e865..43a3dbb 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -377,6 +377,14 @@ struct i40e_fdir_flex_mask {
};
#define I40E_FILTER_PCTYPE_MAX 64
+#define I40E_MAX_FDIR_FILTER_NUM (1024 * 8)
+
+struct i40e_fdir_filter {
+ TAILQ_ENTRY(i40e_fdir_filter) rules;
+ struct rte_eth_fdir_filter fdir;
+};
+
+TAILQ_HEAD(i40e_fdir_filter_list, i40e_fdir_filter);
/*
* A structure used to define fields of a FDIR related info.
*/
@@ -395,6 +403,10 @@ struct i40e_fdir_info {
*/
struct i40e_fdir_flex_pit flex_set[I40E_MAX_FLXPLD_LAYER * I40E_MAX_FLXPLD_FIED];
struct i40e_fdir_flex_mask flex_mask[I40E_FILTER_PCTYPE_MAX];
+
+ struct i40e_fdir_filter_list fdir_list;
+ struct i40e_fdir_filter **hash_map;
+ struct rte_hash *hash_table;
};
/* Ethertype filter number HW supports */
@@ -674,6 +686,8 @@ i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_rule *ethertype_rule,
const struct i40e_ethertype_filter_input *input);
int i40e_sw_ethertype_filter_del(struct i40e_pf *pf,
struct i40e_ethertype_filter_input *input);
+int i40e_sw_fdir_filter_del(struct i40e_pf *pf,
+ struct rte_eth_fdir_input *input);
struct i40e_tunnel_filter *
i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_rule *tunnel_rule,
const struct i40e_tunnel_filter_input *input);
diff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c
index 335bf15..4a29b37 100644
--- a/drivers/net/i40e/i40e_fdir.c
+++ b/drivers/net/i40e/i40e_fdir.c
@@ -121,6 +121,14 @@ static int i40e_fdir_filter_programming(struct i40e_pf *pf,
bool add);
static int i40e_fdir_flush(struct rte_eth_dev *dev);
+static int i40e_fdir_filter_convert(const struct rte_eth_fdir_filter *input,
+ struct i40e_fdir_filter *filter);
+static struct i40e_fdir_filter *
+i40e_sw_fdir_filter_lookup(struct i40e_fdir_info *fdir_info,
+ const struct rte_eth_fdir_input *input);
+static int i40e_sw_fdir_filter_insert(struct i40e_pf *pf,
+ struct i40e_fdir_filter *filter);
+
static int
i40e_fdir_rx_queue_init(struct i40e_rx_queue *rxq)
{
@@ -1017,6 +1025,74 @@ i40e_check_fdir_programming_status(struct i40e_rx_queue *rxq)
return ret;
}
+static int
+i40e_fdir_filter_convert(const struct rte_eth_fdir_filter *input,
+ struct i40e_fdir_filter *filter)
+{
+ rte_memcpy(&filter->fdir, input, sizeof(struct rte_eth_fdir_filter));
+ return 0;
+}
+
+/* Check if there exists the flow director filter */
+static struct i40e_fdir_filter *
+i40e_sw_fdir_filter_lookup(struct i40e_fdir_info *fdir_info,
+ const struct rte_eth_fdir_input *input)
+{
+ int ret;
+
+ ret = rte_hash_lookup(fdir_info->hash_table, (const void *)input);
+ if (ret < 0)
+ return NULL;
+
+ return fdir_info->hash_map[ret];
+}
+
+/* Add a flow director filter into the SW list */
+static int
+i40e_sw_fdir_filter_insert(struct i40e_pf *pf, struct i40e_fdir_filter *filter)
+{
+ struct i40e_fdir_info *fdir_info = &pf->fdir;
+ int ret;
+
+ ret = rte_hash_add_key(fdir_info->hash_table,
+ &filter->fdir.input);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "Failed to insert fdir filter to hash table %d!",
+ ret);
+ return ret;
+ }
+ fdir_info->hash_map[ret] = filter;
+
+ TAILQ_INSERT_TAIL(&fdir_info->fdir_list, filter, rules);
+
+ return 0;
+}
+
+/* Delete a flow director filter from the SW list */
+int
+i40e_sw_fdir_filter_del(struct i40e_pf *pf, struct rte_eth_fdir_input *input)
+{
+ struct i40e_fdir_info *fdir_info = &pf->fdir;
+ struct i40e_fdir_filter *filter;
+ int ret;
+
+ ret = rte_hash_del_key(fdir_info->hash_table, input);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "Failed to delete fdir filter to hash table %d!",
+ ret);
+ return ret;
+ }
+ filter = fdir_info->hash_map[ret];
+ fdir_info->hash_map[ret] = NULL;
+
+ TAILQ_REMOVE(&fdir_info->fdir_list, filter, rules);
+ rte_free(filter);
+
+ return 0;
+}
+
/*
* i40e_add_del_fdir_filter - add or remove a flow director filter.
* @pf: board private structure
@@ -1032,6 +1108,9 @@ i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
enum i40e_filter_pctype pctype;
+ struct i40e_fdir_info *fdir_info = &pf->fdir;
+ struct i40e_fdir_filter *fdir_filter, *node;
+ struct i40e_fdir_filter check_filter; /* Check if the filter exists */
int ret = 0;
if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
@@ -1054,6 +1133,22 @@ i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
return -EINVAL;
}
+ /* Check if there is the filter in SW list */
+ memset(&check_filter, 0, sizeof(check_filter));
+ i40e_fdir_filter_convert(filter, &check_filter);
+ node = i40e_sw_fdir_filter_lookup(fdir_info, &check_filter.fdir.input);
+ if (add && node) {
+ PMD_DRV_LOG(ERR,
+ "Conflict with existing flow director rules!");
+ return -EINVAL;
+ }
+
+ if (!add && !node) {
+ PMD_DRV_LOG(ERR,
+ "There's no corresponding flow firector filter!");
+ return -EINVAL;
+ }
+
memset(pkt, 0, I40E_FDIR_PKT_LEN);
ret = i40e_fdir_construct_pkt(pf, &filter->input, pkt);
@@ -1077,6 +1172,16 @@ i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
pctype);
return ret;
}
+
+ if (add) {
+ fdir_filter = rte_zmalloc("fdir_filter",
+ sizeof(*fdir_filter), 0);
+ rte_memcpy(fdir_filter, &check_filter, sizeof(check_filter));
+ ret = i40e_sw_fdir_filter_insert(pf, fdir_filter);
+ } else {
+ ret = i40e_sw_fdir_filter_del(pf, &node->fdir.input);
+ }
+
return ret;
}
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v6 04/17] net/i40e: restore ethertype filter
2017-01-05 15:45 ` [dpdk-dev] [PATCH v6 " Beilei Xing
` (2 preceding siblings ...)
2017-01-05 15:45 ` [dpdk-dev] [PATCH v6 03/17] net/i40e: store flow director filter Beilei Xing
@ 2017-01-05 15:45 ` Beilei Xing
2017-01-05 15:45 ` [dpdk-dev] [PATCH v6 05/17] net/i40e: restore tunnel filter Beilei Xing
` (14 subsequent siblings)
18 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2017-01-05 15:45 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
Add support for restoring the ethertype filter in case the
filter is dropped accidentally, since with the generic filter
API all filters must be explicitly added and removed by the user.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_ethdev.c | 44 ++++++++++++++++++++++++++++++++++++++++++
1 file changed, 44 insertions(+)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 3212c76..6e6fc55 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -474,6 +474,9 @@ static int i40e_tunnel_filter_convert(
static int i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
struct i40e_tunnel_filter *tunnel_filter);
+static void i40e_ethertype_filter_restore(struct i40e_pf *pf);
+static void i40e_filter_restore(struct i40e_pf *pf);
+
static const struct rte_pci_id pci_id_i40e_map[] = {
{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710) },
{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QEMU) },
@@ -1964,6 +1967,8 @@ i40e_dev_start(struct rte_eth_dev *dev)
/* enable uio intr after callback register */
rte_intr_enable(intr_handle);
+ i40e_filter_restore(pf);
+
return I40E_SUCCESS;
err_up:
@@ -10089,3 +10094,42 @@ i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
return ret;
}
+
+/* Restore ethertype filter */
+static void
+i40e_ethertype_filter_restore(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_ethertype_filter_list
+ *ethertype_list = &pf->ethertype.ethertype_list;
+ struct i40e_ethertype_filter *f;
+ struct i40e_control_filter_stats stats;
+ uint16_t flags;
+
+ TAILQ_FOREACH(f, ethertype_list, rules) {
+ flags = 0;
+ if (!(f->flags & RTE_ETHTYPE_FLAGS_MAC))
+ flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
+ if (f->flags & RTE_ETHTYPE_FLAGS_DROP)
+ flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
+ flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
+
+ memset(&stats, 0, sizeof(stats));
+ i40e_aq_add_rem_control_packet_filter(hw,
+ f->input.mac_addr.addr_bytes,
+ f->input.ether_type,
+ flags, pf->main_vsi->seid,
+ f->queue, 1, &stats, NULL);
+ }
+ PMD_DRV_LOG(INFO, "Ethertype filter:"
+ " mac_etype_used = %u, etype_used = %u,"
+ " mac_etype_free = %u, etype_free = %u\n",
+ stats.mac_etype_used, stats.etype_used,
+ stats.mac_etype_free, stats.etype_free);
+}
+
+static void
+i40e_filter_restore(struct i40e_pf *pf)
+{
+ i40e_ethertype_filter_restore(pf);
+}
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v6 05/17] net/i40e: restore tunnel filter
2017-01-05 15:45 ` [dpdk-dev] [PATCH v6 " Beilei Xing
` (3 preceding siblings ...)
2017-01-05 15:45 ` [dpdk-dev] [PATCH v6 04/17] net/i40e: restore ethertype filter Beilei Xing
@ 2017-01-05 15:45 ` Beilei Xing
2017-01-05 15:45 ` [dpdk-dev] [PATCH v6 06/17] net/i40e: restore flow director filter Beilei Xing
` (13 subsequent siblings)
18 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2017-01-05 15:45 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
Add support for restoring the tunnel filter.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_ethdev.c | 21 +++++++++++++++++++++
1 file changed, 21 insertions(+)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 6e6fc55..a636dc0 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -475,6 +475,7 @@ static int i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
struct i40e_tunnel_filter *tunnel_filter);
static void i40e_ethertype_filter_restore(struct i40e_pf *pf);
+static void i40e_tunnel_filter_restore(struct i40e_pf *pf);
static void i40e_filter_restore(struct i40e_pf *pf);
static const struct rte_pci_id pci_id_i40e_map[] = {
@@ -10128,8 +10129,28 @@ i40e_ethertype_filter_restore(struct i40e_pf *pf)
stats.mac_etype_free, stats.etype_free);
}
+/* Restore tunnel filter */
+static void
+i40e_tunnel_filter_restore(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_vsi *vsi = pf->main_vsi;
+ struct i40e_tunnel_filter_list
+ *tunnel_list = &pf->tunnel.tunnel_list;
+ struct i40e_tunnel_filter *f;
+ struct i40e_aqc_add_remove_cloud_filters_element_data cld_filter;
+
+ TAILQ_FOREACH(f, tunnel_list, rules) {
+ memset(&cld_filter, 0, sizeof(cld_filter));
+ rte_memcpy(&cld_filter, &f->input, sizeof(f->input));
+ cld_filter.queue_number = f->queue;
+ i40e_aq_add_cloud_filters(hw, vsi->seid, &cld_filter, 1);
+ }
+}
+
static void
i40e_filter_restore(struct i40e_pf *pf)
{
i40e_ethertype_filter_restore(pf);
+ i40e_tunnel_filter_restore(pf);
}
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v6 06/17] net/i40e: restore flow director filter
2017-01-05 15:45 ` [dpdk-dev] [PATCH v6 " Beilei Xing
` (4 preceding siblings ...)
2017-01-05 15:45 ` [dpdk-dev] [PATCH v6 05/17] net/i40e: restore tunnel filter Beilei Xing
@ 2017-01-05 15:45 ` Beilei Xing
2017-01-05 15:46 ` [dpdk-dev] [PATCH v6 07/17] net/i40e: add flow validate function Beilei Xing
` (12 subsequent siblings)
18 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2017-01-05 15:45 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
Add support for restoring the flow director filter.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_ethdev.c | 1 +
drivers/net/i40e/i40e_ethdev.h | 1 +
drivers/net/i40e/i40e_fdir.c | 31 +++++++++++++++++++++++++++++++
3 files changed, 33 insertions(+)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index a636dc0..02331d2 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -10153,4 +10153,5 @@ i40e_filter_restore(struct i40e_pf *pf)
{
i40e_ethertype_filter_restore(pf);
i40e_tunnel_filter_restore(pf);
+ i40e_fdir_filter_restore(pf);
}
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 43a3dbb..82baab6 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -670,6 +670,7 @@ int i40e_fdir_ctrl_func(struct rte_eth_dev *dev,
int i40e_select_filter_input_set(struct i40e_hw *hw,
struct rte_eth_input_set_conf *conf,
enum rte_filter_type filter);
+void i40e_fdir_filter_restore(struct i40e_pf *pf);
int i40e_hash_filter_inset_select(struct i40e_hw *hw,
struct rte_eth_input_set_conf *conf);
int i40e_fdir_filter_inset_select(struct i40e_pf *pf,
diff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c
index 4a29b37..f89dbc9 100644
--- a/drivers/net/i40e/i40e_fdir.c
+++ b/drivers/net/i40e/i40e_fdir.c
@@ -1586,3 +1586,34 @@ i40e_fdir_ctrl_func(struct rte_eth_dev *dev,
}
return ret;
}
+
+/* Restore flow director filter */
+void
+i40e_fdir_filter_restore(struct i40e_pf *pf)
+{
+ struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(pf->main_vsi);
+ struct i40e_fdir_filter_list *fdir_list = &pf->fdir.fdir_list;
+ struct i40e_fdir_filter *f;
+#ifdef RTE_LIBRTE_I40E_DEBUG_DRIVER
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ uint32_t fdstat;
+ uint32_t guarant_cnt; /**< Number of filters in guaranteed spaces. */
+ uint32_t best_cnt; /**< Number of filters in best effort spaces. */
+#endif /* RTE_LIBRTE_I40E_DEBUG_DRIVER */
+
+ TAILQ_FOREACH(f, fdir_list, rules)
+ i40e_add_del_fdir_filter(dev, &f->fdir, TRUE);
+
+#ifdef RTE_LIBRTE_I40E_DEBUG_DRIVER
+ fdstat = I40E_READ_REG(hw, I40E_PFQF_FDSTAT);
+ guarant_cnt =
+ (uint32_t)((fdstat & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) >>
+ I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT);
+ best_cnt =
+ (uint32_t)((fdstat & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
+ I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
+#endif /* RTE_LIBRTE_I40E_DEBUG_DRIVER */
+
+ PMD_DRV_LOG(INFO, "FDIR: Guarant count: %d, Best count: %d\n",
+ guarant_cnt, best_cnt);
+}
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v6 07/17] net/i40e: add flow validate function
2017-01-05 15:45 ` [dpdk-dev] [PATCH v6 " Beilei Xing
` (5 preceding siblings ...)
2017-01-05 15:45 ` [dpdk-dev] [PATCH v6 06/17] net/i40e: restore flow director filter Beilei Xing
@ 2017-01-05 15:46 ` Beilei Xing
2017-01-05 15:46 ` [dpdk-dev] [PATCH v6 08/17] net/i40e: parse flow director filter Beilei Xing
` (11 subsequent siblings)
18 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2017-01-05 15:46 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
This patch adds the i40e_flow_validate function to check whether
a flow is valid according to the flow pattern.
i40e_flow_parse_ethertype_filter is added first; it also
retrieves the ethertype info.
i40e_flow.c is added to handle all generic filter events.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/Makefile | 1 +
drivers/net/i40e/i40e_ethdev.c | 12 +-
drivers/net/i40e/i40e_ethdev.h | 23 ++
drivers/net/i40e/i40e_flow.c | 492 +++++++++++++++++++++++++++++++++++++++++
4 files changed, 523 insertions(+), 5 deletions(-)
create mode 100644 drivers/net/i40e/i40e_flow.c
diff --git a/drivers/net/i40e/Makefile b/drivers/net/i40e/Makefile
index 11175c4..89bd85a 100644
--- a/drivers/net/i40e/Makefile
+++ b/drivers/net/i40e/Makefile
@@ -105,6 +105,7 @@ endif
SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_ethdev_vf.c
SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_pf.c
SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_fdir.c
+SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_flow.c
# vector PMD driver needs SSE4.1 support
ifeq ($(findstring RTE_MACHINE_CPUFLAG_SSE4_1,$(CFLAGS)),)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 02331d2..01338ca 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -285,11 +285,6 @@
#define I40E_INSET_IPV6_HOP_LIMIT_MASK 0x000CFF00UL
#define I40E_INSET_IPV6_NEXT_HDR_MASK 0x000C00FFUL
-#define I40E_GL_SWT_L2TAGCTRL(_i) (0x001C0A70 + ((_i) * 4))
-#define I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT 16
-#define I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_MASK \
- I40E_MASK(0xFFFF, I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT)
-
/* PCI offset for querying capability */
#define PCI_DEV_CAP_REG 0xA4
/* PCI offset for enabling/disabling Extended Tag */
@@ -8441,6 +8436,8 @@ i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
return ret;
}
+extern const struct rte_flow_ops i40e_flow_ops;
+
static int
i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
enum rte_filter_type filter_type,
@@ -8472,6 +8469,11 @@ i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
case RTE_ETH_FILTER_FDIR:
ret = i40e_fdir_ctrl_func(dev, filter_op, arg);
break;
+ case RTE_ETH_FILTER_GENERIC:
+ if (filter_op != RTE_ETH_FILTER_GET)
+ return -EINVAL;
+ *(const void **)arg = &i40e_flow_ops;
+ break;
default:
PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
filter_type);
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 82baab6..00c2a0a 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -38,6 +38,7 @@
#include <rte_time.h>
#include <rte_kvargs.h>
#include <rte_hash.h>
+#include <rte_flow_driver.h>
#define I40E_VLAN_TAG_SIZE 4
@@ -189,6 +190,11 @@ enum i40e_flxpld_layer_idx {
#define FLOATING_VEB_SUPPORTED_FW_MAJ 5
#define FLOATING_VEB_SUPPORTED_FW_MIN 0
+#define I40E_GL_SWT_L2TAGCTRL(_i) (0x001C0A70 + ((_i) * 4))
+#define I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT 16
+#define I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_MASK \
+ I40E_MASK(0xFFFF, I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT)
+
struct i40e_adapter;
/**
@@ -629,6 +635,23 @@ struct i40e_adapter {
struct rte_timecounter tx_tstamp_tc;
};
+union i40e_filter_t {
+ struct rte_eth_ethertype_filter ethertype_filter;
+ struct rte_eth_fdir_filter fdir_filter;
+ struct rte_eth_tunnel_filter_conf tunnel_filter;
+};
+
+typedef int (*parse_filter_t)(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter);
+struct i40e_valid_pattern {
+ enum rte_flow_item_type *items;
+ parse_filter_t parse_filter;
+};
+
int i40e_dev_switch_queues(struct i40e_pf *pf, bool on);
int i40e_vsi_release(struct i40e_vsi *vsi);
struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf,
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
new file mode 100644
index 0000000..5ca7a42
--- /dev/null
+++ b/drivers/net/i40e/i40e_flow.c
@@ -0,0 +1,492 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_log.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_eth_ctrl.h>
+#include <rte_tailq.h>
+#include <rte_flow_driver.h>
+
+#include "i40e_logs.h"
+#include "base/i40e_type.h"
+#include "base/i40e_prototype.h"
+#include "i40e_ethdev.h"
+
+static int i40e_flow_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error);
+static int
+i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
+ const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ struct rte_eth_ethertype_filter *filter);
+static int i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error,
+ struct rte_eth_ethertype_filter *filter);
+static int i40e_flow_parse_attr(const struct rte_flow_attr *attr,
+ struct rte_flow_error *error);
+static int i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter);
+
+const struct rte_flow_ops i40e_flow_ops = {
+ .validate = i40e_flow_validate,
+};
+
+union i40e_filter_t cons_filter;
+
+/* Pattern matched ethertype filter */
+static enum rte_flow_item_type pattern_ethertype[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static struct i40e_valid_pattern i40e_supported_patterns[] = {
+ /* Ethertype */
+ { pattern_ethertype, i40e_flow_parse_ethertype_filter },
+};
+
+#define NEXT_ITEM_OF_ACTION(act, actions, index) \
+ do { \
+ act = actions + index; \
+ while (act->type == RTE_FLOW_ACTION_TYPE_VOID) { \
+ index++; \
+ act = actions + index; \
+ } \
+ } while (0)
+
+/* Find the first VOID or non-VOID item pointer */
+static const struct rte_flow_item *
+i40e_find_first_item(const struct rte_flow_item *item, bool is_void)
+{
+ bool is_find;
+
+ while (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ if (is_void)
+ is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
+ else
+ is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
+ if (is_find)
+ break;
+ item++;
+ }
+ return item;
+}
+
+/* Skip all VOID items of the pattern */
+static void
+i40e_pattern_skip_void_item(struct rte_flow_item *items,
+ const struct rte_flow_item *pattern)
+{
+ uint32_t cpy_count = 0;
+ const struct rte_flow_item *pb = pattern, *pe = pattern;
+
+ for (;;) {
+ /* Find a non-void item first */
+ pb = i40e_find_first_item(pb, false);
+ if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
+ pe = pb;
+ break;
+ }
+
+ /* Find a void item */
+ pe = i40e_find_first_item(pb + 1, true);
+
+ cpy_count = pe - pb;
+ rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);
+
+ items += cpy_count;
+
+ if (pe->type == RTE_FLOW_ITEM_TYPE_END) {
+ pb = pe;
+ break;
+ }
+
+ pb = pe + 1;
+ }
+ /* Copy the END item. */
+ rte_memcpy(items, pe, sizeof(struct rte_flow_item));
+}
+
+/* Check if the pattern matches a supported item type array */
+static bool
+i40e_match_pattern(enum rte_flow_item_type *item_array,
+ struct rte_flow_item *pattern)
+{
+ struct rte_flow_item *item = pattern;
+
+ while ((*item_array == item->type) &&
+ (*item_array != RTE_FLOW_ITEM_TYPE_END)) {
+ item_array++;
+ item++;
+ }
+
+ return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
+ item->type == RTE_FLOW_ITEM_TYPE_END);
+}
+
+/* Find if there's parse filter function matched */
+static parse_filter_t
+i40e_find_parse_filter_func(struct rte_flow_item *pattern)
+{
+ parse_filter_t parse_filter = NULL;
+ uint8_t i = 0;
+
+ for (; i < RTE_DIM(i40e_supported_patterns); i++) {
+ if (i40e_match_pattern(i40e_supported_patterns[i].items,
+ pattern)) {
+ parse_filter = i40e_supported_patterns[i].parse_filter;
+ break;
+ }
+ }
+
+ return parse_filter;
+}
+
+/* Parse attributes */
+static int
+i40e_flow_parse_attr(const struct rte_flow_attr *attr,
+ struct rte_flow_error *error)
+{
+ /* Must be input direction */
+ if (!attr->ingress) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ attr, "Only support ingress.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
+ if (attr->egress) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ attr, "Not support egress.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
+ if (attr->priority) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ attr, "Not support priority.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
+ if (attr->group) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+ attr, "Not support group.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+static uint16_t
+i40e_get_outer_vlan(struct rte_eth_dev *dev)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;
+ uint64_t reg_r = 0;
+ uint16_t reg_id;
+ uint16_t tpid;
+
+ if (qinq)
+ reg_id = 2;
+ else
+ reg_id = 3;
+
+ i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
+ ®_r, NULL);
+
+ tpid = (reg_r >> I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT) & 0xFFFF;
+
+ return tpid;
+}
+
+/* 1. Last in item should be NULL as range is not supported.
+ * 2. Supported filter types: MAC_ETHTYPE and ETHTYPE.
+ * 3. SRC mac_addr mask should be 00:00:00:00:00:00.
+ * 4. DST mac_addr mask should be 00:00:00:00:00:00 or
+ * FF:FF:FF:FF:FF:FF
+ * 5. Ether_type mask should be 0xFFFF.
+ */
+static int
+i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
+ const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ struct rte_eth_ethertype_filter *filter)
+{
+ const struct rte_flow_item *item = pattern;
+ const struct rte_flow_item_eth *eth_spec;
+ const struct rte_flow_item_eth *eth_mask;
+ enum rte_flow_item_type item_type;
+ uint16_t outer_tpid;
+
+ outer_tpid = i40e_get_outer_vlan(dev);
+
+ for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Not support range");
+ return -rte_errno;
+ }
+ item_type = item->type;
+ switch (item_type) {
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ eth_spec = (const struct rte_flow_item_eth *)item->spec;
+ eth_mask = (const struct rte_flow_item_eth *)item->mask;
+ /* Get the MAC info. */
+ if (!eth_spec || !eth_mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "NULL ETH spec/mask");
+ return -rte_errno;
+ }
+
+ /* Mask bits of source MAC address must be full of 0.
+ * Mask bits of destination MAC address must be full
+ * of 1 or full of 0.
+ */
+ if (!is_zero_ether_addr(ð_mask->src) ||
+ (!is_zero_ether_addr(ð_mask->dst) &&
+ !is_broadcast_ether_addr(ð_mask->dst))) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid MAC_addr mask");
+ return -rte_errno;
+ }
+
+ if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid ethertype mask");
+ return -rte_errno;
+ }
+
+ /* If mask bits of destination MAC address
+ * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
+ */
+ if (is_broadcast_ether_addr(ð_mask->dst)) {
+ filter->mac_addr = eth_spec->dst;
+ filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
+ } else {
+ filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
+ }
+ filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
+
+ if (filter->ether_type == ETHER_TYPE_IPv4 ||
+ filter->ether_type == ETHER_TYPE_IPv6 ||
+ filter->ether_type == outer_tpid) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Unsupported ether_type in"
+ " control packet filter.");
+ return -rte_errno;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ return 0;
+}
+
+/* Ethertype action only supports QUEUE or DROP. */
+static int
+i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error,
+ struct rte_eth_ethertype_filter *filter)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ const struct rte_flow_action *act;
+ const struct rte_flow_action_queue *act_q;
+ uint32_t index = 0;
+
+ /* Check if the first non-void action is QUEUE or DROP. */
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
+ act->type != RTE_FLOW_ACTION_TYPE_DROP) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
+ act_q = (const struct rte_flow_action_queue *)act->conf;
+ filter->queue = act_q->index;
+ if (filter->queue >= pf->dev_data->nb_rx_queues) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Invalid queue ID for"
+ " ethertype_filter.");
+ return -rte_errno;
+ }
+ } else {
+ filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
+ }
+
+ /* Check if the next non-void item is END */
+ index++;
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+static int
+i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter)
+{
+ struct rte_eth_ethertype_filter *ethertype_filter =
+ &filter->ethertype_filter;
+ int ret;
+
+ ret = i40e_flow_parse_ethertype_pattern(dev, pattern, error,
+ ethertype_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_flow_parse_ethertype_action(dev, actions, error,
+ ethertype_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_flow_parse_attr(attr, error);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
+static int
+i40e_flow_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct rte_flow_item *items; /* internal pattern w/o VOID items */
+ parse_filter_t parse_filter;
+ uint32_t item_num = 0; /* non-void item number of pattern*/
+ uint32_t i = 0;
+ int ret;
+
+ if (!pattern) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL, "NULL pattern.");
+ return -rte_errno;
+ }
+
+ if (!actions) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+ NULL, "NULL action.");
+ return -rte_errno;
+ }
+
+ if (!attr) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL, "NULL attribute.");
+ return -rte_errno;
+ }
+
+ memset(&cons_filter, 0, sizeof(cons_filter));
+
+ /* Get the non-void item number of pattern */
+ while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
+ if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
+ item_num++;
+ i++;
+ }
+ item_num++;
+
+ items = rte_zmalloc("i40e_pattern",
+ item_num * sizeof(struct rte_flow_item), 0);
+ if (!items) {
+ rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL, "No memory for PMD internal items.");
+ return -ENOMEM;
+ }
+
+ i40e_pattern_skip_void_item(items, pattern);
+
+ /* Find if there's matched parse filter function */
+ parse_filter = i40e_find_parse_filter_func(items);
+ if (!parse_filter) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ pattern, "Unsupported pattern");
+ return -rte_errno;
+ }
+
+ ret = parse_filter(dev, attr, items, actions, error, &cons_filter);
+
+ rte_free(items);
+
+ return ret;
+}
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v6 08/17] net/i40e: parse flow director filter
2017-01-05 15:45 ` [dpdk-dev] [PATCH v6 " Beilei Xing
` (6 preceding siblings ...)
2017-01-05 15:46 ` [dpdk-dev] [PATCH v6 07/17] net/i40e: add flow validate function Beilei Xing
@ 2017-01-05 15:46 ` Beilei Xing
2017-01-05 15:46 ` [dpdk-dev] [PATCH v6 09/17] net/i40e: parse tunnel filter Beilei Xing
` (10 subsequent siblings)
18 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2017-01-05 15:46 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
This patch adds i40e_flow_parse_fdir_filter to check
whether a rule is a flow director rule according to the
flow pattern; the function also retrieves the flow
director info.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_ethdev.c | 56 +---
drivers/net/i40e/i40e_ethdev.h | 55 ++++
drivers/net/i40e/i40e_flow.c | 623 +++++++++++++++++++++++++++++++++++++++++
3 files changed, 679 insertions(+), 55 deletions(-)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 01338ca..81ed13e 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -139,60 +139,6 @@
#define I40E_DEFAULT_DCB_APP_NUM 1
#define I40E_DEFAULT_DCB_APP_PRIO 3
-#define I40E_INSET_NONE 0x00000000000000000ULL
-
-/* bit0 ~ bit 7 */
-#define I40E_INSET_DMAC 0x0000000000000001ULL
-#define I40E_INSET_SMAC 0x0000000000000002ULL
-#define I40E_INSET_VLAN_OUTER 0x0000000000000004ULL
-#define I40E_INSET_VLAN_INNER 0x0000000000000008ULL
-#define I40E_INSET_VLAN_TUNNEL 0x0000000000000010ULL
-
-/* bit 8 ~ bit 15 */
-#define I40E_INSET_IPV4_SRC 0x0000000000000100ULL
-#define I40E_INSET_IPV4_DST 0x0000000000000200ULL
-#define I40E_INSET_IPV6_SRC 0x0000000000000400ULL
-#define I40E_INSET_IPV6_DST 0x0000000000000800ULL
-#define I40E_INSET_SRC_PORT 0x0000000000001000ULL
-#define I40E_INSET_DST_PORT 0x0000000000002000ULL
-#define I40E_INSET_SCTP_VT 0x0000000000004000ULL
-
-/* bit 16 ~ bit 31 */
-#define I40E_INSET_IPV4_TOS 0x0000000000010000ULL
-#define I40E_INSET_IPV4_PROTO 0x0000000000020000ULL
-#define I40E_INSET_IPV4_TTL 0x0000000000040000ULL
-#define I40E_INSET_IPV6_TC 0x0000000000080000ULL
-#define I40E_INSET_IPV6_FLOW 0x0000000000100000ULL
-#define I40E_INSET_IPV6_NEXT_HDR 0x0000000000200000ULL
-#define I40E_INSET_IPV6_HOP_LIMIT 0x0000000000400000ULL
-#define I40E_INSET_TCP_FLAGS 0x0000000000800000ULL
-
-/* bit 32 ~ bit 47, tunnel fields */
-#define I40E_INSET_TUNNEL_IPV4_DST 0x0000000100000000ULL
-#define I40E_INSET_TUNNEL_IPV6_DST 0x0000000200000000ULL
-#define I40E_INSET_TUNNEL_DMAC 0x0000000400000000ULL
-#define I40E_INSET_TUNNEL_SRC_PORT 0x0000000800000000ULL
-#define I40E_INSET_TUNNEL_DST_PORT 0x0000001000000000ULL
-#define I40E_INSET_TUNNEL_ID 0x0000002000000000ULL
-
-/* bit 48 ~ bit 55 */
-#define I40E_INSET_LAST_ETHER_TYPE 0x0001000000000000ULL
-
-/* bit 56 ~ bit 63, Flex Payload */
-#define I40E_INSET_FLEX_PAYLOAD_W1 0x0100000000000000ULL
-#define I40E_INSET_FLEX_PAYLOAD_W2 0x0200000000000000ULL
-#define I40E_INSET_FLEX_PAYLOAD_W3 0x0400000000000000ULL
-#define I40E_INSET_FLEX_PAYLOAD_W4 0x0800000000000000ULL
-#define I40E_INSET_FLEX_PAYLOAD_W5 0x1000000000000000ULL
-#define I40E_INSET_FLEX_PAYLOAD_W6 0x2000000000000000ULL
-#define I40E_INSET_FLEX_PAYLOAD_W7 0x4000000000000000ULL
-#define I40E_INSET_FLEX_PAYLOAD_W8 0x8000000000000000ULL
-#define I40E_INSET_FLEX_PAYLOAD \
- (I40E_INSET_FLEX_PAYLOAD_W1 | I40E_INSET_FLEX_PAYLOAD_W2 | \
- I40E_INSET_FLEX_PAYLOAD_W3 | I40E_INSET_FLEX_PAYLOAD_W4 | \
- I40E_INSET_FLEX_PAYLOAD_W5 | I40E_INSET_FLEX_PAYLOAD_W6 | \
- I40E_INSET_FLEX_PAYLOAD_W7 | I40E_INSET_FLEX_PAYLOAD_W8)
-
/**
* Below are values for writing un-exposed registers suggested
* by silicon experts
@@ -7627,7 +7573,7 @@ i40e_validate_input_set(enum i40e_filter_pctype pctype,
}
/* default input set fields combination per pctype */
-static uint64_t
+uint64_t
i40e_get_default_input_set(uint16_t pctype)
{
static const uint64_t default_inset_table[] = {
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 00c2a0a..4597615 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -195,6 +195,60 @@ enum i40e_flxpld_layer_idx {
#define I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_MASK \
I40E_MASK(0xFFFF, I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT)
+#define I40E_INSET_NONE            0x0000000000000000ULL
+
+/* bit0 ~ bit 7 */
+#define I40E_INSET_DMAC 0x0000000000000001ULL
+#define I40E_INSET_SMAC 0x0000000000000002ULL
+#define I40E_INSET_VLAN_OUTER 0x0000000000000004ULL
+#define I40E_INSET_VLAN_INNER 0x0000000000000008ULL
+#define I40E_INSET_VLAN_TUNNEL 0x0000000000000010ULL
+
+/* bit 8 ~ bit 15 */
+#define I40E_INSET_IPV4_SRC 0x0000000000000100ULL
+#define I40E_INSET_IPV4_DST 0x0000000000000200ULL
+#define I40E_INSET_IPV6_SRC 0x0000000000000400ULL
+#define I40E_INSET_IPV6_DST 0x0000000000000800ULL
+#define I40E_INSET_SRC_PORT 0x0000000000001000ULL
+#define I40E_INSET_DST_PORT 0x0000000000002000ULL
+#define I40E_INSET_SCTP_VT 0x0000000000004000ULL
+
+/* bit 16 ~ bit 31 */
+#define I40E_INSET_IPV4_TOS 0x0000000000010000ULL
+#define I40E_INSET_IPV4_PROTO 0x0000000000020000ULL
+#define I40E_INSET_IPV4_TTL 0x0000000000040000ULL
+#define I40E_INSET_IPV6_TC 0x0000000000080000ULL
+#define I40E_INSET_IPV6_FLOW 0x0000000000100000ULL
+#define I40E_INSET_IPV6_NEXT_HDR 0x0000000000200000ULL
+#define I40E_INSET_IPV6_HOP_LIMIT 0x0000000000400000ULL
+#define I40E_INSET_TCP_FLAGS 0x0000000000800000ULL
+
+/* bit 32 ~ bit 47, tunnel fields */
+#define I40E_INSET_TUNNEL_IPV4_DST 0x0000000100000000ULL
+#define I40E_INSET_TUNNEL_IPV6_DST 0x0000000200000000ULL
+#define I40E_INSET_TUNNEL_DMAC 0x0000000400000000ULL
+#define I40E_INSET_TUNNEL_SRC_PORT 0x0000000800000000ULL
+#define I40E_INSET_TUNNEL_DST_PORT 0x0000001000000000ULL
+#define I40E_INSET_TUNNEL_ID 0x0000002000000000ULL
+
+/* bit 48 ~ bit 55 */
+#define I40E_INSET_LAST_ETHER_TYPE 0x0001000000000000ULL
+
+/* bit 56 ~ bit 63, Flex Payload */
+#define I40E_INSET_FLEX_PAYLOAD_W1 0x0100000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W2 0x0200000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W3 0x0400000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W4 0x0800000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W5 0x1000000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W6 0x2000000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W7 0x4000000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W8 0x8000000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD \
+ (I40E_INSET_FLEX_PAYLOAD_W1 | I40E_INSET_FLEX_PAYLOAD_W2 | \
+ I40E_INSET_FLEX_PAYLOAD_W3 | I40E_INSET_FLEX_PAYLOAD_W4 | \
+ I40E_INSET_FLEX_PAYLOAD_W5 | I40E_INSET_FLEX_PAYLOAD_W6 | \
+ I40E_INSET_FLEX_PAYLOAD_W7 | I40E_INSET_FLEX_PAYLOAD_W8)
+
struct i40e_adapter;
/**
@@ -717,6 +771,7 @@ i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_rule *tunnel_rule,
const struct i40e_tunnel_filter_input *input);
int i40e_sw_tunnel_filter_del(struct i40e_pf *pf,
struct i40e_tunnel_filter_input *input);
+uint64_t i40e_get_default_input_set(uint16_t pctype);
#define I40E_DEV_TO_PCI(eth_dev) \
RTE_DEV_TO_PCI((eth_dev)->device)
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index 5ca7a42..2eead93 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -52,6 +52,10 @@
#include "base/i40e_prototype.h"
#include "i40e_ethdev.h"
+#define I40E_IPV4_TC_SHIFT 4
+#define I40E_IPV6_TC_MASK (0x00FF << I40E_IPV4_TC_SHIFT)
+#define I40E_IPV6_FRAG_HEADER 44
+
static int i40e_flow_validate(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
@@ -66,6 +70,14 @@ static int i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
const struct rte_flow_action *actions,
struct rte_flow_error *error,
struct rte_eth_ethertype_filter *filter);
+static int i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
+ const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ struct rte_eth_fdir_filter *filter);
+static int i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error,
+ struct rte_eth_fdir_filter *filter);
static int i40e_flow_parse_attr(const struct rte_flow_attr *attr,
struct rte_flow_error *error);
static int i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
@@ -74,6 +86,12 @@ static int i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
const struct rte_flow_action actions[],
struct rte_flow_error *error,
union i40e_filter_t *filter);
+static int i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter);
const struct rte_flow_ops i40e_flow_ops = {
.validate = i40e_flow_validate,
@@ -87,9 +105,127 @@ static enum rte_flow_item_type pattern_ethertype[] = {
RTE_FLOW_ITEM_TYPE_END,
};
+/* Pattern matched flow director filter */
+static enum rte_flow_item_type pattern_fdir_ipv4[] = {
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_ext[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_udp[] = {
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_udp_ext[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_tcp[] = {
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_tcp_ext[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_sctp[] = {
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_sctp_ext[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6[] = {
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_ext[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_udp[] = {
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_udp_ext[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_tcp[] = {
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_tcp_ext[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_sctp[] = {
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_sctp_ext[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
static struct i40e_valid_pattern i40e_supported_patterns[] = {
/* Ethertype */
{ pattern_ethertype, i40e_flow_parse_ethertype_filter },
+ /* FDIR */
+ { pattern_fdir_ipv4, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_ext, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_udp, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_udp_ext, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_tcp, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_tcp_ext, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_sctp, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_sctp_ext, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_ext, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_udp, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_udp_ext, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_tcp, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_tcp_ext, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_sctp, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_sctp_ext, i40e_flow_parse_fdir_filter },
};
#define NEXT_ITEM_OF_ACTION(act, actions, index) \
@@ -422,6 +558,493 @@ i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
return ret;
}
+/* 1. Last in item should be NULL as range is not supported.
+ * 2. Supported flow type and input set: refer to array
+ * default_inset_table in i40e_ethdev.c.
+ * 3. Mask of fields which need to be matched should be
+ * filled with 1.
+ * 4. Mask of fields which needn't to be matched should be
+ * filled with 0.
+ */
+static int
+i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
+ const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ struct rte_eth_fdir_filter *filter)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ const struct rte_flow_item *item = pattern;
+ const struct rte_flow_item_eth *eth_spec, *eth_mask;
+ const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
+ const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
+ const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
+ const struct rte_flow_item_udp *udp_spec, *udp_mask;
+ const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
+ const struct rte_flow_item_vf *vf_spec;
+ uint32_t flow_type = RTE_ETH_FLOW_UNKNOWN;
+ enum i40e_filter_pctype pctype;
+ uint64_t input_set = I40E_INSET_NONE;
+ uint16_t flag_offset;
+ enum rte_flow_item_type item_type;
+ enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
+ uint32_t j;
+
+ for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Not support range");
+ return -rte_errno;
+ }
+ item_type = item->type;
+ switch (item_type) {
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ eth_spec = (const struct rte_flow_item_eth *)item->spec;
+ eth_mask = (const struct rte_flow_item_eth *)item->mask;
+ if (eth_spec || eth_mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid ETH spec/mask");
+ return -rte_errno;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ l3 = RTE_FLOW_ITEM_TYPE_IPV4;
+ ipv4_spec =
+ (const struct rte_flow_item_ipv4 *)item->spec;
+ ipv4_mask =
+ (const struct rte_flow_item_ipv4 *)item->mask;
+ if (!ipv4_spec || !ipv4_mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "NULL IPv4 spec/mask");
+ return -rte_errno;
+ }
+
+ /* Check IPv4 mask and update input set */
+ if (ipv4_mask->hdr.version_ihl ||
+ ipv4_mask->hdr.total_length ||
+ ipv4_mask->hdr.packet_id ||
+ ipv4_mask->hdr.fragment_offset ||
+ ipv4_mask->hdr.hdr_checksum) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid IPv4 mask.");
+ return -rte_errno;
+ }
+
+ if (ipv4_mask->hdr.src_addr == UINT32_MAX)
+ input_set |= I40E_INSET_IPV4_SRC;
+ if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
+ input_set |= I40E_INSET_IPV4_DST;
+ if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
+ input_set |= I40E_INSET_IPV4_TOS;
+ if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
+ input_set |= I40E_INSET_IPV4_TTL;
+ if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
+ input_set |= I40E_INSET_IPV4_PROTO;
+
+ /* Get filter info */
+ flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_OTHER;
+ /* Check if it is fragment. */
+ flag_offset =
+ rte_be_to_cpu_16(ipv4_spec->hdr.fragment_offset);
+ if (flag_offset & IPV4_HDR_OFFSET_MASK ||
+ flag_offset & IPV4_HDR_MF_FLAG)
+ flow_type = RTE_ETH_FLOW_FRAG_IPV4;
+
+ /* Get the filter info */
+ filter->input.flow.ip4_flow.proto =
+ ipv4_spec->hdr.next_proto_id;
+ filter->input.flow.ip4_flow.tos =
+ ipv4_spec->hdr.type_of_service;
+ filter->input.flow.ip4_flow.ttl =
+ ipv4_spec->hdr.time_to_live;
+ filter->input.flow.ip4_flow.src_ip =
+ ipv4_spec->hdr.src_addr;
+ filter->input.flow.ip4_flow.dst_ip =
+ ipv4_spec->hdr.dst_addr;
+
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ l3 = RTE_FLOW_ITEM_TYPE_IPV6;
+ ipv6_spec =
+ (const struct rte_flow_item_ipv6 *)item->spec;
+ ipv6_mask =
+ (const struct rte_flow_item_ipv6 *)item->mask;
+ if (!ipv6_spec || !ipv6_mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "NULL IPv6 spec/mask");
+ return -rte_errno;
+ }
+
+ /* Check IPv6 mask and update input set */
+ if (ipv6_mask->hdr.payload_len) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid IPv6 mask");
+ return -rte_errno;
+ }
+
+			/* SRC and DST address of IPv6 shouldn't be masked */
+ for (j = 0; j < RTE_DIM(ipv6_mask->hdr.src_addr); j++) {
+ if (ipv6_mask->hdr.src_addr[j] != UINT8_MAX ||
+ ipv6_mask->hdr.dst_addr[j] != UINT8_MAX) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid IPv6 mask");
+ return -rte_errno;
+ }
+ }
+
+ input_set |= I40E_INSET_IPV6_SRC;
+ input_set |= I40E_INSET_IPV6_DST;
+
+ if ((ipv6_mask->hdr.vtc_flow &
+ rte_cpu_to_be_16(I40E_IPV6_TC_MASK))
+ == rte_cpu_to_be_16(I40E_IPV6_TC_MASK))
+ input_set |= I40E_INSET_IPV6_TC;
+ if (ipv6_mask->hdr.proto == UINT8_MAX)
+ input_set |= I40E_INSET_IPV6_NEXT_HDR;
+ if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
+ input_set |= I40E_INSET_IPV6_HOP_LIMIT;
+
+ /* Get filter info */
+ filter->input.flow.ipv6_flow.tc =
+ (uint8_t)(ipv6_spec->hdr.vtc_flow <<
+ I40E_IPV4_TC_SHIFT);
+ filter->input.flow.ipv6_flow.proto =
+ ipv6_spec->hdr.proto;
+ filter->input.flow.ipv6_flow.hop_limits =
+ ipv6_spec->hdr.hop_limits;
+
+ rte_memcpy(filter->input.flow.ipv6_flow.src_ip,
+ ipv6_spec->hdr.src_addr, 16);
+ rte_memcpy(filter->input.flow.ipv6_flow.dst_ip,
+ ipv6_spec->hdr.dst_addr, 16);
+
+ /* Check if it is fragment. */
+ if (ipv6_spec->hdr.proto == I40E_IPV6_FRAG_HEADER)
+ flow_type = RTE_ETH_FLOW_FRAG_IPV6;
+ else
+ flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_OTHER;
+ break;
+ case RTE_FLOW_ITEM_TYPE_TCP:
+ tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
+ tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
+ if (!tcp_spec || !tcp_mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "NULL TCP spec/mask");
+ return -rte_errno;
+ }
+
+ /* Check TCP mask and update input set */
+ if (tcp_mask->hdr.sent_seq ||
+ tcp_mask->hdr.recv_ack ||
+ tcp_mask->hdr.data_off ||
+ tcp_mask->hdr.tcp_flags ||
+ tcp_mask->hdr.rx_win ||
+ tcp_mask->hdr.cksum ||
+ tcp_mask->hdr.tcp_urp) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid TCP mask");
+ return -rte_errno;
+ }
+
+ if (tcp_mask->hdr.src_port != UINT16_MAX ||
+ tcp_mask->hdr.dst_port != UINT16_MAX) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid TCP mask");
+ return -rte_errno;
+ }
+
+ input_set |= I40E_INSET_SRC_PORT;
+ input_set |= I40E_INSET_DST_PORT;
+
+ /* Get filter info */
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+ flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
+ else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+ flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_TCP;
+
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
+ filter->input.flow.tcp4_flow.src_port =
+ tcp_spec->hdr.src_port;
+ filter->input.flow.tcp4_flow.dst_port =
+ tcp_spec->hdr.dst_port;
+ } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
+ filter->input.flow.tcp6_flow.src_port =
+ tcp_spec->hdr.src_port;
+ filter->input.flow.tcp6_flow.dst_port =
+ tcp_spec->hdr.dst_port;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ udp_spec = (const struct rte_flow_item_udp *)item->spec;
+ udp_mask = (const struct rte_flow_item_udp *)item->mask;
+ if (!udp_spec || !udp_mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "NULL UDP spec/mask");
+ return -rte_errno;
+ }
+
+			/* Check UDP mask and update input set */
+ if (udp_mask->hdr.dgram_len ||
+ udp_mask->hdr.dgram_cksum) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid UDP mask");
+ return -rte_errno;
+ }
+
+ if (udp_mask->hdr.src_port != UINT16_MAX ||
+ udp_mask->hdr.dst_port != UINT16_MAX) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid UDP mask");
+ return -rte_errno;
+ }
+
+ input_set |= I40E_INSET_SRC_PORT;
+ input_set |= I40E_INSET_DST_PORT;
+
+ /* Get filter info */
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+ flow_type =
+ RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
+ else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+ flow_type =
+ RTE_ETH_FLOW_NONFRAG_IPV6_UDP;
+
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
+ filter->input.flow.udp4_flow.src_port =
+ udp_spec->hdr.src_port;
+ filter->input.flow.udp4_flow.dst_port =
+ udp_spec->hdr.dst_port;
+ } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
+ filter->input.flow.udp6_flow.src_port =
+ udp_spec->hdr.src_port;
+ filter->input.flow.udp6_flow.dst_port =
+ udp_spec->hdr.dst_port;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_SCTP:
+ sctp_spec =
+ (const struct rte_flow_item_sctp *)item->spec;
+ sctp_mask =
+ (const struct rte_flow_item_sctp *)item->mask;
+ if (!sctp_spec || !sctp_mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "NULL SCTP spec/mask");
+ return -rte_errno;
+ }
+
+ /* Check SCTP mask and update input set */
+ if (sctp_mask->hdr.cksum) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid UDP mask");
+ return -rte_errno;
+ }
+
+ if (sctp_mask->hdr.src_port != UINT16_MAX ||
+ sctp_mask->hdr.dst_port != UINT16_MAX ||
+ sctp_mask->hdr.tag != UINT32_MAX) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid UDP mask");
+ return -rte_errno;
+ }
+ input_set |= I40E_INSET_SRC_PORT;
+ input_set |= I40E_INSET_DST_PORT;
+ input_set |= I40E_INSET_SCTP_VT;
+
+ /* Get filter info */
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+ flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_SCTP;
+ else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+ flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_SCTP;
+
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
+ filter->input.flow.sctp4_flow.src_port =
+ sctp_spec->hdr.src_port;
+ filter->input.flow.sctp4_flow.dst_port =
+ sctp_spec->hdr.dst_port;
+ filter->input.flow.sctp4_flow.verify_tag =
+ sctp_spec->hdr.tag;
+ } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
+ filter->input.flow.sctp6_flow.src_port =
+ sctp_spec->hdr.src_port;
+ filter->input.flow.sctp6_flow.dst_port =
+ sctp_spec->hdr.dst_port;
+ filter->input.flow.sctp6_flow.verify_tag =
+ sctp_spec->hdr.tag;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_VF:
+ vf_spec = (const struct rte_flow_item_vf *)item->spec;
+ filter->input.flow_ext.is_vf = 1;
+ filter->input.flow_ext.dst_id = vf_spec->id;
+ if (filter->input.flow_ext.is_vf &&
+ filter->input.flow_ext.dst_id >= pf->vf_num) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid VF ID for FDIR.");
+ return -rte_errno;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ pctype = i40e_flowtype_to_pctype(flow_type);
+ if (pctype == 0 || pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Unsupported flow type");
+ return -rte_errno;
+ }
+
+ if (input_set != i40e_get_default_input_set(pctype)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Invalid input set.");
+ return -rte_errno;
+ }
+ filter->input.flow_type = flow_type;
+
+ return 0;
+}
+
+/* Parse to get the action info of a FDIR filter.
+ * FDIR action supports QUEUE or (QUEUE + MARK).
+ */
+static int
+i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error,
+ struct rte_eth_fdir_filter *filter)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ const struct rte_flow_action *act;
+ const struct rte_flow_action_queue *act_q;
+ const struct rte_flow_action_mark *mark_spec;
+ uint32_t index = 0;
+
+ /* Check if the first non-void action is QUEUE or DROP. */
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
+ act->type != RTE_FLOW_ACTION_TYPE_DROP) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Invalid action.");
+ return -rte_errno;
+ }
+
+ act_q = (const struct rte_flow_action_queue *)act->conf;
+ filter->action.flex_off = 0;
+ if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE)
+ filter->action.behavior = RTE_ETH_FDIR_ACCEPT;
+ else
+ filter->action.behavior = RTE_ETH_FDIR_REJECT;
+
+ filter->action.report_status = RTE_ETH_FDIR_REPORT_ID;
+ filter->action.rx_queue = act_q->index;
+
+ if (filter->action.rx_queue >= pf->dev_data->nb_rx_queues) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "Invalid queue ID for FDIR.");
+ return -rte_errno;
+ }
+
+ /* Check if the next non-void item is MARK or END. */
+ index++;
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_MARK &&
+ act->type != RTE_FLOW_ACTION_TYPE_END) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Invalid action.");
+ return -rte_errno;
+ }
+
+ if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
+ mark_spec = (const struct rte_flow_action_mark *)act->conf;
+ filter->soft_id = mark_spec->id;
+
+ /* Check if the next non-void item is END */
+ index++;
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Invalid action.");
+ return -rte_errno;
+ }
+ }
+
+ return 0;
+}
+
+static int
+i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter)
+{
+ struct rte_eth_fdir_filter *fdir_filter =
+ &filter->fdir_filter;
+ int ret;
+
+ ret = i40e_flow_parse_fdir_pattern(dev, pattern, error, fdir_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_flow_parse_fdir_action(dev, actions, error, fdir_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_flow_parse_attr(attr, error);
+ if (ret)
+ return ret;
+
+ if (dev->data->dev_conf.fdir_conf.mode !=
+ RTE_FDIR_MODE_PERFECT) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "Check the mode in fdir_conf.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
static int
i40e_flow_validate(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v6 09/17] net/i40e: parse tunnel filter
2017-01-05 15:45 ` [dpdk-dev] [PATCH v6 " Beilei Xing
` (7 preceding siblings ...)
2017-01-05 15:46 ` [dpdk-dev] [PATCH v6 08/17] net/i40e: parse flow director filter Beilei Xing
@ 2017-01-05 15:46 ` Beilei Xing
2017-01-05 15:46 ` [dpdk-dev] [PATCH v6 10/17] net/i40e: add flow create function Beilei Xing
` (9 subsequent siblings)
18 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2017-01-05 15:46 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
This patch adds i40e_flow_parse_tunnel_filter to check
if a rule is a tunnel rule according to items of the
flow pattern, and the function also gets the tunnel info.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_flow.c | 412 +++++++++++++++++++++++++++++++++++++++++++
1 file changed, 412 insertions(+)
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index 2eead93..ed87624 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -55,6 +55,8 @@
#define I40E_IPV4_TC_SHIFT 4
#define I40E_IPV6_TC_MASK (0x00FF << I40E_IPV4_TC_SHIFT)
#define I40E_IPV6_FRAG_HEADER 44
+#define I40E_TENANT_ARRAY_NUM 3
+#define I40E_TCI_MASK 0xFFFF
static int i40e_flow_validate(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
@@ -78,6 +80,14 @@ static int i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
const struct rte_flow_action *actions,
struct rte_flow_error *error,
struct rte_eth_fdir_filter *filter);
+static int i40e_flow_parse_tunnel_pattern(__rte_unused struct rte_eth_dev *dev,
+ const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ struct rte_eth_tunnel_filter_conf *filter);
+static int i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error,
+ struct rte_eth_tunnel_filter_conf *filter);
static int i40e_flow_parse_attr(const struct rte_flow_attr *attr,
struct rte_flow_error *error);
static int i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
@@ -92,6 +102,12 @@ static int i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
const struct rte_flow_action actions[],
struct rte_flow_error *error,
union i40e_filter_t *filter);
+static int i40e_flow_parse_tunnel_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter);
const struct rte_flow_ops i40e_flow_ops = {
.validate = i40e_flow_validate,
@@ -206,6 +222,45 @@ static enum rte_flow_item_type pattern_fdir_ipv6_sctp_ext[] = {
RTE_FLOW_ITEM_TYPE_END,
};
+/* Pattern matched tunnel filter */
+static enum rte_flow_item_type pattern_vxlan_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_VXLAN,
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_vxlan_2[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_VXLAN,
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_vxlan_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_VXLAN,
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_vxlan_4[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_VXLAN,
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
static struct i40e_valid_pattern i40e_supported_patterns[] = {
/* Ethertype */
{ pattern_ethertype, i40e_flow_parse_ethertype_filter },
@@ -226,6 +281,11 @@ static struct i40e_valid_pattern i40e_supported_patterns[] = {
{ pattern_fdir_ipv6_tcp_ext, i40e_flow_parse_fdir_filter },
{ pattern_fdir_ipv6_sctp, i40e_flow_parse_fdir_filter },
{ pattern_fdir_ipv6_sctp_ext, i40e_flow_parse_fdir_filter },
+ /* tunnel */
+ { pattern_vxlan_1, i40e_flow_parse_tunnel_filter },
+ { pattern_vxlan_2, i40e_flow_parse_tunnel_filter },
+ { pattern_vxlan_3, i40e_flow_parse_tunnel_filter },
+ { pattern_vxlan_4, i40e_flow_parse_tunnel_filter },
};
#define NEXT_ITEM_OF_ACTION(act, actions, index) \
@@ -1045,6 +1105,358 @@ i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
return 0;
}
+/* Parse to get the action info of a tunnel filter
+ * Tunnel action only supports QUEUE.
+ */
+static int
+i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error,
+ struct rte_eth_tunnel_filter_conf *filter)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ const struct rte_flow_action *act;
+ const struct rte_flow_action_queue *act_q;
+ uint32_t index = 0;
+
+ /* Check if the first non-void action is QUEUE. */
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ act_q = (const struct rte_flow_action_queue *)act->conf;
+ filter->queue_id = act_q->index;
+ if (filter->queue_id >= pf->dev_data->nb_rx_queues) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Invalid queue ID for tunnel filter");
+ return -rte_errno;
+ }
+
+ /* Check if the next non-void item is END */
+ index++;
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+static int
+i40e_check_tenant_id_mask(const uint8_t *mask)
+{
+ uint32_t j;
+ int is_masked = 0;
+
+ for (j = 0; j < I40E_TENANT_ARRAY_NUM; j++) {
+ if (*(mask + j) == UINT8_MAX) {
+ if (j > 0 && (*(mask + j) != *(mask + j - 1)))
+ return -EINVAL;
+ is_masked = 0;
+ } else if (*(mask + j) == 0) {
+ if (j > 0 && (*(mask + j) != *(mask + j - 1)))
+ return -EINVAL;
+ is_masked = 1;
+ } else {
+ return -EINVAL;
+ }
+ }
+
+ return is_masked;
+}
+
+/* 1. Last in item should be NULL as range is not supported.
+ * 2. Supported filter types: IMAC_IVLAN_TENID, IMAC_IVLAN,
+ * IMAC_TENID, OMAC_TENID_IMAC and IMAC.
+ * 3. Mask of fields which need to be matched should be
+ * filled with 1.
+ * 4. Mask of fields which needn't to be matched should be
+ * filled with 0.
+ */
+static int
+i40e_flow_parse_vxlan_pattern(const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ struct rte_eth_tunnel_filter_conf *filter)
+{
+ const struct rte_flow_item *item = pattern;
+ const struct rte_flow_item_eth *eth_spec;
+ const struct rte_flow_item_eth *eth_mask;
+ const struct rte_flow_item_eth *o_eth_spec = NULL;
+ const struct rte_flow_item_eth *o_eth_mask = NULL;
+ const struct rte_flow_item_vxlan *vxlan_spec = NULL;
+ const struct rte_flow_item_vxlan *vxlan_mask = NULL;
+ const struct rte_flow_item_eth *i_eth_spec = NULL;
+ const struct rte_flow_item_eth *i_eth_mask = NULL;
+ const struct rte_flow_item_vlan *vlan_spec = NULL;
+ const struct rte_flow_item_vlan *vlan_mask = NULL;
+ bool is_vni_masked = 0;
+ enum rte_flow_item_type item_type;
+ bool vxlan_flag = 0;
+
+ for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Not support range");
+ return -rte_errno;
+ }
+ item_type = item->type;
+ switch (item_type) {
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ eth_spec = (const struct rte_flow_item_eth *)item->spec;
+ eth_mask = (const struct rte_flow_item_eth *)item->mask;
+ if ((!eth_spec && eth_mask) ||
+ (eth_spec && !eth_mask)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid ether spec/mask");
+ return -rte_errno;
+ }
+
+ if (eth_spec && eth_mask) {
+ /* DST address of inner MAC shouldn't be masked.
+ * SRC address of Inner MAC should be masked.
+ */
+ if (!is_broadcast_ether_addr(ð_mask->dst) ||
+ !is_zero_ether_addr(ð_mask->src) ||
+ eth_mask->type) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid ether spec/mask");
+ return -rte_errno;
+ }
+
+ if (!vxlan_flag)
+ rte_memcpy(&filter->outer_mac,
+ ð_spec->dst,
+ ETHER_ADDR_LEN);
+ else
+ rte_memcpy(&filter->inner_mac,
+ ð_spec->dst,
+ ETHER_ADDR_LEN);
+ }
+
+ if (!vxlan_flag) {
+ o_eth_spec = eth_spec;
+ o_eth_mask = eth_mask;
+ } else {
+ i_eth_spec = eth_spec;
+ i_eth_mask = eth_mask;
+ }
+
+ break;
+ case RTE_FLOW_ITEM_TYPE_VLAN:
+ vlan_spec =
+ (const struct rte_flow_item_vlan *)item->spec;
+ vlan_mask =
+ (const struct rte_flow_item_vlan *)item->mask;
+ if (vxlan_flag) {
+ vlan_spec =
+ (const struct rte_flow_item_vlan *)item->spec;
+ vlan_mask =
+ (const struct rte_flow_item_vlan *)item->mask;
+ if (!(vlan_spec && vlan_mask)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid vlan item");
+ return -rte_errno;
+ }
+ } else {
+ if (vlan_spec || vlan_mask)
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid vlan item");
+ return -rte_errno;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ /* IPv4/IPv6/UDP are used to describe protocol,
+			 * spec and mask should be NULL.
+ */
+ if (item->spec || item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid IPv4 item");
+ return -rte_errno;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_VXLAN:
+ vxlan_spec =
+ (const struct rte_flow_item_vxlan *)item->spec;
+ vxlan_mask =
+ (const struct rte_flow_item_vxlan *)item->mask;
+ /* Check if VXLAN item is used to describe protocol.
+ * If yes, both spec and mask should be NULL.
+ * If no, either spec or mask shouldn't be NULL.
+ */
+ if ((!vxlan_spec && vxlan_mask) ||
+ (vxlan_spec && !vxlan_mask)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid VXLAN item");
+ return -rte_errno;
+ }
+
+ /* Check if VNI is masked. */
+ if (vxlan_mask) {
+ is_vni_masked =
+ i40e_check_tenant_id_mask(vxlan_mask->vni);
+ if (is_vni_masked < 0) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid VNI mask");
+ return -rte_errno;
+ }
+ }
+ vxlan_flag = 1;
+ break;
+ default:
+ break;
+ }
+ }
+
+ /* Check specification and mask to get the filter type */
+ if (vlan_spec && vlan_mask &&
+ (vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK))) {
+ /* If there's inner vlan */
+ filter->inner_vlan = rte_be_to_cpu_16(vlan_spec->tci)
+ & I40E_TCI_MASK;
+ if (vxlan_spec && vxlan_mask && !is_vni_masked) {
+ /* If there's vxlan */
+ rte_memcpy(&filter->tenant_id, vxlan_spec->vni,
+ RTE_DIM(vxlan_spec->vni));
+ if (!o_eth_spec && !o_eth_mask &&
+ i_eth_spec && i_eth_mask)
+ filter->filter_type =
+ RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID;
+ else {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL,
+ "Invalid filter type");
+ return -rte_errno;
+ }
+ } else if (!vxlan_spec && !vxlan_mask) {
+ /* If there's no vxlan */
+ if (!o_eth_spec && !o_eth_mask &&
+ i_eth_spec && i_eth_mask)
+ filter->filter_type =
+ RTE_TUNNEL_FILTER_IMAC_IVLAN;
+ else {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL,
+ "Invalid filter type");
+ return -rte_errno;
+ }
+ } else {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL,
+ "Invalid filter type");
+ return -rte_errno;
+ }
+ } else if ((!vlan_spec && !vlan_mask) ||
+ (vlan_spec && vlan_mask && vlan_mask->tci == 0x0)) {
+ /* If there's no inner vlan */
+ if (vxlan_spec && vxlan_mask && !is_vni_masked) {
+ /* If there's vxlan */
+ rte_memcpy(&filter->tenant_id, vxlan_spec->vni,
+ RTE_DIM(vxlan_spec->vni));
+ if (!o_eth_spec && !o_eth_mask &&
+ i_eth_spec && i_eth_mask)
+ filter->filter_type =
+ RTE_TUNNEL_FILTER_IMAC_TENID;
+ else if (o_eth_spec && o_eth_mask &&
+ i_eth_spec && i_eth_mask)
+ filter->filter_type =
+ RTE_TUNNEL_FILTER_OMAC_TENID_IMAC;
+ } else if (!vxlan_spec && !vxlan_mask) {
+ /* If there's no vxlan */
+ if (!o_eth_spec && !o_eth_mask &&
+ i_eth_spec && i_eth_mask) {
+ filter->filter_type = ETH_TUNNEL_FILTER_IMAC;
+ } else {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "Invalid filter type");
+ return -rte_errno;
+ }
+ } else {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "Invalid filter type");
+ return -rte_errno;
+ }
+ } else {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "Not supported by tunnel filter.");
+ return -rte_errno;
+ }
+
+ filter->tunnel_type = RTE_TUNNEL_TYPE_VXLAN;
+
+ return 0;
+}
+
+static int
+i40e_flow_parse_tunnel_pattern(__rte_unused struct rte_eth_dev *dev,
+ const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ struct rte_eth_tunnel_filter_conf *filter)
+{
+ int ret;
+
+ ret = i40e_flow_parse_vxlan_pattern(pattern, error, filter);
+
+ return ret;
+}
+
+static int
+i40e_flow_parse_tunnel_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter)
+{
+ struct rte_eth_tunnel_filter_conf *tunnel_filter =
+ &filter->tunnel_filter;
+ int ret;
+
+ ret = i40e_flow_parse_tunnel_pattern(dev, pattern,
+ error, tunnel_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_flow_parse_attr(attr, error);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
static int
i40e_flow_validate(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v6 10/17] net/i40e: add flow create function
2017-01-05 15:45 ` [dpdk-dev] [PATCH v6 " Beilei Xing
` (8 preceding siblings ...)
2017-01-05 15:46 ` [dpdk-dev] [PATCH v6 09/17] net/i40e: parse tunnel filter Beilei Xing
@ 2017-01-05 15:46 ` Beilei Xing
2017-01-05 17:47 ` Ferruh Yigit
2017-01-05 15:46 ` [dpdk-dev] [PATCH v6 11/17] net/i40e: add flow destroy function Beilei Xing
` (8 subsequent siblings)
18 siblings, 1 reply; 175+ messages in thread
From: Beilei Xing @ 2017-01-05 15:46 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
This patch adds i40e_flow_create function to create a
rule. It will check if a flow matches ethertype filter
or flow director filter or tunnel filter, if the flow
matches some kind of filter, then set the filter to HW.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_ethdev.c | 16 ++++++---
drivers/net/i40e/i40e_ethdev.h | 21 ++++++++++++
drivers/net/i40e/i40e_fdir.c | 2 +-
drivers/net/i40e/i40e_flow.c | 77 ++++++++++++++++++++++++++++++++++++++++++
4 files changed, 110 insertions(+), 6 deletions(-)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 81ed13e..52e3047 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -348,9 +348,6 @@ static int i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
static int i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
struct rte_eth_udp_tunnel *udp_tunnel);
static void i40e_filter_input_set_init(struct i40e_pf *pf);
-static int i40e_ethertype_filter_set(struct i40e_pf *pf,
- struct rte_eth_ethertype_filter *filter,
- bool add);
static int i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
enum rte_filter_op filter_op,
void *arg);
@@ -1230,6 +1227,8 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
goto err_fdir_hash_map_alloc;
}
+ TAILQ_INIT(&pf->flow_list);
+
return 0;
err_fdir_hash_map_alloc:
@@ -1271,6 +1270,7 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
struct rte_intr_handle *intr_handle;
struct i40e_hw *hw;
struct i40e_filter_control_settings settings;
+ struct i40e_flow *p_flow;
struct i40e_ethertype_filter *p_ethertype;
struct i40e_tunnel_filter *p_tunnel;
struct i40e_fdir_filter *p_fdir;
@@ -1296,6 +1296,12 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
if (hw->adapter_stopped == 0)
i40e_dev_close(dev);
+ /* Remove all flows */
+ while ((p_flow = TAILQ_FIRST(&pf->flow_list))) {
+ TAILQ_REMOVE(&pf->flow_list, p_flow, node);
+ rte_free(p_flow);
+ }
+
/* Remove all ethertype director rules and hash */
if (ethertype_rule->hash_map)
rte_free(ethertype_rule->hash_map);
@@ -6621,7 +6627,7 @@ i40e_sw_tunnel_filter_del(struct i40e_pf *pf,
return 0;
}
-static int
+int
i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
struct rte_eth_tunnel_filter_conf *tunnel_filter,
uint8_t add)
@@ -8266,7 +8272,7 @@ i40e_sw_ethertype_filter_del(struct i40e_pf *pf,
* Configure ethertype filter, which can director packet by filtering
* with mac address and ether_type or only ether_type
*/
-static int
+int
i40e_ethertype_filter_set(struct i40e_pf *pf,
struct rte_eth_ethertype_filter *filter,
bool add)
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 4597615..0088351 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -541,6 +541,17 @@ struct i40e_mirror_rule {
TAILQ_HEAD(i40e_mirror_rule_list, i40e_mirror_rule);
/*
+ * Struct to store flow created.
+ */
+struct i40e_flow {
+ TAILQ_ENTRY(i40e_flow) node;
+ enum rte_filter_type filter_type;
+ void *rule;
+};
+
+TAILQ_HEAD(i40e_flow_list, i40e_flow);
+
+/*
* Structure to store private data specific for PF instance.
*/
struct i40e_pf {
@@ -597,6 +608,7 @@ struct i40e_pf {
bool floating_veb; /* The flag to use the floating VEB */
/* The floating enable flag for the specific VF */
bool floating_veb_list[I40E_MAX_VF];
+ struct i40e_flow_list flow_list;
};
enum pending_msg {
@@ -772,6 +784,15 @@ i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_rule *tunnel_rule,
int i40e_sw_tunnel_filter_del(struct i40e_pf *pf,
struct i40e_tunnel_filter_input *input);
uint64_t i40e_get_default_input_set(uint16_t pctype);
+int i40e_ethertype_filter_set(struct i40e_pf *pf,
+ struct rte_eth_ethertype_filter *filter,
+ bool add);
+int i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
+ const struct rte_eth_fdir_filter *filter,
+ bool add);
+int i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
+ struct rte_eth_tunnel_filter_conf *tunnel_filter,
+ uint8_t add);
#define I40E_DEV_TO_PCI(eth_dev) \
RTE_DEV_TO_PCI((eth_dev)->device)
diff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c
index f89dbc9..91d91aa 100644
--- a/drivers/net/i40e/i40e_fdir.c
+++ b/drivers/net/i40e/i40e_fdir.c
@@ -1099,7 +1099,7 @@ i40e_sw_fdir_filter_del(struct i40e_pf *pf, struct rte_eth_fdir_input *input)
* @filter: fdir filter entry
* @add: 0 - delete, 1 - add
*/
-static int
+int
i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
const struct rte_eth_fdir_filter *filter,
bool add)
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index ed87624..cc1656a 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -63,6 +63,11 @@ static int i40e_flow_validate(struct rte_eth_dev *dev,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
struct rte_flow_error *error);
+static struct rte_flow *i40e_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error);
static int
i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
const struct rte_flow_item *pattern,
@@ -111,9 +116,11 @@ static int i40e_flow_parse_tunnel_filter(struct rte_eth_dev *dev,
const struct rte_flow_ops i40e_flow_ops = {
.validate = i40e_flow_validate,
+ .create = i40e_flow_create,
};
union i40e_filter_t cons_filter;
+enum rte_filter_type cons_filter_type = RTE_ETH_FILTER_NONE;
/* Pattern matched ethertype filter */
static enum rte_flow_item_type pattern_ethertype[] = {
@@ -615,6 +622,8 @@ i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
if (ret)
return ret;
+ cons_filter_type = RTE_ETH_FILTER_ETHERTYPE;
+
return ret;
}
@@ -1093,6 +1102,8 @@ i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
if (ret)
return ret;
+ cons_filter_type = RTE_ETH_FILTER_FDIR;
+
if (dev->data->dev_conf.fdir_conf.mode !=
RTE_FDIR_MODE_PERFECT) {
rte_flow_error_set(error, ENOTSUP,
@@ -1454,6 +1465,8 @@ i40e_flow_parse_tunnel_filter(struct rte_eth_dev *dev,
if (ret)
return ret;
+ cons_filter_type = RTE_ETH_FILTER_TUNNEL;
+
return ret;
}
@@ -1525,3 +1538,67 @@ i40e_flow_validate(struct rte_eth_dev *dev,
return ret;
}
+
+static struct rte_flow *
+i40e_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_flow *flow;
+ int ret;
+
+ flow = rte_zmalloc("i40e_flow", sizeof(struct i40e_flow), 0);
+ if (!flow) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to allocate memory");
+ return (struct rte_flow *)flow;
+ }
+
+ ret = i40e_flow_validate(dev, attr, pattern, actions, error);
+ if (ret < 0)
+ return NULL;
+
+ switch (cons_filter_type) {
+ case RTE_ETH_FILTER_ETHERTYPE:
+ ret = i40e_ethertype_filter_set(pf,
+ &cons_filter.ethertype_filter, 1);
+ if (ret)
+ goto free_flow;
+ flow->rule = TAILQ_LAST(&pf->ethertype.ethertype_list,
+ i40e_ethertype_filter_list);
+ break;
+ case RTE_ETH_FILTER_FDIR:
+ ret = i40e_add_del_fdir_filter(dev,
+ &cons_filter.fdir_filter, 1);
+ if (ret)
+ goto free_flow;
+ flow->rule = TAILQ_LAST(&pf->fdir.fdir_list,
+ i40e_fdir_filter_list);
+ break;
+ case RTE_ETH_FILTER_TUNNEL:
+ ret = i40e_dev_tunnel_filter_set(pf,
+ &cons_filter.tunnel_filter, 1);
+ if (ret)
+ goto free_flow;
+ flow->rule = TAILQ_LAST(&pf->tunnel.tunnel_list,
+ i40e_tunnel_filter_list);
+ break;
+ default:
+ goto free_flow;
+ }
+
+ flow->filter_type = cons_filter_type;
+ TAILQ_INSERT_TAIL(&pf->flow_list, flow, node);
+ return (struct rte_flow *)flow;
+
+free_flow:
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to create flow.");
+ rte_free(flow);
+ return NULL;
+}
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v6 11/17] net/i40e: add flow destroy function
2017-01-05 15:45 ` [dpdk-dev] [PATCH v6 " Beilei Xing
` (9 preceding siblings ...)
2017-01-05 15:46 ` [dpdk-dev] [PATCH v6 10/17] net/i40e: add flow create function Beilei Xing
@ 2017-01-05 15:46 ` Beilei Xing
2017-01-05 15:46 ` [dpdk-dev] [PATCH v6 12/17] net/i40e: destroy ethertype filter Beilei Xing
` (7 subsequent siblings)
18 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2017-01-05 15:46 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
This patch adds i40e_flow_destroy function to destroy
a flow for users.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_flow.c | 33 +++++++++++++++++++++++++++++++++
1 file changed, 33 insertions(+)
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index cc1656a..4daaca0 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -68,6 +68,9 @@ static struct rte_flow *i40e_flow_create(struct rte_eth_dev *dev,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
struct rte_flow_error *error);
+static int i40e_flow_destroy(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ struct rte_flow_error *error);
static int
i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
const struct rte_flow_item *pattern,
@@ -117,6 +120,7 @@ static int i40e_flow_parse_tunnel_filter(struct rte_eth_dev *dev,
const struct rte_flow_ops i40e_flow_ops = {
.validate = i40e_flow_validate,
.create = i40e_flow_create,
+ .destroy = i40e_flow_destroy,
};
union i40e_filter_t cons_filter;
@@ -1602,3 +1606,32 @@ i40e_flow_create(struct rte_eth_dev *dev,
rte_free(flow);
return NULL;
}
+
+static int
+i40e_flow_destroy(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_flow *pmd_flow = (struct i40e_flow *)flow;
+ enum rte_filter_type filter_type = pmd_flow->filter_type;
+ int ret = 0;
+
+ switch (filter_type) {
+ default:
+ PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
+ filter_type);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (!ret) {
+ TAILQ_REMOVE(&pf->flow_list, pmd_flow, node);
+ rte_free(pmd_flow);
+ } else
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to destroy flow.");
+
+ return ret;
+}
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v6 12/17] net/i40e: destroy ethertype filter
2017-01-05 15:45 ` [dpdk-dev] [PATCH v6 " Beilei Xing
` (10 preceding siblings ...)
2017-01-05 15:46 ` [dpdk-dev] [PATCH v6 11/17] net/i40e: add flow destroy function Beilei Xing
@ 2017-01-05 15:46 ` Beilei Xing
2017-01-05 15:46 ` [dpdk-dev] [PATCH v6 13/17] net/i40e: destroy tunnel filter Beilei Xing
` (6 subsequent siblings)
18 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2017-01-05 15:46 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
This patch adds the i40e_flow_destroy_ethertype_filter
function to destroy an ethertype filter for users.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_flow.c | 41 +++++++++++++++++++++++++++++++++++++++++
1 file changed, 41 insertions(+)
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index 4daaca0..fc633cd 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -116,6 +116,8 @@ static int i40e_flow_parse_tunnel_filter(struct rte_eth_dev *dev,
const struct rte_flow_action actions[],
struct rte_flow_error *error,
union i40e_filter_t *filter);
+static int i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
+ struct i40e_ethertype_filter *filter);
const struct rte_flow_ops i40e_flow_ops = {
.validate = i40e_flow_validate,
@@ -1618,6 +1620,10 @@ i40e_flow_destroy(struct rte_eth_dev *dev,
int ret = 0;
switch (filter_type) {
+ case RTE_ETH_FILTER_ETHERTYPE:
+ ret = i40e_flow_destroy_ethertype_filter(pf,
+ (struct i40e_ethertype_filter *)pmd_flow->rule);
+ break;
default:
PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
filter_type);
@@ -1635,3 +1641,38 @@ i40e_flow_destroy(struct rte_eth_dev *dev,
return ret;
}
+
/* Remove an ethertype filter from both the NIC and the driver's
 * software list.
 *
 * @pf: board private structure
 * @filter: the ethertype filter to destroy; its ->input is used to
 *          look up the SW-list node
 *
 * Returns 0 on success, a negative value if the admin-queue removal
 * fails, or -EINVAL if the filter is not found in the SW list.
 */
static int
i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
				   struct i40e_ethertype_filter *filter)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
	struct i40e_ethertype_filter *node;
	struct i40e_control_filter_stats stats;
	uint16_t flags = 0;
	int ret = 0;

	/* Rebuild the AQ flags the filter was added with so the same
	 * control packet filter is matched for removal.
	 */
	if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
		flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
	if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
		flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
	flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;

	memset(&stats, 0, sizeof(stats));
	/* The 0 argument selects removal (add/remove AQ command). */
	ret = i40e_aq_add_rem_control_packet_filter(hw,
			filter->input.mac_addr.addr_bytes,
			filter->input.ether_type,
			flags, pf->main_vsi->seid,
			filter->queue, 0, &stats, NULL);
	if (ret < 0)
		return ret;

	/* NOTE(review): HW removal happens before the SW lookup; if the
	 * node is missing, HW and SW state diverge — presumably callers
	 * always pass a filter present in the SW list; confirm.
	 */
	node = i40e_sw_ethertype_filter_lookup(ethertype_rule, &filter->input);
	if (!node)
		return -EINVAL;

	ret = i40e_sw_ethertype_filter_del(pf, &node->input);

	return ret;
}
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v6 13/17] net/i40e: destroy tunnel filter
2017-01-05 15:45 ` [dpdk-dev] [PATCH v6 " Beilei Xing
` (11 preceding siblings ...)
2017-01-05 15:46 ` [dpdk-dev] [PATCH v6 12/17] net/i40e: destroy ethertype filter Beilei Xing
@ 2017-01-05 15:46 ` Beilei Xing
2017-01-05 15:46 ` [dpdk-dev] [PATCH v6 14/17] net/i40e: destroy flow directory filter Beilei Xing
` (5 subsequent siblings)
18 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2017-01-05 15:46 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
This patch adds i40e_flow_destroy_tunnel_filter
function to destroy a tunnel filter for users.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_flow.c | 41 +++++++++++++++++++++++++++++++++++++++++
1 file changed, 41 insertions(+)
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index fc633cd..2603e9e 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -118,6 +118,8 @@ static int i40e_flow_parse_tunnel_filter(struct rte_eth_dev *dev,
union i40e_filter_t *filter);
static int i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
struct i40e_ethertype_filter *filter);
+static int i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
+ struct i40e_tunnel_filter *filter);
const struct rte_flow_ops i40e_flow_ops = {
.validate = i40e_flow_validate,
@@ -1624,6 +1626,10 @@ i40e_flow_destroy(struct rte_eth_dev *dev,
ret = i40e_flow_destroy_ethertype_filter(pf,
(struct i40e_ethertype_filter *)pmd_flow->rule);
break;
+ case RTE_ETH_FILTER_TUNNEL:
+ ret = i40e_flow_destroy_tunnel_filter(pf,
+ (struct i40e_tunnel_filter *)pmd_flow->rule);
+ break;
default:
PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
filter_type);
@@ -1676,3 +1682,38 @@ i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
return ret;
}
+
/* Remove a tunnel (cloud) filter from both the NIC and the driver's
 * software list.
 *
 * @pf: board private structure
 * @filter: the tunnel filter to destroy; its ->input is used to look
 *          up the SW-list node
 *
 * Returns 0 on success, a negative value if the admin-queue removal
 * fails, or -EINVAL if the filter is not found in the SW list.
 */
static int
i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
				struct i40e_tunnel_filter *filter)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	struct i40e_vsi *vsi = pf->main_vsi;
	struct i40e_aqc_add_remove_cloud_filters_element_data cld_filter;
	struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
	struct i40e_tunnel_filter *node;
	int ret = 0;

	/* Translate the stored SW filter back into the AQ cloud-filter
	 * element so HW can match the filter to remove.
	 */
	memset(&cld_filter, 0, sizeof(cld_filter));
	ether_addr_copy((struct ether_addr *)&filter->input.outer_mac,
			(struct ether_addr *)&cld_filter.outer_mac);
	ether_addr_copy((struct ether_addr *)&filter->input.inner_mac,
			(struct ether_addr *)&cld_filter.inner_mac);
	cld_filter.inner_vlan = filter->input.inner_vlan;
	cld_filter.flags = filter->input.flags;
	cld_filter.tenant_id = filter->input.tenant_id;
	cld_filter.queue_number = filter->queue;

	ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
					   &cld_filter, 1);
	if (ret < 0)
		return ret;

	/* NOTE(review): as with the ethertype path, HW removal precedes
	 * the SW lookup — confirm callers pass a filter present in the
	 * SW list so the two never diverge.
	 */
	node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &filter->input);
	if (!node)
		return -EINVAL;

	ret = i40e_sw_tunnel_filter_del(pf, &node->input);

	return ret;
}
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v6 14/17] net/i40e: destroy flow directory filter
2017-01-05 15:45 ` [dpdk-dev] [PATCH v6 " Beilei Xing
` (12 preceding siblings ...)
2017-01-05 15:46 ` [dpdk-dev] [PATCH v6 13/17] net/i40e: destroy tunnel filter Beilei Xing
@ 2017-01-05 15:46 ` Beilei Xing
2017-01-05 15:46 ` [dpdk-dev] [PATCH v6 15/17] net/i40e: add flow flush function Beilei Xing
` (4 subsequent siblings)
18 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2017-01-05 15:46 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
This patch supports destroying a flow director filter
for users.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_flow.c | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index 2603e9e..f56c404 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -1630,6 +1630,10 @@ i40e_flow_destroy(struct rte_eth_dev *dev,
ret = i40e_flow_destroy_tunnel_filter(pf,
(struct i40e_tunnel_filter *)pmd_flow->rule);
break;
+ case RTE_ETH_FILTER_FDIR:
+ ret = i40e_add_del_fdir_filter(dev,
+ &((struct i40e_fdir_filter *)pmd_flow->rule)->fdir, 0);
+ break;
default:
PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
filter_type);
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v6 15/17] net/i40e: add flow flush function
2017-01-05 15:45 ` [dpdk-dev] [PATCH v6 " Beilei Xing
` (13 preceding siblings ...)
2017-01-05 15:46 ` [dpdk-dev] [PATCH v6 14/17] net/i40e: destroy flow directory filter Beilei Xing
@ 2017-01-05 15:46 ` Beilei Xing
2017-01-05 15:46 ` [dpdk-dev] [PATCH v6 16/17] net/i40e: flush ethertype filters Beilei Xing
` (3 subsequent siblings)
18 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2017-01-05 15:46 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
This patch adds i40e_flow_flush function to flush all
filters for users. And flow director flush function
is involved first.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_ethdev.h | 1 +
drivers/net/i40e/i40e_fdir.c | 4 +---
drivers/net/i40e/i40e_flow.c | 51 ++++++++++++++++++++++++++++++++++++++++++
3 files changed, 53 insertions(+), 3 deletions(-)
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 0088351..198548a 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -793,6 +793,7 @@ int i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
int i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
struct rte_eth_tunnel_filter_conf *tunnel_filter,
uint8_t add);
+int i40e_fdir_flush(struct rte_eth_dev *dev);
#define I40E_DEV_TO_PCI(eth_dev) \
RTE_DEV_TO_PCI((eth_dev)->device)
diff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c
index 91d91aa..67d63ff 100644
--- a/drivers/net/i40e/i40e_fdir.c
+++ b/drivers/net/i40e/i40e_fdir.c
@@ -119,8 +119,6 @@ static int i40e_fdir_filter_programming(struct i40e_pf *pf,
enum i40e_filter_pctype pctype,
const struct rte_eth_fdir_filter *filter,
bool add);
-static int i40e_fdir_flush(struct rte_eth_dev *dev);
-
static int i40e_fdir_filter_convert(const struct rte_eth_fdir_filter *input,
struct i40e_fdir_filter *filter);
static struct i40e_fdir_filter *
@@ -1325,7 +1323,7 @@ i40e_fdir_filter_programming(struct i40e_pf *pf,
* i40e_fdir_flush - clear all filters of Flow Director table
* @pf: board private structure
*/
-static int
+int
i40e_fdir_flush(struct rte_eth_dev *dev)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index f56c404..f37bd76 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -71,6 +71,8 @@ static struct rte_flow *i40e_flow_create(struct rte_eth_dev *dev,
static int i40e_flow_destroy(struct rte_eth_dev *dev,
struct rte_flow *flow,
struct rte_flow_error *error);
+static int i40e_flow_flush(struct rte_eth_dev *dev,
+ struct rte_flow_error *error);
static int
i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
const struct rte_flow_item *pattern,
@@ -120,11 +122,13 @@ static int i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
struct i40e_ethertype_filter *filter);
static int i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
struct i40e_tunnel_filter *filter);
+static int i40e_flow_flush_fdir_filter(struct i40e_pf *pf);
const struct rte_flow_ops i40e_flow_ops = {
.validate = i40e_flow_validate,
.create = i40e_flow_create,
.destroy = i40e_flow_destroy,
+ .flush = i40e_flow_flush,
};
union i40e_filter_t cons_filter;
@@ -1721,3 +1725,50 @@ i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
return ret;
}
+
+static int
+i40e_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ int ret;
+
+ ret = i40e_flow_flush_fdir_filter(pf);
+ if (ret)
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to flush FDIR flows.");
+
+ return ret;
+}
+
/* Flush all flow director filters: clear the whole HW FDIR table in
 * one shot, then drop the SW bookkeeping (the FDIR filter list and the
 * FDIR entries of the flow list).
 *
 * @pf: board private structure
 * Returns 0 on success, a negative value otherwise.
 */
static int
i40e_flow_flush_fdir_filter(struct i40e_pf *pf)
{
	struct rte_eth_dev *dev = pf->adapter->eth_dev;
	struct i40e_fdir_info *fdir_info = &pf->fdir;
	struct i40e_fdir_filter *fdir_filter;
	struct i40e_flow *flow;
	void *temp;
	int ret;

	/* Clear every FDIR filter in HW at once. */
	ret = i40e_fdir_flush(dev);
	if (!ret) {
		/* Delete FDIR filters in FDIR list; deletion unlinks the
		 * node, so always take the list head.
		 */
		while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
			ret = i40e_sw_fdir_filter_del(pf,
						      &fdir_filter->fdir.input);
			if (ret < 0)
				return ret;
		}

		/* Delete FDIR flows in flow list. */
		TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
			if (flow->filter_type == RTE_ETH_FILTER_FDIR) {
				TAILQ_REMOVE(&pf->flow_list, flow, node);
				rte_free(flow);
			}
		}
	}

	return ret;
}
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v6 16/17] net/i40e: flush ethertype filters
2017-01-05 15:45 ` [dpdk-dev] [PATCH v6 " Beilei Xing
` (14 preceding siblings ...)
2017-01-05 15:46 ` [dpdk-dev] [PATCH v6 15/17] net/i40e: add flow flush function Beilei Xing
@ 2017-01-05 15:46 ` Beilei Xing
2017-01-05 15:46 ` [dpdk-dev] [PATCH v6 17/17] net/i40e: flush tunnel filters Beilei Xing
` (2 subsequent siblings)
18 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2017-01-05 15:46 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
This patch adds i40e_flow_flush_ethertype_filter
function to flush all ethertype filters, including
filters in SW and HW.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_flow.c | 41 ++++++++++++++++++++++++++++++++++++++++-
1 file changed, 40 insertions(+), 1 deletion(-)
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index f37bd76..1909455 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -123,6 +123,7 @@ static int i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
static int i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
struct i40e_tunnel_filter *filter);
static int i40e_flow_flush_fdir_filter(struct i40e_pf *pf);
+static int i40e_flow_flush_ethertype_filter(struct i40e_pf *pf);
const struct rte_flow_ops i40e_flow_ops = {
.validate = i40e_flow_validate,
@@ -1733,10 +1734,20 @@ i40e_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
int ret;
ret = i40e_flow_flush_fdir_filter(pf);
- if (ret)
+ if (ret) {
rte_flow_error_set(error, -ret,
RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
"Failed to flush FDIR flows.");
+ return -rte_errno;
+ }
+
+ ret = i40e_flow_flush_ethertype_filter(pf);
+ if (ret) {
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Failed to flush ethertype flows.");
+ return -rte_errno;
+ }
return ret;
}
@@ -1772,3 +1783,31 @@ i40e_flow_flush_fdir_filter(struct i40e_pf *pf)
return ret;
}
+
+/* Flush all ethertype filters */
+static int
+i40e_flow_flush_ethertype_filter(struct i40e_pf *pf)
+{
+ struct i40e_ethertype_filter_list
+ *ethertype_list = &pf->ethertype.ethertype_list;
+ struct i40e_ethertype_filter *filter;
+ struct i40e_flow *flow;
+ void *temp;
+ int ret = 0;
+
+ while ((filter = TAILQ_FIRST(ethertype_list))) {
+ ret = i40e_flow_destroy_ethertype_filter(pf, filter);
+ if (ret)
+ return ret;
+ }
+
+ /* Delete ethertype flows in flow list. */
+ TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
+ if (flow->filter_type == RTE_ETH_FILTER_ETHERTYPE) {
+ TAILQ_REMOVE(&pf->flow_list, flow, node);
+ rte_free(flow);
+ }
+ }
+
+ return ret;
+}
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v6 17/17] net/i40e: flush tunnel filters
2017-01-05 15:45 ` [dpdk-dev] [PATCH v6 " Beilei Xing
` (15 preceding siblings ...)
2017-01-05 15:46 ` [dpdk-dev] [PATCH v6 16/17] net/i40e: flush ethertype filters Beilei Xing
@ 2017-01-05 15:46 ` Beilei Xing
2017-01-05 17:46 ` [dpdk-dev] [PATCH v6 00/17] net/i40e: consistent filter API Ferruh Yigit
2017-01-06 5:27 ` [dpdk-dev] [PATCH v7 " Beilei Xing
18 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2017-01-05 15:46 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
This patch adds i40e_flow_flush_tunnel_filter
function to flush all tunnel filters, including
filters in SW and HW.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_flow.c | 37 +++++++++++++++++++++++++++++++++++++
1 file changed, 37 insertions(+)
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index 1909455..e080a00 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -124,6 +124,7 @@ static int i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
struct i40e_tunnel_filter *filter);
static int i40e_flow_flush_fdir_filter(struct i40e_pf *pf);
static int i40e_flow_flush_ethertype_filter(struct i40e_pf *pf);
+static int i40e_flow_flush_tunnel_filter(struct i40e_pf *pf);
const struct rte_flow_ops i40e_flow_ops = {
.validate = i40e_flow_validate,
@@ -1749,6 +1750,14 @@ i40e_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
return -rte_errno;
}
+ ret = i40e_flow_flush_tunnel_filter(pf);
+ if (ret) {
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to flush tunnel flows.");
+ return -rte_errno;
+ }
+
return ret;
}
@@ -1811,3 +1820,31 @@ i40e_flow_flush_ethertype_filter(struct i40e_pf *pf)
return ret;
}
+
/* Flush all tunnel filters: remove each one from HW and the SW list,
 * then release the tunnel entries of the flow list.
 *
 * @pf: board private structure
 * Returns 0 on success or the first non-zero destroy error.
 */
static int
i40e_flow_flush_tunnel_filter(struct i40e_pf *pf)
{
	struct i40e_tunnel_filter_list
		*tunnel_list = &pf->tunnel.tunnel_list;
	struct i40e_tunnel_filter *filter;
	struct i40e_flow *flow;
	void *temp;
	int ret = 0;

	/* i40e_flow_destroy_tunnel_filter() unlinks the node, so keep
	 * taking the list head until the list is empty.
	 */
	while ((filter = TAILQ_FIRST(tunnel_list))) {
		ret = i40e_flow_destroy_tunnel_filter(pf, filter);
		if (ret)
			return ret;
	}

	/* Delete tunnel flows in flow list. */
	TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
		if (flow->filter_type == RTE_ETH_FILTER_TUNNEL) {
			TAILQ_REMOVE(&pf->flow_list, flow, node);
			rte_free(flow);
		}
	}

	return ret;
}
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* Re: [dpdk-dev] [PATCH v6 00/17] net/i40e: consistent filter API
2017-01-05 15:45 ` [dpdk-dev] [PATCH v6 " Beilei Xing
` (16 preceding siblings ...)
2017-01-05 15:46 ` [dpdk-dev] [PATCH v6 17/17] net/i40e: flush tunnel filters Beilei Xing
@ 2017-01-05 17:46 ` Ferruh Yigit
2017-01-06 5:27 ` [dpdk-dev] [PATCH v7 " Beilei Xing
18 siblings, 0 replies; 175+ messages in thread
From: Ferruh Yigit @ 2017-01-05 17:46 UTC (permalink / raw)
To: Beilei Xing, jingjing.wu, helin.zhang; +Cc: dev
On 1/5/2017 3:45 PM, Beilei Xing wrote:
> The patch set depends on Adrien's Generic flow API(rte_flow).
>
> The patches mainly finish following functions:
> 1) Store and restore all kinds of filters.
> 2) Parse all kinds of filters.
> 3) Add flow validate function.
> 4) Add flow create function.
> 5) Add flow destroy function.
> 6) Add flow flush function.
>
> v6 changes:
> Change functions' name to be more readable.
> Add comments for parse_pattern functions to list supported rules.
> Add comments for parse_action functions to list supported actions.
> Add ETHTYPE check when parsing ethertype pattern.
>
> v5 changes:
> Change some local variable name.
> Add removing i40e_flow_list during device uninit.
> Fix compile error when gcc compile option isn't '-O0'.
>
> v4 changes:
> Change I40E_TCI_MASK with 0xFFFF to align with testpmd.
> Modify the stats shown when restoring filters.
>
> v3 changes:
> Set the related cause pointer to a non-NULL value when error happens.
> Change return value when error happens.
> Modify filter_del parameter with key.
> Malloc filter after checking when delete a filter.
> Delete meaningless initialization.
> Add return value when there's error.
> Change global variable definition.
> Modify some function declaration.
>
> v2 changes:
> Add i40e_flow.c, all flow ops are implemented in the file.
> Change the whole implementation of all parse flow functions.
> Update error info for all flow ops.
> Add flow_list to store flows created.
>
> Beilei Xing (17):
> net/i40e: store ethertype filter
> net/i40e: store tunnel filter
> net/i40e: store flow director filter
> net/i40e: restore ethertype filter
> net/i40e: restore tunnel filter
> net/i40e: restore flow director filter
> net/i40e: add flow validate function
> net/i40e: parse flow director filter
> net/i40e: parse tunnel filter
> net/i40e: add flow create function
> net/i40e: add flow destroy function
> net/i40e: destroy ethertype filter
> net/i40e: destroy tunnel filter
> net/i40e: destroy flow directory filter
> net/i40e: add flow flush function
> net/i40e: flush ethertype filters
> net/i40e: flush tunnel filters
>
Thanks Beilei,
Overall looks good to me, just two more comments (as reply to relevant
patches).
^ permalink raw reply [flat|nested] 175+ messages in thread
* Re: [dpdk-dev] [PATCH v6 01/17] net/i40e: store ethertype filter
2017-01-05 15:45 ` [dpdk-dev] [PATCH v6 01/17] net/i40e: store ethertype filter Beilei Xing
@ 2017-01-05 17:46 ` Ferruh Yigit
0 siblings, 0 replies; 175+ messages in thread
From: Ferruh Yigit @ 2017-01-05 17:46 UTC (permalink / raw)
To: Beilei Xing, jingjing.wu, helin.zhang; +Cc: dev
On 1/5/2017 3:45 PM, Beilei Xing wrote:
> Currently there's no ethertype filter stored in SW.
> This patch stores ethertype filter with cuckoo hash
> in SW, also adds protection if an ethertype filter
> has been added.
>
> Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> ---
<...>
> @@ -939,9 +946,18 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
> int ret;
> uint32_t len;
> uint8_t aq_fail = 0;
> + struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
>
> PMD_INIT_FUNC_TRACE();
>
> + char ethertype_hash_name[RTE_HASH_NAMESIZE];
> + struct rte_hash_parameters ethertype_hash_params = {
> + .name = ethertype_hash_name,
> + .entries = I40E_MAX_ETHERTYPE_FILTER_NUM,
> + .key_len = sizeof(struct i40e_ethertype_filter_input),
> + .hash_func = rte_hash_crc,
> + };
> +
> dev->dev_ops = &i40e_eth_dev_ops;
> dev->rx_pkt_burst = i40e_recv_pkts;
> dev->tx_pkt_burst = i40e_xmit_pkts;
> @@ -1182,8 +1198,33 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
> pf->flags &= ~I40E_FLAG_DCB;
> }
>
> + /* Initialize ethertype filter rule list and hash */
> + TAILQ_INIT(ðertype_rule->ethertype_list);
> + snprintf(ethertype_hash_name, RTE_HASH_NAMESIZE,
> + "ethertype_%s", dev->data->name);
> + ethertype_rule->hash_table = rte_hash_create(ðertype_hash_params);
> + if (!ethertype_rule->hash_table) {
> + PMD_INIT_LOG(ERR, "Failed to create ethertype hash table!");
> + ret = -EINVAL;
> + goto err_ethertype_hash_table_create;
> + }
> + ethertype_rule->hash_map = rte_zmalloc("i40e_ethertype_hash_map",
> + sizeof(struct i40e_ethertype_filter *) *
> + I40E_MAX_ETHERTYPE_FILTER_NUM,
> + 0);
> + if (!ethertype_rule->hash_map) {
> + PMD_INIT_LOG(ERR,
> + "Failed to allocate memory for ethertype hash map!");
> + ret = -ENOMEM;
> + goto err_ethertype_hash_map_alloc;
> + }
> +
> return 0;
>
> +err_ethertype_hash_map_alloc:
> + rte_hash_free(ethertype_rule->hash_table);
> +err_ethertype_hash_table_create:
> + rte_free(dev->data->mac_addrs);
> err_mac_alloc:
> i40e_vsi_release(pf->main_vsi);
> err_setup_pf_switch:
It can be good idea to extract filter related code into a separate
function, eth_i40e_dev_init() is already too big. It is up to you.
> @@ -1206,25 +1247,42 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
> static int
> eth_i40e_dev_uninit(struct rte_eth_dev *dev)
> {
> + struct i40e_pf *pf;
> struct rte_pci_device *pci_dev;
> struct rte_intr_handle *intr_handle;
> struct i40e_hw *hw;
> struct i40e_filter_control_settings settings;
> + struct i40e_ethertype_filter *p_ethertype;
> int ret;
> uint8_t aq_fail = 0;
> + struct i40e_ethertype_rule *ethertype_rule;
>
> PMD_INIT_FUNC_TRACE();
>
> if (rte_eal_process_type() != RTE_PROC_PRIMARY)
> return 0;
>
> + pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
> hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> pci_dev = I40E_DEV_TO_PCI(dev);
> intr_handle = &pci_dev->intr_handle;
> + ethertype_rule = &pf->ethertype;
>
> if (hw->adapter_stopped == 0)
> i40e_dev_close(dev);
>
> + /* Remove all ethertype director rules and hash */
> + if (ethertype_rule->hash_map)
> + rte_free(ethertype_rule->hash_map);
> + if (ethertype_rule->hash_table)
> + rte_hash_free(ethertype_rule->hash_table);
> +
> + while ((p_ethertype = TAILQ_FIRST(ðertype_rule->ethertype_list))) {
> + TAILQ_REMOVE(ðertype_rule->ethertype_list,
> + p_ethertype, rules);
> + rte_free(p_ethertype);
> + }
> +
> dev->dev_ops = NULL;
> dev->rx_pkt_burst = NULL;
> dev->tx_pkt_burst = NULL;
Same is valid for eth_i40e_dev_uninit(), if possible having a separate
function for filter related work.
<...>
^ permalink raw reply [flat|nested] 175+ messages in thread
* Re: [dpdk-dev] [PATCH v6 10/17] net/i40e: add flow create function
2017-01-05 15:46 ` [dpdk-dev] [PATCH v6 10/17] net/i40e: add flow create function Beilei Xing
@ 2017-01-05 17:47 ` Ferruh Yigit
0 siblings, 0 replies; 175+ messages in thread
From: Ferruh Yigit @ 2017-01-05 17:47 UTC (permalink / raw)
To: Beilei Xing, jingjing.wu, helin.zhang; +Cc: dev
On 1/5/2017 3:46 PM, Beilei Xing wrote:
> This patch adds i40e_flow_create function to create a
> rule. It will check if a flow matches ethertype filter
> or flow director filter or tunnel filter, if the flow
> matches some kind of filter, then set the filter to HW.
>
> Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> ---
<...>
>
> /*
> + * Struct to store flow created.
> + */
> +struct i40e_flow {
> + TAILQ_ENTRY(i40e_flow) node;
> + enum rte_filter_type filter_type;
> + void *rule;
> +};
It is possible to define this struct as "struct rte_flow", this prevents
repetitive casting between rte_flow and i40e_flow. It is your call.
<...>
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v7 00/17] net/i40e: consistent filter API
2017-01-05 15:45 ` [dpdk-dev] [PATCH v6 " Beilei Xing
` (17 preceding siblings ...)
2017-01-05 17:46 ` [dpdk-dev] [PATCH v6 00/17] net/i40e: consistent filter API Ferruh Yigit
@ 2017-01-06 5:27 ` Beilei Xing
2017-01-06 5:27 ` [dpdk-dev] [PATCH v7 01/17] net/i40e: store ethertype filter Beilei Xing
` (17 more replies)
18 siblings, 18 replies; 175+ messages in thread
From: Beilei Xing @ 2017-01-06 5:27 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
The patch set depends on Adrien's Generic flow API(rte_flow).
The patches mainly finish following functions:
1) Store and restore all kinds of filters.
2) Parse all kinds of filters.
3) Add flow validate function.
4) Add flow create function.
5) Add flow destroy function.
6) Add flow flush function.
v7 changes:
Separate filter related code from eth_i40e_dev_init().
Change struct i40e_flow to rte_flow.
v6 changes:
Change functions' name to be more readable.
Add comments for parse_pattern functions to list supported rules.
Add comments for parse_action functions to list supported actions.
Add ETHTYPE check when parsing ethertype pattern.
v5 changes:
Change some local variable name.
Add removing i40e_flow_list during device uninit.
Fix compile error when gcc compile option isn't '-O0'.
v4 changes:
Change I40E_TCI_MASK with 0xFFFF to align with testpmd.
Modify the stats shown when restoring filters.
v3 changes:
Set the related cause pointer to a non-NULL value when error happens.
Change return value when error happens.
Modify filter_del parameter with key.
Malloc filter after checking when delete a filter.
Delete meaningless initialization.
Add return value when there's error.
Change global variable definition.
Modify some function declaration.
v2 changes:
Add i40e_flow.c, all flow ops are implemented in the file.
Change the whole implementation of all parse flow functions.
Update error info for all flow ops.
Add flow_list to store flows created.
Beilei Xing (17):
net/i40e: store ethertype filter
net/i40e: store tunnel filter
net/i40e: store flow director filter
net/i40e: restore ethertype filter
net/i40e: restore tunnel filter
net/i40e: restore flow director filter
net/i40e: add flow validate function
net/i40e: parse flow director filter
net/i40e: parse tunnel filter
net/i40e: add flow create function
net/i40e: add flow destroy function
net/i40e: destroy ethertype filter
net/i40e: destroy tunnel filter
net/i40e: destroy flow directory filter
net/i40e: add flow flush function
net/i40e: flush ethertype filters
net/i40e: flush tunnel filters
drivers/net/i40e/Makefile | 2 +
drivers/net/i40e/i40e_ethdev.c | 594 +++++++++++--
drivers/net/i40e/i40e_ethdev.h | 178 ++++
drivers/net/i40e/i40e_fdir.c | 140 ++-
drivers/net/i40e/i40e_flow.c | 1849 ++++++++++++++++++++++++++++++++++++++++
5 files changed, 2692 insertions(+), 71 deletions(-)
create mode 100644 drivers/net/i40e/i40e_flow.c
Acked-by: Jingjing Wu <jingjing.wu@intel.com>
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v7 01/17] net/i40e: store ethertype filter
2017-01-06 5:27 ` [dpdk-dev] [PATCH v7 " Beilei Xing
@ 2017-01-06 5:27 ` Beilei Xing
2017-01-06 5:27 ` [dpdk-dev] [PATCH v7 02/17] net/i40e: store tunnel filter Beilei Xing
` (16 subsequent siblings)
17 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2017-01-06 5:27 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
Currently there's no ethertype filter stored in SW.
This patch stores ethertype filter with cuckoo hash
in SW, also adds protection if an ethertype filter
has been added.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/Makefile | 1 +
drivers/net/i40e/i40e_ethdev.c | 188 ++++++++++++++++++++++++++++++++++++++++-
drivers/net/i40e/i40e_ethdev.h | 31 +++++++
3 files changed, 219 insertions(+), 1 deletion(-)
diff --git a/drivers/net/i40e/Makefile b/drivers/net/i40e/Makefile
index 66997b6..11175c4 100644
--- a/drivers/net/i40e/Makefile
+++ b/drivers/net/i40e/Makefile
@@ -117,5 +117,6 @@ DEPDIRS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += lib/librte_eal lib/librte_ether
DEPDIRS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += lib/librte_mempool lib/librte_mbuf
DEPDIRS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += lib/librte_net
DEPDIRS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += lib/librte_kvargs
+DEPDIRS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += lib/librte_hash
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 7b8e6c0..bad2b5e 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -51,6 +51,7 @@
#include <rte_dev.h>
#include <rte_eth_ctrl.h>
#include <rte_tailq.h>
+#include <rte_hash_crc.h>
#include "i40e_logs.h"
#include "base/i40e_prototype.h"
@@ -461,6 +462,12 @@ static void i40e_set_default_mac_addr(struct rte_eth_dev *dev,
static int i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
+static int i40e_ethertype_filter_convert(
+ const struct rte_eth_ethertype_filter *input,
+ struct i40e_ethertype_filter *filter);
+static int i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
+ struct i40e_ethertype_filter *filter);
+
static const struct rte_pci_id pci_id_i40e_map[] = {
{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710) },
{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QEMU) },
@@ -929,6 +936,49 @@ config_floating_veb(struct rte_eth_dev *dev)
#define I40E_L2_TAGS_S_TAG_MASK I40E_MASK(0x1, I40E_L2_TAGS_S_TAG_SHIFT)
static int
+i40e_init_ethtype_filter_list(struct rte_eth_dev *dev)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
+ char ethertype_hash_name[RTE_HASH_NAMESIZE];
+ int ret;
+
+ struct rte_hash_parameters ethertype_hash_params = {
+ .name = ethertype_hash_name,
+ .entries = I40E_MAX_ETHERTYPE_FILTER_NUM,
+ .key_len = sizeof(struct i40e_ethertype_filter_input),
+ .hash_func = rte_hash_crc,
+ };
+
+ /* Initialize ethertype filter rule list and hash */
+ TAILQ_INIT(ðertype_rule->ethertype_list);
+ snprintf(ethertype_hash_name, RTE_HASH_NAMESIZE,
+ "ethertype_%s", dev->data->name);
+ ethertype_rule->hash_table = rte_hash_create(ðertype_hash_params);
+ if (!ethertype_rule->hash_table) {
+ PMD_INIT_LOG(ERR, "Failed to create ethertype hash table!");
+ return -EINVAL;
+ }
+ ethertype_rule->hash_map = rte_zmalloc("i40e_ethertype_hash_map",
+ sizeof(struct i40e_ethertype_filter *) *
+ I40E_MAX_ETHERTYPE_FILTER_NUM,
+ 0);
+ if (!ethertype_rule->hash_map) {
+ PMD_INIT_LOG(ERR,
+ "Failed to allocate memory for ethertype hash map!");
+ ret = -ENOMEM;
+ goto err_ethertype_hash_map_alloc;
+ }
+
+ return 0;
+
+err_ethertype_hash_map_alloc:
+ rte_hash_free(ethertype_rule->hash_table);
+
+ return ret;
+}
+
+static int
eth_i40e_dev_init(struct rte_eth_dev *dev)
{
struct rte_pci_device *pci_dev;
@@ -1182,8 +1232,14 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
pf->flags &= ~I40E_FLAG_DCB;
}
+ ret = i40e_init_ethtype_filter_list(dev);
+ if (ret < 0)
+ goto err_init_ethtype_filter_list;
+
return 0;
+err_init_ethtype_filter_list:
+ rte_free(dev->data->mac_addrs);
err_mac_alloc:
i40e_vsi_release(pf->main_vsi);
err_setup_pf_switch:
@@ -1203,9 +1259,30 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
return ret;
}
+static void
+i40e_rm_ethtype_filter_list(struct i40e_pf *pf)
+{
+ struct i40e_ethertype_filter *p_ethertype;
+ struct i40e_ethertype_rule *ethertype_rule;
+
+ ethertype_rule = &pf->ethertype;
+ /* Remove all ethertype filter rules and hash */
+ if (ethertype_rule->hash_map)
+ rte_free(ethertype_rule->hash_map);
+ if (ethertype_rule->hash_table)
+ rte_hash_free(ethertype_rule->hash_table);
+
+ while ((p_ethertype = TAILQ_FIRST(ðertype_rule->ethertype_list))) {
+ TAILQ_REMOVE(ðertype_rule->ethertype_list,
+ p_ethertype, rules);
+ rte_free(p_ethertype);
+ }
+}
+
static int
eth_i40e_dev_uninit(struct rte_eth_dev *dev)
{
+ struct i40e_pf *pf;
struct rte_pci_device *pci_dev;
struct rte_intr_handle *intr_handle;
struct i40e_hw *hw;
@@ -1218,6 +1295,7 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
pci_dev = I40E_DEV_TO_PCI(dev);
intr_handle = &pci_dev->intr_handle;
@@ -1256,6 +1334,8 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
rte_intr_callback_unregister(intr_handle,
i40e_dev_interrupt_handler, dev);
+ i40e_rm_ethtype_filter_list(pf);
+
return 0;
}
@@ -7970,6 +8050,82 @@ i40e_hash_filter_ctrl(struct rte_eth_dev *dev,
return ret;
}
+/* Convert ethertype filter structure */
+static int
+i40e_ethertype_filter_convert(const struct rte_eth_ethertype_filter *input,
+ struct i40e_ethertype_filter *filter)
+{
+ rte_memcpy(&filter->input.mac_addr, &input->mac_addr, ETHER_ADDR_LEN);
+ filter->input.ether_type = input->ether_type;
+ filter->flags = input->flags;
+ filter->queue = input->queue;
+
+ return 0;
+}
+
+/* Check if there exists the ehtertype filter */
+struct i40e_ethertype_filter *
+i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_rule *ethertype_rule,
+ const struct i40e_ethertype_filter_input *input)
+{
+ int ret;
+
+ ret = rte_hash_lookup(ethertype_rule->hash_table, (const void *)input);
+ if (ret < 0)
+ return NULL;
+
+ return ethertype_rule->hash_map[ret];
+}
+
+/* Add ethertype filter in SW list */
+static int
+i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
+ struct i40e_ethertype_filter *filter)
+{
+ struct i40e_ethertype_rule *rule = &pf->ethertype;
+ int ret;
+
+ ret = rte_hash_add_key(rule->hash_table, &filter->input);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "Failed to insert ethertype filter"
+ " to hash table %d!",
+ ret);
+ return ret;
+ }
+ rule->hash_map[ret] = filter;
+
+ TAILQ_INSERT_TAIL(&rule->ethertype_list, filter, rules);
+
+ return 0;
+}
+
+/* Delete ethertype filter in SW list */
+int
+i40e_sw_ethertype_filter_del(struct i40e_pf *pf,
+ struct i40e_ethertype_filter_input *input)
+{
+ struct i40e_ethertype_rule *rule = &pf->ethertype;
+ struct i40e_ethertype_filter *filter;
+ int ret;
+
+ ret = rte_hash_del_key(rule->hash_table, input);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "Failed to delete ethertype filter"
+ " to hash table %d!",
+ ret);
+ return ret;
+ }
+ filter = rule->hash_map[ret];
+ rule->hash_map[ret] = NULL;
+
+ TAILQ_REMOVE(&rule->ethertype_list, filter, rules);
+ rte_free(filter);
+
+ return 0;
+}
+
/*
* Configure ethertype filter, which can director packet by filtering
* with mac address and ether_type or only ether_type
@@ -7980,6 +8136,9 @@ i40e_ethertype_filter_set(struct i40e_pf *pf,
bool add)
{
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
+ struct i40e_ethertype_filter *ethertype_filter, *node;
+ struct i40e_ethertype_filter check_filter;
struct i40e_control_filter_stats stats;
uint16_t flags = 0;
int ret;
@@ -7998,6 +8157,21 @@ i40e_ethertype_filter_set(struct i40e_pf *pf,
PMD_DRV_LOG(WARNING, "filter vlan ether_type in first tag is"
" not supported.");
+ /* Check if there is the filter in SW list */
+ memset(&check_filter, 0, sizeof(check_filter));
+ i40e_ethertype_filter_convert(filter, &check_filter);
+ node = i40e_sw_ethertype_filter_lookup(ethertype_rule,
+ &check_filter.input);
+ if (add && node) {
+ PMD_DRV_LOG(ERR, "Conflict with existing ethertype rules!");
+ return -EINVAL;
+ }
+
+ if (!add && !node) {
+ PMD_DRV_LOG(ERR, "There's no corresponding ethertype filter!");
+ return -EINVAL;
+ }
+
if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
@@ -8018,7 +8192,19 @@ i40e_ethertype_filter_set(struct i40e_pf *pf,
stats.mac_etype_free, stats.etype_free);
if (ret < 0)
return -ENOSYS;
- return 0;
+
+ /* Add or delete a filter in SW list */
+ if (add) {
+ ethertype_filter = rte_zmalloc("ethertype_filter",
+ sizeof(*ethertype_filter), 0);
+ rte_memcpy(ethertype_filter, &check_filter,
+ sizeof(check_filter));
+ ret = i40e_sw_ethertype_filter_insert(pf, ethertype_filter);
+ } else {
+ ret = i40e_sw_ethertype_filter_del(pf, &node->input);
+ }
+
+ return ret;
}
/*
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index b0363f9..dbfcf9f 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -37,6 +37,7 @@
#include <rte_eth_ctrl.h>
#include <rte_time.h>
#include <rte_kvargs.h>
+#include <rte_hash.h>
#define I40E_VLAN_TAG_SIZE 4
@@ -396,6 +397,30 @@ struct i40e_fdir_info {
struct i40e_fdir_flex_mask flex_mask[I40E_FILTER_PCTYPE_MAX];
};
+/* Ethertype filter number HW supports */
+#define I40E_MAX_ETHERTYPE_FILTER_NUM 768
+
+/* Ethertype filter struct */
+struct i40e_ethertype_filter_input {
+ struct ether_addr mac_addr; /* Mac address to match */
+ uint16_t ether_type; /* Ether type to match */
+};
+
+struct i40e_ethertype_filter {
+ TAILQ_ENTRY(i40e_ethertype_filter) rules;
+ struct i40e_ethertype_filter_input input;
+ uint16_t flags; /* Flags from RTE_ETHTYPE_FLAGS_* */
+ uint16_t queue; /* Queue assigned to when match */
+};
+
+TAILQ_HEAD(i40e_ethertype_filter_list, i40e_ethertype_filter);
+
+struct i40e_ethertype_rule {
+ struct i40e_ethertype_filter_list ethertype_list;
+ struct i40e_ethertype_filter **hash_map;
+ struct rte_hash *hash_table;
+};
+
#define I40E_MIRROR_MAX_ENTRIES_PER_RULE 64
#define I40E_MAX_MIRROR_RULES 64
/*
@@ -466,6 +491,7 @@ struct i40e_pf {
struct i40e_vmdq_info *vmdq;
struct i40e_fdir_info fdir; /* flow director info */
+ struct i40e_ethertype_rule ethertype; /* Ethertype filter rule */
struct i40e_fc_conf fc_conf; /* Flow control conf */
struct i40e_mirror_rule_list mirror_list;
uint16_t nb_mirror_rule; /* The number of mirror rules */
@@ -616,6 +642,11 @@ void i40e_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
struct rte_eth_rxq_info *qinfo);
void i40e_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
struct rte_eth_txq_info *qinfo);
+struct i40e_ethertype_filter *
+i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_rule *ethertype_rule,
+ const struct i40e_ethertype_filter_input *input);
+int i40e_sw_ethertype_filter_del(struct i40e_pf *pf,
+ struct i40e_ethertype_filter_input *input);
#define I40E_DEV_TO_PCI(eth_dev) \
RTE_DEV_TO_PCI((eth_dev)->device)
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v7 02/17] net/i40e: store tunnel filter
2017-01-06 5:27 ` [dpdk-dev] [PATCH v7 " Beilei Xing
2017-01-06 5:27 ` [dpdk-dev] [PATCH v7 01/17] net/i40e: store ethertype filter Beilei Xing
@ 2017-01-06 5:27 ` Beilei Xing
2017-01-06 5:27 ` [dpdk-dev] [PATCH v7 03/17] net/i40e: store flow director filter Beilei Xing
` (15 subsequent siblings)
17 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2017-01-06 5:27 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
Currently there's no tunnel filter stored in SW.
This patch stores tunnel filter in SW with cuckoo
hash, also adds protection if a tunnel filter has
been added.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_ethdev.c | 190 ++++++++++++++++++++++++++++++++++++++++-
drivers/net/i40e/i40e_ethdev.h | 32 +++++++
2 files changed, 219 insertions(+), 3 deletions(-)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index bad2b5e..673c509 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -468,6 +468,12 @@ static int i40e_ethertype_filter_convert(
static int i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
struct i40e_ethertype_filter *filter);
+static int i40e_tunnel_filter_convert(
+ struct i40e_aqc_add_remove_cloud_filters_element_data *cld_filter,
+ struct i40e_tunnel_filter *tunnel_filter);
+static int i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
+ struct i40e_tunnel_filter *tunnel_filter);
+
static const struct rte_pci_id pci_id_i40e_map[] = {
{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710) },
{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QEMU) },
@@ -979,6 +985,49 @@ i40e_init_ethtype_filter_list(struct rte_eth_dev *dev)
}
static int
+i40e_init_tunnel_filter_list(struct rte_eth_dev *dev)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
+ char tunnel_hash_name[RTE_HASH_NAMESIZE];
+ int ret;
+
+ struct rte_hash_parameters tunnel_hash_params = {
+ .name = tunnel_hash_name,
+ .entries = I40E_MAX_TUNNEL_FILTER_NUM,
+ .key_len = sizeof(struct i40e_tunnel_filter_input),
+ .hash_func = rte_hash_crc,
+ };
+
+ /* Initialize tunnel filter rule list and hash */
+ TAILQ_INIT(&tunnel_rule->tunnel_list);
+ snprintf(tunnel_hash_name, RTE_HASH_NAMESIZE,
+ "tunnel_%s", dev->data->name);
+ tunnel_rule->hash_table = rte_hash_create(&tunnel_hash_params);
+ if (!tunnel_rule->hash_table) {
+ PMD_INIT_LOG(ERR, "Failed to create tunnel hash table!");
+ return -EINVAL;
+ }
+ tunnel_rule->hash_map = rte_zmalloc("i40e_tunnel_hash_map",
+ sizeof(struct i40e_tunnel_filter *) *
+ I40E_MAX_TUNNEL_FILTER_NUM,
+ 0);
+ if (!tunnel_rule->hash_map) {
+ PMD_INIT_LOG(ERR,
+ "Failed to allocate memory for tunnel hash map!");
+ ret = -ENOMEM;
+ goto err_tunnel_hash_map_alloc;
+ }
+
+ return 0;
+
+err_tunnel_hash_map_alloc:
+ rte_hash_free(tunnel_rule->hash_table);
+
+ return ret;
+}
+
+static int
eth_i40e_dev_init(struct rte_eth_dev *dev)
{
struct rte_pci_device *pci_dev;
@@ -1235,9 +1284,15 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
ret = i40e_init_ethtype_filter_list(dev);
if (ret < 0)
goto err_init_ethtype_filter_list;
+ ret = i40e_init_tunnel_filter_list(dev);
+ if (ret < 0)
+ goto err_init_tunnel_filter_list;
return 0;
+err_init_tunnel_filter_list:
+ rte_free(pf->ethertype.hash_table);
+ rte_free(pf->ethertype.hash_map);
err_init_ethtype_filter_list:
rte_free(dev->data->mac_addrs);
err_mac_alloc:
@@ -1279,6 +1334,25 @@ i40e_rm_ethtype_filter_list(struct i40e_pf *pf)
}
}
+static void
+i40e_rm_tunnel_filter_list(struct i40e_pf *pf)
+{
+ struct i40e_tunnel_filter *p_tunnel;
+ struct i40e_tunnel_rule *tunnel_rule;
+
+ tunnel_rule = &pf->tunnel;
+ /* Remove all tunnel director rules and hash */
+ if (tunnel_rule->hash_map)
+ rte_free(tunnel_rule->hash_map);
+ if (tunnel_rule->hash_table)
+ rte_hash_free(tunnel_rule->hash_table);
+
+ while ((p_tunnel = TAILQ_FIRST(&tunnel_rule->tunnel_list))) {
+ TAILQ_REMOVE(&tunnel_rule->tunnel_list, p_tunnel, rules);
+ rte_free(p_tunnel);
+ }
+}
+
static int
eth_i40e_dev_uninit(struct rte_eth_dev *dev)
{
@@ -1335,6 +1409,7 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
i40e_dev_interrupt_handler, dev);
i40e_rm_ethtype_filter_list(pf);
+ i40e_rm_tunnel_filter_list(pf);
return 0;
}
@@ -6515,6 +6590,85 @@ i40e_dev_get_filter_type(uint16_t filter_type, uint16_t *flag)
return 0;
}
+/* Convert tunnel filter structure */
+static int
+i40e_tunnel_filter_convert(struct i40e_aqc_add_remove_cloud_filters_element_data
+ *cld_filter,
+ struct i40e_tunnel_filter *tunnel_filter)
+{
+ ether_addr_copy((struct ether_addr *)&cld_filter->outer_mac,
+ (struct ether_addr *)&tunnel_filter->input.outer_mac);
+ ether_addr_copy((struct ether_addr *)&cld_filter->inner_mac,
+ (struct ether_addr *)&tunnel_filter->input.inner_mac);
+ tunnel_filter->input.inner_vlan = cld_filter->inner_vlan;
+ tunnel_filter->input.flags = cld_filter->flags;
+ tunnel_filter->input.tenant_id = cld_filter->tenant_id;
+ tunnel_filter->queue = cld_filter->queue_number;
+
+ return 0;
+}
+
+/* Check if there exists the tunnel filter */
+struct i40e_tunnel_filter *
+i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_rule *tunnel_rule,
+ const struct i40e_tunnel_filter_input *input)
+{
+ int ret;
+
+ ret = rte_hash_lookup(tunnel_rule->hash_table, (const void *)input);
+ if (ret < 0)
+ return NULL;
+
+ return tunnel_rule->hash_map[ret];
+}
+
+/* Add a tunnel filter into the SW list */
+static int
+i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
+ struct i40e_tunnel_filter *tunnel_filter)
+{
+ struct i40e_tunnel_rule *rule = &pf->tunnel;
+ int ret;
+
+ ret = rte_hash_add_key(rule->hash_table, &tunnel_filter->input);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "Failed to insert tunnel filter to hash table %d!",
+ ret);
+ return ret;
+ }
+ rule->hash_map[ret] = tunnel_filter;
+
+ TAILQ_INSERT_TAIL(&rule->tunnel_list, tunnel_filter, rules);
+
+ return 0;
+}
+
+/* Delete a tunnel filter from the SW list */
+int
+i40e_sw_tunnel_filter_del(struct i40e_pf *pf,
+ struct i40e_tunnel_filter_input *input)
+{
+ struct i40e_tunnel_rule *rule = &pf->tunnel;
+ struct i40e_tunnel_filter *tunnel_filter;
+ int ret;
+
+ ret = rte_hash_del_key(rule->hash_table, input);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "Failed to delete tunnel filter to hash table %d!",
+ ret);
+ return ret;
+ }
+ tunnel_filter = rule->hash_map[ret];
+ rule->hash_map[ret] = NULL;
+
+ TAILQ_REMOVE(&rule->tunnel_list, tunnel_filter, rules);
+ rte_free(tunnel_filter);
+
+ return 0;
+}
+
static int
i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
struct rte_eth_tunnel_filter_conf *tunnel_filter,
@@ -6530,6 +6684,9 @@ i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
struct i40e_vsi *vsi = pf->main_vsi;
struct i40e_aqc_add_remove_cloud_filters_element_data *cld_filter;
struct i40e_aqc_add_remove_cloud_filters_element_data *pfilter;
+ struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
+ struct i40e_tunnel_filter *tunnel, *node;
+ struct i40e_tunnel_filter check_filter; /* Check if filter exists */
cld_filter = rte_zmalloc("tunnel_filter",
sizeof(struct i40e_aqc_add_remove_cloud_filters_element_data),
@@ -6592,11 +6749,38 @@ i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
pfilter->tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id);
pfilter->queue_number = rte_cpu_to_le_16(tunnel_filter->queue_id);
- if (add)
+ /* Check if there is the filter in SW list */
+ memset(&check_filter, 0, sizeof(check_filter));
+ i40e_tunnel_filter_convert(cld_filter, &check_filter);
+ node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &check_filter.input);
+ if (add && node) {
+ PMD_DRV_LOG(ERR, "Conflict with existing tunnel rules!");
+ return -EINVAL;
+ }
+
+ if (!add && !node) {
+ PMD_DRV_LOG(ERR, "There's no corresponding tunnel filter!");
+ return -EINVAL;
+ }
+
+ if (add) {
ret = i40e_aq_add_cloud_filters(hw, vsi->seid, cld_filter, 1);
- else
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to add a tunnel filter.");
+ return ret;
+ }
+ tunnel = rte_zmalloc("tunnel_filter", sizeof(*tunnel), 0);
+ rte_memcpy(tunnel, &check_filter, sizeof(check_filter));
+ ret = i40e_sw_tunnel_filter_insert(pf, tunnel);
+ } else {
ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
- cld_filter, 1);
+ cld_filter, 1);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter.");
+ return ret;
+ }
+ ret = i40e_sw_tunnel_filter_del(pf, &node->input);
+ }
rte_free(cld_filter);
return ret;
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index dbfcf9f..349e865 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -421,6 +421,32 @@ struct i40e_ethertype_rule {
struct rte_hash *hash_table;
};
+/* Tunnel filter number HW supports */
+#define I40E_MAX_TUNNEL_FILTER_NUM 400
+
+/* Tunnel filter struct */
+struct i40e_tunnel_filter_input {
+ uint8_t outer_mac[6]; /* Outer mac address to match */
+ uint8_t inner_mac[6]; /* Inner mac address to match */
+ uint16_t inner_vlan; /* Inner vlan address to match */
+ uint16_t flags; /* Filter type flag */
+ uint32_t tenant_id; /* Tenant id to match */
+};
+
+struct i40e_tunnel_filter {
+ TAILQ_ENTRY(i40e_tunnel_filter) rules;
+ struct i40e_tunnel_filter_input input;
+ uint16_t queue; /* Queue assigned to when match */
+};
+
+TAILQ_HEAD(i40e_tunnel_filter_list, i40e_tunnel_filter);
+
+struct i40e_tunnel_rule {
+ struct i40e_tunnel_filter_list tunnel_list;
+ struct i40e_tunnel_filter **hash_map;
+ struct rte_hash *hash_table;
+};
+
#define I40E_MIRROR_MAX_ENTRIES_PER_RULE 64
#define I40E_MAX_MIRROR_RULES 64
/*
@@ -492,6 +518,7 @@ struct i40e_pf {
struct i40e_fdir_info fdir; /* flow director info */
struct i40e_ethertype_rule ethertype; /* Ethertype filter rule */
+ struct i40e_tunnel_rule tunnel; /* Tunnel filter rule */
struct i40e_fc_conf fc_conf; /* Flow control conf */
struct i40e_mirror_rule_list mirror_list;
uint16_t nb_mirror_rule; /* The number of mirror rules */
@@ -647,6 +674,11 @@ i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_rule *ethertype_rule,
const struct i40e_ethertype_filter_input *input);
int i40e_sw_ethertype_filter_del(struct i40e_pf *pf,
struct i40e_ethertype_filter_input *input);
+struct i40e_tunnel_filter *
+i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_rule *tunnel_rule,
+ const struct i40e_tunnel_filter_input *input);
+int i40e_sw_tunnel_filter_del(struct i40e_pf *pf,
+ struct i40e_tunnel_filter_input *input);
#define I40E_DEV_TO_PCI(eth_dev) \
RTE_DEV_TO_PCI((eth_dev)->device)
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v7 03/17] net/i40e: store flow director filter
2017-01-06 5:27 ` [dpdk-dev] [PATCH v7 " Beilei Xing
2017-01-06 5:27 ` [dpdk-dev] [PATCH v7 01/17] net/i40e: store ethertype filter Beilei Xing
2017-01-06 5:27 ` [dpdk-dev] [PATCH v7 02/17] net/i40e: store tunnel filter Beilei Xing
@ 2017-01-06 5:27 ` Beilei Xing
2017-01-06 5:27 ` [dpdk-dev] [PATCH v7 04/17] net/i40e: restore ethertype filter Beilei Xing
` (14 subsequent siblings)
17 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2017-01-06 5:27 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
Currently there's no flow director filter stored in SW. This
patch stores flow director filters in SW with cuckoo hash,
also adds protection if a flow director filter has been added.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_ethdev.c | 68 ++++++++++++++++++++++++++
drivers/net/i40e/i40e_ethdev.h | 14 ++++++
drivers/net/i40e/i40e_fdir.c | 105 +++++++++++++++++++++++++++++++++++++++++
3 files changed, 187 insertions(+)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 673c509..a3b92bf 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -1028,6 +1028,48 @@ i40e_init_tunnel_filter_list(struct rte_eth_dev *dev)
}
static int
+i40e_init_fdir_filter_list(struct rte_eth_dev *dev)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_fdir_info *fdir_info = &pf->fdir;
+ char fdir_hash_name[RTE_HASH_NAMESIZE];
+ int ret;
+
+ struct rte_hash_parameters fdir_hash_params = {
+ .name = fdir_hash_name,
+ .entries = I40E_MAX_FDIR_FILTER_NUM,
+ .key_len = sizeof(struct rte_eth_fdir_input),
+ .hash_func = rte_hash_crc,
+ };
+
+ /* Initialize flow director filter rule list and hash */
+ TAILQ_INIT(&fdir_info->fdir_list);
+ snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
+ "fdir_%s", dev->data->name);
+ fdir_info->hash_table = rte_hash_create(&fdir_hash_params);
+ if (!fdir_info->hash_table) {
+ PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
+ return -EINVAL;
+ }
+ fdir_info->hash_map = rte_zmalloc("i40e_fdir_hash_map",
+ sizeof(struct i40e_fdir_filter *) *
+ I40E_MAX_FDIR_FILTER_NUM,
+ 0);
+ if (!fdir_info->hash_map) {
+ PMD_INIT_LOG(ERR,
+ "Failed to allocate memory for fdir hash map!");
+ ret = -ENOMEM;
+ goto err_fdir_hash_map_alloc;
+ }
+ return 0;
+
+err_fdir_hash_map_alloc:
+ rte_hash_free(fdir_info->hash_table);
+
+ return ret;
+}
+
+static int
eth_i40e_dev_init(struct rte_eth_dev *dev)
{
struct rte_pci_device *pci_dev;
@@ -1287,9 +1329,15 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
ret = i40e_init_tunnel_filter_list(dev);
if (ret < 0)
goto err_init_tunnel_filter_list;
+ ret = i40e_init_fdir_filter_list(dev);
+ if (ret < 0)
+ goto err_init_fdir_filter_list;
return 0;
+err_init_fdir_filter_list:
+ rte_free(pf->tunnel.hash_table);
+ rte_free(pf->tunnel.hash_map);
err_init_tunnel_filter_list:
rte_free(pf->ethertype.hash_table);
rte_free(pf->ethertype.hash_map);
@@ -1353,6 +1401,25 @@ i40e_rm_tunnel_filter_list(struct i40e_pf *pf)
}
}
+static void
+i40e_rm_fdir_filter_list(struct i40e_pf *pf)
+{
+ struct i40e_fdir_filter *p_fdir;
+ struct i40e_fdir_info *fdir_info;
+
+ fdir_info = &pf->fdir;
+ /* Remove all flow director rules and hash */
+ if (fdir_info->hash_map)
+ rte_free(fdir_info->hash_map);
+ if (fdir_info->hash_table)
+ rte_hash_free(fdir_info->hash_table);
+
+ while ((p_fdir = TAILQ_FIRST(&fdir_info->fdir_list))) {
+ TAILQ_REMOVE(&fdir_info->fdir_list, p_fdir, rules);
+ rte_free(p_fdir);
+ }
+}
+
static int
eth_i40e_dev_uninit(struct rte_eth_dev *dev)
{
@@ -1410,6 +1477,7 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
i40e_rm_ethtype_filter_list(pf);
i40e_rm_tunnel_filter_list(pf);
+ i40e_rm_fdir_filter_list(pf);
return 0;
}
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 349e865..43a3dbb 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -377,6 +377,14 @@ struct i40e_fdir_flex_mask {
};
#define I40E_FILTER_PCTYPE_MAX 64
+#define I40E_MAX_FDIR_FILTER_NUM (1024 * 8)
+
+struct i40e_fdir_filter {
+ TAILQ_ENTRY(i40e_fdir_filter) rules;
+ struct rte_eth_fdir_filter fdir;
+};
+
+TAILQ_HEAD(i40e_fdir_filter_list, i40e_fdir_filter);
/*
* A structure used to define fields of a FDIR related info.
*/
@@ -395,6 +403,10 @@ struct i40e_fdir_info {
*/
struct i40e_fdir_flex_pit flex_set[I40E_MAX_FLXPLD_LAYER * I40E_MAX_FLXPLD_FIED];
struct i40e_fdir_flex_mask flex_mask[I40E_FILTER_PCTYPE_MAX];
+
+ struct i40e_fdir_filter_list fdir_list;
+ struct i40e_fdir_filter **hash_map;
+ struct rte_hash *hash_table;
};
/* Ethertype filter number HW supports */
@@ -674,6 +686,8 @@ i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_rule *ethertype_rule,
const struct i40e_ethertype_filter_input *input);
int i40e_sw_ethertype_filter_del(struct i40e_pf *pf,
struct i40e_ethertype_filter_input *input);
+int i40e_sw_fdir_filter_del(struct i40e_pf *pf,
+ struct rte_eth_fdir_input *input);
struct i40e_tunnel_filter *
i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_rule *tunnel_rule,
const struct i40e_tunnel_filter_input *input);
diff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c
index 335bf15..4a29b37 100644
--- a/drivers/net/i40e/i40e_fdir.c
+++ b/drivers/net/i40e/i40e_fdir.c
@@ -121,6 +121,14 @@ static int i40e_fdir_filter_programming(struct i40e_pf *pf,
bool add);
static int i40e_fdir_flush(struct rte_eth_dev *dev);
+static int i40e_fdir_filter_convert(const struct rte_eth_fdir_filter *input,
+ struct i40e_fdir_filter *filter);
+static struct i40e_fdir_filter *
+i40e_sw_fdir_filter_lookup(struct i40e_fdir_info *fdir_info,
+ const struct rte_eth_fdir_input *input);
+static int i40e_sw_fdir_filter_insert(struct i40e_pf *pf,
+ struct i40e_fdir_filter *filter);
+
static int
i40e_fdir_rx_queue_init(struct i40e_rx_queue *rxq)
{
@@ -1017,6 +1025,74 @@ i40e_check_fdir_programming_status(struct i40e_rx_queue *rxq)
return ret;
}
+static int
+i40e_fdir_filter_convert(const struct rte_eth_fdir_filter *input,
+ struct i40e_fdir_filter *filter)
+{
+ rte_memcpy(&filter->fdir, input, sizeof(struct rte_eth_fdir_filter));
+ return 0;
+}
+
+/* Check if there exists the flow director filter */
+static struct i40e_fdir_filter *
+i40e_sw_fdir_filter_lookup(struct i40e_fdir_info *fdir_info,
+ const struct rte_eth_fdir_input *input)
+{
+ int ret;
+
+ ret = rte_hash_lookup(fdir_info->hash_table, (const void *)input);
+ if (ret < 0)
+ return NULL;
+
+ return fdir_info->hash_map[ret];
+}
+
+/* Add a flow director filter into the SW list */
+static int
+i40e_sw_fdir_filter_insert(struct i40e_pf *pf, struct i40e_fdir_filter *filter)
+{
+ struct i40e_fdir_info *fdir_info = &pf->fdir;
+ int ret;
+
+ ret = rte_hash_add_key(fdir_info->hash_table,
+ &filter->fdir.input);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "Failed to insert fdir filter to hash table %d!",
+ ret);
+ return ret;
+ }
+ fdir_info->hash_map[ret] = filter;
+
+ TAILQ_INSERT_TAIL(&fdir_info->fdir_list, filter, rules);
+
+ return 0;
+}
+
+/* Delete a flow director filter from the SW list */
+int
+i40e_sw_fdir_filter_del(struct i40e_pf *pf, struct rte_eth_fdir_input *input)
+{
+ struct i40e_fdir_info *fdir_info = &pf->fdir;
+ struct i40e_fdir_filter *filter;
+ int ret;
+
+ ret = rte_hash_del_key(fdir_info->hash_table, input);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "Failed to delete fdir filter from hash table %d!",
+ ret);
+ return ret;
+ }
+ filter = fdir_info->hash_map[ret];
+ fdir_info->hash_map[ret] = NULL;
+
+ TAILQ_REMOVE(&fdir_info->fdir_list, filter, rules);
+ rte_free(filter);
+
+ return 0;
+}
+
/*
* i40e_add_del_fdir_filter - add or remove a flow director filter.
* @pf: board private structure
@@ -1032,6 +1108,9 @@ i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
enum i40e_filter_pctype pctype;
+ struct i40e_fdir_info *fdir_info = &pf->fdir;
+ struct i40e_fdir_filter *fdir_filter, *node;
+ struct i40e_fdir_filter check_filter; /* Check if the filter exists */
int ret = 0;
if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
@@ -1054,6 +1133,22 @@ i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
return -EINVAL;
}
+ /* Check if there is the filter in SW list */
+ memset(&check_filter, 0, sizeof(check_filter));
+ i40e_fdir_filter_convert(filter, &check_filter);
+ node = i40e_sw_fdir_filter_lookup(fdir_info, &check_filter.fdir.input);
+ if (add && node) {
+ PMD_DRV_LOG(ERR,
+ "Conflict with existing flow director rules!");
+ return -EINVAL;
+ }
+
+ if (!add && !node) {
+ PMD_DRV_LOG(ERR,
+ "There's no corresponding flow director filter!");
+ return -EINVAL;
+ }
+
memset(pkt, 0, I40E_FDIR_PKT_LEN);
ret = i40e_fdir_construct_pkt(pf, &filter->input, pkt);
@@ -1077,6 +1172,16 @@ i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
pctype);
return ret;
}
+
+ if (add) {
+ fdir_filter = rte_zmalloc("fdir_filter",
+ sizeof(*fdir_filter), 0);
+ rte_memcpy(fdir_filter, &check_filter, sizeof(check_filter));
+ ret = i40e_sw_fdir_filter_insert(pf, fdir_filter);
+ } else {
+ ret = i40e_sw_fdir_filter_del(pf, &node->fdir.input);
+ }
+
return ret;
}
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v7 04/17] net/i40e: restore ethertype filter
2017-01-06 5:27 ` [dpdk-dev] [PATCH v7 " Beilei Xing
` (2 preceding siblings ...)
2017-01-06 5:27 ` [dpdk-dev] [PATCH v7 03/17] net/i40e: store flow director filter Beilei Xing
@ 2017-01-06 5:27 ` Beilei Xing
2017-01-06 5:27 ` [dpdk-dev] [PATCH v7 05/17] net/i40e: restore tunnel filter Beilei Xing
` (13 subsequent siblings)
17 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2017-01-06 5:27 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
Add support for restoring the ethertype filter in case the
filter is dropped accidentally, since with the generic filter
API all filters must be added and removed explicitly by the user.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_ethdev.c | 44 ++++++++++++++++++++++++++++++++++++++++++
1 file changed, 44 insertions(+)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index a3b92bf..5720709 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -474,6 +474,9 @@ static int i40e_tunnel_filter_convert(
static int i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
struct i40e_tunnel_filter *tunnel_filter);
+static void i40e_ethertype_filter_restore(struct i40e_pf *pf);
+static void i40e_filter_restore(struct i40e_pf *pf);
+
static const struct rte_pci_id pci_id_i40e_map[] = {
{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710) },
{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QEMU) },
@@ -2027,6 +2030,8 @@ i40e_dev_start(struct rte_eth_dev *dev)
/* enable uio intr after callback register */
rte_intr_enable(intr_handle);
+ i40e_filter_restore(pf);
+
return I40E_SUCCESS;
err_up:
@@ -10152,3 +10157,42 @@ i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
return ret;
}
+
+/* Restore ethertype filter */
+static void
+i40e_ethertype_filter_restore(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_ethertype_filter_list
+ *ethertype_list = &pf->ethertype.ethertype_list;
+ struct i40e_ethertype_filter *f;
+ struct i40e_control_filter_stats stats;
+ uint16_t flags;
+
+ TAILQ_FOREACH(f, ethertype_list, rules) {
+ flags = 0;
+ if (!(f->flags & RTE_ETHTYPE_FLAGS_MAC))
+ flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
+ if (f->flags & RTE_ETHTYPE_FLAGS_DROP)
+ flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
+ flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
+
+ memset(&stats, 0, sizeof(stats));
+ i40e_aq_add_rem_control_packet_filter(hw,
+ f->input.mac_addr.addr_bytes,
+ f->input.ether_type,
+ flags, pf->main_vsi->seid,
+ f->queue, 1, &stats, NULL);
+ }
+ PMD_DRV_LOG(INFO, "Ethertype filter:"
+ " mac_etype_used = %u, etype_used = %u,"
+ " mac_etype_free = %u, etype_free = %u\n",
+ stats.mac_etype_used, stats.etype_used,
+ stats.mac_etype_free, stats.etype_free);
+}
+
+static void
+i40e_filter_restore(struct i40e_pf *pf)
+{
+ i40e_ethertype_filter_restore(pf);
+}
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v7 05/17] net/i40e: restore tunnel filter
2017-01-06 5:27 ` [dpdk-dev] [PATCH v7 " Beilei Xing
` (3 preceding siblings ...)
2017-01-06 5:27 ` [dpdk-dev] [PATCH v7 04/17] net/i40e: restore ethertype filter Beilei Xing
@ 2017-01-06 5:27 ` Beilei Xing
2017-01-06 5:27 ` [dpdk-dev] [PATCH v7 06/17] net/i40e: restore flow director filter Beilei Xing
` (12 subsequent siblings)
17 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2017-01-06 5:27 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
Add support of restoring tunnel filter.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_ethdev.c | 21 +++++++++++++++++++++
1 file changed, 21 insertions(+)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 5720709..ec495ee 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -475,6 +475,7 @@ static int i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
struct i40e_tunnel_filter *tunnel_filter);
static void i40e_ethertype_filter_restore(struct i40e_pf *pf);
+static void i40e_tunnel_filter_restore(struct i40e_pf *pf);
static void i40e_filter_restore(struct i40e_pf *pf);
static const struct rte_pci_id pci_id_i40e_map[] = {
@@ -10191,8 +10192,28 @@ i40e_ethertype_filter_restore(struct i40e_pf *pf)
stats.mac_etype_free, stats.etype_free);
}
+/* Restore tunnel filter */
+static void
+i40e_tunnel_filter_restore(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_vsi *vsi = pf->main_vsi;
+ struct i40e_tunnel_filter_list
+ *tunnel_list = &pf->tunnel.tunnel_list;
+ struct i40e_tunnel_filter *f;
+ struct i40e_aqc_add_remove_cloud_filters_element_data cld_filter;
+
+ TAILQ_FOREACH(f, tunnel_list, rules) {
+ memset(&cld_filter, 0, sizeof(cld_filter));
+ rte_memcpy(&cld_filter, &f->input, sizeof(f->input));
+ cld_filter.queue_number = f->queue;
+ i40e_aq_add_cloud_filters(hw, vsi->seid, &cld_filter, 1);
+ }
+}
+
static void
i40e_filter_restore(struct i40e_pf *pf)
{
i40e_ethertype_filter_restore(pf);
+ i40e_tunnel_filter_restore(pf);
}
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v7 06/17] net/i40e: restore flow director filter
2017-01-06 5:27 ` [dpdk-dev] [PATCH v7 " Beilei Xing
` (4 preceding siblings ...)
2017-01-06 5:27 ` [dpdk-dev] [PATCH v7 05/17] net/i40e: restore tunnel filter Beilei Xing
@ 2017-01-06 5:27 ` Beilei Xing
2017-01-06 5:27 ` [dpdk-dev] [PATCH v7 07/17] net/i40e: add flow validate function Beilei Xing
` (11 subsequent siblings)
17 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2017-01-06 5:27 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
Add support of restoring flow director filter.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_ethdev.c | 1 +
drivers/net/i40e/i40e_ethdev.h | 1 +
drivers/net/i40e/i40e_fdir.c | 31 +++++++++++++++++++++++++++++++
3 files changed, 33 insertions(+)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index ec495ee..30cc8c5 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -10216,4 +10216,5 @@ i40e_filter_restore(struct i40e_pf *pf)
{
i40e_ethertype_filter_restore(pf);
i40e_tunnel_filter_restore(pf);
+ i40e_fdir_filter_restore(pf);
}
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 43a3dbb..82baab6 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -670,6 +670,7 @@ int i40e_fdir_ctrl_func(struct rte_eth_dev *dev,
int i40e_select_filter_input_set(struct i40e_hw *hw,
struct rte_eth_input_set_conf *conf,
enum rte_filter_type filter);
+void i40e_fdir_filter_restore(struct i40e_pf *pf);
int i40e_hash_filter_inset_select(struct i40e_hw *hw,
struct rte_eth_input_set_conf *conf);
int i40e_fdir_filter_inset_select(struct i40e_pf *pf,
diff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c
index 4a29b37..f89dbc9 100644
--- a/drivers/net/i40e/i40e_fdir.c
+++ b/drivers/net/i40e/i40e_fdir.c
@@ -1586,3 +1586,34 @@ i40e_fdir_ctrl_func(struct rte_eth_dev *dev,
}
return ret;
}
+
+/* Restore flow director filter */
+void
+i40e_fdir_filter_restore(struct i40e_pf *pf)
+{
+ struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(pf->main_vsi);
+ struct i40e_fdir_filter_list *fdir_list = &pf->fdir.fdir_list;
+ struct i40e_fdir_filter *f;
+#ifdef RTE_LIBRTE_I40E_DEBUG_DRIVER
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ uint32_t fdstat;
+ uint32_t guarant_cnt; /**< Number of filters in guaranteed spaces. */
+ uint32_t best_cnt; /**< Number of filters in best effort spaces. */
+#endif /* RTE_LIBRTE_I40E_DEBUG_DRIVER */
+
+ TAILQ_FOREACH(f, fdir_list, rules)
+ i40e_add_del_fdir_filter(dev, &f->fdir, TRUE);
+
+#ifdef RTE_LIBRTE_I40E_DEBUG_DRIVER
+ fdstat = I40E_READ_REG(hw, I40E_PFQF_FDSTAT);
+ guarant_cnt =
+ (uint32_t)((fdstat & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) >>
+ I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT);
+ best_cnt =
+ (uint32_t)((fdstat & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
+ I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
+#endif /* RTE_LIBRTE_I40E_DEBUG_DRIVER */
+
+ PMD_DRV_LOG(INFO, "FDIR: Guarant count: %d, Best count: %d\n",
+ guarant_cnt, best_cnt);
+}
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v7 07/17] net/i40e: add flow validate function
2017-01-06 5:27 ` [dpdk-dev] [PATCH v7 " Beilei Xing
` (5 preceding siblings ...)
2017-01-06 5:27 ` [dpdk-dev] [PATCH v7 06/17] net/i40e: restore flow director filter Beilei Xing
@ 2017-01-06 5:27 ` Beilei Xing
2017-01-06 5:27 ` [dpdk-dev] [PATCH v7 08/17] net/i40e: parse flow director filter Beilei Xing
` (10 subsequent siblings)
17 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2017-01-06 5:27 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
This patch adds i40e_flow_validation function to check if
a flow is valid according to the flow pattern.
i40e_flow_parse_ethertype_filter is added first, it also
gets the ethertype info.
i40e_flow.c is added to handle all generic filter events.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/Makefile | 1 +
drivers/net/i40e/i40e_ethdev.c | 12 +-
drivers/net/i40e/i40e_ethdev.h | 23 ++
drivers/net/i40e/i40e_flow.c | 492 +++++++++++++++++++++++++++++++++++++++++
4 files changed, 523 insertions(+), 5 deletions(-)
create mode 100644 drivers/net/i40e/i40e_flow.c
diff --git a/drivers/net/i40e/Makefile b/drivers/net/i40e/Makefile
index 11175c4..89bd85a 100644
--- a/drivers/net/i40e/Makefile
+++ b/drivers/net/i40e/Makefile
@@ -105,6 +105,7 @@ endif
SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_ethdev_vf.c
SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_pf.c
SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_fdir.c
+SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_flow.c
# vector PMD driver needs SSE4.1 support
ifeq ($(findstring RTE_MACHINE_CPUFLAG_SSE4_1,$(CFLAGS)),)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 30cc8c5..69945b1 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -285,11 +285,6 @@
#define I40E_INSET_IPV6_HOP_LIMIT_MASK 0x000CFF00UL
#define I40E_INSET_IPV6_NEXT_HDR_MASK 0x000C00FFUL
-#define I40E_GL_SWT_L2TAGCTRL(_i) (0x001C0A70 + ((_i) * 4))
-#define I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT 16
-#define I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_MASK \
- I40E_MASK(0xFFFF, I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT)
-
/* PCI offset for querying capability */
#define PCI_DEV_CAP_REG 0xA4
/* PCI offset for enabling/disabling Extended Tag */
@@ -8504,6 +8499,8 @@ i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
return ret;
}
+extern const struct rte_flow_ops i40e_flow_ops;
+
static int
i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
enum rte_filter_type filter_type,
@@ -8535,6 +8532,11 @@ i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
case RTE_ETH_FILTER_FDIR:
ret = i40e_fdir_ctrl_func(dev, filter_op, arg);
break;
+ case RTE_ETH_FILTER_GENERIC:
+ if (filter_op != RTE_ETH_FILTER_GET)
+ return -EINVAL;
+ *(const void **)arg = &i40e_flow_ops;
+ break;
default:
PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
filter_type);
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 82baab6..00c2a0a 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -38,6 +38,7 @@
#include <rte_time.h>
#include <rte_kvargs.h>
#include <rte_hash.h>
+#include <rte_flow_driver.h>
#define I40E_VLAN_TAG_SIZE 4
@@ -189,6 +190,11 @@ enum i40e_flxpld_layer_idx {
#define FLOATING_VEB_SUPPORTED_FW_MAJ 5
#define FLOATING_VEB_SUPPORTED_FW_MIN 0
+#define I40E_GL_SWT_L2TAGCTRL(_i) (0x001C0A70 + ((_i) * 4))
+#define I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT 16
+#define I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_MASK \
+ I40E_MASK(0xFFFF, I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT)
+
struct i40e_adapter;
/**
@@ -629,6 +635,23 @@ struct i40e_adapter {
struct rte_timecounter tx_tstamp_tc;
};
+union i40e_filter_t {
+ struct rte_eth_ethertype_filter ethertype_filter;
+ struct rte_eth_fdir_filter fdir_filter;
+ struct rte_eth_tunnel_filter_conf tunnel_filter;
+};
+
+typedef int (*parse_filter_t)(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter);
+struct i40e_valid_pattern {
+ enum rte_flow_item_type *items;
+ parse_filter_t parse_filter;
+};
+
int i40e_dev_switch_queues(struct i40e_pf *pf, bool on);
int i40e_vsi_release(struct i40e_vsi *vsi);
struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf,
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
new file mode 100644
index 0000000..5ca7a42
--- /dev/null
+++ b/drivers/net/i40e/i40e_flow.c
@@ -0,0 +1,492 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_log.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_eth_ctrl.h>
+#include <rte_tailq.h>
+#include <rte_flow_driver.h>
+
+#include "i40e_logs.h"
+#include "base/i40e_type.h"
+#include "base/i40e_prototype.h"
+#include "i40e_ethdev.h"
+
+static int i40e_flow_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error);
+static int
+i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
+ const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ struct rte_eth_ethertype_filter *filter);
+static int i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error,
+ struct rte_eth_ethertype_filter *filter);
+static int i40e_flow_parse_attr(const struct rte_flow_attr *attr,
+ struct rte_flow_error *error);
+static int i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter);
+
+const struct rte_flow_ops i40e_flow_ops = {
+ .validate = i40e_flow_validate,
+};
+
+union i40e_filter_t cons_filter;
+
+/* Pattern matched ethertype filter */
+static enum rte_flow_item_type pattern_ethertype[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static struct i40e_valid_pattern i40e_supported_patterns[] = {
+ /* Ethertype */
+ { pattern_ethertype, i40e_flow_parse_ethertype_filter },
+};
+
+#define NEXT_ITEM_OF_ACTION(act, actions, index) \
+ do { \
+ act = actions + index; \
+ while (act->type == RTE_FLOW_ACTION_TYPE_VOID) { \
+ index++; \
+ act = actions + index; \
+ } \
+ } while (0)
+
+/* Find the first VOID or non-VOID item pointer */
+static const struct rte_flow_item *
+i40e_find_first_item(const struct rte_flow_item *item, bool is_void)
+{
+ bool is_find;
+
+ while (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ if (is_void)
+ is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
+ else
+ is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
+ if (is_find)
+ break;
+ item++;
+ }
+ return item;
+}
+
+/* Skip all VOID items of the pattern */
+static void
+i40e_pattern_skip_void_item(struct rte_flow_item *items,
+ const struct rte_flow_item *pattern)
+{
+ uint32_t cpy_count = 0;
+ const struct rte_flow_item *pb = pattern, *pe = pattern;
+
+ for (;;) {
+ /* Find a non-void item first */
+ pb = i40e_find_first_item(pb, false);
+ if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
+ pe = pb;
+ break;
+ }
+
+ /* Find a void item */
+ pe = i40e_find_first_item(pb + 1, true);
+
+ cpy_count = pe - pb;
+ rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);
+
+ items += cpy_count;
+
+ if (pe->type == RTE_FLOW_ITEM_TYPE_END) {
+ pb = pe;
+ break;
+ }
+
+ pb = pe + 1;
+ }
+ /* Copy the END item. */
+ rte_memcpy(items, pe, sizeof(struct rte_flow_item));
+}
+
+/* Check if the pattern matches a supported item type array */
+static bool
+i40e_match_pattern(enum rte_flow_item_type *item_array,
+ struct rte_flow_item *pattern)
+{
+ struct rte_flow_item *item = pattern;
+
+ while ((*item_array == item->type) &&
+ (*item_array != RTE_FLOW_ITEM_TYPE_END)) {
+ item_array++;
+ item++;
+ }
+
+ return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
+ item->type == RTE_FLOW_ITEM_TYPE_END);
+}
+
+/* Find if there's parse filter function matched */
+static parse_filter_t
+i40e_find_parse_filter_func(struct rte_flow_item *pattern)
+{
+ parse_filter_t parse_filter = NULL;
+ uint8_t i = 0;
+
+ for (; i < RTE_DIM(i40e_supported_patterns); i++) {
+ if (i40e_match_pattern(i40e_supported_patterns[i].items,
+ pattern)) {
+ parse_filter = i40e_supported_patterns[i].parse_filter;
+ break;
+ }
+ }
+
+ return parse_filter;
+}
+
+/* Parse attributes */
+static int
+i40e_flow_parse_attr(const struct rte_flow_attr *attr,
+ struct rte_flow_error *error)
+{
+ /* Must be input direction */
+ if (!attr->ingress) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ attr, "Only support ingress.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
+ if (attr->egress) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ attr, "Not support egress.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
+ if (attr->priority) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ attr, "Not support priority.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
+ if (attr->group) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+ attr, "Not support group.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+static uint16_t
+i40e_get_outer_vlan(struct rte_eth_dev *dev)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;
+ uint64_t reg_r = 0;
+ uint16_t reg_id;
+ uint16_t tpid;
+
+ if (qinq)
+ reg_id = 2;
+ else
+ reg_id = 3;
+
+ i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
+ &reg_r, NULL);
+
+ tpid = (reg_r >> I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT) & 0xFFFF;
+
+ return tpid;
+}
+
+/* 1. Last in item should be NULL as range is not supported.
+ * 2. Supported filter types: MAC_ETHTYPE and ETHTYPE.
+ * 3. SRC mac_addr mask should be 00:00:00:00:00:00.
+ * 4. DST mac_addr mask should be 00:00:00:00:00:00 or
+ * FF:FF:FF:FF:FF:FF
+ * 5. Ether_type mask should be 0xFFFF.
+ */
+static int
+i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
+ const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ struct rte_eth_ethertype_filter *filter)
+{
+ const struct rte_flow_item *item = pattern;
+ const struct rte_flow_item_eth *eth_spec;
+ const struct rte_flow_item_eth *eth_mask;
+ enum rte_flow_item_type item_type;
+ uint16_t outer_tpid;
+
+ outer_tpid = i40e_get_outer_vlan(dev);
+
+ for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Not support range");
+ return -rte_errno;
+ }
+ item_type = item->type;
+ switch (item_type) {
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ eth_spec = (const struct rte_flow_item_eth *)item->spec;
+ eth_mask = (const struct rte_flow_item_eth *)item->mask;
+ /* Get the MAC info. */
+ if (!eth_spec || !eth_mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "NULL ETH spec/mask");
+ return -rte_errno;
+ }
+
+ /* Mask bits of source MAC address must be full of 0.
+ * Mask bits of destination MAC address must be full
+ * of 1 or full of 0.
+ */
+ if (!is_zero_ether_addr(&eth_mask->src) ||
+ (!is_zero_ether_addr(&eth_mask->dst) &&
+ !is_broadcast_ether_addr(&eth_mask->dst))) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid MAC_addr mask");
+ return -rte_errno;
+ }
+
+ if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid ethertype mask");
+ return -rte_errno;
+ }
+
+ /* If mask bits of destination MAC address
+ * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
+ */
+ if (is_broadcast_ether_addr(&eth_mask->dst)) {
+ filter->mac_addr = eth_spec->dst;
+ filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
+ } else {
+ filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
+ }
+ filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
+
+ if (filter->ether_type == ETHER_TYPE_IPv4 ||
+ filter->ether_type == ETHER_TYPE_IPv6 ||
+ filter->ether_type == outer_tpid) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Unsupported ether_type in"
+ " control packet filter.");
+ return -rte_errno;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ return 0;
+}
+
+/* Ethertype action only supports QUEUE or DROP.
+ * Fills filter->queue / filter->flags from the action list and
+ * verifies the list is exactly one action followed by END.
+ */
+static int
+i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
+				 const struct rte_flow_action *actions,
+				 struct rte_flow_error *error,
+				 struct rte_eth_ethertype_filter *filter)
+{
+	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	const struct rte_flow_action *act;
+	const struct rte_flow_action_queue *queue;
+	uint32_t idx = 0;
+
+	/* The first non-void action must be QUEUE or DROP. */
+	NEXT_ITEM_OF_ACTION(act, actions, idx);
+	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
+	    act->type != RTE_FLOW_ACTION_TYPE_DROP) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+				   act, "Not supported action.");
+		return -rte_errno;
+	}
+
+	if (act->type == RTE_FLOW_ACTION_TYPE_DROP) {
+		filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
+	} else {
+		/* QUEUE action: the queue index must be in range. */
+		queue = (const struct rte_flow_action_queue *)act->conf;
+		filter->queue = queue->index;
+		if (filter->queue >= pf->dev_data->nb_rx_queues) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ACTION,
+					   act, "Invalid queue ID for"
+					   " ethertype_filter.");
+			return -rte_errno;
+		}
+	}
+
+	/* The next non-void action must terminate the list. */
+	idx++;
+	NEXT_ITEM_OF_ACTION(act, actions, idx);
+	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+				   act, "Not supported action.");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+/* Top-level ethertype-rule parser: validates pattern, actions and
+ * attributes in turn and fills the ethertype member of @filter.
+ * Stops at the first stage that rejects the rule.
+ */
+static int
+i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
+				 const struct rte_flow_attr *attr,
+				 const struct rte_flow_item pattern[],
+				 const struct rte_flow_action actions[],
+				 struct rte_flow_error *error,
+				 union i40e_filter_t *filter)
+{
+	struct rte_eth_ethertype_filter *ether_filter =
+		&filter->ethertype_filter;
+	int status;
+
+	status = i40e_flow_parse_ethertype_pattern(dev, pattern, error,
+						   ether_filter);
+	if (!status)
+		status = i40e_flow_parse_ethertype_action(dev, actions, error,
+							  ether_filter);
+	if (!status)
+		status = i40e_flow_parse_attr(attr, error);
+
+	return status;
+}
+
+/* rte_flow validate callback: check @attr/@pattern/@actions against
+ * the parsers this PMD supports.  The parsed result is left in the
+ * file-scope cons_filter (zeroed here, filled by the matched parser).
+ */
+static int
+i40e_flow_validate(struct rte_eth_dev *dev,
+		   const struct rte_flow_attr *attr,
+		   const struct rte_flow_item pattern[],
+		   const struct rte_flow_action actions[],
+		   struct rte_flow_error *error)
+{
+	struct rte_flow_item *items; /* internal pattern w/o VOID items */
+	parse_filter_t parse_filter;
+	uint32_t item_num = 0; /* non-void item number of pattern*/
+	uint32_t i = 0;
+	int ret;
+
+	if (!pattern) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+				   NULL, "NULL pattern.");
+		return -rte_errno;
+	}
+
+	if (!actions) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+				   NULL, "NULL action.");
+		return -rte_errno;
+	}
+
+	if (!attr) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR,
+				   NULL, "NULL attribute.");
+		return -rte_errno;
+	}
+
+	memset(&cons_filter, 0, sizeof(cons_filter));
+
+	/* Get the non-void item number of pattern */
+	while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
+		if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
+			item_num++;
+		i++;
+	}
+	item_num++; /* +1 for the trailing END item */
+
+	items = rte_zmalloc("i40e_pattern",
+			    item_num * sizeof(struct rte_flow_item), 0);
+	if (!items) {
+		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+				   NULL, "No memory for PMD internal items.");
+		return -ENOMEM;
+	}
+
+	i40e_pattern_skip_void_item(items, pattern);
+
+	/* Find if there's matched parse filter function */
+	parse_filter = i40e_find_parse_filter_func(items);
+	if (!parse_filter) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ITEM,
+				   pattern, "Unsupported pattern");
+		/* Fix: free the internal pattern copy on this error path
+		 * too, otherwise every rejected pattern leaks @items.
+		 */
+		rte_free(items);
+		return -rte_errno;
+	}
+
+	ret = parse_filter(dev, attr, items, actions, error, &cons_filter);
+
+	rte_free(items);
+
+	return ret;
+}
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v7 08/17] net/i40e: parse flow director filter
2017-01-06 5:27 ` [dpdk-dev] [PATCH v7 " Beilei Xing
` (6 preceding siblings ...)
2017-01-06 5:27 ` [dpdk-dev] [PATCH v7 07/17] net/i40e: add flow validate function Beilei Xing
@ 2017-01-06 5:27 ` Beilei Xing
2017-01-06 5:27 ` [dpdk-dev] [PATCH v7 09/17] net/i40e: parse tunnel filter Beilei Xing
` (9 subsequent siblings)
17 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2017-01-06 5:27 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
This patch adds i40e_flow_parse_fdir_filter to check
if a rule is a flow director rule according to the
flow pattern, and the function also gets the flow
director info.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_ethdev.c | 56 +---
drivers/net/i40e/i40e_ethdev.h | 55 ++++
drivers/net/i40e/i40e_flow.c | 623 +++++++++++++++++++++++++++++++++++++++++
3 files changed, 679 insertions(+), 55 deletions(-)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 69945b1..3a27170 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -139,60 +139,6 @@
#define I40E_DEFAULT_DCB_APP_NUM 1
#define I40E_DEFAULT_DCB_APP_PRIO 3
-#define I40E_INSET_NONE 0x00000000000000000ULL
-
-/* bit0 ~ bit 7 */
-#define I40E_INSET_DMAC 0x0000000000000001ULL
-#define I40E_INSET_SMAC 0x0000000000000002ULL
-#define I40E_INSET_VLAN_OUTER 0x0000000000000004ULL
-#define I40E_INSET_VLAN_INNER 0x0000000000000008ULL
-#define I40E_INSET_VLAN_TUNNEL 0x0000000000000010ULL
-
-/* bit 8 ~ bit 15 */
-#define I40E_INSET_IPV4_SRC 0x0000000000000100ULL
-#define I40E_INSET_IPV4_DST 0x0000000000000200ULL
-#define I40E_INSET_IPV6_SRC 0x0000000000000400ULL
-#define I40E_INSET_IPV6_DST 0x0000000000000800ULL
-#define I40E_INSET_SRC_PORT 0x0000000000001000ULL
-#define I40E_INSET_DST_PORT 0x0000000000002000ULL
-#define I40E_INSET_SCTP_VT 0x0000000000004000ULL
-
-/* bit 16 ~ bit 31 */
-#define I40E_INSET_IPV4_TOS 0x0000000000010000ULL
-#define I40E_INSET_IPV4_PROTO 0x0000000000020000ULL
-#define I40E_INSET_IPV4_TTL 0x0000000000040000ULL
-#define I40E_INSET_IPV6_TC 0x0000000000080000ULL
-#define I40E_INSET_IPV6_FLOW 0x0000000000100000ULL
-#define I40E_INSET_IPV6_NEXT_HDR 0x0000000000200000ULL
-#define I40E_INSET_IPV6_HOP_LIMIT 0x0000000000400000ULL
-#define I40E_INSET_TCP_FLAGS 0x0000000000800000ULL
-
-/* bit 32 ~ bit 47, tunnel fields */
-#define I40E_INSET_TUNNEL_IPV4_DST 0x0000000100000000ULL
-#define I40E_INSET_TUNNEL_IPV6_DST 0x0000000200000000ULL
-#define I40E_INSET_TUNNEL_DMAC 0x0000000400000000ULL
-#define I40E_INSET_TUNNEL_SRC_PORT 0x0000000800000000ULL
-#define I40E_INSET_TUNNEL_DST_PORT 0x0000001000000000ULL
-#define I40E_INSET_TUNNEL_ID 0x0000002000000000ULL
-
-/* bit 48 ~ bit 55 */
-#define I40E_INSET_LAST_ETHER_TYPE 0x0001000000000000ULL
-
-/* bit 56 ~ bit 63, Flex Payload */
-#define I40E_INSET_FLEX_PAYLOAD_W1 0x0100000000000000ULL
-#define I40E_INSET_FLEX_PAYLOAD_W2 0x0200000000000000ULL
-#define I40E_INSET_FLEX_PAYLOAD_W3 0x0400000000000000ULL
-#define I40E_INSET_FLEX_PAYLOAD_W4 0x0800000000000000ULL
-#define I40E_INSET_FLEX_PAYLOAD_W5 0x1000000000000000ULL
-#define I40E_INSET_FLEX_PAYLOAD_W6 0x2000000000000000ULL
-#define I40E_INSET_FLEX_PAYLOAD_W7 0x4000000000000000ULL
-#define I40E_INSET_FLEX_PAYLOAD_W8 0x8000000000000000ULL
-#define I40E_INSET_FLEX_PAYLOAD \
- (I40E_INSET_FLEX_PAYLOAD_W1 | I40E_INSET_FLEX_PAYLOAD_W2 | \
- I40E_INSET_FLEX_PAYLOAD_W3 | I40E_INSET_FLEX_PAYLOAD_W4 | \
- I40E_INSET_FLEX_PAYLOAD_W5 | I40E_INSET_FLEX_PAYLOAD_W6 | \
- I40E_INSET_FLEX_PAYLOAD_W7 | I40E_INSET_FLEX_PAYLOAD_W8)
-
/**
* Below are values for writing un-exposed registers suggested
* by silicon experts
@@ -7690,7 +7636,7 @@ i40e_validate_input_set(enum i40e_filter_pctype pctype,
}
/* default input set fields combination per pctype */
-static uint64_t
+uint64_t
i40e_get_default_input_set(uint16_t pctype)
{
static const uint64_t default_inset_table[] = {
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 00c2a0a..4597615 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -195,6 +195,60 @@ enum i40e_flxpld_layer_idx {
#define I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_MASK \
I40E_MASK(0xFFFF, I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT)
+#define I40E_INSET_NONE 0x00000000000000000ULL
+
+/* bit0 ~ bit 7 */
+#define I40E_INSET_DMAC 0x0000000000000001ULL
+#define I40E_INSET_SMAC 0x0000000000000002ULL
+#define I40E_INSET_VLAN_OUTER 0x0000000000000004ULL
+#define I40E_INSET_VLAN_INNER 0x0000000000000008ULL
+#define I40E_INSET_VLAN_TUNNEL 0x0000000000000010ULL
+
+/* bit 8 ~ bit 15 */
+#define I40E_INSET_IPV4_SRC 0x0000000000000100ULL
+#define I40E_INSET_IPV4_DST 0x0000000000000200ULL
+#define I40E_INSET_IPV6_SRC 0x0000000000000400ULL
+#define I40E_INSET_IPV6_DST 0x0000000000000800ULL
+#define I40E_INSET_SRC_PORT 0x0000000000001000ULL
+#define I40E_INSET_DST_PORT 0x0000000000002000ULL
+#define I40E_INSET_SCTP_VT 0x0000000000004000ULL
+
+/* bit 16 ~ bit 31 */
+#define I40E_INSET_IPV4_TOS 0x0000000000010000ULL
+#define I40E_INSET_IPV4_PROTO 0x0000000000020000ULL
+#define I40E_INSET_IPV4_TTL 0x0000000000040000ULL
+#define I40E_INSET_IPV6_TC 0x0000000000080000ULL
+#define I40E_INSET_IPV6_FLOW 0x0000000000100000ULL
+#define I40E_INSET_IPV6_NEXT_HDR 0x0000000000200000ULL
+#define I40E_INSET_IPV6_HOP_LIMIT 0x0000000000400000ULL
+#define I40E_INSET_TCP_FLAGS 0x0000000000800000ULL
+
+/* bit 32 ~ bit 47, tunnel fields */
+#define I40E_INSET_TUNNEL_IPV4_DST 0x0000000100000000ULL
+#define I40E_INSET_TUNNEL_IPV6_DST 0x0000000200000000ULL
+#define I40E_INSET_TUNNEL_DMAC 0x0000000400000000ULL
+#define I40E_INSET_TUNNEL_SRC_PORT 0x0000000800000000ULL
+#define I40E_INSET_TUNNEL_DST_PORT 0x0000001000000000ULL
+#define I40E_INSET_TUNNEL_ID 0x0000002000000000ULL
+
+/* bit 48 ~ bit 55 */
+#define I40E_INSET_LAST_ETHER_TYPE 0x0001000000000000ULL
+
+/* bit 56 ~ bit 63, Flex Payload */
+#define I40E_INSET_FLEX_PAYLOAD_W1 0x0100000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W2 0x0200000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W3 0x0400000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W4 0x0800000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W5 0x1000000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W6 0x2000000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W7 0x4000000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W8 0x8000000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD \
+ (I40E_INSET_FLEX_PAYLOAD_W1 | I40E_INSET_FLEX_PAYLOAD_W2 | \
+ I40E_INSET_FLEX_PAYLOAD_W3 | I40E_INSET_FLEX_PAYLOAD_W4 | \
+ I40E_INSET_FLEX_PAYLOAD_W5 | I40E_INSET_FLEX_PAYLOAD_W6 | \
+ I40E_INSET_FLEX_PAYLOAD_W7 | I40E_INSET_FLEX_PAYLOAD_W8)
+
struct i40e_adapter;
/**
@@ -717,6 +771,7 @@ i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_rule *tunnel_rule,
const struct i40e_tunnel_filter_input *input);
int i40e_sw_tunnel_filter_del(struct i40e_pf *pf,
struct i40e_tunnel_filter_input *input);
+uint64_t i40e_get_default_input_set(uint16_t pctype);
#define I40E_DEV_TO_PCI(eth_dev) \
RTE_DEV_TO_PCI((eth_dev)->device)
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index 5ca7a42..2eead93 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -52,6 +52,10 @@
#include "base/i40e_prototype.h"
#include "i40e_ethdev.h"
+#define I40E_IPV4_TC_SHIFT 4
+#define I40E_IPV6_TC_MASK (0x00FF << I40E_IPV4_TC_SHIFT)
+#define I40E_IPV6_FRAG_HEADER 44
+
static int i40e_flow_validate(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
@@ -66,6 +70,14 @@ static int i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
const struct rte_flow_action *actions,
struct rte_flow_error *error,
struct rte_eth_ethertype_filter *filter);
+static int i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
+ const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ struct rte_eth_fdir_filter *filter);
+static int i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error,
+ struct rte_eth_fdir_filter *filter);
static int i40e_flow_parse_attr(const struct rte_flow_attr *attr,
struct rte_flow_error *error);
static int i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
@@ -74,6 +86,12 @@ static int i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
const struct rte_flow_action actions[],
struct rte_flow_error *error,
union i40e_filter_t *filter);
+static int i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter);
const struct rte_flow_ops i40e_flow_ops = {
.validate = i40e_flow_validate,
@@ -87,9 +105,127 @@ static enum rte_flow_item_type pattern_ethertype[] = {
RTE_FLOW_ITEM_TYPE_END,
};
+/* Patterns accepted by the flow director parser; "_ext" variants also take a leading ETH item. */
+static enum rte_flow_item_type pattern_fdir_ipv4[] = {
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_ext[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_udp[] = {
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_udp_ext[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_tcp[] = {
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_tcp_ext[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_sctp[] = {
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_sctp_ext[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6[] = {
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_ext[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_udp[] = {
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_udp_ext[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_tcp[] = {
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_tcp_ext[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_sctp[] = {
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_sctp_ext[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
static struct i40e_valid_pattern i40e_supported_patterns[] = {
/* Ethertype */
{ pattern_ethertype, i40e_flow_parse_ethertype_filter },
+ /* FDIR */
+ { pattern_fdir_ipv4, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_ext, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_udp, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_udp_ext, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_tcp, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_tcp_ext, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_sctp, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_sctp_ext, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_ext, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_udp, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_udp_ext, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_tcp, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_tcp_ext, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_sctp, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_sctp_ext, i40e_flow_parse_fdir_filter },
};
#define NEXT_ITEM_OF_ACTION(act, actions, index) \
@@ -422,6 +558,493 @@ i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
return ret;
}
+/* Parse an FDIR pattern and fill @filter.
+ * 1. The "last" member of every item must be NULL, as ranges are
+ *    not supported.
+ * 2. Supported flow type and input set: refer to array
+ *    default_inset_table in i40e_ethdev.c.
+ * 3. Mask of fields which need to be matched should be
+ *    filled with 1.
+ * 4. Mask of fields which needn't to be matched should be
+ *    filled with 0.
+ */
+static int
+i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
+			     const struct rte_flow_item *pattern,
+			     struct rte_flow_error *error,
+			     struct rte_eth_fdir_filter *filter)
+{
+	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	const struct rte_flow_item *item = pattern;
+	const struct rte_flow_item_eth *eth_spec, *eth_mask;
+	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
+	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
+	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
+	const struct rte_flow_item_udp *udp_spec, *udp_mask;
+	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
+	const struct rte_flow_item_vf *vf_spec;
+	uint32_t flow_type = RTE_ETH_FLOW_UNKNOWN;
+	enum i40e_filter_pctype pctype;
+	uint64_t input_set = I40E_INSET_NONE;
+	uint16_t flag_offset;
+	enum rte_flow_item_type item_type;
+	enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
+	uint32_t j;
+
+	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+		if (item->last) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "Not support range");
+			return -rte_errno;
+		}
+		item_type = item->type;
+		switch (item_type) {
+		case RTE_FLOW_ITEM_TYPE_ETH:
+			/* ETH only delimits the L2 header here; no L2
+			 * field may be matched, so spec/mask must be NULL.
+			 */
+			eth_spec = (const struct rte_flow_item_eth *)item->spec;
+			eth_mask = (const struct rte_flow_item_eth *)item->mask;
+			if (eth_spec || eth_mask) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid ETH spec/mask");
+				return -rte_errno;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_IPV4:
+			l3 = RTE_FLOW_ITEM_TYPE_IPV4;
+			ipv4_spec =
+				(const struct rte_flow_item_ipv4 *)item->spec;
+			ipv4_mask =
+				(const struct rte_flow_item_ipv4 *)item->mask;
+			if (!ipv4_spec || !ipv4_mask) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "NULL IPv4 spec/mask");
+				return -rte_errno;
+			}
+
+			/* Check IPv4 mask and update input set */
+			if (ipv4_mask->hdr.version_ihl ||
+			    ipv4_mask->hdr.total_length ||
+			    ipv4_mask->hdr.packet_id ||
+			    ipv4_mask->hdr.fragment_offset ||
+			    ipv4_mask->hdr.hdr_checksum) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid IPv4 mask.");
+				return -rte_errno;
+			}
+
+			if (ipv4_mask->hdr.src_addr == UINT32_MAX)
+				input_set |= I40E_INSET_IPV4_SRC;
+			if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
+				input_set |= I40E_INSET_IPV4_DST;
+			if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
+				input_set |= I40E_INSET_IPV4_TOS;
+			if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
+				input_set |= I40E_INSET_IPV4_TTL;
+			if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
+				input_set |= I40E_INSET_IPV4_PROTO;
+
+			/* Get filter info */
+			flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_OTHER;
+			/* Check if it is fragment. */
+			flag_offset =
+			      rte_be_to_cpu_16(ipv4_spec->hdr.fragment_offset);
+			if (flag_offset & IPV4_HDR_OFFSET_MASK ||
+			    flag_offset & IPV4_HDR_MF_FLAG)
+				flow_type = RTE_ETH_FLOW_FRAG_IPV4;
+
+			/* Get the filter info */
+			filter->input.flow.ip4_flow.proto =
+				ipv4_spec->hdr.next_proto_id;
+			filter->input.flow.ip4_flow.tos =
+				ipv4_spec->hdr.type_of_service;
+			filter->input.flow.ip4_flow.ttl =
+				ipv4_spec->hdr.time_to_live;
+			filter->input.flow.ip4_flow.src_ip =
+				ipv4_spec->hdr.src_addr;
+			filter->input.flow.ip4_flow.dst_ip =
+				ipv4_spec->hdr.dst_addr;
+
+			break;
+		case RTE_FLOW_ITEM_TYPE_IPV6:
+			l3 = RTE_FLOW_ITEM_TYPE_IPV6;
+			ipv6_spec =
+				(const struct rte_flow_item_ipv6 *)item->spec;
+			ipv6_mask =
+				(const struct rte_flow_item_ipv6 *)item->mask;
+			if (!ipv6_spec || !ipv6_mask) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "NULL IPv6 spec/mask");
+				return -rte_errno;
+			}
+
+			/* Check IPv6 mask and update input set */
+			if (ipv6_mask->hdr.payload_len) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid IPv6 mask");
+				return -rte_errno;
+			}
+
+			/* SRC and DST address of IPv6 shouldn't be masked */
+			for (j = 0; j < RTE_DIM(ipv6_mask->hdr.src_addr); j++) {
+				if (ipv6_mask->hdr.src_addr[j] != UINT8_MAX ||
+				    ipv6_mask->hdr.dst_addr[j] != UINT8_MAX) {
+					rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid IPv6 mask");
+					return -rte_errno;
+				}
+			}
+
+			input_set |= I40E_INSET_IPV6_SRC;
+			input_set |= I40E_INSET_IPV6_DST;
+
+			/* TC occupies bits 20..27 of the 32-bit vtc_flow
+			 * field, which is stored in network byte order.
+			 * Fix: the mask must be converted with
+			 * rte_cpu_to_be_32 -- a 16-bit conversion tests
+			 * the wrong bits on big-endian CPUs.
+			 */
+			if ((ipv6_mask->hdr.vtc_flow &
+			     rte_cpu_to_be_32(0x0FF00000)) ==
+			    rte_cpu_to_be_32(0x0FF00000))
+				input_set |= I40E_INSET_IPV6_TC;
+			if (ipv6_mask->hdr.proto == UINT8_MAX)
+				input_set |= I40E_INSET_IPV6_NEXT_HDR;
+			if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
+				input_set |= I40E_INSET_IPV6_HOP_LIMIT;
+
+			/* Get filter info.  Fix: convert vtc_flow to host
+			 * order and shift the TC bits (20..27) down,
+			 * instead of left-shifting the raw big-endian
+			 * value, which yielded a wrong TC byte.
+			 */
+			filter->input.flow.ipv6_flow.tc =
+				(uint8_t)(rte_be_to_cpu_32(
+					ipv6_spec->hdr.vtc_flow) >> 20);
+			filter->input.flow.ipv6_flow.proto =
+				ipv6_spec->hdr.proto;
+			filter->input.flow.ipv6_flow.hop_limits =
+				ipv6_spec->hdr.hop_limits;
+
+			rte_memcpy(filter->input.flow.ipv6_flow.src_ip,
+				   ipv6_spec->hdr.src_addr, 16);
+			rte_memcpy(filter->input.flow.ipv6_flow.dst_ip,
+				   ipv6_spec->hdr.dst_addr, 16);
+
+			/* Check if it is fragment. */
+			if (ipv6_spec->hdr.proto == I40E_IPV6_FRAG_HEADER)
+				flow_type = RTE_ETH_FLOW_FRAG_IPV6;
+			else
+				flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_OTHER;
+			break;
+		case RTE_FLOW_ITEM_TYPE_TCP:
+			tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
+			tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
+			if (!tcp_spec || !tcp_mask) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "NULL TCP spec/mask");
+				return -rte_errno;
+			}
+
+			/* Check TCP mask and update input set */
+			if (tcp_mask->hdr.sent_seq ||
+			    tcp_mask->hdr.recv_ack ||
+			    tcp_mask->hdr.data_off ||
+			    tcp_mask->hdr.tcp_flags ||
+			    tcp_mask->hdr.rx_win ||
+			    tcp_mask->hdr.cksum ||
+			    tcp_mask->hdr.tcp_urp) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid TCP mask");
+				return -rte_errno;
+			}
+
+			if (tcp_mask->hdr.src_port != UINT16_MAX ||
+			    tcp_mask->hdr.dst_port != UINT16_MAX) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid TCP mask");
+				return -rte_errno;
+			}
+
+			input_set |= I40E_INSET_SRC_PORT;
+			input_set |= I40E_INSET_DST_PORT;
+
+			/* Get filter info */
+			if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+				flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
+			else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+				flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_TCP;
+
+			if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
+				filter->input.flow.tcp4_flow.src_port =
+					tcp_spec->hdr.src_port;
+				filter->input.flow.tcp4_flow.dst_port =
+					tcp_spec->hdr.dst_port;
+			} else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
+				filter->input.flow.tcp6_flow.src_port =
+					tcp_spec->hdr.src_port;
+				filter->input.flow.tcp6_flow.dst_port =
+					tcp_spec->hdr.dst_port;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_UDP:
+			udp_spec = (const struct rte_flow_item_udp *)item->spec;
+			udp_mask = (const struct rte_flow_item_udp *)item->mask;
+			if (!udp_spec || !udp_mask) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "NULL UDP spec/mask");
+				return -rte_errno;
+			}
+
+			/* Check UDP mask and update input set*/
+			if (udp_mask->hdr.dgram_len ||
+			    udp_mask->hdr.dgram_cksum) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid UDP mask");
+				return -rte_errno;
+			}
+
+			if (udp_mask->hdr.src_port != UINT16_MAX ||
+			    udp_mask->hdr.dst_port != UINT16_MAX) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid UDP mask");
+				return -rte_errno;
+			}
+
+			input_set |= I40E_INSET_SRC_PORT;
+			input_set |= I40E_INSET_DST_PORT;
+
+			/* Get filter info */
+			if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+				flow_type =
+					RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
+			else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+				flow_type =
+					RTE_ETH_FLOW_NONFRAG_IPV6_UDP;
+
+			if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
+				filter->input.flow.udp4_flow.src_port =
+					udp_spec->hdr.src_port;
+				filter->input.flow.udp4_flow.dst_port =
+					udp_spec->hdr.dst_port;
+			} else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
+				filter->input.flow.udp6_flow.src_port =
+					udp_spec->hdr.src_port;
+				filter->input.flow.udp6_flow.dst_port =
+					udp_spec->hdr.dst_port;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_SCTP:
+			sctp_spec =
+				(const struct rte_flow_item_sctp *)item->spec;
+			sctp_mask =
+				(const struct rte_flow_item_sctp *)item->mask;
+			if (!sctp_spec || !sctp_mask) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "NULL SCTP spec/mask");
+				return -rte_errno;
+			}
+
+			/* Check SCTP mask and update input set.
+			 * Fix: the error messages said "UDP" here.
+			 */
+			if (sctp_mask->hdr.cksum) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid SCTP mask");
+				return -rte_errno;
+			}
+
+			if (sctp_mask->hdr.src_port != UINT16_MAX ||
+			    sctp_mask->hdr.dst_port != UINT16_MAX ||
+			    sctp_mask->hdr.tag != UINT32_MAX) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid SCTP mask");
+				return -rte_errno;
+			}
+			input_set |= I40E_INSET_SRC_PORT;
+			input_set |= I40E_INSET_DST_PORT;
+			input_set |= I40E_INSET_SCTP_VT;
+
+			/* Get filter info */
+			if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+				flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_SCTP;
+			else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+				flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_SCTP;
+
+			if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
+				filter->input.flow.sctp4_flow.src_port =
+					sctp_spec->hdr.src_port;
+				filter->input.flow.sctp4_flow.dst_port =
+					sctp_spec->hdr.dst_port;
+				filter->input.flow.sctp4_flow.verify_tag =
+					sctp_spec->hdr.tag;
+			} else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
+				filter->input.flow.sctp6_flow.src_port =
+					sctp_spec->hdr.src_port;
+				filter->input.flow.sctp6_flow.dst_port =
+					sctp_spec->hdr.dst_port;
+				filter->input.flow.sctp6_flow.verify_tag =
+					sctp_spec->hdr.tag;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_VF:
+			vf_spec = (const struct rte_flow_item_vf *)item->spec;
+			/* Fix: guard against a NULL spec before use. */
+			if (!vf_spec) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "NULL VF spec");
+				return -rte_errno;
+			}
+			filter->input.flow_ext.is_vf = 1;
+			filter->input.flow_ext.dst_id = vf_spec->id;
+			if (filter->input.flow_ext.is_vf &&
+			    filter->input.flow_ext.dst_id >= pf->vf_num) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Invalid VF ID for FDIR.");
+				return -rte_errno;
+			}
+			break;
+		default:
+			break;
+		}
+	}
+
+	pctype = i40e_flowtype_to_pctype(flow_type);
+	if (pctype == 0 || pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ITEM, item,
+				   "Unsupported flow type");
+		return -rte_errno;
+	}
+
+	if (input_set != i40e_get_default_input_set(pctype)) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ITEM, item,
+				   "Invalid input set.");
+		return -rte_errno;
+	}
+	filter->input.flow_type = flow_type;
+
+	return 0;
+}
+
+/* Parse to get the action info of a FDIR filter.
+ * FDIR action supports QUEUE or (QUEUE + MARK).
+ */
+static int
+i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
+			    const struct rte_flow_action *actions,
+			    struct rte_flow_error *error,
+			    struct rte_eth_fdir_filter *filter)
+{
+	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	const struct rte_flow_action *act;
+	const struct rte_flow_action_queue *act_q;
+	const struct rte_flow_action_mark *mark_spec;
+	uint32_t index = 0;
+
+	/* Check if the first non-void action is QUEUE or DROP. */
+	NEXT_ITEM_OF_ACTION(act, actions, index);
+	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
+	    act->type != RTE_FLOW_ACTION_TYPE_DROP) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+				   act, "Invalid action.");
+		return -rte_errno;
+	}
+
+	filter->action.flex_off = 0;
+	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
+		/* Fix: act->conf is NULL for a DROP action, so the queue
+		 * configuration must only be read (and range-checked)
+		 * inside the QUEUE branch; the original code dereferenced
+		 * a NULL pointer for DROP rules.
+		 */
+		act_q = (const struct rte_flow_action_queue *)act->conf;
+		filter->action.behavior = RTE_ETH_FDIR_ACCEPT;
+		filter->action.rx_queue = act_q->index;
+		if (filter->action.rx_queue >= pf->dev_data->nb_rx_queues) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ACTION, act,
+					   "Invalid queue ID for FDIR.");
+			return -rte_errno;
+		}
+	} else {
+		filter->action.behavior = RTE_ETH_FDIR_REJECT;
+	}
+
+	filter->action.report_status = RTE_ETH_FDIR_REPORT_ID;
+
+	/* Check if the next non-void item is MARK or END. */
+	index++;
+	NEXT_ITEM_OF_ACTION(act, actions, index);
+	if (act->type != RTE_FLOW_ACTION_TYPE_MARK &&
+	    act->type != RTE_FLOW_ACTION_TYPE_END) {
+		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+				   act, "Invalid action.");
+		return -rte_errno;
+	}
+
+	if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
+		mark_spec = (const struct rte_flow_action_mark *)act->conf;
+		filter->soft_id = mark_spec->id;
+
+		/* Check if the next non-void item is END */
+		index++;
+		NEXT_ITEM_OF_ACTION(act, actions, index);
+		if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ACTION,
+					   act, "Invalid action.");
+			return -rte_errno;
+		}
+	}
+
+	return 0;
+}
+
+/* Top-level FDIR-rule parser: validates pattern, actions and
+ * attributes in turn, then rejects the rule if the port is not
+ * configured in perfect-match flow director mode.
+ */
+static int
+i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
+			    const struct rte_flow_attr *attr,
+			    const struct rte_flow_item pattern[],
+			    const struct rte_flow_action actions[],
+			    struct rte_flow_error *error,
+			    union i40e_filter_t *filter)
+{
+	struct rte_eth_fdir_filter *fdir_filter = &filter->fdir_filter;
+	int status;
+
+	status = i40e_flow_parse_fdir_pattern(dev, pattern, error,
+					      fdir_filter);
+	if (!status)
+		status = i40e_flow_parse_fdir_action(dev, actions, error,
+						     fdir_filter);
+	if (!status)
+		status = i40e_flow_parse_attr(attr, error);
+	if (status)
+		return status;
+
+	if (dev->data->dev_conf.fdir_conf.mode !=
+	    RTE_FDIR_MODE_PERFECT) {
+		rte_flow_error_set(error, ENOTSUP,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				   NULL,
+				   "Check the mode in fdir_conf.");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
static int
i40e_flow_validate(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v7 09/17] net/i40e: parse tunnel filter
2017-01-06 5:27 ` [dpdk-dev] [PATCH v7 " Beilei Xing
` (7 preceding siblings ...)
2017-01-06 5:27 ` [dpdk-dev] [PATCH v7 08/17] net/i40e: parse flow director filter Beilei Xing
@ 2017-01-06 5:27 ` Beilei Xing
2017-01-06 5:27 ` [dpdk-dev] [PATCH v7 10/17] net/i40e: add flow create function Beilei Xing
` (8 subsequent siblings)
17 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2017-01-06 5:27 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
This patch adds i40e_flow_parse_tunnel_filter to check
if a rule is a tunnel rule according to items of the
flow pattern, and the function also gets the tunnel info.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_flow.c | 412 +++++++++++++++++++++++++++++++++++++++++++
1 file changed, 412 insertions(+)
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index 2eead93..ed87624 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -55,6 +55,8 @@
#define I40E_IPV4_TC_SHIFT 4
#define I40E_IPV6_TC_MASK (0x00FF << I40E_IPV4_TC_SHIFT)
#define I40E_IPV6_FRAG_HEADER 44
+#define I40E_TENANT_ARRAY_NUM 3
+#define I40E_TCI_MASK 0xFFFF
static int i40e_flow_validate(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
@@ -78,6 +80,14 @@ static int i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
const struct rte_flow_action *actions,
struct rte_flow_error *error,
struct rte_eth_fdir_filter *filter);
+static int i40e_flow_parse_tunnel_pattern(__rte_unused struct rte_eth_dev *dev,
+ const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ struct rte_eth_tunnel_filter_conf *filter);
+static int i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error,
+ struct rte_eth_tunnel_filter_conf *filter);
static int i40e_flow_parse_attr(const struct rte_flow_attr *attr,
struct rte_flow_error *error);
static int i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
@@ -92,6 +102,12 @@ static int i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
const struct rte_flow_action actions[],
struct rte_flow_error *error,
union i40e_filter_t *filter);
+static int i40e_flow_parse_tunnel_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter);
const struct rte_flow_ops i40e_flow_ops = {
.validate = i40e_flow_validate,
@@ -206,6 +222,45 @@ static enum rte_flow_item_type pattern_fdir_ipv6_sctp_ext[] = {
RTE_FLOW_ITEM_TYPE_END,
};
+/* Pattern matched tunnel filter */
+
+/* VXLAN: IPv4 outer header, inner MAC, no inner VLAN */
+static enum rte_flow_item_type pattern_vxlan_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_VXLAN,
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* VXLAN: IPv6 outer header, inner MAC, no inner VLAN */
+static enum rte_flow_item_type pattern_vxlan_2[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_VXLAN,
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* VXLAN: IPv4 outer header, inner MAC + inner VLAN */
+static enum rte_flow_item_type pattern_vxlan_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_VXLAN,
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* VXLAN: IPv6 outer header, inner MAC + inner VLAN */
+static enum rte_flow_item_type pattern_vxlan_4[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_VXLAN,
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+
static struct i40e_valid_pattern i40e_supported_patterns[] = {
/* Ethertype */
{ pattern_ethertype, i40e_flow_parse_ethertype_filter },
@@ -226,6 +281,11 @@ static struct i40e_valid_pattern i40e_supported_patterns[] = {
{ pattern_fdir_ipv6_tcp_ext, i40e_flow_parse_fdir_filter },
{ pattern_fdir_ipv6_sctp, i40e_flow_parse_fdir_filter },
{ pattern_fdir_ipv6_sctp_ext, i40e_flow_parse_fdir_filter },
+ /* tunnel */
+ { pattern_vxlan_1, i40e_flow_parse_tunnel_filter },
+ { pattern_vxlan_2, i40e_flow_parse_tunnel_filter },
+ { pattern_vxlan_3, i40e_flow_parse_tunnel_filter },
+ { pattern_vxlan_4, i40e_flow_parse_tunnel_filter },
};
#define NEXT_ITEM_OF_ACTION(act, actions, index) \
@@ -1045,6 +1105,358 @@ i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
return 0;
}
+/* Parse to get the action info of a tunnel filter.
+ * Tunnel action only supports QUEUE, i.e. the matched packets are
+ * redirected to a single RX queue.
+ *
+ * Expects the action list to be { QUEUE, END } (the
+ * NEXT_ITEM_OF_ACTION macro is used to step to the next non-void
+ * action). On success filter->queue_id is filled; on failure @error
+ * is filled and -rte_errno is returned.
+ */
+static int
+i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error,
+ struct rte_eth_tunnel_filter_conf *filter)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ const struct rte_flow_action *act;
+ const struct rte_flow_action_queue *act_q;
+ uint32_t index = 0;
+
+ /* Check if the first non-void action is QUEUE. */
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ act_q = (const struct rte_flow_action_queue *)act->conf;
+ filter->queue_id = act_q->index;
+ /* The target queue must exist on this port. */
+ if (filter->queue_id >= pf->dev_data->nb_rx_queues) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Invalid queue ID for tunnel filter");
+ return -rte_errno;
+ }
+
+ /* Check if the next non-void item is END */
+ index++;
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+/* Classify the 3-byte VNI/tenant-id mask array.
+ *
+ * Return 0 when every byte is 0xFF (VNI must match exactly),
+ * 1 when every byte is 0x00 (VNI is wildcarded), and -EINVAL for a
+ * mixed or partial mask. The adjacent-byte comparison is what
+ * rejects the mixed case; per-byte values other than 0x00/0xFF are
+ * rejected in the final else.
+ */
+static int
+i40e_check_tenant_id_mask(const uint8_t *mask)
+{
+ uint32_t j;
+ int is_masked = 0;
+
+ for (j = 0; j < I40E_TENANT_ARRAY_NUM; j++) {
+ if (*(mask + j) == UINT8_MAX) {
+ if (j > 0 && (*(mask + j) != *(mask + j - 1)))
+ return -EINVAL;
+ is_masked = 0;
+ } else if (*(mask + j) == 0) {
+ if (j > 0 && (*(mask + j) != *(mask + j - 1)))
+ return -EINVAL;
+ is_masked = 1;
+ } else {
+ return -EINVAL;
+ }
+ }
+
+ return is_masked;
+}
+
+/* 1. Last in item should be NULL as range is not supported.
+ * 2. Supported filter types: IMAC_IVLAN_TENID, IMAC_IVLAN,
+ * IMAC_TENID, OMAC_TENID_IMAC and IMAC.
+ * 3. Mask of fields which need to be matched should be
+ * filled with 1.
+ * 4. Mask of fields which needn't to be matched should be
+ * filled with 0.
+ *
+ * The pattern is walked once to collect outer/inner MAC, inner VLAN
+ * and VNI; the tunnel filter type is then derived from which
+ * spec/mask combinations were supplied. Returns 0 on success,
+ * otherwise fills @error and returns -rte_errno.
+ */
+static int
+i40e_flow_parse_vxlan_pattern(const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ struct rte_eth_tunnel_filter_conf *filter)
+{
+ const struct rte_flow_item *item = pattern;
+ const struct rte_flow_item_eth *eth_spec;
+ const struct rte_flow_item_eth *eth_mask;
+ const struct rte_flow_item_eth *o_eth_spec = NULL;
+ const struct rte_flow_item_eth *o_eth_mask = NULL;
+ const struct rte_flow_item_vxlan *vxlan_spec = NULL;
+ const struct rte_flow_item_vxlan *vxlan_mask = NULL;
+ const struct rte_flow_item_eth *i_eth_spec = NULL;
+ const struct rte_flow_item_eth *i_eth_mask = NULL;
+ const struct rte_flow_item_vlan *vlan_spec = NULL;
+ const struct rte_flow_item_vlan *vlan_mask = NULL;
+ /* int, not bool: i40e_check_tenant_id_mask() returns -EINVAL on a
+ * bad mask; storing that in a bool would truncate it to 'true'
+ * and make the error check below dead code.
+ */
+ int is_vni_masked = 0;
+ enum rte_flow_item_type item_type;
+ bool vxlan_flag = 0; /* set once the VXLAN item has been seen */
+
+ for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Not support range");
+ return -rte_errno;
+ }
+ item_type = item->type;
+ switch (item_type) {
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ eth_spec = (const struct rte_flow_item_eth *)item->spec;
+ eth_mask = (const struct rte_flow_item_eth *)item->mask;
+ if ((!eth_spec && eth_mask) ||
+ (eth_spec && !eth_mask)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid ether spec/mask");
+ return -rte_errno;
+ }
+
+ if (eth_spec && eth_mask) {
+ /* DST address of inner MAC shouldn't be masked.
+ * SRC address of Inner MAC should be masked.
+ */
+ if (!is_broadcast_ether_addr(&eth_mask->dst) ||
+ !is_zero_ether_addr(&eth_mask->src) ||
+ eth_mask->type) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid ether spec/mask");
+ return -rte_errno;
+ }
+
+ /* Before the VXLAN item this ETH item is
+ * the outer MAC, after it the inner MAC.
+ */
+ if (!vxlan_flag)
+ rte_memcpy(&filter->outer_mac,
+ &eth_spec->dst,
+ ETHER_ADDR_LEN);
+ else
+ rte_memcpy(&filter->inner_mac,
+ &eth_spec->dst,
+ ETHER_ADDR_LEN);
+ }
+
+ if (!vxlan_flag) {
+ o_eth_spec = eth_spec;
+ o_eth_mask = eth_mask;
+ } else {
+ i_eth_spec = eth_spec;
+ i_eth_mask = eth_mask;
+ }
+
+ break;
+ case RTE_FLOW_ITEM_TYPE_VLAN:
+ /* A VLAN item is only supported for the inner
+ * frame, i.e. after the VXLAN item.
+ */
+ if (!vxlan_flag) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid vlan item");
+ return -rte_errno;
+ }
+ vlan_spec =
+ (const struct rte_flow_item_vlan *)item->spec;
+ vlan_mask =
+ (const struct rte_flow_item_vlan *)item->mask;
+ if (!(vlan_spec && vlan_mask)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid vlan item");
+ return -rte_errno;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ /* IPv4/IPv6/UDP are used to describe protocol,
+ * spec and mask should be NULL.
+ */
+ if (item->spec || item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid IPv4 item");
+ return -rte_errno;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_VXLAN:
+ vxlan_spec =
+ (const struct rte_flow_item_vxlan *)item->spec;
+ vxlan_mask =
+ (const struct rte_flow_item_vxlan *)item->mask;
+ /* Check if VXLAN item is used to describe protocol.
+ * If yes, both spec and mask should be NULL.
+ * If no, either spec or mask shouldn't be NULL.
+ */
+ if ((!vxlan_spec && vxlan_mask) ||
+ (vxlan_spec && !vxlan_mask)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid VXLAN item");
+ return -rte_errno;
+ }
+
+ /* Check if VNI is masked. */
+ if (vxlan_mask) {
+ is_vni_masked =
+ i40e_check_tenant_id_mask(vxlan_mask->vni);
+ if (is_vni_masked < 0) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid VNI mask");
+ return -rte_errno;
+ }
+ }
+ vxlan_flag = 1;
+ break;
+ default:
+ break;
+ }
+ }
+
+ /* Check specification and mask to get the filter type */
+ if (vlan_spec && vlan_mask &&
+ (vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK))) {
+ /* If there's inner vlan */
+ filter->inner_vlan = rte_be_to_cpu_16(vlan_spec->tci)
+ & I40E_TCI_MASK;
+ if (vxlan_spec && vxlan_mask && !is_vni_masked) {
+ /* If there's vxlan */
+ rte_memcpy(&filter->tenant_id, vxlan_spec->vni,
+ RTE_DIM(vxlan_spec->vni));
+ if (!o_eth_spec && !o_eth_mask &&
+ i_eth_spec && i_eth_mask)
+ filter->filter_type =
+ RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID;
+ else {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL,
+ "Invalid filter type");
+ return -rte_errno;
+ }
+ } else if (!vxlan_spec && !vxlan_mask) {
+ /* If there's no vxlan */
+ if (!o_eth_spec && !o_eth_mask &&
+ i_eth_spec && i_eth_mask)
+ filter->filter_type =
+ RTE_TUNNEL_FILTER_IMAC_IVLAN;
+ else {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL,
+ "Invalid filter type");
+ return -rte_errno;
+ }
+ } else {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL,
+ "Invalid filter type");
+ return -rte_errno;
+ }
+ } else if ((!vlan_spec && !vlan_mask) ||
+ (vlan_spec && vlan_mask && vlan_mask->tci == 0x0)) {
+ /* If there's no inner vlan */
+ if (vxlan_spec && vxlan_mask && !is_vni_masked) {
+ /* If there's vxlan */
+ rte_memcpy(&filter->tenant_id, vxlan_spec->vni,
+ RTE_DIM(vxlan_spec->vni));
+ if (!o_eth_spec && !o_eth_mask &&
+ i_eth_spec && i_eth_mask)
+ filter->filter_type =
+ RTE_TUNNEL_FILTER_IMAC_TENID;
+ else if (o_eth_spec && o_eth_mask &&
+ i_eth_spec && i_eth_mask)
+ filter->filter_type =
+ RTE_TUNNEL_FILTER_OMAC_TENID_IMAC;
+ else {
+ /* No supported MAC combination: reject
+ * instead of returning success with an
+ * unset filter_type.
+ */
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "Invalid filter type");
+ return -rte_errno;
+ }
+ } else if (!vxlan_spec && !vxlan_mask) {
+ /* If there's no vxlan */
+ if (!o_eth_spec && !o_eth_mask &&
+ i_eth_spec && i_eth_mask) {
+ filter->filter_type = ETH_TUNNEL_FILTER_IMAC;
+ } else {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "Invalid filter type");
+ return -rte_errno;
+ }
+ } else {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "Invalid filter type");
+ return -rte_errno;
+ }
+ } else {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "Not supported by tunnel filter.");
+ return -rte_errno;
+ }
+
+ filter->tunnel_type = RTE_TUNNEL_TYPE_VXLAN;
+
+ return 0;
+}
+
+/* Dispatch tunnel pattern parsing; only VXLAN is handled for now. */
+static int
+i40e_flow_parse_tunnel_pattern(__rte_unused struct rte_eth_dev *dev,
+ const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ struct rte_eth_tunnel_filter_conf *filter)
+{
+ return i40e_flow_parse_vxlan_pattern(pattern, error, filter);
+}
+
+/* Top-level tunnel filter parser: fills filter->tunnel_filter from
+ * the flow pattern, the action list and the attributes.
+ * Returns 0 on success; on failure returns the negative value from
+ * the helper that failed, with @error already filled by it.
+ */
+static int
+i40e_flow_parse_tunnel_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter)
+{
+ struct rte_eth_tunnel_filter_conf *tunnel_filter =
+ &filter->tunnel_filter;
+ int ret;
+
+ /* Pattern provides MAC addresses, inner VLAN and VNI. */
+ ret = i40e_flow_parse_tunnel_pattern(dev, pattern,
+ error, tunnel_filter);
+ if (ret)
+ return ret;
+
+ /* Action provides the destination RX queue. */
+ ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
+ if (ret)
+ return ret;
+
+ /* Validate the flow attributes. */
+ ret = i40e_flow_parse_attr(attr, error);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
static int
i40e_flow_validate(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v7 10/17] net/i40e: add flow create function
2017-01-06 5:27 ` [dpdk-dev] [PATCH v7 " Beilei Xing
` (8 preceding siblings ...)
2017-01-06 5:27 ` [dpdk-dev] [PATCH v7 09/17] net/i40e: parse tunnel filter Beilei Xing
@ 2017-01-06 5:27 ` Beilei Xing
2017-01-06 5:27 ` [dpdk-dev] [PATCH v7 11/17] net/i40e: add flow destroy function Beilei Xing
` (7 subsequent siblings)
17 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2017-01-06 5:27 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
This patch adds i40e_flow_create function to create a
rule. It will check if a flow matches ethertype filter
or flow director filter or tunnel filter, if the flow
matches some kind of filter, then set the filter to HW.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_ethdev.c | 16 ++++++---
drivers/net/i40e/i40e_ethdev.h | 21 ++++++++++++
drivers/net/i40e/i40e_fdir.c | 2 +-
drivers/net/i40e/i40e_flow.c | 77 ++++++++++++++++++++++++++++++++++++++++++
4 files changed, 110 insertions(+), 6 deletions(-)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 3a27170..592a74c 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -348,9 +348,6 @@ static int i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
static int i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
struct rte_eth_udp_tunnel *udp_tunnel);
static void i40e_filter_input_set_init(struct i40e_pf *pf);
-static int i40e_ethertype_filter_set(struct i40e_pf *pf,
- struct rte_eth_ethertype_filter *filter,
- bool add);
static int i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
enum rte_filter_op filter_op,
void *arg);
@@ -1373,6 +1370,7 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
struct rte_intr_handle *intr_handle;
struct i40e_hw *hw;
struct i40e_filter_control_settings settings;
+ struct rte_flow *p_flow;
int ret;
uint8_t aq_fail = 0;
@@ -1424,6 +1422,12 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
i40e_rm_tunnel_filter_list(pf);
i40e_rm_fdir_filter_list(pf);
+ /* Remove all flows */
+ while ((p_flow = TAILQ_FIRST(&pf->flow_list))) {
+ TAILQ_REMOVE(&pf->flow_list, p_flow, node);
+ rte_free(p_flow);
+ }
+
return 0;
}
@@ -1488,6 +1492,8 @@ i40e_dev_configure(struct rte_eth_dev *dev)
}
}
+ TAILQ_INIT(&pf->flow_list);
+
return 0;
err_dcb:
@@ -6684,7 +6690,7 @@ i40e_sw_tunnel_filter_del(struct i40e_pf *pf,
return 0;
}
-static int
+int
i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
struct rte_eth_tunnel_filter_conf *tunnel_filter,
uint8_t add)
@@ -8329,7 +8335,7 @@ i40e_sw_ethertype_filter_del(struct i40e_pf *pf,
* Configure ethertype filter, which can director packet by filtering
* with mac address and ether_type or only ether_type
*/
-static int
+int
i40e_ethertype_filter_set(struct i40e_pf *pf,
struct rte_eth_ethertype_filter *filter,
bool add)
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 4597615..3e166ee 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -541,6 +541,17 @@ struct i40e_mirror_rule {
TAILQ_HEAD(i40e_mirror_rule_list, i40e_mirror_rule);
/*
+ * Struct to store flow created.
+ */
+struct rte_flow {
+ TAILQ_ENTRY(rte_flow) node;
+ enum rte_filter_type filter_type;
+ void *rule;
+};
+
+TAILQ_HEAD(i40e_flow_list, rte_flow);
+
+/*
* Structure to store private data specific for PF instance.
*/
struct i40e_pf {
@@ -597,6 +608,7 @@ struct i40e_pf {
bool floating_veb; /* The flag to use the floating VEB */
/* The floating enable flag for the specific VF */
bool floating_veb_list[I40E_MAX_VF];
+ struct i40e_flow_list flow_list;
};
enum pending_msg {
@@ -772,6 +784,15 @@ i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_rule *tunnel_rule,
int i40e_sw_tunnel_filter_del(struct i40e_pf *pf,
struct i40e_tunnel_filter_input *input);
uint64_t i40e_get_default_input_set(uint16_t pctype);
+int i40e_ethertype_filter_set(struct i40e_pf *pf,
+ struct rte_eth_ethertype_filter *filter,
+ bool add);
+int i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
+ const struct rte_eth_fdir_filter *filter,
+ bool add);
+int i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
+ struct rte_eth_tunnel_filter_conf *tunnel_filter,
+ uint8_t add);
#define I40E_DEV_TO_PCI(eth_dev) \
RTE_DEV_TO_PCI((eth_dev)->device)
diff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c
index f89dbc9..91d91aa 100644
--- a/drivers/net/i40e/i40e_fdir.c
+++ b/drivers/net/i40e/i40e_fdir.c
@@ -1099,7 +1099,7 @@ i40e_sw_fdir_filter_del(struct i40e_pf *pf, struct rte_eth_fdir_input *input)
* @filter: fdir filter entry
* @add: 0 - delete, 1 - add
*/
-static int
+int
i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
const struct rte_eth_fdir_filter *filter,
bool add)
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index ed87624..d4d9f50 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -63,6 +63,11 @@ static int i40e_flow_validate(struct rte_eth_dev *dev,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
struct rte_flow_error *error);
+static struct rte_flow *i40e_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error);
static int
i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
const struct rte_flow_item *pattern,
@@ -111,9 +116,11 @@ static int i40e_flow_parse_tunnel_filter(struct rte_eth_dev *dev,
const struct rte_flow_ops i40e_flow_ops = {
.validate = i40e_flow_validate,
+ .create = i40e_flow_create,
};
union i40e_filter_t cons_filter;
+enum rte_filter_type cons_filter_type = RTE_ETH_FILTER_NONE;
/* Pattern matched ethertype filter */
static enum rte_flow_item_type pattern_ethertype[] = {
@@ -615,6 +622,8 @@ i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
if (ret)
return ret;
+ cons_filter_type = RTE_ETH_FILTER_ETHERTYPE;
+
return ret;
}
@@ -1093,6 +1102,8 @@ i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
if (ret)
return ret;
+ cons_filter_type = RTE_ETH_FILTER_FDIR;
+
if (dev->data->dev_conf.fdir_conf.mode !=
RTE_FDIR_MODE_PERFECT) {
rte_flow_error_set(error, ENOTSUP,
@@ -1454,6 +1465,8 @@ i40e_flow_parse_tunnel_filter(struct rte_eth_dev *dev,
if (ret)
return ret;
+ cons_filter_type = RTE_ETH_FILTER_TUNNEL;
+
return ret;
}
@@ -1525,3 +1538,67 @@ i40e_flow_validate(struct rte_eth_dev *dev,
return ret;
}
+
+/* Create a flow rule: validate/parse it, program the corresponding
+ * filter into HW and track it in pf->flow_list.
+ * Returns the new flow handle, or NULL with @error set on failure.
+ */
+static struct rte_flow *
+i40e_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct rte_flow *flow;
+ int ret;
+
+ /* Validate before allocating, so a rejected rule cannot leak
+ * the flow object. On success the parsers have filled
+ * cons_filter and cons_filter_type.
+ */
+ ret = i40e_flow_validate(dev, attr, pattern, actions, error);
+ if (ret < 0)
+ return NULL;
+
+ flow = rte_zmalloc("i40e_flow", sizeof(struct rte_flow), 0);
+ if (!flow) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to allocate memory");
+ return NULL;
+ }
+
+ switch (cons_filter_type) {
+ case RTE_ETH_FILTER_ETHERTYPE:
+ ret = i40e_ethertype_filter_set(pf,
+ &cons_filter.ethertype_filter, 1);
+ if (ret)
+ goto free_flow;
+ /* The set call appended the SW rule to the list tail. */
+ flow->rule = TAILQ_LAST(&pf->ethertype.ethertype_list,
+ i40e_ethertype_filter_list);
+ break;
+ case RTE_ETH_FILTER_FDIR:
+ ret = i40e_add_del_fdir_filter(dev,
+ &cons_filter.fdir_filter, 1);
+ if (ret)
+ goto free_flow;
+ flow->rule = TAILQ_LAST(&pf->fdir.fdir_list,
+ i40e_fdir_filter_list);
+ break;
+ case RTE_ETH_FILTER_TUNNEL:
+ ret = i40e_dev_tunnel_filter_set(pf,
+ &cons_filter.tunnel_filter, 1);
+ if (ret)
+ goto free_flow;
+ flow->rule = TAILQ_LAST(&pf->tunnel.tunnel_list,
+ i40e_tunnel_filter_list);
+ break;
+ default:
+ goto free_flow;
+ }
+
+ flow->filter_type = cons_filter_type;
+ TAILQ_INSERT_TAIL(&pf->flow_list, flow, node);
+ return flow;
+
+free_flow:
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to create flow.");
+ rte_free(flow);
+ return NULL;
+}
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v7 11/17] net/i40e: add flow destroy function
2017-01-06 5:27 ` [dpdk-dev] [PATCH v7 " Beilei Xing
` (9 preceding siblings ...)
2017-01-06 5:27 ` [dpdk-dev] [PATCH v7 10/17] net/i40e: add flow create function Beilei Xing
@ 2017-01-06 5:27 ` Beilei Xing
2017-01-06 5:27 ` [dpdk-dev] [PATCH v7 12/17] net/i40e: destroy ethertype filter Beilei Xing
` (6 subsequent siblings)
17 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2017-01-06 5:27 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
This patch adds i40e_flow_destroy function to destroy
a flow for users.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_flow.c | 32 ++++++++++++++++++++++++++++++++
1 file changed, 32 insertions(+)
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index d4d9f50..5225948 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -68,6 +68,9 @@ static struct rte_flow *i40e_flow_create(struct rte_eth_dev *dev,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
struct rte_flow_error *error);
+static int i40e_flow_destroy(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ struct rte_flow_error *error);
static int
i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
const struct rte_flow_item *pattern,
@@ -117,6 +120,7 @@ static int i40e_flow_parse_tunnel_filter(struct rte_eth_dev *dev,
const struct rte_flow_ops i40e_flow_ops = {
.validate = i40e_flow_validate,
.create = i40e_flow_create,
+ .destroy = i40e_flow_destroy,
};
union i40e_filter_t cons_filter;
@@ -1602,3 +1606,31 @@ i40e_flow_create(struct rte_eth_dev *dev,
rte_free(flow);
return NULL;
}
+
+/* Destroy a flow created by i40e_flow_create() and remove it from
+ * pf->flow_list. In this patch no filter type is handled yet; the
+ * type-specific destroy handlers are added by follow-up patches.
+ * Returns 0 on success, negative value (with @error set) otherwise.
+ */
+static int
+i40e_flow_destroy(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ enum rte_filter_type filter_type = flow->filter_type;
+ int ret = 0;
+
+ switch (filter_type) {
+ default:
+ PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
+ filter_type);
+ ret = -EINVAL;
+ break;
+ }
+
+ /* Unlink and free the flow only after the HW filter is gone. */
+ if (!ret) {
+ TAILQ_REMOVE(&pf->flow_list, flow, node);
+ rte_free(flow);
+ } else
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to destroy flow.");
+
+ return ret;
+}
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v7 12/17] net/i40e: destroy ethertype filter
2017-01-06 5:27 ` [dpdk-dev] [PATCH v7 " Beilei Xing
` (10 preceding siblings ...)
2017-01-06 5:27 ` [dpdk-dev] [PATCH v7 11/17] net/i40e: add flow destroy function Beilei Xing
@ 2017-01-06 5:27 ` Beilei Xing
2017-01-06 5:27 ` [dpdk-dev] [PATCH v7 13/17] net/i40e: destroy tunnel filter Beilei Xing
` (5 subsequent siblings)
17 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2017-01-06 5:27 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
This patch adds the i40e_flow_destroy_ethertype_filter
function to destroy an ethertype filter for users.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_flow.c | 41 +++++++++++++++++++++++++++++++++++++++++
1 file changed, 41 insertions(+)
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index 5225948..ca97682 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -116,6 +116,8 @@ static int i40e_flow_parse_tunnel_filter(struct rte_eth_dev *dev,
const struct rte_flow_action actions[],
struct rte_flow_error *error,
union i40e_filter_t *filter);
+static int i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
+ struct i40e_ethertype_filter *filter);
const struct rte_flow_ops i40e_flow_ops = {
.validate = i40e_flow_validate,
@@ -1617,6 +1619,10 @@ i40e_flow_destroy(struct rte_eth_dev *dev,
int ret = 0;
switch (filter_type) {
+ case RTE_ETH_FILTER_ETHERTYPE:
+ ret = i40e_flow_destroy_ethertype_filter(pf,
+ (struct i40e_ethertype_filter *)flow->rule);
+ break;
default:
PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
filter_type);
@@ -1634,3 +1640,38 @@ i40e_flow_destroy(struct rte_eth_dev *dev,
return ret;
}
+
+/* Remove an ethertype filter from HW and from the PMD's SW list.
+ *
+ * Returns 0 on success, the admin-queue error on HW failure, or
+ * -EINVAL if the filter is not found in the SW list.
+ */
+static int
+i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
+ struct i40e_ethertype_filter *filter)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
+ struct i40e_ethertype_filter *node;
+ struct i40e_control_filter_stats stats;
+ uint16_t flags = 0;
+ int ret = 0;
+
+ /* Rebuild AQ flags from the stored filter flags.
+ * NOTE(review): presumably must mirror the flags used when the
+ * filter was added — verify against i40e_ethertype_filter_set.
+ */
+ if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
+ flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
+ if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
+ flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
+ flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
+
+ memset(&stats, 0, sizeof(stats));
+ ret = i40e_aq_add_rem_control_packet_filter(hw,
+ filter->input.mac_addr.addr_bytes,
+ filter->input.ether_type,
+ flags, pf->main_vsi->seid,
+ filter->queue, 0, &stats, NULL);
+ if (ret < 0)
+ return ret;
+
+ /* Drop the matching node from the SW shadow list. */
+ node = i40e_sw_ethertype_filter_lookup(ethertype_rule, &filter->input);
+ if (!node)
+ return -EINVAL;
+
+ ret = i40e_sw_ethertype_filter_del(pf, &node->input);
+
+ return ret;
+}
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v7 13/17] net/i40e: destroy tunnel filter
2017-01-06 5:27 ` [dpdk-dev] [PATCH v7 " Beilei Xing
` (11 preceding siblings ...)
2017-01-06 5:27 ` [dpdk-dev] [PATCH v7 12/17] net/i40e: destroy ethertype filter Beilei Xing
@ 2017-01-06 5:27 ` Beilei Xing
2017-01-06 5:27 ` [dpdk-dev] [PATCH v7 14/17] net/i40e: destroy flow directory filter Beilei Xing
` (4 subsequent siblings)
17 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2017-01-06 5:27 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
This patch adds i40e_flow_destroy_tunnel_filter
function to destroy a tunnel filter for users.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_flow.c | 41 +++++++++++++++++++++++++++++++++++++++++
1 file changed, 41 insertions(+)
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index ca97682..c090d84 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -118,6 +118,8 @@ static int i40e_flow_parse_tunnel_filter(struct rte_eth_dev *dev,
union i40e_filter_t *filter);
static int i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
struct i40e_ethertype_filter *filter);
+static int i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
+ struct i40e_tunnel_filter *filter);
const struct rte_flow_ops i40e_flow_ops = {
.validate = i40e_flow_validate,
@@ -1623,6 +1625,10 @@ i40e_flow_destroy(struct rte_eth_dev *dev,
ret = i40e_flow_destroy_ethertype_filter(pf,
(struct i40e_ethertype_filter *)flow->rule);
break;
+ case RTE_ETH_FILTER_TUNNEL:
+ ret = i40e_flow_destroy_tunnel_filter(pf,
+ (struct i40e_tunnel_filter *)flow->rule);
+ break;
default:
PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
filter_type);
@@ -1675,3 +1681,38 @@ i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
return ret;
}
+
+/* Remove a tunnel (cloud) filter from HW and from the PMD's SW list.
+ *
+ * Rebuilds the AQ cloud-filter element from the stored input and
+ * asks the FW to remove it, then drops the matching node from
+ * pf->tunnel. Returns 0 on success, the admin-queue error on HW
+ * failure, or -EINVAL if the filter is not found in the SW list.
+ */
+static int
+i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
+ struct i40e_tunnel_filter *filter)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_vsi *vsi = pf->main_vsi;
+ struct i40e_aqc_add_remove_cloud_filters_element_data cld_filter;
+ struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
+ struct i40e_tunnel_filter *node;
+ int ret = 0;
+
+ memset(&cld_filter, 0, sizeof(cld_filter));
+ ether_addr_copy((struct ether_addr *)&filter->input.outer_mac,
+ (struct ether_addr *)&cld_filter.outer_mac);
+ ether_addr_copy((struct ether_addr *)&filter->input.inner_mac,
+ (struct ether_addr *)&cld_filter.inner_mac);
+ cld_filter.inner_vlan = filter->input.inner_vlan;
+ cld_filter.flags = filter->input.flags;
+ cld_filter.tenant_id = filter->input.tenant_id;
+ cld_filter.queue_number = filter->queue;
+
+ ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
+ &cld_filter, 1);
+ if (ret < 0)
+ return ret;
+
+ /* Drop the matching node from the SW shadow list. */
+ node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &filter->input);
+ if (!node)
+ return -EINVAL;
+
+ ret = i40e_sw_tunnel_filter_del(pf, &node->input);
+
+ return ret;
+}
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v7 14/17] net/i40e: destroy flow directory filter
2017-01-06 5:27 ` [dpdk-dev] [PATCH v7 " Beilei Xing
` (12 preceding siblings ...)
2017-01-06 5:27 ` [dpdk-dev] [PATCH v7 13/17] net/i40e: destroy tunnel filter Beilei Xing
@ 2017-01-06 5:27 ` Beilei Xing
2017-01-06 5:27 ` [dpdk-dev] [PATCH v7 15/17] net/i40e: add flow flush function Beilei Xing
` (3 subsequent siblings)
17 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2017-01-06 5:27 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
This patch supports destroying a flow director filter
for users.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_flow.c | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index c090d84..e33da2d 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -1629,6 +1629,10 @@ i40e_flow_destroy(struct rte_eth_dev *dev,
ret = i40e_flow_destroy_tunnel_filter(pf,
(struct i40e_tunnel_filter *)flow->rule);
break;
+ case RTE_ETH_FILTER_FDIR:
+ ret = i40e_add_del_fdir_filter(dev,
+ &((struct i40e_fdir_filter *)flow->rule)->fdir, 0);
+ break;
default:
PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
filter_type);
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v7 15/17] net/i40e: add flow flush function
2017-01-06 5:27 ` [dpdk-dev] [PATCH v7 " Beilei Xing
` (13 preceding siblings ...)
2017-01-06 5:27 ` [dpdk-dev] [PATCH v7 14/17] net/i40e: destroy flow directory filter Beilei Xing
@ 2017-01-06 5:27 ` Beilei Xing
2017-01-06 5:27 ` [dpdk-dev] [PATCH v7 16/17] net/i40e: flush ethertype filters Beilei Xing
` (2 subsequent siblings)
17 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2017-01-06 5:27 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
This patch adds the i40e_flow_flush function to flush all
filters for users. The flow director flush function
is included first.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_ethdev.h | 1 +
drivers/net/i40e/i40e_fdir.c | 4 +---
drivers/net/i40e/i40e_flow.c | 51 ++++++++++++++++++++++++++++++++++++++++++
3 files changed, 53 insertions(+), 3 deletions(-)
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 3e166ee..ebe58a8 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -793,6 +793,7 @@ int i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
int i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
struct rte_eth_tunnel_filter_conf *tunnel_filter,
uint8_t add);
+int i40e_fdir_flush(struct rte_eth_dev *dev);
#define I40E_DEV_TO_PCI(eth_dev) \
RTE_DEV_TO_PCI((eth_dev)->device)
diff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c
index 91d91aa..67d63ff 100644
--- a/drivers/net/i40e/i40e_fdir.c
+++ b/drivers/net/i40e/i40e_fdir.c
@@ -119,8 +119,6 @@ static int i40e_fdir_filter_programming(struct i40e_pf *pf,
enum i40e_filter_pctype pctype,
const struct rte_eth_fdir_filter *filter,
bool add);
-static int i40e_fdir_flush(struct rte_eth_dev *dev);
-
static int i40e_fdir_filter_convert(const struct rte_eth_fdir_filter *input,
struct i40e_fdir_filter *filter);
static struct i40e_fdir_filter *
@@ -1325,7 +1323,7 @@ i40e_fdir_filter_programming(struct i40e_pf *pf,
* i40e_fdir_flush - clear all filters of Flow Director table
* @pf: board private structure
*/
-static int
+int
i40e_fdir_flush(struct rte_eth_dev *dev)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index e33da2d..e425fe8 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -71,6 +71,8 @@ static struct rte_flow *i40e_flow_create(struct rte_eth_dev *dev,
static int i40e_flow_destroy(struct rte_eth_dev *dev,
struct rte_flow *flow,
struct rte_flow_error *error);
+static int i40e_flow_flush(struct rte_eth_dev *dev,
+ struct rte_flow_error *error);
static int
i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
const struct rte_flow_item *pattern,
@@ -120,11 +122,13 @@ static int i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
struct i40e_ethertype_filter *filter);
static int i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
struct i40e_tunnel_filter *filter);
+static int i40e_flow_flush_fdir_filter(struct i40e_pf *pf);
const struct rte_flow_ops i40e_flow_ops = {
.validate = i40e_flow_validate,
.create = i40e_flow_create,
.destroy = i40e_flow_destroy,
+ .flush = i40e_flow_flush,
};
union i40e_filter_t cons_filter;
@@ -1720,3 +1724,50 @@ i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
return ret;
}
+
+static int
+i40e_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ int ret;
+
+ ret = i40e_flow_flush_fdir_filter(pf);
+ if (ret)
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to flush FDIR flows.");
+
+ return ret;
+}
+
+static int
+i40e_flow_flush_fdir_filter(struct i40e_pf *pf)
+{
+ struct rte_eth_dev *dev = pf->adapter->eth_dev;
+ struct i40e_fdir_info *fdir_info = &pf->fdir;
+ struct i40e_fdir_filter *fdir_filter;
+ struct rte_flow *flow;
+ void *temp;
+ int ret;
+
+ ret = i40e_fdir_flush(dev);
+ if (!ret) {
+ /* Delete FDIR filters in FDIR list. */
+ while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
+ ret = i40e_sw_fdir_filter_del(pf,
+ &fdir_filter->fdir.input);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Delete FDIR flows in flow list. */
+ TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
+ if (flow->filter_type == RTE_ETH_FILTER_FDIR) {
+ TAILQ_REMOVE(&pf->flow_list, flow, node);
+ rte_free(flow);
+ }
+ }
+ }
+
+ return ret;
+}
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v7 16/17] net/i40e: flush ethertype filters
2017-01-06 5:27 ` [dpdk-dev] [PATCH v7 " Beilei Xing
` (14 preceding siblings ...)
2017-01-06 5:27 ` [dpdk-dev] [PATCH v7 15/17] net/i40e: add flow flush function Beilei Xing
@ 2017-01-06 5:27 ` Beilei Xing
2017-01-06 5:27 ` [dpdk-dev] [PATCH v7 17/17] net/i40e: flush tunnel filters Beilei Xing
2017-01-06 11:54 ` [dpdk-dev] [PATCH v7 00/17] net/i40e: consistent filter API Ferruh Yigit
17 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2017-01-06 5:27 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
This patch adds i40e_flow_flush_ethertype_filter
function to flush all ethertype filters, including
filters in SW and HW.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_flow.c | 41 ++++++++++++++++++++++++++++++++++++++++-
1 file changed, 40 insertions(+), 1 deletion(-)
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index e425fe8..dc5e655 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -123,6 +123,7 @@ static int i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
static int i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
struct i40e_tunnel_filter *filter);
static int i40e_flow_flush_fdir_filter(struct i40e_pf *pf);
+static int i40e_flow_flush_ethertype_filter(struct i40e_pf *pf);
const struct rte_flow_ops i40e_flow_ops = {
.validate = i40e_flow_validate,
@@ -1732,10 +1733,20 @@ i40e_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
int ret;
ret = i40e_flow_flush_fdir_filter(pf);
- if (ret)
+ if (ret) {
rte_flow_error_set(error, -ret,
RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
"Failed to flush FDIR flows.");
+ return -rte_errno;
+ }
+
+ ret = i40e_flow_flush_ethertype_filter(pf);
+ if (ret) {
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to flush ethertype flows.");
+ return -rte_errno;
+ }
return ret;
}
@@ -1771,3 +1782,31 @@ i40e_flow_flush_fdir_filter(struct i40e_pf *pf)
return ret;
}
+
+/* Flush all ethertype filters */
+static int
+i40e_flow_flush_ethertype_filter(struct i40e_pf *pf)
+{
+ struct i40e_ethertype_filter_list
+ *ethertype_list = &pf->ethertype.ethertype_list;
+ struct i40e_ethertype_filter *filter;
+ struct rte_flow *flow;
+ void *temp;
+ int ret = 0;
+
+ while ((filter = TAILQ_FIRST(ethertype_list))) {
+ ret = i40e_flow_destroy_ethertype_filter(pf, filter);
+ if (ret)
+ return ret;
+ }
+
+ /* Delete ethertype flows in flow list. */
+ TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
+ if (flow->filter_type == RTE_ETH_FILTER_ETHERTYPE) {
+ TAILQ_REMOVE(&pf->flow_list, flow, node);
+ rte_free(flow);
+ }
+ }
+
+ return ret;
+}
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* [dpdk-dev] [PATCH v7 17/17] net/i40e: flush tunnel filters
2017-01-06 5:27 ` [dpdk-dev] [PATCH v7 " Beilei Xing
` (15 preceding siblings ...)
2017-01-06 5:27 ` [dpdk-dev] [PATCH v7 16/17] net/i40e: flush ethertype filters Beilei Xing
@ 2017-01-06 5:27 ` Beilei Xing
2017-01-06 11:54 ` [dpdk-dev] [PATCH v7 00/17] net/i40e: consistent filter API Ferruh Yigit
17 siblings, 0 replies; 175+ messages in thread
From: Beilei Xing @ 2017-01-06 5:27 UTC (permalink / raw)
To: jingjing.wu, helin.zhang; +Cc: dev
This patch adds i40e_flow_flush_tunnel_filter
function to flush all tunnel filters, including
filters in SW and HW.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/i40e/i40e_flow.c | 37 +++++++++++++++++++++++++++++++++++++
1 file changed, 37 insertions(+)
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index dc5e655..76bb332 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -124,6 +124,7 @@ static int i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
struct i40e_tunnel_filter *filter);
static int i40e_flow_flush_fdir_filter(struct i40e_pf *pf);
static int i40e_flow_flush_ethertype_filter(struct i40e_pf *pf);
+static int i40e_flow_flush_tunnel_filter(struct i40e_pf *pf);
const struct rte_flow_ops i40e_flow_ops = {
.validate = i40e_flow_validate,
@@ -1748,6 +1749,14 @@ i40e_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
return -rte_errno;
}
+ ret = i40e_flow_flush_tunnel_filter(pf);
+ if (ret) {
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to flush tunnel flows.");
+ return -rte_errno;
+ }
+
return ret;
}
@@ -1810,3 +1819,31 @@ i40e_flow_flush_ethertype_filter(struct i40e_pf *pf)
return ret;
}
+
+/* Flush all tunnel filters */
+static int
+i40e_flow_flush_tunnel_filter(struct i40e_pf *pf)
+{
+ struct i40e_tunnel_filter_list
+ *tunnel_list = &pf->tunnel.tunnel_list;
+ struct i40e_tunnel_filter *filter;
+ struct rte_flow *flow;
+ void *temp;
+ int ret = 0;
+
+ while ((filter = TAILQ_FIRST(tunnel_list))) {
+ ret = i40e_flow_destroy_tunnel_filter(pf, filter);
+ if (ret)
+ return ret;
+ }
+
+ /* Delete tunnel flows in flow list. */
+ TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
+ if (flow->filter_type == RTE_ETH_FILTER_TUNNEL) {
+ TAILQ_REMOVE(&pf->flow_list, flow, node);
+ rte_free(flow);
+ }
+ }
+
+ return ret;
+}
--
2.5.5
^ permalink raw reply [flat|nested] 175+ messages in thread
* Re: [dpdk-dev] [PATCH v7 00/17] net/i40e: consistent filter API
2017-01-06 5:27 ` [dpdk-dev] [PATCH v7 " Beilei Xing
` (16 preceding siblings ...)
2017-01-06 5:27 ` [dpdk-dev] [PATCH v7 17/17] net/i40e: flush tunnel filters Beilei Xing
@ 2017-01-06 11:54 ` Ferruh Yigit
17 siblings, 0 replies; 175+ messages in thread
From: Ferruh Yigit @ 2017-01-06 11:54 UTC (permalink / raw)
To: Beilei Xing, jingjing.wu, helin.zhang; +Cc: dev
On 1/6/2017 5:27 AM, Beilei Xing wrote:
> The patch set depends on Adrien's Generic flow API(rte_flow).
>
> The patches mainly finish following functions:
> 1) Store and restore all kinds of filters.
> 2) Parse all kinds of filters.
> 3) Add flow validate function.
> 4) Add flow create function.
> 5) Add flow destroy function.
> 6) Add flow flush function.
>
> v7 changes:
> Separate filter related code from eth_i40e_dev_init().
> Change struct i40e_flow to rte_flow.
>
> v6 changes:
> Change functions' name to be more readable.
> Add comments for parse_pattern functions to list supported rules.
> Add comments for parse_action functions to list supported actions.
> Add ETHTYPE check when parsing ethertype pattern.
>
> v5 changes:
> Change some local variable name.
> Add removing i40e_flow_list during device uninit.
> Fix compile error when gcc compile option isn't '-O0'.
>
> v4 changes:
> Change I40E_TCI_MASK with 0xFFFF to align with testpmd.
> Modify the stats shown when restoring filters.
>
> v3 changes:
> Set the related cause pointer to a non-NULL value when error happens.
> Change return value when error happens.
> Modify filter_del parameter with key.
> Malloc filter after checking when delete a filter.
> Delete meaningless initialization.
> Add return value when there's error.
> Change global variable definition.
> Modify some function declaration.
>
> v2 changes:
> Add i40e_flow.c, all flow ops are implemented in the file.
> Change the whole implementation of all parse flow functions.
> Update error info for all flow ops.
> Add flow_list to store flows created.
>
> Beilei Xing (17):
> net/i40e: store ethertype filter
> net/i40e: store tunnel filter
> net/i40e: store flow director filter
> net/i40e: restore ethertype filter
> net/i40e: restore tunnel filter
> net/i40e: restore flow director filter
> net/i40e: add flow validate function
> net/i40e: parse flow director filter
> net/i40e: parse tunnel filter
> net/i40e: add flow create function
> net/i40e: add flow destroy function
> net/i40e: destroy ethertype filter
> net/i40e: destroy tunnel filter
> net/i40e: destroy flow directory filter
> net/i40e: add flow flush function
> net/i40e: flush ethertype filters
> net/i40e: flush tunnel filters
>
> drivers/net/i40e/Makefile | 2 +
> drivers/net/i40e/i40e_ethdev.c | 594 +++++++++++--
> drivers/net/i40e/i40e_ethdev.h | 178 ++++
> drivers/net/i40e/i40e_fdir.c | 140 ++-
> drivers/net/i40e/i40e_flow.c | 1849 ++++++++++++++++++++++++++++++++++++++++
> 5 files changed, 2692 insertions(+), 71 deletions(-)
> create mode 100644 drivers/net/i40e/i40e_flow.c
>
> Acked-by: Jingjing Wu <jingjing.wu@intel.com>
>
Series applied to dpdk-next-net/master, thanks.
(extern moved into header file)
^ permalink raw reply [flat|nested] 175+ messages in thread
end of thread, other threads:[~2017-01-06 11:54 UTC | newest]
Thread overview: 175+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2016-12-02 11:53 [dpdk-dev] [PATCH 00/24] net/i40e: Consistent filter API Beilei Xing
2016-12-02 11:53 ` [dpdk-dev] [PATCH 01/24] net/i40e: store ethertype filter Beilei Xing
2016-12-02 11:53 ` [dpdk-dev] [PATCH 02/24] net/i40e: store tunnel filter Beilei Xing
2016-12-02 11:53 ` [dpdk-dev] [PATCH 03/24] net/i40e: store flow director filter Beilei Xing
2016-12-02 11:53 ` [dpdk-dev] [PATCH 04/24] net/i40e: store RSS hash info Beilei Xing
2016-12-02 11:53 ` [dpdk-dev] [PATCH 05/24] net/i40e: restore ethertype filter Beilei Xing
2016-12-02 11:53 ` [dpdk-dev] [PATCH 06/24] net/i40e: restore macvlan filter Beilei Xing
2016-12-02 11:53 ` [dpdk-dev] [PATCH 07/24] net/i40e: restore tunnel filter Beilei Xing
2016-12-02 11:53 ` [dpdk-dev] [PATCH 08/24] net/i40e: restore flow director filter Beilei Xing
2016-12-02 11:53 ` [dpdk-dev] [PATCH 09/24] net/i40e: restore RSS hash info Beilei Xing
2016-12-02 11:53 ` [dpdk-dev] [PATCH 10/24] ethdev: parse ethertype filter Beilei Xing
2016-12-20 18:12 ` Ferruh Yigit
2016-12-21 3:54 ` Xing, Beilei
2016-12-23 8:43 ` Adrien Mazarguil
2016-12-27 6:36 ` Xing, Beilei
2016-12-02 11:53 ` [dpdk-dev] [PATCH 11/24] net/i40e: add flow validate function Beilei Xing
2016-12-02 11:53 ` [dpdk-dev] [PATCH 12/24] net/i40e: parse macvlan filter Beilei Xing
2016-12-02 11:53 ` [dpdk-dev] [PATCH 13/24] net/i40e: parse VXLAN filter Beilei Xing
2016-12-02 11:53 ` [dpdk-dev] [PATCH 14/24] net/i40e: parse NVGRE filter Beilei Xing
2016-12-02 11:53 ` [dpdk-dev] [PATCH 15/24] net/i40e: parse flow director filter Beilei Xing
2016-12-02 11:53 ` [dpdk-dev] [PATCH 16/24] net/i40e: add flow create function Beilei Xing
2016-12-02 11:53 ` [dpdk-dev] [PATCH 17/24] net/i40e: destroy ethertype filter Beilei Xing
2016-12-02 11:53 ` [dpdk-dev] [PATCH 18/24] net/i40e: destroy macvlan filter Beilei Xing
2016-12-02 11:53 ` [dpdk-dev] [PATCH 19/24] net/i40e: destroy tunnel filter Beilei Xing
2016-12-02 11:53 ` [dpdk-dev] [PATCH 20/24] net/i40e: destroy flow directory filter Beilei Xing
2016-12-02 11:53 ` [dpdk-dev] [PATCH 21/24] net/i40e: add flow flush function Beilei Xing
2016-12-02 11:53 ` [dpdk-dev] [PATCH 22/24] net/i40e: flush ethertype filters Beilei Xing
2016-12-02 11:53 ` [dpdk-dev] [PATCH 23/24] net/i40e: flush macvlan filters Beilei Xing
2016-12-02 11:53 ` [dpdk-dev] [PATCH 24/24] net/i40e: flush tunnel filters Beilei Xing
2016-12-27 6:26 ` [dpdk-dev] [PATCH v2 00/17] net/i40e: Consistent filter API Beilei Xing
2016-12-27 6:26 ` [dpdk-dev] [PATCH v2 01/17] net/i40e: store ethertype filter Beilei Xing
2016-12-28 2:22 ` Wu, Jingjing
2016-12-29 4:03 ` Xing, Beilei
2016-12-29 4:36 ` Xing, Beilei
2016-12-28 3:22 ` Tiwei Bie
2016-12-27 6:26 ` [dpdk-dev] [PATCH v2 02/17] net/i40e: store tunnel filter Beilei Xing
2016-12-28 3:27 ` Tiwei Bie
2016-12-27 6:26 ` [dpdk-dev] [PATCH v2 03/17] net/i40e: store flow director filter Beilei Xing
2016-12-28 3:38 ` Tiwei Bie
2016-12-28 7:10 ` Xing, Beilei
2016-12-28 7:14 ` Tiwei Bie
2016-12-28 7:36 ` Tiwei Bie
2016-12-27 6:26 ` [dpdk-dev] [PATCH v2 04/17] net/i40e: restore ethertype filter Beilei Xing
2016-12-28 2:25 ` Wu, Jingjing
2016-12-27 6:26 ` [dpdk-dev] [PATCH v2 05/17] net/i40e: restore tunnel filter Beilei Xing
2016-12-27 6:26 ` [dpdk-dev] [PATCH v2 06/17] net/i40e: restore flow director filter Beilei Xing
2016-12-27 6:26 ` [dpdk-dev] [PATCH v2 07/17] net/i40e: add flow validate function Beilei Xing
2016-12-27 12:40 ` Adrien Mazarguil
2016-12-28 9:00 ` Xing, Beilei
2016-12-28 9:29 ` Adrien Mazarguil
2016-12-28 10:03 ` Xing, Beilei
2016-12-28 2:52 ` Wu, Jingjing
2016-12-28 7:44 ` Xing, Beilei
2016-12-28 4:08 ` Tiwei Bie
2016-12-27 6:26 ` [dpdk-dev] [PATCH v2 08/17] net/i40e: parse flow director filter Beilei Xing
2016-12-27 6:26 ` [dpdk-dev] [PATCH v2 09/17] net/i40e: parse tunnel filter Beilei Xing
2016-12-27 6:26 ` [dpdk-dev] [PATCH v2 10/17] net/i40e: add flow create function Beilei Xing
2016-12-27 6:26 ` [dpdk-dev] [PATCH v2 11/17] net/i40e: add flow destroy function Beilei Xing
2016-12-27 6:26 ` [dpdk-dev] [PATCH v2 12/17] net/i40e: destroy ethertype filter Beilei Xing
2016-12-28 3:30 ` Wu, Jingjing
2016-12-28 7:29 ` Xing, Beilei
2016-12-28 4:56 ` Tiwei Bie
2016-12-28 6:57 ` Xing, Beilei
2016-12-27 6:26 ` [dpdk-dev] [PATCH v2 13/17] net/i40e: destroy tunnel filter Beilei Xing
2016-12-27 6:26 ` [dpdk-dev] [PATCH v2 14/17] net/i40e: destroy flow directory filter Beilei Xing
2016-12-27 6:26 ` [dpdk-dev] [PATCH v2 15/17] net/i40e: add flow flush function Beilei Xing
2016-12-27 12:40 ` Adrien Mazarguil
2016-12-28 8:02 ` Xing, Beilei
2016-12-28 5:35 ` Tiwei Bie
2016-12-28 6:48 ` Xing, Beilei
2016-12-28 7:00 ` Tiwei Bie
2016-12-28 7:20 ` Xing, Beilei
2016-12-27 6:26 ` [dpdk-dev] [PATCH v2 16/17] net/i40e: flush ethertype filters Beilei Xing
2016-12-27 6:26 ` [dpdk-dev] [PATCH v2 17/17] net/i40e: flush tunnel filters Beilei Xing
2016-12-29 16:04 ` [dpdk-dev] [PATCH v3 00/17] net/i40e: consistent filter API Beilei Xing
2016-12-29 16:04 ` [dpdk-dev] [PATCH v3 01/17] net/i40e: store ethertype filter Beilei Xing
2016-12-29 16:04 ` [dpdk-dev] [PATCH v3 02/17] net/i40e: store tunnel filter Beilei Xing
2016-12-29 16:04 ` [dpdk-dev] [PATCH v3 03/17] net/i40e: store flow director filter Beilei Xing
2016-12-29 16:04 ` [dpdk-dev] [PATCH v3 04/17] net/i40e: restore ethertype filter Beilei Xing
2016-12-29 16:04 ` [dpdk-dev] [PATCH v3 05/17] net/i40e: restore tunnel filter Beilei Xing
2016-12-29 16:04 ` [dpdk-dev] [PATCH v3 06/17] net/i40e: restore flow director filter Beilei Xing
2016-12-29 16:04 ` [dpdk-dev] [PATCH v3 07/17] net/i40e: add flow validate function Beilei Xing
2016-12-29 16:04 ` [dpdk-dev] [PATCH v3 08/17] net/i40e: parse flow director filter Beilei Xing
2016-12-29 16:04 ` [dpdk-dev] [PATCH v3 09/17] net/i40e: parse tunnel filter Beilei Xing
2016-12-29 16:04 ` [dpdk-dev] [PATCH v3 10/17] net/i40e: add flow create function Beilei Xing
2016-12-29 16:04 ` [dpdk-dev] [PATCH v3 11/17] net/i40e: add flow destroy function Beilei Xing
2016-12-29 16:04 ` [dpdk-dev] [PATCH v3 12/17] net/i40e: destroy ethertype filter Beilei Xing
2016-12-29 16:04 ` [dpdk-dev] [PATCH v3 13/17] net/i40e: destroy tunnel filter Beilei Xing
2016-12-29 16:04 ` [dpdk-dev] [PATCH v3 14/17] net/i40e: destroy flow directory filter Beilei Xing
2016-12-29 16:04 ` [dpdk-dev] [PATCH v3 15/17] net/i40e: add flow flush function Beilei Xing
2016-12-29 16:04 ` [dpdk-dev] [PATCH v3 16/17] net/i40e: flush ethertype filters Beilei Xing
2016-12-29 16:04 ` [dpdk-dev] [PATCH v3 17/17] net/i40e: flush tunnel filters Beilei Xing
2016-12-30 3:25 ` [dpdk-dev] [PATCH v4 00/17] net/i40e: consistent filter API Beilei Xing
2016-12-30 3:25 ` [dpdk-dev] [PATCH v4 01/17] net/i40e: store ethertype filter Beilei Xing
2016-12-30 3:25 ` [dpdk-dev] [PATCH v4 02/17] net/i40e: store tunnel filter Beilei Xing
2016-12-30 3:25 ` [dpdk-dev] [PATCH v4 03/17] net/i40e: store flow director filter Beilei Xing
2016-12-30 3:25 ` [dpdk-dev] [PATCH v4 04/17] net/i40e: restore ethertype filter Beilei Xing
2016-12-30 3:25 ` [dpdk-dev] [PATCH v4 05/17] net/i40e: restore tunnel filter Beilei Xing
2016-12-30 3:25 ` [dpdk-dev] [PATCH v4 06/17] net/i40e: restore flow director filter Beilei Xing
2016-12-30 3:25 ` [dpdk-dev] [PATCH v4 07/17] net/i40e: add flow validate function Beilei Xing
2016-12-30 3:25 ` [dpdk-dev] [PATCH v4 08/17] net/i40e: parse flow director filter Beilei Xing
2016-12-30 3:25 ` [dpdk-dev] [PATCH v4 09/17] net/i40e: parse tunnel filter Beilei Xing
2016-12-30 3:25 ` [dpdk-dev] [PATCH v4 10/17] net/i40e: add flow create function Beilei Xing
2016-12-30 3:25 ` [dpdk-dev] [PATCH v4 11/17] net/i40e: add flow destroy function Beilei Xing
2016-12-30 3:25 ` [dpdk-dev] [PATCH v4 12/17] net/i40e: destroy ethertype filter Beilei Xing
2016-12-30 3:25 ` [dpdk-dev] [PATCH v4 13/17] net/i40e: destroy tunnel filter Beilei Xing
2016-12-30 3:25 ` [dpdk-dev] [PATCH v4 14/17] net/i40e: destroy flow directory filter Beilei Xing
2016-12-30 3:25 ` [dpdk-dev] [PATCH v4 15/17] net/i40e: add flow flush function Beilei Xing
2016-12-30 3:25 ` [dpdk-dev] [PATCH v4 16/17] net/i40e: flush ethertype filters Beilei Xing
2016-12-30 3:25 ` [dpdk-dev] [PATCH v4 17/17] net/i40e: flush tunnel filters Beilei Xing
2017-01-03 3:25 ` Guo, Jia
2017-01-03 4:49 ` Xing, Beilei
2017-01-04 3:22 ` [dpdk-dev] [PATCH v5 00/17] net/i40e: consistent filter API Beilei Xing
2017-01-04 3:22 ` [dpdk-dev] [PATCH v5 01/17] net/i40e: store ethertype filter Beilei Xing
2017-01-04 3:22 ` [dpdk-dev] [PATCH v5 02/17] net/i40e: store tunnel filter Beilei Xing
2017-01-04 3:22 ` [dpdk-dev] [PATCH v5 03/17] net/i40e: store flow director filter Beilei Xing
2017-01-04 3:22 ` [dpdk-dev] [PATCH v5 04/17] net/i40e: restore ethertype filter Beilei Xing
2017-01-04 3:22 ` [dpdk-dev] [PATCH v5 05/17] net/i40e: restore tunnel filter Beilei Xing
2017-01-04 3:22 ` [dpdk-dev] [PATCH v5 06/17] net/i40e: restore flow director filter Beilei Xing
2017-01-04 3:22 ` [dpdk-dev] [PATCH v5 07/17] net/i40e: add flow validate function Beilei Xing
2017-01-04 18:57 ` Ferruh Yigit
2017-01-05 6:08 ` Xing, Beilei
2017-01-05 11:16 ` Ferruh Yigit
2017-01-05 11:52 ` Xing, Beilei
2017-01-04 3:22 ` [dpdk-dev] [PATCH v5 08/17] net/i40e: parse flow director filter Beilei Xing
2017-01-04 3:22 ` [dpdk-dev] [PATCH v5 09/17] net/i40e: parse tunnel filter Beilei Xing
2017-01-04 3:23 ` [dpdk-dev] [PATCH v5 10/17] net/i40e: add flow create function Beilei Xing
2017-01-04 3:23 ` [dpdk-dev] [PATCH v5 11/17] net/i40e: add flow destroy function Beilei Xing
2017-01-04 3:23 ` [dpdk-dev] [PATCH v5 12/17] net/i40e: destroy ethertype filter Beilei Xing
2017-01-04 3:23 ` [dpdk-dev] [PATCH v5 13/17] net/i40e: destroy tunnel filter Beilei Xing
2017-01-04 3:23 ` [dpdk-dev] [PATCH v5 14/17] net/i40e: destroy flow directory filter Beilei Xing
2017-01-04 3:23 ` [dpdk-dev] [PATCH v5 15/17] net/i40e: add flow flush function Beilei Xing
2017-01-04 3:23 ` [dpdk-dev] [PATCH v5 16/17] net/i40e: flush ethertype filters Beilei Xing
2017-01-04 3:23 ` [dpdk-dev] [PATCH v5 17/17] net/i40e: flush tunnel filters Beilei Xing
2017-01-04 6:40 ` [dpdk-dev] [PATCH v5 00/17] net/i40e: consistent filter API Wu, Jingjing
2017-01-05 15:45 ` [dpdk-dev] [PATCH v6 " Beilei Xing
2017-01-05 15:45 ` [dpdk-dev] [PATCH v6 01/17] net/i40e: store ethertype filter Beilei Xing
2017-01-05 17:46 ` Ferruh Yigit
2017-01-05 15:45 ` [dpdk-dev] [PATCH v6 02/17] net/i40e: store tunnel filter Beilei Xing
2017-01-05 15:45 ` [dpdk-dev] [PATCH v6 03/17] net/i40e: store flow director filter Beilei Xing
2017-01-05 15:45 ` [dpdk-dev] [PATCH v6 04/17] net/i40e: restore ethertype filter Beilei Xing
2017-01-05 15:45 ` [dpdk-dev] [PATCH v6 05/17] net/i40e: restore tunnel filter Beilei Xing
2017-01-05 15:45 ` [dpdk-dev] [PATCH v6 06/17] net/i40e: restore flow director filter Beilei Xing
2017-01-05 15:46 ` [dpdk-dev] [PATCH v6 07/17] net/i40e: add flow validate function Beilei Xing
2017-01-05 15:46 ` [dpdk-dev] [PATCH v6 08/17] net/i40e: parse flow director filter Beilei Xing
2017-01-05 15:46 ` [dpdk-dev] [PATCH v6 09/17] net/i40e: parse tunnel filter Beilei Xing
2017-01-05 15:46 ` [dpdk-dev] [PATCH v6 10/17] net/i40e: add flow create function Beilei Xing
2017-01-05 17:47 ` Ferruh Yigit
2017-01-05 15:46 ` [dpdk-dev] [PATCH v6 11/17] net/i40e: add flow destroy function Beilei Xing
2017-01-05 15:46 ` [dpdk-dev] [PATCH v6 12/17] net/i40e: destroy ethertype filter Beilei Xing
2017-01-05 15:46 ` [dpdk-dev] [PATCH v6 13/17] net/i40e: destroy tunnel filter Beilei Xing
2017-01-05 15:46 ` [dpdk-dev] [PATCH v6 14/17] net/i40e: destroy flow directory filter Beilei Xing
2017-01-05 15:46 ` [dpdk-dev] [PATCH v6 15/17] net/i40e: add flow flush function Beilei Xing
2017-01-05 15:46 ` [dpdk-dev] [PATCH v6 16/17] net/i40e: flush ethertype filters Beilei Xing
2017-01-05 15:46 ` [dpdk-dev] [PATCH v6 17/17] net/i40e: flush tunnel filters Beilei Xing
2017-01-05 17:46 ` [dpdk-dev] [PATCH v6 00/17] net/i40e: consistent filter API Ferruh Yigit
2017-01-06 5:27 ` [dpdk-dev] [PATCH v7 " Beilei Xing
2017-01-06 5:27 ` [dpdk-dev] [PATCH v7 01/17] net/i40e: store ethertype filter Beilei Xing
2017-01-06 5:27 ` [dpdk-dev] [PATCH v7 02/17] net/i40e: store tunnel filter Beilei Xing
2017-01-06 5:27 ` [dpdk-dev] [PATCH v7 03/17] net/i40e: store flow director filter Beilei Xing
2017-01-06 5:27 ` [dpdk-dev] [PATCH v7 04/17] net/i40e: restore ethertype filter Beilei Xing
2017-01-06 5:27 ` [dpdk-dev] [PATCH v7 05/17] net/i40e: restore tunnel filter Beilei Xing
2017-01-06 5:27 ` [dpdk-dev] [PATCH v7 06/17] net/i40e: restore flow director filter Beilei Xing
2017-01-06 5:27 ` [dpdk-dev] [PATCH v7 07/17] net/i40e: add flow validate function Beilei Xing
2017-01-06 5:27 ` [dpdk-dev] [PATCH v7 08/17] net/i40e: parse flow director filter Beilei Xing
2017-01-06 5:27 ` [dpdk-dev] [PATCH v7 09/17] net/i40e: parse tunnel filter Beilei Xing
2017-01-06 5:27 ` [dpdk-dev] [PATCH v7 10/17] net/i40e: add flow create function Beilei Xing
2017-01-06 5:27 ` [dpdk-dev] [PATCH v7 11/17] net/i40e: add flow destroy function Beilei Xing
2017-01-06 5:27 ` [dpdk-dev] [PATCH v7 12/17] net/i40e: destroy ethertype filter Beilei Xing
2017-01-06 5:27 ` [dpdk-dev] [PATCH v7 13/17] net/i40e: destroy tunnel filter Beilei Xing
2017-01-06 5:27 ` [dpdk-dev] [PATCH v7 14/17] net/i40e: destroy flow directory filter Beilei Xing
2017-01-06 5:27 ` [dpdk-dev] [PATCH v7 15/17] net/i40e: add flow flush function Beilei Xing
2017-01-06 5:27 ` [dpdk-dev] [PATCH v7 16/17] net/i40e: flush ethertype filters Beilei Xing
2017-01-06 5:27 ` [dpdk-dev] [PATCH v7 17/17] net/i40e: flush tunnel filters Beilei Xing
2017-01-06 11:54 ` [dpdk-dev] [PATCH v7 00/17] net/i40e: consistent filter API Ferruh Yigit
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).