From: Beilei Xing <beilei.xing@intel.com>
To: jingjing.wu@intel.com, helin.zhang@intel.com
Cc: dev@dpdk.org
Date: Tue, 27 Dec 2016 14:26:09 +0800
Message-Id: <1482819984-14120-3-git-send-email-beilei.xing@intel.com>
X-Mailer: git-send-email 2.5.5
In-Reply-To: <1482819984-14120-1-git-send-email-beilei.xing@intel.com>
References: <1480679625-4157-1-git-send-email-beilei.xing@intel.com>
 <1482819984-14120-1-git-send-email-beilei.xing@intel.com>
Subject: [dpdk-dev] [PATCH v2 02/17] net/i40e: store tunnel filter

Currently there is no tunnel filter stored in SW. This patch stores
tunnel filters in SW with a cuckoo hash, and adds protection against
adding a tunnel filter that already exists.

Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
 drivers/net/i40e/i40e_ethdev.c | 167 ++++++++++++++++++++++++++++++++++++++++-
 drivers/net/i40e/i40e_ethdev.h |  27 +++++++
 2 files changed, 191 insertions(+), 3 deletions(-)

diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 80dd8d7..c012d5d 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -473,6 +473,17 @@ static int i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
 static int i40e_sw_ethertype_filter_del(struct i40e_pf *pf,
 			struct i40e_ethertype_filter *filter);
 
+static int i40e_tunnel_filter_convert(
+	struct i40e_aqc_add_remove_cloud_filters_element_data *cld_filter,
+	struct i40e_tunnel_filter *tunnel_filter);
+static struct i40e_tunnel_filter *
+i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_rule *tunnel_rule,
+			const struct i40e_tunnel_filter_input *input);
+static int i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
+			struct i40e_tunnel_filter *tunnel_filter);
+static int i40e_sw_tunnel_filter_del(struct i40e_pf *pf,
+			struct i40e_tunnel_filter *tunnel_filter);
+
 static const struct rte_pci_id pci_id_i40e_map[] = {
 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710) },
 	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QEMU) },
@@ -950,6 +961,7 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
 	uint32_t len;
 	uint8_t aq_fail = 0;
 	struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
+	struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -961,6 +973,14 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
 		.hash_func = rte_hash_crc,
 	};
 
+	char tunnel_hash_name[RTE_HASH_NAMESIZE];
+	struct rte_hash_parameters tunnel_hash_params = {
+		.name = tunnel_hash_name,
+		.entries = I40E_MAX_TUNNEL_FILTER_NUM,
+		.key_len = sizeof(struct i40e_tunnel_filter_input),
+		.hash_func = rte_hash_crc,
+	};
+
 	dev->dev_ops = &i40e_eth_dev_ops;
 	dev->rx_pkt_burst = i40e_recv_pkts;
 	dev->tx_pkt_burst = i40e_xmit_pkts;
@@ -1221,8 +1241,33 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
 		goto err_ethertype_hash_map_alloc;
 	}
 
+	/* Initialize tunnel filter rule list and hash */
+	TAILQ_INIT(&tunnel_rule->tunnel_list);
+	snprintf(tunnel_hash_name, RTE_HASH_NAMESIZE,
+		 "tunnel_%s", dev->data->name);
+	tunnel_rule->hash_table = rte_hash_create(&tunnel_hash_params);
+	if (!tunnel_rule->hash_table) {
+		PMD_INIT_LOG(ERR, "Failed to create tunnel hash table!");
+		ret = -EINVAL;
+		goto err_tunnel_hash_table_create;
+	}
+	tunnel_rule->hash_map = rte_zmalloc("i40e_tunnel_hash_map",
+					sizeof(struct i40e_tunnel_filter *) *
+					I40E_MAX_TUNNEL_FILTER_NUM,
+					0);
+	if (!tunnel_rule->hash_map) {
+		PMD_INIT_LOG(ERR,
+			"Failed to allocate memory for tunnel hash map!");
+		ret = -ENOMEM;
+		goto err_tunnel_hash_map_alloc;
+	}
+
 	return 0;
 
+err_tunnel_hash_map_alloc:
+	rte_hash_free(tunnel_rule->hash_table);
+err_tunnel_hash_table_create:
+	rte_free(ethertype_rule->hash_map);
 err_ethertype_hash_map_alloc:
 	rte_hash_free(ethertype_rule->hash_table);
 err_ethertype_hash_table_create:
@@ -1254,9 +1299,11 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
 	struct i40e_hw *hw;
 	struct i40e_filter_control_settings settings;
 	struct i40e_ethertype_filter *p_ethertype;
+	struct i40e_tunnel_filter *p_tunnel;
 	int ret;
 	uint8_t aq_fail = 0;
 	struct i40e_ethertype_rule *ethertype_rule;
+	struct i40e_tunnel_rule *tunnel_rule;
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -1267,6 +1314,7 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
 	hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	pci_dev = dev->pci_dev;
 	ethertype_rule = &pf->ethertype;
+	tunnel_rule = &pf->tunnel;
 
 	if (hw->adapter_stopped == 0)
 		i40e_dev_close(dev);
@@ -1283,6 +1331,17 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
 		rte_free(p_ethertype);
 	}
 
+	/* Remove all tunnel director rules and hash */
+	if (tunnel_rule->hash_map)
+		rte_free(tunnel_rule->hash_map);
+	if (tunnel_rule->hash_table)
+		rte_hash_free(tunnel_rule->hash_table);
+
+	while ((p_tunnel = TAILQ_FIRST(&tunnel_rule->tunnel_list))) {
+		TAILQ_REMOVE(&tunnel_rule->tunnel_list, p_tunnel, rules);
+		rte_free(p_tunnel);
+	}
+
 	dev->dev_ops = NULL;
 	dev->rx_pkt_burst = NULL;
 	dev->tx_pkt_burst = NULL;
@@ -6482,6 +6541,81 @@ i40e_dev_get_filter_type(uint16_t filter_type, uint16_t *flag)
 	return 0;
 }
 
+/* Convert tunnel filter structure */
+static int
+i40e_tunnel_filter_convert(struct i40e_aqc_add_remove_cloud_filters_element_data
+			   *cld_filter,
+			   struct i40e_tunnel_filter *tunnel_filter)
+{
+	ether_addr_copy((struct ether_addr *)&cld_filter->outer_mac,
+			(struct ether_addr *)&tunnel_filter->input.outer_mac);
+	ether_addr_copy((struct ether_addr *)&cld_filter->inner_mac,
+			(struct ether_addr *)&tunnel_filter->input.inner_mac);
+	tunnel_filter->input.inner_vlan = cld_filter->inner_vlan;
+	tunnel_filter->input.flags = cld_filter->flags;
+	tunnel_filter->input.tenant_id = cld_filter->tenant_id;
+	tunnel_filter->queue = cld_filter->queue_number;
+
+	return 0;
+}
+
+/* Check if there exists the tunnel filter */
+static struct i40e_tunnel_filter *
+i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_rule *tunnel_rule,
+			     const struct i40e_tunnel_filter_input *input)
+{
+	int ret = 0;
+
+	ret = rte_hash_lookup(tunnel_rule->hash_table, (const void *)input);
+	if (ret < 0)
+		return NULL;
+
+	return tunnel_rule->hash_map[ret];
+}
+
+/* Add a tunnel filter into the SW list */
+static int
+i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
+			     struct i40e_tunnel_filter *tunnel_filter)
+{
+	struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
+	int ret = 0;
+
+	ret = rte_hash_add_key(tunnel_rule->hash_table,
+			       &tunnel_filter->input);
+	if (ret < 0)
+		PMD_DRV_LOG(ERR,
+			    "Failed to insert tunnel filter to hash table %d!",
+			    ret);
+	tunnel_rule->hash_map[ret] = tunnel_filter;
+
+	TAILQ_INSERT_TAIL(&tunnel_rule->tunnel_list, tunnel_filter, rules);
+
+	return 0;
+}
+
+/* Delete a tunnel filter from the SW list */
+static int
+i40e_sw_tunnel_filter_del(struct i40e_pf *pf,
+			  struct i40e_tunnel_filter *tunnel_filter)
+{
+	struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
+	int ret = 0;
+
+	ret = rte_hash_del_key(tunnel_rule->hash_table,
+			       &tunnel_filter->input);
+	if (ret < 0)
+		PMD_DRV_LOG(ERR,
+			    "Failed to delete tunnel filter to hash table %d!",
+			    ret);
+	tunnel_rule->hash_map[ret] = NULL;
+
+	TAILQ_REMOVE(&tunnel_rule->tunnel_list, tunnel_filter, rules);
+	rte_free(tunnel_filter);
+
+	return 0;
+}
+
 static int
 i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
 			   struct rte_eth_tunnel_filter_conf *tunnel_filter,
@@ -6497,6 +6631,8 @@ i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
 	struct i40e_vsi *vsi = pf->main_vsi;
 	struct i40e_aqc_add_remove_cloud_filters_element_data *cld_filter;
 	struct i40e_aqc_add_remove_cloud_filters_element_data *pfilter;
+	struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
+	struct i40e_tunnel_filter *tunnel, *node;
 
 	cld_filter = rte_zmalloc("tunnel_filter",
 		sizeof(struct i40e_aqc_add_remove_cloud_filters_element_data),
 		0);
@@ -6559,11 +6695,36 @@ i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
 	pfilter->tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id);
 	pfilter->queue_number = rte_cpu_to_le_16(tunnel_filter->queue_id);
 
-	if (add)
+	tunnel = rte_zmalloc("tunnel_filter", sizeof(*tunnel), 0);
+	i40e_tunnel_filter_convert(cld_filter, tunnel);
+	node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &tunnel->input);
+	if (add && node) {
+		PMD_DRV_LOG(ERR, "Conflict with existing tunnel rules!");
+		rte_free(tunnel);
+		return -EINVAL;
+	} else if (!add && !node) {
+		PMD_DRV_LOG(ERR, "There's no corresponding tunnel filter!");
+		rte_free(tunnel);
+		return -EINVAL;
+	}
+
+	if (add) {
 		ret = i40e_aq_add_cloud_filters(hw, vsi->seid, cld_filter, 1);
-	else
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Failed to add a tunnel filter.");
+			return ret;
+		}
+		ret = i40e_sw_tunnel_filter_insert(pf, tunnel);
+	} else {
 		ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
-						   cld_filter, 1);
+						   cld_filter, 1);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter.");
+			return ret;
+		}
+		ret = i40e_sw_tunnel_filter_del(pf, node);
+		rte_free(tunnel);
+	}
 
 	rte_free(cld_filter);
 	return ret;
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 316af80..c05436c 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -421,6 +421,32 @@ struct i40e_ethertype_rule {
 	struct rte_hash *hash_table;
 };
 
+/* Tunnel filter number HW supports */
+#define I40E_MAX_TUNNEL_FILTER_NUM 400
+
+/* Tunnel filter struct */
+struct i40e_tunnel_filter_input {
+	uint8_t outer_mac[6];   /* Outer mac address to match */
+	uint8_t inner_mac[6];   /* Inner mac address to match */
+	uint16_t inner_vlan;    /* Inner vlan address to match */
+	uint16_t flags;         /* Filter type flag */
+	uint32_t tenant_id;     /* Tenant id to match */
+};
+
+struct i40e_tunnel_filter {
+	TAILQ_ENTRY(i40e_tunnel_filter) rules;
+	struct i40e_tunnel_filter_input input;
+	uint16_t queue; /* Queue assigned to when match */
+};
+
+TAILQ_HEAD(i40e_tunnel_filter_list, i40e_tunnel_filter);
+
+struct i40e_tunnel_rule {
+	struct i40e_tunnel_filter_list tunnel_list;
+	struct i40e_tunnel_filter **hash_map;
+	struct rte_hash *hash_table;
+};
+
 #define I40E_MIRROR_MAX_ENTRIES_PER_RULE 64
 #define I40E_MAX_MIRROR_RULES 64
 /*
@@ -492,6 +518,7 @@ struct i40e_pf {
 	struct i40e_fdir_info fdir; /* flow director info */
 	struct i40e_ethertype_rule ethertype; /* Ethertype filter rule */
+	struct i40e_tunnel_rule tunnel; /* Tunnel filter rule */
 	struct i40e_fc_conf fc_conf; /* Flow control conf */
 	struct i40e_mirror_rule_list mirror_list;
 	uint16_t nb_mirror_rule;   /* The number of mirror rules */
-- 
2.5.5
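
[Editorial note, not part of the patch] For readers unfamiliar with the pattern this patch introduces, the standalone sketch below shows the same cuckoo-hash bookkeeping outside the driver: a fixed-size key structure hashed with rte_hash_crc, an rte_hash table, a flat pointer array indexed by the position that rte_hash_lookup()/rte_hash_add_key() return (the role of tunnel_rule->hash_map), and a lookup-before-insert check that rejects duplicates. All demo_* names are invented for this illustration; only the rte_eal and rte_hash calls are real DPDK API, and the real driver keys are struct i40e_tunnel_filter_input as shown in the diff above.

/*
 * Illustrative sketch only: lookup-before-insert on DPDK's cuckoo hash.
 * The demo_* identifiers are invented for this example.
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#include <rte_eal.h>
#include <rte_hash.h>
#include <rte_hash_crc.h>
#include <rte_lcore.h>

#define DEMO_MAX_FILTERS 400 /* plays the role of I40E_MAX_TUNNEL_FILTER_NUM */

/* Simplified stand-in for struct i40e_tunnel_filter_input */
struct demo_tunnel_key {
	uint8_t  outer_mac[6];
	uint8_t  inner_mac[6];
	uint16_t inner_vlan;
	uint16_t flags;
	uint32_t tenant_id;
};

static struct rte_hash *demo_table;
/* Positions returned by rte_hash_add_key()/rte_hash_lookup() index this
 * array, which is how tunnel_rule->hash_map is used in the patch. */
static void *demo_hash_map[DEMO_MAX_FILTERS];

/* Add a filter only if its key is not already present. */
static int
demo_filter_add(const struct demo_tunnel_key *key, void *filter)
{
	int pos = rte_hash_lookup(demo_table, key);

	if (pos >= 0)
		return -EEXIST; /* the "conflict with existing rule" case */

	pos = rte_hash_add_key(demo_table, key);
	if (pos < 0)
		return pos;

	demo_hash_map[pos] = filter;
	return 0;
}

int
main(int argc, char **argv)
{
	struct rte_hash_parameters params = {
		.name = "demo_tunnel_hash",
		.entries = DEMO_MAX_FILTERS,
		.key_len = sizeof(struct demo_tunnel_key),
		.hash_func = rte_hash_crc,
	};
	struct demo_tunnel_key key;
	static int dummy_filter;

	if (rte_eal_init(argc, argv) < 0)
		return -1;
	params.socket_id = rte_socket_id();

	demo_table = rte_hash_create(&params);
	if (demo_table == NULL)
		return -1;

	memset(&key, 0, sizeof(key));
	key.tenant_id = 100;
	key.inner_vlan = 10;

	printf("first add:  %d\n", demo_filter_add(&key, &dummy_filter)); /* 0 */
	printf("second add: %d\n", demo_filter_add(&key, &dummy_filter)); /* -EEXIST */

	rte_hash_free(demo_table);
	return 0;
}

The design point the sketch tries to make explicit: the position returned by rte_hash_add_key() identifies the key for as long as it stays in the table, so the driver can keep its filter pointers in a plain array of I40E_MAX_TUNNEL_FILTER_NUM entries rather than storing data values inside the hash table itself.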