From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 156D3A0521; Tue, 3 Nov 2020 09:30:00 +0100 (CET) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 1325EC7FA; Tue, 3 Nov 2020 09:29:44 +0100 (CET) Received: from mga03.intel.com (mga03.intel.com [134.134.136.65]) by dpdk.org (Postfix) with ESMTP id 46BCAC7FA for ; Tue, 3 Nov 2020 09:29:42 +0100 (CET) IronPort-SDR: vir442KrpQICP1UwpqKunc7LURNYVvE98ubMoSoNM+V0NcP0CfClUPkQhug0UmjvdlGcOzKTmi cys0s7JDY3Vg== X-IronPort-AV: E=McAfee;i="6000,8403,9793"; a="169122332" X-IronPort-AV: E=Sophos;i="5.77,447,1596524400"; d="scan'208";a="169122332" X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from orsmga004.jf.intel.com ([10.7.209.38]) by orsmga103.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 03 Nov 2020 00:29:41 -0800 IronPort-SDR: gBg3R4yPK7BOUt/m8FJVZofYeB51N+Tg2FP84BNAsdFZeYBPusG0mziiFob4KlnFSqZoTD6krY D5gQbxAwVtYg== X-IronPort-AV: E=Sophos;i="5.77,447,1596524400"; d="scan'208";a="470711404" Received: from intel-npg-odc-srv01.cd.intel.com ([10.240.178.136]) by orsmga004-auth.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 03 Nov 2020 00:29:39 -0800 From: Steve Yang To: dev@dpdk.org Cc: jia.guo@intel.com, haiyue.wang@intel.com, qiming.yang@intel.com, beilei.xing@intel.com, orika@nvidia.com, murphyx.yang@intel.com, Steve Yang Date: Tue, 3 Nov 2020 08:28:05 +0000 Message-Id: <20201103082809.41149-3-stevex.yang@intel.com> X-Mailer: git-send-email 2.17.1 In-Reply-To: <20201103082809.41149-1-stevex.yang@intel.com> References: <20201014084131.72035-1-simonx.lu@intel.com> <20201103082809.41149-1-stevex.yang@intel.com> Subject: [dpdk-dev] [RFC v2 2/6] net/i40e: define the mirror filter parser X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: 
List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Define the sample filter parser for mirroring. It is divided into two phases: the first parses the sample attributes and pattern, filling in the mirror config according to the pattern type (VF/PF/VLAN) when the sample ratio is 1; the second parses the sample action, filling in the mirror config's port id according to the action type (VF/PF). Signed-off-by: Steve Yang --- drivers/net/i40e/i40e_flow.c | 264 +++++++++++++++++++++++++- lib/librte_ethdev/rte_ethdev_driver.h | 1 + 2 files changed, 258 insertions(+), 7 deletions(-) diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c index 5bec0c7a8..7928871bf 100644 --- a/drivers/net/i40e/i40e_flow.c +++ b/drivers/net/i40e/i40e_flow.c @@ -1871,15 +1871,18 @@ static struct i40e_valid_pattern i40e_supported_patterns[] = { { pattern_fdir_ipv6_sctp, i40e_flow_parse_l4_cloud_filter }, }; -#define NEXT_ITEM_OF_ACTION(act, actions, index) \ - do { \ - act = actions + index; \ - while (act->type == RTE_FLOW_ACTION_TYPE_VOID) { \ - index++; \ - act = actions + index; \ - } \ +#define NEXT_ITEM_OF_ACTION(act, actions, index) \ + do { \ + act = (actions) + (index); \ + while (act->type == RTE_FLOW_ACTION_TYPE_VOID) { \ + (index)++; \ + act = (actions) + (index); \ + } \ } while (0) +#define GET_VLAN_ID_FROM_TCI(vlan_item, default_vid) \ + ((vlan_item) ? 
ntohs(vlan_item->tci) & 0x0fff : (default_vid)) + /* Find the first VOID or non-VOID item pointer */ static const struct rte_flow_item * i40e_find_first_item(const struct rte_flow_item *item, bool is_void) @@ -5267,6 +5270,253 @@ i40e_config_rss_filter_del(struct rte_eth_dev *dev, return 0; } +static int +i40e_flow_parse_sample_attr_pattern(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + struct rte_flow_error *error, + union i40e_filter_t *filter) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + const struct rte_flow_item *item = pattern; + const struct rte_flow_item *next_item = pattern + 1; + enum rte_flow_item_type item_type, next_item_type; + const struct rte_flow_item_vf *vf_spec, *vf_mask, *vf_last; + const struct rte_flow_item_vlan *vlan_spec, *vlan_mask, *vlan_last; + struct i40e_mirror_rule_conf *mirror_config = &filter->mirror_conf; + uint16_t *entries = mirror_config->entries; + uint8_t *rule_type = &mirror_config->rule_type; + uint16_t vf_id, vf_id_last, vlan_id, vlan_id_mask, vlan_id_last; + uint16_t i, j = 0, k = 0; + + if (attr->priority) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, + attr, "Not support priority."); + return -rte_errno; + } + if (attr->group) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_GROUP, + attr, "Not support group."); + return -rte_errno; + } + if (attr->transfer) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, + attr, "Not support group."); + return -rte_errno; + } + + item_type = item->type; + next_item_type = next_item->type; + if (!(next_item_type == RTE_FLOW_ITEM_TYPE_END && + (item_type == RTE_FLOW_ITEM_TYPE_PF || + item_type == RTE_FLOW_ITEM_TYPE_VF || + item_type == RTE_FLOW_ITEM_TYPE_VLAN))) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Only support a pattern item that is pf or vf or vlan."); + return -rte_errno; + } + + if 
(item_type == RTE_FLOW_ITEM_TYPE_PF) { + if (!attr->ingress && attr->egress) { + *rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS; + } else if (attr->ingress && !attr->egress) { + *rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS; + } else { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR, + attr, + "Only support ingress or egress attribute for PF mirror."); + return -rte_errno; + } + } else if (item_type == RTE_FLOW_ITEM_TYPE_VF) { + if (!attr->ingress && attr->egress) { + *rule_type = I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS; + } else if (attr->ingress && !attr->egress) { + *rule_type = I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS; + } else { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR, + attr, + "Only support ingress or egress attribute for VF mirror."); + return -rte_errno; + } + + vf_spec = item->spec; + vf_last = item->last; + vf_mask = item->mask; + if (item->spec || item->last) { + vf_id = (vf_spec ? vf_spec->id : 0); + vf_id_last = (vf_last ? vf_last->id : vf_id); + if (vf_id >= pf->vf_num || + vf_id_last >= pf->vf_num || + vf_id_last < vf_id) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM_SPEC, + item, + "VF ID is out of range."); + return -rte_errno; + } + for (i = vf_id; i <= vf_id_last; i++, k++) + if (!vf_mask || (vf_mask->id & (1 << k))) + entries[j++] = pf->vfs[i].vsi->seid; + mirror_config->num_entries = j; + } else if (item->mask) { + if (vf_mask->id >= (uint32_t)(1 << pf->vf_num)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM_MASK, + item, + "VF ID mask is out of range."); + return -rte_errno; + } + for (i = 0; i < pf->vf_num; i++) { + if (vf_mask->id & (1 << i)) + entries[j++] = pf->vfs[i].vsi->seid; + } + mirror_config->num_entries = j; + } + if (mirror_config->num_entries == 0) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Not valid VF ID."); + return -rte_errno; + } + } else if (item_type == RTE_FLOW_ITEM_TYPE_VLAN) { + if (attr->ingress && 
!attr->egress) { + *rule_type = I40E_AQC_MIRROR_RULE_TYPE_VLAN; + } else { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR, attr, + "Only support ingress attribute for VLAN mirror."); + return -rte_errno; + } + + vlan_spec = item->spec; + vlan_last = item->last; + vlan_mask = item->mask; + if (item->spec || item->last) { + vlan_id = GET_VLAN_ID_FROM_TCI(vlan_spec, 0); + vlan_id_last = GET_VLAN_ID_FROM_TCI(vlan_last, vlan_id); + vlan_id_mask = GET_VLAN_ID_FROM_TCI(vlan_mask, 0x0fff); + if (vlan_id >= ETH_MIRROR_MAX_VLANS || + vlan_id_last >= ETH_MIRROR_MAX_VLANS || + vlan_id_last < vlan_id) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM_SPEC, item, + "VLAN ID is out of range."); + return -rte_errno; + } + for (i = vlan_id; i <= vlan_id_last; i++, k++) + if (vlan_id_mask & (1 << k)) + entries[j++] = i; + mirror_config->num_entries = j; + } else if (item->mask) { + vlan_id_mask = GET_VLAN_ID_FROM_TCI(vlan_mask, 0x0fff); + for (i = 0, j = 0; i < ETH_MIRROR_MAX_VLANS; i++) { + if (vlan_id_mask & (1 << i)) + entries[j++] = i; + mirror_config->num_entries = j; + } + } + if (mirror_config->num_entries == 0) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Not valid VLAN ID."); + return -rte_errno; + } + } + + return 0; +} + +static int +i40e_flow_parse_sample_action(struct rte_eth_dev *dev, + const struct rte_flow_action *actions, + struct rte_flow_error *error, + union i40e_filter_t *filter) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + const struct rte_flow_action *act; + struct i40e_mirror_rule_conf *mirror_config = &filter->mirror_conf; + uint16_t *dst_vsi_seid = &mirror_config->dst_vsi_seid; + uint32_t index = 0; + + NEXT_ITEM_OF_ACTION(act, actions, index); + if (act->type != RTE_FLOW_ACTION_TYPE_SAMPLE) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, act, + "Not supported action."); + return -rte_errno; + } + + if (((const struct rte_flow_action_sample 
*)act->conf)->ratio != 1) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, act, + "Invalid ratio for mirror action"); + return -rte_errno; + } + + index++; + NEXT_ITEM_OF_ACTION(act, actions, index); + + if (act->type == RTE_FLOW_ACTION_TYPE_PORT_ID) { + const struct rte_flow_action_port_id *conf = + (const struct rte_flow_action_port_id *)act->conf; + + if (!conf->id) { + *dst_vsi_seid = pf->main_vsi_seid; + } else if (conf->id <= pf->vf_num) { + *dst_vsi_seid = pf->vfs[conf->id - 1].vsi->seid; + } else { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, act, + "Invalid port id mirror action"); + return -rte_errno; + } + } + + /* Check if the next non-void item is END */ + index++; + NEXT_ITEM_OF_ACTION(act, actions, index); + if (act->type != RTE_FLOW_ACTION_TYPE_END) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, act, + "Only support pf or vf parameter item."); + return -rte_errno; + } + + return 0; +} + +static int +i40e_parse_sample_filter(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + union i40e_filter_t *filter, + struct rte_flow_error *error) +{ + int ret; + + ret = i40e_flow_parse_sample_attr_pattern(dev, attr, pattern, + error, filter); + if (ret) + return ret; + + ret = i40e_flow_parse_sample_action(dev, actions, error, filter); + if (ret) + return ret; + + cons_filter_type = RTE_ETH_FILTER_SAMPLE; + + return 0; +} + static int i40e_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, diff --git a/lib/librte_ethdev/rte_ethdev_driver.h b/lib/librte_ethdev/rte_ethdev_driver.h index 0eacfd842..9c45124d5 100644 --- a/lib/librte_ethdev/rte_ethdev_driver.h +++ b/lib/librte_ethdev/rte_ethdev_driver.h @@ -477,6 +477,7 @@ enum rte_filter_type { RTE_ETH_FILTER_TUNNEL, RTE_ETH_FILTER_FDIR, RTE_ETH_FILTER_HASH, + RTE_ETH_FILTER_SAMPLE, RTE_ETH_FILTER_L2_TUNNEL, RTE_ETH_FILTER_GENERIC, 
}; -- 2.17.1