From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 35D3CA04B7; Wed, 14 Oct 2020 10:45:33 +0200 (CEST) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id DEC6E1DD32; Wed, 14 Oct 2020 10:43:31 +0200 (CEST) Received: from mga04.intel.com (mga04.intel.com [192.55.52.120]) by dpdk.org (Postfix) with ESMTP id 5AD5F1DD2F for ; Wed, 14 Oct 2020 10:43:29 +0200 (CEST) IronPort-SDR: wmsJoKRUL9NLLus/lp3r4rrv0eI9Wyc+K0Yzq+YuOq+lkQD5636B2qVoGYqevPu2IVxvlW2WSd hqEsXov/Lltg== X-IronPort-AV: E=McAfee;i="6000,8403,9773"; a="163432238" X-IronPort-AV: E=Sophos;i="5.77,374,1596524400"; d="scan'208";a="163432238" X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from fmsmga008.fm.intel.com ([10.253.24.58]) by fmsmga104.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 14 Oct 2020 01:43:29 -0700 IronPort-SDR: VAOGUhIPW6wV5SaRMHwvChBbFwaIH1OPOOnN4Q0NjsdxOFm9atiMkS/8kWyWWYLcVDZZEbGjqp ilUCfbDipu7Q== X-IronPort-AV: E=Sophos;i="5.77,374,1596524400"; d="scan'208";a="299864591" Received: from intel-npg-odc-srv01.cd.intel.com ([10.240.178.136]) by fmsmga008-auth.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 14 Oct 2020 01:43:25 -0700 From: SimonX Lu To: dev@dpdk.org Cc: jia.guo@intel.com, haiyue.wang@intel.com, qiming.yang@intel.com, beilei.xing@intel.com, orika@nvidia.com, Simon Lu Date: Wed, 14 Oct 2020 08:41:30 +0000 Message-Id: <20201014084131.72035-8-simonx.lu@intel.com> X-Mailer: git-send-email 2.17.1 In-Reply-To: <20201014084131.72035-1-simonx.lu@intel.com> References: <20201014084131.72035-1-simonx.lu@intel.com> Subject: [dpdk-dev] [PATCH v1 7/8] net/i40e: define the mirror filter paser X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: 
"dev" From: Simon Lu Define the mirror filter parser. It is divided into two phases: one is mirror attribute and pattern parsing, where the mirror config will be filled in according to the pattern type VF/PF/VLAN; the other is mirror action parsing, where the dst_pool of the mirror config will be filled in according to the action type VF/PF. Signed-off-by: Simon Lu --- drivers/net/i40e/i40e_flow.c | 263 ++++++++++++++++++++++++++++++++++- 1 file changed, 256 insertions(+), 7 deletions(-) diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c index adc5da1c5..578a68773 100644 --- a/drivers/net/i40e/i40e_flow.c +++ b/drivers/net/i40e/i40e_flow.c @@ -1868,15 +1868,18 @@ static struct i40e_valid_pattern i40e_supported_patterns[] = { { pattern_fdir_ipv6_sctp, i40e_flow_parse_l4_cloud_filter }, }; -#define NEXT_ITEM_OF_ACTION(act, actions, index) \ - do { \ - act = actions + index; \ - while (act->type == RTE_FLOW_ACTION_TYPE_VOID) { \ - index++; \ - act = actions + index; \ - } \ +#define NEXT_ITEM_OF_ACTION(act, actions, index) \ + do { \ + act = (actions) + (index); \ + while (act->type == RTE_FLOW_ACTION_TYPE_VOID) { \ + (index)++; \ + act = (actions) + (index); \ + } \ } while (0) +#define GET_VLAN_ID_FROM_TCI(vlan_item, default_vid) \ + ((vlan_item) ? 
ntohs(vlan_item->tci) & 0x0fff : (default_vid)) + /* Find the first VOID or non-VOID item pointer */ static const struct rte_flow_item * i40e_find_first_item(const struct rte_flow_item *item, bool is_void) @@ -5259,6 +5262,252 @@ i40e_config_rss_filter_del(struct rte_eth_dev *dev, return 0; } +static int +i40e_flow_parse_mirror_attr_pattern(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + struct rte_flow_error *error, + union i40e_filter_t *filter) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + const struct rte_flow_item *item = pattern; + const struct rte_flow_item *next_item = pattern + 1; + enum rte_flow_item_type item_type, next_item_type; + const struct rte_flow_item_vf *vf_spec, *vf_mask, *vf_last; + const struct rte_flow_item_vlan *vlan_spec, *vlan_mask, *vlan_last; + struct i40e_mirror_rule_conf *mirror_config = &filter->mirror_conf; + uint16_t *entries = mirror_config->entries; + uint8_t *rule_type = &mirror_config->rule_type; + uint16_t vf_id, vf_id_last, vlan_id, vlan_id_mask, vlan_id_last; + uint16_t i, j = 0, k = 0; + + if (attr->priority) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, + attr, "Not support priority."); + return -rte_errno; + } + if (attr->group) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_GROUP, + attr, "Not support group."); + return -rte_errno; + } + if (attr->transfer) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, + attr, "Not support group."); + return -rte_errno; + } + + item_type = item->type; + next_item_type = next_item->type; + if (!(next_item_type == RTE_FLOW_ITEM_TYPE_END && + (item_type == RTE_FLOW_ITEM_TYPE_PF || + item_type == RTE_FLOW_ITEM_TYPE_VF || + item_type == RTE_FLOW_ITEM_TYPE_VLAN))) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Only support a pattern item that is pf or vf or vlan."); + return -rte_errno; + } + + if 
(item_type == RTE_FLOW_ITEM_TYPE_PF) { + if (!attr->ingress && attr->egress) { + *rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS; + } else if (attr->ingress && !attr->egress) { + *rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS; + } else { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR, + attr, + "Only support ingress or egress attribute for PF mirror."); + return -rte_errno; + } + } else if (item_type == RTE_FLOW_ITEM_TYPE_VF) { + if (!attr->ingress && attr->egress) { + *rule_type = I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS; + } else if (attr->ingress && !attr->egress) { + *rule_type = I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS; + } else { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR, + attr, + "Only support ingress or egress attribute for VF mirror."); + return -rte_errno; + } + + vf_spec = item->spec; + vf_last = item->last; + vf_mask = item->mask; + if (item->spec || item->last) { + vf_id = (vf_spec ? vf_spec->id : 0); + vf_id_last = (vf_last ? vf_last->id : vf_id); + if (vf_id >= pf->vf_num || + vf_id_last >= pf->vf_num || + vf_id_last < vf_id) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM_SPEC, + item, + "VF ID is out of range."); + return -rte_errno; + } + for (i = vf_id; i <= vf_id_last; i++, k++) + if (!vf_mask || (vf_mask->id & (1 << k))) + entries[j++] = pf->vfs[i].vsi->seid; + mirror_config->num_entries = j; + } else if (item->mask) { + if (vf_mask->id >= (uint32_t)(1 << pf->vf_num)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM_MASK, + item, + "VF ID mask is out of range."); + return -rte_errno; + } + for (i = 0; i < pf->vf_num; i++) { + if (vf_mask->id & (1 << i)) + entries[j++] = pf->vfs[i].vsi->seid; + } + mirror_config->num_entries = j; + } + if (mirror_config->num_entries == 0) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Not valid VF ID."); + return -rte_errno; + } + } else if (item_type == RTE_FLOW_ITEM_TYPE_VLAN) { + if (attr->ingress && 
!attr->egress) { + *rule_type = I40E_AQC_MIRROR_RULE_TYPE_VLAN; + } else { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR, attr, + "Only support ingress attribute for VLAN mirror."); + return -rte_errno; + } + + vlan_spec = item->spec; + vlan_last = item->last; + vlan_mask = item->mask; + if (item->spec || item->last) { + vlan_id = GET_VLAN_ID_FROM_TCI(vlan_spec, 0); + vlan_id_last = GET_VLAN_ID_FROM_TCI(vlan_last, vlan_id); + vlan_id_mask = GET_VLAN_ID_FROM_TCI(vlan_mask, 0x0fff); + if (vlan_id >= ETH_MIRROR_MAX_VLANS || + vlan_id_last >= ETH_MIRROR_MAX_VLANS || + vlan_id_last < vlan_id) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM_SPEC, item, + "VLAN ID is out of range."); + return -rte_errno; + } + for (i = vlan_id; i <= vlan_id_last; i++, k++) + if (vlan_id_mask & (1 << k)) + entries[j++] = i; + mirror_config->num_entries = j; + } else if (item->mask) { + vlan_id_mask = GET_VLAN_ID_FROM_TCI(vlan_mask, 0x0fff); + for (i = 0, j = 0; i < ETH_MIRROR_MAX_VLANS; i++) { + if (vlan_id_mask & (1 << i)) + entries[j++] = i; + mirror_config->num_entries = j; + } + } + if (mirror_config->num_entries == 0) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, item, + "Not valid VLAN ID."); + return -rte_errno; + } + } + + return 0; +} + +static int +i40e_flow_parse_mirror_action(struct rte_eth_dev *dev, + const struct rte_flow_action *actions, + struct rte_flow_error *error, + union i40e_filter_t *filter) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + const struct rte_flow_action *act; + const struct rte_flow_action_vf *act_q; + struct i40e_mirror_rule_conf *mirror_config = &filter->mirror_conf; + uint16_t *dst_vsi_seid = &mirror_config->dst_vsi_seid; + uint32_t index = 0; + + NEXT_ITEM_OF_ACTION(act, actions, index); + if (act->type != RTE_FLOW_ACTION_TYPE_MIRROR) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, act, + "Not supported action."); + return -rte_errno; + } + + 
index++; + NEXT_ITEM_OF_ACTION(act, actions, index); + if (act->type == RTE_FLOW_ACTION_TYPE_PF) { + *dst_vsi_seid = pf->main_vsi_seid; + } else if (act->type == RTE_FLOW_ACTION_TYPE_VF) { + act_q = act->conf; + if (act_q->id >= pf->vf_num) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, act, + "Invalid VF ID for mirror action"); + return -rte_errno; + } + /* If the dst_pool is equal to vf_num, consider it as PF */ + if (act_q->id == pf->vf_num) + *dst_vsi_seid = pf->main_vsi_seid; + else + *dst_vsi_seid = pf->vfs[act_q->id].vsi->seid; + } else { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, act, + "Only support pf or vf parameter item."); + return -rte_errno; + } + + /* Check if the next non-void item is END */ + index++; + NEXT_ITEM_OF_ACTION(act, actions, index); + if (act->type != RTE_FLOW_ACTION_TYPE_END) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, act, + "Only support pf or vf parameter item."); + return -rte_errno; + } + + return 0; +} + +static int +i40e_parse_mirror_filter(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + union i40e_filter_t *filter, + struct rte_flow_error *error) +{ + int ret; + + ret = i40e_flow_parse_mirror_attr_pattern(dev, attr, pattern, + error, filter); + if (ret) + return ret; + + ret = i40e_flow_parse_mirror_action(dev, actions, error, filter); + if (ret) + return ret; + + cons_filter_type = RTE_ETH_FILTER_MIRROR; + + return 0; +} + static int i40e_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, -- 2.17.1