From: Steve Yang <stevex.yang@intel.com>
To: dev@dpdk.org
Cc: jia.guo@intel.com, haiyue.wang@intel.com, qiming.yang@intel.com,
 beilei.xing@intel.com, orika@nvidia.com, murphyx.yang@intel.com,
 Steve Yang <stevex.yang@intel.com>
Date: Tue, 3 Nov 2020 08:28:06 +0000
Message-Id: <20201103082809.41149-4-stevex.yang@intel.com>
X-Mailer: git-send-email 2.17.1
In-Reply-To: <20201103082809.41149-1-stevex.yang@intel.com>
References: <20201014084131.72035-1-simonx.lu@intel.com>
 <20201103082809.41149-1-stevex.yang@intel.com>
Subject: [dpdk-dev] [RFC v2 3/6] net/i40e: use generic flow command to
 re-realize mirror rule

When the sample rule's ratio is set to one, its behavior is the same as a
mirror rule, so the generic "flow create * pattern * actions sample *"
command can now replace the old "set port * mirror-rule *" command.
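For reference, a minimal sketch of what the equivalent rte_flow call could
look like from an application. This sketch is not part of the patch; the
helper name, the fixed destination port 0, and the flat action layout that
mirrors the testpmd syntax are illustrative assumptions based on the
DPDK 20.11 rte_flow definitions:

#include <rte_flow.h>

/* Sketch: mirror all PF ingress traffic to port 0, i.e. the API form of
 * "flow create 0 ingress pattern pf / end
 *      actions sample ratio 1 / port_id id 0 / end".
 */
static struct rte_flow *
mirror_pf_ingress_to_port0(uint16_t port_id, struct rte_flow_error *err)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_PF },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	/* ratio 1 samples every packet, which turns the rule into a mirror */
	struct rte_flow_action_sample sample = { .ratio = 1 };
	/* destination the sampled (mirrored) packets are sent to */
	struct rte_flow_action_port_id dst = { .id = 0 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_SAMPLE, .conf = &sample },
		{ .type = RTE_FLOW_ACTION_TYPE_PORT_ID, .conf = &dst },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, err);
}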
The following examples show how the mirror rule commands map to flow
management commands (in the commands below, port 0 is the PF and
ports 1-2 are VFs):

1): Ingress pf => pf
	set port 0 mirror-rule 0 uplink-mirror dst-pool 2 on
    or
	flow create 0 ingress pattern pf / end \
		actions sample ratio 1 / port_id id 0 / end

2): Egress pf => pf
	set port 0 mirror-rule 0 downlink-mirror dst-pool 2 on
    or
	flow create 0 egress pattern pf / end \
		actions sample ratio 1 / port_id id 0 / end

3): Ingress pf => vf 1
	set port 0 mirror-rule 0 uplink-mirror dst-pool 1 on
    or
	flow create 0 ingress pattern pf / end \
		actions sample ratio 1 / port_id id 2 / end

4): Egress pf => vf 1
	set port 0 mirror-rule 0 downlink-mirror dst-pool 1 on
    or
	flow create 0 egress pattern pf / end \
		actions sample ratio 1 / port_id id 2 / end

5): Ingress vf 0 1 => pf
	set port 0 mirror-rule 0 pool-mirror-up 0x3 dst-pool 2 on
    or
	flow create 0 ingress pattern vf id is 1 / end \
		actions sample ratio 1 / port_id id 0 / end
	flow create 0 ingress pattern vf id is 0 / end \
		actions sample ratio 1 / port_id id 0 / end
    or
	flow create 0 ingress pattern vf id last 1 / end \
		actions sample ratio 1 / port_id id 0 / end

6): Egress vf 0 1 => pf
	set port 0 mirror-rule 0 pool-mirror-down 0x3 dst-pool 2 on
    or
	flow create 0 egress pattern vf id is 0 / end \
		actions sample ratio 1 / port_id id 0 / end
	flow create 0 egress pattern vf id is 1 / end \
		actions sample ratio 1 / port_id id 0 / end
    or
	flow create 0 egress pattern vf id last 1 / end \
		actions sample ratio 1 / port_id id 0 / end

7): Ingress vf 0 => vf 1
	set port 0 mirror-rule 0 pool-mirror-up 0x1 dst-pool 1 on
    or
	flow create 0 ingress pattern vf id is 0 / end \
		actions sample ratio 1 / port_id id 2 / end

8): Egress vf 0 => vf 1
	set port 0 mirror-rule 0 pool-mirror-down 0x1 dst-pool 1 on
    or
	flow create 0 egress pattern vf id is 0 / end \
		actions sample ratio 1 / port_id id 2 / end

9): Ingress vlan 4,6 => vf 1
	set port 0 mirror-rule 0 vlan-mirror 4,6 dst-pool 1 on
    or
	flow create 0 ingress pattern vlan vid is 4 / end \
		actions sample ratio 1 / port_id id 2 / end
	flow create 0 ingress pattern vlan vid is 6 / end \
		actions sample ratio 1 / port_id id 2 / end
    or
	flow create 0 ingress pattern vlan vid is 4 vid last 6 \
		vid mask 0x5 / end \
		actions sample ratio 1 / port_id id 2 / end

Signed-off-by: Steve Yang <stevex.yang@intel.com>
---
 drivers/net/i40e/i40e_flow.c | 153 +++++++++++++++++++++++++++++++++++
 1 file changed, 153 insertions(+)

diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index 7928871bf..d6c95415c 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -119,6 +119,7 @@ static int i40e_flow_flush_fdir_filter(struct i40e_pf *pf);
 static int i40e_flow_flush_ethertype_filter(struct i40e_pf *pf);
 static int i40e_flow_flush_tunnel_filter(struct i40e_pf *pf);
 static int i40e_flow_flush_rss_filter(struct rte_eth_dev *dev);
+static int i40e_flow_flush_sample_filter(struct rte_eth_dev *dev);
 
 static int i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
 				       const struct rte_flow_attr *attr,
@@ -5517,6 +5518,104 @@ i40e_parse_sample_filter(struct rte_eth_dev *dev,
 	return 0;
 }
 
+static int
+i40e_config_sample_filter_set(struct rte_eth_dev *dev,
+			      struct i40e_mirror_rule_conf *conf)
+{
+	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct i40e_mirror_filter *it;
+	struct i40e_mirror_filter *mirror_filter;
+	uint16_t rule_id;
+	int ret;
+
+	if (pf->main_vsi->veb == NULL || pf->vfs == NULL) {
+		PMD_DRV_LOG(ERR,
+			"mirror rule can not be configured without veb or vfs.");
+		return -ENOSYS;
+	}
+	if (pf->nb_mirror_rule > I40E_MAX_MIRROR_RULES) {
+		PMD_DRV_LOG(ERR, "mirror table is full.");
+		return -ENOSPC;
+	}
+
+	TAILQ_FOREACH(it, &pf->mirror_filter_list, next) {
+		if (it->conf.dst_vsi_seid == conf->dst_vsi_seid &&
+		    it->conf.rule_type == conf->rule_type &&
+		    it->conf.num_entries == conf->num_entries &&
+		    !memcmp(it->conf.entries, conf->entries,
+			    conf->num_entries * sizeof(conf->entries[0]))) {
+			PMD_DRV_LOG(ERR, "mirror rule exists.");
+			return -EEXIST;
+		}
+	}
+
+	mirror_filter = rte_zmalloc("i40e_mirror_filter",
+				    sizeof(*mirror_filter), 0);
+	if (mirror_filter == NULL) {
+		PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+		return -ENOMEM;
+	}
+	mirror_filter->conf = *conf;
+
+	ret = i40e_aq_add_mirror_rule(hw,
+			pf->main_vsi->veb->seid,
+			mirror_filter->conf.dst_vsi_seid,
+			mirror_filter->conf.rule_type,
+			mirror_filter->conf.entries,
+			mirror_filter->conf.num_entries,
+			&rule_id);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR,
+			"failed to add mirror rule: ret = %d, aq_err = %d.",
+			ret, hw->aq.asq_last_status);
+		rte_free(mirror_filter);
+		return -ENOSYS;
+	}
+
+	mirror_filter->conf.rule_id = rule_id;
+
+	pf->nb_mirror_rule++;
+
+	TAILQ_INSERT_TAIL(&pf->mirror_filter_list, mirror_filter, next);
+
+	return 0;
+}
+
+static int
+i40e_config_sample_filter_del(struct rte_eth_dev *dev,
+			      struct i40e_mirror_rule_conf *conf)
+{
+	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct i40e_mirror_filter *mirror_filter;
+	void *temp;
+	int ret;
+
+	ret = i40e_aq_del_mirror_rule(hw,
+			pf->main_vsi->veb->seid,
+			conf->rule_type,
+			conf->entries,
+			conf->num_entries,
+			conf->rule_id);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR,
+			"failed to remove mirror rule: ret = %d, aq_err = %d.",
+			ret, hw->aq.asq_last_status);
+		return -ENOSYS;
+	}
+
+	TAILQ_FOREACH_SAFE(mirror_filter, &pf->mirror_filter_list, next, temp) {
+		if (!memcmp(&mirror_filter->conf, conf,
+			    sizeof(struct i40e_mirror_rule_conf))) {
+			TAILQ_REMOVE(&pf->mirror_filter_list,
+				     mirror_filter, next);
+			rte_free(mirror_filter);
+		}
+	}
+	return 0;
+}
+
 static int
 i40e_flow_validate(struct rte_eth_dev *dev,
 		   const struct rte_flow_attr *attr,
@@ -5562,6 +5661,12 @@ i40e_flow_validate(struct rte_eth_dev *dev,
 		return ret;
 	}
 
+	if ((actions + i)->type == RTE_FLOW_ACTION_TYPE_SAMPLE) {
+		ret = i40e_parse_sample_filter(dev, attr, pattern,
+					       actions, &cons_filter, error);
+		return ret;
+	}
+
 	i = 0;
 	/* Get the non-void item number of pattern */
 	while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
@@ -5681,6 +5786,14 @@ i40e_flow_create(struct rte_eth_dev *dev,
 		flow->rule = TAILQ_LAST(&pf->rss_config_list,
 					i40e_rss_conf_list);
 		break;
+	case RTE_ETH_FILTER_SAMPLE:
+		ret = i40e_config_sample_filter_set(dev,
+				&cons_filter.mirror_conf);
+		if (ret)
+			goto free_flow;
+		flow->rule = TAILQ_LAST(&pf->mirror_filter_list,
+					i40e_mirror_filter_list);
+		break;
 	default:
 		goto free_flow;
 	}
@@ -5735,6 +5848,10 @@ i40e_flow_destroy(struct rte_eth_dev *dev,
 		ret = i40e_config_rss_filter_del(dev,
 			&((struct i40e_rss_filter *)flow->rule)->rss_filter_info);
 		break;
+	case RTE_ETH_FILTER_SAMPLE:
+		ret = i40e_config_sample_filter_del(dev,
+			&((struct i40e_mirror_filter *)flow->rule)->conf);
+		break;
 	default:
 		PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
 			    filter_type);
@@ -5889,6 +6006,14 @@ i40e_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
 		return -rte_errno;
 	}
 
+	ret = i40e_flow_flush_sample_filter(dev);
+	if (ret) {
+		rte_flow_error_set(error, -ret,
+				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+				   "Failed to flush mirror flows.");
+		return -rte_errno;
+	}
+
 	return ret;
 }
 
@@ -6035,6 +6160,34 @@ i40e_flow_flush_rss_filter(struct rte_eth_dev *dev)
 	return ret;
 }
 
+/* remove the mirror filter */
+static int
+i40e_flow_flush_sample_filter(struct rte_eth_dev *dev)
+{
+	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	struct rte_flow *flow;
+	void *temp;
+	int32_t ret = -EINVAL;
+
+	/* Delete mirror flows in flow list. */
+	TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
+		struct i40e_mirror_filter *rule = flow->rule;
+
+		if (flow->filter_type != RTE_ETH_FILTER_SAMPLE)
+			continue;
+
+		if (rule) {
+			ret = i40e_config_sample_filter_del(dev, &rule->conf);
+			if (ret)
+				return ret;
+		}
+		TAILQ_REMOVE(&pf->flow_list, flow, node);
+		rte_free(flow);
+	}
+
+	return ret;
+}
+
 static int
 i40e_flow_query(struct rte_eth_dev *dev __rte_unused,
 		struct rte_flow *flow,
-- 
2.17.1