Date: Sat, 7 Sep 2019 00:12:58 +0800
From: Ye Xiaolong
To: Ying Wang
Cc: qi.z.zhang@intel.com, qiming.yang@intel.com, dev@dpdk.org, wei.zhao1@intel.com
Message-ID: <20190906161258.GA108591@intel.com>
References: <20190903221522.151382-1-ying.a.wang@intel.com>
 <20190903221522.151382-3-ying.a.wang@intel.com>
In-Reply-To: <20190903221522.151382-3-ying.a.wang@intel.com>
Subject: Re: [dpdk-dev] [PATCH 2/4] net/ice: rework for generic flow enabling

On 09/04, Ying Wang wrote:
>The patch reworks the generic flow API (rte_flow) implementation.
>It introduces an abstract layer which provides a unified interface
>for low-level filter engines (switch, fdir, hash) to register supported
>patterns and actions and to implement the flow validate/create/destroy/
>flush/query operations.
>
>The patch also removes the existing switch filter implementation to
>avoid a compile error. The switch filter implementation for the new
>framework will be added in the following patch.
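
For anyone reading along without the full series at hand: the unified
interface described above boils down to an engine vtable plus per-stage
parser registration. Below is a minimal sketch inferred only from the
usage visible in this diff (engine->init/uninit/free, engine->type,
parser->engine, parser->stage); the enum type names and anything beyond
those fields are assumptions, so the real definitions in
ice_generic_flow.h may differ:

/* Sketch only, inferred from usage in this patch; not the actual
 * definitions from ice_generic_flow.h.
 */
struct ice_flow_engine {
	TAILQ_ENTRY(ice_flow_engine) node;
	enum ice_flow_engine_type type;	/* ICE_FLOW_ENGINE_SWITCH/FDIR/HASH */
	int (*init)(struct ice_adapter *ad);
	void (*uninit)(struct ice_adapter *ad);
	void (*free)(struct rte_flow *flow);
	/* create/destroy/query-count hooks omitted in this sketch */
};

struct ice_flow_parser {
	TAILQ_ENTRY(ice_flow_parser) node;
	struct ice_flow_engine *engine;	/* engine backing this parser */
	enum ice_flow_classification_stage stage;	/* RSS/PERMISSION/DISTRIBUTOR */
	/* supported pattern/action tables omitted in this sketch */
};
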
>
>Signed-off-by: Ying Wang
>---
> drivers/net/ice/ice_ethdev.c        |  22 +-
> drivers/net/ice/ice_ethdev.h        |  15 +-
> drivers/net/ice/ice_generic_flow.c  | 768 +++++++++++++++--------------------
> drivers/net/ice/ice_generic_flow.h  | 782 ++++++++----------------------------
> drivers/net/ice/ice_switch_filter.c | 511 -----------------------
> drivers/net/ice/ice_switch_filter.h |  18 -
> 6 files changed, 525 insertions(+), 1591 deletions(-)
>
>diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
>index 4e0645db1..647aca3ed 100644
>--- a/drivers/net/ice/ice_ethdev.c
>+++ b/drivers/net/ice/ice_ethdev.c
>@@ -15,7 +15,7 @@
> #include "base/ice_dcb.h"
> #include "ice_ethdev.h"
> #include "ice_rxtx.h"
>-#include "ice_switch_filter.h"
>+#include "ice_generic_flow.h"
>
> /* devargs */
> #define ICE_SAFE_MODE_SUPPORT_ARG "safe-mode-support"
>@@ -1677,7 +1677,11 @@ ice_dev_init(struct rte_eth_dev *dev)
> 	/* get base queue pairs index in the device */
> 	ice_base_queue_get(pf);
>
>-	TAILQ_INIT(&pf->flow_list);
>+	ret = ice_flow_init(ad);
>+	if (ret) {
>+		PMD_INIT_LOG(ERR, "Failed to initialize flow");
>+		return ret;
>+	}
>
> 	return 0;
>
>@@ -1796,6 +1800,8 @@ ice_dev_close(struct rte_eth_dev *dev)
> {
> 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
> 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
>+	struct ice_adapter *ad =
>+		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
>
> 	/* Since stop will make link down, then the link event will be
> 	 * triggered, disable the irq firstly to avoid the port_infoe etc
>@@ -1806,6 +1812,8 @@ ice_dev_close(struct rte_eth_dev *dev)
>
> 	ice_dev_stop(dev);
>
>+	ice_flow_uninit(ad);
>+
> 	/* release all queue resource */
> 	ice_free_queues(dev);
>
>@@ -1822,8 +1830,6 @@ ice_dev_uninit(struct rte_eth_dev *dev)
> {
> 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
> 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
>-	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
>-	struct rte_flow *p_flow;
>
> 	ice_dev_close(dev);
>
>@@ -1840,14 +1846,6 @@ ice_dev_uninit(struct rte_eth_dev *dev)
> 	/* unregister callback func from eal lib */
> 	rte_intr_callback_unregister(intr_handle,
> 				     ice_interrupt_handler, dev);
>-
>-	/* Remove all flows */
>-	while ((p_flow = TAILQ_FIRST(&pf->flow_list))) {
>-		TAILQ_REMOVE(&pf->flow_list, p_flow, node);
>-		ice_free_switch_filter_rule(p_flow->rule);
>-		rte_free(p_flow);
>-	}
>-
> 	return 0;
> }
>
>diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
>index 9bf5de08d..d1d07641d 100644
>--- a/drivers/net/ice/ice_ethdev.h
>+++ b/drivers/net/ice/ice_ethdev.h
>@@ -241,16 +241,14 @@ struct ice_vsi {
> 	bool offset_loaded;
> };
>
>-extern const struct rte_flow_ops ice_flow_ops;
>-
>-/* Struct to store flow created.
>- */
>-struct rte_flow {
>-	TAILQ_ENTRY(rte_flow) node;
>-	void *rule;
>-};
>
>+struct rte_flow;
> TAILQ_HEAD(ice_flow_list, rte_flow);
>
>+
>+struct ice_flow_parser;
>+TAILQ_HEAD(ice_parser_list, ice_flow_parser);
>+
> struct ice_pf {
> 	struct ice_adapter *adapter; /* The adapter this PF associate to */
> 	struct ice_vsi *main_vsi; /* pointer to main VSI structure */
>@@ -278,6 +276,9 @@ struct ice_pf {
> 	bool offset_loaded;
> 	bool adapter_stopped;
> 	struct ice_flow_list flow_list;
>+	struct ice_parser_list rss_parser_list;
>+	struct ice_parser_list perm_parser_list;
>+	struct ice_parser_list dist_parser_list;
> };
>
> /**
>diff --git a/drivers/net/ice/ice_generic_flow.c b/drivers/net/ice/ice_generic_flow.c
>index 1c0adc779..aa11d6170 100644
>--- a/drivers/net/ice/ice_generic_flow.c
>+++ b/drivers/net/ice/ice_generic_flow.c
>@@ -17,7 +17,22 @@
>
> #include "ice_ethdev.h"
> #include "ice_generic_flow.h"
>-#include "ice_switch_filter.h"
>+
>+/**
>+ * Non-pipeline mode, fdir and swith both used as distributor,

s/swith/switch

>+ * fdir used first, switch used as fdir's backup.
>+ */
>+#define ICE_FLOW_CLASSIFY_STAGE_DISTRIBUTOR_ONLY 0
>+/*Pipeline mode, switch used at permission stage*/
>+#define ICE_FLOW_CLASSIFY_STAGE_PERMISSION 1
>+/*Pipeline mode, fdir used at distributor stage*/
>+#define ICE_FLOW_CLASSIFY_STAGE_DISTRIBUTOR 2
>+
>+static int ice_pipeline_stage =
>+	ICE_FLOW_CLASSIFY_STAGE_DISTRIBUTOR_ONLY;
>+
>+static struct ice_engine_list engine_list =
>+	TAILQ_HEAD_INITIALIZER(engine_list);
>
> static int ice_flow_validate(struct rte_eth_dev *dev,
> 		const struct rte_flow_attr *attr,
>@@ -34,17 +49,153 @@ static int ice_flow_destroy(struct rte_eth_dev *dev,
> 		struct rte_flow_error *error);
> static int ice_flow_flush(struct rte_eth_dev *dev,
> 		struct rte_flow_error *error);
>+static int ice_flow_query_count(struct rte_eth_dev *dev,
>+		struct rte_flow *flow,
>+		const struct rte_flow_action *actions,
>+		void *data,
>+		struct rte_flow_error *error);
>
> const struct rte_flow_ops ice_flow_ops = {
> 	.validate = ice_flow_validate,
> 	.create = ice_flow_create,
> 	.destroy = ice_flow_destroy,
> 	.flush = ice_flow_flush,
>+	.query = ice_flow_query_count,
> };
>
>+
>+void
>+ice_register_flow_engine(struct ice_flow_engine *engine)
>+{
>+	TAILQ_INSERT_TAIL(&engine_list, engine, node);
>+}
>+
>+int
>+ice_flow_init(struct ice_adapter *ad)
>+{
>+	int ret = 0;
>+	struct ice_pf *pf = &ad->pf;
>+	void *temp;
>+	struct ice_flow_engine *engine = NULL;
>+
>+	TAILQ_INIT(&pf->flow_list);
>+	TAILQ_INIT(&pf->rss_parser_list);
>+	TAILQ_INIT(&pf->perm_parser_list);
>+	TAILQ_INIT(&pf->dist_parser_list);
>+
>+	TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
>+		if (engine->init == NULL)
>+			return -EINVAL;
>+
>+		ret = engine->init(ad);
>+		if (ret)
>+			return ret;
>+	}
>+	return 0;
>+}
>+
>+void
>+ice_flow_uninit(struct ice_adapter *ad)
>+{
>+	struct ice_pf *pf = &ad->pf;
>+	struct ice_flow_engine *engine;
>+	struct rte_flow *p_flow;
>+	struct ice_flow_parser *p_parser;
>+	void *temp;
>+
>+	TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
>+		if (engine->uninit)
>+			engine->uninit(ad);
>+	}
>+
>+	/* Remove all flows */
>+	while ((p_flow = TAILQ_FIRST(&pf->flow_list))) {
>+		TAILQ_REMOVE(&pf->flow_list, p_flow, node);
>+		if (p_flow->engine->free)
>+			p_flow->engine->free(p_flow);
>+		rte_free(p_flow);
>+	}
>+
>+	/* Cleanup parser list */
>+	while ((p_parser = TAILQ_FIRST(&pf->rss_parser_list)))
>+		TAILQ_REMOVE(&pf->rss_parser_list, p_parser, node);
>+
>+	while ((p_parser = TAILQ_FIRST(&pf->perm_parser_list)))
>+		TAILQ_REMOVE(&pf->perm_parser_list, p_parser, node);
>+
>+	while ((p_parser = TAILQ_FIRST(&pf->dist_parser_list)))
>+		TAILQ_REMOVE(&pf->dist_parser_list, p_parser, node);
>+}
>+
>+int
>+ice_register_parser(struct ice_flow_parser *parser,
>+		struct ice_adapter *ad)
>+{
>+	struct ice_parser_list *list = NULL;
>+	struct ice_pf *pf = &ad->pf;
>+
>+	switch (parser->stage) {
>+	case ICE_FLOW_STAGE_RSS:
>+		list = &pf->rss_parser_list;
>+		break;
>+	case ICE_FLOW_STAGE_PERMISSION:
>+		list = &pf->perm_parser_list;
>+		break;
>+	case ICE_FLOW_STAGE_DISTRIBUTOR:
>+		list = &pf->dist_parser_list;
>+		break;
>+	default:
>+		return -EINVAL;
>+	}
>+
>+	if (ad->devargs.pipeline_mode_support)
>+		TAILQ_INSERT_TAIL(list, parser, node);
>+	else {
>+		if (parser->engine->type == ICE_FLOW_ENGINE_SWITCH
>+			|| parser->engine->type == ICE_FLOW_ENGINE_HASH)
>+			TAILQ_INSERT_TAIL(list, parser, node);
>+		else if (parser->engine->type == ICE_FLOW_ENGINE_FDIR)
>+			TAILQ_INSERT_HEAD(list, parser, node);
>+		else
>+			return -EINVAL;
>+	}
>+	return 0;
>+}
>+
>+void
>+ice_unregister_parser(struct ice_flow_parser *parser,
>+		struct ice_adapter *ad)
>+{
>+	struct ice_pf *pf = &ad->pf;
>+	struct ice_parser_list *list;
>+	struct ice_flow_parser *p_parser;
>+	void *temp;
>+
>+	switch (parser->stage) {
>+	case ICE_FLOW_STAGE_RSS:
>+		list = &pf->rss_parser_list;
>+		break;
>+	case ICE_FLOW_STAGE_PERMISSION:
>+		list = &pf->perm_parser_list;
>+		break;
>+	case ICE_FLOW_STAGE_DISTRIBUTOR:
>+		list = &pf->dist_parser_list;
>+		break;
>+	default:
>+		return;
>+	}

The switch blocks in the above functions are identical; it would be
better to factor them into a common helper to reduce the duplicated
code.
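
Something along these lines would do (just a sketch; the helper name is
invented here, not taken from the patch):

/* Hypothetical helper: map a parser stage to its list, or NULL for an
 * unknown stage. ice_register_parser() and ice_unregister_parser()
 * could both call this instead of open-coding the switch.
 */
static struct ice_parser_list *
ice_get_parser_list(struct ice_flow_parser *parser,
		struct ice_adapter *ad)
{
	struct ice_pf *pf = &ad->pf;

	switch (parser->stage) {
	case ICE_FLOW_STAGE_RSS:
		return &pf->rss_parser_list;
	case ICE_FLOW_STAGE_PERMISSION:
		return &pf->perm_parser_list;
	case ICE_FLOW_STAGE_DISTRIBUTOR:
		return &pf->dist_parser_list;
	default:
		return NULL;
	}
}

ice_register_parser() would then start with list =
ice_get_parser_list(parser, ad) and return -EINVAL when the helper
gives back NULL; ice_unregister_parser() would simply return.

Thanks,
Xiaolong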