* Re: [dpdk-dev] [PATCH] net/ice: support flow ops thread safe
2020-03-30 16:42 [dpdk-dev] [PATCH] net/ice: support flow ops thread safe Beilei Xing
@ 2020-03-30 8:02 ` Yang, Qiming
From: Yang, Qiming @ 2020-03-30 8:02 UTC
To: Xing, Beilei, dev, Wang, Haiyue, Zhang, Qi Z
> -----Original Message-----
> From: Xing, Beilei <beilei.xing@intel.com>
> Sent: Tuesday, March 31, 2020 00:43
> To: dev@dpdk.org; Wang, Haiyue <haiyue.wang@intel.com>; Yang, Qiming
> <qiming.yang@intel.com>; Zhang, Qi Z <qi.z.zhang@intel.com>
> Subject: [PATCH] net/ice: support flow ops thread safe
>
> For DCF, flow ops may be executed in different threads, so a thread-safe
> option for generic flow APIs is needed.
>
> Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
> ---
> drivers/net/ice/ice_ethdev.h | 1 +
> drivers/net/ice/ice_generic_flow.c | 35 +++++++++++++++++++++++++++--------
> 2 files changed, 28 insertions(+), 8 deletions(-)
>
> diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
> index 7b94a3c..f88f9dd 100644
> --- a/drivers/net/ice/ice_ethdev.h
> +++ b/drivers/net/ice/ice_ethdev.h
> @@ -384,6 +384,7 @@ struct ice_pf {
> bool offset_loaded;
> bool adapter_stopped;
> struct ice_flow_list flow_list;
> + rte_spinlock_t flow_ops_lock;
> struct ice_parser_list rss_parser_list;
> struct ice_parser_list perm_parser_list;
> struct ice_parser_list dist_parser_list;
>
> diff --git a/drivers/net/ice/ice_generic_flow.c b/drivers/net/ice/ice_generic_flow.c
> index 823ff0e..c042079 100644
> --- a/drivers/net/ice/ice_generic_flow.c
> +++ b/drivers/net/ice/ice_generic_flow.c
> @@ -1395,6 +1395,7 @@ ice_flow_init(struct ice_adapter *ad)
> TAILQ_INIT(&pf->rss_parser_list);
> TAILQ_INIT(&pf->perm_parser_list);
> TAILQ_INIT(&pf->dist_parser_list);
> + rte_spinlock_init(&pf->flow_ops_lock);
>
> TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
> if (engine->init == NULL) {
> @@ -1862,19 +1863,24 @@ ice_flow_create(struct rte_eth_dev *dev,
> return flow;
> }
>
> + rte_spinlock_lock(&pf->flow_ops_lock);
> +
> ret = ice_flow_process_filter(dev, flow, attr, pattern, actions,
> &engine, ice_parse_engine_create, error);
> - if (ret < 0)
> - goto free_flow;
> + if (ret < 0) {
> + PMD_DRV_LOG(ERR, "Failed to create flow");
> + rte_free(flow);
> + flow = NULL;
> + goto out;
> + }
> +
> flow->engine = engine;
> TAILQ_INSERT_TAIL(&pf->flow_list, flow, node);
> PMD_DRV_LOG(INFO, "Succeeded to create (%d) flow", engine-
> >type);
> - return flow;
>
> -free_flow:
> - PMD_DRV_LOG(ERR, "Failed to create flow");
> - rte_free(flow);
> - return NULL;
> +out:
> + rte_spinlock_unlock(&pf->flow_ops_lock);
> + return flow;
> }
>
> static int
> @@ -1894,8 +1900,9 @@ ice_flow_destroy(struct rte_eth_dev *dev,
> return -rte_errno;
> }
>
> - ret = flow->engine->destroy(ad, flow, error);
> + rte_spinlock_lock(&pf->flow_ops_lock);
>
> + ret = flow->engine->destroy(ad, flow, error);
> if (!ret) {
> TAILQ_REMOVE(&pf->flow_list, flow, node);
> rte_free(flow);
> @@ -1903,6 +1910,8 @@ ice_flow_destroy(struct rte_eth_dev *dev,
> PMD_DRV_LOG(ERR, "Failed to destroy flow");
> }
>
> + rte_spinlock_unlock(&pf->flow_ops_lock);
> +
> return ret;
> }
>
> @@ -1937,6 +1946,7 @@ ice_flow_query(struct rte_eth_dev *dev,
> struct ice_adapter *ad =
> ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
> struct rte_flow_query_count *count = data;
> + struct ice_pf *pf = &ad->pf;
>
> if (!flow || !flow->engine || !flow->engine->query_count) {
> rte_flow_error_set(error, EINVAL,
> @@ -1945,6 +1955,8 @@ ice_flow_query(struct rte_eth_dev *dev,
> return -rte_errno;
> }
>
> + rte_spinlock_lock(&pf->flow_ops_lock);
> +
> for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
> switch (actions->type) {
> case RTE_FLOW_ACTION_TYPE_VOID:
> @@ -1959,6 +1971,9 @@ ice_flow_query(struct rte_eth_dev *dev,
> "action not supported");
> }
> }
> +
> + rte_spinlock_unlock(&pf->flow_ops_lock);
> +
> return ret;
> }
>
> @@ -1971,6 +1986,8 @@ ice_flow_redirect(struct ice_adapter *ad,
> void *temp;
> int ret;
>
> + rte_spinlock_lock(&pf->flow_ops_lock);
> +
> TAILQ_FOREACH_SAFE(p_flow, &pf->flow_list, node, temp) {
> if (!p_flow->engine->redirect)
> continue;
> @@ -1981,5 +1998,7 @@ ice_flow_redirect(struct ice_adapter *ad,
> }
> }
>
> + rte_spinlock_unlock(&pf->flow_ops_lock);
> +
> return 0;
> }
> --
> 2.7.4
Acked-by: Qiming Yang <qiming.yang@intel.com>
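
For readers less familiar with the locking scheme, a minimal sketch of the pattern the patch applies is shown below: a per-PF rte_spinlock, initialized once when the flow module is set up, serializes every generic flow operation. This is illustrative only; the struct and function names (example_pf, example_flow_init, example_flow_op) are hypothetical and not part of the driver.

#include <rte_spinlock.h>

struct example_pf {
	rte_spinlock_t flow_ops_lock;	/* one lock per PF, as in ice_pf */
	/* ... flow list, parser lists ... */
};

static void
example_flow_init(struct example_pf *pf)
{
	/* initialized exactly once, before any flow op can run */
	rte_spinlock_init(&pf->flow_ops_lock);
}

static int
example_flow_op(struct example_pf *pf)
{
	int ret = 0;

	rte_spinlock_lock(&pf->flow_ops_lock);
	/*
	 * create/destroy/query/redirect work goes here; the flow list and
	 * hardware state are only touched while the lock is held.
	 */
	rte_spinlock_unlock(&pf->flow_ops_lock);

	return ret;
}

A spinlock keeps the change self-contained in the PMD control path; since flow operations are not expected on the fast path, brief busy-waiting under contention is an acceptable trade-off.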
* [dpdk-dev] [PATCH] net/ice: support flow ops thread safe
@ 2020-03-30 16:42 Beilei Xing
2020-03-30 8:02 ` Yang, Qiming
From: Beilei Xing @ 2020-03-30 16:42 UTC
To: dev, haiyue.wang, qiming.yang, qi.z.zhang
For DCF, flow ops may be executed in different threads,
so a thread-safe option for generic flow APIs is needed.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
drivers/net/ice/ice_ethdev.h | 1 +
drivers/net/ice/ice_generic_flow.c | 35 +++++++++++++++++++++++++++--------
2 files changed, 28 insertions(+), 8 deletions(-)
diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index 7b94a3c..f88f9dd 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -384,6 +384,7 @@ struct ice_pf {
bool offset_loaded;
bool adapter_stopped;
struct ice_flow_list flow_list;
+ rte_spinlock_t flow_ops_lock;
struct ice_parser_list rss_parser_list;
struct ice_parser_list perm_parser_list;
struct ice_parser_list dist_parser_list;
diff --git a/drivers/net/ice/ice_generic_flow.c b/drivers/net/ice/ice_generic_flow.c
index 823ff0e..c042079 100644
--- a/drivers/net/ice/ice_generic_flow.c
+++ b/drivers/net/ice/ice_generic_flow.c
@@ -1395,6 +1395,7 @@ ice_flow_init(struct ice_adapter *ad)
TAILQ_INIT(&pf->rss_parser_list);
TAILQ_INIT(&pf->perm_parser_list);
TAILQ_INIT(&pf->dist_parser_list);
+ rte_spinlock_init(&pf->flow_ops_lock);
TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
if (engine->init == NULL) {
@@ -1862,19 +1863,24 @@ ice_flow_create(struct rte_eth_dev *dev,
return flow;
}
+ rte_spinlock_lock(&pf->flow_ops_lock);
+
ret = ice_flow_process_filter(dev, flow, attr, pattern, actions,
&engine, ice_parse_engine_create, error);
- if (ret < 0)
- goto free_flow;
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to create flow");
+ rte_free(flow);
+ flow = NULL;
+ goto out;
+ }
+
flow->engine = engine;
TAILQ_INSERT_TAIL(&pf->flow_list, flow, node);
PMD_DRV_LOG(INFO, "Succeeded to create (%d) flow", engine->type);
- return flow;
-free_flow:
- PMD_DRV_LOG(ERR, "Failed to create flow");
- rte_free(flow);
- return NULL;
+out:
+ rte_spinlock_unlock(&pf->flow_ops_lock);
+ return flow;
}
static int
@@ -1894,8 +1900,9 @@ ice_flow_destroy(struct rte_eth_dev *dev,
return -rte_errno;
}
- ret = flow->engine->destroy(ad, flow, error);
+ rte_spinlock_lock(&pf->flow_ops_lock);
+ ret = flow->engine->destroy(ad, flow, error);
if (!ret) {
TAILQ_REMOVE(&pf->flow_list, flow, node);
rte_free(flow);
@@ -1903,6 +1910,8 @@ ice_flow_destroy(struct rte_eth_dev *dev,
PMD_DRV_LOG(ERR, "Failed to destroy flow");
}
+ rte_spinlock_unlock(&pf->flow_ops_lock);
+
return ret;
}
@@ -1937,6 +1946,7 @@ ice_flow_query(struct rte_eth_dev *dev,
struct ice_adapter *ad =
ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
struct rte_flow_query_count *count = data;
+ struct ice_pf *pf = &ad->pf;
if (!flow || !flow->engine || !flow->engine->query_count) {
rte_flow_error_set(error, EINVAL,
@@ -1945,6 +1955,8 @@ ice_flow_query(struct rte_eth_dev *dev,
return -rte_errno;
}
+ rte_spinlock_lock(&pf->flow_ops_lock);
+
for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
switch (actions->type) {
case RTE_FLOW_ACTION_TYPE_VOID:
@@ -1959,6 +1971,9 @@ ice_flow_query(struct rte_eth_dev *dev,
"action not supported");
}
}
+
+ rte_spinlock_unlock(&pf->flow_ops_lock);
+
return ret;
}
@@ -1971,6 +1986,8 @@ ice_flow_redirect(struct ice_adapter *ad,
void *temp;
int ret;
+ rte_spinlock_lock(&pf->flow_ops_lock);
+
TAILQ_FOREACH_SAFE(p_flow, &pf->flow_list, node, temp) {
if (!p_flow->engine->redirect)
continue;
@@ -1981,5 +1998,7 @@ ice_flow_redirect(struct ice_adapter *ad,
}
}
+ rte_spinlock_unlock(&pf->flow_ops_lock);
+
return 0;
}
--
2.7.4
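
From the application side, the scenario this lock guards looks roughly like the sketch below: with DCF, two threads may issue rte_flow operations against the same port at the same time, and the per-PF spinlock added in this patch serializes the create/destroy paths inside the PMD. The worker function, the placeholder ETH/DROP rule, and the launch comment are hypothetical, chosen only to make the calls well-formed.

#include <stdint.h>
#include <rte_flow.h>

static int
flow_worker(void *arg)
{
	uint16_t port_id = *(uint16_t *)arg;
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_DROP },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;
	struct rte_flow *flow;

	/*
	 * Two workers may reach the PMD concurrently; the per-PF spinlock
	 * serializes ice_flow_create()/ice_flow_destroy() underneath.
	 */
	flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
	if (flow != NULL)
		rte_flow_destroy(port_id, flow, &err);

	return 0;
}

/* e.g. launched on two lcores via rte_eal_remote_launch(flow_worker, &port_id, lcore_id) */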