From: "Yang, Qiming" <qiming.yang@intel.com>
To: "Xing, Beilei" <beilei.xing@intel.com>,
	"dev@dpdk.org" <dev@dpdk.org>,
	"Wang, Haiyue" <haiyue.wang@intel.com>,
	"Zhang, Qi Z" <qi.z.zhang@intel.com>
Subject: Re: [dpdk-dev] [PATCH] net/ice: support flow ops thread safe
Date: Mon, 30 Mar 2020 08:02:47 +0000
Message-ID: <MN2PR11MB358265E76E0CAC5EC5421C74E5CB0@MN2PR11MB3582.namprd11.prod.outlook.com>
In-Reply-To: <1585586570-9956-1-git-send-email-beilei.xing@intel.com>



> -----Original Message-----
> From: Xing, Beilei <beilei.xing@intel.com>
> Sent: Tuesday, March 31, 2020 00:43
> To: dev@dpdk.org; Wang, Haiyue <haiyue.wang@intel.com>; Yang, Qiming
> <qiming.yang@intel.com>; Zhang, Qi Z <qi.z.zhang@intel.com>
> Subject: [PATCH] net/ice: support flow ops thread safe
> 
> For DCF, flow ops may be executed in different threads, so a thread-safe
> option for the generic flow APIs is needed.
> 
> Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
> ---
>  drivers/net/ice/ice_ethdev.h       |  1 +
>  drivers/net/ice/ice_generic_flow.c | 35 +++++++++++++++++++++++++++--------
>  2 files changed, 28 insertions(+), 8 deletions(-)
> 
> diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
> index 7b94a3c..f88f9dd 100644
> --- a/drivers/net/ice/ice_ethdev.h
> +++ b/drivers/net/ice/ice_ethdev.h
> @@ -384,6 +384,7 @@ struct ice_pf {
>  	bool offset_loaded;
>  	bool adapter_stopped;
>  	struct ice_flow_list flow_list;
> +	rte_spinlock_t flow_ops_lock;
>  	struct ice_parser_list rss_parser_list;
>  	struct ice_parser_list perm_parser_list;
>  	struct ice_parser_list dist_parser_list;
> 
> diff --git a/drivers/net/ice/ice_generic_flow.c b/drivers/net/ice/ice_generic_flow.c
> index 823ff0e..c042079 100644
> --- a/drivers/net/ice/ice_generic_flow.c
> +++ b/drivers/net/ice/ice_generic_flow.c
> @@ -1395,6 +1395,7 @@ ice_flow_init(struct ice_adapter *ad)
>  	TAILQ_INIT(&pf->rss_parser_list);
>  	TAILQ_INIT(&pf->perm_parser_list);
>  	TAILQ_INIT(&pf->dist_parser_list);
> +	rte_spinlock_init(&pf->flow_ops_lock);
> 
>  	TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
>  		if (engine->init == NULL) {
> @@ -1862,19 +1863,24 @@ ice_flow_create(struct rte_eth_dev *dev,
>  		return flow;
>  	}
> 
> +	rte_spinlock_lock(&pf->flow_ops_lock);
> +
>  	ret = ice_flow_process_filter(dev, flow, attr, pattern, actions,
>  			&engine, ice_parse_engine_create, error);
> -	if (ret < 0)
> -		goto free_flow;
> +	if (ret < 0) {
> +		PMD_DRV_LOG(ERR, "Failed to create flow");
> +		rte_free(flow);
> +		flow = NULL;
> +		goto out;
> +	}
> +
>  	flow->engine = engine;
>  	TAILQ_INSERT_TAIL(&pf->flow_list, flow, node);
>  	PMD_DRV_LOG(INFO, "Succeeded to create (%d) flow", engine->type);
> -	return flow;
> 
> -free_flow:
> -	PMD_DRV_LOG(ERR, "Failed to create flow");
> -	rte_free(flow);
> -	return NULL;
> +out:
> +	rte_spinlock_unlock(&pf->flow_ops_lock);
> +	return flow;
>  }
> 
>  static int
> @@ -1894,8 +1900,9 @@ ice_flow_destroy(struct rte_eth_dev *dev,
>  		return -rte_errno;
>  	}
> 
> -	ret = flow->engine->destroy(ad, flow, error);
> +	rte_spinlock_lock(&pf->flow_ops_lock);
> 
> +	ret = flow->engine->destroy(ad, flow, error);
>  	if (!ret) {
>  		TAILQ_REMOVE(&pf->flow_list, flow, node);
>  		rte_free(flow);
> @@ -1903,6 +1910,8 @@ ice_flow_destroy(struct rte_eth_dev *dev,
>  		PMD_DRV_LOG(ERR, "Failed to destroy flow");
>  	}
> 
> +	rte_spinlock_unlock(&pf->flow_ops_lock);
> +
>  	return ret;
>  }
> 
> @@ -1937,6 +1946,7 @@ ice_flow_query(struct rte_eth_dev *dev,
>  	struct ice_adapter *ad =
>  		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
>  	struct rte_flow_query_count *count = data;
> +	struct ice_pf *pf = &ad->pf;
> 
>  	if (!flow || !flow->engine || !flow->engine->query_count) {
>  		rte_flow_error_set(error, EINVAL,
> @@ -1945,6 +1955,8 @@ ice_flow_query(struct rte_eth_dev *dev,
>  		return -rte_errno;
>  	}
> 
> +	rte_spinlock_lock(&pf->flow_ops_lock);
> +
>  	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
>  		switch (actions->type) {
>  		case RTE_FLOW_ACTION_TYPE_VOID:
> @@ -1959,6 +1971,9 @@ ice_flow_query(struct rte_eth_dev *dev,
>  					"action not supported");
>  		}
>  	}
> +
> +	rte_spinlock_unlock(&pf->flow_ops_lock);
> +
>  	return ret;
>  }
> 
> @@ -1971,6 +1986,8 @@ ice_flow_redirect(struct ice_adapter *ad,
>  	void *temp;
>  	int ret;
> 
> +	rte_spinlock_lock(&pf->flow_ops_lock);
> +
>  	TAILQ_FOREACH_SAFE(p_flow, &pf->flow_list, node, temp) {
>  		if (!p_flow->engine->redirect)
>  			continue;
> @@ -1981,5 +1998,7 @@ ice_flow_redirect(struct ice_adapter *ad,
>  		}
>  	}
> 
> +	rte_spinlock_unlock(&pf->flow_ops_lock);
> +
>  	return 0;
>  }
> --
> 2.7.4
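
The core of the change is a single per-PF rte_spinlock serializing all
generic flow operations (create/destroy/query/redirect). For readers
unfamiliar with the pattern, here is a minimal standalone sketch of the
same scheme using DPDK's rte_spinlock API; the demo_* names are
illustrative, not the driver's:

  #include <rte_spinlock.h>

  /* One non-recursive spinlock in the per-port private data guards
   * every flow op, so calls arriving from different threads (e.g.
   * through DCF) cannot interleave while the flow list and hardware
   * state are being updated. */
  struct demo_pf {
          rte_spinlock_t flow_ops_lock;
  };

  static void
  demo_flow_init(struct demo_pf *pf)
  {
          rte_spinlock_init(&pf->flow_ops_lock); /* starts unlocked */
  }

  static int
  demo_flow_op(struct demo_pf *pf)
  {
          int ret = 0;

          rte_spinlock_lock(&pf->flow_ops_lock);
          /* ... parse pattern/actions, program HW, update flow list ... */
          rte_spinlock_unlock(&pf->flow_ops_lock); /* release on every path */
          return ret;
  }

This is also why ice_flow_create is restructured around a single 'out:'
exit label: with the previous early return, an error path would leave
the lock held.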

Acked-by: Qiming Yang <qiming.yang@intel.com>
