DPDK patches and discussions
From: Dekel Peled <dekelp@mellanox.com>
To: matan@mellanox.com, viacheslavo@mellanox.com, rasland@mellanox.com
Cc: dev@dpdk.org
Subject: [dpdk-dev] [PATCH 2/6] net/mlx5: rename Verbs action to generic name
Date: Sun, 28 Jun 2020 17:06:51 +0300
Message-ID: <9fab65273a5ac747699d95543851939e355c4fb7.1593352527.git.dekelp@mellanox.com>
In-Reply-To: <cover.1593352527.git.dekelp@mellanox.com>

As part of the effort to support DPDK on Windows and other operating
systems, rename the 'verbs_action' struct fields to the generic name
'action'.
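
For readers unfamiliar with the mlx5 PMD, a minimal illustrative sketch
of the pattern this rename enables (not part of the patch; the structure
is simplified from mlx5_flow.h, and the create function is a hypothetical
stand-in for the OS-specific glue layer this series builds toward):

#include <errno.h>

/*
 * Illustration only -- simplified from mlx5_flow.h. The create
 * function below is a hypothetical stand-in for the OS-specific
 * glue layer added later in this series.
 */
void *mlx5_os_flow_action_create(void); /* hypothetical */

struct encap_decap_resource {
	void *action;
	/* Generic handle: a Verbs/DV object on Linux, some other
	 * object type on other operating systems. With the generic
	 * name, common flow code no longer mentions Verbs at all.
	 */
};

static int
encap_decap_action_create(struct encap_decap_resource *res)
{
	/* Store whatever handle the OS layer returns. */
	res->action = mlx5_os_flow_action_create();
	if (!res->action)
		return -ENOMEM;
	return 0;
}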

Signed-off-by: Dekel Peled <dekelp@mellanox.com>
---
 drivers/net/mlx5/mlx5_flow.h    | 16 ++++++++--------
 drivers/net/mlx5/mlx5_flow_dv.c | 28 ++++++++++++++--------------
 2 files changed, 22 insertions(+), 22 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 74d2c49..22cc356 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -399,8 +399,8 @@ struct mlx5_flow_dv_encap_decap_resource {
 	ILIST_ENTRY(uint32_t)next;
 	/* Pointer to next element. */
 	rte_atomic32_t refcnt; /**< Reference counter. */
-	void *verbs_action;
-	/**< Verbs encap/decap action object. */
+	void *action;
+	/**< Encap/decap action object. */
 	uint8_t buf[MLX5_ENCAP_MAX_LEN];
 	size_t size;
 	uint8_t reformat_type;
@@ -413,7 +413,7 @@ struct mlx5_flow_dv_tag_resource {
 	struct mlx5_hlist_entry entry;
 	/**< hash list entry for tag resource, tag value as the key. */
 	void *action;
-	/**< Verbs tag action object. */
+	/**< Tag action object. */
 	rte_atomic32_t refcnt; /**< Reference counter. */
 	uint32_t idx; /**< Index for the index memory pool. */
 };
@@ -436,8 +436,8 @@ struct mlx5_flow_dv_modify_hdr_resource {
 	LIST_ENTRY(mlx5_flow_dv_modify_hdr_resource) next;
 	/* Pointer to next element. */
 	rte_atomic32_t refcnt; /**< Reference counter. */
-	struct ibv_flow_action *verbs_action;
-	/**< Verbs modify header action object. */
+	struct ibv_flow_action *action;
+	/**< Modify header action object. */
 	uint8_t ft_type; /**< Flow table type, Rx or Tx. */
 	uint32_t actions_num; /**< Number of modification actions. */
 	uint64_t flags; /**< Flags for RDMA API. */
@@ -458,7 +458,7 @@ struct mlx5_flow_dv_port_id_action_resource {
 	/* Pointer to next element. */
 	rte_atomic32_t refcnt; /**< Reference counter. */
 	void *action;
-	/**< Verbs tag action object. */
+	/**< Action object. */
 	uint32_t port_id; /**< Port ID value. */
 };
 
@@ -467,7 +467,7 @@ struct mlx5_flow_dv_push_vlan_action_resource {
 	ILIST_ENTRY(uint32_t)next;
 	/* Pointer to next element. */
 	rte_atomic32_t refcnt; /**< Reference counter. */
-	void *action; /**< Direct verbs action object. */
+	void *action; /**< Action object. */
 	uint8_t ft_type; /**< Flow table type, Rx, Tx or FDB. */
 	rte_be32_t vlan_tag; /**< VLAN tag value. */
 };
@@ -660,7 +660,7 @@ struct mlx5_flow_verbs_workspace {
 struct mlx5_flow {
 	struct rte_flow *flow; /**< Pointer to the main flow. */
 	uint32_t flow_idx; /**< The memory pool index to the main flow. */
-	uint64_t hash_fields; /**< Verbs hash Rx queue hash fields. */
+	uint64_t hash_fields; /**< Hash Rx queue hash fields. */
 	uint64_t act_flags;
 	/**< Bit-fields of detected actions, see MLX5_FLOW_ACTION_*. */
 	bool external; /**< true if the flow is created external to PMD. */
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index ae4b05c..dc8d952 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -2603,13 +2603,13 @@ struct field_modify_info modify_tcp[] = {
 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
 					  "cannot allocate resource memory");
 	*cache_resource = *resource;
-	cache_resource->verbs_action =
+	cache_resource->action =
 		mlx5_glue->dv_create_flow_action_packet_reformat
 			(sh->ctx, cache_resource->reformat_type,
 			 cache_resource->ft_type, domain, cache_resource->flags,
 			 cache_resource->size,
 			 (cache_resource->size ? cache_resource->buf : NULL));
-	if (!cache_resource->verbs_action) {
+	if (!cache_resource->action) {
 		rte_free(cache_resource);
 		return rte_flow_error_set(error, ENOMEM,
 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
@@ -4030,12 +4030,12 @@ struct field_modify_info modify_tcp[] = {
 					  "cannot allocate resource memory");
 	*cache_resource = *resource;
 	rte_memcpy(cache_resource->actions, resource->actions, actions_len);
-	cache_resource->verbs_action =
+	cache_resource->action =
 		mlx5_glue->dv_create_flow_action_modify_header
 					(sh->ctx, cache_resource->ft_type, ns,
 					 cache_resource->flags, actions_len,
 					 (uint64_t *)cache_resource->actions);
-	if (!cache_resource->verbs_action) {
+	if (!cache_resource->action) {
 		rte_free(cache_resource);
 		return rte_flow_error_set(error, ENOMEM,
 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
@@ -8093,7 +8093,7 @@ struct field_modify_info modify_tcp[] = {
 							   error))
 				return -rte_errno;
 			dev_flow->dv.actions[actions_n++] =
-					dev_flow->dv.encap_decap->verbs_action;
+					dev_flow->dv.encap_decap->action;
 			action_flags |= MLX5_FLOW_ACTION_ENCAP;
 			break;
 		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
@@ -8103,7 +8103,7 @@ struct field_modify_info modify_tcp[] = {
 							   error))
 				return -rte_errno;
 			dev_flow->dv.actions[actions_n++] =
-					dev_flow->dv.encap_decap->verbs_action;
+					dev_flow->dv.encap_decap->action;
 			action_flags |= MLX5_FLOW_ACTION_DECAP;
 			break;
 		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
@@ -8113,7 +8113,7 @@ struct field_modify_info modify_tcp[] = {
 					(dev, actions, dev_flow, attr, error))
 					return -rte_errno;
 				dev_flow->dv.actions[actions_n++] =
-					dev_flow->dv.encap_decap->verbs_action;
+					dev_flow->dv.encap_decap->action;
 			} else {
 				/* Handle encap without preceding decap. */
 				if (flow_dv_create_action_l2_encap
@@ -8121,7 +8121,7 @@ struct field_modify_info modify_tcp[] = {
 				     error))
 					return -rte_errno;
 				dev_flow->dv.actions[actions_n++] =
-					dev_flow->dv.encap_decap->verbs_action;
+					dev_flow->dv.encap_decap->action;
 			}
 			action_flags |= MLX5_FLOW_ACTION_ENCAP;
 			break;
@@ -8133,7 +8133,7 @@ struct field_modify_info modify_tcp[] = {
 				    (dev, dev_flow, attr->transfer, error))
 					return -rte_errno;
 				dev_flow->dv.actions[actions_n++] =
-					dev_flow->dv.encap_decap->verbs_action;
+					dev_flow->dv.encap_decap->action;
 			}
 			/* If decap is followed by encap, handle it at encap. */
 			action_flags |= MLX5_FLOW_ACTION_DECAP;
@@ -8315,7 +8315,7 @@ struct field_modify_info modify_tcp[] = {
 					(dev, mhdr_res, dev_flow, error))
 					return -rte_errno;
 				dev_flow->dv.actions[modify_action_position] =
-					handle->dvh.modify_hdr->verbs_action;
+					handle->dvh.modify_hdr->action;
 			}
 			if (action_flags & MLX5_FLOW_ACTION_COUNT) {
 				flow->counter =
@@ -8778,13 +8778,13 @@ struct field_modify_info modify_tcp[] = {
 			 idx);
 	if (!cache_resource)
 		return 0;
-	MLX5_ASSERT(cache_resource->verbs_action);
+	MLX5_ASSERT(cache_resource->action);
 	DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--",
 		(void *)cache_resource,
 		rte_atomic32_read(&cache_resource->refcnt));
 	if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
 		claim_zero(mlx5_glue->destroy_flow_action
-				(cache_resource->verbs_action));
+				(cache_resource->action));
 		ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
 			     &priv->sh->encaps_decaps, idx,
 			     cache_resource, next);
@@ -8881,13 +8881,13 @@ struct field_modify_info modify_tcp[] = {
 	struct mlx5_flow_dv_modify_hdr_resource *cache_resource =
 							handle->dvh.modify_hdr;
 
-	MLX5_ASSERT(cache_resource->verbs_action);
+	MLX5_ASSERT(cache_resource->action);
 	DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d--",
 		(void *)cache_resource,
 		rte_atomic32_read(&cache_resource->refcnt));
 	if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
 		claim_zero(mlx5_glue->destroy_flow_action
-				(cache_resource->verbs_action));
+				(cache_resource->action));
 		LIST_REMOVE(cache_resource, next);
 		rte_free(cache_resource);
 		DRV_LOG(DEBUG, "modify-header resource %p: removed",
-- 
1.8.3.1
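
For readers following the release hunks above: a minimal sketch of the
dec-and-test release pattern the renamed field participates in, assuming
a generic resource with a reference counter and an action handle (the
destroy callback is a hypothetical stand-in for
mlx5_glue->destroy_flow_action()):

#include <rte_atomic.h>

/* Hypothetical stand-in for mlx5_glue->destroy_flow_action(). */
int os_destroy_flow_action(void *action);

struct resource {
	rte_atomic32_t refcnt; /* reference counter */
	void *action;          /* generic action handle */
};

static int
resource_release(struct resource *res)
{
	/* The last reference destroys the OS action object. */
	if (rte_atomic32_dec_and_test(&res->refcnt)) {
		os_destroy_flow_action(res->action);
		return 0; /* released */
	}
	return 1; /* still referenced elsewhere */
}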


Thread overview: 8+ messages
2020-06-28 14:06 [dpdk-dev] [PATCH 0/6] net/mlx5: refactor flow infrastructure Dekel Peled
2020-06-28 14:06 ` [dpdk-dev] [PATCH 1/6] net/mlx5: rename IB flow to generic name DRV flow Dekel Peled
2020-06-28 14:06 ` [dpdk-dev] [PATCH 2/6] net/mlx5: rename Verbs action to generic name Dekel Peled [this message]
2020-06-28 14:06 ` [dpdk-dev] [PATCH 3/6] net/mlx5: add OS specific flow related utilities Dekel Peled
2020-06-28 14:06 ` [dpdk-dev] [PATCH 4/6] net/mlx5: add OS specific flow type selection Dekel Peled
2020-06-28 14:06 ` [dpdk-dev] [PATCH 5/6] net/mlx5: add OS specific flow create and destroy Dekel Peled
2020-06-28 14:06 ` [dpdk-dev] [PATCH 6/6] net/mlx5: add OS specific flow actions operations Dekel Peled
2020-07-01 13:12 ` [dpdk-dev] [PATCH 0/6] net/mlx5: refactor flow infrastructure Raslan Darawsheh
