DPDK patches and discussions
From: Harold Huang <baymaxhuang@gmail.com>
To: dev@dpdk.org
Cc: Harold Huang <baymaxhuang@gmail.com>, Wisam Jaddo <wisamm@nvidia.com>
Subject: [PATCH] app/flow-perf: replace RTE_BE32/16 with rte_cpu_to_be_32/16 for variables
Date: Sun, 12 Mar 2023 02:00:09 +0000
Message-ID: <20230312020009.288-1-baymaxhuang@gmail.com>

In DPDK, the RTE_BE32 and RTE_BE16 macros are intended for compile-time
constants; applied to run-time variables they typically expand to a
sequence of shifts and masks. The rte_cpu_to_be_32 and rte_cpu_to_be_16
functions are optimized for variables, so use them for the run-time
conversions in flow-perf.
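
For illustration, a minimal sketch of the intended split, assuming a
little-endian CPU and rte_byteorder.h; the helper name and the port
values below are made up for the example:

	#include <stdint.h>
	#include <rte_byteorder.h>

	static inline void
	fill_ports(rte_be16_t *src_port, rte_be16_t *dst_port, uint16_t dst)
	{
		/* Compile-time constant: RTE_BE16() swaps at build time. */
		*src_port = RTE_BE16(4789);
		/* Run-time variable: rte_cpu_to_be_16() compiles to an
		 * optimized byte swap.
		 */
		*dst_port = rte_cpu_to_be_16(dst);
	}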

Signed-off-by: Harold Huang <baymaxhuang@gmail.com>
---
 app/test-flow-perf/actions_gen.c | 28 ++++++++++++++--------------
 app/test-flow-perf/items_gen.c   |  2 +-
 2 files changed, 15 insertions(+), 15 deletions(-)

diff --git a/app/test-flow-perf/actions_gen.c b/app/test-flow-perf/actions_gen.c
index f1d5931325..c2499ad2d0 100644
--- a/app/test-flow-perf/actions_gen.c
+++ b/app/test-flow-perf/actions_gen.c
@@ -262,7 +262,7 @@ add_set_src_ipv4(struct rte_flow_action *actions,
 		ip = 1;
 
 	/* IPv4 value to be set is random each time */
-	set_ipv4[para.core_idx].ipv4_addr = RTE_BE32(ip + 1);
+	set_ipv4[para.core_idx].ipv4_addr = rte_cpu_to_be_32(ip + 1);
 
 	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC;
 	actions[actions_counter].conf = &set_ipv4[para.core_idx];
@@ -281,7 +281,7 @@ add_set_dst_ipv4(struct rte_flow_action *actions,
 		ip = 1;
 
 	/* IPv4 value to be set is random each time */
-	set_ipv4[para.core_idx].ipv4_addr = RTE_BE32(ip + 1);
+	set_ipv4[para.core_idx].ipv4_addr = rte_cpu_to_be_32(ip + 1);
 
 	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV4_DST;
 	actions[actions_counter].conf = &set_ipv4[para.core_idx];
@@ -348,7 +348,7 @@ add_set_src_tp(struct rte_flow_action *actions,
 	/* TP src port is random each time */
 	tp = tp % 0xffff;
 
-	set_tp[para.core_idx].port = RTE_BE16(tp & 0xffff);
+	set_tp[para.core_idx].port = rte_cpu_to_be_16(tp & 0xffff);
 
 	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_TP_SRC;
 	actions[actions_counter].conf = &set_tp[para.core_idx];
@@ -370,7 +370,7 @@ add_set_dst_tp(struct rte_flow_action *actions,
 	if (tp > 0xffff)
 		tp = tp >> 16;
 
-	set_tp[para.core_idx].port = RTE_BE16(tp & 0xffff);
+	set_tp[para.core_idx].port = rte_cpu_to_be_16(tp & 0xffff);
 
 	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_TP_DST;
 	actions[actions_counter].conf = &set_tp[para.core_idx];
@@ -388,7 +388,7 @@ add_inc_tcp_ack(struct rte_flow_action *actions,
 	if (!para.unique_data)
 		ack_value = 1;
 
-	value[para.core_idx] = RTE_BE32(ack_value);
+	value[para.core_idx] = rte_cpu_to_be_32(ack_value);
 
 	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_INC_TCP_ACK;
 	actions[actions_counter].conf = &value[para.core_idx];
@@ -406,7 +406,7 @@ add_dec_tcp_ack(struct rte_flow_action *actions,
 	if (!para.unique_data)
 		ack_value = 1;
 
-	value[para.core_idx] = RTE_BE32(ack_value);
+	value[para.core_idx] = rte_cpu_to_be_32(ack_value);
 
 	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK;
 	actions[actions_counter].conf = &value[para.core_idx];
@@ -424,7 +424,7 @@ add_inc_tcp_seq(struct rte_flow_action *actions,
 	if (!para.unique_data)
 		seq_value = 1;
 
-	value[para.core_idx] = RTE_BE32(seq_value);
+	value[para.core_idx] = rte_cpu_to_be_32(seq_value);
 
 	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ;
 	actions[actions_counter].conf = &value[para.core_idx];
@@ -442,7 +442,7 @@ add_dec_tcp_seq(struct rte_flow_action *actions,
 	if (!para.unique_data)
 		seq_value = 1;
 
-	value[para.core_idx] = RTE_BE32(seq_value);
+	value[para.core_idx] = rte_cpu_to_be_32(seq_value);
 
 	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ;
 	actions[actions_counter].conf = &value[para.core_idx];
@@ -560,7 +560,7 @@ add_vlan_header(uint8_t **header, uint64_t data,
 	vlan_value = VLAN_VALUE;
 
 	memset(&vlan_hdr, 0, sizeof(struct rte_vlan_hdr));
-	vlan_hdr.vlan_tci = RTE_BE16(vlan_value);
+	vlan_hdr.vlan_tci = rte_cpu_to_be_16(vlan_value);
 
 	if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV4))
 		vlan_hdr.eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4);
@@ -586,7 +586,7 @@ add_ipv4_header(uint8_t **header, uint64_t data,
 
 	memset(&ipv4_hdr, 0, sizeof(struct rte_ipv4_hdr));
 	ipv4_hdr.src_addr = RTE_IPV4(127, 0, 0, 1);
-	ipv4_hdr.dst_addr = RTE_BE32(ip_dst);
+	ipv4_hdr.dst_addr = rte_cpu_to_be_32(ip_dst);
 	ipv4_hdr.version_ihl = RTE_IPV4_VHL_DEF;
 	if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_UDP))
 		ipv4_hdr.next_proto_id = RTE_IP_TYPE_UDP;
@@ -652,7 +652,7 @@ add_vxlan_header(uint8_t **header, uint64_t data,
 
 	memset(&vxlan_hdr, 0, sizeof(struct rte_vxlan_hdr));
 
-	vxlan_hdr.vx_vni = (RTE_BE32(vni_value)) >> 16;
+	vxlan_hdr.vx_vni = (rte_cpu_to_be_32(vni_value)) >> 16;
 	vxlan_hdr.vx_flags = 0x8;
 
 	memcpy(*header, &vxlan_hdr, sizeof(vxlan_hdr));
@@ -675,7 +675,7 @@ add_vxlan_gpe_header(uint8_t **header, uint64_t data,
 
 	memset(&vxlan_gpe_hdr, 0, sizeof(struct rte_vxlan_gpe_hdr));
 
-	vxlan_gpe_hdr.vx_vni = (RTE_BE32(vni_value)) >> 16;
+	vxlan_gpe_hdr.vx_vni = (rte_cpu_to_be_32(vni_value)) >> 16;
 	vxlan_gpe_hdr.vx_flags = 0x0c;
 
 	memcpy(*header, &vxlan_gpe_hdr, sizeof(vxlan_gpe_hdr));
@@ -739,7 +739,7 @@ add_gtp_header(uint8_t **header, uint64_t data,
 
 	memset(&gtp_hdr, 0, sizeof(struct rte_flow_item_gtp));
 
-	gtp_hdr.teid = RTE_BE32(teid_value);
+	gtp_hdr.teid = rte_cpu_to_be_32(teid_value);
 	gtp_hdr.msg_type = 255;
 
 	memcpy(*header, &gtp_hdr, sizeof(gtp_hdr));
@@ -861,7 +861,7 @@ add_vxlan_encap(struct rte_flow_action *actions,
 	items[0].type = RTE_FLOW_ITEM_TYPE_ETH;
 
 	item_ipv4.hdr.src_addr = RTE_IPV4(127, 0, 0, 1);
-	item_ipv4.hdr.dst_addr = RTE_BE32(ip_dst);
+	item_ipv4.hdr.dst_addr = rte_cpu_to_be_32(ip_dst);
 	item_ipv4.hdr.version_ihl = RTE_IPV4_VHL_DEF;
 	items[1].spec = &item_ipv4;
 	items[1].mask = &item_ipv4;
diff --git a/app/test-flow-perf/items_gen.c b/app/test-flow-perf/items_gen.c
index 85928349ee..b4aa1cfc9c 100644
--- a/app/test-flow-perf/items_gen.c
+++ b/app/test-flow-perf/items_gen.c
@@ -56,7 +56,7 @@ add_ipv4(struct rte_flow_item *items,
 	static struct rte_flow_item_ipv4 ipv4_masks[RTE_MAX_LCORE] __rte_cache_aligned;
 	uint8_t ti = para.core_idx;
 
-	ipv4_specs[ti].hdr.src_addr = RTE_BE32(para.src_ip);
+	ipv4_specs[ti].hdr.src_addr = rte_cpu_to_be_32(para.src_ip);
 	ipv4_masks[ti].hdr.src_addr = RTE_BE32(0xffffffff);
 
 	items[items_counter].type = RTE_FLOW_ITEM_TYPE_IPV4;
-- 
2.27.0


Thread overview: 4+ messages
2023-03-12  2:00 Harold Huang [this message]
2023-03-27 10:29 ` Wisam Monther
2023-03-27 12:32   ` Harold Huang
2023-04-18 11:32     ` Wisam Monther
