DPDK patches and discussions
 help / color / mirror / Atom feed
* [PATCH] app/flow-perf: replace RTE_BE32/16 with rte_cpu_to_be_32/16 for variables
@ 2023-03-12  2:00 Harold Huang
  2023-03-27 10:29 ` Wisam Monther
  0 siblings, 1 reply; 5+ messages in thread
From: Harold Huang @ 2023-03-12  2:00 UTC (permalink / raw)
  To: dev; +Cc: Harold Huang, Wisam Jaddo

In DPDK, the macros RTE_BE32 or RTE_BE16 are usually used for
constant values. And functions such as rte_cpu_to_be_32 or
rte_cpu_to_be_16 are optimized for variables.

Signed-off-by: Harold Huang <baymaxhuang@gmail.com>
---
 app/test-flow-perf/actions_gen.c | 28 ++++++++++++++--------------
 app/test-flow-perf/items_gen.c   |  2 +-
 2 files changed, 15 insertions(+), 15 deletions(-)

diff --git a/app/test-flow-perf/actions_gen.c b/app/test-flow-perf/actions_gen.c
index f1d5931325..c2499ad2d0 100644
--- a/app/test-flow-perf/actions_gen.c
+++ b/app/test-flow-perf/actions_gen.c
@@ -262,7 +262,7 @@ add_set_src_ipv4(struct rte_flow_action *actions,
 		ip = 1;
 
 	/* IPv4 value to be set is random each time */
-	set_ipv4[para.core_idx].ipv4_addr = RTE_BE32(ip + 1);
+	set_ipv4[para.core_idx].ipv4_addr = rte_cpu_to_be_32(ip + 1);
 
 	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC;
 	actions[actions_counter].conf = &set_ipv4[para.core_idx];
@@ -281,7 +281,7 @@ add_set_dst_ipv4(struct rte_flow_action *actions,
 		ip = 1;
 
 	/* IPv4 value to be set is random each time */
-	set_ipv4[para.core_idx].ipv4_addr = RTE_BE32(ip + 1);
+	set_ipv4[para.core_idx].ipv4_addr = rte_cpu_to_be_32(ip + 1);
 
 	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV4_DST;
 	actions[actions_counter].conf = &set_ipv4[para.core_idx];
@@ -348,7 +348,7 @@ add_set_src_tp(struct rte_flow_action *actions,
 	/* TP src port is random each time */
 	tp = tp % 0xffff;
 
-	set_tp[para.core_idx].port = RTE_BE16(tp & 0xffff);
+	set_tp[para.core_idx].port = rte_cpu_to_be_16(tp & 0xffff);
 
 	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_TP_SRC;
 	actions[actions_counter].conf = &set_tp[para.core_idx];
@@ -370,7 +370,7 @@ add_set_dst_tp(struct rte_flow_action *actions,
 	if (tp > 0xffff)
 		tp = tp >> 16;
 
-	set_tp[para.core_idx].port = RTE_BE16(tp & 0xffff);
+	set_tp[para.core_idx].port = rte_cpu_to_be_16(tp & 0xffff);
 
 	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_TP_DST;
 	actions[actions_counter].conf = &set_tp[para.core_idx];
@@ -388,7 +388,7 @@ add_inc_tcp_ack(struct rte_flow_action *actions,
 	if (!para.unique_data)
 		ack_value = 1;
 
-	value[para.core_idx] = RTE_BE32(ack_value);
+	value[para.core_idx] = rte_cpu_to_be_32(ack_value);
 
 	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_INC_TCP_ACK;
 	actions[actions_counter].conf = &value[para.core_idx];
@@ -406,7 +406,7 @@ add_dec_tcp_ack(struct rte_flow_action *actions,
 	if (!para.unique_data)
 		ack_value = 1;
 
-	value[para.core_idx] = RTE_BE32(ack_value);
+	value[para.core_idx] = rte_cpu_to_be_32(ack_value);
 
 	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK;
 	actions[actions_counter].conf = &value[para.core_idx];
@@ -424,7 +424,7 @@ add_inc_tcp_seq(struct rte_flow_action *actions,
 	if (!para.unique_data)
 		seq_value = 1;
 
-	value[para.core_idx] = RTE_BE32(seq_value);
+	value[para.core_idx] = rte_cpu_to_be_32(seq_value);
 
 	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ;
 	actions[actions_counter].conf = &value[para.core_idx];
@@ -442,7 +442,7 @@ add_dec_tcp_seq(struct rte_flow_action *actions,
 	if (!para.unique_data)
 		seq_value = 1;
 
-	value[para.core_idx] = RTE_BE32(seq_value);
+	value[para.core_idx] = rte_cpu_to_be_32(seq_value);
 
 	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ;
 	actions[actions_counter].conf = &value[para.core_idx];
@@ -560,7 +560,7 @@ add_vlan_header(uint8_t **header, uint64_t data,
 	vlan_value = VLAN_VALUE;
 
 	memset(&vlan_hdr, 0, sizeof(struct rte_vlan_hdr));
-	vlan_hdr.vlan_tci = RTE_BE16(vlan_value);
+	vlan_hdr.vlan_tci = rte_cpu_to_be_16(vlan_value);
 
 	if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV4))
 		vlan_hdr.eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4);
@@ -586,7 +586,7 @@ add_ipv4_header(uint8_t **header, uint64_t data,
 
 	memset(&ipv4_hdr, 0, sizeof(struct rte_ipv4_hdr));
 	ipv4_hdr.src_addr = RTE_IPV4(127, 0, 0, 1);
-	ipv4_hdr.dst_addr = RTE_BE32(ip_dst);
+	ipv4_hdr.dst_addr = rte_cpu_to_be_32(ip_dst);
 	ipv4_hdr.version_ihl = RTE_IPV4_VHL_DEF;
 	if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_UDP))
 		ipv4_hdr.next_proto_id = RTE_IP_TYPE_UDP;
@@ -652,7 +652,7 @@ add_vxlan_header(uint8_t **header, uint64_t data,
 
 	memset(&vxlan_hdr, 0, sizeof(struct rte_vxlan_hdr));
 
-	vxlan_hdr.vx_vni = (RTE_BE32(vni_value)) >> 16;
+	vxlan_hdr.vx_vni = (rte_cpu_to_be_32(vni_value)) >> 16;
 	vxlan_hdr.vx_flags = 0x8;
 
 	memcpy(*header, &vxlan_hdr, sizeof(vxlan_hdr));
@@ -675,7 +675,7 @@ add_vxlan_gpe_header(uint8_t **header, uint64_t data,
 
 	memset(&vxlan_gpe_hdr, 0, sizeof(struct rte_vxlan_gpe_hdr));
 
-	vxlan_gpe_hdr.vx_vni = (RTE_BE32(vni_value)) >> 16;
+	vxlan_gpe_hdr.vx_vni = (rte_cpu_to_be_32(vni_value)) >> 16;
 	vxlan_gpe_hdr.vx_flags = 0x0c;
 
 	memcpy(*header, &vxlan_gpe_hdr, sizeof(vxlan_gpe_hdr));
@@ -739,7 +739,7 @@ add_gtp_header(uint8_t **header, uint64_t data,
 
 	memset(&gtp_hdr, 0, sizeof(struct rte_flow_item_gtp));
 
-	gtp_hdr.teid = RTE_BE32(teid_value);
+	gtp_hdr.teid = rte_cpu_to_be_32(teid_value);
 	gtp_hdr.msg_type = 255;
 
 	memcpy(*header, &gtp_hdr, sizeof(gtp_hdr));
@@ -861,7 +861,7 @@ add_vxlan_encap(struct rte_flow_action *actions,
 	items[0].type = RTE_FLOW_ITEM_TYPE_ETH;
 
 	item_ipv4.hdr.src_addr = RTE_IPV4(127, 0, 0, 1);
-	item_ipv4.hdr.dst_addr = RTE_BE32(ip_dst);
+	item_ipv4.hdr.dst_addr = rte_cpu_to_be_32(ip_dst);
 	item_ipv4.hdr.version_ihl = RTE_IPV4_VHL_DEF;
 	items[1].spec = &item_ipv4;
 	items[1].mask = &item_ipv4;
diff --git a/app/test-flow-perf/items_gen.c b/app/test-flow-perf/items_gen.c
index 85928349ee..b4aa1cfc9c 100644
--- a/app/test-flow-perf/items_gen.c
+++ b/app/test-flow-perf/items_gen.c
@@ -56,7 +56,7 @@ add_ipv4(struct rte_flow_item *items,
 	static struct rte_flow_item_ipv4 ipv4_masks[RTE_MAX_LCORE] __rte_cache_aligned;
 	uint8_t ti = para.core_idx;
 
-	ipv4_specs[ti].hdr.src_addr = RTE_BE32(para.src_ip);
+	ipv4_specs[ti].hdr.src_addr = rte_cpu_to_be_32(para.src_ip);
 	ipv4_masks[ti].hdr.src_addr = RTE_BE32(0xffffffff);
 
 	items[items_counter].type = RTE_FLOW_ITEM_TYPE_IPV4;
-- 
2.27.0


^ permalink raw reply	[flat|nested] 5+ messages in thread

* RE: [PATCH] app/flow-perf: replace RTE_BE32/16 with rte_cpu_to_be_32/16 for variables
  2023-03-12  2:00 [PATCH] app/flow-perf: replace RTE_BE32/16 with rte_cpu_to_be_32/16 for variables Harold Huang
@ 2023-03-27 10:29 ` Wisam Monther
  2023-03-27 12:32   ` Harold Huang
  0 siblings, 1 reply; 5+ messages in thread
From: Wisam Monther @ 2023-03-27 10:29 UTC (permalink / raw)
  To: Harold Huang, dev

Hi,

> -----Original Message-----
> From: Harold Huang <baymaxhuang@gmail.com>
> Sent: Sunday, March 12, 2023 4:00 AM
> To: dev@dpdk.org
> Cc: Harold Huang <baymaxhuang@gmail.com>; Wisam Monther
> <wisamm@nvidia.com>
> Subject: [PATCH] app/flow-perf: replace RTE_BE32/16 with
> rte_cpu_to_be_32/16 for variables
> 
> In DPDK, the macros RTE_BE32 or RTE_BE16 are usually used for constant
> values. And functions such as rte_cpu_to_be_32 or
> rte_cpu_to_be_16 are optimized for variables.
> 
> Signed-off-by: Harold Huang <baymaxhuang@gmail.com>
> ---
>  app/test-flow-perf/actions_gen.c | 28 ++++++++++++++--------------
>  app/test-flow-perf/items_gen.c   |  2 +-
>  2 files changed, 15 insertions(+), 15 deletions(-)
> 

Indeed your change is in the correct files and I agree that it needs to be done,
But you are not doing it for all RTE_BE32 and RTE_BE16 in the app or the same files

After quick search I see:
app/test-flow-perf/items_gen.c:12
app/test-flow-perf/actions_gen.c:29

While you are doing the change only for:
app/test-flow-perf/items_gen.c:1
app/test-flow-perf/actions_gen.c:14

Can you please extend your fix for all needed vars.

BRs,
Wisam Jaddo

^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [PATCH] app/flow-perf: replace RTE_BE32/16 with rte_cpu_to_be_32/16 for variables
  2023-03-27 10:29 ` Wisam Monther
@ 2023-03-27 12:32   ` Harold Huang
  2023-04-18 11:32     ` Wisam Monther
  0 siblings, 1 reply; 5+ messages in thread
From: Harold Huang @ 2023-03-27 12:32 UTC (permalink / raw)
  To: Wisam Monther; +Cc: dev

Hi,
  I see all the other RTE_BE32 and RTE_BE16 are used for constant
values. I think it is not necessary to fix them:

In app/test-flow-perf/items_gen.c:
.hdr.vlan_tci = RTE_BE16(VLAN_VALUE),
.hdr.vlan_tci = RTE_BE16(0xffff),
ipv4_masks[ti].hdr.src_addr = RTE_BE32(0xffffffff);
.protocol = RTE_BE16(RTE_ETHER_TYPE_TEB),
.protocol = RTE_BE16(0xffff),
.hdr.teid = RTE_BE32(TEID_VALUE),
.hdr.teid = RTE_BE32(0xffffffff),
.data = RTE_BE32(META_DATA),
.data = RTE_BE32(0xffffffff),
.data = RTE_BE32(META_DATA),
.data = RTE_BE32(0xffffffff),

In app/test-flow-perf/actions_gen.c:
.data = RTE_BE32(META_DATA),
.mask = RTE_BE32(0xffffffff),
.data = RTE_BE32(META_DATA),
.mask = RTE_BE32(0xffffffff),
eth_hdr.ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
eth_hdr.ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
eth_hdr.ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
vlan_hdr.eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4);
vlan_hdr.eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6);
udp_hdr.dst_port = RTE_BE16(RTE_VXLAN_DEFAULT_PORT);
udp_hdr.dst_port = RTE_BE16(RTE_VXLAN_GPE_UDP_PORT);
udp_hdr.dst_port = RTE_BE16(RTE_GENEVE_UDP_PORT);
udp_hdr.dst_port = RTE_BE16(RTE_GTPU_UDP_PORT);
gre_hdr.proto = RTE_BE16(RTE_ETHER_TYPE_TEB);
item_udp.hdr.dst_port = RTE_BE16(RTE_VXLAN_DEFAULT_PORT);

On Mon, Mar 27, 2023 at 6:29 PM Wisam Monther <wisamm@nvidia.com> wrote:
>
> Hi,
>
> > -----Original Message-----
> > From: Harold Huang <baymaxhuang@gmail.com>
> > Sent: Sunday, March 12, 2023 4:00 AM
> > To: dev@dpdk.org
> > Cc: Harold Huang <baymaxhuang@gmail.com>; Wisam Monther
> > <wisamm@nvidia.com>
> > Subject: [PATCH] app/flow-perf: replace RTE_BE32/16 with
> > rte_cpu_to_be_32/16 for variables
> >
> > In DPDK, the macros RTE_BE32 or RTE_BE16 are usually used for constant
> > values. And functions such as rte_cpu_to_be_32 or
> > rte_cpu_to_be_16 are optimized for variables.
> >
> > Signed-off-by: Harold Huang <baymaxhuang@gmail.com>
> > ---
> >  app/test-flow-perf/actions_gen.c | 28 ++++++++++++++--------------
> >  app/test-flow-perf/items_gen.c   |  2 +-
> >  2 files changed, 15 insertions(+), 15 deletions(-)
> >
>
> Indeed your change is in the correct files and I agree that it's need to be done,
> But you are not doing it for all RTE_BE32 and RTE_BE16 in the app or the same files
>
> After quick search I see:
> app/test-flow-perf/items_gen.c:12
> app/test-flow-perf/actions_gen.c:29
>
> While you are doing the change only for:
> app/test-flow-perf/items_gen.c:1
> app/test-flow-perf/actions_gen.c:14
>
> Can you please extend your fix for all needed vars.
>
> BRs,
> Wisam Jaddo



-- 
Thanks, Harold.

^ permalink raw reply	[flat|nested] 5+ messages in thread

* RE: [PATCH] app/flow-perf: replace RTE_BE32/16 with rte_cpu_to_be_32/16 for variables
  2023-03-27 12:32   ` Harold Huang
@ 2023-04-18 11:32     ` Wisam Monther
  2024-10-07 19:36       ` Stephen Hemminger
  0 siblings, 1 reply; 5+ messages in thread
From: Wisam Monther @ 2023-04-18 11:32 UTC (permalink / raw)
  To: Harold Huang; +Cc: dev

Hi,

> >
> > > -----Original Message-----
> > > From: Harold Huang <baymaxhuang@gmail.com>
> > > Sent: Sunday, March 12, 2023 4:00 AM
> > > To: dev@dpdk.org
> > > Cc: Harold Huang <baymaxhuang@gmail.com>; Wisam Monther
> > > <wisamm@nvidia.com>
> > > Subject: [PATCH] app/flow-perf: replace RTE_BE32/16 with
> > > rte_cpu_to_be_32/16 for variables
> > >
> > > In DPDK, the macros RTE_BE32 or RTE_BE16 are usually used for
> > > constant values. And functions such as rte_cpu_to_be_32 or
> > > rte_cpu_to_be_16 are optimized for variables.
> > >
> > > Signed-off-by: Harold Huang <baymaxhuang@gmail.com>

Acked-by: Wisam Jaddo <wisamm@nvidia.com>

BRs,
Wisam Jaddo

^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [PATCH] app/flow-perf: replace RTE_BE32/16 with rte_cpu_to_be_32/16 for variables
  2023-04-18 11:32     ` Wisam Monther
@ 2024-10-07 19:36       ` Stephen Hemminger
  0 siblings, 0 replies; 5+ messages in thread
From: Stephen Hemminger @ 2024-10-07 19:36 UTC (permalink / raw)
  To: Wisam Monther; +Cc: Harold Huang, dev

On Tue, 18 Apr 2023 11:32:50 +0000
Wisam Monther <wisamm@nvidia.com> wrote:

> Hi,
> 
> > >  
> > > > -----Original Message-----
> > > > From: Harold Huang <baymaxhuang@gmail.com>
> > > > Sent: Sunday, March 12, 2023 4:00 AM
> > > > To: dev@dpdk.org
> > > > Cc: Harold Huang <baymaxhuang@gmail.com>; Wisam Monther
> > > > <wisamm@nvidia.com>
> > > > Subject: [PATCH] app/flow-perf: replace RTE_BE32/16 with
> > > > rte_cpu_to_be_32/16 for variables
> > > >
> > > > In DPDK, the macros RTE_BE32 or RTE_BE16 are usually used for
> > > > constant values. And functions such as rte_cpu_to_be_32 or
> > > > rte_cpu_to_be_16 are optimized for variables.
> > > >
> > > > Signed-off-by: Harold Huang <baymaxhuang@gmail.com>  
> 
> Acked-by: Wisam Jaddo <wisamm@nvidia.com>

Make sense.

Really doesn't make much difference and I wonder why the macro version
is necessary at all. Since the function already compiles to:


#define rte_bswap32(x) ((uint32_t)(__builtin_constant_p(x) ?            \
                                   rte_constant_bswap32(x) :            \
                                   rte_arch_bswap32(x)))


Acked-by: Stephen Hemminger <stephen@networkplumber.org>

^ permalink raw reply	[flat|nested] 5+ messages in thread

end of thread, other threads:[~2024-10-07 19:42 UTC | newest]

Thread overview: 5+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2023-03-12  2:00 [PATCH] app/flow-perf: replace RTE_BE32/16 with rte_cpu_to_be_32/16 for variables Harold Huang
2023-03-27 10:29 ` Wisam Monther
2023-03-27 12:32   ` Harold Huang
2023-04-18 11:32     ` Wisam Monther
2024-10-07 19:36       ` Stephen Hemminger

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).