patches for DPDK stable branches
* [PATCH] app/testpmd: fix wrong encap/decap size calculation
@ 2023-03-16 17:16 Michael Baum
  2023-03-16 17:26 ` Thomas Monjalon
  2023-03-16 18:24 ` [PATCH v2] " Michael Baum
  0 siblings, 2 replies; 5+ messages in thread
From: Michael Baum @ 2023-03-16 17:16 UTC (permalink / raw)
  To: dev
  Cc: Aman Singh, Yuying Zhang, Ferruh Yigit, Thomas Monjalon, orika, stable

Testpmd app has some functions to create either an encap or a decap
buffer for some special cases:
 - "l2_encap" and "l2_decap"
 - "mplsogre_encap" and "mplsogre_decap"
 - "mplsoudp_encap" and "mplsoudp_decap"

The functions use both "rte_flow_item_eth" and "rte_flow_item_vlan"
structures to represent the headers and copy them into "raw_encap"
action. The size of either "raw_encap" or "raw_decap" is capculated as
the sum of the header sizes.

However, both "rte_flow_item_eth" and "rte_flow_item_vlan" contain
more fields than the original headers, so using them causes a wrong
size calculation.

This patch uses "RTE_ETHER_HDR_LEN" and "RTE_VLAN_HLEN" macros in size
calculation.
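
As an illustration of the mismatch, here is a minimal standalone sketch
(not part of the patch; it assumes the DPDK headers are on the include
path, and the exact item sizes depend on ABI padding) that prints the
flow item sizes next to the real header lengths:

#include <stdio.h>
#include <rte_ether.h>
#include <rte_flow.h>

int main(void)
{
	/* The flow items wrap the protocol header in a union and add
	 * extra match fields (e.g. "has_vlan" plus reserved bits), so
	 * sizeof() on them is larger than the on-wire header length.
	 */
	printf("rte_flow_item_eth: %zu, rte_ether_hdr: %zu (RTE_ETHER_HDR_LEN %d)\n",
	       sizeof(struct rte_flow_item_eth),
	       sizeof(struct rte_ether_hdr), RTE_ETHER_HDR_LEN);
	printf("rte_flow_item_vlan: %zu, rte_vlan_hdr: %zu (RTE_VLAN_HLEN %d)\n",
	       sizeof(struct rte_flow_item_vlan),
	       sizeof(struct rte_vlan_hdr), RTE_VLAN_HLEN);
	return 0;
}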

Fixes: 3e77031be855 ("app/testpmd: add MPLSoGRE encapsulation")
Fixes: a1191d39cb57 ("app/testpmd: add MPLSoUDP encapsulation")
Cc: orika@nvidia.com
Cc: stable@dpdk.org

Signed-off-by: Michael Baum <michaelba@nvidia.com>
---
 app/test-pmd/cmdline_flow.c | 48 ++++++++++++++++++-------------------
 1 file changed, 24 insertions(+), 24 deletions(-)

diff --git a/app/test-pmd/cmdline_flow.c b/app/test-pmd/cmdline_flow.c
index 63a0b36622..fff0cb3d57 100644
--- a/app/test-pmd/cmdline_flow.c
+++ b/app/test-pmd/cmdline_flow.c
@@ -7988,15 +7988,15 @@ parse_vc_action_l2_encap(struct context *ctx, const struct token *token,
 	       l2_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
 	memcpy(eth.hdr.src_addr.addr_bytes,
 	       l2_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
-	memcpy(header, &eth, sizeof(eth));
-	header += sizeof(eth);
+	memcpy(header, &eth.hdr, RTE_ETHER_HDR_LEN);
+	header += RTE_ETHER_HDR_LEN;
 	if (l2_encap_conf.select_vlan) {
 		if (l2_encap_conf.select_ipv4)
 			vlan.hdr.eth_proto = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
 		else
 			vlan.hdr.eth_proto = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
-		memcpy(header, &vlan, sizeof(vlan));
-		header += sizeof(vlan);
+		memcpy(header, &vlan.hdr, RTE_VLAN_HLEN);
+		header += RTE_VLAN_HLEN;
 	}
 	action_encap_data->conf.size = header -
 		action_encap_data->data;
@@ -8044,11 +8044,11 @@ parse_vc_action_l2_decap(struct context *ctx, const struct token *token,
 	header = action_decap_data->data;
 	if (l2_decap_conf.select_vlan)
 		eth.hdr.ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
-	memcpy(header, &eth, sizeof(eth));
-	header += sizeof(eth);
+	memcpy(header, &eth.hdr, RTE_ETHER_HDR_LEN);
+	header += RTE_ETHER_HDR_LEN;
 	if (l2_decap_conf.select_vlan) {
-		memcpy(header, &vlan, sizeof(vlan));
-		header += sizeof(vlan);
+		memcpy(header, &vlan.hdr, RTE_VLAN_HLEN);
+		header += RTE_VLAN_HLEN;
 	}
 	action_decap_data->conf.size = header -
 		action_decap_data->data;
@@ -8128,15 +8128,15 @@ parse_vc_action_mplsogre_encap(struct context *ctx, const struct token *token,
 	       mplsogre_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
 	memcpy(eth.hdr.src_addr.addr_bytes,
 	       mplsogre_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
-	memcpy(header, &eth, sizeof(eth));
-	header += sizeof(eth);
+	memcpy(header, &eth.hdr, RTE_ETHER_HDR_LEN);
+	header += RTE_ETHER_HDR_LEN;
 	if (mplsogre_encap_conf.select_vlan) {
 		if (mplsogre_encap_conf.select_ipv4)
 			vlan.hdr.eth_proto = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
 		else
 			vlan.hdr.eth_proto = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
-		memcpy(header, &vlan, sizeof(vlan));
-		header += sizeof(vlan);
+		memcpy(header, &vlan.hdr, RTE_VLAN_HLEN);
+		header += RTE_VLAN_HLEN;
 	}
 	if (mplsogre_encap_conf.select_ipv4) {
 		memcpy(header, &ipv4, sizeof(ipv4));
@@ -8223,15 +8223,15 @@ parse_vc_action_mplsogre_decap(struct context *ctx, const struct token *token,
 	       mplsogre_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
 	memcpy(eth.hdr.src_addr.addr_bytes,
 	       mplsogre_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
-	memcpy(header, &eth, sizeof(eth));
-	header += sizeof(eth);
+	memcpy(header, &eth.hdr, RTE_ETHER_HDR_LEN);
+	header += RTE_ETHER_HDR_LEN;
 	if (mplsogre_encap_conf.select_vlan) {
 		if (mplsogre_encap_conf.select_ipv4)
 			vlan.hdr.eth_proto = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
 		else
 			vlan.hdr.eth_proto = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
-		memcpy(header, &vlan, sizeof(vlan));
-		header += sizeof(vlan);
+		memcpy(header, &vlan.hdr, RTE_VLAN_HLEN);
+		header += RTE_VLAN_HLEN;
 	}
 	if (mplsogre_encap_conf.select_ipv4) {
 		memcpy(header, &ipv4, sizeof(ipv4));
@@ -8322,15 +8322,15 @@ parse_vc_action_mplsoudp_encap(struct context *ctx, const struct token *token,
 	       mplsoudp_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
 	memcpy(eth.hdr.src_addr.addr_bytes,
 	       mplsoudp_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
-	memcpy(header, &eth, sizeof(eth));
-	header += sizeof(eth);
+	memcpy(header, &eth.hdr, RTE_ETHER_HDR_LEN);
+	header += RTE_ETHER_HDR_LEN;
 	if (mplsoudp_encap_conf.select_vlan) {
 		if (mplsoudp_encap_conf.select_ipv4)
 			vlan.hdr.eth_proto = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
 		else
 			vlan.hdr.eth_proto = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
-		memcpy(header, &vlan, sizeof(vlan));
-		header += sizeof(vlan);
+		memcpy(header, &vlan.hdr, RTE_VLAN_HLEN);
+		header += RTE_VLAN_HLEN;
 	}
 	if (mplsoudp_encap_conf.select_ipv4) {
 		memcpy(header, &ipv4, sizeof(ipv4));
@@ -8419,15 +8419,15 @@ parse_vc_action_mplsoudp_decap(struct context *ctx, const struct token *token,
 	       mplsoudp_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
 	memcpy(eth.hdr.src_addr.addr_bytes,
 	       mplsoudp_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
-	memcpy(header, &eth, sizeof(eth));
-	header += sizeof(eth);
+	memcpy(header, &eth.hdr, RTE_ETHER_HDR_LEN);
+	header += RTE_ETHER_HDR_LEN;
 	if (mplsoudp_encap_conf.select_vlan) {
 		if (mplsoudp_encap_conf.select_ipv4)
 			vlan.hdr.eth_proto = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
 		else
 			vlan.hdr.eth_proto = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
-		memcpy(header, &vlan, sizeof(vlan));
-		header += sizeof(vlan);
+		memcpy(header, &vlan.hdr, RTE_VLAN_HLEN);
+		header += RTE_VLAN_HLEN;
 	}
 	if (mplsoudp_encap_conf.select_ipv4) {
 		memcpy(header, &ipv4, sizeof(ipv4));
-- 
2.25.1



* Re: [PATCH] app/testpmd: fix wrong encap/decap size calculation
  2023-03-16 17:16 [PATCH] app/testpmd: fix wrong encap/decap size calculation Michael Baum
@ 2023-03-16 17:26 ` Thomas Monjalon
  2023-03-16 18:24 ` [PATCH v2] " Michael Baum
  1 sibling, 0 replies; 5+ messages in thread
From: Thomas Monjalon @ 2023-03-16 17:26 UTC (permalink / raw)
  To: Michael Baum
  Cc: dev, Aman Singh, Yuying Zhang, Ferruh Yigit, orika, stable, olivier.matz

16/03/2023 18:16, Michael Baum:
> Testpmd app has some functions to create either an encap or a decap
> buffer for some special cases:
>  - "l2_encap" and "l2_decap"
>  - "mplsogre_encap" and "mplsogre_decap"
>  - "mplsoudp_encap" and "mplsoudp_decap"
> 
> The functions use both "rte_flow_item_eth" and "rte_flow_item_vlan"
> structures to represent the headers and copy them into "raw_encap"
> action. The size of either "raw_encap" or "raw_decap" is capculated as

capculated -> calculated

> the sum of the header sizes.
> 
> However, both "rte_flow_item_eth" and "rte_flow_item_vlan" contain
> more fields than the original headers, so using them causes a wrong
> size calculation.
> 
> This patch uses "RTE_ETHER_HDR_LEN" and "RTE_VLAN_HLEN" macros in size
> calculation.

Honestly I don't know why we have these *_LEN macros in DPDK.
We would have the same result with the (more explicit)
sizeof(struct rte_ether_hdr) or sizeof(struct rte_vlan_hdr).
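
For reference, a minimal check (assuming a C11 compiler and the DPDK
headers on the include path) showing that the macros and the sizeof()
expressions agree:

#include <assert.h>
#include <rte_ether.h>

int main(void)
{
	/* RTE_ETHER_HDR_LEN = 2 * 6 + 2 = 14, RTE_VLAN_HLEN = 4 */
	static_assert(sizeof(struct rte_ether_hdr) == RTE_ETHER_HDR_LEN,
		      "Ethernet header size mismatch");
	static_assert(sizeof(struct rte_vlan_hdr) == RTE_VLAN_HLEN,
		      "VLAN header size mismatch");
	return 0;
}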





* [PATCH v2] app/testpmd: fix wrong encap/decap size calculation
  2023-03-16 17:16 [PATCH] app/testpmd: fix wrong encap/decap size calculation Michael Baum
  2023-03-16 17:26 ` Thomas Monjalon
@ 2023-03-16 18:24 ` Michael Baum
  2023-03-23 10:34   ` Ori Kam
  1 sibling, 1 reply; 5+ messages in thread
From: Michael Baum @ 2023-03-16 18:24 UTC (permalink / raw)
  To: dev
  Cc: Aman Singh, Yuying Zhang, Ferruh Yigit, Thomas Monjalon, orika, stable

Testpmd app has some functions to create either an encap or a decap
buffer for some special cases:
 - "l2_encap" and "l2_decap"
 - "mplsogre_encap" and "mplsogre_decap"
 - "mplsoudp_encap" and "mplsoudp_decap"

The functions use both "rte_flow_item_eth" and "rte_flow_item_vlan"
structures to represent the headers and copy them into "raw_encap"
action. The size of either "raw_encap" or "raw_decap" is calculated as
the sum of the header sizes.

However, both "rte_flow_item_eth" and "rte_flow_item_vlan" contain
more fields than the original headers, so using them causes a wrong
size calculation.

This patch uses "rte_ether_hdr" and "rte_vlan_hdr" structures for header
size calculation.
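
In sketch form (simplified from the testpmd helpers; the function name
and the caller-provided buffer below are hypothetical), the encap
buffer is now built from the embedded protocol headers only, so the
computed size matches the on-wire length:

#include <stdint.h>
#include <string.h>
#include <rte_byteorder.h>
#include <rte_ether.h>
#include <rte_flow.h>

static size_t
build_l2_vlan_encap(uint8_t *data)
{
	struct rte_flow_item_eth eth = {
		.hdr.ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN),
	};
	struct rte_flow_item_vlan vlan = {
		.hdr.eth_proto = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4),
	};
	uint8_t *header = data;

	/* Copy only the embedded protocol headers, not the whole items. */
	memcpy(header, &eth.hdr, sizeof(struct rte_ether_hdr));
	header += sizeof(struct rte_ether_hdr);
	memcpy(header, &vlan.hdr, sizeof(struct rte_vlan_hdr));
	header += sizeof(struct rte_vlan_hdr);

	/* 14 + 4 = 18 bytes, the real L2 + VLAN length. */
	return (size_t)(header - data);
}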

Fixes: 3e77031be855 ("app/testpmd: add MPLSoGRE encapsulation")
Fixes: a1191d39cb57 ("app/testpmd: add MPLSoUDP encapsulation")
Cc: orika@nvidia.com
Cc: stable@dpdk.org

Signed-off-by: Michael Baum <michaelba@nvidia.com>
---

v2:
- Fix typo in commit log.
- Use "sizeof(struct rte_*_hdr)" instead of the "*_LEN" macros.

 app/test-pmd/cmdline_flow.c | 48 ++++++++++++++++++-------------------
 1 file changed, 24 insertions(+), 24 deletions(-)

diff --git a/app/test-pmd/cmdline_flow.c b/app/test-pmd/cmdline_flow.c
index 9309607f11..58939ec321 100644
--- a/app/test-pmd/cmdline_flow.c
+++ b/app/test-pmd/cmdline_flow.c
@@ -8245,15 +8245,15 @@ parse_vc_action_l2_encap(struct context *ctx, const struct token *token,
 	       l2_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
 	memcpy(eth.hdr.src_addr.addr_bytes,
 	       l2_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
-	memcpy(header, &eth, sizeof(eth));
-	header += sizeof(eth);
+	memcpy(header, &eth.hdr, sizeof(struct rte_ether_hdr));
+	header += sizeof(struct rte_ether_hdr);
 	if (l2_encap_conf.select_vlan) {
 		if (l2_encap_conf.select_ipv4)
 			vlan.hdr.eth_proto = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
 		else
 			vlan.hdr.eth_proto = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
-		memcpy(header, &vlan, sizeof(vlan));
-		header += sizeof(vlan);
+		memcpy(header, &vlan.hdr, sizeof(struct rte_vlan_hdr));
+		header += sizeof(struct rte_vlan_hdr);
 	}
 	action_encap_data->conf.size = header -
 		action_encap_data->data;
@@ -8301,11 +8301,11 @@ parse_vc_action_l2_decap(struct context *ctx, const struct token *token,
 	header = action_decap_data->data;
 	if (l2_decap_conf.select_vlan)
 		eth.hdr.ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
-	memcpy(header, &eth, sizeof(eth));
-	header += sizeof(eth);
+	memcpy(header, &eth.hdr, sizeof(struct rte_ether_hdr));
+	header += sizeof(struct rte_ether_hdr);
 	if (l2_decap_conf.select_vlan) {
-		memcpy(header, &vlan, sizeof(vlan));
-		header += sizeof(vlan);
+		memcpy(header, &vlan.hdr, sizeof(struct rte_vlan_hdr));
+		header += sizeof(struct rte_vlan_hdr);
 	}
 	action_decap_data->conf.size = header -
 		action_decap_data->data;
@@ -8385,15 +8385,15 @@ parse_vc_action_mplsogre_encap(struct context *ctx, const struct token *token,
 	       mplsogre_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
 	memcpy(eth.hdr.src_addr.addr_bytes,
 	       mplsogre_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
-	memcpy(header, &eth, sizeof(eth));
-	header += sizeof(eth);
+	memcpy(header, &eth.hdr, sizeof(struct rte_ether_hdr));
+	header += sizeof(struct rte_ether_hdr);
 	if (mplsogre_encap_conf.select_vlan) {
 		if (mplsogre_encap_conf.select_ipv4)
 			vlan.hdr.eth_proto = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
 		else
 			vlan.hdr.eth_proto = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
-		memcpy(header, &vlan, sizeof(vlan));
-		header += sizeof(vlan);
+		memcpy(header, &vlan.hdr, sizeof(struct rte_vlan_hdr));
+		header += sizeof(struct rte_vlan_hdr);
 	}
 	if (mplsogre_encap_conf.select_ipv4) {
 		memcpy(header, &ipv4, sizeof(ipv4));
@@ -8480,15 +8480,15 @@ parse_vc_action_mplsogre_decap(struct context *ctx, const struct token *token,
 	       mplsogre_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
 	memcpy(eth.hdr.src_addr.addr_bytes,
 	       mplsogre_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
-	memcpy(header, &eth, sizeof(eth));
-	header += sizeof(eth);
+	memcpy(header, &eth.hdr, sizeof(struct rte_ether_hdr));
+	header += sizeof(struct rte_ether_hdr);
 	if (mplsogre_encap_conf.select_vlan) {
 		if (mplsogre_encap_conf.select_ipv4)
 			vlan.hdr.eth_proto = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
 		else
 			vlan.hdr.eth_proto = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
-		memcpy(header, &vlan, sizeof(vlan));
-		header += sizeof(vlan);
+		memcpy(header, &vlan.hdr, sizeof(struct rte_vlan_hdr));
+		header += sizeof(struct rte_vlan_hdr);
 	}
 	if (mplsogre_encap_conf.select_ipv4) {
 		memcpy(header, &ipv4, sizeof(ipv4));
@@ -8579,15 +8579,15 @@ parse_vc_action_mplsoudp_encap(struct context *ctx, const struct token *token,
 	       mplsoudp_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
 	memcpy(eth.hdr.src_addr.addr_bytes,
 	       mplsoudp_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
-	memcpy(header, &eth, sizeof(eth));
-	header += sizeof(eth);
+	memcpy(header, &eth.hdr, sizeof(struct rte_ether_hdr));
+	header += sizeof(struct rte_ether_hdr);
 	if (mplsoudp_encap_conf.select_vlan) {
 		if (mplsoudp_encap_conf.select_ipv4)
 			vlan.hdr.eth_proto = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
 		else
 			vlan.hdr.eth_proto = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
-		memcpy(header, &vlan, sizeof(vlan));
-		header += sizeof(vlan);
+		memcpy(header, &vlan.hdr, sizeof(struct rte_vlan_hdr));
+		header += sizeof(struct rte_vlan_hdr);
 	}
 	if (mplsoudp_encap_conf.select_ipv4) {
 		memcpy(header, &ipv4, sizeof(ipv4));
@@ -8676,15 +8676,15 @@ parse_vc_action_mplsoudp_decap(struct context *ctx, const struct token *token,
 	       mplsoudp_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
 	memcpy(eth.hdr.src_addr.addr_bytes,
 	       mplsoudp_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
-	memcpy(header, &eth, sizeof(eth));
-	header += sizeof(eth);
+	memcpy(header, &eth.hdr, sizeof(struct rte_ether_hdr));
+	header += sizeof(struct rte_ether_hdr);
 	if (mplsoudp_encap_conf.select_vlan) {
 		if (mplsoudp_encap_conf.select_ipv4)
 			vlan.hdr.eth_proto = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
 		else
 			vlan.hdr.eth_proto = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
-		memcpy(header, &vlan, sizeof(vlan));
-		header += sizeof(vlan);
+		memcpy(header, &vlan.hdr, sizeof(struct rte_vlan_hdr));
+		header += sizeof(struct rte_vlan_hdr);
 	}
 	if (mplsoudp_encap_conf.select_ipv4) {
 		memcpy(header, &ipv4, sizeof(ipv4));
-- 
2.25.1



* RE: [PATCH v2] app/testpmd: fix wrong encap/decap size calculation
  2023-03-16 18:24 ` [PATCH v2] " Michael Baum
@ 2023-03-23 10:34   ` Ori Kam
  2023-03-23 11:09     ` Ferruh Yigit
  0 siblings, 1 reply; 5+ messages in thread
From: Ori Kam @ 2023-03-23 10:34 UTC (permalink / raw)
  To: Michael Baum, dev
  Cc: Aman Singh, Yuying Zhang, Ferruh Yigit,
	NBU-Contact-Thomas Monjalon (EXTERNAL),
	stable

Hi Michael,

> -----Original Message-----
> From: Michael Baum <michaelba@nvidia.com>
> Sent: Thursday, 16 March 2023 20:24
> 
> Testpmd app has some functions to create either an encap or a decap
> buffer for some special cases:
>  - "l2_encap" and "l2_decap"
>  - "mplsogre_encap" and "mplsogre_decap"
>  - "mplsoudp_encap" and "mplsoudp_decap"
> 
> The functions use both "rte_flow_item_eth" and "rte_flow_item_vlan"
> structures to represent the headers and copy them into "raw_encap"
> action. The size of either "raw_encap" or "raw_decap" is calculated as
> the sum of the header sizes.
> 
> However, both "rte_flow_item_eth" and "rte_flow_item_vlan" contain
> more fields than the original headers, so using them causes a wrong
> size calculation.
> 
> This patch uses "rte_ether_hdr" and "rte_vlan_hdr" structures for header
> size calculation.
> 
> Fixes: 3e77031be855 ("app/testpmd: add MPLSoGRE encapsulation")
> Fixes: a1191d39cb57 ("app/testpmd: add MPLSoUDP encapsulation")
> Cc: orika@nvidia.com
> Cc: stable@dpdk.org
> 
> Signed-off-by: Michael Baum <michaelba@nvidia.com>
> ---
> 

Acked-by: Ori Kam <orika@nvidia.com>
Best,
Ori


* Re: [PATCH v2] app/testpmd: fix wrong encap/decap size calculation
  2023-03-23 10:34   ` Ori Kam
@ 2023-03-23 11:09     ` Ferruh Yigit
  0 siblings, 0 replies; 5+ messages in thread
From: Ferruh Yigit @ 2023-03-23 11:09 UTC (permalink / raw)
  To: Ori Kam, Michael Baum, dev
  Cc: Aman Singh, Yuying Zhang, NBU-Contact-Thomas Monjalon (EXTERNAL), stable

On 3/23/2023 10:34 AM, Ori Kam wrote:
> Hi Michael,
> 
>> -----Original Message-----
>> From: Michael Baum <michaelba@nvidia.com>
>> Sent: Thursday, 16 March 2023 20:24
>>
>> Testpmd app has some functions to create either an encap or a decap
>> buffer for some special cases:
>>  - "l2_encap" and "l2_decap"
>>  - "mplsogre_encap" and "mplsogre_decap"
>>  - "mplsoudp_encap" and "mplsoudp_decap"
>>
>> The functions use both "rte_flow_item_eth" and "rte_flow_item_vlan"
>> structures to represent the headers and copy them into "raw_encap"
>> action. The size of either "raw_encap" or "raw_decap" is calculated as
>> the sum of the header sizes.
>>
>> However, both "rte_flow_item_eth" and "rte_flow_item_vlan" contain
>> more fields than the original headers, so using them causes a wrong
>> size calculation.
>>
>> This patch uses "rte_ether_hdr" and "rte_vlan_hdr" structures for header
>> size calculation.
>>
>> Fixes: 3e77031be855 ("app/testpmd: add MPLSoGRE encapsulation")
>> Fixes: a1191d39cb57 ("app/testpmd: add MPLSoUDP encapsulation")
>> Cc: orika@nvidia.com
>> Cc: stable@dpdk.org
>>
>> Signed-off-by: Michael Baum <michaelba@nvidia.com>
>> ---
>>
> 
> Acked-by: Ori Kam <orika@nvidia.com>
>

Acked-by: Ferruh Yigit <ferruh.yigit@amd.com>

Applied to dpdk-next-net/main, thanks.

