patches for DPDK stable branches
* [PATCH] net/mlx5: fix shared Rx queue port number in data path
@ 2024-10-28 17:53 Alexander Kozyrev
  2024-10-29 12:34 ` Slava Ovsiienko
  2024-11-13 13:36 ` Raslan Darawsheh
  0 siblings, 2 replies; 3+ messages in thread
From: Alexander Kozyrev @ 2024-10-28 17:53 UTC (permalink / raw)
  To: dev; +Cc: stable, rasland, viacheslavo, matan, dsosnowski, bingz, suanmingm

The wrong CQE is used to get the shared Rx queue port number in the
vectorized Rx burst routines. Fix the CQE indexing.
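
As the hunks below show, the four mbufs of a batch occupy consecutive
positions pos + 0..3 in the output array, while the matching CQEs are
addressed through the per-batch offsets p1, p2 and p3, so each
assignment has to pair a consecutive mbuf index with a p-offset CQE
index. The port field is what lets applications split up a burst
received on a shared Rx queue, where one poll may return packets of
several member ports. A minimal, hypothetical sketch of that usage
(the function name, burst size and accounting array are illustrative,
not part of this patch):

#include <rte_ethdev.h>
#include <rte_mbuf.h>

/* Hypothetical polling loop: with a shared Rx queue every returned mbuf
 * carries the member port it arrived on (filled from the CQE user index),
 * so per-port handling only needs the mbuf itself.
 */
static void
poll_shared_rxq(uint16_t any_member_port, uint16_t queue_id,
		uint64_t rx_count[RTE_MAX_ETHPORTS])
{
	struct rte_mbuf *pkts[32];
	uint16_t nb = rte_eth_rx_burst(any_member_port, queue_id, pkts, 32);
	uint16_t i;

	for (i = 0; i < nb; i++) {
		rx_count[pkts[i]->port]++;	/* count per source port */
		rte_pktmbuf_free(pkts[i]);
	}
}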

Fixes: 25ed2ebff1 ("net/mlx5: support shared Rx queue port data path")

Signed-off-by: Alexander Kozyrev <akozyrev@nvidia.com>
---
 drivers/net/mlx5/mlx5_rxtx_vec_altivec.h | 12 ++++++------
 drivers/net/mlx5/mlx5_rxtx_vec_neon.h    | 24 ++++++++++++------------
 drivers/net/mlx5/mlx5_rxtx_vec_sse.h     |  6 +++---
 3 files changed, 21 insertions(+), 21 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_altivec.h b/drivers/net/mlx5/mlx5_rxtx_vec_altivec.h
index cccfa7f2d3..f6e74f4180 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec_altivec.h
+++ b/drivers/net/mlx5/mlx5_rxtx_vec_altivec.h
@@ -1249,9 +1249,9 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
 		rxq_cq_to_ptype_oflags_v(rxq, cqes, opcode, &pkts[pos]);
 		if (unlikely(rxq->shared)) {
 			pkts[pos]->port = cq[pos].user_index_low;
-			pkts[pos + p1]->port = cq[pos + p1].user_index_low;
-			pkts[pos + p2]->port = cq[pos + p2].user_index_low;
-			pkts[pos + p3]->port = cq[pos + p3].user_index_low;
+			pkts[pos + 1]->port = cq[pos + p1].user_index_low;
+			pkts[pos + 2]->port = cq[pos + p2].user_index_low;
+			pkts[pos + 3]->port = cq[pos + p3].user_index_low;
 		}
 		if (rxq->hw_timestamp) {
 			int offset = rxq->timestamp_offset;
@@ -1295,17 +1295,17 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
 								metadata;
 			pkts[pos]->ol_flags |= metadata ? flag : 0ULL;
 			metadata = rte_be_to_cpu_32
-				(cq[pos + 1].flow_table_metadata) & mask;
+				(cq[pos + p1].flow_table_metadata) & mask;
 			*RTE_MBUF_DYNFIELD(pkts[pos + 1], offs, uint32_t *) =
 								metadata;
 			pkts[pos + 1]->ol_flags |= metadata ? flag : 0ULL;
 			metadata = rte_be_to_cpu_32
-				(cq[pos + 2].flow_table_metadata) &	mask;
+				(cq[pos + p2].flow_table_metadata) & mask;
 			*RTE_MBUF_DYNFIELD(pkts[pos + 2], offs, uint32_t *) =
 								metadata;
 			pkts[pos + 2]->ol_flags |= metadata ? flag : 0ULL;
 			metadata = rte_be_to_cpu_32
-				(cq[pos + 3].flow_table_metadata) &	mask;
+				(cq[pos + p3].flow_table_metadata) & mask;
 			*RTE_MBUF_DYNFIELD(pkts[pos + 3], offs, uint32_t *) =
 								metadata;
 			pkts[pos + 3]->ol_flags |= metadata ? flag : 0ULL;
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_neon.h b/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
index 3ed688191f..942d395dc9 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
+++ b/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
@@ -835,13 +835,13 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
 		rxq_cq_to_ptype_oflags_v(rxq, ptype_info, flow_tag,
 					 opcode, &elts[pos]);
 		if (unlikely(rxq->shared)) {
-			elts[pos]->port = container_of(p0, struct mlx5_cqe,
+			pkts[pos]->port = container_of(p0, struct mlx5_cqe,
 					      pkt_info)->user_index_low;
-			elts[pos + 1]->port = container_of(p1, struct mlx5_cqe,
+			pkts[pos + 1]->port = container_of(p1, struct mlx5_cqe,
 					      pkt_info)->user_index_low;
-			elts[pos + 2]->port = container_of(p2, struct mlx5_cqe,
+			pkts[pos + 2]->port = container_of(p2, struct mlx5_cqe,
 					      pkt_info)->user_index_low;
-			elts[pos + 3]->port = container_of(p3, struct mlx5_cqe,
+			pkts[pos + 3]->port = container_of(p3, struct mlx5_cqe,
 					      pkt_info)->user_index_low;
 		}
 		if (unlikely(rxq->hw_timestamp)) {
@@ -853,34 +853,34 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
 				ts = rte_be_to_cpu_64
 					(container_of(p0, struct mlx5_cqe,
 						      pkt_info)->timestamp);
-				mlx5_timestamp_set(elts[pos], offset,
+				mlx5_timestamp_set(pkts[pos], offset,
 					mlx5_txpp_convert_rx_ts(sh, ts));
 				ts = rte_be_to_cpu_64
 					(container_of(p1, struct mlx5_cqe,
 						      pkt_info)->timestamp);
-				mlx5_timestamp_set(elts[pos + 1], offset,
+				mlx5_timestamp_set(pkts[pos + 1], offset,
 					mlx5_txpp_convert_rx_ts(sh, ts));
 				ts = rte_be_to_cpu_64
 					(container_of(p2, struct mlx5_cqe,
 						      pkt_info)->timestamp);
-				mlx5_timestamp_set(elts[pos + 2], offset,
+				mlx5_timestamp_set(pkts[pos + 2], offset,
 					mlx5_txpp_convert_rx_ts(sh, ts));
 				ts = rte_be_to_cpu_64
 					(container_of(p3, struct mlx5_cqe,
 						      pkt_info)->timestamp);
-				mlx5_timestamp_set(elts[pos + 3], offset,
+				mlx5_timestamp_set(pkts[pos + 3], offset,
 					mlx5_txpp_convert_rx_ts(sh, ts));
 			} else {
-				mlx5_timestamp_set(elts[pos], offset,
+				mlx5_timestamp_set(pkts[pos], offset,
 					rte_be_to_cpu_64(container_of(p0,
 					struct mlx5_cqe, pkt_info)->timestamp));
-				mlx5_timestamp_set(elts[pos + 1], offset,
+				mlx5_timestamp_set(pkts[pos + 1], offset,
 					rte_be_to_cpu_64(container_of(p1,
 					struct mlx5_cqe, pkt_info)->timestamp));
-				mlx5_timestamp_set(elts[pos + 2], offset,
+				mlx5_timestamp_set(pkts[pos + 2], offset,
 					rte_be_to_cpu_64(container_of(p2,
 					struct mlx5_cqe, pkt_info)->timestamp));
-				mlx5_timestamp_set(elts[pos + 3], offset,
+				mlx5_timestamp_set(pkts[pos + 3], offset,
 					rte_be_to_cpu_64(container_of(p3,
 					struct mlx5_cqe, pkt_info)->timestamp));
 			}
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_sse.h b/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
index 2bdd1f676d..fb59c11346 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
+++ b/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
@@ -783,9 +783,9 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
 		rxq_cq_to_ptype_oflags_v(rxq, cqes, opcode, &pkts[pos]);
 		if (unlikely(rxq->shared)) {
 			pkts[pos]->port = cq[pos].user_index_low;
-			pkts[pos + p1]->port = cq[pos + p1].user_index_low;
-			pkts[pos + p2]->port = cq[pos + p2].user_index_low;
-			pkts[pos + p3]->port = cq[pos + p3].user_index_low;
+			pkts[pos + 1]->port = cq[pos + p1].user_index_low;
+			pkts[pos + 2]->port = cq[pos + p2].user_index_low;
+			pkts[pos + 3]->port = cq[pos + p3].user_index_low;
 		}
 		if (unlikely(rxq->hw_timestamp)) {
 			int offset = rxq->timestamp_offset;
-- 
2.43.5



* RE: [PATCH] net/mlx5: fix shared Rx queue port number in data path
  2024-10-28 17:53 [PATCH] net/mlx5: fix shared Rx queue port number in data path Alexander Kozyrev
@ 2024-10-29 12:34 ` Slava Ovsiienko
  2024-11-13 13:36 ` Raslan Darawsheh
  1 sibling, 0 replies; 3+ messages in thread
From: Slava Ovsiienko @ 2024-10-29 12:34 UTC (permalink / raw)
  To: Alexander Kozyrev, dev
  Cc: stable, Raslan Darawsheh, Matan Azrad, Dariusz Sosnowski,
	Bing Zhao, Suanming Mou

Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>

> -----Original Message-----
> From: Alexander Kozyrev <akozyrev@nvidia.com>
> Sent: Monday, October 28, 2024 7:54 PM
> To: dev@dpdk.org
> Cc: stable@dpdk.org; Raslan Darawsheh <rasland@nvidia.com>; Slava
> Ovsiienko <viacheslavo@nvidia.com>; Matan Azrad <matan@nvidia.com>;
> Dariusz Sosnowski <dsosnowski@nvidia.com>; Bing Zhao
> <bingz@nvidia.com>; Suanming Mou <suanmingm@nvidia.com>
> Subject: [PATCH] net/mlx5: fix shared Rx queue port number in data path
> 
> The wrong CQE is used to get the shared Rx queue port number in the
> vectorized Rx burst routines. Fix the CQE indexing.
> 
> Fixes: 25ed2ebff1 ("net/mlx5: support shared Rx queue port data path")
> 
> Signed-off-by: Alexander Kozyrev <akozyrev@nvidia.com>



* Re: [PATCH] net/mlx5: fix shared Rx queue port number in data path
  2024-10-28 17:53 [PATCH] net/mlx5: fix shared Rx queue port number in data path Alexander Kozyrev
  2024-10-29 12:34 ` Slava Ovsiienko
@ 2024-11-13 13:36 ` Raslan Darawsheh
  1 sibling, 0 replies; 3+ messages in thread
From: Raslan Darawsheh @ 2024-11-13 13:36 UTC (permalink / raw)
  To: Alexander Kozyrev, dev
  Cc: stable, Slava Ovsiienko, Matan Azrad, Dariusz Sosnowski,
	Bing Zhao, Suanming Mou

Hi,

From: Alexander Kozyrev <akozyrev@nvidia.com>
Sent: Monday, October 28, 2024 7:53 PM
To: dev@dpdk.org
Cc: stable@dpdk.org; Raslan Darawsheh; Slava Ovsiienko; Matan Azrad; Dariusz Sosnowski; Bing Zhao; Suanming Mou
Subject: [PATCH] net/mlx5: fix shared Rx queue port number in data path

The wrong CQE is used to get the shared Rx queue port number in the
vectorized Rx burst routines. Fix the CQE indexing.

Fixes: 25ed2ebff1 ("net/mlx5: support shared Rx queue port data path")

Signed-off-by: Alexander Kozyrev <akozyrev@nvidia.com>
net/mlx5: fix shared Rx queue port number in data path

Patch applied to next-net-mlx,

Kindest regards
Raslan Darawsheh

