* [dpdk-stable] [PATCH] net/mlx5: fix Rx metadata leftovers
From: Viacheslav Ovsiienko @ 2021-03-07 11:45 UTC
  To: dev; +Cc: rasland, matan, orika, stable

The Rx metadata might use the metadata register C0 to keep the
values. The same register C0 might be used by the kernel for
source vport value handling; the kernel uses the upper half of
the register, leaving the lower half for application usage.

In the extended metadata mode 1 (the dv_xmeta_en devarg is
set to 1) the metadata width is only 16 bits, but the Rx
datapath code fetched the entire 32-bit value of the metadata
register and presented it to the application. This patch masks
the data according to the chosen metadata mode.
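
For illustration, below is a minimal standalone C sketch of the
masking scheme described above. The helper names are hypothetical;
only dv_xmeta_en, MLX5_XMETA_MODE_META16 and the new
flow_meta_port_mask field come from the patch itself.

#include <stdint.h>

/* Hypothetical helper: derive the per-port metadata mask.
 * In extended metadata mode 1 (MLX5_XMETA_MODE_META16) the kernel
 * owns the upper 16 bits of register C0, so only the lower half
 * belongs to the application.
 */
static uint32_t
meta_port_mask(int meta16_mode)
{
	uint32_t mask = (uint32_t)~0; /* full 32-bit metadata */

	if (meta16_mode)
		mask >>= 16; /* keep the lower half: 0x0000ffff */
	return mask;
}

/* On the Rx path the CQE value is masked before it is stored in the
 * mbuf dynamic field, so kernel leftovers in the upper half neither
 * leak to the application nor spuriously set the mbuf flag.
 */
static uint32_t
rx_masked_meta(uint32_t cqe_meta, uint32_t port_mask)
{
	return cqe_meta & port_mask;
}

For example, meta_port_mask(1) is 0x0000ffff, so a CQE carrying
0xabcd1234 yields 0x1234 for the application.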

Fixes: 6c55b622a956 ("net/mlx5: set dynamic flow metadata in Rx queues")
Cc: stable@dpdk.org

Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/mlx5_flow.c             |  4 ++++
 drivers/net/mlx5/mlx5_rxtx.c             | 13 +++++++++----
 drivers/net/mlx5/mlx5_rxtx.h             |  1 +
 drivers/net/mlx5/mlx5_rxtx_vec_altivec.h | 11 ++++++-----
 drivers/net/mlx5/mlx5_rxtx_vec_neon.h    | 13 +++++++++----
 drivers/net/mlx5/mlx5_rxtx_vec_sse.h     |  9 +++++----
 6 files changed, 34 insertions(+), 17 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index ab5be3dacc..fc5f2088b2 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -1267,10 +1267,14 @@ mlx5_flow_rxq_dynf_metadata_set(struct rte_eth_dev *dev)
 			data->dynf_meta = 0;
 			data->flow_meta_mask = 0;
 			data->flow_meta_offset = -1;
+			data->flow_meta_port_mask = 0;
 		} else {
 			data->dynf_meta = 1;
 			data->flow_meta_mask = rte_flow_dynf_metadata_mask;
 			data->flow_meta_offset = rte_flow_dynf_metadata_offs;
+			data->flow_meta_port_mask = (uint32_t)~0;
+			if (priv->config.dv_xmeta_en == MLX5_XMETA_MODE_META16)
+				data->flow_meta_port_mask >>= 16;
 		}
 	}
 }
diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index e3ce9fd224..c76b9951bc 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -1388,10 +1388,15 @@ rxq_cq_to_mbuf(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt,
 			}
 		}
 	}
-	if (rxq->dynf_meta && cqe->flow_table_metadata) {
-		pkt->ol_flags |= rxq->flow_meta_mask;
-		*RTE_MBUF_DYNFIELD(pkt, rxq->flow_meta_offset, uint32_t *) =
-			cqe->flow_table_metadata;
+	if (rxq->dynf_meta) {
+		uint32_t meta = cqe->flow_table_metadata &
+				rxq->flow_meta_port_mask;
+
+		if (meta) {
+			pkt->ol_flags |= rxq->flow_meta_mask;
+			*RTE_MBUF_DYNFIELD(pkt, rxq->flow_meta_offset,
+						uint32_t *) = meta;
+		}
 	}
 	if (rxq->csum)
 		pkt->ol_flags |= rxq_cq_to_ol_flags(cqe);
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index 0fd98af9d1..4f0fda0dec 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -168,6 +168,7 @@ struct mlx5_rxq_data {
 	uint64_t timestamp_rx_flag; /* Dynamic mbuf flag for timestamp. */
 	uint64_t flow_meta_mask;
 	int32_t flow_meta_offset;
+	uint32_t flow_meta_port_mask;
 	uint32_t rxseg_n; /* Number of split segment descriptions. */
 	struct mlx5_eth_rxseg rxseg[MLX5_MAX_RXQ_NSEG];
 	/* Buffer split segment descriptions - sizes, offsets, pools. */
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_altivec.h b/drivers/net/mlx5/mlx5_rxtx_vec_altivec.h
index 48b677e40d..2d1154b624 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec_altivec.h
+++ b/drivers/net/mlx5/mlx5_rxtx_vec_altivec.h
@@ -1221,22 +1221,23 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
 		if (rxq->dynf_meta) {
 			uint64_t flag = rxq->flow_meta_mask;
 			int32_t offs = rxq->flow_meta_offset;
-			uint32_t metadata;
+			uint32_t metadata, mask;
 
+			mask = rxq->flow_meta_port_mask;
 			/* This code is subject to further optimization. */
-			metadata = cq[pos].flow_table_metadata;
+			metadata = cq[pos].flow_table_metadata & mask;
 			*RTE_MBUF_DYNFIELD(pkts[pos], offs, uint32_t *) =
 								metadata;
 			pkts[pos]->ol_flags |= metadata ? flag : 0ULL;
-			metadata = cq[pos + 1].flow_table_metadata;
+			metadata = cq[pos + 1].flow_table_metadata & mask;
 			*RTE_MBUF_DYNFIELD(pkts[pos + 1], offs, uint32_t *) =
 								metadata;
 			pkts[pos + 1]->ol_flags |= metadata ? flag : 0ULL;
-			metadata = cq[pos + 2].flow_table_metadata;
+			metadata = cq[pos + 2].flow_table_metadata & mask;
 			*RTE_MBUF_DYNFIELD(pkts[pos + 2], offs, uint32_t *) =
 								metadata;
 			pkts[pos + 2]->ol_flags |= metadata ? flag : 0ULL;
-			metadata = cq[pos + 3].flow_table_metadata;
+			metadata = cq[pos + 3].flow_table_metadata & mask;
 			*RTE_MBUF_DYNFIELD(pkts[pos + 3], offs, uint32_t *) =
 								metadata;
 			pkts[pos + 3]->ol_flags |= metadata ? flag : 0ULL;
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_neon.h b/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
index 4c067d8801..2234fbe6b2 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
+++ b/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
@@ -832,19 +832,24 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
 		if (rxq->dynf_meta) {
 			/* This code is subject to further optimization. */
 			int32_t offs = rxq->flow_meta_offset;
+			uint32_t mask = rxq->flow_meta_port_mask;
 
 			*RTE_MBUF_DYNFIELD(pkts[pos], offs, uint32_t *) =
 				container_of(p0, struct mlx5_cqe,
-					     pkt_info)->flow_table_metadata;
+					     pkt_info)->flow_table_metadata &
+					     mask;
 			*RTE_MBUF_DYNFIELD(pkts[pos + 1], offs, uint32_t *) =
 				container_of(p1, struct mlx5_cqe,
-					     pkt_info)->flow_table_metadata;
+					     pkt_info)->flow_table_metadata &
+					     mask;
 			*RTE_MBUF_DYNFIELD(pkts[pos + 2], offs, uint32_t *) =
 				container_of(p2, struct mlx5_cqe,
-					     pkt_info)->flow_table_metadata;
+					     pkt_info)->flow_table_metadata &
+					     mask;
 			*RTE_MBUF_DYNFIELD(pkts[pos + 3], offs, uint32_t *) =
 				container_of(p3, struct mlx5_cqe,
-					     pkt_info)->flow_table_metadata;
+					     pkt_info)->flow_table_metadata &
+					     mask;
 			if (*RTE_MBUF_DYNFIELD(pkts[pos], offs, uint32_t *))
 				elts[pos]->ol_flags |= rxq->flow_meta_mask;
 			if (*RTE_MBUF_DYNFIELD(pkts[pos + 1], offs, uint32_t *))
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_sse.h b/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
index 0b3f240e10..c508a7a4f2 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
+++ b/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
@@ -768,15 +768,16 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
 		if (rxq->dynf_meta) {
 			/* This code is subject to further optimization. */
 			int32_t offs = rxq->flow_meta_offset;
+			uint32_t mask = rxq->flow_meta_port_mask;
 
 			*RTE_MBUF_DYNFIELD(pkts[pos], offs, uint32_t *) =
-				cq[pos].flow_table_metadata;
+				cq[pos].flow_table_metadata & mask;
 			*RTE_MBUF_DYNFIELD(pkts[pos + 1], offs, uint32_t *) =
-				cq[pos + p1].flow_table_metadata;
+				cq[pos + p1].flow_table_metadata & mask;
 			*RTE_MBUF_DYNFIELD(pkts[pos + 2], offs, uint32_t *) =
-				cq[pos + p2].flow_table_metadata;
+				cq[pos + p2].flow_table_metadata & mask;
 			*RTE_MBUF_DYNFIELD(pkts[pos + 3], offs, uint32_t *) =
-				cq[pos + p3].flow_table_metadata;
+				cq[pos + p3].flow_table_metadata & mask;
 			if (*RTE_MBUF_DYNFIELD(pkts[pos], offs, uint32_t *))
 				pkts[pos]->ol_flags |= rxq->flow_meta_mask;
 			if (*RTE_MBUF_DYNFIELD(pkts[pos + 1], offs, uint32_t *))
-- 
2.28.0

