DPDK patches and discussions
* [dpdk-dev] [PATCH 1/3] net/mlx5: replace network to host macros
@ 2017-09-04 11:48 Shachar Beiser
  2017-09-04 11:48 ` [dpdk-dev] [PATCH 2/3] net/mlx5: fix TSO MLNX OFED 3.3 verification Shachar Beiser
                   ` (5 more replies)
  0 siblings, 6 replies; 25+ messages in thread
From: Shachar Beiser @ 2017-09-04 11:48 UTC
  To: dev; +Cc: Shachar Beiser, Adrien Mazarguil, Nelio Laranjeiro

Replace the network byte order macros used across the mlx5 PMD (htons,
htonl, htonll, ntohs, ntohl) with the DPDK byte order converters from
rte_byteorder.h (rte_cpu_to_be_16/32/64, rte_be_to_cpu_16/32).

Fixes: 8bb5119634b7 ("net/mlx5: replace network byte order macro")
Cc: shacharbe@mellanox.com

Signed-off-by: Shachar Beiser <shacharbe@mellanox.com>
---
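A quick reference for reviewers: the series mechanically swaps the libc
byte order macros for the DPDK converters declared in rte_byteorder.h,
which are identity operations on big endian CPUs and byte swaps on
little endian ones. A minimal sketch of the mapping is below
(illustration only, not part of the patch; the function name and the
sample values are made up):

#include <stdint.h>
#include <rte_byteorder.h>

static inline void
byte_order_mapping_sketch(void)
{
	/* Sample values only. */
	uint16_t vlan_tci = 0x0123;
	uint32_t lkey = 0xdeadbeef;
	uint64_t addr = 0x0123456789abcdefULL;

	/* htons(x)  -> rte_cpu_to_be_16(x) */
	uint16_t be16 = rte_cpu_to_be_16(vlan_tci);
	/* htonl(x)  -> rte_cpu_to_be_32(x) */
	uint32_t be32 = rte_cpu_to_be_32(lkey);
	/* htonll(x) -> rte_cpu_to_be_64(x); htonll is not standard C/POSIX */
	uint64_t be64 = rte_cpu_to_be_64(addr);
	/* ntohs(x)  -> rte_be_to_cpu_16(x) */
	uint16_t host16 = rte_be_to_cpu_16(be16);
	/* ntohl(x)  -> rte_be_to_cpu_32(x) */
	uint32_t host32 = rte_be_to_cpu_32(be32);

	(void)be64;
	(void)host16;
	(void)host32;
}
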
 drivers/net/mlx5/mlx5_mac.c          |   8 ++-
 drivers/net/mlx5/mlx5_mr.c           |   2 +-
 drivers/net/mlx5/mlx5_rxmode.c       |   8 ++-
 drivers/net/mlx5/mlx5_rxq.c          |   9 +--
 drivers/net/mlx5/mlx5_rxtx.c         | 131 +++++++++++++++++++----------------
 drivers/net/mlx5/mlx5_rxtx.h         |  12 ++--
 drivers/net/mlx5/mlx5_rxtx_vec_sse.c |  12 ++--
 7 files changed, 102 insertions(+), 80 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_mac.c b/drivers/net/mlx5/mlx5_mac.c
index 45d23e4..b3c3fa2 100644
--- a/drivers/net/mlx5/mlx5_mac.c
+++ b/drivers/net/mlx5/mlx5_mac.c
@@ -263,11 +263,15 @@
 				(*mac)[0], (*mac)[1], (*mac)[2],
 				(*mac)[3], (*mac)[4], (*mac)[5]
 			},
-			.vlan_tag = (vlan_enabled ? htons(vlan_id) : 0),
+			.vlan_tag = (vlan_enabled ?
+				     rte_cpu_to_be_16(vlan_id)
+				     : 0),
 		},
 		.mask = {
 			.dst_mac = "\xff\xff\xff\xff\xff\xff",
-			.vlan_tag = (vlan_enabled ? htons(0xfff) : 0),
+			.vlan_tag = (vlan_enabled ?
+				     rte_cpu_to_be_16(0xfff) :
+				     0),
 		},
 	};
 	DEBUG("%p: adding MAC address %02x:%02x:%02x:%02x:%02x:%02x index %u"
diff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c
index 9593830..9a9f73a 100644
--- a/drivers/net/mlx5/mlx5_mr.c
+++ b/drivers/net/mlx5/mlx5_mr.c
@@ -203,7 +203,7 @@ struct ibv_mr *
 	txq_ctrl->txq.mp2mr[idx].start = (uintptr_t)mr->addr;
 	txq_ctrl->txq.mp2mr[idx].end = (uintptr_t)mr->addr + mr->length;
 	txq_ctrl->txq.mp2mr[idx].mr = mr;
-	txq_ctrl->txq.mp2mr[idx].lkey = htonl(mr->lkey);
+	txq_ctrl->txq.mp2mr[idx].lkey = rte_cpu_to_be_32(mr->lkey);
 	DEBUG("%p: new MR lkey for MP \"%s\" (%p): 0x%08" PRIu32,
 	      (void *)txq_ctrl, mp->name, (void *)mp,
 	      txq_ctrl->txq.mp2mr[idx].lkey);
diff --git a/drivers/net/mlx5/mlx5_rxmode.c b/drivers/net/mlx5/mlx5_rxmode.c
index 4a51e47..db2e05b 100644
--- a/drivers/net/mlx5/mlx5_rxmode.c
+++ b/drivers/net/mlx5/mlx5_rxmode.c
@@ -159,14 +159,18 @@
 				mac[0], mac[1], mac[2],
 				mac[3], mac[4], mac[5],
 			},
-			.vlan_tag = (vlan_enabled ? htons(vlan_id) : 0),
+			.vlan_tag = (vlan_enabled ?
+				     rte_cpu_to_be_16(vlan_id) :
+				     0),
 		},
 		.mask = {
 			.dst_mac = {
 				mask[0], mask[1], mask[2],
 				mask[3], mask[4], mask[5],
 			},
-			.vlan_tag = (vlan_enabled ? htons(0xfff) : 0),
+			.vlan_tag = (vlan_enabled ?
+				     rte_cpu_to_be_16(0xfff) :
+				     0),
 		},
 	};
 
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 35c5cb4..437dc02 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -672,9 +672,10 @@
 		/* scat->addr must be able to store a pointer. */
 		assert(sizeof(scat->addr) >= sizeof(uintptr_t));
 		*scat = (struct mlx5_wqe_data_seg){
-			.addr = htonll(rte_pktmbuf_mtod(buf, uintptr_t)),
-			.byte_count = htonl(DATA_LEN(buf)),
-			.lkey = htonl(rxq_ctrl->mr->lkey),
+			.addr =
+			    rte_cpu_to_be_64(rte_pktmbuf_mtod(buf, uintptr_t)),
+			.byte_count = rte_cpu_to_be_32(DATA_LEN(buf)),
+			.lkey = rte_cpu_to_be_32(rxq_ctrl->mr->lkey),
 		};
 		(*rxq_ctrl->rxq.elts)[i] = buf;
 	}
@@ -1077,7 +1078,7 @@
 	/* Update doorbell counter. */
 	rxq_ctrl->rxq.rq_ci = desc >> rxq_ctrl->rxq.sges_n;
 	rte_wmb();
-	*rxq_ctrl->rxq.rq_db = htonl(rxq_ctrl->rxq.rq_ci);
+	*rxq_ctrl->rxq.rq_db = rte_cpu_to_be_32(rxq_ctrl->rxq.rq_ci);
 	DEBUG("%p: rxq updated with %p", (void *)rxq_ctrl, (void *)&tmpl);
 	assert(ret == 0);
 	return 0;
diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index fe9e7ea..e1a35a3 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -306,7 +306,7 @@
 
 		op_own = cqe->op_own;
 		if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED)
-			n = ntohl(cqe->byte_cnt);
+			n = rte_be_to_cpu_32(cqe->byte_cnt);
 		else
 			n = 1;
 		cq_ci += n;
@@ -434,7 +434,8 @@
 		raw = ((uint8_t *)(uintptr_t)wqe) + 2 * MLX5_WQE_DWORD_SIZE;
 		/* Replace the Ethernet type by the VLAN if necessary. */
 		if (buf->ol_flags & PKT_TX_VLAN_PKT) {
-			uint32_t vlan = htonl(0x81000000 | buf->vlan_tci);
+			uint32_t vlan = rte_cpu_to_be_32(0x81000000 |
+							 buf->vlan_tci);
 			unsigned int len = 2 * ETHER_ADDR_LEN - 2;
 
 			addr += 2;
@@ -510,8 +511,10 @@
 				} else {
 					/* NOP WQE. */
 					wqe->ctrl = (rte_v128u32_t){
-						     htonl(txq->wqe_ci << 8),
-						     htonl(txq->qp_num_8s | 1),
+						     rte_cpu_to_be_32(
+							txq->wqe_ci << 8),
+						     rte_cpu_to_be_32(
+							txq->qp_num_8s | 1),
 						     0,
 						     0,
 					};
@@ -550,7 +553,8 @@
 				max_wqe -= n;
 				if (tso) {
 					uint32_t inl =
-						htonl(copy_b | MLX5_INLINE_SEG);
+					rte_cpu_to_be_32(copy_b |
+							 MLX5_INLINE_SEG);
 
 					pkt_inline_sz =
 						MLX5_WQE_DS(tso_header_sz) *
@@ -603,9 +607,9 @@
 			ds = 3;
 use_dseg:
 			/* Add the remaining packet as a simple ds. */
-			naddr = htonll(addr);
+			naddr = rte_cpu_to_be_64(addr);
 			*dseg = (rte_v128u32_t){
-				htonl(length),
+				rte_cpu_to_be_32(length),
 				mlx5_tx_mb2mr(txq, buf),
 				naddr,
 				naddr >> 32,
@@ -642,9 +646,9 @@
 		total_length += length;
 #endif
 		/* Store segment information. */
-		naddr = htonll(rte_pktmbuf_mtod(buf, uintptr_t));
+		naddr = rte_cpu_to_be_64(rte_pktmbuf_mtod(buf, uintptr_t));
 		*dseg = (rte_v128u32_t){
-			htonl(length),
+			rte_cpu_to_be_32(length),
 			mlx5_tx_mb2mr(txq, buf),
 			naddr,
 			naddr >> 32,
@@ -663,21 +667,23 @@
 		/* Initialize known and common part of the WQE structure. */
 		if (tso) {
 			wqe->ctrl = (rte_v128u32_t){
-				htonl((txq->wqe_ci << 8) | MLX5_OPCODE_TSO),
-				htonl(txq->qp_num_8s | ds),
+				rte_cpu_to_be_32((txq->wqe_ci << 8) |
+						 MLX5_OPCODE_TSO),
+				rte_cpu_to_be_32(txq->qp_num_8s | ds),
 				0,
 				0,
 			};
 			wqe->eseg = (rte_v128u32_t){
 				0,
-				cs_flags | (htons(tso_segsz) << 16),
+				cs_flags | (rte_cpu_to_be_16(tso_segsz) << 16),
 				0,
-				(ehdr << 16) | htons(tso_header_sz),
+				(ehdr << 16) | rte_cpu_to_be_16(tso_header_sz),
 			};
 		} else {
 			wqe->ctrl = (rte_v128u32_t){
-				htonl((txq->wqe_ci << 8) | MLX5_OPCODE_SEND),
-				htonl(txq->qp_num_8s | ds),
+				rte_cpu_to_be_32((txq->wqe_ci << 8) |
+						 MLX5_OPCODE_SEND),
+				rte_cpu_to_be_32(txq->qp_num_8s | ds),
 				0,
 				0,
 			};
@@ -685,7 +691,7 @@
 				0,
 				cs_flags,
 				0,
-				(ehdr << 16) | htons(pkt_inline_sz),
+				(ehdr << 16) | rte_cpu_to_be_16(pkt_inline_sz),
 			};
 		}
 next_wqe:
@@ -705,7 +711,7 @@
 	comp = txq->elts_comp + i + j + k;
 	if (comp >= MLX5_TX_COMP_THRESH) {
 		/* Request completion on last WQE. */
-		last_wqe->ctrl2 = htonl(8);
+		last_wqe->ctrl2 = rte_cpu_to_be_32(8);
 		/* Save elts_head in unused "immediate" field of WQE. */
 		last_wqe->ctrl3 = txq->elts_head;
 		txq->elts_comp = 0;
@@ -744,13 +750,14 @@
 	mpw->len = length;
 	mpw->total_len = 0;
 	mpw->wqe = (volatile struct mlx5_wqe *)tx_mlx5_wqe(txq, idx);
-	mpw->wqe->eseg.mss = htons(length);
+	mpw->wqe->eseg.mss = rte_cpu_to_be_16(length);
 	mpw->wqe->eseg.inline_hdr_sz = 0;
 	mpw->wqe->eseg.rsvd0 = 0;
 	mpw->wqe->eseg.rsvd1 = 0;
 	mpw->wqe->eseg.rsvd2 = 0;
-	mpw->wqe->ctrl[0] = htonl((MLX5_OPC_MOD_MPW << 24) |
-				  (txq->wqe_ci << 8) | MLX5_OPCODE_TSO);
+	mpw->wqe->ctrl[0] = rte_cpu_to_be_32((MLX5_OPC_MOD_MPW << 24) |
+					     (txq->wqe_ci << 8) |
+					     MLX5_OPCODE_TSO);
 	mpw->wqe->ctrl[2] = 0;
 	mpw->wqe->ctrl[3] = 0;
 	mpw->data.dseg[0] = (volatile struct mlx5_wqe_data_seg *)
@@ -779,7 +786,7 @@
 	 * Store size in multiple of 16 bytes. Control and Ethernet segments
 	 * count as 2.
 	 */
-	mpw->wqe->ctrl[1] = htonl(txq->qp_num_8s | (2 + num));
+	mpw->wqe->ctrl[1] = rte_cpu_to_be_32(txq->qp_num_8s | (2 + num));
 	mpw->state = MLX5_MPW_STATE_CLOSED;
 	if (num < 3)
 		++txq->wqe_ci;
@@ -886,9 +893,9 @@
 			dseg = mpw.data.dseg[mpw.pkts_n];
 			addr = rte_pktmbuf_mtod(buf, uintptr_t);
 			*dseg = (struct mlx5_wqe_data_seg){
-				.byte_count = htonl(DATA_LEN(buf)),
+				.byte_count = rte_cpu_to_be_32(DATA_LEN(buf)),
 				.lkey = mlx5_tx_mb2mr(txq, buf),
-				.addr = htonll(addr),
+				.addr = rte_cpu_to_be_64(addr),
 			};
 #if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
 			length += DATA_LEN(buf);
@@ -916,7 +923,7 @@
 		volatile struct mlx5_wqe *wqe = mpw.wqe;
 
 		/* Request completion on last WQE. */
-		wqe->ctrl[2] = htonl(8);
+		wqe->ctrl[2] = rte_cpu_to_be_32(8);
 		/* Save elts_head in unused "immediate" field of WQE. */
 		wqe->ctrl[3] = elts_head;
 		txq->elts_comp = 0;
@@ -956,12 +963,12 @@
 	mpw->len = length;
 	mpw->total_len = 0;
 	mpw->wqe = (volatile struct mlx5_wqe *)tx_mlx5_wqe(txq, idx);
-	mpw->wqe->ctrl[0] = htonl((MLX5_OPC_MOD_MPW << 24) |
-				  (txq->wqe_ci << 8) |
-				  MLX5_OPCODE_TSO);
+	mpw->wqe->ctrl[0] = rte_cpu_to_be_32((MLX5_OPC_MOD_MPW << 24) |
+					     (txq->wqe_ci << 8) |
+					     MLX5_OPCODE_TSO);
 	mpw->wqe->ctrl[2] = 0;
 	mpw->wqe->ctrl[3] = 0;
-	mpw->wqe->eseg.mss = htons(length);
+	mpw->wqe->eseg.mss = rte_cpu_to_be_16(length);
 	mpw->wqe->eseg.inline_hdr_sz = 0;
 	mpw->wqe->eseg.cs_flags = 0;
 	mpw->wqe->eseg.rsvd0 = 0;
@@ -992,9 +999,10 @@
 	 * Store size in multiple of 16 bytes. Control and Ethernet segments
 	 * count as 2.
 	 */
-	mpw->wqe->ctrl[1] = htonl(txq->qp_num_8s | MLX5_WQE_DS(size));
+	mpw->wqe->ctrl[1] = rte_cpu_to_be_32(txq->qp_num_8s |
+					     MLX5_WQE_DS(size));
 	mpw->state = MLX5_MPW_STATE_CLOSED;
-	inl->byte_cnt = htonl(mpw->total_len | MLX5_INLINE_SEG);
+	inl->byte_cnt = rte_cpu_to_be_32(mpw->total_len | MLX5_INLINE_SEG);
 	txq->wqe_ci += (size + (MLX5_WQE_SIZE - 1)) / MLX5_WQE_SIZE;
 }
 
@@ -1132,9 +1140,10 @@
 				dseg = mpw.data.dseg[mpw.pkts_n];
 				addr = rte_pktmbuf_mtod(buf, uintptr_t);
 				*dseg = (struct mlx5_wqe_data_seg){
-					.byte_count = htonl(DATA_LEN(buf)),
+					.byte_count =
+					       rte_cpu_to_be_32(DATA_LEN(buf)),
 					.lkey = mlx5_tx_mb2mr(txq, buf),
-					.addr = htonll(addr),
+					.addr = rte_cpu_to_be_64(addr),
 				};
 #if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
 				length += DATA_LEN(buf);
@@ -1206,7 +1215,7 @@
 		volatile struct mlx5_wqe *wqe = mpw.wqe;
 
 		/* Request completion on last WQE. */
-		wqe->ctrl[2] = htonl(8);
+		wqe->ctrl[2] = rte_cpu_to_be_32(8);
 		/* Save elts_head in unused "immediate" field of WQE. */
 		wqe->ctrl[3] = elts_head;
 		txq->elts_comp = 0;
@@ -1246,9 +1255,10 @@
 	mpw->pkts_n = 0;
 	mpw->total_len = sizeof(struct mlx5_wqe);
 	mpw->wqe = (volatile struct mlx5_wqe *)tx_mlx5_wqe(txq, idx);
-	mpw->wqe->ctrl[0] = htonl((MLX5_OPC_MOD_ENHANCED_MPSW << 24) |
-				  (txq->wqe_ci << 8) |
-				  MLX5_OPCODE_ENHANCED_MPSW);
+	mpw->wqe->ctrl[0] =
+		rte_cpu_to_be_32((MLX5_OPC_MOD_ENHANCED_MPSW << 24) |
+				 (txq->wqe_ci << 8) |
+				 MLX5_OPCODE_ENHANCED_MPSW);
 	mpw->wqe->ctrl[2] = 0;
 	mpw->wqe->ctrl[3] = 0;
 	memset((void *)(uintptr_t)&mpw->wqe->eseg, 0, MLX5_WQE_DWORD_SIZE);
@@ -1256,9 +1266,9 @@
 		uintptr_t addr = (uintptr_t)(mpw->wqe + 1);
 
 		/* Pad the first 2 DWORDs with zero-length inline header. */
-		*(volatile uint32_t *)addr = htonl(MLX5_INLINE_SEG);
+		*(volatile uint32_t *)addr = rte_cpu_to_be_32(MLX5_INLINE_SEG);
 		*(volatile uint32_t *)(addr + MLX5_WQE_DWORD_SIZE) =
-			htonl(MLX5_INLINE_SEG);
+			rte_cpu_to_be_32(MLX5_INLINE_SEG);
 		mpw->total_len += 2 * MLX5_WQE_DWORD_SIZE;
 		/* Start from the next WQEBB. */
 		mpw->data.raw = (volatile void *)(tx_mlx5_wqe(txq, idx + 1));
@@ -1286,7 +1296,8 @@
 	/* Store size in multiple of 16 bytes. Control and Ethernet segments
 	 * count as 2.
 	 */
-	mpw->wqe->ctrl[1] = htonl(txq->qp_num_8s | MLX5_WQE_DS(mpw->total_len));
+	mpw->wqe->ctrl[1] = rte_cpu_to_be_32(txq->qp_num_8s |
+					     MLX5_WQE_DS(mpw->total_len));
 	mpw->state = MLX5_MPW_STATE_CLOSED;
 	ret = (mpw->total_len + (MLX5_WQE_SIZE - 1)) / MLX5_WQE_SIZE;
 	txq->wqe_ci += ret;
@@ -1439,9 +1450,10 @@
 				dseg = mpw.data.dseg[mpw.pkts_n];
 				addr = rte_pktmbuf_mtod(buf, uintptr_t);
 				*dseg = (struct mlx5_wqe_data_seg){
-					.byte_count = htonl(DATA_LEN(buf)),
+					.byte_count = rte_cpu_to_be_32(
+								DATA_LEN(buf)),
 					.lkey = mlx5_tx_mb2mr(txq, buf),
-					.addr = htonll(addr),
+					.addr = rte_cpu_to_be_64(addr),
 				};
 #if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
 				length += DATA_LEN(buf);
@@ -1464,7 +1476,7 @@
 
 			assert(mpw.state == MLX5_MPW_ENHANCED_STATE_OPENED);
 			assert(length == DATA_LEN(buf));
-			inl_hdr = htonl(length | MLX5_INLINE_SEG);
+			inl_hdr = rte_cpu_to_be_32(length | MLX5_INLINE_SEG);
 			addr = rte_pktmbuf_mtod(buf, uintptr_t);
 			mpw.data.raw = (volatile void *)
 				((uintptr_t)mpw.data.raw + inl_pad);
@@ -1520,9 +1532,9 @@
 			for (n = 0; n * RTE_CACHE_LINE_SIZE < length; n++)
 				rte_prefetch2((void *)(addr +
 						n * RTE_CACHE_LINE_SIZE));
-			naddr = htonll(addr);
+			naddr = rte_cpu_to_be_64(addr);
 			*dseg = (rte_v128u32_t) {
-				htonl(length),
+				rte_cpu_to_be_32(length),
 				mlx5_tx_mb2mr(txq, buf),
 				naddr,
 				naddr >> 32,
@@ -1550,7 +1562,7 @@
 		volatile struct mlx5_wqe *wqe = mpw.wqe;
 
 		/* Request completion on last WQE. */
-		wqe->ctrl[2] = htonl(8);
+		wqe->ctrl[2] = rte_cpu_to_be_32(8);
 		/* Save elts_head in unused "immediate" field of WQE. */
 		wqe->ctrl[3] = elts_head;
 		txq->elts_comp = 0;
@@ -1634,8 +1646,8 @@
 			(volatile struct mlx5_mini_cqe8 (*)[8])
 			(uintptr_t)(&(*rxq->cqes)[zip->ca & cqe_cnt].pkt_info);
 
-		len = ntohl((*mc)[zip->ai & 7].byte_cnt);
-		*rss_hash = ntohl((*mc)[zip->ai & 7].rx_hash_result);
+		len = rte_be_to_cpu_32((*mc)[zip->ai & 7].byte_cnt);
+		*rss_hash = rte_be_to_cpu_32((*mc)[zip->ai & 7].rx_hash_result);
 		if ((++zip->ai & 7) == 0) {
 			/* Invalidate consumed CQEs */
 			idx = zip->ca;
@@ -1683,7 +1695,7 @@
 							  cqe_cnt].pkt_info);
 
 			/* Fix endianness. */
-			zip->cqe_cnt = ntohl(cqe->byte_cnt);
+			zip->cqe_cnt = rte_be_to_cpu_32(cqe->byte_cnt);
 			/*
 			 * Current mini array position is the one returned by
 			 * check_cqe64().
@@ -1698,8 +1710,8 @@
 			--rxq->cq_ci;
 			zip->cq_ci = rxq->cq_ci + zip->cqe_cnt;
 			/* Get packet size to return. */
-			len = ntohl((*mc)[0].byte_cnt);
-			*rss_hash = ntohl((*mc)[0].rx_hash_result);
+			len = rte_be_to_cpu_32((*mc)[0].byte_cnt);
+			*rss_hash = rte_be_to_cpu_32((*mc)[0].rx_hash_result);
 			zip->ai = 1;
 			/* Prefetch all the entries to be invalidated */
 			idx = zip->ca;
@@ -1709,8 +1721,8 @@
 				++idx;
 			}
 		} else {
-			len = ntohl(cqe->byte_cnt);
-			*rss_hash = ntohl(cqe->rx_hash_res);
+			len = rte_be_to_cpu_32(cqe->byte_cnt);
+			*rss_hash = rte_be_to_cpu_32(cqe->rx_hash_res);
 		}
 		/* Error while receiving packet. */
 		if (unlikely(MLX5_CQE_OPCODE(op_own) == MLX5_CQE_RESP_ERR))
@@ -1734,7 +1746,7 @@
 rxq_cq_to_ol_flags(struct rxq *rxq, volatile struct mlx5_cqe *cqe)
 {
 	uint32_t ol_flags = 0;
-	uint16_t flags = ntohs(cqe->hdr_type_etc);
+	uint16_t flags = rte_be_to_cpu_16(cqe->hdr_type_etc);
 
 	ol_flags =
 		TRANSPOSE(flags,
@@ -1841,7 +1853,7 @@
 			    MLX5_FLOW_MARK_IS_VALID(cqe->sop_drop_qpn)) {
 				pkt->ol_flags |= PKT_RX_FDIR;
 				if (cqe->sop_drop_qpn !=
-				    htonl(MLX5_FLOW_MARK_DEFAULT)) {
+				    rte_cpu_to_be_32(MLX5_FLOW_MARK_DEFAULT)) {
 					uint32_t mark = cqe->sop_drop_qpn;
 
 					pkt->ol_flags |= PKT_RX_FDIR_ID;
@@ -1853,10 +1865,11 @@
 				pkt->ol_flags |= rxq_cq_to_ol_flags(rxq, cqe);
 			if (rxq->vlan_strip &&
 			    (cqe->hdr_type_etc &
-			     htons(MLX5_CQE_VLAN_STRIPPED))) {
+			     rte_cpu_to_be_16(MLX5_CQE_VLAN_STRIPPED))) {
 				pkt->ol_flags |= PKT_RX_VLAN_PKT |
 					PKT_RX_VLAN_STRIPPED;
-				pkt->vlan_tci = ntohs(cqe->vlan_info);
+				pkt->vlan_tci =
+					rte_be_to_cpu_16(cqe->vlan_info);
 			}
 			if (rxq->crc_present)
 				len -= ETHER_CRC_LEN;
@@ -1872,7 +1885,7 @@
 		 * of the buffers are already known, only the buffer address
 		 * changes.
 		 */
-		wqe->addr = htonll(rte_pktmbuf_mtod(rep, uintptr_t));
+		wqe->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(rep, uintptr_t));
 		if (len > DATA_LEN(seg)) {
 			len -= DATA_LEN(seg);
 			++NB_SEGS(pkt);
@@ -1900,9 +1913,9 @@
 	/* Update the consumer index. */
 	rxq->rq_ci = rq_ci >> sges_n;
 	rte_wmb();
-	*rxq->cq_db = htonl(rxq->cq_ci);
+	*rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
 	rte_wmb();
-	*rxq->rq_db = htonl(rxq->rq_ci);
+	*rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
 #ifdef MLX5_PMD_SOFT_COUNTERS
 	/* Increment packets counter. */
 	rxq->stats.ipackets += i;
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index 033e70f..73a4ce8 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -269,7 +269,7 @@ struct txq {
 		uintptr_t start; /* Start address of MR */
 		uintptr_t end; /* End address of MR */
 		struct ibv_mr *mr; /* Memory Region (for mp). */
-		uint32_t lkey; /* htonl(mr->lkey) */
+		uint32_t lkey; /* rte_cpu_to_be_32(mr->lkey) */
 	} mp2mr[MLX5_PMD_TX_MP_CACHE]; /* MP to MR translation table. */
 	uint16_t mr_cache_idx; /* Index of last hit entry. */
 	struct rte_mbuf *(*elts)[]; /* TX elements. */
@@ -492,7 +492,7 @@ int mlx5_tx_queue_setup(struct rte_eth_dev *, uint16_t, uint16_t, unsigned int,
 	}
 #endif /* NDEBUG */
 	++cq_ci;
-	txq->wqe_pi = ntohs(cqe->wqe_counter);
+	txq->wqe_pi = rte_be_to_cpu_16(cqe->wqe_counter);
 	ctrl = (volatile struct mlx5_wqe_ctrl *)
 		tx_mlx5_wqe(txq, txq->wqe_pi);
 	elts_tail = ctrl->ctrl3;
@@ -530,7 +530,7 @@ int mlx5_tx_queue_setup(struct rte_eth_dev *, uint16_t, uint16_t, unsigned int,
 	txq->elts_tail = elts_tail;
 	/* Update the consumer index. */
 	rte_wmb();
-	*txq->cq_db = htonl(cq_ci);
+	*txq->cq_db = rte_cpu_to_be_32(cq_ci);
 }
 
 /**
@@ -581,7 +581,7 @@ int mlx5_tx_queue_setup(struct rte_eth_dev *, uint16_t, uint16_t, unsigned int,
 		if (txq->mp2mr[i].start <= addr &&
 		    txq->mp2mr[i].end >= addr) {
 			assert(txq->mp2mr[i].lkey != (uint32_t)-1);
-			assert(htonl(txq->mp2mr[i].mr->lkey) ==
+			assert(rte_cpu_to_be_32(txq->mp2mr[i].mr->lkey) ==
 			       txq->mp2mr[i].lkey);
 			txq->mr_cache_idx = i;
 			return txq->mp2mr[i].lkey;
@@ -605,8 +605,8 @@ int mlx5_tx_queue_setup(struct rte_eth_dev *, uint16_t, uint16_t, unsigned int,
 	uint64_t *dst = (uint64_t *)((uintptr_t)txq->bf_reg);
 	volatile uint64_t *src = ((volatile uint64_t *)wqe);
 
-	rte_io_wmb();
-	*txq->qp_db = htonl(txq->wqe_ci);
+	rte_wmb();
+	*txq->qp_db = rte_cpu_to_be_32(txq->wqe_ci);
 	/* Ensure ordering between DB record and BF copy. */
 	rte_wmb();
 	*dst = *src;
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_sse.c b/drivers/net/mlx5/mlx5_rxtx_vec_sse.c
index 37854a7..0a5d025 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec_sse.c
+++ b/drivers/net/mlx5/mlx5_rxtx_vec_sse.c
@@ -291,7 +291,7 @@
 		/* Fill ESEG in the header. */
 		_mm_store_si128(t_wqe + 1,
 				_mm_set_epi16(0, 0, 0, 0,
-					      htons(len), cs_flags,
+					      rte_cpu_to_be_16(len), cs_flags,
 					      0, 0));
 		txq->wqe_ci = wqe_ci;
 	}
@@ -300,7 +300,7 @@
 	txq->elts_comp += (uint16_t)(elts_head - txq->elts_head);
 	txq->elts_head = elts_head;
 	if (txq->elts_comp >= MLX5_TX_COMP_THRESH) {
-		wqe->ctrl[2] = htonl(8);
+		wqe->ctrl[2] = rte_cpu_to_be_32(8);
 		wqe->ctrl[3] = txq->elts_head;
 		txq->elts_comp = 0;
 		++txq->cq_pi;
@@ -561,11 +561,11 @@
 		return;
 	}
 	for (i = 0; i < n; ++i)
-		wq[i].addr = htonll((uintptr_t)elts[i]->buf_addr +
-				    RTE_PKTMBUF_HEADROOM);
+		wq[i].addr = rte_cpu_to_be_64((uintptr_t)elts[i]->buf_addr +
+					      RTE_PKTMBUF_HEADROOM);
 	rxq->rq_ci += n;
 	rte_wmb();
-	*rxq->rq_db = htonl(rxq->rq_ci);
+	*rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
 }
 
 /**
@@ -1248,7 +1248,7 @@
 		}
 	}
 	rte_wmb();
-	*rxq->cq_db = htonl(rxq->cq_ci);
+	*rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
 	return rcvd_pkt;
 }
 
-- 
1.8.3.1

Thread overview: 25+ messages
2017-09-04 11:48 [dpdk-dev] [PATCH 1/3] net/mlx5: replace network to host macros Shachar Beiser
2017-09-04 11:48 ` [dpdk-dev] [PATCH 2/3] net/mlx5: fix TSO MLNX OFED 3.3 verification Shachar Beiser
2017-09-04 15:15   ` Nélio Laranjeiro
2017-09-04 11:48 ` [dpdk-dev] [PATCH 3/3] net/mlx5: fix interrupt enable return value Shachar Beiser
2017-09-04 15:24   ` Nélio Laranjeiro
2017-09-05  9:04     ` Shachar Beiser
2017-09-04 15:14 ` [dpdk-dev] [PATCH 1/3] net/mlx5: replace network to host macros Nélio Laranjeiro
2017-09-05  9:05   ` Shachar Beiser
2017-09-05 13:04 ` [dpdk-dev] [PATCH v2 " Shachar Beiser
2017-09-05 13:41   ` Nélio Laranjeiro
2017-09-05 13:04 ` [dpdk-dev] [PATCH v2 2/3] net/mlx5: fix TSO MLNX OFED 3.3 verification Shachar Beiser
2017-09-05 13:41   ` Nélio Laranjeiro
2017-09-14 13:43   ` [dpdk-dev] [PATCH v3 1/2] net/mlx5: replace network to host macros Shachar Beiser
2017-09-15 20:50     ` Yongseok Koh
2017-09-18  9:47       ` Shachar Beiser
2017-09-14 13:43   ` [dpdk-dev] [PATCH v3 2/2] net/mlx5: fix TSO MLNX OFED 3.3 verification Shachar Beiser
2017-09-17 10:42     ` [dpdk-dev] [PATCH v4 1/2] net/mlx5: replace network to host macros Shachar Beiser
2017-09-18 17:59       ` Yongseok Koh
2017-09-19  6:30       ` Nélio Laranjeiro
2017-09-17 10:42     ` [dpdk-dev] [PATCH v4 2/2] net/mlx5: fix TSO MLNX OFED 3.3 verification Shachar Beiser
2017-09-18 17:52       ` Yongseok Koh
2017-09-22 18:02         ` [dpdk-dev] [dpdk-stable] " Ferruh Yigit
2017-09-05 13:04 ` [dpdk-dev] [PATCH v2 3/3] net/mlx5: fix interrupt enable return value Shachar Beiser
2017-09-05 13:27   ` Adrien Mazarguil
2017-09-06 10:54     ` Shachar Beiser
