* [dpdk-dev] [PATCH 1/3] net/mlx5: replace network to host macros @ 2017-09-04 11:48 Shachar Beiser 2017-09-04 11:48 ` [dpdk-dev] [PATCH 2/3] net/mlx5: fix TSO MLNX OFED 3.3 verification Shachar Beiser ` (5 more replies) 0 siblings, 6 replies; 25+ messages in thread From: Shachar Beiser @ 2017-09-04 11:48 UTC (permalink / raw) To: dev; +Cc: Shachar Beiser, Adrien Mazarguil, Nelio Laranjeiro Fixes: 8bb5119634b7 ("net/mlx5: replace network byte order macro") Cc: shacharbe@mellanox.com Signed-off-by: Shachar Beiser <shacharbe@mellanox.com> --- drivers/net/mlx5/mlx5_mac.c | 8 ++- drivers/net/mlx5/mlx5_mr.c | 2 +- drivers/net/mlx5/mlx5_rxmode.c | 8 ++- drivers/net/mlx5/mlx5_rxq.c | 9 +-- drivers/net/mlx5/mlx5_rxtx.c | 131 +++++++++++++++++++---------------- drivers/net/mlx5/mlx5_rxtx.h | 12 ++-- drivers/net/mlx5/mlx5_rxtx_vec_sse.c | 12 ++-- 7 files changed, 102 insertions(+), 80 deletions(-) diff --git a/drivers/net/mlx5/mlx5_mac.c b/drivers/net/mlx5/mlx5_mac.c index 45d23e4..b3c3fa2 100644 --- a/drivers/net/mlx5/mlx5_mac.c +++ b/drivers/net/mlx5/mlx5_mac.c @@ -263,11 +263,15 @@ (*mac)[0], (*mac)[1], (*mac)[2], (*mac)[3], (*mac)[4], (*mac)[5] }, - .vlan_tag = (vlan_enabled ? htons(vlan_id) : 0), + .vlan_tag = (vlan_enabled ? + rte_cpu_to_be_16(vlan_id) + : 0), }, .mask = { .dst_mac = "\xff\xff\xff\xff\xff\xff", - .vlan_tag = (vlan_enabled ? htons(0xfff) : 0), + .vlan_tag = (vlan_enabled ? 
+ rte_cpu_to_be_16(0xfff) : + 0), }, }; DEBUG("%p: adding MAC address %02x:%02x:%02x:%02x:%02x:%02x index %u" diff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c index 9593830..9a9f73a 100644 --- a/drivers/net/mlx5/mlx5_mr.c +++ b/drivers/net/mlx5/mlx5_mr.c @@ -203,7 +203,7 @@ struct ibv_mr * txq_ctrl->txq.mp2mr[idx].start = (uintptr_t)mr->addr; txq_ctrl->txq.mp2mr[idx].end = (uintptr_t)mr->addr + mr->length; txq_ctrl->txq.mp2mr[idx].mr = mr; - txq_ctrl->txq.mp2mr[idx].lkey = htonl(mr->lkey); + txq_ctrl->txq.mp2mr[idx].lkey = rte_cpu_to_be_32(mr->lkey); DEBUG("%p: new MR lkey for MP \"%s\" (%p): 0x%08" PRIu32, (void *)txq_ctrl, mp->name, (void *)mp, txq_ctrl->txq.mp2mr[idx].lkey); diff --git a/drivers/net/mlx5/mlx5_rxmode.c b/drivers/net/mlx5/mlx5_rxmode.c index 4a51e47..db2e05b 100644 --- a/drivers/net/mlx5/mlx5_rxmode.c +++ b/drivers/net/mlx5/mlx5_rxmode.c @@ -159,14 +159,18 @@ mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], }, - .vlan_tag = (vlan_enabled ? htons(vlan_id) : 0), + .vlan_tag = (vlan_enabled ? + rte_cpu_to_be_16(vlan_id) : + 0), }, .mask = { .dst_mac = { mask[0], mask[1], mask[2], mask[3], mask[4], mask[5], }, - .vlan_tag = (vlan_enabled ? htons(0xfff) : 0), + .vlan_tag = (vlan_enabled ? + rte_cpu_to_be_16(0xfff) : + 0), }, }; diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c index 35c5cb4..437dc02 100644 --- a/drivers/net/mlx5/mlx5_rxq.c +++ b/drivers/net/mlx5/mlx5_rxq.c @@ -672,9 +672,10 @@ /* scat->addr must be able to store a pointer. */ assert(sizeof(scat->addr) >= sizeof(uintptr_t)); *scat = (struct mlx5_wqe_data_seg){ - .addr = htonll(rte_pktmbuf_mtod(buf, uintptr_t)), - .byte_count = htonl(DATA_LEN(buf)), - .lkey = htonl(rxq_ctrl->mr->lkey), + .addr = + rte_cpu_to_be_64(rte_pktmbuf_mtod(buf, uintptr_t)), + .byte_count = rte_cpu_to_be_32(DATA_LEN(buf)), + .lkey = rte_cpu_to_be_32(rxq_ctrl->mr->lkey), }; (*rxq_ctrl->rxq.elts)[i] = buf; } @@ -1077,7 +1078,7 @@ /* Update doorbell counter. 
*/ rxq_ctrl->rxq.rq_ci = desc >> rxq_ctrl->rxq.sges_n; rte_wmb(); - *rxq_ctrl->rxq.rq_db = htonl(rxq_ctrl->rxq.rq_ci); + *rxq_ctrl->rxq.rq_db = rte_cpu_to_be_32(rxq_ctrl->rxq.rq_ci); DEBUG("%p: rxq updated with %p", (void *)rxq_ctrl, (void *)&tmpl); assert(ret == 0); return 0; diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c index fe9e7ea..e1a35a3 100644 --- a/drivers/net/mlx5/mlx5_rxtx.c +++ b/drivers/net/mlx5/mlx5_rxtx.c @@ -306,7 +306,7 @@ op_own = cqe->op_own; if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED) - n = ntohl(cqe->byte_cnt); + n = rte_be_to_cpu_32(cqe->byte_cnt); else n = 1; cq_ci += n; @@ -434,7 +434,8 @@ raw = ((uint8_t *)(uintptr_t)wqe) + 2 * MLX5_WQE_DWORD_SIZE; /* Replace the Ethernet type by the VLAN if necessary. */ if (buf->ol_flags & PKT_TX_VLAN_PKT) { - uint32_t vlan = htonl(0x81000000 | buf->vlan_tci); + uint32_t vlan = rte_cpu_to_be_32(0x81000000 | + buf->vlan_tci); unsigned int len = 2 * ETHER_ADDR_LEN - 2; addr += 2; @@ -510,8 +511,10 @@ } else { /* NOP WQE. */ wqe->ctrl = (rte_v128u32_t){ - htonl(txq->wqe_ci << 8), - htonl(txq->qp_num_8s | 1), + rte_cpu_to_be_32( + txq->wqe_ci << 8), + rte_cpu_to_be_32( + txq->qp_num_8s | 1), 0, 0, }; @@ -550,7 +553,8 @@ max_wqe -= n; if (tso) { uint32_t inl = - htonl(copy_b | MLX5_INLINE_SEG); + rte_cpu_to_be_32(copy_b | + MLX5_INLINE_SEG); pkt_inline_sz = MLX5_WQE_DS(tso_header_sz) * @@ -603,9 +607,9 @@ ds = 3; use_dseg: /* Add the remaining packet as a simple ds. */ - naddr = htonll(addr); + naddr = rte_cpu_to_be_64(addr); *dseg = (rte_v128u32_t){ - htonl(length), + rte_cpu_to_be_32(length), mlx5_tx_mb2mr(txq, buf), naddr, naddr >> 32, @@ -642,9 +646,9 @@ total_length += length; #endif /* Store segment information. 
*/ - naddr = htonll(rte_pktmbuf_mtod(buf, uintptr_t)); + naddr = rte_cpu_to_be_64(rte_pktmbuf_mtod(buf, uintptr_t)); *dseg = (rte_v128u32_t){ - htonl(length), + rte_cpu_to_be_32(length), mlx5_tx_mb2mr(txq, buf), naddr, naddr >> 32, @@ -663,21 +667,23 @@ /* Initialize known and common part of the WQE structure. */ if (tso) { wqe->ctrl = (rte_v128u32_t){ - htonl((txq->wqe_ci << 8) | MLX5_OPCODE_TSO), - htonl(txq->qp_num_8s | ds), + rte_cpu_to_be_32((txq->wqe_ci << 8) | + MLX5_OPCODE_TSO), + rte_cpu_to_be_32(txq->qp_num_8s | ds), 0, 0, }; wqe->eseg = (rte_v128u32_t){ 0, - cs_flags | (htons(tso_segsz) << 16), + cs_flags | (rte_cpu_to_be_16(tso_segsz) << 16), 0, - (ehdr << 16) | htons(tso_header_sz), + (ehdr << 16) | rte_cpu_to_be_16(tso_header_sz), }; } else { wqe->ctrl = (rte_v128u32_t){ - htonl((txq->wqe_ci << 8) | MLX5_OPCODE_SEND), - htonl(txq->qp_num_8s | ds), + rte_cpu_to_be_32((txq->wqe_ci << 8) | + MLX5_OPCODE_SEND), + rte_cpu_to_be_32(txq->qp_num_8s | ds), 0, 0, }; @@ -685,7 +691,7 @@ 0, cs_flags, 0, - (ehdr << 16) | htons(pkt_inline_sz), + (ehdr << 16) | rte_cpu_to_be_16(pkt_inline_sz), }; } next_wqe: @@ -705,7 +711,7 @@ comp = txq->elts_comp + i + j + k; if (comp >= MLX5_TX_COMP_THRESH) { /* Request completion on last WQE. */ - last_wqe->ctrl2 = htonl(8); + last_wqe->ctrl2 = rte_cpu_to_be_32(8); /* Save elts_head in unused "immediate" field of WQE. 
*/ last_wqe->ctrl3 = txq->elts_head; txq->elts_comp = 0; @@ -744,13 +750,14 @@ mpw->len = length; mpw->total_len = 0; mpw->wqe = (volatile struct mlx5_wqe *)tx_mlx5_wqe(txq, idx); - mpw->wqe->eseg.mss = htons(length); + mpw->wqe->eseg.mss = rte_cpu_to_be_16(length); mpw->wqe->eseg.inline_hdr_sz = 0; mpw->wqe->eseg.rsvd0 = 0; mpw->wqe->eseg.rsvd1 = 0; mpw->wqe->eseg.rsvd2 = 0; - mpw->wqe->ctrl[0] = htonl((MLX5_OPC_MOD_MPW << 24) | - (txq->wqe_ci << 8) | MLX5_OPCODE_TSO); + mpw->wqe->ctrl[0] = rte_cpu_to_be_32((MLX5_OPC_MOD_MPW << 24) | + (txq->wqe_ci << 8) | + MLX5_OPCODE_TSO); mpw->wqe->ctrl[2] = 0; mpw->wqe->ctrl[3] = 0; mpw->data.dseg[0] = (volatile struct mlx5_wqe_data_seg *) @@ -779,7 +786,7 @@ * Store size in multiple of 16 bytes. Control and Ethernet segments * count as 2. */ - mpw->wqe->ctrl[1] = htonl(txq->qp_num_8s | (2 + num)); + mpw->wqe->ctrl[1] = rte_cpu_to_be_32(txq->qp_num_8s | (2 + num)); mpw->state = MLX5_MPW_STATE_CLOSED; if (num < 3) ++txq->wqe_ci; @@ -886,9 +893,9 @@ dseg = mpw.data.dseg[mpw.pkts_n]; addr = rte_pktmbuf_mtod(buf, uintptr_t); *dseg = (struct mlx5_wqe_data_seg){ - .byte_count = htonl(DATA_LEN(buf)), + .byte_count = rte_cpu_to_be_32(DATA_LEN(buf)), .lkey = mlx5_tx_mb2mr(txq, buf), - .addr = htonll(addr), + .addr = rte_cpu_to_be_64(addr), }; #if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG) length += DATA_LEN(buf); @@ -916,7 +923,7 @@ volatile struct mlx5_wqe *wqe = mpw.wqe; /* Request completion on last WQE. */ - wqe->ctrl[2] = htonl(8); + wqe->ctrl[2] = rte_cpu_to_be_32(8); /* Save elts_head in unused "immediate" field of WQE. 
*/ wqe->ctrl[3] = elts_head; txq->elts_comp = 0; @@ -956,12 +963,12 @@ mpw->len = length; mpw->total_len = 0; mpw->wqe = (volatile struct mlx5_wqe *)tx_mlx5_wqe(txq, idx); - mpw->wqe->ctrl[0] = htonl((MLX5_OPC_MOD_MPW << 24) | - (txq->wqe_ci << 8) | - MLX5_OPCODE_TSO); + mpw->wqe->ctrl[0] = rte_cpu_to_be_32((MLX5_OPC_MOD_MPW << 24) | + (txq->wqe_ci << 8) | + MLX5_OPCODE_TSO); mpw->wqe->ctrl[2] = 0; mpw->wqe->ctrl[3] = 0; - mpw->wqe->eseg.mss = htons(length); + mpw->wqe->eseg.mss = rte_cpu_to_be_16(length); mpw->wqe->eseg.inline_hdr_sz = 0; mpw->wqe->eseg.cs_flags = 0; mpw->wqe->eseg.rsvd0 = 0; @@ -992,9 +999,10 @@ * Store size in multiple of 16 bytes. Control and Ethernet segments * count as 2. */ - mpw->wqe->ctrl[1] = htonl(txq->qp_num_8s | MLX5_WQE_DS(size)); + mpw->wqe->ctrl[1] = rte_cpu_to_be_32(txq->qp_num_8s | + MLX5_WQE_DS(size)); mpw->state = MLX5_MPW_STATE_CLOSED; - inl->byte_cnt = htonl(mpw->total_len | MLX5_INLINE_SEG); + inl->byte_cnt = rte_cpu_to_be_32(mpw->total_len | MLX5_INLINE_SEG); txq->wqe_ci += (size + (MLX5_WQE_SIZE - 1)) / MLX5_WQE_SIZE; } @@ -1132,9 +1140,10 @@ dseg = mpw.data.dseg[mpw.pkts_n]; addr = rte_pktmbuf_mtod(buf, uintptr_t); *dseg = (struct mlx5_wqe_data_seg){ - .byte_count = htonl(DATA_LEN(buf)), + .byte_count = + rte_cpu_to_be_32(DATA_LEN(buf)), .lkey = mlx5_tx_mb2mr(txq, buf), - .addr = htonll(addr), + .addr = rte_cpu_to_be_64(addr), }; #if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG) length += DATA_LEN(buf); @@ -1206,7 +1215,7 @@ volatile struct mlx5_wqe *wqe = mpw.wqe; /* Request completion on last WQE. */ - wqe->ctrl[2] = htonl(8); + wqe->ctrl[2] = rte_cpu_to_be_32(8); /* Save elts_head in unused "immediate" field of WQE. 
*/ wqe->ctrl[3] = elts_head; txq->elts_comp = 0; @@ -1246,9 +1255,10 @@ mpw->pkts_n = 0; mpw->total_len = sizeof(struct mlx5_wqe); mpw->wqe = (volatile struct mlx5_wqe *)tx_mlx5_wqe(txq, idx); - mpw->wqe->ctrl[0] = htonl((MLX5_OPC_MOD_ENHANCED_MPSW << 24) | - (txq->wqe_ci << 8) | - MLX5_OPCODE_ENHANCED_MPSW); + mpw->wqe->ctrl[0] = + rte_cpu_to_be_32((MLX5_OPC_MOD_ENHANCED_MPSW << 24) | + (txq->wqe_ci << 8) | + MLX5_OPCODE_ENHANCED_MPSW); mpw->wqe->ctrl[2] = 0; mpw->wqe->ctrl[3] = 0; memset((void *)(uintptr_t)&mpw->wqe->eseg, 0, MLX5_WQE_DWORD_SIZE); @@ -1256,9 +1266,9 @@ uintptr_t addr = (uintptr_t)(mpw->wqe + 1); /* Pad the first 2 DWORDs with zero-length inline header. */ - *(volatile uint32_t *)addr = htonl(MLX5_INLINE_SEG); + *(volatile uint32_t *)addr = rte_cpu_to_be_32(MLX5_INLINE_SEG); *(volatile uint32_t *)(addr + MLX5_WQE_DWORD_SIZE) = - htonl(MLX5_INLINE_SEG); + rte_cpu_to_be_32(MLX5_INLINE_SEG); mpw->total_len += 2 * MLX5_WQE_DWORD_SIZE; /* Start from the next WQEBB. */ mpw->data.raw = (volatile void *)(tx_mlx5_wqe(txq, idx + 1)); @@ -1286,7 +1296,8 @@ /* Store size in multiple of 16 bytes. Control and Ethernet segments * count as 2. 
*/ - mpw->wqe->ctrl[1] = htonl(txq->qp_num_8s | MLX5_WQE_DS(mpw->total_len)); + mpw->wqe->ctrl[1] = rte_cpu_to_be_32(txq->qp_num_8s | + MLX5_WQE_DS(mpw->total_len)); mpw->state = MLX5_MPW_STATE_CLOSED; ret = (mpw->total_len + (MLX5_WQE_SIZE - 1)) / MLX5_WQE_SIZE; txq->wqe_ci += ret; @@ -1439,9 +1450,10 @@ dseg = mpw.data.dseg[mpw.pkts_n]; addr = rte_pktmbuf_mtod(buf, uintptr_t); *dseg = (struct mlx5_wqe_data_seg){ - .byte_count = htonl(DATA_LEN(buf)), + .byte_count = rte_cpu_to_be_32( + DATA_LEN(buf)), .lkey = mlx5_tx_mb2mr(txq, buf), - .addr = htonll(addr), + .addr = rte_cpu_to_be_64(addr), }; #if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG) length += DATA_LEN(buf); @@ -1464,7 +1476,7 @@ assert(mpw.state == MLX5_MPW_ENHANCED_STATE_OPENED); assert(length == DATA_LEN(buf)); - inl_hdr = htonl(length | MLX5_INLINE_SEG); + inl_hdr = rte_cpu_to_be_32(length | MLX5_INLINE_SEG); addr = rte_pktmbuf_mtod(buf, uintptr_t); mpw.data.raw = (volatile void *) ((uintptr_t)mpw.data.raw + inl_pad); @@ -1520,9 +1532,9 @@ for (n = 0; n * RTE_CACHE_LINE_SIZE < length; n++) rte_prefetch2((void *)(addr + n * RTE_CACHE_LINE_SIZE)); - naddr = htonll(addr); + naddr = rte_cpu_to_be_64(addr); *dseg = (rte_v128u32_t) { - htonl(length), + rte_cpu_to_be_32(length), mlx5_tx_mb2mr(txq, buf), naddr, naddr >> 32, @@ -1550,7 +1562,7 @@ volatile struct mlx5_wqe *wqe = mpw.wqe; /* Request completion on last WQE. */ - wqe->ctrl[2] = htonl(8); + wqe->ctrl[2] = rte_cpu_to_be_32(8); /* Save elts_head in unused "immediate" field of WQE. 
*/ wqe->ctrl[3] = elts_head; txq->elts_comp = 0; @@ -1634,8 +1646,8 @@ (volatile struct mlx5_mini_cqe8 (*)[8]) (uintptr_t)(&(*rxq->cqes)[zip->ca & cqe_cnt].pkt_info); - len = ntohl((*mc)[zip->ai & 7].byte_cnt); - *rss_hash = ntohl((*mc)[zip->ai & 7].rx_hash_result); + len = rte_be_to_cpu_32((*mc)[zip->ai & 7].byte_cnt); + *rss_hash = rte_be_to_cpu_32((*mc)[zip->ai & 7].rx_hash_result); if ((++zip->ai & 7) == 0) { /* Invalidate consumed CQEs */ idx = zip->ca; @@ -1683,7 +1695,7 @@ cqe_cnt].pkt_info); /* Fix endianness. */ - zip->cqe_cnt = ntohl(cqe->byte_cnt); + zip->cqe_cnt = rte_be_to_cpu_32(cqe->byte_cnt); /* * Current mini array position is the one returned by * check_cqe64(). @@ -1698,8 +1710,8 @@ --rxq->cq_ci; zip->cq_ci = rxq->cq_ci + zip->cqe_cnt; /* Get packet size to return. */ - len = ntohl((*mc)[0].byte_cnt); - *rss_hash = ntohl((*mc)[0].rx_hash_result); + len = rte_be_to_cpu_32((*mc)[0].byte_cnt); + *rss_hash = rte_be_to_cpu_32((*mc)[0].rx_hash_result); zip->ai = 1; /* Prefetch all the entries to be invalidated */ idx = zip->ca; @@ -1709,8 +1721,8 @@ ++idx; } } else { - len = ntohl(cqe->byte_cnt); - *rss_hash = ntohl(cqe->rx_hash_res); + len = rte_be_to_cpu_32(cqe->byte_cnt); + *rss_hash = rte_be_to_cpu_32(cqe->rx_hash_res); } /* Error while receiving packet. 
*/ if (unlikely(MLX5_CQE_OPCODE(op_own) == MLX5_CQE_RESP_ERR)) @@ -1734,7 +1746,7 @@ rxq_cq_to_ol_flags(struct rxq *rxq, volatile struct mlx5_cqe *cqe) { uint32_t ol_flags = 0; - uint16_t flags = ntohs(cqe->hdr_type_etc); + uint16_t flags = rte_be_to_cpu_16(cqe->hdr_type_etc); ol_flags = TRANSPOSE(flags, @@ -1841,7 +1853,7 @@ MLX5_FLOW_MARK_IS_VALID(cqe->sop_drop_qpn)) { pkt->ol_flags |= PKT_RX_FDIR; if (cqe->sop_drop_qpn != - htonl(MLX5_FLOW_MARK_DEFAULT)) { + rte_cpu_to_be_32(MLX5_FLOW_MARK_DEFAULT)) { uint32_t mark = cqe->sop_drop_qpn; pkt->ol_flags |= PKT_RX_FDIR_ID; @@ -1853,10 +1865,11 @@ pkt->ol_flags |= rxq_cq_to_ol_flags(rxq, cqe); if (rxq->vlan_strip && (cqe->hdr_type_etc & - htons(MLX5_CQE_VLAN_STRIPPED))) { + rte_cpu_to_be_16(MLX5_CQE_VLAN_STRIPPED))) { pkt->ol_flags |= PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED; - pkt->vlan_tci = ntohs(cqe->vlan_info); + pkt->vlan_tci = + rte_be_to_cpu_16(cqe->vlan_info); } if (rxq->crc_present) len -= ETHER_CRC_LEN; @@ -1872,7 +1885,7 @@ * of the buffers are already known, only the buffer address * changes. */ - wqe->addr = htonll(rte_pktmbuf_mtod(rep, uintptr_t)); + wqe->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(rep, uintptr_t)); if (len > DATA_LEN(seg)) { len -= DATA_LEN(seg); ++NB_SEGS(pkt); @@ -1900,9 +1913,9 @@ /* Update the consumer index. */ rxq->rq_ci = rq_ci >> sges_n; rte_wmb(); - *rxq->cq_db = htonl(rxq->cq_ci); + *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci); rte_wmb(); - *rxq->rq_db = htonl(rxq->rq_ci); + *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci); #ifdef MLX5_PMD_SOFT_COUNTERS /* Increment packets counter. */ rxq->stats.ipackets += i; diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h index 033e70f..73a4ce8 100644 --- a/drivers/net/mlx5/mlx5_rxtx.h +++ b/drivers/net/mlx5/mlx5_rxtx.h @@ -269,7 +269,7 @@ struct txq { uintptr_t start; /* Start address of MR */ uintptr_t end; /* End address of MR */ struct ibv_mr *mr; /* Memory Region (for mp). 
*/ - uint32_t lkey; /* htonl(mr->lkey) */ + uint32_t lkey; /* rte_cpu_to_be_32(mr->lkey) */ } mp2mr[MLX5_PMD_TX_MP_CACHE]; /* MP to MR translation table. */ uint16_t mr_cache_idx; /* Index of last hit entry. */ struct rte_mbuf *(*elts)[]; /* TX elements. */ @@ -492,7 +492,7 @@ int mlx5_tx_queue_setup(struct rte_eth_dev *, uint16_t, uint16_t, unsigned int, } #endif /* NDEBUG */ ++cq_ci; - txq->wqe_pi = ntohs(cqe->wqe_counter); + txq->wqe_pi = rte_be_to_cpu_16(cqe->wqe_counter); ctrl = (volatile struct mlx5_wqe_ctrl *) tx_mlx5_wqe(txq, txq->wqe_pi); elts_tail = ctrl->ctrl3; @@ -530,7 +530,7 @@ int mlx5_tx_queue_setup(struct rte_eth_dev *, uint16_t, uint16_t, unsigned int, txq->elts_tail = elts_tail; /* Update the consumer index. */ rte_wmb(); - *txq->cq_db = htonl(cq_ci); + *txq->cq_db = rte_cpu_to_be_32(cq_ci); } /** @@ -581,7 +581,7 @@ int mlx5_tx_queue_setup(struct rte_eth_dev *, uint16_t, uint16_t, unsigned int, if (txq->mp2mr[i].start <= addr && txq->mp2mr[i].end >= addr) { assert(txq->mp2mr[i].lkey != (uint32_t)-1); - assert(htonl(txq->mp2mr[i].mr->lkey) == + assert(rte_cpu_to_be_32(txq->mp2mr[i].mr->lkey) == txq->mp2mr[i].lkey); txq->mr_cache_idx = i; return txq->mp2mr[i].lkey; @@ -605,8 +605,8 @@ int mlx5_tx_queue_setup(struct rte_eth_dev *, uint16_t, uint16_t, unsigned int, uint64_t *dst = (uint64_t *)((uintptr_t)txq->bf_reg); volatile uint64_t *src = ((volatile uint64_t *)wqe); - rte_io_wmb(); - *txq->qp_db = htonl(txq->wqe_ci); + rte_wmb(); + *txq->qp_db = rte_cpu_to_be_32(txq->wqe_ci); /* Ensure ordering between DB record and BF copy. */ rte_wmb(); *dst = *src; diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_sse.c b/drivers/net/mlx5/mlx5_rxtx_vec_sse.c index 37854a7..0a5d025 100644 --- a/drivers/net/mlx5/mlx5_rxtx_vec_sse.c +++ b/drivers/net/mlx5/mlx5_rxtx_vec_sse.c @@ -291,7 +291,7 @@ /* Fill ESEG in the header. 
*/ _mm_store_si128(t_wqe + 1, _mm_set_epi16(0, 0, 0, 0, - htons(len), cs_flags, + rte_cpu_to_be_16(len), cs_flags, 0, 0)); txq->wqe_ci = wqe_ci; } @@ -300,7 +300,7 @@ txq->elts_comp += (uint16_t)(elts_head - txq->elts_head); txq->elts_head = elts_head; if (txq->elts_comp >= MLX5_TX_COMP_THRESH) { - wqe->ctrl[2] = htonl(8); + wqe->ctrl[2] = rte_cpu_to_be_32(8); wqe->ctrl[3] = txq->elts_head; txq->elts_comp = 0; ++txq->cq_pi; @@ -561,11 +561,11 @@ return; } for (i = 0; i < n; ++i) - wq[i].addr = htonll((uintptr_t)elts[i]->buf_addr + - RTE_PKTMBUF_HEADROOM); + wq[i].addr = rte_cpu_to_be_64((uintptr_t)elts[i]->buf_addr + + RTE_PKTMBUF_HEADROOM); rxq->rq_ci += n; rte_wmb(); - *rxq->rq_db = htonl(rxq->rq_ci); + *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci); } /** @@ -1248,7 +1248,7 @@ } } rte_wmb(); - *rxq->cq_db = htonl(rxq->cq_ci); + *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci); return rcvd_pkt; } -- 1.8.3.1 ^ permalink raw reply [flat|nested] 25+ messages in thread
* [dpdk-dev] [PATCH 2/3] net/mlx5: fix TSO MLNX OFED 3.3 verification 2017-09-04 11:48 [dpdk-dev] [PATCH 1/3] net/mlx5: replace network to host macros Shachar Beiser @ 2017-09-04 11:48 ` Shachar Beiser 2017-09-04 15:15 ` Nélio Laranjeiro 2017-09-04 11:48 ` [dpdk-dev] [PATCH 3/3] net/mlx5: fix interrupt enable return value Shachar Beiser ` (4 subsequent siblings) 5 siblings, 1 reply; 25+ messages in thread From: Shachar Beiser @ 2017-09-04 11:48 UTC (permalink / raw) To: dev; +Cc: Shachar Beiser, Adrien Mazarguil, Nelio Laranjeiro, stable Fixes: 3cf87e68d97b ("net/mlx5: remove old MLNX OFED 3.3 verification") Cc: stable@dpdk.org Signed-off-by: Shachar Beiser <shacharbe@mellanox.com> --- drivers/net/mlx5/mlx5_prm.h | 3 --- 1 file changed, 3 deletions(-) diff --git a/drivers/net/mlx5/mlx5_prm.h b/drivers/net/mlx5/mlx5_prm.h index 608072f..8b82b5e 100644 --- a/drivers/net/mlx5/mlx5_prm.h +++ b/drivers/net/mlx5/mlx5_prm.h @@ -89,9 +89,6 @@ /* Default max packet length to be inlined. */ #define MLX5_EMPW_MAX_INLINE_LEN (4U * MLX5_WQE_SIZE) -#ifndef HAVE_VERBS_MLX5_OPCODE_TSO -#define MLX5_OPCODE_TSO MLX5_OPCODE_LSO_MPW /* Compat with OFED 3.3. */ -#endif #define MLX5_OPC_MOD_ENHANCED_MPSW 0 #define MLX5_OPCODE_ENHANCED_MPSW 0x29 -- 1.8.3.1 ^ permalink raw reply [flat|nested] 25+ messages in thread
* Re: [dpdk-dev] [PATCH 2/3] net/mlx5: fix TSO MLNX OFED 3.3 verification 2017-09-04 11:48 ` [dpdk-dev] [PATCH 2/3] net/mlx5: fix TSO MLNX OFED 3.3 verification Shachar Beiser @ 2017-09-04 15:15 ` Nélio Laranjeiro 0 siblings, 0 replies; 25+ messages in thread From: Nélio Laranjeiro @ 2017-09-04 15:15 UTC (permalink / raw) To: Shachar Beiser; +Cc: dev, Adrien Mazarguil, stable On Mon, Sep 04, 2017 at 11:48:46AM +0000, Shachar Beiser wrote: > Fixes: 3cf87e68d97b ("net/mlx5: remove old MLNX OFED 3.3 verification") > Cc: stable@dpdk.org > > Signed-off-by: Shachar Beiser <shacharbe@mellanox.com> > --- > drivers/net/mlx5/mlx5_prm.h | 3 --- > 1 file changed, 3 deletions(-) > > diff --git a/drivers/net/mlx5/mlx5_prm.h b/drivers/net/mlx5/mlx5_prm.h > index 608072f..8b82b5e 100644 > --- a/drivers/net/mlx5/mlx5_prm.h > +++ b/drivers/net/mlx5/mlx5_prm.h > @@ -89,9 +89,6 @@ > /* Default max packet length to be inlined. */ > #define MLX5_EMPW_MAX_INLINE_LEN (4U * MLX5_WQE_SIZE) > > -#ifndef HAVE_VERBS_MLX5_OPCODE_TSO > -#define MLX5_OPCODE_TSO MLX5_OPCODE_LSO_MPW /* Compat with OFED 3.3. */ > -#endif > > #define MLX5_OPC_MOD_ENHANCED_MPSW 0 > #define MLX5_OPCODE_ENHANCED_MPSW 0x29 > -- > 1.8.3.1 > Acked-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com> -- Nélio Laranjeiro 6WIND ^ permalink raw reply [flat|nested] 25+ messages in thread
* [dpdk-dev] [PATCH 3/3] net/mlx5: fix interrupt enable return value 2017-09-04 11:48 [dpdk-dev] [PATCH 1/3] net/mlx5: replace network to host macros Shachar Beiser 2017-09-04 11:48 ` [dpdk-dev] [PATCH 2/3] net/mlx5: fix TSO MLNX OFED 3.3 verification Shachar Beiser @ 2017-09-04 11:48 ` Shachar Beiser 2017-09-04 15:24 ` Nélio Laranjeiro 2017-09-04 15:14 ` [dpdk-dev] [PATCH 1/3] net/mlx5: replace network to host macros Nélio Laranjeiro ` (3 subsequent siblings) 5 siblings, 1 reply; 25+ messages in thread From: Shachar Beiser @ 2017-09-04 11:48 UTC (permalink / raw) To: dev; +Cc: Shachar Beiser, Adrien Mazarguil, Nelio Laranjeiro, stable Fixes: 3c7d44af252a ("net/mlx5: support user space Rx interrupt event") Cc: stable@dpdk.org Signed-off-by: Shachar Beiser <shacharbe@mellanox.com> --- drivers/net/mlx5/mlx5_rxq.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c index 437dc02..24887fb 100644 --- a/drivers/net/mlx5/mlx5_rxq.c +++ b/drivers/net/mlx5/mlx5_rxq.c @@ -1330,7 +1330,7 @@ struct priv *priv = mlx5_get_priv(dev); struct rxq *rxq = (*priv->rxqs)[rx_queue_id]; struct rxq_ctrl *rxq_ctrl = container_of(rxq, struct rxq_ctrl, rxq); - int ret; + int ret = 0; if (!rxq || !rxq_ctrl->channel) { ret = EINVAL; -- 1.8.3.1 ^ permalink raw reply [flat|nested] 25+ messages in thread
* Re: [dpdk-dev] [PATCH 3/3] net/mlx5: fix interrupt enable return value 2017-09-04 11:48 ` [dpdk-dev] [PATCH 3/3] net/mlx5: fix interrupt enable return value Shachar Beiser @ 2017-09-04 15:24 ` Nélio Laranjeiro 2017-09-05 9:04 ` Shachar Beiser 0 siblings, 1 reply; 25+ messages in thread From: Nélio Laranjeiro @ 2017-09-04 15:24 UTC (permalink / raw) To: Shachar Beiser; +Cc: dev, Adrien Mazarguil, stable On Mon, Sep 04, 2017 at 11:48:47AM +0000, Shachar Beiser wrote: > Fixes: 3c7d44af252a ("net/mlx5: support user space Rx interrupt event") It should fix commit e1016cb733 ("net/mlx5: fix Rx interrupts management") > Cc: stable@dpdk.org > > Signed-off-by: Shachar Beiser <shacharbe@mellanox.com> > --- > drivers/net/mlx5/mlx5_rxq.c | 2 +- > 1 file changed, 1 insertion(+), 1 deletion(-) > > diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c > index 437dc02..24887fb 100644 > --- a/drivers/net/mlx5/mlx5_rxq.c > +++ b/drivers/net/mlx5/mlx5_rxq.c > @@ -1330,7 +1330,7 @@ > struct priv *priv = mlx5_get_priv(dev); > struct rxq *rxq = (*priv->rxqs)[rx_queue_id]; > struct rxq_ctrl *rxq_ctrl = container_of(rxq, struct rxq_ctrl, rxq); > - int ret; > + int ret = 0; > > if (!rxq || !rxq_ctrl->channel) { > ret = EINVAL; > -- > 1.8.3.1 > -- Nélio Laranjeiro 6WIND ^ permalink raw reply [flat|nested] 25+ messages in thread
* Re: [dpdk-dev] [PATCH 3/3] net/mlx5: fix interrupt enable return value 2017-09-04 15:24 ` Nélio Laranjeiro @ 2017-09-05 9:04 ` Shachar Beiser 0 siblings, 0 replies; 25+ messages in thread From: Shachar Beiser @ 2017-09-05 9:04 UTC (permalink / raw) To: Nélio Laranjeiro; +Cc: dev, Adrien Mazarguil, stable OK, I will fix to " commit e1016cb733 ("net/mlx5: fix Rx interrupts management") " > -----Original Message----- > From: Nélio Laranjeiro [mailto:nelio.laranjeiro@6wind.com] > Sent: Monday, September 4, 2017 6:24 PM > To: Shachar Beiser <shacharbe@mellanox.com> > Cc: dev@dpdk.org; Adrien Mazarguil <adrien.mazarguil@6wind.com>; > stable@dpdk.org > Subject: Re: [PATCH 3/3] net/mlx5: fix interrupt enable return value > > On Mon, Sep 04, 2017 at 11:48:47AM +0000, Shachar Beiser wrote: > > Fixes: 3c7d44af252a ("net/mlx5: support user space Rx interrupt > > event") > > It should fix commit e1016cb733 ("net/mlx5: fix Rx interrupts management") > > > Cc: stable@dpdk.org > > > > Signed-off-by: Shachar Beiser <shacharbe@mellanox.com> > > --- > > drivers/net/mlx5/mlx5_rxq.c | 2 +- > > 1 file changed, 1 insertion(+), 1 deletion(-) > > > > diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c > > index 437dc02..24887fb 100644 > > --- a/drivers/net/mlx5/mlx5_rxq.c > > +++ b/drivers/net/mlx5/mlx5_rxq.c > > @@ -1330,7 +1330,7 @@ > > struct priv *priv = mlx5_get_priv(dev); > > struct rxq *rxq = (*priv->rxqs)[rx_queue_id]; > > struct rxq_ctrl *rxq_ctrl = container_of(rxq, struct rxq_ctrl, rxq); > > - int ret; > > + int ret = 0; > > > > if (!rxq || !rxq_ctrl->channel) { > > ret = EINVAL; > > -- > > 1.8.3.1 > > > > -- > Nélio Laranjeiro > 6WIND ^ permalink raw reply [flat|nested] 25+ messages in thread
* Re: [dpdk-dev] [PATCH 1/3] net/mlx5: replace network to host macros 2017-09-04 11:48 [dpdk-dev] [PATCH 1/3] net/mlx5: replace network to host macros Shachar Beiser 2017-09-04 11:48 ` [dpdk-dev] [PATCH 2/3] net/mlx5: fix TSO MLNX OFED 3.3 verification Shachar Beiser 2017-09-04 11:48 ` [dpdk-dev] [PATCH 3/3] net/mlx5: fix interrupt enable return value Shachar Beiser @ 2017-09-04 15:14 ` Nélio Laranjeiro 2017-09-05 9:05 ` Shachar Beiser 2017-09-05 13:04 ` [dpdk-dev] [PATCH v2 " Shachar Beiser ` (2 subsequent siblings) 5 siblings, 1 reply; 25+ messages in thread From: Nélio Laranjeiro @ 2017-09-04 15:14 UTC (permalink / raw) To: Shachar Beiser; +Cc: dev, Adrien Mazarguil The title is a little wrong, it also replace the host to network. On Mon, Sep 04, 2017 at 11:48:45AM +0000, Shachar Beiser wrote: > Fixes: 8bb5119634b7 ("net/mlx5: replace network byte order macro") This commit does not exists, are you sure of it? > Cc: shacharbe@mellanox.com > > Signed-off-by: Shachar Beiser <shacharbe@mellanox.com> -- Nélio Laranjeiro 6WIND ^ permalink raw reply [flat|nested] 25+ messages in thread
* Re: [dpdk-dev] [PATCH 1/3] net/mlx5: replace network to host macros 2017-09-04 15:14 ` [dpdk-dev] [PATCH 1/3] net/mlx5: replace network to host macros Nélio Laranjeiro @ 2017-09-05 9:05 ` Shachar Beiser 0 siblings, 0 replies; 25+ messages in thread From: Shachar Beiser @ 2017-09-05 9:05 UTC (permalink / raw) To: Nélio Laranjeiro; +Cc: dev, Adrien Mazarguil Does it OK to change it to : " Fixes: 1be17b6a5539 ("eal: introduce big and little endian types")" ? > -----Original Message----- > From: Nélio Laranjeiro [mailto:nelio.laranjeiro@6wind.com] > Sent: Monday, September 4, 2017 6:15 PM > To: Shachar Beiser <shacharbe@mellanox.com> > Cc: dev@dpdk.org; Adrien Mazarguil <adrien.mazarguil@6wind.com> > Subject: Re: [PATCH 1/3] net/mlx5: replace network to host macros > > The title is a little wrong, it also replace the host to network. > > On Mon, Sep 04, 2017 at 11:48:45AM +0000, Shachar Beiser wrote: > > Fixes: 8bb5119634b7 ("net/mlx5: replace network byte order macro") > > This commit does not exists, are you sure of it? > > > Cc: shacharbe@mellanox.com > > > > Signed-off-by: Shachar Beiser <shacharbe@mellanox.com> > > -- > Nélio Laranjeiro > 6WIND ^ permalink raw reply [flat|nested] 25+ messages in thread
* [dpdk-dev] [PATCH v2 1/3] net/mlx5: replace network to host macros 2017-09-04 11:48 [dpdk-dev] [PATCH 1/3] net/mlx5: replace network to host macros Shachar Beiser ` (2 preceding siblings ...) 2017-09-04 15:14 ` [dpdk-dev] [PATCH 1/3] net/mlx5: replace network to host macros Nélio Laranjeiro @ 2017-09-05 13:04 ` Shachar Beiser 2017-09-05 13:41 ` Nélio Laranjeiro 2017-09-05 13:04 ` [dpdk-dev] [PATCH v2 2/3] net/mlx5: fix TSO MLNX OFED 3.3 verification Shachar Beiser 2017-09-05 13:04 ` [dpdk-dev] [PATCH v2 3/3] net/mlx5: fix interrupt enable return value Shachar Beiser 5 siblings, 1 reply; 25+ messages in thread From: Shachar Beiser @ 2017-09-05 13:04 UTC (permalink / raw) To: dev; +Cc: Shachar Beiser, Adrien Mazarguil, Nelio Laranjeiro Signed-off-by: Shachar Beiser <shacharbe@mellanox.com> --- drivers/net/mlx5/mlx5_mac.c | 8 ++- drivers/net/mlx5/mlx5_mr.c | 2 +- drivers/net/mlx5/mlx5_rxmode.c | 8 ++- drivers/net/mlx5/mlx5_rxq.c | 9 +-- drivers/net/mlx5/mlx5_rxtx.c | 131 +++++++++++++++++++---------------- drivers/net/mlx5/mlx5_rxtx.h | 12 ++-- drivers/net/mlx5/mlx5_rxtx_vec_sse.c | 12 ++-- 7 files changed, 102 insertions(+), 80 deletions(-) diff --git a/drivers/net/mlx5/mlx5_mac.c b/drivers/net/mlx5/mlx5_mac.c index 45d23e4..b3c3fa2 100644 --- a/drivers/net/mlx5/mlx5_mac.c +++ b/drivers/net/mlx5/mlx5_mac.c @@ -263,11 +263,15 @@ (*mac)[0], (*mac)[1], (*mac)[2], (*mac)[3], (*mac)[4], (*mac)[5] }, - .vlan_tag = (vlan_enabled ? htons(vlan_id) : 0), + .vlan_tag = (vlan_enabled ? + rte_cpu_to_be_16(vlan_id) + : 0), }, .mask = { .dst_mac = "\xff\xff\xff\xff\xff\xff", - .vlan_tag = (vlan_enabled ? htons(0xfff) : 0), + .vlan_tag = (vlan_enabled ? 
+ rte_cpu_to_be_16(0xfff) : + 0), }, }; DEBUG("%p: adding MAC address %02x:%02x:%02x:%02x:%02x:%02x index %u" diff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c index 9593830..9a9f73a 100644 --- a/drivers/net/mlx5/mlx5_mr.c +++ b/drivers/net/mlx5/mlx5_mr.c @@ -203,7 +203,7 @@ struct ibv_mr * txq_ctrl->txq.mp2mr[idx].start = (uintptr_t)mr->addr; txq_ctrl->txq.mp2mr[idx].end = (uintptr_t)mr->addr + mr->length; txq_ctrl->txq.mp2mr[idx].mr = mr; - txq_ctrl->txq.mp2mr[idx].lkey = htonl(mr->lkey); + txq_ctrl->txq.mp2mr[idx].lkey = rte_cpu_to_be_32(mr->lkey); DEBUG("%p: new MR lkey for MP \"%s\" (%p): 0x%08" PRIu32, (void *)txq_ctrl, mp->name, (void *)mp, txq_ctrl->txq.mp2mr[idx].lkey); diff --git a/drivers/net/mlx5/mlx5_rxmode.c b/drivers/net/mlx5/mlx5_rxmode.c index 4a51e47..db2e05b 100644 --- a/drivers/net/mlx5/mlx5_rxmode.c +++ b/drivers/net/mlx5/mlx5_rxmode.c @@ -159,14 +159,18 @@ mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], }, - .vlan_tag = (vlan_enabled ? htons(vlan_id) : 0), + .vlan_tag = (vlan_enabled ? + rte_cpu_to_be_16(vlan_id) : + 0), }, .mask = { .dst_mac = { mask[0], mask[1], mask[2], mask[3], mask[4], mask[5], }, - .vlan_tag = (vlan_enabled ? htons(0xfff) : 0), + .vlan_tag = (vlan_enabled ? + rte_cpu_to_be_16(0xfff) : + 0), }, }; diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c index 35c5cb4..437dc02 100644 --- a/drivers/net/mlx5/mlx5_rxq.c +++ b/drivers/net/mlx5/mlx5_rxq.c @@ -672,9 +672,10 @@ /* scat->addr must be able to store a pointer. */ assert(sizeof(scat->addr) >= sizeof(uintptr_t)); *scat = (struct mlx5_wqe_data_seg){ - .addr = htonll(rte_pktmbuf_mtod(buf, uintptr_t)), - .byte_count = htonl(DATA_LEN(buf)), - .lkey = htonl(rxq_ctrl->mr->lkey), + .addr = + rte_cpu_to_be_64(rte_pktmbuf_mtod(buf, uintptr_t)), + .byte_count = rte_cpu_to_be_32(DATA_LEN(buf)), + .lkey = rte_cpu_to_be_32(rxq_ctrl->mr->lkey), }; (*rxq_ctrl->rxq.elts)[i] = buf; } @@ -1077,7 +1078,7 @@ /* Update doorbell counter. 
*/ rxq_ctrl->rxq.rq_ci = desc >> rxq_ctrl->rxq.sges_n; rte_wmb(); - *rxq_ctrl->rxq.rq_db = htonl(rxq_ctrl->rxq.rq_ci); + *rxq_ctrl->rxq.rq_db = rte_cpu_to_be_32(rxq_ctrl->rxq.rq_ci); DEBUG("%p: rxq updated with %p", (void *)rxq_ctrl, (void *)&tmpl); assert(ret == 0); return 0; diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c index fe9e7ea..e1a35a3 100644 --- a/drivers/net/mlx5/mlx5_rxtx.c +++ b/drivers/net/mlx5/mlx5_rxtx.c @@ -306,7 +306,7 @@ op_own = cqe->op_own; if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED) - n = ntohl(cqe->byte_cnt); + n = rte_be_to_cpu_32(cqe->byte_cnt); else n = 1; cq_ci += n; @@ -434,7 +434,8 @@ raw = ((uint8_t *)(uintptr_t)wqe) + 2 * MLX5_WQE_DWORD_SIZE; /* Replace the Ethernet type by the VLAN if necessary. */ if (buf->ol_flags & PKT_TX_VLAN_PKT) { - uint32_t vlan = htonl(0x81000000 | buf->vlan_tci); + uint32_t vlan = rte_cpu_to_be_32(0x81000000 | + buf->vlan_tci); unsigned int len = 2 * ETHER_ADDR_LEN - 2; addr += 2; @@ -510,8 +511,10 @@ } else { /* NOP WQE. */ wqe->ctrl = (rte_v128u32_t){ - htonl(txq->wqe_ci << 8), - htonl(txq->qp_num_8s | 1), + rte_cpu_to_be_32( + txq->wqe_ci << 8), + rte_cpu_to_be_32( + txq->qp_num_8s | 1), 0, 0, }; @@ -550,7 +553,8 @@ max_wqe -= n; if (tso) { uint32_t inl = - htonl(copy_b | MLX5_INLINE_SEG); + rte_cpu_to_be_32(copy_b | + MLX5_INLINE_SEG); pkt_inline_sz = MLX5_WQE_DS(tso_header_sz) * @@ -603,9 +607,9 @@ ds = 3; use_dseg: /* Add the remaining packet as a simple ds. */ - naddr = htonll(addr); + naddr = rte_cpu_to_be_64(addr); *dseg = (rte_v128u32_t){ - htonl(length), + rte_cpu_to_be_32(length), mlx5_tx_mb2mr(txq, buf), naddr, naddr >> 32, @@ -642,9 +646,9 @@ total_length += length; #endif /* Store segment information. 
*/ - naddr = htonll(rte_pktmbuf_mtod(buf, uintptr_t)); + naddr = rte_cpu_to_be_64(rte_pktmbuf_mtod(buf, uintptr_t)); *dseg = (rte_v128u32_t){ - htonl(length), + rte_cpu_to_be_32(length), mlx5_tx_mb2mr(txq, buf), naddr, naddr >> 32, @@ -663,21 +667,23 @@ /* Initialize known and common part of the WQE structure. */ if (tso) { wqe->ctrl = (rte_v128u32_t){ - htonl((txq->wqe_ci << 8) | MLX5_OPCODE_TSO), - htonl(txq->qp_num_8s | ds), + rte_cpu_to_be_32((txq->wqe_ci << 8) | + MLX5_OPCODE_TSO), + rte_cpu_to_be_32(txq->qp_num_8s | ds), 0, 0, }; wqe->eseg = (rte_v128u32_t){ 0, - cs_flags | (htons(tso_segsz) << 16), + cs_flags | (rte_cpu_to_be_16(tso_segsz) << 16), 0, - (ehdr << 16) | htons(tso_header_sz), + (ehdr << 16) | rte_cpu_to_be_16(tso_header_sz), }; } else { wqe->ctrl = (rte_v128u32_t){ - htonl((txq->wqe_ci << 8) | MLX5_OPCODE_SEND), - htonl(txq->qp_num_8s | ds), + rte_cpu_to_be_32((txq->wqe_ci << 8) | + MLX5_OPCODE_SEND), + rte_cpu_to_be_32(txq->qp_num_8s | ds), 0, 0, }; @@ -685,7 +691,7 @@ 0, cs_flags, 0, - (ehdr << 16) | htons(pkt_inline_sz), + (ehdr << 16) | rte_cpu_to_be_16(pkt_inline_sz), }; } next_wqe: @@ -705,7 +711,7 @@ comp = txq->elts_comp + i + j + k; if (comp >= MLX5_TX_COMP_THRESH) { /* Request completion on last WQE. */ - last_wqe->ctrl2 = htonl(8); + last_wqe->ctrl2 = rte_cpu_to_be_32(8); /* Save elts_head in unused "immediate" field of WQE. 
*/ last_wqe->ctrl3 = txq->elts_head; txq->elts_comp = 0; @@ -744,13 +750,14 @@ mpw->len = length; mpw->total_len = 0; mpw->wqe = (volatile struct mlx5_wqe *)tx_mlx5_wqe(txq, idx); - mpw->wqe->eseg.mss = htons(length); + mpw->wqe->eseg.mss = rte_cpu_to_be_16(length); mpw->wqe->eseg.inline_hdr_sz = 0; mpw->wqe->eseg.rsvd0 = 0; mpw->wqe->eseg.rsvd1 = 0; mpw->wqe->eseg.rsvd2 = 0; - mpw->wqe->ctrl[0] = htonl((MLX5_OPC_MOD_MPW << 24) | - (txq->wqe_ci << 8) | MLX5_OPCODE_TSO); + mpw->wqe->ctrl[0] = rte_cpu_to_be_32((MLX5_OPC_MOD_MPW << 24) | + (txq->wqe_ci << 8) | + MLX5_OPCODE_TSO); mpw->wqe->ctrl[2] = 0; mpw->wqe->ctrl[3] = 0; mpw->data.dseg[0] = (volatile struct mlx5_wqe_data_seg *) @@ -779,7 +786,7 @@ * Store size in multiple of 16 bytes. Control and Ethernet segments * count as 2. */ - mpw->wqe->ctrl[1] = htonl(txq->qp_num_8s | (2 + num)); + mpw->wqe->ctrl[1] = rte_cpu_to_be_32(txq->qp_num_8s | (2 + num)); mpw->state = MLX5_MPW_STATE_CLOSED; if (num < 3) ++txq->wqe_ci; @@ -886,9 +893,9 @@ dseg = mpw.data.dseg[mpw.pkts_n]; addr = rte_pktmbuf_mtod(buf, uintptr_t); *dseg = (struct mlx5_wqe_data_seg){ - .byte_count = htonl(DATA_LEN(buf)), + .byte_count = rte_cpu_to_be_32(DATA_LEN(buf)), .lkey = mlx5_tx_mb2mr(txq, buf), - .addr = htonll(addr), + .addr = rte_cpu_to_be_64(addr), }; #if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG) length += DATA_LEN(buf); @@ -916,7 +923,7 @@ volatile struct mlx5_wqe *wqe = mpw.wqe; /* Request completion on last WQE. */ - wqe->ctrl[2] = htonl(8); + wqe->ctrl[2] = rte_cpu_to_be_32(8); /* Save elts_head in unused "immediate" field of WQE. 
*/ wqe->ctrl[3] = elts_head; txq->elts_comp = 0; @@ -956,12 +963,12 @@ mpw->len = length; mpw->total_len = 0; mpw->wqe = (volatile struct mlx5_wqe *)tx_mlx5_wqe(txq, idx); - mpw->wqe->ctrl[0] = htonl((MLX5_OPC_MOD_MPW << 24) | - (txq->wqe_ci << 8) | - MLX5_OPCODE_TSO); + mpw->wqe->ctrl[0] = rte_cpu_to_be_32((MLX5_OPC_MOD_MPW << 24) | + (txq->wqe_ci << 8) | + MLX5_OPCODE_TSO); mpw->wqe->ctrl[2] = 0; mpw->wqe->ctrl[3] = 0; - mpw->wqe->eseg.mss = htons(length); + mpw->wqe->eseg.mss = rte_cpu_to_be_16(length); mpw->wqe->eseg.inline_hdr_sz = 0; mpw->wqe->eseg.cs_flags = 0; mpw->wqe->eseg.rsvd0 = 0; @@ -992,9 +999,10 @@ * Store size in multiple of 16 bytes. Control and Ethernet segments * count as 2. */ - mpw->wqe->ctrl[1] = htonl(txq->qp_num_8s | MLX5_WQE_DS(size)); + mpw->wqe->ctrl[1] = rte_cpu_to_be_32(txq->qp_num_8s | + MLX5_WQE_DS(size)); mpw->state = MLX5_MPW_STATE_CLOSED; - inl->byte_cnt = htonl(mpw->total_len | MLX5_INLINE_SEG); + inl->byte_cnt = rte_cpu_to_be_32(mpw->total_len | MLX5_INLINE_SEG); txq->wqe_ci += (size + (MLX5_WQE_SIZE - 1)) / MLX5_WQE_SIZE; } @@ -1132,9 +1140,10 @@ dseg = mpw.data.dseg[mpw.pkts_n]; addr = rte_pktmbuf_mtod(buf, uintptr_t); *dseg = (struct mlx5_wqe_data_seg){ - .byte_count = htonl(DATA_LEN(buf)), + .byte_count = + rte_cpu_to_be_32(DATA_LEN(buf)), .lkey = mlx5_tx_mb2mr(txq, buf), - .addr = htonll(addr), + .addr = rte_cpu_to_be_64(addr), }; #if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG) length += DATA_LEN(buf); @@ -1206,7 +1215,7 @@ volatile struct mlx5_wqe *wqe = mpw.wqe; /* Request completion on last WQE. */ - wqe->ctrl[2] = htonl(8); + wqe->ctrl[2] = rte_cpu_to_be_32(8); /* Save elts_head in unused "immediate" field of WQE. 
*/ wqe->ctrl[3] = elts_head; txq->elts_comp = 0; @@ -1246,9 +1255,10 @@ mpw->pkts_n = 0; mpw->total_len = sizeof(struct mlx5_wqe); mpw->wqe = (volatile struct mlx5_wqe *)tx_mlx5_wqe(txq, idx); - mpw->wqe->ctrl[0] = htonl((MLX5_OPC_MOD_ENHANCED_MPSW << 24) | - (txq->wqe_ci << 8) | - MLX5_OPCODE_ENHANCED_MPSW); + mpw->wqe->ctrl[0] = + rte_cpu_to_be_32((MLX5_OPC_MOD_ENHANCED_MPSW << 24) | + (txq->wqe_ci << 8) | + MLX5_OPCODE_ENHANCED_MPSW); mpw->wqe->ctrl[2] = 0; mpw->wqe->ctrl[3] = 0; memset((void *)(uintptr_t)&mpw->wqe->eseg, 0, MLX5_WQE_DWORD_SIZE); @@ -1256,9 +1266,9 @@ uintptr_t addr = (uintptr_t)(mpw->wqe + 1); /* Pad the first 2 DWORDs with zero-length inline header. */ - *(volatile uint32_t *)addr = htonl(MLX5_INLINE_SEG); + *(volatile uint32_t *)addr = rte_cpu_to_be_32(MLX5_INLINE_SEG); *(volatile uint32_t *)(addr + MLX5_WQE_DWORD_SIZE) = - htonl(MLX5_INLINE_SEG); + rte_cpu_to_be_32(MLX5_INLINE_SEG); mpw->total_len += 2 * MLX5_WQE_DWORD_SIZE; /* Start from the next WQEBB. */ mpw->data.raw = (volatile void *)(tx_mlx5_wqe(txq, idx + 1)); @@ -1286,7 +1296,8 @@ /* Store size in multiple of 16 bytes. Control and Ethernet segments * count as 2. 
*/ - mpw->wqe->ctrl[1] = htonl(txq->qp_num_8s | MLX5_WQE_DS(mpw->total_len)); + mpw->wqe->ctrl[1] = rte_cpu_to_be_32(txq->qp_num_8s | + MLX5_WQE_DS(mpw->total_len)); mpw->state = MLX5_MPW_STATE_CLOSED; ret = (mpw->total_len + (MLX5_WQE_SIZE - 1)) / MLX5_WQE_SIZE; txq->wqe_ci += ret; @@ -1439,9 +1450,10 @@ dseg = mpw.data.dseg[mpw.pkts_n]; addr = rte_pktmbuf_mtod(buf, uintptr_t); *dseg = (struct mlx5_wqe_data_seg){ - .byte_count = htonl(DATA_LEN(buf)), + .byte_count = rte_cpu_to_be_32( + DATA_LEN(buf)), .lkey = mlx5_tx_mb2mr(txq, buf), - .addr = htonll(addr), + .addr = rte_cpu_to_be_64(addr), }; #if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG) length += DATA_LEN(buf); @@ -1464,7 +1476,7 @@ assert(mpw.state == MLX5_MPW_ENHANCED_STATE_OPENED); assert(length == DATA_LEN(buf)); - inl_hdr = htonl(length | MLX5_INLINE_SEG); + inl_hdr = rte_cpu_to_be_32(length | MLX5_INLINE_SEG); addr = rte_pktmbuf_mtod(buf, uintptr_t); mpw.data.raw = (volatile void *) ((uintptr_t)mpw.data.raw + inl_pad); @@ -1520,9 +1532,9 @@ for (n = 0; n * RTE_CACHE_LINE_SIZE < length; n++) rte_prefetch2((void *)(addr + n * RTE_CACHE_LINE_SIZE)); - naddr = htonll(addr); + naddr = rte_cpu_to_be_64(addr); *dseg = (rte_v128u32_t) { - htonl(length), + rte_cpu_to_be_32(length), mlx5_tx_mb2mr(txq, buf), naddr, naddr >> 32, @@ -1550,7 +1562,7 @@ volatile struct mlx5_wqe *wqe = mpw.wqe; /* Request completion on last WQE. */ - wqe->ctrl[2] = htonl(8); + wqe->ctrl[2] = rte_cpu_to_be_32(8); /* Save elts_head in unused "immediate" field of WQE. 
*/ wqe->ctrl[3] = elts_head; txq->elts_comp = 0; @@ -1634,8 +1646,8 @@ (volatile struct mlx5_mini_cqe8 (*)[8]) (uintptr_t)(&(*rxq->cqes)[zip->ca & cqe_cnt].pkt_info); - len = ntohl((*mc)[zip->ai & 7].byte_cnt); - *rss_hash = ntohl((*mc)[zip->ai & 7].rx_hash_result); + len = rte_be_to_cpu_32((*mc)[zip->ai & 7].byte_cnt); + *rss_hash = rte_be_to_cpu_32((*mc)[zip->ai & 7].rx_hash_result); if ((++zip->ai & 7) == 0) { /* Invalidate consumed CQEs */ idx = zip->ca; @@ -1683,7 +1695,7 @@ cqe_cnt].pkt_info); /* Fix endianness. */ - zip->cqe_cnt = ntohl(cqe->byte_cnt); + zip->cqe_cnt = rte_be_to_cpu_32(cqe->byte_cnt); /* * Current mini array position is the one returned by * check_cqe64(). @@ -1698,8 +1710,8 @@ --rxq->cq_ci; zip->cq_ci = rxq->cq_ci + zip->cqe_cnt; /* Get packet size to return. */ - len = ntohl((*mc)[0].byte_cnt); - *rss_hash = ntohl((*mc)[0].rx_hash_result); + len = rte_be_to_cpu_32((*mc)[0].byte_cnt); + *rss_hash = rte_be_to_cpu_32((*mc)[0].rx_hash_result); zip->ai = 1; /* Prefetch all the entries to be invalidated */ idx = zip->ca; @@ -1709,8 +1721,8 @@ ++idx; } } else { - len = ntohl(cqe->byte_cnt); - *rss_hash = ntohl(cqe->rx_hash_res); + len = rte_be_to_cpu_32(cqe->byte_cnt); + *rss_hash = rte_be_to_cpu_32(cqe->rx_hash_res); } /* Error while receiving packet. 
*/ if (unlikely(MLX5_CQE_OPCODE(op_own) == MLX5_CQE_RESP_ERR)) @@ -1734,7 +1746,7 @@ rxq_cq_to_ol_flags(struct rxq *rxq, volatile struct mlx5_cqe *cqe) { uint32_t ol_flags = 0; - uint16_t flags = ntohs(cqe->hdr_type_etc); + uint16_t flags = rte_be_to_cpu_16(cqe->hdr_type_etc); ol_flags = TRANSPOSE(flags, @@ -1841,7 +1853,7 @@ MLX5_FLOW_MARK_IS_VALID(cqe->sop_drop_qpn)) { pkt->ol_flags |= PKT_RX_FDIR; if (cqe->sop_drop_qpn != - htonl(MLX5_FLOW_MARK_DEFAULT)) { + rte_cpu_to_be_32(MLX5_FLOW_MARK_DEFAULT)) { uint32_t mark = cqe->sop_drop_qpn; pkt->ol_flags |= PKT_RX_FDIR_ID; @@ -1853,10 +1865,11 @@ pkt->ol_flags |= rxq_cq_to_ol_flags(rxq, cqe); if (rxq->vlan_strip && (cqe->hdr_type_etc & - htons(MLX5_CQE_VLAN_STRIPPED))) { + rte_cpu_to_be_16(MLX5_CQE_VLAN_STRIPPED))) { pkt->ol_flags |= PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED; - pkt->vlan_tci = ntohs(cqe->vlan_info); + pkt->vlan_tci = + rte_be_to_cpu_16(cqe->vlan_info); } if (rxq->crc_present) len -= ETHER_CRC_LEN; @@ -1872,7 +1885,7 @@ * of the buffers are already known, only the buffer address * changes. */ - wqe->addr = htonll(rte_pktmbuf_mtod(rep, uintptr_t)); + wqe->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(rep, uintptr_t)); if (len > DATA_LEN(seg)) { len -= DATA_LEN(seg); ++NB_SEGS(pkt); @@ -1900,9 +1913,9 @@ /* Update the consumer index. */ rxq->rq_ci = rq_ci >> sges_n; rte_wmb(); - *rxq->cq_db = htonl(rxq->cq_ci); + *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci); rte_wmb(); - *rxq->rq_db = htonl(rxq->rq_ci); + *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci); #ifdef MLX5_PMD_SOFT_COUNTERS /* Increment packets counter. */ rxq->stats.ipackets += i; diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h index 033e70f..73a4ce8 100644 --- a/drivers/net/mlx5/mlx5_rxtx.h +++ b/drivers/net/mlx5/mlx5_rxtx.h @@ -269,7 +269,7 @@ struct txq { uintptr_t start; /* Start address of MR */ uintptr_t end; /* End address of MR */ struct ibv_mr *mr; /* Memory Region (for mp). 
*/ - uint32_t lkey; /* htonl(mr->lkey) */ + uint32_t lkey; /* rte_cpu_to_be_32(mr->lkey) */ } mp2mr[MLX5_PMD_TX_MP_CACHE]; /* MP to MR translation table. */ uint16_t mr_cache_idx; /* Index of last hit entry. */ struct rte_mbuf *(*elts)[]; /* TX elements. */ @@ -492,7 +492,7 @@ int mlx5_tx_queue_setup(struct rte_eth_dev *, uint16_t, uint16_t, unsigned int, } #endif /* NDEBUG */ ++cq_ci; - txq->wqe_pi = ntohs(cqe->wqe_counter); + txq->wqe_pi = rte_be_to_cpu_16(cqe->wqe_counter); ctrl = (volatile struct mlx5_wqe_ctrl *) tx_mlx5_wqe(txq, txq->wqe_pi); elts_tail = ctrl->ctrl3; @@ -530,7 +530,7 @@ int mlx5_tx_queue_setup(struct rte_eth_dev *, uint16_t, uint16_t, unsigned int, txq->elts_tail = elts_tail; /* Update the consumer index. */ rte_wmb(); - *txq->cq_db = htonl(cq_ci); + *txq->cq_db = rte_cpu_to_be_32(cq_ci); } /** @@ -581,7 +581,7 @@ int mlx5_tx_queue_setup(struct rte_eth_dev *, uint16_t, uint16_t, unsigned int, if (txq->mp2mr[i].start <= addr && txq->mp2mr[i].end >= addr) { assert(txq->mp2mr[i].lkey != (uint32_t)-1); - assert(htonl(txq->mp2mr[i].mr->lkey) == + assert(rte_cpu_to_be_32(txq->mp2mr[i].mr->lkey) == txq->mp2mr[i].lkey); txq->mr_cache_idx = i; return txq->mp2mr[i].lkey; @@ -605,8 +605,8 @@ int mlx5_tx_queue_setup(struct rte_eth_dev *, uint16_t, uint16_t, unsigned int, uint64_t *dst = (uint64_t *)((uintptr_t)txq->bf_reg); volatile uint64_t *src = ((volatile uint64_t *)wqe); - rte_io_wmb(); - *txq->qp_db = htonl(txq->wqe_ci); + rte_wmb(); + *txq->qp_db = rte_cpu_to_be_32(txq->wqe_ci); /* Ensure ordering between DB record and BF copy. */ rte_wmb(); *dst = *src; diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_sse.c b/drivers/net/mlx5/mlx5_rxtx_vec_sse.c index 37854a7..0a5d025 100644 --- a/drivers/net/mlx5/mlx5_rxtx_vec_sse.c +++ b/drivers/net/mlx5/mlx5_rxtx_vec_sse.c @@ -291,7 +291,7 @@ /* Fill ESEG in the header. 
*/ _mm_store_si128(t_wqe + 1, _mm_set_epi16(0, 0, 0, 0, - htons(len), cs_flags, + rte_cpu_to_be_16(len), cs_flags, 0, 0)); txq->wqe_ci = wqe_ci; } @@ -300,7 +300,7 @@ txq->elts_comp += (uint16_t)(elts_head - txq->elts_head); txq->elts_head = elts_head; if (txq->elts_comp >= MLX5_TX_COMP_THRESH) { - wqe->ctrl[2] = htonl(8); + wqe->ctrl[2] = rte_cpu_to_be_32(8); wqe->ctrl[3] = txq->elts_head; txq->elts_comp = 0; ++txq->cq_pi; @@ -561,11 +561,11 @@ return; } for (i = 0; i < n; ++i) - wq[i].addr = htonll((uintptr_t)elts[i]->buf_addr + - RTE_PKTMBUF_HEADROOM); + wq[i].addr = rte_cpu_to_be_64((uintptr_t)elts[i]->buf_addr + + RTE_PKTMBUF_HEADROOM); rxq->rq_ci += n; rte_wmb(); - *rxq->rq_db = htonl(rxq->rq_ci); + *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci); } /** @@ -1248,7 +1248,7 @@ } } rte_wmb(); - *rxq->cq_db = htonl(rxq->cq_ci); + *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci); return rcvd_pkt; } -- 1.8.3.1 ^ permalink raw reply [flat|nested] 25+ messages in thread
* Re: [dpdk-dev] [PATCH v2 1/3] net/mlx5: replace network to host macros 2017-09-05 13:04 ` [dpdk-dev] [PATCH v2 " Shachar Beiser @ 2017-09-05 13:41 ` Nélio Laranjeiro 0 siblings, 0 replies; 25+ messages in thread From: Nélio Laranjeiro @ 2017-09-05 13:41 UTC (permalink / raw) To: Shachar Beiser; +Cc: dev, Adrien Mazarguil On Tue, Sep 05, 2017 at 01:04:36PM +0000, Shachar Beiser wrote: > Signed-off-by: Shachar Beiser <shacharbe@mellanox.com> Acked-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com> Thanks, -- Nélio Laranjeiro 6WIND ^ permalink raw reply [flat|nested] 25+ messages in thread
* [dpdk-dev] [PATCH v2 2/3] net/mlx5: fix TSO MLNX OFED 3.3 verification 2017-09-04 11:48 [dpdk-dev] [PATCH 1/3] net/mlx5: replace network to host macros Shachar Beiser ` (3 preceding siblings ...) 2017-09-05 13:04 ` [dpdk-dev] [PATCH v2 " Shachar Beiser @ 2017-09-05 13:04 ` Shachar Beiser 2017-09-05 13:41 ` Nélio Laranjeiro ` (2 more replies) 2017-09-05 13:04 ` [dpdk-dev] [PATCH v2 3/3] net/mlx5: fix interrupt enable return value Shachar Beiser 5 siblings, 3 replies; 25+ messages in thread From: Shachar Beiser @ 2017-09-05 13:04 UTC (permalink / raw) To: dev; +Cc: Shachar Beiser, Adrien Mazarguil, Nelio Laranjeiro, stable Fixes: 3cf87e68d97b ("net/mlx5: remove old MLNX OFED 3.3 verification") Cc: stable@dpdk.org Signed-off-by: Shachar Beiser <shacharbe@mellanox.com> --- drivers/net/mlx5/mlx5_prm.h | 3 --- 1 file changed, 3 deletions(-) diff --git a/drivers/net/mlx5/mlx5_prm.h b/drivers/net/mlx5/mlx5_prm.h index 608072f..8b82b5e 100644 --- a/drivers/net/mlx5/mlx5_prm.h +++ b/drivers/net/mlx5/mlx5_prm.h @@ -89,9 +89,6 @@ /* Default max packet length to be inlined. */ #define MLX5_EMPW_MAX_INLINE_LEN (4U * MLX5_WQE_SIZE) -#ifndef HAVE_VERBS_MLX5_OPCODE_TSO -#define MLX5_OPCODE_TSO MLX5_OPCODE_LSO_MPW /* Compat with OFED 3.3. */ -#endif #define MLX5_OPC_MOD_ENHANCED_MPSW 0 #define MLX5_OPCODE_ENHANCED_MPSW 0x29 -- 1.8.3.1 ^ permalink raw reply [flat|nested] 25+ messages in thread
* Re: [dpdk-dev] [PATCH v2 2/3] net/mlx5: fix TSO MLNX OFED 3.3 verification 2017-09-05 13:04 ` [dpdk-dev] [PATCH v2 2/3] net/mlx5: fix TSO MLNX OFED 3.3 verification Shachar Beiser @ 2017-09-05 13:41 ` Nélio Laranjeiro 2017-09-14 13:43 ` [dpdk-dev] [PATCH v3 1/2] net/mlx5: replace network to host macros Shachar Beiser 2017-09-14 13:43 ` [dpdk-dev] [PATCH v3 2/2] net/mlx5: fix TSO MLNX OFED 3.3 verification Shachar Beiser 2 siblings, 0 replies; 25+ messages in thread From: Nélio Laranjeiro @ 2017-09-05 13:41 UTC (permalink / raw) To: Shachar Beiser; +Cc: dev, Adrien Mazarguil, stable On Tue, Sep 05, 2017 at 01:04:37PM +0000, Shachar Beiser wrote: > Fixes: 3cf87e68d97b ("net/mlx5: remove old MLNX OFED 3.3 verification") > Cc: stable@dpdk.org > > Signed-off-by: Shachar Beiser <shacharbe@mellanox.com> > --- > drivers/net/mlx5/mlx5_prm.h | 3 --- > 1 file changed, 3 deletions(-) > > diff --git a/drivers/net/mlx5/mlx5_prm.h b/drivers/net/mlx5/mlx5_prm.h > index 608072f..8b82b5e 100644 > --- a/drivers/net/mlx5/mlx5_prm.h > +++ b/drivers/net/mlx5/mlx5_prm.h > @@ -89,9 +89,6 @@ > /* Default max packet length to be inlined. */ > #define MLX5_EMPW_MAX_INLINE_LEN (4U * MLX5_WQE_SIZE) > > -#ifndef HAVE_VERBS_MLX5_OPCODE_TSO > -#define MLX5_OPCODE_TSO MLX5_OPCODE_LSO_MPW /* Compat with OFED 3.3. */ > -#endif > > #define MLX5_OPC_MOD_ENHANCED_MPSW 0 > #define MLX5_OPCODE_ENHANCED_MPSW 0x29 > -- > 1.8.3.1 > Acked-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com> -- Nélio Laranjeiro 6WIND ^ permalink raw reply [flat|nested] 25+ messages in thread
* [dpdk-dev] [PATCH v3 1/2] net/mlx5: replace network to host macros 2017-09-05 13:04 ` [dpdk-dev] [PATCH v2 2/3] net/mlx5: fix TSO MLNX OFED 3.3 verification Shachar Beiser 2017-09-05 13:41 ` Nélio Laranjeiro @ 2017-09-14 13:43 ` Shachar Beiser 2017-09-15 20:50 ` Yongseok Koh 2017-09-14 13:43 ` [dpdk-dev] [PATCH v3 2/2] net/mlx5: fix TSO MLNX OFED 3.3 verification Shachar Beiser 2 siblings, 1 reply; 25+ messages in thread From: Shachar Beiser @ 2017-09-14 13:43 UTC (permalink / raw) To: dev; +Cc: Shachar Beiser, Adrien Mazarguil, Nelio Laranjeiro Signed-off-by: Shachar Beiser <shacharbe@mellanox.com> --- There are only 2 patches left since I have squashed : [PATCH v2 3/3] “net/mlx5: fix interrupt enable return” http://dpdk.org/dev/patchwork/patch/28380/ into [dpdk-dev,v5] net/mlx5: support upstream rdma-core --- drivers/net/mlx5/mlx5_mac.c | 8 ++- drivers/net/mlx5/mlx5_mr.c | 2 +- drivers/net/mlx5/mlx5_rxmode.c | 8 ++- drivers/net/mlx5/mlx5_rxq.c | 9 +-- drivers/net/mlx5/mlx5_rxtx.c | 131 +++++++++++++++++++---------------- drivers/net/mlx5/mlx5_rxtx.h | 12 ++-- drivers/net/mlx5/mlx5_rxtx_vec_sse.c | 12 ++-- 7 files changed, 102 insertions(+), 80 deletions(-) diff --git a/drivers/net/mlx5/mlx5_mac.c b/drivers/net/mlx5/mlx5_mac.c index 45d23e4..b3c3fa2 100644 --- a/drivers/net/mlx5/mlx5_mac.c +++ b/drivers/net/mlx5/mlx5_mac.c @@ -263,11 +263,15 @@ (*mac)[0], (*mac)[1], (*mac)[2], (*mac)[3], (*mac)[4], (*mac)[5] }, - .vlan_tag = (vlan_enabled ? htons(vlan_id) : 0), + .vlan_tag = (vlan_enabled ? + rte_cpu_to_be_16(vlan_id) + : 0), }, .mask = { .dst_mac = "\xff\xff\xff\xff\xff\xff", - .vlan_tag = (vlan_enabled ? htons(0xfff) : 0), + .vlan_tag = (vlan_enabled ? 
+ rte_cpu_to_be_16(0xfff) : + 0), }, }; DEBUG("%p: adding MAC address %02x:%02x:%02x:%02x:%02x:%02x index %u" diff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c index 9593830..9a9f73a 100644 --- a/drivers/net/mlx5/mlx5_mr.c +++ b/drivers/net/mlx5/mlx5_mr.c @@ -203,7 +203,7 @@ struct ibv_mr * txq_ctrl->txq.mp2mr[idx].start = (uintptr_t)mr->addr; txq_ctrl->txq.mp2mr[idx].end = (uintptr_t)mr->addr + mr->length; txq_ctrl->txq.mp2mr[idx].mr = mr; - txq_ctrl->txq.mp2mr[idx].lkey = htonl(mr->lkey); + txq_ctrl->txq.mp2mr[idx].lkey = rte_cpu_to_be_32(mr->lkey); DEBUG("%p: new MR lkey for MP \"%s\" (%p): 0x%08" PRIu32, (void *)txq_ctrl, mp->name, (void *)mp, txq_ctrl->txq.mp2mr[idx].lkey); diff --git a/drivers/net/mlx5/mlx5_rxmode.c b/drivers/net/mlx5/mlx5_rxmode.c index 4a51e47..db2e05b 100644 --- a/drivers/net/mlx5/mlx5_rxmode.c +++ b/drivers/net/mlx5/mlx5_rxmode.c @@ -159,14 +159,18 @@ mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], }, - .vlan_tag = (vlan_enabled ? htons(vlan_id) : 0), + .vlan_tag = (vlan_enabled ? + rte_cpu_to_be_16(vlan_id) : + 0), }, .mask = { .dst_mac = { mask[0], mask[1], mask[2], mask[3], mask[4], mask[5], }, - .vlan_tag = (vlan_enabled ? htons(0xfff) : 0), + .vlan_tag = (vlan_enabled ? + rte_cpu_to_be_16(0xfff) : + 0), }, }; diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c index 35c5cb4..437dc02 100644 --- a/drivers/net/mlx5/mlx5_rxq.c +++ b/drivers/net/mlx5/mlx5_rxq.c @@ -672,9 +672,10 @@ /* scat->addr must be able to store a pointer. */ assert(sizeof(scat->addr) >= sizeof(uintptr_t)); *scat = (struct mlx5_wqe_data_seg){ - .addr = htonll(rte_pktmbuf_mtod(buf, uintptr_t)), - .byte_count = htonl(DATA_LEN(buf)), - .lkey = htonl(rxq_ctrl->mr->lkey), + .addr = + rte_cpu_to_be_64(rte_pktmbuf_mtod(buf, uintptr_t)), + .byte_count = rte_cpu_to_be_32(DATA_LEN(buf)), + .lkey = rte_cpu_to_be_32(rxq_ctrl->mr->lkey), }; (*rxq_ctrl->rxq.elts)[i] = buf; } @@ -1077,7 +1078,7 @@ /* Update doorbell counter. 
*/ rxq_ctrl->rxq.rq_ci = desc >> rxq_ctrl->rxq.sges_n; rte_wmb(); - *rxq_ctrl->rxq.rq_db = htonl(rxq_ctrl->rxq.rq_ci); + *rxq_ctrl->rxq.rq_db = rte_cpu_to_be_32(rxq_ctrl->rxq.rq_ci); DEBUG("%p: rxq updated with %p", (void *)rxq_ctrl, (void *)&tmpl); assert(ret == 0); return 0; diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c index fe9e7ea..e1a35a3 100644 --- a/drivers/net/mlx5/mlx5_rxtx.c +++ b/drivers/net/mlx5/mlx5_rxtx.c @@ -306,7 +306,7 @@ op_own = cqe->op_own; if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED) - n = ntohl(cqe->byte_cnt); + n = rte_be_to_cpu_32(cqe->byte_cnt); else n = 1; cq_ci += n; @@ -434,7 +434,8 @@ raw = ((uint8_t *)(uintptr_t)wqe) + 2 * MLX5_WQE_DWORD_SIZE; /* Replace the Ethernet type by the VLAN if necessary. */ if (buf->ol_flags & PKT_TX_VLAN_PKT) { - uint32_t vlan = htonl(0x81000000 | buf->vlan_tci); + uint32_t vlan = rte_cpu_to_be_32(0x81000000 | + buf->vlan_tci); unsigned int len = 2 * ETHER_ADDR_LEN - 2; addr += 2; @@ -510,8 +511,10 @@ } else { /* NOP WQE. */ wqe->ctrl = (rte_v128u32_t){ - htonl(txq->wqe_ci << 8), - htonl(txq->qp_num_8s | 1), + rte_cpu_to_be_32( + txq->wqe_ci << 8), + rte_cpu_to_be_32( + txq->qp_num_8s | 1), 0, 0, }; @@ -550,7 +553,8 @@ max_wqe -= n; if (tso) { uint32_t inl = - htonl(copy_b | MLX5_INLINE_SEG); + rte_cpu_to_be_32(copy_b | + MLX5_INLINE_SEG); pkt_inline_sz = MLX5_WQE_DS(tso_header_sz) * @@ -603,9 +607,9 @@ ds = 3; use_dseg: /* Add the remaining packet as a simple ds. */ - naddr = htonll(addr); + naddr = rte_cpu_to_be_64(addr); *dseg = (rte_v128u32_t){ - htonl(length), + rte_cpu_to_be_32(length), mlx5_tx_mb2mr(txq, buf), naddr, naddr >> 32, @@ -642,9 +646,9 @@ total_length += length; #endif /* Store segment information. 
*/ - naddr = htonll(rte_pktmbuf_mtod(buf, uintptr_t)); + naddr = rte_cpu_to_be_64(rte_pktmbuf_mtod(buf, uintptr_t)); *dseg = (rte_v128u32_t){ - htonl(length), + rte_cpu_to_be_32(length), mlx5_tx_mb2mr(txq, buf), naddr, naddr >> 32, @@ -663,21 +667,23 @@ /* Initialize known and common part of the WQE structure. */ if (tso) { wqe->ctrl = (rte_v128u32_t){ - htonl((txq->wqe_ci << 8) | MLX5_OPCODE_TSO), - htonl(txq->qp_num_8s | ds), + rte_cpu_to_be_32((txq->wqe_ci << 8) | + MLX5_OPCODE_TSO), + rte_cpu_to_be_32(txq->qp_num_8s | ds), 0, 0, }; wqe->eseg = (rte_v128u32_t){ 0, - cs_flags | (htons(tso_segsz) << 16), + cs_flags | (rte_cpu_to_be_16(tso_segsz) << 16), 0, - (ehdr << 16) | htons(tso_header_sz), + (ehdr << 16) | rte_cpu_to_be_16(tso_header_sz), }; } else { wqe->ctrl = (rte_v128u32_t){ - htonl((txq->wqe_ci << 8) | MLX5_OPCODE_SEND), - htonl(txq->qp_num_8s | ds), + rte_cpu_to_be_32((txq->wqe_ci << 8) | + MLX5_OPCODE_SEND), + rte_cpu_to_be_32(txq->qp_num_8s | ds), 0, 0, }; @@ -685,7 +691,7 @@ 0, cs_flags, 0, - (ehdr << 16) | htons(pkt_inline_sz), + (ehdr << 16) | rte_cpu_to_be_16(pkt_inline_sz), }; } next_wqe: @@ -705,7 +711,7 @@ comp = txq->elts_comp + i + j + k; if (comp >= MLX5_TX_COMP_THRESH) { /* Request completion on last WQE. */ - last_wqe->ctrl2 = htonl(8); + last_wqe->ctrl2 = rte_cpu_to_be_32(8); /* Save elts_head in unused "immediate" field of WQE. 
*/ last_wqe->ctrl3 = txq->elts_head; txq->elts_comp = 0; @@ -744,13 +750,14 @@ mpw->len = length; mpw->total_len = 0; mpw->wqe = (volatile struct mlx5_wqe *)tx_mlx5_wqe(txq, idx); - mpw->wqe->eseg.mss = htons(length); + mpw->wqe->eseg.mss = rte_cpu_to_be_16(length); mpw->wqe->eseg.inline_hdr_sz = 0; mpw->wqe->eseg.rsvd0 = 0; mpw->wqe->eseg.rsvd1 = 0; mpw->wqe->eseg.rsvd2 = 0; - mpw->wqe->ctrl[0] = htonl((MLX5_OPC_MOD_MPW << 24) | - (txq->wqe_ci << 8) | MLX5_OPCODE_TSO); + mpw->wqe->ctrl[0] = rte_cpu_to_be_32((MLX5_OPC_MOD_MPW << 24) | + (txq->wqe_ci << 8) | + MLX5_OPCODE_TSO); mpw->wqe->ctrl[2] = 0; mpw->wqe->ctrl[3] = 0; mpw->data.dseg[0] = (volatile struct mlx5_wqe_data_seg *) @@ -779,7 +786,7 @@ * Store size in multiple of 16 bytes. Control and Ethernet segments * count as 2. */ - mpw->wqe->ctrl[1] = htonl(txq->qp_num_8s | (2 + num)); + mpw->wqe->ctrl[1] = rte_cpu_to_be_32(txq->qp_num_8s | (2 + num)); mpw->state = MLX5_MPW_STATE_CLOSED; if (num < 3) ++txq->wqe_ci; @@ -886,9 +893,9 @@ dseg = mpw.data.dseg[mpw.pkts_n]; addr = rte_pktmbuf_mtod(buf, uintptr_t); *dseg = (struct mlx5_wqe_data_seg){ - .byte_count = htonl(DATA_LEN(buf)), + .byte_count = rte_cpu_to_be_32(DATA_LEN(buf)), .lkey = mlx5_tx_mb2mr(txq, buf), - .addr = htonll(addr), + .addr = rte_cpu_to_be_64(addr), }; #if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG) length += DATA_LEN(buf); @@ -916,7 +923,7 @@ volatile struct mlx5_wqe *wqe = mpw.wqe; /* Request completion on last WQE. */ - wqe->ctrl[2] = htonl(8); + wqe->ctrl[2] = rte_cpu_to_be_32(8); /* Save elts_head in unused "immediate" field of WQE. 
*/ wqe->ctrl[3] = elts_head; txq->elts_comp = 0; @@ -956,12 +963,12 @@ mpw->len = length; mpw->total_len = 0; mpw->wqe = (volatile struct mlx5_wqe *)tx_mlx5_wqe(txq, idx); - mpw->wqe->ctrl[0] = htonl((MLX5_OPC_MOD_MPW << 24) | - (txq->wqe_ci << 8) | - MLX5_OPCODE_TSO); + mpw->wqe->ctrl[0] = rte_cpu_to_be_32((MLX5_OPC_MOD_MPW << 24) | + (txq->wqe_ci << 8) | + MLX5_OPCODE_TSO); mpw->wqe->ctrl[2] = 0; mpw->wqe->ctrl[3] = 0; - mpw->wqe->eseg.mss = htons(length); + mpw->wqe->eseg.mss = rte_cpu_to_be_16(length); mpw->wqe->eseg.inline_hdr_sz = 0; mpw->wqe->eseg.cs_flags = 0; mpw->wqe->eseg.rsvd0 = 0; @@ -992,9 +999,10 @@ * Store size in multiple of 16 bytes. Control and Ethernet segments * count as 2. */ - mpw->wqe->ctrl[1] = htonl(txq->qp_num_8s | MLX5_WQE_DS(size)); + mpw->wqe->ctrl[1] = rte_cpu_to_be_32(txq->qp_num_8s | + MLX5_WQE_DS(size)); mpw->state = MLX5_MPW_STATE_CLOSED; - inl->byte_cnt = htonl(mpw->total_len | MLX5_INLINE_SEG); + inl->byte_cnt = rte_cpu_to_be_32(mpw->total_len | MLX5_INLINE_SEG); txq->wqe_ci += (size + (MLX5_WQE_SIZE - 1)) / MLX5_WQE_SIZE; } @@ -1132,9 +1140,10 @@ dseg = mpw.data.dseg[mpw.pkts_n]; addr = rte_pktmbuf_mtod(buf, uintptr_t); *dseg = (struct mlx5_wqe_data_seg){ - .byte_count = htonl(DATA_LEN(buf)), + .byte_count = + rte_cpu_to_be_32(DATA_LEN(buf)), .lkey = mlx5_tx_mb2mr(txq, buf), - .addr = htonll(addr), + .addr = rte_cpu_to_be_64(addr), }; #if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG) length += DATA_LEN(buf); @@ -1206,7 +1215,7 @@ volatile struct mlx5_wqe *wqe = mpw.wqe; /* Request completion on last WQE. */ - wqe->ctrl[2] = htonl(8); + wqe->ctrl[2] = rte_cpu_to_be_32(8); /* Save elts_head in unused "immediate" field of WQE. 
*/ wqe->ctrl[3] = elts_head; txq->elts_comp = 0; @@ -1246,9 +1255,10 @@ mpw->pkts_n = 0; mpw->total_len = sizeof(struct mlx5_wqe); mpw->wqe = (volatile struct mlx5_wqe *)tx_mlx5_wqe(txq, idx); - mpw->wqe->ctrl[0] = htonl((MLX5_OPC_MOD_ENHANCED_MPSW << 24) | - (txq->wqe_ci << 8) | - MLX5_OPCODE_ENHANCED_MPSW); + mpw->wqe->ctrl[0] = + rte_cpu_to_be_32((MLX5_OPC_MOD_ENHANCED_MPSW << 24) | + (txq->wqe_ci << 8) | + MLX5_OPCODE_ENHANCED_MPSW); mpw->wqe->ctrl[2] = 0; mpw->wqe->ctrl[3] = 0; memset((void *)(uintptr_t)&mpw->wqe->eseg, 0, MLX5_WQE_DWORD_SIZE); @@ -1256,9 +1266,9 @@ uintptr_t addr = (uintptr_t)(mpw->wqe + 1); /* Pad the first 2 DWORDs with zero-length inline header. */ - *(volatile uint32_t *)addr = htonl(MLX5_INLINE_SEG); + *(volatile uint32_t *)addr = rte_cpu_to_be_32(MLX5_INLINE_SEG); *(volatile uint32_t *)(addr + MLX5_WQE_DWORD_SIZE) = - htonl(MLX5_INLINE_SEG); + rte_cpu_to_be_32(MLX5_INLINE_SEG); mpw->total_len += 2 * MLX5_WQE_DWORD_SIZE; /* Start from the next WQEBB. */ mpw->data.raw = (volatile void *)(tx_mlx5_wqe(txq, idx + 1)); @@ -1286,7 +1296,8 @@ /* Store size in multiple of 16 bytes. Control and Ethernet segments * count as 2. 
*/ - mpw->wqe->ctrl[1] = htonl(txq->qp_num_8s | MLX5_WQE_DS(mpw->total_len)); + mpw->wqe->ctrl[1] = rte_cpu_to_be_32(txq->qp_num_8s | + MLX5_WQE_DS(mpw->total_len)); mpw->state = MLX5_MPW_STATE_CLOSED; ret = (mpw->total_len + (MLX5_WQE_SIZE - 1)) / MLX5_WQE_SIZE; txq->wqe_ci += ret; @@ -1439,9 +1450,10 @@ dseg = mpw.data.dseg[mpw.pkts_n]; addr = rte_pktmbuf_mtod(buf, uintptr_t); *dseg = (struct mlx5_wqe_data_seg){ - .byte_count = htonl(DATA_LEN(buf)), + .byte_count = rte_cpu_to_be_32( + DATA_LEN(buf)), .lkey = mlx5_tx_mb2mr(txq, buf), - .addr = htonll(addr), + .addr = rte_cpu_to_be_64(addr), }; #if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG) length += DATA_LEN(buf); @@ -1464,7 +1476,7 @@ assert(mpw.state == MLX5_MPW_ENHANCED_STATE_OPENED); assert(length == DATA_LEN(buf)); - inl_hdr = htonl(length | MLX5_INLINE_SEG); + inl_hdr = rte_cpu_to_be_32(length | MLX5_INLINE_SEG); addr = rte_pktmbuf_mtod(buf, uintptr_t); mpw.data.raw = (volatile void *) ((uintptr_t)mpw.data.raw + inl_pad); @@ -1520,9 +1532,9 @@ for (n = 0; n * RTE_CACHE_LINE_SIZE < length; n++) rte_prefetch2((void *)(addr + n * RTE_CACHE_LINE_SIZE)); - naddr = htonll(addr); + naddr = rte_cpu_to_be_64(addr); *dseg = (rte_v128u32_t) { - htonl(length), + rte_cpu_to_be_32(length), mlx5_tx_mb2mr(txq, buf), naddr, naddr >> 32, @@ -1550,7 +1562,7 @@ volatile struct mlx5_wqe *wqe = mpw.wqe; /* Request completion on last WQE. */ - wqe->ctrl[2] = htonl(8); + wqe->ctrl[2] = rte_cpu_to_be_32(8); /* Save elts_head in unused "immediate" field of WQE. 
*/ wqe->ctrl[3] = elts_head; txq->elts_comp = 0; @@ -1634,8 +1646,8 @@ (volatile struct mlx5_mini_cqe8 (*)[8]) (uintptr_t)(&(*rxq->cqes)[zip->ca & cqe_cnt].pkt_info); - len = ntohl((*mc)[zip->ai & 7].byte_cnt); - *rss_hash = ntohl((*mc)[zip->ai & 7].rx_hash_result); + len = rte_be_to_cpu_32((*mc)[zip->ai & 7].byte_cnt); + *rss_hash = rte_be_to_cpu_32((*mc)[zip->ai & 7].rx_hash_result); if ((++zip->ai & 7) == 0) { /* Invalidate consumed CQEs */ idx = zip->ca; @@ -1683,7 +1695,7 @@ cqe_cnt].pkt_info); /* Fix endianness. */ - zip->cqe_cnt = ntohl(cqe->byte_cnt); + zip->cqe_cnt = rte_be_to_cpu_32(cqe->byte_cnt); /* * Current mini array position is the one returned by * check_cqe64(). @@ -1698,8 +1710,8 @@ --rxq->cq_ci; zip->cq_ci = rxq->cq_ci + zip->cqe_cnt; /* Get packet size to return. */ - len = ntohl((*mc)[0].byte_cnt); - *rss_hash = ntohl((*mc)[0].rx_hash_result); + len = rte_be_to_cpu_32((*mc)[0].byte_cnt); + *rss_hash = rte_be_to_cpu_32((*mc)[0].rx_hash_result); zip->ai = 1; /* Prefetch all the entries to be invalidated */ idx = zip->ca; @@ -1709,8 +1721,8 @@ ++idx; } } else { - len = ntohl(cqe->byte_cnt); - *rss_hash = ntohl(cqe->rx_hash_res); + len = rte_be_to_cpu_32(cqe->byte_cnt); + *rss_hash = rte_be_to_cpu_32(cqe->rx_hash_res); } /* Error while receiving packet. 
*/ if (unlikely(MLX5_CQE_OPCODE(op_own) == MLX5_CQE_RESP_ERR)) @@ -1734,7 +1746,7 @@ rxq_cq_to_ol_flags(struct rxq *rxq, volatile struct mlx5_cqe *cqe) { uint32_t ol_flags = 0; - uint16_t flags = ntohs(cqe->hdr_type_etc); + uint16_t flags = rte_be_to_cpu_16(cqe->hdr_type_etc); ol_flags = TRANSPOSE(flags, @@ -1841,7 +1853,7 @@ MLX5_FLOW_MARK_IS_VALID(cqe->sop_drop_qpn)) { pkt->ol_flags |= PKT_RX_FDIR; if (cqe->sop_drop_qpn != - htonl(MLX5_FLOW_MARK_DEFAULT)) { + rte_cpu_to_be_32(MLX5_FLOW_MARK_DEFAULT)) { uint32_t mark = cqe->sop_drop_qpn; pkt->ol_flags |= PKT_RX_FDIR_ID; @@ -1853,10 +1865,11 @@ pkt->ol_flags |= rxq_cq_to_ol_flags(rxq, cqe); if (rxq->vlan_strip && (cqe->hdr_type_etc & - htons(MLX5_CQE_VLAN_STRIPPED))) { + rte_cpu_to_be_16(MLX5_CQE_VLAN_STRIPPED))) { pkt->ol_flags |= PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED; - pkt->vlan_tci = ntohs(cqe->vlan_info); + pkt->vlan_tci = + rte_be_to_cpu_16(cqe->vlan_info); } if (rxq->crc_present) len -= ETHER_CRC_LEN; @@ -1872,7 +1885,7 @@ * of the buffers are already known, only the buffer address * changes. */ - wqe->addr = htonll(rte_pktmbuf_mtod(rep, uintptr_t)); + wqe->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(rep, uintptr_t)); if (len > DATA_LEN(seg)) { len -= DATA_LEN(seg); ++NB_SEGS(pkt); @@ -1900,9 +1913,9 @@ /* Update the consumer index. */ rxq->rq_ci = rq_ci >> sges_n; rte_wmb(); - *rxq->cq_db = htonl(rxq->cq_ci); + *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci); rte_wmb(); - *rxq->rq_db = htonl(rxq->rq_ci); + *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci); #ifdef MLX5_PMD_SOFT_COUNTERS /* Increment packets counter. */ rxq->stats.ipackets += i; diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h index 033e70f..73a4ce8 100644 --- a/drivers/net/mlx5/mlx5_rxtx.h +++ b/drivers/net/mlx5/mlx5_rxtx.h @@ -269,7 +269,7 @@ struct txq { uintptr_t start; /* Start address of MR */ uintptr_t end; /* End address of MR */ struct ibv_mr *mr; /* Memory Region (for mp). 
*/ - uint32_t lkey; /* htonl(mr->lkey) */ + uint32_t lkey; /* rte_cpu_to_be_32(mr->lkey) */ } mp2mr[MLX5_PMD_TX_MP_CACHE]; /* MP to MR translation table. */ uint16_t mr_cache_idx; /* Index of last hit entry. */ struct rte_mbuf *(*elts)[]; /* TX elements. */ @@ -492,7 +492,7 @@ int mlx5_tx_queue_setup(struct rte_eth_dev *, uint16_t, uint16_t, unsigned int, } #endif /* NDEBUG */ ++cq_ci; - txq->wqe_pi = ntohs(cqe->wqe_counter); + txq->wqe_pi = rte_be_to_cpu_16(cqe->wqe_counter); ctrl = (volatile struct mlx5_wqe_ctrl *) tx_mlx5_wqe(txq, txq->wqe_pi); elts_tail = ctrl->ctrl3; @@ -530,7 +530,7 @@ int mlx5_tx_queue_setup(struct rte_eth_dev *, uint16_t, uint16_t, unsigned int, txq->elts_tail = elts_tail; /* Update the consumer index. */ rte_wmb(); - *txq->cq_db = htonl(cq_ci); + *txq->cq_db = rte_cpu_to_be_32(cq_ci); } /** @@ -581,7 +581,7 @@ int mlx5_tx_queue_setup(struct rte_eth_dev *, uint16_t, uint16_t, unsigned int, if (txq->mp2mr[i].start <= addr && txq->mp2mr[i].end >= addr) { assert(txq->mp2mr[i].lkey != (uint32_t)-1); - assert(htonl(txq->mp2mr[i].mr->lkey) == + assert(rte_cpu_to_be_32(txq->mp2mr[i].mr->lkey) == txq->mp2mr[i].lkey); txq->mr_cache_idx = i; return txq->mp2mr[i].lkey; @@ -605,8 +605,8 @@ int mlx5_tx_queue_setup(struct rte_eth_dev *, uint16_t, uint16_t, unsigned int, uint64_t *dst = (uint64_t *)((uintptr_t)txq->bf_reg); volatile uint64_t *src = ((volatile uint64_t *)wqe); - rte_io_wmb(); - *txq->qp_db = htonl(txq->wqe_ci); + rte_wmb(); + *txq->qp_db = rte_cpu_to_be_32(txq->wqe_ci); /* Ensure ordering between DB record and BF copy. */ rte_wmb(); *dst = *src; diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_sse.c b/drivers/net/mlx5/mlx5_rxtx_vec_sse.c index 37854a7..0a5d025 100644 --- a/drivers/net/mlx5/mlx5_rxtx_vec_sse.c +++ b/drivers/net/mlx5/mlx5_rxtx_vec_sse.c @@ -291,7 +291,7 @@ /* Fill ESEG in the header. 
*/ _mm_store_si128(t_wqe + 1, _mm_set_epi16(0, 0, 0, 0, - htons(len), cs_flags, + rte_cpu_to_be_16(len), cs_flags, 0, 0)); txq->wqe_ci = wqe_ci; } @@ -300,7 +300,7 @@ txq->elts_comp += (uint16_t)(elts_head - txq->elts_head); txq->elts_head = elts_head; if (txq->elts_comp >= MLX5_TX_COMP_THRESH) { - wqe->ctrl[2] = htonl(8); + wqe->ctrl[2] = rte_cpu_to_be_32(8); wqe->ctrl[3] = txq->elts_head; txq->elts_comp = 0; ++txq->cq_pi; @@ -561,11 +561,11 @@ return; } for (i = 0; i < n; ++i) - wq[i].addr = htonll((uintptr_t)elts[i]->buf_addr + - RTE_PKTMBUF_HEADROOM); + wq[i].addr = rte_cpu_to_be_64((uintptr_t)elts[i]->buf_addr + + RTE_PKTMBUF_HEADROOM); rxq->rq_ci += n; rte_wmb(); - *rxq->rq_db = htonl(rxq->rq_ci); + *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci); } /** @@ -1248,7 +1248,7 @@ } } rte_wmb(); - *rxq->cq_db = htonl(rxq->cq_ci); + *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci); return rcvd_pkt; } -- 1.8.3.1 ^ permalink raw reply [flat|nested] 25+ messages in thread
* Re: [dpdk-dev] [PATCH v3 1/2] net/mlx5: replace network to host macros 2017-09-14 13:43 ` [dpdk-dev] [PATCH v3 1/2] net/mlx5: replace network to host macros Shachar Beiser @ 2017-09-15 20:50 ` Yongseok Koh 2017-09-18 9:47 ` Shachar Beiser 0 siblings, 1 reply; 25+ messages in thread From: Yongseok Koh @ 2017-09-15 20:50 UTC (permalink / raw) To: Shachar Beiser; +Cc: dev, Adrien Mazarguil, Nélio Laranjeiro > On Sep 14, 2017, at 6:43 AM, Shachar Beiser <shacharbe@mellanox.com> wrote: > > Signed-off-by: Shachar Beiser <shacharbe@mellanox.com> > --- [...] > @@ -550,7 +553,8 @@ > max_wqe -= n; > if (tso) { > uint32_t inl = > - htonl(copy_b | MLX5_INLINE_SEG); > + rte_cpu_to_be_32(copy_b | > + MLX5_INLINE_SEG); Wrong indentation. [...] > @@ -605,8 +605,8 @@ int mlx5_tx_queue_setup(struct rte_eth_dev *, uint16_t, uint16_t, unsigned int, > uint64_t *dst = (uint64_t *)((uintptr_t)txq->bf_reg); > volatile uint64_t *src = ((volatile uint64_t *)wqe); > > - rte_io_wmb(); > - *txq->qp_db = htonl(txq->wqe_ci); > + rte_wmb(); Look like a mistake when rebasing. This should not be touched by this patch. > + *txq->qp_db = rte_cpu_to_be_32(txq->wqe_ci); > /* Ensure ordering between DB record and BF copy. */ > rte_wmb(); > *dst = *src; -- Thanks, Yongseok ^ permalink raw reply [flat|nested] 25+ messages in thread
* Re: [dpdk-dev] [PATCH v3 1/2] net/mlx5: replace network to host macros 2017-09-15 20:50 ` Yongseok Koh @ 2017-09-18 9:47 ` Shachar Beiser 0 siblings, 0 replies; 25+ messages in thread From: Shachar Beiser @ 2017-09-18 9:47 UTC (permalink / raw) To: Yongseok Koh; +Cc: dev, Adrien Mazarguil, Nélio Laranjeiro > -----Original Message----- > From: Yongseok Koh > Sent: Friday, September 15, 2017 11:50 PM > To: Shachar Beiser <shacharbe@mellanox.com> > Cc: dev@dpdk.org; Adrien Mazarguil <adrien.mazarguil@6wind.com>; Nélio > Laranjeiro <nelio.laranjeiro@6wind.com> > Subject: Re: [dpdk-dev] [PATCH v3 1/2] net/mlx5: replace network to host > macros > > > > On Sep 14, 2017, at 6:43 AM, Shachar Beiser <shacharbe@mellanox.com> > wrote: > > > > Signed-off-by: Shachar Beiser <shacharbe@mellanox.com> > > --- > [...] > > @@ -550,7 +553,8 @@ > > max_wqe -= n; > > if (tso) { > > uint32_t inl = > > - htonl(copy_b | > MLX5_INLINE_SEG); > > + rte_cpu_to_be_32(copy_b | > > + MLX5_INLINE_SEG); > Wrong indentation. > Since there is a constrain of 80 characters a line , this indentation was accepted by Nélio . > [...] > > @@ -605,8 +605,8 @@ int mlx5_tx_queue_setup(struct rte_eth_dev *, > uint16_t, uint16_t, unsigned int, > > uint64_t *dst = (uint64_t *)((uintptr_t)txq->bf_reg); > > volatile uint64_t *src = ((volatile uint64_t *)wqe); > > > > - rte_io_wmb(); > > - *txq->qp_db = htonl(txq->wqe_ci); > > + rte_wmb(); > Look like a mistake when rebasing. This should not be touched by this patch. > > > + *txq->qp_db = rte_cpu_to_be_32(txq->wqe_ci); > > /* Ensure ordering between DB record and BF copy. */ > > rte_wmb(); > > *dst = *src; > Yes , this is true. There is a fix for this issue in the coming patch: [dpdk-dev,v4,1/2] net/mlx5: replace network to host macros http://dpdk.org/dev/patchwork/patch/28804/ > -- > Thanks, > Yongseok > ^ permalink raw reply [flat|nested] 25+ messages in thread
* [dpdk-dev] [PATCH v3 2/2] net/mlx5: fix TSO MLNX OFED 3.3 verification 2017-09-05 13:04 ` [dpdk-dev] [PATCH v2 2/3] net/mlx5: fix TSO MLNX OFED 3.3 verification Shachar Beiser 2017-09-05 13:41 ` Nélio Laranjeiro 2017-09-14 13:43 ` [dpdk-dev] [PATCH v3 1/2] net/mlx5: replace network to host macros Shachar Beiser @ 2017-09-14 13:43 ` Shachar Beiser 2017-09-17 10:42 ` [dpdk-dev] [PATCH v4 1/2] net/mlx5: replace network to host macros Shachar Beiser 2017-09-17 10:42 ` [dpdk-dev] [PATCH v4 2/2] net/mlx5: fix TSO MLNX OFED 3.3 verification Shachar Beiser 2 siblings, 2 replies; 25+ messages in thread From: Shachar Beiser @ 2017-09-14 13:43 UTC (permalink / raw) To: dev; +Cc: Shachar Beiser, Adrien Mazarguil, Nelio Laranjeiro, stable Fixes: 3cf87e68d97b ("net/mlx5: remove old MLNX OFED 3.3 verification") Cc: stable@dpdk.org Signed-off-by: Shachar Beiser <shacharbe@mellanox.com> --- There are only 2 patches left since I have squashed : [PATCH v2 3/3] “net/mlx5: fix interrupt enable return” http://dpdk.org/dev/patchwork/patch/28380/ into [dpdk-dev,v5] net/mlx5: support upstream rdma-core --- drivers/net/mlx5/mlx5_prm.h | 3 --- 1 file changed, 3 deletions(-) diff --git a/drivers/net/mlx5/mlx5_prm.h b/drivers/net/mlx5/mlx5_prm.h index 608072f..8b82b5e 100644 --- a/drivers/net/mlx5/mlx5_prm.h +++ b/drivers/net/mlx5/mlx5_prm.h @@ -89,9 +89,6 @@ /* Default max packet length to be inlined. */ #define MLX5_EMPW_MAX_INLINE_LEN (4U * MLX5_WQE_SIZE) -#ifndef HAVE_VERBS_MLX5_OPCODE_TSO -#define MLX5_OPCODE_TSO MLX5_OPCODE_LSO_MPW /* Compat with OFED 3.3. */ -#endif #define MLX5_OPC_MOD_ENHANCED_MPSW 0 #define MLX5_OPCODE_ENHANCED_MPSW 0x29 -- 1.8.3.1 ^ permalink raw reply [flat|nested] 25+ messages in thread
* [dpdk-dev] [PATCH v4 1/2] net/mlx5: replace network to host macros 2017-09-14 13:43 ` [dpdk-dev] [PATCH v3 2/2] net/mlx5: fix TSO MLNX OFED 3.3 verification Shachar Beiser @ 2017-09-17 10:42 ` Shachar Beiser 2017-09-18 17:59 ` Yongseok Koh 2017-09-19 6:30 ` Nélio Laranjeiro 2017-09-17 10:42 ` [dpdk-dev] [PATCH v4 2/2] net/mlx5: fix TSO MLNX OFED 3.3 verification Shachar Beiser 1 sibling, 2 replies; 25+ messages in thread From: Shachar Beiser @ 2017-09-17 10:42 UTC (permalink / raw) To: dev; +Cc: Shachar Beiser, Adrien Mazarguil, Nelio Laranjeiro Signed-off-by: Shachar Beiser <shacharbe@mellanox.com> --- I have rebased [PATCH v3 1/2] net/mlx5: replace network to host macros I have fixed a rebase conflict in mlx5_rxtx.h in line 609 -rte_wmb(); +rte_io_wmb(); --- drivers/net/mlx5/mlx5_mac.c | 8 +- drivers/net/mlx5/mlx5_mr.c | 2 +- drivers/net/mlx5/mlx5_rxmode.c | 8 +- drivers/net/mlx5/mlx5_rxq.c | 9 ++- drivers/net/mlx5/mlx5_rxtx.c | 137 ++++++++++++++++++++--------------- drivers/net/mlx5/mlx5_rxtx.h | 10 +-- drivers/net/mlx5/mlx5_rxtx_vec_sse.c | 12 +-- 7 files changed, 107 insertions(+), 79 deletions(-) diff --git a/drivers/net/mlx5/mlx5_mac.c b/drivers/net/mlx5/mlx5_mac.c index 45d23e4..b3c3fa2 100644 --- a/drivers/net/mlx5/mlx5_mac.c +++ b/drivers/net/mlx5/mlx5_mac.c @@ -263,11 +263,15 @@ (*mac)[0], (*mac)[1], (*mac)[2], (*mac)[3], (*mac)[4], (*mac)[5] }, - .vlan_tag = (vlan_enabled ? htons(vlan_id) : 0), + .vlan_tag = (vlan_enabled ? + rte_cpu_to_be_16(vlan_id) + : 0), }, .mask = { .dst_mac = "\xff\xff\xff\xff\xff\xff", - .vlan_tag = (vlan_enabled ? htons(0xfff) : 0), + .vlan_tag = (vlan_enabled ? 
+ rte_cpu_to_be_16(0xfff) : + 0), }, }; DEBUG("%p: adding MAC address %02x:%02x:%02x:%02x:%02x:%02x index %u" diff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c index 9593830..9a9f73a 100644 --- a/drivers/net/mlx5/mlx5_mr.c +++ b/drivers/net/mlx5/mlx5_mr.c @@ -203,7 +203,7 @@ struct ibv_mr * txq_ctrl->txq.mp2mr[idx].start = (uintptr_t)mr->addr; txq_ctrl->txq.mp2mr[idx].end = (uintptr_t)mr->addr + mr->length; txq_ctrl->txq.mp2mr[idx].mr = mr; - txq_ctrl->txq.mp2mr[idx].lkey = htonl(mr->lkey); + txq_ctrl->txq.mp2mr[idx].lkey = rte_cpu_to_be_32(mr->lkey); DEBUG("%p: new MR lkey for MP \"%s\" (%p): 0x%08" PRIu32, (void *)txq_ctrl, mp->name, (void *)mp, txq_ctrl->txq.mp2mr[idx].lkey); diff --git a/drivers/net/mlx5/mlx5_rxmode.c b/drivers/net/mlx5/mlx5_rxmode.c index 4a51e47..db2e05b 100644 --- a/drivers/net/mlx5/mlx5_rxmode.c +++ b/drivers/net/mlx5/mlx5_rxmode.c @@ -159,14 +159,18 @@ mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], }, - .vlan_tag = (vlan_enabled ? htons(vlan_id) : 0), + .vlan_tag = (vlan_enabled ? + rte_cpu_to_be_16(vlan_id) : + 0), }, .mask = { .dst_mac = { mask[0], mask[1], mask[2], mask[3], mask[4], mask[5], }, - .vlan_tag = (vlan_enabled ? htons(0xfff) : 0), + .vlan_tag = (vlan_enabled ? + rte_cpu_to_be_16(0xfff) : + 0), }, }; diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c index 35c5cb4..437dc02 100644 --- a/drivers/net/mlx5/mlx5_rxq.c +++ b/drivers/net/mlx5/mlx5_rxq.c @@ -672,9 +672,10 @@ /* scat->addr must be able to store a pointer. */ assert(sizeof(scat->addr) >= sizeof(uintptr_t)); *scat = (struct mlx5_wqe_data_seg){ - .addr = htonll(rte_pktmbuf_mtod(buf, uintptr_t)), - .byte_count = htonl(DATA_LEN(buf)), - .lkey = htonl(rxq_ctrl->mr->lkey), + .addr = + rte_cpu_to_be_64(rte_pktmbuf_mtod(buf, uintptr_t)), + .byte_count = rte_cpu_to_be_32(DATA_LEN(buf)), + .lkey = rte_cpu_to_be_32(rxq_ctrl->mr->lkey), }; (*rxq_ctrl->rxq.elts)[i] = buf; } @@ -1077,7 +1078,7 @@ /* Update doorbell counter. 
*/ rxq_ctrl->rxq.rq_ci = desc >> rxq_ctrl->rxq.sges_n; rte_wmb(); - *rxq_ctrl->rxq.rq_db = htonl(rxq_ctrl->rxq.rq_ci); + *rxq_ctrl->rxq.rq_db = rte_cpu_to_be_32(rxq_ctrl->rxq.rq_ci); DEBUG("%p: rxq updated with %p", (void *)rxq_ctrl, (void *)&tmpl); assert(ret == 0); return 0; diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c index bc1f85c..3f1e2f4 100644 --- a/drivers/net/mlx5/mlx5_rxtx.c +++ b/drivers/net/mlx5/mlx5_rxtx.c @@ -306,7 +306,7 @@ op_own = cqe->op_own; if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED) - n = ntohl(cqe->byte_cnt); + n = rte_be_to_cpu_32(cqe->byte_cnt); else n = 1; cq_ci += n; @@ -436,7 +436,8 @@ raw = ((uint8_t *)(uintptr_t)wqe) + 2 * MLX5_WQE_DWORD_SIZE; /* Replace the Ethernet type by the VLAN if necessary. */ if (buf->ol_flags & PKT_TX_VLAN_PKT) { - uint32_t vlan = htonl(0x81000000 | buf->vlan_tci); + uint32_t vlan = rte_cpu_to_be_32(0x81000000 | + buf->vlan_tci); unsigned int len = 2 * ETHER_ADDR_LEN - 2; addr += 2; @@ -511,8 +512,10 @@ } else { /* NOP WQE. */ wqe->ctrl = (rte_v128u32_t){ - htonl(txq->wqe_ci << 8), - htonl(txq->qp_num_8s | 1), + rte_cpu_to_be_32( + txq->wqe_ci << 8), + rte_cpu_to_be_32( + txq->qp_num_8s | 1), 0, 0, }; @@ -551,7 +554,14 @@ break; max_wqe -= n; if (tso) { - inl = htonl(copy_b | MLX5_INLINE_SEG); + uint32_t inl = + rte_cpu_to_be_32(copy_b | + MLX5_INLINE_SEG); + + pkt_inline_sz = + MLX5_WQE_DS(tso_header_sz) * + MLX5_WQE_DWORD_SIZE; + rte_memcpy((void *)raw, (void *)&inl, sizeof(inl)); raw += sizeof(inl); @@ -600,9 +610,9 @@ ds = 3; use_dseg: /* Add the remaining packet as a simple ds. */ - naddr = htonll(addr); + naddr = rte_cpu_to_be_64(addr); *dseg = (rte_v128u32_t){ - htonl(length), + rte_cpu_to_be_32(length), mlx5_tx_mb2mr(txq, buf), naddr, naddr >> 32, @@ -639,9 +649,9 @@ total_length += length; #endif /* Store segment information. 
*/ - naddr = htonll(rte_pktmbuf_mtod(buf, uintptr_t)); + naddr = rte_cpu_to_be_64(rte_pktmbuf_mtod(buf, uintptr_t)); *dseg = (rte_v128u32_t){ - htonl(length), + rte_cpu_to_be_32(length), mlx5_tx_mb2mr(txq, buf), naddr, naddr >> 32, @@ -664,21 +674,23 @@ /* Initialize known and common part of the WQE structure. */ if (tso) { wqe->ctrl = (rte_v128u32_t){ - htonl((txq->wqe_ci << 8) | MLX5_OPCODE_TSO), - htonl(txq->qp_num_8s | ds), + rte_cpu_to_be_32((txq->wqe_ci << 8) | + MLX5_OPCODE_TSO), + rte_cpu_to_be_32(txq->qp_num_8s | ds), 0, 0, }; wqe->eseg = (rte_v128u32_t){ 0, - cs_flags | (htons(tso_segsz) << 16), + cs_flags | (rte_cpu_to_be_16(tso_segsz) << 16), 0, - (ehdr << 16) | htons(tso_header_sz), + (ehdr << 16) | rte_cpu_to_be_16(tso_header_sz), }; } else { wqe->ctrl = (rte_v128u32_t){ - htonl((txq->wqe_ci << 8) | MLX5_OPCODE_SEND), - htonl(txq->qp_num_8s | ds), + rte_cpu_to_be_32((txq->wqe_ci << 8) | + MLX5_OPCODE_SEND), + rte_cpu_to_be_32(txq->qp_num_8s | ds), 0, 0, }; @@ -686,7 +698,7 @@ 0, cs_flags, 0, - (ehdr << 16) | htons(pkt_inline_sz), + (ehdr << 16) | rte_cpu_to_be_16(pkt_inline_sz), }; } next_wqe: @@ -706,7 +718,7 @@ comp = txq->elts_comp + i + j + k; if (comp >= MLX5_TX_COMP_THRESH) { /* Request completion on last WQE. */ - last_wqe->ctrl2 = htonl(8); + last_wqe->ctrl2 = rte_cpu_to_be_32(8); /* Save elts_head in unused "immediate" field of WQE. 
*/ last_wqe->ctrl3 = txq->elts_head; txq->elts_comp = 0; @@ -745,13 +757,14 @@ mpw->len = length; mpw->total_len = 0; mpw->wqe = (volatile struct mlx5_wqe *)tx_mlx5_wqe(txq, idx); - mpw->wqe->eseg.mss = htons(length); + mpw->wqe->eseg.mss = rte_cpu_to_be_16(length); mpw->wqe->eseg.inline_hdr_sz = 0; mpw->wqe->eseg.rsvd0 = 0; mpw->wqe->eseg.rsvd1 = 0; mpw->wqe->eseg.rsvd2 = 0; - mpw->wqe->ctrl[0] = htonl((MLX5_OPC_MOD_MPW << 24) | - (txq->wqe_ci << 8) | MLX5_OPCODE_TSO); + mpw->wqe->ctrl[0] = rte_cpu_to_be_32((MLX5_OPC_MOD_MPW << 24) | + (txq->wqe_ci << 8) | + MLX5_OPCODE_TSO); mpw->wqe->ctrl[2] = 0; mpw->wqe->ctrl[3] = 0; mpw->data.dseg[0] = (volatile struct mlx5_wqe_data_seg *) @@ -780,7 +793,7 @@ * Store size in multiple of 16 bytes. Control and Ethernet segments * count as 2. */ - mpw->wqe->ctrl[1] = htonl(txq->qp_num_8s | (2 + num)); + mpw->wqe->ctrl[1] = rte_cpu_to_be_32(txq->qp_num_8s | (2 + num)); mpw->state = MLX5_MPW_STATE_CLOSED; if (num < 3) ++txq->wqe_ci; @@ -889,9 +902,9 @@ dseg = mpw.data.dseg[mpw.pkts_n]; addr = rte_pktmbuf_mtod(buf, uintptr_t); *dseg = (struct mlx5_wqe_data_seg){ - .byte_count = htonl(DATA_LEN(buf)), + .byte_count = rte_cpu_to_be_32(DATA_LEN(buf)), .lkey = mlx5_tx_mb2mr(txq, buf), - .addr = htonll(addr), + .addr = rte_cpu_to_be_64(addr), }; #if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG) length += DATA_LEN(buf); @@ -919,7 +932,7 @@ volatile struct mlx5_wqe *wqe = mpw.wqe; /* Request completion on last WQE. */ - wqe->ctrl[2] = htonl(8); + wqe->ctrl[2] = rte_cpu_to_be_32(8); /* Save elts_head in unused "immediate" field of WQE. 
*/ wqe->ctrl[3] = elts_head; txq->elts_comp = 0; @@ -959,12 +972,12 @@ mpw->len = length; mpw->total_len = 0; mpw->wqe = (volatile struct mlx5_wqe *)tx_mlx5_wqe(txq, idx); - mpw->wqe->ctrl[0] = htonl((MLX5_OPC_MOD_MPW << 24) | - (txq->wqe_ci << 8) | - MLX5_OPCODE_TSO); + mpw->wqe->ctrl[0] = rte_cpu_to_be_32((MLX5_OPC_MOD_MPW << 24) | + (txq->wqe_ci << 8) | + MLX5_OPCODE_TSO); mpw->wqe->ctrl[2] = 0; mpw->wqe->ctrl[3] = 0; - mpw->wqe->eseg.mss = htons(length); + mpw->wqe->eseg.mss = rte_cpu_to_be_16(length); mpw->wqe->eseg.inline_hdr_sz = 0; mpw->wqe->eseg.cs_flags = 0; mpw->wqe->eseg.rsvd0 = 0; @@ -995,9 +1008,10 @@ * Store size in multiple of 16 bytes. Control and Ethernet segments * count as 2. */ - mpw->wqe->ctrl[1] = htonl(txq->qp_num_8s | MLX5_WQE_DS(size)); + mpw->wqe->ctrl[1] = rte_cpu_to_be_32(txq->qp_num_8s | + MLX5_WQE_DS(size)); mpw->state = MLX5_MPW_STATE_CLOSED; - inl->byte_cnt = htonl(mpw->total_len | MLX5_INLINE_SEG); + inl->byte_cnt = rte_cpu_to_be_32(mpw->total_len | MLX5_INLINE_SEG); txq->wqe_ci += (size + (MLX5_WQE_SIZE - 1)) / MLX5_WQE_SIZE; } @@ -1137,9 +1151,10 @@ dseg = mpw.data.dseg[mpw.pkts_n]; addr = rte_pktmbuf_mtod(buf, uintptr_t); *dseg = (struct mlx5_wqe_data_seg){ - .byte_count = htonl(DATA_LEN(buf)), + .byte_count = + rte_cpu_to_be_32(DATA_LEN(buf)), .lkey = mlx5_tx_mb2mr(txq, buf), - .addr = htonll(addr), + .addr = rte_cpu_to_be_64(addr), }; #if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG) length += DATA_LEN(buf); @@ -1211,7 +1226,7 @@ volatile struct mlx5_wqe *wqe = mpw.wqe; /* Request completion on last WQE. */ - wqe->ctrl[2] = htonl(8); + wqe->ctrl[2] = rte_cpu_to_be_32(8); /* Save elts_head in unused "immediate" field of WQE. 
*/ wqe->ctrl[3] = elts_head; txq->elts_comp = 0; @@ -1251,9 +1266,10 @@ mpw->pkts_n = 0; mpw->total_len = sizeof(struct mlx5_wqe); mpw->wqe = (volatile struct mlx5_wqe *)tx_mlx5_wqe(txq, idx); - mpw->wqe->ctrl[0] = htonl((MLX5_OPC_MOD_ENHANCED_MPSW << 24) | - (txq->wqe_ci << 8) | - MLX5_OPCODE_ENHANCED_MPSW); + mpw->wqe->ctrl[0] = + rte_cpu_to_be_32((MLX5_OPC_MOD_ENHANCED_MPSW << 24) | + (txq->wqe_ci << 8) | + MLX5_OPCODE_ENHANCED_MPSW); mpw->wqe->ctrl[2] = 0; mpw->wqe->ctrl[3] = 0; memset((void *)(uintptr_t)&mpw->wqe->eseg, 0, MLX5_WQE_DWORD_SIZE); @@ -1261,9 +1277,9 @@ uintptr_t addr = (uintptr_t)(mpw->wqe + 1); /* Pad the first 2 DWORDs with zero-length inline header. */ - *(volatile uint32_t *)addr = htonl(MLX5_INLINE_SEG); + *(volatile uint32_t *)addr = rte_cpu_to_be_32(MLX5_INLINE_SEG); *(volatile uint32_t *)(addr + MLX5_WQE_DWORD_SIZE) = - htonl(MLX5_INLINE_SEG); + rte_cpu_to_be_32(MLX5_INLINE_SEG); mpw->total_len += 2 * MLX5_WQE_DWORD_SIZE; /* Start from the next WQEBB. */ mpw->data.raw = (volatile void *)(tx_mlx5_wqe(txq, idx + 1)); @@ -1291,7 +1307,8 @@ /* Store size in multiple of 16 bytes. Control and Ethernet segments * count as 2. 
*/ - mpw->wqe->ctrl[1] = htonl(txq->qp_num_8s | MLX5_WQE_DS(mpw->total_len)); + mpw->wqe->ctrl[1] = rte_cpu_to_be_32(txq->qp_num_8s | + MLX5_WQE_DS(mpw->total_len)); mpw->state = MLX5_MPW_STATE_CLOSED; ret = (mpw->total_len + (MLX5_WQE_SIZE - 1)) / MLX5_WQE_SIZE; txq->wqe_ci += ret; @@ -1446,9 +1463,10 @@ dseg = mpw.data.dseg[mpw.pkts_n]; addr = rte_pktmbuf_mtod(buf, uintptr_t); *dseg = (struct mlx5_wqe_data_seg){ - .byte_count = htonl(DATA_LEN(buf)), + .byte_count = rte_cpu_to_be_32( + DATA_LEN(buf)), .lkey = mlx5_tx_mb2mr(txq, buf), - .addr = htonll(addr), + .addr = rte_cpu_to_be_64(addr), }; #if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG) length += DATA_LEN(buf); @@ -1471,7 +1489,7 @@ assert(mpw.state == MLX5_MPW_ENHANCED_STATE_OPENED); assert(length == DATA_LEN(buf)); - inl_hdr = htonl(length | MLX5_INLINE_SEG); + inl_hdr = rte_cpu_to_be_32(length | MLX5_INLINE_SEG); addr = rte_pktmbuf_mtod(buf, uintptr_t); mpw.data.raw = (volatile void *) ((uintptr_t)mpw.data.raw + inl_pad); @@ -1527,9 +1545,9 @@ for (n = 0; n * RTE_CACHE_LINE_SIZE < length; n++) rte_prefetch2((void *)(addr + n * RTE_CACHE_LINE_SIZE)); - naddr = htonll(addr); + naddr = rte_cpu_to_be_64(addr); *dseg = (rte_v128u32_t) { - htonl(length), + rte_cpu_to_be_32(length), mlx5_tx_mb2mr(txq, buf), naddr, naddr >> 32, @@ -1557,7 +1575,7 @@ volatile struct mlx5_wqe *wqe = mpw.wqe; /* Request completion on last WQE. */ - wqe->ctrl[2] = htonl(8); + wqe->ctrl[2] = rte_cpu_to_be_32(8); /* Save elts_head in unused "immediate" field of WQE. 
*/ wqe->ctrl[3] = elts_head; txq->elts_comp = 0; @@ -1641,8 +1659,8 @@ (volatile struct mlx5_mini_cqe8 (*)[8]) (uintptr_t)(&(*rxq->cqes)[zip->ca & cqe_cnt].pkt_info); - len = ntohl((*mc)[zip->ai & 7].byte_cnt); - *rss_hash = ntohl((*mc)[zip->ai & 7].rx_hash_result); + len = rte_be_to_cpu_32((*mc)[zip->ai & 7].byte_cnt); + *rss_hash = rte_be_to_cpu_32((*mc)[zip->ai & 7].rx_hash_result); if ((++zip->ai & 7) == 0) { /* Invalidate consumed CQEs */ idx = zip->ca; @@ -1690,7 +1708,7 @@ cqe_cnt].pkt_info); /* Fix endianness. */ - zip->cqe_cnt = ntohl(cqe->byte_cnt); + zip->cqe_cnt = rte_be_to_cpu_32(cqe->byte_cnt); /* * Current mini array position is the one returned by * check_cqe64(). @@ -1705,8 +1723,8 @@ --rxq->cq_ci; zip->cq_ci = rxq->cq_ci + zip->cqe_cnt; /* Get packet size to return. */ - len = ntohl((*mc)[0].byte_cnt); - *rss_hash = ntohl((*mc)[0].rx_hash_result); + len = rte_be_to_cpu_32((*mc)[0].byte_cnt); + *rss_hash = rte_be_to_cpu_32((*mc)[0].rx_hash_result); zip->ai = 1; /* Prefetch all the entries to be invalidated */ idx = zip->ca; @@ -1716,8 +1734,8 @@ ++idx; } } else { - len = ntohl(cqe->byte_cnt); - *rss_hash = ntohl(cqe->rx_hash_res); + len = rte_be_to_cpu_32(cqe->byte_cnt); + *rss_hash = rte_be_to_cpu_32(cqe->rx_hash_res); } /* Error while receiving packet. 
*/ if (unlikely(MLX5_CQE_OPCODE(op_own) == MLX5_CQE_RESP_ERR)) @@ -1741,7 +1759,7 @@ rxq_cq_to_ol_flags(struct rxq *rxq, volatile struct mlx5_cqe *cqe) { uint32_t ol_flags = 0; - uint16_t flags = ntohs(cqe->hdr_type_etc); + uint16_t flags = rte_be_to_cpu_16(cqe->hdr_type_etc); ol_flags = TRANSPOSE(flags, @@ -1848,7 +1866,7 @@ MLX5_FLOW_MARK_IS_VALID(cqe->sop_drop_qpn)) { pkt->ol_flags |= PKT_RX_FDIR; if (cqe->sop_drop_qpn != - htonl(MLX5_FLOW_MARK_DEFAULT)) { + rte_cpu_to_be_32(MLX5_FLOW_MARK_DEFAULT)) { uint32_t mark = cqe->sop_drop_qpn; pkt->ol_flags |= PKT_RX_FDIR_ID; @@ -1860,10 +1878,11 @@ pkt->ol_flags |= rxq_cq_to_ol_flags(rxq, cqe); if (rxq->vlan_strip && (cqe->hdr_type_etc & - htons(MLX5_CQE_VLAN_STRIPPED))) { + rte_cpu_to_be_16(MLX5_CQE_VLAN_STRIPPED))) { pkt->ol_flags |= PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED; - pkt->vlan_tci = ntohs(cqe->vlan_info); + pkt->vlan_tci = + rte_be_to_cpu_16(cqe->vlan_info); } if (rxq->crc_present) len -= ETHER_CRC_LEN; @@ -1879,7 +1898,7 @@ * of the buffers are already known, only the buffer address * changes. */ - wqe->addr = htonll(rte_pktmbuf_mtod(rep, uintptr_t)); + wqe->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(rep, uintptr_t)); if (len > DATA_LEN(seg)) { len -= DATA_LEN(seg); ++NB_SEGS(pkt); @@ -1907,9 +1926,9 @@ /* Update the consumer index. */ rxq->rq_ci = rq_ci >> sges_n; rte_wmb(); - *rxq->cq_db = htonl(rxq->cq_ci); + *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci); rte_wmb(); - *rxq->rq_db = htonl(rxq->rq_ci); + *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci); #ifdef MLX5_PMD_SOFT_COUNTERS /* Increment packets counter. */ rxq->stats.ipackets += i; diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h index 107ada0..9375aa8 100644 --- a/drivers/net/mlx5/mlx5_rxtx.h +++ b/drivers/net/mlx5/mlx5_rxtx.h @@ -269,7 +269,7 @@ struct txq { uintptr_t start; /* Start address of MR */ uintptr_t end; /* End address of MR */ struct ibv_mr *mr; /* Memory Region (for mp). 
*/ - uint32_t lkey; /* htonl(mr->lkey) */ + uint32_t lkey; /* rte_cpu_to_be_32(mr->lkey) */ } mp2mr[MLX5_PMD_TX_MP_CACHE]; /* MP to MR translation table. */ uint16_t mr_cache_idx; /* Index of last hit entry. */ struct rte_mbuf *(*elts)[]; /* TX elements. */ @@ -492,7 +492,7 @@ int mlx5_tx_queue_setup(struct rte_eth_dev *, uint16_t, uint16_t, unsigned int, } #endif /* NDEBUG */ ++cq_ci; - txq->wqe_pi = ntohs(cqe->wqe_counter); + txq->wqe_pi = rte_be_to_cpu_16(cqe->wqe_counter); ctrl = (volatile struct mlx5_wqe_ctrl *) tx_mlx5_wqe(txq, txq->wqe_pi); elts_tail = ctrl->ctrl3; @@ -530,7 +530,7 @@ int mlx5_tx_queue_setup(struct rte_eth_dev *, uint16_t, uint16_t, unsigned int, txq->elts_tail = elts_tail; /* Update the consumer index. */ rte_wmb(); - *txq->cq_db = htonl(cq_ci); + *txq->cq_db = rte_cpu_to_be_32(cq_ci); } /** @@ -581,7 +581,7 @@ int mlx5_tx_queue_setup(struct rte_eth_dev *, uint16_t, uint16_t, unsigned int, if (txq->mp2mr[i].start <= addr && txq->mp2mr[i].end >= addr) { assert(txq->mp2mr[i].lkey != (uint32_t)-1); - assert(htonl(txq->mp2mr[i].mr->lkey) == + assert(rte_cpu_to_be_32(txq->mp2mr[i].mr->lkey) == txq->mp2mr[i].lkey); txq->mr_cache_idx = i; return txq->mp2mr[i].lkey; @@ -606,7 +606,7 @@ int mlx5_tx_queue_setup(struct rte_eth_dev *, uint16_t, uint16_t, unsigned int, volatile uint64_t *src = ((volatile uint64_t *)wqe); rte_io_wmb(); - *txq->qp_db = htonl(txq->wqe_ci); + *txq->qp_db = rte_cpu_to_be_32(txq->wqe_ci); /* Ensure ordering between DB record and BF copy. */ rte_wmb(); *dst = *src; diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_sse.c b/drivers/net/mlx5/mlx5_rxtx_vec_sse.c index 39c7325..aff3359 100644 --- a/drivers/net/mlx5/mlx5_rxtx_vec_sse.c +++ b/drivers/net/mlx5/mlx5_rxtx_vec_sse.c @@ -293,7 +293,7 @@ /* Fill ESEG in the header. 
*/ _mm_store_si128(t_wqe + 1, _mm_set_epi16(0, 0, 0, 0, - htons(len), cs_flags, + rte_cpu_to_be_16(len), cs_flags, 0, 0)); txq->wqe_ci = wqe_ci; } @@ -302,7 +302,7 @@ txq->elts_comp += (uint16_t)(elts_head - txq->elts_head); txq->elts_head = elts_head; if (txq->elts_comp >= MLX5_TX_COMP_THRESH) { - wqe->ctrl[2] = htonl(8); + wqe->ctrl[2] = rte_cpu_to_be_32(8); wqe->ctrl[3] = txq->elts_head; txq->elts_comp = 0; ++txq->cq_pi; @@ -564,11 +564,11 @@ return; } for (i = 0; i < n; ++i) - wq[i].addr = htonll((uintptr_t)elts[i]->buf_addr + - RTE_PKTMBUF_HEADROOM); + wq[i].addr = rte_cpu_to_be_64((uintptr_t)elts[i]->buf_addr + + RTE_PKTMBUF_HEADROOM); rxq->rq_ci += n; rte_wmb(); - *rxq->rq_db = htonl(rxq->rq_ci); + *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci); } /** @@ -1251,7 +1251,7 @@ } } rte_wmb(); - *rxq->cq_db = htonl(rxq->cq_ci); + *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci); return rcvd_pkt; } -- 1.8.3.1 ^ permalink raw reply [flat|nested] 25+ messages in thread
* Re: [dpdk-dev] [PATCH v4 1/2] net/mlx5: replace network to host macros 2017-09-17 10:42 ` [dpdk-dev] [PATCH v4 1/2] net/mlx5: replace network to host macros Shachar Beiser @ 2017-09-18 17:59 ` Yongseok Koh 2017-09-19 6:30 ` Nélio Laranjeiro 1 sibling, 0 replies; 25+ messages in thread From: Yongseok Koh @ 2017-09-18 17:59 UTC (permalink / raw) To: Shachar Beiser; +Cc: dev, Adrien Mazarguil, Nélio Laranjeiro > On Sep 17, 2017, at 3:42 AM, Shachar Beiser <shacharbe@mellanox.com> wrote: > > Signed-off-by: Shachar Beiser <shacharbe@mellanox.com> > --- > I have rebased [PATCH v3 1/2] net/mlx5: replace network to host macros > I have fixed a rebase conflict in mlx5_rxtx.h in line 609 > -rte_wmb(); > +rte_io_wmb(); > > --- Acked-by: Yongseok Koh <yskoh@mellanox.com> Thanks Yongseok ^ permalink raw reply [flat|nested] 25+ messages in thread
* Re: [dpdk-dev] [PATCH v4 1/2] net/mlx5: replace network to host macros 2017-09-17 10:42 ` [dpdk-dev] [PATCH v4 1/2] net/mlx5: replace network to host macros Shachar Beiser 2017-09-18 17:59 ` Yongseok Koh @ 2017-09-19 6:30 ` Nélio Laranjeiro 1 sibling, 0 replies; 25+ messages in thread From: Nélio Laranjeiro @ 2017-09-19 6:30 UTC (permalink / raw) To: Shachar Beiser; +Cc: dev, Adrien Mazarguil For the series Acked-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com> -- Nélio Laranjeiro 6WIND ^ permalink raw reply [flat|nested] 25+ messages in thread
* [dpdk-dev] [PATCH v4 2/2] net/mlx5: fix TSO MLNX OFED 3.3 verification 2017-09-14 13:43 ` [dpdk-dev] [PATCH v3 2/2] net/mlx5: fix TSO MLNX OFED 3.3 verification Shachar Beiser 2017-09-17 10:42 ` [dpdk-dev] [PATCH v4 1/2] net/mlx5: replace network to host macros Shachar Beiser @ 2017-09-17 10:42 ` Shachar Beiser 2017-09-18 17:52 ` Yongseok Koh 1 sibling, 1 reply; 25+ messages in thread From: Shachar Beiser @ 2017-09-17 10:42 UTC (permalink / raw) To: dev; +Cc: Shachar Beiser, Adrien Mazarguil, Nelio Laranjeiro, stable Fixes: 3cf87e68d97b ("net/mlx5: remove old MLNX OFED 3.3 verification") Cc: stable@dpdk.org Signed-off-by: Shachar Beiser <shacharbe@mellanox.com> --- I have rebased last patch [PATCH v3 2/2] net/mlx5: fix TSO MLNX OFED 3.3 verification --- drivers/net/mlx5/mlx5_prm.h | 3 --- 1 file changed, 3 deletions(-) diff --git a/drivers/net/mlx5/mlx5_prm.h b/drivers/net/mlx5/mlx5_prm.h index c61e4d8..e00be81 100644 --- a/drivers/net/mlx5/mlx5_prm.h +++ b/drivers/net/mlx5/mlx5_prm.h @@ -89,9 +89,6 @@ /* Default max packet length to be inlined. */ #define MLX5_EMPW_MAX_INLINE_LEN (4U * MLX5_WQE_SIZE) -#ifndef HAVE_VERBS_MLX5_OPCODE_TSO -#define MLX5_OPCODE_TSO MLX5_OPCODE_LSO_MPW /* Compat with OFED 3.3. */ -#endif #define MLX5_OPC_MOD_ENHANCED_MPSW 0 #define MLX5_OPCODE_ENHANCED_MPSW 0x29 -- 1.8.3.1 ^ permalink raw reply [flat|nested] 25+ messages in thread
* Re: [dpdk-dev] [PATCH v4 2/2] net/mlx5: fix TSO MLNX OFED 3.3 verification 2017-09-17 10:42 ` [dpdk-dev] [PATCH v4 2/2] net/mlx5: fix TSO MLNX OFED 3.3 verification Shachar Beiser @ 2017-09-18 17:52 ` Yongseok Koh 2017-09-22 18:02 ` [dpdk-dev] [dpdk-stable] " Ferruh Yigit 0 siblings, 1 reply; 25+ messages in thread From: Yongseok Koh @ 2017-09-18 17:52 UTC (permalink / raw) To: Shachar Beiser; +Cc: dev, Adrien Mazarguil, Nélio Laranjeiro, stable > On Sep 17, 2017, at 3:42 AM, Shachar Beiser <shacharbe@mellanox.com> wrote: > > Fixes: 3cf87e68d97b ("net/mlx5: remove old MLNX OFED 3.3 verification") > Cc: stable@dpdk.org > > Signed-off-by: Shachar Beiser <shacharbe@mellanox.com> > --- > I have rebased last patch [PATCH v3 2/2] net/mlx5: fix TSO MLNX OFED 3.3 verification > --- I think this patch had been acked by Nelio since v2. Then you can keep 'Acked-by' tag through updated versions unless there's any change in the patch. Acked-by: Yongseok Koh <yskoh@mellanox.com> Thanks Yongseok ^ permalink raw reply [flat|nested] 25+ messages in thread
* Re: [dpdk-dev] [dpdk-stable] [PATCH v4 2/2] net/mlx5: fix TSO MLNX OFED 3.3 verification 2017-09-18 17:52 ` Yongseok Koh @ 2017-09-22 18:02 ` Ferruh Yigit 0 siblings, 0 replies; 25+ messages in thread From: Ferruh Yigit @ 2017-09-22 18:02 UTC (permalink / raw) To: Yongseok Koh, Shachar Beiser Cc: dev, Adrien Mazarguil, Nélio Laranjeiro, stable On 9/18/2017 6:52 PM, Yongseok Koh wrote: > >> On Sep 17, 2017, at 3:42 AM, Shachar Beiser <shacharbe@mellanox.com> wrote: >> >> Fixes: 3cf87e68d97b ("net/mlx5: remove old MLNX OFED 3.3 verification") >> Cc: stable@dpdk.org >> >> Signed-off-by: Shachar Beiser <shacharbe@mellanox.com> >> --- >> I have rebased last patch [PATCH v3 2/2] net/mlx5: fix TSO MLNX OFED 3.3 verification >> --- > I think this patch had been acked by Nelio since v2. Then you can keep > 'Acked-by' tag through updated versions unless there's any change in the patch. > > Acked-by: Yongseok Koh <yskoh@mellanox.com> This commit is fixing one that is within this release, so this commit squashed into relevant one. > > Thanks > Yongseok > ^ permalink raw reply [flat|nested] 25+ messages in thread
* [dpdk-dev] [PATCH v2 3/3] net/mlx5: fix interrupt enable return value 2017-09-04 11:48 [dpdk-dev] [PATCH 1/3] net/mlx5: replace network to host macros Shachar Beiser ` (4 preceding siblings ...) 2017-09-05 13:04 ` [dpdk-dev] [PATCH v2 2/3] net/mlx5: fix TSO MLNX OFED 3.3 verification Shachar Beiser @ 2017-09-05 13:04 ` Shachar Beiser 2017-09-05 13:27 ` Adrien Mazarguil 5 siblings, 1 reply; 25+ messages in thread From: Shachar Beiser @ 2017-09-05 13:04 UTC (permalink / raw) To: dev; +Cc: Shachar Beiser, Adrien Mazarguil, Nelio Laranjeiro, stable return value is sometimes returned uninitialized Fixes: e1016cb73383 ("net/mlx5: fix Rx interrupts management") Fixes: b18042fb8f49 ("net/mlx5: fix misplaced Rx interrupts functions") Cc: adrien.mazarguil@6wind.com Cc: stable@dpdk.org Signed-off-by: Shachar Beiser <shacharbe@mellanox.com> --- drivers/net/mlx5/mlx5_rxq.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c index 437dc02..24887fb 100644 --- a/drivers/net/mlx5/mlx5_rxq.c +++ b/drivers/net/mlx5/mlx5_rxq.c @@ -1330,7 +1330,7 @@ struct priv *priv = mlx5_get_priv(dev); struct rxq *rxq = (*priv->rxqs)[rx_queue_id]; struct rxq_ctrl *rxq_ctrl = container_of(rxq, struct rxq_ctrl, rxq); - int ret; + int ret = 0; if (!rxq || !rxq_ctrl->channel) { ret = EINVAL; -- 1.8.3.1 ^ permalink raw reply [flat|nested] 25+ messages in thread
* Re: [dpdk-dev] [PATCH v2 3/3] net/mlx5: fix interrupt enable return value 2017-09-05 13:04 ` [dpdk-dev] [PATCH v2 3/3] net/mlx5: fix interrupt enable return value Shachar Beiser @ 2017-09-05 13:27 ` Adrien Mazarguil 2017-09-06 10:54 ` Shachar Beiser 0 siblings, 1 reply; 25+ messages in thread From: Adrien Mazarguil @ 2017-09-05 13:27 UTC (permalink / raw) To: Shachar Beiser; +Cc: dev, Nelio Laranjeiro, stable Hi Shachar, On Tue, Sep 05, 2017 at 01:04:38PM +0000, Shachar Beiser wrote: > return value is sometimes returned uninitialized > > Fixes: e1016cb73383 ("net/mlx5: fix Rx interrupts management") > Fixes: b18042fb8f49 ("net/mlx5: fix misplaced Rx interrupts functions") > > Cc: adrien.mazarguil@6wind.com > Cc: stable@dpdk.org > > Signed-off-by: Shachar Beiser <shacharbe@mellanox.com> Looks like in both commits, ret is properly initialized so I'm wondering if the fixes line is right? Did you even get a compilation error? Otherwise, you should drop this patch from the series. > --- > drivers/net/mlx5/mlx5_rxq.c | 2 +- > 1 file changed, 1 insertion(+), 1 deletion(-) > > diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c > index 437dc02..24887fb 100644 > --- a/drivers/net/mlx5/mlx5_rxq.c > +++ b/drivers/net/mlx5/mlx5_rxq.c > @@ -1330,7 +1330,7 @@ > struct priv *priv = mlx5_get_priv(dev); > struct rxq *rxq = (*priv->rxqs)[rx_queue_id]; > struct rxq_ctrl *rxq_ctrl = container_of(rxq, struct rxq_ctrl, rxq); > - int ret; > + int ret = 0; > > if (!rxq || !rxq_ctrl->channel) { > ret = EINVAL; > -- > 1.8.3.1 > -- Adrien Mazarguil 6WIND ^ permalink raw reply [flat|nested] 25+ messages in thread
* Re: [dpdk-dev] [PATCH v2 3/3] net/mlx5: fix interrupt enable return value 2017-09-05 13:27 ` Adrien Mazarguil @ 2017-09-06 10:54 ` Shachar Beiser 0 siblings, 0 replies; 25+ messages in thread From: Shachar Beiser @ 2017-09-06 10:54 UTC (permalink / raw) To: Adrien Mazarguil; +Cc: dev, Nélio Laranjeiro, stable Hi, The problem was that the value ret was not set to 0. As a result we got a warning message: WARN("unable to arm interrupt on rx queue %d", rx_queue_id); The fix is ret = 0; I was looking with Nelio for the right commits that this bug fixes. I am pretty sure that this ret value was not initialized. -Shachar Beiser. > -----Original Message----- > From: Adrien Mazarguil [mailto:adrien.mazarguil@6wind.com] > Sent: Tuesday, September 5, 2017 4:27 PM > To: Shachar Beiser <shacharbe@mellanox.com> > Cc: dev@dpdk.org; Nélio Laranjeiro <nelio.laranjeiro@6wind.com>; > stable@dpdk.org > Subject: Re: [PATCH v2 3/3] net/mlx5: fix interrupt enable return value > > Hi Shachar, > > On Tue, Sep 05, 2017 at 01:04:38PM +0000, Shachar Beiser wrote: > > return value is sometimes returned uninitialized > > > > Fixes: e1016cb73383 ("net/mlx5: fix Rx interrupts management") > > Fixes: b18042fb8f49 ("net/mlx5: fix misplaced Rx interrupts > > functions") > > > > Cc: adrien.mazarguil@6wind.com > > Cc: stable@dpdk.org > > > > Signed-off-by: Shachar Beiser <shacharbe@mellanox.com> > > Looks like in both commits, ret is properly initialized so I'm wondering if the > fixes line is right? Did you even get a compilation error? > > Otherwise, you should drop this patch from the series. 
> > > --- > > drivers/net/mlx5/mlx5_rxq.c | 2 +- > > 1 file changed, 1 insertion(+), 1 deletion(-) > > > > diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c > > index 437dc02..24887fb 100644 > > --- a/drivers/net/mlx5/mlx5_rxq.c > > +++ b/drivers/net/mlx5/mlx5_rxq.c > > @@ -1330,7 +1330,7 @@ > > struct priv *priv = mlx5_get_priv(dev); > > struct rxq *rxq = (*priv->rxqs)[rx_queue_id]; > > struct rxq_ctrl *rxq_ctrl = container_of(rxq, struct rxq_ctrl, rxq); > > - int ret; > > + int ret = 0; > > > > if (!rxq || !rxq_ctrl->channel) { > > ret = EINVAL; > > -- > > 1.8.3.1 > > > > -- > Adrien Mazarguil > 6WIND ^ permalink raw reply [flat|nested] 25+ messages in thread
end of thread, other threads:[~2017-09-22 18:02 UTC | newest] Thread overview: 25+ messages (download: mbox.gz / follow: Atom feed) -- links below jump to the message on this page -- 2017-09-04 11:48 [dpdk-dev] [PATCH 1/3] net/mlx5: replace network to host macros Shachar Beiser 2017-09-04 11:48 ` [dpdk-dev] [PATCH 2/3] net/mlx5: fix TSO MLNX OFED 3.3 verification Shachar Beiser 2017-09-04 15:15 ` Nélio Laranjeiro 2017-09-04 11:48 ` [dpdk-dev] [PATCH 3/3] net/mlx5: fix interrupt enable return value Shachar Beiser 2017-09-04 15:24 ` Nélio Laranjeiro 2017-09-05 9:04 ` Shachar Beiser 2017-09-04 15:14 ` [dpdk-dev] [PATCH 1/3] net/mlx5: replace network to host macros Nélio Laranjeiro 2017-09-05 9:05 ` Shachar Beiser 2017-09-05 13:04 ` [dpdk-dev] [PATCH v2 " Shachar Beiser 2017-09-05 13:41 ` Nélio Laranjeiro 2017-09-05 13:04 ` [dpdk-dev] [PATCH v2 2/3] net/mlx5: fix TSO MLNX OFED 3.3 verification Shachar Beiser 2017-09-05 13:41 ` Nélio Laranjeiro 2017-09-14 13:43 ` [dpdk-dev] [PATCH v3 1/2] net/mlx5: replace network to host macros Shachar Beiser 2017-09-15 20:50 ` Yongseok Koh 2017-09-18 9:47 ` Shachar Beiser 2017-09-14 13:43 ` [dpdk-dev] [PATCH v3 2/2] net/mlx5: fix TSO MLNX OFED 3.3 verification Shachar Beiser 2017-09-17 10:42 ` [dpdk-dev] [PATCH v4 1/2] net/mlx5: replace network to host macros Shachar Beiser 2017-09-18 17:59 ` Yongseok Koh 2017-09-19 6:30 ` Nélio Laranjeiro 2017-09-17 10:42 ` [dpdk-dev] [PATCH v4 2/2] net/mlx5: fix TSO MLNX OFED 3.3 verification Shachar Beiser 2017-09-18 17:52 ` Yongseok Koh 2017-09-22 18:02 ` [dpdk-dev] [dpdk-stable] " Ferruh Yigit 2017-09-05 13:04 ` [dpdk-dev] [PATCH v2 3/3] net/mlx5: fix interrupt enable return value Shachar Beiser 2017-09-05 13:27 ` Adrien Mazarguil 2017-09-06 10:54 ` Shachar Beiser
This is a public inbox, see mirroring instructions for how to clone and mirror all data and code used for this inbox; as well as URLs for NNTP newsgroup(s).