DPDK patches and discussions
* [dpdk-dev] [PATCH 0/3] fix Rx checksum offloads
@ 2016-11-02 10:39 Nelio Laranjeiro
  2016-11-02 10:39 ` [dpdk-dev] [PATCH 1/3] net/mlx5: fix Rx checksum macros Nelio Laranjeiro
                   ` (3 more replies)
  0 siblings, 4 replies; 6+ messages in thread
From: Nelio Laranjeiro @ 2016-11-02 10:39 UTC (permalink / raw)
  To: dev

Correctly fill the mbuf Rx offload flags.

Nelio Laranjeiro (3):
  net/mlx5: fix Rx checksum macros
  net/mlx5: define explicit fields for Rx offloads
  net/mlx: fix support for new Rx checksum flags

 drivers/net/mlx4/mlx4.c      | 21 ++++------
 drivers/net/mlx5/mlx5_prm.h  | 37 +++++++++++++++++-
 drivers/net/mlx5/mlx5_rxtx.c | 93 ++++++++++++++++++++------------------------
 3 files changed, 87 insertions(+), 64 deletions(-)

-- 
2.1.4
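
For context, the series makes the driver fill the new mbuf Rx checksum
flags referenced in patch 3/3. A minimal sketch of how an application
reads those flags, assuming the PKT_RX_*_CKSUM_* definitions from
rte_mbuf.h (the helper name itself is illustrative):

#include <stdint.h>
#include <rte_mbuf.h>

/* Interpret the Rx checksum status of one received mbuf.
 * GOOD means verified by hardware, UNKNOWN means not checked. */
static inline int
rx_checksums_usable(const struct rte_mbuf *m)
{
	uint64_t ip = m->ol_flags & PKT_RX_IP_CKSUM_MASK;
	uint64_t l4 = m->ol_flags & PKT_RX_L4_CKSUM_MASK;

	return (ip == PKT_RX_IP_CKSUM_GOOD || ip == PKT_RX_IP_CKSUM_UNKNOWN) &&
	       (l4 == PKT_RX_L4_CKSUM_GOOD || l4 == PKT_RX_L4_CKSUM_UNKNOWN);
}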


* [dpdk-dev] [PATCH 1/3] net/mlx5: fix Rx checksum macros
  2016-11-02 10:39 [dpdk-dev] [PATCH 0/3] fix Rx checksum offloads Nelio Laranjeiro
@ 2016-11-02 10:39 ` Nelio Laranjeiro
  2016-11-02 10:39 ` [dpdk-dev] [PATCH 2/3] net/mlx5: define explicit fields for Rx offloads Nelio Laranjeiro
                   ` (2 subsequent siblings)
  3 siblings, 0 replies; 6+ messages in thread
From: Nelio Laranjeiro @ 2016-11-02 10:39 UTC (permalink / raw)
  To: dev

Add the missing CQE Rx flag definitions:

 - MLX5_CQE_RX_IPV4_PACKET
 - MLX5_CQE_RX_IPV6_PACKET
 - MLX5_CQE_RX_OUTER_IPV4_PACKET
 - MLX5_CQE_RX_OUTER_IPV6_PACKET
 - MLX5_CQE_RX_TUNNEL_PACKET
 - MLX5_CQE_RX_OUTER_IP_CSUM_OK
 - MLX5_CQE_RX_OUTER_TCP_UDP_CSUM_OK

Fixes: 51a50a3d9b8f ("net/mlx5: add definitions for data path without Verbs")

Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
---
 drivers/net/mlx5/mlx5_prm.h  | 21 +++++++++++++++++++++
 drivers/net/mlx5/mlx5_rxtx.c | 16 ++++++++--------
 2 files changed, 29 insertions(+), 8 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_prm.h b/drivers/net/mlx5/mlx5_prm.h
index 90b47f0..500f25a 100644
--- a/drivers/net/mlx5/mlx5_prm.h
+++ b/drivers/net/mlx5/mlx5_prm.h
@@ -84,6 +84,27 @@
 #define MLX5_OPCODE_TSO MLX5_OPCODE_LSO_MPW /* Compat with OFED 3.3. */
 #endif
 
+/* IPv4 packet. */
+#define MLX5_CQE_RX_IPV4_PACKET (1u << 2)
+
+/* IPv6 packet. */
+#define MLX5_CQE_RX_IPV6_PACKET (1u << 3)
+
+/* Outer IPv4 packet. */
+#define MLX5_CQE_RX_OUTER_IPV4_PACKET (1u << 7)
+
+/* Outer IPv6 packet. */
+#define MLX5_CQE_RX_OUTER_IPV6_PACKET (1u << 8)
+
+/* Tunnel packet bit in the CQE. */
+#define MLX5_CQE_RX_TUNNEL_PACKET (1u << 4)
+
+/* Outer IP checksum OK. */
+#define MLX5_CQE_RX_OUTER_IP_CSUM_OK (1u << 5)
+
+/* Outer UDP header and checksum OK. */
+#define MLX5_CQE_RX_OUTER_TCP_UDP_CSUM_OK (1u << 6)
+
 /* Subset of struct mlx5_wqe_eth_seg. */
 struct mlx5_wqe_eth_seg_small {
 	uint32_t rsvd0;
diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index ba8e202..7ebe557 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -1096,19 +1096,19 @@ rxq_cq_to_pkt_type(volatile struct mlx5_cqe64 *cqe)
 	uint8_t flags = cqe->l4_hdr_type_etc;
 	uint8_t info = cqe->rsvd0[0];
 
-	if (info & IBV_EXP_CQ_RX_TUNNEL_PACKET)
+	if (info & MLX5_CQE_RX_TUNNEL_PACKET)
 		pkt_type =
 			TRANSPOSE(flags,
-				  IBV_EXP_CQ_RX_OUTER_IPV4_PACKET,
+				  MLX5_CQE_RX_OUTER_IPV4_PACKET,
 				  RTE_PTYPE_L3_IPV4) |
 			TRANSPOSE(flags,
-				  IBV_EXP_CQ_RX_OUTER_IPV6_PACKET,
+				  MLX5_CQE_RX_OUTER_IPV6_PACKET,
 				  RTE_PTYPE_L3_IPV6) |
 			TRANSPOSE(flags,
-				  IBV_EXP_CQ_RX_IPV4_PACKET,
+				  MLX5_CQE_RX_IPV4_PACKET,
 				  RTE_PTYPE_INNER_L3_IPV4) |
 			TRANSPOSE(flags,
-				  IBV_EXP_CQ_RX_IPV6_PACKET,
+				  MLX5_CQE_RX_IPV6_PACKET,
 				  RTE_PTYPE_INNER_L3_IPV6);
 	else
 		pkt_type =
@@ -1256,13 +1256,13 @@ rxq_cq_to_ol_flags(struct rxq *rxq, volatile struct mlx5_cqe64 *cqe)
 	 * of PKT_RX_EIP_CKSUM_BAD because the latter is not functional
 	 * (its value is 0).
 	 */
-	if ((info & IBV_EXP_CQ_RX_TUNNEL_PACKET) && (rxq->csum_l2tun))
+	if ((info & MLX5_CQE_RX_TUNNEL_PACKET) && (rxq->csum_l2tun))
 		ol_flags |=
 			TRANSPOSE(~cqe->l4_hdr_type_etc,
-				  IBV_EXP_CQ_RX_OUTER_IP_CSUM_OK,
+				  MLX5_CQE_RX_OUTER_IP_CSUM_OK,
 				  PKT_RX_IP_CKSUM_BAD) |
 			TRANSPOSE(~cqe->l4_hdr_type_etc,
-				  IBV_EXP_CQ_RX_OUTER_TCP_UDP_CSUM_OK,
+				  MLX5_CQE_RX_OUTER_TCP_UDP_CSUM_OK,
 				  PKT_RX_L4_CKSUM_BAD);
 	return ol_flags;
 }
-- 
2.1.4
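
The conversions in rxq_cq_to_pkt_type() and rxq_cq_to_ol_flags() rely on
the driver's TRANSPOSE() helper to move a completion queue flag bit into
the corresponding mbuf bit position. A self-contained sketch of that idea
follows; the macro body and both bit values are illustrative restatements
rather than copies of the driver headers:

#include <stdint.h>
#include <stdio.h>

/* Scale the bit selected by "from" in "val" to the "to" position. */
#define TRANSPOSE(val, from, to) \
	(((from) >= (to)) ? \
	 (((val) & (from)) / ((from) / (to))) : \
	 (((val) & (from)) * ((to) / (from))))

#define CQE_OUTER_IPV4 (1u << 7) /* same value as MLX5_CQE_RX_OUTER_IPV4_PACKET */
#define PTYPE_L3_IPV4  (1u << 4) /* stand-in for RTE_PTYPE_L3_IPV4 */

int
main(void)
{
	uint8_t flags = CQE_OUTER_IPV4;
	uint32_t pkt_type = TRANSPOSE(flags, CQE_OUTER_IPV4, PTYPE_L3_IPV4);

	/* Bit 7 of the CQE flags ends up as bit 4 of the packet type. */
	printf("cqe flags 0x%02x -> pkt_type 0x%02x\n",
	       (unsigned int)flags, (unsigned int)pkt_type);
	return 0;
}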


* [dpdk-dev] [PATCH 2/3] net/mlx5: define explicit fields for Rx offloads
  2016-11-02 10:39 [dpdk-dev] [PATCH 0/3] fix Rx checksum offloads Nelio Laranjeiro
  2016-11-02 10:39 ` [dpdk-dev] [PATCH 1/3] net/mlx5: fix Rx checksum macros Nelio Laranjeiro
@ 2016-11-02 10:39 ` Nelio Laranjeiro
  2016-11-02 10:39 ` [dpdk-dev] [PATCH 3/3] net/mlx: fix support for new Rx checksum flags Nelio Laranjeiro
  2016-11-02 15:57 ` [dpdk-dev] [PATCH 0/3] fix Rx checksum offloads Adrien Mazarguil
  3 siblings, 0 replies; 6+ messages in thread
From: Nelio Laranjeiro @ 2016-11-02 10:39 UTC (permalink / raw)
  To: dev

This commit redefines the completion queue element structure, as the
original definition lacks explicit fields for the Rx offload information.

Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
---
 drivers/net/mlx5/mlx5_prm.h  | 16 ++++++++++++-
 drivers/net/mlx5/mlx5_rxtx.c | 56 +++++++++++++++++++++-----------------------
 2 files changed, 42 insertions(+), 30 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_prm.h b/drivers/net/mlx5/mlx5_prm.h
index 500f25a..7f31a2f 100644
--- a/drivers/net/mlx5/mlx5_prm.h
+++ b/drivers/net/mlx5/mlx5_prm.h
@@ -158,7 +158,21 @@ struct mlx5_cqe {
 #if (RTE_CACHE_LINE_SIZE == 128)
 	uint8_t padding[64];
 #endif
-	struct mlx5_cqe64 cqe64;
+	uint8_t pkt_info;
+	uint8_t rsvd0[11];
+	uint32_t rx_hash_res;
+	uint8_t rx_hash_type;
+	uint8_t rsvd1[11];
+	uint8_t hds_ip_ext;
+	uint8_t l4_hdr_type_etc;
+	uint16_t vlan_info;
+	uint8_t rsvd2[12];
+	uint32_t byte_cnt;
+	uint64_t timestamp;
+	uint8_t rsvd3[4];
+	uint16_t wqe_counter;
+	uint8_t rsvd4;
+	uint8_t op_own;
 };
 
 #endif /* RTE_PMD_MLX5_PRM_H_ */
diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index 7ebe557..b6e0d65 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -83,10 +83,10 @@
  *   0 the first time.
  */
 static inline int
-check_cqe64_seen(volatile struct mlx5_cqe64 *cqe)
+check_cqe_seen(volatile struct mlx5_cqe *cqe)
 {
 	static const uint8_t magic[] = "seen";
-	volatile uint8_t (*buf)[sizeof(cqe->rsvd40)] = &cqe->rsvd40;
+	volatile uint8_t (*buf)[sizeof(cqe->rsvd3)] = &cqe->rsvd3;
 	int ret = 1;
 	unsigned int i;
 
@@ -101,9 +101,9 @@ check_cqe64_seen(volatile struct mlx5_cqe64 *cqe)
 #endif /* NDEBUG */
 
 static inline int
-check_cqe64(volatile struct mlx5_cqe64 *cqe,
-	    unsigned int cqes_n, const uint16_t ci)
-	    __attribute__((always_inline));
+check_cqe(volatile struct mlx5_cqe *cqe,
+	  unsigned int cqes_n, const uint16_t ci)
+	  __attribute__((always_inline));
 
 /**
  * Check whether CQE is valid.
@@ -119,8 +119,8 @@ check_cqe64(volatile struct mlx5_cqe64 *cqe,
  *   0 on success, 1 on failure.
  */
 static inline int
-check_cqe64(volatile struct mlx5_cqe64 *cqe,
-		unsigned int cqes_n, const uint16_t ci)
+check_cqe(volatile struct mlx5_cqe *cqe,
+	  unsigned int cqes_n, const uint16_t ci)
 {
 	uint16_t idx = ci & cqes_n;
 	uint8_t op_own = cqe->op_own;
@@ -138,14 +138,14 @@ check_cqe64(volatile struct mlx5_cqe64 *cqe,
 		if ((syndrome == MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR) ||
 		    (syndrome == MLX5_CQE_SYNDROME_REMOTE_ABORTED_ERR))
 			return 0;
-		if (!check_cqe64_seen(cqe))
+		if (!check_cqe_seen(cqe))
 			ERROR("unexpected CQE error %u (0x%02x)"
 			      " syndrome 0x%02x",
 			      op_code, op_code, syndrome);
 		return 1;
 	} else if ((op_code != MLX5_CQE_RESP_SEND) &&
 		   (op_code != MLX5_CQE_REQ)) {
-		if (!check_cqe64_seen(cqe))
+		if (!check_cqe_seen(cqe))
 			ERROR("unexpected CQE opcode %u (0x%02x)",
 			      op_code, op_code);
 		return 1;
@@ -174,25 +174,25 @@ txq_complete(struct txq *txq)
 	uint16_t elts_free = txq->elts_tail;
 	uint16_t elts_tail;
 	uint16_t cq_ci = txq->cq_ci;
-	volatile struct mlx5_cqe64 *cqe = NULL;
+	volatile struct mlx5_cqe *cqe = NULL;
 	volatile struct mlx5_wqe *wqe;
 
 	do {
-		volatile struct mlx5_cqe64 *tmp;
+		volatile struct mlx5_cqe *tmp;
 
-		tmp = &(*txq->cqes)[cq_ci & cqe_cnt].cqe64;
-		if (check_cqe64(tmp, cqe_n, cq_ci))
+		tmp = &(*txq->cqes)[cq_ci & cqe_cnt];
+		if (check_cqe(tmp, cqe_n, cq_ci))
 			break;
 		cqe = tmp;
 #ifndef NDEBUG
 		if (MLX5_CQE_FORMAT(cqe->op_own) == MLX5_COMPRESSED) {
-			if (!check_cqe64_seen(cqe))
+			if (!check_cqe_seen(cqe))
 				ERROR("unexpected compressed CQE, TX stopped");
 			return;
 		}
 		if ((MLX5_CQE_OPCODE(cqe->op_own) == MLX5_CQE_RESP_ERR) ||
 		    (MLX5_CQE_OPCODE(cqe->op_own) == MLX5_CQE_REQ_ERR)) {
-			if (!check_cqe64_seen(cqe))
+			if (!check_cqe_seen(cqe))
 				ERROR("unexpected error CQE, TX stopped");
 			return;
 		}
@@ -1090,13 +1090,12 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
  *   Packet type for struct rte_mbuf.
  */
 static inline uint32_t
-rxq_cq_to_pkt_type(volatile struct mlx5_cqe64 *cqe)
+rxq_cq_to_pkt_type(volatile struct mlx5_cqe *cqe)
 {
 	uint32_t pkt_type;
 	uint8_t flags = cqe->l4_hdr_type_etc;
-	uint8_t info = cqe->rsvd0[0];
 
-	if (info & MLX5_CQE_RX_TUNNEL_PACKET)
+	if (cqe->pkt_info & MLX5_CQE_RX_TUNNEL_PACKET)
 		pkt_type =
 			TRANSPOSE(flags,
 				  MLX5_CQE_RX_OUTER_IPV4_PACKET,
@@ -1138,7 +1137,7 @@ rxq_cq_to_pkt_type(volatile struct mlx5_cqe64 *cqe)
  *   with error.
  */
 static inline int
-mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe64 *cqe,
+mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe *cqe,
 		 uint16_t cqe_cnt, uint32_t *rss_hash)
 {
 	struct rxq_zip *zip = &rxq->zip;
@@ -1149,7 +1148,7 @@ mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe64 *cqe,
 	if (zip->ai) {
 		volatile struct mlx5_mini_cqe8 (*mc)[8] =
 			(volatile struct mlx5_mini_cqe8 (*)[8])
-			(uintptr_t)(&(*rxq->cqes)[zip->ca & cqe_cnt].cqe64);
+			(uintptr_t)(&(*rxq->cqes)[zip->ca & cqe_cnt]);
 
 		len = ntohl((*mc)[zip->ai & 7].byte_cnt);
 		*rss_hash = ntohl((*mc)[zip->ai & 7].rx_hash_result);
@@ -1167,7 +1166,7 @@ mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe64 *cqe,
 			uint16_t end = zip->cq_ci;
 
 			while (idx != end) {
-				(*rxq->cqes)[idx & cqe_cnt].cqe64.op_own =
+				(*rxq->cqes)[idx & cqe_cnt].op_own =
 					MLX5_CQE_INVALIDATE;
 				++idx;
 			}
@@ -1179,7 +1178,7 @@ mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe64 *cqe,
 		int ret;
 		int8_t op_own;
 
-		ret = check_cqe64(cqe, cqe_n, rxq->cq_ci);
+		ret = check_cqe(cqe, cqe_n, rxq->cq_ci);
 		if (unlikely(ret == 1))
 			return 0;
 		++rxq->cq_ci;
@@ -1188,7 +1187,7 @@ mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe64 *cqe,
 			volatile struct mlx5_mini_cqe8 (*mc)[8] =
 				(volatile struct mlx5_mini_cqe8 (*)[8])
 				(uintptr_t)(&(*rxq->cqes)[rxq->cq_ci &
-							  cqe_cnt].cqe64);
+							  cqe_cnt]);
 
 			/* Fix endianness. */
 			zip->cqe_cnt = ntohl(cqe->byte_cnt);
@@ -1232,12 +1231,11 @@ mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe64 *cqe,
  *   Offload flags (ol_flags) for struct rte_mbuf.
  */
 static inline uint32_t
-rxq_cq_to_ol_flags(struct rxq *rxq, volatile struct mlx5_cqe64 *cqe)
+rxq_cq_to_ol_flags(struct rxq *rxq, volatile struct mlx5_cqe *cqe)
 {
 	uint32_t ol_flags = 0;
 	uint8_t l3_hdr = (cqe->l4_hdr_type_etc) & MLX5_CQE_L3_HDR_TYPE_MASK;
 	uint8_t l4_hdr = (cqe->l4_hdr_type_etc) & MLX5_CQE_L4_HDR_TYPE_MASK;
-	uint8_t info = cqe->rsvd0[0];
 
 	if ((l3_hdr == MLX5_CQE_L3_HDR_TYPE_IPV4) ||
 	    (l3_hdr == MLX5_CQE_L3_HDR_TYPE_IPV6))
@@ -1256,7 +1254,7 @@ rxq_cq_to_ol_flags(struct rxq *rxq, volatile struct mlx5_cqe64 *cqe)
 	 * of PKT_RX_EIP_CKSUM_BAD because the latter is not functional
 	 * (its value is 0).
 	 */
-	if ((info & MLX5_CQE_RX_TUNNEL_PACKET) && (rxq->csum_l2tun))
+	if ((cqe->pkt_info & MLX5_CQE_RX_TUNNEL_PACKET) && (rxq->csum_l2tun))
 		ol_flags |=
 			TRANSPOSE(~cqe->l4_hdr_type_etc,
 				  MLX5_CQE_RX_OUTER_IP_CSUM_OK,
@@ -1289,8 +1287,8 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 	const unsigned int sges_n = rxq->sges_n;
 	struct rte_mbuf *pkt = NULL;
 	struct rte_mbuf *seg = NULL;
-	volatile struct mlx5_cqe64 *cqe =
-		&(*rxq->cqes)[rxq->cq_ci & cqe_cnt].cqe64;
+	volatile struct mlx5_cqe *cqe =
+		&(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
 	unsigned int i = 0;
 	unsigned int rq_ci = rxq->rq_ci << sges_n;
 	int len; /* keep its value across iterations. */
@@ -1327,7 +1325,7 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 			break;
 		}
 		if (!pkt) {
-			cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt].cqe64;
+			cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
 			len = mlx5_rx_poll_len(rxq, cqe, cqe_cnt,
 					       &rss_hash_res);
 			if (!len) {
-- 
2.1.4
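
The explicit field list above describes a single 64-byte completion queue
entry (the extra 64-byte padding only applies when RTE_CACHE_LINE_SIZE is
128). A standalone sanity check of that arithmetic, using a renamed copy
of the field list; the struct name and the assertion are illustrative and
not part of the patch:

#include <stdint.h>

/* Field-for-field copy of the new layout, without the optional padding,
 * renamed so it can be compiled on its own. */
struct cqe_layout_check {
	uint8_t pkt_info;
	uint8_t rsvd0[11];
	uint32_t rx_hash_res;
	uint8_t rx_hash_type;
	uint8_t rsvd1[11];
	uint8_t hds_ip_ext;
	uint8_t l4_hdr_type_etc;
	uint16_t vlan_info;
	uint8_t rsvd2[12];
	uint32_t byte_cnt;
	uint64_t timestamp;
	uint8_t rsvd3[4];
	uint16_t wqe_counter;
	uint8_t rsvd4;
	uint8_t op_own;
};

/* Every field falls on its natural alignment, so no implicit padding is
 * inserted and the structure adds up to the 64-byte CQE. */
_Static_assert(sizeof(struct cqe_layout_check) == 64,
	       "CQE layout must be 64 bytes");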


* [dpdk-dev] [PATCH 3/3] net/mlx: fix support for new Rx checksum flags
  2016-11-02 10:39 [dpdk-dev] [PATCH 0/3] fix Rx checksum offloads Nelio Laranjeiro
  2016-11-02 10:39 ` [dpdk-dev] [PATCH 1/3] net/mlx5: fix Rx checksum macros Nelio Laranjeiro
  2016-11-02 10:39 ` [dpdk-dev] [PATCH 2/3] net/mlx5: define explicit fields for Rx offloads Nelio Laranjeiro
@ 2016-11-02 10:39 ` Nelio Laranjeiro
  2016-11-02 15:57 ` [dpdk-dev] [PATCH 0/3] fix Rx checksum offloads Adrien Mazarguil
  3 siblings, 0 replies; 6+ messages in thread
From: Nelio Laranjeiro @ 2016-11-02 10:39 UTC (permalink / raw)
  To: dev

Report PKT_RX_IP_CKSUM_GOOD and PKT_RX_L4_CKSUM_GOOD when the hardware
validates a checksum, instead of only reporting failures through the
*_CKSUM_BAD flags.

Fixes: 5842289a546c ("mbuf: add new Rx checksum flags")

Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
---
 drivers/net/mlx4/mlx4.c      | 21 ++++++++-------------
 drivers/net/mlx5/mlx5_rxtx.c | 25 ++++++++++---------------
 2 files changed, 18 insertions(+), 28 deletions(-)

diff --git a/drivers/net/mlx4/mlx4.c b/drivers/net/mlx4/mlx4.c
index faa9acd..da61a85 100644
--- a/drivers/net/mlx4/mlx4.c
+++ b/drivers/net/mlx4/mlx4.c
@@ -2995,25 +2995,20 @@ rxq_cq_to_ol_flags(const struct rxq *rxq, uint32_t flags)
 
 	if (rxq->csum)
 		ol_flags |=
-			TRANSPOSE(~flags,
+			TRANSPOSE(flags,
 				  IBV_EXP_CQ_RX_IP_CSUM_OK,
-				  PKT_RX_IP_CKSUM_BAD) |
-			TRANSPOSE(~flags,
+				  PKT_RX_IP_CKSUM_GOOD) |
+			TRANSPOSE(flags,
 				  IBV_EXP_CQ_RX_TCP_UDP_CSUM_OK,
-				  PKT_RX_L4_CKSUM_BAD);
-	/*
-	 * PKT_RX_IP_CKSUM_BAD and PKT_RX_L4_CKSUM_BAD are used in place
-	 * of PKT_RX_EIP_CKSUM_BAD because the latter is not functional
-	 * (its value is 0).
-	 */
+				  PKT_RX_L4_CKSUM_GOOD);
 	if ((flags & IBV_EXP_CQ_RX_TUNNEL_PACKET) && (rxq->csum_l2tun))
 		ol_flags |=
-			TRANSPOSE(~flags,
+			TRANSPOSE(flags,
 				  IBV_EXP_CQ_RX_OUTER_IP_CSUM_OK,
-				  PKT_RX_IP_CKSUM_BAD) |
-			TRANSPOSE(~flags,
+				  PKT_RX_IP_CKSUM_GOOD) |
+			TRANSPOSE(flags,
 				  IBV_EXP_CQ_RX_OUTER_TCP_UDP_CSUM_OK,
-				  PKT_RX_L4_CKSUM_BAD);
+				  PKT_RX_L4_CKSUM_GOOD);
 	return ol_flags;
 }
 
diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index b6e0d65..beff580 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -1239,29 +1239,24 @@ rxq_cq_to_ol_flags(struct rxq *rxq, volatile struct mlx5_cqe *cqe)
 
 	if ((l3_hdr == MLX5_CQE_L3_HDR_TYPE_IPV4) ||
 	    (l3_hdr == MLX5_CQE_L3_HDR_TYPE_IPV6))
-		ol_flags |=
-			(!(cqe->hds_ip_ext & MLX5_CQE_L3_OK) *
-			 PKT_RX_IP_CKSUM_BAD);
+		ol_flags |= TRANSPOSE(cqe->hds_ip_ext,
+				      MLX5_CQE_L3_OK,
+				      PKT_RX_IP_CKSUM_GOOD);
 	if ((l4_hdr == MLX5_CQE_L4_HDR_TYPE_TCP) ||
 	    (l4_hdr == MLX5_CQE_L4_HDR_TYPE_TCP_EMP_ACK) ||
 	    (l4_hdr == MLX5_CQE_L4_HDR_TYPE_TCP_ACK) ||
 	    (l4_hdr == MLX5_CQE_L4_HDR_TYPE_UDP))
-		ol_flags |=
-			(!(cqe->hds_ip_ext & MLX5_CQE_L4_OK) *
-			 PKT_RX_L4_CKSUM_BAD);
-	/*
-	 * PKT_RX_IP_CKSUM_BAD and PKT_RX_L4_CKSUM_BAD are used in place
-	 * of PKT_RX_EIP_CKSUM_BAD because the latter is not functional
-	 * (its value is 0).
-	 */
+		ol_flags |= TRANSPOSE(cqe->hds_ip_ext,
+				      MLX5_CQE_L4_OK,
+				      PKT_RX_L4_CKSUM_GOOD);
 	if ((cqe->pkt_info & MLX5_CQE_RX_TUNNEL_PACKET) && (rxq->csum_l2tun))
 		ol_flags |=
-			TRANSPOSE(~cqe->l4_hdr_type_etc,
+			TRANSPOSE(cqe->l4_hdr_type_etc,
 				  MLX5_CQE_RX_OUTER_IP_CSUM_OK,
-				  PKT_RX_IP_CKSUM_BAD) |
-			TRANSPOSE(~cqe->l4_hdr_type_etc,
+				  PKT_RX_IP_CKSUM_GOOD) |
+			TRANSPOSE(cqe->l4_hdr_type_etc,
 				  MLX5_CQE_RX_OUTER_TCP_UDP_CSUM_OK,
-				  PKT_RX_L4_CKSUM_BAD);
+				  PKT_RX_L4_CKSUM_GOOD);
 	return ol_flags;
 }
 
-- 
2.1.4
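
The functional change in both drivers is the direction of the report: the
hardware "checksum OK" bits are now transposed directly into
PKT_RX_*_CKSUM_GOOD instead of their negation being turned into
PKT_RX_*_CKSUM_BAD. A minimal sketch of the difference for the L4 case,
with hypothetical local values standing in for the CQE bit and the mbuf
flags:

#include <stdint.h>

#define HW_L4_CSUM_OK (1u << 6)    /* hypothetical hardware "L4 checksum OK" bit */
#define L4_CKSUM_BAD  (1ULL << 3)  /* stand-in for PKT_RX_L4_CKSUM_BAD */
#define L4_CKSUM_GOOD (1ULL << 8)  /* stand-in for PKT_RX_L4_CKSUM_GOOD */

/* Old scheme: a missing OK bit was reported as BAD, so a packet the
 * hardware simply did not validate looked corrupted. */
static uint64_t
ol_flags_old(uint32_t hw_flags)
{
	return (hw_flags & HW_L4_CSUM_OK) ? 0 : L4_CKSUM_BAD;
}

/* New scheme: a present OK bit is reported as GOOD, anything else is
 * left as "checksum unknown" (no flag set). */
static uint64_t
ol_flags_new(uint32_t hw_flags)
{
	return (hw_flags & HW_L4_CSUM_OK) ? L4_CKSUM_GOOD : 0;
}

With the new flags, an ol_flags value of zero genuinely means "not
checked", which matches the semantics introduced by the mbuf commit
named in the Fixes line.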


* Re: [dpdk-dev] [PATCH 0/3] fix Rx checksum offloads
  2016-11-02 10:39 [dpdk-dev] [PATCH 0/3] fix Rx checksum offloads Nelio Laranjeiro
                   ` (2 preceding siblings ...)
  2016-11-02 10:39 ` [dpdk-dev] [PATCH 3/3] net/mlx: fix support for new Rx checksum flags Nelio Laranjeiro
@ 2016-11-02 15:57 ` Adrien Mazarguil
  2016-11-07 17:42   ` Thomas Monjalon
  3 siblings, 1 reply; 6+ messages in thread
From: Adrien Mazarguil @ 2016-11-02 15:57 UTC (permalink / raw)
  To: Nelio Laranjeiro; +Cc: dev

On Wed, Nov 02, 2016 at 11:39:36AM +0100, Nelio Laranjeiro wrote:
> Correctly fill the mbuf Rx offload flags.
> 
> Nelio Laranjeiro (3):
>   net/mlx5: fix Rx checksum macros
>   net/mlx5: define explicit fields for Rx offloads
>   net/mlx: fix support for new Rx checksum flags
> 
>  drivers/net/mlx4/mlx4.c      | 21 ++++------
>  drivers/net/mlx5/mlx5_prm.h  | 37 +++++++++++++++++-
>  drivers/net/mlx5/mlx5_rxtx.c | 93 ++++++++++++++++++++------------------------
>  3 files changed, 87 insertions(+), 64 deletions(-)
> 
> -- 
> 2.1.4

Thanks. For the series:

Acked-by: Adrien Mazarguil <adrien.mazarguil@6wind.com>

-- 
Adrien Mazarguil
6WIND


* Re: [dpdk-dev] [PATCH 0/3] fix Rx checksum offloads
  2016-11-02 15:57 ` [dpdk-dev] [PATCH 0/3] fix Rx checksum offloads Adrien Mazarguil
@ 2016-11-07 17:42   ` Thomas Monjalon
  0 siblings, 0 replies; 6+ messages in thread
From: Thomas Monjalon @ 2016-11-07 17:42 UTC (permalink / raw)
  To: Nelio Laranjeiro; +Cc: dev, Adrien Mazarguil

2016-11-02 16:57, Adrien Mazarguil:
> On Wed, Nov 02, 2016 at 11:39:36AM +0100, Nelio Laranjeiro wrote:
> > Correctly fill the mbuf Rx offload flags.
> > 
> > Nelio Laranjeiro (3):
> >   net/mlx5: fix Rx checksum macros
> >   net/mlx5: define explicit fields for Rx offloads
> >   net/mlx: fix support for new Rx checksum flags
> > 
> >  drivers/net/mlx4/mlx4.c      | 21 ++++------
> >  drivers/net/mlx5/mlx5_prm.h  | 37 +++++++++++++++++-
> >  drivers/net/mlx5/mlx5_rxtx.c | 93 ++++++++++++++++++++------------------------
> >  3 files changed, 87 insertions(+), 64 deletions(-)
> > 
> 
> Thanks. For the series:
> 
> Acked-by: Adrien Mazarguil <adrien.mazarguil@6wind.com>

Applied, thanks

