DPDK patches and discussions
From: Matan Azrad <matan@mellanox.com>
To: Shahaf Shuler <shahafs@mellanox.com>,
	Yongseok Koh <yskoh@mellanox.com>,
	Viacheslav Ovsiienko <viacheslavo@mellanox.com>
Cc: dev@dpdk.org, Dekel Peled <dekelp@mellanox.com>
Subject: [dpdk-dev] [PATCH 09/11] net/mlx5: handle LRO packets in regular Rx queue
Date: Mon, 29 Jul 2019 11:53:27 +0000	[thread overview]
Message-ID: <1564401209-18752-10-git-send-email-matan@mellanox.com> (raw)
In-Reply-To: <1564401209-18752-1-git-send-email-matan@mellanox.com>

When LRO offload is configured in a Rx queue, the HW may coalesce TCP
packets from the same TCP connection into a single packet.

In this case the SW should fix the relevant packet headers because
the HW doesn't update them according to the newly created packet's
characteristics, but instead provides the updated values in the CQE.

Add header update code to the regular Rx burst function to support the
LRO feature.

Make sure the first mbuf has enough space to include each TCP header;
otherwise the header update may cross mbuf boundaries, which complicates
the operation too much.
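
For illustration only (not part of the patch), below is a minimal
standalone C sketch of the header-size budget behind the 122B limit and
of the per-segment size the Rx burst stores in tso_segsz. The
65280-byte / 44-segment example values are made up, and it assumes the
DPDK headers rte_ether.h, rte_ip.h and rte_tcp.h are available.

    #include <stdio.h>
    #include <rte_ether.h>
    #include <rte_ip.h>
    #include <rte_tcp.h>

    /* Worst-case L2/L3 prefix: Ethernet + two VLAN tags + IPv6,
     * mirroring MLX5_MAX_TCP_HDR_OFFSET added in mlx5_rxq.c. */
    #define MAX_TCP_HDR_OFFSET (sizeof(struct rte_ether_hdr) + \
                                2 * sizeof(struct rte_vlan_hdr) + \
                                sizeof(struct rte_ipv6_hdr))
    #define MAX_TCP_OPTION_SIZE 40u

    int main(void)
    {
            /* 14 + 8 + 40 + 20 + 40 = 122 bytes: the first mbuf,
             * excluding headroom, must hold at least this much so the
             * TCP header being rewritten never crosses an mbuf
             * boundary. */
            unsigned int lro_header_fix = MAX_TCP_HDR_OFFSET +
                                          sizeof(struct rte_tcp_hdr) +
                                          MAX_TCP_OPTION_SIZE;
            /* Hypothetical coalesced packet: 65280 bytes reported as 44
             * segments; len / lro_num_seg is the approximate MSS the Rx
             * burst writes to mbuf->tso_segsz for an LRO packet. */
            unsigned int len = 65280, lro_num_seg = 44;

            printf("max LRO header fix size: %u bytes\n", lro_header_fix);
            printf("tso_segsz: %u bytes\n", len / lro_num_seg);
            return 0;
    }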

Signed-off-by: Matan Azrad <matan@mellanox.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
---
 doc/guides/nics/mlx5.rst     |  4 +++-
 drivers/net/mlx5/mlx5_rxq.c  | 20 +++++++++++++++++---
 drivers/net/mlx5/mlx5_rxtx.c | 17 +++++++++++++++++
 3 files changed, 37 insertions(+), 4 deletions(-)

diff --git a/doc/guides/nics/mlx5.rst b/doc/guides/nics/mlx5.rst
index cd550f4..6f0c382 100644
--- a/doc/guides/nics/mlx5.rst
+++ b/doc/guides/nics/mlx5.rst
@@ -165,7 +165,9 @@ Limitations
 
 - LRO:
 
-  - scatter_fcs is disabled when LRO is configured.
+  - KEEP_CRC offload cannot be supported with LRO.
+  - The first mbuf length, without headroom, must be big enough to include the
+    TCP header (122B).
 
 Statistics
 ----------
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index e96bb1e..3705d07 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -1541,6 +1541,11 @@ struct mlx5_rxq_obj *
 #define MLX5_MAX_TCP_HDR_OFFSET ((unsigned int)(sizeof(struct rte_ether_hdr) + \
 					sizeof(struct rte_vlan_hdr) * 2 + \
 					sizeof(struct rte_ipv6_hdr)))
+#define MAX_TCP_OPTION_SIZE 40u
+#define MLX5_MAX_LRO_HEADER_FIX ((unsigned int)(MLX5_MAX_TCP_HDR_OFFSET + \
+				 sizeof(struct rte_tcp_hdr) + \
+				 MAX_TCP_OPTION_SIZE))
+
 /**
  * Adjust the maximum LRO massage size.
  *
@@ -1607,6 +1612,7 @@ struct mlx5_rxq_ctrl *
 	unsigned int non_scatter_min_mbuf_size = max_rx_pkt_len +
 							RTE_PKTMBUF_HEADROOM;
 	unsigned int max_lro_size = 0;
+	unsigned int first_mb_free_size = mb_len - RTE_PKTMBUF_HEADROOM;
 
 	if (non_scatter_min_mbuf_size > mb_len && !(offloads &
 						    DEV_RX_OFFLOAD_SCATTER)) {
@@ -1670,8 +1676,8 @@ struct mlx5_rxq_ctrl *
 					      config->mprq.min_stride_size_n);
 		tmpl->rxq.strd_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT;
 		tmpl->rxq.strd_headroom_en = strd_headroom_en;
-		tmpl->rxq.mprq_max_memcpy_len = RTE_MIN(mb_len -
-			    RTE_PKTMBUF_HEADROOM, config->mprq.max_memcpy_len);
+		tmpl->rxq.mprq_max_memcpy_len = RTE_MIN(first_mb_free_size,
+				config->mprq.max_memcpy_len);
 		max_lro_size = RTE_MIN(max_rx_pkt_len,
 				       (1u << tmpl->rxq.strd_num_n) *
 				       (1u << tmpl->rxq.strd_sz_n));
@@ -1680,13 +1686,21 @@ struct mlx5_rxq_ctrl *
 			" strd_num_n = %u, strd_sz_n = %u",
 			dev->data->port_id, idx,
 			tmpl->rxq.strd_num_n, tmpl->rxq.strd_sz_n);
-	} else if (max_rx_pkt_len <= (mb_len - RTE_PKTMBUF_HEADROOM)) {
+	} else if (max_rx_pkt_len <= first_mb_free_size) {
 		tmpl->rxq.sges_n = 0;
 		max_lro_size = max_rx_pkt_len;
 	} else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
 		unsigned int size = non_scatter_min_mbuf_size;
 		unsigned int sges_n;
 
+		if (mlx5_lro_on(dev) && first_mb_free_size <
+		    MLX5_MAX_LRO_HEADER_FIX) {
+			DRV_LOG(ERR, "Not enough space in the first segment(%u)"
+				" to include the max header size(%u) for LRO",
+				first_mb_free_size, MLX5_MAX_LRO_HEADER_FIX);
+			rte_errno = ENOTSUP;
+			goto error;
+		}
 		/*
 		 * Determine the number of SGEs needed for a full packet
 		 * and round it to the next power of two.
diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index 003eefd..6627b54 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -107,6 +107,16 @@ enum mlx5_txcmp_code {
 mlx5_queue_state_modify(struct rte_eth_dev *dev,
 			struct mlx5_mp_arg_queue_state_modify *sm);
 
+static inline void
+mlx5_lro_update_tcp_hdr(struct rte_tcp_hdr *restrict tcp,
+			volatile struct mlx5_cqe *restrict cqe,
+			uint32_t phcsum);
+
+static inline void
+mlx5_lro_update_hdr(uint8_t *restrict padd,
+		    volatile struct mlx5_cqe *restrict cqe,
+		    uint32_t len);
+
 uint32_t mlx5_ptype_table[] __rte_cache_aligned = {
 	[0xff] = RTE_PTYPE_ALL_MASK, /* Last entry for errored packet. */
 };
@@ -1323,6 +1333,13 @@ enum mlx5_txcmp_code {
 			if (rxq->crc_present)
 				len -= RTE_ETHER_CRC_LEN;
 			PKT_LEN(pkt) = len;
+			if (cqe->lro_num_seg > 1) {
+				mlx5_lro_update_hdr
+					(rte_pktmbuf_mtod(pkt, uint8_t *), cqe,
+					 len);
+				pkt->ol_flags |= PKT_RX_LRO;
+				pkt->tso_segsz = len / cqe->lro_num_seg;
+			}
 		}
 		DATA_LEN(rep) = DATA_LEN(seg);
 		PKT_LEN(rep) = PKT_LEN(seg);
-- 
1.8.3.1



Thread overview: 14+ messages
2019-07-29 11:53 [dpdk-dev] [PATCH 00/11] net/mlx5: LRO fixes and enhancements Matan Azrad
2019-07-29 11:53 ` [dpdk-dev] [PATCH 01/11] net/mlx5: fix Rx scatter mode validation Matan Azrad
2019-07-29 11:53 ` [dpdk-dev] [PATCH 02/11] net/mlx5: limit LRO size to the maximum Rx packet Matan Azrad
2019-07-29 11:53 ` [dpdk-dev] [PATCH 03/11] net/mlx5: remove redundant offload flag reset Matan Azrad
2019-07-29 11:53 ` [dpdk-dev] [PATCH 04/11] net/mlx5: support mbuf headroom for LRO packet Matan Azrad
2019-07-29 11:53 ` [dpdk-dev] [PATCH 05/11] net/mlx5: fix DevX scattered Rx queue size Matan Azrad
2019-07-29 11:53 ` [dpdk-dev] [PATCH 06/11] net/mlx5: fix DevX Rx queue type Matan Azrad
2019-07-29 11:53 ` [dpdk-dev] [PATCH 07/11] net/mlx5: allow LRO in regular Rx queue Matan Azrad
2019-07-29 11:53 ` [dpdk-dev] [PATCH 08/11] net/mlx5: fix DevX Rx queue memory alignment Matan Azrad
2019-07-29 11:53 ` Matan Azrad [this message]
2019-07-29 11:53 ` [dpdk-dev] [PATCH 10/11] net/mlx5: allow implicit LRO flow Matan Azrad
2019-07-29 11:53 ` [dpdk-dev] [PATCH 11/11] net/mlx5: allow LRO per Rx queue Matan Azrad
2019-07-29 12:32 ` [dpdk-dev] [PATCH 00/11] net/mlx5: LRO fixes and enhancements Slava Ovsiienko
2019-07-29 14:37 ` Raslan Darawsheh
