DPDK patches and discussions
 help / color / mirror / Atom feed
From: Matan Azrad <matan@mellanox.com>
To: Shahaf Shuler <shahafs@mellanox.com>,
	Yongseok Koh <yskoh@mellanox.com>,
	Viacheslav Ovsiienko <viacheslavo@mellanox.com>
Cc: dev@dpdk.org, Dekel Peled <dekelp@mellanox.com>
Subject: [dpdk-dev] [PATCH 07/11] net/mlx5: allow LRO in regular Rx queue
Date: Mon, 29 Jul 2019 11:53:25 +0000	[thread overview]
Message-ID: <1564401209-18752-8-git-send-email-matan@mellanox.com> (raw)
In-Reply-To: <1564401209-18752-1-git-send-email-matan@mellanox.com>

LRO support was implemented only for MPRQ, hence the MPRQ Rx burst was
selected when LRO was configured on the port.

The current support for MPRQ suffers from poor memory utilization
since an external mempool is allocated by the PMD for the packet data
in addition to the user mempool. Besides that, the user may get packet
data addresses which were not configured by him.

Even though MPRQ has the best performance for packet receiving in most
cases, because of the above facts it is better to remove the
automatic MPRQ selection when LRO is configured.

Move MPRQ to be selected only when the user forces it via the PMD
arguments, including the LRO case.

Allow LRO offload using the regular RQ with the regular Rx burst
function.

Signed-off-by: Matan Azrad <matan@mellanox.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
---
 drivers/net/mlx5/mlx5.c          |  4 +---
 drivers/net/mlx5/mlx5_ethdev.c   |  6 ------
 drivers/net/mlx5/mlx5_prm.h      |  3 +++
 drivers/net/mlx5/mlx5_rxq.c      | 27 ++++++++++++++-------------
 drivers/net/mlx5/mlx5_rxtx.h     |  4 ++--
 drivers/net/mlx5/mlx5_rxtx_vec.c |  2 ++
 6 files changed, 22 insertions(+), 24 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index ad0883d..a490bf2 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -1856,7 +1856,7 @@ struct mlx5_dev_spawn_data {
 		if (priv->counter_fallback)
 			DRV_LOG(INFO, "Use fall-back DV counter management\n");
 		/* Check for LRO support. */
-		if (config.dest_tir && mprq && config.hca_attr.lro_cap) {
+		if (config.dest_tir && config.hca_attr.lro_cap) {
 			/* TBD check tunnel lro caps. */
 			config.lro.supported = config.hca_attr.lro_cap;
 			DRV_LOG(DEBUG, "Device supports LRO");
@@ -1869,8 +1869,6 @@ struct mlx5_dev_spawn_data {
 				config.hca_attr.lro_timer_supported_periods[0];
 			DRV_LOG(DEBUG, "LRO session timeout set to %d usec",
 				config.lro.timeout);
-			config.mprq.enabled = 1;
-			DRV_LOG(DEBUG, "Enable MPRQ for LRO use");
 		}
 	}
 	if (config.mprq.enabled && mprq) {
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index e627909..9d11831 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -433,12 +433,6 @@ struct ethtool_link_settings {
 			dev->data->port_id, priv->rxqs_n, rxqs_n);
 		priv->rxqs_n = rxqs_n;
 		/*
-		 * WHen using LRO, MPRQ is implicitly enabled.
-		 * Adjust threshold value to ensure MPRQ can be enabled.
-		 */
-		if (lro_on && priv->config.mprq.min_rxqs_num > priv->rxqs_n)
-			priv->config.mprq.min_rxqs_num = priv->rxqs_n;
-		/*
 		 * If the requested number of RX queues is not a power of two,
 		 * use the maximum indirection table size for better balancing.
 		 * The result is always rounded to the next power of two.
diff --git a/drivers/net/mlx5/mlx5_prm.h b/drivers/net/mlx5/mlx5_prm.h
index 0716bbd..6ea6345 100644
--- a/drivers/net/mlx5/mlx5_prm.h
+++ b/drivers/net/mlx5/mlx5_prm.h
@@ -237,6 +237,9 @@
 /* Amount of data bytes after eth data segment. */
 #define MLX5_ESEG_EXTRA_DATA_SIZE 32u
 
+/* The maximum log value of segments per RQ WQE. */
+#define MLX5_MAX_LOG_RQ_SEGS 5u
+
 /* Completion mode. */
 enum mlx5_completion_mode {
 	MLX5_COMP_ONLY_ERR = 0x0,
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 5e54156..ad5b0a9 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -93,7 +93,6 @@
 
 /**
  * Check whether Multi-Packet RQ is enabled for the device.
- * MPRQ can be enabled explicitly, or implicitly by enabling LRO.
  *
  * @param dev
  *   Pointer to Ethernet device.
@@ -1607,6 +1606,7 @@ struct mlx5_rxq_ctrl *
 	unsigned int max_rx_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
 	unsigned int non_scatter_min_mbuf_size = max_rx_pkt_len +
 							RTE_PKTMBUF_HEADROOM;
+	unsigned int max_lro_size = 0;
 
 	if (non_scatter_min_mbuf_size > mb_len && !(offloads &
 						    DEV_RX_OFFLOAD_SCATTER)) {
@@ -1672,8 +1672,9 @@ struct mlx5_rxq_ctrl *
 		tmpl->rxq.strd_headroom_en = strd_headroom_en;
 		tmpl->rxq.mprq_max_memcpy_len = RTE_MIN(mb_len -
 			    RTE_PKTMBUF_HEADROOM, config->mprq.max_memcpy_len);
-		mlx5_max_lro_msg_size_adjust(dev, RTE_MIN(max_rx_pkt_len,
-		   (1u << tmpl->rxq.strd_num_n) * (1u << tmpl->rxq.strd_sz_n)));
+		max_lro_size = RTE_MIN(max_rx_pkt_len,
+				       (1u << tmpl->rxq.strd_num_n) *
+				       (1u << tmpl->rxq.strd_sz_n));
 		DRV_LOG(DEBUG,
 			"port %u Rx queue %u: Multi-Packet RQ is enabled"
 			" strd_num_n = %u, strd_sz_n = %u",
@@ -1681,6 +1682,7 @@ struct mlx5_rxq_ctrl *
 			tmpl->rxq.strd_num_n, tmpl->rxq.strd_sz_n);
 	} else if (max_rx_pkt_len <= (mb_len - RTE_PKTMBUF_HEADROOM)) {
 		tmpl->rxq.sges_n = 0;
+		max_lro_size = max_rx_pkt_len;
 	} else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
 		unsigned int size = non_scatter_min_mbuf_size;
 		unsigned int sges_n;
@@ -1690,20 +1692,18 @@ struct mlx5_rxq_ctrl *
 		 * and round it to the next power of two.
 		 */
 		sges_n = log2above((size / mb_len) + !!(size % mb_len));
-		tmpl->rxq.sges_n = sges_n;
-		/* Make sure rxq.sges_n did not overflow. */
-		size = mb_len * (1 << tmpl->rxq.sges_n);
-		size -= RTE_PKTMBUF_HEADROOM;
-		if (size < max_rx_pkt_len) {
+		if (sges_n > MLX5_MAX_LOG_RQ_SEGS) {
 			DRV_LOG(ERR,
 				"port %u too many SGEs (%u) needed to handle"
-				" requested maximum packet size %u",
-				dev->data->port_id,
-				1 << sges_n,
-				max_rx_pkt_len);
-			rte_errno = EOVERFLOW;
+				" requested maximum packet size %u, the maximum"
+				" supported are %u", dev->data->port_id,
+				1 << sges_n, max_rx_pkt_len,
+				1u << MLX5_MAX_LOG_RQ_SEGS);
+			rte_errno = ENOTSUP;
 			goto error;
 		}
+		tmpl->rxq.sges_n = sges_n;
+		max_lro_size = max_rx_pkt_len;
 	}
 	if (mprq_en && !mlx5_rxq_mprq_enabled(&tmpl->rxq))
 		DRV_LOG(WARNING,
@@ -1725,6 +1725,7 @@ struct mlx5_rxq_ctrl *
 		rte_errno = EINVAL;
 		goto error;
 	}
+	mlx5_max_lro_msg_size_adjust(dev, max_lro_size);
 	/* Toggle RX checksum offload if hardware supports it. */
 	tmpl->rxq.csum = !!(offloads & DEV_RX_OFFLOAD_CHECKSUM);
 	tmpl->rxq.hw_timestamp = !!(offloads & DEV_RX_OFFLOAD_TIMESTAMP);
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index 60d871c..5704d0a 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -105,7 +105,7 @@ struct mlx5_rxq_data {
 	unsigned int hw_timestamp:1; /* Enable HW timestamp. */
 	unsigned int vlan_strip:1; /* Enable VLAN stripping. */
 	unsigned int crc_present:1; /* CRC must be subtracted. */
-	unsigned int sges_n:2; /* Log 2 of SGEs (max buffers per packet). */
+	unsigned int sges_n:3; /* Log 2 of SGEs (max buffers per packet). */
 	unsigned int cqe_n:4; /* Log 2 of CQ elements. */
 	unsigned int elts_n:4; /* Log 2 of Mbufs. */
 	unsigned int rss_hash:1; /* RSS hash result is enabled. */
@@ -115,7 +115,7 @@ struct mlx5_rxq_data {
 	unsigned int strd_shift_en:1; /* Enable 2bytes shift on a stride. */
 	unsigned int err_state:2; /* enum mlx5_rxq_err_state. */
 	unsigned int strd_headroom_en:1; /* Enable mbuf headroom in MPRQ. */
-	unsigned int :3; /* Remaining bits. */
+	unsigned int :2; /* Remaining bits. */
 	volatile uint32_t *rq_db;
 	volatile uint32_t *cq_db;
 	uint16_t port_id;
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec.c b/drivers/net/mlx5/mlx5_rxtx_vec.c
index f6ec828..3815ff6 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec.c
+++ b/drivers/net/mlx5/mlx5_rxtx_vec.c
@@ -151,6 +151,8 @@ int __attribute__((cold))
 		return -ENOTSUP;
 	if (mlx5_mprq_enabled(dev))
 		return -ENOTSUP;
+	if (mlx5_lro_on(dev))
+		return -ENOTSUP;
 	/* All the configured queues should support. */
 	for (i = 0; i < priv->rxqs_n; ++i) {
 		struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
-- 
1.8.3.1


  parent reply	other threads:[~2019-07-29 12:19 UTC|newest]

Thread overview: 14+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2019-07-29 11:53 [dpdk-dev] [PATCH 00/11] net/mlx5: LRO fixes and enhancements Matan Azrad
2019-07-29 11:53 ` [dpdk-dev] [PATCH 01/11] net/mlx5: fix Rx scatter mode validation Matan Azrad
2019-07-29 11:53 ` [dpdk-dev] [PATCH 02/11] net/mlx5: limit LRO size to the maximum Rx packet Matan Azrad
2019-07-29 11:53 ` [dpdk-dev] [PATCH 03/11] net/mlx5: remove redundant offload flag reset Matan Azrad
2019-07-29 11:53 ` [dpdk-dev] [PATCH 04/11] net/mlx5: support mbuf headroom for LRO packet Matan Azrad
2019-07-29 11:53 ` [dpdk-dev] [PATCH 05/11] net/mlx5: fix DevX scattered Rx queue size Matan Azrad
2019-07-29 11:53 ` [dpdk-dev] [PATCH 06/11] net/mlx5: fix DevX Rx queue type Matan Azrad
2019-07-29 11:53 ` Matan Azrad [this message]
2019-07-29 11:53 ` [dpdk-dev] [PATCH 08/11] net/mlx5: fix DevX Rx queue memory alignment Matan Azrad
2019-07-29 11:53 ` [dpdk-dev] [PATCH 09/11] net/mlx5: handle LRO packets in regular Rx queue Matan Azrad
2019-07-29 11:53 ` [dpdk-dev] [PATCH 10/11] net/mlx5: allow implicit LRO flow Matan Azrad
2019-07-29 11:53 ` [dpdk-dev] [PATCH 11/11] net/mlx5: allow LRO per Rx queue Matan Azrad
2019-07-29 12:32 ` [dpdk-dev] [PATCH 00/11] net/mlx5: LRO fixes and enhancements Slava Ovsiienko
2019-07-29 14:37 ` Raslan Darawsheh

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1564401209-18752-8-git-send-email-matan@mellanox.com \
    --to=matan@mellanox.com \
    --cc=dekelp@mellanox.com \
    --cc=dev@dpdk.org \
    --cc=shahafs@mellanox.com \
    --cc=viacheslavo@mellanox.com \
    --cc=yskoh@mellanox.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).