DPDK patches and discussions
From: Matan Azrad <matan@mellanox.com>
To: Shahaf Shuler <shahafs@mellanox.com>,
	Yongseok Koh <yskoh@mellanox.com>,
	Viacheslav Ovsiienko <viacheslavo@mellanox.com>
Cc: dev@dpdk.org, Dekel Peled <dekelp@mellanox.com>
Subject: [dpdk-dev] [PATCH 10/11] net/mlx5: allow implicit LRO flow
Date: Mon, 29 Jul 2019 11:53:28 +0000	[thread overview]
Message-ID: <1564401209-18752-11-git-send-email-matan@mellanox.com> (raw)
In-Reply-To: <1564401209-18752-1-git-send-email-matan@mellanox.com>

When a user configures LRO in the port Rx offloads, the expectation is
that every TCP packet gets a chance to open an LRO session.

The PMD did not configure LRO in the flow TIR when the flow did not
include an explicit TCP item, even though the flow could carry TCP
traffic.

For example, the following flows were not LRO offloaded:
pattern eth / end, pattern eth / ip / end, pattern eth / ipv6 / end.

Enable LRO configuration for all the TIRs when LRO is configured on the
port.

No performance impact for non-LRO traffic in these TIRs.
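
For reference, below is a minimal application-side sketch (not part of
the patch; the port_id, queue index and error handling are assumed)
showing the configuration this behavior targets: LRO is requested once
in the port Rx offloads, and a flow created without an explicit TCP
item, e.g. eth / ipv4 / end, is now expected to land on an LRO-enabled
TIR:

	#include <rte_ethdev.h>
	#include <rte_flow.h>

	uint16_t port_id = 0;                  /* hypothetical port */
	struct rte_eth_conf conf = { 0 };

	/* Request LRO at the port level; with this patch every TIR
	 * created for the port's flows is LRO-enabled. */
	conf.rxmode.offloads |= DEV_RX_OFFLOAD_TCP_LRO;
	rte_eth_dev_configure(port_id, 1, 1, &conf);
	/* ... Rx/Tx queue setup and rte_eth_dev_start() omitted ... */

	/* Flow without an explicit TCP item (eth / ipv4 / end):
	 * previously not LRO offloaded, now it is. */
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_action_queue queue = { .index = 0 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;
	struct rte_flow *flow = rte_flow_create(port_id, &attr,
						pattern, actions, &err);
	(void)flow;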

Signed-off-by: Matan Azrad <matan@mellanox.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
---
 drivers/net/mlx5/mlx5.h            |  3 ---
 drivers/net/mlx5/mlx5_flow_dv.c    | 18 +-----------------
 drivers/net/mlx5/mlx5_flow_verbs.c |  3 +--
 drivers/net/mlx5/mlx5_rxq.c        | 10 +++++-----
 drivers/net/mlx5/mlx5_rxtx.h       |  2 +-
 5 files changed, 8 insertions(+), 28 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 6cb8858..5c40091 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -198,9 +198,6 @@ struct mlx5_hca_attr {
 #define MLX5_LRO_ENABLED(dev) \
 	((dev)->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO)
 
-#define MLX5_FLOW_IPV4_LRO	(1 << 0)
-#define MLX5_FLOW_IPV6_LRO	(1 << 1)
-
 /* LRO configurations structure. */
 struct mlx5_lro_config {
 	uint32_t supported:1; /* Whether LRO is supported. */
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index f1d32bd..59ef716 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -62,9 +62,6 @@
 	uint32_t attr;
 };
 
-#define MLX5_FLOW_IPV4_LRO (1 << 0)
-#define MLX5_FLOW_IPV6_LRO (1 << 1)
-
 /**
  * Initialize flow attributes structure according to flow items' types.
  *
@@ -5186,26 +5183,13 @@ struct field_modify_info modify_tcp[] = {
 					     (*flow->queue),
 					     flow->rss.queue_num);
 			if (!hrxq) {
-				int lro = 0;
-
-				if (mlx5_lro_on(dev)) {
-					if ((dev_flow->layers &
-					     MLX5_FLOW_LAYER_IPV4_LRO)
-					    == MLX5_FLOW_LAYER_IPV4_LRO)
-						lro = MLX5_FLOW_IPV4_LRO;
-					else if ((dev_flow->layers &
-						  MLX5_FLOW_LAYER_IPV6_LRO)
-						 == MLX5_FLOW_LAYER_IPV6_LRO)
-						lro = MLX5_FLOW_IPV6_LRO;
-				}
 				hrxq = mlx5_hrxq_new
 					(dev, flow->key, MLX5_RSS_HASH_KEY_LEN,
 					 dv->hash_fields, (*flow->queue),
 					 flow->rss.queue_num,
 					 !!(dev_flow->layers &
-					    MLX5_FLOW_LAYER_TUNNEL), lro);
+					    MLX5_FLOW_LAYER_TUNNEL));
 			}
-
 			if (!hrxq) {
 				rte_flow_error_set
 					(error, rte_errno,
diff --git a/drivers/net/mlx5/mlx5_flow_verbs.c b/drivers/net/mlx5/mlx5_flow_verbs.c
index bcec3b4..fd6f2d5 100644
--- a/drivers/net/mlx5/mlx5_flow_verbs.c
+++ b/drivers/net/mlx5/mlx5_flow_verbs.c
@@ -1669,8 +1669,7 @@
 						     (*flow->queue),
 						     flow->rss.queue_num,
 						     !!(dev_flow->layers &
-							MLX5_FLOW_LAYER_TUNNEL),
-						     0);
+						       MLX5_FLOW_LAYER_TUNNEL));
 			if (!hrxq) {
 				rte_flow_error_set
 					(error, rte_errno,
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 3705d07..f7e861c 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -2100,8 +2100,6 @@ struct mlx5_rxq_ctrl *
  *   Number of queues.
  * @param tunnel
  *   Tunnel type.
- * @param lro
- *   Flow rule is relevant for LRO, i.e. contains IPv4/IPv6 and TCP.
  *
  * @return
  *   The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
@@ -2111,7 +2109,7 @@ struct mlx5_hrxq *
 	      const uint8_t *rss_key, uint32_t rss_key_len,
 	      uint64_t hash_fields,
 	      const uint16_t *queues, uint32_t queues_n,
-	      int tunnel __rte_unused, int lro)
+	      int tunnel __rte_unused)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_hrxq *hrxq;
@@ -2218,11 +2216,13 @@ struct mlx5_hrxq *
 		if (dev->data->dev_conf.lpbk_mode)
 			tir_attr.self_lb_block =
 					MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
-		if (lro) {
+		if (mlx5_lro_on(dev)) {
 			tir_attr.lro_timeout_period_usecs =
 					priv->config.lro.timeout;
 			tir_attr.lro_max_msg_sz = priv->max_lro_msg_size;
-			tir_attr.lro_enable_mask = lro;
+			tir_attr.lro_enable_mask =
+					MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
+					MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO;
 		}
 		tir = mlx5_devx_cmd_create_tir(priv->sh->ctx, &tir_attr);
 		if (!tir) {
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index 5704d0a..9b58d0a 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -358,7 +358,7 @@ struct mlx5_hrxq *mlx5_hrxq_new(struct rte_eth_dev *dev,
 				const uint8_t *rss_key, uint32_t rss_key_len,
 				uint64_t hash_fields,
 				const uint16_t *queues, uint32_t queues_n,
-				int tunnel __rte_unused, int lro);
+				int tunnel __rte_unused);
 struct mlx5_hrxq *mlx5_hrxq_get(struct rte_eth_dev *dev,
 				const uint8_t *rss_key, uint32_t rss_key_len,
 				uint64_t hash_fields,
-- 
1.8.3.1


Thread overview: 14+ messages
2019-07-29 11:53 [dpdk-dev] [PATCH 00/11] net/mlx5: LRO fixes and enhancements Matan Azrad
2019-07-29 11:53 ` [dpdk-dev] [PATCH 01/11] net/mlx5: fix Rx scatter mode validation Matan Azrad
2019-07-29 11:53 ` [dpdk-dev] [PATCH 02/11] net/mlx5: limit LRO size to the maximum Rx packet Matan Azrad
2019-07-29 11:53 ` [dpdk-dev] [PATCH 03/11] net/mlx5: remove redundant offload flag reset Matan Azrad
2019-07-29 11:53 ` [dpdk-dev] [PATCH 04/11] net/mlx5: support mbuf headroom for LRO packet Matan Azrad
2019-07-29 11:53 ` [dpdk-dev] [PATCH 05/11] net/mlx5: fix DevX scattered Rx queue size Matan Azrad
2019-07-29 11:53 ` [dpdk-dev] [PATCH 06/11] net/mlx5: fix DevX Rx queue type Matan Azrad
2019-07-29 11:53 ` [dpdk-dev] [PATCH 07/11] net/mlx5: allow LRO in regular Rx queue Matan Azrad
2019-07-29 11:53 ` [dpdk-dev] [PATCH 08/11] net/mlx5: fix DevX Rx queue memory alignment Matan Azrad
2019-07-29 11:53 ` [dpdk-dev] [PATCH 09/11] net/mlx5: handle LRO packets in regular Rx queue Matan Azrad
2019-07-29 11:53 ` Matan Azrad [this message]
2019-07-29 11:53 ` [dpdk-dev] [PATCH 11/11] net/mlx5: allow LRO per " Matan Azrad
2019-07-29 12:32 ` [dpdk-dev] [PATCH 00/11] net/mlx5: LRO fixes and enhancements Slava Ovsiienko
2019-07-29 14:37 ` Raslan Darawsheh
