From: Matan Azrad <matan@mellanox.com>
To: Shahaf Shuler, Yongseok Koh, Viacheslav Ovsiienko
Cc: dev@dpdk.org, Dekel Peled
Date: Mon, 29 Jul 2019 11:53:29 +0000
Message-Id: <1564401209-18752-12-git-send-email-matan@mellanox.com>
X-Mailer: git-send-email 1.8.3.1
In-Reply-To: <1564401209-18752-1-git-send-email-matan@mellanox.com>
References: <1564401209-18752-1-git-send-email-matan@mellanox.com>
Subject: [dpdk-dev] [PATCH 11/11] net/mlx5: allow LRO per Rx queue

Enabling the LRO offload per queue makes sense because the user will
probably want to allocate a different mempool for LRO queues - the mbuf
size of an LRO mempool may be bigger than that of a non-LRO mempool.

Change the LRO offload to be configured per queue instead of per port.
If LRO is enabled on at least one of the queues, all the queues will be
configured via DevX. If RSS flows direct TCP packets to queues with
different LRO settings, these flows will not be offloaded with LRO.

Signed-off-by: Matan Azrad <matan@mellanox.com>
Acked-by: Viacheslav Ovsiienko
---
 drivers/net/mlx5/mlx5.h          |  3 ---
 drivers/net/mlx5/mlx5_ethdev.c   |  8 +------
 drivers/net/mlx5/mlx5_rxq.c      | 52 +++++++++++++++++++---------------------
 drivers/net/mlx5/mlx5_rxtx.h     |  6 ++---
 drivers/net/mlx5/mlx5_rxtx_vec.c |  4 ++--
 drivers/net/mlx5/mlx5_trigger.c  | 10 +++++---
 6 files changed, 38 insertions(+), 45 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 5c40091..e812374 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -195,9 +195,6 @@ struct mlx5_hca_attr {
 #define MLX5_LRO_SUPPORTED(dev) \
 	(((struct mlx5_priv *)((dev)->data->dev_private))->config.lro.supported)
 
-#define MLX5_LRO_ENABLED(dev) \
-	((dev)->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO)
-
 /* LRO configurations structure. */
 struct mlx5_lro_config {
 	uint32_t supported:1; /* Whether LRO is supported. */
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index 9d11831..9629cfb 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -389,7 +389,6 @@ struct ethtool_link_settings {
 	const uint8_t use_app_rss_key =
 		!!dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key;
 	int ret = 0;
-	unsigned int lro_on = mlx5_lro_on(dev);
 
 	if (use_app_rss_key &&
 	    (dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len !=
@@ -454,11 +453,6 @@ struct ethtool_link_settings {
 			j = 0;
 		}
 	}
-	if (lro_on && priv->config.cqe_comp) {
-		/* CQE compressing is not supported for LRO CQEs. */
-		DRV_LOG(WARNING, "Rx CQE compression isn't supported with LRO");
-		priv->config.cqe_comp = 0;
-	}
 	ret = mlx5_proc_priv_init(dev);
 	if (ret)
 		return ret;
@@ -571,7 +565,7 @@ struct ethtool_link_settings {
 	info->max_tx_queues = max;
 	info->max_mac_addrs = MLX5_MAX_UC_MAC_ADDRESSES;
 	info->rx_queue_offload_capa = mlx5_get_rx_queue_offloads(dev);
-	info->rx_offload_capa = (mlx5_get_rx_port_offloads(dev) |
+	info->rx_offload_capa = (mlx5_get_rx_port_offloads() |
 				 info->rx_queue_offload_capa);
 	info->tx_offload_capa = mlx5_get_tx_port_offloads(dev);
 	info->if_index = mlx5_ifindex(dev);
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index f7e861c..a1fdeef 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -124,21 +124,6 @@
 }
 
 /**
- * Check whether LRO is supported and enabled for the device.
- *
- * @param dev
- *   Pointer to Ethernet device.
- *
- * @return
- *   0 if disabled, 1 if enabled.
- */
-inline int
-mlx5_lro_on(struct rte_eth_dev *dev)
-{
-	return (MLX5_LRO_SUPPORTED(dev) && MLX5_LRO_ENABLED(dev));
-}
-
-/**
  * Allocate RX queue elements for Multi-Packet RQ.
  *
  * @param rxq_ctrl
@@ -394,6 +379,8 @@
 			     DEV_RX_OFFLOAD_TCP_CKSUM);
 	if (config->hw_vlan_strip)
 		offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+	if (MLX5_LRO_SUPPORTED(dev))
+		offloads |= DEV_RX_OFFLOAD_TCP_LRO;
 	return offloads;
 }
 
@@ -401,19 +388,14 @@
 /**
  * Returns the per-port supported offloads.
  *
- * @param dev
- *   Pointer to Ethernet device.
- *
 * @return
  *   Supported Rx offloads.
  */
 uint64_t
-mlx5_get_rx_port_offloads(struct rte_eth_dev *dev)
+mlx5_get_rx_port_offloads(void)
 {
 	uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER;
 
-	if (MLX5_LRO_SUPPORTED(dev))
-		offloads |= DEV_RX_OFFLOAD_TCP_LRO;
 	return offloads;
 }
 
@@ -889,7 +871,8 @@
 	cq_attr.mlx5 = (struct mlx5dv_cq_init_attr){
 		.comp_mask = 0,
 	};
-	if (priv->config.cqe_comp && !rxq_data->hw_timestamp) {
+	if (priv->config.cqe_comp && !rxq_data->hw_timestamp &&
+	    !rxq_data->lro) {
 		cq_attr.mlx5.comp_mask |=
 			MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
 #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
@@ -911,6 +894,10 @@
 			"port %u Rx CQE compression is disabled for HW"
 			" timestamp",
 			dev->data->port_id);
+	} else if (priv->config.cqe_comp && rxq_data->lro) {
+		DRV_LOG(DEBUG,
+			"port %u Rx CQE compression is disabled for LRO",
+			dev->data->port_id);
 	}
 #ifdef HAVE_IBV_MLX5_MOD_CQE_128B_PAD
 	if (priv->config.cqe_pad) {
@@ -1607,6 +1594,7 @@ struct mlx5_rxq_ctrl *
 		desc + config->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP;
 	uint64_t offloads = conf->offloads |
 			   dev->data->dev_conf.rxmode.offloads;
+	unsigned int lro_on_queue = !!(offloads & DEV_RX_OFFLOAD_TCP_LRO);
 	const int mprq_en = mlx5_check_mprq_support(dev) > 0;
 	unsigned int max_rx_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
 	unsigned int non_scatter_min_mbuf_size = max_rx_pkt_len +
@@ -1646,7 +1634,7 @@ struct mlx5_rxq_ctrl *
 	 * In this case scatter is, for sure, enabled and an empty mbuf may be
 	 * added in the start for the head-room.
 	 */
-	if (mlx5_lro_on(dev) && RTE_PKTMBUF_HEADROOM > 0 &&
+	if (lro_on_queue && RTE_PKTMBUF_HEADROOM > 0 &&
 	    non_scatter_min_mbuf_size > mb_len) {
 		strd_headroom_en = 0;
 		mprq_stride_size = RTE_MIN(max_rx_pkt_len,
@@ -1693,7 +1681,7 @@ struct mlx5_rxq_ctrl *
 		unsigned int size = non_scatter_min_mbuf_size;
 		unsigned int sges_n;
 
-		if (mlx5_lro_on(dev) && first_mb_free_size <
+		if (lro_on_queue && first_mb_free_size <
 		    MLX5_MAX_LRO_HEADER_FIX) {
 			DRV_LOG(ERR, "Not enough space in the first segment(%u)"
 				" to include the max header size(%u) for LRO",
@@ -1747,13 +1735,14 @@ struct mlx5_rxq_ctrl *
 	tmpl->rxq.vlan_strip = !!(offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
 	/* By default, FCS (CRC) is stripped by hardware. */
 	tmpl->rxq.crc_present = 0;
+	tmpl->rxq.lro = lro_on_queue;
 	if (offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
 		if (config->hw_fcs_strip) {
 			/*
 			 * RQs used for LRO-enabled TIRs should not be
 			 * configured to scatter the FCS.
 			 */
-			if (mlx5_lro_on(dev))
+			if (lro_on_queue)
 				DRV_LOG(WARNING,
 					"port %u CRC stripping has been "
 					"disabled but will still be performed "
@@ -2204,7 +2193,16 @@ struct mlx5_hrxq *
 		}
 	} else { /* ind_tbl->type == MLX5_IND_TBL_TYPE_DEVX */
 		struct mlx5_devx_tir_attr tir_attr;
-
+		uint32_t i;
+		uint32_t lro = 1;
+
+		/* Enable TIR LRO only if all the queues were configured for. */
+		for (i = 0; i < queues_n; ++i) {
+			if (!(*priv->rxqs)[queues[i]]->lro) {
+				lro = 0;
+				break;
+			}
+		}
 		memset(&tir_attr, 0, sizeof(tir_attr));
 		tir_attr.disp_type = MLX5_TIRC_DISP_TYPE_INDIRECT;
 		tir_attr.rx_hash_fn = MLX5_RX_HASH_FN_TOEPLITZ;
@@ -2216,7 +2214,7 @@ struct mlx5_hrxq *
 		if (dev->data->dev_conf.lpbk_mode)
 			tir_attr.self_lb_block =
 				MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
-		if (mlx5_lro_on(dev)) {
+		if (lro) {
 			tir_attr.lro_timeout_period_usecs =
 						priv->config.lro.timeout;
 			tir_attr.lro_max_msg_sz = priv->max_lro_msg_size;
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index 9b58d0a..c209d99 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -115,7 +115,8 @@ struct mlx5_rxq_data {
 	unsigned int strd_shift_en:1; /* Enable 2bytes shift on a stride. */
 	unsigned int err_state:2; /* enum mlx5_rxq_err_state. */
 	unsigned int strd_headroom_en:1; /* Enable mbuf headroom in MPRQ. */
-	unsigned int :2; /* Remaining bits. */
+	unsigned int lro:1; /* Enable LRO. */
+	unsigned int :1; /* Remaining bits. */
 	volatile uint32_t *rq_db;
 	volatile uint32_t *cq_db;
 	uint16_t port_id;
@@ -367,9 +368,8 @@ struct mlx5_hrxq *mlx5_hrxq_get(struct rte_eth_dev *dev,
 int mlx5_hrxq_verify(struct rte_eth_dev *dev);
 struct mlx5_hrxq *mlx5_hrxq_drop_new(struct rte_eth_dev *dev);
 void mlx5_hrxq_drop_release(struct rte_eth_dev *dev);
-uint64_t mlx5_get_rx_port_offloads(struct rte_eth_dev *dev);
+uint64_t mlx5_get_rx_port_offloads(void);
 uint64_t mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev);
-int mlx5_lro_on(struct rte_eth_dev *dev);
 
 /* mlx5_txq.c */
 
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec.c b/drivers/net/mlx5/mlx5_rxtx_vec.c
index 3815ff6..3925f4d 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec.c
+++ b/drivers/net/mlx5/mlx5_rxtx_vec.c
@@ -129,6 +129,8 @@ int __attribute__((cold))
 		return -ENOTSUP;
 	if (!ctrl->priv->config.rx_vec_en || rxq->sges_n != 0)
 		return -ENOTSUP;
+	if (rxq->lro)
+		return -ENOTSUP;
 	return 1;
 }
 
@@ -151,8 +153,6 @@ int __attribute__((cold))
 		return -ENOTSUP;
 	if (mlx5_mprq_enabled(dev))
 		return -ENOTSUP;
-	if (mlx5_lro_on(dev))
-		return -ENOTSUP;
 	/* All the configured queues should support. */
 	for (i = 0; i < priv->rxqs_n; ++i) {
 		struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index 8bc2174..aa323ad 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -99,10 +99,14 @@
 	struct mlx5_priv *priv = dev->data->dev_private;
 	unsigned int i;
 	int ret = 0;
-	unsigned int lro_on = mlx5_lro_on(dev);
-	enum mlx5_rxq_obj_type obj_type = lro_on ? MLX5_RXQ_OBJ_TYPE_DEVX_RQ :
-					  MLX5_RXQ_OBJ_TYPE_IBV;
+	enum mlx5_rxq_obj_type obj_type = MLX5_RXQ_OBJ_TYPE_IBV;
 
+	for (i = 0; i < priv->rxqs_n; ++i) {
+		if ((*priv->rxqs)[i]->lro) {
+			obj_type = MLX5_RXQ_OBJ_TYPE_DEVX_RQ;
+			break;
+		}
+	}
 	/* Allocate/reuse/resize mempool for Multi-Packet RQ. */
 	if (mlx5_mprq_alloc_mp(dev)) {
 		/* Should not release Rx queues but return immediately. */
-- 
1.8.3.1
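
Editor's note, not part of the patch: the sketch below shows how an application
might consume the per-queue LRO offload described in the commit message - one
Rx queue without LRO fed from a regular mempool, and a second queue requesting
DEV_RX_OFFLOAD_TCP_LRO with its own larger-mbuf mempool. The function name,
pool names, and the queue/descriptor/mbuf sizes are arbitrary assumptions.

/* Illustrative sketch only - not part of the patch. */
#include <errno.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>

static int
setup_rx_queues(uint16_t port_id)
{
	struct rte_eth_conf conf = {0};
	struct rte_eth_rxconf rxconf = {0};
	struct rte_mempool *mp_small, *mp_lro;
	int ret;

	/* No port-wide LRO request is needed; LRO is asked per queue below. */
	ret = rte_eth_dev_configure(port_id, 2 /* Rx queues */, 1, &conf);
	if (ret < 0)
		return ret;
	/* Regular mempool for the non-LRO queue. */
	mp_small = rte_pktmbuf_pool_create("mp_small", 8192, 256, 0,
					   RTE_MBUF_DEFAULT_BUF_SIZE,
					   SOCKET_ID_ANY);
	/* Bigger mbufs for the LRO queue so coalesced TCP data fits. */
	mp_lro = rte_pktmbuf_pool_create("mp_lro", 8192, 256, 0,
					 16384 + RTE_PKTMBUF_HEADROOM,
					 SOCKET_ID_ANY);
	if (mp_small == NULL || mp_lro == NULL)
		return -ENOMEM;
	/* Queue 0: no LRO. */
	rxconf.offloads = 0;
	ret = rte_eth_rx_queue_setup(port_id, 0, 1024, SOCKET_ID_ANY,
				     &rxconf, mp_small);
	if (ret < 0)
		return ret;
	/* Queue 1: LRO requested only on this queue, with its own mempool. */
	rxconf.offloads = DEV_RX_OFFLOAD_TCP_LRO;
	ret = rte_eth_rx_queue_setup(port_id, 1, 1024, SOCKET_ID_ANY,
				     &rxconf, mp_lro);
	if (ret < 0)
		return ret;
	/* Tx queue setup and rte_eth_dev_start() are omitted for brevity. */
	return 0;
}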