From: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
To: dev@dpdk.org
Cc: thomas@monjalon.net, matan@nvidia.com, akozyrev@nvidia.com, rasland@nvidia.com, orika@nvidia.com
Date: Mon, 26 Oct 2020 11:55:00 +0000
Message-Id: <1603713305-30991-2-git-send-email-viacheslavo@nvidia.com>
X-Mailer: git-send-email 1.8.3.1
In-Reply-To: <1603713305-30991-1-git-send-email-viacheslavo@nvidia.com>
References: <1603381371-5360-2-git-send-email-viacheslavo@nvidia.com>
 <1603713305-30991-1-git-send-email-viacheslavo@nvidia.com>
Subject: [dpdk-dev] [PATCH v4 1/6] net/mlx5: add extended Rx queue setup routine

Add a routine that sets up an Rx queue with an extended receiving
buffer description. It allows the application to specify the desired
segment lengths, the data offsets within the buffers, and a dedicated
memory pool for each segment.

Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
---
 drivers/net/mlx5/mlx5.h      |  3 +++
 drivers/net/mlx5/mlx5_rxq.c  | 39 ++++++++++++++++++++++++++++++++++-----
 drivers/net/mlx5/mlx5_rxtx.h | 13 ++++++++++++-
 3 files changed, 49 insertions(+), 6 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index bb954c4..258be03 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -164,6 +164,9 @@ struct mlx5_stats_ctrl {
 /* Maximal size of aggregated LRO packet. */
 #define MLX5_MAX_LRO_SIZE (UINT8_MAX * MLX5_LRO_SEG_CHUNK_SIZE)
 
+/* Maximal number of segments to split. */
+#define MLX5_MAX_RXQ_NSEG (1u << MLX5_MAX_LOG_RQ_SEGS)
+
 /* LRO configurations structure. */
 struct mlx5_lro_config {
 	uint32_t supported:1; /* Whether LRO is supported. */
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 0176ece..72d76c1 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -744,12 +744,40 @@
 	struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
 	struct mlx5_rxq_ctrl *rxq_ctrl =
 		container_of(rxq, struct mlx5_rxq_ctrl, rxq);
+	struct rte_eth_rxseg_split *rx_seg =
+			(struct rte_eth_rxseg_split *)conf->rx_seg;
+	struct rte_eth_rxseg_split rx_single = {.mp = mp};
+	uint16_t n_seg = conf->rx_nseg;
 	int res;
 
+	if (mp) {
+		/*
+		 * The parameters should be checked on rte_eth_dev layer.
+		 * If mp is specified it means the compatible configuration
+		 * without buffer split feature tuning.
+		 */
+		rx_seg = &rx_single;
+		n_seg = 1;
+	}
+	if (n_seg > 1) {
+		uint64_t offloads = conf->offloads |
+				    dev->data->dev_conf.rxmode.offloads;
+
+		/* The offloads should be checked on rte_eth_dev layer. */
+		MLX5_ASSERT(offloads & DEV_RX_OFFLOAD_SCATTER);
+		if (!(offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)) {
+			DRV_LOG(ERR, "port %u queue index %u split "
+				     "offload not configured",
+				     dev->data->port_id, idx);
+			rte_errno = ENOSPC;
+			return -rte_errno;
+		}
+		MLX5_ASSERT(n_seg < MLX5_MAX_RXQ_NSEG);
+	}
 	res = mlx5_rx_queue_pre_setup(dev, idx, &desc);
 	if (res)
 		return res;
-	rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, conf, mp);
+	rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, conf, rx_seg, n_seg);
 	if (!rxq_ctrl) {
 		DRV_LOG(ERR, "port %u unable to allocate queue index %u",
 			dev->data->port_id, idx);
@@ -1342,11 +1370,11 @@ struct mlx5_rxq_ctrl *
 mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	     unsigned int socket, const struct rte_eth_rxconf *conf,
-	     struct rte_mempool *mp)
+	     const struct rte_eth_rxseg_split *rx_seg, uint16_t n_seg)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_rxq_ctrl *tmpl;
-	unsigned int mb_len = rte_pktmbuf_data_room_size(mp);
+	unsigned int mb_len = rte_pktmbuf_data_room_size(rx_seg[0].mp);
 	struct mlx5_dev_config *config = &priv->config;
 	uint64_t offloads = conf->offloads |
 			   dev->data->dev_conf.rxmode.offloads;
@@ -1358,7 +1386,8 @@ struct mlx5_rxq_ctrl *
 		RTE_PKTMBUF_HEADROOM;
 	unsigned int max_lro_size = 0;
 	unsigned int first_mb_free_size = mb_len - RTE_PKTMBUF_HEADROOM;
-	const int mprq_en = mlx5_check_mprq_support(dev) > 0;
+	const int mprq_en = mlx5_check_mprq_support(dev) > 0 && n_seg == 1 &&
+			    !rx_seg[0].offset && !rx_seg[0].length;
 	unsigned int mprq_stride_nums = config->mprq.stride_num_n ?
 		config->mprq.stride_num_n : MLX5_MPRQ_STRIDE_NUM_N;
 	unsigned int mprq_stride_size = non_scatter_min_mbuf_size <=
@@ -1544,7 +1573,7 @@ struct mlx5_rxq_ctrl *
 		(!!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS));
 	tmpl->rxq.port_id = dev->data->port_id;
 	tmpl->priv = priv;
-	tmpl->rxq.mp = mp;
+	tmpl->rxq.mp = rx_seg[0].mp;
 	tmpl->rxq.elts_n = log2above(desc);
 	tmpl->rxq.rq_repl_thresh =
 		MLX5_VPMD_RXQ_RPLNSH_THRESH(desc_n);
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index 1b35a26..f204f7e 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -101,6 +101,13 @@ enum mlx5_rqx_code {
 	MLX5_RXQ_CODE_DROPPED,
 };
 
+struct mlx5_eth_rxseg {
+	struct rte_mempool *mp; /**< Memory pool to allocate segment from. */
+	uint16_t length; /**< Segment data length, configures split point. */
+	uint16_t offset; /**< Data offset from beginning of mbuf data buffer. */
+	uint32_t reserved; /**< Reserved field. */
+};
+
 /* RX queue descriptor. */
 struct mlx5_rxq_data {
 	unsigned int csum:1; /* Enable checksum offloading. */
@@ -158,6 +165,9 @@ struct mlx5_rxq_data {
 	uint32_t tunnel; /* Tunnel information. */
 	uint64_t flow_meta_mask;
 	int32_t flow_meta_offset;
+	uint32_t rxseg_n; /* Number of split segment descriptions. */
+	struct mlx5_eth_rxseg rxseg[MLX5_MAX_RXQ_NSEG];
+	/* Buffer split segment descriptions - sizes, offsets, pools. */
 } __rte_cache_aligned;
 
 enum mlx5_rxq_type {
@@ -321,7 +331,8 @@ int mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 struct mlx5_rxq_ctrl *mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx,
 				   uint16_t desc, unsigned int socket,
 				   const struct rte_eth_rxconf *conf,
-				   struct rte_mempool *mp);
+				   const struct rte_eth_rxseg_split *rx_seg,
+				   uint16_t n_seg);
 struct mlx5_rxq_ctrl *mlx5_rxq_hairpin_new
 	(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	 const struct rte_eth_hairpin_conf *hairpin_conf);
-- 
1.8.3.1
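
For reviewers, a minimal usage sketch (not part of the patch) of the
application-side configuration that this setup path consumes. It assumes
the generic ethdev buffer-split API introduced alongside this series
(struct rte_eth_rxseg_split, the rx_seg/rx_nseg fields of struct
rte_eth_rxconf and RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT); the pool pointers,
the 128-byte split point and the setup_split_rxq() helper name are
illustrative only.

#include <string.h>
#include <rte_ethdev.h>
#include <rte_mempool.h>

static int
setup_split_rxq(uint16_t port_id, uint16_t queue_id, uint16_t nb_desc,
		struct rte_mempool *hdr_pool, struct rte_mempool *pay_pool)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rxconf rxconf;
	union rte_eth_rxseg rx_seg[2];
	int ret;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;
	rxconf = dev_info.default_rxconf;
	/*
	 * Both SCATTER and BUFFER_SPLIT must be enabled, either here or
	 * in rxmode.offloads at rte_eth_dev_configure() time.
	 */
	rxconf.offloads |= DEV_RX_OFFLOAD_SCATTER |
			   RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT;
	memset(rx_seg, 0, sizeof(rx_seg));
	/* First segment: up to 128 bytes (headers) from hdr_pool. */
	rx_seg[0].split.mp = hdr_pool;
	rx_seg[0].split.length = 128;
	rx_seg[0].split.offset = 0;
	/* Second segment: the remaining payload from pay_pool. */
	rx_seg[1].split.mp = pay_pool;
	rx_seg[1].split.length = 0; /* 0 - take the rest of the packet. */
	rx_seg[1].split.offset = 0;
	rxconf.rx_seg = rx_seg;
	rxconf.rx_nseg = RTE_DIM(rx_seg);
	/* A NULL mempool selects the segmented (buffer split) path. */
	return rte_eth_rx_queue_setup(port_id, queue_id, nb_desc,
				      rte_eth_dev_socket_id(port_id),
				      &rxconf, NULL);
}

Passing a non-NULL mempool instead keeps the compatible single-segment
behaviour, which is exactly the mp-to-rx_single fallback in the hunk above.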