From: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
To: dev@dpdk.org
Cc: thomas@monjalon.net, stephen@networkplumber.org, ferruh.yigit@intel.com, olivier.matz@6wind.com, jerinjacobk@gmail.com, maxime.coquelin@redhat.com, david.marchand@redhat.com, arybchenko@solarflare.com
Date: Mon, 12 Oct 2020 16:19:42 +0000
Message-Id: <1602519585-5194-7-git-send-email-viacheslavo@nvidia.com>
X-Mailer: git-send-email 1.8.3.1
In-Reply-To: <1602519585-5194-1-git-send-email-viacheslavo@nvidia.com>
References: <1602519585-5194-1-git-send-email-viacheslavo@nvidia.com>
Subject: [dpdk-dev] [PATCH v3 6/9] net/mlx5: add extended Rx queue setup routine

Add a routine to set up an Rx queue with an extended receiving buffer
description. It allows the application to specify, for each segment of
a split packet, the desired data length, the data offset from the
beginning of the buffer, and a dedicated memory pool.

Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
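A minimal usage sketch (illustrative only, not part of the patch): it
assumes the rte_eth_rxseg field names (mp/length/offset) and the
rte_eth_rxseg_queue_setup() ethdev entry point introduced by the
earlier ethdev patches of this series; the pool names, queue index,
descriptor count, and segment sizes below are hypothetical:

#include <rte_common.h>
#include <rte_lcore.h>
#include <rte_ethdev.h>

/* hdr_pool and data_pool are pre-created mempools (hypothetical). */
static int
setup_split_rxq(uint16_t port_id, struct rte_mempool *hdr_pool,
		struct rte_mempool *data_pool)
{
	struct rte_eth_rxseg segs[] = {
		/* First 128 bytes of each packet go to hdr_pool mbufs. */
		{ .mp = hdr_pool, .length = 128, .offset = 0 },
		/* Zero length: the rest uses the pool buffer size. */
		{ .mp = data_pool, .length = 0, .offset = 0 },
	};
	struct rte_eth_rxconf rxconf = {
		/* Splitting requires scattering plus the new offload. */
		.offloads = DEV_RX_OFFLOAD_SCATTER |
			    RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT,
	};

	return rte_eth_rxseg_queue_setup(port_id, 0, 512, rte_socket_id(),
					 &rxconf, segs, RTE_DIM(segs));
}

With a single segment of zero length the call degenerates to the legacy
single-pool behavior, which is exactly how the mlx5_rx_queue_setup()
compatibility wrapper below maps the old API onto the new one.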
 drivers/net/mlx5/linux/mlx5_os.c |  2 +
 drivers/net/mlx5/mlx5.h          |  3 ++
 drivers/net/mlx5/mlx5_rxq.c      | 91 +++++++++++++++++++++++++++++++++++-----
 drivers/net/mlx5/mlx5_rxtx.h     | 10 ++++-
 4 files changed, 95 insertions(+), 11 deletions(-)

diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c
index 487714f..0e85489 100644
--- a/drivers/net/mlx5/linux/mlx5_os.c
+++ b/drivers/net/mlx5/linux/mlx5_os.c
@@ -2495,6 +2495,7 @@
 	.dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get,
 	.vlan_filter_set = mlx5_vlan_filter_set,
 	.rx_queue_setup = mlx5_rx_queue_setup,
+	.rxseg_queue_setup = mlx5_rxseg_queue_setup,
 	.rx_hairpin_queue_setup = mlx5_rx_hairpin_queue_setup,
 	.tx_queue_setup = mlx5_tx_queue_setup,
 	.tx_hairpin_queue_setup = mlx5_tx_hairpin_queue_setup,
@@ -2578,6 +2579,7 @@
 	.dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get,
 	.vlan_filter_set = mlx5_vlan_filter_set,
 	.rx_queue_setup = mlx5_rx_queue_setup,
+	.rxseg_queue_setup = mlx5_rxseg_queue_setup,
 	.rx_hairpin_queue_setup = mlx5_rx_hairpin_queue_setup,
 	.tx_queue_setup = mlx5_tx_queue_setup,
 	.tx_hairpin_queue_setup = mlx5_tx_hairpin_queue_setup,
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 87d3c15..bfc0812 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -162,6 +162,9 @@ struct mlx5_stats_ctrl {
 /* Maximal size of aggregated LRO packet. */
 #define MLX5_MAX_LRO_SIZE (UINT8_MAX * MLX5_LRO_SEG_CHUNK_SIZE)
 
+/* Maximal number of segments to split. */
+#define MLX5_MAX_RXQ_NSEG (1u << MLX5_MAX_LOG_RQ_SEGS)
+
 /* LRO configurations structure. */
 struct mlx5_lro_config {
 	uint32_t supported:1; /* Whether LRO is supported. */
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index f1d8373..42818d8 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -390,6 +390,7 @@
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_dev_config *config = &priv->config;
 	uint64_t offloads = (DEV_RX_OFFLOAD_SCATTER |
+			     RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT |
 			     DEV_RX_OFFLOAD_TIMESTAMP |
 			     DEV_RX_OFFLOAD_JUMBO_FRAME |
 			     DEV_RX_OFFLOAD_RSS_HASH);
@@ -715,16 +716,20 @@
  *   NUMA socket on which memory must be allocated.
  * @param[in] conf
  *   Thresholds parameters.
- * @param mp
- *   Memory pool for buffer allocations.
+ * @param rx_seg
+ *   Pointer to the array of segment descriptions; each element
+ *   describes the memory pool, the maximal data length, and the
+ *   initial data offset from the beginning of the data buffer in mbuf.
+ * @param n_seg
+ *   Number of elements in the segment descriptions array.
  *
  * @return
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 int
-mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
-		    unsigned int socket, const struct rte_eth_rxconf *conf,
-		    struct rte_mempool *mp)
+mlx5_rxseg_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
+		       unsigned int socket, const struct rte_eth_rxconf *conf,
+		       const struct rte_eth_rxseg *rx_seg, uint16_t n_seg)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
@@ -732,10 +737,43 @@
 			container_of(rxq, struct mlx5_rxq_ctrl, rxq);
 	int res;
 
+	if (!n_seg || !rx_seg) {
+		DRV_LOG(ERR, "port %u queue index %u invalid "
+			     "split description",
+			     dev->data->port_id, idx);
+		rte_errno = EINVAL;
+		return -rte_errno;
+	}
+	if (n_seg > 1) {
+		uint64_t offloads = conf->offloads |
+				    dev->data->dev_conf.rxmode.offloads;
+
+		if (!(offloads & DEV_RX_OFFLOAD_SCATTER)) {
+			DRV_LOG(ERR, "port %u queue index %u split "
+				     "configuration requires scattering",
+				     dev->data->port_id, idx);
+			rte_errno = ENOSPC;
+			return -rte_errno;
+		}
+		if (!(offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)) {
+			DRV_LOG(ERR, "port %u queue index %u split "
+				     "offload not configured",
+				     dev->data->port_id, idx);
+			rte_errno = ENOSPC;
+			return -rte_errno;
+		}
+		if (n_seg > MLX5_MAX_RXQ_NSEG) {
+			DRV_LOG(ERR, "port %u queue index %u too many "
+				     "segments %u to split",
+				     dev->data->port_id, idx, n_seg);
+			rte_errno = EOVERFLOW;
+			return -rte_errno;
+		}
+	}
 	res = mlx5_rx_queue_pre_setup(dev, idx, &desc);
 	if (res)
 		return res;
-	rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, conf, mp);
+	rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, conf, rx_seg, n_seg);
 	if (!rxq_ctrl) {
 		DRV_LOG(ERR, "port %u unable to allocate queue index %u",
 			dev->data->port_id, idx);
@@ -756,6 +794,39 @@
  *   RX queue index.
  * @param desc
  *   Number of descriptors to configure in queue.
+ * @param socket
+ *   NUMA socket on which memory must be allocated.
+ * @param[in] conf
+ *   Thresholds parameters.
+ * @param mp
+ *   Memory pool for buffer allocations.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
+		    unsigned int socket, const struct rte_eth_rxconf *conf,
+		    struct rte_mempool *mp)
+{
+	struct rte_eth_rxseg rx_seg = {
+		.mp = mp,
+		/*
+		 * All other fields are zeroed, zero segment length
+		 * means the pool buffer size should be used by PMD.
+ */ + }; + return mlx5_rxseg_queue_setup(dev, idx, desc, socket, conf, &rx_seg, 1); +} + +/** + * + * @param dev + * Pointer to Ethernet device structure. + * @param idx + * RX queue index. + * @param desc + * Number of descriptors to configure in queue. * @param hairpin_conf * Hairpin configuration parameters. * @@ -1328,11 +1399,11 @@ struct mlx5_rxq_ctrl * mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, unsigned int socket, const struct rte_eth_rxconf *conf, - struct rte_mempool *mp) + const struct rte_eth_rxseg *rx_seg, uint16_t n_seg) { struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_rxq_ctrl *tmpl; - unsigned int mb_len = rte_pktmbuf_data_room_size(mp); + unsigned int mb_len = rte_pktmbuf_data_room_size(rx_seg[0].mp); unsigned int mprq_stride_nums; unsigned int mprq_stride_size; unsigned int mprq_stride_cap; @@ -1346,7 +1417,7 @@ struct mlx5_rxq_ctrl * uint64_t offloads = conf->offloads | dev->data->dev_conf.rxmode.offloads; unsigned int lro_on_queue = !!(offloads & DEV_RX_OFFLOAD_TCP_LRO); - const int mprq_en = mlx5_check_mprq_support(dev) > 0; + const int mprq_en = mlx5_check_mprq_support(dev) > 0 && n_seg == 1; unsigned int max_rx_pkt_len = lro_on_queue ? dev->data->dev_conf.rxmode.max_lro_pkt_size : dev->data->dev_conf.rxmode.max_rx_pkt_len; @@ -1531,7 +1602,7 @@ struct mlx5_rxq_ctrl * (!!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS)); tmpl->rxq.port_id = dev->data->port_id; tmpl->priv = priv; - tmpl->rxq.mp = mp; + tmpl->rxq.mp = rx_seg[0].mp; tmpl->rxq.elts_n = log2above(desc); tmpl->rxq.rq_repl_thresh = MLX5_VPMD_RXQ_RPLNSH_THRESH(1 << tmpl->rxq.elts_n); diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h index 674296e..f103a30 100644 --- a/drivers/net/mlx5/mlx5_rxtx.h +++ b/drivers/net/mlx5/mlx5_rxtx.h @@ -150,6 +150,9 @@ struct mlx5_rxq_data { rte_spinlock_t *uar_lock_cq; /* CQ (UAR) access lock required for 32bit implementations */ #endif + struct rte_eth_rxseg rxseg[MLX5_MAX_RXQ_NSEG]; + /* Buffer split segment descriptions - sizes, offsets, pools. */ + uint32_t rxseg_n; /* Number of split segment descriptions. */ uint32_t tunnel; /* Tunnel information. */ uint64_t flow_meta_mask; int32_t flow_meta_offset; @@ -304,6 +307,10 @@ struct mlx5_txq_ctrl { int mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, unsigned int socket, const struct rte_eth_rxconf *conf, struct rte_mempool *mp); +int mlx5_rxseg_queue_setup + (struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, + unsigned int socket, const struct rte_eth_rxconf *conf, + const struct rte_eth_rxseg *rx_seg, uint16_t n_seg); int mlx5_rx_hairpin_queue_setup (struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, const struct rte_eth_hairpin_conf *hairpin_conf); @@ -316,7 +323,8 @@ int mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, struct mlx5_rxq_ctrl *mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, unsigned int socket, const struct rte_eth_rxconf *conf, - struct rte_mempool *mp); + const struct rte_eth_rxseg *rx_seg, + uint16_t n_seg); struct mlx5_rxq_ctrl *mlx5_rxq_hairpin_new (struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, const struct rte_eth_hairpin_conf *hairpin_conf); -- 1.8.3.1