DPDK patches and discussions
 help / color / mirror / Atom feed
* [dpdk-dev] [PATCH 0/5] net/mlx5: add Rx buffer split support
@ 2020-10-22 15:42 Viacheslav Ovsiienko
  2020-10-22 15:42 ` [dpdk-dev] [PATCH 1/5] net/mlx5: add extended Rx queue setup routine Viacheslav Ovsiienko
                   ` (4 more replies)
  0 siblings, 5 replies; 35+ messages in thread
From: Viacheslav Ovsiienko @ 2020-10-22 15:42 UTC (permalink / raw)
  To: dev; +Cc: thomas, matan, akozyrev, rasland, orika

This patch adds to PMD the functionality for the receiving
buffer split feature [1]

[1] http://patches.dpdk.org/patch/81154/

Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>

---

Viacheslav Ovsiienko (5):
  net/mlx5: add extended Rx queue setup routine
  net/mlx5: configure Rx queue to support split
  net/mlx5: register multiple pool for Rx queue
  net/mlx5: update Rx datapath to support split
  net/mlx5: report Rx segmentation capabilies

 drivers/net/mlx5/mlx5.h         |   3 +
 drivers/net/mlx5/mlx5_ethdev.c  |   4 ++
 drivers/net/mlx5/mlx5_mr.c      |   3 +
 drivers/net/mlx5/mlx5_rxq.c     | 143 +++++++++++++++++++++++++++++++++-------
 drivers/net/mlx5/mlx5_rxtx.c    |   3 +-
 drivers/net/mlx5/mlx5_rxtx.h    |  13 +++-
 drivers/net/mlx5/mlx5_trigger.c |  20 +++---
 7 files changed, 155 insertions(+), 34 deletions(-)

-- 
1.8.3.1


^ permalink raw reply	[flat|nested] 35+ messages in thread

* [dpdk-dev] [PATCH 1/5] net/mlx5: add extended Rx queue setup routine
  2020-10-22 15:42 [dpdk-dev] [PATCH 0/5] net/mlx5: add Rx buffer split support Viacheslav Ovsiienko
@ 2020-10-22 15:42 ` Viacheslav Ovsiienko
  2020-10-23  9:46   ` [dpdk-dev] [PATCH v2 0/5] net/mlx5: add Rx buffer split support Viacheslav Ovsiienko
                     ` (3 more replies)
  2020-10-22 15:42 ` [dpdk-dev] [PATCH 2/5] net/mlx5: configure Rx queue to support split Viacheslav Ovsiienko
                   ` (3 subsequent siblings)
  4 siblings, 4 replies; 35+ messages in thread
From: Viacheslav Ovsiienko @ 2020-10-22 15:42 UTC (permalink / raw)
  To: dev; +Cc: thomas, matan, akozyrev, rasland, orika

The routine to provide Rx queue setup with specifying
extended receiving buffer description is added.
It allows application to specify desired segment
lengths, data position offsets in the buffer
and dedicated memory pool for each segment.

Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/mlx5.h      |  3 +++
 drivers/net/mlx5/mlx5_rxq.c  | 37 ++++++++++++++++++++++++++++++++-----
 drivers/net/mlx5/mlx5_rxtx.h | 13 ++++++++++++-
 3 files changed, 47 insertions(+), 6 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index c9d5d71..03c4128 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -164,6 +164,9 @@ struct mlx5_stats_ctrl {
 /* Maximal size of aggregated LRO packet. */
 #define MLX5_MAX_LRO_SIZE (UINT8_MAX * MLX5_LRO_SEG_CHUNK_SIZE)
 
+/* Maximal number of segments to split. */
+#define MLX5_MAX_RXQ_NSEG (1u << MLX5_MAX_LOG_RQ_SEGS)
+
 /* LRO configurations structure. */
 struct mlx5_lro_config {
 	uint32_t supported:1; /* Whether LRO is supported. */
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index e1783ba..ce03c75 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -731,12 +731,39 @@
 	struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
 	struct mlx5_rxq_ctrl *rxq_ctrl =
 		container_of(rxq, struct mlx5_rxq_ctrl, rxq);
+	struct rte_eth_rxseg_split *rx_seg =
+				(struct rte_eth_rxseg_split *)conf->rx_seg;
+	struct rte_eth_rxseg_split rx_single = {.mp = mp};
+	uint16_t n_seg = conf->rx_nseg;
 	int res;
 
+	if (mp) {
+		/* The parameters should be checked on rte_eth_dev layer. */
+		MLX5_ASSERT(!n_seg);
+		rx_seg = &rx_single;
+		n_seg = 1;
+	} else {
+		MLX5_ASSERT(conf && n_seg && rx_seg);
+	}
+	if (n_seg > 1) {
+		uint64_t offloads = conf->offloads |
+				    dev->data->dev_conf.rxmode.offloads;
+
+		/* The offloads should be checked on rte_eth_dev layer. */
+		MLX5_ASSERT(offloads & DEV_RX_OFFLOAD_SCATTER);
+		if (!(offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)) {
+			DRV_LOG(ERR, "port %u queue index %u split "
+				     "offload not configured",
+				     dev->data->port_id, idx);
+			rte_errno = ENOSPC;
+			return -rte_errno;
+		}
+		MLX5_ASSERT(n_seg < MLX5_MAX_RXQ_NSEG);
+	}
 	res = mlx5_rx_queue_pre_setup(dev, idx, &desc);
 	if (res)
 		return res;
-	rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, conf, mp);
+	rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, conf, rx_seg, n_seg);
 	if (!rxq_ctrl) {
 		DRV_LOG(ERR, "port %u unable to allocate queue index %u",
 			dev->data->port_id, idx);
@@ -1329,11 +1356,11 @@
 struct mlx5_rxq_ctrl *
 mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	     unsigned int socket, const struct rte_eth_rxconf *conf,
-	     struct rte_mempool *mp)
+	     const struct rte_eth_rxseg_split *rx_seg, uint16_t n_seg)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_rxq_ctrl *tmpl;
-	unsigned int mb_len = rte_pktmbuf_data_room_size(mp);
+	unsigned int mb_len = rte_pktmbuf_data_room_size(rx_seg[0].mp);
 	unsigned int mprq_stride_nums;
 	unsigned int mprq_stride_size;
 	unsigned int mprq_stride_cap;
@@ -1347,7 +1374,7 @@ struct mlx5_rxq_ctrl *
 	uint64_t offloads = conf->offloads |
 			   dev->data->dev_conf.rxmode.offloads;
 	unsigned int lro_on_queue = !!(offloads & DEV_RX_OFFLOAD_TCP_LRO);
-	const int mprq_en = mlx5_check_mprq_support(dev) > 0;
+	const int mprq_en = mlx5_check_mprq_support(dev) > 0 && n_seg == 1;
 	unsigned int max_rx_pkt_len = lro_on_queue ?
 			dev->data->dev_conf.rxmode.max_lro_pkt_size :
 			dev->data->dev_conf.rxmode.max_rx_pkt_len;
@@ -1532,7 +1559,7 @@ struct mlx5_rxq_ctrl *
 		(!!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS));
 	tmpl->rxq.port_id = dev->data->port_id;
 	tmpl->priv = priv;
-	tmpl->rxq.mp = mp;
+	tmpl->rxq.mp = rx_seg[0].mp;
 	tmpl->rxq.elts_n = log2above(desc);
 	tmpl->rxq.rq_repl_thresh =
 		MLX5_VPMD_RXQ_RPLNSH_THRESH(1 << tmpl->rxq.elts_n);
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index b243b6f..f3af9bd 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -94,6 +94,13 @@ enum mlx5_rxq_err_state {
 	MLX5_RXQ_ERR_STATE_NEED_READY,
 };
 
+struct mlx5_eth_rxseg {
+	struct rte_mempool *mp; /**< Memory pool to allocate segment from. */
+	uint16_t length; /**< Segment data length, configures split point. */
+	uint16_t offset; /**< Data offset from beginning of mbuf data buffer. */
+	uint32_t reserved; /**< Reserved field. */
+};
+
 /* RX queue descriptor. */
 struct mlx5_rxq_data {
 	unsigned int csum:1; /* Enable checksum offloading. */
@@ -153,6 +160,9 @@ struct mlx5_rxq_data {
 	uint32_t tunnel; /* Tunnel information. */
 	uint64_t flow_meta_mask;
 	int32_t flow_meta_offset;
+	uint32_t rxseg_n; /* Number of split segment descriptions. */
+	struct mlx5_eth_rxseg rxseg[MLX5_MAX_RXQ_NSEG];
+	/* Buffer split segment descriptions - sizes, offsets, pools. */
 } __rte_cache_aligned;
 
 enum mlx5_rxq_type {
@@ -316,7 +326,8 @@ int mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 struct mlx5_rxq_ctrl *mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx,
 				   uint16_t desc, unsigned int socket,
 				   const struct rte_eth_rxconf *conf,
-				   struct rte_mempool *mp);
+				   const struct rte_eth_rxseg_split *rx_seg,
+				   uint16_t n_seg);
 struct mlx5_rxq_ctrl *mlx5_rxq_hairpin_new
 	(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	 const struct rte_eth_hairpin_conf *hairpin_conf);
-- 
1.8.3.1


^ permalink raw reply	[flat|nested] 35+ messages in thread

* [dpdk-dev] [PATCH 2/5] net/mlx5: configure Rx queue to support split
  2020-10-22 15:42 [dpdk-dev] [PATCH 0/5] net/mlx5: add Rx buffer split support Viacheslav Ovsiienko
  2020-10-22 15:42 ` [dpdk-dev] [PATCH 1/5] net/mlx5: add extended Rx queue setup routine Viacheslav Ovsiienko
@ 2020-10-22 15:42 ` Viacheslav Ovsiienko
  2020-10-22 15:42 ` [dpdk-dev] [PATCH 3/5] net/mlx5: register multiple pool for Rx queue Viacheslav Ovsiienko
                   ` (2 subsequent siblings)
  4 siblings, 0 replies; 35+ messages in thread
From: Viacheslav Ovsiienko @ 2020-10-22 15:42 UTC (permalink / raw)
  To: dev; +Cc: thomas, matan, akozyrev, rasland, orika

The scatter-gather elements should be configured
accordingly to support the buffer split feature.
The application provides the desired settings for
the segments at the beginning of the packets and
PMD pads the buffer chain (if needed) with attributes
of last specified segment to accommodate the packet
of maximal length.

There are some implied limitations. The MPRQ
feature should be disengaged if split is requested,
due to MPRQ neither supports pushing data to the
dedicated pools nor follows the flexible buffer sizes.
The vectorized rx_burst routines do not support
the scattering (these ones are extremely simplified
and work over the single segment only) and can't
handle split as well.

Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/mlx5_rxq.c | 96 ++++++++++++++++++++++++++++++++++++++-------
 1 file changed, 82 insertions(+), 14 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index ce03c75..dc79498 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -1374,7 +1374,8 @@ struct mlx5_rxq_ctrl *
 	uint64_t offloads = conf->offloads |
 			   dev->data->dev_conf.rxmode.offloads;
 	unsigned int lro_on_queue = !!(offloads & DEV_RX_OFFLOAD_TCP_LRO);
-	const int mprq_en = mlx5_check_mprq_support(dev) > 0 && n_seg == 1;
+	const int mprq_en = mlx5_check_mprq_support(dev) > 0 && n_seg == 1 &&
+			    !rx_seg[0].offset && !rx_seg[0].length;
 	unsigned int max_rx_pkt_len = lro_on_queue ?
 			dev->data->dev_conf.rxmode.max_lro_pkt_size :
 			dev->data->dev_conf.rxmode.max_rx_pkt_len;
@@ -1382,22 +1383,89 @@ struct mlx5_rxq_ctrl *
 							RTE_PKTMBUF_HEADROOM;
 	unsigned int max_lro_size = 0;
 	unsigned int first_mb_free_size = mb_len - RTE_PKTMBUF_HEADROOM;
+	const struct rte_eth_rxseg_split *qs_seg = rx_seg;
+	unsigned int tail_len;
 
-	if (non_scatter_min_mbuf_size > mb_len && !(offloads &
-						    DEV_RX_OFFLOAD_SCATTER)) {
+	tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl) +
+			   desc_n * sizeof(struct rte_mbuf *), 0, socket);
+	if (!tmpl) {
+		rte_errno = ENOMEM;
+		return NULL;
+	}
+	MLX5_ASSERT(n_seg && n_seg <= MLX5_MAX_RXQ_NSEG);
+	/*
+	 * Build the array of actual buffer offsets and lengths.
+	 * Pad with the buffers from the last memory pool if
+	 * needed to handle max size packets, replace zero length
+	 * with the buffer length from the pool.
+	 */
+	tail_len = max_rx_pkt_len;
+	do {
+		struct mlx5_eth_rxseg *hw_seg =
+					&tmpl->rxq.rxseg[tmpl->rxq.rxseg_n];
+		uint32_t buf_len, offset, seg_len;
+
+		/*
+		 * For the buffers beyond descriptions offset is zero,
+		 * the first buffer contains head room.
+		 */
+		buf_len = rte_pktmbuf_data_room_size(qs_seg->mp);
+		offset = (tmpl->rxq.rxseg_n >= n_seg ? 0 : qs_seg->offset) +
+			 (tmpl->rxq.rxseg_n ? 0 : RTE_PKTMBUF_HEADROOM);
+		/*
+		 * For the buffers beyond descriptions the length is
+		 * pool buffer length, zero lengths are replaced with
+		 * pool buffer length either.
+		 */
+		seg_len = tmpl->rxq.rxseg_n >= n_seg ? buf_len :
+						       qs_seg->length ?
+						       qs_seg->length :
+						       (buf_len - offset);
+		/* Check is done in long int, now overflows. */
+		if (buf_len < seg_len + offset) {
+			DRV_LOG(ERR, "port %u Rx queue %u: Split offset/length "
+				     "%u/%u can't be satisfied",
+				     dev->data->port_id, idx,
+				     qs_seg->length, qs_seg->offset);
+			rte_errno = EINVAL;
+			goto error;
+		}
+		if (seg_len > tail_len)
+			seg_len = buf_len - offset;
+		if (++tmpl->rxq.rxseg_n > MLX5_MAX_RXQ_NSEG) {
+			DRV_LOG(ERR,
+				"port %u too many SGEs (%u) needed to handle"
+				" requested maximum packet size %u, the maximum"
+				" supported are %u", dev->data->port_id,
+				tmpl->rxq.rxseg_n, max_rx_pkt_len,
+				MLX5_MAX_RXQ_NSEG);
+			rte_errno = ENOTSUP;
+			goto error;
+		}
+		/* Build the actual scattering element in the queue object. */
+		hw_seg->mp = qs_seg->mp;
+		MLX5_ASSERT(offset <= UINT16_MAX);
+		MLX5_ASSERT(seg_len <= UINT16_MAX);
+		hw_seg->offset = (uint16_t)offset;
+		hw_seg->length = (uint16_t)seg_len;
+		/*
+		 * Advance the segment descriptor, the padding is the based
+		 * on the attributes of the last descriptor.
+		 */
+		if (tmpl->rxq.rxseg_n < n_seg)
+			qs_seg++;
+		tail_len -= RTE_MIN(tail_len, seg_len);
+	} while (tail_len || !rte_is_power_of_2(tmpl->rxq.rxseg_n));
+	MLX5_ASSERT(tmpl->rxq.rxseg_n &&
+		    tmpl->rxq.rxseg_n <= MLX5_MAX_RXQ_NSEG);
+	if (tmpl->rxq.rxseg_n > 1 && !(offloads & DEV_RX_OFFLOAD_SCATTER)) {
 		DRV_LOG(ERR, "port %u Rx queue %u: Scatter offload is not"
 			" configured and no enough mbuf space(%u) to contain "
 			"the maximum RX packet length(%u) with head-room(%u)",
 			dev->data->port_id, idx, mb_len, max_rx_pkt_len,
 			RTE_PKTMBUF_HEADROOM);
 		rte_errno = ENOSPC;
-		return NULL;
-	}
-	tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl) +
-			   desc_n * sizeof(struct rte_mbuf *), 0, socket);
-	if (!tmpl) {
-		rte_errno = ENOMEM;
-		return NULL;
+		goto error;
 	}
 	tmpl->type = MLX5_RXQ_TYPE_STANDARD;
 	if (mlx5_mr_btree_init(&tmpl->rxq.mr_ctrl.cache_bh,
@@ -1424,7 +1492,7 @@ struct mlx5_rxq_ctrl *
 	 *  - The number of descs is more than the number of strides.
 	 *  - max_rx_pkt_len plus overhead is less than the max size
 	 *    of a stride or mprq_stride_size is specified by a user.
-	 *    Need to nake sure that there are enough stides to encap
+	 *    Need to make sure that there are enough stides to encap
 	 *    the maximum packet size in case mprq_stride_size is set.
 	 *  Otherwise, enable Rx scatter if necessary.
 	 */
@@ -1454,11 +1522,11 @@ struct mlx5_rxq_ctrl *
 			" strd_num_n = %u, strd_sz_n = %u",
 			dev->data->port_id, idx,
 			tmpl->rxq.strd_num_n, tmpl->rxq.strd_sz_n);
-	} else if (max_rx_pkt_len <= first_mb_free_size) {
+	} else if (tmpl->rxq.rxseg_n == 1) {
+		MLX5_ASSERT(max_rx_pkt_len <= first_mb_free_size);
 		tmpl->rxq.sges_n = 0;
 		max_lro_size = max_rx_pkt_len;
 	} else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
-		unsigned int size = non_scatter_min_mbuf_size;
 		unsigned int sges_n;
 
 		if (lro_on_queue && first_mb_free_size <
@@ -1473,7 +1541,7 @@ struct mlx5_rxq_ctrl *
 		 * Determine the number of SGEs needed for a full packet
 		 * and round it to the next power of two.
 		 */
-		sges_n = log2above((size / mb_len) + !!(size % mb_len));
+		sges_n = log2above(tmpl->rxq.rxseg_n);
 		if (sges_n > MLX5_MAX_LOG_RQ_SEGS) {
 			DRV_LOG(ERR,
 				"port %u too many SGEs (%u) needed to handle"
-- 
1.8.3.1


^ permalink raw reply	[flat|nested] 35+ messages in thread

* [dpdk-dev] [PATCH 3/5] net/mlx5: register multiple pool for Rx queue
  2020-10-22 15:42 [dpdk-dev] [PATCH 0/5] net/mlx5: add Rx buffer split support Viacheslav Ovsiienko
  2020-10-22 15:42 ` [dpdk-dev] [PATCH 1/5] net/mlx5: add extended Rx queue setup routine Viacheslav Ovsiienko
  2020-10-22 15:42 ` [dpdk-dev] [PATCH 2/5] net/mlx5: configure Rx queue to support split Viacheslav Ovsiienko
@ 2020-10-22 15:42 ` Viacheslav Ovsiienko
  2020-10-22 15:42 ` [dpdk-dev] [PATCH 4/5] net/mlx5: update Rx datapath to support split Viacheslav Ovsiienko
  2020-10-22 15:42 ` [dpdk-dev] [PATCH 5/5] net/mlx5: report Rx segmentation capabilies Viacheslav Ovsiienko
  4 siblings, 0 replies; 35+ messages in thread
From: Viacheslav Ovsiienko @ 2020-10-22 15:42 UTC (permalink / raw)
  To: dev; +Cc: thomas, matan, akozyrev, rasland, orika

The split feature for receiving packets was added to the mlx5
PMD, now Rx queue can receive the data to the buffers belonging
to the different pools and the memory of all the involved pools
must be registered for DMA operations in order to allow hardware
to store the data.

Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/mlx5_mr.c      |  3 +++
 drivers/net/mlx5/mlx5_trigger.c | 20 ++++++++++++--------
 2 files changed, 15 insertions(+), 8 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c
index dbcf0aa..c308ecc 100644
--- a/drivers/net/mlx5/mlx5_mr.c
+++ b/drivers/net/mlx5/mlx5_mr.c
@@ -536,6 +536,9 @@ struct mr_update_mp_data {
 		.ret = 0,
 	};
 
+	DRV_LOG(DEBUG, "Port %u Rx queue registering mp %s "
+		       "having %u chunks.", dev->data->port_id,
+		       mp->name, mp->nb_mem_chunks);
 	rte_mempool_mem_iter(mp, mlx5_mr_update_mp_cb, &data);
 	if (data.ret < 0 && rte_errno == ENXIO) {
 		/* Mempool may have externally allocated memory. */
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index 7735f02..19f2d66 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -145,18 +145,22 @@
 		dev->data->port_id, priv->sh->device_attr.max_sge);
 	for (i = 0; i != priv->rxqs_n; ++i) {
 		struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_get(dev, i);
-		struct rte_mempool *mp;
 
 		if (!rxq_ctrl)
 			continue;
 		if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD) {
-			/* Pre-register Rx mempool. */
-			mp = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
-			     rxq_ctrl->rxq.mprq_mp : rxq_ctrl->rxq.mp;
-			DRV_LOG(DEBUG, "Port %u Rx queue %u registering mp %s"
-				" having %u chunks.", dev->data->port_id,
-				rxq_ctrl->rxq.idx, mp->name, mp->nb_mem_chunks);
-			mlx5_mr_update_mp(dev, &rxq_ctrl->rxq.mr_ctrl, mp);
+			/* Pre-register Rx mempools. */
+			if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq)) {
+				mlx5_mr_update_mp(dev, &rxq_ctrl->rxq.mr_ctrl,
+						  rxq_ctrl->rxq.mprq_mp);
+			} else {
+				uint32_t s;
+
+				for (s = 0; s < rxq_ctrl->rxq.rxseg_n; s++)
+					mlx5_mr_update_mp
+						(dev, &rxq_ctrl->rxq.mr_ctrl,
+						rxq_ctrl->rxq.rxseg[s].mp);
+			}
 			ret = rxq_alloc_elts(rxq_ctrl);
 			if (ret)
 				goto error;
-- 
1.8.3.1


^ permalink raw reply	[flat|nested] 35+ messages in thread

* [dpdk-dev] [PATCH 4/5] net/mlx5: update Rx datapath to support split
  2020-10-22 15:42 [dpdk-dev] [PATCH 0/5] net/mlx5: add Rx buffer split support Viacheslav Ovsiienko
                   ` (2 preceding siblings ...)
  2020-10-22 15:42 ` [dpdk-dev] [PATCH 3/5] net/mlx5: register multiple pool for Rx queue Viacheslav Ovsiienko
@ 2020-10-22 15:42 ` Viacheslav Ovsiienko
  2020-10-22 15:42 ` [dpdk-dev] [PATCH 5/5] net/mlx5: report Rx segmentation capabilies Viacheslav Ovsiienko
  4 siblings, 0 replies; 35+ messages in thread
From: Viacheslav Ovsiienko @ 2020-10-22 15:42 UTC (permalink / raw)
  To: dev; +Cc: thomas, matan, akozyrev, rasland, orika

Only the regular rx_burst routine is updated to support split,
because the vectorized ones do not support scatter and MPRQ
does not support split at all.

Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/mlx5_rxq.c  | 11 +++++------
 drivers/net/mlx5/mlx5_rxtx.c |  3 ++-
 2 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index dc79498..e82d14f 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -210,9 +210,10 @@
 
 	/* Iterate on segments. */
 	for (i = 0; (i != elts_n); ++i) {
+		struct mlx5_eth_rxseg *seg = &rxq_ctrl->rxq.rxseg[i % sges_n];
 		struct rte_mbuf *buf;
 
-		buf = rte_pktmbuf_alloc(rxq_ctrl->rxq.mp);
+		buf = rte_pktmbuf_alloc(seg->mp);
 		if (buf == NULL) {
 			DRV_LOG(ERR, "port %u empty mbuf pool",
 				PORT_ID(rxq_ctrl->priv));
@@ -225,12 +226,10 @@
 		MLX5_ASSERT(rte_pktmbuf_data_len(buf) == 0);
 		MLX5_ASSERT(rte_pktmbuf_pkt_len(buf) == 0);
 		MLX5_ASSERT(!buf->next);
-		/* Only the first segment keeps headroom. */
-		if (i % sges_n)
-			SET_DATA_OFF(buf, 0);
+		SET_DATA_OFF(buf, seg->offset);
 		PORT(buf) = rxq_ctrl->rxq.port_id;
-		DATA_LEN(buf) = rte_pktmbuf_tailroom(buf);
-		PKT_LEN(buf) = DATA_LEN(buf);
+		DATA_LEN(buf) = seg->length;
+		PKT_LEN(buf) = seg->length;
 		NB_SEGS(buf) = 1;
 		(*rxq_ctrl->rxq.elts)[i] = buf;
 	}
diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index b530ff4..dd84249 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -1334,7 +1334,8 @@ enum mlx5_txcmp_code {
 		rte_prefetch0(seg);
 		rte_prefetch0(cqe);
 		rte_prefetch0(wqe);
-		rep = rte_mbuf_raw_alloc(rxq->mp);
+		/* Allocate the buf from the same pool. */
+		rep = rte_mbuf_raw_alloc(seg->pool);
 		if (unlikely(rep == NULL)) {
 			++rxq->stats.rx_nombuf;
 			if (!pkt) {
-- 
1.8.3.1


^ permalink raw reply	[flat|nested] 35+ messages in thread

* [dpdk-dev] [PATCH 5/5] net/mlx5: report Rx segmentation capabilies
  2020-10-22 15:42 [dpdk-dev] [PATCH 0/5] net/mlx5: add Rx buffer split support Viacheslav Ovsiienko
                   ` (3 preceding siblings ...)
  2020-10-22 15:42 ` [dpdk-dev] [PATCH 4/5] net/mlx5: update Rx datapath to support split Viacheslav Ovsiienko
@ 2020-10-22 15:42 ` Viacheslav Ovsiienko
  4 siblings, 0 replies; 35+ messages in thread
From: Viacheslav Ovsiienko @ 2020-10-22 15:42 UTC (permalink / raw)
  To: dev; +Cc: thomas, matan, akozyrev, rasland, orika

Add rte_eth_dev_info->rx_seg_capa parameters:
  - receiving to multiple pools is supported
  - buffer offsets are supported
  - no offset alignment requirement
  - reports the maximal amount of segments

Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/mlx5_ethdev.c | 4 ++++
 drivers/net/mlx5/mlx5_rxq.c    | 1 +
 2 files changed, 5 insertions(+)

diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index 7631f64..9017184 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -306,6 +306,10 @@
 	info->max_tx_queues = max;
 	info->max_mac_addrs = MLX5_MAX_UC_MAC_ADDRESSES;
 	info->rx_queue_offload_capa = mlx5_get_rx_queue_offloads(dev);
+	info->rx_seg_capa.max_nseg = MLX5_MAX_RXQ_NSEG;
+	info->rx_seg_capa.multi_pools = 1;
+	info->rx_seg_capa.offset_allowed = 1;
+	info->rx_seg_capa.offset_align_log2 = 0;
 	info->rx_offload_capa = (mlx5_get_rx_port_offloads() |
 				 info->rx_queue_offload_capa);
 	info->tx_offload_capa = mlx5_get_tx_port_offloads(dev);
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index e82d14f..f7d8661 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -389,6 +389,7 @@
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_dev_config *config = &priv->config;
 	uint64_t offloads = (DEV_RX_OFFLOAD_SCATTER |
+			     RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT |
 			     DEV_RX_OFFLOAD_TIMESTAMP |
 			     DEV_RX_OFFLOAD_JUMBO_FRAME |
 			     DEV_RX_OFFLOAD_RSS_HASH);
-- 
1.8.3.1


^ permalink raw reply	[flat|nested] 35+ messages in thread

* [dpdk-dev] [PATCH v2 0/5] net/mlx5: add Rx buffer split support
  2020-10-22 15:42 ` [dpdk-dev] [PATCH 1/5] net/mlx5: add extended Rx queue setup routine Viacheslav Ovsiienko
@ 2020-10-23  9:46   ` Viacheslav Ovsiienko
  2020-10-23  9:46     ` [dpdk-dev] [PATCH v2 1/5] net/mlx5: configure Rx queue to support split Viacheslav Ovsiienko
                       ` (4 more replies)
  2020-10-26 10:11   ` [dpdk-dev] [PATCH v3 0/6] net/mlx5: add Rx buffer split support Viacheslav Ovsiienko
                     ` (2 subsequent siblings)
  3 siblings, 5 replies; 35+ messages in thread
From: Viacheslav Ovsiienko @ 2020-10-23  9:46 UTC (permalink / raw)
  To: dev; +Cc: thomas, matan, akozyrev, rasland, orika

This patch adds to PMD the functionality for the receiving
buffer split feature [1]

[1] http://patches.dpdk.org/patch/81154/

Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>

---
v1: http://patches.dpdk.org/patch/81808/
v2: - typos
    - documentation is updated

Viacheslav Ovsiienko (5):
  net/mlx5: configure Rx queue to support split
  net/mlx5: register multiple pool for Rx queue
  net/mlx5: update Rx datapath to support split
  net/mlx5: report Rx segmentation capabilities
  doc: add buffer split feature limitation to mlx5 guide

 doc/guides/nics/mlx5.rst        |   6 ++-
 drivers/net/mlx5/mlx5_ethdev.c  |   4 ++
 drivers/net/mlx5/mlx5_mr.c      |   3 ++
 drivers/net/mlx5/mlx5_rxq.c     | 108 ++++++++++++++++++++++++++++++++--------
 drivers/net/mlx5/mlx5_rxtx.c    |   3 +-
 drivers/net/mlx5/mlx5_trigger.c |  20 +++++---
 6 files changed, 114 insertions(+), 30 deletions(-)

-- 
1.8.3.1


^ permalink raw reply	[flat|nested] 35+ messages in thread

* [dpdk-dev] [PATCH v2 1/5] net/mlx5: configure Rx queue to support split
  2020-10-23  9:46   ` [dpdk-dev] [PATCH v2 0/5] net/mlx5: add Rx buffer split support Viacheslav Ovsiienko
@ 2020-10-23  9:46     ` Viacheslav Ovsiienko
  2020-10-23  9:46     ` [dpdk-dev] [PATCH v2 2/5] net/mlx5: register multiple pool for Rx queue Viacheslav Ovsiienko
                       ` (3 subsequent siblings)
  4 siblings, 0 replies; 35+ messages in thread
From: Viacheslav Ovsiienko @ 2020-10-23  9:46 UTC (permalink / raw)
  To: dev; +Cc: thomas, matan, akozyrev, rasland, orika

The scatter-gather elements should be configured
accordingly to support the buffer split feature.
The application provides the desired settings for
the segments at the beginning of the packets and
PMD pads the buffer chain (if needed) with attributes
of last specified segment to accommodate the packet
of maximal length.

There are some implied limitations. The MPRQ
feature should be disengaged if split is requested,
due to MPRQ neither supports pushing data to the
dedicated pools nor follows the flexible buffer sizes.
The vectorized rx_burst routines do not support
the scattering (these ones are extremely simplified
and work over the single segment only) and can't
handle split as well.

Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/mlx5_rxq.c | 96 ++++++++++++++++++++++++++++++++++++++-------
 1 file changed, 82 insertions(+), 14 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index ce03c75..dc79498 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -1374,7 +1374,8 @@ struct mlx5_rxq_ctrl *
 	uint64_t offloads = conf->offloads |
 			   dev->data->dev_conf.rxmode.offloads;
 	unsigned int lro_on_queue = !!(offloads & DEV_RX_OFFLOAD_TCP_LRO);
-	const int mprq_en = mlx5_check_mprq_support(dev) > 0 && n_seg == 1;
+	const int mprq_en = mlx5_check_mprq_support(dev) > 0 && n_seg == 1 &&
+			    !rx_seg[0].offset && !rx_seg[0].length;
 	unsigned int max_rx_pkt_len = lro_on_queue ?
 			dev->data->dev_conf.rxmode.max_lro_pkt_size :
 			dev->data->dev_conf.rxmode.max_rx_pkt_len;
@@ -1382,22 +1383,89 @@ struct mlx5_rxq_ctrl *
 							RTE_PKTMBUF_HEADROOM;
 	unsigned int max_lro_size = 0;
 	unsigned int first_mb_free_size = mb_len - RTE_PKTMBUF_HEADROOM;
+	const struct rte_eth_rxseg_split *qs_seg = rx_seg;
+	unsigned int tail_len;
 
-	if (non_scatter_min_mbuf_size > mb_len && !(offloads &
-						    DEV_RX_OFFLOAD_SCATTER)) {
+	tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl) +
+			   desc_n * sizeof(struct rte_mbuf *), 0, socket);
+	if (!tmpl) {
+		rte_errno = ENOMEM;
+		return NULL;
+	}
+	MLX5_ASSERT(n_seg && n_seg <= MLX5_MAX_RXQ_NSEG);
+	/*
+	 * Build the array of actual buffer offsets and lengths.
+	 * Pad with the buffers from the last memory pool if
+	 * needed to handle max size packets, replace zero length
+	 * with the buffer length from the pool.
+	 */
+	tail_len = max_rx_pkt_len;
+	do {
+		struct mlx5_eth_rxseg *hw_seg =
+					&tmpl->rxq.rxseg[tmpl->rxq.rxseg_n];
+		uint32_t buf_len, offset, seg_len;
+
+		/*
+		 * For the buffers beyond descriptions offset is zero,
+		 * the first buffer contains head room.
+		 */
+		buf_len = rte_pktmbuf_data_room_size(qs_seg->mp);
+		offset = (tmpl->rxq.rxseg_n >= n_seg ? 0 : qs_seg->offset) +
+			 (tmpl->rxq.rxseg_n ? 0 : RTE_PKTMBUF_HEADROOM);
+		/*
+		 * For the buffers beyond descriptions the length is
+		 * pool buffer length, zero lengths are replaced with
+		 * pool buffer length either.
+		 */
+		seg_len = tmpl->rxq.rxseg_n >= n_seg ? buf_len :
+						       qs_seg->length ?
+						       qs_seg->length :
+						       (buf_len - offset);
+		/* Check is done in long int, now overflows. */
+		if (buf_len < seg_len + offset) {
+			DRV_LOG(ERR, "port %u Rx queue %u: Split offset/length "
+				     "%u/%u can't be satisfied",
+				     dev->data->port_id, idx,
+				     qs_seg->length, qs_seg->offset);
+			rte_errno = EINVAL;
+			goto error;
+		}
+		if (seg_len > tail_len)
+			seg_len = buf_len - offset;
+		if (++tmpl->rxq.rxseg_n > MLX5_MAX_RXQ_NSEG) {
+			DRV_LOG(ERR,
+				"port %u too many SGEs (%u) needed to handle"
+				" requested maximum packet size %u, the maximum"
+				" supported are %u", dev->data->port_id,
+				tmpl->rxq.rxseg_n, max_rx_pkt_len,
+				MLX5_MAX_RXQ_NSEG);
+			rte_errno = ENOTSUP;
+			goto error;
+		}
+		/* Build the actual scattering element in the queue object. */
+		hw_seg->mp = qs_seg->mp;
+		MLX5_ASSERT(offset <= UINT16_MAX);
+		MLX5_ASSERT(seg_len <= UINT16_MAX);
+		hw_seg->offset = (uint16_t)offset;
+		hw_seg->length = (uint16_t)seg_len;
+		/*
+		 * Advance the segment descriptor, the padding is the based
+		 * on the attributes of the last descriptor.
+		 */
+		if (tmpl->rxq.rxseg_n < n_seg)
+			qs_seg++;
+		tail_len -= RTE_MIN(tail_len, seg_len);
+	} while (tail_len || !rte_is_power_of_2(tmpl->rxq.rxseg_n));
+	MLX5_ASSERT(tmpl->rxq.rxseg_n &&
+		    tmpl->rxq.rxseg_n <= MLX5_MAX_RXQ_NSEG);
+	if (tmpl->rxq.rxseg_n > 1 && !(offloads & DEV_RX_OFFLOAD_SCATTER)) {
 		DRV_LOG(ERR, "port %u Rx queue %u: Scatter offload is not"
 			" configured and no enough mbuf space(%u) to contain "
 			"the maximum RX packet length(%u) with head-room(%u)",
 			dev->data->port_id, idx, mb_len, max_rx_pkt_len,
 			RTE_PKTMBUF_HEADROOM);
 		rte_errno = ENOSPC;
-		return NULL;
-	}
-	tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl) +
-			   desc_n * sizeof(struct rte_mbuf *), 0, socket);
-	if (!tmpl) {
-		rte_errno = ENOMEM;
-		return NULL;
+		goto error;
 	}
 	tmpl->type = MLX5_RXQ_TYPE_STANDARD;
 	if (mlx5_mr_btree_init(&tmpl->rxq.mr_ctrl.cache_bh,
@@ -1424,7 +1492,7 @@ struct mlx5_rxq_ctrl *
 	 *  - The number of descs is more than the number of strides.
 	 *  - max_rx_pkt_len plus overhead is less than the max size
 	 *    of a stride or mprq_stride_size is specified by a user.
-	 *    Need to nake sure that there are enough stides to encap
+	 *    Need to make sure that there are enough stides to encap
 	 *    the maximum packet size in case mprq_stride_size is set.
 	 *  Otherwise, enable Rx scatter if necessary.
 	 */
@@ -1454,11 +1522,11 @@ struct mlx5_rxq_ctrl *
 			" strd_num_n = %u, strd_sz_n = %u",
 			dev->data->port_id, idx,
 			tmpl->rxq.strd_num_n, tmpl->rxq.strd_sz_n);
-	} else if (max_rx_pkt_len <= first_mb_free_size) {
+	} else if (tmpl->rxq.rxseg_n == 1) {
+		MLX5_ASSERT(max_rx_pkt_len <= first_mb_free_size);
 		tmpl->rxq.sges_n = 0;
 		max_lro_size = max_rx_pkt_len;
 	} else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
-		unsigned int size = non_scatter_min_mbuf_size;
 		unsigned int sges_n;
 
 		if (lro_on_queue && first_mb_free_size <
@@ -1473,7 +1541,7 @@ struct mlx5_rxq_ctrl *
 		 * Determine the number of SGEs needed for a full packet
 		 * and round it to the next power of two.
 		 */
-		sges_n = log2above((size / mb_len) + !!(size % mb_len));
+		sges_n = log2above(tmpl->rxq.rxseg_n);
 		if (sges_n > MLX5_MAX_LOG_RQ_SEGS) {
 			DRV_LOG(ERR,
 				"port %u too many SGEs (%u) needed to handle"
-- 
1.8.3.1


^ permalink raw reply	[flat|nested] 35+ messages in thread

* [dpdk-dev] [PATCH v2 2/5] net/mlx5: register multiple pool for Rx queue
  2020-10-23  9:46   ` [dpdk-dev] [PATCH v2 0/5] net/mlx5: add Rx buffer split support Viacheslav Ovsiienko
  2020-10-23  9:46     ` [dpdk-dev] [PATCH v2 1/5] net/mlx5: configure Rx queue to support split Viacheslav Ovsiienko
@ 2020-10-23  9:46     ` Viacheslav Ovsiienko
  2020-10-23  9:46     ` [dpdk-dev] [PATCH v2 3/5] net/mlx5: update Rx datapath to support split Viacheslav Ovsiienko
                       ` (2 subsequent siblings)
  4 siblings, 0 replies; 35+ messages in thread
From: Viacheslav Ovsiienko @ 2020-10-23  9:46 UTC (permalink / raw)
  To: dev; +Cc: thomas, matan, akozyrev, rasland, orika

The split feature for receiving packets was added to the mlx5
PMD; now an Rx queue can receive data into buffers belonging
to different pools, and the memory of all the involved pools
must be registered for DMA operations in order to allow the
hardware to store the data.

Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/mlx5_mr.c      |  3 +++
 drivers/net/mlx5/mlx5_trigger.c | 20 ++++++++++++--------
 2 files changed, 15 insertions(+), 8 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c
index dbcf0aa..c308ecc 100644
--- a/drivers/net/mlx5/mlx5_mr.c
+++ b/drivers/net/mlx5/mlx5_mr.c
@@ -536,6 +536,9 @@ struct mr_update_mp_data {
 		.ret = 0,
 	};
 
+	DRV_LOG(DEBUG, "Port %u Rx queue registering mp %s "
+		       "having %u chunks.", dev->data->port_id,
+		       mp->name, mp->nb_mem_chunks);
 	rte_mempool_mem_iter(mp, mlx5_mr_update_mp_cb, &data);
 	if (data.ret < 0 && rte_errno == ENXIO) {
 		/* Mempool may have externally allocated memory. */
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index 7735f02..19f2d66 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -145,18 +145,22 @@
 		dev->data->port_id, priv->sh->device_attr.max_sge);
 	for (i = 0; i != priv->rxqs_n; ++i) {
 		struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_get(dev, i);
-		struct rte_mempool *mp;
 
 		if (!rxq_ctrl)
 			continue;
 		if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD) {
-			/* Pre-register Rx mempool. */
-			mp = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
-			     rxq_ctrl->rxq.mprq_mp : rxq_ctrl->rxq.mp;
-			DRV_LOG(DEBUG, "Port %u Rx queue %u registering mp %s"
-				" having %u chunks.", dev->data->port_id,
-				rxq_ctrl->rxq.idx, mp->name, mp->nb_mem_chunks);
-			mlx5_mr_update_mp(dev, &rxq_ctrl->rxq.mr_ctrl, mp);
+			/* Pre-register Rx mempools. */
+			if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq)) {
+				mlx5_mr_update_mp(dev, &rxq_ctrl->rxq.mr_ctrl,
+						  rxq_ctrl->rxq.mprq_mp);
+			} else {
+				uint32_t s;
+
+				for (s = 0; s < rxq_ctrl->rxq.rxseg_n; s++)
+					mlx5_mr_update_mp
+						(dev, &rxq_ctrl->rxq.mr_ctrl,
+						rxq_ctrl->rxq.rxseg[s].mp);
+			}
 			ret = rxq_alloc_elts(rxq_ctrl);
 			if (ret)
 				goto error;
-- 
1.8.3.1


^ permalink raw reply	[flat|nested] 35+ messages in thread

* [dpdk-dev] [PATCH v2 3/5] net/mlx5: update Rx datapath to support split
  2020-10-23  9:46   ` [dpdk-dev] [PATCH v2 0/5] net/mlx5: add Rx buffer split support Viacheslav Ovsiienko
  2020-10-23  9:46     ` [dpdk-dev] [PATCH v2 1/5] net/mlx5: configure Rx queue to support split Viacheslav Ovsiienko
  2020-10-23  9:46     ` [dpdk-dev] [PATCH v2 2/5] net/mlx5: register multiple pool for Rx queue Viacheslav Ovsiienko
@ 2020-10-23  9:46     ` Viacheslav Ovsiienko
  2020-10-23  9:46     ` [dpdk-dev] [PATCH v2 4/5] net/mlx5: report Rx segmentation capabilities Viacheslav Ovsiienko
  2020-10-23  9:46     ` [dpdk-dev] [PATCH v2 5/5] doc: add buffer split feature limitation to mlx5 guide Viacheslav Ovsiienko
  4 siblings, 0 replies; 35+ messages in thread
From: Viacheslav Ovsiienko @ 2020-10-23  9:46 UTC (permalink / raw)
  To: dev; +Cc: thomas, matan, akozyrev, rasland, orika

Only the regular rx_burst routine is updated to support split,
because the vectorized ones do not support scatter and MPRQ
does not support split at all.

Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/mlx5_rxq.c  | 11 +++++------
 drivers/net/mlx5/mlx5_rxtx.c |  3 ++-
 2 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index dc79498..e82d14f 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -210,9 +210,10 @@
 
 	/* Iterate on segments. */
 	for (i = 0; (i != elts_n); ++i) {
+		struct mlx5_eth_rxseg *seg = &rxq_ctrl->rxq.rxseg[i % sges_n];
 		struct rte_mbuf *buf;
 
-		buf = rte_pktmbuf_alloc(rxq_ctrl->rxq.mp);
+		buf = rte_pktmbuf_alloc(seg->mp);
 		if (buf == NULL) {
 			DRV_LOG(ERR, "port %u empty mbuf pool",
 				PORT_ID(rxq_ctrl->priv));
@@ -225,12 +226,10 @@
 		MLX5_ASSERT(rte_pktmbuf_data_len(buf) == 0);
 		MLX5_ASSERT(rte_pktmbuf_pkt_len(buf) == 0);
 		MLX5_ASSERT(!buf->next);
-		/* Only the first segment keeps headroom. */
-		if (i % sges_n)
-			SET_DATA_OFF(buf, 0);
+		SET_DATA_OFF(buf, seg->offset);
 		PORT(buf) = rxq_ctrl->rxq.port_id;
-		DATA_LEN(buf) = rte_pktmbuf_tailroom(buf);
-		PKT_LEN(buf) = DATA_LEN(buf);
+		DATA_LEN(buf) = seg->length;
+		PKT_LEN(buf) = seg->length;
 		NB_SEGS(buf) = 1;
 		(*rxq_ctrl->rxq.elts)[i] = buf;
 	}
diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index b530ff4..dd84249 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -1334,7 +1334,8 @@ enum mlx5_txcmp_code {
 		rte_prefetch0(seg);
 		rte_prefetch0(cqe);
 		rte_prefetch0(wqe);
-		rep = rte_mbuf_raw_alloc(rxq->mp);
+		/* Allocate the buf from the same pool. */
+		rep = rte_mbuf_raw_alloc(seg->pool);
 		if (unlikely(rep == NULL)) {
 			++rxq->stats.rx_nombuf;
 			if (!pkt) {
-- 
1.8.3.1


^ permalink raw reply	[flat|nested] 35+ messages in thread

* [dpdk-dev] [PATCH v2 4/5] net/mlx5: report Rx segmentation capabilities
  2020-10-23  9:46   ` [dpdk-dev] [PATCH v2 0/5] net/mlx5: add Rx buffer split support Viacheslav Ovsiienko
                       ` (2 preceding siblings ...)
  2020-10-23  9:46     ` [dpdk-dev] [PATCH v2 3/5] net/mlx5: update Rx datapath to support split Viacheslav Ovsiienko
@ 2020-10-23  9:46     ` Viacheslav Ovsiienko
  2020-10-23  9:46     ` [dpdk-dev] [PATCH v2 5/5] doc: add buffer split feature limitation to mlx5 guide Viacheslav Ovsiienko
  4 siblings, 0 replies; 35+ messages in thread
From: Viacheslav Ovsiienko @ 2020-10-23  9:46 UTC (permalink / raw)
  To: dev; +Cc: thomas, matan, akozyrev, rasland, orika

Add rte_eth_dev_info->rx_seg_capa parameters:
  - receiving to multiple pools is supported
  - buffer offsets are supported
  - no offset alignment requirement
  - reports the maximal number of segments
  - reports the buffer split offload flag

Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/mlx5_ethdev.c | 4 ++++
 drivers/net/mlx5/mlx5_rxq.c    | 1 +
 2 files changed, 5 insertions(+)

diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index 7631f64..9017184 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -306,6 +306,10 @@
 	info->max_tx_queues = max;
 	info->max_mac_addrs = MLX5_MAX_UC_MAC_ADDRESSES;
 	info->rx_queue_offload_capa = mlx5_get_rx_queue_offloads(dev);
+	info->rx_seg_capa.max_nseg = MLX5_MAX_RXQ_NSEG;
+	info->rx_seg_capa.multi_pools = 1;
+	info->rx_seg_capa.offset_allowed = 1;
+	info->rx_seg_capa.offset_align_log2 = 0;
 	info->rx_offload_capa = (mlx5_get_rx_port_offloads() |
 				 info->rx_queue_offload_capa);
 	info->tx_offload_capa = mlx5_get_tx_port_offloads(dev);
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index e82d14f..f7d8661 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -389,6 +389,7 @@
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_dev_config *config = &priv->config;
 	uint64_t offloads = (DEV_RX_OFFLOAD_SCATTER |
+			     RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT |
 			     DEV_RX_OFFLOAD_TIMESTAMP |
 			     DEV_RX_OFFLOAD_JUMBO_FRAME |
 			     DEV_RX_OFFLOAD_RSS_HASH);
-- 
1.8.3.1


^ permalink raw reply	[flat|nested] 35+ messages in thread

* [dpdk-dev] [PATCH v2 5/5] doc: add buffer split feature limitation to mlx5 guide
  2020-10-23  9:46   ` [dpdk-dev] [PATCH v2 0/5] net/mlx5: add Rx buffer split support Viacheslav Ovsiienko
                       ` (3 preceding siblings ...)
  2020-10-23  9:46     ` [dpdk-dev] [PATCH v2 4/5] net/mlx5: report Rx segmentation capabilities Viacheslav Ovsiienko
@ 2020-10-23  9:46     ` Viacheslav Ovsiienko
  4 siblings, 0 replies; 35+ messages in thread
From: Viacheslav Ovsiienko @ 2020-10-23  9:46 UTC (permalink / raw)
  To: dev; +Cc: thomas, matan, akozyrev, rasland, orika

The buffer split feature is mentioned in the mlx5 PMD
documentation; the limitation description is added
as well.

Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 doc/guides/nics/mlx5.rst | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/doc/guides/nics/mlx5.rst b/doc/guides/nics/mlx5.rst
index 1a8808e..4621a5e 100644
--- a/doc/guides/nics/mlx5.rst
+++ b/doc/guides/nics/mlx5.rst
@@ -64,7 +64,8 @@ Features
 
 - Multi arch support: x86_64, POWER8, ARMv8, i686.
 - Multiple TX and RX queues.
-- Support for scattered TX and RX frames.
+- Support for scattered TX frames.
+- Advanced support for scattered Rx frames with tunable buffer attributes.
 - IPv4, IPv6, TCPv4, TCPv6, UDPv4 and UDPv6 RSS on any number of queues.
 - RSS using different combinations of fields: L3 only, L4 only or both,
   and source only, destination only or both.
@@ -192,6 +193,9 @@ Limitations
    the device. In case of ungraceful program termination, some entries may
    remain present and should be removed manually by other means.
 
+- Buffer split offload is supported with regular Rx burst routine only,
+  no MPRQ feature or vectorized code can be engaged.
+
 - When Multi-Packet Rx queue is configured (``mprq_en``), a Rx packet can be
   externally attached to a user-provided mbuf with having EXT_ATTACHED_MBUF in
   ol_flags. As the mempool for the external buffer is managed by PMD, all the
-- 
1.8.3.1


^ permalink raw reply	[flat|nested] 35+ messages in thread

* [dpdk-dev] [PATCH v3 0/6] net/mlx5: add Rx buffer split support
  2020-10-22 15:42 ` [dpdk-dev] [PATCH 1/5] net/mlx5: add extended Rx queue setup routine Viacheslav Ovsiienko
  2020-10-23  9:46   ` [dpdk-dev] [PATCH v2 0/5] net/mlx5: add Rx buffer split support Viacheslav Ovsiienko
@ 2020-10-26 10:11   ` Viacheslav Ovsiienko
  2020-10-26 10:11     ` [dpdk-dev] [PATCH v3 1/6] net/mlx5: add extended Rx queue setup routine Viacheslav Ovsiienko
                       ` (5 more replies)
  2020-10-26 11:54   ` [dpdk-dev] [PATCH v4 0/6] net/mlx5: add Rx buffer split support Viacheslav Ovsiienko
  2020-10-26 17:17   ` [dpdk-dev] [PATCH] net/mlx5: fix Rx queue initialization for scattered segment Viacheslav Ovsiienko
  3 siblings, 6 replies; 35+ messages in thread
From: Viacheslav Ovsiienko @ 2020-10-26 10:11 UTC (permalink / raw)
  To: dev; +Cc: thomas, matan, akozyrev, rasland, orika

This patch adds to the PMD the functionality for the receiving
buffer split feature [1]

[1] http://patches.dpdk.org/patch/81154/

Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>

---
v1: http://patches.dpdk.org/patch/81808/

v2: http://patches.dpdk.org/patch/81923/
    - typos
    - documentation is updated

v3: - extra parameter checks in PMD rx_queue_setup removed
    - minor optimizations in PMD

Viacheslav Ovsiienko (6):
  net/mlx5: add extended Rx queue setup routine
  net/mlx5: configure Rx queue to support split
  net/mlx5: register multiple pool for Rx queue
  net/mlx5: update Rx datapath to support split
  net/mlx5: report Rx segmentation capabilities
  doc: add buffer split feature limitation to mlx5 guide

 doc/guides/nics/mlx5.rst        |   6 +-
 drivers/net/mlx5/mlx5.h         |   3 +
 drivers/net/mlx5/mlx5_ethdev.c  |   4 ++
 drivers/net/mlx5/mlx5_mr.c      |   3 +
 drivers/net/mlx5/mlx5_rxq.c     | 144 +++++++++++++++++++++++++++++++++-------
 drivers/net/mlx5/mlx5_rxtx.c    |   3 +-
 drivers/net/mlx5/mlx5_rxtx.h    |  13 +++-
 drivers/net/mlx5/mlx5_trigger.c |  20 +++---
 8 files changed, 161 insertions(+), 35 deletions(-)

-- 
1.8.3.1


^ permalink raw reply	[flat|nested] 35+ messages in thread

* [dpdk-dev] [PATCH v3 1/6] net/mlx5: add extended Rx queue setup routine
  2020-10-26 10:11   ` [dpdk-dev] [PATCH v3 0/6] net/mlx5: add Rx buffer split support Viacheslav Ovsiienko
@ 2020-10-26 10:11     ` Viacheslav Ovsiienko
  2020-10-26 10:11     ` [dpdk-dev] [PATCH v3 2/6] net/mlx5: configure Rx queue to support split Viacheslav Ovsiienko
                       ` (4 subsequent siblings)
  5 siblings, 0 replies; 35+ messages in thread
From: Viacheslav Ovsiienko @ 2020-10-26 10:11 UTC (permalink / raw)
  To: dev; +Cc: thomas, matan, akozyrev, rasland, orika

The routine to provide Rx queue setup with specifying
extended receiving buffer description is added.
It allows application to specify desired segment
lengths, data position offsets in the buffer
and dedicated memory pool for each segment.

Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
---
 drivers/net/mlx5/mlx5.h      |  3 +++
 drivers/net/mlx5/mlx5_rxq.c  | 39 ++++++++++++++++++++++++++++++++++-----
 drivers/net/mlx5/mlx5_rxtx.h | 13 ++++++++++++-
 3 files changed, 49 insertions(+), 6 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index c9d5d71..03c4128 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -164,6 +164,9 @@ struct mlx5_stats_ctrl {
 /* Maximal size of aggregated LRO packet. */
 #define MLX5_MAX_LRO_SIZE (UINT8_MAX * MLX5_LRO_SEG_CHUNK_SIZE)
 
+/* Maximal number of segments to split. */
+#define MLX5_MAX_RXQ_NSEG (1u << MLX5_MAX_LOG_RQ_SEGS)
+
 /* LRO configurations structure. */
 struct mlx5_lro_config {
 	uint32_t supported:1; /* Whether LRO is supported. */
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index e1783ba..ffb83de 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -731,12 +731,40 @@
 	struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
 	struct mlx5_rxq_ctrl *rxq_ctrl =
 		container_of(rxq, struct mlx5_rxq_ctrl, rxq);
+	struct rte_eth_rxseg_split *rx_seg =
+				(struct rte_eth_rxseg_split *)conf->rx_seg;
+	struct rte_eth_rxseg_split rx_single = {.mp = mp};
+	uint16_t n_seg = conf->rx_nseg;
 	int res;
 
+	if (mp) {
+		/*
+		 * The parameters should be checked on rte_eth_dev layer.
+		 * If mp is specified it means the compatible configuration
+		 * without buffer split feature tuning.
+		 */
+		rx_seg = &rx_single;
+		n_seg = 1;
+	}
+	if (n_seg > 1) {
+		uint64_t offloads = conf->offloads |
+				    dev->data->dev_conf.rxmode.offloads;
+
+		/* The offloads should be checked on rte_eth_dev layer. */
+		MLX5_ASSERT(offloads & DEV_RX_OFFLOAD_SCATTER);
+		if (!(offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)) {
+			DRV_LOG(ERR, "port %u queue index %u split "
+				     "offload not configured",
+				     dev->data->port_id, idx);
+			rte_errno = ENOSPC;
+			return -rte_errno;
+		}
+		MLX5_ASSERT(n_seg < MLX5_MAX_RXQ_NSEG);
+	}
 	res = mlx5_rx_queue_pre_setup(dev, idx, &desc);
 	if (res)
 		return res;
-	rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, conf, mp);
+	rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, conf, rx_seg, n_seg);
 	if (!rxq_ctrl) {
 		DRV_LOG(ERR, "port %u unable to allocate queue index %u",
 			dev->data->port_id, idx);
@@ -1329,11 +1357,11 @@
 struct mlx5_rxq_ctrl *
 mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	     unsigned int socket, const struct rte_eth_rxconf *conf,
-	     struct rte_mempool *mp)
+	     const struct rte_eth_rxseg_split *rx_seg, uint16_t n_seg)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_rxq_ctrl *tmpl;
-	unsigned int mb_len = rte_pktmbuf_data_room_size(mp);
+	unsigned int mb_len = rte_pktmbuf_data_room_size(rx_seg[0].mp);
 	unsigned int mprq_stride_nums;
 	unsigned int mprq_stride_size;
 	unsigned int mprq_stride_cap;
@@ -1347,7 +1375,8 @@ struct mlx5_rxq_ctrl *
 	uint64_t offloads = conf->offloads |
 			   dev->data->dev_conf.rxmode.offloads;
 	unsigned int lro_on_queue = !!(offloads & DEV_RX_OFFLOAD_TCP_LRO);
-	const int mprq_en = mlx5_check_mprq_support(dev) > 0;
+	const int mprq_en = mlx5_check_mprq_support(dev) > 0 && n_seg == 1 &&
+			    !rx_seg[0].offset && !rx_seg[0].length;
 	unsigned int max_rx_pkt_len = lro_on_queue ?
 			dev->data->dev_conf.rxmode.max_lro_pkt_size :
 			dev->data->dev_conf.rxmode.max_rx_pkt_len;
@@ -1532,7 +1561,7 @@ struct mlx5_rxq_ctrl *
 		(!!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS));
 	tmpl->rxq.port_id = dev->data->port_id;
 	tmpl->priv = priv;
-	tmpl->rxq.mp = mp;
+	tmpl->rxq.mp = rx_seg[0].mp;
 	tmpl->rxq.elts_n = log2above(desc);
 	tmpl->rxq.rq_repl_thresh =
 		MLX5_VPMD_RXQ_RPLNSH_THRESH(1 << tmpl->rxq.elts_n);
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index b243b6f..f3af9bd 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -94,6 +94,13 @@ enum mlx5_rxq_err_state {
 	MLX5_RXQ_ERR_STATE_NEED_READY,
 };
 
+struct mlx5_eth_rxseg {
+	struct rte_mempool *mp; /**< Memory pool to allocate segment from. */
+	uint16_t length; /**< Segment data length, configures split point. */
+	uint16_t offset; /**< Data offset from beginning of mbuf data buffer. */
+	uint32_t reserved; /**< Reserved field. */
+};
+
 /* RX queue descriptor. */
 struct mlx5_rxq_data {
 	unsigned int csum:1; /* Enable checksum offloading. */
@@ -153,6 +160,9 @@ struct mlx5_rxq_data {
 	uint32_t tunnel; /* Tunnel information. */
 	uint64_t flow_meta_mask;
 	int32_t flow_meta_offset;
+	uint32_t rxseg_n; /* Number of split segment descriptions. */
+	struct mlx5_eth_rxseg rxseg[MLX5_MAX_RXQ_NSEG];
+	/* Buffer split segment descriptions - sizes, offsets, pools. */
 } __rte_cache_aligned;
 
 enum mlx5_rxq_type {
@@ -316,7 +326,8 @@ int mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 struct mlx5_rxq_ctrl *mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx,
 				   uint16_t desc, unsigned int socket,
 				   const struct rte_eth_rxconf *conf,
-				   struct rte_mempool *mp);
+				   const struct rte_eth_rxseg_split *rx_seg,
+				   uint16_t n_seg);
 struct mlx5_rxq_ctrl *mlx5_rxq_hairpin_new
 	(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	 const struct rte_eth_hairpin_conf *hairpin_conf);
-- 
1.8.3.1


^ permalink raw reply	[flat|nested] 35+ messages in thread

* [dpdk-dev] [PATCH v3 2/6] net/mlx5: configure Rx queue to support split
  2020-10-26 10:11   ` [dpdk-dev] [PATCH v3 0/6] net/mlx5: add Rx buffer split support Viacheslav Ovsiienko
  2020-10-26 10:11     ` [dpdk-dev] [PATCH v3 1/6] net/mlx5: add extended Rx queue setup routine Viacheslav Ovsiienko
@ 2020-10-26 10:11     ` Viacheslav Ovsiienko
  2020-10-26 10:11     ` [dpdk-dev] [PATCH v3 3/6] net/mlx5: register multiple pool for Rx queue Viacheslav Ovsiienko
                       ` (3 subsequent siblings)
  5 siblings, 0 replies; 35+ messages in thread
From: Viacheslav Ovsiienko @ 2020-10-26 10:11 UTC (permalink / raw)
  To: dev; +Cc: thomas, matan, akozyrev, rasland, orika

The scatter-gather elements should be configured
accordingly to support the buffer split feature.
The application provides the desired settings for
the segments at the beginning of the packets and
PMD pads the buffer chain (if needed) with attributes
of last specified segment to accommodate the packet
of maximal length.

Some limitations are implied. The MPRQ
feature should be disengaged if split is requested,
since MPRQ neither supports pushing data to the
dedicated pools nor follows the flexible buffer sizes.
The vectorized rx_burst routines do not support
scattering (these ones are extremely simplified
and work over a single segment only) and cannot
handle split either.

Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
---
 drivers/net/mlx5/mlx5_rxq.c | 93 ++++++++++++++++++++++++++++++++++++++-------
 1 file changed, 80 insertions(+), 13 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index ffb83de..17fd89e 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -1384,22 +1384,89 @@ struct mlx5_rxq_ctrl *
 							RTE_PKTMBUF_HEADROOM;
 	unsigned int max_lro_size = 0;
 	unsigned int first_mb_free_size = mb_len - RTE_PKTMBUF_HEADROOM;
+	const struct rte_eth_rxseg_split *qs_seg = rx_seg;
+	unsigned int tail_len;
 
-	if (non_scatter_min_mbuf_size > mb_len && !(offloads &
-						    DEV_RX_OFFLOAD_SCATTER)) {
+	tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl) +
+			   desc_n * sizeof(struct rte_mbuf *), 0, socket);
+	if (!tmpl) {
+		rte_errno = ENOMEM;
+		return NULL;
+	}
+	MLX5_ASSERT(n_seg && n_seg <= MLX5_MAX_RXQ_NSEG);
+	/*
+	 * Build the array of actual buffer offsets and lengths.
+	 * Pad with the buffers from the last memory pool if
+	 * needed to handle max size packets, replace zero length
+	 * with the buffer length from the pool.
+	 */
+	tail_len = max_rx_pkt_len;
+	do {
+		struct mlx5_eth_rxseg *hw_seg =
+					&tmpl->rxq.rxseg[tmpl->rxq.rxseg_n];
+		uint32_t buf_len, offset, seg_len;
+
+		/*
+		 * For the buffers beyond descriptions offset is zero,
+		 * the first buffer contains head room.
+		 */
+		buf_len = rte_pktmbuf_data_room_size(qs_seg->mp);
+		offset = (tmpl->rxq.rxseg_n >= n_seg ? 0 : qs_seg->offset) +
+			 (tmpl->rxq.rxseg_n ? 0 : RTE_PKTMBUF_HEADROOM);
+		/*
+		 * For the buffers beyond descriptions the length is
+		 * pool buffer length, zero lengths are replaced with
+		 * pool buffer length either.
+		 */
+		seg_len = tmpl->rxq.rxseg_n >= n_seg ? buf_len :
+						       qs_seg->length ?
+						       qs_seg->length :
+						       (buf_len - offset);
+		/* Check is done in long int, now overflows. */
+		if (buf_len < seg_len + offset) {
+			DRV_LOG(ERR, "port %u Rx queue %u: Split offset/length "
+				     "%u/%u can't be satisfied",
+				     dev->data->port_id, idx,
+				     qs_seg->length, qs_seg->offset);
+			rte_errno = EINVAL;
+			goto error;
+		}
+		if (seg_len > tail_len)
+			seg_len = buf_len - offset;
+		if (++tmpl->rxq.rxseg_n > MLX5_MAX_RXQ_NSEG) {
+			DRV_LOG(ERR,
+				"port %u too many SGEs (%u) needed to handle"
+				" requested maximum packet size %u, the maximum"
+				" supported are %u", dev->data->port_id,
+				tmpl->rxq.rxseg_n, max_rx_pkt_len,
+				MLX5_MAX_RXQ_NSEG);
+			rte_errno = ENOTSUP;
+			goto error;
+		}
+		/* Build the actual scattering element in the queue object. */
+		hw_seg->mp = qs_seg->mp;
+		MLX5_ASSERT(offset <= UINT16_MAX);
+		MLX5_ASSERT(seg_len <= UINT16_MAX);
+		hw_seg->offset = (uint16_t)offset;
+		hw_seg->length = (uint16_t)seg_len;
+		/*
+		 * Advance the segment descriptor, the padding is the based
+		 * on the attributes of the last descriptor.
+		 */
+		if (tmpl->rxq.rxseg_n < n_seg)
+			qs_seg++;
+		tail_len -= RTE_MIN(tail_len, seg_len);
+	} while (tail_len || !rte_is_power_of_2(tmpl->rxq.rxseg_n));
+	MLX5_ASSERT(tmpl->rxq.rxseg_n &&
+		    tmpl->rxq.rxseg_n <= MLX5_MAX_RXQ_NSEG);
+	if (tmpl->rxq.rxseg_n > 1 && !(offloads & DEV_RX_OFFLOAD_SCATTER)) {
 		DRV_LOG(ERR, "port %u Rx queue %u: Scatter offload is not"
 			" configured and no enough mbuf space(%u) to contain "
 			"the maximum RX packet length(%u) with head-room(%u)",
 			dev->data->port_id, idx, mb_len, max_rx_pkt_len,
 			RTE_PKTMBUF_HEADROOM);
 		rte_errno = ENOSPC;
-		return NULL;
-	}
-	tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl) +
-			   desc_n * sizeof(struct rte_mbuf *), 0, socket);
-	if (!tmpl) {
-		rte_errno = ENOMEM;
-		return NULL;
+		goto error;
 	}
 	tmpl->type = MLX5_RXQ_TYPE_STANDARD;
 	if (mlx5_mr_btree_init(&tmpl->rxq.mr_ctrl.cache_bh,
@@ -1426,7 +1493,7 @@ struct mlx5_rxq_ctrl *
 	 *  - The number of descs is more than the number of strides.
 	 *  - max_rx_pkt_len plus overhead is less than the max size
 	 *    of a stride or mprq_stride_size is specified by a user.
-	 *    Need to nake sure that there are enough stides to encap
+	 *    Need to make sure that there are enough stides to encap
 	 *    the maximum packet size in case mprq_stride_size is set.
 	 *  Otherwise, enable Rx scatter if necessary.
 	 */
@@ -1456,11 +1523,11 @@ struct mlx5_rxq_ctrl *
 			" strd_num_n = %u, strd_sz_n = %u",
 			dev->data->port_id, idx,
 			tmpl->rxq.strd_num_n, tmpl->rxq.strd_sz_n);
-	} else if (max_rx_pkt_len <= first_mb_free_size) {
+	} else if (tmpl->rxq.rxseg_n == 1) {
+		MLX5_ASSERT(max_rx_pkt_len <= first_mb_free_size);
 		tmpl->rxq.sges_n = 0;
 		max_lro_size = max_rx_pkt_len;
 	} else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
-		unsigned int size = non_scatter_min_mbuf_size;
 		unsigned int sges_n;
 
 		if (lro_on_queue && first_mb_free_size <
@@ -1475,7 +1542,7 @@ struct mlx5_rxq_ctrl *
 		 * Determine the number of SGEs needed for a full packet
 		 * and round it to the next power of two.
 		 */
-		sges_n = log2above((size / mb_len) + !!(size % mb_len));
+		sges_n = tmpl->rxq.rxseg_n;
 		if (sges_n > MLX5_MAX_LOG_RQ_SEGS) {
 			DRV_LOG(ERR,
 				"port %u too many SGEs (%u) needed to handle"
-- 
1.8.3.1


^ permalink raw reply	[flat|nested] 35+ messages in thread

* [dpdk-dev] [PATCH v3 3/6] net/mlx5: register multiple pool for Rx queue
  2020-10-26 10:11   ` [dpdk-dev] [PATCH v3 0/6] net/mlx5: add Rx buffer split support Viacheslav Ovsiienko
  2020-10-26 10:11     ` [dpdk-dev] [PATCH v3 1/6] net/mlx5: add extended Rx queue setup routine Viacheslav Ovsiienko
  2020-10-26 10:11     ` [dpdk-dev] [PATCH v3 2/6] net/mlx5: configure Rx queue to support split Viacheslav Ovsiienko
@ 2020-10-26 10:11     ` Viacheslav Ovsiienko
  2020-10-26 10:11     ` [dpdk-dev] [PATCH v3 4/6] net/mlx5: update Rx datapath to support split Viacheslav Ovsiienko
                       ` (2 subsequent siblings)
  5 siblings, 0 replies; 35+ messages in thread
From: Viacheslav Ovsiienko @ 2020-10-26 10:11 UTC (permalink / raw)
  To: dev; +Cc: thomas, matan, akozyrev, rasland, orika

The split feature for receiving packets was added to the mlx5
PMD; now an Rx queue can receive data into buffers belonging
to different pools, and the memory of all the involved pools
must be registered for DMA operations in order to allow the
hardware to store the data.

Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
---
 drivers/net/mlx5/mlx5_mr.c      |  3 +++
 drivers/net/mlx5/mlx5_trigger.c | 20 ++++++++++++--------
 2 files changed, 15 insertions(+), 8 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c
index dbcf0aa..c308ecc 100644
--- a/drivers/net/mlx5/mlx5_mr.c
+++ b/drivers/net/mlx5/mlx5_mr.c
@@ -536,6 +536,9 @@ struct mr_update_mp_data {
 		.ret = 0,
 	};
 
+	DRV_LOG(DEBUG, "Port %u Rx queue registering mp %s "
+		       "having %u chunks.", dev->data->port_id,
+		       mp->name, mp->nb_mem_chunks);
 	rte_mempool_mem_iter(mp, mlx5_mr_update_mp_cb, &data);
 	if (data.ret < 0 && rte_errno == ENXIO) {
 		/* Mempool may have externally allocated memory. */
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index 7735f02..19f2d66 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -145,18 +145,22 @@
 		dev->data->port_id, priv->sh->device_attr.max_sge);
 	for (i = 0; i != priv->rxqs_n; ++i) {
 		struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_get(dev, i);
-		struct rte_mempool *mp;
 
 		if (!rxq_ctrl)
 			continue;
 		if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD) {
-			/* Pre-register Rx mempool. */
-			mp = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
-			     rxq_ctrl->rxq.mprq_mp : rxq_ctrl->rxq.mp;
-			DRV_LOG(DEBUG, "Port %u Rx queue %u registering mp %s"
-				" having %u chunks.", dev->data->port_id,
-				rxq_ctrl->rxq.idx, mp->name, mp->nb_mem_chunks);
-			mlx5_mr_update_mp(dev, &rxq_ctrl->rxq.mr_ctrl, mp);
+			/* Pre-register Rx mempools. */
+			if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq)) {
+				mlx5_mr_update_mp(dev, &rxq_ctrl->rxq.mr_ctrl,
+						  rxq_ctrl->rxq.mprq_mp);
+			} else {
+				uint32_t s;
+
+				for (s = 0; s < rxq_ctrl->rxq.rxseg_n; s++)
+					mlx5_mr_update_mp
+						(dev, &rxq_ctrl->rxq.mr_ctrl,
+						rxq_ctrl->rxq.rxseg[s].mp);
+			}
 			ret = rxq_alloc_elts(rxq_ctrl);
 			if (ret)
 				goto error;
-- 
1.8.3.1


^ permalink raw reply	[flat|nested] 35+ messages in thread

* [dpdk-dev] [PATCH v3 4/6] net/mlx5: update Rx datapath to support split
  2020-10-26 10:11   ` [dpdk-dev] [PATCH v3 0/6] net/mlx5: add Rx buffer split support Viacheslav Ovsiienko
                       ` (2 preceding siblings ...)
  2020-10-26 10:11     ` [dpdk-dev] [PATCH v3 3/6] net/mlx5: register multiple pool for Rx queue Viacheslav Ovsiienko
@ 2020-10-26 10:11     ` Viacheslav Ovsiienko
  2020-10-26 10:11     ` [dpdk-dev] [PATCH v3 5/6] net/mlx5: report Rx segmentation capabilities Viacheslav Ovsiienko
  2020-10-26 10:11     ` [dpdk-dev] [PATCH v3 6/6] doc: add buffer split feature limitation to mlx5 guide Viacheslav Ovsiienko
  5 siblings, 0 replies; 35+ messages in thread
From: Viacheslav Ovsiienko @ 2020-10-26 10:11 UTC (permalink / raw)
  To: dev; +Cc: thomas, matan, akozyrev, rasland, orika

Only the regular rx_burst routine is updated to support split,
because the vectorized ones do not support scatter and MPRQ
does not support split at all.

Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
---
 drivers/net/mlx5/mlx5_rxq.c  | 11 +++++------
 drivers/net/mlx5/mlx5_rxtx.c |  3 ++-
 2 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 17fd89e..a19ca7c 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -210,9 +210,10 @@
 
 	/* Iterate on segments. */
 	for (i = 0; (i != elts_n); ++i) {
+		struct mlx5_eth_rxseg *seg = &rxq_ctrl->rxq.rxseg[i % sges_n];
 		struct rte_mbuf *buf;
 
-		buf = rte_pktmbuf_alloc(rxq_ctrl->rxq.mp);
+		buf = rte_pktmbuf_alloc(seg->mp);
 		if (buf == NULL) {
 			DRV_LOG(ERR, "port %u empty mbuf pool",
 				PORT_ID(rxq_ctrl->priv));
@@ -225,12 +226,10 @@
 		MLX5_ASSERT(rte_pktmbuf_data_len(buf) == 0);
 		MLX5_ASSERT(rte_pktmbuf_pkt_len(buf) == 0);
 		MLX5_ASSERT(!buf->next);
-		/* Only the first segment keeps headroom. */
-		if (i % sges_n)
-			SET_DATA_OFF(buf, 0);
+		SET_DATA_OFF(buf, seg->offset);
 		PORT(buf) = rxq_ctrl->rxq.port_id;
-		DATA_LEN(buf) = rte_pktmbuf_tailroom(buf);
-		PKT_LEN(buf) = DATA_LEN(buf);
+		DATA_LEN(buf) = seg->length;
+		PKT_LEN(buf) = seg->length;
 		NB_SEGS(buf) = 1;
 		(*rxq_ctrl->rxq.elts)[i] = buf;
 	}
diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index b530ff4..dd84249 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -1334,7 +1334,8 @@ enum mlx5_txcmp_code {
 		rte_prefetch0(seg);
 		rte_prefetch0(cqe);
 		rte_prefetch0(wqe);
-		rep = rte_mbuf_raw_alloc(rxq->mp);
+		/* Allocate the buf from the same pool. */
+		rep = rte_mbuf_raw_alloc(seg->pool);
 		if (unlikely(rep == NULL)) {
 			++rxq->stats.rx_nombuf;
 			if (!pkt) {
-- 
1.8.3.1


^ permalink raw reply	[flat|nested] 35+ messages in thread

* [dpdk-dev] [PATCH v3 5/6] net/mlx5: report Rx segmentation capabilities
  2020-10-26 10:11   ` [dpdk-dev] [PATCH v3 0/6] net/mlx5: add Rx buffer split support Viacheslav Ovsiienko
                       ` (3 preceding siblings ...)
  2020-10-26 10:11     ` [dpdk-dev] [PATCH v3 4/6] net/mlx5: update Rx datapath to support split Viacheslav Ovsiienko
@ 2020-10-26 10:11     ` Viacheslav Ovsiienko
  2020-10-26 10:11     ` [dpdk-dev] [PATCH v3 6/6] doc: add buffer split feature limitation to mlx5 guide Viacheslav Ovsiienko
  5 siblings, 0 replies; 35+ messages in thread
From: Viacheslav Ovsiienko @ 2020-10-26 10:11 UTC (permalink / raw)
  To: dev; +Cc: thomas, matan, akozyrev, rasland, orika

Add rte_eth_dev_info->rx_seg_capa parameters:
  - receiving to multiple pools is supported
  - buffer offsets are supported
  - no offset alignment requirement
  - reports the maximal number of segments
  - reports the buffer split offload flag

Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
---
 drivers/net/mlx5/mlx5_ethdev.c | 4 ++++
 drivers/net/mlx5/mlx5_rxq.c    | 1 +
 2 files changed, 5 insertions(+)

diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index 7631f64..9017184 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -306,6 +306,10 @@
 	info->max_tx_queues = max;
 	info->max_mac_addrs = MLX5_MAX_UC_MAC_ADDRESSES;
 	info->rx_queue_offload_capa = mlx5_get_rx_queue_offloads(dev);
+	info->rx_seg_capa.max_nseg = MLX5_MAX_RXQ_NSEG;
+	info->rx_seg_capa.multi_pools = 1;
+	info->rx_seg_capa.offset_allowed = 1;
+	info->rx_seg_capa.offset_align_log2 = 0;
 	info->rx_offload_capa = (mlx5_get_rx_port_offloads() |
 				 info->rx_queue_offload_capa);
 	info->tx_offload_capa = mlx5_get_tx_port_offloads(dev);
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index a19ca7c..88e8911 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -389,6 +389,7 @@
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_dev_config *config = &priv->config;
 	uint64_t offloads = (DEV_RX_OFFLOAD_SCATTER |
+			     RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT |
 			     DEV_RX_OFFLOAD_TIMESTAMP |
 			     DEV_RX_OFFLOAD_JUMBO_FRAME |
 			     DEV_RX_OFFLOAD_RSS_HASH);
-- 
1.8.3.1


^ permalink raw reply	[flat|nested] 35+ messages in thread

* [dpdk-dev] [PATCH v3 6/6] doc: add buffer split feature limitation to mlx5 guide
  2020-10-26 10:11   ` [dpdk-dev] [PATCH v3 0/6] net/mlx5: add Rx buffer split support Viacheslav Ovsiienko
                       ` (4 preceding siblings ...)
  2020-10-26 10:11     ` [dpdk-dev] [PATCH v3 5/6] net/mlx5: report Rx segmentation capabilities Viacheslav Ovsiienko
@ 2020-10-26 10:11     ` Viacheslav Ovsiienko
  5 siblings, 0 replies; 35+ messages in thread
From: Viacheslav Ovsiienko @ 2020-10-26 10:11 UTC (permalink / raw)
  To: dev; +Cc: thomas, matan, akozyrev, rasland, orika

The buffer split feature is mentioned in the mlx5 PMD
documentation; the limitation description is added
as well.

Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
---
 doc/guides/nics/mlx5.rst | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/doc/guides/nics/mlx5.rst b/doc/guides/nics/mlx5.rst
index 1a8808e..4621a5e 100644
--- a/doc/guides/nics/mlx5.rst
+++ b/doc/guides/nics/mlx5.rst
@@ -64,7 +64,8 @@ Features
 
 - Multi arch support: x86_64, POWER8, ARMv8, i686.
 - Multiple TX and RX queues.
-- Support for scattered TX and RX frames.
+- Support for scattered TX frames.
+- Advanced support for scattered Rx frames with tunable buffer attributes.
 - IPv4, IPv6, TCPv4, TCPv6, UDPv4 and UDPv6 RSS on any number of queues.
 - RSS using different combinations of fields: L3 only, L4 only or both,
   and source only, destination only or both.
@@ -192,6 +193,9 @@ Limitations
    the device. In case of ungraceful program termination, some entries may
    remain present and should be removed manually by other means.
 
+- Buffer split offload is supported with regular Rx burst routine only,
+  no MPRQ feature or vectorized code can be engaged.
+
 - When Multi-Packet Rx queue is configured (``mprq_en``), a Rx packet can be
   externally attached to a user-provided mbuf with having EXT_ATTACHED_MBUF in
   ol_flags. As the mempool for the external buffer is managed by PMD, all the
-- 
1.8.3.1


^ permalink raw reply	[flat|nested] 35+ messages in thread

* [dpdk-dev] [PATCH v4 0/6] net/mlx5: add Rx buffer split support
  2020-10-22 15:42 ` [dpdk-dev] [PATCH 1/5] net/mlx5: add extended Rx queue setup routine Viacheslav Ovsiienko
  2020-10-23  9:46   ` [dpdk-dev] [PATCH v2 0/5] net/mlx5: add Rx buffer split support Viacheslav Ovsiienko
  2020-10-26 10:11   ` [dpdk-dev] [PATCH v3 0/6] net/mlx5: add Rx buffer split support Viacheslav Ovsiienko
@ 2020-10-26 11:54   ` Viacheslav Ovsiienko
  2020-10-26 11:55     ` [dpdk-dev] [PATCH v4 1/6] net/mlx5: add extended Rx queue setup routine Viacheslav Ovsiienko
                       ` (6 more replies)
  2020-10-26 17:17   ` [dpdk-dev] [PATCH] net/mlx5: fix Rx queue initialization for scattered segment Viacheslav Ovsiienko
  3 siblings, 7 replies; 35+ messages in thread
From: Viacheslav Ovsiienko @ 2020-10-26 11:54 UTC (permalink / raw)
  To: dev; +Cc: thomas, matan, akozyrev, rasland, orika

This patch adds to the PMD the functionality for the receiving
buffer split feature [1]

[1] http://patches.dpdk.org/patch/81154/

Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>

---
v1: http://patches.dpdk.org/patch/81808/

v2: http://patches.dpdk.org/patch/81923/
    - typos
    - documentation is updated

v3: http://patches.dpdk.org/patch/82177/
    - extra parameter checks in PMD rx_queue_setup removed
    - minor optimizations in PMD

v4: - rebasing

Viacheslav Ovsiienko (6):
  net/mlx5: add extended Rx queue setup routine
  net/mlx5: configure Rx queue to support split
  net/mlx5: register multiple pool for Rx queue
  net/mlx5: update Rx datapath to support split
  net/mlx5: report Rx segmentation capabilities
  doc: add buffer split feature limitation to mlx5 guide

 doc/guides/nics/mlx5.rst        |   6 +-
 drivers/net/mlx5/mlx5.h         |   3 +
 drivers/net/mlx5/mlx5_ethdev.c  |   4 ++
 drivers/net/mlx5/mlx5_mr.c      |   3 +
 drivers/net/mlx5/mlx5_rxq.c     | 136 +++++++++++++++++++++++++++++++++++-----
 drivers/net/mlx5/mlx5_rxtx.c    |   3 +-
 drivers/net/mlx5/mlx5_rxtx.h    |  13 +++-
 drivers/net/mlx5/mlx5_trigger.c |  20 +++---
 8 files changed, 160 insertions(+), 28 deletions(-)

-- 
1.8.3.1


^ permalink raw reply	[flat|nested] 35+ messages in thread

* [dpdk-dev] [PATCH v4 1/6] net/mlx5: add extended Rx queue setup routine
  2020-10-26 11:54   ` [dpdk-dev] [PATCH v4 0/6] net/mlx5: add Rx buffer split support Viacheslav Ovsiienko
@ 2020-10-26 11:55     ` Viacheslav Ovsiienko
  2020-10-26 11:55     ` [dpdk-dev] [PATCH v4 2/6] net/mlx5: configure Rx queue to support split Viacheslav Ovsiienko
                       ` (5 subsequent siblings)
  6 siblings, 0 replies; 35+ messages in thread
From: Viacheslav Ovsiienko @ 2020-10-26 11:55 UTC (permalink / raw)
  To: dev; +Cc: thomas, matan, akozyrev, rasland, orika

The routine to provide Rx queue setup with specifying
extended receiving buffer description is added.
It allows application to specify desired segment
lengths, data position offsets in the buffer
and dedicated memory pool for each segment.

Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
---
 drivers/net/mlx5/mlx5.h      |  3 +++
 drivers/net/mlx5/mlx5_rxq.c  | 39 ++++++++++++++++++++++++++++++++++-----
 drivers/net/mlx5/mlx5_rxtx.h | 13 ++++++++++++-
 3 files changed, 49 insertions(+), 6 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index bb954c4..258be03 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -164,6 +164,9 @@ struct mlx5_stats_ctrl {
 /* Maximal size of aggregated LRO packet. */
 #define MLX5_MAX_LRO_SIZE (UINT8_MAX * MLX5_LRO_SEG_CHUNK_SIZE)
 
+/* Maximal number of segments to split. */
+#define MLX5_MAX_RXQ_NSEG (1u << MLX5_MAX_LOG_RQ_SEGS)
+
 /* LRO configurations structure. */
 struct mlx5_lro_config {
 	uint32_t supported:1; /* Whether LRO is supported. */
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 0176ece..72d76c1 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -744,12 +744,40 @@
 	struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
 	struct mlx5_rxq_ctrl *rxq_ctrl =
 		container_of(rxq, struct mlx5_rxq_ctrl, rxq);
+	struct rte_eth_rxseg_split *rx_seg =
+				(struct rte_eth_rxseg_split *)conf->rx_seg;
+	struct rte_eth_rxseg_split rx_single = {.mp = mp};
+	uint16_t n_seg = conf->rx_nseg;
 	int res;
 
+	if (mp) {
+		/*
+		 * The parameters should be checked on rte_eth_dev layer.
+		 * If mp is specified it means the compatible configuration
+		 * without buffer split feature tuning.
+		 */
+		rx_seg = &rx_single;
+		n_seg = 1;
+	}
+	if (n_seg > 1) {
+		uint64_t offloads = conf->offloads |
+				    dev->data->dev_conf.rxmode.offloads;
+
+		/* The offloads should be checked on rte_eth_dev layer. */
+		MLX5_ASSERT(offloads & DEV_RX_OFFLOAD_SCATTER);
+		if (!(offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)) {
+			DRV_LOG(ERR, "port %u queue index %u split "
+				     "offload not configured",
+				     dev->data->port_id, idx);
+			rte_errno = ENOSPC;
+			return -rte_errno;
+		}
+		MLX5_ASSERT(n_seg < MLX5_MAX_RXQ_NSEG);
+	}
 	res = mlx5_rx_queue_pre_setup(dev, idx, &desc);
 	if (res)
 		return res;
-	rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, conf, mp);
+	rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, conf, rx_seg, n_seg);
 	if (!rxq_ctrl) {
 		DRV_LOG(ERR, "port %u unable to allocate queue index %u",
 			dev->data->port_id, idx);
@@ -1342,11 +1370,11 @@
 struct mlx5_rxq_ctrl *
 mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	     unsigned int socket, const struct rte_eth_rxconf *conf,
-	     struct rte_mempool *mp)
+	     const struct rte_eth_rxseg_split *rx_seg, uint16_t n_seg)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_rxq_ctrl *tmpl;
-	unsigned int mb_len = rte_pktmbuf_data_room_size(mp);
+	unsigned int mb_len = rte_pktmbuf_data_room_size(rx_seg[0].mp);
 	struct mlx5_dev_config *config = &priv->config;
 	uint64_t offloads = conf->offloads |
 			   dev->data->dev_conf.rxmode.offloads;
@@ -1358,7 +1386,8 @@ struct mlx5_rxq_ctrl *
 							RTE_PKTMBUF_HEADROOM;
 	unsigned int max_lro_size = 0;
 	unsigned int first_mb_free_size = mb_len - RTE_PKTMBUF_HEADROOM;
-	const int mprq_en = mlx5_check_mprq_support(dev) > 0;
+	const int mprq_en = mlx5_check_mprq_support(dev) > 0 && n_seg == 1 &&
+			    !rx_seg[0].offset && !rx_seg[0].length;
 	unsigned int mprq_stride_nums = config->mprq.stride_num_n ?
 		config->mprq.stride_num_n : MLX5_MPRQ_STRIDE_NUM_N;
 	unsigned int mprq_stride_size = non_scatter_min_mbuf_size <=
@@ -1544,7 +1573,7 @@ struct mlx5_rxq_ctrl *
 		(!!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS));
 	tmpl->rxq.port_id = dev->data->port_id;
 	tmpl->priv = priv;
-	tmpl->rxq.mp = mp;
+	tmpl->rxq.mp = rx_seg[0].mp;
 	tmpl->rxq.elts_n = log2above(desc);
 	tmpl->rxq.rq_repl_thresh =
 		MLX5_VPMD_RXQ_RPLNSH_THRESH(desc_n);
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index 1b35a26..f204f7e 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -101,6 +101,13 @@ enum mlx5_rqx_code {
 	MLX5_RXQ_CODE_DROPPED,
 };
 
+struct mlx5_eth_rxseg {
+	struct rte_mempool *mp; /**< Memory pool to allocate segment from. */
+	uint16_t length; /**< Segment data length, configures split point. */
+	uint16_t offset; /**< Data offset from beginning of mbuf data buffer. */
+	uint32_t reserved; /**< Reserved field. */
+};
+
 /* RX queue descriptor. */
 struct mlx5_rxq_data {
 	unsigned int csum:1; /* Enable checksum offloading. */
@@ -158,6 +165,9 @@ struct mlx5_rxq_data {
 	uint32_t tunnel; /* Tunnel information. */
 	uint64_t flow_meta_mask;
 	int32_t flow_meta_offset;
+	uint32_t rxseg_n; /* Number of split segment descriptions. */
+	struct mlx5_eth_rxseg rxseg[MLX5_MAX_RXQ_NSEG];
+	/* Buffer split segment descriptions - sizes, offsets, pools. */
 } __rte_cache_aligned;
 
 enum mlx5_rxq_type {
@@ -321,7 +331,8 @@ int mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 struct mlx5_rxq_ctrl *mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx,
 				   uint16_t desc, unsigned int socket,
 				   const struct rte_eth_rxconf *conf,
-				   struct rte_mempool *mp);
+				   const struct rte_eth_rxseg_split *rx_seg,
+				   uint16_t n_seg);
 struct mlx5_rxq_ctrl *mlx5_rxq_hairpin_new
 	(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	 const struct rte_eth_hairpin_conf *hairpin_conf);
-- 
1.8.3.1


^ permalink raw reply	[flat|nested] 35+ messages in thread

* [dpdk-dev] [PATCH v4 2/6] net/mlx5: configure Rx queue to support split
  2020-10-26 11:54   ` [dpdk-dev] [PATCH v4 0/6] net/mlx5: add Rx buffer split support Viacheslav Ovsiienko
  2020-10-26 11:55     ` [dpdk-dev] [PATCH v4 1/6] net/mlx5: add extended Rx queue setup routine Viacheslav Ovsiienko
@ 2020-10-26 11:55     ` Viacheslav Ovsiienko
  2020-10-26 11:55     ` [dpdk-dev] [PATCH v4 3/6] net/mlx5: register multiple pool for Rx queue Viacheslav Ovsiienko
                       ` (4 subsequent siblings)
  6 siblings, 0 replies; 35+ messages in thread
From: Viacheslav Ovsiienko @ 2020-10-26 11:55 UTC (permalink / raw)
  To: dev; +Cc: thomas, matan, akozyrev, rasland, orika

The scatter-gather elements should be configured
accordingly to support the buffer split feature.
The application provides the desired settings for
the segments at the beginning of the packets and
PMD pads the buffer chain (if needed) with attributes
of last specified segment to accommodate the packet
of maximal length.

Some limitations are implied. The MPRQ
feature should be disengaged if split is requested,
since MPRQ neither supports pushing data to the
dedicated pools nor follows the flexible buffer sizes.
The vectorized rx_burst routines do not support
scattering (they are extremely simplified
and work over a single segment only) and can't
handle split either.

Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
---
 drivers/net/mlx5/mlx5_rxq.c | 85 +++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 79 insertions(+), 6 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 72d76c1..7695d62 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -1402,9 +1402,82 @@ struct mlx5_rxq_ctrl *
 	 * the vector Rx will not be used.
 	 */
 	uint16_t desc_n = desc + config->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP;
+	const struct rte_eth_rxseg_split *qs_seg = rx_seg;
+	unsigned int tail_len;
 
-	if (non_scatter_min_mbuf_size > mb_len && !(offloads &
-						    DEV_RX_OFFLOAD_SCATTER)) {
+	tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl) +
+			   desc_n * sizeof(struct rte_mbuf *), 0, socket);
+	if (!tmpl) {
+		rte_errno = ENOMEM;
+		return NULL;
+	}
+	MLX5_ASSERT(n_seg && n_seg <= MLX5_MAX_RXQ_NSEG);
+	/*
+	 * Build the array of actual buffer offsets and lengths.
+	 * Pad with the buffers from the last memory pool if
+	 * needed to handle max size packets, replace zero length
+	 * with the buffer length from the pool.
+	 */
+	tail_len = max_rx_pkt_len;
+	do {
+		struct mlx5_eth_rxseg *hw_seg =
+					&tmpl->rxq.rxseg[tmpl->rxq.rxseg_n];
+		uint32_t buf_len, offset, seg_len;
+
+		/*
+		 * For the buffers beyond descriptions offset is zero,
+		 * the first buffer contains head room.
+		 */
+		buf_len = rte_pktmbuf_data_room_size(qs_seg->mp);
+		offset = (tmpl->rxq.rxseg_n >= n_seg ? 0 : qs_seg->offset) +
+			 (tmpl->rxq.rxseg_n ? 0 : RTE_PKTMBUF_HEADROOM);
+		/*
+		 * For the buffers beyond descriptions the length is
+		 * pool buffer length, zero lengths are replaced with
+		 * pool buffer length either.
+		 */
+		seg_len = tmpl->rxq.rxseg_n >= n_seg ? buf_len :
+						       qs_seg->length ?
+						       qs_seg->length :
+						       (buf_len - offset);
+		/* Check is done in long int, now overflows. */
+		if (buf_len < seg_len + offset) {
+			DRV_LOG(ERR, "port %u Rx queue %u: Split offset/length "
+				     "%u/%u can't be satisfied",
+				     dev->data->port_id, idx,
+				     qs_seg->length, qs_seg->offset);
+			rte_errno = EINVAL;
+			goto error;
+		}
+		if (seg_len > tail_len)
+			seg_len = buf_len - offset;
+		if (++tmpl->rxq.rxseg_n > MLX5_MAX_RXQ_NSEG) {
+			DRV_LOG(ERR,
+				"port %u too many SGEs (%u) needed to handle"
+				" requested maximum packet size %u, the maximum"
+				" supported are %u", dev->data->port_id,
+				tmpl->rxq.rxseg_n, max_rx_pkt_len,
+				MLX5_MAX_RXQ_NSEG);
+			rte_errno = ENOTSUP;
+			goto error;
+		}
+		/* Build the actual scattering element in the queue object. */
+		hw_seg->mp = qs_seg->mp;
+		MLX5_ASSERT(offset <= UINT16_MAX);
+		MLX5_ASSERT(seg_len <= UINT16_MAX);
+		hw_seg->offset = (uint16_t)offset;
+		hw_seg->length = (uint16_t)seg_len;
+		/*
+		 * Advance the segment descriptor, the padding is the based
+		 * on the attributes of the last descriptor.
+		 */
+		if (tmpl->rxq.rxseg_n < n_seg)
+			qs_seg++;
+		tail_len -= RTE_MIN(tail_len, seg_len);
+	} while (tail_len || !rte_is_power_of_2(tmpl->rxq.rxseg_n));
+	MLX5_ASSERT(tmpl->rxq.rxseg_n &&
+		    tmpl->rxq.rxseg_n <= MLX5_MAX_RXQ_NSEG);
+	if (tmpl->rxq.rxseg_n > 1 && !(offloads & DEV_RX_OFFLOAD_SCATTER)) {
 		DRV_LOG(ERR, "port %u Rx queue %u: Scatter offload is not"
 			" configured and no enough mbuf space(%u) to contain "
 			"the maximum RX packet length(%u) with head-room(%u)",
@@ -1438,7 +1511,7 @@ struct mlx5_rxq_ctrl *
 	 *  - The number of descs is more than the number of strides.
 	 *  - max_rx_pkt_len plus overhead is less than the max size
 	 *    of a stride or mprq_stride_size is specified by a user.
-	 *    Need to nake sure that there are enough stides to encap
+	 *    Need to make sure that there are enough strides to encap
 	 *    the maximum packet size in case mprq_stride_size is set.
 	 *  Otherwise, enable Rx scatter if necessary.
 	 */
@@ -1468,11 +1541,11 @@ struct mlx5_rxq_ctrl *
 			" strd_num_n = %u, strd_sz_n = %u",
 			dev->data->port_id, idx,
 			tmpl->rxq.strd_num_n, tmpl->rxq.strd_sz_n);
-	} else if (max_rx_pkt_len <= first_mb_free_size) {
+	} else if (tmpl->rxq.rxseg_n == 1) {
+		MLX5_ASSERT(max_rx_pkt_len <= first_mb_free_size);
 		tmpl->rxq.sges_n = 0;
 		max_lro_size = max_rx_pkt_len;
 	} else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
-		unsigned int size = non_scatter_min_mbuf_size;
 		unsigned int sges_n;
 
 		if (lro_on_queue && first_mb_free_size <
@@ -1487,7 +1560,7 @@ struct mlx5_rxq_ctrl *
 		 * Determine the number of SGEs needed for a full packet
 		 * and round it to the next power of two.
 		 */
-		sges_n = log2above((size / mb_len) + !!(size % mb_len));
+		sges_n = tmpl->rxq.rxseg_n;
 		if (sges_n > MLX5_MAX_LOG_RQ_SEGS) {
 			DRV_LOG(ERR,
 				"port %u too many SGEs (%u) needed to handle"
-- 
1.8.3.1


^ permalink raw reply	[flat|nested] 35+ messages in thread

* [dpdk-dev] [PATCH v4 3/6] net/mlx5: register multiple pool for Rx queue
  2020-10-26 11:54   ` [dpdk-dev] [PATCH v4 0/6] net/mlx5: add Rx buffer split support Viacheslav Ovsiienko
  2020-10-26 11:55     ` [dpdk-dev] [PATCH v4 1/6] net/mlx5: add extended Rx queue setup routine Viacheslav Ovsiienko
  2020-10-26 11:55     ` [dpdk-dev] [PATCH v4 2/6] net/mlx5: configure Rx queue to support split Viacheslav Ovsiienko
@ 2020-10-26 11:55     ` Viacheslav Ovsiienko
  2020-10-26 11:55     ` [dpdk-dev] [PATCH v4 4/6] net/mlx5: update Rx datapath to support split Viacheslav Ovsiienko
                       ` (3 subsequent siblings)
  6 siblings, 0 replies; 35+ messages in thread
From: Viacheslav Ovsiienko @ 2020-10-26 11:55 UTC (permalink / raw)
  To: dev; +Cc: thomas, matan, akozyrev, rasland, orika

The split feature for receiving packets was added to the mlx5
PMD; now an Rx queue can receive data into buffers belonging
to different pools, and the memory of all the involved pools
must be registered for DMA operations in order to allow the
hardware to store the data.

Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
---
 drivers/net/mlx5/mlx5_mr.c      |  3 +++
 drivers/net/mlx5/mlx5_trigger.c | 20 ++++++++++++--------
 2 files changed, 15 insertions(+), 8 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c
index dbcf0aa..c308ecc 100644
--- a/drivers/net/mlx5/mlx5_mr.c
+++ b/drivers/net/mlx5/mlx5_mr.c
@@ -536,6 +536,9 @@ struct mr_update_mp_data {
 		.ret = 0,
 	};
 
+	DRV_LOG(DEBUG, "Port %u Rx queue registering mp %s "
+		       "having %u chunks.", dev->data->port_id,
+		       mp->name, mp->nb_mem_chunks);
 	rte_mempool_mem_iter(mp, mlx5_mr_update_mp_cb, &data);
 	if (data.ret < 0 && rte_errno == ENXIO) {
 		/* Mempool may have externally allocated memory. */
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index 7735f02..19f2d66 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -145,18 +145,22 @@
 		dev->data->port_id, priv->sh->device_attr.max_sge);
 	for (i = 0; i != priv->rxqs_n; ++i) {
 		struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_get(dev, i);
-		struct rte_mempool *mp;
 
 		if (!rxq_ctrl)
 			continue;
 		if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD) {
-			/* Pre-register Rx mempool. */
-			mp = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
-			     rxq_ctrl->rxq.mprq_mp : rxq_ctrl->rxq.mp;
-			DRV_LOG(DEBUG, "Port %u Rx queue %u registering mp %s"
-				" having %u chunks.", dev->data->port_id,
-				rxq_ctrl->rxq.idx, mp->name, mp->nb_mem_chunks);
-			mlx5_mr_update_mp(dev, &rxq_ctrl->rxq.mr_ctrl, mp);
+			/* Pre-register Rx mempools. */
+			if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq)) {
+				mlx5_mr_update_mp(dev, &rxq_ctrl->rxq.mr_ctrl,
+						  rxq_ctrl->rxq.mprq_mp);
+			} else {
+				uint32_t s;
+
+				for (s = 0; s < rxq_ctrl->rxq.rxseg_n; s++)
+					mlx5_mr_update_mp
+						(dev, &rxq_ctrl->rxq.mr_ctrl,
+						rxq_ctrl->rxq.rxseg[s].mp);
+			}
 			ret = rxq_alloc_elts(rxq_ctrl);
 			if (ret)
 				goto error;
-- 
1.8.3.1


^ permalink raw reply	[flat|nested] 35+ messages in thread

* [dpdk-dev] [PATCH v4 4/6] net/mlx5: update Rx datapath to support split
  2020-10-26 11:54   ` [dpdk-dev] [PATCH v4 0/6] net/mlx5: add Rx buffer split support Viacheslav Ovsiienko
                       ` (2 preceding siblings ...)
  2020-10-26 11:55     ` [dpdk-dev] [PATCH v4 3/6] net/mlx5: register multiple pool for Rx queue Viacheslav Ovsiienko
@ 2020-10-26 11:55     ` Viacheslav Ovsiienko
  2020-10-26 11:55     ` [dpdk-dev] [PATCH v4 5/6] net/mlx5: report Rx segmentation capabilities Viacheslav Ovsiienko
                       ` (2 subsequent siblings)
  6 siblings, 0 replies; 35+ messages in thread
From: Viacheslav Ovsiienko @ 2020-10-26 11:55 UTC (permalink / raw)
  To: dev; +Cc: thomas, matan, akozyrev, rasland, orika

Only the regular rx_burst routine is updated to support split,
because the vectorized ones do not support scatter and MPRQ
does not support split at all.

Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
---
 drivers/net/mlx5/mlx5_rxq.c  | 11 +++++------
 drivers/net/mlx5/mlx5_rxtx.c |  3 ++-
 2 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 7695d62..f9aed38 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -212,9 +212,10 @@
 
 	/* Iterate on segments. */
 	for (i = 0; (i != elts_n); ++i) {
+		struct mlx5_eth_rxseg *seg = &rxq_ctrl->rxq.rxseg[i % sges_n];
 		struct rte_mbuf *buf;
 
-		buf = rte_pktmbuf_alloc(rxq_ctrl->rxq.mp);
+		buf = rte_pktmbuf_alloc(seg->mp);
 		if (buf == NULL) {
 			DRV_LOG(ERR, "port %u empty mbuf pool",
 				PORT_ID(rxq_ctrl->priv));
@@ -227,12 +228,10 @@
 		MLX5_ASSERT(rte_pktmbuf_data_len(buf) == 0);
 		MLX5_ASSERT(rte_pktmbuf_pkt_len(buf) == 0);
 		MLX5_ASSERT(!buf->next);
-		/* Only the first segment keeps headroom. */
-		if (i % sges_n)
-			SET_DATA_OFF(buf, 0);
+		SET_DATA_OFF(buf, seg->offset);
 		PORT(buf) = rxq_ctrl->rxq.port_id;
-		DATA_LEN(buf) = rte_pktmbuf_tailroom(buf);
-		PKT_LEN(buf) = DATA_LEN(buf);
+		DATA_LEN(buf) = seg->length;
+		PKT_LEN(buf) = seg->length;
 		NB_SEGS(buf) = 1;
 		(*rxq_ctrl->rxq.elts)[i] = buf;
 	}
diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index dbb427b..2ffacf8 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -1356,7 +1356,8 @@ enum mlx5_txcmp_code {
 		rte_prefetch0(seg);
 		rte_prefetch0(cqe);
 		rte_prefetch0(wqe);
-		rep = rte_mbuf_raw_alloc(rxq->mp);
+		/* Allocate the buf from the same pool. */
+		rep = rte_mbuf_raw_alloc(seg->pool);
 		if (unlikely(rep == NULL)) {
 			++rxq->stats.rx_nombuf;
 			if (!pkt) {
-- 
1.8.3.1


^ permalink raw reply	[flat|nested] 35+ messages in thread

* [dpdk-dev] [PATCH v4 5/6] net/mlx5: report Rx segmentation capabilities
  2020-10-26 11:54   ` [dpdk-dev] [PATCH v4 0/6] net/mlx5: add Rx buffer split support Viacheslav Ovsiienko
                       ` (3 preceding siblings ...)
  2020-10-26 11:55     ` [dpdk-dev] [PATCH v4 4/6] net/mlx5: update Rx datapath to support split Viacheslav Ovsiienko
@ 2020-10-26 11:55     ` Viacheslav Ovsiienko
  2020-10-26 11:55     ` [dpdk-dev] [PATCH v4 6/6] doc: add buffer split feature limitation to mlx5 guide Viacheslav Ovsiienko
  2020-10-26 15:25     ` [dpdk-dev] [PATCH v4 0/6] net/mlx5: add Rx buffer split support Raslan Darawsheh
  6 siblings, 0 replies; 35+ messages in thread
From: Viacheslav Ovsiienko @ 2020-10-26 11:55 UTC (permalink / raw)
  To: dev; +Cc: thomas, matan, akozyrev, rasland, orika

Add rte_eth_dev_info->rx_seg_capa parameters:
  - receiving to multiple pools is supported
  - buffer offsets are supported
  - no offset alignment requirement
  - reports the maximal number of segments
  - reports the buffer split offload flag

Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
---
 drivers/net/mlx5/mlx5_ethdev.c | 4 ++++
 drivers/net/mlx5/mlx5_rxq.c    | 1 +
 2 files changed, 5 insertions(+)

diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index c70cd30..fc04fc8 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -306,6 +306,10 @@
 	info->max_tx_queues = max;
 	info->max_mac_addrs = MLX5_MAX_UC_MAC_ADDRESSES;
 	info->rx_queue_offload_capa = mlx5_get_rx_queue_offloads(dev);
+	info->rx_seg_capa.max_nseg = MLX5_MAX_RXQ_NSEG;
+	info->rx_seg_capa.multi_pools = 1;
+	info->rx_seg_capa.offset_allowed = 1;
+	info->rx_seg_capa.offset_align_log2 = 0;
 	info->rx_offload_capa = (mlx5_get_rx_port_offloads() |
 				 info->rx_queue_offload_capa);
 	info->tx_offload_capa = mlx5_get_tx_port_offloads(dev);
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index f9aed38..1cc477a 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -402,6 +402,7 @@
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_dev_config *config = &priv->config;
 	uint64_t offloads = (DEV_RX_OFFLOAD_SCATTER |
+			     RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT |
 			     DEV_RX_OFFLOAD_TIMESTAMP |
 			     DEV_RX_OFFLOAD_JUMBO_FRAME |
 			     DEV_RX_OFFLOAD_RSS_HASH);
-- 
1.8.3.1


^ permalink raw reply	[flat|nested] 35+ messages in thread

* [dpdk-dev] [PATCH v4 6/6] doc: add buffer split feature limitation to mlx5 guide
  2020-10-26 11:54   ` [dpdk-dev] [PATCH v4 0/6] net/mlx5: add Rx buffer split support Viacheslav Ovsiienko
                       ` (4 preceding siblings ...)
  2020-10-26 11:55     ` [dpdk-dev] [PATCH v4 5/6] net/mlx5: report Rx segmentation capabilities Viacheslav Ovsiienko
@ 2020-10-26 11:55     ` Viacheslav Ovsiienko
  2020-10-26 15:25     ` [dpdk-dev] [PATCH v4 0/6] net/mlx5: add Rx buffer split support Raslan Darawsheh
  6 siblings, 0 replies; 35+ messages in thread
From: Viacheslav Ovsiienko @ 2020-10-26 11:55 UTC (permalink / raw)
  To: dev; +Cc: thomas, matan, akozyrev, rasland, orika

The buffer split feature is mentioned in the mlx5 PMD
documentation; the limitation description is added
as well.

Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
---
 doc/guides/nics/mlx5.rst | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/doc/guides/nics/mlx5.rst b/doc/guides/nics/mlx5.rst
index 66524f1..8dc7c62 100644
--- a/doc/guides/nics/mlx5.rst
+++ b/doc/guides/nics/mlx5.rst
@@ -59,7 +59,8 @@ Features
 
 - Multi arch support: x86_64, POWER8, ARMv8, i686.
 - Multiple TX and RX queues.
-- Support for scattered TX and RX frames.
+- Support for scattered TX frames.
+- Advanced support for scattered Rx frames with tunable buffer attributes.
 - IPv4, IPv6, TCPv4, TCPv6, UDPv4 and UDPv6 RSS on any number of queues.
 - RSS using different combinations of fields: L3 only, L4 only or both,
   and source only, destination only or both.
@@ -187,6 +188,9 @@ Limitations
    the device. In case of ungraceful program termination, some entries may
    remain present and should be removed manually by other means.
 
+- Buffer split offload is supported with regular Rx burst routine only,
+  no MPRQ feature or vectorized code can be engaged.
+
 - When Multi-Packet Rx queue is configured (``mprq_en``), a Rx packet can be
   externally attached to a user-provided mbuf with having EXT_ATTACHED_MBUF in
   ol_flags. As the mempool for the external buffer is managed by PMD, all the
-- 
1.8.3.1


^ permalink raw reply	[flat|nested] 35+ messages in thread

* Re: [dpdk-dev] [PATCH v4 0/6] net/mlx5: add Rx buffer split support
  2020-10-26 11:54   ` [dpdk-dev] [PATCH v4 0/6] net/mlx5: add Rx buffer split support Viacheslav Ovsiienko
                       ` (5 preceding siblings ...)
  2020-10-26 11:55     ` [dpdk-dev] [PATCH v4 6/6] doc: add buffer split feature limitation to mlx5 guide Viacheslav Ovsiienko
@ 2020-10-26 15:25     ` Raslan Darawsheh
  2020-10-26 17:04       ` Ferruh Yigit
  6 siblings, 1 reply; 35+ messages in thread
From: Raslan Darawsheh @ 2020-10-26 15:25 UTC (permalink / raw)
  To: Slava Ovsiienko, dev
  Cc: NBU-Contact-Thomas Monjalon, Matan Azrad, Alexander Kozyrev, Ori Kam

Hi,

> -----Original Message-----
> From: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
> Sent: Monday, October 26, 2020 1:55 PM
> To: dev@dpdk.org
> Cc: NBU-Contact-Thomas Monjalon <thomas@monjalon.net>; Matan Azrad
> <matan@nvidia.com>; Alexander Kozyrev <akozyrev@nvidia.com>; Raslan
> Darawsheh <rasland@nvidia.com>; Ori Kam <orika@nvidia.com>
> Subject: [PATCH v4 0/6] net/mlx5: add Rx buffer split support
> 
> This patch adds to PMD the functionality for the receiving
> buffer split feasture [1]
> 
> [1]
> https://nam11.safelinks.protection.outlook.com/?url=http%3A%2F%2Fpatch
> es.dpdk.org%2Fpatch%2F81154%2F&amp;data=02%7C01%7Crasland%40nvid
> ia.com%7Ccf4913c6b58346b50b1b08d879a60608%7C43083d15727340c1b7db3
> 9efd9ccc17a%7C0%7C0%7C637393101256743078&amp;sdata=fyiL3PS8r8wv8u
> pyOYUtITkVqId9DZsF9LvSJQL9fdM%3D&amp;reserved=0
> 
> Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
> 
> ---
> v1:
> https://nam11.safelinks.protection.outlook.com/?url=http%3A%2F%2Fpatch
> es.dpdk.org%2Fpatch%2F81808%2F&amp;data=02%7C01%7Crasland%40nvid
> ia.com%7Ccf4913c6b58346b50b1b08d879a60608%7C43083d15727340c1b7db3
> 9efd9ccc17a%7C0%7C0%7C637393101256743078&amp;sdata=NPBFlGmVN6bi
> GUpzHC%2FrOVmdMoK2fkYRC0%2FDB%2BNlNno%3D&amp;reserved=0
> 
> v2:
> https://nam11.safelinks.protection.outlook.com/?url=http%3A%2F%2Fpatch
> es.dpdk.org%2Fpatch%2F81923%2F&amp;data=02%7C01%7Crasland%40nvid
> ia.com%7Ccf4913c6b58346b50b1b08d879a60608%7C43083d15727340c1b7db3
> 9efd9ccc17a%7C0%7C0%7C637393101256743078&amp;sdata=YwYjMz3jrSYU6
> RBgwl0DmQfmjwwymNJTFjMdx0rsm2U%3D&amp;reserved=0
>     - typos
>     - documentation is updated
> 
> v3:
> https://nam11.safelinks.protection.outlook.com/?url=http%3A%2F%2Fpatch
> es.dpdk.org%2Fpatch%2F82177%2F&amp;data=02%7C01%7Crasland%40nvid
> ia.com%7Ccf4913c6b58346b50b1b08d879a60608%7C43083d15727340c1b7db3
> 9efd9ccc17a%7C0%7C0%7C637393101256743078&amp;sdata=HVvLbWS0sJxu
> v%2Bc%2BKIMqllBq3edC4v0GD%2BtrwS7%2FsRo%3D&amp;reserved=0
>     - extra parameter checks in PMD rx_queue_setup removed
>     - minor optimizations in PMD
> 
> v4: - rebasing
> 
> Viacheslav Ovsiienko (6):
>   net/mlx5: add extended Rx queue setup routine
>   net/mlx5: configure Rx queue to support split
>   net/mlx5: register multiple pool for Rx queue
>   net/mlx5: update Rx datapath to support split
>   net/mlx5: report Rx segmentation capabilities
>   doc: add buffer split feature limitation to mlx5 guide
> 
>  doc/guides/nics/mlx5.rst        |   6 +-
>  drivers/net/mlx5/mlx5.h         |   3 +
>  drivers/net/mlx5/mlx5_ethdev.c  |   4 ++
>  drivers/net/mlx5/mlx5_mr.c      |   3 +
>  drivers/net/mlx5/mlx5_rxq.c     | 136
> +++++++++++++++++++++++++++++++++++-----
>  drivers/net/mlx5/mlx5_rxtx.c    |   3 +-
>  drivers/net/mlx5/mlx5_rxtx.h    |  13 +++-
>  drivers/net/mlx5/mlx5_trigger.c |  20 +++---
>  8 files changed, 160 insertions(+), 28 deletions(-)
> 
> --
> 1.8.3.1

Series applied to next-net-mlx,

Kindest regards,
Raslan Darawsheh

^ permalink raw reply	[flat|nested] 35+ messages in thread

* Re: [dpdk-dev] [PATCH v4 0/6] net/mlx5: add Rx buffer split support
  2020-10-26 15:25     ` [dpdk-dev] [PATCH v4 0/6] net/mlx5: add Rx buffer split support Raslan Darawsheh
@ 2020-10-26 17:04       ` Ferruh Yigit
  2020-10-26 17:38         ` Slava Ovsiienko
  0 siblings, 1 reply; 35+ messages in thread
From: Ferruh Yigit @ 2020-10-26 17:04 UTC (permalink / raw)
  To: Raslan Darawsheh, Slava Ovsiienko, dev
  Cc: NBU-Contact-Thomas Monjalon, Matan Azrad, Alexander Kozyrev, Ori Kam

On 10/26/2020 3:25 PM, Raslan Darawsheh wrote:
> Hi,
> 
>> -----Original Message-----
>> From: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
>> Sent: Monday, October 26, 2020 1:55 PM
>> To: dev@dpdk.org
>> Cc: NBU-Contact-Thomas Monjalon <thomas@monjalon.net>; Matan Azrad
>> <matan@nvidia.com>; Alexander Kozyrev <akozyrev@nvidia.com>; Raslan
>> Darawsheh <rasland@nvidia.com>; Ori Kam <orika@nvidia.com>
>> Subject: [PATCH v4 0/6] net/mlx5: add Rx buffer split support
>>
>> This patch adds to PMD the functionality for the receiving
>> buffer split feature [1]
>>
>> [1]
>> https://nam11.safelinks.protection.outlook.com/?url=http%3A%2F%2Fpatch
>> es.dpdk.org%2Fpatch%2F81154%2F&amp;data=02%7C01%7Crasland%40nvid
>> ia.com%7Ccf4913c6b58346b50b1b08d879a60608%7C43083d15727340c1b7db3
>> 9efd9ccc17a%7C0%7C0%7C637393101256743078&amp;sdata=fyiL3PS8r8wv8u
>> pyOYUtITkVqId9DZsF9LvSJQL9fdM%3D&amp;reserved=0
>>
>> Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
>>
>> ---
>> v1:
>> https://nam11.safelinks.protection.outlook.com/?url=http%3A%2F%2Fpatch
>> es.dpdk.org%2Fpatch%2F81808%2F&amp;data=02%7C01%7Crasland%40nvid
>> ia.com%7Ccf4913c6b58346b50b1b08d879a60608%7C43083d15727340c1b7db3
>> 9efd9ccc17a%7C0%7C0%7C637393101256743078&amp;sdata=NPBFlGmVN6bi
>> GUpzHC%2FrOVmdMoK2fkYRC0%2FDB%2BNlNno%3D&amp;reserved=0
>>
>> v2:
>> https://nam11.safelinks.protection.outlook.com/?url=http%3A%2F%2Fpatch
>> es.dpdk.org%2Fpatch%2F81923%2F&amp;data=02%7C01%7Crasland%40nvid
>> ia.com%7Ccf4913c6b58346b50b1b08d879a60608%7C43083d15727340c1b7db3
>> 9efd9ccc17a%7C0%7C0%7C637393101256743078&amp;sdata=YwYjMz3jrSYU6
>> RBgwl0DmQfmjwwymNJTFjMdx0rsm2U%3D&amp;reserved=0
>>      - typos
>>      - documentation is updated
>>
>> v3:
>> https://nam11.safelinks.protection.outlook.com/?url=http%3A%2F%2Fpatch
>> es.dpdk.org%2Fpatch%2F82177%2F&amp;data=02%7C01%7Crasland%40nvid
>> ia.com%7Ccf4913c6b58346b50b1b08d879a60608%7C43083d15727340c1b7db3
>> 9efd9ccc17a%7C0%7C0%7C637393101256743078&amp;sdata=HVvLbWS0sJxu
>> v%2Bc%2BKIMqllBq3edC4v0GD%2BtrwS7%2FsRo%3D&amp;reserved=0
>>      - extra parameter checks in PMD rx_queue_setup removed
>>      - minor optimizations in PMD
>>
>> v4: - rebasing
>>
>> Viacheslav Ovsiienko (6):
>>    net/mlx5: add extended Rx queue setup routine
>>    net/mlx5: configure Rx queue to support split
>>    net/mlx5: register multiple pool for Rx queue
>>    net/mlx5: update Rx datapath to support split
>>    net/mlx5: report Rx segmentation capabilities
>>    doc: add buffer split feature limitation to mlx5 guide
>>
>>   doc/guides/nics/mlx5.rst        |   6 +-
>>   drivers/net/mlx5/mlx5.h         |   3 +
>>   drivers/net/mlx5/mlx5_ethdev.c  |   4 ++
>>   drivers/net/mlx5/mlx5_mr.c      |   3 +
>>   drivers/net/mlx5/mlx5_rxq.c     | 136
>> +++++++++++++++++++++++++++++++++++-----
>>   drivers/net/mlx5/mlx5_rxtx.c    |   3 +-
>>   drivers/net/mlx5/mlx5_rxtx.h    |  13 +++-
>>   drivers/net/mlx5/mlx5_trigger.c |  20 +++---
>>   8 files changed, 160 insertions(+), 28 deletions(-)
>>
>> --
>> 1.8.3.1
> 
> Series applied to next-net-mlx,
> 

The feature was referenced with a different name in each commit, I tried to unify 
it as "Rx buffer split" in next-net.
Can you please double check the updated commit log/titles?

^ permalink raw reply	[flat|nested] 35+ messages in thread

* [dpdk-dev] [PATCH] net/mlx5: fix Rx queue initialization for scattered segment
  2020-10-22 15:42 ` [dpdk-dev] [PATCH 1/5] net/mlx5: add extended Rx queue setup routine Viacheslav Ovsiienko
                     ` (2 preceding siblings ...)
  2020-10-26 11:54   ` [dpdk-dev] [PATCH v4 0/6] net/mlx5: add Rx buffer split support Viacheslav Ovsiienko
@ 2020-10-26 17:17   ` Viacheslav Ovsiienko
  2020-10-26 18:07     ` Raslan Darawsheh
  3 siblings, 1 reply; 35+ messages in thread
From: Viacheslav Ovsiienko @ 2020-10-26 17:17 UTC (permalink / raw)
  To: dev; +Cc: rasland

During integration/rebase the following bugs were introduced:

- double memory allocation for queue structure resulting in
  losing the part of configuration settings and following
  crash

- the erroneous fix for the segment logarithm

Fixes: 919ef3e26cff ("net/mlx5: configure Rx queue to support split")

Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
 drivers/net/mlx5/mlx5.h     |  1 +
 drivers/net/mlx5/mlx5_rxq.c | 11 +----------
 2 files changed, 2 insertions(+), 10 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 258be03..8d65828 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -730,6 +730,7 @@ struct mlx5_ind_table_obj {
 };
 
 /* Hash Rx queue. */
+__extension__
 struct mlx5_hrxq {
 	ILIST_ENTRY(uint32_t)next; /* Index to the next element. */
 	rte_atomic32_t refcnt; /* Reference counter. */
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 1cc477a..4e17535 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -1486,15 +1486,6 @@ struct mlx5_rxq_ctrl *
 		rte_errno = ENOSPC;
 		return NULL;
 	}
-	tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
-		sizeof(*tmpl) + desc_n * sizeof(struct rte_mbuf *) +
-		(desc >> mprq_stride_nums) * sizeof(struct mlx5_mprq_buf *),
-		0, socket);
-
-	if (!tmpl) {
-		rte_errno = ENOMEM;
-		return NULL;
-	}
 	tmpl->type = MLX5_RXQ_TYPE_STANDARD;
 	if (mlx5_mr_btree_init(&tmpl->rxq.mr_ctrl.cache_bh,
 			       MLX5_MR_BTREE_CACHE_N, socket)) {
@@ -1560,7 +1551,7 @@ struct mlx5_rxq_ctrl *
 		 * Determine the number of SGEs needed for a full packet
 		 * and round it to the next power of two.
 		 */
-		sges_n = tmpl->rxq.rxseg_n;
+		sges_n = log2above(tmpl->rxq.rxseg_n);
 		if (sges_n > MLX5_MAX_LOG_RQ_SEGS) {
 			DRV_LOG(ERR,
 				"port %u too many SGEs (%u) needed to handle"
-- 
1.8.3.1


^ permalink raw reply	[flat|nested] 35+ messages in thread

* Re: [dpdk-dev] [PATCH v4 0/6] net/mlx5: add Rx buffer split support
  2020-10-26 17:04       ` Ferruh Yigit
@ 2020-10-26 17:38         ` Slava Ovsiienko
  2020-10-27 11:05           ` Ferruh Yigit
  0 siblings, 1 reply; 35+ messages in thread
From: Slava Ovsiienko @ 2020-10-26 17:38 UTC (permalink / raw)
  To: Ferruh Yigit, Raslan Darawsheh, dev
  Cc: NBU-Contact-Thomas Monjalon, Matan Azrad, Alexander Kozyrev, Ori Kam

Hi,  Ferruh

PSB
> -----Original Message-----
> From: Ferruh Yigit <ferruh.yigit@intel.com>
> Sent: Monday, October 26, 2020 19:04
> To: Raslan Darawsheh <rasland@nvidia.com>; Slava Ovsiienko
> <viacheslavo@nvidia.com>; dev@dpdk.org
> Cc: NBU-Contact-Thomas Monjalon <thomas@monjalon.net>; Matan Azrad
> <matan@nvidia.com>; Alexander Kozyrev <akozyrev@nvidia.com>; Ori Kam
> <orika@nvidia.com>
> Subject: Re: [dpdk-dev] [PATCH v4 0/6] net/mlx5: add Rx buffer split support
> 
> On 10/26/2020 3:25 PM, Raslan Darawsheh wrote:
> > Hi,
> >
> >> -----Original Message-----
> >> From: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
> >> Sent: Monday, October 26, 2020 1:55 PM
> >> To: dev@dpdk.org
> >> Cc: NBU-Contact-Thomas Monjalon <thomas@monjalon.net>; Matan Azrad
> >> <matan@nvidia.com>; Alexander Kozyrev <akozyrev@nvidia.com>; Raslan
> >> Darawsheh <rasland@nvidia.com>; Ori Kam <orika@nvidia.com>
> >> Subject: [PATCH v4 0/6] net/mlx5: add Rx buffer split support
> >>
> >> This patch adds to PMD the functionality for the receiving buffer
> >> split feature [1]
> >>
> >> [1]
> >> https://nam11.safelinks.protection.outlook.com/?url=http%3A%2F%2Fpatc
> >> h
> es.dpdk.org%2Fpatch%2F81154%2F&amp;data=02%7C01%7Crasland%40nvid
> >>
> ia.com%7Ccf4913c6b58346b50b1b08d879a60608%7C43083d15727340c1b7db
> 3
> >>
> 9efd9ccc17a%7C0%7C0%7C637393101256743078&amp;sdata=fyiL3PS8r8wv8u
> >> pyOYUtITkVqId9DZsF9LvSJQL9fdM%3D&amp;reserved=0
> >>
> >> Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
> >>
> >> ---
> >> v1:
> >> https://nam11.safelinks.protection.outlook.com/?url=http%3A%2F%2Fpatc
> >> h
> es.dpdk.org%2Fpatch%2F81808%2F&amp;data=02%7C01%7Crasland%40nvid
> >>
> ia.com%7Ccf4913c6b58346b50b1b08d879a60608%7C43083d15727340c1b7db
> 3
> >>
> 9efd9ccc17a%7C0%7C0%7C637393101256743078&amp;sdata=NPBFlGmVN6bi
> >> GUpzHC%2FrOVmdMoK2fkYRC0%2FDB%2BNlNno%3D&amp;reserved=0
> >>
> >> v2:
> >> https://nam11.safelinks.protection.outlook.com/?url=http%3A%2F%2Fpatc
> >> h
> es.dpdk.org%2Fpatch%2F81923%2F&amp;data=02%7C01%7Crasland%40nvid
> >>
> ia.com%7Ccf4913c6b58346b50b1b08d879a60608%7C43083d15727340c1b7db
> 3
> >>
> 9efd9ccc17a%7C0%7C0%7C637393101256743078&amp;sdata=YwYjMz3jrSYU6
> >> RBgwl0DmQfmjwwymNJTFjMdx0rsm2U%3D&amp;reserved=0
> >>      - typos
> >>      - documentation is updated
> >>
> >> v3:
> >> https://nam11.safelinks.protection.outlook.com/?url=http%3A%2F%2Fpatc
> >> h
> es.dpdk.org%2Fpatch%2F82177%2F&amp;data=02%7C01%7Crasland%40nvid
> >>
> ia.com%7Ccf4913c6b58346b50b1b08d879a60608%7C43083d15727340c1b7db
> 3
> >>
> 9efd9ccc17a%7C0%7C0%7C637393101256743078&amp;sdata=HVvLbWS0sJxu
> >> v%2Bc%2BKIMqllBq3edC4v0GD%2BtrwS7%2FsRo%3D&amp;reserved=0
> >>      - extra parameter checks in PMD rx_queue_setup removed
> >>      - minor optimizations in PMD
> >>
> >> v4: - rebasing
> >>
> >> Viacheslav Ovsiienko (6):
> >>    net/mlx5: add extended Rx queue setup routine
> >>    net/mlx5: configure Rx queue to support split
> >>    net/mlx5: register multiple pool for Rx queue
> >>    net/mlx5: update Rx datapath to support split
> >>    net/mlx5: report Rx segmentation capabilities
> >>    doc: add buffer split feature limitation to mlx5 guide
> >>
> >>   doc/guides/nics/mlx5.rst        |   6 +-
> >>   drivers/net/mlx5/mlx5.h         |   3 +
> >>   drivers/net/mlx5/mlx5_ethdev.c  |   4 ++
> >>   drivers/net/mlx5/mlx5_mr.c      |   3 +
> >>   drivers/net/mlx5/mlx5_rxq.c     | 136
> >> +++++++++++++++++++++++++++++++++++-----
> >>   drivers/net/mlx5/mlx5_rxtx.c    |   3 +-
> >>   drivers/net/mlx5/mlx5_rxtx.h    |  13 +++-
> >>   drivers/net/mlx5/mlx5_trigger.c |  20 +++---
> >>   8 files changed, 160 insertions(+), 28 deletions(-)
> >>
> >> --
> >> 1.8.3.1
> >
> > Series applied to next-net-mlx,
> >
> 
> The feature was references with different name in each commit, I tried to unify
> it as "Rx buffer split" in next-net.
> Can you please double check the updated commit log/titles?

>>	doc: add Rx buffer split limitation to mlx5 guide
>>	net/mlx5: report Rx buffer split capabilities
OK about above.

>>	net/mlx5: support Rx buffer split
It would be better: "net/mlx5: support Rx buffer split on datapath"

>>	net/mlx5: register multiple pool for Rx queue
OK

>>	net/mlx5: configure Rx buffer split
It would be better: "net/mlx5: configure Rx queue for buffer split"

>>	net/mlx5: receive Rx buffer split description
IMO, it would be better: "net/mlx5: handle Rx buffer split description"
or 
"net/mlx5: support Rx buffer split description"

Could you, please, also squash the hotfix:
http://patches.dpdk.org/patch/82218/

Thanks in advance,
Slava


^ permalink raw reply	[flat|nested] 35+ messages in thread

* Re: [dpdk-dev] [PATCH] net/mlx5: fix Rx queue initialization for scattered segment
  2020-10-26 17:17   ` [dpdk-dev] [PATCH] net/mlx5: fix Rx queue initialization for scattered segment Viacheslav Ovsiienko
@ 2020-10-26 18:07     ` Raslan Darawsheh
  0 siblings, 0 replies; 35+ messages in thread
From: Raslan Darawsheh @ 2020-10-26 18:07 UTC (permalink / raw)
  To: Slava Ovsiienko, dev

Hi,

> -----Original Message-----
> From: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
> Sent: Monday, October 26, 2020 7:18 PM
> To: dev@dpdk.org
> Cc: Raslan Darawsheh <rasland@nvidia.com>
> Subject: [PATCH] net/mlx5: fix Rx queue initialization for scattered segment
> 
> During integration/rebase the following bugs were introduced:
> 
> - double memory allocation for queue structure resulting in
>   losing the part of configuration settings and following
>   crash
> 
> - the erroneous fix for the segment logarithm
> 
> Fixes: 919ef3e26cff ("net/mlx5: configure Rx queue to support split")
> 
> Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
> ---
>  drivers/net/mlx5/mlx5.h     |  1 +
>  drivers/net/mlx5/mlx5_rxq.c | 11 +----------
>  2 files changed, 2 insertions(+), 10 deletions(-)
> 
> diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
> index 258be03..8d65828 100644
> --- a/drivers/net/mlx5/mlx5.h
> +++ b/drivers/net/mlx5/mlx5.h
> @@ -730,6 +730,7 @@ struct mlx5_ind_table_obj {
>  };
> 
>  /* Hash Rx queue. */
> +__extension__
>  struct mlx5_hrxq {
>  	ILIST_ENTRY(uint32_t)next; /* Index to the next element. */
>  	rte_atomic32_t refcnt; /* Reference counter. */
> diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
> index 1cc477a..4e17535 100644
> --- a/drivers/net/mlx5/mlx5_rxq.c
> +++ b/drivers/net/mlx5/mlx5_rxq.c
> @@ -1486,15 +1486,6 @@ struct mlx5_rxq_ctrl *
>  		rte_errno = ENOSPC;
>  		return NULL;
>  	}
> -	tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
> -		sizeof(*tmpl) + desc_n * sizeof(struct rte_mbuf *) +
> -		(desc >> mprq_stride_nums) * sizeof(struct mlx5_mprq_buf
> *),
> -		0, socket);
> -
> -	if (!tmpl) {
> -		rte_errno = ENOMEM;
> -		return NULL;
> -	}
>  	tmpl->type = MLX5_RXQ_TYPE_STANDARD;
>  	if (mlx5_mr_btree_init(&tmpl->rxq.mr_ctrl.cache_bh,
>  			       MLX5_MR_BTREE_CACHE_N, socket)) {
> @@ -1560,7 +1551,7 @@ struct mlx5_rxq_ctrl *
>  		 * Determine the number of SGEs needed for a full packet
>  		 * and round it to the next power of two.
>  		 */
> -		sges_n = tmpl->rxq.rxseg_n;
> +		sges_n = log2above(tmpl->rxq.rxseg_n);
>  		if (sges_n > MLX5_MAX_LOG_RQ_SEGS) {
>  			DRV_LOG(ERR,
>  				"port %u too many SGEs (%u) needed to
> handle"
> --
> 1.8.3.1

Patch squashed into relevant commit in master-net-mlx,

Kindest regards,
Raslan Darawsheh

^ permalink raw reply	[flat|nested] 35+ messages in thread

* Re: [dpdk-dev] [PATCH v4 0/6] net/mlx5: add Rx buffer split support
  2020-10-26 17:38         ` Slava Ovsiienko
@ 2020-10-27 11:05           ` Ferruh Yigit
  2020-10-27 19:05             ` Slava Ovsiienko
  0 siblings, 1 reply; 35+ messages in thread
From: Ferruh Yigit @ 2020-10-27 11:05 UTC (permalink / raw)
  To: Slava Ovsiienko, Raslan Darawsheh, dev
  Cc: NBU-Contact-Thomas Monjalon, Matan Azrad, Alexander Kozyrev, Ori Kam

On 10/26/2020 5:38 PM, Slava Ovsiienko wrote:
> Hi,  Ferruh
> 
> PSB
>> -----Original Message-----
>> From: Ferruh Yigit <ferruh.yigit@intel.com>
>> Sent: Monday, October 26, 2020 19:04
>> To: Raslan Darawsheh <rasland@nvidia.com>; Slava Ovsiienko
>> <viacheslavo@nvidia.com>; dev@dpdk.org
>> Cc: NBU-Contact-Thomas Monjalon <thomas@monjalon.net>; Matan Azrad
>> <matan@nvidia.com>; Alexander Kozyrev <akozyrev@nvidia.com>; Ori Kam
>> <orika@nvidia.com>
>> Subject: Re: [dpdk-dev] [PATCH v4 0/6] net/mlx5: add Rx buffer split support
>>
>> On 10/26/2020 3:25 PM, Raslan Darawsheh wrote:
>>> Hi,
>>>
>>>> -----Original Message-----
>>>> From: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
>>>> Sent: Monday, October 26, 2020 1:55 PM
>>>> To: dev@dpdk.org
>>>> Cc: NBU-Contact-Thomas Monjalon <thomas@monjalon.net>; Matan Azrad
>>>> <matan@nvidia.com>; Alexander Kozyrev <akozyrev@nvidia.com>; Raslan
>>>> Darawsheh <rasland@nvidia.com>; Ori Kam <orika@nvidia.com>
>>>> Subject: [PATCH v4 0/6] net/mlx5: add Rx buffer split support
>>>>
>>>> This patch adds to PMD the functionality for the receiving buffer
>>>> split feasture [1]
>>>>
>>>> [1]
>>>> https://nam11.safelinks.protection.outlook.com/?url=http%3A%2F%2Fpatc
>>>> h
>> es.dpdk.org%2Fpatch%2F81154%2F&amp;data=02%7C01%7Crasland%40nvid
>>>>
>> ia.com%7Ccf4913c6b58346b50b1b08d879a60608%7C43083d15727340c1b7db
>> 3
>>>>
>> 9efd9ccc17a%7C0%7C0%7C637393101256743078&amp;sdata=fyiL3PS8r8wv8u
>>>> pyOYUtITkVqId9DZsF9LvSJQL9fdM%3D&amp;reserved=0
>>>>
>>>> Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
>>>>
>>>> ---
>>>> v1:
>>>> https://nam11.safelinks.protection.outlook.com/?url=http%3A%2F%2Fpatc
>>>> h
>> es.dpdk.org%2Fpatch%2F81808%2F&amp;data=02%7C01%7Crasland%40nvid
>>>>
>> ia.com%7Ccf4913c6b58346b50b1b08d879a60608%7C43083d15727340c1b7db
>> 3
>>>>
>> 9efd9ccc17a%7C0%7C0%7C637393101256743078&amp;sdata=NPBFlGmVN6bi
>>>> GUpzHC%2FrOVmdMoK2fkYRC0%2FDB%2BNlNno%3D&amp;reserved=0
>>>>
>>>> v2:
>>>> https://nam11.safelinks.protection.outlook.com/?url=http%3A%2F%2Fpatc
>>>> h
>> es.dpdk.org%2Fpatch%2F81923%2F&amp;data=02%7C01%7Crasland%40nvid
>>>>
>> ia.com%7Ccf4913c6b58346b50b1b08d879a60608%7C43083d15727340c1b7db
>> 3
>>>>
>> 9efd9ccc17a%7C0%7C0%7C637393101256743078&amp;sdata=YwYjMz3jrSYU6
>>>> RBgwl0DmQfmjwwymNJTFjMdx0rsm2U%3D&amp;reserved=0
>>>>       - typos
>>>>       - documentation is updated
>>>>
>>>> v3:
>>>> https://nam11.safelinks.protection.outlook.com/?url=http%3A%2F%2Fpatc
>>>> h
>> es.dpdk.org%2Fpatch%2F82177%2F&amp;data=02%7C01%7Crasland%40nvid
>>>>
>> ia.com%7Ccf4913c6b58346b50b1b08d879a60608%7C43083d15727340c1b7db
>> 3
>>>>
>> 9efd9ccc17a%7C0%7C0%7C637393101256743078&amp;sdata=HVvLbWS0sJxu
>>>> v%2Bc%2BKIMqllBq3edC4v0GD%2BtrwS7%2FsRo%3D&amp;reserved=0
>>>>       - extra parameter checks in PMD rx_queue_setup removed
>>>>       - minor optimizations in PMD
>>>>
>>>> v4: - rebasing
>>>>
>>>> Viacheslav Ovsiienko (6):
>>>>     net/mlx5: add extended Rx queue setup routine
>>>>     net/mlx5: configure Rx queue to support split
>>>>     net/mlx5: register multiple pool for Rx queue
>>>>     net/mlx5: update Rx datapath to support split
>>>>     net/mlx5: report Rx segmentation capabilities
>>>>     doc: add buffer split feature limitation to mlx5 guide
>>>>
>>>>    doc/guides/nics/mlx5.rst        |   6 +-
>>>>    drivers/net/mlx5/mlx5.h         |   3 +
>>>>    drivers/net/mlx5/mlx5_ethdev.c  |   4 ++
>>>>    drivers/net/mlx5/mlx5_mr.c      |   3 +
>>>>    drivers/net/mlx5/mlx5_rxq.c     | 136
>>>> +++++++++++++++++++++++++++++++++++-----
>>>>    drivers/net/mlx5/mlx5_rxtx.c    |   3 +-
>>>>    drivers/net/mlx5/mlx5_rxtx.h    |  13 +++-
>>>>    drivers/net/mlx5/mlx5_trigger.c |  20 +++---
>>>>    8 files changed, 160 insertions(+), 28 deletions(-)
>>>>
>>>> --
>>>> 1.8.3.1
>>>
>>> Series applied to next-net-mlx,
>>>
>>
>> The feature was references with different name in each commit, I tried to unify
>> it as "Rx buffer split" in next-net.
>> Can you please double check the updated commit log/titles?
> 
>>> 	doc: add Rx buffer split limitation to mlx5 guide
>>> 	net/mlx5: report Rx buffer split capabilities
> OK about above.
> 
>>> 	net/mlx5: support Rx buffer split
> It would be better: "net/mlx5: support Rx buffer split on datapath
> 

Isn't the supporting the "Rx buffer split" mean supporting it on the datapath, 
where else it can be supported, the "on datapath" looks redundant to me.

>>> 	net/mlx5: register multiple pool for Rx queue
> OK
> 
>>> 	net/mlx5: configure Rx buffer split
> It would be better: "net/mlx5: configure Rx queue for buffer split"
> 

Like above, isn't the configure "Rx buffer split" mean configuring Rx queue for 
it, "Rx queue" looks redundant to me.

For both above, if you have strong opinion to update them, I can. But I prefer 
shorter versions.

>>> 	net/mlx5: receive Rx buffer split description
> IMO, it would be better: "net/mlx5: handle Rx buffer split description"
> or
> "net/mlx5: support Rx buffer split description"
> 

OK to use "net/mlx5: support Rx buffer split description"

> Could you, please, also squash the hotfix:
> http://patches.dpdk.org/patch/82218/
>

OK

^ permalink raw reply	[flat|nested] 35+ messages in thread

* Re: [dpdk-dev] [PATCH v4 0/6] net/mlx5: add Rx buffer split support
  2020-10-27 11:05           ` Ferruh Yigit
@ 2020-10-27 19:05             ` Slava Ovsiienko
  2020-10-29 13:09               ` Ferruh Yigit
  0 siblings, 1 reply; 35+ messages in thread
From: Slava Ovsiienko @ 2020-10-27 19:05 UTC (permalink / raw)
  To: Ferruh Yigit, Raslan Darawsheh, dev
  Cc: NBU-Contact-Thomas Monjalon, Matan Azrad, Alexander Kozyrev, Ori Kam

> -----Original Message-----
> From: Ferruh Yigit <ferruh.yigit@intel.com>
> Sent: Tuesday, October 27, 2020 13:05
> To: Slava Ovsiienko <viacheslavo@nvidia.com>; Raslan Darawsheh
> <rasland@nvidia.com>; dev@dpdk.org
> Cc: NBU-Contact-Thomas Monjalon <thomas@monjalon.net>; Matan Azrad
> <matan@nvidia.com>; Alexander Kozyrev <akozyrev@nvidia.com>; Ori Kam
> <orika@nvidia.com>
> Subject: Re: [dpdk-dev] [PATCH v4 0/6] net/mlx5: add Rx buffer split support
> 
> On 10/26/2020 5:38 PM, Slava Ovsiienko wrote:
> > Hi,  Ferruh
> >
> > PSB
> >> -----Original Message-----
> >> From: Ferruh Yigit <ferruh.yigit@intel.com>
> >> Sent: Monday, October 26, 2020 19:04
> >> To: Raslan Darawsheh <rasland@nvidia.com>; Slava Ovsiienko
> >> <viacheslavo@nvidia.com>; dev@dpdk.org
> >> Cc: NBU-Contact-Thomas Monjalon <thomas@monjalon.net>; Matan Azrad
> >> <matan@nvidia.com>; Alexander Kozyrev <akozyrev@nvidia.com>; Ori Kam
> >> <orika@nvidia.com>
> >> Subject: Re: [dpdk-dev] [PATCH v4 0/6] net/mlx5: add Rx buffer split
> >> support
> >>
> >> On 10/26/2020 3:25 PM, Raslan Darawsheh wrote:
> >>> Hi,
> >>>
> >>>> -----Original Message-----
> >>>> From: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
> >>>> Sent: Monday, October 26, 2020 1:55 PM
> >>>> To: dev@dpdk.org
> >>>> Cc: NBU-Contact-Thomas Monjalon <thomas@monjalon.net>; Matan
> Azrad
> >>>> <matan@nvidia.com>; Alexander Kozyrev <akozyrev@nvidia.com>;
> Raslan
> >>>> Darawsheh <rasland@nvidia.com>; Ori Kam <orika@nvidia.com>
> >>>> Subject: [PATCH v4 0/6] net/mlx5: add Rx buffer split support
> >>>>
> >>>> This patch adds to PMD the functionality for the receiving buffer
> >>>> split feasture [1]
> >>>>
> >>>> [1]
> >>>> https://nam11.safelinks.protection.outlook.com/?url=http%3A%2F%2Fpa
> >>>> tc
> >>>> h
> >>
> es.dpdk.org%2Fpatch%2F81154%2F&amp;data=02%7C01%7Crasland%40nvid
> >>>>
> >>
> ia.com%7Ccf4913c6b58346b50b1b08d879a60608%7C43083d15727340c1b7db
> >> 3
> >>>>
> >>
> 9efd9ccc17a%7C0%7C0%7C637393101256743078&amp;sdata=fyiL3PS8r8wv8u
> >>>> pyOYUtITkVqId9DZsF9LvSJQL9fdM%3D&amp;reserved=0
> >>>>
> >>>> Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
> >>>>
> >>>> ---
> >>>> v1:
> >>>> https://nam11.safelinks.protection.outlook.com/?url=http%3A%2F%2Fpa
> >>>> tc
> >>>> h
> >>
> es.dpdk.org%2Fpatch%2F81808%2F&amp;data=02%7C01%7Crasland%40nvid
> >>>>
> >>
> ia.com%7Ccf4913c6b58346b50b1b08d879a60608%7C43083d15727340c1b7db
> >> 3
> >>>>
> >>
> 9efd9ccc17a%7C0%7C0%7C637393101256743078&amp;sdata=NPBFlGmVN6bi
> >>>> GUpzHC%2FrOVmdMoK2fkYRC0%2FDB%2BNlNno%3D&amp;reserved=0
> >>>>
> >>>> v2:
> >>>> https://nam11.safelinks.protection.outlook.com/?url=http%3A%2F%2Fpa
> >>>> tc
> >>>> h
> >>
> es.dpdk.org%2Fpatch%2F81923%2F&amp;data=02%7C01%7Crasland%40nvid
> >>>>
> >>
> ia.com%7Ccf4913c6b58346b50b1b08d879a60608%7C43083d15727340c1b7db
> >> 3
> >>>>
> >>
> 9efd9ccc17a%7C0%7C0%7C637393101256743078&amp;sdata=YwYjMz3jrSYU6
> >>>> RBgwl0DmQfmjwwymNJTFjMdx0rsm2U%3D&amp;reserved=0
> >>>>       - typos
> >>>>       - documentation is updated
> >>>>
> >>>> v3:
> >>>> https://nam11.safelinks.protection.outlook.com/?url=http%3A%2F%2Fpa
> >>>> tc
> >>>> h
> >>
> es.dpdk.org%2Fpatch%2F82177%2F&amp;data=02%7C01%7Crasland%40nvid
> >>>>
> >>
> ia.com%7Ccf4913c6b58346b50b1b08d879a60608%7C43083d15727340c1b7db
> >> 3
> >>>>
> >>
> 9efd9ccc17a%7C0%7C0%7C637393101256743078&amp;sdata=HVvLbWS0sJxu
> >>>> v%2Bc%2BKIMqllBq3edC4v0GD%2BtrwS7%2FsRo%3D&amp;reserved=0
> >>>>       - extra parameter checks in PMD rx_queue_setup removed
> >>>>       - minor optimizations in PMD
> >>>>
> >>>> v4: - rebasing
> >>>>
> >>>> Viacheslav Ovsiienko (6):
> >>>>     net/mlx5: add extended Rx queue setup routine
> >>>>     net/mlx5: configure Rx queue to support split
> >>>>     net/mlx5: register multiple pool for Rx queue
> >>>>     net/mlx5: update Rx datapath to support split
> >>>>     net/mlx5: report Rx segmentation capabilities
> >>>>     doc: add buffer split feature limitation to mlx5 guide
> >>>>
> >>>>    doc/guides/nics/mlx5.rst        |   6 +-
> >>>>    drivers/net/mlx5/mlx5.h         |   3 +
> >>>>    drivers/net/mlx5/mlx5_ethdev.c  |   4 ++
> >>>>    drivers/net/mlx5/mlx5_mr.c      |   3 +
> >>>>    drivers/net/mlx5/mlx5_rxq.c     | 136
> >>>> +++++++++++++++++++++++++++++++++++-----
> >>>>    drivers/net/mlx5/mlx5_rxtx.c    |   3 +-
> >>>>    drivers/net/mlx5/mlx5_rxtx.h    |  13 +++-
> >>>>    drivers/net/mlx5/mlx5_trigger.c |  20 +++---
> >>>>    8 files changed, 160 insertions(+), 28 deletions(-)
> >>>>
> >>>> --
> >>>> 1.8.3.1
> >>>
> >>> Series applied to next-net-mlx,
> >>>
> >>
> >> The feature was references with different name in each commit, I
> >> tried to unify it as "Rx buffer split" in next-net.
> >> Can you please double check the updated commit log/titles?
> >
> >>> 	doc: add Rx buffer split limitation to mlx5 guide
> >>> 	net/mlx5: report Rx buffer split capabilities
> > OK about above.
> >
> >>> 	net/mlx5: support Rx buffer split
> > It would be better: "net/mlx5: support Rx buffer split on datapath
> >
> 
> Isn't the supporting the "Rx buffer split" mean supporting it on the datapath,
> where else it can be supported, the "on datapath" looks redundant to me.

Options for possible "support Buffer Split" meaning:
- generic PMD configuration
- queue configuration
- reporting caps
- datapath

The series is split for commits those updating the very specific parts in PMD.
We may drop this specifics but we would lose the series split meaning.
Sure, the entire series is about "support Rx buffer split", but each commit has
its own clarification in the headline.

> 
> >>> 	net/mlx5: register multiple pool for Rx queue
> > OK
> >
> >>> 	net/mlx5: configure Rx buffer split
> > It would be better: "net/mlx5: configure Rx queue for buffer split"
> >
> 
> Like above, isn't the configure "Rx buffer split" mean configuring Rx queue for
> it, "Rx queue" looks redundant to me.
It just emphasizes - "the queue object is configured in this specific commit",
it would be easier to find this point and understand what it is in the long git log.
Hence, in my opinion, "queue" is some kind of extra clue, we should not drop it.

> For both above, if you have strong opinion to update them, I can. But I prefer
> shorter versions.
> 

> >>> 	net/mlx5: receive Rx buffer split description
> > IMO, it would be better: "net/mlx5: handle Rx buffer split description"
> > or
> > "net/mlx5: support Rx buffer split description"
> >
> 
> OK to use "net/mlx5: support Rx buffer split description"
> 
Please, see dpdk-next-net-mlx - Raslan updated the subtree, addressing
the hotfix and yours and mine comments.

With best regards,  Slava

^ permalink raw reply	[flat|nested] 35+ messages in thread

* Re: [dpdk-dev] [PATCH v4 0/6] net/mlx5: add Rx buffer split support
  2020-10-27 19:05             ` Slava Ovsiienko
@ 2020-10-29 13:09               ` Ferruh Yigit
  2020-10-29 14:21                 ` Slava Ovsiienko
  0 siblings, 1 reply; 35+ messages in thread
From: Ferruh Yigit @ 2020-10-29 13:09 UTC (permalink / raw)
  To: Slava Ovsiienko, Raslan Darawsheh, dev
  Cc: NBU-Contact-Thomas Monjalon, Matan Azrad, Alexander Kozyrev, Ori Kam

On 10/27/2020 7:05 PM, Slava Ovsiienko wrote:
>> -----Original Message-----
>> From: Ferruh Yigit <ferruh.yigit@intel.com>
>> Sent: Tuesday, October 27, 2020 13:05
>> To: Slava Ovsiienko <viacheslavo@nvidia.com>; Raslan Darawsheh
>> <rasland@nvidia.com>; dev@dpdk.org
>> Cc: NBU-Contact-Thomas Monjalon <thomas@monjalon.net>; Matan Azrad
>> <matan@nvidia.com>; Alexander Kozyrev <akozyrev@nvidia.com>; Ori Kam
>> <orika@nvidia.com>
>> Subject: Re: [dpdk-dev] [PATCH v4 0/6] net/mlx5: add Rx buffer split support
>>
>> On 10/26/2020 5:38 PM, Slava Ovsiienko wrote:
>>> Hi,  Ferruh
>>>
>>> PSB
>>>> -----Original Message-----
>>>> From: Ferruh Yigit <ferruh.yigit@intel.com>
>>>> Sent: Monday, October 26, 2020 19:04
>>>> To: Raslan Darawsheh <rasland@nvidia.com>; Slava Ovsiienko
>>>> <viacheslavo@nvidia.com>; dev@dpdk.org
>>>> Cc: NBU-Contact-Thomas Monjalon <thomas@monjalon.net>; Matan Azrad
>>>> <matan@nvidia.com>; Alexander Kozyrev <akozyrev@nvidia.com>; Ori Kam
>>>> <orika@nvidia.com>
>>>> Subject: Re: [dpdk-dev] [PATCH v4 0/6] net/mlx5: add Rx buffer split
>>>> support
>>>>
>>>> On 10/26/2020 3:25 PM, Raslan Darawsheh wrote:
>>>>> Hi,
>>>>>
>>>>>> -----Original Message-----
>>>>>> From: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
>>>>>> Sent: Monday, October 26, 2020 1:55 PM
>>>>>> To: dev@dpdk.org
>>>>>> Cc: NBU-Contact-Thomas Monjalon <thomas@monjalon.net>; Matan
>> Azrad
>>>>>> <matan@nvidia.com>; Alexander Kozyrev <akozyrev@nvidia.com>;
>> Raslan
>>>>>> Darawsheh <rasland@nvidia.com>; Ori Kam <orika@nvidia.com>
>>>>>> Subject: [PATCH v4 0/6] net/mlx5: add Rx buffer split support
>>>>>>
>>>>>> This patch adds to PMD the functionality for the receiving buffer
>>>>>> split feasture [1]
>>>>>>
>>>>>> [1]
>>>>>> https://nam11.safelinks.protection.outlook.com/?url=http%3A%2F%2Fpa
>>>>>> tc
>>>>>> h
>>>>
>> es.dpdk.org%2Fpatch%2F81154%2F&amp;data=02%7C01%7Crasland%40nvid
>>>>>>
>>>>
>> ia.com%7Ccf4913c6b58346b50b1b08d879a60608%7C43083d15727340c1b7db
>>>> 3
>>>>>>
>>>>
>> 9efd9ccc17a%7C0%7C0%7C637393101256743078&amp;sdata=fyiL3PS8r8wv8u
>>>>>> pyOYUtITkVqId9DZsF9LvSJQL9fdM%3D&amp;reserved=0
>>>>>>
>>>>>> Signed-off-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
>>>>>>
>>>>>> ---
>>>>>> v1:
>>>>>> https://nam11.safelinks.protection.outlook.com/?url=http%3A%2F%2Fpa
>>>>>> tc
>>>>>> h
>>>>
>> es.dpdk.org%2Fpatch%2F81808%2F&amp;data=02%7C01%7Crasland%40nvid
>>>>>>
>>>>
>> ia.com%7Ccf4913c6b58346b50b1b08d879a60608%7C43083d15727340c1b7db
>>>> 3
>>>>>>
>>>>
>> 9efd9ccc17a%7C0%7C0%7C637393101256743078&amp;sdata=NPBFlGmVN6bi
>>>>>> GUpzHC%2FrOVmdMoK2fkYRC0%2FDB%2BNlNno%3D&amp;reserved=0
>>>>>>
>>>>>> v2:
>>>>>> https://nam11.safelinks.protection.outlook.com/?url=http%3A%2F%2Fpa
>>>>>> tc
>>>>>> h
>>>>
>> es.dpdk.org%2Fpatch%2F81923%2F&amp;data=02%7C01%7Crasland%40nvid
>>>>>>
>>>>
>> ia.com%7Ccf4913c6b58346b50b1b08d879a60608%7C43083d15727340c1b7db
>>>> 3
>>>>>>
>>>>
>> 9efd9ccc17a%7C0%7C0%7C637393101256743078&amp;sdata=YwYjMz3jrSYU6
>>>>>> RBgwl0DmQfmjwwymNJTFjMdx0rsm2U%3D&amp;reserved=0
>>>>>>        - typos
>>>>>>        - documentation is updated
>>>>>>
>>>>>> v3:
>>>>>> https://nam11.safelinks.protection.outlook.com/?url=http%3A%2F%2Fpa
>>>>>> tc
>>>>>> h
>>>>
>> es.dpdk.org%2Fpatch%2F82177%2F&amp;data=02%7C01%7Crasland%40nvid
>>>>>>
>>>>
>> ia.com%7Ccf4913c6b58346b50b1b08d879a60608%7C43083d15727340c1b7db
>>>> 3
>>>>>>
>>>>
>> 9efd9ccc17a%7C0%7C0%7C637393101256743078&amp;sdata=HVvLbWS0sJxu
>>>>>> v%2Bc%2BKIMqllBq3edC4v0GD%2BtrwS7%2FsRo%3D&amp;reserved=0
>>>>>>        - extra parameter checks in PMD rx_queue_setup removed
>>>>>>        - minor optimizations in PMD
>>>>>>
>>>>>> v4: - rebasing
>>>>>>
>>>>>> Viacheslav Ovsiienko (6):
>>>>>>      net/mlx5: add extended Rx queue setup routine
>>>>>>      net/mlx5: configure Rx queue to support split
>>>>>>      net/mlx5: register multiple pool for Rx queue
>>>>>>      net/mlx5: update Rx datapath to support split
>>>>>>      net/mlx5: report Rx segmentation capabilities
>>>>>>      doc: add buffer split feature limitation to mlx5 guide
>>>>>>
>>>>>>     doc/guides/nics/mlx5.rst        |   6 +-
>>>>>>     drivers/net/mlx5/mlx5.h         |   3 +
>>>>>>     drivers/net/mlx5/mlx5_ethdev.c  |   4 ++
>>>>>>     drivers/net/mlx5/mlx5_mr.c      |   3 +
>>>>>>     drivers/net/mlx5/mlx5_rxq.c     | 136
>>>>>> +++++++++++++++++++++++++++++++++++-----
>>>>>>     drivers/net/mlx5/mlx5_rxtx.c    |   3 +-
>>>>>>     drivers/net/mlx5/mlx5_rxtx.h    |  13 +++-
>>>>>>     drivers/net/mlx5/mlx5_trigger.c |  20 +++---
>>>>>>     8 files changed, 160 insertions(+), 28 deletions(-)
>>>>>>
>>>>>> --
>>>>>> 1.8.3.1
>>>>>
>>>>> Series applied to next-net-mlx,
>>>>>
>>>>
>>>> The feature was references with different name in each commit, I
>>>> tried to unify it as "Rx buffer split" in next-net.
>>>> Can you please double check the updated commit log/titles?
>>>
>>>>> 	doc: add Rx buffer split limitation to mlx5 guide
>>>>> 	net/mlx5: report Rx buffer split capabilities
>>> OK about above.
>>>
>>>>> 	net/mlx5: support Rx buffer split
>>> It would be better: "net/mlx5: support Rx buffer split on datapath
>>>
>>
>> Isn't the supporting the "Rx buffer split" mean supporting it on the datapath,
>> where else it can be supported, the "on datapath" looks redundant to me.
> 
> Options for possible "support Buffer Split" meaning:
> - generic PMD configuration
> - queue configuration
> - reporting caps
> - datapath
> 
> The series is split for commits those updating the very specific parts in PMD.
> We may drop this specifics but we would lose the series split meaning.
> Sure, the entire series is about "support Rx buffer split", but each commit has
> its own clarification in the headline.
> 
>>
>>>>> 	net/mlx5: register multiple pool for Rx queue
>>> OK
>>>
>>>>> 	net/mlx5: configure Rx buffer split
>>> It would be better: "net/mlx5: configure Rx queue for buffer split"
>>>
>>
>> Like above, isn't the configure "Rx buffer split" mean configuring Rx queue for
>> it, "Rx queue" looks redundant to me.
> It just emphasizes - "the queue object is configured in this specific commit",
> it would be easier to find this point and understand what it is in the long git log.
> Hence, in my opinion, "queue" is some kind of extra clue, we should not drop it.
> 
>> For both above, if you have strong opinion to update them, I can. But I prefer
>> shorter versions.
>>
> 
>>>>> 	net/mlx5: receive Rx buffer split description
>>> IMO, it would be better: "net/mlx5: handle Rx buffer split description"
>>> or
>>> "net/mlx5: support Rx buffer split description"
>>>
>>
>> OK to use "net/mlx5: support Rx buffer split description"
>>
> Please, see dpdk-next-net-mlx - Raslan updated the subtree, addressing
> the hotfix and yours and mine comments.
> 

What to see in the sub-tree?
Making changes is easy, the essence is discussion and reaching into a consensus, 
which is what I am trying to do, without a consensus what is the point of 
updating it in the mlx sub-tree?

^ permalink raw reply	[flat|nested] 35+ messages in thread

* Re: [dpdk-dev] [PATCH v4 0/6] net/mlx5: add Rx buffer split support
  2020-10-29 13:09               ` Ferruh Yigit
@ 2020-10-29 14:21                 ` Slava Ovsiienko
  0 siblings, 0 replies; 35+ messages in thread
From: Slava Ovsiienko @ 2020-10-29 14:21 UTC (permalink / raw)
  To: Ferruh Yigit, Raslan Darawsheh, dev
  Cc: NBU-Contact-Thomas Monjalon, Matan Azrad, Alexander Kozyrev, Ori Kam

> -----Original Message-----
> From: Ferruh Yigit <ferruh.yigit@intel.com>
> Sent: Thursday, October 29, 2020 15:10
> To: Slava Ovsiienko <viacheslavo@nvidia.com>; Raslan Darawsheh
> <rasland@nvidia.com>; dev@dpdk.org
> Cc: NBU-Contact-Thomas Monjalon <thomas@monjalon.net>; Matan Azrad
> <matan@nvidia.com>; Alexander Kozyrev <akozyrev@nvidia.com>; Ori Kam
> <orika@nvidia.com>
> Subject: Re: [dpdk-dev] [PATCH v4 0/6] net/mlx5: add Rx buffer split support
> 
> On 10/27/2020 7:05 PM, Slava Ovsiienko wrote:
> >> -----Original Message-----
> >> From: Ferruh Yigit <ferruh.yigit@intel.com>
> >> Sent: Tuesday, October 27, 2020 13:05
> >> To: Slava Ovsiienko <viacheslavo@nvidia.com>; Raslan Darawsheh
> >> <rasland@nvidia.com>; dev@dpdk.org
> >> Cc: NBU-Contact-Thomas Monjalon <thomas@monjalon.net>; Matan Azrad
> >> <matan@nvidia.com>; Alexander Kozyrev <akozyrev@nvidia.com>; Ori Kam
> >> <orika@nvidia.com>
> >> Subject: Re: [dpdk-dev] [PATCH v4 0/6] net/mlx5: add Rx buffer split
> >> support
> >>
[..snip..]
> >>>>
> >>>> The feature was references with different name in each commit, I
> >>>> tried to unify it as "Rx buffer split" in next-net.
> >>>> Can you please double check the updated commit log/titles?
> >>>
> >>>>> 	doc: add Rx buffer split limitation to mlx5 guide
> >>>>> 	net/mlx5: report Rx buffer split capabilities
> >>> OK about above.
> >>>
> >>>>> 	net/mlx5: support Rx buffer split
> >>> It would be better: "net/mlx5: support Rx buffer split on datapath
> >>>
> >>
> >> Isn't the supporting the "Rx buffer split" mean supporting it on the
> >> datapath, where else it can be supported, the "on datapath" looks
> redundant to me.
> >
> > Options for possible "support Buffer Split" meaning:
> > - generic PMD configuration
> > - queue configuration
> > - reporting caps
> > - datapath
> >
> > The series is split into commits, each updating a very specific part of the PMD.
> > We may drop these specifics, but we would lose the meaning of the series split.
> > Sure, the entire series is about "support Rx buffer split", but each
> > commit has its own clarification in the headline.
> >
> >>
> >>>>> 	net/mlx5: register multiple pool for Rx queue
> >>> OK
> >>>
> >>>>> 	net/mlx5: configure Rx buffer split
> >>> It would be better: "net/mlx5: configure Rx queue for buffer split"
> >>>
> >>
> >> Like above, isn't the configure "Rx buffer split" mean configuring Rx
> >> queue for it, "Rx queue" looks redundant to me.
> > It just emphasizes - "the queue object is configured in this specific
> > commit", it would be easier to find this point and understand what it is in the
> long git log.
> > Hence, in my opinion, "queue" is some kind of extra clue, we should not drop
> it.
> >
> >> For both above, if you have strong opinion to update them, I can. But
> >> I prefer shorter versions.
> >>
> >
> >>>>> 	net/mlx5: receive Rx buffer split description
> >>> IMO, it would be better: "net/mlx5: handle Rx buffer split description"
> >>> or
> >>> "net/mlx5: support Rx buffer split description"
> >>>
> >>
> >> OK to use "net/mlx5: support Rx buffer split description"
> >>
> > Please, see dpdk-next-net-mlx - Raslan updated the subtree, addressing
> > the hotfix and yours and mine comments.
> >
> 
> What to see in the sub-tree?
> Making changes is easy, the essence is discussion and reaching into a
> consensus, which is what I am trying to do, without a consensus what is the
> point of updating it in the mlx sub-tree?

Mmm, I hoped we had reached a consensus — I agreed with you that the feature should be
named in the same fashion across the commits while, at the same time, we would like
to emphasize each commit's specifics. I just tried to save you effort by taking the prepared
commits from the sub-tree. The patches are exactly the same; only the headlines are updated.
What, in your opinion, should be the next step? Do you mean we should send
an updated version with updated headlines to the mailing list?

With best regards, Slava


^ permalink raw reply	[flat|nested] 35+ messages in thread

end of thread, other threads:[~2020-10-29 14:21 UTC | newest]

Thread overview: 35+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-10-22 15:42 [dpdk-dev] [PATCH 0/5] net/mlx5: add Rx buffer split support Viacheslav Ovsiienko
2020-10-22 15:42 ` [dpdk-dev] [PATCH 1/5] net/mlx5: add extended Rx queue setup routine Viacheslav Ovsiienko
2020-10-23  9:46   ` [dpdk-dev] [PATCH v2 0/5] net/mlx5: add Rx buffer split support Viacheslav Ovsiienko
2020-10-23  9:46     ` [dpdk-dev] [PATCH v2 1/5] net/mlx5: configure Rx queue to support split Viacheslav Ovsiienko
2020-10-23  9:46     ` [dpdk-dev] [PATCH v2 2/5] net/mlx5: register multiple pool for Rx queue Viacheslav Ovsiienko
2020-10-23  9:46     ` [dpdk-dev] [PATCH v2 3/5] net/mlx5: update Rx datapath to support split Viacheslav Ovsiienko
2020-10-23  9:46     ` [dpdk-dev] [PATCH v2 4/5] net/mlx5: report Rx segmentation capabilities Viacheslav Ovsiienko
2020-10-23  9:46     ` [dpdk-dev] [PATCH v2 5/5] doc: add buffer split feature limitation to mlx5 guide Viacheslav Ovsiienko
2020-10-26 10:11   ` [dpdk-dev] [PATCH v3 0/6] net/mlx5: add Rx buffer split support Viacheslav Ovsiienko
2020-10-26 10:11     ` [dpdk-dev] [PATCH v3 1/6] net/mlx5: add extended Rx queue setup routine Viacheslav Ovsiienko
2020-10-26 10:11     ` [dpdk-dev] [PATCH v3 2/6] net/mlx5: configure Rx queue to support split Viacheslav Ovsiienko
2020-10-26 10:11     ` [dpdk-dev] [PATCH v3 3/6] net/mlx5: register multiple pool for Rx queue Viacheslav Ovsiienko
2020-10-26 10:11     ` [dpdk-dev] [PATCH v3 4/6] net/mlx5: update Rx datapath to support split Viacheslav Ovsiienko
2020-10-26 10:11     ` [dpdk-dev] [PATCH v3 5/6] net/mlx5: report Rx segmentation capabilities Viacheslav Ovsiienko
2020-10-26 10:11     ` [dpdk-dev] [PATCH v3 6/6] doc: add buffer split feature limitation to mlx5 guide Viacheslav Ovsiienko
2020-10-26 11:54   ` [dpdk-dev] [PATCH v4 0/6] net/mlx5: add Rx buffer split support Viacheslav Ovsiienko
2020-10-26 11:55     ` [dpdk-dev] [PATCH v4 1/6] net/mlx5: add extended Rx queue setup routine Viacheslav Ovsiienko
2020-10-26 11:55     ` [dpdk-dev] [PATCH v4 2/6] net/mlx5: configure Rx queue to support split Viacheslav Ovsiienko
2020-10-26 11:55     ` [dpdk-dev] [PATCH v4 3/6] net/mlx5: register multiple pool for Rx queue Viacheslav Ovsiienko
2020-10-26 11:55     ` [dpdk-dev] [PATCH v4 4/6] net/mlx5: update Rx datapath to support split Viacheslav Ovsiienko
2020-10-26 11:55     ` [dpdk-dev] [PATCH v4 5/6] net/mlx5: report Rx segmentation capabilities Viacheslav Ovsiienko
2020-10-26 11:55     ` [dpdk-dev] [PATCH v4 6/6] doc: add buffer split feature limitation to mlx5 guide Viacheslav Ovsiienko
2020-10-26 15:25     ` [dpdk-dev] [PATCH v4 0/6] net/mlx5: add Rx buffer split support Raslan Darawsheh
2020-10-26 17:04       ` Ferruh Yigit
2020-10-26 17:38         ` Slava Ovsiienko
2020-10-27 11:05           ` Ferruh Yigit
2020-10-27 19:05             ` Slava Ovsiienko
2020-10-29 13:09               ` Ferruh Yigit
2020-10-29 14:21                 ` Slava Ovsiienko
2020-10-26 17:17   ` [dpdk-dev] [PATCH] net/mlx5: fix Rx queue initialization for scattered segment Viacheslav Ovsiienko
2020-10-26 18:07     ` Raslan Darawsheh
2020-10-22 15:42 ` [dpdk-dev] [PATCH 2/5] net/mlx5: configure Rx queue to support split Viacheslav Ovsiienko
2020-10-22 15:42 ` [dpdk-dev] [PATCH 3/5] net/mlx5: register multiple pool for Rx queue Viacheslav Ovsiienko
2020-10-22 15:42 ` [dpdk-dev] [PATCH 4/5] net/mlx5: update Rx datapath to support split Viacheslav Ovsiienko
2020-10-22 15:42 ` [dpdk-dev] [PATCH 5/5] net/mlx5: report Rx segmentation capabilities Viacheslav Ovsiienko

DPDK patches and discussions

This inbox may be cloned and mirrored by anyone:

	git clone --mirror http://inbox.dpdk.org/dev/0 dev/git/0.git

	# If you have public-inbox 1.1+ installed, you may
	# initialize and index your mirror using the following commands:
	public-inbox-init -V2 dev dev/ http://inbox.dpdk.org/dev \
		dev@dpdk.org
	public-inbox-index dev

Example config snippet for mirrors.
Newsgroup available over NNTP:
	nntp://inbox.dpdk.org/inbox.dpdk.dev


AGPL code for this site: git clone https://public-inbox.org/public-inbox.git