DPDK patches and discussions
 help / color / mirror / Atom feed
* [dpdk-dev] [PATCH 0/4] Use bulk alloc/free in virtio packed
@ 2021-04-06 15:44 Balazs Nemeth
  2021-04-06 15:44 ` [dpdk-dev] [PATCH 1/4] vhost: move allocation of mbuf outside of packet enqueue Balazs Nemeth
                   ` (3 more replies)
  0 siblings, 4 replies; 27+ messages in thread
From: Balazs Nemeth @ 2021-04-06 15:44 UTC (permalink / raw)
  To: bnemeth, dev

Use the faster bulk versions of alloc and free in virtio packed path.

Balazs Nemeth (4):
  vhost: move allocation of mbuf outside of packet enqueue
  vhost: perform all mbuf allocations in one loop
  vhost: allocate and free packets in bulk
  vhost: remove unnecessary level of indirection

 lib/librte_vhost/virtio_net.c | 56 ++++++++++++++++++++++++-----------
 1 file changed, 39 insertions(+), 17 deletions(-)

--
2.30.2


^ permalink raw reply	[flat|nested] 27+ messages in thread

* [dpdk-dev] [PATCH 1/4] vhost: move allocation of mbuf outside of packet enqueue
  2021-04-06 15:44 [dpdk-dev] [PATCH 0/4] Use bulk alloc/free in virtio packed Balazs Nemeth
@ 2021-04-06 15:44 ` Balazs Nemeth
  2021-04-07 10:17   ` [dpdk-dev] [PATCH v2 " Balazs Nemeth
                     ` (3 more replies)
  2021-04-06 15:44 ` [dpdk-dev] [PATCH 2/4] vhost: perform all mbuf allocations in one loop Balazs Nemeth
                   ` (2 subsequent siblings)
  3 siblings, 4 replies; 27+ messages in thread
From: Balazs Nemeth @ 2021-04-06 15:44 UTC (permalink / raw)
  To: bnemeth, dev

In preparation for subsequent patches, move mbuf allocation out and
rename virtio_dev_pktmbuf_alloc to virtio_dev_pktmbuf_prep. This
function now receives an already allocated mbuf pointer.

Signed-off-by: Balazs Nemeth <bnemeth@redhat.com>
---
 lib/librte_vhost/virtio_net.c | 54 ++++++++++++++++++++++++++---------
 1 file changed, 40 insertions(+), 14 deletions(-)

diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index 7f621fb6d..666e7fdb8 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -2166,6 +2166,23 @@ virtio_dev_pktmbuf_alloc(struct virtio_net *dev, struct rte_mempool *mp,
 	return NULL;
 }
 
+static __rte_always_inline int
+virtio_dev_pktmbuf_prep(struct virtio_net *dev, struct rte_mbuf *pkt,
+			 uint32_t data_len)
+{
+	if (rte_pktmbuf_tailroom(pkt) >= data_len)
+		return 0;
+
+	/* attach an external buffer if supported */
+	if (dev->extbuf && !virtio_dev_extbuf_alloc(pkt, data_len))
+		return 0;
+
+	/* check if chained buffers are allowed */
+	if (!dev->linearbuf)
+		return 0;
+	return 1;
+}
+
 static __rte_noinline uint16_t
 virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
@@ -2259,7 +2276,6 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 static __rte_always_inline int
 vhost_reserve_avail_batch_packed(struct virtio_net *dev,
 				 struct vhost_virtqueue *vq,
-				 struct rte_mempool *mbuf_pool,
 				 struct rte_mbuf **pkts,
 				 uint16_t avail_idx,
 				 uintptr_t *desc_addrs,
@@ -2304,8 +2320,7 @@ vhost_reserve_avail_batch_packed(struct virtio_net *dev,
 	}
 
 	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
-		pkts[i] = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, lens[i]);
-		if (!pkts[i])
+		if (virtio_dev_pktmbuf_prep(dev, pkts[i], lens[i]))
 			goto free_buf;
 	}
 
@@ -2326,16 +2341,12 @@ vhost_reserve_avail_batch_packed(struct virtio_net *dev,
 	return 0;
 
 free_buf:
-	for (i = 0; i < PACKED_BATCH_SIZE; i++)
-		rte_pktmbuf_free(pkts[i]);
-
 	return -1;
 }
 
 static __rte_always_inline int
 virtio_dev_tx_batch_packed(struct virtio_net *dev,
 			   struct vhost_virtqueue *vq,
-			   struct rte_mempool *mbuf_pool,
 			   struct rte_mbuf **pkts)
 {
 	uint16_t avail_idx = vq->last_avail_idx;
@@ -2345,8 +2356,8 @@ virtio_dev_tx_batch_packed(struct virtio_net *dev,
 	uint16_t ids[PACKED_BATCH_SIZE];
 	uint16_t i;
 
-	if (vhost_reserve_avail_batch_packed(dev, vq, mbuf_pool, pkts,
-					     avail_idx, desc_addrs, ids))
+	if (vhost_reserve_avail_batch_packed(dev, vq, pkts, avail_idx,
+					     desc_addrs, ids))
 		return -1;
 
 	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
@@ -2396,8 +2407,8 @@ vhost_dequeue_single_packed(struct virtio_net *dev,
 					 VHOST_ACCESS_RO) < 0))
 		return -1;
 
-	*pkts = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, buf_len);
-	if (unlikely(*pkts == NULL)) {
+
+	if (unlikely(virtio_dev_pktmbuf_prep(dev, *pkts, buf_len))) {
 		if (!allocerr_warned) {
 			VHOST_LOG_DATA(ERR,
 				"Failed mbuf alloc of size %d from %s on %s.\n",
@@ -2416,7 +2427,6 @@ vhost_dequeue_single_packed(struct virtio_net *dev,
 				dev->ifname);
 			allocerr_warned = true;
 		}
-		rte_pktmbuf_free(*pkts);
 		return -1;
 	}
 
@@ -2459,22 +2469,38 @@ virtio_dev_tx_packed(struct virtio_net *dev,
 {
 	uint32_t pkt_idx = 0;
 	uint32_t remained = count;
+	uint16_t i;
 
 	do {
 		rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]);
 
 		if (remained >= PACKED_BATCH_SIZE) {
-			if (!virtio_dev_tx_batch_packed(dev, vq, mbuf_pool,
+			vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
+				pkts[pkt_idx + i] =
+					rte_pktmbuf_alloc(mbuf_pool);
+			}
+
+			if (!virtio_dev_tx_batch_packed(dev, vq,
 							&pkts[pkt_idx])) {
 				pkt_idx += PACKED_BATCH_SIZE;
 				remained -= PACKED_BATCH_SIZE;
+
 				continue;
+			} else {
+				vhost_for_each_try_unroll(i, 0,
+					PACKED_BATCH_SIZE) {
+					rte_pktmbuf_free(pkts[pkt_idx + i]);
+				}
 			}
 		}
 
+		pkts[pkt_idx] = rte_pktmbuf_alloc(mbuf_pool);
+
 		if (virtio_dev_tx_single_packed(dev, vq, mbuf_pool,
-						&pkts[pkt_idx]))
+						&pkts[pkt_idx])) {
+			rte_pktmbuf_free(pkts[pkt_idx]);
 			break;
+		}
 		pkt_idx++;
 		remained--;
 
-- 
2.30.2


^ permalink raw reply	[flat|nested] 27+ messages in thread

* [dpdk-dev] [PATCH 2/4] vhost: perform all mbuf allocations in one loop
  2021-04-06 15:44 [dpdk-dev] [PATCH 0/4] Use bulk alloc/free in virtio packed Balazs Nemeth
  2021-04-06 15:44 ` [dpdk-dev] [PATCH 1/4] vhost: move allocation of mbuf outside of packet enqueue Balazs Nemeth
@ 2021-04-06 15:44 ` Balazs Nemeth
  2021-04-06 15:44 ` [dpdk-dev] [PATCH 3/4] vhost: allocate and free packets in bulk Balazs Nemeth
  2021-04-06 15:44 ` [dpdk-dev] [PATCH 4/4] vhost: remove unnecessary level of indirection Balazs Nemeth
  3 siblings, 0 replies; 27+ messages in thread
From: Balazs Nemeth @ 2021-04-06 15:44 UTC (permalink / raw)
  To: bnemeth, dev

Move allocation out further and perform all allocation in one loop. The
same goes for freeing packets. This is to prepare for use of bulk
versions of these functions.

Signed-off-by: Balazs Nemeth <bnemeth@redhat.com>
---
 lib/librte_vhost/virtio_net.c | 18 ++++++------------
 1 file changed, 6 insertions(+), 12 deletions(-)

diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index 666e7fdb8..496f750e3 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -2471,14 +2471,13 @@ virtio_dev_tx_packed(struct virtio_net *dev,
 	uint32_t remained = count;
 	uint16_t i;
 
+	for (i = 0; i < count; ++i)
+		pkts[i] = rte_pktmbuf_alloc(mbuf_pool);
+
 	do {
 		rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]);
 
 		if (remained >= PACKED_BATCH_SIZE) {
-			vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
-				pkts[pkt_idx + i] =
-					rte_pktmbuf_alloc(mbuf_pool);
-			}
 
 			if (!virtio_dev_tx_batch_packed(dev, vq,
 							&pkts[pkt_idx])) {
@@ -2486,19 +2485,11 @@ virtio_dev_tx_packed(struct virtio_net *dev,
 				remained -= PACKED_BATCH_SIZE;
 
 				continue;
-			} else {
-				vhost_for_each_try_unroll(i, 0,
-					PACKED_BATCH_SIZE) {
-					rte_pktmbuf_free(pkts[pkt_idx + i]);
-				}
 			}
 		}
 
-		pkts[pkt_idx] = rte_pktmbuf_alloc(mbuf_pool);
-
 		if (virtio_dev_tx_single_packed(dev, vq, mbuf_pool,
 						&pkts[pkt_idx])) {
-			rte_pktmbuf_free(pkts[pkt_idx]);
 			break;
 		}
 		pkt_idx++;
@@ -2506,6 +2497,9 @@ virtio_dev_tx_packed(struct virtio_net *dev,
 
 	} while (remained);
 
+	for (i = pkt_idx; i < count; ++i)
+		rte_pktmbuf_free(pkts[i]);
+
 	if (vq->shadow_used_idx) {
 		do_data_copy_dequeue(vq);
 
-- 
2.30.2


^ permalink raw reply	[flat|nested] 27+ messages in thread

* [dpdk-dev] [PATCH 3/4] vhost: allocate and free packets in bulk
  2021-04-06 15:44 [dpdk-dev] [PATCH 0/4] Use bulk alloc/free in virtio packed Balazs Nemeth
  2021-04-06 15:44 ` [dpdk-dev] [PATCH 1/4] vhost: move allocation of mbuf outside of packet enqueue Balazs Nemeth
  2021-04-06 15:44 ` [dpdk-dev] [PATCH 2/4] vhost: perform all mbuf allocations in one loop Balazs Nemeth
@ 2021-04-06 15:44 ` Balazs Nemeth
  2021-04-06 15:44 ` [dpdk-dev] [PATCH 4/4] vhost: remove unnecessary level of indirection Balazs Nemeth
  3 siblings, 0 replies; 27+ messages in thread
From: Balazs Nemeth @ 2021-04-06 15:44 UTC (permalink / raw)
  To: bnemeth, dev

Now that all allocation and freeing has been moved together, use the
faster bulk versions instead of handling packets one by one.

Signed-off-by: Balazs Nemeth <bnemeth@redhat.com>
---
 lib/librte_vhost/virtio_net.c | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index 496f750e3..e1696c0c0 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -2471,8 +2471,9 @@ virtio_dev_tx_packed(struct virtio_net *dev,
 	uint32_t remained = count;
 	uint16_t i;
 
-	for (i = 0; i < count; ++i)
-		pkts[i] = rte_pktmbuf_alloc(mbuf_pool);
+	if (rte_pktmbuf_alloc_bulk(mbuf_pool, pkts, count)) {
+		return 0;
+	}
 
 	do {
 		rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]);
@@ -2497,8 +2498,9 @@ virtio_dev_tx_packed(struct virtio_net *dev,
 
 	} while (remained);
 
-	for (i = pkt_idx; i < count; ++i)
-		rte_pktmbuf_free(pkts[i]);
+	if (pkt_idx != count) {
+		rte_pktmbuf_free_bulk(&pkts[pkt_idx], count - pkt_idx);
+	}
 
 	if (vq->shadow_used_idx) {
 		do_data_copy_dequeue(vq);
-- 
2.30.2


^ permalink raw reply	[flat|nested] 27+ messages in thread

* [dpdk-dev] [PATCH 4/4] vhost: remove unnecessary level of indirection
  2021-04-06 15:44 [dpdk-dev] [PATCH 0/4] Use bulk alloc/free in virtio packed Balazs Nemeth
                   ` (2 preceding siblings ...)
  2021-04-06 15:44 ` [dpdk-dev] [PATCH 3/4] vhost: allocate and free packets in bulk Balazs Nemeth
@ 2021-04-06 15:44 ` Balazs Nemeth
  3 siblings, 0 replies; 27+ messages in thread
From: Balazs Nemeth @ 2021-04-06 15:44 UTC (permalink / raw)
  To: bnemeth, dev

There is no need to pass a pointer to an mbuf pointer.

Signed-off-by: Balazs Nemeth <bnemeth@redhat.com>
---
 lib/librte_vhost/virtio_net.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index e1696c0c0..179c57b46 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -2390,7 +2390,7 @@ static __rte_always_inline int
 vhost_dequeue_single_packed(struct virtio_net *dev,
 			    struct vhost_virtqueue *vq,
 			    struct rte_mempool *mbuf_pool,
-			    struct rte_mbuf **pkts,
+			    struct rte_mbuf *pkts,
 			    uint16_t *buf_id,
 			    uint16_t *desc_count)
 {
@@ -2408,7 +2408,7 @@ vhost_dequeue_single_packed(struct virtio_net *dev,
 		return -1;
 
 
-	if (unlikely(virtio_dev_pktmbuf_prep(dev, *pkts, buf_len))) {
+	if (unlikely(virtio_dev_pktmbuf_prep(dev, pkts, buf_len))) {
 		if (!allocerr_warned) {
 			VHOST_LOG_DATA(ERR,
 				"Failed mbuf alloc of size %d from %s on %s.\n",
@@ -2418,7 +2418,7 @@ vhost_dequeue_single_packed(struct virtio_net *dev,
 		return -1;
 	}
 
-	err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, *pkts,
+	err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkts,
 				mbuf_pool);
 	if (unlikely(err)) {
 		if (!allocerr_warned) {
@@ -2437,7 +2437,7 @@ static __rte_always_inline int
 virtio_dev_tx_single_packed(struct virtio_net *dev,
 			    struct vhost_virtqueue *vq,
 			    struct rte_mempool *mbuf_pool,
-			    struct rte_mbuf **pkts)
+			    struct rte_mbuf *pkts)
 {
 
 	uint16_t buf_id, desc_count = 0;
@@ -2490,7 +2490,7 @@ virtio_dev_tx_packed(struct virtio_net *dev,
 		}
 
 		if (virtio_dev_tx_single_packed(dev, vq, mbuf_pool,
-						&pkts[pkt_idx])) {
+						pkts[pkt_idx])) {
 			break;
 		}
 		pkt_idx++;
-- 
2.30.2


^ permalink raw reply	[flat|nested] 27+ messages in thread

* [dpdk-dev] [PATCH v2 1/4] vhost: move allocation of mbuf outside of packet enqueue
  2021-04-06 15:44 ` [dpdk-dev] [PATCH 1/4] vhost: move allocation of mbuf outside of packet enqueue Balazs Nemeth
@ 2021-04-07 10:17   ` Balazs Nemeth
  2021-04-15 12:37     ` Maxime Coquelin
  2021-04-16  8:18     ` [dpdk-dev] [PATCH v3] vhost: allocate and free packets in bulk Balazs Nemeth
  2021-04-07 10:17   ` [dpdk-dev] [PATCH v2 2/4] vhost: perform all mbuf allocations in one loop Balazs Nemeth
                     ` (2 subsequent siblings)
  3 siblings, 2 replies; 27+ messages in thread
From: Balazs Nemeth @ 2021-04-07 10:17 UTC (permalink / raw)
  To: bnemeth, dev

In preparation for subsequent patches, move mbuf allocation out and
rename virtio_dev_pktmbuf_alloc to virtio_dev_pktmbuf_prep. This
function now receives an already allocated mbuf pointer.

Signed-off-by: Balazs Nemeth <bnemeth@redhat.com>
---
 lib/librte_vhost/virtio_net.c | 54 ++++++++++++++++++++++++++---------
 1 file changed, 40 insertions(+), 14 deletions(-)

diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index 7f621fb6d..666e7fdb8 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -2166,6 +2166,23 @@ virtio_dev_pktmbuf_alloc(struct virtio_net *dev, struct rte_mempool *mp,
 	return NULL;
 }
 
+static __rte_always_inline int
+virtio_dev_pktmbuf_prep(struct virtio_net *dev, struct rte_mbuf *pkt,
+			 uint32_t data_len)
+{
+	if (rte_pktmbuf_tailroom(pkt) >= data_len)
+		return 0;
+
+	/* attach an external buffer if supported */
+	if (dev->extbuf && !virtio_dev_extbuf_alloc(pkt, data_len))
+		return 0;
+
+	/* check if chained buffers are allowed */
+	if (!dev->linearbuf)
+		return 0;
+	return 1;
+}
+
 static __rte_noinline uint16_t
 virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
@@ -2259,7 +2276,6 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 static __rte_always_inline int
 vhost_reserve_avail_batch_packed(struct virtio_net *dev,
 				 struct vhost_virtqueue *vq,
-				 struct rte_mempool *mbuf_pool,
 				 struct rte_mbuf **pkts,
 				 uint16_t avail_idx,
 				 uintptr_t *desc_addrs,
@@ -2304,8 +2320,7 @@ vhost_reserve_avail_batch_packed(struct virtio_net *dev,
 	}
 
 	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
-		pkts[i] = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, lens[i]);
-		if (!pkts[i])
+		if (virtio_dev_pktmbuf_prep(dev, pkts[i], lens[i]))
 			goto free_buf;
 	}
 
@@ -2326,16 +2341,12 @@ vhost_reserve_avail_batch_packed(struct virtio_net *dev,
 	return 0;
 
 free_buf:
-	for (i = 0; i < PACKED_BATCH_SIZE; i++)
-		rte_pktmbuf_free(pkts[i]);
-
 	return -1;
 }
 
 static __rte_always_inline int
 virtio_dev_tx_batch_packed(struct virtio_net *dev,
 			   struct vhost_virtqueue *vq,
-			   struct rte_mempool *mbuf_pool,
 			   struct rte_mbuf **pkts)
 {
 	uint16_t avail_idx = vq->last_avail_idx;
@@ -2345,8 +2356,8 @@ virtio_dev_tx_batch_packed(struct virtio_net *dev,
 	uint16_t ids[PACKED_BATCH_SIZE];
 	uint16_t i;
 
-	if (vhost_reserve_avail_batch_packed(dev, vq, mbuf_pool, pkts,
-					     avail_idx, desc_addrs, ids))
+	if (vhost_reserve_avail_batch_packed(dev, vq, pkts, avail_idx,
+					     desc_addrs, ids))
 		return -1;
 
 	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
@@ -2396,8 +2407,8 @@ vhost_dequeue_single_packed(struct virtio_net *dev,
 					 VHOST_ACCESS_RO) < 0))
 		return -1;
 
-	*pkts = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, buf_len);
-	if (unlikely(*pkts == NULL)) {
+
+	if (unlikely(virtio_dev_pktmbuf_prep(dev, *pkts, buf_len))) {
 		if (!allocerr_warned) {
 			VHOST_LOG_DATA(ERR,
 				"Failed mbuf alloc of size %d from %s on %s.\n",
@@ -2416,7 +2427,6 @@ vhost_dequeue_single_packed(struct virtio_net *dev,
 				dev->ifname);
 			allocerr_warned = true;
 		}
-		rte_pktmbuf_free(*pkts);
 		return -1;
 	}
 
@@ -2459,22 +2469,38 @@ virtio_dev_tx_packed(struct virtio_net *dev,
 {
 	uint32_t pkt_idx = 0;
 	uint32_t remained = count;
+	uint16_t i;
 
 	do {
 		rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]);
 
 		if (remained >= PACKED_BATCH_SIZE) {
-			if (!virtio_dev_tx_batch_packed(dev, vq, mbuf_pool,
+			vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
+				pkts[pkt_idx + i] =
+					rte_pktmbuf_alloc(mbuf_pool);
+			}
+
+			if (!virtio_dev_tx_batch_packed(dev, vq,
 							&pkts[pkt_idx])) {
 				pkt_idx += PACKED_BATCH_SIZE;
 				remained -= PACKED_BATCH_SIZE;
+
 				continue;
+			} else {
+				vhost_for_each_try_unroll(i, 0,
+					PACKED_BATCH_SIZE) {
+					rte_pktmbuf_free(pkts[pkt_idx + i]);
+				}
 			}
 		}
 
+		pkts[pkt_idx] = rte_pktmbuf_alloc(mbuf_pool);
+
 		if (virtio_dev_tx_single_packed(dev, vq, mbuf_pool,
-						&pkts[pkt_idx]))
+						&pkts[pkt_idx])) {
+			rte_pktmbuf_free(pkts[pkt_idx]);
 			break;
+		}
 		pkt_idx++;
 		remained--;
 
-- 
2.30.2


^ permalink raw reply	[flat|nested] 27+ messages in thread

* [dpdk-dev] [PATCH v2 2/4] vhost: perform all mbuf allocations in one loop
  2021-04-06 15:44 ` [dpdk-dev] [PATCH 1/4] vhost: move allocation of mbuf outside of packet enqueue Balazs Nemeth
  2021-04-07 10:17   ` [dpdk-dev] [PATCH v2 " Balazs Nemeth
@ 2021-04-07 10:17   ` Balazs Nemeth
  2021-04-15 15:30     ` Maxime Coquelin
  2021-04-07 10:17   ` [dpdk-dev] [PATCH v2 3/4] vhost: allocate and free packets in bulk Balazs Nemeth
  2021-04-07 10:17   ` [dpdk-dev] [PATCH v2 4/4] vhost: remove unnecessary level of indirection Balazs Nemeth
  3 siblings, 1 reply; 27+ messages in thread
From: Balazs Nemeth @ 2021-04-07 10:17 UTC (permalink / raw)
  To: bnemeth, dev

Move allocation out further and perform all allocation in one loop. The
same goes for freeing packets. This is to prepare for use of bulk
versions of these functions.

Signed-off-by: Balazs Nemeth <bnemeth@redhat.com>
---
 lib/librte_vhost/virtio_net.c | 18 ++++++------------
 1 file changed, 6 insertions(+), 12 deletions(-)

diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index 666e7fdb8..496f750e3 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -2471,14 +2471,13 @@ virtio_dev_tx_packed(struct virtio_net *dev,
 	uint32_t remained = count;
 	uint16_t i;
 
+	for (i = 0; i < count; ++i)
+		pkts[i] = rte_pktmbuf_alloc(mbuf_pool);
+
 	do {
 		rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]);
 
 		if (remained >= PACKED_BATCH_SIZE) {
-			vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
-				pkts[pkt_idx + i] =
-					rte_pktmbuf_alloc(mbuf_pool);
-			}
 
 			if (!virtio_dev_tx_batch_packed(dev, vq,
 							&pkts[pkt_idx])) {
@@ -2486,19 +2485,11 @@ virtio_dev_tx_packed(struct virtio_net *dev,
 				remained -= PACKED_BATCH_SIZE;
 
 				continue;
-			} else {
-				vhost_for_each_try_unroll(i, 0,
-					PACKED_BATCH_SIZE) {
-					rte_pktmbuf_free(pkts[pkt_idx + i]);
-				}
 			}
 		}
 
-		pkts[pkt_idx] = rte_pktmbuf_alloc(mbuf_pool);
-
 		if (virtio_dev_tx_single_packed(dev, vq, mbuf_pool,
 						&pkts[pkt_idx])) {
-			rte_pktmbuf_free(pkts[pkt_idx]);
 			break;
 		}
 		pkt_idx++;
@@ -2506,6 +2497,9 @@ virtio_dev_tx_packed(struct virtio_net *dev,
 
 	} while (remained);
 
+	for (i = pkt_idx; i < count; ++i)
+		rte_pktmbuf_free(pkts[i]);
+
 	if (vq->shadow_used_idx) {
 		do_data_copy_dequeue(vq);
 
-- 
2.30.2


^ permalink raw reply	[flat|nested] 27+ messages in thread

* [dpdk-dev] [PATCH v2 3/4] vhost: allocate and free packets in bulk
  2021-04-06 15:44 ` [dpdk-dev] [PATCH 1/4] vhost: move allocation of mbuf outside of packet enqueue Balazs Nemeth
  2021-04-07 10:17   ` [dpdk-dev] [PATCH v2 " Balazs Nemeth
  2021-04-07 10:17   ` [dpdk-dev] [PATCH v2 2/4] vhost: perform all mbuf allocations in one loop Balazs Nemeth
@ 2021-04-07 10:17   ` Balazs Nemeth
  2021-04-15 15:32     ` Maxime Coquelin
  2021-04-07 10:17   ` [dpdk-dev] [PATCH v2 4/4] vhost: remove unnecessary level of indirection Balazs Nemeth
  3 siblings, 1 reply; 27+ messages in thread
From: Balazs Nemeth @ 2021-04-07 10:17 UTC (permalink / raw)
  To: bnemeth, dev

Now that all allocation and freeing has been moved together, use the
faster bulk versions instead of handling packets one by one.

Signed-off-by: Balazs Nemeth <bnemeth@redhat.com>
---
 lib/librte_vhost/virtio_net.c | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index 496f750e3..2f0c97b91 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -2469,10 +2469,10 @@ virtio_dev_tx_packed(struct virtio_net *dev,
 {
 	uint32_t pkt_idx = 0;
 	uint32_t remained = count;
-	uint16_t i;
 
-	for (i = 0; i < count; ++i)
-		pkts[i] = rte_pktmbuf_alloc(mbuf_pool);
+	if (rte_pktmbuf_alloc_bulk(mbuf_pool, pkts, count)) {
+		return 0;
+	}
 
 	do {
 		rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]);
@@ -2497,8 +2497,9 @@ virtio_dev_tx_packed(struct virtio_net *dev,
 
 	} while (remained);
 
-	for (i = pkt_idx; i < count; ++i)
-		rte_pktmbuf_free(pkts[i]);
+	if (pkt_idx != count) {
+		rte_pktmbuf_free_bulk(&pkts[pkt_idx], count - pkt_idx);
+	}
 
 	if (vq->shadow_used_idx) {
 		do_data_copy_dequeue(vq);
-- 
2.30.2


^ permalink raw reply	[flat|nested] 27+ messages in thread

* [dpdk-dev] [PATCH v2 4/4] vhost: remove unnecessary level of indirection
  2021-04-06 15:44 ` [dpdk-dev] [PATCH 1/4] vhost: move allocation of mbuf outside of packet enqueue Balazs Nemeth
                     ` (2 preceding siblings ...)
  2021-04-07 10:17   ` [dpdk-dev] [PATCH v2 3/4] vhost: allocate and free packets in bulk Balazs Nemeth
@ 2021-04-07 10:17   ` Balazs Nemeth
  2021-04-15 15:38     ` Maxime Coquelin
  3 siblings, 1 reply; 27+ messages in thread
From: Balazs Nemeth @ 2021-04-07 10:17 UTC (permalink / raw)
  To: bnemeth, dev

There is no need to pass a pointer to an mbuf pointer.

Signed-off-by: Balazs Nemeth <bnemeth@redhat.com>
---
 lib/librte_vhost/virtio_net.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index 2f0c97b91..1d3ad18fe 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -2390,7 +2390,7 @@ static __rte_always_inline int
 vhost_dequeue_single_packed(struct virtio_net *dev,
 			    struct vhost_virtqueue *vq,
 			    struct rte_mempool *mbuf_pool,
-			    struct rte_mbuf **pkts,
+			    struct rte_mbuf *pkts,
 			    uint16_t *buf_id,
 			    uint16_t *desc_count)
 {
@@ -2408,7 +2408,7 @@ vhost_dequeue_single_packed(struct virtio_net *dev,
 		return -1;
 
 
-	if (unlikely(virtio_dev_pktmbuf_prep(dev, *pkts, buf_len))) {
+	if (unlikely(virtio_dev_pktmbuf_prep(dev, pkts, buf_len))) {
 		if (!allocerr_warned) {
 			VHOST_LOG_DATA(ERR,
 				"Failed mbuf alloc of size %d from %s on %s.\n",
@@ -2418,7 +2418,7 @@ vhost_dequeue_single_packed(struct virtio_net *dev,
 		return -1;
 	}
 
-	err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, *pkts,
+	err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkts,
 				mbuf_pool);
 	if (unlikely(err)) {
 		if (!allocerr_warned) {
@@ -2437,7 +2437,7 @@ static __rte_always_inline int
 virtio_dev_tx_single_packed(struct virtio_net *dev,
 			    struct vhost_virtqueue *vq,
 			    struct rte_mempool *mbuf_pool,
-			    struct rte_mbuf **pkts)
+			    struct rte_mbuf *pkts)
 {
 
 	uint16_t buf_id, desc_count = 0;
@@ -2489,7 +2489,7 @@ virtio_dev_tx_packed(struct virtio_net *dev,
 		}
 
 		if (virtio_dev_tx_single_packed(dev, vq, mbuf_pool,
-						&pkts[pkt_idx])) {
+						pkts[pkt_idx])) {
 			break;
 		}
 		pkt_idx++;
-- 
2.30.2


^ permalink raw reply	[flat|nested] 27+ messages in thread

* Re: [dpdk-dev] [PATCH v2 1/4] vhost: move allocation of mbuf outside of packet enqueue
  2021-04-07 10:17   ` [dpdk-dev] [PATCH v2 " Balazs Nemeth
@ 2021-04-15 12:37     ` Maxime Coquelin
  2021-04-16  8:18     ` [dpdk-dev] [PATCH v3] vhost: allocate and free packets in bulk Balazs Nemeth
  1 sibling, 0 replies; 27+ messages in thread
From: Maxime Coquelin @ 2021-04-15 12:37 UTC (permalink / raw)
  To: Balazs Nemeth, dev

Hi Balazs,

Hint for future revisions: please add a cover letter when sending multiple
patches; it makes handling the series easier for the maintainer.
Also, please use the MAINTAINERS file to add the other maintainers.

On 4/7/21 12:17 PM, Balazs Nemeth wrote:
> In preparation for subsequent patches, move mbuf allocation out and
> rename virtio_dev_pktmbuf_alloc to virtio_dev_pktmbuf_prep. This
> function now receives an already allocated mbuf pointer.
> 
> Signed-off-by: Balazs Nemeth <bnemeth@redhat.com>
> ---
>  lib/librte_vhost/virtio_net.c | 54 ++++++++++++++++++++++++++---------
>  1 file changed, 40 insertions(+), 14 deletions(-)
> 
> diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
> index 7f621fb6d..666e7fdb8 100644
> --- a/lib/librte_vhost/virtio_net.c
> +++ b/lib/librte_vhost/virtio_net.c
> @@ -2166,6 +2166,23 @@ virtio_dev_pktmbuf_alloc(struct virtio_net *dev, struct rte_mempool *mp,
>  	return NULL;
>  }
>  
> +static __rte_always_inline int
> +virtio_dev_pktmbuf_prep(struct virtio_net *dev, struct rte_mbuf *pkt,
> +			 uint32_t data_len)
> +{
> +	if (rte_pktmbuf_tailroom(pkt) >= data_len)
> +		return 0;
> +
> +	/* attach an external buffer if supported */
> +	if (dev->extbuf && !virtio_dev_extbuf_alloc(pkt, data_len))
> +		return 0;
> +
> +	/* check if chained buffers are allowed */
> +	if (!dev->linearbuf)
> +		return 0;

Add new line here.

> +	return 1;

Maybe return a negative value for consistency.

> +}
> +
>  static __rte_noinline uint16_t
>  virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
>  	struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
> @@ -2259,7 +2276,6 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
>  static __rte_always_inline int
>  vhost_reserve_avail_batch_packed(struct virtio_net *dev,
>  				 struct vhost_virtqueue *vq,
> -				 struct rte_mempool *mbuf_pool,
>  				 struct rte_mbuf **pkts,
>  				 uint16_t avail_idx,
>  				 uintptr_t *desc_addrs,
> @@ -2304,8 +2320,7 @@ vhost_reserve_avail_batch_packed(struct virtio_net *dev,
>  	}
>  
>  	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
> -		pkts[i] = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, lens[i]);
> -		if (!pkts[i])
> +		if (virtio_dev_pktmbuf_prep(dev, pkts[i], lens[i]))
>  			goto free_buf;
>  	}
>  
> @@ -2326,16 +2341,12 @@ vhost_reserve_avail_batch_packed(struct virtio_net *dev,
>  	return 0;
>  
>  free_buf:

Nothing is freed here anymore, so it would be better to rename the label.

> -	for (i = 0; i < PACKED_BATCH_SIZE; i++)
> -		rte_pktmbuf_free(pkts[i]);
> -
>  	return -1;
>  }
>  
>  static __rte_always_inline int
>  virtio_dev_tx_batch_packed(struct virtio_net *dev,
>  			   struct vhost_virtqueue *vq,
> -			   struct rte_mempool *mbuf_pool,
>  			   struct rte_mbuf **pkts)
>  {
>  	uint16_t avail_idx = vq->last_avail_idx;
> @@ -2345,8 +2356,8 @@ virtio_dev_tx_batch_packed(struct virtio_net *dev,
>  	uint16_t ids[PACKED_BATCH_SIZE];
>  	uint16_t i;
>  
> -	if (vhost_reserve_avail_batch_packed(dev, vq, mbuf_pool, pkts,
> -					     avail_idx, desc_addrs, ids))
> +	if (vhost_reserve_avail_batch_packed(dev, vq, pkts, avail_idx,
> +					     desc_addrs, ids))
>  		return -1;
>  
>  	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
> @@ -2396,8 +2407,8 @@ vhost_dequeue_single_packed(struct virtio_net *dev,
>  					 VHOST_ACCESS_RO) < 0))
>  		return -1;
>  
> -	*pkts = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, buf_len);
> -	if (unlikely(*pkts == NULL)) {
> +
> +	if (unlikely(virtio_dev_pktmbuf_prep(dev, *pkts, buf_len))) {
>  		if (!allocerr_warned) {
>  			VHOST_LOG_DATA(ERR,
>  				"Failed mbuf alloc of size %d from %s on %s.\n",
> @@ -2416,7 +2427,6 @@ vhost_dequeue_single_packed(struct virtio_net *dev,
>  				dev->ifname);
>  			allocerr_warned = true;
>  		}
> -		rte_pktmbuf_free(*pkts);
>  		return -1;
>  	}
>  
> @@ -2459,22 +2469,38 @@ virtio_dev_tx_packed(struct virtio_net *dev,
>  {
>  	uint32_t pkt_idx = 0;
>  	uint32_t remained = count;
> +	uint16_t i;
>  
>  	do {
>  		rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]);
>  
>  		if (remained >= PACKED_BATCH_SIZE) {
> -			if (!virtio_dev_tx_batch_packed(dev, vq, mbuf_pool,
> +			vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
> +				pkts[pkt_idx + i] =
> +					rte_pktmbuf_alloc(mbuf_pool);

No check on whether the alloc succeeded?

Also, we recently moved to lines of up to 100 chars, so maybe this can fit
on a single line here now.

> +			}
> +
> +			if (!virtio_dev_tx_batch_packed(dev, vq,
>  							&pkts[pkt_idx])) {

Ditto

>  				pkt_idx += PACKED_BATCH_SIZE;
>  				remained -= PACKED_BATCH_SIZE;
> +
>  				continue;
> +			} else {
> +				vhost_for_each_try_unroll(i, 0,
> +					PACKED_BATCH_SIZE) {

Same here

> +					rte_pktmbuf_free(pkts[pkt_idx + i]);
> +				}
>  			}
>  		}
>  
> +		pkts[pkt_idx] = rte_pktmbuf_alloc(mbuf_pool);

Here also you may want to ensure the allocation succeeded.

> +
>  		if (virtio_dev_tx_single_packed(dev, vq, mbuf_pool,
> -						&pkts[pkt_idx]))
> +						&pkts[pkt_idx])) {
> +			rte_pktmbuf_free(pkts[pkt_idx]);
>  			break;
> +		}
>  		pkt_idx++;
>  		remained--;
>  
> 

Maxime


^ permalink raw reply	[flat|nested] 27+ messages in thread

* Re: [dpdk-dev] [PATCH v2 2/4] vhost: perform all mbuf allocations in one loop
  2021-04-07 10:17   ` [dpdk-dev] [PATCH v2 2/4] vhost: perform all mbuf allocations in one loop Balazs Nemeth
@ 2021-04-15 15:30     ` Maxime Coquelin
  0 siblings, 0 replies; 27+ messages in thread
From: Maxime Coquelin @ 2021-04-15 15:30 UTC (permalink / raw)
  To: Balazs Nemeth, dev



On 4/7/21 12:17 PM, Balazs Nemeth wrote:
> Move allocation out further and perform all allocation in one loop. The
> same goes for freeing packets. This is to prepare for use of bulk
> versions of these functions.
> 
> Signed-off-by: Balazs Nemeth <bnemeth@redhat.com>
> ---
>  lib/librte_vhost/virtio_net.c | 18 ++++++------------
>  1 file changed, 6 insertions(+), 12 deletions(-)
> 
> diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
> index 666e7fdb8..496f750e3 100644
> --- a/lib/librte_vhost/virtio_net.c
> +++ b/lib/librte_vhost/virtio_net.c
> @@ -2471,14 +2471,13 @@ virtio_dev_tx_packed(struct virtio_net *dev,
>  	uint32_t remained = count;
>  	uint16_t i;
>  
> +	for (i = 0; i < count; ++i)
> +		pkts[i] = rte_pktmbuf_alloc(mbuf_pool);

Same comment as for previous patch, we need to check whether allocation
succeeded.


^ permalink raw reply	[flat|nested] 27+ messages in thread

* Re: [dpdk-dev] [PATCH v2 3/4] vhost: allocate and free packets in bulk
  2021-04-07 10:17   ` [dpdk-dev] [PATCH v2 3/4] vhost: allocate and free packets in bulk Balazs Nemeth
@ 2021-04-15 15:32     ` Maxime Coquelin
  2021-04-15 15:45       ` David Marchand
  0 siblings, 1 reply; 27+ messages in thread
From: Maxime Coquelin @ 2021-04-15 15:32 UTC (permalink / raw)
  To: Balazs Nemeth, dev



On 4/7/21 12:17 PM, Balazs Nemeth wrote:
> Now that all allocation and freeing has been moved together, use the
> faster bulk versions instead of handling packets one by one.
> 
> Signed-off-by: Balazs Nemeth <bnemeth@redhat.com>
> ---
>  lib/librte_vhost/virtio_net.c | 11 ++++++-----
>  1 file changed, 6 insertions(+), 5 deletions(-)
> 
> diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
> index 496f750e3..2f0c97b91 100644
> --- a/lib/librte_vhost/virtio_net.c
> +++ b/lib/librte_vhost/virtio_net.c
> @@ -2469,10 +2469,10 @@ virtio_dev_tx_packed(struct virtio_net *dev,
>  {
>  	uint32_t pkt_idx = 0;
>  	uint32_t remained = count;
> -	uint16_t i;
>  
> -	for (i = 0; i < count; ++i)
> -		pkts[i] = rte_pktmbuf_alloc(mbuf_pool);
> +	if (rte_pktmbuf_alloc_bulk(mbuf_pool, pkts, count)) {
> +		return 0;
> +	}

OK, get it now :)
Maybe just add a comment in previous patches that it is going to be
handled when moving to bulk allocation.

>  
>  	do {
>  		rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]);
> @@ -2497,8 +2497,9 @@ virtio_dev_tx_packed(struct virtio_net *dev,
>  
>  	} while (remained);
>  
> -	for (i = pkt_idx; i < count; ++i)
> -		rte_pktmbuf_free(pkts[i]);
> +	if (pkt_idx != count) {
> +		rte_pktmbuf_free_bulk(&pkts[pkt_idx], count - pkt_idx);
> +	}
>  
>  	if (vq->shadow_used_idx) {
>  		do_data_copy_dequeue(vq);
> 


^ permalink raw reply	[flat|nested] 27+ messages in thread

* Re: [dpdk-dev] [PATCH v2 4/4] vhost: remove unnecessary level of indirection
  2021-04-07 10:17   ` [dpdk-dev] [PATCH v2 4/4] vhost: remove unnecessary level of indirection Balazs Nemeth
@ 2021-04-15 15:38     ` Maxime Coquelin
  0 siblings, 0 replies; 27+ messages in thread
From: Maxime Coquelin @ 2021-04-15 15:38 UTC (permalink / raw)
  To: Balazs Nemeth, dev



On 4/7/21 12:17 PM, Balazs Nemeth wrote:
> There is no need to pass a pointer to an mbuf pointer.
> 
> Signed-off-by: Balazs Nemeth <bnemeth@redhat.com>
> ---
>  lib/librte_vhost/virtio_net.c | 10 +++++-----
>  1 file changed, 5 insertions(+), 5 deletions(-)
> 

Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>

Thanks,
Maxime


^ permalink raw reply	[flat|nested] 27+ messages in thread

* Re: [dpdk-dev] [PATCH v2 3/4] vhost: allocate and free packets in bulk
  2021-04-15 15:32     ` Maxime Coquelin
@ 2021-04-15 15:45       ` David Marchand
  2021-04-15 15:50         ` Maxime Coquelin
  0 siblings, 1 reply; 27+ messages in thread
From: David Marchand @ 2021-04-15 15:45 UTC (permalink / raw)
  To: Maxime Coquelin, Balazs Nemeth; +Cc: dev

On Thu, Apr 15, 2021 at 5:33 PM Maxime Coquelin
<maxime.coquelin@redhat.com> wrote:
> On 4/7/21 12:17 PM, Balazs Nemeth wrote:
> > Now that all allocation and freeing has been moved together, use the
> > faster bulk versions instead of handling packets one by one.
> >
> > Signed-off-by: Balazs Nemeth <bnemeth@redhat.com>
> > ---
> >  lib/librte_vhost/virtio_net.c | 11 ++++++-----
> >  1 file changed, 6 insertions(+), 5 deletions(-)
> >
> > diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
> > index 496f750e3..2f0c97b91 100644
> > --- a/lib/librte_vhost/virtio_net.c
> > +++ b/lib/librte_vhost/virtio_net.c
> > @@ -2469,10 +2469,10 @@ virtio_dev_tx_packed(struct virtio_net *dev,
> >  {
> >       uint32_t pkt_idx = 0;
> >       uint32_t remained = count;
> > -     uint16_t i;
> >
> > -     for (i = 0; i < count; ++i)
> > -             pkts[i] = rte_pktmbuf_alloc(mbuf_pool);
> > +     if (rte_pktmbuf_alloc_bulk(mbuf_pool, pkts, count)) {
> > +             return 0;
> > +     }
>
> OK, get it now :)
> Maybe just add a comment in previous patches that it is going to be
> handled when moving to bulk allocation.

Looking at the whole series, it is easy to read and understand as a
single patch.
And it avoids this intermediate issue.



-- 
David Marchand


^ permalink raw reply	[flat|nested] 27+ messages in thread

* Re: [dpdk-dev] [PATCH v2 3/4] vhost: allocate and free packets in bulk
  2021-04-15 15:45       ` David Marchand
@ 2021-04-15 15:50         ` Maxime Coquelin
  2021-04-15 15:58           ` Maxime Coquelin
  0 siblings, 1 reply; 27+ messages in thread
From: Maxime Coquelin @ 2021-04-15 15:50 UTC (permalink / raw)
  To: David Marchand, Balazs Nemeth; +Cc: dev



On 4/15/21 5:45 PM, David Marchand wrote:
> On Thu, Apr 15, 2021 at 5:33 PM Maxime Coquelin
> <maxime.coquelin@redhat.com> wrote:
>> On 4/7/21 12:17 PM, Balazs Nemeth wrote:
>>> Now that all allocation and freeing has been moved together, use the
>>> faster bulk versions instead of handling packets one by one.
>>>
>>> Signed-off-by: Balazs Nemeth <bnemeth@redhat.com>
>>> ---
>>>  lib/librte_vhost/virtio_net.c | 11 ++++++-----
>>>  1 file changed, 6 insertions(+), 5 deletions(-)
>>>
>>> diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
>>> index 496f750e3..2f0c97b91 100644
>>> --- a/lib/librte_vhost/virtio_net.c
>>> +++ b/lib/librte_vhost/virtio_net.c
>>> @@ -2469,10 +2469,10 @@ virtio_dev_tx_packed(struct virtio_net *dev,
>>>  {
>>>       uint32_t pkt_idx = 0;
>>>       uint32_t remained = count;
>>> -     uint16_t i;
>>>
>>> -     for (i = 0; i < count; ++i)
>>> -             pkts[i] = rte_pktmbuf_alloc(mbuf_pool);
>>> +     if (rte_pktmbuf_alloc_bulk(mbuf_pool, pkts, count)) {
>>> +             return 0;
>>> +     }
>>
>> OK, get it now :)
>> Maybe just add a comment in previous patches that it is going to be
>> handled when moving to bulk allocation.
> 
> Looking at the whole series, it is easy to read and understand as a
> single patch.
> And it avoids this intermediate issue.

I agree with David, better to squash the first three patches.

Thanks,
Maxime


^ permalink raw reply	[flat|nested] 27+ messages in thread

* Re: [dpdk-dev] [PATCH v2 3/4] vhost: allocate and free packets in bulk
  2021-04-15 15:50         ` Maxime Coquelin
@ 2021-04-15 15:58           ` Maxime Coquelin
  0 siblings, 0 replies; 27+ messages in thread
From: Maxime Coquelin @ 2021-04-15 15:58 UTC (permalink / raw)
  To: David Marchand, Balazs Nemeth; +Cc: dev



On 4/15/21 5:50 PM, Maxime Coquelin wrote:
> 
> 
> On 4/15/21 5:45 PM, David Marchand wrote:
>> On Thu, Apr 15, 2021 at 5:33 PM Maxime Coquelin
>> <maxime.coquelin@redhat.com> wrote:
>>> On 4/7/21 12:17 PM, Balazs Nemeth wrote:
>>>> Now that all allocation and freeing has been moved together, use the
>>>> faster bulk versions instead of handling packets one by one.
>>>>
>>>> Signed-off-by: Balazs Nemeth <bnemeth@redhat.com>
>>>> ---
>>>>  lib/librte_vhost/virtio_net.c | 11 ++++++-----
>>>>  1 file changed, 6 insertions(+), 5 deletions(-)
>>>>
>>>> diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
>>>> index 496f750e3..2f0c97b91 100644
>>>> --- a/lib/librte_vhost/virtio_net.c
>>>> +++ b/lib/librte_vhost/virtio_net.c
>>>> @@ -2469,10 +2469,10 @@ virtio_dev_tx_packed(struct virtio_net *dev,
>>>>  {
>>>>       uint32_t pkt_idx = 0;
>>>>       uint32_t remained = count;
>>>> -     uint16_t i;
>>>>
>>>> -     for (i = 0; i < count; ++i)
>>>> -             pkts[i] = rte_pktmbuf_alloc(mbuf_pool);
>>>> +     if (rte_pktmbuf_alloc_bulk(mbuf_pool, pkts, count)) {
>>>> +             return 0;
>>>> +     }
>>>
>>> OK, get it now :)
>>> Maybe just add a comment in previous patches that it is going to be
>>> handled when moving to bulk allocation.
>>
>> Looking at the whole series, it is easy to read and understand as a
>> single patch.
>> And it avoids this intermediate issue.
> 
> I agree with David, better to squash the first three patches.

As David told me off-list, all the series can be squashed since the last
patch is a consequence of doing bulk allocation.

Cheers,
Maxime

> Thanks,
> Maxime
> 


^ permalink raw reply	[flat|nested] 27+ messages in thread

* [dpdk-dev] [PATCH v3] vhost: allocate and free packets in bulk
  2021-04-07 10:17   ` [dpdk-dev] [PATCH v2 " Balazs Nemeth
  2021-04-15 12:37     ` Maxime Coquelin
@ 2021-04-16  8:18     ` Balazs Nemeth
  2021-04-16  8:36       ` Maxime Coquelin
                         ` (2 more replies)
  1 sibling, 3 replies; 27+ messages in thread
From: Balazs Nemeth @ 2021-04-16  8:18 UTC (permalink / raw)
  To: bnemeth, dev; +Cc: maxime.coquelin, david.marchand

Move allocation out further and perform all allocation in bulk. The same
goes for freeing packets. In the process, also rename
virtio_dev_pktmbuf_alloc to virtio_dev_pktmbuf_prep. This
function now receives an already allocated mbuf pointer.

Signed-off-by: Balazs Nemeth <bnemeth@redhat.com>
---
 lib/librte_vhost/virtio_net.c | 58 +++++++++++++++++++++++------------
 1 file changed, 38 insertions(+), 20 deletions(-)

diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index ff39878609..d6d5636e0f 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -2168,6 +2168,24 @@ virtio_dev_pktmbuf_alloc(struct virtio_net *dev, struct rte_mempool *mp,
 	return NULL;
 }
 
+static __rte_always_inline int
+virtio_dev_pktmbuf_prep(struct virtio_net *dev, struct rte_mbuf *pkt,
+			 uint32_t data_len)
+{
+	if (rte_pktmbuf_tailroom(pkt) >= data_len)
+		return 0;
+
+	/* attach an external buffer if supported */
+	if (dev->extbuf && !virtio_dev_extbuf_alloc(pkt, data_len))
+		return 0;
+
+	/* check if chained buffers are allowed */
+	if (!dev->linearbuf)
+		return 0;
+
+	return -1;
+}
+
 static __rte_noinline uint16_t
 virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
@@ -2261,7 +2279,6 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 static __rte_always_inline int
 vhost_reserve_avail_batch_packed(struct virtio_net *dev,
 				 struct vhost_virtqueue *vq,
-				 struct rte_mempool *mbuf_pool,
 				 struct rte_mbuf **pkts,
 				 uint16_t avail_idx,
 				 uintptr_t *desc_addrs,
@@ -2306,9 +2323,8 @@ vhost_reserve_avail_batch_packed(struct virtio_net *dev,
 	}
 
 	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
-		pkts[i] = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, lens[i]);
-		if (!pkts[i])
-			goto free_buf;
+		if (virtio_dev_pktmbuf_prep(dev, pkts[i], lens[i]))
+			goto err;
 	}
 
 	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
@@ -2316,7 +2332,7 @@ vhost_reserve_avail_batch_packed(struct virtio_net *dev,
 
 	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
 		if (unlikely(buf_lens[i] < (lens[i] - buf_offset)))
-			goto free_buf;
+			goto err;
 	}
 
 	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
@@ -2327,17 +2343,13 @@ vhost_reserve_avail_batch_packed(struct virtio_net *dev,
 
 	return 0;
 
-free_buf:
-	for (i = 0; i < PACKED_BATCH_SIZE; i++)
-		rte_pktmbuf_free(pkts[i]);
-
+err:
 	return -1;
 }
 
 static __rte_always_inline int
 virtio_dev_tx_batch_packed(struct virtio_net *dev,
 			   struct vhost_virtqueue *vq,
-			   struct rte_mempool *mbuf_pool,
 			   struct rte_mbuf **pkts)
 {
 	uint16_t avail_idx = vq->last_avail_idx;
@@ -2347,8 +2359,8 @@ virtio_dev_tx_batch_packed(struct virtio_net *dev,
 	uint16_t ids[PACKED_BATCH_SIZE];
 	uint16_t i;
 
-	if (vhost_reserve_avail_batch_packed(dev, vq, mbuf_pool, pkts,
-					     avail_idx, desc_addrs, ids))
+	if (vhost_reserve_avail_batch_packed(dev, vq, pkts, avail_idx,
+					     desc_addrs, ids))
 		return -1;
 
 	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
@@ -2381,7 +2393,7 @@ static __rte_always_inline int
 vhost_dequeue_single_packed(struct virtio_net *dev,
 			    struct vhost_virtqueue *vq,
 			    struct rte_mempool *mbuf_pool,
-			    struct rte_mbuf **pkts,
+			    struct rte_mbuf *pkts,
 			    uint16_t *buf_id,
 			    uint16_t *desc_count)
 {
@@ -2398,8 +2410,8 @@ vhost_dequeue_single_packed(struct virtio_net *dev,
 					 VHOST_ACCESS_RO) < 0))
 		return -1;
 
-	*pkts = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, buf_len);
-	if (unlikely(*pkts == NULL)) {
+
+	if (unlikely(virtio_dev_pktmbuf_prep(dev, pkts, buf_len))) {
 		if (!allocerr_warned) {
 			VHOST_LOG_DATA(ERR,
 				"Failed mbuf alloc of size %d from %s on %s.\n",
@@ -2409,7 +2421,7 @@ vhost_dequeue_single_packed(struct virtio_net *dev,
 		return -1;
 	}
 
-	err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, *pkts,
+	err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkts,
 				mbuf_pool);
 	if (unlikely(err)) {
 		if (!allocerr_warned) {
@@ -2418,7 +2430,6 @@ vhost_dequeue_single_packed(struct virtio_net *dev,
 				dev->ifname);
 			allocerr_warned = true;
 		}
-		rte_pktmbuf_free(*pkts);
 		return -1;
 	}
 
@@ -2429,7 +2440,7 @@ static __rte_always_inline int
 virtio_dev_tx_single_packed(struct virtio_net *dev,
 			    struct vhost_virtqueue *vq,
 			    struct rte_mempool *mbuf_pool,
-			    struct rte_mbuf **pkts)
+			    struct rte_mbuf *pkts)
 {
 
 	uint16_t buf_id, desc_count = 0;
@@ -2462,26 +2473,33 @@ virtio_dev_tx_packed(struct virtio_net *dev,
 	uint32_t pkt_idx = 0;
 	uint32_t remained = count;
 
+	if (rte_pktmbuf_alloc_bulk(mbuf_pool, pkts, count))
+		return 0;
+
 	do {
 		rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]);
 
 		if (remained >= PACKED_BATCH_SIZE) {
-			if (!virtio_dev_tx_batch_packed(dev, vq, mbuf_pool,
+			if (!virtio_dev_tx_batch_packed(dev, vq,
 							&pkts[pkt_idx])) {
 				pkt_idx += PACKED_BATCH_SIZE;
 				remained -= PACKED_BATCH_SIZE;
+
 				continue;
 			}
 		}
 
 		if (virtio_dev_tx_single_packed(dev, vq, mbuf_pool,
-						&pkts[pkt_idx]))
+						pkts[pkt_idx]))
 			break;
 		pkt_idx++;
 		remained--;
 
 	} while (remained);
 
+	if (pkt_idx != count)
+		rte_pktmbuf_free_bulk(&pkts[pkt_idx], count - pkt_idx);
+
 	if (vq->shadow_used_idx) {
 		do_data_copy_dequeue(vq);
 
-- 
2.30.2


^ permalink raw reply	[flat|nested] 27+ messages in thread

* Re: [dpdk-dev] [PATCH v3] vhost: allocate and free packets in bulk
  2021-04-16  8:18     ` [dpdk-dev] [PATCH v3] vhost: allocate and free packets in bulk Balazs Nemeth
@ 2021-04-16  8:36       ` Maxime Coquelin
  2021-04-16  9:05       ` David Marchand
  2021-04-16  9:48       ` [dpdk-dev] [PATCH v4] " Balazs Nemeth
  2 siblings, 0 replies; 27+ messages in thread
From: Maxime Coquelin @ 2021-04-16  8:36 UTC (permalink / raw)
  To: Balazs Nemeth, dev; +Cc: david.marchand



On 4/16/21 10:18 AM, Balazs Nemeth wrote:
> Move allocation out further and perform all allocation in bulk. The same
> goes for freeing packets. In the process, also rename
> virtio_dev_pktmbuf_alloc to virtio_dev_pktmbuf_prep. This
> function now receives an already allocated mbuf pointer.
> 
> Signed-off-by: Balazs Nemeth <bnemeth@redhat.com>
> ---
>  lib/librte_vhost/virtio_net.c | 58 +++++++++++++++++++++++------------
>  1 file changed, 38 insertions(+), 20 deletions(-)
> 


Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>

Thanks,
Maxime


^ permalink raw reply	[flat|nested] 27+ messages in thread

* Re: [dpdk-dev] [PATCH v3] vhost: allocate and free packets in bulk
  2021-04-16  8:18     ` [dpdk-dev] [PATCH v3] vhost: allocate and free packets in bulk Balazs Nemeth
  2021-04-16  8:36       ` Maxime Coquelin
@ 2021-04-16  9:05       ` David Marchand
  2021-04-16  9:12         ` Balazs Nemeth
  2021-04-16  9:48       ` [dpdk-dev] [PATCH v4] " Balazs Nemeth
  2 siblings, 1 reply; 27+ messages in thread
From: David Marchand @ 2021-04-16  9:05 UTC (permalink / raw)
  To: Balazs Nemeth; +Cc: dev, Maxime Coquelin, Xia, Chenbo

On Fri, Apr 16, 2021 at 10:18 AM Balazs Nemeth <bnemeth@redhat.com> wrote:
>
> Move allocation out further and perform all allocation in bulk. The same
> goes for freeing packets. In the process, also rename
> virtio_dev_pktmbuf_alloc to virtio_dev_pktmbuf_prep. This
> function now receives an already allocated mbuf pointer.
>
> Signed-off-by: Balazs Nemeth <bnemeth@redhat.com>

The title should indicate we are only touching the tx packed path.

What about tx split?
If it is too complex to rework, this can wait.


> ---
>  lib/librte_vhost/virtio_net.c | 58 +++++++++++++++++++++++------------
>  1 file changed, 38 insertions(+), 20 deletions(-)
>
> diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
> index ff39878609..d6d5636e0f 100644
> --- a/lib/librte_vhost/virtio_net.c
> +++ b/lib/librte_vhost/virtio_net.c
> @@ -2168,6 +2168,24 @@ virtio_dev_pktmbuf_alloc(struct virtio_net *dev, struct rte_mempool *mp,
>         return NULL;
>  }
>
> +static __rte_always_inline int
> +virtio_dev_pktmbuf_prep(struct virtio_net *dev, struct rte_mbuf *pkt,
> +                        uint32_t data_len)
> +{
> +       if (rte_pktmbuf_tailroom(pkt) >= data_len)
> +               return 0;
> +
> +       /* attach an external buffer if supported */
> +       if (dev->extbuf && !virtio_dev_extbuf_alloc(pkt, data_len))
> +               return 0;
> +
> +       /* check if chained buffers are allowed */
> +       if (!dev->linearbuf)
> +               return 0;
> +
> +       return -1;
> +}
> +

If virtio_dev_pktmbuf_alloc() uses this new helper, we avoid
duplicating the logic.


>  static __rte_noinline uint16_t
>  virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
>         struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)

[snip]

> @@ -2429,7 +2440,7 @@ static __rte_always_inline int
>  virtio_dev_tx_single_packed(struct virtio_net *dev,
>                             struct vhost_virtqueue *vq,
>                             struct rte_mempool *mbuf_pool,
> -                           struct rte_mbuf **pkts)
> +                           struct rte_mbuf *pkts)
>  {
>
>         uint16_t buf_id, desc_count = 0;
> @@ -2462,26 +2473,33 @@ virtio_dev_tx_packed(struct virtio_net *dev,
>         uint32_t pkt_idx = 0;
>         uint32_t remained = count;
>
> +       if (rte_pktmbuf_alloc_bulk(mbuf_pool, pkts, count))
> +               return 0;
> +
>         do {
>                 rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]);
>
>                 if (remained >= PACKED_BATCH_SIZE) {
> -                       if (!virtio_dev_tx_batch_packed(dev, vq, mbuf_pool,
> +                       if (!virtio_dev_tx_batch_packed(dev, vq,
>                                                         &pkts[pkt_idx])) {
>                                 pkt_idx += PACKED_BATCH_SIZE;
>                                 remained -= PACKED_BATCH_SIZE;
> +

No need for the extra line.


>                                 continue;
>                         }
>                 }
>
>                 if (virtio_dev_tx_single_packed(dev, vq, mbuf_pool,
> -                                               &pkts[pkt_idx]))
> +                                               pkts[pkt_idx]))
>                         break;
>                 pkt_idx++;
>                 remained--;
>
>         } while (remained);
>
> +       if (pkt_idx != count)
> +               rte_pktmbuf_free_bulk(&pkts[pkt_idx], count - pkt_idx);
> +
>         if (vq->shadow_used_idx) {
>                 do_data_copy_dequeue(vq);
>

With those comments addressed,
Reviewed-by: David Marchand <david.marchand@redhat.com>

Thanks Balazs!


-- 
David Marchand


^ permalink raw reply	[flat|nested] 27+ messages in thread

* Re: [dpdk-dev] [PATCH v3] vhost: allocate and free packets in bulk
  2021-04-16  9:05       ` David Marchand
@ 2021-04-16  9:12         ` Balazs Nemeth
  2021-04-16  9:41           ` Maxime Coquelin
  2021-04-16  9:43           ` David Marchand
  0 siblings, 2 replies; 27+ messages in thread
From: Balazs Nemeth @ 2021-04-16  9:12 UTC (permalink / raw)
  To: David Marchand; +Cc: dev, Maxime Coquelin, Xia, Chenbo

Hi David,

I was also thinking about using the same idea for tx split so that
virtio_dev_pktmbuf_alloc can be removed completely. I don't have those
patches yet. However, I can make virtio_dev_pktmbuf_alloc use
virtio_dev_pktmbuf_prep for this patch that addresses the packed
version and submit other patches for tx split later. Would that work for
now?

Regards,
Balazs

On Fri, Apr 16, 2021 at 11:05 AM David Marchand <david.marchand@redhat.com>
wrote:

> On Fri, Apr 16, 2021 at 10:18 AM Balazs Nemeth <bnemeth@redhat.com> wrote:
> >
> > Move allocation out further and perform all allocation in bulk. The same
> > goes for freeing packets. In the process, also rename
> > virtio_dev_pktmbuf_alloc to virtio_dev_pktmbuf_prep. This
> > function now receives an already allocated mbuf pointer.
> >
> > Signed-off-by: Balazs Nemeth <bnemeth@redhat.com>
>
> The title should indicate we are only touching the tx packed path.
>
> What about tx split?
> If it is too complex to rework, this can wait.
>
>
> > ---
> >  lib/librte_vhost/virtio_net.c | 58 +++++++++++++++++++++++------------
> >  1 file changed, 38 insertions(+), 20 deletions(-)
> >
> > diff --git a/lib/librte_vhost/virtio_net.c
> b/lib/librte_vhost/virtio_net.c
> > index ff39878609..d6d5636e0f 100644
> > --- a/lib/librte_vhost/virtio_net.c
> > +++ b/lib/librte_vhost/virtio_net.c
> > @@ -2168,6 +2168,24 @@ virtio_dev_pktmbuf_alloc(struct virtio_net *dev,
> struct rte_mempool *mp,
> >         return NULL;
> >  }
> >
> > +static __rte_always_inline int
> > +virtio_dev_pktmbuf_prep(struct virtio_net *dev, struct rte_mbuf *pkt,
> > +                        uint32_t data_len)
> > +{
> > +       if (rte_pktmbuf_tailroom(pkt) >= data_len)
> > +               return 0;
> > +
> > +       /* attach an external buffer if supported */
> > +       if (dev->extbuf && !virtio_dev_extbuf_alloc(pkt, data_len))
> > +               return 0;
> > +
> > +       /* check if chained buffers are allowed */
> > +       if (!dev->linearbuf)
> > +               return 0;
> > +
> > +       return -1;
> > +}
> > +
>
> If virtio_dev_pktmbuf_alloc() uses this new helper, we avoid
> duplicating the logic.
>
>
> >  static __rte_noinline uint16_t
> >  virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
> >         struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t
> count)
>
> [snip]
>
> > @@ -2429,7 +2440,7 @@ static __rte_always_inline int
> >  virtio_dev_tx_single_packed(struct virtio_net *dev,
> >                             struct vhost_virtqueue *vq,
> >                             struct rte_mempool *mbuf_pool,
> > -                           struct rte_mbuf **pkts)
> > +                           struct rte_mbuf *pkts)
> >  {
> >
> >         uint16_t buf_id, desc_count = 0;
> > @@ -2462,26 +2473,33 @@ virtio_dev_tx_packed(struct virtio_net *dev,
> >         uint32_t pkt_idx = 0;
> >         uint32_t remained = count;
> >
> > +       if (rte_pktmbuf_alloc_bulk(mbuf_pool, pkts, count))
> > +               return 0;
> > +
> >         do {
> >                 rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]);
> >
> >                 if (remained >= PACKED_BATCH_SIZE) {
> > -                       if (!virtio_dev_tx_batch_packed(dev, vq,
> mbuf_pool,
> > +                       if (!virtio_dev_tx_batch_packed(dev, vq,
> >                                                         &pkts[pkt_idx]))
> {
> >                                 pkt_idx += PACKED_BATCH_SIZE;
> >                                 remained -= PACKED_BATCH_SIZE;
> > +
>
> No need for the extra line.
>
>
> >                                 continue;
> >                         }
> >                 }
> >
> >                 if (virtio_dev_tx_single_packed(dev, vq, mbuf_pool,
> > -                                               &pkts[pkt_idx]))
> > +                                               pkts[pkt_idx]))
> >                         break;
> >                 pkt_idx++;
> >                 remained--;
> >
> >         } while (remained);
> >
> > +       if (pkt_idx != count)
> > +               rte_pktmbuf_free_bulk(&pkts[pkt_idx], count - pkt_idx);
> > +
> >         if (vq->shadow_used_idx) {
> >                 do_data_copy_dequeue(vq);
> >
>
> With those comments addressed,
> Reviewed-by: David Marchand <david.marchand@redhat.com>
>
> Thanks Balazs!
>
>
> --
> David Marchand
>
>

^ permalink raw reply	[flat|nested] 27+ messages in thread

* Re: [dpdk-dev] [PATCH v3] vhost: allocate and free packets in bulk
  2021-04-16  9:12         ` Balazs Nemeth
@ 2021-04-16  9:41           ` Maxime Coquelin
  2021-04-16  9:43           ` David Marchand
  1 sibling, 0 replies; 27+ messages in thread
From: Maxime Coquelin @ 2021-04-16  9:41 UTC (permalink / raw)
  To: Balazs Nemeth, David Marchand; +Cc: dev, Xia, Chenbo

Hi Balazs,

On 4/16/21 11:12 AM, Balazs Nemeth wrote:
> Hi David,
> 
> I was also thinking about using the same idea for tx split so that
> virtio_dev_pktmbuf_alloc can be removed completely. I don't have those
> patches yet. However, I can make virtio_dev_pktmbuf_alloc use
> virtio_dev_pktmbuf_prep for this patch that addresses the packed
> version and submit other patches for tx split later. Would that work for
> now?

I think so.
That would be great to have the same for split ring, but more rework may
be necessary than for packed ring. So I'm fine if the split ring part is
done after this release.

Thanks,
Maxime
> Regards,
> Balazs 
> 
> On Fri, Apr 16, 2021 at 11:05 AM David Marchand
> <david.marchand@redhat.com <mailto:david.marchand@redhat.com>> wrote:
> 
>     On Fri, Apr 16, 2021 at 10:18 AM Balazs Nemeth <bnemeth@redhat.com
>     <mailto:bnemeth@redhat.com>> wrote:
>     >
>     > Move allocation out further and perform all allocation in bulk.
>     The same
>     > goes for freeing packets. In the process, also rename
>     > virtio_dev_pktmbuf_alloc to virtio_dev_pktmbuf_prep. This
>     > function now receives an already allocated mbuf pointer.
>     >
>     > Signed-off-by: Balazs Nemeth <bnemeth@redhat.com
>     <mailto:bnemeth@redhat.com>>
> 
>     The title should indicate we are only touching the tx packed path.
> 
>     What about tx split?
>     If it is too complex to rework, this can wait.
> 
> 
>     > ---
>     >  lib/librte_vhost/virtio_net.c | 58
>     +++++++++++++++++++++++------------
>     >  1 file changed, 38 insertions(+), 20 deletions(-)
>     >
>     > diff --git a/lib/librte_vhost/virtio_net.c
>     b/lib/librte_vhost/virtio_net.c
>     > index ff39878609..d6d5636e0f 100644
>     > --- a/lib/librte_vhost/virtio_net.c
>     > +++ b/lib/librte_vhost/virtio_net.c
>     > @@ -2168,6 +2168,24 @@ virtio_dev_pktmbuf_alloc(struct virtio_net
>     *dev, struct rte_mempool *mp,
>     >         return NULL;
>     >  }
>     >
>     > +static __rte_always_inline int
>     > +virtio_dev_pktmbuf_prep(struct virtio_net *dev, struct rte_mbuf *pkt,
>     > +                        uint32_t data_len)
>     > +{
>     > +       if (rte_pktmbuf_tailroom(pkt) >= data_len)
>     > +               return 0;
>     > +
>     > +       /* attach an external buffer if supported */
>     > +       if (dev->extbuf && !virtio_dev_extbuf_alloc(pkt, data_len))
>     > +               return 0;
>     > +
>     > +       /* check if chained buffers are allowed */
>     > +       if (!dev->linearbuf)
>     > +               return 0;
>     > +
>     > +       return -1;
>     > +}
>     > +
> 
>     If virtio_dev_pktmbuf_alloc() uses this new helper, we avoid
>     duplicating the logic.
> 
> 
>     >  static __rte_noinline uint16_t
>     >  virtio_dev_tx_split(struct virtio_net *dev, struct
>     vhost_virtqueue *vq,
>     >         struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts,
>     uint16_t count)
> 
>     [snip]
> 
>     > @@ -2429,7 +2440,7 @@ static __rte_always_inline int
>     >  virtio_dev_tx_single_packed(struct virtio_net *dev,
>     >                             struct vhost_virtqueue *vq,
>     >                             struct rte_mempool *mbuf_pool,
>     > -                           struct rte_mbuf **pkts)
>     > +                           struct rte_mbuf *pkts)
>     >  {
>     >
>     >         uint16_t buf_id, desc_count = 0;
>     > @@ -2462,26 +2473,33 @@ virtio_dev_tx_packed(struct virtio_net *dev,
>     >         uint32_t pkt_idx = 0;
>     >         uint32_t remained = count;
>     >
>     > +       if (rte_pktmbuf_alloc_bulk(mbuf_pool, pkts, count))
>     > +               return 0;
>     > +
>     >         do {
>     >                 rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]);
>     >
>     >                 if (remained >= PACKED_BATCH_SIZE) {
>     > -                       if (!virtio_dev_tx_batch_packed(dev, vq,
>     mbuf_pool,
>     > +                       if (!virtio_dev_tx_batch_packed(dev, vq,
>     >                                                       
>      &pkts[pkt_idx])) {
>     >                                 pkt_idx += PACKED_BATCH_SIZE;
>     >                                 remained -= PACKED_BATCH_SIZE;
>     > +
> 
>     No need for the extra line.
> 
> 
>     >                                 continue;
>     >                         }
>     >                 }
>     >
>     >                 if (virtio_dev_tx_single_packed(dev, vq, mbuf_pool,
>     > -                                               &pkts[pkt_idx]))
>     > +                                               pkts[pkt_idx]))
>     >                         break;
>     >                 pkt_idx++;
>     >                 remained--;
>     >
>     >         } while (remained);
>     >
>     > +       if (pkt_idx != count)
>     > +               rte_pktmbuf_free_bulk(&pkts[pkt_idx], count -
>     pkt_idx);
>     > +
>     >         if (vq->shadow_used_idx) {
>     >                 do_data_copy_dequeue(vq);
>     >
> 
>     With those comments addressed,
>     Reviewed-by: David Marchand <david.marchand@redhat.com
>     <mailto:david.marchand@redhat.com>>
> 
>     Thanks Balazs!
> 
> 
>     -- 
>     David Marchand
> 


^ permalink raw reply	[flat|nested] 27+ messages in thread

* Re: [dpdk-dev] [PATCH v3] vhost: allocate and free packets in bulk
  2021-04-16  9:12         ` Balazs Nemeth
  2021-04-16  9:41           ` Maxime Coquelin
@ 2021-04-16  9:43           ` David Marchand
  1 sibling, 0 replies; 27+ messages in thread
From: David Marchand @ 2021-04-16  9:43 UTC (permalink / raw)
  To: Balazs Nemeth; +Cc: dev, Maxime Coquelin, Xia, Chenbo

On Fri, Apr 16, 2021 at 11:12 AM Balazs Nemeth <bnemeth@redhat.com> wrote:
> I was also thinking about using the same idea for tx split so that virtio_dev_pktmbuf_alloc can be removed completely. I don't have those patches yet. However, I can make virtio_dev_pktmbuf_alloc use virtio_dev_pktmbuf_prep for this patch that addresses the packed version and submit other patches for tx split later. Would that work for now?

Ok for me.

And revisit the tx split part in 21.08 :-).



-- 
David Marchand


^ permalink raw reply	[flat|nested] 27+ messages in thread

* [dpdk-dev] [PATCH v4] vhost: allocate and free packets in bulk
  2021-04-16  8:18     ` [dpdk-dev] [PATCH v3] vhost: allocate and free packets in bulk Balazs Nemeth
  2021-04-16  8:36       ` Maxime Coquelin
  2021-04-16  9:05       ` David Marchand
@ 2021-04-16  9:48       ` Balazs Nemeth
  2021-04-16 10:25         ` [dpdk-dev] [PATCH v5] vhost: allocate and free packets in bulk in Tx packed Balazs Nemeth
  2 siblings, 1 reply; 27+ messages in thread
From: Balazs Nemeth @ 2021-04-16  9:48 UTC (permalink / raw)
  To: bnemeth, dev; +Cc: maxime.coquelin, david.marchand

Move allocation out further and perform all allocation in bulk. The same
goes for freeing packets. In the process, also introduce
virtio_dev_pktmbuf_prep and make virtio_dev_pktmbuf_alloc use that.

Signed-off-by: Balazs Nemeth <bnemeth@redhat.com>
---
 lib/librte_vhost/virtio_net.c | 80 +++++++++++++++++++----------------
 1 file changed, 44 insertions(+), 36 deletions(-)

diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index ff39878609..95d2454de0 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -2134,6 +2134,24 @@ virtio_dev_extbuf_alloc(struct rte_mbuf *pkt, uint32_t size)
 	return 0;
 }
 
+static __rte_always_inline int
+virtio_dev_pktmbuf_prep(struct virtio_net *dev, struct rte_mbuf *pkt,
+			 uint32_t data_len)
+{
+	if (rte_pktmbuf_tailroom(pkt) >= data_len)
+		return 0;
+
+	/* attach an external buffer if supported */
+	if (dev->extbuf && !virtio_dev_extbuf_alloc(pkt, data_len))
+		return 0;
+
+	/* check if chained buffers are allowed */
+	if (!dev->linearbuf)
+		return 0;
+
+	return -1;
+}
+
 /*
  * Allocate a host supported pktmbuf.
  */
@@ -2149,23 +2167,15 @@ virtio_dev_pktmbuf_alloc(struct virtio_net *dev, struct rte_mempool *mp,
 		return NULL;
 	}
 
-	if (rte_pktmbuf_tailroom(pkt) >= data_len)
-		return pkt;
+	if (virtio_dev_pktmbuf_prep(dev, pkt, data_len)) {
+		/* Data doesn't fit into the buffer and the host supports
+		 * only linear buffers
+		 */
+		rte_pktmbuf_free(pkt);
+		return NULL;
+	}
 
-	/* attach an external buffer if supported */
-	if (dev->extbuf && !virtio_dev_extbuf_alloc(pkt, data_len))
-		return pkt;
-
-	/* check if chained buffers are allowed */
-	if (!dev->linearbuf)
-		return pkt;
-
-	/* Data doesn't fit into the buffer and the host supports
-	 * only linear buffers
-	 */
-	rte_pktmbuf_free(pkt);
-
-	return NULL;
+	return pkt;
 }
 
 static __rte_noinline uint16_t
@@ -2261,7 +2271,6 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 static __rte_always_inline int
 vhost_reserve_avail_batch_packed(struct virtio_net *dev,
 				 struct vhost_virtqueue *vq,
-				 struct rte_mempool *mbuf_pool,
 				 struct rte_mbuf **pkts,
 				 uint16_t avail_idx,
 				 uintptr_t *desc_addrs,
@@ -2306,9 +2315,8 @@ vhost_reserve_avail_batch_packed(struct virtio_net *dev,
 	}
 
 	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
-		pkts[i] = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, lens[i]);
-		if (!pkts[i])
-			goto free_buf;
+		if (virtio_dev_pktmbuf_prep(dev, pkts[i], lens[i]))
+			goto err;
 	}
 
 	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
@@ -2316,7 +2324,7 @@ vhost_reserve_avail_batch_packed(struct virtio_net *dev,
 
 	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
 		if (unlikely(buf_lens[i] < (lens[i] - buf_offset)))
-			goto free_buf;
+			goto err;
 	}
 
 	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
@@ -2327,17 +2335,13 @@ vhost_reserve_avail_batch_packed(struct virtio_net *dev,
 
 	return 0;
 
-free_buf:
-	for (i = 0; i < PACKED_BATCH_SIZE; i++)
-		rte_pktmbuf_free(pkts[i]);
-
+err:
 	return -1;
 }
 
 static __rte_always_inline int
 virtio_dev_tx_batch_packed(struct virtio_net *dev,
 			   struct vhost_virtqueue *vq,
-			   struct rte_mempool *mbuf_pool,
 			   struct rte_mbuf **pkts)
 {
 	uint16_t avail_idx = vq->last_avail_idx;
@@ -2347,8 +2351,8 @@ virtio_dev_tx_batch_packed(struct virtio_net *dev,
 	uint16_t ids[PACKED_BATCH_SIZE];
 	uint16_t i;
 
-	if (vhost_reserve_avail_batch_packed(dev, vq, mbuf_pool, pkts,
-					     avail_idx, desc_addrs, ids))
+	if (vhost_reserve_avail_batch_packed(dev, vq, pkts, avail_idx,
+					     desc_addrs, ids))
 		return -1;
 
 	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
@@ -2381,7 +2385,7 @@ static __rte_always_inline int
 vhost_dequeue_single_packed(struct virtio_net *dev,
 			    struct vhost_virtqueue *vq,
 			    struct rte_mempool *mbuf_pool,
-			    struct rte_mbuf **pkts,
+			    struct rte_mbuf *pkts,
 			    uint16_t *buf_id,
 			    uint16_t *desc_count)
 {
@@ -2398,8 +2402,7 @@ vhost_dequeue_single_packed(struct virtio_net *dev,
 					 VHOST_ACCESS_RO) < 0))
 		return -1;
 
-	*pkts = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, buf_len);
-	if (unlikely(*pkts == NULL)) {
+	if (unlikely(virtio_dev_pktmbuf_prep(dev, pkts, buf_len))) {
 		if (!allocerr_warned) {
 			VHOST_LOG_DATA(ERR,
 				"Failed mbuf alloc of size %d from %s on %s.\n",
@@ -2409,7 +2412,7 @@ vhost_dequeue_single_packed(struct virtio_net *dev,
 		return -1;
 	}
 
-	err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, *pkts,
+	err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkts,
 				mbuf_pool);
 	if (unlikely(err)) {
 		if (!allocerr_warned) {
@@ -2418,7 +2421,6 @@ vhost_dequeue_single_packed(struct virtio_net *dev,
 				dev->ifname);
 			allocerr_warned = true;
 		}
-		rte_pktmbuf_free(*pkts);
 		return -1;
 	}
 
@@ -2429,7 +2431,7 @@ static __rte_always_inline int
 virtio_dev_tx_single_packed(struct virtio_net *dev,
 			    struct vhost_virtqueue *vq,
 			    struct rte_mempool *mbuf_pool,
-			    struct rte_mbuf **pkts)
+			    struct rte_mbuf *pkts)
 {
 
 	uint16_t buf_id, desc_count = 0;
@@ -2462,11 +2464,14 @@ virtio_dev_tx_packed(struct virtio_net *dev,
 	uint32_t pkt_idx = 0;
 	uint32_t remained = count;
 
+	if (rte_pktmbuf_alloc_bulk(mbuf_pool, pkts, count))
+		return 0;
+
 	do {
 		rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]);
 
 		if (remained >= PACKED_BATCH_SIZE) {
-			if (!virtio_dev_tx_batch_packed(dev, vq, mbuf_pool,
+			if (!virtio_dev_tx_batch_packed(dev, vq,
 							&pkts[pkt_idx])) {
 				pkt_idx += PACKED_BATCH_SIZE;
 				remained -= PACKED_BATCH_SIZE;
@@ -2475,13 +2480,16 @@ virtio_dev_tx_packed(struct virtio_net *dev,
 		}
 
 		if (virtio_dev_tx_single_packed(dev, vq, mbuf_pool,
-						&pkts[pkt_idx]))
+						pkts[pkt_idx]))
 			break;
 		pkt_idx++;
 		remained--;
 
 	} while (remained);
 
+	if (pkt_idx != count)
+		rte_pktmbuf_free_bulk(&pkts[pkt_idx], count - pkt_idx);
+
 	if (vq->shadow_used_idx) {
 		do_data_copy_dequeue(vq);
 
-- 
2.30.2


^ permalink raw reply	[flat|nested] 27+ messages in thread

* [dpdk-dev] [PATCH v5] vhost: allocate and free packets in bulk in Tx packed
  2021-04-16  9:48       ` [dpdk-dev] [PATCH v4] " Balazs Nemeth
@ 2021-04-16 10:25         ` Balazs Nemeth
  2021-04-16 11:14           ` David Marchand
  2021-04-21  7:48           ` Maxime Coquelin
  0 siblings, 2 replies; 27+ messages in thread
From: Balazs Nemeth @ 2021-04-16 10:25 UTC (permalink / raw)
  To: bnemeth, dev; +Cc: maxime.coquelin, david.marchand

Move allocation out further and perform all allocation in bulk. The same
goes for freeing packets. In the process, also introduce
virtio_dev_pktmbuf_prep and make virtio_dev_pktmbuf_alloc use that.

Signed-off-by: Balazs Nemeth <bnemeth@redhat.com>
---
 lib/librte_vhost/virtio_net.c | 80 +++++++++++++++++++----------------
 1 file changed, 44 insertions(+), 36 deletions(-)

diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index ff39878609..77a00c9499 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -2134,6 +2134,24 @@ virtio_dev_extbuf_alloc(struct rte_mbuf *pkt, uint32_t size)
 	return 0;
 }
 
+static __rte_always_inline int
+virtio_dev_pktmbuf_prep(struct virtio_net *dev, struct rte_mbuf *pkt,
+			 uint32_t data_len)
+{
+	if (rte_pktmbuf_tailroom(pkt) >= data_len)
+		return 0;
+
+	/* attach an external buffer if supported */
+	if (dev->extbuf && !virtio_dev_extbuf_alloc(pkt, data_len))
+		return 0;
+
+	/* check if chained buffers are allowed */
+	if (!dev->linearbuf)
+		return 0;
+
+	return -1;
+}
+
 /*
  * Allocate a host supported pktmbuf.
  */
@@ -2149,23 +2167,15 @@ virtio_dev_pktmbuf_alloc(struct virtio_net *dev, struct rte_mempool *mp,
 		return NULL;
 	}
 
-	if (rte_pktmbuf_tailroom(pkt) >= data_len)
-		return pkt;
+	if (virtio_dev_pktmbuf_prep(dev, pkt, data_len)) {
+		/* Data doesn't fit into the buffer and the host supports
+		 * only linear buffers
+		 */
+		rte_pktmbuf_free(pkt);
+		return NULL;
+	}
 
-	/* attach an external buffer if supported */
-	if (dev->extbuf && !virtio_dev_extbuf_alloc(pkt, data_len))
-		return pkt;
-
-	/* check if chained buffers are allowed */
-	if (!dev->linearbuf)
-		return pkt;
-
-	/* Data doesn't fit into the buffer and the host supports
-	 * only linear buffers
-	 */
-	rte_pktmbuf_free(pkt);
-
-	return NULL;
+	return pkt;
 }
 
 static __rte_noinline uint16_t
@@ -2261,7 +2271,6 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 static __rte_always_inline int
 vhost_reserve_avail_batch_packed(struct virtio_net *dev,
 				 struct vhost_virtqueue *vq,
-				 struct rte_mempool *mbuf_pool,
 				 struct rte_mbuf **pkts,
 				 uint16_t avail_idx,
 				 uintptr_t *desc_addrs,
@@ -2306,9 +2315,8 @@ vhost_reserve_avail_batch_packed(struct virtio_net *dev,
 	}
 
 	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
-		pkts[i] = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, lens[i]);
-		if (!pkts[i])
-			goto free_buf;
+		if (virtio_dev_pktmbuf_prep(dev, pkts[i], lens[i]))
+			goto err;
 	}
 
 	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
@@ -2316,7 +2324,7 @@ vhost_reserve_avail_batch_packed(struct virtio_net *dev,
 
 	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
 		if (unlikely(buf_lens[i] < (lens[i] - buf_offset)))
-			goto free_buf;
+			goto err;
 	}
 
 	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
@@ -2327,17 +2335,13 @@ vhost_reserve_avail_batch_packed(struct virtio_net *dev,
 
 	return 0;
 
-free_buf:
-	for (i = 0; i < PACKED_BATCH_SIZE; i++)
-		rte_pktmbuf_free(pkts[i]);
-
+err:
 	return -1;
 }
 
 static __rte_always_inline int
 virtio_dev_tx_batch_packed(struct virtio_net *dev,
 			   struct vhost_virtqueue *vq,
-			   struct rte_mempool *mbuf_pool,
 			   struct rte_mbuf **pkts)
 {
 	uint16_t avail_idx = vq->last_avail_idx;
@@ -2347,8 +2351,8 @@ virtio_dev_tx_batch_packed(struct virtio_net *dev,
 	uint16_t ids[PACKED_BATCH_SIZE];
 	uint16_t i;
 
-	if (vhost_reserve_avail_batch_packed(dev, vq, mbuf_pool, pkts,
-					     avail_idx, desc_addrs, ids))
+	if (vhost_reserve_avail_batch_packed(dev, vq, pkts, avail_idx,
+					     desc_addrs, ids))
 		return -1;
 
 	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
@@ -2381,7 +2385,7 @@ static __rte_always_inline int
 vhost_dequeue_single_packed(struct virtio_net *dev,
 			    struct vhost_virtqueue *vq,
 			    struct rte_mempool *mbuf_pool,
-			    struct rte_mbuf **pkts,
+			    struct rte_mbuf *pkts,
 			    uint16_t *buf_id,
 			    uint16_t *desc_count)
 {
@@ -2398,8 +2402,7 @@ vhost_dequeue_single_packed(struct virtio_net *dev,
 					 VHOST_ACCESS_RO) < 0))
 		return -1;
 
-	*pkts = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, buf_len);
-	if (unlikely(*pkts == NULL)) {
+	if (unlikely(virtio_dev_pktmbuf_prep(dev, pkts, buf_len))) {
 		if (!allocerr_warned) {
 			VHOST_LOG_DATA(ERR,
 				"Failed mbuf alloc of size %d from %s on %s.\n",
@@ -2409,7 +2412,7 @@ vhost_dequeue_single_packed(struct virtio_net *dev,
 		return -1;
 	}
 
-	err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, *pkts,
+	err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkts,
 				mbuf_pool);
 	if (unlikely(err)) {
 		if (!allocerr_warned) {
@@ -2418,7 +2421,6 @@ vhost_dequeue_single_packed(struct virtio_net *dev,
 				dev->ifname);
 			allocerr_warned = true;
 		}
-		rte_pktmbuf_free(*pkts);
 		return -1;
 	}
 
@@ -2429,7 +2431,7 @@ static __rte_always_inline int
 virtio_dev_tx_single_packed(struct virtio_net *dev,
 			    struct vhost_virtqueue *vq,
 			    struct rte_mempool *mbuf_pool,
-			    struct rte_mbuf **pkts)
+			    struct rte_mbuf *pkts)
 {
 
 	uint16_t buf_id, desc_count = 0;
@@ -2462,11 +2464,14 @@ virtio_dev_tx_packed(struct virtio_net *dev,
 	uint32_t pkt_idx = 0;
 	uint32_t remained = count;
 
+	if (rte_pktmbuf_alloc_bulk(mbuf_pool, pkts, count))
+		return 0;
+
 	do {
 		rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]);
 
 		if (remained >= PACKED_BATCH_SIZE) {
-			if (!virtio_dev_tx_batch_packed(dev, vq, mbuf_pool,
+			if (!virtio_dev_tx_batch_packed(dev, vq,
 							&pkts[pkt_idx])) {
 				pkt_idx += PACKED_BATCH_SIZE;
 				remained -= PACKED_BATCH_SIZE;
@@ -2475,13 +2480,16 @@ virtio_dev_tx_packed(struct virtio_net *dev,
 		}
 
 		if (virtio_dev_tx_single_packed(dev, vq, mbuf_pool,
-						&pkts[pkt_idx]))
+						pkts[pkt_idx]))
 			break;
 		pkt_idx++;
 		remained--;
 
 	} while (remained);
 
+	if (pkt_idx != count)
+		rte_pktmbuf_free_bulk(&pkts[pkt_idx], count - pkt_idx);
+
 	if (vq->shadow_used_idx) {
 		do_data_copy_dequeue(vq);
 
-- 
2.30.2


^ permalink raw reply	[flat|nested] 27+ messages in thread

* Re: [dpdk-dev] [PATCH v5] vhost: allocate and free packets in bulk in Tx packed
  2021-04-16 10:25         ` [dpdk-dev] [PATCH v5] vhost: allocate and free packets in bulk in Tx packed Balazs Nemeth
@ 2021-04-16 11:14           ` David Marchand
  2021-04-21  7:48           ` Maxime Coquelin
  1 sibling, 0 replies; 27+ messages in thread
From: David Marchand @ 2021-04-16 11:14 UTC (permalink / raw)
  To: Balazs Nemeth; +Cc: dev, Maxime Coquelin

On Fri, Apr 16, 2021 at 12:26 PM Balazs Nemeth <bnemeth@redhat.com> wrote:
>
> Move allocation out further and perform all allocation in bulk. The same
> goes for freeing packets. In the process, also introduce
> virtio_dev_pktmbuf_prep and make virtio_dev_pktmbuf_alloc use that.
>
> Signed-off-by: Balazs Nemeth <bnemeth@redhat.com>

Reviewed-by: David Marchand <david.marchand@redhat.com>


-- 
David Marchand


^ permalink raw reply	[flat|nested] 27+ messages in thread

* Re: [dpdk-dev] [PATCH v5] vhost: allocate and free packets in bulk in Tx packed
  2021-04-16 10:25         ` [dpdk-dev] [PATCH v5] vhost: allocate and free packets in bulk in Tx packed Balazs Nemeth
  2021-04-16 11:14           ` David Marchand
@ 2021-04-21  7:48           ` Maxime Coquelin
  2021-04-28  3:16             ` Xia, Chenbo
  1 sibling, 1 reply; 27+ messages in thread
From: Maxime Coquelin @ 2021-04-21  7:48 UTC (permalink / raw)
  To: Balazs Nemeth, dev; +Cc: david.marchand



On 4/16/21 12:25 PM, Balazs Nemeth wrote:
> Move allocation out further and perform all allocation in bulk. The same
> goes for freeing packets. In the process, also introduce
> virtio_dev_pktmbuf_prep and make virtio_dev_pktmbuf_alloc use that.
> 
> Signed-off-by: Balazs Nemeth <bnemeth@redhat.com>
> ---
>  lib/librte_vhost/virtio_net.c | 80 +++++++++++++++++++----------------
>  1 file changed, 44 insertions(+), 36 deletions(-)
> 

Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>

Thanks,
Maxime


^ permalink raw reply	[flat|nested] 27+ messages in thread

* Re: [dpdk-dev] [PATCH v5] vhost: allocate and free packets in bulk in Tx packed
  2021-04-21  7:48           ` Maxime Coquelin
@ 2021-04-28  3:16             ` Xia, Chenbo
  0 siblings, 0 replies; 27+ messages in thread
From: Xia, Chenbo @ 2021-04-28  3:16 UTC (permalink / raw)
  To: Maxime Coquelin, Balazs Nemeth, dev; +Cc: david.marchand

> -----Original Message-----
> From: dev <dev-bounces@dpdk.org> On Behalf Of Maxime Coquelin
> Sent: Wednesday, April 21, 2021 3:49 PM
> To: Balazs Nemeth <bnemeth@redhat.com>; dev@dpdk.org
> Cc: david.marchand@redhat.com
> Subject: Re: [dpdk-dev] [PATCH v5] vhost: allocate and free packets in bulk in
> Tx packed
> 
> 
> 
> On 4/16/21 12:25 PM, Balazs Nemeth wrote:
> > Move allocation out further and perform all allocation in bulk. The same
> > goes for freeing packets. In the process, also introduce
> > virtio_dev_pktmbuf_prep and make virtio_dev_pktmbuf_alloc use that.
> >
> > Signed-off-by: Balazs Nemeth <bnemeth@redhat.com>
> > ---
> >  lib/librte_vhost/virtio_net.c | 80 +++++++++++++++++++----------------
> >  1 file changed, 44 insertions(+), 36 deletions(-)
> >

Patch applied to next-virtio/main with conflict resolved.

Thanks

^ permalink raw reply	[flat|nested] 27+ messages in thread

end of thread, other threads:[~2021-04-28  3:16 UTC | newest]

Thread overview: 27+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2021-04-06 15:44 [dpdk-dev] [PATCH 0/4] Use bulk alloc/free in virtio packed Balazs Nemeth
2021-04-06 15:44 ` [dpdk-dev] [PATCH 1/4] vhost: move allocation of mbuf outside of packet enqueue Balazs Nemeth
2021-04-07 10:17   ` [dpdk-dev] [PATCH v2 " Balazs Nemeth
2021-04-15 12:37     ` Maxime Coquelin
2021-04-16  8:18     ` [dpdk-dev] [PATCH v3] vhost: allocate and free packets in bulk Balazs Nemeth
2021-04-16  8:36       ` Maxime Coquelin
2021-04-16  9:05       ` David Marchand
2021-04-16  9:12         ` Balazs Nemeth
2021-04-16  9:41           ` Maxime Coquelin
2021-04-16  9:43           ` David Marchand
2021-04-16  9:48       ` [dpdk-dev] [PATCH v4] " Balazs Nemeth
2021-04-16 10:25         ` [dpdk-dev] [PATCH v5] vhost: allocate and free packets in bulk in Tx packed Balazs Nemeth
2021-04-16 11:14           ` David Marchand
2021-04-21  7:48           ` Maxime Coquelin
2021-04-28  3:16             ` Xia, Chenbo
2021-04-07 10:17   ` [dpdk-dev] [PATCH v2 2/4] vhost: perform all mbuf allocations in one loop Balazs Nemeth
2021-04-15 15:30     ` Maxime Coquelin
2021-04-07 10:17   ` [dpdk-dev] [PATCH v2 3/4] vhost: allocate and free packets in bulk Balazs Nemeth
2021-04-15 15:32     ` Maxime Coquelin
2021-04-15 15:45       ` David Marchand
2021-04-15 15:50         ` Maxime Coquelin
2021-04-15 15:58           ` Maxime Coquelin
2021-04-07 10:17   ` [dpdk-dev] [PATCH v2 4/4] vhost: remove unnecessary level of indirection Balazs Nemeth
2021-04-15 15:38     ` Maxime Coquelin
2021-04-06 15:44 ` [dpdk-dev] [PATCH 2/4] vhost: perform all mbuf allocations in one loop Balazs Nemeth
2021-04-06 15:44 ` [dpdk-dev] [PATCH 3/4] vhost: allocate and free packets in bulk Balazs Nemeth
2021-04-06 15:44 ` [dpdk-dev] [PATCH 4/4] vhost: remove unnecessary level of indirection Balazs Nemeth

DPDK patches and discussions

This inbox may be cloned and mirrored by anyone:

	git clone --mirror https://inbox.dpdk.org/dev/0 dev/git/0.git

	# If you have public-inbox 1.1+ installed, you may
	# initialize and index your mirror using the following commands:
	public-inbox-init -V2 dev dev/ https://inbox.dpdk.org/dev \
		dev@dpdk.org
	public-inbox-index dev

Example config snippet for mirrors.
Newsgroup available over NNTP:
	nntp://inbox.dpdk.org/inbox.dpdk.dev


AGPL code for this site: git clone https://public-inbox.org/public-inbox.git