DPDK patches and discussions
From: Balazs Nemeth <bnemeth@redhat.com>
To: bnemeth@redhat.com, dev@dpdk.org
Cc: maxime.coquelin@redhat.com, david.marchand@redhat.com
Subject: [dpdk-dev] [PATCH v3] vhost: allocate and free packets in bulk
Date: Fri, 16 Apr 2021 10:18:00 +0200
Message-ID: <f273e53ea580bad5f72410e276b371ededa272f2.1618560902.git.bnemeth@redhat.com>
In-Reply-To: <f7b08832f86d2fb9a8834b2a19b13f26d3c6857b.1617790501.git.bnemeth@redhat.com>

Move mbuf allocation further out and perform all allocations in bulk.
The same goes for freeing packets. In the process, also rename
virtio_dev_pktmbuf_alloc to virtio_dev_pktmbuf_prep, since the function
now receives an already allocated mbuf and only prepares it to hold the
packet data.

Signed-off-by: Balazs Nemeth <bnemeth@redhat.com>
---
 lib/librte_vhost/virtio_net.c | 56 ++++++++++++++++++++++------------
 1 file changed, 36 insertions(+), 20 deletions(-)
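
Note for reviewers, not part of the commit message: the dequeue path now
follows the shape sketched below. This is a simplified illustration of
the bulk pattern, not the exact control flow; virtio_dev_tx_packed()
interleaves the batch and single-packet paths, and the per-packet sizes
come from the descriptors rather than a lens[] array.

	uint16_t i;
	struct rte_mbuf *pkts[MAX_PKT_BURST]; /* count <= MAX_PKT_BURST */

	/* Take every mbuf for the burst in one mempool operation.
	 * rte_pktmbuf_alloc_bulk() is all-or-nothing: on failure no
	 * mbufs were taken, so there is nothing to clean up. */
	if (rte_pktmbuf_alloc_bulk(mbuf_pool, pkts, count))
		return 0;

	for (i = 0; i < count; i++) {
		/* Only *prepare* the pre-allocated mbuf: check that its
		 * tailroom (or an attached external buffer) can hold the
		 * packet, instead of allocating per descriptor. */
		if (virtio_dev_pktmbuf_prep(dev, pkts[i], lens[i]))
			break;
		/* ... copy the descriptor chain into pkts[i] ... */
	}

	/* Hand the unused tail of the burst back in one call. */
	if (i != count)
		rte_pktmbuf_free_bulk(&pkts[i], count - i);

Compared with one rte_pktmbuf_alloc()/rte_pktmbuf_free() call per packet,
the mempool is now touched once per burst on both the allocation and the
error path.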

diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index ff39878609..d6d5636e0f 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -2168,6 +2168,24 @@ virtio_dev_pktmbuf_alloc(struct virtio_net *dev, struct rte_mempool *mp,
 	return NULL;
 }
 
+static __rte_always_inline int
+virtio_dev_pktmbuf_prep(struct virtio_net *dev, struct rte_mbuf *pkt,
+			 uint32_t data_len)
+{
+	if (rte_pktmbuf_tailroom(pkt) >= data_len)
+		return 0;
+
+	/* attach an external buffer if supported */
+	if (dev->extbuf && !virtio_dev_extbuf_alloc(pkt, data_len))
+		return 0;
+
+	/* check if chained buffers are allowed */
+	if (!dev->linearbuf)
+		return 0;
+
+	return -1;
+}
+
 static __rte_noinline uint16_t
 virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
@@ -2261,7 +2279,6 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 static __rte_always_inline int
 vhost_reserve_avail_batch_packed(struct virtio_net *dev,
 				 struct vhost_virtqueue *vq,
-				 struct rte_mempool *mbuf_pool,
 				 struct rte_mbuf **pkts,
 				 uint16_t avail_idx,
 				 uintptr_t *desc_addrs,
@@ -2306,9 +2323,8 @@ vhost_reserve_avail_batch_packed(struct virtio_net *dev,
 	}
 
 	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
-		pkts[i] = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, lens[i]);
-		if (!pkts[i])
-			goto free_buf;
+		if (virtio_dev_pktmbuf_prep(dev, pkts[i], lens[i]))
+			goto err;
 	}
 
 	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
@@ -2316,7 +2332,7 @@ vhost_reserve_avail_batch_packed(struct virtio_net *dev,
 
 	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
 		if (unlikely(buf_lens[i] < (lens[i] - buf_offset)))
-			goto free_buf;
+			goto err;
 	}
 
 	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
@@ -2327,17 +2343,13 @@ vhost_reserve_avail_batch_packed(struct virtio_net *dev,
 
 	return 0;
 
-free_buf:
-	for (i = 0; i < PACKED_BATCH_SIZE; i++)
-		rte_pktmbuf_free(pkts[i]);
-
+err:
 	return -1;
 }
 
 static __rte_always_inline int
 virtio_dev_tx_batch_packed(struct virtio_net *dev,
 			   struct vhost_virtqueue *vq,
-			   struct rte_mempool *mbuf_pool,
 			   struct rte_mbuf **pkts)
 {
 	uint16_t avail_idx = vq->last_avail_idx;
@@ -2347,8 +2359,8 @@ virtio_dev_tx_batch_packed(struct virtio_net *dev,
 	uint16_t ids[PACKED_BATCH_SIZE];
 	uint16_t i;
 
-	if (vhost_reserve_avail_batch_packed(dev, vq, mbuf_pool, pkts,
-					     avail_idx, desc_addrs, ids))
+	if (vhost_reserve_avail_batch_packed(dev, vq, pkts, avail_idx,
+					     desc_addrs, ids))
 		return -1;
 
 	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
@@ -2381,7 +2393,7 @@ static __rte_always_inline int
 vhost_dequeue_single_packed(struct virtio_net *dev,
 			    struct vhost_virtqueue *vq,
 			    struct rte_mempool *mbuf_pool,
-			    struct rte_mbuf **pkts,
+			    struct rte_mbuf *pkts,
 			    uint16_t *buf_id,
 			    uint16_t *desc_count)
 {
@@ -2398,8 +2410,7 @@ vhost_dequeue_single_packed(struct virtio_net *dev,
 					 VHOST_ACCESS_RO) < 0))
 		return -1;
 
-	*pkts = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, buf_len);
-	if (unlikely(*pkts == NULL)) {
+	if (unlikely(virtio_dev_pktmbuf_prep(dev, pkts, buf_len))) {
 		if (!allocerr_warned) {
 			VHOST_LOG_DATA(ERR,
 				"Failed mbuf alloc of size %d from %s on %s.\n",
@@ -2409,7 +2421,7 @@ vhost_dequeue_single_packed(struct virtio_net *dev,
 		return -1;
 	}
 
-	err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, *pkts,
+	err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkts,
 				mbuf_pool);
 	if (unlikely(err)) {
 		if (!allocerr_warned) {
@@ -2418,7 +2430,6 @@ vhost_dequeue_single_packed(struct virtio_net *dev,
 				dev->ifname);
 			allocerr_warned = true;
 		}
-		rte_pktmbuf_free(*pkts);
 		return -1;
 	}
 
@@ -2429,7 +2440,7 @@ static __rte_always_inline int
 virtio_dev_tx_single_packed(struct virtio_net *dev,
 			    struct vhost_virtqueue *vq,
 			    struct rte_mempool *mbuf_pool,
-			    struct rte_mbuf **pkts)
+			    struct rte_mbuf *pkts)
 {
 
 	uint16_t buf_id, desc_count = 0;
@@ -2462,26 +2473,32 @@ virtio_dev_tx_packed(struct virtio_net *dev,
 	uint32_t pkt_idx = 0;
 	uint32_t remained = count;
 
+	if (rte_pktmbuf_alloc_bulk(mbuf_pool, pkts, count))
+		return 0;
+
 	do {
 		rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]);
 
 		if (remained >= PACKED_BATCH_SIZE) {
-			if (!virtio_dev_tx_batch_packed(dev, vq, mbuf_pool,
+			if (!virtio_dev_tx_batch_packed(dev, vq,
 							&pkts[pkt_idx])) {
 				pkt_idx += PACKED_BATCH_SIZE;
 				remained -= PACKED_BATCH_SIZE;
 				continue;
 			}
 		}
 
 		if (virtio_dev_tx_single_packed(dev, vq, mbuf_pool,
-						&pkts[pkt_idx]))
+						pkts[pkt_idx]))
 			break;
 		pkt_idx++;
 		remained--;
 
 	} while (remained);
 
+	if (pkt_idx != count)
+		rte_pktmbuf_free_bulk(&pkts[pkt_idx], count - pkt_idx);
+
 	if (vq->shadow_used_idx) {
 		do_data_copy_dequeue(vq);
 
-- 
2.30.2



Thread overview: 27+ messages
2021-04-06 15:44 [dpdk-dev] [PATCH 0/4] Use bulk alloc/free in virtio packed Balazs Nemeth
2021-04-06 15:44 ` [dpdk-dev] [PATCH 1/4] vhost: move allocation of mbuf outside of packet enqueue Balazs Nemeth
2021-04-07 10:17   ` [dpdk-dev] [PATCH v2 " Balazs Nemeth
2021-04-15 12:37     ` Maxime Coquelin
2021-04-16  8:18     ` Balazs Nemeth [this message]
2021-04-16  8:36       ` [dpdk-dev] [PATCH v3] vhost: allocate and free packets in bulk Maxime Coquelin
2021-04-16  9:05       ` David Marchand
2021-04-16  9:12         ` Balazs Nemeth
2021-04-16  9:41           ` Maxime Coquelin
2021-04-16  9:43           ` David Marchand
2021-04-16  9:48       ` [dpdk-dev] [PATCH v4] " Balazs Nemeth
2021-04-16 10:25         ` [dpdk-dev] [PATCH v5] vhost: allocate and free packets in bulk in Tx packed Balazs Nemeth
2021-04-16 11:14           ` David Marchand
2021-04-21  7:48           ` Maxime Coquelin
2021-04-28  3:16             ` Xia, Chenbo
2021-04-07 10:17   ` [dpdk-dev] [PATCH v2 2/4] vhost: perform all mbuf allocations in one loop Balazs Nemeth
2021-04-15 15:30     ` Maxime Coquelin
2021-04-07 10:17   ` [dpdk-dev] [PATCH v2 3/4] vhost: allocate and free packets in bulk Balazs Nemeth
2021-04-15 15:32     ` Maxime Coquelin
2021-04-15 15:45       ` David Marchand
2021-04-15 15:50         ` Maxime Coquelin
2021-04-15 15:58           ` Maxime Coquelin
2021-04-07 10:17   ` [dpdk-dev] [PATCH v2 4/4] vhost: remove unnecessary level of indirection Balazs Nemeth
2021-04-15 15:38     ` Maxime Coquelin
2021-04-06 15:44 ` [dpdk-dev] [PATCH 2/4] vhost: perform all mbuf allocations in one loop Balazs Nemeth
2021-04-06 15:44 ` [dpdk-dev] [PATCH 3/4] vhost: allocate and free packets in bulk Balazs Nemeth
2021-04-06 15:44 ` [dpdk-dev] [PATCH 4/4] vhost: remove unnecessary level of indirection Balazs Nemeth

