From: Maxime Coquelin <maxime.coquelin@redhat.com>
To: Balazs Nemeth <bnemeth@redhat.com>, dev@dpdk.org
Subject: Re: [dpdk-dev] [PATCH v2 1/4] vhost: move allocation of mbuf outside of packet enqueue
Date: Thu, 15 Apr 2021 14:37:28 +0200 [thread overview]
Message-ID: <c87cc63a-b49c-5e42-9d45-af4eda1c1138@redhat.com> (raw)
In-Reply-To: <f7b08832f86d2fb9a8834b2a19b13f26d3c6857b.1617790501.git.bnemeth@redhat.com>
Hi Balazs,
Hint for future revisions: please add a cover letter when sending multiple
patches; it makes handling the series easier for the maintainer.
Also, please check the MAINTAINERS file and Cc the other relevant maintainers.
On 4/7/21 12:17 PM, Balazs Nemeth wrote:
> In preparation for subsequent patches, move mbuf allocation out and
> rename virtio_dev_pktmbuf_alloc to virtio_dev_pktmbuf_prep. This
> function now receives an already allocated mbuf pointer.
>
> Signed-off-by: Balazs Nemeth <bnemeth@redhat.com>
> ---
> lib/librte_vhost/virtio_net.c | 54 ++++++++++++++++++++++++++---------
> 1 file changed, 40 insertions(+), 14 deletions(-)
>
> diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
> index 7f621fb6d..666e7fdb8 100644
> --- a/lib/librte_vhost/virtio_net.c
> +++ b/lib/librte_vhost/virtio_net.c
> @@ -2166,6 +2166,23 @@ virtio_dev_pktmbuf_alloc(struct virtio_net *dev, struct rte_mempool *mp,
> return NULL;
> }
>
> +static __rte_always_inline int
> +virtio_dev_pktmbuf_prep(struct virtio_net *dev, struct rte_mbuf *pkt,
> + uint32_t data_len)
> +{
> + if (rte_pktmbuf_tailroom(pkt) >= data_len)
> + return 0;
> +
> + /* attach an external buffer if supported */
> + if (dev->extbuf && !virtio_dev_extbuf_alloc(pkt, data_len))
> + return 0;
> +
> + /* check if chained buffers are allowed */
> + if (!dev->linearbuf)
> + return 0;
Add a blank line here.
> + return 1;
Maybe return a negative value for consistency.
> +}
> +
> static __rte_noinline uint16_t
> virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
> struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
> @@ -2259,7 +2276,6 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
> static __rte_always_inline int
> vhost_reserve_avail_batch_packed(struct virtio_net *dev,
> struct vhost_virtqueue *vq,
> - struct rte_mempool *mbuf_pool,
> struct rte_mbuf **pkts,
> uint16_t avail_idx,
> uintptr_t *desc_addrs,
> @@ -2304,8 +2320,7 @@ vhost_reserve_avail_batch_packed(struct virtio_net *dev,
> }
>
> vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
> - pkts[i] = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, lens[i]);
> - if (!pkts[i])
> + if (virtio_dev_pktmbuf_prep(dev, pkts[i], lens[i]))
> goto free_buf;
> }
>
> @@ -2326,16 +2341,12 @@ vhost_reserve_avail_batch_packed(struct virtio_net *dev,
> return 0;
>
> free_buf:
Nothing is freed here anymore, so it would be better to rename the label.
> - for (i = 0; i < PACKED_BATCH_SIZE; i++)
> - rte_pktmbuf_free(pkts[i]);
> -
> return -1;
> }
>
> static __rte_always_inline int
> virtio_dev_tx_batch_packed(struct virtio_net *dev,
> struct vhost_virtqueue *vq,
> - struct rte_mempool *mbuf_pool,
> struct rte_mbuf **pkts)
> {
> uint16_t avail_idx = vq->last_avail_idx;
> @@ -2345,8 +2356,8 @@ virtio_dev_tx_batch_packed(struct virtio_net *dev,
> uint16_t ids[PACKED_BATCH_SIZE];
> uint16_t i;
>
> - if (vhost_reserve_avail_batch_packed(dev, vq, mbuf_pool, pkts,
> - avail_idx, desc_addrs, ids))
> + if (vhost_reserve_avail_batch_packed(dev, vq, pkts, avail_idx,
> + desc_addrs, ids))
> return -1;
>
> vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
> @@ -2396,8 +2407,8 @@ vhost_dequeue_single_packed(struct virtio_net *dev,
> VHOST_ACCESS_RO) < 0))
> return -1;
>
> - *pkts = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, buf_len);
> - if (unlikely(*pkts == NULL)) {
> +
> + if (unlikely(virtio_dev_pktmbuf_prep(dev, *pkts, buf_len))) {
> if (!allocerr_warned) {
> VHOST_LOG_DATA(ERR,
> "Failed mbuf alloc of size %d from %s on %s.\n",
> @@ -2416,7 +2427,6 @@ vhost_dequeue_single_packed(struct virtio_net *dev,
> dev->ifname);
> allocerr_warned = true;
> }
> - rte_pktmbuf_free(*pkts);
> return -1;
> }
>
> @@ -2459,22 +2469,38 @@ virtio_dev_tx_packed(struct virtio_net *dev,
> {
> uint32_t pkt_idx = 0;
> uint32_t remained = count;
> + uint16_t i;
>
> do {
> rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]);
>
> if (remained >= PACKED_BATCH_SIZE) {
> - if (!virtio_dev_tx_batch_packed(dev, vq, mbuf_pool,
> + vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
> + pkts[pkt_idx + i] =
> + rte_pktmbuf_alloc(mbuf_pool);
No check on whether the alloc succeeded?
Also, we recently moved to allowing lines of up to 100 chars, so maybe this
can now fit on a single line.
> + }
> +
> + if (!virtio_dev_tx_batch_packed(dev, vq,
> &pkts[pkt_idx])) {
Ditto
> pkt_idx += PACKED_BATCH_SIZE;
> remained -= PACKED_BATCH_SIZE;
> +
> continue;
> + } else {
> + vhost_for_each_try_unroll(i, 0,
> + PACKED_BATCH_SIZE) {
Same here
> + rte_pktmbuf_free(pkts[pkt_idx + i]);
> + }
> }
> }
>
> + pkts[pkt_idx] = rte_pktmbuf_alloc(mbuf_pool);
Here also you may want to ensure the allocation succeeded.
> +
> if (virtio_dev_tx_single_packed(dev, vq, mbuf_pool,
> - &pkts[pkt_idx]))
> + &pkts[pkt_idx])) {
> + rte_pktmbuf_free(pkts[pkt_idx]);
> break;
> + }
> pkt_idx++;
> remained--;
>
>
Maxime
next prev parent reply other threads:[~2021-04-15 12:37 UTC|newest]
Thread overview: 27+ messages / expand[flat|nested] mbox.gz Atom feed top
2021-04-06 15:44 [dpdk-dev] [PATCH 0/4] Use bulk alloc/free in virtio packed Balazs Nemeth
2021-04-06 15:44 ` [dpdk-dev] [PATCH 1/4] vhost: move allocation of mbuf outside of packet enqueue Balazs Nemeth
2021-04-07 10:17 ` [dpdk-dev] [PATCH v2 " Balazs Nemeth
2021-04-15 12:37 ` Maxime Coquelin [this message]
2021-04-16 8:18 ` [dpdk-dev] [PATCH v3] vhost: allocate and free packets in bulk Balazs Nemeth
2021-04-16 8:36 ` Maxime Coquelin
2021-04-16 9:05 ` David Marchand
2021-04-16 9:12 ` Balazs Nemeth
2021-04-16 9:41 ` Maxime Coquelin
2021-04-16 9:43 ` David Marchand
2021-04-16 9:48 ` [dpdk-dev] [PATCH v4] " Balazs Nemeth
2021-04-16 10:25 ` [dpdk-dev] [PATCH v5] vhost: allocate and free packets in bulk in Tx packed Balazs Nemeth
2021-04-16 11:14 ` David Marchand
2021-04-21 7:48 ` Maxime Coquelin
2021-04-28 3:16 ` Xia, Chenbo
2021-04-07 10:17 ` [dpdk-dev] [PATCH v2 2/4] vhost: perform all mbuf allocations in one loop Balazs Nemeth
2021-04-15 15:30 ` Maxime Coquelin
2021-04-07 10:17 ` [dpdk-dev] [PATCH v2 3/4] vhost: allocate and free packets in bulk Balazs Nemeth
2021-04-15 15:32 ` Maxime Coquelin
2021-04-15 15:45 ` David Marchand
2021-04-15 15:50 ` Maxime Coquelin
2021-04-15 15:58 ` Maxime Coquelin
2021-04-07 10:17 ` [dpdk-dev] [PATCH v2 4/4] vhost: remove unnecessary level of indirection Balazs Nemeth
2021-04-15 15:38 ` Maxime Coquelin
2021-04-06 15:44 ` [dpdk-dev] [PATCH 2/4] vhost: perform all mbuf allocations in one loop Balazs Nemeth
2021-04-06 15:44 ` [dpdk-dev] [PATCH 3/4] vhost: allocate and free packets in bulk Balazs Nemeth
2021-04-06 15:44 ` [dpdk-dev] [PATCH 4/4] vhost: remove unnecessary level of indirection Balazs Nemeth
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=c87cc63a-b49c-5e42-9d45-af4eda1c1138@redhat.com \
--to=maxime.coquelin@redhat.com \
--cc=bnemeth@redhat.com \
--cc=dev@dpdk.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).