DPDK patches and discussions
From: Chenbo Xia <chenbox@nvidia.com>
To: Maxime Coquelin <maxime.coquelin@redhat.com>
Cc: "dev@dpdk.org" <dev@dpdk.org>,
	David Marchand <david.marchand@redhat.com>
Subject: Re: [PATCH v2 1/2] vhost: add logging mechanism for reconnection
Date: Mon, 23 Sep 2024 07:12:02 +0000
Message-ID: <58EF732F-9683-4BDB-83ED-B555B9554296@nvidia.com>
In-Reply-To: <20240920150921.994434-2-maxime.coquelin@redhat.com>


> On Sep 20, 2024, at 23:09, Maxime Coquelin <maxime.coquelin@redhat.com> wrote:
> 
> 
> This patch introduces a way for the backend to keep track
> of the information needed to reconnect without the
> frontend's cooperation.
> 
> It will be used for VDUSE, which does not provide an
> interface for the backend to save and later recover the
> local virtqueue metadata needed to reconnect.
> 
> Vhost-user support could also be added later to improve
> packed ring reconnection.
> 
> Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
> ---
> lib/vhost/vhost.h           | 41 ++++++++++++++++++++++++++++++++++---
> lib/vhost/virtio_net.c      |  8 ++++++++
> lib/vhost/virtio_net_ctrl.c |  2 ++
> 3 files changed, 48 insertions(+), 3 deletions(-)
> 
> diff --git a/lib/vhost/vhost.h b/lib/vhost/vhost.h
> index cd3fa55f1b..1f4192f5d1 100644
> --- a/lib/vhost/vhost.h
> +++ b/lib/vhost/vhost.h
> @@ -269,6 +269,24 @@ struct vhost_async {
>        };
> };
> 
> +#define VHOST_RECONNECT_VERSION                0x0
> +#define VHOST_MAX_VRING                        0x100
> +#define VHOST_MAX_QUEUE_PAIRS          0x80
> +
> +struct __rte_cache_aligned vhost_reconnect_vring {
> +       uint16_t last_avail_idx;
> +       bool avail_wrap_counter;
> +};
> +
> +struct vhost_reconnect_data {
> +       uint32_t version;
> +       uint64_t features;
> +       uint8_t status;
> +       struct virtio_net_config config;
> +       uint32_t nr_vrings;
> +       struct vhost_reconnect_vring vring[VHOST_MAX_VRING];
> +};
> +
> /**
>  * Structure contains variables relevant to RX/TX virtqueues.
>  */
> @@ -351,6 +369,7 @@ struct __rte_cache_aligned vhost_virtqueue {
>        struct virtqueue_stats  stats;
> 
>        RTE_ATOMIC(bool) irq_pending;
> +       struct vhost_reconnect_vring *reconnect_log;
> };
> 
> /* Virtio device status as per Virtio specification */
> @@ -362,9 +381,6 @@ struct __rte_cache_aligned vhost_virtqueue {
> #define VIRTIO_DEVICE_STATUS_DEV_NEED_RESET    0x40
> #define VIRTIO_DEVICE_STATUS_FAILED            0x80
> 
> -#define VHOST_MAX_VRING                        0x100
> -#define VHOST_MAX_QUEUE_PAIRS          0x80
> -
> /* Declare IOMMU related bits for older kernels */
> #ifndef VIRTIO_F_IOMMU_PLATFORM
> 
> @@ -538,8 +554,26 @@ struct __rte_cache_aligned virtio_net {
>        struct rte_vhost_user_extern_ops extern_ops;
> 
>        struct vhost_backend_ops *backend_ops;
> +
> +       struct vhost_reconnect_data *reconnect_log;
> };
> 
> +static __rte_always_inline void
> +vhost_virtqueue_reconnect_log_split(struct vhost_virtqueue *vq)
> +{
> +       if (vq->reconnect_log != NULL)
> +               vq->reconnect_log->last_avail_idx = vq->last_avail_idx;
> +}
> +
> +static __rte_always_inline void
> +vhost_virtqueue_reconnect_log_packed(struct vhost_virtqueue *vq)
> +{
> +       if (vq->reconnect_log != NULL) {
> +               vq->reconnect_log->last_avail_idx = vq->last_avail_idx;
> +               vq->reconnect_log->avail_wrap_counter = vq->avail_wrap_counter;
> +       }
> +}
> +
> static inline void
> vq_assert_lock__(struct virtio_net *dev, struct vhost_virtqueue *vq, const char *func)
>        __rte_assert_exclusive_lock(&vq->access_lock)
> @@ -584,6 +618,7 @@ vq_inc_last_avail_packed(struct vhost_virtqueue *vq, uint16_t num)
>                vq->avail_wrap_counter ^= 1;
>                vq->last_avail_idx -= vq->size;
>        }
> +       vhost_virtqueue_reconnect_log_packed(vq);
> }
> 
> void __vhost_log_cache_write(struct virtio_net *dev,
> diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
> index 370402d849..f66a0c82f8 100644
> --- a/lib/vhost/virtio_net.c
> +++ b/lib/vhost/virtio_net.c
> @@ -1445,6 +1445,7 @@ virtio_dev_rx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
>                }
> 
>                vq->last_avail_idx += num_buffers;
> +               vhost_virtqueue_reconnect_log_split(vq);
>        }
> 
>        do_data_copy_enqueue(dev, vq);
> @@ -1857,6 +1858,7 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev, struct vhost_virtqueue
>                pkts_info[slot_idx].mbuf = pkts[pkt_idx];
> 
>                vq->last_avail_idx += num_buffers;
> +               vhost_virtqueue_reconnect_log_split(vq);
>        }
> 
>        if (unlikely(pkt_idx == 0))
> @@ -1885,6 +1887,7 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev, struct vhost_virtqueue
>                /* recover shadow used ring and available ring */
>                vq->shadow_used_idx -= num_descs;
>                vq->last_avail_idx -= num_descs;
> +               vhost_virtqueue_reconnect_log_split(vq);
>        }
> 
>        /* keep used descriptors */
> @@ -2100,6 +2103,7 @@ dma_error_handler_packed(struct vhost_virtqueue *vq, uint16_t slot_idx,
>                vq->last_avail_idx = vq->last_avail_idx + vq->size - descs_err;
>                vq->avail_wrap_counter ^= 1;
>        }
> +       vhost_virtqueue_reconnect_log_packed(vq);
> 
>        if (async->buffer_idx_packed >= buffers_err)
>                async->buffer_idx_packed -= buffers_err;
> @@ -3182,6 +3186,7 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
> 
>        if (likely(vq->shadow_used_idx)) {
>                vq->last_avail_idx += vq->shadow_used_idx;
> +               vhost_virtqueue_reconnect_log_split(vq);
>                do_data_copy_dequeue(vq);
>                flush_shadow_used_ring_split(dev, vq);
>                vhost_vring_call_split(dev, vq);
> @@ -3854,6 +3859,7 @@ virtio_dev_tx_async_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
>                async->desc_idx_split++;
> 
>                vq->last_avail_idx++;
> +               vhost_virtqueue_reconnect_log_split(vq);
>        }
> 
>        if (unlikely(dropped))
> @@ -3872,6 +3878,7 @@ virtio_dev_tx_async_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
>                pkt_idx = n_xfer;
>                /* recover available ring */
>                vq->last_avail_idx -= pkt_err;
> +               vhost_virtqueue_reconnect_log_split(vq);
> 
>                /**
>                 * recover async channel copy related structures and free pktmbufs
> @@ -4153,6 +4160,7 @@ virtio_dev_tx_async_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
>                        vq->last_avail_idx += vq->size - descs_err;
>                        vq->avail_wrap_counter ^= 1;
>                }
> +               vhost_virtqueue_reconnect_log_packed(vq);
>        }
> 
>        async->pkts_idx += pkt_idx;
> diff --git a/lib/vhost/virtio_net_ctrl.c b/lib/vhost/virtio_net_ctrl.c
> index 8f78122361..b8ee94018e 100644
> --- a/lib/vhost/virtio_net_ctrl.c
> +++ b/lib/vhost/virtio_net_ctrl.c
> @@ -169,6 +169,7 @@ virtio_net_ctrl_pop(struct virtio_net *dev, struct vhost_virtqueue *cvq,
>        cvq->last_avail_idx++;
>        if (cvq->last_avail_idx >= cvq->size)
>                cvq->last_avail_idx -= cvq->size;
> +       vhost_virtqueue_reconnect_log_split(cvq);
> 
>        if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
>                vhost_avail_event(cvq) = cvq->last_avail_idx;
> @@ -181,6 +182,7 @@ virtio_net_ctrl_pop(struct virtio_net *dev, struct vhost_virtqueue *cvq,
>        cvq->last_avail_idx++;
>        if (cvq->last_avail_idx >= cvq->size)
>                cvq->last_avail_idx -= cvq->size;
> +       vhost_virtqueue_reconnect_log_split(cvq);
> 
>        if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
>                vhost_avail_event(cvq) = cvq->last_avail_idx;
> --
> 2.46.0
> 
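Just to make sure I read the intent right: on reconnection, the backend side
would restore the virtqueue state from this log roughly as below. This is a
sketch only; the restore helper and its name are made up, and only the two
structures and the vhost_virtqueue/virtio_net fields it touches come from
this patch.

/*
 * Sketch only, not part of this patch. It assumes the reconnect log
 * (struct vhost_reconnect_data) has already been mapped or allocated by
 * the backend, presumably from the area that patch 2/2 sets up for VDUSE.
 * The helper name is hypothetical.
 */
static int
vhost_vq_restore_from_reconnect_log(struct virtio_net *dev,
		struct vhost_virtqueue *vq, uint32_t vring_idx)
{
	struct vhost_reconnect_data *log = dev->reconnect_log;
	struct vhost_reconnect_vring *vq_log;

	if (log == NULL || log->version != VHOST_RECONNECT_VERSION)
		return -1;

	if (vring_idx >= log->nr_vrings || vring_idx >= VHOST_MAX_VRING)
		return -1;

	vq_log = &log->vring[vring_idx];

	/* Restore the indexes saved by the datapath logging helpers. */
	vq->last_avail_idx = vq_log->last_avail_idx;
	if (vq_is_packed(dev))
		vq->avail_wrap_counter = vq_log->avail_wrap_counter;

	/* Keep logging from here on, so the next restart is also recoverable. */
	vq->reconnect_log = vq_log;

	return 0;
}

If that matches the intended flow, the logging points added in the datapath
look complete to me.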

Reviewed-by: Chenbo Xia <chenbox@nvidia.com>



Thread overview: 8+ messages
2024-09-20 15:09 [PATCH v2 0/2] vhost: add VDUSE reconnection support Maxime Coquelin
2024-09-20 15:09 ` [PATCH v2 1/2] vhost: add logging mechanism for reconnection Maxime Coquelin
2024-09-23  7:12   ` Chenbo Xia [this message]
2024-09-23 15:42   ` David Marchand
2024-09-23 19:46     ` Maxime Coquelin
2024-09-20 15:09 ` [PATCH v2 2/2] vhost: add reconnection support to VDUSE Maxime Coquelin
2024-09-23  7:13   ` Chenbo Xia
2024-09-23 16:43   ` David Marchand
