DPDK patches and discussions
 help / color / mirror / Atom feed
From: Cindy Lu <lulu@redhat.com>
To: lulu@redhat.com, jasowang@redhat.com, xieyongji@bytedance.com,
	dev@dpdk.org, maxime.coquelin@redhat.com
Subject: [RFC v2 2/2] vhost: add reconnection support to VDUSE (WIP)
Date: Tue, 17 Oct 2023 22:24:03 +0800	[thread overview]
Message-ID: <20231017142403.2995341-3-lulu@redhat.com> (raw)
In-Reply-To: <20231017142403.2995341-1-lulu@redhat.com>

From: Maxime Coquelin <maxime.coquelin@redhat.com>

This patch is based on
https://gitlab.com/mcoquelin/dpdk-next-virtio/-/commit/a89dc311f2d03e99b8180f377b4a60a0e94
The biggest change is moving the mmap-related processing to the previous patch.

Signed-off-by: Cindy Lu <lulu@redhat.com>
---
 lib/vhost/vduse.c           | 43 ++++++++++++++++++++++++++-----------
 lib/vhost/virtio_net.c      | 22 +++++++++++++++++++
 lib/vhost/virtio_net_ctrl.c |  4 ++++
 3 files changed, 56 insertions(+), 13 deletions(-)

diff --git a/lib/vhost/vduse.c b/lib/vhost/vduse.c
index 9b7f829a7a..4f12f039db 100644
--- a/lib/vhost/vduse.c
+++ b/lib/vhost/vduse.c
@@ -165,7 +165,7 @@ vduse_control_queue_event(int fd, void *arg, int *remove __rte_unused)
 }
 
 static void
-vduse_vring_setup(struct virtio_net *dev, unsigned int index)
+vduse_vring_setup(struct virtio_net *dev, unsigned int index, bool reconnect)
 {
 	struct vhost_virtqueue *vq = dev->virtqueue[index];
 	struct vhost_vring_addr *ra = &vq->ring_addrs;
@@ -181,15 +181,13 @@ vduse_vring_setup(struct virtio_net *dev, unsigned int index)
 		return;
 	}
 
-	VHOST_LOG_CONFIG(dev->ifname, INFO, "VQ %u info:\n", index);
-	VHOST_LOG_CONFIG(dev->ifname, INFO, "\tnum: %u\n", vq_info.num);
-	VHOST_LOG_CONFIG(dev->ifname, INFO, "\tdesc_addr: %llx\n", vq_info.desc_addr);
-	VHOST_LOG_CONFIG(dev->ifname, INFO, "\tdriver_addr: %llx\n", vq_info.driver_addr);
-	VHOST_LOG_CONFIG(dev->ifname, INFO, "\tdevice_addr: %llx\n", vq_info.device_addr);
-	VHOST_LOG_CONFIG(dev->ifname, INFO, "\tavail_idx: %u\n", vq_info.split.avail_index);
-	VHOST_LOG_CONFIG(dev->ifname, INFO, "\tready: %u\n", vq_info.ready);
-
-	vq->last_avail_idx = vq_info.split.avail_index;
+	if (reconnect) {
+		vq->last_avail_idx = vq->log->last_avail_idx;
+		vq->last_used_idx = vq->log->last_avail_idx;
+	} else {
+		vq->last_avail_idx = vq_info.split.avail_index;
+		vq->last_used_idx = vq_info.split.avail_index;
+	}
 	vq->size = vq_info.num;
 	vq->ready = vq_info.ready;
 	vq->enabled = true;
@@ -197,6 +195,14 @@ vduse_vring_setup(struct virtio_net *dev, unsigned int index)
 	ra->avail_user_addr = vq_info.driver_addr;
 	ra->used_user_addr = vq_info.device_addr;
 
+	VHOST_LOG_CONFIG(dev->ifname, INFO, "VQ %u info:\n", index);
+	VHOST_LOG_CONFIG(dev->ifname, INFO, "\tnum: %u\n", vq_info.num);
+	VHOST_LOG_CONFIG(dev->ifname, INFO, "\tdesc_addr: %llx\n", vq_info.desc_addr);
+	VHOST_LOG_CONFIG(dev->ifname, INFO, "\tdriver_addr: %llx\n", vq_info.driver_addr);
+	VHOST_LOG_CONFIG(dev->ifname, INFO, "\tdevice_addr: %llx\n", vq_info.device_addr);
+	VHOST_LOG_CONFIG(dev->ifname, INFO, "\tavail_idx: %u\n", vq->last_avail_idx);
+	VHOST_LOG_CONFIG(dev->ifname, INFO, "\tready: %u\n", vq_info.ready);
+
 	vq->kickfd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
 	if (vq->kickfd < 0) {
 		VHOST_LOG_CONFIG(dev->ifname, ERR, "Failed to init kickfd for VQ %u: %s\n",
@@ -250,7 +256,7 @@ vduse_vring_setup(struct virtio_net *dev, unsigned int index)
 }
 
 static void
-vduse_device_start(struct virtio_net *dev)
+vduse_device_start(struct virtio_net *dev, bool reconnect)
 {
 	unsigned int i, ret;
 
@@ -268,6 +274,16 @@ vduse_device_start(struct virtio_net *dev)
 		return;
 	}
 
+	if (reconnect && dev->features != dev->log->features) {
+		VHOST_LOG_CONFIG(dev->ifname, ERR,
+				 "Mismatch between reconnect file features 0x%" PRIx64
+				 " & device features 0x%" PRIx64 "\n",
+				 (uint64_t)dev->log->features, dev->features);
+		return;
+	}
+
+	dev->log->features = dev->features;
+
 	VHOST_LOG_CONFIG(dev->ifname, INFO, "negotiated Virtio features: 0x%" PRIx64 "\n",
 		dev->features);
 
@@ -281,7 +297,7 @@ vduse_device_start(struct virtio_net *dev)
 	}
 
 	for (i = 0; i < dev->nr_vring; i++)
-		vduse_vring_setup(dev, i);
+		vduse_vring_setup(dev, i, reconnect);
 
 	dev->flags |= VIRTIO_DEV_READY;
 
@@ -335,9 +351,10 @@ vduse_events_handler(int fd, void *arg, int *remove __rte_unused)
 		VHOST_LOG_CONFIG(dev->ifname, INFO, "\tnew status: 0x%08x\n",
 				req.s.status);
 		dev->status = req.s.status;
+		dev->log->status = dev->status;
 
 		if (dev->status & VIRTIO_DEVICE_STATUS_DRIVER_OK)
-			vduse_device_start(dev);
+			vduse_device_start(dev, false);
 
 		resp.result = VDUSE_REQ_RESULT_OK;
 		break;
diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
index be28ea5151..49cce787bc 100644
--- a/lib/vhost/virtio_net.c
+++ b/lib/vhost/virtio_net.c
@@ -1443,6 +1443,8 @@ virtio_dev_rx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		}
 
 		vq->last_avail_idx += num_buffers;
+		if (vq->log)
+			vq->log->last_avail_idx = vq->last_avail_idx;
 	}
 
 	do_data_copy_enqueue(dev, vq);
@@ -1838,6 +1840,8 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev, struct vhost_virtqueue
 		pkts_info[slot_idx].mbuf = pkts[pkt_idx];
 
 		vq->last_avail_idx += num_buffers;
+		if (vq->log)
+			vq->log->last_avail_idx = vq->last_avail_idx;
 	}
 
 	if (unlikely(pkt_idx == 0))
@@ -1866,6 +1870,8 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev, struct vhost_virtqueue
 		/* recover shadow used ring and available ring */
 		vq->shadow_used_idx -= num_descs;
 		vq->last_avail_idx -= num_descs;
+		if (vq->log)
+			vq->log->last_avail_idx = vq->last_avail_idx;
 	}
 
 	/* keep used descriptors */
@@ -2077,9 +2083,15 @@ dma_error_handler_packed(struct vhost_virtqueue *vq, uint16_t slot_idx,
 
 	if (vq->last_avail_idx >= descs_err) {
 		vq->last_avail_idx -= descs_err;
+		if (vq->log)
+			vq->log->last_avail_idx = vq->last_avail_idx;
 	} else {
 		vq->last_avail_idx = vq->last_avail_idx + vq->size - descs_err;
 		vq->avail_wrap_counter ^= 1;
+		if (vq->log) {
+			vq->log->last_avail_idx = vq->last_avail_idx;
+			vq->log->avail_wrap_counter = vq->avail_wrap_counter;
+		}
 	}
 
 	if (async->buffer_idx_packed >= buffers_err)
@@ -3161,6 +3173,8 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		rte_pktmbuf_free_bulk(&pkts[i - 1], count - i + 1);
 
 	vq->last_avail_idx += i;
+	if (vq->log)
+		vq->log->last_avail_idx = vq->last_avail_idx;
 
 	do_data_copy_dequeue(vq);
 	if (unlikely(i < count))
@@ -3796,6 +3810,8 @@ virtio_dev_tx_async_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		async->desc_idx_split++;
 
 		vq->last_avail_idx++;
+		if (vq->log)
+			vq->log->last_avail_idx = vq->last_avail_idx;
 	}
 
 	if (unlikely(dropped))
@@ -3814,6 +3830,8 @@ virtio_dev_tx_async_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		pkt_idx = n_xfer;
 		/* recover available ring */
 		vq->last_avail_idx -= pkt_err;
+		if (vq->log)
+			vq->log->last_avail_idx = vq->last_avail_idx;
 
 		/**
 		 * recover async channel copy related structures and free pktmbufs
@@ -4093,6 +4111,10 @@ virtio_dev_tx_async_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
 			vq->last_avail_idx += vq->size - descs_err;
 			vq->avail_wrap_counter ^= 1;
 		}
+		if (vq->log) {
+			vq->log->last_avail_idx = vq->last_avail_idx;
+			vq->log->avail_wrap_counter = vq->avail_wrap_counter;
+		}
 	}
 
 	async->pkts_idx += pkt_idx;
diff --git a/lib/vhost/virtio_net_ctrl.c b/lib/vhost/virtio_net_ctrl.c
index 36e34153e7..00c55a12e5 100644
--- a/lib/vhost/virtio_net_ctrl.c
+++ b/lib/vhost/virtio_net_ctrl.c
@@ -168,6 +168,8 @@ virtio_net_ctrl_pop(struct virtio_net *dev, struct vhost_virtqueue *cvq,
 	cvq->last_avail_idx++;
 	if (cvq->last_avail_idx >= cvq->size)
 		cvq->last_avail_idx -= cvq->size;
+	if (cvq->log)
+		cvq->log->last_avail_idx = cvq->last_avail_idx;
 
 	if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
 		vhost_avail_event(cvq) = cvq->last_avail_idx;
@@ -180,6 +182,8 @@ virtio_net_ctrl_pop(struct virtio_net *dev, struct vhost_virtqueue *cvq,
 	cvq->last_avail_idx++;
 	if (cvq->last_avail_idx >= cvq->size)
 		cvq->last_avail_idx -= cvq->size;
+	if (cvq->log)
+		cvq->log->last_avail_idx = cvq->last_avail_idx;
 
 	if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
 		vhost_avail_event(cvq) = cvq->last_avail_idx;
-- 
2.34.3


      parent reply	other threads:[~2023-10-17 14:24 UTC|newest]

Thread overview: 3+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2023-10-17 14:24 [RFC v2 0/2] vduse: Add support for reconnection Cindy Lu
2023-10-17 14:24 ` [RFC v2 1/2] vduse: add mapping process in vduse create and destroy Cindy Lu
2023-10-17 14:24 ` Cindy Lu [this message]

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20231017142403.2995341-3-lulu@redhat.com \
    --to=lulu@redhat.com \
    --cc=dev@dpdk.org \
    --cc=jasowang@redhat.com \
    --cc=maxime.coquelin@redhat.com \
    --cc=xieyongji@bytedance.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).