DPDK patches and discussions
* [RFC v2 0/2] vduse: Add support for reconnection
@ 2023-10-17 14:24 Cindy Lu
From: Cindy Lu @ 2023-10-17 14:24 UTC
  To: lulu, jasowang, xieyongji, dev, maxime.coquelin

This series is based on Maxime's patch:
https://gitlab.com/mcoquelin/dpdk-next-virtio/-/commit/a89dc311f2d03e99b8180f377b4a60a0e94
The biggest change is synchronizing the reconnection state with the
kernel via mmap().
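
For reference, the shared area carries roughly the following state.
This is a sketch inferred from the fields used in the patches below;
the field widths are guesses, and the authoritative uAPI definitions
live in the kernel branch:

  struct vhost_reconnect_data {
  	uint32_t version;		/* VHOST_VDUSE_API_VERSION */
  	uint8_t  status;		/* last virtio device status */
  	uint32_t reconnect_time;	/* incremented on each reconnection */
  	uint32_t nr_vrings;
  	uint64_t features;		/* features negotiated before restart */
  };

  struct vhost_reconnect_vring {
  	uint16_t last_avail_idx;
  	uint8_t  avail_wrap_counter;	/* packed ring wrap state */
  };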

The kernel branch is:
https://gitlab.com/lulu6/vhost/tree/vduse5
The DPDK branch is:
https://gitlab.com/lulu6/dpdk/tree/rfc_vduse

Testing passed with VDUSE + dpdk-testpmd.

Changes in v2:
1. Move struct vhost_reconnect_data to the uAPI.
2. Add struct vduse_reconnect_mmap_info to save the reconnect-related
   information.

Signed-off-by: Cindy Lu <lulu@redhat.com>

Cindy Lu (1):
  vduse: add mapping process in vduse create and destroy

Maxime Coquelin (1):
  vhost: add reconnection support to VDUSE (WIP)

 lib/vhost/vduse.c           | 191 +++++++++++++++++++++++++++---------
 lib/vhost/vhost.h           |  10 ++
 lib/vhost/virtio_net.c      |  22 +++++
 lib/vhost/virtio_net_ctrl.c |   4 +
 4 files changed, 183 insertions(+), 44 deletions(-)

-- 
2.34.3

* [RFC v2 1/2] vduse: add mapping process in vduse create and destroy
From: Cindy Lu @ 2023-10-17 14:24 UTC
  To: lulu, jasowang, xieyongji, dev, maxime.coquelin

The changes in the creation process are:
1. Check whether we need to reconnect.
2. Use an ioctl to get the reconnect info (size and max page number)
   from the kernel.
3. Map one page for the reconnect status, plus one page for every
   virtqueue (see the sketch below).
The change in the destroy process is adding the corresponding unmap
steps.
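
As a sketch, the mapping scheme used in the diff below (mmap_info is
returned by the VDUSE_GET_RECONNECT_INFO ioctl, dev_fd is the opened
/dev/vduse device fd; error handling omitted):

  /* Page 0 holds the device-level reconnect state. */
  dev->log = mmap(NULL, mmap_info.size, PROT_READ | PROT_WRITE,
  		MAP_SHARED, dev_fd, 0);

  /* Then one page per virtqueue, at offset (i + 1) * mmap_info.size. */
  for (i = 0; i < total_queues; i++)
  	dev->virtqueue[i]->log = mmap(NULL, mmap_info.size,
  				      PROT_READ | PROT_WRITE, MAP_SHARED,
  				      dev_fd, (i + 1) * mmap_info.size);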

Signed-off-by: Cindy Lu <lulu@redhat.com>
---
 lib/vhost/vduse.c | 148 ++++++++++++++++++++++++++++++++++++----------
 lib/vhost/vhost.h |  10 ++++
 2 files changed, 127 insertions(+), 31 deletions(-)

diff --git a/lib/vhost/vduse.c b/lib/vhost/vduse.c
index 4f36277d3b..9b7f829a7a 100644
--- a/lib/vhost/vduse.c
+++ b/lib/vhost/vduse.c
@@ -376,6 +376,19 @@ vduse_device_create(const char *path)
 	uint64_t features = VDUSE_NET_SUPPORTED_FEATURES;
 	struct vduse_dev_config *dev_config = NULL;
 	const char *name = path + strlen("/dev/vduse/");
+	char reconnect_dev[PATH_MAX];
+	struct vhost_reconnect_data *log = NULL;
+	struct vduse_reconnect_mmap_info mmap_info;
+	bool reconnect = false;
+
+	ret = snprintf(reconnect_dev, sizeof(reconnect_dev), "%s/%s", "/dev/vduse", name);
+	if (access(reconnect_dev, F_OK) == 0) {
+		reconnect = true;
+		VHOST_LOG_CONFIG(name, INFO, "Device already exists, reconnecting...\n");
+	} else {
+		reconnect = false;
+		VHOST_LOG_CONFIG(name, INFO, "Device %s does not exist, creating...\n", reconnect_dev);
+	}
 
 	/* If first device, create events dispatcher thread */
 	if (vduse_events_thread == false) {
@@ -407,16 +420,8 @@ vduse_device_create(const char *path)
 	}
 
 	if (ioctl(control_fd, VDUSE_SET_API_VERSION, &ver)) {
-		VHOST_LOG_CONFIG(name, ERR, "Failed to set API version: %" PRIu64 ": %s\n",
-				ver, strerror(errno));
-		ret = -1;
-		goto out_ctrl_close;
-	}
-
-	dev_config = malloc(offsetof(struct vduse_dev_config, config) +
-			sizeof(vnet_config));
-	if (!dev_config) {
-		VHOST_LOG_CONFIG(name, ERR, "Failed to allocate VDUSE config\n");
+		VHOST_LOG_CONFIG(name, ERR, "Failed to set API version: %" PRIu64 ": %s\n", ver,
+				 strerror(errno));
 		ret = -1;
 		goto out_ctrl_close;
 	}
@@ -424,7 +429,7 @@ vduse_device_create(const char *path)
 	ret = rte_vhost_driver_get_queue_num(path, &max_queue_pairs);
 	if (ret < 0) {
 		VHOST_LOG_CONFIG(name, ERR, "Failed to get max queue pairs\n");
-		goto out_free;
+		goto out_ctrl_close;
 	}
 
 	VHOST_LOG_CONFIG(path, INFO, "VDUSE max queue pairs: %u\n", max_queue_pairs);
@@ -435,23 +440,34 @@ vduse_device_create(const char *path)
 	else
 		total_queues += 1; /* Includes ctrl queue */
 
-	vnet_config.max_virtqueue_pairs = max_queue_pairs;
-	memset(dev_config, 0, sizeof(struct vduse_dev_config));
-
-	strncpy(dev_config->name, name, VDUSE_NAME_MAX - 1);
-	dev_config->device_id = VIRTIO_ID_NET;
-	dev_config->vendor_id = 0;
-	dev_config->features = features;
-	dev_config->vq_num = total_queues;
-	dev_config->vq_align = sysconf(_SC_PAGE_SIZE);
-	dev_config->config_size = sizeof(struct virtio_net_config);
-	memcpy(dev_config->config, &vnet_config, sizeof(vnet_config));
+	if (!reconnect) {
+		dev_config =
+			malloc(offsetof(struct vduse_dev_config, config) + sizeof(vnet_config));
+		if (!dev_config) {
+			VHOST_LOG_CONFIG(name, ERR, "Failed to allocate VDUSE config\n");
+			ret = -1;
+			goto out_ctrl_close;
+		}
 
-	ret = ioctl(control_fd, VDUSE_CREATE_DEV, dev_config);
-	if (ret < 0) {
-		VHOST_LOG_CONFIG(name, ERR, "Failed to create VDUSE device: %s\n",
-				strerror(errno));
-		goto out_free;
+		vnet_config.max_virtqueue_pairs = max_queue_pairs;
+		memset(dev_config, 0, sizeof(struct vduse_dev_config));
+
+		strncpy(dev_config->name, name, VDUSE_NAME_MAX - 1);
+		dev_config->device_id = VIRTIO_ID_NET;
+		dev_config->vendor_id = 0;
+		dev_config->features = features;
+		dev_config->vq_num = total_queues;
+		dev_config->vq_align = sysconf(_SC_PAGE_SIZE);
+		dev_config->config_size = sizeof(struct virtio_net_config);
+		memcpy(dev_config->config, &vnet_config, sizeof(vnet_config));
+
+		ret = ioctl(control_fd, VDUSE_CREATE_DEV, dev_config);
+		free(dev_config);
+		if (ret < 0) {
+			VHOST_LOG_CONFIG(name, ERR, "Failed to create VDUSE device: %s\n",
+					 strerror(errno));
+			goto out_ctrl_close;
+		}
 	}
 
 	dev_fd = open(path, O_RDWR);
@@ -485,10 +501,43 @@ vduse_device_create(const char *path)
 	strncpy(dev->ifname, path, IF_NAME_SZ - 1);
 	dev->vduse_ctrl_fd = control_fd;
 	dev->vduse_dev_fd = dev_fd;
+
+	ret = ioctl(dev_fd, VDUSE_GET_RECONNECT_INFO, &mmap_info);
+	if (ret < 0) {
+		VHOST_LOG_CONFIG(name, ERR, "Failed to get reconnect info from VDUSE device: %s\n",
+				 strerror(errno));
+		goto out_dev_close;
+	}
+	dev->mmap_info.size = mmap_info.size;
+	dev->mmap_info.max_index = mmap_info.max_index;
+	log = (struct vhost_reconnect_data *)mmap(NULL, mmap_info.size, PROT_READ | PROT_WRITE,
+						  MAP_SHARED, dev->vduse_dev_fd, 0);
+	if (log == MAP_FAILED) {
+		VHOST_LOG_CONFIG(name, ERR, "Failed to map VDUSE reconnect data\n");
+		goto out_dev_close;
+	}
+
+	dev->log = log;
+
+	if (reconnect) {
+		dev->status = dev->log->status;
+		log->version = VHOST_VDUSE_API_VERSION;
+		log->reconnect_time += 1;
+		log->nr_vrings = total_queues;
+	}
+
 	vhost_setup_virtio_net(dev->vid, true, true, true, true);
 
+	if (total_queues > mmap_info.max_index - 1) {
+		VHOST_LOG_CONFIG(name, ERR, "Total vring number %d larger than max %d\n", total_queues,
+				 mmap_info.max_index - 1);
+		goto out_dev_close;
+	}
+
 	for (i = 0; i < total_queues; i++) {
 		struct vduse_vq_config vq_cfg = { 0 };
+		struct vhost_reconnect_vring *log_vq;
+		struct vhost_virtqueue *vq;
 
 		ret = alloc_vring_queue(dev, i);
 		if (ret) {
@@ -496,6 +545,22 @@ vduse_device_create(const char *path)
 			goto out_dev_destroy;
 		}
 
+		log_vq = (struct vhost_reconnect_vring *)mmap(NULL, mmap_info.size,
+							      PROT_READ | PROT_WRITE, MAP_SHARED,
+							      dev->vduse_dev_fd,
+							      (i + 1) * mmap_info.size);
+		if (log_vq == MAP_FAILED) {
+			VHOST_LOG_CONFIG(name, ERR, "Failed to map vring %d reconnect data\n",
+					 i);
+
+			goto out_dev_destroy;
+		}
+
+		vq = dev->virtqueue[i];
+		vq->log = log_vq;
+		if (reconnect)
+			continue;
+
 		vq_cfg.index = i;
 		vq_cfg.max_size = 1024;
 
@@ -516,7 +581,8 @@ vduse_device_create(const char *path)
 	}
 	fdset_pipe_notify(&vduse.fdset);
 
-	free(dev_config);
+	if (reconnect && dev->status & VIRTIO_DEVICE_STATUS_DRIVER_OK)
+		vduse_device_start(dev, true);
 
 	return 0;
 
@@ -526,8 +592,6 @@ vduse_device_create(const char *path)
 	if (dev_fd >= 0)
 		close(dev_fd);
 	ioctl(control_fd, VDUSE_DESTROY_DEV, name);
-out_free:
-	free(dev_config);
 out_ctrl_close:
 	close(control_fd);
 
@@ -553,7 +617,29 @@ vduse_device_destroy(const char *path)
 
 	if (vid == RTE_MAX_VHOST_DEVICE)
 		return -1;
-
+	if (dev->log) {
+		for (uint32_t i = 0; i < dev->log->nr_vrings; i++) {
+			struct vhost_virtqueue *vq;
+
+			vq = dev->virtqueue[i];
+			if (vq->log) {
+				ret = munmap(vq->log, dev->mmap_info.size);
+				if (ret) {
+					VHOST_LOG_CONFIG(name, ERR,
+							 "Failed to unmap device %s vq %d: %s\n",
+							 path, i, strerror(errno));
+					ret = -1;
+				}
+			}
+		}
+		ret = munmap(dev->log, dev->mmap_info.size);
+		if (ret) {
+			VHOST_LOG_CONFIG(name, ERR, "Failed to unmap device %s dev status: %s\n",
+					 path, strerror(errno));
+			ret = -1;
+		}
+	}
+	dev->log = NULL;
 	if (dev->cvq && dev->cvq->kickfd >= 0) {
 		fdset_del(&vduse.fdset, dev->cvq->kickfd);
 		fdset_pipe_notify(&vduse.fdset);
diff --git a/lib/vhost/vhost.h b/lib/vhost/vhost.h
index c8f2a0d43a..1879c6875b 100644
--- a/lib/vhost/vhost.h
+++ b/lib/vhost/vhost.h
@@ -19,6 +19,7 @@
 #include <rte_ether.h>
 #include <rte_malloc.h>
 #include <rte_dmadev.h>
+#include <linux/vduse.h>
 
 #include "rte_vhost.h"
 #include "vdpa_driver.h"
@@ -344,6 +345,8 @@ struct vhost_virtqueue {
 
 	struct vhost_vring_addr ring_addrs;
 	struct virtqueue_stats	stats;
+
+	struct vhost_reconnect_vring *log;
 } __rte_cache_aligned;
 
 /* Virtio device status as per Virtio specification */
@@ -537,6 +540,9 @@ struct virtio_net {
 	struct rte_vhost_user_extern_ops extern_ops;
 
 	struct vhost_backend_ops *backend_ops;
+
+	struct vhost_reconnect_data *log;
+	struct vduse_reconnect_mmap_info mmap_info;
 } __rte_cache_aligned;
 
 static inline void
@@ -582,6 +588,10 @@ vq_inc_last_avail_packed(struct vhost_virtqueue *vq, uint16_t num)
 		vq->avail_wrap_counter ^= 1;
 		vq->last_avail_idx -= vq->size;
 	}
+	if (vq->log) {
+		vq->log->last_avail_idx = vq->last_avail_idx;
+		vq->log->avail_wrap_counter = vq->avail_wrap_counter;
+	}
 }
 
 void __vhost_log_cache_write(struct virtio_net *dev,
-- 
2.34.3

* [RFC v2 2/2] vhost: add reconnection support to VDUSE (WIP)
From: Cindy Lu @ 2023-10-17 14:24 UTC
  To: lulu, jasowang, xieyongji, dev, maxime.coquelin

From: Maxime Coquelin <maxime.coquelin@redhat.com>

This patch is derived from
https://gitlab.com/mcoquelin/dpdk-next-virtio/-/commit/a89dc311f2d03e99b8180f377b4a60a0e94
The biggest change is moving the mmap-related handling into the
previous patch.
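
The recurring pattern in the datapath changes below: whenever the ring
indices move, mirror them into the shared page so that the state
survives an application restart (sketch; the wrap counter only matters
for packed rings):

  vq->last_avail_idx += num_buffers;
  if (vq->log) {
  	vq->log->last_avail_idx = vq->last_avail_idx;
  	vq->log->avail_wrap_counter = vq->avail_wrap_counter;
  }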

Signed-off-by: Cindy Lu <lulu@redhat.com>
---
 lib/vhost/vduse.c           | 43 ++++++++++++++++++++++++++-----------
 lib/vhost/virtio_net.c      | 22 +++++++++++++++++++
 lib/vhost/virtio_net_ctrl.c |  4 ++++
 3 files changed, 56 insertions(+), 13 deletions(-)

diff --git a/lib/vhost/vduse.c b/lib/vhost/vduse.c
index 9b7f829a7a..4f12f039db 100644
--- a/lib/vhost/vduse.c
+++ b/lib/vhost/vduse.c
@@ -165,7 +165,7 @@ vduse_control_queue_event(int fd, void *arg, int *remove __rte_unused)
 }
 
 static void
-vduse_vring_setup(struct virtio_net *dev, unsigned int index)
+vduse_vring_setup(struct virtio_net *dev, unsigned int index, bool reconnect)
 {
 	struct vhost_virtqueue *vq = dev->virtqueue[index];
 	struct vhost_vring_addr *ra = &vq->ring_addrs;
@@ -181,15 +181,13 @@ vduse_vring_setup(struct virtio_net *dev, unsigned int index)
 		return;
 	}
 
-	VHOST_LOG_CONFIG(dev->ifname, INFO, "VQ %u info:\n", index);
-	VHOST_LOG_CONFIG(dev->ifname, INFO, "\tnum: %u\n", vq_info.num);
-	VHOST_LOG_CONFIG(dev->ifname, INFO, "\tdesc_addr: %llx\n", vq_info.desc_addr);
-	VHOST_LOG_CONFIG(dev->ifname, INFO, "\tdriver_addr: %llx\n", vq_info.driver_addr);
-	VHOST_LOG_CONFIG(dev->ifname, INFO, "\tdevice_addr: %llx\n", vq_info.device_addr);
-	VHOST_LOG_CONFIG(dev->ifname, INFO, "\tavail_idx: %u\n", vq_info.split.avail_index);
-	VHOST_LOG_CONFIG(dev->ifname, INFO, "\tready: %u\n", vq_info.ready);
-
-	vq->last_avail_idx = vq_info.split.avail_index;
+	if (reconnect) {
+		vq->last_avail_idx = vq->log->last_avail_idx;
+		vq->last_used_idx = vq->log->last_avail_idx;
+	} else {
+		vq->last_avail_idx = vq_info.split.avail_index;
+		vq->last_used_idx = vq_info.split.avail_index;
+	}
 	vq->size = vq_info.num;
 	vq->ready = vq_info.ready;
 	vq->enabled = true;
@@ -197,6 +195,14 @@ vduse_vring_setup(struct virtio_net *dev, unsigned int index)
 	ra->avail_user_addr = vq_info.driver_addr;
 	ra->used_user_addr = vq_info.device_addr;
 
+	VHOST_LOG_CONFIG(dev->ifname, INFO, "VQ %u info:\n", index);
+	VHOST_LOG_CONFIG(dev->ifname, INFO, "\tnum: %u\n", vq_info.num);
+	VHOST_LOG_CONFIG(dev->ifname, INFO, "\tdesc_addr: %llx\n", vq_info.desc_addr);
+	VHOST_LOG_CONFIG(dev->ifname, INFO, "\tdriver_addr: %llx\n", vq_info.driver_addr);
+	VHOST_LOG_CONFIG(dev->ifname, INFO, "\tdevice_addr: %llx\n", vq_info.device_addr);
+	VHOST_LOG_CONFIG(dev->ifname, INFO, "\tavail_idx: %u\n", vq->last_avail_idx);
+	VHOST_LOG_CONFIG(dev->ifname, INFO, "\tready: %u\n", vq_info.ready);
+
 	vq->kickfd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
 	if (vq->kickfd < 0) {
 		VHOST_LOG_CONFIG(dev->ifname, ERR, "Failed to init kickfd for VQ %u: %s\n",
@@ -250,7 +256,7 @@ vduse_vring_setup(struct virtio_net *dev, unsigned int index)
 }
 
 static void
-vduse_device_start(struct virtio_net *dev)
+vduse_device_start(struct virtio_net *dev, bool reconnect)
 {
 	unsigned int i, ret;
 
@@ -268,6 +274,16 @@ vduse_device_start(struct virtio_net *dev)
 		return;
 	}
 
+	if (reconnect && dev->features != dev->log->features) {
+		VHOST_LOG_CONFIG(dev->ifname, ERR,
+				 "Mismatch between reconnect file features 0x%" PRIx64
+				 " & device features 0x%" PRIx64 "\n",
+				 (uint64_t)dev->log->features, dev->features);
+		return;
+	}
+
+	dev->log->features = dev->features;
+
 	VHOST_LOG_CONFIG(dev->ifname, INFO, "negotiated Virtio features: 0x%" PRIx64 "\n",
 		dev->features);
 
@@ -281,7 +297,7 @@ vduse_device_start(struct virtio_net *dev)
 	}
 
 	for (i = 0; i < dev->nr_vring; i++)
-		vduse_vring_setup(dev, i);
+		vduse_vring_setup(dev, i, reconnect);
 
 	dev->flags |= VIRTIO_DEV_READY;
 
@@ -335,9 +351,10 @@ vduse_events_handler(int fd, void *arg, int *remove __rte_unused)
 		VHOST_LOG_CONFIG(dev->ifname, INFO, "\tnew status: 0x%08x\n",
 				req.s.status);
 		dev->status = req.s.status;
+		dev->log->status = dev->status;
 
 		if (dev->status & VIRTIO_DEVICE_STATUS_DRIVER_OK)
-			vduse_device_start(dev);
+			vduse_device_start(dev, false);
 
 		resp.result = VDUSE_REQ_RESULT_OK;
 		break;
diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
index be28ea5151..49cce787bc 100644
--- a/lib/vhost/virtio_net.c
+++ b/lib/vhost/virtio_net.c
@@ -1443,6 +1443,8 @@ virtio_dev_rx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		}
 
 		vq->last_avail_idx += num_buffers;
+		if (vq->log)
+			vq->log->last_avail_idx = vq->last_avail_idx;
 	}
 
 	do_data_copy_enqueue(dev, vq);
@@ -1838,6 +1840,8 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev, struct vhost_virtqueue
 		pkts_info[slot_idx].mbuf = pkts[pkt_idx];
 
 		vq->last_avail_idx += num_buffers;
+		if (vq->log)
+			vq->log->last_avail_idx = vq->last_avail_idx;
 	}
 
 	if (unlikely(pkt_idx == 0))
@@ -1866,6 +1870,8 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev, struct vhost_virtqueue
 		/* recover shadow used ring and available ring */
 		vq->shadow_used_idx -= num_descs;
 		vq->last_avail_idx -= num_descs;
+		if (vq->log)
+			vq->log->last_avail_idx = vq->last_avail_idx;
 	}
 
 	/* keep used descriptors */
@@ -2077,9 +2083,15 @@ dma_error_handler_packed(struct vhost_virtqueue *vq, uint16_t slot_idx,
 
 	if (vq->last_avail_idx >= descs_err) {
 		vq->last_avail_idx -= descs_err;
+		if (vq->log)
+			vq->log->last_avail_idx = vq->last_avail_idx;
 	} else {
 		vq->last_avail_idx = vq->last_avail_idx + vq->size - descs_err;
 		vq->avail_wrap_counter ^= 1;
+		if (vq->log) {
+			vq->log->last_avail_idx = vq->last_avail_idx;
+			vq->log->avail_wrap_counter = vq->avail_wrap_counter;
+		}
 	}
 
 	if (async->buffer_idx_packed >= buffers_err)
@@ -3161,6 +3173,8 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		rte_pktmbuf_free_bulk(&pkts[i - 1], count - i + 1);
 
 	vq->last_avail_idx += i;
+	if (vq->log)
+		vq->log->last_avail_idx = vq->last_avail_idx;
 
 	do_data_copy_dequeue(vq);
 	if (unlikely(i < count))
@@ -3796,6 +3810,8 @@ virtio_dev_tx_async_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		async->desc_idx_split++;
 
 		vq->last_avail_idx++;
+		if (vq->log)
+			vq->log->last_avail_idx = vq->last_avail_idx;
 	}
 
 	if (unlikely(dropped))
@@ -3814,6 +3830,8 @@ virtio_dev_tx_async_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		pkt_idx = n_xfer;
 		/* recover available ring */
 		vq->last_avail_idx -= pkt_err;
+		if (vq->log)
+			vq->log->last_avail_idx = vq->last_avail_idx;
 
 		/**
 		 * recover async channel copy related structures and free pktmbufs
@@ -4093,6 +4111,10 @@ virtio_dev_tx_async_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
 			vq->last_avail_idx += vq->size - descs_err;
 			vq->avail_wrap_counter ^= 1;
 		}
+		if (vq->log) {
+			vq->log->last_avail_idx = vq->last_avail_idx;
+			vq->log->avail_wrap_counter = vq->avail_wrap_counter;
+		}
 	}
 
 	async->pkts_idx += pkt_idx;
diff --git a/lib/vhost/virtio_net_ctrl.c b/lib/vhost/virtio_net_ctrl.c
index 36e34153e7..00c55a12e5 100644
--- a/lib/vhost/virtio_net_ctrl.c
+++ b/lib/vhost/virtio_net_ctrl.c
@@ -168,6 +168,8 @@ virtio_net_ctrl_pop(struct virtio_net *dev, struct vhost_virtqueue *cvq,
 	cvq->last_avail_idx++;
 	if (cvq->last_avail_idx >= cvq->size)
 		cvq->last_avail_idx -= cvq->size;
+	if (cvq->log)
+		cvq->log->last_avail_idx = cvq->last_avail_idx;
 
 	if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
 		vhost_avail_event(cvq) = cvq->last_avail_idx;
@@ -180,6 +182,8 @@ virtio_net_ctrl_pop(struct virtio_net *dev, struct vhost_virtqueue *cvq,
 	cvq->last_avail_idx++;
 	if (cvq->last_avail_idx >= cvq->size)
 		cvq->last_avail_idx -= cvq->size;
+	if (cvq->log)
+		cvq->log->last_avail_idx = cvq->last_avail_idx;
 
 	if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
 		vhost_avail_event(cvq) = cvq->last_avail_idx;
-- 
2.34.3
