From: Maxime Coquelin
To: dev@dpdk.org, david.marchand@redhat.com, chenbox@nvidia.com
Cc: Maxime Coquelin
Subject: [PATCH v3 1/2] vhost: add logging mechanism for reconnection
Date: Mon, 23 Sep 2024 21:51:50 +0200
Message-ID: <20240923195151.73527-2-maxime.coquelin@redhat.com>
In-Reply-To: <20240923195151.73527-1-maxime.coquelin@redhat.com>
References: <20240923195151.73527-1-maxime.coquelin@redhat.com>

This patch introduces a way for the backend to keep track of the
information needed to be able to reconnect without frontend cooperation.

It will be used for VDUSE, which does not provide an interface for the
backend to save and later recover the local virtqueue metadata needed to
reconnect.

Vhost-user support could also be added for improved packed ring
reconnection support.
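For illustration only, the sketch below shows how a backend could consume
this log when it comes back up. It is not part of this patch: how the
vhost_reconnect_data memory is allocated and recovered across backend
restarts is handled by the VDUSE patch (2/2) of this series, and the helper
name used here is hypothetical.

```c
/*
 * Hypothetical restore helper, for illustration only. It assumes the
 * backend has already mapped back the vhost_reconnect_data area that
 * survived its restart (how that memory is obtained is covered by the
 * VDUSE patch of this series, not shown here).
 */
static int
vhost_restore_from_reconnect_log(struct virtio_net *dev,
		struct vhost_reconnect_data *log)
{
	uint32_t i;

	/* Refuse logs written with a different layout version. */
	if (log->version != VHOST_RECONNECT_VERSION)
		return -1;

	if (log->nr_vrings > VHOST_MAX_VRING)
		return -1;

	for (i = 0; i < log->nr_vrings; i++) {
		struct vhost_virtqueue *vq = dev->virtqueue[i];

		if (vq == NULL)
			continue;

		/* Resume processing from the last logged available index. */
		vq->last_avail_idx = log->vring[i].last_avail_idx;
		vq->avail_wrap_counter = log->vring[i].avail_wrap_counter;

		/* Let the datapath helpers keep this entry up to date. */
		vq->reconnect_log = &log->vring[i];
	}

	dev->reconnect_log = log;

	return 0;
}
```

The idea is that every time last_avail_idx moves in the datapath, the
inline helpers added by this patch mirror the new value into the persistent
per-vring entry, so the log is always current if the backend crashes or is
restarted.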
Signed-off-by: Maxime Coquelin
Reviewed-by: Chenbo Xia
Reviewed-by: David Marchand
---
 lib/vhost/vhost.c           |  2 ++
 lib/vhost/vhost.h           | 41 ++++++++++++++++++++++++++++++++++---
 lib/vhost/vhost_user.c      |  4 ++++
 lib/vhost/virtio_net.c      |  8 ++++++++
 lib/vhost/virtio_net_ctrl.c |  2 ++
 5 files changed, 54 insertions(+), 3 deletions(-)

diff --git a/lib/vhost/vhost.c b/lib/vhost/vhost.c
index ac71d17784..5a50a06f8d 100644
--- a/lib/vhost/vhost.c
+++ b/lib/vhost/vhost.c
@@ -1712,9 +1712,11 @@ rte_vhost_set_vring_base(int vid, uint16_t queue_id,
 		vq->avail_wrap_counter = !!(last_avail_idx & (1 << 15));
 		vq->last_used_idx = last_used_idx & 0x7fff;
 		vq->used_wrap_counter = !!(last_used_idx & (1 << 15));
+		vhost_virtqueue_reconnect_log_packed(vq);
 	} else {
 		vq->last_avail_idx = last_avail_idx;
 		vq->last_used_idx = last_used_idx;
+		vhost_virtqueue_reconnect_log_split(vq);
 	}
 
 	return 0;
diff --git a/lib/vhost/vhost.h b/lib/vhost/vhost.h
index cd3fa55f1b..1f4192f5d1 100644
--- a/lib/vhost/vhost.h
+++ b/lib/vhost/vhost.h
@@ -269,6 +269,24 @@ struct vhost_async {
 	};
 };
 
+#define VHOST_RECONNECT_VERSION		0x0
+#define VHOST_MAX_VRING			0x100
+#define VHOST_MAX_QUEUE_PAIRS		0x80
+
+struct __rte_cache_aligned vhost_reconnect_vring {
+	uint16_t last_avail_idx;
+	bool avail_wrap_counter;
+};
+
+struct vhost_reconnect_data {
+	uint32_t version;
+	uint64_t features;
+	uint8_t status;
+	struct virtio_net_config config;
+	uint32_t nr_vrings;
+	struct vhost_reconnect_vring vring[VHOST_MAX_VRING];
+};
+
 /**
  * Structure contains variables relevant to RX/TX virtqueues.
  */
@@ -351,6 +369,7 @@ struct __rte_cache_aligned vhost_virtqueue {
 	struct virtqueue_stats stats;
 
 	RTE_ATOMIC(bool) irq_pending;
+	struct vhost_reconnect_vring *reconnect_log;
 };
 
 /* Virtio device status as per Virtio specification */
@@ -362,9 +381,6 @@ struct __rte_cache_aligned vhost_virtqueue {
 #define VIRTIO_DEVICE_STATUS_DEV_NEED_RESET	0x40
 #define VIRTIO_DEVICE_STATUS_FAILED		0x80
 
-#define VHOST_MAX_VRING			0x100
-#define VHOST_MAX_QUEUE_PAIRS		0x80
-
 /* Declare IOMMU related bits for older kernels */
 #ifndef VIRTIO_F_IOMMU_PLATFORM
 
@@ -538,8 +554,26 @@ struct __rte_cache_aligned virtio_net {
 	struct rte_vhost_user_extern_ops extern_ops;
 
 	struct vhost_backend_ops *backend_ops;
+
+	struct vhost_reconnect_data *reconnect_log;
 };
 
+static __rte_always_inline void
+vhost_virtqueue_reconnect_log_split(struct vhost_virtqueue *vq)
+{
+	if (vq->reconnect_log != NULL)
+		vq->reconnect_log->last_avail_idx = vq->last_avail_idx;
+}
+
+static __rte_always_inline void
+vhost_virtqueue_reconnect_log_packed(struct vhost_virtqueue *vq)
+{
+	if (vq->reconnect_log != NULL) {
+		vq->reconnect_log->last_avail_idx = vq->last_avail_idx;
+		vq->reconnect_log->avail_wrap_counter = vq->avail_wrap_counter;
+	}
+}
+
 static inline void
 vq_assert_lock__(struct virtio_net *dev, struct vhost_virtqueue *vq, const char *func)
 	__rte_assert_exclusive_lock(&vq->access_lock)
@@ -584,6 +618,7 @@ vq_inc_last_avail_packed(struct vhost_virtqueue *vq, uint16_t num)
 		vq->avail_wrap_counter ^= 1;
 		vq->last_avail_idx -= vq->size;
 	}
+	vhost_virtqueue_reconnect_log_packed(vq);
 }
 
 void __vhost_log_cache_write(struct virtio_net *dev,
diff --git a/lib/vhost/vhost_user.c b/lib/vhost/vhost_user.c
index 5f470da38a..d20b9e8497 100644
--- a/lib/vhost/vhost_user.c
+++ b/lib/vhost/vhost_user.c
@@ -954,6 +954,7 @@ translate_ring_addresses(struct virtio_net **pdev, struct vhost_virtqueue **pvq)
 			vq->last_used_idx, vq->used->idx);
 		vq->last_used_idx = vq->used->idx;
 		vq->last_avail_idx = vq->used->idx;
+		vhost_virtqueue_reconnect_log_split(vq);
 		VHOST_CONFIG_LOG(dev->ifname, WARNING,
 			"some packets maybe resent for Tx and dropped for Rx");
 	}
@@ -1039,9 +1040,11 @@ vhost_user_set_vring_base(struct virtio_net **pdev,
 		 */
 		vq->last_used_idx = vq->last_avail_idx;
 		vq->used_wrap_counter = vq->avail_wrap_counter;
+		vhost_virtqueue_reconnect_log_packed(vq);
 	} else {
 		vq->last_used_idx = ctx->msg.payload.state.num;
 		vq->last_avail_idx = ctx->msg.payload.state.num;
+		vhost_virtqueue_reconnect_log_split(vq);
 	}
 
 	VHOST_CONFIG_LOG(dev->ifname, INFO,
@@ -1997,6 +2000,7 @@ vhost_check_queue_inflights_split(struct virtio_net *dev,
 	}
 
 	vq->last_avail_idx += resubmit_num;
+	vhost_virtqueue_reconnect_log_split(vq);
 
 	if (resubmit_num) {
 		resubmit = rte_zmalloc_socket("resubmit", sizeof(struct rte_vhost_resubmit_info),
diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
index 370402d849..f66a0c82f8 100644
--- a/lib/vhost/virtio_net.c
+++ b/lib/vhost/virtio_net.c
@@ -1445,6 +1445,7 @@ virtio_dev_rx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		}
 
 		vq->last_avail_idx += num_buffers;
+		vhost_virtqueue_reconnect_log_split(vq);
 	}
 
 	do_data_copy_enqueue(dev, vq);
@@ -1857,6 +1858,7 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev, struct vhost_virtqueue
 		pkts_info[slot_idx].mbuf = pkts[pkt_idx];
 
 		vq->last_avail_idx += num_buffers;
+		vhost_virtqueue_reconnect_log_split(vq);
 	}
 
 	if (unlikely(pkt_idx == 0))
@@ -1885,6 +1887,7 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev, struct vhost_virtqueue
 		/* recover shadow used ring and available ring */
 		vq->shadow_used_idx -= num_descs;
 		vq->last_avail_idx -= num_descs;
+		vhost_virtqueue_reconnect_log_split(vq);
 	}
 
 	/* keep used descriptors */
@@ -2100,6 +2103,7 @@ dma_error_handler_packed(struct vhost_virtqueue *vq, uint16_t slot_idx,
 			vq->last_avail_idx = vq->last_avail_idx + vq->size - descs_err;
 			vq->avail_wrap_counter ^= 1;
 		}
+		vhost_virtqueue_reconnect_log_packed(vq);
 
 		if (async->buffer_idx_packed >= buffers_err)
 			async->buffer_idx_packed -= buffers_err;
@@ -3182,6 +3186,7 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	if (likely(vq->shadow_used_idx)) {
 		vq->last_avail_idx += vq->shadow_used_idx;
+		vhost_virtqueue_reconnect_log_split(vq);
 		do_data_copy_dequeue(vq);
 
 		flush_shadow_used_ring_split(dev, vq);
 		vhost_vring_call_split(dev, vq);
@@ -3854,6 +3859,7 @@ virtio_dev_tx_async_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		async->desc_idx_split++;
 
 		vq->last_avail_idx++;
+		vhost_virtqueue_reconnect_log_split(vq);
 	}
 
 	if (unlikely(dropped))
@@ -3872,6 +3878,7 @@ virtio_dev_tx_async_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		pkt_idx = n_xfer;
 		/* recover available ring */
 		vq->last_avail_idx -= pkt_err;
+		vhost_virtqueue_reconnect_log_split(vq);
 
 		/**
 		 * recover async channel copy related structures and free pktmbufs
@@ -4153,6 +4160,7 @@ virtio_dev_tx_async_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
 			vq->last_avail_idx += vq->size - descs_err;
 			vq->avail_wrap_counter ^= 1;
 		}
+		vhost_virtqueue_reconnect_log_packed(vq);
 	}
 
 	async->pkts_idx += pkt_idx;
diff --git a/lib/vhost/virtio_net_ctrl.c b/lib/vhost/virtio_net_ctrl.c
index 8f78122361..b8ee94018e 100644
--- a/lib/vhost/virtio_net_ctrl.c
+++ b/lib/vhost/virtio_net_ctrl.c
@@ -169,6 +169,7 @@ virtio_net_ctrl_pop(struct virtio_net *dev, struct vhost_virtqueue *cvq,
 			cvq->last_avail_idx++;
 			if (cvq->last_avail_idx >= cvq->size)
 				cvq->last_avail_idx -= cvq->size;
+			vhost_virtqueue_reconnect_log_split(cvq);
 
 			if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
 				vhost_avail_event(cvq) = cvq->last_avail_idx;
@@ -181,6 +182,7 @@ virtio_net_ctrl_pop(struct virtio_net *dev, struct vhost_virtqueue *cvq,
 	cvq->last_avail_idx++;
 	if (cvq->last_avail_idx >= cvq->size)
 		cvq->last_avail_idx -= cvq->size;
+	vhost_virtqueue_reconnect_log_split(cvq);
 
 	if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
 		vhost_avail_event(cvq) = cvq->last_avail_idx;
 
-- 
2.46.0