From: Maxime Coquelin <maxime.coquelin@redhat.com>
To: dev@dpdk.org, david.marchand@redhat.com, chenbox@nvidia.com
Cc: Maxime Coquelin <maxime.coquelin@redhat.com>, stable@dpdk.org
Subject: [PATCH 3/4] net/virtio: fix shadow control queue notification init
Date: Wed, 27 Mar 2024 10:40:31 +0100
Message-ID: <20240327094032.2400951-4-maxime.coquelin@redhat.com>
In-Reply-To: <20240327094032.2400951-1-maxime.coquelin@redhat.com>
The Virtio-user control queue kick and call FDs were not
closed at device stop time.

Fix this by using the queue iterator helper for both
initialization and uninitialization, so that the shadow
control queue is covered in both paths.
Fixes: 90966e8e5b67 ("net/virtio-user: send shadow virtqueue info to the backend")
Cc: stable@dpdk.org
Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
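
Note for reviewers: the sketch below is a minimal, self-contained model of
the pattern this patch relies on, for those not familiar with the queue
iterator reworked earlier in this series. The demo_* types and helpers are
illustrative stand-ins only, not the driver's real definitions; the actual
iterator is virtio_user_foreach_queue() in virtio_user_dev.c, and the
per-queue callbacks mirror the ones added by this patch. The assumption the
sketch illustrates is that the iterator visits one Rx and one Tx queue per
queue pair plus the control queue when hw_cvq is set (as the removed
open-coded init loop did), so init and uninit now cover the same queues.

#include <stdint.h>
#include <unistd.h>
#include <sys/eventfd.h>

/* Simplified stand-ins for illustration; the real types and the real
 * virtio_user_foreach_queue() live in virtio_user_dev.[ch].
 */
struct demo_dev {
	uint16_t max_queue_pairs;
	int hw_cvq;		/* shadow control queue enabled */
	int callfds[8];
	int kickfds[8];
};

typedef int (*demo_queue_cb)(struct demo_dev *dev, uint32_t queue_sel);

/* One Rx and one Tx queue per pair, plus the control queue when hw_cvq
 * is set -- the queue the old open-coded uninit loop missed.
 */
static int
demo_foreach_queue(struct demo_dev *dev, demo_queue_cb cb)
{
	uint32_t i, nr_vq = dev->max_queue_pairs * 2;

	if (dev->hw_cvq)
		nr_vq++;

	for (i = 0; i < nr_vq; i++)
		if (cb(dev, i) < 0)
			return -1;

	return 0;
}

static int
demo_init_notify_queue(struct demo_dev *dev, uint32_t queue_sel)
{
	dev->callfds[queue_sel] = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
	dev->kickfds[queue_sel] = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);

	return (dev->callfds[queue_sel] < 0 || dev->kickfds[queue_sel] < 0) ? -1 : 0;
}

static int
demo_uninit_notify_queue(struct demo_dev *dev, uint32_t queue_sel)
{
	/* Safe on never-initialized queues: -1 entries are skipped. */
	if (dev->kickfds[queue_sel] >= 0) {
		close(dev->kickfds[queue_sel]);
		dev->kickfds[queue_sel] = -1;
	}
	if (dev->callfds[queue_sel] >= 0) {
		close(dev->callfds[queue_sel]);
		dev->callfds[queue_sel] = -1;
	}

	return 0;
}

int
main(void)
{
	struct demo_dev dev = { .max_queue_pairs = 2, .hw_cvq = 1 };
	uint32_t i;

	for (i = 0; i < 8; i++)
		dev.callfds[i] = dev.kickfds[i] = -1;

	if (demo_foreach_queue(&dev, demo_init_notify_queue) < 0)
		demo_foreach_queue(&dev, demo_uninit_notify_queue); /* error roll-back */

	demo_foreach_queue(&dev, demo_uninit_notify_queue);	/* device stop path */

	return 0;
}
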
.../net/virtio/virtio_user/virtio_user_dev.c | 90 +++++++++----------
1 file changed, 43 insertions(+), 47 deletions(-)
diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.c b/drivers/net/virtio/virtio_user/virtio_user_dev.c
index 0776c54deb..912e87fecf 100644
--- a/drivers/net/virtio/virtio_user/virtio_user_dev.c
+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.c
@@ -33,6 +33,45 @@ const char * const virtio_user_backend_strings[] = {
[VIRTIO_USER_BACKEND_VHOST_VDPA] = "VHOST_VDPA",
};
+static int
+virtio_user_uninit_notify_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
+{
+ if (dev->kickfds[queue_sel] >= 0) {
+ close(dev->kickfds[queue_sel]);
+ dev->kickfds[queue_sel] = -1;
+ }
+
+ if (dev->callfds[queue_sel] >= 0) {
+ close(dev->callfds[queue_sel]);
+ dev->callfds[queue_sel] = -1;
+ }
+
+ return 0;
+}
+
+static int
+virtio_user_init_notify_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
+{
+ /* May use invalid flag, but some backend uses kickfd and
+ * callfd as criteria to judge if dev is alive. so finally we
+ * use real event_fd.
+ */
+ dev->callfds[queue_sel] = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
+ if (dev->callfds[queue_sel] < 0) {
+ PMD_DRV_LOG(ERR, "(%s) Failed to setup callfd for queue %u: %s",
+ dev->path, queue_sel, strerror(errno));
+ return -1;
+ }
+ dev->kickfds[queue_sel] = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
+ if (dev->kickfds[queue_sel] < 0) {
+ PMD_DRV_LOG(ERR, "(%s) Failed to setup kickfd for queue %u: %s",
+ dev->path, queue_sel, strerror(errno));
+ return -1;
+ }
+
+ return 0;
+}
+
static int
virtio_user_destroy_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
{
@@ -423,33 +462,9 @@ virtio_user_dev_init_mac(struct virtio_user_dev *dev, const char *mac)
static int
virtio_user_dev_init_notify(struct virtio_user_dev *dev)
{
- uint32_t i, j, nr_vq;
- int callfd;
- int kickfd;
-
- nr_vq = dev->max_queue_pairs * 2;
- if (dev->hw_cvq)
- nr_vq++;
- for (i = 0; i < nr_vq; i++) {
- /* May use invalid flag, but some backend uses kickfd and
- * callfd as criteria to judge if dev is alive. so finally we
- * use real event_fd.
- */
- callfd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
- if (callfd < 0) {
- PMD_DRV_LOG(ERR, "(%s) callfd error, %s", dev->path, strerror(errno));
- goto err;
- }
- kickfd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
- if (kickfd < 0) {
- close(callfd);
- PMD_DRV_LOG(ERR, "(%s) kickfd error, %s", dev->path, strerror(errno));
- goto err;
- }
- dev->callfds[i] = callfd;
- dev->kickfds[i] = kickfd;
- }
+ if (virtio_user_foreach_queue(dev, virtio_user_init_notify_queue) < 0)
+ goto err;
if (dev->device_features & (1ULL << VIRTIO_F_NOTIFICATION_DATA))
if (dev->ops->map_notification_area &&
@@ -458,16 +473,7 @@ virtio_user_dev_init_notify(struct virtio_user_dev *dev)
return 0;
err:
- for (j = 0; j < i; j++) {
- if (dev->kickfds[j] >= 0) {
- close(dev->kickfds[j]);
- dev->kickfds[j] = -1;
- }
- if (dev->callfds[j] >= 0) {
- close(dev->callfds[j]);
- dev->callfds[j] = -1;
- }
- }
+ virtio_user_foreach_queue(dev, virtio_user_uninit_notify_queue);
return -1;
}
@@ -475,18 +481,8 @@ virtio_user_dev_init_notify(struct virtio_user_dev *dev)
static void
virtio_user_dev_uninit_notify(struct virtio_user_dev *dev)
{
- uint32_t i;
+ virtio_user_foreach_queue(dev, virtio_user_uninit_notify_queue);
- for (i = 0; i < dev->max_queue_pairs * 2; ++i) {
- if (dev->kickfds[i] >= 0) {
- close(dev->kickfds[i]);
- dev->kickfds[i] = -1;
- }
- if (dev->callfds[i] >= 0) {
- close(dev->callfds[i]);
- dev->callfds[i] = -1;
- }
- }
if (dev->ops->unmap_notification_area && dev->notify_area)
dev->ops->unmap_notification_area(dev);
}
--
2.44.0
Thread overview: 6+ messages
2024-03-27 9:40 [PATCH 0/4] Virtio-user queues setup fixes Maxime Coquelin
2024-03-27 9:40 ` [PATCH 1/4] net/virtio: rename Virtio-user queue iterator Maxime Coquelin
2024-03-27 9:40 ` [PATCH 2/4] net/virtio: use iterator to destroy Virtio-user queues Maxime Coquelin
2024-03-27  9:40 ` [PATCH 3/4] net/virtio: fix shadow control queue notification init Maxime Coquelin [this message]
2024-03-27 9:40 ` [PATCH 4/4] net/virtio: fix shadow control queue allocation Maxime Coquelin
2024-03-27 10:04 ` [PATCH 0/4] Virtio-user queues setup fixes David Marchand