From: Yuan Wang <yuanx.wang@intel.com>
To: maxime.coquelin@redhat.com, chenbo.xia@intel.com
Cc: dev@dpdk.org, jiayu.hu@intel.com, xuan.ding@intel.com,
wenwux.ma@intel.com, weix.ling@intel.com
Subject: [PATCH] vhost: fix data-plane access to released vq
Date: Fri, 3 Dec 2021 16:34:00 +0000 [thread overview]
Message-ID: <20211203163400.164545-1-yuanx.wang@intel.com> (raw)
From: Yuan Wang <yuanx.wang@intel.com>
When NUMA reallocation occurs, numa_realloc() on the control
plane will free the old vq. If rte_vhost_dequeue_burst()
on the data plane gets the vq just before it is released,
it will access the released vq. Moving vq->access_lock
into struct virtio_net ensures this situation is
prevented.
Signed-off-by: Yuan Wang <yuanx.wang@intel.com>
---
lib/vhost/vhost.c | 26 +++++++++++++-------------
lib/vhost/vhost.h | 4 +---
lib/vhost/vhost_user.c | 4 ++--
lib/vhost/virtio_net.c | 16 ++++++++--------
4 files changed, 24 insertions(+), 26 deletions(-)
diff --git a/lib/vhost/vhost.c b/lib/vhost/vhost.c
index 13a9bb9dd1..4259931be9 100644
--- a/lib/vhost/vhost.c
+++ b/lib/vhost/vhost.c
@@ -627,7 +627,7 @@ alloc_vring_queue(struct virtio_net *dev, uint32_t vring_idx)
dev->virtqueue[i] = vq;
init_vring_queue(dev, i);
- rte_spinlock_init(&vq->access_lock);
+ rte_spinlock_init(&dev->vq_access_lock[i]);
vq->avail_wrap_counter = 1;
vq->used_wrap_counter = 1;
vq->signalled_used_valid = false;
@@ -1325,7 +1325,7 @@ rte_vhost_avail_entries(int vid, uint16_t queue_id)
if (!vq)
return 0;
- rte_spinlock_lock(&vq->access_lock);
+ rte_spinlock_lock(&dev->vq_access_lock[queue_id]);
if (unlikely(!vq->enabled || vq->avail == NULL))
goto out;
@@ -1333,7 +1333,7 @@ rte_vhost_avail_entries(int vid, uint16_t queue_id)
ret = *(volatile uint16_t *)&vq->avail->idx - vq->last_used_idx;
out:
- rte_spinlock_unlock(&vq->access_lock);
+ rte_spinlock_unlock(&dev->vq_access_lock[queue_id]);
return ret;
}
@@ -1417,12 +1417,12 @@ rte_vhost_enable_guest_notification(int vid, uint16_t queue_id, int enable)
if (!vq)
return -1;
- rte_spinlock_lock(&vq->access_lock);
+ rte_spinlock_lock(&dev->vq_access_lock[queue_id]);
vq->notif_enable = enable;
ret = vhost_enable_guest_notification(dev, vq, enable);
- rte_spinlock_unlock(&vq->access_lock);
+ rte_spinlock_unlock(&dev->vq_access_lock[queue_id]);
return ret;
}
@@ -1479,7 +1479,7 @@ rte_vhost_rx_queue_count(int vid, uint16_t qid)
if (vq == NULL)
return 0;
- rte_spinlock_lock(&vq->access_lock);
+ rte_spinlock_lock(&dev->vq_access_lock[qid]);
if (unlikely(!vq->enabled || vq->avail == NULL))
goto out;
@@ -1487,7 +1487,7 @@ rte_vhost_rx_queue_count(int vid, uint16_t qid)
ret = *((volatile uint16_t *)&vq->avail->idx) - vq->last_avail_idx;
out:
- rte_spinlock_unlock(&vq->access_lock);
+ rte_spinlock_unlock(&dev->vq_access_lock[qid]);
return ret;
}
@@ -1721,9 +1721,9 @@ rte_vhost_async_channel_register(int vid, uint16_t queue_id,
ops->transfer_data == NULL))
return -1;
- rte_spinlock_lock(&vq->access_lock);
+ rte_spinlock_lock(&dev->vq_access_lock[queue_id]);
ret = async_channel_register(vid, queue_id, ops);
- rte_spinlock_unlock(&vq->access_lock);
+ rte_spinlock_unlock(&dev->vq_access_lock[queue_id]);
return ret;
}
@@ -1784,7 +1784,7 @@ rte_vhost_async_channel_unregister(int vid, uint16_t queue_id)
if (!vq->async)
return ret;
- if (!rte_spinlock_trylock(&vq->access_lock)) {
+ if (!rte_spinlock_trylock(&dev->vq_access_lock[queue_id])) {
VHOST_LOG_CONFIG(ERR, "Failed to unregister async channel. "
"virt queue busy.\n");
return -1;
@@ -1799,7 +1799,7 @@ rte_vhost_async_channel_unregister(int vid, uint16_t queue_id)
vhost_free_async_mem(vq);
out:
- rte_spinlock_unlock(&vq->access_lock);
+ rte_spinlock_unlock(&dev->vq_access_lock[queue_id]);
return ret;
}
@@ -1856,14 +1856,14 @@ rte_vhost_async_get_inflight(int vid, uint16_t queue_id)
if (!vq->async)
return ret;
- if (!rte_spinlock_trylock(&vq->access_lock)) {
+ if (!rte_spinlock_trylock(&dev->vq_access_lock[queue_id])) {
VHOST_LOG_CONFIG(DEBUG, "Failed to check in-flight packets. "
"virt queue busy.\n");
return ret;
}
ret = vq->async->pkts_inflight_n;
- rte_spinlock_unlock(&vq->access_lock);
+ rte_spinlock_unlock(&dev->vq_access_lock[queue_id]);
return ret;
}
diff --git a/lib/vhost/vhost.h b/lib/vhost/vhost.h
index 7085e0885c..f85ce4fda5 100644
--- a/lib/vhost/vhost.h
+++ b/lib/vhost/vhost.h
@@ -185,9 +185,6 @@ struct vhost_virtqueue {
bool access_ok;
bool ready;
- rte_spinlock_t access_lock;
-
-
union {
struct vring_used_elem *shadow_used_split;
struct vring_used_elem_packed *shadow_used_packed;
@@ -384,6 +381,7 @@ struct virtio_net {
int extbuf;
int linearbuf;
struct vhost_virtqueue *virtqueue[VHOST_MAX_QUEUE_PAIRS * 2];
+ rte_spinlock_t vq_access_lock[VHOST_MAX_QUEUE_PAIRS * 2];
struct inflight_mem_info *inflight_info;
#define IF_NAME_SZ (PATH_MAX > IFNAMSIZ ? PATH_MAX : IFNAMSIZ)
char ifname[IF_NAME_SZ];
diff --git a/lib/vhost/vhost_user.c b/lib/vhost/vhost_user.c
index a781346c4d..305b4059bb 100644
--- a/lib/vhost/vhost_user.c
+++ b/lib/vhost/vhost_user.c
@@ -2899,7 +2899,7 @@ vhost_user_lock_all_queue_pairs(struct virtio_net *dev)
struct vhost_virtqueue *vq = dev->virtqueue[i];
if (vq) {
- rte_spinlock_lock(&vq->access_lock);
+ rte_spinlock_lock(&dev->vq_access_lock[i]);
vq_num++;
}
i++;
@@ -2916,7 +2916,7 @@ vhost_user_unlock_all_queue_pairs(struct virtio_net *dev)
struct vhost_virtqueue *vq = dev->virtqueue[i];
if (vq) {
- rte_spinlock_unlock(&vq->access_lock);
+ rte_spinlock_unlock(&dev->vq_access_lock[i]);
vq_num++;
}
i++;
diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
index b3d954aab4..c5a05292ab 100644
--- a/lib/vhost/virtio_net.c
+++ b/lib/vhost/virtio_net.c
@@ -1354,7 +1354,7 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
vq = dev->virtqueue[queue_id];
- rte_spinlock_lock(&vq->access_lock);
+ rte_spinlock_lock(&dev->vq_access_lock[queue_id]);
if (unlikely(!vq->enabled))
goto out_access_unlock;
@@ -1380,7 +1380,7 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
vhost_user_iotlb_rd_unlock(vq);
out_access_unlock:
- rte_spinlock_unlock(&vq->access_lock);
+ rte_spinlock_unlock(&dev->vq_access_lock[queue_id]);
return nb_tx;
}
@@ -1906,11 +1906,11 @@ rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
return 0;
}
- rte_spinlock_lock(&vq->access_lock);
+ rte_spinlock_lock(&dev->vq_access_lock[queue_id]);
n_pkts_cpl = vhost_poll_enqueue_completed(dev, queue_id, pkts, count);
- rte_spinlock_unlock(&vq->access_lock);
+ rte_spinlock_unlock(&dev->vq_access_lock[queue_id]);
return n_pkts_cpl;
}
@@ -1962,7 +1962,7 @@ virtio_dev_rx_async_submit(struct virtio_net *dev, uint16_t queue_id,
vq = dev->virtqueue[queue_id];
- rte_spinlock_lock(&vq->access_lock);
+ rte_spinlock_lock(&dev->vq_access_lock[queue_id]);
if (unlikely(!vq->enabled || !vq->async))
goto out_access_unlock;
@@ -1990,7 +1990,7 @@ virtio_dev_rx_async_submit(struct virtio_net *dev, uint16_t queue_id,
vhost_user_iotlb_rd_unlock(vq);
out_access_unlock:
- rte_spinlock_unlock(&vq->access_lock);
+ rte_spinlock_unlock(&dev->vq_access_lock[queue_id]);
return nb_tx;
}
@@ -2900,7 +2900,7 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
vq = dev->virtqueue[queue_id];
- if (unlikely(rte_spinlock_trylock(&vq->access_lock) == 0))
+ if (unlikely(rte_spinlock_trylock(&dev->vq_access_lock[queue_id]) == 0))
return 0;
if (unlikely(!vq->enabled)) {
@@ -2969,7 +2969,7 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
vhost_user_iotlb_rd_unlock(vq);
out_access_unlock:
- rte_spinlock_unlock(&vq->access_lock);
+ rte_spinlock_unlock(&dev->vq_access_lock[queue_id]);
if (unlikely(rarp_mbuf != NULL))
count += 1;
--
2.25.1
next reply other threads:[~2021-12-03 8:39 UTC|newest]
Thread overview: 5+ messages / expand[flat|nested] mbox.gz Atom feed top
2021-12-03 16:34 Yuan Wang [this message]
2022-01-26 14:02 ` Maxime Coquelin
2022-01-27 10:30 ` Wang, YuanX
2022-01-27 10:46 ` Maxime Coquelin
2022-01-29 9:26 ` Wang, YuanX
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20211203163400.164545-1-yuanx.wang@intel.com \
--to=yuanx.wang@intel.com \
--cc=chenbo.xia@intel.com \
--cc=dev@dpdk.org \
--cc=jiayu.hu@intel.com \
--cc=maxime.coquelin@redhat.com \
--cc=weix.ling@intel.com \
--cc=wenwux.ma@intel.com \
--cc=xuan.ding@intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).