From: Eelco Chaudron <echaudro@redhat.com>
To: maxime.coquelin@redhat.com, chenbo.xia@intel.com
Cc: dev@dpdk.org
Subject: [PATCH v2 1/3] vhost: Change vhost_virtqueue access lock to a read/write one.
Date: Wed, 5 Apr 2023 14:40:52 +0200
Message-ID: <168069842756.833254.13949971045184398628.stgit@ebuild.local>
In-Reply-To: <168069838578.833254.4856666346839028593.stgit@ebuild.local>
This change will allow the vhost interrupt datapath handling to be split
between two processes without one of them holding an explicit lock.
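The resulting convention, in short: paths that may now run concurrently
(the enqueue/dequeue datapath and the interrupt kick) take the lock
shared, while control-path operations keep exclusive access. A minimal
sketch of the two sides, using functions from this patch (illustrative
only, not part of the diff below):

    /* Datapath side: several threads/processes may hold this at once. */
    rte_rwlock_read_lock(&vq->access_lock);
    vhost_vring_call_split(dev, vq);        /* e.g. kick the guest */
    rte_rwlock_read_unlock(&vq->access_lock);

    /* Control side: exclusive, waits for all readers to drain. */
    rte_rwlock_write_lock(&vq->access_lock);
    vring_invalidate(dev, vq);              /* e.g. reconfigure the ring */
    rte_rwlock_write_unlock(&vq->access_lock);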
Signed-off-by: Eelco Chaudron <echaudro@redhat.com>
---
lib/eal/include/generic/rte_rwlock.h | 17 ++++++
lib/vhost/vhost.c | 46 +++++++++--------
lib/vhost/vhost.h | 4 +-
lib/vhost/vhost_user.c | 14 +++--
lib/vhost/virtio_net.c | 90 +++++++++++++++++-----------------
5 files changed, 94 insertions(+), 77 deletions(-)
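One conversion detail worth calling out: rte_spinlock_trylock() and the
rwlock trylock helpers have opposite return conventions, so every
converted call site also inverts its test. A minimal before/after sketch
(illustrative, based on the rte_vhost_vring_call_nonblock() hunk below):

    /* Before: rte_spinlock_trylock() returns 1 on success, 0 if busy. */
    if (!rte_spinlock_trylock(&vq->access_lock))
        return -EAGAIN;

    /* After: rte_rwlock_read_trylock() returns 0 on success,
     * -EBUSY if the lock could not be acquired for reading.
     */
    if (rte_rwlock_read_trylock(&vq->access_lock))
        return -EAGAIN;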
diff --git a/lib/eal/include/generic/rte_rwlock.h b/lib/eal/include/generic/rte_rwlock.h
index d45c22c189..3a5f0e568f 100644
--- a/lib/eal/include/generic/rte_rwlock.h
+++ b/lib/eal/include/generic/rte_rwlock.h
@@ -236,6 +236,23 @@ rte_rwlock_write_unlock(rte_rwlock_t *rwl)
__atomic_fetch_sub(&rwl->cnt, RTE_RWLOCK_WRITE, __ATOMIC_RELEASE);
}
+/**
+ * Test if the write lock is taken.
+ *
+ * @param rwl
+ * A pointer to a rwlock structure.
+ * @return
+ * 1 if the write lock is currently taken; 0 otherwise.
+ */
+static inline int
+rte_rwlock_write_is_locked(rte_rwlock_t *rwl)
+{
+ if (__atomic_load_n(&rwl->cnt, __ATOMIC_RELAXED) & RTE_RWLOCK_WRITE)
+ return 1;
+
+ return 0;
+}
+
/**
* Try to execute critical section in a hardware memory transaction, if it
* fails or not available take a read lock
diff --git a/lib/vhost/vhost.c b/lib/vhost/vhost.c
index ef37943817..74bdbfd810 100644
--- a/lib/vhost/vhost.c
+++ b/lib/vhost/vhost.c
@@ -393,9 +393,9 @@ free_vq(struct virtio_net *dev, struct vhost_virtqueue *vq)
else
rte_free(vq->shadow_used_split);
- rte_spinlock_lock(&vq->access_lock);
+ rte_rwlock_write_lock(&vq->access_lock);
vhost_free_async_mem(vq);
- rte_spinlock_unlock(&vq->access_lock);
+ rte_rwlock_write_unlock(&vq->access_lock);
rte_free(vq->batch_copy_elems);
vhost_user_iotlb_destroy(vq);
rte_free(vq->log_cache);
@@ -630,7 +630,7 @@ alloc_vring_queue(struct virtio_net *dev, uint32_t vring_idx)
dev->virtqueue[i] = vq;
init_vring_queue(dev, vq, i);
- rte_spinlock_init(&vq->access_lock);
+ rte_rwlock_init(&vq->access_lock);
vq->avail_wrap_counter = 1;
vq->used_wrap_counter = 1;
vq->signalled_used_valid = false;
@@ -1305,14 +1305,14 @@ rte_vhost_vring_call(int vid, uint16_t vring_idx)
if (!vq)
return -1;
- rte_spinlock_lock(&vq->access_lock);
+ rte_rwlock_read_lock(&vq->access_lock);
if (vq_is_packed(dev))
vhost_vring_call_packed(dev, vq);
else
vhost_vring_call_split(dev, vq);
- rte_spinlock_unlock(&vq->access_lock);
+ rte_rwlock_read_unlock(&vq->access_lock);
return 0;
}
@@ -1334,7 +1334,7 @@ rte_vhost_vring_call_nonblock(int vid, uint16_t vring_idx)
if (!vq)
return -1;
- if (!rte_spinlock_trylock(&vq->access_lock))
+ if (rte_rwlock_read_trylock(&vq->access_lock))
return -EAGAIN;
if (vq_is_packed(dev))
@@ -1342,7 +1342,7 @@ rte_vhost_vring_call_nonblock(int vid, uint16_t vring_idx)
else
vhost_vring_call_split(dev, vq);
- rte_spinlock_unlock(&vq->access_lock);
+ rte_rwlock_read_unlock(&vq->access_lock);
return 0;
}
@@ -1365,7 +1365,7 @@ rte_vhost_avail_entries(int vid, uint16_t queue_id)
if (!vq)
return 0;
- rte_spinlock_lock(&vq->access_lock);
+ rte_rwlock_write_lock(&vq->access_lock);
if (unlikely(!vq->enabled || vq->avail == NULL))
goto out;
@@ -1373,7 +1373,7 @@ rte_vhost_avail_entries(int vid, uint16_t queue_id)
ret = *(volatile uint16_t *)&vq->avail->idx - vq->last_used_idx;
out:
- rte_spinlock_unlock(&vq->access_lock);
+ rte_rwlock_write_unlock(&vq->access_lock);
return ret;
}
@@ -1457,12 +1457,12 @@ rte_vhost_enable_guest_notification(int vid, uint16_t queue_id, int enable)
if (!vq)
return -1;
- rte_spinlock_lock(&vq->access_lock);
+ rte_rwlock_write_lock(&vq->access_lock);
vq->notif_enable = enable;
ret = vhost_enable_guest_notification(dev, vq, enable);
- rte_spinlock_unlock(&vq->access_lock);
+ rte_rwlock_write_unlock(&vq->access_lock);
return ret;
}
@@ -1520,7 +1520,7 @@ rte_vhost_rx_queue_count(int vid, uint16_t qid)
if (vq == NULL)
return 0;
- rte_spinlock_lock(&vq->access_lock);
+ rte_rwlock_write_lock(&vq->access_lock);
if (unlikely(!vq->enabled || vq->avail == NULL))
goto out;
@@ -1528,7 +1528,7 @@ rte_vhost_rx_queue_count(int vid, uint16_t qid)
ret = *((volatile uint16_t *)&vq->avail->idx) - vq->last_avail_idx;
out:
- rte_spinlock_unlock(&vq->access_lock);
+ rte_rwlock_write_unlock(&vq->access_lock);
return ret;
}
@@ -1757,9 +1757,9 @@ rte_vhost_async_channel_register(int vid, uint16_t queue_id)
if (unlikely(vq == NULL || !dev->async_copy || dev->vdpa_dev != NULL))
return -1;
- rte_spinlock_lock(&vq->access_lock);
+ rte_rwlock_write_lock(&vq->access_lock);
ret = async_channel_register(dev, vq);
- rte_spinlock_unlock(&vq->access_lock);
+ rte_rwlock_write_unlock(&vq->access_lock);
return ret;
}
@@ -1804,7 +1804,7 @@ rte_vhost_async_channel_unregister(int vid, uint16_t queue_id)
if (vq == NULL)
return ret;
- if (!rte_spinlock_trylock(&vq->access_lock)) {
+ if (rte_rwlock_write_trylock(&vq->access_lock)) {
VHOST_LOG_CONFIG(dev->ifname, ERR,
"failed to unregister async channel, virtqueue busy.\n");
return ret;
@@ -1821,7 +1821,7 @@ rte_vhost_async_channel_unregister(int vid, uint16_t queue_id)
ret = 0;
}
- rte_spinlock_unlock(&vq->access_lock);
+ rte_rwlock_write_unlock(&vq->access_lock);
return ret;
}
@@ -1954,7 +1954,7 @@ rte_vhost_async_get_inflight(int vid, uint16_t queue_id)
if (vq == NULL)
return ret;
- if (!rte_spinlock_trylock(&vq->access_lock)) {
+ if (rte_rwlock_write_trylock(&vq->access_lock)) {
VHOST_LOG_CONFIG(dev->ifname, DEBUG,
"failed to check in-flight packets. virtqueue busy.\n");
return ret;
@@ -1963,7 +1963,7 @@ rte_vhost_async_get_inflight(int vid, uint16_t queue_id)
if (vq->async)
ret = vq->async->pkts_inflight_n;
- rte_spinlock_unlock(&vq->access_lock);
+ rte_rwlock_write_unlock(&vq->access_lock);
return ret;
}
@@ -2084,13 +2084,13 @@ rte_vhost_vring_stats_get(int vid, uint16_t queue_id,
vq = dev->virtqueue[queue_id];
- rte_spinlock_lock(&vq->access_lock);
+ rte_rwlock_write_lock(&vq->access_lock);
for (i = 0; i < VHOST_NB_VQ_STATS; i++) {
stats[i].value =
*(uint64_t *)(((char *)vq) + vhost_vq_stat_strings[i].offset);
stats[i].id = i;
}
- rte_spinlock_unlock(&vq->access_lock);
+ rte_rwlock_write_unlock(&vq->access_lock);
return VHOST_NB_VQ_STATS;
}
@@ -2111,9 +2111,9 @@ int rte_vhost_vring_stats_reset(int vid, uint16_t queue_id)
vq = dev->virtqueue[queue_id];
- rte_spinlock_lock(&vq->access_lock);
+ rte_rwlock_write_lock(&vq->access_lock);
memset(&vq->stats, 0, sizeof(vq->stats));
- rte_spinlock_unlock(&vq->access_lock);
+ rte_rwlock_write_unlock(&vq->access_lock);
return 0;
}
diff --git a/lib/vhost/vhost.h b/lib/vhost/vhost.h
index 8fdab13c70..5c939ef06f 100644
--- a/lib/vhost/vhost.h
+++ b/lib/vhost/vhost.h
@@ -277,7 +277,7 @@ struct vhost_virtqueue {
bool access_ok;
bool ready;
- rte_spinlock_t access_lock;
+ rte_rwlock_t access_lock;
union {
@@ -517,7 +517,7 @@ static inline void
vq_assert_lock__(struct virtio_net *dev, struct vhost_virtqueue *vq, const char *func)
__rte_assert_exclusive_lock(&vq->access_lock)
{
- if (unlikely(!rte_spinlock_is_locked(&vq->access_lock)))
+ if (unlikely(!rte_rwlock_write_is_locked(&vq->access_lock)))
rte_panic("VHOST_CONFIG: (%s) %s() called without access lock taken.\n",
dev->ifname, func);
}
diff --git a/lib/vhost/vhost_user.c b/lib/vhost/vhost_user.c
index d60e39b6bc..c9454ce3d9 100644
--- a/lib/vhost/vhost_user.c
+++ b/lib/vhost/vhost_user.c
@@ -399,7 +399,7 @@ vhost_user_set_features(struct virtio_net **pdev,
cleanup_vq_inflight(dev, vq);
/* vhost_user_lock_all_queue_pairs locked all qps */
vq_assert_lock(dev, vq);
- rte_spinlock_unlock(&vq->access_lock);
+ rte_rwlock_write_unlock(&vq->access_lock);
free_vq(dev, vq);
}
}
@@ -2649,10 +2649,10 @@ vhost_user_iotlb_msg(struct virtio_net **pdev,
len, imsg->perm);
if (is_vring_iotlb(dev, vq, imsg)) {
- rte_spinlock_lock(&vq->access_lock);
+ rte_rwlock_write_lock(&vq->access_lock);
translate_ring_addresses(&dev, &vq);
*pdev = dev;
- rte_spinlock_unlock(&vq->access_lock);
+ rte_rwlock_write_unlock(&vq->access_lock);
}
}
break;
@@ -2667,9 +2667,9 @@ vhost_user_iotlb_msg(struct virtio_net **pdev,
imsg->size);
if (is_vring_iotlb(dev, vq, imsg)) {
- rte_spinlock_lock(&vq->access_lock);
+ rte_rwlock_write_lock(&vq->access_lock);
vring_invalidate(dev, vq);
- rte_spinlock_unlock(&vq->access_lock);
+ rte_rwlock_write_unlock(&vq->access_lock);
}
}
break;
@@ -3030,7 +3030,7 @@ vhost_user_lock_all_queue_pairs(struct virtio_net *dev)
struct vhost_virtqueue *vq = dev->virtqueue[i];
if (vq) {
- rte_spinlock_lock(&vq->access_lock);
+ rte_rwlock_write_lock(&vq->access_lock);
vq_num++;
}
i++;
@@ -3048,7 +3048,7 @@ vhost_user_unlock_all_queue_pairs(struct virtio_net *dev)
struct vhost_virtqueue *vq = dev->virtqueue[i];
if (vq) {
- rte_spinlock_unlock(&vq->access_lock);
+ rte_rwlock_write_unlock(&vq->access_lock);
vq_num++;
}
i++;
diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
index be28ea5151..267faaa4fd 100644
--- a/lib/vhost/virtio_net.c
+++ b/lib/vhost/virtio_net.c
@@ -55,7 +55,7 @@ is_valid_virt_queue_idx(uint32_t idx, int is_tx, uint32_t nr_vring)
static inline void
vhost_queue_stats_update(struct virtio_net *dev, struct vhost_virtqueue *vq,
struct rte_mbuf **pkts, uint16_t count)
- __rte_exclusive_locks_required(&vq->access_lock)
+ __rte_shared_locks_required(&vq->access_lock)
{
struct virtqueue_stats *stats = &vq->stats;
int i;
@@ -102,7 +102,7 @@ static __rte_always_inline int64_t
vhost_async_dma_transfer_one(struct virtio_net *dev, struct vhost_virtqueue *vq,
int16_t dma_id, uint16_t vchan_id, uint16_t flag_idx,
struct vhost_iov_iter *pkt)
- __rte_exclusive_locks_required(&vq->access_lock)
+ __rte_shared_locks_required(&vq->access_lock)
{
struct async_dma_vchan_info *dma_info = &dma_copy_track[dma_id].vchans[vchan_id];
uint16_t ring_mask = dma_info->ring_mask;
@@ -152,7 +152,7 @@ static __rte_always_inline uint16_t
vhost_async_dma_transfer(struct virtio_net *dev, struct vhost_virtqueue *vq,
int16_t dma_id, uint16_t vchan_id, uint16_t head_idx,
struct vhost_iov_iter *pkts, uint16_t nr_pkts)
- __rte_exclusive_locks_required(&vq->access_lock)
+ __rte_shared_locks_required(&vq->access_lock)
{
struct async_dma_vchan_info *dma_info = &dma_copy_track[dma_id].vchans[vchan_id];
int64_t ret, nr_copies = 0;
@@ -454,7 +454,7 @@ vhost_async_shadow_enqueue_packed_batch(struct vhost_virtqueue *vq,
static __rte_always_inline void
vhost_async_shadow_dequeue_packed_batch(struct vhost_virtqueue *vq, uint16_t *ids)
- __rte_exclusive_locks_required(&vq->access_lock)
+ __rte_shared_locks_required(&vq->access_lock)
{
uint16_t i;
struct vhost_async *async = vq->async;
@@ -1131,7 +1131,7 @@ static __rte_always_inline int
async_fill_seg(struct virtio_net *dev, struct vhost_virtqueue *vq,
struct rte_mbuf *m, uint32_t mbuf_offset,
uint64_t buf_iova, uint32_t cpy_len, bool to_desc)
- __rte_exclusive_locks_required(&vq->access_lock)
+ __rte_shared_locks_required(&vq->access_lock)
__rte_shared_locks_required(&vq->iotlb_lock)
{
struct vhost_async *async = vq->async;
@@ -1211,7 +1211,7 @@ static __rte_always_inline int
mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
struct rte_mbuf *m, struct buf_vector *buf_vec,
uint16_t nr_vec, uint16_t num_buffers, bool is_async)
- __rte_exclusive_locks_required(&vq->access_lock)
+ __rte_shared_locks_required(&vq->access_lock)
__rte_shared_locks_required(&vq->iotlb_lock)
{
uint32_t vec_idx = 0;
@@ -1341,7 +1341,7 @@ vhost_enqueue_single_packed(struct virtio_net *dev,
struct rte_mbuf *pkt,
struct buf_vector *buf_vec,
uint16_t *nr_descs)
- __rte_exclusive_locks_required(&vq->access_lock)
+ __rte_shared_locks_required(&vq->access_lock)
__rte_shared_locks_required(&vq->iotlb_lock)
{
uint16_t nr_vec = 0;
@@ -1403,7 +1403,7 @@ vhost_enqueue_single_packed(struct virtio_net *dev,
static __rte_noinline uint32_t
virtio_dev_rx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
struct rte_mbuf **pkts, uint32_t count)
- __rte_exclusive_locks_required(&vq->access_lock)
+ __rte_shared_locks_required(&vq->access_lock)
__rte_shared_locks_required(&vq->iotlb_lock)
{
uint32_t pkt_idx = 0;
@@ -1635,7 +1635,7 @@ static __rte_always_inline int16_t
virtio_dev_rx_single_packed(struct virtio_net *dev,
struct vhost_virtqueue *vq,
struct rte_mbuf *pkt)
- __rte_exclusive_locks_required(&vq->access_lock)
+ __rte_shared_locks_required(&vq->access_lock)
__rte_shared_locks_required(&vq->iotlb_lock)
{
struct buf_vector buf_vec[BUF_VECTOR_MAX];
@@ -1661,7 +1661,7 @@ virtio_dev_rx_packed(struct virtio_net *dev,
struct vhost_virtqueue *__rte_restrict vq,
struct rte_mbuf **__rte_restrict pkts,
uint32_t count)
- __rte_exclusive_locks_required(&vq->access_lock)
+ __rte_shared_locks_required(&vq->access_lock)
__rte_shared_locks_required(&vq->iotlb_lock)
{
uint32_t pkt_idx = 0;
@@ -1701,7 +1701,7 @@ virtio_dev_rx(struct virtio_net *dev, struct vhost_virtqueue *vq,
uint32_t nb_tx = 0;
VHOST_LOG_DATA(dev->ifname, DEBUG, "%s\n", __func__);
- rte_spinlock_lock(&vq->access_lock);
+ rte_rwlock_read_lock(&vq->access_lock);
if (unlikely(!vq->enabled))
goto out_access_unlock;
@@ -1727,7 +1727,7 @@ virtio_dev_rx(struct virtio_net *dev, struct vhost_virtqueue *vq,
vhost_user_iotlb_rd_unlock(vq);
out_access_unlock:
- rte_spinlock_unlock(&vq->access_lock);
+ rte_rwlock_read_unlock(&vq->access_lock);
return nb_tx;
}
@@ -1760,7 +1760,7 @@ rte_vhost_enqueue_burst(int vid, uint16_t queue_id,
static __rte_always_inline uint16_t
async_get_first_inflight_pkt_idx(struct vhost_virtqueue *vq)
- __rte_exclusive_locks_required(&vq->access_lock)
+ __rte_shared_locks_required(&vq->access_lock)
{
struct vhost_async *async = vq->async;
@@ -2165,7 +2165,7 @@ virtio_dev_rx_async_submit_packed(struct virtio_net *dev, struct vhost_virtqueue
static __rte_always_inline void
write_back_completed_descs_split(struct vhost_virtqueue *vq, uint16_t n_descs)
- __rte_exclusive_locks_required(&vq->access_lock)
+ __rte_shared_locks_required(&vq->access_lock)
{
struct vhost_async *async = vq->async;
uint16_t nr_left = n_descs;
@@ -2198,7 +2198,7 @@ write_back_completed_descs_split(struct vhost_virtqueue *vq, uint16_t n_descs)
static __rte_always_inline void
write_back_completed_descs_packed(struct vhost_virtqueue *vq,
uint16_t n_buffers)
- __rte_exclusive_locks_required(&vq->access_lock)
+ __rte_shared_locks_required(&vq->access_lock)
{
struct vhost_async *async = vq->async;
uint16_t from = async->last_buffer_idx_packed;
@@ -2263,7 +2263,7 @@ write_back_completed_descs_packed(struct vhost_virtqueue *vq,
static __rte_always_inline uint16_t
vhost_poll_enqueue_completed(struct virtio_net *dev, struct vhost_virtqueue *vq,
struct rte_mbuf **pkts, uint16_t count, int16_t dma_id, uint16_t vchan_id)
- __rte_exclusive_locks_required(&vq->access_lock)
+ __rte_shared_locks_required(&vq->access_lock)
{
struct vhost_async *async = vq->async;
struct async_inflight_info *pkts_info = async->pkts_info;
@@ -2357,7 +2357,7 @@ rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
vq = dev->virtqueue[queue_id];
- if (!rte_spinlock_trylock(&vq->access_lock)) {
+ if (rte_rwlock_read_trylock(&vq->access_lock)) {
VHOST_LOG_DATA(dev->ifname, DEBUG,
"%s: virtqueue %u is busy.\n",
__func__, queue_id);
@@ -2377,7 +2377,7 @@ rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
vq->stats.inflight_completed += n_pkts_cpl;
out:
- rte_spinlock_unlock(&vq->access_lock);
+ rte_rwlock_read_unlock(&vq->access_lock);
return n_pkts_cpl;
}
@@ -2465,7 +2465,7 @@ rte_vhost_clear_queue(int vid, uint16_t queue_id, struct rte_mbuf **pkts,
vq = dev->virtqueue[queue_id];
- if (!rte_spinlock_trylock(&vq->access_lock)) {
+ if (rte_rwlock_read_trylock(&vq->access_lock)) {
VHOST_LOG_DATA(dev->ifname, DEBUG, "%s: virtqueue %u is busy.\n",
__func__, queue_id);
return 0;
@@ -2495,7 +2495,7 @@ rte_vhost_clear_queue(int vid, uint16_t queue_id, struct rte_mbuf **pkts,
vq->stats.inflight_completed += n_pkts_cpl;
out_access_unlock:
- rte_spinlock_unlock(&vq->access_lock);
+ rte_rwlock_read_unlock(&vq->access_lock);
return n_pkts_cpl;
}
@@ -2516,7 +2516,7 @@ virtio_dev_rx_async_submit(struct virtio_net *dev, struct vhost_virtqueue *vq,
return 0;
}
- rte_spinlock_lock(&vq->access_lock);
+ rte_rwlock_write_lock(&vq->access_lock);
if (unlikely(!vq->enabled || !vq->async))
goto out_access_unlock;
@@ -2544,7 +2544,7 @@ virtio_dev_rx_async_submit(struct virtio_net *dev, struct vhost_virtqueue *vq,
vhost_user_iotlb_rd_unlock(vq);
out_access_unlock:
- rte_spinlock_unlock(&vq->access_lock);
+ rte_rwlock_write_unlock(&vq->access_lock);
return nb_tx;
}
@@ -2866,7 +2866,7 @@ desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
struct buf_vector *buf_vec, uint16_t nr_vec,
struct rte_mbuf *m, struct rte_mempool *mbuf_pool,
bool legacy_ol_flags, uint16_t slot_idx, bool is_async)
- __rte_exclusive_locks_required(&vq->access_lock)
+ __rte_shared_locks_required(&vq->access_lock)
__rte_shared_locks_required(&vq->iotlb_lock)
{
uint32_t buf_avail, buf_offset, buf_len;
@@ -3073,7 +3073,7 @@ static uint16_t
virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count,
bool legacy_ol_flags)
- __rte_exclusive_locks_required(&vq->access_lock)
+ __rte_shared_locks_required(&vq->access_lock)
__rte_shared_locks_required(&vq->iotlb_lock)
{
uint16_t i;
@@ -3178,7 +3178,7 @@ static uint16_t
virtio_dev_tx_split_legacy(struct virtio_net *dev,
struct vhost_virtqueue *vq, struct rte_mempool *mbuf_pool,
struct rte_mbuf **pkts, uint16_t count)
- __rte_exclusive_locks_required(&vq->access_lock)
+ __rte_shared_locks_required(&vq->access_lock)
__rte_shared_locks_required(&vq->iotlb_lock)
{
return virtio_dev_tx_split(dev, vq, mbuf_pool, pkts, count, true);
@@ -3189,7 +3189,7 @@ static uint16_t
virtio_dev_tx_split_compliant(struct virtio_net *dev,
struct vhost_virtqueue *vq, struct rte_mempool *mbuf_pool,
struct rte_mbuf **pkts, uint16_t count)
- __rte_exclusive_locks_required(&vq->access_lock)
+ __rte_shared_locks_required(&vq->access_lock)
__rte_shared_locks_required(&vq->iotlb_lock)
{
return virtio_dev_tx_split(dev, vq, mbuf_pool, pkts, count, false);
@@ -3393,7 +3393,7 @@ vhost_dequeue_single_packed(struct virtio_net *dev,
uint16_t *buf_id,
uint16_t *desc_count,
bool legacy_ol_flags)
- __rte_exclusive_locks_required(&vq->access_lock)
+ __rte_shared_locks_required(&vq->access_lock)
__rte_shared_locks_required(&vq->iotlb_lock)
{
struct buf_vector buf_vec[BUF_VECTOR_MAX];
@@ -3443,7 +3443,7 @@ virtio_dev_tx_single_packed(struct virtio_net *dev,
struct rte_mempool *mbuf_pool,
struct rte_mbuf *pkts,
bool legacy_ol_flags)
- __rte_exclusive_locks_required(&vq->access_lock)
+ __rte_shared_locks_required(&vq->access_lock)
__rte_shared_locks_required(&vq->iotlb_lock)
{
@@ -3475,7 +3475,7 @@ virtio_dev_tx_packed(struct virtio_net *dev,
struct rte_mbuf **__rte_restrict pkts,
uint32_t count,
bool legacy_ol_flags)
- __rte_exclusive_locks_required(&vq->access_lock)
+ __rte_shared_locks_required(&vq->access_lock)
__rte_shared_locks_required(&vq->iotlb_lock)
{
uint32_t pkt_idx = 0;
@@ -3520,7 +3520,7 @@ static uint16_t
virtio_dev_tx_packed_legacy(struct virtio_net *dev,
struct vhost_virtqueue *__rte_restrict vq, struct rte_mempool *mbuf_pool,
struct rte_mbuf **__rte_restrict pkts, uint32_t count)
- __rte_exclusive_locks_required(&vq->access_lock)
+ __rte_shared_locks_required(&vq->access_lock)
__rte_shared_locks_required(&vq->iotlb_lock)
{
return virtio_dev_tx_packed(dev, vq, mbuf_pool, pkts, count, true);
@@ -3531,7 +3531,7 @@ static uint16_t
virtio_dev_tx_packed_compliant(struct virtio_net *dev,
struct vhost_virtqueue *__rte_restrict vq, struct rte_mempool *mbuf_pool,
struct rte_mbuf **__rte_restrict pkts, uint32_t count)
- __rte_exclusive_locks_required(&vq->access_lock)
+ __rte_shared_locks_required(&vq->access_lock)
__rte_shared_locks_required(&vq->iotlb_lock)
{
return virtio_dev_tx_packed(dev, vq, mbuf_pool, pkts, count, false);
@@ -3566,7 +3566,7 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
vq = dev->virtqueue[queue_id];
- if (unlikely(rte_spinlock_trylock(&vq->access_lock) == 0))
+ if (unlikely(rte_rwlock_read_trylock(&vq->access_lock) != 0))
return 0;
if (unlikely(!vq->enabled)) {
@@ -3636,7 +3636,7 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
vhost_user_iotlb_rd_unlock(vq);
out_access_unlock:
- rte_spinlock_unlock(&vq->access_lock);
+ rte_rwlock_read_unlock(&vq->access_lock);
if (unlikely(rarp_mbuf != NULL))
count += 1;
@@ -3648,7 +3648,7 @@ static __rte_always_inline uint16_t
async_poll_dequeue_completed(struct virtio_net *dev, struct vhost_virtqueue *vq,
struct rte_mbuf **pkts, uint16_t count, int16_t dma_id,
uint16_t vchan_id, bool legacy_ol_flags)
- __rte_exclusive_locks_required(&vq->access_lock)
+ __rte_shared_locks_required(&vq->access_lock)
{
uint16_t start_idx, from, i;
uint16_t nr_cpl_pkts = 0;
@@ -3695,7 +3695,7 @@ static __rte_always_inline uint16_t
virtio_dev_tx_async_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count,
int16_t dma_id, uint16_t vchan_id, bool legacy_ol_flags)
- __rte_exclusive_locks_required(&vq->access_lock)
+ __rte_shared_locks_required(&vq->access_lock)
__rte_shared_locks_required(&vq->iotlb_lock)
{
static bool allocerr_warned;
@@ -3844,7 +3844,7 @@ virtio_dev_tx_async_split_legacy(struct virtio_net *dev,
struct vhost_virtqueue *vq, struct rte_mempool *mbuf_pool,
struct rte_mbuf **pkts, uint16_t count,
int16_t dma_id, uint16_t vchan_id)
- __rte_exclusive_locks_required(&vq->access_lock)
+ __rte_shared_locks_required(&vq->access_lock)
__rte_shared_locks_required(&vq->iotlb_lock)
{
return virtio_dev_tx_async_split(dev, vq, mbuf_pool,
@@ -3857,7 +3857,7 @@ virtio_dev_tx_async_split_compliant(struct virtio_net *dev,
struct vhost_virtqueue *vq, struct rte_mempool *mbuf_pool,
struct rte_mbuf **pkts, uint16_t count,
int16_t dma_id, uint16_t vchan_id)
- __rte_exclusive_locks_required(&vq->access_lock)
+ __rte_shared_locks_required(&vq->access_lock)
__rte_shared_locks_required(&vq->iotlb_lock)
{
return virtio_dev_tx_async_split(dev, vq, mbuf_pool,
@@ -3867,7 +3867,7 @@ virtio_dev_tx_async_split_compliant(struct virtio_net *dev,
static __rte_always_inline void
vhost_async_shadow_dequeue_single_packed(struct vhost_virtqueue *vq,
uint16_t buf_id, uint16_t count)
- __rte_exclusive_locks_required(&vq->access_lock)
+ __rte_shared_locks_required(&vq->access_lock)
{
struct vhost_async *async = vq->async;
uint16_t idx = async->buffer_idx_packed;
@@ -3889,7 +3889,7 @@ virtio_dev_tx_async_single_packed(struct virtio_net *dev,
struct rte_mbuf *pkts,
uint16_t slot_idx,
bool legacy_ol_flags)
- __rte_exclusive_locks_required(&vq->access_lock)
+ __rte_shared_locks_required(&vq->access_lock)
__rte_shared_locks_required(&vq->iotlb_lock)
{
int err;
@@ -3942,7 +3942,7 @@ virtio_dev_tx_async_packed_batch(struct virtio_net *dev,
struct vhost_virtqueue *vq,
struct rte_mbuf **pkts, uint16_t slot_idx,
uint16_t dma_id, uint16_t vchan_id)
- __rte_exclusive_locks_required(&vq->access_lock)
+ __rte_shared_locks_required(&vq->access_lock)
__rte_shared_locks_required(&vq->iotlb_lock)
{
uint16_t avail_idx = vq->last_avail_idx;
@@ -4000,7 +4000,7 @@ static __rte_always_inline uint16_t
virtio_dev_tx_async_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts,
uint16_t count, uint16_t dma_id, uint16_t vchan_id, bool legacy_ol_flags)
- __rte_exclusive_locks_required(&vq->access_lock)
+ __rte_shared_locks_required(&vq->access_lock)
__rte_shared_locks_required(&vq->iotlb_lock)
{
uint32_t pkt_idx = 0;
@@ -4111,7 +4111,7 @@ static uint16_t
virtio_dev_tx_async_packed_legacy(struct virtio_net *dev, struct vhost_virtqueue *vq,
struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts,
uint16_t count, uint16_t dma_id, uint16_t vchan_id)
- __rte_exclusive_locks_required(&vq->access_lock)
+ __rte_shared_locks_required(&vq->access_lock)
__rte_shared_locks_required(&vq->iotlb_lock)
{
return virtio_dev_tx_async_packed(dev, vq, mbuf_pool,
@@ -4123,7 +4123,7 @@ static uint16_t
virtio_dev_tx_async_packed_compliant(struct virtio_net *dev, struct vhost_virtqueue *vq,
struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts,
uint16_t count, uint16_t dma_id, uint16_t vchan_id)
- __rte_exclusive_locks_required(&vq->access_lock)
+ __rte_shared_locks_required(&vq->access_lock)
__rte_shared_locks_required(&vq->iotlb_lock)
{
return virtio_dev_tx_async_packed(dev, vq, mbuf_pool,
@@ -4173,7 +4173,7 @@ rte_vhost_async_try_dequeue_burst(int vid, uint16_t queue_id,
vq = dev->virtqueue[queue_id];
- if (unlikely(rte_spinlock_trylock(&vq->access_lock) == 0))
+ if (unlikely(rte_rwlock_read_trylock(&vq->access_lock) != 0))
return 0;
if (unlikely(vq->enabled == 0)) {
@@ -4255,7 +4255,7 @@ rte_vhost_async_try_dequeue_burst(int vid, uint16_t queue_id,
vhost_user_iotlb_rd_unlock(vq);
out_access_unlock:
- rte_spinlock_unlock(&vq->access_lock);
+ rte_rwlock_read_unlock(&vq->access_lock);
if (unlikely(rarp_mbuf != NULL))
count += 1;
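For completeness, a small standalone sketch of how the new
rte_rwlock_write_is_locked() helper backs the vq_assert_lock() change
above. Note that, like rte_spinlock_is_locked() before it, it only
reports that *a* writer holds the lock, not that the calling thread is
that writer. The example below is hypothetical and not part of this
series:

    #include <rte_rwlock.h>
    #include <rte_debug.h>

    static rte_rwlock_t cfg_lock = RTE_RWLOCK_INITIALIZER;

    /* Must only be called with cfg_lock held for writing. */
    static void
    reconfigure(void)
    {
        if (!rte_rwlock_write_is_locked(&cfg_lock))
            rte_panic("reconfigure() called without write lock taken\n");
        /* ... modify state protected by cfg_lock ... */
    }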