From: "Xia, Chenbo" <chenbo.xia@intel.com>
To: David Marchand <david.marchand@redhat.com>,
"dev@dpdk.org" <dev@dpdk.org>
Cc: "maxime.coquelin@redhat.com" <maxime.coquelin@redhat.com>,
"stephen@networkplumber.org" <stephen@networkplumber.org>,
"Hu, Jiayu" <jiayu.hu@intel.com>,
"Wang, YuanX" <yuanx.wang@intel.com>,
"Ding, Xuan" <xuan.ding@intel.com>,
"mb@smartsharesystems.com" <mb@smartsharesystems.com>
Subject: RE: [PATCH v6 5/9] vhost: annotate async accesses
Date: Thu, 9 Feb 2023 08:01:34 +0000
Message-ID: <SN6PR11MB3504E96C7D41474FBD83121F9CD99@SN6PR11MB3504.namprd11.prod.outlook.com>
In-Reply-To: <20230207104532.2370869-6-david.marchand@redhat.com>
> -----Original Message-----
> From: David Marchand <david.marchand@redhat.com>
> Sent: Tuesday, February 7, 2023 6:45 PM
> To: dev@dpdk.org
> Cc: maxime.coquelin@redhat.com; stephen@networkplumber.org; Xia, Chenbo
> <chenbo.xia@intel.com>; Hu, Jiayu <jiayu.hu@intel.com>; Wang, YuanX
> <yuanx.wang@intel.com>; Ding, Xuan <xuan.ding@intel.com>;
> mb@smartsharesystems.com
> Subject: [PATCH v6 5/9] vhost: annotate async accesses
>
> vq->async is initialised and must be accessed under vq->access_lock.
>
> Signed-off-by: David Marchand <david.marchand@redhat.com>
> Acked-by: Morten Brørup <mb@smartsharesystems.com>
> Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
> ---
> Changes since v5:
> - rebased after packed support was added to async code,
>
> Changes since RFC v3:
> - rebased,
> - fixed annotations vq->access_lock -> &vq->access_lock,
> - reworked free_vq,
>
> ---
> lib/vhost/vhost.c      |  4 ++++
> lib/vhost/vhost.h      |  2 +-
> lib/vhost/vhost_user.c | 10 +++++++---
> lib/vhost/virtio_net.c | 41 +++++++++++++++++++++++++++++++++++++++++
> 4 files changed, 53 insertions(+), 4 deletions(-)
>
> diff --git a/lib/vhost/vhost.c b/lib/vhost/vhost.c
> index 8cd727ca2f..8bccdd8584 100644
> --- a/lib/vhost/vhost.c
> +++ b/lib/vhost/vhost.c
> @@ -369,6 +369,7 @@ cleanup_device(struct virtio_net *dev, int destroy)
>
> static void
> vhost_free_async_mem(struct vhost_virtqueue *vq)
> + __rte_exclusive_locks_required(&vq->access_lock)
> {
> if (!vq->async)
> return;
> @@ -393,7 +394,9 @@ free_vq(struct virtio_net *dev, struct vhost_virtqueue *vq)
> else
> rte_free(vq->shadow_used_split);
>
> + rte_spinlock_lock(&vq->access_lock);
> vhost_free_async_mem(vq);
> + rte_spinlock_unlock(&vq->access_lock);
> rte_free(vq->batch_copy_elems);
> vhost_user_iotlb_destroy(vq);
> rte_free(vq->log_cache);
> @@ -1669,6 +1672,7 @@ rte_vhost_extern_callback_register(int vid,
>
> static __rte_always_inline int
> async_channel_register(struct virtio_net *dev, struct vhost_virtqueue *vq)
> + __rte_exclusive_locks_required(&vq->access_lock)
> {
> struct vhost_async *async;
> int node = vq->numa_node;
> diff --git a/lib/vhost/vhost.h b/lib/vhost/vhost.h
> index 82fe9b5fda..c05313cf37 100644
> --- a/lib/vhost/vhost.h
> +++ b/lib/vhost/vhost.h
> @@ -326,7 +326,7 @@ struct vhost_virtqueue {
> struct rte_vhost_resubmit_info *resubmit_inflight;
> uint64_t global_counter;
>
> - struct vhost_async *async;
> + struct vhost_async *async __rte_guarded_var;
>
> int notif_enable;
> #define VIRTIO_UNINITIALIZED_NOTIF (-1)
> diff --git a/lib/vhost/vhost_user.c b/lib/vhost/vhost_user.c
> index 70d221b9f6..8c1d60b76b 100644
> --- a/lib/vhost/vhost_user.c
> +++ b/lib/vhost/vhost_user.c
> @@ -2168,6 +2168,7 @@ vhost_user_set_vring_enable(struct virtio_net **pdev,
> int main_fd __rte_unused)
> {
> struct virtio_net *dev = *pdev;
> + struct vhost_virtqueue *vq;
> bool enable = !!ctx->msg.payload.state.num;
> int index = (int)ctx->msg.payload.state.index;
>
> @@ -2175,15 +2176,18 @@ vhost_user_set_vring_enable(struct virtio_net **pdev,
> "set queue enable: %d to qp idx: %d\n",
> enable, index);
>
> - if (enable && dev->virtqueue[index]->async) {
> - if (dev->virtqueue[index]->async->pkts_inflight_n) {
> + vq = dev->virtqueue[index];
> + /* vhost_user_lock_all_queue_pairs locked all qps */
> + vq_assert_lock(dev, vq);
> + if (enable && vq->async) {
> + if (vq->async->pkts_inflight_n) {
> VHOST_LOG_CONFIG(dev->ifname, ERR,
> "failed to enable vring. Inflight packets must be
> completed first\n");
> return RTE_VHOST_MSG_RESULT_ERR;
> }
> }
>
> - dev->virtqueue[index]->enabled = enable;
> + vq->enabled = enable;
>
> return RTE_VHOST_MSG_RESULT_OK;
> }
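
A note for readers following the series: the vq_assert_lock() above is
needed because this function does not take the lock itself; it was taken
indirectly by vhost_user_lock_all_queue_pairs() in the message handler,
which clang's analysis cannot see from here. Below is a minimal sketch of
the underlying mechanism, assuming clang's documented
assert_exclusive_lock attribute; the names are illustrative, not DPDK's
exact wrappers (the real vq_assert_lock() comes from earlier in this
series).

  /* assert_sketch.c - compile with: clang -Wthread-safety -c assert_sketch.c */
  struct __attribute__((lockable)) my_lock {
          int taken;
  };

  /* Tells the analyzer that `l` is held on entry, even though it never
   * saw the acquisition (it happened in a caller it cannot track). */
  static void
  my_assert_held(struct my_lock *l)
          __attribute__((assert_exclusive_lock(l)))
  {
          (void)l; /* a runtime "really held?" check would live here */
  }

  static void
  needs_lock(struct my_lock *l)
          __attribute__((exclusive_locks_required(l)))
  {
          l->taken = 1;
  }

  void
  handler(struct my_lock *l)
  {
          my_assert_held(l); /* lock was taken by an unannotated caller */
          needs_lock(l);     /* no warning: the assertion satisfies the analyzer */
  }
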
> diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
> index f2ab6dba15..6672caac49 100644
> --- a/lib/vhost/virtio_net.c
> +++ b/lib/vhost/virtio_net.c
> @@ -102,6 +102,7 @@ static __rte_always_inline int64_t
> vhost_async_dma_transfer_one(struct virtio_net *dev, struct vhost_virtqueue *vq,
> int16_t dma_id, uint16_t vchan_id, uint16_t flag_idx,
> struct vhost_iov_iter *pkt)
> + __rte_exclusive_locks_required(&vq->access_lock)
> {
> struct async_dma_vchan_info *dma_info = &dma_copy_track[dma_id].vchans[vchan_id];
> uint16_t ring_mask = dma_info->ring_mask;
> @@ -151,6 +152,7 @@ static __rte_always_inline uint16_t
> vhost_async_dma_transfer(struct virtio_net *dev, struct vhost_virtqueue *vq,
> int16_t dma_id, uint16_t vchan_id, uint16_t head_idx,
> struct vhost_iov_iter *pkts, uint16_t nr_pkts)
> + __rte_exclusive_locks_required(&vq->access_lock)
> {
> struct async_dma_vchan_info *dma_info = &dma_copy_track[dma_id].vchans[vchan_id];
> int64_t ret, nr_copies = 0;
> @@ -434,6 +436,7 @@ static __rte_always_inline void
> vhost_async_shadow_enqueue_packed_batch(struct vhost_virtqueue *vq,
> uint64_t *lens,
> uint16_t *ids)
> + __rte_exclusive_locks_required(&vq->access_lock)
> {
> uint16_t i;
> struct vhost_async *async = vq->async;
> @@ -450,6 +453,7 @@ vhost_async_shadow_enqueue_packed_batch(struct vhost_virtqueue *vq,
>
> static __rte_always_inline void
> vhost_async_shadow_dequeue_packed_batch(struct vhost_virtqueue *vq,
> uint16_t *ids)
> + __rte_exclusive_locks_required(&vq->access_lock)
> {
> uint16_t i;
> struct vhost_async *async = vq->async;
> @@ -611,6 +615,7 @@ vhost_async_shadow_enqueue_packed(struct vhost_virtqueue *vq,
> uint16_t *id,
> uint16_t *count,
> uint16_t num_buffers)
> + __rte_exclusive_locks_required(&vq->access_lock)
> {
> uint16_t i;
> struct vhost_async *async = vq->async;
> @@ -1118,6 +1123,7 @@ static __rte_always_inline int
> async_fill_seg(struct virtio_net *dev, struct vhost_virtqueue *vq,
> struct rte_mbuf *m, uint32_t mbuf_offset,
> uint64_t buf_iova, uint32_t cpy_len, bool to_desc)
> + __rte_exclusive_locks_required(&vq->access_lock)
> {
> struct vhost_async *async = vq->async;
> uint64_t mapped_len;
> @@ -1195,6 +1201,7 @@ static __rte_always_inline int
> mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
> struct rte_mbuf *m, struct buf_vector *buf_vec,
> uint16_t nr_vec, uint16_t num_buffers, bool is_async)
> + __rte_exclusive_locks_required(&vq->access_lock)
> {
> uint32_t vec_idx = 0;
> uint32_t mbuf_offset, mbuf_avail;
> @@ -1323,6 +1330,7 @@ vhost_enqueue_single_packed(struct virtio_net *dev,
> struct rte_mbuf *pkt,
> struct buf_vector *buf_vec,
> uint16_t *nr_descs)
> + __rte_exclusive_locks_required(&vq->access_lock)
> {
> uint16_t nr_vec = 0;
> uint16_t avail_idx = vq->last_avail_idx;
> @@ -1383,6 +1391,7 @@ vhost_enqueue_single_packed(struct virtio_net *dev,
> static __rte_noinline uint32_t
> virtio_dev_rx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
> struct rte_mbuf **pkts, uint32_t count)
> + __rte_exclusive_locks_required(&vq->access_lock)
> {
> uint32_t pkt_idx = 0;
> uint16_t num_buffers;
> @@ -1610,6 +1619,7 @@ static __rte_always_inline int16_t
> virtio_dev_rx_single_packed(struct virtio_net *dev,
> struct vhost_virtqueue *vq,
> struct rte_mbuf *pkt)
> + __rte_exclusive_locks_required(&vq->access_lock)
> {
> struct buf_vector buf_vec[BUF_VECTOR_MAX];
> uint16_t nr_descs = 0;
> @@ -1634,6 +1644,7 @@ virtio_dev_rx_packed(struct virtio_net *dev,
> struct vhost_virtqueue *__rte_restrict vq,
> struct rte_mbuf **__rte_restrict pkts,
> uint32_t count)
> + __rte_exclusive_locks_required(&vq->access_lock)
> {
> uint32_t pkt_idx = 0;
>
> @@ -1733,6 +1744,7 @@ rte_vhost_enqueue_burst(int vid, uint16_t queue_id,
>
> static __rte_always_inline uint16_t
> async_get_first_inflight_pkt_idx(struct vhost_virtqueue *vq)
> + __rte_exclusive_locks_required(&vq->access_lock)
> {
> struct vhost_async *async = vq->async;
>
> @@ -1761,6 +1773,7 @@ store_dma_desc_info_split(struct vring_used_elem *s_ring, struct vring_used_elem
> static __rte_noinline uint32_t
> virtio_dev_rx_async_submit_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
> struct rte_mbuf **pkts, uint32_t count, int16_t dma_id, uint16_t vchan_id)
> + __rte_exclusive_locks_required(&vq->access_lock)
> {
> struct buf_vector buf_vec[BUF_VECTOR_MAX];
> uint32_t pkt_idx = 0;
> @@ -1867,6 +1880,7 @@ vhost_enqueue_async_packed(struct virtio_net *dev,
> struct buf_vector *buf_vec,
> uint16_t *nr_descs,
> uint16_t *nr_buffers)
> + __rte_exclusive_locks_required(&vq->access_lock)
> {
> uint16_t nr_vec = 0;
> uint16_t avail_idx = vq->last_avail_idx;
> @@ -1925,6 +1939,7 @@ vhost_enqueue_async_packed(struct virtio_net *dev,
> static __rte_always_inline int16_t
> virtio_dev_rx_async_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
> struct rte_mbuf *pkt, uint16_t *nr_descs, uint16_t *nr_buffers)
> + __rte_exclusive_locks_required(&vq->access_lock)
> {
> struct buf_vector buf_vec[BUF_VECTOR_MAX];
>
> @@ -1947,6 +1962,7 @@ virtio_dev_rx_async_packed_batch_enqueue(struct virtio_net *dev,
> struct rte_mbuf **pkts,
> uint64_t *desc_addrs,
> uint64_t *lens)
> + __rte_exclusive_locks_required(&vq->access_lock)
> {
> uint32_t buf_offset = sizeof(struct virtio_net_hdr_mrg_rxbuf);
> struct virtio_net_hdr_mrg_rxbuf *hdrs[PACKED_BATCH_SIZE];
> @@ -2007,6 +2023,7 @@ virtio_dev_rx_async_packed_batch(struct virtio_net *dev,
> struct vhost_virtqueue *vq,
> struct rte_mbuf **pkts,
> int16_t dma_id, uint16_t vchan_id)
> + __rte_exclusive_locks_required(&vq->access_lock)
> {
> uint64_t desc_addrs[PACKED_BATCH_SIZE];
> uint64_t lens[PACKED_BATCH_SIZE];
> @@ -2022,6 +2039,7 @@ virtio_dev_rx_async_packed_batch(struct virtio_net *dev,
> static __rte_always_inline void
> dma_error_handler_packed(struct vhost_virtqueue *vq, uint16_t slot_idx,
> uint32_t nr_err, uint32_t *pkt_idx)
> + __rte_exclusive_locks_required(&vq->access_lock)
> {
> uint16_t descs_err = 0;
> uint16_t buffers_err = 0;
> @@ -2052,6 +2070,7 @@ dma_error_handler_packed(struct vhost_virtqueue *vq, uint16_t slot_idx,
> static __rte_noinline uint32_t
> virtio_dev_rx_async_submit_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
> struct rte_mbuf **pkts, uint32_t count, int16_t dma_id, uint16_t vchan_id)
> + __rte_exclusive_locks_required(&vq->access_lock)
> {
> uint32_t pkt_idx = 0;
> uint16_t n_xfer;
> @@ -2124,6 +2143,7 @@ virtio_dev_rx_async_submit_packed(struct virtio_net *dev, struct vhost_virtqueue
>
> static __rte_always_inline void
> write_back_completed_descs_split(struct vhost_virtqueue *vq, uint16_t n_descs)
> + __rte_exclusive_locks_required(&vq->access_lock)
> {
> struct vhost_async *async = vq->async;
> uint16_t nr_left = n_descs;
> @@ -2156,6 +2176,7 @@ write_back_completed_descs_split(struct vhost_virtqueue *vq, uint16_t n_descs)
> static __rte_always_inline void
> write_back_completed_descs_packed(struct vhost_virtqueue *vq,
> uint16_t n_buffers)
> + __rte_exclusive_locks_required(&vq->access_lock)
> {
> struct vhost_async *async = vq->async;
> uint16_t from = async->last_buffer_idx_packed;
> @@ -2220,6 +2241,7 @@ write_back_completed_descs_packed(struct vhost_virtqueue *vq,
> static __rte_always_inline uint16_t
> vhost_poll_enqueue_completed(struct virtio_net *dev, struct vhost_virtqueue *vq,
> struct rte_mbuf **pkts, uint16_t count, int16_t dma_id, uint16_t vchan_id)
> + __rte_exclusive_locks_required(&vq->access_lock)
> {
> struct vhost_async *async = vq->async;
> struct async_inflight_info *pkts_info = async->pkts_info;
> @@ -2824,6 +2846,7 @@ desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
> struct buf_vector *buf_vec, uint16_t nr_vec,
> struct rte_mbuf *m, struct rte_mempool *mbuf_pool,
> bool legacy_ol_flags, uint16_t slot_idx, bool is_async)
> + __rte_exclusive_locks_required(&vq->access_lock)
> {
> uint32_t buf_avail, buf_offset, buf_len;
> uint64_t buf_addr, buf_iova;
> @@ -3029,6 +3052,7 @@ static uint16_t
> virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
> struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count,
> bool legacy_ol_flags)
> + __rte_exclusive_locks_required(&vq->access_lock)
> {
> uint16_t i;
> uint16_t avail_entries;
> @@ -3132,6 +3156,7 @@ static uint16_t
> virtio_dev_tx_split_legacy(struct virtio_net *dev,
> struct vhost_virtqueue *vq, struct rte_mempool *mbuf_pool,
> struct rte_mbuf **pkts, uint16_t count)
> + __rte_exclusive_locks_required(&vq->access_lock)
> {
> return virtio_dev_tx_split(dev, vq, mbuf_pool, pkts, count, true);
> }
> @@ -3141,6 +3166,7 @@ static uint16_t
> virtio_dev_tx_split_compliant(struct virtio_net *dev,
> struct vhost_virtqueue *vq, struct rte_mempool *mbuf_pool,
> struct rte_mbuf **pkts, uint16_t count)
> + __rte_exclusive_locks_required(&vq->access_lock)
> {
> return virtio_dev_tx_split(dev, vq, mbuf_pool, pkts, count, false);
> }
> @@ -3341,6 +3367,7 @@ vhost_dequeue_single_packed(struct virtio_net *dev,
> uint16_t *buf_id,
> uint16_t *desc_count,
> bool legacy_ol_flags)
> + __rte_exclusive_locks_required(&vq->access_lock)
> {
> struct buf_vector buf_vec[BUF_VECTOR_MAX];
> uint32_t buf_len;
> @@ -3389,6 +3416,7 @@ virtio_dev_tx_single_packed(struct virtio_net *dev,
> struct rte_mempool *mbuf_pool,
> struct rte_mbuf *pkts,
> bool legacy_ol_flags)
> + __rte_exclusive_locks_required(&vq->access_lock)
> {
>
> uint16_t buf_id, desc_count = 0;
> @@ -3419,6 +3447,7 @@ virtio_dev_tx_packed(struct virtio_net *dev,
> struct rte_mbuf **__rte_restrict pkts,
> uint32_t count,
> bool legacy_ol_flags)
> + __rte_exclusive_locks_required(&vq->access_lock)
> {
> uint32_t pkt_idx = 0;
>
> @@ -3462,6 +3491,7 @@ static uint16_t
> virtio_dev_tx_packed_legacy(struct virtio_net *dev,
> struct vhost_virtqueue *__rte_restrict vq, struct rte_mempool *mbuf_pool,
> struct rte_mbuf **__rte_restrict pkts, uint32_t count)
> + __rte_exclusive_locks_required(&vq->access_lock)
> {
> return virtio_dev_tx_packed(dev, vq, mbuf_pool, pkts, count, true);
> }
> @@ -3471,6 +3501,7 @@ static uint16_t
> virtio_dev_tx_packed_compliant(struct virtio_net *dev,
> struct vhost_virtqueue *__rte_restrict vq, struct rte_mempool *mbuf_pool,
> struct rte_mbuf **__rte_restrict pkts, uint32_t count)
> + __rte_exclusive_locks_required(&vq->access_lock)
> {
> return virtio_dev_tx_packed(dev, vq, mbuf_pool, pkts, count, false);
> }
> @@ -3588,6 +3619,7 @@ static __rte_always_inline uint16_t
> async_poll_dequeue_completed(struct virtio_net *dev, struct vhost_virtqueue *vq,
> struct rte_mbuf **pkts, uint16_t count, int16_t dma_id,
> uint16_t vchan_id, bool legacy_ol_flags)
> + __rte_exclusive_locks_required(&vq->access_lock)
> {
> uint16_t start_idx, from, i;
> uint16_t nr_cpl_pkts = 0;
> @@ -3634,6 +3666,7 @@ static __rte_always_inline uint16_t
> virtio_dev_tx_async_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
> struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count,
> int16_t dma_id, uint16_t vchan_id, bool legacy_ol_flags)
> + __rte_exclusive_locks_required(&vq->access_lock)
> {
> static bool allocerr_warned;
> bool dropped = false;
> @@ -3780,6 +3813,7 @@ virtio_dev_tx_async_split_legacy(struct virtio_net *dev,
> struct vhost_virtqueue *vq, struct rte_mempool *mbuf_pool,
> struct rte_mbuf **pkts, uint16_t count,
> int16_t dma_id, uint16_t vchan_id)
> + __rte_exclusive_locks_required(&vq->access_lock)
> {
> return virtio_dev_tx_async_split(dev, vq, mbuf_pool,
> pkts, count, dma_id, vchan_id, true);
> @@ -3791,6 +3825,7 @@ virtio_dev_tx_async_split_compliant(struct virtio_net *dev,
> struct vhost_virtqueue *vq, struct rte_mempool *mbuf_pool,
> struct rte_mbuf **pkts, uint16_t count,
> int16_t dma_id, uint16_t vchan_id)
> + __rte_exclusive_locks_required(&vq->access_lock)
> {
> return virtio_dev_tx_async_split(dev, vq, mbuf_pool,
> pkts, count, dma_id, vchan_id, false);
> @@ -3799,6 +3834,7 @@ virtio_dev_tx_async_split_compliant(struct virtio_net *dev,
> static __rte_always_inline void
> vhost_async_shadow_dequeue_single_packed(struct vhost_virtqueue *vq,
> uint16_t buf_id, uint16_t count)
> + __rte_exclusive_locks_required(&vq->access_lock)
> {
> struct vhost_async *async = vq->async;
> uint16_t idx = async->buffer_idx_packed;
> @@ -3820,6 +3856,7 @@ virtio_dev_tx_async_single_packed(struct virtio_net *dev,
> struct rte_mbuf *pkts,
> uint16_t slot_idx,
> bool legacy_ol_flags)
> + __rte_exclusive_locks_required(&vq->access_lock)
> {
> int err;
> uint16_t buf_id, desc_count = 0;
> @@ -3871,6 +3908,7 @@ virtio_dev_tx_async_packed_batch(struct virtio_net *dev,
> struct vhost_virtqueue *vq,
> struct rte_mbuf **pkts, uint16_t slot_idx,
> uint16_t dma_id, uint16_t vchan_id)
> + __rte_exclusive_locks_required(&vq->access_lock)
> {
> uint16_t avail_idx = vq->last_avail_idx;
> uint32_t buf_offset = sizeof(struct virtio_net_hdr_mrg_rxbuf);
> @@ -3927,6 +3965,7 @@ static __rte_always_inline uint16_t
> virtio_dev_tx_async_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
> struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts,
> uint16_t count, uint16_t dma_id, uint16_t vchan_id, bool legacy_ol_flags)
> + __rte_exclusive_locks_required(&vq->access_lock)
> {
> uint32_t pkt_idx = 0;
> uint16_t slot_idx = 0;
> @@ -4036,6 +4075,7 @@ static uint16_t
> virtio_dev_tx_async_packed_legacy(struct virtio_net *dev, struct vhost_virtqueue *vq,
> struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts,
> uint16_t count, uint16_t dma_id, uint16_t vchan_id)
> + __rte_exclusive_locks_required(&vq->access_lock)
> {
> return virtio_dev_tx_async_packed(dev, vq, mbuf_pool,
> pkts, count, dma_id, vchan_id, true);
> @@ -4046,6 +4086,7 @@ static uint16_t
> virtio_dev_tx_async_packed_compliant(struct virtio_net *dev, struct vhost_virtqueue *vq,
> struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts,
> uint16_t count, uint16_t dma_id, uint16_t vchan_id)
> + __rte_exclusive_locks_required(&vq->access_lock)
> {
> return virtio_dev_tx_async_packed(dev, vq, mbuf_pool,
> pkts, count, dma_id, vchan_id, false);
> --
> 2.39.1
Reviewed-by: Chenbo Xia <chenbo.xia@intel.com>
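
For anyone trying the series locally: these checks only fire under clang
with -Wthread-safety; on other compilers the annotation wrappers should
expand to nothing. Below is a self-contained sketch of the pattern this
patch applies to vq->async. The macro bodies are assumptions based on
clang's documented attribute names, not copies of DPDK's
rte_lock_annotations.h:

  /* sketch.c - compile with: clang -Wthread-safety -c sketch.c */
  #define LOCKABLE         __attribute__((lockable))
  #define GUARDED_VAR      __attribute__((guarded_var))
  #define LOCK_REQUIRED(l) __attribute__((exclusive_locks_required(l)))
  #define LOCK_FUNC(l)     __attribute__((exclusive_lock_function(l)))
  #define UNLOCK_FUNC(l)   __attribute__((unlock_function(l)))

  struct LOCKABLE lock {
          int taken;
  };

  static void take(struct lock *l) LOCK_FUNC(l)   { l->taken = 1; }
  static void give(struct lock *l) UNLOCK_FUNC(l) { l->taken = 0; }

  struct vq {
          struct lock access_lock;
          void *async GUARDED_VAR; /* mirrors vq->async in this patch */
  };

  /* Mirrors vhost_free_async_mem(): callers must hold access_lock. */
  static void
  free_async(struct vq *vq)
          LOCK_REQUIRED(&vq->access_lock)
  {
          vq->async = 0; /* ok: the annotation promises the lock is held */
  }

  void
  good(struct vq *vq) /* same shape as the free_vq() change above */
  {
          take(&vq->access_lock);
          free_async(vq);
          give(&vq->access_lock);
  }

  void
  bad(struct vq *vq)
  {
          free_async(vq); /* -Wthread-safety: requires holding access_lock */
  }

Compiling the sketch should produce exactly one warning, on the call in
bad(); enabling this class of warning for the vhost build is what the
last patch of the series does.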