* Re: [dpdk-stable] [PATCH 19.11] net/virtio: fix refill order in packed ring datapath
From: Christian Ehrhardt @ 2021-08-17 9:44 UTC
To: Marvin Liu; +Cc: dpdk stable, Cheng Jiang
On Tue, Aug 17, 2021 at 4:30 AM Marvin Liu <yong.liu@intel.com> wrote:
>
> [ upstream commit 2d91b28730a945def257bc372a525c9b5dbf181c ]
Thanks, applied
> The front-end should refill the descriptor with the mbuf indicated by
> the buff_id rather than the one at the index of the used descriptor.
> The back-end may return buffers out of order if async copy mode is
> enabled.
>
> When initializing the rxq, refill the descriptors in order, as the
> buff_id is not available at that time.
>
> Fixes: a76290c8f1cf ("net/virtio: implement Rx path for packed queues")
>
> Signed-off-by: Marvin Liu <yong.liu@intel.com>
> Signed-off-by: Cheng Jiang <cheng1.jiang@intel.com>
> Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
>
> diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
> index 5211736d2..421e4847e 100644
> --- a/drivers/net/virtio/virtio_rxtx.c
> +++ b/drivers/net/virtio/virtio_rxtx.c
> @@ -474,13 +474,35 @@ virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf **cookie,
>  	return 0;
>  }
>
> +static inline void
> +virtqueue_refill_single_packed(struct virtqueue *vq,
> +			struct vring_packed_desc *dp,
> +			struct rte_mbuf *cookie)
> +{
> +	uint16_t flags = vq->vq_packed.cached_flags;
> +	struct virtio_hw *hw = vq->hw;
> +
> +	dp->addr = VIRTIO_MBUF_ADDR(cookie, vq) +
> +			RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
> +	dp->len = cookie->buf_len -
> +			RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;
> +
> +	virtqueue_store_flags_packed(dp, flags,
> +			hw->weak_barriers);
> +
> +	if (++vq->vq_avail_idx >= vq->vq_nentries) {
> +		vq->vq_avail_idx -= vq->vq_nentries;
> +		vq->vq_packed.cached_flags ^=
> +			VRING_PACKED_DESC_F_AVAIL_USED;
> +		flags = vq->vq_packed.cached_flags;
> +	}
> +}
> +
>  static inline int
> -virtqueue_enqueue_recv_refill_packed(struct virtqueue *vq,
> +virtqueue_enqueue_recv_refill_packed_init(struct virtqueue *vq,
>  			struct rte_mbuf **cookie, uint16_t num)
>  {
>  	struct vring_packed_desc *start_dp = vq->vq_packed.ring.desc;
> -	uint16_t flags = vq->vq_packed.cached_flags;
> -	struct virtio_hw *hw = vq->hw;
>  	struct vq_desc_extra *dxp;
>  	uint16_t idx;
>  	int i;
> @@ -496,24 +518,34 @@ virtqueue_enqueue_recv_refill_packed(struct virtqueue *vq,
>  		dxp->cookie = (void *)cookie[i];
>  		dxp->ndescs = 1;
>
> -		start_dp[idx].addr = VIRTIO_MBUF_ADDR(cookie[i], vq) +
> -			RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
> -		start_dp[idx].len = cookie[i]->buf_len - RTE_PKTMBUF_HEADROOM
> -			+ hw->vtnet_hdr_size;
> +		virtqueue_refill_single_packed(vq, &start_dp[idx], cookie[i]);
> +	}
> +	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
> +	return 0;
> +}
>
> -		vq->vq_desc_head_idx = dxp->next;
> -		if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
> -			vq->vq_desc_tail_idx = vq->vq_desc_head_idx;
> +static inline int
> +virtqueue_enqueue_recv_refill_packed(struct virtqueue *vq,
> +			struct rte_mbuf **cookie, uint16_t num)
> +{
> +	struct vring_packed_desc *start_dp = vq->vq_packed.ring.desc;
> +	struct vq_desc_extra *dxp;
> +	uint16_t idx, did;
> +	int i;
>
> -		virtqueue_store_flags_packed(&start_dp[idx], flags,
> -			hw->weak_barriers);
> +	if (unlikely(vq->vq_free_cnt == 0))
> +		return -ENOSPC;
> +	if (unlikely(vq->vq_free_cnt < num))
> +		return -EMSGSIZE;
>
> -		if (++vq->vq_avail_idx >= vq->vq_nentries) {
> -			vq->vq_avail_idx -= vq->vq_nentries;
> -			vq->vq_packed.cached_flags ^=
> -				VRING_PACKED_DESC_F_AVAIL_USED;
> -			flags = vq->vq_packed.cached_flags;
> -		}
> +	for (i = 0; i < num; i++) {
> +		idx = vq->vq_avail_idx;
> +		did = start_dp[idx].id;
> +		dxp = &vq->vq_descx[did];
> +		dxp->cookie = (void *)cookie[i];
> +		dxp->ndescs = 1;
> +
> +		virtqueue_refill_single_packed(vq, &start_dp[idx], cookie[i]);
>  	}
>  	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
>  	return 0;
> @@ -1022,7 +1054,7 @@ virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
>
>  			/* Enqueue allocated buffers */
>  			if (vtpci_packed_queue(vq->hw))
> -				error = virtqueue_enqueue_recv_refill_packed(vq,
> +				error = virtqueue_enqueue_recv_refill_packed_init(vq,
>  						&m, 1);
>  			else
>  				error = virtqueue_enqueue_recv_refill(vq,
> --
> 2.17.1
>
--
Christian Ehrhardt
Staff Engineer, Ubuntu Server
Canonical Ltd
* [dpdk-stable] [PATCH 19.11] net/virtio: fix refill order in packed ring datapath
From: Marvin Liu @ 2021-08-17 9:52 UTC
To: stable; +Cc: Marvin Liu, Cheng Jiang
[ upstream commit 2d91b28730a945def257bc372a525c9b5dbf181c ]
The front-end should refill the descriptor with the mbuf indicated by
the buff_id rather than the one at the index of the used descriptor.
The back-end may return buffers out of order if async copy mode is
enabled.

When initializing the rxq, refill the descriptors in order, as the
buff_id is not available at that time.
Fixes: a76290c8f1cf ("net/virtio: implement Rx path for packed queues")
Signed-off-by: Marvin Liu <yong.liu@intel.com>
Signed-off-by: Cheng Jiang <cheng1.jiang@intel.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
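
The core of the change is the indexing in the refill loop. As a minimal
sketch (struct desc, struct desc_extra and refill_one below are
hypothetical, simplified stand-ins for illustration, not the driver's
vring_packed_desc, vq_desc_extra or refill helper), the broken and the
fixed behaviour differ like this:

#include <stdint.h>

/* Simplified packed-ring descriptor: the back-end writes the id of
 * the buffer it consumed into the used descriptor. */
struct desc {
	uint64_t addr;
	uint32_t len;
	uint16_t id;		/* buff_id returned by the back-end */
};

struct desc_extra {
	void *cookie;		/* mbuf bound to this buffer id */
};

/* Refill one used slot: the ring slot to rewrite sits at avail_idx,
 * but the bookkeeping entry must be found through the id the back-end
 * returned, because with async copy buffer ids may complete in any
 * order. */
static void
refill_one(struct desc *ring, struct desc_extra *descx,
		uint16_t avail_idx, void *new_mbuf)
{
	uint16_t buff_id = ring[avail_idx].id;

	descx[buff_id].cookie = new_mbuf;	/* fixed: index by buff_id */
	/* descx[avail_idx].cookie = new_mbuf;
	 *   ^ the old assumption: completions always arrive in order */
}

With an in-order back-end the two indexings coincide, which is why the
bug only surfaces once the back-end can complete buffers out of order,
e.g. with async copy enabled.
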
diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index 5211736d2..421e4847e 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -474,13 +474,35 @@ virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf **cookie,
 	return 0;
 }
 
+static inline void
+virtqueue_refill_single_packed(struct virtqueue *vq,
+			struct vring_packed_desc *dp,
+			struct rte_mbuf *cookie)
+{
+	uint16_t flags = vq->vq_packed.cached_flags;
+	struct virtio_hw *hw = vq->hw;
+
+	dp->addr = VIRTIO_MBUF_ADDR(cookie, vq) +
+			RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
+	dp->len = cookie->buf_len -
+			RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;
+
+	virtqueue_store_flags_packed(dp, flags,
+			hw->weak_barriers);
+
+	if (++vq->vq_avail_idx >= vq->vq_nentries) {
+		vq->vq_avail_idx -= vq->vq_nentries;
+		vq->vq_packed.cached_flags ^=
+			VRING_PACKED_DESC_F_AVAIL_USED;
+		flags = vq->vq_packed.cached_flags;
+	}
+}
+
 static inline int
-virtqueue_enqueue_recv_refill_packed(struct virtqueue *vq,
+virtqueue_enqueue_recv_refill_packed_init(struct virtqueue *vq,
 			struct rte_mbuf **cookie, uint16_t num)
 {
 	struct vring_packed_desc *start_dp = vq->vq_packed.ring.desc;
-	uint16_t flags = vq->vq_packed.cached_flags;
-	struct virtio_hw *hw = vq->hw;
 	struct vq_desc_extra *dxp;
 	uint16_t idx;
 	int i;
@@ -496,24 +518,34 @@ virtqueue_enqueue_recv_refill_packed(struct virtqueue *vq,
 		dxp->cookie = (void *)cookie[i];
 		dxp->ndescs = 1;
 
-		start_dp[idx].addr = VIRTIO_MBUF_ADDR(cookie[i], vq) +
-			RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
-		start_dp[idx].len = cookie[i]->buf_len - RTE_PKTMBUF_HEADROOM
-			+ hw->vtnet_hdr_size;
+		virtqueue_refill_single_packed(vq, &start_dp[idx], cookie[i]);
+	}
+	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
+	return 0;
+}
 
-		vq->vq_desc_head_idx = dxp->next;
-		if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
-			vq->vq_desc_tail_idx = vq->vq_desc_head_idx;
+static inline int
+virtqueue_enqueue_recv_refill_packed(struct virtqueue *vq,
+			struct rte_mbuf **cookie, uint16_t num)
+{
+	struct vring_packed_desc *start_dp = vq->vq_packed.ring.desc;
+	struct vq_desc_extra *dxp;
+	uint16_t idx, did;
+	int i;
 
-		virtqueue_store_flags_packed(&start_dp[idx], flags,
-			hw->weak_barriers);
+	if (unlikely(vq->vq_free_cnt == 0))
+		return -ENOSPC;
+	if (unlikely(vq->vq_free_cnt < num))
+		return -EMSGSIZE;
 
-		if (++vq->vq_avail_idx >= vq->vq_nentries) {
-			vq->vq_avail_idx -= vq->vq_nentries;
-			vq->vq_packed.cached_flags ^=
-				VRING_PACKED_DESC_F_AVAIL_USED;
-			flags = vq->vq_packed.cached_flags;
-		}
+	for (i = 0; i < num; i++) {
+		idx = vq->vq_avail_idx;
+		did = start_dp[idx].id;
+		dxp = &vq->vq_descx[did];
+		dxp->cookie = (void *)cookie[i];
+		dxp->ndescs = 1;
+
+		virtqueue_refill_single_packed(vq, &start_dp[idx], cookie[i]);
 	}
 	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
 	return 0;
@@ -1022,7 +1054,7 @@ virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
 
 			/* Enqueue allocated buffers */
 			if (vtpci_packed_queue(vq->hw))
-				error = virtqueue_enqueue_recv_refill_packed(vq,
+				error = virtqueue_enqueue_recv_refill_packed_init(vq,
 						&m, 1);
 			else
 				error = virtqueue_enqueue_recv_refill(vq,
--
2.17.1
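
A side note on the wrap handling that virtqueue_refill_single_packed
carries over from the original code: a packed ring has no separate
avail ring, so on each lap over the descriptor array the driver flips
the sense of the AVAIL and USED flag bits (bits 7 and 15 of the 16-bit
descriptor flags word, per the virtio spec). A rough sketch of that
bookkeeping, with pq_state and advance_avail as hypothetical,
simplified names:

#include <stdint.h>

/* AVAIL is bit 7 and USED is bit 15 of the descriptor flags word. */
#define F_AVAIL_USED ((uint16_t)((1 << 7) | (1 << 15)))

/* Hypothetical, stripped-down queue state for illustration. */
struct pq_state {
	uint16_t avail_idx;	/* next slot handed to the back-end */
	uint16_t nentries;	/* ring size */
	uint16_t cached_flags;	/* flags template for the current lap */
};

/* Advance the avail index by one slot; on wrap-around, XOR-flip the
 * cached AVAIL/USED bits so descriptors written on the next lap carry
 * the inverted wrap counter, the same flip the diff performs with
 * VRING_PACKED_DESC_F_AVAIL_USED. */
static void
advance_avail(struct pq_state *q)
{
	if (++q->avail_idx >= q->nentries) {
		q->avail_idx -= q->nentries;
		q->cached_flags ^= F_AVAIL_USED;
	}
}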