* [dpdk-stable] [PATCH] net/virtio: fix refill order in packed ring datapath
@ 2021-07-08 9:58 Cheng Jiang
2021-07-19 8:30 ` Maxime Coquelin
2021-07-20 2:48 ` Xia, Chenbo
From: Cheng Jiang @ 2021-07-08 9:58 UTC (permalink / raw)
To: maxime.coquelin, Chenbo.Xia
Cc: dev, jiayu.hu, yong.liu, yvonnex.yang, Cheng Jiang, stable
The front-end should refill the descriptor with the mbuf indicated by
the buff_id rather than the index of the used descriptor, because the
back-end may return buffers out of order if async copy mode is enabled.

When initializing the rxq, refill the descriptors in order, as the
buff_id is not available at that time.
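
To illustrate the distinction (a simplified sketch using the driver's
names, not part of the patch itself): on a packed ring the device
writes the id of the buffer it completed into the descriptor it used,
so when completions can arrive out of order the per-descriptor state
must be looked up through that id, not through the ring slot index:

	uint16_t idx = vq->vq_avail_idx;    /* ring slot being refilled */
	uint16_t did = start_dp[idx].id;    /* buffer id the device wrote back */

	/* Wrong: assumes the device returned buffers in ring order. */
	dxp = &vq->vq_descx[idx];

	/* Right: follow the buffer id left in the used descriptor. */
	dxp = &vq->vq_descx[did];

At queue initialization no descriptor has been used yet, so there is
no valid buffer id to follow and the slots are simply filled in order.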
Fixes: a76290c8f1cf ("net/virtio: implement Rx path for packed queues")
Cc: stable@dpdk.org
Signed-off-by: Cheng Jiang <cheng1.jiang@intel.com>
Signed-off-by: Marvin Liu <yong.liu@intel.com>
diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index 3ac847317f..d35875d9ce 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -328,13 +328,35 @@ virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf **cookie,
 	return 0;
 }
 
+static inline void
+virtqueue_refill_single_packed(struct virtqueue *vq,
+			       struct vring_packed_desc *dp,
+			       struct rte_mbuf *cookie)
+{
+	uint16_t flags = vq->vq_packed.cached_flags;
+	struct virtio_hw *hw = vq->hw;
+
+	dp->addr = cookie->buf_iova +
+			RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
+	dp->len = cookie->buf_len -
+			RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;
+
+	virtqueue_store_flags_packed(dp, flags,
+				     hw->weak_barriers);
+
+	if (++vq->vq_avail_idx >= vq->vq_nentries) {
+		vq->vq_avail_idx -= vq->vq_nentries;
+		vq->vq_packed.cached_flags ^=
+			VRING_PACKED_DESC_F_AVAIL_USED;
+		flags = vq->vq_packed.cached_flags;
+	}
+}
+
 static inline int
-virtqueue_enqueue_recv_refill_packed(struct virtqueue *vq,
+virtqueue_enqueue_recv_refill_packed_init(struct virtqueue *vq,
 				     struct rte_mbuf **cookie, uint16_t num)
 {
 	struct vring_packed_desc *start_dp = vq->vq_packed.ring.desc;
-	uint16_t flags = vq->vq_packed.cached_flags;
-	struct virtio_hw *hw = vq->hw;
 	struct vq_desc_extra *dxp;
 	uint16_t idx;
 	int i;
@@ -350,24 +372,34 @@ virtqueue_enqueue_recv_refill_packed(struct virtqueue *vq,
 		dxp->cookie = (void *)cookie[i];
 		dxp->ndescs = 1;
 
-		start_dp[idx].addr = cookie[i]->buf_iova +
-				RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
-		start_dp[idx].len = cookie[i]->buf_len -
-				RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;
+		virtqueue_refill_single_packed(vq, &start_dp[idx], cookie[i]);
+	}
+	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
+	return 0;
+}
 
-		vq->vq_desc_head_idx = dxp->next;
-		if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
-			vq->vq_desc_tail_idx = vq->vq_desc_head_idx;
+static inline int
+virtqueue_enqueue_recv_refill_packed(struct virtqueue *vq,
+				     struct rte_mbuf **cookie, uint16_t num)
+{
+	struct vring_packed_desc *start_dp = vq->vq_packed.ring.desc;
+	struct vq_desc_extra *dxp;
+	uint16_t idx, did;
+	int i;
 
-		virtqueue_store_flags_packed(&start_dp[idx], flags,
-					     hw->weak_barriers);
+	if (unlikely(vq->vq_free_cnt == 0))
+		return -ENOSPC;
+	if (unlikely(vq->vq_free_cnt < num))
+		return -EMSGSIZE;
 
-		if (++vq->vq_avail_idx >= vq->vq_nentries) {
-			vq->vq_avail_idx -= vq->vq_nentries;
-			vq->vq_packed.cached_flags ^=
-				VRING_PACKED_DESC_F_AVAIL_USED;
-			flags = vq->vq_packed.cached_flags;
-		}
+	for (i = 0; i < num; i++) {
+		idx = vq->vq_avail_idx;
+		did = start_dp[idx].id;
+		dxp = &vq->vq_descx[did];
+		dxp->cookie = (void *)cookie[i];
+		dxp->ndescs = 1;
+
+		virtqueue_refill_single_packed(vq, &start_dp[idx], cookie[i]);
 	}
 	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
 	return 0;
@@ -742,7 +774,7 @@ virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
 
 	/* Enqueue allocated buffers */
 	if (virtio_with_packed_queue(vq->hw))
-		error = virtqueue_enqueue_recv_refill_packed(vq,
+		error = virtqueue_enqueue_recv_refill_packed_init(vq,
 					&m, 1);
 	else
 		error = virtqueue_enqueue_recv_refill(vq,
--
2.17.1
* Re: [dpdk-stable] [PATCH] net/virtio: fix refill order in packed ring datapath
2021-07-08 9:58 [dpdk-stable] [PATCH] net/virtio: fix refill order in packed ring datapath Cheng Jiang
@ 2021-07-19 8:30 ` Maxime Coquelin
2021-07-20 2:48 ` Xia, Chenbo
From: Maxime Coquelin @ 2021-07-19 8:30 UTC (permalink / raw)
To: Cheng Jiang, Chenbo.Xia; +Cc: dev, jiayu.hu, yong.liu, yvonnex.yang, stable
On 7/8/21 11:58 AM, Cheng Jiang wrote:
> The front-end should refill the descriptor with the mbuf indicated by
> the buff_id rather than the index of the used descriptor, because the
> back-end may return buffers out of order if async copy mode is enabled.
>
> When initializing the rxq, refill the descriptors in order, as the
> buff_id is not available at that time.
>
> Fixes: a76290c8f1cf ("net/virtio: implement Rx path for packed queues")
> Cc: stable@dpdk.org
>
> Signed-off-by: Cheng Jiang <cheng1.jiang@intel.com>
> Signed-off-by: Marvin Liu <yong.liu@intel.com>
>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
Thanks,
Maxime
* Re: [dpdk-stable] [PATCH] net/virtio: fix refill order in packed ring datapath
2021-07-08 9:58 [dpdk-stable] [PATCH] net/virtio: fix refill order in packed ring datapath Cheng Jiang
2021-07-19 8:30 ` Maxime Coquelin
@ 2021-07-20 2:48 ` Xia, Chenbo
From: Xia, Chenbo @ 2021-07-20 2:48 UTC (permalink / raw)
To: Jiang, Cheng1, maxime.coquelin
Cc: dev, Hu, Jiayu, Liu, Yong, Yang, YvonneX, stable
> -----Original Message-----
> From: Jiang, Cheng1 <cheng1.jiang@intel.com>
> Sent: Thursday, July 8, 2021 5:58 PM
> To: maxime.coquelin@redhat.com; Xia, Chenbo <chenbo.xia@intel.com>
> Cc: dev@dpdk.org; Hu, Jiayu <jiayu.hu@intel.com>; Liu, Yong
> <yong.liu@intel.com>; Yang, YvonneX <yvonnex.yang@intel.com>; Jiang, Cheng1
> <cheng1.jiang@intel.com>; stable@dpdk.org
> Subject: [PATCH] net/virtio: fix refill order in packed ring datapath
>
> The front-end should refill the descriptor with the mbuf indicated by
> the buff_id rather than the index of the used descriptor, because the
> back-end may return buffers out of order if async copy mode is enabled.
>
> When initializing the rxq, refill the descriptors in order, as the
> buff_id is not available at that time.
>
> Fixes: a76290c8f1cf ("net/virtio: implement Rx path for packed queues")
> Cc: stable@dpdk.org
>
> Signed-off-by: Cheng Jiang <cheng1.jiang@intel.com>
> Signed-off-by: Marvin Liu <yong.liu@intel.com>
> --
> 2.17.1
Applied to next-virtio/main, thanks.