DPDK patches and discussions
From: Maxime Coquelin <maxime.coquelin@redhat.com>
To: tiwei.bie@intel.com, yliu@fridaylinux.org,
	ferruh.yigit@intel.com, victork@redhat.com, thomas@monjalon.net,
	olivier.matz@6wind.com, jianfeng.tan@intel.com
Cc: dev@dpdk.org, stable@dpdk.org, zhihong.wang@intel.com,
	qian.q.xu@intel.com, lei.a.yao@intel.com,
	Maxime Coquelin <maxime.coquelin@redhat.com>
Subject: [dpdk-dev] [PATCH v2 2/3] virtio: fix resuming port with rx vector path
Date: Mon, 12 Feb 2018 16:46:11 +0100
Message-ID: <20180212154612.5297-3-maxime.coquelin@redhat.com>
In-Reply-To: <20180212154612.5297-1-maxime.coquelin@redhat.com>

Since commit efc83a1e7fc3 ("net/virtio: fix queue setup consistency"),
when resuming a virtio port, the Rx rings are refilled with new mbufs
until they are full (vq->vq_free_cnt == 0). This is done without
ensuring that the descriptor index remains a multiple of
RTE_VIRTIO_VPMD_RX_REARM_THRESH, which is a prerequisite when using
vector mode. This can cause an out-of-bounds access in the Rx ring.

This commit changes the vector refill method from
virtqueue_enqueue_recv_refill_simple() to virtio_rxq_rearm_vec(), which
properly ensures that the refill is done in batches of
RTE_VIRTIO_VPMD_RX_REARM_THRESH descriptors.

As virtqueue_enqueue_recv_refill_simple() is no longer used, this
patch also removes the function.
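
To illustrate the difference, here is a small standalone sketch; the
ring size, in-flight descriptor count and threshold value below are
made-up examples, not taken from this patch:

#include <stdio.h>

#define REARM_THRESH 64u /* stands in for RTE_VIRTIO_VPMD_RX_REARM_THRESH */

int main(void)
{
	unsigned int ring_size = 256;               /* example ring size */
	unsigned int in_use = 40;                   /* example: descriptors still in flight at resume */
	unsigned int free_cnt = ring_size - in_use; /* 216 free entries */

	/* Old behaviour: refill until the ring is full. */
	unsigned int old_refill = free_cnt;

	/* New behaviour: refill only in full batches, as virtio_rxq_rearm_vec() does. */
	unsigned int new_refill = (free_cnt / REARM_THRESH) * REARM_THRESH;

	printf("old refill: %u mbufs, %u %% %u = %u\n",
	       old_refill, old_refill, REARM_THRESH, old_refill % REARM_THRESH);
	printf("new refill: %u mbufs, %u %% %u = %u\n",
	       new_refill, new_refill, REARM_THRESH, new_refill % REARM_THRESH);
	return 0;
}

In the first case the avail index ends up off a batch boundary, so a
later batched rearm in the vector Rx path can step past the ring end;
in the second case the index always stays a multiple of the threshold.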

Fixes: efc83a1e7fc3 ("net/virtio: fix queue setup consistency")

Reviewed-by: Jianfeng Tan <jianfeng.tan@intel.com>
Signed-off-by: Tiwei Bie <tiwei.bie@intel.com>
Signed-off-by: Olivier Matz <olivier.matz@6wind.com>
Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
 drivers/net/virtio/virtio_rxtx.c        | 38 ++++++++++++++++++---------------
 drivers/net/virtio/virtio_rxtx.h        |  3 ---
 drivers/net/virtio/virtio_rxtx_simple.c | 31 +--------------------------
 drivers/net/virtio/virtio_rxtx_simple.h |  2 +-
 4 files changed, 23 insertions(+), 51 deletions(-)

diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index 854af399e..8dbf2a30e 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -30,6 +30,7 @@
 #include "virtio_pci.h"
 #include "virtqueue.h"
 #include "virtio_rxtx.h"
+#include "virtio_rxtx_simple.h"
 
 #ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
 #define VIRTIO_DUMP_PACKET(m, len) rte_pktmbuf_dump(stdout, m, len)
@@ -437,6 +438,8 @@ virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
 			vq->vq_ring.desc[desc_idx].flags =
 				VRING_DESC_F_WRITE;
 		}
+
+		virtio_rxq_vec_setup(rxvq);
 	}
 
 	memset(&rxvq->fake_mbuf, 0, sizeof(rxvq->fake_mbuf));
@@ -446,30 +449,31 @@ virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
 			&rxvq->fake_mbuf;
 	}
 
-	while (!virtqueue_full(vq)) {
-		m = rte_mbuf_raw_alloc(rxvq->mpool);
-		if (m == NULL)
-			break;
+	if (hw->use_simple_rx) {
+		while (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
+			virtio_rxq_rearm_vec(rxvq);
+			nbufs += RTE_VIRTIO_VPMD_RX_REARM_THRESH;
+		}
+	} else {
+		while (!virtqueue_full(vq)) {
+			m = rte_mbuf_raw_alloc(rxvq->mpool);
+			if (m == NULL)
+				break;
 
-		/* Enqueue allocated buffers */
-		if (hw->use_simple_rx)
-			error = virtqueue_enqueue_recv_refill_simple(vq, m);
-		else
+			/* Enqueue allocated buffers */
 			error = virtqueue_enqueue_recv_refill(vq, m);
-
-		if (error) {
-			rte_pktmbuf_free(m);
-			break;
+			if (error) {
+				rte_pktmbuf_free(m);
+				break;
+			}
+			nbufs++;
 		}
-		nbufs++;
-	}
 
-	vq_update_avail_idx(vq);
+		vq_update_avail_idx(vq);
+	}
 
 	PMD_INIT_LOG(DEBUG, "Allocated %d bufs", nbufs);
 
-	virtio_rxq_vec_setup(rxvq);
-
 	VIRTQUEUE_DUMP(vq);
 
 	return 0;
diff --git a/drivers/net/virtio/virtio_rxtx.h b/drivers/net/virtio/virtio_rxtx.h
index 49e9d98ee..685cc4f81 100644
--- a/drivers/net/virtio/virtio_rxtx.h
+++ b/drivers/net/virtio/virtio_rxtx.h
@@ -60,7 +60,4 @@ struct virtnet_ctl {
 
 int virtio_rxq_vec_setup(struct virtnet_rx *rxvq);
 
-int virtqueue_enqueue_recv_refill_simple(struct virtqueue *vq,
-	struct rte_mbuf *m);
-
 #endif /* _VIRTIO_RXTX_H_ */
diff --git a/drivers/net/virtio/virtio_rxtx_simple.c b/drivers/net/virtio/virtio_rxtx_simple.c
index b1f610ffa..515207581 100644
--- a/drivers/net/virtio/virtio_rxtx_simple.c
+++ b/drivers/net/virtio/virtio_rxtx_simple.c
@@ -27,35 +27,6 @@
 #pragma GCC diagnostic ignored "-Wcast-qual"
 #endif
 
-int __attribute__((cold))
-virtqueue_enqueue_recv_refill_simple(struct virtqueue *vq,
-	struct rte_mbuf *cookie)
-{
-	struct vq_desc_extra *dxp;
-	struct vring_desc *start_dp;
-	uint16_t desc_idx;
-
-	cookie->port = vq->rxq.port_id;
-	cookie->data_off = RTE_PKTMBUF_HEADROOM;
-
-	desc_idx = vq->vq_avail_idx & (vq->vq_nentries - 1);
-	dxp = &vq->vq_descx[desc_idx];
-	dxp->cookie = (void *)cookie;
-	vq->sw_ring[desc_idx] = cookie;
-
-	start_dp = vq->vq_ring.desc;
-	start_dp[desc_idx].addr =
-		VIRTIO_MBUF_ADDR(cookie, vq) +
-		RTE_PKTMBUF_HEADROOM - vq->hw->vtnet_hdr_size;
-	start_dp[desc_idx].len = cookie->buf_len -
-		RTE_PKTMBUF_HEADROOM + vq->hw->vtnet_hdr_size;
-
-	vq->vq_free_cnt--;
-	vq->vq_avail_idx++;
-
-	return 0;
-}
-
 uint16_t
 virtio_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
 	uint16_t nb_pkts)
@@ -78,7 +49,7 @@ virtio_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
 	rte_compiler_barrier();
 
 	if (nb_used >= VIRTIO_TX_FREE_THRESH)
-		virtio_xmit_cleanup(vq);
+		virtio_xmit_cleanup_simple(vq);
 
 	nb_commit = nb_pkts = RTE_MIN((vq->vq_free_cnt >> 1), nb_pkts);
 	desc_idx = (uint16_t)(vq->vq_avail_idx & desc_idx_max);
diff --git a/drivers/net/virtio/virtio_rxtx_simple.h b/drivers/net/virtio/virtio_rxtx_simple.h
index 2d8e6b14a..303904d64 100644
--- a/drivers/net/virtio/virtio_rxtx_simple.h
+++ b/drivers/net/virtio/virtio_rxtx_simple.h
@@ -60,7 +60,7 @@ virtio_rxq_rearm_vec(struct virtnet_rx *rxvq)
 #define VIRTIO_TX_FREE_NR 32
 /* TODO: vq->tx_free_cnt could mean num of free slots so we could avoid shift */
 static inline void
-virtio_xmit_cleanup(struct virtqueue *vq)
+virtio_xmit_cleanup_simple(struct virtqueue *vq)
 {
 	uint16_t i, desc_idx;
 	uint32_t nb_free = 0;
-- 
2.14.3

Thread overview: 6+ messages
2018-02-12 15:46 [dpdk-dev] [PATCH v2 0/3] Vhost & Virtio fixes for v18.02 Maxime Coquelin
2018-02-12 15:46 ` [dpdk-dev] [PATCH v2 1/3] net/virtio: fix mbuf data offset for simple Rx function Maxime Coquelin
2018-02-13  8:53   ` Maxime Coquelin
2018-02-12 15:46 ` Maxime Coquelin [this message]
2018-02-12 15:46 ` [dpdk-dev] [PATCH v2 3/3] vhost: don't take access_lock on VHOST_USER_RESET_OWNER Maxime Coquelin
2018-02-13 17:45 ` [dpdk-dev] [dpdk-stable] [PATCH v2 0/3] Vhost & Virtio fixes for v18.02 Thomas Monjalon
