From: Ye Xiaolong <xiaolong.ye@intel.com>
To: Xuan Ding <xuan.ding@intel.com>
Cc: maxime.coquelin@redhat.com, tiwei.bie@intel.com,
	zhihong.wang@intel.com, yong.liu@intel.com, dev@dpdk.org,
	stable@dpdk.org
Subject: Re: [dpdk-stable] [dpdk-dev] [PATCH v3] net/virtio-user: fix packed ring server mode
Date: Wed, 18 Dec 2019 10:25:01 +0800
Message-ID: <20191218022501.GP59123@intel.com>
In-Reply-To: <20191218022406.86245-1-xuan.ding@intel.com>

Hi, Xuan

On 12/18, Xuan Ding wrote:
>This patch fixes the situation where the datapath does not work properly
>when vhost reconnects to virtio in server mode with packed ring.
>
>Currently, virtio and vhost share the vring memory. For split ring, vhost
>can read the status of descriptors directly from the available ring and
>the used ring during reconnection, so the datapath can continue.
>
>But for packed ring, vhost cannot recover the status of descriptors from
>the descriptor ring alone when reconnecting to virtio. By resetting the
>Tx and Rx queues, the datapath can restart from the beginning.
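>
>For illustration only (a sketch, not part of this patch, and the helper
>name is made up): the check below mirrors the desc_is_used() logic in
>drivers/net/virtio/virtqueue.h. A packed descriptor's state is decoded by
>comparing its AVAIL/USED flag bits against a wrap counter kept in
>driver-private state, which is why the shared ring memory alone is not
>enough to recover descriptor status after a reconnect:
>
>	/* Assumes <stdbool.h> plus the vring_packed_desc and flag
>	 * definitions from drivers/net/virtio/virtio_ring.h.
>	 */
>	static inline int
>	desc_state_used(const struct vring_packed_desc *desc, bool wrap_counter)
>	{
>		uint16_t flags = desc->flags;
>		int used = !!(flags & VRING_PACKED_DESC_F_USED);
>		int avail = !!(flags & VRING_PACKED_DESC_F_AVAIL);
>
>		/* Used if and only if both bits match the wrap counter. */
>		return avail == used && used == wrap_counter;
>	}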
>
>Fixes: 4c3f5822eb214 ("net/virtio: add packed virtqueue defines")
>Cc: stable@dpdk.org
>
>Signed-off-by: Xuan Ding <xuan.ding@intel.com>
>
>v3:
>* Removed an extra asterisk from a comment.
>* Renamed device reset function and moved it to virtio_user_ethdev.c.
>
>v2:
>* Renamed queue reset functions and moved them to virtqueue.c.

Please put the change log after the '---' marker below; then it won't be
shown in the commit log when the patch is applied with `git am`.
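
For example (layout only, reusing this patch's own notes), `git am`
drops everything between the '---' marker and the diff, so the change
log belongs there:

    Signed-off-by: Xuan Ding <xuan.ding@intel.com>
    ---
    v3:
    * Removed an extra asterisk from a comment.
    * Renamed device reset function and moved it to virtio_user_ethdev.c.

     drivers/net/virtio/virtio_ethdev.c      |  4 +-
     ...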

Thanks,
Xiaolong

>---
> drivers/net/virtio/virtio_ethdev.c      |  4 +-
> drivers/net/virtio/virtio_user_ethdev.c | 40 ++++++++++++++
> drivers/net/virtio/virtqueue.c          | 71 +++++++++++++++++++++++++
> drivers/net/virtio/virtqueue.h          |  4 ++
> 4 files changed, 117 insertions(+), 2 deletions(-)
>
>diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
>index 044eb10a7..f9d0ea70d 100644
>--- a/drivers/net/virtio/virtio_ethdev.c
>+++ b/drivers/net/virtio/virtio_ethdev.c
>@@ -1913,6 +1913,8 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
> 			goto err_vtpci_init;
> 	}
> 
>+	rte_spinlock_init(&hw->state_lock);
>+
> 	/* reset device and negotiate default features */
> 	ret = virtio_init_device(eth_dev, VIRTIO_PMD_DEFAULT_GUEST_FEATURES);
> 	if (ret < 0)
>@@ -2155,8 +2157,6 @@ virtio_dev_configure(struct rte_eth_dev *dev)
> 			return -EBUSY;
> 		}
> 
>-	rte_spinlock_init(&hw->state_lock);
>-
> 	hw->use_simple_rx = 1;
> 
> 	if (vtpci_with_feature(hw, VIRTIO_F_IN_ORDER)) {
>diff --git a/drivers/net/virtio/virtio_user_ethdev.c b/drivers/net/virtio/virtio_user_ethdev.c
>index 3fc172573..425f48230 100644
>--- a/drivers/net/virtio/virtio_user_ethdev.c
>+++ b/drivers/net/virtio/virtio_user_ethdev.c
>@@ -25,12 +25,48 @@
> #define virtio_user_get_dev(hw) \
> 	((struct virtio_user_dev *)(hw)->virtio_user_dev)
> 
>+static void
>+virtio_user_reset_queues_packed(struct rte_eth_dev *dev)
>+{
>+	struct virtio_hw *hw = dev->data->dev_private;
>+	struct virtnet_rx *rxvq;
>+	struct virtnet_tx *txvq;
>+	uint16_t i;
>+
>+	/* Add lock to avoid queue contention. */
>+	rte_spinlock_lock(&hw->state_lock);
>+	hw->started = 0;
>+
>+	/*
>+	 * Wait for the datapath to complete before resetting queues.
>+	 * 1 ms should be enough for the ongoing Tx/Rx function to finish.
>+	 */
>+	rte_delay_ms(1);
>+
>+	/* Vring reset for each Tx queue and Rx queue. */
>+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
>+		rxvq = dev->data->rx_queues[i];
>+		virtqueue_rxvq_reset_packed(rxvq->vq);
>+		virtio_dev_rx_queue_setup_finish(dev, i);
>+	}
>+
>+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
>+		txvq = dev->data->tx_queues[i];
>+		virtqueue_txvq_reset_packed(txvq->vq);
>+	}
>+
>+	hw->started = 1;
>+	rte_spinlock_unlock(&hw->state_lock);
>+}
>+
>+
> static int
> virtio_user_server_reconnect(struct virtio_user_dev *dev)
> {
> 	int ret;
> 	int connectfd;
> 	struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->port_id];
>+	struct virtio_hw *hw = eth_dev->data->dev_private;
> 
> 	connectfd = accept(dev->listenfd, NULL, NULL);
> 	if (connectfd < 0)
>@@ -51,6 +87,10 @@ virtio_user_server_reconnect(struct virtio_user_dev *dev)
> 
> 	dev->features &= dev->device_features;
> 
>+	/* For packed ring, the queues must be reset during reconnection. */
>+	if (vtpci_packed_queue(hw))
>+		virtio_user_reset_queues_packed(eth_dev);
>+
> 	ret = virtio_user_start_device(dev);
> 	if (ret < 0)
> 		return -1;
>diff --git a/drivers/net/virtio/virtqueue.c b/drivers/net/virtio/virtqueue.c
>index 5ff1e3587..0b4e3bf3e 100644
>--- a/drivers/net/virtio/virtqueue.c
>+++ b/drivers/net/virtio/virtqueue.c
>@@ -141,3 +141,74 @@ virtqueue_rxvq_flush(struct virtqueue *vq)
> 	else
> 		virtqueue_rxvq_flush_split(vq);
> }
>+
>+int
>+virtqueue_rxvq_reset_packed(struct virtqueue *vq)
>+{
>+	int size = vq->vq_nentries;
>+	struct vq_desc_extra *dxp;
>+	struct virtnet_rx *rxvq;
>+	uint16_t desc_idx;
>+
>+	vq->vq_used_cons_idx = 0;
>+	vq->vq_desc_head_idx = 0;
>+	vq->vq_avail_idx = 0;
>+	vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
>+	vq->vq_free_cnt = vq->vq_nentries;
>+
>+	vq->vq_packed.used_wrap_counter = 1;
>+	vq->vq_packed.cached_flags = VRING_PACKED_DESC_F_AVAIL;
>+	vq->vq_packed.event_flags_shadow = 0;
>+	vq->vq_packed.cached_flags |= VRING_DESC_F_WRITE;
>+
>+	rxvq = &vq->rxq;
>+	memset(rxvq->mz->addr, 0, rxvq->mz->len);
>+
>+	for (desc_idx = 0; desc_idx < vq->vq_nentries; desc_idx++) {
>+		dxp = &vq->vq_descx[desc_idx];
>+		if (dxp->cookie != NULL) {
>+			rte_pktmbuf_free(dxp->cookie);
>+			dxp->cookie = NULL;
>+		}
>+	}
>+
>+	vring_desc_init_packed(vq, size);
>+
>+	return 0;
>+}
>+
>+int
>+virtqueue_txvq_reset_packed(struct virtqueue *vq)
>+{
>+	int size = vq->vq_nentries;
>+	struct vq_desc_extra *dxp;
>+	struct virtnet_tx *txvq;
>+	uint16_t desc_idx;
>+
>+	vq->vq_used_cons_idx = 0;
>+	vq->vq_desc_head_idx = 0;
>+	vq->vq_avail_idx = 0;
>+	vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
>+	vq->vq_free_cnt = vq->vq_nentries;
>+
>+	vq->vq_packed.used_wrap_counter = 1;
>+	vq->vq_packed.cached_flags = VRING_PACKED_DESC_F_AVAIL;
>+	vq->vq_packed.event_flags_shadow = 0;
>+
>+	txvq = &vq->txq;
>+	memset(txvq->mz->addr, 0, txvq->mz->len);
>+	memset(txvq->virtio_net_hdr_mz->addr, 0,
>+		txvq->virtio_net_hdr_mz->len);
>+
>+	for (desc_idx = 0; desc_idx < vq->vq_nentries; desc_idx++) {
>+		dxp = &vq->vq_descx[desc_idx];
>+		if (dxp->cookie != NULL) {
>+			rte_pktmbuf_free(dxp->cookie);
>+			dxp->cookie = NULL;
>+		}
>+	}
>+
>+	vring_desc_init_packed(vq, size);
>+
>+	return 0;
>+}
>diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
>index 8d7f197b1..58ad7309a 100644
>--- a/drivers/net/virtio/virtqueue.h
>+++ b/drivers/net/virtio/virtqueue.h
>@@ -443,6 +443,10 @@ struct rte_mbuf *virtqueue_detach_unused(struct virtqueue *vq);
> /* Flush the elements in the used ring. */
> void virtqueue_rxvq_flush(struct virtqueue *vq);
> 
>+int virtqueue_rxvq_reset_packed(struct virtqueue *vq);
>+
>+int virtqueue_txvq_reset_packed(struct virtqueue *vq);
>+
> static inline int
> virtqueue_full(const struct virtqueue *vq)
> {
>-- 
>2.17.1
>
