DPDK patches and discussions
From: "Xia, Chenbo" <chenbo.xia@intel.com>
To: Maxime Coquelin <maxime.coquelin@redhat.com>,
	"dev@dpdk.org" <dev@dpdk.org>,
	"david.marchand@redhat.com" <david.marchand@redhat.com>,
	"mkp@redhat.com" <mkp@redhat.com>,
	"fbl@redhat.com" <fbl@redhat.com>,
	"jasowang@redhat.com" <jasowang@redhat.com>,
	"Liang, Cunming" <cunming.liang@intel.com>,
	"Xie, Yongji" <xieyongji@bytedance.com>,
	"echaudro@redhat.com" <echaudro@redhat.com>,
	"eperezma@redhat.com" <eperezma@redhat.com>,
	"amorenoz@redhat.com" <amorenoz@redhat.com>,
	"lulu@redhat.com" <lulu@redhat.com>
Subject: RE: [PATCH v3 17/28] vhost: add control virtqueue support
Date: Mon, 29 May 2023 06:51:20 +0000
Message-ID: <SN6PR11MB3504B1FE7414BFA3B27BF9109C4A9@SN6PR11MB3504.namprd11.prod.outlook.com>
In-Reply-To: <20230525162551.70359-18-maxime.coquelin@redhat.com>

> -----Original Message-----
> From: Maxime Coquelin <maxime.coquelin@redhat.com>
> Sent: Friday, May 26, 2023 12:26 AM
> To: dev@dpdk.org; Xia, Chenbo <chenbo.xia@intel.com>;
> david.marchand@redhat.com; mkp@redhat.com; fbl@redhat.com;
> jasowang@redhat.com; Liang, Cunming <cunming.liang@intel.com>; Xie, Yongji
> <xieyongji@bytedance.com>; echaudro@redhat.com; eperezma@redhat.com;
> amorenoz@redhat.com; lulu@redhat.com
> Cc: Maxime Coquelin <maxime.coquelin@redhat.com>
> Subject: [PATCH v3 17/28] vhost: add control virtqueue support
> 
> In order to support multi-queue with VDUSE, control
> virtqueue support is required.
> 
> This patch adds the control queue implementation; it will
> be used later when adding VDUSE support. Only the split
> ring layout is supported for now; packed ring support will
> be added later.
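
For context, the control virtqueue message format parsed below is the one defined
by the virtio spec: the driver posts a class byte, a command byte and
command-specific data in device-readable descriptors, followed by a single
device-writable status byte that the backend fills with the ack. A rough sketch
of that layout (the struct and macro names here follow the spec and are purely
illustrative, they are not the ones introduced by this patch):

/* Illustrative virtio-net control request layout, per the virtio spec.
 * The backend reads class/cmd/data from the read-only descriptors and
 * writes the single ack byte into the final write-only descriptor.
 */
struct virtio_net_ctrl_hdr {
	uint8_t class;	/* e.g. VIRTIO_NET_CTRL_MQ (4) */
	uint8_t cmd;	/* e.g. VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET (0) */
};

struct virtio_net_ctrl_mq {
	uint16_t virtqueue_pairs;	/* requested number of queue pairs */
};

#define VIRTIO_NET_OK	0	/* ack written back by the device */
#define VIRTIO_NET_ERR	1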
> 
> Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
> ---
>  lib/vhost/meson.build       |   1 +
>  lib/vhost/vhost.h           |   2 +
>  lib/vhost/virtio_net_ctrl.c | 286 ++++++++++++++++++++++++++++++++++++
>  lib/vhost/virtio_net_ctrl.h |  10 ++
>  4 files changed, 299 insertions(+)
>  create mode 100644 lib/vhost/virtio_net_ctrl.c
>  create mode 100644 lib/vhost/virtio_net_ctrl.h
> 
> diff --git a/lib/vhost/meson.build b/lib/vhost/meson.build
> index 0d1abf6283..83c8482c9e 100644
> --- a/lib/vhost/meson.build
> +++ b/lib/vhost/meson.build
> @@ -27,6 +27,7 @@ sources = files(
>          'vhost_crypto.c',
>          'vhost_user.c',
>          'virtio_net.c',
> +        'virtio_net_ctrl.c',
>  )
>  headers = files(
>          'rte_vdpa.h',
> diff --git a/lib/vhost/vhost.h b/lib/vhost/vhost.h
> index 8f0875b4e2..76663aed24 100644
> --- a/lib/vhost/vhost.h
> +++ b/lib/vhost/vhost.h
> @@ -525,6 +525,8 @@ struct virtio_net {
>  	int			postcopy_ufd;
>  	int			postcopy_listening;
> 
> +	struct vhost_virtqueue	*cvq;
> +
>  	struct rte_vdpa_device *vdpa_dev;
> 
>  	/* context data for the external message handlers */
> diff --git a/lib/vhost/virtio_net_ctrl.c b/lib/vhost/virtio_net_ctrl.c
> new file mode 100644
> index 0000000000..f4b8d5f7cc
> --- /dev/null
> +++ b/lib/vhost/virtio_net_ctrl.c
> @@ -0,0 +1,286 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright (c) 2023 Red Hat, Inc.
> + */
> +
> +#include <stdint.h>
> +#include <stdio.h>
> +#include <unistd.h>
> +
> +#include "iotlb.h"
> +#include "vhost.h"
> +#include "virtio_net_ctrl.h"
> +
> +struct virtio_net_ctrl {
> +	uint8_t class;
> +	uint8_t command;
> +	uint8_t command_data[];
> +};
> +
> +struct virtio_net_ctrl_elem {
> +	struct virtio_net_ctrl *ctrl_req;
> +	uint16_t head_idx;
> +	uint16_t n_descs;
> +	uint8_t *desc_ack;
> +};
> +
> +static int
> +virtio_net_ctrl_pop(struct virtio_net *dev, struct vhost_virtqueue *cvq,
> +		struct virtio_net_ctrl_elem *ctrl_elem)
> +	__rte_shared_locks_required(&cvq->iotlb_lock)
> +{
> +	uint16_t avail_idx, desc_idx, n_descs = 0;
> +	uint64_t desc_len, desc_addr, desc_iova, data_len = 0;
> +	uint8_t *ctrl_req;
> +	struct vring_desc *descs;
> +
> +	avail_idx = __atomic_load_n(&cvq->avail->idx, __ATOMIC_ACQUIRE);
> +	if (avail_idx == cvq->last_avail_idx) {
> +		VHOST_LOG_CONFIG(dev->ifname, DEBUG, "Control queue empty\n");
> +		return 0;
> +	}
> +
> +	desc_idx = cvq->avail->ring[cvq->last_avail_idx];
> +	if (desc_idx >= cvq->size) {
> +		VHOST_LOG_CONFIG(dev->ifname, ERR, "Out of range desc index, dropping\n");
> +		goto err;
> +	}
> +
> +	ctrl_elem->head_idx = desc_idx;
> +
> +	if (cvq->desc[desc_idx].flags & VRING_DESC_F_INDIRECT) {
> +		desc_len = cvq->desc[desc_idx].len;
> +		desc_iova = cvq->desc[desc_idx].addr;
> +
> +		descs = (struct vring_desc *)(uintptr_t)vhost_iova_to_vva(dev, cvq,
> +					desc_iova, &desc_len, VHOST_ACCESS_RO);
> +		if (!descs || desc_len != cvq->desc[desc_idx].len) {
> +			VHOST_LOG_CONFIG(dev->ifname, ERR, "Failed to map ctrl indirect descs\n");
> +			goto err;
> +		}
> +
> +		desc_idx = 0;
> +	} else {
> +		descs = cvq->desc;
> +	}
> +
> +	while (1) {
> +		desc_len = descs[desc_idx].len;
> +		desc_iova = descs[desc_idx].addr;
> +
> +		n_descs++;
> +
> +		if (descs[desc_idx].flags & VRING_DESC_F_WRITE) {
> +			if (ctrl_elem->desc_ack) {
> +				VHOST_LOG_CONFIG(dev->ifname, ERR,
> +						"Unexpected ctrl chain layout\n");
> +				goto err;
> +			}
> +
> +			if (desc_len != sizeof(uint8_t)) {
> +				VHOST_LOG_CONFIG(dev->ifname, ERR,
> +						"Invalid ack size for ctrl req, dropping\n");
> +				goto err;
> +			}
> +
> +			ctrl_elem->desc_ack = (uint8_t *)(uintptr_t)vhost_iova_to_vva(dev, cvq,
> +					desc_iova, &desc_len, VHOST_ACCESS_WO);
> +			if (!ctrl_elem->desc_ack || desc_len != sizeof(uint8_t)) {
> +				VHOST_LOG_CONFIG(dev->ifname, ERR,
> +						"Failed to map ctrl ack descriptor\n");
> +				goto err;
> +			}
> +		} else {
> +			if (ctrl_elem->desc_ack) {
> +				VHOST_LOG_CONFIG(dev->ifname, ERR,
> +						"Unexpected ctrl chain layout\n");
> +				goto err;
> +			}
> +
> +			data_len += desc_len;
> +		}
> +
> +		if (!(descs[desc_idx].flags & VRING_DESC_F_NEXT))
> +			break;
> +
> +		desc_idx = descs[desc_idx].next;
> +	}
> +
> +	desc_idx = ctrl_elem->head_idx;
> +
> +	if (cvq->desc[desc_idx].flags & VRING_DESC_F_INDIRECT)
> +		ctrl_elem->n_descs = 1;
> +	else
> +		ctrl_elem->n_descs = n_descs;
> +
> +	if (!ctrl_elem->desc_ack) {
> +		VHOST_LOG_CONFIG(dev->ifname, ERR, "Missing ctrl ack descriptor\n");
> +		goto err;
> +	}
> +
> +	if (data_len < sizeof(ctrl_elem->ctrl_req->class) + sizeof(ctrl_elem->ctrl_req->command)) {
> +		VHOST_LOG_CONFIG(dev->ifname, ERR, "Invalid control header size\n");
> +		goto err;
> +	}
> +
> +	ctrl_elem->ctrl_req = malloc(data_len);
> +	if (!ctrl_elem->ctrl_req) {
> +		VHOST_LOG_CONFIG(dev->ifname, ERR, "Failed to alloc ctrl request\n");
> +		goto err;
> +	}
> +
> +	ctrl_req = (uint8_t *)ctrl_elem->ctrl_req;
> +
> +	if (cvq->desc[desc_idx].flags & VRING_DESC_F_INDIRECT) {
> +		desc_len = cvq->desc[desc_idx].len;
> +		desc_iova = cvq->desc[desc_idx].addr;
> +
> +		descs = (struct vring_desc *)(uintptr_t)vhost_iova_to_vva(dev, cvq,
> +					desc_iova, &desc_len, VHOST_ACCESS_RO);
> +		if (!descs || desc_len != cvq->desc[desc_idx].len) {
> +			VHOST_LOG_CONFIG(dev->ifname, ERR, "Failed to map ctrl indirect descs\n");
> +			goto free_err;
> +		}
> +
> +		desc_idx = 0;
> +	} else {
> +		descs = cvq->desc;
> +	}
> +
> +	while (!(descs[desc_idx].flags & VRING_DESC_F_WRITE)) {
> +		desc_len = descs[desc_idx].len;
> +		desc_iova = descs[desc_idx].addr;
> +
> +		desc_addr = vhost_iova_to_vva(dev, cvq, desc_iova, &desc_len, VHOST_ACCESS_RO);
> +		if (!desc_addr || desc_len < descs[desc_idx].len) {
> +			VHOST_LOG_CONFIG(dev->ifname, ERR, "Failed to map ctrl descriptor\n");
> +			goto free_err;
> +		}
> +
> +		memcpy(ctrl_req, (void *)(uintptr_t)desc_addr, desc_len);
> +		ctrl_req += desc_len;
> +
> +		if (!(descs[desc_idx].flags & VRING_DESC_F_NEXT))
> +			break;
> +
> +		desc_idx = descs[desc_idx].next;
> +	}
> +
> +	cvq->last_avail_idx++;
> +	if (cvq->last_avail_idx >= cvq->size)
> +		cvq->last_avail_idx -= cvq->size;
> +
> +	if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
> +		vhost_avail_event(cvq) = cvq->last_avail_idx;
> +
> +	return 1;
> +
> +free_err:
> +	free(ctrl_elem->ctrl_req);
> +err:
> +	cvq->last_avail_idx++;
> +	if (cvq->last_avail_idx >= cvq->size)
> +		cvq->last_avail_idx -= cvq->size;
> +
> +	if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
> +		vhost_avail_event(cvq) = cvq->last_avail_idx;
> +
> +	return -1;
> +}
> +
> +static uint8_t
> +virtio_net_ctrl_handle_req(struct virtio_net *dev, struct virtio_net_ctrl *ctrl_req)
> +{
> +	uint8_t ret = VIRTIO_NET_ERR;
> +
> +	if (ctrl_req->class == VIRTIO_NET_CTRL_MQ &&
> +			ctrl_req->command == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
> +		uint16_t queue_pairs;
> +		uint32_t i;
> +
> +		queue_pairs = *(uint16_t *)(uintptr_t)ctrl_req->command_data;
> +		VHOST_LOG_CONFIG(dev->ifname, INFO, "Ctrl req: MQ %u queue pairs\n", queue_pairs);
> +		ret = VIRTIO_NET_OK;
> +
> +		for (i = 0; i < dev->nr_vring; i++) {
> +			struct vhost_virtqueue *vq = dev->virtqueue[i];
> +			bool enable;
> +
> +			if (vq == dev->cvq)
> +				continue;
> +
> +			if (i < queue_pairs * 2)
> +				enable = true;
> +			else
> +				enable = false;
> +
> +			vq->enabled = enable;
> +			if (dev->notify_ops->vring_state_changed)
> +				dev->notify_ops->vring_state_changed(dev->vid, i, enable);
> +		}
> +	}
> +
> +	return ret;
> +}
> +
> +static int
> +virtio_net_ctrl_push(struct virtio_net *dev, struct virtio_net_ctrl_elem *ctrl_elem)
> +{
> +	struct vhost_virtqueue *cvq = dev->cvq;
> +	struct vring_used_elem *used_elem;
> +
> +	used_elem = &cvq->used->ring[cvq->last_used_idx];
> +	used_elem->id = ctrl_elem->head_idx;
> +	used_elem->len = ctrl_elem->n_descs;
> +
> +	cvq->last_used_idx++;
> +	if (cvq->last_used_idx >= cvq->size)
> +		cvq->last_used_idx -= cvq->size;
> +
> +	__atomic_store_n(&cvq->used->idx, cvq->last_used_idx, __ATOMIC_RELEASE);
> +
> +	vhost_vring_call_split(dev, dev->cvq);
> +
> +	free(ctrl_elem->ctrl_req);
> +
> +	return 0;
> +}
> +
> +int
> +virtio_net_ctrl_handle(struct virtio_net *dev)
> +{
> +	int ret = 0;
> +
> +	if (dev->features & (1ULL << VIRTIO_F_RING_PACKED)) {
> +		VHOST_LOG_CONFIG(dev->ifname, ERR, "Packed ring not supported yet\n");
> +		return -1;
> +	}
> +
> +	if (!dev->cvq) {
> +		VHOST_LOG_CONFIG(dev->ifname, ERR, "missing control queue\n");
> +		return -1;
> +	}
> +
> +	rte_spinlock_lock(&dev->cvq->access_lock);
> +	vhost_user_iotlb_rd_lock(dev->cvq);
> +
> +	while (1) {
> +		struct virtio_net_ctrl_elem ctrl_elem;
> +
> +		memset(&ctrl_elem, 0, sizeof(struct virtio_net_ctrl_elem));
> +
> +		ret = virtio_net_ctrl_pop(dev, dev->cvq, &ctrl_elem);
> +		if (ret <= 0)
> +			break;
> +
> +		*ctrl_elem.desc_ack = virtio_net_ctrl_handle_req(dev, ctrl_elem.ctrl_req);
> +
> +		ret = virtio_net_ctrl_push(dev, &ctrl_elem);
> +		if (ret < 0)
> +			break;
> +	}
> +
> +	vhost_user_iotlb_rd_unlock(dev->cvq);
> +	rte_spinlock_unlock(&dev->cvq->access_lock);
> +
> +	return ret;
> +}
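
To make the expected integration a bit more concrete: this entry point is meant
to be called when the backend is notified of a kick on the control queue (for
VDUSE, via an eventfd handled later in this series). A minimal sketch of such a
caller, with a hypothetical handler name and wiring, since the real call site
only appears in the VDUSE events handler patch:

/* Hypothetical control queue kick handler; the real VDUSE event
 * handling is added later in this series.
 */
static void
cvq_kick_handler(int kick_fd, void *arg)
{
	struct virtio_net *dev = arg;
	uint64_t count;

	/* Drain the eventfd so the next kick triggers a new notification. */
	if (read(kick_fd, &count, sizeof(count)) < 0)
		return;

	/* Pop, handle and complete all pending control requests. */
	if (virtio_net_ctrl_handle(dev) < 0)
		VHOST_LOG_CONFIG(dev->ifname, ERR,
				"failed to handle control queue requests\n");
}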
> diff --git a/lib/vhost/virtio_net_ctrl.h b/lib/vhost/virtio_net_ctrl.h
> new file mode 100644
> index 0000000000..9a90f4b9da
> --- /dev/null
> +++ b/lib/vhost/virtio_net_ctrl.h
> @@ -0,0 +1,10 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright (c) 2023 Red Hat, Inc.
> + */
> +
> +#ifndef _VIRTIO_NET_CTRL_H
> +#define _VIRTIO_NET_CTRL_H
> +
> +int virtio_net_ctrl_handle(struct virtio_net *dev);
> +
> +#endif
> --
> 2.40.1

Reviewed-by: Chenbo Xia <chenbo.xia@intel.com> 

Thread overview: 50+ messages
2023-05-25 16:25 [PATCH v3 00/28] Add VDUSE support to Vhost library Maxime Coquelin
2023-05-25 16:25 ` [PATCH v3 01/28] vhost: fix missing guest notif stat increment Maxime Coquelin
2023-06-01 19:59   ` Maxime Coquelin
2023-05-25 16:25 ` [PATCH v3 02/28] vhost: fix invalid call FD handling Maxime Coquelin
2023-05-25 16:25 ` [PATCH v3 03/28] vhost: fix IOTLB entries overlap check with previous entry Maxime Coquelin
2023-05-25 16:25 ` [PATCH v3 04/28] vhost: add helper of IOTLB entries coredump Maxime Coquelin
2023-05-26  8:46   ` David Marchand
2023-06-01 13:43     ` Maxime Coquelin
2023-05-25 16:25 ` [PATCH v3 05/28] vhost: add helper for IOTLB entries shared page check Maxime Coquelin
2023-05-25 16:25 ` [PATCH v3 06/28] vhost: don't dump unneeded pages with IOTLB Maxime Coquelin
2023-05-25 16:25 ` [PATCH v3 07/28] vhost: change to single IOTLB cache per device Maxime Coquelin
2023-05-29  6:32   ` Xia, Chenbo
2023-05-25 16:25 ` [PATCH v3 08/28] vhost: add offset field to IOTLB entries Maxime Coquelin
2023-05-25 16:25 ` [PATCH v3 09/28] vhost: add page size info to IOTLB entry Maxime Coquelin
2023-05-29  6:32   ` Xia, Chenbo
2023-05-25 16:25 ` [PATCH v3 10/28] vhost: retry translating IOVA after IOTLB miss Maxime Coquelin
2023-05-25 16:25 ` [PATCH v3 11/28] vhost: introduce backend ops Maxime Coquelin
2023-05-25 16:25 ` [PATCH v3 12/28] vhost: add IOTLB cache entry removal callback Maxime Coquelin
2023-05-29  6:33   ` Xia, Chenbo
2023-05-25 16:25 ` [PATCH v3 13/28] vhost: add helper for IOTLB misses Maxime Coquelin
2023-05-29  6:33   ` Xia, Chenbo
2023-05-25 16:25 ` [PATCH v3 14/28] vhost: add helper for interrupt injection Maxime Coquelin
2023-05-26  8:54   ` David Marchand
2023-06-01 13:58     ` Maxime Coquelin
2023-05-25 16:25 ` [PATCH v3 15/28] vhost: add API to set max queue pairs Maxime Coquelin
2023-05-26  8:58   ` David Marchand
2023-06-01 14:00     ` Maxime Coquelin
2023-05-25 16:25 ` [PATCH v3 16/28] net/vhost: use " Maxime Coquelin
2023-05-25 16:25 ` [PATCH v3 17/28] vhost: add control virtqueue support Maxime Coquelin
2023-05-29  6:51   ` Xia, Chenbo [this message]
2023-05-25 16:25 ` [PATCH v3 18/28] vhost: add VDUSE device creation and destruction Maxime Coquelin
2023-05-26  9:11   ` David Marchand
2023-06-01 14:05     ` Maxime Coquelin
2023-05-25 16:25 ` [PATCH v3 19/28] vhost: add VDUSE callback for IOTLB miss Maxime Coquelin
2023-05-25 16:25 ` [PATCH v3 20/28] vhost: add VDUSE callback for IOTLB entry removal Maxime Coquelin
2023-05-29  6:51   ` Xia, Chenbo
2023-05-25 16:25 ` [PATCH v3 21/28] vhost: add VDUSE callback for IRQ injection Maxime Coquelin
2023-05-25 16:25 ` [PATCH v3 22/28] vhost: add VDUSE events handler Maxime Coquelin
2023-05-25 16:25 ` [PATCH v3 23/28] vhost: add support for virtqueue state get event Maxime Coquelin
2023-05-25 16:25 ` [PATCH v3 24/28] vhost: add support for VDUSE status set event Maxime Coquelin
2023-05-25 16:25 ` [PATCH v3 25/28] vhost: add support for VDUSE IOTLB update event Maxime Coquelin
2023-05-29  6:52   ` Xia, Chenbo
2023-05-25 16:25 ` [PATCH v3 26/28] vhost: add VDUSE device startup Maxime Coquelin
2023-05-25 16:25 ` [PATCH v3 27/28] vhost: add multiqueue support to VDUSE Maxime Coquelin
2023-05-25 16:25 ` [PATCH v3 28/28] vhost: add VDUSE device stop Maxime Coquelin
2023-05-29  6:53   ` Xia, Chenbo
2023-06-01 18:48     ` Maxime Coquelin
2023-05-26  9:14 ` [PATCH v3 00/28] Add VDUSE support to Vhost library David Marchand
2023-06-01 14:59   ` Maxime Coquelin
2023-06-01 15:18     ` David Marchand
