From: "Xia, Chenbo" <chenbo.xia@intel.com>
To: Maxime Coquelin <maxime.coquelin@redhat.com>,
"dev@dpdk.org" <dev@dpdk.org>,
"david.marchand@redhat.com" <david.marchand@redhat.com>,
"mkp@redhat.com" <mkp@redhat.com>,
"fbl@redhat.com" <fbl@redhat.com>,
"jasowang@redhat.com" <jasowang@redhat.com>,
"Liang, Cunming" <cunming.liang@intel.com>,
"Xie, Yongji" <xieyongji@bytedance.com>,
"echaudro@redhat.com" <echaudro@redhat.com>,
"eperezma@redhat.com" <eperezma@redhat.com>,
"amorenoz@redhat.com" <amorenoz@redhat.com>
Subject: RE: [RFC 17/27] vhost: add control virtqueue support
Date: Tue, 9 May 2023 05:29:10 +0000
Message-ID: <SN6PR11MB3504722192938DDC0E92C69C9C769@SN6PR11MB3504.namprd11.prod.outlook.com>
In-Reply-To: <20230331154259.1447831-18-maxime.coquelin@redhat.com>
Hi Maxime,
> -----Original Message-----
> From: Maxime Coquelin <maxime.coquelin@redhat.com>
> Sent: Friday, March 31, 2023 11:43 PM
> To: dev@dpdk.org; david.marchand@redhat.com; Xia, Chenbo
> <chenbo.xia@intel.com>; mkp@redhat.com; fbl@redhat.com;
> jasowang@redhat.com; Liang, Cunming <cunming.liang@intel.com>; Xie, Yongji
> <xieyongji@bytedance.com>; echaudro@redhat.com; eperezma@redhat.com;
> amorenoz@redhat.com
> Cc: Maxime Coquelin <maxime.coquelin@redhat.com>
> Subject: [RFC 17/27] vhost: add control virtqueue support
>
> In order to support multi-queue with VDUSE, having
> control queue support in required.
in -> is
>
> This patch adds control queue implementation, it will be
> used later when adding VDUSE support. Only split ring
> layout is supported for now, packed ring support will be
> added later.
>
> Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
> ---
> lib/vhost/meson.build       |   1 +
> lib/vhost/vhost.h           |   2 +
> lib/vhost/virtio_net_ctrl.c | 282 ++++++++++++++++++++++++++++++++++++
> lib/vhost/virtio_net_ctrl.h |  10 ++
> 4 files changed, 295 insertions(+)
> create mode 100644 lib/vhost/virtio_net_ctrl.c
> create mode 100644 lib/vhost/virtio_net_ctrl.h
>
> diff --git a/lib/vhost/meson.build b/lib/vhost/meson.build
> index 197a51d936..cdcd403df3 100644
> --- a/lib/vhost/meson.build
> +++ b/lib/vhost/meson.build
> @@ -28,6 +28,7 @@ sources = files(
> 'vhost_crypto.c',
> 'vhost_user.c',
> 'virtio_net.c',
> + 'virtio_net_ctrl.c',
> )
> headers = files(
> 'rte_vdpa.h',
> diff --git a/lib/vhost/vhost.h b/lib/vhost/vhost.h
> index 8f0875b4e2..76663aed24 100644
> --- a/lib/vhost/vhost.h
> +++ b/lib/vhost/vhost.h
> @@ -525,6 +525,8 @@ struct virtio_net {
> int postcopy_ufd;
> int postcopy_listening;
>
> + struct vhost_virtqueue *cvq;
> +
> struct rte_vdpa_device *vdpa_dev;
>
> /* context data for the external message handlers */
> diff --git a/lib/vhost/virtio_net_ctrl.c b/lib/vhost/virtio_net_ctrl.c
> new file mode 100644
> index 0000000000..16ea63b42f
> --- /dev/null
> +++ b/lib/vhost/virtio_net_ctrl.c
> @@ -0,0 +1,282 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright (c) 2023 Red Hat, Inc.
> + */
> +
> +#undef RTE_ANNOTATE_LOCKS
> +
> +#include <stdint.h>
> +#include <stdio.h>
> +#include <unistd.h>
> +
> +#include "vhost.h"
> +#include "virtio_net_ctrl.h"
> +
> +struct virtio_net_ctrl {
> + uint8_t class;
> + uint8_t command;
> + uint8_t command_data[];
> +};
> +
> +struct virtio_net_ctrl_elem {
> + struct virtio_net_ctrl *ctrl_req;
> + uint16_t head_idx;
> + uint16_t n_descs;
> + uint8_t *desc_ack;
> +};
> +
> +static int
> +virtio_net_ctrl_pop(struct virtio_net *dev, struct virtio_net_ctrl_elem *ctrl_elem)
> +{
> + struct vhost_virtqueue *cvq = dev->cvq;
> + uint16_t avail_idx, desc_idx, n_descs = 0;
> + uint64_t desc_len, desc_addr, desc_iova, data_len = 0;
> + uint8_t *ctrl_req;
> + struct vring_desc *descs;
> +
> + avail_idx = __atomic_load_n(&cvq->avail->idx, __ATOMIC_ACQUIRE);
> + if (avail_idx == cvq->last_avail_idx) {
> + VHOST_LOG_CONFIG(dev->ifname, DEBUG, "Control queue empty\n");
> + return 0;
> + }
> +
> + desc_idx = cvq->avail->ring[cvq->last_avail_idx];
> + if (desc_idx >= cvq->size) {
> + VHOST_LOG_CONFIG(dev->ifname, ERR, "Out of range desc index,
> dropping\n");
> + goto err;
> + }
> +
> + ctrl_elem->head_idx = desc_idx;
> +
> + if (cvq->desc[desc_idx].flags & VRING_DESC_F_INDIRECT) {
> + desc_len = cvq->desc[desc_idx].len;
> + desc_iova = cvq->desc[desc_idx].addr;
> +
> + descs = (struct vring_desc *)(uintptr_t)vhost_iova_to_vva(dev, cvq,
> + desc_iova, &desc_len, VHOST_ACCESS_RO);
> + if (!descs || desc_len != cvq->desc[desc_idx].len) {
> + VHOST_LOG_CONFIG(dev->ifname, ERR, "Failed to map ctrl
> indirect descs\n");
> + goto err;
> + }
> +
> + desc_idx = 0;
> + } else {
> + descs = cvq->desc;
> + }
> +
> + while (1) {
> + desc_len = descs[desc_idx].len;
> + desc_iova = descs[desc_idx].addr;
> +
> + n_descs++;
> +
> + if (descs[desc_idx].flags & VRING_DESC_F_WRITE) {
> + if (ctrl_elem->desc_ack) {
> + VHOST_LOG_CONFIG(dev->ifname, ERR,
> + "Unexpected ctrl chain layout\n");
> + goto err;
> + }
> +
> + if (desc_len != sizeof(uint8_t)) {
> + VHOST_LOG_CONFIG(dev->ifname, ERR,
> + "Invalid ack size for ctrl req,
> dropping\n");
> + goto err;
> + }
> +
> + ctrl_elem->desc_ack = (uint8_t *)(uintptr_t)vhost_iova_to_vva(dev, cvq,
> + desc_iova, &desc_len, VHOST_ACCESS_WO);
> + if (!ctrl_elem->desc_ack || desc_len != sizeof(uint8_t)) {
> + VHOST_LOG_CONFIG(dev->ifname, ERR,
> + "Failed to map ctrl ack descriptor\n");
> + goto err;
> + }
> + } else {
> + if (ctrl_elem->desc_ack) {
> + VHOST_LOG_CONFIG(dev->ifname, ERR,
> + "Unexpected ctrl chain layout\n");
> + goto err;
> + }
> +
> + data_len += desc_len;
> + }
> +
> + if (!(descs[desc_idx].flags & VRING_DESC_F_NEXT))
> + break;
> +
> + desc_idx = descs[desc_idx].next;
> + }
> +
> + desc_idx = ctrl_elem->head_idx;
> +
> + if (cvq->desc[desc_idx].flags & VRING_DESC_F_INDIRECT)
> + ctrl_elem->n_descs = 1;
> + else
> + ctrl_elem->n_descs = n_descs;
> +
> + if (!ctrl_elem->desc_ack) {
> + VHOST_LOG_CONFIG(dev->ifname, ERR, "Missing ctrl ack
> descriptor\n");
> + goto err;
> + }
> +
> + if (data_len < sizeof(ctrl_elem->ctrl_req->class) + sizeof(ctrl_elem->ctrl_req->command)) {
> + VHOST_LOG_CONFIG(dev->ifname, ERR, "Invalid control header
> size\n");
> + goto err;
> + }
> +
> + ctrl_elem->ctrl_req = malloc(data_len);
> + if (!ctrl_elem->ctrl_req) {
> + VHOST_LOG_CONFIG(dev->ifname, ERR, "Failed to alloc ctrl
> request\n");
> + goto err;
> + }
> +
> + ctrl_req = (uint8_t *)ctrl_elem->ctrl_req;
> +
> + if (cvq->desc[desc_idx].flags & VRING_DESC_F_INDIRECT) {
> + desc_len = cvq->desc[desc_idx].len;
> + desc_iova = cvq->desc[desc_idx].addr;
> +
> + descs = (struct vring_desc *)(uintptr_t)vhost_iova_to_vva(dev, cvq,
> + desc_iova, &desc_len, VHOST_ACCESS_RO);
> + if (!descs || desc_len != cvq->desc[desc_idx].len) {
> + VHOST_LOG_CONFIG(dev->ifname, ERR, "Failed to map ctrl
> indirect descs\n");
> + goto err;
goto free_err? ctrl_elem->ctrl_req has already been allocated at this point, so jumping to err leaks it when mapping the indirect descriptors fails.
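Untested sketch of the change I have in mind, reusing the existing free_err label that frees the request before falling through to the err path:

		if (!descs || desc_len != cvq->desc[desc_idx].len) {
			VHOST_LOG_CONFIG(dev->ifname, ERR, "Failed to map ctrl indirect descs\n");
			goto free_err;
		}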
Thanks,
Chenbo
> + }
> +
> + desc_idx = 0;
> + } else {
> + descs = cvq->desc;
> + }
> +
> + while (!(descs[desc_idx].flags & VRING_DESC_F_WRITE)) {
> + desc_len = descs[desc_idx].len;
> + desc_iova = descs[desc_idx].addr;
> +
> + desc_addr = vhost_iova_to_vva(dev, cvq, desc_iova, &desc_len, VHOST_ACCESS_RO);
> + if (!desc_addr || desc_len < descs[desc_idx].len) {
> + VHOST_LOG_CONFIG(dev->ifname, ERR, "Failed to map ctrl
> descriptor\n");
> + goto free_err;
> + }
> +
> + memcpy(ctrl_req, (void *)(uintptr_t)desc_addr, desc_len);
> + ctrl_req += desc_len;
> +
> + if (!(descs[desc_idx].flags & VRING_DESC_F_NEXT))
> + break;
> +
> + desc_idx = descs[desc_idx].next;
> + }
> +
> + cvq->last_avail_idx++;
> + if (cvq->last_avail_idx >= cvq->size)
> + cvq->last_avail_idx -= cvq->size;
> +
> + if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
> + vhost_avail_event(cvq) = cvq->last_avail_idx;
> +
> + return 1;
> +
> +free_err:
> + free(ctrl_elem->ctrl_req);
> +err:
> + cvq->last_avail_idx++;
> + if (cvq->last_avail_idx >= cvq->size)
> + cvq->last_avail_idx -= cvq->size;
> +
> + if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
> + vhost_avail_event(cvq) = cvq->last_avail_idx;
> +
> + return -1;
> +}
> +
> +static uint8_t
> +virtio_net_ctrl_handle_req(struct virtio_net *dev, struct virtio_net_ctrl *ctrl_req)
> +{
> + uint8_t ret = VIRTIO_NET_ERR;
> +
> + if (ctrl_req->class == VIRTIO_NET_CTRL_MQ &&
> + ctrl_req->command == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
> + uint16_t queue_pairs;
> + uint32_t i;
> +
> + queue_pairs = *(uint16_t *)(uintptr_t)ctrl_req->command_data;
> + VHOST_LOG_CONFIG(dev->ifname, INFO, "Ctrl req: MQ %u queue
> pairs\n", queue_pairs);
> + ret = VIRTIO_NET_OK;
> +
> + for (i = 0; i < dev->nr_vring; i++) {
> + struct vhost_virtqueue *vq = dev->virtqueue[i];
> + bool enable;
> +
> + if (vq == dev->cvq)
> + continue;
> +
> + if (i < queue_pairs * 2)
> + enable = true;
> + else
> + enable = false;
> +
> + vq->enabled = enable;
> + if (dev->notify_ops->vring_state_changed)
> + dev->notify_ops->vring_state_changed(dev->vid, i, enable);
> + }
> + }
> +
> + return ret;
> +}
> +
> +static int
> +virtio_net_ctrl_push(struct virtio_net *dev, struct virtio_net_ctrl_elem *ctrl_elem)
> +{
> + struct vhost_virtqueue *cvq = dev->cvq;
> + struct vring_used_elem *used_elem;
> +
> + used_elem = &cvq->used->ring[cvq->last_used_idx];
> + used_elem->id = ctrl_elem->head_idx;
> + used_elem->len = ctrl_elem->n_descs;
> +
> + cvq->last_used_idx++;
> + if (cvq->last_used_idx >= cvq->size)
> + cvq->last_used_idx -= cvq->size;
> +
> + __atomic_store_n(&cvq->used->idx, cvq->last_used_idx, __ATOMIC_RELEASE);
> +
> + free(ctrl_elem->ctrl_req);
> +
> + return 0;
> +}
> +
> +int
> +virtio_net_ctrl_handle(struct virtio_net *dev)
> +{
> + int ret = 0;
> +
> + if (dev->features & (1ULL << VIRTIO_F_RING_PACKED)) {
> + VHOST_LOG_CONFIG(dev->ifname, ERR, "Packed ring not supported
> yet\n");
> + return -1;
> + }
> +
> + if (!dev->cvq) {
> + VHOST_LOG_CONFIG(dev->ifname, ERR, "missing control queue\n");
> + return -1;
> + }
> +
> + rte_spinlock_lock(&dev->cvq->access_lock);
> +
> + while (1) {
> + struct virtio_net_ctrl_elem ctrl_elem;
> +
> + memset(&ctrl_elem, 0, sizeof(struct virtio_net_ctrl_elem));
> +
> + ret = virtio_net_ctrl_pop(dev, &ctrl_elem);
> + if (ret <= 0)
> + break;
> +
> + *ctrl_elem.desc_ack = virtio_net_ctrl_handle_req(dev, ctrl_elem.ctrl_req);
> +
> + ret = virtio_net_ctrl_push(dev, &ctrl_elem);
> + if (ret < 0)
> + break;
> + }
> +
> + rte_spinlock_unlock(&dev->cvq->access_lock);
> +
> + return ret;
> +}
> diff --git a/lib/vhost/virtio_net_ctrl.h b/lib/vhost/virtio_net_ctrl.h
> new file mode 100644
> index 0000000000..9a90f4b9da
> --- /dev/null
> +++ b/lib/vhost/virtio_net_ctrl.h
> @@ -0,0 +1,10 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright (c) 2023 Red Hat, Inc.
> + */
> +
> +#ifndef _VIRTIO_NET_CTRL_H
> +#define _VIRTIO_NET_CTRL_H
> +
> +int virtio_net_ctrl_handle(struct virtio_net *dev);
> +
> +#endif
> --
> 2.39.2