From: Maxime Coquelin <maxime.coquelin@redhat.com>
To: Jens Freimann <jfreimann@redhat.com>, dev@dpdk.org
Cc: tiwei.bie@intel.com
Subject: Re: [dpdk-dev] [PATCH v2 1/2] net/virtio-user: ctrl vq support for packed
Date: Thu, 10 Jan 2019 15:23:29 +0100
Message-ID: <bc0c7f3d-3495-7ace-36c7-b44fe0a9256a@redhat.com>
In-Reply-To: <20190110131751.32670-2-jfreimann@redhat.com>
On 1/10/19 2:17 PM, Jens Freimann wrote:
> Add support to virtio-user for control virtqueues.
>
> Signed-off-by: Jens Freimann <jfreimann@redhat.com>
> ---
>  .../net/virtio/virtio_user/virtio_user_dev.c  | 92 ++++++++++++++++++-
>  .../net/virtio/virtio_user/virtio_user_dev.h  |  8 +-
>  drivers/net/virtio/virtio_user_ethdev.c       | 49 +++++++++-
>  3 files changed, 141 insertions(+), 8 deletions(-)
>
> diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.c b/drivers/net/virtio/virtio_user/virtio_user_dev.c
> index b9044faff..49fcf48b9 100644
> --- a/drivers/net/virtio/virtio_user/virtio_user_dev.c
> +++ b/drivers/net/virtio/virtio_user/virtio_user_dev.c
> @@ -43,15 +43,26 @@ virtio_user_kick_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
>  	struct vhost_vring_file file;
>  	struct vhost_vring_state state;
>  	struct vring *vring = &dev->vrings[queue_sel];
> +	struct vring_packed *pq_vring = &dev->packed_vrings[queue_sel];
>  	struct vhost_vring_addr addr = {
>  		.index = queue_sel,
> -		.desc_user_addr = (uint64_t)(uintptr_t)vring->desc,
> -		.avail_user_addr = (uint64_t)(uintptr_t)vring->avail,
> -		.used_user_addr = (uint64_t)(uintptr_t)vring->used,
>  		.log_guest_addr = 0,
>  		.flags = 0, /* disable log */
>  	};
>
> +	if (dev->features & (1ULL << VIRTIO_F_RING_PACKED)) {
> +		addr.desc_user_addr =
> +			(uint64_t)(uintptr_t)pq_vring->desc_packed;
> +		addr.avail_user_addr =
> +			(uint64_t)(uintptr_t)pq_vring->driver_event;
> +		addr.used_user_addr =
> +			(uint64_t)(uintptr_t)pq_vring->device_event;
> +	} else {
> +		addr.desc_user_addr = (uint64_t)(uintptr_t)vring->desc;
> +		addr.avail_user_addr = (uint64_t)(uintptr_t)vring->avail;
> +		addr.used_user_addr = (uint64_t)(uintptr_t)vring->used;
> +	}
> +
>  	state.index = queue_sel;
>  	state.num = vring->num;
>  	dev->ops->send_request(dev, VHOST_USER_SET_VRING_NUM, &state);
> @@ -620,6 +631,81 @@ virtio_user_handle_ctrl_msg(struct virtio_user_dev *dev, struct vring *vring,
>  	return n_descs;
>  }
>
> +static inline int
> +desc_is_avail(struct vring_packed_desc *desc, bool wrap_counter)
> +{
> +	return wrap_counter == !!(desc->flags & VRING_DESC_F_AVAIL(1)) &&
> +		wrap_counter != !!(desc->flags & VRING_DESC_F_USED(1));
> +}
> +
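The availability check itself looks correct to me. For anyone following
along, here is a standalone sanity check of the logic, an illustration
only: the macros below are hypothetical stand-ins for the patch's
VRING_DESC_F_AVAIL()/VRING_DESC_F_USED(), assuming the spec's bit
positions (avail is bit 7, used is bit 15):

	#include <assert.h>
	#include <stdbool.h>
	#include <stdint.h>

	#define AVAIL_BIT ((uint16_t)1 << 7)   /* like VIRTQ_DESC_F_AVAIL */
	#define USED_BIT  ((uint16_t)1 << 15)  /* like VIRTQ_DESC_F_USED */

	/* Same predicate as desc_is_avail() above. */
	static int is_avail(uint16_t flags, bool wc)
	{
		return wc == !!(flags & AVAIL_BIT) && wc != !!(flags & USED_BIT);
	}

	int main(void)
	{
		/* Driver published a descriptor, device wrap counter is 1. */
		assert(is_avail(AVAIL_BIT, true));
		/* Device marked it used: no longer available. */
		assert(!is_avail(AVAIL_BIT | USED_BIT, true));
		/* Next lap: both wrap counters flipped to 0. */
		assert(is_avail(USED_BIT, false));
		return 0;
	}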
> +static uint32_t
> +virtio_user_handle_ctrl_msg_pq(struct virtio_user_dev *dev,
> +			       struct vring_packed *vring,
> +			       uint16_t idx_hdr)
> +{
> +	struct virtio_net_ctrl_hdr *hdr;
> +	virtio_net_ctrl_ack status = ~0;
> +	uint16_t idx_data, idx_status;
> +	/* initialize to one, header is first */
> +	uint32_t n_descs = 1;
> +
> +	/* locate desc for header, data, and status */
> +	idx_data = idx_hdr + 1;
> +	n_descs++;
> +
> +	idx_status = idx_data;
> +	while (vring->desc_packed[idx_status].flags & VRING_DESC_F_NEXT) {
> +		idx_status++;
> +		n_descs++;
> +	}
> +
> +	hdr = (void *)(uintptr_t)vring->desc_packed[idx_hdr].addr;
> +	if (hdr->class == VIRTIO_NET_CTRL_MQ &&
> +	    hdr->cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
> +		uint16_t queues;
> +
> +		queues = *(uint16_t *)(uintptr_t)
> +				vring->desc_packed[idx_data].addr;
> +		status = virtio_user_handle_mq(dev, queues);
> +	}
> +
> +	/* Update status */
> +	*(virtio_net_ctrl_ack *)(uintptr_t)
> +		vring->desc_packed[idx_status].addr = status;
> +
> +	return n_descs;
> +}
> +
> +void
> +virtio_user_handle_cq_packed(struct virtio_user_dev *dev, uint16_t queue_idx,
> +			     struct virtqueue *vq)
> +{
> +	struct vring_packed *vring = &dev->packed_vrings[queue_idx];
> +	uint16_t id, n_descs;
> +	int16_t n;
> +
> +	while (desc_is_avail(&vring->desc_packed[vq->vq_used_cons_idx],
> +			     vq->avail_wrap_counter)) {
> +		id = vring->desc_packed[vq->vq_used_cons_idx].id;
> +
> +		n_descs = virtio_user_handle_ctrl_msg_pq(dev, vring, id);
> +
> +		vring->desc_packed[vq->vq_used_cons_idx].len = n_descs;
> +		vring->desc_packed[vq->vq_used_cons_idx].id =
> +			vq->vq_used_cons_idx;
Are the above assignments necessary? I don't think so.
> +		n = vq->vq_used_cons_idx + n_descs - 1;
> +		do {
> +			vring->desc_packed[vq->vq_used_cons_idx].flags |=
> +				VRING_DESC_F_AVAIL(vq->used_wrap_counter) |
> +				VRING_DESC_F_USED(vq->used_wrap_counter);
> +			if (vq->vq_used_cons_idx >= dev->queue_size) {
> +				vq->vq_used_cons_idx -= dev->queue_size;
> +				vq->used_wrap_counter ^= 1;
> +			}
> +		} while (++vq->vq_used_cons_idx <= n);
I think it still does not work.
In case of a wrap, n will be greater than the queue size, but
vq->vq_used_cons_idx wraps back below the queue size each time it
reaches it, so it can never exceed n and the loop never terminates.
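Something along these lines would avoid relying on n altogether.
An untested sketch reusing this patch's fields, just to illustrate the
idea, not a request for this exact form:

	uint16_t i;

	for (i = 0; i < n_descs; i++) {
		/* Flag the descriptor as used, at a valid index. */
		vring->desc_packed[vq->vq_used_cons_idx].flags |=
			VRING_DESC_F_AVAIL(vq->used_wrap_counter) |
			VRING_DESC_F_USED(vq->used_wrap_counter);
		/* Wrap the index right after incrementing it, and flip
		 * the used wrap counter when we do. */
		if (++vq->vq_used_cons_idx >= dev->queue_size) {
			vq->vq_used_cons_idx -= dev->queue_size;
			vq->used_wrap_counter ^= 1;
		}
	}

This also keeps the index in [0, queue_size) before it is ever used to
address the ring, which the current version does not.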
> +	}
> +}
> +
>  void
>  virtio_user_handle_cq(struct virtio_user_dev *dev, uint16_t queue_idx)
>  {
> diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.h b/drivers/net/virtio/virtio_user/virtio_user_dev.h
> index 672a8161a..8be8ca622 100644
> --- a/drivers/net/virtio/virtio_user/virtio_user_dev.h
> +++ b/drivers/net/virtio/virtio_user/virtio_user_dev.h
> @@ -39,7 +39,11 @@ struct virtio_user_dev {
>  	uint16_t port_id;
>  	uint8_t mac_addr[ETHER_ADDR_LEN];
>  	char path[PATH_MAX];
> -	struct vring vrings[VIRTIO_MAX_VIRTQUEUES];
> +	union {
> +		struct vring vrings[VIRTIO_MAX_VIRTQUEUES];
> +		struct vring_packed packed_vrings[VIRTIO_MAX_VIRTQUEUES];
> +	};
> +
>  	struct virtio_user_backend_ops *ops;
>  	pthread_mutex_t mutex;
>  	bool started;
> @@ -53,5 +57,7 @@ int virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
>  			 int mrg_rxbuf, int in_order, int packed_vq);
>  void virtio_user_dev_uninit(struct virtio_user_dev *dev);
>  void virtio_user_handle_cq(struct virtio_user_dev *dev, uint16_t queue_idx);
> +void virtio_user_handle_cq_packed(struct virtio_user_dev *dev,
> +				  uint16_t queue_idx, struct virtqueue *vq);
>  uint8_t virtio_user_handle_mq(struct virtio_user_dev *dev, uint16_t q_pairs);
>  #endif
> diff --git a/drivers/net/virtio/virtio_user_ethdev.c b/drivers/net/virtio/virtio_user_ethdev.c
> index 2df6eb695..efc375cd7 100644
> --- a/drivers/net/virtio/virtio_user_ethdev.c
> +++ b/drivers/net/virtio/virtio_user_ethdev.c
> @@ -271,10 +271,36 @@ virtio_user_get_queue_num(struct virtio_hw *hw, uint16_t queue_id __rte_unused)
>  	return dev->queue_size;
>  }
>
> -static int
> -virtio_user_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)
> +static void
> +virtio_user_setup_queue_packed(struct virtqueue *vq,
> +			       struct virtio_user_dev *dev)
> +
> +{
> +	uint16_t queue_idx = vq->vq_queue_index;
> +	uint64_t desc_addr;
> +	uint64_t avail_addr;
> +	uint64_t used_addr;
> +
> +	desc_addr = (uintptr_t)vq->vq_ring_virt_mem;
> +	avail_addr = desc_addr + vq->vq_nentries *
> +		sizeof(struct vring_packed_desc);
> +	used_addr = RTE_ALIGN_CEIL(avail_addr +
> +				   sizeof(struct vring_packed_desc_event),
> +				   VIRTIO_PCI_VRING_ALIGN);
> +	dev->packed_vrings[queue_idx].num = vq->vq_nentries;
> +	dev->packed_vrings[queue_idx].desc_packed =
> +		(void *)(uintptr_t)desc_addr;
> +	dev->packed_vrings[queue_idx].driver_event =
> +		(void *)(uintptr_t)avail_addr;
> +	dev->packed_vrings[queue_idx].device_event =
> +		(void *)(uintptr_t)used_addr;
> +	vq->avail_wrap_counter = true;
> +	vq->used_wrap_counter = true;
> +}
> +
> +static void
> +virtio_user_setup_queue_split(struct virtqueue *vq, struct virtio_user_dev *dev)
>  {
> -	struct virtio_user_dev *dev = virtio_user_get_dev(hw);
>  	uint16_t queue_idx = vq->vq_queue_index;
>  	uint64_t desc_addr, avail_addr, used_addr;
>
> @@ -288,6 +314,17 @@ virtio_user_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)
>  	dev->vrings[queue_idx].desc = (void *)(uintptr_t)desc_addr;
>  	dev->vrings[queue_idx].avail = (void *)(uintptr_t)avail_addr;
>  	dev->vrings[queue_idx].used = (void *)(uintptr_t)used_addr;
> +}
> +
> +static int
> +virtio_user_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)
> +{
> +	struct virtio_user_dev *dev = virtio_user_get_dev(hw);
> +
> +	if (vtpci_packed_queue(hw))
> +		virtio_user_setup_queue_packed(vq, dev);
> +	else
> +		virtio_user_setup_queue_split(vq, dev);
>
>  	return 0;
>  }
> @@ -317,7 +354,11 @@ virtio_user_notify_queue(struct virtio_hw *hw, struct virtqueue *vq)
>  	struct virtio_user_dev *dev = virtio_user_get_dev(hw);
>
>  	if (hw->cvq && (hw->cvq->vq == vq)) {
> -		virtio_user_handle_cq(dev, vq->vq_queue_index);
> +		if (vtpci_packed_queue(vq->hw))
> +			virtio_user_handle_cq_packed(dev, vq->vq_queue_index,
> +						     vq);
> +		else
> +			virtio_user_handle_cq(dev, vq->vq_queue_index);
>  		return;
>  	}
>
>