From: Maxime Coquelin <maxime.coquelin@redhat.com>
To: tiwei.bie@intel.com, zhihong.wang@intel.com, amorenoz@redhat.com,
	xiao.w.wang@intel.com, dev@dpdk.org, jfreimann@redhat.com
Cc: stable@dpdk.org, Maxime Coquelin <maxime.coquelin@redhat.com>
Date: Thu, 29 Aug 2019 09:59:48 +0200
Message-Id: <20190829080000.20806-4-maxime.coquelin@redhat.com>
In-Reply-To: <20190829080000.20806-1-maxime.coquelin@redhat.com>
References: <20190829080000.20806-1-maxime.coquelin@redhat.com>
Subject: [dpdk-stable] [PATCH 03/15] net/virtio: move control path functions to virtqueue file

The Virtio-vdpa driver needs to implement the control path, so move the
related functions to the virtqueue file so that they can be used by both
the Virtio PMD and the Virtio-vdpa driver.

Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
 drivers/net/virtio/virtio_ethdev.c | 252 ----------------------------
 drivers/net/virtio/virtqueue.c     | 255 +++++++++++++++++++++++++++++
 drivers/net/virtio/virtqueue.h     |   5 +
 3 files changed, 260 insertions(+), 252 deletions(-)

diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index f96588b9d..3682ee318 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -140,226 +140,6 @@ static const struct rte_virtio_xstats_name_off rte_virtio_txq_stat_strings[] = {
 
 struct virtio_hw_internal virtio_hw_internal[RTE_MAX_ETHPORTS];
 
-static struct virtio_pmd_ctrl *
-virtio_send_command_packed(struct virtnet_ctl *cvq,
-			   struct virtio_pmd_ctrl *ctrl,
-			   int *dlen, int pkt_num)
-{
-	struct virtqueue *vq = cvq->vq;
-	int head;
-	struct vring_packed_desc *desc = vq->vq_packed.ring.desc;
-	struct virtio_pmd_ctrl *result;
-	uint16_t flags;
-	int sum = 0;
-	int nb_descs = 0;
-	int k;
-
-	/*
-	 * Format is enforced in qemu code:
-	 * One TX packet for header;
-	 * At least one TX packet per argument;
-	 * One RX packet for ACK.
-	 */
-	head = vq->vq_avail_idx;
-	flags = vq->vq_packed.cached_flags;
-	desc[head].addr = cvq->virtio_net_hdr_mem;
-	desc[head].len = sizeof(struct virtio_net_ctrl_hdr);
-	vq->vq_free_cnt--;
-	nb_descs++;
-	if (++vq->vq_avail_idx >= vq->vq_nentries) {
-		vq->vq_avail_idx -= vq->vq_nentries;
-		vq->vq_packed.cached_flags ^= VRING_PACKED_DESC_F_AVAIL_USED;
-	}
-
-	for (k = 0; k < pkt_num; k++) {
-		desc[vq->vq_avail_idx].addr = cvq->virtio_net_hdr_mem
-			+ sizeof(struct virtio_net_ctrl_hdr)
-			+ sizeof(ctrl->status) + sizeof(uint8_t) * sum;
-		desc[vq->vq_avail_idx].len = dlen[k];
-		desc[vq->vq_avail_idx].flags = VRING_DESC_F_NEXT |
-			vq->vq_packed.cached_flags;
-		sum += dlen[k];
-		vq->vq_free_cnt--;
-		nb_descs++;
-		if (++vq->vq_avail_idx >= vq->vq_nentries) {
-			vq->vq_avail_idx -= vq->vq_nentries;
-			vq->vq_packed.cached_flags ^=
-				VRING_PACKED_DESC_F_AVAIL_USED;
-		}
-	}
-
-	desc[vq->vq_avail_idx].addr = cvq->virtio_net_hdr_mem
-		+ sizeof(struct virtio_net_ctrl_hdr);
-	desc[vq->vq_avail_idx].len = sizeof(ctrl->status);
-	desc[vq->vq_avail_idx].flags = VRING_DESC_F_WRITE |
-		vq->vq_packed.cached_flags;
-	vq->vq_free_cnt--;
-	nb_descs++;
-	if (++vq->vq_avail_idx >= vq->vq_nentries) {
-		vq->vq_avail_idx -= vq->vq_nentries;
-		vq->vq_packed.cached_flags ^= VRING_PACKED_DESC_F_AVAIL_USED;
-	}
-
-	virtio_wmb(vq->hw->weak_barriers);
-	desc[head].flags = VRING_DESC_F_NEXT | flags;
-
-	virtio_wmb(vq->hw->weak_barriers);
-	virtqueue_notify(vq);
-
-	/* wait for used descriptors in virtqueue */
-	while (!desc_is_used(&desc[head], vq))
-		usleep(100);
-
-	virtio_rmb(vq->hw->weak_barriers);
-
-	/* now get used descriptors */
-	vq->vq_free_cnt += nb_descs;
-	vq->vq_used_cons_idx += nb_descs;
-	if (vq->vq_used_cons_idx >= vq->vq_nentries) {
-		vq->vq_used_cons_idx -= vq->vq_nentries;
-		vq->vq_packed.used_wrap_counter ^= 1;
-	}
-
-	PMD_INIT_LOG(DEBUG, "vq->vq_free_cnt=%d\n"
-			"vq->vq_avail_idx=%d\n"
-			"vq->vq_used_cons_idx=%d\n"
-			"vq->vq_packed.cached_flags=0x%x\n"
-			"vq->vq_packed.used_wrap_counter=%d\n",
-			vq->vq_free_cnt,
-			vq->vq_avail_idx,
-			vq->vq_used_cons_idx,
-			vq->vq_packed.cached_flags,
-			vq->vq_packed.used_wrap_counter);
-
-	result = cvq->virtio_net_hdr_mz->addr;
-	return result;
-}
-
-static struct virtio_pmd_ctrl *
-virtio_send_command_split(struct virtnet_ctl *cvq,
-			  struct virtio_pmd_ctrl *ctrl,
-			  int *dlen, int pkt_num)
-{
-	struct virtio_pmd_ctrl *result;
-	struct virtqueue *vq = cvq->vq;
-	uint32_t head, i;
-	int k, sum = 0;
-
-	head = vq->vq_desc_head_idx;
-
-	/*
-	 * Format is enforced in qemu code:
-	 * One TX packet for header;
-	 * At least one TX packet per argument;
-	 * One RX packet for ACK.
-	 */
-	vq->vq_split.ring.desc[head].flags = VRING_DESC_F_NEXT;
-	vq->vq_split.ring.desc[head].addr = cvq->virtio_net_hdr_mem;
-	vq->vq_split.ring.desc[head].len = sizeof(struct virtio_net_ctrl_hdr);
-	vq->vq_free_cnt--;
-	i = vq->vq_split.ring.desc[head].next;
-
-	for (k = 0; k < pkt_num; k++) {
-		vq->vq_split.ring.desc[i].flags = VRING_DESC_F_NEXT;
-		vq->vq_split.ring.desc[i].addr = cvq->virtio_net_hdr_mem
-			+ sizeof(struct virtio_net_ctrl_hdr)
-			+ sizeof(ctrl->status) + sizeof(uint8_t)*sum;
-		vq->vq_split.ring.desc[i].len = dlen[k];
-		sum += dlen[k];
-		vq->vq_free_cnt--;
-		i = vq->vq_split.ring.desc[i].next;
-	}
-
-	vq->vq_split.ring.desc[i].flags = VRING_DESC_F_WRITE;
-	vq->vq_split.ring.desc[i].addr = cvq->virtio_net_hdr_mem
-		+ sizeof(struct virtio_net_ctrl_hdr);
-	vq->vq_split.ring.desc[i].len = sizeof(ctrl->status);
-	vq->vq_free_cnt--;
-
-	vq->vq_desc_head_idx = vq->vq_split.ring.desc[i].next;
-
-	vq_update_avail_ring(vq, head);
-	vq_update_avail_idx(vq);
-
-	PMD_INIT_LOG(DEBUG, "vq->vq_queue_index = %d", vq->vq_queue_index);
-
-	virtqueue_notify(vq);
-
-	rte_rmb();
-	while (VIRTQUEUE_NUSED(vq) == 0) {
-		rte_rmb();
-		usleep(100);
-	}
-
-	while (VIRTQUEUE_NUSED(vq)) {
-		uint32_t idx, desc_idx, used_idx;
-		struct vring_used_elem *uep;
-
-		used_idx = (uint32_t)(vq->vq_used_cons_idx
-				& (vq->vq_nentries - 1));
-		uep = &vq->vq_split.ring.used->ring[used_idx];
-		idx = (uint32_t) uep->id;
-		desc_idx = idx;
-
-		while (vq->vq_split.ring.desc[desc_idx].flags &
-				VRING_DESC_F_NEXT) {
-			desc_idx = vq->vq_split.ring.desc[desc_idx].next;
-			vq->vq_free_cnt++;
-		}
-
-		vq->vq_split.ring.desc[desc_idx].next = vq->vq_desc_head_idx;
-		vq->vq_desc_head_idx = idx;
-
-		vq->vq_used_cons_idx++;
-		vq->vq_free_cnt++;
-	}
-
-	PMD_INIT_LOG(DEBUG, "vq->vq_free_cnt=%d\nvq->vq_desc_head_idx=%d",
-			vq->vq_free_cnt, vq->vq_desc_head_idx);
-
-	result = cvq->virtio_net_hdr_mz->addr;
-	return result;
-}
-
-static int
-virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
-		int *dlen, int pkt_num)
-{
-	virtio_net_ctrl_ack status = ~0;
-	struct virtio_pmd_ctrl *result;
-	struct virtqueue *vq;
-
-	ctrl->status = status;
-
-	if (!cvq || !cvq->vq) {
-		PMD_INIT_LOG(ERR, "Control queue is not supported.");
-		return -1;
-	}
-
-	rte_spinlock_lock(&cvq->lock);
-	vq = cvq->vq;
-
-	PMD_INIT_LOG(DEBUG, "vq->vq_desc_head_idx = %d, status = %d, "
-		"vq->hw->cvq = %p vq = %p",
-		vq->vq_desc_head_idx, status, vq->hw->cvq, vq);
-
-	if (vq->vq_free_cnt < pkt_num + 2 || pkt_num < 1) {
-		rte_spinlock_unlock(&cvq->lock);
-		return -1;
-	}
-
-	memcpy(cvq->virtio_net_hdr_mz->addr, ctrl,
-		sizeof(struct virtio_pmd_ctrl));
-
-	if (vtpci_packed_queue(vq->hw))
-		result = virtio_send_command_packed(cvq, ctrl, dlen, pkt_num);
-	else
-		result = virtio_send_command_split(cvq, ctrl, dlen, pkt_num);
-
-	rte_spinlock_unlock(&cvq->lock);
-	return result->status;
-}
-
 static int
 virtio_set_multiple_queues(struct rte_eth_dev *dev, uint16_t nb_queues)
 {
@@ -401,38 +181,6 @@ virtio_get_nr_vq(struct virtio_hw *hw)
 	return nr_vq;
 }
 
-static void
-virtio_init_vring(struct virtqueue *vq)
-{
-	int size = vq->vq_nentries;
-	uint8_t *ring_mem = vq->vq_ring_virt_mem;
-
-	PMD_INIT_FUNC_TRACE();
-
-	memset(ring_mem, 0, vq->vq_ring_size);
-
-	vq->vq_used_cons_idx = 0;
-	vq->vq_desc_head_idx = 0;
-	vq->vq_avail_idx = 0;
-	vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
-	vq->vq_free_cnt = vq->vq_nentries;
-	memset(vq->vq_descx, 0, sizeof(struct vq_desc_extra) * vq->vq_nentries);
-	if (vtpci_packed_queue(vq->hw)) {
-		vring_init_packed(&vq->vq_packed.ring, ring_mem,
-				  VIRTIO_PCI_VRING_ALIGN, size);
-		vring_desc_init_packed(vq, size);
-	} else {
-		struct vring *vr = &vq->vq_split.ring;
-
-		vring_init_split(vr, ring_mem, VIRTIO_PCI_VRING_ALIGN, size);
-		vring_desc_init_split(vr->desc, size);
-	}
-	/*
-	 * Disable device(host) interrupting guest
-	 */
-	virtqueue_disable_intr(vq);
-}
-
 static int
 virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
 {
diff --git a/drivers/net/virtio/virtqueue.c b/drivers/net/virtio/virtqueue.c
index 5ff1e3587..db630e07c 100644
--- a/drivers/net/virtio/virtqueue.c
+++ b/drivers/net/virtio/virtqueue.c
@@ -2,6 +2,7 @@
  * Copyright(c) 2010-2015 Intel Corporation
  */
 #include <stdint.h>
+#include <unistd.h>
 
 #include <rte_mbuf.h>
 
@@ -141,3 +142,257 @@ virtqueue_rxvq_flush(struct virtqueue *vq)
 	else
 		virtqueue_rxvq_flush_split(vq);
 }
+
+static struct virtio_pmd_ctrl *
+virtio_send_command_packed(struct virtnet_ctl *cvq,
+			   struct virtio_pmd_ctrl *ctrl,
+			   int *dlen, int pkt_num)
+{
+	struct virtqueue *vq = cvq->vq;
+	int head;
+	struct vring_packed_desc *desc = vq->vq_packed.ring.desc;
+	struct virtio_pmd_ctrl *result;
+	uint16_t flags;
+	int sum = 0;
+	int nb_descs = 0;
+	int k;
+
+	/*
+	 * Format is enforced in qemu code:
+	 * One TX packet for header;
+	 * At least one TX packet per argument;
+	 * One RX packet for ACK.
+	 */
+	head = vq->vq_avail_idx;
+	flags = vq->vq_packed.cached_flags;
+	desc[head].addr = cvq->virtio_net_hdr_mem;
+	desc[head].len = sizeof(struct virtio_net_ctrl_hdr);
+	vq->vq_free_cnt--;
+	nb_descs++;
+	if (++vq->vq_avail_idx >= vq->vq_nentries) {
+		vq->vq_avail_idx -= vq->vq_nentries;
+		vq->vq_packed.cached_flags ^= VRING_PACKED_DESC_F_AVAIL_USED;
+	}
+
+	for (k = 0; k < pkt_num; k++) {
+		desc[vq->vq_avail_idx].addr = cvq->virtio_net_hdr_mem
+			+ sizeof(struct virtio_net_ctrl_hdr)
+			+ sizeof(ctrl->status) + sizeof(uint8_t) * sum;
+		desc[vq->vq_avail_idx].len = dlen[k];
+		desc[vq->vq_avail_idx].flags = VRING_DESC_F_NEXT |
+			vq->vq_packed.cached_flags;
+		sum += dlen[k];
+		vq->vq_free_cnt--;
+		nb_descs++;
+		if (++vq->vq_avail_idx >= vq->vq_nentries) {
+			vq->vq_avail_idx -= vq->vq_nentries;
+			vq->vq_packed.cached_flags ^=
+				VRING_PACKED_DESC_F_AVAIL_USED;
+		}
+	}
+
+	desc[vq->vq_avail_idx].addr = cvq->virtio_net_hdr_mem
+		+ sizeof(struct virtio_net_ctrl_hdr);
+	desc[vq->vq_avail_idx].len = sizeof(ctrl->status);
+	desc[vq->vq_avail_idx].flags = VRING_DESC_F_WRITE |
+		vq->vq_packed.cached_flags;
+	vq->vq_free_cnt--;
+	nb_descs++;
+	if (++vq->vq_avail_idx >= vq->vq_nentries) {
+		vq->vq_avail_idx -= vq->vq_nentries;
+		vq->vq_packed.cached_flags ^= VRING_PACKED_DESC_F_AVAIL_USED;
+	}
+
+	virtio_wmb(vq->hw->weak_barriers);
+	desc[head].flags = VRING_DESC_F_NEXT | flags;
+
+	virtio_wmb(vq->hw->weak_barriers);
+	virtqueue_notify(vq);
+
+	/* wait for used descriptors in virtqueue */
+	while (!desc_is_used(&desc[head], vq))
+		usleep(100);
+
+	virtio_rmb(vq->hw->weak_barriers);
+
+	/* now get used descriptors */
+	vq->vq_free_cnt += nb_descs;
+	vq->vq_used_cons_idx += nb_descs;
+	if (vq->vq_used_cons_idx >= vq->vq_nentries) {
+		vq->vq_used_cons_idx -= vq->vq_nentries;
+		vq->vq_packed.used_wrap_counter ^= 1;
+	}
+
+	PMD_INIT_LOG(DEBUG, "vq->vq_free_cnt=%d\n"
+			"vq->vq_avail_idx=%d\n"
+			"vq->vq_used_cons_idx=%d\n"
+			"vq->vq_packed.cached_flags=0x%x\n"
+			"vq->vq_packed.used_wrap_counter=%d\n",
+			vq->vq_free_cnt,
+			vq->vq_avail_idx,
+			vq->vq_used_cons_idx,
+			vq->vq_packed.cached_flags,
+			vq->vq_packed.used_wrap_counter);
+
+	result = cvq->virtio_net_hdr_mz->addr;
+	return result;
+}
+
+static struct virtio_pmd_ctrl *
+virtio_send_command_split(struct virtnet_ctl *cvq,
+			  struct virtio_pmd_ctrl *ctrl,
+			  int *dlen, int pkt_num)
+{
+	struct virtio_pmd_ctrl *result;
+	struct virtqueue *vq = cvq->vq;
+	uint32_t head, i;
+	int k, sum = 0;
+
+	head = vq->vq_desc_head_idx;
+
+	/*
+	 * Format is enforced in qemu code:
+	 * One TX packet for header;
+	 * At least one TX packet per argument;
+	 * One RX packet for ACK.
+	 */
+	vq->vq_split.ring.desc[head].flags = VRING_DESC_F_NEXT;
+	vq->vq_split.ring.desc[head].addr = cvq->virtio_net_hdr_mem;
+	vq->vq_split.ring.desc[head].len = sizeof(struct virtio_net_ctrl_hdr);
+	vq->vq_free_cnt--;
+	i = vq->vq_split.ring.desc[head].next;
+
+	for (k = 0; k < pkt_num; k++) {
+		vq->vq_split.ring.desc[i].flags = VRING_DESC_F_NEXT;
+		vq->vq_split.ring.desc[i].addr = cvq->virtio_net_hdr_mem
+			+ sizeof(struct virtio_net_ctrl_hdr)
+			+ sizeof(ctrl->status) + sizeof(uint8_t) * sum;
+		vq->vq_split.ring.desc[i].len = dlen[k];
+		sum += dlen[k];
+		vq->vq_free_cnt--;
+		i = vq->vq_split.ring.desc[i].next;
+	}
+
+	vq->vq_split.ring.desc[i].flags = VRING_DESC_F_WRITE;
+	vq->vq_split.ring.desc[i].addr = cvq->virtio_net_hdr_mem
+		+ sizeof(struct virtio_net_ctrl_hdr);
+	vq->vq_split.ring.desc[i].len = sizeof(ctrl->status);
+	vq->vq_free_cnt--;
+
+	vq->vq_desc_head_idx = vq->vq_split.ring.desc[i].next;
+
+	vq_update_avail_ring(vq, head);
+	vq_update_avail_idx(vq);
+
+	PMD_INIT_LOG(DEBUG, "vq->vq_queue_index = %d", vq->vq_queue_index);
+
+	virtqueue_notify(vq);
+
+	rte_rmb();
+	while (VIRTQUEUE_NUSED(vq) == 0) {
+		rte_rmb();
+		usleep(100);
+	}
+
+	while (VIRTQUEUE_NUSED(vq)) {
+		uint32_t idx, desc_idx, used_idx;
+		struct vring_used_elem *uep;
+
+		used_idx = (uint32_t)(vq->vq_used_cons_idx
+				& (vq->vq_nentries - 1));
+		uep = &vq->vq_split.ring.used->ring[used_idx];
+		idx = (uint32_t)uep->id;
+		desc_idx = idx;
+
+		while (vq->vq_split.ring.desc[desc_idx].flags &
+				VRING_DESC_F_NEXT) {
+			desc_idx = vq->vq_split.ring.desc[desc_idx].next;
+			vq->vq_free_cnt++;
+		}
+
+		vq->vq_split.ring.desc[desc_idx].next = vq->vq_desc_head_idx;
+		vq->vq_desc_head_idx = idx;
+
+		vq->vq_used_cons_idx++;
+		vq->vq_free_cnt++;
+	}
+
+	PMD_INIT_LOG(DEBUG, "vq->vq_free_cnt=%d\nvq->vq_desc_head_idx=%d",
+			vq->vq_free_cnt, vq->vq_desc_head_idx);
+
+	result = cvq->virtio_net_hdr_mz->addr;
+	return result;
+}
+
+int
+virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
+		int *dlen, int pkt_num)
+{
+	virtio_net_ctrl_ack status = ~0;
+	struct virtio_pmd_ctrl *result;
+	struct virtqueue *vq;
+
+	ctrl->status = status;
+
+	if (!cvq || !cvq->vq) {
+		PMD_INIT_LOG(ERR, "Control queue is not supported.");
+		return -1;
+	}
+
+	rte_spinlock_lock(&cvq->lock);
+	vq = cvq->vq;
+
+	PMD_INIT_LOG(DEBUG, "vq->vq_desc_head_idx = %d, status = %d, "
+		"vq->hw->cvq = %p vq = %p",
+		vq->vq_desc_head_idx, status, vq->hw->cvq, vq);
+
+	if (vq->vq_free_cnt < pkt_num + 2 || pkt_num < 1) {
+		rte_spinlock_unlock(&cvq->lock);
+		return -1;
+	}
+
+	memcpy(cvq->virtio_net_hdr_mz->addr, ctrl,
+		sizeof(struct virtio_pmd_ctrl));
+
+	if (vtpci_packed_queue(vq->hw))
+		result = virtio_send_command_packed(cvq, ctrl, dlen, pkt_num);
+	else
+		result = virtio_send_command_split(cvq, ctrl, dlen, pkt_num);
+
+	rte_spinlock_unlock(&cvq->lock);
+	return result->status;
+}
+
+void
+virtio_init_vring(struct virtqueue *vq)
+{
+	int size = vq->vq_nentries;
+	uint8_t *ring_mem = vq->vq_ring_virt_mem;
+
+	PMD_INIT_FUNC_TRACE();
+
+	memset(ring_mem, 0, vq->vq_ring_size);
+
+	vq->vq_used_cons_idx = 0;
+	vq->vq_desc_head_idx = 0;
+	vq->vq_avail_idx = 0;
+	vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
+	vq->vq_free_cnt = vq->vq_nentries;
+	if (vq->vq_descx)
+		memset(vq->vq_descx, 0,
+			sizeof(struct vq_desc_extra) * vq->vq_nentries);
+	if (vtpci_packed_queue(vq->hw)) {
+		vring_init_packed(&vq->vq_packed.ring, ring_mem,
+				  VIRTIO_PCI_VRING_ALIGN, size);
+		vring_desc_init_packed(vq, size);
+	} else {
+		struct vring *vr = &vq->vq_split.ring;
+
+		vring_init_split(vr, ring_mem, VIRTIO_PCI_VRING_ALIGN, size);
+		vring_desc_init_split(vr->desc, size);
+	}
+	/*
+	 * Disable device(host) interrupting guest
+	 */
+	virtqueue_disable_intr(vq);
+}
diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
index c6dd4a347..4d8d069c7 100644
--- a/drivers/net/virtio/virtqueue.h
+++ b/drivers/net/virtio/virtqueue.h
@@ -480,6 +480,11 @@ virtqueue_notify(struct virtqueue *vq)
 	VTPCI_OPS(vq->hw)->notify_queue(vq->hw, vq);
 }
 
+int virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
+		int *dlen, int pkt_num);
+
+void virtio_init_vring(struct virtqueue *vq);
+
 #ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
 #define VIRTQUEUE_DUMP(vq) do { \
 	uint16_t used_idx, nused; \
-- 
2.21.0
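
Note on usage: with virtio_send_command() now declared in virtqueue.h, the
control queue can be driven from outside virtio_ethdev.c. The sketch below is
modeled on the PMD's existing virtio_set_multiple_queues() (whose opening
lines appear as context in the first hunk above); set_mq_pairs() is a made-up
name for illustration, while struct virtio_pmd_ctrl, hw->cvq and the
VIRTIO_NET_CTRL_MQ class/command constants come from the existing virtio PMD
headers.

/*
 * Sketch: set the number of active queue pairs through the control queue.
 * Assumes the usual virtio PMD headers (virtqueue.h, virtio_ethdev.h).
 */
static int
set_mq_pairs(struct virtio_hw *hw, uint16_t nb_queues)
{
	struct virtio_pmd_ctrl ctrl;
	int dlen[1];

	ctrl.hdr.class = VIRTIO_NET_CTRL_MQ;	/* command class */
	ctrl.hdr.cmd = VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET;
	memcpy(ctrl.data, &nb_queues, sizeof(uint16_t));
	dlen[0] = sizeof(uint16_t);	/* one data descriptor, 2 bytes */

	/* Returns the device's ack status, or -1 on a malformed request. */
	return virtio_send_command(hw->cvq, &ctrl, dlen, 1);
}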
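
Note on the packed-ring path: each descriptor write in
virtio_send_command_packed() advances vq_avail_idx, and crossing the ring end
both wraps the index and flips the avail/used bits cached in
vq_packed.cached_flags, so the next lap publishes descriptors with inverted
polarity; the head descriptor's flags are only written after virtio_wmb(),
which keeps the chain invisible to the device until it is complete. The
recurring advance step, factored into a hypothetical helper purely for
illustration (the patch keeps it inline):

/*
 * Hypothetical helper (illustration only): the advance-and-wrap step that
 * virtio_send_command_packed() repeats for each descriptor it fills.
 */
static inline void
vq_packed_advance(struct virtqueue *vq)
{
	vq->vq_free_cnt--;
	if (++vq->vq_avail_idx >= vq->vq_nentries) {
		vq->vq_avail_idx -= vq->vq_nentries;
		/* New lap: flip the cached avail/used flag polarity. */
		vq->vq_packed.cached_flags ^= VRING_PACKED_DESC_F_AVAIL_USED;
	}
}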