From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: from mx1.redhat.com (mx3-rdu2.redhat.com [66.187.233.73]) by dpdk.org (Postfix) with ESMTP id 17F7E4F9A for ; Mon, 23 Apr 2018 17:59:43 +0200 (CEST) Received: from smtp.corp.redhat.com (int-mx04.intmail.prod.int.rdu2.redhat.com [10.11.54.4]) (using TLSv1.2 with cipher AECDH-AES256-SHA (256/256 bits)) (No client certificate requested) by mx1.redhat.com (Postfix) with ESMTPS id 5D0F6E44B5 for ; Mon, 23 Apr 2018 15:59:43 +0000 (UTC) Received: from localhost.localdomain (ovpn-112-58.ams2.redhat.com [10.36.112.58]) by smtp.corp.redhat.com (Postfix) with ESMTP id 801332026DFD; Mon, 23 Apr 2018 15:59:42 +0000 (UTC) From: Maxime Coquelin To: stable@dpdk.org Cc: Maxime Coquelin Date: Mon, 23 Apr 2018 17:59:13 +0200 Message-Id: <20180423155918.21350-2-maxime.coquelin@redhat.com> In-Reply-To: <20180423155918.21350-1-maxime.coquelin@redhat.com> References: <20180423155918.21350-1-maxime.coquelin@redhat.com> X-Scanned-By: MIMEDefang 2.78 on 10.11.54.4 X-Greylist: Sender IP whitelisted, not delayed by milter-greylist-4.5.16 (mx1.redhat.com [10.11.55.1]); Mon, 23 Apr 2018 15:59:43 +0000 (UTC) X-Greylist: inspected by milter-greylist-4.5.16 (mx1.redhat.com [10.11.55.1]); Mon, 23 Apr 2018 15:59:43 +0000 (UTC) for IP:'10.11.54.4' DOMAIN:'int-mx04.intmail.prod.int.rdu2.redhat.com' HELO:'smtp.corp.redhat.com' FROM:'maxime.coquelin@redhat.com' RCPT:'' Subject: [dpdk-stable] [PATCH v16.11 LTS 1/6] vhost: check all range is mapped when translating GPAs X-BeenThere: stable@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: patches for DPDK stable branches List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , X-List-Received-Date: Mon, 23 Apr 2018 15:59:44 -0000 There is currently no check done on the length when translating guest addresses into host virtual addresses. Also, there is no guarantee that the guest addresses range is contiguous in the host virtual address space. 
This patch adapts gpa_to_vva() and its callers to return and check the mapped size. If the mapped size is smaller than the requested size, the caller handle it as an error. Signed-off-by: Maxime Coquelin --- lib/librte_vhost/vhost.h | 19 ++++++++----- lib/librte_vhost/virtio_net.c | 66 +++++++++++++++++++++++++++++-------------- 2 files changed, 57 insertions(+), 28 deletions(-) diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h index 9f60ff81a..c49db0c03 100644 --- a/lib/librte_vhost/vhost.h +++ b/lib/librte_vhost/vhost.h @@ -225,19 +225,24 @@ extern struct virtio_net *vhost_devices[MAX_VHOST_DEVICE]; /* Convert guest physical Address to host virtual address */ static inline uint64_t __attribute__((always_inline)) -gpa_to_vva(struct virtio_net *dev, uint64_t gpa) +gpa_to_vva(struct virtio_net *dev, uint64_t gpa, uint64_t *len) { - struct virtio_memory_region *reg; + struct virtio_memory_region *r; uint32_t i; for (i = 0; i < dev->mem->nregions; i++) { - reg = &dev->mem->regions[i]; - if (gpa >= reg->guest_phys_addr && - gpa < reg->guest_phys_addr + reg->size) { - return gpa - reg->guest_phys_addr + - reg->host_user_addr; + r = &dev->mem->regions[i]; + if (gpa >= r->guest_phys_addr && + gpa < r->guest_phys_addr + r->size) { + + if (unlikely(*len > r->guest_phys_addr + r->size - gpa)) + *len = r->guest_phys_addr + r->size - gpa; + + return gpa - r->guest_phys_addr + + r->host_user_addr; } } + *len = 0; return 0; } diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c index 0024f729e..f1b1a4ec7 100644 --- a/lib/librte_vhost/virtio_net.c +++ b/lib/librte_vhost/virtio_net.c @@ -214,6 +214,7 @@ copy_mbuf_to_desc(struct virtio_net *dev, struct vring_desc *descs, struct rte_mbuf *m, uint16_t desc_idx, uint32_t size) { uint32_t desc_avail, desc_offset; + uint64_t desc_len; uint32_t mbuf_avail, mbuf_offset; uint32_t cpy_len; struct vring_desc *desc; @@ -223,13 +224,15 @@ copy_mbuf_to_desc(struct virtio_net *dev, struct vring_desc *descs, 
uint16_t nr_desc = 1; desc = &descs[desc_idx]; - desc_addr = gpa_to_vva(dev, desc->addr); + desc_len = desc->len; + desc_addr = gpa_to_vva(dev, desc->addr, &desc_len); /* * Checking of 'desc_addr' placed outside of 'unlikely' macro to avoid * performance issue with some versions of gcc (4.8.4 and 5.3.0) which * otherwise stores offset on the stack instead of in a register. */ - if (unlikely(desc->len < dev->vhost_hlen) || !desc_addr) + if (unlikely(desc_len != desc->len || + desc->len < dev->vhost_hlen) || !desc_addr) return -1; rte_prefetch0((void *)(uintptr_t)desc_addr); @@ -263,8 +266,9 @@ copy_mbuf_to_desc(struct virtio_net *dev, struct vring_desc *descs, return -1; desc = &descs[desc->next]; - desc_addr = gpa_to_vva(dev, desc->addr); - if (unlikely(!desc_addr)) + desc_len = desc->len; + desc_addr = gpa_to_vva(dev, desc->addr, &desc_len); + if (unlikely(!desc_addr || desc_len != desc->len)) return -1; desc_offset = 0; @@ -305,6 +309,7 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id, struct vring_desc *descs; uint16_t used_idx; uint32_t i, sz; + uint64_t dlen; LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__); if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->virt_qp_nb))) { @@ -350,9 +355,11 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id, int err; if (vq->desc[desc_idx].flags & VRING_DESC_F_INDIRECT) { + dlen = vq->desc[desc_idx].len; descs = (struct vring_desc *)(uintptr_t)gpa_to_vva(dev, - vq->desc[desc_idx].addr); - if (unlikely(!descs)) { + vq->desc[desc_idx].addr, &dlen); + if (unlikely(!descs || + dlen != vq->desc[desc_idx].len)) { count = i; break; } @@ -408,14 +415,17 @@ fill_vec_buf(struct virtio_net *dev, struct vhost_virtqueue *vq, uint16_t idx = vq->avail->ring[avail_idx & (vq->size - 1)]; uint32_t vec_id = *vec_idx; uint32_t len = 0; + uint64_t dlen; struct vring_desc *descs = vq->desc; *desc_chain_head = idx; if (vq->desc[idx].flags & VRING_DESC_F_INDIRECT) { + dlen = vq->desc[idx].len; descs = (struct vring_desc 
*)(uintptr_t) - gpa_to_vva(dev, vq->desc[idx].addr); - if (unlikely(!descs)) + gpa_to_vva(dev, vq->desc[idx].addr, + &dlen); + if (unlikely(!descs || dlen != vq->desc[idx].len)) return -1; idx = 0; @@ -494,7 +504,7 @@ copy_mbuf_to_desc_mergeable(struct virtio_net *dev, struct rte_mbuf *m, { struct virtio_net_hdr_mrg_rxbuf virtio_hdr = {{0, 0, 0, 0, 0, 0}, 0}; uint32_t vec_idx = 0; - uint64_t desc_addr; + uint64_t desc_addr, desc_len; uint32_t mbuf_offset, mbuf_avail; uint32_t desc_offset, desc_avail; uint32_t cpy_len; @@ -504,8 +514,11 @@ copy_mbuf_to_desc_mergeable(struct virtio_net *dev, struct rte_mbuf *m, if (unlikely(m == NULL)) return -1; - desc_addr = gpa_to_vva(dev, buf_vec[vec_idx].buf_addr); - if (buf_vec[vec_idx].buf_len < dev->vhost_hlen || !desc_addr) + desc_len = buf_vec[vec_idx].buf_len; + desc_addr = gpa_to_vva(dev, buf_vec[vec_idx].buf_addr, &desc_len); + if (desc_len != buf_vec[vec_idx].buf_len || + buf_vec[vec_idx].buf_len < dev->vhost_hlen || + !desc_addr) return -1; hdr_mbuf = m; @@ -526,8 +539,11 @@ copy_mbuf_to_desc_mergeable(struct virtio_net *dev, struct rte_mbuf *m, /* done with current desc buf, get the next one */ if (desc_avail == 0) { vec_idx++; - desc_addr = gpa_to_vva(dev, buf_vec[vec_idx].buf_addr); - if (unlikely(!desc_addr)) + desc_len = buf_vec[vec_idx].buf_len; + desc_addr = gpa_to_vva(dev, buf_vec[vec_idx].buf_addr, + &desc_len); + if (unlikely(!desc_addr || + desc_len != buf_vec[vec_idx].buf_len)) return -1; /* Prefetch buffer address. 
*/ @@ -825,6 +841,7 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vring_desc *descs, struct vring_desc *desc; uint64_t desc_addr; uint32_t desc_avail, desc_offset; + uint64_t desc_len; uint32_t mbuf_avail, mbuf_offset; uint32_t cpy_len; struct rte_mbuf *cur = m, *prev = m; @@ -837,8 +854,9 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vring_desc *descs, (desc->flags & VRING_DESC_F_INDIRECT)) return -1; - desc_addr = gpa_to_vva(dev, desc->addr); - if (unlikely(!desc_addr)) + desc_len = desc->len; + desc_addr = gpa_to_vva(dev, desc->addr, &desc_len); + if (unlikely(!desc_addr || desc_len != desc->len)) return -1; if (virtio_net_with_host_offload(dev)) { @@ -857,8 +875,9 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vring_desc *descs, if (unlikely(desc->flags & VRING_DESC_F_INDIRECT)) return -1; - desc_addr = gpa_to_vva(dev, desc->addr); - if (unlikely(!desc_addr)) + desc_len = desc->len; + desc_addr = gpa_to_vva(dev, desc->addr, &desc_len); + if (unlikely(!desc_addr || desc_len != desc->len)) return -1; desc_offset = 0; @@ -922,8 +941,9 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vring_desc *descs, if (unlikely(desc->flags & VRING_DESC_F_INDIRECT)) return -1; - desc_addr = gpa_to_vva(dev, desc->addr); - if (unlikely(!desc_addr)) + desc_len = desc->len; + desc_addr = gpa_to_vva(dev, desc->addr, &desc_len); + if (unlikely(!desc_addr || desc_len != desc->len)) return -1; rte_prefetch0((void *)(uintptr_t)desc_addr); @@ -1182,15 +1202,19 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id, for (i = 0; i < count; i++) { struct vring_desc *desc; uint16_t sz, idx; + uint64_t dlen; int err; if (likely(i + 1 < count)) rte_prefetch0(&vq->desc[desc_indexes[i + 1]]); if (vq->desc[desc_indexes[i]].flags & VRING_DESC_F_INDIRECT) { + dlen = vq->desc[desc_indexes[i]].len; desc = (struct vring_desc *)(uintptr_t)gpa_to_vva(dev, - vq->desc[desc_indexes[i]].addr); - if (unlikely(!desc)) + vq->desc[desc_indexes[i]].addr, + &dlen); + if (unlikely(!desc || + dlen 
!= vq->desc[desc_indexes[i]].len)) break; rte_prefetch0(desc); -- 2.14.3