From mboxrd@z Thu Jan  1 00:00:00 1970
Return-Path: 
Received: from mx1.redhat.com (mx3-rdu2.redhat.com [66.187.233.73])
	by dpdk.org (Postfix) with ESMTP id 478345F6C
	for ; Mon, 23 Apr 2018 18:01:15 +0200 (CEST)
Received: from smtp.corp.redhat.com
	(int-mx05.intmail.prod.int.rdu2.redhat.com [10.11.54.5])
	(using TLSv1.2 with cipher AECDH-AES256-SHA (256/256 bits))
	(No client certificate requested)
	by mx1.redhat.com (Postfix) with ESMTPS id E50214023335
	for ; Mon, 23 Apr 2018 16:01:14 +0000 (UTC)
Received: from localhost.localdomain (ovpn-112-58.ams2.redhat.com
	[10.36.112.58])
	by smtp.corp.redhat.com (Postfix) with ESMTP id 108907C49;
	Mon, 23 Apr 2018 16:01:13 +0000 (UTC)
From: Maxime Coquelin 
To: stable@dpdk.org
Cc: Maxime Coquelin 
Date: Mon, 23 Apr 2018 18:00:38 +0200
Message-Id: <20180423160047.21457-3-maxime.coquelin@redhat.com>
In-Reply-To: <20180423160047.21457-1-maxime.coquelin@redhat.com>
References: <20180423160047.21457-1-maxime.coquelin@redhat.com>
X-Scanned-By: MIMEDefang 2.79 on 10.11.54.5
X-Greylist: Sender IP whitelisted, not delayed by milter-greylist-4.5.16
	(mx1.redhat.com [10.11.55.6]); Mon, 23 Apr 2018 16:01:14 +0000 (UTC)
X-Greylist: inspected by milter-greylist-4.5.16 (mx1.redhat.com [10.11.55.6]);
	Mon, 23 Apr 2018 16:01:14 +0000 (UTC) for IP:'10.11.54.5'
	DOMAIN:'int-mx05.intmail.prod.int.rdu2.redhat.com'
	HELO:'smtp.corp.redhat.com' FROM:'maxime.coquelin@redhat.com' RCPT:''
Subject: [dpdk-stable] [PATCH v17.11 LTS 02/11] vhost: check all range is
	mapped when translating GPAs
X-BeenThere: stable@dpdk.org
X-Mailman-Version: 2.1.15
Precedence: list
List-Id: patches for DPDK stable branches
List-Unsubscribe: ,
List-Archive: 
List-Post: 
List-Help: 
List-Subscribe: ,
X-List-Received-Date: Mon, 23 Apr 2018 16:01:15 -0000

There is currently no check done on the length when translating
guest addresses into host virtual addresses. Also, there is no
guarantee that the guest address range is contiguous in the host
virtual address space.

This patch prepares vhost_iova_to_vva() and its callers to return
and check the mapped size. If the mapped size is smaller than the
requested size, the caller handles it as an error.

This issue has been assigned CVE-2018-1059.
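
For example, after this change a caller translates a descriptor
address and checks the returned length as follows (a minimal sketch
distilled from the hunks below, using the patch's own dlen pattern):

	uint64_t dlen = desc->len; /* requested size, updated by callee */

	desc_addr = vhost_iova_to_vva(dev, vq, desc->addr,
					&dlen, VHOST_ACCESS_RO);
	/*
	 * Reject failed translations and partial mappings: dlen
	 * smaller than desc->len means the guest range is not
	 * contiguously mapped for the full requested size.
	 */
	if (unlikely(!desc_addr || dlen != desc->len)) {
		error = -1;
		goto out;
	}
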
Reported-by: Yongji Xie 
Signed-off-by: Maxime Coquelin 
---
 lib/librte_vhost/vhost.c      | 39 +++++++++++++++-----------
 lib/librte_vhost/vhost.h      |  6 ++--
 lib/librte_vhost/virtio_net.c | 64 +++++++++++++++++++++++++++----------------
 3 files changed, 67 insertions(+), 42 deletions(-)

diff --git a/lib/librte_vhost/vhost.c b/lib/librte_vhost/vhost.c
index 51ea720a3..a8ed40b1f 100644
--- a/lib/librte_vhost/vhost.c
+++ b/lib/librte_vhost/vhost.c
@@ -58,17 +58,17 @@ struct virtio_net *vhost_devices[MAX_VHOST_DEVICE];
 /* Called with iotlb_lock read-locked */
 uint64_t
 __vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
-		    uint64_t iova, uint64_t size, uint8_t perm)
+		    uint64_t iova, uint64_t *size, uint8_t perm)
 {
 	uint64_t vva, tmp_size;
 
-	if (unlikely(!size))
+	if (unlikely(!*size))
 		return 0;
 
-	tmp_size = size;
+	tmp_size = *size;
 
 	vva = vhost_user_iotlb_cache_find(vq, iova, &tmp_size, perm);
-	if (tmp_size == size)
+	if (tmp_size == *size)
 		return vva;
 
 	iova += tmp_size;
@@ -158,32 +158,39 @@ free_device(struct virtio_net *dev)
 int
 vring_translate(struct virtio_net *dev, struct vhost_virtqueue *vq)
 {
-	uint64_t size;
+	uint64_t req_size, size;
 
 	if (!(dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)))
 		goto out;
 
-	size = sizeof(struct vring_desc) * vq->size;
+	req_size = sizeof(struct vring_desc) * vq->size;
+	size = req_size;
 	vq->desc = (struct vring_desc *)(uintptr_t)vhost_iova_to_vva(dev, vq,
 						vq->ring_addrs.desc_user_addr,
-						size, VHOST_ACCESS_RW);
-	if (!vq->desc)
+						&size, VHOST_ACCESS_RW);
+	if (!vq->desc || size != req_size)
 		return -1;
 
-	size = sizeof(struct vring_avail);
-	size += sizeof(uint16_t) * vq->size;
+	req_size = sizeof(struct vring_avail);
+	req_size += sizeof(uint16_t) * vq->size;
+	if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
+		req_size += sizeof(uint16_t);
+	size = req_size;
 	vq->avail = (struct vring_avail *)(uintptr_t)vhost_iova_to_vva(dev, vq,
 						vq->ring_addrs.avail_user_addr,
-						size, VHOST_ACCESS_RW);
-	if (!vq->avail)
+						&size, VHOST_ACCESS_RW);
+	if (!vq->avail || size != req_size)
 		return -1;
 
-	size = sizeof(struct vring_used);
-	size += sizeof(struct vring_used_elem) * vq->size;
+	req_size = sizeof(struct vring_used);
+	req_size += sizeof(struct vring_used_elem) * vq->size;
+	if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
+		req_size += sizeof(uint16_t);
+	size = req_size;
 	vq->used = (struct vring_used *)(uintptr_t)vhost_iova_to_vva(dev, vq,
 						vq->ring_addrs.used_user_addr,
-						size, VHOST_ACCESS_RW);
-	if (!vq->used)
+						&size, VHOST_ACCESS_RW);
+	if (!vq->used || size != req_size)
 		return -1;
 
 out:
diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
index c8f2a8176..de300c107 100644
--- a/lib/librte_vhost/vhost.h
+++ b/lib/librte_vhost/vhost.h
@@ -381,18 +381,18 @@ struct vhost_device_ops const *vhost_driver_callback_get(const char *path);
 void vhost_backend_cleanup(struct virtio_net *dev);
 
 uint64_t __vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
-			uint64_t iova, uint64_t size, uint8_t perm);
+			uint64_t iova, uint64_t *len, uint8_t perm);
 int vring_translate(struct virtio_net *dev, struct vhost_virtqueue *vq);
 void vring_invalidate(struct virtio_net *dev, struct vhost_virtqueue *vq);
 
 static __rte_always_inline uint64_t
 vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
-			uint64_t iova, uint64_t size, uint8_t perm)
+			uint64_t iova, uint64_t *len, uint8_t perm)
 {
 	if (!(dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)))
 		return rte_vhost_gpa_to_vva(dev->mem, iova);
 
-	return __vhost_iova_to_vva(dev, vq, iova, size, perm);
+	return __vhost_iova_to_vva(dev, vq, iova, len, perm);
 }
 
 #endif /* _VHOST_NET_CDEV_H_ */
diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index cb1d0cfc4..79bac590d 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -204,6 +204,7 @@ copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	uint32_t desc_avail, desc_offset;
 	uint32_t mbuf_avail, mbuf_offset;
 	uint32_t cpy_len;
+	uint64_t dlen;
 	struct vring_desc *desc;
 	uint64_t desc_addr;
 	/* A counter to avoid desc dead loop chain */
@@ -213,14 +214,16 @@ copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	int error = 0;
 
 	desc = &descs[desc_idx];
+	dlen = desc->len;
 	desc_addr = vhost_iova_to_vva(dev, vq, desc->addr,
-					desc->len, VHOST_ACCESS_RW);
+					&dlen, VHOST_ACCESS_RW);
 	/*
 	 * Checking of 'desc_addr' placed outside of 'unlikely' macro to avoid
 	 * performance issue with some versions of gcc (4.8.4 and 5.3.0) which
 	 * otherwise stores offset on the stack instead of in a register.
 	 */
-	if (unlikely(desc->len < dev->vhost_hlen) || !desc_addr) {
+	if (unlikely(dlen != desc->len || desc->len < dev->vhost_hlen) ||
+			!desc_addr) {
 		error = -1;
 		goto out;
 	}
@@ -258,10 +261,11 @@ copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
 			}
 
 			desc = &descs[desc->next];
+			dlen = desc->len;
 			desc_addr = vhost_iova_to_vva(dev, vq, desc->addr,
-							desc->len,
+							&dlen,
 							VHOST_ACCESS_RW);
-			if (unlikely(!desc_addr)) {
+			if (unlikely(!desc_addr || dlen != desc->len)) {
 				error = -1;
 				goto out;
 			}
@@ -375,12 +379,13 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
 		int err;
 
 		if (vq->desc[desc_idx].flags & VRING_DESC_F_INDIRECT) {
+			uint64_t dlen = vq->desc[desc_idx].len;
 			descs = (struct vring_desc *)(uintptr_t)
 				vhost_iova_to_vva(dev,
 						vq, vq->desc[desc_idx].addr,
-						vq->desc[desc_idx].len,
-						VHOST_ACCESS_RO);
-			if (unlikely(!descs)) {
+						&dlen, VHOST_ACCESS_RO);
+			if (unlikely(!descs ||
+					dlen != vq->desc[desc_idx].len)) {
 				count = i;
 				break;
 			}
@@ -438,16 +443,18 @@ fill_vec_buf(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	uint16_t idx = vq->avail->ring[avail_idx & (vq->size - 1)];
 	uint32_t vec_id = *vec_idx;
 	uint32_t len    = 0;
+	uint64_t dlen;
 	struct vring_desc *descs = vq->desc;
 
 	*desc_chain_head = idx;
 
 	if (vq->desc[idx].flags & VRING_DESC_F_INDIRECT) {
+		dlen = vq->desc[idx].len;
 		descs = (struct vring_desc *)(uintptr_t)
 			vhost_iova_to_vva(dev, vq, vq->desc[idx].addr,
-						vq->desc[idx].len,
+						&dlen,
 						VHOST_ACCESS_RO);
-		if (unlikely(!descs))
+		if (unlikely(!descs || dlen != vq->desc[idx].len))
 			return -1;
 
 		idx = 0;
@@ -530,6 +537,7 @@ copy_mbuf_to_desc_mergeable(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	uint32_t mbuf_offset, mbuf_avail;
 	uint32_t desc_offset, desc_avail;
 	uint32_t cpy_len;
+	uint64_t dlen;
 	uint64_t hdr_addr, hdr_phys_addr;
 	struct rte_mbuf *hdr_mbuf;
 	struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
@@ -541,10 +549,12 @@ copy_mbuf_to_desc_mergeable(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		goto out;
 	}
 
+	dlen = buf_vec[vec_idx].buf_len;
 	desc_addr = vhost_iova_to_vva(dev, vq, buf_vec[vec_idx].buf_addr,
-					buf_vec[vec_idx].buf_len,
-					VHOST_ACCESS_RW);
-	if (buf_vec[vec_idx].buf_len < dev->vhost_hlen || !desc_addr) {
+					&dlen, VHOST_ACCESS_RW);
+	if (dlen != buf_vec[vec_idx].buf_len ||
+			buf_vec[vec_idx].buf_len < dev->vhost_hlen ||
+			!desc_addr) {
 		error = -1;
 		goto out;
 	}
@@ -566,12 +576,14 @@ copy_mbuf_to_desc_mergeable(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		/* done with current desc buf, get the next one */
 		if (desc_avail == 0) {
 			vec_idx++;
+			dlen = buf_vec[vec_idx].buf_len;
 			desc_addr = vhost_iova_to_vva(dev, vq,
 					buf_vec[vec_idx].buf_addr,
-					buf_vec[vec_idx].buf_len,
+					&dlen,
 					VHOST_ACCESS_RW);
-			if (unlikely(!desc_addr)) {
+			if (unlikely(!desc_addr ||
+					dlen != buf_vec[vec_idx].buf_len)) {
 				error = -1;
 				goto out;
 			}
@@ -911,6 +923,7 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	uint32_t desc_avail, desc_offset;
 	uint32_t mbuf_avail, mbuf_offset;
 	uint32_t cpy_len;
+	uint64_t dlen;
 	struct rte_mbuf *cur = m, *prev = m;
 	struct virtio_net_hdr *hdr = NULL;
 	/* A counter to avoid desc dead loop chain */
@@ -926,11 +939,12 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		goto out;
 	}
 
+	dlen = desc->len;
 	desc_addr = vhost_iova_to_vva(dev,
 					vq, desc->addr,
-					desc->len,
+					&dlen,
 					VHOST_ACCESS_RO);
-	if (unlikely(!desc_addr)) {
+	if (unlikely(!desc_addr || dlen != desc->len)) {
 		error = -1;
 		goto out;
 	}
@@ -953,11 +967,12 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
 			goto out;
 		}
 
+		dlen = desc->len;
 		desc_addr = vhost_iova_to_vva(dev,
 						vq, desc->addr,
-						desc->len,
+						&dlen,
 						VHOST_ACCESS_RO);
-		if (unlikely(!desc_addr)) {
+		if (unlikely(!desc_addr || dlen != desc->len)) {
 			error = -1;
 			goto out;
 		}
@@ -1041,11 +1056,11 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
 				goto out;
 			}
 
+			dlen = desc->len;
 			desc_addr = vhost_iova_to_vva(dev,
 							vq, desc->addr,
-							desc->len,
-							VHOST_ACCESS_RO);
-			if (unlikely(!desc_addr)) {
+							&dlen, VHOST_ACCESS_RO);
-			if (unlikely(!desc_addr || dlen != desc->len)) {
 				error = -1;
 				goto out;
 			}
@@ -1319,18 +1334,21 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
 	for (i = 0; i < count; i++) {
 		struct vring_desc *desc;
 		uint16_t sz, idx;
+		uint64_t dlen;
 		int err;
 
 		if (likely(i + 1 < count))
 			rte_prefetch0(&vq->desc[desc_indexes[i + 1]]);
 
 		if (vq->desc[desc_indexes[i]].flags & VRING_DESC_F_INDIRECT) {
+			dlen = vq->desc[desc_indexes[i]].len;
 			desc = (struct vring_desc *)(uintptr_t)
 				vhost_iova_to_vva(dev, vq,
						vq->desc[desc_indexes[i]].addr,
-						vq->desc[desc_indexes[i]].len,
+						&dlen,
 						VHOST_ACCESS_RO);
-			if (unlikely(!desc))
+			if (unlikely(!desc ||
+					dlen != vq->desc[desc_indexes[i]].len))
 				break;
 
 			rte_prefetch0(desc);
-- 
2.14.3