From mboxrd@z Thu Jan  1 00:00:00 1970
From: Maxime Coquelin <maxime.coquelin@redhat.com>
To: stable@dpdk.org
Cc: Maxime Coquelin <maxime.coquelin@redhat.com>
Date: Mon, 23 Apr 2018 17:59:15 +0200
Message-Id: <20180423155918.21350-4-maxime.coquelin@redhat.com>
In-Reply-To: <20180423155918.21350-1-maxime.coquelin@redhat.com>
References: <20180423155918.21350-1-maxime.coquelin@redhat.com>
Subject: [dpdk-stable] [PATCH v16.11 LTS 3/6] vhost: add support for
 non-contiguous indirect descs tables
List-Id: patches for DPDK stable branches

This patch adds support for indirect descriptor tables that are
non-contiguous in the process VA space. When such a table is found,
which should be unlikely, a contiguous table is allocated and the
non-contiguous content is copied into it.
Reported-by: Yongji Xie
Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
 lib/librte_vhost/virtio_net.c | 105 +++++++++++++++++++++++++++++++++++++++---
 1 file changed, 98 insertions(+), 7 deletions(-)

diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index f1b1a4ec7..352f6ada0 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -45,6 +45,7 @@
 #include <rte_udp.h>
 #include <rte_sctp.h>
 #include <rte_arp.h>
+#include <rte_malloc.h>
 
 #include "vhost.h"
 
@@ -101,6 +102,44 @@ is_valid_virt_queue_idx(uint32_t idx, int is_tx, uint32_t qp_nb)
 	return (is_tx ^ (idx & 1)) == 0 && idx < qp_nb * VIRTIO_QNUM;
 }
 
+static inline struct vring_desc *__attribute__((always_inline))
+alloc_copy_ind_table(struct virtio_net *dev, struct vring_desc *desc)
+{
+	struct vring_desc *idesc;
+	uint64_t src, dst;
+	uint64_t len, remain = desc->len;
+	uint64_t desc_addr = desc->addr;
+
+	idesc = rte_malloc(__func__, desc->len, 0);
+	if (unlikely(!idesc))
+		return 0;
+
+	dst = (uint64_t)(uintptr_t)idesc;
+
+	while (remain) {
+		len = remain;
+		src = gpa_to_vva(dev, desc_addr, &len);
+		if (unlikely(!src || !len)) {
+			rte_free(idesc);
+			return 0;
+		}
+
+		rte_memcpy((void *)(uintptr_t)dst, (void *)(uintptr_t)src, len);
+
+		remain -= len;
+		dst += len;
+		desc_addr += len;
+	}
+
+	return idesc;
+}
+
+static inline void __attribute__((always_inline))
+free_ind_table(struct vring_desc *idesc)
+{
+	rte_free(idesc);
+}
+
 static inline void __attribute__((always_inline))
 do_flush_shadow_used_ring(struct virtio_net *dev, struct vhost_virtqueue *vq,
 			  uint16_t to, uint16_t from, uint16_t size)
@@ -351,6 +390,7 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
 	rte_prefetch0(&vq->desc[desc_indexes[0]]);
 	for (i = 0; i < count; i++) {
+		struct vring_desc *idesc = NULL;
 		uint16_t desc_idx = desc_indexes[i];
 		int err;
 
@@ -358,12 +398,24 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
 			dlen = vq->desc[desc_idx].len;
 			descs = (struct vring_desc *)(uintptr_t)gpa_to_vva(dev,
 					vq->desc[desc_idx].addr, &dlen);
-			if (unlikely(!descs ||
-					dlen != vq->desc[desc_idx].len)) {
+			if (unlikely(!descs)) {
 				count = i;
 				break;
 			}
 
+			if (unlikely(dlen < vq->desc[desc_idx].len)) {
+				/*
+				 * The indirect desc table is not contiguous
+				 * in process VA space, we have to copy it.
+				 */
+				idesc = alloc_copy_ind_table(dev,
+						&vq->desc[desc_idx]);
+				if (unlikely(!idesc))
+					break;
+
+				descs = idesc;
+			}
+
 			desc_idx = 0;
 			sz = vq->desc[desc_idx].len / sizeof(*descs);
 		} else {
@@ -382,6 +434,9 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
 
 		if (i + 1 < count)
 			rte_prefetch0(&vq->desc[desc_indexes[i+1]]);
+
+		if (unlikely(!!idesc))
+			free_ind_table(idesc);
 	}
 
 	rte_smp_wmb();
@@ -417,6 +472,7 @@ fill_vec_buf(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	uint32_t len = 0;
 	uint64_t dlen;
 	struct vring_desc *descs = vq->desc;
+	struct vring_desc *idesc = NULL;
 
 	*desc_chain_head = idx;
 
@@ -425,15 +481,29 @@ fill_vec_buf(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		dlen = vq->desc[idx].len;
 		descs = (struct vring_desc *)(uintptr_t)
			gpa_to_vva(dev, vq->desc[idx].addr, &dlen);
-		if (unlikely(!descs || dlen != vq->desc[idx].len))
+		if (unlikely(!descs))
 			return -1;
 
+		if (unlikely(dlen < vq->desc[idx].len)) {
+			/*
+			 * The indirect desc table is not contiguous
+			 * in process VA space, we have to copy it.
+			 */
+			idesc = alloc_copy_ind_table(dev, &vq->desc[idx]);
+			if (unlikely(!idesc))
+				return -1;
+
+			descs = idesc;
+		}
+
 		idx = 0;
 	}
 
 	while (1) {
-		if (unlikely(vec_id >= BUF_VECTOR_MAX || idx >= vq->size))
+		if (unlikely(vec_id >= BUF_VECTOR_MAX || idx >= vq->size)) {
+			free_ind_table(idesc);
 			return -1;
+		}
 
 		len += descs[idx].len;
 		buf_vec[vec_id].buf_addr = descs[idx].addr;
@@ -450,6 +520,9 @@ fill_vec_buf(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	*desc_chain_len = len;
 	*vec_idx = vec_id;
 
+	if (unlikely(!!idesc))
+		free_ind_table(idesc);
+
 	return 0;
 }
 
@@ -1200,7 +1273,7 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
 	/* Prefetch descriptor index. */
 	rte_prefetch0(&vq->desc[desc_indexes[0]]);
 	for (i = 0; i < count; i++) {
-		struct vring_desc *desc;
+		struct vring_desc *desc, *idesc = NULL;
 		uint16_t sz, idx;
 		uint64_t dlen;
 		int err;
@@ -1213,10 +1286,22 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
 			desc = (struct vring_desc *)(uintptr_t)gpa_to_vva(dev,
 					vq->desc[desc_indexes[i]].addr,
 					&dlen);
-			if (unlikely(!desc ||
-					dlen != vq->desc[desc_indexes[i]].len))
+			if (unlikely(!desc))
 				break;
 
+			if (unlikely(dlen < vq->desc[desc_indexes[i]].len)) {
+				/*
+				 * The indirect desc table is not contiguous
+				 * in process VA space, we have to copy it.
+				 */
+				idesc = alloc_copy_ind_table(dev,
+						&vq->desc[desc_indexes[i]]);
+				if (unlikely(!idesc))
+					break;
+
+				desc = idesc;
+			}
+
 			rte_prefetch0(desc);
 			sz = vq->desc[desc_indexes[i]].len / sizeof(*desc);
 			idx = 0;
@@ -1230,12 +1315,14 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
 		if (unlikely(pkts[i] == NULL)) {
 			RTE_LOG(ERR, VHOST_DATA,
 				"Failed to allocate memory for mbuf.\n");
+			free_ind_table(idesc);
 			break;
 		}
 
 		err = copy_desc_to_mbuf(dev, desc, sz, pkts[i], idx,
 					mbuf_pool);
 		if (unlikely(err)) {
 			rte_pktmbuf_free(pkts[i]);
+			free_ind_table(idesc);
 			break;
 		}
@@ -1245,6 +1332,7 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
 			zmbuf = get_zmbuf(vq);
 			if (!zmbuf) {
 				rte_pktmbuf_free(pkts[i]);
+				free_ind_table(idesc);
 				break;
 			}
 			zmbuf->mbuf = pkts[i];
@@ -1261,6 +1349,9 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
 			vq->nr_zmbuf += 1;
 			TAILQ_INSERT_TAIL(&vq->zmbuf_list, zmbuf, next);
 		}
+
+		if (unlikely(!!idesc))
+			free_ind_table(idesc);
 	}
 
 	vq->last_avail_idx += i;
-- 
2.14.3
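
For readers following the logic without a DPDK tree at hand, below is a
minimal standalone sketch of the chunked copy that alloc_copy_ind_table()
performs: a guest-physical range is translated piece by piece, and each
virtually contiguous chunk is gathered into a single allocation. The
translate() helper and the two-region memory map are simplified stand-ins
for gpa_to_vva() and the real guest memory regions, not code from the
patch; it builds with a plain C compiler and no DPDK headers.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/*
 * Stand-in for gpa_to_vva(): translates guest-physical address 'gpa'
 * and shrinks *len to the number of bytes that are virtually
 * contiguous. The "guest memory" is one guest-physical range of
 * GUEST_LEN bytes backed by two separate host buffers split at SPLIT,
 * so the range is non-contiguous in VA space.
 */
#define GUEST_LEN 64u
#define SPLIT     24u

static uint8_t region_a[SPLIT];
static uint8_t region_b[GUEST_LEN - SPLIT];

static void *
translate(uint64_t gpa, uint64_t *len)
{
	if (gpa < SPLIT) {
		if (*len > SPLIT - gpa)
			*len = SPLIT - gpa;	/* clamp to end of region A */
		return &region_a[gpa];
	}
	if (gpa < GUEST_LEN) {
		if (*len > GUEST_LEN - gpa)
			*len = GUEST_LEN - gpa;	/* clamp to end of region B */
		return &region_b[gpa - SPLIT];
	}
	return NULL;				/* unmapped address */
}

/* Same shape as the patch's loop: allocate once, copy chunk by chunk. */
static void *
alloc_copy_range(uint64_t gpa, uint64_t total_len)
{
	uint8_t *copy, *dst;
	uint64_t remain = total_len;

	copy = malloc(total_len);
	if (!copy)
		return NULL;

	dst = copy;
	while (remain) {
		uint64_t len = remain;
		void *src = translate(gpa, &len);

		if (!src || !len) {	/* hole in the mapping: give up */
			free(copy);
			return NULL;
		}
		memcpy(dst, src, len);
		remain -= len;
		dst += len;
		gpa += len;
	}
	return copy;
}

int
main(void)
{
	uint8_t *copy;
	unsigned int i;

	/* Fill the split "guest" range with a recognizable pattern. */
	for (i = 0; i < GUEST_LEN; i++) {
		uint64_t len = 1;
		*(uint8_t *)translate(i, &len) = (uint8_t)i;
	}

	copy = alloc_copy_range(0, GUEST_LEN);
	if (!copy)
		return 1;

	for (i = 0; i < GUEST_LEN; i++)
		if (copy[i] != (uint8_t)i)
			return 1;

	puts("non-contiguous range copied into one contiguous table");
	free(copy);
	return 0;
}

Note the design choice this mirrors: the patch only takes the copy path
when the translated length comes back shorter than the descriptor table
(dlen < desc->len), so tables that are contiguous in VA space keep being
used in place at zero cost, and the allocation is confined to the
unlikely case called out in the commit message.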