From: Maxime Coquelin <maxime.coquelin@redhat.com>
To: dev@dpdk.org, tiwei.bie@intel.com, jfreimann@redhat.com,
	zhihong.wang@intel.com, bruce.richardson@intel.com,
	konstantin.ananyev@intel.com, david.marchand@redhat.com
Cc: Maxime Coquelin <maxime.coquelin@redhat.com>
Date: Fri, 17 May 2019 17:06:09 +0200
Message-Id: <20190517150613.13310-2-maxime.coquelin@redhat.com>
In-Reply-To: <20190517150613.13310-1-maxime.coquelin@redhat.com>
References: <20190517150613.13310-1-maxime.coquelin@redhat.com>
Subject: [dpdk-dev] [PATCH v2 1/5] vhost: un-inline dirty pages logging functions

In order to reduce I-cache pressure, this patch removes the inlining
of the dirty pages logging functions, which can be considered a cold
path. Indeed, these functions are only called during live migration,
so they are not executed most of the time.

Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
 lib/librte_vhost/vhost.c | 132 +++++++++++++++++++++++++++++++++++++++
 lib/librte_vhost/vhost.h | 129 ++++----------------------------------
 2 files changed, 144 insertions(+), 117 deletions(-)

diff --git a/lib/librte_vhost/vhost.c b/lib/librte_vhost/vhost.c
index 163f4595ef..4a54ad6bd1 100644
--- a/lib/librte_vhost/vhost.c
+++ b/lib/librte_vhost/vhost.c
@@ -69,6 +69,138 @@ __vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	return 0;
 }
 
+#define VHOST_LOG_PAGE	4096
+
+/*
+ * Atomically set a bit in memory.
+ */
+static __rte_always_inline void
+vhost_set_bit(unsigned int nr, volatile uint8_t *addr)
+{
+#if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION < 70100)
+	/*
+	 * __sync_ built-ins are deprecated, but __atomic_ ones
+	 * are sub-optimized in older GCC versions.
+	 */
+	__sync_fetch_and_or_1(addr, (1U << nr));
+#else
+	__atomic_fetch_or(addr, (1U << nr), __ATOMIC_RELAXED);
+#endif
+}
+
+static __rte_always_inline void
+vhost_log_page(uint8_t *log_base, uint64_t page)
+{
+	vhost_set_bit(page % 8, &log_base[page / 8]);
+}
+
+void
+__vhost_log_write(struct virtio_net *dev, uint64_t addr, uint64_t len)
+{
+	uint64_t page;
+
+	if (unlikely(!dev->log_base || !len))
+		return;
+
+	if (unlikely(dev->log_size <= ((addr + len - 1) / VHOST_LOG_PAGE / 8)))
+		return;
+
+	/* To make sure guest memory updates are committed before logging */
+	rte_smp_wmb();
+
+	page = addr / VHOST_LOG_PAGE;
+	while (page * VHOST_LOG_PAGE < addr + len) {
+		vhost_log_page((uint8_t *)(uintptr_t)dev->log_base, page);
+		page += 1;
+	}
+}
+
+void
+__vhost_log_cache_sync(struct virtio_net *dev, struct vhost_virtqueue *vq)
+{
+	unsigned long *log_base;
+	int i;
+
+	if (unlikely(!dev->log_base))
+		return;
+
+	rte_smp_wmb();
+
+	log_base = (unsigned long *)(uintptr_t)dev->log_base;
+
+	for (i = 0; i < vq->log_cache_nb_elem; i++) {
+		struct log_cache_entry *elem = vq->log_cache + i;
+
+#if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION < 70100)
+		/*
+		 * '__sync' builtins are deprecated, but '__atomic' ones
+		 * are sub-optimized in older GCC versions.
+		 */
+		__sync_fetch_and_or(log_base + elem->offset, elem->val);
+#else
+		__atomic_fetch_or(log_base + elem->offset, elem->val,
+				__ATOMIC_RELAXED);
+#endif
+	}
+
+	rte_smp_wmb();
+
+	vq->log_cache_nb_elem = 0;
+}
+
+static __rte_always_inline void
+vhost_log_cache_page(struct virtio_net *dev, struct vhost_virtqueue *vq,
+			uint64_t page)
+{
+	uint32_t bit_nr = page % (sizeof(unsigned long) << 3);
+	uint32_t offset = page / (sizeof(unsigned long) << 3);
+	int i;
+
+	for (i = 0; i < vq->log_cache_nb_elem; i++) {
+		struct log_cache_entry *elem = vq->log_cache + i;
+
+		if (elem->offset == offset) {
+			elem->val |= (1UL << bit_nr);
+			return;
+		}
+	}
+
+	if (unlikely(i >= VHOST_LOG_CACHE_NR)) {
+		/*
+		 * No more room for a new log cache entry,
+		 * so write the dirty log map directly.
+		 */
+		rte_smp_wmb();
+		vhost_log_page((uint8_t *)(uintptr_t)dev->log_base, page);
+
+		return;
+	}
+
+	vq->log_cache[i].offset = offset;
+	vq->log_cache[i].val = (1UL << bit_nr);
+	vq->log_cache_nb_elem++;
+}
+
+void
+__vhost_log_cache_write(struct virtio_net *dev, struct vhost_virtqueue *vq,
+			uint64_t addr, uint64_t len)
+{
+	uint64_t page;
+
+	if (unlikely(!dev->log_base || !len))
+		return;
+
+	if (unlikely(dev->log_size <= ((addr + len - 1) / VHOST_LOG_PAGE / 8)))
+		return;
+
+	page = addr / VHOST_LOG_PAGE;
+	while (page * VHOST_LOG_PAGE < addr + len) {
+		vhost_log_cache_page(dev, vq, page);
+		page += 1;
+	}
+}
+
+
 void
 cleanup_vq(struct vhost_virtqueue *vq, int destroy)
 {
diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
index e9138dfab4..3ab7b4950f 100644
--- a/lib/librte_vhost/vhost.h
+++ b/lib/librte_vhost/vhost.h
@@ -350,138 +350,33 @@ desc_is_avail(struct vring_packed_desc *desc, bool wrap_counter)
 		wrap_counter != !!(flags & VRING_DESC_F_USED);
 }
 
-#define VHOST_LOG_PAGE	4096
-
-/*
- * Atomically set a bit in memory.
- */
-static __rte_always_inline void
-vhost_set_bit(unsigned int nr, volatile uint8_t *addr)
-{
-#if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION < 70100)
-	/*
-	 * __sync_ built-ins are deprecated, but __atomic_ ones
-	 * are sub-optimized in older GCC versions.
-	 */
-	__sync_fetch_and_or_1(addr, (1U << nr));
-#else
-	__atomic_fetch_or(addr, (1U << nr), __ATOMIC_RELAXED);
-#endif
-}
-
-static __rte_always_inline void
-vhost_log_page(uint8_t *log_base, uint64_t page)
-{
-	vhost_set_bit(page % 8, &log_base[page / 8]);
-}
+void __vhost_log_cache_write(struct virtio_net *dev,
+		struct vhost_virtqueue *vq,
+		uint64_t addr, uint64_t len);
+void __vhost_log_cache_sync(struct virtio_net *dev,
+		struct vhost_virtqueue *vq);
+void __vhost_log_write(struct virtio_net *dev, uint64_t addr, uint64_t len);
 
 static __rte_always_inline void
 vhost_log_write(struct virtio_net *dev, uint64_t addr, uint64_t len)
 {
-	uint64_t page;
-
-	if (likely(((dev->features & (1ULL << VHOST_F_LOG_ALL)) == 0) ||
-		   !dev->log_base || !len))
-		return;
-
-	if (unlikely(dev->log_size <= ((addr + len - 1) / VHOST_LOG_PAGE / 8)))
-		return;
-
-	/* To make sure guest memory updates are committed before logging */
-	rte_smp_wmb();
-
-	page = addr / VHOST_LOG_PAGE;
-	while (page * VHOST_LOG_PAGE < addr + len) {
-		vhost_log_page((uint8_t *)(uintptr_t)dev->log_base, page);
-		page += 1;
-	}
+	if (unlikely(dev->features & (1ULL << VHOST_F_LOG_ALL)))
+		__vhost_log_write(dev, addr, len);
 }
 
 static __rte_always_inline void
 vhost_log_cache_sync(struct virtio_net *dev, struct vhost_virtqueue *vq)
 {
-	unsigned long *log_base;
-	int i;
-
-	if (likely(((dev->features & (1ULL << VHOST_F_LOG_ALL)) == 0) ||
-		   !dev->log_base))
-		return;
-
-	rte_smp_wmb();
-
-	log_base = (unsigned long *)(uintptr_t)dev->log_base;
-
-	for (i = 0; i < vq->log_cache_nb_elem; i++) {
-		struct log_cache_entry *elem = vq->log_cache + i;
-
-#if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION < 70100)
-		/*
-		 * '__sync' builtins are deprecated, but '__atomic' ones
-		 * are sub-optimized in older GCC versions.
-		 */
-		__sync_fetch_and_or(log_base + elem->offset, elem->val);
-#else
-		__atomic_fetch_or(log_base + elem->offset, elem->val,
-				__ATOMIC_RELAXED);
-#endif
-	}
-
-	rte_smp_wmb();
-
-	vq->log_cache_nb_elem = 0;
-}
-
-static __rte_always_inline void
-vhost_log_cache_page(struct virtio_net *dev, struct vhost_virtqueue *vq,
-			uint64_t page)
-{
-	uint32_t bit_nr = page % (sizeof(unsigned long) << 3);
-	uint32_t offset = page / (sizeof(unsigned long) << 3);
-	int i;
-
-	for (i = 0; i < vq->log_cache_nb_elem; i++) {
-		struct log_cache_entry *elem = vq->log_cache + i;
-
-		if (elem->offset == offset) {
-			elem->val |= (1UL << bit_nr);
-			return;
-		}
-	}
-
-	if (unlikely(i >= VHOST_LOG_CACHE_NR)) {
-		/*
-		 * No more room for a new log cache entry,
-		 * so write the dirty log map directly.
-		 */
-		rte_smp_wmb();
-		vhost_log_page((uint8_t *)(uintptr_t)dev->log_base, page);
-
-		return;
-	}
-
-	vq->log_cache[i].offset = offset;
-	vq->log_cache[i].val = (1UL << bit_nr);
-	vq->log_cache_nb_elem++;
+	if (unlikely(dev->features & (1ULL << VHOST_F_LOG_ALL)))
+		__vhost_log_cache_sync(dev, vq);
 }
 
 static __rte_always_inline void
 vhost_log_cache_write(struct virtio_net *dev, struct vhost_virtqueue *vq,
 			uint64_t addr, uint64_t len)
 {
-	uint64_t page;
-
-	if (likely(((dev->features & (1ULL << VHOST_F_LOG_ALL)) == 0) ||
-		   !dev->log_base || !len))
-		return;
-
-	if (unlikely(dev->log_size <= ((addr + len - 1) / VHOST_LOG_PAGE / 8)))
-		return;
-
-	page = addr / VHOST_LOG_PAGE;
-	while (page * VHOST_LOG_PAGE < addr + len) {
-		vhost_log_cache_page(dev, vq, page);
-		page += 1;
-	}
+	if (unlikely(dev->features & (1ULL << VHOST_F_LOG_ALL)))
+		__vhost_log_cache_write(dev, vq, addr, len);
 }
 
 static __rte_always_inline void
-- 
2.21.0
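
For readers new to this pattern, here is a minimal standalone sketch of the
un-inlining approach the patch applies. The type and function names below
(struct dev, F_LOG_ALL, __log_write, log_write) are simplified stand-ins, not
the real vhost API, and __builtin_expect stands in for DPDK's unlikely()
macro. The idea is that only a cheap feature-bit test stays inlined in the
hot path, while the cold logging body lives out of line and so does not
occupy I-cache at call sites that never log.

/*
 * Standalone illustration only: simplified stand-ins for the vhost types
 * and names, not the actual DPDK API.
 */
#include <stdint.h>
#include <stdio.h>

#define F_LOG_ALL	(1ULL << 26)	/* stand-in for VHOST_F_LOG_ALL */

struct dev {
	uint64_t features;
	uint64_t last_logged;	/* toy state instead of a dirty bitmap */
};

/* Cold path: defined out of line (in a .c file), called only when logging. */
void __log_write(struct dev *d, uint64_t addr, uint64_t len)
{
	d->last_logged = addr;
	printf("logged %llu bytes at 0x%llx\n",
	       (unsigned long long)len, (unsigned long long)addr);
}

/* Hot path: only this cheap feature-bit test gets inlined at call sites. */
static inline void log_write(struct dev *d, uint64_t addr, uint64_t len)
{
	if (__builtin_expect(!!(d->features & F_LOG_ALL), 0))
		__log_write(d, addr, len);
}

int main(void)
{
	struct dev d = { .features = 0 };

	log_write(&d, 0x1000, 64);	/* bit clear: wrapper returns, no logging */
	d.features |= F_LOG_ALL;
	log_write(&d, 0x2000, 64);	/* cold path taken, as during migration */
	return 0;
}

With optimization enabled, each call site of the wrapper should reduce to a
test and a rarely-taken branch, which is the I-cache saving the commit
message refers to.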