From: Marvin Liu <yong.liu@intel.com>
To: maxime.coquelin@redhat.com, xiaolong.ye@intel.com, zhihong.wang@intel.com
Cc: dev@dpdk.org, Marvin Liu <yong.liu@intel.com>
Date: Tue, 28 Apr 2020 17:13:59 +0800
Message-Id: <20200428091359.11297-2-yong.liu@intel.com>
In-Reply-To: <20200428091359.11297-1-yong.liu@intel.com>
References: <20200316153353.112897-1-yong.liu@intel.com> <20200428091359.11297-1-yong.liu@intel.com>
Subject: [dpdk-dev] [PATCH v3 2/2] vhost: binary search address mapping table

If Tx zero copy is enabled, the gpa-to-hpa mapping table is searched entry by
entry for every address translation. This hurts performance when the guest
memory backend uses 2M hugepages. Now use binary search to find the entry in
the mapping table, and keep linear search for tables below a threshold of 256
entries.
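
As a side note for reviewers, the threshold-switched lookup can be sketched
outside the vhost code roughly as below. This is only an illustration of the
idea, not the patch itself: the addr_map struct, gpa_to_hpa_sketch() helper,
BINARY_SEARCH_THRESH name and the demo values are made up, and the comparator
here matches whole address ranges, which is one way to let bsearch() find an
address that falls inside an entry rather than exactly at its start.

/*
 * Standalone sketch: keep the address-mapping table sorted, binary-search it
 * once it grows past a threshold, and fall back to a linear scan otherwise.
 * All names and values below are illustrative, not DPDK code.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define BINARY_SEARCH_THRESH 256

struct addr_map {
	uint64_t guest_phys_addr;   /* start of the guest-physical range */
	uint64_t host_phys_addr;    /* corresponding host-physical start */
	uint64_t size;              /* length of the range in bytes */
};

/* Range-aware comparator: returns 0 when the key falls inside the entry. */
static int
addr_map_rangecmp(const void *p1, const void *p2)
{
	const struct addr_map *key = p1;
	const struct addr_map *map = p2;

	if (key->guest_phys_addr < map->guest_phys_addr)
		return -1;
	if (key->guest_phys_addr >= map->guest_phys_addr + map->size)
		return 1;
	return 0;
}

/* Translate gpa to hpa, or return 0 if the range is not mapped. */
static uint64_t
gpa_to_hpa_sketch(const struct addr_map *maps, uint32_t nr_maps,
		  uint64_t gpa, uint64_t size)
{
	const struct addr_map *map;
	struct addr_map key = { .guest_phys_addr = gpa };
	uint32_t i;

	if (nr_maps >= BINARY_SEARCH_THRESH) {
		/* Table is kept sorted by guest_phys_addr, so bsearch() applies. */
		map = bsearch(&key, maps, nr_maps, sizeof(*maps),
			      addr_map_rangecmp);
		if (map && gpa + size <= map->guest_phys_addr + map->size)
			return gpa - map->guest_phys_addr + map->host_phys_addr;
		return 0;
	}

	/* Small table: a linear scan is cheap and needs no prior sort. */
	for (i = 0; i < nr_maps; i++) {
		map = &maps[i];
		if (gpa >= map->guest_phys_addr &&
		    gpa + size <= map->guest_phys_addr + map->size)
			return gpa - map->guest_phys_addr + map->host_phys_addr;
	}
	return 0;
}

int
main(void)
{
	/* Two 2M ranges, already sorted by guest_phys_addr. */
	struct addr_map maps[] = {
		{ 0x000000, 0x700000, 0x200000 },
		{ 0x200000, 0xb00000, 0x200000 },
	};

	/* Prints 0xb01000: 0x201000 lies in the second range. */
	printf("0x%" PRIx64 "\n",
	       gpa_to_hpa_sketch(maps, 2, 0x201000, 0x1000));
	return 0;
}

With more than BINARY_SEARCH_THRESH entries the same call goes through
bsearch() instead of the loop; the table only has to be sorted once, when it
is populated.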
Signed-off-by: Marvin Liu <yong.liu@intel.com>

diff --git a/lib/librte_vhost/Makefile b/lib/librte_vhost/Makefile
index e592795f2..8769afaad 100644
--- a/lib/librte_vhost/Makefile
+++ b/lib/librte_vhost/Makefile
@@ -10,7 +10,7 @@ EXPORT_MAP := rte_vhost_version.map
 
 CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3
 CFLAGS += -I vhost_user
-CFLAGS += -fno-strict-aliasing
+CFLAGS += -fno-strict-aliasing -Wno-maybe-uninitialized
 LDLIBS += -lpthread
 
 ifeq ($(RTE_TOOLCHAIN), gcc)
diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
index 507dbf214..a0fee39d5 100644
--- a/lib/librte_vhost/vhost.h
+++ b/lib/librte_vhost/vhost.h
@@ -546,20 +546,46 @@ extern int vhost_data_log_level;
 #define MAX_VHOST_DEVICE 1024
 extern struct virtio_net *vhost_devices[MAX_VHOST_DEVICE];
 
+#define VHOST_BINARY_SEARCH_THRESH 256
+static int guest_page_addrcmp(const void *p1, const void *p2)
+{
+	const struct guest_page *page1 = (const struct guest_page *)p1;
+	const struct guest_page *page2 = (const struct guest_page *)p2;
+
+	if (page1->guest_phys_addr > page2->guest_phys_addr)
+		return 1;
+	if (page1->guest_phys_addr < page2->guest_phys_addr)
+		return -1;
+
+	return 0;
+}
+
 /* Convert guest physical address to host physical address */
 static __rte_always_inline rte_iova_t
 gpa_to_hpa(struct virtio_net *dev, uint64_t gpa, uint64_t size)
 {
 	uint32_t i;
 	struct guest_page *page;
-
-	for (i = 0; i < dev->nr_guest_pages; i++) {
-		page = &dev->guest_pages[i];
-
-		if (gpa >= page->guest_phys_addr &&
-		    gpa + size < page->guest_phys_addr + page->size) {
-			return gpa - page->guest_phys_addr +
-			       page->host_phys_addr;
+	struct guest_page key;
+
+	if (dev->nr_guest_pages >= VHOST_BINARY_SEARCH_THRESH) {
+		key.guest_phys_addr = gpa;
+		page = bsearch(&key, dev->guest_pages, dev->nr_guest_pages,
+			       sizeof(struct guest_page), guest_page_addrcmp);
+		if (page) {
+			if (gpa + size < page->guest_phys_addr + page->size)
+				return gpa - page->guest_phys_addr +
+				       page->host_phys_addr;
+		}
+	} else {
+		for (i = 0; i < dev->nr_guest_pages; i++) {
+			page = &dev->guest_pages[i];
+
+			if (gpa >= page->guest_phys_addr &&
+			    gpa + size < page->guest_phys_addr +
+			    page->size)
+				return gpa - page->guest_phys_addr +
+				       page->host_phys_addr;
 		}
 	}
 
diff --git a/lib/librte_vhost/vhost_user.c b/lib/librte_vhost/vhost_user.c
index 79fcb9d19..15e50d27d 100644
--- a/lib/librte_vhost/vhost_user.c
+++ b/lib/librte_vhost/vhost_user.c
@@ -965,6 +965,12 @@ add_guest_pages(struct virtio_net *dev, struct rte_vhost_mem_region *reg,
 		reg_size -= size;
 	}
 
+	/* sort guest page array if over binary search threshold */
+	if (dev->nr_guest_pages >= VHOST_BINARY_SEARCH_THRESH) {
+		qsort((void *)dev->guest_pages, dev->nr_guest_pages,
+			sizeof(struct guest_page), guest_page_addrcmp);
+	}
+
 	return 0;
 }
-- 
2.17.1