From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: from foss.arm.com (usa-sjc-mx-foss1.foss.arm.com [217.140.101.70]) by dpdk.org (Postfix) with ESMTP id 8D5431D8A for ; Tue, 28 Aug 2018 03:59:50 +0200 (CEST) Received: from usa-sjc-imap-foss1.foss.arm.com (unknown [10.72.51.249]) by usa-sjc-mx-foss1.foss.arm.com (Postfix) with ESMTP id BEB597A9; Mon, 27 Aug 2018 18:59:49 -0700 (PDT) Received: from phil-VirtualBox.shanghai.arm.com (unknown [10.169.107.143]) by usa-sjc-imap-foss1.foss.arm.com (Postfix) with ESMTPA id EFB1F3F721; Mon, 27 Aug 2018 18:59:48 -0700 (PDT) From: Phil Yang Date: Tue, 28 Aug 2018 09:59:34 +0800 Message-Id: <1535421574-8161-1-git-send-email-phil.yang@arm.com> X-Mailer: git-send-email 2.7.4 In-Reply-To: <1535332589-8090-1-git-send-email-phil.yang@arm.com> References: <1535332589-8090-1-git-send-email-phil.yang@arm.com> Subject: [dts] [PATCH v2] framework/dut: setup hugepage for all available numa nodes X-BeenThere: dts@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: test suite reviews and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , X-List-Received-Date: Tue, 28 Aug 2018 01:59:51 -0000 From: Phil Yang On a NUMA machine, we need to set up hugepages for all NUMA nodes. The default approach is to set /sys/kernel/mm/hugepages, but that only works on a single-node system. 
Signed-off-by: Phil Yang --- framework/crb.py | 6 ++++-- framework/dut.py | 9 ++++++++- 2 files changed, 12 insertions(+), 3 deletions(-) diff --git a/framework/crb.py b/framework/crb.py index 5c555db..97bebfe 100644 --- a/framework/crb.py +++ b/framework/crb.py @@ -204,8 +204,10 @@ class Crb(object): if numa == -1: self.send_expect('echo %d > /sys/kernel/mm/hugepages/hugepages-%skB/nr_hugepages' % (huge_pages, page_size), '# ', 5) else: - # sometimes we set hugepage on kernel cmdline, so we need clear default hugepage - self.send_expect('echo 0 > /sys/kernel/mm/hugepages/hugepages-%skB/nr_hugepages' % (page_size), '# ', 5) + # sometimes we set hugepage on kernel cmdline, so we clear all nodes' default hugepages at the first time. + if numa == 0: + self.send_expect('echo 0 > /sys/kernel/mm/hugepages/hugepages-%skB/nr_hugepages' % (page_size), '# ', 5) + # some platform not support numa, example vm dut try: self.send_expect('echo %d > /sys/devices/system/node/node%d/hugepages/hugepages-%skB/nr_hugepages' % (huge_pages, numa, page_size), '# ', 5) diff --git a/framework/dut.py b/framework/dut.py index 18f0b39..3e21509 100644 --- a/framework/dut.py +++ b/framework/dut.py @@ -305,6 +305,12 @@ class Dut(Crb): return hugepages_size = self.send_expect("awk '/Hugepagesize/ {print $2}' /proc/meminfo", "# ") total_huge_pages = self.get_total_huge_pages() + total_numa_nodes = self.send_expect("ls /sys/devices/system/node | grep node* | wc -l", "# ") + numa_service_num = self.get_def_rte_config('CONFIG_RTE_MAX_NUMA_NODES') + if numa_service_num: + numa = min(total_numa_nodes, numa_service_num) + else: + numa = total_numa_nodes force_socket = False if int(hugepages_size) < (1024 * 1024): @@ -332,7 +338,8 @@ class Dut(Crb): if force_socket: self.set_huge_pages(arch_huge_pages, 0) else: - self.set_huge_pages(arch_huge_pages) + for numa_id in range(0, int(numa)): + self.set_huge_pages(arch_huge_pages, numa_id) self.mount_huge_pages() self.hugepage_path = self.strip_hugepage_path() 
-- 2.7.4