From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: from foss.arm.com (foss.arm.com [217.140.101.70]) by dpdk.org (Postfix) with ESMTP id 26450DED for ; Tue, 28 Aug 2018 05:08:54 +0200 (CEST) Received: from usa-sjc-imap-foss1.foss.arm.com (unknown [10.72.51.249]) by usa-sjc-mx-foss1.foss.arm.com (Postfix) with ESMTP id 4A31F7A9; Mon, 27 Aug 2018 20:08:53 -0700 (PDT) Received: from phil-VirtualBox.shanghai.arm.com (unknown [10.169.107.143]) by usa-sjc-imap-foss1.foss.arm.com (Postfix) with ESMTPA id 7AAD83F721; Mon, 27 Aug 2018 20:08:52 -0700 (PDT) From: Phil Yang Date: Tue, 28 Aug 2018 11:08:41 +0800 Message-Id: <1535425721-8709-1-git-send-email-phil.yang@arm.com> X-Mailer: git-send-email 2.7.4 In-Reply-To: <1535421574-8161-1-git-send-email-phil.yang@arm.com> References: <1535421574-8161-1-git-send-email-phil.yang@arm.com> Subject: [dts] [PATCH v3] framework/dut: setup hugepage for all available numa nodes X-BeenThere: dts@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: test suite reviews and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , X-List-Received-Date: Tue, 28 Aug 2018 03:08:54 -0000 From: Phil Yang On a NUMA machine, we need to set up hugepages for all NUMA nodes. The default approach is to set /sys/kernel/mm/hugepages, but it only works on a single-node system. 
Signed-off-by: Phil Yang --- framework/crb.py | 6 ++++-- framework/dut.py | 9 ++++++++- 2 files changed, 12 insertions(+), 3 deletions(-) diff --git a/framework/crb.py b/framework/crb.py index 5c555db..97bebfe 100644 --- a/framework/crb.py +++ b/framework/crb.py @@ -204,8 +204,10 @@ class Crb(object): if numa == -1: self.send_expect('echo %d > /sys/kernel/mm/hugepages/hugepages-%skB/nr_hugepages' % (huge_pages, page_size), '# ', 5) else: - # sometimes we set hugepage on kernel cmdline, so we need clear default hugepage - self.send_expect('echo 0 > /sys/kernel/mm/hugepages/hugepages-%skB/nr_hugepages' % (page_size), '# ', 5) + # sometimes we set hugepages on the kernel cmdline, so we clear all nodes' default hugepages the first time. + if numa == 0: + self.send_expect('echo 0 > /sys/kernel/mm/hugepages/hugepages-%skB/nr_hugepages' % (page_size), '# ', 5) + # some platforms do not support numa, for example a vm dut try: self.send_expect('echo %d > /sys/devices/system/node/node%d/hugepages/hugepages-%skB/nr_hugepages' % (huge_pages, numa, page_size), '# ', 5) diff --git a/framework/dut.py b/framework/dut.py index 18f0b39..a8116ff 100644 --- a/framework/dut.py +++ b/framework/dut.py @@ -305,6 +305,12 @@ class Dut(Crb): return hugepages_size = self.send_expect("awk '/Hugepagesize/ {print $2}' /proc/meminfo", "# ") total_huge_pages = self.get_total_huge_pages() + total_numa_nodes = self.send_expect("ls /sys/devices/system/node | grep node* | wc -l", "# ") + numa_service_num = self.get_def_rte_config('CONFIG_RTE_MAX_NUMA_NODES') + if numa_service_num is not None: + numa = min(int(total_numa_nodes), int(numa_service_num)) + else: + numa = total_numa_nodes force_socket = False if int(hugepages_size) < (1024 * 1024): @@ -332,7 +338,8 @@ class Dut(Crb): if force_socket: self.set_huge_pages(arch_huge_pages, 0) else: - self.set_huge_pages(arch_huge_pages) + for numa_id in range(0, int(numa)): + self.set_huge_pages(arch_huge_pages, numa_id) self.mount_huge_pages() self.hugepage_path = 
self.strip_hugepage_path() -- 2.7.4