From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: from foss.arm.com (foss.arm.com [217.140.101.70]) by dpdk.org (Postfix) with ESMTP id 0D99C2BA4 for ; Mon, 27 Aug 2018 05:12:24 +0200 (CEST) Received: from usa-sjc-imap-foss1.foss.arm.com (unknown [10.72.51.249]) by usa-sjc-mx-foss1.foss.arm.com (Postfix) with ESMTP id 86C1A7A9; Sun, 26 Aug 2018 18:16:46 -0700 (PDT) Received: from phil-VirtualBox.shanghai.arm.com (unknown [10.169.107.143]) by usa-sjc-imap-foss1.foss.arm.com (Postfix) with ESMTPA id DA2C73F5BD; Sun, 26 Aug 2018 18:16:40 -0700 (PDT) From: Phil Yang To: dts@dpdk.org Cc: nd@arm.com Date: Mon, 27 Aug 2018 09:16:29 +0800 Message-Id: <1535332589-8090-1-git-send-email-phil.yang@arm.com> X-Mailer: git-send-email 2.7.4 Subject: [dts] [PATCH] framework/dut: setup hugepage for all available NUMA nodes X-BeenThere: dts@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: test suite reviews and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , X-List-Received-Date: Mon, 27 Aug 2018 03:12:25 -0000 On a NUMA machine, we need to set up hugepages for all NUMA nodes. The default approach is to set /sys/kernel/mm/hugepages, but that only works on a single-node system. This fix requires the numactl tool on Linux. 
Signed-off-by: Phil Yang --- framework/crb.py | 6 ++++-- framework/dut.py | 9 ++++++++- 2 files changed, 12 insertions(+), 3 deletions(-) diff --git a/framework/crb.py b/framework/crb.py index 5c555db..97bebfe 100644 --- a/framework/crb.py +++ b/framework/crb.py @@ -204,8 +204,10 @@ class Crb(object): if numa == -1: self.send_expect('echo %d > /sys/kernel/mm/hugepages/hugepages-%skB/nr_hugepages' % (huge_pages, page_size), '# ', 5) else: - # sometimes we set hugepage on kernel cmdline, so we need clear default hugepage - self.send_expect('echo 0 > /sys/kernel/mm/hugepages/hugepages-%skB/nr_hugepages' % (page_size), '# ', 5) + # hugepages may be set on the kernel cmdline, so clear the default hugepages once, when handling the first node. + if numa == 0: + self.send_expect('echo 0 > /sys/kernel/mm/hugepages/hugepages-%skB/nr_hugepages' % (page_size), '# ', 5) + # some platforms do not support NUMA, e.g. a VM DUT try: self.send_expect('echo %d > /sys/devices/system/node/node%d/hugepages/hugepages-%skB/nr_hugepages' % (huge_pages, numa, page_size), '# ', 5) diff --git a/framework/dut.py b/framework/dut.py index 18f0b39..b358578 100644 --- a/framework/dut.py +++ b/framework/dut.py @@ -305,6 +305,12 @@ class Dut(Crb): return hugepages_size = self.send_expect("awk '/Hugepagesize/ {print $2}' /proc/meminfo", "# ") total_huge_pages = self.get_total_huge_pages() + total_numa_nodes = self.send_expect("numactl -H | awk '/available/ {print $2}'", "# ") + numa_service_num = self.get_def_rte_config('CONFIG_RTE_MAX_NUMA_NODES') + if numa_service_num: + numa = min(total_numa_nodes, numa_service_num) + else: + numa = total_numa_nodes force_socket = False if int(hugepages_size) < (1024 * 1024): @@ -332,7 +338,8 @@ class Dut(Crb): if force_socket: self.set_huge_pages(arch_huge_pages, 0) else: - self.set_huge_pages(arch_huge_pages) + for numa_id in range(0, int(numa)): + self.set_huge_pages(arch_huge_pages, numa_id) self.mount_huge_pages() self.hugepage_path = self.strip_hugepage_path() -- 2.7.4