From: Stephen Hurd <shurd@broadcom.com>
To: <dev@dpdk.org>
Subject: [dpdk-dev] [PATCH] app/test-pmd: Detect NUMA socket count
Date: Thu, 20 Aug 2015 19:25:21 -0700
Message-ID: <1440123921-59551-1-git-send-email-shurd@broadcom.com>
Currently, a MAX_SOCKET macro artificially limits the number of NUMA
sockets testpmd can use; anything on a higher socket ends up using
socket zero. This patch replaces the macro with a max_socket variable
set during set_default_fwd_lcores_config() and uses RTE_MAX_NUMA_NODES
where a hard-coded maximum number of sockets is still required.
Signed-off-by: Stephen Hurd <shurd@broadcom.com>
---
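For reference, a minimal standalone sketch of the detection logic (not
part of this patch): count the NUMA sockets spanned by the enabled
lcores as the highest socket id seen plus one, and bail out if that
exceeds RTE_MAX_NUMA_NODES. It assumes only DPDK's rte_lcore.h /
rte_debug.h API; the helper name count_sockets() is illustrative. The
patched testpmd folds the same loop into
set_default_fwd_lcores_config() and additionally skips the master
lcore.

#include <stdlib.h>       /* EXIT_FAILURE */
#include <rte_lcore.h>    /* rte_lcore_is_enabled(), rte_lcore_to_socket_id() */
#include <rte_debug.h>    /* rte_exit() */

/* Return the number of NUMA sockets spanned by the enabled lcores:
 * the highest socket id seen, plus one (0 if no lcore is enabled). */
static unsigned
count_sockets(void)
{
	unsigned lcore, sock, max_socket = 0;

	for (lcore = 0; lcore < RTE_MAX_LCORE; lcore++) {
		if (!rte_lcore_is_enabled(lcore))
			continue;
		sock = rte_lcore_to_socket_id(lcore) + 1;
		if (sock > RTE_MAX_NUMA_NODES)
			rte_exit(EXIT_FAILURE,
				 "Total sockets greater than %u\n",
				 RTE_MAX_NUMA_NODES);
		if (sock > max_socket)
			max_socket = sock;
	}
	return max_socket;
}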
app/test-pmd/parameters.c | 12 ++++++------
app/test-pmd/testpmd.c | 17 +++++++++++++----
app/test-pmd/testpmd.h | 3 +--
3 files changed, 20 insertions(+), 12 deletions(-)
diff --git a/app/test-pmd/parameters.c b/app/test-pmd/parameters.c
index f1daa6e..fe78723 100644
--- a/app/test-pmd/parameters.c
+++ b/app/test-pmd/parameters.c
@@ -398,9 +398,9 @@ parse_portnuma_config(const char *q_arg)
return -1;
}
socket_id = (uint8_t)int_fld[FLD_SOCKET];
- if(socket_id >= MAX_SOCKET) {
+ if(socket_id >= max_socket) {
printf("Invalid socket id, range is [0, %d]\n",
- MAX_SOCKET - 1);
+ max_socket - 1);
return -1;
}
port_numa[port_id] = socket_id;
@@ -458,9 +458,9 @@ parse_ringnuma_config(const char *q_arg)
return -1;
}
socket_id = (uint8_t)int_fld[FLD_SOCKET];
- if (socket_id >= MAX_SOCKET) {
+ if (socket_id >= max_socket) {
printf("Invalid socket id, range is [0, %d]\n",
- MAX_SOCKET - 1);
+ max_socket - 1);
return -1;
}
ring_flag = (uint8_t)int_fld[FLD_FLAG];
@@ -667,12 +667,12 @@ launch_args_parse(int argc, char** argv)
"invalid ring-numa configuration\n");
if (!strcmp(lgopts[opt_idx].name, "socket-num")) {
n = atoi(optarg);
- if(n < MAX_SOCKET)
+ if((uint8_t)n < max_socket)
socket_num = (uint8_t)n;
else
rte_exit(EXIT_FAILURE,
"The socket number should be < %d\n",
- MAX_SOCKET);
+ max_socket);
}
if (!strcmp(lgopts[opt_idx].name, "mbuf-size")) {
n = atoi(optarg);
diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
index 386bf84..2578b6b 100644
--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c
@@ -313,6 +313,8 @@ struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_a
uint16_t nb_tx_queue_stats_mappings = 0;
uint16_t nb_rx_queue_stats_mappings = 0;
+unsigned max_socket = 0;
+
/* Forward function declarations */
static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
static void check_all_ports_link_status(uint32_t port_mask);
@@ -345,6 +347,7 @@ set_default_fwd_lcores_config(void)
{
unsigned int i;
unsigned int nb_lc;
+ unsigned int sock_num;
nb_lc = 0;
for (i = 0; i < RTE_MAX_LCORE; i++) {
@@ -353,6 +356,12 @@ set_default_fwd_lcores_config(void)
if (i == rte_get_master_lcore())
continue;
fwd_lcores_cpuids[nb_lc++] = i;
+ sock_num = rte_lcore_to_socket_id(i) + 1;
+ if (sock_num > max_socket) {
+ if (sock_num > RTE_MAX_NUMA_NODES)
+ rte_exit(EXIT_FAILURE, "Total sockets greater than %u\n", RTE_MAX_NUMA_NODES);
+ max_socket = sock_num;
+ }
}
nb_lcores = (lcoreid_t) nb_lc;
nb_cfg_lcores = nb_lcores;
@@ -446,7 +455,7 @@ check_socket_id(const unsigned int socket_id)
{
static int warning_once = 0;
- if (socket_id >= MAX_SOCKET) {
+ if (socket_id >= max_socket) {
if (!warning_once && numa_support)
printf("Warning: NUMA should be configured manually by"
" using --port-numa-config and"
@@ -466,9 +475,9 @@ init_config(void)
struct rte_mempool *mbp;
unsigned int nb_mbuf_per_pool;
lcoreid_t lc_id;
- uint8_t port_per_socket[MAX_SOCKET];
+ uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
- memset(port_per_socket,0,MAX_SOCKET);
+ memset(port_per_socket,0,RTE_MAX_NUMA_NODES);
/* Configuration of logical cores. */
fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
sizeof(struct fwd_lcore *) * nb_lcores,
@@ -545,7 +554,7 @@ init_config(void)
if (param_total_num_mbufs)
nb_mbuf_per_pool = nb_mbuf_per_pool/nb_ports;
- for (i = 0; i < MAX_SOCKET; i++) {
+ for (i = 0; i < max_socket; i++) {
nb_mbuf = (nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
if (nb_mbuf)
mbuf_pool_create(mbuf_data_size,
diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
index d287274..f925df7 100644
--- a/app/test-pmd/testpmd.h
+++ b/app/test-pmd/testpmd.h
@@ -308,8 +308,6 @@ extern volatile int test_done; /* stop packet forwarding when set to 1. */
extern uint32_t bypass_timeout; /**< Store the NIC bypass watchdog timeout */
#endif
-#define MAX_SOCKET 2 /*MAX SOCKET:currently, it is 2 */
-
/*
* Store specified sockets on which memory pool to be used by ports
* is allocated.
@@ -338,6 +336,7 @@ extern lcoreid_t nb_lcores; /**< Number of logical cores probed at init time. */
extern lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
extern lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
extern unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE];
+extern unsigned max_socket;
/*
* Configuration of Ethernet ports:
--
1.9.1