From mboxrd@z Thu Jan  1 00:00:00 1970
Return-Path: 
Received: from mga12.intel.com (mga12.intel.com [192.55.52.136])
 by dpdk.org (Postfix) with ESMTP id 9069F4CA7
 for ; Mon, 1 Apr 2019 17:31:29 +0200 (CEST)
X-Amp-Result: SKIPPED(no attachment in message)
X-Amp-File-Uploaded: False
Received: from orsmga008.jf.intel.com ([10.7.209.65])
 by fmsmga106.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384; 01 Apr 2019 08:31:28 -0700
X-ExtLoop1: 1
X-IronPort-AV: E=Sophos;i="5.60,297,1549958400"; d="scan'208";a="130484447"
Received: from silpixa00399952.ir.intel.com (HELO silpixa00399952.ger.corp.intel.com) ([10.237.223.64])
 by orsmga008.jf.intel.com with ESMTP; 01 Apr 2019 08:31:27 -0700
From: David Hunt 
To: dev@dpdk.org
Cc: david.hunt@intel.com, anatoly.burakov@intel.com, liang.j.ma@intel.com
Date: Mon, 1 Apr 2019 16:30:44 +0100
Message-Id: <20190401153044.39273-2-david.hunt@intel.com>
X-Mailer: git-send-email 2.17.1
In-Reply-To: <20190401153044.39273-1-david.hunt@intel.com>
References: <20190329131520.10653-1-david.hunt@intel.com>
 <20190401153044.39273-1-david.hunt@intel.com>
Subject: [dpdk-dev] [PATCH v4 2/2] examples/distributor: detect high
 frequency cores
X-BeenThere: dev@dpdk.org
X-Mailman-Version: 2.1.15
Precedence: list
List-Id: DPDK patches and discussions
List-Unsubscribe: ,
List-Archive: 
List-Post: 
List-Help: 
List-Subscribe: ,
X-List-Received-Date: Mon, 01 Apr 2019 15:31:30 -0000

The distributor application is bottlenecked by the distributor core,
so if that core can run at a higher frequency, the overall performance
of the application may increase.

This patch uses the rte_power_get_capabilities() API to query the
cores provided in the core mask, and if any high frequency cores are
found (e.g. where Turbo Boost is enabled), the distributor workload is
pinned to one of those cores.
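For reference, the detection itself boils down to the standalone sketch
below. This is an illustration only, not part of the patch: it assumes a
built DPDK with librte_power, an EAL initialized from argv, and the
"priority" capability bit that this series uses.

/*
 * Standalone sketch (illustration only): report which slave lcores the
 * power library flags as high frequency ("priority") cores. The API and
 * field names follow what the patch below uses; error handling is minimal.
 */
#include <stdio.h>

#include <rte_eal.h>
#include <rte_lcore.h>
#include <rte_power.h>

int
main(int argc, char **argv)
{
	struct rte_power_core_capabilities cap;
	unsigned int lcore_id;

	if (rte_eal_init(argc, argv) < 0)
		return -1;

	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		/* init the power library for this core, skip it on failure */
		if (rte_power_init(lcore_id) != 0)
			continue;
		if (rte_power_get_capabilities(lcore_id, &cap) == 0 &&
				cap.priority == 1)
			printf("lcore %u is a high frequency core\n", lcore_id);
		rte_power_exit(lcore_id);
	}
	return 0;
}

Cores reporting priority == 1 are the ones the example prefers, handing
them to the distributor first, then rx, then tx.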
Signed-off-by: Liang Ma 
Signed-off-by: David Hunt 
Reviewed-by: Anatoly Burakov 
---
 examples/distributor/main.c      | 201 ++++++++++++++++++++++++-------
 examples/distributor/meson.build |   2 +-
 2 files changed, 156 insertions(+), 47 deletions(-)

diff --git a/examples/distributor/main.c b/examples/distributor/main.c
index 03a05e3d9..b5499bb12 100644
--- a/examples/distributor/main.c
+++ b/examples/distributor/main.c
@@ -16,6 +16,7 @@
 #include
 #include
 #include
+#include

 #define RX_RING_SIZE 1024
 #define TX_RING_SIZE 1024
@@ -37,6 +38,7 @@ volatile uint8_t quit_signal;
 volatile uint8_t quit_signal_rx;
 volatile uint8_t quit_signal_dist;
 volatile uint8_t quit_signal_work;
+unsigned int power_lib_initialised;

 static volatile struct app_stats {
 	struct {
@@ -281,6 +283,8 @@ lcore_rx(struct lcore_params *p)
 		if (++port == nb_ports)
 			port = 0;
 	}
+	if (power_lib_initialised)
+		rte_power_exit(rte_lcore_id());
 	/* set worker & tx threads quit flag */
 	printf("\nCore %u exiting rx task.\n", rte_lcore_id());
 	quit_signal = 1;
@@ -363,7 +367,8 @@ lcore_distributor(struct lcore_params *p)
 	}
 	printf("\nCore %u exiting distributor task.\n", rte_lcore_id());
 	quit_signal_work = 1;
-
+	if (power_lib_initialised)
+		rte_power_exit(rte_lcore_id());
 	rte_distributor_flush(d);
 	/* Unblock any returns so workers can exit */
 	rte_distributor_clear_returns(d);
@@ -435,6 +440,8 @@ lcore_tx(struct rte_ring *in_r)
 			}
 		}
 	}
+	if (power_lib_initialised)
+		rte_power_exit(rte_lcore_id());
 	printf("\nCore %u exiting tx task.\n", rte_lcore_id());
 	return 0;
 }
@@ -575,9 +582,33 @@ lcore_worker(struct lcore_params *p)
 		if (num > 0)
 			app_stats.worker_bursts[p->worker_id][num-1]++;
 	}
+	if (power_lib_initialised)
+		rte_power_exit(rte_lcore_id());
+	rte_free(p);
 	return 0;
 }

+static int
+init_power_library(void)
+{
+	int ret = 0, lcore_id;
+	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+		/* init power management library */
+		ret = rte_power_init(lcore_id);
+		if (ret) {
+			RTE_LOG(ERR, POWER,
+				"Library initialization failed on core %u\n",
+				lcore_id);
+			/*
+			 * Return on first failure, we'll fall back
+			 * to non-power operation
+			 */
+			return ret;
+		}
+	}
+	return ret;
+}
+
 /* display usage */
 static void
 print_usage(const char *prgname)
@@ -657,7 +688,9 @@ main(int argc, char *argv[])
 	struct rte_distributor *d;
 	struct rte_ring *dist_tx_ring;
 	struct rte_ring *rx_dist_ring;
-	unsigned lcore_id, worker_id = 0;
+	struct rte_power_core_capabilities lcore_cap;
+	unsigned int lcore_id, worker_id = 0;
+	int distr_core_id = -1, rx_core_id = -1, tx_core_id = -1;
 	unsigned nb_ports;
 	uint16_t portid;
 	uint16_t nb_ports_available;
@@ -687,6 +720,9 @@ main(int argc, char *argv[])
 			"1 lcore for packet TX\n"
 			"and at least 1 lcore for worker threads\n");

+	if (init_power_library() == 0)
+		power_lib_initialised = 1;
+
 	nb_ports = rte_eth_dev_count_avail();
 	if (nb_ports == 0)
 		rte_exit(EXIT_FAILURE, "Error: no ethernet ports detected\n");
@@ -742,54 +778,123 @@ main(int argc, char *argv[])
 	if (rx_dist_ring == NULL)
 		rte_exit(EXIT_FAILURE, "Cannot create output ring\n");

-	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
-		if (worker_id == rte_lcore_count() - 3) {
-			printf("Starting distributor on lcore_id %d\n",
+	if (power_lib_initialised) {
+		/*
+		 * Here we'll pre-assign lcore ids to the rx, tx and
+		 * distributor workloads if there's higher frequency
+		 * on those cores e.g. if Turbo Boost is enabled.
+		 * It's also worth mentioning that it will assign cores in a
+		 * specific order, so that if there's less than three
+		 * available, the higher frequency cores will go to the
+		 * distributor first, then rx, then tx.
+		 */
+		RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+
+			rte_power_get_capabilities(lcore_id, &lcore_cap);
+
+			if (lcore_cap.priority != 1)
+				continue;
+
+			if (distr_core_id < 0) {
+				distr_core_id = lcore_id;
+				printf("Distributor on priority core %d\n",
 					lcore_id);
-			/* distributor core */
-			struct lcore_params *p =
-					rte_malloc(NULL, sizeof(*p), 0);
-			if (!p)
-				rte_panic("malloc failure\n");
-			*p = (struct lcore_params){worker_id, d,
-					rx_dist_ring, dist_tx_ring, mbuf_pool};
-			rte_eal_remote_launch(
-				(lcore_function_t *)lcore_distributor,
-				p, lcore_id);
-		} else if (worker_id == rte_lcore_count() - 4) {
-			printf("Starting tx on worker_id %d, lcore_id %d\n",
-					worker_id, lcore_id);
-			/* tx core */
-			rte_eal_remote_launch((lcore_function_t *)lcore_tx,
-					dist_tx_ring, lcore_id);
-		} else if (worker_id == rte_lcore_count() - 2) {
-			printf("Starting rx on worker_id %d, lcore_id %d\n",
-					worker_id, lcore_id);
-			/* rx core */
-			struct lcore_params *p =
-					rte_malloc(NULL, sizeof(*p), 0);
-			if (!p)
-				rte_panic("malloc failure\n");
-			*p = (struct lcore_params){worker_id, d, rx_dist_ring,
-					dist_tx_ring, mbuf_pool};
-			rte_eal_remote_launch((lcore_function_t *)lcore_rx,
-					p, lcore_id);
-		} else {
-			printf("Starting worker on worker_id %d, lcore_id %d\n",
-					worker_id, lcore_id);
-			struct lcore_params *p =
-					rte_malloc(NULL, sizeof(*p), 0);
-			if (!p)
-				rte_panic("malloc failure\n");
-			*p = (struct lcore_params){worker_id, d, rx_dist_ring,
-					dist_tx_ring, mbuf_pool};
-
-			rte_eal_remote_launch((lcore_function_t *)lcore_worker,
-					p, lcore_id);
+				continue;
+			}
+			if (rx_core_id < 0) {
+				rx_core_id = lcore_id;
+				printf("Rx on priority core %d\n",
+					lcore_id);
+				continue;
+			}
+			if (tx_core_id < 0) {
+				tx_core_id = lcore_id;
+				printf("Tx on priority core %d\n",
+					lcore_id);
+				continue;
+			}
+		}
+	}
+
+	/*
+	 * If there's any of the key workloads left without an lcore_id
+	 * after the high performing core assignment above, pre-assign
+	 * them here.
+	 */
+	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+		if (lcore_id == (unsigned int)distr_core_id ||
+				lcore_id == (unsigned int)rx_core_id ||
+				lcore_id == (unsigned int)tx_core_id)
+			continue;
+		if (distr_core_id < 0) {
+			distr_core_id = lcore_id;
+			printf("Distributor on core %d\n", lcore_id);
+			continue;
+		}
+		if (rx_core_id < 0) {
+			rx_core_id = lcore_id;
+			printf("Rx on core %d\n", lcore_id);
+			continue;
+		}
+		if (tx_core_id < 0) {
+			tx_core_id = lcore_id;
+			printf("Tx on core %d\n", lcore_id);
+			continue;
+		}
-		worker_id++;
 	}
+	printf(" tx id %d, dist id %d, rx id %d\n",
+			tx_core_id,
+			distr_core_id,
+			rx_core_id);
+
+	/*
+	 * Kick off all the worker threads first, avoiding the pre-assigned
+	 * lcore_ids for tx, rx and distributor workloads.
+	 */
+	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+		if (lcore_id == (unsigned int)distr_core_id ||
+				lcore_id == (unsigned int)rx_core_id ||
+				lcore_id == (unsigned int)tx_core_id)
+			continue;
+		printf("Starting thread %d as worker, lcore_id %d\n",
+				worker_id, lcore_id);
+		struct lcore_params *p =
+				rte_malloc(NULL, sizeof(*p), 0);
+		if (!p)
+			rte_panic("malloc failure\n");
+		*p = (struct lcore_params){worker_id++, d, rx_dist_ring,
+				dist_tx_ring, mbuf_pool};
+
+		rte_eal_remote_launch((lcore_function_t *)lcore_worker,
+				p, lcore_id);
+	}
+
+	/* Start tx core */
+	rte_eal_remote_launch((lcore_function_t *)lcore_tx,
+			dist_tx_ring, tx_core_id);
+
+	/* Start distributor core */
+	struct lcore_params *pd =
+			rte_malloc(NULL, sizeof(*pd), 0);
+	if (!pd)
+		rte_panic("malloc failure\n");
+	*pd = (struct lcore_params){worker_id++, d,
+			rx_dist_ring, dist_tx_ring, mbuf_pool};
+	rte_eal_remote_launch(
+			(lcore_function_t *)lcore_distributor,
+			pd, distr_core_id);
+
+	/* Start rx core */
+	struct lcore_params *pr =
+			rte_malloc(NULL, sizeof(*pr), 0);
+	if (!pr)
+		rte_panic("malloc failure\n");
+	*pr = (struct lcore_params){worker_id++, d, rx_dist_ring,
+			dist_tx_ring, mbuf_pool};
+	rte_eal_remote_launch((lcore_function_t *)lcore_rx,
+			pr, rx_core_id);
+
 	freq = rte_get_timer_hz();
 	t = rte_rdtsc() + freq;
 	while (!quit_signal_dist) {
@@ -806,5 +911,9 @@ main(int argc, char *argv[])
 	}
 	print_stats();
+
+	rte_free(pd);
+	rte_free(pr);
+
 	return 0;
 }
diff --git a/examples/distributor/meson.build b/examples/distributor/meson.build
index 88c001f56..8cf2ca1da 100644
--- a/examples/distributor/meson.build
+++ b/examples/distributor/meson.build
@@ -6,7 +6,7 @@
 # To build this example as a standalone application with an already-installed
 # DPDK instance, use 'make'
 
-deps += 'distributor'
+deps += ['distributor', 'power']
 sources = files(
 	'main.c'
 )
-- 
2.17.1
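To see the assignment order in isolation — priority cores go to the
distributor first, then rx, then tx, and any role still unassigned falls
back to the next free core — here is a small standalone sketch. It is an
illustration only: the role names and the toy lcore/priority arrays are
hypothetical and are not part of the example code.

#include <stdio.h>

enum { ROLE_DISTRIBUTOR, ROLE_RX, ROLE_TX, NUM_ROLES };

/* Give the next unassigned role to this core; return 1 if it was taken. */
static int
take_role(int core, int *role_core)
{
	int r;

	for (r = 0; r < NUM_ROLES; r++) {
		if (role_core[r] < 0) {
			role_core[r] = core;
			return 1;
		}
	}
	return 0;
}

int
main(void)
{
	/* Hypothetical core mask: lcores 1-5, with 2 and 4 high frequency. */
	int lcores[]      = { 1, 2, 3, 4, 5 };
	int is_priority[] = { 0, 1, 0, 1, 0 };
	int n = 5, role_core[NUM_ROLES] = { -1, -1, -1 };
	int i, r;

	/* Pass 1: priority cores only, distributor -> rx -> tx. */
	for (i = 0; i < n; i++)
		if (is_priority[i])
			take_role(lcores[i], role_core);

	/* Pass 2: fill whatever is still unassigned from the remaining cores. */
	for (i = 0; i < n; i++) {
		int used = 0;
		for (r = 0; r < NUM_ROLES; r++)
			if (role_core[r] == lcores[i])
				used = 1;
		if (!used)
			take_role(lcores[i], role_core);
	}

	printf("distributor=%d rx=%d tx=%d\n", role_core[ROLE_DISTRIBUTOR],
			role_core[ROLE_RX], role_core[ROLE_TX]);
	return 0;
}

With the toy input above this prints "distributor=2 rx=4 tx=1": the two
high frequency cores go to the distributor and rx, and tx falls back to
an ordinary core, matching the order described in the comment added to
main() in the patch.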