From: Sivaprasad Tummala <sivaprasad.tummala@amd.com>
To: <david.hunt@intel.com>, <anatoly.burakov@intel.com>,
<jerinj@marvell.com>, <radu.nicolau@intel.com>,
<gakhil@marvell.com>, <cristian.dumitrescu@intel.com>,
<ferruh.yigit@amd.com>, <konstantin.ananyev@huawei.com>
Cc: <dev@dpdk.org>, <stable@dpdk.org>
Subject: [PATCH v3 2/6] examples/l3fwd-power: fix lcore ID restriction
Date: Wed, 20 Dec 2023 07:44:57 +0100 [thread overview]
Message-ID: <20231220064502.2830-3-sivaprasad.tummala@amd.com> (raw)
In-Reply-To: <20231220064502.2830-1-sivaprasad.tummala@amd.com>
Currently the config option allows lcore IDs only up to 255,
irrespective of RTE_MAX_LCORE, and needs to be fixed.
The patch validates lcore IDs against RTE_MAX_LCORE from the DPDK config.
Fixes: f88e7c175a68 ("examples/l3fwd-power: add high/regular perf cores options")
Cc: radu.nicolau@intel.com
Cc: stable@dpdk.org
Signed-off-by: Sivaprasad Tummala <sivaprasad.tummala@amd.com>
---
examples/l3fwd-power/main.c | 57 ++++++++++++++++----------------
examples/l3fwd-power/main.h | 4 +--
examples/l3fwd-power/perf_core.c | 10 +++---
3 files changed, 35 insertions(+), 36 deletions(-)
diff --git a/examples/l3fwd-power/main.c b/examples/l3fwd-power/main.c
index f4adcf41b5..d0f3c332ee 100644
--- a/examples/l3fwd-power/main.c
+++ b/examples/l3fwd-power/main.c
@@ -214,7 +214,7 @@ enum freq_scale_hint_t
struct lcore_rx_queue {
uint16_t port_id;
- uint8_t queue_id;
+ uint16_t queue_id;
enum freq_scale_hint_t freq_up_hint;
uint32_t zero_rx_packet_count;
uint32_t idle_hint;
@@ -838,7 +838,7 @@ sleep_until_rx_interrupt(int num, int lcore)
struct rte_epoll_event event[num];
int n, i;
uint16_t port_id;
- uint8_t queue_id;
+ uint16_t queue_id;
void *data;
if (status[lcore].wakeup) {
@@ -850,9 +850,9 @@ sleep_until_rx_interrupt(int num, int lcore)
n = rte_epoll_wait(RTE_EPOLL_PER_THREAD, event, num, 10);
for (i = 0; i < n; i++) {
data = event[i].epdata.data;
- port_id = ((uintptr_t)data) >> CHAR_BIT;
+ port_id = ((uintptr_t)data) >> (sizeof(uint16_t) * CHAR_BIT);
queue_id = ((uintptr_t)data) &
- RTE_LEN2MASK(CHAR_BIT, uint8_t);
+ RTE_LEN2MASK((sizeof(uint16_t) * CHAR_BIT), uint16_t);
RTE_LOG(INFO, L3FWD_POWER,
"lcore %u is waked up from rx interrupt on"
" port %d queue %d\n",
@@ -867,7 +867,7 @@ static void turn_on_off_intr(struct lcore_conf *qconf, bool on)
{
int i;
struct lcore_rx_queue *rx_queue;
- uint8_t queue_id;
+ uint16_t queue_id;
uint16_t port_id;
for (i = 0; i < qconf->n_rx_queue; ++i) {
@@ -887,7 +887,7 @@ static void turn_on_off_intr(struct lcore_conf *qconf, bool on)
static int event_register(struct lcore_conf *qconf)
{
struct lcore_rx_queue *rx_queue;
- uint8_t queueid;
+ uint16_t queueid;
uint16_t portid;
uint32_t data;
int ret;
@@ -897,7 +897,7 @@ static int event_register(struct lcore_conf *qconf)
rx_queue = &(qconf->rx_queue_list[i]);
portid = rx_queue->port_id;
queueid = rx_queue->queue_id;
- data = portid << CHAR_BIT | queueid;
+ data = portid << (sizeof(uint16_t) * CHAR_BIT) | queueid;
ret = rte_eth_dev_rx_intr_ctl_q(portid, queueid,
RTE_EPOLL_PER_THREAD,
@@ -917,8 +917,7 @@ static int main_intr_loop(__rte_unused void *dummy)
unsigned int lcore_id;
uint64_t prev_tsc, diff_tsc, cur_tsc;
int i, j, nb_rx;
- uint8_t queueid;
- uint16_t portid;
+ uint16_t portid, queueid;
struct lcore_conf *qconf;
struct lcore_rx_queue *rx_queue;
uint32_t lcore_rx_idle_count = 0;
@@ -946,7 +945,7 @@ static int main_intr_loop(__rte_unused void *dummy)
portid = qconf->rx_queue_list[i].port_id;
queueid = qconf->rx_queue_list[i].queue_id;
RTE_LOG(INFO, L3FWD_POWER,
- " -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
+ " -- lcoreid=%u portid=%u rxqueueid=%hu\n",
lcore_id, portid, queueid);
}
@@ -1083,8 +1082,7 @@ main_telemetry_loop(__rte_unused void *dummy)
unsigned int lcore_id;
uint64_t prev_tsc, diff_tsc, cur_tsc, prev_tel_tsc;
int i, j, nb_rx;
- uint8_t queueid;
- uint16_t portid;
+ uint16_t portid, queueid;
struct lcore_conf *qconf;
struct lcore_rx_queue *rx_queue;
uint64_t ep_nep[2] = {0}, fp_nfp[2] = {0};
@@ -1114,7 +1112,7 @@ main_telemetry_loop(__rte_unused void *dummy)
portid = qconf->rx_queue_list[i].port_id;
queueid = qconf->rx_queue_list[i].queue_id;
RTE_LOG(INFO, L3FWD_POWER, " -- lcoreid=%u portid=%u "
- "rxqueueid=%hhu\n", lcore_id, portid, queueid);
+ "rxqueueid=%hu\n", lcore_id, portid, queueid);
}
while (!is_done()) {
@@ -1205,8 +1203,7 @@ main_legacy_loop(__rte_unused void *dummy)
uint64_t prev_tsc, diff_tsc, cur_tsc, tim_res_tsc, hz;
uint64_t prev_tsc_power = 0, cur_tsc_power, diff_tsc_power;
int i, j, nb_rx;
- uint8_t queueid;
- uint16_t portid;
+ uint16_t portid, queueid;
struct lcore_conf *qconf;
struct lcore_rx_queue *rx_queue;
enum freq_scale_hint_t lcore_scaleup_hint;
@@ -1234,7 +1231,7 @@ main_legacy_loop(__rte_unused void *dummy)
portid = qconf->rx_queue_list[i].port_id;
queueid = qconf->rx_queue_list[i].queue_id;
RTE_LOG(INFO, L3FWD_POWER, " -- lcoreid=%u portid=%u "
- "rxqueueid=%hhu\n", lcore_id, portid, queueid);
+ "rxqueueid=%hu\n", lcore_id, portid, queueid);
}
/* add into event wait list */
@@ -1399,25 +1396,25 @@ main_legacy_loop(__rte_unused void *dummy)
static int
check_lcore_params(void)
{
- uint8_t queue, lcore;
+ uint16_t queue, lcore;
uint16_t i;
int socketid;
for (i = 0; i < nb_lcore_params; ++i) {
queue = lcore_params[i].queue_id;
if (queue >= MAX_RX_QUEUE_PER_PORT) {
- printf("invalid queue number: %hhu\n", queue);
+ printf("invalid queue number: %hu\n", queue);
return -1;
}
lcore = lcore_params[i].lcore_id;
if (!rte_lcore_is_enabled(lcore)) {
- printf("error: lcore %hhu is not enabled in lcore "
+ printf("error: lcore %hu is not enabled in lcore "
"mask\n", lcore);
return -1;
}
if ((socketid = rte_lcore_to_socket_id(lcore) != 0) &&
(numa_on == 0)) {
- printf("warning: lcore %hhu is on socket %d with numa "
+ printf("warning: lcore %hu is on socket %d with numa "
"off\n", lcore, socketid);
}
if (app_mode == APP_MODE_TELEMETRY && lcore == rte_lcore_id()) {
@@ -1451,7 +1448,7 @@ check_port_config(void)
return 0;
}
-static uint8_t
+static uint16_t
get_port_n_rx_queues(const uint16_t port)
{
int queue = -1;
@@ -1462,14 +1459,14 @@ get_port_n_rx_queues(const uint16_t port)
lcore_params[i].queue_id > queue)
queue = lcore_params[i].queue_id;
}
- return (uint8_t)(++queue);
+ return (uint16_t)(++queue);
}
static int
init_lcore_rx_queues(void)
{
uint16_t i, nb_rx_queue;
- uint8_t lcore;
+ uint16_t lcore;
for (i = 0; i < nb_lcore_params; ++i) {
lcore = lcore_params[i].lcore_id;
@@ -1661,6 +1658,8 @@ parse_config(const char *q_arg)
char *str_fld[_NUM_FLD];
int i;
unsigned size;
+ unsigned int max_fld[_NUM_FLD] = {RTE_MAX_ETHPORTS,
+ USHRT_MAX, RTE_MAX_LCORE};
nb_lcore_params = 0;
@@ -1681,7 +1680,7 @@ parse_config(const char *q_arg)
errno = 0;
int_fld[i] = strtoul(str_fld[i], &end, 0);
if (errno != 0 || end == str_fld[i] || int_fld[i] >
- 255)
+ max_fld[i])
return -1;
}
if (nb_lcore_params >= MAX_LCORE_PARAMS) {
@@ -1690,11 +1689,11 @@ parse_config(const char *q_arg)
return -1;
}
lcore_params_array[nb_lcore_params].port_id =
- (uint8_t)int_fld[FLD_PORT];
+ (uint16_t)int_fld[FLD_PORT];
lcore_params_array[nb_lcore_params].queue_id =
- (uint8_t)int_fld[FLD_QUEUE];
+ (uint16_t)int_fld[FLD_QUEUE];
lcore_params_array[nb_lcore_params].lcore_id =
- (uint8_t)int_fld[FLD_LCORE];
+ (uint16_t)int_fld[FLD_LCORE];
++nb_lcore_params;
}
lcore_params = lcore_params_array;
@@ -2501,8 +2500,8 @@ main(int argc, char **argv)
uint64_t hz;
uint32_t n_tx_queue, nb_lcores;
uint32_t dev_rxq_num, dev_txq_num;
- uint8_t nb_rx_queue, queue, socketid;
- uint16_t portid;
+ uint8_t socketid;
+ uint16_t portid, nb_rx_queue, queue;
const char *ptr_strings[NUM_TELSTATS];
/* init EAL */
diff --git a/examples/l3fwd-power/main.h b/examples/l3fwd-power/main.h
index 258de98f5b..dcb5744ee6 100644
--- a/examples/l3fwd-power/main.h
+++ b/examples/l3fwd-power/main.h
@@ -9,8 +9,8 @@
#define MAX_LCORE_PARAMS 1024
struct lcore_params {
uint16_t port_id;
- uint8_t queue_id;
- uint8_t lcore_id;
+ uint16_t queue_id;
+ uint16_t lcore_id;
} __rte_cache_aligned;
extern struct lcore_params *lcore_params;
diff --git a/examples/l3fwd-power/perf_core.c b/examples/l3fwd-power/perf_core.c
index 41ef6d0c9a..1fb9ceb584 100644
--- a/examples/l3fwd-power/perf_core.c
+++ b/examples/l3fwd-power/perf_core.c
@@ -22,9 +22,9 @@ static uint16_t nb_hp_lcores;
struct perf_lcore_params {
uint16_t port_id;
- uint8_t queue_id;
+ uint16_t queue_id;
uint8_t high_perf;
- uint8_t lcore_idx;
+ uint16_t lcore_idx;
} __rte_cache_aligned;
static struct perf_lcore_params prf_lc_prms[MAX_LCORE_PARAMS];
@@ -161,13 +161,13 @@ parse_perf_config(const char *q_arg)
return -1;
}
prf_lc_prms[nb_prf_lc_prms].port_id =
- (uint8_t)int_fld[FLD_PORT];
+ (uint16_t)int_fld[FLD_PORT];
prf_lc_prms[nb_prf_lc_prms].queue_id =
- (uint8_t)int_fld[FLD_QUEUE];
+ (uint16_t)int_fld[FLD_QUEUE];
prf_lc_prms[nb_prf_lc_prms].high_perf =
!!(uint8_t)int_fld[FLD_LCORE_HP];
prf_lc_prms[nb_prf_lc_prms].lcore_idx =
- (uint8_t)int_fld[FLD_LCORE_IDX];
+ (uint16_t)int_fld[FLD_LCORE_IDX];
++nb_prf_lc_prms;
}
--
2.25.1
next prev parent reply other threads:[~2023-12-20 6:45 UTC|newest]
Thread overview: 96+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-12-18 7:49 [PATCH 1/6] examples/l3fwd: " Sivaprasad Tummala
2023-12-18 7:49 ` [PATCH 2/6] examples/l3fwd-power: " Sivaprasad Tummala
2023-12-18 7:49 ` [PATCH 3/6] examples/l3fwd-graph: " Sivaprasad Tummala
2023-12-18 7:49 ` [PATCH 4/6] examples/ipsec-secgw: " Sivaprasad Tummala
2023-12-18 7:49 ` [PATCH 5/6] examples/qos_sched: " Sivaprasad Tummala
2023-12-18 7:49 ` [PATCH 6/6] examples/vm_power_manager: " Sivaprasad Tummala
2023-12-19 3:28 ` [PATCH v2 0/6] " Sivaprasad Tummala
2023-12-19 3:28 ` [PATCH v2 1/6] examples/l3fwd: " Sivaprasad Tummala
2023-12-19 12:05 ` Konstantin Ananyev
2023-12-19 12:30 ` Konstantin Ananyev
2023-12-19 14:18 ` Tummala, Sivaprasad
2023-12-19 15:10 ` Konstantin Ananyev
2023-12-20 1:32 ` Tummala, Sivaprasad
2023-12-19 3:28 ` [PATCH v2 2/6] examples/l3fwd-power: " Sivaprasad Tummala
2023-12-19 3:28 ` [PATCH v2 3/6] examples/l3fwd-graph: " Sivaprasad Tummala
2023-12-19 3:28 ` [PATCH v2 4/6] examples/ipsec-secgw: " Sivaprasad Tummala
2023-12-19 12:03 ` Konstantin Ananyev
2023-12-19 3:28 ` [PATCH v2 5/6] examples/qos_sched: " Sivaprasad Tummala
2023-12-19 3:28 ` [PATCH v2 6/6] examples/vm_power_manager: " Sivaprasad Tummala
2023-12-19 3:28 ` [PATCH v2 0/6] " Sivaprasad Tummala
2023-12-20 6:44 ` [PATCH v3 " Sivaprasad Tummala
2023-12-20 6:44 ` [PATCH v3 1/6] examples/l3fwd: " Sivaprasad Tummala
2023-12-20 6:44 ` Sivaprasad Tummala [this message]
2023-12-20 6:44 ` [PATCH v3 3/6] examples/l3fwd-graph: " Sivaprasad Tummala
2023-12-20 6:44 ` [PATCH v3 4/6] examples/ipsec-secgw: " Sivaprasad Tummala
2023-12-20 6:45 ` [PATCH v3 5/6] examples/qos_sched: " Sivaprasad Tummala
2023-12-20 16:31 ` Stephen Hemminger
2024-01-09 15:16 ` Ferruh Yigit
2024-01-16 12:33 ` Tummala, Sivaprasad
2024-01-16 16:28 ` Stephen Hemminger
2023-12-20 6:45 ` [PATCH v3 6/6] examples/vm_power_manager: " Sivaprasad Tummala
2023-12-20 6:45 ` [PATCH v3 0/6] " Sivaprasad Tummala
2024-01-16 18:23 ` [PATCH v4 " Sivaprasad Tummala
2024-01-16 18:23 ` [PATCH v4 1/6] examples/l3fwd: " Sivaprasad Tummala
2024-01-16 18:23 ` [PATCH v4 2/6] examples/l3fwd-power: " Sivaprasad Tummala
2024-01-16 18:23 ` [PATCH v4 3/6] examples/l3fwd-graph: " Sivaprasad Tummala
2024-01-16 18:23 ` [PATCH v4 4/6] examples/ipsec-secgw: " Sivaprasad Tummala
2024-01-16 18:23 ` [PATCH v4 5/6] examples/qos_sched: " Sivaprasad Tummala
2024-01-16 18:23 ` [PATCH v4 6/6] examples/vm_power_manager: " Sivaprasad Tummala
2024-01-16 18:23 ` [PATCH v4 0/6] " Sivaprasad Tummala
2024-03-18 17:31 ` [PATCH v5 " Sivaprasad Tummala
2024-03-18 17:31 ` [PATCH v5 1/6] examples/l3fwd: " Sivaprasad Tummala
2024-03-19 7:24 ` Morten Brørup
2024-03-21 9:55 ` Thomas Monjalon
2024-03-21 11:05 ` Tummala, Sivaprasad
2024-03-21 11:18 ` Thomas Monjalon
2024-03-21 18:26 ` Tummala, Sivaprasad
2024-03-21 11:05 ` Tummala, Sivaprasad
2024-03-18 17:31 ` [PATCH v5 2/6] examples/l3fwd-power: " Sivaprasad Tummala
2024-03-18 17:31 ` [PATCH v5 3/6] examples/l3fwd-graph: " Sivaprasad Tummala
2024-03-18 17:31 ` [PATCH v5 4/6] examples/ipsec-secgw: " Sivaprasad Tummala
2024-03-18 17:31 ` [PATCH v5 5/6] examples/qos_sched: " Sivaprasad Tummala
2024-03-18 17:31 ` [PATCH v5 6/6] examples/vm_power_manager: " Sivaprasad Tummala
2024-03-18 17:31 ` [PATCH v5 0/6] " Sivaprasad Tummala
2024-03-19 18:41 ` Ferruh Yigit
2024-03-21 18:47 ` [PATCH v6 00/10] " Sivaprasad Tummala
2024-03-21 18:47 ` [PATCH v6 01/14] examples/l3fwd: fix queue " Sivaprasad Tummala
2024-03-22 15:41 ` David Marchand
2024-03-25 12:45 ` Tummala, Sivaprasad
2024-03-21 18:47 ` [PATCH v6 02/14] examples/l3fwd-power: " Sivaprasad Tummala
2024-03-21 18:47 ` [PATCH v6 03/14] examples/l3fwd-graph: " Sivaprasad Tummala
2024-03-21 18:47 ` [PATCH v6 04/14] examples/ipsec-secgw: " Sivaprasad Tummala
2024-03-21 18:47 ` [PATCH v6 05/14] examples/l3fwd: fix lcore " Sivaprasad Tummala
2024-03-21 18:47 ` [PATCH v6 06/14] examples/l3fwd-power: " Sivaprasad Tummala
2024-03-21 18:47 ` [PATCH v6 07/14] examples/l3fwd-graph: " Sivaprasad Tummala
2024-03-21 18:47 ` [PATCH v6 08/14] examples/ipsec-secgw: " Sivaprasad Tummala
2024-03-21 18:47 ` [PATCH v6 09/14] examples/qos_sched: " Sivaprasad Tummala
2024-03-21 18:47 ` [PATCH v6 10/14] examples/vm_power_manager: " Sivaprasad Tummala
2024-03-21 18:47 ` [PATCH v6 11/14] examples/l3fwd: fix port " Sivaprasad Tummala
2024-03-21 18:47 ` [PATCH v6 12/14] examples/l3fwd-power: " Sivaprasad Tummala
2024-03-21 18:47 ` [PATCH v6 13/14] examples/l3fwd-graph: " Sivaprasad Tummala
2024-03-21 18:47 ` [PATCH v6 14/14] examples/ipsec-secgw: " Sivaprasad Tummala
2024-03-21 18:47 ` [PATCH v6 00/10] fix lcore " Sivaprasad Tummala
2024-03-26 12:55 ` [PATCH v7 00/14] " Sivaprasad Tummala
2024-03-26 12:55 ` [PATCH v7 01/14] examples/l3fwd: fix queue " Sivaprasad Tummala
2024-03-26 12:55 ` [PATCH v7 02/14] examples/l3fwd-power: " Sivaprasad Tummala
2024-03-26 12:55 ` [PATCH v7 03/14] examples/l3fwd-graph: " Sivaprasad Tummala
2024-03-26 12:55 ` [PATCH v7 04/14] examples/ipsec-secgw: " Sivaprasad Tummala
2024-03-26 12:55 ` [PATCH v7 05/14] examples/l3fwd: fix lcore " Sivaprasad Tummala
2024-03-26 12:55 ` [PATCH v7 06/14] examples/l3fwd-power: " Sivaprasad Tummala
2024-03-26 12:55 ` [PATCH v7 07/14] examples/l3fwd-graph: " Sivaprasad Tummala
2024-03-26 12:55 ` [PATCH v7 08/14] examples/ipsec-secgw: " Sivaprasad Tummala
2024-03-26 12:55 ` [PATCH v7 09/14] examples/qos_sched: " Sivaprasad Tummala
2024-03-26 12:55 ` [PATCH v7 10/14] examples/vm_power_manager: " Sivaprasad Tummala
2024-03-26 12:55 ` [PATCH v7 11/14] examples/l3fwd: fix port " Sivaprasad Tummala
2024-03-26 12:55 ` [PATCH v7 12/14] examples/l3fwd-power: " Sivaprasad Tummala
2024-03-26 12:55 ` [PATCH v7 13/14] examples/l3fwd-graph: " Sivaprasad Tummala
2024-03-26 12:55 ` [PATCH v7 14/14] examples/ipsec-secgw: " Sivaprasad Tummala
2024-03-26 12:55 ` [PATCH v7 00/14] fix lcore " Sivaprasad Tummala
2024-04-25 12:31 ` Ferruh Yigit
2024-04-30 13:47 ` Ferruh Yigit
2024-03-07 8:34 ` [PATCH 1/6] examples/l3fwd: " David Marchand
2024-03-07 9:16 ` Morten Brørup
2024-03-07 9:22 ` David Marchand
2024-03-07 9:53 ` Morten Brørup
2024-03-13 9:14 ` Tummala, Sivaprasad
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20231220064502.2830-3-sivaprasad.tummala@amd.com \
--to=sivaprasad.tummala@amd.com \
--cc=anatoly.burakov@intel.com \
--cc=cristian.dumitrescu@intel.com \
--cc=david.hunt@intel.com \
--cc=dev@dpdk.org \
--cc=ferruh.yigit@amd.com \
--cc=gakhil@marvell.com \
--cc=jerinj@marvell.com \
--cc=konstantin.ananyev@huawei.com \
--cc=radu.nicolau@intel.com \
--cc=stable@dpdk.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).