* [PATCH 1/6] examples/l3fwd: fix lcore ID restriction
@ 2023-12-18 7:49 Sivaprasad Tummala
2023-12-18 7:49 ` [PATCH 2/6] examples/l3fwd-power: " Sivaprasad Tummala
` (6 more replies)
0 siblings, 7 replies; 100+ messages in thread
From: Sivaprasad Tummala @ 2023-12-18 7:49 UTC (permalink / raw)
To: david.hunt, anatoly.burakov, jerinj, radu.nicolau, gakhil,
cristian.dumitrescu, ferruh.yigit, konstantin.ananyev
Cc: dev, stable
Currently the config option allows lcore IDs up to 255,
irrespective of RTE_MAX_LCORE, and needs to be fixed.
The patch bases the accepted lcore ID range on the DPDK config instead.
Fixes: af75078fece3 ("first public release")
Cc: stable@dpdk.org
Signed-off-by: Sivaprasad Tummala <sivaprasad.tummala@amd.com>
---
examples/l3fwd/main.c | 15 +++++++++------
1 file changed, 9 insertions(+), 6 deletions(-)
diff --git a/examples/l3fwd/main.c b/examples/l3fwd/main.c
index 3bf28aec0c..847ded0ad2 100644
--- a/examples/l3fwd/main.c
+++ b/examples/l3fwd/main.c
@@ -99,7 +99,7 @@ struct parm_cfg parm_config;
struct lcore_params {
uint16_t port_id;
uint8_t queue_id;
- uint8_t lcore_id;
+ uint16_t lcore_id;
} __rte_cache_aligned;
static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
@@ -292,8 +292,8 @@ setup_l3fwd_lookup_tables(void)
static int
check_lcore_params(void)
{
- uint8_t queue, lcore;
- uint16_t i;
+ uint8_t queue;
+ uint16_t i, lcore;
int socketid;
for (i = 0; i < nb_lcore_params; ++i) {
@@ -359,7 +359,7 @@ static int
init_lcore_rx_queues(void)
{
uint16_t i, nb_rx_queue;
- uint8_t lcore;
+ uint16_t lcore;
for (i = 0; i < nb_lcore_params; ++i) {
lcore = lcore_params[i].lcore_id;
@@ -500,6 +500,8 @@ parse_config(const char *q_arg)
char *str_fld[_NUM_FLD];
int i;
unsigned size;
+ unsigned int max_fld[_NUM_FLD] = {RTE_MAX_ETHPORTS,
+ 255, RTE_MAX_LCORE};
nb_lcore_params = 0;
@@ -518,7 +520,8 @@ parse_config(const char *q_arg)
for (i = 0; i < _NUM_FLD; i++){
errno = 0;
int_fld[i] = strtoul(str_fld[i], &end, 0);
- if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
+ if (errno != 0 || end == str_fld[i] || int_fld[i] >
+ max_fld[i])
return -1;
}
if (nb_lcore_params >= MAX_LCORE_PARAMS) {
@@ -531,7 +534,7 @@ parse_config(const char *q_arg)
lcore_params_array[nb_lcore_params].queue_id =
(uint8_t)int_fld[FLD_QUEUE];
lcore_params_array[nb_lcore_params].lcore_id =
- (uint8_t)int_fld[FLD_LCORE];
+ (uint16_t)int_fld[FLD_LCORE];
++nb_lcore_params;
}
lcore_params = lcore_params_array;
--
2.25.1
* [PATCH 2/6] examples/l3fwd-power: fix lcore ID restriction
2023-12-18 7:49 [PATCH 1/6] examples/l3fwd: fix lcore ID restriction Sivaprasad Tummala
@ 2023-12-18 7:49 ` Sivaprasad Tummala
2023-12-18 7:49 ` [PATCH 3/6] examples/l3fwd-graph: " Sivaprasad Tummala
` (5 subsequent siblings)
6 siblings, 0 replies; 100+ messages in thread
From: Sivaprasad Tummala @ 2023-12-18 7:49 UTC (permalink / raw)
To: david.hunt, anatoly.burakov, jerinj, radu.nicolau, gakhil,
cristian.dumitrescu, ferruh.yigit, konstantin.ananyev
Cc: dev, stable
Currently the config option allows lcore IDs up to 255,
irrespective of RTE_MAX_LCORE, and needs to be fixed.
The patch bases the accepted lcore ID range on the DPDK config instead.
Fixes: f88e7c175a68 ("examples/l3fwd-power: add high/regular perf cores options")
Cc: radu.nicolau@intel.com
Cc: stable@dpdk.org
Signed-off-by: Sivaprasad Tummala <sivaprasad.tummala@amd.com>
---
examples/l3fwd-power/main.c | 12 +++++++-----
examples/l3fwd-power/main.h | 2 +-
2 files changed, 8 insertions(+), 6 deletions(-)
diff --git a/examples/l3fwd-power/main.c b/examples/l3fwd-power/main.c
index f4adcf41b5..1f0ac3e660 100644
--- a/examples/l3fwd-power/main.c
+++ b/examples/l3fwd-power/main.c
@@ -1399,8 +1399,8 @@ main_legacy_loop(__rte_unused void *dummy)
static int
check_lcore_params(void)
{
- uint8_t queue, lcore;
- uint16_t i;
+ uint8_t queue;
+ uint16_t lcore, i;
int socketid;
for (i = 0; i < nb_lcore_params; ++i) {
@@ -1469,7 +1469,7 @@ static int
init_lcore_rx_queues(void)
{
uint16_t i, nb_rx_queue;
- uint8_t lcore;
+ uint16_t lcore;
for (i = 0; i < nb_lcore_params; ++i) {
lcore = lcore_params[i].lcore_id;
@@ -1661,6 +1661,8 @@ parse_config(const char *q_arg)
char *str_fld[_NUM_FLD];
int i;
unsigned size;
+ unsigned int max_fld[_NUM_FLD] = {RTE_MAX_ETHPORTS,
+ 255, RTE_MAX_LCORE};
nb_lcore_params = 0;
@@ -1681,7 +1683,7 @@ parse_config(const char *q_arg)
errno = 0;
int_fld[i] = strtoul(str_fld[i], &end, 0);
if (errno != 0 || end == str_fld[i] || int_fld[i] >
- 255)
+ max_fld[i])
return -1;
}
if (nb_lcore_params >= MAX_LCORE_PARAMS) {
@@ -1694,7 +1696,7 @@ parse_config(const char *q_arg)
lcore_params_array[nb_lcore_params].queue_id =
(uint8_t)int_fld[FLD_QUEUE];
lcore_params_array[nb_lcore_params].lcore_id =
- (uint8_t)int_fld[FLD_LCORE];
+ (uint16_t)int_fld[FLD_LCORE];
++nb_lcore_params;
}
lcore_params = lcore_params_array;
diff --git a/examples/l3fwd-power/main.h b/examples/l3fwd-power/main.h
index 258de98f5b..4e5fd3b6d6 100644
--- a/examples/l3fwd-power/main.h
+++ b/examples/l3fwd-power/main.h
@@ -10,7 +10,7 @@
struct lcore_params {
uint16_t port_id;
uint8_t queue_id;
- uint8_t lcore_id;
+ uint16_t lcore_id;
} __rte_cache_aligned;
extern struct lcore_params *lcore_params;
--
2.25.1
* [PATCH 3/6] examples/l3fwd-graph: fix lcore ID restriction
2023-12-18 7:49 [PATCH 1/6] examples/l3fwd: fix lcore ID restriction Sivaprasad Tummala
2023-12-18 7:49 ` [PATCH 2/6] examples/l3fwd-power: " Sivaprasad Tummala
@ 2023-12-18 7:49 ` Sivaprasad Tummala
2023-12-18 7:49 ` [PATCH 4/6] examples/ipsec-secgw: " Sivaprasad Tummala
` (4 subsequent siblings)
6 siblings, 0 replies; 100+ messages in thread
From: Sivaprasad Tummala @ 2023-12-18 7:49 UTC (permalink / raw)
To: david.hunt, anatoly.burakov, jerinj, radu.nicolau, gakhil,
cristian.dumitrescu, ferruh.yigit, konstantin.ananyev
Cc: dev, stable, ndabilpuram
Currently the config option allows lcore IDs up to 255,
irrespective of RTE_MAX_LCORE, and needs to be fixed.
The patch bases the accepted lcore ID range on the DPDK config instead.
Fixes: 08bd1a174461 ("examples/l3fwd-graph: add graph-based l3fwd skeleton")
Cc: ndabilpuram@marvell.com
Cc: stable@dpdk.org
Signed-off-by: Sivaprasad Tummala <sivaprasad.tummala@amd.com>
---
examples/l3fwd-graph/main.c | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/examples/l3fwd-graph/main.c b/examples/l3fwd-graph/main.c
index 96cb1c81ff..149677a3cc 100644
--- a/examples/l3fwd-graph/main.c
+++ b/examples/l3fwd-graph/main.c
@@ -111,7 +111,7 @@ static struct lcore_conf lcore_conf[RTE_MAX_LCORE];
struct lcore_params {
uint16_t port_id;
uint8_t queue_id;
- uint8_t lcore_id;
+ uint16_t lcore_id;
} __rte_cache_aligned;
static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
@@ -205,9 +205,9 @@ check_worker_model_params(void)
static int
check_lcore_params(void)
{
- uint8_t queue, lcore;
+ uint8_t queue;
int socketid;
- uint16_t i;
+ uint16_t i, lcore;
for (i = 0; i < nb_lcore_params; ++i) {
queue = lcore_params[i].queue_id;
@@ -282,7 +282,7 @@ static int
init_lcore_rx_queues(void)
{
uint16_t i, nb_rx_queue;
- uint8_t lcore;
+ uint16_t lcore;
for (i = 0; i < nb_lcore_params; ++i) {
lcore = lcore_params[i].lcore_id;
@@ -452,7 +452,7 @@ parse_config(const char *q_arg)
lcore_params_array[nb_lcore_params].queue_id =
(uint8_t)int_fld[FLD_QUEUE];
lcore_params_array[nb_lcore_params].lcore_id =
- (uint8_t)int_fld[FLD_LCORE];
+ (uint16_t)int_fld[FLD_LCORE];
++nb_lcore_params;
}
lcore_params = lcore_params_array;
--
2.25.1
* [PATCH 4/6] examples/ipsec-secgw: fix lcore ID restriction
2023-12-18 7:49 [PATCH 1/6] examples/l3fwd: fix lcore ID restriction Sivaprasad Tummala
2023-12-18 7:49 ` [PATCH 2/6] examples/l3fwd-power: " Sivaprasad Tummala
2023-12-18 7:49 ` [PATCH 3/6] examples/l3fwd-graph: " Sivaprasad Tummala
@ 2023-12-18 7:49 ` Sivaprasad Tummala
2023-12-18 7:49 ` [PATCH 5/6] examples/qos_sched: " Sivaprasad Tummala
` (3 subsequent siblings)
6 siblings, 0 replies; 100+ messages in thread
From: Sivaprasad Tummala @ 2023-12-18 7:49 UTC (permalink / raw)
To: david.hunt, anatoly.burakov, jerinj, radu.nicolau, gakhil,
cristian.dumitrescu, ferruh.yigit, konstantin.ananyev
Cc: dev, stable, sergio.gonzalez.monroy
Currently the config option allows lcore IDs up to 255,
irrespective of RTE_MAX_LCORE, and needs to be fixed.
The patch bases the accepted lcore ID range on the DPDK config instead.
Fixes: d299106e8e31 ("examples/ipsec-secgw: add IPsec sample application")
Cc: sergio.gonzalez.monroy@intel.com
Cc: stable@dpdk.org
Signed-off-by: Sivaprasad Tummala <sivaprasad.tummala@amd.com>
---
examples/ipsec-secgw/event_helper.h | 2 +-
examples/ipsec-secgw/ipsec-secgw.c | 12 +++++++-----
examples/ipsec-secgw/ipsec.c | 2 +-
3 files changed, 9 insertions(+), 7 deletions(-)
diff --git a/examples/ipsec-secgw/event_helper.h b/examples/ipsec-secgw/event_helper.h
index dfb81bfcf1..9923700f03 100644
--- a/examples/ipsec-secgw/event_helper.h
+++ b/examples/ipsec-secgw/event_helper.h
@@ -102,7 +102,7 @@ struct eh_event_link_info {
/**< Event port ID */
uint8_t eventq_id;
/**< Event queue to be linked to the port */
- uint8_t lcore_id;
+ uint16_t lcore_id;
/**< Lcore to be polling on this port */
};
diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index bf98d2618b..0c15ec5334 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
@@ -221,7 +221,7 @@ static const char *cfgfile;
struct lcore_params {
uint16_t port_id;
uint8_t queue_id;
- uint8_t lcore_id;
+ uint16_t lcore_id;
} __rte_cache_aligned;
static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
@@ -810,7 +810,7 @@ check_flow_params(uint16_t fdir_portid, uint8_t fdir_qid)
static int32_t
check_poll_mode_params(struct eh_conf *eh_conf)
{
- uint8_t lcore;
+ uint16_t lcore;
uint16_t portid;
uint16_t i;
int32_t socket_id;
@@ -870,7 +870,7 @@ static int32_t
init_lcore_rx_queues(void)
{
uint16_t i, nb_rx_queue;
- uint8_t lcore;
+ uint16_t lcore;
for (i = 0; i < nb_lcore_params; ++i) {
lcore = lcore_params[i].lcore_id;
@@ -1051,6 +1051,8 @@ parse_config(const char *q_arg)
char *str_fld[_NUM_FLD];
int32_t i;
uint32_t size;
+ unsigned int max_fld[_NUM_FLD] = {RTE_MAX_ETHPORTS,
+ 255, RTE_MAX_LCORE};
nb_lcore_params = 0;
@@ -1071,7 +1073,7 @@ parse_config(const char *q_arg)
for (i = 0; i < _NUM_FLD; i++) {
errno = 0;
int_fld[i] = strtoul(str_fld[i], &end, 0);
- if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
+ if (errno != 0 || end == str_fld[i] || int_fld[i] > max_fld[i])
return -1;
}
if (nb_lcore_params >= MAX_LCORE_PARAMS) {
@@ -1084,7 +1086,7 @@ parse_config(const char *q_arg)
lcore_params_array[nb_lcore_params].queue_id =
(uint8_t)int_fld[FLD_QUEUE];
lcore_params_array[nb_lcore_params].lcore_id =
- (uint8_t)int_fld[FLD_LCORE];
+ (uint16_t)int_fld[FLD_LCORE];
++nb_lcore_params;
}
lcore_params = lcore_params_array;
diff --git a/examples/ipsec-secgw/ipsec.c b/examples/ipsec-secgw/ipsec.c
index f5cec4a928..5ebb71bb9a 100644
--- a/examples/ipsec-secgw/ipsec.c
+++ b/examples/ipsec-secgw/ipsec.c
@@ -259,7 +259,7 @@ create_lookaside_session(struct ipsec_ctx *ipsec_ctx_lcore[],
continue;
/* Looking for cryptodev, which can handle this SA */
- key.lcore_id = (uint8_t)lcore_id;
+ key.lcore_id = (uint16_t)lcore_id;
key.cipher_algo = (uint8_t)sa->cipher_algo;
key.auth_algo = (uint8_t)sa->auth_algo;
key.aead_algo = (uint8_t)sa->aead_algo;
--
2.25.1
* [PATCH 5/6] examples/qos_sched: fix lcore ID restriction
2023-12-18 7:49 [PATCH 1/6] examples/l3fwd: fix lcore ID restriction Sivaprasad Tummala
` (2 preceding siblings ...)
2023-12-18 7:49 ` [PATCH 4/6] examples/ipsec-secgw: " Sivaprasad Tummala
@ 2023-12-18 7:49 ` Sivaprasad Tummala
2023-12-18 7:49 ` [PATCH 6/6] examples/vm_power_manager: " Sivaprasad Tummala
` (2 subsequent siblings)
6 siblings, 0 replies; 100+ messages in thread
From: Sivaprasad Tummala @ 2023-12-18 7:49 UTC (permalink / raw)
To: david.hunt, anatoly.burakov, jerinj, radu.nicolau, gakhil,
cristian.dumitrescu, ferruh.yigit, konstantin.ananyev
Cc: dev, stable
Currently the config option allows lcore IDs up to 255,
irrespective of RTE_MAX_LCORE, and needs to be fixed.
The patch bases the accepted lcore ID range on the DPDK config instead.
Fixes: de3cfa2c9823 ("sched: initial import")
Cc: stable@dpdk.org
Signed-off-by: Sivaprasad Tummala <sivaprasad.tummala@amd.com>
---
examples/qos_sched/args.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/examples/qos_sched/args.c b/examples/qos_sched/args.c
index e97273152a..22fe76eeb5 100644
--- a/examples/qos_sched/args.c
+++ b/examples/qos_sched/args.c
@@ -182,10 +182,10 @@ app_parse_flow_conf(const char *conf_str)
pconf->rx_port = vals[0];
pconf->tx_port = vals[1];
- pconf->rx_core = (uint8_t)vals[2];
- pconf->wt_core = (uint8_t)vals[3];
+ pconf->rx_core = (uint16_t)vals[2];
+ pconf->wt_core = (uint16_t)vals[3];
if (ret == 5)
- pconf->tx_core = (uint8_t)vals[4];
+ pconf->tx_core = (uint16_t)vals[4];
else
pconf->tx_core = pconf->wt_core;
--
2.25.1
* [PATCH 6/6] examples/vm_power_manager: fix lcore ID restriction
2023-12-18 7:49 [PATCH 1/6] examples/l3fwd: fix lcore ID restriction Sivaprasad Tummala
` (3 preceding siblings ...)
2023-12-18 7:49 ` [PATCH 5/6] examples/qos_sched: " Sivaprasad Tummala
@ 2023-12-18 7:49 ` Sivaprasad Tummala
2023-12-19 3:28 ` [PATCH v2 0/6] " Sivaprasad Tummala
2024-03-07 8:34 ` [PATCH 1/6] examples/l3fwd: fix lcore ID restriction David Marchand
6 siblings, 0 replies; 100+ messages in thread
From: Sivaprasad Tummala @ 2023-12-18 7:49 UTC (permalink / raw)
To: david.hunt, anatoly.burakov, jerinj, radu.nicolau, gakhil,
cristian.dumitrescu, ferruh.yigit, konstantin.ananyev
Cc: dev, stable, marcinx.hajkowski
Currently the config option allows lcore IDs up to 255,
irrespective of RTE_MAX_LCORE, and needs to be fixed.
The patch bases the accepted lcore ID range on the DPDK config instead.
Fixes: 0e8f47491f09 ("examples/vm_power: add command to query CPU frequency")
Cc: marcinx.hajkowski@intel.com
Cc: stable@dpdk.org
Signed-off-by: Sivaprasad Tummala <sivaprasad.tummala@amd.com>
---
examples/vm_power_manager/guest_cli/vm_power_cli_guest.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/examples/vm_power_manager/guest_cli/vm_power_cli_guest.c b/examples/vm_power_manager/guest_cli/vm_power_cli_guest.c
index 94bfbbaf78..a586853a76 100644
--- a/examples/vm_power_manager/guest_cli/vm_power_cli_guest.c
+++ b/examples/vm_power_manager/guest_cli/vm_power_cli_guest.c
@@ -401,7 +401,7 @@ check_response_cmd(unsigned int lcore_id, int *result)
struct cmd_set_cpu_freq_result {
cmdline_fixed_string_t set_cpu_freq;
- uint8_t lcore_id;
+ uint16_t lcore_id;
cmdline_fixed_string_t cmd;
};
@@ -444,7 +444,7 @@ cmdline_parse_token_string_t cmd_set_cpu_freq =
set_cpu_freq, "set_cpu_freq");
cmdline_parse_token_num_t cmd_set_cpu_freq_core_num =
TOKEN_NUM_INITIALIZER(struct cmd_set_cpu_freq_result,
- lcore_id, RTE_UINT8);
+ lcore_id, RTE_UINT16);
cmdline_parse_token_string_t cmd_set_cpu_freq_cmd_cmd =
TOKEN_STRING_INITIALIZER(struct cmd_set_cpu_freq_result,
cmd, "up#down#min#max#enable_turbo#disable_turbo");
--
2.25.1
* [PATCH v2 0/6] fix lcore ID restriction
2023-12-18 7:49 [PATCH 1/6] examples/l3fwd: fix lcore ID restriction Sivaprasad Tummala
` (4 preceding siblings ...)
2023-12-18 7:49 ` [PATCH 6/6] examples/vm_power_manager: " Sivaprasad Tummala
@ 2023-12-19 3:28 ` Sivaprasad Tummala
2023-12-19 3:28 ` [PATCH v2 1/6] examples/l3fwd: " Sivaprasad Tummala
` (8 more replies)
2024-03-07 8:34 ` [PATCH 1/6] examples/l3fwd: fix lcore ID restriction David Marchand
6 siblings, 9 replies; 100+ messages in thread
From: Sivaprasad Tummala @ 2023-12-19 3:28 UTC (permalink / raw)
To: david.hunt, anatoly.burakov, jerinj, radu.nicolau, gakhil,
cristian.dumitrescu, ferruh.yigit, konstantin.ananyev
Cc: dev
With modern CPUs it is possible to have a much higher
CPU count, and thus a higher RTE_MAX_LCORE.
In the DPDK sample applications, however, the current
config lcore options are hard-limited to 255.
The patchset removes this constraint by allowing
all lcore IDs up to RTE_MAX_LCORE.
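In the l3fwd-style examples the change boils down to validating each
config field against its own maximum instead of a flat 255. In
condensed form (field names as in l3fwd's parse_config(); shown only
to illustrate the pattern, not a literal copy of any one example):

	unsigned int max_fld[_NUM_FLD] = {
		RTE_MAX_ETHPORTS,  /* FLD_PORT                  */
		255,               /* FLD_QUEUE (still uint8_t) */
		RTE_MAX_LCORE      /* FLD_LCORE (now uint16_t)  */
	};

	for (i = 0; i < _NUM_FLD; i++) {
		errno = 0;
		int_fld[i] = strtoul(str_fld[i], &end, 0);
		/* reject anything above the per-field maximum */
		if (errno != 0 || end == str_fld[i] || int_fld[i] > max_fld[i])
			return -1;
	}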
Sivaprasad Tummala (6):
examples/l3fwd: fix lcore ID restriction
examples/l3fwd-power: fix lcore ID restriction
examples/l3fwd-graph: fix lcore ID restriction
examples/ipsec-secgw: fix lcore ID restriction
examples/qos_sched: fix lcore ID restriction
examples/vm_power_manager: fix lcore ID restriction
examples/ipsec-secgw/event_helper.h | 2 +-
examples/ipsec-secgw/ipsec-secgw.c | 16 +++++++++-------
examples/ipsec-secgw/ipsec.c | 2 +-
examples/l3fwd-graph/main.c | 14 +++++++-------
examples/l3fwd-power/main.c | 16 +++++++++-------
examples/l3fwd-power/main.h | 2 +-
examples/l3fwd/main.c | 19 +++++++++++--------
examples/qos_sched/args.c | 6 +++---
.../guest_cli/vm_power_cli_guest.c | 4 ++--
9 files changed, 44 insertions(+), 37 deletions(-)
--
2.25.1
* [PATCH v2 1/6] examples/l3fwd: fix lcore ID restriction
2023-12-19 3:28 ` [PATCH v2 0/6] " Sivaprasad Tummala
@ 2023-12-19 3:28 ` Sivaprasad Tummala
2023-12-19 12:05 ` Konstantin Ananyev
2023-12-19 3:28 ` [PATCH v2 2/6] examples/l3fwd-power: " Sivaprasad Tummala
` (7 subsequent siblings)
8 siblings, 1 reply; 100+ messages in thread
From: Sivaprasad Tummala @ 2023-12-19 3:28 UTC (permalink / raw)
To: david.hunt, anatoly.burakov, jerinj, radu.nicolau, gakhil,
cristian.dumitrescu, ferruh.yigit, konstantin.ananyev
Cc: dev, stable
Currently the config option allows lcore IDs up to 255,
irrespective of RTE_MAX_LCORE, and needs to be fixed.
The patch bases the accepted lcore ID range on the DPDK config instead.
Fixes: af75078fece3 ("first public release")
Cc: stable@dpdk.org
Signed-off-by: Sivaprasad Tummala <sivaprasad.tummala@amd.com>
---
examples/l3fwd/main.c | 19 +++++++++++--------
1 file changed, 11 insertions(+), 8 deletions(-)
diff --git a/examples/l3fwd/main.c b/examples/l3fwd/main.c
index 3bf28aec0c..ed116da09c 100644
--- a/examples/l3fwd/main.c
+++ b/examples/l3fwd/main.c
@@ -99,7 +99,7 @@ struct parm_cfg parm_config;
struct lcore_params {
uint16_t port_id;
uint8_t queue_id;
- uint8_t lcore_id;
+ uint16_t lcore_id;
} __rte_cache_aligned;
static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
@@ -292,8 +292,8 @@ setup_l3fwd_lookup_tables(void)
static int
check_lcore_params(void)
{
- uint8_t queue, lcore;
- uint16_t i;
+ uint8_t queue;
+ uint16_t i, lcore;
int socketid;
for (i = 0; i < nb_lcore_params; ++i) {
@@ -304,12 +304,12 @@ check_lcore_params(void)
}
lcore = lcore_params[i].lcore_id;
if (!rte_lcore_is_enabled(lcore)) {
- printf("error: lcore %hhu is not enabled in lcore mask\n", lcore);
+ printf("error: lcore %hu is not enabled in lcore mask\n", lcore);
return -1;
}
if ((socketid = rte_lcore_to_socket_id(lcore) != 0) &&
(numa_on == 0)) {
- printf("warning: lcore %hhu is on socket %d with numa off \n",
+ printf("warning: lcore %hu is on socket %d with numa off\n",
lcore, socketid);
}
}
@@ -359,7 +359,7 @@ static int
init_lcore_rx_queues(void)
{
uint16_t i, nb_rx_queue;
- uint8_t lcore;
+ uint16_t lcore;
for (i = 0; i < nb_lcore_params; ++i) {
lcore = lcore_params[i].lcore_id;
@@ -500,6 +500,8 @@ parse_config(const char *q_arg)
char *str_fld[_NUM_FLD];
int i;
unsigned size;
+ unsigned int max_fld[_NUM_FLD] = {RTE_MAX_ETHPORTS,
+ 255, RTE_MAX_LCORE};
nb_lcore_params = 0;
@@ -518,7 +520,8 @@ parse_config(const char *q_arg)
for (i = 0; i < _NUM_FLD; i++){
errno = 0;
int_fld[i] = strtoul(str_fld[i], &end, 0);
- if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
+ if (errno != 0 || end == str_fld[i] || int_fld[i] >
+ max_fld[i])
return -1;
}
if (nb_lcore_params >= MAX_LCORE_PARAMS) {
@@ -531,7 +534,7 @@ parse_config(const char *q_arg)
lcore_params_array[nb_lcore_params].queue_id =
(uint8_t)int_fld[FLD_QUEUE];
lcore_params_array[nb_lcore_params].lcore_id =
- (uint8_t)int_fld[FLD_LCORE];
+ (uint16_t)int_fld[FLD_LCORE];
++nb_lcore_params;
}
lcore_params = lcore_params_array;
--
2.25.1
* [PATCH v2 2/6] examples/l3fwd-power: fix lcore ID restriction
2023-12-19 3:28 ` [PATCH v2 0/6] " Sivaprasad Tummala
2023-12-19 3:28 ` [PATCH v2 1/6] examples/l3fwd: " Sivaprasad Tummala
@ 2023-12-19 3:28 ` Sivaprasad Tummala
2023-12-19 3:28 ` [PATCH v2 3/6] examples/l3fwd-graph: " Sivaprasad Tummala
` (6 subsequent siblings)
8 siblings, 0 replies; 100+ messages in thread
From: Sivaprasad Tummala @ 2023-12-19 3:28 UTC (permalink / raw)
To: david.hunt, anatoly.burakov, jerinj, radu.nicolau, gakhil,
cristian.dumitrescu, ferruh.yigit, konstantin.ananyev
Cc: dev, stable
Currently the config option allows lcore IDs up to 255,
irrespective of RTE_MAX_LCORE, and needs to be fixed.
The patch bases the accepted lcore ID range on the DPDK config instead.
Fixes: f88e7c175a68 ("examples/l3fwd-power: add high/regular perf cores options")
Cc: radu.nicolau@intel.com
Cc: stable@dpdk.org
Signed-off-by: Sivaprasad Tummala <sivaprasad.tummala@amd.com>
---
examples/l3fwd-power/main.c | 16 +++++++++-------
examples/l3fwd-power/main.h | 2 +-
2 files changed, 10 insertions(+), 8 deletions(-)
diff --git a/examples/l3fwd-power/main.c b/examples/l3fwd-power/main.c
index f4adcf41b5..bb9da36455 100644
--- a/examples/l3fwd-power/main.c
+++ b/examples/l3fwd-power/main.c
@@ -1399,8 +1399,8 @@ main_legacy_loop(__rte_unused void *dummy)
static int
check_lcore_params(void)
{
- uint8_t queue, lcore;
- uint16_t i;
+ uint8_t queue;
+ uint16_t lcore, i;
int socketid;
for (i = 0; i < nb_lcore_params; ++i) {
@@ -1411,13 +1411,13 @@ check_lcore_params(void)
}
lcore = lcore_params[i].lcore_id;
if (!rte_lcore_is_enabled(lcore)) {
- printf("error: lcore %hhu is not enabled in lcore "
+ printf("error: lcore %hu is not enabled in lcore "
"mask\n", lcore);
return -1;
}
if ((socketid = rte_lcore_to_socket_id(lcore) != 0) &&
(numa_on == 0)) {
- printf("warning: lcore %hhu is on socket %d with numa "
+ printf("warning: lcore %hu is on socket %d with numa "
"off\n", lcore, socketid);
}
if (app_mode == APP_MODE_TELEMETRY && lcore == rte_lcore_id()) {
@@ -1469,7 +1469,7 @@ static int
init_lcore_rx_queues(void)
{
uint16_t i, nb_rx_queue;
- uint8_t lcore;
+ uint16_t lcore;
for (i = 0; i < nb_lcore_params; ++i) {
lcore = lcore_params[i].lcore_id;
@@ -1661,6 +1661,8 @@ parse_config(const char *q_arg)
char *str_fld[_NUM_FLD];
int i;
unsigned size;
+ unsigned int max_fld[_NUM_FLD] = {RTE_MAX_ETHPORTS,
+ 255, RTE_MAX_LCORE};
nb_lcore_params = 0;
@@ -1681,7 +1683,7 @@ parse_config(const char *q_arg)
errno = 0;
int_fld[i] = strtoul(str_fld[i], &end, 0);
if (errno != 0 || end == str_fld[i] || int_fld[i] >
- 255)
+ max_fld[i])
return -1;
}
if (nb_lcore_params >= MAX_LCORE_PARAMS) {
@@ -1694,7 +1696,7 @@ parse_config(const char *q_arg)
lcore_params_array[nb_lcore_params].queue_id =
(uint8_t)int_fld[FLD_QUEUE];
lcore_params_array[nb_lcore_params].lcore_id =
- (uint8_t)int_fld[FLD_LCORE];
+ (uint16_t)int_fld[FLD_LCORE];
++nb_lcore_params;
}
lcore_params = lcore_params_array;
diff --git a/examples/l3fwd-power/main.h b/examples/l3fwd-power/main.h
index 258de98f5b..4e5fd3b6d6 100644
--- a/examples/l3fwd-power/main.h
+++ b/examples/l3fwd-power/main.h
@@ -10,7 +10,7 @@
struct lcore_params {
uint16_t port_id;
uint8_t queue_id;
- uint8_t lcore_id;
+ uint16_t lcore_id;
} __rte_cache_aligned;
extern struct lcore_params *lcore_params;
--
2.25.1
* [PATCH v2 3/6] examples/l3fwd-graph: fix lcore ID restriction
2023-12-19 3:28 ` [PATCH v2 0/6] " Sivaprasad Tummala
2023-12-19 3:28 ` [PATCH v2 1/6] examples/l3fwd: " Sivaprasad Tummala
2023-12-19 3:28 ` [PATCH v2 2/6] examples/l3fwd-power: " Sivaprasad Tummala
@ 2023-12-19 3:28 ` Sivaprasad Tummala
2023-12-19 3:28 ` [PATCH v2 4/6] examples/ipsec-secgw: " Sivaprasad Tummala
` (5 subsequent siblings)
8 siblings, 0 replies; 100+ messages in thread
From: Sivaprasad Tummala @ 2023-12-19 3:28 UTC (permalink / raw)
To: david.hunt, anatoly.burakov, jerinj, radu.nicolau, gakhil,
cristian.dumitrescu, ferruh.yigit, konstantin.ananyev
Cc: dev, ndabilpuram, stable
Currently the config option allows lcore IDs up to 255,
irrespective of RTE_MAX_LCORE, and needs to be fixed.
The patch bases the accepted lcore ID range on the DPDK config instead.
Fixes: 08bd1a174461 ("examples/l3fwd-graph: add graph-based l3fwd skeleton")
Cc: ndabilpuram@marvell.com
Cc: stable@dpdk.org
Signed-off-by: Sivaprasad Tummala <sivaprasad.tummala@amd.com>
---
examples/l3fwd-graph/main.c | 14 +++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/examples/l3fwd-graph/main.c b/examples/l3fwd-graph/main.c
index 96cb1c81ff..6d3116aa7b 100644
--- a/examples/l3fwd-graph/main.c
+++ b/examples/l3fwd-graph/main.c
@@ -111,7 +111,7 @@ static struct lcore_conf lcore_conf[RTE_MAX_LCORE];
struct lcore_params {
uint16_t port_id;
uint8_t queue_id;
- uint8_t lcore_id;
+ uint16_t lcore_id;
} __rte_cache_aligned;
static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
@@ -205,9 +205,9 @@ check_worker_model_params(void)
static int
check_lcore_params(void)
{
- uint8_t queue, lcore;
+ uint8_t queue;
int socketid;
- uint16_t i;
+ uint16_t i, lcore;
for (i = 0; i < nb_lcore_params; ++i) {
queue = lcore_params[i].queue_id;
@@ -217,7 +217,7 @@ check_lcore_params(void)
}
lcore = lcore_params[i].lcore_id;
if (!rte_lcore_is_enabled(lcore)) {
- printf("Error: lcore %hhu is not enabled in lcore mask\n",
+ printf("Error: lcore %hu is not enabled in lcore mask\n",
lcore);
return -1;
}
@@ -228,7 +228,7 @@ check_lcore_params(void)
}
socketid = rte_lcore_to_socket_id(lcore);
if ((socketid != 0) && (numa_on == 0)) {
- printf("Warning: lcore %hhu is on socket %d with numa off\n",
+ printf("Warning: lcore %hu is on socket %d with numa off\n",
lcore, socketid);
}
}
@@ -282,7 +282,7 @@ static int
init_lcore_rx_queues(void)
{
uint16_t i, nb_rx_queue;
- uint8_t lcore;
+ uint16_t lcore;
for (i = 0; i < nb_lcore_params; ++i) {
lcore = lcore_params[i].lcore_id;
@@ -452,7 +452,7 @@ parse_config(const char *q_arg)
lcore_params_array[nb_lcore_params].queue_id =
(uint8_t)int_fld[FLD_QUEUE];
lcore_params_array[nb_lcore_params].lcore_id =
- (uint8_t)int_fld[FLD_LCORE];
+ (uint16_t)int_fld[FLD_LCORE];
++nb_lcore_params;
}
lcore_params = lcore_params_array;
--
2.25.1
* [PATCH v2 4/6] examples/ipsec-secgw: fix lcore ID restriction
2023-12-19 3:28 ` [PATCH v2 0/6] " Sivaprasad Tummala
` (2 preceding siblings ...)
2023-12-19 3:28 ` [PATCH v2 3/6] examples/l3fwd-graph: " Sivaprasad Tummala
@ 2023-12-19 3:28 ` Sivaprasad Tummala
2023-12-19 12:03 ` Konstantin Ananyev
2023-12-19 3:28 ` [PATCH v2 5/6] examples/qos_sched: " Sivaprasad Tummala
` (4 subsequent siblings)
8 siblings, 1 reply; 100+ messages in thread
From: Sivaprasad Tummala @ 2023-12-19 3:28 UTC (permalink / raw)
To: david.hunt, anatoly.burakov, jerinj, radu.nicolau, gakhil,
cristian.dumitrescu, ferruh.yigit, konstantin.ananyev
Cc: dev, sergio.gonzalez.monroy, stable
Currently the config option allows lcore IDs up to 255,
irrespective of RTE_MAX_LCORE, and needs to be fixed.
The patch bases the accepted lcore ID range on the DPDK config instead.
Fixes: d299106e8e31 ("examples/ipsec-secgw: add IPsec sample application")
Cc: sergio.gonzalez.monroy@intel.com
Cc: stable@dpdk.org
Signed-off-by: Sivaprasad Tummala <sivaprasad.tummala@amd.com>
---
examples/ipsec-secgw/event_helper.h | 2 +-
examples/ipsec-secgw/ipsec-secgw.c | 16 +++++++++-------
examples/ipsec-secgw/ipsec.c | 2 +-
3 files changed, 11 insertions(+), 9 deletions(-)
diff --git a/examples/ipsec-secgw/event_helper.h b/examples/ipsec-secgw/event_helper.h
index dfb81bfcf1..9923700f03 100644
--- a/examples/ipsec-secgw/event_helper.h
+++ b/examples/ipsec-secgw/event_helper.h
@@ -102,7 +102,7 @@ struct eh_event_link_info {
/**< Event port ID */
uint8_t eventq_id;
/**< Event queue to be linked to the port */
- uint8_t lcore_id;
+ uint16_t lcore_id;
/**< Lcore to be polling on this port */
};
diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index bf98d2618b..6f550db05c 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
@@ -221,7 +221,7 @@ static const char *cfgfile;
struct lcore_params {
uint16_t port_id;
uint8_t queue_id;
- uint8_t lcore_id;
+ uint16_t lcore_id;
} __rte_cache_aligned;
static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
@@ -810,7 +810,7 @@ check_flow_params(uint16_t fdir_portid, uint8_t fdir_qid)
static int32_t
check_poll_mode_params(struct eh_conf *eh_conf)
{
- uint8_t lcore;
+ uint16_t lcore;
uint16_t portid;
uint16_t i;
int32_t socket_id;
@@ -829,13 +829,13 @@ check_poll_mode_params(struct eh_conf *eh_conf)
for (i = 0; i < nb_lcore_params; ++i) {
lcore = lcore_params[i].lcore_id;
if (!rte_lcore_is_enabled(lcore)) {
- printf("error: lcore %hhu is not enabled in "
+ printf("error: lcore %hu is not enabled in "
"lcore mask\n", lcore);
return -1;
}
socket_id = rte_lcore_to_socket_id(lcore);
if (socket_id != 0 && numa_on == 0) {
- printf("warning: lcore %hhu is on socket %d "
+ printf("warning: lcore %hu is on socket %d "
"with numa off\n",
lcore, socket_id);
}
@@ -870,7 +870,7 @@ static int32_t
init_lcore_rx_queues(void)
{
uint16_t i, nb_rx_queue;
- uint8_t lcore;
+ uint16_t lcore;
for (i = 0; i < nb_lcore_params; ++i) {
lcore = lcore_params[i].lcore_id;
@@ -1051,6 +1051,8 @@ parse_config(const char *q_arg)
char *str_fld[_NUM_FLD];
int32_t i;
uint32_t size;
+ unsigned int max_fld[_NUM_FLD] = {RTE_MAX_ETHPORTS,
+ 255, RTE_MAX_LCORE};
nb_lcore_params = 0;
@@ -1071,7 +1073,7 @@ parse_config(const char *q_arg)
for (i = 0; i < _NUM_FLD; i++) {
errno = 0;
int_fld[i] = strtoul(str_fld[i], &end, 0);
- if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
+ if (errno != 0 || end == str_fld[i] || int_fld[i] > max_fld[i])
return -1;
}
if (nb_lcore_params >= MAX_LCORE_PARAMS) {
@@ -1084,7 +1086,7 @@ parse_config(const char *q_arg)
lcore_params_array[nb_lcore_params].queue_id =
(uint8_t)int_fld[FLD_QUEUE];
lcore_params_array[nb_lcore_params].lcore_id =
- (uint8_t)int_fld[FLD_LCORE];
+ (uint16_t)int_fld[FLD_LCORE];
++nb_lcore_params;
}
lcore_params = lcore_params_array;
diff --git a/examples/ipsec-secgw/ipsec.c b/examples/ipsec-secgw/ipsec.c
index f5cec4a928..5ebb71bb9a 100644
--- a/examples/ipsec-secgw/ipsec.c
+++ b/examples/ipsec-secgw/ipsec.c
@@ -259,7 +259,7 @@ create_lookaside_session(struct ipsec_ctx *ipsec_ctx_lcore[],
continue;
/* Looking for cryptodev, which can handle this SA */
- key.lcore_id = (uint8_t)lcore_id;
+ key.lcore_id = (uint16_t)lcore_id;
key.cipher_algo = (uint8_t)sa->cipher_algo;
key.auth_algo = (uint8_t)sa->auth_algo;
key.aead_algo = (uint8_t)sa->aead_algo;
--
2.25.1
* [PATCH v2 5/6] examples/qos_sched: fix lcore ID restriction
2023-12-19 3:28 ` [PATCH v2 0/6] " Sivaprasad Tummala
` (3 preceding siblings ...)
2023-12-19 3:28 ` [PATCH v2 4/6] examples/ipsec-secgw: " Sivaprasad Tummala
@ 2023-12-19 3:28 ` Sivaprasad Tummala
2023-12-19 3:28 ` [PATCH v2 6/6] examples/vm_power_manager: " Sivaprasad Tummala
` (3 subsequent siblings)
8 siblings, 0 replies; 100+ messages in thread
From: Sivaprasad Tummala @ 2023-12-19 3:28 UTC (permalink / raw)
To: david.hunt, anatoly.burakov, jerinj, radu.nicolau, gakhil,
cristian.dumitrescu, ferruh.yigit, konstantin.ananyev
Cc: dev, stable
Currently the config option allows lcore IDs up to 255,
irrespective of RTE_MAX_LCORE, and needs to be fixed.
The patch bases the accepted lcore ID range on the DPDK config instead.
Fixes: de3cfa2c9823 ("sched: initial import")
Cc: stable@dpdk.org
Signed-off-by: Sivaprasad Tummala <sivaprasad.tummala@amd.com>
---
examples/qos_sched/args.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/examples/qos_sched/args.c b/examples/qos_sched/args.c
index e97273152a..22fe76eeb5 100644
--- a/examples/qos_sched/args.c
+++ b/examples/qos_sched/args.c
@@ -182,10 +182,10 @@ app_parse_flow_conf(const char *conf_str)
pconf->rx_port = vals[0];
pconf->tx_port = vals[1];
- pconf->rx_core = (uint8_t)vals[2];
- pconf->wt_core = (uint8_t)vals[3];
+ pconf->rx_core = (uint16_t)vals[2];
+ pconf->wt_core = (uint16_t)vals[3];
if (ret == 5)
- pconf->tx_core = (uint8_t)vals[4];
+ pconf->tx_core = (uint16_t)vals[4];
else
pconf->tx_core = pconf->wt_core;
--
2.25.1
* [PATCH v2 6/6] examples/vm_power_manager: fix lcore ID restriction
2023-12-19 3:28 ` [PATCH v2 0/6] " Sivaprasad Tummala
` (4 preceding siblings ...)
2023-12-19 3:28 ` [PATCH v2 5/6] examples/qos_sched: " Sivaprasad Tummala
@ 2023-12-19 3:28 ` Sivaprasad Tummala
2023-12-19 3:28 ` [PATCH v2 0/6] " Sivaprasad Tummala
` (2 subsequent siblings)
8 siblings, 0 replies; 100+ messages in thread
From: Sivaprasad Tummala @ 2023-12-19 3:28 UTC (permalink / raw)
To: david.hunt, anatoly.burakov, jerinj, radu.nicolau, gakhil,
cristian.dumitrescu, ferruh.yigit, konstantin.ananyev
Cc: dev, marcinx.hajkowski, stable
Currently the config option allows lcore IDs up to 255,
irrespective of RTE_MAX_LCORE, and needs to be fixed.
The patch bases the accepted lcore ID range on the DPDK config instead.
Fixes: 0e8f47491f09 ("examples/vm_power: add command to query CPU frequency")
Cc: marcinx.hajkowski@intel.com
Cc: stable@dpdk.org
Signed-off-by: Sivaprasad Tummala <sivaprasad.tummala@amd.com>
---
examples/vm_power_manager/guest_cli/vm_power_cli_guest.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/examples/vm_power_manager/guest_cli/vm_power_cli_guest.c b/examples/vm_power_manager/guest_cli/vm_power_cli_guest.c
index 94bfbbaf78..a586853a76 100644
--- a/examples/vm_power_manager/guest_cli/vm_power_cli_guest.c
+++ b/examples/vm_power_manager/guest_cli/vm_power_cli_guest.c
@@ -401,7 +401,7 @@ check_response_cmd(unsigned int lcore_id, int *result)
struct cmd_set_cpu_freq_result {
cmdline_fixed_string_t set_cpu_freq;
- uint8_t lcore_id;
+ uint16_t lcore_id;
cmdline_fixed_string_t cmd;
};
@@ -444,7 +444,7 @@ cmdline_parse_token_string_t cmd_set_cpu_freq =
set_cpu_freq, "set_cpu_freq");
cmdline_parse_token_num_t cmd_set_cpu_freq_core_num =
TOKEN_NUM_INITIALIZER(struct cmd_set_cpu_freq_result,
- lcore_id, RTE_UINT8);
+ lcore_id, RTE_UINT16);
cmdline_parse_token_string_t cmd_set_cpu_freq_cmd_cmd =
TOKEN_STRING_INITIALIZER(struct cmd_set_cpu_freq_result,
cmd, "up#down#min#max#enable_turbo#disable_turbo");
--
2.25.1
* [PATCH v2 0/6] fix lcore ID restriction
2023-12-19 3:28 ` [PATCH v2 0/6] " Sivaprasad Tummala
` (5 preceding siblings ...)
2023-12-19 3:28 ` [PATCH v2 6/6] examples/vm_power_manager: " Sivaprasad Tummala
@ 2023-12-19 3:28 ` Sivaprasad Tummala
2023-12-20 6:44 ` [PATCH v3 " Sivaprasad Tummala
2024-11-29 4:32 ` [PATCH] power/amd_uncore: add e-smi installation instructions Sivaprasad Tummala
8 siblings, 0 replies; 100+ messages in thread
From: Sivaprasad Tummala @ 2023-12-19 3:28 UTC (permalink / raw)
To: david.hunt, anatoly.burakov, jerinj, radu.nicolau, gakhil,
cristian.dumitrescu, ferruh.yigit, konstantin.ananyev
Cc: dev
With modern CPUs it is possible to have a much higher
CPU count, and thus a higher RTE_MAX_LCORE.
In the DPDK sample applications, however, the current
config lcore options are hard-limited to 255.
The patchset removes this constraint by allowing
all lcore IDs up to RTE_MAX_LCORE.
Sivaprasad Tummala (6):
examples/l3fwd: fix lcore ID restriction
examples/l3fwd-power: fix lcore ID restriction
examples/l3fwd-graph: fix lcore ID restriction
examples/ipsec-secgw: fix lcore ID restriction
examples/qos_sched: fix lcore ID restriction
examples/vm_power_manager: fix lcore ID restriction
examples/ipsec-secgw/event_helper.h | 2 +-
examples/ipsec-secgw/ipsec-secgw.c | 16 +++++++++-------
examples/ipsec-secgw/ipsec.c | 2 +-
examples/l3fwd-graph/main.c | 14 +++++++-------
examples/l3fwd-power/main.c | 16 +++++++++-------
examples/l3fwd-power/main.h | 2 +-
examples/l3fwd/main.c | 19 +++++++++++--------
examples/qos_sched/args.c | 6 +++---
.../guest_cli/vm_power_cli_guest.c | 4 ++--
9 files changed, 44 insertions(+), 37 deletions(-)
--
2.25.1
* RE: [PATCH v2 4/6] examples/ipsec-secgw: fix lcore ID restriction
2023-12-19 3:28 ` [PATCH v2 4/6] examples/ipsec-secgw: " Sivaprasad Tummala
@ 2023-12-19 12:03 ` Konstantin Ananyev
0 siblings, 0 replies; 100+ messages in thread
From: Konstantin Ananyev @ 2023-12-19 12:03 UTC (permalink / raw)
To: Sivaprasad Tummala, david.hunt, anatoly.burakov, jerinj,
radu.nicolau, gakhil, cristian.dumitrescu, ferruh.yigit
Cc: dev, sergio.gonzalez.monroy, stable
> Currently the config option allows lcore IDs up to 255,
> irrespective of RTE_MAX_LCORE, and needs to be fixed.
>
> The patch bases the accepted lcore ID range on the DPDK config instead.
>
> Fixes: d299106e8e31 ("examples/ipsec-secgw: add IPsec sample application")
> Cc: sergio.gonzalez.monroy@intel.com
> Cc: stable@dpdk.org
>
> Signed-off-by: Sivaprasad Tummala <sivaprasad.tummala@amd.com>
> ---
> examples/ipsec-secgw/event_helper.h | 2 +-
> examples/ipsec-secgw/ipsec-secgw.c | 16 +++++++++-------
> examples/ipsec-secgw/ipsec.c | 2 +-
> 3 files changed, 11 insertions(+), 9 deletions(-)
>
> diff --git a/examples/ipsec-secgw/event_helper.h b/examples/ipsec-secgw/event_helper.h
> index dfb81bfcf1..9923700f03 100644
> --- a/examples/ipsec-secgw/event_helper.h
> +++ b/examples/ipsec-secgw/event_helper.h
> @@ -102,7 +102,7 @@ struct eh_event_link_info {
> /**< Event port ID */
> uint8_t eventq_id;
> /**< Event queue to be linked to the port */
> - uint8_t lcore_id;
> + uint16_t lcore_id;
> /**< Lcore to be polling on this port */
> };
>
> diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
> index bf98d2618b..6f550db05c 100644
> --- a/examples/ipsec-secgw/ipsec-secgw.c
> +++ b/examples/ipsec-secgw/ipsec-secgw.c
> @@ -221,7 +221,7 @@ static const char *cfgfile;
> struct lcore_params {
> uint16_t port_id;
> uint8_t queue_id;
> - uint8_t lcore_id;
> + uint16_t lcore_id;
> } __rte_cache_aligned;
>
> static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
> @@ -810,7 +810,7 @@ check_flow_params(uint16_t fdir_portid, uint8_t fdir_qid)
> static int32_t
> check_poll_mode_params(struct eh_conf *eh_conf)
> {
> - uint8_t lcore;
> + uint16_t lcore;
> uint16_t portid;
> uint16_t i;
> int32_t socket_id;
> @@ -829,13 +829,13 @@ check_poll_mode_params(struct eh_conf *eh_conf)
> for (i = 0; i < nb_lcore_params; ++i) {
> lcore = lcore_params[i].lcore_id;
> if (!rte_lcore_is_enabled(lcore)) {
> - printf("error: lcore %hhu is not enabled in "
> + printf("error: lcore %hu is not enabled in "
> "lcore mask\n", lcore);
> return -1;
> }
> socket_id = rte_lcore_to_socket_id(lcore);
> if (socket_id != 0 && numa_on == 0) {
> - printf("warning: lcore %hhu is on socket %d "
> + printf("warning: lcore %hu is on socket %d "
> "with numa off\n",
> lcore, socket_id);
> }
> @@ -870,7 +870,7 @@ static int32_t
> init_lcore_rx_queues(void)
> {
> uint16_t i, nb_rx_queue;
> - uint8_t lcore;
> + uint16_t lcore;
>
> for (i = 0; i < nb_lcore_params; ++i) {
> lcore = lcore_params[i].lcore_id;
> @@ -1051,6 +1051,8 @@ parse_config(const char *q_arg)
> char *str_fld[_NUM_FLD];
> int32_t i;
> uint32_t size;
> + unsigned int max_fld[_NUM_FLD] = {RTE_MAX_ETHPORTS,
> + 255, RTE_MAX_LCORE};
>
> nb_lcore_params = 0;
>
> @@ -1071,7 +1073,7 @@ parse_config(const char *q_arg)
> for (i = 0; i < _NUM_FLD; i++) {
> errno = 0;
> int_fld[i] = strtoul(str_fld[i], &end, 0);
> - if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
> + if (errno != 0 || end == str_fld[i] || int_fld[i] > max_fld[i])
> return -1;
> }
> if (nb_lcore_params >= MAX_LCORE_PARAMS) {
> @@ -1084,7 +1086,7 @@ parse_config(const char *q_arg)
> lcore_params_array[nb_lcore_params].queue_id =
> (uint8_t)int_fld[FLD_QUEUE];
> lcore_params_array[nb_lcore_params].lcore_id =
> - (uint8_t)int_fld[FLD_LCORE];
> + (uint16_t)int_fld[FLD_LCORE];
> ++nb_lcore_params;
> }
> lcore_params = lcore_params_array;
> diff --git a/examples/ipsec-secgw/ipsec.c b/examples/ipsec-secgw/ipsec.c
> index f5cec4a928..5ebb71bb9a 100644
> --- a/examples/ipsec-secgw/ipsec.c
> +++ b/examples/ipsec-secgw/ipsec.c
> @@ -259,7 +259,7 @@ create_lookaside_session(struct ipsec_ctx *ipsec_ctx_lcore[],
> continue;
>
> /* Looking for cryptodev, which can handle this SA */
> - key.lcore_id = (uint8_t)lcore_id;
> + key.lcore_id = (uint16_t)lcore_id;
> key.cipher_algo = (uint8_t)sa->cipher_algo;
> key.auth_algo = (uint8_t)sa->auth_algo;
> key.aead_algo = (uint8_t)sa->aead_algo;
> --
Acked-by: Konstantin Ananyev <konstantin.ananyev@huawei.com>
> 2.25.1
* RE: [PATCH v2 1/6] examples/l3fwd: fix lcore ID restriction
2023-12-19 3:28 ` [PATCH v2 1/6] examples/l3fwd: " Sivaprasad Tummala
@ 2023-12-19 12:05 ` Konstantin Ananyev
2023-12-19 12:30 ` Konstantin Ananyev
0 siblings, 1 reply; 100+ messages in thread
From: Konstantin Ananyev @ 2023-12-19 12:05 UTC (permalink / raw)
To: Sivaprasad Tummala, david.hunt, anatoly.burakov, jerinj,
radu.nicolau, gakhil, cristian.dumitrescu, ferruh.yigit
Cc: dev, stable
> Currently the config option allows lcore IDs up to 255,
> irrespective of RTE_MAX_LCORE, and needs to be fixed.
>
> The patch bases the accepted lcore ID range on the DPDK config instead.
>
> Fixes: af75078fece3 ("first public release")
> Cc: stable@dpdk.org
>
> Signed-off-by: Sivaprasad Tummala <sivaprasad.tummala@amd.com>
> ---
> examples/l3fwd/main.c | 19 +++++++++++--------
> 1 file changed, 11 insertions(+), 8 deletions(-)
>
> diff --git a/examples/l3fwd/main.c b/examples/l3fwd/main.c
> index 3bf28aec0c..ed116da09c 100644
> --- a/examples/l3fwd/main.c
> +++ b/examples/l3fwd/main.c
> @@ -99,7 +99,7 @@ struct parm_cfg parm_config;
> struct lcore_params {
> uint16_t port_id;
> uint8_t queue_id;
> - uint8_t lcore_id;
> + uint16_t lcore_id;
> } __rte_cache_aligned;
>
> static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
> @@ -292,8 +292,8 @@ setup_l3fwd_lookup_tables(void)
> static int
> check_lcore_params(void)
> {
> - uint8_t queue, lcore;
> - uint16_t i;
> + uint8_t queue;
> + uint16_t i, lcore;
> int socketid;
>
> for (i = 0; i < nb_lcore_params; ++i) {
> @@ -304,12 +304,12 @@ check_lcore_params(void)
> }
> lcore = lcore_params[i].lcore_id;
> if (!rte_lcore_is_enabled(lcore)) {
> - printf("error: lcore %hhu is not enabled in lcore mask\n", lcore);
> + printf("error: lcore %hu is not enabled in lcore mask\n", lcore);
> return -1;
> }
> if ((socketid = rte_lcore_to_socket_id(lcore) != 0) &&
> (numa_on == 0)) {
> - printf("warning: lcore %hhu is on socket %d with numa off \n",
> + printf("warning: lcore %hu is on socket %d with numa off\n",
> lcore, socketid);
> }
> }
> @@ -359,7 +359,7 @@ static int
> init_lcore_rx_queues(void)
> {
> uint16_t i, nb_rx_queue;
> - uint8_t lcore;
> + uint16_t lcore;
>
> for (i = 0; i < nb_lcore_params; ++i) {
> lcore = lcore_params[i].lcore_id;
> @@ -500,6 +500,8 @@ parse_config(const char *q_arg)
> char *str_fld[_NUM_FLD];
> int i;
> unsigned size;
> + unsigned int max_fld[_NUM_FLD] = {RTE_MAX_ETHPORTS,
> + 255, RTE_MAX_LCORE};
>
> nb_lcore_params = 0;
>
> @@ -518,7 +520,8 @@ parse_config(const char *q_arg)
> for (i = 0; i < _NUM_FLD; i++){
> errno = 0;
> int_fld[i] = strtoul(str_fld[i], &end, 0);
> - if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
> + if (errno != 0 || end == str_fld[i] || int_fld[i] >
> + max_fld[i])
> return -1;
> }
> if (nb_lcore_params >= MAX_LCORE_PARAMS) {
> @@ -531,7 +534,7 @@ parse_config(const char *q_arg)
> lcore_params_array[nb_lcore_params].queue_id =
> (uint8_t)int_fld[FLD_QUEUE];
> lcore_params_array[nb_lcore_params].lcore_id =
> - (uint8_t)int_fld[FLD_LCORE];
> + (uint16_t)int_fld[FLD_LCORE];
> ++nb_lcore_params;
> }
> lcore_params = lcore_params_array;
> --
Acked-by: Konstantin Ananyev <konstantin.ananyev@huawei.com>
> 2.25.1
* RE: [PATCH v2 1/6] examples/l3fwd: fix lcore ID restriction
2023-12-19 12:05 ` Konstantin Ananyev
@ 2023-12-19 12:30 ` Konstantin Ananyev
2023-12-19 14:18 ` Tummala, Sivaprasad
0 siblings, 1 reply; 100+ messages in thread
From: Konstantin Ananyev @ 2023-12-19 12:30 UTC (permalink / raw)
To: Konstantin Ananyev, Sivaprasad Tummala, david.hunt,
anatoly.burakov, jerinj, radu.nicolau, gakhil,
cristian.dumitrescu, ferruh.yigit
Cc: dev, stable
>
> > Currently the config option allows lcore IDs up to 255,
> > irrespective of RTE_MAX_LCORE, and needs to be fixed.
> >
> > The patch bases the accepted lcore ID range on the DPDK config instead.
> >
> > Fixes: af75078fece3 ("first public release")
> > Cc: stable@dpdk.org
> >
> > Signed-off-by: Sivaprasad Tummala <sivaprasad.tummala@amd.com>
> > ---
> > examples/l3fwd/main.c | 19 +++++++++++--------
> > 1 file changed, 11 insertions(+), 8 deletions(-)
> >
> > diff --git a/examples/l3fwd/main.c b/examples/l3fwd/main.c
> > index 3bf28aec0c..ed116da09c 100644
> > --- a/examples/l3fwd/main.c
> > +++ b/examples/l3fwd/main.c
> > @@ -99,7 +99,7 @@ struct parm_cfg parm_config;
> > struct lcore_params {
> > uint16_t port_id;
> > uint8_t queue_id;
Actually one comment:
As lcore_id becomes uint16_t, it might be worth doing the same for
queue_id; they are usually very much related.
> > - uint8_t lcore_id;
> > + uint16_t lcore_id;
> > } __rte_cache_aligned;
> >
> > static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
> > @@ -292,8 +292,8 @@ setup_l3fwd_lookup_tables(void)
> > static int
> > check_lcore_params(void)
> > {
> > - uint8_t queue, lcore;
> > - uint16_t i;
> > + uint8_t queue;
> > + uint16_t i, lcore;
> > int socketid;
> >
> > for (i = 0; i < nb_lcore_params; ++i) {
> > @@ -304,12 +304,12 @@ check_lcore_params(void)
> > }
> > lcore = lcore_params[i].lcore_id;
> > if (!rte_lcore_is_enabled(lcore)) {
> > - printf("error: lcore %hhu is not enabled in lcore mask\n", lcore);
> > + printf("error: lcore %hu is not enabled in lcore mask\n", lcore);
> > return -1;
> > }
> > if ((socketid = rte_lcore_to_socket_id(lcore) != 0) &&
> > (numa_on == 0)) {
> > - printf("warning: lcore %hhu is on socket %d with numa off \n",
> > + printf("warning: lcore %hu is on socket %d with numa off\n",
> > lcore, socketid);
> > }
> > }
> > @@ -359,7 +359,7 @@ static int
> > init_lcore_rx_queues(void)
> > {
> > uint16_t i, nb_rx_queue;
> > - uint8_t lcore;
> > + uint16_t lcore;
> >
> > for (i = 0; i < nb_lcore_params; ++i) {
> > lcore = lcore_params[i].lcore_id;
> > @@ -500,6 +500,8 @@ parse_config(const char *q_arg)
> > char *str_fld[_NUM_FLD];
> > int i;
> > unsigned size;
> > + unsigned int max_fld[_NUM_FLD] = {RTE_MAX_ETHPORTS,
> > + 255, RTE_MAX_LCORE};
> >
> > nb_lcore_params = 0;
> >
> > @@ -518,7 +520,8 @@ parse_config(const char *q_arg)
> > for (i = 0; i < _NUM_FLD; i++){
> > errno = 0;
> > int_fld[i] = strtoul(str_fld[i], &end, 0);
> > - if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
> > + if (errno != 0 || end == str_fld[i] || int_fld[i] >
> > + max_fld[i])
> > return -1;
> > }
> > if (nb_lcore_params >= MAX_LCORE_PARAMS) {
> > @@ -531,7 +534,7 @@ parse_config(const char *q_arg)
> > lcore_params_array[nb_lcore_params].queue_id =
> > (uint8_t)int_fld[FLD_QUEUE];
> > lcore_params_array[nb_lcore_params].lcore_id =
> > - (uint8_t)int_fld[FLD_LCORE];
> > + (uint16_t)int_fld[FLD_LCORE];
> > ++nb_lcore_params;
> > }
> > lcore_params = lcore_params_array;
> > --
>
> Acked-by: Konstantin Ananyev <konstantin.ananyev@huawei.com>
>
>
> > 2.25.1
* RE: [PATCH v2 1/6] examples/l3fwd: fix lcore ID restriction
2023-12-19 12:30 ` Konstantin Ananyev
@ 2023-12-19 14:18 ` Tummala, Sivaprasad
2023-12-19 15:10 ` Konstantin Ananyev
0 siblings, 1 reply; 100+ messages in thread
From: Tummala, Sivaprasad @ 2023-12-19 14:18 UTC (permalink / raw)
To: Konstantin Ananyev, david.hunt, anatoly.burakov, jerinj,
radu.nicolau, gakhil, cristian.dumitrescu, Yigit, Ferruh
Cc: dev, stable
Hi Konstantin,
> -----Original Message-----
> From: Konstantin Ananyev <konstantin.ananyev@huawei.com>
> Sent: Tuesday, December 19, 2023 6:00 PM
> To: Konstantin Ananyev <konstantin.ananyev@huawei.com>; Tummala, Sivaprasad
> <Sivaprasad.Tummala@amd.com>; david.hunt@intel.com;
> anatoly.burakov@intel.com; jerinj@marvell.com; radu.nicolau@intel.com;
> gakhil@marvell.com; cristian.dumitrescu@intel.com; Yigit, Ferruh
> <Ferruh.Yigit@amd.com>
> Cc: dev@dpdk.org; stable@dpdk.org
> Subject: RE: [PATCH v2 1/6] examples/l3fwd: fix lcore ID restriction
>
>
>
> >
> > > Currently the config option allows lcore IDs up to 255, irrespective
> > > of RTE_MAX_LCORE, and needs to be fixed.
> > >
> > > The patch bases the accepted lcore ID range on the DPDK config instead.
> > >
> > > Fixes: af75078fece3 ("first public release")
> > > Cc: stable@dpdk.org
> > >
> > > Signed-off-by: Sivaprasad Tummala <sivaprasad.tummala@amd.com>
> > > ---
> > > examples/l3fwd/main.c | 19 +++++++++++--------
> > > 1 file changed, 11 insertions(+), 8 deletions(-)
> > >
> > > diff --git a/examples/l3fwd/main.c b/examples/l3fwd/main.c index
> > > 3bf28aec0c..ed116da09c 100644
> > > --- a/examples/l3fwd/main.c
> > > +++ b/examples/l3fwd/main.c
> > > @@ -99,7 +99,7 @@ struct parm_cfg parm_config; struct lcore_params
> > > {
> > > uint16_t port_id;
> > > uint8_t queue_id;
>
> Actually one comment:
> As lcore_id becomes uint16_t, it might be worth doing the same for
> queue_id; they are usually very much related.
Yes, that's a valid statement for one network interface.
With multiple interfaces, it's a combination of port/queue that maps to a specific lcore.
If there are NICs that support more than 256 queues, then it makes sense to change the
queue_id type as well.
Please let me know your thoughts.
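To make that concrete, a hypothetical follow-up widening queue_id as
well (illustrative only, not part of this series) would mainly need:

	struct lcore_params {
		uint16_t port_id;
		uint16_t queue_id;	/* widened together with lcore_id */
		uint16_t lcore_id;
	} __rte_cache_aligned;

plus raising the corresponding parse_config() bound from 255 to the
ethdev queue limit (e.g. RTE_MAX_QUEUES_PER_PORT).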
>
> > > - uint8_t lcore_id;
> > > + uint16_t lcore_id;
> > > } __rte_cache_aligned;
> > >
> > > static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
> > > @@ -292,8 +292,8 @@ setup_l3fwd_lookup_tables(void) static int
> > > check_lcore_params(void)
> > > {
> > > - uint8_t queue, lcore;
> > > - uint16_t i;
> > > + uint8_t queue;
> > > + uint16_t i, lcore;
> > > int socketid;
> > >
> > > for (i = 0; i < nb_lcore_params; ++i) { @@ -304,12 +304,12 @@
> > > check_lcore_params(void)
> > > }
> > > lcore = lcore_params[i].lcore_id;
> > > if (!rte_lcore_is_enabled(lcore)) {
> > > - printf("error: lcore %hhu is not enabled in lcore mask\n", lcore);
> > > + printf("error: lcore %hu is not enabled in lcore
> > > + mask\n", lcore);
> > > return -1;
> > > }
> > > if ((socketid = rte_lcore_to_socket_id(lcore) != 0) &&
> > > (numa_on == 0)) {
> > > - printf("warning: lcore %hhu is on socket %d with numa off \n",
> > > + printf("warning: lcore %hu is on socket %d with
> > > + numa off\n",
> > > lcore, socketid);
> > > }
> > > }
> > > @@ -359,7 +359,7 @@ static int
> > > init_lcore_rx_queues(void)
> > > {
> > > uint16_t i, nb_rx_queue;
> > > - uint8_t lcore;
> > > + uint16_t lcore;
> > >
> > > for (i = 0; i < nb_lcore_params; ++i) {
> > > lcore = lcore_params[i].lcore_id; @@ -500,6 +500,8 @@
> > > parse_config(const char *q_arg)
> > > char *str_fld[_NUM_FLD];
> > > int i;
> > > unsigned size;
> > > + unsigned int max_fld[_NUM_FLD] = {RTE_MAX_ETHPORTS,
> > > + 255, RTE_MAX_LCORE};
> > >
> > > nb_lcore_params = 0;
> > >
> > > @@ -518,7 +520,8 @@ parse_config(const char *q_arg)
> > > for (i = 0; i < _NUM_FLD; i++){
> > > errno = 0;
> > > int_fld[i] = strtoul(str_fld[i], &end, 0);
> > > - if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
> > > + if (errno != 0 || end == str_fld[i] || int_fld[i] >
> > > +
> > > + max_fld[i])
> > > return -1;
> > > }
> > > if (nb_lcore_params >= MAX_LCORE_PARAMS) { @@ -531,7
> > > +534,7 @@ parse_config(const char *q_arg)
> > > lcore_params_array[nb_lcore_params].queue_id =
> > > (uint8_t)int_fld[FLD_QUEUE];
> > > lcore_params_array[nb_lcore_params].lcore_id =
> > > - (uint8_t)int_fld[FLD_LCORE];
> > > + (uint16_t)int_fld[FLD_LCORE];
> > > ++nb_lcore_params;
> > > }
> > > lcore_params = lcore_params_array;
> > > --
> >
> > Acked-by: Konstantin Ananyev <konstantin.ananyev@huawei.com>
> >
> >
> > > 2.25.1
* RE: [PATCH v2 1/6] examples/l3fwd: fix lcore ID restriction
2023-12-19 14:18 ` Tummala, Sivaprasad
@ 2023-12-19 15:10 ` Konstantin Ananyev
2023-12-20 1:32 ` Tummala, Sivaprasad
0 siblings, 1 reply; 100+ messages in thread
From: Konstantin Ananyev @ 2023-12-19 15:10 UTC (permalink / raw)
To: Tummala, Sivaprasad, david.hunt, anatoly.burakov, jerinj,
radu.nicolau, gakhil, cristian.dumitrescu, Yigit, Ferruh
Cc: dev, stable
Hi Sivaprasad,
>
> Hi Konstantin,
>
> > -----Original Message-----
> > From: Konstantin Ananyev <konstantin.ananyev@huawei.com>
> > Sent: Tuesday, December 19, 2023 6:00 PM
> > To: Konstantin Ananyev <konstantin.ananyev@huawei.com>; Tummala, Sivaprasad
> > <Sivaprasad.Tummala@amd.com>; david.hunt@intel.com;
> > anatoly.burakov@intel.com; jerinj@marvell.com; radu.nicolau@intel.com;
> > gakhil@marvell.com; cristian.dumitrescu@intel.com; Yigit, Ferruh
> > <Ferruh.Yigit@amd.com>
> > Cc: dev@dpdk.org; stable@dpdk.org
> > Subject: RE: [PATCH v2 1/6] examples/l3fwd: fix lcore ID restriction
> >
> >
> >
> > >
> > > > Currently the config option allows lcore IDs up to 255, irrespective
> > > > of RTE_MAX_LCORE, and needs to be fixed.
> > > >
> > > > The patch bases the accepted lcore ID range on the DPDK config instead.
> > > >
> > > > Fixes: af75078fece3 ("first public release")
> > > > Cc: stable@dpdk.org
> > > >
> > > > Signed-off-by: Sivaprasad Tummala <sivaprasad.tummala@amd.com>
> > > > ---
> > > > examples/l3fwd/main.c | 19 +++++++++++--------
> > > > 1 file changed, 11 insertions(+), 8 deletions(-)
> > > >
> > > > diff --git a/examples/l3fwd/main.c b/examples/l3fwd/main.c index
> > > > 3bf28aec0c..ed116da09c 100644
> > > > --- a/examples/l3fwd/main.c
> > > > +++ b/examples/l3fwd/main.c
> > > > @@ -99,7 +99,7 @@ struct parm_cfg parm_config; struct lcore_params
> > > > {
> > > > uint16_t port_id;
> > > > uint8_t queue_id;
> >
> > Actually one comment:
> > As lcore_id becomes uint16_t, it might be worth doing the same for
> > queue_id; they are usually very much related.
> Yes, that's a valid statement for one network interface.
> With multiple interfaces, it's a combination of port/queue that maps to a specific lcore.
> If there are NICs that support more than 256 queues, then it makes sense to change the
> queue_id type as well.
AFAIK, the majority of modern NICs do support more than 256 queues.
That's why in the rte_ethdev API queue_id is uint16_t.
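For reference, the Rx queue setup prototype in rte_ethdev.h already
takes a 16-bit queue index:

	int rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
		uint16_t nb_rx_desc, unsigned int socket_id,
		const struct rte_eth_rxconf *rx_conf,
		struct rte_mempool *mb_pool);

so a uint8_t queue_id in the examples is narrower than the API allows.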
>
> Please let me know your thoughts.
> >
> > > > - uint8_t lcore_id;
> > > > + uint16_t lcore_id;
> > > > } __rte_cache_aligned;
> > > >
> > > > static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
> > > > @@ -292,8 +292,8 @@ setup_l3fwd_lookup_tables(void) static int
> > > > check_lcore_params(void)
> > > > {
> > > > - uint8_t queue, lcore;
> > > > - uint16_t i;
> > > > + uint8_t queue;
> > > > + uint16_t i, lcore;
> > > > int socketid;
> > > >
> > > > for (i = 0; i < nb_lcore_params; ++i) { @@ -304,12 +304,12 @@
> > > > check_lcore_params(void)
> > > > }
> > > > lcore = lcore_params[i].lcore_id;
> > > > if (!rte_lcore_is_enabled(lcore)) {
> > > > - printf("error: lcore %hhu is not enabled in lcore mask\n", lcore);
> > > > + printf("error: lcore %hu is not enabled in lcore
> > > > + mask\n", lcore);
> > > > return -1;
> > > > }
> > > > if ((socketid = rte_lcore_to_socket_id(lcore) != 0) &&
> > > > (numa_on == 0)) {
> > > > - printf("warning: lcore %hhu is on socket %d with numa off \n",
> > > > + printf("warning: lcore %hu is on socket %d with
> > > > + numa off\n",
> > > > lcore, socketid);
> > > > }
> > > > }
> > > > @@ -359,7 +359,7 @@ static int
> > > > init_lcore_rx_queues(void)
> > > > {
> > > > uint16_t i, nb_rx_queue;
> > > > - uint8_t lcore;
> > > > + uint16_t lcore;
> > > >
> > > > for (i = 0; i < nb_lcore_params; ++i) {
> > > > lcore = lcore_params[i].lcore_id; @@ -500,6 +500,8 @@
> > > > parse_config(const char *q_arg)
> > > > char *str_fld[_NUM_FLD];
> > > > int i;
> > > > unsigned size;
> > > > + unsigned int max_fld[_NUM_FLD] = {RTE_MAX_ETHPORTS,
> > > > + 255, RTE_MAX_LCORE};
> > > >
> > > > nb_lcore_params = 0;
> > > >
> > > > @@ -518,7 +520,8 @@ parse_config(const char *q_arg)
> > > > for (i = 0; i < _NUM_FLD; i++){
> > > > errno = 0;
> > > > int_fld[i] = strtoul(str_fld[i], &end, 0);
> > > > - if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
> > > > + if (errno != 0 || end == str_fld[i] || int_fld[i] >
> > > > +
> > > > + max_fld[i])
> > > > return -1;
> > > > }
> > > > if (nb_lcore_params >= MAX_LCORE_PARAMS) { @@ -531,7
> > > > +534,7 @@ parse_config(const char *q_arg)
> > > > lcore_params_array[nb_lcore_params].queue_id =
> > > > (uint8_t)int_fld[FLD_QUEUE];
> > > > lcore_params_array[nb_lcore_params].lcore_id =
> > > > - (uint8_t)int_fld[FLD_LCORE];
> > > > + (uint16_t)int_fld[FLD_LCORE];
> > > > ++nb_lcore_params;
> > > > }
> > > > lcore_params = lcore_params_array;
> > > > --
> > >
> > > Acked-by: Konstantin Ananyev <konstantin.ananyev@huawei.com>
> > >
> > >
> > > > 2.25.1
^ permalink raw reply [flat|nested] 100+ messages in thread
* RE: [PATCH v2 1/6] examples/l3fwd: fix lcore ID restriction
2023-12-19 15:10 ` Konstantin Ananyev
@ 2023-12-20 1:32 ` Tummala, Sivaprasad
0 siblings, 0 replies; 100+ messages in thread
From: Tummala, Sivaprasad @ 2023-12-20 1:32 UTC (permalink / raw)
To: Konstantin Ananyev, david.hunt, anatoly.burakov, jerinj,
radu.nicolau, gakhil, cristian.dumitrescu, Yigit, Ferruh
Cc: dev, stable
Hi Konstantin,
> -----Original Message-----
> From: Konstantin Ananyev <konstantin.ananyev@huawei.com>
> Sent: Tuesday, December 19, 2023 8:40 PM
> To: Tummala, Sivaprasad <Sivaprasad.Tummala@amd.com>;
> david.hunt@intel.com; anatoly.burakov@intel.com; jerinj@marvell.com;
> radu.nicolau@intel.com; gakhil@marvell.com; cristian.dumitrescu@intel.com; Yigit,
> Ferruh <Ferruh.Yigit@amd.com>
> Cc: dev@dpdk.org; stable@dpdk.org
> Subject: RE: [PATCH v2 1/6] examples/l3fwd: fix lcore ID restriction
>
>
>
> Hi Sivaprasad,
>
> >
> > Hi Konstantin,
> >
> > > -----Original Message-----
> > > From: Konstantin Ananyev <konstantin.ananyev@huawei.com>
> > > Sent: Tuesday, December 19, 2023 6:00 PM
> > > To: Konstantin Ananyev <konstantin.ananyev@huawei.com>; Tummala,
> > > Sivaprasad <Sivaprasad.Tummala@amd.com>; david.hunt@intel.com;
> > > anatoly.burakov@intel.com; jerinj@marvell.com;
> > > radu.nicolau@intel.com; gakhil@marvell.com;
> > > cristian.dumitrescu@intel.com; Yigit, Ferruh <Ferruh.Yigit@amd.com>
> > > Cc: dev@dpdk.org; stable@dpdk.org
> > > Subject: RE: [PATCH v2 1/6] examples/l3fwd: fix lcore ID restriction
> > >
> > >
> > >
> > > >
> > > > > Currently the config option allows lcore IDs up to 255,
> > > > > irrespective of RTE_MAX_LCORES and needs to be fixed.
> > > > >
> > > > > The patch allows config options based on DPDK config.
> > > > >
> > > > > Fixes: af75078fece3 ("first public release")
> > > > > Cc: stable@dpdk.org
> > > > >
> > > > > Signed-off-by: Sivaprasad Tummala <sivaprasad.tummala@amd.com>
> > > > > ---
> > > > > examples/l3fwd/main.c | 19 +++++++++++--------
> > > > > 1 file changed, 11 insertions(+), 8 deletions(-)
> > > > >
> > > > > diff --git a/examples/l3fwd/main.c b/examples/l3fwd/main.c index
> > > > > 3bf28aec0c..ed116da09c 100644
> > > > > --- a/examples/l3fwd/main.c
> > > > > +++ b/examples/l3fwd/main.c
> > > > > @@ -99,7 +99,7 @@ struct parm_cfg parm_config; struct
> > > > > lcore_params {
> > > > > uint16_t port_id;
> > > > > uint8_t queue_id;
> > >
> > > Actually one comment:
> > > As lcore_id becomes uint16_t it might be worth doing the same for
> > > queue_id; they usually are very much related.
> > Yes, that's a valid statement for one network interface.
> > With multiple interfaces, it's a combination of port/queue that maps to a specific
> > lcore.
> > If there are NICs that support more than 256 queues, then it makes sense
> > to change the queue_id type as well.
>
> AFAIK, the majority of modern NICs do support more than 256 queues.
> That's why in the rte_ethdev API queue_id is uint16_t.
Thanks. Will update the queue_id type to uint16_t in the next version.
>
> >
> > Please let me know your thoughts.
> > >
> > > > > - uint8_t lcore_id;
> > > > > + uint16_t lcore_id;
> > > > > } __rte_cache_aligned;
> > > > >
> > > > > static struct lcore_params
> > > > > lcore_params_array[MAX_LCORE_PARAMS];
> > > > > @@ -292,8 +292,8 @@ setup_l3fwd_lookup_tables(void) static int
> > > > > check_lcore_params(void)
> > > > > {
> > > > > - uint8_t queue, lcore;
> > > > > - uint16_t i;
> > > > > + uint8_t queue;
> > > > > + uint16_t i, lcore;
> > > > > int socketid;
> > > > >
> > > > > for (i = 0; i < nb_lcore_params; ++i) { @@ -304,12 +304,12
> > > > > @@
> > > > > check_lcore_params(void)
> > > > > }
> > > > > lcore = lcore_params[i].lcore_id;
> > > > > if (!rte_lcore_is_enabled(lcore)) {
> > > > > - printf("error: lcore %hhu is not enabled in lcore mask\n",
> lcore);
> > > > > + printf("error: lcore %hu is not enabled in
> > > > > + lcore mask\n", lcore);
> > > > > return -1;
> > > > > }
> > > > > if ((socketid = rte_lcore_to_socket_id(lcore) != 0) &&
> > > > > (numa_on == 0)) {
> > > > > - printf("warning: lcore %hhu is on socket %d with numa off
> \n",
> > > > > + printf("warning: lcore %hu is on socket %d
> > > > > + with numa off\n",
> > > > > lcore, socketid);
> > > > > }
> > > > > }
> > > > > @@ -359,7 +359,7 @@ static int
> > > > > init_lcore_rx_queues(void)
> > > > > {
> > > > > uint16_t i, nb_rx_queue;
> > > > > - uint8_t lcore;
> > > > > + uint16_t lcore;
> > > > >
> > > > > for (i = 0; i < nb_lcore_params; ++i) {
> > > > > lcore = lcore_params[i].lcore_id; @@ -500,6 +500,8
> > > > > @@ parse_config(const char *q_arg)
> > > > > char *str_fld[_NUM_FLD];
> > > > > int i;
> > > > > unsigned size;
> > > > > + unsigned int max_fld[_NUM_FLD] = {RTE_MAX_ETHPORTS,
> > > > > + 255, RTE_MAX_LCORE};
> > > > >
> > > > > nb_lcore_params = 0;
> > > > >
> > > > > @@ -518,7 +520,8 @@ parse_config(const char *q_arg)
> > > > > for (i = 0; i < _NUM_FLD; i++){
> > > > > errno = 0;
> > > > > int_fld[i] = strtoul(str_fld[i], &end, 0);
> > > > > - if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
> > > > > + if (errno != 0 || end == str_fld[i] ||
> > > > > + int_fld[i] >
> > > > > +
> > > > > + max_fld[i])
> > > > > return -1;
> > > > > }
> > > > > if (nb_lcore_params >= MAX_LCORE_PARAMS) { @@ -531,7
> > > > > +534,7 @@ parse_config(const char *q_arg)
> > > > > lcore_params_array[nb_lcore_params].queue_id =
> > > > > (uint8_t)int_fld[FLD_QUEUE];
> > > > > lcore_params_array[nb_lcore_params].lcore_id =
> > > > > - (uint8_t)int_fld[FLD_LCORE];
> > > > > + (uint16_t)int_fld[FLD_LCORE];
> > > > > ++nb_lcore_params;
> > > > > }
> > > > > lcore_params = lcore_params_array;
> > > > > --
> > > >
> > > > Acked-by: Konstantin Ananyev <konstantin.ananyev@huawei.com>
> > > >
> > > >
> > > > > 2.25.1
^ permalink raw reply [flat|nested] 100+ messages in thread
* [PATCH v3 0/6] fix lcore ID restriction
2023-12-19 3:28 ` [PATCH v2 0/6] " Sivaprasad Tummala
` (6 preceding siblings ...)
2023-12-19 3:28 ` [PATCH v2 0/6] " Sivaprasad Tummala
@ 2023-12-20 6:44 ` Sivaprasad Tummala
2023-12-20 6:44 ` [PATCH v3 1/6] examples/l3fwd: " Sivaprasad Tummala
` (7 more replies)
2024-11-29 4:32 ` [PATCH] power/amd_uncore: add e-smi installation instructions Sivaprasad Tummala
8 siblings, 8 replies; 100+ messages in thread
From: Sivaprasad Tummala @ 2023-12-20 6:44 UTC (permalink / raw)
To: david.hunt, anatoly.burakov, jerinj, radu.nicolau, gakhil,
cristian.dumitrescu, ferruh.yigit, konstantin.ananyev
Cc: dev
With modern CPUs it is possible to have a higher
CPU count and thus a higher RTE_MAX_LCORE.
In the DPDK sample applications, the current config
lcore options are hard-limited to 255.
The patchset fixes these constraints by allowing
all lcore IDs up to RTE_MAX_LCORE.
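All six patches relax the same kind of check in the --config parser: instead of capping
every field at 255, each field gets its own upper bound. A condensed, self-contained
sketch of that check is below; the field layout and max_fld[] values mirror the diffs in
this series, while the helper name, the stand-in values for RTE_MAX_ETHPORTS and
RTE_MAX_LCORE (the real ones come from rte_config.h) and the sample inputs in main()
are made up for illustration.

#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

#define RTE_MAX_ETHPORTS 32     /* stand-ins; real values come from rte_config.h */
#define RTE_MAX_LCORE    512

enum { FLD_PORT, FLD_QUEUE, FLD_LCORE, _NUM_FLD };

static int
parse_triplet(char *str_fld[_NUM_FLD], unsigned long int_fld[_NUM_FLD])
{
        static const unsigned int max_fld[_NUM_FLD] = {
                RTE_MAX_ETHPORTS,       /* port id: build-time port limit */
                USHRT_MAX,              /* queue id: 16-bit value in ethdev */
                RTE_MAX_LCORE           /* lcore id: no longer capped at 255 */
        };
        char *end;
        int i;

        for (i = 0; i < _NUM_FLD; i++) {
                errno = 0;
                int_fld[i] = strtoul(str_fld[i], &end, 0);
                if (errno != 0 || end == str_fld[i] || int_fld[i] > max_fld[i])
                        return -1;      /* reject out-of-range port/queue/lcore */
        }
        return 0;
}

int
main(void)
{
        char *ok[_NUM_FLD]  = {"0", "300", "300"};  /* lcore 300 is now accepted */
        char *bad[_NUM_FLD] = {"0", "0", "70000"};  /* above RTE_MAX_LCORE */
        unsigned long fld[_NUM_FLD];

        printf("ok:  %d\n", parse_triplet(ok, fld));   /* prints 0  */
        printf("bad: %d\n", parse_triplet(bad, fld));  /* prints -1 */
        return 0;
}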
Sivaprasad Tummala (6):
examples/l3fwd: fix lcore ID restriction
examples/l3fwd-power: fix lcore ID restriction
examples/l3fwd-graph: fix lcore ID restriction
examples/ipsec-secgw: fix lcore ID restriction
examples/qos_sched: fix lcore ID restriction
examples/vm_power_manager: fix lcore ID restriction
examples/ipsec-secgw/event_helper.h | 2 +-
examples/ipsec-secgw/ipsec-secgw.c | 32 +++++------
examples/ipsec-secgw/ipsec.c | 2 +-
examples/ipsec-secgw/ipsec.h | 2 +-
examples/ipsec-secgw/ipsec_worker.c | 10 ++--
examples/l3fwd-graph/main.c | 31 +++++-----
examples/l3fwd-power/main.c | 57 +++++++++----------
examples/l3fwd-power/main.h | 4 +-
examples/l3fwd-power/perf_core.c | 10 ++--
examples/l3fwd/l3fwd.h | 2 +-
examples/l3fwd/l3fwd_acl.c | 4 +-
examples/l3fwd/l3fwd_em.c | 4 +-
examples/l3fwd/l3fwd_event.h | 2 +-
examples/l3fwd/l3fwd_fib.c | 4 +-
examples/l3fwd/l3fwd_lpm.c | 5 +-
examples/l3fwd/main.c | 36 ++++++------
examples/qos_sched/args.c | 6 +-
.../guest_cli/vm_power_cli_guest.c | 4 +-
18 files changed, 109 insertions(+), 108 deletions(-)
--
2.25.1
^ permalink raw reply [flat|nested] 100+ messages in thread
* [PATCH v3 1/6] examples/l3fwd: fix lcore ID restriction
2023-12-20 6:44 ` [PATCH v3 " Sivaprasad Tummala
@ 2023-12-20 6:44 ` Sivaprasad Tummala
2023-12-20 6:44 ` [PATCH v3 2/6] examples/l3fwd-power: " Sivaprasad Tummala
` (6 subsequent siblings)
7 siblings, 0 replies; 100+ messages in thread
From: Sivaprasad Tummala @ 2023-12-20 6:44 UTC (permalink / raw)
To: david.hunt, anatoly.burakov, jerinj, radu.nicolau, gakhil,
cristian.dumitrescu, ferruh.yigit, konstantin.ananyev
Cc: dev, stable
Currently the config option allows lcore IDs up to 255,
irrespective of RTE_MAX_LCORES and needs to be fixed.
The patch allows config options based on DPDK config.
Fixes: af75078fece3 ("first public release")
Cc: stable@dpdk.org
Signed-off-by: Sivaprasad Tummala <sivaprasad.tummala@amd.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@huawei.com>
---
examples/l3fwd/l3fwd.h | 2 +-
examples/l3fwd/l3fwd_acl.c | 4 ++--
examples/l3fwd/l3fwd_em.c | 4 ++--
examples/l3fwd/l3fwd_event.h | 2 +-
examples/l3fwd/l3fwd_fib.c | 4 ++--
examples/l3fwd/l3fwd_lpm.c | 5 ++---
examples/l3fwd/main.c | 36 ++++++++++++++++++++----------------
7 files changed, 30 insertions(+), 27 deletions(-)
diff --git a/examples/l3fwd/l3fwd.h b/examples/l3fwd/l3fwd.h
index e7ae0e5834..12c264cb4c 100644
--- a/examples/l3fwd/l3fwd.h
+++ b/examples/l3fwd/l3fwd.h
@@ -74,7 +74,7 @@ struct mbuf_table {
struct lcore_rx_queue {
uint16_t port_id;
- uint8_t queue_id;
+ uint16_t queue_id;
} __rte_cache_aligned;
struct lcore_conf {
diff --git a/examples/l3fwd/l3fwd_acl.c b/examples/l3fwd/l3fwd_acl.c
index 401692bcec..2bd63181bc 100644
--- a/examples/l3fwd/l3fwd_acl.c
+++ b/examples/l3fwd/l3fwd_acl.c
@@ -997,7 +997,7 @@ acl_main_loop(__rte_unused void *dummy)
uint64_t prev_tsc, diff_tsc, cur_tsc;
int i, nb_rx;
uint16_t portid;
- uint8_t queueid;
+ uint16_t queueid;
struct lcore_conf *qconf;
int socketid;
const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
@@ -1020,7 +1020,7 @@ acl_main_loop(__rte_unused void *dummy)
portid = qconf->rx_queue_list[i].port_id;
queueid = qconf->rx_queue_list[i].queue_id;
RTE_LOG(INFO, L3FWD,
- " -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
+ " -- lcoreid=%u portid=%u rxqueueid=%hu\n",
lcore_id, portid, queueid);
}
diff --git a/examples/l3fwd/l3fwd_em.c b/examples/l3fwd/l3fwd_em.c
index 40e102b38a..cd2bb4a4bb 100644
--- a/examples/l3fwd/l3fwd_em.c
+++ b/examples/l3fwd/l3fwd_em.c
@@ -586,7 +586,7 @@ em_main_loop(__rte_unused void *dummy)
unsigned lcore_id;
uint64_t prev_tsc, diff_tsc, cur_tsc;
int i, nb_rx;
- uint8_t queueid;
+ uint16_t queueid;
uint16_t portid;
struct lcore_conf *qconf;
const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) /
@@ -609,7 +609,7 @@ em_main_loop(__rte_unused void *dummy)
portid = qconf->rx_queue_list[i].port_id;
queueid = qconf->rx_queue_list[i].queue_id;
RTE_LOG(INFO, L3FWD,
- " -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
+ " -- lcoreid=%u portid=%u rxqueueid=%hu\n",
lcore_id, portid, queueid);
}
diff --git a/examples/l3fwd/l3fwd_event.h b/examples/l3fwd/l3fwd_event.h
index 9aad358003..c6a4a89127 100644
--- a/examples/l3fwd/l3fwd_event.h
+++ b/examples/l3fwd/l3fwd_event.h
@@ -78,8 +78,8 @@ struct l3fwd_event_resources {
uint8_t deq_depth;
uint8_t has_burst;
uint8_t enabled;
- uint8_t eth_rx_queues;
uint8_t vector_enabled;
+ uint16_t eth_rx_queues;
uint16_t vector_size;
uint64_t vector_tmo_ns;
};
diff --git a/examples/l3fwd/l3fwd_fib.c b/examples/l3fwd/l3fwd_fib.c
index 6a21984415..7da55f707a 100644
--- a/examples/l3fwd/l3fwd_fib.c
+++ b/examples/l3fwd/l3fwd_fib.c
@@ -186,7 +186,7 @@ fib_main_loop(__rte_unused void *dummy)
uint64_t prev_tsc, diff_tsc, cur_tsc;
int i, nb_rx;
uint16_t portid;
- uint8_t queueid;
+ uint16_t queueid;
struct lcore_conf *qconf;
const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) /
US_PER_S * BURST_TX_DRAIN_US;
@@ -208,7 +208,7 @@ fib_main_loop(__rte_unused void *dummy)
portid = qconf->rx_queue_list[i].port_id;
queueid = qconf->rx_queue_list[i].queue_id;
RTE_LOG(INFO, L3FWD,
- " -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
+ " -- lcoreid=%u portid=%u rxqueueid=%hu\n",
lcore_id, portid, queueid);
}
diff --git a/examples/l3fwd/l3fwd_lpm.c b/examples/l3fwd/l3fwd_lpm.c
index a484a33089..01d38bc69c 100644
--- a/examples/l3fwd/l3fwd_lpm.c
+++ b/examples/l3fwd/l3fwd_lpm.c
@@ -148,8 +148,7 @@ lpm_main_loop(__rte_unused void *dummy)
unsigned lcore_id;
uint64_t prev_tsc, diff_tsc, cur_tsc;
int i, nb_rx;
- uint16_t portid;
- uint8_t queueid;
+ uint16_t portid, queueid;
struct lcore_conf *qconf;
const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) /
US_PER_S * BURST_TX_DRAIN_US;
@@ -171,7 +170,7 @@ lpm_main_loop(__rte_unused void *dummy)
portid = qconf->rx_queue_list[i].port_id;
queueid = qconf->rx_queue_list[i].queue_id;
RTE_LOG(INFO, L3FWD,
- " -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
+ " -- lcoreid=%u portid=%u rxqueueid=%hu\n",
lcore_id, portid, queueid);
}
diff --git a/examples/l3fwd/main.c b/examples/l3fwd/main.c
index 3bf28aec0c..db49f4707d 100644
--- a/examples/l3fwd/main.c
+++ b/examples/l3fwd/main.c
@@ -98,8 +98,8 @@ struct parm_cfg parm_config;
struct lcore_params {
uint16_t port_id;
- uint8_t queue_id;
- uint8_t lcore_id;
+ uint16_t queue_id;
+ uint16_t lcore_id;
} __rte_cache_aligned;
static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
@@ -292,24 +292,24 @@ setup_l3fwd_lookup_tables(void)
static int
check_lcore_params(void)
{
- uint8_t queue, lcore;
+ uint16_t queue, lcore;
uint16_t i;
int socketid;
for (i = 0; i < nb_lcore_params; ++i) {
queue = lcore_params[i].queue_id;
if (queue >= MAX_RX_QUEUE_PER_PORT) {
- printf("invalid queue number: %hhu\n", queue);
+ printf("invalid queue number: %hu\n", queue);
return -1;
}
lcore = lcore_params[i].lcore_id;
if (!rte_lcore_is_enabled(lcore)) {
- printf("error: lcore %hhu is not enabled in lcore mask\n", lcore);
+ printf("error: lcore %hu is not enabled in lcore mask\n", lcore);
return -1;
}
if ((socketid = rte_lcore_to_socket_id(lcore) != 0) &&
(numa_on == 0)) {
- printf("warning: lcore %hhu is on socket %d with numa off \n",
+ printf("warning: lcore %hu is on socket %d with numa off\n",
lcore, socketid);
}
}
@@ -336,7 +336,7 @@ check_port_config(void)
return 0;
}
-static uint8_t
+static uint16_t
get_port_n_rx_queues(const uint16_t port)
{
int queue = -1;
@@ -352,14 +352,14 @@ get_port_n_rx_queues(const uint16_t port)
lcore_params[i].port_id);
}
}
- return (uint8_t)(++queue);
+ return (uint16_t)(++queue);
}
static int
init_lcore_rx_queues(void)
{
uint16_t i, nb_rx_queue;
- uint8_t lcore;
+ uint16_t lcore;
for (i = 0; i < nb_lcore_params; ++i) {
lcore = lcore_params[i].lcore_id;
@@ -500,6 +500,8 @@ parse_config(const char *q_arg)
char *str_fld[_NUM_FLD];
int i;
unsigned size;
+ uint16_t max_fld[_NUM_FLD] = {RTE_MAX_ETHPORTS,
+ USHRT_MAX, RTE_MAX_LCORE};
nb_lcore_params = 0;
@@ -518,7 +520,8 @@ parse_config(const char *q_arg)
for (i = 0; i < _NUM_FLD; i++){
errno = 0;
int_fld[i] = strtoul(str_fld[i], &end, 0);
- if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
+ if (errno != 0 || end == str_fld[i] || int_fld[i] >
+ max_fld[i])
return -1;
}
if (nb_lcore_params >= MAX_LCORE_PARAMS) {
@@ -527,11 +530,11 @@ parse_config(const char *q_arg)
return -1;
}
lcore_params_array[nb_lcore_params].port_id =
- (uint8_t)int_fld[FLD_PORT];
+ (uint16_t)int_fld[FLD_PORT];
lcore_params_array[nb_lcore_params].queue_id =
- (uint8_t)int_fld[FLD_QUEUE];
+ (uint16_t)int_fld[FLD_QUEUE];
lcore_params_array[nb_lcore_params].lcore_id =
- (uint8_t)int_fld[FLD_LCORE];
+ (uint16_t)int_fld[FLD_LCORE];
++nb_lcore_params;
}
lcore_params = lcore_params_array;
@@ -630,7 +633,7 @@ parse_event_eth_rx_queues(const char *eth_rx_queues)
{
struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
char *end = NULL;
- uint8_t num_eth_rx_queues;
+ uint16_t num_eth_rx_queues;
/* parse decimal string */
num_eth_rx_queues = strtoul(eth_rx_queues, &end, 10);
@@ -1211,7 +1214,8 @@ config_port_max_pkt_len(struct rte_eth_conf *conf,
static void
l3fwd_poll_resource_setup(void)
{
- uint8_t nb_rx_queue, queue, socketid;
+ uint8_t socketid;
+ uint16_t nb_rx_queue, queue;
struct rte_eth_dev_info dev_info;
uint32_t n_tx_queue, nb_lcores;
struct rte_eth_txconf *txconf;
@@ -1528,7 +1532,7 @@ main(int argc, char **argv)
struct lcore_conf *qconf;
uint16_t queueid, portid;
unsigned int lcore_id;
- uint8_t queue;
+ uint16_t queue;
int ret;
/* init EAL */
--
2.25.1
^ permalink raw reply [flat|nested] 100+ messages in thread
* [PATCH v3 2/6] examples/l3fwd-power: fix lcore ID restriction
2023-12-20 6:44 ` [PATCH v3 " Sivaprasad Tummala
2023-12-20 6:44 ` [PATCH v3 1/6] examples/l3fwd: " Sivaprasad Tummala
@ 2023-12-20 6:44 ` Sivaprasad Tummala
2023-12-20 6:44 ` [PATCH v3 3/6] examples/l3fwd-graph: " Sivaprasad Tummala
` (5 subsequent siblings)
7 siblings, 0 replies; 100+ messages in thread
From: Sivaprasad Tummala @ 2023-12-20 6:44 UTC (permalink / raw)
To: david.hunt, anatoly.burakov, jerinj, radu.nicolau, gakhil,
cristian.dumitrescu, ferruh.yigit, konstantin.ananyev
Cc: dev, stable
Currently the config option allows lcore IDs up to 255,
irrespective of RTE_MAX_LCORES and needs to be fixed.
The patch allows config options based on DPDK config.
Fixes: f88e7c175a68 ("examples/l3fwd-power: add high/regular perf cores options")
Cc: radu.nicolau@intel.com
Cc: stable@dpdk.org
Signed-off-by: Sivaprasad Tummala <sivaprasad.tummala@amd.com>
---
examples/l3fwd-power/main.c | 57 ++++++++++++++++----------------
examples/l3fwd-power/main.h | 4 +--
examples/l3fwd-power/perf_core.c | 10 +++---
3 files changed, 35 insertions(+), 36 deletions(-)
diff --git a/examples/l3fwd-power/main.c b/examples/l3fwd-power/main.c
index f4adcf41b5..d0f3c332ee 100644
--- a/examples/l3fwd-power/main.c
+++ b/examples/l3fwd-power/main.c
@@ -214,7 +214,7 @@ enum freq_scale_hint_t
struct lcore_rx_queue {
uint16_t port_id;
- uint8_t queue_id;
+ uint16_t queue_id;
enum freq_scale_hint_t freq_up_hint;
uint32_t zero_rx_packet_count;
uint32_t idle_hint;
@@ -838,7 +838,7 @@ sleep_until_rx_interrupt(int num, int lcore)
struct rte_epoll_event event[num];
int n, i;
uint16_t port_id;
- uint8_t queue_id;
+ uint16_t queue_id;
void *data;
if (status[lcore].wakeup) {
@@ -850,9 +850,9 @@ sleep_until_rx_interrupt(int num, int lcore)
n = rte_epoll_wait(RTE_EPOLL_PER_THREAD, event, num, 10);
for (i = 0; i < n; i++) {
data = event[i].epdata.data;
- port_id = ((uintptr_t)data) >> CHAR_BIT;
+ port_id = ((uintptr_t)data) >> (sizeof(uint16_t) * CHAR_BIT);
queue_id = ((uintptr_t)data) &
- RTE_LEN2MASK(CHAR_BIT, uint8_t);
+ RTE_LEN2MASK((sizeof(uint16_t) * CHAR_BIT), uint16_t);
RTE_LOG(INFO, L3FWD_POWER,
"lcore %u is waked up from rx interrupt on"
" port %d queue %d\n",
@@ -867,7 +867,7 @@ static void turn_on_off_intr(struct lcore_conf *qconf, bool on)
{
int i;
struct lcore_rx_queue *rx_queue;
- uint8_t queue_id;
+ uint16_t queue_id;
uint16_t port_id;
for (i = 0; i < qconf->n_rx_queue; ++i) {
@@ -887,7 +887,7 @@ static void turn_on_off_intr(struct lcore_conf *qconf, bool on)
static int event_register(struct lcore_conf *qconf)
{
struct lcore_rx_queue *rx_queue;
- uint8_t queueid;
+ uint16_t queueid;
uint16_t portid;
uint32_t data;
int ret;
@@ -897,7 +897,7 @@ static int event_register(struct lcore_conf *qconf)
rx_queue = &(qconf->rx_queue_list[i]);
portid = rx_queue->port_id;
queueid = rx_queue->queue_id;
- data = portid << CHAR_BIT | queueid;
+ data = portid << (sizeof(uint16_t) * CHAR_BIT) | queueid;
ret = rte_eth_dev_rx_intr_ctl_q(portid, queueid,
RTE_EPOLL_PER_THREAD,
@@ -917,8 +917,7 @@ static int main_intr_loop(__rte_unused void *dummy)
unsigned int lcore_id;
uint64_t prev_tsc, diff_tsc, cur_tsc;
int i, j, nb_rx;
- uint8_t queueid;
- uint16_t portid;
+ uint16_t portid, queueid;
struct lcore_conf *qconf;
struct lcore_rx_queue *rx_queue;
uint32_t lcore_rx_idle_count = 0;
@@ -946,7 +945,7 @@ static int main_intr_loop(__rte_unused void *dummy)
portid = qconf->rx_queue_list[i].port_id;
queueid = qconf->rx_queue_list[i].queue_id;
RTE_LOG(INFO, L3FWD_POWER,
- " -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
+ " -- lcoreid=%u portid=%u rxqueueid=%hu\n",
lcore_id, portid, queueid);
}
@@ -1083,8 +1082,7 @@ main_telemetry_loop(__rte_unused void *dummy)
unsigned int lcore_id;
uint64_t prev_tsc, diff_tsc, cur_tsc, prev_tel_tsc;
int i, j, nb_rx;
- uint8_t queueid;
- uint16_t portid;
+ uint16_t portid, queueid;
struct lcore_conf *qconf;
struct lcore_rx_queue *rx_queue;
uint64_t ep_nep[2] = {0}, fp_nfp[2] = {0};
@@ -1114,7 +1112,7 @@ main_telemetry_loop(__rte_unused void *dummy)
portid = qconf->rx_queue_list[i].port_id;
queueid = qconf->rx_queue_list[i].queue_id;
RTE_LOG(INFO, L3FWD_POWER, " -- lcoreid=%u portid=%u "
- "rxqueueid=%hhu\n", lcore_id, portid, queueid);
+ "rxqueueid=%hu\n", lcore_id, portid, queueid);
}
while (!is_done()) {
@@ -1205,8 +1203,7 @@ main_legacy_loop(__rte_unused void *dummy)
uint64_t prev_tsc, diff_tsc, cur_tsc, tim_res_tsc, hz;
uint64_t prev_tsc_power = 0, cur_tsc_power, diff_tsc_power;
int i, j, nb_rx;
- uint8_t queueid;
- uint16_t portid;
+ uint16_t portid, queueid;
struct lcore_conf *qconf;
struct lcore_rx_queue *rx_queue;
enum freq_scale_hint_t lcore_scaleup_hint;
@@ -1234,7 +1231,7 @@ main_legacy_loop(__rte_unused void *dummy)
portid = qconf->rx_queue_list[i].port_id;
queueid = qconf->rx_queue_list[i].queue_id;
RTE_LOG(INFO, L3FWD_POWER, " -- lcoreid=%u portid=%u "
- "rxqueueid=%hhu\n", lcore_id, portid, queueid);
+ "rxqueueid=%hu\n", lcore_id, portid, queueid);
}
/* add into event wait list */
@@ -1399,25 +1396,25 @@ main_legacy_loop(__rte_unused void *dummy)
static int
check_lcore_params(void)
{
- uint8_t queue, lcore;
+ uint16_t queue, lcore;
uint16_t i;
int socketid;
for (i = 0; i < nb_lcore_params; ++i) {
queue = lcore_params[i].queue_id;
if (queue >= MAX_RX_QUEUE_PER_PORT) {
- printf("invalid queue number: %hhu\n", queue);
+ printf("invalid queue number: %hu\n", queue);
return -1;
}
lcore = lcore_params[i].lcore_id;
if (!rte_lcore_is_enabled(lcore)) {
- printf("error: lcore %hhu is not enabled in lcore "
+ printf("error: lcore %hu is not enabled in lcore "
"mask\n", lcore);
return -1;
}
if ((socketid = rte_lcore_to_socket_id(lcore) != 0) &&
(numa_on == 0)) {
- printf("warning: lcore %hhu is on socket %d with numa "
+ printf("warning: lcore %hu is on socket %d with numa "
"off\n", lcore, socketid);
}
if (app_mode == APP_MODE_TELEMETRY && lcore == rte_lcore_id()) {
@@ -1451,7 +1448,7 @@ check_port_config(void)
return 0;
}
-static uint8_t
+static uint16_t
get_port_n_rx_queues(const uint16_t port)
{
int queue = -1;
@@ -1462,14 +1459,14 @@ get_port_n_rx_queues(const uint16_t port)
lcore_params[i].queue_id > queue)
queue = lcore_params[i].queue_id;
}
- return (uint8_t)(++queue);
+ return (uint16_t)(++queue);
}
static int
init_lcore_rx_queues(void)
{
uint16_t i, nb_rx_queue;
- uint8_t lcore;
+ uint16_t lcore;
for (i = 0; i < nb_lcore_params; ++i) {
lcore = lcore_params[i].lcore_id;
@@ -1661,6 +1658,8 @@ parse_config(const char *q_arg)
char *str_fld[_NUM_FLD];
int i;
unsigned size;
+ unsigned int max_fld[_NUM_FLD] = {RTE_MAX_ETHPORTS,
+ USHRT_MAX, RTE_MAX_LCORE};
nb_lcore_params = 0;
@@ -1681,7 +1680,7 @@ parse_config(const char *q_arg)
errno = 0;
int_fld[i] = strtoul(str_fld[i], &end, 0);
if (errno != 0 || end == str_fld[i] || int_fld[i] >
- 255)
+ max_fld[i])
return -1;
}
if (nb_lcore_params >= MAX_LCORE_PARAMS) {
@@ -1690,11 +1689,11 @@ parse_config(const char *q_arg)
return -1;
}
lcore_params_array[nb_lcore_params].port_id =
- (uint8_t)int_fld[FLD_PORT];
+ (uint16_t)int_fld[FLD_PORT];
lcore_params_array[nb_lcore_params].queue_id =
- (uint8_t)int_fld[FLD_QUEUE];
+ (uint16_t)int_fld[FLD_QUEUE];
lcore_params_array[nb_lcore_params].lcore_id =
- (uint8_t)int_fld[FLD_LCORE];
+ (uint16_t)int_fld[FLD_LCORE];
++nb_lcore_params;
}
lcore_params = lcore_params_array;
@@ -2501,8 +2500,8 @@ main(int argc, char **argv)
uint64_t hz;
uint32_t n_tx_queue, nb_lcores;
uint32_t dev_rxq_num, dev_txq_num;
- uint8_t nb_rx_queue, queue, socketid;
- uint16_t portid;
+ uint8_t socketid;
+ uint16_t portid, nb_rx_queue, queue;
const char *ptr_strings[NUM_TELSTATS];
/* init EAL */
diff --git a/examples/l3fwd-power/main.h b/examples/l3fwd-power/main.h
index 258de98f5b..dcb5744ee6 100644
--- a/examples/l3fwd-power/main.h
+++ b/examples/l3fwd-power/main.h
@@ -9,8 +9,8 @@
#define MAX_LCORE_PARAMS 1024
struct lcore_params {
uint16_t port_id;
- uint8_t queue_id;
- uint8_t lcore_id;
+ uint16_t queue_id;
+ uint16_t lcore_id;
} __rte_cache_aligned;
extern struct lcore_params *lcore_params;
diff --git a/examples/l3fwd-power/perf_core.c b/examples/l3fwd-power/perf_core.c
index 41ef6d0c9a..1fb9ceb584 100644
--- a/examples/l3fwd-power/perf_core.c
+++ b/examples/l3fwd-power/perf_core.c
@@ -22,9 +22,9 @@ static uint16_t nb_hp_lcores;
struct perf_lcore_params {
uint16_t port_id;
- uint8_t queue_id;
+ uint16_t queue_id;
uint8_t high_perf;
- uint8_t lcore_idx;
+ uint16_t lcore_idx;
} __rte_cache_aligned;
static struct perf_lcore_params prf_lc_prms[MAX_LCORE_PARAMS];
@@ -161,13 +161,13 @@ parse_perf_config(const char *q_arg)
return -1;
}
prf_lc_prms[nb_prf_lc_prms].port_id =
- (uint8_t)int_fld[FLD_PORT];
+ (uint16_t)int_fld[FLD_PORT];
prf_lc_prms[nb_prf_lc_prms].queue_id =
- (uint8_t)int_fld[FLD_QUEUE];
+ (uint16_t)int_fld[FLD_QUEUE];
prf_lc_prms[nb_prf_lc_prms].high_perf =
!!(uint8_t)int_fld[FLD_LCORE_HP];
prf_lc_prms[nb_prf_lc_prms].lcore_idx =
- (uint8_t)int_fld[FLD_LCORE_IDX];
+ (uint16_t)int_fld[FLD_LCORE_IDX];
++nb_prf_lc_prms;
}
--
2.25.1
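The least obvious part of this patch is the RX interrupt user data: port and queue are
packed into the word handed to rte_eth_dev_rx_intr_ctl_q() and unpacked again in
sleep_until_rx_interrupt(), so the queue field has to grow from 8 to 16 bits there as
well. A standalone sketch of the widened packing is below; the helper names and sample
values are made up, and only the 16-bit layout mirrors the patch (which writes it with
CHAR_BIT and RTE_LEN2MASK).

/* Standalone illustration of the widened port/queue packing: the queue id
 * now occupies the low 16 bits of the interrupt user data instead of the
 * low 8 bits, so queue ids above 255 survive the round trip.
 */
#include <assert.h>
#include <stdint.h>

static inline uint32_t
pack_port_queue(uint16_t port_id, uint16_t queue_id)
{
        return ((uint32_t)port_id << 16) | queue_id;
}

static inline void
unpack_port_queue(uint32_t data, uint16_t *port_id, uint16_t *queue_id)
{
        *port_id = (uint16_t)(data >> 16);
        *queue_id = (uint16_t)(data & UINT16_MAX);
}

int
main(void)
{
        uint16_t port, queue;

        /* queue 300 would have been silently truncated with an 8-bit field */
        unpack_port_queue(pack_port_queue(3, 300), &port, &queue);
        assert(port == 3 && queue == 300);
        return 0;
}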
^ permalink raw reply [flat|nested] 100+ messages in thread
* [PATCH v3 3/6] examples/l3fwd-graph: fix lcore ID restriction
2023-12-20 6:44 ` [PATCH v3 " Sivaprasad Tummala
2023-12-20 6:44 ` [PATCH v3 1/6] examples/l3fwd: " Sivaprasad Tummala
2023-12-20 6:44 ` [PATCH v3 2/6] examples/l3fwd-power: " Sivaprasad Tummala
@ 2023-12-20 6:44 ` Sivaprasad Tummala
2023-12-20 6:44 ` [PATCH v3 4/6] examples/ipsec-secgw: " Sivaprasad Tummala
` (4 subsequent siblings)
7 siblings, 0 replies; 100+ messages in thread
From: Sivaprasad Tummala @ 2023-12-20 6:44 UTC (permalink / raw)
To: david.hunt, anatoly.burakov, jerinj, radu.nicolau, gakhil,
cristian.dumitrescu, ferruh.yigit, konstantin.ananyev
Cc: dev, ndabilpuram, stable
Currently the config option allows lcore IDs up to 255,
irrespective of RTE_MAX_LCORES and needs to be fixed.
The patch allows config options based on DPDK config.
Fixes: 08bd1a174461 ("examples/l3fwd-graph: add graph-based l3fwd skeleton")
Cc: ndabilpuram@marvell.com
Cc: stable@dpdk.org
Signed-off-by: Sivaprasad Tummala <sivaprasad.tummala@amd.com>
---
examples/l3fwd-graph/main.c | 31 ++++++++++++++++---------------
1 file changed, 16 insertions(+), 15 deletions(-)
diff --git a/examples/l3fwd-graph/main.c b/examples/l3fwd-graph/main.c
index 96cb1c81ff..ffb6900fee 100644
--- a/examples/l3fwd-graph/main.c
+++ b/examples/l3fwd-graph/main.c
@@ -90,7 +90,7 @@ static int pcap_trace_enable;
struct lcore_rx_queue {
uint16_t port_id;
- uint8_t queue_id;
+ uint16_t queue_id;
char node_name[RTE_NODE_NAMESIZE];
};
@@ -110,8 +110,8 @@ static struct lcore_conf lcore_conf[RTE_MAX_LCORE];
struct lcore_params {
uint16_t port_id;
- uint8_t queue_id;
- uint8_t lcore_id;
+ uint16_t queue_id;
+ uint16_t lcore_id;
} __rte_cache_aligned;
static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
@@ -205,19 +205,19 @@ check_worker_model_params(void)
static int
check_lcore_params(void)
{
- uint8_t queue, lcore;
+ uint16_t queue;
int socketid;
- uint16_t i;
+ uint16_t i, lcore;
for (i = 0; i < nb_lcore_params; ++i) {
queue = lcore_params[i].queue_id;
if (queue >= MAX_RX_QUEUE_PER_PORT) {
- printf("Invalid queue number: %hhu\n", queue);
+ printf("Invalid queue number: %hu\n", queue);
return -1;
}
lcore = lcore_params[i].lcore_id;
if (!rte_lcore_is_enabled(lcore)) {
- printf("Error: lcore %hhu is not enabled in lcore mask\n",
+ printf("Error: lcore %hu is not enabled in lcore mask\n",
lcore);
return -1;
}
@@ -228,7 +228,7 @@ check_lcore_params(void)
}
socketid = rte_lcore_to_socket_id(lcore);
if ((socketid != 0) && (numa_on == 0)) {
- printf("Warning: lcore %hhu is on socket %d with numa off\n",
+ printf("Warning: lcore %hu is on socket %d with numa off\n",
lcore, socketid);
}
}
@@ -257,7 +257,7 @@ check_port_config(void)
return 0;
}
-static uint8_t
+static uint16_t
get_port_n_rx_queues(const uint16_t port)
{
int queue = -1;
@@ -275,14 +275,14 @@ get_port_n_rx_queues(const uint16_t port)
}
}
- return (uint8_t)(++queue);
+ return (uint16_t)(++queue);
}
static int
init_lcore_rx_queues(void)
{
uint16_t i, nb_rx_queue;
- uint8_t lcore;
+ uint16_t lcore;
for (i = 0; i < nb_lcore_params; ++i) {
lcore = lcore_params[i].lcore_id;
@@ -448,11 +448,11 @@ parse_config(const char *q_arg)
}
lcore_params_array[nb_lcore_params].port_id =
- (uint8_t)int_fld[FLD_PORT];
+ (uint16_t)int_fld[FLD_PORT];
lcore_params_array[nb_lcore_params].queue_id =
- (uint8_t)int_fld[FLD_QUEUE];
+ (uint16_t)int_fld[FLD_QUEUE];
lcore_params_array[nb_lcore_params].lcore_id =
- (uint8_t)int_fld[FLD_LCORE];
+ (uint16_t)int_fld[FLD_LCORE];
++nb_lcore_params;
}
lcore_params = lcore_params_array;
@@ -1011,7 +1011,8 @@ main(int argc, char **argv)
"ethdev_tx-*",
"pkt_drop",
};
- uint8_t nb_rx_queue, queue, socketid;
+ uint8_t socketid;
+ uint16_t nb_rx_queue, queue;
struct rte_graph_param graph_conf;
struct rte_eth_dev_info dev_info;
uint32_t nb_ports, nb_conf = 0;
--
2.25.1
^ permalink raw reply [flat|nested] 100+ messages in thread
* [PATCH v3 4/6] examples/ipsec-secgw: fix lcore ID restriction
2023-12-20 6:44 ` [PATCH v3 " Sivaprasad Tummala
` (2 preceding siblings ...)
2023-12-20 6:44 ` [PATCH v3 3/6] examples/l3fwd-graph: " Sivaprasad Tummala
@ 2023-12-20 6:44 ` Sivaprasad Tummala
2023-12-20 6:45 ` [PATCH v3 5/6] examples/qos_sched: " Sivaprasad Tummala
` (3 subsequent siblings)
7 siblings, 0 replies; 100+ messages in thread
From: Sivaprasad Tummala @ 2023-12-20 6:44 UTC (permalink / raw)
To: david.hunt, anatoly.burakov, jerinj, radu.nicolau, gakhil,
cristian.dumitrescu, ferruh.yigit, konstantin.ananyev
Cc: dev, sergio.gonzalez.monroy, stable
Currently the config option allows lcore IDs up to 255,
irrespective of RTE_MAX_LCORES and needs to be fixed.
The patch allows config options based on DPDK config.
Fixes: d299106e8e31 ("examples/ipsec-secgw: add IPsec sample application")
Cc: sergio.gonzalez.monroy@intel.com
Cc: stable@dpdk.org
Signed-off-by: Sivaprasad Tummala <sivaprasad.tummala@amd.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@huawei.com>
---
examples/ipsec-secgw/event_helper.h | 2 +-
examples/ipsec-secgw/ipsec-secgw.c | 32 ++++++++++++++---------------
examples/ipsec-secgw/ipsec.c | 2 +-
examples/ipsec-secgw/ipsec.h | 2 +-
examples/ipsec-secgw/ipsec_worker.c | 10 ++++-----
5 files changed, 23 insertions(+), 25 deletions(-)
diff --git a/examples/ipsec-secgw/event_helper.h b/examples/ipsec-secgw/event_helper.h
index dfb81bfcf1..9923700f03 100644
--- a/examples/ipsec-secgw/event_helper.h
+++ b/examples/ipsec-secgw/event_helper.h
@@ -102,7 +102,7 @@ struct eh_event_link_info {
/**< Event port ID */
uint8_t eventq_id;
/**< Event queue to be linked to the port */
- uint8_t lcore_id;
+ uint16_t lcore_id;
/**< Lcore to be polling on this port */
};
diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index bf98d2618b..f03a93259c 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
@@ -220,8 +220,8 @@ static const char *cfgfile;
struct lcore_params {
uint16_t port_id;
- uint8_t queue_id;
- uint8_t lcore_id;
+ uint16_t queue_id;
+ uint16_t lcore_id;
} __rte_cache_aligned;
static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
@@ -696,8 +696,7 @@ ipsec_poll_mode_worker(void)
uint32_t lcore_id;
uint64_t prev_tsc, diff_tsc, cur_tsc;
int32_t i, nb_rx;
- uint16_t portid;
- uint8_t queueid;
+ uint16_t portid, queueid;
struct lcore_conf *qconf;
int32_t rc, socket_id;
const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
@@ -789,8 +788,7 @@ int
check_flow_params(uint16_t fdir_portid, uint8_t fdir_qid)
{
uint16_t i;
- uint16_t portid;
- uint8_t queueid;
+ uint16_t portid, queueid;
for (i = 0; i < nb_lcore_params; ++i) {
portid = lcore_params_array[i].port_id;
@@ -810,7 +808,7 @@ check_flow_params(uint16_t fdir_portid, uint8_t fdir_qid)
static int32_t
check_poll_mode_params(struct eh_conf *eh_conf)
{
- uint8_t lcore;
+ uint16_t lcore;
uint16_t portid;
uint16_t i;
int32_t socket_id;
@@ -829,13 +827,13 @@ check_poll_mode_params(struct eh_conf *eh_conf)
for (i = 0; i < nb_lcore_params; ++i) {
lcore = lcore_params[i].lcore_id;
if (!rte_lcore_is_enabled(lcore)) {
- printf("error: lcore %hhu is not enabled in "
+ printf("error: lcore %hu is not enabled in "
"lcore mask\n", lcore);
return -1;
}
socket_id = rte_lcore_to_socket_id(lcore);
if (socket_id != 0 && numa_on == 0) {
- printf("warning: lcore %hhu is on socket %d "
+ printf("warning: lcore %hu is on socket %d "
"with numa off\n",
lcore, socket_id);
}
@@ -852,7 +850,7 @@ check_poll_mode_params(struct eh_conf *eh_conf)
return 0;
}
-static uint8_t
+static uint16_t
get_port_nb_rx_queues(const uint16_t port)
{
int32_t queue = -1;
@@ -863,14 +861,14 @@ get_port_nb_rx_queues(const uint16_t port)
lcore_params[i].queue_id > queue)
queue = lcore_params[i].queue_id;
}
- return (uint8_t)(++queue);
+ return (uint16_t)(++queue);
}
static int32_t
init_lcore_rx_queues(void)
{
uint16_t i, nb_rx_queue;
- uint8_t lcore;
+ uint16_t lcore;
for (i = 0; i < nb_lcore_params; ++i) {
lcore = lcore_params[i].lcore_id;
@@ -1051,6 +1049,8 @@ parse_config(const char *q_arg)
char *str_fld[_NUM_FLD];
int32_t i;
uint32_t size;
+ uint16_t max_fld[_NUM_FLD] = {RTE_MAX_ETHPORTS,
+ USHRT_MAX, RTE_MAX_LCORE};
nb_lcore_params = 0;
@@ -1071,7 +1071,7 @@ parse_config(const char *q_arg)
for (i = 0; i < _NUM_FLD; i++) {
errno = 0;
int_fld[i] = strtoul(str_fld[i], &end, 0);
- if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
+ if (errno != 0 || end == str_fld[i] || int_fld[i] > max_fld[i])
return -1;
}
if (nb_lcore_params >= MAX_LCORE_PARAMS) {
@@ -1080,11 +1080,11 @@ parse_config(const char *q_arg)
return -1;
}
lcore_params_array[nb_lcore_params].port_id =
- (uint8_t)int_fld[FLD_PORT];
+ (uint16_t)int_fld[FLD_PORT];
lcore_params_array[nb_lcore_params].queue_id =
- (uint8_t)int_fld[FLD_QUEUE];
+ (uint16_t)int_fld[FLD_QUEUE];
lcore_params_array[nb_lcore_params].lcore_id =
- (uint8_t)int_fld[FLD_LCORE];
+ (uint16_t)int_fld[FLD_LCORE];
++nb_lcore_params;
}
lcore_params = lcore_params_array;
diff --git a/examples/ipsec-secgw/ipsec.c b/examples/ipsec-secgw/ipsec.c
index f5cec4a928..5ebb71bb9a 100644
--- a/examples/ipsec-secgw/ipsec.c
+++ b/examples/ipsec-secgw/ipsec.c
@@ -259,7 +259,7 @@ create_lookaside_session(struct ipsec_ctx *ipsec_ctx_lcore[],
continue;
/* Looking for cryptodev, which can handle this SA */
- key.lcore_id = (uint8_t)lcore_id;
+ key.lcore_id = (uint16_t)lcore_id;
key.cipher_algo = (uint8_t)sa->cipher_algo;
key.auth_algo = (uint8_t)sa->auth_algo;
key.aead_algo = (uint8_t)sa->aead_algo;
diff --git a/examples/ipsec-secgw/ipsec.h b/examples/ipsec-secgw/ipsec.h
index bdcada1c40..29b9b283f0 100644
--- a/examples/ipsec-secgw/ipsec.h
+++ b/examples/ipsec-secgw/ipsec.h
@@ -285,7 +285,7 @@ struct cnt_blk {
struct lcore_rx_queue {
uint16_t port_id;
- uint8_t queue_id;
+ uint16_t queue_id;
void *sec_ctx;
} __rte_cache_aligned;
diff --git a/examples/ipsec-secgw/ipsec_worker.c b/examples/ipsec-secgw/ipsec_worker.c
index 8d122e8519..90a4c38ba4 100644
--- a/examples/ipsec-secgw/ipsec_worker.c
+++ b/examples/ipsec-secgw/ipsec_worker.c
@@ -1598,8 +1598,7 @@ ipsec_poll_mode_wrkr_inl_pr(void)
int32_t socket_id;
uint32_t lcore_id;
int32_t i, nb_rx;
- uint16_t portid;
- uint8_t queueid;
+ uint16_t portid, queueid;
prev_tsc = 0;
lcore_id = rte_lcore_id();
@@ -1633,7 +1632,7 @@ ipsec_poll_mode_wrkr_inl_pr(void)
portid = rxql[i].port_id;
queueid = rxql[i].queue_id;
RTE_LOG(INFO, IPSEC,
- " -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
+ " -- lcoreid=%u portid=%u rxqueueid=%hu\n",
lcore_id, portid, queueid);
}
@@ -1729,8 +1728,7 @@ ipsec_poll_mode_wrkr_inl_pr_ss(void)
uint32_t i, nb_rx, j;
int32_t socket_id;
uint32_t lcore_id;
- uint16_t portid;
- uint8_t queueid;
+ uint16_t portid, queueid;
prev_tsc = 0;
lcore_id = rte_lcore_id();
@@ -1764,7 +1762,7 @@ ipsec_poll_mode_wrkr_inl_pr_ss(void)
portid = rxql[i].port_id;
queueid = rxql[i].queue_id;
RTE_LOG(INFO, IPSEC,
- " -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
+ " -- lcoreid=%u portid=%u rxqueueid=%hu\n",
lcore_id, portid, queueid);
}
--
2.25.1
^ permalink raw reply [flat|nested] 100+ messages in thread
* [PATCH v3 5/6] examples/qos_sched: fix lcore ID restriction
2023-12-20 6:44 ` [PATCH v3 " Sivaprasad Tummala
` (3 preceding siblings ...)
2023-12-20 6:44 ` [PATCH v3 4/6] examples/ipsec-secgw: " Sivaprasad Tummala
@ 2023-12-20 6:45 ` Sivaprasad Tummala
2023-12-20 16:31 ` Stephen Hemminger
2023-12-20 6:45 ` [PATCH v3 6/6] examples/vm_power_manager: " Sivaprasad Tummala
` (2 subsequent siblings)
7 siblings, 1 reply; 100+ messages in thread
From: Sivaprasad Tummala @ 2023-12-20 6:45 UTC (permalink / raw)
To: david.hunt, anatoly.burakov, jerinj, radu.nicolau, gakhil,
cristian.dumitrescu, ferruh.yigit, konstantin.ananyev
Cc: dev, stable
Currently the config option allows lcore IDs up to 255,
irrespective of RTE_MAX_LCORES and needs to be fixed.
The patch allows config options based on DPDK config.
Fixes: de3cfa2c9823 ("sched: initial import")
Cc: stable@dpdk.org
Signed-off-by: Sivaprasad Tummala <sivaprasad.tummala@amd.com>
---
examples/qos_sched/args.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/examples/qos_sched/args.c b/examples/qos_sched/args.c
index e97273152a..22fe76eeb5 100644
--- a/examples/qos_sched/args.c
+++ b/examples/qos_sched/args.c
@@ -182,10 +182,10 @@ app_parse_flow_conf(const char *conf_str)
pconf->rx_port = vals[0];
pconf->tx_port = vals[1];
- pconf->rx_core = (uint8_t)vals[2];
- pconf->wt_core = (uint8_t)vals[3];
+ pconf->rx_core = (uint16_t)vals[2];
+ pconf->wt_core = (uint16_t)vals[3];
if (ret == 5)
- pconf->tx_core = (uint8_t)vals[4];
+ pconf->tx_core = (uint16_t)vals[4];
else
pconf->tx_core = pconf->wt_core;
--
2.25.1
^ permalink raw reply [flat|nested] 100+ messages in thread
* [PATCH v3 6/6] examples/vm_power_manager: fix lcore ID restriction
2023-12-20 6:44 ` [PATCH v3 " Sivaprasad Tummala
` (4 preceding siblings ...)
2023-12-20 6:45 ` [PATCH v3 5/6] examples/qos_sched: " Sivaprasad Tummala
@ 2023-12-20 6:45 ` Sivaprasad Tummala
2023-12-20 6:45 ` [PATCH v3 0/6] " Sivaprasad Tummala
2024-01-16 18:23 ` [PATCH v4 " Sivaprasad Tummala
7 siblings, 0 replies; 100+ messages in thread
From: Sivaprasad Tummala @ 2023-12-20 6:45 UTC (permalink / raw)
To: david.hunt, anatoly.burakov, jerinj, radu.nicolau, gakhil,
cristian.dumitrescu, ferruh.yigit, konstantin.ananyev
Cc: dev, marcinx.hajkowski, stable
Currently the config option allows lcore IDs up to 255,
irrespective of RTE_MAX_LCORES and needs to be fixed.
The patch allows config options based on DPDK config.
Fixes: 0e8f47491f09 ("examples/vm_power: add command to query CPU frequency")
Cc: marcinx.hajkowski@intel.com
Cc: stable@dpdk.org
Signed-off-by: Sivaprasad Tummala <sivaprasad.tummala@amd.com>
---
examples/vm_power_manager/guest_cli/vm_power_cli_guest.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/examples/vm_power_manager/guest_cli/vm_power_cli_guest.c b/examples/vm_power_manager/guest_cli/vm_power_cli_guest.c
index 94bfbbaf78..a586853a76 100644
--- a/examples/vm_power_manager/guest_cli/vm_power_cli_guest.c
+++ b/examples/vm_power_manager/guest_cli/vm_power_cli_guest.c
@@ -401,7 +401,7 @@ check_response_cmd(unsigned int lcore_id, int *result)
struct cmd_set_cpu_freq_result {
cmdline_fixed_string_t set_cpu_freq;
- uint8_t lcore_id;
+ uint16_t lcore_id;
cmdline_fixed_string_t cmd;
};
@@ -444,7 +444,7 @@ cmdline_parse_token_string_t cmd_set_cpu_freq =
set_cpu_freq, "set_cpu_freq");
cmdline_parse_token_num_t cmd_set_cpu_freq_core_num =
TOKEN_NUM_INITIALIZER(struct cmd_set_cpu_freq_result,
- lcore_id, RTE_UINT8);
+ lcore_id, RTE_UINT16);
cmdline_parse_token_string_t cmd_set_cpu_freq_cmd_cmd =
TOKEN_STRING_INITIALIZER(struct cmd_set_cpu_freq_result,
cmd, "up#down#min#max#enable_turbo#disable_turbo");
--
2.25.1
^ permalink raw reply [flat|nested] 100+ messages in thread
* [PATCH v3 0/6] fix lcore ID restriction
2023-12-20 6:44 ` [PATCH v3 " Sivaprasad Tummala
` (5 preceding siblings ...)
2023-12-20 6:45 ` [PATCH v3 6/6] examples/vm_power_manager: " Sivaprasad Tummala
@ 2023-12-20 6:45 ` Sivaprasad Tummala
2024-01-16 18:23 ` [PATCH v4 " Sivaprasad Tummala
7 siblings, 0 replies; 100+ messages in thread
From: Sivaprasad Tummala @ 2023-12-20 6:45 UTC (permalink / raw)
To: david.hunt, anatoly.burakov, jerinj, radu.nicolau, gakhil,
cristian.dumitrescu, ferruh.yigit, konstantin.ananyev
Cc: dev
With modern CPUs it is possible to have a higher
CPU count and thus a higher RTE_MAX_LCORE.
In the DPDK sample applications, the current config
lcore options are hard-limited to 255.
The patchset fixes these constraints by allowing
all lcore IDs up to RTE_MAX_LCORE.
Sivaprasad Tummala (6):
examples/l3fwd: fix lcore ID restriction
examples/l3fwd-power: fix lcore ID restriction
examples/l3fwd-graph: fix lcore ID restriction
examples/ipsec-secgw: fix lcore ID restriction
examples/qos_sched: fix lcore ID restriction
examples/vm_power_manager: fix lcore ID restriction
examples/ipsec-secgw/event_helper.h | 2 +-
examples/ipsec-secgw/ipsec-secgw.c | 32 +++++------
examples/ipsec-secgw/ipsec.c | 2 +-
examples/ipsec-secgw/ipsec.h | 2 +-
examples/ipsec-secgw/ipsec_worker.c | 10 ++--
examples/l3fwd-graph/main.c | 31 +++++-----
examples/l3fwd-power/main.c | 57 +++++++++----------
examples/l3fwd-power/main.h | 4 +-
examples/l3fwd-power/perf_core.c | 10 ++--
examples/l3fwd/l3fwd.h | 2 +-
examples/l3fwd/l3fwd_acl.c | 4 +-
examples/l3fwd/l3fwd_em.c | 4 +-
examples/l3fwd/l3fwd_event.h | 2 +-
examples/l3fwd/l3fwd_fib.c | 4 +-
examples/l3fwd/l3fwd_lpm.c | 5 +-
examples/l3fwd/main.c | 36 ++++++------
examples/qos_sched/args.c | 6 +-
.../guest_cli/vm_power_cli_guest.c | 4 +-
18 files changed, 109 insertions(+), 108 deletions(-)
--
2.25.1
^ permalink raw reply [flat|nested] 100+ messages in thread
* Re: [PATCH v3 5/6] examples/qos_sched: fix lcore ID restriction
2023-12-20 6:45 ` [PATCH v3 5/6] examples/qos_sched: " Sivaprasad Tummala
@ 2023-12-20 16:31 ` Stephen Hemminger
2024-01-09 15:16 ` Ferruh Yigit
0 siblings, 1 reply; 100+ messages in thread
From: Stephen Hemminger @ 2023-12-20 16:31 UTC (permalink / raw)
To: Sivaprasad Tummala
Cc: david.hunt, anatoly.burakov, jerinj, radu.nicolau, gakhil,
cristian.dumitrescu, ferruh.yigit, konstantin.ananyev, dev,
stable
On Wed, 20 Dec 2023 07:45:00 +0100
Sivaprasad Tummala <sivaprasad.tummala@amd.com> wrote:
> diff --git a/examples/qos_sched/args.c b/examples/qos_sched/args.c
> index e97273152a..22fe76eeb5 100644
> --- a/examples/qos_sched/args.c
> +++ b/examples/qos_sched/args.c
> @@ -182,10 +182,10 @@ app_parse_flow_conf(const char *conf_str)
>
> pconf->rx_port = vals[0];
> pconf->tx_port = vals[1];
> - pconf->rx_core = (uint8_t)vals[2];
> - pconf->wt_core = (uint8_t)vals[3];
> + pconf->rx_core = (uint16_t)vals[2];
> + pconf->wt_core = (uint16_t)vals[3];
> if (ret == 5)
> - pconf->tx_core = (uint8_t)vals[4];
> + pconf->tx_core = (uint16_t)vals[4];
> else
> pconf->tx_core = pconf->wt_core;
>
> --
Not sure why cast is even needed, assigning uint32_t to uint16_t
is not going to generate a warning with current compiler settings.
^ permalink raw reply [flat|nested] 100+ messages in thread
* Re: [PATCH v3 5/6] examples/qos_sched: fix lcore ID restriction
2023-12-20 16:31 ` Stephen Hemminger
@ 2024-01-09 15:16 ` Ferruh Yigit
2024-01-16 12:33 ` Tummala, Sivaprasad
0 siblings, 1 reply; 100+ messages in thread
From: Ferruh Yigit @ 2024-01-09 15:16 UTC (permalink / raw)
To: Stephen Hemminger, Sivaprasad Tummala
Cc: david.hunt, anatoly.burakov, jerinj, radu.nicolau, gakhil,
cristian.dumitrescu, konstantin.ananyev, dev, stable,
Bruce Richardson
On 12/20/2023 4:31 PM, Stephen Hemminger wrote:
> On Wed, 20 Dec 2023 07:45:00 +0100
> Sivaprasad Tummala <sivaprasad.tummala@amd.com> wrote:
>
>> diff --git a/examples/qos_sched/args.c b/examples/qos_sched/args.c
>> index e97273152a..22fe76eeb5 100644
>> --- a/examples/qos_sched/args.c
>> +++ b/examples/qos_sched/args.c
>> @@ -182,10 +182,10 @@ app_parse_flow_conf(const char *conf_str)
>>
>> pconf->rx_port = vals[0];
>> pconf->tx_port = vals[1];
>> - pconf->rx_core = (uint8_t)vals[2];
>> - pconf->wt_core = (uint8_t)vals[3];
>> + pconf->rx_core = (uint16_t)vals[2];
>> + pconf->wt_core = (uint16_t)vals[3];
>> if (ret == 5)
>> - pconf->tx_core = (uint8_t)vals[4];
>> + pconf->tx_core = (uint16_t)vals[4];
>> else
>> pconf->tx_core = pconf->wt_core;
>>
>> --
>
> Not sure why cast is even needed, assigning uint32_t to uint16_t
> is not going to generate a warning with current compiler settings.
>
I was assuming the compiler would complain when assigning uint32_t to
uint16_t, but it seems the '-Wconversion' compiler flag is required for this
warning.
Enabling this flag for the DPDK build causes lots of warnings; I wonder if
we should add a new buildtype in meson that enables this flag.
And apart from the compiler warning, I think it is good to keep an explicit
cast where an assignment can cause a change of value. At worst this can work
as documentation that the assignment between different types is done intentionally.
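For reference, a minimal sketch of the behaviour described above. The file and function
names are made up, and the compiler behaviour noted in the comments is what recent
gcc/clang are expected to do, not verbatim output.

/* narrowing.c -- sketch of the -Wconversion discussion.
 *   cc -Wall -Wextra -c narrowing.c              -> no diagnostics
 *   cc -Wall -Wextra -Wconversion -c narrowing.c -> warns on the implicit copy only
 */
#include <stdint.h>

uint16_t
to_lcore_id(uint32_t parsed)
{
        uint16_t implicit_cpy = parsed;           /* flagged only with -Wconversion */
        uint16_t explicit_cpy = (uint16_t)parsed; /* cast records intent; not flagged */

        (void)implicit_cpy;
        return explicit_cpy;
}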
^ permalink raw reply [flat|nested] 100+ messages in thread
* RE: [PATCH v3 5/6] examples/qos_sched: fix lcore ID restriction
2024-01-09 15:16 ` Ferruh Yigit
@ 2024-01-16 12:33 ` Tummala, Sivaprasad
2024-01-16 16:28 ` Stephen Hemminger
0 siblings, 1 reply; 100+ messages in thread
From: Tummala, Sivaprasad @ 2024-01-16 12:33 UTC (permalink / raw)
To: Yigit, Ferruh, Stephen Hemminger
Cc: david.hunt, anatoly.burakov, jerinj, radu.nicolau, gakhil,
cristian.dumitrescu, konstantin.ananyev, dev, stable,
Bruce Richardson
Hi Stephen,
> -----Original Message-----
> From: Yigit, Ferruh <Ferruh.Yigit@amd.com>
> Sent: Tuesday, January 9, 2024 8:47 PM
> To: Stephen Hemminger <stephen@networkplumber.org>; Tummala, Sivaprasad
> <Sivaprasad.Tummala@amd.com>
> Cc: david.hunt@intel.com; anatoly.burakov@intel.com; jerinj@marvell.com;
> radu.nicolau@intel.com; gakhil@marvell.com; cristian.dumitrescu@intel.com;
> konstantin.ananyev@huawei.com; dev@dpdk.org; stable@dpdk.org; Bruce
> Richardson <bruce.richardson@intel.com>
> Subject: Re: [PATCH v3 5/6] examples/qos_sched: fix lcore ID restriction
>
> On 12/20/2023 4:31 PM, Stephen Hemminger wrote:
> > On Wed, 20 Dec 2023 07:45:00 +0100
> > Sivaprasad Tummala <sivaprasad.tummala@amd.com> wrote:
> >
> >> diff --git a/examples/qos_sched/args.c b/examples/qos_sched/args.c
> >> index e97273152a..22fe76eeb5 100644
> >> --- a/examples/qos_sched/args.c
> >> +++ b/examples/qos_sched/args.c
> >> @@ -182,10 +182,10 @@ app_parse_flow_conf(const char *conf_str)
> >>
> >> pconf->rx_port = vals[0];
> >> pconf->tx_port = vals[1];
> >> - pconf->rx_core = (uint8_t)vals[2];
> >> - pconf->wt_core = (uint8_t)vals[3];
> >> + pconf->rx_core = (uint16_t)vals[2];
> >> + pconf->wt_core = (uint16_t)vals[3];
> >> if (ret == 5)
> >> - pconf->tx_core = (uint8_t)vals[4];
> >> + pconf->tx_core = (uint16_t)vals[4];
> >> else
> >> pconf->tx_core = pconf->wt_core;
> >>
> >> --
> >
> > Not sure why cast is even needed, assigning uint32_t to uint16_t is
> > not going to generate a warning with current compiler settings.
> >
>
> I was assuming the compiler would complain when assigning uint32_t to uint16_t, but it
> seems the '-Wconversion' compiler flag is required for this warning.
> Enabling this flag for the DPDK build causes lots of warnings; I wonder if we should add
> a new buildtype in meson that enables this flag.
>
>
> And apart from the compiler warning, I think it is good to keep an explicit cast where
> an assignment can cause a change of value. At worst this can work as documentation
> that the assignment between different types is done intentionally.
I would prefer to keep the explicit conversion for consistency.
Please let me know if you think otherwise.
^ permalink raw reply [flat|nested] 100+ messages in thread
* Re: [PATCH v3 5/6] examples/qos_sched: fix lcore ID restriction
2024-01-16 12:33 ` Tummala, Sivaprasad
@ 2024-01-16 16:28 ` Stephen Hemminger
0 siblings, 0 replies; 100+ messages in thread
From: Stephen Hemminger @ 2024-01-16 16:28 UTC (permalink / raw)
To: Tummala, Sivaprasad
Cc: Yigit, Ferruh, david.hunt, anatoly.burakov, jerinj, radu.nicolau,
gakhil, cristian.dumitrescu, konstantin.ananyev, dev, stable,
Bruce Richardson
On Tue, 16 Jan 2024 12:33:48 +0000
"Tummala, Sivaprasad" <Sivaprasad.Tummala@amd.com> wrote:
> > > Not sure why cast is even needed, assigning uint32_t to uint16_t is
> > > not going to generate a warning with current compiler settings.
> > >
> >
> > I was assuming compiler will complain when assigning uint32_t to uint16_t, but it
> > seems '-Wconversion' compiler flag is required for this warning.
> > Enabling this flag for DPDK build causes lots of warnings, I wonder if we should add
> > a new buildtype in meson that enables this flag.
> >
> >
> > And except from compiler warning, I think it is good to keep explicit cast where
> > assignment can cause change of value. This at worst can work as documentation
> > that assignment between different types done intentionally.
>
> I would prefer to keep the explicit conversion for consistency.
> Please let me know if you think otherwise.
Keep it, but casts are often a source of bugs.
They defeat type checking and are often not needed.
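A small sketch of that failure mode, with made-up values: once the cast is written even
-Wconversion stays quiet, so a value that no longer fits in the old 8-bit field is
truncated silently, while the widened 16-bit field keeps it intact.

/* Illustration of how a cast can hide a real truncation.  Values are made
 * up; 300 is simply an lcore id that does not fit in eight bits.
 */
#include <assert.h>
#include <stdint.h>

int
main(void)
{
        unsigned long parsed = 300;             /* e.g. lcore id from --config */
        uint8_t old_field = (uint8_t)parsed;    /* old type: silently becomes 44 */
        uint16_t new_field = (uint16_t)parsed;  /* new type: value preserved */

        assert(old_field == 44);
        assert(new_field == 300);
        return 0;
}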
^ permalink raw reply [flat|nested] 100+ messages in thread
* [PATCH v4 0/6] fix lcore ID restriction
2023-12-20 6:44 ` [PATCH v3 " Sivaprasad Tummala
` (6 preceding siblings ...)
2023-12-20 6:45 ` [PATCH v3 0/6] " Sivaprasad Tummala
@ 2024-01-16 18:23 ` Sivaprasad Tummala
2024-01-16 18:23 ` [PATCH v4 1/6] examples/l3fwd: " Sivaprasad Tummala
` (7 more replies)
7 siblings, 8 replies; 100+ messages in thread
From: Sivaprasad Tummala @ 2024-01-16 18:23 UTC (permalink / raw)
To: david.hunt, anatoly.burakov, jerinj, radu.nicolau, gakhil,
cristian.dumitrescu, ferruh.yigit, konstantin.ananyev, stephen
Cc: dev
With modern CPUs it is possible to have a higher
CPU count and thus a higher RTE_MAX_LCORE.
In the DPDK sample applications, the current config
lcore options are hard-limited to 255.
The patchset fixes these constraints by allowing
all lcore IDs up to RTE_MAX_LCORE.
v4:
- fixed build errors with queue_id type
in ipsec-secgw
v3:
- updated queue_id type to uint16_t
v2:
- fixed typo with lcore_id type in l3fwd
Sivaprasad Tummala (6):
examples/l3fwd: fix lcore ID restriction
examples/l3fwd-power: fix lcore ID restriction
examples/l3fwd-graph: fix lcore ID restriction
examples/ipsec-secgw: fix lcore ID restriction
examples/qos_sched: fix lcore ID restriction
examples/vm_power_manager: fix lcore ID restriction
examples/ipsec-secgw/event_helper.h | 2 +-
examples/ipsec-secgw/ipsec-secgw.c | 34 +++++------
examples/ipsec-secgw/ipsec.c | 2 +-
examples/ipsec-secgw/ipsec.h | 2 +-
examples/ipsec-secgw/ipsec_worker.c | 10 ++--
examples/l3fwd-graph/main.c | 31 +++++-----
examples/l3fwd-power/main.c | 57 +++++++++----------
examples/l3fwd-power/main.h | 4 +-
examples/l3fwd-power/perf_core.c | 10 ++--
examples/l3fwd/l3fwd.h | 2 +-
examples/l3fwd/l3fwd_acl.c | 4 +-
examples/l3fwd/l3fwd_em.c | 4 +-
examples/l3fwd/l3fwd_event.h | 2 +-
examples/l3fwd/l3fwd_fib.c | 4 +-
examples/l3fwd/l3fwd_lpm.c | 5 +-
examples/l3fwd/main.c | 36 ++++++------
examples/qos_sched/args.c | 6 +-
.../guest_cli/vm_power_cli_guest.c | 4 +-
18 files changed, 110 insertions(+), 109 deletions(-)
--
2.25.1
^ permalink raw reply [flat|nested] 100+ messages in thread
* [PATCH v4 1/6] examples/l3fwd: fix lcore ID restriction
2024-01-16 18:23 ` [PATCH v4 " Sivaprasad Tummala
@ 2024-01-16 18:23 ` Sivaprasad Tummala
2024-01-16 18:23 ` [PATCH v4 2/6] examples/l3fwd-power: " Sivaprasad Tummala
` (6 subsequent siblings)
7 siblings, 0 replies; 100+ messages in thread
From: Sivaprasad Tummala @ 2024-01-16 18:23 UTC (permalink / raw)
To: david.hunt, anatoly.burakov, jerinj, radu.nicolau, gakhil,
cristian.dumitrescu, ferruh.yigit, konstantin.ananyev, stephen
Cc: dev, stable
Currently the config option allows lcore IDs up to 255,
irrespective of RTE_MAX_LCORES and needs to be fixed.
The patch allows config options based on DPDK config.
Fixes: af75078fece3 ("first public release")
Cc: stable@dpdk.org
Signed-off-by: Sivaprasad Tummala <sivaprasad.tummala@amd.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@huawei.com>
---
examples/l3fwd/l3fwd.h | 2 +-
examples/l3fwd/l3fwd_acl.c | 4 ++--
examples/l3fwd/l3fwd_em.c | 4 ++--
examples/l3fwd/l3fwd_event.h | 2 +-
examples/l3fwd/l3fwd_fib.c | 4 ++--
examples/l3fwd/l3fwd_lpm.c | 5 ++---
examples/l3fwd/main.c | 36 ++++++++++++++++++++----------------
7 files changed, 30 insertions(+), 27 deletions(-)
diff --git a/examples/l3fwd/l3fwd.h b/examples/l3fwd/l3fwd.h
index e7ae0e5834..12c264cb4c 100644
--- a/examples/l3fwd/l3fwd.h
+++ b/examples/l3fwd/l3fwd.h
@@ -74,7 +74,7 @@ struct mbuf_table {
struct lcore_rx_queue {
uint16_t port_id;
- uint8_t queue_id;
+ uint16_t queue_id;
} __rte_cache_aligned;
struct lcore_conf {
diff --git a/examples/l3fwd/l3fwd_acl.c b/examples/l3fwd/l3fwd_acl.c
index 401692bcec..2bd63181bc 100644
--- a/examples/l3fwd/l3fwd_acl.c
+++ b/examples/l3fwd/l3fwd_acl.c
@@ -997,7 +997,7 @@ acl_main_loop(__rte_unused void *dummy)
uint64_t prev_tsc, diff_tsc, cur_tsc;
int i, nb_rx;
uint16_t portid;
- uint8_t queueid;
+ uint16_t queueid;
struct lcore_conf *qconf;
int socketid;
const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
@@ -1020,7 +1020,7 @@ acl_main_loop(__rte_unused void *dummy)
portid = qconf->rx_queue_list[i].port_id;
queueid = qconf->rx_queue_list[i].queue_id;
RTE_LOG(INFO, L3FWD,
- " -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
+ " -- lcoreid=%u portid=%u rxqueueid=%hu\n",
lcore_id, portid, queueid);
}
diff --git a/examples/l3fwd/l3fwd_em.c b/examples/l3fwd/l3fwd_em.c
index 40e102b38a..cd2bb4a4bb 100644
--- a/examples/l3fwd/l3fwd_em.c
+++ b/examples/l3fwd/l3fwd_em.c
@@ -586,7 +586,7 @@ em_main_loop(__rte_unused void *dummy)
unsigned lcore_id;
uint64_t prev_tsc, diff_tsc, cur_tsc;
int i, nb_rx;
- uint8_t queueid;
+ uint16_t queueid;
uint16_t portid;
struct lcore_conf *qconf;
const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) /
@@ -609,7 +609,7 @@ em_main_loop(__rte_unused void *dummy)
portid = qconf->rx_queue_list[i].port_id;
queueid = qconf->rx_queue_list[i].queue_id;
RTE_LOG(INFO, L3FWD,
- " -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
+ " -- lcoreid=%u portid=%u rxqueueid=%hu\n",
lcore_id, portid, queueid);
}
diff --git a/examples/l3fwd/l3fwd_event.h b/examples/l3fwd/l3fwd_event.h
index 9aad358003..c6a4a89127 100644
--- a/examples/l3fwd/l3fwd_event.h
+++ b/examples/l3fwd/l3fwd_event.h
@@ -78,8 +78,8 @@ struct l3fwd_event_resources {
uint8_t deq_depth;
uint8_t has_burst;
uint8_t enabled;
- uint8_t eth_rx_queues;
uint8_t vector_enabled;
+ uint16_t eth_rx_queues;
uint16_t vector_size;
uint64_t vector_tmo_ns;
};
diff --git a/examples/l3fwd/l3fwd_fib.c b/examples/l3fwd/l3fwd_fib.c
index 6a21984415..7da55f707a 100644
--- a/examples/l3fwd/l3fwd_fib.c
+++ b/examples/l3fwd/l3fwd_fib.c
@@ -186,7 +186,7 @@ fib_main_loop(__rte_unused void *dummy)
uint64_t prev_tsc, diff_tsc, cur_tsc;
int i, nb_rx;
uint16_t portid;
- uint8_t queueid;
+ uint16_t queueid;
struct lcore_conf *qconf;
const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) /
US_PER_S * BURST_TX_DRAIN_US;
@@ -208,7 +208,7 @@ fib_main_loop(__rte_unused void *dummy)
portid = qconf->rx_queue_list[i].port_id;
queueid = qconf->rx_queue_list[i].queue_id;
RTE_LOG(INFO, L3FWD,
- " -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
+ " -- lcoreid=%u portid=%u rxqueueid=%hu\n",
lcore_id, portid, queueid);
}
diff --git a/examples/l3fwd/l3fwd_lpm.c b/examples/l3fwd/l3fwd_lpm.c
index a484a33089..01d38bc69c 100644
--- a/examples/l3fwd/l3fwd_lpm.c
+++ b/examples/l3fwd/l3fwd_lpm.c
@@ -148,8 +148,7 @@ lpm_main_loop(__rte_unused void *dummy)
unsigned lcore_id;
uint64_t prev_tsc, diff_tsc, cur_tsc;
int i, nb_rx;
- uint16_t portid;
- uint8_t queueid;
+ uint16_t portid, queueid;
struct lcore_conf *qconf;
const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) /
US_PER_S * BURST_TX_DRAIN_US;
@@ -171,7 +170,7 @@ lpm_main_loop(__rte_unused void *dummy)
portid = qconf->rx_queue_list[i].port_id;
queueid = qconf->rx_queue_list[i].queue_id;
RTE_LOG(INFO, L3FWD,
- " -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
+ " -- lcoreid=%u portid=%u rxqueueid=%hu\n",
lcore_id, portid, queueid);
}
diff --git a/examples/l3fwd/main.c b/examples/l3fwd/main.c
index 3bf28aec0c..db49f4707d 100644
--- a/examples/l3fwd/main.c
+++ b/examples/l3fwd/main.c
@@ -98,8 +98,8 @@ struct parm_cfg parm_config;
struct lcore_params {
uint16_t port_id;
- uint8_t queue_id;
- uint8_t lcore_id;
+ uint16_t queue_id;
+ uint16_t lcore_id;
} __rte_cache_aligned;
static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
@@ -292,24 +292,24 @@ setup_l3fwd_lookup_tables(void)
static int
check_lcore_params(void)
{
- uint8_t queue, lcore;
+ uint16_t queue, lcore;
uint16_t i;
int socketid;
for (i = 0; i < nb_lcore_params; ++i) {
queue = lcore_params[i].queue_id;
if (queue >= MAX_RX_QUEUE_PER_PORT) {
- printf("invalid queue number: %hhu\n", queue);
+ printf("invalid queue number: %hu\n", queue);
return -1;
}
lcore = lcore_params[i].lcore_id;
if (!rte_lcore_is_enabled(lcore)) {
- printf("error: lcore %hhu is not enabled in lcore mask\n", lcore);
+ printf("error: lcore %hu is not enabled in lcore mask\n", lcore);
return -1;
}
if ((socketid = rte_lcore_to_socket_id(lcore) != 0) &&
(numa_on == 0)) {
- printf("warning: lcore %hhu is on socket %d with numa off \n",
+ printf("warning: lcore %hu is on socket %d with numa off\n",
lcore, socketid);
}
}
@@ -336,7 +336,7 @@ check_port_config(void)
return 0;
}
-static uint8_t
+static uint16_t
get_port_n_rx_queues(const uint16_t port)
{
int queue = -1;
@@ -352,14 +352,14 @@ get_port_n_rx_queues(const uint16_t port)
lcore_params[i].port_id);
}
}
- return (uint8_t)(++queue);
+ return (uint16_t)(++queue);
}
static int
init_lcore_rx_queues(void)
{
uint16_t i, nb_rx_queue;
- uint8_t lcore;
+ uint16_t lcore;
for (i = 0; i < nb_lcore_params; ++i) {
lcore = lcore_params[i].lcore_id;
@@ -500,6 +500,8 @@ parse_config(const char *q_arg)
char *str_fld[_NUM_FLD];
int i;
unsigned size;
+ uint16_t max_fld[_NUM_FLD] = {RTE_MAX_ETHPORTS,
+ USHRT_MAX, RTE_MAX_LCORE};
nb_lcore_params = 0;
@@ -518,7 +520,8 @@ parse_config(const char *q_arg)
for (i = 0; i < _NUM_FLD; i++){
errno = 0;
int_fld[i] = strtoul(str_fld[i], &end, 0);
- if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
+ if (errno != 0 || end == str_fld[i] || int_fld[i] >
+ max_fld[i])
return -1;
}
if (nb_lcore_params >= MAX_LCORE_PARAMS) {
@@ -527,11 +530,11 @@ parse_config(const char *q_arg)
return -1;
}
lcore_params_array[nb_lcore_params].port_id =
- (uint8_t)int_fld[FLD_PORT];
+ (uint16_t)int_fld[FLD_PORT];
lcore_params_array[nb_lcore_params].queue_id =
- (uint8_t)int_fld[FLD_QUEUE];
+ (uint16_t)int_fld[FLD_QUEUE];
lcore_params_array[nb_lcore_params].lcore_id =
- (uint8_t)int_fld[FLD_LCORE];
+ (uint16_t)int_fld[FLD_LCORE];
++nb_lcore_params;
}
lcore_params = lcore_params_array;
@@ -630,7 +633,7 @@ parse_event_eth_rx_queues(const char *eth_rx_queues)
{
struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
char *end = NULL;
- uint8_t num_eth_rx_queues;
+ uint16_t num_eth_rx_queues;
/* parse decimal string */
num_eth_rx_queues = strtoul(eth_rx_queues, &end, 10);
@@ -1211,7 +1214,8 @@ config_port_max_pkt_len(struct rte_eth_conf *conf,
static void
l3fwd_poll_resource_setup(void)
{
- uint8_t nb_rx_queue, queue, socketid;
+ uint8_t socketid;
+ uint16_t nb_rx_queue, queue;
struct rte_eth_dev_info dev_info;
uint32_t n_tx_queue, nb_lcores;
struct rte_eth_txconf *txconf;
@@ -1528,7 +1532,7 @@ main(int argc, char **argv)
struct lcore_conf *qconf;
uint16_t queueid, portid;
unsigned int lcore_id;
- uint8_t queue;
+ uint16_t queue;
int ret;
/* init EAL */
--
2.25.1
^ permalink raw reply [flat|nested] 100+ messages in thread
* [PATCH v4 2/6] examples/l3fwd-power: fix lcore ID restriction
2024-01-16 18:23 ` [PATCH v4 " Sivaprasad Tummala
2024-01-16 18:23 ` [PATCH v4 1/6] examples/l3fwd: " Sivaprasad Tummala
@ 2024-01-16 18:23 ` Sivaprasad Tummala
2024-01-16 18:23 ` [PATCH v4 3/6] examples/l3fwd-graph: " Sivaprasad Tummala
` (5 subsequent siblings)
7 siblings, 0 replies; 100+ messages in thread
From: Sivaprasad Tummala @ 2024-01-16 18:23 UTC (permalink / raw)
To: david.hunt, anatoly.burakov, jerinj, radu.nicolau, gakhil,
cristian.dumitrescu, ferruh.yigit, konstantin.ananyev, stephen
Cc: dev, stable
Currently the config option allows lcore IDs up to 255,
irrespective of RTE_MAX_LCORES and needs to be fixed.
The patch allows config options based on DPDK config.
Fixes: f88e7c175a68 ("examples/l3fwd-power: add high/regular perf cores options")
Cc: radu.nicolau@intel.com
Cc: stable@dpdk.org
Signed-off-by: Sivaprasad Tummala <sivaprasad.tummala@amd.com>
---
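Note: besides the plain type widening, interrupt mode packs (port, queue)
into the epoll event user data, so the shift width changes from CHAR_BIT to
sizeof(uint16_t) * CHAR_BIT in the event_register() and
sleep_until_rx_interrupt() hunks below. A minimal sketch of that widened
round trip, assuming 16-bit port and queue IDs and using UINT16_MAX in place
of RTE_LEN2MASK() so it builds without DPDK headers:

#include <assert.h>
#include <limits.h>
#include <stdint.h>
#include <stdio.h>

/* Pack (port, queue) into one word, queue in the low 16 bits. */
static uint32_t
pack_port_queue(uint16_t portid, uint16_t queueid)
{
        return (uint32_t)portid << (sizeof(uint16_t) * CHAR_BIT) | queueid;
}

/* Reverse of the packing above. */
static void
unpack_port_queue(uint32_t data, uint16_t *portid, uint16_t *queueid)
{
        *portid = (uint16_t)(data >> (sizeof(uint16_t) * CHAR_BIT));
        *queueid = (uint16_t)(data & UINT16_MAX);
}

int
main(void)
{
        uint16_t port, queue;

        /* A queue ID above 255 now survives the round trip intact. */
        unpack_port_queue(pack_port_queue(3, 300), &port, &queue);
        assert(port == 3 && queue == 300);
        printf("port=%u queue=%u\n", port, queue);
        return 0;
}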
examples/l3fwd-power/main.c | 57 ++++++++++++++++----------------
examples/l3fwd-power/main.h | 4 +--
examples/l3fwd-power/perf_core.c | 10 +++---
3 files changed, 35 insertions(+), 36 deletions(-)
diff --git a/examples/l3fwd-power/main.c b/examples/l3fwd-power/main.c
index f4adcf41b5..d0f3c332ee 100644
--- a/examples/l3fwd-power/main.c
+++ b/examples/l3fwd-power/main.c
@@ -214,7 +214,7 @@ enum freq_scale_hint_t
struct lcore_rx_queue {
uint16_t port_id;
- uint8_t queue_id;
+ uint16_t queue_id;
enum freq_scale_hint_t freq_up_hint;
uint32_t zero_rx_packet_count;
uint32_t idle_hint;
@@ -838,7 +838,7 @@ sleep_until_rx_interrupt(int num, int lcore)
struct rte_epoll_event event[num];
int n, i;
uint16_t port_id;
- uint8_t queue_id;
+ uint16_t queue_id;
void *data;
if (status[lcore].wakeup) {
@@ -850,9 +850,9 @@ sleep_until_rx_interrupt(int num, int lcore)
n = rte_epoll_wait(RTE_EPOLL_PER_THREAD, event, num, 10);
for (i = 0; i < n; i++) {
data = event[i].epdata.data;
- port_id = ((uintptr_t)data) >> CHAR_BIT;
+ port_id = ((uintptr_t)data) >> (sizeof(uint16_t) * CHAR_BIT);
queue_id = ((uintptr_t)data) &
- RTE_LEN2MASK(CHAR_BIT, uint8_t);
+ RTE_LEN2MASK((sizeof(uint16_t) * CHAR_BIT), uint16_t);
RTE_LOG(INFO, L3FWD_POWER,
"lcore %u is waked up from rx interrupt on"
" port %d queue %d\n",
@@ -867,7 +867,7 @@ static void turn_on_off_intr(struct lcore_conf *qconf, bool on)
{
int i;
struct lcore_rx_queue *rx_queue;
- uint8_t queue_id;
+ uint16_t queue_id;
uint16_t port_id;
for (i = 0; i < qconf->n_rx_queue; ++i) {
@@ -887,7 +887,7 @@ static void turn_on_off_intr(struct lcore_conf *qconf, bool on)
static int event_register(struct lcore_conf *qconf)
{
struct lcore_rx_queue *rx_queue;
- uint8_t queueid;
+ uint16_t queueid;
uint16_t portid;
uint32_t data;
int ret;
@@ -897,7 +897,7 @@ static int event_register(struct lcore_conf *qconf)
rx_queue = &(qconf->rx_queue_list[i]);
portid = rx_queue->port_id;
queueid = rx_queue->queue_id;
- data = portid << CHAR_BIT | queueid;
+ data = portid << (sizeof(uint16_t) * CHAR_BIT) | queueid;
ret = rte_eth_dev_rx_intr_ctl_q(portid, queueid,
RTE_EPOLL_PER_THREAD,
@@ -917,8 +917,7 @@ static int main_intr_loop(__rte_unused void *dummy)
unsigned int lcore_id;
uint64_t prev_tsc, diff_tsc, cur_tsc;
int i, j, nb_rx;
- uint8_t queueid;
- uint16_t portid;
+ uint16_t portid, queueid;
struct lcore_conf *qconf;
struct lcore_rx_queue *rx_queue;
uint32_t lcore_rx_idle_count = 0;
@@ -946,7 +945,7 @@ static int main_intr_loop(__rte_unused void *dummy)
portid = qconf->rx_queue_list[i].port_id;
queueid = qconf->rx_queue_list[i].queue_id;
RTE_LOG(INFO, L3FWD_POWER,
- " -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
+ " -- lcoreid=%u portid=%u rxqueueid=%hu\n",
lcore_id, portid, queueid);
}
@@ -1083,8 +1082,7 @@ main_telemetry_loop(__rte_unused void *dummy)
unsigned int lcore_id;
uint64_t prev_tsc, diff_tsc, cur_tsc, prev_tel_tsc;
int i, j, nb_rx;
- uint8_t queueid;
- uint16_t portid;
+ uint16_t portid, queueid;
struct lcore_conf *qconf;
struct lcore_rx_queue *rx_queue;
uint64_t ep_nep[2] = {0}, fp_nfp[2] = {0};
@@ -1114,7 +1112,7 @@ main_telemetry_loop(__rte_unused void *dummy)
portid = qconf->rx_queue_list[i].port_id;
queueid = qconf->rx_queue_list[i].queue_id;
RTE_LOG(INFO, L3FWD_POWER, " -- lcoreid=%u portid=%u "
- "rxqueueid=%hhu\n", lcore_id, portid, queueid);
+ "rxqueueid=%hu\n", lcore_id, portid, queueid);
}
while (!is_done()) {
@@ -1205,8 +1203,7 @@ main_legacy_loop(__rte_unused void *dummy)
uint64_t prev_tsc, diff_tsc, cur_tsc, tim_res_tsc, hz;
uint64_t prev_tsc_power = 0, cur_tsc_power, diff_tsc_power;
int i, j, nb_rx;
- uint8_t queueid;
- uint16_t portid;
+ uint16_t portid, queueid;
struct lcore_conf *qconf;
struct lcore_rx_queue *rx_queue;
enum freq_scale_hint_t lcore_scaleup_hint;
@@ -1234,7 +1231,7 @@ main_legacy_loop(__rte_unused void *dummy)
portid = qconf->rx_queue_list[i].port_id;
queueid = qconf->rx_queue_list[i].queue_id;
RTE_LOG(INFO, L3FWD_POWER, " -- lcoreid=%u portid=%u "
- "rxqueueid=%hhu\n", lcore_id, portid, queueid);
+ "rxqueueid=%hu\n", lcore_id, portid, queueid);
}
/* add into event wait list */
@@ -1399,25 +1396,25 @@ main_legacy_loop(__rte_unused void *dummy)
static int
check_lcore_params(void)
{
- uint8_t queue, lcore;
+ uint16_t queue, lcore;
uint16_t i;
int socketid;
for (i = 0; i < nb_lcore_params; ++i) {
queue = lcore_params[i].queue_id;
if (queue >= MAX_RX_QUEUE_PER_PORT) {
- printf("invalid queue number: %hhu\n", queue);
+ printf("invalid queue number: %hu\n", queue);
return -1;
}
lcore = lcore_params[i].lcore_id;
if (!rte_lcore_is_enabled(lcore)) {
- printf("error: lcore %hhu is not enabled in lcore "
+ printf("error: lcore %hu is not enabled in lcore "
"mask\n", lcore);
return -1;
}
if ((socketid = rte_lcore_to_socket_id(lcore) != 0) &&
(numa_on == 0)) {
- printf("warning: lcore %hhu is on socket %d with numa "
+ printf("warning: lcore %hu is on socket %d with numa "
"off\n", lcore, socketid);
}
if (app_mode == APP_MODE_TELEMETRY && lcore == rte_lcore_id()) {
@@ -1451,7 +1448,7 @@ check_port_config(void)
return 0;
}
-static uint8_t
+static uint16_t
get_port_n_rx_queues(const uint16_t port)
{
int queue = -1;
@@ -1462,14 +1459,14 @@ get_port_n_rx_queues(const uint16_t port)
lcore_params[i].queue_id > queue)
queue = lcore_params[i].queue_id;
}
- return (uint8_t)(++queue);
+ return (uint16_t)(++queue);
}
static int
init_lcore_rx_queues(void)
{
uint16_t i, nb_rx_queue;
- uint8_t lcore;
+ uint16_t lcore;
for (i = 0; i < nb_lcore_params; ++i) {
lcore = lcore_params[i].lcore_id;
@@ -1661,6 +1658,8 @@ parse_config(const char *q_arg)
char *str_fld[_NUM_FLD];
int i;
unsigned size;
+ unsigned int max_fld[_NUM_FLD] = {RTE_MAX_ETHPORTS,
+ USHRT_MAX, RTE_MAX_LCORE};
nb_lcore_params = 0;
@@ -1681,7 +1680,7 @@ parse_config(const char *q_arg)
errno = 0;
int_fld[i] = strtoul(str_fld[i], &end, 0);
if (errno != 0 || end == str_fld[i] || int_fld[i] >
- 255)
+ max_fld[i])
return -1;
}
if (nb_lcore_params >= MAX_LCORE_PARAMS) {
@@ -1690,11 +1689,11 @@ parse_config(const char *q_arg)
return -1;
}
lcore_params_array[nb_lcore_params].port_id =
- (uint8_t)int_fld[FLD_PORT];
+ (uint16_t)int_fld[FLD_PORT];
lcore_params_array[nb_lcore_params].queue_id =
- (uint8_t)int_fld[FLD_QUEUE];
+ (uint16_t)int_fld[FLD_QUEUE];
lcore_params_array[nb_lcore_params].lcore_id =
- (uint8_t)int_fld[FLD_LCORE];
+ (uint16_t)int_fld[FLD_LCORE];
++nb_lcore_params;
}
lcore_params = lcore_params_array;
@@ -2501,8 +2500,8 @@ main(int argc, char **argv)
uint64_t hz;
uint32_t n_tx_queue, nb_lcores;
uint32_t dev_rxq_num, dev_txq_num;
- uint8_t nb_rx_queue, queue, socketid;
- uint16_t portid;
+ uint8_t socketid;
+ uint16_t portid, nb_rx_queue, queue;
const char *ptr_strings[NUM_TELSTATS];
/* init EAL */
diff --git a/examples/l3fwd-power/main.h b/examples/l3fwd-power/main.h
index 258de98f5b..dcb5744ee6 100644
--- a/examples/l3fwd-power/main.h
+++ b/examples/l3fwd-power/main.h
@@ -9,8 +9,8 @@
#define MAX_LCORE_PARAMS 1024
struct lcore_params {
uint16_t port_id;
- uint8_t queue_id;
- uint8_t lcore_id;
+ uint16_t queue_id;
+ uint16_t lcore_id;
} __rte_cache_aligned;
extern struct lcore_params *lcore_params;
diff --git a/examples/l3fwd-power/perf_core.c b/examples/l3fwd-power/perf_core.c
index 41ef6d0c9a..1fb9ceb584 100644
--- a/examples/l3fwd-power/perf_core.c
+++ b/examples/l3fwd-power/perf_core.c
@@ -22,9 +22,9 @@ static uint16_t nb_hp_lcores;
struct perf_lcore_params {
uint16_t port_id;
- uint8_t queue_id;
+ uint16_t queue_id;
uint8_t high_perf;
- uint8_t lcore_idx;
+ uint16_t lcore_idx;
} __rte_cache_aligned;
static struct perf_lcore_params prf_lc_prms[MAX_LCORE_PARAMS];
@@ -161,13 +161,13 @@ parse_perf_config(const char *q_arg)
return -1;
}
prf_lc_prms[nb_prf_lc_prms].port_id =
- (uint8_t)int_fld[FLD_PORT];
+ (uint16_t)int_fld[FLD_PORT];
prf_lc_prms[nb_prf_lc_prms].queue_id =
- (uint8_t)int_fld[FLD_QUEUE];
+ (uint16_t)int_fld[FLD_QUEUE];
prf_lc_prms[nb_prf_lc_prms].high_perf =
!!(uint8_t)int_fld[FLD_LCORE_HP];
prf_lc_prms[nb_prf_lc_prms].lcore_idx =
- (uint8_t)int_fld[FLD_LCORE_IDX];
+ (uint16_t)int_fld[FLD_LCORE_IDX];
++nb_prf_lc_prms;
}
--
2.25.1
^ permalink raw reply [flat|nested] 100+ messages in thread
* [PATCH v4 3/6] examples/l3fwd-graph: fix lcore ID restriction
2024-01-16 18:23 ` [PATCH v4 " Sivaprasad Tummala
2024-01-16 18:23 ` [PATCH v4 1/6] examples/l3fwd: " Sivaprasad Tummala
2024-01-16 18:23 ` [PATCH v4 2/6] examples/l3fwd-power: " Sivaprasad Tummala
@ 2024-01-16 18:23 ` Sivaprasad Tummala
2024-01-16 18:23 ` [PATCH v4 4/6] examples/ipsec-secgw: " Sivaprasad Tummala
` (4 subsequent siblings)
7 siblings, 0 replies; 100+ messages in thread
From: Sivaprasad Tummala @ 2024-01-16 18:23 UTC (permalink / raw)
To: david.hunt, anatoly.burakov, jerinj, radu.nicolau, gakhil,
cristian.dumitrescu, ferruh.yigit, konstantin.ananyev, stephen
Cc: dev, ndabilpuram, stable
Currently the config option allows lcore IDs up to 255,
irrespective of RTE_MAX_LCORES and needs to be fixed.
The patch allows config options based on DPDK config.
Fixes: 08bd1a174461 ("examples/l3fwd-graph: add graph-based l3fwd skeleton")
Cc: ndabilpuram@marvell.com
Cc: stable@dpdk.org
Signed-off-by: Sivaprasad Tummala <sivaprasad.tummala@amd.com>
---
examples/l3fwd-graph/main.c | 31 ++++++++++++++++---------------
1 file changed, 16 insertions(+), 15 deletions(-)
diff --git a/examples/l3fwd-graph/main.c b/examples/l3fwd-graph/main.c
index 96cb1c81ff..ffb6900fee 100644
--- a/examples/l3fwd-graph/main.c
+++ b/examples/l3fwd-graph/main.c
@@ -90,7 +90,7 @@ static int pcap_trace_enable;
struct lcore_rx_queue {
uint16_t port_id;
- uint8_t queue_id;
+ uint16_t queue_id;
char node_name[RTE_NODE_NAMESIZE];
};
@@ -110,8 +110,8 @@ static struct lcore_conf lcore_conf[RTE_MAX_LCORE];
struct lcore_params {
uint16_t port_id;
- uint8_t queue_id;
- uint8_t lcore_id;
+ uint16_t queue_id;
+ uint16_t lcore_id;
} __rte_cache_aligned;
static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
@@ -205,19 +205,19 @@ check_worker_model_params(void)
static int
check_lcore_params(void)
{
- uint8_t queue, lcore;
+ uint16_t queue;
int socketid;
- uint16_t i;
+ uint16_t i, lcore;
for (i = 0; i < nb_lcore_params; ++i) {
queue = lcore_params[i].queue_id;
if (queue >= MAX_RX_QUEUE_PER_PORT) {
- printf("Invalid queue number: %hhu\n", queue);
+ printf("Invalid queue number: %hu\n", queue);
return -1;
}
lcore = lcore_params[i].lcore_id;
if (!rte_lcore_is_enabled(lcore)) {
- printf("Error: lcore %hhu is not enabled in lcore mask\n",
+ printf("Error: lcore %hu is not enabled in lcore mask\n",
lcore);
return -1;
}
@@ -228,7 +228,7 @@ check_lcore_params(void)
}
socketid = rte_lcore_to_socket_id(lcore);
if ((socketid != 0) && (numa_on == 0)) {
- printf("Warning: lcore %hhu is on socket %d with numa off\n",
+ printf("Warning: lcore %hu is on socket %d with numa off\n",
lcore, socketid);
}
}
@@ -257,7 +257,7 @@ check_port_config(void)
return 0;
}
-static uint8_t
+static uint16_t
get_port_n_rx_queues(const uint16_t port)
{
int queue = -1;
@@ -275,14 +275,14 @@ get_port_n_rx_queues(const uint16_t port)
}
}
- return (uint8_t)(++queue);
+ return (uint16_t)(++queue);
}
static int
init_lcore_rx_queues(void)
{
uint16_t i, nb_rx_queue;
- uint8_t lcore;
+ uint16_t lcore;
for (i = 0; i < nb_lcore_params; ++i) {
lcore = lcore_params[i].lcore_id;
@@ -448,11 +448,11 @@ parse_config(const char *q_arg)
}
lcore_params_array[nb_lcore_params].port_id =
- (uint8_t)int_fld[FLD_PORT];
+ (uint16_t)int_fld[FLD_PORT];
lcore_params_array[nb_lcore_params].queue_id =
- (uint8_t)int_fld[FLD_QUEUE];
+ (uint16_t)int_fld[FLD_QUEUE];
lcore_params_array[nb_lcore_params].lcore_id =
- (uint8_t)int_fld[FLD_LCORE];
+ (uint16_t)int_fld[FLD_LCORE];
++nb_lcore_params;
}
lcore_params = lcore_params_array;
@@ -1011,7 +1011,8 @@ main(int argc, char **argv)
"ethdev_tx-*",
"pkt_drop",
};
- uint8_t nb_rx_queue, queue, socketid;
+ uint8_t socketid;
+ uint16_t nb_rx_queue, queue;
struct rte_graph_param graph_conf;
struct rte_eth_dev_info dev_info;
uint32_t nb_ports, nb_conf = 0;
--
2.25.1
^ permalink raw reply [flat|nested] 100+ messages in thread
* [PATCH v4 4/6] examples/ipsec-secgw: fix lcore ID restriction
2024-01-16 18:23 ` [PATCH v4 " Sivaprasad Tummala
` (2 preceding siblings ...)
2024-01-16 18:23 ` [PATCH v4 3/6] examples/l3fwd-graph: " Sivaprasad Tummala
@ 2024-01-16 18:23 ` Sivaprasad Tummala
2024-01-16 18:23 ` [PATCH v4 5/6] examples/qos_sched: " Sivaprasad Tummala
` (3 subsequent siblings)
7 siblings, 0 replies; 100+ messages in thread
From: Sivaprasad Tummala @ 2024-01-16 18:23 UTC (permalink / raw)
To: david.hunt, anatoly.burakov, jerinj, radu.nicolau, gakhil,
cristian.dumitrescu, ferruh.yigit, konstantin.ananyev, stephen
Cc: dev, sergio.gonzalez.monroy, stable
Currently the config option allows lcore IDs up to 255,
irrespective of RTE_MAX_LCORES and needs to be fixed.
The patch allows config options based on DPDK config.
Fixes: d299106e8e31 ("examples/ipsec-secgw: add IPsec sample application")
Cc: sergio.gonzalez.monroy@intel.com
Cc: stable@dpdk.org
Signed-off-by: Sivaprasad Tummala <sivaprasad.tummala@amd.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@huawei.com>
---
examples/ipsec-secgw/event_helper.h | 2 +-
examples/ipsec-secgw/ipsec-secgw.c | 34 ++++++++++++++---------------
examples/ipsec-secgw/ipsec.c | 2 +-
examples/ipsec-secgw/ipsec.h | 2 +-
examples/ipsec-secgw/ipsec_worker.c | 10 ++++-----
5 files changed, 24 insertions(+), 26 deletions(-)
diff --git a/examples/ipsec-secgw/event_helper.h b/examples/ipsec-secgw/event_helper.h
index dfb81bfcf1..9923700f03 100644
--- a/examples/ipsec-secgw/event_helper.h
+++ b/examples/ipsec-secgw/event_helper.h
@@ -102,7 +102,7 @@ struct eh_event_link_info {
/**< Event port ID */
uint8_t eventq_id;
/**< Event queue to be linked to the port */
- uint8_t lcore_id;
+ uint16_t lcore_id;
/**< Lcore to be polling on this port */
};
diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index bf98d2618b..713a5b5812 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
@@ -220,8 +220,8 @@ static const char *cfgfile;
struct lcore_params {
uint16_t port_id;
- uint8_t queue_id;
- uint8_t lcore_id;
+ uint16_t queue_id;
+ uint16_t lcore_id;
} __rte_cache_aligned;
static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
@@ -696,8 +696,7 @@ ipsec_poll_mode_worker(void)
uint32_t lcore_id;
uint64_t prev_tsc, diff_tsc, cur_tsc;
int32_t i, nb_rx;
- uint16_t portid;
- uint8_t queueid;
+ uint16_t portid, queueid;
struct lcore_conf *qconf;
int32_t rc, socket_id;
const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
@@ -744,7 +743,7 @@ ipsec_poll_mode_worker(void)
portid = rxql[i].port_id;
queueid = rxql[i].queue_id;
RTE_LOG(INFO, IPSEC,
- " -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
+ " -- lcoreid=%u portid=%u rxqueueid=%hu\n",
lcore_id, portid, queueid);
}
@@ -789,8 +788,7 @@ int
check_flow_params(uint16_t fdir_portid, uint8_t fdir_qid)
{
uint16_t i;
- uint16_t portid;
- uint8_t queueid;
+ uint16_t portid, queueid;
for (i = 0; i < nb_lcore_params; ++i) {
portid = lcore_params_array[i].port_id;
@@ -810,7 +808,7 @@ check_flow_params(uint16_t fdir_portid, uint8_t fdir_qid)
static int32_t
check_poll_mode_params(struct eh_conf *eh_conf)
{
- uint8_t lcore;
+ uint16_t lcore;
uint16_t portid;
uint16_t i;
int32_t socket_id;
@@ -829,13 +827,13 @@ check_poll_mode_params(struct eh_conf *eh_conf)
for (i = 0; i < nb_lcore_params; ++i) {
lcore = lcore_params[i].lcore_id;
if (!rte_lcore_is_enabled(lcore)) {
- printf("error: lcore %hhu is not enabled in "
+ printf("error: lcore %hu is not enabled in "
"lcore mask\n", lcore);
return -1;
}
socket_id = rte_lcore_to_socket_id(lcore);
if (socket_id != 0 && numa_on == 0) {
- printf("warning: lcore %hhu is on socket %d "
+ printf("warning: lcore %hu is on socket %d "
"with numa off\n",
lcore, socket_id);
}
@@ -852,7 +850,7 @@ check_poll_mode_params(struct eh_conf *eh_conf)
return 0;
}
-static uint8_t
+static uint16_t
get_port_nb_rx_queues(const uint16_t port)
{
int32_t queue = -1;
@@ -863,14 +861,14 @@ get_port_nb_rx_queues(const uint16_t port)
lcore_params[i].queue_id > queue)
queue = lcore_params[i].queue_id;
}
- return (uint8_t)(++queue);
+ return (uint16_t)(++queue);
}
static int32_t
init_lcore_rx_queues(void)
{
uint16_t i, nb_rx_queue;
- uint8_t lcore;
+ uint16_t lcore;
for (i = 0; i < nb_lcore_params; ++i) {
lcore = lcore_params[i].lcore_id;
@@ -1051,6 +1049,8 @@ parse_config(const char *q_arg)
char *str_fld[_NUM_FLD];
int32_t i;
uint32_t size;
+ uint16_t max_fld[_NUM_FLD] = {RTE_MAX_ETHPORTS,
+ USHRT_MAX, RTE_MAX_LCORE};
nb_lcore_params = 0;
@@ -1071,7 +1071,7 @@ parse_config(const char *q_arg)
for (i = 0; i < _NUM_FLD; i++) {
errno = 0;
int_fld[i] = strtoul(str_fld[i], &end, 0);
- if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
+ if (errno != 0 || end == str_fld[i] || int_fld[i] > max_fld[i])
return -1;
}
if (nb_lcore_params >= MAX_LCORE_PARAMS) {
@@ -1080,11 +1080,11 @@ parse_config(const char *q_arg)
return -1;
}
lcore_params_array[nb_lcore_params].port_id =
- (uint8_t)int_fld[FLD_PORT];
+ (uint16_t)int_fld[FLD_PORT];
lcore_params_array[nb_lcore_params].queue_id =
- (uint8_t)int_fld[FLD_QUEUE];
+ (uint16_t)int_fld[FLD_QUEUE];
lcore_params_array[nb_lcore_params].lcore_id =
- (uint8_t)int_fld[FLD_LCORE];
+ (uint16_t)int_fld[FLD_LCORE];
++nb_lcore_params;
}
lcore_params = lcore_params_array;
diff --git a/examples/ipsec-secgw/ipsec.c b/examples/ipsec-secgw/ipsec.c
index f5cec4a928..5ebb71bb9a 100644
--- a/examples/ipsec-secgw/ipsec.c
+++ b/examples/ipsec-secgw/ipsec.c
@@ -259,7 +259,7 @@ create_lookaside_session(struct ipsec_ctx *ipsec_ctx_lcore[],
continue;
/* Looking for cryptodev, which can handle this SA */
- key.lcore_id = (uint8_t)lcore_id;
+ key.lcore_id = (uint16_t)lcore_id;
key.cipher_algo = (uint8_t)sa->cipher_algo;
key.auth_algo = (uint8_t)sa->auth_algo;
key.aead_algo = (uint8_t)sa->aead_algo;
diff --git a/examples/ipsec-secgw/ipsec.h b/examples/ipsec-secgw/ipsec.h
index bdcada1c40..29b9b283f0 100644
--- a/examples/ipsec-secgw/ipsec.h
+++ b/examples/ipsec-secgw/ipsec.h
@@ -285,7 +285,7 @@ struct cnt_blk {
struct lcore_rx_queue {
uint16_t port_id;
- uint8_t queue_id;
+ uint16_t queue_id;
void *sec_ctx;
} __rte_cache_aligned;
diff --git a/examples/ipsec-secgw/ipsec_worker.c b/examples/ipsec-secgw/ipsec_worker.c
index 8d122e8519..90a4c38ba4 100644
--- a/examples/ipsec-secgw/ipsec_worker.c
+++ b/examples/ipsec-secgw/ipsec_worker.c
@@ -1598,8 +1598,7 @@ ipsec_poll_mode_wrkr_inl_pr(void)
int32_t socket_id;
uint32_t lcore_id;
int32_t i, nb_rx;
- uint16_t portid;
- uint8_t queueid;
+ uint16_t portid, queueid;
prev_tsc = 0;
lcore_id = rte_lcore_id();
@@ -1633,7 +1632,7 @@ ipsec_poll_mode_wrkr_inl_pr(void)
portid = rxql[i].port_id;
queueid = rxql[i].queue_id;
RTE_LOG(INFO, IPSEC,
- " -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
+ " -- lcoreid=%u portid=%u rxqueueid=%hu\n",
lcore_id, portid, queueid);
}
@@ -1729,8 +1728,7 @@ ipsec_poll_mode_wrkr_inl_pr_ss(void)
uint32_t i, nb_rx, j;
int32_t socket_id;
uint32_t lcore_id;
- uint16_t portid;
- uint8_t queueid;
+ uint16_t portid, queueid;
prev_tsc = 0;
lcore_id = rte_lcore_id();
@@ -1764,7 +1762,7 @@ ipsec_poll_mode_wrkr_inl_pr_ss(void)
portid = rxql[i].port_id;
queueid = rxql[i].queue_id;
RTE_LOG(INFO, IPSEC,
- " -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
+ " -- lcoreid=%u portid=%u rxqueueid=%hu\n",
lcore_id, portid, queueid);
}
--
2.25.1
^ permalink raw reply [flat|nested] 100+ messages in thread
* [PATCH v4 5/6] examples/qos_sched: fix lcore ID restriction
2024-01-16 18:23 ` [PATCH v4 " Sivaprasad Tummala
` (3 preceding siblings ...)
2024-01-16 18:23 ` [PATCH v4 4/6] examples/ipsec-secgw: " Sivaprasad Tummala
@ 2024-01-16 18:23 ` Sivaprasad Tummala
2024-01-16 18:23 ` [PATCH v4 6/6] examples/vm_power_manager: " Sivaprasad Tummala
` (2 subsequent siblings)
7 siblings, 0 replies; 100+ messages in thread
From: Sivaprasad Tummala @ 2024-01-16 18:23 UTC (permalink / raw)
To: david.hunt, anatoly.burakov, jerinj, radu.nicolau, gakhil,
cristian.dumitrescu, ferruh.yigit, konstantin.ananyev, stephen
Cc: dev, stable
Currently the config option allows lcore IDs up to 255,
irrespective of RTE_MAX_LCORES and needs to be fixed.
The patch allows config options based on DPDK config.
Fixes: de3cfa2c9823 ("sched: initial import")
Cc: stable@dpdk.org
Signed-off-by: Sivaprasad Tummala <sivaprasad.tummala@amd.com>
---
examples/qos_sched/args.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/examples/qos_sched/args.c b/examples/qos_sched/args.c
index e97273152a..22fe76eeb5 100644
--- a/examples/qos_sched/args.c
+++ b/examples/qos_sched/args.c
@@ -182,10 +182,10 @@ app_parse_flow_conf(const char *conf_str)
pconf->rx_port = vals[0];
pconf->tx_port = vals[1];
- pconf->rx_core = (uint8_t)vals[2];
- pconf->wt_core = (uint8_t)vals[3];
+ pconf->rx_core = (uint16_t)vals[2];
+ pconf->wt_core = (uint16_t)vals[3];
if (ret == 5)
- pconf->tx_core = (uint8_t)vals[4];
+ pconf->tx_core = (uint16_t)vals[4];
else
pconf->tx_core = pconf->wt_core;
--
2.25.1
^ permalink raw reply [flat|nested] 100+ messages in thread
* [PATCH v4 6/6] examples/vm_power_manager: fix lcore ID restriction
2024-01-16 18:23 ` [PATCH v4 " Sivaprasad Tummala
` (4 preceding siblings ...)
2024-01-16 18:23 ` [PATCH v4 5/6] examples/qos_sched: " Sivaprasad Tummala
@ 2024-01-16 18:23 ` Sivaprasad Tummala
2024-01-16 18:23 ` [PATCH v4 0/6] " Sivaprasad Tummala
2024-03-18 17:31 ` [PATCH v5 " Sivaprasad Tummala
7 siblings, 0 replies; 100+ messages in thread
From: Sivaprasad Tummala @ 2024-01-16 18:23 UTC (permalink / raw)
To: david.hunt, anatoly.burakov, jerinj, radu.nicolau, gakhil,
cristian.dumitrescu, ferruh.yigit, konstantin.ananyev, stephen
Cc: dev, marcinx.hajkowski, stable
Currently the config option allows lcore IDs up to 255,
irrespective of RTE_MAX_LCORES and needs to be fixed.
The patch allows config options based on DPDK config.
Fixes: 0e8f47491f09 ("examples/vm_power: add command to query CPU frequency")
Cc: marcinx.hajkowski@intel.com
Cc: stable@dpdk.org
Signed-off-by: Sivaprasad Tummala <sivaprasad.tummala@amd.com>
---
examples/vm_power_manager/guest_cli/vm_power_cli_guest.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/examples/vm_power_manager/guest_cli/vm_power_cli_guest.c b/examples/vm_power_manager/guest_cli/vm_power_cli_guest.c
index 94bfbbaf78..a586853a76 100644
--- a/examples/vm_power_manager/guest_cli/vm_power_cli_guest.c
+++ b/examples/vm_power_manager/guest_cli/vm_power_cli_guest.c
@@ -401,7 +401,7 @@ check_response_cmd(unsigned int lcore_id, int *result)
struct cmd_set_cpu_freq_result {
cmdline_fixed_string_t set_cpu_freq;
- uint8_t lcore_id;
+ uint16_t lcore_id;
cmdline_fixed_string_t cmd;
};
@@ -444,7 +444,7 @@ cmdline_parse_token_string_t cmd_set_cpu_freq =
set_cpu_freq, "set_cpu_freq");
cmdline_parse_token_num_t cmd_set_cpu_freq_core_num =
TOKEN_NUM_INITIALIZER(struct cmd_set_cpu_freq_result,
- lcore_id, RTE_UINT8);
+ lcore_id, RTE_UINT16);
cmdline_parse_token_string_t cmd_set_cpu_freq_cmd_cmd =
TOKEN_STRING_INITIALIZER(struct cmd_set_cpu_freq_result,
cmd, "up#down#min#max#enable_turbo#disable_turbo");
--
2.25.1
^ permalink raw reply [flat|nested] 100+ messages in thread
* [PATCH v4 0/6] fix lcore ID restriction
2024-01-16 18:23 ` [PATCH v4 " Sivaprasad Tummala
` (5 preceding siblings ...)
2024-01-16 18:23 ` [PATCH v4 6/6] examples/vm_power_manager: " Sivaprasad Tummala
@ 2024-01-16 18:23 ` Sivaprasad Tummala
2024-03-18 17:31 ` [PATCH v5 " Sivaprasad Tummala
7 siblings, 0 replies; 100+ messages in thread
From: Sivaprasad Tummala @ 2024-01-16 18:23 UTC (permalink / raw)
To: david.hunt, anatoly.burakov, jerinj, radu.nicolau, gakhil,
cristian.dumitrescu, ferruh.yigit, konstantin.ananyev, stephen
Cc: dev
With modern CPUs it is possible to have a much
higher CPU count, and thus a higher RTE_MAX_LCORE.
In the DPDK sample applications, the current config
lcore options are hard-limited to 255.
The patchset removes this constraint by allowing
all lcore IDs up to RTE_MAX_LCORE.
v4:
- fixed build errors with queue_id type
in ipsec-secgw
v3:
- updated queue_id type to uint16_t
v2:
- fixed typo with lcore_id type in l3fwd
Sivaprasad Tummala (6):
examples/l3fwd: fix lcore ID restriction
examples/l3fwd-power: fix lcore ID restriction
examples/l3fwd-graph: fix lcore ID restriction
examples/ipsec-secgw: fix lcore ID restriction
examples/qos_sched: fix lcore ID restriction
examples/vm_power_manager: fix lcore ID restriction
examples/ipsec-secgw/event_helper.h | 2 +-
examples/ipsec-secgw/ipsec-secgw.c | 34 +++++------
examples/ipsec-secgw/ipsec.c | 2 +-
examples/ipsec-secgw/ipsec.h | 2 +-
examples/ipsec-secgw/ipsec_worker.c | 10 ++--
examples/l3fwd-graph/main.c | 31 +++++-----
examples/l3fwd-power/main.c | 57 +++++++++----------
examples/l3fwd-power/main.h | 4 +-
examples/l3fwd-power/perf_core.c | 10 ++--
examples/l3fwd/l3fwd.h | 2 +-
examples/l3fwd/l3fwd_acl.c | 4 +-
examples/l3fwd/l3fwd_em.c | 4 +-
examples/l3fwd/l3fwd_event.h | 2 +-
examples/l3fwd/l3fwd_fib.c | 4 +-
examples/l3fwd/l3fwd_lpm.c | 5 +-
examples/l3fwd/main.c | 36 ++++++------
examples/qos_sched/args.c | 6 +-
.../guest_cli/vm_power_cli_guest.c | 4 +-
18 files changed, 110 insertions(+), 109 deletions(-)
--
2.25.1
^ permalink raw reply [flat|nested] 100+ messages in thread
* Re: [PATCH 1/6] examples/l3fwd: fix lcore ID restriction
2023-12-18 7:49 [PATCH 1/6] examples/l3fwd: fix lcore ID restriction Sivaprasad Tummala
` (5 preceding siblings ...)
2023-12-19 3:28 ` [PATCH v2 0/6] " Sivaprasad Tummala
@ 2024-03-07 8:34 ` David Marchand
2024-03-07 9:16 ` Morten Brørup
2024-03-13 9:14 ` Tummala, Sivaprasad
6 siblings, 2 replies; 100+ messages in thread
From: David Marchand @ 2024-03-07 8:34 UTC (permalink / raw)
To: Sivaprasad Tummala
Cc: david.hunt, anatoly.burakov, jerinj, radu.nicolau, gakhil,
cristian.dumitrescu, ferruh.yigit, konstantin.ananyev, dev,
stable
On Mon, Dec 18, 2023 at 8:49 AM Sivaprasad Tummala
<sivaprasad.tummala@amd.com> wrote:
>
> Currently the config option allows lcore IDs up to 255,
> irrespective of RTE_MAX_LCORES and needs to be fixed.
"needs to be fixed" ?
I disagree on the principle.
The examples were written with limitations, this is not a bug.
>
> The patch allows config options based on DPDK config.
>
> Fixes: af75078fece3 ("first public release")
> Cc: stable@dpdk.org
Please remove this request for backport in the next revision.
>
> Signed-off-by: Sivaprasad Tummala <sivaprasad.tummala@amd.com>
> ---
> examples/l3fwd/main.c | 15 +++++++++------
> 1 file changed, 9 insertions(+), 6 deletions(-)
>
> diff --git a/examples/l3fwd/main.c b/examples/l3fwd/main.c
> index 3bf28aec0c..847ded0ad2 100644
> --- a/examples/l3fwd/main.c
> +++ b/examples/l3fwd/main.c
> @@ -99,7 +99,7 @@ struct parm_cfg parm_config;
> struct lcore_params {
> uint16_t port_id;
> uint8_t queue_id;
> - uint8_t lcore_id;
> + uint16_t lcore_id;
lcore_id are stored as an unsigned int (so potentially 32bits) in EAL.
Moving to uint16_t seems not enough.
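A self-contained sketch of the width mismatch being discussed (the struct
names and the 70000 value are invented for illustration; EAL accessors such
as rte_lcore_id() hand lcore IDs around as unsigned int):

#include <stdint.h>
#include <stdio.h>

struct lcore_params_narrow {
        uint16_t port_id;
        uint16_t queue_id;
        uint16_t lcore_id;        /* what the patch proposes */
};

struct lcore_params_wide {
        uint16_t port_id;
        uint16_t queue_id;
        unsigned int lcore_id;    /* matches the EAL type exactly */
};

int
main(void)
{
        unsigned int eal_lcore = 70000;    /* hypothetical, oversized value */
        struct lcore_params_narrow n = { 0, 0, (uint16_t)eal_lcore };
        struct lcore_params_wide w = { 0, 0, eal_lcore };

        /* The narrow field silently wraps: 70000 becomes 4464. */
        printf("narrow=%u wide=%u\n", (unsigned int)n.lcore_id, w.lcore_id);
        return 0;
}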
> } __rte_cache_aligned;
>
> static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
> @@ -292,8 +292,8 @@ setup_l3fwd_lookup_tables(void)
> static int
> check_lcore_params(void)
> {
> - uint8_t queue, lcore;
> - uint16_t i;
> + uint8_t queue;
> + uint16_t i, lcore;
> int socketid;
>
> for (i = 0; i < nb_lcore_params; ++i) {
> @@ -359,7 +359,7 @@ static int
> init_lcore_rx_queues(void)
> {
> uint16_t i, nb_rx_queue;
> - uint8_t lcore;
> + uint16_t lcore;
>
> for (i = 0; i < nb_lcore_params; ++i) {
> lcore = lcore_params[i].lcore_id;
> @@ -500,6 +500,8 @@ parse_config(const char *q_arg)
> char *str_fld[_NUM_FLD];
> int i;
> unsigned size;
> + unsigned int max_fld[_NUM_FLD] = {RTE_MAX_ETHPORTS,
This change here is not described in the commitlog and introduces a bug.
In this example, queue_id is stored as a uint8_t.
queue_id are stored as uint16_t in ethdev.
Yet RTE_MAX_ETHPORTS can be larger than 255.
> + 255, RTE_MAX_LCORE};
>
> nb_lcore_params = 0;
>
> @@ -518,7 +520,8 @@ parse_config(const char *q_arg)
> for (i = 0; i < _NUM_FLD; i++){
> errno = 0;
> int_fld[i] = strtoul(str_fld[i], &end, 0);
> - if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
> + if (errno != 0 || end == str_fld[i] || int_fld[i] >
> + max_fld[i])
> return -1;
> }
> if (nb_lcore_params >= MAX_LCORE_PARAMS) {
> @@ -531,7 +534,7 @@ parse_config(const char *q_arg)
> lcore_params_array[nb_lcore_params].queue_id =
> (uint8_t)int_fld[FLD_QUEUE];
> lcore_params_array[nb_lcore_params].lcore_id =
> - (uint8_t)int_fld[FLD_LCORE];
> + (uint16_t)int_fld[FLD_LCORE];
> ++nb_lcore_params;
> }
> lcore_params = lcore_params_array;
> --
> 2.25.1
>
I did not check other patches in the series, but I suggest you revisit
them in the light of those comments.
--
David Marchand
^ permalink raw reply [flat|nested] 100+ messages in thread
* RE: [PATCH 1/6] examples/l3fwd: fix lcore ID restriction
2024-03-07 8:34 ` [PATCH 1/6] examples/l3fwd: fix lcore ID restriction David Marchand
@ 2024-03-07 9:16 ` Morten Brørup
2024-03-07 9:22 ` David Marchand
2024-03-13 9:14 ` Tummala, Sivaprasad
1 sibling, 1 reply; 100+ messages in thread
From: Morten Brørup @ 2024-03-07 9:16 UTC (permalink / raw)
To: David Marchand, Sivaprasad Tummala
Cc: david.hunt, anatoly.burakov, jerinj, radu.nicolau, gakhil,
cristian.dumitrescu, ferruh.yigit, konstantin.ananyev, dev,
stable
> From: David Marchand [mailto:david.marchand@redhat.com]
> Sent: Thursday, 7 March 2024 09.34
>
> On Mon, Dec 18, 2023 at 8:49 AM Sivaprasad Tummala
> <sivaprasad.tummala@amd.com> wrote:
> >
> > Currently the config option allows lcore IDs up to 255,
> > irrespective of RTE_MAX_LCORES and needs to be fixed.
>
> "needs to be fixed" ?
> I disagree on the principle.
> The examples were written with limitations, this is not a bug.
Unfortunately, l3fwd is not only an example; it is also used for benchmarking. It really belongs in some other directory.
With that in mind, I would consider it a bug that the benchmarking application cannot handle the amount of cores available in modern CPUs.
>
> >
> > The patch allows config options based on DPDK config.
> >
> > Fixes: af75078fece3 ("first public release")
> > Cc: stable@dpdk.org
>
> Please remove this request for backport in the next revision.
Disagree, see comment above.
>
> >
> > Signed-off-by: Sivaprasad Tummala <sivaprasad.tummala@amd.com>
> > ---
> > examples/l3fwd/main.c | 15 +++++++++------
> > 1 file changed, 9 insertions(+), 6 deletions(-)
> >
> > diff --git a/examples/l3fwd/main.c b/examples/l3fwd/main.c
> > index 3bf28aec0c..847ded0ad2 100644
> > --- a/examples/l3fwd/main.c
> > +++ b/examples/l3fwd/main.c
> > @@ -99,7 +99,7 @@ struct parm_cfg parm_config;
> > struct lcore_params {
> > uint16_t port_id;
> > uint8_t queue_id;
> > - uint8_t lcore_id;
> > + uint16_t lcore_id;
>
> lcore_id are stored as an unsigned int (so potentially 32bits) in EAL.
> Moving to uint16_t seems not enough.
<rant>
I might say that the lcore_id API type was not well thought through when it was decided to use unsigned int.
The port_id and queue_id APIs have been updated to use uint16_t.
If the application uses one TX queue per lcore, using the same type for lcore_id as for queue_id should suffice, i.e. uint16_t.
It's unlikely that the lcore_id API type is going to change; we are stuck with unsigned int, although uint16_t would probably be better (to prevent type conversion related bugs).
</rant>
That said, you can follow David's advice and use unsigned int for lcore_id, or you can use uint16_t and add a build time check after the structure:
static_assert(RTE_MAX_LCORE <= UINT16_MAX + 1, "lcore_id does not fit into uint16_t");
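For instance, a minimal sketch of where such a check could sit (RTE_MAX_LCORE
gets a fallback value here only so the snippet builds outside a DPDK tree, and
__rte_cache_aligned is dropped for the same reason):

#include <assert.h>
#include <stdint.h>

#ifndef RTE_MAX_LCORE
#define RTE_MAX_LCORE 128    /* normally from DPDK's rte_config.h */
#endif

struct lcore_params {
        uint16_t port_id;
        uint16_t queue_id;
        uint16_t lcore_id;
};

/* Build-time guard: refuse configurations whose lcore IDs cannot fit. */
static_assert(RTE_MAX_LCORE <= UINT16_MAX + 1,
        "lcore_id does not fit into uint16_t");

int
main(void)
{
        return 0;    /* nothing to do at run time; the check is at compile time */
}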
>
>
> > } __rte_cache_aligned;
> >
> > static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
> > @@ -292,8 +292,8 @@ setup_l3fwd_lookup_tables(void)
> > static int
> > check_lcore_params(void)
> > {
> > - uint8_t queue, lcore;
> > - uint16_t i;
> > + uint8_t queue;
> > + uint16_t i, lcore;
> > int socketid;
> >
> > for (i = 0; i < nb_lcore_params; ++i) {
> > @@ -359,7 +359,7 @@ static int
> > init_lcore_rx_queues(void)
> > {
> > uint16_t i, nb_rx_queue;
> > - uint8_t lcore;
> > + uint16_t lcore;
> >
> > for (i = 0; i < nb_lcore_params; ++i) {
> > lcore = lcore_params[i].lcore_id;
> > @@ -500,6 +500,8 @@ parse_config(const char *q_arg)
> > char *str_fld[_NUM_FLD];
> > int i;
> > unsigned size;
> > + unsigned int max_fld[_NUM_FLD] = {RTE_MAX_ETHPORTS,
>
> This change here is not described in the commitlog and introduces a bug.
>
> In this example, queue_id is stored as a uint8_t.
> queue_id are stored as uint16_t in ethdev.
> Yet RTE_MAX_ETHPORTS can be larger than 255.
The API type of port_id is uint16_t, so RTE_MAX_ETHPORTS can be up to UINT16_MAX (65535).
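Put differently, the bound used in the parse-time check and the type of the
field it is stored into have to agree. A rough sketch of a consistent pairing
(only an illustration of the idea, close to what v5 of the series ends up
doing; the fallback macro values are there so the snippet stands alone):

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

/* Normally from DPDK's rte_config.h; fallbacks so the sketch stands alone. */
#ifndef RTE_MAX_ETHPORTS
#define RTE_MAX_ETHPORTS 32
#endif
#ifndef RTE_MAX_LCORE
#define RTE_MAX_LCORE 512
#endif

enum { FLD_PORT, FLD_QUEUE, FLD_LCORE, _NUM_FLD };

struct lcore_params {
        uint16_t port_id;     /* port_id API type; RTE_MAX_ETHPORTS <= 65535 */
        uint16_t queue_id;    /* checked against USHRT_MAX                   */
        uint32_t lcore_id;    /* widened toward EAL's unsigned int           */
};

/* Each parse-time bound fits the field it will be stored into. */
static const uint32_t max_fld[_NUM_FLD] = {
        RTE_MAX_ETHPORTS, USHRT_MAX, RTE_MAX_LCORE
};

int
main(void)
{
        printf("port<=%u queue<=%u lcore<=%u\n",
                (unsigned int)max_fld[FLD_PORT],
                (unsigned int)max_fld[FLD_QUEUE],
                (unsigned int)max_fld[FLD_LCORE]);
        return 0;
}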
^ permalink raw reply [flat|nested] 100+ messages in thread
* Re: [PATCH 1/6] examples/l3fwd: fix lcore ID restriction
2024-03-07 9:16 ` Morten Brørup
@ 2024-03-07 9:22 ` David Marchand
2024-03-07 9:53 ` Morten Brørup
0 siblings, 1 reply; 100+ messages in thread
From: David Marchand @ 2024-03-07 9:22 UTC (permalink / raw)
To: Morten Brørup
Cc: Sivaprasad Tummala, david.hunt, anatoly.burakov, jerinj,
radu.nicolau, gakhil, cristian.dumitrescu, ferruh.yigit,
konstantin.ananyev, dev, stable
On Thu, Mar 7, 2024 at 10:16 AM Morten Brørup <mb@smartsharesystems.com> wrote:
>
> > From: David Marchand [mailto:david.marchand@redhat.com]
> > Sent: Thursday, 7 March 2024 09.34
> >
> > On Mon, Dec 18, 2023 at 8:49 AM Sivaprasad Tummala
> > <sivaprasad.tummala@amd.com> wrote:
> > >
> > > Currently the config option allows lcore IDs up to 255,
> > > irrespective of RTE_MAX_LCORES and needs to be fixed.
> >
> > "needs to be fixed" ?
> > I disagree on the principle.
> > The examples were written with limitations, this is not a bug.
>
> Unfortunately, l3fwd is not only an example; it is also used for benchmarking. It really belongs in some other directory.
>
> With that in mind, I would consider it a bug that the benchmarking application cannot handle the amount of cores available in modern CPUs.
This is not a bug.
And with careful configuration (using --lcores), you can already start
l3fwd with 254 datapath threads, right?
--
David Marchand
^ permalink raw reply [flat|nested] 100+ messages in thread
* RE: [PATCH 1/6] examples/l3fwd: fix lcore ID restriction
2024-03-07 9:22 ` David Marchand
@ 2024-03-07 9:53 ` Morten Brørup
0 siblings, 0 replies; 100+ messages in thread
From: Morten Brørup @ 2024-03-07 9:53 UTC (permalink / raw)
To: David Marchand
Cc: Sivaprasad Tummala, david.hunt, anatoly.burakov, jerinj,
radu.nicolau, gakhil, cristian.dumitrescu, ferruh.yigit,
konstantin.ananyev, dev, stable
> From: David Marchand [mailto:david.marchand@redhat.com]
> Sent: Thursday, 7 March 2024 10.22
>
> On Thu, Mar 7, 2024 at 10:16 AM Morten Brørup <mb@smartsharesystems.com>
> wrote:
> >
> > > From: David Marchand [mailto:david.marchand@redhat.com]
> > > Sent: Thursday, 7 March 2024 09.34
> > >
> > > On Mon, Dec 18, 2023 at 8:49 AM Sivaprasad Tummala
> > > <sivaprasad.tummala@amd.com> wrote:
> > > >
> > > > Currently the config option allows lcore IDs up to 255,
> > > > irrespective of RTE_MAX_LCORES and needs to be fixed.
> > >
> > > "needs to be fixed" ?
> > > I disagree on the principle.
> > > The examples were written with limitations, this is not a bug.
> >
> > Unfortunately, l3fwd is not only an example; it is also used for
> benchmarking. It really belongs in some other directory.
> >
> > With that in mind, I would consider it a bug that the benchmarking
> application cannot handle the amount of cores available in modern CPUs.
>
> This is not a bug.
DPDK 23.11 LTS supposedly supports Zen 4, which has 512 cores [1].
If l3fwd does not support it, it is a bug in l3fwd.
[1]: https://elixir.bootlin.com/dpdk/v23.11/source/config/x86/meson.build#L88
>
> And with careful configuration (using --lcores), you can already start
> l3fwd with 254 datapath threads, right?
Correct, but you cannot start l3fwd with all 512 cores.
There should be some test scenario to set up l3fwd to use all 512 cores.
If there are practical limitations that effectively prevent l3fwd from using all 512 cores, e.g. if no NIC offers more than 256 queues, then I'll let go and not consider it a bug.
>
>
> --
> David Marchand
^ permalink raw reply [flat|nested] 100+ messages in thread
* RE: [PATCH 1/6] examples/l3fwd: fix lcore ID restriction
2024-03-07 8:34 ` [PATCH 1/6] examples/l3fwd: fix lcore ID restriction David Marchand
2024-03-07 9:16 ` Morten Brørup
@ 2024-03-13 9:14 ` Tummala, Sivaprasad
1 sibling, 0 replies; 100+ messages in thread
From: Tummala, Sivaprasad @ 2024-03-13 9:14 UTC (permalink / raw)
To: David Marchand
Cc: david.hunt, anatoly.burakov, jerinj, radu.nicolau, gakhil,
cristian.dumitrescu, Yigit, Ferruh, konstantin.ananyev, dev,
stable
Hi,
> -----Original Message-----
> From: David Marchand <david.marchand@redhat.com>
> Sent: Thursday, March 7, 2024 2:04 PM
> To: Tummala, Sivaprasad <Sivaprasad.Tummala@amd.com>
> Cc: david.hunt@intel.com; anatoly.burakov@intel.com; jerinj@marvell.com;
> radu.nicolau@intel.com; gakhil@marvell.com; cristian.dumitrescu@intel.com; Yigit,
> Ferruh <Ferruh.Yigit@amd.com>; konstantin.ananyev@huawei.com;
> dev@dpdk.org; stable@dpdk.org
> Subject: Re: [PATCH 1/6] examples/l3fwd: fix lcore ID restriction
>
>
>
> On Mon, Dec 18, 2023 at 8:49 AM Sivaprasad Tummala
> <sivaprasad.tummala@amd.com> wrote:
> >
> > Currently the config option allows lcore IDs up to 255, irrespective
> > of RTE_MAX_LCORES and needs to be fixed.
>
> "needs to be fixed" ?
> I disagree on the principle.
> The examples were written with limitations, this is not a bug.
>
> >
> > The patch allows config options based on DPDK config.
> >
> > Fixes: af75078fece3 ("first public release")
> > Cc: stable@dpdk.org
>
> Please remove this request for backport in the next revision.
>
> >
> > Signed-off-by: Sivaprasad Tummala <sivaprasad.tummala@amd.com>
> > ---
> > examples/l3fwd/main.c | 15 +++++++++------
> > 1 file changed, 9 insertions(+), 6 deletions(-)
> >
> > diff --git a/examples/l3fwd/main.c b/examples/l3fwd/main.c index
> > 3bf28aec0c..847ded0ad2 100644
> > --- a/examples/l3fwd/main.c
> > +++ b/examples/l3fwd/main.c
> > @@ -99,7 +99,7 @@ struct parm_cfg parm_config; struct lcore_params {
> > uint16_t port_id;
> > uint8_t queue_id;
> > - uint8_t lcore_id;
> > + uint16_t lcore_id;
>
> lcore_id are stored as an unsigned int (so potentially 32bits) in EAL.
> Moving to uint16_t seems not enough.
Will fix this to be aligned with EAL. However, I can't think of an SoC/CPU with more than
65536 cores/threads.
>
>
> > } __rte_cache_aligned;
> >
> > static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
> > @@ -292,8 +292,8 @@ setup_l3fwd_lookup_tables(void) static int
> > check_lcore_params(void)
> > {
> > - uint8_t queue, lcore;
> > - uint16_t i;
> > + uint8_t queue;
> > + uint16_t i, lcore;
> > int socketid;
> >
> > for (i = 0; i < nb_lcore_params; ++i) { @@ -359,7 +359,7 @@
> > static int
> > init_lcore_rx_queues(void)
> > {
> > uint16_t i, nb_rx_queue;
> > - uint8_t lcore;
> > + uint16_t lcore;
> >
> > for (i = 0; i < nb_lcore_params; ++i) {
> > lcore = lcore_params[i].lcore_id; @@ -500,6 +500,8 @@
> > parse_config(const char *q_arg)
> > char *str_fld[_NUM_FLD];
> > int i;
> > unsigned size;
> > + unsigned int max_fld[_NUM_FLD] = {RTE_MAX_ETHPORTS,
>
> This change here is not described in the commitlog and introduces a bug.
OK! Will fix it
>
> In this example, queue_id is stored as a uint8_t.
> queue_id are stored as uint16_t in ethdev.
> Yet RTE_MAX_ETHPORTS can be larger than 255.
queue_id is already modified as uint16_t based on the earlier comment from Konstantin.
The port_id is already uint16_t even if RTE_MAX_ETHPORTS exceeds 255.
I will fix the max_fld type to "uint32_t" to accommodate lcore.
>
>
> > + 255, RTE_MAX_LCORE};
> >
> > nb_lcore_params = 0;
> >
> > @@ -518,7 +520,8 @@ parse_config(const char *q_arg)
> > for (i = 0; i < _NUM_FLD; i++){
> > errno = 0;
> > int_fld[i] = strtoul(str_fld[i], &end, 0);
> > - if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
> > + if (errno != 0 || end == str_fld[i] || int_fld[i] >
> > +
> > + max_fld[i])
> > return -1;
> > }
> > if (nb_lcore_params >= MAX_LCORE_PARAMS) { @@ -531,7
> > +534,7 @@ parse_config(const char *q_arg)
> > lcore_params_array[nb_lcore_params].queue_id =
> > (uint8_t)int_fld[FLD_QUEUE];
> > lcore_params_array[nb_lcore_params].lcore_id =
> > - (uint8_t)int_fld[FLD_LCORE];
> > + (uint16_t)int_fld[FLD_LCORE];
> > ++nb_lcore_params;
> > }
> > lcore_params = lcore_params_array;
> > --
> > 2.25.1
> >
>
> I did not check other patches in the series, but I suggest you revisit them in the
> light of those comments.
OK
>
>
> --
> David Marchand
Thanks & Regards,
Sivaprasad
^ permalink raw reply [flat|nested] 100+ messages in thread
* [PATCH v5 0/6] fix lcore ID restriction
2024-01-16 18:23 ` [PATCH v4 " Sivaprasad Tummala
` (6 preceding siblings ...)
2024-01-16 18:23 ` [PATCH v4 0/6] " Sivaprasad Tummala
@ 2024-03-18 17:31 ` Sivaprasad Tummala
2024-03-18 17:31 ` [PATCH v5 1/6] examples/l3fwd: " Sivaprasad Tummala
` (8 more replies)
7 siblings, 9 replies; 100+ messages in thread
From: Sivaprasad Tummala @ 2024-03-18 17:31 UTC (permalink / raw)
To: david.hunt, anatoly.burakov, jerinj, radu.nicolau, gakhil,
cristian.dumitrescu, ferruh.yigit, konstantin.ananyev, stephen,
david.marchand
Cc: dev
With modern CPUs it is possible to have a much
higher CPU count, and thus a higher RTE_MAX_LCORE.
In the DPDK sample applications, the current config
lcore options are hard-limited to 255.
The patchset removes this constraint by allowing
all lcore IDs up to RTE_MAX_LCORE. The queue IDs
are also widened to support values up to 65535.
v5:
- updated lcore_id type to uint32_t
v4:
- fixed build errors with queue_id type
in ipsec-secgw
v3:
- updated queue_id type to uint16_t
v2:
- fixed typo with lcore_id type in l3fwd
Sivaprasad Tummala (6):
examples/l3fwd: fix lcore ID restriction
examples/l3fwd-power: fix lcore ID restriction
examples/l3fwd-graph: fix lcore ID restriction
examples/ipsec-secgw: fix lcore ID restriction
examples/qos_sched: fix lcore ID restriction
examples/vm_power_manager: fix lcore ID restriction
examples/ipsec-secgw/event_helper.h | 2 +-
examples/ipsec-secgw/ipsec-secgw.c | 35 +++++------
examples/ipsec-secgw/ipsec.c | 2 +-
examples/ipsec-secgw/ipsec.h | 6 +-
examples/ipsec-secgw/ipsec_worker.c | 10 ++--
examples/l3fwd-graph/main.c | 33 ++++++-----
examples/l3fwd-power/main.c | 59 +++++++++----------
examples/l3fwd-power/main.h | 4 +-
examples/l3fwd-power/perf_core.c | 16 +++--
examples/l3fwd/l3fwd.h | 2 +-
examples/l3fwd/l3fwd_acl.c | 4 +-
examples/l3fwd/l3fwd_em.c | 4 +-
examples/l3fwd/l3fwd_event.h | 2 +-
examples/l3fwd/l3fwd_fib.c | 4 +-
examples/l3fwd/l3fwd_lpm.c | 5 +-
examples/l3fwd/main.c | 40 +++++++------
examples/qos_sched/args.c | 6 +-
.../guest_cli/vm_power_cli_guest.c | 4 +-
18 files changed, 122 insertions(+), 116 deletions(-)
--
2.25.1
^ permalink raw reply [flat|nested] 100+ messages in thread
* [PATCH v5 1/6] examples/l3fwd: fix lcore ID restriction
2024-03-18 17:31 ` [PATCH v5 " Sivaprasad Tummala
@ 2024-03-18 17:31 ` Sivaprasad Tummala
2024-03-19 7:24 ` Morten Brørup
2024-03-18 17:31 ` [PATCH v5 2/6] examples/l3fwd-power: " Sivaprasad Tummala
` (7 subsequent siblings)
8 siblings, 1 reply; 100+ messages in thread
From: Sivaprasad Tummala @ 2024-03-18 17:31 UTC (permalink / raw)
To: david.hunt, anatoly.burakov, jerinj, radu.nicolau, gakhil,
cristian.dumitrescu, ferruh.yigit, konstantin.ananyev, stephen,
david.marchand
Cc: dev, stable
Currently the config option allows lcore IDs up to 255,
irrespective of RTE_MAX_LCORES and needs to be fixed.
The patch allows config options based on DPDK config.
Fixes: af75078fece3 ("first public release")
Cc: stable@dpdk.org
Signed-off-by: Sivaprasad Tummala <sivaprasad.tummala@amd.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@huawei.com>
---
examples/l3fwd/l3fwd.h | 2 +-
examples/l3fwd/l3fwd_acl.c | 4 ++--
examples/l3fwd/l3fwd_em.c | 4 ++--
examples/l3fwd/l3fwd_event.h | 2 +-
examples/l3fwd/l3fwd_fib.c | 4 ++--
examples/l3fwd/l3fwd_lpm.c | 5 ++---
examples/l3fwd/main.c | 40 ++++++++++++++++++++----------------
7 files changed, 32 insertions(+), 29 deletions(-)
diff --git a/examples/l3fwd/l3fwd.h b/examples/l3fwd/l3fwd.h
index e7ae0e5834..12c264cb4c 100644
--- a/examples/l3fwd/l3fwd.h
+++ b/examples/l3fwd/l3fwd.h
@@ -74,7 +74,7 @@ struct mbuf_table {
struct lcore_rx_queue {
uint16_t port_id;
- uint8_t queue_id;
+ uint16_t queue_id;
} __rte_cache_aligned;
struct lcore_conf {
diff --git a/examples/l3fwd/l3fwd_acl.c b/examples/l3fwd/l3fwd_acl.c
index 401692bcec..2bd63181bc 100644
--- a/examples/l3fwd/l3fwd_acl.c
+++ b/examples/l3fwd/l3fwd_acl.c
@@ -997,7 +997,7 @@ acl_main_loop(__rte_unused void *dummy)
uint64_t prev_tsc, diff_tsc, cur_tsc;
int i, nb_rx;
uint16_t portid;
- uint8_t queueid;
+ uint16_t queueid;
struct lcore_conf *qconf;
int socketid;
const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
@@ -1020,7 +1020,7 @@ acl_main_loop(__rte_unused void *dummy)
portid = qconf->rx_queue_list[i].port_id;
queueid = qconf->rx_queue_list[i].queue_id;
RTE_LOG(INFO, L3FWD,
- " -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
+ " -- lcoreid=%u portid=%u rxqueueid=%hu\n",
lcore_id, portid, queueid);
}
diff --git a/examples/l3fwd/l3fwd_em.c b/examples/l3fwd/l3fwd_em.c
index 40e102b38a..cd2bb4a4bb 100644
--- a/examples/l3fwd/l3fwd_em.c
+++ b/examples/l3fwd/l3fwd_em.c
@@ -586,7 +586,7 @@ em_main_loop(__rte_unused void *dummy)
unsigned lcore_id;
uint64_t prev_tsc, diff_tsc, cur_tsc;
int i, nb_rx;
- uint8_t queueid;
+ uint16_t queueid;
uint16_t portid;
struct lcore_conf *qconf;
const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) /
@@ -609,7 +609,7 @@ em_main_loop(__rte_unused void *dummy)
portid = qconf->rx_queue_list[i].port_id;
queueid = qconf->rx_queue_list[i].queue_id;
RTE_LOG(INFO, L3FWD,
- " -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
+ " -- lcoreid=%u portid=%u rxqueueid=%hu\n",
lcore_id, portid, queueid);
}
diff --git a/examples/l3fwd/l3fwd_event.h b/examples/l3fwd/l3fwd_event.h
index 9aad358003..c6a4a89127 100644
--- a/examples/l3fwd/l3fwd_event.h
+++ b/examples/l3fwd/l3fwd_event.h
@@ -78,8 +78,8 @@ struct l3fwd_event_resources {
uint8_t deq_depth;
uint8_t has_burst;
uint8_t enabled;
- uint8_t eth_rx_queues;
uint8_t vector_enabled;
+ uint16_t eth_rx_queues;
uint16_t vector_size;
uint64_t vector_tmo_ns;
};
diff --git a/examples/l3fwd/l3fwd_fib.c b/examples/l3fwd/l3fwd_fib.c
index 6a21984415..7da55f707a 100644
--- a/examples/l3fwd/l3fwd_fib.c
+++ b/examples/l3fwd/l3fwd_fib.c
@@ -186,7 +186,7 @@ fib_main_loop(__rte_unused void *dummy)
uint64_t prev_tsc, diff_tsc, cur_tsc;
int i, nb_rx;
uint16_t portid;
- uint8_t queueid;
+ uint16_t queueid;
struct lcore_conf *qconf;
const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) /
US_PER_S * BURST_TX_DRAIN_US;
@@ -208,7 +208,7 @@ fib_main_loop(__rte_unused void *dummy)
portid = qconf->rx_queue_list[i].port_id;
queueid = qconf->rx_queue_list[i].queue_id;
RTE_LOG(INFO, L3FWD,
- " -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
+ " -- lcoreid=%u portid=%u rxqueueid=%hu\n",
lcore_id, portid, queueid);
}
diff --git a/examples/l3fwd/l3fwd_lpm.c b/examples/l3fwd/l3fwd_lpm.c
index a484a33089..01d38bc69c 100644
--- a/examples/l3fwd/l3fwd_lpm.c
+++ b/examples/l3fwd/l3fwd_lpm.c
@@ -148,8 +148,7 @@ lpm_main_loop(__rte_unused void *dummy)
unsigned lcore_id;
uint64_t prev_tsc, diff_tsc, cur_tsc;
int i, nb_rx;
- uint16_t portid;
- uint8_t queueid;
+ uint16_t portid, queueid;
struct lcore_conf *qconf;
const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) /
US_PER_S * BURST_TX_DRAIN_US;
@@ -171,7 +170,7 @@ lpm_main_loop(__rte_unused void *dummy)
portid = qconf->rx_queue_list[i].port_id;
queueid = qconf->rx_queue_list[i].queue_id;
RTE_LOG(INFO, L3FWD,
- " -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
+ " -- lcoreid=%u portid=%u rxqueueid=%hu\n",
lcore_id, portid, queueid);
}
diff --git a/examples/l3fwd/main.c b/examples/l3fwd/main.c
index 8d32ae1dd5..19e4d9dfa2 100644
--- a/examples/l3fwd/main.c
+++ b/examples/l3fwd/main.c
@@ -98,8 +98,8 @@ struct parm_cfg parm_config;
struct lcore_params {
uint16_t port_id;
- uint8_t queue_id;
- uint8_t lcore_id;
+ uint16_t queue_id;
+ uint32_t lcore_id;
} __rte_cache_aligned;
static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
@@ -292,24 +292,24 @@ setup_l3fwd_lookup_tables(void)
static int
check_lcore_params(void)
{
- uint8_t queue, lcore;
- uint16_t i;
+ uint16_t queue, i;
+ uint32_t lcore;
int socketid;
for (i = 0; i < nb_lcore_params; ++i) {
queue = lcore_params[i].queue_id;
if (queue >= MAX_RX_QUEUE_PER_PORT) {
- printf("invalid queue number: %hhu\n", queue);
+ printf("invalid queue number: %hu\n", queue);
return -1;
}
lcore = lcore_params[i].lcore_id;
if (!rte_lcore_is_enabled(lcore)) {
- printf("error: lcore %hhu is not enabled in lcore mask\n", lcore);
+ printf("error: lcore %u is not enabled in lcore mask\n", lcore);
return -1;
}
if ((socketid = rte_lcore_to_socket_id(lcore) != 0) &&
(numa_on == 0)) {
- printf("warning: lcore %hhu is on socket %d with numa off \n",
+ printf("warning: lcore %u is on socket %d with numa off\n",
lcore, socketid);
}
}
@@ -336,7 +336,7 @@ check_port_config(void)
return 0;
}
-static uint8_t
+static uint16_t
get_port_n_rx_queues(const uint16_t port)
{
int queue = -1;
@@ -352,21 +352,21 @@ get_port_n_rx_queues(const uint16_t port)
lcore_params[i].port_id);
}
}
- return (uint8_t)(++queue);
+ return (uint16_t)(++queue);
}
static int
init_lcore_rx_queues(void)
{
uint16_t i, nb_rx_queue;
- uint8_t lcore;
+ uint32_t lcore;
for (i = 0; i < nb_lcore_params; ++i) {
lcore = lcore_params[i].lcore_id;
nb_rx_queue = lcore_conf[lcore].n_rx_queue;
if (nb_rx_queue >= MAX_RX_QUEUE_PER_LCORE) {
printf("error: too many queues (%u) for lcore: %u\n",
- (unsigned)nb_rx_queue + 1, (unsigned)lcore);
+ (unsigned int)nb_rx_queue + 1, lcore);
return -1;
} else {
lcore_conf[lcore].rx_queue_list[nb_rx_queue].port_id =
@@ -500,6 +500,8 @@ parse_config(const char *q_arg)
char *str_fld[_NUM_FLD];
int i;
unsigned size;
+ uint32_t max_fld[_NUM_FLD] = {RTE_MAX_ETHPORTS,
+ USHRT_MAX, RTE_MAX_LCORE};
nb_lcore_params = 0;
@@ -518,7 +520,8 @@ parse_config(const char *q_arg)
for (i = 0; i < _NUM_FLD; i++){
errno = 0;
int_fld[i] = strtoul(str_fld[i], &end, 0);
- if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
+ if (errno != 0 || end == str_fld[i] || int_fld[i] >
+ max_fld[i])
return -1;
}
if (nb_lcore_params >= MAX_LCORE_PARAMS) {
@@ -527,11 +530,11 @@ parse_config(const char *q_arg)
return -1;
}
lcore_params_array[nb_lcore_params].port_id =
- (uint8_t)int_fld[FLD_PORT];
+ (uint16_t)int_fld[FLD_PORT];
lcore_params_array[nb_lcore_params].queue_id =
- (uint8_t)int_fld[FLD_QUEUE];
+ (uint16_t)int_fld[FLD_QUEUE];
lcore_params_array[nb_lcore_params].lcore_id =
- (uint8_t)int_fld[FLD_LCORE];
+ (uint32_t)int_fld[FLD_LCORE];
++nb_lcore_params;
}
lcore_params = lcore_params_array;
@@ -630,7 +633,7 @@ parse_event_eth_rx_queues(const char *eth_rx_queues)
{
struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
char *end = NULL;
- uint8_t num_eth_rx_queues;
+ uint16_t num_eth_rx_queues;
/* parse decimal string */
num_eth_rx_queues = strtoul(eth_rx_queues, &end, 10);
@@ -1211,7 +1214,8 @@ config_port_max_pkt_len(struct rte_eth_conf *conf,
static void
l3fwd_poll_resource_setup(void)
{
- uint8_t nb_rx_queue, queue, socketid;
+ uint8_t socketid;
+ uint16_t nb_rx_queue, queue;
struct rte_eth_dev_info dev_info;
uint32_t n_tx_queue, nb_lcores;
struct rte_eth_txconf *txconf;
@@ -1535,7 +1539,7 @@ main(int argc, char **argv)
struct lcore_conf *qconf;
uint16_t queueid, portid;
unsigned int lcore_id;
- uint8_t queue;
+ uint16_t queue;
int ret;
/* init EAL */
--
2.25.1
^ permalink raw reply [flat|nested] 100+ messages in thread
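A condensed, standalone sketch of the per-field bound check that the
parse_config() hunk above introduces. This is not the literal patch: the
fallback defines, the helper name parse_tuple() and the sample values are
assumptions added only so the snippet builds and runs outside a DPDK tree.

#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* Fallbacks so the sketch compiles without rte_config.h. */
#ifndef RTE_MAX_ETHPORTS
#define RTE_MAX_ETHPORTS 32
#endif
#ifndef RTE_MAX_LCORE
#define RTE_MAX_LCORE 128
#endif

enum { FLD_PORT, FLD_QUEUE, FLD_LCORE, _NUM_FLD };

/* Parse one "port,queue,lcore" tuple, bounding each field by its own
 * maximum instead of a blanket 255. Returns 0 on success, -1 on error. */
static int
parse_tuple(const char *fld[_NUM_FLD], unsigned long out[_NUM_FLD])
{
	/* Ports bounded by RTE_MAX_ETHPORTS, queues by 16-bit IDs,
	 * lcores by RTE_MAX_LCORE from the build configuration. */
	const unsigned long max_fld[_NUM_FLD] = {
		RTE_MAX_ETHPORTS, USHRT_MAX, RTE_MAX_LCORE
	};
	char *end;
	int i;

	for (i = 0; i < _NUM_FLD; i++) {
		errno = 0;
		out[i] = strtoul(fld[i], &end, 0);
		if (errno != 0 || end == fld[i] || out[i] > max_fld[i])
			return -1;
	}
	return 0;
}

int
main(void)
{
	const char *fld[_NUM_FLD] = { "0", "300", "70" };
	unsigned long v[_NUM_FLD];

	if (parse_tuple(fld, v) == 0)
		printf("port %lu, queue %lu, lcore %lu accepted\n",
		       v[FLD_PORT], v[FLD_QUEUE], v[FLD_LCORE]);
	else
		printf("tuple rejected\n");
	return 0;
}

Under the old code the queue value 300 above would have been rejected by the
flat "> 255" test even on hardware that supports that many queues.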
* [PATCH v5 2/6] examples/l3fwd-power: fix lcore ID restriction
2024-03-18 17:31 ` [PATCH v5 " Sivaprasad Tummala
2024-03-18 17:31 ` [PATCH v5 1/6] examples/l3fwd: " Sivaprasad Tummala
@ 2024-03-18 17:31 ` Sivaprasad Tummala
2024-03-18 17:31 ` [PATCH v5 3/6] examples/l3fwd-graph: " Sivaprasad Tummala
` (6 subsequent siblings)
8 siblings, 0 replies; 100+ messages in thread
From: Sivaprasad Tummala @ 2024-03-18 17:31 UTC (permalink / raw)
To: david.hunt, anatoly.burakov, jerinj, radu.nicolau, gakhil,
cristian.dumitrescu, ferruh.yigit, konstantin.ananyev, stephen,
david.marchand
Cc: dev, stable
Currently the config option accepts lcore IDs only up to 255,
irrespective of RTE_MAX_LCORE, and needs to be fixed.
The patch bounds the config values by the DPDK build configuration
(RTE_MAX_LCORE for lcore IDs) instead of a hard-coded 255; a standalone
sketch of the repacked RX interrupt data follows this patch.
Fixes: f88e7c175a68 ("examples/l3fwd-power: add high/regular perf cores options")
Cc: radu.nicolau@intel.com
Cc: stable@dpdk.org
Signed-off-by: Sivaprasad Tummala <sivaprasad.tummala@amd.com>
---
examples/l3fwd-power/main.c | 59 ++++++++++++++++----------------
examples/l3fwd-power/main.h | 4 +--
examples/l3fwd-power/perf_core.c | 16 +++++----
3 files changed, 41 insertions(+), 38 deletions(-)
diff --git a/examples/l3fwd-power/main.c b/examples/l3fwd-power/main.c
index f4adcf41b5..4430605df0 100644
--- a/examples/l3fwd-power/main.c
+++ b/examples/l3fwd-power/main.c
@@ -214,7 +214,7 @@ enum freq_scale_hint_t
struct lcore_rx_queue {
uint16_t port_id;
- uint8_t queue_id;
+ uint16_t queue_id;
enum freq_scale_hint_t freq_up_hint;
uint32_t zero_rx_packet_count;
uint32_t idle_hint;
@@ -838,7 +838,7 @@ sleep_until_rx_interrupt(int num, int lcore)
struct rte_epoll_event event[num];
int n, i;
uint16_t port_id;
- uint8_t queue_id;
+ uint16_t queue_id;
void *data;
if (status[lcore].wakeup) {
@@ -850,9 +850,9 @@ sleep_until_rx_interrupt(int num, int lcore)
n = rte_epoll_wait(RTE_EPOLL_PER_THREAD, event, num, 10);
for (i = 0; i < n; i++) {
data = event[i].epdata.data;
- port_id = ((uintptr_t)data) >> CHAR_BIT;
+ port_id = ((uintptr_t)data) >> (sizeof(uint16_t) * CHAR_BIT);
queue_id = ((uintptr_t)data) &
- RTE_LEN2MASK(CHAR_BIT, uint8_t);
+ RTE_LEN2MASK((sizeof(uint16_t) * CHAR_BIT), uint16_t);
RTE_LOG(INFO, L3FWD_POWER,
"lcore %u is waked up from rx interrupt on"
" port %d queue %d\n",
@@ -867,7 +867,7 @@ static void turn_on_off_intr(struct lcore_conf *qconf, bool on)
{
int i;
struct lcore_rx_queue *rx_queue;
- uint8_t queue_id;
+ uint16_t queue_id;
uint16_t port_id;
for (i = 0; i < qconf->n_rx_queue; ++i) {
@@ -887,7 +887,7 @@ static void turn_on_off_intr(struct lcore_conf *qconf, bool on)
static int event_register(struct lcore_conf *qconf)
{
struct lcore_rx_queue *rx_queue;
- uint8_t queueid;
+ uint16_t queueid;
uint16_t portid;
uint32_t data;
int ret;
@@ -897,7 +897,7 @@ static int event_register(struct lcore_conf *qconf)
rx_queue = &(qconf->rx_queue_list[i]);
portid = rx_queue->port_id;
queueid = rx_queue->queue_id;
- data = portid << CHAR_BIT | queueid;
+ data = portid << (sizeof(uint16_t) * CHAR_BIT) | queueid;
ret = rte_eth_dev_rx_intr_ctl_q(portid, queueid,
RTE_EPOLL_PER_THREAD,
@@ -917,8 +917,7 @@ static int main_intr_loop(__rte_unused void *dummy)
unsigned int lcore_id;
uint64_t prev_tsc, diff_tsc, cur_tsc;
int i, j, nb_rx;
- uint8_t queueid;
- uint16_t portid;
+ uint16_t portid, queueid;
struct lcore_conf *qconf;
struct lcore_rx_queue *rx_queue;
uint32_t lcore_rx_idle_count = 0;
@@ -946,7 +945,7 @@ static int main_intr_loop(__rte_unused void *dummy)
portid = qconf->rx_queue_list[i].port_id;
queueid = qconf->rx_queue_list[i].queue_id;
RTE_LOG(INFO, L3FWD_POWER,
- " -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
+ " -- lcoreid=%u portid=%u rxqueueid=%hu\n",
lcore_id, portid, queueid);
}
@@ -1083,8 +1082,7 @@ main_telemetry_loop(__rte_unused void *dummy)
unsigned int lcore_id;
uint64_t prev_tsc, diff_tsc, cur_tsc, prev_tel_tsc;
int i, j, nb_rx;
- uint8_t queueid;
- uint16_t portid;
+ uint16_t portid, queueid;
struct lcore_conf *qconf;
struct lcore_rx_queue *rx_queue;
uint64_t ep_nep[2] = {0}, fp_nfp[2] = {0};
@@ -1114,7 +1112,7 @@ main_telemetry_loop(__rte_unused void *dummy)
portid = qconf->rx_queue_list[i].port_id;
queueid = qconf->rx_queue_list[i].queue_id;
RTE_LOG(INFO, L3FWD_POWER, " -- lcoreid=%u portid=%u "
- "rxqueueid=%hhu\n", lcore_id, portid, queueid);
+ "rxqueueid=%hu\n", lcore_id, portid, queueid);
}
while (!is_done()) {
@@ -1205,8 +1203,7 @@ main_legacy_loop(__rte_unused void *dummy)
uint64_t prev_tsc, diff_tsc, cur_tsc, tim_res_tsc, hz;
uint64_t prev_tsc_power = 0, cur_tsc_power, diff_tsc_power;
int i, j, nb_rx;
- uint8_t queueid;
- uint16_t portid;
+ uint16_t portid, queueid;
struct lcore_conf *qconf;
struct lcore_rx_queue *rx_queue;
enum freq_scale_hint_t lcore_scaleup_hint;
@@ -1234,7 +1231,7 @@ main_legacy_loop(__rte_unused void *dummy)
portid = qconf->rx_queue_list[i].port_id;
queueid = qconf->rx_queue_list[i].queue_id;
RTE_LOG(INFO, L3FWD_POWER, " -- lcoreid=%u portid=%u "
- "rxqueueid=%hhu\n", lcore_id, portid, queueid);
+ "rxqueueid=%hu\n", lcore_id, portid, queueid);
}
/* add into event wait list */
@@ -1399,25 +1396,25 @@ main_legacy_loop(__rte_unused void *dummy)
static int
check_lcore_params(void)
{
- uint8_t queue, lcore;
- uint16_t i;
+ uint16_t queue, i;
+ uint32_t lcore;
int socketid;
for (i = 0; i < nb_lcore_params; ++i) {
queue = lcore_params[i].queue_id;
if (queue >= MAX_RX_QUEUE_PER_PORT) {
- printf("invalid queue number: %hhu\n", queue);
+ printf("invalid queue number: %hu\n", queue);
return -1;
}
lcore = lcore_params[i].lcore_id;
if (!rte_lcore_is_enabled(lcore)) {
- printf("error: lcore %hhu is not enabled in lcore "
+ printf("error: lcore %u is not enabled in lcore "
"mask\n", lcore);
return -1;
}
if ((socketid = rte_lcore_to_socket_id(lcore) != 0) &&
(numa_on == 0)) {
- printf("warning: lcore %hhu is on socket %d with numa "
+ printf("warning: lcore %u is on socket %d with numa "
"off\n", lcore, socketid);
}
if (app_mode == APP_MODE_TELEMETRY && lcore == rte_lcore_id()) {
@@ -1451,7 +1448,7 @@ check_port_config(void)
return 0;
}
-static uint8_t
+static uint16_t
get_port_n_rx_queues(const uint16_t port)
{
int queue = -1;
@@ -1462,14 +1459,14 @@ get_port_n_rx_queues(const uint16_t port)
lcore_params[i].queue_id > queue)
queue = lcore_params[i].queue_id;
}
- return (uint8_t)(++queue);
+ return (uint16_t)(++queue);
}
static int
init_lcore_rx_queues(void)
{
uint16_t i, nb_rx_queue;
- uint8_t lcore;
+ uint32_t lcore;
for (i = 0; i < nb_lcore_params; ++i) {
lcore = lcore_params[i].lcore_id;
@@ -1661,6 +1658,8 @@ parse_config(const char *q_arg)
char *str_fld[_NUM_FLD];
int i;
unsigned size;
+ unsigned int max_fld[_NUM_FLD] = {RTE_MAX_ETHPORTS,
+ USHRT_MAX, RTE_MAX_LCORE};
nb_lcore_params = 0;
@@ -1681,7 +1680,7 @@ parse_config(const char *q_arg)
errno = 0;
int_fld[i] = strtoul(str_fld[i], &end, 0);
if (errno != 0 || end == str_fld[i] || int_fld[i] >
- 255)
+ max_fld[i])
return -1;
}
if (nb_lcore_params >= MAX_LCORE_PARAMS) {
@@ -1690,11 +1689,11 @@ parse_config(const char *q_arg)
return -1;
}
lcore_params_array[nb_lcore_params].port_id =
- (uint8_t)int_fld[FLD_PORT];
+ (uint16_t)int_fld[FLD_PORT];
lcore_params_array[nb_lcore_params].queue_id =
- (uint8_t)int_fld[FLD_QUEUE];
+ (uint16_t)int_fld[FLD_QUEUE];
lcore_params_array[nb_lcore_params].lcore_id =
- (uint8_t)int_fld[FLD_LCORE];
+ (uint32_t)int_fld[FLD_LCORE];
++nb_lcore_params;
}
lcore_params = lcore_params_array;
@@ -2501,8 +2500,8 @@ main(int argc, char **argv)
uint64_t hz;
uint32_t n_tx_queue, nb_lcores;
uint32_t dev_rxq_num, dev_txq_num;
- uint8_t nb_rx_queue, queue, socketid;
- uint16_t portid;
+ uint8_t socketid;
+ uint16_t portid, nb_rx_queue, queue;
const char *ptr_strings[NUM_TELSTATS];
/* init EAL */
diff --git a/examples/l3fwd-power/main.h b/examples/l3fwd-power/main.h
index 258de98f5b..194bd82102 100644
--- a/examples/l3fwd-power/main.h
+++ b/examples/l3fwd-power/main.h
@@ -9,8 +9,8 @@
#define MAX_LCORE_PARAMS 1024
struct lcore_params {
uint16_t port_id;
- uint8_t queue_id;
- uint8_t lcore_id;
+ uint16_t queue_id;
+ uint32_t lcore_id;
} __rte_cache_aligned;
extern struct lcore_params *lcore_params;
diff --git a/examples/l3fwd-power/perf_core.c b/examples/l3fwd-power/perf_core.c
index 41ef6d0c9a..c2cdc4bf49 100644
--- a/examples/l3fwd-power/perf_core.c
+++ b/examples/l3fwd-power/perf_core.c
@@ -22,9 +22,9 @@ static uint16_t nb_hp_lcores;
struct perf_lcore_params {
uint16_t port_id;
- uint8_t queue_id;
+ uint16_t queue_id;
uint8_t high_perf;
- uint8_t lcore_idx;
+ uint32_t lcore_idx;
} __rte_cache_aligned;
static struct perf_lcore_params prf_lc_prms[MAX_LCORE_PARAMS];
@@ -132,6 +132,8 @@ parse_perf_config(const char *q_arg)
char *str_fld[_NUM_FLD];
int i;
unsigned int size;
+ unsigned int max_fld[_NUM_FLD] = {RTE_MAX_ETHPORTS, USHRT_MAX,
+ UCHAR_MAX, RTE_MAX_LCORE};
nb_prf_lc_prms = 0;
@@ -152,7 +154,9 @@ parse_perf_config(const char *q_arg)
for (i = 0; i < _NUM_FLD; i++) {
errno = 0;
int_fld[i] = strtoul(str_fld[i], &end, 0);
- if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
+ if (errno != 0 || end == str_fld[i] || int_fld[i] >
+ max_fld[i])
+
return -1;
}
if (nb_prf_lc_prms >= MAX_LCORE_PARAMS) {
@@ -161,13 +165,13 @@ parse_perf_config(const char *q_arg)
return -1;
}
prf_lc_prms[nb_prf_lc_prms].port_id =
- (uint8_t)int_fld[FLD_PORT];
+ (uint16_t)int_fld[FLD_PORT];
prf_lc_prms[nb_prf_lc_prms].queue_id =
- (uint8_t)int_fld[FLD_QUEUE];
+ (uint16_t)int_fld[FLD_QUEUE];
prf_lc_prms[nb_prf_lc_prms].high_perf =
!!(uint8_t)int_fld[FLD_LCORE_HP];
prf_lc_prms[nb_prf_lc_prms].lcore_idx =
- (uint8_t)int_fld[FLD_LCORE_IDX];
+ (uint32_t)int_fld[FLD_LCORE_IDX];
++nb_prf_lc_prms;
}
--
2.25.1
^ permalink raw reply [flat|nested] 100+ messages in thread
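The interrupt-mode hunks above also repack the (port, queue) pair carried in
the epoll user data: with 16-bit queue IDs the port has to move up by 16 bits
instead of CHAR_BIT. A self-contained round-trip sketch of that encoding;
UINT16_MAX stands in for the RTE_LEN2MASK((sizeof(uint16_t) * CHAR_BIT),
uint16_t) expression used in the patch, and the port/queue numbers are
arbitrary.

#include <assert.h>
#include <limits.h>
#include <stdint.h>
#include <stdio.h>

/* Pack port and queue into the single integer handed to
 * rte_eth_dev_rx_intr_ctl_q(): queue now occupies the low 16 bits,
 * so the port is shifted into the next 16. */
static uintptr_t
pack_port_queue(uint16_t port_id, uint16_t queue_id)
{
	return ((uintptr_t)port_id << (sizeof(uint16_t) * CHAR_BIT)) | queue_id;
}

static void
unpack_port_queue(uintptr_t data, uint16_t *port_id, uint16_t *queue_id)
{
	*port_id = data >> (sizeof(uint16_t) * CHAR_BIT);
	*queue_id = data & UINT16_MAX;
}

int
main(void)
{
	uint16_t port, queue;

	unpack_port_queue(pack_port_queue(3, 300), &port, &queue);
	assert(port == 3 && queue == 300);
	printf("port %u queue %u\n", port, queue);
	return 0;
}

Under the old layout the queue field was a uint8_t and the shift was CHAR_BIT,
so queue IDs above 255 could not be carried through the interrupt path at all.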
* [PATCH v5 3/6] examples/l3fwd-graph: fix lcore ID restriction
2024-03-18 17:31 ` [PATCH v5 " Sivaprasad Tummala
2024-03-18 17:31 ` [PATCH v5 1/6] examples/l3fwd: " Sivaprasad Tummala
2024-03-18 17:31 ` [PATCH v5 2/6] examples/l3fwd-power: " Sivaprasad Tummala
@ 2024-03-18 17:31 ` Sivaprasad Tummala
2024-03-18 17:31 ` [PATCH v5 4/6] examples/ipsec-secgw: " Sivaprasad Tummala
` (5 subsequent siblings)
8 siblings, 0 replies; 100+ messages in thread
From: Sivaprasad Tummala @ 2024-03-18 17:31 UTC (permalink / raw)
To: david.hunt, anatoly.burakov, jerinj, radu.nicolau, gakhil,
cristian.dumitrescu, ferruh.yigit, konstantin.ananyev, stephen,
david.marchand
Cc: dev, ndabilpuram, stable
Currently the config option accepts lcore IDs only up to 255,
irrespective of RTE_MAX_LCORE, and needs to be fixed.
The patch widens the queue and lcore ID fields and bounds the config
values by the DPDK build configuration; a short note on the matching
printf() specifiers follows this patch.
Fixes: 08bd1a174461 ("examples/l3fwd-graph: add graph-based l3fwd skeleton")
Cc: ndabilpuram@marvell.com
Cc: stable@dpdk.org
Signed-off-by: Sivaprasad Tummala <sivaprasad.tummala@amd.com>
---
examples/l3fwd-graph/main.c | 33 +++++++++++++++++----------------
1 file changed, 17 insertions(+), 16 deletions(-)
diff --git a/examples/l3fwd-graph/main.c b/examples/l3fwd-graph/main.c
index 96cb1c81ff..557ac6d823 100644
--- a/examples/l3fwd-graph/main.c
+++ b/examples/l3fwd-graph/main.c
@@ -90,7 +90,7 @@ static int pcap_trace_enable;
struct lcore_rx_queue {
uint16_t port_id;
- uint8_t queue_id;
+ uint16_t queue_id;
char node_name[RTE_NODE_NAMESIZE];
};
@@ -110,8 +110,8 @@ static struct lcore_conf lcore_conf[RTE_MAX_LCORE];
struct lcore_params {
uint16_t port_id;
- uint8_t queue_id;
- uint8_t lcore_id;
+ uint16_t queue_id;
+ uint32_t lcore_id;
} __rte_cache_aligned;
static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
@@ -205,19 +205,19 @@ check_worker_model_params(void)
static int
check_lcore_params(void)
{
- uint8_t queue, lcore;
+ uint16_t queue, i;
int socketid;
- uint16_t i;
+ uint32_t lcore;
for (i = 0; i < nb_lcore_params; ++i) {
queue = lcore_params[i].queue_id;
if (queue >= MAX_RX_QUEUE_PER_PORT) {
- printf("Invalid queue number: %hhu\n", queue);
+ printf("Invalid queue number: %hu\n", queue);
return -1;
}
lcore = lcore_params[i].lcore_id;
if (!rte_lcore_is_enabled(lcore)) {
- printf("Error: lcore %hhu is not enabled in lcore mask\n",
+ printf("Error: lcore %u is not enabled in lcore mask\n",
lcore);
return -1;
}
@@ -228,7 +228,7 @@ check_lcore_params(void)
}
socketid = rte_lcore_to_socket_id(lcore);
if ((socketid != 0) && (numa_on == 0)) {
- printf("Warning: lcore %hhu is on socket %d with numa off\n",
+ printf("Warning: lcore %u is on socket %d with numa off\n",
lcore, socketid);
}
}
@@ -257,7 +257,7 @@ check_port_config(void)
return 0;
}
-static uint8_t
+static uint16_t
get_port_n_rx_queues(const uint16_t port)
{
int queue = -1;
@@ -275,14 +275,14 @@ get_port_n_rx_queues(const uint16_t port)
}
}
- return (uint8_t)(++queue);
+ return (uint16_t)(++queue);
}
static int
init_lcore_rx_queues(void)
{
uint16_t i, nb_rx_queue;
- uint8_t lcore;
+ uint32_t lcore;
for (i = 0; i < nb_lcore_params; ++i) {
lcore = lcore_params[i].lcore_id;
@@ -290,7 +290,7 @@ init_lcore_rx_queues(void)
if (nb_rx_queue >= MAX_RX_QUEUE_PER_LCORE) {
printf("Error: too many queues (%u) for lcore: %u\n",
(unsigned int)nb_rx_queue + 1,
- (unsigned int)lcore);
+ lcore);
return -1;
}
@@ -448,11 +448,11 @@ parse_config(const char *q_arg)
}
lcore_params_array[nb_lcore_params].port_id =
- (uint8_t)int_fld[FLD_PORT];
+ (uint16_t)int_fld[FLD_PORT];
lcore_params_array[nb_lcore_params].queue_id =
- (uint8_t)int_fld[FLD_QUEUE];
+ (uint16_t)int_fld[FLD_QUEUE];
lcore_params_array[nb_lcore_params].lcore_id =
- (uint8_t)int_fld[FLD_LCORE];
+ (uint32_t)int_fld[FLD_LCORE];
++nb_lcore_params;
}
lcore_params = lcore_params_array;
@@ -1011,7 +1011,8 @@ main(int argc, char **argv)
"ethdev_tx-*",
"pkt_drop",
};
- uint8_t nb_rx_queue, queue, socketid;
+ uint8_t socketid;
+ uint16_t nb_rx_queue, queue;
struct rte_graph_param graph_conf;
struct rte_eth_dev_info dev_info;
uint32_t nb_ports, nb_conf = 0;
--
2.25.1
^ permalink raw reply [flat|nested] 100+ messages in thread
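A detail that recurs in every one of these hunks: the printf()/RTE_LOG()
conversion specifiers have to track the widened fields, because %hhu converts
its argument back to unsigned char. The mapping used across the series, shown
as a tiny standalone program with arbitrary values:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint16_t queue_id = 300;	/* was uint8_t, printed with %hhu */
	uint32_t lcore_id = 257;	/* was uint8_t as well */

	/* %hu pairs with uint16_t, plain %u with uint32_t; keeping the
	 * old %hhu would print 300 as 44 and 257 as 1. */
	printf("queue %hu on lcore %u\n", queue_id, lcore_id);

	/* The <inttypes.h> macros are the fully portable spelling. */
	printf("queue %" PRIu16 " on lcore %" PRIu32 "\n", queue_id, lcore_id);
	return 0;
}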
* [PATCH v5 4/6] examples/ipsec-secgw: fix lcore ID restriction
2024-03-18 17:31 ` [PATCH v5 " Sivaprasad Tummala
` (2 preceding siblings ...)
2024-03-18 17:31 ` [PATCH v5 3/6] examples/l3fwd-graph: " Sivaprasad Tummala
@ 2024-03-18 17:31 ` Sivaprasad Tummala
2024-03-18 17:31 ` [PATCH v5 5/6] examples/qos_sched: " Sivaprasad Tummala
` (4 subsequent siblings)
8 siblings, 0 replies; 100+ messages in thread
From: Sivaprasad Tummala @ 2024-03-18 17:31 UTC (permalink / raw)
To: david.hunt, anatoly.burakov, jerinj, radu.nicolau, gakhil,
cristian.dumitrescu, ferruh.yigit, konstantin.ananyev, stephen,
david.marchand
Cc: dev, sergio.gonzalez.monroy, stable
Currently the config option accepts lcore IDs only up to 255,
irrespective of RTE_MAX_LCORE, and needs to be fixed.
The patch widens the lcore ID fields and bounds the config values by
the DPDK build configuration; a sketch of the resulting 8-byte
crypto-device hash key follows this patch.
Fixes: d299106e8e31 ("examples/ipsec-secgw: add IPsec sample application")
Cc: sergio.gonzalez.monroy@intel.com
Cc: stable@dpdk.org
Signed-off-by: Sivaprasad Tummala <sivaprasad.tummala@amd.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@huawei.com>
---
examples/ipsec-secgw/event_helper.h | 2 +-
examples/ipsec-secgw/ipsec-secgw.c | 37 +++++++++++++++--------------
examples/ipsec-secgw/ipsec.c | 2 +-
examples/ipsec-secgw/ipsec.h | 6 ++---
examples/ipsec-secgw/ipsec_worker.c | 10 ++++----
5 files changed, 28 insertions(+), 29 deletions(-)
diff --git a/examples/ipsec-secgw/event_helper.h b/examples/ipsec-secgw/event_helper.h
index dfb81bfcf1..be635685b4 100644
--- a/examples/ipsec-secgw/event_helper.h
+++ b/examples/ipsec-secgw/event_helper.h
@@ -102,7 +102,7 @@ struct eh_event_link_info {
/**< Event port ID */
uint8_t eventq_id;
/**< Event queue to be linked to the port */
- uint8_t lcore_id;
+ uint32_t lcore_id;
/**< Lcore to be polling on this port */
};
diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index 45a303850d..dc7491a2b9 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
@@ -220,8 +220,8 @@ static const char *cfgfile;
struct lcore_params {
uint16_t port_id;
- uint8_t queue_id;
- uint8_t lcore_id;
+ uint16_t queue_id;
+ uint32_t lcore_id;
} __rte_cache_aligned;
static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
@@ -695,8 +695,7 @@ ipsec_poll_mode_worker(void)
struct rte_mbuf *pkts[MAX_PKT_BURST];
uint32_t lcore_id;
uint64_t prev_tsc, diff_tsc, cur_tsc;
- uint16_t i, nb_rx, portid;
- uint8_t queueid;
+ uint16_t i, nb_rx, portid, queueid;
struct lcore_conf *qconf;
int32_t rc, socket_id;
const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
@@ -743,7 +742,7 @@ ipsec_poll_mode_worker(void)
portid = rxql[i].port_id;
queueid = rxql[i].queue_id;
RTE_LOG(INFO, IPSEC,
- " -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
+ " -- lcoreid=%u portid=%u rxqueueid=%hu\n",
lcore_id, portid, queueid);
}
@@ -788,8 +787,7 @@ int
check_flow_params(uint16_t fdir_portid, uint8_t fdir_qid)
{
uint16_t i;
- uint16_t portid;
- uint8_t queueid;
+ uint16_t portid, queueid;
for (i = 0; i < nb_lcore_params; ++i) {
portid = lcore_params_array[i].port_id;
@@ -809,7 +807,7 @@ check_flow_params(uint16_t fdir_portid, uint8_t fdir_qid)
static int32_t
check_poll_mode_params(struct eh_conf *eh_conf)
{
- uint8_t lcore;
+ uint32_t lcore;
uint16_t portid;
uint16_t i;
int32_t socket_id;
@@ -828,13 +826,13 @@ check_poll_mode_params(struct eh_conf *eh_conf)
for (i = 0; i < nb_lcore_params; ++i) {
lcore = lcore_params[i].lcore_id;
if (!rte_lcore_is_enabled(lcore)) {
- printf("error: lcore %hhu is not enabled in "
+ printf("error: lcore %u is not enabled in "
"lcore mask\n", lcore);
return -1;
}
socket_id = rte_lcore_to_socket_id(lcore);
if (socket_id != 0 && numa_on == 0) {
- printf("warning: lcore %hhu is on socket %d "
+ printf("warning: lcore %u is on socket %d "
"with numa off\n",
lcore, socket_id);
}
@@ -851,7 +849,7 @@ check_poll_mode_params(struct eh_conf *eh_conf)
return 0;
}
-static uint8_t
+static uint16_t
get_port_nb_rx_queues(const uint16_t port)
{
int32_t queue = -1;
@@ -862,14 +860,14 @@ get_port_nb_rx_queues(const uint16_t port)
lcore_params[i].queue_id > queue)
queue = lcore_params[i].queue_id;
}
- return (uint8_t)(++queue);
+ return (uint16_t)(++queue);
}
static int32_t
init_lcore_rx_queues(void)
{
uint16_t i, nb_rx_queue;
- uint8_t lcore;
+ uint32_t lcore;
for (i = 0; i < nb_lcore_params; ++i) {
lcore = lcore_params[i].lcore_id;
@@ -1050,6 +1048,8 @@ parse_config(const char *q_arg)
char *str_fld[_NUM_FLD];
int32_t i;
uint32_t size;
+ uint32_t max_fld[_NUM_FLD] = {RTE_MAX_ETHPORTS,
+ USHRT_MAX, RTE_MAX_LCORE};
nb_lcore_params = 0;
@@ -1070,7 +1070,7 @@ parse_config(const char *q_arg)
for (i = 0; i < _NUM_FLD; i++) {
errno = 0;
int_fld[i] = strtoul(str_fld[i], &end, 0);
- if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
+ if (errno != 0 || end == str_fld[i] || int_fld[i] > max_fld[i])
return -1;
}
if (nb_lcore_params >= MAX_LCORE_PARAMS) {
@@ -1079,11 +1079,11 @@ parse_config(const char *q_arg)
return -1;
}
lcore_params_array[nb_lcore_params].port_id =
- (uint8_t)int_fld[FLD_PORT];
+ (uint16_t)int_fld[FLD_PORT];
lcore_params_array[nb_lcore_params].queue_id =
- (uint8_t)int_fld[FLD_QUEUE];
+ (uint16_t)int_fld[FLD_QUEUE];
lcore_params_array[nb_lcore_params].lcore_id =
- (uint8_t)int_fld[FLD_LCORE];
+ (uint32_t)int_fld[FLD_LCORE];
++nb_lcore_params;
}
lcore_params = lcore_params_array;
@@ -1919,7 +1919,8 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads,
struct rte_eth_dev_info dev_info;
struct rte_eth_txconf *txconf;
uint16_t nb_tx_queue, nb_rx_queue;
- uint16_t tx_queueid, rx_queueid, queue, lcore_id;
+ uint16_t tx_queueid, rx_queueid, queue;
+ uint32_t lcore_id;
int32_t ret, socket_id;
struct lcore_conf *qconf;
struct rte_ether_addr ethaddr;
diff --git a/examples/ipsec-secgw/ipsec.c b/examples/ipsec-secgw/ipsec.c
index c321108119..b52b0ffc3d 100644
--- a/examples/ipsec-secgw/ipsec.c
+++ b/examples/ipsec-secgw/ipsec.c
@@ -259,7 +259,7 @@ create_lookaside_session(struct ipsec_ctx *ipsec_ctx_lcore[],
continue;
/* Looking for cryptodev, which can handle this SA */
- key.lcore_id = (uint8_t)lcore_id;
+ key.lcore_id = lcore_id;
key.cipher_algo = (uint8_t)sa->cipher_algo;
key.auth_algo = (uint8_t)sa->auth_algo;
key.aead_algo = (uint8_t)sa->aead_algo;
diff --git a/examples/ipsec-secgw/ipsec.h b/examples/ipsec-secgw/ipsec.h
index bdcada1c40..6526a80d81 100644
--- a/examples/ipsec-secgw/ipsec.h
+++ b/examples/ipsec-secgw/ipsec.h
@@ -256,11 +256,11 @@ extern struct offloads tx_offloads;
* (hash key calculation reads 8 bytes if this struct is size 5 bytes).
*/
struct cdev_key {
- uint16_t lcore_id;
+ uint32_t lcore_id;
uint8_t cipher_algo;
uint8_t auth_algo;
uint8_t aead_algo;
- uint8_t padding[3]; /* padding to 8-byte size should be zeroed */
+ uint8_t padding; /* padding to 8-byte size should be zeroed */
};
struct socket_ctx {
@@ -285,7 +285,7 @@ struct cnt_blk {
struct lcore_rx_queue {
uint16_t port_id;
- uint8_t queue_id;
+ uint16_t queue_id;
void *sec_ctx;
} __rte_cache_aligned;
diff --git a/examples/ipsec-secgw/ipsec_worker.c b/examples/ipsec-secgw/ipsec_worker.c
index 8d122e8519..90a4c38ba4 100644
--- a/examples/ipsec-secgw/ipsec_worker.c
+++ b/examples/ipsec-secgw/ipsec_worker.c
@@ -1598,8 +1598,7 @@ ipsec_poll_mode_wrkr_inl_pr(void)
int32_t socket_id;
uint32_t lcore_id;
int32_t i, nb_rx;
- uint16_t portid;
- uint8_t queueid;
+ uint16_t portid, queueid;
prev_tsc = 0;
lcore_id = rte_lcore_id();
@@ -1633,7 +1632,7 @@ ipsec_poll_mode_wrkr_inl_pr(void)
portid = rxql[i].port_id;
queueid = rxql[i].queue_id;
RTE_LOG(INFO, IPSEC,
- " -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
+ " -- lcoreid=%u portid=%u rxqueueid=%hu\n",
lcore_id, portid, queueid);
}
@@ -1729,8 +1728,7 @@ ipsec_poll_mode_wrkr_inl_pr_ss(void)
uint32_t i, nb_rx, j;
int32_t socket_id;
uint32_t lcore_id;
- uint16_t portid;
- uint8_t queueid;
+ uint16_t portid, queueid;
prev_tsc = 0;
lcore_id = rte_lcore_id();
@@ -1764,7 +1762,7 @@ ipsec_poll_mode_wrkr_inl_pr_ss(void)
portid = rxql[i].port_id;
queueid = rxql[i].queue_id;
RTE_LOG(INFO, IPSEC,
- " -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
+ " -- lcoreid=%u portid=%u rxqueueid=%hu\n",
lcore_id, portid, queueid);
}
--
2.25.1
^ permalink raw reply [flat|nested] 100+ messages in thread
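The ipsec-secgw change above is size-sensitive: struct cdev_key is used as a
hash key and the lookup reads a full 8 bytes, so widening lcore_id from 16 to
32 bits has to be paired with shrinking the explicit padding from 3 bytes to
1. A standalone sketch that mirrors the patched layout and pins the size with
a C11 static assertion (roughly what RTE_BUILD_BUG_ON would express inside
DPDK); build with -std=c11 or newer.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Mirror of the patched layout: 4 + 1 + 1 + 1 + 1 = 8 bytes, so a hash
 * that reads 8 bytes never touches uninitialised memory. */
struct cdev_key {
	uint32_t lcore_id;
	uint8_t cipher_algo;
	uint8_t auth_algo;
	uint8_t aead_algo;
	uint8_t padding;	/* must still be zeroed before hashing */
};

static_assert(sizeof(struct cdev_key) == 8,
	      "cdev_key must stay 8 bytes for the hash lookup");

int
main(void)
{
	printf("sizeof(struct cdev_key) = %zu\n", sizeof(struct cdev_key));
	return 0;
}

With the previous uint16_t lcore_id the same 8-byte total was reached with 3
padding bytes; either way the comment about zeroing the padding still applies.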
* [PATCH v5 5/6] examples/qos_sched: fix lcore ID restriction
2024-03-18 17:31 ` [PATCH v5 " Sivaprasad Tummala
` (3 preceding siblings ...)
2024-03-18 17:31 ` [PATCH v5 4/6] examples/ipsec-secgw: " Sivaprasad Tummala
@ 2024-03-18 17:31 ` Sivaprasad Tummala
2024-03-18 17:31 ` [PATCH v5 6/6] examples/vm_power_manager: " Sivaprasad Tummala
` (3 subsequent siblings)
8 siblings, 0 replies; 100+ messages in thread
From: Sivaprasad Tummala @ 2024-03-18 17:31 UTC (permalink / raw)
To: david.hunt, anatoly.burakov, jerinj, radu.nicolau, gakhil,
cristian.dumitrescu, ferruh.yigit, konstantin.ananyev, stephen,
david.marchand
Cc: dev, stable
Currently the config option accepts lcore IDs only up to 255,
irrespective of RTE_MAX_LCORE, and needs to be fixed.
The patch drops the 8-bit casts so that core IDs above 255 reach the
flow configuration unmodified; a small example of the truncation this
removes follows this patch.
Fixes: de3cfa2c9823 ("sched: initial import")
Cc: stable@dpdk.org
Signed-off-by: Sivaprasad Tummala <sivaprasad.tummala@amd.com>
---
examples/qos_sched/args.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/examples/qos_sched/args.c b/examples/qos_sched/args.c
index 8d61d3e454..886542b3c1 100644
--- a/examples/qos_sched/args.c
+++ b/examples/qos_sched/args.c
@@ -184,10 +184,10 @@ app_parse_flow_conf(const char *conf_str)
pconf->rx_port = vals[0];
pconf->tx_port = vals[1];
- pconf->rx_core = (uint8_t)vals[2];
- pconf->wt_core = (uint8_t)vals[3];
+ pconf->rx_core = vals[2];
+ pconf->wt_core = vals[3];
if (ret == 5)
- pconf->tx_core = (uint8_t)vals[4];
+ pconf->tx_core = vals[4];
else
pconf->tx_core = pconf->wt_core;
--
2.25.1
^ permalink raw reply [flat|nested] 100+ messages in thread
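The casts removed above are the whole bug: (uint8_t)vals[n] silently wraps any
core ID above 255 before it reaches the flow configuration. A minimal
demonstration of that failure mode; the value 300 is arbitrary and only stands
for "a core beyond 255" parsed from the command line.

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	unsigned long parsed = 300;		/* core ID from the flow config string */
	uint8_t old_field = (uint8_t)parsed;	/* old behaviour: 300 wraps to 44 */
	uint32_t new_field = (uint32_t)parsed;	/* patched behaviour: kept as 300 */

	printf("requested core %lu, old field stored %u, new field stores %u\n",
	       parsed, (unsigned int)old_field, (unsigned int)new_field);
	return 0;
}

So before the fix a request for core 300 effectively became a request for core
44, with no error reported at parse time.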
* [PATCH v5 6/6] examples/vm_power_manager: fix lcore ID restriction
2024-03-18 17:31 ` [PATCH v5 " Sivaprasad Tummala
` (4 preceding siblings ...)
2024-03-18 17:31 ` [PATCH v5 5/6] examples/qos_sched: " Sivaprasad Tummala
@ 2024-03-18 17:31 ` Sivaprasad Tummala
2024-03-18 17:31 ` [PATCH v5 0/6] " Sivaprasad Tummala
` (2 subsequent siblings)
8 siblings, 0 replies; 100+ messages in thread
From: Sivaprasad Tummala @ 2024-03-18 17:31 UTC (permalink / raw)
To: david.hunt, anatoly.burakov, jerinj, radu.nicolau, gakhil,
cristian.dumitrescu, ferruh.yigit, konstantin.ananyev, stephen,
david.marchand
Cc: dev, marcinx.hajkowski, stable
Currently the config option accepts lcore IDs only up to 255,
irrespective of RTE_MAX_LCORE, and needs to be fixed.
The patch parses the guest CLI lcore argument as a 32-bit value
instead of 8-bit; a trimmed sketch of the patched cmdline tokens
follows this patch.
Fixes: 0e8f47491f09 ("examples/vm_power: add command to query CPU frequency")
Cc: marcinx.hajkowski@intel.com
Cc: stable@dpdk.org
Signed-off-by: Sivaprasad Tummala <sivaprasad.tummala@amd.com>
---
examples/vm_power_manager/guest_cli/vm_power_cli_guest.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/examples/vm_power_manager/guest_cli/vm_power_cli_guest.c b/examples/vm_power_manager/guest_cli/vm_power_cli_guest.c
index 94bfbbaf78..5eddb47847 100644
--- a/examples/vm_power_manager/guest_cli/vm_power_cli_guest.c
+++ b/examples/vm_power_manager/guest_cli/vm_power_cli_guest.c
@@ -401,7 +401,7 @@ check_response_cmd(unsigned int lcore_id, int *result)
struct cmd_set_cpu_freq_result {
cmdline_fixed_string_t set_cpu_freq;
- uint8_t lcore_id;
+ uint32_t lcore_id;
cmdline_fixed_string_t cmd;
};
@@ -444,7 +444,7 @@ cmdline_parse_token_string_t cmd_set_cpu_freq =
set_cpu_freq, "set_cpu_freq");
cmdline_parse_token_num_t cmd_set_cpu_freq_core_num =
TOKEN_NUM_INITIALIZER(struct cmd_set_cpu_freq_result,
- lcore_id, RTE_UINT8);
+ lcore_id, RTE_UINT32);
cmdline_parse_token_string_t cmd_set_cpu_freq_cmd_cmd =
TOKEN_STRING_INITIALIZER(struct cmd_set_cpu_freq_result,
cmd, "up#down#min#max#enable_turbo#disable_turbo");
--
2.25.1
^ permalink raw reply [flat|nested] 100+ messages in thread
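For the guest CLI the restriction sat in the cmdline token definition: parsing
the lcore argument of "set_cpu_freq <lcore_id> <cmd>" as RTE_UINT8 meant cores
above 255 could not be addressed. The patched token trio, reassembled from the
hunks above into one fragment (librte_cmdline headers assumed; the command
callback and registration are omitted):

#include <stdint.h>
#include <cmdline_parse_num.h>
#include <cmdline_parse_string.h>

struct cmd_set_cpu_freq_result {
	cmdline_fixed_string_t set_cpu_freq;
	uint32_t lcore_id;		/* was uint8_t */
	cmdline_fixed_string_t cmd;
};

cmdline_parse_token_string_t cmd_set_cpu_freq =
	TOKEN_STRING_INITIALIZER(struct cmd_set_cpu_freq_result,
			set_cpu_freq, "set_cpu_freq");
cmdline_parse_token_num_t cmd_set_cpu_freq_core_num =
	TOKEN_NUM_INITIALIZER(struct cmd_set_cpu_freq_result,
			lcore_id, RTE_UINT32);	/* was RTE_UINT8 */
cmdline_parse_token_string_t cmd_set_cpu_freq_cmd_cmd =
	TOKEN_STRING_INITIALIZER(struct cmd_set_cpu_freq_result,
			cmd, "up#down#min#max#enable_turbo#disable_turbo");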
* [PATCH v5 0/6] fix lcore ID restriction
2024-03-18 17:31 ` [PATCH v5 " Sivaprasad Tummala
` (5 preceding siblings ...)
2024-03-18 17:31 ` [PATCH v5 6/6] examples/vm_power_manager: " Sivaprasad Tummala
@ 2024-03-18 17:31 ` Sivaprasad Tummala
2024-03-19 18:41 ` Ferruh Yigit
2024-03-21 18:47 ` [PATCH v6 00/10] " Sivaprasad Tummala
8 siblings, 0 replies; 100+ messages in thread
From: Sivaprasad Tummala @ 2024-03-18 17:31 UTC (permalink / raw)
To: david.hunt, anatoly.burakov, jerinj, radu.nicolau, gakhil,
cristian.dumitrescu, ferruh.yigit, konstantin.ananyev, stephen,
david.marchand
Cc: dev
With modern CPUs it is possible to have a much higher
CPU count, and thus a higher RTE_MAX_LCORE.
In the DPDK sample applications, the current config
lcore options are hard-limited to 255.
The patchset fixes these constraints by allowing
all lcore IDs up to RTE_MAX_LCORE. The queue
IDs are also widened to support up to 65535.
v5:
- updated lcore_id type to uint32_t
v4:
- fixed build errors with queue_id type
in ipsec-secgw
v3:
- updated queue_id type to uint16_t
v2:
- fixed typo with lcore_id type in l3fwd
Sivaprasad Tummala (6):
examples/l3fwd: fix lcore ID restriction
examples/l3fwd-power: fix lcore ID restriction
examples/l3fwd-graph: fix lcore ID restriction
examples/ipsec-secgw: fix lcore ID restriction
examples/qos_sched: fix lcore ID restriction
examples/vm_power_manager: fix lcore ID restriction
examples/ipsec-secgw/event_helper.h | 2 +-
examples/ipsec-secgw/ipsec-secgw.c | 35 +++++------
examples/ipsec-secgw/ipsec.c | 2 +-
examples/ipsec-secgw/ipsec.h | 6 +-
examples/ipsec-secgw/ipsec_worker.c | 10 ++--
examples/l3fwd-graph/main.c | 33 ++++++-----
examples/l3fwd-power/main.c | 59 +++++++++----------
examples/l3fwd-power/main.h | 4 +-
examples/l3fwd-power/perf_core.c | 16 +++--
examples/l3fwd/l3fwd.h | 2 +-
examples/l3fwd/l3fwd_acl.c | 4 +-
examples/l3fwd/l3fwd_em.c | 4 +-
examples/l3fwd/l3fwd_event.h | 2 +-
examples/l3fwd/l3fwd_fib.c | 4 +-
examples/l3fwd/l3fwd_lpm.c | 5 +-
examples/l3fwd/main.c | 40 +++++++------
examples/qos_sched/args.c | 6 +-
.../guest_cli/vm_power_cli_guest.c | 4 +-
18 files changed, 122 insertions(+), 116 deletions(-)
--
2.25.1
^ permalink raw reply [flat|nested] 100+ messages in thread
* RE: [PATCH v5 1/6] examples/l3fwd: fix lcore ID restriction
2024-03-18 17:31 ` [PATCH v5 1/6] examples/l3fwd: " Sivaprasad Tummala
@ 2024-03-19 7:24 ` Morten Brørup
2024-03-21 9:55 ` Thomas Monjalon
2024-03-21 11:05 ` Tummala, Sivaprasad
0 siblings, 2 replies; 100+ messages in thread
From: Morten Brørup @ 2024-03-19 7:24 UTC (permalink / raw)
To: Sivaprasad Tummala, david.hunt, anatoly.burakov, jerinj,
radu.nicolau, gakhil, cristian.dumitrescu, ferruh.yigit,
konstantin.ananyev, stephen, david.marchand
Cc: dev, stable
> From: Sivaprasad Tummala [mailto:sivaprasad.tummala@amd.com]
> Sent: Monday, 18 March 2024 18.32
>
> Currently the config option allows lcore IDs up to 255,
> irrespective of RTE_MAX_LCORES and needs to be fixed.
>
> The patch allows config options based on DPDK config.
>
> Fixes: af75078fece3 ("first public release")
> Cc: stable@dpdk.org
>
> Signed-off-by: Sivaprasad Tummala <sivaprasad.tummala@amd.com>
> Acked-by: Konstantin Ananyev <konstantin.ananyev@huawei.com>
> ---
I suggest you update the description of the individual patches too, like you did for patch 0/6.
E.g. this patch not only fixes the lcore_id, but also the queue_id type size.
For the series,
Acked-by: Morten Brørup <mb@smartsharesystems.com>
^ permalink raw reply [flat|nested] 100+ messages in thread
* Re: [PATCH v5 0/6] fix lcore ID restriction
2024-03-18 17:31 ` [PATCH v5 " Sivaprasad Tummala
` (6 preceding siblings ...)
2024-03-18 17:31 ` [PATCH v5 0/6] " Sivaprasad Tummala
@ 2024-03-19 18:41 ` Ferruh Yigit
2024-03-21 18:47 ` [PATCH v6 00/10] " Sivaprasad Tummala
8 siblings, 0 replies; 100+ messages in thread
From: Ferruh Yigit @ 2024-03-19 18:41 UTC (permalink / raw)
To: Sivaprasad Tummala, david.hunt, anatoly.burakov, jerinj,
radu.nicolau, gakhil, cristian.dumitrescu, konstantin.ananyev,
stephen, david.marchand
Cc: dev
On 3/18/2024 5:31 PM, Sivaprasad Tummala wrote:
> With modern CPUs it is possible to have a much higher
> CPU count, and thus a higher RTE_MAX_LCORE.
> In the DPDK sample applications, the current config
> lcore options are hard-limited to 255.
>
> The patchset fixes these constraints by allowing
> all lcore IDs up to RTE_MAX_LCORE. The queue
> IDs are also widened to support up to 65535.
>
> v5:
> - updated lcore_id type to uint32_t
>
> v4:
> - fixed build errors with queue_id type
> in ipsec-secgw
>
> v3:
> - updated queue_id type to uint16_t
>
> v2:
> - fixed typo with lcore_id type in l3fwd
>
> Sivaprasad Tummala (6):
> examples/l3fwd: fix lcore ID restriction
> examples/l3fwd-power: fix lcore ID restriction
> examples/l3fwd-graph: fix lcore ID restriction
> examples/ipsec-secgw: fix lcore ID restriction
> examples/qos_sched: fix lcore ID restriction
> examples/vm_power_manager: fix lcore ID restriction
>
For series,
Acked-by: Ferruh Yigit <ferruh.yigit@amd.com>
^ permalink raw reply [flat|nested] 100+ messages in thread
* Re: [PATCH v5 1/6] examples/l3fwd: fix lcore ID restriction
2024-03-19 7:24 ` Morten Brørup
@ 2024-03-21 9:55 ` Thomas Monjalon
2024-03-21 11:05 ` Tummala, Sivaprasad
2024-03-21 11:05 ` Tummala, Sivaprasad
1 sibling, 1 reply; 100+ messages in thread
From: Thomas Monjalon @ 2024-03-21 9:55 UTC (permalink / raw)
To: Sivaprasad Tummala
Cc: david.hunt, anatoly.burakov, jerinj, radu.nicolau, gakhil,
cristian.dumitrescu, ferruh.yigit, konstantin.ananyev, stephen,
david.marchand, dev, stable, Morten Brørup
19/03/2024 08:24, Morten Brørup:
> > From: Sivaprasad Tummala [mailto:sivaprasad.tummala@amd.com]
> > Sent: Monday, 18 March 2024 18.32
> >
> > Currently the config option allows lcore IDs up to 255,
> > irrespective of RTE_MAX_LCORES and needs to be fixed.
> >
> > The patch allows config options based on DPDK config.
> >
> > Fixes: af75078fece3 ("first public release")
> > Cc: stable@dpdk.org
> >
> > Signed-off-by: Sivaprasad Tummala <sivaprasad.tummala@amd.com>
> > Acked-by: Konstantin Ananyev <konstantin.ananyev@huawei.com>
> > ---
>
> I suggest you update the description of the individual patches too, like you did for patch 0/6.
>
> E.g. this patch not only fixes the lcore_id, but also the queue_id type size.
>
>
> For the series,
> Acked-by: Morten Brørup <mb@smartsharesystems.com>
I would recommend a separate patch for queue id as it is a separate issue.
However, there is no need to split per directory.
You can have patches changing all examples at once.
^ permalink raw reply [flat|nested] 100+ messages in thread
* RE: [PATCH v5 1/6] examples/l3fwd: fix lcore ID restriction
2024-03-19 7:24 ` Morten Brørup
2024-03-21 9:55 ` Thomas Monjalon
@ 2024-03-21 11:05 ` Tummala, Sivaprasad
1 sibling, 0 replies; 100+ messages in thread
From: Tummala, Sivaprasad @ 2024-03-21 11:05 UTC (permalink / raw)
To: Morten Brørup, david.hunt, anatoly.burakov, jerinj,
radu.nicolau, gakhil, cristian.dumitrescu, Yigit, Ferruh,
konstantin.ananyev, stephen, david.marchand
Cc: dev, stable
Hi Morten,
> -----Original Message-----
> From: Morten Brørup <mb@smartsharesystems.com>
> Sent: Tuesday, March 19, 2024 12:54 PM
> To: Tummala, Sivaprasad <Sivaprasad.Tummala@amd.com>;
> david.hunt@intel.com; anatoly.burakov@intel.com; jerinj@marvell.com;
> radu.nicolau@intel.com; gakhil@marvell.com; cristian.dumitrescu@intel.com; Yigit,
> Ferruh <Ferruh.Yigit@amd.com>; konstantin.ananyev@huawei.com;
> stephen@networkplumber.org; david.marchand@redhat.com
> Cc: dev@dpdk.org; stable@dpdk.org
> Subject: RE: [PATCH v5 1/6] examples/l3fwd: fix lcore ID restriction
>
>
>
> > From: Sivaprasad Tummala [mailto:sivaprasad.tummala@amd.com]
> > Sent: Monday, 18 March 2024 18.32
> >
> > Currently the config option allows lcore IDs up to 255, irrespective
> > of RTE_MAX_LCORES and needs to be fixed.
> >
> > The patch allows config options based on DPDK config.
> >
> > Fixes: af75078fece3 ("first public release")
> > Cc: stable@dpdk.org
> >
> > Signed-off-by: Sivaprasad Tummala <sivaprasad.tummala@amd.com>
> > Acked-by: Konstantin Ananyev <konstantin.ananyev@huawei.com>
> > ---
>
> I suggest you update the description of the individual patches too, like you did for
> patch 0/6.
Sure, will fix this in next series.
>
> E.g. this patch not only fixes the lcore_id, but also the queue_id type size.
>
>
> For the series,
> Acked-by: Morten Brørup <mb@smartsharesystems.com>
^ permalink raw reply [flat|nested] 100+ messages in thread
* RE: [PATCH v5 1/6] examples/l3fwd: fix lcore ID restriction
2024-03-21 9:55 ` Thomas Monjalon
@ 2024-03-21 11:05 ` Tummala, Sivaprasad
2024-03-21 11:18 ` Thomas Monjalon
0 siblings, 1 reply; 100+ messages in thread
From: Tummala, Sivaprasad @ 2024-03-21 11:05 UTC (permalink / raw)
To: Thomas Monjalon
Cc: david.hunt, anatoly.burakov, jerinj, radu.nicolau, gakhil,
cristian.dumitrescu, Yigit, Ferruh, konstantin.ananyev, stephen,
david.marchand, dev, stable, Morten Brørup
Hi Thomas,
> -----Original Message-----
> From: Thomas Monjalon <thomas@monjalon.net>
> Sent: Thursday, March 21, 2024 3:25 PM
> To: Tummala, Sivaprasad <Sivaprasad.Tummala@amd.com>
> Cc: david.hunt@intel.com; anatoly.burakov@intel.com; jerinj@marvell.com;
> radu.nicolau@intel.com; gakhil@marvell.com; cristian.dumitrescu@intel.com; Yigit,
> Ferruh <Ferruh.Yigit@amd.com>; konstantin.ananyev@huawei.com;
> stephen@networkplumber.org; david.marchand@redhat.com; dev@dpdk.org;
> stable@dpdk.org; Morten Brørup <mb@smartsharesystems.com>
> Subject: Re: [PATCH v5 1/6] examples/l3fwd: fix lcore ID restriction
>
>
>
> 19/03/2024 08:24, Morten Brørup:
> > > From: Sivaprasad Tummala [mailto:sivaprasad.tummala@amd.com]
> > > Sent: Monday, 18 March 2024 18.32
> > >
> > > Currently the config option allows lcore IDs up to 255, irrespective
> > > of RTE_MAX_LCORES and needs to be fixed.
> > >
> > > The patch allows config options based on DPDK config.
> > >
> > > Fixes: af75078fece3 ("first public release")
> > > Cc: stable@dpdk.org
> > >
> > > Signed-off-by: Sivaprasad Tummala <sivaprasad.tummala@amd.com>
> > > Acked-by: Konstantin Ananyev <konstantin.ananyev@huawei.com>
> > > ---
> >
> > I suggest you update the description of the individual patches too, like you did
> for patch 0/6.
> >
> > E.g. this patch not only fixes the lcore_id, but also the queue_id type size.
> >
> >
> > For the series,
> > Acked-by: Morten Brørup <mb@smartsharesystems.com>
>
> I would recommend a separate patch for queue id as it is a separate issue.
> However, there is no need to split per directory.
> You can have patches changing all examples at once.
>
There's a functional dependency and queue id change is required to support
higher lcore IDs and hence it makes sense to add in the same patch.
I had split the examples to help the maintainers review the patches individually.
Please feel free to squash the git commits while merging.
^ permalink raw reply [flat|nested] 100+ messages in thread
* Re: [PATCH v5 1/6] examples/l3fwd: fix lcore ID restriction
2024-03-21 11:05 ` Tummala, Sivaprasad
@ 2024-03-21 11:18 ` Thomas Monjalon
2024-03-21 18:26 ` Tummala, Sivaprasad
0 siblings, 1 reply; 100+ messages in thread
From: Thomas Monjalon @ 2024-03-21 11:18 UTC (permalink / raw)
To: Tummala, Sivaprasad
Cc: david.hunt, anatoly.burakov, jerinj, radu.nicolau, gakhil,
cristian.dumitrescu, Yigit, Ferruh, konstantin.ananyev, stephen,
david.marchand, dev, stable, Morten Brørup
21/03/2024 12:05, Tummala, Sivaprasad:
> From: Thomas Monjalon <thomas@monjalon.net>
> > 19/03/2024 08:24, Morten Brørup:
> > > > From: Sivaprasad Tummala [mailto:sivaprasad.tummala@amd.com]
> > > > Sent: Monday, 18 March 2024 18.32
> > > >
> > > > Currently the config option allows lcore IDs up to 255, irrespective
> > > > of RTE_MAX_LCORES and needs to be fixed.
> > > >
> > > > The patch allows config options based on DPDK config.
> > > >
> > > > Fixes: af75078fece3 ("first public release")
> > > > Cc: stable@dpdk.org
> > > >
> > > > Signed-off-by: Sivaprasad Tummala <sivaprasad.tummala@amd.com>
> > > > Acked-by: Konstantin Ananyev <konstantin.ananyev@huawei.com>
> > > > ---
> > >
> > > I suggest you update the description of the individual patches too, like you did
> > for patch 0/6.
> > >
> > > E.g. this patch not only fixes the lcore_id, but also the queue_id type size.
> > >
> > >
> > > For the series,
> > > Acked-by: Morten Brørup <mb@smartsharesystems.com>
> >
> > I would recommend a separate patch for queue id as it is a separate issue.
> > However, there is no need to split per directory.
> > You can have patches changing all examples at once.
> >
> There's a functional dependency and queue id change is required to support
> higher lcore IDs and hence it makes sense to add in the same patch.
> I had split the examples to help the maintainers review the patches individually.
> Please feel free to squash the git commits while merging.
Then if there is a dependency, please make queue id the first patch.
^ permalink raw reply [flat|nested] 100+ messages in thread
* RE: [PATCH v5 1/6] examples/l3fwd: fix lcore ID restriction
2024-03-21 11:18 ` Thomas Monjalon
@ 2024-03-21 18:26 ` Tummala, Sivaprasad
0 siblings, 0 replies; 100+ messages in thread
From: Tummala, Sivaprasad @ 2024-03-21 18:26 UTC (permalink / raw)
To: Thomas Monjalon
Cc: david.hunt, anatoly.burakov, jerinj, radu.nicolau, gakhil,
cristian.dumitrescu, Yigit, Ferruh, konstantin.ananyev, stephen,
david.marchand, dev, stable, Morten Brørup
> -----Original Message-----
> From: Thomas Monjalon <thomas@monjalon.net>
> Sent: Thursday, March 21, 2024 4:48 PM
> To: Tummala, Sivaprasad <Sivaprasad.Tummala@amd.com>
> Cc: david.hunt@intel.com; anatoly.burakov@intel.com; jerinj@marvell.com;
> radu.nicolau@intel.com; gakhil@marvell.com; cristian.dumitrescu@intel.com; Yigit,
> Ferruh <Ferruh.Yigit@amd.com>; konstantin.ananyev@huawei.com;
> stephen@networkplumber.org; david.marchand@redhat.com; dev@dpdk.org;
> stable@dpdk.org; Morten Brørup <mb@smartsharesystems.com>
> Subject: Re: [PATCH v5 1/6] examples/l3fwd: fix lcore ID restriction
>
> Caution: This message originated from an External Source. Use proper caution
> when opening attachments, clicking links, or responding.
>
>
> 21/03/2024 12:05, Tummala, Sivaprasad:
> > From: Thomas Monjalon <thomas@monjalon.net>
> > > 19/03/2024 08:24, Morten Brørup:
> > > > > From: Sivaprasad Tummala [mailto:sivaprasad.tummala@amd.com]
> > > > > Sent: Monday, 18 March 2024 18.32
> > > > >
> > > > > Currently the config option allows lcore IDs up to 255,
> > > > > irrespective of RTE_MAX_LCORES and needs to be fixed.
> > > > >
> > > > > The patch allows config options based on DPDK config.
> > > > >
> > > > > Fixes: af75078fece3 ("first public release")
> > > > > Cc: stable@dpdk.org
> > > > >
> > > > > Signed-off-by: Sivaprasad Tummala <sivaprasad.tummala@amd.com>
> > > > > Acked-by: Konstantin Ananyev <konstantin.ananyev@huawei.com>
> > > > > ---
> > > >
> > > > I suggest you update the description of the individual patches
> > > > too, like you did
> > > for patch 0/6.
> > > >
> > > > E.g. this patch not only fixes the lcore_id, but also the queue_id type size.
> > > >
> > > >
> > > > For the series,
> > > > Acked-by: Morten Brørup <mb@smartsharesystems.com>
> > >
> > > I would recommend a separate patch for queue id as it is a separate issue.
> > > However, there is no need to split per directory.
> > > You can have patches changing all examples at once.
> > >
> > There's a functional dependency and queue id change is required to
> > support higher lcore IDs and hence it makes sense to add in the same patch.
> > I had split the examples to help the maintainers review the patches individually.
> > Please feel free to squash the git commits while merging.
>
> Then if there is a dependency, please make queue id the first patch.
OK! Will split the changes into separate patches in next version.
>
^ permalink raw reply [flat|nested] 100+ messages in thread
* [PATCH v6 00/10] fix lcore ID restriction
2024-03-18 17:31 ` [PATCH v5 " Sivaprasad Tummala
` (7 preceding siblings ...)
2024-03-19 18:41 ` Ferruh Yigit
@ 2024-03-21 18:47 ` Sivaprasad Tummala
2024-03-21 18:47 ` [PATCH v6 01/14] examples/l3fwd: fix queue " Sivaprasad Tummala
` (15 more replies)
8 siblings, 16 replies; 100+ messages in thread
From: Sivaprasad Tummala @ 2024-03-21 18:47 UTC (permalink / raw)
To: david.hunt, anatoly.burakov, jerinj, radu.nicolau, gakhil,
cristian.dumitrescu, ferruh.yigit, konstantin.ananyev, stephen,
mb, thomas
Cc: dev
With modern CPUs it is possible to have a much higher
CPU count, and thus a higher RTE_MAX_LCORE.
In the DPDK sample applications, the current config
lcore options are hard-limited to 255.
The patchset fixes these constraints by allowing
all lcore IDs up to RTE_MAX_LCORE. The queue
IDs are also widened to support up to 65535,
and the port ID constraints are lifted to
RTE_MAX_ETHPORTS.
v6:
- split queue_id, lcore_id and port_id changes as
separate patches.
- updated git commit description on individual
patches
v5:
- updated lcore_id type to uint32_t
v4:
- fixed build errors with queue_id type
in ipsec-secgw
v3:
- updated queue_id type to uint16_t
v2:
- fixed typo with lcore_id type in l3fwd
Sivaprasad Tummala (10):
examples/l3fwd: fix queue ID restriction
examples/l3fwd-power: fix queue ID restriction
examples/l3fwd-graph: fix queue ID restriction
examples/ipsec-secgw: fix queue ID restriction
examples/l3fwd: fix lcore ID restriction
examples/l3fwd-power: fix lcore ID restriction
examples/l3fwd-graph: fix lcore ID restriction
examples/ipsec-secgw: fix lcore ID restriction
examples/qos_sched: fix lcore ID restriction
examples/vm_power_manager: fix lcore ID restriction
examples/ipsec-secgw/event_helper.h | 2 +-
examples/ipsec-secgw/ipsec-secgw.c | 35 +++++------
examples/ipsec-secgw/ipsec.c | 2 +-
examples/ipsec-secgw/ipsec.h | 6 +-
examples/ipsec-secgw/ipsec_worker.c | 10 ++--
examples/l3fwd-graph/main.c | 31 +++++-----
examples/l3fwd-power/main.c | 59 +++++++++----------
examples/l3fwd-power/main.h | 4 +-
examples/l3fwd-power/perf_core.c | 14 +++--
examples/l3fwd/l3fwd.h | 2 +-
examples/l3fwd/l3fwd_acl.c | 4 +-
examples/l3fwd/l3fwd_em.c | 4 +-
examples/l3fwd/l3fwd_event.h | 2 +-
examples/l3fwd/l3fwd_fib.c | 4 +-
examples/l3fwd/l3fwd_lpm.c | 5 +-
examples/l3fwd/main.c | 38 ++++++------
examples/qos_sched/args.c | 6 +-
.../guest_cli/vm_power_cli_guest.c | 4 +-
18 files changed, 119 insertions(+), 113 deletions(-)
--
2.34.1
^ permalink raw reply [flat|nested] 100+ messages in thread
* [PATCH v6 01/14] examples/l3fwd: fix queue ID restriction
2024-03-21 18:47 ` [PATCH v6 00/10] " Sivaprasad Tummala
@ 2024-03-21 18:47 ` Sivaprasad Tummala
2024-03-22 15:41 ` David Marchand
2024-03-21 18:47 ` [PATCH v6 02/14] examples/l3fwd-power: " Sivaprasad Tummala
` (14 subsequent siblings)
15 siblings, 1 reply; 100+ messages in thread
From: Sivaprasad Tummala @ 2024-03-21 18:47 UTC (permalink / raw)
To: david.hunt, anatoly.burakov, jerinj, radu.nicolau, gakhil,
cristian.dumitrescu, ferruh.yigit, konstantin.ananyev, stephen,
mb, thomas
Cc: dev, stable
Currently the application supports queue IDs only up to 255
and at most 256 queues, irrespective of device support.
This limits the number of active lcores to 256.
The patch fixes these constraints by widening the queue IDs
to support up to 65535; a reduced sketch of the queue-count
derivation follows this patch.
Fixes: af75078fece3 ("first public release")
Cc: stable@dpdk.org
Signed-off-by: Sivaprasad Tummala <sivaprasad.tummala@amd.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@huawei.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Acked-by: Ferruh Yigit <ferruh.yigit@amd.com>
---
examples/l3fwd/l3fwd.h | 2 +-
examples/l3fwd/l3fwd_acl.c | 4 ++--
examples/l3fwd/l3fwd_em.c | 4 ++--
examples/l3fwd/l3fwd_event.h | 2 +-
examples/l3fwd/l3fwd_fib.c | 4 ++--
examples/l3fwd/l3fwd_lpm.c | 5 ++---
examples/l3fwd/main.c | 28 ++++++++++++++++------------
7 files changed, 26 insertions(+), 23 deletions(-)
diff --git a/examples/l3fwd/l3fwd.h b/examples/l3fwd/l3fwd.h
index e7ae0e5834..12c264cb4c 100644
--- a/examples/l3fwd/l3fwd.h
+++ b/examples/l3fwd/l3fwd.h
@@ -74,7 +74,7 @@ struct mbuf_table {
struct lcore_rx_queue {
uint16_t port_id;
- uint8_t queue_id;
+ uint16_t queue_id;
} __rte_cache_aligned;
struct lcore_conf {
diff --git a/examples/l3fwd/l3fwd_acl.c b/examples/l3fwd/l3fwd_acl.c
index 401692bcec..2bd63181bc 100644
--- a/examples/l3fwd/l3fwd_acl.c
+++ b/examples/l3fwd/l3fwd_acl.c
@@ -997,7 +997,7 @@ acl_main_loop(__rte_unused void *dummy)
uint64_t prev_tsc, diff_tsc, cur_tsc;
int i, nb_rx;
uint16_t portid;
- uint8_t queueid;
+ uint16_t queueid;
struct lcore_conf *qconf;
int socketid;
const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
@@ -1020,7 +1020,7 @@ acl_main_loop(__rte_unused void *dummy)
portid = qconf->rx_queue_list[i].port_id;
queueid = qconf->rx_queue_list[i].queue_id;
RTE_LOG(INFO, L3FWD,
- " -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
+ " -- lcoreid=%u portid=%u rxqueueid=%hu\n",
lcore_id, portid, queueid);
}
diff --git a/examples/l3fwd/l3fwd_em.c b/examples/l3fwd/l3fwd_em.c
index 40e102b38a..cd2bb4a4bb 100644
--- a/examples/l3fwd/l3fwd_em.c
+++ b/examples/l3fwd/l3fwd_em.c
@@ -586,7 +586,7 @@ em_main_loop(__rte_unused void *dummy)
unsigned lcore_id;
uint64_t prev_tsc, diff_tsc, cur_tsc;
int i, nb_rx;
- uint8_t queueid;
+ uint16_t queueid;
uint16_t portid;
struct lcore_conf *qconf;
const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) /
@@ -609,7 +609,7 @@ em_main_loop(__rte_unused void *dummy)
portid = qconf->rx_queue_list[i].port_id;
queueid = qconf->rx_queue_list[i].queue_id;
RTE_LOG(INFO, L3FWD,
- " -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
+ " -- lcoreid=%u portid=%u rxqueueid=%hu\n",
lcore_id, portid, queueid);
}
diff --git a/examples/l3fwd/l3fwd_event.h b/examples/l3fwd/l3fwd_event.h
index 9aad358003..c6a4a89127 100644
--- a/examples/l3fwd/l3fwd_event.h
+++ b/examples/l3fwd/l3fwd_event.h
@@ -78,8 +78,8 @@ struct l3fwd_event_resources {
uint8_t deq_depth;
uint8_t has_burst;
uint8_t enabled;
- uint8_t eth_rx_queues;
uint8_t vector_enabled;
+ uint16_t eth_rx_queues;
uint16_t vector_size;
uint64_t vector_tmo_ns;
};
diff --git a/examples/l3fwd/l3fwd_fib.c b/examples/l3fwd/l3fwd_fib.c
index 6a21984415..7da55f707a 100644
--- a/examples/l3fwd/l3fwd_fib.c
+++ b/examples/l3fwd/l3fwd_fib.c
@@ -186,7 +186,7 @@ fib_main_loop(__rte_unused void *dummy)
uint64_t prev_tsc, diff_tsc, cur_tsc;
int i, nb_rx;
uint16_t portid;
- uint8_t queueid;
+ uint16_t queueid;
struct lcore_conf *qconf;
const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) /
US_PER_S * BURST_TX_DRAIN_US;
@@ -208,7 +208,7 @@ fib_main_loop(__rte_unused void *dummy)
portid = qconf->rx_queue_list[i].port_id;
queueid = qconf->rx_queue_list[i].queue_id;
RTE_LOG(INFO, L3FWD,
- " -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
+ " -- lcoreid=%u portid=%u rxqueueid=%hu\n",
lcore_id, portid, queueid);
}
diff --git a/examples/l3fwd/l3fwd_lpm.c b/examples/l3fwd/l3fwd_lpm.c
index a484a33089..01d38bc69c 100644
--- a/examples/l3fwd/l3fwd_lpm.c
+++ b/examples/l3fwd/l3fwd_lpm.c
@@ -148,8 +148,7 @@ lpm_main_loop(__rte_unused void *dummy)
unsigned lcore_id;
uint64_t prev_tsc, diff_tsc, cur_tsc;
int i, nb_rx;
- uint16_t portid;
- uint8_t queueid;
+ uint16_t portid, queueid;
struct lcore_conf *qconf;
const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) /
US_PER_S * BURST_TX_DRAIN_US;
@@ -171,7 +170,7 @@ lpm_main_loop(__rte_unused void *dummy)
portid = qconf->rx_queue_list[i].port_id;
queueid = qconf->rx_queue_list[i].queue_id;
RTE_LOG(INFO, L3FWD,
- " -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
+ " -- lcoreid=%u portid=%u rxqueueid=%hu\n",
lcore_id, portid, queueid);
}
diff --git a/examples/l3fwd/main.c b/examples/l3fwd/main.c
index 8d32ae1dd5..4d4738b92b 100644
--- a/examples/l3fwd/main.c
+++ b/examples/l3fwd/main.c
@@ -98,7 +98,7 @@ struct parm_cfg parm_config;
struct lcore_params {
uint16_t port_id;
- uint8_t queue_id;
+ uint16_t queue_id;
uint8_t lcore_id;
} __rte_cache_aligned;
@@ -292,14 +292,14 @@ setup_l3fwd_lookup_tables(void)
static int
check_lcore_params(void)
{
- uint8_t queue, lcore;
- uint16_t i;
+ uint16_t queue, i;
+ uint8_t lcore;
int socketid;
for (i = 0; i < nb_lcore_params; ++i) {
queue = lcore_params[i].queue_id;
if (queue >= MAX_RX_QUEUE_PER_PORT) {
- printf("invalid queue number: %hhu\n", queue);
+ printf("invalid queue number: %hu\n", queue);
return -1;
}
lcore = lcore_params[i].lcore_id;
@@ -336,7 +336,7 @@ check_port_config(void)
return 0;
}
-static uint8_t
+static uint16_t
get_port_n_rx_queues(const uint16_t port)
{
int queue = -1;
@@ -352,7 +352,7 @@ get_port_n_rx_queues(const uint16_t port)
lcore_params[i].port_id);
}
}
- return (uint8_t)(++queue);
+ return (uint16_t)(++queue);
}
static int
@@ -366,7 +366,7 @@ init_lcore_rx_queues(void)
nb_rx_queue = lcore_conf[lcore].n_rx_queue;
if (nb_rx_queue >= MAX_RX_QUEUE_PER_LCORE) {
printf("error: too many queues (%u) for lcore: %u\n",
- (unsigned)nb_rx_queue + 1, (unsigned)lcore);
+ (unsigned int)nb_rx_queue + 1, (unsigned int)lcore);
return -1;
} else {
lcore_conf[lcore].rx_queue_list[nb_rx_queue].port_id =
@@ -500,6 +500,8 @@ parse_config(const char *q_arg)
char *str_fld[_NUM_FLD];
int i;
unsigned size;
+ uint16_t max_fld[_NUM_FLD] = {USHRT_MAX,
+ USHRT_MAX, UCHAR_MAX};
nb_lcore_params = 0;
@@ -518,7 +520,8 @@ parse_config(const char *q_arg)
for (i = 0; i < _NUM_FLD; i++){
errno = 0;
int_fld[i] = strtoul(str_fld[i], &end, 0);
- if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
+ if (errno != 0 || end == str_fld[i] || int_fld[i] >
+ max_fld[i])
return -1;
}
if (nb_lcore_params >= MAX_LCORE_PARAMS) {
@@ -529,7 +532,7 @@ parse_config(const char *q_arg)
lcore_params_array[nb_lcore_params].port_id =
(uint8_t)int_fld[FLD_PORT];
lcore_params_array[nb_lcore_params].queue_id =
- (uint8_t)int_fld[FLD_QUEUE];
+ (uint16_t)int_fld[FLD_QUEUE];
lcore_params_array[nb_lcore_params].lcore_id =
(uint8_t)int_fld[FLD_LCORE];
++nb_lcore_params;
@@ -630,7 +633,7 @@ parse_event_eth_rx_queues(const char *eth_rx_queues)
{
struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
char *end = NULL;
- uint8_t num_eth_rx_queues;
+ uint16_t num_eth_rx_queues;
/* parse decimal string */
num_eth_rx_queues = strtoul(eth_rx_queues, &end, 10);
@@ -1211,7 +1214,8 @@ config_port_max_pkt_len(struct rte_eth_conf *conf,
static void
l3fwd_poll_resource_setup(void)
{
- uint8_t nb_rx_queue, queue, socketid;
+ uint8_t socketid;
+ uint16_t nb_rx_queue, queue;
struct rte_eth_dev_info dev_info;
uint32_t n_tx_queue, nb_lcores;
struct rte_eth_txconf *txconf;
@@ -1535,7 +1539,7 @@ main(int argc, char **argv)
struct lcore_conf *qconf;
uint16_t queueid, portid;
unsigned int lcore_id;
- uint8_t queue;
+ uint16_t queue;
int ret;
/* init EAL */
--
2.34.1
^ permalink raw reply [flat|nested] 100+ messages in thread
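The link between queue ID width and lcore count is the per-port queue
derivation: the sample gives each polling lcore its own RX queue and sizes the
port as "highest queue ID in --config plus one". A reduced, standalone sketch
of that derivation with the widened return type; lcore_params is trimmed to
the two fields that matter here and the table contents are made up.

#include <stdint.h>
#include <stdio.h>

struct lcore_params {
	uint16_t port_id;
	uint16_t queue_id;	/* widened from uint8_t by this patch */
};

/* Made-up --config table: the 299 entry would not even fit in the
 * old uint8_t queue_id field. */
static const struct lcore_params lcore_params[] = {
	{ .port_id = 0, .queue_id = 0 },
	{ .port_id = 0, .queue_id = 299 },
	{ .port_id = 1, .queue_id = 7 },
};
static const uint16_t nb_lcore_params =
	sizeof(lcore_params) / sizeof(lcore_params[0]);

/* RX queues to configure on a port = highest queue ID assigned to it
 * plus one; returning uint16_t keeps counts above 256 intact. */
static uint16_t
get_port_n_rx_queues(const uint16_t port)
{
	int queue = -1;
	uint16_t i;

	for (i = 0; i < nb_lcore_params; ++i) {
		if (lcore_params[i].port_id == port &&
		    lcore_params[i].queue_id > queue)
			queue = lcore_params[i].queue_id;
	}
	return (uint16_t)(++queue);
}

int
main(void)
{
	printf("port 0 needs %u RX queues, port 1 needs %u\n",
	       get_port_n_rx_queues(0), get_port_n_rx_queues(1));
	return 0;
}

With 8-bit queue IDs this count topped out at 256, which is exactly the
256-lcore ceiling the commit message refers to.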
* [PATCH v6 02/14] examples/l3fwd-power: fix queue ID restriction
2024-03-21 18:47 ` [PATCH v6 00/10] " Sivaprasad Tummala
2024-03-21 18:47 ` [PATCH v6 01/14] examples/l3fwd: fix queue " Sivaprasad Tummala
@ 2024-03-21 18:47 ` Sivaprasad Tummala
2024-03-21 18:47 ` [PATCH v6 03/14] examples/l3fwd-graph: " Sivaprasad Tummala
` (13 subsequent siblings)
15 siblings, 0 replies; 100+ messages in thread
From: Sivaprasad Tummala @ 2024-03-21 18:47 UTC (permalink / raw)
To: david.hunt, anatoly.burakov, jerinj, radu.nicolau, gakhil,
cristian.dumitrescu, ferruh.yigit, konstantin.ananyev, stephen,
mb, thomas
Cc: dev, stable
Currently the application supports queue IDs only up to 255
and at most 256 queues, irrespective of device support.
This limits the number of active lcores to 256.
The patch fixes these constraints by widening the queue IDs
to support up to 65535.
Fixes: f88e7c175a68 ("examples/l3fwd-power: add high/regular perf cores options")
Cc: radu.nicolau@intel.com
Cc: stable@dpdk.org
Signed-off-by: Sivaprasad Tummala <sivaprasad.tummala@amd.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Acked-by: Ferruh Yigit <ferruh.yigit@amd.com>
---
examples/l3fwd-power/main.c | 49 ++++++++++++++++----------------
examples/l3fwd-power/main.h | 2 +-
examples/l3fwd-power/perf_core.c | 10 +++++--
3 files changed, 32 insertions(+), 29 deletions(-)
diff --git a/examples/l3fwd-power/main.c b/examples/l3fwd-power/main.c
index f4adcf41b5..1881b1b194 100644
--- a/examples/l3fwd-power/main.c
+++ b/examples/l3fwd-power/main.c
@@ -214,7 +214,7 @@ enum freq_scale_hint_t
struct lcore_rx_queue {
uint16_t port_id;
- uint8_t queue_id;
+ uint16_t queue_id;
enum freq_scale_hint_t freq_up_hint;
uint32_t zero_rx_packet_count;
uint32_t idle_hint;
@@ -838,7 +838,7 @@ sleep_until_rx_interrupt(int num, int lcore)
struct rte_epoll_event event[num];
int n, i;
uint16_t port_id;
- uint8_t queue_id;
+ uint16_t queue_id;
void *data;
if (status[lcore].wakeup) {
@@ -850,9 +850,9 @@ sleep_until_rx_interrupt(int num, int lcore)
n = rte_epoll_wait(RTE_EPOLL_PER_THREAD, event, num, 10);
for (i = 0; i < n; i++) {
data = event[i].epdata.data;
- port_id = ((uintptr_t)data) >> CHAR_BIT;
+ port_id = ((uintptr_t)data) >> (sizeof(uint16_t) * CHAR_BIT);
queue_id = ((uintptr_t)data) &
- RTE_LEN2MASK(CHAR_BIT, uint8_t);
+ RTE_LEN2MASK((sizeof(uint16_t) * CHAR_BIT), uint16_t);
RTE_LOG(INFO, L3FWD_POWER,
"lcore %u is waked up from rx interrupt on"
" port %d queue %d\n",
@@ -867,7 +867,7 @@ static void turn_on_off_intr(struct lcore_conf *qconf, bool on)
{
int i;
struct lcore_rx_queue *rx_queue;
- uint8_t queue_id;
+ uint16_t queue_id;
uint16_t port_id;
for (i = 0; i < qconf->n_rx_queue; ++i) {
@@ -887,7 +887,7 @@ static void turn_on_off_intr(struct lcore_conf *qconf, bool on)
static int event_register(struct lcore_conf *qconf)
{
struct lcore_rx_queue *rx_queue;
- uint8_t queueid;
+ uint16_t queueid;
uint16_t portid;
uint32_t data;
int ret;
@@ -897,7 +897,7 @@ static int event_register(struct lcore_conf *qconf)
rx_queue = &(qconf->rx_queue_list[i]);
portid = rx_queue->port_id;
queueid = rx_queue->queue_id;
- data = portid << CHAR_BIT | queueid;
+ data = portid << (sizeof(uint16_t) * CHAR_BIT) | queueid;
ret = rte_eth_dev_rx_intr_ctl_q(portid, queueid,
RTE_EPOLL_PER_THREAD,
@@ -917,8 +917,7 @@ static int main_intr_loop(__rte_unused void *dummy)
unsigned int lcore_id;
uint64_t prev_tsc, diff_tsc, cur_tsc;
int i, j, nb_rx;
- uint8_t queueid;
- uint16_t portid;
+ uint16_t portid, queueid;
struct lcore_conf *qconf;
struct lcore_rx_queue *rx_queue;
uint32_t lcore_rx_idle_count = 0;
@@ -946,7 +945,7 @@ static int main_intr_loop(__rte_unused void *dummy)
portid = qconf->rx_queue_list[i].port_id;
queueid = qconf->rx_queue_list[i].queue_id;
RTE_LOG(INFO, L3FWD_POWER,
- " -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
+ " -- lcoreid=%u portid=%u rxqueueid=%hu\n",
lcore_id, portid, queueid);
}
@@ -1083,8 +1082,7 @@ main_telemetry_loop(__rte_unused void *dummy)
unsigned int lcore_id;
uint64_t prev_tsc, diff_tsc, cur_tsc, prev_tel_tsc;
int i, j, nb_rx;
- uint8_t queueid;
- uint16_t portid;
+ uint16_t portid, queueid;
struct lcore_conf *qconf;
struct lcore_rx_queue *rx_queue;
uint64_t ep_nep[2] = {0}, fp_nfp[2] = {0};
@@ -1114,7 +1112,7 @@ main_telemetry_loop(__rte_unused void *dummy)
portid = qconf->rx_queue_list[i].port_id;
queueid = qconf->rx_queue_list[i].queue_id;
RTE_LOG(INFO, L3FWD_POWER, " -- lcoreid=%u portid=%u "
- "rxqueueid=%hhu\n", lcore_id, portid, queueid);
+ "rxqueueid=%hu\n", lcore_id, portid, queueid);
}
while (!is_done()) {
@@ -1205,8 +1203,7 @@ main_legacy_loop(__rte_unused void *dummy)
uint64_t prev_tsc, diff_tsc, cur_tsc, tim_res_tsc, hz;
uint64_t prev_tsc_power = 0, cur_tsc_power, diff_tsc_power;
int i, j, nb_rx;
- uint8_t queueid;
- uint16_t portid;
+ uint16_t portid, queueid;
struct lcore_conf *qconf;
struct lcore_rx_queue *rx_queue;
enum freq_scale_hint_t lcore_scaleup_hint;
@@ -1234,7 +1231,7 @@ main_legacy_loop(__rte_unused void *dummy)
portid = qconf->rx_queue_list[i].port_id;
queueid = qconf->rx_queue_list[i].queue_id;
RTE_LOG(INFO, L3FWD_POWER, " -- lcoreid=%u portid=%u "
- "rxqueueid=%hhu\n", lcore_id, portid, queueid);
+ "rxqueueid=%hu\n", lcore_id, portid, queueid);
}
/* add into event wait list */
@@ -1399,14 +1396,14 @@ main_legacy_loop(__rte_unused void *dummy)
static int
check_lcore_params(void)
{
- uint8_t queue, lcore;
- uint16_t i;
+ uint16_t queue, i;
+ uint8_t lcore;
int socketid;
for (i = 0; i < nb_lcore_params; ++i) {
queue = lcore_params[i].queue_id;
if (queue >= MAX_RX_QUEUE_PER_PORT) {
- printf("invalid queue number: %hhu\n", queue);
+ printf("invalid queue number: %hu\n", queue);
return -1;
}
lcore = lcore_params[i].lcore_id;
@@ -1451,7 +1448,7 @@ check_port_config(void)
return 0;
}
-static uint8_t
+static uint16_t
get_port_n_rx_queues(const uint16_t port)
{
int queue = -1;
@@ -1462,7 +1459,7 @@ get_port_n_rx_queues(const uint16_t port)
lcore_params[i].queue_id > queue)
queue = lcore_params[i].queue_id;
}
- return (uint8_t)(++queue);
+ return (uint16_t)(++queue);
}
static int
@@ -1661,6 +1658,8 @@ parse_config(const char *q_arg)
char *str_fld[_NUM_FLD];
int i;
unsigned size;
+ unsigned int max_fld[_NUM_FLD] = {USHRT_MAX,
+ USHRT_MAX, UCHAR_MAX};
nb_lcore_params = 0;
@@ -1681,7 +1680,7 @@ parse_config(const char *q_arg)
errno = 0;
int_fld[i] = strtoul(str_fld[i], &end, 0);
if (errno != 0 || end == str_fld[i] || int_fld[i] >
- 255)
+ max_fld[i])
return -1;
}
if (nb_lcore_params >= MAX_LCORE_PARAMS) {
@@ -1692,7 +1691,7 @@ parse_config(const char *q_arg)
lcore_params_array[nb_lcore_params].port_id =
(uint8_t)int_fld[FLD_PORT];
lcore_params_array[nb_lcore_params].queue_id =
- (uint8_t)int_fld[FLD_QUEUE];
+ (uint16_t)int_fld[FLD_QUEUE];
lcore_params_array[nb_lcore_params].lcore_id =
(uint8_t)int_fld[FLD_LCORE];
++nb_lcore_params;
@@ -2501,8 +2500,8 @@ main(int argc, char **argv)
uint64_t hz;
uint32_t n_tx_queue, nb_lcores;
uint32_t dev_rxq_num, dev_txq_num;
- uint8_t nb_rx_queue, queue, socketid;
- uint16_t portid;
+ uint8_t socketid;
+ uint16_t portid, nb_rx_queue, queue;
const char *ptr_strings[NUM_TELSTATS];
/* init EAL */
diff --git a/examples/l3fwd-power/main.h b/examples/l3fwd-power/main.h
index 258de98f5b..40b5194726 100644
--- a/examples/l3fwd-power/main.h
+++ b/examples/l3fwd-power/main.h
@@ -9,7 +9,7 @@
#define MAX_LCORE_PARAMS 1024
struct lcore_params {
uint16_t port_id;
- uint8_t queue_id;
+ uint16_t queue_id;
uint8_t lcore_id;
} __rte_cache_aligned;
diff --git a/examples/l3fwd-power/perf_core.c b/examples/l3fwd-power/perf_core.c
index 41ef6d0c9a..3088935ee0 100644
--- a/examples/l3fwd-power/perf_core.c
+++ b/examples/l3fwd-power/perf_core.c
@@ -22,7 +22,7 @@ static uint16_t nb_hp_lcores;
struct perf_lcore_params {
uint16_t port_id;
- uint8_t queue_id;
+ uint16_t queue_id;
uint8_t high_perf;
uint8_t lcore_idx;
} __rte_cache_aligned;
@@ -132,6 +132,8 @@ parse_perf_config(const char *q_arg)
char *str_fld[_NUM_FLD];
int i;
unsigned int size;
+ unsigned int max_fld[_NUM_FLD] = {USHRT_MAX, USHRT_MAX,
+ UCHAR_MAX, UCHAR_MAX};
nb_prf_lc_prms = 0;
@@ -152,7 +154,9 @@ parse_perf_config(const char *q_arg)
for (i = 0; i < _NUM_FLD; i++) {
errno = 0;
int_fld[i] = strtoul(str_fld[i], &end, 0);
- if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
+ if (errno != 0 || end == str_fld[i] || int_fld[i] >
+ max_fld[i])
+
return -1;
}
if (nb_prf_lc_prms >= MAX_LCORE_PARAMS) {
@@ -163,7 +167,7 @@ parse_perf_config(const char *q_arg)
prf_lc_prms[nb_prf_lc_prms].port_id =
(uint8_t)int_fld[FLD_PORT];
prf_lc_prms[nb_prf_lc_prms].queue_id =
- (uint8_t)int_fld[FLD_QUEUE];
+ (uint16_t)int_fld[FLD_QUEUE];
prf_lc_prms[nb_prf_lc_prms].high_perf =
!!(uint8_t)int_fld[FLD_LCORE_HP];
prf_lc_prms[nb_prf_lc_prms].lcore_idx =
--
2.34.1
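For readers following the epoll changes above, here is a minimal standalone sketch of the encoding that event_register() and sleep_until_rx_interrupt() now share: with 16-bit queue IDs the port ID moves into the upper half of the 32-bit epoll user data word. The helper names are illustrative and not part of the patch.

#include <limits.h>   /* CHAR_BIT */
#include <stdint.h>
#include <stdio.h>

/* Pack a 16-bit port ID and a 16-bit queue ID into one 32-bit word,
 * mirroring "portid << (sizeof(uint16_t) * CHAR_BIT) | queueid". */
static uint32_t
encode_rx_intr_data(uint16_t port_id, uint16_t queue_id)
{
        return ((uint32_t)port_id << (sizeof(uint16_t) * CHAR_BIT)) | queue_id;
}

/* Reverse of the above; the mask keeps the low 16 bits for the queue. */
static void
decode_rx_intr_data(uint32_t data, uint16_t *port_id, uint16_t *queue_id)
{
        *port_id = data >> (sizeof(uint16_t) * CHAR_BIT);
        *queue_id = data & UINT16_MAX;
}

int
main(void)
{
        uint16_t port, queue;

        decode_rx_intr_data(encode_rx_intr_data(3, 300), &port, &queue);
        printf("port %u queue %u\n", port, queue); /* prints: port 3 queue 300 */
        return 0;
}

With the previous 8-bit shift and mask, a queue ID above 255 would have spilled into the port bits; the wider shift keeps the two fields disjoint.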
^ permalink raw reply [flat|nested] 100+ messages in thread
* [PATCH v6 03/14] examples/l3fwd-graph: fix queue ID restriction
2024-03-21 18:47 ` [PATCH v6 00/10] " Sivaprasad Tummala
2024-03-21 18:47 ` [PATCH v6 01/14] examples/l3fwd: fix queue " Sivaprasad Tummala
2024-03-21 18:47 ` [PATCH v6 02/14] examples/l3fwd-power: " Sivaprasad Tummala
@ 2024-03-21 18:47 ` Sivaprasad Tummala
2024-03-21 18:47 ` [PATCH v6 04/14] examples/ipsec-secgw: " Sivaprasad Tummala
` (12 subsequent siblings)
15 siblings, 0 replies; 100+ messages in thread
From: Sivaprasad Tummala @ 2024-03-21 18:47 UTC (permalink / raw)
To: david.hunt, anatoly.burakov, jerinj, radu.nicolau, gakhil,
cristian.dumitrescu, ferruh.yigit, konstantin.ananyev, stephen,
mb, thomas
Cc: dev, ndabilpuram, stable
Currently the application supports queue IDs only up to 255
and a maximum of 256 queues, irrespective of device support.
This limits the number of active lcores to 256.
The patch fixes these constraints by widening
the queue IDs to support up to 65535.
Fixes: 08bd1a174461 ("examples/l3fwd-graph: add graph-based l3fwd skeleton")
Cc: ndabilpuram@marvell.com
Cc: stable@dpdk.org
Signed-off-by: Sivaprasad Tummala <sivaprasad.tummala@amd.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Acked-by: Ferruh Yigit <ferruh.yigit@amd.com>
---
examples/l3fwd-graph/main.c | 19 ++++++++++---------
1 file changed, 10 insertions(+), 9 deletions(-)
diff --git a/examples/l3fwd-graph/main.c b/examples/l3fwd-graph/main.c
index 96cb1c81ff..c3a7b267e9 100644
--- a/examples/l3fwd-graph/main.c
+++ b/examples/l3fwd-graph/main.c
@@ -90,7 +90,7 @@ static int pcap_trace_enable;
struct lcore_rx_queue {
uint16_t port_id;
- uint8_t queue_id;
+ uint16_t queue_id;
char node_name[RTE_NODE_NAMESIZE];
};
@@ -110,7 +110,7 @@ static struct lcore_conf lcore_conf[RTE_MAX_LCORE];
struct lcore_params {
uint16_t port_id;
- uint8_t queue_id;
+ uint16_t queue_id;
uint8_t lcore_id;
} __rte_cache_aligned;
@@ -205,14 +205,14 @@ check_worker_model_params(void)
static int
check_lcore_params(void)
{
- uint8_t queue, lcore;
+ uint16_t queue, i;
int socketid;
- uint16_t i;
+ uint8_t lcore;
for (i = 0; i < nb_lcore_params; ++i) {
queue = lcore_params[i].queue_id;
if (queue >= MAX_RX_QUEUE_PER_PORT) {
- printf("Invalid queue number: %hhu\n", queue);
+ printf("Invalid queue number: %hu\n", queue);
return -1;
}
lcore = lcore_params[i].lcore_id;
@@ -257,7 +257,7 @@ check_port_config(void)
return 0;
}
-static uint8_t
+static uint16_t
get_port_n_rx_queues(const uint16_t port)
{
int queue = -1;
@@ -275,7 +275,7 @@ get_port_n_rx_queues(const uint16_t port)
}
}
- return (uint8_t)(++queue);
+ return (uint16_t)(++queue);
}
static int
@@ -450,7 +450,7 @@ parse_config(const char *q_arg)
lcore_params_array[nb_lcore_params].port_id =
(uint8_t)int_fld[FLD_PORT];
lcore_params_array[nb_lcore_params].queue_id =
- (uint8_t)int_fld[FLD_QUEUE];
+ (uint16_t)int_fld[FLD_QUEUE];
lcore_params_array[nb_lcore_params].lcore_id =
(uint8_t)int_fld[FLD_LCORE];
++nb_lcore_params;
@@ -1011,7 +1011,8 @@ main(int argc, char **argv)
"ethdev_tx-*",
"pkt_drop",
};
- uint8_t nb_rx_queue, queue, socketid;
+ uint8_t socketid;
+ uint16_t nb_rx_queue, queue;
struct rte_graph_param graph_conf;
struct rte_eth_dev_info dev_info;
uint32_t nb_ports, nb_conf = 0;
--
2.34.1
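As background for the get_port_n_rx_queues() hunk, a small self-contained sketch (simplified field set, not the example's exact code) of the logic it implements: the number of Rx queues to configure on a port is the highest queue ID assigned to it plus one, which is why the return type has to grow to uint16_t.

#include <stdint.h>
#include <stdio.h>

struct cfg_entry {
        uint16_t port_id;
        uint16_t queue_id;
};

/* Highest queue ID configured for "port", plus one. */
static uint16_t
port_n_rx_queues(const struct cfg_entry *cfg, unsigned int n, uint16_t port)
{
        int queue = -1;
        unsigned int i;

        for (i = 0; i < n; i++) {
                if (cfg[i].port_id == port && cfg[i].queue_id > queue)
                        queue = cfg[i].queue_id;
        }
        return (uint16_t)(queue + 1);
}

int
main(void)
{
        const struct cfg_entry cfg[] = { {0, 0}, {0, 511}, {1, 3} };

        /* 512 Rx queues needed on port 0; a uint8_t return would wrap to 0. */
        printf("port 0 needs %u rx queues\n", port_n_rx_queues(cfg, 3, 0));
        return 0;
}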
^ permalink raw reply [flat|nested] 100+ messages in thread
* [PATCH v6 04/14] examples/ipsec-secgw: fix queue ID restriction
2024-03-21 18:47 ` [PATCH v6 00/10] " Sivaprasad Tummala
` (2 preceding siblings ...)
2024-03-21 18:47 ` [PATCH v6 03/14] examples/l3fwd-graph: " Sivaprasad Tummala
@ 2024-03-21 18:47 ` Sivaprasad Tummala
2024-03-21 18:47 ` [PATCH v6 05/14] examples/l3fwd: fix lcore " Sivaprasad Tummala
` (11 subsequent siblings)
15 siblings, 0 replies; 100+ messages in thread
From: Sivaprasad Tummala @ 2024-03-21 18:47 UTC (permalink / raw)
To: david.hunt, anatoly.burakov, jerinj, radu.nicolau, gakhil,
cristian.dumitrescu, ferruh.yigit, konstantin.ananyev, stephen,
mb, thomas
Cc: dev
Currently the application supports queue IDs only up to 255
and a maximum of 256 queues, irrespective of device support.
This limits the number of active lcores to 256.
The patch fixes these constraints by widening
the queue IDs to support up to 65535.
Signed-off-by: Sivaprasad Tummala <sivaprasad.tummala@amd.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@huawei.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Acked-by: Ferruh Yigit <ferruh.yigit@amd.com>
---
examples/ipsec-secgw/ipsec-secgw.c | 20 ++++++++++----------
examples/ipsec-secgw/ipsec.h | 2 +-
examples/ipsec-secgw/ipsec_worker.c | 10 ++++------
3 files changed, 15 insertions(+), 17 deletions(-)
diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index 45a303850d..106a058b60 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
@@ -220,7 +220,7 @@ static const char *cfgfile;
struct lcore_params {
uint16_t port_id;
- uint8_t queue_id;
+ uint16_t queue_id;
uint8_t lcore_id;
} __rte_cache_aligned;
@@ -695,8 +695,7 @@ ipsec_poll_mode_worker(void)
struct rte_mbuf *pkts[MAX_PKT_BURST];
uint32_t lcore_id;
uint64_t prev_tsc, diff_tsc, cur_tsc;
- uint16_t i, nb_rx, portid;
- uint8_t queueid;
+ uint16_t i, nb_rx, portid, queueid;
struct lcore_conf *qconf;
int32_t rc, socket_id;
const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
@@ -743,7 +742,7 @@ ipsec_poll_mode_worker(void)
portid = rxql[i].port_id;
queueid = rxql[i].queue_id;
RTE_LOG(INFO, IPSEC,
- " -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
+ " -- lcoreid=%u portid=%u rxqueueid=%hu\n",
lcore_id, portid, queueid);
}
@@ -788,8 +787,7 @@ int
check_flow_params(uint16_t fdir_portid, uint8_t fdir_qid)
{
uint16_t i;
- uint16_t portid;
- uint8_t queueid;
+ uint16_t portid, queueid;
for (i = 0; i < nb_lcore_params; ++i) {
portid = lcore_params_array[i].port_id;
@@ -851,7 +849,7 @@ check_poll_mode_params(struct eh_conf *eh_conf)
return 0;
}
-static uint8_t
+static uint16_t
get_port_nb_rx_queues(const uint16_t port)
{
int32_t queue = -1;
@@ -862,7 +860,7 @@ get_port_nb_rx_queues(const uint16_t port)
lcore_params[i].queue_id > queue)
queue = lcore_params[i].queue_id;
}
- return (uint8_t)(++queue);
+ return (uint16_t)(++queue);
}
static int32_t
@@ -1050,6 +1048,8 @@ parse_config(const char *q_arg)
char *str_fld[_NUM_FLD];
int32_t i;
uint32_t size;
+ uint32_t max_fld[_NUM_FLD] = {USHRT_MAX,
+ USHRT_MAX, UCHAR_MAX};
nb_lcore_params = 0;
@@ -1070,7 +1070,7 @@ parse_config(const char *q_arg)
for (i = 0; i < _NUM_FLD; i++) {
errno = 0;
int_fld[i] = strtoul(str_fld[i], &end, 0);
- if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
+ if (errno != 0 || end == str_fld[i] || int_fld[i] > max_fld[i])
return -1;
}
if (nb_lcore_params >= MAX_LCORE_PARAMS) {
@@ -1081,7 +1081,7 @@ parse_config(const char *q_arg)
lcore_params_array[nb_lcore_params].port_id =
(uint8_t)int_fld[FLD_PORT];
lcore_params_array[nb_lcore_params].queue_id =
- (uint8_t)int_fld[FLD_QUEUE];
+ (uint16_t)int_fld[FLD_QUEUE];
lcore_params_array[nb_lcore_params].lcore_id =
(uint8_t)int_fld[FLD_LCORE];
++nb_lcore_params;
diff --git a/examples/ipsec-secgw/ipsec.h b/examples/ipsec-secgw/ipsec.h
index bdcada1c40..29b9b283f0 100644
--- a/examples/ipsec-secgw/ipsec.h
+++ b/examples/ipsec-secgw/ipsec.h
@@ -285,7 +285,7 @@ struct cnt_blk {
struct lcore_rx_queue {
uint16_t port_id;
- uint8_t queue_id;
+ uint16_t queue_id;
void *sec_ctx;
} __rte_cache_aligned;
diff --git a/examples/ipsec-secgw/ipsec_worker.c b/examples/ipsec-secgw/ipsec_worker.c
index 8d122e8519..90a4c38ba4 100644
--- a/examples/ipsec-secgw/ipsec_worker.c
+++ b/examples/ipsec-secgw/ipsec_worker.c
@@ -1598,8 +1598,7 @@ ipsec_poll_mode_wrkr_inl_pr(void)
int32_t socket_id;
uint32_t lcore_id;
int32_t i, nb_rx;
- uint16_t portid;
- uint8_t queueid;
+ uint16_t portid, queueid;
prev_tsc = 0;
lcore_id = rte_lcore_id();
@@ -1633,7 +1632,7 @@ ipsec_poll_mode_wrkr_inl_pr(void)
portid = rxql[i].port_id;
queueid = rxql[i].queue_id;
RTE_LOG(INFO, IPSEC,
- " -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
+ " -- lcoreid=%u portid=%u rxqueueid=%hu\n",
lcore_id, portid, queueid);
}
@@ -1729,8 +1728,7 @@ ipsec_poll_mode_wrkr_inl_pr_ss(void)
uint32_t i, nb_rx, j;
int32_t socket_id;
uint32_t lcore_id;
- uint16_t portid;
- uint8_t queueid;
+ uint16_t portid, queueid;
prev_tsc = 0;
lcore_id = rte_lcore_id();
@@ -1764,7 +1762,7 @@ ipsec_poll_mode_wrkr_inl_pr_ss(void)
portid = rxql[i].port_id;
queueid = rxql[i].queue_id;
RTE_LOG(INFO, IPSEC,
- " -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
+ " -- lcoreid=%u portid=%u rxqueueid=%hu\n",
lcore_id, portid, queueid);
}
--
2.34.1
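A minimal sketch of the per-field bound check this patch introduces in parse_config(): each field of a "port,queue,lcore" tuple is parsed with strtoul() and validated against its own maximum instead of a flat 255. The function and field names below are illustrative, and at this point in the series the lcore field is still limited to UCHAR_MAX.

#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

enum { FLD_PORT, FLD_QUEUE, FLD_LCORE, NUM_FLD };

/* Parse "port,queue,lcore" and bound-check each field separately. */
static int
parse_triplet(const char *s, unsigned long out[NUM_FLD])
{
        static const unsigned long max_fld[NUM_FLD] = {
                USHRT_MAX,      /* port ID */
                USHRT_MAX,      /* queue ID */
                UCHAR_MAX,      /* lcore ID, still 8-bit at this point */
        };
        char buf[64], *fld, *end, *save = NULL;
        int i;

        if (strlen(s) >= sizeof(buf))
                return -1;
        strcpy(buf, s);

        for (i = 0; i < NUM_FLD; i++) {
                fld = strtok_r(i == 0 ? buf : NULL, ",", &save);
                if (fld == NULL)
                        return -1;
                errno = 0;
                out[i] = strtoul(fld, &end, 0);
                if (errno != 0 || end == fld || out[i] > max_fld[i])
                        return -1;
        }
        return 0;
}

int
main(void)
{
        unsigned long v[NUM_FLD];

        if (parse_triplet("1,300,2", v) == 0)
                printf("port %lu queue %lu lcore %lu\n",
                       v[FLD_PORT], v[FLD_QUEUE], v[FLD_LCORE]);
        return 0;
}

A queue value such as 300 now passes, while anything above the per-field maximum is still rejected before it can be narrowed by a cast.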
^ permalink raw reply [flat|nested] 100+ messages in thread
* [PATCH v6 05/14] examples/l3fwd: fix lcore ID restriction
2024-03-21 18:47 ` [PATCH v6 00/10] " Sivaprasad Tummala
` (3 preceding siblings ...)
2024-03-21 18:47 ` [PATCH v6 04/14] examples/ipsec-secgw: " Sivaprasad Tummala
@ 2024-03-21 18:47 ` Sivaprasad Tummala
2024-03-21 18:47 ` [PATCH v6 06/14] examples/l3fwd-power: " Sivaprasad Tummala
` (10 subsequent siblings)
15 siblings, 0 replies; 100+ messages in thread
From: Sivaprasad Tummala @ 2024-03-21 18:47 UTC (permalink / raw)
To: david.hunt, anatoly.burakov, jerinj, radu.nicolau, gakhil,
cristian.dumitrescu, ferruh.yigit, konstantin.ananyev, stephen,
mb, thomas
Cc: dev, stable
Currently the config option allows lcore IDs only up to 255,
irrespective of RTE_MAX_LCORE, and needs to be fixed.
The patch fixes this constraint by allowing all
lcore IDs up to RTE_MAX_LCORE.
Fixes: af75078fece3 ("first public release")
Cc: stable@dpdk.org
Signed-off-by: Sivaprasad Tummala <sivaprasad.tummala@amd.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@huawei.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Acked-by: Ferruh Yigit <ferruh.yigit@amd.com>
---
examples/l3fwd/main.c | 18 +++++++++---------
1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/examples/l3fwd/main.c b/examples/l3fwd/main.c
index 4d4738b92b..9fb4fe97d4 100644
--- a/examples/l3fwd/main.c
+++ b/examples/l3fwd/main.c
@@ -99,7 +99,7 @@ struct parm_cfg parm_config;
struct lcore_params {
uint16_t port_id;
uint16_t queue_id;
- uint8_t lcore_id;
+ uint32_t lcore_id;
} __rte_cache_aligned;
static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
@@ -293,7 +293,7 @@ static int
check_lcore_params(void)
{
uint16_t queue, i;
- uint8_t lcore;
+ uint32_t lcore;
int socketid;
for (i = 0; i < nb_lcore_params; ++i) {
@@ -304,12 +304,12 @@ check_lcore_params(void)
}
lcore = lcore_params[i].lcore_id;
if (!rte_lcore_is_enabled(lcore)) {
- printf("error: lcore %hhu is not enabled in lcore mask\n", lcore);
+ printf("error: lcore %u is not enabled in lcore mask\n", lcore);
return -1;
}
if ((socketid = rte_lcore_to_socket_id(lcore) != 0) &&
(numa_on == 0)) {
- printf("warning: lcore %hhu is on socket %d with numa off \n",
+ printf("warning: lcore %u is on socket %d with numa off\n",
lcore, socketid);
}
}
@@ -359,14 +359,14 @@ static int
init_lcore_rx_queues(void)
{
uint16_t i, nb_rx_queue;
- uint8_t lcore;
+ uint32_t lcore;
for (i = 0; i < nb_lcore_params; ++i) {
lcore = lcore_params[i].lcore_id;
nb_rx_queue = lcore_conf[lcore].n_rx_queue;
if (nb_rx_queue >= MAX_RX_QUEUE_PER_LCORE) {
printf("error: too many queues (%u) for lcore: %u\n",
- (unsigned int)nb_rx_queue + 1, (unsigned int)lcore);
+ (unsigned int)nb_rx_queue + 1, lcore);
return -1;
} else {
lcore_conf[lcore].rx_queue_list[nb_rx_queue].port_id =
@@ -500,8 +500,8 @@ parse_config(const char *q_arg)
char *str_fld[_NUM_FLD];
int i;
unsigned size;
- uint16_t max_fld[_NUM_FLD] = {USHRT_MAX,
- USHRT_MAX, UCHAR_MAX};
+ uint32_t max_fld[_NUM_FLD] = {USHRT_MAX,
+ USHRT_MAX, RTE_MAX_LCORE};
nb_lcore_params = 0;
@@ -534,7 +534,7 @@ parse_config(const char *q_arg)
lcore_params_array[nb_lcore_params].queue_id =
(uint16_t)int_fld[FLD_QUEUE];
lcore_params_array[nb_lcore_params].lcore_id =
- (uint8_t)int_fld[FLD_LCORE];
+ (uint32_t)int_fld[FLD_LCORE];
++nb_lcore_params;
}
lcore_params = lcore_params_array;
--
2.34.1
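A short sketch of the widened lcore validation, assuming a DPDK build environment (it is not the sample's exact code): the parse bound becomes RTE_MAX_LCORE instead of 255, and rte_lcore_is_enabled() still rejects any ID that is out of range or missing from the EAL core mask.

#include <stdint.h>
#include <stdio.h>

#include <rte_eal.h>
#include <rte_lcore.h>

/* Reject lcore IDs that are out of range or not enabled in the core mask. */
static int
check_lcore(uint32_t lcore_id)
{
        if (lcore_id >= RTE_MAX_LCORE) {
                printf("error: lcore %u out of range (RTE_MAX_LCORE=%d)\n",
                       lcore_id, RTE_MAX_LCORE);
                return -1;
        }
        if (!rte_lcore_is_enabled(lcore_id)) {
                printf("error: lcore %u is not enabled in lcore mask\n",
                       lcore_id);
                return -1;
        }
        return 0;
}

int
main(int argc, char **argv)
{
        if (rte_eal_init(argc, argv) < 0)
                return 1;

        /* e.g. 300 is rejected on a build where RTE_MAX_LCORE is 128 */
        return check_lcore(300) == 0 ? 0 : 1;
}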
^ permalink raw reply [flat|nested] 100+ messages in thread
* [PATCH v6 06/14] examples/l3fwd-power: fix lcore ID restriction
2024-03-21 18:47 ` [PATCH v6 00/10] " Sivaprasad Tummala
` (4 preceding siblings ...)
2024-03-21 18:47 ` [PATCH v6 05/14] examples/l3fwd: fix lcore " Sivaprasad Tummala
@ 2024-03-21 18:47 ` Sivaprasad Tummala
2024-03-21 18:47 ` [PATCH v6 07/14] examples/l3fwd-graph: " Sivaprasad Tummala
` (9 subsequent siblings)
15 siblings, 0 replies; 100+ messages in thread
From: Sivaprasad Tummala @ 2024-03-21 18:47 UTC (permalink / raw)
To: david.hunt, anatoly.burakov, jerinj, radu.nicolau, gakhil,
cristian.dumitrescu, ferruh.yigit, konstantin.ananyev, stephen,
mb, thomas
Cc: dev, stable
Currently the config option allows lcore IDs only up to 255,
irrespective of RTE_MAX_LCORE, and needs to be fixed.
The patch fixes this constraint by allowing all
lcore IDs up to RTE_MAX_LCORE.
Fixes: f88e7c175a68 ("examples/l3fwd-power: add high/regular perf cores options")
Cc: radu.nicolau@intel.com
Cc: stable@dpdk.org
Signed-off-by: Sivaprasad Tummala <sivaprasad.tummala@amd.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Acked-by: Ferruh Yigit <ferruh.yigit@amd.com>
---
examples/l3fwd-power/main.c | 14 +++++++-------
examples/l3fwd-power/main.h | 2 +-
examples/l3fwd-power/perf_core.c | 6 +++---
3 files changed, 11 insertions(+), 11 deletions(-)
diff --git a/examples/l3fwd-power/main.c b/examples/l3fwd-power/main.c
index 1881b1b194..5411592dca 100644
--- a/examples/l3fwd-power/main.c
+++ b/examples/l3fwd-power/main.c
@@ -1397,7 +1397,7 @@ static int
check_lcore_params(void)
{
uint16_t queue, i;
- uint8_t lcore;
+ uint32_t lcore;
int socketid;
for (i = 0; i < nb_lcore_params; ++i) {
@@ -1408,13 +1408,13 @@ check_lcore_params(void)
}
lcore = lcore_params[i].lcore_id;
if (!rte_lcore_is_enabled(lcore)) {
- printf("error: lcore %hhu is not enabled in lcore "
+ printf("error: lcore %u is not enabled in lcore "
"mask\n", lcore);
return -1;
}
if ((socketid = rte_lcore_to_socket_id(lcore) != 0) &&
(numa_on == 0)) {
- printf("warning: lcore %hhu is on socket %d with numa "
+ printf("warning: lcore %u is on socket %d with numa "
"off\n", lcore, socketid);
}
if (app_mode == APP_MODE_TELEMETRY && lcore == rte_lcore_id()) {
@@ -1466,14 +1466,14 @@ static int
init_lcore_rx_queues(void)
{
uint16_t i, nb_rx_queue;
- uint8_t lcore;
+ uint32_t lcore;
for (i = 0; i < nb_lcore_params; ++i) {
lcore = lcore_params[i].lcore_id;
nb_rx_queue = lcore_conf[lcore].n_rx_queue;
if (nb_rx_queue >= MAX_RX_QUEUE_PER_LCORE) {
printf("error: too many queues (%u) for lcore: %u\n",
- (unsigned)nb_rx_queue + 1, (unsigned)lcore);
+ (unsigned int)nb_rx_queue + 1, lcore);
return -1;
} else {
lcore_conf[lcore].rx_queue_list[nb_rx_queue].port_id =
@@ -1659,7 +1659,7 @@ parse_config(const char *q_arg)
int i;
unsigned size;
unsigned int max_fld[_NUM_FLD] = {USHRT_MAX,
- USHRT_MAX, UCHAR_MAX};
+ USHRT_MAX, RTE_MAX_LCORE};
nb_lcore_params = 0;
@@ -1693,7 +1693,7 @@ parse_config(const char *q_arg)
lcore_params_array[nb_lcore_params].queue_id =
(uint16_t)int_fld[FLD_QUEUE];
lcore_params_array[nb_lcore_params].lcore_id =
- (uint8_t)int_fld[FLD_LCORE];
+ (uint32_t)int_fld[FLD_LCORE];
++nb_lcore_params;
}
lcore_params = lcore_params_array;
diff --git a/examples/l3fwd-power/main.h b/examples/l3fwd-power/main.h
index 40b5194726..194bd82102 100644
--- a/examples/l3fwd-power/main.h
+++ b/examples/l3fwd-power/main.h
@@ -10,7 +10,7 @@
struct lcore_params {
uint16_t port_id;
uint16_t queue_id;
- uint8_t lcore_id;
+ uint32_t lcore_id;
} __rte_cache_aligned;
extern struct lcore_params *lcore_params;
diff --git a/examples/l3fwd-power/perf_core.c b/examples/l3fwd-power/perf_core.c
index 3088935ee0..d8b35d062c 100644
--- a/examples/l3fwd-power/perf_core.c
+++ b/examples/l3fwd-power/perf_core.c
@@ -24,7 +24,7 @@ struct perf_lcore_params {
uint16_t port_id;
uint16_t queue_id;
uint8_t high_perf;
- uint8_t lcore_idx;
+ uint32_t lcore_idx;
} __rte_cache_aligned;
static struct perf_lcore_params prf_lc_prms[MAX_LCORE_PARAMS];
@@ -133,7 +133,7 @@ parse_perf_config(const char *q_arg)
int i;
unsigned int size;
unsigned int max_fld[_NUM_FLD] = {USHRT_MAX, USHRT_MAX,
- UCHAR_MAX, UCHAR_MAX};
+ UCHAR_MAX, RTE_MAX_LCORE};
nb_prf_lc_prms = 0;
@@ -171,7 +171,7 @@ parse_perf_config(const char *q_arg)
prf_lc_prms[nb_prf_lc_prms].high_perf =
!!(uint8_t)int_fld[FLD_LCORE_HP];
prf_lc_prms[nb_prf_lc_prms].lcore_idx =
- (uint8_t)int_fld[FLD_LCORE_IDX];
+ (uint32_t)int_fld[FLD_LCORE_IDX];
++nb_prf_lc_prms;
}
--
2.34.1
^ permalink raw reply [flat|nested] 100+ messages in thread
* [PATCH v6 07/14] examples/l3fwd-graph: fix lcore ID restriction
2024-03-21 18:47 ` [PATCH v6 00/10] " Sivaprasad Tummala
` (5 preceding siblings ...)
2024-03-21 18:47 ` [PATCH v6 06/14] examples/l3fwd-power: " Sivaprasad Tummala
@ 2024-03-21 18:47 ` Sivaprasad Tummala
2024-03-21 18:47 ` [PATCH v6 08/14] examples/ipsec-secgw: " Sivaprasad Tummala
` (8 subsequent siblings)
15 siblings, 0 replies; 100+ messages in thread
From: Sivaprasad Tummala @ 2024-03-21 18:47 UTC (permalink / raw)
To: david.hunt, anatoly.burakov, jerinj, radu.nicolau, gakhil,
cristian.dumitrescu, ferruh.yigit, konstantin.ananyev, stephen,
mb, thomas
Cc: dev, ndabilpuram, stable
Currently the config option allows lcore IDs only up to 255,
irrespective of RTE_MAX_LCORE, and needs to be fixed.
The patch fixes this constraint by allowing all
lcore IDs up to RTE_MAX_LCORE. The queue
IDs are also widened to support up to 65535.
Fixes: 08bd1a174461 ("examples/l3fwd-graph: add graph-based l3fwd skeleton")
Cc: ndabilpuram@marvell.com
Cc: stable@dpdk.org
Signed-off-by: Sivaprasad Tummala <sivaprasad.tummala@amd.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Acked-by: Ferruh Yigit <ferruh.yigit@amd.com>
---
examples/l3fwd-graph/main.c | 14 +++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/examples/l3fwd-graph/main.c b/examples/l3fwd-graph/main.c
index c3a7b267e9..8b7aa6642b 100644
--- a/examples/l3fwd-graph/main.c
+++ b/examples/l3fwd-graph/main.c
@@ -111,7 +111,7 @@ static struct lcore_conf lcore_conf[RTE_MAX_LCORE];
struct lcore_params {
uint16_t port_id;
uint16_t queue_id;
- uint8_t lcore_id;
+ uint32_t lcore_id;
} __rte_cache_aligned;
static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
@@ -207,7 +207,7 @@ check_lcore_params(void)
{
uint16_t queue, i;
int socketid;
- uint8_t lcore;
+ uint32_t lcore;
for (i = 0; i < nb_lcore_params; ++i) {
queue = lcore_params[i].queue_id;
@@ -217,7 +217,7 @@ check_lcore_params(void)
}
lcore = lcore_params[i].lcore_id;
if (!rte_lcore_is_enabled(lcore)) {
- printf("Error: lcore %hhu is not enabled in lcore mask\n",
+ printf("Error: lcore %u is not enabled in lcore mask\n",
lcore);
return -1;
}
@@ -228,7 +228,7 @@ check_lcore_params(void)
}
socketid = rte_lcore_to_socket_id(lcore);
if ((socketid != 0) && (numa_on == 0)) {
- printf("Warning: lcore %hhu is on socket %d with numa off\n",
+ printf("Warning: lcore %u is on socket %d with numa off\n",
lcore, socketid);
}
}
@@ -282,7 +282,7 @@ static int
init_lcore_rx_queues(void)
{
uint16_t i, nb_rx_queue;
- uint8_t lcore;
+ uint32_t lcore;
for (i = 0; i < nb_lcore_params; ++i) {
lcore = lcore_params[i].lcore_id;
@@ -290,7 +290,7 @@ init_lcore_rx_queues(void)
if (nb_rx_queue >= MAX_RX_QUEUE_PER_LCORE) {
printf("Error: too many queues (%u) for lcore: %u\n",
(unsigned int)nb_rx_queue + 1,
- (unsigned int)lcore);
+ lcore);
return -1;
}
@@ -452,7 +452,7 @@ parse_config(const char *q_arg)
lcore_params_array[nb_lcore_params].queue_id =
(uint16_t)int_fld[FLD_QUEUE];
lcore_params_array[nb_lcore_params].lcore_id =
- (uint8_t)int_fld[FLD_LCORE];
+ (uint32_t)int_fld[FLD_LCORE];
++nb_lcore_params;
}
lcore_params = lcore_params_array;
--
2.34.1
^ permalink raw reply [flat|nested] 100+ messages in thread
* [PATCH v6 08/14] examples/ipsec-secgw: fix lcore ID restriction
2024-03-21 18:47 ` [PATCH v6 00/10] " Sivaprasad Tummala
` (6 preceding siblings ...)
2024-03-21 18:47 ` [PATCH v6 07/14] examples/l3fwd-graph: " Sivaprasad Tummala
@ 2024-03-21 18:47 ` Sivaprasad Tummala
2024-03-21 18:47 ` [PATCH v6 09/14] examples/qos_sched: " Sivaprasad Tummala
` (7 subsequent siblings)
15 siblings, 0 replies; 100+ messages in thread
From: Sivaprasad Tummala @ 2024-03-21 18:47 UTC (permalink / raw)
To: david.hunt, anatoly.burakov, jerinj, radu.nicolau, gakhil,
cristian.dumitrescu, ferruh.yigit, konstantin.ananyev, stephen,
mb, thomas
Cc: dev, sergio.gonzalez.monroy, stable
Currently the config option allows lcore IDs only up to 255,
irrespective of RTE_MAX_LCORE, and needs to be fixed.
The patch fixes this constraint by allowing all
lcore IDs up to RTE_MAX_LCORE. The queue
IDs are also widened to support up to 65535.
Fixes: d299106e8e31 ("examples/ipsec-secgw: add IPsec sample application")
Cc: sergio.gonzalez.monroy@intel.com
Cc: stable@dpdk.org
Signed-off-by: Sivaprasad Tummala <sivaprasad.tummala@amd.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@huawei.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Acked-by: Ferruh Yigit <ferruh.yigit@amd.com>
---
examples/ipsec-secgw/event_helper.h | 2 +-
examples/ipsec-secgw/ipsec-secgw.c | 17 +++++++++--------
examples/ipsec-secgw/ipsec.c | 2 +-
examples/ipsec-secgw/ipsec.h | 4 ++--
4 files changed, 13 insertions(+), 12 deletions(-)
diff --git a/examples/ipsec-secgw/event_helper.h b/examples/ipsec-secgw/event_helper.h
index dfb81bfcf1..be635685b4 100644
--- a/examples/ipsec-secgw/event_helper.h
+++ b/examples/ipsec-secgw/event_helper.h
@@ -102,7 +102,7 @@ struct eh_event_link_info {
/**< Event port ID */
uint8_t eventq_id;
/**< Event queue to be linked to the port */
- uint8_t lcore_id;
+ uint32_t lcore_id;
/**< Lcore to be polling on this port */
};
diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index 106a058b60..0fa9622626 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
@@ -221,7 +221,7 @@ static const char *cfgfile;
struct lcore_params {
uint16_t port_id;
uint16_t queue_id;
- uint8_t lcore_id;
+ uint32_t lcore_id;
} __rte_cache_aligned;
static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
@@ -807,7 +807,7 @@ check_flow_params(uint16_t fdir_portid, uint8_t fdir_qid)
static int32_t
check_poll_mode_params(struct eh_conf *eh_conf)
{
- uint8_t lcore;
+ uint32_t lcore;
uint16_t portid;
uint16_t i;
int32_t socket_id;
@@ -826,13 +826,13 @@ check_poll_mode_params(struct eh_conf *eh_conf)
for (i = 0; i < nb_lcore_params; ++i) {
lcore = lcore_params[i].lcore_id;
if (!rte_lcore_is_enabled(lcore)) {
- printf("error: lcore %hhu is not enabled in "
+ printf("error: lcore %u is not enabled in "
"lcore mask\n", lcore);
return -1;
}
socket_id = rte_lcore_to_socket_id(lcore);
if (socket_id != 0 && numa_on == 0) {
- printf("warning: lcore %hhu is on socket %d "
+ printf("warning: lcore %u is on socket %d "
"with numa off\n",
lcore, socket_id);
}
@@ -867,7 +867,7 @@ static int32_t
init_lcore_rx_queues(void)
{
uint16_t i, nb_rx_queue;
- uint8_t lcore;
+ uint32_t lcore;
for (i = 0; i < nb_lcore_params; ++i) {
lcore = lcore_params[i].lcore_id;
@@ -1049,7 +1049,7 @@ parse_config(const char *q_arg)
int32_t i;
uint32_t size;
uint32_t max_fld[_NUM_FLD] = {USHRT_MAX,
- USHRT_MAX, UCHAR_MAX};
+ USHRT_MAX, RTE_MAX_LCORE};
nb_lcore_params = 0;
@@ -1083,7 +1083,7 @@ parse_config(const char *q_arg)
lcore_params_array[nb_lcore_params].queue_id =
(uint16_t)int_fld[FLD_QUEUE];
lcore_params_array[nb_lcore_params].lcore_id =
- (uint8_t)int_fld[FLD_LCORE];
+ (uint32_t)int_fld[FLD_LCORE];
++nb_lcore_params;
}
lcore_params = lcore_params_array;
@@ -1919,7 +1919,8 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads,
struct rte_eth_dev_info dev_info;
struct rte_eth_txconf *txconf;
uint16_t nb_tx_queue, nb_rx_queue;
- uint16_t tx_queueid, rx_queueid, queue, lcore_id;
+ uint16_t tx_queueid, rx_queueid, queue;
+ uint32_t lcore_id;
int32_t ret, socket_id;
struct lcore_conf *qconf;
struct rte_ether_addr ethaddr;
diff --git a/examples/ipsec-secgw/ipsec.c b/examples/ipsec-secgw/ipsec.c
index c321108119..b52b0ffc3d 100644
--- a/examples/ipsec-secgw/ipsec.c
+++ b/examples/ipsec-secgw/ipsec.c
@@ -259,7 +259,7 @@ create_lookaside_session(struct ipsec_ctx *ipsec_ctx_lcore[],
continue;
/* Looking for cryptodev, which can handle this SA */
- key.lcore_id = (uint8_t)lcore_id;
+ key.lcore_id = lcore_id;
key.cipher_algo = (uint8_t)sa->cipher_algo;
key.auth_algo = (uint8_t)sa->auth_algo;
key.aead_algo = (uint8_t)sa->aead_algo;
diff --git a/examples/ipsec-secgw/ipsec.h b/examples/ipsec-secgw/ipsec.h
index 29b9b283f0..6526a80d81 100644
--- a/examples/ipsec-secgw/ipsec.h
+++ b/examples/ipsec-secgw/ipsec.h
@@ -256,11 +256,11 @@ extern struct offloads tx_offloads;
* (hash key calculation reads 8 bytes if this struct is size 5 bytes).
*/
struct cdev_key {
- uint16_t lcore_id;
+ uint32_t lcore_id;
uint8_t cipher_algo;
uint8_t auth_algo;
uint8_t aead_algo;
- uint8_t padding[3]; /* padding to 8-byte size should be zeroed */
+ uint8_t padding; /* padding to 8-byte size should be zeroed */
};
struct socket_ctx {
--
2.34.1
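A quick standalone check of the cdev_key layout after this change (the struct is copied from the patch; the static assertion is added here only for illustration): widening lcore_id to 32 bits shrinks the explicit padding from three bytes to one, so the 8-byte key read by the hash lookup keeps its size.

#include <stdint.h>
#include <stdio.h>

struct cdev_key {
        uint32_t lcore_id;
        uint8_t cipher_algo;
        uint8_t auth_algo;
        uint8_t aead_algo;
        uint8_t padding; /* padding to 8-byte size should be zeroed */
};

/* The hash key calculation reads 8 bytes, so the size must not change. */
_Static_assert(sizeof(struct cdev_key) == 8,
               "cdev_key must stay 8 bytes for the hash lookup");

int
main(void)
{
        printf("sizeof(struct cdev_key) = %zu\n", sizeof(struct cdev_key));
        return 0;
}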
^ permalink raw reply [flat|nested] 100+ messages in thread
* [PATCH v6 09/14] examples/qos_sched: fix lcore ID restriction
2024-03-21 18:47 ` [PATCH v6 00/10] " Sivaprasad Tummala
` (7 preceding siblings ...)
2024-03-21 18:47 ` [PATCH v6 08/14] examples/ipsec-secgw: " Sivaprasad Tummala
@ 2024-03-21 18:47 ` Sivaprasad Tummala
2024-03-21 18:47 ` [PATCH v6 10/14] examples/vm_power_manager: " Sivaprasad Tummala
` (6 subsequent siblings)
15 siblings, 0 replies; 100+ messages in thread
From: Sivaprasad Tummala @ 2024-03-21 18:47 UTC (permalink / raw)
To: david.hunt, anatoly.burakov, jerinj, radu.nicolau, gakhil,
cristian.dumitrescu, ferruh.yigit, konstantin.ananyev, stephen,
mb, thomas
Cc: dev, stable
Currently the config option allows lcore IDs only up to 255,
irrespective of RTE_MAX_LCORE, and needs to be fixed.
The patch fixes this constraint by allowing all
lcore IDs up to RTE_MAX_LCORE. The queue
IDs are also widened to support up to 65535.
Fixes: de3cfa2c9823 ("sched: initial import")
Cc: stable@dpdk.org
Signed-off-by: Sivaprasad Tummala <sivaprasad.tummala@amd.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Acked-by: Ferruh Yigit <ferruh.yigit@amd.com>
---
examples/qos_sched/args.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/examples/qos_sched/args.c b/examples/qos_sched/args.c
index 8d61d3e454..886542b3c1 100644
--- a/examples/qos_sched/args.c
+++ b/examples/qos_sched/args.c
@@ -184,10 +184,10 @@ app_parse_flow_conf(const char *conf_str)
pconf->rx_port = vals[0];
pconf->tx_port = vals[1];
- pconf->rx_core = (uint8_t)vals[2];
- pconf->wt_core = (uint8_t)vals[3];
+ pconf->rx_core = vals[2];
+ pconf->wt_core = vals[3];
if (ret == 5)
- pconf->tx_core = (uint8_t)vals[4];
+ pconf->tx_core = vals[4];
else
pconf->tx_core = pconf->wt_core;
--
2.34.1
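A small illustration, simplified and not the example's actual parser, of why dropping the (uint8_t) casts matters for the "(rx port, tx port, rx core, wt core, tx core)" tuples: a core index above 255 used to be silently truncated.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct flow_conf {
        uint16_t rx_port, tx_port;
        uint32_t rx_core, wt_core, tx_core;
};

int
main(void)
{
        uint32_t vals[5];
        struct flow_conf fc;

        if (sscanf("(0,1,260,261,262)",
                   "(%" SCNu32 ",%" SCNu32 ",%" SCNu32 ",%" SCNu32 ",%" SCNu32 ")",
                   &vals[0], &vals[1], &vals[2], &vals[3], &vals[4]) != 5)
                return 1;

        fc.rx_port = vals[0];
        fc.tx_port = vals[1];
        fc.rx_core = vals[2];   /* the old (uint8_t)vals[2] would yield 4 */
        fc.wt_core = vals[3];
        fc.tx_core = vals[4];

        printf("rx_core=%" PRIu32 " wt_core=%" PRIu32 " tx_core=%" PRIu32 "\n",
               fc.rx_core, fc.wt_core, fc.tx_core);
        return 0;
}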
^ permalink raw reply [flat|nested] 100+ messages in thread
* [PATCH v6 10/14] examples/vm_power_manager: fix lcore ID restriction
2024-03-21 18:47 ` [PATCH v6 00/10] " Sivaprasad Tummala
` (8 preceding siblings ...)
2024-03-21 18:47 ` [PATCH v6 09/14] examples/qos_sched: " Sivaprasad Tummala
@ 2024-03-21 18:47 ` Sivaprasad Tummala
2024-03-21 18:47 ` [PATCH v6 11/14] examples/l3fwd: fix port " Sivaprasad Tummala
` (5 subsequent siblings)
15 siblings, 0 replies; 100+ messages in thread
From: Sivaprasad Tummala @ 2024-03-21 18:47 UTC (permalink / raw)
To: david.hunt, anatoly.burakov, jerinj, radu.nicolau, gakhil,
cristian.dumitrescu, ferruh.yigit, konstantin.ananyev, stephen,
mb, thomas
Cc: dev, marcinx.hajkowski, stable
Currently the config option allows lcore IDs only up to 255,
irrespective of RTE_MAX_LCORE, and needs to be fixed.
The patch fixes this constraint by allowing all
lcore IDs up to RTE_MAX_LCORE. The queue
IDs are also widened to support up to 65535.
Fixes: 0e8f47491f09 ("examples/vm_power: add command to query CPU frequency")
Cc: marcinx.hajkowski@intel.com
Cc: stable@dpdk.org
Signed-off-by: Sivaprasad Tummala <sivaprasad.tummala@amd.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Acked-by: Ferruh Yigit <ferruh.yigit@amd.com>
---
examples/vm_power_manager/guest_cli/vm_power_cli_guest.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/examples/vm_power_manager/guest_cli/vm_power_cli_guest.c b/examples/vm_power_manager/guest_cli/vm_power_cli_guest.c
index 94bfbbaf78..5eddb47847 100644
--- a/examples/vm_power_manager/guest_cli/vm_power_cli_guest.c
+++ b/examples/vm_power_manager/guest_cli/vm_power_cli_guest.c
@@ -401,7 +401,7 @@ check_response_cmd(unsigned int lcore_id, int *result)
struct cmd_set_cpu_freq_result {
cmdline_fixed_string_t set_cpu_freq;
- uint8_t lcore_id;
+ uint32_t lcore_id;
cmdline_fixed_string_t cmd;
};
@@ -444,7 +444,7 @@ cmdline_parse_token_string_t cmd_set_cpu_freq =
set_cpu_freq, "set_cpu_freq");
cmdline_parse_token_num_t cmd_set_cpu_freq_core_num =
TOKEN_NUM_INITIALIZER(struct cmd_set_cpu_freq_result,
- lcore_id, RTE_UINT8);
+ lcore_id, RTE_UINT32);
cmdline_parse_token_string_t cmd_set_cpu_freq_cmd_cmd =
TOKEN_STRING_INITIALIZER(struct cmd_set_cpu_freq_result,
cmd, "up#down#min#max#enable_turbo#disable_turbo");
--
2.34.1
^ permalink raw reply [flat|nested] 100+ messages in thread
* [PATCH v6 11/14] examples/l3fwd: fix port ID restriction
2024-03-21 18:47 ` [PATCH v6 00/10] " Sivaprasad Tummala
` (9 preceding siblings ...)
2024-03-21 18:47 ` [PATCH v6 10/14] examples/vm_power_manager: " Sivaprasad Tummala
@ 2024-03-21 18:47 ` Sivaprasad Tummala
2024-03-21 18:47 ` [PATCH v6 12/14] examples/l3fwd-power: " Sivaprasad Tummala
` (4 subsequent siblings)
15 siblings, 0 replies; 100+ messages in thread
From: Sivaprasad Tummala @ 2024-03-21 18:47 UTC (permalink / raw)
To: david.hunt, anatoly.burakov, jerinj, radu.nicolau, gakhil,
cristian.dumitrescu, ferruh.yigit, konstantin.ananyev, stephen,
mb, thomas
Cc: dev, stable
Currently the application supports port IDs only up to 255,
irrespective of RTE_MAX_ETHPORTS.
The patch fixes this constraint by allowing port
IDs up to RTE_MAX_ETHPORTS.
Fixes: af75078fece3 ("first public release")
Cc: stable@dpdk.org
Signed-off-by: Sivaprasad Tummala <sivaprasad.tummala@amd.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@huawei.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Acked-by: Ferruh Yigit <ferruh.yigit@amd.com>
---
examples/l3fwd/main.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/examples/l3fwd/main.c b/examples/l3fwd/main.c
index 9fb4fe97d4..19e4d9dfa2 100644
--- a/examples/l3fwd/main.c
+++ b/examples/l3fwd/main.c
@@ -500,7 +500,7 @@ parse_config(const char *q_arg)
char *str_fld[_NUM_FLD];
int i;
unsigned size;
- uint32_t max_fld[_NUM_FLD] = {USHRT_MAX,
+ uint32_t max_fld[_NUM_FLD] = {RTE_MAX_ETHPORTS,
USHRT_MAX, RTE_MAX_LCORE};
nb_lcore_params = 0;
@@ -530,7 +530,7 @@ parse_config(const char *q_arg)
return -1;
}
lcore_params_array[nb_lcore_params].port_id =
- (uint8_t)int_fld[FLD_PORT];
+ (uint16_t)int_fld[FLD_PORT];
lcore_params_array[nb_lcore_params].queue_id =
(uint16_t)int_fld[FLD_QUEUE];
lcore_params_array[nb_lcore_params].lcore_id =
--
2.34.1
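For illustration only, the effect of widening the cast in parse_config(): the old (uint8_t) conversion silently wrapped any port ID above 255, while with a uint16_t field plus the RTE_MAX_ETHPORTS bound, out-of-range ports are rejected at parse time and accepted values are stored unmodified.

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
        unsigned long parsed = 300;     /* value coming out of strtoul() */
        uint8_t old_port = (uint8_t)parsed;
        uint16_t new_port = (uint16_t)parsed;

        printf("old cast: %u\n", old_port);     /* 44 (300 wraps modulo 256) */
        printf("new cast: %u\n", new_port);     /* 300 */
        return 0;
}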
^ permalink raw reply [flat|nested] 100+ messages in thread
* [PATCH v6 12/14] examples/l3fwd-power: fix port ID restriction
2024-03-21 18:47 ` [PATCH v6 00/10] " Sivaprasad Tummala
` (10 preceding siblings ...)
2024-03-21 18:47 ` [PATCH v6 11/14] examples/l3fwd: fix port " Sivaprasad Tummala
@ 2024-03-21 18:47 ` Sivaprasad Tummala
2024-03-21 18:47 ` [PATCH v6 13/14] examples/l3fwd-graph: " Sivaprasad Tummala
` (3 subsequent siblings)
15 siblings, 0 replies; 100+ messages in thread
From: Sivaprasad Tummala @ 2024-03-21 18:47 UTC (permalink / raw)
To: david.hunt, anatoly.burakov, jerinj, radu.nicolau, gakhil,
cristian.dumitrescu, ferruh.yigit, konstantin.ananyev, stephen,
mb, thomas
Cc: dev, stable
Currently the application supports port IDs only up to 255,
irrespective of RTE_MAX_ETHPORTS.
The patch fixes this constraint by allowing port
IDs up to RTE_MAX_ETHPORTS.
Fixes: f88e7c175a68 ("examples/l3fwd-power: add high/regular perf cores options")
Cc: radu.nicolau@intel.com
Cc: stable@dpdk.org
Signed-off-by: Sivaprasad Tummala <sivaprasad.tummala@amd.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Acked-by: Ferruh Yigit <ferruh.yigit@amd.com>
---
examples/l3fwd-power/main.c | 4 ++--
examples/l3fwd-power/perf_core.c | 4 ++--
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/examples/l3fwd-power/main.c b/examples/l3fwd-power/main.c
index 5411592dca..822b802f29 100644
--- a/examples/l3fwd-power/main.c
+++ b/examples/l3fwd-power/main.c
@@ -1658,7 +1658,7 @@ parse_config(const char *q_arg)
char *str_fld[_NUM_FLD];
int i;
unsigned size;
- unsigned int max_fld[_NUM_FLD] = {USHRT_MAX,
+ unsigned int max_fld[_NUM_FLD] = {RTE_MAX_ETHPORTS,
USHRT_MAX, RTE_MAX_LCORE};
nb_lcore_params = 0;
@@ -1689,7 +1689,7 @@ parse_config(const char *q_arg)
return -1;
}
lcore_params_array[nb_lcore_params].port_id =
- (uint8_t)int_fld[FLD_PORT];
+ (uint16_t)int_fld[FLD_PORT];
lcore_params_array[nb_lcore_params].queue_id =
(uint16_t)int_fld[FLD_QUEUE];
lcore_params_array[nb_lcore_params].lcore_id =
diff --git a/examples/l3fwd-power/perf_core.c b/examples/l3fwd-power/perf_core.c
index d8b35d062c..c2cdc4bf49 100644
--- a/examples/l3fwd-power/perf_core.c
+++ b/examples/l3fwd-power/perf_core.c
@@ -132,7 +132,7 @@ parse_perf_config(const char *q_arg)
char *str_fld[_NUM_FLD];
int i;
unsigned int size;
- unsigned int max_fld[_NUM_FLD] = {USHRT_MAX, USHRT_MAX,
+ unsigned int max_fld[_NUM_FLD] = {RTE_MAX_ETHPORTS, USHRT_MAX,
UCHAR_MAX, RTE_MAX_LCORE};
nb_prf_lc_prms = 0;
@@ -165,7 +165,7 @@ parse_perf_config(const char *q_arg)
return -1;
}
prf_lc_prms[nb_prf_lc_prms].port_id =
- (uint8_t)int_fld[FLD_PORT];
+ (uint16_t)int_fld[FLD_PORT];
prf_lc_prms[nb_prf_lc_prms].queue_id =
(uint16_t)int_fld[FLD_QUEUE];
prf_lc_prms[nb_prf_lc_prms].high_perf =
--
2.34.1
^ permalink raw reply [flat|nested] 100+ messages in thread
* [PATCH v6 13/14] examples/l3fwd-graph: fix port ID restriction
2024-03-21 18:47 ` [PATCH v6 00/10] " Sivaprasad Tummala
` (11 preceding siblings ...)
2024-03-21 18:47 ` [PATCH v6 12/14] examples/l3fwd-power: " Sivaprasad Tummala
@ 2024-03-21 18:47 ` Sivaprasad Tummala
2024-03-21 18:47 ` [PATCH v6 14/14] examples/ipsec-secgw: " Sivaprasad Tummala
` (2 subsequent siblings)
15 siblings, 0 replies; 100+ messages in thread
From: Sivaprasad Tummala @ 2024-03-21 18:47 UTC (permalink / raw)
To: david.hunt, anatoly.burakov, jerinj, radu.nicolau, gakhil,
cristian.dumitrescu, ferruh.yigit, konstantin.ananyev, stephen,
mb, thomas
Cc: dev, ndabilpuram, stable
Currently the application supports port IDs only up to 255,
irrespective of RTE_MAX_ETHPORTS.
The patch fixes this constraint by allowing port
IDs up to RTE_MAX_ETHPORTS.
Fixes: 08bd1a174461 ("examples/l3fwd-graph: add graph-based l3fwd skeleton")
Cc: ndabilpuram@marvell.com
Cc: stable@dpdk.org
Signed-off-by: Sivaprasad Tummala <sivaprasad.tummala@amd.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Acked-by: Ferruh Yigit <ferruh.yigit@amd.com>
---
examples/l3fwd-graph/main.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/examples/l3fwd-graph/main.c b/examples/l3fwd-graph/main.c
index 8b7aa6642b..557ac6d823 100644
--- a/examples/l3fwd-graph/main.c
+++ b/examples/l3fwd-graph/main.c
@@ -448,7 +448,7 @@ parse_config(const char *q_arg)
}
lcore_params_array[nb_lcore_params].port_id =
- (uint8_t)int_fld[FLD_PORT];
+ (uint16_t)int_fld[FLD_PORT];
lcore_params_array[nb_lcore_params].queue_id =
(uint16_t)int_fld[FLD_QUEUE];
lcore_params_array[nb_lcore_params].lcore_id =
--
2.34.1
^ permalink raw reply [flat|nested] 100+ messages in thread
* [PATCH v6 14/14] examples/ipsec-secgw: fix port ID restriction
2024-03-21 18:47 ` [PATCH v6 00/10] " Sivaprasad Tummala
` (12 preceding siblings ...)
2024-03-21 18:47 ` [PATCH v6 13/14] examples/l3fwd-graph: " Sivaprasad Tummala
@ 2024-03-21 18:47 ` Sivaprasad Tummala
2024-03-21 18:47 ` [PATCH v6 00/10] fix lcore " Sivaprasad Tummala
2024-03-26 12:55 ` [PATCH v7 00/14] " Sivaprasad Tummala
15 siblings, 0 replies; 100+ messages in thread
From: Sivaprasad Tummala @ 2024-03-21 18:47 UTC (permalink / raw)
To: david.hunt, anatoly.burakov, jerinj, radu.nicolau, gakhil,
cristian.dumitrescu, ferruh.yigit, konstantin.ananyev, stephen,
mb, thomas
Cc: dev, sergio.gonzalez.monroy, stable
Currently the application supports port IDs only up to 255,
irrespective of RTE_MAX_ETHPORTS.
The patch fixes this constraint by allowing port
IDs up to RTE_MAX_ETHPORTS.
Fixes: d299106e8e31 ("examples/ipsec-secgw: add IPsec sample application")
Cc: sergio.gonzalez.monroy@intel.com
Cc: stable@dpdk.org
Signed-off-by: Sivaprasad Tummala <sivaprasad.tummala@amd.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@huawei.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Acked-by: Ferruh Yigit <ferruh.yigit@amd.com>
---
examples/ipsec-secgw/ipsec-secgw.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index 0fa9622626..dc7491a2b9 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
@@ -1048,7 +1048,7 @@ parse_config(const char *q_arg)
char *str_fld[_NUM_FLD];
int32_t i;
uint32_t size;
- uint32_t max_fld[_NUM_FLD] = {USHRT_MAX,
+ uint32_t max_fld[_NUM_FLD] = {RTE_MAX_ETHPORTS,
USHRT_MAX, RTE_MAX_LCORE};
nb_lcore_params = 0;
@@ -1079,7 +1079,7 @@ parse_config(const char *q_arg)
return -1;
}
lcore_params_array[nb_lcore_params].port_id =
- (uint8_t)int_fld[FLD_PORT];
+ (uint16_t)int_fld[FLD_PORT];
lcore_params_array[nb_lcore_params].queue_id =
(uint16_t)int_fld[FLD_QUEUE];
lcore_params_array[nb_lcore_params].lcore_id =
--
2.34.1
^ permalink raw reply [flat|nested] 100+ messages in thread
* [PATCH v6 00/10] fix lcore ID restriction
2024-03-21 18:47 ` [PATCH v6 00/10] " Sivaprasad Tummala
` (13 preceding siblings ...)
2024-03-21 18:47 ` [PATCH v6 14/14] examples/ipsec-secgw: " Sivaprasad Tummala
@ 2024-03-21 18:47 ` Sivaprasad Tummala
2024-03-26 12:55 ` [PATCH v7 00/14] " Sivaprasad Tummala
15 siblings, 0 replies; 100+ messages in thread
From: Sivaprasad Tummala @ 2024-03-21 18:47 UTC (permalink / raw)
To: david.hunt, anatoly.burakov, jerinj, radu.nicolau, gakhil,
cristian.dumitrescu, ferruh.yigit, konstantin.ananyev, stephen,
mb, thomas
Cc: dev
With modern CPUs it is possible to have a higher
CPU count, and thus a higher RTE_MAX_LCORE.
In DPDK sample applications, the current config
lcore options are hard-limited to 255.
The patchset fixes these constraints by allowing
all lcore IDs up to RTE_MAX_LCORE. The queue
IDs are also widened to support up to 65535.
The port ID constraints are also fixed to support
up to RTE_MAX_ETHPORTS.
v6:
- split queue_id, lcore_id and port_id changes as
separate patches.
- updated git commit description on individual
patches
v5:
- updated lcore_id type to uint32_t
v4:
- fixed build errors with queue_id type
in ipsec-secgw
v3:
- updated queue_id type to uint16_t
v2:
- fixed typo with lcore_id type in l3fwd
Sivaprasad Tummala (10):
examples/l3fwd: fix queue ID restriction
examples/l3fwd-power: fix queue ID restriction
examples/l3fwd-graph: fix queue ID restriction
examples/ipsec-secgw: fix queue ID restriction
examples/l3fwd: fix lcore ID restriction
examples/l3fwd-power: fix lcore ID restriction
examples/l3fwd-graph: fix lcore ID restriction
examples/ipsec-secgw: fix lcore ID restriction
examples/qos_sched: fix lcore ID restriction
examples/vm_power_manager: fix lcore ID restriction
examples/ipsec-secgw/event_helper.h | 2 +-
examples/ipsec-secgw/ipsec-secgw.c | 35 +++++------
examples/ipsec-secgw/ipsec.c | 2 +-
examples/ipsec-secgw/ipsec.h | 6 +-
examples/ipsec-secgw/ipsec_worker.c | 10 ++--
examples/l3fwd-graph/main.c | 31 +++++-----
examples/l3fwd-power/main.c | 59 +++++++++----------
examples/l3fwd-power/main.h | 4 +-
examples/l3fwd-power/perf_core.c | 14 +++--
examples/l3fwd/l3fwd.h | 2 +-
examples/l3fwd/l3fwd_acl.c | 4 +-
examples/l3fwd/l3fwd_em.c | 4 +-
examples/l3fwd/l3fwd_event.h | 2 +-
examples/l3fwd/l3fwd_fib.c | 4 +-
examples/l3fwd/l3fwd_lpm.c | 5 +-
examples/l3fwd/main.c | 38 ++++++------
examples/qos_sched/args.c | 6 +-
.../guest_cli/vm_power_cli_guest.c | 4 +-
18 files changed, 119 insertions(+), 113 deletions(-)
--
2.34.1
^ permalink raw reply [flat|nested] 100+ messages in thread
* Re: [PATCH v6 01/14] examples/l3fwd: fix queue ID restriction
2024-03-21 18:47 ` [PATCH v6 01/14] examples/l3fwd: fix queue " Sivaprasad Tummala
@ 2024-03-22 15:41 ` David Marchand
2024-03-25 12:45 ` Tummala, Sivaprasad
0 siblings, 1 reply; 100+ messages in thread
From: David Marchand @ 2024-03-22 15:41 UTC (permalink / raw)
To: Sivaprasad Tummala
Cc: david.hunt, anatoly.burakov, jerinj, radu.nicolau, gakhil,
cristian.dumitrescu, ferruh.yigit, konstantin.ananyev, stephen,
mb, thomas, dev, stable
Hello,
On Thu, Mar 21, 2024 at 7:48 PM Sivaprasad Tummala
<sivaprasad.tummala@amd.com> wrote:
>
> Currently application supports queue IDs up to 255
I think it only relates to Rx queue IDs.
Before this patch, the Tx queue count is already stored as a uint32_t
or uint16_t and checked against RTE_MAX_LCORE.
So no limit on the Tx queue count side.
Can you just adjust the commitlog accordingly?
(One may argue that the Tx queue count should be also checked against
RTE_MAX_QUEUES_PER_PORT, but it is a separate issue to this patch and
in practice, we probably always have RTE_MAX_QUEUES_PER_PORT >
RTE_MAX_LCORE).
> and max queues of 256 irrespective of device support.
> This limits the number of active lcores to 256.
>
> The patch fixes these constraints by increasing
> the queue IDs to support up to 65535.
[snip]
> diff --git a/examples/l3fwd/l3fwd_acl.c b/examples/l3fwd/l3fwd_acl.c
> index 401692bcec..2bd63181bc 100644
> --- a/examples/l3fwd/l3fwd_acl.c
> +++ b/examples/l3fwd/l3fwd_acl.c
> @@ -997,7 +997,7 @@ acl_main_loop(__rte_unused void *dummy)
> uint64_t prev_tsc, diff_tsc, cur_tsc;
> int i, nb_rx;
> uint16_t portid;
> - uint8_t queueid;
> + uint16_t queueid;
> struct lcore_conf *qconf;
> int socketid;
> const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
> @@ -1020,7 +1020,7 @@ acl_main_loop(__rte_unused void *dummy)
> portid = qconf->rx_queue_list[i].port_id;
> queueid = qconf->rx_queue_list[i].queue_id;
> RTE_LOG(INFO, L3FWD,
> - " -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
> + " -- lcoreid=%u portid=%u rxqueueid=%hu\n",
Nit: should be %PRIu16 (idem in other hunks formatting a queue).
> lcore_id, portid, queueid);
> }
>
[snip]
> diff --git a/examples/l3fwd/main.c b/examples/l3fwd/main.c
> index 8d32ae1dd5..4d4738b92b 100644
> --- a/examples/l3fwd/main.c
> +++ b/examples/l3fwd/main.c
[snip]
> @@ -366,7 +366,7 @@ init_lcore_rx_queues(void)
> nb_rx_queue = lcore_conf[lcore].n_rx_queue;
> if (nb_rx_queue >= MAX_RX_QUEUE_PER_LCORE) {
> printf("error: too many queues (%u) for lcore: %u\n",
> - (unsigned)nb_rx_queue + 1, (unsigned)lcore);
> + (unsigned int)nb_rx_queue + 1, (unsigned int)lcore);
Nit: this does not seem related to the patch (probably a split issue,
as a later patch touches this part of the code too).
> return -1;
> } else {
> lcore_conf[lcore].rx_queue_list[nb_rx_queue].port_id =
> @@ -500,6 +500,8 @@ parse_config(const char *q_arg)
> char *str_fld[_NUM_FLD];
> int i;
> unsigned size;
> + uint16_t max_fld[_NUM_FLD] = {USHRT_MAX,
> + USHRT_MAX, UCHAR_MAX};
Nit: no newline.
This part validates user input for the rx queue used by a lcore.
Some later check in the example (or in ethdev) may raise an error if
requesting too many queues, but I think the limit here should be
RTE_MAX_QUEUES_PER_PORT.
Besides, this hunk also changes the check on max port and max lcore.
This is something that should be left untouched at this point of the series.
I would expect something like:
uint16_t max_fld[_NUM_FLD] = {255, RTE_MAX_QUEUES_PER_PORT, 255};
>
> nb_lcore_params = 0;
>
> @@ -518,7 +520,8 @@ parse_config(const char *q_arg)
> for (i = 0; i < _NUM_FLD; i++){
> errno = 0;
> int_fld[i] = strtoul(str_fld[i], &end, 0);
> - if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
> + if (errno != 0 || end == str_fld[i] || int_fld[i] >
> + max_fld[i])
Nit: no newline.
> return -1;
> }
> if (nb_lcore_params >= MAX_LCORE_PARAMS) {
[snip]
The other changes on the l3fwd example code in this series look good to me.
Thanks.
--
David Marchand
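For completeness, the portable form the reviewer is asking for, shown as a tiny standalone example rather than code from the series:

#include <inttypes.h>
#include <stdio.h>

int
main(void)
{
        uint16_t queueid = 300;

        /* PRIu16 from <inttypes.h> always matches uint16_t, whereas
         * "%hu" assumes uint16_t is unsigned short. */
        printf("rxqueueid=%" PRIu16 "\n", queueid);
        return 0;
}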
^ permalink raw reply [flat|nested] 100+ messages in thread
* RE: [PATCH v6 01/14] examples/l3fwd: fix queue ID restriction
2024-03-22 15:41 ` David Marchand
@ 2024-03-25 12:45 ` Tummala, Sivaprasad
0 siblings, 0 replies; 100+ messages in thread
From: Tummala, Sivaprasad @ 2024-03-25 12:45 UTC (permalink / raw)
To: David Marchand
Cc: david.hunt, anatoly.burakov, jerinj, radu.nicolau, gakhil,
cristian.dumitrescu, Yigit, Ferruh, konstantin.ananyev, stephen,
mb, thomas, dev, stable
Hi,
> -----Original Message-----
> From: David Marchand <david.marchand@redhat.com>
> Sent: Friday, March 22, 2024 9:11 PM
> To: Tummala, Sivaprasad <Sivaprasad.Tummala@amd.com>
> Cc: david.hunt@intel.com; anatoly.burakov@intel.com; jerinj@marvell.com;
> radu.nicolau@intel.com; gakhil@marvell.com; cristian.dumitrescu@intel.com; Yigit,
> Ferruh <Ferruh.Yigit@amd.com>; konstantin.ananyev@huawei.com;
> stephen@networkplumber.org; mb@smartsharesystems.com;
> thomas@monjalon.net; dev@dpdk.org; stable@dpdk.org
> Subject: Re: [PATCH v6 01/14] examples/l3fwd: fix queue ID restriction
>
>
> Hello,
>
> On Thu, Mar 21, 2024 at 7:48 PM Sivaprasad Tummala
> <sivaprasad.tummala@amd.com> wrote:
> >
> > Currently application supports queue IDs up to 255
>
> I think it only relates to Rx queue IDs.
>
> Before this patch, the Tx queue count is already stored as a uint32_t or uint16_t
> and checked against RTE_MAX_LCORE.
> So no limit on the Tx queue count side.
>
> Can you just adjust the commitlog accordingly?
OK
>
>
> (One may argue that the Tx queue count should be also checked against
> RTE_MAX_QUEUES_PER_PORT, but it is a separate issue to this patch and in
> practice, we probably always have RTE_MAX_QUEUES_PER_PORT >
> RTE_MAX_LCORE).
>
>
> > and max queues of 256 irrespective of device support.
> > This limits the number of active lcores to 256.
> >
> > The patch fixes these constraints by increasing the queue IDs to
> > support up to 65535.
>
> [snip]
>
> > diff --git a/examples/l3fwd/l3fwd_acl.c b/examples/l3fwd/l3fwd_acl.c
> > index 401692bcec..2bd63181bc 100644
> > --- a/examples/l3fwd/l3fwd_acl.c
> > +++ b/examples/l3fwd/l3fwd_acl.c
> > @@ -997,7 +997,7 @@ acl_main_loop(__rte_unused void *dummy)
> > uint64_t prev_tsc, diff_tsc, cur_tsc;
> > int i, nb_rx;
> > uint16_t portid;
> > - uint8_t queueid;
> > + uint16_t queueid;
> > struct lcore_conf *qconf;
> > int socketid;
> > const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
> > @@ -1020,7 +1020,7 @@ acl_main_loop(__rte_unused void *dummy)
> > portid = qconf->rx_queue_list[i].port_id;
> > queueid = qconf->rx_queue_list[i].queue_id;
> > RTE_LOG(INFO, L3FWD,
> > - " -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
> > + " -- lcoreid=%u portid=%u rxqueueid=%hu\n",
>
> Nit: should be %PRIu16 (idem in other hunks formatting a queue).
OK
>
>
> > lcore_id, portid, queueid);
> > }
> >
>
> [snip]
>
>
> > diff --git a/examples/l3fwd/main.c b/examples/l3fwd/main.c index
> > 8d32ae1dd5..4d4738b92b 100644
> > --- a/examples/l3fwd/main.c
> > +++ b/examples/l3fwd/main.c
>
> [snip]
>
>
> > @@ -366,7 +366,7 @@ init_lcore_rx_queues(void)
> > nb_rx_queue = lcore_conf[lcore].n_rx_queue;
> > if (nb_rx_queue >= MAX_RX_QUEUE_PER_LCORE) {
> > printf("error: too many queues (%u) for lcore: %u\n",
> > - (unsigned)nb_rx_queue + 1, (unsigned)lcore);
> > + (unsigned int)nb_rx_queue + 1,
> > + (unsigned int)lcore);
>
> Nit: this does not seem related to the patch (probably a split issue, as a later patch
> touches this part of the code too).
Yes, this was done to avoid a checkpatch error.
>
>
> > return -1;
> > } else {
> >
> > lcore_conf[lcore].rx_queue_list[nb_rx_queue].port_id = @@ -500,6 +500,8 @@
> parse_config(const char *q_arg)
> > char *str_fld[_NUM_FLD];
> > int i;
> > unsigned size;
> > + uint16_t max_fld[_NUM_FLD] = {USHRT_MAX,
> > + USHRT_MAX, UCHAR_MAX};
>
> Nit: no newline.
>
> This part validates user input for the rx queue used by a lcore.
> Some later check in the example (or in ethdev) may raise an error if requesting too
> many queues, but I think the limit here should be RTE_MAX_QUEUES_PER_PORT.
>
> Besides, this hunk also changes the check on max port and max lcore.
> This is something that should be left untouched at this point of the series.
>
> I would expect something like:
> uint16_t max_fld[_NUM_FLD] = {255, RTE_MAX_QUEUES_PER_PORT, 255};
I agree on RTE_MAX_QUEUES_PER_PORT, but port_id is already uint16_t,
hence USHRT_MAX is relevant; similarly, UCHAR_MAX expands to 255.
>
>
> >
> > nb_lcore_params = 0;
> >
> > @@ -518,7 +520,8 @@ parse_config(const char *q_arg)
> > for (i = 0; i < _NUM_FLD; i++){
> > errno = 0;
> > int_fld[i] = strtoul(str_fld[i], &end, 0);
> > - if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
> > + if (errno != 0 || end == str_fld[i] || int_fld[i] >
> > +
> > + max_fld[i])
>
> Nit: no newline.
OK
>
> > return -1;
> > }
> > if (nb_lcore_params >= MAX_LCORE_PARAMS) {
>
> [snip]
>
>
> The other changes on the l3fwd example code in this series look good to me.
>
>
> Thanks.
>
> --
> David Marchand
^ permalink raw reply [flat|nested] 100+ messages in thread
* [PATCH v7 00/14] fix lcore ID restriction
2024-03-21 18:47 ` [PATCH v6 00/10] " Sivaprasad Tummala
` (14 preceding siblings ...)
2024-03-21 18:47 ` [PATCH v6 00/10] fix lcore " Sivaprasad Tummala
@ 2024-03-26 12:55 ` Sivaprasad Tummala
2024-03-26 12:55 ` [PATCH v7 01/14] examples/l3fwd: fix queue " Sivaprasad Tummala
` (15 more replies)
15 siblings, 16 replies; 100+ messages in thread
From: Sivaprasad Tummala @ 2024-03-26 12:55 UTC (permalink / raw)
To: david.hunt, anatoly.burakov, jerinj, radu.nicolau, gakhil,
cristian.dumitrescu, ferruh.yigit, konstantin.ananyev, stephen,
mb, david.marchand
Cc: dev
With modern CPUs it is possible to have a higher
CPU count, and thus a higher RTE_MAX_LCORE.
In DPDK sample applications, the current config
lcore options are hard-limited to 255.
The patchset fixes these constraints by allowing
all lcore IDs up to RTE_MAX_LCORE. The rx queue
IDs are also widened to support up to 65535.
The port ID constraints are also fixed to support
up to RTE_MAX_ETHPORTS.
v7:
- updated commit log with rx queue IDs
- changed format specifier to %PRIu16
- removed patch unrelated changes
v6:
- split queue_id, lcore_id and port_id changes as
separate patches.
- updated git commit description on individual
patches
v5:
- updated lcore_id type to uint32_t
v4:
- fixed build errors with queue_id type
in ipsec-secgw
v3:
- updated queue_id type to uint16_t
v2:
- fixed typo with lcore_id type in l3fwd
Sivaprasad Tummala (14):
examples/l3fwd: fix queue ID restriction
examples/l3fwd-power: fix queue ID restriction
examples/l3fwd-graph: fix queue ID restriction
examples/ipsec-secgw: fix queue ID restriction
examples/l3fwd: fix lcore ID restriction
examples/l3fwd-power: fix lcore ID restriction
examples/l3fwd-graph: fix lcore ID restriction
examples/ipsec-secgw: fix lcore ID restriction
examples/qos_sched: fix lcore ID restriction
examples/vm_power_manager: fix lcore ID restriction
examples/l3fwd: fix port ID restriction
examples/l3fwd-power: fix port ID restriction
examples/l3fwd-graph: fix port ID restriction
examples/ipsec-secgw: fix port ID restriction
examples/ipsec-secgw/event_helper.h | 2 +-
examples/ipsec-secgw/ipsec-secgw.c | 40 +++++++-----
examples/ipsec-secgw/ipsec.c | 2 +-
examples/ipsec-secgw/ipsec.h | 6 +-
examples/ipsec-secgw/ipsec_worker.c | 10 ++-
examples/l3fwd-graph/main.c | 33 +++++-----
examples/l3fwd-power/main.c | 65 ++++++++++---------
examples/l3fwd-power/main.h | 4 +-
examples/l3fwd-power/perf_core.c | 19 ++++--
examples/l3fwd/l3fwd.h | 2 +-
examples/l3fwd/l3fwd_acl.c | 4 +-
examples/l3fwd/l3fwd_em.c | 4 +-
examples/l3fwd/l3fwd_event.h | 2 +-
examples/l3fwd/l3fwd_fib.c | 4 +-
examples/l3fwd/l3fwd_lpm.c | 5 +-
examples/l3fwd/main.c | 42 +++++++-----
examples/qos_sched/args.c | 6 +-
.../guest_cli/vm_power_cli_guest.c | 4 +-
18 files changed, 135 insertions(+), 119 deletions(-)
--
2.25.1
^ permalink raw reply [flat|nested] 100+ messages in thread
* [PATCH v7 01/14] examples/l3fwd: fix queue ID restriction
2024-03-26 12:55 ` [PATCH v7 00/14] " Sivaprasad Tummala
@ 2024-03-26 12:55 ` Sivaprasad Tummala
2024-03-26 12:55 ` [PATCH v7 02/14] examples/l3fwd-power: " Sivaprasad Tummala
` (14 subsequent siblings)
15 siblings, 0 replies; 100+ messages in thread
From: Sivaprasad Tummala @ 2024-03-26 12:55 UTC (permalink / raw)
To: david.hunt, anatoly.burakov, jerinj, radu.nicolau, gakhil,
cristian.dumitrescu, ferruh.yigit, konstantin.ananyev, stephen,
mb, david.marchand
Cc: dev, stable
Currently the application supports rx queue IDs only up
to 255 and a maximum of 256 queues, irrespective of what
the device supports. This limits the number of active
lcores to 256.
The patch fixes this constraint by increasing the rx
queue IDs to support up to 65535.
Fixes: af75078fece3 ("first public release")
Cc: stable@dpdk.org
Signed-off-by: Sivaprasad Tummala <sivaprasad.tummala@amd.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@huawei.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Acked-by: Ferruh Yigit <ferruh.yigit@amd.com>
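A note on the recurring "%hhu" to PRIu16 change in this patch: with "%hhu"
the promoted argument is converted back to unsigned char before printing, so
a queue ID above 255 would be displayed modulo 256. A tiny standalone
demonstration (not code from the tree; the deliberate format mismatch may
draw a compiler warning):

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

int main(void)
{
	uint16_t queueid = 300;

	/* "%hhu" converts the value to unsigned char, printing 44 (300 % 256). */
	printf("with %%hhu: %hhu\n", queueid);

	/* PRIu16 expands to the correct conversion for uint16_t, printing 300. */
	printf("with PRIu16: %" PRIu16 "\n", queueid);
	return 0;
}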
---
examples/l3fwd/l3fwd.h | 2 +-
examples/l3fwd/l3fwd_acl.c | 4 ++--
examples/l3fwd/l3fwd_em.c | 4 ++--
examples/l3fwd/l3fwd_event.h | 2 +-
examples/l3fwd/l3fwd_fib.c | 4 ++--
examples/l3fwd/l3fwd_lpm.c | 5 ++---
examples/l3fwd/main.c | 24 +++++++++++++-----------
7 files changed, 23 insertions(+), 22 deletions(-)
diff --git a/examples/l3fwd/l3fwd.h b/examples/l3fwd/l3fwd.h
index e7ae0e5834..12c264cb4c 100644
--- a/examples/l3fwd/l3fwd.h
+++ b/examples/l3fwd/l3fwd.h
@@ -74,7 +74,7 @@ struct mbuf_table {
struct lcore_rx_queue {
uint16_t port_id;
- uint8_t queue_id;
+ uint16_t queue_id;
} __rte_cache_aligned;
struct lcore_conf {
diff --git a/examples/l3fwd/l3fwd_acl.c b/examples/l3fwd/l3fwd_acl.c
index 401692bcec..eec0361a3e 100644
--- a/examples/l3fwd/l3fwd_acl.c
+++ b/examples/l3fwd/l3fwd_acl.c
@@ -997,7 +997,7 @@ acl_main_loop(__rte_unused void *dummy)
uint64_t prev_tsc, diff_tsc, cur_tsc;
int i, nb_rx;
uint16_t portid;
- uint8_t queueid;
+ uint16_t queueid;
struct lcore_conf *qconf;
int socketid;
const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
@@ -1020,7 +1020,7 @@ acl_main_loop(__rte_unused void *dummy)
portid = qconf->rx_queue_list[i].port_id;
queueid = qconf->rx_queue_list[i].queue_id;
RTE_LOG(INFO, L3FWD,
- " -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
+ " -- lcoreid=%u portid=%u rxqueueid=%" PRIu16 "\n",
lcore_id, portid, queueid);
}
diff --git a/examples/l3fwd/l3fwd_em.c b/examples/l3fwd/l3fwd_em.c
index 40e102b38a..f18ac0048b 100644
--- a/examples/l3fwd/l3fwd_em.c
+++ b/examples/l3fwd/l3fwd_em.c
@@ -586,7 +586,7 @@ em_main_loop(__rte_unused void *dummy)
unsigned lcore_id;
uint64_t prev_tsc, diff_tsc, cur_tsc;
int i, nb_rx;
- uint8_t queueid;
+ uint16_t queueid;
uint16_t portid;
struct lcore_conf *qconf;
const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) /
@@ -609,7 +609,7 @@ em_main_loop(__rte_unused void *dummy)
portid = qconf->rx_queue_list[i].port_id;
queueid = qconf->rx_queue_list[i].queue_id;
RTE_LOG(INFO, L3FWD,
- " -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
+ " -- lcoreid=%u portid=%u rxqueueid=%" PRIu16 "\n",
lcore_id, portid, queueid);
}
diff --git a/examples/l3fwd/l3fwd_event.h b/examples/l3fwd/l3fwd_event.h
index 9aad358003..c6a4a89127 100644
--- a/examples/l3fwd/l3fwd_event.h
+++ b/examples/l3fwd/l3fwd_event.h
@@ -78,8 +78,8 @@ struct l3fwd_event_resources {
uint8_t deq_depth;
uint8_t has_burst;
uint8_t enabled;
- uint8_t eth_rx_queues;
uint8_t vector_enabled;
+ uint16_t eth_rx_queues;
uint16_t vector_size;
uint64_t vector_tmo_ns;
};
diff --git a/examples/l3fwd/l3fwd_fib.c b/examples/l3fwd/l3fwd_fib.c
index 6a21984415..f38b19af3f 100644
--- a/examples/l3fwd/l3fwd_fib.c
+++ b/examples/l3fwd/l3fwd_fib.c
@@ -186,7 +186,7 @@ fib_main_loop(__rte_unused void *dummy)
uint64_t prev_tsc, diff_tsc, cur_tsc;
int i, nb_rx;
uint16_t portid;
- uint8_t queueid;
+ uint16_t queueid;
struct lcore_conf *qconf;
const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) /
US_PER_S * BURST_TX_DRAIN_US;
@@ -208,7 +208,7 @@ fib_main_loop(__rte_unused void *dummy)
portid = qconf->rx_queue_list[i].port_id;
queueid = qconf->rx_queue_list[i].queue_id;
RTE_LOG(INFO, L3FWD,
- " -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
+ " -- lcoreid=%u portid=%u rxqueueid=%" PRIu16 "\n",
lcore_id, portid, queueid);
}
diff --git a/examples/l3fwd/l3fwd_lpm.c b/examples/l3fwd/l3fwd_lpm.c
index a484a33089..e8fd95aae9 100644
--- a/examples/l3fwd/l3fwd_lpm.c
+++ b/examples/l3fwd/l3fwd_lpm.c
@@ -148,8 +148,7 @@ lpm_main_loop(__rte_unused void *dummy)
unsigned lcore_id;
uint64_t prev_tsc, diff_tsc, cur_tsc;
int i, nb_rx;
- uint16_t portid;
- uint8_t queueid;
+ uint16_t portid, queueid;
struct lcore_conf *qconf;
const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) /
US_PER_S * BURST_TX_DRAIN_US;
@@ -171,7 +170,7 @@ lpm_main_loop(__rte_unused void *dummy)
portid = qconf->rx_queue_list[i].port_id;
queueid = qconf->rx_queue_list[i].queue_id;
RTE_LOG(INFO, L3FWD,
- " -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
+ " -- lcoreid=%u portid=%u rxqueueid=%" PRIu16 "\n",
lcore_id, portid, queueid);
}
diff --git a/examples/l3fwd/main.c b/examples/l3fwd/main.c
index 8d32ae1dd5..039207b06c 100644
--- a/examples/l3fwd/main.c
+++ b/examples/l3fwd/main.c
@@ -98,7 +98,7 @@ struct parm_cfg parm_config;
struct lcore_params {
uint16_t port_id;
- uint8_t queue_id;
+ uint16_t queue_id;
uint8_t lcore_id;
} __rte_cache_aligned;
@@ -292,14 +292,14 @@ setup_l3fwd_lookup_tables(void)
static int
check_lcore_params(void)
{
- uint8_t queue, lcore;
- uint16_t i;
+ uint16_t queue, i;
+ uint8_t lcore;
int socketid;
for (i = 0; i < nb_lcore_params; ++i) {
queue = lcore_params[i].queue_id;
if (queue >= MAX_RX_QUEUE_PER_PORT) {
- printf("invalid queue number: %hhu\n", queue);
+ printf("invalid queue number: %" PRIu16 "\n", queue);
return -1;
}
lcore = lcore_params[i].lcore_id;
@@ -336,7 +336,7 @@ check_port_config(void)
return 0;
}
-static uint8_t
+static uint16_t
get_port_n_rx_queues(const uint16_t port)
{
int queue = -1;
@@ -352,7 +352,7 @@ get_port_n_rx_queues(const uint16_t port)
lcore_params[i].port_id);
}
}
- return (uint8_t)(++queue);
+ return (uint16_t)(++queue);
}
static int
@@ -500,6 +500,7 @@ parse_config(const char *q_arg)
char *str_fld[_NUM_FLD];
int i;
unsigned size;
+ uint16_t max_fld[_NUM_FLD] = {255, RTE_MAX_QUEUES_PER_PORT, 255};
nb_lcore_params = 0;
@@ -518,7 +519,7 @@ parse_config(const char *q_arg)
for (i = 0; i < _NUM_FLD; i++){
errno = 0;
int_fld[i] = strtoul(str_fld[i], &end, 0);
- if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
+ if (errno != 0 || end == str_fld[i] || int_fld[i] > max_fld[i])
return -1;
}
if (nb_lcore_params >= MAX_LCORE_PARAMS) {
@@ -529,7 +530,7 @@ parse_config(const char *q_arg)
lcore_params_array[nb_lcore_params].port_id =
(uint8_t)int_fld[FLD_PORT];
lcore_params_array[nb_lcore_params].queue_id =
- (uint8_t)int_fld[FLD_QUEUE];
+ (uint16_t)int_fld[FLD_QUEUE];
lcore_params_array[nb_lcore_params].lcore_id =
(uint8_t)int_fld[FLD_LCORE];
++nb_lcore_params;
@@ -630,7 +631,7 @@ parse_event_eth_rx_queues(const char *eth_rx_queues)
{
struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
char *end = NULL;
- uint8_t num_eth_rx_queues;
+ uint16_t num_eth_rx_queues;
/* parse decimal string */
num_eth_rx_queues = strtoul(eth_rx_queues, &end, 10);
@@ -1211,7 +1212,8 @@ config_port_max_pkt_len(struct rte_eth_conf *conf,
static void
l3fwd_poll_resource_setup(void)
{
- uint8_t nb_rx_queue, queue, socketid;
+ uint8_t socketid;
+ uint16_t nb_rx_queue, queue;
struct rte_eth_dev_info dev_info;
uint32_t n_tx_queue, nb_lcores;
struct rte_eth_txconf *txconf;
@@ -1535,7 +1537,7 @@ main(int argc, char **argv)
struct lcore_conf *qconf;
uint16_t queueid, portid;
unsigned int lcore_id;
- uint8_t queue;
+ uint16_t queue;
int ret;
/* init EAL */
--
2.25.1
^ permalink raw reply [flat|nested] 100+ messages in thread
* [PATCH v7 02/14] examples/l3fwd-power: fix queue ID restriction
2024-03-26 12:55 ` [PATCH v7 00/14] " Sivaprasad Tummala
2024-03-26 12:55 ` [PATCH v7 01/14] examples/l3fwd: fix queue " Sivaprasad Tummala
@ 2024-03-26 12:55 ` Sivaprasad Tummala
2024-03-26 12:55 ` [PATCH v7 03/14] examples/l3fwd-graph: " Sivaprasad Tummala
` (13 subsequent siblings)
15 siblings, 0 replies; 100+ messages in thread
From: Sivaprasad Tummala @ 2024-03-26 12:55 UTC (permalink / raw)
To: david.hunt, anatoly.burakov, jerinj, radu.nicolau, gakhil,
cristian.dumitrescu, ferruh.yigit, konstantin.ananyev, stephen,
mb, david.marchand
Cc: dev, stable
Currently the application supports rx queue IDs only up
to 255 and a maximum of 256 queues, irrespective of what
the device supports. This limits the number of active
lcores to 256.
The patch fixes this constraint by increasing the rx
queue IDs to support up to 65535.
Fixes: f88e7c175a68 ("examples/l3fwd-power: add high/regular perf cores options")
Cc: radu.nicolau@intel.com
Cc: stable@dpdk.org
Signed-off-by: Sivaprasad Tummala <sivaprasad.tummala@amd.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Acked-by: Ferruh Yigit <ferruh.yigit@amd.com>
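Beyond the type widening, the one non-mechanical change in this patch is the
rx interrupt userdata encoding: the port/queue pair packed into the epoll
data word now uses a 16-bit shift instead of 8. A minimal standalone sketch
of the pack/unpack logic (function names are local to this example, not from
the tree):

#include <assert.h>
#include <limits.h>
#include <stdint.h>

/* Pack (port, queue) into one word: port in the high 16 bits, queue in
 * the low 16 bits, mirroring event_register() after this patch. */
static uint32_t
pack_port_queue(uint16_t portid, uint16_t queueid)
{
	return (uint32_t)portid << (sizeof(uint16_t) * CHAR_BIT) | queueid;
}

/* Unpack as done in sleep_until_rx_interrupt() after this patch. */
static void
unpack_port_queue(uintptr_t data, uint16_t *portid, uint16_t *queueid)
{
	*portid = data >> (sizeof(uint16_t) * CHAR_BIT);
	*queueid = data & UINT16_MAX;
}

int main(void)
{
	uint16_t p, q;

	unpack_port_queue(pack_port_queue(7, 300), &p, &q);
	assert(p == 7 && q == 300);
	return 0;
}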
---
examples/l3fwd-power/main.c | 49 +++++++++++++++-----------------
examples/l3fwd-power/main.h | 2 +-
examples/l3fwd-power/perf_core.c | 8 ++++--
3 files changed, 29 insertions(+), 30 deletions(-)
diff --git a/examples/l3fwd-power/main.c b/examples/l3fwd-power/main.c
index f4adcf41b5..50aea99428 100644
--- a/examples/l3fwd-power/main.c
+++ b/examples/l3fwd-power/main.c
@@ -214,7 +214,7 @@ enum freq_scale_hint_t
struct lcore_rx_queue {
uint16_t port_id;
- uint8_t queue_id;
+ uint16_t queue_id;
enum freq_scale_hint_t freq_up_hint;
uint32_t zero_rx_packet_count;
uint32_t idle_hint;
@@ -838,7 +838,7 @@ sleep_until_rx_interrupt(int num, int lcore)
struct rte_epoll_event event[num];
int n, i;
uint16_t port_id;
- uint8_t queue_id;
+ uint16_t queue_id;
void *data;
if (status[lcore].wakeup) {
@@ -850,9 +850,9 @@ sleep_until_rx_interrupt(int num, int lcore)
n = rte_epoll_wait(RTE_EPOLL_PER_THREAD, event, num, 10);
for (i = 0; i < n; i++) {
data = event[i].epdata.data;
- port_id = ((uintptr_t)data) >> CHAR_BIT;
+ port_id = ((uintptr_t)data) >> (sizeof(uint16_t) * CHAR_BIT);
queue_id = ((uintptr_t)data) &
- RTE_LEN2MASK(CHAR_BIT, uint8_t);
+ RTE_LEN2MASK((sizeof(uint16_t) * CHAR_BIT), uint16_t);
RTE_LOG(INFO, L3FWD_POWER,
"lcore %u is waked up from rx interrupt on"
" port %d queue %d\n",
@@ -867,7 +867,7 @@ static void turn_on_off_intr(struct lcore_conf *qconf, bool on)
{
int i;
struct lcore_rx_queue *rx_queue;
- uint8_t queue_id;
+ uint16_t queue_id;
uint16_t port_id;
for (i = 0; i < qconf->n_rx_queue; ++i) {
@@ -887,7 +887,7 @@ static void turn_on_off_intr(struct lcore_conf *qconf, bool on)
static int event_register(struct lcore_conf *qconf)
{
struct lcore_rx_queue *rx_queue;
- uint8_t queueid;
+ uint16_t queueid;
uint16_t portid;
uint32_t data;
int ret;
@@ -897,7 +897,7 @@ static int event_register(struct lcore_conf *qconf)
rx_queue = &(qconf->rx_queue_list[i]);
portid = rx_queue->port_id;
queueid = rx_queue->queue_id;
- data = portid << CHAR_BIT | queueid;
+ data = portid << (sizeof(uint16_t) * CHAR_BIT) | queueid;
ret = rte_eth_dev_rx_intr_ctl_q(portid, queueid,
RTE_EPOLL_PER_THREAD,
@@ -917,8 +917,7 @@ static int main_intr_loop(__rte_unused void *dummy)
unsigned int lcore_id;
uint64_t prev_tsc, diff_tsc, cur_tsc;
int i, j, nb_rx;
- uint8_t queueid;
- uint16_t portid;
+ uint16_t portid, queueid;
struct lcore_conf *qconf;
struct lcore_rx_queue *rx_queue;
uint32_t lcore_rx_idle_count = 0;
@@ -946,7 +945,7 @@ static int main_intr_loop(__rte_unused void *dummy)
portid = qconf->rx_queue_list[i].port_id;
queueid = qconf->rx_queue_list[i].queue_id;
RTE_LOG(INFO, L3FWD_POWER,
- " -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
+ " -- lcoreid=%u portid=%u rxqueueid=%" PRIu16 "\n",
lcore_id, portid, queueid);
}
@@ -1083,8 +1082,7 @@ main_telemetry_loop(__rte_unused void *dummy)
unsigned int lcore_id;
uint64_t prev_tsc, diff_tsc, cur_tsc, prev_tel_tsc;
int i, j, nb_rx;
- uint8_t queueid;
- uint16_t portid;
+ uint16_t portid, queueid;
struct lcore_conf *qconf;
struct lcore_rx_queue *rx_queue;
uint64_t ep_nep[2] = {0}, fp_nfp[2] = {0};
@@ -1114,7 +1112,7 @@ main_telemetry_loop(__rte_unused void *dummy)
portid = qconf->rx_queue_list[i].port_id;
queueid = qconf->rx_queue_list[i].queue_id;
RTE_LOG(INFO, L3FWD_POWER, " -- lcoreid=%u portid=%u "
- "rxqueueid=%hhu\n", lcore_id, portid, queueid);
+ "rxqueueid=%" PRIu16 "\n", lcore_id, portid, queueid);
}
while (!is_done()) {
@@ -1205,8 +1203,7 @@ main_legacy_loop(__rte_unused void *dummy)
uint64_t prev_tsc, diff_tsc, cur_tsc, tim_res_tsc, hz;
uint64_t prev_tsc_power = 0, cur_tsc_power, diff_tsc_power;
int i, j, nb_rx;
- uint8_t queueid;
- uint16_t portid;
+ uint16_t portid, queueid;
struct lcore_conf *qconf;
struct lcore_rx_queue *rx_queue;
enum freq_scale_hint_t lcore_scaleup_hint;
@@ -1234,7 +1231,7 @@ main_legacy_loop(__rte_unused void *dummy)
portid = qconf->rx_queue_list[i].port_id;
queueid = qconf->rx_queue_list[i].queue_id;
RTE_LOG(INFO, L3FWD_POWER, " -- lcoreid=%u portid=%u "
- "rxqueueid=%hhu\n", lcore_id, portid, queueid);
+ "rxqueueid=%" PRIu16 "\n", lcore_id, portid, queueid);
}
/* add into event wait list */
@@ -1399,14 +1396,14 @@ main_legacy_loop(__rte_unused void *dummy)
static int
check_lcore_params(void)
{
- uint8_t queue, lcore;
- uint16_t i;
+ uint16_t queue, i;
+ uint8_t lcore;
int socketid;
for (i = 0; i < nb_lcore_params; ++i) {
queue = lcore_params[i].queue_id;
if (queue >= MAX_RX_QUEUE_PER_PORT) {
- printf("invalid queue number: %hhu\n", queue);
+ printf("invalid queue number: %" PRIu16 "\n", queue);
return -1;
}
lcore = lcore_params[i].lcore_id;
@@ -1451,7 +1448,7 @@ check_port_config(void)
return 0;
}
-static uint8_t
+static uint16_t
get_port_n_rx_queues(const uint16_t port)
{
int queue = -1;
@@ -1462,7 +1459,7 @@ get_port_n_rx_queues(const uint16_t port)
lcore_params[i].queue_id > queue)
queue = lcore_params[i].queue_id;
}
- return (uint8_t)(++queue);
+ return (uint16_t)(++queue);
}
static int
@@ -1661,6 +1658,7 @@ parse_config(const char *q_arg)
char *str_fld[_NUM_FLD];
int i;
unsigned size;
+ unsigned int max_fld[_NUM_FLD] = {255, RTE_MAX_QUEUES_PER_PORT, 255};
nb_lcore_params = 0;
@@ -1680,8 +1678,7 @@ parse_config(const char *q_arg)
for (i = 0; i < _NUM_FLD; i++){
errno = 0;
int_fld[i] = strtoul(str_fld[i], &end, 0);
- if (errno != 0 || end == str_fld[i] || int_fld[i] >
- 255)
+ if (errno != 0 || end == str_fld[i] || int_fld[i] > max_fld[i])
return -1;
}
if (nb_lcore_params >= MAX_LCORE_PARAMS) {
@@ -1692,7 +1689,7 @@ parse_config(const char *q_arg)
lcore_params_array[nb_lcore_params].port_id =
(uint8_t)int_fld[FLD_PORT];
lcore_params_array[nb_lcore_params].queue_id =
- (uint8_t)int_fld[FLD_QUEUE];
+ (uint16_t)int_fld[FLD_QUEUE];
lcore_params_array[nb_lcore_params].lcore_id =
(uint8_t)int_fld[FLD_LCORE];
++nb_lcore_params;
@@ -2501,8 +2498,8 @@ main(int argc, char **argv)
uint64_t hz;
uint32_t n_tx_queue, nb_lcores;
uint32_t dev_rxq_num, dev_txq_num;
- uint8_t nb_rx_queue, queue, socketid;
- uint16_t portid;
+ uint8_t socketid;
+ uint16_t portid, nb_rx_queue, queue;
const char *ptr_strings[NUM_TELSTATS];
/* init EAL */
diff --git a/examples/l3fwd-power/main.h b/examples/l3fwd-power/main.h
index 258de98f5b..40b5194726 100644
--- a/examples/l3fwd-power/main.h
+++ b/examples/l3fwd-power/main.h
@@ -9,7 +9,7 @@
#define MAX_LCORE_PARAMS 1024
struct lcore_params {
uint16_t port_id;
- uint8_t queue_id;
+ uint16_t queue_id;
uint8_t lcore_id;
} __rte_cache_aligned;
diff --git a/examples/l3fwd-power/perf_core.c b/examples/l3fwd-power/perf_core.c
index 41ef6d0c9a..f34442b9d0 100644
--- a/examples/l3fwd-power/perf_core.c
+++ b/examples/l3fwd-power/perf_core.c
@@ -22,7 +22,7 @@ static uint16_t nb_hp_lcores;
struct perf_lcore_params {
uint16_t port_id;
- uint8_t queue_id;
+ uint16_t queue_id;
uint8_t high_perf;
uint8_t lcore_idx;
} __rte_cache_aligned;
@@ -132,6 +132,7 @@ parse_perf_config(const char *q_arg)
char *str_fld[_NUM_FLD];
int i;
unsigned int size;
+ unsigned int max_fld[_NUM_FLD] = {255, RTE_MAX_QUEUES_PER_PORT, 255, 255};
nb_prf_lc_prms = 0;
@@ -152,7 +153,8 @@ parse_perf_config(const char *q_arg)
for (i = 0; i < _NUM_FLD; i++) {
errno = 0;
int_fld[i] = strtoul(str_fld[i], &end, 0);
- if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
+ if (errno != 0 || end == str_fld[i] || int_fld[i] > max_fld[i])
+
return -1;
}
if (nb_prf_lc_prms >= MAX_LCORE_PARAMS) {
@@ -163,7 +165,7 @@ parse_perf_config(const char *q_arg)
prf_lc_prms[nb_prf_lc_prms].port_id =
(uint8_t)int_fld[FLD_PORT];
prf_lc_prms[nb_prf_lc_prms].queue_id =
- (uint8_t)int_fld[FLD_QUEUE];
+ (uint16_t)int_fld[FLD_QUEUE];
prf_lc_prms[nb_prf_lc_prms].high_perf =
!!(uint8_t)int_fld[FLD_LCORE_HP];
prf_lc_prms[nb_prf_lc_prms].lcore_idx =
--
2.25.1
^ permalink raw reply [flat|nested] 100+ messages in thread
* [PATCH v7 03/14] examples/l3fwd-graph: fix queue ID restriction
2024-03-26 12:55 ` [PATCH v7 00/14] " Sivaprasad Tummala
2024-03-26 12:55 ` [PATCH v7 01/14] examples/l3fwd: fix queue " Sivaprasad Tummala
2024-03-26 12:55 ` [PATCH v7 02/14] examples/l3fwd-power: " Sivaprasad Tummala
@ 2024-03-26 12:55 ` Sivaprasad Tummala
2024-03-26 12:55 ` [PATCH v7 04/14] examples/ipsec-secgw: " Sivaprasad Tummala
` (12 subsequent siblings)
15 siblings, 0 replies; 100+ messages in thread
From: Sivaprasad Tummala @ 2024-03-26 12:55 UTC (permalink / raw)
To: david.hunt, anatoly.burakov, jerinj, radu.nicolau, gakhil,
cristian.dumitrescu, ferruh.yigit, konstantin.ananyev, stephen,
mb, david.marchand
Cc: dev, ndabilpuram, stable
Currently the application supports rx queue IDs only up
to 255 and a maximum of 256 queues, irrespective of what
the device supports. This limits the number of active
lcores to 256.
The patch fixes this constraint by increasing the rx
queue IDs to support up to 65535.
Fixes: 08bd1a174461 ("examples/l3fwd-graph: add graph-based l3fwd skeleton")
Cc: ndabilpuram@marvell.com
Cc: stable@dpdk.org
Signed-off-by: Sivaprasad Tummala <sivaprasad.tummala@amd.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Acked-by: Ferruh Yigit <ferruh.yigit@amd.com>
---
examples/l3fwd-graph/main.c | 19 ++++++++++---------
1 file changed, 10 insertions(+), 9 deletions(-)
diff --git a/examples/l3fwd-graph/main.c b/examples/l3fwd-graph/main.c
index 96cb1c81ff..4b018d1e78 100644
--- a/examples/l3fwd-graph/main.c
+++ b/examples/l3fwd-graph/main.c
@@ -90,7 +90,7 @@ static int pcap_trace_enable;
struct lcore_rx_queue {
uint16_t port_id;
- uint8_t queue_id;
+ uint16_t queue_id;
char node_name[RTE_NODE_NAMESIZE];
};
@@ -110,7 +110,7 @@ static struct lcore_conf lcore_conf[RTE_MAX_LCORE];
struct lcore_params {
uint16_t port_id;
- uint8_t queue_id;
+ uint16_t queue_id;
uint8_t lcore_id;
} __rte_cache_aligned;
@@ -205,14 +205,14 @@ check_worker_model_params(void)
static int
check_lcore_params(void)
{
- uint8_t queue, lcore;
+ uint16_t queue, i;
int socketid;
- uint16_t i;
+ uint8_t lcore;
for (i = 0; i < nb_lcore_params; ++i) {
queue = lcore_params[i].queue_id;
if (queue >= MAX_RX_QUEUE_PER_PORT) {
- printf("Invalid queue number: %hhu\n", queue);
+ printf("Invalid queue number: %" PRIu16 "\n", queue);
return -1;
}
lcore = lcore_params[i].lcore_id;
@@ -257,7 +257,7 @@ check_port_config(void)
return 0;
}
-static uint8_t
+static uint16_t
get_port_n_rx_queues(const uint16_t port)
{
int queue = -1;
@@ -275,7 +275,7 @@ get_port_n_rx_queues(const uint16_t port)
}
}
- return (uint8_t)(++queue);
+ return (uint16_t)(++queue);
}
static int
@@ -450,7 +450,7 @@ parse_config(const char *q_arg)
lcore_params_array[nb_lcore_params].port_id =
(uint8_t)int_fld[FLD_PORT];
lcore_params_array[nb_lcore_params].queue_id =
- (uint8_t)int_fld[FLD_QUEUE];
+ (uint16_t)int_fld[FLD_QUEUE];
lcore_params_array[nb_lcore_params].lcore_id =
(uint8_t)int_fld[FLD_LCORE];
++nb_lcore_params;
@@ -1011,7 +1011,8 @@ main(int argc, char **argv)
"ethdev_tx-*",
"pkt_drop",
};
- uint8_t nb_rx_queue, queue, socketid;
+ uint8_t socketid;
+ uint16_t nb_rx_queue, queue;
struct rte_graph_param graph_conf;
struct rte_eth_dev_info dev_info;
uint32_t nb_ports, nb_conf = 0;
--
2.25.1
^ permalink raw reply [flat|nested] 100+ messages in thread
* [PATCH v7 04/14] examples/ipsec-secgw: fix queue ID restriction
2024-03-26 12:55 ` [PATCH v7 00/14] " Sivaprasad Tummala
` (2 preceding siblings ...)
2024-03-26 12:55 ` [PATCH v7 03/14] examples/l3fwd-graph: " Sivaprasad Tummala
@ 2024-03-26 12:55 ` Sivaprasad Tummala
2024-03-26 12:55 ` [PATCH v7 05/14] examples/l3fwd: fix lcore " Sivaprasad Tummala
` (11 subsequent siblings)
15 siblings, 0 replies; 100+ messages in thread
From: Sivaprasad Tummala @ 2024-03-26 12:55 UTC (permalink / raw)
To: david.hunt, anatoly.burakov, jerinj, radu.nicolau, gakhil,
cristian.dumitrescu, ferruh.yigit, konstantin.ananyev, stephen,
mb, david.marchand
Cc: dev
Currently the application supports rx queue IDs only up
to 255 and a maximum of 256 queues, irrespective of what
the device supports. This limits the number of active
lcores to 256.
The patch fixes this constraint by increasing the rx
queue IDs to support up to 65535.
Signed-off-by: Sivaprasad Tummala <sivaprasad.tummala@amd.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@huawei.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Acked-by: Ferruh Yigit <ferruh.yigit@amd.com>
---
examples/ipsec-secgw/ipsec-secgw.c | 19 +++++++++----------
examples/ipsec-secgw/ipsec.h | 2 +-
examples/ipsec-secgw/ipsec_worker.c | 10 ++++------
3 files changed, 14 insertions(+), 17 deletions(-)
diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index 45a303850d..782535f4b5 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
@@ -220,7 +220,7 @@ static const char *cfgfile;
struct lcore_params {
uint16_t port_id;
- uint8_t queue_id;
+ uint16_t queue_id;
uint8_t lcore_id;
} __rte_cache_aligned;
@@ -695,8 +695,7 @@ ipsec_poll_mode_worker(void)
struct rte_mbuf *pkts[MAX_PKT_BURST];
uint32_t lcore_id;
uint64_t prev_tsc, diff_tsc, cur_tsc;
- uint16_t i, nb_rx, portid;
- uint8_t queueid;
+ uint16_t i, nb_rx, portid, queueid;
struct lcore_conf *qconf;
int32_t rc, socket_id;
const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
@@ -743,7 +742,7 @@ ipsec_poll_mode_worker(void)
portid = rxql[i].port_id;
queueid = rxql[i].queue_id;
RTE_LOG(INFO, IPSEC,
- " -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
+ " -- lcoreid=%u portid=%u rxqueueid=%" PRIu16 "\n",
lcore_id, portid, queueid);
}
@@ -788,8 +787,7 @@ int
check_flow_params(uint16_t fdir_portid, uint8_t fdir_qid)
{
uint16_t i;
- uint16_t portid;
- uint8_t queueid;
+ uint16_t portid, queueid;
for (i = 0; i < nb_lcore_params; ++i) {
portid = lcore_params_array[i].port_id;
@@ -851,7 +849,7 @@ check_poll_mode_params(struct eh_conf *eh_conf)
return 0;
}
-static uint8_t
+static uint16_t
get_port_nb_rx_queues(const uint16_t port)
{
int32_t queue = -1;
@@ -862,7 +860,7 @@ get_port_nb_rx_queues(const uint16_t port)
lcore_params[i].queue_id > queue)
queue = lcore_params[i].queue_id;
}
- return (uint8_t)(++queue);
+ return (uint16_t)(++queue);
}
static int32_t
@@ -1050,6 +1048,7 @@ parse_config(const char *q_arg)
char *str_fld[_NUM_FLD];
int32_t i;
uint32_t size;
+ uint32_t max_fld[_NUM_FLD] = {255, RTE_MAX_QUEUES_PER_PORT, 255};
nb_lcore_params = 0;
@@ -1070,7 +1069,7 @@ parse_config(const char *q_arg)
for (i = 0; i < _NUM_FLD; i++) {
errno = 0;
int_fld[i] = strtoul(str_fld[i], &end, 0);
- if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
+ if (errno != 0 || end == str_fld[i] || int_fld[i] > max_fld[i])
return -1;
}
if (nb_lcore_params >= MAX_LCORE_PARAMS) {
@@ -1081,7 +1080,7 @@ parse_config(const char *q_arg)
lcore_params_array[nb_lcore_params].port_id =
(uint8_t)int_fld[FLD_PORT];
lcore_params_array[nb_lcore_params].queue_id =
- (uint8_t)int_fld[FLD_QUEUE];
+ (uint16_t)int_fld[FLD_QUEUE];
lcore_params_array[nb_lcore_params].lcore_id =
(uint8_t)int_fld[FLD_LCORE];
++nb_lcore_params;
diff --git a/examples/ipsec-secgw/ipsec.h b/examples/ipsec-secgw/ipsec.h
index bdcada1c40..29b9b283f0 100644
--- a/examples/ipsec-secgw/ipsec.h
+++ b/examples/ipsec-secgw/ipsec.h
@@ -285,7 +285,7 @@ struct cnt_blk {
struct lcore_rx_queue {
uint16_t port_id;
- uint8_t queue_id;
+ uint16_t queue_id;
void *sec_ctx;
} __rte_cache_aligned;
diff --git a/examples/ipsec-secgw/ipsec_worker.c b/examples/ipsec-secgw/ipsec_worker.c
index 8d122e8519..c9c43ebd2b 100644
--- a/examples/ipsec-secgw/ipsec_worker.c
+++ b/examples/ipsec-secgw/ipsec_worker.c
@@ -1598,8 +1598,7 @@ ipsec_poll_mode_wrkr_inl_pr(void)
int32_t socket_id;
uint32_t lcore_id;
int32_t i, nb_rx;
- uint16_t portid;
- uint8_t queueid;
+ uint16_t portid, queueid;
prev_tsc = 0;
lcore_id = rte_lcore_id();
@@ -1633,7 +1632,7 @@ ipsec_poll_mode_wrkr_inl_pr(void)
portid = rxql[i].port_id;
queueid = rxql[i].queue_id;
RTE_LOG(INFO, IPSEC,
- " -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
+ " -- lcoreid=%u portid=%u rxqueueid=%" PRIu16 "\n",
lcore_id, portid, queueid);
}
@@ -1729,8 +1728,7 @@ ipsec_poll_mode_wrkr_inl_pr_ss(void)
uint32_t i, nb_rx, j;
int32_t socket_id;
uint32_t lcore_id;
- uint16_t portid;
- uint8_t queueid;
+ uint16_t portid, queueid;
prev_tsc = 0;
lcore_id = rte_lcore_id();
@@ -1764,7 +1762,7 @@ ipsec_poll_mode_wrkr_inl_pr_ss(void)
portid = rxql[i].port_id;
queueid = rxql[i].queue_id;
RTE_LOG(INFO, IPSEC,
- " -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
+ " -- lcoreid=%u portid=%u rxqueueid=%" PRIu16 "\n",
lcore_id, portid, queueid);
}
--
2.25.1
^ permalink raw reply [flat|nested] 100+ messages in thread
* [PATCH v7 05/14] examples/l3fwd: fix lcore ID restriction
2024-03-26 12:55 ` [PATCH v7 00/14] " Sivaprasad Tummala
` (3 preceding siblings ...)
2024-03-26 12:55 ` [PATCH v7 04/14] examples/ipsec-secgw: " Sivaprasad Tummala
@ 2024-03-26 12:55 ` Sivaprasad Tummala
2024-03-26 12:55 ` [PATCH v7 06/14] examples/l3fwd-power: " Sivaprasad Tummala
` (10 subsequent siblings)
15 siblings, 0 replies; 100+ messages in thread
From: Sivaprasad Tummala @ 2024-03-26 12:55 UTC (permalink / raw)
To: david.hunt, anatoly.burakov, jerinj, radu.nicolau, gakhil,
cristian.dumitrescu, ferruh.yigit, konstantin.ananyev, stephen,
mb, david.marchand
Cc: dev, stable
Currently the config option allows lcore IDs only up to
255, irrespective of RTE_MAX_LCORE.
The patch fixes this constraint by allowing all lcore IDs
up to RTE_MAX_LCORE.
Fixes: af75078fece3 ("first public release")
Cc: stable@dpdk.org
Signed-off-by: Sivaprasad Tummala <sivaprasad.tummala@amd.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@huawei.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Acked-by: Ferruh Yigit <ferruh.yigit@amd.com>
---
examples/l3fwd/main.c | 20 ++++++++++++--------
1 file changed, 12 insertions(+), 8 deletions(-)
diff --git a/examples/l3fwd/main.c b/examples/l3fwd/main.c
index 039207b06c..47baf464e2 100644
--- a/examples/l3fwd/main.c
+++ b/examples/l3fwd/main.c
@@ -99,7 +99,7 @@ struct parm_cfg parm_config;
struct lcore_params {
uint16_t port_id;
uint16_t queue_id;
- uint8_t lcore_id;
+ uint32_t lcore_id;
} __rte_cache_aligned;
static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
@@ -293,7 +293,7 @@ static int
check_lcore_params(void)
{
uint16_t queue, i;
- uint8_t lcore;
+ uint32_t lcore;
int socketid;
for (i = 0; i < nb_lcore_params; ++i) {
@@ -304,12 +304,12 @@ check_lcore_params(void)
}
lcore = lcore_params[i].lcore_id;
if (!rte_lcore_is_enabled(lcore)) {
- printf("error: lcore %hhu is not enabled in lcore mask\n", lcore);
+ printf("error: lcore %u is not enabled in lcore mask\n", lcore);
return -1;
}
if ((socketid = rte_lcore_to_socket_id(lcore) != 0) &&
(numa_on == 0)) {
- printf("warning: lcore %hhu is on socket %d with numa off \n",
+ printf("warning: lcore %u is on socket %d with numa off\n",
lcore, socketid);
}
}
@@ -359,14 +359,14 @@ static int
init_lcore_rx_queues(void)
{
uint16_t i, nb_rx_queue;
- uint8_t lcore;
+ uint32_t lcore;
for (i = 0; i < nb_lcore_params; ++i) {
lcore = lcore_params[i].lcore_id;
nb_rx_queue = lcore_conf[lcore].n_rx_queue;
if (nb_rx_queue >= MAX_RX_QUEUE_PER_LCORE) {
printf("error: too many queues (%u) for lcore: %u\n",
- (unsigned)nb_rx_queue + 1, (unsigned)lcore);
+ (unsigned int)nb_rx_queue + 1, lcore);
return -1;
} else {
lcore_conf[lcore].rx_queue_list[nb_rx_queue].port_id =
@@ -500,7 +500,11 @@ parse_config(const char *q_arg)
char *str_fld[_NUM_FLD];
int i;
unsigned size;
- uint16_t max_fld[_NUM_FLD] = {255, RTE_MAX_QUEUES_PER_PORT, 255};
+ uint16_t max_fld[_NUM_FLD] = {
+ 255,
+ RTE_MAX_QUEUES_PER_PORT,
+ RTE_MAX_LCORE
+ };
nb_lcore_params = 0;
@@ -532,7 +536,7 @@ parse_config(const char *q_arg)
lcore_params_array[nb_lcore_params].queue_id =
(uint16_t)int_fld[FLD_QUEUE];
lcore_params_array[nb_lcore_params].lcore_id =
- (uint8_t)int_fld[FLD_LCORE];
+ (uint32_t)int_fld[FLD_LCORE];
++nb_lcore_params;
}
lcore_params = lcore_params_array;
--
2.25.1
^ permalink raw reply [flat|nested] 100+ messages in thread
* [PATCH v7 06/14] examples/l3fwd-power: fix lcore ID restriction
2024-03-26 12:55 ` [PATCH v7 00/14] " Sivaprasad Tummala
` (4 preceding siblings ...)
2024-03-26 12:55 ` [PATCH v7 05/14] examples/l3fwd: fix lcore " Sivaprasad Tummala
@ 2024-03-26 12:55 ` Sivaprasad Tummala
2024-03-26 12:55 ` [PATCH v7 07/14] examples/l3fwd-graph: " Sivaprasad Tummala
` (9 subsequent siblings)
15 siblings, 0 replies; 100+ messages in thread
From: Sivaprasad Tummala @ 2024-03-26 12:55 UTC (permalink / raw)
To: david.hunt, anatoly.burakov, jerinj, radu.nicolau, gakhil,
cristian.dumitrescu, ferruh.yigit, konstantin.ananyev, stephen,
mb, david.marchand
Cc: dev, stable
Currently the config option allows lcore IDs only up to
255, irrespective of RTE_MAX_LCORE.
The patch fixes this constraint by allowing all lcore IDs
up to RTE_MAX_LCORE.
Fixes: f88e7c175a68 ("examples/l3fwd-power: add high/regular perf cores options")
Cc: radu.nicolau@intel.com
Cc: stable@dpdk.org
Signed-off-by: Sivaprasad Tummala <sivaprasad.tummala@amd.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Acked-by: Ferruh Yigit <ferruh.yigit@amd.com>
---
examples/l3fwd-power/main.c | 18 +++++++++++-------
examples/l3fwd-power/main.h | 2 +-
examples/l3fwd-power/perf_core.c | 11 ++++++++---
3 files changed, 20 insertions(+), 11 deletions(-)
diff --git a/examples/l3fwd-power/main.c b/examples/l3fwd-power/main.c
index 50aea99428..eac92ef875 100644
--- a/examples/l3fwd-power/main.c
+++ b/examples/l3fwd-power/main.c
@@ -1397,7 +1397,7 @@ static int
check_lcore_params(void)
{
uint16_t queue, i;
- uint8_t lcore;
+ uint32_t lcore;
int socketid;
for (i = 0; i < nb_lcore_params; ++i) {
@@ -1408,13 +1408,13 @@ check_lcore_params(void)
}
lcore = lcore_params[i].lcore_id;
if (!rte_lcore_is_enabled(lcore)) {
- printf("error: lcore %hhu is not enabled in lcore "
+ printf("error: lcore %u is not enabled in lcore "
"mask\n", lcore);
return -1;
}
if ((socketid = rte_lcore_to_socket_id(lcore) != 0) &&
(numa_on == 0)) {
- printf("warning: lcore %hhu is on socket %d with numa "
+ printf("warning: lcore %u is on socket %d with numa "
"off\n", lcore, socketid);
}
if (app_mode == APP_MODE_TELEMETRY && lcore == rte_lcore_id()) {
@@ -1466,14 +1466,14 @@ static int
init_lcore_rx_queues(void)
{
uint16_t i, nb_rx_queue;
- uint8_t lcore;
+ uint32_t lcore;
for (i = 0; i < nb_lcore_params; ++i) {
lcore = lcore_params[i].lcore_id;
nb_rx_queue = lcore_conf[lcore].n_rx_queue;
if (nb_rx_queue >= MAX_RX_QUEUE_PER_LCORE) {
printf("error: too many queues (%u) for lcore: %u\n",
- (unsigned)nb_rx_queue + 1, (unsigned)lcore);
+ (unsigned int)nb_rx_queue + 1, lcore);
return -1;
} else {
lcore_conf[lcore].rx_queue_list[nb_rx_queue].port_id =
@@ -1658,7 +1658,11 @@ parse_config(const char *q_arg)
char *str_fld[_NUM_FLD];
int i;
unsigned size;
- unsigned int max_fld[_NUM_FLD] = {255, RTE_MAX_QUEUES_PER_PORT, 255};
+ unsigned int max_fld[_NUM_FLD] = {
+ 255,
+ RTE_MAX_QUEUES_PER_PORT,
+ RTE_MAX_LCORE
+ };
nb_lcore_params = 0;
@@ -1691,7 +1695,7 @@ parse_config(const char *q_arg)
lcore_params_array[nb_lcore_params].queue_id =
(uint16_t)int_fld[FLD_QUEUE];
lcore_params_array[nb_lcore_params].lcore_id =
- (uint8_t)int_fld[FLD_LCORE];
+ (uint32_t)int_fld[FLD_LCORE];
++nb_lcore_params;
}
lcore_params = lcore_params_array;
diff --git a/examples/l3fwd-power/main.h b/examples/l3fwd-power/main.h
index 40b5194726..194bd82102 100644
--- a/examples/l3fwd-power/main.h
+++ b/examples/l3fwd-power/main.h
@@ -10,7 +10,7 @@
struct lcore_params {
uint16_t port_id;
uint16_t queue_id;
- uint8_t lcore_id;
+ uint32_t lcore_id;
} __rte_cache_aligned;
extern struct lcore_params *lcore_params;
diff --git a/examples/l3fwd-power/perf_core.c b/examples/l3fwd-power/perf_core.c
index f34442b9d0..fbd7864cb9 100644
--- a/examples/l3fwd-power/perf_core.c
+++ b/examples/l3fwd-power/perf_core.c
@@ -24,7 +24,7 @@ struct perf_lcore_params {
uint16_t port_id;
uint16_t queue_id;
uint8_t high_perf;
- uint8_t lcore_idx;
+ uint32_t lcore_idx;
} __rte_cache_aligned;
static struct perf_lcore_params prf_lc_prms[MAX_LCORE_PARAMS];
@@ -132,7 +132,12 @@ parse_perf_config(const char *q_arg)
char *str_fld[_NUM_FLD];
int i;
unsigned int size;
- unsigned int max_fld[_NUM_FLD] = {255, RTE_MAX_QUEUES_PER_PORT, 255, 255};
+ unsigned int max_fld[_NUM_FLD] = {
+ 255,
+ RTE_MAX_QUEUES_PER_PORT,
+ 255,
+ RTE_MAX_LCORE
+ };
nb_prf_lc_prms = 0;
@@ -169,7 +174,7 @@ parse_perf_config(const char *q_arg)
prf_lc_prms[nb_prf_lc_prms].high_perf =
!!(uint8_t)int_fld[FLD_LCORE_HP];
prf_lc_prms[nb_prf_lc_prms].lcore_idx =
- (uint8_t)int_fld[FLD_LCORE_IDX];
+ (uint32_t)int_fld[FLD_LCORE_IDX];
++nb_prf_lc_prms;
}
--
2.25.1
^ permalink raw reply [flat|nested] 100+ messages in thread
* [PATCH v7 07/14] examples/l3fwd-graph: fix lcore ID restriction
2024-03-26 12:55 ` [PATCH v7 00/14] " Sivaprasad Tummala
` (5 preceding siblings ...)
2024-03-26 12:55 ` [PATCH v7 06/14] examples/l3fwd-power: " Sivaprasad Tummala
@ 2024-03-26 12:55 ` Sivaprasad Tummala
2024-03-26 12:55 ` [PATCH v7 08/14] examples/ipsec-secgw: " Sivaprasad Tummala
` (8 subsequent siblings)
15 siblings, 0 replies; 100+ messages in thread
From: Sivaprasad Tummala @ 2024-03-26 12:55 UTC (permalink / raw)
To: david.hunt, anatoly.burakov, jerinj, radu.nicolau, gakhil,
cristian.dumitrescu, ferruh.yigit, konstantin.ananyev, stephen,
mb, david.marchand
Cc: dev, ndabilpuram, stable
Currently the config option allows lcore IDs only up to
255, irrespective of RTE_MAX_LCORE.
The patch fixes this constraint by allowing all lcore IDs
up to RTE_MAX_LCORE. Also the queue IDs are increased to
support up to 65535.
Fixes: 08bd1a174461 ("examples/l3fwd-graph: add graph-based l3fwd skeleton")
Cc: ndabilpuram@marvell.com
Cc: stable@dpdk.org
Signed-off-by: Sivaprasad Tummala <sivaprasad.tummala@amd.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Acked-by: Ferruh Yigit <ferruh.yigit@amd.com>
---
examples/l3fwd-graph/main.c | 14 +++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/examples/l3fwd-graph/main.c b/examples/l3fwd-graph/main.c
index 4b018d1e78..dbc36362c3 100644
--- a/examples/l3fwd-graph/main.c
+++ b/examples/l3fwd-graph/main.c
@@ -111,7 +111,7 @@ static struct lcore_conf lcore_conf[RTE_MAX_LCORE];
struct lcore_params {
uint16_t port_id;
uint16_t queue_id;
- uint8_t lcore_id;
+ uint32_t lcore_id;
} __rte_cache_aligned;
static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
@@ -207,7 +207,7 @@ check_lcore_params(void)
{
uint16_t queue, i;
int socketid;
- uint8_t lcore;
+ uint32_t lcore;
for (i = 0; i < nb_lcore_params; ++i) {
queue = lcore_params[i].queue_id;
@@ -217,7 +217,7 @@ check_lcore_params(void)
}
lcore = lcore_params[i].lcore_id;
if (!rte_lcore_is_enabled(lcore)) {
- printf("Error: lcore %hhu is not enabled in lcore mask\n",
+ printf("Error: lcore %u is not enabled in lcore mask\n",
lcore);
return -1;
}
@@ -228,7 +228,7 @@ check_lcore_params(void)
}
socketid = rte_lcore_to_socket_id(lcore);
if ((socketid != 0) && (numa_on == 0)) {
- printf("Warning: lcore %hhu is on socket %d with numa off\n",
+ printf("Warning: lcore %u is on socket %d with numa off\n",
lcore, socketid);
}
}
@@ -282,7 +282,7 @@ static int
init_lcore_rx_queues(void)
{
uint16_t i, nb_rx_queue;
- uint8_t lcore;
+ uint32_t lcore;
for (i = 0; i < nb_lcore_params; ++i) {
lcore = lcore_params[i].lcore_id;
@@ -290,7 +290,7 @@ init_lcore_rx_queues(void)
if (nb_rx_queue >= MAX_RX_QUEUE_PER_LCORE) {
printf("Error: too many queues (%u) for lcore: %u\n",
(unsigned int)nb_rx_queue + 1,
- (unsigned int)lcore);
+ lcore);
return -1;
}
@@ -452,7 +452,7 @@ parse_config(const char *q_arg)
lcore_params_array[nb_lcore_params].queue_id =
(uint16_t)int_fld[FLD_QUEUE];
lcore_params_array[nb_lcore_params].lcore_id =
- (uint8_t)int_fld[FLD_LCORE];
+ (uint32_t)int_fld[FLD_LCORE];
++nb_lcore_params;
}
lcore_params = lcore_params_array;
--
2.25.1
^ permalink raw reply [flat|nested] 100+ messages in thread
* [PATCH v7 08/14] examples/ipsec-secgw: fix lcore ID restriction
2024-03-26 12:55 ` [PATCH v7 00/14] " Sivaprasad Tummala
` (6 preceding siblings ...)
2024-03-26 12:55 ` [PATCH v7 07/14] examples/l3fwd-graph: " Sivaprasad Tummala
@ 2024-03-26 12:55 ` Sivaprasad Tummala
2024-03-26 12:55 ` [PATCH v7 09/14] examples/qos_sched: " Sivaprasad Tummala
` (7 subsequent siblings)
15 siblings, 0 replies; 100+ messages in thread
From: Sivaprasad Tummala @ 2024-03-26 12:55 UTC (permalink / raw)
To: david.hunt, anatoly.burakov, jerinj, radu.nicolau, gakhil,
cristian.dumitrescu, ferruh.yigit, konstantin.ananyev, stephen,
mb, david.marchand
Cc: dev, sergio.gonzalez.monroy, stable
Currently the config option allows lcore IDs only up to
255, irrespective of RTE_MAX_LCORE.
The patch fixes this constraint by allowing all lcore IDs
up to RTE_MAX_LCORE. Also the queue IDs are increased to
support up to 65535.
Fixes: d299106e8e31 ("examples/ipsec-secgw: add IPsec sample application")
Cc: sergio.gonzalez.monroy@intel.com
Cc: stable@dpdk.org
Signed-off-by: Sivaprasad Tummala <sivaprasad.tummala@amd.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@huawei.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Acked-by: Ferruh Yigit <ferruh.yigit@amd.com>
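One detail worth calling out in this patch: cdev_key is used as a fixed-size
8-byte hash key, so widening lcore_id to uint32_t is paired with shrinking
the explicit padding from 3 bytes to 1 so the overall size stays 8. A small
standalone check (an illustrative sketch of the new layout only):

#include <stdint.h>

/* Layout after this patch: 4 + 1 + 1 + 1 + 1 = 8 bytes, the same total
 * as the old 2 + 1 + 1 + 1 + 3 layout, so the 8-byte hash key read is
 * unaffected. */
struct cdev_key {
	uint32_t lcore_id;
	uint8_t cipher_algo;
	uint8_t auth_algo;
	uint8_t aead_algo;
	uint8_t padding; /* must be zeroed */
};

_Static_assert(sizeof(struct cdev_key) == 8, "hash key must stay 8 bytes");

int main(void)
{
	return 0;
}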
---
examples/ipsec-secgw/event_helper.h | 2 +-
examples/ipsec-secgw/ipsec-secgw.c | 21 +++++++++++++--------
examples/ipsec-secgw/ipsec.c | 2 +-
examples/ipsec-secgw/ipsec.h | 4 ++--
4 files changed, 17 insertions(+), 12 deletions(-)
diff --git a/examples/ipsec-secgw/event_helper.h b/examples/ipsec-secgw/event_helper.h
index dfb81bfcf1..be635685b4 100644
--- a/examples/ipsec-secgw/event_helper.h
+++ b/examples/ipsec-secgw/event_helper.h
@@ -102,7 +102,7 @@ struct eh_event_link_info {
/**< Event port ID */
uint8_t eventq_id;
/**< Event queue to be linked to the port */
- uint8_t lcore_id;
+ uint32_t lcore_id;
/**< Lcore to be polling on this port */
};
diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index 782535f4b5..2d004d82fd 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
@@ -221,7 +221,7 @@ static const char *cfgfile;
struct lcore_params {
uint16_t port_id;
uint16_t queue_id;
- uint8_t lcore_id;
+ uint32_t lcore_id;
} __rte_cache_aligned;
static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
@@ -807,7 +807,7 @@ check_flow_params(uint16_t fdir_portid, uint8_t fdir_qid)
static int32_t
check_poll_mode_params(struct eh_conf *eh_conf)
{
- uint8_t lcore;
+ uint32_t lcore;
uint16_t portid;
uint16_t i;
int32_t socket_id;
@@ -826,13 +826,13 @@ check_poll_mode_params(struct eh_conf *eh_conf)
for (i = 0; i < nb_lcore_params; ++i) {
lcore = lcore_params[i].lcore_id;
if (!rte_lcore_is_enabled(lcore)) {
- printf("error: lcore %hhu is not enabled in "
+ printf("error: lcore %u is not enabled in "
"lcore mask\n", lcore);
return -1;
}
socket_id = rte_lcore_to_socket_id(lcore);
if (socket_id != 0 && numa_on == 0) {
- printf("warning: lcore %hhu is on socket %d "
+ printf("warning: lcore %u is on socket %d "
"with numa off\n",
lcore, socket_id);
}
@@ -867,7 +867,7 @@ static int32_t
init_lcore_rx_queues(void)
{
uint16_t i, nb_rx_queue;
- uint8_t lcore;
+ uint32_t lcore;
for (i = 0; i < nb_lcore_params; ++i) {
lcore = lcore_params[i].lcore_id;
@@ -1048,7 +1048,11 @@ parse_config(const char *q_arg)
char *str_fld[_NUM_FLD];
int32_t i;
uint32_t size;
- uint32_t max_fld[_NUM_FLD] = {255, RTE_MAX_QUEUES_PER_PORT, 255};
+ uint32_t max_fld[_NUM_FLD] = {
+ 255,
+ RTE_MAX_QUEUES_PER_PORT,
+ RTE_MAX_LCORE
+ };
nb_lcore_params = 0;
@@ -1082,7 +1086,7 @@ parse_config(const char *q_arg)
lcore_params_array[nb_lcore_params].queue_id =
(uint16_t)int_fld[FLD_QUEUE];
lcore_params_array[nb_lcore_params].lcore_id =
- (uint8_t)int_fld[FLD_LCORE];
+ (uint32_t)int_fld[FLD_LCORE];
++nb_lcore_params;
}
lcore_params = lcore_params_array;
@@ -1918,7 +1922,8 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads,
struct rte_eth_dev_info dev_info;
struct rte_eth_txconf *txconf;
uint16_t nb_tx_queue, nb_rx_queue;
- uint16_t tx_queueid, rx_queueid, queue, lcore_id;
+ uint16_t tx_queueid, rx_queueid, queue;
+ uint32_t lcore_id;
int32_t ret, socket_id;
struct lcore_conf *qconf;
struct rte_ether_addr ethaddr;
diff --git a/examples/ipsec-secgw/ipsec.c b/examples/ipsec-secgw/ipsec.c
index c321108119..b52b0ffc3d 100644
--- a/examples/ipsec-secgw/ipsec.c
+++ b/examples/ipsec-secgw/ipsec.c
@@ -259,7 +259,7 @@ create_lookaside_session(struct ipsec_ctx *ipsec_ctx_lcore[],
continue;
/* Looking for cryptodev, which can handle this SA */
- key.lcore_id = (uint8_t)lcore_id;
+ key.lcore_id = lcore_id;
key.cipher_algo = (uint8_t)sa->cipher_algo;
key.auth_algo = (uint8_t)sa->auth_algo;
key.aead_algo = (uint8_t)sa->aead_algo;
diff --git a/examples/ipsec-secgw/ipsec.h b/examples/ipsec-secgw/ipsec.h
index 29b9b283f0..6526a80d81 100644
--- a/examples/ipsec-secgw/ipsec.h
+++ b/examples/ipsec-secgw/ipsec.h
@@ -256,11 +256,11 @@ extern struct offloads tx_offloads;
* (hash key calculation reads 8 bytes if this struct is size 5 bytes).
*/
struct cdev_key {
- uint16_t lcore_id;
+ uint32_t lcore_id;
uint8_t cipher_algo;
uint8_t auth_algo;
uint8_t aead_algo;
- uint8_t padding[3]; /* padding to 8-byte size should be zeroed */
+ uint8_t padding; /* padding to 8-byte size should be zeroed */
};
struct socket_ctx {
--
2.25.1
^ permalink raw reply [flat|nested] 100+ messages in thread
* [PATCH v7 09/14] examples/qos_sched: fix lcore ID restriction
2024-03-26 12:55 ` [PATCH v7 00/14] " Sivaprasad Tummala
` (7 preceding siblings ...)
2024-03-26 12:55 ` [PATCH v7 08/14] examples/ipsec-secgw: " Sivaprasad Tummala
@ 2024-03-26 12:55 ` Sivaprasad Tummala
2024-03-26 12:55 ` [PATCH v7 10/14] examples/vm_power_manager: " Sivaprasad Tummala
` (6 subsequent siblings)
15 siblings, 0 replies; 100+ messages in thread
From: Sivaprasad Tummala @ 2024-03-26 12:55 UTC (permalink / raw)
To: david.hunt, anatoly.burakov, jerinj, radu.nicolau, gakhil,
cristian.dumitrescu, ferruh.yigit, konstantin.ananyev, stephen,
mb, david.marchand
Cc: dev, stable
Currently the config option allows lcore IDs only up to
255, irrespective of RTE_MAX_LCORE.
The patch fixes this constraint by allowing all lcore IDs
up to RTE_MAX_LCORE. Also the queue IDs are increased to
support up to 65535.
Fixes: de3cfa2c9823 ("sched: initial import")
Cc: stable@dpdk.org
Signed-off-by: Sivaprasad Tummala <sivaprasad.tummala@amd.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Acked-by: Ferruh Yigit <ferruh.yigit@amd.com>
---
examples/qos_sched/args.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/examples/qos_sched/args.c b/examples/qos_sched/args.c
index 8d61d3e454..886542b3c1 100644
--- a/examples/qos_sched/args.c
+++ b/examples/qos_sched/args.c
@@ -184,10 +184,10 @@ app_parse_flow_conf(const char *conf_str)
pconf->rx_port = vals[0];
pconf->tx_port = vals[1];
- pconf->rx_core = (uint8_t)vals[2];
- pconf->wt_core = (uint8_t)vals[3];
+ pconf->rx_core = vals[2];
+ pconf->wt_core = vals[3];
if (ret == 5)
- pconf->tx_core = (uint8_t)vals[4];
+ pconf->tx_core = vals[4];
else
pconf->tx_core = pconf->wt_core;
--
2.25.1
^ permalink raw reply [flat|nested] 100+ messages in thread
* [PATCH v7 10/14] examples/vm_power_manager: fix lcore ID restriction
2024-03-26 12:55 ` [PATCH v7 00/14] " Sivaprasad Tummala
` (8 preceding siblings ...)
2024-03-26 12:55 ` [PATCH v7 09/14] examples/qos_sched: " Sivaprasad Tummala
@ 2024-03-26 12:55 ` Sivaprasad Tummala
2024-03-26 12:55 ` [PATCH v7 11/14] examples/l3fwd: fix port " Sivaprasad Tummala
` (5 subsequent siblings)
15 siblings, 0 replies; 100+ messages in thread
From: Sivaprasad Tummala @ 2024-03-26 12:55 UTC (permalink / raw)
To: david.hunt, anatoly.burakov, jerinj, radu.nicolau, gakhil,
cristian.dumitrescu, ferruh.yigit, konstantin.ananyev, stephen,
mb, david.marchand
Cc: dev, marcinx.hajkowski, stable
Currently the config option allows lcore IDs only up to
255, irrespective of RTE_MAX_LCORE.
The patch fixes this constraint by allowing all lcore IDs
up to RTE_MAX_LCORE. Also the queue IDs are increased to
support up to 65535.
Fixes: 0e8f47491f09 ("examples/vm_power: add command to query CPU frequency")
Cc: marcinx.hajkowski@intel.com
Cc: stable@dpdk.org
Signed-off-by: Sivaprasad Tummala <sivaprasad.tummala@amd.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Acked-by: Ferruh Yigit <ferruh.yigit@amd.com>
---
examples/vm_power_manager/guest_cli/vm_power_cli_guest.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/examples/vm_power_manager/guest_cli/vm_power_cli_guest.c b/examples/vm_power_manager/guest_cli/vm_power_cli_guest.c
index 94bfbbaf78..5eddb47847 100644
--- a/examples/vm_power_manager/guest_cli/vm_power_cli_guest.c
+++ b/examples/vm_power_manager/guest_cli/vm_power_cli_guest.c
@@ -401,7 +401,7 @@ check_response_cmd(unsigned int lcore_id, int *result)
struct cmd_set_cpu_freq_result {
cmdline_fixed_string_t set_cpu_freq;
- uint8_t lcore_id;
+ uint32_t lcore_id;
cmdline_fixed_string_t cmd;
};
@@ -444,7 +444,7 @@ cmdline_parse_token_string_t cmd_set_cpu_freq =
set_cpu_freq, "set_cpu_freq");
cmdline_parse_token_num_t cmd_set_cpu_freq_core_num =
TOKEN_NUM_INITIALIZER(struct cmd_set_cpu_freq_result,
- lcore_id, RTE_UINT8);
+ lcore_id, RTE_UINT32);
cmdline_parse_token_string_t cmd_set_cpu_freq_cmd_cmd =
TOKEN_STRING_INITIALIZER(struct cmd_set_cpu_freq_result,
cmd, "up#down#min#max#enable_turbo#disable_turbo");
--
2.25.1
^ permalink raw reply [flat|nested] 100+ messages in thread
* [PATCH v7 11/14] examples/l3fwd: fix port ID restriction
2024-03-26 12:55 ` [PATCH v7 00/14] " Sivaprasad Tummala
` (9 preceding siblings ...)
2024-03-26 12:55 ` [PATCH v7 10/14] examples/vm_power_manager: " Sivaprasad Tummala
@ 2024-03-26 12:55 ` Sivaprasad Tummala
2024-03-26 12:55 ` [PATCH v7 12/14] examples/l3fwd-power: " Sivaprasad Tummala
` (4 subsequent siblings)
15 siblings, 0 replies; 100+ messages in thread
From: Sivaprasad Tummala @ 2024-03-26 12:55 UTC (permalink / raw)
To: david.hunt, anatoly.burakov, jerinj, radu.nicolau, gakhil,
cristian.dumitrescu, ferruh.yigit, konstantin.ananyev, stephen,
mb, david.marchand
Cc: dev, stable
Currently the application supports port IDs only up to
255, irrespective of RTE_MAX_ETHPORTS.
The patch fixes this constraint by allowing port IDs up
to RTE_MAX_ETHPORTS.
Fixes: af75078fece3 ("first public release")
Cc: stable@dpdk.org
Signed-off-by: Sivaprasad Tummala <sivaprasad.tummala@amd.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@huawei.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Acked-by: Ferruh Yigit <ferruh.yigit@amd.com>
---
examples/l3fwd/main.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/examples/l3fwd/main.c b/examples/l3fwd/main.c
index 47baf464e2..a239869ada 100644
--- a/examples/l3fwd/main.c
+++ b/examples/l3fwd/main.c
@@ -501,7 +501,7 @@ parse_config(const char *q_arg)
int i;
unsigned size;
uint16_t max_fld[_NUM_FLD] = {
- 255,
+ RTE_MAX_ETHPORTS,
RTE_MAX_QUEUES_PER_PORT,
RTE_MAX_LCORE
};
@@ -532,7 +532,7 @@ parse_config(const char *q_arg)
return -1;
}
lcore_params_array[nb_lcore_params].port_id =
- (uint8_t)int_fld[FLD_PORT];
+ (uint16_t)int_fld[FLD_PORT];
lcore_params_array[nb_lcore_params].queue_id =
(uint16_t)int_fld[FLD_QUEUE];
lcore_params_array[nb_lcore_params].lcore_id =
--
2.25.1
^ permalink raw reply [flat|nested] 100+ messages in thread
* [PATCH v7 12/14] examples/l3fwd-power: fix port ID restriction
2024-03-26 12:55 ` [PATCH v7 00/14] " Sivaprasad Tummala
` (10 preceding siblings ...)
2024-03-26 12:55 ` [PATCH v7 11/14] examples/l3fwd: fix port " Sivaprasad Tummala
@ 2024-03-26 12:55 ` Sivaprasad Tummala
2024-03-26 12:55 ` [PATCH v7 13/14] examples/l3fwd-graph: " Sivaprasad Tummala
` (3 subsequent siblings)
15 siblings, 0 replies; 100+ messages in thread
From: Sivaprasad Tummala @ 2024-03-26 12:55 UTC (permalink / raw)
To: david.hunt, anatoly.burakov, jerinj, radu.nicolau, gakhil,
cristian.dumitrescu, ferruh.yigit, konstantin.ananyev, stephen,
mb, david.marchand
Cc: dev, stable
Currently the application supports port IDs only up to
255, irrespective of RTE_MAX_ETHPORTS.
The patch fixes this constraint by allowing port IDs up
to RTE_MAX_ETHPORTS.
Fixes: f88e7c175a68 ("examples/l3fwd-power: add high/regular perf cores options")
Cc: radu.nicolau@intel.com
Cc: stable@dpdk.org
Signed-off-by: Sivaprasad Tummala <sivaprasad.tummala@amd.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Acked-by: Ferruh Yigit <ferruh.yigit@amd.com>
---
examples/l3fwd-power/main.c | 4 ++--
examples/l3fwd-power/perf_core.c | 4 ++--
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/examples/l3fwd-power/main.c b/examples/l3fwd-power/main.c
index eac92ef875..a993af0408 100644
--- a/examples/l3fwd-power/main.c
+++ b/examples/l3fwd-power/main.c
@@ -1659,7 +1659,7 @@ parse_config(const char *q_arg)
int i;
unsigned size;
unsigned int max_fld[_NUM_FLD] = {
- 255,
+ RTE_MAX_ETHPORTS,
RTE_MAX_QUEUES_PER_PORT,
RTE_MAX_LCORE
};
@@ -1691,7 +1691,7 @@ parse_config(const char *q_arg)
return -1;
}
lcore_params_array[nb_lcore_params].port_id =
- (uint8_t)int_fld[FLD_PORT];
+ (uint16_t)int_fld[FLD_PORT];
lcore_params_array[nb_lcore_params].queue_id =
(uint16_t)int_fld[FLD_QUEUE];
lcore_params_array[nb_lcore_params].lcore_id =
diff --git a/examples/l3fwd-power/perf_core.c b/examples/l3fwd-power/perf_core.c
index fbd7864cb9..e4bdb62121 100644
--- a/examples/l3fwd-power/perf_core.c
+++ b/examples/l3fwd-power/perf_core.c
@@ -133,7 +133,7 @@ parse_perf_config(const char *q_arg)
int i;
unsigned int size;
unsigned int max_fld[_NUM_FLD] = {
- 255,
+ RTE_MAX_ETHPORTS,
RTE_MAX_QUEUES_PER_PORT,
255,
RTE_MAX_LCORE
@@ -168,7 +168,7 @@ parse_perf_config(const char *q_arg)
return -1;
}
prf_lc_prms[nb_prf_lc_prms].port_id =
- (uint8_t)int_fld[FLD_PORT];
+ (uint16_t)int_fld[FLD_PORT];
prf_lc_prms[nb_prf_lc_prms].queue_id =
(uint16_t)int_fld[FLD_QUEUE];
prf_lc_prms[nb_prf_lc_prms].high_perf =
--
2.25.1
^ permalink raw reply [flat|nested] 100+ messages in thread
* [PATCH v7 13/14] examples/l3fwd-graph: fix port ID restriction
2024-03-26 12:55 ` [PATCH v7 00/14] " Sivaprasad Tummala
` (11 preceding siblings ...)
2024-03-26 12:55 ` [PATCH v7 12/14] examples/l3fwd-power: " Sivaprasad Tummala
@ 2024-03-26 12:55 ` Sivaprasad Tummala
2024-03-26 12:55 ` [PATCH v7 14/14] examples/ipsec-secgw: " Sivaprasad Tummala
` (2 subsequent siblings)
15 siblings, 0 replies; 100+ messages in thread
From: Sivaprasad Tummala @ 2024-03-26 12:55 UTC (permalink / raw)
To: david.hunt, anatoly.burakov, jerinj, radu.nicolau, gakhil,
cristian.dumitrescu, ferruh.yigit, konstantin.ananyev, stephen,
mb, david.marchand
Cc: dev, ndabilpuram, stable
Currently the application supports port IDs only up to
255, irrespective of RTE_MAX_ETHPORTS.
The patch fixes this constraint by allowing port IDs up
to RTE_MAX_ETHPORTS.
Fixes: 08bd1a174461 ("examples/l3fwd-graph: add graph-based l3fwd skeleton")
Cc: ndabilpuram@marvell.com
Cc: stable@dpdk.org
Signed-off-by: Sivaprasad Tummala <sivaprasad.tummala@amd.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Acked-by: Ferruh Yigit <ferruh.yigit@amd.com>
---
examples/l3fwd-graph/main.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/examples/l3fwd-graph/main.c b/examples/l3fwd-graph/main.c
index dbc36362c3..4ded69b4a0 100644
--- a/examples/l3fwd-graph/main.c
+++ b/examples/l3fwd-graph/main.c
@@ -448,7 +448,7 @@ parse_config(const char *q_arg)
}
lcore_params_array[nb_lcore_params].port_id =
- (uint8_t)int_fld[FLD_PORT];
+ (uint16_t)int_fld[FLD_PORT];
lcore_params_array[nb_lcore_params].queue_id =
(uint16_t)int_fld[FLD_QUEUE];
lcore_params_array[nb_lcore_params].lcore_id =
--
2.25.1
^ permalink raw reply [flat|nested] 100+ messages in thread
* [PATCH v7 14/14] examples/ipsec-secgw: fix port ID restriction
2024-03-26 12:55 ` [PATCH v7 00/14] " Sivaprasad Tummala
` (12 preceding siblings ...)
2024-03-26 12:55 ` [PATCH v7 13/14] examples/l3fwd-graph: " Sivaprasad Tummala
@ 2024-03-26 12:55 ` Sivaprasad Tummala
2024-03-26 12:55 ` [PATCH v7 00/14] fix lcore " Sivaprasad Tummala
2024-04-25 12:31 ` Ferruh Yigit
15 siblings, 0 replies; 100+ messages in thread
From: Sivaprasad Tummala @ 2024-03-26 12:55 UTC (permalink / raw)
To: david.hunt, anatoly.burakov, jerinj, radu.nicolau, gakhil,
cristian.dumitrescu, ferruh.yigit, konstantin.ananyev, stephen,
mb, david.marchand
Cc: dev, sergio.gonzalez.monroy, stable
Currently the application supports port IDs only up to
255, irrespective of RTE_MAX_ETHPORTS.
The patch fixes this constraint by allowing port IDs up
to RTE_MAX_ETHPORTS.
Fixes: d299106e8e31 ("examples/ipsec-secgw: add IPsec sample application")
Cc: sergio.gonzalez.monroy@intel.com
Cc: stable@dpdk.org
Signed-off-by: Sivaprasad Tummala <sivaprasad.tummala@amd.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@huawei.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Acked-by: Ferruh Yigit <ferruh.yigit@amd.com>
---
examples/ipsec-secgw/ipsec-secgw.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index 2d004d82fd..761b9cf396 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
@@ -1049,7 +1049,7 @@ parse_config(const char *q_arg)
int32_t i;
uint32_t size;
uint32_t max_fld[_NUM_FLD] = {
- 255,
+ RTE_MAX_ETHPORTS,
RTE_MAX_QUEUES_PER_PORT,
RTE_MAX_LCORE
};
@@ -1082,7 +1082,7 @@ parse_config(const char *q_arg)
return -1;
}
lcore_params_array[nb_lcore_params].port_id =
- (uint8_t)int_fld[FLD_PORT];
+ (uint16_t)int_fld[FLD_PORT];
lcore_params_array[nb_lcore_params].queue_id =
(uint16_t)int_fld[FLD_QUEUE];
lcore_params_array[nb_lcore_params].lcore_id =
--
2.25.1
^ permalink raw reply [flat|nested] 100+ messages in thread
* [PATCH v7 00/14] fix lcore ID restriction
2024-03-26 12:55 ` [PATCH v7 00/14] " Sivaprasad Tummala
` (13 preceding siblings ...)
2024-03-26 12:55 ` [PATCH v7 14/14] examples/ipsec-secgw: " Sivaprasad Tummala
@ 2024-03-26 12:55 ` Sivaprasad Tummala
2024-07-23 15:19 ` Ferruh Yigit
2024-04-25 12:31 ` Ferruh Yigit
15 siblings, 1 reply; 100+ messages in thread
From: Sivaprasad Tummala @ 2024-03-26 12:55 UTC (permalink / raw)
To: david.hunt, anatoly.burakov, jerinj, radu.nicolau, gakhil,
cristian.dumitrescu, ferruh.yigit, konstantin.ananyev, stephen,
mb, david.marchand
Cc: dev
With modern CPUs it is possible to have a higher
CPU count and thus a higher RTE_MAX_LCORE.
In the DPDK sample applications, the current config
lcore options are hard-limited to 255.
The patchset fixes these constraints by allowing all
lcore IDs up to RTE_MAX_LCORE. The rx queue IDs are
also increased to support up to 65535, and the port ID
constraints are fixed to support up to RTE_MAX_ETHPORTS.
v7:
- updated commit log with rx queue IDs
- changed format specifier to %PRIu16
- removed patch unrelated changes
v6:
- split queue_id, lcore_id and port_id changes as
separate patches.
- updated git commit description on individual
patches
v5:
- updated lcore_id type to uint32_t
v4:
- fixed build errors with queue_id type
in ipsec-secgw
v3:
- updated queue_id type to uint16_t
v2:
- fixed typo with lcore_id type in l3fwd
Sivaprasad Tummala (14):
examples/l3fwd: fix queue ID restriction
examples/l3fwd-power: fix queue ID restriction
examples/l3fwd-graph: fix queue ID restriction
examples/ipsec-secgw: fix queue ID restriction
examples/l3fwd: fix lcore ID restriction
examples/l3fwd-power: fix lcore ID restriction
examples/l3fwd-graph: fix lcore ID restriction
examples/ipsec-secgw: fix lcore ID restriction
examples/qos_sched: fix lcore ID restriction
examples/vm_power_manager: fix lcore ID restriction
examples/l3fwd: fix port ID restriction
examples/l3fwd-power: fix port ID restriction
examples/l3fwd-graph: fix port ID restriction
examples/ipsec-secgw: fix port ID restriction
examples/ipsec-secgw/event_helper.h | 2 +-
examples/ipsec-secgw/ipsec-secgw.c | 40 +++++++-----
examples/ipsec-secgw/ipsec.c | 2 +-
examples/ipsec-secgw/ipsec.h | 6 +-
examples/ipsec-secgw/ipsec_worker.c | 10 ++-
examples/l3fwd-graph/main.c | 33 +++++-----
examples/l3fwd-power/main.c | 65 ++++++++++---------
examples/l3fwd-power/main.h | 4 +-
examples/l3fwd-power/perf_core.c | 19 ++++--
examples/l3fwd/l3fwd.h | 2 +-
examples/l3fwd/l3fwd_acl.c | 4 +-
examples/l3fwd/l3fwd_em.c | 4 +-
examples/l3fwd/l3fwd_event.h | 2 +-
examples/l3fwd/l3fwd_fib.c | 4 +-
examples/l3fwd/l3fwd_lpm.c | 5 +-
examples/l3fwd/main.c | 42 +++++++-----
examples/qos_sched/args.c | 6 +-
.../guest_cli/vm_power_cli_guest.c | 4 +-
18 files changed, 135 insertions(+), 119 deletions(-)
--
2.25.1
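Since the v7 note above about switching the format specifier to %PRIu16 reads
tersely, here is a small illustration of what it refers to: once port and
queue IDs are stored as uint16_t and lcore IDs as uint32_t, log messages can
use the <inttypes.h> conversion macros instead of casting back to narrower
types. The struct name and the values below are illustrative only and are not
taken from any file in the series.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative record using the widened types described above:
 * 16-bit port and queue IDs, 32-bit lcore IDs. */
struct rx_binding {
        uint16_t port_id;
        uint16_t queue_id;
        uint32_t lcore_id;
};

int
main(void)
{
        /* None of these values fit the old 8-bit fields. */
        struct rx_binding b = {
                .port_id = 300, .queue_id = 4096, .lcore_id = 300
        };

        /* The <inttypes.h> macros pick the right conversion specifier for
         * each fixed-width type, so no narrowing casts are needed. */
        printf("port %" PRIu16 " queue %" PRIu16 " lcore %" PRIu32 "\n",
                        b.port_id, b.queue_id, b.lcore_id);
        return 0;
}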
* Re: [PATCH v7 00/14] fix lcore ID restriction
2024-03-26 12:55 ` [PATCH v7 00/14] " Sivaprasad Tummala
` (14 preceding siblings ...)
2024-03-26 12:55 ` [PATCH v7 00/14] fix lcore " Sivaprasad Tummala
@ 2024-04-25 12:31 ` Ferruh Yigit
2024-04-30 13:47 ` Ferruh Yigit
15 siblings, 1 reply; 100+ messages in thread
From: Ferruh Yigit @ 2024-04-25 12:31 UTC (permalink / raw)
To: Sivaprasad Tummala, david.hunt, anatoly.burakov, jerinj,
radu.nicolau, gakhil, cristian.dumitrescu, konstantin.ananyev,
stephen, mb, david.marchand
Cc: dev
On 3/26/2024 12:55 PM, Sivaprasad Tummala wrote:
> With modern CPUs, it is possible to have higher
> CPU count thus we can have higher RTE_MAX_LCORES.
> In DPDK sample applications, the current config
> lcore options are hard limited to 255.
>
> The patchset fixes these constraints by allowing
> all lcore IDs up to RTE_MAX_LCORES. Also the rx
> queue IDs are increased to support up to 65535.
> The port ID constraints were also fixed to support
> up to RTE_MAX_ETHPORTS.
>
> v7:
> - updated commit log with rx queue IDs
> - changed format specifier to %PRIu16
> - removed patch unrelated changes
>
> v6:
> - split queue_id, lcore_id and port_id changes as
> separate patches.
> - updated git commit description on individual
> patches
>
> v5:
> - updated lcore_id type to uint32_t
>
> v4:
> - fixed build errors with queue_id type
> in ipsec-secgw
>
> v3:
> - updated queue_id type to uint16_t
>
> v2:
> - fixed typo with lcore_id type in l3fwd
>
> Sivaprasad Tummala (14):
> examples/l3fwd: fix queue ID restriction
> examples/l3fwd-power: fix queue ID restriction
> examples/l3fwd-graph: fix queue ID restriction
> examples/ipsec-secgw: fix queue ID restriction
> examples/l3fwd: fix lcore ID restriction
> examples/l3fwd-power: fix lcore ID restriction
> examples/l3fwd-graph: fix lcore ID restriction
> examples/ipsec-secgw: fix lcore ID restriction
> examples/qos_sched: fix lcore ID restriction
> examples/vm_power_manager: fix lcore ID restriction
> examples/l3fwd: fix port ID restriction
> examples/l3fwd-power: fix port ID restriction
> examples/l3fwd-graph: fix port ID restriction
> examples/ipsec-secgw: fix port ID restriction
>
Recheck-request: iol-unit-amd64-testing
* Re: [PATCH v7 00/14] fix lcore ID restriction
2024-04-25 12:31 ` Ferruh Yigit
@ 2024-04-30 13:47 ` Ferruh Yigit
0 siblings, 0 replies; 100+ messages in thread
From: Ferruh Yigit @ 2024-04-30 13:47 UTC (permalink / raw)
To: Sivaprasad Tummala, david.hunt, anatoly.burakov, jerinj,
radu.nicolau, gakhil, cristian.dumitrescu, konstantin.ananyev,
stephen, mb, david.marchand
Cc: dev
On 4/25/2024 1:31 PM, Ferruh Yigit wrote:
> On 3/26/2024 12:55 PM, Sivaprasad Tummala wrote:
>> With modern CPUs, it is possible to have higher
>> CPU count thus we can have higher RTE_MAX_LCORES.
>> In DPDK sample applications, the current config
>> lcore options are hard limited to 255.
>>
>> The patchset fixes these constraints by allowing
>> all lcore IDs up to RTE_MAX_LCORES. Also the rx
>> queue IDs are increased to support up to 65535.
>> The port ID constraints were also fixed to support
>> up to RTE_MAX_ETHPORTS.
>>
>> v7:
>> - updated commit log with rx queue IDs
>> - changed format specifier to %PRIu16
>> - removed patch unrelated changes
>>
>> v6:
>> - split queue_id, lcore_id and port_id changes as
>> separate patches.
>> - updated git commit description on individual
>> patches
>>
>> v5:
>> - updated lcore_id type to uint32_t
>>
>> v4:
>> - fixed build errors with queue_id type
>> in ipsec-secgw
>>
>> v3:
>> - updated queue_id type to uint16_t
>>
>> v2:
>> - fixed typo with lcore_id type in l3fwd
>>
>> Sivaprasad Tummala (14):
>> examples/l3fwd: fix queue ID restriction
>> examples/l3fwd-power: fix queue ID restriction
>> examples/l3fwd-graph: fix queue ID restriction
>> examples/ipsec-secgw: fix queue ID restriction
>> examples/l3fwd: fix lcore ID restriction
>> examples/l3fwd-power: fix lcore ID restriction
>> examples/l3fwd-graph: fix lcore ID restriction
>> examples/ipsec-secgw: fix lcore ID restriction
>> examples/qos_sched: fix lcore ID restriction
>> examples/vm_power_manager: fix lcore ID restriction
>> examples/l3fwd: fix port ID restriction
>> examples/l3fwd-power: fix port ID restriction
>> examples/l3fwd-graph: fix port ID restriction
>> examples/ipsec-secgw: fix port ID restriction
>>
>
> Recheck-request: iol-unit-amd64-testing
>
Recheck-request: iol-sample-apps-testing
* Re: [PATCH v7 00/14] fix lcore ID restriction
2024-03-26 12:55 ` [PATCH v7 00/14] fix lcore " Sivaprasad Tummala
@ 2024-07-23 15:19 ` Ferruh Yigit
2024-07-23 21:32 ` Thomas Monjalon
0 siblings, 1 reply; 100+ messages in thread
From: Ferruh Yigit @ 2024-07-23 15:19 UTC (permalink / raw)
To: david.marchand, Thomas Monjalon
Cc: dev, Sivaprasad Tummala, david.hunt, anatoly.burakov, jerinj,
radu.nicolau, gakhil, cristian.dumitrescu, mb, stephen,
konstantin.ananyev
On 3/26/2024 12:55 PM, Sivaprasad Tummala wrote:
> With modern CPUs, it is possible to have higher
> CPU count thus we can have higher RTE_MAX_LCORES.
> In DPDK sample applications, the current config
> lcore options are hard limited to 255.
>
> The patchset fixes these constraints by allowing
> all lcore IDs up to RTE_MAX_LCORES. Also the rx
> queue IDs are increased to support up to 65535.
> The port ID constraints were also fixed to support
> up to RTE_MAX_ETHPORTS.
>
> v7:
> - updated commit log with rx queue IDs
> - changed format specifier to %PRIu16
> - removed patch unrelated changes
>
> v6:
> - split queue_id, lcore_id and port_id changes as
> separate patches.
> - updated git commit description on individual
> patches
>
> v5:
> - updated lcore_id type to uint32_t
>
> v4:
> - fixed build errors with queue_id type
> in ipsec-secgw
>
> v3:
> - updated queue_id type to uint16_t
>
> v2:
> - fixed typo with lcore_id type in l3fwd
>
> Sivaprasad Tummala (14):
> examples/l3fwd: fix queue ID restriction
> examples/l3fwd-power: fix queue ID restriction
> examples/l3fwd-graph: fix queue ID restriction
> examples/ipsec-secgw: fix queue ID restriction
> examples/l3fwd: fix lcore ID restriction
> examples/l3fwd-power: fix lcore ID restriction
> examples/l3fwd-graph: fix lcore ID restriction
> examples/ipsec-secgw: fix lcore ID restriction
> examples/qos_sched: fix lcore ID restriction
> examples/vm_power_manager: fix lcore ID restriction
> examples/l3fwd: fix port ID restriction
> examples/l3fwd-power: fix port ID restriction
> examples/l3fwd-graph: fix port ID restriction
> examples/ipsec-secgw: fix port ID restriction
>
Hi Thomas,
I am aware we are very close to -rc3, but this patch series is from the
previous release, can it be possible to include it to -rc3?
Thanks,
ferruh
* Re: [PATCH v7 00/14] fix lcore ID restriction
2024-07-23 15:19 ` Ferruh Yigit
@ 2024-07-23 21:32 ` Thomas Monjalon
0 siblings, 0 replies; 100+ messages in thread
From: Thomas Monjalon @ 2024-07-23 21:32 UTC (permalink / raw)
To: Sivaprasad Tummala, Ferruh Yigit
Cc: david.marchand, dev, david.hunt, anatoly.burakov, jerinj,
radu.nicolau, gakhil, cristian.dumitrescu, mb, stephen,
konstantin.ananyev
23/07/2024 17:19, Ferruh Yigit:
> > Sivaprasad Tummala (14):
> > examples/l3fwd: fix queue ID restriction
> > examples/l3fwd-power: fix queue ID restriction
> > examples/l3fwd-graph: fix queue ID restriction
> > examples/ipsec-secgw: fix queue ID restriction
> > examples/l3fwd: fix lcore ID restriction
> > examples/l3fwd-power: fix lcore ID restriction
> > examples/l3fwd-graph: fix lcore ID restriction
> > examples/ipsec-secgw: fix lcore ID restriction
> > examples/qos_sched: fix lcore ID restriction
> > examples/vm_power_manager: fix lcore ID restriction
> > examples/l3fwd: fix port ID restriction
> > examples/l3fwd-power: fix port ID restriction
> > examples/l3fwd-graph: fix port ID restriction
> > examples/ipsec-secgw: fix port ID restriction
> >
>
> Hi Thomas,
>
> I am aware we are very close to -rc3, but this patch series is from the
> previous release, can it be possible to include it to -rc3?
Yes applied, thanks.
Note: squashed in 3 patches.
* [PATCH] power/amd_uncore: add e-smi installation instructions
2023-12-19 3:28 ` [PATCH v2 0/6] " Sivaprasad Tummala
` (7 preceding siblings ...)
2023-12-20 6:44 ` [PATCH v3 " Sivaprasad Tummala
@ 2024-11-29 4:32 ` Sivaprasad Tummala
2024-11-29 10:34 ` Thomas Monjalon
8 siblings, 1 reply; 100+ messages in thread
From: Sivaprasad Tummala @ 2024-11-29 4:32 UTC (permalink / raw)
To: david.hunt, anatoly.burakov, jerinj, radu.nicolau, gakhil,
cristian.dumitrescu, ferruh.yigit, konstantin.ananyev, thomas
Cc: dev
Add a section on installing and building the E-SMI library
for AMD EPYC Uncore support, and document the version requirements.
Signed-off-by: Sivaprasad Tummala <sivaprasad.tummala@amd.com>
---
doc/guides/prog_guide/power_man.rst | 32 +++++++++++++++++++++++++++++
1 file changed, 32 insertions(+)
diff --git a/doc/guides/prog_guide/power_man.rst b/doc/guides/prog_guide/power_man.rst
index 74039e5786..d367a81596 100644
--- a/doc/guides/prog_guide/power_man.rst
+++ b/doc/guides/prog_guide/power_man.rst
@@ -249,6 +249,38 @@ Additionally, the EPYC System Management Interface In-band Library for Linux
offers essential API, enabling user-space software
to effectively manage system functions.
+E-SMI Installation
+------------------
+
+To build DPDK with AMD EPYC Uncore support, download the E-SMI
+library from `here <https://github.com/amd/esmi_ib_library>`_
+and build and install it on the system before building DPDK.
+
+.. code-block:: console
+
+ cd esmi_ib_library
+ cmake .
+ sudo make install
+ cp /opt/e-sms/e_smi/lib/* /usr/local/lib/
+ cp /opt/e-sms/e_smi/include/* /usr/local/include/
+
+The library files, headers and tool are installed under /opt/e-sms.
+
+Note: the library depends on the amd_hsmp.h kernel header; without it, compilation will fail.
+
+Building the library requires CMake (v3.5.0).
+
+As a reference, the following table maps DPDK versions to the E-SMI library
+and Linux kernel versions they support:
+
+.. table:: DPDK and E-SMI library and kernel version compatibility
+
+ ============== ============== =====================
+ DPDK version E-SMI version Linux Kernel version
+ ============== ============== =====================
+ 24.11+ 4.0.0 6.7+
+ ============== ============== =====================
+
Uncore API Overview
~~~~~~~~~~~~~~~~~~~
--
2.34.1
* Re: [PATCH] power/amd_uncore: add e-smi installation instructions
2024-11-29 4:32 ` [PATCH] power/amd_uncore: add e-smi installation instructions Sivaprasad Tummala
@ 2024-11-29 10:34 ` Thomas Monjalon
0 siblings, 0 replies; 100+ messages in thread
From: Thomas Monjalon @ 2024-11-29 10:34 UTC (permalink / raw)
To: ferruh.yigit, Sivaprasad Tummala
Cc: david.hunt, anatoly.burakov, jerinj, radu.nicolau, gakhil,
cristian.dumitrescu, konstantin.ananyev, dev
29/11/2024 05:32, Sivaprasad Tummala:
> Added section for installing and building the E-SMI library
> for AMD EPYC Uncore support and version requirements.
>
> Signed-off-by: Sivaprasad Tummala <sivaprasad.tummala@amd.com>
> ---
> doc/guides/prog_guide/power_man.rst | 32 +++++++++++++++++++++++++++++
> 1 file changed, 32 insertions(+)
>
> diff --git a/doc/guides/prog_guide/power_man.rst b/doc/guides/prog_guide/power_man.rst
> index 74039e5786..d367a81596 100644
> --- a/doc/guides/prog_guide/power_man.rst
> +++ b/doc/guides/prog_guide/power_man.rst
> @@ -249,6 +249,38 @@ Additionally, the EPYC System Management Interface In-band Library for Linux
> offers essential API, enabling user-space software
> to effectively manage system functions.
>
> +E-SMI Installation
> +------------------
> +
> +To build DPDK with AMD EPYC Uncore the user is required to download the e-smi
> +library from `here <https://github.com/amd/esmi_ib_library>`_
> +and compile it on their user system before building DPDK.
> +
> +.. code-block:: console
> +
> + cd esmi_ib_library
> + cmake .
> + sudo make install
> + cp /opt/e-sms/e_smi/lib/* /usr/local/lib/*
> + cp /opt/e-sms/e_smi/include/* /usr/local/include/*
Why copying the files?
You can specify the directories in an environment variable.
There is no pkg-config file?
> +
> +Library file, header and tool are installed at /opt/e-sms.
> +
> +Note: Library is dependent on amd_hsmp.h header and without this, compilation will break.
Yes, it does not compile on my machine (with Linux 6.12):
e_smi.c:566:27: error: ‘HSMP_GET_RAPL_UNITS’ undeclared
What should I do?
> +
> +The library requires CMake (v3.5.0) to be built.
> +
> +As a reference, the following table shows a mapping between the DPDK versions
> +and the E-SMI library and kernel version supported by them:
> +
> +.. table:: DPDK and E-SMI library and kernel version compatibility
> +
> + ============== ============== =====================
> + DPDK version E-SMI version Linux Kernel version
> + ============== ============== =====================
> + 24.11+ 4.0.0 6.7+
> + ============== ============== =====================
Thread overview: 100+ messages (end of thread; newest: 2024-11-29 10:34 UTC)
2023-12-18 7:49 [PATCH 1/6] examples/l3fwd: fix lcore ID restriction Sivaprasad Tummala
2023-12-18 7:49 ` [PATCH 2/6] examples/l3fwd-power: " Sivaprasad Tummala
2023-12-18 7:49 ` [PATCH 3/6] examples/l3fwd-graph: " Sivaprasad Tummala
2023-12-18 7:49 ` [PATCH 4/6] examples/ipsec-secgw: " Sivaprasad Tummala
2023-12-18 7:49 ` [PATCH 5/6] examples/qos_sched: " Sivaprasad Tummala
2023-12-18 7:49 ` [PATCH 6/6] examples/vm_power_manager: " Sivaprasad Tummala
2023-12-19 3:28 ` [PATCH v2 0/6] " Sivaprasad Tummala
2023-12-19 3:28 ` [PATCH v2 1/6] examples/l3fwd: " Sivaprasad Tummala
2023-12-19 12:05 ` Konstantin Ananyev
2023-12-19 12:30 ` Konstantin Ananyev
2023-12-19 14:18 ` Tummala, Sivaprasad
2023-12-19 15:10 ` Konstantin Ananyev
2023-12-20 1:32 ` Tummala, Sivaprasad
2023-12-19 3:28 ` [PATCH v2 2/6] examples/l3fwd-power: " Sivaprasad Tummala
2023-12-19 3:28 ` [PATCH v2 3/6] examples/l3fwd-graph: " Sivaprasad Tummala
2023-12-19 3:28 ` [PATCH v2 4/6] examples/ipsec-secgw: " Sivaprasad Tummala
2023-12-19 12:03 ` Konstantin Ananyev
2023-12-19 3:28 ` [PATCH v2 5/6] examples/qos_sched: " Sivaprasad Tummala
2023-12-19 3:28 ` [PATCH v2 6/6] examples/vm_power_manager: " Sivaprasad Tummala
2023-12-19 3:28 ` [PATCH v2 0/6] " Sivaprasad Tummala
2023-12-20 6:44 ` [PATCH v3 " Sivaprasad Tummala
2023-12-20 6:44 ` [PATCH v3 1/6] examples/l3fwd: " Sivaprasad Tummala
2023-12-20 6:44 ` [PATCH v3 2/6] examples/l3fwd-power: " Sivaprasad Tummala
2023-12-20 6:44 ` [PATCH v3 3/6] examples/l3fwd-graph: " Sivaprasad Tummala
2023-12-20 6:44 ` [PATCH v3 4/6] examples/ipsec-secgw: " Sivaprasad Tummala
2023-12-20 6:45 ` [PATCH v3 5/6] examples/qos_sched: " Sivaprasad Tummala
2023-12-20 16:31 ` Stephen Hemminger
2024-01-09 15:16 ` Ferruh Yigit
2024-01-16 12:33 ` Tummala, Sivaprasad
2024-01-16 16:28 ` Stephen Hemminger
2023-12-20 6:45 ` [PATCH v3 6/6] examples/vm_power_manager: " Sivaprasad Tummala
2023-12-20 6:45 ` [PATCH v3 0/6] " Sivaprasad Tummala
2024-01-16 18:23 ` [PATCH v4 " Sivaprasad Tummala
2024-01-16 18:23 ` [PATCH v4 1/6] examples/l3fwd: " Sivaprasad Tummala
2024-01-16 18:23 ` [PATCH v4 2/6] examples/l3fwd-power: " Sivaprasad Tummala
2024-01-16 18:23 ` [PATCH v4 3/6] examples/l3fwd-graph: " Sivaprasad Tummala
2024-01-16 18:23 ` [PATCH v4 4/6] examples/ipsec-secgw: " Sivaprasad Tummala
2024-01-16 18:23 ` [PATCH v4 5/6] examples/qos_sched: " Sivaprasad Tummala
2024-01-16 18:23 ` [PATCH v4 6/6] examples/vm_power_manager: " Sivaprasad Tummala
2024-01-16 18:23 ` [PATCH v4 0/6] " Sivaprasad Tummala
2024-03-18 17:31 ` [PATCH v5 " Sivaprasad Tummala
2024-03-18 17:31 ` [PATCH v5 1/6] examples/l3fwd: " Sivaprasad Tummala
2024-03-19 7:24 ` Morten Brørup
2024-03-21 9:55 ` Thomas Monjalon
2024-03-21 11:05 ` Tummala, Sivaprasad
2024-03-21 11:18 ` Thomas Monjalon
2024-03-21 18:26 ` Tummala, Sivaprasad
2024-03-21 11:05 ` Tummala, Sivaprasad
2024-03-18 17:31 ` [PATCH v5 2/6] examples/l3fwd-power: " Sivaprasad Tummala
2024-03-18 17:31 ` [PATCH v5 3/6] examples/l3fwd-graph: " Sivaprasad Tummala
2024-03-18 17:31 ` [PATCH v5 4/6] examples/ipsec-secgw: " Sivaprasad Tummala
2024-03-18 17:31 ` [PATCH v5 5/6] examples/qos_sched: " Sivaprasad Tummala
2024-03-18 17:31 ` [PATCH v5 6/6] examples/vm_power_manager: " Sivaprasad Tummala
2024-03-18 17:31 ` [PATCH v5 0/6] " Sivaprasad Tummala
2024-03-19 18:41 ` Ferruh Yigit
2024-03-21 18:47 ` [PATCH v6 00/10] " Sivaprasad Tummala
2024-03-21 18:47 ` [PATCH v6 01/14] examples/l3fwd: fix queue " Sivaprasad Tummala
2024-03-22 15:41 ` David Marchand
2024-03-25 12:45 ` Tummala, Sivaprasad
2024-03-21 18:47 ` [PATCH v6 02/14] examples/l3fwd-power: " Sivaprasad Tummala
2024-03-21 18:47 ` [PATCH v6 03/14] examples/l3fwd-graph: " Sivaprasad Tummala
2024-03-21 18:47 ` [PATCH v6 04/14] examples/ipsec-secgw: " Sivaprasad Tummala
2024-03-21 18:47 ` [PATCH v6 05/14] examples/l3fwd: fix lcore " Sivaprasad Tummala
2024-03-21 18:47 ` [PATCH v6 06/14] examples/l3fwd-power: " Sivaprasad Tummala
2024-03-21 18:47 ` [PATCH v6 07/14] examples/l3fwd-graph: " Sivaprasad Tummala
2024-03-21 18:47 ` [PATCH v6 08/14] examples/ipsec-secgw: " Sivaprasad Tummala
2024-03-21 18:47 ` [PATCH v6 09/14] examples/qos_sched: " Sivaprasad Tummala
2024-03-21 18:47 ` [PATCH v6 10/14] examples/vm_power_manager: " Sivaprasad Tummala
2024-03-21 18:47 ` [PATCH v6 11/14] examples/l3fwd: fix port " Sivaprasad Tummala
2024-03-21 18:47 ` [PATCH v6 12/14] examples/l3fwd-power: " Sivaprasad Tummala
2024-03-21 18:47 ` [PATCH v6 13/14] examples/l3fwd-graph: " Sivaprasad Tummala
2024-03-21 18:47 ` [PATCH v6 14/14] examples/ipsec-secgw: " Sivaprasad Tummala
2024-03-21 18:47 ` [PATCH v6 00/10] fix lcore " Sivaprasad Tummala
2024-03-26 12:55 ` [PATCH v7 00/14] " Sivaprasad Tummala
2024-03-26 12:55 ` [PATCH v7 01/14] examples/l3fwd: fix queue " Sivaprasad Tummala
2024-03-26 12:55 ` [PATCH v7 02/14] examples/l3fwd-power: " Sivaprasad Tummala
2024-03-26 12:55 ` [PATCH v7 03/14] examples/l3fwd-graph: " Sivaprasad Tummala
2024-03-26 12:55 ` [PATCH v7 04/14] examples/ipsec-secgw: " Sivaprasad Tummala
2024-03-26 12:55 ` [PATCH v7 05/14] examples/l3fwd: fix lcore " Sivaprasad Tummala
2024-03-26 12:55 ` [PATCH v7 06/14] examples/l3fwd-power: " Sivaprasad Tummala
2024-03-26 12:55 ` [PATCH v7 07/14] examples/l3fwd-graph: " Sivaprasad Tummala
2024-03-26 12:55 ` [PATCH v7 08/14] examples/ipsec-secgw: " Sivaprasad Tummala
2024-03-26 12:55 ` [PATCH v7 09/14] examples/qos_sched: " Sivaprasad Tummala
2024-03-26 12:55 ` [PATCH v7 10/14] examples/vm_power_manager: " Sivaprasad Tummala
2024-03-26 12:55 ` [PATCH v7 11/14] examples/l3fwd: fix port " Sivaprasad Tummala
2024-03-26 12:55 ` [PATCH v7 12/14] examples/l3fwd-power: " Sivaprasad Tummala
2024-03-26 12:55 ` [PATCH v7 13/14] examples/l3fwd-graph: " Sivaprasad Tummala
2024-03-26 12:55 ` [PATCH v7 14/14] examples/ipsec-secgw: " Sivaprasad Tummala
2024-03-26 12:55 ` [PATCH v7 00/14] fix lcore " Sivaprasad Tummala
2024-07-23 15:19 ` Ferruh Yigit
2024-07-23 21:32 ` Thomas Monjalon
2024-04-25 12:31 ` Ferruh Yigit
2024-04-30 13:47 ` Ferruh Yigit
2024-11-29 4:32 ` [PATCH] power/amd_uncore: add e-smi installation instructions Sivaprasad Tummala
2024-11-29 10:34 ` Thomas Monjalon
2024-03-07 8:34 ` [PATCH 1/6] examples/l3fwd: fix lcore ID restriction David Marchand
2024-03-07 9:16 ` Morten Brørup
2024-03-07 9:22 ` David Marchand
2024-03-07 9:53 ` Morten Brørup
2024-03-13 9:14 ` Tummala, Sivaprasad