patches for DPDK stable branches
 help / color / mirror / Atom feed
* [dpdk-stable] [PATCH] crypto/scheduler: fix 64-bit mask of workers cores
@ 2018-05-21 10:22 Kirill Rybalchenko
  2018-05-22  9:58 ` De Lara Guarch, Pablo
  0 siblings, 1 reply; 3+ messages in thread
From: Kirill Rybalchenko @ 2018-05-21 10:22 UTC (permalink / raw)
  To: dev
  Cc: stable, kirill.rybalchenko, roy.fan.zhang, reshma.pattan,
	pablo.de.lara.guarch

The list of worker cores was represented by a 64-bit bitmask.
It doesn't work if the system has cores with an id higher than 63.
This fix changes the list of worker cores to an array of uint16_t.
The size of the array equals RTE_MAX_LCORE.

Fixes: 4c07e0552f0a ("crypto/scheduler: add multicore scheduling mode")
Cc: stable@dpdk.org

Signed-off-by: Kirill Rybalchenko <kirill.rybalchenko@intel.com>
---
 drivers/crypto/scheduler/rte_cryptodev_scheduler.h |  2 +-
 drivers/crypto/scheduler/scheduler_multicore.c     |  4 +-
 drivers/crypto/scheduler/scheduler_pmd.c           | 68 +++++++++++++++++-----
 drivers/crypto/scheduler/scheduler_pmd_private.h   |  2 +-
 4 files changed, 56 insertions(+), 20 deletions(-)

diff --git a/drivers/crypto/scheduler/rte_cryptodev_scheduler.h b/drivers/crypto/scheduler/rte_cryptodev_scheduler.h
index 01e7646..1c164da 100644
--- a/drivers/crypto/scheduler/rte_cryptodev_scheduler.h
+++ b/drivers/crypto/scheduler/rte_cryptodev_scheduler.h
@@ -30,7 +30,7 @@ extern "C" {
 #endif
 
 /** Maximum number of multi-core worker cores */
-#define RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES	(64)
+#define RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES	(RTE_MAX_LCORE - 1)
 
 /** Round-robin scheduling mode string */
 #define SCHEDULER_MODE_NAME_ROUND_ROBIN		round-robin
diff --git a/drivers/crypto/scheduler/scheduler_multicore.c b/drivers/crypto/scheduler/scheduler_multicore.c
index b4c29d7..91fb066 100644
--- a/drivers/crypto/scheduler/scheduler_multicore.c
+++ b/drivers/crypto/scheduler/scheduler_multicore.c
@@ -21,8 +21,8 @@ struct mc_scheduler_ctx {
 	uint32_t num_workers;             /**< Number of workers polling */
 	uint32_t stop_signal;
 
-	struct rte_ring *sched_enq_ring[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES];
-	struct rte_ring *sched_deq_ring[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES];
+	struct rte_ring *sched_enq_ring[RTE_MAX_LCORE];
+	struct rte_ring *sched_deq_ring[RTE_MAX_LCORE];
 };
 
 struct mc_scheduler_qp_ctx {
diff --git a/drivers/crypto/scheduler/scheduler_pmd.c b/drivers/crypto/scheduler/scheduler_pmd.c
index ba03b9e..25d6409 100644
--- a/drivers/crypto/scheduler/scheduler_pmd.c
+++ b/drivers/crypto/scheduler/scheduler_pmd.c
@@ -20,7 +20,8 @@ struct scheduler_init_params {
 	uint32_t nb_slaves;
 	enum rte_cryptodev_scheduler_mode mode;
 	uint32_t enable_ordering;
-	uint64_t wcmask;
+	uint16_t wc_pool[RTE_MAX_LCORE];
+	uint16_t nb_wc;
 	char slave_names[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES]
 			[RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN];
 };
@@ -86,10 +87,6 @@ cryptodev_scheduler_create(const char *name,
 		return -EFAULT;
 	}
 
-	if (init_params->wcmask != 0)
-		RTE_LOG(INFO, PMD, "  workers core mask = %"PRIx64"\n",
-			init_params->wcmask);
-
 	dev->driver_id = cryptodev_driver_id;
 	dev->dev_ops = rte_crypto_scheduler_pmd_ops;
 
@@ -100,15 +97,12 @@ cryptodev_scheduler_create(const char *name,
 	if (init_params->mode == CDEV_SCHED_MODE_MULTICORE) {
 		uint16_t i;
 
-		sched_ctx->nb_wc = 0;
+		sched_ctx->nb_wc = init_params->nb_wc;
 
-		for (i = 0; i < RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES; i++) {
-			if (init_params->wcmask & (1ULL << i)) {
-				sched_ctx->wc_pool[sched_ctx->nb_wc++] = i;
-				RTE_LOG(INFO, PMD,
-					"  Worker core[%u]=%u added\n",
-					sched_ctx->nb_wc-1, i);
-			}
+		for (i = 0; i < sched_ctx->nb_wc; i++) {
+			sched_ctx->wc_pool[i] = init_params->wc_pool[i];
+			RTE_LOG(INFO, PMD, "  Worker core[%u]=%u added\n",
+				i, sched_ctx->wc_pool[i]);
 		}
 	}
 
@@ -232,9 +226,47 @@ static int
 parse_coremask_arg(const char *key __rte_unused,
 		const char *value, void *extra_args)
 {
+	int i, j, val;
+	uint16_t idx = 0;
+	char c;
 	struct scheduler_init_params *params = extra_args;
 
-	params->wcmask = strtoull(value, NULL, 16);
+	params->nb_wc = 0;
+
+	if (value == NULL)
+		return -1;
+	/* Remove all blank characters ahead and after .
+	 * Remove 0x/0X if exists.
+	 */
+	while (isblank(*value))
+		value++;
+	if (value[0] == '0' && ((value[1] == 'x') || (value[1] == 'X')))
+		value += 2;
+	i = strlen(value);
+	while ((i > 0) && isblank(value[i - 1]))
+		i--;
+
+	if (i == 0)
+		return -1;
+
+	for (i = i - 1; i >= 0 && idx < RTE_MAX_LCORE; i--) {
+		c = value[i];
+		if (isxdigit(c) == 0) {
+			/* invalid characters */
+			return -1;
+		}
+		if (isdigit(c))
+			val = c - '0';
+		else if (isupper(c))
+			val = c - 'A' + 10;
+		else
+			val = c - 'a' + 10;
+
+		for (j = 0; j < 4 && idx < RTE_MAX_LCORE; j++, idx++) {
+			if ((1 << j) & val)
+				params->wc_pool[params->nb_wc++] = idx;
+		}
+	}
 
 	return 0;
 }
@@ -246,7 +278,7 @@ parse_corelist_arg(const char *key __rte_unused,
 {
 	struct scheduler_init_params *params = extra_args;
 
-	params->wcmask = 0ULL;
+	params->nb_wc = 0;
 
 	const char *token = value;
 
@@ -254,7 +286,11 @@ parse_corelist_arg(const char *key __rte_unused,
 		char *rval;
 		unsigned int core = strtoul(token, &rval, 10);
 
-		params->wcmask |= 1ULL << core;
+		if (core >= RTE_MAX_LCORE) {
+			CS_LOG_ERR("Invalid worker core %u, should be smaller "
+				   "than %u.\n", core, RTE_MAX_LCORE);
+		}
+		params->wc_pool[params->nb_wc++] = (uint16_t)core;
 		token = (const char *)rval;
 		if (token[0] == '\0')
 			break;
diff --git a/drivers/crypto/scheduler/scheduler_pmd_private.h b/drivers/crypto/scheduler/scheduler_pmd_private.h
index dd7ca5a..12410b4 100644
--- a/drivers/crypto/scheduler/scheduler_pmd_private.h
+++ b/drivers/crypto/scheduler/scheduler_pmd_private.h
@@ -60,7 +60,7 @@ struct scheduler_ctx {
 
 	char name[RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN];
 	char description[RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN];
-	uint16_t wc_pool[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES];
+	uint16_t wc_pool[RTE_MAX_LCORE];
 	uint16_t nb_wc;
 
 	char *init_slave_names[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];
-- 
2.5.5

^ permalink raw reply	[flat|nested] 3+ messages in thread

* Re: [dpdk-stable] [PATCH] crypto/scheduler: fix 64-bit mask of workers cores
  2018-05-21 10:22 [dpdk-stable] [PATCH] crypto/scheduler: fix 64-bit mask of workers cores Kirill Rybalchenko
@ 2018-05-22  9:58 ` De Lara Guarch, Pablo
  2018-05-22 10:08   ` De Lara Guarch, Pablo
  0 siblings, 1 reply; 3+ messages in thread
From: De Lara Guarch, Pablo @ 2018-05-22  9:58 UTC (permalink / raw)
  To: Rybalchenko, Kirill, dev; +Cc: stable, Zhang, Roy Fan, Pattan, Reshma



> -----Original Message-----
> From: Rybalchenko, Kirill
> Sent: Monday, May 21, 2018 11:22 AM
> To: dev@dpdk.org
> Cc: stable@dpdk.org; Rybalchenko, Kirill <kirill.rybalchenko@intel.com>; Zhang,
> Roy Fan <roy.fan.zhang@intel.com>; Pattan, Reshma
> <reshma.pattan@intel.com>; De Lara Guarch, Pablo
> <pablo.de.lara.guarch@intel.com>
> Subject: [PATCH] crypto/scheduler: fix 64-bit mask of workers cores
> 
> The list of worker cores was represented by a 64-bit bitmask.
> It doesn't work if the system has cores with an id higher than 63.
> This fix changes the list of worker cores to an array of uint16_t.
> The size of the array equals RTE_MAX_LCORE.
> 
> Fixes: 4c07e0552f0a ("crypto/scheduler: add multicore scheduling mode")
> Cc: stable@dpdk.org
> 
> Signed-off-by: Kirill Rybalchenko <kirill.rybalchenko@intel.com>

Acked-by: Pablo de Lara <pablo.de.lara.guarch@intel.com>

^ permalink raw reply	[flat|nested] 3+ messages in thread

* Re: [dpdk-stable] [PATCH] crypto/scheduler: fix 64-bit mask of workers cores
  2018-05-22  9:58 ` De Lara Guarch, Pablo
@ 2018-05-22 10:08   ` De Lara Guarch, Pablo
  0 siblings, 0 replies; 3+ messages in thread
From: De Lara Guarch, Pablo @ 2018-05-22 10:08 UTC (permalink / raw)
  To: De Lara Guarch, Pablo, Rybalchenko, Kirill, dev
  Cc: stable, Zhang, Roy Fan, Pattan, Reshma



> -----Original Message-----
> From: stable [mailto:stable-bounces@dpdk.org] On Behalf Of De Lara Guarch,
> Pablo
> Sent: Tuesday, May 22, 2018 10:58 AM
> To: Rybalchenko, Kirill <kirill.rybalchenko@intel.com>; dev@dpdk.org
> Cc: stable@dpdk.org; Zhang, Roy Fan <roy.fan.zhang@intel.com>; Pattan,
> Reshma <reshma.pattan@intel.com>
> Subject: Re: [dpdk-stable] [PATCH] crypto/scheduler: fix 64-bit mask of workers
> cores
> 
> 
> 
> > -----Original Message-----
> > From: Rybalchenko, Kirill
> > Sent: Monday, May 21, 2018 11:22 AM
> > To: dev@dpdk.org
> > Cc: stable@dpdk.org; Rybalchenko, Kirill
> > <kirill.rybalchenko@intel.com>; Zhang, Roy Fan
> > <roy.fan.zhang@intel.com>; Pattan, Reshma <reshma.pattan@intel.com>;
> > De Lara Guarch, Pablo <pablo.de.lara.guarch@intel.com>
> > Subject: [PATCH] crypto/scheduler: fix 64-bit mask of workers cores
> >
> > The list of worker cores was represented by a 64-bit bitmask.
> > It doesn't work if the system has cores with an id higher than 63.
> > This fix changes the list of worker cores to an array of uint16_t.
> > The size of the array equals RTE_MAX_LCORE.
> >
> > Fixes: 4c07e0552f0a ("crypto/scheduler: add multicore scheduling
> > mode")
> > Cc: stable@dpdk.org
> >
> > Signed-off-by: Kirill Rybalchenko <kirill.rybalchenko@intel.com>
> 
> Acked-by: Pablo de Lara <pablo.de.lara.guarch@intel.com>

Applied to dpdk-next-crypto.
Thanks,

Pablo

^ permalink raw reply	[flat|nested] 3+ messages in thread

end of thread, other threads:[~2018-05-22 10:08 UTC | newest]

Thread overview: 3+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2018-05-21 10:22 [dpdk-stable] [PATCH] crypto/scheduler: fix 64-bit mask of workers cores Kirill Rybalchenko
2018-05-22  9:58 ` De Lara Guarch, Pablo
2018-05-22 10:08   ` De Lara Guarch, Pablo

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).