[dpdk-dev] [PATCH] crypto/scheduler: rename slave to worker
From: Adam Dybkowski @ 2020-08-26 15:34 UTC (permalink / raw)
  To: dev, fiona.trahe, akhil.goyal, roy.fan.zhang; +Cc: Adam Dybkowski

This patch replaces the usage of the word 'slave' with the more
appropriate word 'worker' in the QAT PMD and the Scheduler PMD,
as well as in their documentation. The test applications were also
updated to use the new wording.

The Scheduler PMD's public API was modified according to the
previous deprecation notice:
rte_cryptodev_scheduler_slave_attach is now called
rte_cryptodev_scheduler_worker_attach,
rte_cryptodev_scheduler_slave_detach is now
rte_cryptodev_scheduler_worker_detach, and
rte_cryptodev_scheduler_slaves_get is now
rte_cryptodev_scheduler_workers_get.
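
For illustration, a minimal migration sketch in C (sched_id is
assumed to identify an already-created scheduler vdev and worker_id
an initialized worker cryptodev; the function name is made up for
this example and the snippet is not part of the patch itself):

    #include <rte_cryptodev_scheduler.h>

    static int
    scheduler_rename_example(uint8_t sched_id, uint8_t worker_id)
    {
        uint8_t workers[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS];
        int nb;

        /* was: rte_cryptodev_scheduler_slave_attach() */
        if (rte_cryptodev_scheduler_worker_attach(sched_id, worker_id) < 0)
            return -1;

        /* was: rte_cryptodev_scheduler_slaves_get();
         * returns the number of attached workers, or -ENOTSUP */
        nb = rte_cryptodev_scheduler_workers_get(sched_id, workers);
        if (nb < 0)
            return -1;

        /* was: rte_cryptodev_scheduler_slave_detach() */
        return rte_cryptodev_scheduler_worker_detach(sched_id, worker_id);
    }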

Also, the configuration value RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES
was renamed to RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS.
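
A hypothetical compatibility shim for an application that must build
against both pre- and post-rename DPDK (an assumption shown for
illustration only, not part of this patch):

    #ifndef RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS
    /* building against an older DPDK: fall back to the old name */
    #define RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS \
            RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES
    #endif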

Signed-off-by: Adam Dybkowski <adamx.dybkowski@intel.com>
---
 app/test-crypto-perf/main.c                   |   2 +-
 app/test/test_cryptodev.c                     |  20 +-
 doc/guides/cryptodevs/qat.rst                 |   2 +-
 doc/guides/cryptodevs/scheduler.rst           |  40 ++--
 doc/guides/rel_notes/deprecation.rst          |   2 +-
 .../scheduler/rte_cryptodev_scheduler.c       | 114 +++++-----
 .../scheduler/rte_cryptodev_scheduler.h       |  35 ++-
 .../rte_cryptodev_scheduler_operations.h      |  12 +-
 .../rte_pmd_crypto_scheduler_version.map      |   6 +-
 drivers/crypto/scheduler/scheduler_failover.c |  83 +++----
 .../crypto/scheduler/scheduler_multicore.c    |  54 ++---
 .../scheduler/scheduler_pkt_size_distr.c      | 142 ++++++------
 drivers/crypto/scheduler/scheduler_pmd.c      |  54 ++---
 drivers/crypto/scheduler/scheduler_pmd_ops.c  | 204 +++++++++---------
 .../crypto/scheduler/scheduler_pmd_private.h  |  12 +-
 .../crypto/scheduler/scheduler_roundrobin.c   |  87 ++++----
 examples/l2fwd-crypto/main.c                  |   6 +-
 17 files changed, 439 insertions(+), 436 deletions(-)

diff --git a/app/test-crypto-perf/main.c b/app/test-crypto-perf/main.c
index 8f8e580e4..62ae6048b 100644
--- a/app/test-crypto-perf/main.c
+++ b/app/test-crypto-perf/main.c
@@ -240,7 +240,7 @@ cperf_initialize_cryptodev(struct cperf_options *opts, uint8_t *enabled_cdevs)
 					"crypto_scheduler")) {
 #ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
 			uint32_t nb_slaves =
-				rte_cryptodev_scheduler_slaves_get(cdev_id,
+				rte_cryptodev_scheduler_workers_get(cdev_id,
 								NULL);
 
 			sessions_needed = enabled_cdev_count *
diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c
index 70bf6fe2c..255fb7525 100644
--- a/app/test/test_cryptodev.c
+++ b/app/test/test_cryptodev.c
@@ -479,29 +479,29 @@ testsuite_setup(void)
 	char vdev_args[VDEV_ARGS_SIZE] = {""};
 	char temp_str[VDEV_ARGS_SIZE] = {"mode=multi-core,"
 		"ordering=enable,name=cryptodev_test_scheduler,corelist="};
-	uint16_t slave_core_count = 0;
+	uint16_t worker_core_count = 0;
 	uint16_t socket_id = 0;
 
 	if (gbl_driver_id == rte_cryptodev_driver_id_get(
 			RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD))) {
 
-		/* Identify the Slave Cores
-		 * Use 2 slave cores for the device args
+		/* Identify the Worker Cores
+		 * Use 2 worker cores for the device args
 		 */
 		RTE_LCORE_FOREACH_SLAVE(i) {
-			if (slave_core_count > 1)
+			if (worker_core_count > 1)
 				break;
 			snprintf(vdev_args, sizeof(vdev_args),
 					"%s%d", temp_str, i);
 			strcpy(temp_str, vdev_args);
 			strlcat(temp_str, ";", sizeof(temp_str));
-			slave_core_count++;
+			worker_core_count++;
 			socket_id = rte_lcore_to_socket_id(i);
 		}
-		if (slave_core_count != 2) {
+		if (worker_core_count != 2) {
 			RTE_LOG(ERR, USER1,
 				"Cryptodev scheduler test require at least "
-				"two slave cores to run. "
+				"two worker cores to run. "
 				"Please use the correct coremask.\n");
 			return TEST_FAILED;
 		}
@@ -11712,7 +11712,7 @@ test_chacha20_poly1305_decrypt_test_case_rfc8439(void)
 
 #ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
 
-/* global AESNI slave IDs for the scheduler test */
+/* global AESNI worker IDs for the scheduler test */
 uint8_t aesni_ids[2];
 
 static int
@@ -11810,7 +11810,7 @@ test_scheduler_attach_slave_op(void)
 		ts_params->qp_conf.mp_session_private =
 				ts_params->session_priv_mpool;
 
-		ret = rte_cryptodev_scheduler_slave_attach(sched_id,
+		ret = rte_cryptodev_scheduler_worker_attach(sched_id,
 				(uint8_t)i);
 
 		TEST_ASSERT(ret == 0,
@@ -11834,7 +11834,7 @@ test_scheduler_detach_slave_op(void)
 	int ret;
 
 	for (i = 0; i < 2; i++) {
-		ret = rte_cryptodev_scheduler_slave_detach(sched_id,
+		ret = rte_cryptodev_scheduler_worker_detach(sched_id,
 				aesni_ids[i]);
 		TEST_ASSERT(ret == 0,
 			"Failed to detach device %u", aesni_ids[i]);
diff --git a/doc/guides/cryptodevs/qat.rst b/doc/guides/cryptodevs/qat.rst
index e5d2cf499..ee0bd3a0d 100644
--- a/doc/guides/cryptodevs/qat.rst
+++ b/doc/guides/cryptodevs/qat.rst
@@ -328,7 +328,7 @@ The "rte_cryptodev_devices_get()" returns the devices exposed by either of these
 
 	The cryptodev driver name is passed to the dpdk-test-crypto-perf tool in the "-devtype" parameter.
 
-	The qat crypto device name is in the format of the slave parameter passed to the crypto scheduler.
+	The qat crypto device name is in the format of the worker parameter passed to the crypto scheduler.
 
 * The qat compressdev driver name is "compress_qat".
   The rte_compressdev_devices_get() returns the devices exposed by this driver.
diff --git a/doc/guides/cryptodevs/scheduler.rst b/doc/guides/cryptodevs/scheduler.rst
index 7004ca431..565de40f3 100644
--- a/doc/guides/cryptodevs/scheduler.rst
+++ b/doc/guides/cryptodevs/scheduler.rst
@@ -16,12 +16,12 @@ crypto ops among them in a certain manner.
 The Cryptodev Scheduler PMD library (**librte_pmd_crypto_scheduler**) acts as
 a software crypto PMD and shares the same API provided by librte_cryptodev.
 The PMD supports attaching multiple crypto PMDs, software or hardware, as
-slaves, and distributes the crypto workload to them with certain behavior.
+workers, and distributes the crypto workload to them with certain behavior.
 The behaviors are categorizes as different "modes". Basically, a scheduling
-mode defines certain actions for scheduling crypto ops to its slaves.
+mode defines certain actions for scheduling crypto ops to its workers.
 
 The librte_pmd_crypto_scheduler library exports a C API which provides an API
-for attaching/detaching slaves, set/get scheduling modes, and enable/disable
+for attaching/detaching workers, set/get scheduling modes, and enable/disable
 crypto ops reordering.
 
 Limitations
@@ -62,7 +62,7 @@ two calls:
   created. This value may be overwritten internally if there are too
   many devices are attached.
 
-* slave: If a cryptodev has been initialized with specific name, it can be
+* worker: If a cryptodev has been initialized with specific name, it can be
   attached to the scheduler using this parameter, simply filling the name
   here. Multiple cryptodevs can be attached initially by presenting this
   parameter multiple times.
@@ -84,13 +84,13 @@ Example:
 
 .. code-block:: console
 
-    ... --vdev "crypto_aesni_mb0,name=aesni_mb_1" --vdev "crypto_aesni_mb1,name=aesni_mb_2" --vdev "crypto_scheduler,slave=aesni_mb_1,slave=aesni_mb_2" ...
+    ... --vdev "crypto_aesni_mb0,name=aesni_mb_1" --vdev "crypto_aesni_mb1,name=aesni_mb_2" --vdev "crypto_scheduler,worker=aesni_mb_1,worker=aesni_mb_2" ...
 
 .. note::
 
     * The scheduler cryptodev cannot be started unless the scheduling mode
-      is set and at least one slave is attached. Also, to configure the
-      scheduler in the run-time, like attach/detach slave(s), change
+      is set and at least one worker is attached. Also, to configure the
+      scheduler in the run-time, like attach/detach worker(s), change
       scheduling mode, or enable/disable crypto op ordering, one should stop
       the scheduler first, otherwise an error will be returned.
 
@@ -111,7 +111,7 @@ operation:
    *Initialization mode parameter*: **round-robin**
 
    Round-robin mode, which distributes the enqueued burst of crypto ops
-   among its slaves in a round-robin manner. This mode may help to fill
+   among its workers in a round-robin manner. This mode may help to fill
    the throughput gap between the physical core and the existing cryptodevs
    to increase the overall performance.
 
@@ -119,15 +119,15 @@ operation:
 
    *Initialization mode parameter*: **packet-size-distr**
 
-   Packet-size based distribution mode, which works with 2 slaves, the primary
-   slave and the secondary slave, and distributes the enqueued crypto
+   Packet-size based distribution mode, which works with 2 workers, the primary
+   worker and the secondary worker, and distributes the enqueued crypto
    operations to them based on their data lengths. A crypto operation will be
-   distributed to the primary slave if its data length is equal to or bigger
+   distributed to the primary worker if its data length is equal to or bigger
    than the designated threshold, otherwise it will be handled by the secondary
-   slave.
+   worker.
 
    A typical usecase in this mode is with the QAT cryptodev as the primary and
-   a software cryptodev as the secondary slave. This may help applications to
+   a software cryptodev as the secondary worker. This may help applications to
    process additional crypto workload than what the QAT cryptodev can handle on
    its own, by making use of the available CPU cycles to deal with smaller
    crypto workloads.
@@ -148,11 +148,11 @@ operation:
 
    *Initialization mode parameter*: **fail-over**
 
-   Fail-over mode, which works with 2 slaves, the primary slave and the
-   secondary slave. In this mode, the scheduler will enqueue the incoming
-   crypto operation burst to the primary slave. When one or more crypto
+   Fail-over mode, which works with 2 workers, the primary worker and the
+   secondary worker. In this mode, the scheduler will enqueue the incoming
+   crypto operation burst to the primary worker. When one or more crypto
    operations fail to be enqueued, then they will be enqueued to the secondary
-   slave.
+   worker.
 
 *   **CDEV_SCHED_MODE_MULTICORE:**
 
@@ -167,16 +167,16 @@ operation:
    For mixed traffic (IMIX) the optimal number of worker cores is around 2-3.
    For large packets (1.5 kbytes) scheduler shows linear scaling in performance
    up to eight cores.
-   Each worker uses its own slave cryptodev. Only software cryptodevs
+   Each worker uses its own cryptodev. Only software cryptodevs
    are supported. Only the same type of cryptodevs should be used concurrently.
 
    The multi-core mode uses one extra parameter:
 
    * corelist: Semicolon-separated list of logical cores to be used as workers.
-     The number of worker cores should be equal to the number of slave cryptodevs.
+     The number of worker cores should be equal to the number of worker cryptodevs.
      These cores should be present in EAL core list parameter and
      should not be used by the application or any other process.
 
    Example:
     ... --vdev "crypto_aesni_mb1,name=aesni_mb_1" --vdev "crypto_aesni_mb_pmd2,name=aesni_mb_2" \
-    --vdev "crypto_scheduler,slave=aesni_mb_1,slave=aesni_mb_2,mode=multi-core,corelist=23;24" ...
+    --vdev "crypto_scheduler,worker=aesni_mb_1,worker=aesni_mb_2,mode=multi-core,corelist=23;24" ...
diff --git a/doc/guides/rel_notes/deprecation.rst b/doc/guides/rel_notes/deprecation.rst
index 345c38d5b..24d58060f 100644
--- a/doc/guides/rel_notes/deprecation.rst
+++ b/doc/guides/rel_notes/deprecation.rst
@@ -341,4 +341,4 @@ Deprecation Notices
 * dpdk-setup.sh: This old script relies on deprecated stuff, and especially
   ``make``. Given environments are too much variables for such a simple script,
   it will be removed in DPDK 20.11.
-  Some useful parts may be converted into specific scripts.
+  Some useful parts may be converted into specific scripts.
\ No newline at end of file
diff --git a/drivers/crypto/scheduler/rte_cryptodev_scheduler.c b/drivers/crypto/scheduler/rte_cryptodev_scheduler.c
index 730504dab..9367a0e91 100644
--- a/drivers/crypto/scheduler/rte_cryptodev_scheduler.c
+++ b/drivers/crypto/scheduler/rte_cryptodev_scheduler.c
@@ -13,31 +13,31 @@
 /** update the scheduler pmd's capability with attaching device's
  *  capability.
  *  For each device to be attached, the scheduler's capability should be
- *  the common capability set of all slaves
+ *  the common capability set of all workers
  **/
 static uint32_t
 sync_caps(struct rte_cryptodev_capabilities *caps,
 		uint32_t nb_caps,
-		const struct rte_cryptodev_capabilities *slave_caps)
+		const struct rte_cryptodev_capabilities *worker_caps)
 {
-	uint32_t sync_nb_caps = nb_caps, nb_slave_caps = 0;
+	uint32_t sync_nb_caps = nb_caps, nb_worker_caps = 0;
 	uint32_t i;
 
-	while (slave_caps[nb_slave_caps].op != RTE_CRYPTO_OP_TYPE_UNDEFINED)
-		nb_slave_caps++;
+	while (worker_caps[nb_worker_caps].op != RTE_CRYPTO_OP_TYPE_UNDEFINED)
+		nb_worker_caps++;
 
 	if (nb_caps == 0) {
-		rte_memcpy(caps, slave_caps, sizeof(*caps) * nb_slave_caps);
-		return nb_slave_caps;
+		rte_memcpy(caps, worker_caps, sizeof(*caps) * nb_worker_caps);
+		return nb_worker_caps;
 	}
 
 	for (i = 0; i < sync_nb_caps; i++) {
 		struct rte_cryptodev_capabilities *cap = &caps[i];
 		uint32_t j;
 
-		for (j = 0; j < nb_slave_caps; j++) {
+		for (j = 0; j < nb_worker_caps; j++) {
 			const struct rte_cryptodev_capabilities *s_cap =
-					&slave_caps[j];
+					&worker_caps[j];
 
 			if (s_cap->op != cap->op || s_cap->sym.xform_type !=
 					cap->sym.xform_type)
@@ -72,7 +72,7 @@ sync_caps(struct rte_cryptodev_capabilities *caps,
 			break;
 		}
 
-		if (j < nb_slave_caps)
+		if (j < nb_worker_caps)
 			continue;
 
 		/* remove a uncommon cap from the array */
@@ -97,10 +97,10 @@ update_scheduler_capability(struct scheduler_ctx *sched_ctx)
 		sched_ctx->capabilities = NULL;
 	}
 
-	for (i = 0; i < sched_ctx->nb_slaves; i++) {
+	for (i = 0; i < sched_ctx->nb_workers; i++) {
 		struct rte_cryptodev_info dev_info;
 
-		rte_cryptodev_info_get(sched_ctx->slaves[i].dev_id, &dev_info);
+		rte_cryptodev_info_get(sched_ctx->workers[i].dev_id, &dev_info);
 
 		nb_caps = sync_caps(tmp_caps, nb_caps, dev_info.capabilities);
 		if (nb_caps == 0)
@@ -127,10 +127,10 @@ update_scheduler_feature_flag(struct rte_cryptodev *dev)
 
 	dev->feature_flags = 0;
 
-	for (i = 0; i < sched_ctx->nb_slaves; i++) {
+	for (i = 0; i < sched_ctx->nb_workers; i++) {
 		struct rte_cryptodev_info dev_info;
 
-		rte_cryptodev_info_get(sched_ctx->slaves[i].dev_id, &dev_info);
+		rte_cryptodev_info_get(sched_ctx->workers[i].dev_id, &dev_info);
 
 		dev->feature_flags |= dev_info.feature_flags;
 	}
@@ -142,15 +142,15 @@ update_max_nb_qp(struct scheduler_ctx *sched_ctx)
 	uint32_t i;
 	uint32_t max_nb_qp;
 
-	if (!sched_ctx->nb_slaves)
+	if (!sched_ctx->nb_workers)
 		return;
 
-	max_nb_qp = sched_ctx->nb_slaves ? UINT32_MAX : 0;
+	max_nb_qp = sched_ctx->nb_workers ? UINT32_MAX : 0;
 
-	for (i = 0; i < sched_ctx->nb_slaves; i++) {
+	for (i = 0; i < sched_ctx->nb_workers; i++) {
 		struct rte_cryptodev_info dev_info;
 
-		rte_cryptodev_info_get(sched_ctx->slaves[i].dev_id, &dev_info);
+		rte_cryptodev_info_get(sched_ctx->workers[i].dev_id, &dev_info);
 		max_nb_qp = dev_info.max_nb_queue_pairs < max_nb_qp ?
 				dev_info.max_nb_queue_pairs : max_nb_qp;
 	}
@@ -160,11 +160,11 @@ update_max_nb_qp(struct scheduler_ctx *sched_ctx)
 
 /** Attach a device to the scheduler. */
 int
-rte_cryptodev_scheduler_slave_attach(uint8_t scheduler_id, uint8_t slave_id)
+rte_cryptodev_scheduler_worker_attach(uint8_t scheduler_id, uint8_t worker_id)
 {
 	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
 	struct scheduler_ctx *sched_ctx;
-	struct scheduler_slave *slave;
+	struct scheduler_worker *worker;
 	struct rte_cryptodev_info dev_info;
 	uint32_t i;
 
@@ -184,30 +184,30 @@ rte_cryptodev_scheduler_slave_attach(uint8_t scheduler_id, uint8_t slave_id)
 	}
 
 	sched_ctx = dev->data->dev_private;
-	if (sched_ctx->nb_slaves >=
-			RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES) {
-		CR_SCHED_LOG(ERR, "Too many slaves attached");
+	if (sched_ctx->nb_workers >=
+			RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS) {
+		CR_SCHED_LOG(ERR, "Too many workers attached");
 		return -ENOMEM;
 	}
 
-	for (i = 0; i < sched_ctx->nb_slaves; i++)
-		if (sched_ctx->slaves[i].dev_id == slave_id) {
-			CR_SCHED_LOG(ERR, "Slave already added");
+	for (i = 0; i < sched_ctx->nb_workers; i++)
+		if (sched_ctx->workers[i].dev_id == worker_id) {
+			CR_SCHED_LOG(ERR, "Worker already added");
 			return -ENOTSUP;
 		}
 
-	slave = &sched_ctx->slaves[sched_ctx->nb_slaves];
+	worker = &sched_ctx->workers[sched_ctx->nb_workers];
 
-	rte_cryptodev_info_get(slave_id, &dev_info);
+	rte_cryptodev_info_get(worker_id, &dev_info);
 
-	slave->dev_id = slave_id;
-	slave->driver_id = dev_info.driver_id;
-	sched_ctx->nb_slaves++;
+	worker->dev_id = worker_id;
+	worker->driver_id = dev_info.driver_id;
+	sched_ctx->nb_workers++;
 
 	if (update_scheduler_capability(sched_ctx) < 0) {
-		slave->dev_id = 0;
-		slave->driver_id = 0;
-		sched_ctx->nb_slaves--;
+		worker->dev_id = 0;
+		worker->driver_id = 0;
+		sched_ctx->nb_workers--;
 
 		CR_SCHED_LOG(ERR, "capabilities update failed");
 		return -ENOTSUP;
@@ -221,11 +221,11 @@ rte_cryptodev_scheduler_slave_attach(uint8_t scheduler_id, uint8_t slave_id)
 }
 
 int
-rte_cryptodev_scheduler_slave_detach(uint8_t scheduler_id, uint8_t slave_id)
+rte_cryptodev_scheduler_worker_detach(uint8_t scheduler_id, uint8_t worker_id)
 {
 	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
 	struct scheduler_ctx *sched_ctx;
-	uint32_t i, slave_pos;
+	uint32_t i, worker_pos;
 
 	if (!dev) {
 		CR_SCHED_LOG(ERR, "Operation not supported");
@@ -244,26 +244,26 @@ rte_cryptodev_scheduler_slave_detach(uint8_t scheduler_id, uint8_t slave_id)
 
 	sched_ctx = dev->data->dev_private;
 
-	for (slave_pos = 0; slave_pos < sched_ctx->nb_slaves; slave_pos++)
-		if (sched_ctx->slaves[slave_pos].dev_id == slave_id)
+	for (worker_pos = 0; worker_pos < sched_ctx->nb_workers; worker_pos++)
+		if (sched_ctx->workers[worker_pos].dev_id == worker_id)
 			break;
-	if (slave_pos == sched_ctx->nb_slaves) {
-		CR_SCHED_LOG(ERR, "Cannot find slave");
+	if (worker_pos == sched_ctx->nb_workers) {
+		CR_SCHED_LOG(ERR, "Cannot find worker");
 		return -ENOTSUP;
 	}
 
-	if (sched_ctx->ops.slave_detach(dev, slave_id) < 0) {
-		CR_SCHED_LOG(ERR, "Failed to detach slave");
+	if (sched_ctx->ops.worker_detach(dev, worker_id) < 0) {
+		CR_SCHED_LOG(ERR, "Failed to detach worker");
 		return -ENOTSUP;
 	}
 
-	for (i = slave_pos; i < sched_ctx->nb_slaves - 1; i++) {
-		memcpy(&sched_ctx->slaves[i], &sched_ctx->slaves[i+1],
-				sizeof(struct scheduler_slave));
+	for (i = worker_pos; i < sched_ctx->nb_workers - 1; i++) {
+		memcpy(&sched_ctx->workers[i], &sched_ctx->workers[i+1],
+				sizeof(struct scheduler_worker));
 	}
-	memset(&sched_ctx->slaves[sched_ctx->nb_slaves - 1], 0,
-			sizeof(struct scheduler_slave));
-	sched_ctx->nb_slaves--;
+	memset(&sched_ctx->workers[sched_ctx->nb_workers - 1], 0,
+			sizeof(struct scheduler_worker));
+	sched_ctx->nb_workers--;
 
 	if (update_scheduler_capability(sched_ctx) < 0) {
 		CR_SCHED_LOG(ERR, "capabilities update failed");
@@ -459,8 +459,8 @@ rte_cryptodev_scheduler_load_user_scheduler(uint8_t scheduler_id,
 	sched_ctx->ops.create_private_ctx = scheduler->ops->create_private_ctx;
 	sched_ctx->ops.scheduler_start = scheduler->ops->scheduler_start;
 	sched_ctx->ops.scheduler_stop = scheduler->ops->scheduler_stop;
-	sched_ctx->ops.slave_attach = scheduler->ops->slave_attach;
-	sched_ctx->ops.slave_detach = scheduler->ops->slave_detach;
+	sched_ctx->ops.worker_attach = scheduler->ops->worker_attach;
+	sched_ctx->ops.worker_detach = scheduler->ops->worker_detach;
 	sched_ctx->ops.option_set = scheduler->ops->option_set;
 	sched_ctx->ops.option_get = scheduler->ops->option_get;
 
@@ -485,11 +485,11 @@ rte_cryptodev_scheduler_load_user_scheduler(uint8_t scheduler_id,
 }
 
 int
-rte_cryptodev_scheduler_slaves_get(uint8_t scheduler_id, uint8_t *slaves)
+rte_cryptodev_scheduler_workers_get(uint8_t scheduler_id, uint8_t *workers)
 {
 	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
 	struct scheduler_ctx *sched_ctx;
-	uint32_t nb_slaves = 0;
+	uint32_t nb_workers = 0;
 
 	if (!dev) {
 		CR_SCHED_LOG(ERR, "Operation not supported");
@@ -503,16 +503,16 @@ rte_cryptodev_scheduler_slaves_get(uint8_t scheduler_id, uint8_t *slaves)
 
 	sched_ctx = dev->data->dev_private;
 
-	nb_slaves = sched_ctx->nb_slaves;
+	nb_workers = sched_ctx->nb_workers;
 
-	if (slaves && nb_slaves) {
+	if (workers && nb_workers) {
 		uint32_t i;
 
-		for (i = 0; i < nb_slaves; i++)
-			slaves[i] = sched_ctx->slaves[i].dev_id;
+		for (i = 0; i < nb_workers; i++)
+			workers[i] = sched_ctx->workers[i].dev_id;
 	}
 
-	return (int)nb_slaves;
+	return (int)nb_workers;
 }
 
 int
diff --git a/drivers/crypto/scheduler/rte_cryptodev_scheduler.h b/drivers/crypto/scheduler/rte_cryptodev_scheduler.h
index 9a72a90ae..88da8368e 100644
--- a/drivers/crypto/scheduler/rte_cryptodev_scheduler.h
+++ b/drivers/crypto/scheduler/rte_cryptodev_scheduler.h
@@ -10,9 +10,9 @@
  *
  * RTE Cryptodev Scheduler Device
  *
- * The RTE Cryptodev Scheduler Device allows the aggregation of multiple (slave)
+ * The RTE Cryptodev Scheduler Device allows the aggregation of multiple worker
  * Cryptodevs into a single logical crypto device, and the scheduling the
- * crypto operations to the slaves based on the mode of the specified mode of
+ * crypto operations to the workers based on the mode of the specified mode of
  * operation specified and supported. This implementation supports 3 modes of
  * operation: round robin, packet-size based, and fail-over.
  */
@@ -25,8 +25,8 @@ extern "C" {
 #endif
 
 /** Maximum number of bonded devices per device */
-#ifndef RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES
-#define RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES	(8)
+#ifndef RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS
+#define RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS	(8)
 #endif
 
 /** Maximum number of multi-core worker cores */
@@ -106,34 +106,33 @@ rte_cryptodev_scheduler_load_user_scheduler(uint8_t scheduler_id,
  *
  * @param scheduler_id
  *   The target scheduler device ID
- * @param slave_id
+ * @param worker_id
  *   Crypto device ID to be attached
  *
  * @return
- *   - 0 if the slave is attached.
+ *   - 0 if the worker is attached.
  *   - -ENOTSUP if the operation is not supported.
  *   - -EBUSY if device is started.
- *   - -ENOMEM if the scheduler's slave list is full.
+ *   - -ENOMEM if the scheduler's worker list is full.
  */
 int
-rte_cryptodev_scheduler_slave_attach(uint8_t scheduler_id, uint8_t slave_id);
+rte_cryptodev_scheduler_worker_attach(uint8_t scheduler_id, uint8_t worker_id);
 
 /**
  * Detach a crypto device from the scheduler
  *
  * @param scheduler_id
  *   The target scheduler device ID
- * @param slave_id
+ * @param worker_id
  *   Crypto device ID to be detached
  *
  * @return
- *   - 0 if the slave is detached.
+ *   - 0 if the worker is detached.
  *   - -ENOTSUP if the operation is not supported.
  *   - -EBUSY if device is started.
  */
 int
-rte_cryptodev_scheduler_slave_detach(uint8_t scheduler_id, uint8_t slave_id);
-
+rte_cryptodev_scheduler_worker_detach(uint8_t scheduler_id, uint8_t worker_id);
 
 /**
  * Set the scheduling mode
@@ -199,21 +198,21 @@ int
 rte_cryptodev_scheduler_ordering_get(uint8_t scheduler_id);
 
 /**
- * Get the attached slaves' count and/or ID
+ * Get the attached workers' count and/or ID
  *
  * @param scheduler_id
  *   The target scheduler device ID
- * @param slaves
- *   If successful, the function will write back all slaves' device IDs to it.
+ * @param workers
+ *   If successful, the function will write back all workers' device IDs to it.
  *   This parameter will either be an uint8_t array of
- *   RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES elements or NULL.
+ *   RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS elements or NULL.
  *
  * @return
- *   - non-negative number: the number of slaves attached
+ *   - non-negative number: the number of workers attached
  *   - -ENOTSUP if the operation is not supported.
  */
 int
-rte_cryptodev_scheduler_slaves_get(uint8_t scheduler_id, uint8_t *slaves);
+rte_cryptodev_scheduler_workers_get(uint8_t scheduler_id, uint8_t *workers);
 
 /**
  * Set the mode specific option
diff --git a/drivers/crypto/scheduler/rte_cryptodev_scheduler_operations.h b/drivers/crypto/scheduler/rte_cryptodev_scheduler_operations.h
index c43695894..f8726c009 100644
--- a/drivers/crypto/scheduler/rte_cryptodev_scheduler_operations.h
+++ b/drivers/crypto/scheduler/rte_cryptodev_scheduler_operations.h
@@ -11,10 +11,10 @@
 extern "C" {
 #endif
 
-typedef int (*rte_cryptodev_scheduler_slave_attach_t)(
-		struct rte_cryptodev *dev, uint8_t slave_id);
-typedef int (*rte_cryptodev_scheduler_slave_detach_t)(
-		struct rte_cryptodev *dev, uint8_t slave_id);
+typedef int (*rte_cryptodev_scheduler_worker_attach_t)(
+		struct rte_cryptodev *dev, uint8_t worker_id);
+typedef int (*rte_cryptodev_scheduler_worker_detach_t)(
+		struct rte_cryptodev *dev, uint8_t worker_id);
 
 typedef int (*rte_cryptodev_scheduler_start_t)(struct rte_cryptodev *dev);
 typedef int (*rte_cryptodev_scheduler_stop_t)(struct rte_cryptodev *dev);
@@ -36,8 +36,8 @@ typedef int (*rte_cryptodev_scheduler_config_option_get)(
 		void *option);
 
 struct rte_cryptodev_scheduler_ops {
-	rte_cryptodev_scheduler_slave_attach_t slave_attach;
-	rte_cryptodev_scheduler_slave_attach_t slave_detach;
+	rte_cryptodev_scheduler_worker_attach_t worker_attach;
+	rte_cryptodev_scheduler_worker_attach_t worker_detach;
 
 	rte_cryptodev_scheduler_start_t scheduler_start;
 	rte_cryptodev_scheduler_stop_t scheduler_stop;
diff --git a/drivers/crypto/scheduler/rte_pmd_crypto_scheduler_version.map b/drivers/crypto/scheduler/rte_pmd_crypto_scheduler_version.map
index ca6f102d9..ab7d50562 100644
--- a/drivers/crypto/scheduler/rte_pmd_crypto_scheduler_version.map
+++ b/drivers/crypto/scheduler/rte_pmd_crypto_scheduler_version.map
@@ -8,9 +8,9 @@ DPDK_21 {
 	rte_cryptodev_scheduler_option_set;
 	rte_cryptodev_scheduler_ordering_get;
 	rte_cryptodev_scheduler_ordering_set;
-	rte_cryptodev_scheduler_slave_attach;
-	rte_cryptodev_scheduler_slave_detach;
-	rte_cryptodev_scheduler_slaves_get;
+	rte_cryptodev_scheduler_worker_attach;
+	rte_cryptodev_scheduler_worker_detach;
+	rte_cryptodev_scheduler_workers_get;
 
 	local: *;
 };
diff --git a/drivers/crypto/scheduler/scheduler_failover.c b/drivers/crypto/scheduler/scheduler_failover.c
index 3a023b8ad..844312dd1 100644
--- a/drivers/crypto/scheduler/scheduler_failover.c
+++ b/drivers/crypto/scheduler/scheduler_failover.c
@@ -8,20 +8,20 @@
 #include "rte_cryptodev_scheduler_operations.h"
 #include "scheduler_pmd_private.h"
 
-#define PRIMARY_SLAVE_IDX	0
-#define SECONDARY_SLAVE_IDX	1
-#define NB_FAILOVER_SLAVES	2
-#define SLAVE_SWITCH_MASK	(0x01)
+#define PRIMARY_WORKER_IDX	0
+#define SECONDARY_WORKER_IDX	1
+#define NB_FAILOVER_WORKERS	2
+#define WORKER_SWITCH_MASK	(0x01)
 
 struct fo_scheduler_qp_ctx {
-	struct scheduler_slave primary_slave;
-	struct scheduler_slave secondary_slave;
+	struct scheduler_worker primary_worker;
+	struct scheduler_worker secondary_worker;
 
 	uint8_t deq_idx;
 };
 
 static __rte_always_inline uint16_t
-failover_slave_enqueue(struct scheduler_slave *slave,
+failover_worker_enqueue(struct scheduler_worker *worker,
 		struct rte_crypto_op **ops, uint16_t nb_ops)
 {
 	uint16_t i, processed_ops;
@@ -29,9 +29,9 @@ failover_slave_enqueue(struct scheduler_slave *slave,
 	for (i = 0; i < nb_ops && i < 4; i++)
 		rte_prefetch0(ops[i]->sym->session);
 
-	processed_ops = rte_cryptodev_enqueue_burst(slave->dev_id,
-			slave->qp_id, ops, nb_ops);
-	slave->nb_inflight_cops += processed_ops;
+	processed_ops = rte_cryptodev_enqueue_burst(worker->dev_id,
+			worker->qp_id, ops, nb_ops);
+	worker->nb_inflight_cops += processed_ops;
 
 	return processed_ops;
 }
@@ -46,11 +46,12 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
 	if (unlikely(nb_ops == 0))
 		return 0;
 
-	enqueued_ops = failover_slave_enqueue(&qp_ctx->primary_slave,
+	enqueued_ops = failover_worker_enqueue(&qp_ctx->primary_worker,
 			ops, nb_ops);
 
 	if (enqueued_ops < nb_ops)
-		enqueued_ops += failover_slave_enqueue(&qp_ctx->secondary_slave,
+		enqueued_ops += failover_worker_enqueue(
+				&qp_ctx->secondary_worker,
 				&ops[enqueued_ops],
 				nb_ops - enqueued_ops);
 
@@ -79,28 +80,28 @@ schedule_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
 {
 	struct fo_scheduler_qp_ctx *qp_ctx =
 			((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
-	struct scheduler_slave *slaves[NB_FAILOVER_SLAVES] = {
-			&qp_ctx->primary_slave, &qp_ctx->secondary_slave};
-	struct scheduler_slave *slave = slaves[qp_ctx->deq_idx];
+	struct scheduler_worker *workers[NB_FAILOVER_WORKERS] = {
+			&qp_ctx->primary_worker, &qp_ctx->secondary_worker};
+	struct scheduler_worker *worker = workers[qp_ctx->deq_idx];
 	uint16_t nb_deq_ops = 0, nb_deq_ops2 = 0;
 
-	if (slave->nb_inflight_cops) {
-		nb_deq_ops = rte_cryptodev_dequeue_burst(slave->dev_id,
-			slave->qp_id, ops, nb_ops);
-		slave->nb_inflight_cops -= nb_deq_ops;
+	if (worker->nb_inflight_cops) {
+		nb_deq_ops = rte_cryptodev_dequeue_burst(worker->dev_id,
+			worker->qp_id, ops, nb_ops);
+		worker->nb_inflight_cops -= nb_deq_ops;
 	}
 
-	qp_ctx->deq_idx = (~qp_ctx->deq_idx) & SLAVE_SWITCH_MASK;
+	qp_ctx->deq_idx = (~qp_ctx->deq_idx) & WORKER_SWITCH_MASK;
 
 	if (nb_deq_ops == nb_ops)
 		return nb_deq_ops;
 
-	slave = slaves[qp_ctx->deq_idx];
+	worker = workers[qp_ctx->deq_idx];
 
-	if (slave->nb_inflight_cops) {
-		nb_deq_ops2 = rte_cryptodev_dequeue_burst(slave->dev_id,
-			slave->qp_id, &ops[nb_deq_ops], nb_ops - nb_deq_ops);
-		slave->nb_inflight_cops -= nb_deq_ops2;
+	if (worker->nb_inflight_cops) {
+		nb_deq_ops2 = rte_cryptodev_dequeue_burst(worker->dev_id,
+			worker->qp_id, &ops[nb_deq_ops], nb_ops - nb_deq_ops);
+		worker->nb_inflight_cops -= nb_deq_ops2;
 	}
 
 	return nb_deq_ops + nb_deq_ops2;
@@ -119,15 +120,15 @@ schedule_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
 }
 
 static int
-slave_attach(__rte_unused struct rte_cryptodev *dev,
-		__rte_unused uint8_t slave_id)
+worker_attach(__rte_unused struct rte_cryptodev *dev,
+		__rte_unused uint8_t worker_id)
 {
 	return 0;
 }
 
 static int
-slave_detach(__rte_unused struct rte_cryptodev *dev,
-		__rte_unused uint8_t slave_id)
+worker_detach(__rte_unused struct rte_cryptodev *dev,
+		__rte_unused uint8_t worker_id)
 {
 	return 0;
 }
@@ -138,8 +139,8 @@ scheduler_start(struct rte_cryptodev *dev)
 	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
 	uint16_t i;
 
-	if (sched_ctx->nb_slaves < 2) {
-		CR_SCHED_LOG(ERR, "Number of slaves shall no less than 2");
+	if (sched_ctx->nb_workers < 2) {
+		CR_SCHED_LOG(ERR, "Number of workers shall no less than 2");
 		return -ENOMEM;
 	}
 
@@ -156,12 +157,12 @@ scheduler_start(struct rte_cryptodev *dev)
 			((struct scheduler_qp_ctx *)
 				dev->data->queue_pairs[i])->private_qp_ctx;
 
-		rte_memcpy(&qp_ctx->primary_slave,
-				&sched_ctx->slaves[PRIMARY_SLAVE_IDX],
-				sizeof(struct scheduler_slave));
-		rte_memcpy(&qp_ctx->secondary_slave,
-				&sched_ctx->slaves[SECONDARY_SLAVE_IDX],
-				sizeof(struct scheduler_slave));
+		rte_memcpy(&qp_ctx->primary_worker,
+				&sched_ctx->workers[PRIMARY_WORKER_IDX],
+				sizeof(struct scheduler_worker));
+		rte_memcpy(&qp_ctx->secondary_worker,
+				&sched_ctx->workers[SECONDARY_WORKER_IDX],
+				sizeof(struct scheduler_worker));
 	}
 
 	return 0;
@@ -198,8 +199,8 @@ scheduler_create_private_ctx(__rte_unused struct rte_cryptodev *dev)
 }
 
 static struct rte_cryptodev_scheduler_ops scheduler_fo_ops = {
-	slave_attach,
-	slave_detach,
+	worker_attach,
+	worker_detach,
 	scheduler_start,
 	scheduler_stop,
 	scheduler_config_qp,
@@ -210,8 +211,8 @@ static struct rte_cryptodev_scheduler_ops scheduler_fo_ops = {
 
 static struct rte_cryptodev_scheduler fo_scheduler = {
 		.name = "failover-scheduler",
-		.description = "scheduler which enqueues to the primary slave, "
-				"and only then enqueues to the secondary slave "
+		.description = "scheduler which enqueues to the primary worker, "
+				"and only then enqueues to the secondary worker "
 				"upon failing on enqueuing to primary",
 		.mode = CDEV_SCHED_MODE_FAILOVER,
 		.ops = &scheduler_fo_ops
diff --git a/drivers/crypto/scheduler/scheduler_multicore.c b/drivers/crypto/scheduler/scheduler_multicore.c
index 2d6790bb3..1e2e8dbf9 100644
--- a/drivers/crypto/scheduler/scheduler_multicore.c
+++ b/drivers/crypto/scheduler/scheduler_multicore.c
@@ -26,8 +26,8 @@ struct mc_scheduler_ctx {
 };
 
 struct mc_scheduler_qp_ctx {
-	struct scheduler_slave slaves[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];
-	uint32_t nb_slaves;
+	struct scheduler_worker workers[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS];
+	uint32_t nb_workers;
 
 	uint32_t last_enq_worker_idx;
 	uint32_t last_deq_worker_idx;
@@ -132,15 +132,15 @@ schedule_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
 }
 
 static int
-slave_attach(__rte_unused struct rte_cryptodev *dev,
-		__rte_unused uint8_t slave_id)
+worker_attach(__rte_unused struct rte_cryptodev *dev,
+		__rte_unused uint8_t worker_id)
 {
 	return 0;
 }
 
 static int
-slave_detach(__rte_unused struct rte_cryptodev *dev,
-		__rte_unused uint8_t slave_id)
+worker_detach(__rte_unused struct rte_cryptodev *dev,
+		__rte_unused uint8_t worker_id)
 {
 	return 0;
 }
@@ -154,7 +154,7 @@ mc_scheduler_worker(struct rte_cryptodev *dev)
 	struct rte_ring *deq_ring;
 	uint32_t core_id = rte_lcore_id();
 	int i, worker_idx = -1;
-	struct scheduler_slave *slave;
+	struct scheduler_worker *worker;
 	struct rte_crypto_op *enq_ops[MC_SCHED_BUFFER_SIZE];
 	struct rte_crypto_op *deq_ops[MC_SCHED_BUFFER_SIZE];
 	uint16_t processed_ops;
@@ -177,15 +177,16 @@ mc_scheduler_worker(struct rte_cryptodev *dev)
 		return -1;
 	}
 
-	slave = &sched_ctx->slaves[worker_idx];
+	worker = &sched_ctx->workers[worker_idx];
 	enq_ring = mc_ctx->sched_enq_ring[worker_idx];
 	deq_ring = mc_ctx->sched_deq_ring[worker_idx];
 
 	while (!mc_ctx->stop_signal) {
 		if (pending_enq_ops) {
 			processed_ops =
-				rte_cryptodev_enqueue_burst(slave->dev_id,
-					slave->qp_id, &enq_ops[pending_enq_ops_idx],
+				rte_cryptodev_enqueue_burst(worker->dev_id,
+					worker->qp_id,
+					&enq_ops[pending_enq_ops_idx],
 					pending_enq_ops);
 			pending_enq_ops -= processed_ops;
 			pending_enq_ops_idx += processed_ops;
@@ -195,8 +196,8 @@ mc_scheduler_worker(struct rte_cryptodev *dev)
 							MC_SCHED_BUFFER_SIZE, NULL);
 			if (processed_ops) {
 				pending_enq_ops_idx = rte_cryptodev_enqueue_burst(
-							slave->dev_id, slave->qp_id,
-							enq_ops, processed_ops);
+						worker->dev_id, worker->qp_id,
+						enq_ops, processed_ops);
 				pending_enq_ops = processed_ops - pending_enq_ops_idx;
 				inflight_ops += pending_enq_ops_idx;
 			}
@@ -209,8 +210,9 @@ mc_scheduler_worker(struct rte_cryptodev *dev)
 			pending_deq_ops -= processed_ops;
 			pending_deq_ops_idx += processed_ops;
 		} else if (inflight_ops) {
-			processed_ops = rte_cryptodev_dequeue_burst(slave->dev_id,
-					slave->qp_id, deq_ops, MC_SCHED_BUFFER_SIZE);
+			processed_ops = rte_cryptodev_dequeue_burst(
+					worker->dev_id, worker->qp_id, deq_ops,
+					MC_SCHED_BUFFER_SIZE);
 			if (processed_ops) {
 				inflight_ops -= processed_ops;
 				if (reordering_enabled) {
@@ -264,16 +266,16 @@ scheduler_start(struct rte_cryptodev *dev)
 				qp_ctx->private_qp_ctx;
 		uint32_t j;
 
-		memset(mc_qp_ctx->slaves, 0,
-				RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES *
-				sizeof(struct scheduler_slave));
-		for (j = 0; j < sched_ctx->nb_slaves; j++) {
-			mc_qp_ctx->slaves[j].dev_id =
-					sched_ctx->slaves[j].dev_id;
-			mc_qp_ctx->slaves[j].qp_id = i;
+		memset(mc_qp_ctx->workers, 0,
+				RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS *
+				sizeof(struct scheduler_worker));
+		for (j = 0; j < sched_ctx->nb_workers; j++) {
+			mc_qp_ctx->workers[j].dev_id =
+					sched_ctx->workers[j].dev_id;
+			mc_qp_ctx->workers[j].qp_id = i;
 		}
 
-		mc_qp_ctx->nb_slaves = sched_ctx->nb_slaves;
+		mc_qp_ctx->nb_workers = sched_ctx->nb_workers;
 
 		mc_qp_ctx->last_enq_worker_idx = 0;
 		mc_qp_ctx->last_deq_worker_idx = 0;
@@ -347,7 +349,7 @@ scheduler_create_private_ctx(struct rte_cryptodev *dev)
 		mc_ctx->sched_enq_ring[i] = rte_ring_lookup(r_name);
 		if (!mc_ctx->sched_enq_ring[i]) {
 			mc_ctx->sched_enq_ring[i] = rte_ring_create(r_name,
-						PER_SLAVE_BUFF_SIZE,
+						PER_WORKER_BUFF_SIZE,
 						rte_socket_id(),
 						RING_F_SC_DEQ | RING_F_SP_ENQ);
 			if (!mc_ctx->sched_enq_ring[i]) {
@@ -361,7 +363,7 @@ scheduler_create_private_ctx(struct rte_cryptodev *dev)
 		mc_ctx->sched_deq_ring[i] = rte_ring_lookup(r_name);
 		if (!mc_ctx->sched_deq_ring[i]) {
 			mc_ctx->sched_deq_ring[i] = rte_ring_create(r_name,
-						PER_SLAVE_BUFF_SIZE,
+						PER_WORKER_BUFF_SIZE,
 						rte_socket_id(),
 						RING_F_SC_DEQ | RING_F_SP_ENQ);
 			if (!mc_ctx->sched_deq_ring[i]) {
@@ -387,8 +389,8 @@ scheduler_create_private_ctx(struct rte_cryptodev *dev)
 }
 
 static struct rte_cryptodev_scheduler_ops scheduler_mc_ops = {
-	slave_attach,
-	slave_detach,
+	worker_attach,
+	worker_detach,
 	scheduler_start,
 	scheduler_stop,
 	scheduler_config_qp,
diff --git a/drivers/crypto/scheduler/scheduler_pkt_size_distr.c b/drivers/crypto/scheduler/scheduler_pkt_size_distr.c
index 45c8dceb4..57e330a74 100644
--- a/drivers/crypto/scheduler/scheduler_pkt_size_distr.c
+++ b/drivers/crypto/scheduler/scheduler_pkt_size_distr.c
@@ -9,10 +9,10 @@
 #include "scheduler_pmd_private.h"
 
 #define DEF_PKT_SIZE_THRESHOLD			(0xffffff80)
-#define SLAVE_IDX_SWITCH_MASK			(0x01)
-#define PRIMARY_SLAVE_IDX			0
-#define SECONDARY_SLAVE_IDX			1
-#define NB_PKT_SIZE_SLAVES			2
+#define WORKER_IDX_SWITCH_MASK			(0x01)
+#define PRIMARY_WORKER_IDX			0
+#define SECONDARY_WORKER_IDX			1
+#define NB_PKT_SIZE_WORKERS			2
 
 /** pkt size based scheduler context */
 struct psd_scheduler_ctx {
@@ -21,15 +21,15 @@ struct psd_scheduler_ctx {
 
 /** pkt size based scheduler queue pair context */
 struct psd_scheduler_qp_ctx {
-	struct scheduler_slave primary_slave;
-	struct scheduler_slave secondary_slave;
+	struct scheduler_worker primary_worker;
+	struct scheduler_worker secondary_worker;
 	uint32_t threshold;
 	uint8_t deq_idx;
 } __rte_cache_aligned;
 
 /** scheduling operation variables' wrapping */
 struct psd_schedule_op {
-	uint8_t slave_idx;
+	uint8_t worker_idx;
 	uint16_t pos;
 };
 
@@ -38,13 +38,13 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
 {
 	struct scheduler_qp_ctx *qp_ctx = qp;
 	struct psd_scheduler_qp_ctx *psd_qp_ctx = qp_ctx->private_qp_ctx;
-	struct rte_crypto_op *sched_ops[NB_PKT_SIZE_SLAVES][nb_ops];
-	uint32_t in_flight_ops[NB_PKT_SIZE_SLAVES] = {
-			psd_qp_ctx->primary_slave.nb_inflight_cops,
-			psd_qp_ctx->secondary_slave.nb_inflight_cops
+	struct rte_crypto_op *sched_ops[NB_PKT_SIZE_WORKERS][nb_ops];
+	uint32_t in_flight_ops[NB_PKT_SIZE_WORKERS] = {
+			psd_qp_ctx->primary_worker.nb_inflight_cops,
+			psd_qp_ctx->secondary_worker.nb_inflight_cops
 	};
-	struct psd_schedule_op enq_ops[NB_PKT_SIZE_SLAVES] = {
-		{PRIMARY_SLAVE_IDX, 0}, {SECONDARY_SLAVE_IDX, 0}
+	struct psd_schedule_op enq_ops[NB_PKT_SIZE_WORKERS] = {
+		{PRIMARY_WORKER_IDX, 0}, {SECONDARY_WORKER_IDX, 0}
 	};
 	struct psd_schedule_op *p_enq_op;
 	uint16_t i, processed_ops_pri = 0, processed_ops_sec = 0;
@@ -80,13 +80,13 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
 		/* stop schedule cops before the queue is full, this shall
 		 * prevent the failed enqueue
 		 */
-		if (p_enq_op->pos + in_flight_ops[p_enq_op->slave_idx] ==
+		if (p_enq_op->pos + in_flight_ops[p_enq_op->worker_idx] ==
 				qp_ctx->max_nb_objs) {
 			i = nb_ops;
 			break;
 		}
 
-		sched_ops[p_enq_op->slave_idx][p_enq_op->pos] = ops[i];
+		sched_ops[p_enq_op->worker_idx][p_enq_op->pos] = ops[i];
 		p_enq_op->pos++;
 
 		job_len = ops[i+1]->sym->cipher.data.length;
@@ -94,13 +94,13 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
 				ops[i+1]->sym->auth.data.length;
 		p_enq_op = &enq_ops[!(job_len & psd_qp_ctx->threshold)];
 
-		if (p_enq_op->pos + in_flight_ops[p_enq_op->slave_idx] ==
+		if (p_enq_op->pos + in_flight_ops[p_enq_op->worker_idx] ==
 				qp_ctx->max_nb_objs) {
 			i = nb_ops;
 			break;
 		}
 
-		sched_ops[p_enq_op->slave_idx][p_enq_op->pos] = ops[i+1];
+		sched_ops[p_enq_op->worker_idx][p_enq_op->pos] = ops[i+1];
 		p_enq_op->pos++;
 
 		job_len = ops[i+2]->sym->cipher.data.length;
@@ -108,13 +108,13 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
 				ops[i+2]->sym->auth.data.length;
 		p_enq_op = &enq_ops[!(job_len & psd_qp_ctx->threshold)];
 
-		if (p_enq_op->pos + in_flight_ops[p_enq_op->slave_idx] ==
+		if (p_enq_op->pos + in_flight_ops[p_enq_op->worker_idx] ==
 				qp_ctx->max_nb_objs) {
 			i = nb_ops;
 			break;
 		}
 
-		sched_ops[p_enq_op->slave_idx][p_enq_op->pos] = ops[i+2];
+		sched_ops[p_enq_op->worker_idx][p_enq_op->pos] = ops[i+2];
 		p_enq_op->pos++;
 
 		job_len = ops[i+3]->sym->cipher.data.length;
@@ -122,13 +122,13 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
 				ops[i+3]->sym->auth.data.length;
 		p_enq_op = &enq_ops[!(job_len & psd_qp_ctx->threshold)];
 
-		if (p_enq_op->pos + in_flight_ops[p_enq_op->slave_idx] ==
+		if (p_enq_op->pos + in_flight_ops[p_enq_op->worker_idx] ==
 				qp_ctx->max_nb_objs) {
 			i = nb_ops;
 			break;
 		}
 
-		sched_ops[p_enq_op->slave_idx][p_enq_op->pos] = ops[i+3];
+		sched_ops[p_enq_op->worker_idx][p_enq_op->pos] = ops[i+3];
 		p_enq_op->pos++;
 	}
 
@@ -138,34 +138,34 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
 				ops[i]->sym->auth.data.length;
 		p_enq_op = &enq_ops[!(job_len & psd_qp_ctx->threshold)];
 
-		if (p_enq_op->pos + in_flight_ops[p_enq_op->slave_idx] ==
+		if (p_enq_op->pos + in_flight_ops[p_enq_op->worker_idx] ==
 				qp_ctx->max_nb_objs) {
 			i = nb_ops;
 			break;
 		}
 
-		sched_ops[p_enq_op->slave_idx][p_enq_op->pos] = ops[i];
+		sched_ops[p_enq_op->worker_idx][p_enq_op->pos] = ops[i];
 		p_enq_op->pos++;
 	}
 
 	processed_ops_pri = rte_cryptodev_enqueue_burst(
-			psd_qp_ctx->primary_slave.dev_id,
-			psd_qp_ctx->primary_slave.qp_id,
-			sched_ops[PRIMARY_SLAVE_IDX],
-			enq_ops[PRIMARY_SLAVE_IDX].pos);
-	/* enqueue shall not fail as the slave queue is monitored */
-	RTE_ASSERT(processed_ops_pri == enq_ops[PRIMARY_SLAVE_IDX].pos);
+			psd_qp_ctx->primary_worker.dev_id,
+			psd_qp_ctx->primary_worker.qp_id,
+			sched_ops[PRIMARY_WORKER_IDX],
+			enq_ops[PRIMARY_WORKER_IDX].pos);
+	/* enqueue shall not fail as the worker queue is monitored */
+	RTE_ASSERT(processed_ops_pri == enq_ops[PRIMARY_WORKER_IDX].pos);
 
-	psd_qp_ctx->primary_slave.nb_inflight_cops += processed_ops_pri;
+	psd_qp_ctx->primary_worker.nb_inflight_cops += processed_ops_pri;
 
 	processed_ops_sec = rte_cryptodev_enqueue_burst(
-			psd_qp_ctx->secondary_slave.dev_id,
-			psd_qp_ctx->secondary_slave.qp_id,
-			sched_ops[SECONDARY_SLAVE_IDX],
-			enq_ops[SECONDARY_SLAVE_IDX].pos);
-	RTE_ASSERT(processed_ops_sec == enq_ops[SECONDARY_SLAVE_IDX].pos);
+			psd_qp_ctx->secondary_worker.dev_id,
+			psd_qp_ctx->secondary_worker.qp_id,
+			sched_ops[SECONDARY_WORKER_IDX],
+			enq_ops[SECONDARY_WORKER_IDX].pos);
+	RTE_ASSERT(processed_ops_sec == enq_ops[SECONDARY_WORKER_IDX].pos);
 
-	psd_qp_ctx->secondary_slave.nb_inflight_cops += processed_ops_sec;
+	psd_qp_ctx->secondary_worker.nb_inflight_cops += processed_ops_sec;
 
 	return processed_ops_pri + processed_ops_sec;
 }
@@ -191,33 +191,33 @@ schedule_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
 {
 	struct psd_scheduler_qp_ctx *qp_ctx =
 			((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
-	struct scheduler_slave *slaves[NB_PKT_SIZE_SLAVES] = {
-			&qp_ctx->primary_slave, &qp_ctx->secondary_slave};
-	struct scheduler_slave *slave = slaves[qp_ctx->deq_idx];
+	struct scheduler_worker *workers[NB_PKT_SIZE_WORKERS] = {
+			&qp_ctx->primary_worker, &qp_ctx->secondary_worker};
+	struct scheduler_worker *worker = workers[qp_ctx->deq_idx];
 	uint16_t nb_deq_ops_pri = 0, nb_deq_ops_sec = 0;
 
-	if (slave->nb_inflight_cops) {
-		nb_deq_ops_pri = rte_cryptodev_dequeue_burst(slave->dev_id,
-			slave->qp_id, ops, nb_ops);
-		slave->nb_inflight_cops -= nb_deq_ops_pri;
+	if (worker->nb_inflight_cops) {
+		nb_deq_ops_pri = rte_cryptodev_dequeue_burst(worker->dev_id,
+			worker->qp_id, ops, nb_ops);
+		worker->nb_inflight_cops -= nb_deq_ops_pri;
 	}
 
-	qp_ctx->deq_idx = (~qp_ctx->deq_idx) & SLAVE_IDX_SWITCH_MASK;
+	qp_ctx->deq_idx = (~qp_ctx->deq_idx) & WORKER_IDX_SWITCH_MASK;
 
 	if (nb_deq_ops_pri == nb_ops)
 		return nb_deq_ops_pri;
 
-	slave = slaves[qp_ctx->deq_idx];
+	worker = workers[qp_ctx->deq_idx];
 
-	if (slave->nb_inflight_cops) {
-		nb_deq_ops_sec = rte_cryptodev_dequeue_burst(slave->dev_id,
-				slave->qp_id, &ops[nb_deq_ops_pri],
+	if (worker->nb_inflight_cops) {
+		nb_deq_ops_sec = rte_cryptodev_dequeue_burst(worker->dev_id,
+				worker->qp_id, &ops[nb_deq_ops_pri],
 				nb_ops - nb_deq_ops_pri);
-		slave->nb_inflight_cops -= nb_deq_ops_sec;
+		worker->nb_inflight_cops -= nb_deq_ops_sec;
 
-		if (!slave->nb_inflight_cops)
+		if (!worker->nb_inflight_cops)
 			qp_ctx->deq_idx = (~qp_ctx->deq_idx) &
-					SLAVE_IDX_SWITCH_MASK;
+					WORKER_IDX_SWITCH_MASK;
 	}
 
 	return nb_deq_ops_pri + nb_deq_ops_sec;
@@ -236,15 +236,15 @@ schedule_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
 }
 
 static int
-slave_attach(__rte_unused struct rte_cryptodev *dev,
-		__rte_unused uint8_t slave_id)
+worker_attach(__rte_unused struct rte_cryptodev *dev,
+		__rte_unused uint8_t worker_id)
 {
 	return 0;
 }
 
 static int
-slave_detach(__rte_unused struct rte_cryptodev *dev,
-		__rte_unused uint8_t slave_id)
+worker_detach(__rte_unused struct rte_cryptodev *dev,
+		__rte_unused uint8_t worker_id)
 {
 	return 0;
 }
@@ -256,9 +256,9 @@ scheduler_start(struct rte_cryptodev *dev)
 	struct psd_scheduler_ctx *psd_ctx = sched_ctx->private_ctx;
 	uint16_t i;
 
-	/* for packet size based scheduler, nb_slaves have to >= 2 */
-	if (sched_ctx->nb_slaves < NB_PKT_SIZE_SLAVES) {
-		CR_SCHED_LOG(ERR, "not enough slaves to start");
+	/* for packet size based scheduler, nb_workers have to >= 2 */
+	if (sched_ctx->nb_workers < NB_PKT_SIZE_WORKERS) {
+		CR_SCHED_LOG(ERR, "not enough workers to start");
 		return -1;
 	}
 
@@ -267,15 +267,15 @@ scheduler_start(struct rte_cryptodev *dev)
 		struct psd_scheduler_qp_ctx *ps_qp_ctx =
 				qp_ctx->private_qp_ctx;
 
-		ps_qp_ctx->primary_slave.dev_id =
-				sched_ctx->slaves[PRIMARY_SLAVE_IDX].dev_id;
-		ps_qp_ctx->primary_slave.qp_id = i;
-		ps_qp_ctx->primary_slave.nb_inflight_cops = 0;
+		ps_qp_ctx->primary_worker.dev_id =
+				sched_ctx->workers[PRIMARY_WORKER_IDX].dev_id;
+		ps_qp_ctx->primary_worker.qp_id = i;
+		ps_qp_ctx->primary_worker.nb_inflight_cops = 0;
 
-		ps_qp_ctx->secondary_slave.dev_id =
-				sched_ctx->slaves[SECONDARY_SLAVE_IDX].dev_id;
-		ps_qp_ctx->secondary_slave.qp_id = i;
-		ps_qp_ctx->secondary_slave.nb_inflight_cops = 0;
+		ps_qp_ctx->secondary_worker.dev_id =
+				sched_ctx->workers[SECONDARY_WORKER_IDX].dev_id;
+		ps_qp_ctx->secondary_worker.qp_id = i;
+		ps_qp_ctx->secondary_worker.nb_inflight_cops = 0;
 
 		ps_qp_ctx->threshold = psd_ctx->threshold;
 	}
@@ -300,9 +300,9 @@ scheduler_stop(struct rte_cryptodev *dev)
 		struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[i];
 		struct psd_scheduler_qp_ctx *ps_qp_ctx = qp_ctx->private_qp_ctx;
 
-		if (ps_qp_ctx->primary_slave.nb_inflight_cops +
-				ps_qp_ctx->secondary_slave.nb_inflight_cops) {
-			CR_SCHED_LOG(ERR, "Some crypto ops left in slave queue");
+		if (ps_qp_ctx->primary_worker.nb_inflight_cops +
+				ps_qp_ctx->secondary_worker.nb_inflight_cops) {
+			CR_SCHED_LOG(ERR, "Some crypto ops left in worker queue");
 			return -1;
 		}
 	}
@@ -399,8 +399,8 @@ scheduler_option_get(struct rte_cryptodev *dev, uint32_t option_type,
 }
 
 static struct rte_cryptodev_scheduler_ops scheduler_ps_ops = {
-	slave_attach,
-	slave_detach,
+	worker_attach,
+	worker_detach,
 	scheduler_start,
 	scheduler_stop,
 	scheduler_config_qp,
diff --git a/drivers/crypto/scheduler/scheduler_pmd.c b/drivers/crypto/scheduler/scheduler_pmd.c
index a1632a2b9..632197833 100644
--- a/drivers/crypto/scheduler/scheduler_pmd.c
+++ b/drivers/crypto/scheduler/scheduler_pmd.c
@@ -18,18 +18,18 @@ uint8_t cryptodev_scheduler_driver_id;
 
 struct scheduler_init_params {
 	struct rte_cryptodev_pmd_init_params def_p;
-	uint32_t nb_slaves;
+	uint32_t nb_workers;
 	enum rte_cryptodev_scheduler_mode mode;
 	char mode_param_str[RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN];
 	uint32_t enable_ordering;
 	uint16_t wc_pool[RTE_MAX_LCORE];
 	uint16_t nb_wc;
-	char slave_names[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES]
+	char worker_names[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS]
 			[RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN];
 };
 
 #define RTE_CRYPTODEV_VDEV_NAME			("name")
-#define RTE_CRYPTODEV_VDEV_SLAVE		("slave")
+#define RTE_CRYPTODEV_VDEV_WORKER		("worker")
 #define RTE_CRYPTODEV_VDEV_MODE			("mode")
 #define RTE_CRYPTODEV_VDEV_MODE_PARAM		("mode_param")
 #define RTE_CRYPTODEV_VDEV_ORDERING		("ordering")
@@ -40,7 +40,7 @@ struct scheduler_init_params {
 
 static const char * const scheduler_valid_params[] = {
 	RTE_CRYPTODEV_VDEV_NAME,
-	RTE_CRYPTODEV_VDEV_SLAVE,
+	RTE_CRYPTODEV_VDEV_WORKER,
 	RTE_CRYPTODEV_VDEV_MODE,
 	RTE_CRYPTODEV_VDEV_MODE_PARAM,
 	RTE_CRYPTODEV_VDEV_ORDERING,
@@ -193,31 +193,31 @@ cryptodev_scheduler_create(const char *name,
 		break;
 	}
 
-	for (i = 0; i < init_params->nb_slaves; i++) {
-		sched_ctx->init_slave_names[sched_ctx->nb_init_slaves] =
+	for (i = 0; i < init_params->nb_workers; i++) {
+		sched_ctx->init_worker_names[sched_ctx->nb_init_workers] =
 			rte_zmalloc_socket(
 				NULL,
 				RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN, 0,
 				SOCKET_ID_ANY);
 
-		if (!sched_ctx->init_slave_names[
-				sched_ctx->nb_init_slaves]) {
+		if (!sched_ctx->init_worker_names[
+				sched_ctx->nb_init_workers]) {
 			CR_SCHED_LOG(ERR, "driver %s: Insufficient memory",
 					name);
 			return -ENOMEM;
 		}
 
-		strncpy(sched_ctx->init_slave_names[
-					sched_ctx->nb_init_slaves],
-				init_params->slave_names[i],
+		strncpy(sched_ctx->init_worker_names[
+					sched_ctx->nb_init_workers],
+				init_params->worker_names[i],
 				RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN - 1);
 
-		sched_ctx->nb_init_slaves++;
+		sched_ctx->nb_init_workers++;
 	}
 
 	/*
 	 * Initialize capabilities structure as an empty structure,
-	 * in case device information is requested when no slaves are attached
+	 * in case device information is requested when no workers are attached
 	 */
 	sched_ctx->capabilities = rte_zmalloc_socket(NULL,
 			sizeof(struct rte_cryptodev_capabilities),
@@ -249,12 +249,12 @@ cryptodev_scheduler_remove(struct rte_vdev_device *vdev)
 
 	sched_ctx = dev->data->dev_private;
 
-	if (sched_ctx->nb_slaves) {
+	if (sched_ctx->nb_workers) {
 		uint32_t i;
 
-		for (i = 0; i < sched_ctx->nb_slaves; i++)
-			rte_cryptodev_scheduler_slave_detach(dev->data->dev_id,
-					sched_ctx->slaves[i].dev_id);
+		for (i = 0; i < sched_ctx->nb_workers; i++)
+			rte_cryptodev_scheduler_worker_detach(dev->data->dev_id,
+					sched_ctx->workers[i].dev_id);
 	}
 
 	return rte_cryptodev_pmd_destroy(dev);
@@ -374,19 +374,19 @@ parse_name_arg(const char *key __rte_unused,
 	return 0;
 }
 
-/** Parse slave */
+/** Parse worker */
 static int
-parse_slave_arg(const char *key __rte_unused,
+parse_worker_arg(const char *key __rte_unused,
 		const char *value, void *extra_args)
 {
 	struct scheduler_init_params *param = extra_args;
 
-	if (param->nb_slaves >= RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES) {
-		CR_SCHED_LOG(ERR, "Too many slaves.");
+	if (param->nb_workers >= RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS) {
+		CR_SCHED_LOG(ERR, "Too many workers.");
 		return -ENOMEM;
 	}
 
-	strncpy(param->slave_names[param->nb_slaves++], value,
+	strncpy(param->worker_names[param->nb_workers++], value,
 			RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN - 1);
 
 	return 0;
@@ -498,8 +498,8 @@ scheduler_parse_init_params(struct scheduler_init_params *params,
 		if (ret < 0)
 			goto free_kvlist;
 
-		ret = rte_kvargs_process(kvlist, RTE_CRYPTODEV_VDEV_SLAVE,
-				&parse_slave_arg, params);
+		ret = rte_kvargs_process(kvlist, RTE_CRYPTODEV_VDEV_WORKER,
+				&parse_worker_arg, params);
 		if (ret < 0)
 			goto free_kvlist;
 
@@ -534,10 +534,10 @@ cryptodev_scheduler_probe(struct rte_vdev_device *vdev)
 			rte_socket_id(),
 			RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS
 		},
-		.nb_slaves = 0,
+		.nb_workers = 0,
 		.mode = CDEV_SCHED_MODE_NOT_SET,
 		.enable_ordering = 0,
-		.slave_names = { {0} }
+		.worker_names = { {0} }
 	};
 	const char *name;
 
@@ -566,7 +566,7 @@ RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_SCHEDULER_PMD,
 RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_SCHEDULER_PMD,
 	"max_nb_queue_pairs=<int> "
 	"socket_id=<int> "
-	"slave=<name>");
+	"worker=<name>");
 RTE_PMD_REGISTER_CRYPTO_DRIVER(scheduler_crypto_drv,
 		cryptodev_scheduler_pmd_drv.driver,
 		cryptodev_scheduler_driver_id);
diff --git a/drivers/crypto/scheduler/scheduler_pmd_ops.c b/drivers/crypto/scheduler/scheduler_pmd_ops.c
index 14e5a3712..cb125e802 100644
--- a/drivers/crypto/scheduler/scheduler_pmd_ops.c
+++ b/drivers/crypto/scheduler/scheduler_pmd_ops.c
@@ -12,43 +12,43 @@
 
 #include "scheduler_pmd_private.h"
 
-/** attaching the slaves predefined by scheduler's EAL options */
+/** attaching the workers predefined by scheduler's EAL options */
 static int
-scheduler_attach_init_slave(struct rte_cryptodev *dev)
+scheduler_attach_init_worker(struct rte_cryptodev *dev)
 {
 	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
 	uint8_t scheduler_id = dev->data->dev_id;
 	int i;
 
-	for (i = sched_ctx->nb_init_slaves - 1; i >= 0; i--) {
-		const char *dev_name = sched_ctx->init_slave_names[i];
-		struct rte_cryptodev *slave_dev =
+	for (i = sched_ctx->nb_init_workers - 1; i >= 0; i--) {
+		const char *dev_name = sched_ctx->init_worker_names[i];
+		struct rte_cryptodev *worker_dev =
 				rte_cryptodev_pmd_get_named_dev(dev_name);
 		int status;
 
-		if (!slave_dev) {
-			CR_SCHED_LOG(ERR, "Failed to locate slave dev %s",
+		if (!worker_dev) {
+			CR_SCHED_LOG(ERR, "Failed to locate worker dev %s",
 					dev_name);
 			return -EINVAL;
 		}
 
-		status = rte_cryptodev_scheduler_slave_attach(
-				scheduler_id, slave_dev->data->dev_id);
+		status = rte_cryptodev_scheduler_worker_attach(
+				scheduler_id, worker_dev->data->dev_id);
 
 		if (status < 0) {
-			CR_SCHED_LOG(ERR, "Failed to attach slave cryptodev %u",
-					slave_dev->data->dev_id);
+			CR_SCHED_LOG(ERR, "Failed to attach worker cryptodev %u",
+					worker_dev->data->dev_id);
 			return status;
 		}
 
-		CR_SCHED_LOG(INFO, "Scheduler %s attached slave %s",
+		CR_SCHED_LOG(INFO, "Scheduler %s attached worker %s",
 				dev->data->name,
-				sched_ctx->init_slave_names[i]);
+				sched_ctx->init_worker_names[i]);
 
-		rte_free(sched_ctx->init_slave_names[i]);
-		sched_ctx->init_slave_names[i] = NULL;
+		rte_free(sched_ctx->init_worker_names[i]);
+		sched_ctx->init_worker_names[i] = NULL;
 
-		sched_ctx->nb_init_slaves -= 1;
+		sched_ctx->nb_init_workers -= 1;
 	}
 
 	return 0;
@@ -62,17 +62,17 @@ scheduler_pmd_config(struct rte_cryptodev *dev,
 	uint32_t i;
 	int ret;
 
-	/* although scheduler_attach_init_slave presents multiple times,
+	/* although scheduler_attach_init_worker presents multiple times,
 	 * there will be only 1 meaningful execution.
 	 */
-	ret = scheduler_attach_init_slave(dev);
+	ret = scheduler_attach_init_worker(dev);
 	if (ret < 0)
 		return ret;
 
-	for (i = 0; i < sched_ctx->nb_slaves; i++) {
-		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
+	for (i = 0; i < sched_ctx->nb_workers; i++) {
+		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;
 
-		ret = rte_cryptodev_configure(slave_dev_id, config);
+		ret = rte_cryptodev_configure(worker_dev_id, config);
 		if (ret < 0)
 			break;
 	}
@@ -89,7 +89,7 @@ update_order_ring(struct rte_cryptodev *dev, uint16_t qp_id)
 	if (sched_ctx->reordering_enabled) {
 		char order_ring_name[RTE_CRYPTODEV_NAME_MAX_LEN];
 		uint32_t buff_size = rte_align32pow2(
-			sched_ctx->nb_slaves * PER_SLAVE_BUFF_SIZE);
+			sched_ctx->nb_workers * PER_WORKER_BUFF_SIZE);
 
 		if (qp_ctx->order_ring) {
 			rte_ring_free(qp_ctx->order_ring);
@@ -135,10 +135,10 @@ scheduler_pmd_start(struct rte_cryptodev *dev)
 	if (dev->data->dev_started)
 		return 0;
 
-	/* although scheduler_attach_init_slave presents multiple times,
+	/* although scheduler_attach_init_worker may be called multiple times,
 	 * there will be only 1 meaningful execution.
 	 */
-	ret = scheduler_attach_init_slave(dev);
+	ret = scheduler_attach_init_worker(dev);
 	if (ret < 0)
 		return ret;
 
@@ -155,18 +155,18 @@ scheduler_pmd_start(struct rte_cryptodev *dev)
 		return -1;
 	}
 
-	if (!sched_ctx->nb_slaves) {
-		CR_SCHED_LOG(ERR, "No slave in the scheduler");
+	if (!sched_ctx->nb_workers) {
+		CR_SCHED_LOG(ERR, "No worker in the scheduler");
 		return -1;
 	}
 
-	RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.slave_attach, -ENOTSUP);
+	RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.worker_attach, -ENOTSUP);
 
-	for (i = 0; i < sched_ctx->nb_slaves; i++) {
-		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
+	for (i = 0; i < sched_ctx->nb_workers; i++) {
+		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;
 
-		if ((*sched_ctx->ops.slave_attach)(dev, slave_dev_id) < 0) {
-			CR_SCHED_LOG(ERR, "Failed to attach slave");
+		if ((*sched_ctx->ops.worker_attach)(dev, worker_dev_id) < 0) {
+			CR_SCHED_LOG(ERR, "Failed to attach worker");
 			return -ENOTSUP;
 		}
 	}
@@ -178,16 +178,16 @@ scheduler_pmd_start(struct rte_cryptodev *dev)
 		return -1;
 	}
 
-	/* start all slaves */
-	for (i = 0; i < sched_ctx->nb_slaves; i++) {
-		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
-		struct rte_cryptodev *slave_dev =
-				rte_cryptodev_pmd_get_dev(slave_dev_id);
+	/* start all workers */
+	for (i = 0; i < sched_ctx->nb_workers; i++) {
+		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;
+		struct rte_cryptodev *worker_dev =
+				rte_cryptodev_pmd_get_dev(worker_dev_id);
 
-		ret = (*slave_dev->dev_ops->dev_start)(slave_dev);
+		ret = (*worker_dev->dev_ops->dev_start)(worker_dev);
 		if (ret < 0) {
-			CR_SCHED_LOG(ERR, "Failed to start slave dev %u",
-					slave_dev_id);
+			CR_SCHED_LOG(ERR, "Failed to start worker dev %u",
+					worker_dev_id);
 			return ret;
 		}
 	}
@@ -205,23 +205,23 @@ scheduler_pmd_stop(struct rte_cryptodev *dev)
 	if (!dev->data->dev_started)
 		return;
 
-	/* stop all slaves first */
-	for (i = 0; i < sched_ctx->nb_slaves; i++) {
-		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
-		struct rte_cryptodev *slave_dev =
-				rte_cryptodev_pmd_get_dev(slave_dev_id);
+	/* stop all workers first */
+	for (i = 0; i < sched_ctx->nb_workers; i++) {
+		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;
+		struct rte_cryptodev *worker_dev =
+				rte_cryptodev_pmd_get_dev(worker_dev_id);
 
-		(*slave_dev->dev_ops->dev_stop)(slave_dev);
+		(*worker_dev->dev_ops->dev_stop)(worker_dev);
 	}
 
 	if (*sched_ctx->ops.scheduler_stop)
 		(*sched_ctx->ops.scheduler_stop)(dev);
 
-	for (i = 0; i < sched_ctx->nb_slaves; i++) {
-		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
+	for (i = 0; i < sched_ctx->nb_workers; i++) {
+		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;
 
-		if (*sched_ctx->ops.slave_detach)
-			(*sched_ctx->ops.slave_detach)(dev, slave_dev_id);
+		if (*sched_ctx->ops.worker_detach)
+			(*sched_ctx->ops.worker_detach)(dev, worker_dev_id);
 	}
 }
 
@@ -237,13 +237,13 @@ scheduler_pmd_close(struct rte_cryptodev *dev)
 	if (dev->data->dev_started)
 		return -EBUSY;
 
-	/* close all slaves first */
-	for (i = 0; i < sched_ctx->nb_slaves; i++) {
-		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
-		struct rte_cryptodev *slave_dev =
-				rte_cryptodev_pmd_get_dev(slave_dev_id);
+	/* close all workers first */
+	for (i = 0; i < sched_ctx->nb_workers; i++) {
+		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;
+		struct rte_cryptodev *worker_dev =
+				rte_cryptodev_pmd_get_dev(worker_dev_id);
 
-		ret = (*slave_dev->dev_ops->dev_close)(slave_dev);
+		ret = (*worker_dev->dev_ops->dev_close)(worker_dev);
 		if (ret < 0)
 			return ret;
 	}
@@ -283,19 +283,19 @@ scheduler_pmd_stats_get(struct rte_cryptodev *dev,
 	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
 	uint32_t i;
 
-	for (i = 0; i < sched_ctx->nb_slaves; i++) {
-		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
-		struct rte_cryptodev *slave_dev =
-				rte_cryptodev_pmd_get_dev(slave_dev_id);
-		struct rte_cryptodev_stats slave_stats = {0};
+	for (i = 0; i < sched_ctx->nb_workers; i++) {
+		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;
+		struct rte_cryptodev *worker_dev =
+				rte_cryptodev_pmd_get_dev(worker_dev_id);
+		struct rte_cryptodev_stats worker_stats = {0};
 
-		(*slave_dev->dev_ops->stats_get)(slave_dev, &slave_stats);
+		(*worker_dev->dev_ops->stats_get)(worker_dev, &worker_stats);
 
-		stats->enqueued_count += slave_stats.enqueued_count;
-		stats->dequeued_count += slave_stats.dequeued_count;
+		stats->enqueued_count += worker_stats.enqueued_count;
+		stats->dequeued_count += worker_stats.dequeued_count;
 
-		stats->enqueue_err_count += slave_stats.enqueue_err_count;
-		stats->dequeue_err_count += slave_stats.dequeue_err_count;
+		stats->enqueue_err_count += worker_stats.enqueue_err_count;
+		stats->dequeue_err_count += worker_stats.dequeue_err_count;
 	}
 }
 
@@ -306,12 +306,12 @@ scheduler_pmd_stats_reset(struct rte_cryptodev *dev)
 	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
 	uint32_t i;
 
-	for (i = 0; i < sched_ctx->nb_slaves; i++) {
-		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
-		struct rte_cryptodev *slave_dev =
-				rte_cryptodev_pmd_get_dev(slave_dev_id);
+	for (i = 0; i < sched_ctx->nb_workers; i++) {
+		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;
+		struct rte_cryptodev *worker_dev =
+				rte_cryptodev_pmd_get_dev(worker_dev_id);
 
-		(*slave_dev->dev_ops->stats_reset)(slave_dev);
+		(*worker_dev->dev_ops->stats_reset)(worker_dev);
 	}
 }
 
@@ -329,32 +329,32 @@ scheduler_pmd_info_get(struct rte_cryptodev *dev,
 	if (!dev_info)
 		return;
 
-	/* although scheduler_attach_init_slave presents multiple times,
+	/* although scheduler_attach_init_worker may be called multiple times,
 	 * there will be only 1 meaningful execution.
 	 */
-	scheduler_attach_init_slave(dev);
+	scheduler_attach_init_worker(dev);
 
-	for (i = 0; i < sched_ctx->nb_slaves; i++) {
-		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
-		struct rte_cryptodev_info slave_info;
+	for (i = 0; i < sched_ctx->nb_workers; i++) {
+		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;
+		struct rte_cryptodev_info worker_info;
 
-		rte_cryptodev_info_get(slave_dev_id, &slave_info);
-		uint32_t dev_max_sess = slave_info.sym.max_nb_sessions;
+		rte_cryptodev_info_get(worker_dev_id, &worker_info);
+		uint32_t dev_max_sess = worker_info.sym.max_nb_sessions;
 		if (dev_max_sess != 0) {
 			if (max_nb_sess == 0 ||	dev_max_sess < max_nb_sess)
-				max_nb_sess = slave_info.sym.max_nb_sessions;
+				max_nb_sess = worker_info.sym.max_nb_sessions;
 		}
 
-		/* Get the max headroom requirement among slave PMDs */
-		headroom_sz = slave_info.min_mbuf_headroom_req >
+		/* Get the max headroom requirement among worker PMDs */
+		headroom_sz = worker_info.min_mbuf_headroom_req >
 				headroom_sz ?
-				slave_info.min_mbuf_headroom_req :
+				worker_info.min_mbuf_headroom_req :
 				headroom_sz;
 
-		/* Get the max tailroom requirement among slave PMDs */
-		tailroom_sz = slave_info.min_mbuf_tailroom_req >
+		/* Get the max tailroom requirement among worker PMDs */
+		tailroom_sz = worker_info.min_mbuf_tailroom_req >
 				tailroom_sz ?
-				slave_info.min_mbuf_tailroom_req :
+				worker_info.min_mbuf_tailroom_req :
 				tailroom_sz;
 	}
 
@@ -409,15 +409,15 @@ scheduler_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
 	if (dev->data->queue_pairs[qp_id] != NULL)
 		scheduler_pmd_qp_release(dev, qp_id);
 
-	for (i = 0; i < sched_ctx->nb_slaves; i++) {
-		uint8_t slave_id = sched_ctx->slaves[i].dev_id;
+	for (i = 0; i < sched_ctx->nb_workers; i++) {
+		uint8_t worker_id = sched_ctx->workers[i].dev_id;
 
 		/*
-		 * All slaves will share the same session mempool
+		 * All workers will share the same session mempool
 		 * for session-less operations, so the objects
 		 * must be big enough for all the drivers used.
 		 */
-		ret = rte_cryptodev_queue_pair_setup(slave_id, qp_id,
+		ret = rte_cryptodev_queue_pair_setup(worker_id, qp_id,
 				qp_conf, socket_id);
 		if (ret < 0)
 			return ret;
@@ -434,12 +434,12 @@ scheduler_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
 
 	dev->data->queue_pairs[qp_id] = qp_ctx;
 
-	/* although scheduler_attach_init_slave presents multiple times,
+	/* although scheduler_attach_init_worker may be called multiple times,
 	 * there will be only 1 meaningful execution.
 	 */
-	ret = scheduler_attach_init_slave(dev);
+	ret = scheduler_attach_init_worker(dev);
 	if (ret < 0) {
-		CR_SCHED_LOG(ERR, "Failed to attach slave");
+		CR_SCHED_LOG(ERR, "Failed to attach worker");
 		scheduler_pmd_qp_release(dev, qp_id);
 		return ret;
 	}
@@ -461,10 +461,10 @@ scheduler_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
 	uint8_t i = 0;
 	uint32_t max_priv_sess_size = 0;
 
-	/* Check what is the maximum private session size for all slaves */
-	for (i = 0; i < sched_ctx->nb_slaves; i++) {
-		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
-		struct rte_cryptodev *dev = &rte_cryptodevs[slave_dev_id];
+	/* Find the maximum private session size among all workers */
+	for (i = 0; i < sched_ctx->nb_workers; i++) {
+		uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;
+		struct rte_cryptodev *dev = &rte_cryptodevs[worker_dev_id];
 		uint32_t priv_sess_size = (*dev->dev_ops->sym_session_get_size)(dev);
 
 		if (max_priv_sess_size < priv_sess_size)
@@ -484,10 +484,10 @@ scheduler_pmd_sym_session_configure(struct rte_cryptodev *dev,
 	uint32_t i;
 	int ret;
 
-	for (i = 0; i < sched_ctx->nb_slaves; i++) {
-		struct scheduler_slave *slave = &sched_ctx->slaves[i];
+	for (i = 0; i < sched_ctx->nb_workers; i++) {
+		struct scheduler_worker *worker = &sched_ctx->workers[i];
 
-		ret = rte_cryptodev_sym_session_init(slave->dev_id, sess,
+		ret = rte_cryptodev_sym_session_init(worker->dev_id, sess,
 					xform, mempool);
 		if (ret < 0) {
 			CR_SCHED_LOG(ERR, "unable to config sym session");
@@ -506,11 +506,11 @@ scheduler_pmd_sym_session_clear(struct rte_cryptodev *dev,
 	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
 	uint32_t i;
 
-	/* Clear private data of slaves */
-	for (i = 0; i < sched_ctx->nb_slaves; i++) {
-		struct scheduler_slave *slave = &sched_ctx->slaves[i];
+	/* Clear private data of workers */
+	for (i = 0; i < sched_ctx->nb_workers; i++) {
+		struct scheduler_worker *worker = &sched_ctx->workers[i];
 
-		rte_cryptodev_sym_session_clear(slave->dev_id, sess);
+		rte_cryptodev_sym_session_clear(worker->dev_id, sess);
 	}
 }
 
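
A rough usage sketch of the renamed public entry points exercised above,
assuming scheduler_id and worker_id already name created and configured
devices:

    #include <rte_cryptodev_scheduler.h>

    static int
    attach_and_count(uint8_t scheduler_id, uint8_t worker_id)
    {
            int ret = rte_cryptodev_scheduler_worker_attach(scheduler_id,
                            worker_id);

            if (ret < 0)
                    return ret;

            /* A NULL array argument only queries the worker count. */
            return rte_cryptodev_scheduler_workers_get(scheduler_id, NULL);
    }
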
diff --git a/drivers/crypto/scheduler/scheduler_pmd_private.h b/drivers/crypto/scheduler/scheduler_pmd_private.h
index e1531d1da..adb4eb063 100644
--- a/drivers/crypto/scheduler/scheduler_pmd_private.h
+++ b/drivers/crypto/scheduler/scheduler_pmd_private.h
@@ -10,7 +10,7 @@
 #define CRYPTODEV_NAME_SCHEDULER_PMD	crypto_scheduler
 /**< Scheduler Crypto PMD device name */
 
-#define PER_SLAVE_BUFF_SIZE			(256)
+#define PER_WORKER_BUFF_SIZE			(256)
 
 extern int scheduler_logtype_driver;
 
@@ -18,7 +18,7 @@ extern int scheduler_logtype_driver;
 	rte_log(RTE_LOG_ ## level, scheduler_logtype_driver,		\
 			"%s() line %u: "fmt "\n", __func__, __LINE__, ##args)
 
-struct scheduler_slave {
+struct scheduler_worker {
 	uint8_t dev_id;
 	uint16_t qp_id;
 	uint32_t nb_inflight_cops;
@@ -35,8 +35,8 @@ struct scheduler_ctx {
 
 	uint32_t max_nb_queue_pairs;
 
-	struct scheduler_slave slaves[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];
-	uint32_t nb_slaves;
+	struct scheduler_worker workers[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS];
+	uint32_t nb_workers;
 
 	enum rte_cryptodev_scheduler_mode mode;
 
@@ -49,8 +49,8 @@ struct scheduler_ctx {
 	uint16_t wc_pool[RTE_MAX_LCORE];
 	uint16_t nb_wc;
 
-	char *init_slave_names[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];
-	int nb_init_slaves;
+	char *init_worker_names[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS];
+	int nb_init_workers;
 } __rte_cache_aligned;
 
 struct scheduler_qp_ctx {
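
PER_WORKER_BUFF_SIZE above also drives the reorder-ring sizing shown in
update_order_ring() earlier in this patch; a small worked sketch of that
arithmetic (rte_ring counts are normally powers of two):

    #include <rte_common.h>

    #define PER_WORKER_BUFF_SIZE (256)

    /* E.g. 3 workers: 3 * 256 = 768, rounded up to 1024 ring slots. */
    static uint32_t
    order_ring_size(uint32_t nb_workers)
    {
            return rte_align32pow2(nb_workers * PER_WORKER_BUFF_SIZE);
    }
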
diff --git a/drivers/crypto/scheduler/scheduler_roundrobin.c b/drivers/crypto/scheduler/scheduler_roundrobin.c
index 9b891d978..bc4a63210 100644
--- a/drivers/crypto/scheduler/scheduler_roundrobin.c
+++ b/drivers/crypto/scheduler/scheduler_roundrobin.c
@@ -9,11 +9,11 @@
 #include "scheduler_pmd_private.h"
 
 struct rr_scheduler_qp_ctx {
-	struct scheduler_slave slaves[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];
-	uint32_t nb_slaves;
+	struct scheduler_worker workers[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS];
+	uint32_t nb_workers;
 
-	uint32_t last_enq_slave_idx;
-	uint32_t last_deq_slave_idx;
+	uint32_t last_enq_worker_idx;
+	uint32_t last_deq_worker_idx;
 };
 
 static uint16_t
@@ -21,8 +21,8 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
 {
 	struct rr_scheduler_qp_ctx *rr_qp_ctx =
 			((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
-	uint32_t slave_idx = rr_qp_ctx->last_enq_slave_idx;
-	struct scheduler_slave *slave = &rr_qp_ctx->slaves[slave_idx];
+	uint32_t worker_idx = rr_qp_ctx->last_enq_worker_idx;
+	struct scheduler_worker *worker = &rr_qp_ctx->workers[worker_idx];
 	uint16_t i, processed_ops;
 
 	if (unlikely(nb_ops == 0))
@@ -31,13 +31,13 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
 	for (i = 0; i < nb_ops && i < 4; i++)
 		rte_prefetch0(ops[i]->sym->session);
 
-	processed_ops = rte_cryptodev_enqueue_burst(slave->dev_id,
-			slave->qp_id, ops, nb_ops);
+	processed_ops = rte_cryptodev_enqueue_burst(worker->dev_id,
+			worker->qp_id, ops, nb_ops);
 
-	slave->nb_inflight_cops += processed_ops;
+	worker->nb_inflight_cops += processed_ops;
 
-	rr_qp_ctx->last_enq_slave_idx += 1;
-	rr_qp_ctx->last_enq_slave_idx %= rr_qp_ctx->nb_slaves;
+	rr_qp_ctx->last_enq_worker_idx += 1;
+	rr_qp_ctx->last_enq_worker_idx %= rr_qp_ctx->nb_workers;
 
 	return processed_ops;
 }
@@ -64,34 +64,35 @@ schedule_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
 {
 	struct rr_scheduler_qp_ctx *rr_qp_ctx =
 			((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
-	struct scheduler_slave *slave;
-	uint32_t last_slave_idx = rr_qp_ctx->last_deq_slave_idx;
+	struct scheduler_worker *worker;
+	uint32_t last_worker_idx = rr_qp_ctx->last_deq_worker_idx;
 	uint16_t nb_deq_ops;
 
-	if (unlikely(rr_qp_ctx->slaves[last_slave_idx].nb_inflight_cops == 0)) {
+	if (unlikely(rr_qp_ctx->workers[last_worker_idx].nb_inflight_cops
+			== 0)) {
 		do {
-			last_slave_idx += 1;
+			last_worker_idx += 1;
 
-			if (unlikely(last_slave_idx >= rr_qp_ctx->nb_slaves))
-				last_slave_idx = 0;
+			if (unlikely(last_worker_idx >= rr_qp_ctx->nb_workers))
+				last_worker_idx = 0;
 			/* looped back, means no inflight cops in the queue */
-			if (last_slave_idx == rr_qp_ctx->last_deq_slave_idx)
+			if (last_worker_idx == rr_qp_ctx->last_deq_worker_idx)
 				return 0;
-		} while (rr_qp_ctx->slaves[last_slave_idx].nb_inflight_cops
+		} while (rr_qp_ctx->workers[last_worker_idx].nb_inflight_cops
 				== 0);
 	}
 
-	slave = &rr_qp_ctx->slaves[last_slave_idx];
+	worker = &rr_qp_ctx->workers[last_worker_idx];
 
-	nb_deq_ops = rte_cryptodev_dequeue_burst(slave->dev_id,
-			slave->qp_id, ops, nb_ops);
+	nb_deq_ops = rte_cryptodev_dequeue_burst(worker->dev_id,
+			worker->qp_id, ops, nb_ops);
 
-	last_slave_idx += 1;
-	last_slave_idx %= rr_qp_ctx->nb_slaves;
+	last_worker_idx += 1;
+	last_worker_idx %= rr_qp_ctx->nb_workers;
 
-	rr_qp_ctx->last_deq_slave_idx = last_slave_idx;
+	rr_qp_ctx->last_deq_worker_idx = last_worker_idx;
 
-	slave->nb_inflight_cops -= nb_deq_ops;
+	worker->nb_inflight_cops -= nb_deq_ops;
 
 	return nb_deq_ops;
 }
@@ -109,15 +110,15 @@ schedule_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
 }
 
 static int
-slave_attach(__rte_unused struct rte_cryptodev *dev,
-		__rte_unused uint8_t slave_id)
+worker_attach(__rte_unused struct rte_cryptodev *dev,
+		__rte_unused uint8_t worker_id)
 {
 	return 0;
 }
 
 static int
-slave_detach(__rte_unused struct rte_cryptodev *dev,
-		__rte_unused uint8_t slave_id)
+worker_detach(__rte_unused struct rte_cryptodev *dev,
+		__rte_unused uint8_t worker_id)
 {
 	return 0;
 }
@@ -142,19 +143,19 @@ scheduler_start(struct rte_cryptodev *dev)
 				qp_ctx->private_qp_ctx;
 		uint32_t j;
 
-		memset(rr_qp_ctx->slaves, 0,
-				RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES *
-				sizeof(struct scheduler_slave));
-		for (j = 0; j < sched_ctx->nb_slaves; j++) {
-			rr_qp_ctx->slaves[j].dev_id =
-					sched_ctx->slaves[j].dev_id;
-			rr_qp_ctx->slaves[j].qp_id = i;
+		memset(rr_qp_ctx->workers, 0,
+				RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS *
+				sizeof(struct scheduler_worker));
+		for (j = 0; j < sched_ctx->nb_workers; j++) {
+			rr_qp_ctx->workers[j].dev_id =
+					sched_ctx->workers[j].dev_id;
+			rr_qp_ctx->workers[j].qp_id = i;
 		}
 
-		rr_qp_ctx->nb_slaves = sched_ctx->nb_slaves;
+		rr_qp_ctx->nb_workers = sched_ctx->nb_workers;
 
-		rr_qp_ctx->last_enq_slave_idx = 0;
-		rr_qp_ctx->last_deq_slave_idx = 0;
+		rr_qp_ctx->last_enq_worker_idx = 0;
+		rr_qp_ctx->last_deq_worker_idx = 0;
 	}
 
 	return 0;
@@ -191,8 +192,8 @@ scheduler_create_private_ctx(__rte_unused struct rte_cryptodev *dev)
 }
 
 static struct rte_cryptodev_scheduler_ops scheduler_rr_ops = {
-	slave_attach,
-	slave_detach,
+	worker_attach,
+	worker_detach,
 	scheduler_start,
 	scheduler_stop,
 	scheduler_config_qp,
@@ -204,7 +205,7 @@ static struct rte_cryptodev_scheduler_ops scheduler_rr_ops = {
 static struct rte_cryptodev_scheduler scheduler = {
 		.name = "roundrobin-scheduler",
 		.description = "scheduler which will round robin burst across "
-				"slave crypto devices",
+				"worker crypto devices",
 		.mode = CDEV_SCHED_MODE_ROUNDROBIN,
 		.ops = &scheduler_rr_ops
 };
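
A hedged sketch of selecting this mode from an application at run time,
assuming scheduler_id names a stopped crypto_scheduler device:

    #include <rte_cryptodev_scheduler.h>

    static int
    select_round_robin(uint8_t scheduler_id)
    {
            return rte_cryptodev_scheduler_mode_set(scheduler_id,
                            CDEV_SCHED_MODE_ROUNDROBIN);
    }
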
diff --git a/examples/l2fwd-crypto/main.c b/examples/l2fwd-crypto/main.c
index 827da9b3e..42e80bc3f 100644
--- a/examples/l2fwd-crypto/main.c
+++ b/examples/l2fwd-crypto/main.c
@@ -2277,11 +2277,11 @@ initialize_cryptodevs(struct l2fwd_crypto_options *options, unsigned nb_ports,
 		 */
 		if (!strcmp(dev_info.driver_name, "crypto_scheduler")) {
 #ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
-			uint32_t nb_slaves =
-				rte_cryptodev_scheduler_slaves_get(cdev_id,
+			uint32_t nb_workers =
+				rte_cryptodev_scheduler_workers_get(cdev_id,
 								NULL);
 
-			sessions_needed = enabled_cdev_count * nb_slaves;
+			sessions_needed = enabled_cdev_count * nb_workers;
 #endif
 		} else
 			sessions_needed = enabled_cdev_count;
-- 
2.25.1


Thread overview: 8+ messages
2020-08-26 15:34 [dpdk-dev] [PATCH] crypto/scheduler: rename slave to worker Adam Dybkowski
2020-08-27 15:18 ` Zhang, Roy Fan
2020-09-28  2:49 ` Ruifeng Wang
2020-09-28 10:17   ` Dybkowski, AdamX
2020-09-28 14:16 ` [dpdk-dev] [PATCH v2 0/1] " Adam Dybkowski
2020-09-28 14:16   ` [dpdk-dev] [PATCH v2 1/1] " Adam Dybkowski
2020-09-28 15:12     ` Ruifeng Wang
2020-10-06 20:49       ` Akhil Goyal
