From: Pablo de Lara <pablo.de.lara.guarch@intel.com>
To: roy.fan.zhang@intel.com, declan.doherty@intel.com
Cc: dev@dpdk.org, Kirill Rybalchenko <kirill.rybalchenko@intel.com>
Subject: [dpdk-dev] [PATCH v2] crypto/scheduler: add multicore scheduling mode
Date: Fri, 30 Jun 2017 10:51:09 +0100
Message-ID: <20170630095109.26765-1-pablo.de.lara.guarch@intel.com>
In-Reply-To: <1496002118-64735-1-git-send-email-pablo.de.lara.guarch@intel.com>
From: Kirill Rybalchenko <kirill.rybalchenko@intel.com>
Multi-core scheduling mode is a mode in which the scheduler distributes
crypto operations on a round-robin basis among several cores assigned
as workers.
Signed-off-by: Kirill Rybalchenko <kirill.rybalchenko@intel.com>
---
Changes in v2:
- Rebased against dpdk-next-crypto
- Fixed compilation issue
- Addressed comments from list
doc/guides/cryptodevs/scheduler.rst | 28 ++
doc/guides/rel_notes/release_17_08.rst | 5 +
drivers/crypto/scheduler/Makefile | 1 +
drivers/crypto/scheduler/rte_cryptodev_scheduler.c | 7 +
drivers/crypto/scheduler/rte_cryptodev_scheduler.h | 6 +
drivers/crypto/scheduler/scheduler_multicore.c | 413 +++++++++++++++++++++
drivers/crypto/scheduler/scheduler_pmd.c | 75 +++-
drivers/crypto/scheduler/scheduler_pmd_private.h | 6 +-
8 files changed, 538 insertions(+), 3 deletions(-)
create mode 100644 drivers/crypto/scheduler/scheduler_multicore.c
diff --git a/doc/guides/cryptodevs/scheduler.rst b/doc/guides/cryptodevs/scheduler.rst
index 32e5653..9bc85b3 100644
--- a/doc/guides/cryptodevs/scheduler.rst
+++ b/doc/guides/cryptodevs/scheduler.rst
@@ -170,3 +170,31 @@ operation:
crypto operation burst to the primary slave. When one or more crypto
operations fail to be enqueued, then they will be enqueued to the secondary
slave.
+
+* **CDEV_SCHED_MODE_MULTICORE:**
+
+ *Initialization mode parameter*: **multi-core**
+
+ Multi-core mode, which distributes the workload across several (up to eight)
+ worker cores. Enqueued bursts are distributed among the worker cores in a
+ round-robin manner. If the scheduler cannot enqueue an entire burst to the
+ same worker, it enqueues the remaining operations to the next available worker.
+ For traffic consisting purely of small packets (64 bytes), the multi-core mode
+ is not an optimal solution, as it does not give a significant per-core
+ performance improvement. For mixed traffic (IMIX) the optimal number of worker
+ cores is around 2-3. For large packets (1.5 Kbytes) the scheduler shows linear
+ performance scaling up to eight cores.
+ Each worker uses its own slave cryptodev. Only software cryptodevs are
+ supported, and all of them should be of the same type.
+
+ The multi-core mode uses one extra parameter:
+
+ * corelist: Semicolon-separated list of logical cores to be used as
+ workers. The number of worker cores should be equal to the number of
+ slave cryptodevs.
+
+ Example::
+
+ ... --vdev "crypto_aesni_mb_pmd,name=aesni_mb_1" \
+ --vdev "crypto_aesni_mb_pmd,name=aesni_mb_2" \
+ --vdev "crypto_scheduler_pmd,slave=aesni_mb_1,slave=aesni_mb_2,mode=multi-core,corelist=23;24"
diff --git a/doc/guides/rel_notes/release_17_08.rst b/doc/guides/rel_notes/release_17_08.rst
index 842f46f..247c3b0 100644
--- a/doc/guides/rel_notes/release_17_08.rst
+++ b/doc/guides/rel_notes/release_17_08.rst
@@ -75,6 +75,11 @@ New Features
Added support for firmwares with multiple Ethernet ports per physical port.
+* **Updated the Cryptodev Scheduler PMD.**
+
+ Added a multicore-based distribution mode, which distributes the enqueued
+ crypto operations among several slaves running on different logical cores.
+
Resolved Issues
---------------
diff --git a/drivers/crypto/scheduler/Makefile b/drivers/crypto/scheduler/Makefile
index c273e78..b045410 100644
--- a/drivers/crypto/scheduler/Makefile
+++ b/drivers/crypto/scheduler/Makefile
@@ -56,5 +56,6 @@ SRCS-$(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER) += rte_cryptodev_scheduler.c
SRCS-$(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER) += scheduler_roundrobin.c
SRCS-$(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER) += scheduler_pkt_size_distr.c
SRCS-$(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER) += scheduler_failover.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER) += scheduler_multicore.c
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/scheduler/rte_cryptodev_scheduler.c b/drivers/crypto/scheduler/rte_cryptodev_scheduler.c
index 95566d5..3d4992f 100644
--- a/drivers/crypto/scheduler/rte_cryptodev_scheduler.c
+++ b/drivers/crypto/scheduler/rte_cryptodev_scheduler.c
@@ -351,6 +351,13 @@ rte_cryptodev_scheduler_mode_set(uint8_t scheduler_id,
return -1;
}
break;
+ case CDEV_SCHED_MODE_MULTICORE:
+ if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
+ multicore_scheduler) < 0) {
+ CS_LOG_ERR("Failed to load scheduler");
+ return -1;
+ }
+ break;
default:
CS_LOG_ERR("Not yet supported");
return -ENOTSUP;
diff --git a/drivers/crypto/scheduler/rte_cryptodev_scheduler.h b/drivers/crypto/scheduler/rte_cryptodev_scheduler.h
index 33570ec..1e415fb 100644
--- a/drivers/crypto/scheduler/rte_cryptodev_scheduler.h
+++ b/drivers/crypto/scheduler/rte_cryptodev_scheduler.h
@@ -64,6 +64,8 @@ extern "C" {
#define SCHEDULER_MODE_NAME_PKT_SIZE_DISTR packet-size-distr
/** Fail-over scheduling mode string */
#define SCHEDULER_MODE_NAME_FAIL_OVER fail-over
+/** multi-core scheduling mode string */
+#define SCHEDULER_MODE_NAME_MULTI_CORE multi-core
/**
* Crypto scheduler PMD operation modes
@@ -78,6 +80,8 @@ enum rte_cryptodev_scheduler_mode {
CDEV_SCHED_MODE_PKT_SIZE_DISTR,
/** Fail-over mode */
CDEV_SCHED_MODE_FAILOVER,
+ /** multi-core mode */
+ CDEV_SCHED_MODE_MULTICORE,
CDEV_SCHED_MODE_COUNT /**< number of modes */
};
@@ -295,6 +299,8 @@ extern struct rte_cryptodev_scheduler *roundrobin_scheduler;
extern struct rte_cryptodev_scheduler *pkt_size_based_distr_scheduler;
/** Fail-over mode scheduler */
extern struct rte_cryptodev_scheduler *failover_scheduler;
+/** multi-core mode scheduler */
+extern struct rte_cryptodev_scheduler *multicore_scheduler;
#ifdef __cplusplus
}
diff --git a/drivers/crypto/scheduler/scheduler_multicore.c b/drivers/crypto/scheduler/scheduler_multicore.c
new file mode 100644
index 0000000..9d92cc4
--- /dev/null
+++ b/drivers/crypto/scheduler/scheduler_multicore.c
@@ -0,0 +1,413 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <unistd.h>
+
+#include <rte_cryptodev.h>
+#include <rte_malloc.h>
+
+#include "rte_cryptodev_scheduler_operations.h"
+#include "scheduler_pmd_private.h"
+
+#define MC_SCHED_RING_SIZE 1024
+#define MC_SCHED_ENQ_RING_NAME "MCS_ENQR_"
+#define MC_SCHED_DEQ_RING_NAME "MCS_DEQR_"
+
+#define MC_SCHED_BUFFER_SIZE 32
+
+/** multi-core scheduler context */
+struct mc_scheduler_ctx {
+ unsigned int num_workers; /**< Number of workers polling */
+ unsigned int stop_signal;
+
+ struct rte_ring *sched_enq_ring[MAX_NB_WORKER_CORES];
+ struct rte_ring *sched_deq_ring[MAX_NB_WORKER_CORES];
+};
+
+struct mc_scheduler_qp_ctx {
+ struct scheduler_slave slaves[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];
+ uint32_t nb_slaves;
+
+ uint32_t last_enq_worker_idx;
+ uint32_t last_deq_worker_idx;
+
+ struct mc_scheduler_ctx *mc_private_ctx;
+};
+
+static uint16_t
+schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ struct mc_scheduler_qp_ctx *mc_qp_ctx =
+ ((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
+ struct mc_scheduler_ctx *mc_ctx = mc_qp_ctx->mc_private_ctx;
+ uint32_t worker_idx = mc_qp_ctx->last_enq_worker_idx;
+ uint16_t i, processed_ops = 0;
+
+ if (unlikely(nb_ops == 0))
+ return 0;
+
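+ /* Distribute the burst across the worker enqueue rings in round-robin
+ * order; ops that do not fit on one ring spill over to the next worker.
+ */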
+ for (i = 0; i < mc_ctx->num_workers && nb_ops != 0; i++) {
+ struct rte_ring *enq_ring = mc_ctx->sched_enq_ring[worker_idx];
+ uint16_t nb_queue_ops = rte_ring_enqueue_burst(enq_ring,
+ (void *)(&ops[processed_ops]), nb_ops, NULL);
+
+ nb_ops -= nb_queue_ops;
+ processed_ops += nb_queue_ops;
+
+ if (++worker_idx == mc_ctx->num_workers)
+ worker_idx = 0;
+ }
+ mc_qp_ctx->last_enq_worker_idx = worker_idx;
+
+ return processed_ops;
+}
+
+static uint16_t
+schedule_enqueue_ordering(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct rte_ring *order_ring =
+ ((struct scheduler_qp_ctx *)qp)->order_ring;
+ uint16_t nb_ops_to_enq = get_max_enqueue_order_count(order_ring,
+ nb_ops);
+ uint16_t nb_ops_enqd = schedule_enqueue(qp, ops,
+ nb_ops_to_enq);
+
+ scheduler_order_insert(order_ring, ops, nb_ops_enqd);
+
+ return nb_ops_enqd;
+}
+
+static uint16_t
+schedule_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ struct mc_scheduler_qp_ctx *mc_qp_ctx =
+ ((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
+ struct mc_scheduler_ctx *mc_ctx = mc_qp_ctx->mc_private_ctx;
+ uint32_t worker_idx = mc_qp_ctx->last_deq_worker_idx;
+ uint16_t i, processed_ops = 0;
+
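+ /* Collect processed ops from the worker dequeue rings in round-robin
+ * order until nb_ops are gathered or all rings have been polled once.
+ */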
+ for (i = 0; i < mc_ctx->num_workers && nb_ops != 0; i++) {
+ struct rte_ring *deq_ring = mc_ctx->sched_deq_ring[worker_idx];
+ uint16_t nb_deq_ops = rte_ring_dequeue_burst(deq_ring,
+ (void *)(&ops[processed_ops]), nb_ops, NULL);
+
+ nb_ops -= nb_deq_ops;
+ processed_ops += nb_deq_ops;
+ if (++worker_idx == mc_ctx->num_workers)
+ worker_idx = 0;
+ }
+
+ mc_qp_ctx->last_deq_worker_idx = worker_idx;
+
+ return processed_ops;
+}
+
+static uint16_t
+schedule_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct rte_ring *order_ring =
+ ((struct scheduler_qp_ctx *)qp)->order_ring;
+
+ return scheduler_order_drain(order_ring, ops, nb_ops);
+}
+
+static int
+slave_attach(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint8_t slave_id)
+{
+ return 0;
+}
+
+static int
+slave_detach(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint8_t slave_id)
+{
+ return 0;
+}
+
+static int
+mc_scheduler_worker(struct rte_cryptodev *dev)
+{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ struct mc_scheduler_ctx *mc_ctx = sched_ctx->private_ctx;
+ struct rte_ring *enq_ring;
+ struct rte_ring *deq_ring;
+ unsigned int core_id = rte_lcore_id();
+ int worker_idx = -1;
+ struct scheduler_slave *slave;
+ struct rte_crypto_op *enq_ops[MC_SCHED_BUFFER_SIZE];
+ struct rte_crypto_op *deq_ops[MC_SCHED_BUFFER_SIZE];
+ struct scheduler_session *sess0, *sess1, *sess2, *sess3;
+ uint16_t processed_ops;
+ uint16_t left_op = 0;
+ uint16_t left_op_idx = 0;
+ uint16_t inflight_ops = 0;
+
+ for (int i = 0; i < (int)sched_ctx->nb_wc; i++) {
+ if (sched_ctx->wc_pool[i] == core_id) {
+ worker_idx = i;
+ break;
+ }
+ }
+ if (worker_idx == -1) {
+ CS_LOG_ERR("worker on core %u:cannot find worker index!\n", core_id);
+ return -1;
+ }
+
+ slave = &sched_ctx->slaves[worker_idx];
+ enq_ring = mc_ctx->sched_enq_ring[worker_idx];
+ deq_ring = mc_ctx->sched_deq_ring[worker_idx];
+
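+ /* Main worker loop: pull bursts from this worker's enqueue ring, feed
+ * them to the slave cryptodev, and move finished ops to the dequeue
+ * ring, retrying any leftovers from a partially accepted burst first.
+ */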
+ while (!mc_ctx->stop_signal) {
+ if (left_op) {
+ processed_ops =
+ rte_cryptodev_enqueue_burst(slave->dev_id,
+ slave->qp_id,
+ &enq_ops[left_op_idx], left_op);
+
+ left_op -= processed_ops;
+ left_op_idx += processed_ops;
+ } else {
+ uint16_t nb_deq_ops = rte_ring_dequeue_burst(enq_ring,
+ (void *)enq_ops, MC_SCHED_BUFFER_SIZE, NULL);
+ if (nb_deq_ops) {
+ uint16_t i;
+
+ for (i = 0; i < nb_deq_ops && i < 4; i++)
+ rte_prefetch0(enq_ops[i]->sym->session);
+
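+ /* Swap the scheduler session for the slave's own session four ops at
+ * a time, prefetching the sessions of the next four ops to hide the
+ * memory latency of the pointer chase.
+ */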
+ for (i = 0; (i < (nb_deq_ops - 8))
+ && (nb_deq_ops > 8); i += 4) {
+ sess0 = (struct scheduler_session *)
+ enq_ops[i]->sym->session->_private;
+ sess1 = (struct scheduler_session *)
+ enq_ops[i+1]->sym->session->_private;
+ sess2 = (struct scheduler_session *)
+ enq_ops[i+2]->sym->session->_private;
+ sess3 = (struct scheduler_session *)
+ enq_ops[i+3]->sym->session->_private;
+
+ enq_ops[i]->sym->session =
+ sess0->sessions[worker_idx];
+ enq_ops[i + 1]->sym->session =
+ sess1->sessions[worker_idx];
+ enq_ops[i + 2]->sym->session =
+ sess2->sessions[worker_idx];
+ enq_ops[i + 3]->sym->session =
+ sess3->sessions[worker_idx];
+
+ rte_prefetch0(enq_ops[i + 4]->sym->session);
+ rte_prefetch0(enq_ops[i + 5]->sym->session);
+ rte_prefetch0(enq_ops[i + 6]->sym->session);
+ rte_prefetch0(enq_ops[i + 7]->sym->session);
+ }
+
+ for (; i < nb_deq_ops; i++) {
+ sess0 = (struct scheduler_session *)
+ enq_ops[i]->sym->session->_private;
+ enq_ops[i]->sym->session =
+ sess0->sessions[worker_idx];
+ }
+
+ processed_ops = rte_cryptodev_enqueue_burst(slave->dev_id,
+ slave->qp_id, enq_ops, nb_deq_ops);
+
+ if (unlikely(processed_ops < nb_deq_ops)) {
+ left_op = nb_deq_ops - processed_ops;
+ left_op_idx = processed_ops;
+ }
+
+ inflight_ops += processed_ops;
+ }
+ }
+
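+ /* Drain completions from the slave back onto the dequeue ring that
+ * schedule_dequeue() reads on the application's lcore.
+ */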
+ if (inflight_ops > 0) {
+ processed_ops = rte_cryptodev_dequeue_burst(slave->dev_id,
+ slave->qp_id, deq_ops, MC_SCHED_BUFFER_SIZE);
+ if (processed_ops) {
+ uint16_t nb_enq_ops = rte_ring_enqueue_burst(deq_ring,
+ (void *)deq_ops, processed_ops, NULL);
+ inflight_ops -= nb_enq_ops;
+ }
+ }
+
+ rte_pause();
+ }
+
+ return 0;
+}
+
+static int
+scheduler_start(struct rte_cryptodev *dev)
+{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ struct mc_scheduler_ctx *mc_ctx = sched_ctx->private_ctx;
+ uint16_t i;
+
+ mc_ctx->stop_signal = 0;
+
+ for (i = 0; i < sched_ctx->nb_wc; i++)
+ rte_eal_remote_launch(
+ (lcore_function_t *)mc_scheduler_worker, dev,
+ sched_ctx->wc_pool[i]);
+
+ if (sched_ctx->reordering_enabled) {
+ dev->enqueue_burst = &schedule_enqueue_ordering;
+ dev->dequeue_burst = &schedule_dequeue_ordering;
+ } else {
+ dev->enqueue_burst = &schedule_enqueue;
+ dev->dequeue_burst = &schedule_dequeue;
+ }
+
+ for (i = 0; i < dev->data->nb_queue_pairs; i++) {
+ struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[i];
+ struct mc_scheduler_qp_ctx *mc_qp_ctx =
+ qp_ctx->private_qp_ctx;
+ uint32_t j;
+
+ memset(mc_qp_ctx->slaves, 0,
+ RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES *
+ sizeof(struct scheduler_slave));
+ for (j = 0; j < sched_ctx->nb_slaves; j++) {
+ mc_qp_ctx->slaves[j].dev_id =
+ sched_ctx->slaves[j].dev_id;
+ mc_qp_ctx->slaves[j].qp_id = i;
+ }
+
+ mc_qp_ctx->nb_slaves = sched_ctx->nb_slaves;
+
+ mc_qp_ctx->last_enq_worker_idx = 0;
+ mc_qp_ctx->last_deq_worker_idx = 0;
+ }
+
+ return 0;
+}
+
+static int
+scheduler_stop(struct rte_cryptodev *dev)
+{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ struct mc_scheduler_ctx *mc_ctx = sched_ctx->private_ctx;
+
+ mc_ctx->stop_signal = 1;
+
+ for (uint16_t i = 0; i < sched_ctx->nb_wc; i++)
+ rte_eal_wait_lcore(sched_ctx->wc_pool[i]);
+
+ return 0;
+}
+
+static int
+scheduler_config_qp(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+ struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];
+ struct mc_scheduler_qp_ctx *mc_qp_ctx;
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ struct mc_scheduler_ctx *mc_ctx = sched_ctx->private_ctx;
+
+ mc_qp_ctx = rte_zmalloc_socket(NULL, sizeof(*mc_qp_ctx), 0,
+ rte_socket_id());
+ if (!mc_qp_ctx) {
+ CS_LOG_ERR("failed allocate memory for private queue pair");
+ return -ENOMEM;
+ }
+
+ mc_qp_ctx->mc_private_ctx = mc_ctx;
+ qp_ctx->private_qp_ctx = (void *)mc_qp_ctx;
+
+ return 0;
+}
+
+static int
+scheduler_create_private_ctx(struct rte_cryptodev *dev)
+{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ struct mc_scheduler_ctx *mc_ctx;
+
+ if (sched_ctx->private_ctx)
+ rte_free(sched_ctx->private_ctx);
+
+ mc_ctx = rte_zmalloc_socket(NULL, sizeof(struct mc_scheduler_ctx), 0,
+ rte_socket_id());
+ if (!mc_ctx) {
+ CS_LOG_ERR("failed allocate memory");
+ return -ENOMEM;
+ }
+
+ mc_ctx->num_workers = sched_ctx->nb_wc;
+ for (uint16_t i = 0; i < sched_ctx->nb_wc; i++) {
+ char r_name[16];
+
+ snprintf(r_name, sizeof(r_name), MC_SCHED_ENQ_RING_NAME "%u", i);
+ mc_ctx->sched_enq_ring[i] = rte_ring_create(r_name, MC_SCHED_RING_SIZE,
+ rte_socket_id(), RING_F_SC_DEQ | RING_F_SP_ENQ);
+ if (!mc_ctx->sched_enq_ring[i]) {
+ CS_LOG_ERR("Cannot create ring for worker %u", i);
+ return -1;
+ }
+ snprintf(r_name, sizeof(r_name), MC_SCHED_DEQ_RING_NAME "%u", i);
+ mc_ctx->sched_deq_ring[i] = rte_ring_create(r_name, MC_SCHED_RING_SIZE,
+ rte_socket_id(), RING_F_SC_DEQ | RING_F_SP_ENQ);
+ if (!mc_ctx->sched_deq_ring[i]) {
+ CS_LOG_ERR("Cannot create ring for worker %u", i);
+ return -1;
+ }
+ }
+
+ sched_ctx->private_ctx = (void *)mc_ctx;
+
+ return 0;
+}
+
+struct rte_cryptodev_scheduler_ops scheduler_mc_ops = {
+ slave_attach,
+ slave_detach,
+ scheduler_start,
+ scheduler_stop,
+ scheduler_config_qp,
+ scheduler_create_private_ctx,
+ NULL, /* option_set */
+ NULL /* option_get */
+};
+
+struct rte_cryptodev_scheduler mc_scheduler = {
+ .name = "multicore-scheduler",
+ .description = "scheduler which will run burst across multiple cpu cores",
+ .mode = CDEV_SCHED_MODE_MULTICORE,
+ .ops = &scheduler_mc_ops
+};
+
+struct rte_cryptodev_scheduler *multicore_scheduler = &mc_scheduler;
diff --git a/drivers/crypto/scheduler/scheduler_pmd.c b/drivers/crypto/scheduler/scheduler_pmd.c
index fefd6cc..4954b6b 100644
--- a/drivers/crypto/scheduler/scheduler_pmd.c
+++ b/drivers/crypto/scheduler/scheduler_pmd.c
@@ -47,6 +47,7 @@ struct scheduler_init_params {
uint32_t nb_slaves;
enum rte_cryptodev_scheduler_mode mode;
uint32_t enable_ordering;
+ uint64_t wcmask;
char slave_names[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES]
[RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN];
};
@@ -58,6 +59,8 @@ struct scheduler_init_params {
#define RTE_CRYPTODEV_VDEV_MAX_NB_QP_ARG ("max_nb_queue_pairs")
#define RTE_CRYPTODEV_VDEV_MAX_NB_SESS_ARG ("max_nb_sessions")
#define RTE_CRYPTODEV_VDEV_SOCKET_ID ("socket_id")
+#define RTE_CRYPTODEV_VDEV_COREMASK ("coremask")
+#define RTE_CRYPTODEV_VDEV_CORELIST ("corelist")
const char *scheduler_valid_params[] = {
RTE_CRYPTODEV_VDEV_NAME,
@@ -66,7 +69,9 @@ const char *scheduler_valid_params[] = {
RTE_CRYPTODEV_VDEV_ORDERING,
RTE_CRYPTODEV_VDEV_MAX_NB_QP_ARG,
RTE_CRYPTODEV_VDEV_MAX_NB_SESS_ARG,
- RTE_CRYPTODEV_VDEV_SOCKET_ID
+ RTE_CRYPTODEV_VDEV_SOCKET_ID,
+ RTE_CRYPTODEV_VDEV_COREMASK,
+ RTE_CRYPTODEV_VDEV_CORELIST
};
struct scheduler_parse_map {
@@ -80,7 +85,9 @@ const struct scheduler_parse_map scheduler_mode_map[] = {
{RTE_STR(SCHEDULER_MODE_NAME_PKT_SIZE_DISTR),
CDEV_SCHED_MODE_PKT_SIZE_DISTR},
{RTE_STR(SCHEDULER_MODE_NAME_FAIL_OVER),
- CDEV_SCHED_MODE_FAILOVER}
+ CDEV_SCHED_MODE_FAILOVER},
+ {RTE_STR(SCHEDULER_MODE_NAME_MULTI_CORE),
+ CDEV_SCHED_MODE_MULTICORE}
};
const struct scheduler_parse_map scheduler_ordering_map[] = {
@@ -120,6 +127,18 @@ cryptodev_scheduler_create(const char *name,
sched_ctx->max_nb_queue_pairs =
init_params->def_p.max_nb_queue_pairs;
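+ /* Translate the worker core mask into the ordered wc_pool array used
+ * by the multi-core scheduler when launching worker lcores.
+ */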
+ if (init_params->mode == CDEV_SCHED_MODE_MULTICORE) {
+ sched_ctx->nb_wc = 0;
+ for (uint16_t i = 0; i < MAX_NB_WORKER_CORES; i++) {
+ if (init_params->wcmask & (1ULL << i)) {
+ sched_ctx->wc_pool[sched_ctx->nb_wc++] = i;
+ RTE_LOG(INFO, PMD,
+ " Worker core[%u]=%u added\n",
+ sched_ctx->nb_wc-1, i);
+ }
+ }
+ }
+
if (init_params->mode > CDEV_SCHED_MODE_USERDEFINED &&
init_params->mode < CDEV_SCHED_MODE_COUNT) {
ret = rte_cryptodev_scheduler_mode_set(dev->data->dev_id,
@@ -238,6 +257,43 @@ parse_integer_arg(const char *key __rte_unused,
return 0;
}
+/** Parse the worker core mask from a hexadecimal string argument */
+static int
+parse_coremask_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ struct scheduler_init_params *params = extra_args;
+
+ params->wcmask = strtoull(value, NULL, 16);
+
+ return 0;
+}
+
+/** Parse the worker core mask from a list of decimal core ids */
+static int
+parse_corelist_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ struct scheduler_init_params *params = extra_args;
+
+ params->wcmask = 0ULL;
+
+ const char *token = value;
+
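+ /* Each decimal core id sets one bit of the worker core mask; any
+ * single character (e.g. ';') may separate the ids.
+ */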
+ while (isdigit(token[0])) {
+ char *rval;
+ unsigned int core = strtoul(token, &rval, 10);
+
+ if (core >= MAX_NB_WORKER_CORES) /* avoid undefined shift */
+ return -EINVAL;
+
+ params->wcmask |= 1ULL << core;
+ token = (const char *)rval;
+ if (token[0] == '\0')
+ break;
+ token++;
+ }
+
+ return 0;
+}
+
/** Parse name */
static int
parse_name_arg(const char *key __rte_unused,
@@ -357,6 +413,18 @@ scheduler_parse_init_params(struct scheduler_init_params *params,
if (ret < 0)
goto free_kvlist;
+ ret = rte_kvargs_process(kvlist, RTE_CRYPTODEV_VDEV_COREMASK,
+ &parse_coremask_arg,
+ params);
+ if (ret < 0)
+ goto free_kvlist;
+
+ ret = rte_kvargs_process(kvlist, RTE_CRYPTODEV_VDEV_CORELIST,
+ &parse_corelist_arg,
+ params);
+ if (ret < 0)
+ goto free_kvlist;
+
ret = rte_kvargs_process(kvlist, RTE_CRYPTODEV_VDEV_NAME,
&parse_name_arg,
&params->def_p);
@@ -418,6 +486,9 @@ cryptodev_scheduler_probe(struct rte_vdev_device *vdev)
if (init_params.def_p.name[0] != '\0')
RTE_LOG(INFO, PMD, " User defined name = %s\n",
init_params.def_p.name);
+ if (init_params.wcmask != 0)
+ RTE_LOG(INFO, PMD, " worker core mask = %" PRIx64 "\n",
+ init_params.wcmask);
return cryptodev_scheduler_create(name,
vdev,
diff --git a/drivers/crypto/scheduler/scheduler_pmd_private.h b/drivers/crypto/scheduler/scheduler_pmd_private.h
index 05a5916..f6d54d4 100644
--- a/drivers/crypto/scheduler/scheduler_pmd_private.h
+++ b/drivers/crypto/scheduler/scheduler_pmd_private.h
@@ -36,7 +36,7 @@
#include "rte_cryptodev_scheduler.h"
-#define PER_SLAVE_BUFF_SIZE (256)
+#define PER_SLAVE_BUFF_SIZE (8192)
#define CS_LOG_ERR(fmt, args...) \
RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
@@ -58,6 +58,8 @@
#define CS_LOG_DBG(fmt, args...)
#endif
+#define MAX_NB_WORKER_CORES 64
+
struct scheduler_slave {
uint8_t dev_id;
uint16_t qp_id;
@@ -86,6 +88,8 @@ struct scheduler_ctx {
char name[RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN];
char description[RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN];
+ uint16_t wc_pool[MAX_NB_WORKER_CORES];
+ uint16_t nb_wc;
char *init_slave_names[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];
int nb_init_slaves;
--
2.9.4
Thread overview: 8+ messages
2017-05-28 20:08 [dpdk-dev] [PATCH] " Pablo de Lara
2017-05-31 7:48 ` De Lara Guarch, Pablo
2017-06-05 6:19 ` Zhang, Roy Fan
2017-06-30 9:51 ` Pablo de Lara [this message]
2017-07-05 16:14 ` [dpdk-dev] [PATCH v3] " Kirill Rybalchenko
2017-07-06 1:42 ` Zhang, Roy Fan
2017-07-06 8:30 ` De Lara Guarch, Pablo
2017-07-06 8:32 ` De Lara Guarch, Pablo