From: Fan Zhang <roy.fan.zhang@intel.com>
To: dev@dpdk.org
Cc: pablo.de.lara.guarch@intel.com, sergio.gonzalez.monroy@intel.com,
declan.doherty@intel.com
Subject: [dpdk-dev] [PATCH] crypto/scheduler: change enqueue and dequeue functions
Date: Thu, 2 Mar 2017 11:12:11 +0000 [thread overview]
Message-ID: <1488453131-94845-1-git-send-email-roy.fan.zhang@intel.com> (raw)
This patch changes the enqueue and dequeue methods of the cryptodev
scheduler PMD. Originally, a two-layer function call was carried out
upon enqueuing or dequeuing a burst of crypto ops. This patch
removes one layer to improve performance.
Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
---
drivers/crypto/scheduler/scheduler_pmd.c | 29 --------------
drivers/crypto/scheduler/scheduler_pmd_private.h | 3 --
drivers/crypto/scheduler/scheduler_roundrobin.c | 49 ++++++++++++------------
3 files changed, 24 insertions(+), 57 deletions(-)
diff --git a/drivers/crypto/scheduler/scheduler_pmd.c b/drivers/crypto/scheduler/scheduler_pmd.c
index eeafbe6..f5038c9 100644
--- a/drivers/crypto/scheduler/scheduler_pmd.c
+++ b/drivers/crypto/scheduler/scheduler_pmd.c
@@ -61,32 +61,6 @@ const char *scheduler_valid_params[] = {
RTE_CRYPTODEV_VDEV_SOCKET_ID
};
-static uint16_t
-scheduler_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
- uint16_t nb_ops)
-{
- struct scheduler_qp_ctx *qp_ctx = queue_pair;
- uint16_t processed_ops;
-
- processed_ops = (*qp_ctx->schedule_enqueue)(qp_ctx, ops,
- nb_ops);
-
- return processed_ops;
-}
-
-static uint16_t
-scheduler_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
- uint16_t nb_ops)
-{
- struct scheduler_qp_ctx *qp_ctx = queue_pair;
- uint16_t processed_ops;
-
- processed_ops = (*qp_ctx->schedule_dequeue)(qp_ctx, ops,
- nb_ops);
-
- return processed_ops;
-}
-
static int
attach_init_slaves(uint8_t scheduler_id,
const uint8_t *slaves, const uint8_t nb_slaves)
@@ -146,9 +120,6 @@ cryptodev_scheduler_create(const char *name,
dev->dev_type = RTE_CRYPTODEV_SCHEDULER_PMD;
dev->dev_ops = rte_crypto_scheduler_pmd_ops;
- dev->enqueue_burst = scheduler_enqueue_burst;
- dev->dequeue_burst = scheduler_dequeue_burst;
-
sched_ctx = dev->data->dev_private;
sched_ctx->max_nb_queue_pairs =
init_params->def_p.max_nb_queue_pairs;
diff --git a/drivers/crypto/scheduler/scheduler_pmd_private.h b/drivers/crypto/scheduler/scheduler_pmd_private.h
index ac4690e..e3ea21a 100644
--- a/drivers/crypto/scheduler/scheduler_pmd_private.h
+++ b/drivers/crypto/scheduler/scheduler_pmd_private.h
@@ -98,9 +98,6 @@ struct scheduler_ctx {
struct scheduler_qp_ctx {
void *private_qp_ctx;
- rte_cryptodev_scheduler_burst_enqueue_t schedule_enqueue;
- rte_cryptodev_scheduler_burst_dequeue_t schedule_dequeue;
-
struct rte_reorder_buffer *reorder_buf;
uint32_t seqn;
} __rte_cache_aligned;
diff --git a/drivers/crypto/scheduler/scheduler_roundrobin.c b/drivers/crypto/scheduler/scheduler_roundrobin.c
index 9545aa9..4990c74 100644
--- a/drivers/crypto/scheduler/scheduler_roundrobin.c
+++ b/drivers/crypto/scheduler/scheduler_roundrobin.c
@@ -45,10 +45,10 @@ struct rr_scheduler_qp_ctx {
};
static uint16_t
-schedule_enqueue(void *qp_ctx, struct rte_crypto_op **ops, uint16_t nb_ops)
+schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
struct rr_scheduler_qp_ctx *rr_qp_ctx =
- ((struct scheduler_qp_ctx *)qp_ctx)->private_qp_ctx;
+ ((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
uint32_t slave_idx = rr_qp_ctx->last_enq_slave_idx;
struct scheduler_slave *slave = &rr_qp_ctx->slaves[slave_idx];
uint16_t i, processed_ops;
@@ -112,12 +112,11 @@ schedule_enqueue(void *qp_ctx, struct rte_crypto_op **ops, uint16_t nb_ops)
}
static uint16_t
-schedule_enqueue_ordering(void *qp_ctx, struct rte_crypto_op **ops,
+schedule_enqueue_ordering(void *qp, struct rte_crypto_op **ops,
uint16_t nb_ops)
{
- struct scheduler_qp_ctx *gen_qp_ctx = qp_ctx;
- struct rr_scheduler_qp_ctx *rr_qp_ctx =
- gen_qp_ctx->private_qp_ctx;
+ struct scheduler_qp_ctx *qp_ctx = qp;
+ struct rr_scheduler_qp_ctx *rr_qp_ctx = qp_ctx->private_qp_ctx;
uint32_t slave_idx = rr_qp_ctx->last_enq_slave_idx;
struct scheduler_slave *slave = &rr_qp_ctx->slaves[slave_idx];
uint16_t i, processed_ops;
@@ -148,13 +147,13 @@ schedule_enqueue_ordering(void *qp_ctx, struct rte_crypto_op **ops,
sessions[i + 3] = ops[i + 3]->sym->session;
ops[i]->sym->session = sess0->sessions[slave_idx];
- ops[i]->sym->m_src->seqn = gen_qp_ctx->seqn++;
+ ops[i]->sym->m_src->seqn = qp_ctx->seqn++;
ops[i + 1]->sym->session = sess1->sessions[slave_idx];
- ops[i + 1]->sym->m_src->seqn = gen_qp_ctx->seqn++;
+ ops[i + 1]->sym->m_src->seqn = qp_ctx->seqn++;
ops[i + 2]->sym->session = sess2->sessions[slave_idx];
- ops[i + 2]->sym->m_src->seqn = gen_qp_ctx->seqn++;
+ ops[i + 2]->sym->m_src->seqn = qp_ctx->seqn++;
ops[i + 3]->sym->session = sess3->sessions[slave_idx];
- ops[i + 3]->sym->m_src->seqn = gen_qp_ctx->seqn++;
+ ops[i + 3]->sym->m_src->seqn = qp_ctx->seqn++;
rte_prefetch0(ops[i + 4]->sym->session);
rte_prefetch0(ops[i + 4]->sym->m_src);
@@ -171,7 +170,7 @@ schedule_enqueue_ordering(void *qp_ctx, struct rte_crypto_op **ops,
ops[i]->sym->session->_private;
sessions[i] = ops[i]->sym->session;
ops[i]->sym->session = sess0->sessions[slave_idx];
- ops[i]->sym->m_src->seqn = gen_qp_ctx->seqn++;
+ ops[i]->sym->m_src->seqn = qp_ctx->seqn++;
}
processed_ops = rte_cryptodev_enqueue_burst(slave->dev_id,
@@ -193,10 +192,10 @@ schedule_enqueue_ordering(void *qp_ctx, struct rte_crypto_op **ops,
static uint16_t
-schedule_dequeue(void *qp_ctx, struct rte_crypto_op **ops, uint16_t nb_ops)
+schedule_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
struct rr_scheduler_qp_ctx *rr_qp_ctx =
- ((struct scheduler_qp_ctx *)qp_ctx)->private_qp_ctx;
+ ((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
struct scheduler_slave *slave;
uint32_t last_slave_idx = rr_qp_ctx->last_deq_slave_idx;
uint16_t nb_deq_ops;
@@ -230,13 +229,13 @@ schedule_dequeue(void *qp_ctx, struct rte_crypto_op **ops, uint16_t nb_ops)
}
static uint16_t
-schedule_dequeue_ordering(void *qp_ctx, struct rte_crypto_op **ops,
+schedule_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
uint16_t nb_ops)
{
- struct scheduler_qp_ctx *gen_qp_ctx = (struct scheduler_qp_ctx *)qp_ctx;
- struct rr_scheduler_qp_ctx *rr_qp_ctx = (gen_qp_ctx->private_qp_ctx);
+ struct scheduler_qp_ctx *qp_ctx = (struct scheduler_qp_ctx *)qp;
+ struct rr_scheduler_qp_ctx *rr_qp_ctx = (qp_ctx->private_qp_ctx);
struct scheduler_slave *slave;
- struct rte_reorder_buffer *reorder_buff = gen_qp_ctx->reorder_buf;
+ struct rte_reorder_buffer *reorder_buff = qp_ctx->reorder_buf;
struct rte_mbuf *mbuf0, *mbuf1, *mbuf2, *mbuf3;
uint16_t nb_deq_ops, nb_drained_mbufs;
const uint16_t nb_op_ops = nb_ops;
@@ -354,6 +353,14 @@ scheduler_start(struct rte_cryptodev *dev)
struct scheduler_ctx *sched_ctx = dev->data->dev_private;
uint16_t i;
+ if (sched_ctx->reordering_enabled) {
+ dev->enqueue_burst = &schedule_enqueue_ordering;
+ dev->dequeue_burst = &schedule_dequeue_ordering;
+ } else {
+ dev->enqueue_burst = &schedule_enqueue;
+ dev->dequeue_burst = &schedule_dequeue;
+ }
+
for (i = 0; i < dev->data->nb_queue_pairs; i++) {
struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[i];
struct rr_scheduler_qp_ctx *rr_qp_ctx =
@@ -372,14 +379,6 @@ scheduler_start(struct rte_cryptodev *dev)
rr_qp_ctx->last_enq_slave_idx = 0;
rr_qp_ctx->last_deq_slave_idx = 0;
-
- if (sched_ctx->reordering_enabled) {
- qp_ctx->schedule_enqueue = &schedule_enqueue_ordering;
- qp_ctx->schedule_dequeue = &schedule_dequeue_ordering;
- } else {
- qp_ctx->schedule_enqueue = &schedule_enqueue;
- qp_ctx->schedule_dequeue = &schedule_dequeue;
- }
}
return 0;
--
2.7.4
next reply other threads:[~2017-03-02 11:11 UTC|newest]
Thread overview: 3+ messages / expand[flat|nested] mbox.gz Atom feed top
2017-03-02 11:12 Fan Zhang [this message]
2017-03-20 14:18 ` Declan Doherty
2017-03-21 17:17 ` De Lara Guarch, Pablo
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1488453131-94845-1-git-send-email-roy.fan.zhang@intel.com \
--to=roy.fan.zhang@intel.com \
--cc=declan.doherty@intel.com \
--cc=dev@dpdk.org \
--cc=pablo.de.lara.guarch@intel.com \
--cc=sergio.gonzalez.monroy@intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).