From: Fan Zhang <roy.fan.zhang@intel.com>
To: dev@dpdk.org
Cc: pablo.de.lara.guarch@intel.com, stable@dpdk.org
Subject: [dpdk-dev] [PATCH] crypto/scheduler: fix queue pair configuration
Date: Mon, 10 Apr 2017 16:00:54 +0100 [thread overview]
Message-ID: <1491836454-124949-1-git-send-email-roy.fan.zhang@intel.com> (raw)
This patch fixes the queue pair configuration for the scheduler PMD.
The queue pairs of a scheduler may be configured with different
nb_descriptors values, but the driver previously assumed a single
shared value for all of them. In addition, the maximum number of
objects available in a queue pair is one fewer than nb_descriptors.
This patch fixes these issues.
Fixes: 4987bbaa4810 ("crypto/scheduler: add packet size based mode")
Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
---
.../crypto/scheduler/scheduler_pkt_size_distr.c | 33 ++++++++++------------
drivers/crypto/scheduler/scheduler_pmd_ops.c | 5 ++--
drivers/crypto/scheduler/scheduler_pmd_private.h | 4 +--
3 files changed, 20 insertions(+), 22 deletions(-)
diff --git a/drivers/crypto/scheduler/scheduler_pkt_size_distr.c b/drivers/crypto/scheduler/scheduler_pkt_size_distr.c
index 1066451..0d32c0b 100644
--- a/drivers/crypto/scheduler/scheduler_pkt_size_distr.c
+++ b/drivers/crypto/scheduler/scheduler_pkt_size_distr.c
@@ -52,7 +52,6 @@ struct psd_scheduler_qp_ctx {
struct scheduler_slave primary_slave;
struct scheduler_slave secondary_slave;
uint32_t threshold;
- uint32_t max_nb_objs;
uint8_t deq_idx;
} __rte_cache_aligned;
@@ -65,13 +64,13 @@ struct psd_schedule_op {
static uint16_t
schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
- struct psd_scheduler_qp_ctx *qp_ctx =
- ((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
+ struct scheduler_qp_ctx *qp_ctx = qp;
+ struct psd_scheduler_qp_ctx *psd_qp_ctx = qp_ctx->private_qp_ctx;
struct rte_crypto_op *sched_ops[NB_PKT_SIZE_SLAVES][nb_ops];
struct scheduler_session *sess;
uint32_t in_flight_ops[NB_PKT_SIZE_SLAVES] = {
- qp_ctx->primary_slave.nb_inflight_cops,
- qp_ctx->secondary_slave.nb_inflight_cops
+ psd_qp_ctx->primary_slave.nb_inflight_cops,
+ psd_qp_ctx->secondary_slave.nb_inflight_cops
};
struct psd_schedule_op enq_ops[NB_PKT_SIZE_SLAVES] = {
{PRIMARY_SLAVE_IDX, 0}, {SECONDARY_SLAVE_IDX, 0}
@@ -107,7 +106,7 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
job_len += (ops[i]->sym->cipher.data.length == 0) *
ops[i]->sym->auth.data.length;
/* decide the target op based on the job length */
- p_enq_op = &enq_ops[!(job_len & qp_ctx->threshold)];
+ p_enq_op = &enq_ops[!(job_len & psd_qp_ctx->threshold)];
/* stop schedule cops before the queue is full, this shall
* prevent the failed enqueue
@@ -127,7 +126,7 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
job_len = ops[i+1]->sym->cipher.data.length;
job_len += (ops[i+1]->sym->cipher.data.length == 0) *
ops[i+1]->sym->auth.data.length;
- p_enq_op = &enq_ops[!(job_len & qp_ctx->threshold)];
+ p_enq_op = &enq_ops[!(job_len & psd_qp_ctx->threshold)];
if (p_enq_op->pos + in_flight_ops[p_enq_op->slave_idx] ==
qp_ctx->max_nb_objs) {
@@ -144,7 +143,7 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
job_len = ops[i+2]->sym->cipher.data.length;
job_len += (ops[i+2]->sym->cipher.data.length == 0) *
ops[i+2]->sym->auth.data.length;
- p_enq_op = &enq_ops[!(job_len & qp_ctx->threshold)];
+ p_enq_op = &enq_ops[!(job_len & psd_qp_ctx->threshold)];
if (p_enq_op->pos + in_flight_ops[p_enq_op->slave_idx] ==
qp_ctx->max_nb_objs) {
@@ -162,7 +161,7 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
job_len = ops[i+3]->sym->cipher.data.length;
job_len += (ops[i+3]->sym->cipher.data.length == 0) *
ops[i+3]->sym->auth.data.length;
- p_enq_op = &enq_ops[!(job_len & qp_ctx->threshold)];
+ p_enq_op = &enq_ops[!(job_len & psd_qp_ctx->threshold)];
if (p_enq_op->pos + in_flight_ops[p_enq_op->slave_idx] ==
qp_ctx->max_nb_objs) {
@@ -182,7 +181,7 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
job_len = ops[i]->sym->cipher.data.length;
job_len += (ops[i]->sym->cipher.data.length == 0) *
ops[i]->sym->auth.data.length;
- p_enq_op = &enq_ops[!(job_len & qp_ctx->threshold)];
+ p_enq_op = &enq_ops[!(job_len & psd_qp_ctx->threshold)];
if (p_enq_op->pos + in_flight_ops[p_enq_op->slave_idx] ==
qp_ctx->max_nb_objs) {
@@ -196,23 +195,23 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
}
processed_ops_pri = rte_cryptodev_enqueue_burst(
- qp_ctx->primary_slave.dev_id,
- qp_ctx->primary_slave.qp_id,
+ psd_qp_ctx->primary_slave.dev_id,
+ psd_qp_ctx->primary_slave.qp_id,
sched_ops[PRIMARY_SLAVE_IDX],
enq_ops[PRIMARY_SLAVE_IDX].pos);
/* enqueue shall not fail as the slave queue is monitored */
RTE_ASSERT(processed_ops_pri == enq_ops[PRIMARY_SLAVE_IDX].pos);
- qp_ctx->primary_slave.nb_inflight_cops += processed_ops_pri;
+ psd_qp_ctx->primary_slave.nb_inflight_cops += processed_ops_pri;
processed_ops_sec = rte_cryptodev_enqueue_burst(
- qp_ctx->secondary_slave.dev_id,
- qp_ctx->secondary_slave.qp_id,
+ psd_qp_ctx->secondary_slave.dev_id,
+ psd_qp_ctx->secondary_slave.qp_id,
sched_ops[SECONDARY_SLAVE_IDX],
enq_ops[SECONDARY_SLAVE_IDX].pos);
RTE_ASSERT(processed_ops_sec == enq_ops[SECONDARY_SLAVE_IDX].pos);
- qp_ctx->secondary_slave.nb_inflight_cops += processed_ops_sec;
+ psd_qp_ctx->secondary_slave.nb_inflight_cops += processed_ops_sec;
return processed_ops_pri + processed_ops_sec;
}
@@ -325,8 +324,6 @@ scheduler_start(struct rte_cryptodev *dev)
ps_qp_ctx->secondary_slave.nb_inflight_cops = 0;
ps_qp_ctx->threshold = psd_ctx->threshold;
-
- ps_qp_ctx->max_nb_objs = sched_ctx->qp_conf.nb_descriptors;
}
if (sched_ctx->reordering_enabled) {
diff --git a/drivers/crypto/scheduler/scheduler_pmd_ops.c b/drivers/crypto/scheduler/scheduler_pmd_ops.c
index 34e0cc9..725ba9d 100644
--- a/drivers/crypto/scheduler/scheduler_pmd_ops.c
+++ b/drivers/crypto/scheduler/scheduler_pmd_ops.c
@@ -367,14 +367,15 @@ scheduler_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
return ret;
}
- sched_ctx->qp_conf.nb_descriptors = qp_conf->nb_descriptors;
-
/* Allocate the queue pair data structure. */
qp_ctx = rte_zmalloc_socket(name, sizeof(*qp_ctx), RTE_CACHE_LINE_SIZE,
socket_id);
if (qp_ctx == NULL)
return -ENOMEM;
+ /* The actual available object number = nb_descriptors - 1 */
+ qp_ctx->max_nb_objs = qp_conf->nb_descriptors - 1;
+
dev->data->queue_pairs[qp_id] = qp_ctx;
if (*sched_ctx->ops.config_queue_pair) {
diff --git a/drivers/crypto/scheduler/scheduler_pmd_private.h b/drivers/crypto/scheduler/scheduler_pmd_private.h
index 2f4feea..cfffa06 100644
--- a/drivers/crypto/scheduler/scheduler_pmd_private.h
+++ b/drivers/crypto/scheduler/scheduler_pmd_private.h
@@ -84,8 +84,6 @@ struct scheduler_ctx {
uint8_t reordering_enabled;
- struct rte_cryptodev_qp_conf qp_conf;
-
char name[RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN];
char description[RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN];
} __rte_cache_aligned;
@@ -93,6 +91,8 @@ struct scheduler_ctx {
struct scheduler_qp_ctx {
void *private_qp_ctx;
+ uint32_t max_nb_objs;
+
struct rte_ring *order_ring;
uint32_t seqn;
} __rte_cache_aligned;
--
2.7.4
next reply other threads:[~2017-04-10 14:59 UTC|newest]
Thread overview: 3+ messages / expand[flat|nested] mbox.gz Atom feed top
2017-04-10 15:00 Fan Zhang [this message]
2017-04-17 20:31 ` De Lara Guarch, Pablo
2017-04-17 21:43 ` De Lara Guarch, Pablo
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1491836454-124949-1-git-send-email-roy.fan.zhang@intel.com \
--to=roy.fan.zhang@intel.com \
--cc=dev@dpdk.org \
--cc=pablo.de.lara.guarch@intel.com \
--cc=stable@dpdk.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).