DPDK patches and discussions
From: Fan Zhang <roy.fan.zhang@intel.com>
To: dev@dpdk.org
Cc: pablo.de.lara.guarch@intel.com
Subject: [dpdk-dev] [PATCH] crypto/scheduler: remove session backup and recover
Date: Fri, 26 May 2017 02:24:43 +0100
Message-ID: <20170526012443.13001-1-roy.fan.zhang@intel.com>

This patch removes the unnecessary session backup and recovery
steps in the round-robin and fail-over modes of the cryptodev
scheduler PMD. Originally, the scheduler blindly enqueued to
the slaves regardless of their available queue room and
recovered the sessions once an enqueue failed. This patch
predicts the number of crypto ops that can be enqueued to a
slave by checking its inflight ops, and thus removes the
session backup and recovery steps.
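
For reference only (not part of the patch), a minimal sketch of the
capacity prediction described above. The helper name predict_enqueue_room
and the struct slave_state are hypothetical; only the nb_inflight_cops
and max_nb_objs names mirror fields used by the driver:

#include <stdint.h>

struct slave_state {
	uint16_t nb_inflight_cops; /* ops enqueued to the slave, not yet dequeued */
};

static inline uint16_t
predict_enqueue_room(const struct slave_state *slave, uint16_t nb_ops,
		uint32_t max_nb_objs)
{
	uint32_t inflight = slave->nb_inflight_cops;

	/* Slave queue already full: nothing can be enqueued. */
	if (inflight >= max_nb_objs)
		return 0;

	/* The whole burst fits, so the enqueue cannot fail and no
	 * session backup/recovery is needed. */
	if (inflight + nb_ops <= max_nb_objs)
		return nb_ops;

	/* Otherwise clamp the burst to the remaining room. */
	return (uint16_t)(max_nb_objs - inflight);
}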

Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
---
 drivers/crypto/scheduler/scheduler_failover.c   | 33 +++++++++----------------
 drivers/crypto/scheduler/scheduler_roundrobin.c | 33 ++++++++++---------------
 2 files changed, 25 insertions(+), 41 deletions(-)

diff --git a/drivers/crypto/scheduler/scheduler_failover.c b/drivers/crypto/scheduler/scheduler_failover.c
index 2471a5f..0fb13de 100644
--- a/drivers/crypto/scheduler/scheduler_failover.c
+++ b/drivers/crypto/scheduler/scheduler_failover.c
@@ -53,7 +53,6 @@ failover_slave_enqueue(struct scheduler_slave *slave, uint8_t slave_idx,
 		struct rte_crypto_op **ops, uint16_t nb_ops)
 {
 	uint16_t i, processed_ops;
-	struct rte_cryptodev_sym_session *sessions[nb_ops];
 	struct scheduler_session *sess0, *sess1, *sess2, *sess3;
 
 	for (i = 0; i < nb_ops && i < 4; i++)
@@ -74,11 +73,6 @@ failover_slave_enqueue(struct scheduler_slave *slave, uint8_t slave_idx,
 		sess3 = (struct scheduler_session *)
 				ops[i+3]->sym->session->_private;
 
-		sessions[i] = ops[i]->sym->session;
-		sessions[i + 1] = ops[i + 1]->sym->session;
-		sessions[i + 2] = ops[i + 2]->sym->session;
-		sessions[i + 3] = ops[i + 3]->sym->session;
-
 		ops[i]->sym->session = sess0->sessions[slave_idx];
 		ops[i + 1]->sym->session = sess1->sessions[slave_idx];
 		ops[i + 2]->sym->session = sess2->sessions[slave_idx];
@@ -88,7 +82,6 @@ failover_slave_enqueue(struct scheduler_slave *slave, uint8_t slave_idx,
 	for (; i < nb_ops; i++) {
 		sess0 = (struct scheduler_session *)
 				ops[i]->sym->session->_private;
-		sessions[i] = ops[i]->sym->session;
 		ops[i]->sym->session = sess0->sessions[slave_idx];
 	}
 
@@ -96,9 +89,7 @@ failover_slave_enqueue(struct scheduler_slave *slave, uint8_t slave_idx,
 			slave->qp_id, ops, nb_ops);
 	slave->nb_inflight_cops += processed_ops;
 
-	if (unlikely(processed_ops < nb_ops))
-		for (i = processed_ops; i < nb_ops; i++)
-			ops[i]->sym->session = sessions[i];
+	RTE_ASSERT(processed_ops == nb_ops);
 
 	return processed_ops;
 }
@@ -106,22 +97,22 @@ failover_slave_enqueue(struct scheduler_slave *slave, uint8_t slave_idx,
 static uint16_t
 schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
 {
-	struct fo_scheduler_qp_ctx *qp_ctx =
-			((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
-	uint16_t enqueued_ops;
+	struct scheduler_qp_ctx *qp_ctx = qp;
+	struct fo_scheduler_qp_ctx *fo_qp_ctx = qp_ctx->private_qp_ctx;
 
 	if (unlikely(nb_ops == 0))
 		return 0;
 
-	enqueued_ops = failover_slave_enqueue(&qp_ctx->primary_slave,
-			PRIMARY_SLAVE_IDX, ops, nb_ops);
-
-	if (enqueued_ops < nb_ops)
-		enqueued_ops += failover_slave_enqueue(&qp_ctx->secondary_slave,
-				SECONDARY_SLAVE_IDX, &ops[enqueued_ops],
-				nb_ops - enqueued_ops);
+	if (fo_qp_ctx->primary_slave.nb_inflight_cops + nb_ops <
+				qp_ctx->max_nb_objs)
+		return failover_slave_enqueue(&fo_qp_ctx->primary_slave,
+				PRIMARY_SLAVE_IDX, ops, nb_ops);
 
-	return enqueued_ops;
+	return failover_slave_enqueue(&fo_qp_ctx->secondary_slave,
+			SECONDARY_SLAVE_IDX, ops, (fo_qp_ctx->secondary_slave.
+			nb_inflight_cops + nb_ops <= qp_ctx->max_nb_objs) ?
+			nb_ops : qp_ctx->max_nb_objs -
+			fo_qp_ctx->secondary_slave.nb_inflight_cops);
 }
 
 
diff --git a/drivers/crypto/scheduler/scheduler_roundrobin.c b/drivers/crypto/scheduler/scheduler_roundrobin.c
index 0116276..74ab5de 100644
--- a/drivers/crypto/scheduler/scheduler_roundrobin.c
+++ b/drivers/crypto/scheduler/scheduler_roundrobin.c
@@ -47,21 +47,24 @@ struct rr_scheduler_qp_ctx {
 static uint16_t
 schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
 {
-	struct rr_scheduler_qp_ctx *rr_qp_ctx =
-			((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
+	struct scheduler_qp_ctx *qp_ctx = qp;
+	struct rr_scheduler_qp_ctx *rr_qp_ctx = qp_ctx->private_qp_ctx;
 	uint32_t slave_idx = rr_qp_ctx->last_enq_slave_idx;
 	struct scheduler_slave *slave = &rr_qp_ctx->slaves[slave_idx];
 	uint16_t i, processed_ops;
-	struct rte_cryptodev_sym_session *sessions[nb_ops];
 	struct scheduler_session *sess0, *sess1, *sess2, *sess3;
+	uint16_t nb_ops_to_enq;
 
 	if (unlikely(nb_ops == 0))
 		return 0;
 
-	for (i = 0; i < nb_ops && i < 4; i++)
+	nb_ops_to_enq = slave->nb_inflight_cops + nb_ops > qp_ctx->max_nb_objs ?
+			qp_ctx->max_nb_objs - slave->nb_inflight_cops : nb_ops;
+
+	for (i = 0; i < nb_ops_to_enq && i < 4; i++)
 		rte_prefetch0(ops[i]->sym->session);
 
-	for (i = 0; (i < (nb_ops - 8)) && (nb_ops > 8); i += 4) {
+	for (i = 0; (i < (nb_ops_to_enq - 8)) && (nb_ops_to_enq > 8); i += 4) {
 		sess0 = (struct scheduler_session *)
 				ops[i]->sym->session->_private;
 		sess1 = (struct scheduler_session *)
@@ -71,11 +74,6 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
 		sess3 = (struct scheduler_session *)
 				ops[i+3]->sym->session->_private;
 
-		sessions[i] = ops[i]->sym->session;
-		sessions[i + 1] = ops[i + 1]->sym->session;
-		sessions[i + 2] = ops[i + 2]->sym->session;
-		sessions[i + 3] = ops[i + 3]->sym->session;
-
 		ops[i]->sym->session = sess0->sessions[slave_idx];
 		ops[i + 1]->sym->session = sess1->sessions[slave_idx];
 		ops[i + 2]->sym->session = sess2->sessions[slave_idx];
@@ -87,26 +85,21 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
 		rte_prefetch0(ops[i + 7]->sym->session);
 	}
 
-	for (; i < nb_ops; i++) {
+	for (; i < nb_ops_to_enq; i++) {
 		sess0 = (struct scheduler_session *)
 				ops[i]->sym->session->_private;
-		sessions[i] = ops[i]->sym->session;
 		ops[i]->sym->session = sess0->sessions[slave_idx];
 	}
 
 	processed_ops = rte_cryptodev_enqueue_burst(slave->dev_id,
-			slave->qp_id, ops, nb_ops);
+			slave->qp_id, ops, nb_ops_to_enq);
+	RTE_ASSERT(processed_ops == nb_ops_to_enq);
 
 	slave->nb_inflight_cops += processed_ops;
 
 	rr_qp_ctx->last_enq_slave_idx += 1;
-	rr_qp_ctx->last_enq_slave_idx %= rr_qp_ctx->nb_slaves;
-
-	/* recover session if enqueue is failed */
-	if (unlikely(processed_ops < nb_ops)) {
-		for (i = processed_ops; i < nb_ops; i++)
-			ops[i]->sym->session = sessions[i];
-	}
+	if (rr_qp_ctx->last_enq_slave_idx == rr_qp_ctx->nb_slaves)
+		rr_qp_ctx->last_enq_slave_idx = 0;
 
 	return processed_ops;
 }
-- 
2.9.4
