From: Fan Zhang <roy.fan.zhang@intel.com>
To: dev@dpdk.org
Cc: pablo.de.lara.guarch@intel.com
Date: Fri, 26 May 2017 02:24:43 +0100
Message-Id: <20170526012443.13001-1-roy.fan.zhang@intel.com>
X-Mailer: git-send-email 2.9.4
Subject: [dpdk-dev] [PATCH] crypto/scheduler: remove session backup and recover

This patch removes the unnecessary session backup and recovery steps from
the round-robin and fail-over modes of the cryptodev scheduler PMD.

Previously, the scheduler blindly enqueued crypto ops to the slaves
regardless of the room left in their queues, and recovered the sessions
once an enqueue failed. This patch instead predicts the number of crypto
ops a slave can accept by checking its number of inflight ops, which makes
the session backup and recovery steps unnecessary.

Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
---
 drivers/crypto/scheduler/scheduler_failover.c   | 33 +++++++++----------------
 drivers/crypto/scheduler/scheduler_roundrobin.c | 33 ++++++++++---------------
 2 files changed, 25 insertions(+), 41 deletions(-)

diff --git a/drivers/crypto/scheduler/scheduler_failover.c b/drivers/crypto/scheduler/scheduler_failover.c
index 2471a5f..0fb13de 100644
--- a/drivers/crypto/scheduler/scheduler_failover.c
+++ b/drivers/crypto/scheduler/scheduler_failover.c
@@ -53,7 +53,6 @@ failover_slave_enqueue(struct scheduler_slave *slave, uint8_t slave_idx,
 		struct rte_crypto_op **ops, uint16_t nb_ops)
 {
 	uint16_t i, processed_ops;
-	struct rte_cryptodev_sym_session *sessions[nb_ops];
 	struct scheduler_session *sess0, *sess1, *sess2, *sess3;
 
 	for (i = 0; i < nb_ops && i < 4; i++)
@@ -74,11 +73,6 @@ failover_slave_enqueue(struct scheduler_slave *slave, uint8_t slave_idx,
 		sess3 = (struct scheduler_session *)
 				ops[i+3]->sym->session->_private;
 
-		sessions[i] = ops[i]->sym->session;
-		sessions[i + 1] = ops[i + 1]->sym->session;
-		sessions[i + 2] = ops[i + 2]->sym->session;
-		sessions[i + 3] = ops[i + 3]->sym->session;
-
 		ops[i]->sym->session = sess0->sessions[slave_idx];
 		ops[i + 1]->sym->session = sess1->sessions[slave_idx];
 		ops[i + 2]->sym->session = sess2->sessions[slave_idx];
@@ -88,7 +82,6 @@ failover_slave_enqueue(struct scheduler_slave *slave, uint8_t slave_idx,
 	for (; i < nb_ops; i++) {
 		sess0 = (struct scheduler_session *)
 				ops[i]->sym->session->_private;
-		sessions[i] = ops[i]->sym->session;
 		ops[i]->sym->session = sess0->sessions[slave_idx];
 	}
 
@@ -96,9 +89,7 @@ failover_slave_enqueue(struct scheduler_slave *slave, uint8_t slave_idx,
 			slave->qp_id, ops, nb_ops);
 	slave->nb_inflight_cops += processed_ops;
 
-	if (unlikely(processed_ops < nb_ops))
-		for (i = processed_ops; i < nb_ops; i++)
-			ops[i]->sym->session = sessions[i];
+	RTE_ASSERT(processed_ops == nb_ops);
 
 	return processed_ops;
 }
@@ -106,22 +97,22 @@ failover_slave_enqueue(struct scheduler_slave *slave, uint8_t slave_idx,
 static uint16_t
 schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
 {
-	struct fo_scheduler_qp_ctx *qp_ctx =
-			((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
-	uint16_t enqueued_ops;
+	struct scheduler_qp_ctx *qp_ctx = qp;
+	struct fo_scheduler_qp_ctx *fo_qp_ctx = qp_ctx->private_qp_ctx;
 
 	if (unlikely(nb_ops == 0))
 		return 0;
 
-	enqueued_ops = failover_slave_enqueue(&qp_ctx->primary_slave,
-			PRIMARY_SLAVE_IDX, ops, nb_ops);
-
-	if (enqueued_ops < nb_ops)
-		enqueued_ops += failover_slave_enqueue(&qp_ctx->secondary_slave,
-				SECONDARY_SLAVE_IDX, &ops[enqueued_ops],
-				nb_ops - enqueued_ops);
+	if (fo_qp_ctx->primary_slave.nb_inflight_cops + nb_ops <
+			qp_ctx->max_nb_objs)
+		return failover_slave_enqueue(&fo_qp_ctx->primary_slave,
+				PRIMARY_SLAVE_IDX, ops, nb_ops);
 
-	return enqueued_ops;
+	return failover_slave_enqueue(&fo_qp_ctx->secondary_slave,
+			SECONDARY_SLAVE_IDX, ops, (fo_qp_ctx->secondary_slave.
+			nb_inflight_cops + nb_ops <= qp_ctx->max_nb_objs) ?
+			nb_ops : qp_ctx->max_nb_objs -
+			fo_qp_ctx->secondary_slave.nb_inflight_cops);
 }
diff --git a/drivers/crypto/scheduler/scheduler_roundrobin.c b/drivers/crypto/scheduler/scheduler_roundrobin.c
index 0116276..74ab5de 100644
--- a/drivers/crypto/scheduler/scheduler_roundrobin.c
+++ b/drivers/crypto/scheduler/scheduler_roundrobin.c
@@ -47,21 +47,24 @@ struct rr_scheduler_qp_ctx {
 static uint16_t
 schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
 {
-	struct rr_scheduler_qp_ctx *rr_qp_ctx =
-			((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
+	struct scheduler_qp_ctx *qp_ctx = qp;
+	struct rr_scheduler_qp_ctx *rr_qp_ctx = qp_ctx->private_qp_ctx;
 	uint32_t slave_idx = rr_qp_ctx->last_enq_slave_idx;
 	struct scheduler_slave *slave = &rr_qp_ctx->slaves[slave_idx];
 	uint16_t i, processed_ops;
-	struct rte_cryptodev_sym_session *sessions[nb_ops];
 	struct scheduler_session *sess0, *sess1, *sess2, *sess3;
+	uint16_t nb_ops_to_enq;
 
 	if (unlikely(nb_ops == 0))
 		return 0;
 
-	for (i = 0; i < nb_ops && i < 4; i++)
+	nb_ops_to_enq = slave->nb_inflight_cops + nb_ops > qp_ctx->max_nb_objs ?
+			qp_ctx->max_nb_objs - slave->nb_inflight_cops : nb_ops;
+
+	for (i = 0; i < nb_ops_to_enq && i < 4; i++)
 		rte_prefetch0(ops[i]->sym->session);
 
-	for (i = 0; (i < (nb_ops - 8)) && (nb_ops > 8); i += 4) {
+	for (i = 0; (i < (nb_ops_to_enq - 8)) && (nb_ops_to_enq > 8); i += 4) {
 		sess0 = (struct scheduler_session *)
 				ops[i]->sym->session->_private;
 		sess1 = (struct scheduler_session *)
@@ -71,11 +74,6 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
 		sess3 = (struct scheduler_session *)
 				ops[i+3]->sym->session->_private;
 
-		sessions[i] = ops[i]->sym->session;
-		sessions[i + 1] = ops[i + 1]->sym->session;
-		sessions[i + 2] = ops[i + 2]->sym->session;
-		sessions[i + 3] = ops[i + 3]->sym->session;
-
 		ops[i]->sym->session = sess0->sessions[slave_idx];
 		ops[i + 1]->sym->session = sess1->sessions[slave_idx];
 		ops[i + 2]->sym->session = sess2->sessions[slave_idx];
@@ -87,26 +85,21 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
 		rte_prefetch0(ops[i + 7]->sym->session);
 	}
 
-	for (; i < nb_ops; i++) {
+	for (; i < nb_ops_to_enq; i++) {
 		sess0 = (struct scheduler_session *)
 				ops[i]->sym->session->_private;
-		sessions[i] = ops[i]->sym->session;
 		ops[i]->sym->session = sess0->sessions[slave_idx];
 	}
 
 	processed_ops = rte_cryptodev_enqueue_burst(slave->dev_id,
-			slave->qp_id, ops, nb_ops);
+			slave->qp_id, ops, nb_ops_to_enq);
+	RTE_ASSERT(processed_ops == nb_ops_to_enq);
 	slave->nb_inflight_cops += processed_ops;
 
 	rr_qp_ctx->last_enq_slave_idx += 1;
-	rr_qp_ctx->last_enq_slave_idx %= rr_qp_ctx->nb_slaves;
-
-	/* recover session if enqueue is failed */
-	if (unlikely(processed_ops < nb_ops)) {
-		for (i = processed_ops; i < nb_ops; i++)
-			ops[i]->sym->session = sessions[i];
-	}
+	if (rr_qp_ctx->last_enq_slave_idx == rr_qp_ctx->nb_slaves)
+		rr_qp_ctx->last_enq_slave_idx = 0;
 
 	return processed_ops;
 }
-- 
2.9.4
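
Reviewer note (not part of the patch): below is a minimal standalone sketch
of the capacity prediction the patch relies on. The names max_nb_objs and
nb_inflight_cops mirror the scheduler PMD context fields used above; the
helper itself is hypothetical and only illustrates the clamping logic.

#include <stdint.h>

/*
 * Hypothetical illustration: instead of enqueueing blindly and restoring
 * backed-up sessions when the slave queue overflows, clamp the burst size
 * up front to the room left in the slave queue.
 */
static inline uint16_t
clamp_burst_size(uint16_t nb_inflight_cops, uint16_t nb_ops,
		uint16_t max_nb_objs)
{
	/* Whole burst fits in the remaining queue room: take it all. */
	if ((uint32_t)nb_inflight_cops + nb_ops <= max_nb_objs)
		return nb_ops;

	/* Otherwise only take what the slave queue can still absorb. */
	return max_nb_objs - nb_inflight_cops;
}

With the burst clamped this way, the patch expects
rte_cryptodev_enqueue_burst() to accept every submitted op, which is why it
can drop the session backup arrays and replace the recovery loop with an
RTE_ASSERT on the enqueued count.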