From mboxrd@z Thu Jan 1 00:00:00 1970
From: Fan Zhang
To: dev@dpdk.org
Cc: pablo.de.lara.guarch@intel.com
Date: Mon, 20 Feb 2017 16:17:05 +0000
Message-Id: <1487607425-76495-3-git-send-email-roy.fan.zhang@intel.com>
X-Mailer: git-send-email 2.7.4
In-Reply-To: <1487607425-76495-1-git-send-email-roy.fan.zhang@intel.com>
References: <1487607425-76495-1-git-send-email-roy.fan.zhang@intel.com>
Subject: [dpdk-dev] [PATCH 2/2] crypto/scheduler: update round-robin mode

Now that the reusable reorder functions are in place, update the
round-robin mode scheduler to use them instead of keeping its own
copy of the reordering logic.

Signed-off-by: Fan Zhang
---
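Reviewer note (text between the '---' above and the diff is ignored by
git am): with this change the ordering-mode enqueue and dequeue paths
become thin wrappers around the scheduler_reorder_* helpers introduced
in patch 1/2 of this series. The sketch below restates the new enqueue
path and spells out the helper contract this patch appears to rely on;
it is illustrative only, and the authoritative helper definitions are
in patch 1/2.

/*
 * Illustrative sketch, not part of the diff below. Assumed semantics of
 * the helpers from patch 1/2:
 *   scheduler_reorder_prepare(qp, ops, n)    - presumably tags the first n
 *       ops (via m_src->seqn) so their results can be reordered on dequeue
 *   scheduler_reorder_revert(qp, n)          - presumably releases the last
 *       n sequence numbers when the slave accepts fewer ops than prepared
 *   scheduler_reorder_drain(qp, ops, n, max) - presumably feeds the n
 *       dequeued ops into the reorder buffer and drains up to max in order
 */
static uint16_t
enqueue_ordering_sketch(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	uint16_t processed_ops;

	scheduler_reorder_prepare(qp, ops, nb_ops);
	processed_ops = schedule_enqueue(qp, ops, nb_ops);

	/* undo the sequence numbers of any ops the slave did not accept */
	if (processed_ops < nb_ops)
		scheduler_reorder_revert(qp, nb_ops - processed_ops);

	return processed_ops;
}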
2]->sym->m_src->seqn = gen_qp_ctx->seqn++; - ops[i + 3]->sym->session = sess3->sessions[slave_idx]; - ops[i + 3]->sym->m_src->seqn = gen_qp_ctx->seqn++; - - rte_prefetch0(ops[i + 4]->sym->session); - rte_prefetch0(ops[i + 4]->sym->m_src); - rte_prefetch0(ops[i + 5]->sym->session); - rte_prefetch0(ops[i + 5]->sym->m_src); - rte_prefetch0(ops[i + 6]->sym->session); - rte_prefetch0(ops[i + 6]->sym->m_src); - rte_prefetch0(ops[i + 7]->sym->session); - rte_prefetch0(ops[i + 7]->sym->m_src); - } - - for (; i < nb_ops; i++) { - sess0 = (struct scheduler_session *) - ops[i]->sym->session->_private; - sessions[i] = ops[i]->sym->session; - ops[i]->sym->session = sess0->sessions[slave_idx]; - ops[i]->sym->m_src->seqn = gen_qp_ctx->seqn++; - } - - processed_ops = rte_cryptodev_enqueue_burst(slave->dev_id, - slave->qp_id, ops, nb_ops); - - slave->nb_inflight_cops += processed_ops; - - rr_qp_ctx->last_enq_slave_idx += 1; - rr_qp_ctx->last_enq_slave_idx %= rr_qp_ctx->nb_slaves; - - /* recover session if enqueue is failed */ - if (unlikely(processed_ops < nb_ops)) { - for (i = processed_ops; i < nb_ops; i++) - ops[i]->sym->session = sessions[i]; - } + if (processed_ops < nb_ops) + scheduler_reorder_revert(qp, nb_ops < processed_ops); return processed_ops; } - static uint16_t schedule_dequeue(void *qp_ctx, struct rte_crypto_op **ops, uint16_t nb_ops) { @@ -230,108 +165,16 @@ schedule_dequeue(void *qp_ctx, struct rte_crypto_op **ops, uint16_t nb_ops) } static uint16_t -schedule_dequeue_ordering(void *qp_ctx, struct rte_crypto_op **ops, +schedule_dequeue_ordering(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops) { - struct scheduler_qp_ctx *gen_qp_ctx = (struct scheduler_qp_ctx *)qp_ctx; - struct rr_scheduler_qp_ctx *rr_qp_ctx = (gen_qp_ctx->private_qp_ctx); - struct scheduler_slave *slave; - struct rte_reorder_buffer *reorder_buff = gen_qp_ctx->reorder_buf; - struct rte_mbuf *mbuf0, *mbuf1, *mbuf2, *mbuf3; - uint16_t nb_deq_ops, nb_drained_mbufs; - const uint16_t nb_op_ops = nb_ops; - struct rte_crypto_op *op_ops[nb_op_ops]; - struct rte_mbuf *reorder_mbufs[nb_op_ops]; - uint32_t last_slave_idx = rr_qp_ctx->last_deq_slave_idx; - uint16_t i; - - if (unlikely(rr_qp_ctx->slaves[last_slave_idx].nb_inflight_cops == 0)) { - do { - last_slave_idx += 1; - - if (unlikely(last_slave_idx >= rr_qp_ctx->nb_slaves)) - last_slave_idx = 0; - /* looped back, means no inflight cops in the queue */ - if (last_slave_idx == rr_qp_ctx->last_deq_slave_idx) - return 0; - } while (rr_qp_ctx->slaves[last_slave_idx].nb_inflight_cops - == 0); - } - - slave = &rr_qp_ctx->slaves[last_slave_idx]; - - nb_deq_ops = rte_cryptodev_dequeue_burst(slave->dev_id, - slave->qp_id, op_ops, nb_ops); - - rr_qp_ctx->last_deq_slave_idx += 1; - rr_qp_ctx->last_deq_slave_idx %= rr_qp_ctx->nb_slaves; + struct scheduler_qp_ctx *gen_qp_ctx = qp; + uint16_t nb_deq_ops = gen_qp_ctx->nb_empty_bufs > nb_ops ? 
+ nb_ops : gen_qp_ctx->nb_empty_bufs > nb_ops; - slave->nb_inflight_cops -= nb_deq_ops; - - for (i = 0; i < nb_deq_ops && i < 4; i++) - rte_prefetch0(op_ops[i]->sym->m_src); - - for (i = 0; (i < (nb_deq_ops - 8)) && (nb_deq_ops > 8); i += 4) { - mbuf0 = op_ops[i]->sym->m_src; - mbuf1 = op_ops[i + 1]->sym->m_src; - mbuf2 = op_ops[i + 2]->sym->m_src; - mbuf3 = op_ops[i + 3]->sym->m_src; - - mbuf0->userdata = op_ops[i]; - mbuf1->userdata = op_ops[i + 1]; - mbuf2->userdata = op_ops[i + 2]; - mbuf3->userdata = op_ops[i + 3]; - - rte_reorder_insert(reorder_buff, mbuf0); - rte_reorder_insert(reorder_buff, mbuf1); - rte_reorder_insert(reorder_buff, mbuf2); - rte_reorder_insert(reorder_buff, mbuf3); - - rte_prefetch0(op_ops[i + 4]->sym->m_src); - rte_prefetch0(op_ops[i + 5]->sym->m_src); - rte_prefetch0(op_ops[i + 6]->sym->m_src); - rte_prefetch0(op_ops[i + 7]->sym->m_src); - } + nb_deq_ops = schedule_dequeue(qp, ops, nb_deq_ops); - for (; i < nb_deq_ops; i++) { - mbuf0 = op_ops[i]->sym->m_src; - mbuf0->userdata = op_ops[i]; - rte_reorder_insert(reorder_buff, mbuf0); - } - - nb_drained_mbufs = rte_reorder_drain(reorder_buff, reorder_mbufs, - nb_ops); - for (i = 0; i < nb_drained_mbufs && i < 4; i++) - rte_prefetch0(reorder_mbufs[i]); - - for (i = 0; (i < (nb_drained_mbufs - 8)) && (nb_drained_mbufs > 8); - i += 4) { - ops[i] = *(struct rte_crypto_op **)reorder_mbufs[i]->userdata; - ops[i + 1] = *(struct rte_crypto_op **) - reorder_mbufs[i + 1]->userdata; - ops[i + 2] = *(struct rte_crypto_op **) - reorder_mbufs[i + 2]->userdata; - ops[i + 3] = *(struct rte_crypto_op **) - reorder_mbufs[i + 3]->userdata; - - reorder_mbufs[i]->userdata = NULL; - reorder_mbufs[i + 1]->userdata = NULL; - reorder_mbufs[i + 2]->userdata = NULL; - reorder_mbufs[i + 3]->userdata = NULL; - - rte_prefetch0(reorder_mbufs[i + 4]); - rte_prefetch0(reorder_mbufs[i + 5]); - rte_prefetch0(reorder_mbufs[i + 6]); - rte_prefetch0(reorder_mbufs[i + 7]); - } - - for (; i < nb_drained_mbufs; i++) { - ops[i] = *(struct rte_crypto_op **) - reorder_mbufs[i]->userdata; - reorder_mbufs[i]->userdata = NULL; - } - - return nb_drained_mbufs; + return scheduler_reorder_drain(qp, ops, nb_deq_ops, nb_ops); } static int @@ -372,14 +215,14 @@ scheduler_start(struct rte_cryptodev *dev) rr_qp_ctx->last_enq_slave_idx = 0; rr_qp_ctx->last_deq_slave_idx = 0; + } - if (sched_ctx->reordering_enabled) { - qp_ctx->schedule_enqueue = &schedule_enqueue_ordering; - qp_ctx->schedule_dequeue = &schedule_dequeue_ordering; - } else { - qp_ctx->schedule_enqueue = &schedule_enqueue; - qp_ctx->schedule_dequeue = &schedule_dequeue; - } + if (sched_ctx->reordering_enabled) { + dev->enqueue_burst = &schedule_enqueue_ordering; + dev->dequeue_burst = &schedule_dequeue_ordering; + } else { + dev->enqueue_burst = &schedule_enqueue; + dev->dequeue_burst = &schedule_dequeue; } return 0; -- 2.7.4