From mboxrd@z Thu Jan 1 00:00:00 1970
From: Fiona Trahe <fiona.trahe@intel.com>
To: dev@dpdk.org
Cc: pablo.de.lara.guarch@intel.com, fiona.trahe@intel.com,
 tomaszx.jozwiak@intel.com
Date: Fri, 11 May 2018 12:13:48 +0100
Message-Id: <1526037249-25545-11-git-send-email-fiona.trahe@intel.com>
X-Mailer: git-send-email 1.7.0.7
In-Reply-To: <1523040732-3290-1-git-send-email-fiona.trahe@intel.com>
References: <1523040732-3290-1-git-send-email-fiona.trahe@intel.com>
Subject: [dpdk-dev] [PATCH v2 10/31] crypto/qat: move generic qp fn to qp file
List-Id: DPDK patches and discussions

Move the generic enqueue and dequeue functions from the qat_sym.c file
to the qat_qp.c file.
Move the generic qp structs to a new qat_qp.h file.

Signed-off-by: Fiona Trahe <fiona.trahe@intel.com>
---
 drivers/crypto/qat/qat_qp.c  | 152 +++++++++++++++++++++++++++++++++++++++++++
 drivers/crypto/qat/qat_qp.h  |  63 ++++++++++++++++++
 drivers/crypto/qat/qat_sym.c | 149 +-----------------------------------------
 drivers/crypto/qat/qat_sym.h |  49 --------------
 4 files changed, 216 insertions(+), 197 deletions(-)
 create mode 100644 drivers/crypto/qat/qat_qp.h

diff --git a/drivers/crypto/qat/qat_qp.c b/drivers/crypto/qat/qat_qp.c
index bae6cf114..56ea10242 100644
--- a/drivers/crypto/qat/qat_qp.c
+++ b/drivers/crypto/qat/qat_qp.c
@@ -13,7 +13,9 @@
 #include

 #include "qat_logs.h"
+#include "qat_qp.h"
 #include "qat_sym.h"
+
 #include "adf_transport_access_macros.h"

 #define ADF_MAX_SYM_DESC 4096
@@ -450,3 +452,153 @@ static void adf_configure_queues(struct qat_qp *qp)
 	WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr, queue->hw_bundle_number,
 			queue->hw_queue_number, queue_config);
 }
+
+
+static inline uint32_t adf_modulo(uint32_t data, uint32_t shift)
+{
+	uint32_t div = data >> shift;
+	uint32_t mult = div << shift;
+
+	return data - mult;
+}
+
+static inline void
+txq_write_tail(struct qat_qp *qp, struct qat_queue *q) {
+	WRITE_CSR_RING_TAIL(qp->mmap_bar_addr, q->hw_bundle_number,
+			q->hw_queue_number, q->tail);
+	q->nb_pending_requests = 0;
+	q->csr_tail = q->tail;
+}
+
+static inline
+void rxq_free_desc(struct qat_qp *qp, struct qat_queue *q)
+{
+	uint32_t old_head, new_head;
+	uint32_t max_head;
+
+	old_head = q->csr_head;
+	new_head = q->head;
+	max_head = qp->nb_descriptors * q->msg_size;
+
+	/* write out free descriptors */
+	void *cur_desc = (uint8_t *)q->base_addr + old_head;
+
+	if (new_head < old_head) {
+		memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, max_head - old_head);
+		memset(q->base_addr, ADF_RING_EMPTY_SIG_BYTE, new_head);
+	} else {
+		memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, new_head - old_head);
+	}
+	q->nb_processed_responses = 0;
+	q->csr_head = new_head;
+
+	/* write current head to CSR */
+	WRITE_CSR_RING_HEAD(qp->mmap_bar_addr, q->hw_bundle_number,
+			q->hw_queue_number, new_head);
+}
+
+uint16_t
+qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops)
+{
+	register struct qat_queue *queue;
+	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
+	register uint32_t nb_ops_sent = 0;
+	register int ret;
+	uint16_t nb_ops_possible = nb_ops;
+	register uint8_t *base_addr;
+	register uint32_t tail;
+	int overflow;
+
+	if (unlikely(nb_ops == 0))
+		return 0;
+
+	/* read params used a lot in main loop into registers */
+	queue = &(tmp_qp->tx_q);
+	base_addr = (uint8_t *)queue->base_addr;
+	tail = queue->tail;
+
+	/* Find how many can actually fit on the ring */
+	tmp_qp->inflights16 += nb_ops;
+	overflow = tmp_qp->inflights16 - queue->max_inflights;
+	if (overflow > 0) {
+		tmp_qp->inflights16 -= overflow;
+		nb_ops_possible = nb_ops - overflow;
+		if (nb_ops_possible == 0)
+			return 0;
+	}
+
+	while (nb_ops_sent != nb_ops_possible) {
+		ret = tmp_qp->build_request(*ops, base_addr + tail,
+				tmp_qp->op_cookies[tail / queue->msg_size],
+				tmp_qp->qat_dev_gen);
+		if (ret != 0) {
+			tmp_qp->stats.enqueue_err_count++;
+			/*
+			 * This message cannot be enqueued,
+			 * decrease number of ops that wasn't sent
+			 */
+			tmp_qp->inflights16 -= nb_ops_possible - nb_ops_sent;
+			if (nb_ops_sent == 0)
+				return 0;
+			goto kick_tail;
+		}
+
+		tail = adf_modulo(tail + queue->msg_size, queue->modulo);
+		ops++;
+		nb_ops_sent++;
+	}
+kick_tail:
+	queue->tail = tail;
+	tmp_qp->stats.enqueued_count += nb_ops_sent;
+	queue->nb_pending_requests += nb_ops_sent;
+	if (tmp_qp->inflights16 < QAT_CSR_TAIL_FORCE_WRITE_THRESH ||
+			queue->nb_pending_requests > QAT_CSR_TAIL_WRITE_THRESH) {
+		txq_write_tail(tmp_qp, queue);
+	}
+	return nb_ops_sent;
+}
+
+uint16_t
+qat_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops)
+{
+	struct qat_queue *rx_queue, *tx_queue;
+	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
+	uint32_t head;
+	uint32_t resp_counter = 0;
+	uint8_t *resp_msg;
+
+	rx_queue = &(tmp_qp->rx_q);
+	tx_queue = &(tmp_qp->tx_q);
+	head = rx_queue->head;
+	resp_msg = (uint8_t *)rx_queue->base_addr + rx_queue->head;
+
+	while (*(uint32_t *)resp_msg != ADF_RING_EMPTY_SIG &&
+			resp_counter != nb_ops) {
+
+		tmp_qp->process_response(ops, resp_msg,
+				tmp_qp->op_cookies[head / rx_queue->msg_size],
+				tmp_qp->qat_dev_gen);
+
+		head = adf_modulo(head + rx_queue->msg_size, rx_queue->modulo);
+
+		resp_msg = (uint8_t *)rx_queue->base_addr + head;
+		ops++;
+		resp_counter++;
+	}
+	if (resp_counter > 0) {
+		rx_queue->head = head;
+		tmp_qp->stats.dequeued_count += resp_counter;
+		rx_queue->nb_processed_responses += resp_counter;
+		tmp_qp->inflights16 -= resp_counter;
+
+		if (rx_queue->nb_processed_responses >
+				QAT_CSR_HEAD_WRITE_THRESH)
+			rxq_free_desc(tmp_qp, rx_queue);
+	}
+	/* also check if tail needs to be advanced */
+	if (tmp_qp->inflights16 <= QAT_CSR_TAIL_FORCE_WRITE_THRESH &&
+			tx_queue->tail != tx_queue->csr_tail) {
+		txq_write_tail(tmp_qp, tx_queue);
+	}
+	return resp_counter;
+}
diff --git a/drivers/crypto/qat/qat_qp.h b/drivers/crypto/qat/qat_qp.h
new file mode 100644
index 000000000..87d55c5f2
--- /dev/null
+++ b/drivers/crypto/qat/qat_qp.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+#ifndef _QAT_QP_H_
+#define _QAT_QP_H_
+
+#include "qat_common.h"
+
+typedef int (*build_request_t)(void *op,
+		uint8_t *req, void *op_cookie,
+		enum qat_device_gen qat_dev_gen);
+/**< Build a request from an op. */
+
+typedef int (*process_response_t)(void **ops,
+		uint8_t *resp, void *op_cookie,
+		enum qat_device_gen qat_dev_gen);
+/**< Process a response descriptor and return the associated op. */
+
+/**
+ * Structure associated with each queue.
+ */
+struct qat_queue {
+	char memz_name[RTE_MEMZONE_NAMESIZE];
+	void *base_addr;		/* Base address */
+	rte_iova_t base_phys_addr;	/* Queue physical address */
+	uint32_t head;			/* Shadow copy of the head */
+	uint32_t tail;			/* Shadow copy of the tail */
+	uint32_t modulo;
+	uint32_t msg_size;
+	uint16_t max_inflights;
+	uint32_t queue_size;
+	uint8_t hw_bundle_number;
+	uint8_t hw_queue_number;
+	/* HW queue aka ring offset on bundle */
+	uint32_t csr_head;		/* last written head value */
+	uint32_t csr_tail;		/* last written tail value */
+	uint16_t nb_processed_responses;
+	/* number of responses processed since last CSR head write */
+	uint16_t nb_pending_requests;
+	/* number of requests pending since last CSR tail write */
+};
+
+struct qat_qp {
+	void *mmap_bar_addr;
+	uint16_t inflights16;
+	struct qat_queue tx_q;
+	struct qat_queue rx_q;
+	struct rte_cryptodev_stats stats;
+	struct rte_mempool *op_cookie_pool;
+	void **op_cookies;
+	uint32_t nb_descriptors;
+	enum qat_device_gen qat_dev_gen;
+	build_request_t build_request;
+	process_response_t process_response;
+} __rte_cache_aligned;
+
+uint16_t
+qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops);
+
+uint16_t
+qat_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops);
+
+#endif /* _QAT_QP_H_ */
diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
index 2bae913a1..ab8ce2c96 100644
--- a/drivers/crypto/qat/qat_sym.c
+++ b/drivers/crypto/qat/qat_sym.c
@@ -14,6 +14,7 @@
 #include "qat_logs.h"
 #include "qat_sym_session.h"
 #include "qat_sym.h"
+#include "qat_qp.h"
 #include "adf_transport_access_macros.h"

 #define BYTE_LENGTH 8
@@ -83,8 +84,6 @@ bpi_cipher_decrypt(uint8_t *src, uint8_t *dst,
 /** Creates a context in either AES or DES in ECB mode
  * Depends on openssl libcrypto
  */
-static inline uint32_t
-adf_modulo(uint32_t data, uint32_t shift);

 static inline uint32_t
 qat_bpicipher_preprocess(struct qat_sym_session *ctx,
@@ -197,102 +196,6 @@ qat_bpicipher_postprocess(struct qat_sym_session *ctx,
 	return sym_op->cipher.data.length - last_block_len;
 }

-static inline void
-txq_write_tail(struct qat_qp *qp, struct qat_queue *q) {
-	WRITE_CSR_RING_TAIL(qp->mmap_bar_addr, q->hw_bundle_number,
-			q->hw_queue_number, q->tail);
-	q->nb_pending_requests = 0;
-	q->csr_tail = q->tail;
-}
-
-static uint16_t
-qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops)
-{
-	register struct qat_queue *queue;
-	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
-	register uint32_t nb_ops_sent = 0;
-	register int ret;
-	uint16_t nb_ops_possible = nb_ops;
-	register uint8_t *base_addr;
-	register uint32_t tail;
-	int overflow;
-
-	if (unlikely(nb_ops == 0))
-		return 0;
-
-	/* read params used a lot in main loop into registers */
-	queue = &(tmp_qp->tx_q);
-	base_addr = (uint8_t *)queue->base_addr;
-	tail = queue->tail;
-
-	/* Find how many can actually fit on the ring */
-	tmp_qp->inflights16 += nb_ops;
-	overflow = tmp_qp->inflights16 - queue->max_inflights;
-	if (overflow > 0) {
-		tmp_qp->inflights16 -= overflow;
-		nb_ops_possible = nb_ops - overflow;
-		if (nb_ops_possible == 0)
-			return 0;
-	}
-
-	while (nb_ops_sent != nb_ops_possible) {
-		ret = tmp_qp->build_request(*ops, base_addr + tail,
-				tmp_qp->op_cookies[tail / queue->msg_size],
-				tmp_qp->qat_dev_gen);
-		if (ret != 0) {
-			tmp_qp->stats.enqueue_err_count++;
-			/*
-			 * This message cannot be enqueued,
-			 * decrease number of ops that wasn't sent
-			 */
-			tmp_qp->inflights16 -= nb_ops_possible - nb_ops_sent;
-			if (nb_ops_sent == 0)
-				return 0;
-			goto kick_tail;
-		}
-
-		tail = adf_modulo(tail + queue->msg_size, queue->modulo);
-		ops++;
-		nb_ops_sent++;
-	}
-kick_tail:
-	queue->tail = tail;
-	tmp_qp->stats.enqueued_count += nb_ops_sent;
-	queue->nb_pending_requests += nb_ops_sent;
-	if (tmp_qp->inflights16 < QAT_CSR_TAIL_FORCE_WRITE_THRESH ||
-			queue->nb_pending_requests > QAT_CSR_TAIL_WRITE_THRESH) {
-		txq_write_tail(tmp_qp, queue);
-	}
-	return nb_ops_sent;
-}
-
-static inline
-void rxq_free_desc(struct qat_qp *qp, struct qat_queue *q)
-{
-	uint32_t old_head, new_head;
-	uint32_t max_head;
-
-	old_head = q->csr_head;
-	new_head = q->head;
-	max_head = qp->nb_descriptors * q->msg_size;
-
-	/* write out free descriptors */
-	void *cur_desc = (uint8_t *)q->base_addr + old_head;
-
-	if (new_head < old_head) {
-		memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, max_head - old_head);
-		memset(q->base_addr, ADF_RING_EMPTY_SIG_BYTE, new_head);
-	} else {
-		memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, new_head - old_head);
-	}
-	q->nb_processed_responses = 0;
-	q->csr_head = new_head;
-
-	/* write current head to CSR */
-	WRITE_CSR_RING_HEAD(qp->mmap_bar_addr, q->hw_bundle_number,
-			q->hw_queue_number, new_head);
-}
-
 uint16_t
 qat_sym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
 		uint16_t nb_ops)
@@ -336,49 +239,6 @@ qat_sym_process_response(void **op, uint8_t *resp,
 	return 0;
 }

-static uint16_t
-qat_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops)
-{
-	struct qat_queue *rx_queue, *tx_queue;
-	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
-	uint32_t head;
-	uint32_t resp_counter = 0;
-	uint8_t *resp_msg;
-
-	rx_queue = &(tmp_qp->rx_q);
-	tx_queue = &(tmp_qp->tx_q);
-	head = rx_queue->head;
-	resp_msg = (uint8_t *)rx_queue->base_addr + rx_queue->head;
-
-	while (*(uint32_t *)resp_msg != ADF_RING_EMPTY_SIG &&
-			resp_counter != nb_ops) {
-
-		tmp_qp->process_response(ops, resp_msg,
-				tmp_qp->op_cookies[head / rx_queue->msg_size],
-				tmp_qp->qat_dev_gen);
-
-		head = adf_modulo(head + rx_queue->msg_size, rx_queue->modulo);
-
-		resp_msg = (uint8_t *)rx_queue->base_addr + head;
-		ops++;
-		resp_counter++;
-	}
-	if (resp_counter > 0) {
-		rx_queue->head = head;
-		tmp_qp->stats.dequeued_count += resp_counter;
-		rx_queue->nb_processed_responses += resp_counter;
-		tmp_qp->inflights16 -= resp_counter;
-
-		if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH)
-			rxq_free_desc(tmp_qp, rx_queue);
-	}
-	/* also check if tail needs to be advanced */
-	if (tmp_qp->inflights16 <= QAT_CSR_TAIL_FORCE_WRITE_THRESH &&
-			tx_queue->tail != tx_queue->csr_tail) {
-		txq_write_tail(tmp_qp, tx_queue);
-	}
-	return resp_counter;
-}

 uint16_t
 qat_sym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
@@ -903,13 +763,6 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
 	return 0;
 }

-static inline uint32_t adf_modulo(uint32_t data, uint32_t shift)
-{
-	uint32_t div = data >> shift;
-	uint32_t mult = div << shift;
-
-	return data - mult;
-}

 void qat_sym_stats_get(struct rte_cryptodev *dev,
 		struct rte_cryptodev_stats *stats)
diff --git a/drivers/crypto/qat/qat_sym.h b/drivers/crypto/qat/qat_sym.h
index 279d3a3ae..39574eeb6 100644
--- a/drivers/crypto/qat/qat_sym.h
+++ b/drivers/crypto/qat/qat_sym.h
@@ -27,57 +27,8 @@
 #define QAT_CSR_TAIL_FORCE_WRITE_THRESH 256U
 /* number of inflights below which no tail write coalescing should occur */
-typedef int (*build_request_t)(void *op,
-		uint8_t *req, void *op_cookie,
-		enum qat_device_gen qat_dev_gen);
-/**< Build a request from an op. */
-
-typedef int (*process_response_t)(void **ops,
-		uint8_t *resp, void *op_cookie,
-		enum qat_device_gen qat_dev_gen);
-/**< Process a response descriptor and return the associated op. */
-
 struct qat_sym_session;

-/**
- * Structure associated with each queue.
- */
-struct qat_queue {
-	char memz_name[RTE_MEMZONE_NAMESIZE];
-	void *base_addr;		/* Base address */
-	rte_iova_t base_phys_addr;	/* Queue physical address */
-	uint32_t head;			/* Shadow copy of the head */
-	uint32_t tail;			/* Shadow copy of the tail */
-	uint32_t modulo;
-	uint32_t msg_size;
-	uint16_t max_inflights;
-	uint32_t queue_size;
-	uint8_t hw_bundle_number;
-	uint8_t hw_queue_number;
-	/* HW queue aka ring offset on bundle */
-	uint32_t csr_head;		/* last written head value */
-	uint32_t csr_tail;		/* last written tail value */
-	uint16_t nb_processed_responses;
-	/* number of responses processed since last CSR head write */
-	uint16_t nb_pending_requests;
-	/* number of requests pending since last CSR tail write */
-};
-
-struct qat_qp {
-	void *mmap_bar_addr;
-	uint16_t inflights16;
-	struct qat_queue tx_q;
-	struct qat_queue rx_q;
-	struct rte_cryptodev_stats stats;
-	struct rte_mempool *op_cookie_pool;
-	void **op_cookies;
-	uint32_t nb_descriptors;
-	enum qat_device_gen qat_dev_gen;
-	build_request_t build_request;
-	process_response_t process_response;
-} __rte_cache_aligned;
-
-
 int
 qat_sym_build_request(void *in_op, uint8_t *out_msg,
 		void *op_cookie, enum qat_device_gen qat_dev_gen);
-- 
2.13.6
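
After this split the division of labour is: qat_qp.c owns the generic ring
walk (adf_modulo() index wrapping, inflight accounting, and the coalesced
CSR head/tail writes), while each service supplies its descriptor handling
through the build_request/process_response pointers in struct qat_qp. The
sketch below shows how a service layer is expected to plug in. It is
illustrative only, not part of this patch: the bind and wrapper function
names and the stub response handler are invented for the example; only
qat_sym_build_request(), the struct qat_qp fields, and the
qat_enqueue_op_burst()/qat_dequeue_op_burst() prototypes come from the
code above.

#include <stddef.h>
#include <stdint.h>

#include <rte_crypto.h>

#include "qat_qp.h"
#include "qat_sym.h"

/*
 * Hypothetical stand-in for a service's response handler (the real
 * symmetric-crypto one lives in qat_sym.c): it must turn one response
 * descriptor back into the op that produced it.
 */
static int
example_process_response(void **op, uint8_t *resp, void *op_cookie,
		enum qat_device_gen qat_dev_gen)
{
	(void)resp;
	(void)op_cookie;
	(void)qat_dev_gen;
	*op = NULL;	/* a real handler recovers the op from resp/cookie */
	return 0;
}

/*
 * Illustrative qp binding: once the callbacks are registered, the
 * generic ring code never needs to know what an op actually is.
 */
static void
example_sym_qp_bind(struct qat_qp *qp, enum qat_device_gen gen)
{
	qp->qat_dev_gen = gen;
	qp->build_request = qat_sym_build_request; /* declared in qat_sym.h */
	qp->process_response = example_process_response;
}

/*
 * A service burst API then reduces to a cast onto the generic functions
 * exported by qat_qp.h, matching the qat_sym_pmd_enqueue_op_burst()
 * signature seen in the diff context above.
 */
static uint16_t
example_sym_enqueue_burst(void *qp, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	return qat_enqueue_op_burst(qp, (void **)ops, nb_ops);
}

Keeping the op type opaque (void **) in the generic layer is what allows
other QAT services to reuse the same ring walk without qat_qp.c ever
depending on rte_crypto types.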