From mboxrd@z Thu Jan  1 00:00:00 1970
From: Arek Kusztal
To: dev@dpdk.org
Cc: gakhil@marvell.com, roy.fan.zhang@intel.com, Arek Kusztal
Subject: [PATCH v2 5/5] crypto/qat: refactor asymmetric session
Date: Wed, 9 Feb 2022 14:00:20 +0000
Message-Id: <20220209140020.19365-6-arkadiuszx.kusztal@intel.com>
X-Mailer: git-send-email 2.17.1
In-Reply-To: <20220209140020.19365-1-arkadiuszx.kusztal@intel.com>
References: <20220209140020.19365-1-arkadiuszx.kusztal@intel.com>
List-Id: DPDK patches and discussions

This patch refactors asymmetric session handling in the Intel QuickAssist
Technology PMD and fixes some issues with the xform. The code is now more
scalable and easier to read.
Signed-off-by: Arek Kusztal
---
 drivers/crypto/qat/qat_asym.c | 441 ++++++++++++++++++++++++++++++++++++------
 drivers/crypto/qat/qat_asym.h |   2 +-
 drivers/crypto/qat/qat_ec.h   |   4 +-
 3 files changed, 380 insertions(+), 67 deletions(-)

diff --git a/drivers/crypto/qat/qat_asym.c b/drivers/crypto/qat/qat_asym.c
index 444c702572..8991b7c943 100644
--- a/drivers/crypto/qat/qat_asym.c
+++ b/drivers/crypto/qat/qat_asym.c
@@ -626,17 +626,17 @@ ecdsa_set_input(struct rte_crypto_asym_op *asym_op,
 		qat_req->output_param_count =
 				QAT_ASYM_ECDSA_RS_VERIFY_OUT_PARAMS;

-		HEXDUMP_OFF_F("e", 0);
-		HEXDUMP_OFF_F("s", 1);
-		HEXDUMP_OFF_F("r", 2);
-		HEXDUMP_OFF_F("n", 3);
-		HEXDUMP_OFF_F("xG", 4);
+		HEXDUMP_OFF_F("p", 0);
+		HEXDUMP_OFF_F("b", 1);
+		HEXDUMP_OFF_F("a", 2);
+		HEXDUMP_OFF_F("y", 3);
+		HEXDUMP_OFF_F("x", 4);
 		HEXDUMP_OFF_F("yG", 5);
-		HEXDUMP_OFF_F("xQ", 6);
-		HEXDUMP_OFF_F("yQ", 7);
-		HEXDUMP_OFF_F("a", 8);
-		HEXDUMP_OFF_F("b", 9);
-		HEXDUMP_OFF_F("q", 10);
+		HEXDUMP_OFF_F("xG", 6);
+		HEXDUMP_OFF_F("n", 7);
+		HEXDUMP_OFF_F("r", 8);
+		HEXDUMP_OFF_F("s", 9);
+		HEXDUMP_OFF_F("e", 10);
 		break;
 	default:
 		return -1;
@@ -649,7 +649,9 @@ static uint8_t
 ecdsa_collect(struct rte_crypto_asym_op *asym_op,
 		struct qat_asym_op_cookie *cookie)
 {
-	uint32_t alg_bytesize = RTE_ALIGN_CEIL(cookie->alg_bytesize, 8);
+	uint32_t alg_bytesize = cookie->alg_bytesize;
+	uint32_t qat_alg_bytesize = RTE_ALIGN_CEIL(cookie->alg_bytesize, 8);
+	uint32_t ltrim = qat_alg_bytesize - alg_bytesize;

 	if (asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_SIGN) {
 		uint8_t *r = asym_op->ecdsa.r.data;
@@ -657,8 +659,9 @@ ecdsa_collect(struct rte_crypto_asym_op *asym_op,
 		asym_op->ecdsa.r.length = alg_bytesize;
 		asym_op->ecdsa.s.length = alg_bytesize;
-		rte_memcpy(r, cookie->output_array[0], alg_bytesize);
-		rte_memcpy(s, cookie->output_array[1], alg_bytesize);
+		rte_memcpy(r, &cookie->output_array[0][ltrim], alg_bytesize);
+		rte_memcpy(s, &cookie->output_array[1][ltrim], alg_bytesize);
+
 		HEXDUMP("R", cookie->output_array[0],
 			alg_bytesize);
 		HEXDUMP("S", cookie->output_array[1],
@@ -692,19 +695,19 @@ ecpm_set_input(struct rte_crypto_asym_op *asym_op,
 	alg_bytesize = qat_function.bytesize;
 	qat_alg_bytesize = RTE_ALIGN_CEIL(alg_bytesize, 8);

-	SET_PKE_LN(cookie->input_array, asym_op->ecpm.scalar,
-			alg_bytesize, 0);
-	SET_PKE_LN(cookie->input_array, asym_op->ecpm.p.x,
-			alg_bytesize, 1);
-	SET_PKE_LN(cookie->input_array, asym_op->ecpm.p.y,
-			alg_bytesize, 2);
-	SET_PKE_LN_EC(cookie->input_array, curve[SECP256R1].a,
+	SET_PKE_LN_EC(cookie->input_array, asym_op->ecpm.scalar,
+			asym_op->ecpm.scalar.length, 0);
+	SET_PKE_LN_EC(cookie->input_array, asym_op->ecpm.p.x,
+			asym_op->ecpm.p.x.length, 1);
+	SET_PKE_LN_EC(cookie->input_array, asym_op->ecpm.p.y,
+			asym_op->ecpm.p.y.length, 2);
+	SET_PKE_LN_EC(cookie->input_array, curve[curve_id].a,
 			alg_bytesize, 3);
-	SET_PKE_LN_EC(cookie->input_array, curve[SECP256R1].b,
+	SET_PKE_LN_EC(cookie->input_array, curve[curve_id].b,
 			alg_bytesize, 4);
-	SET_PKE_LN_EC(cookie->input_array, curve[SECP256R1].p,
+	SET_PKE_LN_EC(cookie->input_array, curve[curve_id].p,
 			alg_bytesize, 5);
-	SET_PKE_LN_EC(cookie->input_array, curve[SECP256R1].h,
+	SET_PKE_LN_EC(cookie->input_array, curve[curve_id].h,
 			alg_bytesize, 6);

 	cookie->alg_bytesize = alg_bytesize;
@@ -729,14 +732,16 @@ static uint8_t
 ecpm_collect(struct rte_crypto_asym_op *asym_op,
 		struct qat_asym_op_cookie *cookie)
 {
-	uint8_t *r = asym_op->ecpm.r.x.data;
-	uint8_t *s = asym_op->ecpm.r.y.data;
+	uint8_t *x = asym_op->ecpm.r.x.data;
+	uint8_t *y = asym_op->ecpm.r.y.data;
 	uint32_t alg_bytesize = cookie->alg_bytesize;
+	uint32_t qat_alg_bytesize = RTE_ALIGN_CEIL(cookie->alg_bytesize, 8);
+	uint32_t ltrim = qat_alg_bytesize - alg_bytesize;

 	asym_op->ecpm.r.x.length = alg_bytesize;
 	asym_op->ecpm.r.y.length = alg_bytesize;
-	rte_memcpy(r, cookie->output_array[0], alg_bytesize);
-	rte_memcpy(s, cookie->output_array[1], alg_bytesize);
+	rte_memcpy(x, &cookie->output_array[0][ltrim], alg_bytesize);
+	rte_memcpy(y, &cookie->output_array[1][ltrim], alg_bytesize);

 	HEXDUMP("rX", cookie->output_array[0],
 		alg_bytesize);
@@ -784,29 +789,38 @@ qat_asym_build_request(void *in_op, uint8_t *out_msg, void *op_cookie,
 			(struct icp_qat_fw_pke_request *)out_msg;
 	struct qat_asym_op_cookie *cookie =
 			(struct qat_asym_op_cookie *)op_cookie;
+	struct rte_crypto_asym_xform *xform;
+	struct qat_asym_session *qat_session = (struct qat_asym_session *)
+			get_asym_session_private_data(
+			op->asym->session, qat_asym_driver_id);
 	int err = 0;

+	if (unlikely(qat_session == NULL)) {
+		QAT_DP_LOG(ERR, "Session was not created for this device");
+		goto error;
+	}
+
 	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
 	switch (op->sess_type) {
 	case RTE_CRYPTO_OP_WITH_SESSION:
-		QAT_LOG(ERR,
-			"QAT asymmetric crypto PMD does not support session"
-			);
-		goto error;
+		request_init(qat_req);
+		xform = &qat_session->xform;
+		break;
 	case RTE_CRYPTO_OP_SESSIONLESS:
 		request_init(qat_req);
-		err = asym_set_input(asym_op, qat_req, cookie,
-				op->asym->xform);
-		if (err) {
-			op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
-			goto error;
-		}
+		xform = op->asym->xform;
 		break;
 	default:
 		QAT_DP_LOG(ERR, "Invalid session/xform settings");
 		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
 		goto error;
 	}
+	err = asym_set_input(asym_op, qat_req, cookie,
+			xform);
+	if (err) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		goto error;
+	}

 	qat_req->pke_mid.opaque = (uint64_t)(uintptr_t)op;
 	qat_req->pke_mid.src_data_addr = cookie->input_addr;
@@ -827,11 +841,11 @@ qat_asym_build_request(void *in_op, uint8_t *out_msg, void *op_cookie,
 }

 static uint8_t
-qat_asym_collect_response(struct rte_crypto_op *rx_op,
+qat_asym_collect_response(struct rte_crypto_op *op,
 		struct qat_asym_op_cookie *cookie,
 		struct rte_crypto_asym_xform *xform)
 {
-	struct rte_crypto_asym_op *asym_op = rx_op->asym;
+	struct rte_crypto_asym_op *asym_op = op->asym;

 	switch (xform->xform_type) {
 	case RTE_CRYPTO_ASYM_XFORM_MODEX:
@@ -851,66 +865,365 @@ qat_asym_collect_response(struct rte_crypto_op *rx_op,
 }

 void
-qat_asym_process_response(void **op, uint8_t *resp,
+qat_asym_process_response(void **out_op, uint8_t *resp,
 		void *op_cookie)
 {
 	struct icp_qat_fw_pke_resp *resp_msg =
 			(struct icp_qat_fw_pke_resp *)resp;
-	struct rte_crypto_op *rx_op = (struct rte_crypto_op *)(uintptr_t)
+	struct rte_crypto_op *op = (struct rte_crypto_op *)(uintptr_t)
 			(resp_msg->opaque);
 	struct qat_asym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_asym_xform *xform;
+	struct qat_asym_session *qat_session = (struct qat_asym_session *)
+			get_asym_session_private_data(
+			op->asym->session, qat_asym_driver_id);

 	if (cookie->error) {
 		cookie->error = 0;
-		if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
-			rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		if (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
+			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
 		QAT_DP_LOG(ERR, "Cookie status returned error");
 	} else {
 		if (ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
 			resp_msg->pke_resp_hdr.resp_status.pke_resp_flags)) {
-			if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
-				rx_op->status =
-					RTE_CRYPTO_OP_STATUS_ERROR;
+			if (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
+				op->status = RTE_CRYPTO_OP_STATUS_ERROR;
 			QAT_DP_LOG(ERR, "Asymmetric response status"
 				" returned error");
 		}
 		if (resp_msg->pke_resp_hdr.resp_status.comn_err_code) {
-			if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
-				rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+			if (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
+				op->status = RTE_CRYPTO_OP_STATUS_ERROR;
 			QAT_DP_LOG(ERR, "Asymmetric common status"
 				" returned error");
 		}
 	}
-	if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED) {
-		rx_op->status = qat_asym_collect_response(rx_op,
-					cookie, rx_op->asym->xform);
-		cleanup(cookie, rx_op->asym->xform,
-					cookie->alg_bytesize);
+
+	switch (op->sess_type) {
+	case RTE_CRYPTO_OP_WITH_SESSION:
+		xform = &qat_session->xform;
+		break;
+	case RTE_CRYPTO_OP_SESSIONLESS:
+		xform = op->asym->xform;
+		break;
+	default:
+		QAT_DP_LOG(ERR,
+			"Invalid session/xform settings in response ring!");
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+	}
+
+	if (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED) {
+		op->status = qat_asym_collect_response(op,
+				cookie, xform);
+		cleanup(cookie, xform, cookie->alg_bytesize);
 	}
-	*op = rx_op;
+	*out_op = op;

 	HEXDUMP("resp_msg:", resp_msg, sizeof(struct icp_qat_fw_pke_resp));
 }

+static int
+session_set_modexp(struct qat_asym_session *qat_session,
+			struct rte_crypto_asym_xform *xform)
+{
+	uint8_t *modulus = xform->modex.modulus.data;
+	uint8_t *exponent = xform->modex.exponent.data;
+
+	qat_session->xform.modex.modulus.data =
+		rte_malloc(NULL, xform->modex.modulus.length, 0);
+	if (qat_session->xform.modex.modulus.data == NULL)
+		return -ENOMEM;
+	qat_session->xform.modex.modulus.length = xform->modex.modulus.length;
+	qat_session->xform.modex.exponent.data = rte_malloc(NULL,
+				xform->modex.exponent.length, 0);
+	if (qat_session->xform.modex.exponent.data == NULL) {
+		rte_free(qat_session->xform.modex.modulus.data);
+		return -ENOMEM;
+	}
+	qat_session->xform.modex.exponent.length = xform->modex.exponent.length;
+
+	rte_memcpy(qat_session->xform.modex.modulus.data, modulus,
+			xform->modex.modulus.length);
+	rte_memcpy(qat_session->xform.modex.exponent.data, exponent,
+			xform->modex.exponent.length);
+
+	return 0;
+}
+
+static int
+session_set_modinv(struct qat_asym_session *qat_session,
+			struct rte_crypto_asym_xform *xform)
+{
+	uint8_t *modulus = xform->modinv.modulus.data;
+
+	qat_session->xform.modinv.modulus.data =
+		rte_malloc(NULL, xform->modinv.modulus.length, 0);
+	if (qat_session->xform.modinv.modulus.data == NULL)
+		return -ENOMEM;
+	qat_session->xform.modinv.modulus.length = xform->modinv.modulus.length;
+
+	rte_memcpy(qat_session->xform.modinv.modulus.data, modulus,
+			xform->modinv.modulus.length);
+
+	return 0;
+}
+
+static int
+session_set_rsa(struct qat_asym_session *qat_session,
+			struct rte_crypto_asym_xform *xform)
+{
+	uint8_t *n = xform->rsa.n.data;
+	uint8_t *e = xform->rsa.e.data;
+	int ret = 0;
+
+	qat_session->xform.rsa.key_type = xform->rsa.key_type;
+
+	qat_session->xform.rsa.n.data =
+		rte_malloc(NULL, xform->rsa.n.length, 0);
+	if (qat_session->xform.rsa.n.data == NULL)
+		return -ENOMEM;
+	qat_session->xform.rsa.n.length =
+		xform->rsa.n.length;
+
+	qat_session->xform.rsa.e.data =
+		rte_malloc(NULL, xform->rsa.e.length, 0);
+	if (qat_session->xform.rsa.e.data == NULL) {
+		ret = -ENOMEM;
+		goto err;
+	}
+	qat_session->xform.rsa.e.length =
+		xform->rsa.e.length;
+
+	if (xform->rsa.key_type == RTE_RSA_KET_TYPE_QT) {
+		uint8_t *p = xform->rsa.qt.p.data;
+		uint8_t *q = xform->rsa.qt.q.data;
+		uint8_t *dP = xform->rsa.qt.dP.data;
+		uint8_t *dQ = xform->rsa.qt.dQ.data;
+		uint8_t *qInv = xform->rsa.qt.qInv.data;
+
+		qat_session->xform.rsa.qt.p.data =
+			rte_malloc(NULL, xform->rsa.qt.p.length, 0);
+		if (qat_session->xform.rsa.qt.p.data == NULL) {
+			ret = -ENOMEM;
+			goto err;
+		}
+		qat_session->xform.rsa.qt.p.length =
+			xform->rsa.qt.p.length;
+
+		qat_session->xform.rsa.qt.q.data =
+			rte_malloc(NULL, xform->rsa.qt.q.length, 0);
+		if (qat_session->xform.rsa.qt.q.data == NULL) {
+			ret = -ENOMEM;
+			goto err;
+		}
+		qat_session->xform.rsa.qt.q.length =
+			xform->rsa.qt.q.length;
+
+		qat_session->xform.rsa.qt.dP.data =
+			rte_malloc(NULL, xform->rsa.qt.dP.length, 0);
+		if (qat_session->xform.rsa.qt.dP.data == NULL) {
+			ret = -ENOMEM;
+			goto err;
+		}
+		qat_session->xform.rsa.qt.dP.length =
+			xform->rsa.qt.dP.length;
+
+		qat_session->xform.rsa.qt.dQ.data =
+			rte_malloc(NULL, xform->rsa.qt.dQ.length, 0);
+		if (qat_session->xform.rsa.qt.dQ.data == NULL) {
+			ret = -ENOMEM;
+			goto err;
+		}
+		qat_session->xform.rsa.qt.dQ.length =
+			xform->rsa.qt.dQ.length;
+
+		qat_session->xform.rsa.qt.qInv.data =
+			rte_malloc(NULL, xform->rsa.qt.qInv.length, 0);
+		if (qat_session->xform.rsa.qt.qInv.data == NULL) {
+			ret = -ENOMEM;
+			goto err;
+		}
+		qat_session->xform.rsa.qt.qInv.length =
+			xform->rsa.qt.qInv.length;
+
+		rte_memcpy(qat_session->xform.rsa.qt.p.data, p,
+				xform->rsa.qt.p.length);
+		rte_memcpy(qat_session->xform.rsa.qt.q.data, q,
+				xform->rsa.qt.q.length);
+		rte_memcpy(qat_session->xform.rsa.qt.dP.data, dP,
+				xform->rsa.qt.dP.length);
+		rte_memcpy(qat_session->xform.rsa.qt.dQ.data, dQ,
+				xform->rsa.qt.dQ.length);
+		rte_memcpy(qat_session->xform.rsa.qt.qInv.data, qInv,
+				xform->rsa.qt.qInv.length);
+
+	} else {
+		uint8_t *d = xform->rsa.d.data;
+
+		qat_session->xform.rsa.d.data =
+			rte_malloc(NULL, xform->rsa.d.length, 0);
+		if (qat_session->xform.rsa.d.data == NULL) {
+			ret = -ENOMEM;
+			goto err;
+		}
+		qat_session->xform.rsa.d.length =
+			xform->rsa.d.length;
+		rte_memcpy(qat_session->xform.rsa.d.data, d,
+				xform->rsa.d.length);
+	}
+
+	rte_memcpy(qat_session->xform.rsa.n.data, n,
+			xform->rsa.n.length);
+	rte_memcpy(qat_session->xform.rsa.e.data, e,
+			xform->rsa.e.length);
+
+	return 0;
+
+err:
+	rte_free(qat_session->xform.rsa.n.data);
+	rte_free(qat_session->xform.rsa.e.data);
+	rte_free(qat_session->xform.rsa.d.data);
+	rte_free(qat_session->xform.rsa.qt.p.data);
+	rte_free(qat_session->xform.rsa.qt.q.data);
+	rte_free(qat_session->xform.rsa.qt.dP.data);
+	rte_free(qat_session->xform.rsa.qt.dQ.data);
+	rte_free(qat_session->xform.rsa.qt.qInv.data);
+	return ret;
+}
+
+static void
+session_set_ecdsa(struct qat_asym_session *qat_session,
+			struct rte_crypto_asym_xform *xform)
+{
+	qat_session->xform.ec.curve_id = xform->ec.curve_id;
+}
+
 int
-qat_asym_session_configure(struct rte_cryptodev *dev __rte_unused,
-		struct rte_crypto_asym_xform *xform __rte_unused,
-		struct rte_cryptodev_asym_session *sess __rte_unused,
-		struct rte_mempool *mempool __rte_unused)
+qat_asym_session_configure(struct rte_cryptodev *dev,
+		struct rte_crypto_asym_xform *xform,
+		struct rte_cryptodev_asym_session *session,
+		struct rte_mempool *mempool)
 {
-	QAT_LOG(ERR, "QAT asymmetric PMD currently does not support session");
-	return -ENOTSUP;
+	struct qat_asym_session *qat_session;
+	int ret = 0;
+
+	if (rte_mempool_get(mempool, (void *)&qat_session)) {
+		QAT_LOG(ERR, "Cannot allocate asymmetric crypto session");
+		return -ENOMEM;
+	}
+
+	memset(qat_session, 0, sizeof(*qat_session));
+
+	qat_session->xform.xform_type = xform->xform_type;
+	switch (xform->xform_type) {
+	case RTE_CRYPTO_ASYM_XFORM_MODEX:
+		ret = session_set_modexp(qat_session, xform);
+		break;
+	case RTE_CRYPTO_ASYM_XFORM_MODINV:
+		ret = session_set_modinv(qat_session, xform);
+		break;
+	case RTE_CRYPTO_ASYM_XFORM_RSA:
+		ret = session_set_rsa(qat_session, xform);
+		break;
+	case RTE_CRYPTO_ASYM_XFORM_ECDSA:
+	case RTE_CRYPTO_ASYM_XFORM_ECPM:
+		session_set_ecdsa(qat_session, xform);
+		break;
+	default:
+		ret = -ENOTSUP;
+	}
+
+	if (ret) {
+		QAT_LOG(ERR, "Unsupported xform type");
+		rte_mempool_put(mempool, qat_session);
+		return ret;
+	}
+
+	set_asym_session_private_data(session, dev->driver_id,
+			(void *)qat_session);
+	return 0;
 }

 unsigned int
 qat_asym_session_get_private_size(struct rte_cryptodev *dev __rte_unused)
 {
-	QAT_LOG(ERR, "QAT asymmetric PMD currently does not support session");
-	return 0;
+	return RTE_ALIGN_CEIL(sizeof(struct qat_asym_session), 8);
+}
+
+static void
+session_clear_modexp(struct rte_crypto_modex_xform *modex)
+{
+	memset(modex->modulus.data, 0, modex->modulus.length);
+	rte_free(modex->modulus.data);
+	memset(modex->exponent.data, 0, modex->exponent.length);
+	rte_free(modex->exponent.data);
+}
+
+static void
+session_clear_modinv(struct rte_crypto_modinv_xform *modinv)
+{
+	memset(modinv->modulus.data, 0, modinv->modulus.length);
+	rte_free(modinv->modulus.data);
+}
+
+static void
+session_clear_rsa(struct rte_crypto_rsa_xform *rsa)
+{
+	memset(rsa->n.data, 0, rsa->n.length);
+	rte_free(rsa->n.data);
+	memset(rsa->e.data, 0, rsa->e.length);
+	rte_free(rsa->e.data);
+	if (rsa->key_type == RTE_RSA_KEY_TYPE_EXP) {
+		memset(rsa->d.data, 0, rsa->d.length);
+		rte_free(rsa->d.data);
+	} else {
+		memset(rsa->qt.p.data, 0, rsa->qt.p.length);
+		rte_free(rsa->qt.p.data);
+		memset(rsa->qt.q.data, 0, rsa->qt.q.length);
+		rte_free(rsa->qt.q.data);
+		memset(rsa->qt.dP.data, 0, rsa->qt.dP.length);
+		rte_free(rsa->qt.dP.data);
+		memset(rsa->qt.dQ.data, 0, rsa->qt.dQ.length);
+		rte_free(rsa->qt.dQ.data);
+		memset(rsa->qt.qInv.data, 0, rsa->qt.qInv.length);
+		rte_free(rsa->qt.qInv.data);
+	}
+}
+
+static void
+session_clear_xform(struct qat_asym_session *qat_session)
+{
+	switch (qat_session->xform.xform_type) {
+	case RTE_CRYPTO_ASYM_XFORM_MODEX:
+		session_clear_modexp(&qat_session->xform.modex);
+		break;
+	case RTE_CRYPTO_ASYM_XFORM_MODINV:
+		session_clear_modinv(&qat_session->xform.modinv);
+		break;
+	case RTE_CRYPTO_ASYM_XFORM_RSA:
+		session_clear_rsa(&qat_session->xform.rsa);
+		break;
+	default:
+		break;
+	}
 }

 void
-qat_asym_session_clear(struct rte_cryptodev *dev __rte_unused,
-		struct rte_cryptodev_asym_session *sess __rte_unused)
+qat_asym_session_clear(struct rte_cryptodev *dev,
+		struct rte_cryptodev_asym_session *session)
 {
-	QAT_LOG(ERR, "QAT asymmetric PMD currently does not support session");
+	uint8_t index = dev->driver_id;
+	void *sess_priv = get_asym_session_private_data(session, index);
+	struct qat_asym_session *qat_session =
+		(struct qat_asym_session *)sess_priv;
+
+	if (sess_priv) {
+		session_clear_xform(qat_session);
+		memset(qat_session, 0, qat_asym_session_get_private_size(dev));
+		struct rte_mempool *mempool = rte_mempool_from_obj(sess_priv);
+
+		set_asym_session_private_data(session, index, NULL);
+		rte_mempool_put(mempool, sess_priv);
+	}
 }
diff --git a/drivers/crypto/qat/qat_asym.h b/drivers/crypto/qat/qat_asym.h
index e05ed7fc86..4291a77df7 100644
--- a/drivers/crypto/qat/qat_asym.h
+++ b/drivers/crypto/qat/qat_asym.h
@@ -48,7 +48,7 @@ struct qat_asym_op_cookie {

 struct qat_asym_session {
 	struct icp_qat_fw_pke_request req_tmpl;
-	struct rte_crypto_asym_xform *xform;
+	struct rte_crypto_asym_xform xform;
 };

 int
diff --git a/drivers/crypto/qat/qat_ec.h b/drivers/crypto/qat/qat_ec.h
index c2f3ce93d2..a310e3f4d3 100644
--- a/drivers/crypto/qat/qat_ec.h
+++ b/drivers/crypto/qat/qat_ec.h
@@ -31,7 +31,7 @@ struct elliptic_curve {
 	buffer h;
 };

-static struct elliptic_curve __rte_unused curve[] = {
+static struct elliptic_curve curve[] = {
 	[SECP256R1] = {
 		.name = "secp256r1",
 		.bytesize = 32,
@@ -190,7 +190,7 @@ static struct elliptic_curve __rte_unused curve[] = {
 	}
 };

-static int __rte_unused
+static int
 pick_curve(struct rte_crypto_asym_xform *xform)
 {
 	switch (xform->ec.curve_id) {
-- 
2.13.6
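
Not part of the patch: a minimal usage sketch of the new session path for a
modular exponentiation, assuming the DPDK 21.11-style two-step asymmetric
session API (rte_cryptodev_asym_session_create/_init) and a QAT device that
is already configured and started. The names dev_id, asym_sess_mp, op_mp and
modexp_with_session are illustrative only, and error/cleanup handling is
abbreviated.

#include <errno.h>

#include <rte_crypto_asym.h>
#include <rte_cryptodev.h>
#include <rte_mempool.h>

static int
modexp_with_session(uint8_t dev_id, struct rte_mempool *asym_sess_mp,
		struct rte_mempool *op_mp,
		struct rte_crypto_asym_xform *modex_xform,
		rte_crypto_param base, rte_crypto_param result)
{
	struct rte_cryptodev_asym_session *sess;
	struct rte_crypto_op *op;

	/* The PMD now deep-copies the modulus/exponent into session data. */
	sess = rte_cryptodev_asym_session_create(asym_sess_mp);
	if (sess == NULL)
		return -ENOMEM;
	if (rte_cryptodev_asym_session_init(dev_id, sess, modex_xform,
			asym_sess_mp) != 0)
		return -EINVAL;

	op = rte_crypto_op_alloc(op_mp, RTE_CRYPTO_OP_TYPE_ASYMMETRIC);
	if (op == NULL)
		return -ENOMEM;
	op->asym->modex.base = base;		/* input operand */
	op->asym->modex.result = result;	/* caller-provided output buffer */
	rte_crypto_op_attach_asym_session(op, sess);

	/* Blocking single-op enqueue/dequeue on queue pair 0, for brevity. */
	if (rte_cryptodev_enqueue_burst(dev_id, 0, &op, 1) != 1)
		return -EIO;
	while (rte_cryptodev_dequeue_burst(dev_id, 0, &op, 1) == 0)
		;

	return op->status == RTE_CRYPTO_OP_STATUS_SUCCESS ? 0 : -EIO;
}

Because session_set_modexp() copies the modulus and exponent into the
session's private data, the application no longer has to keep its own xform
buffers alive for the lifetime of the session, and the same session can be
attached to many operations.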