From: Ciara Power <ciara.power@intel.com>
To: dev@dpdk.org
Cc: roy.fan.zhang@intel.com, kai.ji@intel.com,
	Ciara Power <ciara.power@intel.com>,
	Pablo de Lara <pablo.de.lara.guarch@intel.com>
Subject: [PATCH 1/3] crypto/ipsec_mb: add GCM SGL support to aesni_mb
Date: Thu, 7 Apr 2022 10:30:39 +0000
Message-Id: <20220407103041.4037942-2-ciara.power@intel.com>
X-Mailer: git-send-email 2.25.1
In-Reply-To: <20220407103041.4037942-1-ciara.power@intel.com>
References: <20220407103041.4037942-1-ciara.power@intel.com>

Add SGL support for the GCM algorithm through the JOB API.

This change supports in-place SGL, out-of-place (OOP) SGL in with
linear buffer (LB) out, and OOP SGL in with SGL out.

Feature flags are not added, as the PMD does not yet support SGL for
all other algorithms.

Signed-off-by: Ciara Power <ciara.power@intel.com>
---
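For reference, a minimal caller-side sketch of how an application might
exercise the new in-place SGL path (illustrative only, not part of the
patch: device, queue pair, mempool and AES-GCM session setup are assumed
to already exist, and the helper name enqueue_gcm_sgl_op and all of its
parameters are placeholders):

#include <string.h>

#include <rte_cryptodev.h>
#include <rte_mbuf.h>

/* Enqueue one in-place AES-GCM op on a two-segment mbuf chain, so that
 * m_src->nb_segs > 1 and the PMD takes the GCM SGL path added by this
 * patch. iv_off must match the iv.offset configured in the session's
 * aead xform.
 */
static int
enqueue_gcm_sgl_op(uint8_t dev_id, uint16_t qp_id,
		struct rte_mempool *mbuf_pool, struct rte_mempool *op_pool,
		struct rte_cryptodev_sym_session *sess,
		const uint8_t *iv, uint16_t iv_len, uint16_t iv_off,
		uint8_t *aad, rte_iova_t aad_iova,
		uint8_t *digest, rte_iova_t digest_iova)
{
	struct rte_mbuf *m1 = rte_pktmbuf_alloc(mbuf_pool);
	struct rte_mbuf *m2 = rte_pktmbuf_alloc(mbuf_pool);
	struct rte_crypto_op *op;

	if (m1 == NULL || m2 == NULL)
		return -1;

	/* Split the payload across two segments and chain them. */
	rte_pktmbuf_append(m1, 1024);
	rte_pktmbuf_append(m2, 1024);
	if (rte_pktmbuf_chain(m1, m2) < 0)
		return -1;

	op = rte_crypto_op_alloc(op_pool, RTE_CRYPTO_OP_TYPE_SYMMETRIC);
	if (op == NULL)
		return -1;
	rte_crypto_op_attach_sym_session(op, sess);

	op->sym->m_src = m1;			/* m_dst left NULL: in-place */
	op->sym->aead.data.offset = 0;
	op->sym->aead.data.length = rte_pktmbuf_pkt_len(m1);
	op->sym->aead.aad.data = aad;		/* AAD and digest stay linear */
	op->sym->aead.aad.phys_addr = aad_iova;
	op->sym->aead.digest.data = digest;
	op->sym->aead.digest.phys_addr = digest_iova;

	/* The IV lives in the op private area at the session's IV offset. */
	memcpy(rte_crypto_op_ctod_offset(op, uint8_t *, iv_off), iv, iv_len);

	return rte_cryptodev_enqueue_burst(dev_id, qp_id, &op, 1) == 1 ? 0 : -1;
}

For the OOP cases named in the commit message, op->sym->m_dst would
additionally be set to either a single linear mbuf (LB out) or another
mbuf chain (SGL out).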
 drivers/crypto/ipsec_mb/pmd_aesni_mb.c      | 144 +++++++++++++++++++-
 drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h |   2 +
 2 files changed, 142 insertions(+), 4 deletions(-)

diff --git a/drivers/crypto/ipsec_mb/pmd_aesni_mb.c b/drivers/crypto/ipsec_mb/pmd_aesni_mb.c
index afa0b6e3a4..09a0cc5ace 100644
--- a/drivers/crypto/ipsec_mb/pmd_aesni_mb.c
+++ b/drivers/crypto/ipsec_mb/pmd_aesni_mb.c
@@ -4,6 +4,11 @@
 
 #include "pmd_aesni_mb_priv.h"
 
+struct aesni_mb_op_buf_data {
+	struct rte_mbuf *m;
+	uint32_t offset;
+};
+
 /**
  * Calculate the authentication pre-computes
  *
@@ -1092,6 +1097,69 @@ set_cpu_mb_job_params(IMB_JOB *job, struct aesni_mb_session *session,
 	job->user_data = udata;
 }
 
+static int
+handle_aead_sgl_job(IMB_JOB *job, IMB_MGR *mb_mgr,
+		uint32_t *total_len,
+		struct aesni_mb_op_buf_data *src_data,
+		struct aesni_mb_op_buf_data *dst_data)
+{
+	uint32_t data_len, part_len;
+
+	if (*total_len == 0) {
+		job->sgl_state = IMB_SGL_COMPLETE;
+		return 0;
+	}
+
+	if (src_data->m == NULL) {
+		IPSEC_MB_LOG(ERR, "Invalid source buffer");
+		return -EINVAL;
+	}
+
+	job->sgl_state = IMB_SGL_UPDATE;
+
+	data_len = src_data->m->data_len - src_data->offset;
+
+	job->src = rte_pktmbuf_mtod_offset(src_data->m, uint8_t *,
+			src_data->offset);
+
+	if (dst_data->m != NULL) {
+		if (dst_data->m->data_len - dst_data->offset == 0) {
+			dst_data->m = dst_data->m->next;
+			if (dst_data->m == NULL) {
+				IPSEC_MB_LOG(ERR, "Invalid destination buffer");
+				return -EINVAL;
+			}
+			dst_data->offset = 0;
+		}
+		part_len = RTE_MIN(data_len, (dst_data->m->data_len -
+				dst_data->offset));
+		job->dst = rte_pktmbuf_mtod_offset(dst_data->m,
+				uint8_t *, dst_data->offset);
+		dst_data->offset += part_len;
+	} else {
+		part_len = RTE_MIN(data_len, *total_len);
+		job->dst = rte_pktmbuf_mtod_offset(src_data->m, uint8_t *,
+				src_data->offset);
+	}
+
+	job->msg_len_to_cipher_in_bytes = part_len;
+	job->msg_len_to_hash_in_bytes = part_len;
+
+	job = IMB_SUBMIT_JOB(mb_mgr);
+
+	*total_len -= part_len;
+
+	if (part_len != data_len) {
+		src_data->offset += part_len;
+	} else {
+		src_data->m = src_data->m->next;
+		src_data->offset = 0;
+	}
+
+	return 0;
+}
+
+
 /**
  * Process a crypto operation and complete a IMB_JOB job structure for
  * submission to the multi buffer library for processing.
@@ -1107,16 +1175,23 @@ set_cpu_mb_job_params(IMB_JOB *job, struct aesni_mb_session *session,
  */
 static inline int
 set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp,
-		struct rte_crypto_op *op, uint8_t *digest_idx)
+		struct rte_crypto_op *op, uint8_t *digest_idx,
+		IMB_MGR *mb_mgr)
 {
 	struct rte_mbuf *m_src = op->sym->m_src, *m_dst;
 	struct aesni_mb_qp_data *qp_data = ipsec_mb_get_qp_private_data(qp);
+	struct aesni_mb_op_buf_data src_sgl = {0};
+	struct aesni_mb_op_buf_data dst_sgl = {0};
 	struct aesni_mb_session *session;
 	uint32_t m_offset, oop;
 	uint32_t auth_off_in_bytes;
 	uint32_t ciph_off_in_bytes;
 	uint32_t auth_len_in_bytes;
 	uint32_t ciph_len_in_bytes;
+	uint32_t total_len;
+	IMB_JOB base_job;
+	uint8_t sgl = 0;
+	int ret;
 
 	session = ipsec_mb_get_session_private(qp, op);
 	if (session == NULL) {
@@ -1124,6 +1199,9 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp,
 		return -1;
 	}
 
+	if (op->sym->m_src->nb_segs > 1)
+		sgl = 1;
+
 	/* Set crypto operation */
 	job->chain_order = session->chain_order;
 
@@ -1175,6 +1253,11 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp,
 	if (session->cipher.mode == IMB_CIPHER_GCM) {
 		job->u.GCM.aad = op->sym->aead.aad.data;
 		job->u.GCM.aad_len_in_bytes = session->aead.aad_len;
+		if (sgl) {
+			job->u.GCM.ctx = &session->aead.gcm_sgl_ctx;
+			job->cipher_mode = IMB_CIPHER_GCM_SGL;
+			job->hash_alg = IMB_AUTH_GCM_SGL;
+		}
 	} else {
 		/* For GMAC */
 		job->u.GCM.aad = rte_pktmbuf_mtod_offset(m_src,
@@ -1278,8 +1361,13 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp,
 	job->iv_len_in_bytes = session->iv.length;
 
 	/* Data Parameters */
-	job->src = rte_pktmbuf_mtod(m_src, uint8_t *);
-	job->dst = rte_pktmbuf_mtod_offset(m_dst, uint8_t *, m_offset);
+	if (sgl) {
+		job->src = NULL;
+		job->dst = NULL;
+	} else {
+		job->src = rte_pktmbuf_mtod(m_src, uint8_t *);
+		job->dst = rte_pktmbuf_mtod_offset(m_dst, uint8_t *, m_offset);
+	}
 
 	switch (job->hash_alg) {
 	case IMB_AUTH_AES_CCM:
@@ -1305,6 +1393,13 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp,
 				session->iv.offset);
 		break;
 
+	case IMB_AUTH_GCM_SGL:
+		job->hash_start_src_offset_in_bytes = 0;
+		job->msg_len_to_hash_in_bytes = 0;
+		job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+				session->iv.offset);
+		break;
+
 	case IMB_AUTH_CHACHA20_POLY1305:
 		job->hash_start_src_offset_in_bytes =
 				op->sym->aead.data.offset;
@@ -1395,6 +1490,10 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp,
 			op->sym->aead.data.offset;
 		job->msg_len_to_cipher_in_bytes = op->sym->aead.data.length;
 		break;
+	case IMB_CIPHER_GCM_SGL:
+		job->msg_len_to_cipher_in_bytes = 0;
+		job->cipher_start_src_offset_in_bytes = 0;
+		break;
 	default:
 		job->cipher_start_src_offset_in_bytes =
 					op->sym->cipher.data.offset;
@@ -1410,6 +1509,43 @@ set_mb_job_params(IMB_JOB *job, struct ipsec_mb_qp *qp,
 	/* Set user data to be crypto operation data struct */
 	job->user_data = op;
 
+	if (sgl && job->cipher_mode == IMB_CIPHER_GCM_SGL) {
+		base_job = *job;
+		job = IMB_SUBMIT_JOB(mb_mgr);
+		total_len = op->sym->aead.data.length;
+
+		src_sgl.m = m_src;
+		src_sgl.offset = m_offset;
+
+		while (src_sgl.offset >= src_sgl.m->data_len) {
+			src_sgl.offset -= src_sgl.m->data_len;
+			src_sgl.m = src_sgl.m->next;
+
+			RTE_ASSERT(src_sgl.m != NULL);
+		}
+
+		if (oop) {
+			dst_sgl.m = m_dst;
+			dst_sgl.offset = m_offset;
+
+			while (dst_sgl.offset >= dst_sgl.m->data_len) {
+				dst_sgl.offset -= dst_sgl.m->data_len;
+				dst_sgl.m = dst_sgl.m->next;
+
+				RTE_ASSERT(dst_sgl.m != NULL);
+			}
+		}
+
+		while (job->sgl_state != IMB_SGL_COMPLETE) {
+			job = IMB_GET_NEXT_JOB(mb_mgr);
+			*job = base_job;
+			ret = handle_aead_sgl_job(job, mb_mgr, &total_len,
+					&src_sgl, &dst_sgl);
+			if (ret < 0)
+				return ret;
+		}
+	}
+
 	return 0;
 }
 
@@ -1776,7 +1912,7 @@ aesni_mb_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
 		else
 #endif
 			retval = set_mb_job_params(job, qp, op,
-					&digest_idx);
+					&digest_idx, mb_mgr);
 
 		if (unlikely(retval != 0)) {
 			qp->stats.dequeue_err_count++;
diff --git a/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h b/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h
index 6ddfce2285..1d1e9dde00 100644
--- a/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h
+++ b/drivers/crypto/ipsec_mb/pmd_aesni_mb_priv.h
@@ -946,6 +946,8 @@ struct aesni_mb_session {
 	struct {
 		/*
 		 * AAD data length
 		 */
 		uint16_t aad_len;
+
+		struct gcm_context_data gcm_sgl_ctx;
 	} aead;
 } __rte_cache_aligned;
-- 
2.25.1