From: Stephen Hemminger <stephen@networkplumber.org>
To: dev@dpdk.org
Cc: Stephen Hemminger, Hemant Agrawal, Sunil Uttarwar, Ankur Dwivedi,
 Anoob Joseph, Tejasree Kondoj, Gagandeep Singh, Kai Ji, Pablo de Lara
Subject: [PATCH v5 18/32] crypto: replace use of fixed size rte_memcpy
Date: Tue, 21 May 2024 20:27:45 -0700
Message-ID: <20240522033009.143100-19-stephen@networkplumber.org>
X-Mailer: git-send-email 2.43.0
In-Reply-To: <20240522033009.143100-1-stephen@networkplumber.org>
References: <20240403163432.437275-1-stephen@networkplumber.org>
 <20240522033009.143100-1-stephen@networkplumber.org>
MIME-Version: 1.0
Content-Transfer-Encoding: 8bit

Automatically generated by devtools/cocci/rte_memcpy.cocci

Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
Acked-by: Hemant Agrawal
---
 drivers/crypto/ccp/ccp_crypto.c               | 14 ++---
 drivers/crypto/cnxk/cnxk_cryptodev_ops.c      |  2 +-
 drivers/crypto/cnxk/cnxk_se.h                 |  2 +-
 drivers/crypto/dpaa_sec/dpaa_sec.c            |  2 +-
 drivers/crypto/ipsec_mb/pmd_snow3g.c          |  4 +-
 drivers/crypto/qat/qat_sym_session.c          | 52 +++++++++----------
 .../scheduler/rte_cryptodev_scheduler.c       |  6 +--
 drivers/crypto/scheduler/scheduler_failover.c | 12 ++---
 8 files changed, 44 insertions(+), 50 deletions(-)

diff --git a/drivers/crypto/ccp/ccp_crypto.c b/drivers/crypto/ccp/ccp_crypto.c
index 4b84b3303e..4a70bc5d6e 100644
--- a/drivers/crypto/ccp/ccp_crypto.c
+++ b/drivers/crypto/ccp/ccp_crypto.c
@@ -168,7 +168,7 @@ static int partial_hash_sha1(uint8_t *data_in, uint8_t *data_out)
 	if (!SHA1_Init(&ctx))
 		return -EFAULT;
 	SHA1_Transform(&ctx, data_in);
-	rte_memcpy(data_out, &ctx, SHA_DIGEST_LENGTH);
+	memcpy(data_out, &ctx, SHA_DIGEST_LENGTH);
 	return 0;
 }
 
@@ -179,8 +179,7 @@ static int partial_hash_sha224(uint8_t *data_in, uint8_t *data_out)
 	if (!SHA224_Init(&ctx))
 		return -EFAULT;
 	SHA256_Transform(&ctx, data_in);
-	rte_memcpy(data_out, &ctx,
-		   SHA256_DIGEST_LENGTH);
+	memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
 	return 0;
 }
 
@@ -191,8 +190,7 @@ static int partial_hash_sha256(uint8_t *data_in, uint8_t *data_out)
 	if (!SHA256_Init(&ctx))
 		return -EFAULT;
 	SHA256_Transform(&ctx, data_in);
-	rte_memcpy(data_out, &ctx,
-		   SHA256_DIGEST_LENGTH);
+	memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
 	return 0;
 }
 
@@ -203,8 +201,7 @@ static int partial_hash_sha384(uint8_t *data_in, uint8_t *data_out)
 	if (!SHA384_Init(&ctx))
 		return -EFAULT;
 	SHA512_Transform(&ctx, data_in);
-	rte_memcpy(data_out, &ctx,
-		   SHA512_DIGEST_LENGTH);
+	memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
 	return 0;
 }
 
@@ -215,8 +212,7 @@ static int partial_hash_sha512(uint8_t *data_in, uint8_t *data_out)
 	if (!SHA512_Init(&ctx))
 		return -EFAULT;
 	SHA512_Transform(&ctx, data_in);
-	rte_memcpy(data_out, &ctx,
-		   SHA512_DIGEST_LENGTH);
+	memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
 	return 0;
 }
 
diff --git a/drivers/crypto/cnxk/cnxk_cryptodev_ops.c b/drivers/crypto/cnxk/cnxk_cryptodev_ops.c
index 1dd1dbac9a..a67af3ec35 100644
--- a/drivers/crypto/cnxk/cnxk_cryptodev_ops.c
+++ b/drivers/crypto/cnxk/cnxk_cryptodev_ops.c
@@ -959,7 +959,7 @@ cnxk_crypto_cn10k_submit(void *qptr, void *inst, uint16_t nb_inst)
 		lmt_dst = PLT_PTR_CAST(lmt_base);
 
 		for (j = 0; j < i; j++) {
-			rte_memcpy(lmt_dst, inst, sizeof(struct cpt_inst_s));
+			memcpy(lmt_dst, inst, sizeof(struct cpt_inst_s));
 			inst = RTE_PTR_ADD(inst, sizeof(struct cpt_inst_s));
 			lmt_dst = RTE_PTR_ADD(lmt_dst, 2 * sizeof(struct cpt_inst_s));
 		}
diff --git a/drivers/crypto/cnxk/cnxk_se.h b/drivers/crypto/cnxk/cnxk_se.h
index 6374718a82..1fcbf0669a 100644
--- a/drivers/crypto/cnxk/cnxk_se.h
+++ b/drivers/crypto/cnxk/cnxk_se.h
@@ -1161,7 +1161,7 @@ cpt_sm_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens, struct roc_se_fc_p
 			void *dst = PLT_PTR_ADD(offset_vaddr, ROC_SE_OFF_CTRL_LEN);
 			const uint64_t *src = fc_params->iv_buf;
 
-			rte_memcpy(dst, src, 16);
+			memcpy(dst, src, 16);
 		}
 		inst->w4.u64 = cpt_inst_w4.u64;
 	} else {
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.c b/drivers/crypto/dpaa_sec/dpaa_sec.c
index 44528eaf7f..3e7b78656b 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.c
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.c
@@ -676,7 +676,7 @@ dpaa_sec_dump(struct dpaa_sec_op_ctx *ctx, struct dpaa_sec_qp *qp)
 	}
 
 	cdb = &sess->cdb;
-	rte_memcpy(&c_cdb, cdb, sizeof(struct sec_cdb));
+	memcpy(&c_cdb, cdb, sizeof(struct sec_cdb));
 #ifdef RTE_LIB_SECURITY
 	printf("\nsession protocol type = %d\n", sess->proto_alg);
 #endif
diff --git a/drivers/crypto/ipsec_mb/pmd_snow3g.c b/drivers/crypto/ipsec_mb/pmd_snow3g.c
index a96779f059..f3f519f2d4 100644
--- a/drivers/crypto/ipsec_mb/pmd_snow3g.c
+++ b/drivers/crypto/ipsec_mb/pmd_snow3g.c
@@ -291,8 +291,8 @@ process_snow3g_hash_op(struct ipsec_mb_qp *qp, struct rte_crypto_op **ops,
 
 			/* Copy back digest from src to auth.digest.data */
 			if (digest_appended)
-				rte_memcpy(ops[i]->sym->auth.digest.data,
-					dst, SNOW3G_DIGEST_LENGTH);
+				memcpy(ops[i]->sym->auth.digest.data, dst,
+				       SNOW3G_DIGEST_LENGTH);
 		}
 		processed_ops++;
 	}
diff --git a/drivers/crypto/qat/qat_sym_session.c b/drivers/crypto/qat/qat_sym_session.c
index 9e2dba5423..a93625b287 100644
--- a/drivers/crypto/qat/qat_sym_session.c
+++ b/drivers/crypto/qat/qat_sym_session.c
@@ -1439,7 +1439,7 @@ static int partial_hash_sha1(uint8_t *data_in, uint8_t *data_out)
 	if (!SHA1_Init(&ctx))
 		return -EFAULT;
 	SHA1_Transform(&ctx, data_in);
-	rte_memcpy(data_out, &ctx, SHA_DIGEST_LENGTH);
+	memcpy(data_out, &ctx, SHA_DIGEST_LENGTH);
 	return 0;
 }
 
@@ -1450,7 +1450,7 @@ static int partial_hash_sha224(uint8_t *data_in, uint8_t *data_out)
 	if (!SHA224_Init(&ctx))
 		return -EFAULT;
 	SHA256_Transform(&ctx, data_in);
-	rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
+	memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
 	return 0;
 }
 
@@ -1461,7 +1461,7 @@ static int partial_hash_sha256(uint8_t *data_in, uint8_t *data_out)
 	if (!SHA256_Init(&ctx))
 		return -EFAULT;
 	SHA256_Transform(&ctx, data_in);
-	rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
+	memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
 	return 0;
 }
 
@@ -1472,7 +1472,7 @@ static int partial_hash_sha384(uint8_t *data_in, uint8_t *data_out)
 	if (!SHA384_Init(&ctx))
 		return -EFAULT;
 	SHA512_Transform(&ctx, data_in);
-	rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
+	memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
 	return 0;
 }
 
@@ -1483,7 +1483,7 @@ static int partial_hash_sha512(uint8_t *data_in, uint8_t *data_out)
 	if (!SHA512_Init(&ctx))
 		return -EFAULT;
 	SHA512_Transform(&ctx, data_in);
-	rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
+	memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
 	return 0;
 }
 
@@ -1494,7 +1494,7 @@ static int partial_hash_md5(uint8_t *data_in, uint8_t *data_out)
 	if (!MD5_Init(&ctx))
 		return -EFAULT;
 	MD5_Transform(&ctx, data_in);
-	rte_memcpy(data_out, &ctx, MD5_DIGEST_LENGTH);
+	memcpy(data_out, &ctx, MD5_DIGEST_LENGTH);
 	return 0;
 }
 
@@ -1615,8 +1615,7 @@ static int qat_sym_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
 			return -ENOMEM;
 		}
 
-		rte_memcpy(in, AES_CMAC_SEED,
-			   ICP_QAT_HW_AES_128_KEY_SZ);
+		memcpy(in, AES_CMAC_SEED, ICP_QAT_HW_AES_128_KEY_SZ);
 		rte_memcpy(p_state_buf, auth_key, auth_keylen);
 
 		if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
@@ -1660,8 +1659,8 @@ static int qat_sym_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
 			return -ENOMEM;
 		}
 
-		rte_memcpy(in, qat_aes_xcbc_key_seed,
-			ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
+		memcpy(in, qat_aes_xcbc_key_seed,
+		       ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
 
 		for (x = 0; x < HASH_XCBC_PRECOMP_KEY_NUM; x++) {
 			if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
@@ -1924,8 +1923,8 @@ static int qat_sym_do_precomputes_ipsec_mb(enum icp_qat_hw_auth_algo hash_alg,
 		uint8_t *input = in;
 		uint8_t *out = p_state_buf;
 
-		rte_memcpy(input, qat_aes_xcbc_key_seed,
-				ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
+		memcpy(input, qat_aes_xcbc_key_seed,
+		       ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
 		for (i = 0; i < HASH_XCBC_PRECOMP_KEY_NUM; i++) {
 			if (aes_ipsecmb_job(input, out, m, auth_key, auth_keylen)) {
 				memset(input -
@@ -2441,8 +2440,8 @@ int qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
 	cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_auth_setup);
 	switch (cdesc->qat_hash_alg) {
 	case ICP_QAT_HW_AUTH_ALGO_SM3:
-		rte_memcpy(cdesc->cd_cur_ptr, sm3InitialState,
-			sizeof(sm3InitialState));
+		memcpy(cdesc->cd_cur_ptr, sm3InitialState,
+		       sizeof(sm3InitialState));
 		state1_size = qat_hash_get_state1_size(
 				cdesc->qat_hash_alg);
 		state2_size = ICP_QAT_HW_SM3_STATE2_SZ;
@@ -2453,9 +2452,8 @@ int qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
 		hash_2->auth_config.config =
 			ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE2,
 				cdesc->qat_hash_alg, digestsize);
-		rte_memcpy(cdesc->cd_cur_ptr + state1_size + state2_size +
-			sizeof(*hash_2), sm3InitialState,
-			sizeof(sm3InitialState));
+		memcpy(cdesc->cd_cur_ptr + state1_size + state2_size + sizeof(*hash_2),
+		       sm3InitialState, sizeof(sm3InitialState));
 		hash_cd_ctrl->inner_state1_sz = state1_size;
 		hash_cd_ctrl->inner_state2_sz = state2_size;
 		hash_cd_ctrl->inner_state2_offset =
@@ -2493,8 +2491,8 @@ int qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
 	case ICP_QAT_HW_AUTH_ALGO_SHA1:
 		if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
 			/* Plain SHA-1 */
-			rte_memcpy(cdesc->cd_cur_ptr, sha1InitialState,
-					sizeof(sha1InitialState));
+			memcpy(cdesc->cd_cur_ptr, sha1InitialState,
+			       sizeof(sha1InitialState));
 			state1_size = qat_hash_get_state1_size(
 					cdesc->qat_hash_alg);
 			break;
@@ -2520,8 +2518,8 @@ int qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
 	case ICP_QAT_HW_AUTH_ALGO_SHA224:
 		if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
 			/* Plain SHA-224 */
-			rte_memcpy(cdesc->cd_cur_ptr, sha224InitialState,
-					sizeof(sha224InitialState));
+			memcpy(cdesc->cd_cur_ptr, sha224InitialState,
+			       sizeof(sha224InitialState));
 			state1_size = qat_hash_get_state1_size(
 					cdesc->qat_hash_alg);
 			break;
@@ -2545,8 +2543,8 @@ int qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
 	case ICP_QAT_HW_AUTH_ALGO_SHA256:
 		if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
 			/* Plain SHA-256 */
-			rte_memcpy(cdesc->cd_cur_ptr, sha256InitialState,
-					sizeof(sha256InitialState));
+			memcpy(cdesc->cd_cur_ptr, sha256InitialState,
+			       sizeof(sha256InitialState));
 			state1_size = qat_hash_get_state1_size(
 					cdesc->qat_hash_alg);
 			break;
@@ -2570,8 +2568,8 @@ int qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
 	case ICP_QAT_HW_AUTH_ALGO_SHA384:
 		if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
 			/* Plain SHA-384 */
-			rte_memcpy(cdesc->cd_cur_ptr, sha384InitialState,
-					sizeof(sha384InitialState));
+			memcpy(cdesc->cd_cur_ptr, sha384InitialState,
+			       sizeof(sha384InitialState));
 			state1_size = qat_hash_get_state1_size(
 					cdesc->qat_hash_alg);
 			break;
@@ -2595,8 +2593,8 @@ int qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
 	case ICP_QAT_HW_AUTH_ALGO_SHA512:
 		if (cdesc->auth_mode == ICP_QAT_HW_AUTH_MODE0) {
 			/* Plain SHA-512 */
-			rte_memcpy(cdesc->cd_cur_ptr, sha512InitialState,
-					sizeof(sha512InitialState));
+			memcpy(cdesc->cd_cur_ptr, sha512InitialState,
+			       sizeof(sha512InitialState));
 			state1_size = qat_hash_get_state1_size(
 					cdesc->qat_hash_alg);
 			break;
diff --git a/drivers/crypto/scheduler/rte_cryptodev_scheduler.c b/drivers/crypto/scheduler/rte_cryptodev_scheduler.c
index 9a21edd32a..ed1833a7c6 100644
--- a/drivers/crypto/scheduler/rte_cryptodev_scheduler.c
+++ b/drivers/crypto/scheduler/rte_cryptodev_scheduler.c
@@ -79,7 +79,7 @@ sync_caps(struct rte_cryptodev_capabilities *caps,
 
 			/* remove a uncommon cap from the array */
 			for (j = i; j < sync_nb_caps - 1; j++)
-				rte_memcpy(&caps[j], &caps[j+1], sizeof(*cap));
+				memcpy(&caps[j], &caps[j + 1], sizeof(*cap));
 
 			memset(&caps[sync_nb_caps - 1], 0, sizeof(*cap));
 			sync_nb_caps--;
@@ -216,8 +216,8 @@ sync_sec_caps(uint32_t worker_idx,
 			 * caps, from the arrays
 			 */
 			for (j = i; j < nb_sec_caps - 1; j++) {
-				rte_memcpy(&sec_caps[j], &sec_caps[j+1],
-						sizeof(*sec_cap));
+				memcpy(&sec_caps[j], &sec_caps[j + 1],
+				       sizeof(*sec_cap));
 
 				rte_memcpy(&sec_crypto_caps[j][0],
 						&sec_crypto_caps[j+1][0],
diff --git a/drivers/crypto/scheduler/scheduler_failover.c b/drivers/crypto/scheduler/scheduler_failover.c
index 52ff2ffbb7..e7c6d19d25 100644
--- a/drivers/crypto/scheduler/scheduler_failover.c
+++ b/drivers/crypto/scheduler/scheduler_failover.c
@@ -171,12 +171,12 @@ scheduler_start(struct rte_cryptodev *dev)
 		sched_ctx->workers[PRIMARY_WORKER_IDX].qp_id = i;
 		sched_ctx->workers[SECONDARY_WORKER_IDX].qp_id = i;
 
-		rte_memcpy(&qp_ctx->primary_worker,
-				&sched_ctx->workers[PRIMARY_WORKER_IDX],
-				sizeof(struct scheduler_worker));
-		rte_memcpy(&qp_ctx->secondary_worker,
-				&sched_ctx->workers[SECONDARY_WORKER_IDX],
-				sizeof(struct scheduler_worker));
+		memcpy(&qp_ctx->primary_worker,
+		       &sched_ctx->workers[PRIMARY_WORKER_IDX],
+		       sizeof(struct scheduler_worker));
+		memcpy(&qp_ctx->secondary_worker,
+		       &sched_ctx->workers[SECONDARY_WORKER_IDX],
+		       sizeof(struct scheduler_worker));
 	}
 
 	return 0;
-- 
2.43.0
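
Background note: when the copy length is a compile-time constant, plain
memcpy() is at least as good as rte_memcpy() -- the compiler expands it
inline and static analysis / fortify tooling understands it -- which is
the pattern the coccinelle script rewrites throughout this patch. Below
is a minimal standalone sketch of that pattern; the struct, macro, and
function names are hypothetical and are not taken from the drivers
touched above.

#include <stdint.h>
#include <string.h>

#define DIGEST_LEN 20	/* hypothetical fixed digest size */

struct digest_ctx {
	uint8_t state[DIGEST_LEN];
};

/* Before: rte_memcpy(out, ctx, DIGEST_LEN);
 * After:  plain memcpy(); with a constant length the call compiles
 *         down to a few inline loads and stores.
 */
static inline void
copy_digest(uint8_t *out, const struct digest_ctx *ctx)
{
	memcpy(out, ctx->state, DIGEST_LEN);
}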