From: Anoob Joseph <anoobj@marvell.com>
To: Akhil Goyal <akhil.goyal@nxp.com>, Pablo de Lara
 <pablo.de.lara.guarch@intel.com>
CC: Anoob Joseph <anoobj@marvell.com>, Jerin Jacob <jerinj@marvell.com>,
 Narayana Prasad <pathreya@marvell.com>, <dev@dpdk.org>
Date: Sat, 6 Jul 2019 18:53:39 +0530
Message-ID: <1562419420-30278-1-git-send-email-anoobj@marvell.com>
X-Mailer: git-send-email 2.7.4
MIME-Version: 1.0
Content-Transfer-Encoding: 8bit
Content-Type: text/plain
Subject: [dpdk-dev] [PATCH 1/2] common/cpt: remove redundant bit swaps

The explicit byte swaps (rte_cpu_to_be_16() on each field followed by rte_cpu_to_be_64() on the whole word) done while filling vq_cmd_word0_t can be removed by arranging the structure fields according to the CPU byte order, so that plain stores already produce the word layout expected by the hardware. This also resolves the existing TODO about reversing the vq_cmd bit field definitions to eliminate the swapping.

Signed-off-by: Anoob Joseph <anoobj@marvell.com>
---
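Note (not part of the commit): below is a minimal standalone sketch of the
idea, assuming DPDK's rte_byteorder.h is available. It fills the old
fixed-order layout using the swaps and the new endian-aware layout using
plain stores, then checks that both leave the same bytes in memory on either
endianness. The old_word0_t/new_word0_t names and the test values are
illustrative only; the field names and #if arrangement match the patch.

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <rte_byteorder.h>

/* Old layout: fixed field order; every field is converted to big endian
 * and the whole 64-bit word is swapped once more before submission.
 */
typedef union {
	uint64_t u64;
	struct {
		uint16_t opcode;
		uint16_t param1;
		uint16_t param2;
		uint16_t dlen;
	} s;
} old_word0_t;

/* New layout: field order picked at compile time from RTE_BYTE_ORDER,
 * so plain stores already give the layout the hardware expects.
 */
typedef union {
	uint64_t u64;
	struct {
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
		uint16_t opcode;
		uint16_t param1;
		uint16_t param2;
		uint16_t dlen;
#else
		uint16_t dlen;
		uint16_t param2;
		uint16_t param1;
		uint16_t opcode;
#endif
	} s;
} new_word0_t;

int main(void)
{
	old_word0_t o;
	new_word0_t n;

	/* Old scheme: 16-bit swap per field, then one 64-bit swap. */
	o.u64 = 0;
	o.s.opcode = rte_cpu_to_be_16(0x1234);
	o.s.param1 = rte_cpu_to_be_16(0x0010);
	o.s.param2 = rte_cpu_to_be_16(0x0020);
	o.s.dlen = rte_cpu_to_be_16(0x0040);
	o.u64 = rte_cpu_to_be_64(o.u64);

	/* New scheme: no conversions needed. */
	n.u64 = 0;
	n.s.opcode = 0x1234;
	n.s.param1 = 0x0010;
	n.s.param2 = 0x0020;
	n.s.dlen = 0x0040;

	printf("layouts match: %s\n",
	       memcmp(&o, &n, sizeof(o)) == 0 ? "yes" : "no");
	return 0;
}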
 drivers/common/cpt/cpt_hw_types.h |   7 +++
 drivers/common/cpt/cpt_ucode.h    | 116 ++++++++++++--------------------------
 2 files changed, 44 insertions(+), 79 deletions(-)

diff --git a/drivers/common/cpt/cpt_hw_types.h b/drivers/common/cpt/cpt_hw_types.h
index 7be1d12..e2b127d 100644
--- a/drivers/common/cpt/cpt_hw_types.h
+++ b/drivers/common/cpt/cpt_hw_types.h
@@ -30,10 +30,17 @@
 typedef union {
 	uint64_t u64;
 	struct {
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
 		uint16_t opcode;
 		uint16_t param1;
 		uint16_t param2;
 		uint16_t dlen;
+#else
+		uint16_t dlen;
+		uint16_t param2;
+		uint16_t param1;
+		uint16_t opcode;
+#endif
 	} s;
 } vq_cmd_word0_t;
 
diff --git a/drivers/common/cpt/cpt_ucode.h b/drivers/common/cpt/cpt_ucode.h
index e02b34a..c589b58 100644
--- a/drivers/common/cpt/cpt_ucode.h
+++ b/drivers/common/cpt/cpt_ucode.h
@@ -520,16 +520,15 @@ cpt_digest_gen_prep(uint32_t flags,
 
 	/*GP op header */
 	vq_cmd_w0.u64 = 0;
-	vq_cmd_w0.s.param2 = rte_cpu_to_be_16(((uint16_t)hash_type << 8));
+	vq_cmd_w0.s.param2 = ((uint16_t)hash_type << 8);
 	if (ctx->hmac) {
 		opcode.s.major = CPT_MAJOR_OP_HMAC | CPT_DMA_MODE;
-		vq_cmd_w0.s.param1 = rte_cpu_to_be_16(key_len);
-		vq_cmd_w0.s.dlen =
-			rte_cpu_to_be_16((data_len + ROUNDUP8(key_len)));
+		vq_cmd_w0.s.param1 = key_len;
+		vq_cmd_w0.s.dlen = data_len + ROUNDUP8(key_len);
 	} else {
 		opcode.s.major = CPT_MAJOR_OP_HASH | CPT_DMA_MODE;
 		vq_cmd_w0.s.param1 = 0;
-		vq_cmd_w0.s.dlen = rte_cpu_to_be_16(data_len);
+		vq_cmd_w0.s.dlen = data_len;
 	}
 
 	opcode.s.minor = 0;
@@ -540,10 +539,10 @@ cpt_digest_gen_prep(uint32_t flags,
 		/* Minor op is passthrough */
 		opcode.s.minor = 0x03;
 		/* Send out completion code only */
-		vq_cmd_w0.s.param2 = rte_cpu_to_be_16(0x1);
+		vq_cmd_w0.s.param2 = 0x1;
 	}
 
-	vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
+	vq_cmd_w0.s.opcode = opcode.flags;
 
 	/* DPTR has SG list */
 	in_buffer = m_vaddr;
@@ -622,7 +621,7 @@ cpt_digest_gen_prep(uint32_t flags,
 	size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
 
 	/* This is DPTR len incase of SG mode */
-	vq_cmd_w0.s.dlen = rte_cpu_to_be_16(size);
+	vq_cmd_w0.s.dlen = size;
 
 	m_vaddr = (uint8_t *)m_vaddr + size;
 	m_dma += size;
@@ -635,11 +634,6 @@ cpt_digest_gen_prep(uint32_t flags,
 
 	req->ist.ei1 = dptr_dma;
 	req->ist.ei2 = rptr_dma;
-	/* First 16-bit swap then 64-bit swap */
-	/* TODO: HACK: Reverse the vq_cmd and cpt_req bit field definitions
-	 * to eliminate all the swapping
-	 */
-	vq_cmd_w0.u64 = rte_cpu_to_be_64(vq_cmd_w0.u64);
 
 	/* vq command w3 */
 	vq_cmd_w3.u64 = 0;
@@ -798,8 +792,8 @@ cpt_enc_hmac_prep(uint32_t flags,
 
 	/* GP op header */
 	vq_cmd_w0.u64 = 0;
-	vq_cmd_w0.s.param1 = rte_cpu_to_be_16(encr_data_len);
-	vq_cmd_w0.s.param2 = rte_cpu_to_be_16(auth_data_len);
+	vq_cmd_w0.s.param1 = encr_data_len;
+	vq_cmd_w0.s.param2 = auth_data_len;
 	/*
 	 * In 83XX since we have a limitation of
 	 * IV & Offset control word not part of instruction
@@ -826,9 +820,9 @@ cpt_enc_hmac_prep(uint32_t flags,
 		req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr
 						    + outputlen - iv_len);
 
-		vq_cmd_w0.s.dlen = rte_cpu_to_be_16(inputlen + OFF_CTRL_LEN);
+		vq_cmd_w0.s.dlen = inputlen + OFF_CTRL_LEN;
 
-		vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
+		vq_cmd_w0.s.opcode = opcode.flags;
 
 		if (likely(iv_len)) {
 			uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr
@@ -861,7 +855,7 @@ cpt_enc_hmac_prep(uint32_t flags,
 
 		opcode.s.major |= CPT_DMA_MODE;
 
-		vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
+		vq_cmd_w0.s.opcode = opcode.flags;
 
 		if (likely(iv_len)) {
 			uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr
@@ -1005,7 +999,7 @@ cpt_enc_hmac_prep(uint32_t flags,
 		size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
 
 		/* This is DPTR len incase of SG mode */
-		vq_cmd_w0.s.dlen = rte_cpu_to_be_16(size);
+		vq_cmd_w0.s.dlen = size;
 
 		m_vaddr = (uint8_t *)m_vaddr + size;
 		m_dma += size;
@@ -1020,12 +1014,6 @@ cpt_enc_hmac_prep(uint32_t flags,
 		req->ist.ei2 = rptr_dma;
 	}
 
-	/* First 16-bit swap then 64-bit swap */
-	/* TODO: HACK: Reverse the vq_cmd and cpt_req bit field definitions
-	 * to eliminate all the swapping
-	 */
-	vq_cmd_w0.u64 = rte_cpu_to_be_64(vq_cmd_w0.u64);
-
 	ctx_dma = fc_params->ctx_buf.dma_addr +
 		offsetof(struct cpt_ctx, fctx);
 	/* vq command w3 */
@@ -1175,8 +1163,8 @@ cpt_dec_hmac_prep(uint32_t flags,
 		encr_offset = inputlen;
 
 	vq_cmd_w0.u64 = 0;
-	vq_cmd_w0.s.param1 = rte_cpu_to_be_16(encr_data_len);
-	vq_cmd_w0.s.param2 = rte_cpu_to_be_16(auth_data_len);
+	vq_cmd_w0.s.param1 = encr_data_len;
+	vq_cmd_w0.s.param2 = auth_data_len;
 
 	/*
 	 * In 83XX since we have a limitation of
@@ -1209,9 +1197,9 @@ cpt_dec_hmac_prep(uint32_t flags,
 		 * hmac.
 		 */
 
-		vq_cmd_w0.s.dlen = rte_cpu_to_be_16(inputlen + OFF_CTRL_LEN);
+		vq_cmd_w0.s.dlen = inputlen + OFF_CTRL_LEN;
 
-		vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
+		vq_cmd_w0.s.opcode = opcode.flags;
 
 		if (likely(iv_len)) {
 			uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
@@ -1245,7 +1233,7 @@ cpt_dec_hmac_prep(uint32_t flags,
 
 		opcode.s.major |= CPT_DMA_MODE;
 
-		vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
+		vq_cmd_w0.s.opcode = opcode.flags;
 
 		if (likely(iv_len)) {
 			uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
@@ -1401,7 +1389,7 @@ cpt_dec_hmac_prep(uint32_t flags,
 		size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
 
 		/* This is DPTR len incase of SG mode */
-		vq_cmd_w0.s.dlen = rte_cpu_to_be_16(size);
+		vq_cmd_w0.s.dlen = size;
 
 		m_vaddr = (uint8_t *)m_vaddr + size;
 		m_dma += size;
@@ -1417,12 +1405,6 @@ cpt_dec_hmac_prep(uint32_t flags,
 		req->ist.ei2 = rptr_dma;
 	}
 
-	/* First 16-bit swap then 64-bit swap */
-	/* TODO: HACK: Reverse the vq_cmd and cpt_req bit field definitions
-	 * to eliminate all the swapping
-	 */
-	vq_cmd_w0.u64 = rte_cpu_to_be_64(vq_cmd_w0.u64);
-
 	ctx_dma = fc_params->ctx_buf.dma_addr +
 		offsetof(struct cpt_ctx, fctx);
 	/* vq command w3 */
@@ -1579,8 +1561,8 @@ cpt_zuc_snow3g_enc_prep(uint32_t req_flags,
 	 * GP op header, lengths are expected in bits.
 	 */
 	vq_cmd_w0.u64 = 0;
-	vq_cmd_w0.s.param1 = rte_cpu_to_be_16(encr_data_len);
-	vq_cmd_w0.s.param2 = rte_cpu_to_be_16(auth_data_len);
+	vq_cmd_w0.s.param1 = encr_data_len;
+	vq_cmd_w0.s.param2 = auth_data_len;
 
 	/*
 	 * In 83XX since we have a limitation of
@@ -1609,9 +1591,9 @@ cpt_zuc_snow3g_enc_prep(uint32_t req_flags,
 		req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr
 						    + outputlen - iv_len);
 
-		vq_cmd_w0.s.dlen = rte_cpu_to_be_16(inputlen + OFF_CTRL_LEN);
+		vq_cmd_w0.s.dlen = inputlen + OFF_CTRL_LEN;
 
-		vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
+		vq_cmd_w0.s.opcode = opcode.flags;
 
 		if (likely(iv_len)) {
 			uint32_t *iv_d = (uint32_t *)((uint8_t *)offset_vaddr
@@ -1638,7 +1620,7 @@ cpt_zuc_snow3g_enc_prep(uint32_t req_flags,
 
 		opcode.s.major |= CPT_DMA_MODE;
 
-		vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
+		vq_cmd_w0.s.opcode = opcode.flags;
 
 		/* DPTR has SG list */
 		in_buffer = m_vaddr;
@@ -1740,7 +1722,7 @@ cpt_zuc_snow3g_enc_prep(uint32_t req_flags,
 		size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
 
 		/* This is DPTR len incase of SG mode */
-		vq_cmd_w0.s.dlen = rte_cpu_to_be_16(size);
+		vq_cmd_w0.s.dlen = size;
 
 		m_vaddr = (uint8_t *)m_vaddr + size;
 		m_dma += size;
@@ -1755,12 +1737,6 @@ cpt_zuc_snow3g_enc_prep(uint32_t req_flags,
 		req->ist.ei2 = rptr_dma;
 	}
 
-	/* First 16-bit swap then 64-bit swap */
-	/* TODO: HACK: Reverse the vq_cmd and cpt_req bit field definitions
-	 * to eliminate all the swapping
-	 */
-	vq_cmd_w0.u64 = rte_cpu_to_be_64(vq_cmd_w0.u64);
-
 	/* vq command w3 */
 	vq_cmd_w3.u64 = 0;
 	vq_cmd_w3.s.grp = 0;
@@ -1886,7 +1862,7 @@ cpt_zuc_snow3g_dec_prep(uint32_t req_flags,
 	 * GP op header, lengths are expected in bits.
 	 */
 	vq_cmd_w0.u64 = 0;
-	vq_cmd_w0.s.param1 = rte_cpu_to_be_16(encr_data_len);
+	vq_cmd_w0.s.param1 = encr_data_len;
 
 	/*
 	 * In 83XX since we have a limitation of
@@ -1915,9 +1891,9 @@ cpt_zuc_snow3g_dec_prep(uint32_t req_flags,
 		req->alternate_caddr = (uint64_t *)((uint8_t *)dm_vaddr
 						    + outputlen - iv_len);
 
-		vq_cmd_w0.s.dlen = rte_cpu_to_be_16(inputlen + OFF_CTRL_LEN);
+		vq_cmd_w0.s.dlen = inputlen + OFF_CTRL_LEN;
 
-		vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
+		vq_cmd_w0.s.opcode = opcode.flags;
 
 		if (likely(iv_len)) {
 			uint32_t *iv_d = (uint32_t *)((uint8_t *)offset_vaddr
@@ -1945,7 +1921,7 @@ cpt_zuc_snow3g_dec_prep(uint32_t req_flags,
 
 		opcode.s.major |= CPT_DMA_MODE;
 
-		vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
+		vq_cmd_w0.s.opcode = opcode.flags;
 
 		/* DPTR has SG list */
 		in_buffer = m_vaddr;
@@ -2020,7 +1996,7 @@ cpt_zuc_snow3g_dec_prep(uint32_t req_flags,
 		size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
 
 		/* This is DPTR len incase of SG mode */
-		vq_cmd_w0.s.dlen = rte_cpu_to_be_16(size);
+		vq_cmd_w0.s.dlen = size;
 
 		m_vaddr = (uint8_t *)m_vaddr + size;
 		m_dma += size;
@@ -2035,12 +2011,6 @@ cpt_zuc_snow3g_dec_prep(uint32_t req_flags,
 		req->ist.ei2 = rptr_dma;
 	}
 
-	/* First 16-bit swap then 64-bit swap */
-	/* TODO: HACK: Reverse the vq_cmd and cpt_req bit field definitions
-	 * to eliminate all the swapping
-	 */
-	vq_cmd_w0.u64 = rte_cpu_to_be_64(vq_cmd_w0.u64);
-
 	/* vq command w3 */
 	vq_cmd_w3.u64 = 0;
 	vq_cmd_w3.s.grp = 0;
@@ -2150,9 +2120,9 @@ cpt_kasumi_enc_prep(uint32_t req_flags,
 	 * GP op header, lengths are expected in bits.
 	 */
 	vq_cmd_w0.u64 = 0;
-	vq_cmd_w0.s.param1 = rte_cpu_to_be_16(encr_data_len);
-	vq_cmd_w0.s.param2 = rte_cpu_to_be_16(auth_data_len);
-	vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
+	vq_cmd_w0.s.param1 = encr_data_len;
+	vq_cmd_w0.s.param2 = auth_data_len;
+	vq_cmd_w0.s.opcode = opcode.flags;
 
 	/* consider iv len */
 	if (flags == 0x0) {
@@ -2279,7 +2249,7 @@ cpt_kasumi_enc_prep(uint32_t req_flags,
 	size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
 
 	/* This is DPTR len incase of SG mode */
-	vq_cmd_w0.s.dlen = rte_cpu_to_be_16(size);
+	vq_cmd_w0.s.dlen = size;
 
 	m_vaddr = (uint8_t *)m_vaddr + size;
 	m_dma += size;
@@ -2293,12 +2263,6 @@ cpt_kasumi_enc_prep(uint32_t req_flags,
 	req->ist.ei1 = dptr_dma;
 	req->ist.ei2 = rptr_dma;
 
-	/* First 16-bit swap then 64-bit swap */
-	/* TODO: HACK: Reverse the vq_cmd and cpt_req bit field definitions
-	 * to eliminate all the swapping
-	 */
-	vq_cmd_w0.u64 = rte_cpu_to_be_64(vq_cmd_w0.u64);
-
 	/* vq command w3 */
 	vq_cmd_w3.u64 = 0;
 	vq_cmd_w3.s.grp = 0;
@@ -2394,8 +2358,8 @@ cpt_kasumi_dec_prep(uint64_t d_offs,
 	 * GP op header, lengths are expected in bits.
 	 */
 	vq_cmd_w0.u64 = 0;
-	vq_cmd_w0.s.param1 = rte_cpu_to_be_16(encr_data_len);
-	vq_cmd_w0.s.opcode = rte_cpu_to_be_16(opcode.flags);
+	vq_cmd_w0.s.param1 = encr_data_len;
+	vq_cmd_w0.s.opcode = opcode.flags;
 
 	/* consider iv len */
 	encr_offset += iv_len;
@@ -2480,7 +2444,7 @@ cpt_kasumi_dec_prep(uint64_t d_offs,
 	size = g_size_bytes + s_size_bytes + SG_LIST_HDR_SIZE;
 
 	/* This is DPTR len incase of SG mode */
-	vq_cmd_w0.s.dlen = rte_cpu_to_be_16(size);
+	vq_cmd_w0.s.dlen = size;
 
 	m_vaddr = (uint8_t *)m_vaddr + size;
 	m_dma += size;
@@ -2494,12 +2458,6 @@ cpt_kasumi_dec_prep(uint64_t d_offs,
 	req->ist.ei1 = dptr_dma;
 	req->ist.ei2 = rptr_dma;
 
-	/* First 16-bit swap then 64-bit swap */
-	/* TODO: HACK: Reverse the vq_cmd and cpt_req bit field definitions
-	 * to eliminate all the swapping
-	 */
-	vq_cmd_w0.u64 = rte_cpu_to_be_64(vq_cmd_w0.u64);
-
 	/* vq command w3 */
 	vq_cmd_w3.u64 = 0;
 	vq_cmd_w3.s.grp = 0;
-- 
2.7.4