DPDK patches and discussions
* [PATCH 1/8] common/dpaax: caamflib: Remove code related to SEC ERA 1 to 7
@ 2021-12-20 10:27 Gagandeep Singh
  2021-12-20 10:27 ` [PATCH 2/8] common/dpaax: change job processing mode for PDCP SDAP Gagandeep Singh
                   ` (7 more replies)
  0 siblings, 8 replies; 42+ messages in thread
From: Gagandeep Singh @ 2021-12-20 10:27 UTC (permalink / raw)
  To: gakhil, dev; +Cc: Franck LENORMAND, Gagandeep Singh

From: Franck LENORMAND <franck.lenormand@nxp.com>

The DPAA1 and DPAA2 platforms use only SEC ERA 8 and ERA 10.

This patch removes the code in the SDAP and PDCP headers that handled
the older, unsupported ERAs (1 to 7), simplifying the codebase:
 - Simplify logic that branched on the RTA_SEC_ERA_<> macros
 - Remove the era_2_sw_hfn_ovrd parameter, which was dedicated to
   RTA_SEC_ERA_2 (caller-side impact sketched below)
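
For illustration only (not part of the patch): a minimal sketch of how a
caller of the shared-descriptor constructors is affected, assuming the
existing cnstr_shdsc_pdcp_c_plane_encap() prototype from desc/pdcp.h.
The wrapper name and the placeholder argument values are hypothetical.

  /*
   * Hypothetical caller sketch. Before this series, the constructor took a
   * trailing era_2_sw_hfn_ovrd argument (only meaningful on SEC ERA 2, so
   * callers on newer ERAs passed 0). After this patch the argument is gone
   * and call sites simply drop it, as in the dpaa_sec/dpaa2_sec updates in
   * the diffstat above.
   */
  #include "desc/pdcp.h"

  static int build_cplane_encap_desc(uint32_t *descbuf,
                                     struct alginfo *cipherdata,
                                     struct alginfo *authdata)
  {
          /* Placeholder HFN/bearer/direction values, for illustration only */
          return cnstr_shdsc_pdcp_c_plane_encap(descbuf,
                                                true,            /* ps */
                                                true,            /* swap */
                                                0,               /* hfn */
                                                PDCP_SN_SIZE_12,
                                                0,               /* bearer */
                                                0,               /* direction */
                                                0,               /* hfn_threshold */
                                                cipherdata,
                                                authdata);
          /* era_2_sw_hfn_ovrd is no longer passed as the final argument */
  }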

Signed-off-by: Franck LENORMAND <franck.lenormand@nxp.com>
Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 drivers/common/dpaax/caamflib/desc/pdcp.h   | 939 ++++----------------
 drivers/common/dpaax/caamflib/desc/sdap.h   |  91 +-
 drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c |  14 +-
 drivers/crypto/dpaa_sec/dpaa_sec.c          |  14 +-
 4 files changed, 183 insertions(+), 875 deletions(-)

diff --git a/drivers/common/dpaax/caamflib/desc/pdcp.h b/drivers/common/dpaax/caamflib/desc/pdcp.h
index 8e8daf5ba8..2fe56c53c6 100644
--- a/drivers/common/dpaax/caamflib/desc/pdcp.h
+++ b/drivers/common/dpaax/caamflib/desc/pdcp.h
@@ -329,91 +329,35 @@ pdcp_insert_cplane_null_op(struct program *p,
 			   struct alginfo *cipherdata __maybe_unused,
 			   struct alginfo *authdata __maybe_unused,
 			   unsigned int dir,
-			   enum pdcp_sn_size sn_size __maybe_unused,
-			   unsigned char era_2_sw_hfn_ovrd __maybe_unused)
+			   enum pdcp_sn_size sn_size __maybe_unused)
 {
-	LABEL(local_offset);
-	REFERENCE(move_cmd_read_descbuf);
-	REFERENCE(move_cmd_write_descbuf);
-
-	if (rta_sec_era > RTA_SEC_ERA_2) {
-		MATHB(p, SEQINSZ, ADD, ZERO, VSEQINSZ, 4, 0);
-		if (dir == OP_TYPE_ENCAP_PROTOCOL)
-			MATHB(p, SEQINSZ, ADD, PDCP_MAC_I_LEN, VSEQOUTSZ, 4,
-			      IMMED2);
-		else
-			MATHB(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, VSEQOUTSZ, 4,
-			      IMMED2);
-	} else {
-		MATHB(p, SEQINSZ, ADD, ONE, VSEQINSZ, 4, 0);
-		MATHB(p, VSEQINSZ, SUB, ONE, VSEQINSZ, 4, 0);
-
-		if (dir == OP_TYPE_ENCAP_PROTOCOL) {
-			MATHB(p, SEQINSZ, ADD, PDCP_MAC_I_LEN, VSEQOUTSZ, 4,
-			      IMMED2);
-			MATHB(p, VSEQINSZ, SUB, ONE, MATH0, 4, 0);
-		} else {
-			MATHB(p, VSEQINSZ, SUB, PDCP_MAC_I_LEN, VSEQINSZ, 4,
-			      IMMED2);
-			MATHB(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, VSEQOUTSZ, 4,
-			      IMMED2);
-			MATHB(p, VSEQOUTSZ, SUB, ONE, MATH0, 4, 0);
-		}
-
-		MATHB(p, MATH0, ADD, ONE, MATH0, 4, 0);
+	MATHB(p, SEQINSZ, ADD, ZERO, VSEQINSZ, 4, 0);
+	if (dir == OP_TYPE_ENCAP_PROTOCOL)
+		MATHB(p, SEQINSZ, ADD, PDCP_MAC_I_LEN, VSEQOUTSZ, 4,
+		      IMMED2);
+	else
+		MATHB(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, VSEQOUTSZ, 4,
+		      IMMED2);
 
-		/*
-		 * Since MOVELEN is available only starting with
-		 * SEC ERA 3, use poor man's MOVELEN: create a MOVE
-		 * command dynamically by writing the length from M1 by
-		 * OR-ing the command in the M1 register and MOVE the
-		 * result into the descriptor buffer. Care must be taken
-		 * wrt. the location of the command because of SEC
-		 * pipelining. The actual MOVEs are written at the end
-		 * of the descriptor due to calculations needed on the
-		 * offset in the descriptor for the MOVE command.
-		 */
-		move_cmd_read_descbuf = MOVE(p, DESCBUF, 0, MATH0, 0, 6,
-					     IMMED);
-		move_cmd_write_descbuf = MOVE(p, MATH0, 0, DESCBUF, 0, 8,
-					      WAITCOMP | IMMED);
-	}
 	MATHB(p, VSEQINSZ, SUB, PDCP_NULL_MAX_FRAME_LEN, NONE, 4,
 	      IMMED2);
 	JUMP(p, PDCP_MAX_FRAME_LEN_STATUS, HALT_STATUS, ALL_FALSE, MATH_N);
 
-	if (rta_sec_era > RTA_SEC_ERA_2) {
-		if (dir == OP_TYPE_ENCAP_PROTOCOL)
-			MATHB(p, VSEQINSZ, ADD, ZERO, MATH0, 4, 0);
-		else
-			MATHB(p, VSEQOUTSZ, ADD, ZERO, MATH0, 4, 0);
-	}
+	if (dir == OP_TYPE_ENCAP_PROTOCOL)
+		MATHB(p, VSEQINSZ, ADD, ZERO, MATH0, 4, 0);
+	else
+		MATHB(p, VSEQOUTSZ, ADD, ZERO, MATH0, 4, 0);
+
 	SEQFIFOSTORE(p, MSG, 0, 0, VLF);
 	SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1 | FLUSH1);
 
-	if (rta_sec_era > RTA_SEC_ERA_2) {
-		MOVE(p, AB1, 0, OFIFO, 0, MATH0, 0);
-	} else {
-		SET_LABEL(p, local_offset);
-
-		/* Shut off automatic Info FIFO entries */
-		LOAD(p, 0, DCTRL, LDOFF_DISABLE_AUTO_NFIFO, 0, IMMED);
-		/* Placeholder for MOVE command with length from M1 register */
-		MOVE(p, IFIFOAB1, 0, OFIFO, 0, 0, IMMED);
-		/* Enable automatic Info FIFO entries */
-		LOAD(p, 0, DCTRL, LDOFF_ENABLE_AUTO_NFIFO, 0, IMMED);
-	}
+	MOVE(p, AB1, 0, OFIFO, 0, MATH0, 0);
 
 	if (dir == OP_TYPE_ENCAP_PROTOCOL) {
 		MATHB(p, MATH1, XOR, MATH1, MATH0, 8, 0);
 		MOVE(p, MATH0, 0, OFIFO, 0, 4, IMMED);
 	}
 
-	if (rta_sec_era < RTA_SEC_ERA_3) {
-		PATCH_MOVE(p, move_cmd_read_descbuf, local_offset);
-		PATCH_MOVE(p, move_cmd_write_descbuf, local_offset);
-	}
-
 	return 0;
 }
 
@@ -422,66 +366,21 @@ insert_copy_frame_op(struct program *p,
 		     struct alginfo *cipherdata __maybe_unused,
 		     unsigned int dir __maybe_unused)
 {
-	LABEL(local_offset);
-	REFERENCE(move_cmd_read_descbuf);
-	REFERENCE(move_cmd_write_descbuf);
-
-	if (rta_sec_era > RTA_SEC_ERA_2) {
-		MATHB(p, SEQINSZ, ADD, ZERO, VSEQINSZ,  4, 0);
-		MATHB(p, SEQINSZ, ADD, ZERO, VSEQOUTSZ,  4, 0);
-	} else {
-		MATHB(p, SEQINSZ, ADD, ONE, VSEQINSZ,  4, 0);
-		MATHB(p, VSEQINSZ, SUB, ONE, VSEQINSZ,  4, 0);
-		MATHB(p, SEQINSZ, ADD, ONE, VSEQOUTSZ,  4, 0);
-		MATHB(p, VSEQOUTSZ, SUB, ONE, VSEQOUTSZ,  4, 0);
-		MATHB(p, VSEQINSZ, SUB, ONE, MATH0,  4, 0);
-		MATHB(p, MATH0, ADD, ONE, MATH0,  4, 0);
+	MATHB(p, SEQINSZ, ADD, ZERO, VSEQINSZ,  4, 0);
+	MATHB(p, SEQINSZ, ADD, ZERO, VSEQOUTSZ,  4, 0);
 
-		/*
-		 * Since MOVELEN is available only starting with
-		 * SEC ERA 3, use poor man's MOVELEN: create a MOVE
-		 * command dynamically by writing the length from M1 by
-		 * OR-ing the command in the M1 register and MOVE the
-		 * result into the descriptor buffer. Care must be taken
-		 * wrt. the location of the command because of SEC
-		 * pipelining. The actual MOVEs are written at the end
-		 * of the descriptor due to calculations needed on the
-		 * offset in the descriptor for the MOVE command.
-		 */
-		move_cmd_read_descbuf = MOVE(p, DESCBUF, 0, MATH0, 0, 6,
-					     IMMED);
-		move_cmd_write_descbuf = MOVE(p, MATH0, 0, DESCBUF, 0, 8,
-					      WAITCOMP | IMMED);
-	}
 	MATHB(p, SEQINSZ, SUB, PDCP_NULL_MAX_FRAME_LEN, NONE,  4,
 	      IFB | IMMED2);
 	JUMP(p, PDCP_MAX_FRAME_LEN_STATUS, HALT_STATUS, ALL_FALSE, MATH_N);
 
-	if (rta_sec_era > RTA_SEC_ERA_2)
-		MATHB(p, VSEQINSZ, ADD, ZERO, MATH0,  4, 0);
+	MATHB(p, VSEQINSZ, ADD, ZERO, MATH0,  4, 0);
 
 	SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1 | FLUSH1);
-	if (rta_sec_era > RTA_SEC_ERA_2) {
-		MOVE(p, AB1, 0, OFIFO, 0, MATH0, 0);
-	} else {
-		SET_LABEL(p, local_offset);
 
-		/* Shut off automatic Info FIFO entries */
-		LOAD(p, 0, DCTRL, LDOFF_DISABLE_AUTO_NFIFO, 0, IMMED);
-
-		/* Placeholder for MOVE command with length from M0 register */
-		MOVE(p, IFIFOAB1, 0, OFIFO, 0, 0, IMMED);
-
-		/* Enable automatic Info FIFO entries */
-		LOAD(p, 0, DCTRL, LDOFF_ENABLE_AUTO_NFIFO, 0, IMMED);
-	}
+	MOVE(p, AB1, 0, OFIFO, 0, MATH0, 0);
 
 	SEQFIFOSTORE(p, MSG, 0, 0, VLF);
 
-	if (rta_sec_era < RTA_SEC_ERA_3) {
-		PATCH_MOVE(p, move_cmd_read_descbuf, local_offset);
-		PATCH_MOVE(p, move_cmd_write_descbuf, local_offset);
-	}
 	return 0;
 }
 
@@ -490,13 +389,12 @@ pdcp_insert_cplane_int_only_op(struct program *p,
 			       bool swap __maybe_unused,
 			       struct alginfo *cipherdata __maybe_unused,
 			       struct alginfo *authdata, unsigned int dir,
-			       enum pdcp_sn_size sn_size,
-			       unsigned char era_2_sw_hfn_ovrd)
+			       enum pdcp_sn_size sn_size)
 {
 	uint32_t offset = 0, length = 0, sn_mask = 0;
 
 	/* 12 bit SN is only supported for protocol offload case */
-	if (rta_sec_era >= RTA_SEC_ERA_8 && sn_size == PDCP_SN_SIZE_12) {
+	if (sn_size == PDCP_SN_SIZE_12) {
 		KEY(p, KEY2, authdata->key_enc_flags, authdata->key,
 		    authdata->keylen, INLINE_KEY(authdata));
 
@@ -526,9 +424,6 @@ pdcp_insert_cplane_int_only_op(struct program *p,
 		return -ENOTSUP;
 
 	}
-	LABEL(local_offset);
-	REFERENCE(move_cmd_read_descbuf);
-	REFERENCE(move_cmd_write_descbuf);
 
 	switch (authdata->algtype) {
 	case PDCP_AUTH_TYPE_SNOW:
@@ -538,14 +433,7 @@ pdcp_insert_cplane_int_only_op(struct program *p,
 		SEQLOAD(p, MATH0, offset, length, 0);
 		JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CALM);
 
-		if (rta_sec_era > RTA_SEC_ERA_2 ||
-		    (rta_sec_era == RTA_SEC_ERA_2 &&
-				   era_2_sw_hfn_ovrd == 0)) {
-			SEQINPTR(p, 0, length, RTO);
-		} else {
-			SEQINPTR(p, 0, 5, RTO);
-			SEQFIFOLOAD(p, SKIP, 4, 0);
-		}
+		SEQINPTR(p, 0, length, RTO);
 
 		if (swap == false) {
 			MATHB(p, MATH0, AND, sn_mask, MATH1,  8,
@@ -580,40 +468,11 @@ pdcp_insert_cplane_int_only_op(struct program *p,
 			MATHB(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, MATH1, 4,
 			      IMMED2);
 		} else {
-			if (rta_sec_era > RTA_SEC_ERA_2) {
-				MATHB(p, SEQINSZ, SUB, ZERO, MATH1, 4,
-				      0);
-			} else {
-				MATHB(p, SEQINSZ, ADD, ONE, MATH1, 4,
-				      0);
-				MATHB(p, MATH1, SUB, ONE, MATH1, 4,
-				      0);
-			}
+			MATHB(p, SEQINSZ, SUB, ZERO, MATH1, 4, 0);
 		}
 
-		if (rta_sec_era > RTA_SEC_ERA_2) {
-			MATHB(p, MATH1, SUB, ZERO, VSEQINSZ, 4, 0);
-			MATHB(p, MATH1, SUB, ZERO, VSEQOUTSZ, 4, 0);
-		} else {
-			MATHB(p, ZERO, ADD, MATH1, VSEQINSZ, 4, 0);
-			MATHB(p, ZERO, ADD, MATH1, VSEQOUTSZ, 4, 0);
-
-			/*
-			 * Since MOVELEN is available only starting with
-			 * SEC ERA 3, use poor man's MOVELEN: create a MOVE
-			 * command dynamically by writing the length from M1 by
-			 * OR-ing the command in the M1 register and MOVE the
-			 * result into the descriptor buffer. Care must be taken
-			 * wrt. the location of the command because of SEC
-			 * pipelining. The actual MOVEs are written at the end
-			 * of the descriptor due to calculations needed on the
-			 * offset in the descriptor for the MOVE command.
-			 */
-			move_cmd_read_descbuf = MOVE(p, DESCBUF, 0, MATH1, 0, 6,
-						     IMMED);
-			move_cmd_write_descbuf = MOVE(p, MATH1, 0, DESCBUF, 0,
-						      8, WAITCOMP | IMMED);
-		}
+		MATHB(p, MATH1, SUB, ZERO, VSEQINSZ, 4, 0);
+		MATHB(p, MATH1, SUB, ZERO, VSEQOUTSZ, 4, 0);
 
 		SEQFIFOSTORE(p, MSG, 0, 0, VLF);
 		ALG_OPERATION(p, OP_ALG_ALGSEL_SNOW_F9, OP_ALG_AAI_F9,
@@ -622,25 +481,9 @@ pdcp_insert_cplane_int_only_op(struct program *p,
 				     ICV_CHECK_DISABLE : ICV_CHECK_ENABLE,
 			      DIR_ENC);
 
-		if (rta_sec_era > RTA_SEC_ERA_2) {
-			SEQFIFOLOAD(p, MSGINSNOOP, 0,
+		SEQFIFOLOAD(p, MSGINSNOOP, 0,
 				    VLF | LAST1 | LAST2 | FLUSH1);
-			MOVE(p, AB1, 0, OFIFO, 0, MATH1, 0);
-		} else {
-			SEQFIFOLOAD(p, MSGINSNOOP, 0,
-				    VLF | LAST1 | LAST2 | FLUSH1);
-			SET_LABEL(p, local_offset);
-
-			/* Shut off automatic Info FIFO entries */
-			LOAD(p, 0, DCTRL, LDOFF_DISABLE_AUTO_NFIFO, 0, IMMED);
-			/*
-			 * Placeholder for MOVE command with length from M1
-			 * register
-			 */
-			MOVE(p, IFIFOAB1, 0, OFIFO, 0, 0, IMMED);
-			/* Enable automatic Info FIFO entries */
-			LOAD(p, 0, DCTRL, LDOFF_ENABLE_AUTO_NFIFO, 0, IMMED);
-		}
+		MOVE(p, AB1, 0, OFIFO, 0, MATH1, 0);
 
 		if (dir == OP_TYPE_DECAP_PROTOCOL)
 			SEQFIFOLOAD(p, ICV2, 4, LAST2);
@@ -655,14 +498,7 @@ pdcp_insert_cplane_int_only_op(struct program *p,
 		    authdata->keylen, INLINE_KEY(authdata));
 		SEQLOAD(p, MATH0, offset, length, 0);
 		JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CALM);
-		if (rta_sec_era > RTA_SEC_ERA_2 ||
-		    (rta_sec_era == RTA_SEC_ERA_2 &&
-		     era_2_sw_hfn_ovrd == 0)) {
-			SEQINPTR(p, 0, length, RTO);
-		} else {
-			SEQINPTR(p, 0, 5, RTO);
-			SEQFIFOLOAD(p, SKIP, 4, 0);
-		}
+		SEQINPTR(p, 0, length, RTO);
 
 		if (swap == false) {
 			MATHB(p, MATH0, AND, sn_mask, MATH1, 8,
@@ -686,40 +522,12 @@ pdcp_insert_cplane_int_only_op(struct program *p,
 			MATHB(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, MATH1, 4,
 			      IMMED2);
 		} else {
-			if (rta_sec_era > RTA_SEC_ERA_2) {
-				MATHB(p, SEQINSZ, SUB, ZERO, MATH1, 4,
-				      0);
-			} else {
-				MATHB(p, SEQINSZ, ADD, ONE, MATH1, 4,
-				      0);
-				MATHB(p, MATH1, SUB, ONE, MATH1, 4,
-				      0);
-			}
+			MATHB(p, SEQINSZ, SUB, ZERO, MATH1, 4, 0);
 		}
 
-		if (rta_sec_era > RTA_SEC_ERA_2) {
-			MATHB(p, MATH1, SUB, ZERO, VSEQINSZ, 4, 0);
-			MATHB(p, MATH1, SUB, ZERO, VSEQOUTSZ, 4, 0);
-		} else {
-			MATHB(p, ZERO, ADD, MATH1, VSEQINSZ, 4, 0);
-			MATHB(p, ZERO, ADD, MATH1, VSEQOUTSZ, 4, 0);
-
-			/*
-			 * Since MOVELEN is available only starting with
-			 * SEC ERA 3, use poor man's MOVELEN: create a MOVE
-			 * command dynamically by writing the length from M1 by
-			 * OR-ing the command in the M1 register and MOVE the
-			 * result into the descriptor buffer. Care must be taken
-			 * wrt. the location of the command because of SEC
-			 * pipelining. The actual MOVEs are written at the end
-			 * of the descriptor due to calculations needed on the
-			 * offset in the descriptor for the MOVE command.
-			 */
-			move_cmd_read_descbuf = MOVE(p, DESCBUF, 0, MATH1, 0, 6,
-						     IMMED);
-			move_cmd_write_descbuf = MOVE(p, MATH1, 0, DESCBUF, 0,
-						      8, WAITCOMP | IMMED);
-		}
+		MATHB(p, MATH1, SUB, ZERO, VSEQINSZ, 4, 0);
+		MATHB(p, MATH1, SUB, ZERO, VSEQOUTSZ, 4, 0);
+
 		SEQFIFOSTORE(p, MSG, 0, 0, VLF);
 		ALG_OPERATION(p, OP_ALG_ALGSEL_AES,
 			      OP_ALG_AAI_CMAC,
@@ -728,27 +536,9 @@ pdcp_insert_cplane_int_only_op(struct program *p,
 				     ICV_CHECK_DISABLE : ICV_CHECK_ENABLE,
 			      DIR_ENC);
 
-		if (rta_sec_era > RTA_SEC_ERA_2) {
-			MOVE(p, AB2, 0, OFIFO, 0, MATH1, 0);
-			SEQFIFOLOAD(p, MSGINSNOOP, 0,
+		MOVE(p, AB2, 0, OFIFO, 0, MATH1, 0);
+		SEQFIFOLOAD(p, MSGINSNOOP, 0,
 				    VLF | LAST1 | LAST2 | FLUSH1);
-		} else {
-			SEQFIFOLOAD(p, MSGINSNOOP, 0,
-				    VLF | LAST1 | LAST2 | FLUSH1);
-			SET_LABEL(p, local_offset);
-
-			/* Shut off automatic Info FIFO entries */
-			LOAD(p, 0, DCTRL, LDOFF_DISABLE_AUTO_NFIFO, 0, IMMED);
-
-			/*
-			 * Placeholder for MOVE command with length from
-			 * M1 register
-			 */
-			MOVE(p, IFIFOAB2, 0, OFIFO, 0, 0, IMMED);
-
-			/* Enable automatic Info FIFO entries */
-			LOAD(p, 0, DCTRL, LDOFF_ENABLE_AUTO_NFIFO, 0, IMMED);
-		}
 
 		if (dir == OP_TYPE_DECAP_PROTOCOL)
 			SEQFIFOLOAD(p, ICV1, 4, LAST1 | FLUSH1);
@@ -758,10 +548,6 @@ pdcp_insert_cplane_int_only_op(struct program *p,
 		break;
 
 	case PDCP_AUTH_TYPE_ZUC:
-		if (rta_sec_era < RTA_SEC_ERA_5) {
-			pr_err("Invalid era for selected algorithm\n");
-			return -ENOTSUP;
-		}
 		/* Insert Auth Key */
 		KEY(p, KEY2, authdata->key_enc_flags, authdata->key,
 		    authdata->keylen, INLINE_KEY(authdata));
@@ -817,11 +603,6 @@ pdcp_insert_cplane_int_only_op(struct program *p,
 		return -EINVAL;
 	}
 
-	if (rta_sec_era < RTA_SEC_ERA_3) {
-		PATCH_MOVE(p, move_cmd_read_descbuf, local_offset);
-		PATCH_MOVE(p, move_cmd_write_descbuf, local_offset);
-	}
-
 	return 0;
 }
 
@@ -831,15 +612,14 @@ pdcp_insert_cplane_enc_only_op(struct program *p,
 			       struct alginfo *cipherdata,
 			       struct alginfo *authdata __maybe_unused,
 			       unsigned int dir,
-			       enum pdcp_sn_size sn_size,
-			       unsigned char era_2_sw_hfn_ovrd __maybe_unused)
+			       enum pdcp_sn_size sn_size)
 {
 	uint32_t offset = 0, length = 0, sn_mask = 0;
 	/* Insert Cipher Key */
 	KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
 	    cipherdata->keylen, INLINE_KEY(cipherdata));
 
-	if ((rta_sec_era >= RTA_SEC_ERA_8 && sn_size != PDCP_SN_SIZE_18 &&
+	if ((sn_size != PDCP_SN_SIZE_18 &&
 			!(rta_sec_era == RTA_SEC_ERA_8 &&
 				authdata->algtype == 0))
 			|| (rta_sec_era == RTA_SEC_ERA_10)) {
@@ -889,12 +669,7 @@ pdcp_insert_cplane_enc_only_op(struct program *p,
 	case PDCP_CIPHER_TYPE_SNOW:
 		MOVEB(p, MATH2, 0, CONTEXT1, 0, 8, WAITCOMP | IMMED);
 
-		if (rta_sec_era > RTA_SEC_ERA_2) {
-			MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
-		} else {
-			MATHB(p, SEQINSZ, SUB, ONE, MATH1, 4, 0);
-			MATHB(p, MATH1, ADD, ONE, VSEQINSZ, 4, 0);
-		}
+		MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
 
 		if (dir == OP_TYPE_ENCAP_PROTOCOL)
 			MATHB(p, SEQINSZ, ADD, PDCP_MAC_I_LEN, VSEQOUTSZ, 4,
@@ -913,12 +688,7 @@ pdcp_insert_cplane_enc_only_op(struct program *p,
 	case PDCP_CIPHER_TYPE_AES:
 		MOVEB(p, MATH2, 0, CONTEXT1, 0x10, 0x10, WAITCOMP | IMMED);
 
-		if (rta_sec_era > RTA_SEC_ERA_2) {
-			MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
-		} else {
-			MATHB(p, SEQINSZ, SUB, ONE, MATH1, 4, 0);
-			MATHB(p, MATH1, ADD, ONE, VSEQINSZ, 4, 0);
-		}
+		MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
 
 		if (dir == OP_TYPE_ENCAP_PROTOCOL)
 			MATHB(p, SEQINSZ, ADD, PDCP_MAC_I_LEN, VSEQOUTSZ, 4,
@@ -937,11 +707,6 @@ pdcp_insert_cplane_enc_only_op(struct program *p,
 		break;
 
 	case PDCP_CIPHER_TYPE_ZUC:
-		if (rta_sec_era < RTA_SEC_ERA_5) {
-			pr_err("Invalid era for selected algorithm\n");
-			return -ENOTSUP;
-		}
-
 		MOVEB(p, MATH2, 0, CONTEXT1, 0, 0x08, IMMED);
 		MOVEB(p, MATH2, 0, CONTEXT1, 0x08, 0x08, WAITCOMP | IMMED);
 		MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
@@ -988,8 +753,7 @@ pdcp_insert_uplane_snow_snow_op(struct program *p,
 			      struct alginfo *cipherdata,
 			      struct alginfo *authdata,
 			      unsigned int dir,
-			      enum pdcp_sn_size sn_size,
-			      unsigned char era_2_sw_hfn_ovrd __maybe_unused)
+			      enum pdcp_sn_size sn_size)
 {
 	uint32_t offset = 0, length = 0, sn_mask = 0;
 
@@ -998,7 +762,7 @@ pdcp_insert_uplane_snow_snow_op(struct program *p,
 	KEY(p, KEY2, authdata->key_enc_flags, authdata->key, authdata->keylen,
 	    INLINE_KEY(authdata));
 
-	if (rta_sec_era >= RTA_SEC_ERA_8 && sn_size != PDCP_SN_SIZE_18) {
+	if (sn_size != PDCP_SN_SIZE_18) {
 		int pclid;
 
 		if (sn_size == PDCP_SN_SIZE_5)
@@ -1014,18 +778,13 @@ pdcp_insert_uplane_snow_snow_op(struct program *p,
 	}
 	/* Non-proto is supported only for 5bit cplane and 18bit uplane */
 	switch (sn_size) {
-	case PDCP_SN_SIZE_5:
-		offset = 7;
-		length = 1;
-		sn_mask = (swap == false) ? PDCP_C_PLANE_SN_MASK :
-					PDCP_C_PLANE_SN_MASK_BE;
-		break;
 	case PDCP_SN_SIZE_18:
 		offset = 5;
 		length = 3;
 		sn_mask = (swap == false) ? PDCP_U_PLANE_18BIT_SN_MASK :
 					PDCP_U_PLANE_18BIT_SN_MASK_BE;
 		break;
+	case PDCP_SN_SIZE_5:
 	case PDCP_SN_SIZE_7:
 	case PDCP_SN_SIZE_12:
 	case PDCP_SN_SIZE_15:
@@ -1094,20 +853,13 @@ pdcp_insert_uplane_snow_snow_op(struct program *p,
 		SEQFIFOLOAD(p, MSG1, 4, LAST1 | FLUSH1);
 		JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CLASS1 | NOP | NIFP);
 
-		if (rta_sec_era >= RTA_SEC_ERA_6)
-			LOAD(p, 0, DCTRL, 0, LDLEN_RST_CHA_OFIFO_PTR, IMMED);
+		LOAD(p, 0, DCTRL, 0, LDLEN_RST_CHA_OFIFO_PTR, IMMED);
 
 		MOVE(p, OFIFO, 0, MATH0, 0, 4, WAITCOMP | IMMED);
 
 		NFIFOADD(p, IFIFO, ICV2, 4, LAST2);
 
-		if (rta_sec_era <= RTA_SEC_ERA_2) {
-			/* Shut off automatic Info FIFO entries */
-			LOAD(p, 0, DCTRL, LDOFF_DISABLE_AUTO_NFIFO, 0, IMMED);
-			MOVE(p, MATH0, 0, IFIFOAB2, 0, 4, WAITCOMP | IMMED);
-		} else {
-			MOVE(p, MATH0, 0, IFIFO, 0, 4, WAITCOMP | IMMED);
-		}
+		MOVE(p, MATH0, 0, IFIFO, 0, 4, WAITCOMP | IMMED);
 	}
 
 	return 0;
@@ -1119,19 +871,13 @@ pdcp_insert_uplane_zuc_zuc_op(struct program *p,
 			      struct alginfo *cipherdata,
 			      struct alginfo *authdata,
 			      unsigned int dir,
-			      enum pdcp_sn_size sn_size,
-			      unsigned char era_2_sw_hfn_ovrd __maybe_unused)
+			      enum pdcp_sn_size sn_size)
 {
 	uint32_t offset = 0, length = 0, sn_mask = 0;
 
 	LABEL(keyjump);
 	REFERENCE(pkeyjump);
 
-	if (rta_sec_era < RTA_SEC_ERA_5) {
-		pr_err("Invalid era for selected algorithm\n");
-		return -ENOTSUP;
-	}
-
 	pkeyjump = JUMP(p, keyjump, LOCAL_JUMP, ALL_TRUE, SHRD | SELF | BOTH);
 	KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
 	    cipherdata->keylen, INLINE_KEY(cipherdata));
@@ -1141,7 +887,7 @@ pdcp_insert_uplane_zuc_zuc_op(struct program *p,
 	SET_LABEL(p, keyjump);
 	PATCH_JUMP(p, pkeyjump, keyjump);
 
-	if (rta_sec_era >= RTA_SEC_ERA_8 && sn_size != PDCP_SN_SIZE_18) {
+	if (sn_size != PDCP_SN_SIZE_18) {
 		int pclid;
 
 		if (sn_size == PDCP_SN_SIZE_5)
@@ -1157,18 +903,13 @@ pdcp_insert_uplane_zuc_zuc_op(struct program *p,
 	}
 	/* Non-proto is supported only for 5bit cplane and 18bit uplane */
 	switch (sn_size) {
-	case PDCP_SN_SIZE_5:
-		offset = 7;
-		length = 1;
-		sn_mask = (swap == false) ? PDCP_C_PLANE_SN_MASK :
-					PDCP_C_PLANE_SN_MASK_BE;
-		break;
 	case PDCP_SN_SIZE_18:
 		offset = 5;
 		length = 3;
 		sn_mask = (swap == false) ? PDCP_U_PLANE_18BIT_SN_MASK :
 					PDCP_U_PLANE_18BIT_SN_MASK_BE;
 		break;
+	case PDCP_SN_SIZE_5:
 	case PDCP_SN_SIZE_7:
 	case PDCP_SN_SIZE_12:
 	case PDCP_SN_SIZE_15:
@@ -1243,12 +984,11 @@ pdcp_insert_uplane_aes_aes_op(struct program *p,
 			      struct alginfo *cipherdata,
 			      struct alginfo *authdata,
 			      unsigned int dir,
-			      enum pdcp_sn_size sn_size,
-			      unsigned char era_2_sw_hfn_ovrd __maybe_unused)
+			      enum pdcp_sn_size sn_size)
 {
 	uint32_t offset = 0, length = 0, sn_mask = 0;
 
-	if ((rta_sec_era >= RTA_SEC_ERA_8 && sn_size != PDCP_SN_SIZE_18)) {
+	if (sn_size != PDCP_SN_SIZE_18) {
 		/* Insert Auth Key */
 		KEY(p, KEY2, authdata->key_enc_flags, authdata->key,
 		    authdata->keylen, INLINE_KEY(authdata));
@@ -1392,8 +1132,7 @@ pdcp_insert_cplane_acc_op(struct program *p,
 			  struct alginfo *cipherdata,
 			  struct alginfo *authdata,
 			  unsigned int dir,
-			  enum pdcp_sn_size sn_size,
-			  unsigned char era_2_hfn_ovrd __maybe_unused)
+			  enum pdcp_sn_size sn_size)
 {
 	/* Insert Auth Key */
 	KEY(p, KEY2, authdata->key_enc_flags, authdata->key, authdata->keylen,
@@ -1420,8 +1159,7 @@ pdcp_insert_cplane_snow_aes_op(struct program *p,
 			       struct alginfo *cipherdata,
 			       struct alginfo *authdata,
 			       unsigned int dir,
-			       enum pdcp_sn_size sn_size,
-			       unsigned char era_2_sw_hfn_ovrd)
+			       enum pdcp_sn_size sn_size)
 {
 	uint32_t offset = 0, length = 0, sn_mask = 0;
 
@@ -1429,14 +1167,12 @@ pdcp_insert_cplane_snow_aes_op(struct program *p,
 	LABEL(end_desc);
 	LABEL(local_offset);
 	LABEL(jump_to_beginning);
-	LABEL(fifo_load_mac_i_offset);
 	REFERENCE(seqin_ptr_read);
 	REFERENCE(seqin_ptr_write);
 	REFERENCE(seq_out_read);
 	REFERENCE(jump_back_to_sd_cmd);
-	REFERENCE(move_mac_i_to_desc_buf);
 
-	if ((rta_sec_era >= RTA_SEC_ERA_8 && sn_size != PDCP_SN_SIZE_18) ||
+	if ((sn_size != PDCP_SN_SIZE_18) ||
 		(rta_sec_era == RTA_SEC_ERA_10)) {
 		KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
 				cipherdata->keylen, INLINE_KEY(cipherdata));
@@ -1484,56 +1220,17 @@ pdcp_insert_cplane_snow_aes_op(struct program *p,
 	MATHB(p, MATH1, OR, MATH2, MATH2, 8, 0);
 	SEQSTORE(p, MATH0, offset, length, 0);
 	if (dir == OP_TYPE_ENCAP_PROTOCOL) {
-		if (rta_sec_era > RTA_SEC_ERA_2 ||
-		    (rta_sec_era == RTA_SEC_ERA_2 &&
-				   era_2_sw_hfn_ovrd == 0)) {
-			SEQINPTR(p, 0, length, RTO);
-		} else {
-			SEQINPTR(p, 0, 5, RTO);
-			SEQFIFOLOAD(p, SKIP, 4, 0);
-		}
+		SEQINPTR(p, 0, length, RTO);
+
 		KEY(p, KEY1, authdata->key_enc_flags, authdata->key,
 		    authdata->keylen, INLINE_KEY(authdata));
 		MOVEB(p, MATH2, 0, IFIFOAB1, 0, 0x08, IMMED);
 
-		if (rta_sec_era > RTA_SEC_ERA_2) {
-			MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
-			MATHB(p, SEQINSZ, SUB, ZERO, MATH1, 4, 0);
-			MATHB(p, VSEQINSZ, ADD, PDCP_MAC_I_LEN - 1, VSEQOUTSZ,
-			      4, IMMED2);
-		} else {
-			MATHB(p, SEQINSZ, SUB, MATH3, VSEQINSZ, 4, 0);
-			MATHB(p, VSEQINSZ, ADD, PDCP_MAC_I_LEN - 1, VSEQOUTSZ,
-			      4, IMMED2);
-			/*
-			 * Note: Although the calculations below might seem a
-			 * little off, the logic is the following:
-			 *
-			 * - SEQ IN PTR RTO below needs the full length of the
-			 *   frame; in case of P4080_REV_2_HFN_OV_WORKAROUND,
-			 *   this means the length of the frame to be processed
-			 *   + 4 bytes (the HFN override flag and value).
-			 *   The length of the frame to be processed minus 1
-			 *   byte is in the VSIL register (because
-			 *   VSIL = SIL + 3, due to 1 byte, the header being
-			 *   already written by the SEQ STORE above). So for
-			 *   calculating the length to use in RTO, I add one
-			 *   to the VSIL value in order to obtain the total
-			 *   frame length. This helps in case of P4080 which
-			 *   can have the value 0 as an operand in a MATH
-			 *   command only as SRC1 When the HFN override
-			 *   workaround is not enabled, the length of the
-			 *   frame is given by the SIL register; the
-			 *   calculation is similar to the one in the SEC 4.2
-			 *   and SEC 5.3 cases.
-			 */
-			if (era_2_sw_hfn_ovrd)
-				MATHB(p, VSEQOUTSZ, ADD, ONE, MATH1, 4,
-				      0);
-			else
-				MATHB(p, SEQINSZ, ADD, MATH3, MATH1, 4,
-				      0);
-		}
+		MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
+		MATHB(p, SEQINSZ, SUB, ZERO, MATH1, 4, 0);
+		MATHB(p, VSEQINSZ, ADD, PDCP_MAC_I_LEN - 1, VSEQOUTSZ,
+		      4, IMMED2);
+
 		/*
 		 * Placeholder for filling the length in
 		 * SEQIN PTR RTO below
@@ -1548,24 +1245,14 @@ pdcp_insert_cplane_snow_aes_op(struct program *p,
 			      DIR_DEC);
 		SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1 | FLUSH1);
 		MOVEB(p, CONTEXT1, 0, MATH3, 0, 4, WAITCOMP | IMMED);
-		if (rta_sec_era <= RTA_SEC_ERA_3)
-			LOAD(p, CLRW_CLR_C1KEY |
-			     CLRW_CLR_C1CTX |
-			     CLRW_CLR_C1ICV |
-			     CLRW_CLR_C1DATAS |
-			     CLRW_CLR_C1MODE,
-			     CLRW, 0, 4, IMMED);
-		else
-			LOAD(p, CLRW_RESET_CLS1_CHA |
-			     CLRW_CLR_C1KEY |
-			     CLRW_CLR_C1CTX |
-			     CLRW_CLR_C1ICV |
-			     CLRW_CLR_C1DATAS |
-			     CLRW_CLR_C1MODE,
-			     CLRW, 0, 4, IMMED);
 
-		if (rta_sec_era <= RTA_SEC_ERA_3)
-			LOAD(p, CCTRL_RESET_CHA_ALL, CCTRL, 0, 4, IMMED);
+		LOAD(p, CLRW_RESET_CLS1_CHA |
+		     CLRW_CLR_C1KEY |
+		     CLRW_CLR_C1CTX |
+		     CLRW_CLR_C1ICV |
+		     CLRW_CLR_C1DATAS |
+		     CLRW_CLR_C1MODE,
+		     CLRW, 0, 4, IMMED);
 
 		KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
 		    cipherdata->keylen, INLINE_KEY(cipherdata));
@@ -1573,11 +1260,6 @@ pdcp_insert_cplane_snow_aes_op(struct program *p,
 		MOVEB(p, MATH2, 0, CONTEXT1, 0, 8, IMMED);
 		SEQINPTR(p, 0, 0, RTO);
 
-		if (rta_sec_era == RTA_SEC_ERA_2 && era_2_sw_hfn_ovrd) {
-			SEQFIFOLOAD(p, SKIP, 5, 0);
-			MATHB(p, SEQINSZ, ADD, ONE, SEQINSZ, 4, 0);
-		}
-
 		MATHB(p, SEQINSZ, SUB, length, VSEQINSZ, 4, IMMED2);
 		ALG_OPERATION(p, OP_ALG_ALGSEL_SNOW_F8,
 			      OP_ALG_AAI_F8,
@@ -1586,10 +1268,7 @@ pdcp_insert_cplane_snow_aes_op(struct program *p,
 			      DIR_ENC);
 		SEQFIFOSTORE(p, MSG, 0, 0, VLF);
 
-		if (rta_sec_era > RTA_SEC_ERA_2 ||
-		    (rta_sec_era == RTA_SEC_ERA_2 &&
-				   era_2_sw_hfn_ovrd == 0))
-			SEQFIFOLOAD(p, SKIP, length, 0);
+		SEQFIFOLOAD(p, SKIP, length, 0);
 
 		SEQFIFOLOAD(p, MSG1, 0, VLF);
 		MOVEB(p, MATH3, 0, IFIFOAB1, 0, 4, LAST1 | FLUSH1 | IMMED);
@@ -1598,13 +1277,9 @@ pdcp_insert_cplane_snow_aes_op(struct program *p,
 	} else {
 		MOVEB(p, MATH2, 0, CONTEXT1, 0, 8, IMMED);
 
-		if (rta_sec_era >= RTA_SEC_ERA_5)
-			MOVE(p, CONTEXT1, 0, CONTEXT2, 0, 8, IMMED);
+		MOVE(p, CONTEXT1, 0, CONTEXT2, 0, 8, IMMED);
 
-		if (rta_sec_era > RTA_SEC_ERA_2)
-			MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
-		else
-			MATHB(p, SEQINSZ, SUB, MATH3, VSEQINSZ, 4, 0);
+		MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
 
 		MATHI(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, VSEQOUTSZ, 4, IMMED2);
 /*
@@ -1649,10 +1324,7 @@ pdcp_insert_cplane_snow_aes_op(struct program *p,
 		KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
 		    cipherdata->keylen, INLINE_KEY(cipherdata));
 
-		if (rta_sec_era >= RTA_SEC_ERA_4)
-			MOVE(p, CONTEXT1, 0, CONTEXT2, 0, 8, IMMED);
-		else
-			MOVE(p, CONTEXT1, 0, MATH3, 0, 8, IMMED);
+		MOVE(p, CONTEXT1, 0, CONTEXT2, 0, 8, IMMED);
 
 		ALG_OPERATION(p, OP_ALG_ALGSEL_SNOW_F8,
 			      OP_ALG_AAI_F8,
@@ -1662,22 +1334,15 @@ pdcp_insert_cplane_snow_aes_op(struct program *p,
 		SEQFIFOSTORE(p, MSG, 0, 0, VLF | CONT);
 		SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1 | FLUSH1);
 
-		if (rta_sec_era <= RTA_SEC_ERA_3)
-			move_mac_i_to_desc_buf = MOVE(p, OFIFO, 0, DESCBUF, 0,
-						      4, WAITCOMP | IMMED);
-		else
-			MOVE(p, OFIFO, 0, MATH3, 0, 4, IMMED);
+		MOVE(p, OFIFO, 0, MATH3, 0, 4, IMMED);
 
-		if (rta_sec_era <= RTA_SEC_ERA_3)
-			LOAD(p, CCTRL_RESET_CHA_ALL, CCTRL, 0, 4, IMMED);
-		else
-			LOAD(p, CLRW_RESET_CLS1_CHA |
-			     CLRW_CLR_C1KEY |
-			     CLRW_CLR_C1CTX |
-			     CLRW_CLR_C1ICV |
-			     CLRW_CLR_C1DATAS |
-			     CLRW_CLR_C1MODE,
-			     CLRW, 0, 4, IMMED);
+		LOAD(p, CLRW_RESET_CLS1_CHA |
+		     CLRW_CLR_C1KEY |
+		     CLRW_CLR_C1CTX |
+		     CLRW_CLR_C1ICV |
+		     CLRW_CLR_C1DATAS |
+		     CLRW_CLR_C1MODE,
+		     CLRW, 0, 4, IMMED);
 
 		KEY(p, KEY1, authdata->key_enc_flags, authdata->key,
 		    authdata->keylen, INLINE_KEY(authdata));
@@ -1698,28 +1363,17 @@ pdcp_insert_cplane_snow_aes_op(struct program *p,
 		/* Read the # of bytes written in the output buffer + 1 (HDR) */
 		MATHI(p, VSEQOUTSZ, ADD, length, VSEQINSZ, 4, IMMED2);
 
-		if (rta_sec_era <= RTA_SEC_ERA_3)
-			MOVE(p, MATH3, 0, IFIFOAB1, 0, 8, IMMED);
-		else
-			MOVE(p, CONTEXT2, 0, IFIFOAB1, 0, 8, IMMED);
-
-		if (rta_sec_era == RTA_SEC_ERA_2 && era_2_sw_hfn_ovrd)
-			SEQFIFOLOAD(p, SKIP, 4, 0);
+		MOVE(p, CONTEXT2, 0, IFIFOAB1, 0, 8, IMMED);
 
 		SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1 | FLUSH1);
 
-		if (rta_sec_era >= RTA_SEC_ERA_4) {
-			LOAD(p, NFIFOENTRY_STYPE_ALTSOURCE |
-			     NFIFOENTRY_DEST_CLASS1 |
-			     NFIFOENTRY_DTYPE_ICV |
-			     NFIFOENTRY_LC1 |
-			     NFIFOENTRY_FC1 | 4, NFIFO_SZL, 0, 4, IMMED);
-			MOVE(p, MATH3, 0, ALTSOURCE, 0, 4, IMMED);
-		} else {
-			SET_LABEL(p, fifo_load_mac_i_offset);
-			FIFOLOAD(p, ICV1, fifo_load_mac_i_offset, 4,
-				 LAST1 | FLUSH1 | IMMED);
-		}
+		LOAD(p, NFIFOENTRY_STYPE_ALTSOURCE |
+		     NFIFOENTRY_DEST_CLASS1 |
+		     NFIFOENTRY_DTYPE_ICV |
+		     NFIFOENTRY_LC1 |
+		     NFIFOENTRY_FC1 | 4, NFIFO_SZL, 0, 4, IMMED);
+		MOVE(p, MATH3, 0, ALTSOURCE, 0, 4, IMMED);
+
 
 		SET_LABEL(p, end_desc);
 
@@ -1727,18 +1381,10 @@ pdcp_insert_cplane_snow_aes_op(struct program *p,
 			PATCH_MOVE(p, seq_out_read, end_desc + 1);
 			PATCH_JUMP(p, jump_back_to_sd_cmd,
 				   back_to_sd_offset + jump_back_to_sd_cmd - 5);
-
-			if (rta_sec_era <= RTA_SEC_ERA_3)
-				PATCH_MOVE(p, move_mac_i_to_desc_buf,
-					   fifo_load_mac_i_offset + 1);
 		} else {
 			PATCH_MOVE(p, seq_out_read, end_desc + 2);
 			PATCH_JUMP(p, jump_back_to_sd_cmd,
 				   back_to_sd_offset + jump_back_to_sd_cmd - 5);
-
-			if (rta_sec_era <= RTA_SEC_ERA_3)
-				PATCH_MOVE(p, move_mac_i_to_desc_buf,
-					   fifo_load_mac_i_offset + 1);
 		}
 	}
 
@@ -1751,8 +1397,7 @@ pdcp_insert_cplane_aes_snow_op(struct program *p,
 			       struct alginfo *cipherdata,
 			       struct alginfo *authdata,
 			       unsigned int dir,
-			       enum pdcp_sn_size sn_size,
-			       unsigned char era_2_sw_hfn_ovrd __maybe_unused)
+			       enum pdcp_sn_size sn_size)
 {
 	uint32_t offset = 0, length = 0, sn_mask = 0;
 
@@ -1761,7 +1406,7 @@ pdcp_insert_cplane_aes_snow_op(struct program *p,
 	KEY(p, KEY2, authdata->key_enc_flags, authdata->key, authdata->keylen,
 	    INLINE_KEY(authdata));
 
-	if ((rta_sec_era >= RTA_SEC_ERA_8 && sn_size != PDCP_SN_SIZE_18) ||
+	if ((sn_size != PDCP_SN_SIZE_18) ||
 		(rta_sec_era == RTA_SEC_ERA_10)) {
 		int pclid;
 
@@ -1860,20 +1505,13 @@ pdcp_insert_cplane_aes_snow_op(struct program *p,
 		SEQFIFOLOAD(p, MSG1, 4, LAST1 | FLUSH1);
 		JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CLASS1 | NOP | NIFP);
 
-		if (rta_sec_era >= RTA_SEC_ERA_6)
-			LOAD(p, 0, DCTRL, 0, LDLEN_RST_CHA_OFIFO_PTR, IMMED);
+		LOAD(p, 0, DCTRL, 0, LDLEN_RST_CHA_OFIFO_PTR, IMMED);
 
 		MOVE(p, OFIFO, 0, MATH0, 0, 4, WAITCOMP | IMMED);
 
 		NFIFOADD(p, IFIFO, ICV2, 4, LAST2);
 
-		if (rta_sec_era <= RTA_SEC_ERA_2) {
-			/* Shut off automatic Info FIFO entries */
-			LOAD(p, 0, DCTRL, LDOFF_DISABLE_AUTO_NFIFO, 0, IMMED);
-			MOVE(p, MATH0, 0, IFIFOAB2, 0, 4, WAITCOMP | IMMED);
-		} else {
-			MOVE(p, MATH0, 0, IFIFO, 0, 4, WAITCOMP | IMMED);
-		}
+		MOVE(p, MATH0, 0, IFIFO, 0, 4, WAITCOMP | IMMED);
 	}
 
 	return 0;
@@ -1885,20 +1523,14 @@ pdcp_insert_cplane_snow_zuc_op(struct program *p,
 			       struct alginfo *cipherdata,
 			       struct alginfo *authdata,
 			       unsigned int dir,
-			       enum pdcp_sn_size sn_size,
-			       unsigned char era_2_sw_hfn_ovrd __maybe_unused)
+			       enum pdcp_sn_size sn_size)
 {
 	uint32_t offset = 0, length = 0, sn_mask = 0;
 
 	LABEL(keyjump);
 	REFERENCE(pkeyjump);
 
-	if (rta_sec_era < RTA_SEC_ERA_5) {
-		pr_err("Invalid era for selected algorithm\n");
-		return -ENOTSUP;
-	}
-
-	if ((rta_sec_era >= RTA_SEC_ERA_8 && sn_size != PDCP_SN_SIZE_18) ||
+	if ((sn_size != PDCP_SN_SIZE_18) ||
 		(rta_sec_era == RTA_SEC_ERA_10)) {
 		int pclid;
 		KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
@@ -2010,19 +1642,13 @@ pdcp_insert_cplane_aes_zuc_op(struct program *p,
 			      struct alginfo *cipherdata,
 			      struct alginfo *authdata,
 			      unsigned int dir,
-			      enum pdcp_sn_size sn_size,
-			      unsigned char era_2_sw_hfn_ovrd __maybe_unused)
+			      enum pdcp_sn_size sn_size)
 {
 	uint32_t offset = 0, length = 0, sn_mask = 0;
 	LABEL(keyjump);
 	REFERENCE(pkeyjump);
 
-	if (rta_sec_era < RTA_SEC_ERA_5) {
-		pr_err("Invalid era for selected algorithm\n");
-		return -ENOTSUP;
-	}
-
-	if ((rta_sec_era >= RTA_SEC_ERA_8 && sn_size != PDCP_SN_SIZE_18) ||
+	if ((sn_size != PDCP_SN_SIZE_18) ||
 		(rta_sec_era == RTA_SEC_ERA_10)) {
 		int pclid;
 		KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
@@ -2138,19 +1764,13 @@ pdcp_insert_cplane_zuc_snow_op(struct program *p,
 			       struct alginfo *cipherdata,
 			       struct alginfo *authdata,
 			       unsigned int dir,
-			       enum pdcp_sn_size sn_size,
-			       unsigned char era_2_sw_hfn_ovrd __maybe_unused)
+			       enum pdcp_sn_size sn_size)
 {
 	uint32_t offset = 0, length = 0, sn_mask = 0;
 	LABEL(keyjump);
 	REFERENCE(pkeyjump);
 
-	if (rta_sec_era < RTA_SEC_ERA_5) {
-		pr_err("Invalid era for selected algorithm\n");
-		return -ENOTSUP;
-	}
-
-	if ((rta_sec_era >= RTA_SEC_ERA_8 && sn_size != PDCP_SN_SIZE_18) ||
+	if ((sn_size != PDCP_SN_SIZE_18) ||
 		(rta_sec_era == RTA_SEC_ERA_10)) {
 		int pclid;
 		KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
@@ -2259,13 +1879,12 @@ pdcp_insert_cplane_zuc_snow_op(struct program *p,
 		SEQFIFOLOAD(p, MSG1, 4, LAST1 | FLUSH1);
 		JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CLASS1 | NOP | NIFP);
 
-		if (rta_sec_era >= RTA_SEC_ERA_6)
-			/*
-			 * For SEC ERA 6, there's a problem with the OFIFO
-			 * pointer, and thus it needs to be reset here before
-			 * moving to M0.
-			 */
-			LOAD(p, 0, DCTRL, 0, LDLEN_RST_CHA_OFIFO_PTR, IMMED);
+		/*
+		 * For SEC ERA 6, there's a problem with the OFIFO
+		 * pointer, and thus it needs to be reset here before
+		 * moving to M0.
+		 */
+		LOAD(p, 0, DCTRL, 0, LDLEN_RST_CHA_OFIFO_PTR, IMMED);
 
 		/* Put ICV to M0 before sending it to C2 for comparison. */
 		MOVEB(p, OFIFO, 0, MATH0, 0, 4, WAITCOMP | IMMED);
@@ -2287,16 +1906,11 @@ pdcp_insert_cplane_zuc_aes_op(struct program *p,
 			      struct alginfo *cipherdata,
 			      struct alginfo *authdata,
 			      unsigned int dir,
-			      enum pdcp_sn_size sn_size,
-			      unsigned char era_2_sw_hfn_ovrd __maybe_unused)
+			      enum pdcp_sn_size sn_size)
 {
 	uint32_t offset = 0, length = 0, sn_mask = 0;
-	if (rta_sec_era < RTA_SEC_ERA_5) {
-		pr_err("Invalid era for selected algorithm\n");
-		return -ENOTSUP;
-	}
 
-	if ((rta_sec_era >= RTA_SEC_ERA_8 && sn_size != PDCP_SN_SIZE_18) ||
+	if ((sn_size != PDCP_SN_SIZE_18) ||
 		(rta_sec_era == RTA_SEC_ERA_10)) {
 		int pclid;
 
@@ -2459,7 +2073,7 @@ pdcp_insert_uplane_no_int_op(struct program *p,
 	KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
 	    cipherdata->keylen, INLINE_KEY(cipherdata));
 
-	if ((rta_sec_era >= RTA_SEC_ERA_8 && sn_size == PDCP_SN_SIZE_15) ||
+	if ((sn_size == PDCP_SN_SIZE_15) ||
 			(rta_sec_era >= RTA_SEC_ERA_10)) {
 		PROTOCOL(p, dir, OP_PCLID_LTE_PDCP_USER,
 			 (uint16_t)cipherdata->algtype);
@@ -2513,10 +2127,6 @@ pdcp_insert_uplane_no_int_op(struct program *p,
 		break;
 
 	case PDCP_CIPHER_TYPE_ZUC:
-		if (rta_sec_era < RTA_SEC_ERA_5) {
-			pr_err("Invalid era for selected algorithm\n");
-			return -ENOTSUP;
-		}
 		MOVEB(p, MATH2, 0, CONTEXT1, 0, 0x08, IMMED);
 		MOVEB(p, MATH2, 0, CONTEXT1, 0x08, 0x08, WAITCOMP | IMMED);
 
@@ -2546,7 +2156,6 @@ static inline int
 insert_hfn_ov_op(struct program *p,
 		 uint32_t shift,
 		 enum pdb_type_e pdb_type,
-		 unsigned char era_2_sw_hfn_ovrd,
 		 bool clear_dpovrd_at_end)
 {
 	uint32_t imm = PDCP_DPOVRD_HFN_OV_EN;
@@ -2554,9 +2163,6 @@ insert_hfn_ov_op(struct program *p,
 	LABEL(keyjump);
 	REFERENCE(pkeyjump);
 
-	if (rta_sec_era == RTA_SEC_ERA_2 && !era_2_sw_hfn_ovrd)
-		return 0;
-
 	switch (pdb_type) {
 	case PDCP_PDB_TYPE_NO_PDB:
 		/*
@@ -2579,26 +2185,16 @@ insert_hfn_ov_op(struct program *p,
 		return -EINVAL;
 	}
 
-	if (rta_sec_era > RTA_SEC_ERA_2) {
-		MATHB(p, DPOVRD, AND, imm, NONE, 8, IFB | IMMED2);
-	} else {
-		SEQLOAD(p, MATH0, 4, 4, 0);
-		JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CALM);
-		MATHB(p, MATH0, AND, imm, NONE, 8, IFB | IMMED2);
-		SEQSTORE(p, MATH0, 4, 4, 0);
-	}
+	MATHB(p, DPOVRD, AND, imm, NONE, 8, IFB | IMMED2);
 
 	pkeyjump = JUMP(p, keyjump, LOCAL_JUMP, ALL_TRUE, MATH_Z);
 
-	if (rta_sec_era > RTA_SEC_ERA_2)
-		MATHI(p, DPOVRD, LSHIFT, shift, MATH0, 4, IMMED2);
-	else
-		MATHB(p, MATH0, LSHIFT, shift, MATH0, 4, IMMED2);
+	MATHI(p, DPOVRD, LSHIFT, shift, MATH0, 4, IMMED2);
 
 	MATHB(p, MATH0, SHLD, MATH0, MATH0, 8, 0);
 	MOVE(p, MATH0, 0, DESCBUF, hfn_pdb_offset, 4, IMMED);
 
-	if (clear_dpovrd_at_end && (rta_sec_era >= RTA_SEC_ERA_8)) {
+	if (clear_dpovrd_at_end) {
 		/*
 		 * For ERA8, DPOVRD could be handled by the PROTOCOL command
 		 * itself. For now, this is not done. Thus, clear DPOVRD here
@@ -2621,97 +2217,28 @@ cnstr_pdcp_c_plane_pdb(struct program *p,
 		       enum pdcp_sn_size sn_size,
 		       unsigned char bearer,
 		       unsigned char direction,
-		       uint32_t hfn_threshold,
-		       struct alginfo *cipherdata,
-		       struct alginfo *authdata)
+		       uint32_t hfn_threshold)
 {
 	struct pdcp_pdb pdb;
-	enum pdb_type_e
-		pdb_mask[PDCP_CIPHER_TYPE_INVALID][PDCP_AUTH_TYPE_INVALID] = {
-			{	/* NULL */
-				PDCP_PDB_TYPE_NO_PDB,		/* NULL */
-				PDCP_PDB_TYPE_FULL_PDB,		/* SNOW f9 */
-				PDCP_PDB_TYPE_FULL_PDB,		/* AES CMAC */
-				PDCP_PDB_TYPE_FULL_PDB		/* ZUC-I */
-			},
-			{	/* SNOW f8 */
-				PDCP_PDB_TYPE_FULL_PDB,		/* NULL */
-				PDCP_PDB_TYPE_FULL_PDB,		/* SNOW f9 */
-				PDCP_PDB_TYPE_REDUCED_PDB,	/* AES CMAC */
-				PDCP_PDB_TYPE_REDUCED_PDB	/* ZUC-I */
-			},
-			{	/* AES CTR */
-				PDCP_PDB_TYPE_FULL_PDB,		/* NULL */
-				PDCP_PDB_TYPE_REDUCED_PDB,	/* SNOW f9 */
-				PDCP_PDB_TYPE_FULL_PDB,		/* AES CMAC */
-				PDCP_PDB_TYPE_REDUCED_PDB	/* ZUC-I */
-			},
-			{	/* ZUC-E */
-				PDCP_PDB_TYPE_FULL_PDB,		/* NULL */
-				PDCP_PDB_TYPE_REDUCED_PDB,	/* SNOW f9 */
-				PDCP_PDB_TYPE_REDUCED_PDB,	/* AES CMAC */
-				PDCP_PDB_TYPE_FULL_PDB		/* ZUC-I */
-			},
-	};
-
-	if (rta_sec_era >= RTA_SEC_ERA_8) {
-		memset(&pdb, 0x00, sizeof(struct pdcp_pdb));
-
-		/* To support 12-bit seq numbers, we use u-plane opt in pdb.
-		 * SEC supports 5-bit only with c-plane opt in pdb.
-		 */
-		if (sn_size == PDCP_SN_SIZE_12) {
-			pdb.hfn_res = hfn << PDCP_U_PLANE_PDB_LONG_SN_HFN_SHIFT;
-			pdb.bearer_dir_res = (uint32_t)
-				((bearer << PDCP_U_PLANE_PDB_BEARER_SHIFT) |
-				 (direction << PDCP_U_PLANE_PDB_DIR_SHIFT));
-
-			pdb.hfn_thr_res =
-			hfn_threshold << PDCP_U_PLANE_PDB_LONG_SN_HFN_THR_SHIFT;
-
-		} else {
-			/* This means 5-bit c-plane.
-			 * Here we use c-plane opt in pdb
-			 */
-
-			/* This is a HW issue. Bit 2 should be set to zero,
-			 * but it does not work this way. Override here.
-			 */
-			pdb.opt_res.rsvd = 0x00000002;
-
-			/* Copy relevant information from user to PDB */
-			pdb.hfn_res = hfn << PDCP_C_PLANE_PDB_HFN_SHIFT;
-			pdb.bearer_dir_res = (uint32_t)
-				((bearer << PDCP_C_PLANE_PDB_BEARER_SHIFT) |
-				(direction << PDCP_C_PLANE_PDB_DIR_SHIFT));
-			pdb.hfn_thr_res =
-			hfn_threshold << PDCP_C_PLANE_PDB_HFN_THR_SHIFT;
-		}
-
-		/* copy PDB in descriptor*/
-		__rta_out32(p, pdb.opt_res.opt);
-		__rta_out32(p, pdb.hfn_res);
-		__rta_out32(p, pdb.bearer_dir_res);
-		__rta_out32(p, pdb.hfn_thr_res);
 
-		return PDCP_PDB_TYPE_FULL_PDB;
-	}
+	memset(&pdb, 0x00, sizeof(struct pdcp_pdb));
 
-	switch (pdb_mask[cipherdata->algtype][authdata->algtype]) {
-	case PDCP_PDB_TYPE_NO_PDB:
-		break;
+	/* To support 12-bit seq numbers, we use u-plane opt in pdb.
+	 * SEC supports 5-bit only with c-plane opt in pdb.
+	 */
+	if (sn_size == PDCP_SN_SIZE_12) {
+		pdb.hfn_res = hfn << PDCP_U_PLANE_PDB_LONG_SN_HFN_SHIFT;
+		pdb.bearer_dir_res = (uint32_t)
+			((bearer << PDCP_U_PLANE_PDB_BEARER_SHIFT) |
+			 (direction << PDCP_U_PLANE_PDB_DIR_SHIFT));
 
-	case PDCP_PDB_TYPE_REDUCED_PDB:
-		__rta_out32(p, (hfn << PDCP_C_PLANE_PDB_HFN_SHIFT));
-		__rta_out32(p,
-			    (uint32_t)((bearer <<
-					PDCP_C_PLANE_PDB_BEARER_SHIFT) |
-					(direction <<
-					 PDCP_C_PLANE_PDB_DIR_SHIFT)));
-		break;
+		pdb.hfn_thr_res =
+		hfn_threshold << PDCP_U_PLANE_PDB_LONG_SN_HFN_THR_SHIFT;
 
-	case PDCP_PDB_TYPE_FULL_PDB:
-		memset(&pdb, 0x00, sizeof(struct pdcp_pdb));
+	} else {
+		/* This means 5-bit c-plane.
+		 * Here we use c-plane opt in pdb
+		 */
 
 		/* This is a HW issue. Bit 2 should be set to zero,
 		 * but it does not work this way. Override here.
@@ -2722,23 +2249,18 @@ cnstr_pdcp_c_plane_pdb(struct program *p,
 		pdb.hfn_res = hfn << PDCP_C_PLANE_PDB_HFN_SHIFT;
 		pdb.bearer_dir_res = (uint32_t)
 			((bearer << PDCP_C_PLANE_PDB_BEARER_SHIFT) |
-			 (direction << PDCP_C_PLANE_PDB_DIR_SHIFT));
+			(direction << PDCP_C_PLANE_PDB_DIR_SHIFT));
 		pdb.hfn_thr_res =
-			hfn_threshold << PDCP_C_PLANE_PDB_HFN_THR_SHIFT;
-
-		/* copy PDB in descriptor*/
-		__rta_out32(p, pdb.opt_res.opt);
-		__rta_out32(p, pdb.hfn_res);
-		__rta_out32(p, pdb.bearer_dir_res);
-		__rta_out32(p, pdb.hfn_thr_res);
-
-		break;
-
-	default:
-		return PDCP_PDB_TYPE_INVALID;
+		hfn_threshold << PDCP_C_PLANE_PDB_HFN_THR_SHIFT;
 	}
 
-	return pdb_mask[cipherdata->algtype][authdata->algtype];
+	/* copy PDB in descriptor*/
+	__rta_out32(p, pdb.opt_res.opt);
+	__rta_out32(p, pdb.hfn_res);
+	__rta_out32(p, pdb.bearer_dir_res);
+	__rta_out32(p, pdb.hfn_thr_res);
+
+	return PDCP_PDB_TYPE_FULL_PDB;
 }
 
 /*
@@ -2817,7 +2339,7 @@ cnstr_pdcp_u_plane_pdb(struct program *p,
 		pdb.hfn_thr_res =
 			hfn_threshold<<PDCP_U_PLANE_PDB_18BIT_SN_HFN_THR_SHIFT;
 
-		if (rta_sec_era <= RTA_SEC_ERA_8) {
+		if (rta_sec_era == RTA_SEC_ERA_8) {
 			if (cipherdata && authdata)
 				pdb_type = pdb_mask[cipherdata->algtype]
 						   [authdata->algtype];
@@ -2857,6 +2379,7 @@ cnstr_pdcp_u_plane_pdb(struct program *p,
 
 	return pdb_type;
 }
+
 /**
  * cnstr_shdsc_pdcp_c_plane_encap - Function for creating a PDCP Control Plane
  *                                  encapsulation descriptor.
@@ -2874,9 +2397,6 @@ cnstr_pdcp_u_plane_pdb(struct program *p,
  *              Valid algorithm values are those from cipher_type_pdcp enum.
  * @authdata: pointer to authentication transform definitions
  *            Valid algorithm values are those from auth_type_pdcp enum.
- * @era_2_sw_hfn_ovrd: if software HFN override mechanism is desired for
- *                     this descriptor. Note: Can only be used for
- *                     SEC ERA 2.
  * Return: size of descriptor written in words or negative number on error.
  *         Once the function returns, the value of this parameter can be used
  *         for reclaiming the space that wasn't used for the descriptor.
@@ -2895,14 +2415,12 @@ cnstr_shdsc_pdcp_c_plane_encap(uint32_t *descbuf,
 			       unsigned char direction,
 			       uint32_t hfn_threshold,
 			       struct alginfo *cipherdata,
-			       struct alginfo *authdata,
-			       unsigned char era_2_sw_hfn_ovrd)
+			       struct alginfo *authdata)
 {
 	static int
 		(*pdcp_cp_fp[PDCP_CIPHER_TYPE_INVALID][PDCP_AUTH_TYPE_INVALID])
 			(struct program*, bool swap, struct alginfo *,
-			 struct alginfo *, unsigned int, enum pdcp_sn_size,
-			unsigned char __maybe_unused) = {
+			 struct alginfo *, unsigned int dir, enum pdcp_sn_size) = {
 		{	/* NULL */
 			pdcp_insert_cplane_null_op,	/* NULL */
 			pdcp_insert_cplane_int_only_op,	/* SNOW f9 */
@@ -2961,11 +2479,6 @@ cnstr_shdsc_pdcp_c_plane_encap(uint32_t *descbuf,
 	int err;
 	LABEL(pdb_end);
 
-	if (rta_sec_era != RTA_SEC_ERA_2 && era_2_sw_hfn_ovrd) {
-		pr_err("Cannot select SW HFN override for other era than 2");
-		return -EINVAL;
-	}
-
 	if (sn_size != PDCP_SN_SIZE_12 && sn_size != PDCP_SN_SIZE_5) {
 		pr_err("C-plane supports only 5-bit and 12-bit sequence numbers\n");
 		return -EINVAL;
@@ -2984,14 +2497,11 @@ cnstr_shdsc_pdcp_c_plane_encap(uint32_t *descbuf,
 			sn_size,
 			bearer,
 			direction,
-			hfn_threshold,
-			cipherdata,
-			authdata);
+			hfn_threshold);
 
 	SET_LABEL(p, pdb_end);
 
-	err = insert_hfn_ov_op(p, sn_size, pdb_type,
-			       era_2_sw_hfn_ovrd, true);
+	err = insert_hfn_ov_op(p, sn_size, pdb_type, true);
 	if (err)
 		return err;
 
@@ -3000,8 +2510,7 @@ cnstr_shdsc_pdcp_c_plane_encap(uint32_t *descbuf,
 		cipherdata,
 		authdata,
 		OP_TYPE_ENCAP_PROTOCOL,
-		sn_size,
-		era_2_sw_hfn_ovrd);
+		sn_size);
 	if (err)
 		return err;
 
@@ -3027,9 +2536,6 @@ cnstr_shdsc_pdcp_c_plane_encap(uint32_t *descbuf,
  *              Valid algorithm values are those from cipher_type_pdcp enum.
  * @authdata: pointer to authentication transform definitions
  *            Valid algorithm values are those from auth_type_pdcp enum.
- * @era_2_sw_hfn_ovrd: if software HFN override mechanism is desired for
- *                     this descriptor. Note: Can only be used for
- *                     SEC ERA 2.
  *
  * Return: size of descriptor written in words or negative number on error.
  *         Once the function returns, the value of this parameter can be used
@@ -3049,14 +2555,12 @@ cnstr_shdsc_pdcp_c_plane_decap(uint32_t *descbuf,
 			       unsigned char direction,
 			       uint32_t hfn_threshold,
 			       struct alginfo *cipherdata,
-			       struct alginfo *authdata,
-			       unsigned char era_2_sw_hfn_ovrd)
+			       struct alginfo *authdata)
 {
 	static int
 		(*pdcp_cp_fp[PDCP_CIPHER_TYPE_INVALID][PDCP_AUTH_TYPE_INVALID])
 			(struct program*, bool swap, struct alginfo *,
-			 struct alginfo *, unsigned int, enum pdcp_sn_size,
-			 unsigned char) = {
+			 struct alginfo *, unsigned int dir, enum pdcp_sn_size) = {
 		{	/* NULL */
 			pdcp_insert_cplane_null_op,	/* NULL */
 			pdcp_insert_cplane_int_only_op,	/* SNOW f9 */
@@ -3115,11 +2619,6 @@ cnstr_shdsc_pdcp_c_plane_decap(uint32_t *descbuf,
 	int err;
 	LABEL(pdb_end);
 
-	if (rta_sec_era != RTA_SEC_ERA_2 && era_2_sw_hfn_ovrd) {
-		pr_err("Cannot select SW HFN override for other era than 2");
-		return -EINVAL;
-	}
-
 	if (sn_size != PDCP_SN_SIZE_12 && sn_size != PDCP_SN_SIZE_5) {
 		pr_err("C-plane supports only 5-bit and 12-bit sequence numbers\n");
 		return -EINVAL;
@@ -3138,14 +2637,11 @@ cnstr_shdsc_pdcp_c_plane_decap(uint32_t *descbuf,
 			sn_size,
 			bearer,
 			direction,
-			hfn_threshold,
-			cipherdata,
-			authdata);
+			hfn_threshold);
 
 	SET_LABEL(p, pdb_end);
 
-	err = insert_hfn_ov_op(p, sn_size, pdb_type,
-			       era_2_sw_hfn_ovrd, true);
+	err = insert_hfn_ov_op(p, sn_size, pdb_type, true);
 	if (err)
 		return err;
 
@@ -3154,8 +2650,7 @@ cnstr_shdsc_pdcp_c_plane_decap(uint32_t *descbuf,
 		cipherdata,
 		authdata,
 		OP_TYPE_DECAP_PROTOCOL,
-		sn_size,
-		era_2_sw_hfn_ovrd);
+		sn_size);
 	if (err)
 		return err;
 
@@ -3170,14 +2665,12 @@ pdcp_insert_uplane_with_int_op(struct program *p,
 			      struct alginfo *cipherdata,
 			      struct alginfo *authdata,
 			      enum pdcp_sn_size sn_size,
-			      unsigned char era_2_sw_hfn_ovrd,
 			      unsigned int dir)
 {
 	static int
 		(*pdcp_cp_fp[PDCP_CIPHER_TYPE_INVALID][PDCP_AUTH_TYPE_INVALID])
 			(struct program*, bool swap, struct alginfo *,
-			 struct alginfo *, unsigned int, enum pdcp_sn_size,
-			unsigned char __maybe_unused) = {
+			 struct alginfo *, unsigned int dir, enum pdcp_sn_size) = {
 		{	/* NULL */
 			pdcp_insert_cplane_null_op,	/* NULL */
 			pdcp_insert_cplane_int_only_op,	/* SNOW f9 */
@@ -3210,8 +2703,7 @@ pdcp_insert_uplane_with_int_op(struct program *p,
 		cipherdata,
 		authdata,
 		dir,
-		sn_size,
-		era_2_sw_hfn_ovrd);
+		sn_size);
 	if (err)
 		return err;
 
@@ -3234,9 +2726,6 @@ pdcp_insert_uplane_with_int_op(struct program *p,
  *                 keys should be renegotiated at the earliest convenience.
  * @cipherdata: pointer to block cipher transform definitions
  *              Valid algorithm values are those from cipher_type_pdcp enum.
- * @era_2_sw_hfn_ovrd: if software HFN override mechanism is desired for
- *                     this descriptor. Note: Can only be used for
- *                     SEC ERA 2.
  *
  * Return: size of descriptor written in words or negative number on error.
  *         Once the function returns, the value of this parameter can be used
@@ -3256,8 +2745,7 @@ cnstr_shdsc_pdcp_u_plane_encap(uint32_t *descbuf,
 			       unsigned short direction,
 			       uint32_t hfn_threshold,
 			       struct alginfo *cipherdata,
-			       struct alginfo *authdata,
-			       unsigned char era_2_sw_hfn_ovrd)
+			       struct alginfo *authdata)
 {
 	struct program prg;
 	struct program *p = &prg;
@@ -3292,16 +2780,6 @@ cnstr_shdsc_pdcp_u_plane_encap(uint32_t *descbuf,
 	};
 	LABEL(pdb_end);
 
-	if (rta_sec_era != RTA_SEC_ERA_2 && era_2_sw_hfn_ovrd) {
-		pr_err("Cannot select SW HFN ovrd for other era than 2");
-		return -EINVAL;
-	}
-
-	if (authdata && !authdata->algtype && rta_sec_era < RTA_SEC_ERA_8) {
-		pr_err("Cannot use u-plane auth with era < 8");
-		return -EINVAL;
-	}
-
 	PROGRAM_CNTXT_INIT(p, descbuf, 0);
 	if (swap)
 		PROGRAM_SET_BSWAP(p);
@@ -3321,7 +2799,7 @@ cnstr_shdsc_pdcp_u_plane_encap(uint32_t *descbuf,
 	}
 	SET_LABEL(p, pdb_end);
 
-	err = insert_hfn_ov_op(p, sn_size, pdb_type, era_2_sw_hfn_ovrd, true);
+	err = insert_hfn_ov_op(p, sn_size, pdb_type, true);
 	if (err)
 		return err;
 
@@ -3330,10 +2808,6 @@ cnstr_shdsc_pdcp_u_plane_encap(uint32_t *descbuf,
 	case PDCP_SN_SIZE_12:
 		switch (cipherdata->algtype) {
 		case PDCP_CIPHER_TYPE_ZUC:
-			if (rta_sec_era < RTA_SEC_ERA_5) {
-				pr_err("Invalid era for selected algorithm\n");
-				return -ENOTSUP;
-			}
 			/* fallthrough */
 		case PDCP_CIPHER_TYPE_AES:
 		case PDCP_CIPHER_TYPE_SNOW:
@@ -3342,7 +2816,7 @@ cnstr_shdsc_pdcp_u_plane_encap(uint32_t *descbuf,
 					authdata && authdata->algtype == 0){
 				err = pdcp_insert_uplane_with_int_op(p, swap,
 						cipherdata, authdata,
-						sn_size, era_2_sw_hfn_ovrd,
+						sn_size,
 						OP_TYPE_ENCAP_PROTOCOL);
 				if (err)
 					return err;
@@ -3388,7 +2862,7 @@ cnstr_shdsc_pdcp_u_plane_encap(uint32_t *descbuf,
 		if (authdata) {
 			err = pdcp_insert_uplane_with_int_op(p, swap,
 					cipherdata, authdata,
-					sn_size, era_2_sw_hfn_ovrd,
+					sn_size,
 					OP_TYPE_ENCAP_PROTOCOL);
 			if (err)
 				return err;
@@ -3437,9 +2911,6 @@ cnstr_shdsc_pdcp_u_plane_encap(uint32_t *descbuf,
  *                 keys should be renegotiated at the earliest convenience.
  * @cipherdata: pointer to block cipher transform definitions
  *              Valid algorithm values are those from cipher_type_pdcp enum.
- * @era_2_sw_hfn_ovrd: if software HFN override mechanism is desired for
- *                     this descriptor. Note: Can only be used for
- *                     SEC ERA 2.
  *
  * Return: size of descriptor written in words or negative number on error.
  *         Once the function returns, the value of this parameter can be used
@@ -3459,8 +2930,7 @@ cnstr_shdsc_pdcp_u_plane_decap(uint32_t *descbuf,
 			       unsigned short direction,
 			       uint32_t hfn_threshold,
 			       struct alginfo *cipherdata,
-			       struct alginfo *authdata,
-			       unsigned char era_2_sw_hfn_ovrd)
+			       struct alginfo *authdata)
 {
 	struct program prg;
 	struct program *p = &prg;
@@ -3496,16 +2966,6 @@ cnstr_shdsc_pdcp_u_plane_decap(uint32_t *descbuf,
 
 	LABEL(pdb_end);
 
-	if (rta_sec_era != RTA_SEC_ERA_2 && era_2_sw_hfn_ovrd) {
-		pr_err("Cannot select SW HFN override for other era than 2");
-		return -EINVAL;
-	}
-
-	if (authdata && !authdata->algtype && rta_sec_era < RTA_SEC_ERA_8) {
-		pr_err("Cannot use u-plane auth with era < 8");
-		return -EINVAL;
-	}
-
 	PROGRAM_CNTXT_INIT(p, descbuf, 0);
 	if (swap)
 		PROGRAM_SET_BSWAP(p);
@@ -3525,7 +2985,7 @@ cnstr_shdsc_pdcp_u_plane_decap(uint32_t *descbuf,
 	}
 	SET_LABEL(p, pdb_end);
 
-	err = insert_hfn_ov_op(p, sn_size, pdb_type, era_2_sw_hfn_ovrd, true);
+	err = insert_hfn_ov_op(p, sn_size, pdb_type, true);
 	if (err)
 		return err;
 
@@ -3534,10 +2994,6 @@ cnstr_shdsc_pdcp_u_plane_decap(uint32_t *descbuf,
 	case PDCP_SN_SIZE_12:
 		switch (cipherdata->algtype) {
 		case PDCP_CIPHER_TYPE_ZUC:
-			if (rta_sec_era < RTA_SEC_ERA_5) {
-				pr_err("Invalid era for selected algorithm\n");
-				return -ENOTSUP;
-			}
 			/* fallthrough */
 		case PDCP_CIPHER_TYPE_AES:
 		case PDCP_CIPHER_TYPE_SNOW:
@@ -3555,7 +3011,7 @@ cnstr_shdsc_pdcp_u_plane_decap(uint32_t *descbuf,
 			else if (authdata && authdata->algtype == 0) {
 				err = pdcp_insert_uplane_with_int_op(p, swap,
 						cipherdata, authdata,
-						sn_size, era_2_sw_hfn_ovrd,
+						sn_size,
 						OP_TYPE_DECAP_PROTOCOL);
 				if (err)
 					return err;
@@ -3589,7 +3045,7 @@ cnstr_shdsc_pdcp_u_plane_decap(uint32_t *descbuf,
 		if (authdata) {
 			err = pdcp_insert_uplane_with_int_op(p, swap,
 					cipherdata, authdata,
-					sn_size, era_2_sw_hfn_ovrd,
+					sn_size,
 					OP_TYPE_DECAP_PROTOCOL);
 			if (err)
 				return err;
@@ -3649,9 +3105,6 @@ cnstr_shdsc_pdcp_short_mac(uint32_t *descbuf,
 	struct program prg;
 	struct program *p = &prg;
 	uint32_t iv[3] = {0, 0, 0};
-	LABEL(local_offset);
-	REFERENCE(move_cmd_read_descbuf);
-	REFERENCE(move_cmd_write_descbuf);
 
 	PROGRAM_CNTXT_INIT(p, descbuf, 0);
 	if (swap)
@@ -3661,52 +3114,15 @@ cnstr_shdsc_pdcp_short_mac(uint32_t *descbuf,
 
 	SHR_HDR(p, SHR_ALWAYS, 1, 0);
 
-	if (rta_sec_era > RTA_SEC_ERA_2) {
-		MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
-		MATHB(p, SEQINSZ, SUB, ZERO, MATH1, 4, 0);
-	} else {
-		MATHB(p, SEQINSZ, ADD, ONE, MATH1, 4, 0);
-		MATHB(p, MATH1, SUB, ONE, MATH1, 4, 0);
-		MATHB(p, ZERO, ADD, MATH1, VSEQINSZ, 4, 0);
-		MOVE(p, MATH1, 0, MATH0, 0, 8, IMMED);
+	MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
+	MATHB(p, SEQINSZ, SUB, ZERO, MATH1, 4, 0);
 
-		/*
-		 * Since MOVELEN is available only starting with
-		 * SEC ERA 3, use poor man's MOVELEN: create a MOVE
-		 * command dynamically by writing the length from M1 by
-		 * OR-ing the command in the M1 register and MOVE the
-		 * result into the descriptor buffer. Care must be taken
-		 * wrt. the location of the command because of SEC
-		 * pipelining. The actual MOVEs are written at the end
-		 * of the descriptor due to calculations needed on the
-		 * offset in the descriptor for the MOVE command.
-		 */
-		move_cmd_read_descbuf = MOVE(p, DESCBUF, 0, MATH0, 0, 6,
-					     IMMED);
-		move_cmd_write_descbuf = MOVE(p, MATH0, 0, DESCBUF, 0, 8,
-					      WAITCOMP | IMMED);
-	}
 	MATHB(p, ZERO, ADD, MATH1, VSEQOUTSZ, 4, 0);
 
 	switch (authdata->algtype) {
 	case PDCP_AUTH_TYPE_NULL:
 		SEQFIFOSTORE(p, MSG, 0, 0, VLF);
-		if (rta_sec_era > RTA_SEC_ERA_2) {
-			MOVE(p, AB1, 0, OFIFO, 0, MATH1, 0);
-		} else {
-			SET_LABEL(p, local_offset);
-
-			/* Shut off automatic Info FIFO entries */
-			LOAD(p, 0, DCTRL, LDOFF_DISABLE_AUTO_NFIFO, 0, IMMED);
-
-			/* Placeholder for MOVE command with length from M1
-			 * register
-			 */
-			MOVE(p, IFIFOAB1, 0, OFIFO, 0, 0, IMMED);
-
-			/* Enable automatic Info FIFO entries */
-			LOAD(p, 0, DCTRL, LDOFF_ENABLE_AUTO_NFIFO, 0, IMMED);
-		}
+		MOVE(p, AB1, 0, OFIFO, 0, MATH1, 0);
 
 		LOAD(p, (uintptr_t)iv, MATH0, 0, 8, IMMED | COPY);
 		SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1 | LAST2 | FLUSH1);
@@ -3730,23 +3146,8 @@ cnstr_shdsc_pdcp_short_mac(uint32_t *descbuf,
 			      DIR_ENC);
 		SEQFIFOSTORE(p, MSG, 0, 0, VLF);
 
-		if (rta_sec_era > RTA_SEC_ERA_2) {
-			MOVE(p, AB1, 0, OFIFO, 0, MATH1, 0);
-		} else {
-			SET_LABEL(p, local_offset);
-
-
-			/* Shut off automatic Info FIFO entries */
-			LOAD(p, 0, DCTRL, LDOFF_DISABLE_AUTO_NFIFO, 0, IMMED);
-
-			/* Placeholder for MOVE command with length from M1
-			 * register
-			 */
-			MOVE(p, IFIFOAB1, 0, OFIFO, 0, 0, IMMED);
+		MOVE(p, AB1, 0, OFIFO, 0, MATH1, 0);
 
-			/* Enable automatic Info FIFO entries */
-			LOAD(p, 0, DCTRL, LDOFF_ENABLE_AUTO_NFIFO, 0, IMMED);
-		}
 		SEQFIFOLOAD(p, MSGINSNOOP, 0, VLF | LAST1 | LAST2 | FLUSH1);
 		SEQSTORE(p, CONTEXT2, 0, 4, 0);
 
@@ -3768,32 +3169,14 @@ cnstr_shdsc_pdcp_short_mac(uint32_t *descbuf,
 			      DIR_ENC);
 		SEQFIFOSTORE(p, MSG, 0, 0, VLF);
 
-		if (rta_sec_era > RTA_SEC_ERA_2) {
-			MOVE(p, AB2, 0, OFIFO, 0, MATH1, 0);
-		} else {
-			SET_LABEL(p, local_offset);
-
-			/* Shut off automatic Info FIFO entries */
-			LOAD(p, 0, DCTRL, LDOFF_DISABLE_AUTO_NFIFO, 0, IMMED);
+		MOVE(p, AB2, 0, OFIFO, 0, MATH1, 0);
 
-			/* Placeholder for MOVE command with length from M1
-			 * register
-			 */
-			MOVE(p, IFIFOAB2, 0, OFIFO, 0, 0, IMMED);
-
-			/* Enable automatic Info FIFO entries */
-			LOAD(p, 0, DCTRL, LDOFF_ENABLE_AUTO_NFIFO, 0, IMMED);
-		}
 		SEQFIFOLOAD(p, MSGINSNOOP, 0, VLF | LAST1 | LAST2 | FLUSH1);
 		SEQSTORE(p, CONTEXT1, 0, 4, 0);
 
 		break;
 
 	case PDCP_AUTH_TYPE_ZUC:
-		if (rta_sec_era < RTA_SEC_ERA_5) {
-			pr_err("Invalid era for selected algorithm\n");
-			return -ENOTSUP;
-		}
 		iv[0] = 0xFFFFFFFF;
 		iv[1] = swap ? swab32(0xFC000000) : 0xFC000000;
 		iv[2] = 0x00000000; /* unused */
@@ -3819,12 +3202,6 @@ cnstr_shdsc_pdcp_short_mac(uint32_t *descbuf,
 		return -EINVAL;
 	}
 
-
-	if (rta_sec_era < RTA_SEC_ERA_3) {
-		PATCH_MOVE(p, move_cmd_read_descbuf, local_offset);
-		PATCH_MOVE(p, move_cmd_write_descbuf, local_offset);
-	}
-
 	return PROGRAM_FINALIZE(p);
 }
 
diff --git a/drivers/common/dpaax/caamflib/desc/sdap.h b/drivers/common/dpaax/caamflib/desc/sdap.h
index b2497a5424..ee03e95990 100644
--- a/drivers/common/dpaax/caamflib/desc/sdap.h
+++ b/drivers/common/dpaax/caamflib/desc/sdap.h
@@ -225,10 +225,6 @@ static inline int pdcp_sdap_insert_no_int_op(struct program *p,
 		break;
 
 	case PDCP_CIPHER_TYPE_ZUC:
-		if (rta_sec_era < RTA_SEC_ERA_5) {
-			pr_err("Invalid era for selected algorithm\n");
-			return -ENOTSUP;
-		}
 		/* The LSB and MSB is the same for ZUC context */
 		MOVEB(p, MATH2, 0, CONTEXT1, 0, 0x08, IMMED);
 		MOVEB(p, MATH2, 0, CONTEXT1, 0x08, 0x08, WAITCOMP | IMMED);
@@ -253,7 +249,6 @@ pdcp_sdap_insert_enc_only_op(struct program *p, bool swap __maybe_unused,
 			     struct alginfo *cipherdata,
 			     struct alginfo *authdata __maybe_unused,
 			     unsigned int dir, enum pdcp_sn_size sn_size,
-			     unsigned char era_2_sw_hfn_ovrd __maybe_unused,
 			     enum pdb_type_e pdb_type)
 {
 	uint32_t offset = 0, length = 0, sn_mask = 0;
@@ -293,12 +288,7 @@ pdcp_sdap_insert_enc_only_op(struct program *p, bool swap __maybe_unused,
 	/* Write header */
 	SEQSTORE(p, MATH0, offset, length, 0);
 
-	if (rta_sec_era > RTA_SEC_ERA_2) {
-		MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
-	} else {
-		MATHB(p, SEQINSZ, SUB, ONE, MATH1, 4, 0);
-		MATHB(p, MATH1, ADD, ONE, VSEQINSZ, 4, 0);
-	}
+	MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
 
 	if (dir == OP_TYPE_ENCAP_PROTOCOL)
 		MATHB(p, SEQINSZ, ADD, PDCP_MAC_I_LEN, VSEQOUTSZ, 4, IMMED2);
@@ -326,11 +316,6 @@ pdcp_sdap_insert_enc_only_op(struct program *p, bool swap __maybe_unused,
 		break;
 
 	case PDCP_CIPHER_TYPE_ZUC:
-		if (rta_sec_era < RTA_SEC_ERA_5) {
-			pr_err("Invalid era for selected algorithm\n");
-			return -ENOTSUP;
-		}
-
 		MOVEB(p, MATH2, 0, CONTEXT1, 0, 0x08, IMMED);
 		MOVEB(p, MATH2, 0, CONTEXT1, 0x08, 0x08, WAITCOMP | IMMED);
 
@@ -378,7 +363,6 @@ static inline int
 pdcp_sdap_insert_snoop_op(struct program *p, bool swap __maybe_unused,
 			  struct alginfo *cipherdata, struct alginfo *authdata,
 			  unsigned int dir, enum pdcp_sn_size sn_size,
-			  unsigned char era_2_sw_hfn_ovrd __maybe_unused,
 			  enum pdb_type_e pdb_type)
 {
 	uint32_t offset = 0, length = 0, sn_mask = 0;
@@ -391,13 +375,6 @@ pdcp_sdap_insert_snoop_op(struct program *p, bool swap __maybe_unused,
 			FULL_PDB_DESCBUF_HFN_BEARER_DIR_OFFSET :
 			REDUCED_PDB_DESCBUF_HFN_BEARER_DIR_OFFSET;
 
-	if (authdata->algtype == PDCP_CIPHER_TYPE_ZUC) {
-		if (rta_sec_era < RTA_SEC_ERA_5) {
-			pr_err("Invalid era for selected algorithm\n");
-			return -ENOTSUP;
-		}
-	}
-
 	if (pdcp_sdap_get_sn_parameters(sn_size, swap, &offset, &length,
 					&sn_mask))
 		return -ENOTSUP;
@@ -588,8 +565,7 @@ pdcp_sdap_insert_snoop_op(struct program *p, bool swap __maybe_unused,
 		 */
 		JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CLASS1 | NOP | NIFP);
 
-		if (rta_sec_era >= RTA_SEC_ERA_6)
-			LOAD(p, 0, DCTRL, 0, LDLEN_RST_CHA_OFIFO_PTR, IMMED);
+		LOAD(p, 0, DCTRL, 0, LDLEN_RST_CHA_OFIFO_PTR, IMMED);
 
 		/* Save the content left in the Output FIFO (the ICV) to MATH0
 		 */
@@ -604,13 +580,7 @@ pdcp_sdap_insert_snoop_op(struct program *p, bool swap __maybe_unused,
 		 * Note: As configured by the altsource, this will send
 		 * the
 		 */
-		if (rta_sec_era <= RTA_SEC_ERA_2) {
-			/* Shut off automatic Info FIFO entries */
-			LOAD(p, 0, DCTRL, LDOFF_DISABLE_AUTO_NFIFO, 0, IMMED);
-			MOVE(p, MATH0, 0, IFIFOAB2, 0, 4, WAITCOMP | IMMED);
-		} else {
-			MOVE(p, MATH0, 0, IFIFO, 0, 4, WAITCOMP | IMMED);
-		}
+		MOVE(p, MATH0, 0, IFIFO, 0, 4, WAITCOMP | IMMED);
 	}
 
 	if (authdata->algtype == PDCP_CIPHER_TYPE_ZUC) {
@@ -638,7 +608,6 @@ pdcp_sdap_insert_snoop_op(struct program *p, bool swap __maybe_unused,
 static inline int pdcp_sdap_insert_no_snoop_op(
 	struct program *p, bool swap __maybe_unused, struct alginfo *cipherdata,
 	struct alginfo *authdata, unsigned int dir, enum pdcp_sn_size sn_size,
-	unsigned char era_2_sw_hfn_ovrd __maybe_unused,
 	enum pdb_type_e pdb_type)
 {
 	uint32_t offset = 0, length = 0, sn_mask = 0;
@@ -649,13 +618,6 @@ static inline int pdcp_sdap_insert_no_snoop_op(
 			FULL_PDB_DESCBUF_HFN_BEARER_DIR_OFFSET :
 			REDUCED_PDB_DESCBUF_HFN_BEARER_DIR_OFFSET;
 
-	if (authdata->algtype == PDCP_CIPHER_TYPE_ZUC) {
-		if (rta_sec_era < RTA_SEC_ERA_5) {
-			pr_err("Invalid era for selected algorithm\n");
-			return -ENOTSUP;
-		}
-	}
-
 	if (pdcp_sdap_get_sn_parameters(sn_size, swap, &offset, &length,
 					&sn_mask))
 		return -ENOTSUP;
@@ -842,11 +804,10 @@ pdcp_sdap_insert_cplane_null_op(struct program *p,
 			   struct alginfo *authdata,
 			   unsigned int dir,
 			   enum pdcp_sn_size sn_size,
-			   unsigned char era_2_sw_hfn_ovrd,
 			   enum pdb_type_e pdb_type __maybe_unused)
 {
 	return pdcp_insert_cplane_null_op(p, swap, cipherdata, authdata, dir,
-					  sn_size, era_2_sw_hfn_ovrd);
+					  sn_size);
 }
 
 static inline int
@@ -856,24 +817,22 @@ pdcp_sdap_insert_cplane_int_only_op(struct program *p,
 			   struct alginfo *authdata,
 			   unsigned int dir,
 			   enum pdcp_sn_size sn_size,
-			   unsigned char era_2_sw_hfn_ovrd,
 			   enum pdb_type_e pdb_type __maybe_unused)
 {
 	return pdcp_insert_cplane_int_only_op(p, swap, cipherdata, authdata,
-				dir, sn_size, era_2_sw_hfn_ovrd);
+				dir, sn_size);
 }
 
 static int pdcp_sdap_insert_with_int_op(
 	struct program *p, bool swap __maybe_unused, struct alginfo *cipherdata,
 	struct alginfo *authdata, enum pdcp_sn_size sn_size,
-	unsigned char era_2_sw_hfn_ovrd, unsigned int dir,
+	unsigned int dir,
 	enum pdb_type_e pdb_type)
 {
 	static int (
 		*pdcp_cp_fp[PDCP_CIPHER_TYPE_INVALID][PDCP_AUTH_TYPE_INVALID])(
 		struct program *, bool swap, struct alginfo *, struct alginfo *,
-		unsigned int, enum pdcp_sn_size,
-		unsigned char __maybe_unused, enum pdb_type_e pdb_type) = {
+		unsigned int dir, enum pdcp_sn_size, enum pdb_type_e pdb_type) = {
 		{
 			/* NULL */
 			pdcp_sdap_insert_cplane_null_op,     /* NULL */
@@ -907,7 +866,7 @@ static int pdcp_sdap_insert_with_int_op(
 
 	err = pdcp_cp_fp[cipherdata->algtype]
 			[authdata->algtype](p, swap, cipherdata, authdata, dir,
-					sn_size, era_2_sw_hfn_ovrd, pdb_type);
+					sn_size, pdb_type);
 	if (err)
 		return err;
 
@@ -925,7 +884,6 @@ cnstr_shdsc_pdcp_sdap_u_plane(uint32_t *descbuf,
 			       uint32_t hfn_threshold,
 			       struct alginfo *cipherdata,
 			       struct alginfo *authdata,
-			       unsigned char era_2_sw_hfn_ovrd,
 			       uint32_t caps_mode)
 {
 	struct program prg;
@@ -966,12 +924,6 @@ cnstr_shdsc_pdcp_sdap_u_plane(uint32_t *descbuf,
 
 	LABEL(pdb_end);
 
-	/* Check HFN override for ERA 2 */
-	if (rta_sec_era != RTA_SEC_ERA_2 && era_2_sw_hfn_ovrd) {
-		pr_err("Cannot select SW HFN ovrd for other era than 2");
-		return -EINVAL;
-	}
-
 	/* Check the confidentiality algorithm is supported by the code */
 	switch (cipherdata->algtype) {
 	case PDCP_CIPHER_TYPE_NULL:
@@ -1013,14 +965,6 @@ cnstr_shdsc_pdcp_sdap_u_plane(uint32_t *descbuf,
 		return -ENOTSUP;
 	}
 
-	/* Check that we are not performing ZUC algo on old platforms */
-	if (cipherdata->algtype == PDCP_CIPHER_TYPE_ZUC &&
-			rta_sec_era < RTA_SEC_ERA_5) {
-		pr_err("ZUC algorithm not supported for era: %d\n",
-				rta_sec_era);
-		return -ENOTSUP;
-	}
-
 	/* Initialize the program */
 	PROGRAM_CNTXT_INIT(p, descbuf, 0);
 
@@ -1047,7 +991,7 @@ cnstr_shdsc_pdcp_sdap_u_plane(uint32_t *descbuf,
 	SET_LABEL(p, pdb_end);
 
 	/* Inser the HFN override operation */
-	err = insert_hfn_ov_op(p, sn_size, pdb_type, era_2_sw_hfn_ovrd, false);
+	err = insert_hfn_ov_op(p, sn_size, pdb_type, false);
 	if (err)
 		return err;
 
@@ -1068,7 +1012,6 @@ cnstr_shdsc_pdcp_sdap_u_plane(uint32_t *descbuf,
 	} else {
 		err = pdcp_sdap_insert_with_int_op(p, swap, cipherdata,
 						   authdata, sn_size,
-						   era_2_sw_hfn_ovrd,
 						   caps_mode, pdb_type);
 		if (err) {
 			pr_err("Fail pdcp_sdap_insert_with_int_op\n");
@@ -1096,9 +1039,6 @@ cnstr_shdsc_pdcp_sdap_u_plane(uint32_t *descbuf,
  *                 keys should be renegotiated at the earliest convenience.
  * @cipherdata: pointer to block cipher transform definitions
  *              Valid algorithm values are those from cipher_type_pdcp enum.
- * @era_2_sw_hfn_ovrd: if software HFN override mechanism is desired for
- *                     this descriptor. Note: Can only be used for
- *                     SEC ERA 2.
  *
  * Return: size of descriptor written in words or negative number on error.
  *         Once the function returns, the value of this parameter can be used
@@ -1118,12 +1058,11 @@ cnstr_shdsc_pdcp_sdap_u_plane_encap(uint32_t *descbuf,
 			       unsigned short direction,
 			       uint32_t hfn_threshold,
 			       struct alginfo *cipherdata,
-			       struct alginfo *authdata,
-			       unsigned char era_2_sw_hfn_ovrd)
+			       struct alginfo *authdata)
 {
 	return cnstr_shdsc_pdcp_sdap_u_plane(descbuf, ps, swap, sn_size,
 			hfn, bearer, direction, hfn_threshold, cipherdata,
-			authdata, era_2_sw_hfn_ovrd, OP_TYPE_ENCAP_PROTOCOL);
+			authdata, OP_TYPE_ENCAP_PROTOCOL);
 }
 
 /**
@@ -1141,9 +1080,6 @@ cnstr_shdsc_pdcp_sdap_u_plane_encap(uint32_t *descbuf,
  *                 keys should be renegotiated at the earliest convenience.
  * @cipherdata: pointer to block cipher transform definitions
  *              Valid algorithm values are those from cipher_type_pdcp enum.
- * @era_2_sw_hfn_ovrd: if software HFN override mechanism is desired for
- *                     this descriptor. Note: Can only be used for
- *                     SEC ERA 2.
  *
  * Return: size of descriptor written in words or negative number on error.
  *         Once the function returns, the value of this parameter can be used
@@ -1163,12 +1099,11 @@ cnstr_shdsc_pdcp_sdap_u_plane_decap(uint32_t *descbuf,
 			       unsigned short direction,
 			       uint32_t hfn_threshold,
 			       struct alginfo *cipherdata,
-			       struct alginfo *authdata,
-			       unsigned char era_2_sw_hfn_ovrd)
+			       struct alginfo *authdata)
 {
 	return cnstr_shdsc_pdcp_sdap_u_plane(descbuf, ps, swap, sn_size, hfn,
 			bearer, direction, hfn_threshold, cipherdata, authdata,
-			era_2_sw_hfn_ovrd, OP_TYPE_DECAP_PROTOCOL);
+			OP_TYPE_DECAP_PROTOCOL);
 }
 
 #endif /* __DESC_SDAP_H__ */
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
index a5b052375d..1e6b3e548a 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
@@ -3297,8 +3297,7 @@ dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
 					pdcp_xform->bearer,
 					pdcp_xform->pkt_dir,
 					pdcp_xform->hfn_threshold,
-					&cipherdata, &authdata,
-					0);
+					&cipherdata, &authdata);
 		else if (session->dir == DIR_DEC)
 			bufsize = cnstr_shdsc_pdcp_c_plane_decap(
 					priv->flc_desc[0].desc, 1, swap,
@@ -3307,8 +3306,7 @@ dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
 					pdcp_xform->bearer,
 					pdcp_xform->pkt_dir,
 					pdcp_xform->hfn_threshold,
-					&cipherdata, &authdata,
-					0);
+					&cipherdata, &authdata);
 
 	} else if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_SHORT_MAC) {
 		bufsize = cnstr_shdsc_pdcp_short_mac(priv->flc_desc[0].desc,
@@ -3323,7 +3321,7 @@ dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
 					pdcp_xform->bearer,
 					pdcp_xform->pkt_dir,
 					pdcp_xform->hfn_threshold,
-					&cipherdata, p_authdata, 0);
+					&cipherdata, p_authdata);
 			else
 				bufsize = cnstr_shdsc_pdcp_u_plane_encap(
 					priv->flc_desc[0].desc, 1, swap,
@@ -3332,7 +3330,7 @@ dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
 					pdcp_xform->bearer,
 					pdcp_xform->pkt_dir,
 					pdcp_xform->hfn_threshold,
-					&cipherdata, p_authdata, 0);
+					&cipherdata, p_authdata);
 		} else if (session->dir == DIR_DEC) {
 			if (pdcp_xform->sdap_enabled)
 				bufsize = cnstr_shdsc_pdcp_sdap_u_plane_decap(
@@ -3342,7 +3340,7 @@ dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
 					pdcp_xform->bearer,
 					pdcp_xform->pkt_dir,
 					pdcp_xform->hfn_threshold,
-					&cipherdata, p_authdata, 0);
+					&cipherdata, p_authdata);
 			else
 				bufsize = cnstr_shdsc_pdcp_u_plane_decap(
 					priv->flc_desc[0].desc, 1, swap,
@@ -3351,7 +3349,7 @@ dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
 					pdcp_xform->bearer,
 					pdcp_xform->pkt_dir,
 					pdcp_xform->hfn_threshold,
-					&cipherdata, p_authdata, 0);
+					&cipherdata, p_authdata);
 		}
 	}
 
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.c b/drivers/crypto/dpaa_sec/dpaa_sec.c
index a552e64506..1dedd9eee5 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.c
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.c
@@ -296,8 +296,7 @@ dpaa_sec_prep_pdcp_cdb(dpaa_sec_session *ses)
 					ses->pdcp.bearer,
 					ses->pdcp.pkt_dir,
 					ses->pdcp.hfn_threshold,
-					&cipherdata, &authdata,
-					0);
+					&cipherdata, &authdata);
 		else if (ses->dir == DIR_DEC)
 			shared_desc_len = cnstr_shdsc_pdcp_c_plane_decap(
 					cdb->sh_desc, 1, swap,
@@ -306,8 +305,7 @@ dpaa_sec_prep_pdcp_cdb(dpaa_sec_session *ses)
 					ses->pdcp.bearer,
 					ses->pdcp.pkt_dir,
 					ses->pdcp.hfn_threshold,
-					&cipherdata, &authdata,
-					0);
+					&cipherdata, &authdata);
 	} else if (ses->pdcp.domain == RTE_SECURITY_PDCP_MODE_SHORT_MAC) {
 		shared_desc_len = cnstr_shdsc_pdcp_short_mac(cdb->sh_desc,
 						     1, swap, &authdata);
@@ -322,7 +320,7 @@ dpaa_sec_prep_pdcp_cdb(dpaa_sec_session *ses)
 						ses->pdcp.bearer,
 						ses->pdcp.pkt_dir,
 						ses->pdcp.hfn_threshold,
-						&cipherdata, p_authdata, 0);
+						&cipherdata, p_authdata);
 			else
 				shared_desc_len =
 					cnstr_shdsc_pdcp_u_plane_encap(
@@ -332,7 +330,7 @@ dpaa_sec_prep_pdcp_cdb(dpaa_sec_session *ses)
 						ses->pdcp.bearer,
 						ses->pdcp.pkt_dir,
 						ses->pdcp.hfn_threshold,
-						&cipherdata, p_authdata, 0);
+						&cipherdata, p_authdata);
 		} else if (ses->dir == DIR_DEC) {
 			if (ses->pdcp.sdap_enabled)
 				shared_desc_len =
@@ -343,7 +341,7 @@ dpaa_sec_prep_pdcp_cdb(dpaa_sec_session *ses)
 						ses->pdcp.bearer,
 						ses->pdcp.pkt_dir,
 						ses->pdcp.hfn_threshold,
-						&cipherdata, p_authdata, 0);
+						&cipherdata, p_authdata);
 			else
 				shared_desc_len =
 					cnstr_shdsc_pdcp_u_plane_decap(
@@ -353,7 +351,7 @@ dpaa_sec_prep_pdcp_cdb(dpaa_sec_session *ses)
 						ses->pdcp.bearer,
 						ses->pdcp.pkt_dir,
 						ses->pdcp.hfn_threshold,
-						&cipherdata, p_authdata, 0);
+						&cipherdata, p_authdata);
 		}
 	}
 	return shared_desc_len;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 42+ messages in thread

* [PATCH 2/8] common/dpaax: change job processing mode for PDCP SDAP
  2021-12-20 10:27 [PATCH 1/8] common/dpaax: caamflib: Remove code related to SEC ERA 1 to 7 Gagandeep Singh
@ 2021-12-20 10:27 ` Gagandeep Singh
  2021-12-20 10:27 ` [PATCH 3/8] crypto/dpaa2_sec: ordered queue support Gagandeep Singh
                   ` (6 subsequent siblings)
  7 siblings, 0 replies; 42+ messages in thread
From: Gagandeep Singh @ 2021-12-20 10:27 UTC (permalink / raw)
  To: gakhil, dev; +Cc: Gagandeep Singh

For PDCP SDAP test cases, make the HW SEC engine process the
jobs in WAIT mode.

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 drivers/common/dpaax/caamflib/desc/sdap.h | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/drivers/common/dpaax/caamflib/desc/sdap.h b/drivers/common/dpaax/caamflib/desc/sdap.h
index ee03e95990..1737e14fa6 100644
--- a/drivers/common/dpaax/caamflib/desc/sdap.h
+++ b/drivers/common/dpaax/caamflib/desc/sdap.h
@@ -895,27 +895,27 @@ cnstr_shdsc_pdcp_sdap_u_plane(uint32_t *descbuf,
 			{
 				/* NULL */
 				SHR_WAIT,   /* NULL */
-				SHR_ALWAYS, /* SNOW f9 */
-				SHR_ALWAYS, /* AES CMAC */
-				SHR_ALWAYS  /* ZUC-I */
+				SHR_WAIT, /* SNOW f9 */
+				SHR_WAIT, /* AES CMAC */
+				SHR_WAIT  /* ZUC-I */
 			},
 			{
 				/* SNOW f8 */
-				SHR_ALWAYS, /* NULL */
-				SHR_ALWAYS, /* SNOW f9 */
+				SHR_WAIT, /* NULL */
+				SHR_WAIT, /* SNOW f9 */
 				SHR_WAIT,   /* AES CMAC */
 				SHR_WAIT    /* ZUC-I */
 			},
 			{
 				/* AES CTR */
-				SHR_ALWAYS, /* NULL */
-				SHR_ALWAYS, /* SNOW f9 */
-				SHR_ALWAYS, /* AES CMAC */
+				SHR_WAIT, /* NULL */
+				SHR_WAIT, /* SNOW f9 */
+				SHR_WAIT, /* AES CMAC */
 				SHR_WAIT    /* ZUC-I */
 			},
 			{
 				/* ZUC-E */
-				SHR_ALWAYS, /* NULL */
+				SHR_WAIT, /* NULL */
 				SHR_WAIT,   /* SNOW f9 */
 				SHR_WAIT,   /* AES CMAC */
 				SHR_WAIT    /* ZUC-I */
@@ -979,7 +979,7 @@ cnstr_shdsc_pdcp_sdap_u_plane(uint32_t *descbuf,
 		SHR_HDR(p, desc_share[cipherdata->algtype][authdata->algtype],
 			0, 0);
 	else
-		SHR_HDR(p, SHR_ALWAYS, 0, 0);
+		SHR_HDR(p, SHR_WAIT, 0, 0);
 
 	/* Construct the PDB */
 	pdb_type = cnstr_pdcp_u_plane_pdb(p, sn_size, hfn, bearer, direction,
-- 
2.25.1


^ permalink raw reply	[flat|nested] 42+ messages in thread

* [PATCH 3/8] crypto/dpaa2_sec: ordered queue support
  2021-12-20 10:27 [PATCH 1/8] common/dpaax: caamflib: Remove code related to SEC ERA 1 to 7 Gagandeep Singh
  2021-12-20 10:27 ` [PATCH 2/8] common/dpaax: change job processing mode for PDCP SDAP Gagandeep Singh
@ 2021-12-20 10:27 ` Gagandeep Singh
  2021-12-20 10:27 ` [PATCH 4/8] crypto/dpaa2_sec: support AES-GMAC Gagandeep Singh
                   ` (5 subsequent siblings)
  7 siblings, 0 replies; 42+ messages in thread
From: Gagandeep Singh @ 2021-12-20 10:27 UTC (permalink / raw)
  To: gakhil, dev; +Cc: Nipun Gupta

From: Nipun Gupta <nipun.gupta@nxp.com>

This patch adds ordered queue support for the DPAA2 platform.
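
Not part of this patch, but as a rough sketch of how an application might use
the new path: bind a dpaa2_sec queue pair to an ordered event queue so that
the ordered enqueue/dequeue handling added here is selected. The adapter id,
crypto device id, queue pair id and helper name are placeholders, and the
adapter call follows the event crypto adapter API of this DPDK era:

  #include <stdlib.h>
  #include <rte_eventdev.h>
  #include <rte_event_crypto_adapter.h>

  /* Assumes the event device, the dpaa2_sec device and crypto adapter 0
   * have already been created and configured.
   */
  static int
  bind_ordered_crypto_qp(uint8_t cdev_id, uint8_t ev_queue_id)
  {
          struct rte_event ev = {
                  .queue_id = ev_queue_id,
                  .sched_type = RTE_SCHED_TYPE_ORDERED, /* selects ordered path */
                  .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
          };

          /* Optional knob read by this patch: switch from loose to strict
           * order restoration before the queue pair is attached.
           */
          setenv("DPAA2_STRICT_ORDERING_ENABLE", "1", 1);

          /* Adapter id 0 and queue pair id 0 are placeholders. */
          return rte_event_crypto_adapter_queue_pair_add(0, cdev_id, 0, &ev);
  }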

Signed-off-by: Nipun Gupta <nipun.gupta@nxp.com>
---
 drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c | 255 +++++++++++++++++++-
 drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h   |   8 +-
 drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h    |  14 +-
 3 files changed, 263 insertions(+), 14 deletions(-)

diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
index 1e6b3e548a..a9fda67ac3 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
@@ -1466,14 +1466,14 @@ dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
 
 		for (loop = 0; loop < frames_to_send; loop++) {
 			if (*dpaa2_seqn((*ops)->sym->m_src)) {
-				uint8_t dqrr_index =
-					*dpaa2_seqn((*ops)->sym->m_src) - 1;
-
-				flags[loop] = QBMAN_ENQUEUE_FLAG_DCA | dqrr_index;
-				DPAA2_PER_LCORE_DQRR_SIZE--;
-				DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index);
-				*dpaa2_seqn((*ops)->sym->m_src) =
-					DPAA2_INVALID_MBUF_SEQN;
+				if (*dpaa2_seqn((*ops)->sym->m_src) & QBMAN_ENQUEUE_FLAG_DCA) {
+					DPAA2_PER_LCORE_DQRR_SIZE--;
+					DPAA2_PER_LCORE_DQRR_HELD &= ~(1 <<
+					*dpaa2_seqn((*ops)->sym->m_src) &
+					QBMAN_EQCR_DCA_IDXMASK);
+				}
+				flags[loop] = *dpaa2_seqn((*ops)->sym->m_src);
+				*dpaa2_seqn((*ops)->sym->m_src) = DPAA2_INVALID_MBUF_SEQN;
 			}
 
 			/*Clear the unused FD fields before sending*/
@@ -1621,6 +1621,169 @@ sec_fd_to_mbuf(const struct qbman_fd *fd)
 	return op;
 }
 
+static void
+dpaa2_sec_free_eqresp_buf(uint16_t eqresp_ci)
+{
+	struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO;
+	struct rte_crypto_op *op;
+	struct qbman_fd *fd;
+
+	fd = qbman_result_eqresp_fd(&dpio_dev->eqresp[eqresp_ci]);
+	op = sec_fd_to_mbuf(fd);
+	/* Instead of freeing, enqueue it to the sec tx queue (sec->core)
+	 * after setting an error in FD. But this will have performance impact.
+	 */
+	rte_pktmbuf_free(op->sym->m_src);
+}
+
+static void
+dpaa2_sec_set_enqueue_descriptor(struct dpaa2_queue *dpaa2_q,
+			     struct rte_mbuf *m,
+			     struct qbman_eq_desc *eqdesc)
+{
+	struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO;
+	struct eqresp_metadata *eqresp_meta;
+	struct dpaa2_sec_dev_private *priv = dpaa2_q->crypto_data->dev_private;
+	uint16_t orpid, seqnum;
+	uint8_t dq_idx;
+
+	if (*dpaa2_seqn(m) & DPAA2_ENQUEUE_FLAG_ORP) {
+		orpid = (*dpaa2_seqn(m) & DPAA2_EQCR_OPRID_MASK) >>
+			DPAA2_EQCR_OPRID_SHIFT;
+		seqnum = (*dpaa2_seqn(m) & DPAA2_EQCR_SEQNUM_MASK) >>
+			DPAA2_EQCR_SEQNUM_SHIFT;
+
+
+		if (!priv->en_loose_ordered) {
+			qbman_eq_desc_set_orp(eqdesc, 1, orpid, seqnum, 0);
+			qbman_eq_desc_set_response(eqdesc, (uint64_t)
+				DPAA2_VADDR_TO_IOVA(&dpio_dev->eqresp[
+				dpio_dev->eqresp_pi]), 1);
+			qbman_eq_desc_set_token(eqdesc, 1);
+
+			eqresp_meta = &dpio_dev->eqresp_meta[dpio_dev->eqresp_pi];
+			eqresp_meta->dpaa2_q = dpaa2_q;
+			eqresp_meta->mp = m->pool;
+
+			dpio_dev->eqresp_pi + 1 < MAX_EQ_RESP_ENTRIES ?
+				dpio_dev->eqresp_pi++ : (dpio_dev->eqresp_pi = 0);
+		} else {
+			qbman_eq_desc_set_orp(eqdesc, 0, orpid, seqnum, 0);
+		}
+	} else {
+		dq_idx = *dpaa2_seqn(m) - 1;
+		qbman_eq_desc_set_dca(eqdesc, 1, dq_idx, 0);
+		DPAA2_PER_LCORE_DQRR_SIZE--;
+		DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dq_idx);
+	}
+	*dpaa2_seqn(m) = DPAA2_INVALID_MBUF_SEQN;
+}
+
+
+static uint16_t
+dpaa2_sec_enqueue_burst_ordered(void *qp, struct rte_crypto_op **ops,
+			uint16_t nb_ops)
+{
+	/* Function to transmit the frames to given device and VQ*/
+	uint32_t loop;
+	int32_t ret;
+	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
+	uint32_t frames_to_send, num_free_eq_desc, retry_count;
+	struct qbman_eq_desc eqdesc[MAX_TX_RING_SLOTS];
+	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
+	struct qbman_swp *swp;
+	uint16_t num_tx = 0;
+	/*todo - need to support multiple buffer pools */
+	uint16_t bpid;
+	struct rte_mempool *mb_pool;
+	struct dpaa2_sec_dev_private *priv =
+				dpaa2_qp->tx_vq.crypto_data->dev_private;
+
+	if (unlikely(nb_ops == 0))
+		return 0;
+
+	if (ops[0]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
+		DPAA2_SEC_ERR("sessionless crypto op not supported");
+		return 0;
+	}
+
+	if (!DPAA2_PER_LCORE_DPIO) {
+		ret = dpaa2_affine_qbman_swp();
+		if (ret) {
+			DPAA2_SEC_ERR("Failure in affining portal");
+			return 0;
+		}
+	}
+	swp = DPAA2_PER_LCORE_PORTAL;
+
+	while (nb_ops) {
+		frames_to_send = (nb_ops > dpaa2_eqcr_size) ?
+			dpaa2_eqcr_size : nb_ops;
+
+		if (!priv->en_loose_ordered) {
+			if (*dpaa2_seqn((*ops)->sym->m_src)) {
+				num_free_eq_desc = dpaa2_free_eq_descriptors();
+				if (num_free_eq_desc < frames_to_send)
+					frames_to_send = num_free_eq_desc;
+			}
+		}
+
+		for (loop = 0; loop < frames_to_send; loop++) {
+			/*Prepare enqueue descriptor*/
+			qbman_eq_desc_clear(&eqdesc[loop]);
+			qbman_eq_desc_set_fq(&eqdesc[loop], dpaa2_qp->tx_vq.fqid);
+
+			if (*dpaa2_seqn((*ops)->sym->m_src))
+				dpaa2_sec_set_enqueue_descriptor(
+						&dpaa2_qp->tx_vq,
+						(*ops)->sym->m_src,
+						&eqdesc[loop]);
+			else
+				qbman_eq_desc_set_no_orp(&eqdesc[loop],
+							 DPAA2_EQ_RESP_ERR_FQ);
+
+			/*Clear the unused FD fields before sending*/
+			memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
+			mb_pool = (*ops)->sym->m_src->pool;
+			bpid = mempool_to_bpid(mb_pool);
+			ret = build_sec_fd(*ops, &fd_arr[loop], bpid);
+			if (ret) {
+				DPAA2_SEC_ERR("error: Improper packet contents"
+					      " for crypto operation");
+				goto skip_tx;
+			}
+			ops++;
+		}
+
+		loop = 0;
+		retry_count = 0;
+		while (loop < frames_to_send) {
+			ret = qbman_swp_enqueue_multiple_desc(swp,
+					&eqdesc[loop], &fd_arr[loop],
+					frames_to_send - loop);
+			if (unlikely(ret < 0)) {
+				retry_count++;
+				if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
+					num_tx += loop;
+					nb_ops -= loop;
+					goto skip_tx;
+				}
+			} else {
+				loop += ret;
+				retry_count = 0;
+			}
+		}
+
+		num_tx += loop;
+		nb_ops -= loop;
+	}
+
+skip_tx:
+	dpaa2_qp->tx_vq.tx_pkts += num_tx;
+	dpaa2_qp->tx_vq.err_pkts += nb_ops;
+	return num_tx;
+}
+
 static uint16_t
 dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
 			uint16_t nb_ops)
@@ -3527,6 +3690,10 @@ dpaa2_sec_dev_start(struct rte_cryptodev *dev)
 
 	PMD_INIT_FUNC_TRACE();
 
+	/* Change the tx burst function if ordered queues are used */
+	if (priv->en_ordered)
+		dev->enqueue_burst = dpaa2_sec_enqueue_burst_ordered;
+
 	memset(&attr, 0, sizeof(struct dpseci_attr));
 
 	ret = dpseci_enable(dpseci, CMD_PRI_LOW, priv->token);
@@ -3739,12 +3906,46 @@ dpaa2_sec_process_atomic_event(struct qbman_swp *swp __rte_unused,
 
 	ev->event_ptr = sec_fd_to_mbuf(fd);
 	dqrr_index = qbman_get_dqrr_idx(dq);
-	*dpaa2_seqn(crypto_op->sym->m_src) = dqrr_index + 1;
+	*dpaa2_seqn(crypto_op->sym->m_src) = QBMAN_ENQUEUE_FLAG_DCA | dqrr_index;
 	DPAA2_PER_LCORE_DQRR_SIZE++;
 	DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
 	DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = crypto_op->sym->m_src;
 }
 
+static void __attribute__((hot))
+dpaa2_sec_process_ordered_event(struct qbman_swp *swp,
+				const struct qbman_fd *fd,
+				const struct qbman_result *dq,
+				struct dpaa2_queue *rxq,
+				struct rte_event *ev)
+{
+	struct rte_crypto_op *crypto_op = (struct rte_crypto_op *)ev->event_ptr;
+
+	/* Prefetching mbuf */
+	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-
+		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size));
+
+	/* Prefetching ipsec crypto_op stored in priv data of mbuf */
+	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64));
+
+	ev->flow_id = rxq->ev.flow_id;
+	ev->sub_event_type = rxq->ev.sub_event_type;
+	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
+	ev->op = RTE_EVENT_OP_NEW;
+	ev->sched_type = rxq->ev.sched_type;
+	ev->queue_id = rxq->ev.queue_id;
+	ev->priority = rxq->ev.priority;
+	ev->event_ptr = sec_fd_to_mbuf(fd);
+
+	*dpaa2_seqn(crypto_op->sym->m_src) = DPAA2_ENQUEUE_FLAG_ORP;
+	*dpaa2_seqn(crypto_op->sym->m_src) |= qbman_result_DQ_odpid(dq) <<
+		DPAA2_EQCR_OPRID_SHIFT;
+	*dpaa2_seqn(crypto_op->sym->m_src) |= qbman_result_DQ_seqnum(dq) <<
+		DPAA2_EQCR_SEQNUM_SHIFT;
+
+	qbman_swp_dqrr_consume(swp, dq);
+}
+
 int
 dpaa2_sec_eventq_attach(const struct rte_cryptodev *dev,
 		int qp_id,
@@ -3762,6 +3963,8 @@ dpaa2_sec_eventq_attach(const struct rte_cryptodev *dev,
 		qp->rx_vq.cb = dpaa2_sec_process_parallel_event;
 	else if (event->sched_type == RTE_SCHED_TYPE_ATOMIC)
 		qp->rx_vq.cb = dpaa2_sec_process_atomic_event;
+	else if (event->sched_type == RTE_SCHED_TYPE_ORDERED)
+		qp->rx_vq.cb = dpaa2_sec_process_ordered_event;
 	else
 		return -EINVAL;
 
@@ -3780,6 +3983,40 @@ dpaa2_sec_eventq_attach(const struct rte_cryptodev *dev,
 		cfg.options |= DPSECI_QUEUE_OPT_ORDER_PRESERVATION;
 		cfg.order_preservation_en = 1;
 	}
+
+	if (event->sched_type == RTE_SCHED_TYPE_ORDERED) {
+		struct opr_cfg ocfg;
+
+		/* Restoration window size = 256 frames */
+		ocfg.oprrws = 3;
+		/* Restoration window size = 512 frames for LX2 */
+		if (dpaa2_svr_family == SVR_LX2160A)
+			ocfg.oprrws = 4;
+		/* Auto advance NESN window enabled */
+		ocfg.oa = 1;
+		/* Late arrival window size disabled */
+		ocfg.olws = 0;
+		/* ORL resource exhaustaion advance NESN disabled */
+		ocfg.oeane = 0;
+		/* Loose ordering enabled */
+		ocfg.oloe = 1;
+		priv->en_loose_ordered = 1;
+		/* Strict ordering enabled if explicitly set */
+		if (getenv("DPAA2_STRICT_ORDERING_ENABLE")) {
+			ocfg.oloe = 0;
+			priv->en_loose_ordered = 0;
+		}
+
+		ret = dpseci_set_opr(dpseci, CMD_PRI_LOW, priv->token,
+				   qp_id, OPR_OPT_CREATE, &ocfg);
+		if (ret) {
+			RTE_LOG(ERR, PMD, "Error setting opr: ret: %d\n", ret);
+			return ret;
+		}
+		qp->tx_vq.cb_eqresp_free = dpaa2_sec_free_eqresp_buf;
+		priv->en_ordered = 1;
+	}
+
 	ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
 				  qp_id, &cfg);
 	if (ret) {
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
index 05bd7c0736..1756d917dd 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
@@ -1,8 +1,6 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- *
- *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- *   Copyright 2016,2020-2021 NXP
- *
+ * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright 2016,2019-2021 NXP
  */
 
 #ifndef _DPAA2_SEC_PMD_PRIVATE_H_
@@ -37,6 +35,8 @@ struct dpaa2_sec_dev_private {
 	uint16_t token; /**< Token required by DPxxx objects */
 	unsigned int max_nb_queue_pairs;
 	/**< Max number of queue pairs supported by device */
+	uint8_t en_ordered;
+	uint8_t en_loose_ordered;
 };
 
 struct dpaa2_sec_qp {
diff --git a/drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h b/drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h
index 279e8f4d4a..c295c04f24 100644
--- a/drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h
+++ b/drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
  *
  * Copyright 2013-2016 Freescale Semiconductor Inc.
- * Copyright 2016-2017 NXP
+ * Copyright 2016-2020 NXP
  *
  */
 #ifndef __FSL_DPSECI_H
@@ -11,6 +11,8 @@
  * Contains initialization APIs and runtime control APIs for DPSECI
  */
 
+#include <fsl_dpopr.h>
+
 struct fsl_mc_io;
 
 /**
@@ -41,6 +43,16 @@ int dpseci_close(struct fsl_mc_io *mc_io,
  */
 #define DPSECI_OPT_HAS_CG				0x000020
 
+/**
+ * Enable the Order Restoration support
+ */
+#define DPSECI_OPT_HAS_OPR				0x000040
+
+/**
+ * Order Point Records are shared for the entire DPSECI
+ */
+#define DPSECI_OPT_OPR_SHARED				0x000080
+
 /**
  * struct dpseci_cfg - Structure representing DPSECI configuration
  * @options: Any combination of the following options:
-- 
2.25.1


^ permalink raw reply	[flat|nested] 42+ messages in thread

* [PATCH 4/8] crypto/dpaa2_sec: support AES-GMAC
  2021-12-20 10:27 [PATCH 1/8] common/dpaax: caamflib: Remove code related to SEC ERA 1 to 7 Gagandeep Singh
  2021-12-20 10:27 ` [PATCH 2/8] common/dpaax: change job processing mode for PDCP SDAP Gagandeep Singh
  2021-12-20 10:27 ` [PATCH 3/8] crypto/dpaa2_sec: ordered queue support Gagandeep Singh
@ 2021-12-20 10:27 ` Gagandeep Singh
  2021-12-20 10:27 ` [PATCH 5/8] crypto/dpaa2_sec: change digest size for AES_CMAC Gagandeep Singh
                   ` (4 subsequent siblings)
  7 siblings, 0 replies; 42+ messages in thread
From: Gagandeep Singh @ 2021-12-20 10:27 UTC (permalink / raw)
  To: gakhil, dev; +Cc: Akhil Goyal, Gagandeep Singh

From: Akhil Goyal <akhil.goyal@nxp.com>

This patch adds support for the AES-GMAC algorithm in the DPAA2
driver.
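
For illustration (hypothetical, not from this patch): the AEAD transform an
application could fill to request AES-GMAC through the new enum value. The
sizes follow the capability added below (16-byte digest, 12-byte IV, 16/24/32
byte key), and IV_OFFSET is the usual convention of carrying the IV in the
crypto op private data:

  #include <string.h>
  #include <rte_crypto.h>
  #include <rte_crypto_sym.h>

  #define IV_OFFSET (sizeof(struct rte_crypto_op) + \
                     sizeof(struct rte_crypto_sym_op))

  static void
  fill_aes_gmac_xform(struct rte_crypto_sym_xform *xform,
                      uint8_t *key, uint16_t keylen)
  {
          memset(xform, 0, sizeof(*xform));
          xform->type = RTE_CRYPTO_SYM_XFORM_AEAD;
          xform->aead.op = RTE_CRYPTO_AEAD_OP_ENCRYPT;
          xform->aead.algo = RTE_CRYPTO_AEAD_AES_GMAC; /* added by this patch */
          xform->aead.key.data = key;
          xform->aead.key.length = keylen;             /* 16, 24 or 32 */
          xform->aead.iv.offset = IV_OFFSET;
          xform->aead.iv.length = 12;
          xform->aead.digest_length = 16;
          xform->aead.aad_length = 0; /* IPsec protocol offload builds the AAD */
  }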

Signed-off-by: Akhil Goyal <akhil.goyal@nxp.com>
Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 doc/guides/cryptodevs/features/dpaa2_sec.ini |  1 +
 drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c  | 14 ++++++++-
 drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h    | 30 ++++++++++++++++++++
 lib/cryptodev/rte_crypto_sym.h               |  4 ++-
 4 files changed, 47 insertions(+), 2 deletions(-)

diff --git a/doc/guides/cryptodevs/features/dpaa2_sec.ini b/doc/guides/cryptodevs/features/dpaa2_sec.ini
index 3d6e449ca1..dcaf64965d 100644
--- a/doc/guides/cryptodevs/features/dpaa2_sec.ini
+++ b/doc/guides/cryptodevs/features/dpaa2_sec.ini
@@ -48,6 +48,7 @@ SHA512 HMAC  = Y
 SNOW3G UIA2  = Y
 AES XCBC MAC = Y
 ZUC EIA3     = Y
+AES GMAC     = Y
 AES CMAC (128) = Y
 
 ;
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
index a9fda67ac3..99f5157abe 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
@@ -2847,6 +2847,13 @@ dpaa2_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform,
 		aeaddata->algmode = OP_ALG_AAI_CCM;
 		session->aead_alg = RTE_CRYPTO_AEAD_AES_CCM;
 		break;
+	case RTE_CRYPTO_AEAD_AES_GMAC:
+		/**
+		 * AES-GMAC is an AEAD algo with NULL encryption and GMAC
+		 * authentication.
+		 */
+		aeaddata->algtype = OP_PCL_IPSEC_AES_NULL_WITH_GMAC;
+		break;
 	default:
 		DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u",
 			      aead_xform->algo);
@@ -2945,6 +2952,10 @@ dpaa2_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform,
 	case RTE_CRYPTO_AUTH_NULL:
 		authdata->algtype = OP_PCL_IPSEC_HMAC_NULL;
 		break;
+	case RTE_CRYPTO_AUTH_AES_GMAC:
+		DPAA2_SEC_ERR(
+			"AES_GMAC is supported as AEAD algo for IPSEC proto only");
+		return -ENOTSUP;
 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
 	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
 	case RTE_CRYPTO_AUTH_SHA1:
@@ -2953,7 +2964,6 @@ dpaa2_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform,
 	case RTE_CRYPTO_AUTH_SHA224:
 	case RTE_CRYPTO_AUTH_SHA384:
 	case RTE_CRYPTO_AUTH_MD5:
-	case RTE_CRYPTO_AUTH_AES_GMAC:
 	case RTE_CRYPTO_AUTH_KASUMI_F9:
 	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
 	case RTE_CRYPTO_AUTH_ZUC_EIA3:
@@ -3096,6 +3106,7 @@ dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
 		case OP_PCL_IPSEC_AES_GCM8:
 		case OP_PCL_IPSEC_AES_GCM12:
 		case OP_PCL_IPSEC_AES_GCM16:
+		case OP_PCL_IPSEC_AES_NULL_WITH_GMAC:
 			memcpy(encap_pdb.gcm.salt,
 				(uint8_t *)&(ipsec_xform->salt), 4);
 			break;
@@ -3172,6 +3183,7 @@ dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
 		case OP_PCL_IPSEC_AES_GCM8:
 		case OP_PCL_IPSEC_AES_GCM12:
 		case OP_PCL_IPSEC_AES_GCM16:
+		case OP_PCL_IPSEC_AES_NULL_WITH_GMAC:
 			memcpy(decap_pdb.gcm.salt,
 				(uint8_t *)&(ipsec_xform->salt), 4);
 			break;
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
index 1756d917dd..6aa1c01e95 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
@@ -514,6 +514,36 @@ static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
 			}, }
 		}, }
 	},
+	{	/* AES GMAC (AEAD) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+			{.aead = {
+				.algo = RTE_CRYPTO_AEAD_AES_GMAC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.digest_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.aad_size = {
+					.min = 0,
+					.max = 65535,
+					.increment = 1
+				},
+				.iv_size = {
+					.min = 12,
+					.max = 16,
+					.increment = 4
+				}
+			}, }
+		}, }
+	},
 	{	/* AES XCBC HMAC */
 		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 		{.sym = {
diff --git a/lib/cryptodev/rte_crypto_sym.h b/lib/cryptodev/rte_crypto_sym.h
index daa090b978..4644fa3e25 100644
--- a/lib/cryptodev/rte_crypto_sym.h
+++ b/lib/cryptodev/rte_crypto_sym.h
@@ -467,8 +467,10 @@ enum rte_crypto_aead_algorithm {
 	/**< AES algorithm in CCM mode. */
 	RTE_CRYPTO_AEAD_AES_GCM,
 	/**< AES algorithm in GCM mode. */
-	RTE_CRYPTO_AEAD_CHACHA20_POLY1305
+	RTE_CRYPTO_AEAD_CHACHA20_POLY1305,
 	/**< Chacha20 cipher with poly1305 authenticator */
+	RTE_CRYPTO_AEAD_AES_GMAC
+	/**< AES algorithm in GMAC mode. */
 };
 
 /** AEAD algorithm name strings */
-- 
2.25.1


^ permalink raw reply	[flat|nested] 42+ messages in thread

* [PATCH 5/8] crypto/dpaa2_sec: change digest size for AES_CMAC
  2021-12-20 10:27 [PATCH 1/8] common/dpaax: caamflib: Remove code related to SEC ERA 1 to 7 Gagandeep Singh
                   ` (2 preceding siblings ...)
  2021-12-20 10:27 ` [PATCH 4/8] crypto/dpaa2_sec: support AES-GMAC Gagandeep Singh
@ 2021-12-20 10:27 ` Gagandeep Singh
  2021-12-20 10:27 ` [PATCH 6/8] crypto/dpaa2_sec: add useful debug prints in sec dequeue Gagandeep Singh
                   ` (3 subsequent siblings)
  7 siblings, 0 replies; 42+ messages in thread
From: Gagandeep Singh @ 2021-12-20 10:27 UTC (permalink / raw)
  To: gakhil, dev; +Cc: Hemant Agrawal

From: Hemant Agrawal <hemant.agrawal@nxp.com>

Change the digest size to the value supported by the HW engine.

Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
 drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
index 6aa1c01e95..ab652936bc 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
@@ -579,11 +579,11 @@ static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
 					.increment = 1
 				},
 				.digest_size = {
-					.min = 4,
+					.min = 12,
 					.max = 16,
 					.increment = 4
 				},
-				.aad_size = { 0 }
+				.iv_size = { 0 }
 			}, }
 		}, }
 	},
-- 
2.25.1


^ permalink raw reply	[flat|nested] 42+ messages in thread

* [PATCH 6/8] crypto/dpaa2_sec: add useful debug prints in sec dequeue
  2021-12-20 10:27 [PATCH 1/8] common/dpaax: caamflib: Remove code related to SEC ERA 1 to 7 Gagandeep Singh
                   ` (3 preceding siblings ...)
  2021-12-20 10:27 ` [PATCH 5/8] crypto/dpaa2_sec: change digest size for AES_CMAC Gagandeep Singh
@ 2021-12-20 10:27 ` Gagandeep Singh
  2021-12-20 10:27 ` [PATCH 7/8] crypto/dpaa2: fix to check next type for auth or cipher Gagandeep Singh
                   ` (2 subsequent siblings)
  7 siblings, 0 replies; 42+ messages in thread
From: Gagandeep Singh @ 2021-12-20 10:27 UTC (permalink / raw)
  To: gakhil, dev; +Cc: Gagandeep Singh

Add a few useful debug prints in the dequeue function.
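
A minimal usage sketch (not from this patch) of how the dump level could be
selected through the environment variable read at device init; the numeric
levels mirror the enum added below:

  #include <stdlib.h>

  static void
  enable_dpaa2_sec_full_dump(void)
  {
          /* 0 = no dump, 1 = error print only, 2 = full dump (session,
           * descriptor and mbuf contents). Must be set before the
           * dpaa2_sec device is initialised, e.g. before rte_eal_init().
           */
          setenv("DPAA2_SEC_DP_DUMP_LEVEL", "2", 1);
  }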

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c | 105 +++++++++++++++++++-
 1 file changed, 103 insertions(+), 2 deletions(-)

diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
index 99f5157abe..b1ad66d2cb 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
@@ -28,6 +28,7 @@
 #include <fsl_dpopr.h>
 #include <fsl_dpseci.h>
 #include <fsl_mc_sys.h>
+#include <rte_hexdump.h>
 
 #include "dpaa2_sec_priv.h"
 #include "dpaa2_sec_event.h"
@@ -50,7 +51,15 @@
 
 #define NO_PREFETCH 0
 
+/* DPAA2_SEC_DP_DUMP levels */
+enum dpaa2_sec_dump_levels {
+	DPAA2_SEC_DP_NO_DUMP,
+	DPAA2_SEC_DP_ERR_DUMP,
+	DPAA2_SEC_DP_FULL_DUMP
+};
+
 uint8_t cryptodev_driver_id;
+uint8_t dpaa2_sec_dp_dump = DPAA2_SEC_DP_ERR_DUMP;
 
 #ifdef RTE_LIB_SECURITY
 static inline int
@@ -1784,6 +1793,83 @@ dpaa2_sec_enqueue_burst_ordered(void *qp, struct rte_crypto_op **ops,
 	return num_tx;
 }
 
+static void
+dpaa2_sec_dump(struct rte_crypto_op *op)
+{
+	int i;
+	dpaa2_sec_session *sess = NULL;
+	struct ctxt_priv *priv;
+	uint8_t bufsize;
+	struct rte_crypto_sym_op *sym_op;
+
+	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
+		sess = (dpaa2_sec_session *)get_sym_session_private_data(
+			op->sym->session, cryptodev_driver_id);
+#ifdef RTE_LIBRTE_SECURITY
+	else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
+		sess = (dpaa2_sec_session *)get_sec_session_private_data(
+			op->sym->sec_session);
+#endif
+
+	if (sess == NULL)
+		goto mbuf_dump;
+
+	priv = (struct ctxt_priv *)sess->ctxt;
+	printf("\n****************************************\n"
+		"session params:\n\tContext type:\t%d\n\tDirection:\t%s\n"
+		"\tCipher alg:\t%d\n\tAuth alg:\t%d\n\tAead alg:\t%d\n"
+		"\tCipher key len:\t%zd\n", sess->ctxt_type,
+		(sess->dir == DIR_ENC) ? "DIR_ENC" : "DIR_DEC",
+		sess->cipher_alg, sess->auth_alg, sess->aead_alg,
+		sess->cipher_key.length);
+		rte_hexdump(stdout, "cipher key", sess->cipher_key.data,
+				sess->cipher_key.length);
+		rte_hexdump(stdout, "auth key", sess->auth_key.data,
+				sess->auth_key.length);
+	printf("\tAuth key len:\t%zd\n\tIV len:\t\t%d\n\tIV offset:\t%d\n"
+		"\tdigest length:\t%d\n\tstatus:\t\t%d\n\taead auth only"
+		" len:\t%d\n\taead cipher text:\t%d\n",
+		sess->auth_key.length, sess->iv.length, sess->iv.offset,
+		sess->digest_length, sess->status,
+		sess->ext_params.aead_ctxt.auth_only_len,
+		sess->ext_params.aead_ctxt.auth_cipher_text);
+#ifdef RTE_LIBRTE_SECURITY
+	printf("PDCP session params:\n"
+		"\tDomain:\t\t%d\n\tBearer:\t\t%d\n\tpkt_dir:\t%d\n\thfn_ovd:"
+		"\t%d\n\tsn_size:\t%d\n\thfn_ovd_offset:\t%d\n\thfn:\t\t%d\n"
+		"\thfn_threshold:\t0x%x\n", sess->pdcp.domain,
+		sess->pdcp.bearer, sess->pdcp.pkt_dir, sess->pdcp.hfn_ovd,
+		sess->pdcp.sn_size, sess->pdcp.hfn_ovd_offset, sess->pdcp.hfn,
+		sess->pdcp.hfn_threshold);
+
+#endif
+	bufsize = (uint8_t)priv->flc_desc[0].flc.word1_sdl;
+	printf("Descriptor Dump:\n");
+	for (i = 0; i < bufsize; i++)
+		printf("\tDESC[%d]:0x%x\n", i, priv->flc_desc[0].desc[i]);
+
+	printf("\n");
+mbuf_dump:
+	sym_op = op->sym;
+	if (sym_op->m_src) {
+		printf("Source mbuf:\n");
+		rte_pktmbuf_dump(stdout, sym_op->m_src, sym_op->m_src->data_len);
+	}
+	if (sym_op->m_dst) {
+		printf("Destination mbuf:\n");
+		rte_pktmbuf_dump(stdout, sym_op->m_dst, sym_op->m_dst->data_len);
+	}
+
+	printf("Session address = %p\ncipher offset: %d, length: %d\n"
+		"auth offset: %d, length:  %d\n aead offset: %d, length: %d\n"
+		, sym_op->session,
+		sym_op->cipher.data.offset, sym_op->cipher.data.length,
+		sym_op->auth.data.offset, sym_op->auth.data.length,
+		sym_op->aead.data.offset, sym_op->aead.data.length);
+	printf("\n");
+
+}
+
 static uint16_t
 dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
 			uint16_t nb_ops)
@@ -1865,8 +1951,13 @@ dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
 
 		if (unlikely(fd->simple.frc)) {
 			/* TODO Parse SEC errors */
-			DPAA2_SEC_DP_ERR("SEC returned Error - %x\n",
-				      fd->simple.frc);
+			if (dpaa2_sec_dp_dump > DPAA2_SEC_DP_NO_DUMP) {
+				DPAA2_SEC_DP_ERR("SEC returned Error - %x\n",
+						 fd->simple.frc);
+				if (dpaa2_sec_dp_dump > DPAA2_SEC_DP_ERR_DUMP)
+					dpaa2_sec_dump(ops[num_rx]);
+			}
+
 			dpaa2_qp->rx_vq.err_pkts += 1;
 			ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_ERROR;
 		} else {
@@ -4233,6 +4324,16 @@ dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
 		goto init_error;
 	}
 
+	if (getenv("DPAA2_SEC_DP_DUMP_LEVEL")) {
+		dpaa2_sec_dp_dump =
+			atoi(getenv("DPAA2_SEC_DP_DUMP_LEVEL"));
+		if (dpaa2_sec_dp_dump > DPAA2_SEC_DP_FULL_DUMP) {
+			printf("WARN: DPAA2_SEC_DP_DUMP_LEVEL is not "
+			       "supported, changing to FULL error prints\n");
+			dpaa2_sec_dp_dump = DPAA2_SEC_DP_FULL_DUMP;
+		}
+	}
+
 	DPAA2_SEC_INFO("driver %s: created", cryptodev->data->name);
 	return 0;
 
-- 
2.25.1


^ permalink raw reply	[flat|nested] 42+ messages in thread

* [PATCH 7/8] crypto/dpaa2: fix to check next type for auth or cipher
  2021-12-20 10:27 [PATCH 1/8] common/dpaax: caamflib: Remove code related to SEC ERA 1 to 7 Gagandeep Singh
                   ` (4 preceding siblings ...)
  2021-12-20 10:27 ` [PATCH 6/8] crypto/dpaa2_sec: add useful debug prints in sec dequeue Gagandeep Singh
@ 2021-12-20 10:27 ` Gagandeep Singh
  2021-12-20 10:27 ` [PATCH 8/8] crypto/dpaa_sec: add debug framework Gagandeep Singh
  2021-12-28  9:10 ` [PATCH v2 0/8] NXP crypto drivers changes Gagandeep Singh
  7 siblings, 0 replies; 42+ messages in thread
From: Gagandeep Singh @ 2021-12-20 10:27 UTC (permalink / raw)
  To: gakhil, dev; +Cc: Hemant Agrawal, stable

From: Hemant Agrawal <hemant.agrawal@nxp.com>

This patch adds more checks on the next xform type for PDCP cases.
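
As a hypothetical sketch of the xform chain involved (not from the patch):
the PDCP session setup walks a chained cipher + auth transform pair, and the
added checks verify that ->next really points at the other half of the pair
before dereferencing it. Algorithm choices here are placeholders:

  #include <string.h>
  #include <rte_crypto_sym.h>

  static void
  chain_pdcp_xforms(struct rte_crypto_sym_xform *cipher,
                    struct rte_crypto_sym_xform *auth)
  {
          memset(cipher, 0, sizeof(*cipher));
          memset(auth, 0, sizeof(*auth));

          cipher->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
          cipher->cipher.algo = RTE_CRYPTO_CIPHER_SNOW3G_UEA2;
          cipher->next = auth;                /* must be an AUTH xform */

          auth->type = RTE_CRYPTO_SYM_XFORM_AUTH;
          auth->auth.algo = RTE_CRYPTO_AUTH_SNOW3G_UIA2;
          auth->next = NULL;
  }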

Fixes: 45e019608f31 ("crypto/dpaa2_sec: support integrity only PDCP")
Fixes: a1173d55598c ("crypto/dpaa_sec: support PDCP offload")
Cc: stable@dpdk.org

Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
 drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c | 6 ++++--
 drivers/crypto/dpaa_sec/dpaa_sec.c          | 6 ++++--
 2 files changed, 8 insertions(+), 4 deletions(-)

diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
index b1ad66d2cb..d3d5e9eae5 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
@@ -3404,13 +3404,15 @@ dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
 	/* find xfrm types */
 	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
 		cipher_xform = &xform->cipher;
-		if (xform->next != NULL) {
+		if (xform->next != NULL &&
+			xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
 			session->ext_params.aead_ctxt.auth_cipher_text = true;
 			auth_xform = &xform->next->auth;
 		}
 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
 		auth_xform = &xform->auth;
-		if (xform->next != NULL) {
+		if (xform->next != NULL &&
+			xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
 			session->ext_params.aead_ctxt.auth_cipher_text = false;
 			cipher_xform = &xform->next->cipher;
 		}
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.c b/drivers/crypto/dpaa_sec/dpaa_sec.c
index 1dedd9eee5..af166252ca 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.c
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.c
@@ -2984,11 +2984,13 @@ dpaa_sec_set_pdcp_session(struct rte_cryptodev *dev,
 	/* find xfrm types */
 	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
 		cipher_xform = &xform->cipher;
-		if (xform->next != NULL)
+		if (xform->next != NULL &&
+			xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
 			auth_xform = &xform->next->auth;
 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
 		auth_xform = &xform->auth;
-		if (xform->next != NULL)
+		if (xform->next != NULL &&
+			xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
 			cipher_xform = &xform->next->cipher;
 	} else {
 		DPAA_SEC_ERR("Invalid crypto type");
-- 
2.25.1


^ permalink raw reply	[flat|nested] 42+ messages in thread

* [PATCH 8/8] crypto/dpaa_sec: add debug framework
  2021-12-20 10:27 [PATCH 1/8] common/dpaax: caamflib: Remove code related to SEC ERA 1 to 7 Gagandeep Singh
                   ` (5 preceding siblings ...)
  2021-12-20 10:27 ` [PATCH 7/8] crypto/dpaa2: fix to check next type for auth or cipher Gagandeep Singh
@ 2021-12-20 10:27 ` Gagandeep Singh
  2021-12-24 13:02   ` [EXT] " Akhil Goyal
  2021-12-28  9:10 ` [PATCH v2 0/8] NXP crypto drivers changes Gagandeep Singh
  7 siblings, 1 reply; 42+ messages in thread
From: Gagandeep Singh @ 2021-12-20 10:27 UTC (permalink / raw)
  To: gakhil, dev; +Cc: Gagandeep Singh

Add useful debug prints in the DPAA driver for
easier debugging.
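
A similar hypothetical sketch to the dpaa2_sec patch earlier in this series,
here for the DPAA1 driver's variable:

  #include <stdlib.h>

  static void
  enable_dpaa_sec_full_dump(void)
  {
          /* 0 = no dump, 1 = error print only, 2 = full dump; must be set
           * before the dpaa_sec device is initialised.
           */
          setenv("DPAA_SEC_DP_DUMP_LEVEL", "2", 1);
  }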

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 drivers/crypto/dpaa_sec/dpaa_sec.c | 160 ++++++++++++++++++++++++++++-
 1 file changed, 159 insertions(+), 1 deletion(-)

diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.c b/drivers/crypto/dpaa_sec/dpaa_sec.c
index af166252ca..fc73d8c4c1 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.c
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.c
@@ -27,6 +27,7 @@
 #include <rte_memcpy.h>
 #include <rte_string_fns.h>
 #include <rte_spinlock.h>
+#include <rte_hexdump.h>
 
 #include <fsl_usd.h>
 #include <fsl_qman.h>
@@ -45,6 +46,15 @@
 #include <dpaa_sec_log.h>
 #include <dpaax_iova_table.h>
 
+/* DPAA_SEC_DP_DUMP levels */
+enum dpaa_sec_dump_levels {
+	DPAA_SEC_DP_NO_DUMP,
+	DPAA_SEC_DP_ERR_DUMP,
+	DPAA_SEC_DP_FULL_DUMP
+};
+
+uint8_t dpaa_sec_dp_dump = DPAA_SEC_DP_ERR_DUMP;
+
 uint8_t dpaa_cryptodev_driver_id;
 
 static inline void
@@ -649,6 +659,139 @@ dpaa_sec_prep_cdb(dpaa_sec_session *ses)
 	return 0;
 }
 
+static void
+dpaa_sec_dump(struct dpaa_sec_op_ctx *ctx, struct dpaa_sec_qp *qp)
+{
+	struct dpaa_sec_job *job = &ctx->job;
+	struct rte_crypto_op *op = ctx->op;
+	dpaa_sec_session *sess = NULL;
+	struct sec_cdb c_cdb, *cdb;
+	uint8_t bufsize;
+	struct rte_crypto_sym_op *sym_op;
+	struct qm_sg_entry sg[2];
+
+	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
+		sess = (dpaa_sec_session *)
+			get_sym_session_private_data(
+					op->sym->session,
+					dpaa_cryptodev_driver_id);
+#ifdef RTE_LIBRTE_SECURITY
+	else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
+		sess = (dpaa_sec_session *)
+			get_sec_session_private_data(
+					op->sym->sec_session);
+#endif
+	if (sess == NULL) {
+		printf("session is NULL\n");
+		goto mbuf_dump;
+	}
+
+	cdb = &sess->cdb;
+	rte_memcpy(&c_cdb, cdb, sizeof(struct sec_cdb));
+#ifdef RTE_LIBRTE_SECURITY
+	printf("\nsession protocol type = %d\n", sess->proto_alg);
+#endif
+	printf("\n****************************************\n"
+		"session params:\n\tContext type:\t%d\n\tDirection:\t%s\n"
+		"\tCipher alg:\t%d\n\tAuth alg:\t%d\n\tAead alg:\t%d\n"
+		"\tCipher key len:\t%ld\n\tCipher alg:\t%d\n"
+		"\tCipher algmode:\t%d\n", sess->ctxt,
+		(sess->dir == DIR_ENC) ? "DIR_ENC" : "DIR_DEC",
+		sess->cipher_alg, sess->auth_alg, sess->aead_alg,
+		(long)sess->cipher_key.length, sess->cipher_key.alg,
+		sess->cipher_key.algmode);
+		rte_hexdump(stdout, "cipher key", sess->cipher_key.data,
+				sess->cipher_key.length);
+		rte_hexdump(stdout, "auth key", sess->auth_key.data,
+				sess->auth_key.length);
+	printf("\tAuth key len:\t%ld\n\tAuth alg:\t%d\n"
+		"\tAuth algmode:\t%d\n\tIV len:\t\t%d\n\tIV offset:\t%d\n"
+		"\tdigest length:\t%d\n\tauth only len:\t\t%d\n"
+		"\taead cipher text:\t%d\n",
+		(long)sess->auth_key.length, sess->auth_key.alg,
+		sess->auth_key.algmode,
+		sess->iv.length, sess->iv.offset,
+		sess->digest_length, sess->auth_only_len,
+		sess->auth_cipher_text);
+#ifdef RTE_LIBRTE_SECURITY
+	printf("PDCP session params:\n"
+		"\tDomain:\t\t%d\n\tBearer:\t\t%d\n\tpkt_dir:\t%d\n\thfn_ovd:"
+		"\t%d\n\tsn_size:\t%d\n\tsdap_enabled:\t%d\n\thfn_ovd_offset:"
+		"\t%d\n\thfn:\t\t%d\n"
+		"\thfn_threshold:\t0x%x\n", sess->pdcp.domain,
+		sess->pdcp.bearer, sess->pdcp.pkt_dir, sess->pdcp.hfn_ovd,
+		sess->pdcp.sn_size, sess->pdcp.sdap_enabled,
+		sess->pdcp.hfn_ovd_offset, sess->pdcp.hfn,
+		sess->pdcp.hfn_threshold);
+#endif
+	c_cdb.sh_hdr.hi.word = rte_be_to_cpu_32(c_cdb.sh_hdr.hi.word);
+	c_cdb.sh_hdr.lo.word = rte_be_to_cpu_32(c_cdb.sh_hdr.lo.word);
+	bufsize = c_cdb.sh_hdr.hi.field.idlen;
+
+	printf("cdb = %p\n\n", cdb);
+	printf("Descriptor size = %d\n", bufsize);
+	int m;
+	for (m = 0; m < bufsize; m++)
+		printf("0x%x\n", rte_be_to_cpu_32(c_cdb.sh_desc[m]));
+
+	printf("\n");
+mbuf_dump:
+	sym_op = op->sym;
+	if (sym_op->m_src) {
+		printf("Source mbuf:\n");
+		rte_pktmbuf_dump(stdout, sym_op->m_src,
+				 sym_op->m_src->data_len);
+	}
+	if (sym_op->m_dst) {
+		printf("Destination mbuf:\n");
+		rte_pktmbuf_dump(stdout, sym_op->m_dst,
+				 sym_op->m_dst->data_len);
+	}
+
+	printf("Session address = %p\ncipher offset: %d, length: %d\n"
+		"auth offset: %d, length:  %d\n aead offset: %d, length: %d\n",
+		sym_op->session, sym_op->cipher.data.offset,
+		sym_op->cipher.data.length,
+		sym_op->auth.data.offset, sym_op->auth.data.length,
+		sym_op->aead.data.offset, sym_op->aead.data.length);
+	printf("\n");
+
+	printf("******************************************************\n");
+	printf("ctx info:\n");
+	printf("job->sg[0] output info:\n");
+	memcpy(&sg[0], &job->sg[0], sizeof(sg[0]));
+	printf("\taddr = 0x%lx,\n\tlen = %d,\n\tfinal = %d,\n\textention = %d"
+		"\n\tbpid = %d\n\toffset = %d\n",
+		(unsigned long)sg[0].addr, sg[0].length, sg[0].final,
+		sg[0].extension, sg[0].bpid, sg[0].offset);
+	printf("\njob->sg[1] input info:\n");
+	memcpy(&sg[1], &job->sg[1], sizeof(sg[1]));
+	hw_sg_to_cpu(&sg[1]);
+	printf("\taddr = 0x%lx,\n\tlen = %d,\n\tfinal = %d,\n\textention = %d"
+		"\n\tbpid = %d\n\toffset = %d\n",
+		(unsigned long)sg[1].addr, sg[1].length, sg[1].final,
+		sg[1].extension, sg[1].bpid, sg[1].offset);
+
+	printf("\nctx pool addr = %p\n", ctx->ctx_pool);
+	if (ctx->ctx_pool)
+		printf("ctx pool available counts = %d\n",
+			rte_mempool_avail_count(ctx->ctx_pool));
+
+	printf("\nop pool addr = %p\n", op->mempool);
+	if (op->mempool)
+		printf("op pool available counts = %d\n",
+			rte_mempool_avail_count(op->mempool));
+
+	printf("********************************************************\n");
+	printf("Queue data:\n");
+	printf("\tFQID = 0x%x\n\tstate = %d\n\tnb_desc = %d\n"
+		"\tctx_pool = %p\n\trx_pkts = %d\n\ttx_pkts"
+	       "= %d\n\trx_errs = %d\n\ttx_errs = %d\n\n",
+		qp->outq.fqid, qp->outq.state, qp->outq.nb_desc,
+		qp->ctx_pool, qp->rx_pkts, qp->tx_pkts,
+		qp->rx_errs, qp->tx_errs);
+}
+
 /* qp is lockless, should be accessed by only one thread */
 static int
 dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
@@ -716,7 +859,12 @@ dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
 		if (!ctx->fd_status) {
 			op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
 		} else {
-			DPAA_SEC_DP_WARN("SEC return err:0x%x", ctx->fd_status);
+			if (dpaa_sec_dp_dump > DPAA_SEC_DP_NO_DUMP) {
+				DPAA_SEC_DP_WARN("SEC return err:0x%x\n",
+						  ctx->fd_status);
+				if (dpaa_sec_dp_dump > DPAA_SEC_DP_ERR_DUMP)
+					dpaa_sec_dump(ctx, qp);
+			}
 			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
 		}
 		ops[pkts++] = op;
@@ -3533,6 +3681,16 @@ dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
 		}
 	}
 
+	if (getenv("DPAA_SEC_DP_DUMP_LEVEL")) {
+		dpaa_sec_dp_dump =
+			atoi(getenv("DPAA_SEC_DP_DUMP_LEVEL"));
+		if (dpaa_sec_dp_dump > DPAA_SEC_DP_FULL_DUMP) {
+			printf("WARN: DPAA_SEC_DP_DUMP_LEVEL is not "
+				"supported, changing to FULL error prints\n");
+			dpaa_sec_dp_dump = DPAA_SEC_DP_FULL_DUMP;
+		}
+	}
+
 	RTE_LOG(INFO, PMD, "%s cryptodev init\n", cryptodev->data->name);
 	return 0;
 
-- 
2.25.1


^ permalink raw reply	[flat|nested] 42+ messages in thread

* RE: [EXT] [PATCH 8/8] crypto/dpaa_sec: add debug framework
  2021-12-20 10:27 ` [PATCH 8/8] crypto/dpaa_sec: add debug framework Gagandeep Singh
@ 2021-12-24 13:02   ` Akhil Goyal
  0 siblings, 0 replies; 42+ messages in thread
From: Akhil Goyal @ 2021-12-24 13:02 UTC (permalink / raw)
  To: Gagandeep Singh, dev

> Adding useful debug prints in DPAA driver for
> easy debugging.
> 
> Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
> ---
I believe it is better to use devargs instead of environment variables.
Also add documentation explaining how to use it.

^ permalink raw reply	[flat|nested] 42+ messages in thread

* [PATCH v2 0/8] NXP crypto drivers changes
  2021-12-20 10:27 [PATCH 1/8] common/dpaax: caamflib: Remove code related to SEC ERA 1 to 7 Gagandeep Singh
                   ` (6 preceding siblings ...)
  2021-12-20 10:27 ` [PATCH 8/8] crypto/dpaa_sec: add debug framework Gagandeep Singh
@ 2021-12-28  9:10 ` Gagandeep Singh
  2021-12-28  9:10   ` [PATCH v2 1/8] common/dpaax: caamflib: Remove code related to SEC ERA 1 to 7 Gagandeep Singh
                     ` (7 more replies)
  7 siblings, 8 replies; 42+ messages in thread
From: Gagandeep Singh @ 2021-12-28  9:10 UTC (permalink / raw)
  To: gakhil, dev; +Cc: Gagandeep Singh

v2 change-log:
* Use devargs for both DPAA1 and DPAA2 drivers to
  dump debug prints on SEC errors.

Akhil Goyal (1):
  crypto/dpaa2_sec: support AES-GMAC

Franck LENORMAND (1):
  common/dpaax: caamflib: Remove code related to SEC ERA 1 to 7

Gagandeep Singh (3):
  common/dpaax: change job processing mode for PDCP SDAP
  crypto/dpaa2_sec: add useful debug prints in sec dequeue
  crypto/dpaa_sec: add debug framework

Hemant Agrawal (2):
  crypto/dpaa2_sec: change digest size for AES_CMAC
  crypto/dpaa2: fix to check next type for auth or cipher

Nipun Gupta (1):
  crypto/dpaa2_sec: ordered queue support

 doc/guides/cryptodevs/dpaa2_sec.rst          |  10 +
 doc/guides/cryptodevs/dpaa_sec.rst           |  10 +
 doc/guides/cryptodevs/features/dpaa2_sec.ini |   1 +
 drivers/bus/dpaa/dpaa_bus.c                  |  16 +-
 drivers/common/dpaax/caamflib/desc/pdcp.h    | 939 ++++---------------
 drivers/common/dpaax/caamflib/desc/sdap.h    | 111 +--
 drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c  | 425 ++++++++-
 drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h    |  42 +-
 drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h     |  14 +-
 drivers/crypto/dpaa_sec/dpaa_sec.c           | 212 ++++-
 lib/cryptodev/rte_crypto_sym.h               |   4 +-
 11 files changed, 870 insertions(+), 914 deletions(-)

-- 
2.25.1


^ permalink raw reply	[flat|nested] 42+ messages in thread

* [PATCH v2 1/8] common/dpaax: caamflib: Remove code related to SEC ERA 1 to 7
  2021-12-28  9:10 ` [PATCH v2 0/8] NXP crypto drivers changes Gagandeep Singh
@ 2021-12-28  9:10   ` Gagandeep Singh
  2022-02-10  4:31     ` [PATCH v3 0/7] NXP crypto drivers changes Gagandeep Singh
  2021-12-28  9:10   ` [PATCH v2 2/8] common/dpaax: change job processing mode for PDCP SDAP Gagandeep Singh
                     ` (6 subsequent siblings)
  7 siblings, 1 reply; 42+ messages in thread
From: Gagandeep Singh @ 2021-12-28  9:10 UTC (permalink / raw)
  To: gakhil, dev; +Cc: Franck LENORMAND, Gagandeep Singh

From: Franck LENORMAND <franck.lenormand@nxp.com>

DPAA1 and DPAA2 platforms use SEC ERA 8 and 10 only.

This patch removes code in SDAP and PDCP header related to these
ERA to simplify the codebase:
 - Simplify logic using RTA_SEC_ERA_<> macro
 - Remove era_2_sw_hfn_ovrd dedicated to RTA_SEC_ERA_2

Signed-off-by: Franck LENORMAND <franck.lenormand@nxp.com>
Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 drivers/common/dpaax/caamflib/desc/pdcp.h   | 939 ++++----------------
 drivers/common/dpaax/caamflib/desc/sdap.h   |  91 +-
 drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c |  14 +-
 drivers/crypto/dpaa_sec/dpaa_sec.c          |  14 +-
 4 files changed, 183 insertions(+), 875 deletions(-)
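
Not part of the patch - a minimal sketch of a call site once the trailing
era_2_sw_hfn_ovrd argument is dropped from the descriptor constructors.
All values below (descbuf, swap, hfn, bearer, direction, hfn_threshold,
cipherdata, authdata) are placeholders the caller is assumed to already
have; only the argument list change is the point:

	int shared_desc_len;

	/* sn_size, bearer, direction and HFN values come from the PDCP
	 * session; the alginfo structs describe cipher and auth keys. */
	shared_desc_len = cnstr_shdsc_pdcp_c_plane_encap(descbuf,
			1 /* ps */, swap, hfn, PDCP_SN_SIZE_12,
			bearer, direction, hfn_threshold,
			&cipherdata, &authdata);
	if (shared_desc_len < 0)
		return shared_desc_len;	/* descriptor construction failed */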

diff --git a/drivers/common/dpaax/caamflib/desc/pdcp.h b/drivers/common/dpaax/caamflib/desc/pdcp.h
index 8e8daf5ba8..2fe56c53c6 100644
--- a/drivers/common/dpaax/caamflib/desc/pdcp.h
+++ b/drivers/common/dpaax/caamflib/desc/pdcp.h
@@ -329,91 +329,35 @@ pdcp_insert_cplane_null_op(struct program *p,
 			   struct alginfo *cipherdata __maybe_unused,
 			   struct alginfo *authdata __maybe_unused,
 			   unsigned int dir,
-			   enum pdcp_sn_size sn_size __maybe_unused,
-			   unsigned char era_2_sw_hfn_ovrd __maybe_unused)
+			   enum pdcp_sn_size sn_size __maybe_unused)
 {
-	LABEL(local_offset);
-	REFERENCE(move_cmd_read_descbuf);
-	REFERENCE(move_cmd_write_descbuf);
-
-	if (rta_sec_era > RTA_SEC_ERA_2) {
-		MATHB(p, SEQINSZ, ADD, ZERO, VSEQINSZ, 4, 0);
-		if (dir == OP_TYPE_ENCAP_PROTOCOL)
-			MATHB(p, SEQINSZ, ADD, PDCP_MAC_I_LEN, VSEQOUTSZ, 4,
-			      IMMED2);
-		else
-			MATHB(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, VSEQOUTSZ, 4,
-			      IMMED2);
-	} else {
-		MATHB(p, SEQINSZ, ADD, ONE, VSEQINSZ, 4, 0);
-		MATHB(p, VSEQINSZ, SUB, ONE, VSEQINSZ, 4, 0);
-
-		if (dir == OP_TYPE_ENCAP_PROTOCOL) {
-			MATHB(p, SEQINSZ, ADD, PDCP_MAC_I_LEN, VSEQOUTSZ, 4,
-			      IMMED2);
-			MATHB(p, VSEQINSZ, SUB, ONE, MATH0, 4, 0);
-		} else {
-			MATHB(p, VSEQINSZ, SUB, PDCP_MAC_I_LEN, VSEQINSZ, 4,
-			      IMMED2);
-			MATHB(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, VSEQOUTSZ, 4,
-			      IMMED2);
-			MATHB(p, VSEQOUTSZ, SUB, ONE, MATH0, 4, 0);
-		}
-
-		MATHB(p, MATH0, ADD, ONE, MATH0, 4, 0);
+	MATHB(p, SEQINSZ, ADD, ZERO, VSEQINSZ, 4, 0);
+	if (dir == OP_TYPE_ENCAP_PROTOCOL)
+		MATHB(p, SEQINSZ, ADD, PDCP_MAC_I_LEN, VSEQOUTSZ, 4,
+		      IMMED2);
+	else
+		MATHB(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, VSEQOUTSZ, 4,
+		      IMMED2);
 
-		/*
-		 * Since MOVELEN is available only starting with
-		 * SEC ERA 3, use poor man's MOVELEN: create a MOVE
-		 * command dynamically by writing the length from M1 by
-		 * OR-ing the command in the M1 register and MOVE the
-		 * result into the descriptor buffer. Care must be taken
-		 * wrt. the location of the command because of SEC
-		 * pipelining. The actual MOVEs are written at the end
-		 * of the descriptor due to calculations needed on the
-		 * offset in the descriptor for the MOVE command.
-		 */
-		move_cmd_read_descbuf = MOVE(p, DESCBUF, 0, MATH0, 0, 6,
-					     IMMED);
-		move_cmd_write_descbuf = MOVE(p, MATH0, 0, DESCBUF, 0, 8,
-					      WAITCOMP | IMMED);
-	}
 	MATHB(p, VSEQINSZ, SUB, PDCP_NULL_MAX_FRAME_LEN, NONE, 4,
 	      IMMED2);
 	JUMP(p, PDCP_MAX_FRAME_LEN_STATUS, HALT_STATUS, ALL_FALSE, MATH_N);
 
-	if (rta_sec_era > RTA_SEC_ERA_2) {
-		if (dir == OP_TYPE_ENCAP_PROTOCOL)
-			MATHB(p, VSEQINSZ, ADD, ZERO, MATH0, 4, 0);
-		else
-			MATHB(p, VSEQOUTSZ, ADD, ZERO, MATH0, 4, 0);
-	}
+	if (dir == OP_TYPE_ENCAP_PROTOCOL)
+		MATHB(p, VSEQINSZ, ADD, ZERO, MATH0, 4, 0);
+	else
+		MATHB(p, VSEQOUTSZ, ADD, ZERO, MATH0, 4, 0);
+
 	SEQFIFOSTORE(p, MSG, 0, 0, VLF);
 	SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1 | FLUSH1);
 
-	if (rta_sec_era > RTA_SEC_ERA_2) {
-		MOVE(p, AB1, 0, OFIFO, 0, MATH0, 0);
-	} else {
-		SET_LABEL(p, local_offset);
-
-		/* Shut off automatic Info FIFO entries */
-		LOAD(p, 0, DCTRL, LDOFF_DISABLE_AUTO_NFIFO, 0, IMMED);
-		/* Placeholder for MOVE command with length from M1 register */
-		MOVE(p, IFIFOAB1, 0, OFIFO, 0, 0, IMMED);
-		/* Enable automatic Info FIFO entries */
-		LOAD(p, 0, DCTRL, LDOFF_ENABLE_AUTO_NFIFO, 0, IMMED);
-	}
+	MOVE(p, AB1, 0, OFIFO, 0, MATH0, 0);
 
 	if (dir == OP_TYPE_ENCAP_PROTOCOL) {
 		MATHB(p, MATH1, XOR, MATH1, MATH0, 8, 0);
 		MOVE(p, MATH0, 0, OFIFO, 0, 4, IMMED);
 	}
 
-	if (rta_sec_era < RTA_SEC_ERA_3) {
-		PATCH_MOVE(p, move_cmd_read_descbuf, local_offset);
-		PATCH_MOVE(p, move_cmd_write_descbuf, local_offset);
-	}
-
 	return 0;
 }
 
@@ -422,66 +366,21 @@ insert_copy_frame_op(struct program *p,
 		     struct alginfo *cipherdata __maybe_unused,
 		     unsigned int dir __maybe_unused)
 {
-	LABEL(local_offset);
-	REFERENCE(move_cmd_read_descbuf);
-	REFERENCE(move_cmd_write_descbuf);
-
-	if (rta_sec_era > RTA_SEC_ERA_2) {
-		MATHB(p, SEQINSZ, ADD, ZERO, VSEQINSZ,  4, 0);
-		MATHB(p, SEQINSZ, ADD, ZERO, VSEQOUTSZ,  4, 0);
-	} else {
-		MATHB(p, SEQINSZ, ADD, ONE, VSEQINSZ,  4, 0);
-		MATHB(p, VSEQINSZ, SUB, ONE, VSEQINSZ,  4, 0);
-		MATHB(p, SEQINSZ, ADD, ONE, VSEQOUTSZ,  4, 0);
-		MATHB(p, VSEQOUTSZ, SUB, ONE, VSEQOUTSZ,  4, 0);
-		MATHB(p, VSEQINSZ, SUB, ONE, MATH0,  4, 0);
-		MATHB(p, MATH0, ADD, ONE, MATH0,  4, 0);
+	MATHB(p, SEQINSZ, ADD, ZERO, VSEQINSZ,  4, 0);
+	MATHB(p, SEQINSZ, ADD, ZERO, VSEQOUTSZ,  4, 0);
 
-		/*
-		 * Since MOVELEN is available only starting with
-		 * SEC ERA 3, use poor man's MOVELEN: create a MOVE
-		 * command dynamically by writing the length from M1 by
-		 * OR-ing the command in the M1 register and MOVE the
-		 * result into the descriptor buffer. Care must be taken
-		 * wrt. the location of the command because of SEC
-		 * pipelining. The actual MOVEs are written at the end
-		 * of the descriptor due to calculations needed on the
-		 * offset in the descriptor for the MOVE command.
-		 */
-		move_cmd_read_descbuf = MOVE(p, DESCBUF, 0, MATH0, 0, 6,
-					     IMMED);
-		move_cmd_write_descbuf = MOVE(p, MATH0, 0, DESCBUF, 0, 8,
-					      WAITCOMP | IMMED);
-	}
 	MATHB(p, SEQINSZ, SUB, PDCP_NULL_MAX_FRAME_LEN, NONE,  4,
 	      IFB | IMMED2);
 	JUMP(p, PDCP_MAX_FRAME_LEN_STATUS, HALT_STATUS, ALL_FALSE, MATH_N);
 
-	if (rta_sec_era > RTA_SEC_ERA_2)
-		MATHB(p, VSEQINSZ, ADD, ZERO, MATH0,  4, 0);
+	MATHB(p, VSEQINSZ, ADD, ZERO, MATH0,  4, 0);
 
 	SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1 | FLUSH1);
-	if (rta_sec_era > RTA_SEC_ERA_2) {
-		MOVE(p, AB1, 0, OFIFO, 0, MATH0, 0);
-	} else {
-		SET_LABEL(p, local_offset);
 
-		/* Shut off automatic Info FIFO entries */
-		LOAD(p, 0, DCTRL, LDOFF_DISABLE_AUTO_NFIFO, 0, IMMED);
-
-		/* Placeholder for MOVE command with length from M0 register */
-		MOVE(p, IFIFOAB1, 0, OFIFO, 0, 0, IMMED);
-
-		/* Enable automatic Info FIFO entries */
-		LOAD(p, 0, DCTRL, LDOFF_ENABLE_AUTO_NFIFO, 0, IMMED);
-	}
+	MOVE(p, AB1, 0, OFIFO, 0, MATH0, 0);
 
 	SEQFIFOSTORE(p, MSG, 0, 0, VLF);
 
-	if (rta_sec_era < RTA_SEC_ERA_3) {
-		PATCH_MOVE(p, move_cmd_read_descbuf, local_offset);
-		PATCH_MOVE(p, move_cmd_write_descbuf, local_offset);
-	}
 	return 0;
 }
 
@@ -490,13 +389,12 @@ pdcp_insert_cplane_int_only_op(struct program *p,
 			       bool swap __maybe_unused,
 			       struct alginfo *cipherdata __maybe_unused,
 			       struct alginfo *authdata, unsigned int dir,
-			       enum pdcp_sn_size sn_size,
-			       unsigned char era_2_sw_hfn_ovrd)
+			       enum pdcp_sn_size sn_size)
 {
 	uint32_t offset = 0, length = 0, sn_mask = 0;
 
 	/* 12 bit SN is only supported for protocol offload case */
-	if (rta_sec_era >= RTA_SEC_ERA_8 && sn_size == PDCP_SN_SIZE_12) {
+	if (sn_size == PDCP_SN_SIZE_12) {
 		KEY(p, KEY2, authdata->key_enc_flags, authdata->key,
 		    authdata->keylen, INLINE_KEY(authdata));
 
@@ -526,9 +424,6 @@ pdcp_insert_cplane_int_only_op(struct program *p,
 		return -ENOTSUP;
 
 	}
-	LABEL(local_offset);
-	REFERENCE(move_cmd_read_descbuf);
-	REFERENCE(move_cmd_write_descbuf);
 
 	switch (authdata->algtype) {
 	case PDCP_AUTH_TYPE_SNOW:
@@ -538,14 +433,7 @@ pdcp_insert_cplane_int_only_op(struct program *p,
 		SEQLOAD(p, MATH0, offset, length, 0);
 		JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CALM);
 
-		if (rta_sec_era > RTA_SEC_ERA_2 ||
-		    (rta_sec_era == RTA_SEC_ERA_2 &&
-				   era_2_sw_hfn_ovrd == 0)) {
-			SEQINPTR(p, 0, length, RTO);
-		} else {
-			SEQINPTR(p, 0, 5, RTO);
-			SEQFIFOLOAD(p, SKIP, 4, 0);
-		}
+		SEQINPTR(p, 0, length, RTO);
 
 		if (swap == false) {
 			MATHB(p, MATH0, AND, sn_mask, MATH1,  8,
@@ -580,40 +468,11 @@ pdcp_insert_cplane_int_only_op(struct program *p,
 			MATHB(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, MATH1, 4,
 			      IMMED2);
 		} else {
-			if (rta_sec_era > RTA_SEC_ERA_2) {
-				MATHB(p, SEQINSZ, SUB, ZERO, MATH1, 4,
-				      0);
-			} else {
-				MATHB(p, SEQINSZ, ADD, ONE, MATH1, 4,
-				      0);
-				MATHB(p, MATH1, SUB, ONE, MATH1, 4,
-				      0);
-			}
+			MATHB(p, SEQINSZ, SUB, ZERO, MATH1, 4, 0);
 		}
 
-		if (rta_sec_era > RTA_SEC_ERA_2) {
-			MATHB(p, MATH1, SUB, ZERO, VSEQINSZ, 4, 0);
-			MATHB(p, MATH1, SUB, ZERO, VSEQOUTSZ, 4, 0);
-		} else {
-			MATHB(p, ZERO, ADD, MATH1, VSEQINSZ, 4, 0);
-			MATHB(p, ZERO, ADD, MATH1, VSEQOUTSZ, 4, 0);
-
-			/*
-			 * Since MOVELEN is available only starting with
-			 * SEC ERA 3, use poor man's MOVELEN: create a MOVE
-			 * command dynamically by writing the length from M1 by
-			 * OR-ing the command in the M1 register and MOVE the
-			 * result into the descriptor buffer. Care must be taken
-			 * wrt. the location of the command because of SEC
-			 * pipelining. The actual MOVEs are written at the end
-			 * of the descriptor due to calculations needed on the
-			 * offset in the descriptor for the MOVE command.
-			 */
-			move_cmd_read_descbuf = MOVE(p, DESCBUF, 0, MATH1, 0, 6,
-						     IMMED);
-			move_cmd_write_descbuf = MOVE(p, MATH1, 0, DESCBUF, 0,
-						      8, WAITCOMP | IMMED);
-		}
+		MATHB(p, MATH1, SUB, ZERO, VSEQINSZ, 4, 0);
+		MATHB(p, MATH1, SUB, ZERO, VSEQOUTSZ, 4, 0);
 
 		SEQFIFOSTORE(p, MSG, 0, 0, VLF);
 		ALG_OPERATION(p, OP_ALG_ALGSEL_SNOW_F9, OP_ALG_AAI_F9,
@@ -622,25 +481,9 @@ pdcp_insert_cplane_int_only_op(struct program *p,
 				     ICV_CHECK_DISABLE : ICV_CHECK_ENABLE,
 			      DIR_ENC);
 
-		if (rta_sec_era > RTA_SEC_ERA_2) {
-			SEQFIFOLOAD(p, MSGINSNOOP, 0,
+		SEQFIFOLOAD(p, MSGINSNOOP, 0,
 				    VLF | LAST1 | LAST2 | FLUSH1);
-			MOVE(p, AB1, 0, OFIFO, 0, MATH1, 0);
-		} else {
-			SEQFIFOLOAD(p, MSGINSNOOP, 0,
-				    VLF | LAST1 | LAST2 | FLUSH1);
-			SET_LABEL(p, local_offset);
-
-			/* Shut off automatic Info FIFO entries */
-			LOAD(p, 0, DCTRL, LDOFF_DISABLE_AUTO_NFIFO, 0, IMMED);
-			/*
-			 * Placeholder for MOVE command with length from M1
-			 * register
-			 */
-			MOVE(p, IFIFOAB1, 0, OFIFO, 0, 0, IMMED);
-			/* Enable automatic Info FIFO entries */
-			LOAD(p, 0, DCTRL, LDOFF_ENABLE_AUTO_NFIFO, 0, IMMED);
-		}
+		MOVE(p, AB1, 0, OFIFO, 0, MATH1, 0);
 
 		if (dir == OP_TYPE_DECAP_PROTOCOL)
 			SEQFIFOLOAD(p, ICV2, 4, LAST2);
@@ -655,14 +498,7 @@ pdcp_insert_cplane_int_only_op(struct program *p,
 		    authdata->keylen, INLINE_KEY(authdata));
 		SEQLOAD(p, MATH0, offset, length, 0);
 		JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CALM);
-		if (rta_sec_era > RTA_SEC_ERA_2 ||
-		    (rta_sec_era == RTA_SEC_ERA_2 &&
-		     era_2_sw_hfn_ovrd == 0)) {
-			SEQINPTR(p, 0, length, RTO);
-		} else {
-			SEQINPTR(p, 0, 5, RTO);
-			SEQFIFOLOAD(p, SKIP, 4, 0);
-		}
+		SEQINPTR(p, 0, length, RTO);
 
 		if (swap == false) {
 			MATHB(p, MATH0, AND, sn_mask, MATH1, 8,
@@ -686,40 +522,12 @@ pdcp_insert_cplane_int_only_op(struct program *p,
 			MATHB(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, MATH1, 4,
 			      IMMED2);
 		} else {
-			if (rta_sec_era > RTA_SEC_ERA_2) {
-				MATHB(p, SEQINSZ, SUB, ZERO, MATH1, 4,
-				      0);
-			} else {
-				MATHB(p, SEQINSZ, ADD, ONE, MATH1, 4,
-				      0);
-				MATHB(p, MATH1, SUB, ONE, MATH1, 4,
-				      0);
-			}
+			MATHB(p, SEQINSZ, SUB, ZERO, MATH1, 4, 0);
 		}
 
-		if (rta_sec_era > RTA_SEC_ERA_2) {
-			MATHB(p, MATH1, SUB, ZERO, VSEQINSZ, 4, 0);
-			MATHB(p, MATH1, SUB, ZERO, VSEQOUTSZ, 4, 0);
-		} else {
-			MATHB(p, ZERO, ADD, MATH1, VSEQINSZ, 4, 0);
-			MATHB(p, ZERO, ADD, MATH1, VSEQOUTSZ, 4, 0);
-
-			/*
-			 * Since MOVELEN is available only starting with
-			 * SEC ERA 3, use poor man's MOVELEN: create a MOVE
-			 * command dynamically by writing the length from M1 by
-			 * OR-ing the command in the M1 register and MOVE the
-			 * result into the descriptor buffer. Care must be taken
-			 * wrt. the location of the command because of SEC
-			 * pipelining. The actual MOVEs are written at the end
-			 * of the descriptor due to calculations needed on the
-			 * offset in the descriptor for the MOVE command.
-			 */
-			move_cmd_read_descbuf = MOVE(p, DESCBUF, 0, MATH1, 0, 6,
-						     IMMED);
-			move_cmd_write_descbuf = MOVE(p, MATH1, 0, DESCBUF, 0,
-						      8, WAITCOMP | IMMED);
-		}
+		MATHB(p, MATH1, SUB, ZERO, VSEQINSZ, 4, 0);
+		MATHB(p, MATH1, SUB, ZERO, VSEQOUTSZ, 4, 0);
+
 		SEQFIFOSTORE(p, MSG, 0, 0, VLF);
 		ALG_OPERATION(p, OP_ALG_ALGSEL_AES,
 			      OP_ALG_AAI_CMAC,
@@ -728,27 +536,9 @@ pdcp_insert_cplane_int_only_op(struct program *p,
 				     ICV_CHECK_DISABLE : ICV_CHECK_ENABLE,
 			      DIR_ENC);
 
-		if (rta_sec_era > RTA_SEC_ERA_2) {
-			MOVE(p, AB2, 0, OFIFO, 0, MATH1, 0);
-			SEQFIFOLOAD(p, MSGINSNOOP, 0,
+		MOVE(p, AB2, 0, OFIFO, 0, MATH1, 0);
+		SEQFIFOLOAD(p, MSGINSNOOP, 0,
 				    VLF | LAST1 | LAST2 | FLUSH1);
-		} else {
-			SEQFIFOLOAD(p, MSGINSNOOP, 0,
-				    VLF | LAST1 | LAST2 | FLUSH1);
-			SET_LABEL(p, local_offset);
-
-			/* Shut off automatic Info FIFO entries */
-			LOAD(p, 0, DCTRL, LDOFF_DISABLE_AUTO_NFIFO, 0, IMMED);
-
-			/*
-			 * Placeholder for MOVE command with length from
-			 * M1 register
-			 */
-			MOVE(p, IFIFOAB2, 0, OFIFO, 0, 0, IMMED);
-
-			/* Enable automatic Info FIFO entries */
-			LOAD(p, 0, DCTRL, LDOFF_ENABLE_AUTO_NFIFO, 0, IMMED);
-		}
 
 		if (dir == OP_TYPE_DECAP_PROTOCOL)
 			SEQFIFOLOAD(p, ICV1, 4, LAST1 | FLUSH1);
@@ -758,10 +548,6 @@ pdcp_insert_cplane_int_only_op(struct program *p,
 		break;
 
 	case PDCP_AUTH_TYPE_ZUC:
-		if (rta_sec_era < RTA_SEC_ERA_5) {
-			pr_err("Invalid era for selected algorithm\n");
-			return -ENOTSUP;
-		}
 		/* Insert Auth Key */
 		KEY(p, KEY2, authdata->key_enc_flags, authdata->key,
 		    authdata->keylen, INLINE_KEY(authdata));
@@ -817,11 +603,6 @@ pdcp_insert_cplane_int_only_op(struct program *p,
 		return -EINVAL;
 	}
 
-	if (rta_sec_era < RTA_SEC_ERA_3) {
-		PATCH_MOVE(p, move_cmd_read_descbuf, local_offset);
-		PATCH_MOVE(p, move_cmd_write_descbuf, local_offset);
-	}
-
 	return 0;
 }
 
@@ -831,15 +612,14 @@ pdcp_insert_cplane_enc_only_op(struct program *p,
 			       struct alginfo *cipherdata,
 			       struct alginfo *authdata __maybe_unused,
 			       unsigned int dir,
-			       enum pdcp_sn_size sn_size,
-			       unsigned char era_2_sw_hfn_ovrd __maybe_unused)
+			       enum pdcp_sn_size sn_size)
 {
 	uint32_t offset = 0, length = 0, sn_mask = 0;
 	/* Insert Cipher Key */
 	KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
 	    cipherdata->keylen, INLINE_KEY(cipherdata));
 
-	if ((rta_sec_era >= RTA_SEC_ERA_8 && sn_size != PDCP_SN_SIZE_18 &&
+	if ((sn_size != PDCP_SN_SIZE_18 &&
 			!(rta_sec_era == RTA_SEC_ERA_8 &&
 				authdata->algtype == 0))
 			|| (rta_sec_era == RTA_SEC_ERA_10)) {
@@ -889,12 +669,7 @@ pdcp_insert_cplane_enc_only_op(struct program *p,
 	case PDCP_CIPHER_TYPE_SNOW:
 		MOVEB(p, MATH2, 0, CONTEXT1, 0, 8, WAITCOMP | IMMED);
 
-		if (rta_sec_era > RTA_SEC_ERA_2) {
-			MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
-		} else {
-			MATHB(p, SEQINSZ, SUB, ONE, MATH1, 4, 0);
-			MATHB(p, MATH1, ADD, ONE, VSEQINSZ, 4, 0);
-		}
+		MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
 
 		if (dir == OP_TYPE_ENCAP_PROTOCOL)
 			MATHB(p, SEQINSZ, ADD, PDCP_MAC_I_LEN, VSEQOUTSZ, 4,
@@ -913,12 +688,7 @@ pdcp_insert_cplane_enc_only_op(struct program *p,
 	case PDCP_CIPHER_TYPE_AES:
 		MOVEB(p, MATH2, 0, CONTEXT1, 0x10, 0x10, WAITCOMP | IMMED);
 
-		if (rta_sec_era > RTA_SEC_ERA_2) {
-			MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
-		} else {
-			MATHB(p, SEQINSZ, SUB, ONE, MATH1, 4, 0);
-			MATHB(p, MATH1, ADD, ONE, VSEQINSZ, 4, 0);
-		}
+		MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
 
 		if (dir == OP_TYPE_ENCAP_PROTOCOL)
 			MATHB(p, SEQINSZ, ADD, PDCP_MAC_I_LEN, VSEQOUTSZ, 4,
@@ -937,11 +707,6 @@ pdcp_insert_cplane_enc_only_op(struct program *p,
 		break;
 
 	case PDCP_CIPHER_TYPE_ZUC:
-		if (rta_sec_era < RTA_SEC_ERA_5) {
-			pr_err("Invalid era for selected algorithm\n");
-			return -ENOTSUP;
-		}
-
 		MOVEB(p, MATH2, 0, CONTEXT1, 0, 0x08, IMMED);
 		MOVEB(p, MATH2, 0, CONTEXT1, 0x08, 0x08, WAITCOMP | IMMED);
 		MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
@@ -988,8 +753,7 @@ pdcp_insert_uplane_snow_snow_op(struct program *p,
 			      struct alginfo *cipherdata,
 			      struct alginfo *authdata,
 			      unsigned int dir,
-			      enum pdcp_sn_size sn_size,
-			      unsigned char era_2_sw_hfn_ovrd __maybe_unused)
+			      enum pdcp_sn_size sn_size)
 {
 	uint32_t offset = 0, length = 0, sn_mask = 0;
 
@@ -998,7 +762,7 @@ pdcp_insert_uplane_snow_snow_op(struct program *p,
 	KEY(p, KEY2, authdata->key_enc_flags, authdata->key, authdata->keylen,
 	    INLINE_KEY(authdata));
 
-	if (rta_sec_era >= RTA_SEC_ERA_8 && sn_size != PDCP_SN_SIZE_18) {
+	if (sn_size != PDCP_SN_SIZE_18) {
 		int pclid;
 
 		if (sn_size == PDCP_SN_SIZE_5)
@@ -1014,18 +778,13 @@ pdcp_insert_uplane_snow_snow_op(struct program *p,
 	}
 	/* Non-proto is supported only for 5bit cplane and 18bit uplane */
 	switch (sn_size) {
-	case PDCP_SN_SIZE_5:
-		offset = 7;
-		length = 1;
-		sn_mask = (swap == false) ? PDCP_C_PLANE_SN_MASK :
-					PDCP_C_PLANE_SN_MASK_BE;
-		break;
 	case PDCP_SN_SIZE_18:
 		offset = 5;
 		length = 3;
 		sn_mask = (swap == false) ? PDCP_U_PLANE_18BIT_SN_MASK :
 					PDCP_U_PLANE_18BIT_SN_MASK_BE;
 		break;
+	case PDCP_SN_SIZE_5:
 	case PDCP_SN_SIZE_7:
 	case PDCP_SN_SIZE_12:
 	case PDCP_SN_SIZE_15:
@@ -1094,20 +853,13 @@ pdcp_insert_uplane_snow_snow_op(struct program *p,
 		SEQFIFOLOAD(p, MSG1, 4, LAST1 | FLUSH1);
 		JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CLASS1 | NOP | NIFP);
 
-		if (rta_sec_era >= RTA_SEC_ERA_6)
-			LOAD(p, 0, DCTRL, 0, LDLEN_RST_CHA_OFIFO_PTR, IMMED);
+		LOAD(p, 0, DCTRL, 0, LDLEN_RST_CHA_OFIFO_PTR, IMMED);
 
 		MOVE(p, OFIFO, 0, MATH0, 0, 4, WAITCOMP | IMMED);
 
 		NFIFOADD(p, IFIFO, ICV2, 4, LAST2);
 
-		if (rta_sec_era <= RTA_SEC_ERA_2) {
-			/* Shut off automatic Info FIFO entries */
-			LOAD(p, 0, DCTRL, LDOFF_DISABLE_AUTO_NFIFO, 0, IMMED);
-			MOVE(p, MATH0, 0, IFIFOAB2, 0, 4, WAITCOMP | IMMED);
-		} else {
-			MOVE(p, MATH0, 0, IFIFO, 0, 4, WAITCOMP | IMMED);
-		}
+		MOVE(p, MATH0, 0, IFIFO, 0, 4, WAITCOMP | IMMED);
 	}
 
 	return 0;
@@ -1119,19 +871,13 @@ pdcp_insert_uplane_zuc_zuc_op(struct program *p,
 			      struct alginfo *cipherdata,
 			      struct alginfo *authdata,
 			      unsigned int dir,
-			      enum pdcp_sn_size sn_size,
-			      unsigned char era_2_sw_hfn_ovrd __maybe_unused)
+			      enum pdcp_sn_size sn_size)
 {
 	uint32_t offset = 0, length = 0, sn_mask = 0;
 
 	LABEL(keyjump);
 	REFERENCE(pkeyjump);
 
-	if (rta_sec_era < RTA_SEC_ERA_5) {
-		pr_err("Invalid era for selected algorithm\n");
-		return -ENOTSUP;
-	}
-
 	pkeyjump = JUMP(p, keyjump, LOCAL_JUMP, ALL_TRUE, SHRD | SELF | BOTH);
 	KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
 	    cipherdata->keylen, INLINE_KEY(cipherdata));
@@ -1141,7 +887,7 @@ pdcp_insert_uplane_zuc_zuc_op(struct program *p,
 	SET_LABEL(p, keyjump);
 	PATCH_JUMP(p, pkeyjump, keyjump);
 
-	if (rta_sec_era >= RTA_SEC_ERA_8 && sn_size != PDCP_SN_SIZE_18) {
+	if (sn_size != PDCP_SN_SIZE_18) {
 		int pclid;
 
 		if (sn_size == PDCP_SN_SIZE_5)
@@ -1157,18 +903,13 @@ pdcp_insert_uplane_zuc_zuc_op(struct program *p,
 	}
 	/* Non-proto is supported only for 5bit cplane and 18bit uplane */
 	switch (sn_size) {
-	case PDCP_SN_SIZE_5:
-		offset = 7;
-		length = 1;
-		sn_mask = (swap == false) ? PDCP_C_PLANE_SN_MASK :
-					PDCP_C_PLANE_SN_MASK_BE;
-		break;
 	case PDCP_SN_SIZE_18:
 		offset = 5;
 		length = 3;
 		sn_mask = (swap == false) ? PDCP_U_PLANE_18BIT_SN_MASK :
 					PDCP_U_PLANE_18BIT_SN_MASK_BE;
 		break;
+	case PDCP_SN_SIZE_5:
 	case PDCP_SN_SIZE_7:
 	case PDCP_SN_SIZE_12:
 	case PDCP_SN_SIZE_15:
@@ -1243,12 +984,11 @@ pdcp_insert_uplane_aes_aes_op(struct program *p,
 			      struct alginfo *cipherdata,
 			      struct alginfo *authdata,
 			      unsigned int dir,
-			      enum pdcp_sn_size sn_size,
-			      unsigned char era_2_sw_hfn_ovrd __maybe_unused)
+			      enum pdcp_sn_size sn_size)
 {
 	uint32_t offset = 0, length = 0, sn_mask = 0;
 
-	if ((rta_sec_era >= RTA_SEC_ERA_8 && sn_size != PDCP_SN_SIZE_18)) {
+	if (sn_size != PDCP_SN_SIZE_18) {
 		/* Insert Auth Key */
 		KEY(p, KEY2, authdata->key_enc_flags, authdata->key,
 		    authdata->keylen, INLINE_KEY(authdata));
@@ -1392,8 +1132,7 @@ pdcp_insert_cplane_acc_op(struct program *p,
 			  struct alginfo *cipherdata,
 			  struct alginfo *authdata,
 			  unsigned int dir,
-			  enum pdcp_sn_size sn_size,
-			  unsigned char era_2_hfn_ovrd __maybe_unused)
+			  enum pdcp_sn_size sn_size)
 {
 	/* Insert Auth Key */
 	KEY(p, KEY2, authdata->key_enc_flags, authdata->key, authdata->keylen,
@@ -1420,8 +1159,7 @@ pdcp_insert_cplane_snow_aes_op(struct program *p,
 			       struct alginfo *cipherdata,
 			       struct alginfo *authdata,
 			       unsigned int dir,
-			       enum pdcp_sn_size sn_size,
-			       unsigned char era_2_sw_hfn_ovrd)
+			       enum pdcp_sn_size sn_size)
 {
 	uint32_t offset = 0, length = 0, sn_mask = 0;
 
@@ -1429,14 +1167,12 @@ pdcp_insert_cplane_snow_aes_op(struct program *p,
 	LABEL(end_desc);
 	LABEL(local_offset);
 	LABEL(jump_to_beginning);
-	LABEL(fifo_load_mac_i_offset);
 	REFERENCE(seqin_ptr_read);
 	REFERENCE(seqin_ptr_write);
 	REFERENCE(seq_out_read);
 	REFERENCE(jump_back_to_sd_cmd);
-	REFERENCE(move_mac_i_to_desc_buf);
 
-	if ((rta_sec_era >= RTA_SEC_ERA_8 && sn_size != PDCP_SN_SIZE_18) ||
+	if ((sn_size != PDCP_SN_SIZE_18) ||
 		(rta_sec_era == RTA_SEC_ERA_10)) {
 		KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
 				cipherdata->keylen, INLINE_KEY(cipherdata));
@@ -1484,56 +1220,17 @@ pdcp_insert_cplane_snow_aes_op(struct program *p,
 	MATHB(p, MATH1, OR, MATH2, MATH2, 8, 0);
 	SEQSTORE(p, MATH0, offset, length, 0);
 	if (dir == OP_TYPE_ENCAP_PROTOCOL) {
-		if (rta_sec_era > RTA_SEC_ERA_2 ||
-		    (rta_sec_era == RTA_SEC_ERA_2 &&
-				   era_2_sw_hfn_ovrd == 0)) {
-			SEQINPTR(p, 0, length, RTO);
-		} else {
-			SEQINPTR(p, 0, 5, RTO);
-			SEQFIFOLOAD(p, SKIP, 4, 0);
-		}
+		SEQINPTR(p, 0, length, RTO);
+
 		KEY(p, KEY1, authdata->key_enc_flags, authdata->key,
 		    authdata->keylen, INLINE_KEY(authdata));
 		MOVEB(p, MATH2, 0, IFIFOAB1, 0, 0x08, IMMED);
 
-		if (rta_sec_era > RTA_SEC_ERA_2) {
-			MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
-			MATHB(p, SEQINSZ, SUB, ZERO, MATH1, 4, 0);
-			MATHB(p, VSEQINSZ, ADD, PDCP_MAC_I_LEN - 1, VSEQOUTSZ,
-			      4, IMMED2);
-		} else {
-			MATHB(p, SEQINSZ, SUB, MATH3, VSEQINSZ, 4, 0);
-			MATHB(p, VSEQINSZ, ADD, PDCP_MAC_I_LEN - 1, VSEQOUTSZ,
-			      4, IMMED2);
-			/*
-			 * Note: Although the calculations below might seem a
-			 * little off, the logic is the following:
-			 *
-			 * - SEQ IN PTR RTO below needs the full length of the
-			 *   frame; in case of P4080_REV_2_HFN_OV_WORKAROUND,
-			 *   this means the length of the frame to be processed
-			 *   + 4 bytes (the HFN override flag and value).
-			 *   The length of the frame to be processed minus 1
-			 *   byte is in the VSIL register (because
-			 *   VSIL = SIL + 3, due to 1 byte, the header being
-			 *   already written by the SEQ STORE above). So for
-			 *   calculating the length to use in RTO, I add one
-			 *   to the VSIL value in order to obtain the total
-			 *   frame length. This helps in case of P4080 which
-			 *   can have the value 0 as an operand in a MATH
-			 *   command only as SRC1 When the HFN override
-			 *   workaround is not enabled, the length of the
-			 *   frame is given by the SIL register; the
-			 *   calculation is similar to the one in the SEC 4.2
-			 *   and SEC 5.3 cases.
-			 */
-			if (era_2_sw_hfn_ovrd)
-				MATHB(p, VSEQOUTSZ, ADD, ONE, MATH1, 4,
-				      0);
-			else
-				MATHB(p, SEQINSZ, ADD, MATH3, MATH1, 4,
-				      0);
-		}
+		MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
+		MATHB(p, SEQINSZ, SUB, ZERO, MATH1, 4, 0);
+		MATHB(p, VSEQINSZ, ADD, PDCP_MAC_I_LEN - 1, VSEQOUTSZ,
+		      4, IMMED2);
+
 		/*
 		 * Placeholder for filling the length in
 		 * SEQIN PTR RTO below
@@ -1548,24 +1245,14 @@ pdcp_insert_cplane_snow_aes_op(struct program *p,
 			      DIR_DEC);
 		SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1 | FLUSH1);
 		MOVEB(p, CONTEXT1, 0, MATH3, 0, 4, WAITCOMP | IMMED);
-		if (rta_sec_era <= RTA_SEC_ERA_3)
-			LOAD(p, CLRW_CLR_C1KEY |
-			     CLRW_CLR_C1CTX |
-			     CLRW_CLR_C1ICV |
-			     CLRW_CLR_C1DATAS |
-			     CLRW_CLR_C1MODE,
-			     CLRW, 0, 4, IMMED);
-		else
-			LOAD(p, CLRW_RESET_CLS1_CHA |
-			     CLRW_CLR_C1KEY |
-			     CLRW_CLR_C1CTX |
-			     CLRW_CLR_C1ICV |
-			     CLRW_CLR_C1DATAS |
-			     CLRW_CLR_C1MODE,
-			     CLRW, 0, 4, IMMED);
 
-		if (rta_sec_era <= RTA_SEC_ERA_3)
-			LOAD(p, CCTRL_RESET_CHA_ALL, CCTRL, 0, 4, IMMED);
+		LOAD(p, CLRW_RESET_CLS1_CHA |
+		     CLRW_CLR_C1KEY |
+		     CLRW_CLR_C1CTX |
+		     CLRW_CLR_C1ICV |
+		     CLRW_CLR_C1DATAS |
+		     CLRW_CLR_C1MODE,
+		     CLRW, 0, 4, IMMED);
 
 		KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
 		    cipherdata->keylen, INLINE_KEY(cipherdata));
@@ -1573,11 +1260,6 @@ pdcp_insert_cplane_snow_aes_op(struct program *p,
 		MOVEB(p, MATH2, 0, CONTEXT1, 0, 8, IMMED);
 		SEQINPTR(p, 0, 0, RTO);
 
-		if (rta_sec_era == RTA_SEC_ERA_2 && era_2_sw_hfn_ovrd) {
-			SEQFIFOLOAD(p, SKIP, 5, 0);
-			MATHB(p, SEQINSZ, ADD, ONE, SEQINSZ, 4, 0);
-		}
-
 		MATHB(p, SEQINSZ, SUB, length, VSEQINSZ, 4, IMMED2);
 		ALG_OPERATION(p, OP_ALG_ALGSEL_SNOW_F8,
 			      OP_ALG_AAI_F8,
@@ -1586,10 +1268,7 @@ pdcp_insert_cplane_snow_aes_op(struct program *p,
 			      DIR_ENC);
 		SEQFIFOSTORE(p, MSG, 0, 0, VLF);
 
-		if (rta_sec_era > RTA_SEC_ERA_2 ||
-		    (rta_sec_era == RTA_SEC_ERA_2 &&
-				   era_2_sw_hfn_ovrd == 0))
-			SEQFIFOLOAD(p, SKIP, length, 0);
+		SEQFIFOLOAD(p, SKIP, length, 0);
 
 		SEQFIFOLOAD(p, MSG1, 0, VLF);
 		MOVEB(p, MATH3, 0, IFIFOAB1, 0, 4, LAST1 | FLUSH1 | IMMED);
@@ -1598,13 +1277,9 @@ pdcp_insert_cplane_snow_aes_op(struct program *p,
 	} else {
 		MOVEB(p, MATH2, 0, CONTEXT1, 0, 8, IMMED);
 
-		if (rta_sec_era >= RTA_SEC_ERA_5)
-			MOVE(p, CONTEXT1, 0, CONTEXT2, 0, 8, IMMED);
+		MOVE(p, CONTEXT1, 0, CONTEXT2, 0, 8, IMMED);
 
-		if (rta_sec_era > RTA_SEC_ERA_2)
-			MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
-		else
-			MATHB(p, SEQINSZ, SUB, MATH3, VSEQINSZ, 4, 0);
+		MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
 
 		MATHI(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, VSEQOUTSZ, 4, IMMED2);
 /*
@@ -1649,10 +1324,7 @@ pdcp_insert_cplane_snow_aes_op(struct program *p,
 		KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
 		    cipherdata->keylen, INLINE_KEY(cipherdata));
 
-		if (rta_sec_era >= RTA_SEC_ERA_4)
-			MOVE(p, CONTEXT1, 0, CONTEXT2, 0, 8, IMMED);
-		else
-			MOVE(p, CONTEXT1, 0, MATH3, 0, 8, IMMED);
+		MOVE(p, CONTEXT1, 0, CONTEXT2, 0, 8, IMMED);
 
 		ALG_OPERATION(p, OP_ALG_ALGSEL_SNOW_F8,
 			      OP_ALG_AAI_F8,
@@ -1662,22 +1334,15 @@ pdcp_insert_cplane_snow_aes_op(struct program *p,
 		SEQFIFOSTORE(p, MSG, 0, 0, VLF | CONT);
 		SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1 | FLUSH1);
 
-		if (rta_sec_era <= RTA_SEC_ERA_3)
-			move_mac_i_to_desc_buf = MOVE(p, OFIFO, 0, DESCBUF, 0,
-						      4, WAITCOMP | IMMED);
-		else
-			MOVE(p, OFIFO, 0, MATH3, 0, 4, IMMED);
+		MOVE(p, OFIFO, 0, MATH3, 0, 4, IMMED);
 
-		if (rta_sec_era <= RTA_SEC_ERA_3)
-			LOAD(p, CCTRL_RESET_CHA_ALL, CCTRL, 0, 4, IMMED);
-		else
-			LOAD(p, CLRW_RESET_CLS1_CHA |
-			     CLRW_CLR_C1KEY |
-			     CLRW_CLR_C1CTX |
-			     CLRW_CLR_C1ICV |
-			     CLRW_CLR_C1DATAS |
-			     CLRW_CLR_C1MODE,
-			     CLRW, 0, 4, IMMED);
+		LOAD(p, CLRW_RESET_CLS1_CHA |
+		     CLRW_CLR_C1KEY |
+		     CLRW_CLR_C1CTX |
+		     CLRW_CLR_C1ICV |
+		     CLRW_CLR_C1DATAS |
+		     CLRW_CLR_C1MODE,
+		     CLRW, 0, 4, IMMED);
 
 		KEY(p, KEY1, authdata->key_enc_flags, authdata->key,
 		    authdata->keylen, INLINE_KEY(authdata));
@@ -1698,28 +1363,17 @@ pdcp_insert_cplane_snow_aes_op(struct program *p,
 		/* Read the # of bytes written in the output buffer + 1 (HDR) */
 		MATHI(p, VSEQOUTSZ, ADD, length, VSEQINSZ, 4, IMMED2);
 
-		if (rta_sec_era <= RTA_SEC_ERA_3)
-			MOVE(p, MATH3, 0, IFIFOAB1, 0, 8, IMMED);
-		else
-			MOVE(p, CONTEXT2, 0, IFIFOAB1, 0, 8, IMMED);
-
-		if (rta_sec_era == RTA_SEC_ERA_2 && era_2_sw_hfn_ovrd)
-			SEQFIFOLOAD(p, SKIP, 4, 0);
+		MOVE(p, CONTEXT2, 0, IFIFOAB1, 0, 8, IMMED);
 
 		SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1 | FLUSH1);
 
-		if (rta_sec_era >= RTA_SEC_ERA_4) {
-			LOAD(p, NFIFOENTRY_STYPE_ALTSOURCE |
-			     NFIFOENTRY_DEST_CLASS1 |
-			     NFIFOENTRY_DTYPE_ICV |
-			     NFIFOENTRY_LC1 |
-			     NFIFOENTRY_FC1 | 4, NFIFO_SZL, 0, 4, IMMED);
-			MOVE(p, MATH3, 0, ALTSOURCE, 0, 4, IMMED);
-		} else {
-			SET_LABEL(p, fifo_load_mac_i_offset);
-			FIFOLOAD(p, ICV1, fifo_load_mac_i_offset, 4,
-				 LAST1 | FLUSH1 | IMMED);
-		}
+		LOAD(p, NFIFOENTRY_STYPE_ALTSOURCE |
+		     NFIFOENTRY_DEST_CLASS1 |
+		     NFIFOENTRY_DTYPE_ICV |
+		     NFIFOENTRY_LC1 |
+		     NFIFOENTRY_FC1 | 4, NFIFO_SZL, 0, 4, IMMED);
+		MOVE(p, MATH3, 0, ALTSOURCE, 0, 4, IMMED);
+
 
 		SET_LABEL(p, end_desc);
 
@@ -1727,18 +1381,10 @@ pdcp_insert_cplane_snow_aes_op(struct program *p,
 			PATCH_MOVE(p, seq_out_read, end_desc + 1);
 			PATCH_JUMP(p, jump_back_to_sd_cmd,
 				   back_to_sd_offset + jump_back_to_sd_cmd - 5);
-
-			if (rta_sec_era <= RTA_SEC_ERA_3)
-				PATCH_MOVE(p, move_mac_i_to_desc_buf,
-					   fifo_load_mac_i_offset + 1);
 		} else {
 			PATCH_MOVE(p, seq_out_read, end_desc + 2);
 			PATCH_JUMP(p, jump_back_to_sd_cmd,
 				   back_to_sd_offset + jump_back_to_sd_cmd - 5);
-
-			if (rta_sec_era <= RTA_SEC_ERA_3)
-				PATCH_MOVE(p, move_mac_i_to_desc_buf,
-					   fifo_load_mac_i_offset + 1);
 		}
 	}
 
@@ -1751,8 +1397,7 @@ pdcp_insert_cplane_aes_snow_op(struct program *p,
 			       struct alginfo *cipherdata,
 			       struct alginfo *authdata,
 			       unsigned int dir,
-			       enum pdcp_sn_size sn_size,
-			       unsigned char era_2_sw_hfn_ovrd __maybe_unused)
+			       enum pdcp_sn_size sn_size)
 {
 	uint32_t offset = 0, length = 0, sn_mask = 0;
 
@@ -1761,7 +1406,7 @@ pdcp_insert_cplane_aes_snow_op(struct program *p,
 	KEY(p, KEY2, authdata->key_enc_flags, authdata->key, authdata->keylen,
 	    INLINE_KEY(authdata));
 
-	if ((rta_sec_era >= RTA_SEC_ERA_8 && sn_size != PDCP_SN_SIZE_18) ||
+	if ((sn_size != PDCP_SN_SIZE_18) ||
 		(rta_sec_era == RTA_SEC_ERA_10)) {
 		int pclid;
 
@@ -1860,20 +1505,13 @@ pdcp_insert_cplane_aes_snow_op(struct program *p,
 		SEQFIFOLOAD(p, MSG1, 4, LAST1 | FLUSH1);
 		JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CLASS1 | NOP | NIFP);
 
-		if (rta_sec_era >= RTA_SEC_ERA_6)
-			LOAD(p, 0, DCTRL, 0, LDLEN_RST_CHA_OFIFO_PTR, IMMED);
+		LOAD(p, 0, DCTRL, 0, LDLEN_RST_CHA_OFIFO_PTR, IMMED);
 
 		MOVE(p, OFIFO, 0, MATH0, 0, 4, WAITCOMP | IMMED);
 
 		NFIFOADD(p, IFIFO, ICV2, 4, LAST2);
 
-		if (rta_sec_era <= RTA_SEC_ERA_2) {
-			/* Shut off automatic Info FIFO entries */
-			LOAD(p, 0, DCTRL, LDOFF_DISABLE_AUTO_NFIFO, 0, IMMED);
-			MOVE(p, MATH0, 0, IFIFOAB2, 0, 4, WAITCOMP | IMMED);
-		} else {
-			MOVE(p, MATH0, 0, IFIFO, 0, 4, WAITCOMP | IMMED);
-		}
+		MOVE(p, MATH0, 0, IFIFO, 0, 4, WAITCOMP | IMMED);
 	}
 
 	return 0;
@@ -1885,20 +1523,14 @@ pdcp_insert_cplane_snow_zuc_op(struct program *p,
 			       struct alginfo *cipherdata,
 			       struct alginfo *authdata,
 			       unsigned int dir,
-			       enum pdcp_sn_size sn_size,
-			       unsigned char era_2_sw_hfn_ovrd __maybe_unused)
+			       enum pdcp_sn_size sn_size)
 {
 	uint32_t offset = 0, length = 0, sn_mask = 0;
 
 	LABEL(keyjump);
 	REFERENCE(pkeyjump);
 
-	if (rta_sec_era < RTA_SEC_ERA_5) {
-		pr_err("Invalid era for selected algorithm\n");
-		return -ENOTSUP;
-	}
-
-	if ((rta_sec_era >= RTA_SEC_ERA_8 && sn_size != PDCP_SN_SIZE_18) ||
+	if ((sn_size != PDCP_SN_SIZE_18) ||
 		(rta_sec_era == RTA_SEC_ERA_10)) {
 		int pclid;
 		KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
@@ -2010,19 +1642,13 @@ pdcp_insert_cplane_aes_zuc_op(struct program *p,
 			      struct alginfo *cipherdata,
 			      struct alginfo *authdata,
 			      unsigned int dir,
-			      enum pdcp_sn_size sn_size,
-			      unsigned char era_2_sw_hfn_ovrd __maybe_unused)
+			      enum pdcp_sn_size sn_size)
 {
 	uint32_t offset = 0, length = 0, sn_mask = 0;
 	LABEL(keyjump);
 	REFERENCE(pkeyjump);
 
-	if (rta_sec_era < RTA_SEC_ERA_5) {
-		pr_err("Invalid era for selected algorithm\n");
-		return -ENOTSUP;
-	}
-
-	if ((rta_sec_era >= RTA_SEC_ERA_8 && sn_size != PDCP_SN_SIZE_18) ||
+	if ((sn_size != PDCP_SN_SIZE_18) ||
 		(rta_sec_era == RTA_SEC_ERA_10)) {
 		int pclid;
 		KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
@@ -2138,19 +1764,13 @@ pdcp_insert_cplane_zuc_snow_op(struct program *p,
 			       struct alginfo *cipherdata,
 			       struct alginfo *authdata,
 			       unsigned int dir,
-			       enum pdcp_sn_size sn_size,
-			       unsigned char era_2_sw_hfn_ovrd __maybe_unused)
+			       enum pdcp_sn_size sn_size)
 {
 	uint32_t offset = 0, length = 0, sn_mask = 0;
 	LABEL(keyjump);
 	REFERENCE(pkeyjump);
 
-	if (rta_sec_era < RTA_SEC_ERA_5) {
-		pr_err("Invalid era for selected algorithm\n");
-		return -ENOTSUP;
-	}
-
-	if ((rta_sec_era >= RTA_SEC_ERA_8 && sn_size != PDCP_SN_SIZE_18) ||
+	if ((sn_size != PDCP_SN_SIZE_18) ||
 		(rta_sec_era == RTA_SEC_ERA_10)) {
 		int pclid;
 		KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
@@ -2259,13 +1879,12 @@ pdcp_insert_cplane_zuc_snow_op(struct program *p,
 		SEQFIFOLOAD(p, MSG1, 4, LAST1 | FLUSH1);
 		JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CLASS1 | NOP | NIFP);
 
-		if (rta_sec_era >= RTA_SEC_ERA_6)
-			/*
-			 * For SEC ERA 6, there's a problem with the OFIFO
-			 * pointer, and thus it needs to be reset here before
-			 * moving to M0.
-			 */
-			LOAD(p, 0, DCTRL, 0, LDLEN_RST_CHA_OFIFO_PTR, IMMED);
+		/*
+		 * For SEC ERA 6, there's a problem with the OFIFO
+		 * pointer, and thus it needs to be reset here before
+		 * moving to M0.
+		 */
+		LOAD(p, 0, DCTRL, 0, LDLEN_RST_CHA_OFIFO_PTR, IMMED);
 
 		/* Put ICV to M0 before sending it to C2 for comparison. */
 		MOVEB(p, OFIFO, 0, MATH0, 0, 4, WAITCOMP | IMMED);
@@ -2287,16 +1906,11 @@ pdcp_insert_cplane_zuc_aes_op(struct program *p,
 			      struct alginfo *cipherdata,
 			      struct alginfo *authdata,
 			      unsigned int dir,
-			      enum pdcp_sn_size sn_size,
-			      unsigned char era_2_sw_hfn_ovrd __maybe_unused)
+			      enum pdcp_sn_size sn_size)
 {
 	uint32_t offset = 0, length = 0, sn_mask = 0;
-	if (rta_sec_era < RTA_SEC_ERA_5) {
-		pr_err("Invalid era for selected algorithm\n");
-		return -ENOTSUP;
-	}
 
-	if ((rta_sec_era >= RTA_SEC_ERA_8 && sn_size != PDCP_SN_SIZE_18) ||
+	if ((sn_size != PDCP_SN_SIZE_18) ||
 		(rta_sec_era == RTA_SEC_ERA_10)) {
 		int pclid;
 
@@ -2459,7 +2073,7 @@ pdcp_insert_uplane_no_int_op(struct program *p,
 	KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
 	    cipherdata->keylen, INLINE_KEY(cipherdata));
 
-	if ((rta_sec_era >= RTA_SEC_ERA_8 && sn_size == PDCP_SN_SIZE_15) ||
+	if ((sn_size == PDCP_SN_SIZE_15) ||
 			(rta_sec_era >= RTA_SEC_ERA_10)) {
 		PROTOCOL(p, dir, OP_PCLID_LTE_PDCP_USER,
 			 (uint16_t)cipherdata->algtype);
@@ -2513,10 +2127,6 @@ pdcp_insert_uplane_no_int_op(struct program *p,
 		break;
 
 	case PDCP_CIPHER_TYPE_ZUC:
-		if (rta_sec_era < RTA_SEC_ERA_5) {
-			pr_err("Invalid era for selected algorithm\n");
-			return -ENOTSUP;
-		}
 		MOVEB(p, MATH2, 0, CONTEXT1, 0, 0x08, IMMED);
 		MOVEB(p, MATH2, 0, CONTEXT1, 0x08, 0x08, WAITCOMP | IMMED);
 
@@ -2546,7 +2156,6 @@ static inline int
 insert_hfn_ov_op(struct program *p,
 		 uint32_t shift,
 		 enum pdb_type_e pdb_type,
-		 unsigned char era_2_sw_hfn_ovrd,
 		 bool clear_dpovrd_at_end)
 {
 	uint32_t imm = PDCP_DPOVRD_HFN_OV_EN;
@@ -2554,9 +2163,6 @@ insert_hfn_ov_op(struct program *p,
 	LABEL(keyjump);
 	REFERENCE(pkeyjump);
 
-	if (rta_sec_era == RTA_SEC_ERA_2 && !era_2_sw_hfn_ovrd)
-		return 0;
-
 	switch (pdb_type) {
 	case PDCP_PDB_TYPE_NO_PDB:
 		/*
@@ -2579,26 +2185,16 @@ insert_hfn_ov_op(struct program *p,
 		return -EINVAL;
 	}
 
-	if (rta_sec_era > RTA_SEC_ERA_2) {
-		MATHB(p, DPOVRD, AND, imm, NONE, 8, IFB | IMMED2);
-	} else {
-		SEQLOAD(p, MATH0, 4, 4, 0);
-		JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CALM);
-		MATHB(p, MATH0, AND, imm, NONE, 8, IFB | IMMED2);
-		SEQSTORE(p, MATH0, 4, 4, 0);
-	}
+	MATHB(p, DPOVRD, AND, imm, NONE, 8, IFB | IMMED2);
 
 	pkeyjump = JUMP(p, keyjump, LOCAL_JUMP, ALL_TRUE, MATH_Z);
 
-	if (rta_sec_era > RTA_SEC_ERA_2)
-		MATHI(p, DPOVRD, LSHIFT, shift, MATH0, 4, IMMED2);
-	else
-		MATHB(p, MATH0, LSHIFT, shift, MATH0, 4, IMMED2);
+	MATHI(p, DPOVRD, LSHIFT, shift, MATH0, 4, IMMED2);
 
 	MATHB(p, MATH0, SHLD, MATH0, MATH0, 8, 0);
 	MOVE(p, MATH0, 0, DESCBUF, hfn_pdb_offset, 4, IMMED);
 
-	if (clear_dpovrd_at_end && (rta_sec_era >= RTA_SEC_ERA_8)) {
+	if (clear_dpovrd_at_end) {
 		/*
 		 * For ERA8, DPOVRD could be handled by the PROTOCOL command
 		 * itself. For now, this is not done. Thus, clear DPOVRD here
@@ -2621,97 +2217,28 @@ cnstr_pdcp_c_plane_pdb(struct program *p,
 		       enum pdcp_sn_size sn_size,
 		       unsigned char bearer,
 		       unsigned char direction,
-		       uint32_t hfn_threshold,
-		       struct alginfo *cipherdata,
-		       struct alginfo *authdata)
+		       uint32_t hfn_threshold)
 {
 	struct pdcp_pdb pdb;
-	enum pdb_type_e
-		pdb_mask[PDCP_CIPHER_TYPE_INVALID][PDCP_AUTH_TYPE_INVALID] = {
-			{	/* NULL */
-				PDCP_PDB_TYPE_NO_PDB,		/* NULL */
-				PDCP_PDB_TYPE_FULL_PDB,		/* SNOW f9 */
-				PDCP_PDB_TYPE_FULL_PDB,		/* AES CMAC */
-				PDCP_PDB_TYPE_FULL_PDB		/* ZUC-I */
-			},
-			{	/* SNOW f8 */
-				PDCP_PDB_TYPE_FULL_PDB,		/* NULL */
-				PDCP_PDB_TYPE_FULL_PDB,		/* SNOW f9 */
-				PDCP_PDB_TYPE_REDUCED_PDB,	/* AES CMAC */
-				PDCP_PDB_TYPE_REDUCED_PDB	/* ZUC-I */
-			},
-			{	/* AES CTR */
-				PDCP_PDB_TYPE_FULL_PDB,		/* NULL */
-				PDCP_PDB_TYPE_REDUCED_PDB,	/* SNOW f9 */
-				PDCP_PDB_TYPE_FULL_PDB,		/* AES CMAC */
-				PDCP_PDB_TYPE_REDUCED_PDB	/* ZUC-I */
-			},
-			{	/* ZUC-E */
-				PDCP_PDB_TYPE_FULL_PDB,		/* NULL */
-				PDCP_PDB_TYPE_REDUCED_PDB,	/* SNOW f9 */
-				PDCP_PDB_TYPE_REDUCED_PDB,	/* AES CMAC */
-				PDCP_PDB_TYPE_FULL_PDB		/* ZUC-I */
-			},
-	};
-
-	if (rta_sec_era >= RTA_SEC_ERA_8) {
-		memset(&pdb, 0x00, sizeof(struct pdcp_pdb));
-
-		/* To support 12-bit seq numbers, we use u-plane opt in pdb.
-		 * SEC supports 5-bit only with c-plane opt in pdb.
-		 */
-		if (sn_size == PDCP_SN_SIZE_12) {
-			pdb.hfn_res = hfn << PDCP_U_PLANE_PDB_LONG_SN_HFN_SHIFT;
-			pdb.bearer_dir_res = (uint32_t)
-				((bearer << PDCP_U_PLANE_PDB_BEARER_SHIFT) |
-				 (direction << PDCP_U_PLANE_PDB_DIR_SHIFT));
-
-			pdb.hfn_thr_res =
-			hfn_threshold << PDCP_U_PLANE_PDB_LONG_SN_HFN_THR_SHIFT;
-
-		} else {
-			/* This means 5-bit c-plane.
-			 * Here we use c-plane opt in pdb
-			 */
-
-			/* This is a HW issue. Bit 2 should be set to zero,
-			 * but it does not work this way. Override here.
-			 */
-			pdb.opt_res.rsvd = 0x00000002;
-
-			/* Copy relevant information from user to PDB */
-			pdb.hfn_res = hfn << PDCP_C_PLANE_PDB_HFN_SHIFT;
-			pdb.bearer_dir_res = (uint32_t)
-				((bearer << PDCP_C_PLANE_PDB_BEARER_SHIFT) |
-				(direction << PDCP_C_PLANE_PDB_DIR_SHIFT));
-			pdb.hfn_thr_res =
-			hfn_threshold << PDCP_C_PLANE_PDB_HFN_THR_SHIFT;
-		}
-
-		/* copy PDB in descriptor*/
-		__rta_out32(p, pdb.opt_res.opt);
-		__rta_out32(p, pdb.hfn_res);
-		__rta_out32(p, pdb.bearer_dir_res);
-		__rta_out32(p, pdb.hfn_thr_res);
 
-		return PDCP_PDB_TYPE_FULL_PDB;
-	}
+	memset(&pdb, 0x00, sizeof(struct pdcp_pdb));
 
-	switch (pdb_mask[cipherdata->algtype][authdata->algtype]) {
-	case PDCP_PDB_TYPE_NO_PDB:
-		break;
+	/* To support 12-bit seq numbers, we use u-plane opt in pdb.
+	 * SEC supports 5-bit only with c-plane opt in pdb.
+	 */
+	if (sn_size == PDCP_SN_SIZE_12) {
+		pdb.hfn_res = hfn << PDCP_U_PLANE_PDB_LONG_SN_HFN_SHIFT;
+		pdb.bearer_dir_res = (uint32_t)
+			((bearer << PDCP_U_PLANE_PDB_BEARER_SHIFT) |
+			 (direction << PDCP_U_PLANE_PDB_DIR_SHIFT));
 
-	case PDCP_PDB_TYPE_REDUCED_PDB:
-		__rta_out32(p, (hfn << PDCP_C_PLANE_PDB_HFN_SHIFT));
-		__rta_out32(p,
-			    (uint32_t)((bearer <<
-					PDCP_C_PLANE_PDB_BEARER_SHIFT) |
-					(direction <<
-					 PDCP_C_PLANE_PDB_DIR_SHIFT)));
-		break;
+		pdb.hfn_thr_res =
+		hfn_threshold << PDCP_U_PLANE_PDB_LONG_SN_HFN_THR_SHIFT;
 
-	case PDCP_PDB_TYPE_FULL_PDB:
-		memset(&pdb, 0x00, sizeof(struct pdcp_pdb));
+	} else {
+		/* This means 5-bit c-plane.
+		 * Here we use c-plane opt in pdb
+		 */
 
 		/* This is a HW issue. Bit 2 should be set to zero,
 		 * but it does not work this way. Override here.
@@ -2722,23 +2249,18 @@ cnstr_pdcp_c_plane_pdb(struct program *p,
 		pdb.hfn_res = hfn << PDCP_C_PLANE_PDB_HFN_SHIFT;
 		pdb.bearer_dir_res = (uint32_t)
 			((bearer << PDCP_C_PLANE_PDB_BEARER_SHIFT) |
-			 (direction << PDCP_C_PLANE_PDB_DIR_SHIFT));
+			(direction << PDCP_C_PLANE_PDB_DIR_SHIFT));
 		pdb.hfn_thr_res =
-			hfn_threshold << PDCP_C_PLANE_PDB_HFN_THR_SHIFT;
-
-		/* copy PDB in descriptor*/
-		__rta_out32(p, pdb.opt_res.opt);
-		__rta_out32(p, pdb.hfn_res);
-		__rta_out32(p, pdb.bearer_dir_res);
-		__rta_out32(p, pdb.hfn_thr_res);
-
-		break;
-
-	default:
-		return PDCP_PDB_TYPE_INVALID;
+		hfn_threshold << PDCP_C_PLANE_PDB_HFN_THR_SHIFT;
 	}
 
-	return pdb_mask[cipherdata->algtype][authdata->algtype];
+	/* copy PDB in descriptor*/
+	__rta_out32(p, pdb.opt_res.opt);
+	__rta_out32(p, pdb.hfn_res);
+	__rta_out32(p, pdb.bearer_dir_res);
+	__rta_out32(p, pdb.hfn_thr_res);
+
+	return PDCP_PDB_TYPE_FULL_PDB;
 }
 
 /*
@@ -2817,7 +2339,7 @@ cnstr_pdcp_u_plane_pdb(struct program *p,
 		pdb.hfn_thr_res =
 			hfn_threshold<<PDCP_U_PLANE_PDB_18BIT_SN_HFN_THR_SHIFT;
 
-		if (rta_sec_era <= RTA_SEC_ERA_8) {
+		if (rta_sec_era == RTA_SEC_ERA_8) {
 			if (cipherdata && authdata)
 				pdb_type = pdb_mask[cipherdata->algtype]
 						   [authdata->algtype];
@@ -2857,6 +2379,7 @@ cnstr_pdcp_u_plane_pdb(struct program *p,
 
 	return pdb_type;
 }
+
 /**
  * cnstr_shdsc_pdcp_c_plane_encap - Function for creating a PDCP Control Plane
  *                                  encapsulation descriptor.
@@ -2874,9 +2397,6 @@ cnstr_pdcp_u_plane_pdb(struct program *p,
  *              Valid algorithm values are those from cipher_type_pdcp enum.
  * @authdata: pointer to authentication transform definitions
  *            Valid algorithm values are those from auth_type_pdcp enum.
- * @era_2_sw_hfn_ovrd: if software HFN override mechanism is desired for
- *                     this descriptor. Note: Can only be used for
- *                     SEC ERA 2.
  * Return: size of descriptor written in words or negative number on error.
  *         Once the function returns, the value of this parameter can be used
  *         for reclaiming the space that wasn't used for the descriptor.
@@ -2895,14 +2415,12 @@ cnstr_shdsc_pdcp_c_plane_encap(uint32_t *descbuf,
 			       unsigned char direction,
 			       uint32_t hfn_threshold,
 			       struct alginfo *cipherdata,
-			       struct alginfo *authdata,
-			       unsigned char era_2_sw_hfn_ovrd)
+			       struct alginfo *authdata)
 {
 	static int
 		(*pdcp_cp_fp[PDCP_CIPHER_TYPE_INVALID][PDCP_AUTH_TYPE_INVALID])
 			(struct program*, bool swap, struct alginfo *,
-			 struct alginfo *, unsigned int, enum pdcp_sn_size,
-			unsigned char __maybe_unused) = {
+			 struct alginfo *, unsigned int dir, enum pdcp_sn_size) = {
 		{	/* NULL */
 			pdcp_insert_cplane_null_op,	/* NULL */
 			pdcp_insert_cplane_int_only_op,	/* SNOW f9 */
@@ -2961,11 +2479,6 @@ cnstr_shdsc_pdcp_c_plane_encap(uint32_t *descbuf,
 	int err;
 	LABEL(pdb_end);
 
-	if (rta_sec_era != RTA_SEC_ERA_2 && era_2_sw_hfn_ovrd) {
-		pr_err("Cannot select SW HFN override for other era than 2");
-		return -EINVAL;
-	}
-
 	if (sn_size != PDCP_SN_SIZE_12 && sn_size != PDCP_SN_SIZE_5) {
 		pr_err("C-plane supports only 5-bit and 12-bit sequence numbers\n");
 		return -EINVAL;
@@ -2984,14 +2497,11 @@ cnstr_shdsc_pdcp_c_plane_encap(uint32_t *descbuf,
 			sn_size,
 			bearer,
 			direction,
-			hfn_threshold,
-			cipherdata,
-			authdata);
+			hfn_threshold);
 
 	SET_LABEL(p, pdb_end);
 
-	err = insert_hfn_ov_op(p, sn_size, pdb_type,
-			       era_2_sw_hfn_ovrd, true);
+	err = insert_hfn_ov_op(p, sn_size, pdb_type, true);
 	if (err)
 		return err;
 
@@ -3000,8 +2510,7 @@ cnstr_shdsc_pdcp_c_plane_encap(uint32_t *descbuf,
 		cipherdata,
 		authdata,
 		OP_TYPE_ENCAP_PROTOCOL,
-		sn_size,
-		era_2_sw_hfn_ovrd);
+		sn_size);
 	if (err)
 		return err;
 
@@ -3027,9 +2536,6 @@ cnstr_shdsc_pdcp_c_plane_encap(uint32_t *descbuf,
  *              Valid algorithm values are those from cipher_type_pdcp enum.
  * @authdata: pointer to authentication transform definitions
  *            Valid algorithm values are those from auth_type_pdcp enum.
- * @era_2_sw_hfn_ovrd: if software HFN override mechanism is desired for
- *                     this descriptor. Note: Can only be used for
- *                     SEC ERA 2.
  *
  * Return: size of descriptor written in words or negative number on error.
  *         Once the function returns, the value of this parameter can be used
@@ -3049,14 +2555,12 @@ cnstr_shdsc_pdcp_c_plane_decap(uint32_t *descbuf,
 			       unsigned char direction,
 			       uint32_t hfn_threshold,
 			       struct alginfo *cipherdata,
-			       struct alginfo *authdata,
-			       unsigned char era_2_sw_hfn_ovrd)
+			       struct alginfo *authdata)
 {
 	static int
 		(*pdcp_cp_fp[PDCP_CIPHER_TYPE_INVALID][PDCP_AUTH_TYPE_INVALID])
 			(struct program*, bool swap, struct alginfo *,
-			 struct alginfo *, unsigned int, enum pdcp_sn_size,
-			 unsigned char) = {
+			 struct alginfo *, unsigned int dir, enum pdcp_sn_size) = {
 		{	/* NULL */
 			pdcp_insert_cplane_null_op,	/* NULL */
 			pdcp_insert_cplane_int_only_op,	/* SNOW f9 */
@@ -3115,11 +2619,6 @@ cnstr_shdsc_pdcp_c_plane_decap(uint32_t *descbuf,
 	int err;
 	LABEL(pdb_end);
 
-	if (rta_sec_era != RTA_SEC_ERA_2 && era_2_sw_hfn_ovrd) {
-		pr_err("Cannot select SW HFN override for other era than 2");
-		return -EINVAL;
-	}
-
 	if (sn_size != PDCP_SN_SIZE_12 && sn_size != PDCP_SN_SIZE_5) {
 		pr_err("C-plane supports only 5-bit and 12-bit sequence numbers\n");
 		return -EINVAL;
@@ -3138,14 +2637,11 @@ cnstr_shdsc_pdcp_c_plane_decap(uint32_t *descbuf,
 			sn_size,
 			bearer,
 			direction,
-			hfn_threshold,
-			cipherdata,
-			authdata);
+			hfn_threshold);
 
 	SET_LABEL(p, pdb_end);
 
-	err = insert_hfn_ov_op(p, sn_size, pdb_type,
-			       era_2_sw_hfn_ovrd, true);
+	err = insert_hfn_ov_op(p, sn_size, pdb_type, true);
 	if (err)
 		return err;
 
@@ -3154,8 +2650,7 @@ cnstr_shdsc_pdcp_c_plane_decap(uint32_t *descbuf,
 		cipherdata,
 		authdata,
 		OP_TYPE_DECAP_PROTOCOL,
-		sn_size,
-		era_2_sw_hfn_ovrd);
+		sn_size);
 	if (err)
 		return err;
 
@@ -3170,14 +2665,12 @@ pdcp_insert_uplane_with_int_op(struct program *p,
 			      struct alginfo *cipherdata,
 			      struct alginfo *authdata,
 			      enum pdcp_sn_size sn_size,
-			      unsigned char era_2_sw_hfn_ovrd,
 			      unsigned int dir)
 {
 	static int
 		(*pdcp_cp_fp[PDCP_CIPHER_TYPE_INVALID][PDCP_AUTH_TYPE_INVALID])
 			(struct program*, bool swap, struct alginfo *,
-			 struct alginfo *, unsigned int, enum pdcp_sn_size,
-			unsigned char __maybe_unused) = {
+			 struct alginfo *, unsigned int dir, enum pdcp_sn_size) = {
 		{	/* NULL */
 			pdcp_insert_cplane_null_op,	/* NULL */
 			pdcp_insert_cplane_int_only_op,	/* SNOW f9 */
@@ -3210,8 +2703,7 @@ pdcp_insert_uplane_with_int_op(struct program *p,
 		cipherdata,
 		authdata,
 		dir,
-		sn_size,
-		era_2_sw_hfn_ovrd);
+		sn_size);
 	if (err)
 		return err;
 
@@ -3234,9 +2726,6 @@ pdcp_insert_uplane_with_int_op(struct program *p,
  *                 keys should be renegotiated at the earliest convenience.
  * @cipherdata: pointer to block cipher transform definitions
  *              Valid algorithm values are those from cipher_type_pdcp enum.
- * @era_2_sw_hfn_ovrd: if software HFN override mechanism is desired for
- *                     this descriptor. Note: Can only be used for
- *                     SEC ERA 2.
  *
  * Return: size of descriptor written in words or negative number on error.
  *         Once the function returns, the value of this parameter can be used
@@ -3256,8 +2745,7 @@ cnstr_shdsc_pdcp_u_plane_encap(uint32_t *descbuf,
 			       unsigned short direction,
 			       uint32_t hfn_threshold,
 			       struct alginfo *cipherdata,
-			       struct alginfo *authdata,
-			       unsigned char era_2_sw_hfn_ovrd)
+			       struct alginfo *authdata)
 {
 	struct program prg;
 	struct program *p = &prg;
@@ -3292,16 +2780,6 @@ cnstr_shdsc_pdcp_u_plane_encap(uint32_t *descbuf,
 	};
 	LABEL(pdb_end);
 
-	if (rta_sec_era != RTA_SEC_ERA_2 && era_2_sw_hfn_ovrd) {
-		pr_err("Cannot select SW HFN ovrd for other era than 2");
-		return -EINVAL;
-	}
-
-	if (authdata && !authdata->algtype && rta_sec_era < RTA_SEC_ERA_8) {
-		pr_err("Cannot use u-plane auth with era < 8");
-		return -EINVAL;
-	}
-
 	PROGRAM_CNTXT_INIT(p, descbuf, 0);
 	if (swap)
 		PROGRAM_SET_BSWAP(p);
@@ -3321,7 +2799,7 @@ cnstr_shdsc_pdcp_u_plane_encap(uint32_t *descbuf,
 	}
 	SET_LABEL(p, pdb_end);
 
-	err = insert_hfn_ov_op(p, sn_size, pdb_type, era_2_sw_hfn_ovrd, true);
+	err = insert_hfn_ov_op(p, sn_size, pdb_type, true);
 	if (err)
 		return err;
 
@@ -3330,10 +2808,6 @@ cnstr_shdsc_pdcp_u_plane_encap(uint32_t *descbuf,
 	case PDCP_SN_SIZE_12:
 		switch (cipherdata->algtype) {
 		case PDCP_CIPHER_TYPE_ZUC:
-			if (rta_sec_era < RTA_SEC_ERA_5) {
-				pr_err("Invalid era for selected algorithm\n");
-				return -ENOTSUP;
-			}
 			/* fallthrough */
 		case PDCP_CIPHER_TYPE_AES:
 		case PDCP_CIPHER_TYPE_SNOW:
@@ -3342,7 +2816,7 @@ cnstr_shdsc_pdcp_u_plane_encap(uint32_t *descbuf,
 					authdata && authdata->algtype == 0){
 				err = pdcp_insert_uplane_with_int_op(p, swap,
 						cipherdata, authdata,
-						sn_size, era_2_sw_hfn_ovrd,
+						sn_size,
 						OP_TYPE_ENCAP_PROTOCOL);
 				if (err)
 					return err;
@@ -3388,7 +2862,7 @@ cnstr_shdsc_pdcp_u_plane_encap(uint32_t *descbuf,
 		if (authdata) {
 			err = pdcp_insert_uplane_with_int_op(p, swap,
 					cipherdata, authdata,
-					sn_size, era_2_sw_hfn_ovrd,
+					sn_size,
 					OP_TYPE_ENCAP_PROTOCOL);
 			if (err)
 				return err;
@@ -3437,9 +2911,6 @@ cnstr_shdsc_pdcp_u_plane_encap(uint32_t *descbuf,
  *                 keys should be renegotiated at the earliest convenience.
  * @cipherdata: pointer to block cipher transform definitions
  *              Valid algorithm values are those from cipher_type_pdcp enum.
- * @era_2_sw_hfn_ovrd: if software HFN override mechanism is desired for
- *                     this descriptor. Note: Can only be used for
- *                     SEC ERA 2.
  *
  * Return: size of descriptor written in words or negative number on error.
  *         Once the function returns, the value of this parameter can be used
@@ -3459,8 +2930,7 @@ cnstr_shdsc_pdcp_u_plane_decap(uint32_t *descbuf,
 			       unsigned short direction,
 			       uint32_t hfn_threshold,
 			       struct alginfo *cipherdata,
-			       struct alginfo *authdata,
-			       unsigned char era_2_sw_hfn_ovrd)
+			       struct alginfo *authdata)
 {
 	struct program prg;
 	struct program *p = &prg;
@@ -3496,16 +2966,6 @@ cnstr_shdsc_pdcp_u_plane_decap(uint32_t *descbuf,
 
 	LABEL(pdb_end);
 
-	if (rta_sec_era != RTA_SEC_ERA_2 && era_2_sw_hfn_ovrd) {
-		pr_err("Cannot select SW HFN override for other era than 2");
-		return -EINVAL;
-	}
-
-	if (authdata && !authdata->algtype && rta_sec_era < RTA_SEC_ERA_8) {
-		pr_err("Cannot use u-plane auth with era < 8");
-		return -EINVAL;
-	}
-
 	PROGRAM_CNTXT_INIT(p, descbuf, 0);
 	if (swap)
 		PROGRAM_SET_BSWAP(p);
@@ -3525,7 +2985,7 @@ cnstr_shdsc_pdcp_u_plane_decap(uint32_t *descbuf,
 	}
 	SET_LABEL(p, pdb_end);
 
-	err = insert_hfn_ov_op(p, sn_size, pdb_type, era_2_sw_hfn_ovrd, true);
+	err = insert_hfn_ov_op(p, sn_size, pdb_type, true);
 	if (err)
 		return err;
 
@@ -3534,10 +2994,6 @@ cnstr_shdsc_pdcp_u_plane_decap(uint32_t *descbuf,
 	case PDCP_SN_SIZE_12:
 		switch (cipherdata->algtype) {
 		case PDCP_CIPHER_TYPE_ZUC:
-			if (rta_sec_era < RTA_SEC_ERA_5) {
-				pr_err("Invalid era for selected algorithm\n");
-				return -ENOTSUP;
-			}
 			/* fallthrough */
 		case PDCP_CIPHER_TYPE_AES:
 		case PDCP_CIPHER_TYPE_SNOW:
@@ -3555,7 +3011,7 @@ cnstr_shdsc_pdcp_u_plane_decap(uint32_t *descbuf,
 			else if (authdata && authdata->algtype == 0) {
 				err = pdcp_insert_uplane_with_int_op(p, swap,
 						cipherdata, authdata,
-						sn_size, era_2_sw_hfn_ovrd,
+						sn_size,
 						OP_TYPE_DECAP_PROTOCOL);
 				if (err)
 					return err;
@@ -3589,7 +3045,7 @@ cnstr_shdsc_pdcp_u_plane_decap(uint32_t *descbuf,
 		if (authdata) {
 			err = pdcp_insert_uplane_with_int_op(p, swap,
 					cipherdata, authdata,
-					sn_size, era_2_sw_hfn_ovrd,
+					sn_size,
 					OP_TYPE_DECAP_PROTOCOL);
 			if (err)
 				return err;
@@ -3649,9 +3105,6 @@ cnstr_shdsc_pdcp_short_mac(uint32_t *descbuf,
 	struct program prg;
 	struct program *p = &prg;
 	uint32_t iv[3] = {0, 0, 0};
-	LABEL(local_offset);
-	REFERENCE(move_cmd_read_descbuf);
-	REFERENCE(move_cmd_write_descbuf);
 
 	PROGRAM_CNTXT_INIT(p, descbuf, 0);
 	if (swap)
@@ -3661,52 +3114,15 @@ cnstr_shdsc_pdcp_short_mac(uint32_t *descbuf,
 
 	SHR_HDR(p, SHR_ALWAYS, 1, 0);
 
-	if (rta_sec_era > RTA_SEC_ERA_2) {
-		MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
-		MATHB(p, SEQINSZ, SUB, ZERO, MATH1, 4, 0);
-	} else {
-		MATHB(p, SEQINSZ, ADD, ONE, MATH1, 4, 0);
-		MATHB(p, MATH1, SUB, ONE, MATH1, 4, 0);
-		MATHB(p, ZERO, ADD, MATH1, VSEQINSZ, 4, 0);
-		MOVE(p, MATH1, 0, MATH0, 0, 8, IMMED);
+	MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
+	MATHB(p, SEQINSZ, SUB, ZERO, MATH1, 4, 0);
 
-		/*
-		 * Since MOVELEN is available only starting with
-		 * SEC ERA 3, use poor man's MOVELEN: create a MOVE
-		 * command dynamically by writing the length from M1 by
-		 * OR-ing the command in the M1 register and MOVE the
-		 * result into the descriptor buffer. Care must be taken
-		 * wrt. the location of the command because of SEC
-		 * pipelining. The actual MOVEs are written at the end
-		 * of the descriptor due to calculations needed on the
-		 * offset in the descriptor for the MOVE command.
-		 */
-		move_cmd_read_descbuf = MOVE(p, DESCBUF, 0, MATH0, 0, 6,
-					     IMMED);
-		move_cmd_write_descbuf = MOVE(p, MATH0, 0, DESCBUF, 0, 8,
-					      WAITCOMP | IMMED);
-	}
 	MATHB(p, ZERO, ADD, MATH1, VSEQOUTSZ, 4, 0);
 
 	switch (authdata->algtype) {
 	case PDCP_AUTH_TYPE_NULL:
 		SEQFIFOSTORE(p, MSG, 0, 0, VLF);
-		if (rta_sec_era > RTA_SEC_ERA_2) {
-			MOVE(p, AB1, 0, OFIFO, 0, MATH1, 0);
-		} else {
-			SET_LABEL(p, local_offset);
-
-			/* Shut off automatic Info FIFO entries */
-			LOAD(p, 0, DCTRL, LDOFF_DISABLE_AUTO_NFIFO, 0, IMMED);
-
-			/* Placeholder for MOVE command with length from M1
-			 * register
-			 */
-			MOVE(p, IFIFOAB1, 0, OFIFO, 0, 0, IMMED);
-
-			/* Enable automatic Info FIFO entries */
-			LOAD(p, 0, DCTRL, LDOFF_ENABLE_AUTO_NFIFO, 0, IMMED);
-		}
+		MOVE(p, AB1, 0, OFIFO, 0, MATH1, 0);
 
 		LOAD(p, (uintptr_t)iv, MATH0, 0, 8, IMMED | COPY);
 		SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1 | LAST2 | FLUSH1);
@@ -3730,23 +3146,8 @@ cnstr_shdsc_pdcp_short_mac(uint32_t *descbuf,
 			      DIR_ENC);
 		SEQFIFOSTORE(p, MSG, 0, 0, VLF);
 
-		if (rta_sec_era > RTA_SEC_ERA_2) {
-			MOVE(p, AB1, 0, OFIFO, 0, MATH1, 0);
-		} else {
-			SET_LABEL(p, local_offset);
-
-
-			/* Shut off automatic Info FIFO entries */
-			LOAD(p, 0, DCTRL, LDOFF_DISABLE_AUTO_NFIFO, 0, IMMED);
-
-			/* Placeholder for MOVE command with length from M1
-			 * register
-			 */
-			MOVE(p, IFIFOAB1, 0, OFIFO, 0, 0, IMMED);
+		MOVE(p, AB1, 0, OFIFO, 0, MATH1, 0);
 
-			/* Enable automatic Info FIFO entries */
-			LOAD(p, 0, DCTRL, LDOFF_ENABLE_AUTO_NFIFO, 0, IMMED);
-		}
 		SEQFIFOLOAD(p, MSGINSNOOP, 0, VLF | LAST1 | LAST2 | FLUSH1);
 		SEQSTORE(p, CONTEXT2, 0, 4, 0);
 
@@ -3768,32 +3169,14 @@ cnstr_shdsc_pdcp_short_mac(uint32_t *descbuf,
 			      DIR_ENC);
 		SEQFIFOSTORE(p, MSG, 0, 0, VLF);
 
-		if (rta_sec_era > RTA_SEC_ERA_2) {
-			MOVE(p, AB2, 0, OFIFO, 0, MATH1, 0);
-		} else {
-			SET_LABEL(p, local_offset);
-
-			/* Shut off automatic Info FIFO entries */
-			LOAD(p, 0, DCTRL, LDOFF_DISABLE_AUTO_NFIFO, 0, IMMED);
+		MOVE(p, AB2, 0, OFIFO, 0, MATH1, 0);
 
-			/* Placeholder for MOVE command with length from M1
-			 * register
-			 */
-			MOVE(p, IFIFOAB2, 0, OFIFO, 0, 0, IMMED);
-
-			/* Enable automatic Info FIFO entries */
-			LOAD(p, 0, DCTRL, LDOFF_ENABLE_AUTO_NFIFO, 0, IMMED);
-		}
 		SEQFIFOLOAD(p, MSGINSNOOP, 0, VLF | LAST1 | LAST2 | FLUSH1);
 		SEQSTORE(p, CONTEXT1, 0, 4, 0);
 
 		break;
 
 	case PDCP_AUTH_TYPE_ZUC:
-		if (rta_sec_era < RTA_SEC_ERA_5) {
-			pr_err("Invalid era for selected algorithm\n");
-			return -ENOTSUP;
-		}
 		iv[0] = 0xFFFFFFFF;
 		iv[1] = swap ? swab32(0xFC000000) : 0xFC000000;
 		iv[2] = 0x00000000; /* unused */
@@ -3819,12 +3202,6 @@ cnstr_shdsc_pdcp_short_mac(uint32_t *descbuf,
 		return -EINVAL;
 	}
 
-
-	if (rta_sec_era < RTA_SEC_ERA_3) {
-		PATCH_MOVE(p, move_cmd_read_descbuf, local_offset);
-		PATCH_MOVE(p, move_cmd_write_descbuf, local_offset);
-	}
-
 	return PROGRAM_FINALIZE(p);
 }
 
diff --git a/drivers/common/dpaax/caamflib/desc/sdap.h b/drivers/common/dpaax/caamflib/desc/sdap.h
index b2497a5424..ee03e95990 100644
--- a/drivers/common/dpaax/caamflib/desc/sdap.h
+++ b/drivers/common/dpaax/caamflib/desc/sdap.h
@@ -225,10 +225,6 @@ static inline int pdcp_sdap_insert_no_int_op(struct program *p,
 		break;
 
 	case PDCP_CIPHER_TYPE_ZUC:
-		if (rta_sec_era < RTA_SEC_ERA_5) {
-			pr_err("Invalid era for selected algorithm\n");
-			return -ENOTSUP;
-		}
 		/* The LSB and MSB is the same for ZUC context */
 		MOVEB(p, MATH2, 0, CONTEXT1, 0, 0x08, IMMED);
 		MOVEB(p, MATH2, 0, CONTEXT1, 0x08, 0x08, WAITCOMP | IMMED);
@@ -253,7 +249,6 @@ pdcp_sdap_insert_enc_only_op(struct program *p, bool swap __maybe_unused,
 			     struct alginfo *cipherdata,
 			     struct alginfo *authdata __maybe_unused,
 			     unsigned int dir, enum pdcp_sn_size sn_size,
-			     unsigned char era_2_sw_hfn_ovrd __maybe_unused,
 			     enum pdb_type_e pdb_type)
 {
 	uint32_t offset = 0, length = 0, sn_mask = 0;
@@ -293,12 +288,7 @@ pdcp_sdap_insert_enc_only_op(struct program *p, bool swap __maybe_unused,
 	/* Write header */
 	SEQSTORE(p, MATH0, offset, length, 0);
 
-	if (rta_sec_era > RTA_SEC_ERA_2) {
-		MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
-	} else {
-		MATHB(p, SEQINSZ, SUB, ONE, MATH1, 4, 0);
-		MATHB(p, MATH1, ADD, ONE, VSEQINSZ, 4, 0);
-	}
+	MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
 
 	if (dir == OP_TYPE_ENCAP_PROTOCOL)
 		MATHB(p, SEQINSZ, ADD, PDCP_MAC_I_LEN, VSEQOUTSZ, 4, IMMED2);
@@ -326,11 +316,6 @@ pdcp_sdap_insert_enc_only_op(struct program *p, bool swap __maybe_unused,
 		break;
 
 	case PDCP_CIPHER_TYPE_ZUC:
-		if (rta_sec_era < RTA_SEC_ERA_5) {
-			pr_err("Invalid era for selected algorithm\n");
-			return -ENOTSUP;
-		}
-
 		MOVEB(p, MATH2, 0, CONTEXT1, 0, 0x08, IMMED);
 		MOVEB(p, MATH2, 0, CONTEXT1, 0x08, 0x08, WAITCOMP | IMMED);
 
@@ -378,7 +363,6 @@ static inline int
 pdcp_sdap_insert_snoop_op(struct program *p, bool swap __maybe_unused,
 			  struct alginfo *cipherdata, struct alginfo *authdata,
 			  unsigned int dir, enum pdcp_sn_size sn_size,
-			  unsigned char era_2_sw_hfn_ovrd __maybe_unused,
 			  enum pdb_type_e pdb_type)
 {
 	uint32_t offset = 0, length = 0, sn_mask = 0;
@@ -391,13 +375,6 @@ pdcp_sdap_insert_snoop_op(struct program *p, bool swap __maybe_unused,
 			FULL_PDB_DESCBUF_HFN_BEARER_DIR_OFFSET :
 			REDUCED_PDB_DESCBUF_HFN_BEARER_DIR_OFFSET;
 
-	if (authdata->algtype == PDCP_CIPHER_TYPE_ZUC) {
-		if (rta_sec_era < RTA_SEC_ERA_5) {
-			pr_err("Invalid era for selected algorithm\n");
-			return -ENOTSUP;
-		}
-	}
-
 	if (pdcp_sdap_get_sn_parameters(sn_size, swap, &offset, &length,
 					&sn_mask))
 		return -ENOTSUP;
@@ -588,8 +565,7 @@ pdcp_sdap_insert_snoop_op(struct program *p, bool swap __maybe_unused,
 		 */
 		JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CLASS1 | NOP | NIFP);
 
-		if (rta_sec_era >= RTA_SEC_ERA_6)
-			LOAD(p, 0, DCTRL, 0, LDLEN_RST_CHA_OFIFO_PTR, IMMED);
+		LOAD(p, 0, DCTRL, 0, LDLEN_RST_CHA_OFIFO_PTR, IMMED);
 
 		/* Save the content left in the Output FIFO (the ICV) to MATH0
 		 */
@@ -604,13 +580,7 @@ pdcp_sdap_insert_snoop_op(struct program *p, bool swap __maybe_unused,
 		 * Note: As configured by the altsource, this will send
 		 * the
 		 */
-		if (rta_sec_era <= RTA_SEC_ERA_2) {
-			/* Shut off automatic Info FIFO entries */
-			LOAD(p, 0, DCTRL, LDOFF_DISABLE_AUTO_NFIFO, 0, IMMED);
-			MOVE(p, MATH0, 0, IFIFOAB2, 0, 4, WAITCOMP | IMMED);
-		} else {
-			MOVE(p, MATH0, 0, IFIFO, 0, 4, WAITCOMP | IMMED);
-		}
+		MOVE(p, MATH0, 0, IFIFO, 0, 4, WAITCOMP | IMMED);
 	}
 
 	if (authdata->algtype == PDCP_CIPHER_TYPE_ZUC) {
@@ -638,7 +608,6 @@ pdcp_sdap_insert_snoop_op(struct program *p, bool swap __maybe_unused,
 static inline int pdcp_sdap_insert_no_snoop_op(
 	struct program *p, bool swap __maybe_unused, struct alginfo *cipherdata,
 	struct alginfo *authdata, unsigned int dir, enum pdcp_sn_size sn_size,
-	unsigned char era_2_sw_hfn_ovrd __maybe_unused,
 	enum pdb_type_e pdb_type)
 {
 	uint32_t offset = 0, length = 0, sn_mask = 0;
@@ -649,13 +618,6 @@ static inline int pdcp_sdap_insert_no_snoop_op(
 			FULL_PDB_DESCBUF_HFN_BEARER_DIR_OFFSET :
 			REDUCED_PDB_DESCBUF_HFN_BEARER_DIR_OFFSET;
 
-	if (authdata->algtype == PDCP_CIPHER_TYPE_ZUC) {
-		if (rta_sec_era < RTA_SEC_ERA_5) {
-			pr_err("Invalid era for selected algorithm\n");
-			return -ENOTSUP;
-		}
-	}
-
 	if (pdcp_sdap_get_sn_parameters(sn_size, swap, &offset, &length,
 					&sn_mask))
 		return -ENOTSUP;
@@ -842,11 +804,10 @@ pdcp_sdap_insert_cplane_null_op(struct program *p,
 			   struct alginfo *authdata,
 			   unsigned int dir,
 			   enum pdcp_sn_size sn_size,
-			   unsigned char era_2_sw_hfn_ovrd,
 			   enum pdb_type_e pdb_type __maybe_unused)
 {
 	return pdcp_insert_cplane_null_op(p, swap, cipherdata, authdata, dir,
-					  sn_size, era_2_sw_hfn_ovrd);
+					  sn_size);
 }
 
 static inline int
@@ -856,24 +817,22 @@ pdcp_sdap_insert_cplane_int_only_op(struct program *p,
 			   struct alginfo *authdata,
 			   unsigned int dir,
 			   enum pdcp_sn_size sn_size,
-			   unsigned char era_2_sw_hfn_ovrd,
 			   enum pdb_type_e pdb_type __maybe_unused)
 {
 	return pdcp_insert_cplane_int_only_op(p, swap, cipherdata, authdata,
-				dir, sn_size, era_2_sw_hfn_ovrd);
+				dir, sn_size);
 }
 
 static int pdcp_sdap_insert_with_int_op(
 	struct program *p, bool swap __maybe_unused, struct alginfo *cipherdata,
 	struct alginfo *authdata, enum pdcp_sn_size sn_size,
-	unsigned char era_2_sw_hfn_ovrd, unsigned int dir,
+	unsigned int dir,
 	enum pdb_type_e pdb_type)
 {
 	static int (
 		*pdcp_cp_fp[PDCP_CIPHER_TYPE_INVALID][PDCP_AUTH_TYPE_INVALID])(
 		struct program *, bool swap, struct alginfo *, struct alginfo *,
-		unsigned int, enum pdcp_sn_size,
-		unsigned char __maybe_unused, enum pdb_type_e pdb_type) = {
+		unsigned int dir, enum pdcp_sn_size, enum pdb_type_e pdb_type) = {
 		{
 			/* NULL */
 			pdcp_sdap_insert_cplane_null_op,     /* NULL */
@@ -907,7 +866,7 @@ static int pdcp_sdap_insert_with_int_op(
 
 	err = pdcp_cp_fp[cipherdata->algtype]
 			[authdata->algtype](p, swap, cipherdata, authdata, dir,
-					sn_size, era_2_sw_hfn_ovrd, pdb_type);
+					sn_size, pdb_type);
 	if (err)
 		return err;
 
@@ -925,7 +884,6 @@ cnstr_shdsc_pdcp_sdap_u_plane(uint32_t *descbuf,
 			       uint32_t hfn_threshold,
 			       struct alginfo *cipherdata,
 			       struct alginfo *authdata,
-			       unsigned char era_2_sw_hfn_ovrd,
 			       uint32_t caps_mode)
 {
 	struct program prg;
@@ -966,12 +924,6 @@ cnstr_shdsc_pdcp_sdap_u_plane(uint32_t *descbuf,
 
 	LABEL(pdb_end);
 
-	/* Check HFN override for ERA 2 */
-	if (rta_sec_era != RTA_SEC_ERA_2 && era_2_sw_hfn_ovrd) {
-		pr_err("Cannot select SW HFN ovrd for other era than 2");
-		return -EINVAL;
-	}
-
 	/* Check the confidentiality algorithm is supported by the code */
 	switch (cipherdata->algtype) {
 	case PDCP_CIPHER_TYPE_NULL:
@@ -1013,14 +965,6 @@ cnstr_shdsc_pdcp_sdap_u_plane(uint32_t *descbuf,
 		return -ENOTSUP;
 	}
 
-	/* Check that we are not performing ZUC algo on old platforms */
-	if (cipherdata->algtype == PDCP_CIPHER_TYPE_ZUC &&
-			rta_sec_era < RTA_SEC_ERA_5) {
-		pr_err("ZUC algorithm not supported for era: %d\n",
-				rta_sec_era);
-		return -ENOTSUP;
-	}
-
 	/* Initialize the program */
 	PROGRAM_CNTXT_INIT(p, descbuf, 0);
 
@@ -1047,7 +991,7 @@ cnstr_shdsc_pdcp_sdap_u_plane(uint32_t *descbuf,
 	SET_LABEL(p, pdb_end);
 
 	/* Inser the HFN override operation */
-	err = insert_hfn_ov_op(p, sn_size, pdb_type, era_2_sw_hfn_ovrd, false);
+	err = insert_hfn_ov_op(p, sn_size, pdb_type, false);
 	if (err)
 		return err;
 
@@ -1068,7 +1012,6 @@ cnstr_shdsc_pdcp_sdap_u_plane(uint32_t *descbuf,
 	} else {
 		err = pdcp_sdap_insert_with_int_op(p, swap, cipherdata,
 						   authdata, sn_size,
-						   era_2_sw_hfn_ovrd,
 						   caps_mode, pdb_type);
 		if (err) {
 			pr_err("Fail pdcp_sdap_insert_with_int_op\n");
@@ -1096,9 +1039,6 @@ cnstr_shdsc_pdcp_sdap_u_plane(uint32_t *descbuf,
  *                 keys should be renegotiated at the earliest convenience.
  * @cipherdata: pointer to block cipher transform definitions
  *              Valid algorithm values are those from cipher_type_pdcp enum.
- * @era_2_sw_hfn_ovrd: if software HFN override mechanism is desired for
- *                     this descriptor. Note: Can only be used for
- *                     SEC ERA 2.
  *
  * Return: size of descriptor written in words or negative number on error.
  *         Once the function returns, the value of this parameter can be used
@@ -1118,12 +1058,11 @@ cnstr_shdsc_pdcp_sdap_u_plane_encap(uint32_t *descbuf,
 			       unsigned short direction,
 			       uint32_t hfn_threshold,
 			       struct alginfo *cipherdata,
-			       struct alginfo *authdata,
-			       unsigned char era_2_sw_hfn_ovrd)
+			       struct alginfo *authdata)
 {
 	return cnstr_shdsc_pdcp_sdap_u_plane(descbuf, ps, swap, sn_size,
 			hfn, bearer, direction, hfn_threshold, cipherdata,
-			authdata, era_2_sw_hfn_ovrd, OP_TYPE_ENCAP_PROTOCOL);
+			authdata, OP_TYPE_ENCAP_PROTOCOL);
 }
 
 /**
@@ -1141,9 +1080,6 @@ cnstr_shdsc_pdcp_sdap_u_plane_encap(uint32_t *descbuf,
  *                 keys should be renegotiated at the earliest convenience.
  * @cipherdata: pointer to block cipher transform definitions
  *              Valid algorithm values are those from cipher_type_pdcp enum.
- * @era_2_sw_hfn_ovrd: if software HFN override mechanism is desired for
- *                     this descriptor. Note: Can only be used for
- *                     SEC ERA 2.
  *
  * Return: size of descriptor written in words or negative number on error.
  *         Once the function returns, the value of this parameter can be used
@@ -1163,12 +1099,11 @@ cnstr_shdsc_pdcp_sdap_u_plane_decap(uint32_t *descbuf,
 			       unsigned short direction,
 			       uint32_t hfn_threshold,
 			       struct alginfo *cipherdata,
-			       struct alginfo *authdata,
-			       unsigned char era_2_sw_hfn_ovrd)
+			       struct alginfo *authdata)
 {
 	return cnstr_shdsc_pdcp_sdap_u_plane(descbuf, ps, swap, sn_size, hfn,
 			bearer, direction, hfn_threshold, cipherdata, authdata,
-			era_2_sw_hfn_ovrd, OP_TYPE_DECAP_PROTOCOL);
+			OP_TYPE_DECAP_PROTOCOL);
 }
 
 #endif /* __DESC_SDAP_H__ */
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
index a5b052375d..1e6b3e548a 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
@@ -3297,8 +3297,7 @@ dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
 					pdcp_xform->bearer,
 					pdcp_xform->pkt_dir,
 					pdcp_xform->hfn_threshold,
-					&cipherdata, &authdata,
-					0);
+					&cipherdata, &authdata);
 		else if (session->dir == DIR_DEC)
 			bufsize = cnstr_shdsc_pdcp_c_plane_decap(
 					priv->flc_desc[0].desc, 1, swap,
@@ -3307,8 +3306,7 @@ dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
 					pdcp_xform->bearer,
 					pdcp_xform->pkt_dir,
 					pdcp_xform->hfn_threshold,
-					&cipherdata, &authdata,
-					0);
+					&cipherdata, &authdata);
 
 	} else if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_SHORT_MAC) {
 		bufsize = cnstr_shdsc_pdcp_short_mac(priv->flc_desc[0].desc,
@@ -3323,7 +3321,7 @@ dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
 					pdcp_xform->bearer,
 					pdcp_xform->pkt_dir,
 					pdcp_xform->hfn_threshold,
-					&cipherdata, p_authdata, 0);
+					&cipherdata, p_authdata);
 			else
 				bufsize = cnstr_shdsc_pdcp_u_plane_encap(
 					priv->flc_desc[0].desc, 1, swap,
@@ -3332,7 +3330,7 @@ dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
 					pdcp_xform->bearer,
 					pdcp_xform->pkt_dir,
 					pdcp_xform->hfn_threshold,
-					&cipherdata, p_authdata, 0);
+					&cipherdata, p_authdata);
 		} else if (session->dir == DIR_DEC) {
 			if (pdcp_xform->sdap_enabled)
 				bufsize = cnstr_shdsc_pdcp_sdap_u_plane_decap(
@@ -3342,7 +3340,7 @@ dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
 					pdcp_xform->bearer,
 					pdcp_xform->pkt_dir,
 					pdcp_xform->hfn_threshold,
-					&cipherdata, p_authdata, 0);
+					&cipherdata, p_authdata);
 			else
 				bufsize = cnstr_shdsc_pdcp_u_plane_decap(
 					priv->flc_desc[0].desc, 1, swap,
@@ -3351,7 +3349,7 @@ dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
 					pdcp_xform->bearer,
 					pdcp_xform->pkt_dir,
 					pdcp_xform->hfn_threshold,
-					&cipherdata, p_authdata, 0);
+					&cipherdata, p_authdata);
 		}
 	}
 
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.c b/drivers/crypto/dpaa_sec/dpaa_sec.c
index a552e64506..1dedd9eee5 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.c
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.c
@@ -296,8 +296,7 @@ dpaa_sec_prep_pdcp_cdb(dpaa_sec_session *ses)
 					ses->pdcp.bearer,
 					ses->pdcp.pkt_dir,
 					ses->pdcp.hfn_threshold,
-					&cipherdata, &authdata,
-					0);
+					&cipherdata, &authdata);
 		else if (ses->dir == DIR_DEC)
 			shared_desc_len = cnstr_shdsc_pdcp_c_plane_decap(
 					cdb->sh_desc, 1, swap,
@@ -306,8 +305,7 @@ dpaa_sec_prep_pdcp_cdb(dpaa_sec_session *ses)
 					ses->pdcp.bearer,
 					ses->pdcp.pkt_dir,
 					ses->pdcp.hfn_threshold,
-					&cipherdata, &authdata,
-					0);
+					&cipherdata, &authdata);
 	} else if (ses->pdcp.domain == RTE_SECURITY_PDCP_MODE_SHORT_MAC) {
 		shared_desc_len = cnstr_shdsc_pdcp_short_mac(cdb->sh_desc,
 						     1, swap, &authdata);
@@ -322,7 +320,7 @@ dpaa_sec_prep_pdcp_cdb(dpaa_sec_session *ses)
 						ses->pdcp.bearer,
 						ses->pdcp.pkt_dir,
 						ses->pdcp.hfn_threshold,
-						&cipherdata, p_authdata, 0);
+						&cipherdata, p_authdata);
 			else
 				shared_desc_len =
 					cnstr_shdsc_pdcp_u_plane_encap(
@@ -332,7 +330,7 @@ dpaa_sec_prep_pdcp_cdb(dpaa_sec_session *ses)
 						ses->pdcp.bearer,
 						ses->pdcp.pkt_dir,
 						ses->pdcp.hfn_threshold,
-						&cipherdata, p_authdata, 0);
+						&cipherdata, p_authdata);
 		} else if (ses->dir == DIR_DEC) {
 			if (ses->pdcp.sdap_enabled)
 				shared_desc_len =
@@ -343,7 +341,7 @@ dpaa_sec_prep_pdcp_cdb(dpaa_sec_session *ses)
 						ses->pdcp.bearer,
 						ses->pdcp.pkt_dir,
 						ses->pdcp.hfn_threshold,
-						&cipherdata, p_authdata, 0);
+						&cipherdata, p_authdata);
 			else
 				shared_desc_len =
 					cnstr_shdsc_pdcp_u_plane_decap(
@@ -353,7 +351,7 @@ dpaa_sec_prep_pdcp_cdb(dpaa_sec_session *ses)
 						ses->pdcp.bearer,
 						ses->pdcp.pkt_dir,
 						ses->pdcp.hfn_threshold,
-						&cipherdata, p_authdata, 0);
+						&cipherdata, p_authdata);
 		}
 	}
 	return shared_desc_len;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 42+ messages in thread

* [PATCH v2 2/8] common/dpaax: change job processing mode for PDCP SDAP
  2021-12-28  9:10 ` [PATCH v2 0/8] NXP crypto drivers changes Gagandeep Singh
  2021-12-28  9:10   ` [PATCH v2 1/8] common/dpaax: caamflib: Remove code related to SEC ERA 1 to 7 Gagandeep Singh
@ 2021-12-28  9:10   ` Gagandeep Singh
  2021-12-28  9:10   ` [PATCH v2 3/8] crypto/dpaa2_sec: ordered queue support Gagandeep Singh
                     ` (5 subsequent siblings)
  7 siblings, 0 replies; 42+ messages in thread
From: Gagandeep Singh @ 2021-12-28  9:10 UTC (permalink / raw)
  To: gakhil, dev; +Cc: Gagandeep Singh

For PDCP SDAP test cases, the HW SEC engine processes the
jobs in WAIT mode.
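
In CAAM terms, SHR_ALWAYS roughly means a job may use the shared descriptor
unconditionally, while SHR_WAIT makes a job wait until the shared descriptor
is free before reusing it, serializing access. A simplified, self-contained
sketch of what the per-algorithm selection in the diff below reduces to after
this change (the enum and function names here are illustrative placeholders,
not the caamflib definitions):

#include <stdio.h>

/* Placeholder names; the real values come from the RTA headers. */
enum share_mode { SHARE_ALWAYS, SHARE_WAIT };

/*
 * After this patch the (cipher, auth) table in
 * cnstr_shdsc_pdcp_sdap_u_plane() resolves to WAIT for every
 * combination, so the selection effectively collapses to a constant.
 */
static enum share_mode
sdap_share_mode(int cipher_alg, int auth_alg)
{
	(void)cipher_alg;
	(void)auth_alg;
	return SHARE_WAIT;
}

int main(void)
{
	printf("SDAP share mode: %s\n",
	       sdap_share_mode(0, 0) == SHARE_WAIT ? "WAIT" : "ALWAYS");
	return 0;
}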

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 drivers/common/dpaax/caamflib/desc/sdap.h | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/drivers/common/dpaax/caamflib/desc/sdap.h b/drivers/common/dpaax/caamflib/desc/sdap.h
index ee03e95990..1737e14fa6 100644
--- a/drivers/common/dpaax/caamflib/desc/sdap.h
+++ b/drivers/common/dpaax/caamflib/desc/sdap.h
@@ -895,27 +895,27 @@ cnstr_shdsc_pdcp_sdap_u_plane(uint32_t *descbuf,
 			{
 				/* NULL */
 				SHR_WAIT,   /* NULL */
-				SHR_ALWAYS, /* SNOW f9 */
-				SHR_ALWAYS, /* AES CMAC */
-				SHR_ALWAYS  /* ZUC-I */
+				SHR_WAIT, /* SNOW f9 */
+				SHR_WAIT, /* AES CMAC */
+				SHR_WAIT  /* ZUC-I */
 			},
 			{
 				/* SNOW f8 */
-				SHR_ALWAYS, /* NULL */
-				SHR_ALWAYS, /* SNOW f9 */
+				SHR_WAIT, /* NULL */
+				SHR_WAIT, /* SNOW f9 */
 				SHR_WAIT,   /* AES CMAC */
 				SHR_WAIT    /* ZUC-I */
 			},
 			{
 				/* AES CTR */
-				SHR_ALWAYS, /* NULL */
-				SHR_ALWAYS, /* SNOW f9 */
-				SHR_ALWAYS, /* AES CMAC */
+				SHR_WAIT, /* NULL */
+				SHR_WAIT, /* SNOW f9 */
+				SHR_WAIT, /* AES CMAC */
 				SHR_WAIT    /* ZUC-I */
 			},
 			{
 				/* ZUC-E */
-				SHR_ALWAYS, /* NULL */
+				SHR_WAIT, /* NULL */
 				SHR_WAIT,   /* SNOW f9 */
 				SHR_WAIT,   /* AES CMAC */
 				SHR_WAIT    /* ZUC-I */
@@ -979,7 +979,7 @@ cnstr_shdsc_pdcp_sdap_u_plane(uint32_t *descbuf,
 		SHR_HDR(p, desc_share[cipherdata->algtype][authdata->algtype],
 			0, 0);
 	else
-		SHR_HDR(p, SHR_ALWAYS, 0, 0);
+		SHR_HDR(p, SHR_WAIT, 0, 0);
 
 	/* Construct the PDB */
 	pdb_type = cnstr_pdcp_u_plane_pdb(p, sn_size, hfn, bearer, direction,
-- 
2.25.1


^ permalink raw reply	[flat|nested] 42+ messages in thread

* [PATCH v2 3/8] crypto/dpaa2_sec: ordered queue support
  2021-12-28  9:10 ` [PATCH v2 0/8] NXP crypto drivers changes Gagandeep Singh
  2021-12-28  9:10   ` [PATCH v2 1/8] common/dpaax: caamflib: Remove code related to SEC ERA 1 to 7 Gagandeep Singh
  2021-12-28  9:10   ` [PATCH v2 2/8] common/dpaax: change job processing mode for PDCP SDAP Gagandeep Singh
@ 2021-12-28  9:10   ` Gagandeep Singh
  2022-01-21 11:31     ` [EXT] " Akhil Goyal
  2021-12-28  9:10   ` [PATCH v2 4/8] crypto/dpaa2_sec: support AES-GMAC Gagandeep Singh
                     ` (4 subsequent siblings)
  7 siblings, 1 reply; 42+ messages in thread
From: Gagandeep Singh @ 2021-12-28  9:10 UTC (permalink / raw)
  To: gakhil, dev; +Cc: Nipun Gupta

From: Nipun Gupta <nipun.gupta@nxp.com>

This patch adds ordered queue support for the DPAA2 platform.
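
A minimal sketch of the application side, assuming the standard eventdev API:
the PMD selects its ordered processing path when the event metadata used to
bind a SEC queue pair carries RTE_SCHED_TYPE_ORDERED (the helper name below is
hypothetical). As the diff below shows, strict rather than loose ordering can
additionally be forced through the DPAA2_STRICT_ORDERING_ENABLE environment
variable.

#include <string.h>
#include <rte_eventdev.h>

/* Hypothetical helper: event metadata for binding a DPAA2 SEC queue
 * pair to an event queue with ordered scheduling. */
static void
sec_ev_ordered_init(struct rte_event *ev, uint8_t ev_queue_id)
{
	memset(ev, 0, sizeof(*ev));
	ev->queue_id = ev_queue_id;
	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
	ev->sched_type = RTE_SCHED_TYPE_ORDERED;	/* ordered path */
	ev->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
}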

Signed-off-by: Nipun Gupta <nipun.gupta@nxp.com>
---
 drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c | 255 +++++++++++++++++++-
 drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h   |   8 +-
 drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h    |  14 +-
 3 files changed, 263 insertions(+), 14 deletions(-)

diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
index 1e6b3e548a..a9fda67ac3 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
@@ -1466,14 +1466,14 @@ dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
 
 		for (loop = 0; loop < frames_to_send; loop++) {
 			if (*dpaa2_seqn((*ops)->sym->m_src)) {
-				uint8_t dqrr_index =
-					*dpaa2_seqn((*ops)->sym->m_src) - 1;
-
-				flags[loop] = QBMAN_ENQUEUE_FLAG_DCA | dqrr_index;
-				DPAA2_PER_LCORE_DQRR_SIZE--;
-				DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index);
-				*dpaa2_seqn((*ops)->sym->m_src) =
-					DPAA2_INVALID_MBUF_SEQN;
+				if (*dpaa2_seqn((*ops)->sym->m_src) & QBMAN_ENQUEUE_FLAG_DCA) {
+					DPAA2_PER_LCORE_DQRR_SIZE--;
+					DPAA2_PER_LCORE_DQRR_HELD &= ~(1 <<
+					*dpaa2_seqn((*ops)->sym->m_src) &
+					QBMAN_EQCR_DCA_IDXMASK);
+				}
+				flags[loop] = *dpaa2_seqn((*ops)->sym->m_src);
+				*dpaa2_seqn((*ops)->sym->m_src) = DPAA2_INVALID_MBUF_SEQN;
 			}
 
 			/*Clear the unused FD fields before sending*/
@@ -1621,6 +1621,169 @@ sec_fd_to_mbuf(const struct qbman_fd *fd)
 	return op;
 }
 
+static void
+dpaa2_sec_free_eqresp_buf(uint16_t eqresp_ci)
+{
+	struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO;
+	struct rte_crypto_op *op;
+	struct qbman_fd *fd;
+
+	fd = qbman_result_eqresp_fd(&dpio_dev->eqresp[eqresp_ci]);
+	op = sec_fd_to_mbuf(fd);
+	/* Instead of freeing, enqueue it to the sec tx queue (sec->core)
+	 * after setting an error in FD. But this will have performance impact.
+	 */
+	rte_pktmbuf_free(op->sym->m_src);
+}
+
+static void
+dpaa2_sec_set_enqueue_descriptor(struct dpaa2_queue *dpaa2_q,
+			     struct rte_mbuf *m,
+			     struct qbman_eq_desc *eqdesc)
+{
+	struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO;
+	struct eqresp_metadata *eqresp_meta;
+	struct dpaa2_sec_dev_private *priv = dpaa2_q->crypto_data->dev_private;
+	uint16_t orpid, seqnum;
+	uint8_t dq_idx;
+
+	if (*dpaa2_seqn(m) & DPAA2_ENQUEUE_FLAG_ORP) {
+		orpid = (*dpaa2_seqn(m) & DPAA2_EQCR_OPRID_MASK) >>
+			DPAA2_EQCR_OPRID_SHIFT;
+		seqnum = (*dpaa2_seqn(m) & DPAA2_EQCR_SEQNUM_MASK) >>
+			DPAA2_EQCR_SEQNUM_SHIFT;
+
+
+		if (!priv->en_loose_ordered) {
+			qbman_eq_desc_set_orp(eqdesc, 1, orpid, seqnum, 0);
+			qbman_eq_desc_set_response(eqdesc, (uint64_t)
+				DPAA2_VADDR_TO_IOVA(&dpio_dev->eqresp[
+				dpio_dev->eqresp_pi]), 1);
+			qbman_eq_desc_set_token(eqdesc, 1);
+
+			eqresp_meta = &dpio_dev->eqresp_meta[dpio_dev->eqresp_pi];
+			eqresp_meta->dpaa2_q = dpaa2_q;
+			eqresp_meta->mp = m->pool;
+
+			dpio_dev->eqresp_pi + 1 < MAX_EQ_RESP_ENTRIES ?
+				dpio_dev->eqresp_pi++ : (dpio_dev->eqresp_pi = 0);
+		} else {
+			qbman_eq_desc_set_orp(eqdesc, 0, orpid, seqnum, 0);
+		}
+	} else {
+		dq_idx = *dpaa2_seqn(m) - 1;
+		qbman_eq_desc_set_dca(eqdesc, 1, dq_idx, 0);
+		DPAA2_PER_LCORE_DQRR_SIZE--;
+		DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dq_idx);
+	}
+	*dpaa2_seqn(m) = DPAA2_INVALID_MBUF_SEQN;
+}
+
+
+static uint16_t
+dpaa2_sec_enqueue_burst_ordered(void *qp, struct rte_crypto_op **ops,
+			uint16_t nb_ops)
+{
+	/* Function to transmit the frames to given device and VQ*/
+	uint32_t loop;
+	int32_t ret;
+	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
+	uint32_t frames_to_send, num_free_eq_desc, retry_count;
+	struct qbman_eq_desc eqdesc[MAX_TX_RING_SLOTS];
+	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
+	struct qbman_swp *swp;
+	uint16_t num_tx = 0;
+	/*todo - need to support multiple buffer pools */
+	uint16_t bpid;
+	struct rte_mempool *mb_pool;
+	struct dpaa2_sec_dev_private *priv =
+				dpaa2_qp->tx_vq.crypto_data->dev_private;
+
+	if (unlikely(nb_ops == 0))
+		return 0;
+
+	if (ops[0]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
+		DPAA2_SEC_ERR("sessionless crypto op not supported");
+		return 0;
+	}
+
+	if (!DPAA2_PER_LCORE_DPIO) {
+		ret = dpaa2_affine_qbman_swp();
+		if (ret) {
+			DPAA2_SEC_ERR("Failure in affining portal");
+			return 0;
+		}
+	}
+	swp = DPAA2_PER_LCORE_PORTAL;
+
+	while (nb_ops) {
+		frames_to_send = (nb_ops > dpaa2_eqcr_size) ?
+			dpaa2_eqcr_size : nb_ops;
+
+		if (!priv->en_loose_ordered) {
+			if (*dpaa2_seqn((*ops)->sym->m_src)) {
+				num_free_eq_desc = dpaa2_free_eq_descriptors();
+				if (num_free_eq_desc < frames_to_send)
+					frames_to_send = num_free_eq_desc;
+			}
+		}
+
+		for (loop = 0; loop < frames_to_send; loop++) {
+			/*Prepare enqueue descriptor*/
+			qbman_eq_desc_clear(&eqdesc[loop]);
+			qbman_eq_desc_set_fq(&eqdesc[loop], dpaa2_qp->tx_vq.fqid);
+
+			if (*dpaa2_seqn((*ops)->sym->m_src))
+				dpaa2_sec_set_enqueue_descriptor(
+						&dpaa2_qp->tx_vq,
+						(*ops)->sym->m_src,
+						&eqdesc[loop]);
+			else
+				qbman_eq_desc_set_no_orp(&eqdesc[loop],
+							 DPAA2_EQ_RESP_ERR_FQ);
+
+			/*Clear the unused FD fields before sending*/
+			memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
+			mb_pool = (*ops)->sym->m_src->pool;
+			bpid = mempool_to_bpid(mb_pool);
+			ret = build_sec_fd(*ops, &fd_arr[loop], bpid);
+			if (ret) {
+				DPAA2_SEC_ERR("error: Improper packet contents"
+					      " for crypto operation");
+				goto skip_tx;
+			}
+			ops++;
+		}
+
+		loop = 0;
+		retry_count = 0;
+		while (loop < frames_to_send) {
+			ret = qbman_swp_enqueue_multiple_desc(swp,
+					&eqdesc[loop], &fd_arr[loop],
+					frames_to_send - loop);
+			if (unlikely(ret < 0)) {
+				retry_count++;
+				if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
+					num_tx += loop;
+					nb_ops -= loop;
+					goto skip_tx;
+				}
+			} else {
+				loop += ret;
+				retry_count = 0;
+			}
+		}
+
+		num_tx += loop;
+		nb_ops -= loop;
+	}
+
+skip_tx:
+	dpaa2_qp->tx_vq.tx_pkts += num_tx;
+	dpaa2_qp->tx_vq.err_pkts += nb_ops;
+	return num_tx;
+}
+
 static uint16_t
 dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
 			uint16_t nb_ops)
@@ -3527,6 +3690,10 @@ dpaa2_sec_dev_start(struct rte_cryptodev *dev)
 
 	PMD_INIT_FUNC_TRACE();
 
+	/* Change the tx burst function if ordered queues are used */
+	if (priv->en_ordered)
+		dev->enqueue_burst = dpaa2_sec_enqueue_burst_ordered;
+
 	memset(&attr, 0, sizeof(struct dpseci_attr));
 
 	ret = dpseci_enable(dpseci, CMD_PRI_LOW, priv->token);
@@ -3739,12 +3906,46 @@ dpaa2_sec_process_atomic_event(struct qbman_swp *swp __rte_unused,
 
 	ev->event_ptr = sec_fd_to_mbuf(fd);
 	dqrr_index = qbman_get_dqrr_idx(dq);
-	*dpaa2_seqn(crypto_op->sym->m_src) = dqrr_index + 1;
+	*dpaa2_seqn(crypto_op->sym->m_src) = QBMAN_ENQUEUE_FLAG_DCA | dqrr_index;
 	DPAA2_PER_LCORE_DQRR_SIZE++;
 	DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
 	DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = crypto_op->sym->m_src;
 }
 
+static void __attribute__((hot))
+dpaa2_sec_process_ordered_event(struct qbman_swp *swp,
+				const struct qbman_fd *fd,
+				const struct qbman_result *dq,
+				struct dpaa2_queue *rxq,
+				struct rte_event *ev)
+{
+	struct rte_crypto_op *crypto_op = (struct rte_crypto_op *)ev->event_ptr;
+
+	/* Prefetching mbuf */
+	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-
+		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size));
+
+	/* Prefetching ipsec crypto_op stored in priv data of mbuf */
+	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64));
+
+	ev->flow_id = rxq->ev.flow_id;
+	ev->sub_event_type = rxq->ev.sub_event_type;
+	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
+	ev->op = RTE_EVENT_OP_NEW;
+	ev->sched_type = rxq->ev.sched_type;
+	ev->queue_id = rxq->ev.queue_id;
+	ev->priority = rxq->ev.priority;
+	ev->event_ptr = sec_fd_to_mbuf(fd);
+
+	*dpaa2_seqn(crypto_op->sym->m_src) = DPAA2_ENQUEUE_FLAG_ORP;
+	*dpaa2_seqn(crypto_op->sym->m_src) |= qbman_result_DQ_odpid(dq) <<
+		DPAA2_EQCR_OPRID_SHIFT;
+	*dpaa2_seqn(crypto_op->sym->m_src) |= qbman_result_DQ_seqnum(dq) <<
+		DPAA2_EQCR_SEQNUM_SHIFT;
+
+	qbman_swp_dqrr_consume(swp, dq);
+}
+
 int
 dpaa2_sec_eventq_attach(const struct rte_cryptodev *dev,
 		int qp_id,
@@ -3762,6 +3963,8 @@ dpaa2_sec_eventq_attach(const struct rte_cryptodev *dev,
 		qp->rx_vq.cb = dpaa2_sec_process_parallel_event;
 	else if (event->sched_type == RTE_SCHED_TYPE_ATOMIC)
 		qp->rx_vq.cb = dpaa2_sec_process_atomic_event;
+	else if (event->sched_type == RTE_SCHED_TYPE_ORDERED)
+		qp->rx_vq.cb = dpaa2_sec_process_ordered_event;
 	else
 		return -EINVAL;
 
@@ -3780,6 +3983,40 @@ dpaa2_sec_eventq_attach(const struct rte_cryptodev *dev,
 		cfg.options |= DPSECI_QUEUE_OPT_ORDER_PRESERVATION;
 		cfg.order_preservation_en = 1;
 	}
+
+	if (event->sched_type == RTE_SCHED_TYPE_ORDERED) {
+		struct opr_cfg ocfg;
+
+		/* Restoration window size = 256 frames */
+		ocfg.oprrws = 3;
+		/* Restoration window size = 512 frames for LX2 */
+		if (dpaa2_svr_family == SVR_LX2160A)
+			ocfg.oprrws = 4;
+		/* Auto advance NESN window enabled */
+		ocfg.oa = 1;
+		/* Late arrival window size disabled */
+		ocfg.olws = 0;
+		/* ORL resource exhaustaion advance NESN disabled */
+		ocfg.oeane = 0;
+		/* Loose ordering enabled */
+		ocfg.oloe = 1;
+		priv->en_loose_ordered = 1;
+		/* Strict ordering enabled if explicitly set */
+		if (getenv("DPAA2_STRICT_ORDERING_ENABLE")) {
+			ocfg.oloe = 0;
+			priv->en_loose_ordered = 0;
+		}
+
+		ret = dpseci_set_opr(dpseci, CMD_PRI_LOW, priv->token,
+				   qp_id, OPR_OPT_CREATE, &ocfg);
+		if (ret) {
+			RTE_LOG(ERR, PMD, "Error setting opr: ret: %d\n", ret);
+			return ret;
+		}
+		qp->tx_vq.cb_eqresp_free = dpaa2_sec_free_eqresp_buf;
+		priv->en_ordered = 1;
+	}
+
 	ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
 				  qp_id, &cfg);
 	if (ret) {
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
index 05bd7c0736..1756d917dd 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
@@ -1,8 +1,6 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- *
- *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- *   Copyright 2016,2020-2021 NXP
- *
+ * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright 2016,2019-2021 NXP
  */
 
 #ifndef _DPAA2_SEC_PMD_PRIVATE_H_
@@ -37,6 +35,8 @@ struct dpaa2_sec_dev_private {
 	uint16_t token; /**< Token required by DPxxx objects */
 	unsigned int max_nb_queue_pairs;
 	/**< Max number of queue pairs supported by device */
+	uint8_t en_ordered;
+	uint8_t en_loose_ordered;
 };
 
 struct dpaa2_sec_qp {
diff --git a/drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h b/drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h
index 279e8f4d4a..c295c04f24 100644
--- a/drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h
+++ b/drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
  *
  * Copyright 2013-2016 Freescale Semiconductor Inc.
- * Copyright 2016-2017 NXP
+ * Copyright 2016-2020 NXP
  *
  */
 #ifndef __FSL_DPSECI_H
@@ -11,6 +11,8 @@
  * Contains initialization APIs and runtime control APIs for DPSECI
  */
 
+#include <fsl_dpopr.h>
+
 struct fsl_mc_io;
 
 /**
@@ -41,6 +43,16 @@ int dpseci_close(struct fsl_mc_io *mc_io,
  */
 #define DPSECI_OPT_HAS_CG				0x000020
 
+/**
+ * Enable the Order Restoration support
+ */
+#define DPSECI_OPT_HAS_OPR				0x000040
+
+/**
+ * Order Point Records are shared for the entire DPSECI
+ */
+#define DPSECI_OPT_OPR_SHARED				0x000080
+
 /**
  * struct dpseci_cfg - Structure representing DPSECI configuration
  * @options: Any combination of the following options:
-- 
2.25.1


^ permalink raw reply	[flat|nested] 42+ messages in thread

* [PATCH v2 4/8] crypto/dpaa2_sec: support AES-GMAC
  2021-12-28  9:10 ` [PATCH v2 0/8] NXP crypto drivers changes Gagandeep Singh
                     ` (2 preceding siblings ...)
  2021-12-28  9:10   ` [PATCH v2 3/8] crypto/dpaa2_sec: ordered queue support Gagandeep Singh
@ 2021-12-28  9:10   ` Gagandeep Singh
  2022-01-21 11:29     ` [EXT] " Akhil Goyal
  2021-12-28  9:10   ` [PATCH v2 5/8] crypto/dpaa2_sec: change digest size for AES_CMAC Gagandeep Singh
                     ` (3 subsequent siblings)
  7 siblings, 1 reply; 42+ messages in thread
From: Gagandeep Singh @ 2021-12-28  9:10 UTC (permalink / raw)
  To: gakhil, dev; +Cc: Akhil Goyal, Gagandeep Singh

From: Akhil Goyal <akhil.goyal@nxp.com>

This patch adds support for the AES-GMAC algorithm in the
DPAA2 driver.
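
As the capability entry below shows, the algorithm is exposed as an AEAD
transform (NULL encryption with GMAC authentication) and, per the IPsec code
path, is intended for protocol offload sessions. A hedged sketch of such a
transform; the key buffer and sizes are illustrative values within the
advertised ranges:

#include <rte_crypto_sym.h>

static uint8_t gmac_key[16];	/* placeholder key material */

/* AEAD transform using the newly added RTE_CRYPTO_AEAD_AES_GMAC. */
static const struct rte_crypto_sym_xform gmac_xform = {
	.type = RTE_CRYPTO_SYM_XFORM_AEAD,
	.aead = {
		.op = RTE_CRYPTO_AEAD_OP_ENCRYPT,
		.algo = RTE_CRYPTO_AEAD_AES_GMAC,
		.key = { .data = gmac_key, .length = sizeof(gmac_key) },
		.iv = { .offset = 0, .length = 12 },
		.digest_length = 16,
		.aad_length = 0,
	},
};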

Signed-off-by: Akhil Goyal <akhil.goyal@nxp.com>
Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 doc/guides/cryptodevs/features/dpaa2_sec.ini |  1 +
 drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c  | 14 ++++++++-
 drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h    | 30 ++++++++++++++++++++
 lib/cryptodev/rte_crypto_sym.h               |  4 ++-
 4 files changed, 47 insertions(+), 2 deletions(-)

diff --git a/doc/guides/cryptodevs/features/dpaa2_sec.ini b/doc/guides/cryptodevs/features/dpaa2_sec.ini
index 3d6e449ca1..dcaf64965d 100644
--- a/doc/guides/cryptodevs/features/dpaa2_sec.ini
+++ b/doc/guides/cryptodevs/features/dpaa2_sec.ini
@@ -48,6 +48,7 @@ SHA512 HMAC  = Y
 SNOW3G UIA2  = Y
 AES XCBC MAC = Y
 ZUC EIA3     = Y
+AES GMAC     = Y
 AES CMAC (128) = Y
 
 ;
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
index a9fda67ac3..99f5157abe 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
@@ -2847,6 +2847,13 @@ dpaa2_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform,
 		aeaddata->algmode = OP_ALG_AAI_CCM;
 		session->aead_alg = RTE_CRYPTO_AEAD_AES_CCM;
 		break;
+	case RTE_CRYPTO_AEAD_AES_GMAC:
+		/**
+		 * AES-GMAC is an AEAD algo with NULL encryption and GMAC
+		 * authentication.
+		 */
+		aeaddata->algtype = OP_PCL_IPSEC_AES_NULL_WITH_GMAC;
+		break;
 	default:
 		DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u",
 			      aead_xform->algo);
@@ -2945,6 +2952,10 @@ dpaa2_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform,
 	case RTE_CRYPTO_AUTH_NULL:
 		authdata->algtype = OP_PCL_IPSEC_HMAC_NULL;
 		break;
+	case RTE_CRYPTO_AUTH_AES_GMAC:
+		DPAA2_SEC_ERR(
+			"AES_GMAC is supported as AEAD algo for IPSEC proto only");
+		return -ENOTSUP;
 	case RTE_CRYPTO_AUTH_SHA224_HMAC:
 	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
 	case RTE_CRYPTO_AUTH_SHA1:
@@ -2953,7 +2964,6 @@ dpaa2_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform,
 	case RTE_CRYPTO_AUTH_SHA224:
 	case RTE_CRYPTO_AUTH_SHA384:
 	case RTE_CRYPTO_AUTH_MD5:
-	case RTE_CRYPTO_AUTH_AES_GMAC:
 	case RTE_CRYPTO_AUTH_KASUMI_F9:
 	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
 	case RTE_CRYPTO_AUTH_ZUC_EIA3:
@@ -3096,6 +3106,7 @@ dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
 		case OP_PCL_IPSEC_AES_GCM8:
 		case OP_PCL_IPSEC_AES_GCM12:
 		case OP_PCL_IPSEC_AES_GCM16:
+		case OP_PCL_IPSEC_AES_NULL_WITH_GMAC:
 			memcpy(encap_pdb.gcm.salt,
 				(uint8_t *)&(ipsec_xform->salt), 4);
 			break;
@@ -3172,6 +3183,7 @@ dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
 		case OP_PCL_IPSEC_AES_GCM8:
 		case OP_PCL_IPSEC_AES_GCM12:
 		case OP_PCL_IPSEC_AES_GCM16:
+		case OP_PCL_IPSEC_AES_NULL_WITH_GMAC:
 			memcpy(decap_pdb.gcm.salt,
 				(uint8_t *)&(ipsec_xform->salt), 4);
 			break;
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
index 1756d917dd..6aa1c01e95 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
@@ -514,6 +514,36 @@ static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
 			}, }
 		}, }
 	},
+	{	/* AES GMAC (AEAD) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+			{.aead = {
+				.algo = RTE_CRYPTO_AEAD_AES_GMAC,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.digest_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.aad_size = {
+					.min = 0,
+					.max = 65535,
+					.increment = 1
+				},
+				.iv_size = {
+					.min = 12,
+					.max = 16,
+					.increment = 4
+				}
+			}, }
+		}, }
+	},
 	{	/* AES XCBC HMAC */
 		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 		{.sym = {
diff --git a/lib/cryptodev/rte_crypto_sym.h b/lib/cryptodev/rte_crypto_sym.h
index daa090b978..4644fa3e25 100644
--- a/lib/cryptodev/rte_crypto_sym.h
+++ b/lib/cryptodev/rte_crypto_sym.h
@@ -467,8 +467,10 @@ enum rte_crypto_aead_algorithm {
 	/**< AES algorithm in CCM mode. */
 	RTE_CRYPTO_AEAD_AES_GCM,
 	/**< AES algorithm in GCM mode. */
-	RTE_CRYPTO_AEAD_CHACHA20_POLY1305
+	RTE_CRYPTO_AEAD_CHACHA20_POLY1305,
 	/**< Chacha20 cipher with poly1305 authenticator */
+	RTE_CRYPTO_AEAD_AES_GMAC
+	/**< AES algorithm in GMAC mode. */
 };
 
 /** AEAD algorithm name strings */
-- 
2.25.1


^ permalink raw reply	[flat|nested] 42+ messages in thread

* [PATCH v2 5/8] crypto/dpaa2_sec: change digest size for AES_CMAC
  2021-12-28  9:10 ` [PATCH v2 0/8] NXP crypto drivers changes Gagandeep Singh
                     ` (3 preceding siblings ...)
  2021-12-28  9:10   ` [PATCH v2 4/8] crypto/dpaa2_sec: support AES-GMAC Gagandeep Singh
@ 2021-12-28  9:10   ` Gagandeep Singh
  2022-01-21 11:23     ` [EXT] " Akhil Goyal
  2021-12-28  9:10   ` [PATCH v2 6/8] crypto/dpaa2_sec: add useful debug prints in sec dequeue Gagandeep Singh
                     ` (2 subsequent siblings)
  7 siblings, 1 reply; 42+ messages in thread
From: Gagandeep Singh @ 2021-12-28  9:10 UTC (permalink / raw)
  To: gakhil, dev; +Cc: Hemant Agrawal

From: Hemant Agrawal <hemant.agrawal@nxp.com>

Change the digest size to a value supported by the HW engine.

Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
 drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
index 6aa1c01e95..ab652936bc 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
@@ -579,11 +579,11 @@ static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
 					.increment = 1
 				},
 				.digest_size = {
-					.min = 4,
+					.min = 12,
 					.max = 16,
 					.increment = 4
 				},
-				.aad_size = { 0 }
+				.iv_size = { 0 }
 			}, }
 		}, }
 	},
-- 
2.25.1


^ permalink raw reply	[flat|nested] 42+ messages in thread

* [PATCH v2 6/8] crypto/dpaa2_sec: add useful debug prints in sec dequeue
  2021-12-28  9:10 ` [PATCH v2 0/8] NXP crypto drivers changes Gagandeep Singh
                     ` (4 preceding siblings ...)
  2021-12-28  9:10   ` [PATCH v2 5/8] crypto/dpaa2_sec: change digest size for AES_CMAC Gagandeep Singh
@ 2021-12-28  9:10   ` Gagandeep Singh
  2021-12-28  9:10   ` [PATCH v2 7/8] crypto/dpaa2: fix to check next type for auth or cipher Gagandeep Singh
  2021-12-28  9:10   ` [PATCH v2 8/8] crypto/dpaa_sec: add debug framework Gagandeep Singh
  7 siblings, 0 replies; 42+ messages in thread
From: Gagandeep Singh @ 2021-12-28  9:10 UTC (permalink / raw)
  To: gakhil, dev; +Cc: Gagandeep Singh

Add a few useful debug prints in the dequeue function.
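
The new prints fire on the dequeue side when the frame descriptor returns a
non-zero status; the application only sees the affected operation come back
with an error status. A minimal, hedged sketch of that application-side check
(the function name and burst size are arbitrary):

#include <stdio.h>
#include <rte_cryptodev.h>

/* Drain one queue pair and flag operations the SEC engine rejected;
 * with drv_dump_mode > 0 the PMD prints its diagnostics for these. */
static void
sec_drain_qp(uint8_t dev_id, uint16_t qp_id)
{
	struct rte_crypto_op *ops[32];
	uint16_t i, nb;

	nb = rte_cryptodev_dequeue_burst(dev_id, qp_id, ops, 32);
	for (i = 0; i < nb; i++) {
		if (ops[i]->status != RTE_CRYPTO_OP_STATUS_SUCCESS)
			printf("crypto op %u failed (status %d)\n",
			       i, ops[i]->status);
		rte_crypto_op_free(ops[i]);
	}
}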

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 doc/guides/cryptodevs/dpaa2_sec.rst         |  10 ++
 drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c | 136 +++++++++++++++++++-
 2 files changed, 144 insertions(+), 2 deletions(-)

diff --git a/doc/guides/cryptodevs/dpaa2_sec.rst b/doc/guides/cryptodevs/dpaa2_sec.rst
index 06de988d51..875d918068 100644
--- a/doc/guides/cryptodevs/dpaa2_sec.rst
+++ b/doc/guides/cryptodevs/dpaa2_sec.rst
@@ -175,3 +175,13 @@ For enabling logs, use the following EAL parameter:
 
 Using ``crypto.dpaa2`` as log matching criteria, all Crypto PMD logs can be
 enabled which are lower than logging ``level``.
+
+Enabling debug prints
+---------------------
+
+Use dev arg option ``drv_dump_mode=x`` to dump useful debug prints on HW sec
+error. There are 3 dump modes available 0, 1 and 2. Mode 0 means no dump print
+on error, mode 1 means dump HW error code and mode 2 means dump HW error code
+along with other useful debugging information like session, queue, descriptor
+data.
+e.g. ``fslmc:dpseci.1,drv_dump_mode=1``
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
index 99f5157abe..b65416097c 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
@@ -28,6 +28,7 @@
 #include <fsl_dpopr.h>
 #include <fsl_dpseci.h>
 #include <fsl_mc_sys.h>
+#include <rte_hexdump.h>
 
 #include "dpaa2_sec_priv.h"
 #include "dpaa2_sec_event.h"
@@ -50,7 +51,17 @@
 
 #define NO_PREFETCH 0
 
+#define DRIVER_DUMP_MODE "drv_dump_mode"
+
+/* DPAA2_SEC_DP_DUMP levels */
+enum dpaa2_sec_dump_levels {
+	DPAA2_SEC_DP_NO_DUMP,
+	DPAA2_SEC_DP_ERR_DUMP,
+	DPAA2_SEC_DP_FULL_DUMP
+};
+
 uint8_t cryptodev_driver_id;
+uint8_t dpaa2_sec_dp_dump = DPAA2_SEC_DP_ERR_DUMP;
 
 #ifdef RTE_LIB_SECURITY
 static inline int
@@ -1784,6 +1795,83 @@ dpaa2_sec_enqueue_burst_ordered(void *qp, struct rte_crypto_op **ops,
 	return num_tx;
 }
 
+static void
+dpaa2_sec_dump(struct rte_crypto_op *op)
+{
+	int i;
+	dpaa2_sec_session *sess = NULL;
+	struct ctxt_priv *priv;
+	uint8_t bufsize;
+	struct rte_crypto_sym_op *sym_op;
+
+	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
+		sess = (dpaa2_sec_session *)get_sym_session_private_data(
+			op->sym->session, cryptodev_driver_id);
+#ifdef RTE_LIBRTE_SECURITY
+	else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
+		sess = (dpaa2_sec_session *)get_sec_session_private_data(
+			op->sym->sec_session);
+#endif
+
+	if (sess == NULL)
+		goto mbuf_dump;
+
+	priv = (struct ctxt_priv *)sess->ctxt;
+	printf("\n****************************************\n"
+		"session params:\n\tContext type:\t%d\n\tDirection:\t%s\n"
+		"\tCipher alg:\t%d\n\tAuth alg:\t%d\n\tAead alg:\t%d\n"
+		"\tCipher key len:\t%zd\n", sess->ctxt_type,
+		(sess->dir == DIR_ENC) ? "DIR_ENC" : "DIR_DEC",
+		sess->cipher_alg, sess->auth_alg, sess->aead_alg,
+		sess->cipher_key.length);
+		rte_hexdump(stdout, "cipher key", sess->cipher_key.data,
+				sess->cipher_key.length);
+		rte_hexdump(stdout, "auth key", sess->auth_key.data,
+				sess->auth_key.length);
+	printf("\tAuth key len:\t%zd\n\tIV len:\t\t%d\n\tIV offset:\t%d\n"
+		"\tdigest length:\t%d\n\tstatus:\t\t%d\n\taead auth only"
+		" len:\t%d\n\taead cipher text:\t%d\n",
+		sess->auth_key.length, sess->iv.length, sess->iv.offset,
+		sess->digest_length, sess->status,
+		sess->ext_params.aead_ctxt.auth_only_len,
+		sess->ext_params.aead_ctxt.auth_cipher_text);
+#ifdef RTE_LIBRTE_SECURITY
+	printf("PDCP session params:\n"
+		"\tDomain:\t\t%d\n\tBearer:\t\t%d\n\tpkt_dir:\t%d\n\thfn_ovd:"
+		"\t%d\n\tsn_size:\t%d\n\thfn_ovd_offset:\t%d\n\thfn:\t\t%d\n"
+		"\thfn_threshold:\t0x%x\n", sess->pdcp.domain,
+		sess->pdcp.bearer, sess->pdcp.pkt_dir, sess->pdcp.hfn_ovd,
+		sess->pdcp.sn_size, sess->pdcp.hfn_ovd_offset, sess->pdcp.hfn,
+		sess->pdcp.hfn_threshold);
+
+#endif
+	bufsize = (uint8_t)priv->flc_desc[0].flc.word1_sdl;
+	printf("Descriptor Dump:\n");
+	for (i = 0; i < bufsize; i++)
+		printf("\tDESC[%d]:0x%x\n", i, priv->flc_desc[0].desc[i]);
+
+	printf("\n");
+mbuf_dump:
+	sym_op = op->sym;
+	if (sym_op->m_src) {
+		printf("Source mbuf:\n");
+		rte_pktmbuf_dump(stdout, sym_op->m_src, sym_op->m_src->data_len);
+	}
+	if (sym_op->m_dst) {
+		printf("Destination mbuf:\n");
+		rte_pktmbuf_dump(stdout, sym_op->m_dst, sym_op->m_dst->data_len);
+	}
+
+	printf("Session address = %p\ncipher offset: %d, length: %d\n"
+		"auth offset: %d, length:  %d\n aead offset: %d, length: %d\n"
+		, sym_op->session,
+		sym_op->cipher.data.offset, sym_op->cipher.data.length,
+		sym_op->auth.data.offset, sym_op->auth.data.length,
+		sym_op->aead.data.offset, sym_op->aead.data.length);
+	printf("\n");
+
+}
+
 static uint16_t
 dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
 			uint16_t nb_ops)
@@ -1865,8 +1953,13 @@ dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
 
 		if (unlikely(fd->simple.frc)) {
 			/* TODO Parse SEC errors */
-			DPAA2_SEC_DP_ERR("SEC returned Error - %x\n",
-				      fd->simple.frc);
+			if (dpaa2_sec_dp_dump > DPAA2_SEC_DP_NO_DUMP) {
+				DPAA2_SEC_DP_ERR("SEC returned Error - %x\n",
+						 fd->simple.frc);
+				if (dpaa2_sec_dp_dump > DPAA2_SEC_DP_ERR_DUMP)
+					dpaa2_sec_dump(ops[num_rx]);
+			}
+
 			dpaa2_qp->rx_vq.err_pkts += 1;
 			ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_ERROR;
 		} else {
@@ -4132,6 +4225,42 @@ dpaa2_sec_uninit(const struct rte_cryptodev *dev)
 	return 0;
 }
 
+static int
+check_devargs_handler(__rte_unused const char *key, const char *value,
+		      __rte_unused void *opaque)
+{
+	dpaa2_sec_dp_dump = atoi(value);
+	if (dpaa2_sec_dp_dump > DPAA2_SEC_DP_FULL_DUMP) {
+		DPAA2_SEC_WARN("WARN: DPAA2_SEC_DP_DUMP_LEVEL is not "
+			      "supported, changing to FULL error prints\n");
+		dpaa2_sec_dp_dump = DPAA2_SEC_DP_FULL_DUMP;
+	}
+
+	return 0;
+}
+
+static void
+dpaa2_sec_get_devargs(struct rte_devargs *devargs, const char *key)
+{
+	struct rte_kvargs *kvlist;
+
+	if (!devargs)
+		return;
+
+	kvlist = rte_kvargs_parse(devargs->args, NULL);
+	if (!kvlist)
+		return;
+
+	if (!rte_kvargs_count(kvlist, key)) {
+		rte_kvargs_free(kvlist);
+		return;
+	}
+
+	rte_kvargs_process(kvlist, key,
+			check_devargs_handler, NULL);
+	rte_kvargs_free(kvlist);
+}
+
 static int
 dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
 {
@@ -4233,6 +4362,7 @@ dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
 		goto init_error;
 	}
 
+	dpaa2_sec_get_devargs(cryptodev->device->devargs, DRIVER_DUMP_MODE);
 	DPAA2_SEC_INFO("driver %s: created", cryptodev->data->name);
 	return 0;
 
@@ -4331,4 +4461,6 @@ static struct cryptodev_driver dpaa2_sec_crypto_drv;
 RTE_PMD_REGISTER_DPAA2(CRYPTODEV_NAME_DPAA2_SEC_PMD, rte_dpaa2_sec_driver);
 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa2_sec_crypto_drv,
 		rte_dpaa2_sec_driver.driver, cryptodev_driver_id);
+RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_DPAA2_SEC_PMD,
+		DRIVER_DUMP_MODE "=<int>");
 RTE_LOG_REGISTER(dpaa2_logtype_sec, pmd.crypto.dpaa2, NOTICE);
-- 
2.25.1


^ permalink raw reply	[flat|nested] 42+ messages in thread

* [PATCH v2 7/8] crypto/dpaa2: fix to check next type for auth or cipher
  2021-12-28  9:10 ` [PATCH v2 0/8] NXP crypto drivers changes Gagandeep Singh
                     ` (5 preceding siblings ...)
  2021-12-28  9:10   ` [PATCH v2 6/8] crypto/dpaa2_sec: add useful debug prints in sec dequeue Gagandeep Singh
@ 2021-12-28  9:10   ` Gagandeep Singh
  2021-12-28  9:10   ` [PATCH v2 8/8] crypto/dpaa_sec: add debug framework Gagandeep Singh
  7 siblings, 0 replies; 42+ messages in thread
From: Gagandeep Singh @ 2021-12-28  9:10 UTC (permalink / raw)
  To: gakhil, dev; +Cc: Hemant Agrawal, stable

From: Hemant Agrawal <hemant.agrawal@nxp.com>

This patch adds more checks on the next xform type for PDCP cases.
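
The tightened code handles chained transforms, where xform->next may point at
a second stage; the session setup now also verifies that the chained element
really is the complementary type before using it as such. A hedged sketch of a
valid cipher-then-auth chain for a PDCP session (algorithm choice, key buffers
and sizes are illustrative):

#include <rte_crypto_sym.h>

static uint8_t cipher_key[16], auth_key[16];	/* placeholder keys */

static struct rte_crypto_sym_xform auth_xform = {
	.type = RTE_CRYPTO_SYM_XFORM_AUTH,
	.auth = {
		.op = RTE_CRYPTO_AUTH_OP_GENERATE,
		.algo = RTE_CRYPTO_AUTH_SNOW3G_UIA2,
		.key = { .data = auth_key, .length = sizeof(auth_key) },
		.digest_length = 4,
	},
};

static struct rte_crypto_sym_xform cipher_xform = {
	.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
	/* next->type is RTE_CRYPTO_SYM_XFORM_AUTH, so the added check
	 * accepts the chain; any other next type is no longer treated
	 * as authentication data. */
	.next = &auth_xform,
	.cipher = {
		.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
		.algo = RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
		.key = { .data = cipher_key, .length = sizeof(cipher_key) },
		.iv = { .offset = 0, .length = 16 },
	},
};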

Fixes: 45e019608f31 ("crypto/dpaa2_sec: support integrity only PDCP")
Fixes: a1173d55598c ("crypto/dpaa_sec: support PDCP offload")
Cc: stable@dpdk.org

Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
 drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c | 6 ++++--
 drivers/crypto/dpaa_sec/dpaa_sec.c          | 6 ++++--
 2 files changed, 8 insertions(+), 4 deletions(-)

diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
index b65416097c..3f3540eeb6 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
@@ -3406,13 +3406,15 @@ dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
 	/* find xfrm types */
 	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
 		cipher_xform = &xform->cipher;
-		if (xform->next != NULL) {
+		if (xform->next != NULL &&
+			xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
 			session->ext_params.aead_ctxt.auth_cipher_text = true;
 			auth_xform = &xform->next->auth;
 		}
 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
 		auth_xform = &xform->auth;
-		if (xform->next != NULL) {
+		if (xform->next != NULL &&
+			xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
 			session->ext_params.aead_ctxt.auth_cipher_text = false;
 			cipher_xform = &xform->next->cipher;
 		}
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.c b/drivers/crypto/dpaa_sec/dpaa_sec.c
index 1dedd9eee5..af166252ca 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.c
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.c
@@ -2984,11 +2984,13 @@ dpaa_sec_set_pdcp_session(struct rte_cryptodev *dev,
 	/* find xfrm types */
 	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
 		cipher_xform = &xform->cipher;
-		if (xform->next != NULL)
+		if (xform->next != NULL &&
+			xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
 			auth_xform = &xform->next->auth;
 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
 		auth_xform = &xform->auth;
-		if (xform->next != NULL)
+		if (xform->next != NULL &&
+			xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
 			cipher_xform = &xform->next->cipher;
 	} else {
 		DPAA_SEC_ERR("Invalid crypto type");
-- 
2.25.1


^ permalink raw reply	[flat|nested] 42+ messages in thread

* [PATCH v2 8/8] crypto/dpaa_sec: add debug framework
  2021-12-28  9:10 ` [PATCH v2 0/8] NXP crypto drivers changes Gagandeep Singh
                     ` (6 preceding siblings ...)
  2021-12-28  9:10   ` [PATCH v2 7/8] crypto/dpaa2: fix to check next type for auth or cipher Gagandeep Singh
@ 2021-12-28  9:10   ` Gagandeep Singh
  2022-01-21 11:20     ` [EXT] " Akhil Goyal
  7 siblings, 1 reply; 42+ messages in thread
From: Gagandeep Singh @ 2021-12-28  9:10 UTC (permalink / raw)
  To: gakhil, dev; +Cc: Gagandeep Singh

Add useful debug prints in the DPAA driver for
easier debugging.
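
Besides the prints, the patch extends the DPAA bus name parser to accept
dpaa_sec-<n> device names (n = 1 to 4), which is what makes the documented
dpaa_bus:dpaa_sec-1,drv_dump_mode=<x> string usable. Normally the dev arg is
given on the EAL command line as described in the documentation added below;
purely as a hedged illustration, a device that has not already been probed
could also be attached programmatically with the same string:

#include <rte_dev.h>

/* Illustrative only: probe the first DPAA SEC device with the full
 * dump level, using the name format accepted by the new bus parser. */
static int
attach_dpaa_sec_with_dump(void)
{
	return rte_dev_probe("dpaa_bus:dpaa_sec-1,drv_dump_mode=2");
}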

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 doc/guides/cryptodevs/dpaa_sec.rst |  10 ++
 drivers/bus/dpaa/dpaa_bus.c        |  16 ++-
 drivers/crypto/dpaa_sec/dpaa_sec.c | 192 ++++++++++++++++++++++++++++-
 3 files changed, 213 insertions(+), 5 deletions(-)

diff --git a/doc/guides/cryptodevs/dpaa_sec.rst b/doc/guides/cryptodevs/dpaa_sec.rst
index bac82421bc..0c8d6cf3da 100644
--- a/doc/guides/cryptodevs/dpaa_sec.rst
+++ b/doc/guides/cryptodevs/dpaa_sec.rst
@@ -123,3 +123,13 @@ For enabling logs, use the following EAL parameter:
 
 Using ``pmd.crypto.dpaa`` as log matching criteria, all Crypto PMD logs can be
 enabled which are lower than logging ``level``.
+
+Enabling debug prints
+---------------------
+
+Use dev arg option ``drv_dump_mode=x`` to dump useful debug prints on HW sec
+error. There are 3 dump modes available 0, 1 and 2. Mode 0 means no dump print
+on error, mode 1 means dump HW error code and mode 2 means dump HW error code
+along with other useful debugging information like session, queue, descriptor
+data.
+e.g. ``dpaa_bus:dpaa_sec-1,drv_dump_mode=1``
diff --git a/drivers/bus/dpaa/dpaa_bus.c b/drivers/bus/dpaa/dpaa_bus.c
index 737ac8d8c5..a1db25dce9 100644
--- a/drivers/bus/dpaa/dpaa_bus.c
+++ b/drivers/bus/dpaa/dpaa_bus.c
@@ -429,6 +429,7 @@ rte_dpaa_bus_parse(const char *name, void *out)
 {
 	unsigned int i, j;
 	size_t delta;
+	size_t max_name_len;
 
 	/* There are two ways of passing device name, with and without
 	 * separator. "dpaa_bus:fm1-mac3" with separator, and "fm1-mac3"
@@ -444,14 +445,21 @@ rte_dpaa_bus_parse(const char *name, void *out)
 		delta = 5;
 	}
 
-	if (sscanf(&name[delta], "fm%u-mac%u", &i, &j) != 2 ||
-	    i >= 2 || j >= 16) {
-		return -EINVAL;
+	if (strncmp("dpaa_sec", &name[delta], 8) == 0) {
+		if (sscanf(&name[delta], "dpaa_sec-%u", &i) != 1 ||
+				i < 1 || i > 4)
+			return -EINVAL;
+		max_name_len = sizeof("dpaa_sec-.") - 1;
+	} else {
+		if (sscanf(&name[delta], "fm%u-mac%u", &i, &j) != 2 ||
+				i >= 2 || j >= 16)
+			return -EINVAL;
+
+		max_name_len = sizeof("fm.-mac..") - 1;
 	}
 
 	if (out != NULL) {
 		char *out_name = out;
-		const size_t max_name_len = sizeof("fm.-mac..") - 1;
 
 		/* Do not check for truncation, either name ends with
 		 * '\0' or the device name is followed by parameters and there
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.c b/drivers/crypto/dpaa_sec/dpaa_sec.c
index af166252ca..b436039117 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.c
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.c
@@ -27,6 +27,7 @@
 #include <rte_memcpy.h>
 #include <rte_string_fns.h>
 #include <rte_spinlock.h>
+#include <rte_hexdump.h>
 
 #include <fsl_usd.h>
 #include <fsl_qman.h>
@@ -45,6 +46,17 @@
 #include <dpaa_sec_log.h>
 #include <dpaax_iova_table.h>
 
+#define DRIVER_DUMP_MODE "drv_dump_mode"
+
+/* DPAA_SEC_DP_DUMP levels */
+enum dpaa_sec_dump_levels {
+	DPAA_SEC_DP_NO_DUMP,
+	DPAA_SEC_DP_ERR_DUMP,
+	DPAA_SEC_DP_FULL_DUMP
+};
+
+uint8_t dpaa_sec_dp_dump = DPAA_SEC_DP_ERR_DUMP;
+
 uint8_t dpaa_cryptodev_driver_id;
 
 static inline void
@@ -649,6 +661,139 @@ dpaa_sec_prep_cdb(dpaa_sec_session *ses)
 	return 0;
 }
 
+static void
+dpaa_sec_dump(struct dpaa_sec_op_ctx *ctx, struct dpaa_sec_qp *qp)
+{
+	struct dpaa_sec_job *job = &ctx->job;
+	struct rte_crypto_op *op = ctx->op;
+	dpaa_sec_session *sess = NULL;
+	struct sec_cdb c_cdb, *cdb;
+	uint8_t bufsize;
+	struct rte_crypto_sym_op *sym_op;
+	struct qm_sg_entry sg[2];
+
+	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
+		sess = (dpaa_sec_session *)
+			get_sym_session_private_data(
+					op->sym->session,
+					dpaa_cryptodev_driver_id);
+#ifdef RTE_LIBRTE_SECURITY
+	else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
+		sess = (dpaa_sec_session *)
+			get_sec_session_private_data(
+					op->sym->sec_session);
+#endif
+	if (sess == NULL) {
+		printf("session is NULL\n");
+		goto mbuf_dump;
+	}
+
+	cdb = &sess->cdb;
+	rte_memcpy(&c_cdb, cdb, sizeof(struct sec_cdb));
+#ifdef RTE_LIBRTE_SECURITY
+	printf("\nsession protocol type = %d\n", sess->proto_alg);
+#endif
+	printf("\n****************************************\n"
+		"session params:\n\tContext type:\t%d\n\tDirection:\t%s\n"
+		"\tCipher alg:\t%d\n\tAuth alg:\t%d\n\tAead alg:\t%d\n"
+		"\tCipher key len:\t%ld\n\tCipher alg:\t%d\n"
+		"\tCipher algmode:\t%d\n", sess->ctxt,
+		(sess->dir == DIR_ENC) ? "DIR_ENC" : "DIR_DEC",
+		sess->cipher_alg, sess->auth_alg, sess->aead_alg,
+		(long)sess->cipher_key.length, sess->cipher_key.alg,
+		sess->cipher_key.algmode);
+		rte_hexdump(stdout, "cipher key", sess->cipher_key.data,
+				sess->cipher_key.length);
+		rte_hexdump(stdout, "auth key", sess->auth_key.data,
+				sess->auth_key.length);
+	printf("\tAuth key len:\t%ld\n\tAuth alg:\t%d\n"
+		"\tAuth algmode:\t%d\n\tIV len:\t\t%d\n\tIV offset:\t%d\n"
+		"\tdigest length:\t%d\n\tauth only len:\t\t%d\n"
+		"\taead cipher text:\t%d\n",
+		(long)sess->auth_key.length, sess->auth_key.alg,
+		sess->auth_key.algmode,
+		sess->iv.length, sess->iv.offset,
+		sess->digest_length, sess->auth_only_len,
+		sess->auth_cipher_text);
+#ifdef RTE_LIBRTE_SECURITY
+	printf("PDCP session params:\n"
+		"\tDomain:\t\t%d\n\tBearer:\t\t%d\n\tpkt_dir:\t%d\n\thfn_ovd:"
+		"\t%d\n\tsn_size:\t%d\n\tsdap_enabled:\t%d\n\thfn_ovd_offset:"
+		"\t%d\n\thfn:\t\t%d\n"
+		"\thfn_threshold:\t0x%x\n", sess->pdcp.domain,
+		sess->pdcp.bearer, sess->pdcp.pkt_dir, sess->pdcp.hfn_ovd,
+		sess->pdcp.sn_size, sess->pdcp.sdap_enabled,
+		sess->pdcp.hfn_ovd_offset, sess->pdcp.hfn,
+		sess->pdcp.hfn_threshold);
+#endif
+	c_cdb.sh_hdr.hi.word = rte_be_to_cpu_32(c_cdb.sh_hdr.hi.word);
+	c_cdb.sh_hdr.lo.word = rte_be_to_cpu_32(c_cdb.sh_hdr.lo.word);
+	bufsize = c_cdb.sh_hdr.hi.field.idlen;
+
+	printf("cdb = %p\n\n", cdb);
+	printf("Descriptor size = %d\n", bufsize);
+	int m;
+	for (m = 0; m < bufsize; m++)
+		printf("0x%x\n", rte_be_to_cpu_32(c_cdb.sh_desc[m]));
+
+	printf("\n");
+mbuf_dump:
+	sym_op = op->sym;
+	if (sym_op->m_src) {
+		printf("Source mbuf:\n");
+		rte_pktmbuf_dump(stdout, sym_op->m_src,
+				 sym_op->m_src->data_len);
+	}
+	if (sym_op->m_dst) {
+		printf("Destination mbuf:\n");
+		rte_pktmbuf_dump(stdout, sym_op->m_dst,
+				 sym_op->m_dst->data_len);
+	}
+
+	printf("Session address = %p\ncipher offset: %d, length: %d\n"
+		"auth offset: %d, length:  %d\n aead offset: %d, length: %d\n",
+		sym_op->session, sym_op->cipher.data.offset,
+		sym_op->cipher.data.length,
+		sym_op->auth.data.offset, sym_op->auth.data.length,
+		sym_op->aead.data.offset, sym_op->aead.data.length);
+	printf("\n");
+
+	printf("******************************************************\n");
+	printf("ctx info:\n");
+	printf("job->sg[0] output info:\n");
+	memcpy(&sg[0], &job->sg[0], sizeof(sg[0]));
+	printf("\taddr = 0x%lx,\n\tlen = %d,\n\tfinal = %d,\n\textention = %d"
+		"\n\tbpid = %d\n\toffset = %d\n",
+		(unsigned long)sg[0].addr, sg[0].length, sg[0].final,
+		sg[0].extension, sg[0].bpid, sg[0].offset);
+	printf("\njob->sg[1] input info:\n");
+	memcpy(&sg[1], &job->sg[1], sizeof(sg[1]));
+	hw_sg_to_cpu(&sg[1]);
+	printf("\taddr = 0x%lx,\n\tlen = %d,\n\tfinal = %d,\n\textention = %d"
+		"\n\tbpid = %d\n\toffset = %d\n",
+		(unsigned long)sg[1].addr, sg[1].length, sg[1].final,
+		sg[1].extension, sg[1].bpid, sg[1].offset);
+
+	printf("\nctx pool addr = %p\n", ctx->ctx_pool);
+	if (ctx->ctx_pool)
+		printf("ctx pool available counts = %d\n",
+			rte_mempool_avail_count(ctx->ctx_pool));
+
+	printf("\nop pool addr = %p\n", op->mempool);
+	if (op->mempool)
+		printf("op pool available counts = %d\n",
+			rte_mempool_avail_count(op->mempool));
+
+	printf("********************************************************\n");
+	printf("Queue data:\n");
+	printf("\tFQID = 0x%x\n\tstate = %d\n\tnb_desc = %d\n"
+		"\tctx_pool = %p\n\trx_pkts = %d\n\ttx_pkts"
+	       "= %d\n\trx_errs = %d\n\ttx_errs = %d\n\n",
+		qp->outq.fqid, qp->outq.state, qp->outq.nb_desc,
+		qp->ctx_pool, qp->rx_pkts, qp->tx_pkts,
+		qp->rx_errs, qp->tx_errs);
+}
+
 /* qp is lockless, should be accessed by only one thread */
 static int
 dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
@@ -716,7 +861,12 @@ dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
 		if (!ctx->fd_status) {
 			op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
 		} else {
-			DPAA_SEC_DP_WARN("SEC return err:0x%x", ctx->fd_status);
+			if (dpaa_sec_dp_dump > DPAA_SEC_DP_NO_DUMP) {
+				DPAA_SEC_DP_WARN("SEC return err:0x%x\n",
+						  ctx->fd_status);
+				if (dpaa_sec_dp_dump > DPAA_SEC_DP_ERR_DUMP)
+					dpaa_sec_dump(ctx, qp);
+			}
 			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
 		}
 		ops[pkts++] = op;
@@ -3458,6 +3608,42 @@ dpaa_sec_uninit(struct rte_cryptodev *dev)
 	return 0;
 }
 
+static int
+check_devargs_handler(__rte_unused const char *key, const char *value,
+		      __rte_unused void *opaque)
+{
+	dpaa_sec_dp_dump = atoi(value);
+	if (dpaa_sec_dp_dump > DPAA_SEC_DP_FULL_DUMP) {
+		DPAA_SEC_WARN("WARN: DPAA_SEC_DP_DUMP_LEVEL is not "
+			      "supported, changing to FULL error prints\n");
+		dpaa_sec_dp_dump = DPAA_SEC_DP_FULL_DUMP;
+	}
+
+	return 0;
+}
+
+static void
+dpaa_sec_get_devargs(struct rte_devargs *devargs, const char *key)
+{
+	struct rte_kvargs *kvlist;
+
+	if (!devargs)
+		return;
+
+	kvlist = rte_kvargs_parse(devargs->args, NULL);
+	if (!kvlist)
+		return;
+
+	if (!rte_kvargs_count(kvlist, key)) {
+		rte_kvargs_free(kvlist);
+		return;
+	}
+
+	rte_kvargs_process(kvlist, key,
+				check_devargs_handler, NULL);
+	rte_kvargs_free(kvlist);
+}
+
 static int
 dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
 {
@@ -3533,6 +3719,8 @@ dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
 		}
 	}
 
+	dpaa_sec_get_devargs(cryptodev->device->devargs, DRIVER_DUMP_MODE);
+
 	RTE_LOG(INFO, PMD, "%s cryptodev init\n", cryptodev->data->name);
 	return 0;
 
@@ -3649,4 +3837,6 @@ static struct cryptodev_driver dpaa_sec_crypto_drv;
 RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver,
 		dpaa_cryptodev_driver_id);
+RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_DPAA_SEC_PMD,
+		DRIVER_DUMP_MODE "=<int>");
 RTE_LOG_REGISTER(dpaa_logtype_sec, pmd.crypto.dpaa, NOTICE);
-- 
2.25.1


^ permalink raw reply	[flat|nested] 42+ messages in thread

* RE: [EXT] [PATCH v2 8/8] crypto/dpaa_sec: add debug framework
  2021-12-28  9:10   ` [PATCH v2 8/8] crypto/dpaa_sec: add debug framework Gagandeep Singh
@ 2022-01-21 11:20     ` Akhil Goyal
  0 siblings, 0 replies; 42+ messages in thread
From: Akhil Goyal @ 2022-01-21 11:20 UTC (permalink / raw)
  To: Gagandeep Singh, dev

> Adding useful debug prints in DPAA driver for
> easy debugging.
> 
> Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
> ---
>  doc/guides/cryptodevs/dpaa_sec.rst |  10 ++
>  drivers/bus/dpaa/dpaa_bus.c        |  16 ++-
>  drivers/crypto/dpaa_sec/dpaa_sec.c | 192 ++++++++++++++++++++++++++++-
>  3 files changed, 213 insertions(+), 5 deletions(-)
> 
Fix checkpatch issues

Warning in drivers/crypto/dpaa_sec/dpaa_sec.c:
Using %l format, prefer %PRI*64 if type is [u]int64_t
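
For reference, the warning points at the %ld/%lx prints of the key lengths and
scatter-gather addresses in the dump code above; the usual fix is to print
64-bit values with the PRI*64 macros from <inttypes.h>. A minimal sketch, with
illustrative variable names rather than the exact v3 change:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Sketch only: print 64-bit values with PRI*64 instead of %ld/%lx,
 * which is what checkpatch asks for. Variable names are illustrative.
 */
static void
dump_u64_fields(uint64_t key_len, uint64_t sg_addr)
{
	/* Before: printf("...%ld...", (long)key_len);            */
	/* Before: printf("...0x%lx...", (unsigned long)sg_addr); */
	printf("\tAuth key len:\t%" PRIu64 "\n", key_len);
	printf("\taddr = 0x%" PRIx64 "\n", sg_addr);
}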

^ permalink raw reply	[flat|nested] 42+ messages in thread

* RE: [EXT] [PATCH v2 5/8] crypto/dpaa2_sec: change digest size for AES_CMAC
  2021-12-28  9:10   ` [PATCH v2 5/8] crypto/dpaa2_sec: change digest size for AES_CMAC Gagandeep Singh
@ 2022-01-21 11:23     ` Akhil Goyal
  2022-02-08 14:11       ` Gagandeep Singh
  0 siblings, 1 reply; 42+ messages in thread
From: Akhil Goyal @ 2022-01-21 11:23 UTC (permalink / raw)
  To: Gagandeep Singh, dev; +Cc: Hemant Agrawal

> From: Hemant Agrawal <hemant.agrawal@nxp.com>
> 
> Change the digest size to supported value by the HW engine.
> 
> Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
> ---
>  drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h | 4 ++--
>  1 file changed, 2 insertions(+), 2 deletions(-)
> 
> diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
> b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
> index 6aa1c01e95..ab652936bc 100644
> --- a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
> +++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
> @@ -579,11 +579,11 @@ static const struct rte_cryptodev_capabilities
> dpaa2_sec_capabilities[] = {
>  					.increment = 1
>  				},
>  				.digest_size = {
> -					.min = 4,
> +					.min = 12,
>  					.max = 16,
>  					.increment = 4
>  				},
> -				.aad_size = { 0 }
> +				.iv_size = { 0 }
The digest size is changed, but why is aad_size removed in this patch?
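
For context, the auth capability structure carries both an aad_size and an
iv_size range, so the digest-size fix does not by itself require swapping one
field for the other. A rough sketch of an AES-CMAC auth entry with both fields
kept explicit (the ranges are illustrative, not the values chosen for v3):

#include <rte_cryptodev.h>

/* Illustrative AES-CMAC auth capability entry; ranges are examples only. */
static const struct rte_cryptodev_capabilities aes_cmac_cap_sketch[] = {
	{	/* AES CMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_AES_CMAC,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				},
				.digest_size = {
					.min = 12,
					.max = 16,
					.increment = 4
				},
				.aad_size = { 0 },
				.iv_size = { 0 }
			}, }
		}, }
	},
	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};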


^ permalink raw reply	[flat|nested] 42+ messages in thread

* RE: [EXT] [PATCH v2 4/8] crypto/dpaa2_sec: support AES-GMAC
  2021-12-28  9:10   ` [PATCH v2 4/8] crypto/dpaa2_sec: support AES-GMAC Gagandeep Singh
@ 2022-01-21 11:29     ` Akhil Goyal
  2022-02-08 14:15       ` Gagandeep Singh
  0 siblings, 1 reply; 42+ messages in thread
From: Akhil Goyal @ 2022-01-21 11:29 UTC (permalink / raw)
  To: Gagandeep Singh, dev; +Cc: Akhil Goyal

> From: Akhil Goyal <akhil.goyal@nxp.com>
> 
> This patch supports AES_GMAC algorithm for DPAA2
> driver.
> 
> Signed-off-by: Akhil Goyal <akhil.goyal@nxp.com>
> Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
> ---
>  doc/guides/cryptodevs/features/dpaa2_sec.ini |  1 +
>  drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c  | 14 ++++++++-
>  drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h    | 30 ++++++++++++++++++++
>  lib/cryptodev/rte_crypto_sym.h               |  4 ++-
>  4 files changed, 47 insertions(+), 2 deletions(-)

This patch should be split in two - the cryptodev change should be a separate patch.

> diff --git a/lib/cryptodev/rte_crypto_sym.h b/lib/cryptodev/rte_crypto_sym.h
> index daa090b978..4644fa3e25 100644
> --- a/lib/cryptodev/rte_crypto_sym.h
> +++ b/lib/cryptodev/rte_crypto_sym.h
> @@ -467,8 +467,10 @@ enum rte_crypto_aead_algorithm {
>  	/**< AES algorithm in CCM mode. */
>  	RTE_CRYPTO_AEAD_AES_GCM,
>  	/**< AES algorithm in GCM mode. */
> -	RTE_CRYPTO_AEAD_CHACHA20_POLY1305
> +	RTE_CRYPTO_AEAD_CHACHA20_POLY1305,
>  	/**< Chacha20 cipher with poly1305 authenticator */
> +	RTE_CRYPTO_AEAD_AES_GMAC
> +	/**< AES algorithm in GMAC mode. */
>  };
AES-GMAC is also defined as an AUTH algo. It may be removed, but that would be
an ABI break.
Is it not possible to use AES-GMAC as an auth algo?
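
For reference, rte_crypto_sym.h already lists RTE_CRYPTO_AUTH_AES_GMAC in the
auth algorithm enum, so an application can request GMAC through an auth
transform instead of a new AEAD entry. A minimal sketch of such a transform
(the key, IV and digest sizes here are illustrative examples):

#include <string.h>
#include <rte_crypto_sym.h>

/* Sketch: request AES-GMAC via the existing auth transform. */
static void
fill_gmac_auth_xform(struct rte_crypto_sym_xform *xform,
		     const uint8_t *key, uint16_t key_len)
{
	memset(xform, 0, sizeof(*xform));
	xform->type = RTE_CRYPTO_SYM_XFORM_AUTH;
	xform->next = NULL;
	xform->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
	xform->auth.algo = RTE_CRYPTO_AUTH_AES_GMAC;
	xform->auth.key.data = key;
	xform->auth.key.length = key_len;	/* e.g. 16 for AES-128 */
	xform->auth.iv.offset = 0;		/* IV placed in op private data */
	xform->auth.iv.length = 12;		/* 96-bit IV, as for GCM */
	xform->auth.digest_length = 16;
}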

^ permalink raw reply	[flat|nested] 42+ messages in thread

* RE: [EXT] [PATCH v2 3/8] crypto/dpaa2_sec: ordered queue support
  2021-12-28  9:10   ` [PATCH v2 3/8] crypto/dpaa2_sec: ordered queue support Gagandeep Singh
@ 2022-01-21 11:31     ` Akhil Goyal
  0 siblings, 0 replies; 42+ messages in thread
From: Akhil Goyal @ 2022-01-21 11:31 UTC (permalink / raw)
  To: Gagandeep Singh, dev; +Cc: Nipun Gupta

> From: Nipun Gupta <nipun.gupta@nxp.com>
> 
> This patch supports ordered queue for DPAA2 platform.
> 
> Signed-off-by: Nipun Gupta <nipun.gupta@nxp.com>
> ---
>  drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c | 255 +++++++++++++++++++-
>  drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h   |   8 +-
>  drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h    |  14 +-
>  3 files changed, 263 insertions(+), 14 deletions(-)
> 

> +static uint16_t
> +dpaa2_sec_enqueue_burst_ordered(void *qp, struct rte_crypto_op **ops,
> +			uint16_t nb_ops)
> +{
> +	/* Function to transmit the frames to given device and VQ*/
> +	uint32_t loop;
> +	int32_t ret;
> +	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
> +	uint32_t frames_to_send, num_free_eq_desc, retry_count;
> +	struct qbman_eq_desc eqdesc[MAX_TX_RING_SLOTS];
> +	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
> +	struct qbman_swp *swp;
> +	uint16_t num_tx = 0;
> +	/*todo - need to support multiple buffer pools */

Remove/fix TODO

> 
> @@ -3780,6 +3983,40 @@ dpaa2_sec_eventq_attach(const struct
> rte_cryptodev *dev,
>  		cfg.options |= DPSECI_QUEUE_OPT_ORDER_PRESERVATION;
>  		cfg.order_preservation_en = 1;
>  	}
> +
> +	if (event->sched_type == RTE_SCHED_TYPE_ORDERED) {
> +		struct opr_cfg ocfg;
> +
> +		/* Restoration window size = 256 frames */
> +		ocfg.oprrws = 3;
> +		/* Restoration window size = 512 frames for LX2 */
> +		if (dpaa2_svr_family == SVR_LX2160A)
> +			ocfg.oprrws = 4;
> +		/* Auto advance NESN window enabled */
> +		ocfg.oa = 1;
> +		/* Late arrival window size disabled */
> +		ocfg.olws = 0;
> +		/* ORL resource exhaustion advance NESN disabled */
> +		ocfg.oeane = 0;
> +		/* Loose ordering enabled */
> +		ocfg.oloe = 1;
> +		priv->en_loose_ordered = 1;
> +		/* Strict ordering enabled if explicitly set */
> +		if (getenv("DPAA2_STRICT_ORDERING_ENABLE")) {
> +			ocfg.oloe = 0;
> +			priv->en_loose_ordered = 0;
> +		}

Can we use a devarg to enable strict ordering instead of an environment variable?
This also needs to be documented.
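
A devargs toggle could follow the same rte_kvargs pattern used for the DPAA1
dump level elsewhere in this series; a rough sketch, where the key name
"drv_strict_order" is purely hypothetical:

#include <stdlib.h>
#include <rte_common.h>
#include <rte_devargs.h>
#include <rte_kvargs.h>

/* Hypothetical devargs key; the real name and its documentation are up to the driver. */
#define DRIVER_STRICT_ORDER "drv_strict_order"

static int
strict_order_handler(__rte_unused const char *key, const char *value,
		     void *opaque)
{
	int *strict = opaque;

	*strict = atoi(value) ? 1 : 0;
	return 0;
}

/* Return 1 if strict ordering was requested through devargs, else 0. */
static int
dpaa2_sec_strict_order_requested(struct rte_devargs *devargs)
{
	struct rte_kvargs *kvlist;
	int strict = 0;

	if (devargs == NULL)
		return 0;
	kvlist = rte_kvargs_parse(devargs->args, NULL);
	if (kvlist == NULL)
		return 0;
	if (rte_kvargs_count(kvlist, DRIVER_STRICT_ORDER))
		rte_kvargs_process(kvlist, DRIVER_STRICT_ORDER,
				   strict_order_handler, &strict);
	rte_kvargs_free(kvlist);
	return strict;
}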



^ permalink raw reply	[flat|nested] 42+ messages in thread

* RE: [EXT] [PATCH v2 5/8] crypto/dpaa2_sec: change digest size for AES_CMAC
  2022-01-21 11:23     ` [EXT] " Akhil Goyal
@ 2022-02-08 14:11       ` Gagandeep Singh
  0 siblings, 0 replies; 42+ messages in thread
From: Gagandeep Singh @ 2022-02-08 14:11 UTC (permalink / raw)
  To: Akhil Goyal, dev; +Cc: Hemant Agrawal



> -----Original Message-----
> From: Akhil Goyal <gakhil@marvell.com>
> Sent: Friday, January 21, 2022 4:54 PM
> To: Gagandeep Singh <G.Singh@nxp.com>; dev@dpdk.org
> Cc: Hemant Agrawal <hemant.agrawal@nxp.com>
> Subject: RE: [EXT] [PATCH v2 5/8] crypto/dpaa2_sec: change digest size for
> AES_CMAC
> 
> > From: Hemant Agrawal <hemant.agrawal@nxp.com>
> >
> > Change the digest size to supported value by the HW engine.
> >
> > Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
> > ---
> >  drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h | 4 ++--
> >  1 file changed, 2 insertions(+), 2 deletions(-)
> >
> > diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
> > b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
> > index 6aa1c01e95..ab652936bc 100644
> > --- a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
> > +++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
> > @@ -579,11 +579,11 @@ static const struct rte_cryptodev_capabilities
> > dpaa2_sec_capabilities[] = {
> >  					.increment = 1
> >  				},
> >  				.digest_size = {
> > -					.min = 4,
> > +					.min = 12,
> >  					.max = 16,
> >  					.increment = 4
> >  				},
> > -				.aad_size = { 0 }
> > +				.iv_size = { 0 }
> The digest size is changed, but why is aad_size removed in this patch?
This will be fixed in the next version.


^ permalink raw reply	[flat|nested] 42+ messages in thread

* RE: [EXT] [PATCH v2 4/8] crypto/dpaa2_sec: support AES-GMAC
  2022-01-21 11:29     ` [EXT] " Akhil Goyal
@ 2022-02-08 14:15       ` Gagandeep Singh
  0 siblings, 0 replies; 42+ messages in thread
From: Gagandeep Singh @ 2022-02-08 14:15 UTC (permalink / raw)
  To: Akhil Goyal, dev; +Cc: Akhil Goyal



> -----Original Message-----
> From: Akhil Goyal <gakhil@marvell.com>
> Sent: Friday, January 21, 2022 4:59 PM
> To: Gagandeep Singh <G.Singh@nxp.com>; dev@dpdk.org
> Cc: Akhil Goyal <akhil.goyal@nxp.com>
> Subject: RE: [EXT] [PATCH v2 4/8] crypto/dpaa2_sec: support AES-GMAC
> 
> > From: Akhil Goyal <akhil.goyal@nxp.com>
> >
> > This patch supports AES_GMAC algorithm for DPAA2
> > driver.
> >
> > Signed-off-by: Akhil Goyal <akhil.goyal@nxp.com>
> > Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
> > ---
> >  doc/guides/cryptodevs/features/dpaa2_sec.ini |  1 +
> >  drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c  | 14 ++++++++-
> >  drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h    | 30 ++++++++++++++++++++
> >  lib/cryptodev/rte_crypto_sym.h               |  4 ++-
> >  4 files changed, 47 insertions(+), 2 deletions(-)
> 
> This patch should be split in two - cryptodev change should be separate patch.
> 
> > diff --git a/lib/cryptodev/rte_crypto_sym.h b/lib/cryptodev/rte_crypto_sym.h
> > index daa090b978..4644fa3e25 100644
> > --- a/lib/cryptodev/rte_crypto_sym.h
> > +++ b/lib/cryptodev/rte_crypto_sym.h
> > @@ -467,8 +467,10 @@ enum rte_crypto_aead_algorithm {
> >  	/**< AES algorithm in CCM mode. */
> >  	RTE_CRYPTO_AEAD_AES_GCM,
> >  	/**< AES algorithm in GCM mode. */
> > -	RTE_CRYPTO_AEAD_CHACHA20_POLY1305
> > +	RTE_CRYPTO_AEAD_CHACHA20_POLY1305,
> >  	/**< Chacha20 cipher with poly1305 authenticator */
> > +	RTE_CRYPTO_AEAD_AES_GMAC
> > +	/**< AES algorithm in GMAC mode. */
> >  };
> AES-GMAC is also defined as AUTH algo. It may be removed but that would be
> ABI break.
> Is it not possible to use AES-GMAC as auth algo?
There are some issues in this patch. I will send it later.


^ permalink raw reply	[flat|nested] 42+ messages in thread

* [PATCH v3 0/7] NXP crypto drivers changes
  2021-12-28  9:10   ` [PATCH v2 1/8] common/dpaax: caamflib: Remove code related to SEC ERA 1 to 7 Gagandeep Singh
@ 2022-02-10  4:31     ` Gagandeep Singh
  2022-02-10  4:31       ` [PATCH v3 1/7] common/dpaax: caamflib: Remove code related to SEC ERA 1 to 7 Gagandeep Singh
                         ` (7 more replies)
  0 siblings, 8 replies; 42+ messages in thread
From: Gagandeep Singh @ 2022-02-10  4:31 UTC (permalink / raw)
  To: gakhil, dev; +Cc: Gagandeep Singh

v3-change-log
* fix checkpatch issues
* use devargs for strict ordering
* fix AES_CMAC capabilities
* remove GMAC patch from this series; I will send it as a separate patch.

v2-change-log
* use devargs for both DPAA1 and DPAA2 drivers to dump debug prints on SEC errors.

Franck LENORMAND (1):
  common/dpaax: caamflib: Remove code related to SEC ERA 1 to 7

Gagandeep Singh (3):
  common/dpaax: change job processing mode for PDCP SDAP
  crypto/dpaa2_sec: add useful debug prints in sec dequeue
  crypto/dpaa_sec: add debug framework

Hemant Agrawal (2):
  crypto/dpaa2_sec: change capabilities for AES_CMAC
  crypto/dpaa2: fix to check next type for auth or cipher

Nipun Gupta (1):
  crypto/dpaa2_sec: ordered queue support

 doc/guides/cryptodevs/dpaa2_sec.rst         |  17 +
 doc/guides/cryptodevs/dpaa_sec.rst          |  10 +
 drivers/bus/dpaa/dpaa_bus.c                 |  16 +-
 drivers/common/dpaax/caamflib/desc/pdcp.h   | 939 ++++----------------
 drivers/common/dpaax/caamflib/desc/sdap.h   | 111 +--
 drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c | 421 ++++++++-
 drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h   |  13 +-
 drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h    |  14 +-
 drivers/crypto/dpaa_sec/dpaa_sec.c          | 212 ++++-
 9 files changed, 841 insertions(+), 912 deletions(-)

-- 
2.25.1


^ permalink raw reply	[flat|nested] 42+ messages in thread

* [PATCH v3 1/7] common/dpaax: caamflib: Remove code related to SEC ERA 1 to 7
  2022-02-10  4:31     ` [PATCH v3 0/7] NXP crypto drivers changes Gagandeep Singh
@ 2022-02-10  4:31       ` Gagandeep Singh
  2022-02-10 10:58         ` [PATCH v4 0/7] NXP crypto drivers changes Gagandeep Singh
  2022-02-10  4:31       ` [PATCH v3 2/7] common/dpaax: change job processing mode for PDCP SDAP Gagandeep Singh
                         ` (6 subsequent siblings)
  7 siblings, 1 reply; 42+ messages in thread
From: Gagandeep Singh @ 2022-02-10  4:31 UTC (permalink / raw)
  To: gakhil, dev; +Cc: Franck LENORMAND, Gagandeep Singh

From: Franck LENORMAND <franck.lenormand@nxp.com>

DPAA1 and DPAA2 platforms use SEC ERA 8 and 10 only.

This patch removes code in SDAP and PDCP header related to these
ERA to simplify the codebase:
 - Simplify logic using RTA_SEC_ERA_<> macro
 - Remove era_2_sw_hfn_ovrd dedicated to RTA_SEC_ERA_2

Signed-off-by: Franck LENORMAND <franck.lenormand@nxp.com>
Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 drivers/common/dpaax/caamflib/desc/pdcp.h   | 939 ++++----------------
 drivers/common/dpaax/caamflib/desc/sdap.h   |  91 +-
 drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c |  14 +-
 drivers/crypto/dpaa_sec/dpaa_sec.c          |  14 +-
 4 files changed, 183 insertions(+), 875 deletions(-)

diff --git a/drivers/common/dpaax/caamflib/desc/pdcp.h b/drivers/common/dpaax/caamflib/desc/pdcp.h
index 8e8daf5ba8..2fe56c53c6 100644
--- a/drivers/common/dpaax/caamflib/desc/pdcp.h
+++ b/drivers/common/dpaax/caamflib/desc/pdcp.h
@@ -329,91 +329,35 @@ pdcp_insert_cplane_null_op(struct program *p,
 			   struct alginfo *cipherdata __maybe_unused,
 			   struct alginfo *authdata __maybe_unused,
 			   unsigned int dir,
-			   enum pdcp_sn_size sn_size __maybe_unused,
-			   unsigned char era_2_sw_hfn_ovrd __maybe_unused)
+			   enum pdcp_sn_size sn_size __maybe_unused)
 {
-	LABEL(local_offset);
-	REFERENCE(move_cmd_read_descbuf);
-	REFERENCE(move_cmd_write_descbuf);
-
-	if (rta_sec_era > RTA_SEC_ERA_2) {
-		MATHB(p, SEQINSZ, ADD, ZERO, VSEQINSZ, 4, 0);
-		if (dir == OP_TYPE_ENCAP_PROTOCOL)
-			MATHB(p, SEQINSZ, ADD, PDCP_MAC_I_LEN, VSEQOUTSZ, 4,
-			      IMMED2);
-		else
-			MATHB(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, VSEQOUTSZ, 4,
-			      IMMED2);
-	} else {
-		MATHB(p, SEQINSZ, ADD, ONE, VSEQINSZ, 4, 0);
-		MATHB(p, VSEQINSZ, SUB, ONE, VSEQINSZ, 4, 0);
-
-		if (dir == OP_TYPE_ENCAP_PROTOCOL) {
-			MATHB(p, SEQINSZ, ADD, PDCP_MAC_I_LEN, VSEQOUTSZ, 4,
-			      IMMED2);
-			MATHB(p, VSEQINSZ, SUB, ONE, MATH0, 4, 0);
-		} else {
-			MATHB(p, VSEQINSZ, SUB, PDCP_MAC_I_LEN, VSEQINSZ, 4,
-			      IMMED2);
-			MATHB(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, VSEQOUTSZ, 4,
-			      IMMED2);
-			MATHB(p, VSEQOUTSZ, SUB, ONE, MATH0, 4, 0);
-		}
-
-		MATHB(p, MATH0, ADD, ONE, MATH0, 4, 0);
+	MATHB(p, SEQINSZ, ADD, ZERO, VSEQINSZ, 4, 0);
+	if (dir == OP_TYPE_ENCAP_PROTOCOL)
+		MATHB(p, SEQINSZ, ADD, PDCP_MAC_I_LEN, VSEQOUTSZ, 4,
+		      IMMED2);
+	else
+		MATHB(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, VSEQOUTSZ, 4,
+		      IMMED2);
 
-		/*
-		 * Since MOVELEN is available only starting with
-		 * SEC ERA 3, use poor man's MOVELEN: create a MOVE
-		 * command dynamically by writing the length from M1 by
-		 * OR-ing the command in the M1 register and MOVE the
-		 * result into the descriptor buffer. Care must be taken
-		 * wrt. the location of the command because of SEC
-		 * pipelining. The actual MOVEs are written at the end
-		 * of the descriptor due to calculations needed on the
-		 * offset in the descriptor for the MOVE command.
-		 */
-		move_cmd_read_descbuf = MOVE(p, DESCBUF, 0, MATH0, 0, 6,
-					     IMMED);
-		move_cmd_write_descbuf = MOVE(p, MATH0, 0, DESCBUF, 0, 8,
-					      WAITCOMP | IMMED);
-	}
 	MATHB(p, VSEQINSZ, SUB, PDCP_NULL_MAX_FRAME_LEN, NONE, 4,
 	      IMMED2);
 	JUMP(p, PDCP_MAX_FRAME_LEN_STATUS, HALT_STATUS, ALL_FALSE, MATH_N);
 
-	if (rta_sec_era > RTA_SEC_ERA_2) {
-		if (dir == OP_TYPE_ENCAP_PROTOCOL)
-			MATHB(p, VSEQINSZ, ADD, ZERO, MATH0, 4, 0);
-		else
-			MATHB(p, VSEQOUTSZ, ADD, ZERO, MATH0, 4, 0);
-	}
+	if (dir == OP_TYPE_ENCAP_PROTOCOL)
+		MATHB(p, VSEQINSZ, ADD, ZERO, MATH0, 4, 0);
+	else
+		MATHB(p, VSEQOUTSZ, ADD, ZERO, MATH0, 4, 0);
+
 	SEQFIFOSTORE(p, MSG, 0, 0, VLF);
 	SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1 | FLUSH1);
 
-	if (rta_sec_era > RTA_SEC_ERA_2) {
-		MOVE(p, AB1, 0, OFIFO, 0, MATH0, 0);
-	} else {
-		SET_LABEL(p, local_offset);
-
-		/* Shut off automatic Info FIFO entries */
-		LOAD(p, 0, DCTRL, LDOFF_DISABLE_AUTO_NFIFO, 0, IMMED);
-		/* Placeholder for MOVE command with length from M1 register */
-		MOVE(p, IFIFOAB1, 0, OFIFO, 0, 0, IMMED);
-		/* Enable automatic Info FIFO entries */
-		LOAD(p, 0, DCTRL, LDOFF_ENABLE_AUTO_NFIFO, 0, IMMED);
-	}
+	MOVE(p, AB1, 0, OFIFO, 0, MATH0, 0);
 
 	if (dir == OP_TYPE_ENCAP_PROTOCOL) {
 		MATHB(p, MATH1, XOR, MATH1, MATH0, 8, 0);
 		MOVE(p, MATH0, 0, OFIFO, 0, 4, IMMED);
 	}
 
-	if (rta_sec_era < RTA_SEC_ERA_3) {
-		PATCH_MOVE(p, move_cmd_read_descbuf, local_offset);
-		PATCH_MOVE(p, move_cmd_write_descbuf, local_offset);
-	}
-
 	return 0;
 }
 
@@ -422,66 +366,21 @@ insert_copy_frame_op(struct program *p,
 		     struct alginfo *cipherdata __maybe_unused,
 		     unsigned int dir __maybe_unused)
 {
-	LABEL(local_offset);
-	REFERENCE(move_cmd_read_descbuf);
-	REFERENCE(move_cmd_write_descbuf);
-
-	if (rta_sec_era > RTA_SEC_ERA_2) {
-		MATHB(p, SEQINSZ, ADD, ZERO, VSEQINSZ,  4, 0);
-		MATHB(p, SEQINSZ, ADD, ZERO, VSEQOUTSZ,  4, 0);
-	} else {
-		MATHB(p, SEQINSZ, ADD, ONE, VSEQINSZ,  4, 0);
-		MATHB(p, VSEQINSZ, SUB, ONE, VSEQINSZ,  4, 0);
-		MATHB(p, SEQINSZ, ADD, ONE, VSEQOUTSZ,  4, 0);
-		MATHB(p, VSEQOUTSZ, SUB, ONE, VSEQOUTSZ,  4, 0);
-		MATHB(p, VSEQINSZ, SUB, ONE, MATH0,  4, 0);
-		MATHB(p, MATH0, ADD, ONE, MATH0,  4, 0);
+	MATHB(p, SEQINSZ, ADD, ZERO, VSEQINSZ,  4, 0);
+	MATHB(p, SEQINSZ, ADD, ZERO, VSEQOUTSZ,  4, 0);
 
-		/*
-		 * Since MOVELEN is available only starting with
-		 * SEC ERA 3, use poor man's MOVELEN: create a MOVE
-		 * command dynamically by writing the length from M1 by
-		 * OR-ing the command in the M1 register and MOVE the
-		 * result into the descriptor buffer. Care must be taken
-		 * wrt. the location of the command because of SEC
-		 * pipelining. The actual MOVEs are written at the end
-		 * of the descriptor due to calculations needed on the
-		 * offset in the descriptor for the MOVE command.
-		 */
-		move_cmd_read_descbuf = MOVE(p, DESCBUF, 0, MATH0, 0, 6,
-					     IMMED);
-		move_cmd_write_descbuf = MOVE(p, MATH0, 0, DESCBUF, 0, 8,
-					      WAITCOMP | IMMED);
-	}
 	MATHB(p, SEQINSZ, SUB, PDCP_NULL_MAX_FRAME_LEN, NONE,  4,
 	      IFB | IMMED2);
 	JUMP(p, PDCP_MAX_FRAME_LEN_STATUS, HALT_STATUS, ALL_FALSE, MATH_N);
 
-	if (rta_sec_era > RTA_SEC_ERA_2)
-		MATHB(p, VSEQINSZ, ADD, ZERO, MATH0,  4, 0);
+	MATHB(p, VSEQINSZ, ADD, ZERO, MATH0,  4, 0);
 
 	SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1 | FLUSH1);
-	if (rta_sec_era > RTA_SEC_ERA_2) {
-		MOVE(p, AB1, 0, OFIFO, 0, MATH0, 0);
-	} else {
-		SET_LABEL(p, local_offset);
 
-		/* Shut off automatic Info FIFO entries */
-		LOAD(p, 0, DCTRL, LDOFF_DISABLE_AUTO_NFIFO, 0, IMMED);
-
-		/* Placeholder for MOVE command with length from M0 register */
-		MOVE(p, IFIFOAB1, 0, OFIFO, 0, 0, IMMED);
-
-		/* Enable automatic Info FIFO entries */
-		LOAD(p, 0, DCTRL, LDOFF_ENABLE_AUTO_NFIFO, 0, IMMED);
-	}
+	MOVE(p, AB1, 0, OFIFO, 0, MATH0, 0);
 
 	SEQFIFOSTORE(p, MSG, 0, 0, VLF);
 
-	if (rta_sec_era < RTA_SEC_ERA_3) {
-		PATCH_MOVE(p, move_cmd_read_descbuf, local_offset);
-		PATCH_MOVE(p, move_cmd_write_descbuf, local_offset);
-	}
 	return 0;
 }
 
@@ -490,13 +389,12 @@ pdcp_insert_cplane_int_only_op(struct program *p,
 			       bool swap __maybe_unused,
 			       struct alginfo *cipherdata __maybe_unused,
 			       struct alginfo *authdata, unsigned int dir,
-			       enum pdcp_sn_size sn_size,
-			       unsigned char era_2_sw_hfn_ovrd)
+			       enum pdcp_sn_size sn_size)
 {
 	uint32_t offset = 0, length = 0, sn_mask = 0;
 
 	/* 12 bit SN is only supported for protocol offload case */
-	if (rta_sec_era >= RTA_SEC_ERA_8 && sn_size == PDCP_SN_SIZE_12) {
+	if (sn_size == PDCP_SN_SIZE_12) {
 		KEY(p, KEY2, authdata->key_enc_flags, authdata->key,
 		    authdata->keylen, INLINE_KEY(authdata));
 
@@ -526,9 +424,6 @@ pdcp_insert_cplane_int_only_op(struct program *p,
 		return -ENOTSUP;
 
 	}
-	LABEL(local_offset);
-	REFERENCE(move_cmd_read_descbuf);
-	REFERENCE(move_cmd_write_descbuf);
 
 	switch (authdata->algtype) {
 	case PDCP_AUTH_TYPE_SNOW:
@@ -538,14 +433,7 @@ pdcp_insert_cplane_int_only_op(struct program *p,
 		SEQLOAD(p, MATH0, offset, length, 0);
 		JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CALM);
 
-		if (rta_sec_era > RTA_SEC_ERA_2 ||
-		    (rta_sec_era == RTA_SEC_ERA_2 &&
-				   era_2_sw_hfn_ovrd == 0)) {
-			SEQINPTR(p, 0, length, RTO);
-		} else {
-			SEQINPTR(p, 0, 5, RTO);
-			SEQFIFOLOAD(p, SKIP, 4, 0);
-		}
+		SEQINPTR(p, 0, length, RTO);
 
 		if (swap == false) {
 			MATHB(p, MATH0, AND, sn_mask, MATH1,  8,
@@ -580,40 +468,11 @@ pdcp_insert_cplane_int_only_op(struct program *p,
 			MATHB(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, MATH1, 4,
 			      IMMED2);
 		} else {
-			if (rta_sec_era > RTA_SEC_ERA_2) {
-				MATHB(p, SEQINSZ, SUB, ZERO, MATH1, 4,
-				      0);
-			} else {
-				MATHB(p, SEQINSZ, ADD, ONE, MATH1, 4,
-				      0);
-				MATHB(p, MATH1, SUB, ONE, MATH1, 4,
-				      0);
-			}
+			MATHB(p, SEQINSZ, SUB, ZERO, MATH1, 4, 0);
 		}
 
-		if (rta_sec_era > RTA_SEC_ERA_2) {
-			MATHB(p, MATH1, SUB, ZERO, VSEQINSZ, 4, 0);
-			MATHB(p, MATH1, SUB, ZERO, VSEQOUTSZ, 4, 0);
-		} else {
-			MATHB(p, ZERO, ADD, MATH1, VSEQINSZ, 4, 0);
-			MATHB(p, ZERO, ADD, MATH1, VSEQOUTSZ, 4, 0);
-
-			/*
-			 * Since MOVELEN is available only starting with
-			 * SEC ERA 3, use poor man's MOVELEN: create a MOVE
-			 * command dynamically by writing the length from M1 by
-			 * OR-ing the command in the M1 register and MOVE the
-			 * result into the descriptor buffer. Care must be taken
-			 * wrt. the location of the command because of SEC
-			 * pipelining. The actual MOVEs are written at the end
-			 * of the descriptor due to calculations needed on the
-			 * offset in the descriptor for the MOVE command.
-			 */
-			move_cmd_read_descbuf = MOVE(p, DESCBUF, 0, MATH1, 0, 6,
-						     IMMED);
-			move_cmd_write_descbuf = MOVE(p, MATH1, 0, DESCBUF, 0,
-						      8, WAITCOMP | IMMED);
-		}
+		MATHB(p, MATH1, SUB, ZERO, VSEQINSZ, 4, 0);
+		MATHB(p, MATH1, SUB, ZERO, VSEQOUTSZ, 4, 0);
 
 		SEQFIFOSTORE(p, MSG, 0, 0, VLF);
 		ALG_OPERATION(p, OP_ALG_ALGSEL_SNOW_F9, OP_ALG_AAI_F9,
@@ -622,25 +481,9 @@ pdcp_insert_cplane_int_only_op(struct program *p,
 				     ICV_CHECK_DISABLE : ICV_CHECK_ENABLE,
 			      DIR_ENC);
 
-		if (rta_sec_era > RTA_SEC_ERA_2) {
-			SEQFIFOLOAD(p, MSGINSNOOP, 0,
+		SEQFIFOLOAD(p, MSGINSNOOP, 0,
 				    VLF | LAST1 | LAST2 | FLUSH1);
-			MOVE(p, AB1, 0, OFIFO, 0, MATH1, 0);
-		} else {
-			SEQFIFOLOAD(p, MSGINSNOOP, 0,
-				    VLF | LAST1 | LAST2 | FLUSH1);
-			SET_LABEL(p, local_offset);
-
-			/* Shut off automatic Info FIFO entries */
-			LOAD(p, 0, DCTRL, LDOFF_DISABLE_AUTO_NFIFO, 0, IMMED);
-			/*
-			 * Placeholder for MOVE command with length from M1
-			 * register
-			 */
-			MOVE(p, IFIFOAB1, 0, OFIFO, 0, 0, IMMED);
-			/* Enable automatic Info FIFO entries */
-			LOAD(p, 0, DCTRL, LDOFF_ENABLE_AUTO_NFIFO, 0, IMMED);
-		}
+		MOVE(p, AB1, 0, OFIFO, 0, MATH1, 0);
 
 		if (dir == OP_TYPE_DECAP_PROTOCOL)
 			SEQFIFOLOAD(p, ICV2, 4, LAST2);
@@ -655,14 +498,7 @@ pdcp_insert_cplane_int_only_op(struct program *p,
 		    authdata->keylen, INLINE_KEY(authdata));
 		SEQLOAD(p, MATH0, offset, length, 0);
 		JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CALM);
-		if (rta_sec_era > RTA_SEC_ERA_2 ||
-		    (rta_sec_era == RTA_SEC_ERA_2 &&
-		     era_2_sw_hfn_ovrd == 0)) {
-			SEQINPTR(p, 0, length, RTO);
-		} else {
-			SEQINPTR(p, 0, 5, RTO);
-			SEQFIFOLOAD(p, SKIP, 4, 0);
-		}
+		SEQINPTR(p, 0, length, RTO);
 
 		if (swap == false) {
 			MATHB(p, MATH0, AND, sn_mask, MATH1, 8,
@@ -686,40 +522,12 @@ pdcp_insert_cplane_int_only_op(struct program *p,
 			MATHB(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, MATH1, 4,
 			      IMMED2);
 		} else {
-			if (rta_sec_era > RTA_SEC_ERA_2) {
-				MATHB(p, SEQINSZ, SUB, ZERO, MATH1, 4,
-				      0);
-			} else {
-				MATHB(p, SEQINSZ, ADD, ONE, MATH1, 4,
-				      0);
-				MATHB(p, MATH1, SUB, ONE, MATH1, 4,
-				      0);
-			}
+			MATHB(p, SEQINSZ, SUB, ZERO, MATH1, 4, 0);
 		}
 
-		if (rta_sec_era > RTA_SEC_ERA_2) {
-			MATHB(p, MATH1, SUB, ZERO, VSEQINSZ, 4, 0);
-			MATHB(p, MATH1, SUB, ZERO, VSEQOUTSZ, 4, 0);
-		} else {
-			MATHB(p, ZERO, ADD, MATH1, VSEQINSZ, 4, 0);
-			MATHB(p, ZERO, ADD, MATH1, VSEQOUTSZ, 4, 0);
-
-			/*
-			 * Since MOVELEN is available only starting with
-			 * SEC ERA 3, use poor man's MOVELEN: create a MOVE
-			 * command dynamically by writing the length from M1 by
-			 * OR-ing the command in the M1 register and MOVE the
-			 * result into the descriptor buffer. Care must be taken
-			 * wrt. the location of the command because of SEC
-			 * pipelining. The actual MOVEs are written at the end
-			 * of the descriptor due to calculations needed on the
-			 * offset in the descriptor for the MOVE command.
-			 */
-			move_cmd_read_descbuf = MOVE(p, DESCBUF, 0, MATH1, 0, 6,
-						     IMMED);
-			move_cmd_write_descbuf = MOVE(p, MATH1, 0, DESCBUF, 0,
-						      8, WAITCOMP | IMMED);
-		}
+		MATHB(p, MATH1, SUB, ZERO, VSEQINSZ, 4, 0);
+		MATHB(p, MATH1, SUB, ZERO, VSEQOUTSZ, 4, 0);
+
 		SEQFIFOSTORE(p, MSG, 0, 0, VLF);
 		ALG_OPERATION(p, OP_ALG_ALGSEL_AES,
 			      OP_ALG_AAI_CMAC,
@@ -728,27 +536,9 @@ pdcp_insert_cplane_int_only_op(struct program *p,
 				     ICV_CHECK_DISABLE : ICV_CHECK_ENABLE,
 			      DIR_ENC);
 
-		if (rta_sec_era > RTA_SEC_ERA_2) {
-			MOVE(p, AB2, 0, OFIFO, 0, MATH1, 0);
-			SEQFIFOLOAD(p, MSGINSNOOP, 0,
+		MOVE(p, AB2, 0, OFIFO, 0, MATH1, 0);
+		SEQFIFOLOAD(p, MSGINSNOOP, 0,
 				    VLF | LAST1 | LAST2 | FLUSH1);
-		} else {
-			SEQFIFOLOAD(p, MSGINSNOOP, 0,
-				    VLF | LAST1 | LAST2 | FLUSH1);
-			SET_LABEL(p, local_offset);
-
-			/* Shut off automatic Info FIFO entries */
-			LOAD(p, 0, DCTRL, LDOFF_DISABLE_AUTO_NFIFO, 0, IMMED);
-
-			/*
-			 * Placeholder for MOVE command with length from
-			 * M1 register
-			 */
-			MOVE(p, IFIFOAB2, 0, OFIFO, 0, 0, IMMED);
-
-			/* Enable automatic Info FIFO entries */
-			LOAD(p, 0, DCTRL, LDOFF_ENABLE_AUTO_NFIFO, 0, IMMED);
-		}
 
 		if (dir == OP_TYPE_DECAP_PROTOCOL)
 			SEQFIFOLOAD(p, ICV1, 4, LAST1 | FLUSH1);
@@ -758,10 +548,6 @@ pdcp_insert_cplane_int_only_op(struct program *p,
 		break;
 
 	case PDCP_AUTH_TYPE_ZUC:
-		if (rta_sec_era < RTA_SEC_ERA_5) {
-			pr_err("Invalid era for selected algorithm\n");
-			return -ENOTSUP;
-		}
 		/* Insert Auth Key */
 		KEY(p, KEY2, authdata->key_enc_flags, authdata->key,
 		    authdata->keylen, INLINE_KEY(authdata));
@@ -817,11 +603,6 @@ pdcp_insert_cplane_int_only_op(struct program *p,
 		return -EINVAL;
 	}
 
-	if (rta_sec_era < RTA_SEC_ERA_3) {
-		PATCH_MOVE(p, move_cmd_read_descbuf, local_offset);
-		PATCH_MOVE(p, move_cmd_write_descbuf, local_offset);
-	}
-
 	return 0;
 }
 
@@ -831,15 +612,14 @@ pdcp_insert_cplane_enc_only_op(struct program *p,
 			       struct alginfo *cipherdata,
 			       struct alginfo *authdata __maybe_unused,
 			       unsigned int dir,
-			       enum pdcp_sn_size sn_size,
-			       unsigned char era_2_sw_hfn_ovrd __maybe_unused)
+			       enum pdcp_sn_size sn_size)
 {
 	uint32_t offset = 0, length = 0, sn_mask = 0;
 	/* Insert Cipher Key */
 	KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
 	    cipherdata->keylen, INLINE_KEY(cipherdata));
 
-	if ((rta_sec_era >= RTA_SEC_ERA_8 && sn_size != PDCP_SN_SIZE_18 &&
+	if ((sn_size != PDCP_SN_SIZE_18 &&
 			!(rta_sec_era == RTA_SEC_ERA_8 &&
 				authdata->algtype == 0))
 			|| (rta_sec_era == RTA_SEC_ERA_10)) {
@@ -889,12 +669,7 @@ pdcp_insert_cplane_enc_only_op(struct program *p,
 	case PDCP_CIPHER_TYPE_SNOW:
 		MOVEB(p, MATH2, 0, CONTEXT1, 0, 8, WAITCOMP | IMMED);
 
-		if (rta_sec_era > RTA_SEC_ERA_2) {
-			MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
-		} else {
-			MATHB(p, SEQINSZ, SUB, ONE, MATH1, 4, 0);
-			MATHB(p, MATH1, ADD, ONE, VSEQINSZ, 4, 0);
-		}
+		MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
 
 		if (dir == OP_TYPE_ENCAP_PROTOCOL)
 			MATHB(p, SEQINSZ, ADD, PDCP_MAC_I_LEN, VSEQOUTSZ, 4,
@@ -913,12 +688,7 @@ pdcp_insert_cplane_enc_only_op(struct program *p,
 	case PDCP_CIPHER_TYPE_AES:
 		MOVEB(p, MATH2, 0, CONTEXT1, 0x10, 0x10, WAITCOMP | IMMED);
 
-		if (rta_sec_era > RTA_SEC_ERA_2) {
-			MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
-		} else {
-			MATHB(p, SEQINSZ, SUB, ONE, MATH1, 4, 0);
-			MATHB(p, MATH1, ADD, ONE, VSEQINSZ, 4, 0);
-		}
+		MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
 
 		if (dir == OP_TYPE_ENCAP_PROTOCOL)
 			MATHB(p, SEQINSZ, ADD, PDCP_MAC_I_LEN, VSEQOUTSZ, 4,
@@ -937,11 +707,6 @@ pdcp_insert_cplane_enc_only_op(struct program *p,
 		break;
 
 	case PDCP_CIPHER_TYPE_ZUC:
-		if (rta_sec_era < RTA_SEC_ERA_5) {
-			pr_err("Invalid era for selected algorithm\n");
-			return -ENOTSUP;
-		}
-
 		MOVEB(p, MATH2, 0, CONTEXT1, 0, 0x08, IMMED);
 		MOVEB(p, MATH2, 0, CONTEXT1, 0x08, 0x08, WAITCOMP | IMMED);
 		MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
@@ -988,8 +753,7 @@ pdcp_insert_uplane_snow_snow_op(struct program *p,
 			      struct alginfo *cipherdata,
 			      struct alginfo *authdata,
 			      unsigned int dir,
-			      enum pdcp_sn_size sn_size,
-			      unsigned char era_2_sw_hfn_ovrd __maybe_unused)
+			      enum pdcp_sn_size sn_size)
 {
 	uint32_t offset = 0, length = 0, sn_mask = 0;
 
@@ -998,7 +762,7 @@ pdcp_insert_uplane_snow_snow_op(struct program *p,
 	KEY(p, KEY2, authdata->key_enc_flags, authdata->key, authdata->keylen,
 	    INLINE_KEY(authdata));
 
-	if (rta_sec_era >= RTA_SEC_ERA_8 && sn_size != PDCP_SN_SIZE_18) {
+	if (sn_size != PDCP_SN_SIZE_18) {
 		int pclid;
 
 		if (sn_size == PDCP_SN_SIZE_5)
@@ -1014,18 +778,13 @@ pdcp_insert_uplane_snow_snow_op(struct program *p,
 	}
 	/* Non-proto is supported only for 5bit cplane and 18bit uplane */
 	switch (sn_size) {
-	case PDCP_SN_SIZE_5:
-		offset = 7;
-		length = 1;
-		sn_mask = (swap == false) ? PDCP_C_PLANE_SN_MASK :
-					PDCP_C_PLANE_SN_MASK_BE;
-		break;
 	case PDCP_SN_SIZE_18:
 		offset = 5;
 		length = 3;
 		sn_mask = (swap == false) ? PDCP_U_PLANE_18BIT_SN_MASK :
 					PDCP_U_PLANE_18BIT_SN_MASK_BE;
 		break;
+	case PDCP_SN_SIZE_5:
 	case PDCP_SN_SIZE_7:
 	case PDCP_SN_SIZE_12:
 	case PDCP_SN_SIZE_15:
@@ -1094,20 +853,13 @@ pdcp_insert_uplane_snow_snow_op(struct program *p,
 		SEQFIFOLOAD(p, MSG1, 4, LAST1 | FLUSH1);
 		JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CLASS1 | NOP | NIFP);
 
-		if (rta_sec_era >= RTA_SEC_ERA_6)
-			LOAD(p, 0, DCTRL, 0, LDLEN_RST_CHA_OFIFO_PTR, IMMED);
+		LOAD(p, 0, DCTRL, 0, LDLEN_RST_CHA_OFIFO_PTR, IMMED);
 
 		MOVE(p, OFIFO, 0, MATH0, 0, 4, WAITCOMP | IMMED);
 
 		NFIFOADD(p, IFIFO, ICV2, 4, LAST2);
 
-		if (rta_sec_era <= RTA_SEC_ERA_2) {
-			/* Shut off automatic Info FIFO entries */
-			LOAD(p, 0, DCTRL, LDOFF_DISABLE_AUTO_NFIFO, 0, IMMED);
-			MOVE(p, MATH0, 0, IFIFOAB2, 0, 4, WAITCOMP | IMMED);
-		} else {
-			MOVE(p, MATH0, 0, IFIFO, 0, 4, WAITCOMP | IMMED);
-		}
+		MOVE(p, MATH0, 0, IFIFO, 0, 4, WAITCOMP | IMMED);
 	}
 
 	return 0;
@@ -1119,19 +871,13 @@ pdcp_insert_uplane_zuc_zuc_op(struct program *p,
 			      struct alginfo *cipherdata,
 			      struct alginfo *authdata,
 			      unsigned int dir,
-			      enum pdcp_sn_size sn_size,
-			      unsigned char era_2_sw_hfn_ovrd __maybe_unused)
+			      enum pdcp_sn_size sn_size)
 {
 	uint32_t offset = 0, length = 0, sn_mask = 0;
 
 	LABEL(keyjump);
 	REFERENCE(pkeyjump);
 
-	if (rta_sec_era < RTA_SEC_ERA_5) {
-		pr_err("Invalid era for selected algorithm\n");
-		return -ENOTSUP;
-	}
-
 	pkeyjump = JUMP(p, keyjump, LOCAL_JUMP, ALL_TRUE, SHRD | SELF | BOTH);
 	KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
 	    cipherdata->keylen, INLINE_KEY(cipherdata));
@@ -1141,7 +887,7 @@ pdcp_insert_uplane_zuc_zuc_op(struct program *p,
 	SET_LABEL(p, keyjump);
 	PATCH_JUMP(p, pkeyjump, keyjump);
 
-	if (rta_sec_era >= RTA_SEC_ERA_8 && sn_size != PDCP_SN_SIZE_18) {
+	if (sn_size != PDCP_SN_SIZE_18) {
 		int pclid;
 
 		if (sn_size == PDCP_SN_SIZE_5)
@@ -1157,18 +903,13 @@ pdcp_insert_uplane_zuc_zuc_op(struct program *p,
 	}
 	/* Non-proto is supported only for 5bit cplane and 18bit uplane */
 	switch (sn_size) {
-	case PDCP_SN_SIZE_5:
-		offset = 7;
-		length = 1;
-		sn_mask = (swap == false) ? PDCP_C_PLANE_SN_MASK :
-					PDCP_C_PLANE_SN_MASK_BE;
-		break;
 	case PDCP_SN_SIZE_18:
 		offset = 5;
 		length = 3;
 		sn_mask = (swap == false) ? PDCP_U_PLANE_18BIT_SN_MASK :
 					PDCP_U_PLANE_18BIT_SN_MASK_BE;
 		break;
+	case PDCP_SN_SIZE_5:
 	case PDCP_SN_SIZE_7:
 	case PDCP_SN_SIZE_12:
 	case PDCP_SN_SIZE_15:
@@ -1243,12 +984,11 @@ pdcp_insert_uplane_aes_aes_op(struct program *p,
 			      struct alginfo *cipherdata,
 			      struct alginfo *authdata,
 			      unsigned int dir,
-			      enum pdcp_sn_size sn_size,
-			      unsigned char era_2_sw_hfn_ovrd __maybe_unused)
+			      enum pdcp_sn_size sn_size)
 {
 	uint32_t offset = 0, length = 0, sn_mask = 0;
 
-	if ((rta_sec_era >= RTA_SEC_ERA_8 && sn_size != PDCP_SN_SIZE_18)) {
+	if (sn_size != PDCP_SN_SIZE_18) {
 		/* Insert Auth Key */
 		KEY(p, KEY2, authdata->key_enc_flags, authdata->key,
 		    authdata->keylen, INLINE_KEY(authdata));
@@ -1392,8 +1132,7 @@ pdcp_insert_cplane_acc_op(struct program *p,
 			  struct alginfo *cipherdata,
 			  struct alginfo *authdata,
 			  unsigned int dir,
-			  enum pdcp_sn_size sn_size,
-			  unsigned char era_2_hfn_ovrd __maybe_unused)
+			  enum pdcp_sn_size sn_size)
 {
 	/* Insert Auth Key */
 	KEY(p, KEY2, authdata->key_enc_flags, authdata->key, authdata->keylen,
@@ -1420,8 +1159,7 @@ pdcp_insert_cplane_snow_aes_op(struct program *p,
 			       struct alginfo *cipherdata,
 			       struct alginfo *authdata,
 			       unsigned int dir,
-			       enum pdcp_sn_size sn_size,
-			       unsigned char era_2_sw_hfn_ovrd)
+			       enum pdcp_sn_size sn_size)
 {
 	uint32_t offset = 0, length = 0, sn_mask = 0;
 
@@ -1429,14 +1167,12 @@ pdcp_insert_cplane_snow_aes_op(struct program *p,
 	LABEL(end_desc);
 	LABEL(local_offset);
 	LABEL(jump_to_beginning);
-	LABEL(fifo_load_mac_i_offset);
 	REFERENCE(seqin_ptr_read);
 	REFERENCE(seqin_ptr_write);
 	REFERENCE(seq_out_read);
 	REFERENCE(jump_back_to_sd_cmd);
-	REFERENCE(move_mac_i_to_desc_buf);
 
-	if ((rta_sec_era >= RTA_SEC_ERA_8 && sn_size != PDCP_SN_SIZE_18) ||
+	if ((sn_size != PDCP_SN_SIZE_18) ||
 		(rta_sec_era == RTA_SEC_ERA_10)) {
 		KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
 				cipherdata->keylen, INLINE_KEY(cipherdata));
@@ -1484,56 +1220,17 @@ pdcp_insert_cplane_snow_aes_op(struct program *p,
 	MATHB(p, MATH1, OR, MATH2, MATH2, 8, 0);
 	SEQSTORE(p, MATH0, offset, length, 0);
 	if (dir == OP_TYPE_ENCAP_PROTOCOL) {
-		if (rta_sec_era > RTA_SEC_ERA_2 ||
-		    (rta_sec_era == RTA_SEC_ERA_2 &&
-				   era_2_sw_hfn_ovrd == 0)) {
-			SEQINPTR(p, 0, length, RTO);
-		} else {
-			SEQINPTR(p, 0, 5, RTO);
-			SEQFIFOLOAD(p, SKIP, 4, 0);
-		}
+		SEQINPTR(p, 0, length, RTO);
+
 		KEY(p, KEY1, authdata->key_enc_flags, authdata->key,
 		    authdata->keylen, INLINE_KEY(authdata));
 		MOVEB(p, MATH2, 0, IFIFOAB1, 0, 0x08, IMMED);
 
-		if (rta_sec_era > RTA_SEC_ERA_2) {
-			MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
-			MATHB(p, SEQINSZ, SUB, ZERO, MATH1, 4, 0);
-			MATHB(p, VSEQINSZ, ADD, PDCP_MAC_I_LEN - 1, VSEQOUTSZ,
-			      4, IMMED2);
-		} else {
-			MATHB(p, SEQINSZ, SUB, MATH3, VSEQINSZ, 4, 0);
-			MATHB(p, VSEQINSZ, ADD, PDCP_MAC_I_LEN - 1, VSEQOUTSZ,
-			      4, IMMED2);
-			/*
-			 * Note: Although the calculations below might seem a
-			 * little off, the logic is the following:
-			 *
-			 * - SEQ IN PTR RTO below needs the full length of the
-			 *   frame; in case of P4080_REV_2_HFN_OV_WORKAROUND,
-			 *   this means the length of the frame to be processed
-			 *   + 4 bytes (the HFN override flag and value).
-			 *   The length of the frame to be processed minus 1
-			 *   byte is in the VSIL register (because
-			 *   VSIL = SIL + 3, due to 1 byte, the header being
-			 *   already written by the SEQ STORE above). So for
-			 *   calculating the length to use in RTO, I add one
-			 *   to the VSIL value in order to obtain the total
-			 *   frame length. This helps in case of P4080 which
-			 *   can have the value 0 as an operand in a MATH
-			 *   command only as SRC1 When the HFN override
-			 *   workaround is not enabled, the length of the
-			 *   frame is given by the SIL register; the
-			 *   calculation is similar to the one in the SEC 4.2
-			 *   and SEC 5.3 cases.
-			 */
-			if (era_2_sw_hfn_ovrd)
-				MATHB(p, VSEQOUTSZ, ADD, ONE, MATH1, 4,
-				      0);
-			else
-				MATHB(p, SEQINSZ, ADD, MATH3, MATH1, 4,
-				      0);
-		}
+		MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
+		MATHB(p, SEQINSZ, SUB, ZERO, MATH1, 4, 0);
+		MATHB(p, VSEQINSZ, ADD, PDCP_MAC_I_LEN - 1, VSEQOUTSZ,
+		      4, IMMED2);
+
 		/*
 		 * Placeholder for filling the length in
 		 * SEQIN PTR RTO below
@@ -1548,24 +1245,14 @@ pdcp_insert_cplane_snow_aes_op(struct program *p,
 			      DIR_DEC);
 		SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1 | FLUSH1);
 		MOVEB(p, CONTEXT1, 0, MATH3, 0, 4, WAITCOMP | IMMED);
-		if (rta_sec_era <= RTA_SEC_ERA_3)
-			LOAD(p, CLRW_CLR_C1KEY |
-			     CLRW_CLR_C1CTX |
-			     CLRW_CLR_C1ICV |
-			     CLRW_CLR_C1DATAS |
-			     CLRW_CLR_C1MODE,
-			     CLRW, 0, 4, IMMED);
-		else
-			LOAD(p, CLRW_RESET_CLS1_CHA |
-			     CLRW_CLR_C1KEY |
-			     CLRW_CLR_C1CTX |
-			     CLRW_CLR_C1ICV |
-			     CLRW_CLR_C1DATAS |
-			     CLRW_CLR_C1MODE,
-			     CLRW, 0, 4, IMMED);
 
-		if (rta_sec_era <= RTA_SEC_ERA_3)
-			LOAD(p, CCTRL_RESET_CHA_ALL, CCTRL, 0, 4, IMMED);
+		LOAD(p, CLRW_RESET_CLS1_CHA |
+		     CLRW_CLR_C1KEY |
+		     CLRW_CLR_C1CTX |
+		     CLRW_CLR_C1ICV |
+		     CLRW_CLR_C1DATAS |
+		     CLRW_CLR_C1MODE,
+		     CLRW, 0, 4, IMMED);
 
 		KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
 		    cipherdata->keylen, INLINE_KEY(cipherdata));
@@ -1573,11 +1260,6 @@ pdcp_insert_cplane_snow_aes_op(struct program *p,
 		MOVEB(p, MATH2, 0, CONTEXT1, 0, 8, IMMED);
 		SEQINPTR(p, 0, 0, RTO);
 
-		if (rta_sec_era == RTA_SEC_ERA_2 && era_2_sw_hfn_ovrd) {
-			SEQFIFOLOAD(p, SKIP, 5, 0);
-			MATHB(p, SEQINSZ, ADD, ONE, SEQINSZ, 4, 0);
-		}
-
 		MATHB(p, SEQINSZ, SUB, length, VSEQINSZ, 4, IMMED2);
 		ALG_OPERATION(p, OP_ALG_ALGSEL_SNOW_F8,
 			      OP_ALG_AAI_F8,
@@ -1586,10 +1268,7 @@ pdcp_insert_cplane_snow_aes_op(struct program *p,
 			      DIR_ENC);
 		SEQFIFOSTORE(p, MSG, 0, 0, VLF);
 
-		if (rta_sec_era > RTA_SEC_ERA_2 ||
-		    (rta_sec_era == RTA_SEC_ERA_2 &&
-				   era_2_sw_hfn_ovrd == 0))
-			SEQFIFOLOAD(p, SKIP, length, 0);
+		SEQFIFOLOAD(p, SKIP, length, 0);
 
 		SEQFIFOLOAD(p, MSG1, 0, VLF);
 		MOVEB(p, MATH3, 0, IFIFOAB1, 0, 4, LAST1 | FLUSH1 | IMMED);
@@ -1598,13 +1277,9 @@ pdcp_insert_cplane_snow_aes_op(struct program *p,
 	} else {
 		MOVEB(p, MATH2, 0, CONTEXT1, 0, 8, IMMED);
 
-		if (rta_sec_era >= RTA_SEC_ERA_5)
-			MOVE(p, CONTEXT1, 0, CONTEXT2, 0, 8, IMMED);
+		MOVE(p, CONTEXT1, 0, CONTEXT2, 0, 8, IMMED);
 
-		if (rta_sec_era > RTA_SEC_ERA_2)
-			MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
-		else
-			MATHB(p, SEQINSZ, SUB, MATH3, VSEQINSZ, 4, 0);
+		MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
 
 		MATHI(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, VSEQOUTSZ, 4, IMMED2);
 /*
@@ -1649,10 +1324,7 @@ pdcp_insert_cplane_snow_aes_op(struct program *p,
 		KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
 		    cipherdata->keylen, INLINE_KEY(cipherdata));
 
-		if (rta_sec_era >= RTA_SEC_ERA_4)
-			MOVE(p, CONTEXT1, 0, CONTEXT2, 0, 8, IMMED);
-		else
-			MOVE(p, CONTEXT1, 0, MATH3, 0, 8, IMMED);
+		MOVE(p, CONTEXT1, 0, CONTEXT2, 0, 8, IMMED);
 
 		ALG_OPERATION(p, OP_ALG_ALGSEL_SNOW_F8,
 			      OP_ALG_AAI_F8,
@@ -1662,22 +1334,15 @@ pdcp_insert_cplane_snow_aes_op(struct program *p,
 		SEQFIFOSTORE(p, MSG, 0, 0, VLF | CONT);
 		SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1 | FLUSH1);
 
-		if (rta_sec_era <= RTA_SEC_ERA_3)
-			move_mac_i_to_desc_buf = MOVE(p, OFIFO, 0, DESCBUF, 0,
-						      4, WAITCOMP | IMMED);
-		else
-			MOVE(p, OFIFO, 0, MATH3, 0, 4, IMMED);
+		MOVE(p, OFIFO, 0, MATH3, 0, 4, IMMED);
 
-		if (rta_sec_era <= RTA_SEC_ERA_3)
-			LOAD(p, CCTRL_RESET_CHA_ALL, CCTRL, 0, 4, IMMED);
-		else
-			LOAD(p, CLRW_RESET_CLS1_CHA |
-			     CLRW_CLR_C1KEY |
-			     CLRW_CLR_C1CTX |
-			     CLRW_CLR_C1ICV |
-			     CLRW_CLR_C1DATAS |
-			     CLRW_CLR_C1MODE,
-			     CLRW, 0, 4, IMMED);
+		LOAD(p, CLRW_RESET_CLS1_CHA |
+		     CLRW_CLR_C1KEY |
+		     CLRW_CLR_C1CTX |
+		     CLRW_CLR_C1ICV |
+		     CLRW_CLR_C1DATAS |
+		     CLRW_CLR_C1MODE,
+		     CLRW, 0, 4, IMMED);
 
 		KEY(p, KEY1, authdata->key_enc_flags, authdata->key,
 		    authdata->keylen, INLINE_KEY(authdata));
@@ -1698,28 +1363,17 @@ pdcp_insert_cplane_snow_aes_op(struct program *p,
 		/* Read the # of bytes written in the output buffer + 1 (HDR) */
 		MATHI(p, VSEQOUTSZ, ADD, length, VSEQINSZ, 4, IMMED2);
 
-		if (rta_sec_era <= RTA_SEC_ERA_3)
-			MOVE(p, MATH3, 0, IFIFOAB1, 0, 8, IMMED);
-		else
-			MOVE(p, CONTEXT2, 0, IFIFOAB1, 0, 8, IMMED);
-
-		if (rta_sec_era == RTA_SEC_ERA_2 && era_2_sw_hfn_ovrd)
-			SEQFIFOLOAD(p, SKIP, 4, 0);
+		MOVE(p, CONTEXT2, 0, IFIFOAB1, 0, 8, IMMED);
 
 		SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1 | FLUSH1);
 
-		if (rta_sec_era >= RTA_SEC_ERA_4) {
-			LOAD(p, NFIFOENTRY_STYPE_ALTSOURCE |
-			     NFIFOENTRY_DEST_CLASS1 |
-			     NFIFOENTRY_DTYPE_ICV |
-			     NFIFOENTRY_LC1 |
-			     NFIFOENTRY_FC1 | 4, NFIFO_SZL, 0, 4, IMMED);
-			MOVE(p, MATH3, 0, ALTSOURCE, 0, 4, IMMED);
-		} else {
-			SET_LABEL(p, fifo_load_mac_i_offset);
-			FIFOLOAD(p, ICV1, fifo_load_mac_i_offset, 4,
-				 LAST1 | FLUSH1 | IMMED);
-		}
+		LOAD(p, NFIFOENTRY_STYPE_ALTSOURCE |
+		     NFIFOENTRY_DEST_CLASS1 |
+		     NFIFOENTRY_DTYPE_ICV |
+		     NFIFOENTRY_LC1 |
+		     NFIFOENTRY_FC1 | 4, NFIFO_SZL, 0, 4, IMMED);
+		MOVE(p, MATH3, 0, ALTSOURCE, 0, 4, IMMED);
+
 
 		SET_LABEL(p, end_desc);
 
@@ -1727,18 +1381,10 @@ pdcp_insert_cplane_snow_aes_op(struct program *p,
 			PATCH_MOVE(p, seq_out_read, end_desc + 1);
 			PATCH_JUMP(p, jump_back_to_sd_cmd,
 				   back_to_sd_offset + jump_back_to_sd_cmd - 5);
-
-			if (rta_sec_era <= RTA_SEC_ERA_3)
-				PATCH_MOVE(p, move_mac_i_to_desc_buf,
-					   fifo_load_mac_i_offset + 1);
 		} else {
 			PATCH_MOVE(p, seq_out_read, end_desc + 2);
 			PATCH_JUMP(p, jump_back_to_sd_cmd,
 				   back_to_sd_offset + jump_back_to_sd_cmd - 5);
-
-			if (rta_sec_era <= RTA_SEC_ERA_3)
-				PATCH_MOVE(p, move_mac_i_to_desc_buf,
-					   fifo_load_mac_i_offset + 1);
 		}
 	}
 
@@ -1751,8 +1397,7 @@ pdcp_insert_cplane_aes_snow_op(struct program *p,
 			       struct alginfo *cipherdata,
 			       struct alginfo *authdata,
 			       unsigned int dir,
-			       enum pdcp_sn_size sn_size,
-			       unsigned char era_2_sw_hfn_ovrd __maybe_unused)
+			       enum pdcp_sn_size sn_size)
 {
 	uint32_t offset = 0, length = 0, sn_mask = 0;
 
@@ -1761,7 +1406,7 @@ pdcp_insert_cplane_aes_snow_op(struct program *p,
 	KEY(p, KEY2, authdata->key_enc_flags, authdata->key, authdata->keylen,
 	    INLINE_KEY(authdata));
 
-	if ((rta_sec_era >= RTA_SEC_ERA_8 && sn_size != PDCP_SN_SIZE_18) ||
+	if ((sn_size != PDCP_SN_SIZE_18) ||
 		(rta_sec_era == RTA_SEC_ERA_10)) {
 		int pclid;
 
@@ -1860,20 +1505,13 @@ pdcp_insert_cplane_aes_snow_op(struct program *p,
 		SEQFIFOLOAD(p, MSG1, 4, LAST1 | FLUSH1);
 		JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CLASS1 | NOP | NIFP);
 
-		if (rta_sec_era >= RTA_SEC_ERA_6)
-			LOAD(p, 0, DCTRL, 0, LDLEN_RST_CHA_OFIFO_PTR, IMMED);
+		LOAD(p, 0, DCTRL, 0, LDLEN_RST_CHA_OFIFO_PTR, IMMED);
 
 		MOVE(p, OFIFO, 0, MATH0, 0, 4, WAITCOMP | IMMED);
 
 		NFIFOADD(p, IFIFO, ICV2, 4, LAST2);
 
-		if (rta_sec_era <= RTA_SEC_ERA_2) {
-			/* Shut off automatic Info FIFO entries */
-			LOAD(p, 0, DCTRL, LDOFF_DISABLE_AUTO_NFIFO, 0, IMMED);
-			MOVE(p, MATH0, 0, IFIFOAB2, 0, 4, WAITCOMP | IMMED);
-		} else {
-			MOVE(p, MATH0, 0, IFIFO, 0, 4, WAITCOMP | IMMED);
-		}
+		MOVE(p, MATH0, 0, IFIFO, 0, 4, WAITCOMP | IMMED);
 	}
 
 	return 0;
@@ -1885,20 +1523,14 @@ pdcp_insert_cplane_snow_zuc_op(struct program *p,
 			       struct alginfo *cipherdata,
 			       struct alginfo *authdata,
 			       unsigned int dir,
-			       enum pdcp_sn_size sn_size,
-			       unsigned char era_2_sw_hfn_ovrd __maybe_unused)
+			       enum pdcp_sn_size sn_size)
 {
 	uint32_t offset = 0, length = 0, sn_mask = 0;
 
 	LABEL(keyjump);
 	REFERENCE(pkeyjump);
 
-	if (rta_sec_era < RTA_SEC_ERA_5) {
-		pr_err("Invalid era for selected algorithm\n");
-		return -ENOTSUP;
-	}
-
-	if ((rta_sec_era >= RTA_SEC_ERA_8 && sn_size != PDCP_SN_SIZE_18) ||
+	if ((sn_size != PDCP_SN_SIZE_18) ||
 		(rta_sec_era == RTA_SEC_ERA_10)) {
 		int pclid;
 		KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
@@ -2010,19 +1642,13 @@ pdcp_insert_cplane_aes_zuc_op(struct program *p,
 			      struct alginfo *cipherdata,
 			      struct alginfo *authdata,
 			      unsigned int dir,
-			      enum pdcp_sn_size sn_size,
-			      unsigned char era_2_sw_hfn_ovrd __maybe_unused)
+			      enum pdcp_sn_size sn_size)
 {
 	uint32_t offset = 0, length = 0, sn_mask = 0;
 	LABEL(keyjump);
 	REFERENCE(pkeyjump);
 
-	if (rta_sec_era < RTA_SEC_ERA_5) {
-		pr_err("Invalid era for selected algorithm\n");
-		return -ENOTSUP;
-	}
-
-	if ((rta_sec_era >= RTA_SEC_ERA_8 && sn_size != PDCP_SN_SIZE_18) ||
+	if ((sn_size != PDCP_SN_SIZE_18) ||
 		(rta_sec_era == RTA_SEC_ERA_10)) {
 		int pclid;
 		KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
@@ -2138,19 +1764,13 @@ pdcp_insert_cplane_zuc_snow_op(struct program *p,
 			       struct alginfo *cipherdata,
 			       struct alginfo *authdata,
 			       unsigned int dir,
-			       enum pdcp_sn_size sn_size,
-			       unsigned char era_2_sw_hfn_ovrd __maybe_unused)
+			       enum pdcp_sn_size sn_size)
 {
 	uint32_t offset = 0, length = 0, sn_mask = 0;
 	LABEL(keyjump);
 	REFERENCE(pkeyjump);
 
-	if (rta_sec_era < RTA_SEC_ERA_5) {
-		pr_err("Invalid era for selected algorithm\n");
-		return -ENOTSUP;
-	}
-
-	if ((rta_sec_era >= RTA_SEC_ERA_8 && sn_size != PDCP_SN_SIZE_18) ||
+	if ((sn_size != PDCP_SN_SIZE_18) ||
 		(rta_sec_era == RTA_SEC_ERA_10)) {
 		int pclid;
 		KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
@@ -2259,13 +1879,12 @@ pdcp_insert_cplane_zuc_snow_op(struct program *p,
 		SEQFIFOLOAD(p, MSG1, 4, LAST1 | FLUSH1);
 		JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CLASS1 | NOP | NIFP);
 
-		if (rta_sec_era >= RTA_SEC_ERA_6)
-			/*
-			 * For SEC ERA 6, there's a problem with the OFIFO
-			 * pointer, and thus it needs to be reset here before
-			 * moving to M0.
-			 */
-			LOAD(p, 0, DCTRL, 0, LDLEN_RST_CHA_OFIFO_PTR, IMMED);
+		/*
+		 * For SEC ERA 6, there's a problem with the OFIFO
+		 * pointer, and thus it needs to be reset here before
+		 * moving to M0.
+		 */
+		LOAD(p, 0, DCTRL, 0, LDLEN_RST_CHA_OFIFO_PTR, IMMED);
 
 		/* Put ICV to M0 before sending it to C2 for comparison. */
 		MOVEB(p, OFIFO, 0, MATH0, 0, 4, WAITCOMP | IMMED);
@@ -2287,16 +1906,11 @@ pdcp_insert_cplane_zuc_aes_op(struct program *p,
 			      struct alginfo *cipherdata,
 			      struct alginfo *authdata,
 			      unsigned int dir,
-			      enum pdcp_sn_size sn_size,
-			      unsigned char era_2_sw_hfn_ovrd __maybe_unused)
+			      enum pdcp_sn_size sn_size)
 {
 	uint32_t offset = 0, length = 0, sn_mask = 0;
-	if (rta_sec_era < RTA_SEC_ERA_5) {
-		pr_err("Invalid era for selected algorithm\n");
-		return -ENOTSUP;
-	}
 
-	if ((rta_sec_era >= RTA_SEC_ERA_8 && sn_size != PDCP_SN_SIZE_18) ||
+	if ((sn_size != PDCP_SN_SIZE_18) ||
 		(rta_sec_era == RTA_SEC_ERA_10)) {
 		int pclid;
 
@@ -2459,7 +2073,7 @@ pdcp_insert_uplane_no_int_op(struct program *p,
 	KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
 	    cipherdata->keylen, INLINE_KEY(cipherdata));
 
-	if ((rta_sec_era >= RTA_SEC_ERA_8 && sn_size == PDCP_SN_SIZE_15) ||
+	if ((sn_size == PDCP_SN_SIZE_15) ||
 			(rta_sec_era >= RTA_SEC_ERA_10)) {
 		PROTOCOL(p, dir, OP_PCLID_LTE_PDCP_USER,
 			 (uint16_t)cipherdata->algtype);
@@ -2513,10 +2127,6 @@ pdcp_insert_uplane_no_int_op(struct program *p,
 		break;
 
 	case PDCP_CIPHER_TYPE_ZUC:
-		if (rta_sec_era < RTA_SEC_ERA_5) {
-			pr_err("Invalid era for selected algorithm\n");
-			return -ENOTSUP;
-		}
 		MOVEB(p, MATH2, 0, CONTEXT1, 0, 0x08, IMMED);
 		MOVEB(p, MATH2, 0, CONTEXT1, 0x08, 0x08, WAITCOMP | IMMED);
 
@@ -2546,7 +2156,6 @@ static inline int
 insert_hfn_ov_op(struct program *p,
 		 uint32_t shift,
 		 enum pdb_type_e pdb_type,
-		 unsigned char era_2_sw_hfn_ovrd,
 		 bool clear_dpovrd_at_end)
 {
 	uint32_t imm = PDCP_DPOVRD_HFN_OV_EN;
@@ -2554,9 +2163,6 @@ insert_hfn_ov_op(struct program *p,
 	LABEL(keyjump);
 	REFERENCE(pkeyjump);
 
-	if (rta_sec_era == RTA_SEC_ERA_2 && !era_2_sw_hfn_ovrd)
-		return 0;
-
 	switch (pdb_type) {
 	case PDCP_PDB_TYPE_NO_PDB:
 		/*
@@ -2579,26 +2185,16 @@ insert_hfn_ov_op(struct program *p,
 		return -EINVAL;
 	}
 
-	if (rta_sec_era > RTA_SEC_ERA_2) {
-		MATHB(p, DPOVRD, AND, imm, NONE, 8, IFB | IMMED2);
-	} else {
-		SEQLOAD(p, MATH0, 4, 4, 0);
-		JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CALM);
-		MATHB(p, MATH0, AND, imm, NONE, 8, IFB | IMMED2);
-		SEQSTORE(p, MATH0, 4, 4, 0);
-	}
+	MATHB(p, DPOVRD, AND, imm, NONE, 8, IFB | IMMED2);
 
 	pkeyjump = JUMP(p, keyjump, LOCAL_JUMP, ALL_TRUE, MATH_Z);
 
-	if (rta_sec_era > RTA_SEC_ERA_2)
-		MATHI(p, DPOVRD, LSHIFT, shift, MATH0, 4, IMMED2);
-	else
-		MATHB(p, MATH0, LSHIFT, shift, MATH0, 4, IMMED2);
+	MATHI(p, DPOVRD, LSHIFT, shift, MATH0, 4, IMMED2);
 
 	MATHB(p, MATH0, SHLD, MATH0, MATH0, 8, 0);
 	MOVE(p, MATH0, 0, DESCBUF, hfn_pdb_offset, 4, IMMED);
 
-	if (clear_dpovrd_at_end && (rta_sec_era >= RTA_SEC_ERA_8)) {
+	if (clear_dpovrd_at_end) {
 		/*
 		 * For ERA8, DPOVRD could be handled by the PROTOCOL command
 		 * itself. For now, this is not done. Thus, clear DPOVRD here
@@ -2621,97 +2217,28 @@ cnstr_pdcp_c_plane_pdb(struct program *p,
 		       enum pdcp_sn_size sn_size,
 		       unsigned char bearer,
 		       unsigned char direction,
-		       uint32_t hfn_threshold,
-		       struct alginfo *cipherdata,
-		       struct alginfo *authdata)
+		       uint32_t hfn_threshold)
 {
 	struct pdcp_pdb pdb;
-	enum pdb_type_e
-		pdb_mask[PDCP_CIPHER_TYPE_INVALID][PDCP_AUTH_TYPE_INVALID] = {
-			{	/* NULL */
-				PDCP_PDB_TYPE_NO_PDB,		/* NULL */
-				PDCP_PDB_TYPE_FULL_PDB,		/* SNOW f9 */
-				PDCP_PDB_TYPE_FULL_PDB,		/* AES CMAC */
-				PDCP_PDB_TYPE_FULL_PDB		/* ZUC-I */
-			},
-			{	/* SNOW f8 */
-				PDCP_PDB_TYPE_FULL_PDB,		/* NULL */
-				PDCP_PDB_TYPE_FULL_PDB,		/* SNOW f9 */
-				PDCP_PDB_TYPE_REDUCED_PDB,	/* AES CMAC */
-				PDCP_PDB_TYPE_REDUCED_PDB	/* ZUC-I */
-			},
-			{	/* AES CTR */
-				PDCP_PDB_TYPE_FULL_PDB,		/* NULL */
-				PDCP_PDB_TYPE_REDUCED_PDB,	/* SNOW f9 */
-				PDCP_PDB_TYPE_FULL_PDB,		/* AES CMAC */
-				PDCP_PDB_TYPE_REDUCED_PDB	/* ZUC-I */
-			},
-			{	/* ZUC-E */
-				PDCP_PDB_TYPE_FULL_PDB,		/* NULL */
-				PDCP_PDB_TYPE_REDUCED_PDB,	/* SNOW f9 */
-				PDCP_PDB_TYPE_REDUCED_PDB,	/* AES CMAC */
-				PDCP_PDB_TYPE_FULL_PDB		/* ZUC-I */
-			},
-	};
-
-	if (rta_sec_era >= RTA_SEC_ERA_8) {
-		memset(&pdb, 0x00, sizeof(struct pdcp_pdb));
-
-		/* To support 12-bit seq numbers, we use u-plane opt in pdb.
-		 * SEC supports 5-bit only with c-plane opt in pdb.
-		 */
-		if (sn_size == PDCP_SN_SIZE_12) {
-			pdb.hfn_res = hfn << PDCP_U_PLANE_PDB_LONG_SN_HFN_SHIFT;
-			pdb.bearer_dir_res = (uint32_t)
-				((bearer << PDCP_U_PLANE_PDB_BEARER_SHIFT) |
-				 (direction << PDCP_U_PLANE_PDB_DIR_SHIFT));
-
-			pdb.hfn_thr_res =
-			hfn_threshold << PDCP_U_PLANE_PDB_LONG_SN_HFN_THR_SHIFT;
-
-		} else {
-			/* This means 5-bit c-plane.
-			 * Here we use c-plane opt in pdb
-			 */
-
-			/* This is a HW issue. Bit 2 should be set to zero,
-			 * but it does not work this way. Override here.
-			 */
-			pdb.opt_res.rsvd = 0x00000002;
-
-			/* Copy relevant information from user to PDB */
-			pdb.hfn_res = hfn << PDCP_C_PLANE_PDB_HFN_SHIFT;
-			pdb.bearer_dir_res = (uint32_t)
-				((bearer << PDCP_C_PLANE_PDB_BEARER_SHIFT) |
-				(direction << PDCP_C_PLANE_PDB_DIR_SHIFT));
-			pdb.hfn_thr_res =
-			hfn_threshold << PDCP_C_PLANE_PDB_HFN_THR_SHIFT;
-		}
-
-		/* copy PDB in descriptor*/
-		__rta_out32(p, pdb.opt_res.opt);
-		__rta_out32(p, pdb.hfn_res);
-		__rta_out32(p, pdb.bearer_dir_res);
-		__rta_out32(p, pdb.hfn_thr_res);
 
-		return PDCP_PDB_TYPE_FULL_PDB;
-	}
+	memset(&pdb, 0x00, sizeof(struct pdcp_pdb));
 
-	switch (pdb_mask[cipherdata->algtype][authdata->algtype]) {
-	case PDCP_PDB_TYPE_NO_PDB:
-		break;
+	/* To support 12-bit seq numbers, we use u-plane opt in pdb.
+	 * SEC supports 5-bit only with c-plane opt in pdb.
+	 */
+	if (sn_size == PDCP_SN_SIZE_12) {
+		pdb.hfn_res = hfn << PDCP_U_PLANE_PDB_LONG_SN_HFN_SHIFT;
+		pdb.bearer_dir_res = (uint32_t)
+			((bearer << PDCP_U_PLANE_PDB_BEARER_SHIFT) |
+			 (direction << PDCP_U_PLANE_PDB_DIR_SHIFT));
 
-	case PDCP_PDB_TYPE_REDUCED_PDB:
-		__rta_out32(p, (hfn << PDCP_C_PLANE_PDB_HFN_SHIFT));
-		__rta_out32(p,
-			    (uint32_t)((bearer <<
-					PDCP_C_PLANE_PDB_BEARER_SHIFT) |
-					(direction <<
-					 PDCP_C_PLANE_PDB_DIR_SHIFT)));
-		break;
+		pdb.hfn_thr_res =
+		hfn_threshold << PDCP_U_PLANE_PDB_LONG_SN_HFN_THR_SHIFT;
 
-	case PDCP_PDB_TYPE_FULL_PDB:
-		memset(&pdb, 0x00, sizeof(struct pdcp_pdb));
+	} else {
+		/* This means 5-bit c-plane.
+		 * Here we use c-plane opt in pdb
+		 */
 
 		/* This is a HW issue. Bit 2 should be set to zero,
 		 * but it does not work this way. Override here.
@@ -2722,23 +2249,18 @@ cnstr_pdcp_c_plane_pdb(struct program *p,
 		pdb.hfn_res = hfn << PDCP_C_PLANE_PDB_HFN_SHIFT;
 		pdb.bearer_dir_res = (uint32_t)
 			((bearer << PDCP_C_PLANE_PDB_BEARER_SHIFT) |
-			 (direction << PDCP_C_PLANE_PDB_DIR_SHIFT));
+			(direction << PDCP_C_PLANE_PDB_DIR_SHIFT));
 		pdb.hfn_thr_res =
-			hfn_threshold << PDCP_C_PLANE_PDB_HFN_THR_SHIFT;
-
-		/* copy PDB in descriptor*/
-		__rta_out32(p, pdb.opt_res.opt);
-		__rta_out32(p, pdb.hfn_res);
-		__rta_out32(p, pdb.bearer_dir_res);
-		__rta_out32(p, pdb.hfn_thr_res);
-
-		break;
-
-	default:
-		return PDCP_PDB_TYPE_INVALID;
+		hfn_threshold << PDCP_C_PLANE_PDB_HFN_THR_SHIFT;
 	}
 
-	return pdb_mask[cipherdata->algtype][authdata->algtype];
+	/* copy PDB in descriptor*/
+	__rta_out32(p, pdb.opt_res.opt);
+	__rta_out32(p, pdb.hfn_res);
+	__rta_out32(p, pdb.bearer_dir_res);
+	__rta_out32(p, pdb.hfn_thr_res);
+
+	return PDCP_PDB_TYPE_FULL_PDB;
 }
 
 /*
@@ -2817,7 +2339,7 @@ cnstr_pdcp_u_plane_pdb(struct program *p,
 		pdb.hfn_thr_res =
 			hfn_threshold<<PDCP_U_PLANE_PDB_18BIT_SN_HFN_THR_SHIFT;
 
-		if (rta_sec_era <= RTA_SEC_ERA_8) {
+		if (rta_sec_era == RTA_SEC_ERA_8) {
 			if (cipherdata && authdata)
 				pdb_type = pdb_mask[cipherdata->algtype]
 						   [authdata->algtype];
@@ -2857,6 +2379,7 @@ cnstr_pdcp_u_plane_pdb(struct program *p,
 
 	return pdb_type;
 }
+
 /**
  * cnstr_shdsc_pdcp_c_plane_encap - Function for creating a PDCP Control Plane
  *                                  encapsulation descriptor.
@@ -2874,9 +2397,6 @@ cnstr_pdcp_u_plane_pdb(struct program *p,
  *              Valid algorithm values are those from cipher_type_pdcp enum.
  * @authdata: pointer to authentication transform definitions
  *            Valid algorithm values are those from auth_type_pdcp enum.
- * @era_2_sw_hfn_ovrd: if software HFN override mechanism is desired for
- *                     this descriptor. Note: Can only be used for
- *                     SEC ERA 2.
  * Return: size of descriptor written in words or negative number on error.
  *         Once the function returns, the value of this parameter can be used
  *         for reclaiming the space that wasn't used for the descriptor.
@@ -2895,14 +2415,12 @@ cnstr_shdsc_pdcp_c_plane_encap(uint32_t *descbuf,
 			       unsigned char direction,
 			       uint32_t hfn_threshold,
 			       struct alginfo *cipherdata,
-			       struct alginfo *authdata,
-			       unsigned char era_2_sw_hfn_ovrd)
+			       struct alginfo *authdata)
 {
 	static int
 		(*pdcp_cp_fp[PDCP_CIPHER_TYPE_INVALID][PDCP_AUTH_TYPE_INVALID])
 			(struct program*, bool swap, struct alginfo *,
-			 struct alginfo *, unsigned int, enum pdcp_sn_size,
-			unsigned char __maybe_unused) = {
+			 struct alginfo *, unsigned int dir, enum pdcp_sn_size) = {
 		{	/* NULL */
 			pdcp_insert_cplane_null_op,	/* NULL */
 			pdcp_insert_cplane_int_only_op,	/* SNOW f9 */
@@ -2961,11 +2479,6 @@ cnstr_shdsc_pdcp_c_plane_encap(uint32_t *descbuf,
 	int err;
 	LABEL(pdb_end);
 
-	if (rta_sec_era != RTA_SEC_ERA_2 && era_2_sw_hfn_ovrd) {
-		pr_err("Cannot select SW HFN override for other era than 2");
-		return -EINVAL;
-	}
-
 	if (sn_size != PDCP_SN_SIZE_12 && sn_size != PDCP_SN_SIZE_5) {
 		pr_err("C-plane supports only 5-bit and 12-bit sequence numbers\n");
 		return -EINVAL;
@@ -2984,14 +2497,11 @@ cnstr_shdsc_pdcp_c_plane_encap(uint32_t *descbuf,
 			sn_size,
 			bearer,
 			direction,
-			hfn_threshold,
-			cipherdata,
-			authdata);
+			hfn_threshold);
 
 	SET_LABEL(p, pdb_end);
 
-	err = insert_hfn_ov_op(p, sn_size, pdb_type,
-			       era_2_sw_hfn_ovrd, true);
+	err = insert_hfn_ov_op(p, sn_size, pdb_type, true);
 	if (err)
 		return err;
 
@@ -3000,8 +2510,7 @@ cnstr_shdsc_pdcp_c_plane_encap(uint32_t *descbuf,
 		cipherdata,
 		authdata,
 		OP_TYPE_ENCAP_PROTOCOL,
-		sn_size,
-		era_2_sw_hfn_ovrd);
+		sn_size);
 	if (err)
 		return err;
 
@@ -3027,9 +2536,6 @@ cnstr_shdsc_pdcp_c_plane_encap(uint32_t *descbuf,
  *              Valid algorithm values are those from cipher_type_pdcp enum.
  * @authdata: pointer to authentication transform definitions
  *            Valid algorithm values are those from auth_type_pdcp enum.
- * @era_2_sw_hfn_ovrd: if software HFN override mechanism is desired for
- *                     this descriptor. Note: Can only be used for
- *                     SEC ERA 2.
  *
  * Return: size of descriptor written in words or negative number on error.
  *         Once the function returns, the value of this parameter can be used
@@ -3049,14 +2555,12 @@ cnstr_shdsc_pdcp_c_plane_decap(uint32_t *descbuf,
 			       unsigned char direction,
 			       uint32_t hfn_threshold,
 			       struct alginfo *cipherdata,
-			       struct alginfo *authdata,
-			       unsigned char era_2_sw_hfn_ovrd)
+			       struct alginfo *authdata)
 {
 	static int
 		(*pdcp_cp_fp[PDCP_CIPHER_TYPE_INVALID][PDCP_AUTH_TYPE_INVALID])
 			(struct program*, bool swap, struct alginfo *,
-			 struct alginfo *, unsigned int, enum pdcp_sn_size,
-			 unsigned char) = {
+			 struct alginfo *, unsigned int dir, enum pdcp_sn_size) = {
 		{	/* NULL */
 			pdcp_insert_cplane_null_op,	/* NULL */
 			pdcp_insert_cplane_int_only_op,	/* SNOW f9 */
@@ -3115,11 +2619,6 @@ cnstr_shdsc_pdcp_c_plane_decap(uint32_t *descbuf,
 	int err;
 	LABEL(pdb_end);
 
-	if (rta_sec_era != RTA_SEC_ERA_2 && era_2_sw_hfn_ovrd) {
-		pr_err("Cannot select SW HFN override for other era than 2");
-		return -EINVAL;
-	}
-
 	if (sn_size != PDCP_SN_SIZE_12 && sn_size != PDCP_SN_SIZE_5) {
 		pr_err("C-plane supports only 5-bit and 12-bit sequence numbers\n");
 		return -EINVAL;
@@ -3138,14 +2637,11 @@ cnstr_shdsc_pdcp_c_plane_decap(uint32_t *descbuf,
 			sn_size,
 			bearer,
 			direction,
-			hfn_threshold,
-			cipherdata,
-			authdata);
+			hfn_threshold);
 
 	SET_LABEL(p, pdb_end);
 
-	err = insert_hfn_ov_op(p, sn_size, pdb_type,
-			       era_2_sw_hfn_ovrd, true);
+	err = insert_hfn_ov_op(p, sn_size, pdb_type, true);
 	if (err)
 		return err;
 
@@ -3154,8 +2650,7 @@ cnstr_shdsc_pdcp_c_plane_decap(uint32_t *descbuf,
 		cipherdata,
 		authdata,
 		OP_TYPE_DECAP_PROTOCOL,
-		sn_size,
-		era_2_sw_hfn_ovrd);
+		sn_size);
 	if (err)
 		return err;
 
@@ -3170,14 +2665,12 @@ pdcp_insert_uplane_with_int_op(struct program *p,
 			      struct alginfo *cipherdata,
 			      struct alginfo *authdata,
 			      enum pdcp_sn_size sn_size,
-			      unsigned char era_2_sw_hfn_ovrd,
 			      unsigned int dir)
 {
 	static int
 		(*pdcp_cp_fp[PDCP_CIPHER_TYPE_INVALID][PDCP_AUTH_TYPE_INVALID])
 			(struct program*, bool swap, struct alginfo *,
-			 struct alginfo *, unsigned int, enum pdcp_sn_size,
-			unsigned char __maybe_unused) = {
+			 struct alginfo *, unsigned int dir, enum pdcp_sn_size) = {
 		{	/* NULL */
 			pdcp_insert_cplane_null_op,	/* NULL */
 			pdcp_insert_cplane_int_only_op,	/* SNOW f9 */
@@ -3210,8 +2703,7 @@ pdcp_insert_uplane_with_int_op(struct program *p,
 		cipherdata,
 		authdata,
 		dir,
-		sn_size,
-		era_2_sw_hfn_ovrd);
+		sn_size);
 	if (err)
 		return err;
 
@@ -3234,9 +2726,6 @@ pdcp_insert_uplane_with_int_op(struct program *p,
  *                 keys should be renegotiated at the earliest convenience.
  * @cipherdata: pointer to block cipher transform definitions
  *              Valid algorithm values are those from cipher_type_pdcp enum.
- * @era_2_sw_hfn_ovrd: if software HFN override mechanism is desired for
- *                     this descriptor. Note: Can only be used for
- *                     SEC ERA 2.
  *
  * Return: size of descriptor written in words or negative number on error.
  *         Once the function returns, the value of this parameter can be used
@@ -3256,8 +2745,7 @@ cnstr_shdsc_pdcp_u_plane_encap(uint32_t *descbuf,
 			       unsigned short direction,
 			       uint32_t hfn_threshold,
 			       struct alginfo *cipherdata,
-			       struct alginfo *authdata,
-			       unsigned char era_2_sw_hfn_ovrd)
+			       struct alginfo *authdata)
 {
 	struct program prg;
 	struct program *p = &prg;
@@ -3292,16 +2780,6 @@ cnstr_shdsc_pdcp_u_plane_encap(uint32_t *descbuf,
 	};
 	LABEL(pdb_end);
 
-	if (rta_sec_era != RTA_SEC_ERA_2 && era_2_sw_hfn_ovrd) {
-		pr_err("Cannot select SW HFN ovrd for other era than 2");
-		return -EINVAL;
-	}
-
-	if (authdata && !authdata->algtype && rta_sec_era < RTA_SEC_ERA_8) {
-		pr_err("Cannot use u-plane auth with era < 8");
-		return -EINVAL;
-	}
-
 	PROGRAM_CNTXT_INIT(p, descbuf, 0);
 	if (swap)
 		PROGRAM_SET_BSWAP(p);
@@ -3321,7 +2799,7 @@ cnstr_shdsc_pdcp_u_plane_encap(uint32_t *descbuf,
 	}
 	SET_LABEL(p, pdb_end);
 
-	err = insert_hfn_ov_op(p, sn_size, pdb_type, era_2_sw_hfn_ovrd, true);
+	err = insert_hfn_ov_op(p, sn_size, pdb_type, true);
 	if (err)
 		return err;
 
@@ -3330,10 +2808,6 @@ cnstr_shdsc_pdcp_u_plane_encap(uint32_t *descbuf,
 	case PDCP_SN_SIZE_12:
 		switch (cipherdata->algtype) {
 		case PDCP_CIPHER_TYPE_ZUC:
-			if (rta_sec_era < RTA_SEC_ERA_5) {
-				pr_err("Invalid era for selected algorithm\n");
-				return -ENOTSUP;
-			}
 			/* fallthrough */
 		case PDCP_CIPHER_TYPE_AES:
 		case PDCP_CIPHER_TYPE_SNOW:
@@ -3342,7 +2816,7 @@ cnstr_shdsc_pdcp_u_plane_encap(uint32_t *descbuf,
 					authdata && authdata->algtype == 0){
 				err = pdcp_insert_uplane_with_int_op(p, swap,
 						cipherdata, authdata,
-						sn_size, era_2_sw_hfn_ovrd,
+						sn_size,
 						OP_TYPE_ENCAP_PROTOCOL);
 				if (err)
 					return err;
@@ -3388,7 +2862,7 @@ cnstr_shdsc_pdcp_u_plane_encap(uint32_t *descbuf,
 		if (authdata) {
 			err = pdcp_insert_uplane_with_int_op(p, swap,
 					cipherdata, authdata,
-					sn_size, era_2_sw_hfn_ovrd,
+					sn_size,
 					OP_TYPE_ENCAP_PROTOCOL);
 			if (err)
 				return err;
@@ -3437,9 +2911,6 @@ cnstr_shdsc_pdcp_u_plane_encap(uint32_t *descbuf,
  *                 keys should be renegotiated at the earliest convenience.
  * @cipherdata: pointer to block cipher transform definitions
  *              Valid algorithm values are those from cipher_type_pdcp enum.
- * @era_2_sw_hfn_ovrd: if software HFN override mechanism is desired for
- *                     this descriptor. Note: Can only be used for
- *                     SEC ERA 2.
  *
  * Return: size of descriptor written in words or negative number on error.
  *         Once the function returns, the value of this parameter can be used
@@ -3459,8 +2930,7 @@ cnstr_shdsc_pdcp_u_plane_decap(uint32_t *descbuf,
 			       unsigned short direction,
 			       uint32_t hfn_threshold,
 			       struct alginfo *cipherdata,
-			       struct alginfo *authdata,
-			       unsigned char era_2_sw_hfn_ovrd)
+			       struct alginfo *authdata)
 {
 	struct program prg;
 	struct program *p = &prg;
@@ -3496,16 +2966,6 @@ cnstr_shdsc_pdcp_u_plane_decap(uint32_t *descbuf,
 
 	LABEL(pdb_end);
 
-	if (rta_sec_era != RTA_SEC_ERA_2 && era_2_sw_hfn_ovrd) {
-		pr_err("Cannot select SW HFN override for other era than 2");
-		return -EINVAL;
-	}
-
-	if (authdata && !authdata->algtype && rta_sec_era < RTA_SEC_ERA_8) {
-		pr_err("Cannot use u-plane auth with era < 8");
-		return -EINVAL;
-	}
-
 	PROGRAM_CNTXT_INIT(p, descbuf, 0);
 	if (swap)
 		PROGRAM_SET_BSWAP(p);
@@ -3525,7 +2985,7 @@ cnstr_shdsc_pdcp_u_plane_decap(uint32_t *descbuf,
 	}
 	SET_LABEL(p, pdb_end);
 
-	err = insert_hfn_ov_op(p, sn_size, pdb_type, era_2_sw_hfn_ovrd, true);
+	err = insert_hfn_ov_op(p, sn_size, pdb_type, true);
 	if (err)
 		return err;
 
@@ -3534,10 +2994,6 @@ cnstr_shdsc_pdcp_u_plane_decap(uint32_t *descbuf,
 	case PDCP_SN_SIZE_12:
 		switch (cipherdata->algtype) {
 		case PDCP_CIPHER_TYPE_ZUC:
-			if (rta_sec_era < RTA_SEC_ERA_5) {
-				pr_err("Invalid era for selected algorithm\n");
-				return -ENOTSUP;
-			}
 			/* fallthrough */
 		case PDCP_CIPHER_TYPE_AES:
 		case PDCP_CIPHER_TYPE_SNOW:
@@ -3555,7 +3011,7 @@ cnstr_shdsc_pdcp_u_plane_decap(uint32_t *descbuf,
 			else if (authdata && authdata->algtype == 0) {
 				err = pdcp_insert_uplane_with_int_op(p, swap,
 						cipherdata, authdata,
-						sn_size, era_2_sw_hfn_ovrd,
+						sn_size,
 						OP_TYPE_DECAP_PROTOCOL);
 				if (err)
 					return err;
@@ -3589,7 +3045,7 @@ cnstr_shdsc_pdcp_u_plane_decap(uint32_t *descbuf,
 		if (authdata) {
 			err = pdcp_insert_uplane_with_int_op(p, swap,
 					cipherdata, authdata,
-					sn_size, era_2_sw_hfn_ovrd,
+					sn_size,
 					OP_TYPE_DECAP_PROTOCOL);
 			if (err)
 				return err;
@@ -3649,9 +3105,6 @@ cnstr_shdsc_pdcp_short_mac(uint32_t *descbuf,
 	struct program prg;
 	struct program *p = &prg;
 	uint32_t iv[3] = {0, 0, 0};
-	LABEL(local_offset);
-	REFERENCE(move_cmd_read_descbuf);
-	REFERENCE(move_cmd_write_descbuf);
 
 	PROGRAM_CNTXT_INIT(p, descbuf, 0);
 	if (swap)
@@ -3661,52 +3114,15 @@ cnstr_shdsc_pdcp_short_mac(uint32_t *descbuf,
 
 	SHR_HDR(p, SHR_ALWAYS, 1, 0);
 
-	if (rta_sec_era > RTA_SEC_ERA_2) {
-		MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
-		MATHB(p, SEQINSZ, SUB, ZERO, MATH1, 4, 0);
-	} else {
-		MATHB(p, SEQINSZ, ADD, ONE, MATH1, 4, 0);
-		MATHB(p, MATH1, SUB, ONE, MATH1, 4, 0);
-		MATHB(p, ZERO, ADD, MATH1, VSEQINSZ, 4, 0);
-		MOVE(p, MATH1, 0, MATH0, 0, 8, IMMED);
+	MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
+	MATHB(p, SEQINSZ, SUB, ZERO, MATH1, 4, 0);
 
-		/*
-		 * Since MOVELEN is available only starting with
-		 * SEC ERA 3, use poor man's MOVELEN: create a MOVE
-		 * command dynamically by writing the length from M1 by
-		 * OR-ing the command in the M1 register and MOVE the
-		 * result into the descriptor buffer. Care must be taken
-		 * wrt. the location of the command because of SEC
-		 * pipelining. The actual MOVEs are written at the end
-		 * of the descriptor due to calculations needed on the
-		 * offset in the descriptor for the MOVE command.
-		 */
-		move_cmd_read_descbuf = MOVE(p, DESCBUF, 0, MATH0, 0, 6,
-					     IMMED);
-		move_cmd_write_descbuf = MOVE(p, MATH0, 0, DESCBUF, 0, 8,
-					      WAITCOMP | IMMED);
-	}
 	MATHB(p, ZERO, ADD, MATH1, VSEQOUTSZ, 4, 0);
 
 	switch (authdata->algtype) {
 	case PDCP_AUTH_TYPE_NULL:
 		SEQFIFOSTORE(p, MSG, 0, 0, VLF);
-		if (rta_sec_era > RTA_SEC_ERA_2) {
-			MOVE(p, AB1, 0, OFIFO, 0, MATH1, 0);
-		} else {
-			SET_LABEL(p, local_offset);
-
-			/* Shut off automatic Info FIFO entries */
-			LOAD(p, 0, DCTRL, LDOFF_DISABLE_AUTO_NFIFO, 0, IMMED);
-
-			/* Placeholder for MOVE command with length from M1
-			 * register
-			 */
-			MOVE(p, IFIFOAB1, 0, OFIFO, 0, 0, IMMED);
-
-			/* Enable automatic Info FIFO entries */
-			LOAD(p, 0, DCTRL, LDOFF_ENABLE_AUTO_NFIFO, 0, IMMED);
-		}
+		MOVE(p, AB1, 0, OFIFO, 0, MATH1, 0);
 
 		LOAD(p, (uintptr_t)iv, MATH0, 0, 8, IMMED | COPY);
 		SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1 | LAST2 | FLUSH1);
@@ -3730,23 +3146,8 @@ cnstr_shdsc_pdcp_short_mac(uint32_t *descbuf,
 			      DIR_ENC);
 		SEQFIFOSTORE(p, MSG, 0, 0, VLF);
 
-		if (rta_sec_era > RTA_SEC_ERA_2) {
-			MOVE(p, AB1, 0, OFIFO, 0, MATH1, 0);
-		} else {
-			SET_LABEL(p, local_offset);
-
-
-			/* Shut off automatic Info FIFO entries */
-			LOAD(p, 0, DCTRL, LDOFF_DISABLE_AUTO_NFIFO, 0, IMMED);
-
-			/* Placeholder for MOVE command with length from M1
-			 * register
-			 */
-			MOVE(p, IFIFOAB1, 0, OFIFO, 0, 0, IMMED);
+		MOVE(p, AB1, 0, OFIFO, 0, MATH1, 0);
 
-			/* Enable automatic Info FIFO entries */
-			LOAD(p, 0, DCTRL, LDOFF_ENABLE_AUTO_NFIFO, 0, IMMED);
-		}
 		SEQFIFOLOAD(p, MSGINSNOOP, 0, VLF | LAST1 | LAST2 | FLUSH1);
 		SEQSTORE(p, CONTEXT2, 0, 4, 0);
 
@@ -3768,32 +3169,14 @@ cnstr_shdsc_pdcp_short_mac(uint32_t *descbuf,
 			      DIR_ENC);
 		SEQFIFOSTORE(p, MSG, 0, 0, VLF);
 
-		if (rta_sec_era > RTA_SEC_ERA_2) {
-			MOVE(p, AB2, 0, OFIFO, 0, MATH1, 0);
-		} else {
-			SET_LABEL(p, local_offset);
-
-			/* Shut off automatic Info FIFO entries */
-			LOAD(p, 0, DCTRL, LDOFF_DISABLE_AUTO_NFIFO, 0, IMMED);
+		MOVE(p, AB2, 0, OFIFO, 0, MATH1, 0);
 
-			/* Placeholder for MOVE command with length from M1
-			 * register
-			 */
-			MOVE(p, IFIFOAB2, 0, OFIFO, 0, 0, IMMED);
-
-			/* Enable automatic Info FIFO entries */
-			LOAD(p, 0, DCTRL, LDOFF_ENABLE_AUTO_NFIFO, 0, IMMED);
-		}
 		SEQFIFOLOAD(p, MSGINSNOOP, 0, VLF | LAST1 | LAST2 | FLUSH1);
 		SEQSTORE(p, CONTEXT1, 0, 4, 0);
 
 		break;
 
 	case PDCP_AUTH_TYPE_ZUC:
-		if (rta_sec_era < RTA_SEC_ERA_5) {
-			pr_err("Invalid era for selected algorithm\n");
-			return -ENOTSUP;
-		}
 		iv[0] = 0xFFFFFFFF;
 		iv[1] = swap ? swab32(0xFC000000) : 0xFC000000;
 		iv[2] = 0x00000000; /* unused */
@@ -3819,12 +3202,6 @@ cnstr_shdsc_pdcp_short_mac(uint32_t *descbuf,
 		return -EINVAL;
 	}
 
-
-	if (rta_sec_era < RTA_SEC_ERA_3) {
-		PATCH_MOVE(p, move_cmd_read_descbuf, local_offset);
-		PATCH_MOVE(p, move_cmd_write_descbuf, local_offset);
-	}
-
 	return PROGRAM_FINALIZE(p);
 }
 
diff --git a/drivers/common/dpaax/caamflib/desc/sdap.h b/drivers/common/dpaax/caamflib/desc/sdap.h
index b2497a5424..ee03e95990 100644
--- a/drivers/common/dpaax/caamflib/desc/sdap.h
+++ b/drivers/common/dpaax/caamflib/desc/sdap.h
@@ -225,10 +225,6 @@ static inline int pdcp_sdap_insert_no_int_op(struct program *p,
 		break;
 
 	case PDCP_CIPHER_TYPE_ZUC:
-		if (rta_sec_era < RTA_SEC_ERA_5) {
-			pr_err("Invalid era for selected algorithm\n");
-			return -ENOTSUP;
-		}
 		/* The LSB and MSB is the same for ZUC context */
 		MOVEB(p, MATH2, 0, CONTEXT1, 0, 0x08, IMMED);
 		MOVEB(p, MATH2, 0, CONTEXT1, 0x08, 0x08, WAITCOMP | IMMED);
@@ -253,7 +249,6 @@ pdcp_sdap_insert_enc_only_op(struct program *p, bool swap __maybe_unused,
 			     struct alginfo *cipherdata,
 			     struct alginfo *authdata __maybe_unused,
 			     unsigned int dir, enum pdcp_sn_size sn_size,
-			     unsigned char era_2_sw_hfn_ovrd __maybe_unused,
 			     enum pdb_type_e pdb_type)
 {
 	uint32_t offset = 0, length = 0, sn_mask = 0;
@@ -293,12 +288,7 @@ pdcp_sdap_insert_enc_only_op(struct program *p, bool swap __maybe_unused,
 	/* Write header */
 	SEQSTORE(p, MATH0, offset, length, 0);
 
-	if (rta_sec_era > RTA_SEC_ERA_2) {
-		MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
-	} else {
-		MATHB(p, SEQINSZ, SUB, ONE, MATH1, 4, 0);
-		MATHB(p, MATH1, ADD, ONE, VSEQINSZ, 4, 0);
-	}
+	MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
 
 	if (dir == OP_TYPE_ENCAP_PROTOCOL)
 		MATHB(p, SEQINSZ, ADD, PDCP_MAC_I_LEN, VSEQOUTSZ, 4, IMMED2);
@@ -326,11 +316,6 @@ pdcp_sdap_insert_enc_only_op(struct program *p, bool swap __maybe_unused,
 		break;
 
 	case PDCP_CIPHER_TYPE_ZUC:
-		if (rta_sec_era < RTA_SEC_ERA_5) {
-			pr_err("Invalid era for selected algorithm\n");
-			return -ENOTSUP;
-		}
-
 		MOVEB(p, MATH2, 0, CONTEXT1, 0, 0x08, IMMED);
 		MOVEB(p, MATH2, 0, CONTEXT1, 0x08, 0x08, WAITCOMP | IMMED);
 
@@ -378,7 +363,6 @@ static inline int
 pdcp_sdap_insert_snoop_op(struct program *p, bool swap __maybe_unused,
 			  struct alginfo *cipherdata, struct alginfo *authdata,
 			  unsigned int dir, enum pdcp_sn_size sn_size,
-			  unsigned char era_2_sw_hfn_ovrd __maybe_unused,
 			  enum pdb_type_e pdb_type)
 {
 	uint32_t offset = 0, length = 0, sn_mask = 0;
@@ -391,13 +375,6 @@ pdcp_sdap_insert_snoop_op(struct program *p, bool swap __maybe_unused,
 			FULL_PDB_DESCBUF_HFN_BEARER_DIR_OFFSET :
 			REDUCED_PDB_DESCBUF_HFN_BEARER_DIR_OFFSET;
 
-	if (authdata->algtype == PDCP_CIPHER_TYPE_ZUC) {
-		if (rta_sec_era < RTA_SEC_ERA_5) {
-			pr_err("Invalid era for selected algorithm\n");
-			return -ENOTSUP;
-		}
-	}
-
 	if (pdcp_sdap_get_sn_parameters(sn_size, swap, &offset, &length,
 					&sn_mask))
 		return -ENOTSUP;
@@ -588,8 +565,7 @@ pdcp_sdap_insert_snoop_op(struct program *p, bool swap __maybe_unused,
 		 */
 		JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CLASS1 | NOP | NIFP);
 
-		if (rta_sec_era >= RTA_SEC_ERA_6)
-			LOAD(p, 0, DCTRL, 0, LDLEN_RST_CHA_OFIFO_PTR, IMMED);
+		LOAD(p, 0, DCTRL, 0, LDLEN_RST_CHA_OFIFO_PTR, IMMED);
 
 		/* Save the content left in the Output FIFO (the ICV) to MATH0
 		 */
@@ -604,13 +580,7 @@ pdcp_sdap_insert_snoop_op(struct program *p, bool swap __maybe_unused,
 		 * Note: As configured by the altsource, this will send
 		 * the
 		 */
-		if (rta_sec_era <= RTA_SEC_ERA_2) {
-			/* Shut off automatic Info FIFO entries */
-			LOAD(p, 0, DCTRL, LDOFF_DISABLE_AUTO_NFIFO, 0, IMMED);
-			MOVE(p, MATH0, 0, IFIFOAB2, 0, 4, WAITCOMP | IMMED);
-		} else {
-			MOVE(p, MATH0, 0, IFIFO, 0, 4, WAITCOMP | IMMED);
-		}
+		MOVE(p, MATH0, 0, IFIFO, 0, 4, WAITCOMP | IMMED);
 	}
 
 	if (authdata->algtype == PDCP_CIPHER_TYPE_ZUC) {
@@ -638,7 +608,6 @@ pdcp_sdap_insert_snoop_op(struct program *p, bool swap __maybe_unused,
 static inline int pdcp_sdap_insert_no_snoop_op(
 	struct program *p, bool swap __maybe_unused, struct alginfo *cipherdata,
 	struct alginfo *authdata, unsigned int dir, enum pdcp_sn_size sn_size,
-	unsigned char era_2_sw_hfn_ovrd __maybe_unused,
 	enum pdb_type_e pdb_type)
 {
 	uint32_t offset = 0, length = 0, sn_mask = 0;
@@ -649,13 +618,6 @@ static inline int pdcp_sdap_insert_no_snoop_op(
 			FULL_PDB_DESCBUF_HFN_BEARER_DIR_OFFSET :
 			REDUCED_PDB_DESCBUF_HFN_BEARER_DIR_OFFSET;
 
-	if (authdata->algtype == PDCP_CIPHER_TYPE_ZUC) {
-		if (rta_sec_era < RTA_SEC_ERA_5) {
-			pr_err("Invalid era for selected algorithm\n");
-			return -ENOTSUP;
-		}
-	}
-
 	if (pdcp_sdap_get_sn_parameters(sn_size, swap, &offset, &length,
 					&sn_mask))
 		return -ENOTSUP;
@@ -842,11 +804,10 @@ pdcp_sdap_insert_cplane_null_op(struct program *p,
 			   struct alginfo *authdata,
 			   unsigned int dir,
 			   enum pdcp_sn_size sn_size,
-			   unsigned char era_2_sw_hfn_ovrd,
 			   enum pdb_type_e pdb_type __maybe_unused)
 {
 	return pdcp_insert_cplane_null_op(p, swap, cipherdata, authdata, dir,
-					  sn_size, era_2_sw_hfn_ovrd);
+					  sn_size);
 }
 
 static inline int
@@ -856,24 +817,22 @@ pdcp_sdap_insert_cplane_int_only_op(struct program *p,
 			   struct alginfo *authdata,
 			   unsigned int dir,
 			   enum pdcp_sn_size sn_size,
-			   unsigned char era_2_sw_hfn_ovrd,
 			   enum pdb_type_e pdb_type __maybe_unused)
 {
 	return pdcp_insert_cplane_int_only_op(p, swap, cipherdata, authdata,
-				dir, sn_size, era_2_sw_hfn_ovrd);
+				dir, sn_size);
 }
 
 static int pdcp_sdap_insert_with_int_op(
 	struct program *p, bool swap __maybe_unused, struct alginfo *cipherdata,
 	struct alginfo *authdata, enum pdcp_sn_size sn_size,
-	unsigned char era_2_sw_hfn_ovrd, unsigned int dir,
+	unsigned int dir,
 	enum pdb_type_e pdb_type)
 {
 	static int (
 		*pdcp_cp_fp[PDCP_CIPHER_TYPE_INVALID][PDCP_AUTH_TYPE_INVALID])(
 		struct program *, bool swap, struct alginfo *, struct alginfo *,
-		unsigned int, enum pdcp_sn_size,
-		unsigned char __maybe_unused, enum pdb_type_e pdb_type) = {
+		unsigned int dir, enum pdcp_sn_size, enum pdb_type_e pdb_type) = {
 		{
 			/* NULL */
 			pdcp_sdap_insert_cplane_null_op,     /* NULL */
@@ -907,7 +866,7 @@ static int pdcp_sdap_insert_with_int_op(
 
 	err = pdcp_cp_fp[cipherdata->algtype]
 			[authdata->algtype](p, swap, cipherdata, authdata, dir,
-					sn_size, era_2_sw_hfn_ovrd, pdb_type);
+					sn_size, pdb_type);
 	if (err)
 		return err;
 
@@ -925,7 +884,6 @@ cnstr_shdsc_pdcp_sdap_u_plane(uint32_t *descbuf,
 			       uint32_t hfn_threshold,
 			       struct alginfo *cipherdata,
 			       struct alginfo *authdata,
-			       unsigned char era_2_sw_hfn_ovrd,
 			       uint32_t caps_mode)
 {
 	struct program prg;
@@ -966,12 +924,6 @@ cnstr_shdsc_pdcp_sdap_u_plane(uint32_t *descbuf,
 
 	LABEL(pdb_end);
 
-	/* Check HFN override for ERA 2 */
-	if (rta_sec_era != RTA_SEC_ERA_2 && era_2_sw_hfn_ovrd) {
-		pr_err("Cannot select SW HFN ovrd for other era than 2");
-		return -EINVAL;
-	}
-
 	/* Check the confidentiality algorithm is supported by the code */
 	switch (cipherdata->algtype) {
 	case PDCP_CIPHER_TYPE_NULL:
@@ -1013,14 +965,6 @@ cnstr_shdsc_pdcp_sdap_u_plane(uint32_t *descbuf,
 		return -ENOTSUP;
 	}
 
-	/* Check that we are not performing ZUC algo on old platforms */
-	if (cipherdata->algtype == PDCP_CIPHER_TYPE_ZUC &&
-			rta_sec_era < RTA_SEC_ERA_5) {
-		pr_err("ZUC algorithm not supported for era: %d\n",
-				rta_sec_era);
-		return -ENOTSUP;
-	}
-
 	/* Initialize the program */
 	PROGRAM_CNTXT_INIT(p, descbuf, 0);
 
@@ -1047,7 +991,7 @@ cnstr_shdsc_pdcp_sdap_u_plane(uint32_t *descbuf,
 	SET_LABEL(p, pdb_end);
 
 	/* Inser the HFN override operation */
-	err = insert_hfn_ov_op(p, sn_size, pdb_type, era_2_sw_hfn_ovrd, false);
+	err = insert_hfn_ov_op(p, sn_size, pdb_type, false);
 	if (err)
 		return err;
 
@@ -1068,7 +1012,6 @@ cnstr_shdsc_pdcp_sdap_u_plane(uint32_t *descbuf,
 	} else {
 		err = pdcp_sdap_insert_with_int_op(p, swap, cipherdata,
 						   authdata, sn_size,
-						   era_2_sw_hfn_ovrd,
 						   caps_mode, pdb_type);
 		if (err) {
 			pr_err("Fail pdcp_sdap_insert_with_int_op\n");
@@ -1096,9 +1039,6 @@ cnstr_shdsc_pdcp_sdap_u_plane(uint32_t *descbuf,
  *                 keys should be renegotiated at the earliest convenience.
  * @cipherdata: pointer to block cipher transform definitions
  *              Valid algorithm values are those from cipher_type_pdcp enum.
- * @era_2_sw_hfn_ovrd: if software HFN override mechanism is desired for
- *                     this descriptor. Note: Can only be used for
- *                     SEC ERA 2.
  *
  * Return: size of descriptor written in words or negative number on error.
  *         Once the function returns, the value of this parameter can be used
@@ -1118,12 +1058,11 @@ cnstr_shdsc_pdcp_sdap_u_plane_encap(uint32_t *descbuf,
 			       unsigned short direction,
 			       uint32_t hfn_threshold,
 			       struct alginfo *cipherdata,
-			       struct alginfo *authdata,
-			       unsigned char era_2_sw_hfn_ovrd)
+			       struct alginfo *authdata)
 {
 	return cnstr_shdsc_pdcp_sdap_u_plane(descbuf, ps, swap, sn_size,
 			hfn, bearer, direction, hfn_threshold, cipherdata,
-			authdata, era_2_sw_hfn_ovrd, OP_TYPE_ENCAP_PROTOCOL);
+			authdata, OP_TYPE_ENCAP_PROTOCOL);
 }
 
 /**
@@ -1141,9 +1080,6 @@ cnstr_shdsc_pdcp_sdap_u_plane_encap(uint32_t *descbuf,
  *                 keys should be renegotiated at the earliest convenience.
  * @cipherdata: pointer to block cipher transform definitions
  *              Valid algorithm values are those from cipher_type_pdcp enum.
- * @era_2_sw_hfn_ovrd: if software HFN override mechanism is desired for
- *                     this descriptor. Note: Can only be used for
- *                     SEC ERA 2.
  *
  * Return: size of descriptor written in words or negative number on error.
  *         Once the function returns, the value of this parameter can be used
@@ -1163,12 +1099,11 @@ cnstr_shdsc_pdcp_sdap_u_plane_decap(uint32_t *descbuf,
 			       unsigned short direction,
 			       uint32_t hfn_threshold,
 			       struct alginfo *cipherdata,
-			       struct alginfo *authdata,
-			       unsigned char era_2_sw_hfn_ovrd)
+			       struct alginfo *authdata)
 {
 	return cnstr_shdsc_pdcp_sdap_u_plane(descbuf, ps, swap, sn_size, hfn,
 			bearer, direction, hfn_threshold, cipherdata, authdata,
-			era_2_sw_hfn_ovrd, OP_TYPE_DECAP_PROTOCOL);
+			OP_TYPE_DECAP_PROTOCOL);
 }
 
 #endif /* __DESC_SDAP_H__ */
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
index a5b052375d..1e6b3e548a 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
@@ -3297,8 +3297,7 @@ dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
 					pdcp_xform->bearer,
 					pdcp_xform->pkt_dir,
 					pdcp_xform->hfn_threshold,
-					&cipherdata, &authdata,
-					0);
+					&cipherdata, &authdata);
 		else if (session->dir == DIR_DEC)
 			bufsize = cnstr_shdsc_pdcp_c_plane_decap(
 					priv->flc_desc[0].desc, 1, swap,
@@ -3307,8 +3306,7 @@ dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
 					pdcp_xform->bearer,
 					pdcp_xform->pkt_dir,
 					pdcp_xform->hfn_threshold,
-					&cipherdata, &authdata,
-					0);
+					&cipherdata, &authdata);
 
 	} else if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_SHORT_MAC) {
 		bufsize = cnstr_shdsc_pdcp_short_mac(priv->flc_desc[0].desc,
@@ -3323,7 +3321,7 @@ dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
 					pdcp_xform->bearer,
 					pdcp_xform->pkt_dir,
 					pdcp_xform->hfn_threshold,
-					&cipherdata, p_authdata, 0);
+					&cipherdata, p_authdata);
 			else
 				bufsize = cnstr_shdsc_pdcp_u_plane_encap(
 					priv->flc_desc[0].desc, 1, swap,
@@ -3332,7 +3330,7 @@ dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
 					pdcp_xform->bearer,
 					pdcp_xform->pkt_dir,
 					pdcp_xform->hfn_threshold,
-					&cipherdata, p_authdata, 0);
+					&cipherdata, p_authdata);
 		} else if (session->dir == DIR_DEC) {
 			if (pdcp_xform->sdap_enabled)
 				bufsize = cnstr_shdsc_pdcp_sdap_u_plane_decap(
@@ -3342,7 +3340,7 @@ dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
 					pdcp_xform->bearer,
 					pdcp_xform->pkt_dir,
 					pdcp_xform->hfn_threshold,
-					&cipherdata, p_authdata, 0);
+					&cipherdata, p_authdata);
 			else
 				bufsize = cnstr_shdsc_pdcp_u_plane_decap(
 					priv->flc_desc[0].desc, 1, swap,
@@ -3351,7 +3349,7 @@ dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
 					pdcp_xform->bearer,
 					pdcp_xform->pkt_dir,
 					pdcp_xform->hfn_threshold,
-					&cipherdata, p_authdata, 0);
+					&cipherdata, p_authdata);
 		}
 	}
 
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.c b/drivers/crypto/dpaa_sec/dpaa_sec.c
index a552e64506..1dedd9eee5 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.c
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.c
@@ -296,8 +296,7 @@ dpaa_sec_prep_pdcp_cdb(dpaa_sec_session *ses)
 					ses->pdcp.bearer,
 					ses->pdcp.pkt_dir,
 					ses->pdcp.hfn_threshold,
-					&cipherdata, &authdata,
-					0);
+					&cipherdata, &authdata);
 		else if (ses->dir == DIR_DEC)
 			shared_desc_len = cnstr_shdsc_pdcp_c_plane_decap(
 					cdb->sh_desc, 1, swap,
@@ -306,8 +305,7 @@ dpaa_sec_prep_pdcp_cdb(dpaa_sec_session *ses)
 					ses->pdcp.bearer,
 					ses->pdcp.pkt_dir,
 					ses->pdcp.hfn_threshold,
-					&cipherdata, &authdata,
-					0);
+					&cipherdata, &authdata);
 	} else if (ses->pdcp.domain == RTE_SECURITY_PDCP_MODE_SHORT_MAC) {
 		shared_desc_len = cnstr_shdsc_pdcp_short_mac(cdb->sh_desc,
 						     1, swap, &authdata);
@@ -322,7 +320,7 @@ dpaa_sec_prep_pdcp_cdb(dpaa_sec_session *ses)
 						ses->pdcp.bearer,
 						ses->pdcp.pkt_dir,
 						ses->pdcp.hfn_threshold,
-						&cipherdata, p_authdata, 0);
+						&cipherdata, p_authdata);
 			else
 				shared_desc_len =
 					cnstr_shdsc_pdcp_u_plane_encap(
@@ -332,7 +330,7 @@ dpaa_sec_prep_pdcp_cdb(dpaa_sec_session *ses)
 						ses->pdcp.bearer,
 						ses->pdcp.pkt_dir,
 						ses->pdcp.hfn_threshold,
-						&cipherdata, p_authdata, 0);
+						&cipherdata, p_authdata);
 		} else if (ses->dir == DIR_DEC) {
 			if (ses->pdcp.sdap_enabled)
 				shared_desc_len =
@@ -343,7 +341,7 @@ dpaa_sec_prep_pdcp_cdb(dpaa_sec_session *ses)
 						ses->pdcp.bearer,
 						ses->pdcp.pkt_dir,
 						ses->pdcp.hfn_threshold,
-						&cipherdata, p_authdata, 0);
+						&cipherdata, p_authdata);
 			else
 				shared_desc_len =
 					cnstr_shdsc_pdcp_u_plane_decap(
@@ -353,7 +351,7 @@ dpaa_sec_prep_pdcp_cdb(dpaa_sec_session *ses)
 						ses->pdcp.bearer,
 						ses->pdcp.pkt_dir,
 						ses->pdcp.hfn_threshold,
-						&cipherdata, p_authdata, 0);
+						&cipherdata, p_authdata);
 		}
 	}
 	return shared_desc_len;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 42+ messages in thread
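A minimal caller sketch, assuming the post-patch prototypes in
drivers/common/dpaax/caamflib/desc/pdcp.h; the buffer size, HFN values and
algorithm selections below are illustrative only (keys omitted), and the
argument order follows the driver call sites updated above:

#include "desc/pdcp.h"	/* caamflib PDCP descriptor constructors */

static int build_cplane_encap_desc(void)
{
	uint32_t descbuf[64];	/* shared descriptor buffer, in words */
	struct alginfo cipherdata = { .algtype = PDCP_CIPHER_TYPE_SNOW };
	struct alginfo authdata = { .algtype = PDCP_AUTH_TYPE_SNOW };
	int bufsize;

	/* era_2_sw_hfn_ovrd is gone from the argument list */
	bufsize = cnstr_shdsc_pdcp_c_plane_encap(descbuf,
			/* ps */ true, /* swap */ true,
			/* hfn */ 0, PDCP_SN_SIZE_12,
			/* bearer */ 0, /* direction */ 0,
			/* hfn_threshold */ 0x00fffff0,
			&cipherdata, &authdata);

	return bufsize;	/* words written, or a negative error code */
}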

* [PATCH v3 2/7] common/dpaax: change job processing mode for PDCP SDAP
  2022-02-10  4:31     ` [PATCH v3 0/7] NXP crypto drivers changes Gagandeep Singh
  2022-02-10  4:31       ` [PATCH v3 1/7] common/dpaax: caamflib: Remove code related to SEC ERA 1 to 7 Gagandeep Singh
@ 2022-02-10  4:31       ` Gagandeep Singh
  2022-02-10  4:31       ` [PATCH v3 3/7] crypto/dpaa2_sec: change capabilities for AES_CMAC Gagandeep Singh
                         ` (5 subsequent siblings)
  7 siblings, 0 replies; 42+ messages in thread
From: Gagandeep Singh @ 2022-02-10  4:31 UTC (permalink / raw)
  To: gakhil, dev; +Cc: Gagandeep Singh

For PDCP SDAP test cases, the HW SEC engine processes the jobs in
WAIT (SHR_WAIT) descriptor-sharing mode instead of SHR_ALWAYS.
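
A minimal sketch of what the change amounts to, assuming the caamflib
SHR_WAIT/SHR_ALWAYS sharing constants; the helper below is hypothetical
and only documents the intent (every SDAP cipher/auth pairing now
resolves to SHR_WAIT):

/* Hypothetical helper, not part of this patch: pick the CAAM
 * shared-descriptor sharing mode for a PDCP SDAP u-plane session.
 * SHR_WAIT makes a job wait until the shared descriptor is free;
 * SHR_ALWAYS would share it unconditionally.
 */
static inline int
sdap_desc_share_mode(enum cipher_type_pdcp cipher, enum auth_type_pdcp auth)
{
	(void)cipher;
	(void)auth;
	return SHR_WAIT;	/* previously SHR_ALWAYS for most pairings */
}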

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 drivers/common/dpaax/caamflib/desc/sdap.h | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/drivers/common/dpaax/caamflib/desc/sdap.h b/drivers/common/dpaax/caamflib/desc/sdap.h
index ee03e95990..1737e14fa6 100644
--- a/drivers/common/dpaax/caamflib/desc/sdap.h
+++ b/drivers/common/dpaax/caamflib/desc/sdap.h
@@ -895,27 +895,27 @@ cnstr_shdsc_pdcp_sdap_u_plane(uint32_t *descbuf,
 			{
 				/* NULL */
 				SHR_WAIT,   /* NULL */
-				SHR_ALWAYS, /* SNOW f9 */
-				SHR_ALWAYS, /* AES CMAC */
-				SHR_ALWAYS  /* ZUC-I */
+				SHR_WAIT, /* SNOW f9 */
+				SHR_WAIT, /* AES CMAC */
+				SHR_WAIT  /* ZUC-I */
 			},
 			{
 				/* SNOW f8 */
-				SHR_ALWAYS, /* NULL */
-				SHR_ALWAYS, /* SNOW f9 */
+				SHR_WAIT, /* NULL */
+				SHR_WAIT, /* SNOW f9 */
 				SHR_WAIT,   /* AES CMAC */
 				SHR_WAIT    /* ZUC-I */
 			},
 			{
 				/* AES CTR */
-				SHR_ALWAYS, /* NULL */
-				SHR_ALWAYS, /* SNOW f9 */
-				SHR_ALWAYS, /* AES CMAC */
+				SHR_WAIT, /* NULL */
+				SHR_WAIT, /* SNOW f9 */
+				SHR_WAIT, /* AES CMAC */
 				SHR_WAIT    /* ZUC-I */
 			},
 			{
 				/* ZUC-E */
-				SHR_ALWAYS, /* NULL */
+				SHR_WAIT, /* NULL */
 				SHR_WAIT,   /* SNOW f9 */
 				SHR_WAIT,   /* AES CMAC */
 				SHR_WAIT    /* ZUC-I */
@@ -979,7 +979,7 @@ cnstr_shdsc_pdcp_sdap_u_plane(uint32_t *descbuf,
 		SHR_HDR(p, desc_share[cipherdata->algtype][authdata->algtype],
 			0, 0);
 	else
-		SHR_HDR(p, SHR_ALWAYS, 0, 0);
+		SHR_HDR(p, SHR_WAIT, 0, 0);
 
 	/* Construct the PDB */
 	pdb_type = cnstr_pdcp_u_plane_pdb(p, sn_size, hfn, bearer, direction,
-- 
2.25.1


^ permalink raw reply	[flat|nested] 42+ messages in thread

* [PATCH v3 3/7] crypto/dpaa2_sec: change capabilities for AES_CMAC
  2022-02-10  4:31     ` [PATCH v3 0/7] NXP crypto drivers changes Gagandeep Singh
  2022-02-10  4:31       ` [PATCH v3 1/7] common/dpaax: caamflib: Remove code related to SEC ERA 1 to 7 Gagandeep Singh
  2022-02-10  4:31       ` [PATCH v3 2/7] common/dpaax: change job processing mode for PDCP SDAP Gagandeep Singh
@ 2022-02-10  4:31       ` Gagandeep Singh
  2022-02-10  4:31       ` [PATCH v3 4/7] crypto/dpaa2_sec: add useful debug prints in sec dequeue Gagandeep Singh
                         ` (4 subsequent siblings)
  7 siblings, 0 replies; 42+ messages in thread
From: Gagandeep Singh @ 2022-02-10  4:31 UTC (permalink / raw)
  To: gakhil, dev; +Cc: Hemant Agrawal, Gagandeep Singh

From: Hemant Agrawal <hemant.agrawal@nxp.com>

Add the IV size and change the digest size to the values supported
by the HW engine.
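
For context, a hedged application-side sketch (key data omitted): with
this capability update the PMD advertises AES-CMAC digests of 12 or 16
bytes, so a session request has to stay inside that range.

#include <rte_crypto_sym.h>

/* Illustrative AES-CMAC auth transform; a digest_length of 4 or 8
 * would no longer match the advertised capability (min 12, max 16,
 * increment 4).
 */
struct rte_crypto_sym_xform cmac_xform = {
	.type = RTE_CRYPTO_SYM_XFORM_AUTH,
	.auth = {
		.op = RTE_CRYPTO_AUTH_OP_GENERATE,
		.algo = RTE_CRYPTO_AUTH_AES_CMAC,
		.digest_length = 16,
	},
};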

Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
index 05bd7c0736..a8f9440632 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
@@ -549,11 +549,12 @@ static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
 					.increment = 1
 				},
 				.digest_size = {
-					.min = 4,
+					.min = 12,
 					.max = 16,
 					.increment = 4
 				},
-				.aad_size = { 0 }
+				.aad_size = { 0 },
+				.iv_size = { 0 }
 			}, }
 		}, }
 	},
-- 
2.25.1


^ permalink raw reply	[flat|nested] 42+ messages in thread

* [PATCH v3 4/7] crypto/dpaa2_sec: add useful debug prints in sec dequeue
  2022-02-10  4:31     ` [PATCH v3 0/7] NXP crypto drivers changes Gagandeep Singh
                         ` (2 preceding siblings ...)
  2022-02-10  4:31       ` [PATCH v3 3/7] crypto/dpaa2_sec: change capabilities for AES_CMAC Gagandeep Singh
@ 2022-02-10  4:31       ` Gagandeep Singh
  2022-02-10  4:31       ` [PATCH v3 5/7] crypto/dpaa2: fix to check next type for auth or cipher Gagandeep Singh
                         ` (3 subsequent siblings)
  7 siblings, 0 replies; 42+ messages in thread
From: Gagandeep Singh @ 2022-02-10  4:31 UTC (permalink / raw)
  To: gakhil, dev; +Cc: Gagandeep Singh

Add a few useful debug prints in the dequeue function, controlled by
the new drv_dump_mode device argument.
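
A hedged bring-up sketch (application name and dpseci object are
placeholders): the dump level is selected with the drv_dump_mode device
argument passed on the EAL command line for the fslmc bus device.

#include <rte_eal.h>

int main(void)
{
	/* Mode 0: no dump, 1: HW error code only,
	 * 2: error code plus session/descriptor/mbuf dump.
	 */
	char *argv[] = { "crypto-app", "-a",
			 "fslmc:dpseci.1,drv_dump_mode=2" };

	if (rte_eal_init(3, argv) < 0)
		return -1;

	/* ... create dpaa2_sec sessions and queue pairs ... */
	return 0;
}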

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 doc/guides/cryptodevs/dpaa2_sec.rst         |  10 ++
 drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c | 136 +++++++++++++++++++-
 2 files changed, 144 insertions(+), 2 deletions(-)

diff --git a/doc/guides/cryptodevs/dpaa2_sec.rst b/doc/guides/cryptodevs/dpaa2_sec.rst
index 06de988d51..875d918068 100644
--- a/doc/guides/cryptodevs/dpaa2_sec.rst
+++ b/doc/guides/cryptodevs/dpaa2_sec.rst
@@ -175,3 +175,13 @@ For enabling logs, use the following EAL parameter:
 
 Using ``crypto.dpaa2`` as log matching criteria, all Crypto PMD logs can be
 enabled which are lower than logging ``level``.
+
+Enabling debug prints
+---------------------
+
+Use dev arg option ``drv_dump_mode=x`` to dump useful debug prints on HW sec
+error. There are 3 dump modes available 0, 1 and 2. Mode 0 means no dump print
+on error, mode 1 means dump HW error code and mode 2 means dump HW error code
+along with other useful debugging information like session, queue, descriptor
+data.
+e.g. ``fslmc:dpseci.1,drv_dump_mode=1``
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
index 1e6b3e548a..444e1f0043 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
@@ -28,6 +28,7 @@
 #include <fsl_dpopr.h>
 #include <fsl_dpseci.h>
 #include <fsl_mc_sys.h>
+#include <rte_hexdump.h>
 
 #include "dpaa2_sec_priv.h"
 #include "dpaa2_sec_event.h"
@@ -50,7 +51,17 @@
 
 #define NO_PREFETCH 0
 
+#define DRIVER_DUMP_MODE "drv_dump_mode"
+
+/* DPAA2_SEC_DP_DUMP levels */
+enum dpaa2_sec_dump_levels {
+	DPAA2_SEC_DP_NO_DUMP,
+	DPAA2_SEC_DP_ERR_DUMP,
+	DPAA2_SEC_DP_FULL_DUMP
+};
+
 uint8_t cryptodev_driver_id;
+uint8_t dpaa2_sec_dp_dump = DPAA2_SEC_DP_ERR_DUMP;
 
 #ifdef RTE_LIB_SECURITY
 static inline int
@@ -1621,6 +1632,83 @@ sec_fd_to_mbuf(const struct qbman_fd *fd)
 	return op;
 }
 
+static void
+dpaa2_sec_dump(struct rte_crypto_op *op)
+{
+	int i;
+	dpaa2_sec_session *sess = NULL;
+	struct ctxt_priv *priv;
+	uint8_t bufsize;
+	struct rte_crypto_sym_op *sym_op;
+
+	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
+		sess = (dpaa2_sec_session *)get_sym_session_private_data(
+			op->sym->session, cryptodev_driver_id);
+#ifdef RTE_LIBRTE_SECURITY
+	else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
+		sess = (dpaa2_sec_session *)get_sec_session_private_data(
+			op->sym->sec_session);
+#endif
+
+	if (sess == NULL)
+		goto mbuf_dump;
+
+	priv = (struct ctxt_priv *)sess->ctxt;
+	printf("\n****************************************\n"
+		"session params:\n\tContext type:\t%d\n\tDirection:\t%s\n"
+		"\tCipher alg:\t%d\n\tAuth alg:\t%d\n\tAead alg:\t%d\n"
+		"\tCipher key len:\t%zd\n", sess->ctxt_type,
+		(sess->dir == DIR_ENC) ? "DIR_ENC" : "DIR_DEC",
+		sess->cipher_alg, sess->auth_alg, sess->aead_alg,
+		sess->cipher_key.length);
+		rte_hexdump(stdout, "cipher key", sess->cipher_key.data,
+				sess->cipher_key.length);
+		rte_hexdump(stdout, "auth key", sess->auth_key.data,
+				sess->auth_key.length);
+	printf("\tAuth key len:\t%zd\n\tIV len:\t\t%d\n\tIV offset:\t%d\n"
+		"\tdigest length:\t%d\n\tstatus:\t\t%d\n\taead auth only"
+		" len:\t%d\n\taead cipher text:\t%d\n",
+		sess->auth_key.length, sess->iv.length, sess->iv.offset,
+		sess->digest_length, sess->status,
+		sess->ext_params.aead_ctxt.auth_only_len,
+		sess->ext_params.aead_ctxt.auth_cipher_text);
+#ifdef RTE_LIBRTE_SECURITY
+	printf("PDCP session params:\n"
+		"\tDomain:\t\t%d\n\tBearer:\t\t%d\n\tpkt_dir:\t%d\n\thfn_ovd:"
+		"\t%d\n\tsn_size:\t%d\n\thfn_ovd_offset:\t%d\n\thfn:\t\t%d\n"
+		"\thfn_threshold:\t0x%x\n", sess->pdcp.domain,
+		sess->pdcp.bearer, sess->pdcp.pkt_dir, sess->pdcp.hfn_ovd,
+		sess->pdcp.sn_size, sess->pdcp.hfn_ovd_offset, sess->pdcp.hfn,
+		sess->pdcp.hfn_threshold);
+
+#endif
+	bufsize = (uint8_t)priv->flc_desc[0].flc.word1_sdl;
+	printf("Descriptor Dump:\n");
+	for (i = 0; i < bufsize; i++)
+		printf("\tDESC[%d]:0x%x\n", i, priv->flc_desc[0].desc[i]);
+
+	printf("\n");
+mbuf_dump:
+	sym_op = op->sym;
+	if (sym_op->m_src) {
+		printf("Source mbuf:\n");
+		rte_pktmbuf_dump(stdout, sym_op->m_src, sym_op->m_src->data_len);
+	}
+	if (sym_op->m_dst) {
+		printf("Destination mbuf:\n");
+		rte_pktmbuf_dump(stdout, sym_op->m_dst, sym_op->m_dst->data_len);
+	}
+
+	printf("Session address = %p\ncipher offset: %d, length: %d\n"
+		"auth offset: %d, length:  %d\n aead offset: %d, length: %d\n"
+		, sym_op->session,
+		sym_op->cipher.data.offset, sym_op->cipher.data.length,
+		sym_op->auth.data.offset, sym_op->auth.data.length,
+		sym_op->aead.data.offset, sym_op->aead.data.length);
+	printf("\n");
+
+}
+
 static uint16_t
 dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
 			uint16_t nb_ops)
@@ -1702,8 +1790,13 @@ dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
 
 		if (unlikely(fd->simple.frc)) {
 			/* TODO Parse SEC errors */
-			DPAA2_SEC_DP_ERR("SEC returned Error - %x\n",
-				      fd->simple.frc);
+			if (dpaa2_sec_dp_dump > DPAA2_SEC_DP_NO_DUMP) {
+				DPAA2_SEC_DP_ERR("SEC returned Error - %x\n",
+						 fd->simple.frc);
+				if (dpaa2_sec_dp_dump > DPAA2_SEC_DP_ERR_DUMP)
+					dpaa2_sec_dump(ops[num_rx]);
+			}
+
 			dpaa2_qp->rx_vq.err_pkts += 1;
 			ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_ERROR;
 		} else {
@@ -3883,6 +3976,42 @@ dpaa2_sec_uninit(const struct rte_cryptodev *dev)
 	return 0;
 }
 
+static int
+check_devargs_handler(__rte_unused const char *key, const char *value,
+		      __rte_unused void *opaque)
+{
+	dpaa2_sec_dp_dump = atoi(value);
+	if (dpaa2_sec_dp_dump > DPAA2_SEC_DP_FULL_DUMP) {
+		DPAA2_SEC_WARN("WARN: DPAA2_SEC_DP_DUMP_LEVEL is not "
+			      "supported, changing to FULL error prints\n");
+		dpaa2_sec_dp_dump = DPAA2_SEC_DP_FULL_DUMP;
+	}
+
+	return 0;
+}
+
+static void
+dpaa2_sec_get_devargs(struct rte_devargs *devargs, const char *key)
+{
+	struct rte_kvargs *kvlist;
+
+	if (!devargs)
+		return;
+
+	kvlist = rte_kvargs_parse(devargs->args, NULL);
+	if (!kvlist)
+		return;
+
+	if (!rte_kvargs_count(kvlist, key)) {
+		rte_kvargs_free(kvlist);
+		return;
+	}
+
+	rte_kvargs_process(kvlist, key,
+			check_devargs_handler, NULL);
+	rte_kvargs_free(kvlist);
+}
+
 static int
 dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
 {
@@ -3984,6 +4113,7 @@ dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
 		goto init_error;
 	}
 
+	dpaa2_sec_get_devargs(cryptodev->device->devargs, DRIVER_DUMP_MODE);
 	DPAA2_SEC_INFO("driver %s: created", cryptodev->data->name);
 	return 0;
 
@@ -4082,4 +4212,6 @@ static struct cryptodev_driver dpaa2_sec_crypto_drv;
 RTE_PMD_REGISTER_DPAA2(CRYPTODEV_NAME_DPAA2_SEC_PMD, rte_dpaa2_sec_driver);
 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa2_sec_crypto_drv,
 		rte_dpaa2_sec_driver.driver, cryptodev_driver_id);
+RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_DPAA2_SEC_PMD,
+		DRIVER_DUMP_MODE "=<int>");
 RTE_LOG_REGISTER(dpaa2_logtype_sec, pmd.crypto.dpaa2, NOTICE);
-- 
2.25.1


^ permalink raw reply	[flat|nested] 42+ messages in thread

* [PATCH v3 5/7] crypto/dpaa2: fix to check next type for auth or cipher
  2022-02-10  4:31     ` [PATCH v3 0/7] NXP crypto drivers changes Gagandeep Singh
                         ` (3 preceding siblings ...)
  2022-02-10  4:31       ` [PATCH v3 4/7] crypto/dpaa2_sec: add useful debug prints in sec dequeue Gagandeep Singh
@ 2022-02-10  4:31       ` Gagandeep Singh
  2022-02-10  4:31       ` [PATCH v3 6/7] crypto/dpaa2_sec: ordered queue support Gagandeep Singh
                         ` (2 subsequent siblings)
  7 siblings, 0 replies; 42+ messages in thread
From: Gagandeep Singh @ 2022-02-10  4:31 UTC (permalink / raw)
  To: gakhil, dev; +Cc: Hemant Agrawal, stable

From: Hemant Agrawal <hemant.agrawal@nxp.com>

This patch adds more checks on the next transform type for PDCP cases.
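
An illustrative chained transform (algorithm choices are arbitrary,
keys omitted) showing the case the added checks guard: xform->next is
only treated as the auth (or cipher) half of the chain when its type
field actually says so.

#include <rte_crypto_sym.h>

/* Cipher-then-auth chain for a PDCP session. */
struct rte_crypto_sym_xform auth_xform = {
	.type = RTE_CRYPTO_SYM_XFORM_AUTH,
	.auth.algo = RTE_CRYPTO_AUTH_SNOW3G_UIA2,
	.auth.digest_length = 4,
};

struct rte_crypto_sym_xform cipher_xform = {
	.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
	.cipher.algo = RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
	.next = &auth_xform,	/* type is checked before it is used */
};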

Fixes: 45e019608f31 ("crypto/dpaa2_sec: support integrity only PDCP")
Fixes: a1173d55598c ("crypto/dpaa_sec: support PDCP offload")
Cc: stable@dpdk.org

Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
 drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c | 6 ++++--
 drivers/crypto/dpaa_sec/dpaa_sec.c          | 6 ++++--
 2 files changed, 8 insertions(+), 4 deletions(-)

diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
index 444e1f0043..cb8aaf6446 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
@@ -3231,13 +3231,15 @@ dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
 	/* find xfrm types */
 	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
 		cipher_xform = &xform->cipher;
-		if (xform->next != NULL) {
+		if (xform->next != NULL &&
+			xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
 			session->ext_params.aead_ctxt.auth_cipher_text = true;
 			auth_xform = &xform->next->auth;
 		}
 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
 		auth_xform = &xform->auth;
-		if (xform->next != NULL) {
+		if (xform->next != NULL &&
+			xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
 			session->ext_params.aead_ctxt.auth_cipher_text = false;
 			cipher_xform = &xform->next->cipher;
 		}
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.c b/drivers/crypto/dpaa_sec/dpaa_sec.c
index 1dedd9eee5..af166252ca 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.c
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.c
@@ -2984,11 +2984,13 @@ dpaa_sec_set_pdcp_session(struct rte_cryptodev *dev,
 	/* find xfrm types */
 	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
 		cipher_xform = &xform->cipher;
-		if (xform->next != NULL)
+		if (xform->next != NULL &&
+			xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
 			auth_xform = &xform->next->auth;
 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
 		auth_xform = &xform->auth;
-		if (xform->next != NULL)
+		if (xform->next != NULL &&
+			xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
 			cipher_xform = &xform->next->cipher;
 	} else {
 		DPAA_SEC_ERR("Invalid crypto type");
-- 
2.25.1


^ permalink raw reply	[flat|nested] 42+ messages in thread

* [PATCH v3 6/7] crypto/dpaa2_sec: ordered queue support
  2022-02-10  4:31     ` [PATCH v3 0/7] NXP crypto drivers changes Gagandeep Singh
                         ` (4 preceding siblings ...)
  2022-02-10  4:31       ` [PATCH v3 5/7] crypto/dpaa2: fix to check next type for auth or cipher Gagandeep Singh
@ 2022-02-10  4:31       ` Gagandeep Singh
  2022-02-10  4:31       ` [PATCH v3 7/7] crypto/dpaa_sec: add debug framework Gagandeep Singh
  2022-02-10  7:03       ` [EXT] [PATCH v3 0/7] NXP crypto drivers changes Akhil Goyal
  7 siblings, 0 replies; 42+ messages in thread
From: Gagandeep Singh @ 2022-02-10  4:31 UTC (permalink / raw)
  To: gakhil, dev; +Cc: Nipun Gupta, Gagandeep Singh

From: Nipun Gupta <nipun.gupta@nxp.com>

This patch adds ordered queue support for the DPAA2 platform.
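
A hedged usage sketch (adapter/device ids are placeholders, and the
exact crypto-adapter prototype may differ between DPDK releases):
requesting ordered scheduling when binding a SEC queue pair through the
event crypto adapter, which makes the PMD program an order restoration
point for that queue.

#include <string.h>
#include <rte_eventdev.h>
#include <rte_event_crypto_adapter.h>

static int
bind_ordered_qp(uint8_t adapter_id, uint8_t cdev_id, int32_t qp_id)
{
	struct rte_event ev;

	memset(&ev, 0, sizeof(ev));
	ev.queue_id = 0;
	ev.sched_type = RTE_SCHED_TYPE_ORDERED;
	ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;

	/* With the drv_strict_order devarg set, the PMD enforces strict
	 * ordering; otherwise loose ordering is used for this queue.
	 */
	return rte_event_crypto_adapter_queue_pair_add(adapter_id, cdev_id,
						       qp_id, &ev);
}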

Signed-off-by: Nipun Gupta <nipun.gupta@nxp.com>
Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 doc/guides/cryptodevs/dpaa2_sec.rst         |   7 +
 drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c | 285 ++++++++++++++++++--
 drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h   |   8 +-
 drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h    |  14 +-
 4 files changed, 290 insertions(+), 24 deletions(-)

diff --git a/doc/guides/cryptodevs/dpaa2_sec.rst b/doc/guides/cryptodevs/dpaa2_sec.rst
index 875d918068..1a590309a0 100644
--- a/doc/guides/cryptodevs/dpaa2_sec.rst
+++ b/doc/guides/cryptodevs/dpaa2_sec.rst
@@ -185,3 +185,10 @@ on error, mode 1 means dump HW error code and mode 2 means dump HW error code
 along with other useful debugging information like session, queue, descriptor
 data.
 e.g. ``fslmc:dpseci.1,drv_dump_mode=1``
+
+Enable strict ordering
+----------------------
+
+Use dev arg option ``drv_strict_order=1`` to enable strict ordering.
+By default, loose ordering is set for ordered schedule type event.
+e.g. ``fslmc:dpseci.1,drv_strict_order=1``
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
index cb8aaf6446..7a0596e44e 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
@@ -52,6 +52,7 @@
 #define NO_PREFETCH 0
 
 #define DRIVER_DUMP_MODE "drv_dump_mode"
+#define DRIVER_STRICT_ORDER "drv_strict_order"
 
 /* DPAA2_SEC_DP_DUMP levels */
 enum dpaa2_sec_dump_levels {
@@ -1477,14 +1478,14 @@ dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
 
 		for (loop = 0; loop < frames_to_send; loop++) {
 			if (*dpaa2_seqn((*ops)->sym->m_src)) {
-				uint8_t dqrr_index =
-					*dpaa2_seqn((*ops)->sym->m_src) - 1;
-
-				flags[loop] = QBMAN_ENQUEUE_FLAG_DCA | dqrr_index;
-				DPAA2_PER_LCORE_DQRR_SIZE--;
-				DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index);
-				*dpaa2_seqn((*ops)->sym->m_src) =
-					DPAA2_INVALID_MBUF_SEQN;
+				if (*dpaa2_seqn((*ops)->sym->m_src) & QBMAN_ENQUEUE_FLAG_DCA) {
+					DPAA2_PER_LCORE_DQRR_SIZE--;
+					DPAA2_PER_LCORE_DQRR_HELD &= ~(1 <<
+					*dpaa2_seqn((*ops)->sym->m_src) &
+					QBMAN_EQCR_DCA_IDXMASK);
+				}
+				flags[loop] = *dpaa2_seqn((*ops)->sym->m_src);
+				*dpaa2_seqn((*ops)->sym->m_src) = DPAA2_INVALID_MBUF_SEQN;
 			}
 
 			/*Clear the unused FD fields before sending*/
@@ -1709,6 +1710,168 @@ dpaa2_sec_dump(struct rte_crypto_op *op)
 
 }
 
+static void
+dpaa2_sec_free_eqresp_buf(uint16_t eqresp_ci)
+{
+	struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO;
+	struct rte_crypto_op *op;
+	struct qbman_fd *fd;
+
+	fd = qbman_result_eqresp_fd(&dpio_dev->eqresp[eqresp_ci]);
+	op = sec_fd_to_mbuf(fd);
+	/* Instead of freeing, enqueue it to the sec tx queue (sec->core)
+	 * after setting an error in FD. But this will have performance impact.
+	 */
+	rte_pktmbuf_free(op->sym->m_src);
+}
+
+static void
+dpaa2_sec_set_enqueue_descriptor(struct dpaa2_queue *dpaa2_q,
+			     struct rte_mbuf *m,
+			     struct qbman_eq_desc *eqdesc)
+{
+	struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO;
+	struct eqresp_metadata *eqresp_meta;
+	struct dpaa2_sec_dev_private *priv = dpaa2_q->crypto_data->dev_private;
+	uint16_t orpid, seqnum;
+	uint8_t dq_idx;
+
+	if (*dpaa2_seqn(m) & DPAA2_ENQUEUE_FLAG_ORP) {
+		orpid = (*dpaa2_seqn(m) & DPAA2_EQCR_OPRID_MASK) >>
+			DPAA2_EQCR_OPRID_SHIFT;
+		seqnum = (*dpaa2_seqn(m) & DPAA2_EQCR_SEQNUM_MASK) >>
+			DPAA2_EQCR_SEQNUM_SHIFT;
+
+
+		if (!priv->en_loose_ordered) {
+			qbman_eq_desc_set_orp(eqdesc, 1, orpid, seqnum, 0);
+			qbman_eq_desc_set_response(eqdesc, (uint64_t)
+				DPAA2_VADDR_TO_IOVA(&dpio_dev->eqresp[
+				dpio_dev->eqresp_pi]), 1);
+			qbman_eq_desc_set_token(eqdesc, 1);
+
+			eqresp_meta = &dpio_dev->eqresp_meta[dpio_dev->eqresp_pi];
+			eqresp_meta->dpaa2_q = dpaa2_q;
+			eqresp_meta->mp = m->pool;
+
+			dpio_dev->eqresp_pi + 1 < MAX_EQ_RESP_ENTRIES ?
+				dpio_dev->eqresp_pi++ : (dpio_dev->eqresp_pi = 0);
+		} else {
+			qbman_eq_desc_set_orp(eqdesc, 0, orpid, seqnum, 0);
+		}
+	} else {
+		dq_idx = *dpaa2_seqn(m) - 1;
+		qbman_eq_desc_set_dca(eqdesc, 1, dq_idx, 0);
+		DPAA2_PER_LCORE_DQRR_SIZE--;
+		DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dq_idx);
+	}
+	*dpaa2_seqn(m) = DPAA2_INVALID_MBUF_SEQN;
+}
+
+
+static uint16_t
+dpaa2_sec_enqueue_burst_ordered(void *qp, struct rte_crypto_op **ops,
+			uint16_t nb_ops)
+{
+	/* Function to transmit the frames to given device and VQ*/
+	uint32_t loop;
+	int32_t ret;
+	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
+	uint32_t frames_to_send, num_free_eq_desc, retry_count;
+	struct qbman_eq_desc eqdesc[MAX_TX_RING_SLOTS];
+	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
+	struct qbman_swp *swp;
+	uint16_t num_tx = 0;
+	uint16_t bpid;
+	struct rte_mempool *mb_pool;
+	struct dpaa2_sec_dev_private *priv =
+				dpaa2_qp->tx_vq.crypto_data->dev_private;
+
+	if (unlikely(nb_ops == 0))
+		return 0;
+
+	if (ops[0]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
+		DPAA2_SEC_ERR("sessionless crypto op not supported");
+		return 0;
+	}
+
+	if (!DPAA2_PER_LCORE_DPIO) {
+		ret = dpaa2_affine_qbman_swp();
+		if (ret) {
+			DPAA2_SEC_ERR("Failure in affining portal");
+			return 0;
+		}
+	}
+	swp = DPAA2_PER_LCORE_PORTAL;
+
+	while (nb_ops) {
+		frames_to_send = (nb_ops > dpaa2_eqcr_size) ?
+			dpaa2_eqcr_size : nb_ops;
+
+		if (!priv->en_loose_ordered) {
+			if (*dpaa2_seqn((*ops)->sym->m_src)) {
+				num_free_eq_desc = dpaa2_free_eq_descriptors();
+				if (num_free_eq_desc < frames_to_send)
+					frames_to_send = num_free_eq_desc;
+			}
+		}
+
+		for (loop = 0; loop < frames_to_send; loop++) {
+			/*Prepare enqueue descriptor*/
+			qbman_eq_desc_clear(&eqdesc[loop]);
+			qbman_eq_desc_set_fq(&eqdesc[loop], dpaa2_qp->tx_vq.fqid);
+
+			if (*dpaa2_seqn((*ops)->sym->m_src))
+				dpaa2_sec_set_enqueue_descriptor(
+						&dpaa2_qp->tx_vq,
+						(*ops)->sym->m_src,
+						&eqdesc[loop]);
+			else
+				qbman_eq_desc_set_no_orp(&eqdesc[loop],
+							 DPAA2_EQ_RESP_ERR_FQ);
+
+			/*Clear the unused FD fields before sending*/
+			memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
+			mb_pool = (*ops)->sym->m_src->pool;
+			bpid = mempool_to_bpid(mb_pool);
+			ret = build_sec_fd(*ops, &fd_arr[loop], bpid);
+			if (ret) {
+				DPAA2_SEC_ERR("error: Improper packet contents"
+					      " for crypto operation");
+				goto skip_tx;
+			}
+			ops++;
+		}
+
+		loop = 0;
+		retry_count = 0;
+		while (loop < frames_to_send) {
+			ret = qbman_swp_enqueue_multiple_desc(swp,
+					&eqdesc[loop], &fd_arr[loop],
+					frames_to_send - loop);
+			if (unlikely(ret < 0)) {
+				retry_count++;
+				if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
+					num_tx += loop;
+					nb_ops -= loop;
+					goto skip_tx;
+				}
+			} else {
+				loop += ret;
+				retry_count = 0;
+			}
+		}
+
+		num_tx += loop;
+		nb_ops -= loop;
+	}
+
+skip_tx:
+	dpaa2_qp->tx_vq.tx_pkts += num_tx;
+	dpaa2_qp->tx_vq.err_pkts += nb_ops;
+	return num_tx;
+}
+
 static uint16_t
 dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
 			uint16_t nb_ops)
@@ -3622,6 +3785,10 @@ dpaa2_sec_dev_start(struct rte_cryptodev *dev)
 
 	PMD_INIT_FUNC_TRACE();
 
+	/* Change the tx burst function if ordered queues are used */
+	if (priv->en_ordered)
+		dev->enqueue_burst = dpaa2_sec_enqueue_burst_ordered;
+
 	memset(&attr, 0, sizeof(struct dpseci_attr));
 
 	ret = dpseci_enable(dpseci, CMD_PRI_LOW, priv->token);
@@ -3834,12 +4001,46 @@ dpaa2_sec_process_atomic_event(struct qbman_swp *swp __rte_unused,
 
 	ev->event_ptr = sec_fd_to_mbuf(fd);
 	dqrr_index = qbman_get_dqrr_idx(dq);
-	*dpaa2_seqn(crypto_op->sym->m_src) = dqrr_index + 1;
+	*dpaa2_seqn(crypto_op->sym->m_src) = QBMAN_ENQUEUE_FLAG_DCA | dqrr_index;
 	DPAA2_PER_LCORE_DQRR_SIZE++;
 	DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
 	DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = crypto_op->sym->m_src;
 }
 
+static void __rte_hot
+dpaa2_sec_process_ordered_event(struct qbman_swp *swp,
+				const struct qbman_fd *fd,
+				const struct qbman_result *dq,
+				struct dpaa2_queue *rxq,
+				struct rte_event *ev)
+{
+	struct rte_crypto_op *crypto_op = (struct rte_crypto_op *)ev->event_ptr;
+
+	/* Prefetching mbuf */
+	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-
+		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size));
+
+	/* Prefetching ipsec crypto_op stored in priv data of mbuf */
+	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64));
+
+	ev->flow_id = rxq->ev.flow_id;
+	ev->sub_event_type = rxq->ev.sub_event_type;
+	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
+	ev->op = RTE_EVENT_OP_NEW;
+	ev->sched_type = rxq->ev.sched_type;
+	ev->queue_id = rxq->ev.queue_id;
+	ev->priority = rxq->ev.priority;
+	ev->event_ptr = sec_fd_to_mbuf(fd);
+
+	*dpaa2_seqn(crypto_op->sym->m_src) = DPAA2_ENQUEUE_FLAG_ORP;
+	*dpaa2_seqn(crypto_op->sym->m_src) |= qbman_result_DQ_odpid(dq) <<
+		DPAA2_EQCR_OPRID_SHIFT;
+	*dpaa2_seqn(crypto_op->sym->m_src) |= qbman_result_DQ_seqnum(dq) <<
+		DPAA2_EQCR_SEQNUM_SHIFT;
+
+	qbman_swp_dqrr_consume(swp, dq);
+}
+
 int
 dpaa2_sec_eventq_attach(const struct rte_cryptodev *dev,
 		int qp_id,
@@ -3857,6 +4058,8 @@ dpaa2_sec_eventq_attach(const struct rte_cryptodev *dev,
 		qp->rx_vq.cb = dpaa2_sec_process_parallel_event;
 	else if (event->sched_type == RTE_SCHED_TYPE_ATOMIC)
 		qp->rx_vq.cb = dpaa2_sec_process_atomic_event;
+	else if (event->sched_type == RTE_SCHED_TYPE_ORDERED)
+		qp->rx_vq.cb = dpaa2_sec_process_ordered_event;
 	else
 		return -EINVAL;
 
@@ -3875,6 +4078,37 @@ dpaa2_sec_eventq_attach(const struct rte_cryptodev *dev,
 		cfg.options |= DPSECI_QUEUE_OPT_ORDER_PRESERVATION;
 		cfg.order_preservation_en = 1;
 	}
+
+	if (event->sched_type == RTE_SCHED_TYPE_ORDERED) {
+		struct opr_cfg ocfg;
+
+		/* Restoration window size = 256 frames */
+		ocfg.oprrws = 3;
+		/* Restoration window size = 512 frames for LX2 */
+		if (dpaa2_svr_family == SVR_LX2160A)
+			ocfg.oprrws = 4;
+		/* Auto advance NESN window enabled */
+		ocfg.oa = 1;
+		/* Late arrival window size disabled */
+		ocfg.olws = 0;
+		/* ORL resource exhaustaion advance NESN disabled */
+		ocfg.oeane = 0;
+
+		if (priv->en_loose_ordered)
+			ocfg.oloe = 1;
+		else
+			ocfg.oloe = 0;
+
+		ret = dpseci_set_opr(dpseci, CMD_PRI_LOW, priv->token,
+				   qp_id, OPR_OPT_CREATE, &ocfg);
+		if (ret) {
+			RTE_LOG(ERR, PMD, "Error setting opr: ret: %d\n", ret);
+			return ret;
+		}
+		qp->tx_vq.cb_eqresp_free = dpaa2_sec_free_eqresp_buf;
+		priv->en_ordered = 1;
+	}
+
 	ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
 				  qp_id, &cfg);
 	if (ret) {
@@ -3979,24 +4213,34 @@ dpaa2_sec_uninit(const struct rte_cryptodev *dev)
 }
 
 static int
-check_devargs_handler(__rte_unused const char *key, const char *value,
-		      __rte_unused void *opaque)
+check_devargs_handler(const char *key, const char *value,
+		      void *opaque)
 {
-	dpaa2_sec_dp_dump = atoi(value);
-	if (dpaa2_sec_dp_dump > DPAA2_SEC_DP_FULL_DUMP) {
-		DPAA2_SEC_WARN("WARN: DPAA2_SEC_DP_DUMP_LEVEL is not "
-			      "supported, changing to FULL error prints\n");
-		dpaa2_sec_dp_dump = DPAA2_SEC_DP_FULL_DUMP;
+	struct rte_cryptodev *dev = (struct rte_cryptodev *)opaque;
+	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
+
+	if (!strcmp(key, "drv_strict_order")) {
+		priv->en_loose_ordered = false;
+	} else {
+		dpaa2_sec_dp_dump = atoi(value);
+		if (dpaa2_sec_dp_dump > DPAA2_SEC_DP_FULL_DUMP) {
+			DPAA2_SEC_WARN("WARN: DPAA2_SEC_DP_DUMP_LEVEL is not "
+				      "supported, changing to FULL error"
+				      " prints\n");
+			dpaa2_sec_dp_dump = DPAA2_SEC_DP_FULL_DUMP;
+		}
 	}
 
 	return 0;
 }
 
 static void
-dpaa2_sec_get_devargs(struct rte_devargs *devargs, const char *key)
+dpaa2_sec_get_devargs(struct rte_cryptodev *cryptodev, const char *key)
 {
 	struct rte_kvargs *kvlist;
+	struct rte_devargs *devargs;
 
+	devargs = cryptodev->device->devargs;
 	if (!devargs)
 		return;
 
@@ -4010,7 +4254,7 @@ dpaa2_sec_get_devargs(struct rte_devargs *devargs, const char *key)
 	}
 
 	rte_kvargs_process(kvlist, key,
-			check_devargs_handler, NULL);
+			check_devargs_handler, (void *)cryptodev);
 	rte_kvargs_free(kvlist);
 }
 
@@ -4101,6 +4345,7 @@ dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
 	cryptodev->data->nb_queue_pairs = internals->max_nb_queue_pairs;
 	internals->hw = dpseci;
 	internals->token = token;
+	internals->en_loose_ordered = true;
 
 	snprintf(str, sizeof(str), "sec_fle_pool_p%d_%d",
 			getpid(), cryptodev->data->dev_id);
@@ -4115,7 +4360,8 @@ dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
 		goto init_error;
 	}
 
-	dpaa2_sec_get_devargs(cryptodev->device->devargs, DRIVER_DUMP_MODE);
+	dpaa2_sec_get_devargs(cryptodev, DRIVER_DUMP_MODE);
+	dpaa2_sec_get_devargs(cryptodev, DRIVER_STRICT_ORDER);
 	DPAA2_SEC_INFO("driver %s: created", cryptodev->data->name);
 	return 0;
 
@@ -4215,5 +4461,6 @@ RTE_PMD_REGISTER_DPAA2(CRYPTODEV_NAME_DPAA2_SEC_PMD, rte_dpaa2_sec_driver);
 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa2_sec_crypto_drv,
 		rte_dpaa2_sec_driver.driver, cryptodev_driver_id);
 RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_DPAA2_SEC_PMD,
+		DRIVER_STRICT_ORDER "=<int>"
 		DRIVER_DUMP_MODE "=<int>");
 RTE_LOG_REGISTER(dpaa2_logtype_sec, pmd.crypto.dpaa2, NOTICE);
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
index a8f9440632..e4a82114c2 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
@@ -1,8 +1,6 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- *
- *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- *   Copyright 2016,2020-2021 NXP
- *
+ * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright 2016,2019-2021 NXP
  */
 
 #ifndef _DPAA2_SEC_PMD_PRIVATE_H_
@@ -37,6 +35,8 @@ struct dpaa2_sec_dev_private {
 	uint16_t token; /**< Token required by DPxxx objects */
 	unsigned int max_nb_queue_pairs;
 	/**< Max number of queue pairs supported by device */
+	uint8_t en_ordered;
+	uint8_t en_loose_ordered;
 };
 
 struct dpaa2_sec_qp {
diff --git a/drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h b/drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h
index 279e8f4d4a..c295c04f24 100644
--- a/drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h
+++ b/drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
  *
  * Copyright 2013-2016 Freescale Semiconductor Inc.
- * Copyright 2016-2017 NXP
+ * Copyright 2016-2020 NXP
  *
  */
 #ifndef __FSL_DPSECI_H
@@ -11,6 +11,8 @@
  * Contains initialization APIs and runtime control APIs for DPSECI
  */
 
+#include <fsl_dpopr.h>
+
 struct fsl_mc_io;
 
 /**
@@ -41,6 +43,16 @@ int dpseci_close(struct fsl_mc_io *mc_io,
  */
 #define DPSECI_OPT_HAS_CG				0x000020
 
+/**
+ * Enable the Order Restoration support
+ */
+#define DPSECI_OPT_HAS_OPR				0x000040
+
+/**
+ * Order Point Records are shared for the entire DPSECI
+ */
+#define DPSECI_OPT_OPR_SHARED				0x000080
+
 /**
  * struct dpseci_cfg - Structure representing DPSECI configuration
  * @options: Any combination of the following options:
-- 
2.25.1


^ permalink raw reply	[flat|nested] 42+ messages in thread
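
The devargs handling in the patch above routes the cryptodev to the handler
through the opaque pointer of rte_kvargs_process(), so one callback can serve
both the drv_strict_order and drv_dump_mode keys. The following is a minimal,
self-contained sketch of that rte_kvargs pattern; the struct name, function
names and defaults are hypothetical stand-ins, only the two key strings and
the rte_kvargs calls correspond to the patch.

#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <rte_kvargs.h>

/* Hypothetical stand-in for the PMD private data touched by the handler */
struct my_pmd_priv {
	bool en_loose_ordered;
	int dump_mode;
};

/* Matches the arg_handler_t signature expected by rte_kvargs_process() */
static int
parse_one_devarg(const char *key, const char *value, void *opaque)
{
	struct my_pmd_priv *priv = opaque;

	if (strcmp(key, "drv_strict_order") == 0)
		priv->en_loose_ordered = false;	/* presence of the key is enough */
	else if (strcmp(key, "drv_dump_mode") == 0 && value != NULL)
		priv->dump_mode = atoi(value);

	return 0;
}

/* Parse a "key=value,key=value" devargs string into the private struct */
static void
parse_devargs(struct my_pmd_priv *priv, const char *args, const char *key)
{
	struct rte_kvargs *kvlist = rte_kvargs_parse(args, NULL);

	if (kvlist == NULL)
		return;
	if (rte_kvargs_count(kvlist, key) > 0)
		rte_kvargs_process(kvlist, key, parse_one_devarg, priv);
	rte_kvargs_free(kvlist);
}

As in the patch, passing the device's private data as the opaque argument
lets the handler set per-device state such as en_loose_ordered instead of
relying only on file-scope globals.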

* [PATCH v3 7/7] crypto/dpaa_sec: add debug framework
  2022-02-10  4:31     ` [PATCH v3 0/7] NXP crypto drivers changes Gagandeep Singh
                         ` (5 preceding siblings ...)
  2022-02-10  4:31       ` [PATCH v3 6/7] crypto/dpaa2_sec: ordered queue support Gagandeep Singh
@ 2022-02-10  4:31       ` Gagandeep Singh
  2022-02-10  7:03       ` [EXT] [PATCH v3 0/7] NXP crypto drivers changes Akhil Goyal
  7 siblings, 0 replies; 42+ messages in thread
From: Gagandeep Singh @ 2022-02-10  4:31 UTC (permalink / raw)
  To: gakhil, dev; +Cc: Gagandeep Singh

Add useful debug prints in the DPAA driver to ease debugging.

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 doc/guides/cryptodevs/dpaa_sec.rst |  10 ++
 drivers/bus/dpaa/dpaa_bus.c        |  16 ++-
 drivers/crypto/dpaa_sec/dpaa_sec.c | 192 ++++++++++++++++++++++++++++-
 3 files changed, 213 insertions(+), 5 deletions(-)

diff --git a/doc/guides/cryptodevs/dpaa_sec.rst b/doc/guides/cryptodevs/dpaa_sec.rst
index bac82421bc..0c8d6cf3da 100644
--- a/doc/guides/cryptodevs/dpaa_sec.rst
+++ b/doc/guides/cryptodevs/dpaa_sec.rst
@@ -123,3 +123,13 @@ For enabling logs, use the following EAL parameter:
 
 Using ``pmd.crypto.dpaa`` as log matching criteria, all Crypto PMD logs can be
 enabled which are lower than logging ``level``.
+
+Enabling debug prints
+---------------------
+
+Use the devargs option ``drv_dump_mode=x`` to dump useful debug information on
+a HW SEC error. Three dump modes are available: 0, 1 and 2. Mode 0 prints
+nothing on error, mode 1 dumps the HW error code, and mode 2 dumps the HW
+error code along with other useful debugging information such as the session,
+queue and descriptor data.
+e.g. ``dpaa_bus:dpaa_sec-1,drv_dump_mode=1``
diff --git a/drivers/bus/dpaa/dpaa_bus.c b/drivers/bus/dpaa/dpaa_bus.c
index 737ac8d8c5..a1db25dce9 100644
--- a/drivers/bus/dpaa/dpaa_bus.c
+++ b/drivers/bus/dpaa/dpaa_bus.c
@@ -429,6 +429,7 @@ rte_dpaa_bus_parse(const char *name, void *out)
 {
 	unsigned int i, j;
 	size_t delta;
+	size_t max_name_len;
 
 	/* There are two ways of passing device name, with and without
 	 * separator. "dpaa_bus:fm1-mac3" with separator, and "fm1-mac3"
@@ -444,14 +445,21 @@ rte_dpaa_bus_parse(const char *name, void *out)
 		delta = 5;
 	}
 
-	if (sscanf(&name[delta], "fm%u-mac%u", &i, &j) != 2 ||
-	    i >= 2 || j >= 16) {
-		return -EINVAL;
+	if (strncmp("dpaa_sec", &name[delta], 8) == 0) {
+		if (sscanf(&name[delta], "dpaa_sec-%u", &i) != 1 ||
+				i < 1 || i > 4)
+			return -EINVAL;
+		max_name_len = sizeof("dpaa_sec-.") - 1;
+	} else {
+		if (sscanf(&name[delta], "fm%u-mac%u", &i, &j) != 2 ||
+				i >= 2 || j >= 16)
+			return -EINVAL;
+
+		max_name_len = sizeof("fm.-mac..") - 1;
 	}
 
 	if (out != NULL) {
 		char *out_name = out;
-		const size_t max_name_len = sizeof("fm.-mac..") - 1;
 
 		/* Do not check for truncation, either name ends with
 		 * '\0' or the device name is followed by parameters and there
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.c b/drivers/crypto/dpaa_sec/dpaa_sec.c
index af166252ca..2a926ca124 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.c
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.c
@@ -27,6 +27,7 @@
 #include <rte_memcpy.h>
 #include <rte_string_fns.h>
 #include <rte_spinlock.h>
+#include <rte_hexdump.h>
 
 #include <fsl_usd.h>
 #include <fsl_qman.h>
@@ -45,6 +46,17 @@
 #include <dpaa_sec_log.h>
 #include <dpaax_iova_table.h>
 
+#define DRIVER_DUMP_MODE "drv_dump_mode"
+
+/* DPAA_SEC_DP_DUMP levels */
+enum dpaa_sec_dump_levels {
+	DPAA_SEC_DP_NO_DUMP,
+	DPAA_SEC_DP_ERR_DUMP,
+	DPAA_SEC_DP_FULL_DUMP
+};
+
+uint8_t dpaa_sec_dp_dump = DPAA_SEC_DP_ERR_DUMP;
+
 uint8_t dpaa_cryptodev_driver_id;
 
 static inline void
@@ -649,6 +661,139 @@ dpaa_sec_prep_cdb(dpaa_sec_session *ses)
 	return 0;
 }
 
+static void
+dpaa_sec_dump(struct dpaa_sec_op_ctx *ctx, struct dpaa_sec_qp *qp)
+{
+	struct dpaa_sec_job *job = &ctx->job;
+	struct rte_crypto_op *op = ctx->op;
+	dpaa_sec_session *sess = NULL;
+	struct sec_cdb c_cdb, *cdb;
+	uint8_t bufsize;
+	struct rte_crypto_sym_op *sym_op;
+	struct qm_sg_entry sg[2];
+
+	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
+		sess = (dpaa_sec_session *)
+			get_sym_session_private_data(
+					op->sym->session,
+					dpaa_cryptodev_driver_id);
+#ifdef RTE_LIBRTE_SECURITY
+	else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
+		sess = (dpaa_sec_session *)
+			get_sec_session_private_data(
+					op->sym->sec_session);
+#endif
+	if (sess == NULL) {
+		printf("session is NULL\n");
+		goto mbuf_dump;
+	}
+
+	cdb = &sess->cdb;
+	rte_memcpy(&c_cdb, cdb, sizeof(struct sec_cdb));
+#ifdef RTE_LIBRTE_SECURITY
+	printf("\nsession protocol type = %d\n", sess->proto_alg);
+#endif
+	printf("\n****************************************\n"
+		"session params:\n\tContext type:\t%d\n\tDirection:\t%s\n"
+		"\tCipher alg:\t%d\n\tAuth alg:\t%d\n\tAead alg:\t%d\n"
+		"\tCipher key len:\t%"PRIu64"\n\tCipher alg:\t%d\n"
+		"\tCipher algmode:\t%d\n", sess->ctxt,
+		(sess->dir == DIR_ENC) ? "DIR_ENC" : "DIR_DEC",
+		sess->cipher_alg, sess->auth_alg, sess->aead_alg,
+		sess->cipher_key.length, sess->cipher_key.alg,
+		sess->cipher_key.algmode);
+	rte_hexdump(stdout, "cipher key", sess->cipher_key.data,
+			sess->cipher_key.length);
+	rte_hexdump(stdout, "auth key", sess->auth_key.data,
+			sess->auth_key.length);
+	printf("\tAuth key len:\t%"PRIu64"\n\tAuth alg:\t%d\n"
+		"\tAuth algmode:\t%d\n\tIV len:\t\t%d\n\tIV offset:\t%d\n"
+		"\tdigest length:\t%d\n\tauth only len:\t\t%d\n"
+		"\taead cipher text:\t%d\n",
+		sess->auth_key.length, sess->auth_key.alg,
+		sess->auth_key.algmode,
+		sess->iv.length, sess->iv.offset,
+		sess->digest_length, sess->auth_only_len,
+		sess->auth_cipher_text);
+#ifdef RTE_LIBRTE_SECURITY
+	printf("PDCP session params:\n"
+		"\tDomain:\t\t%d\n\tBearer:\t\t%d\n\tpkt_dir:\t%d\n\thfn_ovd:"
+		"\t%d\n\tsn_size:\t%d\n\tsdap_enabled:\t%d\n\thfn_ovd_offset:"
+		"\t%d\n\thfn:\t\t%d\n"
+		"\thfn_threshold:\t0x%x\n", sess->pdcp.domain,
+		sess->pdcp.bearer, sess->pdcp.pkt_dir, sess->pdcp.hfn_ovd,
+		sess->pdcp.sn_size, sess->pdcp.sdap_enabled,
+		sess->pdcp.hfn_ovd_offset, sess->pdcp.hfn,
+		sess->pdcp.hfn_threshold);
+#endif
+	c_cdb.sh_hdr.hi.word = rte_be_to_cpu_32(c_cdb.sh_hdr.hi.word);
+	c_cdb.sh_hdr.lo.word = rte_be_to_cpu_32(c_cdb.sh_hdr.lo.word);
+	bufsize = c_cdb.sh_hdr.hi.field.idlen;
+
+	printf("cdb = %p\n\n", cdb);
+	printf("Descriptor size = %d\n", bufsize);
+	int m;
+	for (m = 0; m < bufsize; m++)
+		printf("0x%x\n", rte_be_to_cpu_32(c_cdb.sh_desc[m]));
+
+	printf("\n");
+mbuf_dump:
+	sym_op = op->sym;
+	if (sym_op->m_src) {
+		printf("Source mbuf:\n");
+		rte_pktmbuf_dump(stdout, sym_op->m_src,
+				 sym_op->m_src->data_len);
+	}
+	if (sym_op->m_dst) {
+		printf("Destination mbuf:\n");
+		rte_pktmbuf_dump(stdout, sym_op->m_dst,
+				 sym_op->m_dst->data_len);
+	}
+
+	printf("Session address = %p\ncipher offset: %d, length: %d\n"
+		"auth offset: %d, length: %d\naead offset: %d, length: %d\n",
+		sym_op->session, sym_op->cipher.data.offset,
+		sym_op->cipher.data.length,
+		sym_op->auth.data.offset, sym_op->auth.data.length,
+		sym_op->aead.data.offset, sym_op->aead.data.length);
+	printf("\n");
+
+	printf("******************************************************\n");
+	printf("ctx info:\n");
+	printf("job->sg[0] output info:\n");
+	memcpy(&sg[0], &job->sg[0], sizeof(sg[0]));
+	printf("\taddr = %"PRIx64",\n\tlen = %d,\n\tfinal = %d,\n\textension = %d"
+		"\n\tbpid = %d\n\toffset = %d\n",
+		(unsigned long)sg[0].addr, sg[0].length, sg[0].final,
+		sg[0].extension, sg[0].bpid, sg[0].offset);
+	printf("\njob->sg[1] input info:\n");
+	memcpy(&sg[1], &job->sg[1], sizeof(sg[1]));
+	hw_sg_to_cpu(&sg[1]);
+	printf("\taddr = %"PRIx64",\n\tlen = %d,\n\tfinal = %d,\n\textension = %d"
+		"\n\tbpid = %d\n\toffset = %d\n",
+		(unsigned long)sg[1].addr, sg[1].length, sg[1].final,
+		sg[1].extension, sg[1].bpid, sg[1].offset);
+
+	printf("\nctx pool addr = %p\n", ctx->ctx_pool);
+	if (ctx->ctx_pool)
+		printf("ctx pool available counts = %d\n",
+			rte_mempool_avail_count(ctx->ctx_pool));
+
+	printf("\nop pool addr = %p\n", op->mempool);
+	if (op->mempool)
+		printf("op pool available counts = %d\n",
+			rte_mempool_avail_count(op->mempool));
+
+	printf("********************************************************\n");
+	printf("Queue data:\n");
+	printf("\tFQID = 0x%x\n\tstate = %d\n\tnb_desc = %d\n"
+		"\tctx_pool = %p\n\trx_pkts = %d\n\ttx_pkts = %d\n"
+		"\trx_errs = %d\n\ttx_errs = %d\n\n",
+		qp->outq.fqid, qp->outq.state, qp->outq.nb_desc,
+		qp->ctx_pool, qp->rx_pkts, qp->tx_pkts,
+		qp->rx_errs, qp->tx_errs);
+}
+
 /* qp is lockless, should be accessed by only one thread */
 static int
 dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
@@ -716,7 +861,12 @@ dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
 		if (!ctx->fd_status) {
 			op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
 		} else {
-			DPAA_SEC_DP_WARN("SEC return err:0x%x", ctx->fd_status);
+			if (dpaa_sec_dp_dump > DPAA_SEC_DP_NO_DUMP) {
+				DPAA_SEC_DP_WARN("SEC return err:0x%x\n",
+						  ctx->fd_status);
+				if (dpaa_sec_dp_dump > DPAA_SEC_DP_ERR_DUMP)
+					dpaa_sec_dump(ctx, qp);
+			}
 			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
 		}
 		ops[pkts++] = op;
@@ -3458,6 +3608,42 @@ dpaa_sec_uninit(struct rte_cryptodev *dev)
 	return 0;
 }
 
+static int
+check_devargs_handler(__rte_unused const char *key, const char *value,
+		      __rte_unused void *opaque)
+{
+	dpaa_sec_dp_dump = atoi(value);
+	if (dpaa_sec_dp_dump > DPAA_SEC_DP_FULL_DUMP) {
+		DPAA_SEC_WARN("WARN: DPAA_SEC_DP_DUMP_LEVEL is not "
+			      "supported, changing to FULL error prints\n");
+		dpaa_sec_dp_dump = DPAA_SEC_DP_FULL_DUMP;
+	}
+
+	return 0;
+}
+
+static void
+dpaa_sec_get_devargs(struct rte_devargs *devargs, const char *key)
+{
+	struct rte_kvargs *kvlist;
+
+	if (!devargs)
+		return;
+
+	kvlist = rte_kvargs_parse(devargs->args, NULL);
+	if (!kvlist)
+		return;
+
+	if (!rte_kvargs_count(kvlist, key)) {
+		rte_kvargs_free(kvlist);
+		return;
+	}
+
+	rte_kvargs_process(kvlist, key,
+				check_devargs_handler, NULL);
+	rte_kvargs_free(kvlist);
+}
+
 static int
 dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
 {
@@ -3533,6 +3719,8 @@ dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
 		}
 	}
 
+	dpaa_sec_get_devargs(cryptodev->device->devargs, DRIVER_DUMP_MODE);
+
 	RTE_LOG(INFO, PMD, "%s cryptodev init\n", cryptodev->data->name);
 	return 0;
 
@@ -3649,4 +3837,6 @@ static struct cryptodev_driver dpaa_sec_crypto_drv;
 RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver,
 		dpaa_cryptodev_driver_id);
+RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_DPAA_SEC_PMD,
+		DRIVER_DUMP_MODE "=<int>");
 RTE_LOG_REGISTER(dpaa_logtype_sec, pmd.crypto.dpaa, NOTICE);
-- 
2.25.1


^ permalink raw reply	[flat|nested] 42+ messages in thread
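
The dequeue path in the patch above only invokes dpaa_sec_dump() when the
configured drv_dump_mode level permits it, so the fast path stays untouched
when dumping is disabled. Below is a minimal sketch of that level-gated dump
pattern, assuming a hypothetical helper name and descriptor argument; the
enum values mirror the DPAA_SEC_DP_* levels introduced by the patch.

#include <stdio.h>
#include <stdint.h>
#include <rte_hexdump.h>

/* Mirrors the DPAA_SEC_DP_* dump levels added by the patch */
enum dump_level {
	DUMP_NONE,	/* 0: no dump on error */
	DUMP_ERR_CODE,	/* 1: dump the HW error code only */
	DUMP_FULL	/* 2: also dump session/queue/descriptor data */
};

/* Normally set from the drv_dump_mode devarg at probe time */
static enum dump_level dump_mode = DUMP_ERR_CODE;

static void
report_sec_error(uint32_t fd_status, const void *desc, unsigned int desc_len)
{
	if (dump_mode == DUMP_NONE)
		return;

	fprintf(stderr, "SEC returned error status 0x%x\n", fd_status);

	/* Full mode additionally hexdumps the shared descriptor */
	if (dump_mode == DUMP_FULL && desc != NULL)
		rte_hexdump(stderr, "shared descriptor", desc, desc_len);
}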

* RE: [EXT] [PATCH v3 0/7] NXP crypto drivers changes
  2022-02-10  4:31     ` [PATCH v3 0/7] NXP crypto drivers changes Gagandeep Singh
                         ` (6 preceding siblings ...)
  2022-02-10  4:31       ` [PATCH v3 7/7] crypto/dpaa_sec: add debug framework Gagandeep Singh
@ 2022-02-10  7:03       ` Akhil Goyal
  7 siblings, 0 replies; 42+ messages in thread
From: Akhil Goyal @ 2022-02-10  7:03 UTC (permalink / raw)
  To: Gagandeep Singh, dev

> v3-change-log
> * fix checkpatch issues
> * use devargs for strict ordering
> * fix AES_CMAC capabilities
> * remove GMAC patch from this series. I will send
> it as separate patch.
> 
> v2-change-log
> * using dev args for both DPAA1 and DPAA2 drivers to
>  dump debug prints on sec error.
> 
> Franck LENORMAND (1):
>   common/dpaax: caamflib: Remove code related to SEC ERA 1 to 7
> 
> Gagandeep Singh (3):
>   common/dpaax: change job processing mode for PDCP SDAP
>   crypto/dpaa2_sec: add useful debug prints in sec dequeue
>   crypto/dpaa_sec: add debug framework
> 
> Hemant Agrawal (2):
>   crypto/dpaa2_sec: change capabilities for AES_CMAC
>   crypto/dpaa2: fix to check next type for auth or cipher
> 
> Nipun Gupta (1):
>   crypto/dpaa2_sec: ordered queue support
> 
>  doc/guides/cryptodevs/dpaa2_sec.rst         |  17 +
>  doc/guides/cryptodevs/dpaa_sec.rst          |  10 +
>  drivers/bus/dpaa/dpaa_bus.c                 |  16 +-
>  drivers/common/dpaax/caamflib/desc/pdcp.h   | 939 ++++----------------
>  drivers/common/dpaax/caamflib/desc/sdap.h   | 111 +--
>  drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c | 421 ++++++++-
>  drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h   |  13 +-
>  drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h    |  14 +-
>  drivers/crypto/dpaa_sec/dpaa_sec.c          | 212 ++++-
>  9 files changed, 841 insertions(+), 912 deletions(-)
> 
Fix http://mails.dpdk.org/archives/test-report/2022-February/258660.html


^ permalink raw reply	[flat|nested] 42+ messages in thread

* [PATCH v4 0/7] NXP crypto drivers changes
  2022-02-10  4:31       ` [PATCH v3 1/7] common/dpaax: caamflib: Remove code related to SEC ERA 1 to 7 Gagandeep Singh
@ 2022-02-10 10:58         ` Gagandeep Singh
  2022-02-10 10:58           ` [PATCH v4 1/7] common/dpaax: caamflib: Remove code related to SEC ERA 1 to 7 Gagandeep Singh
                             ` (7 more replies)
  0 siblings, 8 replies; 42+ messages in thread
From: Gagandeep Singh @ 2022-02-10 10:58 UTC (permalink / raw)
  To: gakhil, dev; +Cc: Gagandeep Singh

v4-change-log
* fix i386 compilation

v3-change-log
* fix checkpatch issues
* use devargs for strict ordering
* fix AES_CMAC capabilities
* remove GMAC patch from this series. I will send
  it as a separate patch.

v2-change-log
* use devargs for both DPAA1 and DPAA2 drivers to
  dump debug prints on SEC errors.

Franck LENORMAND (1):
  common/dpaax: caamflib: Remove code related to SEC ERA 1 to 7

Gagandeep Singh (3):
  common/dpaax: change job processing mode for PDCP SDAP
  crypto/dpaa2_sec: add useful debug prints in sec dequeue
  crypto/dpaa_sec: add debug framework

Hemant Agrawal (2):
  crypto/dpaa2_sec: change capabilities for AES_CMAC
  crypto/dpaa2: fix to check next type for auth or cipher

Nipun Gupta (1):
  crypto/dpaa2_sec: ordered queue support

 doc/guides/cryptodevs/dpaa2_sec.rst         |  17 +
 doc/guides/cryptodevs/dpaa_sec.rst          |  10 +
 drivers/bus/dpaa/dpaa_bus.c                 |  16 +-
 drivers/common/dpaax/caamflib/desc/pdcp.h   | 939 ++++----------------
 drivers/common/dpaax/caamflib/desc/sdap.h   | 111 +--
 drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c | 422 ++++++++-
 drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h   |   7 +-
 drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h    |  14 +-
 drivers/crypto/dpaa_sec/dpaa_sec.c          | 212 ++++-
 9 files changed, 840 insertions(+), 908 deletions(-)

-- 
2.25.1


^ permalink raw reply	[flat|nested] 42+ messages in thread

* [PATCH v4 1/7] common/dpaax: caamflib: Remove code related to SEC ERA 1 to 7
  2022-02-10 10:58         ` [PATCH v4 0/7] NXP crypto drivers changes Gagandeep Singh
@ 2022-02-10 10:58           ` Gagandeep Singh
  2022-02-10 10:58           ` [PATCH v4 2/7] common/dpaax: change job processing mode for PDCP SDAP Gagandeep Singh
                             ` (6 subsequent siblings)
  7 siblings, 0 replies; 42+ messages in thread
From: Gagandeep Singh @ 2022-02-10 10:58 UTC (permalink / raw)
  To: gakhil, dev; +Cc: Franck LENORMAND, Gagandeep Singh

From: Franck LENORMAND <franck.lenormand@nxp.com>

DPAA1 and DPAA2 platforms use SEC ERA 8 and 10 only.

This patch removes code in SDAP and PDCP header related to these
ERA to simplify the codebase:
 - Simplify logic using RTA_SEC_ERA_<> macro
 - Remove era_2_sw_hfn_ovrd dedicated to RTA_SEC_ERA_2

Signed-off-by: Franck LENORMAND <franck.lenormand@nxp.com>
Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 drivers/common/dpaax/caamflib/desc/pdcp.h   | 939 ++++----------------
 drivers/common/dpaax/caamflib/desc/sdap.h   |  91 +-
 drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c |  14 +-
 drivers/crypto/dpaa_sec/dpaa_sec.c          |  14 +-
 4 files changed, 183 insertions(+), 875 deletions(-)

diff --git a/drivers/common/dpaax/caamflib/desc/pdcp.h b/drivers/common/dpaax/caamflib/desc/pdcp.h
index 8e8daf5ba8..2fe56c53c6 100644
--- a/drivers/common/dpaax/caamflib/desc/pdcp.h
+++ b/drivers/common/dpaax/caamflib/desc/pdcp.h
@@ -329,91 +329,35 @@ pdcp_insert_cplane_null_op(struct program *p,
 			   struct alginfo *cipherdata __maybe_unused,
 			   struct alginfo *authdata __maybe_unused,
 			   unsigned int dir,
-			   enum pdcp_sn_size sn_size __maybe_unused,
-			   unsigned char era_2_sw_hfn_ovrd __maybe_unused)
+			   enum pdcp_sn_size sn_size __maybe_unused)
 {
-	LABEL(local_offset);
-	REFERENCE(move_cmd_read_descbuf);
-	REFERENCE(move_cmd_write_descbuf);
-
-	if (rta_sec_era > RTA_SEC_ERA_2) {
-		MATHB(p, SEQINSZ, ADD, ZERO, VSEQINSZ, 4, 0);
-		if (dir == OP_TYPE_ENCAP_PROTOCOL)
-			MATHB(p, SEQINSZ, ADD, PDCP_MAC_I_LEN, VSEQOUTSZ, 4,
-			      IMMED2);
-		else
-			MATHB(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, VSEQOUTSZ, 4,
-			      IMMED2);
-	} else {
-		MATHB(p, SEQINSZ, ADD, ONE, VSEQINSZ, 4, 0);
-		MATHB(p, VSEQINSZ, SUB, ONE, VSEQINSZ, 4, 0);
-
-		if (dir == OP_TYPE_ENCAP_PROTOCOL) {
-			MATHB(p, SEQINSZ, ADD, PDCP_MAC_I_LEN, VSEQOUTSZ, 4,
-			      IMMED2);
-			MATHB(p, VSEQINSZ, SUB, ONE, MATH0, 4, 0);
-		} else {
-			MATHB(p, VSEQINSZ, SUB, PDCP_MAC_I_LEN, VSEQINSZ, 4,
-			      IMMED2);
-			MATHB(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, VSEQOUTSZ, 4,
-			      IMMED2);
-			MATHB(p, VSEQOUTSZ, SUB, ONE, MATH0, 4, 0);
-		}
-
-		MATHB(p, MATH0, ADD, ONE, MATH0, 4, 0);
+	MATHB(p, SEQINSZ, ADD, ZERO, VSEQINSZ, 4, 0);
+	if (dir == OP_TYPE_ENCAP_PROTOCOL)
+		MATHB(p, SEQINSZ, ADD, PDCP_MAC_I_LEN, VSEQOUTSZ, 4,
+		      IMMED2);
+	else
+		MATHB(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, VSEQOUTSZ, 4,
+		      IMMED2);
 
-		/*
-		 * Since MOVELEN is available only starting with
-		 * SEC ERA 3, use poor man's MOVELEN: create a MOVE
-		 * command dynamically by writing the length from M1 by
-		 * OR-ing the command in the M1 register and MOVE the
-		 * result into the descriptor buffer. Care must be taken
-		 * wrt. the location of the command because of SEC
-		 * pipelining. The actual MOVEs are written at the end
-		 * of the descriptor due to calculations needed on the
-		 * offset in the descriptor for the MOVE command.
-		 */
-		move_cmd_read_descbuf = MOVE(p, DESCBUF, 0, MATH0, 0, 6,
-					     IMMED);
-		move_cmd_write_descbuf = MOVE(p, MATH0, 0, DESCBUF, 0, 8,
-					      WAITCOMP | IMMED);
-	}
 	MATHB(p, VSEQINSZ, SUB, PDCP_NULL_MAX_FRAME_LEN, NONE, 4,
 	      IMMED2);
 	JUMP(p, PDCP_MAX_FRAME_LEN_STATUS, HALT_STATUS, ALL_FALSE, MATH_N);
 
-	if (rta_sec_era > RTA_SEC_ERA_2) {
-		if (dir == OP_TYPE_ENCAP_PROTOCOL)
-			MATHB(p, VSEQINSZ, ADD, ZERO, MATH0, 4, 0);
-		else
-			MATHB(p, VSEQOUTSZ, ADD, ZERO, MATH0, 4, 0);
-	}
+	if (dir == OP_TYPE_ENCAP_PROTOCOL)
+		MATHB(p, VSEQINSZ, ADD, ZERO, MATH0, 4, 0);
+	else
+		MATHB(p, VSEQOUTSZ, ADD, ZERO, MATH0, 4, 0);
+
 	SEQFIFOSTORE(p, MSG, 0, 0, VLF);
 	SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1 | FLUSH1);
 
-	if (rta_sec_era > RTA_SEC_ERA_2) {
-		MOVE(p, AB1, 0, OFIFO, 0, MATH0, 0);
-	} else {
-		SET_LABEL(p, local_offset);
-
-		/* Shut off automatic Info FIFO entries */
-		LOAD(p, 0, DCTRL, LDOFF_DISABLE_AUTO_NFIFO, 0, IMMED);
-		/* Placeholder for MOVE command with length from M1 register */
-		MOVE(p, IFIFOAB1, 0, OFIFO, 0, 0, IMMED);
-		/* Enable automatic Info FIFO entries */
-		LOAD(p, 0, DCTRL, LDOFF_ENABLE_AUTO_NFIFO, 0, IMMED);
-	}
+	MOVE(p, AB1, 0, OFIFO, 0, MATH0, 0);
 
 	if (dir == OP_TYPE_ENCAP_PROTOCOL) {
 		MATHB(p, MATH1, XOR, MATH1, MATH0, 8, 0);
 		MOVE(p, MATH0, 0, OFIFO, 0, 4, IMMED);
 	}
 
-	if (rta_sec_era < RTA_SEC_ERA_3) {
-		PATCH_MOVE(p, move_cmd_read_descbuf, local_offset);
-		PATCH_MOVE(p, move_cmd_write_descbuf, local_offset);
-	}
-
 	return 0;
 }
 
@@ -422,66 +366,21 @@ insert_copy_frame_op(struct program *p,
 		     struct alginfo *cipherdata __maybe_unused,
 		     unsigned int dir __maybe_unused)
 {
-	LABEL(local_offset);
-	REFERENCE(move_cmd_read_descbuf);
-	REFERENCE(move_cmd_write_descbuf);
-
-	if (rta_sec_era > RTA_SEC_ERA_2) {
-		MATHB(p, SEQINSZ, ADD, ZERO, VSEQINSZ,  4, 0);
-		MATHB(p, SEQINSZ, ADD, ZERO, VSEQOUTSZ,  4, 0);
-	} else {
-		MATHB(p, SEQINSZ, ADD, ONE, VSEQINSZ,  4, 0);
-		MATHB(p, VSEQINSZ, SUB, ONE, VSEQINSZ,  4, 0);
-		MATHB(p, SEQINSZ, ADD, ONE, VSEQOUTSZ,  4, 0);
-		MATHB(p, VSEQOUTSZ, SUB, ONE, VSEQOUTSZ,  4, 0);
-		MATHB(p, VSEQINSZ, SUB, ONE, MATH0,  4, 0);
-		MATHB(p, MATH0, ADD, ONE, MATH0,  4, 0);
+	MATHB(p, SEQINSZ, ADD, ZERO, VSEQINSZ,  4, 0);
+	MATHB(p, SEQINSZ, ADD, ZERO, VSEQOUTSZ,  4, 0);
 
-		/*
-		 * Since MOVELEN is available only starting with
-		 * SEC ERA 3, use poor man's MOVELEN: create a MOVE
-		 * command dynamically by writing the length from M1 by
-		 * OR-ing the command in the M1 register and MOVE the
-		 * result into the descriptor buffer. Care must be taken
-		 * wrt. the location of the command because of SEC
-		 * pipelining. The actual MOVEs are written at the end
-		 * of the descriptor due to calculations needed on the
-		 * offset in the descriptor for the MOVE command.
-		 */
-		move_cmd_read_descbuf = MOVE(p, DESCBUF, 0, MATH0, 0, 6,
-					     IMMED);
-		move_cmd_write_descbuf = MOVE(p, MATH0, 0, DESCBUF, 0, 8,
-					      WAITCOMP | IMMED);
-	}
 	MATHB(p, SEQINSZ, SUB, PDCP_NULL_MAX_FRAME_LEN, NONE,  4,
 	      IFB | IMMED2);
 	JUMP(p, PDCP_MAX_FRAME_LEN_STATUS, HALT_STATUS, ALL_FALSE, MATH_N);
 
-	if (rta_sec_era > RTA_SEC_ERA_2)
-		MATHB(p, VSEQINSZ, ADD, ZERO, MATH0,  4, 0);
+	MATHB(p, VSEQINSZ, ADD, ZERO, MATH0,  4, 0);
 
 	SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1 | FLUSH1);
-	if (rta_sec_era > RTA_SEC_ERA_2) {
-		MOVE(p, AB1, 0, OFIFO, 0, MATH0, 0);
-	} else {
-		SET_LABEL(p, local_offset);
 
-		/* Shut off automatic Info FIFO entries */
-		LOAD(p, 0, DCTRL, LDOFF_DISABLE_AUTO_NFIFO, 0, IMMED);
-
-		/* Placeholder for MOVE command with length from M0 register */
-		MOVE(p, IFIFOAB1, 0, OFIFO, 0, 0, IMMED);
-
-		/* Enable automatic Info FIFO entries */
-		LOAD(p, 0, DCTRL, LDOFF_ENABLE_AUTO_NFIFO, 0, IMMED);
-	}
+	MOVE(p, AB1, 0, OFIFO, 0, MATH0, 0);
 
 	SEQFIFOSTORE(p, MSG, 0, 0, VLF);
 
-	if (rta_sec_era < RTA_SEC_ERA_3) {
-		PATCH_MOVE(p, move_cmd_read_descbuf, local_offset);
-		PATCH_MOVE(p, move_cmd_write_descbuf, local_offset);
-	}
 	return 0;
 }
 
@@ -490,13 +389,12 @@ pdcp_insert_cplane_int_only_op(struct program *p,
 			       bool swap __maybe_unused,
 			       struct alginfo *cipherdata __maybe_unused,
 			       struct alginfo *authdata, unsigned int dir,
-			       enum pdcp_sn_size sn_size,
-			       unsigned char era_2_sw_hfn_ovrd)
+			       enum pdcp_sn_size sn_size)
 {
 	uint32_t offset = 0, length = 0, sn_mask = 0;
 
 	/* 12 bit SN is only supported for protocol offload case */
-	if (rta_sec_era >= RTA_SEC_ERA_8 && sn_size == PDCP_SN_SIZE_12) {
+	if (sn_size == PDCP_SN_SIZE_12) {
 		KEY(p, KEY2, authdata->key_enc_flags, authdata->key,
 		    authdata->keylen, INLINE_KEY(authdata));
 
@@ -526,9 +424,6 @@ pdcp_insert_cplane_int_only_op(struct program *p,
 		return -ENOTSUP;
 
 	}
-	LABEL(local_offset);
-	REFERENCE(move_cmd_read_descbuf);
-	REFERENCE(move_cmd_write_descbuf);
 
 	switch (authdata->algtype) {
 	case PDCP_AUTH_TYPE_SNOW:
@@ -538,14 +433,7 @@ pdcp_insert_cplane_int_only_op(struct program *p,
 		SEQLOAD(p, MATH0, offset, length, 0);
 		JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CALM);
 
-		if (rta_sec_era > RTA_SEC_ERA_2 ||
-		    (rta_sec_era == RTA_SEC_ERA_2 &&
-				   era_2_sw_hfn_ovrd == 0)) {
-			SEQINPTR(p, 0, length, RTO);
-		} else {
-			SEQINPTR(p, 0, 5, RTO);
-			SEQFIFOLOAD(p, SKIP, 4, 0);
-		}
+		SEQINPTR(p, 0, length, RTO);
 
 		if (swap == false) {
 			MATHB(p, MATH0, AND, sn_mask, MATH1,  8,
@@ -580,40 +468,11 @@ pdcp_insert_cplane_int_only_op(struct program *p,
 			MATHB(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, MATH1, 4,
 			      IMMED2);
 		} else {
-			if (rta_sec_era > RTA_SEC_ERA_2) {
-				MATHB(p, SEQINSZ, SUB, ZERO, MATH1, 4,
-				      0);
-			} else {
-				MATHB(p, SEQINSZ, ADD, ONE, MATH1, 4,
-				      0);
-				MATHB(p, MATH1, SUB, ONE, MATH1, 4,
-				      0);
-			}
+			MATHB(p, SEQINSZ, SUB, ZERO, MATH1, 4, 0);
 		}
 
-		if (rta_sec_era > RTA_SEC_ERA_2) {
-			MATHB(p, MATH1, SUB, ZERO, VSEQINSZ, 4, 0);
-			MATHB(p, MATH1, SUB, ZERO, VSEQOUTSZ, 4, 0);
-		} else {
-			MATHB(p, ZERO, ADD, MATH1, VSEQINSZ, 4, 0);
-			MATHB(p, ZERO, ADD, MATH1, VSEQOUTSZ, 4, 0);
-
-			/*
-			 * Since MOVELEN is available only starting with
-			 * SEC ERA 3, use poor man's MOVELEN: create a MOVE
-			 * command dynamically by writing the length from M1 by
-			 * OR-ing the command in the M1 register and MOVE the
-			 * result into the descriptor buffer. Care must be taken
-			 * wrt. the location of the command because of SEC
-			 * pipelining. The actual MOVEs are written at the end
-			 * of the descriptor due to calculations needed on the
-			 * offset in the descriptor for the MOVE command.
-			 */
-			move_cmd_read_descbuf = MOVE(p, DESCBUF, 0, MATH1, 0, 6,
-						     IMMED);
-			move_cmd_write_descbuf = MOVE(p, MATH1, 0, DESCBUF, 0,
-						      8, WAITCOMP | IMMED);
-		}
+		MATHB(p, MATH1, SUB, ZERO, VSEQINSZ, 4, 0);
+		MATHB(p, MATH1, SUB, ZERO, VSEQOUTSZ, 4, 0);
 
 		SEQFIFOSTORE(p, MSG, 0, 0, VLF);
 		ALG_OPERATION(p, OP_ALG_ALGSEL_SNOW_F9, OP_ALG_AAI_F9,
@@ -622,25 +481,9 @@ pdcp_insert_cplane_int_only_op(struct program *p,
 				     ICV_CHECK_DISABLE : ICV_CHECK_ENABLE,
 			      DIR_ENC);
 
-		if (rta_sec_era > RTA_SEC_ERA_2) {
-			SEQFIFOLOAD(p, MSGINSNOOP, 0,
+		SEQFIFOLOAD(p, MSGINSNOOP, 0,
 				    VLF | LAST1 | LAST2 | FLUSH1);
-			MOVE(p, AB1, 0, OFIFO, 0, MATH1, 0);
-		} else {
-			SEQFIFOLOAD(p, MSGINSNOOP, 0,
-				    VLF | LAST1 | LAST2 | FLUSH1);
-			SET_LABEL(p, local_offset);
-
-			/* Shut off automatic Info FIFO entries */
-			LOAD(p, 0, DCTRL, LDOFF_DISABLE_AUTO_NFIFO, 0, IMMED);
-			/*
-			 * Placeholder for MOVE command with length from M1
-			 * register
-			 */
-			MOVE(p, IFIFOAB1, 0, OFIFO, 0, 0, IMMED);
-			/* Enable automatic Info FIFO entries */
-			LOAD(p, 0, DCTRL, LDOFF_ENABLE_AUTO_NFIFO, 0, IMMED);
-		}
+		MOVE(p, AB1, 0, OFIFO, 0, MATH1, 0);
 
 		if (dir == OP_TYPE_DECAP_PROTOCOL)
 			SEQFIFOLOAD(p, ICV2, 4, LAST2);
@@ -655,14 +498,7 @@ pdcp_insert_cplane_int_only_op(struct program *p,
 		    authdata->keylen, INLINE_KEY(authdata));
 		SEQLOAD(p, MATH0, offset, length, 0);
 		JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CALM);
-		if (rta_sec_era > RTA_SEC_ERA_2 ||
-		    (rta_sec_era == RTA_SEC_ERA_2 &&
-		     era_2_sw_hfn_ovrd == 0)) {
-			SEQINPTR(p, 0, length, RTO);
-		} else {
-			SEQINPTR(p, 0, 5, RTO);
-			SEQFIFOLOAD(p, SKIP, 4, 0);
-		}
+		SEQINPTR(p, 0, length, RTO);
 
 		if (swap == false) {
 			MATHB(p, MATH0, AND, sn_mask, MATH1, 8,
@@ -686,40 +522,12 @@ pdcp_insert_cplane_int_only_op(struct program *p,
 			MATHB(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, MATH1, 4,
 			      IMMED2);
 		} else {
-			if (rta_sec_era > RTA_SEC_ERA_2) {
-				MATHB(p, SEQINSZ, SUB, ZERO, MATH1, 4,
-				      0);
-			} else {
-				MATHB(p, SEQINSZ, ADD, ONE, MATH1, 4,
-				      0);
-				MATHB(p, MATH1, SUB, ONE, MATH1, 4,
-				      0);
-			}
+			MATHB(p, SEQINSZ, SUB, ZERO, MATH1, 4, 0);
 		}
 
-		if (rta_sec_era > RTA_SEC_ERA_2) {
-			MATHB(p, MATH1, SUB, ZERO, VSEQINSZ, 4, 0);
-			MATHB(p, MATH1, SUB, ZERO, VSEQOUTSZ, 4, 0);
-		} else {
-			MATHB(p, ZERO, ADD, MATH1, VSEQINSZ, 4, 0);
-			MATHB(p, ZERO, ADD, MATH1, VSEQOUTSZ, 4, 0);
-
-			/*
-			 * Since MOVELEN is available only starting with
-			 * SEC ERA 3, use poor man's MOVELEN: create a MOVE
-			 * command dynamically by writing the length from M1 by
-			 * OR-ing the command in the M1 register and MOVE the
-			 * result into the descriptor buffer. Care must be taken
-			 * wrt. the location of the command because of SEC
-			 * pipelining. The actual MOVEs are written at the end
-			 * of the descriptor due to calculations needed on the
-			 * offset in the descriptor for the MOVE command.
-			 */
-			move_cmd_read_descbuf = MOVE(p, DESCBUF, 0, MATH1, 0, 6,
-						     IMMED);
-			move_cmd_write_descbuf = MOVE(p, MATH1, 0, DESCBUF, 0,
-						      8, WAITCOMP | IMMED);
-		}
+		MATHB(p, MATH1, SUB, ZERO, VSEQINSZ, 4, 0);
+		MATHB(p, MATH1, SUB, ZERO, VSEQOUTSZ, 4, 0);
+
 		SEQFIFOSTORE(p, MSG, 0, 0, VLF);
 		ALG_OPERATION(p, OP_ALG_ALGSEL_AES,
 			      OP_ALG_AAI_CMAC,
@@ -728,27 +536,9 @@ pdcp_insert_cplane_int_only_op(struct program *p,
 				     ICV_CHECK_DISABLE : ICV_CHECK_ENABLE,
 			      DIR_ENC);
 
-		if (rta_sec_era > RTA_SEC_ERA_2) {
-			MOVE(p, AB2, 0, OFIFO, 0, MATH1, 0);
-			SEQFIFOLOAD(p, MSGINSNOOP, 0,
+		MOVE(p, AB2, 0, OFIFO, 0, MATH1, 0);
+		SEQFIFOLOAD(p, MSGINSNOOP, 0,
 				    VLF | LAST1 | LAST2 | FLUSH1);
-		} else {
-			SEQFIFOLOAD(p, MSGINSNOOP, 0,
-				    VLF | LAST1 | LAST2 | FLUSH1);
-			SET_LABEL(p, local_offset);
-
-			/* Shut off automatic Info FIFO entries */
-			LOAD(p, 0, DCTRL, LDOFF_DISABLE_AUTO_NFIFO, 0, IMMED);
-
-			/*
-			 * Placeholder for MOVE command with length from
-			 * M1 register
-			 */
-			MOVE(p, IFIFOAB2, 0, OFIFO, 0, 0, IMMED);
-
-			/* Enable automatic Info FIFO entries */
-			LOAD(p, 0, DCTRL, LDOFF_ENABLE_AUTO_NFIFO, 0, IMMED);
-		}
 
 		if (dir == OP_TYPE_DECAP_PROTOCOL)
 			SEQFIFOLOAD(p, ICV1, 4, LAST1 | FLUSH1);
@@ -758,10 +548,6 @@ pdcp_insert_cplane_int_only_op(struct program *p,
 		break;
 
 	case PDCP_AUTH_TYPE_ZUC:
-		if (rta_sec_era < RTA_SEC_ERA_5) {
-			pr_err("Invalid era for selected algorithm\n");
-			return -ENOTSUP;
-		}
 		/* Insert Auth Key */
 		KEY(p, KEY2, authdata->key_enc_flags, authdata->key,
 		    authdata->keylen, INLINE_KEY(authdata));
@@ -817,11 +603,6 @@ pdcp_insert_cplane_int_only_op(struct program *p,
 		return -EINVAL;
 	}
 
-	if (rta_sec_era < RTA_SEC_ERA_3) {
-		PATCH_MOVE(p, move_cmd_read_descbuf, local_offset);
-		PATCH_MOVE(p, move_cmd_write_descbuf, local_offset);
-	}
-
 	return 0;
 }
 
@@ -831,15 +612,14 @@ pdcp_insert_cplane_enc_only_op(struct program *p,
 			       struct alginfo *cipherdata,
 			       struct alginfo *authdata __maybe_unused,
 			       unsigned int dir,
-			       enum pdcp_sn_size sn_size,
-			       unsigned char era_2_sw_hfn_ovrd __maybe_unused)
+			       enum pdcp_sn_size sn_size)
 {
 	uint32_t offset = 0, length = 0, sn_mask = 0;
 	/* Insert Cipher Key */
 	KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
 	    cipherdata->keylen, INLINE_KEY(cipherdata));
 
-	if ((rta_sec_era >= RTA_SEC_ERA_8 && sn_size != PDCP_SN_SIZE_18 &&
+	if ((sn_size != PDCP_SN_SIZE_18 &&
 			!(rta_sec_era == RTA_SEC_ERA_8 &&
 				authdata->algtype == 0))
 			|| (rta_sec_era == RTA_SEC_ERA_10)) {
@@ -889,12 +669,7 @@ pdcp_insert_cplane_enc_only_op(struct program *p,
 	case PDCP_CIPHER_TYPE_SNOW:
 		MOVEB(p, MATH2, 0, CONTEXT1, 0, 8, WAITCOMP | IMMED);
 
-		if (rta_sec_era > RTA_SEC_ERA_2) {
-			MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
-		} else {
-			MATHB(p, SEQINSZ, SUB, ONE, MATH1, 4, 0);
-			MATHB(p, MATH1, ADD, ONE, VSEQINSZ, 4, 0);
-		}
+		MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
 
 		if (dir == OP_TYPE_ENCAP_PROTOCOL)
 			MATHB(p, SEQINSZ, ADD, PDCP_MAC_I_LEN, VSEQOUTSZ, 4,
@@ -913,12 +688,7 @@ pdcp_insert_cplane_enc_only_op(struct program *p,
 	case PDCP_CIPHER_TYPE_AES:
 		MOVEB(p, MATH2, 0, CONTEXT1, 0x10, 0x10, WAITCOMP | IMMED);
 
-		if (rta_sec_era > RTA_SEC_ERA_2) {
-			MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
-		} else {
-			MATHB(p, SEQINSZ, SUB, ONE, MATH1, 4, 0);
-			MATHB(p, MATH1, ADD, ONE, VSEQINSZ, 4, 0);
-		}
+		MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
 
 		if (dir == OP_TYPE_ENCAP_PROTOCOL)
 			MATHB(p, SEQINSZ, ADD, PDCP_MAC_I_LEN, VSEQOUTSZ, 4,
@@ -937,11 +707,6 @@ pdcp_insert_cplane_enc_only_op(struct program *p,
 		break;
 
 	case PDCP_CIPHER_TYPE_ZUC:
-		if (rta_sec_era < RTA_SEC_ERA_5) {
-			pr_err("Invalid era for selected algorithm\n");
-			return -ENOTSUP;
-		}
-
 		MOVEB(p, MATH2, 0, CONTEXT1, 0, 0x08, IMMED);
 		MOVEB(p, MATH2, 0, CONTEXT1, 0x08, 0x08, WAITCOMP | IMMED);
 		MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
@@ -988,8 +753,7 @@ pdcp_insert_uplane_snow_snow_op(struct program *p,
 			      struct alginfo *cipherdata,
 			      struct alginfo *authdata,
 			      unsigned int dir,
-			      enum pdcp_sn_size sn_size,
-			      unsigned char era_2_sw_hfn_ovrd __maybe_unused)
+			      enum pdcp_sn_size sn_size)
 {
 	uint32_t offset = 0, length = 0, sn_mask = 0;
 
@@ -998,7 +762,7 @@ pdcp_insert_uplane_snow_snow_op(struct program *p,
 	KEY(p, KEY2, authdata->key_enc_flags, authdata->key, authdata->keylen,
 	    INLINE_KEY(authdata));
 
-	if (rta_sec_era >= RTA_SEC_ERA_8 && sn_size != PDCP_SN_SIZE_18) {
+	if (sn_size != PDCP_SN_SIZE_18) {
 		int pclid;
 
 		if (sn_size == PDCP_SN_SIZE_5)
@@ -1014,18 +778,13 @@ pdcp_insert_uplane_snow_snow_op(struct program *p,
 	}
 	/* Non-proto is supported only for 5bit cplane and 18bit uplane */
 	switch (sn_size) {
-	case PDCP_SN_SIZE_5:
-		offset = 7;
-		length = 1;
-		sn_mask = (swap == false) ? PDCP_C_PLANE_SN_MASK :
-					PDCP_C_PLANE_SN_MASK_BE;
-		break;
 	case PDCP_SN_SIZE_18:
 		offset = 5;
 		length = 3;
 		sn_mask = (swap == false) ? PDCP_U_PLANE_18BIT_SN_MASK :
 					PDCP_U_PLANE_18BIT_SN_MASK_BE;
 		break;
+	case PDCP_SN_SIZE_5:
 	case PDCP_SN_SIZE_7:
 	case PDCP_SN_SIZE_12:
 	case PDCP_SN_SIZE_15:
@@ -1094,20 +853,13 @@ pdcp_insert_uplane_snow_snow_op(struct program *p,
 		SEQFIFOLOAD(p, MSG1, 4, LAST1 | FLUSH1);
 		JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CLASS1 | NOP | NIFP);
 
-		if (rta_sec_era >= RTA_SEC_ERA_6)
-			LOAD(p, 0, DCTRL, 0, LDLEN_RST_CHA_OFIFO_PTR, IMMED);
+		LOAD(p, 0, DCTRL, 0, LDLEN_RST_CHA_OFIFO_PTR, IMMED);
 
 		MOVE(p, OFIFO, 0, MATH0, 0, 4, WAITCOMP | IMMED);
 
 		NFIFOADD(p, IFIFO, ICV2, 4, LAST2);
 
-		if (rta_sec_era <= RTA_SEC_ERA_2) {
-			/* Shut off automatic Info FIFO entries */
-			LOAD(p, 0, DCTRL, LDOFF_DISABLE_AUTO_NFIFO, 0, IMMED);
-			MOVE(p, MATH0, 0, IFIFOAB2, 0, 4, WAITCOMP | IMMED);
-		} else {
-			MOVE(p, MATH0, 0, IFIFO, 0, 4, WAITCOMP | IMMED);
-		}
+		MOVE(p, MATH0, 0, IFIFO, 0, 4, WAITCOMP | IMMED);
 	}
 
 	return 0;
@@ -1119,19 +871,13 @@ pdcp_insert_uplane_zuc_zuc_op(struct program *p,
 			      struct alginfo *cipherdata,
 			      struct alginfo *authdata,
 			      unsigned int dir,
-			      enum pdcp_sn_size sn_size,
-			      unsigned char era_2_sw_hfn_ovrd __maybe_unused)
+			      enum pdcp_sn_size sn_size)
 {
 	uint32_t offset = 0, length = 0, sn_mask = 0;
 
 	LABEL(keyjump);
 	REFERENCE(pkeyjump);
 
-	if (rta_sec_era < RTA_SEC_ERA_5) {
-		pr_err("Invalid era for selected algorithm\n");
-		return -ENOTSUP;
-	}
-
 	pkeyjump = JUMP(p, keyjump, LOCAL_JUMP, ALL_TRUE, SHRD | SELF | BOTH);
 	KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
 	    cipherdata->keylen, INLINE_KEY(cipherdata));
@@ -1141,7 +887,7 @@ pdcp_insert_uplane_zuc_zuc_op(struct program *p,
 	SET_LABEL(p, keyjump);
 	PATCH_JUMP(p, pkeyjump, keyjump);
 
-	if (rta_sec_era >= RTA_SEC_ERA_8 && sn_size != PDCP_SN_SIZE_18) {
+	if (sn_size != PDCP_SN_SIZE_18) {
 		int pclid;
 
 		if (sn_size == PDCP_SN_SIZE_5)
@@ -1157,18 +903,13 @@ pdcp_insert_uplane_zuc_zuc_op(struct program *p,
 	}
 	/* Non-proto is supported only for 5bit cplane and 18bit uplane */
 	switch (sn_size) {
-	case PDCP_SN_SIZE_5:
-		offset = 7;
-		length = 1;
-		sn_mask = (swap == false) ? PDCP_C_PLANE_SN_MASK :
-					PDCP_C_PLANE_SN_MASK_BE;
-		break;
 	case PDCP_SN_SIZE_18:
 		offset = 5;
 		length = 3;
 		sn_mask = (swap == false) ? PDCP_U_PLANE_18BIT_SN_MASK :
 					PDCP_U_PLANE_18BIT_SN_MASK_BE;
 		break;
+	case PDCP_SN_SIZE_5:
 	case PDCP_SN_SIZE_7:
 	case PDCP_SN_SIZE_12:
 	case PDCP_SN_SIZE_15:
@@ -1243,12 +984,11 @@ pdcp_insert_uplane_aes_aes_op(struct program *p,
 			      struct alginfo *cipherdata,
 			      struct alginfo *authdata,
 			      unsigned int dir,
-			      enum pdcp_sn_size sn_size,
-			      unsigned char era_2_sw_hfn_ovrd __maybe_unused)
+			      enum pdcp_sn_size sn_size)
 {
 	uint32_t offset = 0, length = 0, sn_mask = 0;
 
-	if ((rta_sec_era >= RTA_SEC_ERA_8 && sn_size != PDCP_SN_SIZE_18)) {
+	if (sn_size != PDCP_SN_SIZE_18) {
 		/* Insert Auth Key */
 		KEY(p, KEY2, authdata->key_enc_flags, authdata->key,
 		    authdata->keylen, INLINE_KEY(authdata));
@@ -1392,8 +1132,7 @@ pdcp_insert_cplane_acc_op(struct program *p,
 			  struct alginfo *cipherdata,
 			  struct alginfo *authdata,
 			  unsigned int dir,
-			  enum pdcp_sn_size sn_size,
-			  unsigned char era_2_hfn_ovrd __maybe_unused)
+			  enum pdcp_sn_size sn_size)
 {
 	/* Insert Auth Key */
 	KEY(p, KEY2, authdata->key_enc_flags, authdata->key, authdata->keylen,
@@ -1420,8 +1159,7 @@ pdcp_insert_cplane_snow_aes_op(struct program *p,
 			       struct alginfo *cipherdata,
 			       struct alginfo *authdata,
 			       unsigned int dir,
-			       enum pdcp_sn_size sn_size,
-			       unsigned char era_2_sw_hfn_ovrd)
+			       enum pdcp_sn_size sn_size)
 {
 	uint32_t offset = 0, length = 0, sn_mask = 0;
 
@@ -1429,14 +1167,12 @@ pdcp_insert_cplane_snow_aes_op(struct program *p,
 	LABEL(end_desc);
 	LABEL(local_offset);
 	LABEL(jump_to_beginning);
-	LABEL(fifo_load_mac_i_offset);
 	REFERENCE(seqin_ptr_read);
 	REFERENCE(seqin_ptr_write);
 	REFERENCE(seq_out_read);
 	REFERENCE(jump_back_to_sd_cmd);
-	REFERENCE(move_mac_i_to_desc_buf);
 
-	if ((rta_sec_era >= RTA_SEC_ERA_8 && sn_size != PDCP_SN_SIZE_18) ||
+	if ((sn_size != PDCP_SN_SIZE_18) ||
 		(rta_sec_era == RTA_SEC_ERA_10)) {
 		KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
 				cipherdata->keylen, INLINE_KEY(cipherdata));
@@ -1484,56 +1220,17 @@ pdcp_insert_cplane_snow_aes_op(struct program *p,
 	MATHB(p, MATH1, OR, MATH2, MATH2, 8, 0);
 	SEQSTORE(p, MATH0, offset, length, 0);
 	if (dir == OP_TYPE_ENCAP_PROTOCOL) {
-		if (rta_sec_era > RTA_SEC_ERA_2 ||
-		    (rta_sec_era == RTA_SEC_ERA_2 &&
-				   era_2_sw_hfn_ovrd == 0)) {
-			SEQINPTR(p, 0, length, RTO);
-		} else {
-			SEQINPTR(p, 0, 5, RTO);
-			SEQFIFOLOAD(p, SKIP, 4, 0);
-		}
+		SEQINPTR(p, 0, length, RTO);
+
 		KEY(p, KEY1, authdata->key_enc_flags, authdata->key,
 		    authdata->keylen, INLINE_KEY(authdata));
 		MOVEB(p, MATH2, 0, IFIFOAB1, 0, 0x08, IMMED);
 
-		if (rta_sec_era > RTA_SEC_ERA_2) {
-			MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
-			MATHB(p, SEQINSZ, SUB, ZERO, MATH1, 4, 0);
-			MATHB(p, VSEQINSZ, ADD, PDCP_MAC_I_LEN - 1, VSEQOUTSZ,
-			      4, IMMED2);
-		} else {
-			MATHB(p, SEQINSZ, SUB, MATH3, VSEQINSZ, 4, 0);
-			MATHB(p, VSEQINSZ, ADD, PDCP_MAC_I_LEN - 1, VSEQOUTSZ,
-			      4, IMMED2);
-			/*
-			 * Note: Although the calculations below might seem a
-			 * little off, the logic is the following:
-			 *
-			 * - SEQ IN PTR RTO below needs the full length of the
-			 *   frame; in case of P4080_REV_2_HFN_OV_WORKAROUND,
-			 *   this means the length of the frame to be processed
-			 *   + 4 bytes (the HFN override flag and value).
-			 *   The length of the frame to be processed minus 1
-			 *   byte is in the VSIL register (because
-			 *   VSIL = SIL + 3, due to 1 byte, the header being
-			 *   already written by the SEQ STORE above). So for
-			 *   calculating the length to use in RTO, I add one
-			 *   to the VSIL value in order to obtain the total
-			 *   frame length. This helps in case of P4080 which
-			 *   can have the value 0 as an operand in a MATH
-			 *   command only as SRC1 When the HFN override
-			 *   workaround is not enabled, the length of the
-			 *   frame is given by the SIL register; the
-			 *   calculation is similar to the one in the SEC 4.2
-			 *   and SEC 5.3 cases.
-			 */
-			if (era_2_sw_hfn_ovrd)
-				MATHB(p, VSEQOUTSZ, ADD, ONE, MATH1, 4,
-				      0);
-			else
-				MATHB(p, SEQINSZ, ADD, MATH3, MATH1, 4,
-				      0);
-		}
+		MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
+		MATHB(p, SEQINSZ, SUB, ZERO, MATH1, 4, 0);
+		MATHB(p, VSEQINSZ, ADD, PDCP_MAC_I_LEN - 1, VSEQOUTSZ,
+		      4, IMMED2);
+
 		/*
 		 * Placeholder for filling the length in
 		 * SEQIN PTR RTO below
@@ -1548,24 +1245,14 @@ pdcp_insert_cplane_snow_aes_op(struct program *p,
 			      DIR_DEC);
 		SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1 | FLUSH1);
 		MOVEB(p, CONTEXT1, 0, MATH3, 0, 4, WAITCOMP | IMMED);
-		if (rta_sec_era <= RTA_SEC_ERA_3)
-			LOAD(p, CLRW_CLR_C1KEY |
-			     CLRW_CLR_C1CTX |
-			     CLRW_CLR_C1ICV |
-			     CLRW_CLR_C1DATAS |
-			     CLRW_CLR_C1MODE,
-			     CLRW, 0, 4, IMMED);
-		else
-			LOAD(p, CLRW_RESET_CLS1_CHA |
-			     CLRW_CLR_C1KEY |
-			     CLRW_CLR_C1CTX |
-			     CLRW_CLR_C1ICV |
-			     CLRW_CLR_C1DATAS |
-			     CLRW_CLR_C1MODE,
-			     CLRW, 0, 4, IMMED);
 
-		if (rta_sec_era <= RTA_SEC_ERA_3)
-			LOAD(p, CCTRL_RESET_CHA_ALL, CCTRL, 0, 4, IMMED);
+		LOAD(p, CLRW_RESET_CLS1_CHA |
+		     CLRW_CLR_C1KEY |
+		     CLRW_CLR_C1CTX |
+		     CLRW_CLR_C1ICV |
+		     CLRW_CLR_C1DATAS |
+		     CLRW_CLR_C1MODE,
+		     CLRW, 0, 4, IMMED);
 
 		KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
 		    cipherdata->keylen, INLINE_KEY(cipherdata));
@@ -1573,11 +1260,6 @@ pdcp_insert_cplane_snow_aes_op(struct program *p,
 		MOVEB(p, MATH2, 0, CONTEXT1, 0, 8, IMMED);
 		SEQINPTR(p, 0, 0, RTO);
 
-		if (rta_sec_era == RTA_SEC_ERA_2 && era_2_sw_hfn_ovrd) {
-			SEQFIFOLOAD(p, SKIP, 5, 0);
-			MATHB(p, SEQINSZ, ADD, ONE, SEQINSZ, 4, 0);
-		}
-
 		MATHB(p, SEQINSZ, SUB, length, VSEQINSZ, 4, IMMED2);
 		ALG_OPERATION(p, OP_ALG_ALGSEL_SNOW_F8,
 			      OP_ALG_AAI_F8,
@@ -1586,10 +1268,7 @@ pdcp_insert_cplane_snow_aes_op(struct program *p,
 			      DIR_ENC);
 		SEQFIFOSTORE(p, MSG, 0, 0, VLF);
 
-		if (rta_sec_era > RTA_SEC_ERA_2 ||
-		    (rta_sec_era == RTA_SEC_ERA_2 &&
-				   era_2_sw_hfn_ovrd == 0))
-			SEQFIFOLOAD(p, SKIP, length, 0);
+		SEQFIFOLOAD(p, SKIP, length, 0);
 
 		SEQFIFOLOAD(p, MSG1, 0, VLF);
 		MOVEB(p, MATH3, 0, IFIFOAB1, 0, 4, LAST1 | FLUSH1 | IMMED);
@@ -1598,13 +1277,9 @@ pdcp_insert_cplane_snow_aes_op(struct program *p,
 	} else {
 		MOVEB(p, MATH2, 0, CONTEXT1, 0, 8, IMMED);
 
-		if (rta_sec_era >= RTA_SEC_ERA_5)
-			MOVE(p, CONTEXT1, 0, CONTEXT2, 0, 8, IMMED);
+		MOVE(p, CONTEXT1, 0, CONTEXT2, 0, 8, IMMED);
 
-		if (rta_sec_era > RTA_SEC_ERA_2)
-			MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
-		else
-			MATHB(p, SEQINSZ, SUB, MATH3, VSEQINSZ, 4, 0);
+		MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
 
 		MATHI(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, VSEQOUTSZ, 4, IMMED2);
 /*
@@ -1649,10 +1324,7 @@ pdcp_insert_cplane_snow_aes_op(struct program *p,
 		KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
 		    cipherdata->keylen, INLINE_KEY(cipherdata));
 
-		if (rta_sec_era >= RTA_SEC_ERA_4)
-			MOVE(p, CONTEXT1, 0, CONTEXT2, 0, 8, IMMED);
-		else
-			MOVE(p, CONTEXT1, 0, MATH3, 0, 8, IMMED);
+		MOVE(p, CONTEXT1, 0, CONTEXT2, 0, 8, IMMED);
 
 		ALG_OPERATION(p, OP_ALG_ALGSEL_SNOW_F8,
 			      OP_ALG_AAI_F8,
@@ -1662,22 +1334,15 @@ pdcp_insert_cplane_snow_aes_op(struct program *p,
 		SEQFIFOSTORE(p, MSG, 0, 0, VLF | CONT);
 		SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1 | FLUSH1);
 
-		if (rta_sec_era <= RTA_SEC_ERA_3)
-			move_mac_i_to_desc_buf = MOVE(p, OFIFO, 0, DESCBUF, 0,
-						      4, WAITCOMP | IMMED);
-		else
-			MOVE(p, OFIFO, 0, MATH3, 0, 4, IMMED);
+		MOVE(p, OFIFO, 0, MATH3, 0, 4, IMMED);
 
-		if (rta_sec_era <= RTA_SEC_ERA_3)
-			LOAD(p, CCTRL_RESET_CHA_ALL, CCTRL, 0, 4, IMMED);
-		else
-			LOAD(p, CLRW_RESET_CLS1_CHA |
-			     CLRW_CLR_C1KEY |
-			     CLRW_CLR_C1CTX |
-			     CLRW_CLR_C1ICV |
-			     CLRW_CLR_C1DATAS |
-			     CLRW_CLR_C1MODE,
-			     CLRW, 0, 4, IMMED);
+		LOAD(p, CLRW_RESET_CLS1_CHA |
+		     CLRW_CLR_C1KEY |
+		     CLRW_CLR_C1CTX |
+		     CLRW_CLR_C1ICV |
+		     CLRW_CLR_C1DATAS |
+		     CLRW_CLR_C1MODE,
+		     CLRW, 0, 4, IMMED);
 
 		KEY(p, KEY1, authdata->key_enc_flags, authdata->key,
 		    authdata->keylen, INLINE_KEY(authdata));
@@ -1698,28 +1363,17 @@ pdcp_insert_cplane_snow_aes_op(struct program *p,
 		/* Read the # of bytes written in the output buffer + 1 (HDR) */
 		MATHI(p, VSEQOUTSZ, ADD, length, VSEQINSZ, 4, IMMED2);
 
-		if (rta_sec_era <= RTA_SEC_ERA_3)
-			MOVE(p, MATH3, 0, IFIFOAB1, 0, 8, IMMED);
-		else
-			MOVE(p, CONTEXT2, 0, IFIFOAB1, 0, 8, IMMED);
-
-		if (rta_sec_era == RTA_SEC_ERA_2 && era_2_sw_hfn_ovrd)
-			SEQFIFOLOAD(p, SKIP, 4, 0);
+		MOVE(p, CONTEXT2, 0, IFIFOAB1, 0, 8, IMMED);
 
 		SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1 | FLUSH1);
 
-		if (rta_sec_era >= RTA_SEC_ERA_4) {
-			LOAD(p, NFIFOENTRY_STYPE_ALTSOURCE |
-			     NFIFOENTRY_DEST_CLASS1 |
-			     NFIFOENTRY_DTYPE_ICV |
-			     NFIFOENTRY_LC1 |
-			     NFIFOENTRY_FC1 | 4, NFIFO_SZL, 0, 4, IMMED);
-			MOVE(p, MATH3, 0, ALTSOURCE, 0, 4, IMMED);
-		} else {
-			SET_LABEL(p, fifo_load_mac_i_offset);
-			FIFOLOAD(p, ICV1, fifo_load_mac_i_offset, 4,
-				 LAST1 | FLUSH1 | IMMED);
-		}
+		LOAD(p, NFIFOENTRY_STYPE_ALTSOURCE |
+		     NFIFOENTRY_DEST_CLASS1 |
+		     NFIFOENTRY_DTYPE_ICV |
+		     NFIFOENTRY_LC1 |
+		     NFIFOENTRY_FC1 | 4, NFIFO_SZL, 0, 4, IMMED);
+		MOVE(p, MATH3, 0, ALTSOURCE, 0, 4, IMMED);
+
 
 		SET_LABEL(p, end_desc);
 
@@ -1727,18 +1381,10 @@ pdcp_insert_cplane_snow_aes_op(struct program *p,
 			PATCH_MOVE(p, seq_out_read, end_desc + 1);
 			PATCH_JUMP(p, jump_back_to_sd_cmd,
 				   back_to_sd_offset + jump_back_to_sd_cmd - 5);
-
-			if (rta_sec_era <= RTA_SEC_ERA_3)
-				PATCH_MOVE(p, move_mac_i_to_desc_buf,
-					   fifo_load_mac_i_offset + 1);
 		} else {
 			PATCH_MOVE(p, seq_out_read, end_desc + 2);
 			PATCH_JUMP(p, jump_back_to_sd_cmd,
 				   back_to_sd_offset + jump_back_to_sd_cmd - 5);
-
-			if (rta_sec_era <= RTA_SEC_ERA_3)
-				PATCH_MOVE(p, move_mac_i_to_desc_buf,
-					   fifo_load_mac_i_offset + 1);
 		}
 	}
 
@@ -1751,8 +1397,7 @@ pdcp_insert_cplane_aes_snow_op(struct program *p,
 			       struct alginfo *cipherdata,
 			       struct alginfo *authdata,
 			       unsigned int dir,
-			       enum pdcp_sn_size sn_size,
-			       unsigned char era_2_sw_hfn_ovrd __maybe_unused)
+			       enum pdcp_sn_size sn_size)
 {
 	uint32_t offset = 0, length = 0, sn_mask = 0;
 
@@ -1761,7 +1406,7 @@ pdcp_insert_cplane_aes_snow_op(struct program *p,
 	KEY(p, KEY2, authdata->key_enc_flags, authdata->key, authdata->keylen,
 	    INLINE_KEY(authdata));
 
-	if ((rta_sec_era >= RTA_SEC_ERA_8 && sn_size != PDCP_SN_SIZE_18) ||
+	if ((sn_size != PDCP_SN_SIZE_18) ||
 		(rta_sec_era == RTA_SEC_ERA_10)) {
 		int pclid;
 
@@ -1860,20 +1505,13 @@ pdcp_insert_cplane_aes_snow_op(struct program *p,
 		SEQFIFOLOAD(p, MSG1, 4, LAST1 | FLUSH1);
 		JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CLASS1 | NOP | NIFP);
 
-		if (rta_sec_era >= RTA_SEC_ERA_6)
-			LOAD(p, 0, DCTRL, 0, LDLEN_RST_CHA_OFIFO_PTR, IMMED);
+		LOAD(p, 0, DCTRL, 0, LDLEN_RST_CHA_OFIFO_PTR, IMMED);
 
 		MOVE(p, OFIFO, 0, MATH0, 0, 4, WAITCOMP | IMMED);
 
 		NFIFOADD(p, IFIFO, ICV2, 4, LAST2);
 
-		if (rta_sec_era <= RTA_SEC_ERA_2) {
-			/* Shut off automatic Info FIFO entries */
-			LOAD(p, 0, DCTRL, LDOFF_DISABLE_AUTO_NFIFO, 0, IMMED);
-			MOVE(p, MATH0, 0, IFIFOAB2, 0, 4, WAITCOMP | IMMED);
-		} else {
-			MOVE(p, MATH0, 0, IFIFO, 0, 4, WAITCOMP | IMMED);
-		}
+		MOVE(p, MATH0, 0, IFIFO, 0, 4, WAITCOMP | IMMED);
 	}
 
 	return 0;
@@ -1885,20 +1523,14 @@ pdcp_insert_cplane_snow_zuc_op(struct program *p,
 			       struct alginfo *cipherdata,
 			       struct alginfo *authdata,
 			       unsigned int dir,
-			       enum pdcp_sn_size sn_size,
-			       unsigned char era_2_sw_hfn_ovrd __maybe_unused)
+			       enum pdcp_sn_size sn_size)
 {
 	uint32_t offset = 0, length = 0, sn_mask = 0;
 
 	LABEL(keyjump);
 	REFERENCE(pkeyjump);
 
-	if (rta_sec_era < RTA_SEC_ERA_5) {
-		pr_err("Invalid era for selected algorithm\n");
-		return -ENOTSUP;
-	}
-
-	if ((rta_sec_era >= RTA_SEC_ERA_8 && sn_size != PDCP_SN_SIZE_18) ||
+	if ((sn_size != PDCP_SN_SIZE_18) ||
 		(rta_sec_era == RTA_SEC_ERA_10)) {
 		int pclid;
 		KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
@@ -2010,19 +1642,13 @@ pdcp_insert_cplane_aes_zuc_op(struct program *p,
 			      struct alginfo *cipherdata,
 			      struct alginfo *authdata,
 			      unsigned int dir,
-			      enum pdcp_sn_size sn_size,
-			      unsigned char era_2_sw_hfn_ovrd __maybe_unused)
+			      enum pdcp_sn_size sn_size)
 {
 	uint32_t offset = 0, length = 0, sn_mask = 0;
 	LABEL(keyjump);
 	REFERENCE(pkeyjump);
 
-	if (rta_sec_era < RTA_SEC_ERA_5) {
-		pr_err("Invalid era for selected algorithm\n");
-		return -ENOTSUP;
-	}
-
-	if ((rta_sec_era >= RTA_SEC_ERA_8 && sn_size != PDCP_SN_SIZE_18) ||
+	if ((sn_size != PDCP_SN_SIZE_18) ||
 		(rta_sec_era == RTA_SEC_ERA_10)) {
 		int pclid;
 		KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
@@ -2138,19 +1764,13 @@ pdcp_insert_cplane_zuc_snow_op(struct program *p,
 			       struct alginfo *cipherdata,
 			       struct alginfo *authdata,
 			       unsigned int dir,
-			       enum pdcp_sn_size sn_size,
-			       unsigned char era_2_sw_hfn_ovrd __maybe_unused)
+			       enum pdcp_sn_size sn_size)
 {
 	uint32_t offset = 0, length = 0, sn_mask = 0;
 	LABEL(keyjump);
 	REFERENCE(pkeyjump);
 
-	if (rta_sec_era < RTA_SEC_ERA_5) {
-		pr_err("Invalid era for selected algorithm\n");
-		return -ENOTSUP;
-	}
-
-	if ((rta_sec_era >= RTA_SEC_ERA_8 && sn_size != PDCP_SN_SIZE_18) ||
+	if ((sn_size != PDCP_SN_SIZE_18) ||
 		(rta_sec_era == RTA_SEC_ERA_10)) {
 		int pclid;
 		KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
@@ -2259,13 +1879,12 @@ pdcp_insert_cplane_zuc_snow_op(struct program *p,
 		SEQFIFOLOAD(p, MSG1, 4, LAST1 | FLUSH1);
 		JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CLASS1 | NOP | NIFP);
 
-		if (rta_sec_era >= RTA_SEC_ERA_6)
-			/*
-			 * For SEC ERA 6, there's a problem with the OFIFO
-			 * pointer, and thus it needs to be reset here before
-			 * moving to M0.
-			 */
-			LOAD(p, 0, DCTRL, 0, LDLEN_RST_CHA_OFIFO_PTR, IMMED);
+		/*
+		 * For SEC ERA 6, there's a problem with the OFIFO
+		 * pointer, and thus it needs to be reset here before
+		 * moving to M0.
+		 */
+		LOAD(p, 0, DCTRL, 0, LDLEN_RST_CHA_OFIFO_PTR, IMMED);
 
 		/* Put ICV to M0 before sending it to C2 for comparison. */
 		MOVEB(p, OFIFO, 0, MATH0, 0, 4, WAITCOMP | IMMED);
@@ -2287,16 +1906,11 @@ pdcp_insert_cplane_zuc_aes_op(struct program *p,
 			      struct alginfo *cipherdata,
 			      struct alginfo *authdata,
 			      unsigned int dir,
-			      enum pdcp_sn_size sn_size,
-			      unsigned char era_2_sw_hfn_ovrd __maybe_unused)
+			      enum pdcp_sn_size sn_size)
 {
 	uint32_t offset = 0, length = 0, sn_mask = 0;
-	if (rta_sec_era < RTA_SEC_ERA_5) {
-		pr_err("Invalid era for selected algorithm\n");
-		return -ENOTSUP;
-	}
 
-	if ((rta_sec_era >= RTA_SEC_ERA_8 && sn_size != PDCP_SN_SIZE_18) ||
+	if ((sn_size != PDCP_SN_SIZE_18) ||
 		(rta_sec_era == RTA_SEC_ERA_10)) {
 		int pclid;
 
@@ -2459,7 +2073,7 @@ pdcp_insert_uplane_no_int_op(struct program *p,
 	KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
 	    cipherdata->keylen, INLINE_KEY(cipherdata));
 
-	if ((rta_sec_era >= RTA_SEC_ERA_8 && sn_size == PDCP_SN_SIZE_15) ||
+	if ((sn_size == PDCP_SN_SIZE_15) ||
 			(rta_sec_era >= RTA_SEC_ERA_10)) {
 		PROTOCOL(p, dir, OP_PCLID_LTE_PDCP_USER,
 			 (uint16_t)cipherdata->algtype);
@@ -2513,10 +2127,6 @@ pdcp_insert_uplane_no_int_op(struct program *p,
 		break;
 
 	case PDCP_CIPHER_TYPE_ZUC:
-		if (rta_sec_era < RTA_SEC_ERA_5) {
-			pr_err("Invalid era for selected algorithm\n");
-			return -ENOTSUP;
-		}
 		MOVEB(p, MATH2, 0, CONTEXT1, 0, 0x08, IMMED);
 		MOVEB(p, MATH2, 0, CONTEXT1, 0x08, 0x08, WAITCOMP | IMMED);
 
@@ -2546,7 +2156,6 @@ static inline int
 insert_hfn_ov_op(struct program *p,
 		 uint32_t shift,
 		 enum pdb_type_e pdb_type,
-		 unsigned char era_2_sw_hfn_ovrd,
 		 bool clear_dpovrd_at_end)
 {
 	uint32_t imm = PDCP_DPOVRD_HFN_OV_EN;
@@ -2554,9 +2163,6 @@ insert_hfn_ov_op(struct program *p,
 	LABEL(keyjump);
 	REFERENCE(pkeyjump);
 
-	if (rta_sec_era == RTA_SEC_ERA_2 && !era_2_sw_hfn_ovrd)
-		return 0;
-
 	switch (pdb_type) {
 	case PDCP_PDB_TYPE_NO_PDB:
 		/*
@@ -2579,26 +2185,16 @@ insert_hfn_ov_op(struct program *p,
 		return -EINVAL;
 	}
 
-	if (rta_sec_era > RTA_SEC_ERA_2) {
-		MATHB(p, DPOVRD, AND, imm, NONE, 8, IFB | IMMED2);
-	} else {
-		SEQLOAD(p, MATH0, 4, 4, 0);
-		JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CALM);
-		MATHB(p, MATH0, AND, imm, NONE, 8, IFB | IMMED2);
-		SEQSTORE(p, MATH0, 4, 4, 0);
-	}
+	MATHB(p, DPOVRD, AND, imm, NONE, 8, IFB | IMMED2);
 
 	pkeyjump = JUMP(p, keyjump, LOCAL_JUMP, ALL_TRUE, MATH_Z);
 
-	if (rta_sec_era > RTA_SEC_ERA_2)
-		MATHI(p, DPOVRD, LSHIFT, shift, MATH0, 4, IMMED2);
-	else
-		MATHB(p, MATH0, LSHIFT, shift, MATH0, 4, IMMED2);
+	MATHI(p, DPOVRD, LSHIFT, shift, MATH0, 4, IMMED2);
 
 	MATHB(p, MATH0, SHLD, MATH0, MATH0, 8, 0);
 	MOVE(p, MATH0, 0, DESCBUF, hfn_pdb_offset, 4, IMMED);
 
-	if (clear_dpovrd_at_end && (rta_sec_era >= RTA_SEC_ERA_8)) {
+	if (clear_dpovrd_at_end) {
 		/*
 		 * For ERA8, DPOVRD could be handled by the PROTOCOL command
 		 * itself. For now, this is not done. Thus, clear DPOVRD here
@@ -2621,97 +2217,28 @@ cnstr_pdcp_c_plane_pdb(struct program *p,
 		       enum pdcp_sn_size sn_size,
 		       unsigned char bearer,
 		       unsigned char direction,
-		       uint32_t hfn_threshold,
-		       struct alginfo *cipherdata,
-		       struct alginfo *authdata)
+		       uint32_t hfn_threshold)
 {
 	struct pdcp_pdb pdb;
-	enum pdb_type_e
-		pdb_mask[PDCP_CIPHER_TYPE_INVALID][PDCP_AUTH_TYPE_INVALID] = {
-			{	/* NULL */
-				PDCP_PDB_TYPE_NO_PDB,		/* NULL */
-				PDCP_PDB_TYPE_FULL_PDB,		/* SNOW f9 */
-				PDCP_PDB_TYPE_FULL_PDB,		/* AES CMAC */
-				PDCP_PDB_TYPE_FULL_PDB		/* ZUC-I */
-			},
-			{	/* SNOW f8 */
-				PDCP_PDB_TYPE_FULL_PDB,		/* NULL */
-				PDCP_PDB_TYPE_FULL_PDB,		/* SNOW f9 */
-				PDCP_PDB_TYPE_REDUCED_PDB,	/* AES CMAC */
-				PDCP_PDB_TYPE_REDUCED_PDB	/* ZUC-I */
-			},
-			{	/* AES CTR */
-				PDCP_PDB_TYPE_FULL_PDB,		/* NULL */
-				PDCP_PDB_TYPE_REDUCED_PDB,	/* SNOW f9 */
-				PDCP_PDB_TYPE_FULL_PDB,		/* AES CMAC */
-				PDCP_PDB_TYPE_REDUCED_PDB	/* ZUC-I */
-			},
-			{	/* ZUC-E */
-				PDCP_PDB_TYPE_FULL_PDB,		/* NULL */
-				PDCP_PDB_TYPE_REDUCED_PDB,	/* SNOW f9 */
-				PDCP_PDB_TYPE_REDUCED_PDB,	/* AES CMAC */
-				PDCP_PDB_TYPE_FULL_PDB		/* ZUC-I */
-			},
-	};
-
-	if (rta_sec_era >= RTA_SEC_ERA_8) {
-		memset(&pdb, 0x00, sizeof(struct pdcp_pdb));
-
-		/* To support 12-bit seq numbers, we use u-plane opt in pdb.
-		 * SEC supports 5-bit only with c-plane opt in pdb.
-		 */
-		if (sn_size == PDCP_SN_SIZE_12) {
-			pdb.hfn_res = hfn << PDCP_U_PLANE_PDB_LONG_SN_HFN_SHIFT;
-			pdb.bearer_dir_res = (uint32_t)
-				((bearer << PDCP_U_PLANE_PDB_BEARER_SHIFT) |
-				 (direction << PDCP_U_PLANE_PDB_DIR_SHIFT));
-
-			pdb.hfn_thr_res =
-			hfn_threshold << PDCP_U_PLANE_PDB_LONG_SN_HFN_THR_SHIFT;
-
-		} else {
-			/* This means 5-bit c-plane.
-			 * Here we use c-plane opt in pdb
-			 */
-
-			/* This is a HW issue. Bit 2 should be set to zero,
-			 * but it does not work this way. Override here.
-			 */
-			pdb.opt_res.rsvd = 0x00000002;
-
-			/* Copy relevant information from user to PDB */
-			pdb.hfn_res = hfn << PDCP_C_PLANE_PDB_HFN_SHIFT;
-			pdb.bearer_dir_res = (uint32_t)
-				((bearer << PDCP_C_PLANE_PDB_BEARER_SHIFT) |
-				(direction << PDCP_C_PLANE_PDB_DIR_SHIFT));
-			pdb.hfn_thr_res =
-			hfn_threshold << PDCP_C_PLANE_PDB_HFN_THR_SHIFT;
-		}
-
-		/* copy PDB in descriptor*/
-		__rta_out32(p, pdb.opt_res.opt);
-		__rta_out32(p, pdb.hfn_res);
-		__rta_out32(p, pdb.bearer_dir_res);
-		__rta_out32(p, pdb.hfn_thr_res);
 
-		return PDCP_PDB_TYPE_FULL_PDB;
-	}
+	memset(&pdb, 0x00, sizeof(struct pdcp_pdb));
 
-	switch (pdb_mask[cipherdata->algtype][authdata->algtype]) {
-	case PDCP_PDB_TYPE_NO_PDB:
-		break;
+	/* To support 12-bit seq numbers, we use u-plane opt in pdb.
+	 * SEC supports 5-bit only with c-plane opt in pdb.
+	 */
+	if (sn_size == PDCP_SN_SIZE_12) {
+		pdb.hfn_res = hfn << PDCP_U_PLANE_PDB_LONG_SN_HFN_SHIFT;
+		pdb.bearer_dir_res = (uint32_t)
+			((bearer << PDCP_U_PLANE_PDB_BEARER_SHIFT) |
+			 (direction << PDCP_U_PLANE_PDB_DIR_SHIFT));
 
-	case PDCP_PDB_TYPE_REDUCED_PDB:
-		__rta_out32(p, (hfn << PDCP_C_PLANE_PDB_HFN_SHIFT));
-		__rta_out32(p,
-			    (uint32_t)((bearer <<
-					PDCP_C_PLANE_PDB_BEARER_SHIFT) |
-					(direction <<
-					 PDCP_C_PLANE_PDB_DIR_SHIFT)));
-		break;
+		pdb.hfn_thr_res =
+		hfn_threshold << PDCP_U_PLANE_PDB_LONG_SN_HFN_THR_SHIFT;
 
-	case PDCP_PDB_TYPE_FULL_PDB:
-		memset(&pdb, 0x00, sizeof(struct pdcp_pdb));
+	} else {
+		/* This means 5-bit c-plane.
+		 * Here we use c-plane opt in pdb
+		 */
 
 		/* This is a HW issue. Bit 2 should be set to zero,
 		 * but it does not work this way. Override here.
@@ -2722,23 +2249,18 @@ cnstr_pdcp_c_plane_pdb(struct program *p,
 		pdb.hfn_res = hfn << PDCP_C_PLANE_PDB_HFN_SHIFT;
 		pdb.bearer_dir_res = (uint32_t)
 			((bearer << PDCP_C_PLANE_PDB_BEARER_SHIFT) |
-			 (direction << PDCP_C_PLANE_PDB_DIR_SHIFT));
+			(direction << PDCP_C_PLANE_PDB_DIR_SHIFT));
 		pdb.hfn_thr_res =
-			hfn_threshold << PDCP_C_PLANE_PDB_HFN_THR_SHIFT;
-
-		/* copy PDB in descriptor*/
-		__rta_out32(p, pdb.opt_res.opt);
-		__rta_out32(p, pdb.hfn_res);
-		__rta_out32(p, pdb.bearer_dir_res);
-		__rta_out32(p, pdb.hfn_thr_res);
-
-		break;
-
-	default:
-		return PDCP_PDB_TYPE_INVALID;
+		hfn_threshold << PDCP_C_PLANE_PDB_HFN_THR_SHIFT;
 	}
 
-	return pdb_mask[cipherdata->algtype][authdata->algtype];
+	/* copy PDB in descriptor*/
+	__rta_out32(p, pdb.opt_res.opt);
+	__rta_out32(p, pdb.hfn_res);
+	__rta_out32(p, pdb.bearer_dir_res);
+	__rta_out32(p, pdb.hfn_thr_res);
+
+	return PDCP_PDB_TYPE_FULL_PDB;
 }
 
 /*
@@ -2817,7 +2339,7 @@ cnstr_pdcp_u_plane_pdb(struct program *p,
 		pdb.hfn_thr_res =
 			hfn_threshold<<PDCP_U_PLANE_PDB_18BIT_SN_HFN_THR_SHIFT;
 
-		if (rta_sec_era <= RTA_SEC_ERA_8) {
+		if (rta_sec_era == RTA_SEC_ERA_8) {
 			if (cipherdata && authdata)
 				pdb_type = pdb_mask[cipherdata->algtype]
 						   [authdata->algtype];
@@ -2857,6 +2379,7 @@ cnstr_pdcp_u_plane_pdb(struct program *p,
 
 	return pdb_type;
 }
+
 /**
  * cnstr_shdsc_pdcp_c_plane_encap - Function for creating a PDCP Control Plane
  *                                  encapsulation descriptor.
@@ -2874,9 +2397,6 @@ cnstr_pdcp_u_plane_pdb(struct program *p,
  *              Valid algorithm values are those from cipher_type_pdcp enum.
  * @authdata: pointer to authentication transform definitions
  *            Valid algorithm values are those from auth_type_pdcp enum.
- * @era_2_sw_hfn_ovrd: if software HFN override mechanism is desired for
- *                     this descriptor. Note: Can only be used for
- *                     SEC ERA 2.
  * Return: size of descriptor written in words or negative number on error.
  *         Once the function returns, the value of this parameter can be used
  *         for reclaiming the space that wasn't used for the descriptor.
@@ -2895,14 +2415,12 @@ cnstr_shdsc_pdcp_c_plane_encap(uint32_t *descbuf,
 			       unsigned char direction,
 			       uint32_t hfn_threshold,
 			       struct alginfo *cipherdata,
-			       struct alginfo *authdata,
-			       unsigned char era_2_sw_hfn_ovrd)
+			       struct alginfo *authdata)
 {
 	static int
 		(*pdcp_cp_fp[PDCP_CIPHER_TYPE_INVALID][PDCP_AUTH_TYPE_INVALID])
 			(struct program*, bool swap, struct alginfo *,
-			 struct alginfo *, unsigned int, enum pdcp_sn_size,
-			unsigned char __maybe_unused) = {
+			 struct alginfo *, unsigned int dir, enum pdcp_sn_size) = {
 		{	/* NULL */
 			pdcp_insert_cplane_null_op,	/* NULL */
 			pdcp_insert_cplane_int_only_op,	/* SNOW f9 */
@@ -2961,11 +2479,6 @@ cnstr_shdsc_pdcp_c_plane_encap(uint32_t *descbuf,
 	int err;
 	LABEL(pdb_end);
 
-	if (rta_sec_era != RTA_SEC_ERA_2 && era_2_sw_hfn_ovrd) {
-		pr_err("Cannot select SW HFN override for other era than 2");
-		return -EINVAL;
-	}
-
 	if (sn_size != PDCP_SN_SIZE_12 && sn_size != PDCP_SN_SIZE_5) {
 		pr_err("C-plane supports only 5-bit and 12-bit sequence numbers\n");
 		return -EINVAL;
@@ -2984,14 +2497,11 @@ cnstr_shdsc_pdcp_c_plane_encap(uint32_t *descbuf,
 			sn_size,
 			bearer,
 			direction,
-			hfn_threshold,
-			cipherdata,
-			authdata);
+			hfn_threshold);
 
 	SET_LABEL(p, pdb_end);
 
-	err = insert_hfn_ov_op(p, sn_size, pdb_type,
-			       era_2_sw_hfn_ovrd, true);
+	err = insert_hfn_ov_op(p, sn_size, pdb_type, true);
 	if (err)
 		return err;
 
@@ -3000,8 +2510,7 @@ cnstr_shdsc_pdcp_c_plane_encap(uint32_t *descbuf,
 		cipherdata,
 		authdata,
 		OP_TYPE_ENCAP_PROTOCOL,
-		sn_size,
-		era_2_sw_hfn_ovrd);
+		sn_size);
 	if (err)
 		return err;
 
@@ -3027,9 +2536,6 @@ cnstr_shdsc_pdcp_c_plane_encap(uint32_t *descbuf,
  *              Valid algorithm values are those from cipher_type_pdcp enum.
  * @authdata: pointer to authentication transform definitions
  *            Valid algorithm values are those from auth_type_pdcp enum.
- * @era_2_sw_hfn_ovrd: if software HFN override mechanism is desired for
- *                     this descriptor. Note: Can only be used for
- *                     SEC ERA 2.
  *
  * Return: size of descriptor written in words or negative number on error.
  *         Once the function returns, the value of this parameter can be used
@@ -3049,14 +2555,12 @@ cnstr_shdsc_pdcp_c_plane_decap(uint32_t *descbuf,
 			       unsigned char direction,
 			       uint32_t hfn_threshold,
 			       struct alginfo *cipherdata,
-			       struct alginfo *authdata,
-			       unsigned char era_2_sw_hfn_ovrd)
+			       struct alginfo *authdata)
 {
 	static int
 		(*pdcp_cp_fp[PDCP_CIPHER_TYPE_INVALID][PDCP_AUTH_TYPE_INVALID])
 			(struct program*, bool swap, struct alginfo *,
-			 struct alginfo *, unsigned int, enum pdcp_sn_size,
-			 unsigned char) = {
+			 struct alginfo *, unsigned int dir, enum pdcp_sn_size) = {
 		{	/* NULL */
 			pdcp_insert_cplane_null_op,	/* NULL */
 			pdcp_insert_cplane_int_only_op,	/* SNOW f9 */
@@ -3115,11 +2619,6 @@ cnstr_shdsc_pdcp_c_plane_decap(uint32_t *descbuf,
 	int err;
 	LABEL(pdb_end);
 
-	if (rta_sec_era != RTA_SEC_ERA_2 && era_2_sw_hfn_ovrd) {
-		pr_err("Cannot select SW HFN override for other era than 2");
-		return -EINVAL;
-	}
-
 	if (sn_size != PDCP_SN_SIZE_12 && sn_size != PDCP_SN_SIZE_5) {
 		pr_err("C-plane supports only 5-bit and 12-bit sequence numbers\n");
 		return -EINVAL;
@@ -3138,14 +2637,11 @@ cnstr_shdsc_pdcp_c_plane_decap(uint32_t *descbuf,
 			sn_size,
 			bearer,
 			direction,
-			hfn_threshold,
-			cipherdata,
-			authdata);
+			hfn_threshold);
 
 	SET_LABEL(p, pdb_end);
 
-	err = insert_hfn_ov_op(p, sn_size, pdb_type,
-			       era_2_sw_hfn_ovrd, true);
+	err = insert_hfn_ov_op(p, sn_size, pdb_type, true);
 	if (err)
 		return err;
 
@@ -3154,8 +2650,7 @@ cnstr_shdsc_pdcp_c_plane_decap(uint32_t *descbuf,
 		cipherdata,
 		authdata,
 		OP_TYPE_DECAP_PROTOCOL,
-		sn_size,
-		era_2_sw_hfn_ovrd);
+		sn_size);
 	if (err)
 		return err;
 
@@ -3170,14 +2665,12 @@ pdcp_insert_uplane_with_int_op(struct program *p,
 			      struct alginfo *cipherdata,
 			      struct alginfo *authdata,
 			      enum pdcp_sn_size sn_size,
-			      unsigned char era_2_sw_hfn_ovrd,
 			      unsigned int dir)
 {
 	static int
 		(*pdcp_cp_fp[PDCP_CIPHER_TYPE_INVALID][PDCP_AUTH_TYPE_INVALID])
 			(struct program*, bool swap, struct alginfo *,
-			 struct alginfo *, unsigned int, enum pdcp_sn_size,
-			unsigned char __maybe_unused) = {
+			 struct alginfo *, unsigned int dir, enum pdcp_sn_size) = {
 		{	/* NULL */
 			pdcp_insert_cplane_null_op,	/* NULL */
 			pdcp_insert_cplane_int_only_op,	/* SNOW f9 */
@@ -3210,8 +2703,7 @@ pdcp_insert_uplane_with_int_op(struct program *p,
 		cipherdata,
 		authdata,
 		dir,
-		sn_size,
-		era_2_sw_hfn_ovrd);
+		sn_size);
 	if (err)
 		return err;
 
@@ -3234,9 +2726,6 @@ pdcp_insert_uplane_with_int_op(struct program *p,
  *                 keys should be renegotiated at the earliest convenience.
  * @cipherdata: pointer to block cipher transform definitions
  *              Valid algorithm values are those from cipher_type_pdcp enum.
- * @era_2_sw_hfn_ovrd: if software HFN override mechanism is desired for
- *                     this descriptor. Note: Can only be used for
- *                     SEC ERA 2.
  *
  * Return: size of descriptor written in words or negative number on error.
  *         Once the function returns, the value of this parameter can be used
@@ -3256,8 +2745,7 @@ cnstr_shdsc_pdcp_u_plane_encap(uint32_t *descbuf,
 			       unsigned short direction,
 			       uint32_t hfn_threshold,
 			       struct alginfo *cipherdata,
-			       struct alginfo *authdata,
-			       unsigned char era_2_sw_hfn_ovrd)
+			       struct alginfo *authdata)
 {
 	struct program prg;
 	struct program *p = &prg;
@@ -3292,16 +2780,6 @@ cnstr_shdsc_pdcp_u_plane_encap(uint32_t *descbuf,
 	};
 	LABEL(pdb_end);
 
-	if (rta_sec_era != RTA_SEC_ERA_2 && era_2_sw_hfn_ovrd) {
-		pr_err("Cannot select SW HFN ovrd for other era than 2");
-		return -EINVAL;
-	}
-
-	if (authdata && !authdata->algtype && rta_sec_era < RTA_SEC_ERA_8) {
-		pr_err("Cannot use u-plane auth with era < 8");
-		return -EINVAL;
-	}
-
 	PROGRAM_CNTXT_INIT(p, descbuf, 0);
 	if (swap)
 		PROGRAM_SET_BSWAP(p);
@@ -3321,7 +2799,7 @@ cnstr_shdsc_pdcp_u_plane_encap(uint32_t *descbuf,
 	}
 	SET_LABEL(p, pdb_end);
 
-	err = insert_hfn_ov_op(p, sn_size, pdb_type, era_2_sw_hfn_ovrd, true);
+	err = insert_hfn_ov_op(p, sn_size, pdb_type, true);
 	if (err)
 		return err;
 
@@ -3330,10 +2808,6 @@ cnstr_shdsc_pdcp_u_plane_encap(uint32_t *descbuf,
 	case PDCP_SN_SIZE_12:
 		switch (cipherdata->algtype) {
 		case PDCP_CIPHER_TYPE_ZUC:
-			if (rta_sec_era < RTA_SEC_ERA_5) {
-				pr_err("Invalid era for selected algorithm\n");
-				return -ENOTSUP;
-			}
 			/* fallthrough */
 		case PDCP_CIPHER_TYPE_AES:
 		case PDCP_CIPHER_TYPE_SNOW:
@@ -3342,7 +2816,7 @@ cnstr_shdsc_pdcp_u_plane_encap(uint32_t *descbuf,
 					authdata && authdata->algtype == 0){
 				err = pdcp_insert_uplane_with_int_op(p, swap,
 						cipherdata, authdata,
-						sn_size, era_2_sw_hfn_ovrd,
+						sn_size,
 						OP_TYPE_ENCAP_PROTOCOL);
 				if (err)
 					return err;
@@ -3388,7 +2862,7 @@ cnstr_shdsc_pdcp_u_plane_encap(uint32_t *descbuf,
 		if (authdata) {
 			err = pdcp_insert_uplane_with_int_op(p, swap,
 					cipherdata, authdata,
-					sn_size, era_2_sw_hfn_ovrd,
+					sn_size,
 					OP_TYPE_ENCAP_PROTOCOL);
 			if (err)
 				return err;
@@ -3437,9 +2911,6 @@ cnstr_shdsc_pdcp_u_plane_encap(uint32_t *descbuf,
  *                 keys should be renegotiated at the earliest convenience.
  * @cipherdata: pointer to block cipher transform definitions
  *              Valid algorithm values are those from cipher_type_pdcp enum.
- * @era_2_sw_hfn_ovrd: if software HFN override mechanism is desired for
- *                     this descriptor. Note: Can only be used for
- *                     SEC ERA 2.
  *
  * Return: size of descriptor written in words or negative number on error.
  *         Once the function returns, the value of this parameter can be used
@@ -3459,8 +2930,7 @@ cnstr_shdsc_pdcp_u_plane_decap(uint32_t *descbuf,
 			       unsigned short direction,
 			       uint32_t hfn_threshold,
 			       struct alginfo *cipherdata,
-			       struct alginfo *authdata,
-			       unsigned char era_2_sw_hfn_ovrd)
+			       struct alginfo *authdata)
 {
 	struct program prg;
 	struct program *p = &prg;
@@ -3496,16 +2966,6 @@ cnstr_shdsc_pdcp_u_plane_decap(uint32_t *descbuf,
 
 	LABEL(pdb_end);
 
-	if (rta_sec_era != RTA_SEC_ERA_2 && era_2_sw_hfn_ovrd) {
-		pr_err("Cannot select SW HFN override for other era than 2");
-		return -EINVAL;
-	}
-
-	if (authdata && !authdata->algtype && rta_sec_era < RTA_SEC_ERA_8) {
-		pr_err("Cannot use u-plane auth with era < 8");
-		return -EINVAL;
-	}
-
 	PROGRAM_CNTXT_INIT(p, descbuf, 0);
 	if (swap)
 		PROGRAM_SET_BSWAP(p);
@@ -3525,7 +2985,7 @@ cnstr_shdsc_pdcp_u_plane_decap(uint32_t *descbuf,
 	}
 	SET_LABEL(p, pdb_end);
 
-	err = insert_hfn_ov_op(p, sn_size, pdb_type, era_2_sw_hfn_ovrd, true);
+	err = insert_hfn_ov_op(p, sn_size, pdb_type, true);
 	if (err)
 		return err;
 
@@ -3534,10 +2994,6 @@ cnstr_shdsc_pdcp_u_plane_decap(uint32_t *descbuf,
 	case PDCP_SN_SIZE_12:
 		switch (cipherdata->algtype) {
 		case PDCP_CIPHER_TYPE_ZUC:
-			if (rta_sec_era < RTA_SEC_ERA_5) {
-				pr_err("Invalid era for selected algorithm\n");
-				return -ENOTSUP;
-			}
 			/* fallthrough */
 		case PDCP_CIPHER_TYPE_AES:
 		case PDCP_CIPHER_TYPE_SNOW:
@@ -3555,7 +3011,7 @@ cnstr_shdsc_pdcp_u_plane_decap(uint32_t *descbuf,
 			else if (authdata && authdata->algtype == 0) {
 				err = pdcp_insert_uplane_with_int_op(p, swap,
 						cipherdata, authdata,
-						sn_size, era_2_sw_hfn_ovrd,
+						sn_size,
 						OP_TYPE_DECAP_PROTOCOL);
 				if (err)
 					return err;
@@ -3589,7 +3045,7 @@ cnstr_shdsc_pdcp_u_plane_decap(uint32_t *descbuf,
 		if (authdata) {
 			err = pdcp_insert_uplane_with_int_op(p, swap,
 					cipherdata, authdata,
-					sn_size, era_2_sw_hfn_ovrd,
+					sn_size,
 					OP_TYPE_DECAP_PROTOCOL);
 			if (err)
 				return err;
@@ -3649,9 +3105,6 @@ cnstr_shdsc_pdcp_short_mac(uint32_t *descbuf,
 	struct program prg;
 	struct program *p = &prg;
 	uint32_t iv[3] = {0, 0, 0};
-	LABEL(local_offset);
-	REFERENCE(move_cmd_read_descbuf);
-	REFERENCE(move_cmd_write_descbuf);
 
 	PROGRAM_CNTXT_INIT(p, descbuf, 0);
 	if (swap)
@@ -3661,52 +3114,15 @@ cnstr_shdsc_pdcp_short_mac(uint32_t *descbuf,
 
 	SHR_HDR(p, SHR_ALWAYS, 1, 0);
 
-	if (rta_sec_era > RTA_SEC_ERA_2) {
-		MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
-		MATHB(p, SEQINSZ, SUB, ZERO, MATH1, 4, 0);
-	} else {
-		MATHB(p, SEQINSZ, ADD, ONE, MATH1, 4, 0);
-		MATHB(p, MATH1, SUB, ONE, MATH1, 4, 0);
-		MATHB(p, ZERO, ADD, MATH1, VSEQINSZ, 4, 0);
-		MOVE(p, MATH1, 0, MATH0, 0, 8, IMMED);
+	MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
+	MATHB(p, SEQINSZ, SUB, ZERO, MATH1, 4, 0);
 
-		/*
-		 * Since MOVELEN is available only starting with
-		 * SEC ERA 3, use poor man's MOVELEN: create a MOVE
-		 * command dynamically by writing the length from M1 by
-		 * OR-ing the command in the M1 register and MOVE the
-		 * result into the descriptor buffer. Care must be taken
-		 * wrt. the location of the command because of SEC
-		 * pipelining. The actual MOVEs are written at the end
-		 * of the descriptor due to calculations needed on the
-		 * offset in the descriptor for the MOVE command.
-		 */
-		move_cmd_read_descbuf = MOVE(p, DESCBUF, 0, MATH0, 0, 6,
-					     IMMED);
-		move_cmd_write_descbuf = MOVE(p, MATH0, 0, DESCBUF, 0, 8,
-					      WAITCOMP | IMMED);
-	}
 	MATHB(p, ZERO, ADD, MATH1, VSEQOUTSZ, 4, 0);
 
 	switch (authdata->algtype) {
 	case PDCP_AUTH_TYPE_NULL:
 		SEQFIFOSTORE(p, MSG, 0, 0, VLF);
-		if (rta_sec_era > RTA_SEC_ERA_2) {
-			MOVE(p, AB1, 0, OFIFO, 0, MATH1, 0);
-		} else {
-			SET_LABEL(p, local_offset);
-
-			/* Shut off automatic Info FIFO entries */
-			LOAD(p, 0, DCTRL, LDOFF_DISABLE_AUTO_NFIFO, 0, IMMED);
-
-			/* Placeholder for MOVE command with length from M1
-			 * register
-			 */
-			MOVE(p, IFIFOAB1, 0, OFIFO, 0, 0, IMMED);
-
-			/* Enable automatic Info FIFO entries */
-			LOAD(p, 0, DCTRL, LDOFF_ENABLE_AUTO_NFIFO, 0, IMMED);
-		}
+		MOVE(p, AB1, 0, OFIFO, 0, MATH1, 0);
 
 		LOAD(p, (uintptr_t)iv, MATH0, 0, 8, IMMED | COPY);
 		SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1 | LAST2 | FLUSH1);
@@ -3730,23 +3146,8 @@ cnstr_shdsc_pdcp_short_mac(uint32_t *descbuf,
 			      DIR_ENC);
 		SEQFIFOSTORE(p, MSG, 0, 0, VLF);
 
-		if (rta_sec_era > RTA_SEC_ERA_2) {
-			MOVE(p, AB1, 0, OFIFO, 0, MATH1, 0);
-		} else {
-			SET_LABEL(p, local_offset);
-
-
-			/* Shut off automatic Info FIFO entries */
-			LOAD(p, 0, DCTRL, LDOFF_DISABLE_AUTO_NFIFO, 0, IMMED);
-
-			/* Placeholder for MOVE command with length from M1
-			 * register
-			 */
-			MOVE(p, IFIFOAB1, 0, OFIFO, 0, 0, IMMED);
+		MOVE(p, AB1, 0, OFIFO, 0, MATH1, 0);
 
-			/* Enable automatic Info FIFO entries */
-			LOAD(p, 0, DCTRL, LDOFF_ENABLE_AUTO_NFIFO, 0, IMMED);
-		}
 		SEQFIFOLOAD(p, MSGINSNOOP, 0, VLF | LAST1 | LAST2 | FLUSH1);
 		SEQSTORE(p, CONTEXT2, 0, 4, 0);
 
@@ -3768,32 +3169,14 @@ cnstr_shdsc_pdcp_short_mac(uint32_t *descbuf,
 			      DIR_ENC);
 		SEQFIFOSTORE(p, MSG, 0, 0, VLF);
 
-		if (rta_sec_era > RTA_SEC_ERA_2) {
-			MOVE(p, AB2, 0, OFIFO, 0, MATH1, 0);
-		} else {
-			SET_LABEL(p, local_offset);
-
-			/* Shut off automatic Info FIFO entries */
-			LOAD(p, 0, DCTRL, LDOFF_DISABLE_AUTO_NFIFO, 0, IMMED);
+		MOVE(p, AB2, 0, OFIFO, 0, MATH1, 0);
 
-			/* Placeholder for MOVE command with length from M1
-			 * register
-			 */
-			MOVE(p, IFIFOAB2, 0, OFIFO, 0, 0, IMMED);
-
-			/* Enable automatic Info FIFO entries */
-			LOAD(p, 0, DCTRL, LDOFF_ENABLE_AUTO_NFIFO, 0, IMMED);
-		}
 		SEQFIFOLOAD(p, MSGINSNOOP, 0, VLF | LAST1 | LAST2 | FLUSH1);
 		SEQSTORE(p, CONTEXT1, 0, 4, 0);
 
 		break;
 
 	case PDCP_AUTH_TYPE_ZUC:
-		if (rta_sec_era < RTA_SEC_ERA_5) {
-			pr_err("Invalid era for selected algorithm\n");
-			return -ENOTSUP;
-		}
 		iv[0] = 0xFFFFFFFF;
 		iv[1] = swap ? swab32(0xFC000000) : 0xFC000000;
 		iv[2] = 0x00000000; /* unused */
@@ -3819,12 +3202,6 @@ cnstr_shdsc_pdcp_short_mac(uint32_t *descbuf,
 		return -EINVAL;
 	}
 
-
-	if (rta_sec_era < RTA_SEC_ERA_3) {
-		PATCH_MOVE(p, move_cmd_read_descbuf, local_offset);
-		PATCH_MOVE(p, move_cmd_write_descbuf, local_offset);
-	}
-
 	return PROGRAM_FINALIZE(p);
 }
 
diff --git a/drivers/common/dpaax/caamflib/desc/sdap.h b/drivers/common/dpaax/caamflib/desc/sdap.h
index 07f55b5b40..f0e712093a 100644
--- a/drivers/common/dpaax/caamflib/desc/sdap.h
+++ b/drivers/common/dpaax/caamflib/desc/sdap.h
@@ -225,10 +225,6 @@ static inline int pdcp_sdap_insert_no_int_op(struct program *p,
 		break;
 
 	case PDCP_CIPHER_TYPE_ZUC:
-		if (rta_sec_era < RTA_SEC_ERA_5) {
-			pr_err("Invalid era for selected algorithm\n");
-			return -ENOTSUP;
-		}
 		/* The LSB and MSB is the same for ZUC context */
 		MOVEB(p, MATH2, 0, CONTEXT1, 0, 0x08, IMMED);
 		MOVEB(p, MATH2, 0, CONTEXT1, 0x08, 0x08, WAITCOMP | IMMED);
@@ -253,7 +249,6 @@ pdcp_sdap_insert_enc_only_op(struct program *p, bool swap __maybe_unused,
 			     struct alginfo *cipherdata,
 			     struct alginfo *authdata __maybe_unused,
 			     unsigned int dir, enum pdcp_sn_size sn_size,
-			     unsigned char era_2_sw_hfn_ovrd __maybe_unused,
 			     enum pdb_type_e pdb_type)
 {
 	uint32_t offset = 0, length = 0, sn_mask = 0;
@@ -293,12 +288,7 @@ pdcp_sdap_insert_enc_only_op(struct program *p, bool swap __maybe_unused,
 	/* Write header */
 	SEQSTORE(p, MATH0, offset, length, 0);
 
-	if (rta_sec_era > RTA_SEC_ERA_2) {
-		MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
-	} else {
-		MATHB(p, SEQINSZ, SUB, ONE, MATH1, 4, 0);
-		MATHB(p, MATH1, ADD, ONE, VSEQINSZ, 4, 0);
-	}
+	MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
 
 	if (dir == OP_TYPE_ENCAP_PROTOCOL)
 		MATHB(p, SEQINSZ, ADD, PDCP_MAC_I_LEN, VSEQOUTSZ, 4, IMMED2);
@@ -326,11 +316,6 @@ pdcp_sdap_insert_enc_only_op(struct program *p, bool swap __maybe_unused,
 		break;
 
 	case PDCP_CIPHER_TYPE_ZUC:
-		if (rta_sec_era < RTA_SEC_ERA_5) {
-			pr_err("Invalid era for selected algorithm\n");
-			return -ENOTSUP;
-		}
-
 		MOVEB(p, MATH2, 0, CONTEXT1, 0, 0x08, IMMED);
 		MOVEB(p, MATH2, 0, CONTEXT1, 0x08, 0x08, WAITCOMP | IMMED);
 
@@ -378,7 +363,6 @@ static inline int
 pdcp_sdap_insert_snoop_op(struct program *p, bool swap __maybe_unused,
 			  struct alginfo *cipherdata, struct alginfo *authdata,
 			  unsigned int dir, enum pdcp_sn_size sn_size,
-			  unsigned char era_2_sw_hfn_ovrd __maybe_unused,
 			  enum pdb_type_e pdb_type)
 {
 	uint32_t offset = 0, length = 0, sn_mask = 0;
@@ -391,13 +375,6 @@ pdcp_sdap_insert_snoop_op(struct program *p, bool swap __maybe_unused,
 			FULL_PDB_DESCBUF_HFN_BEARER_DIR_OFFSET :
 			REDUCED_PDB_DESCBUF_HFN_BEARER_DIR_OFFSET;
 
-	if (authdata->algtype == PDCP_CIPHER_TYPE_ZUC) {
-		if (rta_sec_era < RTA_SEC_ERA_5) {
-			pr_err("Invalid era for selected algorithm\n");
-			return -ENOTSUP;
-		}
-	}
-
 	if (pdcp_sdap_get_sn_parameters(sn_size, swap, &offset, &length,
 					&sn_mask))
 		return -ENOTSUP;
@@ -588,8 +565,7 @@ pdcp_sdap_insert_snoop_op(struct program *p, bool swap __maybe_unused,
 		 */
 		JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CLASS1 | NOP | NIFP);
 
-		if (rta_sec_era >= RTA_SEC_ERA_6)
-			LOAD(p, 0, DCTRL, 0, LDLEN_RST_CHA_OFIFO_PTR, IMMED);
+		LOAD(p, 0, DCTRL, 0, LDLEN_RST_CHA_OFIFO_PTR, IMMED);
 
 		/* Save the content left in the Output FIFO (the ICV) to MATH0
 		 */
@@ -604,13 +580,7 @@ pdcp_sdap_insert_snoop_op(struct program *p, bool swap __maybe_unused,
 		 * Note: As configured by the altsource, this will send
 		 * the
 		 */
-		if (rta_sec_era <= RTA_SEC_ERA_2) {
-			/* Shut off automatic Info FIFO entries */
-			LOAD(p, 0, DCTRL, LDOFF_DISABLE_AUTO_NFIFO, 0, IMMED);
-			MOVE(p, MATH0, 0, IFIFOAB2, 0, 4, WAITCOMP | IMMED);
-		} else {
-			MOVE(p, MATH0, 0, IFIFO, 0, 4, WAITCOMP | IMMED);
-		}
+		MOVE(p, MATH0, 0, IFIFO, 0, 4, WAITCOMP | IMMED);
 	}
 
 	if (authdata->algtype == PDCP_CIPHER_TYPE_ZUC) {
@@ -638,7 +608,6 @@ pdcp_sdap_insert_snoop_op(struct program *p, bool swap __maybe_unused,
 static inline int pdcp_sdap_insert_no_snoop_op(
 	struct program *p, bool swap __maybe_unused, struct alginfo *cipherdata,
 	struct alginfo *authdata, unsigned int dir, enum pdcp_sn_size sn_size,
-	unsigned char era_2_sw_hfn_ovrd __maybe_unused,
 	enum pdb_type_e pdb_type)
 {
 	uint32_t offset = 0, length = 0, sn_mask = 0;
@@ -649,13 +618,6 @@ static inline int pdcp_sdap_insert_no_snoop_op(
 			FULL_PDB_DESCBUF_HFN_BEARER_DIR_OFFSET :
 			REDUCED_PDB_DESCBUF_HFN_BEARER_DIR_OFFSET;
 
-	if (authdata->algtype == PDCP_CIPHER_TYPE_ZUC) {
-		if (rta_sec_era < RTA_SEC_ERA_5) {
-			pr_err("Invalid era for selected algorithm\n");
-			return -ENOTSUP;
-		}
-	}
-
 	if (pdcp_sdap_get_sn_parameters(sn_size, swap, &offset, &length,
 					&sn_mask))
 		return -ENOTSUP;
@@ -842,11 +804,10 @@ pdcp_sdap_insert_cplane_null_op(struct program *p,
 			   struct alginfo *authdata,
 			   unsigned int dir,
 			   enum pdcp_sn_size sn_size,
-			   unsigned char era_2_sw_hfn_ovrd,
 			   enum pdb_type_e pdb_type __maybe_unused)
 {
 	return pdcp_insert_cplane_null_op(p, swap, cipherdata, authdata, dir,
-					  sn_size, era_2_sw_hfn_ovrd);
+					  sn_size);
 }
 
 static inline int
@@ -856,24 +817,22 @@ pdcp_sdap_insert_cplane_int_only_op(struct program *p,
 			   struct alginfo *authdata,
 			   unsigned int dir,
 			   enum pdcp_sn_size sn_size,
-			   unsigned char era_2_sw_hfn_ovrd,
 			   enum pdb_type_e pdb_type __maybe_unused)
 {
 	return pdcp_insert_cplane_int_only_op(p, swap, cipherdata, authdata,
-				dir, sn_size, era_2_sw_hfn_ovrd);
+				dir, sn_size);
 }
 
 static int pdcp_sdap_insert_with_int_op(
 	struct program *p, bool swap __maybe_unused, struct alginfo *cipherdata,
 	struct alginfo *authdata, enum pdcp_sn_size sn_size,
-	unsigned char era_2_sw_hfn_ovrd, unsigned int dir,
+	unsigned int dir,
 	enum pdb_type_e pdb_type)
 {
 	static int (
 		*pdcp_cp_fp[PDCP_CIPHER_TYPE_INVALID][PDCP_AUTH_TYPE_INVALID])(
 		struct program *, bool swap, struct alginfo *, struct alginfo *,
-		unsigned int, enum pdcp_sn_size,
-		unsigned char __maybe_unused, enum pdb_type_e pdb_type) = {
+		unsigned int dir, enum pdcp_sn_size, enum pdb_type_e pdb_type) = {
 		{
 			/* NULL */
 			pdcp_sdap_insert_cplane_null_op,     /* NULL */
@@ -907,7 +866,7 @@ static int pdcp_sdap_insert_with_int_op(
 
 	err = pdcp_cp_fp[cipherdata->algtype]
 			[authdata->algtype](p, swap, cipherdata, authdata, dir,
-					sn_size, era_2_sw_hfn_ovrd, pdb_type);
+					sn_size, pdb_type);
 	if (err)
 		return err;
 
@@ -925,7 +884,6 @@ cnstr_shdsc_pdcp_sdap_u_plane(uint32_t *descbuf,
 			       uint32_t hfn_threshold,
 			       struct alginfo *cipherdata,
 			       struct alginfo *authdata,
-			       unsigned char era_2_sw_hfn_ovrd,
 			       uint32_t caps_mode)
 {
 	struct program prg;
@@ -966,12 +924,6 @@ cnstr_shdsc_pdcp_sdap_u_plane(uint32_t *descbuf,
 
 	LABEL(pdb_end);
 
-	/* Check HFN override for ERA 2 */
-	if (rta_sec_era != RTA_SEC_ERA_2 && era_2_sw_hfn_ovrd) {
-		pr_err("Cannot select SW HFN ovrd for other era than 2");
-		return -EINVAL;
-	}
-
 	/* Check the confidentiality algorithm is supported by the code */
 	switch (cipherdata->algtype) {
 	case PDCP_CIPHER_TYPE_NULL:
@@ -1013,14 +965,6 @@ cnstr_shdsc_pdcp_sdap_u_plane(uint32_t *descbuf,
 		return -ENOTSUP;
 	}
 
-	/* Check that we are not performing ZUC algo on old platforms */
-	if (cipherdata->algtype == PDCP_CIPHER_TYPE_ZUC &&
-			rta_sec_era < RTA_SEC_ERA_5) {
-		pr_err("ZUC algorithm not supported for era: %d\n",
-				rta_sec_era);
-		return -ENOTSUP;
-	}
-
 	/* Initialize the program */
 	PROGRAM_CNTXT_INIT(p, descbuf, 0);
 
@@ -1047,7 +991,7 @@ cnstr_shdsc_pdcp_sdap_u_plane(uint32_t *descbuf,
 	SET_LABEL(p, pdb_end);
 
 	/* Inser the HFN override operation */
-	err = insert_hfn_ov_op(p, sn_size, pdb_type, era_2_sw_hfn_ovrd, false);
+	err = insert_hfn_ov_op(p, sn_size, pdb_type, false);
 	if (err)
 		return err;
 
@@ -1068,7 +1012,6 @@ cnstr_shdsc_pdcp_sdap_u_plane(uint32_t *descbuf,
 	} else {
 		err = pdcp_sdap_insert_with_int_op(p, swap, cipherdata,
 						   authdata, sn_size,
-						   era_2_sw_hfn_ovrd,
 						   caps_mode, pdb_type);
 		if (err) {
 			pr_err("Fail pdcp_sdap_insert_with_int_op\n");
@@ -1096,9 +1039,6 @@ cnstr_shdsc_pdcp_sdap_u_plane(uint32_t *descbuf,
  *                 keys should be renegotiated at the earliest convenience.
  * @cipherdata: pointer to block cipher transform definitions
  *              Valid algorithm values are those from cipher_type_pdcp enum.
- * @era_2_sw_hfn_ovrd: if software HFN override mechanism is desired for
- *                     this descriptor. Note: Can only be used for
- *                     SEC ERA 2.
  *
  * Return: size of descriptor written in words or negative number on error.
  *         Once the function returns, the value of this parameter can be used
@@ -1118,12 +1058,11 @@ cnstr_shdsc_pdcp_sdap_u_plane_encap(uint32_t *descbuf,
 			       unsigned short direction,
 			       uint32_t hfn_threshold,
 			       struct alginfo *cipherdata,
-			       struct alginfo *authdata,
-			       unsigned char era_2_sw_hfn_ovrd)
+			       struct alginfo *authdata)
 {
 	return cnstr_shdsc_pdcp_sdap_u_plane(descbuf, ps, swap, sn_size,
 			hfn, bearer, direction, hfn_threshold, cipherdata,
-			authdata, era_2_sw_hfn_ovrd, OP_TYPE_ENCAP_PROTOCOL);
+			authdata, OP_TYPE_ENCAP_PROTOCOL);
 }
 
 /**
@@ -1141,9 +1080,6 @@ cnstr_shdsc_pdcp_sdap_u_plane_encap(uint32_t *descbuf,
  *                 keys should be renegotiated at the earliest convenience.
  * @cipherdata: pointer to block cipher transform definitions
  *              Valid algorithm values are those from cipher_type_pdcp enum.
- * @era_2_sw_hfn_ovrd: if software HFN override mechanism is desired for
- *                     this descriptor. Note: Can only be used for
- *                     SEC ERA 2.
  *
  * Return: size of descriptor written in words or negative number on error.
  *         Once the function returns, the value of this parameter can be used
@@ -1163,12 +1099,11 @@ cnstr_shdsc_pdcp_sdap_u_plane_decap(uint32_t *descbuf,
 			       unsigned short direction,
 			       uint32_t hfn_threshold,
 			       struct alginfo *cipherdata,
-			       struct alginfo *authdata,
-			       unsigned char era_2_sw_hfn_ovrd)
+			       struct alginfo *authdata)
 {
 	return cnstr_shdsc_pdcp_sdap_u_plane(descbuf, ps, swap, sn_size, hfn,
 			bearer, direction, hfn_threshold, cipherdata, authdata,
-			era_2_sw_hfn_ovrd, OP_TYPE_DECAP_PROTOCOL);
+			OP_TYPE_DECAP_PROTOCOL);
 }
 
 #endif /* __DESC_SDAP_H__ */
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
index a5b052375d..1e6b3e548a 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
@@ -3297,8 +3297,7 @@ dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
 					pdcp_xform->bearer,
 					pdcp_xform->pkt_dir,
 					pdcp_xform->hfn_threshold,
-					&cipherdata, &authdata,
-					0);
+					&cipherdata, &authdata);
 		else if (session->dir == DIR_DEC)
 			bufsize = cnstr_shdsc_pdcp_c_plane_decap(
 					priv->flc_desc[0].desc, 1, swap,
@@ -3307,8 +3306,7 @@ dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
 					pdcp_xform->bearer,
 					pdcp_xform->pkt_dir,
 					pdcp_xform->hfn_threshold,
-					&cipherdata, &authdata,
-					0);
+					&cipherdata, &authdata);
 
 	} else if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_SHORT_MAC) {
 		bufsize = cnstr_shdsc_pdcp_short_mac(priv->flc_desc[0].desc,
@@ -3323,7 +3321,7 @@ dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
 					pdcp_xform->bearer,
 					pdcp_xform->pkt_dir,
 					pdcp_xform->hfn_threshold,
-					&cipherdata, p_authdata, 0);
+					&cipherdata, p_authdata);
 			else
 				bufsize = cnstr_shdsc_pdcp_u_plane_encap(
 					priv->flc_desc[0].desc, 1, swap,
@@ -3332,7 +3330,7 @@ dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
 					pdcp_xform->bearer,
 					pdcp_xform->pkt_dir,
 					pdcp_xform->hfn_threshold,
-					&cipherdata, p_authdata, 0);
+					&cipherdata, p_authdata);
 		} else if (session->dir == DIR_DEC) {
 			if (pdcp_xform->sdap_enabled)
 				bufsize = cnstr_shdsc_pdcp_sdap_u_plane_decap(
@@ -3342,7 +3340,7 @@ dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
 					pdcp_xform->bearer,
 					pdcp_xform->pkt_dir,
 					pdcp_xform->hfn_threshold,
-					&cipherdata, p_authdata, 0);
+					&cipherdata, p_authdata);
 			else
 				bufsize = cnstr_shdsc_pdcp_u_plane_decap(
 					priv->flc_desc[0].desc, 1, swap,
@@ -3351,7 +3349,7 @@ dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
 					pdcp_xform->bearer,
 					pdcp_xform->pkt_dir,
 					pdcp_xform->hfn_threshold,
-					&cipherdata, p_authdata, 0);
+					&cipherdata, p_authdata);
 		}
 	}
 
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.c b/drivers/crypto/dpaa_sec/dpaa_sec.c
index f20acdd123..1137b142e9 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.c
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.c
@@ -296,8 +296,7 @@ dpaa_sec_prep_pdcp_cdb(dpaa_sec_session *ses)
 					ses->pdcp.bearer,
 					ses->pdcp.pkt_dir,
 					ses->pdcp.hfn_threshold,
-					&cipherdata, &authdata,
-					0);
+					&cipherdata, &authdata);
 		else if (ses->dir == DIR_DEC)
 			shared_desc_len = cnstr_shdsc_pdcp_c_plane_decap(
 					cdb->sh_desc, 1, swap,
@@ -306,8 +305,7 @@ dpaa_sec_prep_pdcp_cdb(dpaa_sec_session *ses)
 					ses->pdcp.bearer,
 					ses->pdcp.pkt_dir,
 					ses->pdcp.hfn_threshold,
-					&cipherdata, &authdata,
-					0);
+					&cipherdata, &authdata);
 	} else if (ses->pdcp.domain == RTE_SECURITY_PDCP_MODE_SHORT_MAC) {
 		shared_desc_len = cnstr_shdsc_pdcp_short_mac(cdb->sh_desc,
 						     1, swap, &authdata);
@@ -322,7 +320,7 @@ dpaa_sec_prep_pdcp_cdb(dpaa_sec_session *ses)
 						ses->pdcp.bearer,
 						ses->pdcp.pkt_dir,
 						ses->pdcp.hfn_threshold,
-						&cipherdata, p_authdata, 0);
+						&cipherdata, p_authdata);
 			else
 				shared_desc_len =
 					cnstr_shdsc_pdcp_u_plane_encap(
@@ -332,7 +330,7 @@ dpaa_sec_prep_pdcp_cdb(dpaa_sec_session *ses)
 						ses->pdcp.bearer,
 						ses->pdcp.pkt_dir,
 						ses->pdcp.hfn_threshold,
-						&cipherdata, p_authdata, 0);
+						&cipherdata, p_authdata);
 		} else if (ses->dir == DIR_DEC) {
 			if (ses->pdcp.sdap_enabled)
 				shared_desc_len =
@@ -343,7 +341,7 @@ dpaa_sec_prep_pdcp_cdb(dpaa_sec_session *ses)
 						ses->pdcp.bearer,
 						ses->pdcp.pkt_dir,
 						ses->pdcp.hfn_threshold,
-						&cipherdata, p_authdata, 0);
+						&cipherdata, p_authdata);
 			else
 				shared_desc_len =
 					cnstr_shdsc_pdcp_u_plane_decap(
@@ -353,7 +351,7 @@ dpaa_sec_prep_pdcp_cdb(dpaa_sec_session *ses)
 						ses->pdcp.bearer,
 						ses->pdcp.pkt_dir,
 						ses->pdcp.hfn_threshold,
-						&cipherdata, p_authdata, 0);
+						&cipherdata, p_authdata);
 		}
 	}
 	return shared_desc_len;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 42+ messages in thread

* [PATCH v4 2/7] common/dpaax: change job processing mode for PDCP SDAP
  2022-02-10 10:58         ` [PATCH v4 0/7] NXP crypto drivers changes Gagandeep Singh
  2022-02-10 10:58           ` [PATCH v4 1/7] common/dpaax: caamflib: Remove code related to SEC ERA 1 to 7 Gagandeep Singh
@ 2022-02-10 10:58           ` Gagandeep Singh
  2022-02-10 10:58           ` [PATCH v4 3/7] crypto/dpaa2_sec: change capabilities for AES_CMAC Gagandeep Singh
                             ` (5 subsequent siblings)
  7 siblings, 0 replies; 42+ messages in thread
From: Gagandeep Singh @ 2022-02-10 10:58 UTC (permalink / raw)
  To: gakhil, dev; +Cc: Gagandeep Singh

For PDCP SDAP test cases, the HW SEC engine processes the
jobs in WAIT mode.

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 drivers/common/dpaax/caamflib/desc/sdap.h | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/drivers/common/dpaax/caamflib/desc/sdap.h b/drivers/common/dpaax/caamflib/desc/sdap.h
index f0e712093a..07a544295e 100644
--- a/drivers/common/dpaax/caamflib/desc/sdap.h
+++ b/drivers/common/dpaax/caamflib/desc/sdap.h
@@ -895,27 +895,27 @@ cnstr_shdsc_pdcp_sdap_u_plane(uint32_t *descbuf,
 			{
 				/* NULL */
 				SHR_WAIT,   /* NULL */
-				SHR_ALWAYS, /* SNOW f9 */
-				SHR_ALWAYS, /* AES CMAC */
-				SHR_ALWAYS  /* ZUC-I */
+				SHR_WAIT, /* SNOW f9 */
+				SHR_WAIT, /* AES CMAC */
+				SHR_WAIT  /* ZUC-I */
 			},
 			{
 				/* SNOW f8 */
-				SHR_ALWAYS, /* NULL */
-				SHR_ALWAYS, /* SNOW f9 */
+				SHR_WAIT, /* NULL */
+				SHR_WAIT, /* SNOW f9 */
 				SHR_WAIT,   /* AES CMAC */
 				SHR_WAIT    /* ZUC-I */
 			},
 			{
 				/* AES CTR */
-				SHR_ALWAYS, /* NULL */
-				SHR_ALWAYS, /* SNOW f9 */
-				SHR_ALWAYS, /* AES CMAC */
+				SHR_WAIT, /* NULL */
+				SHR_WAIT, /* SNOW f9 */
+				SHR_WAIT, /* AES CMAC */
 				SHR_WAIT    /* ZUC-I */
 			},
 			{
 				/* ZUC-E */
-				SHR_ALWAYS, /* NULL */
+				SHR_WAIT, /* NULL */
 				SHR_WAIT,   /* SNOW f9 */
 				SHR_WAIT,   /* AES CMAC */
 				SHR_WAIT    /* ZUC-I */
@@ -979,7 +979,7 @@ cnstr_shdsc_pdcp_sdap_u_plane(uint32_t *descbuf,
 		SHR_HDR(p, desc_share[cipherdata->algtype][authdata->algtype],
 			0, 0);
 	else
-		SHR_HDR(p, SHR_ALWAYS, 0, 0);
+		SHR_HDR(p, SHR_WAIT, 0, 0);
 
 	/* Construct the PDB */
 	pdb_type = cnstr_pdcp_u_plane_pdb(p, sn_size, hfn, bearer, direction,
-- 
2.25.1


^ permalink raw reply	[flat|nested] 42+ messages in thread

* [PATCH v4 3/7] crypto/dpaa2_sec: change capabilities for AES_CMAC
  2022-02-10 10:58         ` [PATCH v4 0/7] NXP crypto drivers changes Gagandeep Singh
  2022-02-10 10:58           ` [PATCH v4 1/7] common/dpaax: caamflib: Remove code related to SEC ERA 1 to 7 Gagandeep Singh
  2022-02-10 10:58           ` [PATCH v4 2/7] common/dpaax: change job processing mode for PDCP SDAP Gagandeep Singh
@ 2022-02-10 10:58           ` Gagandeep Singh
  2022-02-10 10:58           ` [PATCH v4 4/7] crypto/dpaa2_sec: add useful debug prints in sec dequeue Gagandeep Singh
                             ` (4 subsequent siblings)
  7 siblings, 0 replies; 42+ messages in thread
From: Gagandeep Singh @ 2022-02-10 10:58 UTC (permalink / raw)
  To: gakhil, dev; +Cc: Hemant Agrawal, Gagandeep Singh

From: Hemant Agrawal <hemant.agrawal@nxp.com>

Add the IV size and change the digest size to the values
supported by the HW engine.
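
For reference, a minimal sketch (not part of this patch; the helper name is
illustrative) of how an application can validate an AES-CMAC digest length
against the capabilities advertised here:

  #include <rte_cryptodev.h>

  /* Hypothetical helper: returns 1 if the device advertises AES-CMAC with
   * the given key and digest sizes, 0 otherwise.
   */
  static int
  cmac_digest_supported(uint8_t dev_id, uint16_t key_len, uint16_t digest_len)
  {
  	const struct rte_cryptodev_symmetric_capability *cap;
  	struct rte_cryptodev_sym_capability_idx idx = {
  		.type = RTE_CRYPTO_SYM_XFORM_AUTH,
  		.algo.auth = RTE_CRYPTO_AUTH_AES_CMAC,
  	};

  	cap = rte_cryptodev_sym_capability_get(dev_id, &idx);
  	if (cap == NULL)
  		return 0;

  	/* iv_size argument is 0: AES-CMAC does not use an IV */
  	return rte_cryptodev_sym_capability_check_auth(cap, key_len,
  						       digest_len, 0) == 0;
  }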

Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
index 05bd7c0736..a8f9440632 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
@@ -549,11 +549,12 @@ static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
 					.increment = 1
 				},
 				.digest_size = {
-					.min = 4,
+					.min = 12,
 					.max = 16,
 					.increment = 4
 				},
-				.aad_size = { 0 }
+				.aad_size = { 0 },
+				.iv_size = { 0 }
 			}, }
 		}, }
 	},
-- 
2.25.1


^ permalink raw reply	[flat|nested] 42+ messages in thread

* [PATCH v4 4/7] crypto/dpaa2_sec: add useful debug prints in sec dequeue
  2022-02-10 10:58         ` [PATCH v4 0/7] NXP crypto drivers changes Gagandeep Singh
                             ` (2 preceding siblings ...)
  2022-02-10 10:58           ` [PATCH v4 3/7] crypto/dpaa2_sec: change capabilities for AES_CMAC Gagandeep Singh
@ 2022-02-10 10:58           ` Gagandeep Singh
  2022-02-10 10:58           ` [PATCH v4 5/7] crypto/dpaa2: fix to check next type for auth or cipher Gagandeep Singh
                             ` (3 subsequent siblings)
  7 siblings, 0 replies; 42+ messages in thread
From: Gagandeep Singh @ 2022-02-10 10:58 UTC (permalink / raw)
  To: gakhil, dev; +Cc: Gagandeep Singh

A few useful debug prints are added in the dequeue function.

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 doc/guides/cryptodevs/dpaa2_sec.rst         |  10 ++
 drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c | 136 +++++++++++++++++++-
 2 files changed, 144 insertions(+), 2 deletions(-)

diff --git a/doc/guides/cryptodevs/dpaa2_sec.rst b/doc/guides/cryptodevs/dpaa2_sec.rst
index 06de988d51..875d918068 100644
--- a/doc/guides/cryptodevs/dpaa2_sec.rst
+++ b/doc/guides/cryptodevs/dpaa2_sec.rst
@@ -175,3 +175,13 @@ For enabling logs, use the following EAL parameter:
 
 Using ``crypto.dpaa2`` as log matching criteria, all Crypto PMD logs can be
 enabled which are lower than logging ``level``.
+
+Enabling debug prints
+---------------------
+
+Use dev arg option ``drv_dump_mode=x`` to dump useful debug prints on HW sec
+error. There are 3 dump modes available 0, 1 and 2. Mode 0 means no dump print
+on error, mode 1 means dump HW error code and mode 2 means dump HW error code
+along with other useful debugging information like session, queue, descriptor
+data.
+e.g. ``fslmc:dpseci.1,drv_dump_mode=1``
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
index 1e6b3e548a..444e1f0043 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
@@ -28,6 +28,7 @@
 #include <fsl_dpopr.h>
 #include <fsl_dpseci.h>
 #include <fsl_mc_sys.h>
+#include <rte_hexdump.h>
 
 #include "dpaa2_sec_priv.h"
 #include "dpaa2_sec_event.h"
@@ -50,7 +51,17 @@
 
 #define NO_PREFETCH 0
 
+#define DRIVER_DUMP_MODE "drv_dump_mode"
+
+/* DPAA2_SEC_DP_DUMP levels */
+enum dpaa2_sec_dump_levels {
+	DPAA2_SEC_DP_NO_DUMP,
+	DPAA2_SEC_DP_ERR_DUMP,
+	DPAA2_SEC_DP_FULL_DUMP
+};
+
 uint8_t cryptodev_driver_id;
+uint8_t dpaa2_sec_dp_dump = DPAA2_SEC_DP_ERR_DUMP;
 
 #ifdef RTE_LIB_SECURITY
 static inline int
@@ -1621,6 +1632,83 @@ sec_fd_to_mbuf(const struct qbman_fd *fd)
 	return op;
 }
 
+static void
+dpaa2_sec_dump(struct rte_crypto_op *op)
+{
+	int i;
+	dpaa2_sec_session *sess = NULL;
+	struct ctxt_priv *priv;
+	uint8_t bufsize;
+	struct rte_crypto_sym_op *sym_op;
+
+	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
+		sess = (dpaa2_sec_session *)get_sym_session_private_data(
+			op->sym->session, cryptodev_driver_id);
+#ifdef RTE_LIBRTE_SECURITY
+	else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
+		sess = (dpaa2_sec_session *)get_sec_session_private_data(
+			op->sym->sec_session);
+#endif
+
+	if (sess == NULL)
+		goto mbuf_dump;
+
+	priv = (struct ctxt_priv *)sess->ctxt;
+	printf("\n****************************************\n"
+		"session params:\n\tContext type:\t%d\n\tDirection:\t%s\n"
+		"\tCipher alg:\t%d\n\tAuth alg:\t%d\n\tAead alg:\t%d\n"
+		"\tCipher key len:\t%zd\n", sess->ctxt_type,
+		(sess->dir == DIR_ENC) ? "DIR_ENC" : "DIR_DEC",
+		sess->cipher_alg, sess->auth_alg, sess->aead_alg,
+		sess->cipher_key.length);
+		rte_hexdump(stdout, "cipher key", sess->cipher_key.data,
+				sess->cipher_key.length);
+		rte_hexdump(stdout, "auth key", sess->auth_key.data,
+				sess->auth_key.length);
+	printf("\tAuth key len:\t%zd\n\tIV len:\t\t%d\n\tIV offset:\t%d\n"
+		"\tdigest length:\t%d\n\tstatus:\t\t%d\n\taead auth only"
+		" len:\t%d\n\taead cipher text:\t%d\n",
+		sess->auth_key.length, sess->iv.length, sess->iv.offset,
+		sess->digest_length, sess->status,
+		sess->ext_params.aead_ctxt.auth_only_len,
+		sess->ext_params.aead_ctxt.auth_cipher_text);
+#ifdef RTE_LIBRTE_SECURITY
+	printf("PDCP session params:\n"
+		"\tDomain:\t\t%d\n\tBearer:\t\t%d\n\tpkt_dir:\t%d\n\thfn_ovd:"
+		"\t%d\n\tsn_size:\t%d\n\thfn_ovd_offset:\t%d\n\thfn:\t\t%d\n"
+		"\thfn_threshold:\t0x%x\n", sess->pdcp.domain,
+		sess->pdcp.bearer, sess->pdcp.pkt_dir, sess->pdcp.hfn_ovd,
+		sess->pdcp.sn_size, sess->pdcp.hfn_ovd_offset, sess->pdcp.hfn,
+		sess->pdcp.hfn_threshold);
+
+#endif
+	bufsize = (uint8_t)priv->flc_desc[0].flc.word1_sdl;
+	printf("Descriptor Dump:\n");
+	for (i = 0; i < bufsize; i++)
+		printf("\tDESC[%d]:0x%x\n", i, priv->flc_desc[0].desc[i]);
+
+	printf("\n");
+mbuf_dump:
+	sym_op = op->sym;
+	if (sym_op->m_src) {
+		printf("Source mbuf:\n");
+		rte_pktmbuf_dump(stdout, sym_op->m_src, sym_op->m_src->data_len);
+	}
+	if (sym_op->m_dst) {
+		printf("Destination mbuf:\n");
+		rte_pktmbuf_dump(stdout, sym_op->m_dst, sym_op->m_dst->data_len);
+	}
+
+	printf("Session address = %p\ncipher offset: %d, length: %d\n"
+		"auth offset: %d, length:  %d\n aead offset: %d, length: %d\n"
+		, sym_op->session,
+		sym_op->cipher.data.offset, sym_op->cipher.data.length,
+		sym_op->auth.data.offset, sym_op->auth.data.length,
+		sym_op->aead.data.offset, sym_op->aead.data.length);
+	printf("\n");
+
+}
+
 static uint16_t
 dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
 			uint16_t nb_ops)
@@ -1702,8 +1790,13 @@ dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
 
 		if (unlikely(fd->simple.frc)) {
 			/* TODO Parse SEC errors */
-			DPAA2_SEC_DP_ERR("SEC returned Error - %x\n",
-				      fd->simple.frc);
+			if (dpaa2_sec_dp_dump > DPAA2_SEC_DP_NO_DUMP) {
+				DPAA2_SEC_DP_ERR("SEC returned Error - %x\n",
+						 fd->simple.frc);
+				if (dpaa2_sec_dp_dump > DPAA2_SEC_DP_ERR_DUMP)
+					dpaa2_sec_dump(ops[num_rx]);
+			}
+
 			dpaa2_qp->rx_vq.err_pkts += 1;
 			ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_ERROR;
 		} else {
@@ -3883,6 +3976,42 @@ dpaa2_sec_uninit(const struct rte_cryptodev *dev)
 	return 0;
 }
 
+static int
+check_devargs_handler(__rte_unused const char *key, const char *value,
+		      __rte_unused void *opaque)
+{
+	dpaa2_sec_dp_dump = atoi(value);
+	if (dpaa2_sec_dp_dump > DPAA2_SEC_DP_FULL_DUMP) {
+		DPAA2_SEC_WARN("WARN: DPAA2_SEC_DP_DUMP_LEVEL is not "
+			      "supported, changing to FULL error prints\n");
+		dpaa2_sec_dp_dump = DPAA2_SEC_DP_FULL_DUMP;
+	}
+
+	return 0;
+}
+
+static void
+dpaa2_sec_get_devargs(struct rte_devargs *devargs, const char *key)
+{
+	struct rte_kvargs *kvlist;
+
+	if (!devargs)
+		return;
+
+	kvlist = rte_kvargs_parse(devargs->args, NULL);
+	if (!kvlist)
+		return;
+
+	if (!rte_kvargs_count(kvlist, key)) {
+		rte_kvargs_free(kvlist);
+		return;
+	}
+
+	rte_kvargs_process(kvlist, key,
+			check_devargs_handler, NULL);
+	rte_kvargs_free(kvlist);
+}
+
 static int
 dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
 {
@@ -3984,6 +4113,7 @@ dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
 		goto init_error;
 	}
 
+	dpaa2_sec_get_devargs(cryptodev->device->devargs, DRIVER_DUMP_MODE);
 	DPAA2_SEC_INFO("driver %s: created", cryptodev->data->name);
 	return 0;
 
@@ -4082,4 +4212,6 @@ static struct cryptodev_driver dpaa2_sec_crypto_drv;
 RTE_PMD_REGISTER_DPAA2(CRYPTODEV_NAME_DPAA2_SEC_PMD, rte_dpaa2_sec_driver);
 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa2_sec_crypto_drv,
 		rte_dpaa2_sec_driver.driver, cryptodev_driver_id);
+RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_DPAA2_SEC_PMD,
+		DRIVER_DUMP_MODE "=<int>");
 RTE_LOG_REGISTER(dpaa2_logtype_sec, pmd.crypto.dpaa2, NOTICE);
-- 
2.25.1


^ permalink raw reply	[flat|nested] 42+ messages in thread

* [PATCH v4 5/7] crypto/dpaa2: fix to check next type for auth or cipher
  2022-02-10 10:58         ` [PATCH v4 0/7] NXP crypto drivers changes Gagandeep Singh
                             ` (3 preceding siblings ...)
  2022-02-10 10:58           ` [PATCH v4 4/7] crypto/dpaa2_sec: add useful debug prints in sec dequeue Gagandeep Singh
@ 2022-02-10 10:58           ` Gagandeep Singh
  2022-02-10 10:58           ` [PATCH v4 6/7] crypto/dpaa2_sec: ordered queue support Gagandeep Singh
                             ` (2 subsequent siblings)
  7 siblings, 0 replies; 42+ messages in thread
From: Gagandeep Singh @ 2022-02-10 10:58 UTC (permalink / raw)
  To: gakhil, dev; +Cc: Hemant Agrawal, stable

From: Hemant Agrawal <hemant.agrawal@nxp.com>

This patch adds more checks on the next xform type for PDCP cases.
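
For illustration, a minimal sketch (not part of this patch) of the
chained-xform layout these checks guard against; the driver must confirm
xform->next->type before reading the corresponding union member:

  #include <rte_crypto_sym.h>

  /* PDCP c-plane style chain: cipher first, authentication second.
   * Nothing forces the second element to be an auth xform, hence the
   * explicit type check added in the session setup code below.
   */
  static struct rte_crypto_sym_xform auth_xf = {
  	.type = RTE_CRYPTO_SYM_XFORM_AUTH,
  	.next = NULL,
  	.auth = {
  		.algo = RTE_CRYPTO_AUTH_SNOW3G_UIA2,
  		.op = RTE_CRYPTO_AUTH_OP_GENERATE,
  	},
  };

  static struct rte_crypto_sym_xform cipher_xf = {
  	.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
  	.next = &auth_xf,
  	.cipher = {
  		.algo = RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
  		.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
  	},
  };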

Fixes: 45e019608f31 ("crypto/dpaa2_sec: support integrity only PDCP")
Fixes: a1173d55598c ("crypto/dpaa_sec: support PDCP offload")
Cc: stable@dpdk.org

Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
 drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c | 6 ++++--
 drivers/crypto/dpaa_sec/dpaa_sec.c          | 6 ++++--
 2 files changed, 8 insertions(+), 4 deletions(-)

diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
index 444e1f0043..cb8aaf6446 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
@@ -3231,13 +3231,15 @@ dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
 	/* find xfrm types */
 	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
 		cipher_xform = &xform->cipher;
-		if (xform->next != NULL) {
+		if (xform->next != NULL &&
+			xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
 			session->ext_params.aead_ctxt.auth_cipher_text = true;
 			auth_xform = &xform->next->auth;
 		}
 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
 		auth_xform = &xform->auth;
-		if (xform->next != NULL) {
+		if (xform->next != NULL &&
+			xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
 			session->ext_params.aead_ctxt.auth_cipher_text = false;
 			cipher_xform = &xform->next->cipher;
 		}
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.c b/drivers/crypto/dpaa_sec/dpaa_sec.c
index 1137b142e9..75e437f696 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.c
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.c
@@ -2984,11 +2984,13 @@ dpaa_sec_set_pdcp_session(struct rte_cryptodev *dev,
 	/* find xfrm types */
 	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
 		cipher_xform = &xform->cipher;
-		if (xform->next != NULL)
+		if (xform->next != NULL &&
+			xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
 			auth_xform = &xform->next->auth;
 	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
 		auth_xform = &xform->auth;
-		if (xform->next != NULL)
+		if (xform->next != NULL &&
+			xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
 			cipher_xform = &xform->next->cipher;
 	} else {
 		DPAA_SEC_ERR("Invalid crypto type");
-- 
2.25.1


^ permalink raw reply	[flat|nested] 42+ messages in thread

* [PATCH v4 6/7] crypto/dpaa2_sec: ordered queue support
  2022-02-10 10:58         ` [PATCH v4 0/7] NXP crypto drivers changes Gagandeep Singh
                             ` (4 preceding siblings ...)
  2022-02-10 10:58           ` [PATCH v4 5/7] crypto/dpaa2: fix to check next type for auth or cipher Gagandeep Singh
@ 2022-02-10 10:58           ` Gagandeep Singh
  2022-02-10 10:58           ` [PATCH v4 7/7] crypto/dpaa_sec: add debug framework Gagandeep Singh
  2022-02-12 11:21           ` [EXT] [PATCH v4 0/7] NXP crypto drivers changes Akhil Goyal
  7 siblings, 0 replies; 42+ messages in thread
From: Gagandeep Singh @ 2022-02-10 10:58 UTC (permalink / raw)
  To: gakhil, dev; +Cc: Nipun Gupta, Gagandeep Singh

From: Nipun Gupta <nipun.gupta@nxp.com>

This patch adds ordered queue support for the DPAA2 platform.
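
For context, a rough application-side sketch (not part of this patch; the
adapter call and IDs are assumptions for illustration) of requesting ordered
scheduling on a crypto queue pair, which makes the PMD install the
ordered-event callback added below:

  #include <string.h>
  #include <rte_eventdev.h>
  #include <rte_event_crypto_adapter.h>

  /* Attach a crypto queue pair to an event queue with ordered scheduling.
   * All arguments are illustrative; error handling is left to the caller.
   */
  static int
  attach_ordered_qp(uint8_t adapter_id, uint8_t cdev_id, int32_t qp_id,
  		  uint8_t ev_queue_id)
  {
  	struct rte_event conf_ev;

  	memset(&conf_ev, 0, sizeof(conf_ev));
  	conf_ev.queue_id = ev_queue_id;
  	conf_ev.sched_type = RTE_SCHED_TYPE_ORDERED;
  	conf_ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;

  	/* Events dequeued from this queue pair carry ordered scheduling;
  	 * the PMD restores the original order when enqueuing back to SEC.
  	 */
  	return rte_event_crypto_adapter_queue_pair_add(adapter_id, cdev_id,
  						       qp_id, &conf_ev);
  }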

Signed-off-by: Nipun Gupta <nipun.gupta@nxp.com>
Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 doc/guides/cryptodevs/dpaa2_sec.rst         |   7 +
 drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c | 288 ++++++++++++++++++--
 drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h   |   2 +
 drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h    |  14 +-
 4 files changed, 290 insertions(+), 21 deletions(-)

diff --git a/doc/guides/cryptodevs/dpaa2_sec.rst b/doc/guides/cryptodevs/dpaa2_sec.rst
index 875d918068..1a590309a0 100644
--- a/doc/guides/cryptodevs/dpaa2_sec.rst
+++ b/doc/guides/cryptodevs/dpaa2_sec.rst
@@ -185,3 +185,10 @@ on error, mode 1 means dump HW error code and mode 2 means dump HW error code
 along with other useful debugging information like session, queue, descriptor
 data.
 e.g. ``fslmc:dpseci.1,drv_dump_mode=1``
+
+Enable strict ordering
+----------------------
+
+Use dev arg option ``drv_strict_order=1`` to enable strict ordering.
+By default, loose ordering is set for ordered schedule type event.
+e.g. ``fslmc:dpseci.1,drv_strict_order=1``
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
index cb8aaf6446..e62d04852b 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
@@ -52,6 +52,7 @@
 #define NO_PREFETCH 0
 
 #define DRIVER_DUMP_MODE "drv_dump_mode"
+#define DRIVER_STRICT_ORDER "drv_strict_order"
 
 /* DPAA2_SEC_DP_DUMP levels */
 enum dpaa2_sec_dump_levels {
@@ -1477,14 +1478,14 @@ dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
 
 		for (loop = 0; loop < frames_to_send; loop++) {
 			if (*dpaa2_seqn((*ops)->sym->m_src)) {
-				uint8_t dqrr_index =
-					*dpaa2_seqn((*ops)->sym->m_src) - 1;
-
-				flags[loop] = QBMAN_ENQUEUE_FLAG_DCA | dqrr_index;
-				DPAA2_PER_LCORE_DQRR_SIZE--;
-				DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index);
-				*dpaa2_seqn((*ops)->sym->m_src) =
-					DPAA2_INVALID_MBUF_SEQN;
+				if (*dpaa2_seqn((*ops)->sym->m_src) & QBMAN_ENQUEUE_FLAG_DCA) {
+					DPAA2_PER_LCORE_DQRR_SIZE--;
+					DPAA2_PER_LCORE_DQRR_HELD &= ~(1 <<
+					*dpaa2_seqn((*ops)->sym->m_src) &
+					QBMAN_EQCR_DCA_IDXMASK);
+				}
+				flags[loop] = *dpaa2_seqn((*ops)->sym->m_src);
+				*dpaa2_seqn((*ops)->sym->m_src) = DPAA2_INVALID_MBUF_SEQN;
 			}
 
 			/*Clear the unused FD fields before sending*/
@@ -1709,6 +1710,168 @@ dpaa2_sec_dump(struct rte_crypto_op *op)
 
 }
 
+static void
+dpaa2_sec_free_eqresp_buf(uint16_t eqresp_ci)
+{
+	struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO;
+	struct rte_crypto_op *op;
+	struct qbman_fd *fd;
+
+	fd = qbman_result_eqresp_fd(&dpio_dev->eqresp[eqresp_ci]);
+	op = sec_fd_to_mbuf(fd);
+	/* Instead of freeing, enqueue it to the sec tx queue (sec->core)
+	 * after setting an error in FD. But this will have performance impact.
+	 */
+	rte_pktmbuf_free(op->sym->m_src);
+}
+
+static void
+dpaa2_sec_set_enqueue_descriptor(struct dpaa2_queue *dpaa2_q,
+			     struct rte_mbuf *m,
+			     struct qbman_eq_desc *eqdesc)
+{
+	struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO;
+	struct eqresp_metadata *eqresp_meta;
+	struct dpaa2_sec_dev_private *priv = dpaa2_q->crypto_data->dev_private;
+	uint16_t orpid, seqnum;
+	uint8_t dq_idx;
+
+	if (*dpaa2_seqn(m) & DPAA2_ENQUEUE_FLAG_ORP) {
+		orpid = (*dpaa2_seqn(m) & DPAA2_EQCR_OPRID_MASK) >>
+			DPAA2_EQCR_OPRID_SHIFT;
+		seqnum = (*dpaa2_seqn(m) & DPAA2_EQCR_SEQNUM_MASK) >>
+			DPAA2_EQCR_SEQNUM_SHIFT;
+
+
+		if (!priv->en_loose_ordered) {
+			qbman_eq_desc_set_orp(eqdesc, 1, orpid, seqnum, 0);
+			qbman_eq_desc_set_response(eqdesc, (uint64_t)
+				DPAA2_VADDR_TO_IOVA(&dpio_dev->eqresp[
+				dpio_dev->eqresp_pi]), 1);
+			qbman_eq_desc_set_token(eqdesc, 1);
+
+			eqresp_meta = &dpio_dev->eqresp_meta[dpio_dev->eqresp_pi];
+			eqresp_meta->dpaa2_q = dpaa2_q;
+			eqresp_meta->mp = m->pool;
+
+			dpio_dev->eqresp_pi + 1 < MAX_EQ_RESP_ENTRIES ?
+				dpio_dev->eqresp_pi++ : (dpio_dev->eqresp_pi = 0);
+		} else {
+			qbman_eq_desc_set_orp(eqdesc, 0, orpid, seqnum, 0);
+		}
+	} else {
+		dq_idx = *dpaa2_seqn(m) - 1;
+		qbman_eq_desc_set_dca(eqdesc, 1, dq_idx, 0);
+		DPAA2_PER_LCORE_DQRR_SIZE--;
+		DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dq_idx);
+	}
+	*dpaa2_seqn(m) = DPAA2_INVALID_MBUF_SEQN;
+}
+
+
+static uint16_t
+dpaa2_sec_enqueue_burst_ordered(void *qp, struct rte_crypto_op **ops,
+			uint16_t nb_ops)
+{
+	/* Function to transmit the frames to given device and VQ*/
+	uint32_t loop;
+	int32_t ret;
+	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
+	uint32_t frames_to_send, num_free_eq_desc, retry_count;
+	struct qbman_eq_desc eqdesc[MAX_TX_RING_SLOTS];
+	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
+	struct qbman_swp *swp;
+	uint16_t num_tx = 0;
+	uint16_t bpid;
+	struct rte_mempool *mb_pool;
+	struct dpaa2_sec_dev_private *priv =
+				dpaa2_qp->tx_vq.crypto_data->dev_private;
+
+	if (unlikely(nb_ops == 0))
+		return 0;
+
+	if (ops[0]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
+		DPAA2_SEC_ERR("sessionless crypto op not supported");
+		return 0;
+	}
+
+	if (!DPAA2_PER_LCORE_DPIO) {
+		ret = dpaa2_affine_qbman_swp();
+		if (ret) {
+			DPAA2_SEC_ERR("Failure in affining portal");
+			return 0;
+		}
+	}
+	swp = DPAA2_PER_LCORE_PORTAL;
+
+	while (nb_ops) {
+		frames_to_send = (nb_ops > dpaa2_eqcr_size) ?
+			dpaa2_eqcr_size : nb_ops;
+
+		if (!priv->en_loose_ordered) {
+			if (*dpaa2_seqn((*ops)->sym->m_src)) {
+				num_free_eq_desc = dpaa2_free_eq_descriptors();
+				if (num_free_eq_desc < frames_to_send)
+					frames_to_send = num_free_eq_desc;
+			}
+		}
+
+		for (loop = 0; loop < frames_to_send; loop++) {
+			/*Prepare enqueue descriptor*/
+			qbman_eq_desc_clear(&eqdesc[loop]);
+			qbman_eq_desc_set_fq(&eqdesc[loop], dpaa2_qp->tx_vq.fqid);
+
+			if (*dpaa2_seqn((*ops)->sym->m_src))
+				dpaa2_sec_set_enqueue_descriptor(
+						&dpaa2_qp->tx_vq,
+						(*ops)->sym->m_src,
+						&eqdesc[loop]);
+			else
+				qbman_eq_desc_set_no_orp(&eqdesc[loop],
+							 DPAA2_EQ_RESP_ERR_FQ);
+
+			/*Clear the unused FD fields before sending*/
+			memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
+			mb_pool = (*ops)->sym->m_src->pool;
+			bpid = mempool_to_bpid(mb_pool);
+			ret = build_sec_fd(*ops, &fd_arr[loop], bpid);
+			if (ret) {
+				DPAA2_SEC_ERR("error: Improper packet contents"
+					      " for crypto operation");
+				goto skip_tx;
+			}
+			ops++;
+		}
+
+		loop = 0;
+		retry_count = 0;
+		while (loop < frames_to_send) {
+			ret = qbman_swp_enqueue_multiple_desc(swp,
+					&eqdesc[loop], &fd_arr[loop],
+					frames_to_send - loop);
+			if (unlikely(ret < 0)) {
+				retry_count++;
+				if (retry_count > DPAA2_MAX_TX_RETRY_COUNT) {
+					num_tx += loop;
+					nb_ops -= loop;
+					goto skip_tx;
+				}
+			} else {
+				loop += ret;
+				retry_count = 0;
+			}
+		}
+
+		num_tx += loop;
+		nb_ops -= loop;
+	}
+
+skip_tx:
+	dpaa2_qp->tx_vq.tx_pkts += num_tx;
+	dpaa2_qp->tx_vq.err_pkts += nb_ops;
+	return num_tx;
+}
+
 static uint16_t
 dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
 			uint16_t nb_ops)
@@ -3622,6 +3785,10 @@ dpaa2_sec_dev_start(struct rte_cryptodev *dev)
 
 	PMD_INIT_FUNC_TRACE();
 
+	/* Change the tx burst function if ordered queues are used */
+	if (priv->en_ordered)
+		dev->enqueue_burst = dpaa2_sec_enqueue_burst_ordered;
+
 	memset(&attr, 0, sizeof(struct dpseci_attr));
 
 	ret = dpseci_enable(dpseci, CMD_PRI_LOW, priv->token);
@@ -3834,12 +4001,46 @@ dpaa2_sec_process_atomic_event(struct qbman_swp *swp __rte_unused,
 
 	ev->event_ptr = sec_fd_to_mbuf(fd);
 	dqrr_index = qbman_get_dqrr_idx(dq);
-	*dpaa2_seqn(crypto_op->sym->m_src) = dqrr_index + 1;
+	*dpaa2_seqn(crypto_op->sym->m_src) = QBMAN_ENQUEUE_FLAG_DCA | dqrr_index;
 	DPAA2_PER_LCORE_DQRR_SIZE++;
 	DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
 	DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = crypto_op->sym->m_src;
 }
 
+static void __rte_hot
+dpaa2_sec_process_ordered_event(struct qbman_swp *swp,
+				const struct qbman_fd *fd,
+				const struct qbman_result *dq,
+				struct dpaa2_queue *rxq,
+				struct rte_event *ev)
+{
+	struct rte_crypto_op *crypto_op = (struct rte_crypto_op *)ev->event_ptr;
+
+	/* Prefetching mbuf */
+	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-
+		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size));
+
+	/* Prefetching ipsec crypto_op stored in priv data of mbuf */
+	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64));
+
+	ev->flow_id = rxq->ev.flow_id;
+	ev->sub_event_type = rxq->ev.sub_event_type;
+	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
+	ev->op = RTE_EVENT_OP_NEW;
+	ev->sched_type = rxq->ev.sched_type;
+	ev->queue_id = rxq->ev.queue_id;
+	ev->priority = rxq->ev.priority;
+	ev->event_ptr = sec_fd_to_mbuf(fd);
+
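+	/*
+	 * Save the ODP id and sequence number from the dequeue result in
+	 * the mbuf seqn; the ordered enqueue path uses them to build the
+	 * order-restoration (ORP) enqueue descriptor.
+	 */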
+	*dpaa2_seqn(crypto_op->sym->m_src) = DPAA2_ENQUEUE_FLAG_ORP;
+	*dpaa2_seqn(crypto_op->sym->m_src) |= qbman_result_DQ_odpid(dq) <<
+		DPAA2_EQCR_OPRID_SHIFT;
+	*dpaa2_seqn(crypto_op->sym->m_src) |= qbman_result_DQ_seqnum(dq) <<
+		DPAA2_EQCR_SEQNUM_SHIFT;
+
+	qbman_swp_dqrr_consume(swp, dq);
+}
+
 int
 dpaa2_sec_eventq_attach(const struct rte_cryptodev *dev,
 		int qp_id,
@@ -3857,6 +4058,8 @@ dpaa2_sec_eventq_attach(const struct rte_cryptodev *dev,
 		qp->rx_vq.cb = dpaa2_sec_process_parallel_event;
 	else if (event->sched_type == RTE_SCHED_TYPE_ATOMIC)
 		qp->rx_vq.cb = dpaa2_sec_process_atomic_event;
+	else if (event->sched_type == RTE_SCHED_TYPE_ORDERED)
+		qp->rx_vq.cb = dpaa2_sec_process_ordered_event;
 	else
 		return -EINVAL;
 
@@ -3875,6 +4078,37 @@ dpaa2_sec_eventq_attach(const struct rte_cryptodev *dev,
 		cfg.options |= DPSECI_QUEUE_OPT_ORDER_PRESERVATION;
 		cfg.order_preservation_en = 1;
 	}
+
+	if (event->sched_type == RTE_SCHED_TYPE_ORDERED) {
+		struct opr_cfg ocfg;
+
+		/* Restoration window size = 256 frames */
+		ocfg.oprrws = 3;
+		/* Restoration window size = 512 frames for LX2 */
+		if (dpaa2_svr_family == SVR_LX2160A)
+			ocfg.oprrws = 4;
+		/* Auto advance NESN window enabled */
+		ocfg.oa = 1;
+		/* Late arrival window size disabled */
+		ocfg.olws = 0;
+		/* ORL resource exhaustion advance NESN disabled */
+		ocfg.oeane = 0;
+
+		if (priv->en_loose_ordered)
+			ocfg.oloe = 1;
+		else
+			ocfg.oloe = 0;
+
+		ret = dpseci_set_opr(dpseci, CMD_PRI_LOW, priv->token,
+				   qp_id, OPR_OPT_CREATE, &ocfg);
+		if (ret) {
+			RTE_LOG(ERR, PMD, "Error setting opr: ret: %d\n", ret);
+			return ret;
+		}
+		qp->tx_vq.cb_eqresp_free = dpaa2_sec_free_eqresp_buf;
+		priv->en_ordered = 1;
+	}
+
 	ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
 				  qp_id, &cfg);
 	if (ret) {
@@ -3979,24 +4213,35 @@ dpaa2_sec_uninit(const struct rte_cryptodev *dev)
 }
 
 static int
-check_devargs_handler(__rte_unused const char *key, const char *value,
-		      __rte_unused void *opaque)
+check_devargs_handler(const char *key, const char *value,
+		      void *opaque)
 {
-	dpaa2_sec_dp_dump = atoi(value);
-	if (dpaa2_sec_dp_dump > DPAA2_SEC_DP_FULL_DUMP) {
-		DPAA2_SEC_WARN("WARN: DPAA2_SEC_DP_DUMP_LEVEL is not "
-			      "supported, changing to FULL error prints\n");
-		dpaa2_sec_dp_dump = DPAA2_SEC_DP_FULL_DUMP;
-	}
+	struct rte_cryptodev *dev = (struct rte_cryptodev *)opaque;
+	struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
+
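+	/* drv_strict_order: presence alone disables loose ordering, value ignored */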
+	if (!strcmp(key, "drv_strict_order")) {
+		priv->en_loose_ordered = false;
+	} else if (!strcmp(key, "drv_dump_mode")) {
+		dpaa2_sec_dp_dump = atoi(value);
+		if (dpaa2_sec_dp_dump > DPAA2_SEC_DP_FULL_DUMP) {
+			DPAA2_SEC_WARN("WARN: DPAA2_SEC_DP_DUMP_LEVEL is not "
+				      "supported, changing to FULL error"
+				      " prints\n");
+			dpaa2_sec_dp_dump = DPAA2_SEC_DP_FULL_DUMP;
+		}
+	} else
+		return -1;
 
 	return 0;
 }
 
 static void
-dpaa2_sec_get_devargs(struct rte_devargs *devargs, const char *key)
+dpaa2_sec_get_devargs(struct rte_cryptodev *cryptodev, const char *key)
 {
 	struct rte_kvargs *kvlist;
+	struct rte_devargs *devargs;
 
+	devargs = cryptodev->device->devargs;
 	if (!devargs)
 		return;
 
@@ -4010,7 +4255,7 @@ dpaa2_sec_get_devargs(struct rte_devargs *devargs, const char *key)
 	}
 
 	rte_kvargs_process(kvlist, key,
-			check_devargs_handler, NULL);
+			check_devargs_handler, (void *)cryptodev);
 	rte_kvargs_free(kvlist);
 }
 
@@ -4101,6 +4346,7 @@ dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
 	cryptodev->data->nb_queue_pairs = internals->max_nb_queue_pairs;
 	internals->hw = dpseci;
 	internals->token = token;
+	internals->en_loose_ordered = true;
 
 	snprintf(str, sizeof(str), "sec_fle_pool_p%d_%d",
 			getpid(), cryptodev->data->dev_id);
@@ -4115,7 +4361,8 @@ dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
 		goto init_error;
 	}
 
-	dpaa2_sec_get_devargs(cryptodev->device->devargs, DRIVER_DUMP_MODE);
+	dpaa2_sec_get_devargs(cryptodev, DRIVER_DUMP_MODE);
+	dpaa2_sec_get_devargs(cryptodev, DRIVER_STRICT_ORDER);
 	DPAA2_SEC_INFO("driver %s: created", cryptodev->data->name);
 	return 0;
 
@@ -4215,5 +4462,6 @@ RTE_PMD_REGISTER_DPAA2(CRYPTODEV_NAME_DPAA2_SEC_PMD, rte_dpaa2_sec_driver);
 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa2_sec_crypto_drv,
 		rte_dpaa2_sec_driver.driver, cryptodev_driver_id);
 RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_DPAA2_SEC_PMD,
+		DRIVER_STRICT_ORDER "=<int> "
 		DRIVER_DUMP_MODE "=<int>");
 RTE_LOG_REGISTER(dpaa2_logtype_sec, pmd.crypto.dpaa2, NOTICE);
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
index a8f9440632..3094778a7a 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
@@ -37,6 +37,8 @@ struct dpaa2_sec_dev_private {
 	uint16_t token; /**< Token required by DPxxx objects */
 	unsigned int max_nb_queue_pairs;
 	/**< Max number of queue pairs supported by device */
+	uint8_t en_ordered;
+	uint8_t en_loose_ordered;
 };
 
 struct dpaa2_sec_qp {
diff --git a/drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h b/drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h
index 279e8f4d4a..c295c04f24 100644
--- a/drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h
+++ b/drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
  *
  * Copyright 2013-2016 Freescale Semiconductor Inc.
- * Copyright 2016-2017 NXP
+ * Copyright 2016-2020 NXP
  *
  */
 #ifndef __FSL_DPSECI_H
@@ -11,6 +11,8 @@
  * Contains initialization APIs and runtime control APIs for DPSECI
  */
 
+#include <fsl_dpopr.h>
+
 struct fsl_mc_io;
 
 /**
@@ -41,6 +43,16 @@ int dpseci_close(struct fsl_mc_io *mc_io,
  */
 #define DPSECI_OPT_HAS_CG				0x000020
 
+/**
+ * Enable the Order Restoration support
+ */
+#define DPSECI_OPT_HAS_OPR				0x000040
+
+/**
+ * Order Point Records are shared for the entire DPSECI
+ */
+#define DPSECI_OPT_OPR_SHARED				0x000080
+
 /**
  * struct dpseci_cfg - Structure representing DPSECI configuration
  * @options: Any combination of the following options:
-- 
2.25.1


^ permalink raw reply	[flat|nested] 42+ messages in thread

* [PATCH v4 7/7] crypto/dpaa_sec: add debug framework
  2022-02-10 10:58         ` [PATCH v4 0/7] NXP crypto drivers changes Gagandeep Singh
                             ` (5 preceding siblings ...)
  2022-02-10 10:58           ` [PATCH v4 6/7] crypto/dpaa2_sec: ordered queue support Gagandeep Singh
@ 2022-02-10 10:58           ` Gagandeep Singh
  2022-02-12 11:21           ` [EXT] [PATCH v4 0/7] NXP crypto drivers changes Akhil Goyal
  7 siblings, 0 replies; 42+ messages in thread
From: Gagandeep Singh @ 2022-02-10 10:58 UTC (permalink / raw)
  To: gakhil, dev; +Cc: Gagandeep Singh

Add useful debug prints in the DPAA driver to
ease debugging.

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
---
 doc/guides/cryptodevs/dpaa_sec.rst |  10 ++
 drivers/bus/dpaa/dpaa_bus.c        |  16 ++-
 drivers/crypto/dpaa_sec/dpaa_sec.c | 192 ++++++++++++++++++++++++++++-
 3 files changed, 213 insertions(+), 5 deletions(-)

diff --git a/doc/guides/cryptodevs/dpaa_sec.rst b/doc/guides/cryptodevs/dpaa_sec.rst
index bac82421bc..0c8d6cf3da 100644
--- a/doc/guides/cryptodevs/dpaa_sec.rst
+++ b/doc/guides/cryptodevs/dpaa_sec.rst
@@ -123,3 +123,13 @@ For enabling logs, use the following EAL parameter:
 
 Using ``pmd.crypto.dpaa`` as log matching criteria, all Crypto PMD logs can be
 enabled which are lower than logging ``level``.
+
+Enabling debug prints
+---------------------
+
+Use the devargs option ``drv_dump_mode=x`` to dump useful debug information
+when a HW SEC error occurs. Three dump modes are available: 0, 1 and 2.
+Mode 0 disables the dump prints on error, mode 1 dumps only the HW error code,
+and mode 2 dumps the HW error code along with other useful debugging
+information such as the session, queue and descriptor data.
+e.g. ``dpaa_bus:dpaa_sec-1,drv_dump_mode=1``
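A minimal sketch of requesting the full dump mode from application code rather
than on the EAL command line. This is only an illustration: the device name
``dpaa_sec-1``, and whether the DPAA bus accepts hot-plug style probing through
rte_dev_probe(), are assumptions here and not part of this patch; the devargs
string itself is the one documented above.

#include <rte_dev.h>
#include <rte_log.h>

/* Probe the first DPAA SEC instance with full error dumps (drv_dump_mode=2). */
static int
enable_dpaa_sec_full_dump(void)
{
	const char *devargs = "dpaa_bus:dpaa_sec-1,drv_dump_mode=2";

	if (rte_dev_probe(devargs) < 0) {
		RTE_LOG(ERR, USER1, "Probe of %s failed\n", devargs);
		return -1;
	}
	return 0;
}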
diff --git a/drivers/bus/dpaa/dpaa_bus.c b/drivers/bus/dpaa/dpaa_bus.c
index 5546a9cb8d..e442bc4c33 100644
--- a/drivers/bus/dpaa/dpaa_bus.c
+++ b/drivers/bus/dpaa/dpaa_bus.c
@@ -429,6 +429,7 @@ rte_dpaa_bus_parse(const char *name, void *out)
 {
 	unsigned int i, j;
 	size_t delta;
+	size_t max_name_len;
 
 	/* There are two ways of passing device name, with and without
 	 * separator. "dpaa_bus:fm1-mac3" with separator, and "fm1-mac3"
@@ -444,14 +445,21 @@ rte_dpaa_bus_parse(const char *name, void *out)
 		delta = 5;
 	}
 
-	if (sscanf(&name[delta], "fm%u-mac%u", &i, &j) != 2 ||
-	    i >= 2 || j >= 16) {
-		return -EINVAL;
+	if (strncmp("dpaa_sec", &name[delta], 8) == 0) {
+		if (sscanf(&name[delta], "dpaa_sec-%u", &i) != 1 ||
+				i < 1 || i > 4)
+			return -EINVAL;
+		max_name_len = sizeof("dpaa_sec-.") - 1;
+	} else {
+		if (sscanf(&name[delta], "fm%u-mac%u", &i, &j) != 2 ||
+				i >= 2 || j >= 16)
+			return -EINVAL;
+
+		max_name_len = sizeof("fm.-mac..") - 1;
 	}
 
 	if (out != NULL) {
 		char *out_name = out;
-		const size_t max_name_len = sizeof("fm.-mac..") - 1;
 
 		/* Do not check for truncation, either name ends with
 		 * '\0' or the device name is followed by parameters and there
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.c b/drivers/crypto/dpaa_sec/dpaa_sec.c
index 75e437f696..ed12d6663b 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.c
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.c
@@ -27,6 +27,7 @@
 #include <rte_memcpy.h>
 #include <rte_string_fns.h>
 #include <rte_spinlock.h>
+#include <rte_hexdump.h>
 
 #include <fsl_usd.h>
 #include <fsl_qman.h>
@@ -45,6 +46,17 @@
 #include <dpaa_sec_log.h>
 #include <dpaax_iova_table.h>
 
+#define DRIVER_DUMP_MODE "drv_dump_mode"
+
+/* DPAA_SEC_DP_DUMP levels */
+enum dpaa_sec_dump_levels {
+	DPAA_SEC_DP_NO_DUMP,
+	DPAA_SEC_DP_ERR_DUMP,
+	DPAA_SEC_DP_FULL_DUMP
+};
+
+uint8_t dpaa_sec_dp_dump = DPAA_SEC_DP_ERR_DUMP;
+
 uint8_t dpaa_cryptodev_driver_id;
 
 static inline void
@@ -649,6 +661,139 @@ dpaa_sec_prep_cdb(dpaa_sec_session *ses)
 	return 0;
 }
 
+static void
+dpaa_sec_dump(struct dpaa_sec_op_ctx *ctx, struct dpaa_sec_qp *qp)
+{
+	struct dpaa_sec_job *job = &ctx->job;
+	struct rte_crypto_op *op = ctx->op;
+	dpaa_sec_session *sess = NULL;
+	struct sec_cdb c_cdb, *cdb;
+	uint8_t bufsize;
+	struct rte_crypto_sym_op *sym_op;
+	struct qm_sg_entry sg[2];
+
+	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
+		sess = (dpaa_sec_session *)
+			get_sym_session_private_data(
+					op->sym->session,
+					dpaa_cryptodev_driver_id);
+#ifdef RTE_LIBRTE_SECURITY
+	else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
+		sess = (dpaa_sec_session *)
+			get_sec_session_private_data(
+					op->sym->sec_session);
+#endif
+	if (sess == NULL) {
+		printf("session is NULL\n");
+		goto mbuf_dump;
+	}
+
+	cdb = &sess->cdb;
+	rte_memcpy(&c_cdb, cdb, sizeof(struct sec_cdb));
+#ifdef RTE_LIBRTE_SECURITY
+	printf("\nsession protocol type = %d\n", sess->proto_alg);
+#endif
+	printf("\n****************************************\n"
+		"session params:\n\tContext type:\t%d\n\tDirection:\t%s\n"
+		"\tCipher alg:\t%d\n\tAuth alg:\t%d\n\tAead alg:\t%d\n"
+		"\tCipher key len:\t%"PRIu64"\n\tCipher alg:\t%d\n"
+		"\tCipher algmode:\t%d\n", sess->ctxt,
+		(sess->dir == DIR_ENC) ? "DIR_ENC" : "DIR_DEC",
+		sess->cipher_alg, sess->auth_alg, sess->aead_alg,
+		(uint64_t)sess->cipher_key.length, sess->cipher_key.alg,
+		sess->cipher_key.algmode);
+	rte_hexdump(stdout, "cipher key", sess->cipher_key.data,
+			sess->cipher_key.length);
+	rte_hexdump(stdout, "auth key", sess->auth_key.data,
+			sess->auth_key.length);
+	printf("\tAuth key len:\t%"PRIu64"\n\tAuth alg:\t%d\n"
+		"\tAuth algmode:\t%d\n\tIV len:\t\t%d\n\tIV offset:\t%d\n"
+		"\tdigest length:\t%d\n\tauth only len:\t\t%d\n"
+		"\taead cipher text:\t%d\n",
+		(uint64_t)sess->auth_key.length, sess->auth_key.alg,
+		sess->auth_key.algmode,
+		sess->iv.length, sess->iv.offset,
+		sess->digest_length, sess->auth_only_len,
+		sess->auth_cipher_text);
+#ifdef RTE_LIBRTE_SECURITY
+	printf("PDCP session params:\n"
+		"\tDomain:\t\t%d\n\tBearer:\t\t%d\n\tpkt_dir:\t%d\n\thfn_ovd:"
+		"\t%d\n\tsn_size:\t%d\n\tsdap_enabled:\t%d\n\thfn_ovd_offset:"
+		"\t%d\n\thfn:\t\t%d\n"
+		"\thfn_threshold:\t0x%x\n", sess->pdcp.domain,
+		sess->pdcp.bearer, sess->pdcp.pkt_dir, sess->pdcp.hfn_ovd,
+		sess->pdcp.sn_size, sess->pdcp.sdap_enabled,
+		sess->pdcp.hfn_ovd_offset, sess->pdcp.hfn,
+		sess->pdcp.hfn_threshold);
+#endif
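+	/* Shared descriptor header words are stored big-endian; convert before parsing */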
+	c_cdb.sh_hdr.hi.word = rte_be_to_cpu_32(c_cdb.sh_hdr.hi.word);
+	c_cdb.sh_hdr.lo.word = rte_be_to_cpu_32(c_cdb.sh_hdr.lo.word);
+	bufsize = c_cdb.sh_hdr.hi.field.idlen;
+
+	printf("cdb = %p\n\n", cdb);
+	printf("Descriptor size = %d\n", bufsize);
+	int m;
+	for (m = 0; m < bufsize; m++)
+		printf("0x%x\n", rte_be_to_cpu_32(c_cdb.sh_desc[m]));
+
+	printf("\n");
+mbuf_dump:
+	sym_op = op->sym;
+	if (sym_op->m_src) {
+		printf("Source mbuf:\n");
+		rte_pktmbuf_dump(stdout, sym_op->m_src,
+				 sym_op->m_src->data_len);
+	}
+	if (sym_op->m_dst) {
+		printf("Destination mbuf:\n");
+		rte_pktmbuf_dump(stdout, sym_op->m_dst,
+				 sym_op->m_dst->data_len);
+	}
+
+	printf("Session address = %p\ncipher offset: %d, length: %d\n"
+		"auth offset: %d, length:  %d\n aead offset: %d, length: %d\n",
+		sym_op->session, sym_op->cipher.data.offset,
+		sym_op->cipher.data.length,
+		sym_op->auth.data.offset, sym_op->auth.data.length,
+		sym_op->aead.data.offset, sym_op->aead.data.length);
+	printf("\n");
+
+	printf("******************************************************\n");
+	printf("ctx info:\n");
+	printf("job->sg[0] output info:\n");
+	memcpy(&sg[0], &job->sg[0], sizeof(sg[0]));
+	printf("\taddr = %"PRIx64",\n\tlen = %d,\n\tfinal = %d,\n\textention = %d"
+		"\n\tbpid = %d\n\toffset = %d\n",
+		(uint64_t)sg[0].addr, sg[0].length, sg[0].final,
+		sg[0].extension, sg[0].bpid, sg[0].offset);
+	printf("\njob->sg[1] input info:\n");
+	memcpy(&sg[1], &job->sg[1], sizeof(sg[1]));
+	hw_sg_to_cpu(&sg[1]);
+	printf("\taddr = %"PRIx64",\n\tlen = %d,\n\tfinal = %d,\n\textention = %d"
+		"\n\tbpid = %d\n\toffset = %d\n",
+		(uint64_t)sg[1].addr, sg[1].length, sg[1].final,
+		sg[1].extension, sg[1].bpid, sg[1].offset);
+
+	printf("\nctx pool addr = %p\n", ctx->ctx_pool);
+	if (ctx->ctx_pool)
+		printf("ctx pool available counts = %d\n",
+			rte_mempool_avail_count(ctx->ctx_pool));
+
+	printf("\nop pool addr = %p\n", op->mempool);
+	if (op->mempool)
+		printf("op pool available counts = %d\n",
+			rte_mempool_avail_count(op->mempool));
+
+	printf("********************************************************\n");
+	printf("Queue data:\n");
+	printf("\tFQID = 0x%x\n\tstate = %d\n\tnb_desc = %d\n"
+		"\tctx_pool = %p\n\trx_pkts = %d\n\ttx_pkts"
+	       "= %d\n\trx_errs = %d\n\ttx_errs = %d\n\n",
+		qp->outq.fqid, qp->outq.state, qp->outq.nb_desc,
+		qp->ctx_pool, qp->rx_pkts, qp->tx_pkts,
+		qp->rx_errs, qp->tx_errs);
+}
+
 /* qp is lockless, should be accessed by only one thread */
 static int
 dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
@@ -716,7 +861,12 @@ dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
 		if (!ctx->fd_status) {
 			op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
 		} else {
-			DPAA_SEC_DP_WARN("SEC return err:0x%x", ctx->fd_status);
+			if (dpaa_sec_dp_dump > DPAA_SEC_DP_NO_DUMP) {
+				DPAA_SEC_DP_WARN("SEC return err:0x%x\n",
+						  ctx->fd_status);
+				if (dpaa_sec_dp_dump > DPAA_SEC_DP_ERR_DUMP)
+					dpaa_sec_dump(ctx, qp);
+			}
 			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
 		}
 		ops[pkts++] = op;
@@ -3458,6 +3608,42 @@ dpaa_sec_uninit(struct rte_cryptodev *dev)
 	return 0;
 }
 
+static int
+check_devargs_handler(__rte_unused const char *key, const char *value,
+		      __rte_unused void *opaque)
+{
+	dpaa_sec_dp_dump = atoi(value);
+	if (dpaa_sec_dp_dump > DPAA_SEC_DP_FULL_DUMP) {
+		DPAA_SEC_WARN("WARN: DPAA_SEC_DP_DUMP_LEVEL is not "
+			      "supported, changing to FULL error prints\n");
+		dpaa_sec_dp_dump = DPAA_SEC_DP_FULL_DUMP;
+	}
+
+	return 0;
+}
+
+static void
+dpaa_sec_get_devargs(struct rte_devargs *devargs, const char *key)
+{
+	struct rte_kvargs *kvlist;
+
+	if (!devargs)
+		return;
+
+	kvlist = rte_kvargs_parse(devargs->args, NULL);
+	if (!kvlist)
+		return;
+
+	if (!rte_kvargs_count(kvlist, key)) {
+		rte_kvargs_free(kvlist);
+		return;
+	}
+
+	rte_kvargs_process(kvlist, key,
+				check_devargs_handler, NULL);
+	rte_kvargs_free(kvlist);
+}
+
 static int
 dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
 {
@@ -3533,6 +3719,8 @@ dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
 		}
 	}
 
+	dpaa_sec_get_devargs(cryptodev->device->devargs, DRIVER_DUMP_MODE);
+
 	RTE_LOG(INFO, PMD, "%s cryptodev init\n", cryptodev->data->name);
 	return 0;
 
@@ -3649,4 +3837,6 @@ static struct cryptodev_driver dpaa_sec_crypto_drv;
 RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver,
 		dpaa_cryptodev_driver_id);
+RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_DPAA_SEC_PMD,
+		DRIVER_DUMP_MODE "=<int>");
 RTE_LOG_REGISTER(dpaa_logtype_sec, pmd.crypto.dpaa, NOTICE);
-- 
2.25.1


^ permalink raw reply	[flat|nested] 42+ messages in thread

* RE: [EXT] [PATCH v4 0/7] NXP crypto drivers changes
  2022-02-10 10:58         ` [PATCH v4 0/7] NXP crypto drivers changes Gagandeep Singh
                             ` (6 preceding siblings ...)
  2022-02-10 10:58           ` [PATCH v4 7/7] crypto/dpaa_sec: add debug framework Gagandeep Singh
@ 2022-02-12 11:21           ` Akhil Goyal
  7 siblings, 0 replies; 42+ messages in thread
From: Akhil Goyal @ 2022-02-12 11:21 UTC (permalink / raw)
  To: Gagandeep Singh, dev

> v4-change-log
> * fix i386 compilation
> 
> v3-change-log
> * fix checkpatch issues
> * use devargs for strict ordering
> * fix AES_CMAC capabilities
> * remove GMAC patch from this series. I will send
> it as a separate patch.
> 
> v2-change-log
> * using dev args for both DPAA1 and DPAA2 drivers to
>  dump debug prints on sec error.
> 
Applied to dpdk-next-crypto

Thanks.

^ permalink raw reply	[flat|nested] 42+ messages in thread

end of thread, other threads:[~2022-02-12 11:21 UTC | newest]

Thread overview: 42+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2021-12-20 10:27 [PATCH 1/8] common/dpaax: caamflib: Remove code related to SEC ERA 1 to 7 Gagandeep Singh
2021-12-20 10:27 ` [PATCH 2/8] common/dpaax: change job processing mode for PDCP SDAP Gagandeep Singh
2021-12-20 10:27 ` [PATCH 3/8] crypto/dpaa2_sec: ordered queue support Gagandeep Singh
2021-12-20 10:27 ` [PATCH 4/8] crypto/dpaa2_sec: support AES-GMAC Gagandeep Singh
2021-12-20 10:27 ` [PATCH 5/8] crypto/dpaa2_sec: change digest size for AES_CMAC Gagandeep Singh
2021-12-20 10:27 ` [PATCH 6/8] crypto/dpaa2_sec: add useful debug prints in sec dequeue Gagandeep Singh
2021-12-20 10:27 ` [PATCH 7/8] crypto/dpaa2: fix to check next type for auth or cipher Gagandeep Singh
2021-12-20 10:27 ` [PATCH 8/8] crypto/dpaa_sec: add debug framework Gagandeep Singh
2021-12-24 13:02   ` [EXT] " Akhil Goyal
2021-12-28  9:10 ` [PATCH v2 0/8] NXP crypto drivers changes Gagandeep Singh
2021-12-28  9:10   ` [PATCH v2 1/8] common/dpaax: caamflib: Remove code related to SEC ERA 1 to 7 Gagandeep Singh
2022-02-10  4:31     ` [PATCH v3 0/7] NXP crypto drivers changes Gagandeep Singh
2022-02-10  4:31       ` [PATCH v3 1/7] common/dpaax: caamflib: Remove code related to SEC ERA 1 to 7 Gagandeep Singh
2022-02-10 10:58         ` [PATCH v4 0/7] NXP crypto drivers changes Gagandeep Singh
2022-02-10 10:58           ` [PATCH v4 1/7] common/dpaax: caamflib: Remove code related to SEC ERA 1 to 7 Gagandeep Singh
2022-02-10 10:58           ` [PATCH v4 2/7] common/dpaax: change job processing mode for PDCP SDAP Gagandeep Singh
2022-02-10 10:58           ` [PATCH v4 3/7] crypto/dpaa2_sec: change capabilities for AES_CMAC Gagandeep Singh
2022-02-10 10:58           ` [PATCH v4 4/7] crypto/dpaa2_sec: add useful debug prints in sec dequeue Gagandeep Singh
2022-02-10 10:58           ` [PATCH v4 5/7] crypto/dpaa2: fix to check next type for auth or cipher Gagandeep Singh
2022-02-10 10:58           ` [PATCH v4 6/7] crypto/dpaa2_sec: ordered queue support Gagandeep Singh
2022-02-10 10:58           ` [PATCH v4 7/7] crypto/dpaa_sec: add debug framework Gagandeep Singh
2022-02-12 11:21           ` [EXT] [PATCH v4 0/7] NXP crypto drivers changes Akhil Goyal
2022-02-10  4:31       ` [PATCH v3 2/7] common/dpaax: change job processing mode for PDCP SDAP Gagandeep Singh
2022-02-10  4:31       ` [PATCH v3 3/7] crypto/dpaa2_sec: change capabilities for AES_CMAC Gagandeep Singh
2022-02-10  4:31       ` [PATCH v3 4/7] crypto/dpaa2_sec: add useful debug prints in sec dequeue Gagandeep Singh
2022-02-10  4:31       ` [PATCH v3 5/7] crypto/dpaa2: fix to check next type for auth or cipher Gagandeep Singh
2022-02-10  4:31       ` [PATCH v3 6/7] crypto/dpaa2_sec: ordered queue support Gagandeep Singh
2022-02-10  4:31       ` [PATCH v3 7/7] crypto/dpaa_sec: add debug framework Gagandeep Singh
2022-02-10  7:03       ` [EXT] [PATCH v3 0/7] NXP crypto drivers changes Akhil Goyal
2021-12-28  9:10   ` [PATCH v2 2/8] common/dpaax: change job processing mode for PDCP SDAP Gagandeep Singh
2021-12-28  9:10   ` [PATCH v2 3/8] crypto/dpaa2_sec: ordered queue support Gagandeep Singh
2022-01-21 11:31     ` [EXT] " Akhil Goyal
2021-12-28  9:10   ` [PATCH v2 4/8] crypto/dpaa2_sec: support AES-GMAC Gagandeep Singh
2022-01-21 11:29     ` [EXT] " Akhil Goyal
2022-02-08 14:15       ` Gagandeep Singh
2021-12-28  9:10   ` [PATCH v2 5/8] crypto/dpaa2_sec: change digest size for AES_CMAC Gagandeep Singh
2022-01-21 11:23     ` [EXT] " Akhil Goyal
2022-02-08 14:11       ` Gagandeep Singh
2021-12-28  9:10   ` [PATCH v2 6/8] crypto/dpaa2_sec: add useful debug prints in sec dequeue Gagandeep Singh
2021-12-28  9:10   ` [PATCH v2 7/8] crypto/dpaa2: fix to check next type for auth or cipher Gagandeep Singh
2021-12-28  9:10   ` [PATCH v2 8/8] crypto/dpaa_sec: add debug framework Gagandeep Singh
2022-01-21 11:20     ` [EXT] " Akhil Goyal

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).