DPDK patches and discussions
 help / color / mirror / Atom feed
* [PATCH 0/3] support new full context firmware
@ 2022-06-20  7:18 Tejasree Kondoj
  2022-06-20  7:18 ` [PATCH 1/3] crypto/cnxk: move IPsec SA creation to common Tejasree Kondoj
                   ` (3 more replies)
  0 siblings, 4 replies; 5+ messages in thread
From: Tejasree Kondoj @ 2022-06-20  7:18 UTC (permalink / raw)
  To: Akhil Goyal
  Cc: Jerin Jacob, Anoob Joseph, Nithin Dabilpuram,
	Vidya Sagar Velumuri, Archana Muniganti, Ankur Dwivedi,
	Kiran Kumar K, Sunil Kumar Kori, Satha Rao, dev

This series adds support for the new full context
microcode in the cn9k IPsec PMD.

Anoob Joseph (1):
  crypto/cnxk: improvements to fastpath handling

Tejasree Kondoj (1):
  crypto/cnxk: add anti-replay as per new firmware

Vidya Sagar Velumuri (1):
  crypto/cnxk: move IPsec SA creation to common

 drivers/common/cnxk/cnxk_security.c           | 398 ++++++++++++
 drivers/common/cnxk/cnxk_security.h           |  11 +
 drivers/common/cnxk/roc_cpt.c                 |  93 +++
 drivers/common/cnxk/roc_cpt.h                 |   3 +
 drivers/common/cnxk/roc_ie_on.h               |  24 +-
 drivers/common/cnxk/version.map               |   3 +
 drivers/crypto/cnxk/cn9k_cryptodev_ops.c      |  96 ++-
 drivers/crypto/cnxk/cn9k_ipsec.c              | 606 +++---------------
 drivers/crypto/cnxk/cn9k_ipsec.h              |   7 +-
 drivers/crypto/cnxk/cn9k_ipsec_la_ops.h       | 125 +---
 .../crypto/cnxk/cnxk_cryptodev_capabilities.c |   1 +
 drivers/crypto/cnxk/cnxk_cryptodev_ops.h      |  13 +-
 12 files changed, 736 insertions(+), 644 deletions(-)

-- 
2.25.1


^ permalink raw reply	[flat|nested] 5+ messages in thread

* [PATCH 1/3] crypto/cnxk: move IPsec SA creation to common
  2022-06-20  7:18 [PATCH 0/3] support new full context firmware Tejasree Kondoj
@ 2022-06-20  7:18 ` Tejasree Kondoj
  2022-06-20  7:18 ` [PATCH 2/3] crypto/cnxk: improvements to fastpath handling Tejasree Kondoj
                   ` (2 subsequent siblings)
  3 siblings, 0 replies; 5+ messages in thread
From: Tejasree Kondoj @ 2022-06-20  7:18 UTC (permalink / raw)
  To: Akhil Goyal
  Cc: Vidya Sagar Velumuri, Jerin Jacob, Anoob Joseph,
	Nithin Dabilpuram, Archana Muniganti, Ankur Dwivedi,
	Kiran Kumar K, Sunil Kumar Kori, Satha Rao, dev

From: Vidya Sagar Velumuri <vvelumuri@marvell.com>

Move the IPsec SA creation to common code.
The code can then also be used by the fastpath to create the SAs.
Add changes to support the new full context microcode.

Signed-off-by: Vidya Sagar Velumuri <vvelumuri@marvell.com>
Signed-off-by: Archana Muniganti <marchana@marvell.com>
---
 drivers/common/cnxk/cnxk_security.c      | 398 +++++++++++++++
 drivers/common/cnxk/cnxk_security.h      |  11 +
 drivers/common/cnxk/roc_cpt.c            |  93 ++++
 drivers/common/cnxk/roc_cpt.h            |   3 +
 drivers/common/cnxk/roc_ie_on.h          |  21 +-
 drivers/common/cnxk/version.map          |   3 +
 drivers/crypto/cnxk/cn9k_cryptodev_ops.c |  24 +
 drivers/crypto/cnxk/cn9k_ipsec.c         | 594 +++--------------------
 drivers/crypto/cnxk/cn9k_ipsec_la_ops.h  |  16 +-
 drivers/crypto/cnxk/cnxk_cryptodev_ops.h |   1 +
 10 files changed, 631 insertions(+), 533 deletions(-)

diff --git a/drivers/common/cnxk/cnxk_security.c b/drivers/common/cnxk/cnxk_security.c
index 72ee5ee91f..dca8742be3 100644
--- a/drivers/common/cnxk/cnxk_security.c
+++ b/drivers/common/cnxk/cnxk_security.c
@@ -971,3 +971,401 @@ cnxk_ipsec_outb_rlens_get(struct cnxk_ipsec_outb_rlens *rlens,
 	rlens->max_extended_len = partial_len + roundup_len + roundup_byte;
 	return 0;
 }
+
+static inline int
+on_ipsec_sa_ctl_set(struct rte_security_ipsec_xform *ipsec,
+		    struct rte_crypto_sym_xform *crypto_xform,
+		    struct roc_ie_on_sa_ctl *ctl)
+{
+	struct rte_crypto_sym_xform *cipher_xform, *auth_xform;
+	int aes_key_len = 0;
+
+	if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+		auth_xform = crypto_xform;
+		cipher_xform = crypto_xform->next;
+	} else {
+		cipher_xform = crypto_xform;
+		auth_xform = crypto_xform->next;
+	}
+
+	if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS)
+		ctl->direction = ROC_IE_SA_DIR_OUTBOUND;
+	else
+		ctl->direction = ROC_IE_SA_DIR_INBOUND;
+
+	if (ipsec->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
+		if (ipsec->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4)
+			ctl->outer_ip_ver = ROC_IE_SA_IP_VERSION_4;
+		else if (ipsec->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV6)
+			ctl->outer_ip_ver = ROC_IE_SA_IP_VERSION_6;
+		else
+			return -EINVAL;
+	}
+
+	if (ipsec->mode == RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT) {
+		ctl->ipsec_mode = ROC_IE_SA_MODE_TRANSPORT;
+		ctl->outer_ip_ver = ROC_IE_SA_IP_VERSION_4;
+	} else if (ipsec->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL)
+		ctl->ipsec_mode = ROC_IE_SA_MODE_TUNNEL;
+	else
+		return -EINVAL;
+
+	if (ipsec->proto == RTE_SECURITY_IPSEC_SA_PROTO_AH)
+		ctl->ipsec_proto = ROC_IE_SA_PROTOCOL_AH;
+	else if (ipsec->proto == RTE_SECURITY_IPSEC_SA_PROTO_ESP)
+		ctl->ipsec_proto = ROC_IE_SA_PROTOCOL_ESP;
+	else
+		return -EINVAL;
+
+	if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+		switch (crypto_xform->aead.algo) {
+		case RTE_CRYPTO_AEAD_AES_GCM:
+			ctl->enc_type = ROC_IE_ON_SA_ENC_AES_GCM;
+			aes_key_len = crypto_xform->aead.key.length;
+			break;
+		default:
+			plt_err("Unsupported AEAD algorithm");
+			return -ENOTSUP;
+		}
+	} else {
+		if (cipher_xform != NULL) {
+			switch (cipher_xform->cipher.algo) {
+			case RTE_CRYPTO_CIPHER_NULL:
+				ctl->enc_type = ROC_IE_ON_SA_ENC_NULL;
+				break;
+			case RTE_CRYPTO_CIPHER_AES_CBC:
+				ctl->enc_type = ROC_IE_ON_SA_ENC_AES_CBC;
+				aes_key_len = cipher_xform->cipher.key.length;
+				break;
+			case RTE_CRYPTO_CIPHER_AES_CTR:
+				ctl->enc_type = ROC_IE_ON_SA_ENC_AES_CTR;
+				aes_key_len = cipher_xform->cipher.key.length;
+				break;
+			default:
+				plt_err("Unsupported cipher algorithm");
+				return -ENOTSUP;
+			}
+		}
+
+		switch (auth_xform->auth.algo) {
+		case RTE_CRYPTO_AUTH_NULL:
+			ctl->auth_type = ROC_IE_ON_SA_AUTH_NULL;
+			break;
+		case RTE_CRYPTO_AUTH_MD5_HMAC:
+			ctl->auth_type = ROC_IE_ON_SA_AUTH_MD5;
+			break;
+		case RTE_CRYPTO_AUTH_SHA1_HMAC:
+			ctl->auth_type = ROC_IE_ON_SA_AUTH_SHA1;
+			break;
+		case RTE_CRYPTO_AUTH_SHA224_HMAC:
+			ctl->auth_type = ROC_IE_ON_SA_AUTH_SHA2_224;
+			break;
+		case RTE_CRYPTO_AUTH_SHA256_HMAC:
+			ctl->auth_type = ROC_IE_ON_SA_AUTH_SHA2_256;
+			break;
+		case RTE_CRYPTO_AUTH_SHA384_HMAC:
+			ctl->auth_type = ROC_IE_ON_SA_AUTH_SHA2_384;
+			break;
+		case RTE_CRYPTO_AUTH_SHA512_HMAC:
+			ctl->auth_type = ROC_IE_ON_SA_AUTH_SHA2_512;
+			break;
+		case RTE_CRYPTO_AUTH_AES_GMAC:
+			ctl->auth_type = ROC_IE_ON_SA_AUTH_AES_GMAC;
+			aes_key_len = auth_xform->auth.key.length;
+			break;
+		case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
+			ctl->auth_type = ROC_IE_ON_SA_AUTH_AES_XCBC_128;
+			break;
+		default:
+			plt_err("Unsupported auth algorithm");
+			return -ENOTSUP;
+		}
+	}
+
+	/* Set AES key length */
+	if (ctl->enc_type == ROC_IE_ON_SA_ENC_AES_CBC ||
+	    ctl->enc_type == ROC_IE_ON_SA_ENC_AES_CCM ||
+	    ctl->enc_type == ROC_IE_ON_SA_ENC_AES_CTR ||
+	    ctl->enc_type == ROC_IE_ON_SA_ENC_AES_GCM ||
+	    ctl->auth_type == ROC_IE_ON_SA_AUTH_AES_GMAC) {
+		switch (aes_key_len) {
+		case 16:
+			ctl->aes_key_len = ROC_IE_SA_AES_KEY_LEN_128;
+			break;
+		case 24:
+			ctl->aes_key_len = ROC_IE_SA_AES_KEY_LEN_192;
+			break;
+		case 32:
+			ctl->aes_key_len = ROC_IE_SA_AES_KEY_LEN_256;
+			break;
+		default:
+			plt_err("Invalid AES key length");
+			return -EINVAL;
+		}
+	}
+
+	if (ipsec->options.esn)
+		ctl->esn_en = 1;
+
+	if (ipsec->options.udp_encap == 1)
+		ctl->encap_type = ROC_IE_ON_SA_ENCAP_UDP;
+
+	ctl->copy_df = ipsec->options.copy_df;
+
+	ctl->spi = rte_cpu_to_be_32(ipsec->spi);
+
+	rte_io_wmb();
+
+	ctl->valid = 1;
+
+	return 0;
+}
+
+static inline int
+on_fill_ipsec_common_sa(struct rte_security_ipsec_xform *ipsec,
+			struct rte_crypto_sym_xform *crypto_xform,
+			struct roc_ie_on_common_sa *common_sa)
+{
+	struct rte_crypto_sym_xform *cipher_xform, *auth_xform;
+	const uint8_t *cipher_key;
+	int cipher_key_len = 0;
+	int ret;
+
+	ret = on_ipsec_sa_ctl_set(ipsec, crypto_xform, &common_sa->ctl);
+	if (ret)
+		return ret;
+
+	if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+		auth_xform = crypto_xform;
+		cipher_xform = crypto_xform->next;
+	} else {
+		cipher_xform = crypto_xform;
+		auth_xform = crypto_xform->next;
+	}
+
+	if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+		if (crypto_xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
+			memcpy(common_sa->iv.gcm.nonce, &ipsec->salt, 4);
+		cipher_key = crypto_xform->aead.key.data;
+		cipher_key_len = crypto_xform->aead.key.length;
+	} else {
+		if (cipher_xform) {
+			cipher_key = cipher_xform->cipher.key.data;
+			cipher_key_len = cipher_xform->cipher.key.length;
+		}
+
+		if (auth_xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
+			memcpy(common_sa->iv.gcm.nonce, &ipsec->salt, 4);
+			cipher_key = auth_xform->auth.key.data;
+			cipher_key_len = auth_xform->auth.key.length;
+		}
+	}
+
+	if (cipher_key_len != 0)
+		memcpy(common_sa->cipher_key, cipher_key, cipher_key_len);
+
+	return 0;
+}
+
+int
+cnxk_on_ipsec_outb_sa_create(struct rte_security_ipsec_xform *ipsec,
+			     struct rte_crypto_sym_xform *crypto_xform,
+			     struct roc_ie_on_outb_sa *out_sa)
+{
+	struct roc_ie_on_ip_template *template = NULL;
+	struct rte_crypto_sym_xform *auth_xform;
+	struct roc_ie_on_sa_ctl *ctl;
+	struct rte_ipv6_hdr *ip6;
+	struct rte_ipv4_hdr *ip4;
+	const uint8_t *auth_key;
+	int auth_key_len = 0;
+	size_t ctx_len;
+	int ret;
+
+	ctl = &out_sa->common_sa.ctl;
+
+	if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+		auth_xform = crypto_xform;
+	else
+		auth_xform = crypto_xform->next;
+
+	ret = on_fill_ipsec_common_sa(ipsec, crypto_xform, &out_sa->common_sa);
+	if (ret)
+		return ret;
+
+	if (ctl->enc_type == ROC_IE_ON_SA_ENC_AES_GCM ||
+	    ctl->auth_type == ROC_IE_ON_SA_AUTH_NULL ||
+	    ctl->auth_type == ROC_IE_ON_SA_AUTH_AES_GMAC) {
+		template = &out_sa->aes_gcm.template;
+		ctx_len = offsetof(struct roc_ie_on_outb_sa, aes_gcm.template);
+	} else {
+		switch (ctl->auth_type) {
+		case ROC_IE_ON_SA_AUTH_SHA1:
+			template = &out_sa->sha1.template;
+			ctx_len = offsetof(struct roc_ie_on_outb_sa,
+					   sha1.template);
+			break;
+		case ROC_IE_ON_SA_AUTH_SHA2_256:
+		case ROC_IE_ON_SA_AUTH_SHA2_384:
+		case ROC_IE_ON_SA_AUTH_SHA2_512:
+			template = &out_sa->sha2.template;
+			ctx_len = offsetof(struct roc_ie_on_outb_sa,
+					   sha2.template);
+			break;
+		case ROC_IE_ON_SA_AUTH_AES_XCBC_128:
+			template = &out_sa->aes_xcbc.template;
+			ctx_len = offsetof(struct roc_ie_on_outb_sa,
+					   aes_xcbc.template);
+			break;
+		default:
+			plt_err("Unsupported auth algorithm");
+			return -EINVAL;
+		}
+	}
+
+	ip4 = (struct rte_ipv4_hdr *)&template->ip4.ipv4_hdr;
+	if (ipsec->options.udp_encap) {
+		ip4->next_proto_id = IPPROTO_UDP;
+		template->ip4.udp_src = rte_be_to_cpu_16(4500);
+		template->ip4.udp_dst = rte_be_to_cpu_16(4500);
+	} else {
+		if (ipsec->proto == RTE_SECURITY_IPSEC_SA_PROTO_AH)
+			ip4->next_proto_id = IPPROTO_AH;
+		else
+			ip4->next_proto_id = IPPROTO_ESP;
+	}
+
+	if (ipsec->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
+		if (ipsec->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
+			uint16_t frag_off = 0;
+
+			ctx_len += sizeof(template->ip4);
+
+			ip4->version_ihl = RTE_IPV4_VHL_DEF;
+			ip4->time_to_live = ipsec->tunnel.ipv4.ttl;
+			ip4->type_of_service |= (ipsec->tunnel.ipv4.dscp << 2);
+			if (ipsec->tunnel.ipv4.df)
+				frag_off |= RTE_IPV4_HDR_DF_FLAG;
+			ip4->fragment_offset = rte_cpu_to_be_16(frag_off);
+
+			memcpy(&ip4->src_addr, &ipsec->tunnel.ipv4.src_ip,
+			       sizeof(struct in_addr));
+			memcpy(&ip4->dst_addr, &ipsec->tunnel.ipv4.dst_ip,
+			       sizeof(struct in_addr));
+		} else if (ipsec->tunnel.type ==
+			   RTE_SECURITY_IPSEC_TUNNEL_IPV6) {
+			ctx_len += sizeof(template->ip6);
+
+			ip6 = (struct rte_ipv6_hdr *)&template->ip6.ipv6_hdr;
+			if (ipsec->options.udp_encap) {
+				ip6->proto = IPPROTO_UDP;
+				template->ip6.udp_src = rte_be_to_cpu_16(4500);
+				template->ip6.udp_dst = rte_be_to_cpu_16(4500);
+			} else {
+				ip6->proto = (ipsec->proto ==
+					      RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
+						     IPPROTO_ESP :
+						     IPPROTO_AH;
+			}
+			ip6->vtc_flow =
+				rte_cpu_to_be_32(0x60000000 |
+						 ((ipsec->tunnel.ipv6.dscp
+						   << RTE_IPV6_HDR_TC_SHIFT) &
+						  RTE_IPV6_HDR_TC_MASK) |
+						 ((ipsec->tunnel.ipv6.flabel
+						   << RTE_IPV6_HDR_FL_SHIFT) &
+						  RTE_IPV6_HDR_FL_MASK));
+			ip6->hop_limits = ipsec->tunnel.ipv6.hlimit;
+			memcpy(&ip6->src_addr, &ipsec->tunnel.ipv6.src_addr,
+			       sizeof(struct in6_addr));
+			memcpy(&ip6->dst_addr, &ipsec->tunnel.ipv6.dst_addr,
+			       sizeof(struct in6_addr));
+		}
+	} else
+		ctx_len += sizeof(template->ip4);
+
+	ctx_len += RTE_ALIGN_CEIL(ctx_len, 8);
+
+	if (crypto_xform->type != RTE_CRYPTO_SYM_XFORM_AEAD) {
+		auth_key = auth_xform->auth.key.data;
+		auth_key_len = auth_xform->auth.key.length;
+
+		switch (auth_xform->auth.algo) {
+		case RTE_CRYPTO_AUTH_AES_GMAC:
+		case RTE_CRYPTO_AUTH_NULL:
+			break;
+		case RTE_CRYPTO_AUTH_SHA1_HMAC:
+			memcpy(out_sa->sha1.hmac_key, auth_key, auth_key_len);
+			break;
+		case RTE_CRYPTO_AUTH_SHA256_HMAC:
+		case RTE_CRYPTO_AUTH_SHA384_HMAC:
+		case RTE_CRYPTO_AUTH_SHA512_HMAC:
+			memcpy(out_sa->sha2.hmac_key, auth_key, auth_key_len);
+			break;
+		case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
+			memcpy(out_sa->aes_xcbc.key, auth_key, auth_key_len);
+			break;
+		default:
+			plt_err("Unsupported auth algorithm %u",
+				auth_xform->auth.algo);
+			return -ENOTSUP;
+		}
+	}
+
+	return ctx_len;
+}
+
+int
+cnxk_on_ipsec_inb_sa_create(struct rte_security_ipsec_xform *ipsec,
+			    struct rte_crypto_sym_xform *crypto_xform,
+			    struct roc_ie_on_inb_sa *in_sa)
+{
+	struct rte_crypto_sym_xform *auth_xform = crypto_xform;
+	const uint8_t *auth_key;
+	int auth_key_len = 0;
+	size_t ctx_len = 0;
+	int ret;
+
+	ret = on_fill_ipsec_common_sa(ipsec, crypto_xform, &in_sa->common_sa);
+	if (ret)
+		return ret;
+
+	if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD ||
+	    auth_xform->auth.algo == RTE_CRYPTO_AUTH_NULL ||
+	    auth_xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
+		ctx_len = offsetof(struct roc_ie_on_inb_sa,
+				   sha1_or_gcm.hmac_key[0]);
+	} else {
+		auth_key = auth_xform->auth.key.data;
+		auth_key_len = auth_xform->auth.key.length;
+
+		switch (auth_xform->auth.algo) {
+		case RTE_CRYPTO_AUTH_NULL:
+			break;
+		case RTE_CRYPTO_AUTH_SHA1_HMAC:
+			memcpy(in_sa->sha1_or_gcm.hmac_key, auth_key,
+			       auth_key_len);
+			ctx_len = offsetof(struct roc_ie_on_inb_sa,
+					   sha1_or_gcm.selector);
+			break;
+		case RTE_CRYPTO_AUTH_SHA256_HMAC:
+		case RTE_CRYPTO_AUTH_SHA384_HMAC:
+		case RTE_CRYPTO_AUTH_SHA512_HMAC:
+			memcpy(in_sa->sha2.hmac_key, auth_key, auth_key_len);
+			ctx_len = offsetof(struct roc_ie_on_inb_sa,
+					   sha2.selector);
+			break;
+		case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
+			memcpy(in_sa->aes_xcbc.key, auth_key, auth_key_len);
+			ctx_len = offsetof(struct roc_ie_on_inb_sa,
+					   aes_xcbc.selector);
+			break;
+		default:
+			plt_err("Unsupported auth algorithm %u",
+				auth_xform->auth.algo);
+			return -ENOTSUP;
+		}
+	}
+
+	return ctx_len;
+}
diff --git a/drivers/common/cnxk/cnxk_security.h b/drivers/common/cnxk/cnxk_security.h
index 02cdad269c..4e477ec53f 100644
--- a/drivers/common/cnxk/cnxk_security.h
+++ b/drivers/common/cnxk/cnxk_security.h
@@ -59,4 +59,15 @@ cnxk_onf_ipsec_outb_sa_fill(struct roc_onf_ipsec_outb_sa *sa,
 bool __roc_api cnxk_onf_ipsec_inb_sa_valid(struct roc_onf_ipsec_inb_sa *sa);
 bool __roc_api cnxk_onf_ipsec_outb_sa_valid(struct roc_onf_ipsec_outb_sa *sa);
 
+/* [CN9K] */
+int __roc_api
+cnxk_on_ipsec_inb_sa_create(struct rte_security_ipsec_xform *ipsec,
+			    struct rte_crypto_sym_xform *crypto_xform,
+			    struct roc_ie_on_inb_sa *in_sa);
+
+int __roc_api
+cnxk_on_ipsec_outb_sa_create(struct rte_security_ipsec_xform *ipsec,
+			     struct rte_crypto_sym_xform *crypto_xform,
+			     struct roc_ie_on_outb_sa *out_sa);
+
 #endif /* _CNXK_SECURITY_H__ */
diff --git a/drivers/common/cnxk/roc_cpt.c b/drivers/common/cnxk/roc_cpt.c
index 742723ad1d..e5b179e8e1 100644
--- a/drivers/common/cnxk/roc_cpt.c
+++ b/drivers/common/cnxk/roc_cpt.c
@@ -981,3 +981,96 @@ roc_cpt_ctx_write(struct roc_cpt_lf *lf, void *sa_dptr, void *sa_cptr,
 
 	return 0;
 }
+
+int
+roc_on_cpt_ctx_write(struct roc_cpt_lf *lf, void *sa, uint8_t opcode,
+		     uint16_t ctx_len, uint8_t egrp)
+{
+	union cpt_res_s res, *hw_res;
+	struct cpt_inst_s inst;
+	uint64_t lmt_status;
+	int ret = 0;
+
+	hw_res = plt_zmalloc(sizeof(*hw_res), ROC_CPT_RES_ALIGN);
+	if (unlikely(hw_res == NULL)) {
+		plt_err("Couldn't allocate memory for result address");
+		return -ENOMEM;
+	}
+
+	hw_res->cn9k.compcode = CPT_COMP_NOT_DONE;
+
+	inst.w4.s.opcode_major = opcode;
+	inst.w4.s.opcode_minor = ctx_len >> 3;
+	inst.w4.s.param1 = 0;
+	inst.w4.s.param2 = 0;
+	inst.w4.s.dlen = ctx_len;
+	inst.dptr = rte_mempool_virt2iova(sa);
+	inst.rptr = 0;
+	inst.w7.s.cptr = rte_mempool_virt2iova(sa);
+	inst.w7.s.egrp = egrp;
+
+	inst.w0.u64 = 0;
+	inst.w2.u64 = 0;
+	inst.w3.u64 = 0;
+	inst.res_addr = (uintptr_t)hw_res;
+
+	rte_io_wmb();
+
+	do {
+		/* Copy CPT command to LMTLINE */
+		roc_lmt_mov64((void *)lf->lmt_base, &inst);
+		lmt_status = roc_lmt_submit_ldeor(lf->io_addr);
+	} while (lmt_status == 0);
+
+	const uint64_t timeout = plt_tsc_cycles() + 60 * plt_tsc_hz();
+
+	/* Wait until CPT instruction completes */
+	do {
+		res.u64[0] = __atomic_load_n(&hw_res->u64[0], __ATOMIC_RELAXED);
+		if (unlikely(plt_tsc_cycles() > timeout)) {
+			plt_err("Request timed out");
+			ret = -ETIMEDOUT;
+			goto free;
+		}
+	} while (res.cn9k.compcode == CPT_COMP_NOT_DONE);
+
+	if (unlikely(res.cn9k.compcode != CPT_COMP_GOOD)) {
+		ret = res.cn9k.compcode;
+		switch (ret) {
+		case CPT_COMP_INSTERR:
+			plt_err("Request failed with instruction error");
+			break;
+		case CPT_COMP_FAULT:
+			plt_err("Request failed with DMA fault");
+			break;
+		case CPT_COMP_HWERR:
+			plt_err("Request failed with hardware error");
+			break;
+		default:
+			plt_err("Request failed with unknown hardware completion code : 0x%x",
+				ret);
+		}
+		ret = -EINVAL;
+		goto free;
+	}
+
+	if (unlikely(res.cn9k.uc_compcode != ROC_IE_ON_UCC_SUCCESS)) {
+		ret = res.cn9k.uc_compcode;
+		switch (ret) {
+		case ROC_IE_ON_AUTH_UNSUPPORTED:
+			plt_err("Invalid auth type");
+			break;
+		case ROC_IE_ON_ENCRYPT_UNSUPPORTED:
+			plt_err("Invalid encrypt type");
+			break;
+		default:
+			plt_err("Request failed with unknown microcode completion code : 0x%x",
+				ret);
+		}
+		ret = -ENOTSUP;
+	}
+
+free:
+	plt_free(hw_res);
+	return ret;
+}
diff --git a/drivers/common/cnxk/roc_cpt.h b/drivers/common/cnxk/roc_cpt.h
index 99cb8b2862..1b2032b547 100644
--- a/drivers/common/cnxk/roc_cpt.h
+++ b/drivers/common/cnxk/roc_cpt.h
@@ -181,4 +181,7 @@ void __roc_api roc_cpt_parse_hdr_dump(const struct cpt_parse_hdr_s *cpth);
 int __roc_api roc_cpt_ctx_write(struct roc_cpt_lf *lf, void *sa_dptr,
 				void *sa_cptr, uint16_t sa_len);
 
+int __roc_api roc_on_cpt_ctx_write(struct roc_cpt_lf *lf, void *sa,
+				   uint8_t opcode, uint16_t ctx_len,
+				   uint8_t egrp);
 #endif /* _ROC_CPT_H_ */
diff --git a/drivers/common/cnxk/roc_ie_on.h b/drivers/common/cnxk/roc_ie_on.h
index 7dd7b6595f..37f711c643 100644
--- a/drivers/common/cnxk/roc_ie_on.h
+++ b/drivers/common/cnxk/roc_ie_on.h
@@ -23,7 +23,7 @@ enum roc_ie_on_ucc_ipsec {
 };
 
 /* Helper macros */
-#define ROC_IE_ON_INB_RPTR_HDR 0x8
+#define ROC_IE_ON_INB_RPTR_HDR 16
 #define ROC_IE_ON_MAX_IV_LEN   16
 #define ROC_IE_ON_PER_PKT_IV   BIT(43)
 
@@ -67,9 +67,17 @@ enum {
 struct roc_ie_on_outb_hdr {
 	uint32_t ip_id;
 	uint32_t seq;
+	uint32_t esn;
+	uint32_t df_tos;
 	uint8_t iv[16];
 };
 
+struct roc_ie_on_inb_hdr {
+	uint32_t sa_index;
+	uint64_t seq;
+	uint32_t pad;
+};
+
 union roc_ie_on_bit_perfect_iv {
 	uint8_t aes_iv[16];
 	uint8_t des_iv[8];
@@ -113,7 +121,7 @@ struct roc_ie_on_ip_template {
 union roc_on_ipsec_outb_param1 {
 	uint16_t u16;
 	struct {
-		uint16_t frag_num : 4;
+		uint16_t l2hdr_len : 4;
 		uint16_t rsvd_4_6 : 3;
 		uint16_t gre_select : 1;
 		uint16_t dsiv : 1;
@@ -171,8 +179,13 @@ struct roc_ie_on_common_sa {
 	union roc_ie_on_bit_perfect_iv iv;
 
 	/* w7 */
-	uint32_t esn_hi;
-	uint32_t esn_low;
+	union {
+		uint64_t u64;
+		struct {
+			uint32_t th;
+			uint32_t tl;
+		};
+	} seq_t;
 };
 
 struct roc_ie_on_outb_sa {
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index a77f3f6e3c..db61fe575d 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -23,6 +23,8 @@ INTERNAL {
 	cnxk_ot_ipsec_outb_sa_fill;
 	cnxk_ot_ipsec_inb_sa_valid;
 	cnxk_ot_ipsec_outb_sa_valid;
+	cnxk_on_ipsec_inb_sa_create;
+	cnxk_on_ipsec_outb_sa_create;
 	roc_ae_ec_grp_get;
 	roc_ae_ec_grp_put;
 	roc_ae_fpm_get;
@@ -72,6 +74,7 @@ INTERNAL {
 	roc_cpt_parse_hdr_dump;
 	roc_cpt_rxc_time_cfg;
 	roc_cpt_ctx_write;
+	roc_on_cpt_ctx_write;
 	roc_dpi_configure;
 	roc_dpi_dev_fini;
 	roc_dpi_dev_init;
diff --git a/drivers/crypto/cnxk/cn9k_cryptodev_ops.c b/drivers/crypto/cnxk/cn9k_cryptodev_ops.c
index eccaf398df..7720730120 100644
--- a/drivers/crypto/cnxk/cn9k_cryptodev_ops.c
+++ b/drivers/crypto/cnxk/cn9k_cryptodev_ops.c
@@ -43,7 +43,9 @@ cn9k_cpt_sec_inst_fill(struct rte_crypto_op *op,
 		       struct cpt_inst_s *inst)
 {
 	struct rte_crypto_sym_op *sym_op = op->sym;
+	struct roc_ie_on_common_sa *common_sa;
 	struct cn9k_sec_session *priv;
+	struct roc_ie_on_sa_ctl *ctl;
 	struct cn9k_ipsec_sa *sa;
 
 	if (unlikely(sym_op->m_dst && sym_op->m_dst != sym_op->m_src)) {
@@ -64,6 +66,12 @@ cn9k_cpt_sec_inst_fill(struct rte_crypto_op *op,
 
 	infl_req->op_flags |= CPT_OP_FLAGS_IPSEC_DIR_INBOUND;
 
+	common_sa = &sa->in_sa.common_sa;
+	ctl = &common_sa->ctl;
+
+	if (ctl->esn_en)
+		infl_req->op_flags |= CPT_OP_FLAGS_IPSEC_INB_ESN;
+
 	return process_inb_sa(op, sa, inst);
 }
 
@@ -491,14 +499,28 @@ cn9k_cpt_sec_post_process(struct rte_crypto_op *cop,
 {
 	struct rte_crypto_sym_op *sym_op = cop->sym;
 	struct rte_mbuf *m = sym_op->m_src;
+	struct cn9k_sec_session *priv;
+	struct cn9k_ipsec_sa *sa;
 	struct rte_ipv6_hdr *ip6;
 	struct rte_ipv4_hdr *ip;
 	uint16_t m_len = 0;
 	char *data;
 
+	priv = get_sec_session_private_data(cop->sym->sec_session);
+	sa = &priv->sa;
+
 	if (infl_req->op_flags & CPT_OP_FLAGS_IPSEC_DIR_INBOUND) {
+		struct roc_ie_on_common_sa *common_sa = &sa->in_sa.common_sa;
+
 		data = rte_pktmbuf_mtod(m, char *);
+		if (infl_req->op_flags == CPT_OP_FLAGS_IPSEC_INB_ESN) {
+			struct roc_ie_on_inb_hdr *inb_hdr =
+				(struct roc_ie_on_inb_hdr *)data;
+			uint64_t seq = rte_be_to_cpu_64(inb_hdr->seq);
 
+			if (seq > common_sa->seq_t.u64)
+				common_sa->seq_t.u64 = seq;
+		}
 		ip = (struct rte_ipv4_hdr *)(data + ROC_IE_ON_INB_RPTR_HDR);
 
 		if (((ip->version_ihl & 0xf0) >> RTE_IPV4_IHL_MULTIPLIER) ==
@@ -515,6 +537,8 @@ cn9k_cpt_sec_post_process(struct rte_crypto_op *cop,
 		m->data_len = m_len;
 		m->pkt_len = m_len;
 		m->data_off += ROC_IE_ON_INB_RPTR_HDR;
+	} else {
+		rte_pktmbuf_adj(m, sa->custom_hdr_len);
 	}
 }
 
diff --git a/drivers/crypto/cnxk/cn9k_ipsec.c b/drivers/crypto/cnxk/cn9k_ipsec.c
index 82b8dae786..85f3f26c32 100644
--- a/drivers/crypto/cnxk/cn9k_ipsec.c
+++ b/drivers/crypto/cnxk/cn9k_ipsec.c
@@ -15,331 +15,26 @@
 
 #include "roc_api.h"
 
-static inline int
-cn9k_cpt_enq_sa_write(struct cn9k_ipsec_sa *sa, struct cnxk_cpt_qp *qp,
-		      uint8_t opcode, size_t ctx_len)
-{
-	struct roc_cpt *roc_cpt = qp->lf.roc_cpt;
-	uint64_t lmtline = qp->lmtline.lmt_base;
-	uint64_t io_addr = qp->lmtline.io_addr;
-	uint64_t lmt_status, time_out;
-	struct cpt_cn9k_res_s *res;
-	struct cpt_inst_s inst;
-	uint64_t *mdata;
-	int ret = 0;
-
-	if (unlikely(rte_mempool_get(qp->meta_info.pool, (void **)&mdata) < 0))
-		return -ENOMEM;
-
-	res = (struct cpt_cn9k_res_s *)RTE_PTR_ALIGN(mdata, 16);
-	res->compcode = CPT_COMP_NOT_DONE;
-
-	inst.w4.s.opcode_major = opcode;
-	inst.w4.s.opcode_minor = ctx_len >> 3;
-	inst.w4.s.param1 = 0;
-	inst.w4.s.param2 = 0;
-	inst.w4.s.dlen = ctx_len;
-	inst.dptr = rte_mempool_virt2iova(sa);
-	inst.rptr = 0;
-	inst.w7.s.cptr = rte_mempool_virt2iova(sa);
-	inst.w7.s.egrp = roc_cpt->eng_grp[CPT_ENG_TYPE_IE];
-
-	inst.w0.u64 = 0;
-	inst.w2.u64 = 0;
-	inst.w3.u64 = 0;
-	inst.res_addr = rte_mempool_virt2iova(res);
-
-	rte_io_wmb();
-
-	do {
-		/* Copy CPT command to LMTLINE */
-		roc_lmt_mov64((void *)lmtline, &inst);
-		lmt_status = roc_lmt_submit_ldeor(io_addr);
-	} while (lmt_status == 0);
-
-	time_out = rte_get_timer_cycles() +
-		   DEFAULT_COMMAND_TIMEOUT * rte_get_timer_hz();
-
-	while (res->compcode == CPT_COMP_NOT_DONE) {
-		if (rte_get_timer_cycles() > time_out) {
-			rte_mempool_put(qp->meta_info.pool, mdata);
-			plt_err("Request timed out");
-			return -ETIMEDOUT;
-		}
-		rte_io_rmb();
-	}
-
-	if (unlikely(res->compcode != CPT_COMP_GOOD)) {
-		ret = res->compcode;
-		switch (ret) {
-		case CPT_COMP_INSTERR:
-			plt_err("Request failed with instruction error");
-			break;
-		case CPT_COMP_FAULT:
-			plt_err("Request failed with DMA fault");
-			break;
-		case CPT_COMP_HWERR:
-			plt_err("Request failed with hardware error");
-			break;
-		default:
-			plt_err("Request failed with unknown hardware "
-				"completion code : 0x%x",
-				ret);
-		}
-		ret = -EINVAL;
-		goto mempool_put;
-	}
-
-	if (unlikely(res->uc_compcode != ROC_IE_ON_UCC_SUCCESS)) {
-		ret = res->uc_compcode;
-		switch (ret) {
-		case ROC_IE_ON_AUTH_UNSUPPORTED:
-			plt_err("Invalid auth type");
-			break;
-		case ROC_IE_ON_ENCRYPT_UNSUPPORTED:
-			plt_err("Invalid encrypt type");
-			break;
-		default:
-			plt_err("Request failed with unknown microcode "
-				"completion code : 0x%x",
-				ret);
-		}
-		ret = -ENOTSUP;
-	}
-
-mempool_put:
-	rte_mempool_put(qp->meta_info.pool, mdata);
-	return ret;
-}
-
-static inline int
-ipsec_sa_ctl_set(struct rte_security_ipsec_xform *ipsec,
-		 struct rte_crypto_sym_xform *crypto_xform,
-		 struct roc_ie_on_sa_ctl *ctl)
-{
-	struct rte_crypto_sym_xform *cipher_xform, *auth_xform;
-	int aes_key_len = 0;
-
-	if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
-		auth_xform = crypto_xform;
-		cipher_xform = crypto_xform->next;
-	} else {
-		cipher_xform = crypto_xform;
-		auth_xform = crypto_xform->next;
-	}
-
-	if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS)
-		ctl->direction = ROC_IE_SA_DIR_OUTBOUND;
-	else
-		ctl->direction = ROC_IE_SA_DIR_INBOUND;
-
-	if (ipsec->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
-		if (ipsec->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4)
-			ctl->outer_ip_ver = ROC_IE_SA_IP_VERSION_4;
-		else if (ipsec->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV6)
-			ctl->outer_ip_ver = ROC_IE_SA_IP_VERSION_6;
-		else
-			return -EINVAL;
-	}
-
-	if (ipsec->mode == RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT) {
-		ctl->ipsec_mode = ROC_IE_SA_MODE_TRANSPORT;
-		ctl->outer_ip_ver = ROC_IE_SA_IP_VERSION_4;
-	} else if (ipsec->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL)
-		ctl->ipsec_mode = ROC_IE_SA_MODE_TUNNEL;
-	else
-		return -EINVAL;
-
-	if (ipsec->proto == RTE_SECURITY_IPSEC_SA_PROTO_AH)
-		ctl->ipsec_proto = ROC_IE_SA_PROTOCOL_AH;
-	else if (ipsec->proto == RTE_SECURITY_IPSEC_SA_PROTO_ESP)
-		ctl->ipsec_proto = ROC_IE_SA_PROTOCOL_ESP;
-	else
-		return -EINVAL;
-
-	if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
-		switch (crypto_xform->aead.algo) {
-		case RTE_CRYPTO_AEAD_AES_GCM:
-			ctl->enc_type = ROC_IE_ON_SA_ENC_AES_GCM;
-			aes_key_len = crypto_xform->aead.key.length;
-			break;
-		default:
-			plt_err("Unsupported AEAD algorithm");
-			return -ENOTSUP;
-		}
-	} else {
-		if (cipher_xform != NULL) {
-			switch (cipher_xform->cipher.algo) {
-			case RTE_CRYPTO_CIPHER_NULL:
-				ctl->enc_type = ROC_IE_ON_SA_ENC_NULL;
-				break;
-			case RTE_CRYPTO_CIPHER_AES_CBC:
-				ctl->enc_type = ROC_IE_ON_SA_ENC_AES_CBC;
-				aes_key_len = cipher_xform->cipher.key.length;
-				break;
-			case RTE_CRYPTO_CIPHER_AES_CTR:
-				ctl->enc_type = ROC_IE_ON_SA_ENC_AES_CTR;
-				aes_key_len = cipher_xform->cipher.key.length;
-				break;
-			default:
-				plt_err("Unsupported cipher algorithm");
-				return -ENOTSUP;
-			}
-		}
-
-		switch (auth_xform->auth.algo) {
-		case RTE_CRYPTO_AUTH_NULL:
-			ctl->auth_type = ROC_IE_ON_SA_AUTH_NULL;
-			break;
-		case RTE_CRYPTO_AUTH_MD5_HMAC:
-			ctl->auth_type = ROC_IE_ON_SA_AUTH_MD5;
-			break;
-		case RTE_CRYPTO_AUTH_SHA1_HMAC:
-			ctl->auth_type = ROC_IE_ON_SA_AUTH_SHA1;
-			break;
-		case RTE_CRYPTO_AUTH_SHA224_HMAC:
-			ctl->auth_type = ROC_IE_ON_SA_AUTH_SHA2_224;
-			break;
-		case RTE_CRYPTO_AUTH_SHA256_HMAC:
-			ctl->auth_type = ROC_IE_ON_SA_AUTH_SHA2_256;
-			break;
-		case RTE_CRYPTO_AUTH_SHA384_HMAC:
-			ctl->auth_type = ROC_IE_ON_SA_AUTH_SHA2_384;
-			break;
-		case RTE_CRYPTO_AUTH_SHA512_HMAC:
-			ctl->auth_type = ROC_IE_ON_SA_AUTH_SHA2_512;
-			break;
-		case RTE_CRYPTO_AUTH_AES_GMAC:
-			ctl->auth_type = ROC_IE_ON_SA_AUTH_AES_GMAC;
-			aes_key_len = auth_xform->auth.key.length;
-			break;
-		case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
-			ctl->auth_type = ROC_IE_ON_SA_AUTH_AES_XCBC_128;
-			break;
-		default:
-			plt_err("Unsupported auth algorithm");
-			return -ENOTSUP;
-		}
-	}
-
-	/* Set AES key length */
-	if (ctl->enc_type == ROC_IE_ON_SA_ENC_AES_CBC ||
-	    ctl->enc_type == ROC_IE_ON_SA_ENC_AES_CCM ||
-	    ctl->enc_type == ROC_IE_ON_SA_ENC_AES_CTR ||
-	    ctl->enc_type == ROC_IE_ON_SA_ENC_AES_GCM ||
-	    ctl->auth_type == ROC_IE_ON_SA_AUTH_AES_GMAC) {
-		switch (aes_key_len) {
-		case 16:
-			ctl->aes_key_len = ROC_IE_SA_AES_KEY_LEN_128;
-			break;
-		case 24:
-			ctl->aes_key_len = ROC_IE_SA_AES_KEY_LEN_192;
-			break;
-		case 32:
-			ctl->aes_key_len = ROC_IE_SA_AES_KEY_LEN_256;
-			break;
-		default:
-			plt_err("Invalid AES key length");
-			return -EINVAL;
-		}
-	}
-
-	if (ipsec->options.esn)
-		ctl->esn_en = 1;
-
-	if (ipsec->options.udp_encap == 1)
-		ctl->encap_type = ROC_IE_ON_SA_ENCAP_UDP;
-
-	ctl->copy_df = ipsec->options.copy_df;
-
-	ctl->spi = rte_cpu_to_be_32(ipsec->spi);
-
-	rte_io_wmb();
-
-	ctl->valid = 1;
-
-	return 0;
-}
-
-static inline int
-fill_ipsec_common_sa(struct rte_security_ipsec_xform *ipsec,
-		     struct rte_crypto_sym_xform *crypto_xform,
-		     struct roc_ie_on_common_sa *common_sa)
-{
-	struct rte_crypto_sym_xform *cipher_xform, *auth_xform;
-	const uint8_t *cipher_key;
-	int cipher_key_len = 0;
-	int ret;
-
-	ret = ipsec_sa_ctl_set(ipsec, crypto_xform, &common_sa->ctl);
-	if (ret)
-		return ret;
-
-	if (ipsec->esn.value) {
-		common_sa->esn_low = ipsec->esn.low;
-		common_sa->esn_hi = ipsec->esn.hi;
-	}
-
-	if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
-		auth_xform = crypto_xform;
-		cipher_xform = crypto_xform->next;
-	} else {
-		cipher_xform = crypto_xform;
-		auth_xform = crypto_xform->next;
-	}
-
-	if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
-		if (crypto_xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
-			memcpy(common_sa->iv.gcm.nonce, &ipsec->salt, 4);
-		cipher_key = crypto_xform->aead.key.data;
-		cipher_key_len = crypto_xform->aead.key.length;
-	} else {
-		if (cipher_xform) {
-			cipher_key = cipher_xform->cipher.key.data;
-			cipher_key_len = cipher_xform->cipher.key.length;
-		}
-
-		if (auth_xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
-			memcpy(common_sa->iv.gcm.nonce, &ipsec->salt, 4);
-			cipher_key = auth_xform->auth.key.data;
-			cipher_key_len = auth_xform->auth.key.length;
-		}
-	}
-
-	if (cipher_key_len != 0)
-		memcpy(common_sa->cipher_key, cipher_key, cipher_key_len);
-
-	return 0;
-}
-
 static int
 cn9k_ipsec_outb_sa_create(struct cnxk_cpt_qp *qp,
 			  struct rte_security_ipsec_xform *ipsec,
 			  struct rte_crypto_sym_xform *crypto_xform,
 			  struct rte_security_session *sec_sess)
 {
-	struct roc_ie_on_ip_template *template = NULL;
 	struct roc_cpt *roc_cpt = qp->lf.roc_cpt;
-	struct rte_crypto_sym_xform *auth_xform;
 	union roc_on_ipsec_outb_param1 param1;
 	struct cnxk_cpt_inst_tmpl *inst_tmpl;
-	struct roc_ie_on_outb_sa *out_sa;
 	struct cn9k_sec_session *sess;
-	struct roc_ie_on_sa_ctl *ctl;
 	struct cn9k_ipsec_sa *sa;
-	struct rte_ipv6_hdr *ip6;
-	struct rte_ipv4_hdr *ip4;
-	const uint8_t *auth_key;
 	union cpt_inst_w4 w4;
 	union cpt_inst_w7 w7;
-	int auth_key_len = 0;
 	size_t ctx_len;
+	uint8_t opcode;
+	uint8_t egrp;
 	int ret;
 
 	sess = get_sec_session_private_data(sec_sess);
 	sa = &sess->sa;
-	out_sa = &sa->out_sa;
-	ctl = &out_sa->common_sa.ctl;
 
 	memset(sa, 0, sizeof(struct cn9k_ipsec_sa));
 
@@ -353,153 +48,16 @@ cn9k_ipsec_outb_sa_create(struct cnxk_cpt_qp *qp,
 	if (ipsec->esn.value)
 		sa->esn = ipsec->esn.value;
 
-	if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
-		auth_xform = crypto_xform;
-	else
-		auth_xform = crypto_xform->next;
-
-	ret = fill_ipsec_common_sa(ipsec, crypto_xform, &out_sa->common_sa);
-	if (ret)
-		return ret;
-
 	ret = cnxk_ipsec_outb_rlens_get(&sa->rlens, ipsec, crypto_xform);
 	if (ret)
 		return ret;
 
-	if (ctl->enc_type == ROC_IE_ON_SA_ENC_AES_GCM ||
-	    ctl->auth_type == ROC_IE_ON_SA_AUTH_NULL ||
-	    ctl->auth_type == ROC_IE_ON_SA_AUTH_AES_GMAC) {
-		template = &out_sa->aes_gcm.template;
-		ctx_len = offsetof(struct roc_ie_on_outb_sa, aes_gcm.template);
-	} else {
-		switch (ctl->auth_type) {
-		case ROC_IE_ON_SA_AUTH_SHA1:
-			template = &out_sa->sha1.template;
-			ctx_len = offsetof(struct roc_ie_on_outb_sa,
-					   sha1.template);
-			break;
-		case ROC_IE_ON_SA_AUTH_SHA2_256:
-		case ROC_IE_ON_SA_AUTH_SHA2_384:
-		case ROC_IE_ON_SA_AUTH_SHA2_512:
-			template = &out_sa->sha2.template;
-			ctx_len = offsetof(struct roc_ie_on_outb_sa,
-					   sha2.template);
-			break;
-		case ROC_IE_ON_SA_AUTH_AES_XCBC_128:
-			template = &out_sa->aes_xcbc.template;
-			ctx_len = offsetof(struct roc_ie_on_outb_sa,
-					   aes_xcbc.template);
-			break;
-		default:
-			plt_err("Unsupported auth algorithm");
-			return -EINVAL;
-		}
-	}
-
-	ip4 = (struct rte_ipv4_hdr *)&template->ip4.ipv4_hdr;
-	if (ipsec->options.udp_encap) {
-		ip4->next_proto_id = IPPROTO_UDP;
-		template->ip4.udp_src = rte_be_to_cpu_16(4500);
-		template->ip4.udp_dst = rte_be_to_cpu_16(4500);
-	} else {
-		if (ipsec->proto == RTE_SECURITY_IPSEC_SA_PROTO_AH)
-			ip4->next_proto_id = IPPROTO_AH;
-		else
-			ip4->next_proto_id = IPPROTO_ESP;
-	}
-
-	if (ipsec->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
-		if (ipsec->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
-			uint16_t frag_off = 0;
-			ctx_len += sizeof(template->ip4);
-
-			ip4->version_ihl = RTE_IPV4_VHL_DEF;
-			ip4->time_to_live = ipsec->tunnel.ipv4.ttl;
-			ip4->type_of_service |= (ipsec->tunnel.ipv4.dscp << 2);
-			if (ipsec->tunnel.ipv4.df)
-				frag_off |= RTE_IPV4_HDR_DF_FLAG;
-			ip4->fragment_offset = rte_cpu_to_be_16(frag_off);
-
-			memcpy(&ip4->src_addr, &ipsec->tunnel.ipv4.src_ip,
-			       sizeof(struct in_addr));
-			memcpy(&ip4->dst_addr, &ipsec->tunnel.ipv4.dst_ip,
-			       sizeof(struct in_addr));
-		} else if (ipsec->tunnel.type ==
-			   RTE_SECURITY_IPSEC_TUNNEL_IPV6) {
-			ctx_len += sizeof(template->ip6);
-
-			ip6 = (struct rte_ipv6_hdr *)&template->ip6.ipv6_hdr;
-			if (ipsec->options.udp_encap) {
-				ip6->proto = IPPROTO_UDP;
-				template->ip6.udp_src = rte_be_to_cpu_16(4500);
-				template->ip6.udp_dst = rte_be_to_cpu_16(4500);
-			} else {
-				ip6->proto = (ipsec->proto ==
-					      RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
-						     IPPROTO_ESP :
-						     IPPROTO_AH;
-			}
-			ip6->vtc_flow =
-				rte_cpu_to_be_32(0x60000000 |
-						 ((ipsec->tunnel.ipv6.dscp
-						   << RTE_IPV6_HDR_TC_SHIFT) &
-						  RTE_IPV6_HDR_TC_MASK) |
-						 ((ipsec->tunnel.ipv6.flabel
-						   << RTE_IPV6_HDR_FL_SHIFT) &
-						  RTE_IPV6_HDR_FL_MASK));
-			ip6->hop_limits = ipsec->tunnel.ipv6.hlimit;
-			memcpy(&ip6->src_addr, &ipsec->tunnel.ipv6.src_addr,
-			       sizeof(struct in6_addr));
-			memcpy(&ip6->dst_addr, &ipsec->tunnel.ipv6.dst_addr,
-			       sizeof(struct in6_addr));
-		}
-	} else
-		ctx_len += sizeof(template->ip4);
-
-	ctx_len += RTE_ALIGN_CEIL(ctx_len, 8);
-
-	if (crypto_xform->type != RTE_CRYPTO_SYM_XFORM_AEAD) {
-		auth_key = auth_xform->auth.key.data;
-		auth_key_len = auth_xform->auth.key.length;
-
-		switch (auth_xform->auth.algo) {
-		case RTE_CRYPTO_AUTH_AES_GMAC:
-		case RTE_CRYPTO_AUTH_NULL:
-			break;
-		case RTE_CRYPTO_AUTH_SHA1_HMAC:
-			memcpy(out_sa->sha1.hmac_key, auth_key, auth_key_len);
-			break;
-		case RTE_CRYPTO_AUTH_SHA256_HMAC:
-		case RTE_CRYPTO_AUTH_SHA384_HMAC:
-		case RTE_CRYPTO_AUTH_SHA512_HMAC:
-			memcpy(out_sa->sha2.hmac_key, auth_key, auth_key_len);
-			break;
-		case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
-			memcpy(out_sa->aes_xcbc.key, auth_key, auth_key_len);
-			break;
-		default:
-			plt_err("Unsupported auth algorithm %u",
-				auth_xform->auth.algo);
-			return -ENOTSUP;
-		}
-	}
-
-	inst_tmpl = &sa->inst;
-
-	w4.u64 = 0;
-	w4.s.opcode_major = ROC_IE_ON_MAJOR_OP_PROCESS_OUTBOUND_IPSEC;
-	w4.s.opcode_minor = ctx_len >> 3;
-
-	param1.u16 = 0;
-	param1.s.ikev2 = 1;
-
-	sa->custom_hdr_len = sizeof(struct roc_ie_on_outb_hdr) -
-			     ROC_IE_ON_MAX_IV_LEN;
+	sa->custom_hdr_len =
+		sizeof(struct roc_ie_on_outb_hdr) - ROC_IE_ON_MAX_IV_LEN;
 
 #ifdef LA_IPSEC_DEBUG
 	/* Use IV from application in debug mode */
 	if (ipsec->options.iv_gen_disable == 1) {
-		param1.s.per_pkt_iv = ROC_IE_ON_IV_SRC_FROM_DPTR;
 		sa->custom_hdr_len = sizeof(struct roc_ie_on_outb_hdr);
 
 		if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
@@ -520,17 +78,49 @@ cn9k_ipsec_outb_sa_create(struct cnxk_cpt_qp *qp,
 	}
 #endif
 
-	w4.s.param1 = param1.u16;
+	ret = cnxk_on_ipsec_outb_sa_create(ipsec, crypto_xform, &sa->out_sa);
 
-	inst_tmpl->w4 = w4.u64;
+	if (ret < 0)
+		return ret;
+
+	ctx_len = ret;
+	opcode = ROC_IE_ON_MAJOR_OP_WRITE_IPSEC_OUTBOUND;
+	egrp = roc_cpt->eng_grp[CPT_ENG_TYPE_IE];
+	ret = roc_on_cpt_ctx_write(&qp->lf, (void *)&sa->out_sa, opcode,
+				   ctx_len, egrp);
+
+	if (ret)
+		return ret;
+
+	w4.u64 = 0;
+	w4.s.opcode_major = ROC_IE_ON_MAJOR_OP_PROCESS_OUTBOUND_IPSEC;
+	w4.s.opcode_minor = ctx_len >> 3;
+
+	param1.u16 = 0;
+	param1.s.ikev2 = 1;
+
+#ifdef LA_IPSEC_DEBUG
+	/* Use IV from application in debug mode */
+	if (ipsec->options.iv_gen_disable == 1)
+		param1.s.per_pkt_iv = ROC_IE_ON_IV_SRC_FROM_DPTR;
+#else
+	if (ipsec->options.iv_gen_disable != 0) {
+		plt_err("Application provided IV is not supported");
+		return -ENOTSUP;
+	}
+#endif
+
+	w4.s.param1 = param1.u16;
 
 	w7.u64 = 0;
-	w7.s.egrp = roc_cpt->eng_grp[CPT_ENG_TYPE_IE];
-	w7.s.cptr = rte_mempool_virt2iova(out_sa);
+	w7.s.egrp = egrp;
+	w7.s.cptr = rte_mempool_virt2iova(&sa->out_sa);
+
+	inst_tmpl = &sa->inst;
+	inst_tmpl->w4 = w4.u64;
 	inst_tmpl->w7 = w7.u64;
 
-	return cn9k_cpt_enq_sa_write(
-		sa, qp, ROC_IE_ON_MAJOR_OP_WRITE_IPSEC_OUTBOUND, ctx_len);
+	return 0;
 }
 
 static int
@@ -539,71 +129,54 @@ cn9k_ipsec_inb_sa_create(struct cnxk_cpt_qp *qp,
 			 struct rte_crypto_sym_xform *crypto_xform,
 			 struct rte_security_session *sec_sess)
 {
-	struct rte_crypto_sym_xform *auth_xform = crypto_xform;
 	struct roc_cpt *roc_cpt = qp->lf.roc_cpt;
-	union roc_on_ipsec_inb_param2 param2;
 	struct cnxk_cpt_inst_tmpl *inst_tmpl;
-	struct roc_ie_on_inb_sa *in_sa;
+	union roc_on_ipsec_inb_param2 param2;
 	struct cn9k_sec_session *sess;
 	struct cn9k_ipsec_sa *sa;
-	const uint8_t *auth_key;
 	union cpt_inst_w4 w4;
 	union cpt_inst_w7 w7;
-	int auth_key_len = 0;
 	size_t ctx_len = 0;
-	int ret;
+	uint8_t opcode;
+	uint8_t egrp;
+	int ret = 0;
 
 	sess = get_sec_session_private_data(sec_sess);
 	sa = &sess->sa;
-	in_sa = &sa->in_sa;
 
 	memset(sa, 0, sizeof(struct cn9k_ipsec_sa));
 
 	sa->dir = RTE_SECURITY_IPSEC_SA_DIR_INGRESS;
 	sa->replay_win_sz = ipsec->replay_win_sz;
 
-	ret = fill_ipsec_common_sa(ipsec, crypto_xform, &in_sa->common_sa);
-	if (ret)
-		return ret;
-
-	if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD ||
-	    auth_xform->auth.algo == RTE_CRYPTO_AUTH_NULL ||
-	    auth_xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
-		ctx_len = offsetof(struct roc_ie_on_inb_sa,
-				   sha1_or_gcm.hmac_key[0]);
-	} else {
-		auth_key = auth_xform->auth.key.data;
-		auth_key_len = auth_xform->auth.key.length;
-
-		switch (auth_xform->auth.algo) {
-		case RTE_CRYPTO_AUTH_NULL:
-			break;
-		case RTE_CRYPTO_AUTH_SHA1_HMAC:
-			memcpy(in_sa->sha1_or_gcm.hmac_key, auth_key,
-			       auth_key_len);
-			ctx_len = offsetof(struct roc_ie_on_inb_sa,
-					   sha1_or_gcm.selector);
-			break;
-		case RTE_CRYPTO_AUTH_SHA256_HMAC:
-		case RTE_CRYPTO_AUTH_SHA384_HMAC:
-		case RTE_CRYPTO_AUTH_SHA512_HMAC:
-			memcpy(in_sa->sha2.hmac_key, auth_key, auth_key_len);
-			ctx_len = offsetof(struct roc_ie_on_inb_sa,
-					   sha2.selector);
-			break;
-		case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
-			memcpy(in_sa->aes_xcbc.key, auth_key, auth_key_len);
-			ctx_len = offsetof(struct roc_ie_on_inb_sa,
-					   aes_xcbc.selector);
-			break;
-		default:
-			plt_err("Unsupported auth algorithm %u",
-				auth_xform->auth.algo);
+	if (sa->replay_win_sz) {
+		if (sa->replay_win_sz > CNXK_ON_AR_WIN_SIZE_MAX) {
+			plt_err("Replay window size:%u is not supported",
+				sa->replay_win_sz);
 			return -ENOTSUP;
 		}
+
+		/* Set window bottom to 1, base and top to size of window */
+		sa->ar.winb = 1;
+		sa->ar.wint = sa->replay_win_sz;
+		sa->ar.base = sa->replay_win_sz;
+
+		sa->in_sa.common_sa.seq_t.tl = sa->seq_lo;
+		sa->in_sa.common_sa.seq_t.th = sa->seq_hi;
 	}
 
-	inst_tmpl = &sa->inst;
+	ret = cnxk_on_ipsec_inb_sa_create(ipsec, crypto_xform, &sa->in_sa);
+
+	if (ret < 0)
+		return ret;
+
+	ctx_len = ret;
+	opcode = ROC_IE_ON_MAJOR_OP_WRITE_IPSEC_INBOUND;
+	egrp = roc_cpt->eng_grp[CPT_ENG_TYPE_IE];
+	ret = roc_on_cpt_ctx_write(&qp->lf, (void *)&sa->in_sa, opcode, ctx_len,
+				   egrp);
+	if (ret)
+		return ret;
 
 	w4.u64 = 0;
 	w4.s.opcode_major = ROC_IE_ON_MAJOR_OP_PROCESS_INBOUND_IPSEC;
@@ -613,31 +186,14 @@ cn9k_ipsec_inb_sa_create(struct cnxk_cpt_qp *qp,
 	param2.s.ikev2 = 1;
 	w4.s.param2 = param2.u16;
 
-	inst_tmpl->w4 = w4.u64;
+	w7.s.egrp = egrp;
+	w7.s.cptr = rte_mempool_virt2iova(&sa->in_sa);
 
-	w7.u64 = 0;
-	w7.s.egrp = roc_cpt->eng_grp[CPT_ENG_TYPE_IE];
-	w7.s.cptr = rte_mempool_virt2iova(in_sa);
+	inst_tmpl = &sa->inst;
+	inst_tmpl->w4 = w4.u64;
 	inst_tmpl->w7 = w7.u64;
 
-	if (sa->replay_win_sz) {
-		if (sa->replay_win_sz > CNXK_ON_AR_WIN_SIZE_MAX) {
-			plt_err("Replay window size:%u is not supported",
-				sa->replay_win_sz);
-			return -ENOTSUP;
-		}
-
-		/* Set window bottom to 1, base and top to size of window */
-		sa->ar.winb = 1;
-		sa->ar.wint = sa->replay_win_sz;
-		sa->ar.base = sa->replay_win_sz;
-
-		in_sa->common_sa.esn_low = sa->seq_lo;
-		in_sa->common_sa.esn_hi = sa->seq_hi;
-	}
-
-	return cn9k_cpt_enq_sa_write(
-		sa, qp, ROC_IE_ON_MAJOR_OP_WRITE_IPSEC_INBOUND, ctx_len);
+	return 0;
 }
 
 static inline int
diff --git a/drivers/crypto/cnxk/cn9k_ipsec_la_ops.h b/drivers/crypto/cnxk/cn9k_ipsec_la_ops.h
index df89aaca4e..bbb4404a89 100644
--- a/drivers/crypto/cnxk/cn9k_ipsec_la_ops.h
+++ b/drivers/crypto/cnxk/cn9k_ipsec_la_ops.h
@@ -20,7 +20,7 @@ ipsec_po_out_rlen_get(struct cn9k_ipsec_sa *sa, uint32_t plen)
 	enc_payload_len = RTE_ALIGN_CEIL(plen + sa->rlens.roundup_len,
 					 sa->rlens.roundup_byte);
 
-	return sa->rlens.partial_len + enc_payload_len;
+	return sa->custom_hdr_len + sa->rlens.partial_len + enc_payload_len;
 }
 
 static __rte_always_inline int
@@ -41,8 +41,8 @@ ipsec_antireplay_check(struct cn9k_ipsec_sa *sa, uint32_t win_sz,
 	ctl = &common_sa->ctl;
 
 	esn = ctl->esn_en;
-	esn_low = rte_be_to_cpu_32(common_sa->esn_low);
-	esn_hi = rte_be_to_cpu_32(common_sa->esn_hi);
+	esn_low = rte_be_to_cpu_32(common_sa->seq_t.tl);
+	esn_hi = rte_be_to_cpu_32(common_sa->seq_t.th);
 
 	esp = rte_pktmbuf_mtod_offset(m, void *, sizeof(struct rte_ipv4_hdr));
 	seql = rte_be_to_cpu_32(esp->seq);
@@ -62,8 +62,8 @@ ipsec_antireplay_check(struct cn9k_ipsec_sa *sa, uint32_t win_sz,
 	if (esn && !ret) {
 		seq_in_sa = ((uint64_t)esn_hi << 32) | esn_low;
 		if (seq > seq_in_sa) {
-			common_sa->esn_low = rte_cpu_to_be_32(seql);
-			common_sa->esn_hi = rte_cpu_to_be_32(seqh);
+			common_sa->seq_t.tl = rte_cpu_to_be_32(seql);
+			common_sa->seq_t.th = rte_cpu_to_be_32(seqh);
 		}
 	}
 
@@ -77,13 +77,10 @@ process_outb_sa(struct rte_crypto_op *cop, struct cn9k_ipsec_sa *sa,
 	const unsigned int hdr_len = sa->custom_hdr_len;
 	struct rte_crypto_sym_op *sym_op = cop->sym;
 	struct rte_mbuf *m_src = sym_op->m_src;
-	struct roc_ie_on_outb_sa *out_sa;
 	struct roc_ie_on_outb_hdr *hdr;
 	uint32_t dlen, rlen;
 	int32_t extend_tail;
 
-	out_sa = &sa->out_sa;
-
 	dlen = rte_pktmbuf_pkt_len(m_src) + hdr_len;
 	rlen = ipsec_po_out_rlen_get(sa, dlen - hdr_len);
 
@@ -114,8 +111,7 @@ process_outb_sa(struct rte_crypto_op *cop, struct cn9k_ipsec_sa *sa,
 
 	hdr->seq = rte_cpu_to_be_32(sa->seq_lo);
 	hdr->ip_id = rte_cpu_to_be_32(sa->ip_id);
-
-	out_sa->common_sa.esn_hi = sa->seq_hi;
+	hdr->esn = rte_cpu_to_be_32(sa->seq_hi);
 
 	sa->ip_id++;
 	sa->esn++;
diff --git a/drivers/crypto/cnxk/cnxk_cryptodev_ops.h b/drivers/crypto/cnxk/cnxk_cryptodev_ops.h
index 7ece0214dc..ec99e6d660 100644
--- a/drivers/crypto/cnxk/cnxk_cryptodev_ops.h
+++ b/drivers/crypto/cnxk/cnxk_cryptodev_ops.h
@@ -33,6 +33,7 @@ struct cpt_qp_meta_info {
 #define CPT_OP_FLAGS_METABUF	       (1 << 1)
 #define CPT_OP_FLAGS_AUTH_VERIFY       (1 << 0)
 #define CPT_OP_FLAGS_IPSEC_DIR_INBOUND (1 << 2)
+#define CPT_OP_FLAGS_IPSEC_INB_ESN     (1 << 3)
 
 struct cpt_inflight_req {
 	union cpt_res_s res;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 5+ messages in thread

* [PATCH 2/3] crypto/cnxk: improvements to fastpath handling
  2022-06-20  7:18 [PATCH 0/3] support new full context firmware Tejasree Kondoj
  2022-06-20  7:18 ` [PATCH 1/3] crypto/cnxk: move IPsec SA creation to common Tejasree Kondoj
@ 2022-06-20  7:18 ` Tejasree Kondoj
  2022-06-20  7:18 ` [PATCH 3/3] crypto/cnxk: add anti-replay as per new firmware Tejasree Kondoj
  2022-06-21 11:35 ` [PATCH 0/3] support new full context firmware Akhil Goyal
  3 siblings, 0 replies; 5+ messages in thread
From: Tejasree Kondoj @ 2022-06-20  7:18 UTC (permalink / raw)
  To: Akhil Goyal
  Cc: Anoob Joseph, Jerin Jacob, Nithin Dabilpuram,
	Vidya Sagar Velumuri, Archana Muniganti, Ankur Dwivedi,
	Kiran Kumar K, Sunil Kumar Kori, Satha Rao, dev

From: Anoob Joseph <anoobj@marvell.com>

Remove SA & packet accesses in the dequeue path by adjusting the headers in
the enqueue path for outbound packets. For inbound packets, add an extra
esn_en flag in the SA to minimize cache line accesses in the datapath.

Also, use seq_lo for the IPID. The IPID just needs to be unique, so instead
of maintaining a separate per-packet counter, reuse the ESN low bits.

Signed-off-by: Anoob Joseph <anoobj@marvell.com>
---
 drivers/crypto/cnxk/cn9k_cryptodev_ops.c | 69 ++++++++++++++----------
 drivers/crypto/cnxk/cn9k_ipsec.c         | 11 ++--
 drivers/crypto/cnxk/cn9k_ipsec.h         |  7 ++-
 drivers/crypto/cnxk/cn9k_ipsec_la_ops.h  | 55 +++++++++++--------
 drivers/crypto/cnxk/cnxk_cryptodev_ops.h | 12 ++---
 5 files changed, 87 insertions(+), 67 deletions(-)

diff --git a/drivers/crypto/cnxk/cn9k_cryptodev_ops.c b/drivers/crypto/cnxk/cn9k_cryptodev_ops.c
index 7720730120..8aab9c9f60 100644
--- a/drivers/crypto/cnxk/cn9k_cryptodev_ops.c
+++ b/drivers/crypto/cnxk/cn9k_cryptodev_ops.c
@@ -43,10 +43,12 @@ cn9k_cpt_sec_inst_fill(struct rte_crypto_op *op,
 		       struct cpt_inst_s *inst)
 {
 	struct rte_crypto_sym_op *sym_op = op->sym;
-	struct roc_ie_on_common_sa *common_sa;
 	struct cn9k_sec_session *priv;
-	struct roc_ie_on_sa_ctl *ctl;
 	struct cn9k_ipsec_sa *sa;
+	int ret;
+
+	priv = get_sec_session_private_data(op->sym->sec_session);
+	sa = &priv->sa;
 
 	if (unlikely(sym_op->m_dst && sym_op->m_dst != sym_op->m_src)) {
 		plt_dp_err("Out of place is not supported");
@@ -58,21 +60,17 @@ cn9k_cpt_sec_inst_fill(struct rte_crypto_op *op,
 		return -ENOTSUP;
 	}
 
-	priv = get_sec_session_private_data(op->sym->sec_session);
-	sa = &priv->sa;
-
 	if (sa->dir == RTE_SECURITY_IPSEC_SA_DIR_EGRESS)
-		return process_outb_sa(op, sa, inst);
-
-	infl_req->op_flags |= CPT_OP_FLAGS_IPSEC_DIR_INBOUND;
-
-	common_sa = &sa->in_sa.common_sa;
-	ctl = &common_sa->ctl;
-
-	if (ctl->esn_en)
-		infl_req->op_flags |= CPT_OP_FLAGS_IPSEC_INB_ESN;
+		ret = process_outb_sa(op, sa, inst);
+	else {
+		infl_req->op_flags |= CPT_OP_FLAGS_IPSEC_DIR_INBOUND;
+		process_inb_sa(op, sa, inst);
+		if (unlikely(sa->esn_en))
+			infl_req->op_flags |= CPT_OP_FLAGS_IPSEC_INB_ESN;
+		ret = 0;
+	}
 
-	return process_inb_sa(op, sa, inst);
+	return ret;
 }
 
 static inline struct cnxk_se_sess *
@@ -234,19 +232,29 @@ cn9k_cpt_enqueue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
 	};
 
 	pend_q = &qp->pend_q;
-
-	const uint64_t lmt_base = qp->lf.lmt_base;
-	const uint64_t io_addr = qp->lf.io_addr;
-	const uint64_t pq_mask = pend_q->pq_mask;
+	rte_prefetch2(pend_q);
 
 	/* Clear w0, w2, w3 of both inst */
 
+#if defined(RTE_ARCH_ARM64)
+	uint64x2_t zero = vdupq_n_u64(0);
+
+	vst1q_u64(&inst[0].w0.u64, zero);
+	vst1q_u64(&inst[1].w0.u64, zero);
+	vst1q_u64(&inst[0].w2.u64, zero);
+	vst1q_u64(&inst[1].w2.u64, zero);
+#else
 	inst[0].w0.u64 = 0;
 	inst[0].w2.u64 = 0;
 	inst[0].w3.u64 = 0;
 	inst[1].w0.u64 = 0;
 	inst[1].w2.u64 = 0;
 	inst[1].w3.u64 = 0;
+#endif
+
+	const uint64_t lmt_base = qp->lf.lmt_base;
+	const uint64_t io_addr = qp->lf.io_addr;
+	const uint64_t pq_mask = pend_q->pq_mask;
 
 	head = pend_q->head;
 	nb_allowed = pending_queue_free_cnt(head, pend_q->tail, pq_mask);
@@ -506,21 +514,26 @@ cn9k_cpt_sec_post_process(struct rte_crypto_op *cop,
 	uint16_t m_len = 0;
 	char *data;
 
-	priv = get_sec_session_private_data(cop->sym->sec_session);
-	sa = &priv->sa;
-
 	if (infl_req->op_flags & CPT_OP_FLAGS_IPSEC_DIR_INBOUND) {
-		struct roc_ie_on_common_sa *common_sa = &sa->in_sa.common_sa;
+		struct roc_ie_on_common_sa *common_sa;
 
 		data = rte_pktmbuf_mtod(m, char *);
-		if (infl_req->op_flags == CPT_OP_FLAGS_IPSEC_INB_ESN) {
-			struct roc_ie_on_inb_hdr *inb_hdr =
-				(struct roc_ie_on_inb_hdr *)data;
-			uint64_t seq = rte_be_to_cpu_64(inb_hdr->seq);
+		if (unlikely(infl_req->op_flags & CPT_OP_FLAGS_IPSEC_INB_ESN)) {
+			struct roc_ie_on_inb_hdr *inb_hdr;
+			uint64_t seq;
+
+			priv = get_sec_session_private_data(
+				sym_op->sec_session);
+			sa = &priv->sa;
+			common_sa = &sa->in_sa.common_sa;
+
+			inb_hdr = (struct roc_ie_on_inb_hdr *)data;
+			seq = rte_be_to_cpu_64(inb_hdr->seq);
 
 			if (seq > common_sa->seq_t.u64)
 				common_sa->seq_t.u64 = seq;
 		}
+
 		ip = (struct rte_ipv4_hdr *)(data + ROC_IE_ON_INB_RPTR_HDR);
 
 		if (((ip->version_ihl & 0xf0) >> RTE_IPV4_IHL_MULTIPLIER) ==
@@ -537,8 +550,6 @@ cn9k_cpt_sec_post_process(struct rte_crypto_op *cop,
 		m->data_len = m_len;
 		m->pkt_len = m_len;
 		m->data_off += ROC_IE_ON_INB_RPTR_HDR;
-	} else {
-		rte_pktmbuf_adj(m, sa->custom_hdr_len);
 	}
 }
 
diff --git a/drivers/crypto/cnxk/cn9k_ipsec.c b/drivers/crypto/cnxk/cn9k_ipsec.c
index 85f3f26c32..49a775eb7f 100644
--- a/drivers/crypto/cnxk/cn9k_ipsec.c
+++ b/drivers/crypto/cnxk/cn9k_ipsec.c
@@ -40,13 +40,8 @@ cn9k_ipsec_outb_sa_create(struct cnxk_cpt_qp *qp,
 
 	/* Initialize lookaside IPsec private data */
 	sa->dir = RTE_SECURITY_IPSEC_SA_DIR_EGRESS;
-	/* Start ip id from 1 */
-	sa->ip_id = 1;
-	sa->seq_lo = 1;
-	sa->seq_hi = 0;
 
-	if (ipsec->esn.value)
-		sa->esn = ipsec->esn.value;
+	sa->esn = ipsec->esn.value;
 
 	ret = cnxk_ipsec_outb_rlens_get(&sa->rlens, ipsec, crypto_xform);
 	if (ret)
@@ -166,10 +161,12 @@ cn9k_ipsec_inb_sa_create(struct cnxk_cpt_qp *qp,
 	}
 
 	ret = cnxk_on_ipsec_inb_sa_create(ipsec, crypto_xform, &sa->in_sa);
-
 	if (ret < 0)
 		return ret;
 
+	if (sa->in_sa.common_sa.ctl.esn_en)
+		sa->esn_en = 1;
+
 	ctx_len = ret;
 	opcode = ROC_IE_ON_MAJOR_OP_WRITE_IPSEC_INBOUND;
 	egrp = roc_cpt->eng_grp[CPT_ENG_TYPE_IE];
diff --git a/drivers/crypto/cnxk/cn9k_ipsec.h b/drivers/crypto/cnxk/cn9k_ipsec.h
index 499dbc2782..bed5976096 100644
--- a/drivers/crypto/cnxk/cn9k_ipsec.h
+++ b/drivers/crypto/cnxk/cn9k_ipsec.h
@@ -28,8 +28,6 @@ struct cn9k_ipsec_sa {
 	uint8_t custom_hdr_len;
 	/** Response length calculation data */
 	struct cnxk_ipsec_outb_rlens rlens;
-	/** Outbound IP-ID */
-	uint16_t ip_id;
 	/** ESN */
 	union {
 		uint64_t esn;
@@ -42,6 +40,11 @@ struct cn9k_ipsec_sa {
 	struct cnxk_on_ipsec_ar ar;
 	/** Anti replay window size */
 	uint32_t replay_win_sz;
+	/*
+	 * ESN enable flag. Copy of in_sa ctl.esn_en to have single cache line
+	 * access in the non-esn fastpath.
+	 */
+	uint8_t esn_en;
 	/** Queue pair */
 	struct cnxk_cpt_qp *qp;
 };
diff --git a/drivers/crypto/cnxk/cn9k_ipsec_la_ops.h b/drivers/crypto/cnxk/cn9k_ipsec_la_ops.h
index bbb4404a89..65dbb629b1 100644
--- a/drivers/crypto/cnxk/cn9k_ipsec_la_ops.h
+++ b/drivers/crypto/cnxk/cn9k_ipsec_la_ops.h
@@ -77,29 +77,36 @@ process_outb_sa(struct rte_crypto_op *cop, struct cn9k_ipsec_sa *sa,
 	const unsigned int hdr_len = sa->custom_hdr_len;
 	struct rte_crypto_sym_op *sym_op = cop->sym;
 	struct rte_mbuf *m_src = sym_op->m_src;
+	uint32_t dlen, rlen, pkt_len, seq_lo;
+	uint16_t data_off = m_src->data_off;
 	struct roc_ie_on_outb_hdr *hdr;
-	uint32_t dlen, rlen;
 	int32_t extend_tail;
+	uint64_t esn;
 
-	dlen = rte_pktmbuf_pkt_len(m_src) + hdr_len;
-	rlen = ipsec_po_out_rlen_get(sa, dlen - hdr_len);
+	pkt_len = rte_pktmbuf_pkt_len(m_src);
+	dlen = pkt_len + hdr_len;
+	rlen = ipsec_po_out_rlen_get(sa, pkt_len);
 
 	extend_tail = rlen - dlen;
 	if (unlikely(extend_tail > rte_pktmbuf_tailroom(m_src))) {
-		plt_dp_err("Not enough tail room (required: %d, available: %d",
+		plt_dp_err("Not enough tail room (required: %d, available: %d)",
 			   extend_tail, rte_pktmbuf_tailroom(m_src));
 		return -ENOMEM;
 	}
 
-	m_src->data_len += extend_tail;
-	m_src->pkt_len += extend_tail;
-
-	hdr = (struct roc_ie_on_outb_hdr *)rte_pktmbuf_prepend(m_src, hdr_len);
-	if (unlikely(hdr == NULL)) {
-		plt_dp_err("Not enough head room");
+	if (unlikely(hdr_len > data_off)) {
+		plt_dp_err("Not enough head room (required: %d, available: %d)",
+			   hdr_len, rte_pktmbuf_headroom(m_src));
 		return -ENOMEM;
 	}
 
+	pkt_len += extend_tail;
+
+	m_src->data_len = pkt_len;
+	m_src->pkt_len = pkt_len;
+
+	hdr = PLT_PTR_ADD(m_src->buf_addr, data_off - hdr_len);
+
 #ifdef LA_IPSEC_DEBUG
 	if (sa->inst.w4 & ROC_IE_ON_PER_PKT_IV) {
 		memcpy(&hdr->iv[0],
@@ -109,23 +116,28 @@ process_outb_sa(struct rte_crypto_op *cop, struct cn9k_ipsec_sa *sa,
 	}
 #endif
 
-	hdr->seq = rte_cpu_to_be_32(sa->seq_lo);
-	hdr->ip_id = rte_cpu_to_be_32(sa->ip_id);
-	hdr->esn = rte_cpu_to_be_32(sa->seq_hi);
+	esn = ++sa->esn;
+
+	/* Set ESN seq hi */
+	hdr->esn = rte_cpu_to_be_32(esn >> 32);
 
-	sa->ip_id++;
-	sa->esn++;
+	/* Set ESN seq lo */
+	seq_lo = rte_cpu_to_be_32(esn & (BIT_ULL(32) - 1));
+	hdr->seq = seq_lo;
+
+	/* Set IPID same as seq_lo */
+	hdr->ip_id = seq_lo;
 
 	/* Prepare CPT instruction */
 	inst->w4.u64 = sa->inst.w4 | dlen;
-	inst->dptr = rte_pktmbuf_iova(m_src);
-	inst->rptr = inst->dptr;
+	inst->dptr = PLT_U64_CAST(hdr);
+	inst->rptr = PLT_U64_CAST(hdr);
 	inst->w7.u64 = sa->inst.w7;
 
 	return 0;
 }
 
-static __rte_always_inline int
+static __rte_always_inline void
 process_inb_sa(struct rte_crypto_op *cop, struct cn9k_ipsec_sa *sa,
 	       struct cpt_inst_s *inst)
 {
@@ -149,16 +161,13 @@ process_inb_sa(struct rte_crypto_op *cop, struct cn9k_ipsec_sa *sa,
 			inst->dptr = rte_pktmbuf_iova(m_src);
 			inst->rptr = inst->dptr;
 			inst->w7.u64 = sa->inst.w7;
-			return 0;
+			return;
 		}
 	}
 
 	/* Prepare CPT instruction */
 	inst->w4.u64 = sa->inst.w4 | rte_pktmbuf_pkt_len(m_src);
-	inst->dptr = rte_pktmbuf_iova(m_src);
-	inst->rptr = inst->dptr;
+	inst->dptr = inst->rptr = rte_pktmbuf_iova(m_src);
 	inst->w7.u64 = sa->inst.w7;
-
-	return 0;
 }
 #endif /* __CN9K_IPSEC_LA_OPS_H__ */
diff --git a/drivers/crypto/cnxk/cnxk_cryptodev_ops.h b/drivers/crypto/cnxk/cnxk_cryptodev_ops.h
index ec99e6d660..0b41d47de9 100644
--- a/drivers/crypto/cnxk/cnxk_cryptodev_ops.h
+++ b/drivers/crypto/cnxk/cnxk_cryptodev_ops.h
@@ -70,16 +70,16 @@ struct cnxk_cpt_qp {
 	/**< Crypto LF */
 	struct pending_queue pend_q;
 	/**< Pending queue */
-	struct rte_mempool *sess_mp;
-	/**< Session mempool */
-	struct rte_mempool *sess_mp_priv;
-	/**< Session private data mempool */
-	struct cpt_qp_meta_info meta_info;
-	/**< Metabuf info required to support operations on the queue pair */
 	struct roc_cpt_lmtline lmtline;
 	/**< Lmtline information */
+	struct cpt_qp_meta_info meta_info;
+	/**< Metabuf info required to support operations on the queue pair */
 	struct crypto_adpter_info ca;
 	/**< Crypto adapter related info */
+	struct rte_mempool *sess_mp;
+	/**< Session mempool */
+	struct rte_mempool *sess_mp_priv;
+	/**< Session private data mempool */
 };
 
 int cnxk_cpt_dev_config(struct rte_cryptodev *dev,
-- 
2.25.1


^ permalink raw reply	[flat|nested] 5+ messages in thread

* [PATCH 3/3] crypto/cnxk: add anti-replay as per new firmware
  2022-06-20  7:18 [PATCH 0/3] support new full context firmware Tejasree Kondoj
  2022-06-20  7:18 ` [PATCH 1/3] crypto/cnxk: move IPsec SA creation to common Tejasree Kondoj
  2022-06-20  7:18 ` [PATCH 2/3] crypto/cnxk: improvements to fastpath handling Tejasree Kondoj
@ 2022-06-20  7:18 ` Tejasree Kondoj
  2022-06-21 11:35 ` [PATCH 0/3] support new full context firmware Akhil Goyal
  3 siblings, 0 replies; 5+ messages in thread
From: Tejasree Kondoj @ 2022-06-20  7:18 UTC (permalink / raw)
  To: Akhil Goyal
  Cc: Jerin Jacob, Anoob Joseph, Nithin Dabilpuram,
	Vidya Sagar Velumuri, Archana Muniganti, Ankur Dwivedi,
	Kiran Kumar K, Sunil Kumar Kori, Satha Rao, dev

Adding anti-replay changes as per new FP-FC microcode.

Signed-off-by: Tejasree Kondoj <ktejasree@marvell.com>
---
 drivers/common/cnxk/roc_ie_on.h               |  5 +-
 drivers/crypto/cnxk/cn9k_cryptodev_ops.c      | 63 +++++++++++++----
 drivers/crypto/cnxk/cn9k_ipsec.c              |  3 +
 drivers/crypto/cnxk/cn9k_ipsec_la_ops.h       | 68 -------------------
 .../crypto/cnxk/cnxk_cryptodev_capabilities.c |  1 +
 drivers/crypto/cnxk/cnxk_cryptodev_ops.h      |  2 +-
 6 files changed, 58 insertions(+), 84 deletions(-)

diff --git a/drivers/common/cnxk/roc_ie_on.h b/drivers/common/cnxk/roc_ie_on.h
index 37f711c643..2d93cb609c 100644
--- a/drivers/common/cnxk/roc_ie_on.h
+++ b/drivers/common/cnxk/roc_ie_on.h
@@ -18,8 +18,6 @@ enum roc_ie_on_ucc_ipsec {
 	ROC_IE_ON_UCC_SUCCESS = 0,
 	ROC_IE_ON_AUTH_UNSUPPORTED = 0xB0,
 	ROC_IE_ON_ENCRYPT_UNSUPPORTED = 0xB1,
-	/* Software defined completion code for anti-replay failed packets */
-	ROC_IE_ON_SWCC_ANTI_REPLAY = 0xE7,
 };
 
 /* Helper macros */
@@ -74,7 +72,8 @@ struct roc_ie_on_outb_hdr {
 
 struct roc_ie_on_inb_hdr {
 	uint32_t sa_index;
-	uint64_t seq;
+	uint32_t seql;
+	uint32_t seqh;
 	uint32_t pad;
 };
 
diff --git a/drivers/crypto/cnxk/cn9k_cryptodev_ops.c b/drivers/crypto/cnxk/cn9k_cryptodev_ops.c
index 8aab9c9f60..06dc18d195 100644
--- a/drivers/crypto/cnxk/cn9k_cryptodev_ops.c
+++ b/drivers/crypto/cnxk/cn9k_cryptodev_ops.c
@@ -65,8 +65,8 @@ cn9k_cpt_sec_inst_fill(struct rte_crypto_op *op,
 	else {
 		infl_req->op_flags |= CPT_OP_FLAGS_IPSEC_DIR_INBOUND;
 		process_inb_sa(op, sa, inst);
-		if (unlikely(sa->esn_en))
-			infl_req->op_flags |= CPT_OP_FLAGS_IPSEC_INB_ESN;
+		if (unlikely(sa->replay_win_sz))
+			infl_req->op_flags |= CPT_OP_FLAGS_IPSEC_INB_REPLAY;
 		ret = 0;
 	}
 
@@ -501,6 +501,45 @@ cn9k_cpt_crypto_adapter_enqueue(uintptr_t base, struct rte_crypto_op *op)
 	return 1;
 }
 
+static inline int
+ipsec_antireplay_check(struct cn9k_ipsec_sa *sa, uint32_t win_sz,
+		       struct roc_ie_on_inb_hdr *data)
+{
+	struct roc_ie_on_common_sa *common_sa;
+	struct roc_ie_on_inb_sa *in_sa;
+	struct roc_ie_on_sa_ctl *ctl;
+	uint32_t seql, seqh = 0;
+	uint64_t seq;
+	uint8_t esn;
+	int ret;
+
+	in_sa = &sa->in_sa;
+	common_sa = &in_sa->common_sa;
+	ctl = &common_sa->ctl;
+
+	esn = ctl->esn_en;
+	seql = rte_be_to_cpu_32(data->seql);
+
+	if (!esn) {
+		seq = (uint64_t)seql;
+	} else {
+		seqh = rte_be_to_cpu_32(data->seqh);
+		seq = ((uint64_t)seqh << 32) | seql;
+	}
+
+	if (unlikely(seq == 0))
+		return IPSEC_ANTI_REPLAY_FAILED;
+
+	ret = cnxk_on_anti_replay_check(seq, &sa->ar, win_sz);
+	if (esn && !ret) {
+		common_sa = &sa->in_sa.common_sa;
+		if (seq > common_sa->seq_t.u64)
+			common_sa->seq_t.u64 = seq;
+	}
+
+	return ret;
+}
+
 static inline void
 cn9k_cpt_sec_post_process(struct rte_crypto_op *cop,
 			  struct cpt_inflight_req *infl_req)
@@ -515,23 +554,23 @@ cn9k_cpt_sec_post_process(struct rte_crypto_op *cop,
 	char *data;
 
 	if (infl_req->op_flags & CPT_OP_FLAGS_IPSEC_DIR_INBOUND) {
-		struct roc_ie_on_common_sa *common_sa;
 
 		data = rte_pktmbuf_mtod(m, char *);
-		if (unlikely(infl_req->op_flags & CPT_OP_FLAGS_IPSEC_INB_ESN)) {
-			struct roc_ie_on_inb_hdr *inb_hdr;
-			uint64_t seq;
+		if (unlikely(infl_req->op_flags &
+			     CPT_OP_FLAGS_IPSEC_INB_REPLAY)) {
+			int ret;
 
 			priv = get_sec_session_private_data(
 				sym_op->sec_session);
 			sa = &priv->sa;
-			common_sa = &sa->in_sa.common_sa;
 
-			inb_hdr = (struct roc_ie_on_inb_hdr *)data;
-			seq = rte_be_to_cpu_64(inb_hdr->seq);
-
-			if (seq > common_sa->seq_t.u64)
-				common_sa->seq_t.u64 = seq;
+			ret = ipsec_antireplay_check(
+				sa, sa->replay_win_sz,
+				(struct roc_ie_on_inb_hdr *)data);
+			if (unlikely(ret)) {
+				cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
+				return;
+			}
 		}
 
 		ip = (struct rte_ipv4_hdr *)(data + ROC_IE_ON_INB_RPTR_HDR);
diff --git a/drivers/crypto/cnxk/cn9k_ipsec.c b/drivers/crypto/cnxk/cn9k_ipsec.c
index 49a775eb7f..cb9cf174a4 100644
--- a/drivers/crypto/cnxk/cn9k_ipsec.c
+++ b/drivers/crypto/cnxk/cn9k_ipsec.c
@@ -156,6 +156,9 @@ cn9k_ipsec_inb_sa_create(struct cnxk_cpt_qp *qp,
 		sa->ar.wint = sa->replay_win_sz;
 		sa->ar.base = sa->replay_win_sz;
 
+		sa->seq_lo = ipsec->esn.low;
+		sa->seq_hi = ipsec->esn.hi;
+
 		sa->in_sa.common_sa.seq_t.tl = sa->seq_lo;
 		sa->in_sa.common_sa.seq_t.th = sa->seq_hi;
 	}
diff --git a/drivers/crypto/cnxk/cn9k_ipsec_la_ops.h b/drivers/crypto/cnxk/cn9k_ipsec_la_ops.h
index 65dbb629b1..e469596756 100644
--- a/drivers/crypto/cnxk/cn9k_ipsec_la_ops.h
+++ b/drivers/crypto/cnxk/cn9k_ipsec_la_ops.h
@@ -23,53 +23,6 @@ ipsec_po_out_rlen_get(struct cn9k_ipsec_sa *sa, uint32_t plen)
 	return sa->custom_hdr_len + sa->rlens.partial_len + enc_payload_len;
 }
 
-static __rte_always_inline int
-ipsec_antireplay_check(struct cn9k_ipsec_sa *sa, uint32_t win_sz,
-		       struct rte_mbuf *m)
-{
-	uint32_t esn_low = 0, esn_hi = 0, seql = 0, seqh = 0;
-	struct roc_ie_on_common_sa *common_sa;
-	struct roc_ie_on_inb_sa *in_sa;
-	struct roc_ie_on_sa_ctl *ctl;
-	uint64_t seq_in_sa, seq = 0;
-	struct rte_esp_hdr *esp;
-	uint8_t esn;
-	int ret;
-
-	in_sa = &sa->in_sa;
-	common_sa = &in_sa->common_sa;
-	ctl = &common_sa->ctl;
-
-	esn = ctl->esn_en;
-	esn_low = rte_be_to_cpu_32(common_sa->seq_t.tl);
-	esn_hi = rte_be_to_cpu_32(common_sa->seq_t.th);
-
-	esp = rte_pktmbuf_mtod_offset(m, void *, sizeof(struct rte_ipv4_hdr));
-	seql = rte_be_to_cpu_32(esp->seq);
-
-	if (!esn) {
-		seq = (uint64_t)seql;
-	} else {
-		seqh = cnxk_on_anti_replay_get_seqh(win_sz, seql, esn_hi,
-						    esn_low);
-		seq = ((uint64_t)seqh << 32) | seql;
-	}
-
-	if (unlikely(seq == 0))
-		return IPSEC_ANTI_REPLAY_FAILED;
-
-	ret = cnxk_on_anti_replay_check(seq, &sa->ar, win_sz);
-	if (esn && !ret) {
-		seq_in_sa = ((uint64_t)esn_hi << 32) | esn_low;
-		if (seq > seq_in_sa) {
-			common_sa->seq_t.tl = rte_cpu_to_be_32(seql);
-			common_sa->seq_t.th = rte_cpu_to_be_32(seqh);
-		}
-	}
-
-	return ret;
-}
-
 static __rte_always_inline int
 process_outb_sa(struct rte_crypto_op *cop, struct cn9k_ipsec_sa *sa,
 		struct cpt_inst_s *inst)
@@ -143,27 +96,6 @@ process_inb_sa(struct rte_crypto_op *cop, struct cn9k_ipsec_sa *sa,
 {
 	struct rte_crypto_sym_op *sym_op = cop->sym;
 	struct rte_mbuf *m_src = sym_op->m_src;
-	int ret;
-
-	if (sa->replay_win_sz) {
-		ret = ipsec_antireplay_check(sa, sa->replay_win_sz, m_src);
-		if (unlikely(ret)) {
-			/* Use PASSTHROUGH op for failed antireplay packet */
-			inst->w4.u64 = 0;
-			inst->w4.s.opcode_major = ROC_SE_MAJOR_OP_MISC;
-			inst->w4.s.opcode_minor =
-				ROC_SE_MISC_MINOR_OP_PASSTHROUGH;
-			inst->w4.s.param1 = 1;
-			/* Send out completion code only */
-			inst->w4.s.param2 =
-				(ROC_IE_ON_SWCC_ANTI_REPLAY << 8) | 0x1;
-			inst->w4.s.dlen = 1;
-			inst->dptr = rte_pktmbuf_iova(m_src);
-			inst->rptr = inst->dptr;
-			inst->w7.u64 = sa->inst.w7;
-			return;
-		}
-	}
 
 	/* Prepare CPT instruction */
 	inst->w4.u64 = sa->inst.w4 | rte_pktmbuf_pkt_len(m_src);
diff --git a/drivers/crypto/cnxk/cnxk_cryptodev_capabilities.c b/drivers/crypto/cnxk/cnxk_cryptodev_capabilities.c
index ba9eaf2325..705d67e91f 100644
--- a/drivers/crypto/cnxk/cnxk_cryptodev_capabilities.c
+++ b/drivers/crypto/cnxk/cnxk_cryptodev_capabilities.c
@@ -1269,6 +1269,7 @@ cn9k_sec_caps_update(struct rte_security_capability *sec_cap)
 #endif
 	}
 	sec_cap->ipsec.replay_win_sz_max = CNXK_ON_AR_WIN_SIZE_MAX;
+	sec_cap->ipsec.options.esn = 1;
 }
 
 void
diff --git a/drivers/crypto/cnxk/cnxk_cryptodev_ops.h b/drivers/crypto/cnxk/cnxk_cryptodev_ops.h
index 0b41d47de9..ffe4ae19aa 100644
--- a/drivers/crypto/cnxk/cnxk_cryptodev_ops.h
+++ b/drivers/crypto/cnxk/cnxk_cryptodev_ops.h
@@ -33,7 +33,7 @@ struct cpt_qp_meta_info {
 #define CPT_OP_FLAGS_METABUF	       (1 << 1)
 #define CPT_OP_FLAGS_AUTH_VERIFY       (1 << 0)
 #define CPT_OP_FLAGS_IPSEC_DIR_INBOUND (1 << 2)
-#define CPT_OP_FLAGS_IPSEC_INB_ESN     (1 << 3)
+#define CPT_OP_FLAGS_IPSEC_INB_REPLAY  (1 << 3)
 
 struct cpt_inflight_req {
 	union cpt_res_s res;
-- 
2.25.1


^ permalink raw reply	[flat|nested] 5+ messages in thread

* RE: [PATCH 0/3] support new full context firmware
  2022-06-20  7:18 [PATCH 0/3] support new full context firmware Tejasree Kondoj
                   ` (2 preceding siblings ...)
  2022-06-20  7:18 ` [PATCH 3/3] crypto/cnxk: add anti-replay as per new firmware Tejasree Kondoj
@ 2022-06-21 11:35 ` Akhil Goyal
  3 siblings, 0 replies; 5+ messages in thread
From: Akhil Goyal @ 2022-06-21 11:35 UTC (permalink / raw)
  To: Tejasree Kondoj
  Cc: Jerin Jacob Kollanukkaran, Anoob Joseph, Nithin Kumar Dabilpuram,
	Vidya Sagar Velumuri, Archana Muniganti, Ankur Dwivedi,
	Kiran Kumar Kokkilagadda, Sunil Kumar Kori,
	Satha Koteswara Rao Kottidi, dev

> This series adds support for new full context
> microcode in cn9k IPsec PMD.
> 
> Anoob Joseph (1):
>   crypto/cnxk: improvements to fastpath handling
> 
> Tejasree Kondoj (1):
>   crypto/cnxk: add anti-replay as per new firmware
> 
> Vidya Sagar Velumuri (1):
>   crypto/cnxk: move IPsec SA creation to common
> 
>  drivers/common/cnxk/cnxk_security.c           | 398 ++++++++++++
>  drivers/common/cnxk/cnxk_security.h           |  11 +
>  drivers/common/cnxk/roc_cpt.c                 |  93 +++
>  drivers/common/cnxk/roc_cpt.h                 |   3 +
>  drivers/common/cnxk/roc_ie_on.h               |  24 +-
>  drivers/common/cnxk/version.map               |   3 +
>  drivers/crypto/cnxk/cn9k_cryptodev_ops.c      |  96 ++-
>  drivers/crypto/cnxk/cn9k_ipsec.c              | 606 +++---------------
>  drivers/crypto/cnxk/cn9k_ipsec.h              |   7 +-
>  drivers/crypto/cnxk/cn9k_ipsec_la_ops.h       | 125 +---
>  .../crypto/cnxk/cnxk_cryptodev_capabilities.c |   1 +
>  drivers/crypto/cnxk/cnxk_cryptodev_ops.h      |  13 +-
>  12 files changed, 736 insertions(+), 644 deletions(-)
Series
Acked-by: Akhil Goyal <gakhil@marvell.com>

Applied to dpdk-next-crypto

Thanks.

^ permalink raw reply	[flat|nested] 5+ messages in thread

end of thread, other threads:[~2022-06-21 11:35 UTC | newest]

Thread overview: 5+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2022-06-20  7:18 [PATCH 0/3] support new full context firmware Tejasree Kondoj
2022-06-20  7:18 ` [PATCH 1/3] crypto/cnxk: move IPsec SA creation to common Tejasree Kondoj
2022-06-20  7:18 ` [PATCH 2/3] crypto/cnxk: improvements to fastpath handling Tejasree Kondoj
2022-06-20  7:18 ` [PATCH 3/3] crypto/cnxk: add anti-replay as per new firmware Tejasree Kondoj
2022-06-21 11:35 ` [PATCH 0/3] support new full context firmware Akhil Goyal

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).