From: Nithin Dabilpuram <ndabilpuram@marvell.com>
To: <jerinj@marvell.com>, Nithin Dabilpuram <ndabilpuram@marvell.com>,
"Kiran Kumar K" <kirankumark@marvell.com>,
Sunil Kumar Kori <skori@marvell.com>,
Satha Rao <skoteshwar@marvell.com>,
Harman Kalra <hkalra@marvell.com>
Cc: <dev@dpdk.org>
Subject: [PATCH v2 12/33] common/cnxk: support for cn20k IPsec session
Date: Mon, 24 Feb 2025 15:08:54 +0530
Message-ID: <20250224093915.1253215-12-ndabilpuram@marvell.com>
In-Reply-To: <20250224093915.1253215-1-ndabilpuram@marvell.com>
Add support for cn20k IPsec session create/destroy.
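
A minimal usage sketch of the new inbound fill API follows. This is
illustrative only: the wrapper function, the 128B-alignment note and
the caller-side write-back step are assumptions for the example and
are not part of this patch.

    #include "cnxk_security.h"

    /* Fill a cn20k inbound SA from the IPsec and crypto transforms.
     * Assumes 'sa' points to a zeroed, 128B-aligned roc_ow_ipsec_inb_sa.
     */
    static int
    example_cn20k_inb_sa_setup(struct roc_ow_ipsec_inb_sa *sa,
                               struct rte_security_ipsec_xform *ipsec_xfrm,
                               struct rte_crypto_sym_xform *crypto_xfrm)
    {
            int rc;

            rc = cnxk_ow_ipsec_inb_sa_fill(sa, ipsec_xfrm, crypto_xfrm);
            if (rc)
                    return rc; /* -EINVAL/-ENOTSUP on unsupported combos */

            /* The fill marks w2.valid; the caller is expected to write
             * the SA back to hardware before enabling traffic on it.
             */
            return 0;
    }

The outbound path is symmetric via cnxk_ow_ipsec_outb_sa_fill().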
Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
drivers/common/cnxk/cnxk_security.c | 546 +++++++++++++++++++++++++++-
drivers/common/cnxk/cnxk_security.h | 12 +-
drivers/common/cnxk/version.map | 2 +
3 files changed, 557 insertions(+), 3 deletions(-)
diff --git a/drivers/common/cnxk/cnxk_security.c b/drivers/common/cnxk/cnxk_security.c
index 8953e901a1..3a747ed441 100644
--- a/drivers/common/cnxk/cnxk_security.c
+++ b/drivers/common/cnxk/cnxk_security.c
@@ -4,10 +4,10 @@
#include <rte_udp.h>
-#include "cnxk_security.h"
-
#include "roc_api.h"
+#include "cnxk_security.h"
+
static int
ot_ipsec_sa_common_param_fill(union roc_ot_ipsec_sa_word2 *w2, uint8_t *cipher_key,
uint8_t *salt_key, uint8_t *hmac_opad_ipad,
@@ -1183,3 +1183,545 @@ cnxk_on_ipsec_inb_sa_create(struct rte_security_ipsec_xform *ipsec,
return ctx_len;
}
+
+static int
+ow_ipsec_sa_common_param_fill(union roc_ow_ipsec_sa_word2 *w2, uint8_t *cipher_key,
+ uint8_t *salt_key, uint8_t *hmac_opad_ipad,
+ struct rte_security_ipsec_xform *ipsec_xfrm,
+ struct rte_crypto_sym_xform *crypto_xfrm)
+{
+ struct rte_crypto_sym_xform *auth_xfrm, *cipher_xfrm;
+ const uint8_t *key = NULL;
+ uint8_t ccm_flag = 0;
+ uint32_t *tmp_salt;
+ uint64_t *tmp_key;
+ int i, length = 0;
+
+ /* Set direction */
+ if (ipsec_xfrm->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS)
+ w2->s.dir = ROC_IE_SA_DIR_OUTBOUND;
+ else
+ w2->s.dir = ROC_IE_SA_DIR_INBOUND;
+
+ if (crypto_xfrm->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+ auth_xfrm = crypto_xfrm;
+ cipher_xfrm = crypto_xfrm->next;
+ } else {
+ cipher_xfrm = crypto_xfrm;
+ auth_xfrm = crypto_xfrm->next;
+ }
+
+ /* Set protocol - ESP vs AH */
+ switch (ipsec_xfrm->proto) {
+ case RTE_SECURITY_IPSEC_SA_PROTO_ESP:
+ w2->s.protocol = ROC_IE_SA_PROTOCOL_ESP;
+ break;
+ case RTE_SECURITY_IPSEC_SA_PROTO_AH:
+ w2->s.protocol = ROC_IE_SA_PROTOCOL_AH;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Set mode - transport vs tunnel */
+ switch (ipsec_xfrm->mode) {
+ case RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT:
+ w2->s.mode = ROC_IE_SA_MODE_TRANSPORT;
+ break;
+ case RTE_SECURITY_IPSEC_SA_MODE_TUNNEL:
+ w2->s.mode = ROC_IE_SA_MODE_TUNNEL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Set encryption algorithm */
+ if (crypto_xfrm->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+ key = crypto_xfrm->aead.key.data;
+ length = crypto_xfrm->aead.key.length;
+
+ switch (crypto_xfrm->aead.algo) {
+ case RTE_CRYPTO_AEAD_AES_GCM:
+ w2->s.enc_type = ROC_IE_SA_ENC_AES_GCM;
+ w2->s.auth_type = ROC_IE_SA_AUTH_NULL;
+ memcpy(salt_key, &ipsec_xfrm->salt, 4);
+ tmp_salt = (uint32_t *)salt_key;
+ *tmp_salt = rte_be_to_cpu_32(*tmp_salt);
+ break;
+ case RTE_CRYPTO_AEAD_AES_CCM:
+ w2->s.enc_type = ROC_IE_SA_ENC_AES_CCM;
+ w2->s.auth_type = ROC_IE_SA_AUTH_NULL;
+ ccm_flag = 0x07 & ~ROC_CPT_AES_CCM_CTR_LEN;
+ *salt_key = ccm_flag;
+ memcpy(PLT_PTR_ADD(salt_key, 1), &ipsec_xfrm->salt, 3);
+ tmp_salt = (uint32_t *)salt_key;
+ *tmp_salt = rte_be_to_cpu_32(*tmp_salt);
+ break;
+ default:
+ return -ENOTSUP;
+ }
+ } else {
+ if (cipher_xfrm != NULL) {
+ switch (cipher_xfrm->cipher.algo) {
+ case RTE_CRYPTO_CIPHER_NULL:
+ w2->s.enc_type = ROC_IE_SA_ENC_NULL;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ w2->s.enc_type = ROC_IE_SA_ENC_AES_CBC;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_CTR:
+ w2->s.enc_type = ROC_IE_SA_ENC_AES_CTR;
+ break;
+ case RTE_CRYPTO_CIPHER_3DES_CBC:
+ w2->s.enc_type = ROC_IE_SA_ENC_3DES_CBC;
+ break;
+ default:
+ return -ENOTSUP;
+ }
+
+ key = cipher_xfrm->cipher.key.data;
+ length = cipher_xfrm->cipher.key.length;
+ }
+
+ switch (auth_xfrm->auth.algo) {
+ case RTE_CRYPTO_AUTH_NULL:
+ if (w2->s.dir == ROC_IE_SA_DIR_INBOUND && ipsec_xfrm->replay_win_sz) {
+ plt_err("anti-replay can't be supported with integrity service disabled");
+ return -EINVAL;
+ }
+ w2->s.auth_type = ROC_IE_SA_AUTH_NULL;
+ break;
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ w2->s.auth_type = ROC_IE_SA_AUTH_SHA1;
+ break;
+ case RTE_CRYPTO_AUTH_SHA256_HMAC:
+ w2->s.auth_type = ROC_IE_SA_AUTH_SHA2_256;
+ break;
+ case RTE_CRYPTO_AUTH_SHA384_HMAC:
+ w2->s.auth_type = ROC_IE_SA_AUTH_SHA2_384;
+ break;
+ case RTE_CRYPTO_AUTH_SHA512_HMAC:
+ w2->s.auth_type = ROC_IE_SA_AUTH_SHA2_512;
+ break;
+ case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
+ w2->s.auth_type = ROC_IE_SA_AUTH_AES_XCBC_128;
+ break;
+ case RTE_CRYPTO_AUTH_AES_GMAC:
+ w2->s.auth_type = ROC_IE_SA_AUTH_AES_GMAC;
+ key = auth_xfrm->auth.key.data;
+ length = auth_xfrm->auth.key.length;
+ memcpy(salt_key, &ipsec_xfrm->salt, 4);
+ tmp_salt = (uint32_t *)salt_key;
+ *tmp_salt = rte_be_to_cpu_32(*tmp_salt);
+ break;
+ default:
+ return -ENOTSUP;
+ }
+
+ if (auth_xfrm->auth.algo == RTE_CRYPTO_AUTH_AES_XCBC_MAC) {
+ const uint8_t *auth_key = auth_xfrm->auth.key.data;
+ roc_aes_xcbc_key_derive(auth_key, hmac_opad_ipad);
+ } else {
+ roc_se_hmac_opad_ipad_gen(w2->s.auth_type, auth_xfrm->auth.key.data,
+ auth_xfrm->auth.key.length, &hmac_opad_ipad[0],
+ ROC_SE_IPSEC);
+ }
+
+ tmp_key = (uint64_t *)hmac_opad_ipad;
+ for (i = 0; i < (int)(ROC_CTX_MAX_OPAD_IPAD_LEN / sizeof(uint64_t)); i++)
+ tmp_key[i] = rte_be_to_cpu_64(tmp_key[i]);
+ }
+
+ /* Set encapsulation type */
+ if (ipsec_xfrm->options.udp_encap)
+ w2->s.encap_type = ROC_IE_OT_SA_ENCAP_UDP;
+
+ w2->s.spi = ipsec_xfrm->spi;
+
+ if (key != NULL && length != 0) {
+ /* Copy encryption key */
+ memcpy(cipher_key, key, length);
+ tmp_key = (uint64_t *)cipher_key;
+ for (i = 0; i < (int)(ROC_CTX_MAX_CKEY_LEN / sizeof(uint64_t)); i++)
+ tmp_key[i] = rte_be_to_cpu_64(tmp_key[i]);
+ }
+
+ /* Set AES key length */
+ if (w2->s.enc_type == ROC_IE_SA_ENC_AES_CBC || w2->s.enc_type == ROC_IE_SA_ENC_AES_CCM ||
+ w2->s.enc_type == ROC_IE_SA_ENC_AES_CTR || w2->s.enc_type == ROC_IE_SA_ENC_AES_GCM ||
+ w2->s.auth_type == ROC_IE_SA_AUTH_AES_GMAC) {
+ switch (length) {
+ case ROC_CPT_AES128_KEY_LEN:
+ w2->s.aes_key_len = ROC_IE_SA_AES_KEY_LEN_128;
+ break;
+ case ROC_CPT_AES192_KEY_LEN:
+ w2->s.aes_key_len = ROC_IE_SA_AES_KEY_LEN_192;
+ break;
+ case ROC_CPT_AES256_KEY_LEN:
+ w2->s.aes_key_len = ROC_IE_SA_AES_KEY_LEN_256;
+ break;
+ default:
+ plt_err("Invalid AES key length");
+ return -EINVAL;
+ }
+ }
+
+ if (ipsec_xfrm->life.packets_soft_limit != 0 || ipsec_xfrm->life.packets_hard_limit != 0) {
+ if (ipsec_xfrm->life.bytes_soft_limit != 0 ||
+ ipsec_xfrm->life.bytes_hard_limit != 0) {
+ plt_err("Expiry tracking with both packets & bytes is not supported");
+ return -EINVAL;
+ }
+ w2->s.life_unit = ROC_IE_OT_SA_LIFE_UNIT_PKTS;
+ }
+
+ if (ipsec_xfrm->life.bytes_soft_limit != 0 || ipsec_xfrm->life.bytes_hard_limit != 0) {
+ if (ipsec_xfrm->life.packets_soft_limit != 0 ||
+ ipsec_xfrm->life.packets_hard_limit != 0) {
+ plt_err("Expiry tracking with both packets & bytes is not supported");
+ return -EINVAL;
+ }
+ w2->s.life_unit = ROC_IE_OT_SA_LIFE_UNIT_OCTETS;
+ }
+
+ return 0;
+}
+
+static size_t
+ow_ipsec_inb_ctx_size(struct roc_ow_ipsec_inb_sa *sa)
+{
+ size_t size;
+
+ /* Size varies based on the anti-replay window */
+ size = offsetof(struct roc_ow_ipsec_inb_sa, ctx) +
+ offsetof(struct roc_ow_ipsec_inb_ctx_update_reg, ar_winbits);
+
+ if (sa->w0.s.ar_win)
+ size += (1 << (sa->w0.s.ar_win - 1)) * sizeof(uint64_t);
+
+ return size;
+}
+
+static void
+ow_ipsec_update_ipv6_addr_endianness(uint64_t *addr)
+{
+ *addr = rte_be_to_cpu_64(*addr);
+ addr++;
+ *addr = rte_be_to_cpu_64(*addr);
+}
+
+static int
+ow_ipsec_inb_tunnel_hdr_fill(struct roc_ow_ipsec_inb_sa *sa,
+ struct rte_security_ipsec_xform *ipsec_xfrm)
+{
+ struct rte_security_ipsec_tunnel_param *tunnel;
+
+ if (ipsec_xfrm->mode != RTE_SECURITY_IPSEC_SA_MODE_TUNNEL)
+ return 0;
+
+ if (ipsec_xfrm->options.tunnel_hdr_verify == 0)
+ return 0;
+
+ tunnel = &ipsec_xfrm->tunnel;
+
+ switch (tunnel->type) {
+ case RTE_SECURITY_IPSEC_TUNNEL_IPV4:
+ sa->w2.s.outer_ip_ver = ROC_IE_SA_IP_VERSION_4;
+ memcpy(&sa->outer_hdr.ipv4.src_addr, &tunnel->ipv4.src_ip, sizeof(struct in_addr));
+ memcpy(&sa->outer_hdr.ipv4.dst_addr, &tunnel->ipv4.dst_ip, sizeof(struct in_addr));
+
+ /* IP Source and Dest are in LE/CPU endian */
+ sa->outer_hdr.ipv4.src_addr = rte_be_to_cpu_32(sa->outer_hdr.ipv4.src_addr);
+ sa->outer_hdr.ipv4.dst_addr = rte_be_to_cpu_32(sa->outer_hdr.ipv4.dst_addr);
+
+ break;
+ case RTE_SECURITY_IPSEC_TUNNEL_IPV6:
+ sa->w2.s.outer_ip_ver = ROC_IE_SA_IP_VERSION_6;
+ memcpy(&sa->outer_hdr.ipv6.src_addr, &tunnel->ipv6.src_addr,
+ sizeof(struct in6_addr));
+ memcpy(&sa->outer_hdr.ipv6.dst_addr, &tunnel->ipv6.dst_addr,
+ sizeof(struct in6_addr));
+
+ /* IP Source and Dest are in LE/CPU endian */
+ ow_ipsec_update_ipv6_addr_endianness((uint64_t *)&sa->outer_hdr.ipv6.src_addr);
+ ow_ipsec_update_ipv6_addr_endianness((uint64_t *)&sa->outer_hdr.ipv6.dst_addr);
+
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (ipsec_xfrm->options.tunnel_hdr_verify) {
+ case RTE_SECURITY_IPSEC_TUNNEL_VERIFY_DST_ADDR:
+ sa->w2.s.ip_hdr_verify = ROC_IE_OT_SA_IP_HDR_VERIFY_DST_ADDR;
+ break;
+ case RTE_SECURITY_IPSEC_TUNNEL_VERIFY_SRC_DST_ADDR:
+ sa->w2.s.ip_hdr_verify = ROC_IE_OT_SA_IP_HDR_VERIFY_SRC_DST_ADDR;
+ break;
+ default:
+ return -ENOTSUP;
+ }
+
+ return 0;
+}
+
+int
+cnxk_ow_ipsec_inb_sa_fill(struct roc_ow_ipsec_inb_sa *sa,
+ struct rte_security_ipsec_xform *ipsec_xfrm,
+ struct rte_crypto_sym_xform *crypto_xfrm)
+{
+ uint16_t sport = 4500, dport = 4500;
+ union roc_ow_ipsec_sa_word2 w2;
+ uint32_t replay_win_sz;
+ size_t offset;
+ int rc;
+
+ /* Initialize the SA */
+ roc_ow_ipsec_inb_sa_init(sa);
+
+ w2.u64 = 0;
+ rc = ow_ipsec_sa_common_param_fill(&w2, sa->cipher_key, sa->w8.s.salt, sa->hmac_opad_ipad,
+ ipsec_xfrm, crypto_xfrm);
+ if (rc)
+ return rc;
+
+ /* Update common word2 data */
+ sa->w2.u64 = w2.u64;
+
+ /* Only power-of-two window sizes are supported */
+ replay_win_sz = ipsec_xfrm->replay_win_sz;
+ if (replay_win_sz) {
+ if (!rte_is_power_of_2(replay_win_sz) || replay_win_sz > ROC_AR_WIN_SIZE_MAX)
+ return -ENOTSUP;
+
+ sa->w0.s.ar_win = rte_log2_u32(replay_win_sz) - 5;
+ }
+
+ rc = ow_ipsec_inb_tunnel_hdr_fill(sa, ipsec_xfrm);
+ if (rc)
+ return rc;
+
+ /* Default options: pkt_format with second pass meta and
+ * pkt_output with no defrag.
+ */
+ sa->w0.s.pkt_format = ROC_IE_OT_SA_PKT_FMT_META;
+ sa->w0.s.pkt_output = ROC_IE_OT_SA_PKT_OUTPUT_NO_FRAG;
+ sa->w0.s.pkind = ROC_IE_OT_CPT_PKIND;
+
+ if (ipsec_xfrm->options.ip_reassembly_en)
+ sa->w0.s.pkt_output = ROC_IE_OT_SA_PKT_OUTPUT_HW_BASED_DEFRAG;
+
+ /* ESN */
+ sa->w2.s.esn_en = !!ipsec_xfrm->options.esn;
+ if (ipsec_xfrm->options.udp_encap) {
+ if (ipsec_xfrm->udp.sport)
+ sport = ipsec_xfrm->udp.sport;
+
+ if (ipsec_xfrm->udp.dport)
+ dport = ipsec_xfrm->udp.dport;
+
+ sa->w10.s.udp_src_port = sport;
+ sa->w10.s.udp_dst_port = dport;
+ }
+
+ if (ipsec_xfrm->options.udp_ports_verify)
+ sa->w2.s.udp_ports_verify = 1;
+
+ offset = offsetof(struct roc_ow_ipsec_inb_sa, ctx);
+ /* Word offset for HW managed SA field */
+ sa->w0.s.hw_ctx_off = offset / 8;
+ /* Context push size for inbound spans up to hw_ctx including
+ * ar_base field, in 8B units
+ */
+ sa->w0.s.ctx_push_size = sa->w0.s.hw_ctx_off + 1;
+ /* Entire context size in 128B units */
+ sa->w0.s.ctx_size =
+ (PLT_ALIGN_CEIL(ow_ipsec_inb_ctx_size(sa), ROC_CTX_UNIT_128B) / ROC_CTX_UNIT_128B) -
+ 1;
+
+ /**
+ * CPT MC triggers expiry when counter value changes from 2 to 1. To
+ * mitigate this behaviour add 1 to the life counter values provided.
+ */
+
+ if (ipsec_xfrm->life.bytes_soft_limit) {
+ sa->ctx.soft_life = ipsec_xfrm->life.bytes_soft_limit + 1;
+ sa->w0.s.soft_life_dec = 1;
+ }
+
+ if (ipsec_xfrm->life.packets_soft_limit) {
+ sa->ctx.soft_life = ipsec_xfrm->life.packets_soft_limit + 1;
+ sa->w0.s.soft_life_dec = 1;
+ }
+
+ if (ipsec_xfrm->life.bytes_hard_limit) {
+ sa->ctx.hard_life = ipsec_xfrm->life.bytes_hard_limit + 1;
+ sa->w0.s.hard_life_dec = 1;
+ }
+
+ if (ipsec_xfrm->life.packets_hard_limit) {
+ sa->ctx.hard_life = ipsec_xfrm->life.packets_hard_limit + 1;
+ sa->w0.s.hard_life_dec = 1;
+ }
+
+ rte_wmb();
+
+ /* Enable SA */
+ sa->w2.s.valid = 1;
+ return 0;
+}
+
+int
+cnxk_ow_ipsec_outb_sa_fill(struct roc_ow_ipsec_outb_sa *sa,
+ struct rte_security_ipsec_xform *ipsec_xfrm,
+ struct rte_crypto_sym_xform *crypto_xfrm)
+{
+ struct rte_security_ipsec_tunnel_param *tunnel = &ipsec_xfrm->tunnel;
+ uint16_t sport = 4500, dport = 4500;
+ union roc_ow_ipsec_sa_word2 w2;
+ size_t offset;
+ int rc;
+
+ /* Initialize the SA */
+ roc_ow_ipsec_outb_sa_init(sa);
+
+ w2.u64 = 0;
+ rc = ow_ipsec_sa_common_param_fill(&w2, sa->cipher_key, sa->iv.s.salt, sa->hmac_opad_ipad,
+ ipsec_xfrm, crypto_xfrm);
+ if (rc)
+ return rc;
+
+ /* Update common word2 data */
+ sa->w2.u64 = w2.u64;
+
+ if (ipsec_xfrm->mode != RTE_SECURITY_IPSEC_SA_MODE_TUNNEL)
+ goto skip_tunnel_info;
+
+ /* Tunnel header info */
+ switch (tunnel->type) {
+ case RTE_SECURITY_IPSEC_TUNNEL_IPV4:
+ sa->w2.s.outer_ip_ver = ROC_IE_SA_IP_VERSION_4;
+ memcpy(&sa->outer_hdr.ipv4.src_addr, &tunnel->ipv4.src_ip, sizeof(struct in_addr));
+ memcpy(&sa->outer_hdr.ipv4.dst_addr, &tunnel->ipv4.dst_ip, sizeof(struct in_addr));
+
+ /* IP Source and Dest are in LE/CPU endian */
+ sa->outer_hdr.ipv4.src_addr = rte_be_to_cpu_32(sa->outer_hdr.ipv4.src_addr);
+ sa->outer_hdr.ipv4.dst_addr = rte_be_to_cpu_32(sa->outer_hdr.ipv4.dst_addr);
+
+ /* Outer header DF bit source */
+ if (!ipsec_xfrm->options.copy_df) {
+ sa->w2.s.ipv4_df_src_or_ipv6_flw_lbl_src = ROC_IE_OT_SA_COPY_FROM_SA;
+ sa->w10.s.ipv4_df_or_ipv6_flw_lbl = tunnel->ipv4.df;
+ } else {
+ sa->w2.s.ipv4_df_src_or_ipv6_flw_lbl_src =
+ ROC_IE_OT_SA_COPY_FROM_INNER_IP_HDR;
+ }
+
+ /* Outer header DSCP source */
+ if (!ipsec_xfrm->options.copy_dscp) {
+ sa->w2.s.dscp_src = ROC_IE_OT_SA_COPY_FROM_SA;
+ sa->w10.s.dscp = tunnel->ipv4.dscp;
+ } else {
+ sa->w2.s.dscp_src = ROC_IE_OT_SA_COPY_FROM_INNER_IP_HDR;
+ }
+ break;
+ case RTE_SECURITY_IPSEC_TUNNEL_IPV6:
+ sa->w2.s.outer_ip_ver = ROC_IE_SA_IP_VERSION_6;
+ memcpy(&sa->outer_hdr.ipv6.src_addr, &tunnel->ipv6.src_addr,
+ sizeof(struct in6_addr));
+ memcpy(&sa->outer_hdr.ipv6.dst_addr, &tunnel->ipv6.dst_addr,
+ sizeof(struct in6_addr));
+
+ /* IP Source and Dest are in LE/CPU endian */
+ ow_ipsec_update_ipv6_addr_endianness((uint64_t *)&sa->outer_hdr.ipv6.src_addr);
+ ow_ipsec_update_ipv6_addr_endianness((uint64_t *)&sa->outer_hdr.ipv6.dst_addr);
+
+ /* Outer header flow label source */
+ if (!ipsec_xfrm->options.copy_flabel) {
+ sa->w2.s.ipv4_df_src_or_ipv6_flw_lbl_src = ROC_IE_OT_SA_COPY_FROM_SA;
+
+ sa->w10.s.ipv4_df_or_ipv6_flw_lbl = tunnel->ipv6.flabel;
+ } else {
+ sa->w2.s.ipv4_df_src_or_ipv6_flw_lbl_src =
+ ROC_IE_OT_SA_COPY_FROM_INNER_IP_HDR;
+ }
+
+ /* Outer header DSCP source */
+ if (!ipsec_xfrm->options.copy_dscp) {
+ sa->w2.s.dscp_src = ROC_IE_OT_SA_COPY_FROM_SA;
+ sa->w10.s.dscp = tunnel->ipv6.dscp;
+ } else {
+ sa->w2.s.dscp_src = ROC_IE_OT_SA_COPY_FROM_INNER_IP_HDR;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+skip_tunnel_info:
+ /* ESN */
+ sa->w0.s.esn_en = !!ipsec_xfrm->options.esn;
+
+ if (ipsec_xfrm->esn.value)
+ sa->ctx.esn_val = ipsec_xfrm->esn.value - 1;
+
+ if (ipsec_xfrm->options.udp_encap) {
+ if (ipsec_xfrm->udp.sport)
+ sport = ipsec_xfrm->udp.sport;
+
+ if (ipsec_xfrm->udp.dport)
+ dport = ipsec_xfrm->udp.dport;
+
+ sa->w10.s.udp_src_port = sport;
+ sa->w10.s.udp_dst_port = dport;
+ }
+
+ offset = offsetof(struct roc_ow_ipsec_outb_sa, ctx);
+ /* Word offset for HW managed SA field */
+ sa->w0.s.hw_ctx_off = offset / 8;
+
+ /* Context push size is up to err ctl in HW ctx */
+ sa->w0.s.ctx_push_size = sa->w0.s.hw_ctx_off + 1;
+
+ /* Entire context size in 128B units */
+ offset = sizeof(struct roc_ow_ipsec_outb_sa);
+ sa->w0.s.ctx_size = (PLT_ALIGN_CEIL(offset, ROC_CTX_UNIT_128B) / ROC_CTX_UNIT_128B) - 1;
+
+ /* IPID gen */
+ sa->w2.s.ipid_gen = 1;
+
+ /**
+ * CPT MC triggers expiry when counter value changes from 2 to 1. To
+ * mitigate this behaviour add 1 to the life counter values provided.
+ */
+
+ if (ipsec_xfrm->life.bytes_soft_limit) {
+ sa->ctx.soft_life = ipsec_xfrm->life.bytes_soft_limit + 1;
+ sa->w0.s.soft_life_dec = 1;
+ }
+
+ if (ipsec_xfrm->life.packets_soft_limit) {
+ sa->ctx.soft_life = ipsec_xfrm->life.packets_soft_limit + 1;
+ sa->w0.s.soft_life_dec = 1;
+ }
+
+ if (ipsec_xfrm->life.bytes_hard_limit) {
+ sa->ctx.hard_life = ipsec_xfrm->life.bytes_hard_limit + 1;
+ sa->w0.s.hard_life_dec = 1;
+ }
+
+ if (ipsec_xfrm->life.packets_hard_limit) {
+ sa->ctx.hard_life = ipsec_xfrm->life.packets_hard_limit + 1;
+ sa->w0.s.hard_life_dec = 1;
+ }
+
+ /* There are two words of CPT_CTX_HW_S for ucode to skip */
+ sa->w0.s.ctx_hdr_size = 1;
+ sa->w0.s.aop_valid = 1;
+
+ rte_wmb();
+
+ /* Enable SA */
+ sa->w2.s.valid = 1;
+ return 0;
+}
diff --git a/drivers/common/cnxk/cnxk_security.h b/drivers/common/cnxk/cnxk_security.h
index 8ede6c88a3..e324fa2cb9 100644
--- a/drivers/common/cnxk/cnxk_security.h
+++ b/drivers/common/cnxk/cnxk_security.h
@@ -10,6 +10,7 @@
#include "roc_cpt.h"
#include "roc_ie_on.h"
#include "roc_ie_ot.h"
+#include "roc_ie_ow.h"
/* Response length calculation data */
struct cnxk_ipsec_outb_rlens {
@@ -36,7 +37,7 @@ uint8_t __roc_api
cnxk_ipsec_outb_roundup_byte(enum rte_crypto_cipher_algorithm c_algo,
enum rte_crypto_aead_algorithm aead_algo);
-/* [CN10K, .) */
+/* [CN10K] */
int __roc_api
cnxk_ot_ipsec_inb_sa_fill(struct roc_ot_ipsec_inb_sa *sa,
struct rte_security_ipsec_xform *ipsec_xfrm,
@@ -56,4 +57,13 @@ int __roc_api cnxk_on_ipsec_inb_sa_create(struct rte_security_ipsec_xform *ipsec
int __roc_api cnxk_on_ipsec_outb_sa_create(struct rte_security_ipsec_xform *ipsec,
struct rte_crypto_sym_xform *crypto_xform,
struct roc_ie_on_outb_sa *out_sa);
+/* [CN20K, .) */
+int __roc_api cnxk_ow_ipsec_inb_sa_fill(struct roc_ow_ipsec_inb_sa *sa,
+ struct rte_security_ipsec_xform *ipsec_xfrm,
+ struct rte_crypto_sym_xform *crypto_xfrm);
+int __roc_api cnxk_ow_ipsec_outb_sa_fill(struct roc_ow_ipsec_outb_sa *sa,
+ struct rte_security_ipsec_xform *ipsec_xfrm,
+ struct rte_crypto_sym_xform *crypto_xfrm);
+bool __roc_api cnxk_ow_ipsec_inb_sa_valid(struct roc_ow_ipsec_inb_sa *sa);
+bool __roc_api cnxk_ow_ipsec_outb_sa_valid(struct roc_ow_ipsec_outb_sa *sa);
#endif /* _CNXK_SECURITY_H__ */
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index 03b37d7841..6a3c28e8a9 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -23,6 +23,8 @@ INTERNAL {
cnxk_ot_ipsec_outb_sa_fill;
cnxk_ot_ipsec_inb_sa_valid;
cnxk_ot_ipsec_outb_sa_valid;
+ cnxk_ow_ipsec_inb_sa_fill;
+ cnxk_ow_ipsec_outb_sa_fill;
cnxk_on_ipsec_inb_sa_create;
cnxk_on_ipsec_outb_sa_create;
roc_ae_ec_grp_get;
--
2.34.1