DPDK patches and discussions
From: Nithin Dabilpuram <ndabilpuram@marvell.com>
To: Nithin Dabilpuram <ndabilpuram@marvell.com>,
	Kiran Kumar K <kirankumark@marvell.com>,
	Sunil Kumar Kori <skori@marvell.com>,
	Satha Rao <skoteshwar@marvell.com>, Ray Kinsella <mdr@ashroe.eu>,
	Anatoly Burakov <anatoly.burakov@intel.com>
Cc: <jerinj@marvell.com>, <schalla@marvell.com>, <dev@dpdk.org>
Subject: [dpdk-dev] [PATCH 15/27] net/cnxk: add inline security support for cn9k
Date: Thu, 2 Sep 2021 07:44:53 +0530	[thread overview]
Message-ID: <20210902021505.17607-16-ndabilpuram@marvell.com> (raw)
In-Reply-To: <20210902021505.17607-1-ndabilpuram@marvell.com>

Add support for inline inbound and outbound IPsec offload for cn9k: SA
create and destroy operations, along with the associated NIX and CPT LF
configuration.
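
As an illustration, an application requests this offload through the
generic rte_security API on the ethdev security context. A minimal,
untested sketch (field values illustrative; the AES-GCM xform, port_id
and session mempool are assumed to be set up elsewhere):

    struct rte_security_session_conf conf = {
            .action_type = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
            .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
            .ipsec = {
                    .spi = 42,
                    .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
                    .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
                    .direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
            },
            .crypto_xform = &aead_xform,
    };
    void *ctx = rte_eth_dev_get_sec_ctx(port_id);
    /* ctx and conf are then passed to rte_security_session_create() */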

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
 drivers/net/cnxk/cn9k_ethdev.c         |  23 +++
 drivers/net/cnxk/cn9k_ethdev.h         |  61 +++++++
 drivers/net/cnxk/cn9k_ethdev_sec.c     | 313 +++++++++++++++++++++++++++++++++
 drivers/net/cnxk/cn9k_rx.h             |   1 +
 drivers/net/cnxk/cn9k_tx.h             |   1 +
 drivers/net/cnxk/cnxk_ethdev.c         | 214 +++++++++++++++++++++-
 drivers/net/cnxk/cnxk_ethdev.h         | 121 ++++++++++++-
 drivers/net/cnxk/cnxk_ethdev_devargs.c |  88 ++++++++-
 drivers/net/cnxk/cnxk_ethdev_sec.c     | 278 +++++++++++++++++++++++++++++
 drivers/net/cnxk/cnxk_lookup.c         |  50 +++++-
 drivers/net/cnxk/meson.build           |   2 +
 drivers/net/cnxk/version.map           |   5 +
 12 files changed, 1146 insertions(+), 11 deletions(-)
 create mode 100644 drivers/net/cnxk/cn9k_ethdev_sec.c
 create mode 100644 drivers/net/cnxk/cnxk_ethdev_sec.c

diff --git a/drivers/net/cnxk/cn9k_ethdev.c b/drivers/net/cnxk/cn9k_ethdev.c
index 115e678..08c86f9 100644
--- a/drivers/net/cnxk/cn9k_ethdev.c
+++ b/drivers/net/cnxk/cn9k_ethdev.c
@@ -36,6 +36,9 @@ nix_rx_offload_flags(struct rte_eth_dev *eth_dev)
 	if (!dev->ptype_disable)
 		flags |= NIX_RX_OFFLOAD_PTYPE_F;
 
+	if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY)
+		flags |= NIX_RX_OFFLOAD_SECURITY_F;
+
 	return flags;
 }
 
@@ -101,6 +104,9 @@ nix_tx_offload_flags(struct rte_eth_dev *eth_dev)
 	if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP))
 		flags |= NIX_TX_OFFLOAD_TSTAMP_F;
 
+	if (dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY)
+		flags |= NIX_TX_OFFLOAD_SECURITY_F;
+
 	return flags;
 }
 
@@ -179,8 +185,10 @@ cn9k_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
 			const struct rte_eth_txconf *tx_conf)
 {
 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	struct roc_cpt_lf *inl_lf;
 	struct cn9k_eth_txq *txq;
 	struct roc_nix_sq *sq;
+	uint16_t crypto_qid;
 	int rc;
 
 	RTE_SET_USED(socket);
@@ -200,6 +208,19 @@ cn9k_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
 	txq->nb_sqb_bufs_adj = sq->nb_sqb_bufs_adj;
 	txq->sqes_per_sqb_log2 = sq->sqes_per_sqb_log2;
 
+	/* Fetch CPT LF info for outbound if present */
+	if (dev->outb.lf_base) {
+		crypto_qid = qid % dev->outb.nb_crypto_qs;
+		inl_lf = dev->outb.lf_base + crypto_qid;
+
+		txq->cpt_io_addr = inl_lf->io_addr;
+		txq->cpt_fc = inl_lf->fc_addr;
+		txq->cpt_desc = inl_lf->nb_desc * 0.7;
+		txq->sa_base = (uint64_t)dev->outb.sa_base;
+		txq->sa_base |= eth_dev->data->port_id;
+		PLT_STATIC_ASSERT(BIT_ULL(16) == ROC_NIX_INL_SA_BASE_ALIGN);
+	}
+
 	nix_form_default_desc(dev, txq, qid);
 	txq->lso_tun_fmt = dev->lso_tun_fmt;
 	return 0;
@@ -508,6 +529,8 @@ cn9k_nix_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 	nix_eth_dev_ops_override();
 	npc_flow_ops_override();
 
+	cn9k_eth_sec_ops_override();
+
 	/* Common probe */
 	rc = cnxk_nix_probe(pci_drv, pci_dev);
 	if (rc)
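
The sa_base | port_id packing in the Tx queue setup above relies on the
64 KB alignment of the SA base (enforced by the PLT_STATIC_ASSERT), which
leaves the low 16 bits of the address free to carry the port id. A
hypothetical unpacking helper on the consumer side (names illustrative):

    static inline uint16_t
    txq_sa_port(uint64_t sa_base)
    {
            return sa_base & (ROC_NIX_INL_SA_BASE_ALIGN - 1);
    }

    static inline uint64_t
    txq_sa_addr(uint64_t sa_base)
    {
            return sa_base & ~((uint64_t)ROC_NIX_INL_SA_BASE_ALIGN - 1);
    }
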
diff --git a/drivers/net/cnxk/cn9k_ethdev.h b/drivers/net/cnxk/cn9k_ethdev.h
index 3d4a206..f8818b8 100644
--- a/drivers/net/cnxk/cn9k_ethdev.h
+++ b/drivers/net/cnxk/cn9k_ethdev.h
@@ -5,6 +5,7 @@
 #define __CN9K_ETHDEV_H__
 
 #include <cnxk_ethdev.h>
+#include <cnxk_security.h>
 
 struct cn9k_eth_txq {
 	uint64_t cmd[8];
@@ -15,6 +16,10 @@ struct cn9k_eth_txq {
 	uint64_t lso_tun_fmt;
 	uint16_t sqes_per_sqb_log2;
 	int16_t nb_sqb_bufs_adj;
+	rte_iova_t cpt_io_addr;
+	uint64_t sa_base;
+	uint64_t *cpt_fc;
+	uint16_t cpt_desc;
 } __plt_cache_aligned;
 
 struct cn9k_eth_rxq {
@@ -32,8 +37,64 @@ struct cn9k_eth_rxq {
 	struct cnxk_timesync_info *tstamp;
 } __plt_cache_aligned;
 
+/* Private data in sw rsvd area of struct roc_onf_ipsec_inb_sa */
+struct cn9k_inb_priv_data {
+	void *userdata;
+	struct cnxk_eth_sec_sess *eth_sec;
+};
+
+/* Private data in sw rsvd area of struct roc_onf_ipsec_outb_sa */
+struct cn9k_outb_priv_data {
+	union {
+		uint64_t esn;
+		struct {
+			uint32_t seq;
+			uint32_t esn_hi;
+		};
+	};
+
+	/* Rlen computation data */
+	struct cnxk_ipsec_outb_rlens rlens;
+
+	/* IP identifier */
+	uint16_t ip_id;
+
+	/* SA index */
+	uint32_t sa_idx;
+
+	/* Flags */
+	uint16_t copy_salt : 1;
+
+	/* Salt */
+	uint32_t nonce;
+
+	/* User data pointer */
+	void *userdata;
+
+	/* Back pointer to eth sec session */
+	struct cnxk_eth_sec_sess *eth_sec;
+};
+
+struct cn9k_sec_sess_priv {
+	union {
+		struct {
+			uint32_t sa_idx;
+			uint8_t inb_sa : 1;
+			uint8_t rsvd1 : 2;
+			uint8_t roundup_byte : 5;
+			uint8_t roundup_len;
+			uint16_t partial_len;
+		};
+
+		uint64_t u64;
+	};
+} __rte_packed;
+
 /* Rx and Tx routines */
 void cn9k_eth_set_rx_function(struct rte_eth_dev *eth_dev);
 void cn9k_eth_set_tx_function(struct rte_eth_dev *eth_dev);
 
+/* Security context setup */
+void cn9k_eth_sec_ops_override(void);
+
 #endif /* __CN9K_ETHDEV_H__ */
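
The cn9k_sec_sess_priv union above is deliberately sized to one machine
word so the fast path can recover all per-session fields with a single
64-bit load of the session private data. A minimal read-side sketch
(assuming the rte_security private data accessor of this DPDK revision):

    struct cn9k_sec_sess_priv priv;

    priv.u64 = (uint64_t)get_sec_session_private_data(sess);
    if (priv.inb_sa) {
            /* Inbound: priv.sa_idx indexes NIX_RX_IPSEC_SA_BASE */
    } else {
            /* Outbound: priv.roundup_* and priv.partial_len feed
             * the rlen computation
             */
    }
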
diff --git a/drivers/net/cnxk/cn9k_ethdev_sec.c b/drivers/net/cnxk/cn9k_ethdev_sec.c
new file mode 100644
index 0000000..3ec7497
--- /dev/null
+++ b/drivers/net/cnxk/cn9k_ethdev_sec.c
@@ -0,0 +1,313 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+
+#include <rte_cryptodev.h>
+#include <rte_security.h>
+#include <rte_security_driver.h>
+
+#include <cn9k_ethdev.h>
+#include <cnxk_security.h>
+
+static struct rte_cryptodev_capabilities cn9k_eth_sec_crypto_caps[] = {
+	{	/* AES GCM */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+			{.aead = {
+				.algo = RTE_CRYPTO_AEAD_AES_GCM,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 8
+				},
+				.digest_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.aad_size = {
+					.min = 8,
+					.max = 12,
+					.increment = 4
+				},
+				.iv_size = {
+					.min = 12,
+					.max = 12,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+static const struct rte_security_capability cn9k_eth_sec_capabilities[] = {
+	{	/* IPsec Inline Protocol ESP Tunnel Ingress */
+		.action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
+		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+		.ipsec = {
+			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
+			.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
+			.options = { 0 }
+		},
+		.crypto_capabilities = cn9k_eth_sec_crypto_caps,
+		.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
+	},
+	{	/* IPsec Inline Protocol ESP Tunnel Egress */
+		.action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
+		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+		.ipsec = {
+			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
+			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
+			.options = { 0 }
+		},
+		.crypto_capabilities = cn9k_eth_sec_crypto_caps,
+		.ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
+	},
+	{
+		.action = RTE_SECURITY_ACTION_TYPE_NONE
+	}
+};
+
+static int
+cn9k_eth_sec_session_create(void *device,
+			    struct rte_security_session_conf *conf,
+			    struct rte_security_session *sess,
+			    struct rte_mempool *mempool)
+{
+	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	struct rte_security_ipsec_xform *ipsec;
+	struct cn9k_sec_sess_priv sess_priv;
+	struct rte_crypto_sym_xform *crypto;
+	struct cnxk_eth_sec_sess *eth_sec;
+	bool inbound;
+	int rc = 0;
+
+	if (conf->action_type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
+		return -ENOTSUP;
+
+	if (conf->protocol != RTE_SECURITY_PROTOCOL_IPSEC)
+		return -ENOTSUP;
+
+	if (rte_security_dynfield_register() < 0)
+		return -ENOTSUP;
+
+	ipsec = &conf->ipsec;
+	crypto = conf->crypto_xform;
+	inbound = !!(ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS);
+
+	/* Search if a session already exists */
+	if (cnxk_eth_sec_sess_get_by_spi(dev, ipsec->spi, inbound)) {
+		plt_err("%s SA with SPI %u already in use",
+			inbound ? "Inbound" : "Outbound", ipsec->spi);
+		return -EEXIST;
+	}
+
+	if (rte_mempool_get(mempool, (void **)&eth_sec)) {
+		plt_err("Could not allocate security session private data");
+		return -ENOMEM;
+	}
+
+	memset(eth_sec, 0, sizeof(struct cnxk_eth_sec_sess));
+	sess_priv.u64 = 0;
+
+	if (inbound) {
+		struct cn9k_inb_priv_data *inb_priv;
+		struct roc_onf_ipsec_inb_sa *inb_sa;
+
+		PLT_STATIC_ASSERT(sizeof(struct cn9k_inb_priv_data) <
+				  ROC_NIX_INL_ONF_IPSEC_INB_SW_RSVD);
+
+		/* Get the inbound SA from NIX_RX_IPSEC_SA_BASE. CN9K is
+		 * assumed to never use an inline device here.
+		 */
+		inb_sa = (struct roc_onf_ipsec_inb_sa *)
+			roc_nix_inl_inb_sa_get(&dev->nix, false, ipsec->spi);
+		if (!inb_sa) {
+			plt_err("Failed to create ingress sa");
+			rc = -EFAULT;
+			goto mempool_put;
+		}
+
+		/* Check if SA is already in use */
+		if (inb_sa->ctl.valid) {
+			plt_err("Inbound SA with SPI %u already in use",
+				ipsec->spi);
+			rc = -EBUSY;
+			goto mempool_put;
+		}
+
+		memset(inb_sa, 0, sizeof(struct roc_onf_ipsec_inb_sa));
+
+		/* Fill inbound sa params */
+		rc = cnxk_onf_ipsec_inb_sa_fill(inb_sa, ipsec, crypto);
+		if (rc) {
+			plt_err("Failed to init inbound sa, rc=%d", rc);
+			goto mempool_put;
+		}
+
+		inb_priv = roc_nix_inl_onf_ipsec_inb_sa_sw_rsvd(inb_sa);
+		/* Back pointer to get eth_sec */
+		inb_priv->eth_sec = eth_sec;
+
+		/* Save userdata in inb private area */
+		inb_priv->userdata = conf->userdata;
+
+		sess_priv.inb_sa = 1;
+		sess_priv.sa_idx = ipsec->spi;
+
+		/* Pointer from eth_sec -> inb_sa */
+		eth_sec->sa = inb_sa;
+		eth_sec->sess = sess;
+		eth_sec->sa_idx = ipsec->spi;
+		eth_sec->spi = ipsec->spi;
+		eth_sec->inb = true;
+
+		TAILQ_INSERT_TAIL(&dev->inb.list, eth_sec, entry);
+		dev->inb.nb_sess++;
+	} else {
+		struct cn9k_outb_priv_data *outb_priv;
+		struct roc_onf_ipsec_outb_sa *outb_sa;
+		uintptr_t sa_base = dev->outb.sa_base;
+		struct cnxk_ipsec_outb_rlens *rlens;
+		uint32_t sa_idx;
+
+		PLT_STATIC_ASSERT(sizeof(struct cn9k_outb_priv_data) <
+				  ROC_NIX_INL_ONF_IPSEC_OUTB_SW_RSVD);
+
+		/* Alloc an sa index */
+		rc = cnxk_eth_outb_sa_idx_get(dev, &sa_idx);
+		if (rc)
+			goto mempool_put;
+
+		outb_sa = roc_nix_inl_onf_ipsec_outb_sa(sa_base, sa_idx);
+		outb_priv = roc_nix_inl_onf_ipsec_outb_sa_sw_rsvd(outb_sa);
+		rlens = &outb_priv->rlens;
+
+		memset(outb_sa, 0, sizeof(struct roc_onf_ipsec_outb_sa));
+
+		/* Fill outbound sa params */
+		rc = cnxk_onf_ipsec_outb_sa_fill(outb_sa, ipsec, crypto);
+		if (rc) {
+			plt_err("Failed to init outbound sa, rc=%d", rc);
+			rc |= cnxk_eth_outb_sa_idx_put(dev, sa_idx);
+			goto mempool_put;
+		}
+
+		/* Save userdata */
+		outb_priv->userdata = conf->userdata;
+		outb_priv->sa_idx = sa_idx;
+		outb_priv->eth_sec = eth_sec;
+		/* Start sequence number with 1 */
+		outb_priv->seq = 1;
+
+		memcpy(&outb_priv->nonce, outb_sa->nonce, 4);
+		if (outb_sa->ctl.enc_type == ROC_IE_ON_SA_ENC_AES_GCM)
+			outb_priv->copy_salt = 1;
+
+		/* Save rlen info */
+		cnxk_ipsec_outb_rlens_get(rlens, ipsec, crypto);
+
+		sess_priv.sa_idx = outb_priv->sa_idx;
+		sess_priv.roundup_byte = rlens->roundup_byte;
+		sess_priv.roundup_len = rlens->roundup_len;
+		sess_priv.partial_len = rlens->partial_len;
+
+		/* Pointer from eth_sec -> outb_sa */
+		eth_sec->sa = outb_sa;
+		eth_sec->sess = sess;
+		eth_sec->sa_idx = sa_idx;
+		eth_sec->spi = ipsec->spi;
+
+		TAILQ_INSERT_TAIL(&dev->outb.list, eth_sec, entry);
+		dev->outb.nb_sess++;
+	}
+
+	/* Sync SA content */
+	plt_atomic_thread_fence(__ATOMIC_ACQ_REL);
+
+	plt_nix_dbg("Created %s session with spi=%u, sa_idx=%u",
+		    inbound ? "inbound" : "outbound", eth_sec->spi,
+		    eth_sec->sa_idx);
+	/*
+	 * Update fast path info in priv area.
+	 */
+	set_sec_session_private_data(sess, (void *)sess_priv.u64);
+
+	return 0;
+mempool_put:
+	rte_mempool_put(mempool, eth_sec);
+	return rc;
+}
+
+static int
+cn9k_eth_sec_session_destroy(void *device, struct rte_security_session *sess)
+{
+	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	struct roc_onf_ipsec_outb_sa *outb_sa;
+	struct roc_onf_ipsec_inb_sa *inb_sa;
+	struct cnxk_eth_sec_sess *eth_sec;
+	struct rte_mempool *mp;
+
+	eth_sec = cnxk_eth_sec_sess_get_by_sess(dev, sess);
+	if (!eth_sec)
+		return -ENOENT;
+
+	if (eth_sec->inb) {
+		inb_sa = eth_sec->sa;
+		/* Disable SA */
+		inb_sa->ctl.valid = 0;
+
+		TAILQ_REMOVE(&dev->inb.list, eth_sec, entry);
+		dev->inb.nb_sess--;
+	} else {
+		outb_sa = eth_sec->sa;
+		/* Disable SA */
+		outb_sa->ctl.valid = 0;
+
+		/* Release Outbound SA index */
+		cnxk_eth_outb_sa_idx_put(dev, eth_sec->sa_idx);
+		TAILQ_REMOVE(&dev->outb.list, eth_sec, entry);
+		dev->outb.nb_sess--;
+	}
+
+	/* Sync SA content */
+	plt_atomic_thread_fence(__ATOMIC_ACQ_REL);
+
+	plt_nix_dbg("Destroyed %s session with spi=%u, sa_idx=%u",
+		    eth_sec->inb ? "inbound" : "outbound", eth_sec->spi,
+		    eth_sec->sa_idx);
+
+	/* Put eth_sec object back to pool */
+	mp = rte_mempool_from_obj(eth_sec);
+	set_sec_session_private_data(sess, NULL);
+	rte_mempool_put(mp, eth_sec);
+	return 0;
+}
+
+static const struct rte_security_capability *
+cn9k_eth_sec_capabilities_get(void *device __rte_unused)
+{
+	return cn9k_eth_sec_capabilities;
+}
+
+void
+cn9k_eth_sec_ops_override(void)
+{
+	static int init_once;
+
+	if (init_once)
+		return;
+	init_once = 1;
+
+	/* Update platform specific ops */
+	cnxk_eth_sec_ops.session_create = cn9k_eth_sec_session_create;
+	cnxk_eth_sec_ops.session_destroy = cn9k_eth_sec_session_destroy;
+	cnxk_eth_sec_ops.capabilities_get = cn9k_eth_sec_capabilities_get;
+}
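
Applications can discover the capabilities advertised above through the
standard rte_security lookup helper. A sketch of matching the inline
protocol ESP tunnel ingress entry (error handling elided):

    struct rte_security_capability_idx idx = {
            .action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
            .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
            .ipsec = {
                    .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
                    .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
                    .direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
            },
    };
    struct rte_security_ctx *ctx = rte_eth_dev_get_sec_ctx(port_id);
    const struct rte_security_capability *cap =
            rte_security_capability_get(ctx, &idx);
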
diff --git a/drivers/net/cnxk/cn9k_rx.h b/drivers/net/cnxk/cn9k_rx.h
index a3bf4e0..59545af 100644
--- a/drivers/net/cnxk/cn9k_rx.h
+++ b/drivers/net/cnxk/cn9k_rx.h
@@ -17,6 +17,7 @@
 #define NIX_RX_OFFLOAD_MARK_UPDATE_F BIT(3)
 #define NIX_RX_OFFLOAD_TSTAMP_F	     BIT(4)
 #define NIX_RX_OFFLOAD_VLAN_STRIP_F  BIT(5)
+#define NIX_RX_OFFLOAD_SECURITY_F    BIT(6)
 
 /* Flags to control cqe_to_mbuf conversion function.
  * Defining it from backwards to denote its been
diff --git a/drivers/net/cnxk/cn9k_tx.h b/drivers/net/cnxk/cn9k_tx.h
index ed65cd3..a27ff76 100644
--- a/drivers/net/cnxk/cn9k_tx.h
+++ b/drivers/net/cnxk/cn9k_tx.h
@@ -13,6 +13,7 @@
 #define NIX_TX_OFFLOAD_MBUF_NOFF_F    BIT(3)
 #define NIX_TX_OFFLOAD_TSO_F	      BIT(4)
 #define NIX_TX_OFFLOAD_TSTAMP_F	      BIT(5)
+#define NIX_TX_OFFLOAD_SECURITY_F     BIT(6)
 
 /* Flags to control xmit_prepare function.
  * Defining it from backwards to denote its been
diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index 0e3652e..60a4df5 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -38,6 +38,159 @@ nix_get_speed_capa(struct cnxk_eth_dev *dev)
 	return speed_capa;
 }
 
+int
+cnxk_nix_inb_mode_set(struct cnxk_eth_dev *dev, bool use_inl_dev)
+{
+	struct roc_nix *nix = &dev->nix;
+
+	if (dev->inb.inl_dev == use_inl_dev)
+		return 0;
+
+	plt_nix_dbg("Security sessions (%u) still active, inl=%u",
+		    dev->inb.nb_sess, !!dev->inb.inl_dev);
+
+	/* Change the mode */
+	dev->inb.inl_dev = use_inl_dev;
+
+	/* Update RoC for NPC rule insertion */
+	roc_nix_inb_mode_set(nix, use_inl_dev);
+
+	/* Setup lookup mem */
+	return cnxk_nix_lookup_mem_sa_base_set(dev);
+}
+
+static int
+nix_security_setup(struct cnxk_eth_dev *dev)
+{
+	struct roc_nix *nix = &dev->nix;
+	int i, rc = 0;
+
+	if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
+		/* Setup Inline Inbound */
+		rc = roc_nix_inl_inb_init(nix);
+		if (rc) {
+			plt_err("Failed to initialize nix inline inb, rc=%d",
+				rc);
+			return rc;
+		}
+
+		/* By default, use the inline device for poll mode. This is
+		 * overridden when event mode RQs are set up.
+		 */
+		cnxk_nix_inb_mode_set(dev, true);
+	}
+
+	/* Setup Inline outbound */
+	if (dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY) {
+		struct plt_bitmap *bmap;
+		size_t bmap_sz;
+		void *mem;
+
+		/* Cannot ask for Tx Inline without SAs */
+		if (!dev->outb.max_sa)
+			return -EINVAL;
+
+		/* Setup enough descriptors for all tx queues */
+		nix->outb_nb_desc = dev->outb.nb_desc;
+		nix->outb_nb_crypto_qs = dev->outb.nb_crypto_qs;
+
+		/* Setup Inline Outbound */
+		rc = roc_nix_inl_outb_init(nix);
+		if (rc) {
+			plt_err("Failed to initialize nix inline outb, rc=%d",
+				rc);
+			goto cleanup;
+		}
+
+		rc = -ENOMEM;
+		/* Allocate a bitmap to alloc and free sa indexes */
+		bmap_sz = plt_bitmap_get_memory_footprint(dev->outb.max_sa);
+		mem = plt_zmalloc(bmap_sz, PLT_CACHE_LINE_SIZE);
+		if (mem == NULL) {
+			plt_err("Outbound SA bmap alloc failed");
+
+			rc |= roc_nix_inl_outb_fini(nix);
+			goto cleanup;
+		}
+
+		rc = -EIO;
+		bmap = plt_bitmap_init(dev->outb.max_sa, mem, bmap_sz);
+		if (!bmap) {
+			plt_err("Outbound SA bmap init failed");
+
+			rc |= roc_nix_inl_outb_fini(nix);
+			plt_free(mem);
+			goto cleanup;
+		}
+
+		for (i = 0; i < dev->outb.max_sa; i++)
+			plt_bitmap_set(bmap, i);
+
+		dev->outb.sa_base = roc_nix_inl_outb_sa_base_get(nix);
+		dev->outb.sa_bmap_mem = mem;
+		dev->outb.sa_bmap = bmap;
+		dev->outb.lf_base = roc_nix_inl_outb_lf_base_get(nix);
+	}
+
+	return 0;
+cleanup:
+	if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY)
+		rc |= roc_nix_inl_inb_fini(nix);
+	return rc;
+}
+
+static int
+nix_security_release(struct cnxk_eth_dev *dev)
+{
+	struct rte_eth_dev *eth_dev = dev->eth_dev;
+	struct cnxk_eth_sec_sess *eth_sec, *tvar;
+	struct roc_nix *nix = &dev->nix;
+	int rc, ret = 0;
+
+	/* Cleanup Inline inbound */
+	if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
+		/* Destroy inbound sessions */
+		tvar = NULL;
+		TAILQ_FOREACH_SAFE(eth_sec, &dev->inb.list, entry, tvar)
+			cnxk_eth_sec_ops.session_destroy(eth_dev,
+							 eth_sec->sess);
+
+		/* Clear lookup mem */
+		cnxk_nix_lookup_mem_sa_base_clear(dev);
+
+		rc = roc_nix_inl_inb_fini(nix);
+		if (rc)
+			plt_err("Failed to cleanup nix inline inb, rc=%d", rc);
+		ret |= rc;
+	}
+
+	/* Cleanup Inline outbound */
+	if (dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY) {
+		/* Destroy outbound sessions */
+		tvar = NULL;
+		TAILQ_FOREACH_SAFE(eth_sec, &dev->outb.list, entry, tvar)
+			cnxk_eth_sec_ops.session_destroy(eth_dev,
+							 eth_sec->sess);
+
+		rc = roc_nix_inl_outb_fini(nix);
+		if (rc)
+			plt_err("Failed to cleanup nix inline outb, rc=%d", rc);
+		ret |= rc;
+
+		plt_bitmap_free(dev->outb.sa_bmap);
+		plt_free(dev->outb.sa_bmap_mem);
+		dev->outb.sa_bmap = NULL;
+		dev->outb.sa_bmap_mem = NULL;
+	}
+
+	dev->inb.inl_dev = false;
+	roc_nix_inb_mode_set(nix, false);
+	dev->nb_rxq_sso = 0;
+	dev->inb.nb_sess = 0;
+	dev->outb.nb_sess = 0;
+	return ret;
+}
+
 static void
 nix_enable_mseg_on_jumbo(struct cnxk_eth_rxq_sp *rxq)
 {
@@ -194,6 +347,12 @@ cnxk_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
 		eth_dev->data->tx_queues[qid] = NULL;
 	}
 
+	/* When Tx Security offload is enabled, increase tx desc count by
+	 * max possible outbound desc count.
+	 */
+	if (dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY)
+		nb_desc += dev->outb.nb_desc;
+
 	/* Setup ROC SQ */
 	sq = &dev->sqs[qid];
 	sq->qid = qid;
@@ -266,6 +425,7 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
 			struct rte_mempool *mp)
 {
 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	struct roc_nix *nix = &dev->nix;
 	struct cnxk_eth_rxq_sp *rxq_sp;
 	struct rte_mempool_ops *ops;
 	const char *platform_ops;
@@ -328,6 +488,10 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
 	rq->later_skip = sizeof(struct rte_mbuf);
 	rq->lpb_size = mp->elt_size;
 
+	/* Enable inline IPsec on the RQ; not used in poll mode */
+	if (roc_nix_inl_inb_is_enabled(nix))
+		rq->ipsech_ena = true;
+
 	rc = roc_nix_rq_init(&dev->nix, rq, !!eth_dev->data->dev_started);
 	if (rc) {
 		plt_err("Failed to init roc rq for rq=%d, rc=%d", qid, rc);
@@ -350,6 +514,13 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
 	rxq_sp->qconf.nb_desc = nb_desc;
 	rxq_sp->qconf.mp = mp;
 
+	if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
+		/* Setup rq reference for inline dev if present */
+		rc = roc_nix_inl_dev_rq_get(rq);
+		if (rc)
+			goto free_mem;
+	}
+
 	plt_nix_dbg("rq=%d pool=%s nb_desc=%d->%d", qid, mp->name, nb_desc,
 		    cq->nb_desc);
 
@@ -370,6 +541,8 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
 	}
 
 	return 0;
+free_mem:
+	plt_free(rxq_sp);
 rq_fini:
 	rc |= roc_nix_rq_fini(rq);
 cq_fini:
@@ -394,11 +567,15 @@ cnxk_nix_rx_queue_release(void *rxq)
 	rxq_sp = cnxk_eth_rxq_to_sp(rxq);
 	dev = rxq_sp->dev;
 	qid = rxq_sp->qid;
+	rq = &dev->rqs[qid];
 
 	plt_nix_dbg("Releasing rxq %u", qid);
 
+	/* Release rq reference for inline dev if present */
+	if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY)
+		roc_nix_inl_dev_rq_put(rq);
+
 	/* Cleanup ROC RQ */
-	rq = &dev->rqs[qid];
 	rc = roc_nix_rq_fini(rq);
 	if (rc)
 		plt_err("Failed to cleanup rq, rc=%d", rc);
@@ -804,6 +981,12 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev)
 		rc = nix_store_queue_cfg_and_then_release(eth_dev);
 		if (rc)
 			goto fail_configure;
+
+		/* Cleanup security support */
+		rc = nix_security_release(dev);
+		if (rc)
+			goto fail_configure;
+
 		roc_nix_tm_fini(nix);
 		roc_nix_lf_free(nix);
 	}
@@ -958,6 +1141,12 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev)
 		plt_err("Failed to initialize flow control rc=%d", rc);
 		goto cq_fini;
 	}
+
+	/* Setup Inline security support */
+	rc = nix_security_setup(dev);
+	if (rc)
+		goto cq_fini;
+
 	/*
 	 * Restore queue config when reconfigure followed by
 	 * reconfigure and no queue configure invoked from application case.
@@ -965,7 +1154,7 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev)
 	if (dev->configured == 1) {
 		rc = nix_restore_queue_cfg(eth_dev);
 		if (rc)
-			goto cq_fini;
+			goto sec_release;
 	}
 
 	/* Update the mac address */
@@ -987,6 +1176,8 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev)
 	dev->nb_txq = data->nb_tx_queues;
 	return 0;
 
+sec_release:
+	rc |= nix_security_release(dev);
 cq_fini:
 	roc_nix_unregister_cq_irqs(nix);
 q_irq_fini:
@@ -1282,12 +1473,25 @@ static int
 cnxk_eth_dev_init(struct rte_eth_dev *eth_dev)
 {
 	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	struct rte_security_ctx *sec_ctx;
 	struct roc_nix *nix = &dev->nix;
 	struct rte_pci_device *pci_dev;
 	int rc, max_entries;
 
 	eth_dev->dev_ops = &cnxk_eth_dev_ops;
 
+	/* Alloc security context */
+	sec_ctx = plt_zmalloc(sizeof(struct rte_security_ctx), 0);
+	if (!sec_ctx)
+		return -ENOMEM;
+	sec_ctx->device = eth_dev;
+	sec_ctx->ops = &cnxk_eth_sec_ops;
+	sec_ctx->flags =
+		(RTE_SEC_CTX_F_FAST_SET_MDATA | RTE_SEC_CTX_F_FAST_GET_UDATA);
+	eth_dev->security_ctx = sec_ctx;
+	TAILQ_INIT(&dev->inb.list);
+	TAILQ_INIT(&dev->outb.list);
+
 	/* For secondary processes, the primary has done all the work */
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
 		return 0;
@@ -1400,6 +1604,9 @@ cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool reset)
 	struct roc_nix *nix = &dev->nix;
 	int rc, i;
 
+	plt_free(eth_dev->security_ctx);
+	eth_dev->security_ctx = NULL;
+
 	/* Nothing to be done for secondary processes */
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
 		return 0;
@@ -1429,6 +1636,9 @@ cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool reset)
 	}
 	eth_dev->data->nb_rx_queues = 0;
 
+	/* Free security resources */
+	nix_security_release(dev);
+
 	/* Free tm resources */
 	roc_nix_tm_fini(nix);
 
diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h
index 2528b3c..5ae791f 100644
--- a/drivers/net/cnxk/cnxk_ethdev.h
+++ b/drivers/net/cnxk/cnxk_ethdev.h
@@ -13,6 +13,9 @@
 #include <rte_mbuf.h>
 #include <rte_mbuf_pool_ops.h>
 #include <rte_mempool.h>
+#include <rte_security.h>
+#include <rte_security_driver.h>
+#include <rte_tailq.h>
 #include <rte_time.h>
 
 #include "roc_api.h"
@@ -70,14 +73,14 @@
 	 DEV_TX_OFFLOAD_SCTP_CKSUM | DEV_TX_OFFLOAD_TCP_TSO |                  \
 	 DEV_TX_OFFLOAD_VXLAN_TNL_TSO | DEV_TX_OFFLOAD_GENEVE_TNL_TSO |        \
 	 DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_MULTI_SEGS |              \
-	 DEV_TX_OFFLOAD_IPV4_CKSUM)
+	 DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_SECURITY)
 
 #define CNXK_NIX_RX_OFFLOAD_CAPA                                               \
 	(DEV_RX_OFFLOAD_CHECKSUM | DEV_RX_OFFLOAD_SCTP_CKSUM |                 \
 	 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | DEV_RX_OFFLOAD_SCATTER |            \
 	 DEV_RX_OFFLOAD_JUMBO_FRAME | DEV_RX_OFFLOAD_OUTER_UDP_CKSUM |         \
 	 DEV_RX_OFFLOAD_RSS_HASH | DEV_RX_OFFLOAD_TIMESTAMP |                  \
-	 DEV_RX_OFFLOAD_VLAN_STRIP)
+	 DEV_RX_OFFLOAD_VLAN_STRIP | DEV_RX_OFFLOAD_SECURITY)
 
 #define RSS_IPV4_ENABLE                                                        \
 	(ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | ETH_RSS_NONFRAG_IPV4_UDP |         \
@@ -112,6 +115,11 @@
 #define PTYPE_TUNNEL_ARRAY_SZ	  BIT(PTYPE_TUNNEL_WIDTH)
 #define PTYPE_ARRAY_SZ                                                         \
 	((PTYPE_NON_TUNNEL_ARRAY_SZ + PTYPE_TUNNEL_ARRAY_SZ) * sizeof(uint16_t))
+
+/* NIX_RX_PARSE_S's ERRCODE + ERRLEV (12 bits) */
+#define ERRCODE_ERRLEN_WIDTH 12
+#define ERR_ARRAY_SZ	     ((BIT(ERRCODE_ERRLEN_WIDTH)) * sizeof(uint32_t))
+
 /* Fastpath lookup */
 #define CNXK_NIX_FASTPATH_LOOKUP_MEM "cnxk_nix_fastpath_lookup_mem"
 
@@ -119,6 +127,9 @@
 	((1ull << (PKT_TX_TUNNEL_VXLAN >> 45)) |                               \
 	 (1ull << (PKT_TX_TUNNEL_GENEVE >> 45)))
 
+/* Subtype from inline outbound error event */
+#define CNXK_ETHDEV_SEC_OUTB_EV_SUB 0xFFUL
+
 struct cnxk_fc_cfg {
 	enum rte_eth_fc_mode mode;
 	uint8_t rx_pause;
@@ -144,6 +155,82 @@ struct cnxk_timesync_info {
 	uint64_t *tx_tstamp;
 } __plt_cache_aligned;
 
+/* Security session private data */
+struct cnxk_eth_sec_sess {
+	/* List entry */
+	TAILQ_ENTRY(cnxk_eth_sec_sess) entry;
+
+	/* Inbound SA is from NIX_RX_IPSEC_SA_BASE or
+	 * Outbound SA from roc_nix_inl_outb_sa_base_get()
+	 */
+	void *sa;
+
+	/* SA index */
+	uint32_t sa_idx;
+
+	/* SPI */
+	uint32_t spi;
+
+	/* Back pointer to session */
+	struct rte_security_session *sess;
+
+	/* Inbound */
+	bool inb;
+
+	/* Inbound session on inl dev */
+	bool inl_dev;
+};
+
+TAILQ_HEAD(cnxk_eth_sec_sess_list, cnxk_eth_sec_sess);
+
+/* Inbound security data */
+struct cnxk_eth_dev_sec_inb {
+	/* IPSec inbound max SPI */
+	uint16_t max_spi;
+
+	/* Using inbound with inline device */
+	bool inl_dev;
+
+	/* Device argument to force inline device for inb */
+	bool force_inl_dev;
+
+	/* Active sessions */
+	uint16_t nb_sess;
+
+	/* List of sessions */
+	struct cnxk_eth_sec_sess_list list;
+};
+
+/* Outbound security data */
+struct cnxk_eth_dev_sec_outb {
+	/* IPSec outbound max SA */
+	uint16_t max_sa;
+
+	/* Per CPT LF descriptor count */
+	uint32_t nb_desc;
+
+	/* SA Bitmap */
+	struct plt_bitmap *sa_bmap;
+
+	/* SA bitmap memory */
+	void *sa_bmap_mem;
+
+	/* SA base */
+	uint64_t sa_base;
+
+	/* CPT LF base */
+	struct roc_cpt_lf *lf_base;
+
+	/* Crypto queues => CPT lf count */
+	uint16_t nb_crypto_qs;
+
+	/* Active sessions */
+	uint16_t nb_sess;
+
+	/* List of sessions */
+	struct cnxk_eth_sec_sess_list list;
+};
+
 struct cnxk_eth_dev {
 	/* ROC NIX */
 	struct roc_nix nix;
@@ -159,6 +246,7 @@ struct cnxk_eth_dev {
 	/* Configured queue count */
 	uint16_t nb_rxq;
 	uint16_t nb_txq;
+	uint16_t nb_rxq_sso;
 	uint8_t configured;
 
 	/* Max macfilter entries */
@@ -223,6 +311,10 @@ struct cnxk_eth_dev {
 	/* Per queue statistics counters */
 	uint32_t txq_stat_map[RTE_ETHDEV_QUEUE_STAT_CNTRS];
 	uint32_t rxq_stat_map[RTE_ETHDEV_QUEUE_STAT_CNTRS];
+
+	/* Security data */
+	struct cnxk_eth_dev_sec_inb inb;
+	struct cnxk_eth_dev_sec_outb outb;
 };
 
 struct cnxk_eth_rxq_sp {
@@ -261,6 +353,9 @@ extern struct eth_dev_ops cnxk_eth_dev_ops;
 /* Common flow ops */
 extern struct rte_flow_ops cnxk_flow_ops;
 
+/* Common security ops */
+extern struct rte_security_ops cnxk_eth_sec_ops;
+
 /* Ops */
 int cnxk_nix_probe(struct rte_pci_driver *pci_drv,
 		   struct rte_pci_device *pci_dev);
@@ -383,6 +478,18 @@ int cnxk_ethdev_parse_devargs(struct rte_devargs *devargs,
 /* Debug */
 int cnxk_nix_dev_get_reg(struct rte_eth_dev *eth_dev,
 			 struct rte_dev_reg_info *regs);
+/* Security */
+int cnxk_eth_outb_sa_idx_get(struct cnxk_eth_dev *dev, uint32_t *idx_p);
+int cnxk_eth_outb_sa_idx_put(struct cnxk_eth_dev *dev, uint32_t idx);
+int cnxk_nix_lookup_mem_sa_base_set(struct cnxk_eth_dev *dev);
+int cnxk_nix_lookup_mem_sa_base_clear(struct cnxk_eth_dev *dev);
+__rte_internal
+int cnxk_nix_inb_mode_set(struct cnxk_eth_dev *dev, bool use_inl_dev);
+struct cnxk_eth_sec_sess *cnxk_eth_sec_sess_get_by_spi(struct cnxk_eth_dev *dev,
+						       uint32_t spi, bool inb);
+struct cnxk_eth_sec_sess *
+cnxk_eth_sec_sess_get_by_sess(struct cnxk_eth_dev *dev,
+			      struct rte_security_session *sess);
 
 /* Other private functions */
 int nix_recalc_mtu(struct rte_eth_dev *eth_dev);
@@ -493,4 +600,14 @@ cnxk_nix_mbuf_to_tstamp(struct rte_mbuf *mbuf,
 	}
 }
 
+static __rte_always_inline uintptr_t
+cnxk_nix_sa_base_get(uint16_t port, const void *lookup_mem)
+{
+	uintptr_t sa_base_tbl;
+
+	sa_base_tbl = (uintptr_t)lookup_mem;
+	sa_base_tbl += PTYPE_ARRAY_SZ + ERR_ARRAY_SZ;
+	return *((const uintptr_t *)sa_base_tbl + port);
+}
+
 #endif /* __CNXK_ETHDEV_H__ */
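
cnxk_nix_sa_base_get() above assumes a fixed layout of the shared fastpath
lookup memory. A sketch of that layout, derived from the sizes defined in
this header:

    /*
     * lookup_mem layout (byte offsets):
     *
     *   0                              ptype tables   (PTYPE_ARRAY_SZ)
     *   PTYPE_ARRAY_SZ                 Rx error table (ERR_ARRAY_SZ)
     *   PTYPE_ARRAY_SZ + ERR_ARRAY_SZ  per-port SA base entries,
     *                                  one uintptr_t per ethdev port
     */
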
diff --git a/drivers/net/cnxk/cnxk_ethdev_devargs.c b/drivers/net/cnxk/cnxk_ethdev_devargs.c
index 37720fb..c0b949e 100644
--- a/drivers/net/cnxk/cnxk_ethdev_devargs.c
+++ b/drivers/net/cnxk/cnxk_ethdev_devargs.c
@@ -8,6 +8,61 @@
 #include "cnxk_ethdev.h"
 
 static int
+parse_outb_nb_desc(const char *key, const char *value, void *extra_args)
+{
+	RTE_SET_USED(key);
+	uint32_t val;
+
+	val = atoi(value);
+
+	*(uint16_t *)extra_args = val;
+
+	return 0;
+}
+
+static int
+parse_outb_nb_crypto_qs(const char *key, const char *value, void *extra_args)
+{
+	RTE_SET_USED(key);
+	uint32_t val;
+
+	val = atoi(value);
+
+	if (val < 1 || val > 64)
+		return -EINVAL;
+
+	*(uint16_t *)extra_args = val;
+
+	return 0;
+}
+
+static int
+parse_ipsec_in_max_spi(const char *key, const char *value, void *extra_args)
+{
+	RTE_SET_USED(key);
+	uint32_t val;
+
+	val = atoi(value);
+
+	*(uint16_t *)extra_args = val;
+
+	return 0;
+}
+
+static int
+parse_ipsec_out_max_sa(const char *key, const char *value, void *extra_args)
+{
+	RTE_SET_USED(key);
+	uint32_t val;
+
+	val = atoi(value);
+
+	*(uint16_t *)extra_args = val;
+
+	return 0;
+}
+
+static int
 parse_flow_max_priority(const char *key, const char *value, void *extra_args)
 {
 	RTE_SET_USED(key);
@@ -117,15 +172,25 @@ parse_switch_header_type(const char *key, const char *value, void *extra_args)
 #define CNXK_SWITCH_HEADER_TYPE "switch_header"
 #define CNXK_RSS_TAG_AS_XOR	"tag_as_xor"
 #define CNXK_LOCK_RX_CTX	"lock_rx_ctx"
+#define CNXK_IPSEC_IN_MAX_SPI	"ipsec_in_max_spi"
+#define CNXK_IPSEC_OUT_MAX_SA	"ipsec_out_max_sa"
+#define CNXK_OUTB_NB_DESC	"outb_nb_desc"
+#define CNXK_FORCE_INB_INL_DEV	"force_inb_inl_dev"
+#define CNXK_OUTB_NB_CRYPTO_QS	"outb_nb_crypto_qs"
 
 int
 cnxk_ethdev_parse_devargs(struct rte_devargs *devargs, struct cnxk_eth_dev *dev)
 {
 	uint16_t reta_sz = ROC_NIX_RSS_RETA_SZ_64;
 	uint16_t sqb_count = CNXK_NIX_TX_MAX_SQB;
+	uint16_t ipsec_in_max_spi = BIT(8) - 1;
+	uint16_t ipsec_out_max_sa = BIT(12);
 	uint16_t flow_prealloc_size = 1;
 	uint16_t switch_header_type = 0;
 	uint16_t flow_max_priority = 3;
+	uint16_t force_inb_inl_dev = 0;
+	uint16_t outb_nb_crypto_qs = 1;
+	uint16_t outb_nb_desc = 8200;
 	uint16_t rss_tag_as_xor = 0;
 	uint16_t scalar_enable = 0;
 	uint8_t lock_rx_ctx = 0;
@@ -153,10 +218,27 @@ cnxk_ethdev_parse_devargs(struct rte_devargs *devargs, struct cnxk_eth_dev *dev)
 	rte_kvargs_process(kvlist, CNXK_RSS_TAG_AS_XOR, &parse_flag,
 			   &rss_tag_as_xor);
 	rte_kvargs_process(kvlist, CNXK_LOCK_RX_CTX, &parse_flag, &lock_rx_ctx);
+	rte_kvargs_process(kvlist, CNXK_IPSEC_IN_MAX_SPI,
+			   &parse_ipsec_in_max_spi, &ipsec_in_max_spi);
+	rte_kvargs_process(kvlist, CNXK_IPSEC_OUT_MAX_SA,
+			   &parse_ipsec_out_max_sa, &ipsec_out_max_sa);
+	rte_kvargs_process(kvlist, CNXK_OUTB_NB_DESC, &parse_outb_nb_desc,
+			   &outb_nb_desc);
+	rte_kvargs_process(kvlist, CNXK_OUTB_NB_CRYPTO_QS,
+			   &parse_outb_nb_crypto_qs, &outb_nb_crypto_qs);
+	rte_kvargs_process(kvlist, CNXK_FORCE_INB_INL_DEV, &parse_flag,
+			   &force_inb_inl_dev);
 	rte_kvargs_free(kvlist);
 
 null_devargs:
 	dev->scalar_ena = !!scalar_enable;
+	dev->inb.force_inl_dev = !!force_inb_inl_dev;
+	dev->inb.max_spi = ipsec_in_max_spi;
+	dev->outb.max_sa = ipsec_out_max_sa;
+	dev->outb.nb_desc = outb_nb_desc;
+	dev->outb.nb_crypto_qs = outb_nb_crypto_qs;
+	dev->nix.ipsec_in_max_spi = ipsec_in_max_spi;
+	dev->nix.ipsec_out_max_sa = ipsec_out_max_sa;
 	dev->nix.rss_tag_as_xor = !!rss_tag_as_xor;
 	dev->nix.max_sqb_count = sqb_count;
 	dev->nix.reta_sz = reta_sz;
@@ -177,4 +259,8 @@ RTE_PMD_REGISTER_PARAM_STRING(net_cnxk,
 			      CNXK_FLOW_PREALLOC_SIZE "=<1-32>"
 			      CNXK_FLOW_MAX_PRIORITY "=<1-32>"
 			      CNXK_SWITCH_HEADER_TYPE "=<higig2|dsa|chlen90b>"
-			      CNXK_RSS_TAG_AS_XOR "=1");
+			      CNXK_RSS_TAG_AS_XOR "=1"
+			      CNXK_IPSEC_IN_MAX_SPI "=<1-65535>"
+			      CNXK_OUTB_NB_DESC "=<1-65535>"
+			      CNXK_OUTB_NB_CRYPTO_QS "=<1-64>"
+			      CNXK_FORCE_INB_INL_DEV "=1");
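
For reference, these devargs combine on the EAL allow-list; a hypothetical
invocation could look like (PCI address illustrative):

    -a 0002:02:00.0,ipsec_out_max_sa=4096,outb_nb_desc=8200,outb_nb_crypto_qs=2
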
diff --git a/drivers/net/cnxk/cnxk_ethdev_sec.c b/drivers/net/cnxk/cnxk_ethdev_sec.c
new file mode 100644
index 0000000..c002c30
--- /dev/null
+++ b/drivers/net/cnxk/cnxk_ethdev_sec.c
@@ -0,0 +1,278 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+
+#include <cnxk_ethdev.h>
+
+#define CNXK_NIX_INL_SELFTEST	      "selftest"
+#define CNXK_NIX_INL_IPSEC_IN_MAX_SPI "ipsec_in_max_spi"
+
+#define CNXK_NIX_INL_DEV_NAME RTE_STR(cnxk_nix_inl_dev_)
+#define CNXK_NIX_INL_DEV_NAME_LEN                                              \
+	(sizeof(CNXK_NIX_INL_DEV_NAME) + PCI_PRI_STR_SIZE)
+
+static inline int
+bitmap_ctzll(uint64_t slab)
+{
+	if (slab == 0)
+		return 0;
+
+	return __builtin_ctzll(slab);
+}
+
+int
+cnxk_eth_outb_sa_idx_get(struct cnxk_eth_dev *dev, uint32_t *idx_p)
+{
+	uint32_t pos, idx;
+	uint64_t slab;
+	int rc;
+
+	if (!dev->outb.max_sa)
+		return -ENOTSUP;
+
+	pos = 0;
+	slab = 0;
+	/* Scan from the beginning */
+	plt_bitmap_scan_init(dev->outb.sa_bmap);
+	/* Scan bitmap to get the free sa index */
+	rc = plt_bitmap_scan(dev->outb.sa_bmap, &pos, &slab);
+	/* Empty bitmap */
+	if (rc == 0) {
+		plt_err("Outbound SAs exhausted, use 'ipsec_out_max_sa' "
+			"devargs to increase");
+		return -ERANGE;
+	}
+
+	/* Get free SA index */
+	idx = pos + bitmap_ctzll(slab);
+	plt_bitmap_clear(dev->outb.sa_bmap, idx);
+	*idx_p = idx;
+	return 0;
+}
+
+int
+cnxk_eth_outb_sa_idx_put(struct cnxk_eth_dev *dev, uint32_t idx)
+{
+	if (idx >= dev->outb.max_sa)
+		return -EINVAL;
+
+	/* Check if it is already free */
+	if (plt_bitmap_get(dev->outb.sa_bmap, idx))
+		return -EINVAL;
+
+	/* Mark index as free */
+	plt_bitmap_set(dev->outb.sa_bmap, idx);
+	return 0;
+}
+
+struct cnxk_eth_sec_sess *
+cnxk_eth_sec_sess_get_by_spi(struct cnxk_eth_dev *dev, uint32_t spi, bool inb)
+{
+	struct cnxk_eth_sec_sess_list *list;
+	struct cnxk_eth_sec_sess *eth_sec;
+
+	list = inb ? &dev->inb.list : &dev->outb.list;
+	TAILQ_FOREACH(eth_sec, list, entry) {
+		if (eth_sec->spi == spi)
+			return eth_sec;
+	}
+
+	return NULL;
+}
+
+struct cnxk_eth_sec_sess *
+cnxk_eth_sec_sess_get_by_sess(struct cnxk_eth_dev *dev,
+			      struct rte_security_session *sess)
+{
+	struct cnxk_eth_sec_sess *eth_sec = NULL;
+
+	/* Search in inbound list */
+	TAILQ_FOREACH(eth_sec, &dev->inb.list, entry) {
+		if (eth_sec->sess == sess)
+			return eth_sec;
+	}
+
+	/* Search in outbound list */
+	TAILQ_FOREACH(eth_sec, &dev->outb.list, entry) {
+		if (eth_sec->sess == sess)
+			return eth_sec;
+	}
+
+	return NULL;
+}
+
+static unsigned int
+cnxk_eth_sec_session_get_size(void *device __rte_unused)
+{
+	return sizeof(struct cnxk_eth_sec_sess);
+}
+
+struct rte_security_ops cnxk_eth_sec_ops = {
+	.session_get_size = cnxk_eth_sec_session_get_size
+};
+
+static int
+parse_ipsec_in_max_spi(const char *key, const char *value, void *extra_args)
+{
+	RTE_SET_USED(key);
+	uint32_t val;
+
+	val = atoi(value);
+
+	*(uint16_t *)extra_args = val;
+
+	return 0;
+}
+
+static int
+parse_selftest(const char *key, const char *value, void *extra_args)
+{
+	RTE_SET_USED(key);
+	uint32_t val;
+
+	val = atoi(value);
+
+	*(uint8_t *)extra_args = !!(val == 1);
+	return 0;
+}
+
+static int
+nix_inl_parse_devargs(struct rte_devargs *devargs,
+		      struct roc_nix_inl_dev *inl_dev)
+{
+	uint32_t ipsec_in_max_spi = BIT(8) - 1;
+	struct rte_kvargs *kvlist;
+	uint8_t selftest = 0;
+
+	if (devargs == NULL)
+		goto null_devargs;
+
+	kvlist = rte_kvargs_parse(devargs->args, NULL);
+	if (kvlist == NULL)
+		goto exit;
+
+	rte_kvargs_process(kvlist, CNXK_NIX_INL_SELFTEST, &parse_selftest,
+			   &selftest);
+	rte_kvargs_process(kvlist, CNXK_NIX_INL_IPSEC_IN_MAX_SPI,
+			   &parse_ipsec_in_max_spi, &ipsec_in_max_spi);
+	rte_kvargs_free(kvlist);
+
+null_devargs:
+	inl_dev->ipsec_in_max_spi = ipsec_in_max_spi;
+	inl_dev->selftest = selftest;
+	return 0;
+exit:
+	return -EINVAL;
+}
+
+static inline char *
+nix_inl_dev_to_name(struct rte_pci_device *pci_dev, char *name)
+{
+	snprintf(name, CNXK_NIX_INL_DEV_NAME_LEN,
+		 CNXK_NIX_INL_DEV_NAME PCI_PRI_FMT, pci_dev->addr.domain,
+		 pci_dev->addr.bus, pci_dev->addr.devid,
+		 pci_dev->addr.function);
+
+	return name;
+}
+
+static int
+cnxk_nix_inl_dev_remove(struct rte_pci_device *pci_dev)
+{
+	char name[CNXK_NIX_INL_DEV_NAME_LEN];
+	const struct rte_memzone *mz;
+	struct roc_nix_inl_dev *dev;
+	int rc;
+
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
+
+	mz = rte_memzone_lookup(nix_inl_dev_to_name(pci_dev, name));
+	if (!mz)
+		return 0;
+
+	dev = mz->addr;
+
+	/* Cleanup inline dev */
+	rc = roc_nix_inl_dev_fini(dev);
+	if (rc) {
+		plt_err("Failed to cleanup inl dev, rc=%d(%s)", rc,
+			roc_error_msg_get(rc));
+		return rc;
+	}
+
+	rte_memzone_free(mz);
+	return 0;
+}
+
+static int
+cnxk_nix_inl_dev_probe(struct rte_pci_driver *pci_drv,
+		       struct rte_pci_device *pci_dev)
+{
+	char name[CNXK_NIX_INL_DEV_NAME_LEN];
+	struct roc_nix_inl_dev *inl_dev;
+	const struct rte_memzone *mz;
+	int rc = -ENOMEM;
+
+	RTE_SET_USED(pci_drv);
+
+	rc = roc_plt_init();
+	if (rc) {
+		plt_err("Failed to initialize platform model, rc=%d", rc);
+		return rc;
+	}
+
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
+
+	mz = rte_memzone_reserve_aligned(nix_inl_dev_to_name(pci_dev, name),
+					 sizeof(*inl_dev), SOCKET_ID_ANY, 0,
+					 RTE_CACHE_LINE_SIZE);
+	if (mz == NULL)
+		return rc;
+
+	inl_dev = mz->addr;
+	inl_dev->pci_dev = pci_dev;
+
+	/* Parse devargs string */
+	rc = nix_inl_parse_devargs(pci_dev->device.devargs, inl_dev);
+	if (rc) {
+		plt_err("Failed to parse devargs rc=%d", rc);
+		goto free_mem;
+	}
+
+	rc = roc_nix_inl_dev_init(inl_dev);
+	if (rc) {
+		plt_err("Failed to init nix inl device, rc=%d(%s)", rc,
+			roc_error_msg_get(rc));
+		goto free_mem;
+	}
+
+	return 0;
+free_mem:
+	rte_memzone_free(mz);
+	return rc;
+}
+
+static const struct rte_pci_id cnxk_nix_inl_pci_map[] = {
+	{RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CNXK_RVU_NIX_INL_PF)},
+	{RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CNXK_RVU_NIX_INL_VF)},
+	{
+		.vendor_id = 0,
+	},
+};
+
+static struct rte_pci_driver cnxk_nix_inl_pci = {
+	.id_table = cnxk_nix_inl_pci_map,
+	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA,
+	.probe = cnxk_nix_inl_dev_probe,
+	.remove = cnxk_nix_inl_dev_remove,
+};
+
+RTE_PMD_REGISTER_PCI(cnxk_nix_inl, cnxk_nix_inl_pci);
+RTE_PMD_REGISTER_PCI_TABLE(cnxk_nix_inl, cnxk_nix_inl_pci_map);
+RTE_PMD_REGISTER_KMOD_DEP(cnxk_nix_inl, "vfio-pci");
+
+RTE_PMD_REGISTER_PARAM_STRING(cnxk_nix_inl,
+			      CNXK_NIX_INL_SELFTEST "=1"
+			      CNXK_NIX_INL_IPSEC_IN_MAX_SPI "=<1-65535>");
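
The SA index allocator above is a plain bitmap free-list: set bits mark
free indexes, plt_bitmap_scan() plus count-trailing-zeros locates the
first free one, and the put side simply sets the bit again. A minimal
usage sketch from a hypothetical caller:

    uint32_t sa_idx;

    if (cnxk_eth_outb_sa_idx_get(dev, &sa_idx) == 0) {
            /* ... program the outbound SA slot at sa_idx ... */

            /* On session destroy, return the index to the pool */
            cnxk_eth_outb_sa_idx_put(dev, sa_idx);
    }
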
diff --git a/drivers/net/cnxk/cnxk_lookup.c b/drivers/net/cnxk/cnxk_lookup.c
index 0152ad9..f6ec768 100644
--- a/drivers/net/cnxk/cnxk_lookup.c
+++ b/drivers/net/cnxk/cnxk_lookup.c
@@ -7,12 +7,8 @@
 
 #include "cnxk_ethdev.h"
 
-/* NIX_RX_PARSE_S's ERRCODE + ERRLEV (12 bits) */
-#define ERRCODE_ERRLEN_WIDTH 12
-#define ERR_ARRAY_SZ	     ((BIT(ERRCODE_ERRLEN_WIDTH)) * sizeof(uint32_t))
-
-#define SA_TBL_SZ	(RTE_MAX_ETHPORTS * sizeof(uint64_t))
-#define LOOKUP_ARRAY_SZ (PTYPE_ARRAY_SZ + ERR_ARRAY_SZ + SA_TBL_SZ)
+#define SA_BASE_TBL_SZ	(RTE_MAX_ETHPORTS * sizeof(uintptr_t))
+#define LOOKUP_ARRAY_SZ (PTYPE_ARRAY_SZ + ERR_ARRAY_SZ + SA_BASE_TBL_SZ)
 const uint32_t *
 cnxk_nix_supported_ptypes_get(struct rte_eth_dev *eth_dev)
 {
@@ -324,3 +320,45 @@ cnxk_nix_fastpath_lookup_mem_get(void)
 	}
 	return NULL;
 }
+
+int
+cnxk_nix_lookup_mem_sa_base_set(struct cnxk_eth_dev *dev)
+{
+	void *lookup_mem = cnxk_nix_fastpath_lookup_mem_get();
+	uint16_t port = dev->eth_dev->data->port_id;
+	uintptr_t sa_base_tbl;
+	uintptr_t sa_base;
+	uint8_t sa_w;
+
+	if (!lookup_mem)
+		return -EIO;
+
+	sa_base = roc_nix_inl_inb_sa_base_get(&dev->nix, dev->inb.inl_dev);
+	if (!sa_base)
+		return -ENOTSUP;
+
+	sa_w = plt_log2_u32(dev->nix.ipsec_in_max_spi + 1);
+
+	/* Set SA Base in lookup mem */
+	sa_base_tbl = (uintptr_t)lookup_mem;
+	sa_base_tbl += PTYPE_ARRAY_SZ + ERR_ARRAY_SZ;
+	*((uintptr_t *)sa_base_tbl + port) = sa_base | sa_w;
+	return 0;
+}
+
+int
+cnxk_nix_lookup_mem_sa_base_clear(struct cnxk_eth_dev *dev)
+{
+	void *lookup_mem = cnxk_nix_fastpath_lookup_mem_get();
+	uint16_t port = dev->eth_dev->data->port_id;
+	uintptr_t sa_base_tbl;
+
+	if (!lookup_mem)
+		return -EIO;
+
+	/* Set SA Base in lookup mem */
+	sa_base_tbl = (uintptr_t)lookup_mem;
+	sa_base_tbl += PTYPE_ARRAY_SZ + ERR_ARRAY_SZ;
+	*((uintptr_t *)sa_base_tbl + port) = 0;
+	return 0;
+}
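
Each table entry written above packs two values: the inbound SA base and
sa_w, the log2 of the SA table depth, carried in the low bits freed by the
base alignment. A hypothetical fast-path decode, assuming the same 64 KB
alignment asserted on the Tx side:

    uintptr_t entry = cnxk_nix_sa_base_get(port, lookup_mem);
    uint8_t sa_w = entry & (ROC_NIX_INL_SA_BASE_ALIGN - 1);
    uintptr_t sa_base = entry & ~(uintptr_t)(ROC_NIX_INL_SA_BASE_ALIGN - 1);
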
diff --git a/drivers/net/cnxk/meson.build b/drivers/net/cnxk/meson.build
index d4cdd17..6cc30c3 100644
--- a/drivers/net/cnxk/meson.build
+++ b/drivers/net/cnxk/meson.build
@@ -12,6 +12,7 @@ sources = files(
         'cnxk_ethdev.c',
         'cnxk_ethdev_devargs.c',
         'cnxk_ethdev_ops.c',
+        'cnxk_ethdev_sec.c',
         'cnxk_link.c',
         'cnxk_lookup.c',
         'cnxk_ptp.c',
@@ -22,6 +23,7 @@ sources = files(
 # CN9K
 sources += files(
         'cn9k_ethdev.c',
+        'cn9k_ethdev_sec.c',
         'cn9k_rte_flow.c',
         'cn9k_rx.c',
         'cn9k_rx_mseg.c',
diff --git a/drivers/net/cnxk/version.map b/drivers/net/cnxk/version.map
index c2e0723..b9da6b1 100644
--- a/drivers/net/cnxk/version.map
+++ b/drivers/net/cnxk/version.map
@@ -1,3 +1,8 @@
 DPDK_22 {
 	local: *;
 };
+
+INTERNAL {
+	global:
+	cnxk_nix_inb_mode_set;
+};
-- 
2.8.4


Thread overview: 91+ messages
2021-09-02  2:14 [dpdk-dev] [PATCH 00/27] net/cnxk: support for inline ipsec Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 01/27] common/cnxk: add security support for cn9k fast path Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 02/27] common/cnxk: add helper API to dump cpt parse header Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 03/27] common/cnxk: allow reuse of SSO API for inline dev Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 04/27] common/cnxk: change nix debug API and queue API interface Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 05/27] common/cnxk: add nix inline device irq API Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 06/27] common/cnxk: add nix inline device init and fini Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 07/27] common/cnxk: add nix inline inbound and outbound support API Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 08/27] common/cnxk: dump cpt lf registers on error intr Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 09/27] common/cnxk: align cpt lf enable/disable sequence Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 10/27] common/cnxk: restore nix sqb pool limit before destroy Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 11/27] common/cnxk: add cq enable support in nix Tx path Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 12/27] common/cnxk: setup aura bp conf based on nix Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 13/27] common/cnxk: add anti-replay check implementation for cn9k Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 14/27] common/cnxk: add inline IPsec support in rte flow Nithin Dabilpuram
2021-09-02  2:14 ` Nithin Dabilpuram [this message]
2021-09-02  2:14 ` [dpdk-dev] [PATCH 16/27] net/cnxk: add inline security support for cn10k Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 17/27] net/cnxk: add cn9k Rx support for security offload Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 18/27] net/cnxk: add cn9k Tx " Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 19/27] net/cnxk: add cn10k Rx " Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 20/27] net/cnxk: add cn10k Tx " Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 21/27] net/cnxk: add cn9k anti replay " Nithin Dabilpuram
2021-09-02  2:15 ` [dpdk-dev] [PATCH 22/27] net/cnxk: add cn10k IPsec transport mode support Nithin Dabilpuram
2021-09-02  2:15 ` [dpdk-dev] [PATCH 23/27] net/cnxk: update ethertype for mixed IPsec tunnel versions Nithin Dabilpuram
2021-09-02  2:15 ` [dpdk-dev] [PATCH 24/27] net/cnxk: allow zero udp6 checksum for non inline device Nithin Dabilpuram
2021-09-02  2:15 ` [dpdk-dev] [PATCH 25/27] net/cnxk: add crypto capabilities for AES CBC and HMAC SHA1 Nithin Dabilpuram
2021-09-02  2:15 ` [dpdk-dev] [PATCH 26/27] net/cnxk: add devargs for configuring channel mask Nithin Dabilpuram
2021-09-02  2:15 ` [dpdk-dev] [PATCH 27/27] net/cnxk: reflect globally enabled offloads in queue conf Nithin Dabilpuram
2021-09-29 12:44 ` [dpdk-dev] [PATCH 00/27] net/cnxk: support for inline ipsec Jerin Jacob
2021-09-30 17:00 ` [dpdk-dev] [PATCH v2 00/28] " Nithin Dabilpuram
2021-09-30 17:00   ` [dpdk-dev] [PATCH v2 01/28] common/cnxk: support cn9k fast path security session Nithin Dabilpuram
2021-09-30 17:00   ` [dpdk-dev] [PATCH v2 02/28] common/cnxk: support CPT parse header dump Nithin Dabilpuram
2021-09-30 17:00   ` [dpdk-dev] [PATCH v2 03/28] common/cnxk: allow reuse of SSO API for inline dev Nithin Dabilpuram
2021-09-30 17:00   ` [dpdk-dev] [PATCH v2 04/28] common/cnxk: change NIX debug API and queue API interface Nithin Dabilpuram
2021-09-30 17:00   ` [dpdk-dev] [PATCH v2 05/28] common/cnxk: support NIX inline device IRQ Nithin Dabilpuram
2021-09-30 17:00   ` [dpdk-dev] [PATCH v2 06/28] common/cnxk: support NIX inline device init and fini Nithin Dabilpuram
2021-09-30 17:00   ` [dpdk-dev] [PATCH v2 07/28] common/cnxk: support NIX inline inbound and outbound setup Nithin Dabilpuram
2021-09-30 17:00   ` [dpdk-dev] [PATCH v2 08/28] common/cnxk: disable CQ drop when inline inbound is enabled Nithin Dabilpuram
2021-09-30 17:00   ` [dpdk-dev] [PATCH v2 09/28] common/cnxk: dump CPT LF registers on error intr Nithin Dabilpuram
2021-09-30 17:00   ` [dpdk-dev] [PATCH v2 10/28] common/cnxk: align CPT LF enable/disable sequence Nithin Dabilpuram
2021-09-30 17:00   ` [dpdk-dev] [PATCH v2 11/28] common/cnxk: restore NIX sqb pool limit before destroy Nithin Dabilpuram
2021-09-30 17:00   ` [dpdk-dev] [PATCH v2 12/28] common/cnxk: add CQ enable support in NIX Tx path Nithin Dabilpuram
2021-09-30 17:00   ` [dpdk-dev] [PATCH v2 13/28] common/cnxk: setup aura BP conf based on nix Nithin Dabilpuram
2021-09-30 17:00   ` [dpdk-dev] [PATCH v2 14/28] common/cnxk: support anti-replay check in SW for cn9k Nithin Dabilpuram
2021-09-30 17:01   ` [dpdk-dev] [PATCH v2 15/28] common/cnxk: support inline IPsec rte flow action Nithin Dabilpuram
2021-09-30 17:01   ` [dpdk-dev] [PATCH v2 16/28] net/cnxk: support inline security setup for cn9k Nithin Dabilpuram
2021-09-30 17:01   ` [dpdk-dev] [PATCH v2 17/28] net/cnxk: support inline security setup for cn10k Nithin Dabilpuram
2021-09-30 17:01   ` [dpdk-dev] [PATCH v2 18/28] net/cnxk: support Rx security offload on cn9k Nithin Dabilpuram
2021-09-30 17:01   ` [dpdk-dev] [PATCH v2 19/28] net/cnxk: support Tx " Nithin Dabilpuram
2021-09-30 17:01   ` [dpdk-dev] [PATCH v2 20/28] net/cnxk: support Rx security offload on cn10k Nithin Dabilpuram
2021-09-30 17:01   ` [dpdk-dev] [PATCH v2 21/28] net/cnxk: support Tx " Nithin Dabilpuram
2021-09-30 17:01   ` [dpdk-dev] [PATCH v2 22/28] net/cnxk: support IPsec anti replay in cn9k Nithin Dabilpuram
2021-09-30 17:01   ` [dpdk-dev] [PATCH v2 23/28] net/cnxk: support IPsec transport mode in cn10k Nithin Dabilpuram
2021-09-30 17:01   ` [dpdk-dev] [PATCH v2 24/28] net/cnxk: update ethertype for mixed IPsec tunnel versions Nithin Dabilpuram
2021-09-30 17:01   ` [dpdk-dev] [PATCH v2 25/28] net/cnxk: allow zero udp6 checksum for non inline device Nithin Dabilpuram
2021-09-30 17:01   ` [dpdk-dev] [PATCH v2 26/28] net/cnxk: add crypto capabilities for AES CBC and HMAC SHA1 Nithin Dabilpuram
2021-09-30 17:01   ` [dpdk-dev] [PATCH v2 27/28] net/cnxk: support configuring channel mask via devargs Nithin Dabilpuram
2021-09-30 17:01   ` [dpdk-dev] [PATCH v2 28/28] net/cnxk: reflect globally enabled offloads in queue conf Nithin Dabilpuram
2021-10-01  5:37   ` [dpdk-dev] [PATCH v2 00/28] net/cnxk: support for inline ipsec Jerin Jacob
2021-10-01 13:39 ` [dpdk-dev] [PATCH v3 " Nithin Dabilpuram
2021-10-01 13:39   ` [dpdk-dev] [PATCH v3 01/28] common/cnxk: support cn9k fast path security session Nithin Dabilpuram
2021-10-01 13:39   ` [dpdk-dev] [PATCH v3 02/28] common/cnxk: support CPT parse header dump Nithin Dabilpuram
2021-10-01 13:39   ` [dpdk-dev] [PATCH v3 03/28] common/cnxk: allow reuse of SSO API for inline dev Nithin Dabilpuram
2021-10-01 13:39   ` [dpdk-dev] [PATCH v3 04/28] common/cnxk: change NIX debug API and queue API interface Nithin Dabilpuram
2021-10-01 13:39   ` [dpdk-dev] [PATCH v3 05/28] common/cnxk: support NIX inline device IRQ Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 06/28] common/cnxk: support NIX inline device init and fini Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 07/28] common/cnxk: support NIX inline inbound and outbound setup Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 08/28] common/cnxk: disable CQ drop when inline inbound is enabled Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 09/28] common/cnxk: dump CPT LF registers on error intr Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 10/28] common/cnxk: align CPT LF enable/disable sequence Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 11/28] common/cnxk: restore NIX sqb pool limit before destroy Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 12/28] common/cnxk: add CQ enable support in NIX Tx path Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 13/28] common/cnxk: setup aura BP conf based on nix Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 14/28] common/cnxk: support anti-replay check in SW for cn9k Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 15/28] common/cnxk: support inline IPsec rte flow action Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 16/28] net/cnxk: support inline security setup for cn9k Nithin Dabilpuram
2021-10-06 16:21     ` Ferruh Yigit
2021-10-06 16:44       ` Nithin Kumar Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 17/28] net/cnxk: support inline security setup for cn10k Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 18/28] net/cnxk: support Rx security offload on cn9k Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 19/28] net/cnxk: support Tx " Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 20/28] net/cnxk: support Rx security offload on cn10k Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 21/28] net/cnxk: support Tx " Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 22/28] net/cnxk: support IPsec anti replay in cn9k Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 23/28] net/cnxk: support IPsec transport mode in cn10k Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 24/28] net/cnxk: update ethertype for mixed IPsec tunnel versions Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 25/28] net/cnxk: allow zero udp6 checksum for non inline device Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 26/28] net/cnxk: add crypto capabilities for AES CBC and HMAC SHA1 Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 27/28] net/cnxk: support configuring channel mask via devargs Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 28/28] net/cnxk: reflect globally enabled offloads in queue conf Nithin Dabilpuram
2021-10-02 13:49   ` [dpdk-dev] [PATCH v3 00/28] net/cnxk: support for inline ipsec Jerin Jacob
