DPDK patches and discussions
 help / color / mirror / Atom feed
* [PATCH 1/5] common/cnxk: reserve CPT LF for Rx inject
@ 2024-01-19  5:57 Rahul Bhansali
  2024-01-19  5:57 ` [PATCH 2/5] net/cnxk: support of " Rahul Bhansali
                   ` (4 more replies)
  0 siblings, 5 replies; 12+ messages in thread
From: Rahul Bhansali @ 2024-01-19  5:57 UTC (permalink / raw)
  To: dev, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: Rahul Bhansali

An additional CPT LF will be reserved and attached to the
inline device to enable RXC, and used for the Rx inject feature.

Signed-off-by: Rahul Bhansali <rbhansali@marvell.com>
---
Depends-on: series-30819 ("Fixes and improvements in crypto cnxk")

 drivers/common/cnxk/roc_features.h     |  7 +++
 drivers/common/cnxk/roc_nix.h          |  1 +
 drivers/common/cnxk/roc_nix_inl.c      | 71 ++++++++++++++++++++++++--
 drivers/common/cnxk/roc_nix_inl.h      |  5 +-
 drivers/common/cnxk/roc_nix_inl_dev.c  | 61 +++++++++++++---------
 drivers/common/cnxk/roc_nix_inl_priv.h |  7 ++-
 drivers/common/cnxk/version.map        |  2 +
 7 files changed, 123 insertions(+), 31 deletions(-)

diff --git a/drivers/common/cnxk/roc_features.h b/drivers/common/cnxk/roc_features.h
index f4807ee271..3b512be132 100644
--- a/drivers/common/cnxk/roc_features.h
+++ b/drivers/common/cnxk/roc_features.h
@@ -83,4 +83,11 @@ roc_feature_nix_has_inl_ipsec(void)
 {
 	return !roc_model_is_cnf10kb();
 }
+
+/* Rx inject (inbound inline IPsec second pass via CPT) is supported
+ * only on CN10KA B0 and CN10KB models.
+ */
+static inline bool
+roc_feature_nix_has_rx_inject(void)
+{
+	return (roc_model_is_cn10ka_b0() || roc_model_is_cn10kb());
+}
+
 #endif
diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index 84e6fc3df5..eebdd4ecc3 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -474,6 +474,7 @@ struct roc_nix {
 	uint32_t meta_buf_sz;
 	bool force_rx_aura_bp;
 	bool custom_meta_aura_ena;
+	bool rx_inj_ena;
 	/* End of input parameters */
 	/* LMT line base for "Per Core Tx LMT line" mode*/
 	uintptr_t lmt_base;
diff --git a/drivers/common/cnxk/roc_nix_inl.c b/drivers/common/cnxk/roc_nix_inl.c
index 07a90133ca..de8fd2a605 100644
--- a/drivers/common/cnxk/roc_nix_inl.c
+++ b/drivers/common/cnxk/roc_nix_inl.c
@@ -474,6 +474,34 @@ roc_nix_inl_outb_lf_base_get(struct roc_nix *roc_nix)
 	return (struct roc_cpt_lf *)nix->cpt_lf_base;
 }

+/* Get the CPT LF to be used for Rx inject on this NIX.
+ *
+ * Prefers the last CPT LF of the inline device when it was brought up
+ * with Rx inject enabled; otherwise falls back to the extra LF reserved
+ * past the port's outbound crypto queues.
+ *
+ * Returns the LF pointer, or NULL if unavailable.
+ */
+struct roc_cpt_lf *
+roc_nix_inl_inb_inj_lf_get(struct roc_nix *roc_nix)
+{
+	struct idev_cfg *idev = idev_get_cfg();
+	struct nix_inl_dev *inl_dev = NULL;
+	struct roc_cpt_lf *lf = NULL;
+	struct nix *nix;
+
+	if (!idev)
+		return NULL;
+
+	/* roc_nix is dereferenced on every path below (priv lookup and the
+	 * outbound LF fallback), so it must be valid even when an inline
+	 * device is present.
+	 */
+	if (roc_nix == NULL)
+		return NULL;
+
+	nix = roc_nix_to_nix_priv(roc_nix);
+	inl_dev = idev->nix_inl_dev;
+
+	/* Last LF of the inline device is reserved for Rx inject */
+	if (nix->inb_inl_dev && inl_dev && inl_dev->attach_cptlf &&
+	    inl_dev->rx_inj_ena)
+		return &inl_dev->cpt_lf[inl_dev->nb_cptlf - 1];
+
+	/* Fallback: extra LF allocated after the outbound crypto queues */
+	lf = roc_nix_inl_outb_lf_base_get(roc_nix);
+	if (lf)
+		lf += roc_nix->outb_nb_crypto_qs;
+	return lf;
+}
+
 uintptr_t
 roc_nix_inl_outb_sa_base_get(struct roc_nix *roc_nix)
 {
@@ -512,6 +540,35 @@ roc_nix_inl_inb_sa_base_get(struct roc_nix *roc_nix, bool inb_inl_dev)
 	return (uintptr_t)nix->inb_sa_base;
 }

+/* Check whether inbound inline IPsec Rx inject can be used on this NIX.
+ *
+ * @roc_nix: port handle (must be valid; dereferenced on all paths).
+ * @inb_inl_dev: true when inbound processing goes through the inline device.
+ *
+ * Returns true only when inline inbound is enabled on the port, the port
+ * requested Rx inject, and — in the inline-device case — the inline device
+ * has CPT LFs attached with Rx inject enabled.
+ */
+bool
+roc_nix_inl_inb_rx_inject_enable(struct roc_nix *roc_nix, bool inb_inl_dev)
+{
+	struct idev_cfg *idev = idev_get_cfg();
+	struct nix_inl_dev *inl_dev;
+	struct nix *nix;
+
+	if (idev == NULL)
+		return false;
+
+	/* roc_nix is dereferenced on every return path below, so it must be
+	 * checked regardless of inb_inl_dev.
+	 */
+	if (roc_nix == NULL)
+		return false;
+
+	nix = roc_nix_to_nix_priv(roc_nix);
+	if (!nix->inl_inb_ena)
+		return false;
+
+	if (inb_inl_dev) {
+		/* Inline device must have the extra inject LF attached and
+		 * the feature enabled on both inline device and port. Do not
+		 * fall through to the port-only check: the port-side inject
+		 * LF is not allocated when the inline device is in use.
+		 */
+		inl_dev = idev->nix_inl_dev;
+		if (inl_dev && inl_dev->attach_cptlf && inl_dev->rx_inj_ena &&
+		    roc_nix->rx_inj_ena)
+			return true;
+		return false;
+	}
+
+	return roc_nix->rx_inj_ena;
+}
+
 uint32_t
 roc_nix_inl_inb_spi_range(struct roc_nix *roc_nix, bool inb_inl_dev,
 			  uint32_t *min_spi, uint32_t *max_spi)
@@ -941,6 +998,7 @@ roc_nix_inl_outb_init(struct roc_nix *roc_nix)
 	bool ctx_ilen_valid = false;
 	size_t sa_sz, ring_sz;
 	uint8_t ctx_ilen = 0;
+	bool rx_inj = false;
 	uint16_t sso_pffunc;
 	uint8_t eng_grpmask;
 	uint64_t blkaddr, i;
@@ -958,6 +1016,12 @@ roc_nix_inl_outb_init(struct roc_nix *roc_nix)

 	/* Retrieve inline device if present */
 	inl_dev = idev->nix_inl_dev;
+	if (roc_nix->rx_inj_ena && !(nix->inb_inl_dev && inl_dev && inl_dev->attach_cptlf &&
+				     inl_dev->rx_inj_ena)) {
+		nb_lf++;
+		rx_inj = true;
+	}
+
 	sso_pffunc = inl_dev ? inl_dev->dev.pf_func : idev_sso_pffunc_get();
 	/* Use sso_pffunc if explicitly requested */
 	if (roc_nix->ipsec_out_sso_pffunc)
@@ -986,7 +1050,8 @@ roc_nix_inl_outb_init(struct roc_nix *roc_nix)
 		       1ULL << ROC_CPT_DFLT_ENG_GRP_SE_IE |
 		       1ULL << ROC_CPT_DFLT_ENG_GRP_AE);
 	rc = cpt_lfs_alloc(dev, eng_grpmask, blkaddr,
-			   !roc_nix->ipsec_out_sso_pffunc, ctx_ilen_valid, ctx_ilen, false, 0);
+			   !roc_nix->ipsec_out_sso_pffunc, ctx_ilen_valid, ctx_ilen,
+			   rx_inj, nb_lf - 1);
 	if (rc) {
 		plt_err("Failed to alloc CPT LF resources, rc=%d", rc);
 		goto lf_detach;
@@ -1632,7 +1697,7 @@ roc_nix_inl_sa_sync(struct roc_nix *roc_nix, void *sa, bool inb,
 	if (inb && get_inl_lf) {
 		outb_lf = NULL;
 		if (inl_dev && inl_dev->attach_cptlf)
-			outb_lf = &inl_dev->cpt_lf;
+			outb_lf = &inl_dev->cpt_lf[0];
 	}

 	if (outb_lf) {
@@ -1696,7 +1761,7 @@ roc_nix_inl_ctx_write(struct roc_nix *roc_nix, void *sa_dptr, void *sa_cptr,
 	if (inb && get_inl_lf) {
 		outb_lf = NULL;
 		if (inl_dev && inl_dev->attach_cptlf)
-			outb_lf = &inl_dev->cpt_lf;
+			outb_lf = &inl_dev->cpt_lf[0];
 	}

 	if (outb_lf) {
diff --git a/drivers/common/cnxk/roc_nix_inl.h b/drivers/common/cnxk/roc_nix_inl.h
index ab1e9c0f98..a89b40ff61 100644
--- a/drivers/common/cnxk/roc_nix_inl.h
+++ b/drivers/common/cnxk/roc_nix_inl.h
@@ -142,9 +142,10 @@ struct roc_nix_inl_dev {
 	uint32_t nb_meta_bufs;
 	uint32_t meta_buf_sz;
 	uint32_t max_ipsec_rules;
+	uint8_t rx_inj_ena; /* Rx Inject Enable */
 	/* End of input parameters */

-#define ROC_NIX_INL_MEM_SZ (1280)
+#define ROC_NIX_INL_MEM_SZ (1408)
 	uint8_t reserved[ROC_NIX_INL_MEM_SZ] __plt_cache_aligned;
 } __plt_cache_aligned;

@@ -167,6 +168,7 @@ int __roc_api roc_nix_inl_inb_fini(struct roc_nix *roc_nix);
 bool __roc_api roc_nix_inl_inb_is_enabled(struct roc_nix *roc_nix);
 uintptr_t __roc_api roc_nix_inl_inb_sa_base_get(struct roc_nix *roc_nix,
 						bool inl_dev_sa);
+bool __roc_api roc_nix_inl_inb_rx_inject_enable(struct roc_nix *roc_nix, bool inl_dev_sa);
 uint32_t __roc_api roc_nix_inl_inb_spi_range(struct roc_nix *roc_nix,
 					     bool inl_dev_sa, uint32_t *min,
 					     uint32_t *max);
@@ -196,6 +198,7 @@ bool __roc_api roc_nix_inl_outb_is_enabled(struct roc_nix *roc_nix);
 uintptr_t __roc_api roc_nix_inl_outb_sa_base_get(struct roc_nix *roc_nix);
 struct roc_cpt_lf *__roc_api
 roc_nix_inl_outb_lf_base_get(struct roc_nix *roc_nix);
+struct roc_cpt_lf *__roc_api roc_nix_inl_inb_inj_lf_get(struct roc_nix *roc_nix);
 uint16_t __roc_api roc_nix_inl_outb_sso_pffunc_get(struct roc_nix *roc_nix);
 int __roc_api roc_nix_inl_cb_register(roc_nix_inl_sso_work_cb_t cb, void *args);
 int __roc_api roc_nix_inl_cb_unregister(roc_nix_inl_sso_work_cb_t cb,
diff --git a/drivers/common/cnxk/roc_nix_inl_dev.c b/drivers/common/cnxk/roc_nix_inl_dev.c
index f6991de051..60e6a43033 100644
--- a/drivers/common/cnxk/roc_nix_inl_dev.c
+++ b/drivers/common/cnxk/roc_nix_inl_dev.c
@@ -174,9 +174,9 @@ nix_inl_nix_ipsec_cfg(struct nix_inl_dev *inl_dev, bool ena)
 static int
 nix_inl_cpt_setup(struct nix_inl_dev *inl_dev, bool inl_dev_sso)
 {
-	struct roc_cpt_lf *lf = &inl_dev->cpt_lf;
 	struct dev *dev = &inl_dev->dev;
 	bool ctx_ilen_valid = false;
+	struct roc_cpt_lf *lf;
 	uint8_t eng_grpmask;
 	uint8_t ctx_ilen = 0;
 	int rc;
@@ -194,27 +194,29 @@ nix_inl_cpt_setup(struct nix_inl_dev *inl_dev, bool inl_dev_sso)
 	}

 	rc = cpt_lfs_alloc(dev, eng_grpmask, RVU_BLOCK_ADDR_CPT0, inl_dev_sso, ctx_ilen_valid,
-			   ctx_ilen, false, 0);
+			   ctx_ilen, inl_dev->rx_inj_ena, inl_dev->nb_cptlf - 1);
 	if (rc) {
 		plt_err("Failed to alloc CPT LF resources, rc=%d", rc);
 		return rc;
 	}

-	/* Setup CPT LF for submitting control opcode */
-	lf = &inl_dev->cpt_lf;
-	lf->lf_id = 0;
-	lf->nb_desc = 0; /* Set to default */
-	lf->dev = &inl_dev->dev;
-	lf->msixoff = inl_dev->cpt_msixoff;
-	lf->pci_dev = inl_dev->pci_dev;
+	for (int i = 0; i < inl_dev->nb_cptlf; i++) {
+		/* Setup CPT LF for submitting control opcode */
+		lf = &inl_dev->cpt_lf[i];
+		lf->lf_id = i;
+		lf->nb_desc = 0; /* Set to default */
+		lf->dev = &inl_dev->dev;
+		lf->msixoff = inl_dev->cpt_msixoff[i];
+		lf->pci_dev = inl_dev->pci_dev;

-	rc = cpt_lf_init(lf);
-	if (rc) {
-		plt_err("Failed to initialize CPT LF, rc=%d", rc);
-		goto lf_free;
-	}
+		rc = cpt_lf_init(lf);
+		if (rc) {
+			plt_err("Failed to initialize CPT LF, rc=%d", rc);
+			goto lf_free;
+		}

-	roc_cpt_iq_enable(lf);
+		roc_cpt_iq_enable(lf);
+	}
 	return 0;
 lf_free:
 	rc |= cpt_lfs_free(dev);
@@ -224,21 +226,22 @@ nix_inl_cpt_setup(struct nix_inl_dev *inl_dev, bool inl_dev_sso)
 static int
 nix_inl_cpt_release(struct nix_inl_dev *inl_dev)
 {
-	struct roc_cpt_lf *lf = &inl_dev->cpt_lf;
 	struct dev *dev = &inl_dev->dev;
-	int rc;
+	int rc, i;

 	if (!inl_dev->attach_cptlf)
 		return 0;

 	/* Cleanup CPT LF queue */
-	cpt_lf_fini(lf);
+	for (i = 0; i < inl_dev->nb_cptlf; i++)
+		cpt_lf_fini(&inl_dev->cpt_lf[i]);

 	/* Free LF resources */
 	rc = cpt_lfs_free(dev);
-	if (!rc)
-		lf->dev = NULL;
-	else
+	if (!rc) {
+		for (i = 0; i < inl_dev->nb_cptlf; i++)
+			inl_dev->cpt_lf[i].dev = NULL;
+	} else
 		plt_err("Failed to free CPT LF resources, rc=%d", rc);
 	return rc;
 }
@@ -533,7 +536,7 @@ nix_inl_lf_attach(struct nix_inl_dev *inl_dev)
 	req->ssow = 1;
 	req->sso = 1;
 	if (inl_dev->attach_cptlf) {
-		req->cptlfs = 1;
+		req->cptlfs = inl_dev->nb_cptlf;
 		req->cpt_blkaddr = RVU_BLOCK_ADDR_CPT0;
 	}

@@ -550,7 +553,9 @@ nix_inl_lf_attach(struct nix_inl_dev *inl_dev)
 	inl_dev->nix_msixoff = msix_rsp->nix_msixoff;
 	inl_dev->ssow_msixoff = msix_rsp->ssow_msixoff[0];
 	inl_dev->sso_msixoff = msix_rsp->sso_msixoff[0];
-	inl_dev->cpt_msixoff = msix_rsp->cptlf_msixoff[0];
+
+	for (int i = 0; i < inl_dev->nb_cptlf; i++)
+		inl_dev->cpt_msixoff[i] = msix_rsp->cptlf_msixoff[i];

 	nix_blkaddr = nix_get_blkaddr(dev);
 	inl_dev->is_nix1 = (nix_blkaddr == RVU_BLOCK_ADDR_NIX1);
@@ -912,6 +917,12 @@ roc_nix_inl_dev_init(struct roc_nix_inl_dev *roc_inl_dev)
 	inl_dev->meta_buf_sz = roc_inl_dev->meta_buf_sz;
 	inl_dev->soft_exp_poll_freq = roc_inl_dev->soft_exp_poll_freq;

+	if (roc_inl_dev->rx_inj_ena) {
+		inl_dev->rx_inj_ena = 1;
+		inl_dev->nb_cptlf = NIX_INL_CPT_LF;
+	} else
+		inl_dev->nb_cptlf = 1;
+
 	if (roc_inl_dev->spb_drop_pc)
 		inl_dev->spb_drop_pc = roc_inl_dev->spb_drop_pc;
 	if (roc_inl_dev->lpb_drop_pc)
@@ -1068,7 +1079,7 @@ roc_nix_inl_dev_cpt_setup(bool use_inl_dev_sso)
 		return -ENOENT;
 	inl_dev = idev->nix_inl_dev;

-	if (inl_dev->cpt_lf.dev != NULL)
+	if (inl_dev->cpt_lf[0].dev != NULL)
 		return -EBUSY;

 	return nix_inl_cpt_setup(inl_dev, use_inl_dev_sso);
@@ -1084,7 +1095,7 @@ roc_nix_inl_dev_cpt_release(void)
 		return -ENOENT;
 	inl_dev = idev->nix_inl_dev;

-	if (inl_dev->cpt_lf.dev == NULL)
+	if (inl_dev->cpt_lf[0].dev == NULL)
 		return 0;

 	return nix_inl_cpt_release(inl_dev);
diff --git a/drivers/common/cnxk/roc_nix_inl_priv.h b/drivers/common/cnxk/roc_nix_inl_priv.h
index 3217f4ebc1..5afc7d6655 100644
--- a/drivers/common/cnxk/roc_nix_inl_priv.h
+++ b/drivers/common/cnxk/roc_nix_inl_priv.h
@@ -7,6 +7,7 @@
 #include <sys/types.h>

 #define NIX_INL_META_SIZE 384u
+#define NIX_INL_CPT_LF	2

 struct nix_inl_dev;
 struct nix_inl_qint {
@@ -31,7 +32,7 @@ struct nix_inl_dev {
 	uint16_t nix_msixoff;
 	uint16_t ssow_msixoff;
 	uint16_t sso_msixoff;
-	uint16_t cpt_msixoff;
+	uint16_t cpt_msixoff[NIX_INL_CPT_LF];

 	/* SSO data */
 	uint32_t xaq_buf_size;
@@ -62,9 +63,10 @@ struct nix_inl_dev {
 	/* NIX/CPT data */
 	void *inb_sa_base;
 	uint16_t inb_sa_sz;
+	uint8_t nb_cptlf;

 	/* CPT data */
-	struct roc_cpt_lf cpt_lf;
+	struct roc_cpt_lf cpt_lf[NIX_INL_CPT_LF];

 	/* OUTB soft expiry poll thread */
 	plt_thread_t soft_exp_poll_thread;
@@ -91,6 +93,7 @@ struct nix_inl_dev {
 	bool ts_ena;
 	uint32_t nb_meta_bufs;
 	uint32_t meta_buf_sz;
+	uint8_t rx_inj_ena; /* Rx Inject Enable */

 	/* NPC */
 	int *ipsec_index;
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index 4981d42ab7..892fcb1f0d 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -243,6 +243,8 @@ INTERNAL {
 	roc_nix_inl_eng_caps_get;
 	roc_nix_inl_inb_is_enabled;
 	roc_nix_inl_inb_init;
+	roc_nix_inl_inb_inj_lf_get;
+	roc_nix_inl_inb_rx_inject_enable;
 	roc_nix_inl_inb_sa_base_get;
 	roc_nix_inl_inb_sa_get;
 	roc_nix_inl_inb_spi_range;
--
2.25.1


^ permalink raw reply	[flat|nested] 12+ messages in thread

* [PATCH 2/5] net/cnxk: support of Rx inject
  2024-01-19  5:57 [PATCH 1/5] common/cnxk: reserve CPT LF for Rx inject Rahul Bhansali
@ 2024-01-19  5:57 ` Rahul Bhansali
  2024-02-22  8:55   ` Jerin Jacob
  2024-01-19  5:57 ` [PATCH 3/5] common/cnxk: fix for inline dev pointer check Rahul Bhansali
                   ` (3 subsequent siblings)
  4 siblings, 1 reply; 12+ messages in thread
From: Rahul Bhansali @ 2024-01-19  5:57 UTC (permalink / raw)
  To: dev, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: Rahul Bhansali

Add Rx inject security callback APIs to configure the feature,
inject packets to CPT, and receive them back on the Rx path as
regular received packets.
The devarg "rx_inj_ena=1" is required to enable the inline IPsec
Rx inject feature. If an inline device is used, this devarg must
be passed to both the inline device and the eth device.

Signed-off-by: Rahul Bhansali <rbhansali@marvell.com>
---
 doc/guides/nics/cnxk.rst               |  27 +++
 drivers/net/cnxk/cn10k_ethdev.c        |   4 +
 drivers/net/cnxk/cn10k_ethdev_sec.c    |  48 +++++
 drivers/net/cnxk/cn10k_rx.h            | 241 ++++++++++++++++++++++++-
 drivers/net/cnxk/cn10k_rxtx.h          |  57 ++++++
 drivers/net/cnxk/cn10k_tx.h            |  57 ------
 drivers/net/cnxk/cnxk_ethdev.h         |   3 +
 drivers/net/cnxk/cnxk_ethdev_devargs.c |   8 +-
 drivers/net/cnxk/cnxk_ethdev_dp.h      |   8 +
 drivers/net/cnxk/cnxk_ethdev_sec.c     |  21 ++-
 10 files changed, 405 insertions(+), 69 deletions(-)

diff --git a/doc/guides/nics/cnxk.rst b/doc/guides/nics/cnxk.rst
index 9ec52e380f..39660dba82 100644
--- a/doc/guides/nics/cnxk.rst
+++ b/doc/guides/nics/cnxk.rst
@@ -416,6 +416,19 @@ Runtime Config Options
    With the above configuration, PMD would allocate meta buffers of size 512 for
    inline inbound IPsec processing second pass.
 
+- ``Rx Inject Enable inbound inline IPsec for second pass`` (default ``0``)
+
+   Rx packet inject feature for inbound inline IPsec processing can be enabled
+   by ``rx_inj_ena`` ``devargs`` parameter.
+   This option is for OCTEON CN106-B0/CN103XX SoC family.
+
+   For example::
+
+      -a 0002:02:00.0,rx_inj_ena=1
+
+   With the above configuration, the driver enables packet injection from ARM
+   cores to the crypto hardware for processing; processed packets are then
+   delivered back on the Rx path.
+
 .. note::
 
    Above devarg parameters are configurable per device, user needs to pass the
@@ -613,6 +626,20 @@ Runtime Config Options for inline device
    With the above configuration, driver would poll for aging flows every 50
    seconds.
 
+- ``Rx Inject Enable inbound inline IPsec for second pass`` (default ``0``)
+
+   Rx packet inject feature for inbound inline IPsec processing can be enabled
+   by ``rx_inj_ena`` ``devargs`` parameter with both inline device and ethdev
+   device.
+   This option is for OCTEON CN106-B0/CN103XX SoC family.
+
+   For example::
+
+      -a 0002:1d:00.0,rx_inj_ena=1
+
+   With the above configuration, the driver enables packet injection from ARM
+   cores to the crypto hardware for processing; processed packets are then
+   delivered back on the Rx path.
+
 Debugging Options
 -----------------
 
diff --git a/drivers/net/cnxk/cn10k_ethdev.c b/drivers/net/cnxk/cn10k_ethdev.c
index a2e943a3d0..78d1dca3c1 100644
--- a/drivers/net/cnxk/cn10k_ethdev.c
+++ b/drivers/net/cnxk/cn10k_ethdev.c
@@ -593,6 +593,10 @@ cn10k_nix_dev_start(struct rte_eth_dev *eth_dev)
 	if (dev->rx_offload_flags & NIX_RX_OFFLOAD_SECURITY_F)
 		cn10k_nix_rx_queue_meta_aura_update(eth_dev);
 
+	/* Set flags for Rx Inject feature */
+	if (roc_idev_nix_rx_inject_get(nix->port_id))
+		dev->rx_offload_flags |= NIX_RX_SEC_REASSEMBLY_F;
+
 	cn10k_eth_set_tx_function(eth_dev);
 	cn10k_eth_set_rx_function(eth_dev);
 	return 0;
diff --git a/drivers/net/cnxk/cn10k_ethdev_sec.c b/drivers/net/cnxk/cn10k_ethdev_sec.c
index 575d0fabd5..42e4867d3c 100644
--- a/drivers/net/cnxk/cn10k_ethdev_sec.c
+++ b/drivers/net/cnxk/cn10k_ethdev_sec.c
@@ -1253,6 +1253,52 @@ eth_sec_caps_add(struct rte_security_capability eth_sec_caps[], uint32_t *idx,
 	*idx += nb_caps;
 }
 
+/* rte_security fast-path handler: inject @nb_pkts inbound packets to CPT
+ * for IPsec processing using the per-port inject config prepared by
+ * cn10k_eth_sec_rx_inject_config(). Returns the number of packets queued.
+ */
+static uint16_t __rte_hot
+cn10k_eth_sec_inb_rx_inject(void *device, struct rte_mbuf **pkts,
+			    struct rte_security_session **sess, uint16_t nb_pkts)
+{
+	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+
+	return cn10k_nix_inj_pkts(sess, &dev->inj_cfg, pkts, nb_pkts);
+}
+
+/* Enable/disable inbound inline IPsec Rx inject for @port_id and cache the
+ * CPT LF address, SA base and CPT instruction word 0 template needed by the
+ * fast-path inject routine. Returns 0 on success or a negative errno.
+ */
+static int
+cn10k_eth_sec_rx_inject_config(void *device, uint16_t port_id, bool enable)
+{
+	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	uint64_t channel, pf_func, inj_match_id = 0xFFFFUL;
+	struct cnxk_ethdev_inj_cfg *inj_cfg;
+	struct roc_nix *nix = &dev->nix;
+	struct roc_cpt_lf *inl_lf;
+	uint64_t sa_base;
+
+	if (!rte_eth_dev_is_valid_port(port_id))
+		return -EINVAL;
+
+	/* Only allowed after configure and before start */
+	if (eth_dev->data->dev_started || !eth_dev->data->dev_configured)
+		return -EBUSY;
+
+	if (!roc_nix_inl_inb_rx_inject_enable(nix, dev->inb.inl_dev))
+		return -ENOTSUP;
+
+	roc_idev_nix_rx_inject_set(port_id, enable);
+
+	/* NOTE(review): roc_nix_inl_inb_inj_lf_get() can return NULL; the
+	 * io_addr dereference below would fault — confirm the
+	 * rx_inject_enable gate above makes that impossible.
+	 */
+	inl_lf = roc_nix_inl_inb_inj_lf_get(nix);
+	sa_base = roc_nix_inl_inb_sa_base_get(nix, dev->inb.inl_dev);
+
+	inj_cfg = &dev->inj_cfg;
+	/* Low bits of sa_base carry the port id for Rx path identification */
+	inj_cfg->sa_base = sa_base | eth_dev->data->port_id;
+	inj_cfg->io_addr = inl_lf->io_addr;
+	inj_cfg->lmt_base = nix->lmt_base;
+	channel = roc_nix_get_base_chan(nix);
+	pf_func = roc_nix_inl_dev_pffunc_get();
+	/* Match ID 0xFFFF marks ethdev-injected packets on the Rx side */
+	inj_cfg->cmd_w0 = pf_func << 48 | inj_match_id << 32 | channel << 4;
+
+	return 0;
+}
+
 void
 cn10k_eth_sec_ops_override(void)
 {
@@ -1287,4 +1333,6 @@ cn10k_eth_sec_ops_override(void)
 	cnxk_eth_sec_ops.session_stats_get = cn10k_eth_sec_session_stats_get;
 	cnxk_eth_sec_ops.macsec_sc_stats_get = cnxk_eth_macsec_sc_stats_get;
 	cnxk_eth_sec_ops.macsec_sa_stats_get = cnxk_eth_macsec_sa_stats_get;
+	cnxk_eth_sec_ops.rx_inject_configure = cn10k_eth_sec_rx_inject_config;
+	cnxk_eth_sec_ops.inb_pkt_rx_inject = cn10k_eth_sec_inb_rx_inject;
 }
diff --git a/drivers/net/cnxk/cn10k_rx.h b/drivers/net/cnxk/cn10k_rx.h
index 7bb4c86d75..c4ad1b64fe 100644
--- a/drivers/net/cnxk/cn10k_rx.h
+++ b/drivers/net/cnxk/cn10k_rx.h
@@ -5,6 +5,7 @@
 #define __CN10K_RX_H__
 
 #include <rte_ethdev.h>
+#include <rte_security_driver.h>
 #include <rte_vect.h>
 #include "cn10k_rxtx.h"
 
@@ -487,8 +488,19 @@ nix_sec_meta_to_mbuf_sc(uint64_t cq_w1, uint64_t cq_w5, const uint64_t sa_base,
 	inb_sa = roc_nix_inl_ot_ipsec_inb_sa(sa_base, sa_idx);
 	inb_priv = roc_nix_inl_ot_ipsec_inb_sa_sw_rsvd(inb_sa);
 
-	/* Update dynamic field with userdata */
-	*rte_security_dynfield(inner) = (uint64_t)inb_priv->userdata;
+	/* Cryptodev injected packet can be identified from SA IDX 0xFFFFFFFF, and
+	 * Ethdev injected packet can be identified with match ID 0xFFFF.
+	 */
+	if (flags & NIX_RX_REAS_F && (sa_idx == 0xFFFFFFFF || hdr->w0.match_id == 0xFFFFU)) {
+		*(uint64_t *)(&inner->rearm_data) = (mbuf_init & ~(BIT_ULL(16) - 1)) |
+						    inner->data_off;
+		if (hdr->w0.match_id == 0xFFFFU)
+			*rte_security_dynfield(inner) = (uint64_t)inb_priv->userdata;
+	} else {
+		/* Update dynamic field with userdata */
+		*rte_security_dynfield(inner) = (uint64_t)inb_priv->userdata;
+		*(uint64_t *)(&inner->rearm_data) = mbuf_init;
+	}
 
 	/* Get ucc from cpt parse header */
 	ucc = hdr->w3.hw_ccode;
@@ -502,7 +514,6 @@ nix_sec_meta_to_mbuf_sc(uint64_t cq_w1, uint64_t cq_w5, const uint64_t sa_base,
 
 	inner->pkt_len = len;
 	inner->data_len = len;
-	*(uint64_t *)(&inner->rearm_data) = mbuf_init;
 
 	inner->ol_flags = ((CPT_COMP_HWGOOD_MASK & (1U << ucc)) ?
 			   RTE_MBUF_F_RX_SEC_OFFLOAD :
@@ -567,11 +578,20 @@ nix_sec_meta_to_mbuf(uint64_t cq_w1, uint64_t cq_w5, uintptr_t inb_sa,
 	*ol_flags &= ~(RTE_MBUF_F_RX_L4_CKSUM_MASK |
 		       RTE_MBUF_F_RX_IP_CKSUM_MASK);
 
-	/* Get SPI from CPT_PARSE_S's cookie(already swapped) */
-	inb_priv = roc_nix_inl_ot_ipsec_inb_sa_sw_rsvd((void *)inb_sa);
 
-	/* Update dynamic field with userdata */
-	*rte_security_dynfield(inner) = (uint64_t)inb_priv->userdata;
+	if (flags & NIX_RX_REAS_F && !inb_sa) {
+		/* Clear and update original lower 16 bit of data offset */
+		*rearm = (*rearm & ~(BIT_ULL(16) - 1)) | inner->data_off;
+	} else {
+		/* Get SPI from CPT_PARSE_S's cookie(already swapped) */
+		inb_priv = roc_nix_inl_ot_ipsec_inb_sa_sw_rsvd((void *)inb_sa);
+		/* Update dynamic field with userdata */
+		*rte_security_dynfield(inner) = (uint64_t)inb_priv->userdata;
+	}
+
+	/* Clear and update original lower 16 bit of data offset */
+	if (flags & NIX_RX_REAS_F && hdr->w0.match_id == 0xFFFFU)
+		*rearm = (*rearm & ~(BIT_ULL(16) - 1)) | inner->data_off;
 
 	/* Mark inner mbuf as get */
 	if (!(flags & NIX_RX_REAS_F) ||
@@ -604,8 +624,10 @@ nix_sec_meta_to_mbuf(uint64_t cq_w1, uint64_t cq_w5, uintptr_t inb_sa,
 			*rearm = vsetq_lane_u64(mbuf_init, *rearm, 0);
 		} else {
 			/* Reassembly failure */
-			nix_sec_attach_frags(hdr, inner, inb_priv, mbuf_init);
-			*ol_flags |= inner->ol_flags;
+			if (inb_sa) {
+				nix_sec_attach_frags(hdr, inner, inb_priv, mbuf_init);
+				*ol_flags |= inner->ol_flags;
+			}
 		}
 	} else if (flags & NIX_RX_REAS_F) {
 		/* Without fragmentation but may have to handle OOP session */
@@ -703,7 +725,14 @@ nix_cqe_xtract_mseg(const union nix_rx_parse_u *rx, struct rte_mbuf *mbuf,
 		cq_w5 = *((const uint64_t *)rx + 4);
 	/* Use inner rx parse for meta pkts sg list */
 	if (cq_w1 & BIT(11) && flags & NIX_RX_OFFLOAD_SECURITY_F) {
-		const uint64_t *wqe = (const uint64_t *)(mbuf + 1);
+		const uint64_t *wqe;
+		/* Rx Inject packet must have Match ID 0xFFFF and for this
+		 * wqe will get from address stored at mbuf+1 location
+		 */
+		if ((flags & NIX_RX_REAS_F) && hdr->w0.match_id == 0xFFFFU)
+			wqe = (const uint64_t *)*((uint64_t *)(mbuf + 1));
+		else
+			wqe = (const uint64_t *)(mbuf + 1);
 
 		if (hdr->w0.pkt_fmt != ROC_IE_OT_SA_PKT_FMT_FULL)
 			rx = (const union nix_rx_parse_u *)(wqe + 1);
@@ -1191,6 +1220,187 @@ cn10k_nix_flush_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pk
 	return nb_pkts;
 }
 
+#if defined(RTE_ARCH_ARM64)
+/* Build a NIX SG (scatter-gather) subdescriptor list at @cmd covering every
+ * segment of mbuf chain @m. Each SG subdesc holds up to 3 segment pointers;
+ * chain links are cleared as segments are consumed. Any pkt_len bytes not
+ * accounted for by data_len are folded into the last segment's length.
+ * Returns the descriptor size in 16B (2-dword) units, rounded up.
+ */
+static __rte_always_inline uint16_t
+cn10k_nix_rx_inj_prepare_mseg(struct rte_mbuf *m, uint64_t *cmd)
+{
+	union nix_send_sg_s *sg, l_sg;
+	struct rte_mbuf *m_next;
+	uint16_t segdw, nb_segs;
+	uint64_t len, dlen;
+	uint64_t *slist;
+
+	sg = (union nix_send_sg_s *)cmd;
+	l_sg.u = sg->u;
+	l_sg.u &= 0xC00000000000000; /* LD_TYPE */
+	l_sg.subdc = NIX_SUBDC_SG;
+	nb_segs = m->nb_segs;
+	len = m->pkt_len;
+	slist = &cmd[1];
+
+	/* Fill mbuf segments */
+	do {
+		*slist = rte_pktmbuf_iova(m);
+		dlen = m->data_len;
+		len -= dlen;
+
+		/* Set the segment length */
+		l_sg.u |= ((uint64_t)dlen << (l_sg.segs << 4));
+		l_sg.segs += 1;
+		slist++;
+		nb_segs--;
+		/* SG subdesc full (3 segments) — flush it and start a new one */
+		if (l_sg.segs > 2 && nb_segs) {
+			sg->u = l_sg.u;
+			/* Next SG subdesc */
+			sg = (union nix_send_sg_s *)slist;
+			l_sg.u = sg->u;
+			l_sg.u &= 0xC00000000000000; /* LD_TYPE */
+			l_sg.subdc = NIX_SUBDC_SG;
+			slist++;
+		}
+		m_next = m->next;
+		m->next = NULL;
+		m = m_next;
+	} while (nb_segs);
+
+	/* Add remaining bytes of data to last seg */
+	if (len) {
+		uint8_t shft = (l_sg.subdc == NIX_SUBDC_SG) ? ((l_sg.segs - 1) << 4) : 0;
+		dlen = ((l_sg.u >> shft) & 0xFFFFULL) + len;
+		l_sg.u = l_sg.u & ~(0xFFFFULL << shft);
+		l_sg.u |= dlen << shft;
+	}
+	/* Write the last subdc out */
+	sg->u = l_sg.u;
+
+	/* Dwords consumed so far (8B units) ... */
+	segdw = (uint64_t *)slist - cmd;
+	/* Roundup extra dwords to multiple of 2 */
+	segdw = (segdw >> 1) + (segdw & 0x1);
+	return segdw;
+}
+
+/* Inject @nb_pkts inbound IPsec packets to CPT for decryption; results are
+ * delivered back on the NIX Rx path. One 8-dword CPT instruction is built
+ * per packet and submitted via LMTST in bursts of up to 32 packets (two
+ * instructions per LMT line). Always returns @nb_pkts.
+ */
+static __rte_always_inline uint16_t
+cn10k_nix_inj_pkts(struct rte_security_session **sess, struct cnxk_ethdev_inj_cfg *inj_cfg,
+		   struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+	uintptr_t c_lbase = inj_cfg->lmt_base;
+	struct cn10k_sec_sess_priv sess_priv;
+	uint64_t sa_base = inj_cfg->sa_base;
+	uint16_t c_lmt_id, burst, left, i;
+	uintptr_t cptres, rxphdr, dptr;
+	struct rte_mbuf *m, *last;
+	uint8_t lnum, shft, loff;
+	uint64x2_t cmd01, cmd23;
+	uint64_t ucode_cmd[4];
+	rte_iova_t c_io_addr;
+	uint64_t *laddr;
+	uint64_t sa, w0;
+	uint16_t segdw;
+
+	/* Get LMT base address and LMT ID as lcore id */
+	ROC_LMT_CPT_BASE_ID_GET(c_lbase, c_lmt_id);
+	c_io_addr = inj_cfg->io_addr;
+
+	left = nb_pkts;
+again:
+	burst = left > 32 ? 32 : left;
+
+	lnum = 0;
+	loff = 0;
+	shft = 16;
+
+	for (i = 0; i < burst; i++) {
+		m = tx_pkts[i];
+		sess_priv.u64 = sess[i]->fast_mdata;
+		last = rte_pktmbuf_lastseg(m);
+
+		/* CPT result area: 128B-aligned address past the last segment's data */
+		cptres = rte_pktmbuf_mtod_offset(last, uintptr_t, last->data_len);
+		cptres += BIT_ULL(7);
+		cptres = (cptres - 1) & ~(BIT_ULL(7) - 1);
+
+		if (m->nb_segs > 1) {
+			/* Will reserve NIX Rx descriptor with SG list after end of
+			 * last mbuf data location, and a pointer to it will be
+			 * stored at 1st mbuf space for Rx path multi-seg processing.
+			 */
+			/* Pointer to WQE header */
+			*(uint64_t *)(m + 1) = cptres;
+			/* Reserve 8 Dwords of WQE Hdr + Rx Parse Hdr */
+			rxphdr = cptres + 8;
+			dptr = rxphdr + 7 * 8;
+			/* Prepare Multiseg SG list */
+			segdw = cn10k_nix_rx_inj_prepare_mseg(m, (uint64_t *)(dptr));
+			*(uint64_t *)rxphdr = (uint64_t)(segdw - 1) << 12;
+			cptres += 64 + segdw * 16;
+			ucode_cmd[1] = dptr | ((uint64_t)m->nb_segs << 60);
+		} else {
+			dptr = (uint64_t)rte_pktmbuf_iova(m);
+			ucode_cmd[1] = dptr;
+		}
+
+		/* Prepare CPT instruction */
+		/* CPT word 0 and 1 */
+		cmd01 = vdupq_n_u64(0);
+		w0 = inj_cfg->cmd_w0 | ((uint64_t)m->l2_len - 2) << 24 | (uint64_t)m->l2_len << 16;
+		cmd01 = vsetq_lane_u64(w0, cmd01, 0);
+		cmd01 = vsetq_lane_u64(cptres, cmd01, 1);
+
+		/* CPT word 2 and 3 */
+		cmd23 = vdupq_n_u64(0);
+		/* Set PF func */
+		w0 &= 0xFFFF000000000000UL;
+		cmd23 = vsetq_lane_u64(w0, cmd23, 0);
+		cmd23 = vsetq_lane_u64(((uint64_t)m + sizeof(struct rte_mbuf)) | 1, cmd23, 1);
+
+		/* Locate inbound SA for this session's SA index */
+		sa_base &= ~0xFFFFUL;
+		sa = (uintptr_t)roc_nix_inl_ot_ipsec_inb_sa(sa_base, sess_priv.sa_idx);
+		ucode_cmd[0] = (ROC_IE_OT_MAJOR_OP_PROCESS_INBOUND_IPSEC << 48 | 1UL << 54 |
+				((uint64_t)sess_priv.chksum) << 32 |
+				((uint64_t)sess_priv.dec_ttl) << 34 | m->pkt_len);
+
+		ucode_cmd[2] = 0;
+		ucode_cmd[3] = (ROC_CPT_DFLT_ENG_GRP_SE_IE << 61 | 1UL << 60 | sa);
+
+		/* Move to our line */
+		laddr = LMT_OFF(c_lbase, lnum, loff ? 64 : 0);
+
+		/* Write CPT instruction to lmt line */
+		vst1q_u64(laddr, cmd01);
+		vst1q_u64((laddr + 2), cmd23);
+
+		*(__uint128_t *)(laddr + 4) = *(__uint128_t *)ucode_cmd;
+		*(__uint128_t *)(laddr + 6) = *(__uint128_t *)(ucode_cmd + 2);
+
+		/* Two instructions per LMT line: advance line on every 2nd pkt */
+		loff = !loff;
+		lnum = lnum + (loff ? 0 : 1);
+		shft = shft + (loff ? 0 : 3);
+	}
+
+	left -= burst;
+	tx_pkts += burst;
+	sess += burst;
+
+	/* Submit the burst with a single STEOR */
+	cn10k_nix_sec_steorl(c_io_addr, c_lmt_id, lnum, loff, shft);
+
+	rte_io_wmb();
+	if (left)
+		goto again;
+
+	return nb_pkts;
+}
+#else
+static __rte_always_inline uint16_t
+cn10k_nix_inj_pkts(struct rte_security_session **sess, struct cnxk_ethdev_inj_cfg *inj_cfg,
+		   struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+	RTE_SET_USED(sess);
+	RTE_SET_USED(inj_cfg);
+	RTE_SET_USED(tx_pkts);
+	RTE_SET_USED(nb_pkts);
+	return 0;
+}
+#endif
+
 #if defined(RTE_ARCH_ARM64)
 
 static __rte_always_inline uint64_t
@@ -1558,6 +1768,7 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
 			uint64x2_t inner0, inner1, inner2, inner3;
 			uint64x2_t wqe01, wqe23, sa01, sa23;
 			uint16x4_t lens, l2lens, ltypes;
+			uint64x2_t mask01, mask23;
 			uint8x8_t ucc;
 
 			cpth0 = (uintptr_t)mbuf0 + d_off;
@@ -1587,6 +1798,11 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
 
 			sa01 = vshrq_n_u64(sa01, 32);
 			sa23 = vshrq_n_u64(sa23, 32);
+
+			/* Crypto Look-aside Rx Inject case */
+			mask01 = vceqq_u64(sa01, vdupq_n_u64(0xFFFFFFFF));
+			mask23 = vceqq_u64(sa23, vdupq_n_u64(0xFFFFFFFF));
+
 			sa01 = vshlq_n_u64(sa01,
 					   ROC_NIX_INL_OT_IPSEC_INB_SA_SZ_LOG2);
 			sa23 = vshlq_n_u64(sa23,
@@ -1594,6 +1810,11 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
 			sa01 = vaddq_u64(sa01, vdupq_n_u64(sa_base));
 			sa23 = vaddq_u64(sa23, vdupq_n_u64(sa_base));
 
+			if (flags & NIX_RX_REAS_F) {
+				sa01 = vbicq_u64(sa01, mask01);
+				sa23 = vbicq_u64(sa23, mask23);
+			}
+
 			const uint8x16x2_t tbl = {{
 				{
 					/* ROC_IE_OT_UCC_SUCCESS_PKT_IP_BADCSUM */
diff --git a/drivers/net/cnxk/cn10k_rxtx.h b/drivers/net/cnxk/cn10k_rxtx.h
index aeffc4ac92..2143df1a7e 100644
--- a/drivers/net/cnxk/cn10k_rxtx.h
+++ b/drivers/net/cnxk/cn10k_rxtx.h
@@ -191,4 +191,61 @@ handle_tx_completion_pkts(struct cn10k_eth_txq *txq, uint8_t mt_safe)
 		rte_spinlock_unlock(&txq->tx_compl.ext_buf_lock);
 }
 
+/* Moved from cn10k_tx.h so the Rx inject path can reuse it.
+ * Builds the STEOR data word: sixteen 3-bit fields starting at bit 16,
+ * each encoding (dwords - 1) for one LMT line.
+ */
+static __rte_always_inline uint64_t
+cn10k_cpt_tx_steor_data(void)
+{
+	/* We have two CPT instructions per LMTLine */
+	const uint64_t dw_m1 = ROC_CN10K_TWO_CPT_INST_DW_M1;
+	uint64_t data;
+
+	/* This will be moved to addr area */
+	data = dw_m1 << 16;
+	data |= dw_m1 << 19;
+	data |= dw_m1 << 22;
+	data |= dw_m1 << 25;
+	data |= dw_m1 << 28;
+	data |= dw_m1 << 31;
+	data |= dw_m1 << 34;
+	data |= dw_m1 << 37;
+	data |= dw_m1 << 40;
+	data |= dw_m1 << 43;
+	data |= dw_m1 << 46;
+	data |= dw_m1 << 49;
+	data |= dw_m1 << 52;
+	data |= dw_m1 << 55;
+	data |= dw_m1 << 58;
+	data |= dw_m1 << 61;
+
+	return data;
+}
+
+/* Moved from cn10k_tx.h so the Rx inject path can reuse it.
+ * Issue an LMTST STEOR to @io_addr submitting @lnum full LMT lines plus an
+ * optional half line (@loff); @shft is the bit offset of the last line's
+ * size field in the STEOR data word. No-op when nothing was queued.
+ */
+static __rte_always_inline void
+cn10k_nix_sec_steorl(uintptr_t io_addr, uint32_t lmt_id, uint8_t lnum,
+		     uint8_t loff, uint8_t shft)
+{
+	uint64_t data;
+	uintptr_t pa;
+
+	/* Check if there is any CPT instruction to submit */
+	if (!lnum && !loff)
+		return;
+
+	data = cn10k_cpt_tx_steor_data();
+	/* Update lmtline use for partial end line */
+	if (loff) {
+		data &= ~(0x7ULL << shft);
+		/* Update it to half full i.e 64B */
+		data |= (0x3UL << shft);
+	}
+
+	pa = io_addr | ((data >> 16) & 0x7) << 4;
+	data &= ~(0x7ULL << 16);
+	/* Update lines - 1 that contain valid data */
+	data |= ((uint64_t)(lnum + loff - 1)) << 12;
+	data |= (uint64_t)lmt_id;
+
+	/* STEOR */
+	roc_lmt_submit_steorl(data, pa);
+}
+
 #endif /* __CN10K_RXTX_H__ */
diff --git a/drivers/net/cnxk/cn10k_tx.h b/drivers/net/cnxk/cn10k_tx.h
index 467f0ccc65..664e47e1fc 100644
--- a/drivers/net/cnxk/cn10k_tx.h
+++ b/drivers/net/cnxk/cn10k_tx.h
@@ -314,34 +314,6 @@ cn10k_nix_tx_steor_vec_data(const uint16_t flags)
 	return data;
 }
 
-static __rte_always_inline uint64_t
-cn10k_cpt_tx_steor_data(void)
-{
-	/* We have two CPT instructions per LMTLine */
-	const uint64_t dw_m1 = ROC_CN10K_TWO_CPT_INST_DW_M1;
-	uint64_t data;
-
-	/* This will be moved to addr area */
-	data = dw_m1 << 16;
-	data |= dw_m1 << 19;
-	data |= dw_m1 << 22;
-	data |= dw_m1 << 25;
-	data |= dw_m1 << 28;
-	data |= dw_m1 << 31;
-	data |= dw_m1 << 34;
-	data |= dw_m1 << 37;
-	data |= dw_m1 << 40;
-	data |= dw_m1 << 43;
-	data |= dw_m1 << 46;
-	data |= dw_m1 << 49;
-	data |= dw_m1 << 52;
-	data |= dw_m1 << 55;
-	data |= dw_m1 << 58;
-	data |= dw_m1 << 61;
-
-	return data;
-}
-
 static __rte_always_inline void
 cn10k_nix_tx_skeleton(struct cn10k_eth_txq *txq, uint64_t *cmd,
 		      const uint16_t flags, const uint16_t static_sz)
@@ -461,35 +433,6 @@ cn10k_nix_sec_fc_wait(struct cn10k_eth_txq *txq, uint16_t nb_pkts)
 		goto again;
 }
 
-static __rte_always_inline void
-cn10k_nix_sec_steorl(uintptr_t io_addr, uint32_t lmt_id, uint8_t lnum,
-		     uint8_t loff, uint8_t shft)
-{
-	uint64_t data;
-	uintptr_t pa;
-
-	/* Check if there is any CPT instruction to submit */
-	if (!lnum && !loff)
-		return;
-
-	data = cn10k_cpt_tx_steor_data();
-	/* Update lmtline use for partial end line */
-	if (loff) {
-		data &= ~(0x7ULL << shft);
-		/* Update it to half full i.e 64B */
-		data |= (0x3UL << shft);
-	}
-
-	pa = io_addr | ((data >> 16) & 0x7) << 4;
-	data &= ~(0x7ULL << 16);
-	/* Update lines - 1 that contain valid data */
-	data |= ((uint64_t)(lnum + loff - 1)) << 12;
-	data |= (uint64_t)lmt_id;
-
-	/* STEOR */
-	roc_lmt_submit_steorl(data, pa);
-}
-
 #if defined(RTE_ARCH_ARM64)
 static __rte_always_inline void
 cn10k_nix_prep_sec_vec(struct rte_mbuf *m, uint64x2_t *cmd0, uint64x2_t *cmd1,
diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h
index 4d3ebf123b..015032827a 100644
--- a/drivers/net/cnxk/cnxk_ethdev.h
+++ b/drivers/net/cnxk/cnxk_ethdev.h
@@ -424,6 +424,9 @@ struct cnxk_eth_dev {
 	/* MCS device */
 	struct cnxk_mcs_dev *mcs_dev;
 	struct cnxk_macsec_sess_list mcs_list;
+
+	/* Inject packets */
+	struct cnxk_ethdev_inj_cfg inj_cfg;
 };
 
 struct cnxk_eth_rxq_sp {
diff --git a/drivers/net/cnxk/cnxk_ethdev_devargs.c b/drivers/net/cnxk/cnxk_ethdev_devargs.c
index 8e862be933..50dc80ce2c 100644
--- a/drivers/net/cnxk/cnxk_ethdev_devargs.c
+++ b/drivers/net/cnxk/cnxk_ethdev_devargs.c
@@ -279,6 +279,7 @@ parse_val_u16(const char *key, const char *value, void *extra_args)
 #define CNXK_SQB_SLACK		"sqb_slack"
 #define CNXK_NIX_META_BUF_SZ	"meta_buf_sz"
 #define CNXK_FLOW_AGING_POLL_FREQ	"aging_poll_freq"
+#define CNXK_NIX_RX_INJ_ENABLE	"rx_inj_ena"
 
 int
 cnxk_ethdev_parse_devargs(struct rte_devargs *devargs, struct cnxk_eth_dev *dev)
@@ -305,6 +306,7 @@ cnxk_ethdev_parse_devargs(struct rte_devargs *devargs, struct cnxk_eth_dev *dev)
 	uint32_t meta_buf_sz = 0;
 	uint16_t no_inl_dev = 0;
 	uint8_t lock_rx_ctx = 0;
+	uint8_t rx_inj_ena = 0;
 
 	memset(&sdp_chan, 0, sizeof(sdp_chan));
 	memset(&pre_l2_info, 0, sizeof(struct flow_pre_l2_size_info));
@@ -355,6 +357,7 @@ cnxk_ethdev_parse_devargs(struct rte_devargs *devargs, struct cnxk_eth_dev *dev)
 	rte_kvargs_process(kvlist, CNXK_NIX_META_BUF_SZ, &parse_meta_bufsize, &meta_buf_sz);
 	rte_kvargs_process(kvlist, CNXK_FLOW_AGING_POLL_FREQ, &parse_val_u16,
 			   &aging_thread_poll_freq);
+	rte_kvargs_process(kvlist, CNXK_NIX_RX_INJ_ENABLE, &parse_flag, &rx_inj_ena);
 	rte_kvargs_free(kvlist);
 
 null_devargs:
@@ -387,6 +390,8 @@ cnxk_ethdev_parse_devargs(struct rte_devargs *devargs, struct cnxk_eth_dev *dev)
 	dev->npc.pre_l2_size_offset_mask = pre_l2_info.pre_l2_size_off_mask;
 	dev->npc.pre_l2_size_shift_dir = pre_l2_info.pre_l2_size_shift_dir;
 	dev->npc.flow_age.aging_poll_freq = aging_thread_poll_freq;
+	if (roc_feature_nix_has_rx_inject())
+		dev->nix.rx_inj_ena = rx_inj_ena;
 	return 0;
 exit:
 	return -EINVAL;
@@ -409,4 +414,5 @@ RTE_PMD_REGISTER_PARAM_STRING(net_cnxk,
 			      CNXK_SDP_CHANNEL_MASK "=<1-4095>/<1-4095>"
 			      CNXK_CUSTOM_SA_ACT "=1"
 			      CNXK_SQB_SLACK "=<12-512>"
-			      CNXK_FLOW_AGING_POLL_FREQ "=<10-65535>");
+			      CNXK_FLOW_AGING_POLL_FREQ "=<10-65535>"
+			      CNXK_NIX_RX_INJ_ENABLE "=1");
diff --git a/drivers/net/cnxk/cnxk_ethdev_dp.h b/drivers/net/cnxk/cnxk_ethdev_dp.h
index c1f99a2616..56cfcb7fc6 100644
--- a/drivers/net/cnxk/cnxk_ethdev_dp.h
+++ b/drivers/net/cnxk/cnxk_ethdev_dp.h
@@ -4,6 +4,7 @@
 #ifndef __CNXK_ETHDEV_DP_H__
 #define __CNXK_ETHDEV_DP_H__
 
+#include <rte_security_driver.h>
 #include <rte_mbuf.h>
 
 /* If PTP is enabled additional SEND MEM DESC is required which
@@ -82,6 +83,13 @@ struct cnxk_timesync_info {
 	uint64_t *tx_tstamp;
 } __plt_cache_aligned;
 
+struct cnxk_ethdev_inj_cfg {
+	uintptr_t lmt_base;
+	uint64_t io_addr;
+	uint64_t sa_base;
+	uint64_t cmd_w0;
+} __plt_cache_aligned;
+
 /* Inlines */
 static __rte_always_inline uint64_t
 cnxk_pktmbuf_detach(struct rte_mbuf *m)
diff --git a/drivers/net/cnxk/cnxk_ethdev_sec.c b/drivers/net/cnxk/cnxk_ethdev_sec.c
index b02dac4952..6f5319e534 100644
--- a/drivers/net/cnxk/cnxk_ethdev_sec.c
+++ b/drivers/net/cnxk/cnxk_ethdev_sec.c
@@ -16,6 +16,7 @@
 #define CNXK_NIX_INL_META_BUF_SZ      "meta_buf_sz"
 #define CNXK_NIX_SOFT_EXP_POLL_FREQ   "soft_exp_poll_freq"
 #define CNXK_MAX_IPSEC_RULES	"max_ipsec_rules"
+#define CNXK_NIX_INL_RX_INJ_ENABLE	"rx_inj_ena"
 
 /* Default soft expiry poll freq in usec */
 #define CNXK_NIX_SOFT_EXP_POLL_FREQ_DFLT 100
@@ -192,6 +193,19 @@ parse_max_ipsec_rules(const char *key, const char *value, void *extra_args)
 	return 0;
 }
 
+static int
+parse_inl_rx_inj_ena(const char *key, const char *value, void *extra_args)
+{
+	RTE_SET_USED(key);
+	uint32_t val;
+
+	val = atoi(value);
+
+	*(uint8_t *)extra_args = !!(val == 1);
+
+	return 0;
+}
+
 int
 cnxk_eth_outb_sa_idx_get(struct cnxk_eth_dev *dev, uint32_t *idx_p,
 			 uint32_t spi)
@@ -352,6 +366,7 @@ nix_inl_parse_devargs(struct rte_devargs *devargs,
 	struct rte_kvargs *kvlist;
 	uint32_t nb_meta_bufs = 0;
 	uint32_t meta_buf_sz = 0;
+	uint8_t rx_inj_ena = 0;
 	uint8_t selftest = 0;
 
 	memset(&cpt_channel, 0, sizeof(cpt_channel));
@@ -378,6 +393,7 @@ nix_inl_parse_devargs(struct rte_devargs *devargs,
 	rte_kvargs_process(kvlist, CNXK_NIX_SOFT_EXP_POLL_FREQ,
 			   &parse_val_u32, &soft_exp_poll_freq);
 	rte_kvargs_process(kvlist, CNXK_MAX_IPSEC_RULES, &parse_max_ipsec_rules, &max_ipsec_rules);
+	rte_kvargs_process(kvlist, CNXK_NIX_INL_RX_INJ_ENABLE, &parse_inl_rx_inj_ena, &rx_inj_ena);
 	rte_kvargs_free(kvlist);
 
 null_devargs:
@@ -391,6 +407,8 @@ nix_inl_parse_devargs(struct rte_devargs *devargs,
 	inl_dev->meta_buf_sz = meta_buf_sz;
 	inl_dev->soft_exp_poll_freq = soft_exp_poll_freq;
 	inl_dev->max_ipsec_rules = max_ipsec_rules;
+	if (roc_feature_nix_has_rx_inject())
+		inl_dev->rx_inj_ena = rx_inj_ena;
 	return 0;
 exit:
 	return -EINVAL;
@@ -518,4 +536,5 @@ RTE_PMD_REGISTER_PARAM_STRING(cnxk_nix_inl,
 			      CNXK_NIX_INL_NB_META_BUFS "=<1-U32_MAX>"
 			      CNXK_NIX_INL_META_BUF_SZ "=<1-U32_MAX>"
 			      CNXK_NIX_SOFT_EXP_POLL_FREQ "=<0-U32_MAX>"
-			      CNXK_MAX_IPSEC_RULES "=<1-4095>");
+			      CNXK_MAX_IPSEC_RULES "=<1-4095>"
+			      CNXK_NIX_INL_RX_INJ_ENABLE "=1");
-- 
2.25.1


^ permalink raw reply	[flat|nested] 12+ messages in thread

* [PATCH 3/5] common/cnxk: fix for inline dev pointer check
  2024-01-19  5:57 [PATCH 1/5] common/cnxk: reserve CPT LF for Rx inject Rahul Bhansali
  2024-01-19  5:57 ` [PATCH 2/5] net/cnxk: support of " Rahul Bhansali
@ 2024-01-19  5:57 ` Rahul Bhansali
  2024-01-19  5:57 ` [PATCH 4/5] net/cnxk: fix to add reassembly fast path flag Rahul Bhansali
                   ` (2 subsequent siblings)
  4 siblings, 0 replies; 12+ messages in thread
From: Rahul Bhansali @ 2024-01-19  5:57 UTC (permalink / raw)
  To: dev, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
	Satha Rao, Rahul Bhansali
  Cc: stable

Add a missing check of the inline device pointer before accessing
the is_multi_channel variable.

Fixes: 7ea187184a51 ("common/cnxk: support 1-N pool-aura per NIX LF")
Cc: stable@dpdk.org

Signed-off-by: Rahul Bhansali <rbhansali@marvell.com>
---
 drivers/common/cnxk/roc_nix_inl.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/common/cnxk/roc_nix_inl.c b/drivers/common/cnxk/roc_nix_inl.c
index de8fd2a605..a205c658e9 100644
--- a/drivers/common/cnxk/roc_nix_inl.c
+++ b/drivers/common/cnxk/roc_nix_inl.c
@@ -933,7 +933,8 @@ roc_nix_inl_inb_init(struct roc_nix *roc_nix)
 	inl_dev = idev->nix_inl_dev;
 
 	roc_nix->custom_meta_aura_ena = (roc_nix->local_meta_aura_ena &&
-					 (inl_dev->is_multi_channel || roc_nix->custom_sa_action));
+					 ((inl_dev && inl_dev->is_multi_channel) ||
+					  roc_nix->custom_sa_action));
 	if (!roc_model_is_cn9k() && !roc_errata_nix_no_meta_aura()) {
 		nix->need_meta_aura = true;
 		if (!roc_nix->local_meta_aura_ena || roc_nix->custom_meta_aura_ena)
-- 
2.25.1


^ permalink raw reply	[flat|nested] 12+ messages in thread

* [PATCH 4/5] net/cnxk: fix to add reassembly fast path flag
  2024-01-19  5:57 [PATCH 1/5] common/cnxk: reserve CPT LF for Rx inject Rahul Bhansali
  2024-01-19  5:57 ` [PATCH 2/5] net/cnxk: support of " Rahul Bhansali
  2024-01-19  5:57 ` [PATCH 3/5] common/cnxk: fix for inline dev pointer check Rahul Bhansali
@ 2024-01-19  5:57 ` Rahul Bhansali
  2024-01-19  5:57 ` [PATCH 5/5] net/cnxk: select optimized LLC transaction type Rahul Bhansali
  2024-02-22 10:07 ` [PATCH v2 1/5] common/cnxk: reserve CPT LF for Rx inject Rahul Bhansali
  4 siblings, 0 replies; 12+ messages in thread
From: Rahul Bhansali @ 2024-01-19  5:57 UTC (permalink / raw)
  To: dev, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
	Satha Rao, Akhil Goyal
  Cc: Rahul Bhansali, stable

For IPsec decrypted packets, the full packet format condition check
is enabled for both the reassembly and non-reassembly paths as part
of OOP handling. Instead, it should apply only to the reassembly path.
To fix this, a NIX_RX_REAS_F flag condition is added to avoid the
packet format check in the non-reassembly fast path.

Fixes: 5e9e008d0127 ("net/cnxk: support inline ingress out-of-place session")
Cc: stable@dpdk.org

Signed-off-by: Rahul Bhansali <rbhansali@marvell.com>
---
 drivers/net/cnxk/cn10k_rx.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/net/cnxk/cn10k_rx.h b/drivers/net/cnxk/cn10k_rx.h
index c4ad1b64fe..89621af3fb 100644
--- a/drivers/net/cnxk/cn10k_rx.h
+++ b/drivers/net/cnxk/cn10k_rx.h
@@ -734,7 +734,7 @@ nix_cqe_xtract_mseg(const union nix_rx_parse_u *rx, struct rte_mbuf *mbuf,
 		else
 			wqe = (const uint64_t *)(mbuf + 1);
 
-		if (hdr->w0.pkt_fmt != ROC_IE_OT_SA_PKT_FMT_FULL)
+		if (!(flags & NIX_RX_REAS_F) || hdr->w0.pkt_fmt != ROC_IE_OT_SA_PKT_FMT_FULL)
 			rx = (const union nix_rx_parse_u *)(wqe + 1);
 	}
 
-- 
2.25.1


^ permalink raw reply	[flat|nested] 12+ messages in thread

* [PATCH 5/5] net/cnxk: select optimized LLC transaction type
  2024-01-19  5:57 [PATCH 1/5] common/cnxk: reserve CPT LF for Rx inject Rahul Bhansali
                   ` (2 preceding siblings ...)
  2024-01-19  5:57 ` [PATCH 4/5] net/cnxk: fix to add reassembly fast path flag Rahul Bhansali
@ 2024-01-19  5:57 ` Rahul Bhansali
  2024-02-22 10:07 ` [PATCH v2 1/5] common/cnxk: reserve CPT LF for Rx inject Rahul Bhansali
  4 siblings, 0 replies; 12+ messages in thread
From: Rahul Bhansali @ 2024-01-19  5:57 UTC (permalink / raw)
  To: dev, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: Rahul Bhansali

Optimize LLC transactions by using the LDWB LDTYPE option
in SG preparation for Tx. With this, if the data is present
and dirty in the LLC, then the LLC marks the data clean.

Signed-off-by: Rahul Bhansali <rbhansali@marvell.com>
---
 drivers/net/cnxk/cn10k_tx.h | 16 +++++++++++++---
 1 file changed, 13 insertions(+), 3 deletions(-)

diff --git a/drivers/net/cnxk/cn10k_tx.h b/drivers/net/cnxk/cn10k_tx.h
index 664e47e1fc..fcd19be77e 100644
--- a/drivers/net/cnxk/cn10k_tx.h
+++ b/drivers/net/cnxk/cn10k_tx.h
@@ -331,9 +331,15 @@ cn10k_nix_tx_skeleton(struct cn10k_eth_txq *txq, uint64_t *cmd,
 		else
 			cmd[2] = NIX_SUBDC_EXT << 60;
 		cmd[3] = 0;
-		cmd[4] = (NIX_SUBDC_SG << 60) | BIT_ULL(48);
+		if (!(flags & NIX_TX_OFFLOAD_MBUF_NOFF_F))
+			cmd[4] = (NIX_SUBDC_SG << 60) | (NIX_SENDLDTYPE_LDWB << 58) | BIT_ULL(48);
+		else
+			cmd[4] = (NIX_SUBDC_SG << 60) | BIT_ULL(48);
 	} else {
-		cmd[2] = (NIX_SUBDC_SG << 60) | BIT_ULL(48);
+		if (!(flags & NIX_TX_OFFLOAD_MBUF_NOFF_F))
+			cmd[2] = (NIX_SUBDC_SG << 60) | (NIX_SENDLDTYPE_LDWB << 58) | BIT_ULL(48);
+		else
+			cmd[2] = (NIX_SUBDC_SG << 60) | BIT_ULL(48);
 	}
 }
 
@@ -1989,7 +1995,11 @@ cn10k_nix_xmit_pkts_vector(void *tx_queue, uint64_t *ws,
 
 	senddesc01_w1 = vdupq_n_u64(0);
 	senddesc23_w1 = senddesc01_w1;
-	sgdesc01_w0 = vdupq_n_u64((NIX_SUBDC_SG << 60) | BIT_ULL(48));
+	if (!(flags & NIX_TX_OFFLOAD_MBUF_NOFF_F))
+		sgdesc01_w0 = vdupq_n_u64((NIX_SUBDC_SG << 60) | (NIX_SENDLDTYPE_LDWB << 58) |
+					  BIT_ULL(48));
+	else
+		sgdesc01_w0 = vdupq_n_u64((NIX_SUBDC_SG << 60) | BIT_ULL(48));
 	sgdesc23_w0 = sgdesc01_w0;
 
 	if (flags & NIX_TX_NEED_EXT_HDR) {
-- 
2.25.1


^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH 2/5] net/cnxk: support of Rx inject
  2024-01-19  5:57 ` [PATCH 2/5] net/cnxk: support of " Rahul Bhansali
@ 2024-02-22  8:55   ` Jerin Jacob
  0 siblings, 0 replies; 12+ messages in thread
From: Jerin Jacob @ 2024-02-22  8:55 UTC (permalink / raw)
  To: Rahul Bhansali
  Cc: dev, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao

On Fri, Jan 19, 2024 at 11:27 AM Rahul Bhansali <rbhansali@marvell.com> wrote:
>
> Add Rx inject security callback APIs to configure, inject
> packet to CPT and receive back as in receive path.
> Devargs "rx_inj_ena=1" will be required to enable the
> inline IPsec Rx inject feature. If inline device is used
> then this devarg will be required for both inline device
> and eth device.
>
> Signed-off-by: Rahul Bhansali <rbhansali@marvell.com>
> ---
>  doc/guides/nics/cnxk.rst               |  27 +++

Please update release note. i.e Add one line under  "Updated Marvell
cnxk net driver "  in "doc/guides/rel_notes/release_24_03.rst.



>  drivers/net/cnxk/cn10k_ethdev.c        |   4 +
>  drivers/net/cnxk/cn10k_ethdev_sec.c    |  48 +++++
>  drivers/net/cnxk/cn10k_rx.h            | 241 ++++++++++++++++++++++++-
>  drivers/net/cnxk/cn10k_rxtx.h          |  57 ++++++
>  drivers/net/cnxk/cn10k_tx.h            |  57 ------
>  drivers/net/cnxk/cnxk_ethdev.h         |   3 +
>  drivers/net/cnxk/cnxk_ethdev_devargs.c |   8 +-
>  drivers/net/cnxk/cnxk_ethdev_dp.h      |   8 +
>  drivers/net/cnxk/cnxk_ethdev_sec.c     |  21 ++-
>  10 files changed, 405 insertions(+), 69 deletions(-)

^ permalink raw reply	[flat|nested] 12+ messages in thread

* [PATCH v2 1/5] common/cnxk: reserve CPT LF for Rx inject
  2024-01-19  5:57 [PATCH 1/5] common/cnxk: reserve CPT LF for Rx inject Rahul Bhansali
                   ` (3 preceding siblings ...)
  2024-01-19  5:57 ` [PATCH 5/5] net/cnxk: select optimized LLC transaction type Rahul Bhansali
@ 2024-02-22 10:07 ` Rahul Bhansali
  2024-02-22 10:07   ` [PATCH v2 2/5] net/cnxk: support of " Rahul Bhansali
                     ` (3 more replies)
  4 siblings, 4 replies; 12+ messages in thread
From: Rahul Bhansali @ 2024-02-22 10:07 UTC (permalink / raw)
  To: dev, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: Rahul Bhansali

An additional CPT LF will be reserved and attached with
inline device to enable RXC and use for Rx inject purpose.

Signed-off-by: Rahul Bhansali <rbhansali@marvell.com>
---
Changes in v2: No change

 drivers/common/cnxk/roc_features.h     |  7 +++
 drivers/common/cnxk/roc_nix.h          |  1 +
 drivers/common/cnxk/roc_nix_inl.c      | 71 ++++++++++++++++++++++++--
 drivers/common/cnxk/roc_nix_inl.h      |  5 +-
 drivers/common/cnxk/roc_nix_inl_dev.c  | 61 +++++++++++++---------
 drivers/common/cnxk/roc_nix_inl_priv.h |  7 ++-
 drivers/common/cnxk/version.map        |  2 +
 7 files changed, 123 insertions(+), 31 deletions(-)

diff --git a/drivers/common/cnxk/roc_features.h b/drivers/common/cnxk/roc_features.h
index f4807ee271..3b512be132 100644
--- a/drivers/common/cnxk/roc_features.h
+++ b/drivers/common/cnxk/roc_features.h
@@ -83,4 +83,11 @@ roc_feature_nix_has_inl_ipsec(void)
 {
 	return !roc_model_is_cnf10kb();
 }
+
+static inline bool
+roc_feature_nix_has_rx_inject(void)
+{
+	return (roc_model_is_cn10ka_b0() || roc_model_is_cn10kb());
+}
+
 #endif
diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index 84e6fc3df5..eebdd4ecc3 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -474,6 +474,7 @@ struct roc_nix {
 	uint32_t meta_buf_sz;
 	bool force_rx_aura_bp;
 	bool custom_meta_aura_ena;
+	bool rx_inj_ena;
 	/* End of input parameters */
 	/* LMT line base for "Per Core Tx LMT line" mode*/
 	uintptr_t lmt_base;
diff --git a/drivers/common/cnxk/roc_nix_inl.c b/drivers/common/cnxk/roc_nix_inl.c
index 07a90133ca..de8fd2a605 100644
--- a/drivers/common/cnxk/roc_nix_inl.c
+++ b/drivers/common/cnxk/roc_nix_inl.c
@@ -474,6 +474,34 @@ roc_nix_inl_outb_lf_base_get(struct roc_nix *roc_nix)
 	return (struct roc_cpt_lf *)nix->cpt_lf_base;
 }

+struct roc_cpt_lf *
+roc_nix_inl_inb_inj_lf_get(struct roc_nix *roc_nix)
+{
+	struct nix *nix;
+	struct idev_cfg *idev = idev_get_cfg();
+	struct nix_inl_dev *inl_dev = NULL;
+	struct roc_cpt_lf *lf = NULL;
+
+	if (!idev)
+		return NULL;
+
+	inl_dev = idev->nix_inl_dev;
+
+	if (!inl_dev && roc_nix == NULL)
+		return NULL;
+
+	nix = roc_nix_to_nix_priv(roc_nix);
+
+	if (nix->inb_inl_dev && inl_dev && inl_dev->attach_cptlf &&
+	    inl_dev->rx_inj_ena)
+		return &inl_dev->cpt_lf[inl_dev->nb_cptlf - 1];
+
+	lf = roc_nix_inl_outb_lf_base_get(roc_nix);
+	if (lf)
+		lf += roc_nix->outb_nb_crypto_qs;
+	return lf;
+}
+
 uintptr_t
 roc_nix_inl_outb_sa_base_get(struct roc_nix *roc_nix)
 {
@@ -512,6 +540,35 @@ roc_nix_inl_inb_sa_base_get(struct roc_nix *roc_nix, bool inb_inl_dev)
 	return (uintptr_t)nix->inb_sa_base;
 }

+bool
+roc_nix_inl_inb_rx_inject_enable(struct roc_nix *roc_nix, bool inb_inl_dev)
+{
+	struct idev_cfg *idev = idev_get_cfg();
+	struct nix_inl_dev *inl_dev;
+	struct nix *nix = NULL;
+
+	if (idev == NULL)
+		return 0;
+
+	if (!inb_inl_dev && roc_nix == NULL)
+		return 0;
+
+	if (roc_nix) {
+		nix = roc_nix_to_nix_priv(roc_nix);
+		if (!nix->inl_inb_ena)
+			return 0;
+	}
+
+	if (inb_inl_dev) {
+		inl_dev = idev->nix_inl_dev;
+		if (inl_dev && inl_dev->attach_cptlf && inl_dev->rx_inj_ena &&
+		    roc_nix->rx_inj_ena)
+			return true;
+	}
+
+	return roc_nix->rx_inj_ena;
+}
+
 uint32_t
 roc_nix_inl_inb_spi_range(struct roc_nix *roc_nix, bool inb_inl_dev,
 			  uint32_t *min_spi, uint32_t *max_spi)
@@ -941,6 +998,7 @@ roc_nix_inl_outb_init(struct roc_nix *roc_nix)
 	bool ctx_ilen_valid = false;
 	size_t sa_sz, ring_sz;
 	uint8_t ctx_ilen = 0;
+	bool rx_inj = false;
 	uint16_t sso_pffunc;
 	uint8_t eng_grpmask;
 	uint64_t blkaddr, i;
@@ -958,6 +1016,12 @@ roc_nix_inl_outb_init(struct roc_nix *roc_nix)

 	/* Retrieve inline device if present */
 	inl_dev = idev->nix_inl_dev;
+	if (roc_nix->rx_inj_ena && !(nix->inb_inl_dev && inl_dev && inl_dev->attach_cptlf &&
+				     inl_dev->rx_inj_ena)) {
+		nb_lf++;
+		rx_inj = true;
+	}
+
 	sso_pffunc = inl_dev ? inl_dev->dev.pf_func : idev_sso_pffunc_get();
 	/* Use sso_pffunc if explicitly requested */
 	if (roc_nix->ipsec_out_sso_pffunc)
@@ -986,7 +1050,8 @@ roc_nix_inl_outb_init(struct roc_nix *roc_nix)
 		       1ULL << ROC_CPT_DFLT_ENG_GRP_SE_IE |
 		       1ULL << ROC_CPT_DFLT_ENG_GRP_AE);
 	rc = cpt_lfs_alloc(dev, eng_grpmask, blkaddr,
-			   !roc_nix->ipsec_out_sso_pffunc, ctx_ilen_valid, ctx_ilen, false, 0);
+			   !roc_nix->ipsec_out_sso_pffunc, ctx_ilen_valid, ctx_ilen,
+			   rx_inj, nb_lf - 1);
 	if (rc) {
 		plt_err("Failed to alloc CPT LF resources, rc=%d", rc);
 		goto lf_detach;
@@ -1632,7 +1697,7 @@ roc_nix_inl_sa_sync(struct roc_nix *roc_nix, void *sa, bool inb,
 	if (inb && get_inl_lf) {
 		outb_lf = NULL;
 		if (inl_dev && inl_dev->attach_cptlf)
-			outb_lf = &inl_dev->cpt_lf;
+			outb_lf = &inl_dev->cpt_lf[0];
 	}

 	if (outb_lf) {
@@ -1696,7 +1761,7 @@ roc_nix_inl_ctx_write(struct roc_nix *roc_nix, void *sa_dptr, void *sa_cptr,
 	if (inb && get_inl_lf) {
 		outb_lf = NULL;
 		if (inl_dev && inl_dev->attach_cptlf)
-			outb_lf = &inl_dev->cpt_lf;
+			outb_lf = &inl_dev->cpt_lf[0];
 	}

 	if (outb_lf) {
diff --git a/drivers/common/cnxk/roc_nix_inl.h b/drivers/common/cnxk/roc_nix_inl.h
index ab1e9c0f98..a89b40ff61 100644
--- a/drivers/common/cnxk/roc_nix_inl.h
+++ b/drivers/common/cnxk/roc_nix_inl.h
@@ -142,9 +142,10 @@ struct roc_nix_inl_dev {
 	uint32_t nb_meta_bufs;
 	uint32_t meta_buf_sz;
 	uint32_t max_ipsec_rules;
+	uint8_t rx_inj_ena; /* Rx Inject Enable */
 	/* End of input parameters */

-#define ROC_NIX_INL_MEM_SZ (1280)
+#define ROC_NIX_INL_MEM_SZ (1408)
 	uint8_t reserved[ROC_NIX_INL_MEM_SZ] __plt_cache_aligned;
 } __plt_cache_aligned;

@@ -167,6 +168,7 @@ int __roc_api roc_nix_inl_inb_fini(struct roc_nix *roc_nix);
 bool __roc_api roc_nix_inl_inb_is_enabled(struct roc_nix *roc_nix);
 uintptr_t __roc_api roc_nix_inl_inb_sa_base_get(struct roc_nix *roc_nix,
 						bool inl_dev_sa);
+bool __roc_api roc_nix_inl_inb_rx_inject_enable(struct roc_nix *roc_nix, bool inl_dev_sa);
 uint32_t __roc_api roc_nix_inl_inb_spi_range(struct roc_nix *roc_nix,
 					     bool inl_dev_sa, uint32_t *min,
 					     uint32_t *max);
@@ -196,6 +198,7 @@ bool __roc_api roc_nix_inl_outb_is_enabled(struct roc_nix *roc_nix);
 uintptr_t __roc_api roc_nix_inl_outb_sa_base_get(struct roc_nix *roc_nix);
 struct roc_cpt_lf *__roc_api
 roc_nix_inl_outb_lf_base_get(struct roc_nix *roc_nix);
+struct roc_cpt_lf *__roc_api roc_nix_inl_inb_inj_lf_get(struct roc_nix *roc_nix);
 uint16_t __roc_api roc_nix_inl_outb_sso_pffunc_get(struct roc_nix *roc_nix);
 int __roc_api roc_nix_inl_cb_register(roc_nix_inl_sso_work_cb_t cb, void *args);
 int __roc_api roc_nix_inl_cb_unregister(roc_nix_inl_sso_work_cb_t cb,
diff --git a/drivers/common/cnxk/roc_nix_inl_dev.c b/drivers/common/cnxk/roc_nix_inl_dev.c
index f6991de051..60e6a43033 100644
--- a/drivers/common/cnxk/roc_nix_inl_dev.c
+++ b/drivers/common/cnxk/roc_nix_inl_dev.c
@@ -174,9 +174,9 @@ nix_inl_nix_ipsec_cfg(struct nix_inl_dev *inl_dev, bool ena)
 static int
 nix_inl_cpt_setup(struct nix_inl_dev *inl_dev, bool inl_dev_sso)
 {
-	struct roc_cpt_lf *lf = &inl_dev->cpt_lf;
 	struct dev *dev = &inl_dev->dev;
 	bool ctx_ilen_valid = false;
+	struct roc_cpt_lf *lf;
 	uint8_t eng_grpmask;
 	uint8_t ctx_ilen = 0;
 	int rc;
@@ -194,27 +194,29 @@ nix_inl_cpt_setup(struct nix_inl_dev *inl_dev, bool inl_dev_sso)
 	}

 	rc = cpt_lfs_alloc(dev, eng_grpmask, RVU_BLOCK_ADDR_CPT0, inl_dev_sso, ctx_ilen_valid,
-			   ctx_ilen, false, 0);
+			   ctx_ilen, inl_dev->rx_inj_ena, inl_dev->nb_cptlf - 1);
 	if (rc) {
 		plt_err("Failed to alloc CPT LF resources, rc=%d", rc);
 		return rc;
 	}

-	/* Setup CPT LF for submitting control opcode */
-	lf = &inl_dev->cpt_lf;
-	lf->lf_id = 0;
-	lf->nb_desc = 0; /* Set to default */
-	lf->dev = &inl_dev->dev;
-	lf->msixoff = inl_dev->cpt_msixoff;
-	lf->pci_dev = inl_dev->pci_dev;
+	for (int i = 0; i < inl_dev->nb_cptlf; i++) {
+		/* Setup CPT LF for submitting control opcode */
+		lf = &inl_dev->cpt_lf[i];
+		lf->lf_id = i;
+		lf->nb_desc = 0; /* Set to default */
+		lf->dev = &inl_dev->dev;
+		lf->msixoff = inl_dev->cpt_msixoff[i];
+		lf->pci_dev = inl_dev->pci_dev;

-	rc = cpt_lf_init(lf);
-	if (rc) {
-		plt_err("Failed to initialize CPT LF, rc=%d", rc);
-		goto lf_free;
-	}
+		rc = cpt_lf_init(lf);
+		if (rc) {
+			plt_err("Failed to initialize CPT LF, rc=%d", rc);
+			goto lf_free;
+		}

-	roc_cpt_iq_enable(lf);
+		roc_cpt_iq_enable(lf);
+	}
 	return 0;
 lf_free:
 	rc |= cpt_lfs_free(dev);
@@ -224,21 +226,22 @@ nix_inl_cpt_setup(struct nix_inl_dev *inl_dev, bool inl_dev_sso)
 static int
 nix_inl_cpt_release(struct nix_inl_dev *inl_dev)
 {
-	struct roc_cpt_lf *lf = &inl_dev->cpt_lf;
 	struct dev *dev = &inl_dev->dev;
-	int rc;
+	int rc, i;

 	if (!inl_dev->attach_cptlf)
 		return 0;

 	/* Cleanup CPT LF queue */
-	cpt_lf_fini(lf);
+	for (i = 0; i < inl_dev->nb_cptlf; i++)
+		cpt_lf_fini(&inl_dev->cpt_lf[i]);

 	/* Free LF resources */
 	rc = cpt_lfs_free(dev);
-	if (!rc)
-		lf->dev = NULL;
-	else
+	if (!rc) {
+		for (i = 0; i < inl_dev->nb_cptlf; i++)
+			inl_dev->cpt_lf[i].dev = NULL;
+	} else
 		plt_err("Failed to free CPT LF resources, rc=%d", rc);
 	return rc;
 }
@@ -533,7 +536,7 @@ nix_inl_lf_attach(struct nix_inl_dev *inl_dev)
 	req->ssow = 1;
 	req->sso = 1;
 	if (inl_dev->attach_cptlf) {
-		req->cptlfs = 1;
+		req->cptlfs = inl_dev->nb_cptlf;
 		req->cpt_blkaddr = RVU_BLOCK_ADDR_CPT0;
 	}

@@ -550,7 +553,9 @@ nix_inl_lf_attach(struct nix_inl_dev *inl_dev)
 	inl_dev->nix_msixoff = msix_rsp->nix_msixoff;
 	inl_dev->ssow_msixoff = msix_rsp->ssow_msixoff[0];
 	inl_dev->sso_msixoff = msix_rsp->sso_msixoff[0];
-	inl_dev->cpt_msixoff = msix_rsp->cptlf_msixoff[0];
+
+	for (int i = 0; i < inl_dev->nb_cptlf; i++)
+		inl_dev->cpt_msixoff[i] = msix_rsp->cptlf_msixoff[i];

 	nix_blkaddr = nix_get_blkaddr(dev);
 	inl_dev->is_nix1 = (nix_blkaddr == RVU_BLOCK_ADDR_NIX1);
@@ -912,6 +917,12 @@ roc_nix_inl_dev_init(struct roc_nix_inl_dev *roc_inl_dev)
 	inl_dev->meta_buf_sz = roc_inl_dev->meta_buf_sz;
 	inl_dev->soft_exp_poll_freq = roc_inl_dev->soft_exp_poll_freq;

+	if (roc_inl_dev->rx_inj_ena) {
+		inl_dev->rx_inj_ena = 1;
+		inl_dev->nb_cptlf = NIX_INL_CPT_LF;
+	} else
+		inl_dev->nb_cptlf = 1;
+
 	if (roc_inl_dev->spb_drop_pc)
 		inl_dev->spb_drop_pc = roc_inl_dev->spb_drop_pc;
 	if (roc_inl_dev->lpb_drop_pc)
@@ -1068,7 +1079,7 @@ roc_nix_inl_dev_cpt_setup(bool use_inl_dev_sso)
 		return -ENOENT;
 	inl_dev = idev->nix_inl_dev;

-	if (inl_dev->cpt_lf.dev != NULL)
+	if (inl_dev->cpt_lf[0].dev != NULL)
 		return -EBUSY;

 	return nix_inl_cpt_setup(inl_dev, use_inl_dev_sso);
@@ -1084,7 +1095,7 @@ roc_nix_inl_dev_cpt_release(void)
 		return -ENOENT;
 	inl_dev = idev->nix_inl_dev;

-	if (inl_dev->cpt_lf.dev == NULL)
+	if (inl_dev->cpt_lf[0].dev == NULL)
 		return 0;

 	return nix_inl_cpt_release(inl_dev);
diff --git a/drivers/common/cnxk/roc_nix_inl_priv.h b/drivers/common/cnxk/roc_nix_inl_priv.h
index 3217f4ebc1..5afc7d6655 100644
--- a/drivers/common/cnxk/roc_nix_inl_priv.h
+++ b/drivers/common/cnxk/roc_nix_inl_priv.h
@@ -7,6 +7,7 @@
 #include <sys/types.h>

 #define NIX_INL_META_SIZE 384u
+#define NIX_INL_CPT_LF	2

 struct nix_inl_dev;
 struct nix_inl_qint {
@@ -31,7 +32,7 @@ struct nix_inl_dev {
 	uint16_t nix_msixoff;
 	uint16_t ssow_msixoff;
 	uint16_t sso_msixoff;
-	uint16_t cpt_msixoff;
+	uint16_t cpt_msixoff[NIX_INL_CPT_LF];

 	/* SSO data */
 	uint32_t xaq_buf_size;
@@ -62,9 +63,10 @@ struct nix_inl_dev {
 	/* NIX/CPT data */
 	void *inb_sa_base;
 	uint16_t inb_sa_sz;
+	uint8_t nb_cptlf;

 	/* CPT data */
-	struct roc_cpt_lf cpt_lf;
+	struct roc_cpt_lf cpt_lf[NIX_INL_CPT_LF];

 	/* OUTB soft expiry poll thread */
 	plt_thread_t soft_exp_poll_thread;
@@ -91,6 +93,7 @@ struct nix_inl_dev {
 	bool ts_ena;
 	uint32_t nb_meta_bufs;
 	uint32_t meta_buf_sz;
+	uint8_t rx_inj_ena; /* Rx Inject Enable */

 	/* NPC */
 	int *ipsec_index;
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index 4981d42ab7..892fcb1f0d 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -243,6 +243,8 @@ INTERNAL {
 	roc_nix_inl_eng_caps_get;
 	roc_nix_inl_inb_is_enabled;
 	roc_nix_inl_inb_init;
+	roc_nix_inl_inb_inj_lf_get;
+	roc_nix_inl_inb_rx_inject_enable;
 	roc_nix_inl_inb_sa_base_get;
 	roc_nix_inl_inb_sa_get;
 	roc_nix_inl_inb_spi_range;
--
2.25.1


^ permalink raw reply	[flat|nested] 12+ messages in thread

* [PATCH v2 2/5] net/cnxk: support of Rx inject
  2024-02-22 10:07 ` [PATCH v2 1/5] common/cnxk: reserve CPT LF for Rx inject Rahul Bhansali
@ 2024-02-22 10:07   ` Rahul Bhansali
  2024-02-22 10:07   ` [PATCH v2 3/5] common/cnxk: fix for inline dev pointer check Rahul Bhansali
                     ` (2 subsequent siblings)
  3 siblings, 0 replies; 12+ messages in thread
From: Rahul Bhansali @ 2024-02-22 10:07 UTC (permalink / raw)
  To: dev, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: Rahul Bhansali

Add Rx inject security callback APIs to configure and inject
packets to CPT, and receive them back as in the regular Rx path.
The devarg "rx_inj_ena=1" is required to enable the
inline IPsec Rx inject feature. If an inline device is used,
then this devarg is required for both the inline device
and the eth device.

Signed-off-by: Rahul Bhansali <rbhansali@marvell.com>
---
Changes in v2: updated release_24_03.rst for Rx inject support

 doc/guides/nics/cnxk.rst               |  27 +++
 doc/guides/rel_notes/release_24_03.rst |   1 +
 drivers/net/cnxk/cn10k_ethdev.c        |   4 +
 drivers/net/cnxk/cn10k_ethdev_sec.c    |  48 +++++
 drivers/net/cnxk/cn10k_rx.h            | 241 ++++++++++++++++++++++++-
 drivers/net/cnxk/cn10k_rxtx.h          |  57 ++++++
 drivers/net/cnxk/cn10k_tx.h            |  57 ------
 drivers/net/cnxk/cnxk_ethdev.h         |   3 +
 drivers/net/cnxk/cnxk_ethdev_devargs.c |   8 +-
 drivers/net/cnxk/cnxk_ethdev_dp.h      |   8 +
 drivers/net/cnxk/cnxk_ethdev_sec.c     |  21 ++-
 11 files changed, 406 insertions(+), 69 deletions(-)

diff --git a/doc/guides/nics/cnxk.rst b/doc/guides/nics/cnxk.rst
index 9ec52e380f..39660dba82 100644
--- a/doc/guides/nics/cnxk.rst
+++ b/doc/guides/nics/cnxk.rst
@@ -416,6 +416,19 @@ Runtime Config Options
    With the above configuration, PMD would allocate meta buffers of size 512 for
    inline inbound IPsec processing second pass.

+- ``Rx Inject Enable inbound inline IPsec for second pass`` (default ``0``)
+
+   Rx packet inject feature for inbound inline IPsec processing can be enabled
+   by ``rx_inj_ena`` ``devargs`` parameter.
+   This option is for OCTEON CN106-B0/CN103XX SoC family.
+
+   For example::
+
+      -a 0002:02:00.0,rx_inj_ena=1
+
+   With the above configuration, driver would enable packet inject from ARM cores
+   to crypto to process and send back in Rx path.
+
 .. note::

    Above devarg parameters are configurable per device, user needs to pass the
@@ -613,6 +626,20 @@ Runtime Config Options for inline device
    With the above configuration, driver would poll for aging flows every 50
    seconds.

+- ``Rx Inject Enable inbound inline IPsec for second pass`` (default ``0``)
+
+   Rx packet inject feature for inbound inline IPsec processing can be enabled
+   by ``rx_inj_ena`` ``devargs`` parameter with both inline device and ethdev
+   device.
+   This option is for OCTEON CN106-B0/CN103XX SoC family.
+
+   For example::
+
+      -a 0002:1d:00.0,rx_inj_ena=1
+
+   With the above configuration, driver would enable packet inject from ARM cores
+   to crypto to process and send back in Rx path.
+
 Debugging Options
 -----------------

diff --git a/doc/guides/rel_notes/release_24_03.rst b/doc/guides/rel_notes/release_24_03.rst
index 879bb4944c..d6da5bc7a5 100644
--- a/doc/guides/rel_notes/release_24_03.rst
+++ b/doc/guides/rel_notes/release_24_03.rst
@@ -110,6 +110,7 @@ New Features

   * Added support for ``RTE_FLOW_ITEM_TYPE_PPPOES`` flow item.
   * Added support for ``RTE_FLOW_ACTION_TYPE_SAMPLE`` flow item.
+  * Added support for Rx inject in cn10k.

 * **Updated Marvell OCTEON EP driver.**

diff --git a/drivers/net/cnxk/cn10k_ethdev.c b/drivers/net/cnxk/cn10k_ethdev.c
index a2e943a3d0..78d1dca3c1 100644
--- a/drivers/net/cnxk/cn10k_ethdev.c
+++ b/drivers/net/cnxk/cn10k_ethdev.c
@@ -593,6 +593,10 @@ cn10k_nix_dev_start(struct rte_eth_dev *eth_dev)
 	if (dev->rx_offload_flags & NIX_RX_OFFLOAD_SECURITY_F)
 		cn10k_nix_rx_queue_meta_aura_update(eth_dev);

+	/* Set flags for Rx Inject feature */
+	if (roc_idev_nix_rx_inject_get(nix->port_id))
+		dev->rx_offload_flags |= NIX_RX_SEC_REASSEMBLY_F;
+
 	cn10k_eth_set_tx_function(eth_dev);
 	cn10k_eth_set_rx_function(eth_dev);
 	return 0;
diff --git a/drivers/net/cnxk/cn10k_ethdev_sec.c b/drivers/net/cnxk/cn10k_ethdev_sec.c
index 575d0fabd5..42e4867d3c 100644
--- a/drivers/net/cnxk/cn10k_ethdev_sec.c
+++ b/drivers/net/cnxk/cn10k_ethdev_sec.c
@@ -1253,6 +1253,52 @@ eth_sec_caps_add(struct rte_security_capability eth_sec_caps[], uint32_t *idx,
 	*idx += nb_caps;
 }

+static uint16_t __rte_hot
+cn10k_eth_sec_inb_rx_inject(void *device, struct rte_mbuf **pkts,
+			    struct rte_security_session **sess, uint16_t nb_pkts)
+{
+	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+
+	return cn10k_nix_inj_pkts(sess, &dev->inj_cfg, pkts, nb_pkts);
+}
+
+static int
+cn10k_eth_sec_rx_inject_config(void *device, uint16_t port_id, bool enable)
+{
+	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+	uint64_t channel, pf_func, inj_match_id = 0xFFFFUL;
+	struct cnxk_ethdev_inj_cfg *inj_cfg;
+	struct roc_nix *nix = &dev->nix;
+	struct roc_cpt_lf *inl_lf;
+	uint64_t sa_base;
+
+	if (!rte_eth_dev_is_valid_port(port_id))
+		return -EINVAL;
+
+	if (eth_dev->data->dev_started || !eth_dev->data->dev_configured)
+		return -EBUSY;
+
+	if (!roc_nix_inl_inb_rx_inject_enable(nix, dev->inb.inl_dev))
+		return -ENOTSUP;
+
+	roc_idev_nix_rx_inject_set(port_id, enable);
+
+	inl_lf = roc_nix_inl_inb_inj_lf_get(nix);
+	sa_base = roc_nix_inl_inb_sa_base_get(nix, dev->inb.inl_dev);
+
+	inj_cfg = &dev->inj_cfg;
+	inj_cfg->sa_base = sa_base | eth_dev->data->port_id;
+	inj_cfg->io_addr = inl_lf->io_addr;
+	inj_cfg->lmt_base = nix->lmt_base;
+	channel = roc_nix_get_base_chan(nix);
+	pf_func = roc_nix_inl_dev_pffunc_get();
+	inj_cfg->cmd_w0 = pf_func << 48 | inj_match_id << 32 | channel << 4;
+
+	return 0;
+}
+
 void
 cn10k_eth_sec_ops_override(void)
 {
@@ -1287,4 +1333,6 @@ cn10k_eth_sec_ops_override(void)
 	cnxk_eth_sec_ops.session_stats_get = cn10k_eth_sec_session_stats_get;
 	cnxk_eth_sec_ops.macsec_sc_stats_get = cnxk_eth_macsec_sc_stats_get;
 	cnxk_eth_sec_ops.macsec_sa_stats_get = cnxk_eth_macsec_sa_stats_get;
+	cnxk_eth_sec_ops.rx_inject_configure = cn10k_eth_sec_rx_inject_config;
+	cnxk_eth_sec_ops.inb_pkt_rx_inject = cn10k_eth_sec_inb_rx_inject;
 }
diff --git a/drivers/net/cnxk/cn10k_rx.h b/drivers/net/cnxk/cn10k_rx.h
index 7bb4c86d75..c4ad1b64fe 100644
--- a/drivers/net/cnxk/cn10k_rx.h
+++ b/drivers/net/cnxk/cn10k_rx.h
@@ -5,6 +5,7 @@
 #define __CN10K_RX_H__

 #include <rte_ethdev.h>
+#include <rte_security_driver.h>
 #include <rte_vect.h>
 #include "cn10k_rxtx.h"

@@ -487,8 +488,19 @@ nix_sec_meta_to_mbuf_sc(uint64_t cq_w1, uint64_t cq_w5, const uint64_t sa_base,
 	inb_sa = roc_nix_inl_ot_ipsec_inb_sa(sa_base, sa_idx);
 	inb_priv = roc_nix_inl_ot_ipsec_inb_sa_sw_rsvd(inb_sa);

-	/* Update dynamic field with userdata */
-	*rte_security_dynfield(inner) = (uint64_t)inb_priv->userdata;
+	/* Cryptodev injected packet can be identified from SA IDX 0xFFFFFFFF, and
+	 * Ethdev injected packet can be identified with match ID 0xFFFF.
+	 */
+	if (flags & NIX_RX_REAS_F && (sa_idx == 0xFFFFFFFF || hdr->w0.match_id == 0xFFFFU)) {
+		*(uint64_t *)(&inner->rearm_data) = (mbuf_init & ~(BIT_ULL(16) - 1)) |
+						    inner->data_off;
+		if (hdr->w0.match_id == 0xFFFFU)
+			*rte_security_dynfield(inner) = (uint64_t)inb_priv->userdata;
+	} else {
+		/* Update dynamic field with userdata */
+		*rte_security_dynfield(inner) = (uint64_t)inb_priv->userdata;
+		*(uint64_t *)(&inner->rearm_data) = mbuf_init;
+	}

 	/* Get ucc from cpt parse header */
 	ucc = hdr->w3.hw_ccode;
@@ -502,7 +514,6 @@ nix_sec_meta_to_mbuf_sc(uint64_t cq_w1, uint64_t cq_w5, const uint64_t sa_base,

 	inner->pkt_len = len;
 	inner->data_len = len;
-	*(uint64_t *)(&inner->rearm_data) = mbuf_init;

 	inner->ol_flags = ((CPT_COMP_HWGOOD_MASK & (1U << ucc)) ?
 			   RTE_MBUF_F_RX_SEC_OFFLOAD :
@@ -567,11 +578,20 @@ nix_sec_meta_to_mbuf(uint64_t cq_w1, uint64_t cq_w5, uintptr_t inb_sa,
 	*ol_flags &= ~(RTE_MBUF_F_RX_L4_CKSUM_MASK |
 		       RTE_MBUF_F_RX_IP_CKSUM_MASK);

-	/* Get SPI from CPT_PARSE_S's cookie(already swapped) */
-	inb_priv = roc_nix_inl_ot_ipsec_inb_sa_sw_rsvd((void *)inb_sa);

-	/* Update dynamic field with userdata */
-	*rte_security_dynfield(inner) = (uint64_t)inb_priv->userdata;
+	if (flags & NIX_RX_REAS_F && !inb_sa) {
+		/* Clear and update original lower 16 bit of data offset */
+		*rearm = (*rearm & ~(BIT_ULL(16) - 1)) | inner->data_off;
+	} else {
+		/* Get SPI from CPT_PARSE_S's cookie(already swapped) */
+		inb_priv = roc_nix_inl_ot_ipsec_inb_sa_sw_rsvd((void *)inb_sa);
+		/* Update dynamic field with userdata */
+		*rte_security_dynfield(inner) = (uint64_t)inb_priv->userdata;
+	}
+
+	/* Clear and update original lower 16 bit of data offset */
+	if (flags & NIX_RX_REAS_F && hdr->w0.match_id == 0xFFFFU)
+		*rearm = (*rearm & ~(BIT_ULL(16) - 1)) | inner->data_off;

 	/* Mark inner mbuf as get */
 	if (!(flags & NIX_RX_REAS_F) ||
@@ -604,8 +624,10 @@ nix_sec_meta_to_mbuf(uint64_t cq_w1, uint64_t cq_w5, uintptr_t inb_sa,
 			*rearm = vsetq_lane_u64(mbuf_init, *rearm, 0);
 		} else {
 			/* Reassembly failure */
-			nix_sec_attach_frags(hdr, inner, inb_priv, mbuf_init);
-			*ol_flags |= inner->ol_flags;
+			if (inb_sa) {
+				nix_sec_attach_frags(hdr, inner, inb_priv, mbuf_init);
+				*ol_flags |= inner->ol_flags;
+			}
 		}
 	} else if (flags & NIX_RX_REAS_F) {
 		/* Without fragmentation but may have to handle OOP session */
@@ -703,7 +725,14 @@ nix_cqe_xtract_mseg(const union nix_rx_parse_u *rx, struct rte_mbuf *mbuf,
 		cq_w5 = *((const uint64_t *)rx + 4);
 	/* Use inner rx parse for meta pkts sg list */
 	if (cq_w1 & BIT(11) && flags & NIX_RX_OFFLOAD_SECURITY_F) {
-		const uint64_t *wqe = (const uint64_t *)(mbuf + 1);
+		const uint64_t *wqe;
+		/* Rx Inject packet must have Match ID 0xFFFF and for this
+		 * wqe will get from address stored at mbuf+1 location
+		 */
+		if ((flags & NIX_RX_REAS_F) && hdr->w0.match_id == 0xFFFFU)
+			wqe = (const uint64_t *)*((uint64_t *)(mbuf + 1));
+		else
+			wqe = (const uint64_t *)(mbuf + 1);

 		if (hdr->w0.pkt_fmt != ROC_IE_OT_SA_PKT_FMT_FULL)
 			rx = (const union nix_rx_parse_u *)(wqe + 1);
@@ -1191,6 +1220,187 @@ cn10k_nix_flush_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pk
 	return nb_pkts;
 }

+#if defined(RTE_ARCH_ARM64)
+static __rte_always_inline uint16_t
+cn10k_nix_rx_inj_prepare_mseg(struct rte_mbuf *m, uint64_t *cmd)
+{
+	union nix_send_sg_s *sg, l_sg;
+	struct rte_mbuf *m_next;
+	uint16_t segdw, nb_segs;
+	uint64_t len, dlen;
+	uint64_t *slist;
+
+	sg = (union nix_send_sg_s *)cmd;
+	l_sg.u = sg->u;
+	l_sg.u &= 0xC00000000000000; /* LD_TYPE */
+	l_sg.subdc = NIX_SUBDC_SG;
+	nb_segs = m->nb_segs;
+	len = m->pkt_len;
+	slist = &cmd[1];
+
+	/* Fill mbuf segments */
+	do {
+		*slist = rte_pktmbuf_iova(m);
+		dlen = m->data_len;
+		len -= dlen;
+
+		/* Set the segment length */
+		l_sg.u |= ((uint64_t)dlen << (l_sg.segs << 4));
+		l_sg.segs += 1;
+		slist++;
+		nb_segs--;
+		if (l_sg.segs > 2 && nb_segs) {
+			sg->u = l_sg.u;
+			/* Next SG subdesc */
+			sg = (union nix_send_sg_s *)slist;
+			l_sg.u = sg->u;
+			l_sg.u &= 0xC00000000000000; /* LD_TYPE */
+			l_sg.subdc = NIX_SUBDC_SG;
+			slist++;
+		}
+		m_next = m->next;
+		m->next = NULL;
+		m = m_next;
+	} while (nb_segs);
+
+	/* Add remaining bytes of data to last seg */
+	if (len) {
+		uint8_t shft = (l_sg.subdc == NIX_SUBDC_SG) ? ((l_sg.segs - 1) << 4) : 0;
+		dlen = ((l_sg.u >> shft) & 0xFFFFULL) + len;
+		l_sg.u = l_sg.u & ~(0xFFFFULL << shft);
+		l_sg.u |= dlen << shft;
+	}
+	/* Write the last subdc out */
+	sg->u = l_sg.u;
+
+	segdw = (uint64_t *)slist - cmd;
+	/* Roundup extra dwords to multiple of 2 */
+	segdw = (segdw >> 1) + (segdw & 0x1);
+	return segdw;
+}
+
+static __rte_always_inline uint16_t
+cn10k_nix_inj_pkts(struct rte_security_session **sess, struct cnxk_ethdev_inj_cfg *inj_cfg,
+		   struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+	uintptr_t c_lbase = inj_cfg->lmt_base;
+	struct cn10k_sec_sess_priv sess_priv;
+	uint64_t sa_base = inj_cfg->sa_base;
+	uint16_t c_lmt_id, burst, left, i;
+	uintptr_t cptres, rxphdr, dptr;
+	struct rte_mbuf *m, *last;
+	uint8_t lnum, shft, loff;
+	uint64x2_t cmd01, cmd23;
+	uint64_t ucode_cmd[4];
+	rte_iova_t c_io_addr;
+	uint64_t *laddr;
+	uint64_t sa, w0;
+	uint16_t segdw;
+
+	/* Get LMT base address and LMT ID as lcore id */
+	ROC_LMT_CPT_BASE_ID_GET(c_lbase, c_lmt_id);
+	c_io_addr = inj_cfg->io_addr;
+
+	left = nb_pkts;
+again:
+	burst = left > 32 ? 32 : left;
+
+	lnum = 0;
+	loff = 0;
+	shft = 16;
+
+	for (i = 0; i < burst; i++) {
+		m = tx_pkts[i];
+		sess_priv.u64 = sess[i]->fast_mdata;
+		last = rte_pktmbuf_lastseg(m);
+
+		cptres = rte_pktmbuf_mtod_offset(last, uintptr_t, last->data_len);
+		cptres += BIT_ULL(7);
+		cptres = (cptres - 1) & ~(BIT_ULL(7) - 1);
+
+		if (m->nb_segs > 1) {
+			/* Will reserve NIX Rx descriptor with SG list after end of
+			 * last mbuf data location. and pointer to this will be
+			 * stored at 1st mbuf space for Rx path multi-seg processing.
+			 */
+			/* Pointer to WQE header */
+			*(uint64_t *)(m + 1) = cptres;
+			/* Reserve 8 Dwords of WQE Hdr + Rx Parse Hdr */
+			rxphdr = cptres + 8;
+			dptr = rxphdr + 7 * 8;
+			/* Prepare Multiseg SG list */
+			segdw = cn10k_nix_rx_inj_prepare_mseg(m, (uint64_t *)(dptr));
+			*(uint64_t *)rxphdr = (uint64_t)(segdw - 1) << 12;
+			cptres += 64 + segdw * 16;
+			ucode_cmd[1] = dptr | ((uint64_t)m->nb_segs << 60);
+		} else {
+			dptr = (uint64_t)rte_pktmbuf_iova(m);
+			ucode_cmd[1] = dptr;
+		}
+
+		/* Prepare CPT instruction */
+		/* CPT word 0 and 1 */
+		cmd01 = vdupq_n_u64(0);
+		w0 = inj_cfg->cmd_w0 | ((uint64_t)m->l2_len - 2) << 24 | (uint64_t)m->l2_len << 16;
+		cmd01 = vsetq_lane_u64(w0, cmd01, 0);
+		cmd01 = vsetq_lane_u64(cptres, cmd01, 1);
+
+		/* CPT word 2 and 3 */
+		cmd23 = vdupq_n_u64(0);
+		/* Set PF func */
+		w0 &= 0xFFFF000000000000UL;
+		cmd23 = vsetq_lane_u64(w0, cmd23, 0);
+		cmd23 = vsetq_lane_u64(((uint64_t)m + sizeof(struct rte_mbuf)) | 1, cmd23, 1);
+
+		sa_base &= ~0xFFFFUL;
+		sa = (uintptr_t)roc_nix_inl_ot_ipsec_inb_sa(sa_base, sess_priv.sa_idx);
+		ucode_cmd[0] = (ROC_IE_OT_MAJOR_OP_PROCESS_INBOUND_IPSEC << 48 | 1UL << 54 |
+				((uint64_t)sess_priv.chksum) << 32 |
+				((uint64_t)sess_priv.dec_ttl) << 34 | m->pkt_len);
+
+		ucode_cmd[2] = 0;
+		ucode_cmd[3] = (ROC_CPT_DFLT_ENG_GRP_SE_IE << 61 | 1UL << 60 | sa);
+
+		/* Move to our line */
+		laddr = LMT_OFF(c_lbase, lnum, loff ? 64 : 0);
+
+		/* Write CPT instruction to lmt line */
+		vst1q_u64(laddr, cmd01);
+		vst1q_u64((laddr + 2), cmd23);
+
+		*(__uint128_t *)(laddr + 4) = *(__uint128_t *)ucode_cmd;
+		*(__uint128_t *)(laddr + 6) = *(__uint128_t *)(ucode_cmd + 2);
+
+		loff = !loff;
+		lnum = lnum + (loff ? 0 : 1);
+		shft = shft + (loff ? 0 : 3);
+	}
+
+	left -= burst;
+	tx_pkts += burst;
+	sess += burst;
+
+	cn10k_nix_sec_steorl(c_io_addr, c_lmt_id, lnum, loff, shft);
+
+	rte_io_wmb();
+	if (left)
+		goto again;
+
+	return nb_pkts;
+}
+#else
+static __rte_always_inline uint16_t
+cn10k_nix_inj_pkts(struct rte_security_session **sess, struct cnxk_ethdev_inj_cfg *inj_cfg,
+		   struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+	RTE_SET_USED(sess);
+	RTE_SET_USED(inj_cfg);
+	RTE_SET_USED(tx_pkts);
+	RTE_SET_USED(nb_pkts);
+	return 0;
+}
+#endif
+
 #if defined(RTE_ARCH_ARM64)

 static __rte_always_inline uint64_t
@@ -1558,6 +1768,7 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
 			uint64x2_t inner0, inner1, inner2, inner3;
 			uint64x2_t wqe01, wqe23, sa01, sa23;
 			uint16x4_t lens, l2lens, ltypes;
+			uint64x2_t mask01, mask23;
 			uint8x8_t ucc;

 			cpth0 = (uintptr_t)mbuf0 + d_off;
@@ -1587,6 +1798,11 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,

 			sa01 = vshrq_n_u64(sa01, 32);
 			sa23 = vshrq_n_u64(sa23, 32);
+
+			/* Crypto Look-aside Rx Inject case */
+			mask01 = vceqq_u64(sa01, vdupq_n_u64(0xFFFFFFFF));
+			mask23 = vceqq_u64(sa23, vdupq_n_u64(0xFFFFFFFF));
+
 			sa01 = vshlq_n_u64(sa01,
 					   ROC_NIX_INL_OT_IPSEC_INB_SA_SZ_LOG2);
 			sa23 = vshlq_n_u64(sa23,
@@ -1594,6 +1810,11 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
 			sa01 = vaddq_u64(sa01, vdupq_n_u64(sa_base));
 			sa23 = vaddq_u64(sa23, vdupq_n_u64(sa_base));

+			if (flags & NIX_RX_REAS_F) {
+				sa01 = vbicq_u64(sa01, mask01);
+				sa23 = vbicq_u64(sa23, mask23);
+			}
+
 			const uint8x16x2_t tbl = {{
 				{
 					/* ROC_IE_OT_UCC_SUCCESS_PKT_IP_BADCSUM */
diff --git a/drivers/net/cnxk/cn10k_rxtx.h b/drivers/net/cnxk/cn10k_rxtx.h
index aeffc4ac92..2143df1a7e 100644
--- a/drivers/net/cnxk/cn10k_rxtx.h
+++ b/drivers/net/cnxk/cn10k_rxtx.h
@@ -191,4 +191,61 @@ handle_tx_completion_pkts(struct cn10k_eth_txq *txq, uint8_t mt_safe)
 		rte_spinlock_unlock(&txq->tx_compl.ext_buf_lock);
 }

+static __rte_always_inline uint64_t
+cn10k_cpt_tx_steor_data(void)
+{
+	/* We have two CPT instructions per LMTLine */
+	const uint64_t dw_m1 = ROC_CN10K_TWO_CPT_INST_DW_M1;
+	uint64_t data;
+
+	/* This will be moved to addr area */
+	data = dw_m1 << 16;
+	data |= dw_m1 << 19;
+	data |= dw_m1 << 22;
+	data |= dw_m1 << 25;
+	data |= dw_m1 << 28;
+	data |= dw_m1 << 31;
+	data |= dw_m1 << 34;
+	data |= dw_m1 << 37;
+	data |= dw_m1 << 40;
+	data |= dw_m1 << 43;
+	data |= dw_m1 << 46;
+	data |= dw_m1 << 49;
+	data |= dw_m1 << 52;
+	data |= dw_m1 << 55;
+	data |= dw_m1 << 58;
+	data |= dw_m1 << 61;
+
+	return data;
+}
+
+static __rte_always_inline void
+cn10k_nix_sec_steorl(uintptr_t io_addr, uint32_t lmt_id, uint8_t lnum,
+		     uint8_t loff, uint8_t shft)
+{
+	uint64_t data;
+	uintptr_t pa;
+
+	/* Check if there is any CPT instruction to submit */
+	if (!lnum && !loff)
+		return;
+
+	data = cn10k_cpt_tx_steor_data();
+	/* Update lmtline use for partial end line */
+	if (loff) {
+		data &= ~(0x7ULL << shft);
+		/* Update it to half full i.e 64B */
+		data |= (0x3UL << shft);
+	}
+
+	pa = io_addr | ((data >> 16) & 0x7) << 4;
+	data &= ~(0x7ULL << 16);
+	/* Update lines - 1 that contain valid data */
+	data |= ((uint64_t)(lnum + loff - 1)) << 12;
+	data |= (uint64_t)lmt_id;
+
+	/* STEOR */
+	roc_lmt_submit_steorl(data, pa);
+}
+
 #endif /* __CN10K_RXTX_H__ */
diff --git a/drivers/net/cnxk/cn10k_tx.h b/drivers/net/cnxk/cn10k_tx.h
index 467f0ccc65..664e47e1fc 100644
--- a/drivers/net/cnxk/cn10k_tx.h
+++ b/drivers/net/cnxk/cn10k_tx.h
@@ -314,34 +314,6 @@ cn10k_nix_tx_steor_vec_data(const uint16_t flags)
 	return data;
 }

-static __rte_always_inline uint64_t
-cn10k_cpt_tx_steor_data(void)
-{
-	/* We have two CPT instructions per LMTLine */
-	const uint64_t dw_m1 = ROC_CN10K_TWO_CPT_INST_DW_M1;
-	uint64_t data;
-
-	/* This will be moved to addr area */
-	data = dw_m1 << 16;
-	data |= dw_m1 << 19;
-	data |= dw_m1 << 22;
-	data |= dw_m1 << 25;
-	data |= dw_m1 << 28;
-	data |= dw_m1 << 31;
-	data |= dw_m1 << 34;
-	data |= dw_m1 << 37;
-	data |= dw_m1 << 40;
-	data |= dw_m1 << 43;
-	data |= dw_m1 << 46;
-	data |= dw_m1 << 49;
-	data |= dw_m1 << 52;
-	data |= dw_m1 << 55;
-	data |= dw_m1 << 58;
-	data |= dw_m1 << 61;
-
-	return data;
-}
-
 static __rte_always_inline void
 cn10k_nix_tx_skeleton(struct cn10k_eth_txq *txq, uint64_t *cmd,
 		      const uint16_t flags, const uint16_t static_sz)
@@ -461,35 +433,6 @@ cn10k_nix_sec_fc_wait(struct cn10k_eth_txq *txq, uint16_t nb_pkts)
 		goto again;
 }

-static __rte_always_inline void
-cn10k_nix_sec_steorl(uintptr_t io_addr, uint32_t lmt_id, uint8_t lnum,
-		     uint8_t loff, uint8_t shft)
-{
-	uint64_t data;
-	uintptr_t pa;
-
-	/* Check if there is any CPT instruction to submit */
-	if (!lnum && !loff)
-		return;
-
-	data = cn10k_cpt_tx_steor_data();
-	/* Update lmtline use for partial end line */
-	if (loff) {
-		data &= ~(0x7ULL << shft);
-		/* Update it to half full i.e 64B */
-		data |= (0x3UL << shft);
-	}
-
-	pa = io_addr | ((data >> 16) & 0x7) << 4;
-	data &= ~(0x7ULL << 16);
-	/* Update lines - 1 that contain valid data */
-	data |= ((uint64_t)(lnum + loff - 1)) << 12;
-	data |= (uint64_t)lmt_id;
-
-	/* STEOR */
-	roc_lmt_submit_steorl(data, pa);
-}
-
 #if defined(RTE_ARCH_ARM64)
 static __rte_always_inline void
 cn10k_nix_prep_sec_vec(struct rte_mbuf *m, uint64x2_t *cmd0, uint64x2_t *cmd1,
diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h
index 37b6395b93..cc77aefe15 100644
--- a/drivers/net/cnxk/cnxk_ethdev.h
+++ b/drivers/net/cnxk/cnxk_ethdev.h
@@ -424,6 +424,9 @@ struct cnxk_eth_dev {
 	/* MCS device */
 	struct cnxk_mcs_dev *mcs_dev;
 	struct cnxk_macsec_sess_list mcs_list;
+
+	/* Inject packets */
+	struct cnxk_ethdev_inj_cfg inj_cfg;
 };

 struct cnxk_eth_rxq_sp {
diff --git a/drivers/net/cnxk/cnxk_ethdev_devargs.c b/drivers/net/cnxk/cnxk_ethdev_devargs.c
index 8e862be933..50dc80ce2c 100644
--- a/drivers/net/cnxk/cnxk_ethdev_devargs.c
+++ b/drivers/net/cnxk/cnxk_ethdev_devargs.c
@@ -279,6 +279,7 @@ parse_val_u16(const char *key, const char *value, void *extra_args)
 #define CNXK_SQB_SLACK		"sqb_slack"
 #define CNXK_NIX_META_BUF_SZ	"meta_buf_sz"
 #define CNXK_FLOW_AGING_POLL_FREQ	"aging_poll_freq"
+#define CNXK_NIX_RX_INJ_ENABLE	"rx_inj_ena"

 int
 cnxk_ethdev_parse_devargs(struct rte_devargs *devargs, struct cnxk_eth_dev *dev)
@@ -305,6 +306,7 @@ cnxk_ethdev_parse_devargs(struct rte_devargs *devargs, struct cnxk_eth_dev *dev)
 	uint32_t meta_buf_sz = 0;
 	uint16_t no_inl_dev = 0;
 	uint8_t lock_rx_ctx = 0;
+	uint8_t rx_inj_ena = 0;

 	memset(&sdp_chan, 0, sizeof(sdp_chan));
 	memset(&pre_l2_info, 0, sizeof(struct flow_pre_l2_size_info));
@@ -355,6 +357,7 @@ cnxk_ethdev_parse_devargs(struct rte_devargs *devargs, struct cnxk_eth_dev *dev)
 	rte_kvargs_process(kvlist, CNXK_NIX_META_BUF_SZ, &parse_meta_bufsize, &meta_buf_sz);
 	rte_kvargs_process(kvlist, CNXK_FLOW_AGING_POLL_FREQ, &parse_val_u16,
 			   &aging_thread_poll_freq);
+	rte_kvargs_process(kvlist, CNXK_NIX_RX_INJ_ENABLE, &parse_flag, &rx_inj_ena);
 	rte_kvargs_free(kvlist);

 null_devargs:
@@ -387,6 +390,8 @@ cnxk_ethdev_parse_devargs(struct rte_devargs *devargs, struct cnxk_eth_dev *dev)
 	dev->npc.pre_l2_size_offset_mask = pre_l2_info.pre_l2_size_off_mask;
 	dev->npc.pre_l2_size_shift_dir = pre_l2_info.pre_l2_size_shift_dir;
 	dev->npc.flow_age.aging_poll_freq = aging_thread_poll_freq;
+	if (roc_feature_nix_has_rx_inject())
+		dev->nix.rx_inj_ena = rx_inj_ena;
 	return 0;
 exit:
 	return -EINVAL;
@@ -409,4 +414,5 @@ RTE_PMD_REGISTER_PARAM_STRING(net_cnxk,
 			      CNXK_SDP_CHANNEL_MASK "=<1-4095>/<1-4095>"
 			      CNXK_CUSTOM_SA_ACT "=1"
 			      CNXK_SQB_SLACK "=<12-512>"
-			      CNXK_FLOW_AGING_POLL_FREQ "=<10-65535>");
+			      CNXK_FLOW_AGING_POLL_FREQ "=<10-65535>"
+			      CNXK_NIX_RX_INJ_ENABLE "=1");
diff --git a/drivers/net/cnxk/cnxk_ethdev_dp.h b/drivers/net/cnxk/cnxk_ethdev_dp.h
index c1f99a2616..56cfcb7fc6 100644
--- a/drivers/net/cnxk/cnxk_ethdev_dp.h
+++ b/drivers/net/cnxk/cnxk_ethdev_dp.h
@@ -4,6 +4,7 @@
 #ifndef __CNXK_ETHDEV_DP_H__
 #define __CNXK_ETHDEV_DP_H__

+#include <rte_security_driver.h>
 #include <rte_mbuf.h>

 /* If PTP is enabled additional SEND MEM DESC is required which
@@ -82,6 +83,13 @@ struct cnxk_timesync_info {
 	uint64_t *tx_tstamp;
 } __plt_cache_aligned;

+struct cnxk_ethdev_inj_cfg {
+	uintptr_t lmt_base;
+	uint64_t io_addr;
+	uint64_t sa_base;
+	uint64_t cmd_w0;
+} __plt_cache_aligned;
+
 /* Inlines */
 static __rte_always_inline uint64_t
 cnxk_pktmbuf_detach(struct rte_mbuf *m)
diff --git a/drivers/net/cnxk/cnxk_ethdev_sec.c b/drivers/net/cnxk/cnxk_ethdev_sec.c
index b02dac4952..6f5319e534 100644
--- a/drivers/net/cnxk/cnxk_ethdev_sec.c
+++ b/drivers/net/cnxk/cnxk_ethdev_sec.c
@@ -16,6 +16,7 @@
 #define CNXK_NIX_INL_META_BUF_SZ      "meta_buf_sz"
 #define CNXK_NIX_SOFT_EXP_POLL_FREQ   "soft_exp_poll_freq"
 #define CNXK_MAX_IPSEC_RULES	"max_ipsec_rules"
+#define CNXK_NIX_INL_RX_INJ_ENABLE	"rx_inj_ena"

 /* Default soft expiry poll freq in usec */
 #define CNXK_NIX_SOFT_EXP_POLL_FREQ_DFLT 100
@@ -192,6 +193,19 @@ parse_max_ipsec_rules(const char *key, const char *value, void *extra_args)
 	return 0;
 }

+static int
+parse_inl_rx_inj_ena(const char *key, const char *value, void *extra_args)
+{
+	RTE_SET_USED(key);
+	uint32_t val;
+
+	val = atoi(value);
+
+	*(uint8_t *)extra_args = !!(val == 1);
+
+	return 0;
+}
+
 int
 cnxk_eth_outb_sa_idx_get(struct cnxk_eth_dev *dev, uint32_t *idx_p,
 			 uint32_t spi)
@@ -352,6 +366,7 @@ nix_inl_parse_devargs(struct rte_devargs *devargs,
 	struct rte_kvargs *kvlist;
 	uint32_t nb_meta_bufs = 0;
 	uint32_t meta_buf_sz = 0;
+	uint8_t rx_inj_ena = 0;
 	uint8_t selftest = 0;

 	memset(&cpt_channel, 0, sizeof(cpt_channel));
@@ -378,6 +393,7 @@ nix_inl_parse_devargs(struct rte_devargs *devargs,
 	rte_kvargs_process(kvlist, CNXK_NIX_SOFT_EXP_POLL_FREQ,
 			   &parse_val_u32, &soft_exp_poll_freq);
 	rte_kvargs_process(kvlist, CNXK_MAX_IPSEC_RULES, &parse_max_ipsec_rules, &max_ipsec_rules);
+	rte_kvargs_process(kvlist, CNXK_NIX_INL_RX_INJ_ENABLE, &parse_inl_rx_inj_ena, &rx_inj_ena);
 	rte_kvargs_free(kvlist);

 null_devargs:
@@ -391,6 +407,8 @@ nix_inl_parse_devargs(struct rte_devargs *devargs,
 	inl_dev->meta_buf_sz = meta_buf_sz;
 	inl_dev->soft_exp_poll_freq = soft_exp_poll_freq;
 	inl_dev->max_ipsec_rules = max_ipsec_rules;
+	if (roc_feature_nix_has_rx_inject())
+		inl_dev->rx_inj_ena = rx_inj_ena;
 	return 0;
 exit:
 	return -EINVAL;
@@ -518,4 +536,5 @@ RTE_PMD_REGISTER_PARAM_STRING(cnxk_nix_inl,
 			      CNXK_NIX_INL_NB_META_BUFS "=<1-U32_MAX>"
 			      CNXK_NIX_INL_META_BUF_SZ "=<1-U32_MAX>"
 			      CNXK_NIX_SOFT_EXP_POLL_FREQ "=<0-U32_MAX>"
-			      CNXK_MAX_IPSEC_RULES "=<1-4095>");
+			      CNXK_MAX_IPSEC_RULES "=<1-4095>"
+			      CNXK_NIX_INL_RX_INJ_ENABLE "=1");
--
2.25.1


^ permalink raw reply	[flat|nested] 12+ messages in thread

* [PATCH v2 3/5] common/cnxk: fix for inline dev pointer check
  2024-02-22 10:07 ` [PATCH v2 1/5] common/cnxk: reserve CPT LF for Rx inject Rahul Bhansali
  2024-02-22 10:07   ` [PATCH v2 2/5] net/cnxk: support of " Rahul Bhansali
@ 2024-02-22 10:07   ` Rahul Bhansali
  2024-02-22 10:07   ` [PATCH v2 4/5] net/cnxk: fix to add reassembly fast path flag Rahul Bhansali
  2024-02-22 10:07   ` [PATCH v2 5/5] net/cnxk: select optimized LLC transaction type Rahul Bhansali
  3 siblings, 0 replies; 12+ messages in thread
From: Rahul Bhansali @ 2024-02-22 10:07 UTC (permalink / raw)
  To: dev, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
	Satha Rao, Rahul Bhansali
  Cc: stable

Add a missing check of the inline device pointer before accessing
the is_multi_channel variable.

Fixes: 7ea187184a51 ("common/cnxk: support 1-N pool-aura per NIX LF")
Cc: stable@dpdk.org

Signed-off-by: Rahul Bhansali <rbhansali@marvell.com>
---
Changes in v2: No change

 drivers/common/cnxk/roc_nix_inl.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/common/cnxk/roc_nix_inl.c b/drivers/common/cnxk/roc_nix_inl.c
index de8fd2a605..a205c658e9 100644
--- a/drivers/common/cnxk/roc_nix_inl.c
+++ b/drivers/common/cnxk/roc_nix_inl.c
@@ -933,7 +933,8 @@ roc_nix_inl_inb_init(struct roc_nix *roc_nix)
 	inl_dev = idev->nix_inl_dev;

 	roc_nix->custom_meta_aura_ena = (roc_nix->local_meta_aura_ena &&
-					 (inl_dev->is_multi_channel || roc_nix->custom_sa_action));
+					 ((inl_dev && inl_dev->is_multi_channel) ||
+					  roc_nix->custom_sa_action));
 	if (!roc_model_is_cn9k() && !roc_errata_nix_no_meta_aura()) {
 		nix->need_meta_aura = true;
 		if (!roc_nix->local_meta_aura_ena || roc_nix->custom_meta_aura_ena)
--
2.25.1


^ permalink raw reply	[flat|nested] 12+ messages in thread

* [PATCH v2 4/5] net/cnxk: fix to add reassembly fast path flag
  2024-02-22 10:07 ` [PATCH v2 1/5] common/cnxk: reserve CPT LF for Rx inject Rahul Bhansali
  2024-02-22 10:07   ` [PATCH v2 2/5] net/cnxk: support of " Rahul Bhansali
  2024-02-22 10:07   ` [PATCH v2 3/5] common/cnxk: fix for inline dev pointer check Rahul Bhansali
@ 2024-02-22 10:07   ` Rahul Bhansali
  2024-02-22 10:07   ` [PATCH v2 5/5] net/cnxk: select optimized LLC transaction type Rahul Bhansali
  3 siblings, 0 replies; 12+ messages in thread
From: Rahul Bhansali @ 2024-02-22 10:07 UTC (permalink / raw)
  To: dev, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
	Satha Rao, Akhil Goyal
  Cc: Rahul Bhansali, stable

For IPsec decrypted packets, the full packet format condition check
is enabled for both the reassembly and non-reassembly paths as part
of OOP handling, while it should apply only to the reassembly path.
To fix this, a NIX_RX_REAS_F flag condition is added to skip the
packet format check in the non-reassembly fast path.

Fixes: 5e9e008d0127 ("net/cnxk: support inline ingress out-of-place session")
Cc: stable@dpdk.org

Signed-off-by: Rahul Bhansali <rbhansali@marvell.com>
---
Changes in v2: No change

 drivers/net/cnxk/cn10k_rx.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/net/cnxk/cn10k_rx.h b/drivers/net/cnxk/cn10k_rx.h
index c4ad1b64fe..89621af3fb 100644
--- a/drivers/net/cnxk/cn10k_rx.h
+++ b/drivers/net/cnxk/cn10k_rx.h
@@ -734,7 +734,7 @@ nix_cqe_xtract_mseg(const union nix_rx_parse_u *rx, struct rte_mbuf *mbuf,
 		else
 			wqe = (const uint64_t *)(mbuf + 1);

-		if (hdr->w0.pkt_fmt != ROC_IE_OT_SA_PKT_FMT_FULL)
+		if (!(flags & NIX_RX_REAS_F) || hdr->w0.pkt_fmt != ROC_IE_OT_SA_PKT_FMT_FULL)
 			rx = (const union nix_rx_parse_u *)(wqe + 1);
 	}

--
2.25.1


^ permalink raw reply	[flat|nested] 12+ messages in thread

* [PATCH v2 5/5] net/cnxk: select optimized LLC transaction type
  2024-02-22 10:07 ` [PATCH v2 1/5] common/cnxk: reserve CPT LF for Rx inject Rahul Bhansali
                     ` (2 preceding siblings ...)
  2024-02-22 10:07   ` [PATCH v2 4/5] net/cnxk: fix to add reassembly fast path flag Rahul Bhansali
@ 2024-02-22 10:07   ` Rahul Bhansali
  2024-02-23  9:04     ` Jerin Jacob
  3 siblings, 1 reply; 12+ messages in thread
From: Rahul Bhansali @ 2024-02-22 10:07 UTC (permalink / raw)
  To: dev, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao
  Cc: Rahul Bhansali

Optimize LLC transactions by using the LDWB LDTYPE option
in SG preparation for Tx. With this, if the data is present
and dirty in the LLC, the LLC marks the data clean.

Signed-off-by: Rahul Bhansali <rbhansali@marvell.com>
---
Changes in v2: No change

 drivers/net/cnxk/cn10k_tx.h | 16 +++++++++++++---
 1 file changed, 13 insertions(+), 3 deletions(-)

diff --git a/drivers/net/cnxk/cn10k_tx.h b/drivers/net/cnxk/cn10k_tx.h
index 664e47e1fc..fcd19be77e 100644
--- a/drivers/net/cnxk/cn10k_tx.h
+++ b/drivers/net/cnxk/cn10k_tx.h
@@ -331,9 +331,15 @@ cn10k_nix_tx_skeleton(struct cn10k_eth_txq *txq, uint64_t *cmd,
 		else
 			cmd[2] = NIX_SUBDC_EXT << 60;
 		cmd[3] = 0;
-		cmd[4] = (NIX_SUBDC_SG << 60) | BIT_ULL(48);
+		if (!(flags & NIX_TX_OFFLOAD_MBUF_NOFF_F))
+			cmd[4] = (NIX_SUBDC_SG << 60) | (NIX_SENDLDTYPE_LDWB << 58) | BIT_ULL(48);
+		else
+			cmd[4] = (NIX_SUBDC_SG << 60) | BIT_ULL(48);
 	} else {
-		cmd[2] = (NIX_SUBDC_SG << 60) | BIT_ULL(48);
+		if (!(flags & NIX_TX_OFFLOAD_MBUF_NOFF_F))
+			cmd[2] = (NIX_SUBDC_SG << 60) | (NIX_SENDLDTYPE_LDWB << 58) | BIT_ULL(48);
+		else
+			cmd[2] = (NIX_SUBDC_SG << 60) | BIT_ULL(48);
 	}
 }

@@ -1989,7 +1995,11 @@ cn10k_nix_xmit_pkts_vector(void *tx_queue, uint64_t *ws,

 	senddesc01_w1 = vdupq_n_u64(0);
 	senddesc23_w1 = senddesc01_w1;
-	sgdesc01_w0 = vdupq_n_u64((NIX_SUBDC_SG << 60) | BIT_ULL(48));
+	if (!(flags & NIX_TX_OFFLOAD_MBUF_NOFF_F))
+		sgdesc01_w0 = vdupq_n_u64((NIX_SUBDC_SG << 60) | (NIX_SENDLDTYPE_LDWB << 58) |
+					  BIT_ULL(48));
+	else
+		sgdesc01_w0 = vdupq_n_u64((NIX_SUBDC_SG << 60) | BIT_ULL(48));
 	sgdesc23_w0 = sgdesc01_w0;

 	if (flags & NIX_TX_NEED_EXT_HDR) {
--
2.25.1


^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH v2 5/5] net/cnxk: select optimized LLC transaction type
  2024-02-22 10:07   ` [PATCH v2 5/5] net/cnxk: select optimized LLC transaction type Rahul Bhansali
@ 2024-02-23  9:04     ` Jerin Jacob
  0 siblings, 0 replies; 12+ messages in thread
From: Jerin Jacob @ 2024-02-23  9:04 UTC (permalink / raw)
  To: Rahul Bhansali
  Cc: dev, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao

On Thu, Feb 22, 2024 at 3:38 PM Rahul Bhansali <rbhansali@marvell.com> wrote:
>
> LLC transaction optimization by using LDWB LDTYPE option
> in SG preparation for Tx. With this, if data is present
> and dirty in LLC then the LLC would mark the data clean.
>
> Signed-off-by: Rahul Bhansali <rbhansali@marvell.com>

Series applied to dpdk-next-net-mrvl/for-main. Thanks



> ---
> Changes in v2: No change
>
>  drivers/net/cnxk/cn10k_tx.h | 16 +++++++++++++---
>  1 file changed, 13 insertions(+), 3 deletions(-)
>
> diff --git a/drivers/net/cnxk/cn10k_tx.h b/drivers/net/cnxk/cn10k_tx.h
> index 664e47e1fc..fcd19be77e 100644
> --- a/drivers/net/cnxk/cn10k_tx.h
> +++ b/drivers/net/cnxk/cn10k_tx.h
> @@ -331,9 +331,15 @@ cn10k_nix_tx_skeleton(struct cn10k_eth_txq *txq, uint64_t *cmd,
>                 else
>                         cmd[2] = NIX_SUBDC_EXT << 60;
>                 cmd[3] = 0;
> -               cmd[4] = (NIX_SUBDC_SG << 60) | BIT_ULL(48);
> +               if (!(flags & NIX_TX_OFFLOAD_MBUF_NOFF_F))
> +                       cmd[4] = (NIX_SUBDC_SG << 60) | (NIX_SENDLDTYPE_LDWB << 58) | BIT_ULL(48);
> +               else
> +                       cmd[4] = (NIX_SUBDC_SG << 60) | BIT_ULL(48);
>         } else {
> -               cmd[2] = (NIX_SUBDC_SG << 60) | BIT_ULL(48);
> +               if (!(flags & NIX_TX_OFFLOAD_MBUF_NOFF_F))
> +                       cmd[2] = (NIX_SUBDC_SG << 60) | (NIX_SENDLDTYPE_LDWB << 58) | BIT_ULL(48);
> +               else
> +                       cmd[2] = (NIX_SUBDC_SG << 60) | BIT_ULL(48);
>         }
>  }
>
> @@ -1989,7 +1995,11 @@ cn10k_nix_xmit_pkts_vector(void *tx_queue, uint64_t *ws,
>
>         senddesc01_w1 = vdupq_n_u64(0);
>         senddesc23_w1 = senddesc01_w1;
> -       sgdesc01_w0 = vdupq_n_u64((NIX_SUBDC_SG << 60) | BIT_ULL(48));
> +       if (!(flags & NIX_TX_OFFLOAD_MBUF_NOFF_F))
> +               sgdesc01_w0 = vdupq_n_u64((NIX_SUBDC_SG << 60) | (NIX_SENDLDTYPE_LDWB << 58) |
> +                                         BIT_ULL(48));
> +       else
> +               sgdesc01_w0 = vdupq_n_u64((NIX_SUBDC_SG << 60) | BIT_ULL(48));
>         sgdesc23_w0 = sgdesc01_w0;
>
>         if (flags & NIX_TX_NEED_EXT_HDR) {
> --
> 2.25.1
>

^ permalink raw reply	[flat|nested] 12+ messages in thread

end of thread, other threads:[~2024-02-23  9:04 UTC | newest]

Thread overview: 12+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2024-01-19  5:57 [PATCH 1/5] common/cnxk: reserve CPT LF for Rx inject Rahul Bhansali
2024-01-19  5:57 ` [PATCH 2/5] net/cnxk: support of " Rahul Bhansali
2024-02-22  8:55   ` Jerin Jacob
2024-01-19  5:57 ` [PATCH 3/5] common/cnxk: fix for inline dev pointer check Rahul Bhansali
2024-01-19  5:57 ` [PATCH 4/5] net/cnxk: fix to add reassembly fast path flag Rahul Bhansali
2024-01-19  5:57 ` [PATCH 5/5] net/cnxk: select optimized LLC transaction type Rahul Bhansali
2024-02-22 10:07 ` [PATCH v2 1/5] common/cnxk: reserve CPT LF for Rx inject Rahul Bhansali
2024-02-22 10:07   ` [PATCH v2 2/5] net/cnxk: support of " Rahul Bhansali
2024-02-22 10:07   ` [PATCH v2 3/5] common/cnxk: fix for inline dev pointer check Rahul Bhansali
2024-02-22 10:07   ` [PATCH v2 4/5] net/cnxk: fix to add reassembly fast path flag Rahul Bhansali
2024-02-22 10:07   ` [PATCH v2 5/5] net/cnxk: select optimized LLC transaction type Rahul Bhansali
2024-02-23  9:04     ` Jerin Jacob

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).