DPDK patches and discussions
From: Nithin Dabilpuram <ndabilpuram@marvell.com>
To: Nithin Dabilpuram <ndabilpuram@marvell.com>,
	Kiran Kumar K <kirankumark@marvell.com>,
	Sunil Kumar Kori <skori@marvell.com>,
	Satha Rao <skoteshwar@marvell.com>, Ray Kinsella <mdr@ashroe.eu>
Cc: <jerinj@marvell.com>, <schalla@marvell.com>, <dev@dpdk.org>
Subject: [dpdk-dev] [PATCH 06/27] common/cnxk: add nix inline device init and fini
Date: Thu, 2 Sep 2021 07:44:44 +0530	[thread overview]
Message-ID: <20210902021505.17607-7-ndabilpuram@marvell.com> (raw)
In-Reply-To: <20210902021505.17607-1-ndabilpuram@marvell.com>

Add support to init and fini the NIX inline device with a NIX LF,
SSO LF and SSOW LF for inline inbound IPsec in CN10K.

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
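Note: a minimal caller sketch of the new API, for illustration only (not
part of the patch). The probe/remove wrapper names and the SPI count are
hypothetical; only roc_nix_inl_dev_init/fini/dump and the roc_nix_inl_dev
fields below come from this patch:

	/* Hypothetical probe/remove glue around the new inline device API */
	static struct roc_nix_inl_dev inl_dev;

	static int
	example_inl_dev_probe(struct plt_pci_device *pci_dev)
	{
		int rc;

		memset(&inl_dev, 0, sizeof(inl_dev));
		inl_dev.pci_dev = pci_dev;
		inl_dev.ipsec_in_max_spi = 4096; /* example SPI range */
		inl_dev.selftest = false;

		rc = roc_nix_inl_dev_init(&inl_dev);
		if (rc)
			return rc;

		/* Optional: dump inline device state for debug */
		roc_nix_inl_dev_dump(&inl_dev);
		return 0;
	}

	static int
	example_inl_dev_remove(void)
	{
		return roc_nix_inl_dev_fini(&inl_dev);
	}
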
 drivers/common/cnxk/meson.build        |   1 +
 drivers/common/cnxk/roc_api.h          |   2 +
 drivers/common/cnxk/roc_cpt.c          |   7 +-
 drivers/common/cnxk/roc_idev.c         |   2 +
 drivers/common/cnxk/roc_idev_priv.h    |   3 +
 drivers/common/cnxk/roc_nix_debug.c    |  35 +++
 drivers/common/cnxk/roc_nix_inl.h      |  55 ++++
 drivers/common/cnxk/roc_nix_inl_dev.c  | 544 +++++++++++++++++++++++++++++++++
 drivers/common/cnxk/roc_nix_inl_priv.h |   2 +
 drivers/common/cnxk/roc_platform.h     |   2 +
 drivers/common/cnxk/version.map        |   3 +
 11 files changed, 653 insertions(+), 3 deletions(-)
 create mode 100644 drivers/common/cnxk/roc_nix_inl_dev.c

diff --git a/drivers/common/cnxk/meson.build b/drivers/common/cnxk/meson.build
index 207ca00..e8940d7 100644
--- a/drivers/common/cnxk/meson.build
+++ b/drivers/common/cnxk/meson.build
@@ -28,6 +28,7 @@ sources = files(
         'roc_nix_debug.c',
         'roc_nix_fc.c',
         'roc_nix_irq.c',
+        'roc_nix_inl_dev.c',
         'roc_nix_inl_dev_irq.c',
         'roc_nix_mac.c',
         'roc_nix_mcast.c',
diff --git a/drivers/common/cnxk/roc_api.h b/drivers/common/cnxk/roc_api.h
index c1af95e..53f4e4b 100644
--- a/drivers/common/cnxk/roc_api.h
+++ b/drivers/common/cnxk/roc_api.h
@@ -53,6 +53,8 @@
 #define PCI_DEVID_CNXK_RVU_SDP_PF     0xA0f6
 #define PCI_DEVID_CNXK_RVU_SDP_VF     0xA0f7
 #define PCI_DEVID_CNXK_BPHY	      0xA089
+#define PCI_DEVID_CNXK_RVU_NIX_INL_PF 0xA0F0
+#define PCI_DEVID_CNXK_RVU_NIX_INL_VF 0xA0F1
 
 #define PCI_DEVID_CN9K_CGX  0xA059
 #define PCI_DEVID_CN10K_RPM 0xA060
diff --git a/drivers/common/cnxk/roc_cpt.c b/drivers/common/cnxk/roc_cpt.c
index 5e35d1b..3222b3e 100644
--- a/drivers/common/cnxk/roc_cpt.c
+++ b/drivers/common/cnxk/roc_cpt.c
@@ -381,11 +381,12 @@ cpt_lfs_alloc(struct dev *dev, uint8_t eng_grpmsk, uint8_t blkaddr,
 	if (blkaddr != RVU_BLOCK_ADDR_CPT0 && blkaddr != RVU_BLOCK_ADDR_CPT1)
 		return -EINVAL;
 
-	PLT_SET_USED(inl_dev_sso);
-
 	req = mbox_alloc_msg_cpt_lf_alloc(mbox);
 	req->nix_pf_func = 0;
-	req->sso_pf_func = idev_sso_pffunc_get();
+	if (inl_dev_sso && nix_inl_dev_pffunc_get())
+		req->sso_pf_func = nix_inl_dev_pffunc_get();
+	else
+		req->sso_pf_func = idev_sso_pffunc_get();
 	req->eng_grpmsk = eng_grpmsk;
 	req->blkaddr = blkaddr;
 
diff --git a/drivers/common/cnxk/roc_idev.c b/drivers/common/cnxk/roc_idev.c
index 1494187..648f37b 100644
--- a/drivers/common/cnxk/roc_idev.c
+++ b/drivers/common/cnxk/roc_idev.c
@@ -38,6 +38,8 @@ idev_set_defaults(struct idev_cfg *idev)
 	idev->num_lmtlines = 0;
 	idev->bphy = NULL;
 	idev->cpt = NULL;
+	idev->nix_inl_dev = NULL;
+	plt_spinlock_init(&idev->nix_inl_dev_lock);
 	__atomic_store_n(&idev->npa_refcnt, 0, __ATOMIC_RELEASE);
 }
 
diff --git a/drivers/common/cnxk/roc_idev_priv.h b/drivers/common/cnxk/roc_idev_priv.h
index 84e6f1e..2c8309b 100644
--- a/drivers/common/cnxk/roc_idev_priv.h
+++ b/drivers/common/cnxk/roc_idev_priv.h
@@ -9,6 +9,7 @@
 struct npa_lf;
 struct roc_bphy;
 struct roc_cpt;
+struct nix_inl_dev;
 struct idev_cfg {
 	uint16_t sso_pf_func;
 	uint16_t npa_pf_func;
@@ -20,6 +21,8 @@ struct idev_cfg {
 	uint64_t lmt_base_addr;
 	struct roc_bphy *bphy;
 	struct roc_cpt *cpt;
+	struct nix_inl_dev *nix_inl_dev;
+	plt_spinlock_t nix_inl_dev_lock;
 };
 
 /* Generic */
diff --git a/drivers/common/cnxk/roc_nix_debug.c b/drivers/common/cnxk/roc_nix_debug.c
index 9539bb9..582f5a3 100644
--- a/drivers/common/cnxk/roc_nix_debug.c
+++ b/drivers/common/cnxk/roc_nix_debug.c
@@ -1213,3 +1213,38 @@ roc_nix_dump(struct roc_nix *roc_nix)
 	nix_dump("  \trss_alg_idx = %d", nix->rss_alg_idx);
 	nix_dump("  \ttx_pause = %d", nix->tx_pause);
 }
+
+void
+roc_nix_inl_dev_dump(struct roc_nix_inl_dev *roc_inl_dev)
+{
+	struct nix_inl_dev *inl_dev =
+		(struct nix_inl_dev *)&roc_inl_dev->reserved;
+	struct dev *dev = &inl_dev->dev;
+
+	nix_dump("nix_inl_dev@%p", inl_dev);
+	nix_dump("  pf = %d", dev_get_pf(dev->pf_func));
+	nix_dump("  vf = %d", dev_get_vf(dev->pf_func));
+	nix_dump("  bar2 = 0x%" PRIx64, dev->bar2);
+	nix_dump("  bar4 = 0x%" PRIx64, dev->bar4);
+
+	nix_dump("  \tpci_dev = %p", inl_dev->pci_dev);
+	nix_dump("  \tnix_base = 0x%" PRIxPTR "", inl_dev->nix_base);
+	nix_dump("  \tsso_base = 0x%" PRIxPTR "", inl_dev->sso_base);
+	nix_dump("  \tssow_base = 0x%" PRIxPTR "", inl_dev->ssow_base);
+	nix_dump("  \tnix_msixoff = %d", inl_dev->nix_msixoff);
+	nix_dump("  \tsso_msixoff = %d", inl_dev->sso_msixoff);
+	nix_dump("  \tssow_msixoff = %d", inl_dev->ssow_msixoff);
+	nix_dump("  \tnix_cints = %d", inl_dev->cints);
+	nix_dump("  \tnix_qints = %d", inl_dev->qints);
+	nix_dump("  \trq_refs = %d", inl_dev->rq_refs);
+	nix_dump("  \tinb_sa_base = 0x%p", inl_dev->inb_sa_base);
+	nix_dump("  \tinb_sa_sz = %d", inl_dev->inb_sa_sz);
+	nix_dump("  \txaq_buf_size = %u", inl_dev->xaq_buf_size);
+	nix_dump("  \txae_waes = %u", inl_dev->xae_waes);
+	nix_dump("  \tiue = %u", inl_dev->iue);
+	nix_dump("  \txaq_aura = 0x%" PRIx64, inl_dev->xaq_aura);
+	nix_dump("  \txaq_mem = 0x%p", inl_dev->xaq_mem);
+
+	nix_dump("  \tinl_dev_rq:");
+	roc_nix_rq_dump(&inl_dev->rq);
+}
diff --git a/drivers/common/cnxk/roc_nix_inl.h b/drivers/common/cnxk/roc_nix_inl.h
index 1ec3dda..f1fe4a2 100644
--- a/drivers/common/cnxk/roc_nix_inl.h
+++ b/drivers/common/cnxk/roc_nix_inl.h
@@ -4,7 +4,62 @@
 #ifndef _ROC_NIX_INL_H_
 #define _ROC_NIX_INL_H_
 
+/* ONF INB HW area */
+#define ROC_NIX_INL_ONF_IPSEC_INB_HW_SZ                                        \
+	PLT_ALIGN(sizeof(struct roc_onf_ipsec_inb_sa), ROC_ALIGN)
+/* ONF INB SW reserved area */
+#define ROC_NIX_INL_ONF_IPSEC_INB_SW_RSVD 384
+#define ROC_NIX_INL_ONF_IPSEC_INB_SA_SZ                                        \
+	(ROC_NIX_INL_ONF_IPSEC_INB_HW_SZ + ROC_NIX_INL_ONF_IPSEC_INB_SW_RSVD)
+#define ROC_NIX_INL_ONF_IPSEC_INB_SA_SZ_LOG2 9
+
+/* ONF OUTB HW area */
+#define ROC_NIX_INL_ONF_IPSEC_OUTB_HW_SZ                                       \
+	PLT_ALIGN(sizeof(struct roc_onf_ipsec_outb_sa), ROC_ALIGN)
+/* ONF OUTB SW reserved area */
+#define ROC_NIX_INL_ONF_IPSEC_OUTB_SW_RSVD 128
+#define ROC_NIX_INL_ONF_IPSEC_OUTB_SA_SZ                                       \
+	(ROC_NIX_INL_ONF_IPSEC_OUTB_HW_SZ + ROC_NIX_INL_ONF_IPSEC_OUTB_SW_RSVD)
+#define ROC_NIX_INL_ONF_IPSEC_OUTB_SA_SZ_LOG2 8
+
+/* OT INB HW area */
+#define ROC_NIX_INL_OT_IPSEC_INB_HW_SZ                                         \
+	PLT_ALIGN(sizeof(struct roc_ot_ipsec_inb_sa), ROC_ALIGN)
+/* OT INB SW reserved area */
+#define ROC_NIX_INL_OT_IPSEC_INB_SW_RSVD 128
+#define ROC_NIX_INL_OT_IPSEC_INB_SA_SZ                                         \
+	(ROC_NIX_INL_OT_IPSEC_INB_HW_SZ + ROC_NIX_INL_OT_IPSEC_INB_SW_RSVD)
+#define ROC_NIX_INL_OT_IPSEC_INB_SA_SZ_LOG2 10
+
+/* OT OUTB HW area */
+#define ROC_NIX_INL_OT_IPSEC_OUTB_HW_SZ                                        \
+	PLT_ALIGN(sizeof(struct roc_ot_ipsec_outb_sa), ROC_ALIGN)
+/* OT OUTB SW reserved area */
+#define ROC_NIX_INL_OT_IPSEC_OUTB_SW_RSVD 128
+#define ROC_NIX_INL_OT_IPSEC_OUTB_SA_SZ                                        \
+	(ROC_NIX_INL_OT_IPSEC_OUTB_HW_SZ + ROC_NIX_INL_OT_IPSEC_OUTB_SW_RSVD)
+#define ROC_NIX_INL_OT_IPSEC_OUTB_SA_SZ_LOG2 9
+
+/* Alignment of SA Base */
+#define ROC_NIX_INL_SA_BASE_ALIGN BIT_ULL(16)
+
 /* Inline device SSO Work callback */
 typedef void (*roc_nix_inl_sso_work_cb_t)(uint64_t *gw, void *args);
 
+struct roc_nix_inl_dev {
+	/* Input parameters */
+	struct plt_pci_device *pci_dev;
+	uint16_t ipsec_in_max_spi;
+	bool selftest;
+	/* End of input parameters */
+
+#define ROC_NIX_INL_MEM_SZ (1024)
+	uint8_t reserved[ROC_NIX_INL_MEM_SZ] __plt_cache_aligned;
+} __plt_cache_aligned;
+
+/* NIX Inline Device API */
+int __roc_api roc_nix_inl_dev_init(struct roc_nix_inl_dev *roc_inl_dev);
+int __roc_api roc_nix_inl_dev_fini(struct roc_nix_inl_dev *roc_inl_dev);
+void __roc_api roc_nix_inl_dev_dump(struct roc_nix_inl_dev *roc_inl_dev);
+
 #endif /* _ROC_NIX_INL_H_ */
diff --git a/drivers/common/cnxk/roc_nix_inl_dev.c b/drivers/common/cnxk/roc_nix_inl_dev.c
new file mode 100644
index 0000000..214f183
--- /dev/null
+++ b/drivers/common/cnxk/roc_nix_inl_dev.c
@@ -0,0 +1,544 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+
+#include "roc_api.h"
+#include "roc_priv.h"
+
+#define XAQ_CACHE_CNT 0x7
+
+/* Default Rx Config for Inline NIX LF */
+#define NIX_INL_LF_RX_CFG                                                      \
+	(ROC_NIX_LF_RX_CFG_DROP_RE | ROC_NIX_LF_RX_CFG_L2_LEN_ERR |            \
+	 ROC_NIX_LF_RX_CFG_IP6_UDP_OPT | ROC_NIX_LF_RX_CFG_DIS_APAD |          \
+	 ROC_NIX_LF_RX_CFG_CSUM_IL4 | ROC_NIX_LF_RX_CFG_CSUM_OL4 |             \
+	 ROC_NIX_LF_RX_CFG_LEN_IL4 | ROC_NIX_LF_RX_CFG_LEN_IL3 |               \
+	 ROC_NIX_LF_RX_CFG_LEN_OL4 | ROC_NIX_LF_RX_CFG_LEN_OL3)
+
+uint16_t
+nix_inl_dev_pffunc_get(void)
+{
+	struct idev_cfg *idev = idev_get_cfg();
+	struct nix_inl_dev *inl_dev;
+
+	if (idev != NULL) {
+		inl_dev = idev->nix_inl_dev;
+		if (inl_dev)
+			return inl_dev->dev.pf_func;
+	}
+	return 0;
+}
+
+static void
+nix_inl_selftest_work_cb(uint64_t *gw, void *args)
+{
+	uintptr_t work = gw[1];
+
+	*((uintptr_t *)args + (gw[0] & 0x1)) = work;
+
+	plt_atomic_thread_fence(__ATOMIC_ACQ_REL);
+}
+
+static int
+nix_inl_selftest(void)
+{
+	struct idev_cfg *idev = idev_get_cfg();
+	roc_nix_inl_sso_work_cb_t save_cb;
+	static uintptr_t work_arr[2];
+	struct nix_inl_dev *inl_dev;
+	void *save_cb_args;
+	uint64_t add_work0;
+	int rc = 0;
+
+	if (idev == NULL)
+		return -ENOTSUP;
+
+	inl_dev = idev->nix_inl_dev;
+	if (inl_dev == NULL)
+		return -ENOTSUP;
+
+	plt_info("Performing nix inl self test");
+
+	/* Save and update cb to test cb */
+	save_cb = inl_dev->work_cb;
+	save_cb_args = inl_dev->cb_args;
+	inl_dev->work_cb = nix_inl_selftest_work_cb;
+	inl_dev->cb_args = work_arr;
+
+	plt_atomic_thread_fence(__ATOMIC_ACQ_REL);
+
+#define WORK_MAGIC1 0x335577ff0
+#define WORK_MAGIC2 0xdeadbeef0
+
+	/* Add work */
+	add_work0 = ((uint64_t)(SSO_TT_ORDERED) << 32) | 0x0;
+	roc_store_pair(add_work0, WORK_MAGIC1, inl_dev->sso_base);
+	add_work0 = ((uint64_t)(SSO_TT_ORDERED) << 32) | 0x1;
+	roc_store_pair(add_work0, WORK_MAGIC2, inl_dev->sso_base);
+
+	plt_delay_ms(10000);
+
+	/* Check if we got expected work */
+	if (work_arr[0] != WORK_MAGIC1 || work_arr[1] != WORK_MAGIC2) {
+		plt_err("Failed to get expected work, [0]=%p [1]=%p",
+			(void *)work_arr[0], (void *)work_arr[1]);
+		rc = -EFAULT;
+		goto exit;
+	}
+
+	plt_info("Work, [0]=%p [1]=%p", (void *)work_arr[0],
+		 (void *)work_arr[1]);
+
+exit:
+	/* Restore state */
+	inl_dev->work_cb = save_cb;
+	inl_dev->cb_args = save_cb_args;
+	return rc;
+}
+
+static int
+nix_inl_nix_ipsec_cfg(struct nix_inl_dev *inl_dev, bool ena)
+{
+	struct nix_inline_ipsec_lf_cfg *lf_cfg;
+	struct mbox *mbox = (&inl_dev->dev)->mbox;
+	uint32_t sa_w;
+
+	lf_cfg = mbox_alloc_msg_nix_inline_ipsec_lf_cfg(mbox);
+	if (lf_cfg == NULL)
+		return -ENOSPC;
+
+	if (ena) {
+		sa_w = plt_align32pow2(inl_dev->ipsec_in_max_spi + 1);
+		sa_w = plt_log2_u32(sa_w);
+
+		lf_cfg->enable = 1;
+		lf_cfg->sa_base_addr = (uintptr_t)inl_dev->inb_sa_base;
+		lf_cfg->ipsec_cfg1.sa_idx_w = sa_w;
+		/* CN9K SA size is different */
+		if (roc_model_is_cn9k())
+			lf_cfg->ipsec_cfg0.lenm1_max = NIX_CN9K_MAX_HW_FRS - 1;
+		else
+			lf_cfg->ipsec_cfg0.lenm1_max = NIX_RPM_MAX_HW_FRS - 1;
+		lf_cfg->ipsec_cfg1.sa_idx_max = inl_dev->ipsec_in_max_spi;
+		lf_cfg->ipsec_cfg0.sa_pow2_size =
+			plt_log2_u32(inl_dev->inb_sa_sz);
+
+		lf_cfg->ipsec_cfg0.tag_const = 0;
+		lf_cfg->ipsec_cfg0.tt = SSO_TT_ORDERED;
+	} else {
+		lf_cfg->enable = 0;
+	}
+
+	return mbox_process(mbox);
+}
+
+static int
+nix_inl_sso_setup(struct nix_inl_dev *inl_dev)
+{
+	struct sso_lf_alloc_rsp *sso_rsp;
+	struct dev *dev = &inl_dev->dev;
+	uint32_t xaq_cnt, count, aura;
+	uint16_t hwgrp[1] = {0};
+	struct npa_pool_s pool;
+	uintptr_t iova;
+	int rc;
+
+	/* Alloc SSOW LF */
+	rc = sso_lf_alloc(dev, SSO_LF_TYPE_HWS, 1, NULL);
+	if (rc) {
+		plt_err("Failed to alloc SSO HWS, rc=%d", rc);
+		return rc;
+	}
+
+	/* Alloc HWGRP LF */
+	rc = sso_lf_alloc(dev, SSO_LF_TYPE_HWGRP, 1, (void **)&sso_rsp);
+	if (rc) {
+		plt_err("Failed to alloc SSO HWGRP, rc=%d", rc);
+		goto free_ssow;
+	}
+
+	inl_dev->xaq_buf_size = sso_rsp->xaq_buf_size;
+	inl_dev->xae_waes = sso_rsp->xaq_wq_entries;
+	inl_dev->iue = sso_rsp->in_unit_entries;
+
+	/* Create XAQ pool */
+	xaq_cnt = XAQ_CACHE_CNT;
+	xaq_cnt += inl_dev->iue / inl_dev->xae_waes;
+	plt_sso_dbg("Configuring %d xaq buffers", xaq_cnt);
+
+	inl_dev->xaq_mem = plt_zmalloc(inl_dev->xaq_buf_size * xaq_cnt,
+				       inl_dev->xaq_buf_size);
+	if (!inl_dev->xaq_mem) {
+		rc = NIX_ERR_NO_MEM;
+		plt_err("Failed to alloc xaq buf mem");
+		goto free_sso;
+	}
+
+	memset(&pool, 0, sizeof(struct npa_pool_s));
+	pool.nat_align = 1;
+	rc = roc_npa_pool_create(&inl_dev->xaq_aura, inl_dev->xaq_buf_size,
+				 xaq_cnt, NULL, &pool);
+	if (rc) {
+		plt_err("Failed to alloc aura for XAQ, rc=%d", rc);
+		goto free_mem;
+	}
+
+	/* Fill the XAQ buffers */
+	iova = (uint64_t)inl_dev->xaq_mem;
+	for (count = 0; count < xaq_cnt; count++) {
+		roc_npa_aura_op_free(inl_dev->xaq_aura, 0, iova);
+		iova += inl_dev->xaq_buf_size;
+	}
+	roc_npa_aura_op_range_set(inl_dev->xaq_aura, (uint64_t)inl_dev->xaq_mem,
+				  iova);
+
+	aura = roc_npa_aura_handle_to_aura(inl_dev->xaq_aura);
+
+	/* Setup xaq for hwgrps */
+	rc = sso_hwgrp_alloc_xaq(dev, aura, 1);
+	if (rc) {
+		plt_err("Failed to setup hwgrp xaq aura, rc=%d", rc);
+		goto destroy_pool;
+	}
+
+	/* Register SSO, SSOW error and work irq's */
+	rc = nix_inl_sso_register_irqs(inl_dev);
+	if (rc) {
+		plt_err("Failed to register sso irq's, rc=%d", rc);
+		goto release_xaq;
+	}
+
+	/* Setup hwgrp->hws link */
+	sso_hws_link_modify(0, inl_dev->ssow_base, NULL, hwgrp, 1, true);
+
+	/* Enable HWGRP */
+	plt_write64(0x1, inl_dev->sso_base + SSO_LF_GGRP_QCTL);
+
+	return 0;
+
+release_xaq:
+	sso_hwgrp_release_xaq(&inl_dev->dev, 1);
+destroy_pool:
+	roc_npa_pool_destroy(inl_dev->xaq_aura);
+	inl_dev->xaq_aura = 0;
+free_mem:
+	plt_free(inl_dev->xaq_mem);
+	inl_dev->xaq_mem = NULL;
+free_sso:
+	sso_lf_free(dev, SSO_LF_TYPE_HWGRP, 1);
+free_ssow:
+	sso_lf_free(dev, SSO_LF_TYPE_HWS, 1);
+	return rc;
+}
+
+static int
+nix_inl_sso_release(struct nix_inl_dev *inl_dev)
+{
+	uint16_t hwgrp[1] = {0};
+
+	/* Disable HWGRP */
+	plt_write64(0, inl_dev->sso_base + SSO_LF_GGRP_QCTL);
+
+	/* Unregister SSO/SSOW IRQ's */
+	nix_inl_sso_unregister_irqs(inl_dev);
+
+	/* Unlink hws */
+	sso_hws_link_modify(0, inl_dev->ssow_base, NULL, hwgrp, 1, false);
+
+	/* Release XAQ aura */
+	sso_hwgrp_release_xaq(&inl_dev->dev, 1);
+
+	/* Free SSO, SSOW LF's */
+	sso_lf_free(&inl_dev->dev, SSO_LF_TYPE_HWS, 1);
+	sso_lf_free(&inl_dev->dev, SSO_LF_TYPE_HWGRP, 1);
+
+	return 0;
+}
+
+static int
+nix_inl_nix_setup(struct nix_inl_dev *inl_dev)
+{
+	uint16_t ipsec_in_max_spi = inl_dev->ipsec_in_max_spi;
+	struct dev *dev = &inl_dev->dev;
+	struct mbox *mbox = dev->mbox;
+	struct nix_lf_alloc_rsp *rsp;
+	struct nix_lf_alloc_req *req;
+	size_t inb_sa_sz;
+	int rc = -ENOSPC;
+
+	/* Alloc NIX LF needed for single RQ */
+	req = mbox_alloc_msg_nix_lf_alloc(mbox);
+	if (req == NULL)
+		return rc;
+	req->rq_cnt = 1;
+	req->sq_cnt = 1;
+	req->cq_cnt = 1;
+	/* XQESZ is W16 */
+	req->xqe_sz = NIX_XQESZ_W16;
+	/* RSS size does not matter as this RQ is only for UCAST_IPSEC action */
+	req->rss_sz = ROC_NIX_RSS_RETA_SZ_64;
+	req->rss_grps = ROC_NIX_RSS_GRPS;
+	req->npa_func = idev_npa_pffunc_get();
+	req->sso_func = dev->pf_func;
+	req->rx_cfg = NIX_INL_LF_RX_CFG;
+	req->flags = NIX_LF_RSS_TAG_LSB_AS_ADDER;
+
+	rc = mbox_process_msg(mbox, (void *)&rsp);
+	if (rc) {
+		plt_err("Failed to alloc lf, rc=%d", rc);
+		return rc;
+	}
+
+	inl_dev->lf_tx_stats = rsp->lf_tx_stats;
+	inl_dev->lf_rx_stats = rsp->lf_rx_stats;
+	inl_dev->qints = rsp->qints;
+	inl_dev->cints = rsp->cints;
+
+	/* Register nix interrupts */
+	rc = nix_inl_nix_register_irqs(inl_dev);
+	if (rc) {
+		plt_err("Failed to register nix irq's, rc=%d", rc);
+		goto lf_free;
+	}
+
+	/* CN9K SA is different */
+	if (roc_model_is_cn9k())
+		inb_sa_sz = ROC_NIX_INL_ONF_IPSEC_INB_SA_SZ;
+	else
+		inb_sa_sz = ROC_NIX_INL_OT_IPSEC_INB_SA_SZ;
+
+	/* Alloc contiguous memory for Inbound SA's */
+	inl_dev->inb_sa_sz = inb_sa_sz;
+	inl_dev->inb_sa_base = plt_zmalloc(inb_sa_sz * ipsec_in_max_spi,
+					   ROC_NIX_INL_SA_BASE_ALIGN);
+	if (!inl_dev->inb_sa_base) {
+		plt_err("Failed to allocate memory for Inbound SA");
+		rc = -ENOMEM;
+		goto unregister_irqs;
+	}
+
+	/* Setup device specific inb SA table */
+	rc = nix_inl_nix_ipsec_cfg(inl_dev, true);
+	if (rc) {
+		plt_err("Failed to setup NIX Inbound SA conf, rc=%d", rc);
+		goto free_mem;
+	}
+
+	return 0;
+free_mem:
+	plt_free(inl_dev->inb_sa_base);
+	inl_dev->inb_sa_base = NULL;
+unregister_irqs:
+	nix_inl_nix_unregister_irqs(inl_dev);
+lf_free:
+	mbox_alloc_msg_nix_lf_free(mbox);
+	rc |= mbox_process(mbox);
+	return rc;
+}
+
+static int
+nix_inl_nix_release(struct nix_inl_dev *inl_dev)
+{
+	struct dev *dev = &inl_dev->dev;
+	struct mbox *mbox = dev->mbox;
+	struct nix_lf_free_req *req;
+	struct ndc_sync_op *ndc_req;
+	int rc = -ENOSPC;
+
+	/* Disable Inbound processing */
+	rc = nix_inl_nix_ipsec_cfg(inl_dev, false);
+	if (rc)
+		plt_err("Failed to disable Inbound IPSec, rc=%d", rc);
+
+	/* Sync NDC-NIX for LF */
+	ndc_req = mbox_alloc_msg_ndc_sync_op(mbox);
+	if (ndc_req == NULL)
+		return rc;
+	ndc_req->nix_lf_rx_sync = 1;
+	rc = mbox_process(mbox);
+	if (rc)
+		plt_err("Error on NDC-NIX-RX LF sync, rc %d", rc);
+
+	/* Unregister IRQs */
+	nix_inl_nix_unregister_irqs(inl_dev);
+
+	/* By default all associated mcam rules are deleted */
+	req = mbox_alloc_msg_nix_lf_free(mbox);
+	if (req == NULL)
+		return -ENOSPC;
+
+	return mbox_process(mbox);
+}
+
+static int
+nix_inl_lf_attach(struct nix_inl_dev *inl_dev)
+{
+	struct msix_offset_rsp *msix_rsp;
+	struct dev *dev = &inl_dev->dev;
+	struct mbox *mbox = dev->mbox;
+	struct rsrc_attach_req *req;
+	uint64_t nix_blkaddr;
+	int rc = -ENOSPC;
+
+	req = mbox_alloc_msg_attach_resources(mbox);
+	if (req == NULL)
+		return rc;
+	req->modify = true;
+	/* Attach 1 NIXLF, SSO HWS and SSO HWGRP */
+	req->nixlf = true;
+	req->ssow = 1;
+	req->sso = 1;
+
+	rc = mbox_process(dev->mbox);
+	if (rc)
+		return rc;
+
+	/* Get MSIX vector offsets */
+	mbox_alloc_msg_msix_offset(mbox);
+	rc = mbox_process_msg(dev->mbox, (void **)&msix_rsp);
+	if (rc)
+		return rc;
+
+	inl_dev->nix_msixoff = msix_rsp->nix_msixoff;
+	inl_dev->ssow_msixoff = msix_rsp->ssow_msixoff[0];
+	inl_dev->sso_msixoff = msix_rsp->sso_msixoff[0];
+
+	nix_blkaddr = nix_get_blkaddr(dev);
+	inl_dev->is_nix1 = (nix_blkaddr == RVU_BLOCK_ADDR_NIX1);
+
+	/* Update base addresses for LF's */
+	inl_dev->nix_base = dev->bar2 + (nix_blkaddr << 20);
+	inl_dev->ssow_base = dev->bar2 + (RVU_BLOCK_ADDR_SSOW << 20);
+	inl_dev->sso_base = dev->bar2 + (RVU_BLOCK_ADDR_SSO << 20);
+	return 0;
+}
+
+static int
+nix_inl_lf_detach(struct nix_inl_dev *inl_dev)
+{
+	struct dev *dev = &inl_dev->dev;
+	struct mbox *mbox = dev->mbox;
+	struct rsrc_detach_req *req;
+	int rc = -ENOSPC;
+
+	req = mbox_alloc_msg_detach_resources(mbox);
+	if (req == NULL)
+		return rc;
+	req->partial = true;
+	req->nixlf = true;
+	req->ssow = true;
+	req->sso = true;
+
+	return mbox_process(dev->mbox);
+}
+
+int
+roc_nix_inl_dev_init(struct roc_nix_inl_dev *roc_inl_dev)
+{
+	struct plt_pci_device *pci_dev;
+	struct nix_inl_dev *inl_dev;
+	struct idev_cfg *idev;
+	int rc;
+
+	pci_dev = roc_inl_dev->pci_dev;
+
+	/* Skip probe if already done */
+	idev = idev_get_cfg();
+	if (idev == NULL)
+		return -ENOTSUP;
+
+	if (idev->nix_inl_dev) {
+		plt_info("Skipping device %s, inline device already probed",
+			 pci_dev->name);
+		return -EEXIST;
+	}
+
+	PLT_STATIC_ASSERT(sizeof(struct nix_inl_dev) <= ROC_NIX_INL_MEM_SZ);
+
+	inl_dev = (struct nix_inl_dev *)roc_inl_dev->reserved;
+	memset(inl_dev, 0, sizeof(*inl_dev));
+
+	inl_dev->pci_dev = pci_dev;
+	inl_dev->ipsec_in_max_spi = roc_inl_dev->ipsec_in_max_spi;
+	inl_dev->selftest = roc_inl_dev->selftest;
+
+	/* Initialize base device */
+	rc = dev_init(&inl_dev->dev, pci_dev);
+	if (rc) {
+		plt_err("Failed to init roc device");
+		goto error;
+	}
+
+	/* Attach LF resources */
+	rc = nix_inl_lf_attach(inl_dev);
+	if (rc) {
+		plt_err("Failed to attach LF resources, rc=%d", rc);
+		goto dev_cleanup;
+	}
+
+	/* Setup NIX LF */
+	rc = nix_inl_nix_setup(inl_dev);
+	if (rc)
+		goto lf_detach;
+
+	/* Setup SSO LF */
+	rc = nix_inl_sso_setup(inl_dev);
+	if (rc)
+		goto nix_release;
+
+	idev->nix_inl_dev = inl_dev;
+
+	/* Perform selftest if asked for */
+	if (inl_dev->selftest) {
+		rc = nix_inl_selftest();
+		if (rc)
+			goto nix_release;
+	}
+
+	return 0;
+nix_release:
+	rc |= nix_inl_nix_release(inl_dev);
+lf_detach:
+	rc |= nix_inl_lf_detach(inl_dev);
+dev_cleanup:
+	rc |= dev_fini(&inl_dev->dev, pci_dev);
+error:
+	return rc;
+}
+
+int
+roc_nix_inl_dev_fini(struct roc_nix_inl_dev *roc_inl_dev)
+{
+	struct plt_pci_device *pci_dev;
+	struct nix_inl_dev *inl_dev;
+	struct idev_cfg *idev;
+	int rc;
+
+	idev = idev_get_cfg();
+	if (idev == NULL)
+		return 0;
+
+	if (!idev->nix_inl_dev ||
+	    PLT_PTR_DIFF(roc_inl_dev->reserved, idev->nix_inl_dev))
+		return 0;
+
+	inl_dev = idev->nix_inl_dev;
+	pci_dev = inl_dev->pci_dev;
+
+	/* Release SSO */
+	rc = nix_inl_sso_release(inl_dev);
+
+	/* Release NIX */
+	rc |= nix_inl_nix_release(inl_dev);
+
+	/* Detach LF's */
+	rc |= nix_inl_lf_detach(inl_dev);
+
+	/* Cleanup mbox */
+	rc |= dev_fini(&inl_dev->dev, pci_dev);
+	if (rc)
+		return rc;
+
+	idev->nix_inl_dev = NULL;
+	return 0;
+}
diff --git a/drivers/common/cnxk/roc_nix_inl_priv.h b/drivers/common/cnxk/roc_nix_inl_priv.h
index f424009..ab38062 100644
--- a/drivers/common/cnxk/roc_nix_inl_priv.h
+++ b/drivers/common/cnxk/roc_nix_inl_priv.h
@@ -54,4 +54,6 @@ void nix_inl_sso_unregister_irqs(struct nix_inl_dev *inl_dev);
 int nix_inl_nix_register_irqs(struct nix_inl_dev *inl_dev);
 void nix_inl_nix_unregister_irqs(struct nix_inl_dev *inl_dev);
 
+uint16_t nix_inl_dev_pffunc_get(void);
+
 #endif /* _ROC_NIX_INL_PRIV_H_ */
diff --git a/drivers/common/cnxk/roc_platform.h b/drivers/common/cnxk/roc_platform.h
index 177db3d..241655b 100644
--- a/drivers/common/cnxk/roc_platform.h
+++ b/drivers/common/cnxk/roc_platform.h
@@ -37,6 +37,7 @@
 #define PLT_MEMZONE_NAMESIZE	 RTE_MEMZONE_NAMESIZE
 #define PLT_STD_C11		 RTE_STD_C11
 #define PLT_PTR_ADD		 RTE_PTR_ADD
+#define PLT_PTR_DIFF		 RTE_PTR_DIFF
 #define PLT_MAX_RXTX_INTR_VEC_ID RTE_MAX_RXTX_INTR_VEC_ID
 #define PLT_INTR_VEC_RXTX_OFFSET RTE_INTR_VEC_RXTX_OFFSET
 #define PLT_MIN			 RTE_MIN
@@ -77,6 +78,7 @@
 #define plt_cpu_to_be_64 rte_cpu_to_be_64
 #define plt_be_to_cpu_64 rte_be_to_cpu_64
 
+#define plt_align32pow2	    rte_align32pow2
 #define plt_align32prevpow2 rte_align32prevpow2
 
 #define plt_bitmap			rte_bitmap
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index 5dbb21c..3a35233 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -100,6 +100,9 @@ INTERNAL {
 	roc_nix_get_pf_func;
 	roc_nix_get_vf;
 	roc_nix_get_vwqe_interval;
+	roc_nix_inl_dev_dump;
+	roc_nix_inl_dev_fini;
+	roc_nix_inl_dev_init;
 	roc_nix_is_lbk;
 	roc_nix_is_pf;
 	roc_nix_is_sdp;
-- 
2.8.4


Thread overview: 91+ messages
2021-09-02  2:14 [dpdk-dev] [PATCH 00/27] net/cnxk: support for inline ipsec Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 01/27] common/cnxk: add security support for cn9k fast path Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 02/27] common/cnxk: add helper API to dump cpt parse header Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 03/27] common/cnxk: allow reuse of SSO API for inline dev Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 04/27] common/cnxk: change nix debug API and queue API interface Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 05/27] common/cnxk: add nix inline device irq API Nithin Dabilpuram
2021-09-02  2:14 ` Nithin Dabilpuram [this message]
2021-09-02  2:14 ` [dpdk-dev] [PATCH 07/27] common/cnxk: add nix inline inbound and outbound support API Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 08/27] common/cnxk: dump cpt lf registers on error intr Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 09/27] common/cnxk: align cpt lf enable/disable sequence Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 10/27] common/cnxk: restore nix sqb pool limit before destroy Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 11/27] common/cnxk: add cq enable support in nix Tx path Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 12/27] common/cnxk: setup aura bp conf based on nix Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 13/27] common/cnxk: add anti-replay check implementation for cn9k Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 14/27] common/cnxk: add inline IPsec support in rte flow Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 15/27] net/cnxk: add inline security support for cn9k Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 16/27] net/cnxk: add inline security support for cn10k Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 17/27] net/cnxk: add cn9k Rx support for security offload Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 18/27] net/cnxk: add cn9k Tx " Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 19/27] net/cnxk: add cn10k Rx " Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 20/27] net/cnxk: add cn10k Tx " Nithin Dabilpuram
2021-09-02  2:14 ` [dpdk-dev] [PATCH 21/27] net/cnxk: add cn9k anti replay " Nithin Dabilpuram
2021-09-02  2:15 ` [dpdk-dev] [PATCH 22/27] net/cnxk: add cn10k IPsec transport mode support Nithin Dabilpuram
2021-09-02  2:15 ` [dpdk-dev] [PATCH 23/27] net/cnxk: update ethertype for mixed IPsec tunnel versions Nithin Dabilpuram
2021-09-02  2:15 ` [dpdk-dev] [PATCH 24/27] net/cnxk: allow zero udp6 checksum for non inline device Nithin Dabilpuram
2021-09-02  2:15 ` [dpdk-dev] [PATCH 25/27] net/cnxk: add crypto capabilities for AES CBC and HMAC SHA1 Nithin Dabilpuram
2021-09-02  2:15 ` [dpdk-dev] [PATCH 26/27] net/cnxk: add devargs for configuring channel mask Nithin Dabilpuram
2021-09-02  2:15 ` [dpdk-dev] [PATCH 27/27] net/cnxk: reflect globally enabled offloads in queue conf Nithin Dabilpuram
2021-09-29 12:44 ` [dpdk-dev] [PATCH 00/27] net/cnxk: support for inline ipsec Jerin Jacob
2021-09-30 17:00 ` [dpdk-dev] [PATCH v2 00/28] " Nithin Dabilpuram
2021-09-30 17:00   ` [dpdk-dev] [PATCH v2 01/28] common/cnxk: support cn9k fast path security session Nithin Dabilpuram
2021-09-30 17:00   ` [dpdk-dev] [PATCH v2 02/28] common/cnxk: support CPT parse header dump Nithin Dabilpuram
2021-09-30 17:00   ` [dpdk-dev] [PATCH v2 03/28] common/cnxk: allow reuse of SSO API for inline dev Nithin Dabilpuram
2021-09-30 17:00   ` [dpdk-dev] [PATCH v2 04/28] common/cnxk: change NIX debug API and queue API interface Nithin Dabilpuram
2021-09-30 17:00   ` [dpdk-dev] [PATCH v2 05/28] common/cnxk: support NIX inline device IRQ Nithin Dabilpuram
2021-09-30 17:00   ` [dpdk-dev] [PATCH v2 06/28] common/cnxk: support NIX inline device init and fini Nithin Dabilpuram
2021-09-30 17:00   ` [dpdk-dev] [PATCH v2 07/28] common/cnxk: support NIX inline inbound and outbound setup Nithin Dabilpuram
2021-09-30 17:00   ` [dpdk-dev] [PATCH v2 08/28] common/cnxk: disable CQ drop when inline inbound is enabled Nithin Dabilpuram
2021-09-30 17:00   ` [dpdk-dev] [PATCH v2 09/28] common/cnxk: dump CPT LF registers on error intr Nithin Dabilpuram
2021-09-30 17:00   ` [dpdk-dev] [PATCH v2 10/28] common/cnxk: align CPT LF enable/disable sequence Nithin Dabilpuram
2021-09-30 17:00   ` [dpdk-dev] [PATCH v2 11/28] common/cnxk: restore NIX sqb pool limit before destroy Nithin Dabilpuram
2021-09-30 17:00   ` [dpdk-dev] [PATCH v2 12/28] common/cnxk: add CQ enable support in NIX Tx path Nithin Dabilpuram
2021-09-30 17:00   ` [dpdk-dev] [PATCH v2 13/28] common/cnxk: setup aura BP conf based on nix Nithin Dabilpuram
2021-09-30 17:00   ` [dpdk-dev] [PATCH v2 14/28] common/cnxk: support anti-replay check in SW for cn9k Nithin Dabilpuram
2021-09-30 17:01   ` [dpdk-dev] [PATCH v2 15/28] common/cnxk: support inline IPsec rte flow action Nithin Dabilpuram
2021-09-30 17:01   ` [dpdk-dev] [PATCH v2 16/28] net/cnxk: support inline security setup for cn9k Nithin Dabilpuram
2021-09-30 17:01   ` [dpdk-dev] [PATCH v2 17/28] net/cnxk: support inline security setup for cn10k Nithin Dabilpuram
2021-09-30 17:01   ` [dpdk-dev] [PATCH v2 18/28] net/cnxk: support Rx security offload on cn9k Nithin Dabilpuram
2021-09-30 17:01   ` [dpdk-dev] [PATCH v2 19/28] net/cnxk: support Tx " Nithin Dabilpuram
2021-09-30 17:01   ` [dpdk-dev] [PATCH v2 20/28] net/cnxk: support Rx security offload on cn10k Nithin Dabilpuram
2021-09-30 17:01   ` [dpdk-dev] [PATCH v2 21/28] net/cnxk: support Tx " Nithin Dabilpuram
2021-09-30 17:01   ` [dpdk-dev] [PATCH v2 22/28] net/cnxk: support IPsec anti replay in cn9k Nithin Dabilpuram
2021-09-30 17:01   ` [dpdk-dev] [PATCH v2 23/28] net/cnxk: support IPsec transport mode in cn10k Nithin Dabilpuram
2021-09-30 17:01   ` [dpdk-dev] [PATCH v2 24/28] net/cnxk: update ethertype for mixed IPsec tunnel versions Nithin Dabilpuram
2021-09-30 17:01   ` [dpdk-dev] [PATCH v2 25/28] net/cnxk: allow zero udp6 checksum for non inline device Nithin Dabilpuram
2021-09-30 17:01   ` [dpdk-dev] [PATCH v2 26/28] net/cnxk: add crypto capabilities for AES CBC and HMAC SHA1 Nithin Dabilpuram
2021-09-30 17:01   ` [dpdk-dev] [PATCH v2 27/28] net/cnxk: support configuring channel mask via devargs Nithin Dabilpuram
2021-09-30 17:01   ` [dpdk-dev] [PATCH v2 28/28] net/cnxk: reflect globally enabled offloads in queue conf Nithin Dabilpuram
2021-10-01  5:37   ` [dpdk-dev] [PATCH v2 00/28] net/cnxk: support for inline ipsec Jerin Jacob
2021-10-01 13:39 ` [dpdk-dev] [PATCH v3 " Nithin Dabilpuram
2021-10-01 13:39   ` [dpdk-dev] [PATCH v3 01/28] common/cnxk: support cn9k fast path security session Nithin Dabilpuram
2021-10-01 13:39   ` [dpdk-dev] [PATCH v3 02/28] common/cnxk: support CPT parse header dump Nithin Dabilpuram
2021-10-01 13:39   ` [dpdk-dev] [PATCH v3 03/28] common/cnxk: allow reuse of SSO API for inline dev Nithin Dabilpuram
2021-10-01 13:39   ` [dpdk-dev] [PATCH v3 04/28] common/cnxk: change NIX debug API and queue API interface Nithin Dabilpuram
2021-10-01 13:39   ` [dpdk-dev] [PATCH v3 05/28] common/cnxk: support NIX inline device IRQ Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 06/28] common/cnxk: support NIX inline device init and fini Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 07/28] common/cnxk: support NIX inline inbound and outbound setup Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 08/28] common/cnxk: disable CQ drop when inline inbound is enabled Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 09/28] common/cnxk: dump CPT LF registers on error intr Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 10/28] common/cnxk: align CPT LF enable/disable sequence Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 11/28] common/cnxk: restore NIX sqb pool limit before destroy Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 12/28] common/cnxk: add CQ enable support in NIX Tx path Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 13/28] common/cnxk: setup aura BP conf based on nix Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 14/28] common/cnxk: support anti-replay check in SW for cn9k Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 15/28] common/cnxk: support inline IPsec rte flow action Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 16/28] net/cnxk: support inline security setup for cn9k Nithin Dabilpuram
2021-10-06 16:21     ` Ferruh Yigit
2021-10-06 16:44       ` Nithin Kumar Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 17/28] net/cnxk: support inline security setup for cn10k Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 18/28] net/cnxk: support Rx security offload on cn9k Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 19/28] net/cnxk: support Tx " Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 20/28] net/cnxk: support Rx security offload on cn10k Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 21/28] net/cnxk: support Tx " Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 22/28] net/cnxk: support IPsec anti replay in cn9k Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 23/28] net/cnxk: support IPsec transport mode in cn10k Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 24/28] net/cnxk: update ethertype for mixed IPsec tunnel versions Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 25/28] net/cnxk: allow zero udp6 checksum for non inline device Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 26/28] net/cnxk: add crypto capabilities for AES CBC and HMAC SHA1 Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 27/28] net/cnxk: support configuring channel mask via devargs Nithin Dabilpuram
2021-10-01 13:40   ` [dpdk-dev] [PATCH v3 28/28] net/cnxk: reflect globally enabled offloads in queue conf Nithin Dabilpuram
2021-10-02 13:49   ` [dpdk-dev] [PATCH v3 00/28] net/cnxk: support for inline ipsec Jerin Jacob
