From: Nithin Dabilpuram <ndabilpuram@marvell.com>
To: Nithin Dabilpuram <ndabilpuram@marvell.com>,
Kiran Kumar K <kirankumark@marvell.com>,
Sunil Kumar Kori <skori@marvell.com>,
Satha Rao <skoteshwar@marvell.com>, Ray Kinsella <mdr@ashroe.eu>
Cc: <jerinj@marvell.com>, <dev@dpdk.org>
Subject: [PATCH 10/23] common/cnxk: support zero aura for inline inbound meta
Date: Wed, 10 Aug 2022 00:18:54 +0530 [thread overview]
Message-ID: <20220809184908.24030-10-ndabilpuram@marvell.com> (raw)
In-Reply-To: <20220809184908.24030-1-ndabilpuram@marvell.com>
Add support to create zero aura for inline inbound meta packets when the
platform supports it. Aura zero will hold as many buffers as all the
available packet pools combined, with each buffer sized to accommodate
384B in the best case, to store meta packets coming from Inline IPsec.
Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
drivers/common/cnxk/roc_idev.c | 10 ++
drivers/common/cnxk/roc_idev.h | 1 +
drivers/common/cnxk/roc_idev_priv.h | 9 ++
drivers/common/cnxk/roc_nix.h | 1 +
drivers/common/cnxk/roc_nix_inl.c | 211 +++++++++++++++++++++++++++++++++
drivers/common/cnxk/roc_nix_inl.h | 8 ++
drivers/common/cnxk/roc_nix_inl_dev.c | 2 +
drivers/common/cnxk/roc_nix_inl_priv.h | 4 +
drivers/common/cnxk/roc_nix_priv.h | 1 +
drivers/common/cnxk/roc_nix_queue.c | 19 +++
drivers/common/cnxk/version.map | 4 +
11 files changed, 270 insertions(+)
diff --git a/drivers/common/cnxk/roc_idev.c b/drivers/common/cnxk/roc_idev.c
index a08c7ce..4d2eff9 100644
--- a/drivers/common/cnxk/roc_idev.c
+++ b/drivers/common/cnxk/roc_idev.c
@@ -241,3 +241,13 @@ idev_sso_set(struct roc_sso *sso)
if (idev != NULL)
__atomic_store_n(&idev->sso, sso, __ATOMIC_RELEASE);
}
+
+uint64_t
+roc_idev_nix_inl_meta_aura_get(void)
+{
+ struct idev_cfg *idev = idev_get_cfg();
+
+ if (idev != NULL)
+ return idev->inl_cfg.meta_aura;
+ return 0;
+}
diff --git a/drivers/common/cnxk/roc_idev.h b/drivers/common/cnxk/roc_idev.h
index 16793c2..926aac0 100644
--- a/drivers/common/cnxk/roc_idev.h
+++ b/drivers/common/cnxk/roc_idev.h
@@ -16,5 +16,6 @@ struct roc_cpt *__roc_api roc_idev_cpt_get(void);
void __roc_api roc_idev_cpt_set(struct roc_cpt *cpt);
struct roc_nix *__roc_api roc_idev_npa_nix_get(void);
+uint64_t __roc_api roc_idev_nix_inl_meta_aura_get(void);
#endif /* _ROC_IDEV_H_ */
diff --git a/drivers/common/cnxk/roc_idev_priv.h b/drivers/common/cnxk/roc_idev_priv.h
index 46eebff..315cc6f 100644
--- a/drivers/common/cnxk/roc_idev_priv.h
+++ b/drivers/common/cnxk/roc_idev_priv.h
@@ -10,6 +10,14 @@ struct npa_lf;
struct roc_bphy;
struct roc_cpt;
struct nix_inl_dev;
+
+struct idev_nix_inl_cfg {
+ uint64_t meta_aura;
+ uint32_t nb_bufs;
+ uint32_t buf_sz;
+ uint32_t refs;
+};
+
struct idev_cfg {
uint16_t sso_pf_func;
uint16_t npa_pf_func;
@@ -23,6 +31,7 @@ struct idev_cfg {
struct roc_cpt *cpt;
struct roc_sso *sso;
struct nix_inl_dev *nix_inl_dev;
+ struct idev_nix_inl_cfg inl_cfg;
plt_spinlock_t nix_inl_dev_lock;
};
diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index 3ad3a7e..5f5f5f9 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -315,6 +315,7 @@ struct roc_nix_rq {
bool spb_drop_ena;
/* End of Input parameters */
struct roc_nix *roc_nix;
+ uint64_t meta_aura_handle;
uint16_t inl_dev_refs;
};
diff --git a/drivers/common/cnxk/roc_nix_inl.c b/drivers/common/cnxk/roc_nix_inl.c
index c621867..507a153 100644
--- a/drivers/common/cnxk/roc_nix_inl.c
+++ b/drivers/common/cnxk/roc_nix_inl.c
@@ -6,6 +6,7 @@
#include "roc_priv.h"
uint32_t soft_exp_consumer_cnt;
+roc_nix_inl_meta_pool_cb_t meta_pool_cb;
PLT_STATIC_ASSERT(ROC_NIX_INL_ONF_IPSEC_INB_SA_SZ ==
1UL << ROC_NIX_INL_ONF_IPSEC_INB_SA_SZ_LOG2);
@@ -19,6 +20,155 @@ PLT_STATIC_ASSERT(ROC_NIX_INL_OT_IPSEC_OUTB_SA_SZ ==
1UL << ROC_NIX_INL_OT_IPSEC_OUTB_SA_SZ_LOG2);
static int
+nix_inl_meta_aura_destroy(void)
+{
+ struct idev_cfg *idev = idev_get_cfg();
+ struct idev_nix_inl_cfg *inl_cfg;
+ int rc;
+
+ if (!idev)
+ return -EINVAL;
+
+ inl_cfg = &idev->inl_cfg;
+ /* Destroy existing Meta aura */
+ if (inl_cfg->meta_aura) {
+ uint64_t avail, limit;
+
+ /* Check if all buffers are back to pool */
+ avail = roc_npa_aura_op_available(inl_cfg->meta_aura);
+ limit = roc_npa_aura_op_limit_get(inl_cfg->meta_aura);
+ if (avail != limit)
+ plt_warn("Not all buffers are back to meta pool,"
+ " %" PRIu64 " != %" PRIu64, avail, limit);
+
+ rc = meta_pool_cb(&inl_cfg->meta_aura, 0, 0, true);
+ if (rc) {
+ plt_err("Failed to destroy meta aura, rc=%d", rc);
+ return rc;
+ }
+ inl_cfg->meta_aura = 0;
+ inl_cfg->buf_sz = 0;
+ inl_cfg->nb_bufs = 0;
+ inl_cfg->refs = 0;
+ }
+
+ return 0;
+}
+
+static int
+nix_inl_meta_aura_create(struct idev_cfg *idev, uint16_t first_skip)
+{
+ uint64_t mask = BIT_ULL(ROC_NPA_BUF_TYPE_PACKET_IPSEC);
+ struct idev_nix_inl_cfg *inl_cfg;
+ struct nix_inl_dev *nix_inl_dev;
+ uint32_t nb_bufs, buf_sz;
+ int rc;
+
+ inl_cfg = &idev->inl_cfg;
+ nix_inl_dev = idev->nix_inl_dev;
+
+ /* Override meta buf count from devargs if present */
+ if (nix_inl_dev && nix_inl_dev->nb_meta_bufs)
+ nb_bufs = nix_inl_dev->nb_meta_bufs;
+ else
+ nb_bufs = roc_npa_buf_type_limit_get(mask);
+
+ /* Override meta buf size from devargs if present */
+ if (nix_inl_dev && nix_inl_dev->meta_buf_sz)
+ buf_sz = nix_inl_dev->meta_buf_sz;
+ else
+ buf_sz = first_skip + NIX_INL_META_SIZE;
+
+ /* Allocate meta aura */
+ rc = meta_pool_cb(&inl_cfg->meta_aura, buf_sz, nb_bufs, false);
+ if (rc) {
+ plt_err("Failed to allocate meta aura, rc=%d", rc);
+ return rc;
+ }
+
+ inl_cfg->buf_sz = buf_sz;
+ inl_cfg->nb_bufs = nb_bufs;
+ return 0;
+}
+
+int
+roc_nix_inl_meta_aura_check(struct roc_nix_rq *rq)
+{
+ struct idev_cfg *idev = idev_get_cfg();
+ struct idev_nix_inl_cfg *inl_cfg;
+ uint32_t actual, expected;
+ uint64_t mask, type_mask;
+ int rc;
+
+ if (!idev || !meta_pool_cb)
+ return -EFAULT;
+ inl_cfg = &idev->inl_cfg;
+
+ /* Create meta aura if not present */
+ if (!inl_cfg->meta_aura) {
+ rc = nix_inl_meta_aura_create(idev, rq->first_skip);
+ if (rc)
+ return rc;
+ }
+
+ /* Validate if we have enough meta buffers */
+ mask = BIT_ULL(ROC_NPA_BUF_TYPE_PACKET_IPSEC);
+ expected = roc_npa_buf_type_limit_get(mask);
+ actual = inl_cfg->nb_bufs;
+
+ if (actual < expected) {
+ plt_err("Insufficient buffers in meta aura %u < %u (expected)",
+ actual, expected);
+ return -EIO;
+ }
+
+ /* Validate if we have enough space for meta buffer */
+ if (rq->first_skip + NIX_INL_META_SIZE > inl_cfg->buf_sz) {
+ plt_err("Meta buffer size %u not sufficient to meet RQ first skip %u",
+ inl_cfg->buf_sz, rq->first_skip);
+ return -EIO;
+ }
+
+ /* Validate if we have enough VWQE buffers */
+ if (rq->vwqe_ena) {
+ actual = roc_npa_aura_op_limit_get(rq->vwqe_aura_handle);
+
+ type_mask = roc_npa_buf_type_mask(rq->vwqe_aura_handle);
+ if (type_mask & BIT_ULL(ROC_NPA_BUF_TYPE_VWQE_IPSEC) &&
+ type_mask & BIT_ULL(ROC_NPA_BUF_TYPE_VWQE)) {
+ /* VWQE aura shared b/w Inline enabled and non Inline
+ * enabled ports needs enough buffers to store all the
+ * packet buffers, one per vwqe.
+ */
+ mask = (BIT_ULL(ROC_NPA_BUF_TYPE_PACKET_IPSEC) |
+ BIT_ULL(ROC_NPA_BUF_TYPE_PACKET));
+ expected = roc_npa_buf_type_limit_get(mask);
+
+ if (actual < expected) {
+ plt_err("VWQE aura shared b/w Inline inbound and non-Inline inbound "
+ "ports needs vwqe bufs(%u) minimum of all pkt bufs (%u)",
+ actual, expected);
+ return -EIO;
+ }
+ } else {
+ /* VWQE aura not shared b/w Inline and non Inline ports have relaxed
+ * requirement of match all the meta buffers.
+ */
+ expected = inl_cfg->nb_bufs;
+
+ if (actual < expected) {
+ plt_err("VWQE aura not shared b/w Inline inbound and non-Inline "
+ "ports needs vwqe bufs(%u) minimum of all meta bufs (%u)",
+ actual, expected);
+ return -EIO;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int
nix_inl_inb_sa_tbl_setup(struct roc_nix *roc_nix)
{
uint32_t ipsec_in_min_spi = roc_nix->ipsec_in_min_spi;
@@ -310,6 +460,10 @@ roc_nix_inl_inb_init(struct roc_nix *roc_nix)
if (rc)
return rc;
+ if (!roc_model_is_cn9k() && !roc_errata_nix_no_meta_aura()) {
+ nix->need_meta_aura = true;
+ idev->inl_cfg.refs++;
+ }
nix->inl_inb_ena = true;
return 0;
}
@@ -317,12 +471,22 @@ roc_nix_inl_inb_init(struct roc_nix *roc_nix)
int
roc_nix_inl_inb_fini(struct roc_nix *roc_nix)
{
+ struct idev_cfg *idev = idev_get_cfg();
struct nix *nix = roc_nix_to_nix_priv(roc_nix);
if (!nix->inl_inb_ena)
return 0;
+ if (!idev)
+ return -EFAULT;
+
nix->inl_inb_ena = false;
+ if (nix->need_meta_aura) {
+ nix->need_meta_aura = false;
+ idev->inl_cfg.refs--;
+ if (!idev->inl_cfg.refs)
+ nix_inl_meta_aura_destroy();
+ }
/* Flush Inbound CTX cache entries */
roc_nix_cpt_ctx_cache_sync(roc_nix);
@@ -592,6 +756,7 @@ roc_nix_inl_outb_is_enabled(struct roc_nix *roc_nix)
int
roc_nix_inl_dev_rq_get(struct roc_nix_rq *rq, bool enable)
{
+ struct nix *nix = roc_nix_to_nix_priv(rq->roc_nix);
struct idev_cfg *idev = idev_get_cfg();
int port_id = rq->roc_nix->port_id;
struct nix_inl_dev *inl_dev;
@@ -603,6 +768,10 @@ roc_nix_inl_dev_rq_get(struct roc_nix_rq *rq, bool enable)
if (idev == NULL)
return 0;
+ /* Update meta aura handle in RQ */
+ if (nix->need_meta_aura)
+ rq->meta_aura_handle = roc_npa_zero_aura_handle();
+
inl_dev = idev->nix_inl_dev;
/* Nothing to do if no inline device */
if (!inl_dev)
@@ -705,6 +874,13 @@ roc_nix_inl_dev_rq_get(struct roc_nix_rq *rq, bool enable)
return rc;
}
+ /* Check meta aura */
+ if (enable && nix->need_meta_aura) {
+ rc = roc_nix_inl_meta_aura_check(rq);
+ if (rc)
+ return rc;
+ }
+
inl_rq->inl_dev_refs++;
rq->inl_dev_refs = 1;
return 0;
@@ -724,6 +900,7 @@ roc_nix_inl_dev_rq_put(struct roc_nix_rq *rq)
if (idev == NULL)
return 0;
+ rq->meta_aura_handle = 0;
if (!rq->inl_dev_refs)
return 0;
@@ -779,6 +956,9 @@ roc_nix_inl_rq_ena_dis(struct roc_nix *roc_nix, bool enable)
rc = nix_rq_ena_dis(&inl_dev->dev, inl_rq, enable);
if (rc)
return rc;
+
+ if (enable && nix->need_meta_aura)
+ return roc_nix_inl_meta_aura_check(inl_rq);
}
return 0;
}
@@ -792,6 +972,31 @@ roc_nix_inb_mode_set(struct roc_nix *roc_nix, bool use_inl_dev)
nix->inb_inl_dev = use_inl_dev;
}
+void
+roc_nix_inl_inb_set(struct roc_nix *roc_nix, bool ena)
+{
+ struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+ struct idev_cfg *idev = idev_get_cfg();
+
+ if (!idev)
+ return;
+ /* Need to set here for cases when inbound SA table is
+ * managed outside RoC.
+ */
+ nix->inl_inb_ena = ena;
+ if (!roc_model_is_cn9k() && !roc_errata_nix_no_meta_aura()) {
+ if (ena) {
+ nix->need_meta_aura = true;
+ idev->inl_cfg.refs++;
+ } else if (nix->need_meta_aura) {
+ nix->need_meta_aura = false;
+ idev->inl_cfg.refs--;
+ if (!idev->inl_cfg.refs)
+ nix_inl_meta_aura_destroy();
+ }
+ }
+}
+
int
roc_nix_inl_outb_soft_exp_poll_switch(struct roc_nix *roc_nix, bool poll)
{
@@ -1128,3 +1333,9 @@ roc_nix_inl_dev_unlock(void)
if (idev != NULL)
plt_spinlock_unlock(&idev->nix_inl_dev_lock);
}
+
+void
+roc_nix_inl_meta_pool_cb_register(roc_nix_inl_meta_pool_cb_t cb)
+{
+ meta_pool_cb = cb;
+}
diff --git a/drivers/common/cnxk/roc_nix_inl.h b/drivers/common/cnxk/roc_nix_inl.h
index 702ec01..9911a48 100644
--- a/drivers/common/cnxk/roc_nix_inl.h
+++ b/drivers/common/cnxk/roc_nix_inl.h
@@ -121,6 +121,9 @@ roc_nix_inl_ot_ipsec_outb_sa_sw_rsvd(void *sa)
typedef void (*roc_nix_inl_sso_work_cb_t)(uint64_t *gw, void *args,
uint32_t soft_exp_event);
+typedef int (*roc_nix_inl_meta_pool_cb_t)(uint64_t *aura_handle, uint32_t blk_sz, uint32_t nb_bufs,
+ bool destroy);
+
struct roc_nix_inl_dev {
/* Input parameters */
struct plt_pci_device *pci_dev;
@@ -135,6 +138,8 @@ struct roc_nix_inl_dev {
uint8_t spb_drop_pc;
uint8_t lpb_drop_pc;
bool set_soft_exp_poll;
+ uint32_t nb_meta_bufs;
+ uint32_t meta_buf_sz;
/* End of input parameters */
#define ROC_NIX_INL_MEM_SZ (1280)
@@ -165,6 +170,7 @@ uint32_t __roc_api roc_nix_inl_inb_sa_sz(struct roc_nix *roc_nix,
uintptr_t __roc_api roc_nix_inl_inb_sa_get(struct roc_nix *roc_nix,
bool inl_dev_sa, uint32_t spi);
void __roc_api roc_nix_inb_mode_set(struct roc_nix *roc_nix, bool use_inl_dev);
+void __roc_api roc_nix_inl_inb_set(struct roc_nix *roc_nix, bool ena);
int __roc_api roc_nix_inl_dev_rq_get(struct roc_nix_rq *rq, bool ena);
int __roc_api roc_nix_inl_dev_rq_put(struct roc_nix_rq *rq);
bool __roc_api roc_nix_inb_is_with_inl_dev(struct roc_nix *roc_nix);
@@ -176,6 +182,7 @@ int __roc_api roc_nix_reassembly_configure(uint32_t max_wait_time,
int __roc_api roc_nix_inl_ts_pkind_set(struct roc_nix *roc_nix, bool ts_ena,
bool inb_inl_dev);
int __roc_api roc_nix_inl_rq_ena_dis(struct roc_nix *roc_nix, bool ena);
+int __roc_api roc_nix_inl_meta_aura_check(struct roc_nix_rq *rq);
/* NIX Inline Outbound API */
int __roc_api roc_nix_inl_outb_init(struct roc_nix *roc_nix);
@@ -191,6 +198,7 @@ int __roc_api roc_nix_inl_cb_unregister(roc_nix_inl_sso_work_cb_t cb,
int __roc_api roc_nix_inl_outb_soft_exp_poll_switch(struct roc_nix *roc_nix,
bool poll);
uint64_t *__roc_api roc_nix_inl_outb_ring_base_get(struct roc_nix *roc_nix);
+void __roc_api roc_nix_inl_meta_pool_cb_register(roc_nix_inl_meta_pool_cb_t cb);
/* NIX Inline/Outbound API */
enum roc_nix_inl_sa_sync_op {
diff --git a/drivers/common/cnxk/roc_nix_inl_dev.c b/drivers/common/cnxk/roc_nix_inl_dev.c
index 3a96498..1e9b2b9 100644
--- a/drivers/common/cnxk/roc_nix_inl_dev.c
+++ b/drivers/common/cnxk/roc_nix_inl_dev.c
@@ -841,6 +841,8 @@ roc_nix_inl_dev_init(struct roc_nix_inl_dev *roc_inl_dev)
inl_dev->lpb_drop_pc = NIX_AURA_DROP_PC_DFLT;
inl_dev->set_soft_exp_poll = roc_inl_dev->set_soft_exp_poll;
inl_dev->nb_rqs = inl_dev->is_multi_channel ? 1 : PLT_MAX_ETHPORTS;
+ inl_dev->nb_meta_bufs = roc_inl_dev->nb_meta_bufs;
+ inl_dev->meta_buf_sz = roc_inl_dev->meta_buf_sz;
if (roc_inl_dev->spb_drop_pc)
inl_dev->spb_drop_pc = roc_inl_dev->spb_drop_pc;
diff --git a/drivers/common/cnxk/roc_nix_inl_priv.h b/drivers/common/cnxk/roc_nix_inl_priv.h
index a775efc..ccd2adf 100644
--- a/drivers/common/cnxk/roc_nix_inl_priv.h
+++ b/drivers/common/cnxk/roc_nix_inl_priv.h
@@ -6,6 +6,8 @@
#include <pthread.h>
#include <sys/types.h>
+#define NIX_INL_META_SIZE 384u
+
struct nix_inl_dev;
struct nix_inl_qint {
struct nix_inl_dev *inl_dev;
@@ -86,6 +88,8 @@ struct nix_inl_dev {
bool attach_cptlf;
uint16_t wqe_skip;
bool ts_ena;
+ uint32_t nb_meta_bufs;
+ uint32_t meta_buf_sz;
};
int nix_inl_sso_register_irqs(struct nix_inl_dev *inl_dev);
diff --git a/drivers/common/cnxk/roc_nix_priv.h b/drivers/common/cnxk/roc_nix_priv.h
index a3d4ddf..a253f41 100644
--- a/drivers/common/cnxk/roc_nix_priv.h
+++ b/drivers/common/cnxk/roc_nix_priv.h
@@ -202,6 +202,7 @@ struct nix {
uint16_t nb_cpt_lf;
uint16_t outb_se_ring_cnt;
uint16_t outb_se_ring_base;
+ bool need_meta_aura;
/* Mode provided by driver */
bool inb_inl_dev;
diff --git a/drivers/common/cnxk/roc_nix_queue.c b/drivers/common/cnxk/roc_nix_queue.c
index 98b9fb4..b197de0 100644
--- a/drivers/common/cnxk/roc_nix_queue.c
+++ b/drivers/common/cnxk/roc_nix_queue.c
@@ -89,7 +89,12 @@ roc_nix_rq_ena_dis(struct roc_nix_rq *rq, bool enable)
rc = nix_rq_ena_dis(&nix->dev, rq, enable);
nix_rq_vwqe_flush(rq, nix->vwqe_interval);
+ if (rc)
+ return rc;
+ /* Check for meta aura if RQ is enabled */
+ if (enable && nix->need_meta_aura)
+ rc = roc_nix_inl_meta_aura_check(rq);
return rc;
}
@@ -556,6 +561,13 @@ roc_nix_rq_init(struct roc_nix *roc_nix, struct roc_nix_rq *rq, bool ena)
/* Update aura buf type to indicate its use */
nix_rq_aura_buf_type_update(rq, true);
+ /* Check for meta aura if RQ is enabled */
+ if (ena && nix->need_meta_aura) {
+ rc = roc_nix_inl_meta_aura_check(rq);
+ if (rc)
+ return rc;
+ }
+
return nix_tel_node_add_rq(rq);
}
@@ -594,6 +606,13 @@ roc_nix_rq_modify(struct roc_nix *roc_nix, struct roc_nix_rq *rq, bool ena)
/* Update aura attribute to indicate its use */
nix_rq_aura_buf_type_update(rq, true);
+ /* Check for meta aura if RQ is enabled */
+ if (ena && nix->need_meta_aura) {
+ rc = roc_nix_inl_meta_aura_check(rq);
+ if (rc)
+ return rc;
+ }
+
return nix_tel_node_add_rq(rq);
}
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index 6f3de2a..276fec3 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -95,6 +95,7 @@ INTERNAL {
roc_idev_npa_maxpools_set;
roc_idev_npa_nix_get;
roc_idev_num_lmtlines_get;
+ roc_idev_nix_inl_meta_aura_get;
roc_model;
roc_se_auth_key_set;
roc_se_ciph_key_set;
@@ -156,7 +157,10 @@ INTERNAL {
roc_nix_inl_inb_sa_sz;
roc_nix_inl_inb_tag_update;
roc_nix_inl_inb_fini;
+ roc_nix_inl_inb_set;
roc_nix_inb_is_with_inl_dev;
+ roc_nix_inl_meta_aura_check;
+ roc_nix_inl_meta_pool_cb_register;
roc_nix_inb_mode_set;
roc_nix_inl_outb_fini;
roc_nix_inl_outb_init;
--
2.8.4
next prev parent reply other threads:[~2022-08-09 18:52 UTC|newest]
Thread overview: 89+ messages / expand[flat|nested] mbox.gz Atom feed top
2022-08-09 18:48 [PATCH 01/23] common/cnxk: fix part value for cn10k Nithin Dabilpuram
2022-08-09 18:48 ` [PATCH 02/23] common/cnxk: add cn10ka A1 platform Nithin Dabilpuram
2022-08-09 18:48 ` [PATCH 03/23] common/cnxk: update inbound inline IPsec config mailbox Nithin Dabilpuram
2022-08-09 18:48 ` [PATCH 04/23] net/cnxk: fix missing fc wait for outbound path in vec mode Nithin Dabilpuram
2022-08-09 18:48 ` [PATCH 05/23] common/cnxk: limit meta aura workaround to CN10K A0 Nithin Dabilpuram
2022-08-09 18:48 ` [PATCH 06/23] common/cnxk: delay inline device RQ enable to dev start Nithin Dabilpuram
2022-08-09 18:48 ` [PATCH 07/23] common/cnxk: reserve aura zero on cn10ka NPA Nithin Dabilpuram
2022-08-09 18:48 ` [PATCH 08/23] common/cnxk: add support to set NPA buf type Nithin Dabilpuram
2022-08-09 18:48 ` [PATCH 09/23] common/cnxk: update attributes to pools used by NIX Nithin Dabilpuram
2022-08-09 18:48 ` Nithin Dabilpuram [this message]
2022-08-09 18:48 ` [PATCH 11/23] net/cnxk: support for zero aura for inline meta Nithin Dabilpuram
2022-08-09 18:48 ` [PATCH 12/23] common/cnxk: avoid the use of platform specific APIs Nithin Dabilpuram
2022-08-09 18:48 ` [PATCH 13/23] net/cnxk: use full context IPsec structures in fp Nithin Dabilpuram
2022-08-09 18:48 ` [PATCH 14/23] net/cnxk: add crypto capabilities for HMAC-SHA2 Nithin Dabilpuram
2022-08-09 18:48 ` [PATCH 15/23] common/cnxk: enable aging on CN10K platform Nithin Dabilpuram
2022-08-09 18:49 ` [PATCH 16/23] common/cnxk: updated shaper profile with red algorithm Nithin Dabilpuram
2022-08-09 18:49 ` [PATCH 17/23] common/cnxk: add 98xx A1 platform Nithin Dabilpuram
2022-08-09 18:49 ` [PATCH 18/23] net/cnxk: enable additional ciphers for inline Nithin Dabilpuram
2022-08-09 18:49 ` [PATCH 19/23] net/cnxk: enable 3des-cbc cipher capability Nithin Dabilpuram
2022-08-09 18:49 ` [PATCH 20/23] net/cnxk: skip PFC configuration on LBK Nithin Dabilpuram
2022-08-09 18:49 ` [PATCH 21/23] common/cnxk: add support for CPT second pass Nithin Dabilpuram
2022-08-09 18:49 ` [PATCH 22/23] common/cnxk: add CQ limit associated with SQ Nithin Dabilpuram
2022-08-09 18:49 ` [PATCH 23/23] common/cnxk: support Tx compl event via RQ to CQ mapping Nithin Dabilpuram
2022-08-30 4:51 ` [PATCH 01/23] common/cnxk: fix part value for cn10k Jerin Jacob
2022-08-30 5:16 ` [EXT] " Nithin Kumar Dabilpuram
2022-09-05 13:31 ` [PATCH v2 01/31] cnxk/net: add fc check in vector event Tx path Nithin Dabilpuram
2022-09-05 13:31 ` [PATCH v2 02/31] common/cnxk: fix part value for cn10k Nithin Dabilpuram
2022-09-05 13:32 ` [PATCH v2 03/31] common/cnxk: add cn10ka A1 platform Nithin Dabilpuram
2022-09-05 13:32 ` [PATCH v2 04/31] common/cnxk: update inbound inline IPsec config mailbox Nithin Dabilpuram
2022-09-05 13:32 ` [PATCH v2 05/31] net/cnxk: fix missing fc wait for outbound path in vec mode Nithin Dabilpuram
2022-09-05 13:32 ` [PATCH v2 06/31] common/cnxk: limit meta aura workaround to CN10K A0 Nithin Dabilpuram
2022-09-05 13:32 ` [PATCH v2 07/31] common/cnxk: delay inline device RQ enable to dev start Nithin Dabilpuram
2022-09-05 13:32 ` [PATCH v2 08/31] common/cnxk: reserve aura zero on cn10ka NPA Nithin Dabilpuram
2022-09-05 13:32 ` [PATCH v2 09/31] common/cnxk: add support to set NPA buf type Nithin Dabilpuram
2022-09-05 13:32 ` [PATCH v2 10/31] common/cnxk: update attributes to pools used by NIX Nithin Dabilpuram
2022-09-05 13:32 ` [PATCH v2 11/31] common/cnxk: support zero aura for inline inbound meta Nithin Dabilpuram
2022-09-05 13:32 ` [PATCH v2 12/31] net/cnxk: support for zero aura for inline meta Nithin Dabilpuram
2022-09-05 13:32 ` [PATCH v2 13/31] common/cnxk: avoid the use of platform specific APIs Nithin Dabilpuram
2022-09-05 13:32 ` [PATCH v2 14/31] net/cnxk: use full context IPsec structures in fp Nithin Dabilpuram
2022-09-05 13:32 ` [PATCH v2 15/31] net/cnxk: add crypto capabilities for HMAC-SHA2 Nithin Dabilpuram
2022-09-05 13:32 ` [PATCH v2 16/31] common/cnxk: enable aging on CN10K platform Nithin Dabilpuram
2022-09-05 13:32 ` [PATCH v2 17/31] common/cnxk: updated shaper profile with red algorithm Nithin Dabilpuram
2022-09-05 13:32 ` [PATCH v2 18/31] common/cnxk: add 98xx A1 platform Nithin Dabilpuram
2022-09-05 13:32 ` [PATCH v2 19/31] net/cnxk: enable additional ciphers for inline Nithin Dabilpuram
2022-09-05 13:32 ` [PATCH v2 20/31] net/cnxk: enable 3des-cbc cipher capability Nithin Dabilpuram
2022-09-05 13:32 ` [PATCH v2 21/31] net/cnxk: skip PFC configuration on LBK Nithin Dabilpuram
2022-09-05 13:32 ` [PATCH v2 22/31] common/cnxk: add support for CPT second pass Nithin Dabilpuram
2022-09-05 13:32 ` [PATCH v2 23/31] common/cnxk: add CQ limit associated with SQ Nithin Dabilpuram
2022-09-05 13:32 ` [PATCH v2 24/31] common/cnxk: support Tx compl event via RQ to CQ mapping Nithin Dabilpuram
2022-09-05 13:32 ` [PATCH v2 25/31] event/cnxk: wait for CPT fc on wqe path Nithin Dabilpuram
2022-09-05 13:32 ` [PATCH v2 26/31] net/cnxk: limit port specific SA table size Nithin Dabilpuram
2022-09-05 13:32 ` [PATCH v2 27/31] net/cnxk: add support for crypto cipher DES-CBC Nithin Dabilpuram
2022-09-05 13:32 ` [PATCH v2 28/31] net/cnxk: Add support for crypto auth alg MD5 Nithin Dabilpuram
2022-09-05 13:32 ` [PATCH v2 29/31] net/cnxk: enable esn and antireplay support Nithin Dabilpuram
2022-09-05 13:32 ` [PATCH v2 30/31] common/cnxk: dump device basic info to file Nithin Dabilpuram
2022-09-05 13:32 ` [PATCH v2 31/31] net/cnxk: dumps device private information Nithin Dabilpuram
2022-09-12 13:13 ` [PATCH v3 01/32] net/cnxk: add eth port specific PTP enable Nithin Dabilpuram
2022-09-12 13:13 ` [PATCH v3 02/32] cnxk/net: add fc check in vector event Tx path Nithin Dabilpuram
2022-09-12 13:13 ` [PATCH v3 03/32] common/cnxk: fix part value for cn10k Nithin Dabilpuram
2022-09-12 13:13 ` [PATCH v3 04/32] common/cnxk: add cn10ka A1 platform Nithin Dabilpuram
2022-09-12 13:13 ` [PATCH v3 05/32] common/cnxk: update inbound inline IPsec config mailbox Nithin Dabilpuram
2022-09-12 13:13 ` [PATCH v3 06/32] net/cnxk: fix missing fc wait for outbound path in vec mode Nithin Dabilpuram
2022-09-12 13:14 ` [PATCH v3 07/32] common/cnxk: limit meta aura workaround to CN10K A0 Nithin Dabilpuram
2022-09-12 13:14 ` [PATCH v3 08/32] common/cnxk: delay inline device RQ enable to dev start Nithin Dabilpuram
2022-09-12 13:14 ` [PATCH v3 09/32] common/cnxk: reserve aura zero on cn10ka NPA Nithin Dabilpuram
2022-09-12 13:14 ` [PATCH v3 10/32] common/cnxk: add support to set NPA buf type Nithin Dabilpuram
2022-09-12 13:14 ` [PATCH v3 11/32] common/cnxk: update attributes to pools used by NIX Nithin Dabilpuram
2022-09-12 13:14 ` [PATCH v3 12/32] common/cnxk: support zero aura for inline inbound meta Nithin Dabilpuram
2022-09-12 13:14 ` [PATCH v3 13/32] net/cnxk: support for zero aura for inline meta Nithin Dabilpuram
2022-09-12 13:14 ` [PATCH v3 14/32] common/cnxk: avoid the use of platform specific APIs Nithin Dabilpuram
2022-09-12 13:14 ` [PATCH v3 15/32] net/cnxk: use full context IPsec structures in fp Nithin Dabilpuram
2022-09-12 13:14 ` [PATCH v3 16/32] net/cnxk: add crypto capabilities for HMAC-SHA2 Nithin Dabilpuram
2022-09-12 13:14 ` [PATCH v3 17/32] common/cnxk: enable aging on CN10K platform Nithin Dabilpuram
2022-09-12 13:14 ` [PATCH v3 18/32] common/cnxk: updated shaper profile with red algorithm Nithin Dabilpuram
2022-09-12 13:14 ` [PATCH v3 19/32] common/cnxk: add 98xx A1 platform Nithin Dabilpuram
2022-09-12 13:14 ` [PATCH v3 20/32] net/cnxk: enable additional ciphers for inline Nithin Dabilpuram
2022-09-12 13:14 ` [PATCH v3 21/32] net/cnxk: enable 3des-cbc cipher capability Nithin Dabilpuram
2022-09-12 13:14 ` [PATCH v3 22/32] net/cnxk: skip PFC configuration on LBK Nithin Dabilpuram
2022-09-12 13:14 ` [PATCH v3 23/32] common/cnxk: add support for CPT second pass Nithin Dabilpuram
2022-09-12 13:14 ` [PATCH v3 24/32] common/cnxk: add CQ limit associated with SQ Nithin Dabilpuram
2022-09-12 13:14 ` [PATCH v3 25/32] common/cnxk: support Tx compl event via RQ to CQ mapping Nithin Dabilpuram
2022-09-12 13:14 ` [PATCH v3 26/32] event/cnxk: wait for CPT fc on wqe path Nithin Dabilpuram
2022-09-12 13:14 ` [PATCH v3 27/32] net/cnxk: limit port specific SA table size Nithin Dabilpuram
2022-09-12 13:14 ` [PATCH v3 28/32] net/cnxk: add support for crypto cipher DES-CBC Nithin Dabilpuram
2022-09-12 13:14 ` [PATCH v3 29/32] net/cnxk: add support for crypto auth alg MD5 Nithin Dabilpuram
2022-09-12 13:14 ` [PATCH v3 30/32] net/cnxk: enable esn and antireplay support Nithin Dabilpuram
2022-09-12 13:14 ` [PATCH v3 31/32] common/cnxk: dump device basic info to file Nithin Dabilpuram
2022-09-12 13:14 ` [PATCH v3 32/32] net/cnxk: dumps device private information Nithin Dabilpuram
2022-09-16 11:36 ` Jerin Jacob
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20220809184908.24030-10-ndabilpuram@marvell.com \
--to=ndabilpuram@marvell.com \
--cc=dev@dpdk.org \
--cc=jerinj@marvell.com \
--cc=kirankumark@marvell.com \
--cc=mdr@ashroe.eu \
--cc=skori@marvell.com \
--cc=skoteshwar@marvell.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).