From: Nithin Dabilpuram <ndabilpuram@marvell.com>
To: Nithin Dabilpuram <ndabilpuram@marvell.com>,
Kiran Kumar K <kirankumark@marvell.com>,
Sunil Kumar Kori <skori@marvell.com>,
Satha Rao <skoteshwar@marvell.com>,
Pavan Nikhilesh <pbhagavatula@marvell.com>,
"Shijith Thotton" <sthotton@marvell.com>
Cc: <jerinj@marvell.com>, <dev@dpdk.org>
Subject: [PATCH v3 01/32] net/cnxk: add eth port specific PTP enable
Date: Mon, 12 Sep 2022 18:43:54 +0530 [thread overview]
Message-ID: <20220912131425.1973415-1-ndabilpuram@marvell.com> (raw)
In-Reply-To: <20220809184908.24030-1-ndabilpuram@marvell.com>
From: Pavan Nikhilesh <pbhagavatula@marvell.com>
Add support to enable PTP per ethernet device when that
specific ethernet device is connected to event device via
Rx adapter.
Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
v3:
- Included this patch from series 23650 to resolve compilation
dependency.
- Fixed commit header for patch 29/32
- Fixed commit message for patch 15/32
v2:
- Included this patch from series 24029 as suggested by Jerin to resolve
compilation dependency with event dev.
- Fixed one-liner commit messages
- Added few more patches for upstream.
drivers/common/cnxk/roc_io.h | 5 ++-
drivers/event/cnxk/cn10k_eventdev.c | 9 ++---
drivers/event/cnxk/cn10k_worker.h | 48 +++++++++++++++---------
drivers/event/cnxk/cn9k_eventdev.c | 13 +++----
drivers/event/cnxk/cn9k_worker.h | 32 +++++++++++-----
drivers/event/cnxk/cnxk_eventdev.h | 14 ++++---
drivers/event/cnxk/cnxk_eventdev_adptr.c | 9 +++++
drivers/net/cnxk/cn10k_rx.h | 3 +-
8 files changed, 82 insertions(+), 51 deletions(-)
diff --git a/drivers/common/cnxk/roc_io.h b/drivers/common/cnxk/roc_io.h
index 9d73e263f7..13f98ed549 100644
--- a/drivers/common/cnxk/roc_io.h
+++ b/drivers/common/cnxk/roc_io.h
@@ -161,14 +161,15 @@ roc_lmt_mov(void *out, const void *in, const uint32_t lmtext)
{
volatile const __uint128_t *src128 = (const __uint128_t *)in;
volatile __uint128_t *dst128 = (__uint128_t *)out;
+ uint32_t i;
dst128[0] = src128[0];
dst128[1] = src128[1];
/* lmtext receives following value:
* 1: NIX_SUBDC_EXT needed i.e. tx vlan case
*/
- if (lmtext)
- dst128[2] = src128[2];
+ for (i = 0; i < lmtext; i++)
+ dst128[2 + i] = src128[2 + i];
}
static __plt_always_inline void
diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c
index 5a0cab40a9..0be7ebfe29 100644
--- a/drivers/event/cnxk/cn10k_eventdev.c
+++ b/drivers/event/cnxk/cn10k_eventdev.c
@@ -694,8 +694,7 @@ cn10k_sso_rx_adapter_caps_get(const struct rte_eventdev *event_dev,
}
static void
-cn10k_sso_set_priv_mem(const struct rte_eventdev *event_dev, void *lookup_mem,
- void *tstmp_info)
+cn10k_sso_set_priv_mem(const struct rte_eventdev *event_dev, void *lookup_mem)
{
struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
int i;
@@ -703,7 +702,7 @@ cn10k_sso_set_priv_mem(const struct rte_eventdev *event_dev, void *lookup_mem,
for (i = 0; i < dev->nb_event_ports; i++) {
struct cn10k_sso_hws *ws = event_dev->data->ports[i];
ws->lookup_mem = lookup_mem;
- ws->tstamp = tstmp_info;
+ ws->tstamp = dev->tstamp;
}
}
@@ -715,7 +714,6 @@ cn10k_sso_rx_adapter_queue_add(
{
struct cn10k_eth_rxq *rxq;
void *lookup_mem;
- void *tstmp_info;
int rc;
rc = strncmp(eth_dev->device->driver->name, "net_cn10k", 8);
@@ -728,8 +726,7 @@ cn10k_sso_rx_adapter_queue_add(
return -EINVAL;
rxq = eth_dev->data->rx_queues[0];
lookup_mem = rxq->lookup_mem;
- tstmp_info = rxq->tstamp;
- cn10k_sso_set_priv_mem(event_dev, lookup_mem, tstmp_info);
+ cn10k_sso_set_priv_mem(event_dev, lookup_mem);
cn10k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);
return 0;
diff --git a/drivers/event/cnxk/cn10k_worker.h b/drivers/event/cnxk/cn10k_worker.h
index 0915f404e0..db56d96404 100644
--- a/drivers/event/cnxk/cn10k_worker.h
+++ b/drivers/event/cnxk/cn10k_worker.h
@@ -108,12 +108,29 @@ cn10k_wqe_to_mbuf(uint64_t wqe, const uint64_t __mbuf, uint8_t port_id,
mbuf_init | ((uint64_t)port_id) << 48, flags);
}
+static void
+cn10k_sso_process_tstamp(uint64_t u64, uint64_t mbuf,
+ struct cnxk_timesync_info *tstamp)
+{
+ uint64_t tstamp_ptr;
+ uint8_t laptr;
+
+ laptr = (uint8_t) *
+ (uint64_t *)(u64 + (CNXK_SSO_WQE_LAYR_PTR * sizeof(uint64_t)));
+ if (laptr == sizeof(uint64_t)) {
+ /* Extracting tstamp, if PTP enabled*/
+ tstamp_ptr = *(uint64_t *)(((struct nix_wqe_hdr_s *)u64) +
+ CNXK_SSO_WQE_SG_PTR);
+ cn10k_nix_mbuf_to_tstamp((struct rte_mbuf *)mbuf, tstamp, true,
+ (uint64_t *)tstamp_ptr);
+ }
+}
+
static __rte_always_inline void
cn10k_process_vwqe(uintptr_t vwqe, uint16_t port_id, const uint32_t flags,
void *lookup_mem, void *tstamp, uintptr_t lbase)
{
- uint64_t mbuf_init = 0x100010000ULL | RTE_PKTMBUF_HEADROOM |
- (flags & NIX_RX_OFFLOAD_TSTAMP_F ? 8 : 0);
+ uint64_t mbuf_init = 0x100010000ULL | RTE_PKTMBUF_HEADROOM;
struct rte_event_vector *vec;
uint64_t aura_handle, laddr;
uint16_t nb_mbufs, non_vec;
@@ -133,6 +150,9 @@ cn10k_process_vwqe(uintptr_t vwqe, uint16_t port_id, const uint32_t flags,
for (i = OBJS_PER_CLINE; i < vec->nb_elem; i += OBJS_PER_CLINE)
rte_prefetch0(&vec->ptrs[i]);
+ if (flags & NIX_RX_OFFLOAD_TSTAMP_F && tstamp)
+ mbuf_init |= 8;
+
nb_mbufs = RTE_ALIGN_FLOOR(vec->nb_elem, NIX_DESCS_PER_LOOP);
nb_mbufs = cn10k_nix_recv_pkts_vector(&mbuf_init, wqe, nb_mbufs,
flags | NIX_RX_VWQE_F, lookup_mem,
@@ -158,7 +178,6 @@ cn10k_process_vwqe(uintptr_t vwqe, uint16_t port_id, const uint32_t flags,
while (non_vec) {
struct nix_cqe_hdr_s *cqe = (struct nix_cqe_hdr_s *)wqe[0];
- uint64_t tstamp_ptr;
mbuf = (struct rte_mbuf *)((char *)cqe -
sizeof(struct rte_mbuf));
@@ -178,12 +197,10 @@ cn10k_process_vwqe(uintptr_t vwqe, uint16_t port_id, const uint32_t flags,
cn10k_nix_cqe_to_mbuf(cqe, cqe->tag, mbuf, lookup_mem,
mbuf_init, flags);
- /* Extracting tstamp, if PTP enabled*/
- tstamp_ptr = *(uint64_t *)(((struct nix_wqe_hdr_s *)cqe) +
- CNXK_SSO_WQE_SG_PTR);
- cn10k_nix_mbuf_to_tstamp((struct rte_mbuf *)mbuf, tstamp,
- flags & NIX_RX_OFFLOAD_TSTAMP_F,
- (uint64_t *)tstamp_ptr);
+
+ if (flags & NIX_RX_OFFLOAD_TSTAMP_F)
+ cn10k_sso_process_tstamp((uint64_t)wqe[0],
+ (uint64_t)mbuf, tstamp);
wqe[0] = (struct rte_mbuf *)mbuf;
non_vec--;
wqe++;
@@ -200,8 +217,6 @@ static __rte_always_inline void
cn10k_sso_hws_post_process(struct cn10k_sso_hws *ws, uint64_t *u64,
const uint32_t flags)
{
- uint64_t tstamp_ptr;
-
u64[0] = (u64[0] & (0x3ull << 32)) << 6 |
(u64[0] & (0x3FFull << 36)) << 4 | (u64[0] & 0xffffffff);
if ((flags & CPT_RX_WQE_F) &&
@@ -246,12 +261,9 @@ cn10k_sso_hws_post_process(struct cn10k_sso_hws *ws, uint64_t *u64,
u64[0] = CNXK_CLR_SUB_EVENT(u64[0]);
cn10k_wqe_to_mbuf(u64[1], mbuf, port, u64[0] & 0xFFFFF, flags,
ws->lookup_mem);
- /* Extracting tstamp, if PTP enabled*/
- tstamp_ptr = *(uint64_t *)(((struct nix_wqe_hdr_s *)u64[1]) +
- CNXK_SSO_WQE_SG_PTR);
- cn10k_nix_mbuf_to_tstamp((struct rte_mbuf *)mbuf, ws->tstamp,
- flags & NIX_RX_OFFLOAD_TSTAMP_F,
- (uint64_t *)tstamp_ptr);
+ if (flags & NIX_RX_OFFLOAD_TSTAMP_F)
+ cn10k_sso_process_tstamp(u64[1], mbuf,
+ ws->tstamp[port]);
u64[1] = mbuf;
} else if (CNXK_EVENT_TYPE_FROM_TAG(u64[0]) ==
RTE_EVENT_TYPE_ETHDEV_VECTOR) {
@@ -262,7 +274,7 @@ cn10k_sso_hws_post_process(struct cn10k_sso_hws *ws, uint64_t *u64,
((vwqe_hdr & 0xFFFF) << 48) | ((uint64_t)port << 32);
*(uint64_t *)u64[1] = (uint64_t)vwqe_hdr;
cn10k_process_vwqe(u64[1], port, flags, ws->lookup_mem,
- ws->tstamp, ws->lmt_base);
+ ws->tstamp[port], ws->lmt_base);
/* Mark vector mempool object as get */
RTE_MEMPOOL_CHECK_COOKIES(rte_mempool_from_obj((void *)u64[1]),
(void **)&u64[1], 1, 1);
diff --git a/drivers/event/cnxk/cn9k_eventdev.c b/drivers/event/cnxk/cn9k_eventdev.c
index 2e27030049..8ade30f84b 100644
--- a/drivers/event/cnxk/cn9k_eventdev.c
+++ b/drivers/event/cnxk/cn9k_eventdev.c
@@ -123,7 +123,7 @@ cn9k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base,
{
struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(arg);
uint64_t retry = CNXK_SSO_FLUSH_RETRY_MAX;
- struct cnxk_timesync_info *tstamp;
+ struct cnxk_timesync_info **tstamp;
struct cn9k_sso_hws_dual *dws;
struct cn9k_sso_hws *ws;
uint64_t cq_ds_cnt = 1;
@@ -942,8 +942,7 @@ cn9k_sso_rx_adapter_caps_get(const struct rte_eventdev *event_dev,
}
static void
-cn9k_sso_set_priv_mem(const struct rte_eventdev *event_dev, void *lookup_mem,
- void *tstmp_info)
+cn9k_sso_set_priv_mem(const struct rte_eventdev *event_dev, void *lookup_mem)
{
struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
int i;
@@ -953,11 +952,11 @@ cn9k_sso_set_priv_mem(const struct rte_eventdev *event_dev, void *lookup_mem,
struct cn9k_sso_hws_dual *dws =
event_dev->data->ports[i];
dws->lookup_mem = lookup_mem;
- dws->tstamp = tstmp_info;
+ dws->tstamp = dev->tstamp;
} else {
struct cn9k_sso_hws *ws = event_dev->data->ports[i];
ws->lookup_mem = lookup_mem;
- ws->tstamp = tstmp_info;
+ ws->tstamp = dev->tstamp;
}
}
}
@@ -970,7 +969,6 @@ cn9k_sso_rx_adapter_queue_add(
{
struct cn9k_eth_rxq *rxq;
void *lookup_mem;
- void *tstmp_info;
int rc;
rc = strncmp(eth_dev->device->driver->name, "net_cn9k", 8);
@@ -984,8 +982,7 @@ cn9k_sso_rx_adapter_queue_add(
rxq = eth_dev->data->rx_queues[0];
lookup_mem = rxq->lookup_mem;
- tstmp_info = rxq->tstamp;
- cn9k_sso_set_priv_mem(event_dev, lookup_mem, tstmp_info);
+ cn9k_sso_set_priv_mem(event_dev, lookup_mem);
cn9k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);
return 0;
diff --git a/drivers/event/cnxk/cn9k_worker.h b/drivers/event/cnxk/cn9k_worker.h
index 64e97e321a..54b3545022 100644
--- a/drivers/event/cnxk/cn9k_worker.h
+++ b/drivers/event/cnxk/cn9k_worker.h
@@ -169,13 +169,29 @@ cn9k_wqe_to_mbuf(uint64_t wqe, const uint64_t mbuf, uint8_t port_id,
mbuf_init | ((uint64_t)port_id) << 48, flags);
}
+static void
+cn9k_sso_process_tstamp(uint64_t u64, uint64_t mbuf,
+ struct cnxk_timesync_info *tstamp)
+{
+ uint64_t tstamp_ptr;
+ uint8_t laptr;
+
+ laptr = (uint8_t) *
+ (uint64_t *)(u64 + (CNXK_SSO_WQE_LAYR_PTR * sizeof(uint64_t)));
+ if (laptr == sizeof(uint64_t)) {
+ /* Extracting tstamp, if PTP enabled*/
+ tstamp_ptr = *(uint64_t *)(((struct nix_wqe_hdr_s *)u64) +
+ CNXK_SSO_WQE_SG_PTR);
+ cn9k_nix_mbuf_to_tstamp((struct rte_mbuf *)mbuf, tstamp, true,
+ (uint64_t *)tstamp_ptr);
+ }
+}
+
static __rte_always_inline void
cn9k_sso_hws_post_process(uint64_t *u64, uint64_t mbuf, const uint32_t flags,
const void *const lookup_mem,
- struct cnxk_timesync_info *tstamp)
+ struct cnxk_timesync_info **tstamp)
{
- uint64_t tstamp_ptr;
-
u64[0] = (u64[0] & (0x3ull << 32)) << 6 |
(u64[0] & (0x3FFull << 36)) << 4 | (u64[0] & 0xffffffff);
if ((flags & CPT_RX_WQE_F) &&
@@ -187,12 +203,8 @@ cn9k_sso_hws_post_process(uint64_t *u64, uint64_t mbuf, const uint32_t flags,
u64[0] = CNXK_CLR_SUB_EVENT(u64[0]);
cn9k_wqe_to_mbuf(u64[1], mbuf, port, u64[0] & 0xFFFFF, flags,
lookup_mem);
- /* Extracting tstamp, if PTP enabled*/
- tstamp_ptr = *(uint64_t *)(((struct nix_wqe_hdr_s *)u64[1]) +
- CNXK_SSO_WQE_SG_PTR);
- cn9k_nix_mbuf_to_tstamp((struct rte_mbuf *)mbuf, tstamp,
- flags & NIX_RX_OFFLOAD_TSTAMP_F,
- (uint64_t *)tstamp_ptr);
+ if (flags & NIX_RX_OFFLOAD_TSTAMP_F)
+ cn9k_sso_process_tstamp(u64[1], mbuf, tstamp[port]);
u64[1] = mbuf;
}
}
@@ -298,7 +310,7 @@ cn9k_sso_hws_get_work(struct cn9k_sso_hws *ws, struct rte_event *ev,
static __rte_always_inline uint16_t
cn9k_sso_hws_get_work_empty(uint64_t base, struct rte_event *ev,
const uint32_t flags, void *lookup_mem,
- struct cnxk_timesync_info *tstamp)
+ struct cnxk_timesync_info **tstamp)
{
union {
__uint128_t get_work;
diff --git a/drivers/event/cnxk/cnxk_eventdev.h b/drivers/event/cnxk/cnxk_eventdev.h
index bfd0c5627e..fae4484758 100644
--- a/drivers/event/cnxk/cnxk_eventdev.h
+++ b/drivers/event/cnxk/cnxk_eventdev.h
@@ -38,6 +38,7 @@
#define CNXK_SSO_XAQ_CACHE_CNT (0x7)
#define CNXK_SSO_XAQ_SLACK (8)
#define CNXK_SSO_WQE_SG_PTR (9)
+#define CNXK_SSO_WQE_LAYR_PTR (5)
#define CNXK_SSO_PRIORITY_CNT (0x8)
#define CNXK_SSO_WEIGHT_MAX (0x3f)
#define CNXK_SSO_WEIGHT_MIN (0x3)
@@ -123,6 +124,7 @@ struct cnxk_sso_evdev {
uint64_t *timer_adptr_sz;
uint16_t vec_pool_cnt;
uint64_t *vec_pools;
+ struct cnxk_timesync_info *tstamp[RTE_MAX_ETHPORTS];
struct cnxk_sso_mlt_prio mlt_prio[RTE_EVENT_MAX_QUEUES_PER_DEV];
/* Dev args */
uint32_t xae_cnt;
@@ -140,12 +142,12 @@ struct cnxk_sso_evdev {
struct cn10k_sso_hws {
uint64_t base;
uint64_t gw_rdata;
- /* PTP timestamp */
- struct cnxk_timesync_info *tstamp;
void *lookup_mem;
uint32_t gw_wdata;
uint8_t swtag_req;
uint8_t hws_id;
+ /* PTP timestamp */
+ struct cnxk_timesync_info **tstamp;
/* Add Work Fastpath data */
uint64_t xaq_lmt __rte_cache_aligned;
uint64_t *fc_mem;
@@ -160,11 +162,11 @@ struct cn10k_sso_hws {
struct cn9k_sso_hws {
uint64_t base;
uint64_t gw_wdata;
- /* PTP timestamp */
- struct cnxk_timesync_info *tstamp;
void *lookup_mem;
uint8_t swtag_req;
uint8_t hws_id;
+ /* PTP timestamp */
+ struct cnxk_timesync_info **tstamp;
/* Add Work Fastpath data */
uint64_t xaq_lmt __rte_cache_aligned;
uint64_t *fc_mem;
@@ -177,12 +179,12 @@ struct cn9k_sso_hws {
struct cn9k_sso_hws_dual {
uint64_t base[2]; /* Ping and Pong */
uint64_t gw_wdata;
- /* PTP timestamp */
- struct cnxk_timesync_info *tstamp;
void *lookup_mem;
uint8_t swtag_req;
uint8_t vws; /* Ping pong bit */
uint8_t hws_id;
+ /* PTP timestamp */
+ struct cnxk_timesync_info **tstamp;
/* Add Work Fastpath data */
uint64_t xaq_lmt __rte_cache_aligned;
uint64_t *fc_mem;
diff --git a/drivers/event/cnxk/cnxk_eventdev_adptr.c b/drivers/event/cnxk/cnxk_eventdev_adptr.c
index 1f2e1b4b5d..b4fd821912 100644
--- a/drivers/event/cnxk/cnxk_eventdev_adptr.c
+++ b/drivers/event/cnxk/cnxk_eventdev_adptr.c
@@ -207,6 +207,14 @@ cnxk_sso_rx_adapter_vwqe_enable(struct cnxk_eth_dev *cnxk_eth_dev,
return roc_nix_rq_modify(&cnxk_eth_dev->nix, rq, 0);
}
+static void
+cnxk_sso_tstamp_cfg(uint16_t port_id, struct cnxk_eth_dev *cnxk_eth_dev,
+ struct cnxk_sso_evdev *dev)
+{
+ if (cnxk_eth_dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
+ dev->tstamp[port_id] = &cnxk_eth_dev->tstamp;
+}
+
int
cnxk_sso_rx_adapter_queue_add(
const struct rte_eventdev *event_dev, const struct rte_eth_dev *eth_dev,
@@ -255,6 +263,7 @@ cnxk_sso_rx_adapter_queue_add(
roc_nix_fc_npa_bp_cfg(&cnxk_eth_dev->nix,
rxq_sp->qconf.mp->pool_id, true,
dev->force_ena_bp, rxq_sp->tc);
+ cnxk_sso_tstamp_cfg(eth_dev->data->port_id, cnxk_eth_dev, dev);
cnxk_eth_dev->nb_rxq_sso++;
}
diff --git a/drivers/net/cnxk/cn10k_rx.h b/drivers/net/cnxk/cn10k_rx.h
index 5ecb20f038..0f8790b8c7 100644
--- a/drivers/net/cnxk/cn10k_rx.h
+++ b/drivers/net/cnxk/cn10k_rx.h
@@ -1567,7 +1567,8 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
ol_flags3, mbuf3);
}
- if (flags & NIX_RX_OFFLOAD_TSTAMP_F) {
+ if ((flags & NIX_RX_OFFLOAD_TSTAMP_F) &&
+ ((flags & NIX_RX_VWQE_F) && tstamp)) {
const uint16x8_t len_off = {
0, /* ptype 0:15 */
0, /* ptype 16:32 */
--
2.25.1
next prev parent reply other threads:[~2022-09-12 13:15 UTC|newest]
Thread overview: 89+ messages / expand[flat|nested] mbox.gz Atom feed top
2022-08-09 18:48 [PATCH 01/23] common/cnxk: fix part value for cn10k Nithin Dabilpuram
2022-08-09 18:48 ` [PATCH 02/23] common/cnxk: add cn10ka A1 platform Nithin Dabilpuram
2022-08-09 18:48 ` [PATCH 03/23] common/cnxk: update inbound inline IPsec config mailbox Nithin Dabilpuram
2022-08-09 18:48 ` [PATCH 04/23] net/cnxk: fix missing fc wait for outbound path in vec mode Nithin Dabilpuram
2022-08-09 18:48 ` [PATCH 05/23] common/cnxk: limit meta aura workaround to CN10K A0 Nithin Dabilpuram
2022-08-09 18:48 ` [PATCH 06/23] common/cnxk: delay inline device RQ enable to dev start Nithin Dabilpuram
2022-08-09 18:48 ` [PATCH 07/23] common/cnxk: reserve aura zero on cn10ka NPA Nithin Dabilpuram
2022-08-09 18:48 ` [PATCH 08/23] common/cnxk: add support to set NPA buf type Nithin Dabilpuram
2022-08-09 18:48 ` [PATCH 09/23] common/cnxk: update attributes to pools used by NIX Nithin Dabilpuram
2022-08-09 18:48 ` [PATCH 10/23] common/cnxk: support zero aura for inline inbound meta Nithin Dabilpuram
2022-08-09 18:48 ` [PATCH 11/23] net/cnxk: support for zero aura for inline meta Nithin Dabilpuram
2022-08-09 18:48 ` [PATCH 12/23] common/cnxk: avoid the use of platform specific APIs Nithin Dabilpuram
2022-08-09 18:48 ` [PATCH 13/23] net/cnxk: use full context IPsec structures in fp Nithin Dabilpuram
2022-08-09 18:48 ` [PATCH 14/23] net/cnxk: add crypto capabilities for HMAC-SHA2 Nithin Dabilpuram
2022-08-09 18:48 ` [PATCH 15/23] common/cnxk: enable aging on CN10K platform Nithin Dabilpuram
2022-08-09 18:49 ` [PATCH 16/23] common/cnxk: updated shaper profile with red algorithm Nithin Dabilpuram
2022-08-09 18:49 ` [PATCH 17/23] common/cnxk: add 98xx A1 platform Nithin Dabilpuram
2022-08-09 18:49 ` [PATCH 18/23] net/cnxk: enable additional ciphers for inline Nithin Dabilpuram
2022-08-09 18:49 ` [PATCH 19/23] net/cnxk: enable 3des-cbc cipher capability Nithin Dabilpuram
2022-08-09 18:49 ` [PATCH 20/23] net/cnxk: skip PFC configuration on LBK Nithin Dabilpuram
2022-08-09 18:49 ` [PATCH 21/23] common/cnxk: add support for CPT second pass Nithin Dabilpuram
2022-08-09 18:49 ` [PATCH 22/23] common/cnxk: add CQ limit associated with SQ Nithin Dabilpuram
2022-08-09 18:49 ` [PATCH 23/23] common/cnxk: support Tx compl event via RQ to CQ mapping Nithin Dabilpuram
2022-08-30 4:51 ` [PATCH 01/23] common/cnxk: fix part value for cn10k Jerin Jacob
2022-08-30 5:16 ` [EXT] " Nithin Kumar Dabilpuram
2022-09-05 13:31 ` [PATCH v2 01/31] cnxk/net: add fc check in vector event Tx path Nithin Dabilpuram
2022-09-05 13:31 ` [PATCH v2 02/31] common/cnxk: fix part value for cn10k Nithin Dabilpuram
2022-09-05 13:32 ` [PATCH v2 03/31] common/cnxk: add cn10ka A1 platform Nithin Dabilpuram
2022-09-05 13:32 ` [PATCH v2 04/31] common/cnxk: update inbound inline IPsec config mailbox Nithin Dabilpuram
2022-09-05 13:32 ` [PATCH v2 05/31] net/cnxk: fix missing fc wait for outbound path in vec mode Nithin Dabilpuram
2022-09-05 13:32 ` [PATCH v2 06/31] common/cnxk: limit meta aura workaround to CN10K A0 Nithin Dabilpuram
2022-09-05 13:32 ` [PATCH v2 07/31] common/cnxk: delay inline device RQ enable to dev start Nithin Dabilpuram
2022-09-05 13:32 ` [PATCH v2 08/31] common/cnxk: reserve aura zero on cn10ka NPA Nithin Dabilpuram
2022-09-05 13:32 ` [PATCH v2 09/31] common/cnxk: add support to set NPA buf type Nithin Dabilpuram
2022-09-05 13:32 ` [PATCH v2 10/31] common/cnxk: update attributes to pools used by NIX Nithin Dabilpuram
2022-09-05 13:32 ` [PATCH v2 11/31] common/cnxk: support zero aura for inline inbound meta Nithin Dabilpuram
2022-09-05 13:32 ` [PATCH v2 12/31] net/cnxk: support for zero aura for inline meta Nithin Dabilpuram
2022-09-05 13:32 ` [PATCH v2 13/31] common/cnxk: avoid the use of platform specific APIs Nithin Dabilpuram
2022-09-05 13:32 ` [PATCH v2 14/31] net/cnxk: use full context IPsec structures in fp Nithin Dabilpuram
2022-09-05 13:32 ` [PATCH v2 15/31] net/cnxk: add crypto capabilities for HMAC-SHA2 Nithin Dabilpuram
2022-09-05 13:32 ` [PATCH v2 16/31] common/cnxk: enable aging on CN10K platform Nithin Dabilpuram
2022-09-05 13:32 ` [PATCH v2 17/31] common/cnxk: updated shaper profile with red algorithm Nithin Dabilpuram
2022-09-05 13:32 ` [PATCH v2 18/31] common/cnxk: add 98xx A1 platform Nithin Dabilpuram
2022-09-05 13:32 ` [PATCH v2 19/31] net/cnxk: enable additional ciphers for inline Nithin Dabilpuram
2022-09-05 13:32 ` [PATCH v2 20/31] net/cnxk: enable 3des-cbc cipher capability Nithin Dabilpuram
2022-09-05 13:32 ` [PATCH v2 21/31] net/cnxk: skip PFC configuration on LBK Nithin Dabilpuram
2022-09-05 13:32 ` [PATCH v2 22/31] common/cnxk: add support for CPT second pass Nithin Dabilpuram
2022-09-05 13:32 ` [PATCH v2 23/31] common/cnxk: add CQ limit associated with SQ Nithin Dabilpuram
2022-09-05 13:32 ` [PATCH v2 24/31] common/cnxk: support Tx compl event via RQ to CQ mapping Nithin Dabilpuram
2022-09-05 13:32 ` [PATCH v2 25/31] event/cnxk: wait for CPT fc on wqe path Nithin Dabilpuram
2022-09-05 13:32 ` [PATCH v2 26/31] net/cnxk: limit port specific SA table size Nithin Dabilpuram
2022-09-05 13:32 ` [PATCH v2 27/31] net/cnxk: add support for crypto cipher DES-CBC Nithin Dabilpuram
2022-09-05 13:32 ` [PATCH v2 28/31] net/cnxk: Add support for crypto auth alg MD5 Nithin Dabilpuram
2022-09-05 13:32 ` [PATCH v2 29/31] net/cnxk: enable esn and antireplay support Nithin Dabilpuram
2022-09-05 13:32 ` [PATCH v2 30/31] common/cnxk: dump device basic info to file Nithin Dabilpuram
2022-09-05 13:32 ` [PATCH v2 31/31] net/cnxk: dumps device private information Nithin Dabilpuram
2022-09-12 13:13 ` Nithin Dabilpuram [this message]
2022-09-12 13:13 ` [PATCH v3 02/32] cnxk/net: add fc check in vector event Tx path Nithin Dabilpuram
2022-09-12 13:13 ` [PATCH v3 03/32] common/cnxk: fix part value for cn10k Nithin Dabilpuram
2022-09-12 13:13 ` [PATCH v3 04/32] common/cnxk: add cn10ka A1 platform Nithin Dabilpuram
2022-09-12 13:13 ` [PATCH v3 05/32] common/cnxk: update inbound inline IPsec config mailbox Nithin Dabilpuram
2022-09-12 13:13 ` [PATCH v3 06/32] net/cnxk: fix missing fc wait for outbound path in vec mode Nithin Dabilpuram
2022-09-12 13:14 ` [PATCH v3 07/32] common/cnxk: limit meta aura workaround to CN10K A0 Nithin Dabilpuram
2022-09-12 13:14 ` [PATCH v3 08/32] common/cnxk: delay inline device RQ enable to dev start Nithin Dabilpuram
2022-09-12 13:14 ` [PATCH v3 09/32] common/cnxk: reserve aura zero on cn10ka NPA Nithin Dabilpuram
2022-09-12 13:14 ` [PATCH v3 10/32] common/cnxk: add support to set NPA buf type Nithin Dabilpuram
2022-09-12 13:14 ` [PATCH v3 11/32] common/cnxk: update attributes to pools used by NIX Nithin Dabilpuram
2022-09-12 13:14 ` [PATCH v3 12/32] common/cnxk: support zero aura for inline inbound meta Nithin Dabilpuram
2022-09-12 13:14 ` [PATCH v3 13/32] net/cnxk: support for zero aura for inline meta Nithin Dabilpuram
2022-09-12 13:14 ` [PATCH v3 14/32] common/cnxk: avoid the use of platform specific APIs Nithin Dabilpuram
2022-09-12 13:14 ` [PATCH v3 15/32] net/cnxk: use full context IPsec structures in fp Nithin Dabilpuram
2022-09-12 13:14 ` [PATCH v3 16/32] net/cnxk: add crypto capabilities for HMAC-SHA2 Nithin Dabilpuram
2022-09-12 13:14 ` [PATCH v3 17/32] common/cnxk: enable aging on CN10K platform Nithin Dabilpuram
2022-09-12 13:14 ` [PATCH v3 18/32] common/cnxk: updated shaper profile with red algorithm Nithin Dabilpuram
2022-09-12 13:14 ` [PATCH v3 19/32] common/cnxk: add 98xx A1 platform Nithin Dabilpuram
2022-09-12 13:14 ` [PATCH v3 20/32] net/cnxk: enable additional ciphers for inline Nithin Dabilpuram
2022-09-12 13:14 ` [PATCH v3 21/32] net/cnxk: enable 3des-cbc cipher capability Nithin Dabilpuram
2022-09-12 13:14 ` [PATCH v3 22/32] net/cnxk: skip PFC configuration on LBK Nithin Dabilpuram
2022-09-12 13:14 ` [PATCH v3 23/32] common/cnxk: add support for CPT second pass Nithin Dabilpuram
2022-09-12 13:14 ` [PATCH v3 24/32] common/cnxk: add CQ limit associated with SQ Nithin Dabilpuram
2022-09-12 13:14 ` [PATCH v3 25/32] common/cnxk: support Tx compl event via RQ to CQ mapping Nithin Dabilpuram
2022-09-12 13:14 ` [PATCH v3 26/32] event/cnxk: wait for CPT fc on wqe path Nithin Dabilpuram
2022-09-12 13:14 ` [PATCH v3 27/32] net/cnxk: limit port specific SA table size Nithin Dabilpuram
2022-09-12 13:14 ` [PATCH v3 28/32] net/cnxk: add support for crypto cipher DES-CBC Nithin Dabilpuram
2022-09-12 13:14 ` [PATCH v3 29/32] net/cnxk: add support for crypto auth alg MD5 Nithin Dabilpuram
2022-09-12 13:14 ` [PATCH v3 30/32] net/cnxk: enable esn and antireplay support Nithin Dabilpuram
2022-09-12 13:14 ` [PATCH v3 31/32] common/cnxk: dump device basic info to file Nithin Dabilpuram
2022-09-12 13:14 ` [PATCH v3 32/32] net/cnxk: dumps device private information Nithin Dabilpuram
2022-09-16 11:36 ` Jerin Jacob
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20220912131425.1973415-1-ndabilpuram@marvell.com \
--to=ndabilpuram@marvell.com \
--cc=dev@dpdk.org \
--cc=jerinj@marvell.com \
--cc=kirankumark@marvell.com \
--cc=pbhagavatula@marvell.com \
--cc=skori@marvell.com \
--cc=skoteshwar@marvell.com \
--cc=sthotton@marvell.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).