* [PATCH] event/cnxk: add eth port specific PTP enable
@ 2022-06-12 17:56 pbhagavatula
2022-06-20 18:12 ` Jerin Jacob
0 siblings, 1 reply; 2+ messages in thread
From: pbhagavatula @ 2022-06-12 17:56 UTC (permalink / raw)
To: jerinj, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
Satha Rao, Pavan Nikhilesh, Shijith Thotton
Cc: dev
From: Pavan Nikhilesh <pbhagavatula@marvell.com>
Add support to enable PTP per Ethernet device when that
specific Ethernet device is connected to the event device
via the Rx adapter.
Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
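For context, the sketch below shows how an application would opt a single
port in to PTP timestamps on the event Rx path. It is illustrative only:
the helper name and its parameters (enable_ptp_on_port, adapter_id,
ev_queue_id, pool) are placeholders and not part of this patch; only the
public ethdev/eventdev calls are real APIs.

    #include <rte_ethdev.h>
    #include <rte_eventdev.h>
    #include <rte_event_eth_rx_adapter.h>

    /* Sketch: enable the Rx timestamp offload and PTP on one port, then
     * attach its Rx queues to the event device through the Rx adapter.
     * Ports added without RTE_ETH_RX_OFFLOAD_TIMESTAMP stay PTP-free.
     * Event device/adapter creation and start are omitted for brevity.
     */
    static int
    enable_ptp_on_port(uint16_t port_id, uint8_t adapter_id,
                       uint8_t ev_queue_id, struct rte_mempool *pool)
    {
        struct rte_eth_conf conf = {
            .rxmode = { .offloads = RTE_ETH_RX_OFFLOAD_TIMESTAMP },
        };
        struct rte_event_eth_rx_adapter_queue_conf qconf = {
            .ev = {
                .queue_id = ev_queue_id,
                .sched_type = RTE_SCHED_TYPE_ATOMIC,
                .event_type = RTE_EVENT_TYPE_ETHDEV,
            },
        };
        int rc;

        rc = rte_eth_dev_configure(port_id, 1, 1, &conf);
        if (rc < 0)
            return rc;
        rc = rte_eth_rx_queue_setup(port_id, 0, 512,
                                    rte_eth_dev_socket_id(port_id),
                                    NULL, pool);
        if (rc < 0)
            return rc;
        rc = rte_eth_timesync_enable(port_id); /* PTP on this port only */
        if (rc < 0)
            return rc;
        /* rx_queue_id of -1 adds all Rx queues of this port */
        return rte_event_eth_rx_adapter_queue_add(adapter_id, port_id, -1,
                                                  &qconf);
    }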
drivers/common/cnxk/roc_io.h | 5 ++-
drivers/event/cnxk/cn10k_eventdev.c | 9 ++---
drivers/event/cnxk/cn10k_worker.h | 48 +++++++++++++++---------
drivers/event/cnxk/cn9k_eventdev.c | 13 +++----
drivers/event/cnxk/cn9k_worker.h | 32 +++++++++++-----
drivers/event/cnxk/cnxk_eventdev.h | 14 ++++---
drivers/event/cnxk/cnxk_eventdev_adptr.c | 9 +++++
drivers/net/cnxk/cn10k_rx.h | 3 +-
8 files changed, 82 insertions(+), 51 deletions(-)
diff --git a/drivers/common/cnxk/roc_io.h b/drivers/common/cnxk/roc_io.h
index 62e98d9d00..68db13e748 100644
--- a/drivers/common/cnxk/roc_io.h
+++ b/drivers/common/cnxk/roc_io.h
@@ -159,14 +159,15 @@ roc_lmt_mov(void *out, const void *in, const uint32_t lmtext)
{
volatile const __uint128_t *src128 = (const __uint128_t *)in;
volatile __uint128_t *dst128 = (__uint128_t *)out;
+ uint32_t i;
dst128[0] = src128[0];
dst128[1] = src128[1];
/* lmtext receives following value:
* 1: NIX_SUBDC_EXT needed i.e. tx vlan case
*/
- if (lmtext)
- dst128[2] = src128[2];
+ for (i = 0; i < lmtext; i++)
+ dst128[2 + i] = src128[2 + i];
}
static __plt_always_inline void
diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c
index 0da809db29..a0c3d22284 100644
--- a/drivers/event/cnxk/cn10k_eventdev.c
+++ b/drivers/event/cnxk/cn10k_eventdev.c
@@ -693,8 +693,7 @@ cn10k_sso_rx_adapter_caps_get(const struct rte_eventdev *event_dev,
}
static void
-cn10k_sso_set_priv_mem(const struct rte_eventdev *event_dev, void *lookup_mem,
- void *tstmp_info)
+cn10k_sso_set_priv_mem(const struct rte_eventdev *event_dev, void *lookup_mem)
{
struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
int i;
@@ -702,7 +701,7 @@ cn10k_sso_set_priv_mem(const struct rte_eventdev *event_dev, void *lookup_mem,
for (i = 0; i < dev->nb_event_ports; i++) {
struct cn10k_sso_hws *ws = event_dev->data->ports[i];
ws->lookup_mem = lookup_mem;
- ws->tstamp = tstmp_info;
+ ws->tstamp = dev->tstamp;
}
}
@@ -714,7 +713,6 @@ cn10k_sso_rx_adapter_queue_add(
{
struct cn10k_eth_rxq *rxq;
void *lookup_mem;
- void *tstmp_info;
int rc;
rc = strncmp(eth_dev->device->driver->name, "net_cn10k", 8);
@@ -727,8 +725,7 @@ cn10k_sso_rx_adapter_queue_add(
return -EINVAL;
rxq = eth_dev->data->rx_queues[0];
lookup_mem = rxq->lookup_mem;
- tstmp_info = rxq->tstamp;
- cn10k_sso_set_priv_mem(event_dev, lookup_mem, tstmp_info);
+ cn10k_sso_set_priv_mem(event_dev, lookup_mem);
cn10k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);
return 0;
diff --git a/drivers/event/cnxk/cn10k_worker.h b/drivers/event/cnxk/cn10k_worker.h
index 034f508dd8..61c40eff7f 100644
--- a/drivers/event/cnxk/cn10k_worker.h
+++ b/drivers/event/cnxk/cn10k_worker.h
@@ -108,12 +108,29 @@ cn10k_wqe_to_mbuf(uint64_t wqe, const uint64_t __mbuf, uint8_t port_id,
mbuf_init | ((uint64_t)port_id) << 48, flags);
}
+static void
+cn10k_sso_process_tstamp(uint64_t u64, uint64_t mbuf,
+ struct cnxk_timesync_info *tstamp)
+{
+ uint64_t tstamp_ptr;
+ uint8_t laptr;
+
+ laptr = (uint8_t) *
+ (uint64_t *)(u64 + (CNXK_SSO_WQE_LAYR_PTR * sizeof(uint64_t)));
+ if (laptr == sizeof(uint64_t)) {
+ /* Extracting tstamp, if PTP enabled*/
+ tstamp_ptr = *(uint64_t *)(((struct nix_wqe_hdr_s *)u64) +
+ CNXK_SSO_WQE_SG_PTR);
+ cn10k_nix_mbuf_to_tstamp((struct rte_mbuf *)mbuf, tstamp, true,
+ (uint64_t *)tstamp_ptr);
+ }
+}
+
static __rte_always_inline void
cn10k_process_vwqe(uintptr_t vwqe, uint16_t port_id, const uint32_t flags,
void *lookup_mem, void *tstamp, uintptr_t lbase)
{
- uint64_t mbuf_init = 0x100010000ULL | RTE_PKTMBUF_HEADROOM |
- (flags & NIX_RX_OFFLOAD_TSTAMP_F ? 8 : 0);
+ uint64_t mbuf_init = 0x100010000ULL | RTE_PKTMBUF_HEADROOM;
struct rte_event_vector *vec;
uint64_t aura_handle, laddr;
uint16_t nb_mbufs, non_vec;
@@ -133,6 +150,9 @@ cn10k_process_vwqe(uintptr_t vwqe, uint16_t port_id, const uint32_t flags,
for (i = OBJS_PER_CLINE; i < vec->nb_elem; i += OBJS_PER_CLINE)
rte_prefetch0(&vec->ptrs[i]);
+ if (flags & NIX_RX_OFFLOAD_TSTAMP_F && tstamp)
+ mbuf_init |= 8;
+
nb_mbufs = RTE_ALIGN_FLOOR(vec->nb_elem, NIX_DESCS_PER_LOOP);
nb_mbufs = cn10k_nix_recv_pkts_vector(&mbuf_init, wqe, nb_mbufs,
flags | NIX_RX_VWQE_F, lookup_mem,
@@ -158,7 +178,6 @@ cn10k_process_vwqe(uintptr_t vwqe, uint16_t port_id, const uint32_t flags,
while (non_vec) {
struct nix_cqe_hdr_s *cqe = (struct nix_cqe_hdr_s *)wqe[0];
- uint64_t tstamp_ptr;
mbuf = (struct rte_mbuf *)((char *)cqe -
sizeof(struct rte_mbuf));
@@ -178,12 +197,10 @@ cn10k_process_vwqe(uintptr_t vwqe, uint16_t port_id, const uint32_t flags,
cn10k_nix_cqe_to_mbuf(cqe, cqe->tag, mbuf, lookup_mem,
mbuf_init, flags);
- /* Extracting tstamp, if PTP enabled*/
- tstamp_ptr = *(uint64_t *)(((struct nix_wqe_hdr_s *)cqe) +
- CNXK_SSO_WQE_SG_PTR);
- cn10k_nix_mbuf_to_tstamp((struct rte_mbuf *)mbuf, tstamp,
- flags & NIX_RX_OFFLOAD_TSTAMP_F,
- (uint64_t *)tstamp_ptr);
+
+ if (flags & NIX_RX_OFFLOAD_TSTAMP_F)
+ cn10k_sso_process_tstamp((uint64_t)wqe[0],
+ (uint64_t)mbuf, tstamp);
wqe[0] = (struct rte_mbuf *)mbuf;
non_vec--;
wqe++;
@@ -200,8 +217,6 @@ static __rte_always_inline void
cn10k_sso_hws_post_process(struct cn10k_sso_hws *ws, uint64_t *u64,
const uint32_t flags)
{
- uint64_t tstamp_ptr;
-
u64[0] = (u64[0] & (0x3ull << 32)) << 6 |
(u64[0] & (0x3FFull << 36)) << 4 | (u64[0] & 0xffffffff);
if ((flags & CPT_RX_WQE_F) &&
@@ -246,12 +261,9 @@ cn10k_sso_hws_post_process(struct cn10k_sso_hws *ws, uint64_t *u64,
u64[0] = CNXK_CLR_SUB_EVENT(u64[0]);
cn10k_wqe_to_mbuf(u64[1], mbuf, port, u64[0] & 0xFFFFF, flags,
ws->lookup_mem);
- /* Extracting tstamp, if PTP enabled*/
- tstamp_ptr = *(uint64_t *)(((struct nix_wqe_hdr_s *)u64[1]) +
- CNXK_SSO_WQE_SG_PTR);
- cn10k_nix_mbuf_to_tstamp((struct rte_mbuf *)mbuf, ws->tstamp,
- flags & NIX_RX_OFFLOAD_TSTAMP_F,
- (uint64_t *)tstamp_ptr);
+ if (flags & NIX_RX_OFFLOAD_TSTAMP_F)
+ cn10k_sso_process_tstamp(u64[1], mbuf,
+ ws->tstamp[port]);
u64[1] = mbuf;
} else if (CNXK_EVENT_TYPE_FROM_TAG(u64[0]) ==
RTE_EVENT_TYPE_ETHDEV_VECTOR) {
@@ -262,7 +274,7 @@ cn10k_sso_hws_post_process(struct cn10k_sso_hws *ws, uint64_t *u64,
((vwqe_hdr & 0xFFFF) << 48) | ((uint64_t)port << 32);
*(uint64_t *)u64[1] = (uint64_t)vwqe_hdr;
cn10k_process_vwqe(u64[1], port, flags, ws->lookup_mem,
- ws->tstamp, ws->lmt_base);
+ ws->tstamp[port], ws->lmt_base);
/* Mark vector mempool object as get */
RTE_MEMPOOL_CHECK_COOKIES(rte_mempool_from_obj((void *)u64[1]),
(void **)&u64[1], 1, 1);
diff --git a/drivers/event/cnxk/cn9k_eventdev.c b/drivers/event/cnxk/cn9k_eventdev.c
index 84347795f0..a810e5c579 100644
--- a/drivers/event/cnxk/cn9k_eventdev.c
+++ b/drivers/event/cnxk/cn9k_eventdev.c
@@ -123,7 +123,7 @@ cn9k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base,
{
struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(arg);
uint64_t retry = CNXK_SSO_FLUSH_RETRY_MAX;
- struct cnxk_timesync_info *tstamp;
+ struct cnxk_timesync_info **tstamp;
struct cn9k_sso_hws_dual *dws;
struct cn9k_sso_hws *ws;
uint64_t cq_ds_cnt = 1;
@@ -942,8 +942,7 @@ cn9k_sso_rx_adapter_caps_get(const struct rte_eventdev *event_dev,
}
static void
-cn9k_sso_set_priv_mem(const struct rte_eventdev *event_dev, void *lookup_mem,
- void *tstmp_info)
+cn9k_sso_set_priv_mem(const struct rte_eventdev *event_dev, void *lookup_mem)
{
struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
int i;
@@ -953,11 +952,11 @@ cn9k_sso_set_priv_mem(const struct rte_eventdev *event_dev, void *lookup_mem,
struct cn9k_sso_hws_dual *dws =
event_dev->data->ports[i];
dws->lookup_mem = lookup_mem;
- dws->tstamp = tstmp_info;
+ dws->tstamp = dev->tstamp;
} else {
struct cn9k_sso_hws *ws = event_dev->data->ports[i];
ws->lookup_mem = lookup_mem;
- ws->tstamp = tstmp_info;
+ ws->tstamp = dev->tstamp;
}
}
}
@@ -970,7 +969,6 @@ cn9k_sso_rx_adapter_queue_add(
{
struct cn9k_eth_rxq *rxq;
void *lookup_mem;
- void *tstmp_info;
int rc;
rc = strncmp(eth_dev->device->driver->name, "net_cn9k", 8);
@@ -984,8 +982,7 @@ cn9k_sso_rx_adapter_queue_add(
rxq = eth_dev->data->rx_queues[0];
lookup_mem = rxq->lookup_mem;
- tstmp_info = rxq->tstamp;
- cn9k_sso_set_priv_mem(event_dev, lookup_mem, tstmp_info);
+ cn9k_sso_set_priv_mem(event_dev, lookup_mem);
cn9k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);
return 0;
diff --git a/drivers/event/cnxk/cn9k_worker.h b/drivers/event/cnxk/cn9k_worker.h
index 64e97e321a..54b3545022 100644
--- a/drivers/event/cnxk/cn9k_worker.h
+++ b/drivers/event/cnxk/cn9k_worker.h
@@ -169,13 +169,29 @@ cn9k_wqe_to_mbuf(uint64_t wqe, const uint64_t mbuf, uint8_t port_id,
mbuf_init | ((uint64_t)port_id) << 48, flags);
}
+static void
+cn9k_sso_process_tstamp(uint64_t u64, uint64_t mbuf,
+ struct cnxk_timesync_info *tstamp)
+{
+ uint64_t tstamp_ptr;
+ uint8_t laptr;
+
+ laptr = (uint8_t) *
+ (uint64_t *)(u64 + (CNXK_SSO_WQE_LAYR_PTR * sizeof(uint64_t)));
+ if (laptr == sizeof(uint64_t)) {
+ /* Extracting tstamp, if PTP enabled*/
+ tstamp_ptr = *(uint64_t *)(((struct nix_wqe_hdr_s *)u64) +
+ CNXK_SSO_WQE_SG_PTR);
+ cn9k_nix_mbuf_to_tstamp((struct rte_mbuf *)mbuf, tstamp, true,
+ (uint64_t *)tstamp_ptr);
+ }
+}
+
static __rte_always_inline void
cn9k_sso_hws_post_process(uint64_t *u64, uint64_t mbuf, const uint32_t flags,
const void *const lookup_mem,
- struct cnxk_timesync_info *tstamp)
+ struct cnxk_timesync_info **tstamp)
{
- uint64_t tstamp_ptr;
-
u64[0] = (u64[0] & (0x3ull << 32)) << 6 |
(u64[0] & (0x3FFull << 36)) << 4 | (u64[0] & 0xffffffff);
if ((flags & CPT_RX_WQE_F) &&
@@ -187,12 +203,8 @@ cn9k_sso_hws_post_process(uint64_t *u64, uint64_t mbuf, const uint32_t flags,
u64[0] = CNXK_CLR_SUB_EVENT(u64[0]);
cn9k_wqe_to_mbuf(u64[1], mbuf, port, u64[0] & 0xFFFFF, flags,
lookup_mem);
- /* Extracting tstamp, if PTP enabled*/
- tstamp_ptr = *(uint64_t *)(((struct nix_wqe_hdr_s *)u64[1]) +
- CNXK_SSO_WQE_SG_PTR);
- cn9k_nix_mbuf_to_tstamp((struct rte_mbuf *)mbuf, tstamp,
- flags & NIX_RX_OFFLOAD_TSTAMP_F,
- (uint64_t *)tstamp_ptr);
+ if (flags & NIX_RX_OFFLOAD_TSTAMP_F)
+ cn9k_sso_process_tstamp(u64[1], mbuf, tstamp[port]);
u64[1] = mbuf;
}
}
@@ -298,7 +310,7 @@ cn9k_sso_hws_get_work(struct cn9k_sso_hws *ws, struct rte_event *ev,
static __rte_always_inline uint16_t
cn9k_sso_hws_get_work_empty(uint64_t base, struct rte_event *ev,
const uint32_t flags, void *lookup_mem,
- struct cnxk_timesync_info *tstamp)
+ struct cnxk_timesync_info **tstamp)
{
union {
__uint128_t get_work;
diff --git a/drivers/event/cnxk/cnxk_eventdev.h b/drivers/event/cnxk/cnxk_eventdev.h
index 6c05303a68..868f92370f 100644
--- a/drivers/event/cnxk/cnxk_eventdev.h
+++ b/drivers/event/cnxk/cnxk_eventdev.h
@@ -38,6 +38,7 @@
#define CNXK_SSO_XAQ_CACHE_CNT (0x7)
#define CNXK_SSO_XAQ_SLACK (8)
#define CNXK_SSO_WQE_SG_PTR (9)
+#define CNXK_SSO_WQE_LAYR_PTR (5)
#define CNXK_SSO_PRIORITY_CNT (0x8)
#define CNXK_SSO_WEIGHT_MAX (0x3f)
#define CNXK_SSO_WEIGHT_MIN (0x3)
@@ -122,6 +123,7 @@ struct cnxk_sso_evdev {
uint64_t *timer_adptr_sz;
uint16_t vec_pool_cnt;
uint64_t *vec_pools;
+ struct cnxk_timesync_info *tstamp[RTE_MAX_ETHPORTS];
struct cnxk_sso_mlt_prio mlt_prio[RTE_EVENT_MAX_QUEUES_PER_DEV];
/* Dev args */
uint32_t xae_cnt;
@@ -139,12 +141,12 @@ struct cnxk_sso_evdev {
struct cn10k_sso_hws {
uint64_t base;
uint64_t gw_rdata;
- /* PTP timestamp */
- struct cnxk_timesync_info *tstamp;
void *lookup_mem;
uint32_t gw_wdata;
uint8_t swtag_req;
uint8_t hws_id;
+ /* PTP timestamp */
+ struct cnxk_timesync_info **tstamp;
/* Add Work Fastpath data */
uint64_t xaq_lmt __rte_cache_aligned;
uint64_t *fc_mem;
@@ -159,11 +161,11 @@ struct cn10k_sso_hws {
struct cn9k_sso_hws {
uint64_t base;
uint64_t gw_wdata;
- /* PTP timestamp */
- struct cnxk_timesync_info *tstamp;
void *lookup_mem;
uint8_t swtag_req;
uint8_t hws_id;
+ /* PTP timestamp */
+ struct cnxk_timesync_info **tstamp;
/* Add Work Fastpath data */
uint64_t xaq_lmt __rte_cache_aligned;
uint64_t *fc_mem;
@@ -176,12 +178,12 @@ struct cn9k_sso_hws {
struct cn9k_sso_hws_dual {
uint64_t base[2]; /* Ping and Pong */
uint64_t gw_wdata;
- /* PTP timestamp */
- struct cnxk_timesync_info *tstamp;
void *lookup_mem;
uint8_t swtag_req;
uint8_t vws; /* Ping pong bit */
uint8_t hws_id;
+ /* PTP timestamp */
+ struct cnxk_timesync_info **tstamp;
/* Add Work Fastpath data */
uint64_t xaq_lmt __rte_cache_aligned;
uint64_t *fc_mem;
diff --git a/drivers/event/cnxk/cnxk_eventdev_adptr.c b/drivers/event/cnxk/cnxk_eventdev_adptr.c
index fa96090bfa..4406bae068 100644
--- a/drivers/event/cnxk/cnxk_eventdev_adptr.c
+++ b/drivers/event/cnxk/cnxk_eventdev_adptr.c
@@ -204,6 +204,14 @@ cnxk_sso_rx_adapter_vwqe_enable(struct cnxk_eth_dev *cnxk_eth_dev,
return roc_nix_rq_modify(&cnxk_eth_dev->nix, rq, 0);
}
+static void
+cnxk_sso_tstamp_cfg(uint16_t port_id, struct cnxk_eth_dev *cnxk_eth_dev,
+ struct cnxk_sso_evdev *dev)
+{
+ if (cnxk_eth_dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
+ dev->tstamp[port_id] = &cnxk_eth_dev->tstamp;
+}
+
int
cnxk_sso_rx_adapter_queue_add(
const struct rte_eventdev *event_dev, const struct rte_eth_dev *eth_dev,
@@ -250,6 +258,7 @@ cnxk_sso_rx_adapter_queue_add(
rox_nix_fc_npa_bp_cfg(&cnxk_eth_dev->nix,
rxq_sp->qconf.mp->pool_id, true,
dev->force_ena_bp);
+ cnxk_sso_tstamp_cfg(eth_dev->data->port_id, cnxk_eth_dev, dev);
cnxk_eth_dev->nb_rxq_sso++;
}
diff --git a/drivers/net/cnxk/cn10k_rx.h b/drivers/net/cnxk/cn10k_rx.h
index 5ecb20f038..0f8790b8c7 100644
--- a/drivers/net/cnxk/cn10k_rx.h
+++ b/drivers/net/cnxk/cn10k_rx.h
@@ -1567,7 +1567,8 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
ol_flags3, mbuf3);
}
- if (flags & NIX_RX_OFFLOAD_TSTAMP_F) {
+ if ((flags & NIX_RX_OFFLOAD_TSTAMP_F) &&
+ ((flags & NIX_RX_VWQE_F) && tstamp)) {
const uint16x8_t len_off = {
0, /* ptype 0:15 */
0, /* ptype 16:32 */
--
2.25.1
* Re: [PATCH] event/cnxk: add eth port specific PTP enable
2022-06-12 17:56 [PATCH] event/cnxk: add eth port specific PTP enable pbhagavatula
@ 2022-06-20 18:12 ` Jerin Jacob
0 siblings, 0 replies; 2+ messages in thread
From: Jerin Jacob @ 2022-06-20 18:12 UTC (permalink / raw)
To: Pavan Nikhilesh
Cc: Jerin Jacob, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
Satha Rao, Shijith Thotton, dpdk-dev
On Sun, Jun 12, 2022 at 11:26 PM <pbhagavatula@marvell.com> wrote:
>
> From: Pavan Nikhilesh <pbhagavatula@marvell.com>
>
> Add support to enable PTP per ethernet device when that
> specific ethernet device is connected to event device via
> Rx adapter.
>
> Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
> Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
There is a conflict with the [for-next-net] branch of the [dpdk-next-net-mrvl]
tree. Please rebase.
When dpdk-next-net-mrvl is merged upstream, we will need to take this rebased patch.
[for-next-net]dell[dpdk-next-net-mrvl] $ git pw series apply 23485
Failed to apply patch:
Applying: event/cnxk: add eth port specific PTP enable
Using index info to reconstruct a base tree...
M drivers/common/cnxk/roc_io.h
M drivers/event/cnxk/cn10k_eventdev.c
M drivers/event/cnxk/cnxk_eventdev_adptr.c
Falling back to patching base and 3-way merge...
Auto-merging drivers/event/cnxk/cnxk_eventdev_adptr.c
CONFLICT (content): Merge conflict in drivers/event/cnxk/cnxk_eventdev_adptr.c
Auto-merging drivers/event/cnxk/cn10k_eventdev.c
Auto-merging drivers/common/cnxk/roc_io.h
error: Failed to merge in the changes.
hint: Use 'git am --show-current-patch=diff' to see the failed patch
Patch failed at 0001 event/cnxk: add eth port specific PTP enable
When you have resolved this problem, run "git am --continue".
If you prefer to skip this patch, run "git am --skip" instead.
To restore the original branch and stop patching, run "git am --abort".
[for-next-net]dell[dpdk-next-net-mrvl] $ git diff
diff --cc drivers/event/cnxk/cnxk_eventdev_adptr.c
index 8fcc377e8d,4406bae068..0000000000
--- a/drivers/event/cnxk/cnxk_eventdev_adptr.c
+++ b/drivers/event/cnxk/cnxk_eventdev_adptr.c
@@@ -250,11 -255,10 +258,18 @@@ cnxk_sso_rx_adapter_queue_add
rc |= roc_nix_rx_drop_re_set(&cnxk_eth_dev->nix,
false);
}
++<<<<<<< HEAD
+
+ if (rxq_sp->tx_pause)
+ roc_nix_fc_npa_bp_cfg(&cnxk_eth_dev->nix,
+ rxq_sp->qconf.mp->pool_id, true,
+ dev->force_ena_bp, rxq_sp->tc);
++=======
+ rox_nix_fc_npa_bp_cfg(&cnxk_eth_dev->nix,
+ rxq_sp->qconf.mp->pool_id, true,
+ dev->force_ena_bp);
+ cnxk_sso_tstamp_cfg(eth_dev->data->port_id, cnxk_eth_dev, dev);
++>>>>>>> event/cnxk: add eth port specific PTP enable
cnxk_eth_dev->nb_rxq_sso++;
}
[for-next-net]dell[dpdk-next-net-mrvl] $
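For reference, a sketch of how the conflicting hunk would plausibly resolve
after the rebase, assuming the for-next-net form of the flow-control call
(roc_nix_fc_npa_bp_cfg() guarded by rxq_sp->tx_pause) is the one to keep,
with the new cnxk_sso_tstamp_cfg() call added after it:

        if (rxq_sp->tx_pause)
                roc_nix_fc_npa_bp_cfg(&cnxk_eth_dev->nix,
                                      rxq_sp->qconf.mp->pool_id, true,
                                      dev->force_ena_bp, rxq_sp->tc);
        cnxk_sso_tstamp_cfg(eth_dev->data->port_id, cnxk_eth_dev, dev);
        cnxk_eth_dev->nb_rxq_sso++;

This is only an expected shape of the resolution, not the actual rebased patch.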