From: Shijith Thotton <sthotton@marvell.com>
To: <dev@dpdk.org>
Cc: Shijith Thotton <sthotton@marvell.com>, <jerinj@marvell.com>,
<ndabilpuram@marvell.com>, <anoobj@marvell.com>,
<pbhagavatula@marvell.com>, <gakhil@marvell.com>
Subject: [dpdk-dev] [PATCH v3 6/8] event/cnxk: add cn9k crypto adapter fast path ops
Date: Thu, 2 Sep 2021 20:11:54 +0530
Message-ID: <c769f60fd35ab9a6113eaf0adb72139506eb9f55.1630593512.git.sthotton@marvell.com>
In-Reply-To: <cover.1630593512.git.sthotton@marvell.com>
Set the crypto adapter enqueue and dequeue operations for CN9K.
The crypto adapter enqueue callback is wired to cn9k_sso_hws_ca_enq()
(and its dual workslot variant), which submits crypto ops to CPT through
the event port. Dequeue function variants built with CPT_RX_WQE_F are
selected when the adapter uses an internal port, so that
RTE_EVENT_TYPE_CRYPTODEV events are converted back to completed crypto
ops during event dequeue. The adapter capabilities now report
forward-mode internal port and session private data support.
Signed-off-by: Shijith Thotton <sthotton@marvell.com>
---
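Notes (usage sketch, not part of the commit):

A minimal example of how an application might drive the cn9k crypto
adapter once this series is applied, using the public
rte_event_crypto_adapter API. The device/adapter/queue-pair IDs, the
event port configuration values and the helper names below are
placeholders for illustration only, not definitions from this patch.

#include <errno.h>
#include <rte_cryptodev.h>
#include <rte_event_crypto_adapter.h>
#include <rte_eventdev.h>

#define EVDEV_ID   0 /* placeholder event device id */
#define CDEV_ID    0 /* placeholder crypto device id */
#define ADAPTER_ID 0 /* placeholder adapter instance id */
#define QP_ID      0 /* placeholder crypto queue pair id */

static int
ca_setup(void)
{
	struct rte_event_port_conf port_conf = {
		.new_event_threshold = 4096,
		.dequeue_depth = 16,
		.enqueue_depth = 16,
	};
	uint32_t caps;
	int ret;

	ret = rte_event_crypto_adapter_caps_get(EVDEV_ID, CDEV_ID, &caps);
	if (ret)
		return ret;

	/* With this patch, event_cn9k + crypto_cn9k report an internal
	 * port capable of OP_FORWARD mode. */
	if (!(caps & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD))
		return -ENOTSUP;

	ret = rte_event_crypto_adapter_create(ADAPTER_ID, EVDEV_ID,
					      &port_conf,
					      RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD);
	if (ret)
		return ret;

	/* No QP_EV_BIND capability is reported, so the response event
	 * info comes from session/op private metadata and no rte_event
	 * is passed here. */
	ret = rte_event_crypto_adapter_queue_pair_add(ADAPTER_ID, CDEV_ID,
						      QP_ID, NULL);
	if (ret)
		return ret;

	return rte_event_crypto_adapter_start(ADAPTER_ID);
}

/* Fast path on a worker lcore: submit one prepared crypto op as an
 * event and poll for its completion, which the SSO delivers back as an
 * RTE_EVENT_TYPE_CRYPTODEV event carrying the finished op. */
static struct rte_crypto_op *
ca_submit_and_wait(uint8_t evport, struct rte_crypto_op *op)
{
	struct rte_event ev = {
		.event_type = RTE_EVENT_TYPE_CRYPTODEV,
		.event_ptr = op,
	};

	if (rte_event_crypto_adapter_enqueue(EVDEV_ID, evport, &ev, 1) != 1)
		return NULL;

	while (rte_event_dequeue_burst(EVDEV_ID, evport, &ev, 1, 0) == 0)
		; /* busy poll; a real application would bound this */

	return ev.event_type == RTE_EVENT_TYPE_CRYPTODEV ?
	       ev.event_ptr : NULL;
}

As the SESSION_PRIVATE_DATA capability implies, the response event
attributes (queue, flow, sched type) are expected to be supplied through
the crypto session or op private data before ops are submitted.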
drivers/event/cnxk/cn9k_eventdev.c | 94 +++++++++++++++++++-
drivers/event/cnxk/cn9k_worker.c | 22 +++++
drivers/event/cnxk/cn9k_worker.h | 41 ++++++++-
drivers/event/cnxk/cn9k_worker_deq_ca.c | 65 ++++++++++++++
drivers/event/cnxk/cn9k_worker_dual_deq_ca.c | 75 ++++++++++++++++
drivers/event/cnxk/meson.build | 2 +
6 files changed, 292 insertions(+), 7 deletions(-)
create mode 100644 drivers/event/cnxk/cn9k_worker_deq_ca.c
create mode 100644 drivers/event/cnxk/cn9k_worker_dual_deq_ca.c
diff --git a/drivers/event/cnxk/cn9k_eventdev.c b/drivers/event/cnxk/cn9k_eventdev.c
index c73d81c092..59a3dc22a3 100644
--- a/drivers/event/cnxk/cn9k_eventdev.c
+++ b/drivers/event/cnxk/cn9k_eventdev.c
@@ -358,6 +358,20 @@ cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
#undef R
};
+ const event_dequeue_t sso_hws_deq_ca[2][2][2][2][2][2] = {
+#define R(name, f5, f4, f3, f2, f1, f0, flags) \
+ [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_ca_##name,
+ NIX_RX_FASTPATH_MODES
+#undef R
+ };
+
+ const event_dequeue_burst_t sso_hws_deq_ca_burst[2][2][2][2][2][2] = {
+#define R(name, f5, f4, f3, f2, f1, f0, flags) \
+ [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_ca_burst_##name,
+ NIX_RX_FASTPATH_MODES
+#undef R
+ };
+
const event_dequeue_t sso_hws_deq_seg[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags) \
[f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_seg_##name,
@@ -385,7 +399,22 @@ cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
[f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_tmo_seg_burst_##name,
NIX_RX_FASTPATH_MODES
#undef R
- };
+ };
+
+ const event_dequeue_t sso_hws_deq_ca_seg[2][2][2][2][2][2] = {
+#define R(name, f5, f4, f3, f2, f1, f0, flags) \
+ [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_ca_seg_##name,
+ NIX_RX_FASTPATH_MODES
+#undef R
+ };
+
+ const event_dequeue_burst_t
+ sso_hws_deq_ca_seg_burst[2][2][2][2][2][2] = {
+#define R(name, f5, f4, f3, f2, f1, f0, flags) \
+ [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_deq_ca_seg_burst_##name,
+ NIX_RX_FASTPATH_MODES
+#undef R
+ };
/* Dual WS modes */
const event_dequeue_t sso_hws_dual_deq[2][2][2][2][2][2] = {
@@ -415,7 +444,22 @@ cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
[f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_tmo_burst_##name,
NIX_RX_FASTPATH_MODES
#undef R
- };
+ };
+
+ const event_dequeue_t sso_hws_dual_deq_ca[2][2][2][2][2][2] = {
+#define R(name, f5, f4, f3, f2, f1, f0, flags) \
+ [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_ca_##name,
+ NIX_RX_FASTPATH_MODES
+#undef R
+ };
+
+ const event_dequeue_burst_t
+ sso_hws_dual_deq_ca_burst[2][2][2][2][2][2] = {
+#define R(name, f5, f4, f3, f2, f1, f0, flags) \
+ [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_ca_burst_##name,
+ NIX_RX_FASTPATH_MODES
+#undef R
+ };
const event_dequeue_t sso_hws_dual_deq_seg[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags) \
@@ -447,6 +491,21 @@ cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
#undef R
};
+ const event_dequeue_t sso_hws_dual_deq_ca_seg[2][2][2][2][2][2] = {
+#define R(name, f5, f4, f3, f2, f1, f0, flags) \
+ [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_ca_seg_##name,
+ NIX_RX_FASTPATH_MODES
+#undef R
+ };
+
+ const event_dequeue_burst_t
+ sso_hws_dual_deq_ca_seg_burst[2][2][2][2][2][2] = {
+#define R(name, f5, f4, f3, f2, f1, f0, flags) \
+ [f5][f4][f3][f2][f1][f0] = cn9k_sso_hws_dual_deq_ca_seg_burst_##name,
+ NIX_RX_FASTPATH_MODES
+#undef R
+ };
+
/* Tx modes */
const event_tx_adapter_enqueue
sso_hws_tx_adptr_enq[2][2][2][2][2][2] = {
@@ -494,6 +553,12 @@ cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
sso_hws_deq_tmo_seg_burst);
}
+ if (dev->is_ca_internal_port) {
+ CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
+ sso_hws_deq_ca_seg);
+ CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
+ sso_hws_deq_ca_seg_burst);
+ }
} else {
CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue, sso_hws_deq);
CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
@@ -504,7 +569,14 @@ cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
sso_hws_deq_tmo_burst);
}
+ if (dev->is_ca_internal_port) {
+ CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
+ sso_hws_deq_ca);
+ CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
+ sso_hws_deq_ca_burst);
+ }
}
+ event_dev->ca_enqueue = cn9k_sso_hws_ca_enq;
if (dev->tx_offloads & NIX_TX_MULTI_SEG_F)
CN9K_SET_EVDEV_ENQ_OP(dev, event_dev->txa_enqueue,
@@ -519,6 +591,7 @@ cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
event_dev->enqueue_new_burst = cn9k_sso_hws_dual_enq_new_burst;
event_dev->enqueue_forward_burst =
cn9k_sso_hws_dual_enq_fwd_burst;
+ event_dev->ca_enqueue = cn9k_sso_hws_dual_ca_enq;
if (dev->rx_offloads & NIX_RX_MULTI_SEG_F) {
CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
@@ -532,6 +605,13 @@ cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
dev, event_dev->dequeue_burst,
sso_hws_dual_deq_tmo_seg_burst);
}
+ if (dev->is_ca_internal_port) {
+ CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
+ sso_hws_dual_deq_ca_seg);
+ CN9K_SET_EVDEV_DEQ_OP(
+ dev, event_dev->dequeue_burst,
+ sso_hws_dual_deq_ca_seg_burst);
+ }
} else {
CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
sso_hws_dual_deq);
@@ -544,6 +624,13 @@ cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
dev, event_dev->dequeue_burst,
sso_hws_dual_deq_tmo_burst);
}
+ if (dev->is_ca_internal_port) {
+ CN9K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
+ sso_hws_dual_deq_ca);
+ CN9K_SET_EVDEV_DEQ_OP(
+ dev, event_dev->dequeue_burst,
+ sso_hws_dual_deq_ca_burst);
+ }
}
if (dev->tx_offloads & NIX_TX_MULTI_SEG_F)
@@ -930,7 +1017,8 @@ cn9k_crypto_adapter_caps_get(const struct rte_eventdev *event_dev,
CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn9k");
CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn9k");
- *caps = 0;
+ *caps = RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD |
+ RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA;
return 0;
}
diff --git a/drivers/event/cnxk/cn9k_worker.c b/drivers/event/cnxk/cn9k_worker.c
index 538bc4b0b3..32f7cc0343 100644
--- a/drivers/event/cnxk/cn9k_worker.c
+++ b/drivers/event/cnxk/cn9k_worker.c
@@ -5,6 +5,7 @@
#include "roc_api.h"
#include "cn9k_worker.h"
+#include "cn9k_cryptodev_ops.h"
uint16_t __rte_hot
cn9k_sso_hws_enq(void *port, const struct rte_event *ev)
@@ -117,3 +118,24 @@ cn9k_sso_hws_dual_enq_fwd_burst(void *port, const struct rte_event ev[],
return 1;
}
+
+uint16_t __rte_hot
+cn9k_sso_hws_ca_enq(void *port, struct rte_event ev[], uint16_t nb_events)
+{
+ struct cn9k_sso_hws *ws = port;
+
+ RTE_SET_USED(nb_events);
+
+ return cn9k_cpt_crypto_adapter_enqueue(ws->tag_op, ev->event_ptr);
+}
+
+uint16_t __rte_hot
+cn9k_sso_hws_dual_ca_enq(void *port, struct rte_event ev[], uint16_t nb_events)
+{
+ struct cn9k_sso_hws_dual *dws = port;
+
+ RTE_SET_USED(nb_events);
+
+ return cn9k_cpt_crypto_adapter_enqueue(dws->ws_state[!dws->vws].tag_op,
+ ev->event_ptr);
+}
diff --git a/drivers/event/cnxk/cn9k_worker.h b/drivers/event/cnxk/cn9k_worker.h
index 9b2a0bf882..3e8f214904 100644
--- a/drivers/event/cnxk/cn9k_worker.h
+++ b/drivers/event/cnxk/cn9k_worker.h
@@ -8,6 +8,7 @@
#include "cnxk_ethdev.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"
+#include "cn9k_cryptodev_ops.h"
#include "cn9k_ethdev.h"
#include "cn9k_rx.h"
@@ -187,8 +188,12 @@ cn9k_sso_hws_dual_get_work(struct cn9k_sso_hws_state *ws,
(gw.u64[0] & 0xffffffff);
if (CNXK_TT_FROM_EVENT(gw.u64[0]) != SSO_TT_EMPTY) {
- if (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
- RTE_EVENT_TYPE_ETHDEV) {
+ if ((flags & CPT_RX_WQE_F) &&
+ (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
+ RTE_EVENT_TYPE_CRYPTODEV)) {
+ gw.u64[1] = cn9k_cpt_crypto_adapter_dequeue(gw.u64[1]);
+ } else if (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
+ RTE_EVENT_TYPE_ETHDEV) {
uint8_t port = CNXK_SUB_EVENT_FROM_TAG(gw.u64[0]);
gw.u64[0] = CNXK_CLR_SUB_EVENT(gw.u64[0]);
@@ -260,8 +265,12 @@ cn9k_sso_hws_get_work(struct cn9k_sso_hws *ws, struct rte_event *ev,
(gw.u64[0] & 0xffffffff);
if (CNXK_TT_FROM_EVENT(gw.u64[0]) != SSO_TT_EMPTY) {
- if (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
- RTE_EVENT_TYPE_ETHDEV) {
+ if ((flags & CPT_RX_WQE_F) &&
+ (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
+ RTE_EVENT_TYPE_CRYPTODEV)) {
+ gw.u64[1] = cn9k_cpt_crypto_adapter_dequeue(gw.u64[1]);
+ } else if (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
+ RTE_EVENT_TYPE_ETHDEV) {
uint8_t port = CNXK_SUB_EVENT_FROM_TAG(gw.u64[0]);
gw.u64[0] = CNXK_CLR_SUB_EVENT(gw.u64[0]);
@@ -366,6 +375,10 @@ uint16_t __rte_hot cn9k_sso_hws_dual_enq_new_burst(void *port,
uint16_t __rte_hot cn9k_sso_hws_dual_enq_fwd_burst(void *port,
const struct rte_event ev[],
uint16_t nb_events);
+uint16_t __rte_hot cn9k_sso_hws_ca_enq(void *port, struct rte_event ev[],
+ uint16_t nb_events);
+uint16_t __rte_hot cn9k_sso_hws_dual_ca_enq(void *port, struct rte_event ev[],
+ uint16_t nb_events);
#define R(name, f5, f4, f3, f2, f1, f0, flags) \
uint16_t __rte_hot cn9k_sso_hws_deq_##name( \
@@ -378,6 +391,11 @@ uint16_t __rte_hot cn9k_sso_hws_dual_enq_fwd_burst(void *port,
uint16_t __rte_hot cn9k_sso_hws_deq_tmo_burst_##name( \
void *port, struct rte_event ev[], uint16_t nb_events, \
uint64_t timeout_ticks); \
+ uint16_t __rte_hot cn9k_sso_hws_deq_ca_##name( \
+ void *port, struct rte_event *ev, uint64_t timeout_ticks); \
+ uint16_t __rte_hot cn9k_sso_hws_deq_ca_burst_##name( \
+ void *port, struct rte_event ev[], uint16_t nb_events, \
+ uint64_t timeout_ticks); \
uint16_t __rte_hot cn9k_sso_hws_deq_seg_##name( \
void *port, struct rte_event *ev, uint64_t timeout_ticks); \
uint16_t __rte_hot cn9k_sso_hws_deq_seg_burst_##name( \
@@ -386,6 +404,11 @@ uint16_t __rte_hot cn9k_sso_hws_dual_enq_fwd_burst(void *port,
uint16_t __rte_hot cn9k_sso_hws_deq_tmo_seg_##name( \
void *port, struct rte_event *ev, uint64_t timeout_ticks); \
uint16_t __rte_hot cn9k_sso_hws_deq_tmo_seg_burst_##name( \
+ void *port, struct rte_event ev[], uint16_t nb_events, \
+ uint64_t timeout_ticks); \
+ uint16_t __rte_hot cn9k_sso_hws_deq_ca_seg_##name( \
+ void *port, struct rte_event *ev, uint64_t timeout_ticks); \
+ uint16_t __rte_hot cn9k_sso_hws_deq_ca_seg_burst_##name( \
void *port, struct rte_event ev[], uint16_t nb_events, \
uint64_t timeout_ticks);
@@ -403,6 +426,11 @@ NIX_RX_FASTPATH_MODES
uint16_t __rte_hot cn9k_sso_hws_dual_deq_tmo_burst_##name( \
void *port, struct rte_event ev[], uint16_t nb_events, \
uint64_t timeout_ticks); \
+ uint16_t __rte_hot cn9k_sso_hws_dual_deq_ca_##name( \
+ void *port, struct rte_event *ev, uint64_t timeout_ticks); \
+ uint16_t __rte_hot cn9k_sso_hws_dual_deq_ca_burst_##name( \
+ void *port, struct rte_event ev[], uint16_t nb_events, \
+ uint64_t timeout_ticks); \
uint16_t __rte_hot cn9k_sso_hws_dual_deq_seg_##name( \
void *port, struct rte_event *ev, uint64_t timeout_ticks); \
uint16_t __rte_hot cn9k_sso_hws_dual_deq_seg_burst_##name( \
@@ -411,6 +439,11 @@ NIX_RX_FASTPATH_MODES
uint16_t __rte_hot cn9k_sso_hws_dual_deq_tmo_seg_##name( \
void *port, struct rte_event *ev, uint64_t timeout_ticks); \
uint16_t __rte_hot cn9k_sso_hws_dual_deq_tmo_seg_burst_##name( \
+ void *port, struct rte_event ev[], uint16_t nb_events, \
+ uint64_t timeout_ticks); \
+ uint16_t __rte_hot cn9k_sso_hws_dual_deq_ca_seg_##name( \
+ void *port, struct rte_event *ev, uint64_t timeout_ticks); \
+ uint16_t __rte_hot cn9k_sso_hws_dual_deq_ca_seg_burst_##name( \
void *port, struct rte_event ev[], uint16_t nb_events, \
uint64_t timeout_ticks);
diff --git a/drivers/event/cnxk/cn9k_worker_deq_ca.c b/drivers/event/cnxk/cn9k_worker_deq_ca.c
new file mode 100644
index 0000000000..dbdbba17db
--- /dev/null
+++ b/drivers/event/cnxk/cn9k_worker_deq_ca.c
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+
+#include "cn9k_worker.h"
+#include "cnxk_eventdev.h"
+#include "cnxk_worker.h"
+
+#define R(name, f5, f4, f3, f2, f1, f0, flags) \
+ uint16_t __rte_hot cn9k_sso_hws_deq_ca_##name( \
+ void *port, struct rte_event *ev, uint64_t timeout_ticks) \
+ { \
+ struct cn9k_sso_hws *ws = port; \
+ \
+ RTE_SET_USED(timeout_ticks); \
+ \
+ if (ws->swtag_req) { \
+ ws->swtag_req = 0; \
+ cnxk_sso_hws_swtag_wait(ws->tag_op); \
+ return 1; \
+ } \
+ \
+ return cn9k_sso_hws_get_work(ws, ev, flags | CPT_RX_WQE_F, \
+ ws->lookup_mem); \
+ } \
+ \
+ uint16_t __rte_hot cn9k_sso_hws_deq_ca_burst_##name( \
+ void *port, struct rte_event ev[], uint16_t nb_events, \
+ uint64_t timeout_ticks) \
+ { \
+ RTE_SET_USED(nb_events); \
+ \
+ return cn9k_sso_hws_deq_ca_##name(port, ev, timeout_ticks); \
+ } \
+ \
+ uint16_t __rte_hot cn9k_sso_hws_deq_ca_seg_##name( \
+ void *port, struct rte_event *ev, uint64_t timeout_ticks) \
+ { \
+ struct cn9k_sso_hws *ws = port; \
+ \
+ RTE_SET_USED(timeout_ticks); \
+ \
+ if (ws->swtag_req) { \
+ ws->swtag_req = 0; \
+ cnxk_sso_hws_swtag_wait(ws->tag_op); \
+ return 1; \
+ } \
+ \
+ return cn9k_sso_hws_get_work( \
+ ws, ev, flags | NIX_RX_MULTI_SEG_F | CPT_RX_WQE_F, \
+ ws->lookup_mem); \
+ } \
+ \
+ uint16_t __rte_hot cn9k_sso_hws_deq_ca_seg_burst_##name( \
+ void *port, struct rte_event ev[], uint16_t nb_events, \
+ uint64_t timeout_ticks) \
+ { \
+ RTE_SET_USED(nb_events); \
+ \
+ return cn9k_sso_hws_deq_ca_seg_##name(port, ev, \
+ timeout_ticks); \
+ }
+
+NIX_RX_FASTPATH_MODES
+#undef R
diff --git a/drivers/event/cnxk/cn9k_worker_dual_deq_ca.c b/drivers/event/cnxk/cn9k_worker_dual_deq_ca.c
new file mode 100644
index 0000000000..dc9191fe80
--- /dev/null
+++ b/drivers/event/cnxk/cn9k_worker_dual_deq_ca.c
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+
+#include "cn9k_worker.h"
+#include "cnxk_eventdev.h"
+#include "cnxk_worker.h"
+
+#define R(name, f5, f4, f3, f2, f1, f0, flags) \
+ uint16_t __rte_hot cn9k_sso_hws_dual_deq_ca_##name( \
+ void *port, struct rte_event *ev, uint64_t timeout_ticks) \
+ { \
+ struct cn9k_sso_hws_dual *dws = port; \
+ uint16_t gw; \
+ \
+ RTE_SET_USED(timeout_ticks); \
+ if (dws->swtag_req) { \
+ dws->swtag_req = 0; \
+ cnxk_sso_hws_swtag_wait( \
+ dws->ws_state[!dws->vws].tag_op); \
+ return 1; \
+ } \
+ \
+ gw = cn9k_sso_hws_dual_get_work(&dws->ws_state[dws->vws], \
+ &dws->ws_state[!dws->vws], ev, \
+ flags | CPT_RX_WQE_F, \
+ dws->lookup_mem, dws->tstamp); \
+ dws->vws = !dws->vws; \
+ return gw; \
+ } \
+ \
+ uint16_t __rte_hot cn9k_sso_hws_dual_deq_ca_burst_##name( \
+ void *port, struct rte_event ev[], uint16_t nb_events, \
+ uint64_t timeout_ticks) \
+ { \
+ RTE_SET_USED(nb_events); \
+ \
+ return cn9k_sso_hws_dual_deq_ca_##name(port, ev, \
+ timeout_ticks); \
+ } \
+ \
+ uint16_t __rte_hot cn9k_sso_hws_dual_deq_ca_seg_##name( \
+ void *port, struct rte_event *ev, uint64_t timeout_ticks) \
+ { \
+ struct cn9k_sso_hws_dual *dws = port; \
+ uint16_t gw; \
+ \
+ RTE_SET_USED(timeout_ticks); \
+ if (dws->swtag_req) { \
+ dws->swtag_req = 0; \
+ cnxk_sso_hws_swtag_wait( \
+ dws->ws_state[!dws->vws].tag_op); \
+ return 1; \
+ } \
+ \
+ gw = cn9k_sso_hws_dual_get_work( \
+ &dws->ws_state[dws->vws], &dws->ws_state[!dws->vws], \
+ ev, flags | NIX_RX_MULTI_SEG_F | CPT_RX_WQE_F, \
+ dws->lookup_mem, dws->tstamp); \
+ dws->vws = !dws->vws; \
+ return gw; \
+ } \
+ \
+ uint16_t __rte_hot cn9k_sso_hws_dual_deq_ca_seg_burst_##name( \
+ void *port, struct rte_event ev[], uint16_t nb_events, \
+ uint64_t timeout_ticks) \
+ { \
+ RTE_SET_USED(nb_events); \
+ \
+ return cn9k_sso_hws_dual_deq_ca_seg_##name(port, ev, \
+ timeout_ticks); \
+ }
+
+NIX_RX_FASTPATH_MODES
+#undef R
diff --git a/drivers/event/cnxk/meson.build b/drivers/event/cnxk/meson.build
index 1155e18ba7..ffbc0ce0f4 100644
--- a/drivers/event/cnxk/meson.build
+++ b/drivers/event/cnxk/meson.build
@@ -13,9 +13,11 @@ sources = files(
'cn9k_worker.c',
'cn9k_worker_deq.c',
'cn9k_worker_deq_burst.c',
+ 'cn9k_worker_deq_ca.c',
'cn9k_worker_deq_tmo.c',
'cn9k_worker_dual_deq.c',
'cn9k_worker_dual_deq_burst.c',
+ 'cn9k_worker_dual_deq_ca.c',
'cn9k_worker_dual_deq_tmo.c',
'cn9k_worker_tx_enq.c',
'cn9k_worker_tx_enq_seg.c',
--
2.25.1
Thread overview: 32+ messages
2021-08-30 11:09 [dpdk-dev] [PATCH 0/8] Crypto adapter support for Marvell CNXK driver Shijith Thotton
2021-08-30 11:09 ` [dpdk-dev] [PATCH 1/8] net/cnxk: add flag to show CPT can enqueue events Shijith Thotton
2021-08-30 11:09 ` [dpdk-dev] [PATCH 2/8] event/cnxk: add macro to set eventdev ops Shijith Thotton
2021-08-30 11:09 ` [dpdk-dev] [PATCH 3/8] common/cnxk: add API to check CPT IQ is full Shijith Thotton
2021-08-30 11:09 ` [dpdk-dev] [PATCH 4/8] drivers: add cnxk crypto adapter eventdev ops Shijith Thotton
2021-08-30 11:09 ` [dpdk-dev] [PATCH 5/8] crypto/cnxk: add cn9k crypto adapter fast path ops Shijith Thotton
2021-08-31 15:43 ` Kinsella, Ray
2021-09-01 8:45 ` [dpdk-dev] [EXT] " Anoob Joseph
2021-08-30 11:09 ` [dpdk-dev] [PATCH 6/8] event/cnxk: " Shijith Thotton
2021-08-30 11:09 ` [dpdk-dev] [PATCH 7/8] crypto/cnxk: add cn10k " Shijith Thotton
2021-08-31 15:42 ` Kinsella, Ray
2021-09-01 8:46 ` [dpdk-dev] [EXT] " Anoob Joseph
2021-08-30 11:09 ` [dpdk-dev] [PATCH 8/8] event/cnxk: " Shijith Thotton
2021-09-02 12:17 ` [dpdk-dev] [PATCH v2 0/8] Crypto adapter support for Marvell CNXK driver Shijith Thotton
2021-09-02 12:17 ` [dpdk-dev] [PATCH v2 1/8] net/cnxk: add flag to show CPT can enqueue events Shijith Thotton
2021-09-02 12:17 ` [dpdk-dev] [PATCH v2 2/8] event/cnxk: add macro to set eventdev ops Shijith Thotton
2021-09-02 12:17 ` [dpdk-dev] [PATCH v2 3/8] common/cnxk: add API to check CPT IQ is full Shijith Thotton
2021-09-02 12:17 ` [dpdk-dev] [PATCH v2 4/8] drivers: add cnxk crypto adapter eventdev ops Shijith Thotton
2021-09-02 12:17 ` [dpdk-dev] [PATCH v2 5/8] crypto/cnxk: add cn9k crypto adapter fast path ops Shijith Thotton
2021-09-02 12:17 ` [dpdk-dev] [PATCH v2 6/8] event/cnxk: " Shijith Thotton
2021-09-02 12:17 ` [dpdk-dev] [PATCH v2 7/8] crypto/cnxk: add cn10k " Shijith Thotton
2021-09-02 12:17 ` [dpdk-dev] [PATCH v2 8/8] event/cnxk: " Shijith Thotton
2021-09-02 14:41 ` [dpdk-dev] [PATCH v3 0/8] Crypto adapter support for Marvell CNXK driver Shijith Thotton
2021-09-02 14:41 ` [dpdk-dev] [PATCH v3 1/8] net/cnxk: add flag to show CPT can enqueue events Shijith Thotton
2021-09-02 14:41 ` [dpdk-dev] [PATCH v3 2/8] event/cnxk: add macro to set eventdev ops Shijith Thotton
2021-09-02 14:41 ` [dpdk-dev] [PATCH v3 3/8] common/cnxk: add API to check CPT IQ is full Shijith Thotton
2021-09-02 14:41 ` [dpdk-dev] [PATCH v3 4/8] drivers: add cnxk crypto adapter eventdev ops Shijith Thotton
2021-09-02 14:41 ` [dpdk-dev] [PATCH v3 5/8] crypto/cnxk: add cn9k crypto adapter fast path ops Shijith Thotton
2021-09-02 14:41 ` Shijith Thotton [this message]
2021-09-02 14:41 ` [dpdk-dev] [PATCH v3 7/8] crypto/cnxk: add cn10k " Shijith Thotton
2021-09-02 14:41 ` [dpdk-dev] [PATCH v3 8/8] event/cnxk: " Shijith Thotton
2021-09-03 15:04 ` [dpdk-dev] [PATCH v3 0/8] Crypto adapter support for Marvell CNXK driver Akhil Goyal