From: <pbhagavatula@marvell.com>
To: <jerinj@marvell.com>, Pavan Nikhilesh <pbhagavatula@marvell.com>
Cc: <dev@dpdk.org>
Subject: [dpdk-dev] [PATCH 17/44] event/octeontx2: add octeontx2 SSO dual workslot mode
Date: Sun, 2 Jun 2019 00:23:27 +0530 [thread overview]
Message-ID: <20190601185355.370-18-pbhagavatula@marvell.com> (raw)
In-Reply-To: <20190601185355.370-1-pbhagavatula@marvell.com>
From: Pavan Nikhilesh <pbhagavatula@marvell.com>
OcteonTx2 AP core SSO cache contains two entries; each entry caches the
state of a single GWS aka event port.
AP core requests events from SSO by using the following sequence:
1. Write to SSOW_LF_GWS_OP_GET_WORK
2. Wait for SSO to complete scheduling by polling on SSOW_LF_GWS_TAG[63]
3. SSO notifies the core by clearing SSOW_LF_GWS_TAG[63]; if the work is
valid, SSOW_LF_GWS_WQP is non-zero.
The above sequence uses only one in-core cache entry.
In dual workslot mode we try to use both the in-core cache entries by
triggering GET_WORK on a second workslot as soon as the above sequence
completes. This effectively hides the schedule latency of SSO if there
are enough events with unique flow_tags in-flight.
This mode reserves two SSO GWS LFs for each event port, effectively
doubling single core performance.
Dual workslot mode is the default mode of operation in octeontx2.
Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
Signed-off-by: Jerin Jacob <jerinj@marvell.com>
---
drivers/event/octeontx2/otx2_evdev.c | 204 ++++++++++++++++++---
drivers/event/octeontx2/otx2_evdev.h | 17 ++
drivers/event/octeontx2/otx2_evdev_irq.c | 4 +-
drivers/event/octeontx2/otx2_evdev_stats.h | 52 +++++-
4 files changed, 242 insertions(+), 35 deletions(-)
diff --git a/drivers/event/octeontx2/otx2_evdev.c b/drivers/event/octeontx2/otx2_evdev.c
index ff9f905b3..3979bed9a 100644
--- a/drivers/event/octeontx2/otx2_evdev.c
+++ b/drivers/event/octeontx2/otx2_evdev.c
@@ -20,7 +20,7 @@ static inline int
sso_get_msix_offsets(const struct rte_eventdev *event_dev)
{
struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
- uint8_t nb_ports = dev->nb_event_ports;
+ uint8_t nb_ports = dev->nb_event_ports * (dev->dual_ws ? 2 : 1);
struct otx2_mbox *mbox = dev->mbox;
struct msix_offset_rsp *msix_rsp;
int i, rc;
@@ -82,16 +82,26 @@ otx2_sso_port_link(struct rte_eventdev *event_dev, void *port,
const uint8_t queues[], const uint8_t priorities[],
uint16_t nb_links)
{
+ struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
uint8_t port_id = 0;
uint16_t link;
- RTE_SET_USED(event_dev);
RTE_SET_USED(priorities);
for (link = 0; link < nb_links; link++) {
- struct otx2_ssogws *ws = port;
-
- port_id = ws->port;
- sso_port_link_modify(ws, queues[link], true);
+ if (dev->dual_ws) {
+ struct otx2_ssogws_dual *ws = port;
+
+ port_id = ws->port;
+ sso_port_link_modify((struct otx2_ssogws *)
+ &ws->ws_state[0], queues[link], true);
+ sso_port_link_modify((struct otx2_ssogws *)
+ &ws->ws_state[1], queues[link], true);
+ } else {
+ struct otx2_ssogws *ws = port;
+
+ port_id = ws->port;
+ sso_port_link_modify(ws, queues[link], true);
+ }
}
sso_func_trace("port=%d nb_links=%d", port_id, nb_links);
@@ -102,15 +112,27 @@ static int
otx2_sso_port_unlink(struct rte_eventdev *event_dev, void *port,
uint8_t queues[], uint16_t nb_unlinks)
{
+ struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
uint8_t port_id = 0;
uint16_t unlink;
- RTE_SET_USED(event_dev);
for (unlink = 0; unlink < nb_unlinks; unlink++) {
- struct otx2_ssogws *ws = port;
-
- port_id = ws->port;
- sso_port_link_modify(ws, queues[unlink], false);
+ if (dev->dual_ws) {
+ struct otx2_ssogws_dual *ws = port;
+
+ port_id = ws->port;
+ sso_port_link_modify((struct otx2_ssogws *)
+ &ws->ws_state[0], queues[unlink],
+ false);
+ sso_port_link_modify((struct otx2_ssogws *)
+ &ws->ws_state[1], queues[unlink],
+ false);
+ } else {
+ struct otx2_ssogws *ws = port;
+
+ port_id = ws->port;
+ sso_port_link_modify(ws, queues[unlink], false);
+ }
}
sso_func_trace("port=%d nb_unlinks=%d", port_id, nb_unlinks);
@@ -242,11 +264,23 @@ sso_clr_links(const struct rte_eventdev *event_dev)
int i, j;
for (i = 0; i < dev->nb_event_ports; i++) {
- struct otx2_ssogws *ws;
+ if (dev->dual_ws) {
+ struct otx2_ssogws_dual *ws;
- ws = event_dev->data->ports[i];
- for (j = 0; j < dev->nb_event_queues; j++)
- sso_port_link_modify(ws, j, false);
+ ws = event_dev->data->ports[i];
+ for (j = 0; j < dev->nb_event_queues; j++) {
+ sso_port_link_modify((struct otx2_ssogws *)
+ &ws->ws_state[0], j, false);
+ sso_port_link_modify((struct otx2_ssogws *)
+ &ws->ws_state[1], j, false);
+ }
+ } else {
+ struct otx2_ssogws *ws;
+
+ ws = event_dev->data->ports[i];
+ for (j = 0; j < dev->nb_event_queues; j++)
+ sso_port_link_modify(ws, j, false);
+ }
}
}
@@ -261,6 +295,73 @@ sso_set_port_ops(struct otx2_ssogws *ws, uintptr_t base)
ws->swtag_desched_op = base + SSOW_LF_GWS_OP_SWTAG_DESCHED;
}
+static int
+sso_configure_dual_ports(const struct rte_eventdev *event_dev)
+{
+ struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
+ struct otx2_mbox *mbox = dev->mbox;
+ uint8_t vws = 0;
+ uint8_t nb_lf;
+ int i, rc;
+
+ otx2_sso_dbg("configuring event ports %d", dev->nb_event_ports);
+
+ nb_lf = dev->nb_event_ports * 2;
+ /* Ask AF to attach required LFs. */
+ rc = sso_hw_lf_cfg(mbox, SSO_LF_GWS, nb_lf, true);
+ if (rc < 0) {
+ otx2_err("failed to attach SSO GWS LF");
+ return -ENODEV;
+ }
+
+ if (sso_lf_cfg(dev, mbox, SSO_LF_GWS, nb_lf, true) < 0) {
+ sso_hw_lf_cfg(mbox, SSO_LF_GWS, nb_lf, false);
+ otx2_err("failed to init SSO GWS LF");
+ return -ENODEV;
+ }
+
+ for (i = 0; i < dev->nb_event_ports; i++) {
+ struct otx2_ssogws_dual *ws;
+ uintptr_t base;
+
+ /* Free memory prior to re-allocation if needed */
+ if (event_dev->data->ports[i] != NULL) {
+ ws = event_dev->data->ports[i];
+ rte_free(ws);
+ ws = NULL;
+ }
+
+ /* Allocate event port memory */
+ ws = rte_zmalloc_socket("otx2_sso_ws",
+ sizeof(struct otx2_ssogws_dual),
+ RTE_CACHE_LINE_SIZE,
+ event_dev->data->socket_id);
+ if (ws == NULL) {
+ otx2_err("failed to alloc memory for port=%d", i);
+ rc = -ENOMEM;
+ break;
+ }
+
+ ws->port = i;
+ base = dev->bar2 + (RVU_BLOCK_ADDR_SSOW << 20 | vws << 12);
+ sso_set_port_ops((struct otx2_ssogws *)&ws->ws_state[0], base);
+ vws++;
+
+ base = dev->bar2 + (RVU_BLOCK_ADDR_SSOW << 20 | vws << 12);
+ sso_set_port_ops((struct otx2_ssogws *)&ws->ws_state[1], base);
+ vws++;
+
+ event_dev->data->ports[i] = ws;
+ }
+
+ if (rc < 0) {
+ sso_lf_cfg(dev, mbox, SSO_LF_GWS, nb_lf, false);
+ sso_hw_lf_cfg(mbox, SSO_LF_GWS, nb_lf, false);
+ }
+
+ return rc;
+}
+
static int
sso_configure_ports(const struct rte_eventdev *event_dev)
{
@@ -460,6 +561,7 @@ sso_lf_teardown(struct otx2_sso_evdev *dev,
break;
case SSO_LF_GWS:
nb_lf = dev->nb_event_ports;
+ nb_lf *= dev->dual_ws ? 2 : 1;
break;
default:
return;
@@ -525,7 +627,12 @@ otx2_sso_configure(const struct rte_eventdev *event_dev)
dev->nb_event_queues = conf->nb_event_queues;
dev->nb_event_ports = conf->nb_event_ports;
- if (sso_configure_ports(event_dev)) {
+ if (dev->dual_ws)
+ rc = sso_configure_dual_ports(event_dev);
+ else
+ rc = sso_configure_ports(event_dev);
+
+ if (rc < 0) {
otx2_err("failed to configure event ports");
return -ENODEV;
}
@@ -655,14 +762,27 @@ otx2_sso_port_setup(struct rte_eventdev *event_dev, uint8_t port_id,
/* Set get_work timeout for HWS */
val = NSEC2USEC(dev->deq_tmo_ns) - 1;
- struct otx2_ssogws *ws = event_dev->data->ports[port_id];
- uintptr_t base = OTX2_SSOW_GET_BASE_ADDR(ws->getwrk_op);
-
- rte_memcpy(ws->grps_base, grps_base,
- sizeof(uintptr_t) * OTX2_SSO_MAX_VHGRP);
- ws->fc_mem = dev->fc_mem;
- ws->xaq_lmt = dev->xaq_lmt;
- otx2_write64(val, base + SSOW_LF_GWS_NW_TIM);
+ if (dev->dual_ws) {
+ struct otx2_ssogws_dual *ws = event_dev->data->ports[port_id];
+
+ rte_memcpy(ws->grps_base, grps_base,
+ sizeof(uintptr_t) * OTX2_SSO_MAX_VHGRP);
+ ws->fc_mem = dev->fc_mem;
+ ws->xaq_lmt = dev->xaq_lmt;
+ otx2_write64(val, OTX2_SSOW_GET_BASE_ADDR(
+ ws->ws_state[0].getwrk_op) + SSOW_LF_GWS_NW_TIM);
+ otx2_write64(val, OTX2_SSOW_GET_BASE_ADDR(
+ ws->ws_state[1].getwrk_op) + SSOW_LF_GWS_NW_TIM);
+ } else {
+ struct otx2_ssogws *ws = event_dev->data->ports[port_id];
+ uintptr_t base = OTX2_SSOW_GET_BASE_ADDR(ws->getwrk_op);
+
+ rte_memcpy(ws->grps_base, grps_base,
+ sizeof(uintptr_t) * OTX2_SSO_MAX_VHGRP);
+ ws->fc_mem = dev->fc_mem;
+ ws->xaq_lmt = dev->xaq_lmt;
+ otx2_write64(val, base + SSOW_LF_GWS_NW_TIM);
+ }
otx2_sso_dbg("port=%d ws=%p", port_id, event_dev->data->ports[port_id]);
@@ -730,18 +850,37 @@ otx2_sso_dump(struct rte_eventdev *event_dev, FILE *f)
uint8_t queue;
uint8_t port;
+ fprintf(f, "[%s] SSO running in [%s] mode\n", __func__, dev->dual_ws ?
+ "dual_ws" : "single_ws");
/* Dump SSOW registers */
for (port = 0; port < dev->nb_event_ports; port++) {
- fprintf(f, "[%s]SSO single workslot[%d] dump\n",
- __func__, port);
- ssogws_dump(event_dev->data->ports[port], f);
+ if (dev->dual_ws) {
+ struct otx2_ssogws_dual *ws =
+ event_dev->data->ports[port];
+
+ fprintf(f, "[%s] SSO dual workslot[%d] vws[%d] dump\n",
+ __func__, port, 0);
+ ssogws_dump((struct otx2_ssogws *)&ws->ws_state[0], f);
+ fprintf(f, "[%s]SSO dual workslot[%d] vws[%d] dump\n",
+ __func__, port, 1);
+ ssogws_dump((struct otx2_ssogws *)&ws->ws_state[1], f);
+ } else {
+ fprintf(f, "[%s]SSO single workslot[%d] dump\n",
+ __func__, port);
+ ssogws_dump(event_dev->data->ports[port], f);
+ }
}
/* Dump SSO registers */
for (queue = 0; queue < dev->nb_event_queues; queue++) {
fprintf(f, "[%s]SSO group[%d] dump\n", __func__, queue);
- struct otx2_ssogws *ws = event_dev->data->ports[0];
- ssoggrp_dump(ws->grps_base[queue], f);
+ if (dev->dual_ws) {
+ struct otx2_ssogws_dual *ws = event_dev->data->ports[0];
+ ssoggrp_dump(ws->grps_base[queue], f);
+ } else {
+ struct otx2_ssogws *ws = event_dev->data->ports[0];
+ ssoggrp_dump(ws->grps_base[queue], f);
+ }
}
}
@@ -874,7 +1013,14 @@ otx2_sso_init(struct rte_eventdev *event_dev)
goto otx2_npa_lf_uninit;
}
+ dev->dual_ws = 1;
sso_parse_devargs(dev, pci_dev->device.devargs);
+ if (dev->dual_ws) {
+ otx2_sso_dbg("using dual workslot mode");
+ dev->max_event_ports = dev->max_event_ports / 2;
+ } else {
+ otx2_sso_dbg("using single workslot mode");
+ }
otx2_sso_pf_func_set(dev->pf_func);
otx2_sso_dbg("initializing %s max_queues=%d max_ports=%d",
diff --git a/drivers/event/octeontx2/otx2_evdev.h b/drivers/event/octeontx2/otx2_evdev.h
index 99ee6cea0..ef46a0482 100644
--- a/drivers/event/octeontx2/otx2_evdev.h
+++ b/drivers/event/octeontx2/otx2_evdev.h
@@ -106,6 +106,7 @@ struct otx2_sso_evdev {
uint64_t nb_xaq_cfg;
rte_iova_t fc_iova;
struct rte_mempool *xaq_pool;
+ uint8_t dual_ws;
/* Dev args */
uint32_t xae_cnt;
/* HW const */
@@ -140,6 +141,22 @@ struct otx2_ssogws {
uintptr_t grps_base[OTX2_SSO_MAX_VHGRP];
} __rte_cache_aligned;
+struct otx2_ssogws_state {
+ OTX2_SSOGWS_OPS;
+};
+
+struct otx2_ssogws_dual {
+ /* Get Work Fastpath data */
+ struct otx2_ssogws_state ws_state[2]; /* Ping and Pong */
+ uint8_t swtag_req;
+ uint8_t vws; /* Ping pong bit */
+ uint8_t port;
+ /* Add Work Fastpath data */
+ uint64_t xaq_lmt __rte_cache_aligned;
+ uint64_t *fc_mem;
+ uintptr_t grps_base[OTX2_SSO_MAX_VHGRP];
+} __rte_cache_aligned;
+
static inline struct otx2_sso_evdev *
sso_pmd_priv(const struct rte_eventdev *event_dev)
{
diff --git a/drivers/event/octeontx2/otx2_evdev_irq.c b/drivers/event/octeontx2/otx2_evdev_irq.c
index ecc8ae775..e10389703 100644
--- a/drivers/event/octeontx2/otx2_evdev_irq.c
+++ b/drivers/event/octeontx2/otx2_evdev_irq.c
@@ -121,7 +121,7 @@ sso_register_irqs(const struct rte_eventdev *event_dev)
int i, rc = -EINVAL;
uint8_t nb_ports;
- nb_ports = dev->nb_event_ports;
+ nb_ports = dev->nb_event_ports * (dev->dual_ws ? 2 : 1);
for (i = 0; i < dev->nb_event_queues; i++) {
if (dev->sso_msixoff[i] == MSIX_VECTOR_INVALID) {
@@ -163,7 +163,7 @@ sso_unregister_irqs(const struct rte_eventdev *event_dev)
uint8_t nb_ports;
int i;
- nb_ports = dev->nb_event_ports;
+ nb_ports = dev->nb_event_ports * (dev->dual_ws ? 2 : 1);
for (i = 0; i < dev->nb_event_queues; i++) {
uintptr_t base = dev->bar2 + (RVU_BLOCK_ADDR_SSO << 20 |
diff --git a/drivers/event/octeontx2/otx2_evdev_stats.h b/drivers/event/octeontx2/otx2_evdev_stats.h
index df76a1333..9d7c694ee 100644
--- a/drivers/event/octeontx2/otx2_evdev_stats.h
+++ b/drivers/event/octeontx2/otx2_evdev_stats.h
@@ -76,11 +76,29 @@ otx2_sso_xstats_get(const struct rte_eventdev *event_dev,
xstats = sso_hws_xstats;
req_rsp = otx2_mbox_alloc_msg_sso_hws_get_stats(mbox);
- ((struct sso_info_req *)req_rsp)->hws = queue_port_id;
+ ((struct sso_info_req *)req_rsp)->hws = dev->dual_ws ?
+ 2 * queue_port_id : queue_port_id;
rc = otx2_mbox_process_msg(mbox, (void **)&req_rsp);
if (rc < 0)
goto invalid_value;
+ if (dev->dual_ws) {
+ for (i = 0; i < n && i < xstats_mode_count; i++) {
+ xstat = &xstats[ids[i] - start_offset];
+ values[i] = *(uint64_t *)
+ ((char *)req_rsp + xstat->offset);
+ values[i] = (values[i] >> xstat->shift) &
+ xstat->mask;
+ }
+
+ req_rsp = otx2_mbox_alloc_msg_sso_hws_get_stats(mbox);
+ ((struct sso_info_req *)req_rsp)->hws =
+ (2 * queue_port_id) + 1;
+ rc = otx2_mbox_process_msg(mbox, (void **)&req_rsp);
+ if (rc < 0)
+ goto invalid_value;
+ }
+
break;
case RTE_EVENT_DEV_XSTATS_QUEUE:
if (queue_port_id >= (signed int)dev->nb_event_queues)
@@ -107,7 +125,11 @@ otx2_sso_xstats_get(const struct rte_eventdev *event_dev,
value = *(uint64_t *)((char *)req_rsp + xstat->offset);
value = (value >> xstat->shift) & xstat->mask;
- values[i] = value;
+ if ((mode == RTE_EVENT_DEV_XSTATS_PORT) && dev->dual_ws)
+ values[i] += value;
+ else
+ values[i] = value;
+
values[i] -= xstat->reset_snap[queue_port_id];
}
@@ -143,11 +165,30 @@ otx2_sso_xstats_reset(struct rte_eventdev *event_dev,
xstats = sso_hws_xstats;
req_rsp = otx2_mbox_alloc_msg_sso_hws_get_stats(mbox);
- ((struct sso_info_req *)req_rsp)->hws = queue_port_id;
+ ((struct sso_info_req *)req_rsp)->hws = dev->dual_ws ?
+ 2 * queue_port_id : queue_port_id;
rc = otx2_mbox_process_msg(mbox, (void **)&req_rsp);
if (rc < 0)
goto invalid_value;
+ if (dev->dual_ws) {
+ for (i = 0; i < n && i < xstats_mode_count; i++) {
+ xstat = &xstats[ids[i] - start_offset];
+ xstat->reset_snap[queue_port_id] = *(uint64_t *)
+ ((char *)req_rsp + xstat->offset);
+ xstat->reset_snap[queue_port_id] =
+ (xstat->reset_snap[queue_port_id] >>
+ xstat->shift) & xstat->mask;
+ }
+
+ req_rsp = otx2_mbox_alloc_msg_sso_hws_get_stats(mbox);
+ ((struct sso_info_req *)req_rsp)->hws =
+ (2 * queue_port_id) + 1;
+ rc = otx2_mbox_process_msg(mbox, (void **)&req_rsp);
+ if (rc < 0)
+ goto invalid_value;
+ }
+
break;
case RTE_EVENT_DEV_XSTATS_QUEUE:
if (queue_port_id >= (signed int)dev->nb_event_queues)
@@ -174,7 +215,10 @@ otx2_sso_xstats_reset(struct rte_eventdev *event_dev,
value = *(uint64_t *)((char *)req_rsp + xstat->offset);
value = (value >> xstat->shift) & xstat->mask;
- xstat->reset_snap[queue_port_id] = value;
+ if ((mode == RTE_EVENT_DEV_XSTATS_PORT) && dev->dual_ws)
+ xstat->reset_snap[queue_port_id] += value;
+ else
+ xstat->reset_snap[queue_port_id] = value;
}
return i;
invalid_value:
--
2.21.0
next prev parent reply other threads:[~2019-06-01 18:57 UTC|newest]
Thread overview: 58+ messages / expand[flat|nested] mbox.gz Atom feed top
2019-06-01 18:53 [dpdk-dev] [PATCH 00/44] OCTEON TX2 event device driver pbhagavatula
2019-06-01 18:53 ` [dpdk-dev] [PATCH 01/44] event/octeontx2: add build infra and device probe pbhagavatula
2019-06-17 7:50 ` Jerin Jacob Kollanukkaran
2019-06-01 18:53 ` [dpdk-dev] [PATCH 02/44] event/octeontx2: add init and fini for octeontx2 SSO object pbhagavatula
2019-06-17 7:52 ` Jerin Jacob Kollanukkaran
2019-06-01 18:53 ` [dpdk-dev] [PATCH 03/44] event/octeontx2: add device capabilities function pbhagavatula
2019-06-01 18:53 ` [dpdk-dev] [PATCH 04/44] event/octeontx2: add device configure function pbhagavatula
2019-06-01 18:53 ` [dpdk-dev] [PATCH 05/44] event/octeontx2: add event queue config functions pbhagavatula
2019-06-01 18:53 ` [dpdk-dev] [PATCH 06/44] event/octeontx2: allocate event inflight buffers pbhagavatula
2019-06-17 7:56 ` Jerin Jacob Kollanukkaran
2019-06-01 18:53 ` [dpdk-dev] [PATCH 07/44] event/octeontx2: add devargs for inflight buffer count pbhagavatula
2019-06-17 7:58 ` Jerin Jacob Kollanukkaran
2019-06-01 18:53 ` [dpdk-dev] [PATCH 08/44] event/octeontx2: add event port config functions pbhagavatula
2019-06-01 18:53 ` [dpdk-dev] [PATCH 09/44] event/octeontx2: support linking queues to ports pbhagavatula
2019-06-01 18:53 ` [dpdk-dev] [PATCH 10/44] event/octeontx2: support dequeue timeout tick conversion pbhagavatula
2019-06-17 8:01 ` Jerin Jacob Kollanukkaran
2019-06-01 18:53 ` [dpdk-dev] [PATCH 11/44] event/octeontx2: add SSO GWS and GGRP IRQ handlers pbhagavatula
2019-06-17 8:04 ` Jerin Jacob Kollanukkaran
2019-06-01 18:53 ` [dpdk-dev] [PATCH 12/44] event/octeontx2: add register dump functions pbhagavatula
2019-06-01 18:53 ` [dpdk-dev] [PATCH 13/44] event/octeontx2: add xstats support pbhagavatula
2019-06-01 18:53 ` [dpdk-dev] [PATCH 14/44] event/octeontx2: add SSO HW device operations pbhagavatula
2019-06-01 18:53 ` [dpdk-dev] [PATCH 15/44] event/octeontx2: add worker enqueue functions pbhagavatula
2019-06-01 18:53 ` [dpdk-dev] [PATCH 16/44] event/octeontx2: add worker dequeue functions pbhagavatula
2019-06-01 18:53 ` pbhagavatula [this message]
2019-06-01 18:53 ` [dpdk-dev] [PATCH 18/44] event/octeontx2: add SSO dual GWS HW device operations pbhagavatula
2019-06-01 18:53 ` [dpdk-dev] [PATCH 19/44] event/octeontx2: add worker dual GWS enqueue functions pbhagavatula
2019-06-01 18:53 ` [dpdk-dev] [PATCH 20/44] event/octeontx2: add worker dual GWS dequeue functions pbhagavatula
2019-06-01 18:53 ` [dpdk-dev] [PATCH 21/44] event/octeontx2: add devargs to force legacy mode pbhagavatula
2019-06-01 18:53 ` [dpdk-dev] [PATCH 22/44] event/octeontx2: add device start function pbhagavatula
2019-06-01 18:53 ` [dpdk-dev] [PATCH 23/44] event/octeontx2: add devargs to control SSO GGRP QoS pbhagavatula
2019-06-01 18:53 ` [dpdk-dev] [PATCH 24/44] event/octeontx2: add device stop and close functions pbhagavatula
2019-06-01 18:53 ` [dpdk-dev] [PATCH 25/44] event/octeontx2: add SSO selftest pbhagavatula
2019-06-17 8:18 ` Jerin Jacob Kollanukkaran
2019-06-01 18:53 ` [dpdk-dev] [PATCH 26/44] doc: add Marvell OCTEON TX2 event device documentation pbhagavatula
2019-06-17 8:15 ` Jerin Jacob Kollanukkaran
2019-06-01 18:53 ` [dpdk-dev] [PATCH 27/44] event/octeontx2: add event timer support pbhagavatula
2019-06-17 8:20 ` Jerin Jacob Kollanukkaran
2019-06-01 18:53 ` [dpdk-dev] [PATCH 28/44] event/octeontx2: add timer adapter capabilities pbhagavatula
2019-06-01 18:53 ` [dpdk-dev] [PATCH 29/44] event/octeontx2: create and free timer adapter pbhagavatula
2019-06-01 18:53 ` [dpdk-dev] [PATCH 30/44] event/octeontx2: allow TIM to optimize config pbhagavatula
2019-06-01 18:53 ` [dpdk-dev] [PATCH 31/44] event/octeontx2: add devargs to disable NPA pbhagavatula
2019-06-01 18:53 ` [dpdk-dev] [PATCH 32/44] event/octeontx2: add devargs to modify chunk slots pbhagavatula
2019-06-17 8:24 ` Jerin Jacob Kollanukkaran
2019-06-01 18:53 ` [dpdk-dev] [PATCH 33/44] event/octeontx2: add TIM IRQ handlers pbhagavatula
2019-06-17 8:25 ` Jerin Jacob Kollanukkaran
2019-06-01 18:53 ` [dpdk-dev] [PATCH 34/44] event/octeontx2: allow adapters to resize inflight buffers pbhagavatula
2019-06-17 8:27 ` Jerin Jacob Kollanukkaran
2019-06-01 18:53 ` [dpdk-dev] [PATCH 35/44] event/octeontx2: add timer adapter info get function pbhagavatula
2019-06-01 18:53 ` [dpdk-dev] [PATCH 36/44] event/octeontx2: add TIM bucket operations pbhagavatula
2019-06-17 8:31 ` Jerin Jacob Kollanukkaran
2019-06-01 18:53 ` [dpdk-dev] [PATCH 37/44] event/octeontx2: add event timer arm routine pbhagavatula
2019-06-01 18:53 ` [dpdk-dev] [PATCH 38/44] event/octeontx2: add event timer arm timeout burst pbhagavatula
2019-06-01 18:53 ` [dpdk-dev] [PATCH 39/44] event/octeontx2: add event timer cancel function pbhagavatula
2019-06-01 18:53 ` [dpdk-dev] [PATCH 40/44] event/octeontx2: add event timer stats get and reset pbhagavatula
2019-06-01 18:53 ` [dpdk-dev] [PATCH 41/44] event/octeontx2: add even timer adapter start and stop pbhagavatula
2019-06-01 18:53 ` [dpdk-dev] [PATCH 42/44] event/octeontx2: add devargs to limit timer adapters pbhagavatula
2019-06-01 18:53 ` [dpdk-dev] [PATCH 43/44] event/octeontx2: add devargs to control adapter parameters pbhagavatula
2019-06-01 18:53 ` [dpdk-dev] [PATCH 44/44] doc: update Marvell OCTEON TX2 eventdev documentation pbhagavatula
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20190601185355.370-18-pbhagavatula@marvell.com \
--to=pbhagavatula@marvell.com \
--cc=dev@dpdk.org \
--cc=jerinj@marvell.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).