* [dpdk-stable] [PATCH] event/octeontx2: unlink queues during port release
@ 2020-11-18 8:54 Shijith Thotton
2020-11-19 8:46 ` [dpdk-stable] [PATCH v2] " Shijith Thotton
0 siblings, 1 reply; 3+ messages in thread
From: Shijith Thotton @ 2020-11-18 8:54 UTC (permalink / raw)
To: jerinj; +Cc: dev, Shijith Thotton, stable, Pavan Nikhilesh
Unlinking queues from a port should be done during port release. Doing it
during device re-configuration could result in a segfault, as the ports
array is re-allocated based on the new number of ports.
Fixes: f7ac8b66b23c ("event/octeontx2: support linking queues to ports")
Cc: stable@dpdk.org
Signed-off-by: Shijith Thotton <sthotton@marvell.com>
Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
drivers/event/octeontx2/otx2_evdev.c | 98 +++++++++++++++++-----------
drivers/event/octeontx2/otx2_evdev.h | 12 ++++
2 files changed, 71 insertions(+), 39 deletions(-)
diff --git a/drivers/event/octeontx2/otx2_evdev.c b/drivers/event/octeontx2/otx2_evdev.c
index b31c26e95..ed29b8325 100644
--- a/drivers/event/octeontx2/otx2_evdev.c
+++ b/drivers/event/octeontx2/otx2_evdev.c
@@ -689,7 +689,36 @@ sso_lf_cfg(struct otx2_sso_evdev *dev, struct otx2_mbox *mbox,
static void
otx2_sso_port_release(void *port)
{
- rte_free(port);
+ struct otx2_ssogws_cookie *gws_cookie = ssogws_get_cookie(port);
+ struct otx2_sso_evdev *dev;
+ int i;
+
+ if (!gws_cookie->configured)
+ goto free;
+
+ dev = sso_pmd_priv(gws_cookie->event_dev);
+ if (dev->dual_ws) {
+ struct otx2_ssogws_dual *ws = port;
+
+ for (i = 0; i < dev->nb_event_queues; i++) {
+ sso_port_link_modify((struct otx2_ssogws *)
+ &ws->ws_state[0], i, false);
+ sso_port_link_modify((struct otx2_ssogws *)
+ &ws->ws_state[1], i, false);
+ }
+ memset(ws, 0, sizeof(*ws));
+ } else {
+ struct otx2_ssogws *ws = port;
+
+ for (i = 0; i < dev->nb_event_queues; i++)
+ sso_port_link_modify(ws, i, false);
+ memset(ws, 0, sizeof(*ws));
+ }
+
+ memset(gws_cookie, 0, sizeof(*gws_cookie));
+
+free:
+ rte_free(gws_cookie);
}
static void
@@ -699,33 +728,6 @@ otx2_sso_queue_release(struct rte_eventdev *event_dev, uint8_t queue_id)
RTE_SET_USED(queue_id);
}
-static void
-sso_clr_links(const struct rte_eventdev *event_dev)
-{
- struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
- int i, j;
-
- for (i = 0; i < dev->nb_event_ports; i++) {
- if (dev->dual_ws) {
- struct otx2_ssogws_dual *ws;
-
- ws = event_dev->data->ports[i];
- for (j = 0; j < dev->nb_event_queues; j++) {
- sso_port_link_modify((struct otx2_ssogws *)
- &ws->ws_state[0], j, false);
- sso_port_link_modify((struct otx2_ssogws *)
- &ws->ws_state[1], j, false);
- }
- } else {
- struct otx2_ssogws *ws;
-
- ws = event_dev->data->ports[i];
- for (j = 0; j < dev->nb_event_queues; j++)
- sso_port_link_modify(ws, j, false);
- }
- }
-}
-
static void
sso_restore_links(const struct rte_eventdev *event_dev)
{
@@ -803,6 +805,7 @@ sso_configure_dual_ports(const struct rte_eventdev *event_dev)
}
for (i = 0; i < dev->nb_event_ports; i++) {
+ struct otx2_ssogws_cookie *gws_cookie;
struct otx2_ssogws_dual *ws;
uintptr_t base;
@@ -811,14 +814,20 @@ sso_configure_dual_ports(const struct rte_eventdev *event_dev)
} else {
/* Allocate event port memory */
ws = rte_zmalloc_socket("otx2_sso_ws",
- sizeof(struct otx2_ssogws_dual),
+ sizeof(struct otx2_ssogws_dual) +
+ RTE_CACHE_LINE_SIZE,
RTE_CACHE_LINE_SIZE,
event_dev->data->socket_id);
- }
- if (ws == NULL) {
- otx2_err("Failed to alloc memory for port=%d", i);
- rc = -ENOMEM;
- break;
+ if (ws == NULL) {
+ otx2_err("Failed to alloc memory for port=%d",
+ i);
+ rc = -ENOMEM;
+ break;
+ }
+
+ /* First cache line reserved for cookie */
+ ws = (struct otx2_ssogws_dual *)
+ ((uint8_t *)ws + RTE_CACHE_LINE_SIZE);
}
ws->port = i;
@@ -830,6 +839,10 @@ sso_configure_dual_ports(const struct rte_eventdev *event_dev)
sso_set_port_ops((struct otx2_ssogws *)&ws->ws_state[1], base);
vws++;
+ gws_cookie = ssogws_get_cookie(ws);
+ gws_cookie->event_dev = event_dev;
+ gws_cookie->configured = 1;
+
event_dev->data->ports[i] = ws;
}
@@ -866,6 +879,7 @@ sso_configure_ports(const struct rte_eventdev *event_dev)
}
for (i = 0; i < nb_lf; i++) {
+ struct otx2_ssogws_cookie *gws_cookie;
struct otx2_ssogws *ws;
uintptr_t base;
@@ -878,7 +892,8 @@ sso_configure_ports(const struct rte_eventdev *event_dev)
/* Allocate event port memory */
ws = rte_zmalloc_socket("otx2_sso_ws",
- sizeof(struct otx2_ssogws),
+ sizeof(struct otx2_ssogws) +
+ RTE_CACHE_LINE_SIZE,
RTE_CACHE_LINE_SIZE,
event_dev->data->socket_id);
if (ws == NULL) {
@@ -887,10 +902,18 @@ sso_configure_ports(const struct rte_eventdev *event_dev)
break;
}
+ /* First cache line reserved for cookie */
+ ws = (struct otx2_ssogws *)
+ ((uint8_t *)ws + RTE_CACHE_LINE_SIZE);
+
ws->port = i;
base = dev->bar2 + (RVU_BLOCK_ADDR_SSOW << 20 | i << 12);
sso_set_port_ops(ws, base);
+ gws_cookie = ssogws_get_cookie(ws);
+ gws_cookie->event_dev = event_dev;
+ gws_cookie->configured = 1;
+
event_dev->data->ports[i] = ws;
}
@@ -1099,11 +1122,8 @@ otx2_sso_configure(const struct rte_eventdev *event_dev)
return -EINVAL;
}
- if (dev->configured) {
+ if (dev->configured)
sso_unregister_irqs(event_dev);
- /* Clear any prior port-queue mapping. */
- sso_clr_links(event_dev);
- }
if (dev->nb_event_queues) {
/* Finit any previous queues. */
diff --git a/drivers/event/octeontx2/otx2_evdev.h b/drivers/event/octeontx2/otx2_evdev.h
index 547e29d4a..116f953f5 100644
--- a/drivers/event/octeontx2/otx2_evdev.h
+++ b/drivers/event/octeontx2/otx2_evdev.h
@@ -217,6 +217,18 @@ sso_pmd_priv(const struct rte_eventdev *event_dev)
return event_dev->data->dev_private;
}
+struct otx2_ssogws_cookie {
+ const struct rte_eventdev *event_dev;
+ bool configured;
+};
+
+static inline struct otx2_ssogws_cookie *
+ssogws_get_cookie(void *ws)
+{
+ return (struct otx2_ssogws_cookie *)
+ ((uint8_t *)ws - RTE_CACHE_LINE_SIZE);
+}
+
static const union mbuf_initializer mbuf_init = {
.fields = {
.data_off = RTE_PKTMBUF_HEADROOM,
--
2.25.1
^ permalink raw reply [flat|nested] 3+ messages in thread
* [dpdk-stable] [PATCH v2] event/octeontx2: unlink queues during port release
2020-11-18 8:54 [dpdk-stable] [PATCH] event/octeontx2: unlink queues during port release Shijith Thotton
@ 2020-11-19 8:46 ` Shijith Thotton
2020-11-20 12:26 ` [dpdk-stable] [dpdk-dev] " Jerin Jacob
0 siblings, 1 reply; 3+ messages in thread
From: Shijith Thotton @ 2020-11-19 8:46 UTC (permalink / raw)
To: jerinj; +Cc: dev, Shijith Thotton, stable, Pavan Nikhilesh
Unlinking queues from a port should be done during port release. Doing it
during device re-configuration could result in a segfault, as the ports
array is re-allocated based on the new number of ports.
Fixes: f7ac8b66b23c ("event/octeontx2: support linking queues to ports")
Cc: stable@dpdk.org
Signed-off-by: Shijith Thotton <sthotton@marvell.com>
Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
v2:
* Took care of cookie during single workslot free and re-allocation.
drivers/event/octeontx2/otx2_evdev.c | 100 ++++++++++++---------
drivers/event/octeontx2/otx2_evdev.h | 12 +++
drivers/event/octeontx2/otx2_evdev_adptr.c | 24 +++--
3 files changed, 89 insertions(+), 47 deletions(-)
diff --git a/drivers/event/octeontx2/otx2_evdev.c b/drivers/event/octeontx2/otx2_evdev.c
index b31c26e95..de9e394a4 100644
--- a/drivers/event/octeontx2/otx2_evdev.c
+++ b/drivers/event/octeontx2/otx2_evdev.c
@@ -689,7 +689,36 @@ sso_lf_cfg(struct otx2_sso_evdev *dev, struct otx2_mbox *mbox,
static void
otx2_sso_port_release(void *port)
{
- rte_free(port);
+ struct otx2_ssogws_cookie *gws_cookie = ssogws_get_cookie(port);
+ struct otx2_sso_evdev *dev;
+ int i;
+
+ if (!gws_cookie->configured)
+ goto free;
+
+ dev = sso_pmd_priv(gws_cookie->event_dev);
+ if (dev->dual_ws) {
+ struct otx2_ssogws_dual *ws = port;
+
+ for (i = 0; i < dev->nb_event_queues; i++) {
+ sso_port_link_modify((struct otx2_ssogws *)
+ &ws->ws_state[0], i, false);
+ sso_port_link_modify((struct otx2_ssogws *)
+ &ws->ws_state[1], i, false);
+ }
+ memset(ws, 0, sizeof(*ws));
+ } else {
+ struct otx2_ssogws *ws = port;
+
+ for (i = 0; i < dev->nb_event_queues; i++)
+ sso_port_link_modify(ws, i, false);
+ memset(ws, 0, sizeof(*ws));
+ }
+
+ memset(gws_cookie, 0, sizeof(*gws_cookie));
+
+free:
+ rte_free(gws_cookie);
}
static void
@@ -699,33 +728,6 @@ otx2_sso_queue_release(struct rte_eventdev *event_dev, uint8_t queue_id)
RTE_SET_USED(queue_id);
}
-static void
-sso_clr_links(const struct rte_eventdev *event_dev)
-{
- struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
- int i, j;
-
- for (i = 0; i < dev->nb_event_ports; i++) {
- if (dev->dual_ws) {
- struct otx2_ssogws_dual *ws;
-
- ws = event_dev->data->ports[i];
- for (j = 0; j < dev->nb_event_queues; j++) {
- sso_port_link_modify((struct otx2_ssogws *)
- &ws->ws_state[0], j, false);
- sso_port_link_modify((struct otx2_ssogws *)
- &ws->ws_state[1], j, false);
- }
- } else {
- struct otx2_ssogws *ws;
-
- ws = event_dev->data->ports[i];
- for (j = 0; j < dev->nb_event_queues; j++)
- sso_port_link_modify(ws, j, false);
- }
- }
-}
-
static void
sso_restore_links(const struct rte_eventdev *event_dev)
{
@@ -803,6 +805,7 @@ sso_configure_dual_ports(const struct rte_eventdev *event_dev)
}
for (i = 0; i < dev->nb_event_ports; i++) {
+ struct otx2_ssogws_cookie *gws_cookie;
struct otx2_ssogws_dual *ws;
uintptr_t base;
@@ -811,14 +814,20 @@ sso_configure_dual_ports(const struct rte_eventdev *event_dev)
} else {
/* Allocate event port memory */
ws = rte_zmalloc_socket("otx2_sso_ws",
- sizeof(struct otx2_ssogws_dual),
+ sizeof(struct otx2_ssogws_dual) +
+ RTE_CACHE_LINE_SIZE,
RTE_CACHE_LINE_SIZE,
event_dev->data->socket_id);
- }
- if (ws == NULL) {
- otx2_err("Failed to alloc memory for port=%d", i);
- rc = -ENOMEM;
- break;
+ if (ws == NULL) {
+ otx2_err("Failed to alloc memory for port=%d",
+ i);
+ rc = -ENOMEM;
+ break;
+ }
+
+ /* First cache line is reserved for cookie */
+ ws = (struct otx2_ssogws_dual *)
+ ((uint8_t *)ws + RTE_CACHE_LINE_SIZE);
}
ws->port = i;
@@ -830,6 +839,10 @@ sso_configure_dual_ports(const struct rte_eventdev *event_dev)
sso_set_port_ops((struct otx2_ssogws *)&ws->ws_state[1], base);
vws++;
+ gws_cookie = ssogws_get_cookie(ws);
+ gws_cookie->event_dev = event_dev;
+ gws_cookie->configured = 1;
+
event_dev->data->ports[i] = ws;
}
@@ -866,19 +879,21 @@ sso_configure_ports(const struct rte_eventdev *event_dev)
}
for (i = 0; i < nb_lf; i++) {
+ struct otx2_ssogws_cookie *gws_cookie;
struct otx2_ssogws *ws;
uintptr_t base;
/* Free memory prior to re-allocation if needed */
if (event_dev->data->ports[i] != NULL) {
ws = event_dev->data->ports[i];
- rte_free(ws);
+ rte_free(ssogws_get_cookie(ws));
ws = NULL;
}
/* Allocate event port memory */
ws = rte_zmalloc_socket("otx2_sso_ws",
- sizeof(struct otx2_ssogws),
+ sizeof(struct otx2_ssogws) +
+ RTE_CACHE_LINE_SIZE,
RTE_CACHE_LINE_SIZE,
event_dev->data->socket_id);
if (ws == NULL) {
@@ -887,10 +902,18 @@ sso_configure_ports(const struct rte_eventdev *event_dev)
break;
}
+ /* First cache line is reserved for cookie */
+ ws = (struct otx2_ssogws *)
+ ((uint8_t *)ws + RTE_CACHE_LINE_SIZE);
+
ws->port = i;
base = dev->bar2 + (RVU_BLOCK_ADDR_SSOW << 20 | i << 12);
sso_set_port_ops(ws, base);
+ gws_cookie = ssogws_get_cookie(ws);
+ gws_cookie->event_dev = event_dev;
+ gws_cookie->configured = 1;
+
event_dev->data->ports[i] = ws;
}
@@ -1099,11 +1122,8 @@ otx2_sso_configure(const struct rte_eventdev *event_dev)
return -EINVAL;
}
- if (dev->configured) {
+ if (dev->configured)
sso_unregister_irqs(event_dev);
- /* Clear any prior port-queue mapping. */
- sso_clr_links(event_dev);
- }
if (dev->nb_event_queues) {
/* Finit any previous queues. */
diff --git a/drivers/event/octeontx2/otx2_evdev.h b/drivers/event/octeontx2/otx2_evdev.h
index 547e29d4a..116f953f5 100644
--- a/drivers/event/octeontx2/otx2_evdev.h
+++ b/drivers/event/octeontx2/otx2_evdev.h
@@ -217,6 +217,18 @@ sso_pmd_priv(const struct rte_eventdev *event_dev)
return event_dev->data->dev_private;
}
+struct otx2_ssogws_cookie {
+ const struct rte_eventdev *event_dev;
+ bool configured;
+};
+
+static inline struct otx2_ssogws_cookie *
+ssogws_get_cookie(void *ws)
+{
+ return (struct otx2_ssogws_cookie *)
+ ((uint8_t *)ws - RTE_CACHE_LINE_SIZE);
+}
+
static const union mbuf_initializer mbuf_init = {
.fields = {
.data_off = RTE_PKTMBUF_HEADROOM,
diff --git a/drivers/event/octeontx2/otx2_evdev_adptr.c b/drivers/event/octeontx2/otx2_evdev_adptr.c
index 0a5d7924a..d69f269df 100644
--- a/drivers/event/octeontx2/otx2_evdev_adptr.c
+++ b/drivers/event/octeontx2/otx2_evdev_adptr.c
@@ -453,9 +453,10 @@ sso_add_tx_queue_data(const struct rte_eventdev *event_dev,
struct otx2_ssogws_dual *dws;
old_dws = event_dev->data->ports[i];
- dws = rte_realloc_socket(old_dws,
+ dws = rte_realloc_socket(ssogws_get_cookie(old_dws),
sizeof(struct otx2_ssogws_dual)
- + (sizeof(uint64_t) *
+ + RTE_CACHE_LINE_SIZE +
+ (sizeof(uint64_t) *
(dev->max_port_id + 1) *
RTE_MAX_QUEUES_PER_PORT),
RTE_CACHE_LINE_SIZE,
@@ -463,6 +464,10 @@ sso_add_tx_queue_data(const struct rte_eventdev *event_dev,
if (dws == NULL)
return -ENOMEM;
+ /* First cache line is reserved for cookie */
+ dws = (struct otx2_ssogws_dual *)
+ ((uint8_t *)dws + RTE_CACHE_LINE_SIZE);
+
((uint64_t (*)[RTE_MAX_QUEUES_PER_PORT]
)&dws->tx_adptr_data)[eth_port_id][tx_queue_id] =
(uint64_t)txq;
@@ -472,16 +477,21 @@ sso_add_tx_queue_data(const struct rte_eventdev *event_dev,
struct otx2_ssogws *ws;
old_ws = event_dev->data->ports[i];
- ws = rte_realloc_socket(old_ws,
- sizeof(struct otx2_ssogws_dual)
- + (sizeof(uint64_t) *
- (dev->max_port_id + 1) *
- RTE_MAX_QUEUES_PER_PORT),
+ ws = rte_realloc_socket(ssogws_get_cookie(old_ws),
+ sizeof(struct otx2_ssogws) +
+ RTE_CACHE_LINE_SIZE +
+ (sizeof(uint64_t) *
+ (dev->max_port_id + 1) *
+ RTE_MAX_QUEUES_PER_PORT),
RTE_CACHE_LINE_SIZE,
event_dev->data->socket_id);
if (ws == NULL)
return -ENOMEM;
+ /* First cache line is reserved for cookie */
+ ws = (struct otx2_ssogws *)
+ ((uint8_t *)ws + RTE_CACHE_LINE_SIZE);
+
((uint64_t (*)[RTE_MAX_QUEUES_PER_PORT]
)&ws->tx_adptr_data)[eth_port_id][tx_queue_id] =
(uint64_t)txq;
--
2.25.1
^ permalink raw reply [flat|nested] 3+ messages in thread
* Re: [dpdk-stable] [dpdk-dev] [PATCH v2] event/octeontx2: unlink queues during port release
2020-11-19 8:46 ` [dpdk-stable] [PATCH v2] " Shijith Thotton
@ 2020-11-20 12:26 ` Jerin Jacob
0 siblings, 0 replies; 3+ messages in thread
From: Jerin Jacob @ 2020-11-20 12:26 UTC (permalink / raw)
To: Shijith Thotton; +Cc: Jerin Jacob, dpdk-dev, dpdk stable, Pavan Nikhilesh
On Thu, Nov 19, 2020 at 2:17 PM Shijith Thotton <sthotton@marvell.com> wrote:
>
> Unlinking queues from port should be done during port release. Doing it
> during device re-configuration could result in segfault as ports array
> is re-allocated based on new number of ports.
>
> Fixes: f7ac8b66b23c ("event/octeontx2: support linking queues to ports")
> Cc: stable@dpdk.org
>
> Signed-off-by: Shijith Thotton <sthotton@marvell.com>
> Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
Applied to dpdk-next-net-eventdev/for-main. Thanks
> ---
> v2:
> * Took care of cookie during single workslot free and re-allocation.
>
> drivers/event/octeontx2/otx2_evdev.c | 100 ++++++++++++---------
> drivers/event/octeontx2/otx2_evdev.h | 12 +++
> drivers/event/octeontx2/otx2_evdev_adptr.c | 24 +++--
> 3 files changed, 89 insertions(+), 47 deletions(-)
>
> diff --git a/drivers/event/octeontx2/otx2_evdev.c b/drivers/event/octeontx2/otx2_evdev.c
> index b31c26e95..de9e394a4 100644
> --- a/drivers/event/octeontx2/otx2_evdev.c
> +++ b/drivers/event/octeontx2/otx2_evdev.c
> @@ -689,7 +689,36 @@ sso_lf_cfg(struct otx2_sso_evdev *dev, struct otx2_mbox *mbox,
> static void
> otx2_sso_port_release(void *port)
> {
> - rte_free(port);
> + struct otx2_ssogws_cookie *gws_cookie = ssogws_get_cookie(port);
> + struct otx2_sso_evdev *dev;
> + int i;
> +
> + if (!gws_cookie->configured)
> + goto free;
> +
> + dev = sso_pmd_priv(gws_cookie->event_dev);
> + if (dev->dual_ws) {
> + struct otx2_ssogws_dual *ws = port;
> +
> + for (i = 0; i < dev->nb_event_queues; i++) {
> + sso_port_link_modify((struct otx2_ssogws *)
> + &ws->ws_state[0], i, false);
> + sso_port_link_modify((struct otx2_ssogws *)
> + &ws->ws_state[1], i, false);
> + }
> + memset(ws, 0, sizeof(*ws));
> + } else {
> + struct otx2_ssogws *ws = port;
> +
> + for (i = 0; i < dev->nb_event_queues; i++)
> + sso_port_link_modify(ws, i, false);
> + memset(ws, 0, sizeof(*ws));
> + }
> +
> + memset(gws_cookie, 0, sizeof(*gws_cookie));
> +
> +free:
> + rte_free(gws_cookie);
> }
>
> static void
> @@ -699,33 +728,6 @@ otx2_sso_queue_release(struct rte_eventdev *event_dev, uint8_t queue_id)
> RTE_SET_USED(queue_id);
> }
>
> -static void
> -sso_clr_links(const struct rte_eventdev *event_dev)
> -{
> - struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
> - int i, j;
> -
> - for (i = 0; i < dev->nb_event_ports; i++) {
> - if (dev->dual_ws) {
> - struct otx2_ssogws_dual *ws;
> -
> - ws = event_dev->data->ports[i];
> - for (j = 0; j < dev->nb_event_queues; j++) {
> - sso_port_link_modify((struct otx2_ssogws *)
> - &ws->ws_state[0], j, false);
> - sso_port_link_modify((struct otx2_ssogws *)
> - &ws->ws_state[1], j, false);
> - }
> - } else {
> - struct otx2_ssogws *ws;
> -
> - ws = event_dev->data->ports[i];
> - for (j = 0; j < dev->nb_event_queues; j++)
> - sso_port_link_modify(ws, j, false);
> - }
> - }
> -}
> -
> static void
> sso_restore_links(const struct rte_eventdev *event_dev)
> {
> @@ -803,6 +805,7 @@ sso_configure_dual_ports(const struct rte_eventdev *event_dev)
> }
>
> for (i = 0; i < dev->nb_event_ports; i++) {
> + struct otx2_ssogws_cookie *gws_cookie;
> struct otx2_ssogws_dual *ws;
> uintptr_t base;
>
> @@ -811,14 +814,20 @@ sso_configure_dual_ports(const struct rte_eventdev *event_dev)
> } else {
> /* Allocate event port memory */
> ws = rte_zmalloc_socket("otx2_sso_ws",
> - sizeof(struct otx2_ssogws_dual),
> + sizeof(struct otx2_ssogws_dual) +
> + RTE_CACHE_LINE_SIZE,
> RTE_CACHE_LINE_SIZE,
> event_dev->data->socket_id);
> - }
> - if (ws == NULL) {
> - otx2_err("Failed to alloc memory for port=%d", i);
> - rc = -ENOMEM;
> - break;
> + if (ws == NULL) {
> + otx2_err("Failed to alloc memory for port=%d",
> + i);
> + rc = -ENOMEM;
> + break;
> + }
> +
> + /* First cache line is reserved for cookie */
> + ws = (struct otx2_ssogws_dual *)
> + ((uint8_t *)ws + RTE_CACHE_LINE_SIZE);
> }
>
> ws->port = i;
> @@ -830,6 +839,10 @@ sso_configure_dual_ports(const struct rte_eventdev *event_dev)
> sso_set_port_ops((struct otx2_ssogws *)&ws->ws_state[1], base);
> vws++;
>
> + gws_cookie = ssogws_get_cookie(ws);
> + gws_cookie->event_dev = event_dev;
> + gws_cookie->configured = 1;
> +
> event_dev->data->ports[i] = ws;
> }
>
> @@ -866,19 +879,21 @@ sso_configure_ports(const struct rte_eventdev *event_dev)
> }
>
> for (i = 0; i < nb_lf; i++) {
> + struct otx2_ssogws_cookie *gws_cookie;
> struct otx2_ssogws *ws;
> uintptr_t base;
>
> /* Free memory prior to re-allocation if needed */
> if (event_dev->data->ports[i] != NULL) {
> ws = event_dev->data->ports[i];
> - rte_free(ws);
> + rte_free(ssogws_get_cookie(ws));
> ws = NULL;
> }
>
> /* Allocate event port memory */
> ws = rte_zmalloc_socket("otx2_sso_ws",
> - sizeof(struct otx2_ssogws),
> + sizeof(struct otx2_ssogws) +
> + RTE_CACHE_LINE_SIZE,
> RTE_CACHE_LINE_SIZE,
> event_dev->data->socket_id);
> if (ws == NULL) {
> @@ -887,10 +902,18 @@ sso_configure_ports(const struct rte_eventdev *event_dev)
> break;
> }
>
> + /* First cache line is reserved for cookie */
> + ws = (struct otx2_ssogws *)
> + ((uint8_t *)ws + RTE_CACHE_LINE_SIZE);
> +
> ws->port = i;
> base = dev->bar2 + (RVU_BLOCK_ADDR_SSOW << 20 | i << 12);
> sso_set_port_ops(ws, base);
>
> + gws_cookie = ssogws_get_cookie(ws);
> + gws_cookie->event_dev = event_dev;
> + gws_cookie->configured = 1;
> +
> event_dev->data->ports[i] = ws;
> }
>
> @@ -1099,11 +1122,8 @@ otx2_sso_configure(const struct rte_eventdev *event_dev)
> return -EINVAL;
> }
>
> - if (dev->configured) {
> + if (dev->configured)
> sso_unregister_irqs(event_dev);
> - /* Clear any prior port-queue mapping. */
> - sso_clr_links(event_dev);
> - }
>
> if (dev->nb_event_queues) {
> /* Finit any previous queues. */
> diff --git a/drivers/event/octeontx2/otx2_evdev.h b/drivers/event/octeontx2/otx2_evdev.h
> index 547e29d4a..116f953f5 100644
> --- a/drivers/event/octeontx2/otx2_evdev.h
> +++ b/drivers/event/octeontx2/otx2_evdev.h
> @@ -217,6 +217,18 @@ sso_pmd_priv(const struct rte_eventdev *event_dev)
> return event_dev->data->dev_private;
> }
>
> +struct otx2_ssogws_cookie {
> + const struct rte_eventdev *event_dev;
> + bool configured;
> +};
> +
> +static inline struct otx2_ssogws_cookie *
> +ssogws_get_cookie(void *ws)
> +{
> + return (struct otx2_ssogws_cookie *)
> + ((uint8_t *)ws - RTE_CACHE_LINE_SIZE);
> +}
> +
> static const union mbuf_initializer mbuf_init = {
> .fields = {
> .data_off = RTE_PKTMBUF_HEADROOM,
> diff --git a/drivers/event/octeontx2/otx2_evdev_adptr.c b/drivers/event/octeontx2/otx2_evdev_adptr.c
> index 0a5d7924a..d69f269df 100644
> --- a/drivers/event/octeontx2/otx2_evdev_adptr.c
> +++ b/drivers/event/octeontx2/otx2_evdev_adptr.c
> @@ -453,9 +453,10 @@ sso_add_tx_queue_data(const struct rte_eventdev *event_dev,
> struct otx2_ssogws_dual *dws;
>
> old_dws = event_dev->data->ports[i];
> - dws = rte_realloc_socket(old_dws,
> + dws = rte_realloc_socket(ssogws_get_cookie(old_dws),
> sizeof(struct otx2_ssogws_dual)
> - + (sizeof(uint64_t) *
> + + RTE_CACHE_LINE_SIZE +
> + (sizeof(uint64_t) *
> (dev->max_port_id + 1) *
> RTE_MAX_QUEUES_PER_PORT),
> RTE_CACHE_LINE_SIZE,
> @@ -463,6 +464,10 @@ sso_add_tx_queue_data(const struct rte_eventdev *event_dev,
> if (dws == NULL)
> return -ENOMEM;
>
> + /* First cache line is reserved for cookie */
> + dws = (struct otx2_ssogws_dual *)
> + ((uint8_t *)dws + RTE_CACHE_LINE_SIZE);
> +
> ((uint64_t (*)[RTE_MAX_QUEUES_PER_PORT]
> )&dws->tx_adptr_data)[eth_port_id][tx_queue_id] =
> (uint64_t)txq;
> @@ -472,16 +477,21 @@ sso_add_tx_queue_data(const struct rte_eventdev *event_dev,
> struct otx2_ssogws *ws;
>
> old_ws = event_dev->data->ports[i];
> - ws = rte_realloc_socket(old_ws,
> - sizeof(struct otx2_ssogws_dual)
> - + (sizeof(uint64_t) *
> - (dev->max_port_id + 1) *
> - RTE_MAX_QUEUES_PER_PORT),
> + ws = rte_realloc_socket(ssogws_get_cookie(old_ws),
> + sizeof(struct otx2_ssogws) +
> + RTE_CACHE_LINE_SIZE +
> + (sizeof(uint64_t) *
> + (dev->max_port_id + 1) *
> + RTE_MAX_QUEUES_PER_PORT),
> RTE_CACHE_LINE_SIZE,
> event_dev->data->socket_id);
> if (ws == NULL)
> return -ENOMEM;
>
> + /* First cache line is reserved for cookie */
> + ws = (struct otx2_ssogws *)
> + ((uint8_t *)ws + RTE_CACHE_LINE_SIZE);
> +
> ((uint64_t (*)[RTE_MAX_QUEUES_PER_PORT]
> )&ws->tx_adptr_data)[eth_port_id][tx_queue_id] =
> (uint64_t)txq;
> --
> 2.25.1
>
^ permalink raw reply [flat|nested] 3+ messages in thread
end of thread, other threads:[~2020-11-20 12:26 UTC | newest]
Thread overview: 3+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-11-18 8:54 [dpdk-stable] [PATCH] event/octeontx2: unlink queues during port release Shijith Thotton
2020-11-19 8:46 ` [dpdk-stable] [PATCH v2] " Shijith Thotton
2020-11-20 12:26 ` [dpdk-stable] [dpdk-dev] " Jerin Jacob
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).