From: Shijith Thotton <sthotton@marvell.com>
To: <dev@dpdk.org>, <jerinj@marvell.com>
Cc: Pavan Nikhilesh <pbhagavatula@marvell.com>,
<harry.van.haaren@intel.com>, <mattias.ronnblom@ericsson.com>,
Shijith Thotton <sthotton@marvell.com>,
Nithin Dabilpuram <ndabilpuram@marvell.com>,
Kiran Kumar K <kirankumark@marvell.com>,
Sunil Kumar Kori <skori@marvell.com>,
Satha Rao <skoteshwar@marvell.com>
Subject: [PATCH v2 6/6] common/cnxk: use lock when accessing mbox of SSO
Date: Tue, 5 Apr 2022 11:11:03 +0530 [thread overview]
Message-ID: <9c22418754c23d37e29ea63ad476d8743bcb8743.1649136534.git.sthotton@marvell.com> (raw)
In-Reply-To: <cover.1649136534.git.sthotton@marvell.com>
From: Pavan Nikhilesh <pbhagavatula@marvell.com>
Since the mbox is now accessed from multiple threads, use a lock to
synchronize access.
Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
Signed-off-by: Shijith Thotton <sthotton@marvell.com>
---
drivers/common/cnxk/roc_sso.c | 174 +++++++++++++++++++++--------
drivers/common/cnxk/roc_sso_priv.h | 1 +
drivers/common/cnxk/roc_tim.c | 134 ++++++++++++++--------
3 files changed, 215 insertions(+), 94 deletions(-)
diff --git a/drivers/common/cnxk/roc_sso.c b/drivers/common/cnxk/roc_sso.c
index f8a0a96533..358d37a9f2 100644
--- a/drivers/common/cnxk/roc_sso.c
+++ b/drivers/common/cnxk/roc_sso.c
@@ -36,8 +36,8 @@ sso_lf_alloc(struct dev *dev, enum sso_lf_type lf_type, uint16_t nb_lf,
}
rc = mbox_process_msg(dev->mbox, rsp);
- if (rc < 0)
- return rc;
+ if (rc)
+ return -EIO;
return 0;
}
@@ -69,8 +69,8 @@ sso_lf_free(struct dev *dev, enum sso_lf_type lf_type, uint16_t nb_lf)
}
rc = mbox_process(dev->mbox);
- if (rc < 0)
- return rc;
+ if (rc)
+ return -EIO;
return 0;
}
@@ -98,7 +98,7 @@ sso_rsrc_attach(struct roc_sso *roc_sso, enum sso_lf_type lf_type,
}
req->modify = true;
- if (mbox_process(dev->mbox) < 0)
+ if (mbox_process(dev->mbox))
return -EIO;
return 0;
@@ -126,7 +126,7 @@ sso_rsrc_detach(struct roc_sso *roc_sso, enum sso_lf_type lf_type)
}
req->partial = true;
- if (mbox_process(dev->mbox) < 0)
+ if (mbox_process(dev->mbox))
return -EIO;
return 0;
@@ -141,9 +141,9 @@ sso_rsrc_get(struct roc_sso *roc_sso)
mbox_alloc_msg_free_rsrc_cnt(dev->mbox);
rc = mbox_process_msg(dev->mbox, (void **)&rsrc_cnt);
- if (rc < 0) {
+ if (rc) {
plt_err("Failed to get free resource count\n");
- return rc;
+ return -EIO;
}
roc_sso->max_hwgrp = rsrc_cnt->sso;
@@ -197,8 +197,8 @@ sso_msix_fill(struct roc_sso *roc_sso, uint16_t nb_hws, uint16_t nb_hwgrp)
mbox_alloc_msg_msix_offset(dev->mbox);
rc = mbox_process_msg(dev->mbox, (void **)&rsp);
- if (rc < 0)
- return rc;
+ if (rc)
+ return -EIO;
for (i = 0; i < nb_hws; i++)
sso->hws_msix_offset[i] = rsp->ssow_msixoff[i];
@@ -285,53 +285,71 @@ int
roc_sso_hws_stats_get(struct roc_sso *roc_sso, uint8_t hws,
struct roc_sso_hws_stats *stats)
{
- struct dev *dev = &roc_sso_to_sso_priv(roc_sso)->dev;
+ struct sso *sso = roc_sso_to_sso_priv(roc_sso);
struct sso_hws_stats *req_rsp;
+ struct dev *dev = &sso->dev;
int rc;
+ plt_spinlock_lock(&sso->mbox_lock);
req_rsp = (struct sso_hws_stats *)mbox_alloc_msg_sso_hws_get_stats(
dev->mbox);
if (req_rsp == NULL) {
rc = mbox_process(dev->mbox);
- if (rc < 0)
- return rc;
+ if (rc) {
+ rc = -EIO;
+ goto fail;
+ }
req_rsp = (struct sso_hws_stats *)
mbox_alloc_msg_sso_hws_get_stats(dev->mbox);
- if (req_rsp == NULL)
- return -ENOSPC;
+ if (req_rsp == NULL) {
+ rc = -ENOSPC;
+ goto fail;
+ }
}
req_rsp->hws = hws;
rc = mbox_process_msg(dev->mbox, (void **)&req_rsp);
- if (rc)
- return rc;
+ if (rc) {
+ rc = -EIO;
+ goto fail;
+ }
stats->arbitration = req_rsp->arbitration;
- return 0;
+fail:
+ plt_spinlock_unlock(&sso->mbox_lock);
+ return rc;
}
int
roc_sso_hwgrp_stats_get(struct roc_sso *roc_sso, uint8_t hwgrp,
struct roc_sso_hwgrp_stats *stats)
{
- struct dev *dev = &roc_sso_to_sso_priv(roc_sso)->dev;
+ struct sso *sso = roc_sso_to_sso_priv(roc_sso);
struct sso_grp_stats *req_rsp;
+ struct dev *dev = &sso->dev;
int rc;
+ plt_spinlock_lock(&sso->mbox_lock);
req_rsp = (struct sso_grp_stats *)mbox_alloc_msg_sso_grp_get_stats(
dev->mbox);
if (req_rsp == NULL) {
rc = mbox_process(dev->mbox);
- if (rc < 0)
- return rc;
+ if (rc) {
+ rc = -EIO;
+ goto fail;
+ }
req_rsp = (struct sso_grp_stats *)
mbox_alloc_msg_sso_grp_get_stats(dev->mbox);
- if (req_rsp == NULL)
- return -ENOSPC;
+ if (req_rsp == NULL) {
+ rc = -ENOSPC;
+ goto fail;
+ }
}
req_rsp->grp = hwgrp;
rc = mbox_process_msg(dev->mbox, (void **)&req_rsp);
- if (rc)
- return rc;
+ if (rc) {
+ rc = -EIO;
+ goto fail;
+ }
stats->aw_status = req_rsp->aw_status;
stats->dq_pc = req_rsp->dq_pc;
@@ -341,7 +359,10 @@ roc_sso_hwgrp_stats_get(struct roc_sso *roc_sso, uint8_t hwgrp,
stats->ts_pc = req_rsp->ts_pc;
stats->wa_pc = req_rsp->wa_pc;
stats->ws_pc = req_rsp->ws_pc;
- return 0;
+
+fail:
+ plt_spinlock_unlock(&sso->mbox_lock);
+ return rc;
}
int
@@ -358,10 +379,12 @@ int
roc_sso_hwgrp_qos_config(struct roc_sso *roc_sso, struct roc_sso_hwgrp_qos *qos,
uint8_t nb_qos, uint32_t nb_xaq)
{
- struct dev *dev = &roc_sso_to_sso_priv(roc_sso)->dev;
+ struct sso *sso = roc_sso_to_sso_priv(roc_sso);
+ struct dev *dev = &sso->dev;
struct sso_grp_qos_cfg *req;
int i, rc;
+ plt_spinlock_lock(&sso->mbox_lock);
for (i = 0; i < nb_qos; i++) {
uint8_t xaq_prcnt = qos[i].xaq_prcnt;
uint8_t iaq_prcnt = qos[i].iaq_prcnt;
@@ -370,11 +393,16 @@ roc_sso_hwgrp_qos_config(struct roc_sso *roc_sso, struct roc_sso_hwgrp_qos *qos,
req = mbox_alloc_msg_sso_grp_qos_config(dev->mbox);
if (req == NULL) {
rc = mbox_process(dev->mbox);
- if (rc < 0)
- return rc;
+ if (rc) {
+ rc = -EIO;
+ goto fail;
+ }
+
req = mbox_alloc_msg_sso_grp_qos_config(dev->mbox);
- if (req == NULL)
- return -ENOSPC;
+ if (req == NULL) {
+ rc = -ENOSPC;
+ goto fail;
+ }
}
req->grp = qos[i].hwgrp;
req->xaq_limit = (nb_xaq * (xaq_prcnt ? xaq_prcnt : 100)) / 100;
@@ -386,7 +414,12 @@ roc_sso_hwgrp_qos_config(struct roc_sso *roc_sso, struct roc_sso_hwgrp_qos *qos,
100;
}
- return mbox_process(dev->mbox);
+ rc = mbox_process(dev->mbox);
+ if (rc)
+ rc = -EIO;
+fail:
+ plt_spinlock_unlock(&sso->mbox_lock);
+ return rc;
}
int
@@ -482,11 +515,16 @@ sso_hwgrp_init_xaq_aura(struct dev *dev, struct roc_sso_xaq_data *xaq,
int
roc_sso_hwgrp_init_xaq_aura(struct roc_sso *roc_sso, uint32_t nb_xae)
{
- struct dev *dev = &roc_sso_to_sso_priv(roc_sso)->dev;
+ struct sso *sso = roc_sso_to_sso_priv(roc_sso);
+ struct dev *dev = &sso->dev;
+ int rc;
- return sso_hwgrp_init_xaq_aura(dev, &roc_sso->xaq, nb_xae,
- roc_sso->xae_waes, roc_sso->xaq_buf_size,
- roc_sso->nb_hwgrp);
+ plt_spinlock_lock(&sso->mbox_lock);
+ rc = sso_hwgrp_init_xaq_aura(dev, &roc_sso->xaq, nb_xae,
+ roc_sso->xae_waes, roc_sso->xaq_buf_size,
+ roc_sso->nb_hwgrp);
+ plt_spinlock_unlock(&sso->mbox_lock);
+ return rc;
}
int
@@ -515,9 +553,14 @@ sso_hwgrp_free_xaq_aura(struct dev *dev, struct roc_sso_xaq_data *xaq,
int
roc_sso_hwgrp_free_xaq_aura(struct roc_sso *roc_sso, uint16_t nb_hwgrp)
{
- struct dev *dev = &roc_sso_to_sso_priv(roc_sso)->dev;
+ struct sso *sso = roc_sso_to_sso_priv(roc_sso);
+ struct dev *dev = &sso->dev;
+ int rc;
- return sso_hwgrp_free_xaq_aura(dev, &roc_sso->xaq, nb_hwgrp);
+ plt_spinlock_lock(&sso->mbox_lock);
+ rc = sso_hwgrp_free_xaq_aura(dev, &roc_sso->xaq, nb_hwgrp);
+ plt_spinlock_unlock(&sso->mbox_lock);
+ return rc;
}
int
@@ -533,16 +576,24 @@ sso_hwgrp_alloc_xaq(struct dev *dev, uint32_t npa_aura_id, uint16_t hwgrps)
req->npa_aura_id = npa_aura_id;
req->hwgrps = hwgrps;
- return mbox_process(dev->mbox);
+ if (mbox_process(dev->mbox))
+ return -EIO;
+
+ return 0;
}
int
roc_sso_hwgrp_alloc_xaq(struct roc_sso *roc_sso, uint32_t npa_aura_id,
uint16_t hwgrps)
{
- struct dev *dev = &roc_sso_to_sso_priv(roc_sso)->dev;
+ struct sso *sso = roc_sso_to_sso_priv(roc_sso);
+ struct dev *dev = &sso->dev;
+ int rc;
- return sso_hwgrp_alloc_xaq(dev, npa_aura_id, hwgrps);
+ plt_spinlock_lock(&sso->mbox_lock);
+ rc = sso_hwgrp_alloc_xaq(dev, npa_aura_id, hwgrps);
+ plt_spinlock_unlock(&sso->mbox_lock);
+ return rc;
}
int
@@ -555,40 +606,56 @@ sso_hwgrp_release_xaq(struct dev *dev, uint16_t hwgrps)
return -EINVAL;
req->hwgrps = hwgrps;
- return mbox_process(dev->mbox);
+ if (mbox_process(dev->mbox))
+ return -EIO;
+
+ return 0;
}
int
roc_sso_hwgrp_release_xaq(struct roc_sso *roc_sso, uint16_t hwgrps)
{
- struct dev *dev = &roc_sso_to_sso_priv(roc_sso)->dev;
+ struct sso *sso = roc_sso_to_sso_priv(roc_sso);
+ struct dev *dev = &sso->dev;
+ int rc;
- return sso_hwgrp_release_xaq(dev, hwgrps);
+ plt_spinlock_lock(&sso->mbox_lock);
+ rc = sso_hwgrp_release_xaq(dev, hwgrps);
+ plt_spinlock_unlock(&sso->mbox_lock);
+ return rc;
}
int
roc_sso_hwgrp_set_priority(struct roc_sso *roc_sso, uint16_t hwgrp,
uint8_t weight, uint8_t affinity, uint8_t priority)
{
- struct dev *dev = &roc_sso_to_sso_priv(roc_sso)->dev;
+ struct sso *sso = roc_sso_to_sso_priv(roc_sso);
+ struct dev *dev = &sso->dev;
struct sso_grp_priority *req;
int rc = -ENOSPC;
+ plt_spinlock_lock(&sso->mbox_lock);
req = mbox_alloc_msg_sso_grp_set_priority(dev->mbox);
if (req == NULL)
- return rc;
+ goto fail;
req->grp = hwgrp;
req->weight = weight;
req->affinity = affinity;
req->priority = priority;
rc = mbox_process(dev->mbox);
- if (rc < 0)
- return rc;
+ if (rc) {
+ rc = -EIO;
+ goto fail;
+ }
+ plt_spinlock_unlock(&sso->mbox_lock);
plt_sso_dbg("HWGRP %d weight %d affinity %d priority %d", hwgrp, weight,
affinity, priority);
return 0;
+fail:
+ plt_spinlock_unlock(&sso->mbox_lock);
+ return rc;
}
int
@@ -603,10 +670,11 @@ roc_sso_rsrc_init(struct roc_sso *roc_sso, uint8_t nb_hws, uint16_t nb_hwgrp)
if (roc_sso->max_hws < nb_hws)
return -ENOENT;
+ plt_spinlock_lock(&sso->mbox_lock);
rc = sso_rsrc_attach(roc_sso, SSO_LF_TYPE_HWS, nb_hws);
if (rc < 0) {
plt_err("Unable to attach SSO HWS LFs");
- return rc;
+ goto fail;
}
rc = sso_rsrc_attach(roc_sso, SSO_LF_TYPE_HWGRP, nb_hwgrp);
@@ -645,6 +713,7 @@ roc_sso_rsrc_init(struct roc_sso *roc_sso, uint8_t nb_hws, uint16_t nb_hwgrp)
goto sso_msix_fail;
}
+ plt_spinlock_unlock(&sso->mbox_lock);
roc_sso->nb_hwgrp = nb_hwgrp;
roc_sso->nb_hws = nb_hws;
@@ -657,6 +726,8 @@ roc_sso_rsrc_init(struct roc_sso *roc_sso, uint8_t nb_hws, uint16_t nb_hwgrp)
sso_rsrc_detach(roc_sso, SSO_LF_TYPE_HWGRP);
hwgrp_atch_fail:
sso_rsrc_detach(roc_sso, SSO_LF_TYPE_HWS);
+fail:
+ plt_spinlock_unlock(&sso->mbox_lock);
return rc;
}
@@ -678,6 +749,7 @@ roc_sso_rsrc_fini(struct roc_sso *roc_sso)
roc_sso->nb_hwgrp = 0;
roc_sso->nb_hws = 0;
+ plt_spinlock_unlock(&sso->mbox_lock);
}
int
@@ -696,6 +768,7 @@ roc_sso_dev_init(struct roc_sso *roc_sso)
sso = roc_sso_to_sso_priv(roc_sso);
memset(sso, 0, sizeof(*sso));
pci_dev = roc_sso->pci_dev;
+ plt_spinlock_init(&sso->mbox_lock);
rc = dev_init(&sso->dev, pci_dev);
if (rc < 0) {
@@ -703,6 +776,7 @@ roc_sso_dev_init(struct roc_sso *roc_sso)
goto fail;
}
+ plt_spinlock_lock(&sso->mbox_lock);
rc = sso_rsrc_get(roc_sso);
if (rc < 0) {
plt_err("Failed to get SSO resources");
@@ -739,6 +813,7 @@ roc_sso_dev_init(struct roc_sso *roc_sso)
sso->pci_dev = pci_dev;
sso->dev.drv_inited = true;
roc_sso->lmt_base = sso->dev.lmt_base;
+ plt_spinlock_unlock(&sso->mbox_lock);
return 0;
link_mem_free:
@@ -746,6 +821,7 @@ roc_sso_dev_init(struct roc_sso *roc_sso)
rsrc_fail:
rc |= dev_fini(&sso->dev, pci_dev);
fail:
+ plt_spinlock_unlock(&sso->mbox_lock);
return rc;
}
diff --git a/drivers/common/cnxk/roc_sso_priv.h b/drivers/common/cnxk/roc_sso_priv.h
index 09729d4f62..674e4e0a39 100644
--- a/drivers/common/cnxk/roc_sso_priv.h
+++ b/drivers/common/cnxk/roc_sso_priv.h
@@ -22,6 +22,7 @@ struct sso {
/* SSO link mapping. */
struct plt_bitmap **link_map;
void *link_map_mem;
+ plt_spinlock_t mbox_lock;
} __plt_cache_aligned;
enum sso_err_status {
diff --git a/drivers/common/cnxk/roc_tim.c b/drivers/common/cnxk/roc_tim.c
index cefd9bc89d..0f9209937b 100644
--- a/drivers/common/cnxk/roc_tim.c
+++ b/drivers/common/cnxk/roc_tim.c
@@ -8,15 +8,16 @@
static int
tim_fill_msix(struct roc_tim *roc_tim, uint16_t nb_ring)
{
- struct dev *dev = &roc_sso_to_sso_priv(roc_tim->roc_sso)->dev;
+ struct sso *sso = roc_sso_to_sso_priv(roc_tim->roc_sso);
struct tim *tim = roc_tim_to_tim_priv(roc_tim);
+ struct dev *dev = &sso->dev;
struct msix_offset_rsp *rsp;
int i, rc;
mbox_alloc_msg_msix_offset(dev->mbox);
rc = mbox_process_msg(dev->mbox, (void **)&rsp);
- if (rc < 0)
- return rc;
+ if (rc)
+ return -EIO;
for (i = 0; i < nb_ring; i++)
tim->tim_msix_offsets[i] = rsp->timlf_msixoff[i];
@@ -88,20 +89,23 @@ int
roc_tim_lf_enable(struct roc_tim *roc_tim, uint8_t ring_id, uint64_t *start_tsc,
uint32_t *cur_bkt)
{
- struct dev *dev = &roc_sso_to_sso_priv(roc_tim->roc_sso)->dev;
+ struct sso *sso = roc_sso_to_sso_priv(roc_tim->roc_sso);
+ struct dev *dev = &sso->dev;
struct tim_enable_rsp *rsp;
struct tim_ring_req *req;
int rc = -ENOSPC;
+ plt_spinlock_lock(&sso->mbox_lock);
req = mbox_alloc_msg_tim_enable_ring(dev->mbox);
if (req == NULL)
- return rc;
+ goto fail;
req->ring = ring_id;
rc = mbox_process_msg(dev->mbox, (void **)&rsp);
- if (rc < 0) {
+ if (rc) {
tim_err_desc(rc);
- return rc;
+ rc = -EIO;
+ goto fail;
}
if (cur_bkt)
@@ -109,28 +113,34 @@ roc_tim_lf_enable(struct roc_tim *roc_tim, uint8_t ring_id, uint64_t *start_tsc,
if (start_tsc)
*start_tsc = rsp->timestarted;
- return 0;
+fail:
+ plt_spinlock_unlock(&sso->mbox_lock);
+ return rc;
}
int
roc_tim_lf_disable(struct roc_tim *roc_tim, uint8_t ring_id)
{
- struct dev *dev = &roc_sso_to_sso_priv(roc_tim->roc_sso)->dev;
+ struct sso *sso = roc_sso_to_sso_priv(roc_tim->roc_sso);
+ struct dev *dev = &sso->dev;
struct tim_ring_req *req;
int rc = -ENOSPC;
+ plt_spinlock_lock(&sso->mbox_lock);
req = mbox_alloc_msg_tim_disable_ring(dev->mbox);
if (req == NULL)
- return rc;
+ goto fail;
req->ring = ring_id;
rc = mbox_process(dev->mbox);
- if (rc < 0) {
+ if (rc) {
tim_err_desc(rc);
- return rc;
+ rc = -EIO;
}
- return 0;
+fail:
+ plt_spinlock_unlock(&sso->mbox_lock);
+ return rc;
}
uintptr_t
@@ -147,13 +157,15 @@ roc_tim_lf_config(struct roc_tim *roc_tim, uint8_t ring_id,
uint8_t ena_dfb, uint32_t bucket_sz, uint32_t chunk_sz,
uint32_t interval, uint64_t intervalns, uint64_t clockfreq)
{
- struct dev *dev = &roc_sso_to_sso_priv(roc_tim->roc_sso)->dev;
+ struct sso *sso = roc_sso_to_sso_priv(roc_tim->roc_sso);
+ struct dev *dev = &sso->dev;
struct tim_config_req *req;
int rc = -ENOSPC;
+ plt_spinlock_lock(&sso->mbox_lock);
req = mbox_alloc_msg_tim_config_ring(dev->mbox);
if (req == NULL)
- return rc;
+ goto fail;
req->ring = ring_id;
req->bigendian = false;
req->bucketsize = bucket_sz;
@@ -167,12 +179,14 @@ roc_tim_lf_config(struct roc_tim *roc_tim, uint8_t ring_id,
req->gpioedge = TIM_GPIO_LTOH_TRANS;
rc = mbox_process(dev->mbox);
- if (rc < 0) {
+ if (rc) {
tim_err_desc(rc);
- return rc;
+ rc = -EIO;
}
- return 0;
+fail:
+ plt_spinlock_unlock(&sso->mbox_lock);
+ return rc;
}
int
@@ -180,27 +194,32 @@ roc_tim_lf_interval(struct roc_tim *roc_tim, enum roc_tim_clk_src clk_src,
uint64_t clockfreq, uint64_t *intervalns,
uint64_t *interval)
{
- struct dev *dev = &roc_sso_to_sso_priv(roc_tim->roc_sso)->dev;
+ struct sso *sso = roc_sso_to_sso_priv(roc_tim->roc_sso);
+ struct dev *dev = &sso->dev;
struct tim_intvl_req *req;
struct tim_intvl_rsp *rsp;
int rc = -ENOSPC;
+ plt_spinlock_lock(&sso->mbox_lock);
req = mbox_alloc_msg_tim_get_min_intvl(dev->mbox);
if (req == NULL)
- return rc;
+ goto fail;
req->clockfreq = clockfreq;
req->clocksource = clk_src;
rc = mbox_process_msg(dev->mbox, (void **)&rsp);
- if (rc < 0) {
+ if (rc) {
tim_err_desc(rc);
- return rc;
+ rc = -EIO;
+ goto fail;
}
*intervalns = rsp->intvl_ns;
*interval = rsp->intvl_cyc;
- return 0;
+fail:
+ plt_spinlock_unlock(&sso->mbox_lock);
+ return rc;
}
int
@@ -214,17 +233,19 @@ roc_tim_lf_alloc(struct roc_tim *roc_tim, uint8_t ring_id, uint64_t *clk)
struct dev *dev = &sso->dev;
int rc = -ENOSPC;
+ plt_spinlock_lock(&sso->mbox_lock);
req = mbox_alloc_msg_tim_lf_alloc(dev->mbox);
if (req == NULL)
- return rc;
+ goto fail;
req->npa_pf_func = idev_npa_pffunc_get();
req->sso_pf_func = idev_sso_pffunc_get();
req->ring = ring_id;
rc = mbox_process_msg(dev->mbox, (void **)&rsp);
- if (rc < 0) {
+ if (rc) {
tim_err_desc(rc);
- return rc;
+ rc = -EIO;
+ goto fail;
}
if (clk)
@@ -235,12 +256,18 @@ roc_tim_lf_alloc(struct roc_tim *roc_tim, uint8_t ring_id, uint64_t *clk)
if (rc < 0) {
plt_tim_dbg("Failed to register Ring[%d] IRQ", ring_id);
free_req = mbox_alloc_msg_tim_lf_free(dev->mbox);
- if (free_req == NULL)
- return -ENOSPC;
+ if (free_req == NULL) {
+ rc = -ENOSPC;
+ goto fail;
+ }
free_req->ring = ring_id;
- mbox_process(dev->mbox);
+ rc = mbox_process(dev->mbox);
+ if (rc)
+ rc = -EIO;
}
+fail:
+ plt_spinlock_unlock(&sso->mbox_lock);
return rc;
}
@@ -256,17 +283,20 @@ roc_tim_lf_free(struct roc_tim *roc_tim, uint8_t ring_id)
tim_unregister_irq_priv(roc_tim, sso->pci_dev->intr_handle, ring_id,
tim->tim_msix_offsets[ring_id]);
+ plt_spinlock_lock(&sso->mbox_lock);
req = mbox_alloc_msg_tim_lf_free(dev->mbox);
if (req == NULL)
- return rc;
+ goto fail;
req->ring = ring_id;
rc = mbox_process(dev->mbox);
if (rc < 0) {
tim_err_desc(rc);
- return rc;
+ rc = -EIO;
}
+fail:
+ plt_spinlock_unlock(&sso->mbox_lock);
return 0;
}
@@ -276,40 +306,48 @@ roc_tim_init(struct roc_tim *roc_tim)
struct rsrc_attach_req *attach_req;
struct rsrc_detach_req *detach_req;
struct free_rsrcs_rsp *free_rsrc;
- struct dev *dev;
+ struct sso *sso;
uint16_t nb_lfs;
+ struct dev *dev;
int rc;
if (roc_tim == NULL || roc_tim->roc_sso == NULL)
return TIM_ERR_PARAM;
+ sso = roc_sso_to_sso_priv(roc_tim->roc_sso);
+ dev = &sso->dev;
PLT_STATIC_ASSERT(sizeof(struct tim) <= TIM_MEM_SZ);
- dev = &roc_sso_to_sso_priv(roc_tim->roc_sso)->dev;
nb_lfs = roc_tim->nb_lfs;
+ plt_spinlock_lock(&sso->mbox_lock);
mbox_alloc_msg_free_rsrc_cnt(dev->mbox);
rc = mbox_process_msg(dev->mbox, (void *)&free_rsrc);
- if (rc < 0) {
+ if (rc) {
plt_err("Unable to get free rsrc count.");
- return 0;
+ nb_lfs = 0;
+ goto fail;
}
if (nb_lfs && (free_rsrc->tim < nb_lfs)) {
plt_tim_dbg("Requested LFs : %d Available LFs : %d", nb_lfs,
free_rsrc->tim);
- return 0;
+ nb_lfs = 0;
+ goto fail;
}
attach_req = mbox_alloc_msg_attach_resources(dev->mbox);
- if (attach_req == NULL)
- return -ENOSPC;
+ if (attach_req == NULL) {
+ nb_lfs = 0;
+ goto fail;
+ }
attach_req->modify = true;
attach_req->timlfs = nb_lfs ? nb_lfs : free_rsrc->tim;
nb_lfs = attach_req->timlfs;
rc = mbox_process(dev->mbox);
- if (rc < 0) {
+ if (rc) {
plt_err("Unable to attach TIM LFs.");
- return 0;
+ nb_lfs = 0;
+ goto fail;
}
rc = tim_fill_msix(roc_tim, nb_lfs);
@@ -317,28 +355,34 @@ roc_tim_init(struct roc_tim *roc_tim)
plt_err("Unable to get TIM MSIX vectors");
detach_req = mbox_alloc_msg_detach_resources(dev->mbox);
- if (detach_req == NULL)
- return -ENOSPC;
+ if (detach_req == NULL) {
+ nb_lfs = 0;
+ goto fail;
+ }
detach_req->partial = true;
detach_req->timlfs = true;
mbox_process(dev->mbox);
-
- return 0;
+ nb_lfs = 0;
}
+fail:
+ plt_spinlock_unlock(&sso->mbox_lock);
return nb_lfs;
}
void
roc_tim_fini(struct roc_tim *roc_tim)
{
- struct dev *dev = &roc_sso_to_sso_priv(roc_tim->roc_sso)->dev;
+ struct sso *sso = roc_sso_to_sso_priv(roc_tim->roc_sso);
struct rsrc_detach_req *detach_req;
+ struct dev *dev = &sso->dev;
+ plt_spinlock_lock(&sso->mbox_lock);
detach_req = mbox_alloc_msg_detach_resources(dev->mbox);
PLT_ASSERT(detach_req);
detach_req->partial = true;
detach_req->timlfs = true;
mbox_process(dev->mbox);
+ plt_spinlock_unlock(&sso->mbox_lock);
}
--
2.25.1
next prev parent reply other threads:[~2022-04-05 5:42 UTC|newest]
Thread overview: 58+ messages / expand[flat|nested] mbox.gz Atom feed top
2022-03-29 13:10 [PATCH 0/6] Extend and set event queue attributes at runtime Shijith Thotton
2022-03-29 13:11 ` [PATCH 1/6] eventdev: support to set " Shijith Thotton
2022-03-30 10:58 ` Van Haaren, Harry
2022-04-04 9:35 ` Shijith Thotton
2022-04-04 9:45 ` Van Haaren, Harry
2022-03-30 12:14 ` Mattias Rönnblom
2022-04-04 11:45 ` Shijith Thotton
2022-03-29 13:11 ` [PATCH 2/6] eventdev: add weight and affinity to queue attributes Shijith Thotton
2022-03-30 12:12 ` Mattias Rönnblom
2022-04-04 9:33 ` Shijith Thotton
2022-03-29 13:11 ` [PATCH 3/6] doc: announce change in event queue conf structure Shijith Thotton
2022-03-29 13:11 ` [PATCH 4/6] test/event: test cases to test runtime queue attribute Shijith Thotton
2022-03-29 13:11 ` [PATCH 5/6] event/cnxk: support to set runtime queue attributes Shijith Thotton
2022-03-30 11:05 ` Van Haaren, Harry
2022-04-04 7:59 ` Shijith Thotton
2022-03-29 13:11 ` [PATCH 6/6] common/cnxk: use lock when accessing mbox of SSO Shijith Thotton
2022-03-29 18:49 ` [PATCH 0/6] Extend and set event queue attributes at runtime Jerin Jacob
2022-03-30 10:52 ` Van Haaren, Harry
2022-04-04 7:57 ` Shijith Thotton
2022-04-05 5:40 ` [PATCH v2 " Shijith Thotton
2022-04-05 5:40 ` [PATCH v2 1/6] eventdev: support to set " Shijith Thotton
2022-05-09 12:43 ` Jerin Jacob
2022-04-05 5:40 ` [PATCH v2 2/6] eventdev: add weight and affinity to queue attributes Shijith Thotton
2022-05-09 12:46 ` Jerin Jacob
2022-04-05 5:41 ` [PATCH v2 3/6] doc: announce change in event queue conf structure Shijith Thotton
2022-05-09 12:47 ` Jerin Jacob
2022-05-15 10:24 ` [PATCH v3] " Shijith Thotton
2022-07-12 14:05 ` Jerin Jacob
2022-07-13 6:52 ` [EXT] " Pavan Nikhilesh Bhagavatula
2022-07-13 8:55 ` Mattias Rönnblom
2022-07-13 9:56 ` Pavan Nikhilesh Bhagavatula
2022-07-17 12:52 ` Thomas Monjalon
2022-04-05 5:41 ` [PATCH v2 4/6] test/event: test cases to test runtime queue attribute Shijith Thotton
2022-05-09 12:55 ` Jerin Jacob
2022-04-05 5:41 ` [PATCH v2 5/6] event/cnxk: support to set runtime queue attributes Shijith Thotton
2022-05-09 12:57 ` Jerin Jacob
2022-04-05 5:41 ` Shijith Thotton [this message]
2022-04-11 11:07 ` [PATCH v2 0/6] Extend and set event queue attributes at runtime Shijith Thotton
2022-05-15 9:53 ` [PATCH v3 0/5] " Shijith Thotton
2022-05-15 9:53 ` [PATCH v3 1/5] eventdev: support to set " Shijith Thotton
2022-05-15 13:11 ` Mattias Rönnblom
2022-05-16 3:57 ` Shijith Thotton
2022-05-16 10:23 ` Mattias Rönnblom
2022-05-16 12:12 ` Shijith Thotton
2022-05-15 9:53 ` [PATCH v3 2/5] eventdev: add weight and affinity to queue attributes Shijith Thotton
2022-05-15 9:53 ` [PATCH v3 3/5] test/event: test cases to test runtime queue attribute Shijith Thotton
2022-05-15 9:53 ` [PATCH v3 4/5] common/cnxk: use lock when accessing mbox of SSO Shijith Thotton
2022-05-15 9:53 ` [PATCH v3 5/5] event/cnxk: support to set runtime queue attributes Shijith Thotton
2022-05-16 17:35 ` [PATCH v4 0/5] Extend and set event queue attributes at runtime Shijith Thotton
2022-05-16 17:35 ` [PATCH v4 1/5] eventdev: support to set " Shijith Thotton
2022-05-16 18:02 ` Jerin Jacob
2022-05-17 8:55 ` Mattias Rönnblom
2022-05-17 13:35 ` Jerin Jacob
2022-05-19 8:49 ` Ray Kinsella
2022-05-16 17:35 ` [PATCH v4 2/5] eventdev: add weight and affinity to queue attributes Shijith Thotton
2022-05-16 17:35 ` [PATCH v4 3/5] test/event: test cases to test runtime queue attribute Shijith Thotton
2022-05-16 17:35 ` [PATCH v4 4/5] common/cnxk: use lock when accessing mbox of SSO Shijith Thotton
2022-05-16 17:35 ` [PATCH v4 5/5] event/cnxk: support to set runtime queue attributes Shijith Thotton
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=9c22418754c23d37e29ea63ad476d8743bcb8743.1649136534.git.sthotton@marvell.com \
--to=sthotton@marvell.com \
--cc=dev@dpdk.org \
--cc=harry.van.haaren@intel.com \
--cc=jerinj@marvell.com \
--cc=kirankumark@marvell.com \
--cc=mattias.ronnblom@ericsson.com \
--cc=ndabilpuram@marvell.com \
--cc=pbhagavatula@marvell.com \
--cc=skori@marvell.com \
--cc=skoteshwar@marvell.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).