From: <pbhagavatula@marvell.com>
To: <jerinj@marvell.com>, Nithin Dabilpuram <ndabilpuram@marvell.com>,
"Kiran Kumar K" <kirankumark@marvell.com>,
Sunil Kumar Kori <skori@marvell.com>,
Satha Rao <skoteshwar@marvell.com>
Cc: <dev@dpdk.org>, Pavan Nikhilesh <pbhagavatula@marvell.com>
Subject: [PATCH] common/cnxk: remove unnecessary locks
Date: Thu, 2 Feb 2023 13:25:09 +0530 [thread overview]
Message-ID: <20230202075509.1698-1-pbhagavatula@marvell.com> (raw)
From: Pavan Nikhilesh <pbhagavatula@marvell.com>
Remove unnecessary locks as locking is now taken care of by
mbox_get and mbox_put.
Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
Depends-on: 26537
drivers/common/cnxk/roc_sso.c | 25 -------------------------
drivers/common/cnxk/roc_sso_priv.h | 1 -
2 files changed, 26 deletions(-)
diff --git a/drivers/common/cnxk/roc_sso.c b/drivers/common/cnxk/roc_sso.c
index 9920d0c604..8f27c004a7 100644
--- a/drivers/common/cnxk/roc_sso.c
+++ b/drivers/common/cnxk/roc_sso.c
@@ -327,7 +327,6 @@ roc_sso_hws_stats_get(struct roc_sso *roc_sso, uint8_t hws,
struct mbox *mbox;
int rc;
- plt_spinlock_lock(&sso->mbox_lock);
mbox = mbox_get(dev->mbox);
req_rsp = (struct sso_hws_stats *)mbox_alloc_msg_sso_hws_get_stats(
mbox);
@@ -354,7 +353,6 @@ roc_sso_hws_stats_get(struct roc_sso *roc_sso, uint8_t hws,
stats->arbitration = req_rsp->arbitration;
fail:
mbox_put(mbox);
- plt_spinlock_unlock(&sso->mbox_lock);
return rc;
}
@@ -368,7 +366,6 @@ roc_sso_hwgrp_stats_get(struct roc_sso *roc_sso, uint8_t hwgrp,
struct mbox *mbox;
int rc;
- plt_spinlock_lock(&sso->mbox_lock);
mbox = mbox_get(dev->mbox);
req_rsp = (struct sso_grp_stats *)mbox_alloc_msg_sso_grp_get_stats(
mbox);
@@ -403,7 +400,6 @@ roc_sso_hwgrp_stats_get(struct roc_sso *roc_sso, uint8_t hwgrp,
fail:
mbox_put(mbox);
- plt_spinlock_unlock(&sso->mbox_lock);
return rc;
}
@@ -427,7 +423,6 @@ roc_sso_hwgrp_qos_config(struct roc_sso *roc_sso, struct roc_sso_hwgrp_qos *qos,
struct mbox *mbox;
int i, rc;
- plt_spinlock_lock(&sso->mbox_lock);
mbox = mbox_get(dev->mbox);
for (i = 0; i < nb_qos; i++) {
uint8_t iaq_prcnt = qos[i].iaq_prcnt;
@@ -461,7 +456,6 @@ roc_sso_hwgrp_qos_config(struct roc_sso *roc_sso, struct roc_sso_hwgrp_qos *qos,
rc = -EIO;
fail:
mbox_put(mbox);
- plt_spinlock_unlock(&sso->mbox_lock);
return rc;
}
@@ -562,11 +556,9 @@ roc_sso_hwgrp_init_xaq_aura(struct roc_sso *roc_sso, uint32_t nb_xae)
struct dev *dev = &sso->dev;
int rc;
- plt_spinlock_lock(&sso->mbox_lock);
rc = sso_hwgrp_init_xaq_aura(dev, &roc_sso->xaq, nb_xae,
roc_sso->xae_waes, roc_sso->xaq_buf_size,
roc_sso->nb_hwgrp);
- plt_spinlock_unlock(&sso->mbox_lock);
return rc;
}
@@ -600,9 +592,7 @@ roc_sso_hwgrp_free_xaq_aura(struct roc_sso *roc_sso, uint16_t nb_hwgrp)
struct dev *dev = &sso->dev;
int rc;
- plt_spinlock_lock(&sso->mbox_lock);
rc = sso_hwgrp_free_xaq_aura(dev, &roc_sso->xaq, nb_hwgrp);
- plt_spinlock_unlock(&sso->mbox_lock);
return rc;
}
@@ -639,9 +629,7 @@ roc_sso_hwgrp_alloc_xaq(struct roc_sso *roc_sso, uint32_t npa_aura_id,
struct dev *dev = &sso->dev;
int rc;
- plt_spinlock_lock(&sso->mbox_lock);
rc = sso_hwgrp_alloc_xaq(dev, npa_aura_id, hwgrps);
- plt_spinlock_unlock(&sso->mbox_lock);
return rc;
}
@@ -677,9 +665,7 @@ roc_sso_hwgrp_release_xaq(struct roc_sso *roc_sso, uint16_t hwgrps)
struct dev *dev = &sso->dev;
int rc;
- plt_spinlock_lock(&sso->mbox_lock);
rc = sso_hwgrp_release_xaq(dev, hwgrps);
- plt_spinlock_unlock(&sso->mbox_lock);
return rc;
}
@@ -693,7 +679,6 @@ roc_sso_hwgrp_set_priority(struct roc_sso *roc_sso, uint16_t hwgrp,
struct mbox *mbox;
int rc = -ENOSPC;
- plt_spinlock_lock(&sso->mbox_lock);
mbox = mbox_get(dev->mbox);
req = mbox_alloc_msg_sso_grp_set_priority(mbox);
if (req == NULL)
@@ -709,14 +694,12 @@ roc_sso_hwgrp_set_priority(struct roc_sso *roc_sso, uint16_t hwgrp,
goto fail;
}
mbox_put(mbox);
- plt_spinlock_unlock(&sso->mbox_lock);
plt_sso_dbg("HWGRP %d weight %d affinity %d priority %d", hwgrp, weight,
affinity, priority);
return 0;
fail:
mbox_put(mbox);
- plt_spinlock_unlock(&sso->mbox_lock);
return rc;
}
@@ -732,7 +715,6 @@ roc_sso_rsrc_init(struct roc_sso *roc_sso, uint8_t nb_hws, uint16_t nb_hwgrp)
if (!nb_hws || roc_sso->max_hws < nb_hws)
return -ENOENT;
- plt_spinlock_lock(&sso->mbox_lock);
rc = sso_rsrc_attach(roc_sso, SSO_LF_TYPE_HWS, nb_hws);
if (rc < 0) {
plt_err("Unable to attach SSO HWS LFs");
@@ -775,7 +757,6 @@ roc_sso_rsrc_init(struct roc_sso *roc_sso, uint8_t nb_hws, uint16_t nb_hwgrp)
goto sso_msix_fail;
}
- plt_spinlock_unlock(&sso->mbox_lock);
roc_sso->nb_hwgrp = nb_hwgrp;
roc_sso->nb_hws = nb_hws;
@@ -789,7 +770,6 @@ roc_sso_rsrc_init(struct roc_sso *roc_sso, uint8_t nb_hws, uint16_t nb_hwgrp)
hwgrp_atch_fail:
sso_rsrc_detach(roc_sso, SSO_LF_TYPE_HWS);
fail:
- plt_spinlock_unlock(&sso->mbox_lock);
return rc;
}
@@ -811,7 +791,6 @@ roc_sso_rsrc_fini(struct roc_sso *roc_sso)
roc_sso->nb_hwgrp = 0;
roc_sso->nb_hws = 0;
- plt_spinlock_unlock(&sso->mbox_lock);
}
int
@@ -830,7 +809,6 @@ roc_sso_dev_init(struct roc_sso *roc_sso)
sso = roc_sso_to_sso_priv(roc_sso);
memset(sso, 0, sizeof(*sso));
pci_dev = roc_sso->pci_dev;
- plt_spinlock_init(&sso->mbox_lock);
rc = dev_init(&sso->dev, pci_dev);
if (rc < 0) {
@@ -838,7 +816,6 @@ roc_sso_dev_init(struct roc_sso *roc_sso)
goto fail;
}
- plt_spinlock_lock(&sso->mbox_lock);
rc = sso_rsrc_get(roc_sso);
if (rc < 0) {
plt_err("Failed to get SSO resources");
@@ -880,7 +857,6 @@ roc_sso_dev_init(struct roc_sso *roc_sso)
sso->pci_dev = pci_dev;
sso->dev.drv_inited = true;
roc_sso->lmt_base = sso->dev.lmt_base;
- plt_spinlock_unlock(&sso->mbox_lock);
return 0;
link_mem_free:
@@ -888,7 +864,6 @@ roc_sso_dev_init(struct roc_sso *roc_sso)
rsrc_fail:
rc |= dev_fini(&sso->dev, pci_dev);
fail:
- plt_spinlock_unlock(&sso->mbox_lock);
return rc;
}
diff --git a/drivers/common/cnxk/roc_sso_priv.h b/drivers/common/cnxk/roc_sso_priv.h
index 674e4e0a39..09729d4f62 100644
--- a/drivers/common/cnxk/roc_sso_priv.h
+++ b/drivers/common/cnxk/roc_sso_priv.h
@@ -22,7 +22,6 @@ struct sso {
/* SSO link mapping. */
struct plt_bitmap **link_map;
void *link_map_mem;
- plt_spinlock_t mbox_lock;
} __plt_cache_aligned;
enum sso_err_status {
--
2.25.1
next reply other threads:[~2023-02-02 7:55 UTC|newest]
Thread overview: 4+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-02-02 7:55 pbhagavatula [this message]
2023-02-02 9:00 ` David Marchand
2023-02-07 20:28 ` [PATCH v2] " pbhagavatula
2023-02-11 11:04 ` Jerin Jacob
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20230202075509.1698-1-pbhagavatula@marvell.com \
--to=pbhagavatula@marvell.com \
--cc=dev@dpdk.org \
--cc=jerinj@marvell.com \
--cc=kirankumark@marvell.com \
--cc=ndabilpuram@marvell.com \
--cc=skori@marvell.com \
--cc=skoteshwar@marvell.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).