From: David Marchand <david.marchand@redhat.com>
To: dev@dpdk.org
Cc: thomas@monjalon.net, Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
Subject: [PATCH v2 09/20] net/sfc: rework locking in proxy code
Date: Fri, 24 Feb 2023 16:11:32 +0100 [thread overview]
Message-ID: <20230224151143.3274897-10-david.marchand@redhat.com> (raw)
In-Reply-To: <20230224151143.3274897-1-david.marchand@redhat.com>
Remove one extra layer for proxy code: sfc_get_adapter_by_pf_port_id()
now only resolves the sa object and sfc_adapter_(|un)lock() are added
where necessary.
This will simplify lock checks later.
Signed-off-by: David Marchand <david.marchand@redhat.com>
---
drivers/net/sfc/sfc_repr_proxy.c | 59 ++++++++++++++++----------------
1 file changed, 30 insertions(+), 29 deletions(-)
diff --git a/drivers/net/sfc/sfc_repr_proxy.c b/drivers/net/sfc/sfc_repr_proxy.c
index 4b958ced61..4ba7683370 100644
--- a/drivers/net/sfc/sfc_repr_proxy.c
+++ b/drivers/net/sfc/sfc_repr_proxy.c
@@ -51,17 +51,9 @@ sfc_get_adapter_by_pf_port_id(uint16_t pf_port_id)
dev = &rte_eth_devices[pf_port_id];
sa = sfc_adapter_by_eth_dev(dev);
- sfc_adapter_lock(sa);
-
return sa;
}
-static void
-sfc_put_adapter(struct sfc_adapter *sa)
-{
- sfc_adapter_unlock(sa);
-}
-
static struct sfc_repr_proxy_port *
sfc_repr_proxy_find_port(struct sfc_repr_proxy *rp, uint16_t repr_id)
{
@@ -1289,6 +1281,7 @@ sfc_repr_proxy_add_port(uint16_t pf_port_id, uint16_t repr_id,
int rc;
sa = sfc_get_adapter_by_pf_port_id(pf_port_id);
+ sfc_adapter_lock(sa);
rp = sfc_repr_proxy_by_adapter(sa);
sfc_log_init(sa, "entry");
@@ -1341,7 +1334,7 @@ sfc_repr_proxy_add_port(uint16_t pf_port_id, uint16_t repr_id,
}
sfc_log_init(sa, "done");
- sfc_put_adapter(sa);
+ sfc_adapter_unlock(sa);
return 0;
@@ -1352,7 +1345,7 @@ sfc_repr_proxy_add_port(uint16_t pf_port_id, uint16_t repr_id,
fail_alloc_port:
fail_port_exists:
sfc_log_init(sa, "failed: %s", rte_strerror(rc));
- sfc_put_adapter(sa);
+ sfc_adapter_unlock(sa);
return rc;
}
@@ -1366,6 +1359,7 @@ sfc_repr_proxy_del_port(uint16_t pf_port_id, uint16_t repr_id)
int rc;
sa = sfc_get_adapter_by_pf_port_id(pf_port_id);
+ sfc_adapter_lock(sa);
rp = sfc_repr_proxy_by_adapter(sa);
sfc_log_init(sa, "entry");
@@ -1393,14 +1387,14 @@ sfc_repr_proxy_del_port(uint16_t pf_port_id, uint16_t repr_id)
sfc_log_init(sa, "done");
- sfc_put_adapter(sa);
+ sfc_adapter_unlock(sa);
return 0;
fail_port_remove:
fail_no_port:
sfc_log_init(sa, "failed: %s", rte_strerror(rc));
- sfc_put_adapter(sa);
+ sfc_adapter_unlock(sa);
return rc;
}
@@ -1416,6 +1410,7 @@ sfc_repr_proxy_add_rxq(uint16_t pf_port_id, uint16_t repr_id,
struct sfc_adapter *sa;
sa = sfc_get_adapter_by_pf_port_id(pf_port_id);
+ sfc_adapter_lock(sa);
rp = sfc_repr_proxy_by_adapter(sa);
sfc_log_init(sa, "entry");
@@ -1423,14 +1418,14 @@ sfc_repr_proxy_add_rxq(uint16_t pf_port_id, uint16_t repr_id,
port = sfc_repr_proxy_find_port(rp, repr_id);
if (port == NULL) {
sfc_err(sa, "%s() failed: no such port", __func__);
- sfc_put_adapter(sa);
+ sfc_adapter_unlock(sa);
return ENOENT;
}
rxq = &port->rxq[queue_id];
if (rp->dp_rxq[queue_id].mp != NULL && rp->dp_rxq[queue_id].mp != mp) {
sfc_err(sa, "multiple mempools per queue are not supported");
- sfc_put_adapter(sa);
+ sfc_adapter_unlock(sa);
return ENOTSUP;
}
@@ -1440,7 +1435,7 @@ sfc_repr_proxy_add_rxq(uint16_t pf_port_id, uint16_t repr_id,
rp->dp_rxq[queue_id].ref_count++;
sfc_log_init(sa, "done");
- sfc_put_adapter(sa);
+ sfc_adapter_unlock(sa);
return 0;
}
@@ -1455,6 +1450,7 @@ sfc_repr_proxy_del_rxq(uint16_t pf_port_id, uint16_t repr_id,
struct sfc_adapter *sa;
sa = sfc_get_adapter_by_pf_port_id(pf_port_id);
+ sfc_adapter_lock(sa);
rp = sfc_repr_proxy_by_adapter(sa);
sfc_log_init(sa, "entry");
@@ -1462,7 +1458,7 @@ sfc_repr_proxy_del_rxq(uint16_t pf_port_id, uint16_t repr_id,
port = sfc_repr_proxy_find_port(rp, repr_id);
if (port == NULL) {
sfc_err(sa, "%s() failed: no such port", __func__);
- sfc_put_adapter(sa);
+ sfc_adapter_unlock(sa);
return;
}
@@ -1475,7 +1471,7 @@ sfc_repr_proxy_del_rxq(uint16_t pf_port_id, uint16_t repr_id,
rp->dp_rxq[queue_id].mp = NULL;
sfc_log_init(sa, "done");
- sfc_put_adapter(sa);
+ sfc_adapter_unlock(sa);
}
int
@@ -1489,6 +1485,7 @@ sfc_repr_proxy_add_txq(uint16_t pf_port_id, uint16_t repr_id,
struct sfc_adapter *sa;
sa = sfc_get_adapter_by_pf_port_id(pf_port_id);
+ sfc_adapter_lock(sa);
rp = sfc_repr_proxy_by_adapter(sa);
sfc_log_init(sa, "entry");
@@ -1496,7 +1493,7 @@ sfc_repr_proxy_add_txq(uint16_t pf_port_id, uint16_t repr_id,
port = sfc_repr_proxy_find_port(rp, repr_id);
if (port == NULL) {
sfc_err(sa, "%s() failed: no such port", __func__);
- sfc_put_adapter(sa);
+ sfc_adapter_unlock(sa);
return ENOENT;
}
@@ -1507,7 +1504,7 @@ sfc_repr_proxy_add_txq(uint16_t pf_port_id, uint16_t repr_id,
*egress_mport = port->egress_mport;
sfc_log_init(sa, "done");
- sfc_put_adapter(sa);
+ sfc_adapter_unlock(sa);
return 0;
}
@@ -1522,6 +1519,7 @@ sfc_repr_proxy_del_txq(uint16_t pf_port_id, uint16_t repr_id,
struct sfc_adapter *sa;
sa = sfc_get_adapter_by_pf_port_id(pf_port_id);
+ sfc_adapter_lock(sa);
rp = sfc_repr_proxy_by_adapter(sa);
sfc_log_init(sa, "entry");
@@ -1529,7 +1527,7 @@ sfc_repr_proxy_del_txq(uint16_t pf_port_id, uint16_t repr_id,
port = sfc_repr_proxy_find_port(rp, repr_id);
if (port == NULL) {
sfc_err(sa, "%s() failed: no such port", __func__);
- sfc_put_adapter(sa);
+ sfc_adapter_unlock(sa);
return;
}
@@ -1538,7 +1536,7 @@ sfc_repr_proxy_del_txq(uint16_t pf_port_id, uint16_t repr_id,
txq->ring = NULL;
sfc_log_init(sa, "done");
- sfc_put_adapter(sa);
+ sfc_adapter_unlock(sa);
}
int
@@ -1551,6 +1549,7 @@ sfc_repr_proxy_start_repr(uint16_t pf_port_id, uint16_t repr_id)
int rc;
sa = sfc_get_adapter_by_pf_port_id(pf_port_id);
+ sfc_adapter_lock(sa);
rp = sfc_repr_proxy_by_adapter(sa);
sfc_log_init(sa, "entry");
@@ -1594,7 +1593,7 @@ sfc_repr_proxy_start_repr(uint16_t pf_port_id, uint16_t repr_id)
}
sfc_log_init(sa, "done");
- sfc_put_adapter(sa);
+ sfc_adapter_unlock(sa);
return 0;
@@ -1606,7 +1605,7 @@ sfc_repr_proxy_start_repr(uint16_t pf_port_id, uint16_t repr_id)
fail_not_found:
sfc_err(sa, "failed to start repr %u proxy port: %s", repr_id,
rte_strerror(rc));
- sfc_put_adapter(sa);
+ sfc_adapter_unlock(sa);
return rc;
}
@@ -1621,6 +1620,7 @@ sfc_repr_proxy_stop_repr(uint16_t pf_port_id, uint16_t repr_id)
int rc;
sa = sfc_get_adapter_by_pf_port_id(pf_port_id);
+ sfc_adapter_lock(sa);
rp = sfc_repr_proxy_by_adapter(sa);
sfc_log_init(sa, "entry");
@@ -1628,14 +1628,14 @@ sfc_repr_proxy_stop_repr(uint16_t pf_port_id, uint16_t repr_id)
port = sfc_repr_proxy_find_port(rp, repr_id);
if (port == NULL) {
sfc_err(sa, "%s() failed: no such port", __func__);
- sfc_put_adapter(sa);
+ sfc_adapter_unlock(sa);
return ENOENT;
}
if (!port->enabled) {
sfc_log_init(sa, "repr %u proxy port is not started - skip",
repr_id);
- sfc_put_adapter(sa);
+ sfc_adapter_unlock(sa);
return 0;
}
@@ -1662,7 +1662,7 @@ sfc_repr_proxy_stop_repr(uint16_t pf_port_id, uint16_t repr_id)
sfc_err(sa,
"failed to stop representor proxy TxQ %u: %s",
repr_id, rte_strerror(rc));
- sfc_put_adapter(sa);
+ sfc_adapter_unlock(sa);
return rc;
}
}
@@ -1670,7 +1670,7 @@ sfc_repr_proxy_stop_repr(uint16_t pf_port_id, uint16_t repr_id)
port->enabled = false;
sfc_log_init(sa, "done");
- sfc_put_adapter(sa);
+ sfc_adapter_unlock(sa);
return 0;
}
@@ -1685,13 +1685,14 @@ sfc_repr_proxy_repr_entity_mac_addr_set(uint16_t pf_port_id, uint16_t repr_id,
int rc;
sa = sfc_get_adapter_by_pf_port_id(pf_port_id);
+ sfc_adapter_lock(sa);
rp = sfc_repr_proxy_by_adapter(sa);
port = sfc_repr_proxy_find_port(rp, repr_id);
if (port == NULL) {
sfc_err(sa, "%s() failed: no such port (repr_id=%u)",
__func__, repr_id);
- sfc_put_adapter(sa);
+ sfc_adapter_unlock(sa);
return ENOENT;
}
@@ -1703,7 +1704,7 @@ sfc_repr_proxy_repr_entity_mac_addr_set(uint16_t pf_port_id, uint16_t repr_id,
__func__, repr_id, rte_strerror(rc));
}
- sfc_put_adapter(sa);
+ sfc_adapter_unlock(sa);
return rc;
}
--
2.39.2
next prev parent reply other threads:[~2023-02-24 15:12 UTC|newest]
Thread overview: 76+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-02-24 8:16 [PATCH 00/14] Enable lock annotations on most libraries and drivers David Marchand
2023-02-24 8:16 ` [PATCH 01/14] malloc: rework heap lock handling David Marchand
2023-02-24 8:16 ` [PATCH 02/14] mem: rework malloc heap init David Marchand
2023-02-24 8:16 ` [PATCH 03/14] mem: annotate shared memory config locks David Marchand
2023-02-24 8:16 ` [PATCH 04/14] hash: annotate cuckoo hash lock David Marchand
2023-02-24 8:16 ` [PATCH 05/14] graph: annotate graph lock David Marchand
2023-02-24 8:16 ` [PATCH 06/14] drivers: inherit lock annotations for Intel drivers David Marchand
2023-02-24 8:16 ` [PATCH 07/14] net/cxgbe: inherit lock annotations David Marchand
2023-02-24 8:16 ` [PATCH 08/14] net/fm10k: annotate mailbox lock David Marchand
2023-02-24 8:16 ` [PATCH 09/14] net/sfc: rework locking in proxy code David Marchand
2023-02-24 8:16 ` [PATCH 10/14] net/sfc: inherit lock annotations David Marchand
2023-02-24 8:16 ` [PATCH 11/14] net/virtio: annotate lock for guest announce David Marchand
2023-02-24 8:16 ` [PATCH 12/14] raw/ifpga: inherit lock annotations David Marchand
2023-02-24 8:16 ` [PATCH 13/14] vdpa/sfc: " David Marchand
2023-02-24 8:16 ` [PATCH 14/14] enable lock check David Marchand
2023-02-24 15:11 ` [PATCH v2 00/20] Enable lock annotations on most libraries and drivers David Marchand
2023-02-24 15:11 ` [PATCH v2 01/20] malloc: rework heap lock handling David Marchand
2023-02-24 15:11 ` [PATCH v2 02/20] mem: rework malloc heap init David Marchand
2023-02-24 15:11 ` [PATCH v2 03/20] mem: annotate shared memory config locks David Marchand
2023-02-24 15:11 ` [PATCH v2 04/20] hash: annotate cuckoo hash lock David Marchand
2023-02-24 15:11 ` [PATCH v2 05/20] graph: annotate graph lock David Marchand
2023-02-24 15:11 ` [PATCH v2 06/20] drivers: inherit lock annotations for Intel drivers David Marchand
2023-02-24 15:11 ` [PATCH v2 07/20] net/cxgbe: inherit lock annotations David Marchand
2023-02-24 15:11 ` [PATCH v2 08/20] net/fm10k: annotate mailbox lock David Marchand
2023-02-24 15:11 ` David Marchand [this message]
2023-02-24 15:11 ` [PATCH v2 10/20] net/sfc: inherit lock annotations David Marchand
2023-02-24 15:11 ` [PATCH v2 11/20] net/virtio: annotate lock for guest announce David Marchand
2023-02-27 2:05 ` Xia, Chenbo
2023-02-27 8:24 ` David Marchand
2023-02-27 16:28 ` Maxime Coquelin
2023-02-28 2:45 ` Xia, Chenbo
2023-03-02 9:26 ` David Marchand
2023-03-02 9:28 ` Maxime Coquelin
2023-03-02 12:35 ` David Marchand
2023-02-24 15:11 ` [PATCH v2 12/20] raw/ifpga: inherit lock annotations David Marchand
2023-02-27 6:29 ` Xu, Rosen
2023-02-27 7:15 ` Huang, Wei
2023-02-24 15:11 ` [PATCH v2 13/20] vdpa/sfc: " David Marchand
2023-02-24 15:11 ` [PATCH v2 14/20] ipc: annotate pthread mutex David Marchand
2023-02-24 15:11 ` [PATCH v2 15/20] ethdev: " David Marchand
2023-02-24 15:11 ` [PATCH v2 16/20] net/failsafe: fix mutex locking David Marchand
2023-02-24 15:35 ` Gaëtan Rivet
2023-02-24 15:11 ` [PATCH v2 17/20] net/failsafe: annotate pthread mutex David Marchand
2023-02-24 15:11 ` [PATCH v2 18/20] net/hinic: " David Marchand
2023-02-24 15:11 ` [PATCH v2 19/20] eal/windows: disable lock check on alarm code David Marchand
2023-02-24 15:11 ` [PATCH v2 20/20] enable lock check David Marchand
2023-02-27 2:32 ` Xia, Chenbo
2023-02-24 15:58 ` [PATCH v2 00/20] Enable lock annotations on most libraries and drivers Gaëtan Rivet
2023-02-25 10:16 ` David Marchand
2023-02-27 16:12 ` Gaëtan Rivet
2023-03-02 8:52 ` David Marchand
2023-04-03 10:52 ` David Marchand
2023-04-03 15:03 ` Tyler Retzlaff
2023-04-03 15:36 ` Tyler Retzlaff
2023-04-04 7:45 ` David Marchand
2023-04-04 12:48 ` [PATCH v3 00/16] " David Marchand
2023-04-04 12:48 ` [PATCH v3 01/16] malloc: rework heap destroy David Marchand
2023-04-04 12:48 ` [PATCH v3 02/16] mem: rework malloc heap init David Marchand
2023-04-04 12:48 ` [PATCH v3 03/16] mem: annotate shared memory config locks David Marchand
2023-04-04 12:48 ` [PATCH v3 04/16] hash: annotate cuckoo hash lock David Marchand
2023-04-04 12:48 ` [PATCH v3 05/16] graph: annotate graph lock David Marchand
2023-04-04 12:48 ` [PATCH v3 06/16] drivers: inherit lock annotations for Intel drivers David Marchand
2023-04-04 12:48 ` [PATCH v3 07/16] net/cxgbe: inherit lock annotations David Marchand
2023-04-04 12:48 ` [PATCH v3 08/16] net/fm10k: annotate mailbox lock David Marchand
2023-04-04 12:48 ` [PATCH v3 09/16] net/sfc: rework locking in proxy code David Marchand
2023-04-04 12:48 ` [PATCH v3 10/16] net/sfc: inherit lock annotations David Marchand
2023-04-04 12:48 ` [PATCH v3 11/16] net/virtio: rework guest announce notify helper David Marchand
2023-04-04 12:48 ` [PATCH v3 12/16] raw/ifpga: inherit lock annotations David Marchand
2023-04-04 12:48 ` [PATCH v3 13/16] vdpa/sfc: " David Marchand
2023-04-04 12:48 ` [PATCH v3 14/16] net/failsafe: fix mutex locking David Marchand
2023-04-04 12:48 ` [PATCH v3 15/16] eal/windows: disable lock check on alarm code David Marchand
2023-04-04 16:08 ` Tyler Retzlaff
2023-04-04 21:02 ` Dmitry Kozlyuk
2023-04-04 12:48 ` [PATCH v3 16/16] enable lock check David Marchand
2023-04-11 3:21 ` Sachin Saxena (OSS)
2023-04-23 20:09 ` [PATCH v3 00/16] Enable lock annotations on most libraries and drivers Thomas Monjalon
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20230224151143.3274897-10-david.marchand@redhat.com \
--to=david.marchand@redhat.com \
--cc=andrew.rybchenko@oktetlabs.ru \
--cc=dev@dpdk.org \
--cc=thomas@monjalon.net \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).