From: David Marchand <david.marchand@redhat.com>
To: dev@dpdk.org
Cc: probb@iol.unh.edu, "Morten Brørup" <mb@smartsharesystems.com>,
"Thomas Monjalon" <thomas@monjalon.net>,
"Ferruh Yigit" <ferruh.yigit@amd.com>,
"Andrew Rybchenko" <andrew.rybchenko@oktetlabs.ru>
Subject: [PATCH v4 1/3] ethdev: protect shared memory accesses under one lock
Date: Wed, 27 Sep 2023 13:45:13 +0200 [thread overview]
Message-ID: <20230927114515.1245213-2-david.marchand@redhat.com> (raw)
In-Reply-To: <20230927114515.1245213-1-david.marchand@redhat.com>
ethdev currently uses two locks to protect access around
eth_dev_shared_data:
- one (process local) to prevent multiple threads from concurrently
reserving/looking up the eth_dev_shared_data memzone,
- one (shared with other processes) to protect port
allocation/destruction.
An upcoming change will make it possible for the eth_dev_shared_data
memzone to be freed during a DPDK application's lifetime. Accessing its
content must then be protected against concurrent accesses, and with this
new protection in place, the existing locks become unneeded.
Simplify the logic and put everything under a single lock in DPDK shared
memory config (which cannot be freed during a DPDK application lifetime).
Signed-off-by: David Marchand <david.marchand@redhat.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
---
Changes since v3:
- updated commitlog,
---
lib/eal/common/eal_common_mcfg.c | 6 +++
lib/eal/common/eal_memcfg.h | 1 +
lib/eal/include/rte_eal_memconfig.h | 4 ++
lib/eal/version.map | 1 +
lib/ethdev/ethdev_driver.c | 30 ++++++++-------
lib/ethdev/ethdev_private.c | 9 -----
lib/ethdev/ethdev_private.h | 9 +++--
lib/ethdev/rte_ethdev.c | 60 ++++++++++++++++-------------
8 files changed, 68 insertions(+), 52 deletions(-)
diff --git a/lib/eal/common/eal_common_mcfg.c b/lib/eal/common/eal_common_mcfg.c
index b60d41f7b6..2a785e74c4 100644
--- a/lib/eal/common/eal_common_mcfg.c
+++ b/lib/eal/common/eal_common_mcfg.c
@@ -177,6 +177,12 @@ rte_mcfg_timer_unlock(void)
rte_spinlock_unlock(rte_mcfg_timer_get_lock());
}
+rte_spinlock_t *
+rte_mcfg_ethdev_get_lock(void)
+{
+ return &rte_eal_get_configuration()->mem_config->ethdev_lock;
+}
+
bool
rte_mcfg_get_single_file_segments(void)
{
diff --git a/lib/eal/common/eal_memcfg.h b/lib/eal/common/eal_memcfg.h
index 8889ba063f..d5c63e2f4d 100644
--- a/lib/eal/common/eal_memcfg.h
+++ b/lib/eal/common/eal_memcfg.h
@@ -37,6 +37,7 @@ struct rte_mem_config {
rte_rwlock_t qlock; /**< used by tailqs for thread safety. */
rte_rwlock_t mplock; /**< used by mempool library for thread safety. */
rte_spinlock_t tlock; /**< used by timer library for thread safety. */
+ rte_spinlock_t ethdev_lock; /**< used by ethdev library. */
rte_rwlock_t memory_hotplug_lock;
/**< Indicates whether memory hotplug request is in progress. */
diff --git a/lib/eal/include/rte_eal_memconfig.h b/lib/eal/include/rte_eal_memconfig.h
index c527f9aa29..0b1d0d4ff0 100644
--- a/lib/eal/include/rte_eal_memconfig.h
+++ b/lib/eal/include/rte_eal_memconfig.h
@@ -39,6 +39,10 @@ __rte_internal
rte_spinlock_t *
rte_mcfg_timer_get_lock(void);
+__rte_internal
+rte_spinlock_t *
+rte_mcfg_ethdev_get_lock(void);
+
/**
* Lock the internal EAL shared memory configuration for shared access.
*/
diff --git a/lib/eal/version.map b/lib/eal/version.map
index 915057b325..e00a844805 100644
--- a/lib/eal/version.map
+++ b/lib/eal/version.map
@@ -452,6 +452,7 @@ INTERNAL {
rte_intr_vec_list_free;
rte_intr_vec_list_index_get;
rte_intr_vec_list_index_set;
+ rte_mcfg_ethdev_get_lock;
rte_mcfg_mem_get_lock;
rte_mcfg_mempool_get_lock;
rte_mcfg_tailq_get_lock;
diff --git a/lib/ethdev/ethdev_driver.c b/lib/ethdev/ethdev_driver.c
index 30db839a77..c92cd4b947 100644
--- a/lib/ethdev/ethdev_driver.c
+++ b/lib/ethdev/ethdev_driver.c
@@ -45,6 +45,7 @@ eth_dev_allocated(const char *name)
static uint16_t
eth_dev_find_free_port(void)
+ __rte_exclusive_locks_required(rte_mcfg_ethdev_get_lock())
{
uint16_t i;
@@ -61,6 +62,7 @@ eth_dev_find_free_port(void)
static struct rte_eth_dev *
eth_dev_get(uint16_t port_id)
+ __rte_exclusive_locks_required(rte_mcfg_ethdev_get_lock())
{
struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];
@@ -87,10 +89,10 @@ rte_eth_dev_allocate(const char *name)
return NULL;
}
- eth_dev_shared_data_prepare();
+ /* Synchronize port creation between primary and secondary processes. */
+ rte_spinlock_lock(rte_mcfg_ethdev_get_lock());
- /* Synchronize port creation between primary and secondary threads. */
- rte_spinlock_lock(ð_dev_shared_data->ownership_lock);
+ eth_dev_shared_data_prepare();
if (eth_dev_allocated(name) != NULL) {
RTE_ETHDEV_LOG(ERR,
@@ -114,7 +116,7 @@ rte_eth_dev_allocate(const char *name)
pthread_mutex_init(ð_dev->data->flow_ops_mutex, NULL);
unlock:
- rte_spinlock_unlock(ð_dev_shared_data->ownership_lock);
+ rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());
return eth_dev;
}
@@ -124,13 +126,13 @@ rte_eth_dev_allocated(const char *name)
{
struct rte_eth_dev *ethdev;
- eth_dev_shared_data_prepare();
+ rte_spinlock_lock(rte_mcfg_ethdev_get_lock());
- rte_spinlock_lock(ð_dev_shared_data->ownership_lock);
+ eth_dev_shared_data_prepare();
ethdev = eth_dev_allocated(name);
- rte_spinlock_unlock(ð_dev_shared_data->ownership_lock);
+ rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());
return ethdev;
}
@@ -146,10 +148,10 @@ rte_eth_dev_attach_secondary(const char *name)
uint16_t i;
struct rte_eth_dev *eth_dev = NULL;
- eth_dev_shared_data_prepare();
-
/* Synchronize port attachment to primary port creation and release. */
- rte_spinlock_lock(ð_dev_shared_data->ownership_lock);
+ rte_spinlock_lock(rte_mcfg_ethdev_get_lock());
+
+ eth_dev_shared_data_prepare();
for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
if (strcmp(eth_dev_shared_data->data[i].name, name) == 0)
@@ -164,7 +166,7 @@ rte_eth_dev_attach_secondary(const char *name)
RTE_ASSERT(eth_dev->data->port_id == i);
}
- rte_spinlock_unlock(ð_dev_shared_data->ownership_lock);
+ rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());
return eth_dev;
}
@@ -220,7 +222,9 @@ rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
if (eth_dev == NULL)
return -EINVAL;
+ rte_spinlock_lock(rte_mcfg_ethdev_get_lock());
eth_dev_shared_data_prepare();
+ rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());
if (eth_dev->state != RTE_ETH_DEV_UNUSED)
rte_eth_dev_callback_process(eth_dev,
@@ -228,7 +232,7 @@ rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
eth_dev_fp_ops_reset(rte_eth_fp_ops + eth_dev->data->port_id);
- rte_spinlock_lock(ð_dev_shared_data->ownership_lock);
+ rte_spinlock_lock(rte_mcfg_ethdev_get_lock());
eth_dev->state = RTE_ETH_DEV_UNUSED;
eth_dev->device = NULL;
@@ -252,7 +256,7 @@ rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
memset(eth_dev->data, 0, sizeof(struct rte_eth_dev_data));
}
- rte_spinlock_unlock(ð_dev_shared_data->ownership_lock);
+ rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());
return 0;
}
diff --git a/lib/ethdev/ethdev_private.c b/lib/ethdev/ethdev_private.c
index 14ec8c6ccf..6756625729 100644
--- a/lib/ethdev/ethdev_private.c
+++ b/lib/ethdev/ethdev_private.c
@@ -11,12 +11,8 @@
static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
-/* Shared memory between primary and secondary processes. */
struct eth_dev_shared *eth_dev_shared_data;
-/* spinlock for shared data allocation */
-static rte_spinlock_t eth_dev_shared_data_lock = RTE_SPINLOCK_INITIALIZER;
-
/* spinlock for eth device callbacks */
rte_spinlock_t eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;
@@ -328,8 +324,6 @@ eth_dev_shared_data_prepare(void)
const unsigned int flags = 0;
const struct rte_memzone *mz;
- rte_spinlock_lock(ð_dev_shared_data_lock);
-
if (eth_dev_shared_data == NULL) {
if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
/* Allocate port data and ownership shared memory. */
@@ -345,13 +339,10 @@ eth_dev_shared_data_prepare(void)
if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
eth_dev_shared_data->next_owner_id =
RTE_ETH_DEV_NO_OWNER + 1;
- rte_spinlock_init(ð_dev_shared_data->ownership_lock);
memset(eth_dev_shared_data->data, 0,
sizeof(eth_dev_shared_data->data));
}
}
-
- rte_spinlock_unlock(ð_dev_shared_data_lock);
}
void
diff --git a/lib/ethdev/ethdev_private.h b/lib/ethdev/ethdev_private.h
index acb4b335c8..f7706e6a95 100644
--- a/lib/ethdev/ethdev_private.h
+++ b/lib/ethdev/ethdev_private.h
@@ -7,6 +7,7 @@
#include <sys/queue.h>
+#include <rte_eal_memconfig.h>
#include <rte_malloc.h>
#include <rte_os_shim.h>
@@ -14,11 +15,12 @@
struct eth_dev_shared {
uint64_t next_owner_id;
- rte_spinlock_t ownership_lock;
struct rte_eth_dev_data data[RTE_MAX_ETHPORTS];
};
-extern struct eth_dev_shared *eth_dev_shared_data;
+/* Shared memory between primary and secondary processes. */
+extern struct eth_dev_shared *eth_dev_shared_data
+ __rte_guarded_by(rte_mcfg_ethdev_get_lock());
/**
* The user application callback description.
@@ -65,7 +67,8 @@ void eth_dev_fp_ops_setup(struct rte_eth_fp_ops *fpo,
const struct rte_eth_dev *dev);
-void eth_dev_shared_data_prepare(void);
+void eth_dev_shared_data_prepare(void)
+ __rte_exclusive_locks_required(rte_mcfg_ethdev_get_lock());
void eth_dev_rxq_release(struct rte_eth_dev *dev, uint16_t qid);
void eth_dev_txq_release(struct rte_eth_dev *dev, uint16_t qid);
diff --git a/lib/ethdev/rte_ethdev.c b/lib/ethdev/rte_ethdev.c
index 46eaed6467..e0d22c27c6 100644
--- a/lib/ethdev/rte_ethdev.c
+++ b/lib/ethdev/rte_ethdev.c
@@ -409,6 +409,7 @@ rte_eth_dev_is_valid_port(uint16_t port_id)
static int
eth_is_valid_owner_id(uint64_t owner_id)
+ __rte_exclusive_locks_required(rte_mcfg_ethdev_get_lock())
{
if (owner_id == RTE_ETH_DEV_NO_OWNER ||
eth_dev_shared_data->next_owner_id <= owner_id)
@@ -437,13 +438,12 @@ rte_eth_dev_owner_new(uint64_t *owner_id)
return -EINVAL;
}
- eth_dev_shared_data_prepare();
-
- rte_spinlock_lock(ð_dev_shared_data->ownership_lock);
+ rte_spinlock_lock(rte_mcfg_ethdev_get_lock());
+ eth_dev_shared_data_prepare();
*owner_id = eth_dev_shared_data->next_owner_id++;
- rte_spinlock_unlock(ð_dev_shared_data->ownership_lock);
+ rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());
rte_ethdev_trace_owner_new(*owner_id);
@@ -453,6 +453,7 @@ rte_eth_dev_owner_new(uint64_t *owner_id)
static int
eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
const struct rte_eth_dev_owner *new_owner)
+ __rte_exclusive_locks_required(rte_mcfg_ethdev_get_lock())
{
struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];
struct rte_eth_dev_owner *port_owner;
@@ -503,13 +504,12 @@ rte_eth_dev_owner_set(const uint16_t port_id,
{
int ret;
- eth_dev_shared_data_prepare();
-
- rte_spinlock_lock(ð_dev_shared_data->ownership_lock);
+ rte_spinlock_lock(rte_mcfg_ethdev_get_lock());
+ eth_dev_shared_data_prepare();
ret = eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner);
- rte_spinlock_unlock(ð_dev_shared_data->ownership_lock);
+ rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());
rte_ethdev_trace_owner_set(port_id, owner, ret);
@@ -523,13 +523,12 @@ rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
{.id = RTE_ETH_DEV_NO_OWNER, .name = ""};
int ret;
- eth_dev_shared_data_prepare();
-
- rte_spinlock_lock(ð_dev_shared_data->ownership_lock);
+ rte_spinlock_lock(rte_mcfg_ethdev_get_lock());
+ eth_dev_shared_data_prepare();
ret = eth_dev_owner_set(port_id, owner_id, &new_owner);
- rte_spinlock_unlock(ð_dev_shared_data->ownership_lock);
+ rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());
rte_ethdev_trace_owner_unset(port_id, owner_id, ret);
@@ -542,10 +541,9 @@ rte_eth_dev_owner_delete(const uint64_t owner_id)
uint16_t port_id;
int ret = 0;
- eth_dev_shared_data_prepare();
-
- rte_spinlock_lock(ð_dev_shared_data->ownership_lock);
+ rte_spinlock_lock(rte_mcfg_ethdev_get_lock());
+ eth_dev_shared_data_prepare();
if (eth_is_valid_owner_id(owner_id)) {
for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) {
struct rte_eth_dev_data *data =
@@ -564,7 +562,7 @@ rte_eth_dev_owner_delete(const uint64_t owner_id)
ret = -EINVAL;
}
- rte_spinlock_unlock(ð_dev_shared_data->ownership_lock);
+ rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());
rte_ethdev_trace_owner_delete(owner_id, ret);
@@ -591,11 +589,12 @@ rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
return -EINVAL;
}
- eth_dev_shared_data_prepare();
+ rte_spinlock_lock(rte_mcfg_ethdev_get_lock());
- rte_spinlock_lock(ð_dev_shared_data->ownership_lock);
+ eth_dev_shared_data_prepare();
rte_memcpy(owner, ðdev->data->owner, sizeof(*owner));
- rte_spinlock_unlock(ð_dev_shared_data->ownership_lock);
+
+ rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());
rte_ethdev_trace_owner_get(port_id, owner);
@@ -675,9 +674,12 @@ rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
return -EINVAL;
}
+ rte_spinlock_lock(rte_mcfg_ethdev_get_lock());
/* shouldn't check 'rte_eth_devices[i].data',
* because it might be overwritten by VDEV PMD */
tmp = eth_dev_shared_data->data[port_id].name;
+ rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());
+
strcpy(name, tmp);
rte_ethdev_trace_get_name_by_port(port_id, name);
@@ -688,6 +690,7 @@ rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
int
rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
{
+ int ret = -ENODEV;
uint16_t pid;
if (name == NULL) {
@@ -701,16 +704,19 @@ rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
return -EINVAL;
}
- RTE_ETH_FOREACH_VALID_DEV(pid)
- if (!strcmp(name, eth_dev_shared_data->data[pid].name)) {
- *port_id = pid;
-
- rte_ethdev_trace_get_port_by_name(name, *port_id);
+ rte_spinlock_lock(rte_mcfg_ethdev_get_lock());
+ RTE_ETH_FOREACH_VALID_DEV(pid) {
+ if (strcmp(name, eth_dev_shared_data->data[pid].name) != 0)
+ continue;
- return 0;
- }
+ *port_id = pid;
+ rte_ethdev_trace_get_port_by_name(name, *port_id);
+ ret = 0;
+ break;
+ }
+ rte_spinlock_unlock(rte_mcfg_ethdev_get_lock());
- return -ENODEV;
+ return ret;
}
int
--
2.41.0
next prev parent reply other threads:[~2023-09-27 11:45 UTC|newest]
Thread overview: 20+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-08-18 9:13 [PATCH 0/2] Release ethdev shared memory on port cleanup David Marchand
2023-08-18 9:13 ` [PATCH 1/2] ethdev: protect shared memory accesses under one lock David Marchand
2023-08-18 9:13 ` [PATCH 2/2] ethdev: cleanup shared data with the last port David Marchand
2023-08-18 11:36 ` David Marchand
2023-08-18 10:18 ` [PATCH 0/2] Release ethdev shared memory on port cleanup Morten Brørup
2023-08-31 15:34 ` Thomas Monjalon
2023-08-18 13:41 ` [PATCH v2 " David Marchand
2023-08-18 13:41 ` [PATCH v2 1/2] ethdev: protect shared memory accesses under one lock David Marchand
2023-08-18 13:41 ` [PATCH v2 2/2] ethdev: cleanup shared data with the last port David Marchand
2023-08-21 8:58 ` [PATCH v3 0/3] Release ethdev shared memory on port cleanup David Marchand
2023-08-21 8:58 ` [PATCH v3 1/3] ethdev: protect shared memory accesses under one lock David Marchand
2023-08-21 8:58 ` [PATCH v3 2/3] ethdev: avoid panicking in absence of ethdev shared data David Marchand
2023-08-21 8:58 ` [PATCH v3 3/3] ethdev: cleanup shared data with the last port David Marchand
2023-08-31 16:05 ` [PATCH v3 0/3] Release ethdev shared memory on port cleanup Thomas Monjalon
2023-09-01 7:32 ` David Marchand
2023-09-27 11:45 ` [PATCH v4 " David Marchand
2023-09-27 11:45 ` David Marchand [this message]
2023-09-27 11:45 ` [PATCH v4 2/3] ethdev: avoid panicking in absence of ethdev shared data David Marchand
2023-09-27 11:45 ` [PATCH v4 3/3] ethdev: cleanup shared data with the last port David Marchand
2023-10-11 12:53 ` [PATCH v4 0/3] Release ethdev shared memory on port cleanup Thomas Monjalon
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20230927114515.1245213-2-david.marchand@redhat.com \
--to=david.marchand@redhat.com \
--cc=andrew.rybchenko@oktetlabs.ru \
--cc=dev@dpdk.org \
--cc=ferruh.yigit@amd.com \
--cc=mb@smartsharesystems.com \
--cc=probb@iol.unh.edu \
--cc=thomas@monjalon.net \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).