DPDK patches and discussions
From: David Marchand <david.marchand@redhat.com>
To: dev@dpdk.org
Cc: thomas@monjalon.net, Gaetan Rivet <grive@u256.net>
Subject: [PATCH v3 14/16] net/failsafe: fix mutex locking
Date: Tue,  4 Apr 2023 14:48:38 +0200
Message-ID: <20230404124840.1898-15-david.marchand@redhat.com>
In-Reply-To: <20230404124840.1898-1-david.marchand@redhat.com>

The pthread mutex API describes cases where locking may fail.
Check the return code of the fs_lock() wrapper and propagate any error to the caller.
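
For illustration, a minimal sketch of the checking pattern applied throughout this patch; fs_example_op() is a hypothetical caller, shown under the assumption that fs_lock() simply forwards the pthread mutex lock return code:

  static int
  fs_example_op(struct rte_eth_dev *dev)
  {
  	int ret;

  	/* fs_lock() wraps pthread mutex locking, which may fail
  	 * (e.g. EAGAIN or EDEADLK); bail out instead of touching
  	 * the device without holding the lock.
  	 */
  	ret = fs_lock(dev, 0);
  	if (ret != 0)
  		return ret;

  	/* ... operate on the fail-safe port while holding the lock ... */

  	fs_unlock(dev, 0);
  	return 0;
  }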

Signed-off-by: David Marchand <david.marchand@redhat.com>
Acked-by: Gaetan Rivet <grive@u256.net>
---
 drivers/net/failsafe/failsafe_ether.c |   3 +-
 drivers/net/failsafe/failsafe_flow.c  |  23 +++--
 drivers/net/failsafe/failsafe_ops.c   | 142 +++++++++++++++++++-------
 3 files changed, 123 insertions(+), 45 deletions(-)

diff --git a/drivers/net/failsafe/failsafe_ether.c b/drivers/net/failsafe/failsafe_ether.c
index 10b90fd837..031f3eb13f 100644
--- a/drivers/net/failsafe/failsafe_ether.c
+++ b/drivers/net/failsafe/failsafe_ether.c
@@ -592,7 +592,8 @@ failsafe_eth_rmv_event_callback(uint16_t port_id __rte_unused,
 {
 	struct sub_device *sdev = cb_arg;
 
-	fs_lock(fs_dev(sdev), 0);
+	if (fs_lock(fs_dev(sdev), 0) != 0)
+		return -1;
 	/* Switch as soon as possible tx_dev. */
 	fs_switch_dev(fs_dev(sdev), sdev);
 	/* Use safe bursts in any case. */
diff --git a/drivers/net/failsafe/failsafe_flow.c b/drivers/net/failsafe/failsafe_flow.c
index 354f9fec20..707e6c63b5 100644
--- a/drivers/net/failsafe/failsafe_flow.c
+++ b/drivers/net/failsafe/failsafe_flow.c
@@ -72,7 +72,9 @@ fs_flow_validate(struct rte_eth_dev *dev,
 	uint8_t i;
 	int ret;
 
-	fs_lock(dev, 0);
+	ret = fs_lock(dev, 0);
+	if (ret != 0)
+		return ret;
 	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
 		DEBUG("Calling rte_flow_validate on sub_device %d", i);
 		ret = rte_flow_validate(PORT_ID(sdev),
@@ -99,7 +101,8 @@ fs_flow_create(struct rte_eth_dev *dev,
 	struct rte_flow *flow;
 	uint8_t i;
 
-	fs_lock(dev, 0);
+	if (fs_lock(dev, 0) != 0)
+		return NULL;
 	flow = fs_flow_allocate(attr, patterns, actions);
 	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
 		flow->flows[i] = rte_flow_create(PORT_ID(sdev),
@@ -137,8 +140,9 @@ fs_flow_destroy(struct rte_eth_dev *dev,
 		ERROR("Invalid flow");
 		return -EINVAL;
 	}
-	ret = 0;
-	fs_lock(dev, 0);
+	ret = fs_lock(dev, 0);
+	if (ret != 0)
+		return ret;
 	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
 		int local_ret;
 
@@ -169,7 +173,9 @@ fs_flow_flush(struct rte_eth_dev *dev,
 	uint8_t i;
 	int ret;
 
-	fs_lock(dev, 0);
+	ret = fs_lock(dev, 0);
+	if (ret != 0)
+		return ret;
 	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
 		DEBUG("Calling rte_flow_flush on sub_device %d", i);
 		ret = rte_flow_flush(PORT_ID(sdev), error);
@@ -197,7 +203,8 @@ fs_flow_query(struct rte_eth_dev *dev,
 {
 	struct sub_device *sdev;
 
-	fs_lock(dev, 0);
+	if (fs_lock(dev, 0) != 0)
+		return -1;
 	sdev = TX_SUBDEV(dev);
 	if (sdev != NULL) {
 		int ret = rte_flow_query(PORT_ID(sdev),
@@ -223,7 +230,9 @@ fs_flow_isolate(struct rte_eth_dev *dev,
 	uint8_t i;
 	int ret;
 
-	fs_lock(dev, 0);
+	ret = fs_lock(dev, 0);
+	if (ret != 0)
+		return ret;
 	FOREACH_SUBDEV(sdev, i, dev) {
 		if (sdev->state < DEV_PROBED)
 			continue;
diff --git a/drivers/net/failsafe/failsafe_ops.c b/drivers/net/failsafe/failsafe_ops.c
index d357e1bc83..35649b6244 100644
--- a/drivers/net/failsafe/failsafe_ops.c
+++ b/drivers/net/failsafe/failsafe_ops.c
@@ -28,7 +28,9 @@ fs_dev_configure(struct rte_eth_dev *dev)
 	uint8_t i;
 	int ret;
 
-	fs_lock(dev, 0);
+	ret = fs_lock(dev, 0);
+	if (ret != 0)
+		return ret;
 	FOREACH_SUBDEV(sdev, i, dev) {
 		int rmv_interrupt = 0;
 		int lsc_interrupt = 0;
@@ -129,7 +131,9 @@ fs_dev_start(struct rte_eth_dev *dev)
 	uint8_t i;
 	int ret;
 
-	fs_lock(dev, 0);
+	ret = fs_lock(dev, 0);
+	if (ret != 0)
+		return ret;
 	ret = failsafe_rx_intr_install(dev);
 	if (ret) {
 		fs_unlock(dev, 0);
@@ -189,7 +193,9 @@ fs_dev_stop(struct rte_eth_dev *dev)
 	uint8_t i;
 	int ret;
 
-	fs_lock(dev, 0);
+	ret = fs_lock(dev, 0);
+	if (ret != 0)
+		return ret;
 	PRIV(dev)->state = DEV_STARTED - 1;
 	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_STARTED) {
 		ret = rte_eth_dev_stop(PORT_ID(sdev));
@@ -217,7 +223,9 @@ fs_dev_set_link_up(struct rte_eth_dev *dev)
 	uint8_t i;
 	int ret;
 
-	fs_lock(dev, 0);
+	ret = fs_lock(dev, 0);
+	if (ret != 0)
+		return ret;
 	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
 		DEBUG("Calling rte_eth_dev_set_link_up on sub_device %d", i);
 		ret = rte_eth_dev_set_link_up(PORT_ID(sdev));
@@ -239,7 +247,9 @@ fs_dev_set_link_down(struct rte_eth_dev *dev)
 	uint8_t i;
 	int ret;
 
-	fs_lock(dev, 0);
+	ret = fs_lock(dev, 0);
+	if (ret != 0)
+		return ret;
 	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
 		DEBUG("Calling rte_eth_dev_set_link_down on sub_device %d", i);
 		ret = rte_eth_dev_set_link_down(PORT_ID(sdev));
@@ -263,7 +273,9 @@ fs_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	int err = 0;
 	bool failure = true;
 
-	fs_lock(dev, 0);
+	ret = fs_lock(dev, 0);
+	if (ret != 0)
+		return ret;
 	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
 		uint16_t port_id = ETH(sdev)->data->port_id;
 
@@ -289,7 +301,9 @@ fs_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	uint8_t i;
 	int ret;
 
-	fs_lock(dev, 0);
+	ret = fs_lock(dev, 0);
+	if (ret != 0)
+		return ret;
 	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
 		uint16_t port_id = ETH(sdev)->data->port_id;
 
@@ -316,7 +330,9 @@ fs_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 	int err = 0;
 	bool failure = true;
 
-	fs_lock(dev, 0);
+	ret = fs_lock(dev, 0);
+	if (ret != 0)
+		return ret;
 	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
 		uint16_t port_id = ETH(sdev)->data->port_id;
 
@@ -342,7 +358,9 @@ fs_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 	uint8_t i;
 	int ret;
 
-	fs_lock(dev, 0);
+	ret = fs_lock(dev, 0);
+	if (ret != 0)
+		return ret;
 	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
 		uint16_t port_id = ETH(sdev)->data->port_id;
 
@@ -369,7 +387,8 @@ fs_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 
 	if (rxq == NULL)
 		return;
-	fs_lock(dev, 0);
+	if (fs_lock(dev, 0) != 0)
+		return;
 	if (rxq->event_fd >= 0)
 		close(rxq->event_fd);
 	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
@@ -395,7 +414,9 @@ fs_rx_queue_setup(struct rte_eth_dev *dev,
 	uint8_t i;
 	int ret;
 
-	fs_lock(dev, 0);
+	ret = fs_lock(dev, 0);
+	if (ret != 0)
+		return ret;
 	if (rx_conf->rx_deferred_start) {
 		FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
 			if (SUBOPS(sdev, rx_queue_start) == NULL) {
@@ -466,7 +487,9 @@ fs_rx_intr_enable(struct rte_eth_dev *dev, uint16_t idx)
 	int ret;
 	int rc = 0;
 
-	fs_lock(dev, 0);
+	ret = fs_lock(dev, 0);
+	if (ret != 0)
+		return ret;
 	if (idx >= dev->data->nb_rx_queues) {
 		rc = -EINVAL;
 		goto unlock;
@@ -506,7 +529,9 @@ fs_rx_intr_disable(struct rte_eth_dev *dev, uint16_t idx)
 	int rc = 0;
 	int ret;
 
-	fs_lock(dev, 0);
+	ret = fs_lock(dev, 0);
+	if (ret != 0)
+		return ret;
 	if (idx >= dev->data->nb_rx_queues) {
 		rc = -EINVAL;
 		goto unlock;
@@ -542,7 +567,8 @@ fs_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 
 	if (txq == NULL)
 		return;
-	fs_lock(dev, 0);
+	if (fs_lock(dev, 0) != 0)
+		return;
 	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
 		if (ETH(sdev)->data->tx_queues != NULL &&
 		    ETH(sdev)->data->tx_queues[txq->qid] != NULL)
@@ -565,7 +591,9 @@ fs_tx_queue_setup(struct rte_eth_dev *dev,
 	uint8_t i;
 	int ret;
 
-	fs_lock(dev, 0);
+	ret = fs_lock(dev, 0);
+	if (ret != 0)
+		return ret;
 	if (tx_conf->tx_deferred_start) {
 		FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
 			if (SUBOPS(sdev, tx_queue_start) == NULL) {
@@ -639,7 +667,9 @@ failsafe_eth_dev_close(struct rte_eth_dev *dev)
 	uint8_t i;
 	int err, ret = 0;
 
-	fs_lock(dev, 0);
+	ret = fs_lock(dev, 0);
+	if (ret != 0)
+		return ret;
 	failsafe_hotplug_alarm_cancel(dev);
 	if (PRIV(dev)->state == DEV_STARTED) {
 		ret = dev->dev_ops->dev_stop(dev);
@@ -693,7 +723,9 @@ fs_promiscuous_enable(struct rte_eth_dev *dev)
 	uint8_t i;
 	int ret = 0;
 
-	fs_lock(dev, 0);
+	ret = fs_lock(dev, 0);
+	if (ret != 0)
+		return ret;
 	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
 		ret = rte_eth_promiscuous_enable(PORT_ID(sdev));
 		ret = fs_err(sdev, ret);
@@ -725,7 +757,9 @@ fs_promiscuous_disable(struct rte_eth_dev *dev)
 	uint8_t i;
 	int ret = 0;
 
-	fs_lock(dev, 0);
+	ret = fs_lock(dev, 0);
+	if (ret != 0)
+		return ret;
 	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
 		ret = rte_eth_promiscuous_disable(PORT_ID(sdev));
 		ret = fs_err(sdev, ret);
@@ -757,7 +791,9 @@ fs_allmulticast_enable(struct rte_eth_dev *dev)
 	uint8_t i;
 	int ret = 0;
 
-	fs_lock(dev, 0);
+	ret = fs_lock(dev, 0);
+	if (ret != 0)
+		return ret;
 	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
 		ret = rte_eth_allmulticast_enable(PORT_ID(sdev));
 		ret = fs_err(sdev, ret);
@@ -789,7 +825,9 @@ fs_allmulticast_disable(struct rte_eth_dev *dev)
 	uint8_t i;
 	int ret = 0;
 
-	fs_lock(dev, 0);
+	ret = fs_lock(dev, 0);
+	if (ret != 0)
+		return ret;
 	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
 		ret = rte_eth_allmulticast_disable(PORT_ID(sdev));
 		ret = fs_err(sdev, ret);
@@ -822,7 +860,9 @@ fs_link_update(struct rte_eth_dev *dev,
 	uint8_t i;
 	int ret;
 
-	fs_lock(dev, 0);
+	ret = fs_lock(dev, 0);
+	if (ret != 0)
+		return ret;
 	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
 		DEBUG("Calling link_update on sub_device %d", i);
 		ret = (SUBOPS(sdev, link_update))(ETH(sdev), wait_to_complete);
@@ -859,7 +899,9 @@ fs_stats_get(struct rte_eth_dev *dev,
 	uint8_t i;
 	int ret;
 
-	fs_lock(dev, 0);
+	ret = fs_lock(dev, 0);
+	if (ret != 0)
+		return ret;
 	rte_memcpy(stats, &PRIV(dev)->stats_accumulator, sizeof(*stats));
 	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
 		struct rte_eth_stats *snapshot = &sdev->stats_snapshot.stats;
@@ -893,7 +935,9 @@ fs_stats_reset(struct rte_eth_dev *dev)
 	uint8_t i;
 	int ret;
 
-	fs_lock(dev, 0);
+	ret = fs_lock(dev, 0);
+	if (ret != 0)
+		return ret;
 	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
 		ret = rte_eth_stats_reset(PORT_ID(sdev));
 		if (ret) {
@@ -983,7 +1027,9 @@ fs_xstats_get_names(struct rte_eth_dev *dev,
 {
 	int ret;
 
-	fs_lock(dev, 0);
+	ret = fs_lock(dev, 0);
+	if (ret != 0)
+		return ret;
 	ret = __fs_xstats_get_names(dev, xstats_names, limit);
 	fs_unlock(dev, 0);
 	return ret;
@@ -1035,7 +1081,9 @@ fs_xstats_get(struct rte_eth_dev *dev,
 {
 	int ret;
 
-	fs_lock(dev, 0);
+	ret = fs_lock(dev, 0);
+	if (ret != 0)
+		return ret;
 	ret = __fs_xstats_get(dev, xstats, n);
 	fs_unlock(dev, 0);
 
@@ -1048,9 +1096,11 @@ fs_xstats_reset(struct rte_eth_dev *dev)
 {
 	struct sub_device *sdev;
 	uint8_t i;
-	int r = 0;
+	int r;
 
-	fs_lock(dev, 0);
+	r = fs_lock(dev, 0);
+	if (r != 0)
+		return r;
 	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
 		r = rte_eth_xstats_reset(PORT_ID(sdev));
 		if (r < 0)
@@ -1238,7 +1288,8 @@ fs_dev_supported_ptypes_get(struct rte_eth_dev *dev)
 	struct rte_eth_dev *edev;
 	const uint32_t *ret;
 
-	fs_lock(dev, 0);
+	if (fs_lock(dev, 0) != 0)
+		return NULL;
 	sdev = TX_SUBDEV(dev);
 	if (sdev == NULL) {
 		ret = NULL;
@@ -1270,7 +1321,9 @@ fs_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 	uint8_t i;
 	int ret;
 
-	fs_lock(dev, 0);
+	ret = fs_lock(dev, 0);
+	if (ret != 0)
+		return ret;
 	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
 		DEBUG("Calling rte_eth_dev_set_mtu on sub_device %d", i);
 		ret = rte_eth_dev_set_mtu(PORT_ID(sdev), mtu);
@@ -1292,7 +1345,9 @@ fs_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
 	uint8_t i;
 	int ret;
 
-	fs_lock(dev, 0);
+	ret = fs_lock(dev, 0);
+	if (ret != 0)
+		return ret;
 	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
 		DEBUG("Calling rte_eth_dev_vlan_filter on sub_device %d", i);
 		ret = rte_eth_dev_vlan_filter(PORT_ID(sdev), vlan_id, on);
@@ -1314,7 +1369,9 @@ fs_flow_ctrl_get(struct rte_eth_dev *dev,
 	struct sub_device *sdev;
 	int ret;
 
-	fs_lock(dev, 0);
+	ret = fs_lock(dev, 0);
+	if (ret != 0)
+		return ret;
 	sdev = TX_SUBDEV(dev);
 	if (sdev == NULL) {
 		ret = 0;
@@ -1338,7 +1395,9 @@ fs_flow_ctrl_set(struct rte_eth_dev *dev,
 	uint8_t i;
 	int ret;
 
-	fs_lock(dev, 0);
+	ret = fs_lock(dev, 0);
+	if (ret != 0)
+		return ret;
 	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
 		DEBUG("Calling rte_eth_dev_flow_ctrl_set on sub_device %d", i);
 		ret = rte_eth_dev_flow_ctrl_set(PORT_ID(sdev), fc_conf);
@@ -1359,7 +1418,8 @@ fs_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
 	struct sub_device *sdev;
 	uint8_t i;
 
-	fs_lock(dev, 0);
+	if (fs_lock(dev, 0) != 0)
+		return;
 	/* No check: already done within the rte_eth_dev_mac_addr_remove
 	 * call for the fail-safe device.
 	 */
@@ -1381,7 +1441,9 @@ fs_mac_addr_add(struct rte_eth_dev *dev,
 	uint8_t i;
 
 	RTE_ASSERT(index < FAILSAFE_MAX_ETHADDR);
-	fs_lock(dev, 0);
+	ret = fs_lock(dev, 0);
+	if (ret != 0)
+		return ret;
 	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
 		ret = rte_eth_dev_mac_addr_add(PORT_ID(sdev), mac_addr, vmdq);
 		if ((ret = fs_err(sdev, ret))) {
@@ -1407,7 +1469,9 @@ fs_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
 	uint8_t i;
 	int ret;
 
-	fs_lock(dev, 0);
+	ret = fs_lock(dev, 0);
+	if (ret != 0)
+		return ret;
 	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
 		ret = rte_eth_dev_default_mac_addr_set(PORT_ID(sdev), mac_addr);
 		ret = fs_err(sdev, ret);
@@ -1432,7 +1496,9 @@ fs_set_mc_addr_list(struct rte_eth_dev *dev,
 	int ret;
 	void *mcast_addrs;
 
-	fs_lock(dev, 0);
+	ret = fs_lock(dev, 0);
+	if (ret != 0)
+		return ret;
 
 	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
 		ret = rte_eth_dev_set_mc_addr_list(PORT_ID(sdev),
@@ -1480,7 +1546,9 @@ fs_rss_hash_update(struct rte_eth_dev *dev,
 	uint8_t i;
 	int ret;
 
-	fs_lock(dev, 0);
+	ret = fs_lock(dev, 0);
+	if (ret != 0)
+		return ret;
 	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
 		ret = rte_eth_dev_rss_hash_update(PORT_ID(sdev), rss_conf);
 		ret = fs_err(sdev, ret);
-- 
2.39.2


Thread overview: 76+ messages
2023-02-24  8:16 [PATCH 00/14] Enable lock annotations on most libraries and drivers David Marchand
2023-02-24  8:16 ` [PATCH 01/14] malloc: rework heap lock handling David Marchand
2023-02-24  8:16 ` [PATCH 02/14] mem: rework malloc heap init David Marchand
2023-02-24  8:16 ` [PATCH 03/14] mem: annotate shared memory config locks David Marchand
2023-02-24  8:16 ` [PATCH 04/14] hash: annotate cuckoo hash lock David Marchand
2023-02-24  8:16 ` [PATCH 05/14] graph: annotate graph lock David Marchand
2023-02-24  8:16 ` [PATCH 06/14] drivers: inherit lock annotations for Intel drivers David Marchand
2023-02-24  8:16 ` [PATCH 07/14] net/cxgbe: inherit lock annotations David Marchand
2023-02-24  8:16 ` [PATCH 08/14] net/fm10k: annotate mailbox lock David Marchand
2023-02-24  8:16 ` [PATCH 09/14] net/sfc: rework locking in proxy code David Marchand
2023-02-24  8:16 ` [PATCH 10/14] net/sfc: inherit lock annotations David Marchand
2023-02-24  8:16 ` [PATCH 11/14] net/virtio: annotate lock for guest announce David Marchand
2023-02-24  8:16 ` [PATCH 12/14] raw/ifpga: inherit lock annotations David Marchand
2023-02-24  8:16 ` [PATCH 13/14] vdpa/sfc: " David Marchand
2023-02-24  8:16 ` [PATCH 14/14] enable lock check David Marchand
2023-02-24 15:11 ` [PATCH v2 00/20] Enable lock annotations on most libraries and drivers David Marchand
2023-02-24 15:11   ` [PATCH v2 01/20] malloc: rework heap lock handling David Marchand
2023-02-24 15:11   ` [PATCH v2 02/20] mem: rework malloc heap init David Marchand
2023-02-24 15:11   ` [PATCH v2 03/20] mem: annotate shared memory config locks David Marchand
2023-02-24 15:11   ` [PATCH v2 04/20] hash: annotate cuckoo hash lock David Marchand
2023-02-24 15:11   ` [PATCH v2 05/20] graph: annotate graph lock David Marchand
2023-02-24 15:11   ` [PATCH v2 06/20] drivers: inherit lock annotations for Intel drivers David Marchand
2023-02-24 15:11   ` [PATCH v2 07/20] net/cxgbe: inherit lock annotations David Marchand
2023-02-24 15:11   ` [PATCH v2 08/20] net/fm10k: annotate mailbox lock David Marchand
2023-02-24 15:11   ` [PATCH v2 09/20] net/sfc: rework locking in proxy code David Marchand
2023-02-24 15:11   ` [PATCH v2 10/20] net/sfc: inherit lock annotations David Marchand
2023-02-24 15:11   ` [PATCH v2 11/20] net/virtio: annotate lock for guest announce David Marchand
2023-02-27  2:05     ` Xia, Chenbo
2023-02-27  8:24       ` David Marchand
2023-02-27 16:28         ` Maxime Coquelin
2023-02-28  2:45           ` Xia, Chenbo
2023-03-02  9:26           ` David Marchand
2023-03-02  9:28             ` Maxime Coquelin
2023-03-02 12:35               ` David Marchand
2023-02-24 15:11   ` [PATCH v2 12/20] raw/ifpga: inherit lock annotations David Marchand
2023-02-27  6:29     ` Xu, Rosen
2023-02-27  7:15       ` Huang, Wei
2023-02-24 15:11   ` [PATCH v2 13/20] vdpa/sfc: " David Marchand
2023-02-24 15:11   ` [PATCH v2 14/20] ipc: annotate pthread mutex David Marchand
2023-02-24 15:11   ` [PATCH v2 15/20] ethdev: " David Marchand
2023-02-24 15:11   ` [PATCH v2 16/20] net/failsafe: fix mutex locking David Marchand
2023-02-24 15:35     ` Gaëtan Rivet
2023-02-24 15:11   ` [PATCH v2 17/20] net/failsafe: annotate pthread mutex David Marchand
2023-02-24 15:11   ` [PATCH v2 18/20] net/hinic: " David Marchand
2023-02-24 15:11   ` [PATCH v2 19/20] eal/windows: disable lock check on alarm code David Marchand
2023-02-24 15:11   ` [PATCH v2 20/20] enable lock check David Marchand
2023-02-27  2:32     ` Xia, Chenbo
2023-02-24 15:58   ` [PATCH v2 00/20] Enable lock annotations on most libraries and drivers Gaëtan Rivet
2023-02-25 10:16     ` David Marchand
2023-02-27 16:12       ` Gaëtan Rivet
2023-03-02  8:52         ` David Marchand
2023-04-03 10:52           ` David Marchand
2023-04-03 15:03             ` Tyler Retzlaff
2023-04-03 15:36             ` Tyler Retzlaff
2023-04-04  7:45               ` David Marchand
2023-04-04 12:48 ` [PATCH v3 00/16] " David Marchand
2023-04-04 12:48   ` [PATCH v3 01/16] malloc: rework heap destroy David Marchand
2023-04-04 12:48   ` [PATCH v3 02/16] mem: rework malloc heap init David Marchand
2023-04-04 12:48   ` [PATCH v3 03/16] mem: annotate shared memory config locks David Marchand
2023-04-04 12:48   ` [PATCH v3 04/16] hash: annotate cuckoo hash lock David Marchand
2023-04-04 12:48   ` [PATCH v3 05/16] graph: annotate graph lock David Marchand
2023-04-04 12:48   ` [PATCH v3 06/16] drivers: inherit lock annotations for Intel drivers David Marchand
2023-04-04 12:48   ` [PATCH v3 07/16] net/cxgbe: inherit lock annotations David Marchand
2023-04-04 12:48   ` [PATCH v3 08/16] net/fm10k: annotate mailbox lock David Marchand
2023-04-04 12:48   ` [PATCH v3 09/16] net/sfc: rework locking in proxy code David Marchand
2023-04-04 12:48   ` [PATCH v3 10/16] net/sfc: inherit lock annotations David Marchand
2023-04-04 12:48   ` [PATCH v3 11/16] net/virtio: rework guest announce notify helper David Marchand
2023-04-04 12:48   ` [PATCH v3 12/16] raw/ifpga: inherit lock annotations David Marchand
2023-04-04 12:48   ` [PATCH v3 13/16] vdpa/sfc: " David Marchand
2023-04-04 12:48   ` David Marchand [this message]
2023-04-04 12:48   ` [PATCH v3 15/16] eal/windows: disable lock check on alarm code David Marchand
2023-04-04 16:08     ` Tyler Retzlaff
2023-04-04 21:02     ` Dmitry Kozlyuk
2023-04-04 12:48   ` [PATCH v3 16/16] enable lock check David Marchand
2023-04-11  3:21     ` Sachin Saxena (OSS)
2023-04-23 20:09   ` [PATCH v3 00/16] Enable lock annotations on most libraries and drivers Thomas Monjalon
