* [PATCH 20.11] net/hns3: fix traffic management thread safety
@ 2023-11-16 12:34 Chengwen Feng
2023-11-16 15:27 ` Luca Boccassi
2023-11-23 10:49 ` Kevin Traynor
0 siblings, 2 replies; 4+ messages in thread
From: Chengwen Feng @ 2023-11-16 12:34 UTC (permalink / raw)
To: stable, ktraynor; +Cc: lihuisong, haijie1
[ upstream commit 69901040975bff8a38edfc47aee727cadc87d356 ]
The driver-related TM (traffic management) info is implemented through
the linked list. The following threads are involved in the read and
write of the TM info:
1. main thread: invokes the rte_tm_xxx() API family to modify or read.
2. interrupt thread: will read TM info in reset recover process.
Currently, thread safety protection of TM info is implemented only in
the following operations:
1. some of the rte_tm_xxx() API's implementation.
2. reset recover process.
Thread safety risks may exist in other scenarios, so fix this by making
sure all of the rte_tm_xxx() API implementations are protected by hw.lock.
Fixes: c09c7847d892 ("net/hns3: support traffic management")
Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Signed-off-by: Dongdong Liu <liudongdong3@huawei.com>
---
drivers/net/hns3/hns3_tm.c | 173 ++++++++++++++++++++++++++++++++-----
1 file changed, 150 insertions(+), 23 deletions(-)
diff --git a/drivers/net/hns3/hns3_tm.c b/drivers/net/hns3/hns3_tm.c
index e1089b6bd0..67402a700f 100644
--- a/drivers/net/hns3/hns3_tm.c
+++ b/drivers/net/hns3/hns3_tm.c
@@ -1081,21 +1081,6 @@ hns3_tm_hierarchy_commit(struct rte_eth_dev *dev,
return -EINVAL;
}
-static int
-hns3_tm_hierarchy_commit_wrap(struct rte_eth_dev *dev,
- int clear_on_fail,
- struct rte_tm_error *error)
-{
- struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- int ret;
-
- rte_spinlock_lock(&hw->lock);
- ret = hns3_tm_hierarchy_commit(dev, clear_on_fail, error);
- rte_spinlock_unlock(&hw->lock);
-
- return ret;
-}
-
static int
hns3_tm_node_shaper_do_update(struct hns3_hw *hw,
uint32_t node_id,
@@ -1195,6 +1180,148 @@ hns3_tm_node_shaper_update(struct rte_eth_dev *dev,
return 0;
}
+static int
+hns3_tm_capabilities_get_wrap(struct rte_eth_dev *dev,
+ struct rte_tm_capabilities *cap,
+ struct rte_tm_error *error)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int ret;
+
+ rte_spinlock_lock(&hw->lock);
+ ret = hns3_tm_capabilities_get(dev, cap, error);
+ rte_spinlock_unlock(&hw->lock);
+
+ return ret;
+}
+
+static int
+hns3_tm_shaper_profile_add_wrap(struct rte_eth_dev *dev,
+ uint32_t shaper_profile_id,
+ struct rte_tm_shaper_params *profile,
+ struct rte_tm_error *error)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int ret;
+
+ rte_spinlock_lock(&hw->lock);
+ ret = hns3_tm_shaper_profile_add(dev, shaper_profile_id, profile, error);
+ rte_spinlock_unlock(&hw->lock);
+
+ return ret;
+}
+
+static int
+hns3_tm_shaper_profile_del_wrap(struct rte_eth_dev *dev,
+ uint32_t shaper_profile_id,
+ struct rte_tm_error *error)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int ret;
+
+ rte_spinlock_lock(&hw->lock);
+ ret = hns3_tm_shaper_profile_del(dev, shaper_profile_id, error);
+ rte_spinlock_unlock(&hw->lock);
+
+ return ret;
+}
+
+static int
+hns3_tm_node_add_wrap(struct rte_eth_dev *dev, uint32_t node_id,
+ uint32_t parent_node_id, uint32_t priority,
+ uint32_t weight, uint32_t level_id,
+ struct rte_tm_node_params *params,
+ struct rte_tm_error *error)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int ret;
+
+ rte_spinlock_lock(&hw->lock);
+ ret = hns3_tm_node_add(dev, node_id, parent_node_id, priority,
+ weight, level_id, params, error);
+ rte_spinlock_unlock(&hw->lock);
+
+ return ret;
+}
+
+static int
+hns3_tm_node_delete_wrap(struct rte_eth_dev *dev,
+ uint32_t node_id,
+ struct rte_tm_error *error)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int ret;
+
+ rte_spinlock_lock(&hw->lock);
+ ret = hns3_tm_node_delete(dev, node_id, error);
+ rte_spinlock_unlock(&hw->lock);
+
+ return ret;
+}
+
+static int
+hns3_tm_node_type_get_wrap(struct rte_eth_dev *dev,
+ uint32_t node_id,
+ int *is_leaf,
+ struct rte_tm_error *error)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int ret;
+
+ rte_spinlock_lock(&hw->lock);
+ ret = hns3_tm_node_type_get(dev, node_id, is_leaf, error);
+ rte_spinlock_unlock(&hw->lock);
+
+ return ret;
+}
+
+static int
+hns3_tm_level_capabilities_get_wrap(struct rte_eth_dev *dev,
+ uint32_t level_id,
+ struct rte_tm_level_capabilities *cap,
+ struct rte_tm_error *error)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int ret;
+
+ rte_spinlock_lock(&hw->lock);
+ ret = hns3_tm_level_capabilities_get(dev, level_id, cap, error);
+ rte_spinlock_unlock(&hw->lock);
+
+ return ret;
+}
+
+static int
+hns3_tm_node_capabilities_get_wrap(struct rte_eth_dev *dev,
+ uint32_t node_id,
+ struct rte_tm_node_capabilities *cap,
+ struct rte_tm_error *error)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int ret;
+
+ rte_spinlock_lock(&hw->lock);
+ ret = hns3_tm_node_capabilities_get(dev, node_id, cap, error);
+ rte_spinlock_unlock(&hw->lock);
+
+ return ret;
+}
+
+static int
+hns3_tm_hierarchy_commit_wrap(struct rte_eth_dev *dev,
+ int clear_on_fail,
+ struct rte_tm_error *error)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int ret;
+
+ rte_spinlock_lock(&hw->lock);
+ ret = hns3_tm_hierarchy_commit(dev, clear_on_fail, error);
+ rte_spinlock_unlock(&hw->lock);
+
+ return ret;
+}
+
static int
hns3_tm_node_shaper_update_wrap(struct rte_eth_dev *dev,
uint32_t node_id,
@@ -1213,14 +1340,14 @@ hns3_tm_node_shaper_update_wrap(struct rte_eth_dev *dev,
}
static const struct rte_tm_ops hns3_tm_ops = {
- .capabilities_get = hns3_tm_capabilities_get,
- .shaper_profile_add = hns3_tm_shaper_profile_add,
- .shaper_profile_delete = hns3_tm_shaper_profile_del,
- .node_add = hns3_tm_node_add,
- .node_delete = hns3_tm_node_delete,
- .node_type_get = hns3_tm_node_type_get,
- .level_capabilities_get = hns3_tm_level_capabilities_get,
- .node_capabilities_get = hns3_tm_node_capabilities_get,
+ .capabilities_get = hns3_tm_capabilities_get_wrap,
+ .shaper_profile_add = hns3_tm_shaper_profile_add_wrap,
+ .shaper_profile_delete = hns3_tm_shaper_profile_del_wrap,
+ .node_add = hns3_tm_node_add_wrap,
+ .node_delete = hns3_tm_node_delete_wrap,
+ .node_type_get = hns3_tm_node_type_get_wrap,
+ .level_capabilities_get = hns3_tm_level_capabilities_get_wrap,
+ .node_capabilities_get = hns3_tm_node_capabilities_get_wrap,
.hierarchy_commit = hns3_tm_hierarchy_commit_wrap,
.node_shaper_update = hns3_tm_node_shaper_update_wrap,
};
--
2.17.1
^ permalink raw reply [flat|nested] 4+ messages in thread
* Re: [PATCH 20.11] net/hns3: fix traffic management thread safety
2023-11-16 12:34 [PATCH 20.11] net/hns3: fix traffic management thread safety Chengwen Feng
@ 2023-11-16 15:27 ` Luca Boccassi
2023-11-17 1:02 ` fengchengwen
2023-11-23 10:49 ` Kevin Traynor
1 sibling, 1 reply; 4+ messages in thread
From: Luca Boccassi @ 2023-11-16 15:27 UTC (permalink / raw)
To: Chengwen Feng; +Cc: stable, ktraynor, lihuisong, haijie1
On Thu, 16 Nov 2023 at 12:37, Chengwen Feng <fengchengwen@huawei.com> wrote:
>
> [ upstream commit 69901040975bff8a38edfc47aee727cadc87d356 ]
>
> The driver-related TM (traffic management) info is implemented through
> the linked list. The following threads are involved in the read and
> write of the TM info:
>
> 1. main thread: invokes the rte_tm_xxx() API family to modify or read.
> 2. interrupt thread: will read TM info in reset recover process.
>
> Currently, thread safety protection of TM info is implemented only in
> the following operations:
> 1. some of the rte_tm_xxx() API's implementation.
> 2. reset recover process.
>
> Thread safety risks may exist in other scenarios, so fix this by making
> sure all of the rte_tm_xxx() API implementations are protected by hw.lock.
>
> Fixes: c09c7847d892 ("net/hns3: support traffic management")
>
> Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
> Signed-off-by: Dongdong Liu <liudongdong3@huawei.com>
> ---
> drivers/net/hns3/hns3_tm.c | 173 ++++++++++++++++++++++++++++++++-----
> 1 file changed, 150 insertions(+), 23 deletions(-)
>
This patch doesn't apply on 20.11, did you mean to target it at 21.11 or 22.11?
^ permalink raw reply [flat|nested] 4+ messages in thread
* Re: [PATCH 20.11] net/hns3: fix traffic management thread safety
2023-11-16 15:27 ` Luca Boccassi
@ 2023-11-17 1:02 ` fengchengwen
0 siblings, 0 replies; 4+ messages in thread
From: fengchengwen @ 2023-11-17 1:02 UTC (permalink / raw)
To: Luca Boccassi; +Cc: stable, ktraynor, lihuisong, haijie1
Hi Luca,
On 2023/11/16 23:27, Luca Boccassi wrote:
> On Thu, 16 Nov 2023 at 12:37, Chengwen Feng <fengchengwen@huawei.com> wrote:
>>
>> [ upstream commit 69901040975bff8a38edfc47aee727cadc87d356 ]
>>
>> The driver-related TM (traffic management) info is implemented through
>> the linked list. The following threads are involved in the read and
>> write of the TM info:
>>
>> 1. main thread: invokes the rte_tm_xxx() API family to modify or read.
>> 2. interrupt thread: will read TM info in reset recover process.
>>
>> Currently, thread safety protection of TM info is implemented only in
>> the following operations:
>> 1. some of the rte_tm_xxx() API's implementation.
>> 2. reset recover process.
>>
>> Thread safety risks may exist in other scenarios, so fix this by making
>> sure all of the rte_tm_xxx() API implementations are protected by hw.lock.
>>
>> Fixes: c09c7847d892 ("net/hns3: support traffic management")
>>
>> Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
>> Signed-off-by: Dongdong Liu <liudongdong3@huawei.com>
>> ---
>> drivers/net/hns3/hns3_tm.c | 173 ++++++++++++++++++++++++++++++++-----
>> 1 file changed, 150 insertions(+), 23 deletions(-)
>>
>
> This patch doesn't apply on 20.11, did you mean to target it at 21.11 or 22.11?
Sorry for this, it was targeted at 21.11; I wrote the wrong prefix.
Thanks
Chengwen
>
> .
>
^ permalink raw reply [flat|nested] 4+ messages in thread
* Re: [PATCH 20.11] net/hns3: fix traffic management thread safety
2023-11-16 12:34 [PATCH 20.11] net/hns3: fix traffic management thread safety Chengwen Feng
2023-11-16 15:27 ` Luca Boccassi
@ 2023-11-23 10:49 ` Kevin Traynor
1 sibling, 0 replies; 4+ messages in thread
From: Kevin Traynor @ 2023-11-23 10:49 UTC (permalink / raw)
To: Chengwen Feng, stable; +Cc: lihuisong, haijie1
On 16/11/2023 12:34, Chengwen Feng wrote:
> [ upstream commit 69901040975bff8a38edfc47aee727cadc87d356 ]
>
> The driver-related TM (traffic management) info is implemented through
> the linked list. The following threads are involved in the read and
> write of the TM info:
>
> 1. main thread: invokes the rte_tm_xxx() API family to modify or read.
> 2. interrupt thread: will read TM info in reset recover process.
>
> Currently, thread safety protection of TM info is implemented only in
> the following operations:
> 1. some of the rte_tm_xxx() API's implementation.
> 2. reset recover process.
>
> Thread safety risks may exist in other scenarios, so fix this by making
> sure all of the rte_tm_xxx() API implementations are protected by hw.lock.
>
> Fixes: c09c7847d892 ("net/hns3: support traffic management")
>
> Signed-off-by: Chengwen Feng<fengchengwen@huawei.com>
> Signed-off-by: Dongdong Liu<liudongdong3@huawei.com>
> ---
> drivers/net/hns3/hns3_tm.c | 173 ++++++++++++++++++++++++++++++++-----
> 1 file changed, 150 insertions(+), 23 deletions(-)
Applied to 21.11 branch. Thanks for backporting.
^ permalink raw reply [flat|nested] 4+ messages in thread
end of thread, other threads:[~2023-11-23 10:49 UTC | newest]
Thread overview: 4+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2023-11-16 12:34 [PATCH 20.11] net/hns3: fix traffic management thread safety Chengwen Feng
2023-11-16 15:27 ` Luca Boccassi
2023-11-17 1:02 ` fengchengwen
2023-11-23 10:49 ` Kevin Traynor
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).