From mboxrd@z Thu Jan 1 00:00:00 1970
Subject: Re: [PATCH v5 07/10] memarea: support backup memory mechanism
To: Mattias Rönnblom, datshan
From: fengchengwen
Message-ID: <8882a22a-07ad-c756-4032-2dbde0a9e721@huawei.com>
Date: Sat, 8 Oct 2022 15:56:51 +0800
In-Reply-To: <6423791e-2a52-387a-9a33-7ecdd72db2fd@lysator.liu.se>
References: <20220721044648.6817-1-fengchengwen@huawei.com>
 <20221005040952.8166-1-datshan@qq.com>
 <6423791e-2a52-387a-9a33-7ecdd72db2fd@lysator.liu.se>
Content-Type: text/plain; charset="utf-8"
List-Id: DPDK patches and discussions

Hi Mattias,

On 2022/10/7 3:53, Mattias Rönnblom wrote:
> On 2022-10-05 06:09, datshan wrote:
>> From: Chengwen Feng
>>
>> This patch supports backup memory mechanism, the memarea could use
>> another memarea as a backup.
>
> Maybe it's worth mentioning what backup means already here.
>
> "This patch adds a memarea backup mechanism, where an allocation request
> which cannot be met by a certain memarea is deferred to its backup memarea."

+1

>
> I assume they can be nested indefinitely?

Theoretically, yes.

And I'm going to add a check to avoid loops (please see the sketch at the
end of this mail).

>
>>
>> Signed-off-by: Chengwen Feng
>> ---
>>   doc/guides/prog_guide/memarea_lib.rst |  3 +++
>>   lib/memarea/memarea_private.h         |  2 ++
>>   lib/memarea/rte_memarea.c             | 22 ++++++++++++++++++++++
>>   lib/memarea/rte_memarea.h             |  7 +++++++
>>   4 files changed, 34 insertions(+)
>>
>> diff --git a/doc/guides/prog_guide/memarea_lib.rst b/doc/guides/prog_guide/memarea_lib.rst
>> index c77012fe44..842d35f77a 100644
>> --- a/doc/guides/prog_guide/memarea_lib.rst
>> +++ b/doc/guides/prog_guide/memarea_lib.rst
>> @@ -25,6 +25,9 @@ The main features are as follows:
>>
>>   * It supports MT-safe as long as it's specified at creation time.
>>
>> +* It provides backup memory mechanism, the memarea could use another memarea
>> +  as a backup.
>> +
>>   Library API Overview
>>   --------------------
>>
>> diff --git a/lib/memarea/memarea_private.h b/lib/memarea/memarea_private.h
>> index 98406879b9..08735ca81f 100644
>> --- a/lib/memarea/memarea_private.h
>> +++ b/lib/memarea/memarea_private.h
>> @@ -23,11 +23,13 @@ struct rte_memarea {
>>       struct rte_memarea_param init;
>>       rte_spinlock_t           lock;
>>       void                    *area_addr;
>> +    void                    *top_addr;
>>       struct memarea_elem_list elem_list;
>>       struct memarea_elem_list free_list;
>>
>>       uint64_t alloc_fails;
>>       uint64_t refcnt_check_fails;
>> +    uint64_t bak_alloc_fails;
>>   } __rte_cache_aligned;
>>
>>   #endif /* MEMAREA_PRIVATE_H */
>> diff --git a/lib/memarea/rte_memarea.c b/lib/memarea/rte_memarea.c
>> index b70830d0bb..f45191aa7f 100644
>> --- a/lib/memarea/rte_memarea.c
>> +++ b/lib/memarea/rte_memarea.c
>> @@ -132,6 +132,7 @@ rte_memarea_create(const struct rte_memarea_param *init)
>>       TAILQ_INIT(&ma->elem_list);
>>       TAILQ_INIT(&ma->free_list);
>>       ma->area_addr = addr;
>> +    ma->top_addr = (void *)((uintptr_t)addr + init->total_sz - 1);
>
> RTE_PTR_ADD()
>
>>       elem = addr;
>>       elem->size = init->total_sz - sizeof(struct memarea_elem);
>>       elem->cookie = MEMAREA_FREE_ELEM_COOKIE;
>> @@ -200,6 +201,15 @@ memarea_add_node(struct rte_memarea *ma, struct memarea_elem *elem, size_t need_
>>       elem->size = align_size;
>>   }
>>
>> +static inline void *
>> +memarea_alloc_backup(struct rte_memarea *ma, size_t size, uint32_t cookie)
>> +{
>> +    void *ptr = rte_memarea_alloc(ma->init.bak_memarea, size, cookie);
>> +    if (unlikely(ptr == NULL))
>> +        ma->bak_alloc_fails++;
>> +    return ptr;
>> +}
>> +
>>   void *
>>   rte_memarea_alloc(struct rte_memarea *ma, size_t size, uint32_t cookie)
>>   {
>> @@ -221,6 +231,8 @@ rte_memarea_alloc(struct rte_memarea *ma, size_t size, uint32_t cookie)
>>           ptr = (void *)((uintptr_t)elem + sizeof(struct memarea_elem));
>>           break;
>>       }
>> +    if (ptr == NULL && ma->init.bak_memarea != NULL)
>
> Maybe you want an unlikely() around the above, too. I assume using the
> backup area is an exceptional case.

+1

>
>> +        ptr = memarea_alloc_backup(ma, size, cookie);
>>       if (unlikely(ptr == NULL))
>>           ma->alloc_fails++;
>>       memarea_unlock(ma);
>> @@ -283,6 +295,12 @@ rte_memarea_update_refcnt(struct rte_memarea *ma, void *ptr, int16_t value)
>>           return;
>>
>>       memarea_lock(ma);
>> +    if (ptr < ma->area_addr || ptr > ma->top_addr) {
>> +        rte_memarea_update_refcnt(ma->init.bak_memarea, ptr, value);
>> +        memarea_unlock(ma);
>> +        return;
>> +    }
>> +
>>       if (unlikely(elem->refcnt <= 0 || elem->refcnt + value < 0)) {
>>           RTE_LOG(ERR, MEMAREA,
>>               "memarea: %s cookie: 0x%x curr refcnt: %d update refcnt: %d check fail!\n",
>> @@ -373,10 +391,14 @@ rte_memarea_dump(struct rte_memarea *ma, FILE *f, bool dump_all)
>>       fprintf(f, "  algorithm: %s\n", memarea_alg_name(ma->init.alg));
>>       fprintf(f, "  total-size: 0x%zx\n", ma->init.total_sz);
>>       fprintf(f, "  mt-safe: %s\n", ma->init.mt_safe ? "yes" : "no");
>> +    if (ma->init.bak_memarea)
>> +        fprintf(f, "  backup-memarea-name: %s\n", ma->init.bak_memarea->init.name);
>>       fprintf(f, "  total-regions: %u\n", memarea_elem_list_num(ma));
>>       fprintf(f, "  total-free-regions: %u\n", memarea_free_list_num(ma));
>>       fprintf(f, "  alloc_fails: %" PRIu64 "\n", ma->alloc_fails);
>>       fprintf(f, "  refcnt_check_fails: %" PRIu64 "\n", ma->refcnt_check_fails);
>> +    if (ma->init.bak_memarea)
>> +        fprintf(f, "  backup_alloc_fails: %" PRIu64 "\n", ma->bak_alloc_fails);
>>       if (dump_all)
>>           memarea_dump_all(ma, f);
>>       memarea_unlock(ma);
>> diff --git a/lib/memarea/rte_memarea.h b/lib/memarea/rte_memarea.h
>> index 10b8229c64..348febab7f 100644
>> --- a/lib/memarea/rte_memarea.h
>> +++ b/lib/memarea/rte_memarea.h
>> @@ -39,6 +39,9 @@
>>    *   specified, all the functions of the memarea API are lock-free, and assume
>>    *   to not be invoked in parallel on different logical cores to work on the
>>    *   same memarea.
>> + * - It provides backup memory mechanism, the memarea could use another memarea
>> + *   as a backup. It will attempts to allocate object from backup memarea when
>> + *   the current memarea failed to allocate.
>>    */
>>
>>   #include
>> @@ -105,6 +108,10 @@ struct rte_memarea_param {
>>            */
>>           struct rte_memarea *user_memarea;
>>       };
>> +    /** Backup memarea, which is used to handle the scenario where the
>> +     * current memarea allocation failure.
>> +     */
>> +    struct rte_memarea *bak_memarea;
>>   };
>>
>>   /**
>
> .