From mboxrd@z Thu Jan  1 00:00:00 1970
From: "Medvedkin, Vladimir"
To: "Ananyev, Konstantin", "dev@dpdk.org"
Cc: "Wang, Yipeng1", "Gobriel, Sameh", "Richardson, Bruce"
Date: Thu, 25 Jun 2020 20:49:33 +0100
Message-ID: <76b1cad4-8bc9-a812-ea12-fb7e1a8c8d22@intel.com>
Subject: Re: [dpdk-dev] [PATCH v4 1/4] hash: add kv hash library

Hi Konstantin,

Thanks for the review.
See below.

On 23/06/2020 16:44, Ananyev, Konstantin wrote:
> Hi Vladimir,
>
>> --- /dev/null
>> +++ b/lib/librte_hash/k32v64_hash.c
>> @@ -0,0 +1,277 @@
>> +/* SPDX-License-Identifier: BSD-3-Clause
>> + * Copyright(c) 2020 Intel Corporation
>> + */
>> +
>> +#include
>> +
>> +#include
>> +#include
>> +#include
>> +
>> +#include "k32v64_hash.h"
>> +
>> +static inline int
>> +k32v64_hash_lookup(struct k32v64_hash_table *table, uint32_t key,
>> +	uint32_t hash, uint64_t *value)
>> +{
>> +	return __k32v64_hash_lookup(table, key, hash, value, __kv_cmp_keys);
>> +}
>> +
>> +static int
>> +k32v64_hash_bulk_lookup(struct rte_kv_hash_table *ht, void *keys_p,
>> +	uint32_t *hashes, void *values_p, unsigned int n)
>> +{
>> +	struct k32v64_hash_table *table = (struct k32v64_hash_table *)ht;
>> +	uint32_t *keys = keys_p;
>> +	uint64_t *values = values_p;
>> +	int ret, cnt = 0;
>> +	unsigned int i;
>> +
>> +	if (unlikely((table == NULL) || (keys == NULL) || (hashes == NULL) ||
>> +			(values == NULL)))
>> +		return -EINVAL;
>> +
>> +	for (i = 0; i < n; i++) {
>> +		ret = k32v64_hash_lookup(table, keys[i], hashes[i],
>> +			&values[i]);
>> +		if (ret == 0)
>> +			cnt++;
>> +	}
>> +	return cnt;
>> +}
>> +
>> +#ifdef CC_AVX512VL_SUPPORT
>> +int
>> +k32v64_hash_bulk_lookup_avx512vl(struct rte_kv_hash_table *ht,
>> +	void *keys_p, uint32_t *hashes, void *values_p, unsigned int n);
>> +#endif
>> +
>> +static rte_kv_hash_bulk_lookup_t
>> +get_lookup_bulk_fn(void)
>> +{
>> +#ifdef CC_AVX512VL_SUPPORT
>> +	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512VL))
>> +		return k32v64_hash_bulk_lookup_avx512vl;
>> +#endif
>> +	return k32v64_hash_bulk_lookup;
>> +}
>> +
>> +static int
>> +k32v64_hash_add(struct k32v64_hash_table *table, uint32_t key,
>> +	uint32_t hash, uint64_t value, uint64_t *old_value, int *found)
>> +{
>> +	uint32_t bucket;
>> +	int i, idx, ret;
>> +	uint8_t msk;
>> +	struct k32v64_ext_ent *tmp, *ent, *prev = NULL;
>> +
>> +	if (table == NULL)
>> +		return -EINVAL;
>> +
>> +	bucket = hash & table->bucket_msk;
>> +	/* Search key in table. Update value if exists */
>> +	for (i = 0; i < K32V64_KEYS_PER_BUCKET; i++) {
>> +		if ((key == table->t[bucket].key[i]) &&
>> +				(table->t[bucket].key_mask & (1 << i))) {
>> +			*old_value = table->t[bucket].val[i];
>> +			*found = 1;
>> +			__atomic_fetch_add(&table->t[bucket].cnt, 1,
>> +				__ATOMIC_ACQUIRE);
>> +			table->t[bucket].val[i] = value;
>> +			__atomic_fetch_add(&table->t[bucket].cnt, 1,
>> +				__ATOMIC_RELEASE);
>> +			return 0;
>> +		}
>> +	}
>> +
>> +	if (!SLIST_EMPTY(&table->t[bucket].head)) {
>> +		SLIST_FOREACH(ent, &table->t[bucket].head, next) {
>> +			if (ent->key == key) {
>> +				*old_value = ent->val;
>> +				*found = 1;
>> +				__atomic_fetch_add(&table->t[bucket].cnt, 1,
>> +					__ATOMIC_ACQUIRE);
>> +				ent->val = value;
>> +				__atomic_fetch_add(&table->t[bucket].cnt, 1,
>> +					__ATOMIC_RELEASE);
>> +				return 0;
>> +			}
>> +		}
>> +	}
>> +
>> +	msk = ~table->t[bucket].key_mask & VALID_KEY_MSK;
>> +	if (msk) {
>> +		idx = __builtin_ctz(msk);
>> +		table->t[bucket].key[idx] = key;
>> +		table->t[bucket].val[idx] = value;
>> +		__atomic_or_fetch(&table->t[bucket].key_mask, 1 << idx,
>> +			__ATOMIC_RELEASE);
> I think this case also has to be guarded with table->t[bucket].cnt updates.

Agree, will change it.
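Roughly like this (untested sketch; it reuses the same cnt increment
pair the in-place update paths above already use, so a concurrent
lookup can detect that the bucket is being modified):

	msk = ~table->t[bucket].key_mask & VALID_KEY_MSK;
	if (msk) {
		idx = __builtin_ctz(msk);
		/* first increment: mark bucket as being updated */
		__atomic_fetch_add(&table->t[bucket].cnt, 1,
			__ATOMIC_ACQUIRE);
		table->t[bucket].key[idx] = key;
		table->t[bucket].val[idx] = value;
		__atomic_or_fetch(&table->t[bucket].key_mask, 1 << idx,
			__ATOMIC_RELEASE);
		/* second increment: mark update as complete */
		__atomic_fetch_add(&table->t[bucket].cnt, 1,
			__ATOMIC_RELEASE);
		table->nb_ent++;
		*found = 0;
		return 0;
	}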
>
>> +		table->nb_ent++;
>> +		*found = 0;
>> +		return 0;
>> +	}
>> +
>> +	ret = rte_mempool_get(table->ext_ent_pool, (void **)&ent);
>> +	if (ret < 0)
>> +		return ret;
>> +
>> +	SLIST_NEXT(ent, next) = NULL;
>> +	ent->key = key;
>> +	ent->val = value;
>> +	rte_smp_wmb();
> __atomic_thread_fence(__ATOMIC_RELEASE);
> ?

Sure, looks like I missed it, will fix.
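I.e. the publish path becomes (sketch; same code as above, only the
barrier changes):

	SLIST_NEXT(ent, next) = NULL;
	ent->key = key;
	ent->val = value;
	/* make sure the new entry is fully written out
	 * before it gets linked into the list and
	 * becomes visible to readers
	 */
	__atomic_thread_fence(__ATOMIC_RELEASE);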
>
>> +	SLIST_FOREACH(tmp, &table->t[bucket].head, next)
>> +		prev = tmp;
>> +
>> +	if (prev == NULL)
>> +		SLIST_INSERT_HEAD(&table->t[bucket].head, ent, next);
>> +	else
>> +		SLIST_INSERT_AFTER(prev, ent, next);
>> +
>> +	table->nb_ent++;
>> +	table->nb_ext_ent++;
>> +	*found = 0;
>> +	return 0;
>> +}
>> +
>> +static int
>> +k32v64_hash_delete(struct k32v64_hash_table *table, uint32_t key,
>> +	uint32_t hash, uint64_t *old_value)
>> +{
>> +	uint32_t bucket;
>> +	int i;
>> +	struct k32v64_ext_ent *ent;
>> +
>> +	if (table == NULL)
>> +		return -EINVAL;
>> +
>> +	bucket = hash & table->bucket_msk;
>> +
>> +	for (i = 0; i < K32V64_KEYS_PER_BUCKET; i++) {
>> +		if ((key == table->t[bucket].key[i]) &&
>> +				(table->t[bucket].key_mask & (1 << i))) {
>> +			*old_value = table->t[bucket].val[i];
>> +			ent = SLIST_FIRST(&table->t[bucket].head);
>> +			if (ent) {
>> +				__atomic_fetch_add(&table->t[bucket].cnt, 1,
>> +					__ATOMIC_ACQUIRE);
>> +				table->t[bucket].key[i] = ent->key;
>> +				table->t[bucket].val[i] = ent->val;
>> +				SLIST_REMOVE_HEAD(&table->t[bucket].head, next);
>> +				__atomic_fetch_add(&table->t[bucket].cnt, 1,
>> +					__ATOMIC_RELEASE);
>> +				table->nb_ext_ent--;
>> +			} else
>> +				__atomic_and_fetch(&table->t[bucket].key_mask,
>> +					~(1 << i), __ATOMIC_RELEASE);
> Same thought as above - safer to guard this mask update with cnt update.

Agree, will guard it the same way.

>> +			if (ent)
>> +				rte_mempool_put(table->ext_ent_pool, ent);
> Can't this 'if (ent)' be merged with the previous 'if (ent) {...}' above?

Yes, will merge.

>> +			table->nb_ent--;
>> +			return 0;
>> +		}
>> +	}
>> +
>> +	SLIST_FOREACH(ent, &table->t[bucket].head, next)
>> +		if (ent->key == key)
>> +			break;
>> +
>> +	if (ent == NULL)
>> +		return -ENOENT;
>> +
>> +	*old_value = ent->val;
>> +
>> +	__atomic_fetch_add(&table->t[bucket].cnt, 1, __ATOMIC_ACQUIRE);
>> +	SLIST_REMOVE(&table->t[bucket].head, ent, k32v64_ext_ent, next);
>> +	__atomic_fetch_add(&table->t[bucket].cnt, 1, __ATOMIC_RELEASE);
>> +	rte_mempool_put(table->ext_ent_pool, ent);
>> +
>> +	table->nb_ext_ent--;
>> +	table->nb_ent--;
>> +
>> +	return 0;
>> +}
>> +

-- 
Regards,
Vladimir