From: Cristian Dumitrescu <cristian.dumitrescu@intel.com>
To: dev@dpdk.org
Date: Sat, 14 Aug 2021 00:52:29 +0100
Message-Id: <20210813235232.65757-1-cristian.dumitrescu@intel.com>
Subject: [dpdk-dev] [PATCH 1/4] table: add support for learner tables

A learner table is typically used for learning or connection tracking. It enables the "add on miss" scenario: whenever the lookup key is not found in the table (lookup miss), the data plane can decide to add this key to the table with a given action, with no control plane intervention. Likewise, table keys expire based on a configurable timeout and are automatically deleted from the table, again with no control plane intervention.
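For illustration, below is a minimal, hypothetical sketch of the intended data plane flow built on the new API (not part of the patch): create a table, look a key up, and on lookup miss add the key with a chosen action. The function name example(), the ACTION_FWD action ID, the one-byte action data layout, the parameter values and the use of rte_rdtsc() as the cycle counter are assumptions made only for this sketch; error handling is omitted.

    #include <stdlib.h>

    #include <rte_cycles.h>

    #include "rte_swx_table_learner.h"

    /* Hypothetical action ID and action data used only for this example. */
    #define ACTION_FWD 1
    static uint8_t fwd_port = 3; /* One byte of action data. */

    static void
    example(void)
    {
    	struct rte_swx_table_learner_params params = {
    		.key_size = 16,          /* E.g. an IPv4 5-tuple padded to 16 bytes. */
    		.key_offset = 0,
    		.key_mask0 = NULL,       /* NULL: all key bits are significant. */
    		.action_data_size = 1,
    		.n_keys_max = 1 << 20,
    		.key_timeout = 60,       /* Seconds of inactivity before expiration. */
    	};
    	uint8_t key_buffer[16] = {0}, *key = key_buffer;
    	uint64_t action_id, time;
    	uint8_t *action_data;
    	void *table, *mailbox;
    	int hit;

    	table = rte_swx_table_learner_create(&params, 0 /* NUMA node. */);
    	mailbox = calloc(1, rte_swx_table_learner_mailbox_size_get());

    	/* Per-packet work: the lookup is multi-step (prefetch friendly), so it is
    	 * re-invoked with the same arguments until it returns 1.
    	 */
    	time = rte_rdtsc();
    	while (!rte_swx_table_learner_lookup(table, mailbox, time, &key,
    					     &action_id, &action_data, &hit))
    		;

    	if (!hit)
    		/* Add on miss: install the key just looked up, with ACTION_FWD. */
    		rte_swx_table_learner_add(table, mailbox, time, ACTION_FWD, &fwd_port);

    	free(mailbox);
    	rte_swx_table_learner_free(table);
    }
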
Signed-off-by: Cristian Dumitrescu --- lib/table/meson.build | 2 + lib/table/rte_swx_table_learner.c | 616 ++++++++++++++++++++++++++++++ lib/table/rte_swx_table_learner.h | 206 ++++++++++ lib/table/version.map | 9 + 4 files changed, 833 insertions(+) create mode 100644 lib/table/rte_swx_table_learner.c create mode 100644 lib/table/rte_swx_table_learner.h diff --git a/lib/table/meson.build b/lib/table/meson.build index a1384456a9..ac1f1aac27 100644 --- a/lib/table/meson.build +++ b/lib/table/meson.build @@ -3,6 +3,7 @@ sources = files( 'rte_swx_table_em.c', + 'rte_swx_table_learner.c', 'rte_swx_table_selector.c', 'rte_swx_table_wm.c', 'rte_table_acl.c', @@ -21,6 +22,7 @@ headers = files( 'rte_lru.h', 'rte_swx_table.h', 'rte_swx_table_em.h', + 'rte_swx_table_learner.h', 'rte_swx_table_selector.h', 'rte_swx_table_wm.h', 'rte_table.h', diff --git a/lib/table/rte_swx_table_learner.c b/lib/table/rte_swx_table_learner.c new file mode 100644 index 0000000000..5255fb0202 --- /dev/null +++ b/lib/table/rte_swx_table_learner.c @@ -0,0 +1,616 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2020 Intel Corporation + */ +#include +#include +#include +#include + +#include +#include +#include + +#include "rte_swx_table_learner.h" + +#ifndef RTE_SWX_TABLE_LEARNER_USE_HUGE_PAGES +#define RTE_SWX_TABLE_LEARNER_USE_HUGE_PAGES 1 +#endif + +#ifndef RTE_SWX_TABLE_SELECTOR_HUGE_PAGES_DISABLE + +#include + +static void * +env_calloc(size_t size, size_t alignment, int numa_node) +{ + return rte_zmalloc_socket(NULL, size, alignment, numa_node); +} + +static void +env_free(void *start, size_t size __rte_unused) +{ + rte_free(start); +} + +#else + +#include + +static void * +env_calloc(size_t size, size_t alignment __rte_unused, int numa_node) +{ + void *start; + + if (numa_available() == -1) + return NULL; + + start = numa_alloc_onnode(size, numa_node); + if (!start) + return NULL; + + memset(start, 0, size); + return start; +} + +static void +env_free(void *start, size_t size) +{ + if ((numa_available() == -1) || !start) + return; + + numa_free(start, size); +} + +#endif + +#if defined(RTE_ARCH_X86_64) + +#include + +#define crc32_u64(crc, v) _mm_crc32_u64(crc, v) + +#else + +static inline uint64_t +crc32_u64_generic(uint64_t crc, uint64_t value) +{ + int i; + + crc = (crc & 0xFFFFFFFFLLU) ^ value; + for (i = 63; i >= 0; i--) { + uint64_t mask; + + mask = -(crc & 1LLU); + crc = (crc >> 1LLU) ^ (0x82F63B78LLU & mask); + } + + return crc; +} + +#define crc32_u64(crc, v) crc32_u64_generic(crc, v) + +#endif + +/* Key size needs to be one of: 8, 16, 32 or 64. 
*/ +static inline uint32_t +hash(void *key, void *key_mask, uint32_t key_size, uint32_t seed) +{ + uint64_t *k = key; + uint64_t *m = key_mask; + uint64_t k0, k2, k5, crc0, crc1, crc2, crc3, crc4, crc5; + + switch (key_size) { + case 8: + crc0 = crc32_u64(seed, k[0] & m[0]); + return crc0; + + case 16: + k0 = k[0] & m[0]; + + crc0 = crc32_u64(k0, seed); + crc1 = crc32_u64(k0 >> 32, k[1] & m[1]); + + crc0 ^= crc1; + + return crc0; + + case 32: + k0 = k[0] & m[0]; + k2 = k[2] & m[2]; + + crc0 = crc32_u64(k0, seed); + crc1 = crc32_u64(k0 >> 32, k[1] & m[1]); + + crc2 = crc32_u64(k2, k[3] & m[3]); + crc3 = k2 >> 32; + + crc0 = crc32_u64(crc0, crc1); + crc1 = crc32_u64(crc2, crc3); + + crc0 ^= crc1; + + return crc0; + + case 64: + k0 = k[0] & m[0]; + k2 = k[2] & m[2]; + k5 = k[5] & m[5]; + + crc0 = crc32_u64(k0, seed); + crc1 = crc32_u64(k0 >> 32, k[1] & m[1]); + + crc2 = crc32_u64(k2, k[3] & m[3]); + crc3 = crc32_u64(k2 >> 32, k[4] & m[4]); + + crc4 = crc32_u64(k5, k[6] & m[6]); + crc5 = crc32_u64(k5 >> 32, k[7] & m[7]); + + crc0 = crc32_u64(crc0, (crc1 << 32) ^ crc2); + crc1 = crc32_u64(crc3, (crc4 << 32) ^ crc5); + + crc0 ^= crc1; + + return crc0; + + default: + crc0 = 0; + return crc0; + } +} + +/* + * Return: 0 = Keys are NOT equal; 1 = Keys are equal. + */ +static inline uint32_t +table_keycmp(void *a, void *b, void *b_mask, uint32_t n_bytes) +{ + uint64_t *a64 = a, *b64 = b, *b_mask64 = b_mask; + + switch (n_bytes) { + case 8: { + uint64_t xor0 = a64[0] ^ (b64[0] & b_mask64[0]); + uint32_t result = 1; + + if (xor0) + result = 0; + return result; + } + + case 16: { + uint64_t xor0 = a64[0] ^ (b64[0] & b_mask64[0]); + uint64_t xor1 = a64[1] ^ (b64[1] & b_mask64[1]); + uint64_t or = xor0 | xor1; + uint32_t result = 1; + + if (or) + result = 0; + return result; + } + + case 32: { + uint64_t xor0 = a64[0] ^ (b64[0] & b_mask64[0]); + uint64_t xor1 = a64[1] ^ (b64[1] & b_mask64[1]); + uint64_t xor2 = a64[2] ^ (b64[2] & b_mask64[2]); + uint64_t xor3 = a64[3] ^ (b64[3] & b_mask64[3]); + uint64_t or = (xor0 | xor1) | (xor2 | xor3); + uint32_t result = 1; + + if (or) + result = 0; + return result; + } + + case 64: { + uint64_t xor0 = a64[0] ^ (b64[0] & b_mask64[0]); + uint64_t xor1 = a64[1] ^ (b64[1] & b_mask64[1]); + uint64_t xor2 = a64[2] ^ (b64[2] & b_mask64[2]); + uint64_t xor3 = a64[3] ^ (b64[3] & b_mask64[3]); + uint64_t xor4 = a64[4] ^ (b64[4] & b_mask64[4]); + uint64_t xor5 = a64[5] ^ (b64[5] & b_mask64[5]); + uint64_t xor6 = a64[6] ^ (b64[6] & b_mask64[6]); + uint64_t xor7 = a64[7] ^ (b64[7] & b_mask64[7]); + uint64_t or = ((xor0 | xor1) | (xor2 | xor3)) | + ((xor4 | xor5) | (xor6 | xor7)); + uint32_t result = 1; + + if (or) + result = 0; + return result; + } + + default: { + uint32_t i; + + for (i = 0; i < n_bytes / sizeof(uint64_t); i++) + if (a64[i] != (b64[i] & b_mask64[i])) + return 0; + return 1; + } + } +} + +#define TABLE_KEYS_PER_BUCKET 4 + +#define TABLE_BUCKET_PAD_SIZE \ + (RTE_CACHE_LINE_SIZE - TABLE_KEYS_PER_BUCKET * (sizeof(uint32_t) + sizeof(uint32_t))) + +struct table_bucket { + uint32_t time[TABLE_KEYS_PER_BUCKET]; + uint32_t sig[TABLE_KEYS_PER_BUCKET]; + uint8_t pad[TABLE_BUCKET_PAD_SIZE]; + uint8_t key[0]; +}; + +struct table_params { + /* The real key size. Must be non-zero. */ + size_t key_size; + + /* They key size upgrated to the next power of 2. This used for hash generation (in + * increments of 8 bytes, from 8 to 64 bytes) and for run-time key comparison. This is why + * key sizes bigger than 64 bytes are not allowed. 
+ */ + size_t key_size_pow2; + + /* log2(key_size_pow2). Purpose: avoid multiplication with non-power-of-2 numbers. */ + size_t key_size_log2; + + /* The key offset within the key buffer. */ + size_t key_offset; + + /* The real action data size. */ + size_t action_data_size; + + /* The data size, i.e. the 8-byte action_id field plus the action data size, upgraded to the + * next power of 2. + */ + size_t data_size_pow2; + + /* log2(data_size_pow2). Purpose: avoid multiplication with non-power of 2 numbers. */ + size_t data_size_log2; + + /* Number of buckets. Must be a power of 2 to avoid modulo with non-power-of-2 numbers. */ + size_t n_buckets; + + /* Bucket mask. Purpose: replace modulo with bitmask and operation. */ + size_t bucket_mask; + + /* Total number of key bytes in the bucket, including the key padding bytes. There are + * (key_size_pow2 - key_size) padding bytes for each key in the bucket. + */ + size_t bucket_key_all_size; + + /* Bucket size. Must be a power of 2 to avoid multiplication with non-power-of-2 number. */ + size_t bucket_size; + + /* log2(bucket_size). Purpose: avoid multiplication with non-power of 2 numbers. */ + size_t bucket_size_log2; + + /* Timeout in CPU clock cycles. */ + uint64_t key_timeout; + + /* Total memory size. */ + size_t total_size; +}; + +struct table { + /* Table parameters. */ + struct table_params params; + + /* Key mask. Array of *key_size* bytes. */ + uint8_t key_mask0[RTE_CACHE_LINE_SIZE]; + + /* Table buckets. */ + uint8_t buckets[0]; +} __rte_cache_aligned; + +static int +table_params_get(struct table_params *p, struct rte_swx_table_learner_params *params) +{ + /* Check input parameters. */ + if (!params || + !params->key_size || + (params->key_size > 64) || + !params->n_keys_max || + (params->n_keys_max > 1U << 31) || + !params->key_timeout) + return -EINVAL; + + /* Key. */ + p->key_size = params->key_size; + + p->key_size_pow2 = rte_align64pow2(p->key_size); + if (p->key_size_pow2 < 8) + p->key_size_pow2 = 8; + + p->key_size_log2 = __builtin_ctzll(p->key_size_pow2); + + p->key_offset = params->key_offset; + + /* Data. */ + p->action_data_size = params->action_data_size; + + p->data_size_pow2 = rte_align64pow2(sizeof(uint64_t) + p->action_data_size); + + p->data_size_log2 = __builtin_ctzll(p->data_size_pow2); + + /* Buckets. */ + p->n_buckets = rte_align32pow2(params->n_keys_max); + + p->bucket_mask = p->n_buckets - 1; + + p->bucket_key_all_size = TABLE_KEYS_PER_BUCKET * p->key_size_pow2; + + p->bucket_size = rte_align64pow2(sizeof(struct table_bucket) + + p->bucket_key_all_size + + TABLE_KEYS_PER_BUCKET * p->data_size_pow2); + + p->bucket_size_log2 = __builtin_ctzll(p->bucket_size); + + /* Timeout. */ + p->key_timeout = params->key_timeout * rte_get_tsc_hz(); + + /* Total size. 
*/ + p->total_size = sizeof(struct table) + p->n_buckets * p->bucket_size; + + return 0; +} + +static inline struct table_bucket * +table_bucket_get(struct table *t, size_t bucket_id) +{ + return (struct table_bucket *)&t->buckets[bucket_id << t->params.bucket_size_log2]; +} + +static inline uint8_t * +table_bucket_key_get(struct table *t, struct table_bucket *b, size_t bucket_key_pos) +{ + return &b->key[bucket_key_pos << t->params.key_size_log2]; +} + +static inline uint64_t * +table_bucket_data_get(struct table *t, struct table_bucket *b, size_t bucket_key_pos) +{ + return (uint64_t *)&b->key[t->params.bucket_key_all_size + + (bucket_key_pos << t->params.data_size_log2)]; +} + +uint64_t +rte_swx_table_learner_footprint_get(struct rte_swx_table_learner_params *params) +{ + struct table_params p; + int status; + + status = table_params_get(&p, params); + + return status ? 0 : p.total_size; +} + +void * +rte_swx_table_learner_create(struct rte_swx_table_learner_params *params, int numa_node) +{ + struct table_params p; + struct table *t; + int status; + + /* Check and process the input parameters. */ + status = table_params_get(&p, params); + if (status) + return NULL; + + /* Memory allocation. */ + t = env_calloc(p.total_size, RTE_CACHE_LINE_SIZE, numa_node); + if (!t) + return NULL; + + /* Memory initialization. */ + memcpy(&t->params, &p, sizeof(struct table_params)); + + if (params->key_mask0) + memcpy(t->key_mask0, params->key_mask0, params->key_size); + else + memset(t->key_mask0, 0xFF, params->key_size); + + return t; +} + +void +rte_swx_table_learner_free(void *table) +{ + struct table *t = table; + + if (!t) + return; + + env_free(t, t->params.total_size); +} + +struct mailbox { + /* Writer: lookup state 0. Reader(s): lookup state 1, add(). */ + struct table_bucket *bucket; + + /* Writer: lookup state 0. Reader(s): lookup state 1, add(). */ + uint32_t input_sig; + + /* Writer: lookup state 1. Reader(s): add(). */ + uint8_t *input_key; + + /* Writer: lookup state 1. Reader(s): add(). Values: 0 = miss; 1 = hit. */ + uint32_t hit; + + /* Writer: lookup state 1. Reader(s): add(). Valid only when hit is non-zero. */ + size_t bucket_key_pos; + + /* State. */ + int state; +}; + +uint64_t +rte_swx_table_learner_mailbox_size_get(void) +{ + return sizeof(struct mailbox); +} + +int +rte_swx_table_learner_lookup(void *table, + void *mailbox, + uint64_t input_time, + uint8_t **key, + uint64_t *action_id, + uint8_t **action_data, + int *hit) +{ + struct table *t = table; + struct mailbox *m = mailbox; + + switch (m->state) { + case 0: { + uint8_t *input_key; + struct table_bucket *b; + size_t bucket_id; + uint32_t input_sig; + + input_key = &(*key)[t->params.key_offset]; + input_sig = hash(input_key, t->key_mask0, t->params.key_size_pow2, 0); + bucket_id = input_sig & t->params.bucket_mask; + b = table_bucket_get(t, bucket_id); + + rte_prefetch0(b); + rte_prefetch0(&b->key[0]); + rte_prefetch0(&b->key[RTE_CACHE_LINE_SIZE]); + + m->bucket = b; + m->input_key = input_key; + m->input_sig = input_sig | 1; + m->state = 1; + return 0; + } + + case 1: { + struct table_bucket *b = m->bucket; + uint32_t i; + + /* Search the input key through the bucket keys. 
*/ + for (i = 0; i < TABLE_KEYS_PER_BUCKET; i++) { + uint64_t time = b->time[i]; + uint32_t sig = b->sig[i]; + uint8_t *key = table_bucket_key_get(t, b, i); + + time <<= 32; + + if ((time > input_time) && + (sig == m->input_sig) && + table_keycmp(key, m->input_key, t->key_mask0, t->params.key_size_pow2)) { + uint64_t *data = table_bucket_data_get(t, b, i); + + /* Hit. */ + rte_prefetch0(data); + + b->time[i] = (input_time + t->params.key_timeout) >> 32; + + m->hit = 1; + m->bucket_key_pos = i; + m->state = 0; + + *action_id = data[0]; + *action_data = (uint8_t *)&data[1]; + *hit = 1; + return 1; + } + } + + /* Miss. */ + m->hit = 0; + m->state = 0; + + *hit = 0; + return 1; + } + + default: + /* This state should never be reached. Miss. */ + m->hit = 0; + m->state = 0; + + *hit = 0; + return 1; + } +} + +uint32_t +rte_swx_table_learner_add(void *table, + void *mailbox, + uint64_t input_time, + uint64_t action_id, + uint8_t *action_data) +{ + struct table *t = table; + struct mailbox *m = mailbox; + struct table_bucket *b = m->bucket; + uint32_t i; + + /* Lookup hit: The key, key signature and key time are already properly configured (the key + * time was bumped by lookup), only the key data need to be updated. + */ + if (m->hit) { + uint64_t *data = table_bucket_data_get(t, b, m->bucket_key_pos); + + /* Install the key data. */ + data[0] = action_id; + if (t->params.action_data_size && action_data) + memcpy(&data[1], action_data, t->params.action_data_size); + + return 0; + } + + /* Lookup miss: Search for a free position in the current bucket and install the key. */ + for (i = 0; i < TABLE_KEYS_PER_BUCKET; i++) { + uint64_t time = b->time[i]; + + time <<= 32; + + /* Free position: Either there was never a key installed here, so the key time is + * set to zero (the init value), which is always less than the current time, or this + * position was used before, but the key expired (the key time is in the past). + */ + if (time < input_time) { + uint8_t *key = table_bucket_key_get(t, b, i); + uint64_t *data = table_bucket_data_get(t, b, i); + + /* Install the key. */ + b->time[i] = (input_time + t->params.key_timeout) >> 32; + b->sig[i] = m->input_sig; + memcpy(key, m->input_key, t->params.key_size); + + /* Install the key data. */ + data[0] = action_id; + if (t->params.action_data_size && action_data) + memcpy(&data[1], action_data, t->params.action_data_size); + + /* Mailbox. */ + m->hit = 1; + m->bucket_key_pos = i; + + return 0; + } + } + + /* Bucket full. */ + return 1; +} + +void +rte_swx_table_learner_delete(void *table __rte_unused, + void *mailbox) +{ + struct mailbox *m = mailbox; + + if (m->hit) { + struct table_bucket *b = m->bucket; + + /* Expire the key. */ + b->time[m->bucket_key_pos] = 0; + + /* Mailbox. */ + m->hit = 0; + } +} diff --git a/lib/table/rte_swx_table_learner.h b/lib/table/rte_swx_table_learner.h new file mode 100644 index 0000000000..d6ec733655 --- /dev/null +++ b/lib/table/rte_swx_table_learner.h @@ -0,0 +1,206 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2021 Intel Corporation + */ +#ifndef __INCLUDE_RTE_SWX_TABLE_LEARNER_H__ +#define __INCLUDE_RTE_SWX_TABLE_LEARNER_H__ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @file + * RTE SWX Learner Table + * + * The learner table API. 
+ * + * This table type is typically used for learning or connection tracking, where it allows for the + * implementation of the "add on miss" scenario: whenever the lookup key is not found in the table + * (lookup miss), the data plane can decide to add this key to the table with a given action with no + * control plane intervention. Likewise, the table keys expire based on a configurable timeout and + * are automatically deleted from the table with no control plane intervention. + */ + +#include +#include + +#include + +/** Learner table creation parameters. */ +struct rte_swx_table_learner_params { + /** Key size in bytes. Must be non-zero. */ + uint32_t key_size; + + /** Offset of the first byte of the key within the key buffer. */ + uint32_t key_offset; + + /** Mask of *key_size* bytes logically laid over the bytes at positions + * *key_offset* .. (*key_offset* + *key_size* - 1) of the key buffer in order to specify + * which bits from the key buffer are part of the key and which ones are not. A bit value of + * 1 in the *key_mask0* means the respective bit in the key buffer is part of the key, while + * a bit value of 0 means the opposite. A NULL value means that all the bits are part of the + * key, i.e. the *key_mask0* is an all-ones mask. + */ + uint8_t *key_mask0; + + /** Maximum size (in bytes) of the action data. The data stored in the table for each entry + * is equal to *action_data_size* plus 8 bytes, which are used to store the action ID. + */ + uint32_t action_data_size; + + /** Maximum number of keys to be stored in the table together with their associated data. */ + uint32_t n_keys_max; + + /** Key timeout in seconds. Must be non-zero. Each table key expires and is automatically + * deleted from the table after this many seconds. + */ + uint32_t key_timeout; +}; + +/** + * Learner table memory footprint get + * + * @param[in] params + * Table create parameters. + * @return + * Table memory footprint in bytes. + */ +__rte_experimental +uint64_t +rte_swx_table_learner_footprint_get(struct rte_swx_table_learner_params *params); + +/** + * Learner table mailbox size get + * + * The mailbox is used to store the context of a lookup operation that is in + * progress and it is passed as a parameter to the lookup operation. This allows + * for multiple concurrent lookup operations into the same table. + * + * @return + * Table mailbox footprint in bytes. + */ +__rte_experimental +uint64_t +rte_swx_table_learner_mailbox_size_get(void); + +/** + * Learner table create + * + * @param[in] params + * Table creation parameters. + * @param[in] numa_node + * Non-Uniform Memory Access (NUMA) node. + * @return + * Table handle, on success, or NULL, on error. + */ +__rte_experimental +void * +rte_swx_table_learner_create(struct rte_swx_table_learner_params *params, int numa_node); + +/** + * Learner table key lookup + * + * The table lookup operation searches a given key in the table and upon its completion it returns + * an indication of whether the key is found in the table (lookup hit) or not (lookup miss). In case + * of lookup hit, the action_id and the action_data associated with the key are also returned. + * + * Multiple invocations of this function may be required in order to complete a single table lookup + * operation for a given table and a given lookup key. The completion of the table lookup operation + * is flagged by a return value of 1; in case of a return value of 0, the function must be invoked + * again with exactly the same arguments. 
+ * + * The mailbox argument is used to store the context of an on-going table key lookup operation, and + * possibly an associated key add operation. The mailbox mechanism allows for multiple concurrent + * table key lookup and add operations into the same table. + * + * @param[in] table + * Table handle. + * @param[in] mailbox + * Mailbox for the current table lookup operation. + * @param[in] time + * Current time measured in CPU clock cycles. + * @param[in] key + * Lookup key. Its size must be equal to the table *key_size*. + * @param[out] action_id + * ID of the action associated with the *key*. Must point to a valid 64-bit variable. Only valid + * when the function returns 1 and *hit* is set to true. + * @param[out] action_data + * Action data for the *action_id* action. Must point to a valid array of table *action_data_size* + * bytes. Only valid when the function returns 1 and *hit* is set to true. + * @param[out] hit + * Only valid when the function returns 1. Set to non-zero (true) on table lookup hit and to zero + * (false) on table lookup miss. + * @return + * 0 when the table lookup operation is not yet completed, and 1 when the table lookup operation + * is completed. No other return values are allowed. + */ +__rte_experimental +int +rte_swx_table_learner_lookup(void *table, + void *mailbox, + uint64_t time, + uint8_t **key, + uint64_t *action_id, + uint8_t **action_data, + int *hit); + +/** + * Learner table key add + * + * This operation takes the latest key that was looked up in the table and adds it to the table with + * the given action ID and action data. Typically, this operation is only invoked when the latest + * lookup operation in the current table resulted in lookup miss. + * + * @param[in] table + * Table handle. + * @param[in] mailbox + * Mailbox for the current operation. + * @param[in] time + * Current time measured in CPU clock cycles. + * @param[out] action_id + * ID of the action associated with the key. + * @param[out] action_data + * Action data for the *action_id* action. + * @return + * 0 on success, 1 or error (table full). + */ +__rte_experimental +uint32_t +rte_swx_table_learner_add(void *table, + void *mailbox, + uint64_t time, + uint64_t action_id, + uint8_t *action_data); + +/** + * Learner table key delete + * + * This operation takes the latest key that was looked up in the table and deletes it from the + * table. Typically, this operation is only invoked to force the deletion of the key before the key + * expires on timeout due to inactivity. + * + * @param[in] table + * Table handle. + * @param[in] mailbox + * Mailbox for the current operation. + */ +__rte_experimental +void +rte_swx_table_learner_delete(void *table, + void *mailbox); + +/** + * Learner table free + * + * @param[in] table + * Table handle. + */ +__rte_experimental +void +rte_swx_table_learner_free(void *table); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/lib/table/version.map b/lib/table/version.map index 29301480cb..f973a36ecc 100644 --- a/lib/table/version.map +++ b/lib/table/version.map @@ -36,4 +36,13 @@ EXPERIMENTAL { rte_swx_table_selector_group_set; rte_swx_table_selector_mailbox_size_get; rte_swx_table_selector_select; + + # added in 21.11 + rte_swx_table_learner_add; + rte_swx_table_learner_create; + rte_swx_table_learner_delete; + rte_swx_table_learner_footprint_get; + rte_swx_table_learner_free; + rte_swx_table_learner_lookup; + rte_swx_table_learner_mailbox_size_get; }; -- 2.17.1
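A note on the expiration mechanism used by the patch (the sketch below is illustrative only, not part of the diff): each bucket stores a 32-bit time value per key, holding the upper half of the 64-bit expiration timestamp in TSC cycles, computed as (input_time + key_timeout_cycles) >> 32. A key is considered live while (stored_time << 32) > current_time, so the effective expiry granularity is 2^32 cycles, roughly one to two seconds at typical TSC rates. A minimal standalone restatement of that arithmetic:

    #include <stdint.h>

    /* Mirrors the learner table expiry arithmetic: the 32-bit per-key time field
     * stores the upper half of the 64-bit expiration timestamp (in TSC cycles).
     */
    static inline void
    key_refresh(uint32_t *key_time, uint64_t now, uint64_t timeout_cycles)
    {
    	*key_time = (uint32_t)((now + timeout_cycles) >> 32);
    }

    static inline int
    key_is_live(uint32_t key_time, uint64_t now)
    {
    	return ((uint64_t)key_time << 32) > now;
    }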