From mboxrd@z Thu Jan  1 00:00:00 1970
From: Anatoly Burakov <anatoly.burakov@intel.com>
To: dev@dpdk.org
Cc: Marcin Baran <marcinx.baran@intel.com>, Bruce Richardson,
	Vladimir Medvedkin, john.mcnamara@intel.com, thomas@monjalon.net,
	david.marchand@redhat.com
Date: Wed, 16 Oct 2019 13:43:20 +0100
Message-Id: <418ed834083f40af0f1d5298941b8f7d6992b848.1571229052.git.anatoly.burakov@intel.com>
References: <20190930092139.2440-1-marcinx.baran@intel.com>
Subject: [dpdk-dev] [PATCH v2 05/10] lpm: remove deprecated code

From: Marcin Baran <marcinx.baran@intel.com>

Remove code for old ABI versions ahead of the ABI version bump.

Signed-off-by: Marcin Baran <marcinx.baran@intel.com>
Signed-off-by: Anatoly Burakov <anatoly.burakov@intel.com>
---

Notes:
    v2:
    - Moved this patch to before the ABI version bump to avoid
      compile breakage

 lib/librte_lpm/rte_lpm.c  | 996 ++------------------------------------
 lib/librte_lpm/rte_lpm.h  |  88 ----
 lib/librte_lpm/rte_lpm6.c | 132 +----
 lib/librte_lpm/rte_lpm6.h |  25 -
 4 files changed, 48 insertions(+), 1193 deletions(-)

diff --git a/lib/librte_lpm/rte_lpm.c b/lib/librte_lpm/rte_lpm.c
index 3a929a1b16..2687564194 100644
--- a/lib/librte_lpm/rte_lpm.c
+++ b/lib/librte_lpm/rte_lpm.c
@@ -89,34 +89,8 @@ depth_to_range(uint8_t depth)
 /*
  * Find an existing lpm table and return a pointer to it.
*/ -struct rte_lpm_v20 * -rte_lpm_find_existing_v20(const char *name) -{ - struct rte_lpm_v20 *l = NULL; - struct rte_tailq_entry *te; - struct rte_lpm_list *lpm_list; - - lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list); - - rte_mcfg_tailq_read_lock(); - TAILQ_FOREACH(te, lpm_list, next) { - l = te->data; - if (strncmp(name, l->name, RTE_LPM_NAMESIZE) == 0) - break; - } - rte_mcfg_tailq_read_unlock(); - - if (te == NULL) { - rte_errno = ENOENT; - return NULL; - } - - return l; -} -VERSION_SYMBOL(rte_lpm_find_existing, _v20, 2.0); - struct rte_lpm * -rte_lpm_find_existing_v1604(const char *name) +rte_lpm_find_existing(const char *name) { struct rte_lpm *l = NULL; struct rte_tailq_entry *te; @@ -139,88 +113,12 @@ rte_lpm_find_existing_v1604(const char *name) return l; } -BIND_DEFAULT_SYMBOL(rte_lpm_find_existing, _v1604, 16.04); -MAP_STATIC_SYMBOL(struct rte_lpm *rte_lpm_find_existing(const char *name), - rte_lpm_find_existing_v1604); /* * Allocates memory for LPM object */ -struct rte_lpm_v20 * -rte_lpm_create_v20(const char *name, int socket_id, int max_rules, - __rte_unused int flags) -{ - char mem_name[RTE_LPM_NAMESIZE]; - struct rte_lpm_v20 *lpm = NULL; - struct rte_tailq_entry *te; - uint32_t mem_size; - struct rte_lpm_list *lpm_list; - - lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list); - - RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl_entry_v20) != 2); - - /* Check user arguments. */ - if ((name == NULL) || (socket_id < -1) || (max_rules == 0)) { - rte_errno = EINVAL; - return NULL; - } - - snprintf(mem_name, sizeof(mem_name), "LPM_%s", name); - - /* Determine the amount of memory to allocate. */ - mem_size = sizeof(*lpm) + (sizeof(lpm->rules_tbl[0]) * max_rules); - - rte_mcfg_tailq_write_lock(); - - /* guarantee there's no existing */ - TAILQ_FOREACH(te, lpm_list, next) { - lpm = te->data; - if (strncmp(name, lpm->name, RTE_LPM_NAMESIZE) == 0) - break; - } - - if (te != NULL) { - lpm = NULL; - rte_errno = EEXIST; - goto exit; - } - - /* allocate tailq entry */ - te = rte_zmalloc("LPM_TAILQ_ENTRY", sizeof(*te), 0); - if (te == NULL) { - RTE_LOG(ERR, LPM, "Failed to allocate tailq entry\n"); - rte_errno = ENOMEM; - goto exit; - } - - /* Allocate memory to store the LPM data structures. */ - lpm = rte_zmalloc_socket(mem_name, mem_size, - RTE_CACHE_LINE_SIZE, socket_id); - if (lpm == NULL) { - RTE_LOG(ERR, LPM, "LPM memory allocation failed\n"); - rte_free(te); - rte_errno = ENOMEM; - goto exit; - } - - /* Save user arguments. */ - lpm->max_rules = max_rules; - strlcpy(lpm->name, name, sizeof(lpm->name)); - - te->data = lpm; - - TAILQ_INSERT_TAIL(lpm_list, te, next); - -exit: - rte_mcfg_tailq_write_unlock(); - - return lpm; -} -VERSION_SYMBOL(rte_lpm_create, _v20, 2.0); - struct rte_lpm * -rte_lpm_create_v1604(const char *name, int socket_id, +rte_lpm_create(const char *name, int socket_id, const struct rte_lpm_config *config) { char mem_name[RTE_LPM_NAMESIZE]; @@ -320,45 +218,12 @@ rte_lpm_create_v1604(const char *name, int socket_id, return lpm; } -BIND_DEFAULT_SYMBOL(rte_lpm_create, _v1604, 16.04); -MAP_STATIC_SYMBOL( - struct rte_lpm *rte_lpm_create(const char *name, int socket_id, - const struct rte_lpm_config *config), rte_lpm_create_v1604); /* * Deallocates memory for given LPM table. */ void -rte_lpm_free_v20(struct rte_lpm_v20 *lpm) -{ - struct rte_lpm_list *lpm_list; - struct rte_tailq_entry *te; - - /* Check user arguments. 
*/ - if (lpm == NULL) - return; - - lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list); - - rte_mcfg_tailq_write_lock(); - - /* find our tailq entry */ - TAILQ_FOREACH(te, lpm_list, next) { - if (te->data == (void *) lpm) - break; - } - if (te != NULL) - TAILQ_REMOVE(lpm_list, te, next); - - rte_mcfg_tailq_write_unlock(); - - rte_free(lpm); - rte_free(te); -} -VERSION_SYMBOL(rte_lpm_free, _v20, 2.0); - -void -rte_lpm_free_v1604(struct rte_lpm *lpm) +rte_lpm_free(struct rte_lpm *lpm) { struct rte_lpm_list *lpm_list; struct rte_tailq_entry *te; @@ -386,9 +251,6 @@ rte_lpm_free_v1604(struct rte_lpm *lpm) rte_free(lpm); rte_free(te); } -BIND_DEFAULT_SYMBOL(rte_lpm_free, _v1604, 16.04); -MAP_STATIC_SYMBOL(void rte_lpm_free(struct rte_lpm *lpm), - rte_lpm_free_v1604); /* * Adds a rule to the rule table. @@ -401,79 +263,7 @@ MAP_STATIC_SYMBOL(void rte_lpm_free(struct rte_lpm *lpm), * NOTE: Valid range for depth parameter is 1 .. 32 inclusive. */ static int32_t -rule_add_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth, - uint8_t next_hop) -{ - uint32_t rule_gindex, rule_index, last_rule; - int i; - - VERIFY_DEPTH(depth); - - /* Scan through rule group to see if rule already exists. */ - if (lpm->rule_info[depth - 1].used_rules > 0) { - - /* rule_gindex stands for rule group index. */ - rule_gindex = lpm->rule_info[depth - 1].first_rule; - /* Initialise rule_index to point to start of rule group. */ - rule_index = rule_gindex; - /* Last rule = Last used rule in this rule group. */ - last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules; - - for (; rule_index < last_rule; rule_index++) { - - /* If rule already exists update its next_hop and return. */ - if (lpm->rules_tbl[rule_index].ip == ip_masked) { - lpm->rules_tbl[rule_index].next_hop = next_hop; - - return rule_index; - } - } - - if (rule_index == lpm->max_rules) - return -ENOSPC; - } else { - /* Calculate the position in which the rule will be stored. */ - rule_index = 0; - - for (i = depth - 1; i > 0; i--) { - if (lpm->rule_info[i - 1].used_rules > 0) { - rule_index = lpm->rule_info[i - 1].first_rule - + lpm->rule_info[i - 1].used_rules; - break; - } - } - if (rule_index == lpm->max_rules) - return -ENOSPC; - - lpm->rule_info[depth - 1].first_rule = rule_index; - } - - /* Make room for the new rule in the array. */ - for (i = RTE_LPM_MAX_DEPTH; i > depth; i--) { - if (lpm->rule_info[i - 1].first_rule - + lpm->rule_info[i - 1].used_rules == lpm->max_rules) - return -ENOSPC; - - if (lpm->rule_info[i - 1].used_rules > 0) { - lpm->rules_tbl[lpm->rule_info[i - 1].first_rule - + lpm->rule_info[i - 1].used_rules] - = lpm->rules_tbl[lpm->rule_info[i - 1].first_rule]; - lpm->rule_info[i - 1].first_rule++; - } - } - - /* Add the new rule. */ - lpm->rules_tbl[rule_index].ip = ip_masked; - lpm->rules_tbl[rule_index].next_hop = next_hop; - - /* Increment the used rules counter for this rule group. */ - lpm->rule_info[depth - 1].used_rules++; - - return rule_index; -} - -static int32_t -rule_add_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth, +rule_add(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth, uint32_t next_hop) { uint32_t rule_gindex, rule_index, last_rule; @@ -549,30 +339,7 @@ rule_add_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth, * NOTE: Valid range for depth parameter is 1 .. 32 inclusive. 
*/ static void -rule_delete_v20(struct rte_lpm_v20 *lpm, int32_t rule_index, uint8_t depth) -{ - int i; - - VERIFY_DEPTH(depth); - - lpm->rules_tbl[rule_index] = - lpm->rules_tbl[lpm->rule_info[depth - 1].first_rule - + lpm->rule_info[depth - 1].used_rules - 1]; - - for (i = depth; i < RTE_LPM_MAX_DEPTH; i++) { - if (lpm->rule_info[i].used_rules > 0) { - lpm->rules_tbl[lpm->rule_info[i].first_rule - 1] = - lpm->rules_tbl[lpm->rule_info[i].first_rule - + lpm->rule_info[i].used_rules - 1]; - lpm->rule_info[i].first_rule--; - } - } - - lpm->rule_info[depth - 1].used_rules--; -} - -static void -rule_delete_v1604(struct rte_lpm *lpm, int32_t rule_index, uint8_t depth) +rule_delete(struct rte_lpm *lpm, int32_t rule_index, uint8_t depth) { int i; @@ -599,28 +366,7 @@ rule_delete_v1604(struct rte_lpm *lpm, int32_t rule_index, uint8_t depth) * NOTE: Valid range for depth parameter is 1 .. 32 inclusive. */ static int32_t -rule_find_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth) -{ - uint32_t rule_gindex, last_rule, rule_index; - - VERIFY_DEPTH(depth); - - rule_gindex = lpm->rule_info[depth - 1].first_rule; - last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules; - - /* Scan used rules at given depth to find rule. */ - for (rule_index = rule_gindex; rule_index < last_rule; rule_index++) { - /* If rule is found return the rule index. */ - if (lpm->rules_tbl[rule_index].ip == ip_masked) - return rule_index; - } - - /* If rule is not found return -EINVAL. */ - return -EINVAL; -} - -static int32_t -rule_find_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth) +rule_find(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth) { uint32_t rule_gindex, last_rule, rule_index; @@ -644,42 +390,7 @@ rule_find_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth) * Find, clean and allocate a tbl8. */ static int32_t -tbl8_alloc_v20(struct rte_lpm_tbl_entry_v20 *tbl8) -{ - uint32_t group_idx; /* tbl8 group index. */ - struct rte_lpm_tbl_entry_v20 *tbl8_entry; - - /* Scan through tbl8 to find a free (i.e. INVALID) tbl8 group. */ - for (group_idx = 0; group_idx < RTE_LPM_TBL8_NUM_GROUPS; - group_idx++) { - tbl8_entry = &tbl8[group_idx * RTE_LPM_TBL8_GROUP_NUM_ENTRIES]; - /* If a free tbl8 group is found clean it and set as VALID. */ - if (!tbl8_entry->valid_group) { - struct rte_lpm_tbl_entry_v20 new_tbl8_entry = { - .valid = INVALID, - .depth = 0, - .valid_group = VALID, - }; - new_tbl8_entry.next_hop = 0; - - memset(&tbl8_entry[0], 0, - RTE_LPM_TBL8_GROUP_NUM_ENTRIES * - sizeof(tbl8_entry[0])); - - __atomic_store(tbl8_entry, &new_tbl8_entry, - __ATOMIC_RELAXED); - - /* Return group index for allocated tbl8 group. */ - return group_idx; - } - } - - /* If there are no tbl8 groups free then return error. */ - return -ENOSPC; -} - -static int32_t -tbl8_alloc_v1604(struct rte_lpm_tbl_entry *tbl8, uint32_t number_tbl8s) +tbl8_alloc(struct rte_lpm_tbl_entry *tbl8, uint32_t number_tbl8s) { uint32_t group_idx; /* tbl8 group index. 
*/ struct rte_lpm_tbl_entry *tbl8_entry; @@ -713,22 +424,7 @@ tbl8_alloc_v1604(struct rte_lpm_tbl_entry *tbl8, uint32_t number_tbl8s) } static void -tbl8_free_v20(struct rte_lpm_tbl_entry_v20 *tbl8, uint32_t tbl8_group_start) -{ - /* Set tbl8 group invalid*/ - struct rte_lpm_tbl_entry_v20 zero_tbl8_entry = { - .valid = INVALID, - .depth = 0, - .valid_group = INVALID, - }; - zero_tbl8_entry.next_hop = 0; - - __atomic_store(&tbl8[tbl8_group_start], &zero_tbl8_entry, - __ATOMIC_RELAXED); -} - -static void -tbl8_free_v1604(struct rte_lpm_tbl_entry *tbl8, uint32_t tbl8_group_start) +tbl8_free(struct rte_lpm_tbl_entry *tbl8, uint32_t tbl8_group_start) { /* Set tbl8 group invalid*/ struct rte_lpm_tbl_entry zero_tbl8_entry = {0}; @@ -738,78 +434,7 @@ tbl8_free_v1604(struct rte_lpm_tbl_entry *tbl8, uint32_t tbl8_group_start) } static __rte_noinline int32_t -add_depth_small_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth, - uint8_t next_hop) -{ - uint32_t tbl24_index, tbl24_range, tbl8_index, tbl8_group_end, i, j; - - /* Calculate the index into Table24. */ - tbl24_index = ip >> 8; - tbl24_range = depth_to_range(depth); - - for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) { - /* - * For invalid OR valid and non-extended tbl 24 entries set - * entry. - */ - if (!lpm->tbl24[i].valid || (lpm->tbl24[i].valid_group == 0 && - lpm->tbl24[i].depth <= depth)) { - - struct rte_lpm_tbl_entry_v20 new_tbl24_entry = { - .valid = VALID, - .valid_group = 0, - .depth = depth, - }; - new_tbl24_entry.next_hop = next_hop; - - /* Setting tbl24 entry in one go to avoid race - * conditions - */ - __atomic_store(&lpm->tbl24[i], &new_tbl24_entry, - __ATOMIC_RELEASE); - - continue; - } - - if (lpm->tbl24[i].valid_group == 1) { - /* If tbl24 entry is valid and extended calculate the - * index into tbl8. - */ - tbl8_index = lpm->tbl24[i].group_idx * - RTE_LPM_TBL8_GROUP_NUM_ENTRIES; - tbl8_group_end = tbl8_index + - RTE_LPM_TBL8_GROUP_NUM_ENTRIES; - - for (j = tbl8_index; j < tbl8_group_end; j++) { - if (!lpm->tbl8[j].valid || - lpm->tbl8[j].depth <= depth) { - struct rte_lpm_tbl_entry_v20 - new_tbl8_entry = { - .valid = VALID, - .valid_group = VALID, - .depth = depth, - }; - new_tbl8_entry.next_hop = next_hop; - - /* - * Setting tbl8 entry in one go to avoid - * race conditions - */ - __atomic_store(&lpm->tbl8[j], - &new_tbl8_entry, - __ATOMIC_RELAXED); - - continue; - } - } - } - } - - return 0; -} - -static __rte_noinline int32_t -add_depth_small_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, +add_depth_small(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, uint32_t next_hop) { #define group_idx next_hop @@ -881,150 +506,7 @@ add_depth_small_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, } static __rte_noinline int32_t -add_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth, - uint8_t next_hop) -{ - uint32_t tbl24_index; - int32_t tbl8_group_index, tbl8_group_start, tbl8_group_end, tbl8_index, - tbl8_range, i; - - tbl24_index = (ip_masked >> 8); - tbl8_range = depth_to_range(depth); - - if (!lpm->tbl24[tbl24_index].valid) { - /* Search for a free tbl8 group. */ - tbl8_group_index = tbl8_alloc_v20(lpm->tbl8); - - /* Check tbl8 allocation was successful. */ - if (tbl8_group_index < 0) { - return tbl8_group_index; - } - - /* Find index into tbl8 and range. */ - tbl8_index = (tbl8_group_index * - RTE_LPM_TBL8_GROUP_NUM_ENTRIES) + - (ip_masked & 0xFF); - - /* Set tbl8 entry. 
*/ - for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) { - struct rte_lpm_tbl_entry_v20 new_tbl8_entry = { - .valid = VALID, - .depth = depth, - .valid_group = lpm->tbl8[i].valid_group, - }; - new_tbl8_entry.next_hop = next_hop; - __atomic_store(&lpm->tbl8[i], &new_tbl8_entry, - __ATOMIC_RELAXED); - } - - /* - * Update tbl24 entry to point to new tbl8 entry. Note: The - * ext_flag and tbl8_index need to be updated simultaneously, - * so assign whole structure in one go - */ - - struct rte_lpm_tbl_entry_v20 new_tbl24_entry = { - .group_idx = (uint8_t)tbl8_group_index, - .valid = VALID, - .valid_group = 1, - .depth = 0, - }; - - __atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry, - __ATOMIC_RELEASE); - - } /* If valid entry but not extended calculate the index into Table8. */ - else if (lpm->tbl24[tbl24_index].valid_group == 0) { - /* Search for free tbl8 group. */ - tbl8_group_index = tbl8_alloc_v20(lpm->tbl8); - - if (tbl8_group_index < 0) { - return tbl8_group_index; - } - - tbl8_group_start = tbl8_group_index * - RTE_LPM_TBL8_GROUP_NUM_ENTRIES; - tbl8_group_end = tbl8_group_start + - RTE_LPM_TBL8_GROUP_NUM_ENTRIES; - - /* Populate new tbl8 with tbl24 value. */ - for (i = tbl8_group_start; i < tbl8_group_end; i++) { - struct rte_lpm_tbl_entry_v20 new_tbl8_entry = { - .valid = VALID, - .depth = lpm->tbl24[tbl24_index].depth, - .valid_group = lpm->tbl8[i].valid_group, - }; - new_tbl8_entry.next_hop = - lpm->tbl24[tbl24_index].next_hop; - __atomic_store(&lpm->tbl8[i], &new_tbl8_entry, - __ATOMIC_RELAXED); - } - - tbl8_index = tbl8_group_start + (ip_masked & 0xFF); - - /* Insert new rule into the tbl8 entry. */ - for (i = tbl8_index; i < tbl8_index + tbl8_range; i++) { - struct rte_lpm_tbl_entry_v20 new_tbl8_entry = { - .valid = VALID, - .depth = depth, - .valid_group = lpm->tbl8[i].valid_group, - }; - new_tbl8_entry.next_hop = next_hop; - __atomic_store(&lpm->tbl8[i], &new_tbl8_entry, - __ATOMIC_RELAXED); - } - - /* - * Update tbl24 entry to point to new tbl8 entry. Note: The - * ext_flag and tbl8_index need to be updated simultaneously, - * so assign whole structure in one go. - */ - - struct rte_lpm_tbl_entry_v20 new_tbl24_entry = { - .group_idx = (uint8_t)tbl8_group_index, - .valid = VALID, - .valid_group = 1, - .depth = 0, - }; - - __atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry, - __ATOMIC_RELEASE); - - } else { /* - * If it is valid, extended entry calculate the index into tbl8. - */ - tbl8_group_index = lpm->tbl24[tbl24_index].group_idx; - tbl8_group_start = tbl8_group_index * - RTE_LPM_TBL8_GROUP_NUM_ENTRIES; - tbl8_index = tbl8_group_start + (ip_masked & 0xFF); - - for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) { - - if (!lpm->tbl8[i].valid || - lpm->tbl8[i].depth <= depth) { - struct rte_lpm_tbl_entry_v20 new_tbl8_entry = { - .valid = VALID, - .depth = depth, - .valid_group = lpm->tbl8[i].valid_group, - }; - new_tbl8_entry.next_hop = next_hop; - /* - * Setting tbl8 entry in one go to avoid race - * condition - */ - __atomic_store(&lpm->tbl8[i], &new_tbl8_entry, - __ATOMIC_RELAXED); - - continue; - } - } - } - - return 0; -} - -static __rte_noinline int32_t -add_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth, +add_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth, uint32_t next_hop) { #define group_idx next_hop @@ -1037,7 +519,7 @@ add_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth, if (!lpm->tbl24[tbl24_index].valid) { /* Search for a free tbl8 group. 
*/ - tbl8_group_index = tbl8_alloc_v1604(lpm->tbl8, lpm->number_tbl8s); + tbl8_group_index = tbl8_alloc(lpm->tbl8, lpm->number_tbl8s); /* Check tbl8 allocation was successful. */ if (tbl8_group_index < 0) { @@ -1083,7 +565,7 @@ add_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth, } /* If valid entry but not extended calculate the index into Table8. */ else if (lpm->tbl24[tbl24_index].valid_group == 0) { /* Search for free tbl8 group. */ - tbl8_group_index = tbl8_alloc_v1604(lpm->tbl8, lpm->number_tbl8s); + tbl8_group_index = tbl8_alloc(lpm->tbl8, lpm->number_tbl8s); if (tbl8_group_index < 0) { return tbl8_group_index; @@ -1177,48 +659,7 @@ add_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth, * Add a route */ int -rte_lpm_add_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth, - uint8_t next_hop) -{ - int32_t rule_index, status = 0; - uint32_t ip_masked; - - /* Check user arguments. */ - if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH)) - return -EINVAL; - - ip_masked = ip & depth_to_mask(depth); - - /* Add the rule to the rule table. */ - rule_index = rule_add_v20(lpm, ip_masked, depth, next_hop); - - /* If the is no space available for new rule return error. */ - if (rule_index < 0) { - return rule_index; - } - - if (depth <= MAX_DEPTH_TBL24) { - status = add_depth_small_v20(lpm, ip_masked, depth, next_hop); - } else { /* If depth > RTE_LPM_MAX_DEPTH_TBL24 */ - status = add_depth_big_v20(lpm, ip_masked, depth, next_hop); - - /* - * If add fails due to exhaustion of tbl8 extensions delete - * rule that was added to rule table. - */ - if (status < 0) { - rule_delete_v20(lpm, rule_index, depth); - - return status; - } - } - - return 0; -} -VERSION_SYMBOL(rte_lpm_add, _v20, 2.0); - -int -rte_lpm_add_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, +rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, uint32_t next_hop) { int32_t rule_index, status = 0; @@ -1231,7 +672,7 @@ rte_lpm_add_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, ip_masked = ip & depth_to_mask(depth); /* Add the rule to the rule table. */ - rule_index = rule_add_v1604(lpm, ip_masked, depth, next_hop); + rule_index = rule_add(lpm, ip_masked, depth, next_hop); /* If the is no space available for new rule return error. */ if (rule_index < 0) { @@ -1239,16 +680,16 @@ rte_lpm_add_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, } if (depth <= MAX_DEPTH_TBL24) { - status = add_depth_small_v1604(lpm, ip_masked, depth, next_hop); + status = add_depth_small(lpm, ip_masked, depth, next_hop); } else { /* If depth > RTE_LPM_MAX_DEPTH_TBL24 */ - status = add_depth_big_v1604(lpm, ip_masked, depth, next_hop); + status = add_depth_big(lpm, ip_masked, depth, next_hop); /* * If add fails due to exhaustion of tbl8 extensions delete * rule that was added to rule table. */ if (status < 0) { - rule_delete_v1604(lpm, rule_index, depth); + rule_delete(lpm, rule_index, depth); return status; } @@ -1256,42 +697,12 @@ rte_lpm_add_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, return 0; } -BIND_DEFAULT_SYMBOL(rte_lpm_add, _v1604, 16.04); -MAP_STATIC_SYMBOL(int rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, - uint8_t depth, uint32_t next_hop), rte_lpm_add_v1604); /* * Look for a rule in the high-level rules table */ int -rte_lpm_is_rule_present_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth, -uint8_t *next_hop) -{ - uint32_t ip_masked; - int32_t rule_index; - - /* Check user arguments. 
*/ - if ((lpm == NULL) || - (next_hop == NULL) || - (depth < 1) || (depth > RTE_LPM_MAX_DEPTH)) - return -EINVAL; - - /* Look for the rule using rule_find. */ - ip_masked = ip & depth_to_mask(depth); - rule_index = rule_find_v20(lpm, ip_masked, depth); - - if (rule_index >= 0) { - *next_hop = lpm->rules_tbl[rule_index].next_hop; - return 1; - } - - /* If rule is not found return 0. */ - return 0; -} -VERSION_SYMBOL(rte_lpm_is_rule_present, _v20, 2.0); - -int -rte_lpm_is_rule_present_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, +rte_lpm_is_rule_present(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, uint32_t *next_hop) { uint32_t ip_masked; @@ -1305,7 +716,7 @@ uint32_t *next_hop) /* Look for the rule using rule_find. */ ip_masked = ip & depth_to_mask(depth); - rule_index = rule_find_v1604(lpm, ip_masked, depth); + rule_index = rule_find(lpm, ip_masked, depth); if (rule_index >= 0) { *next_hop = lpm->rules_tbl[rule_index].next_hop; @@ -1315,12 +726,9 @@ uint32_t *next_hop) /* If rule is not found return 0. */ return 0; } -BIND_DEFAULT_SYMBOL(rte_lpm_is_rule_present, _v1604, 16.04); -MAP_STATIC_SYMBOL(int rte_lpm_is_rule_present(struct rte_lpm *lpm, uint32_t ip, - uint8_t depth, uint32_t *next_hop), rte_lpm_is_rule_present_v1604); static int32_t -find_previous_rule_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth, +find_previous_rule(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, uint8_t *sub_rule_depth) { int32_t rule_index; @@ -1330,7 +738,7 @@ find_previous_rule_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth, for (prev_depth = (uint8_t)(depth - 1); prev_depth > 0; prev_depth--) { ip_masked = ip & depth_to_mask(prev_depth); - rule_index = rule_find_v20(lpm, ip_masked, prev_depth); + rule_index = rule_find(lpm, ip_masked, prev_depth); if (rule_index >= 0) { *sub_rule_depth = prev_depth; @@ -1342,133 +750,7 @@ find_previous_rule_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth, } static int32_t -find_previous_rule_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, - uint8_t *sub_rule_depth) -{ - int32_t rule_index; - uint32_t ip_masked; - uint8_t prev_depth; - - for (prev_depth = (uint8_t)(depth - 1); prev_depth > 0; prev_depth--) { - ip_masked = ip & depth_to_mask(prev_depth); - - rule_index = rule_find_v1604(lpm, ip_masked, prev_depth); - - if (rule_index >= 0) { - *sub_rule_depth = prev_depth; - return rule_index; - } - } - - return -1; -} - -static int32_t -delete_depth_small_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, - uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth) -{ - uint32_t tbl24_range, tbl24_index, tbl8_group_index, tbl8_index, i, j; - - /* Calculate the range and index into Table24. */ - tbl24_range = depth_to_range(depth); - tbl24_index = (ip_masked >> 8); - - /* - * Firstly check the sub_rule_index. A -1 indicates no replacement rule - * and a positive number indicates a sub_rule_index. - */ - if (sub_rule_index < 0) { - /* - * If no replacement rule exists then invalidate entries - * associated with this rule. 
- */ - for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) { - - if (lpm->tbl24[i].valid_group == 0 && - lpm->tbl24[i].depth <= depth) { - struct rte_lpm_tbl_entry_v20 - zero_tbl24_entry = { - .valid = INVALID, - .depth = 0, - .valid_group = 0, - }; - zero_tbl24_entry.next_hop = 0; - __atomic_store(&lpm->tbl24[i], - &zero_tbl24_entry, __ATOMIC_RELEASE); - } else if (lpm->tbl24[i].valid_group == 1) { - /* - * If TBL24 entry is extended, then there has - * to be a rule with depth >= 25 in the - * associated TBL8 group. - */ - - tbl8_group_index = lpm->tbl24[i].group_idx; - tbl8_index = tbl8_group_index * - RTE_LPM_TBL8_GROUP_NUM_ENTRIES; - - for (j = tbl8_index; j < (tbl8_index + - RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) { - - if (lpm->tbl8[j].depth <= depth) - lpm->tbl8[j].valid = INVALID; - } - } - } - } else { - /* - * If a replacement rule exists then modify entries - * associated with this rule. - */ - - struct rte_lpm_tbl_entry_v20 new_tbl24_entry = { - .next_hop = lpm->rules_tbl[sub_rule_index].next_hop, - .valid = VALID, - .valid_group = 0, - .depth = sub_rule_depth, - }; - - struct rte_lpm_tbl_entry_v20 new_tbl8_entry = { - .valid = VALID, - .valid_group = VALID, - .depth = sub_rule_depth, - }; - new_tbl8_entry.next_hop = - lpm->rules_tbl[sub_rule_index].next_hop; - - for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) { - - if (lpm->tbl24[i].valid_group == 0 && - lpm->tbl24[i].depth <= depth) { - __atomic_store(&lpm->tbl24[i], &new_tbl24_entry, - __ATOMIC_RELEASE); - } else if (lpm->tbl24[i].valid_group == 1) { - /* - * If TBL24 entry is extended, then there has - * to be a rule with depth >= 25 in the - * associated TBL8 group. - */ - - tbl8_group_index = lpm->tbl24[i].group_idx; - tbl8_index = tbl8_group_index * - RTE_LPM_TBL8_GROUP_NUM_ENTRIES; - - for (j = tbl8_index; j < (tbl8_index + - RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) { - - if (lpm->tbl8[j].depth <= depth) - __atomic_store(&lpm->tbl8[j], - &new_tbl8_entry, - __ATOMIC_RELAXED); - } - } - } - } - - return 0; -} - -static int32_t -delete_depth_small_v1604(struct rte_lpm *lpm, uint32_t ip_masked, +delete_depth_small(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth) { #define group_idx next_hop @@ -1575,7 +857,7 @@ delete_depth_small_v1604(struct rte_lpm *lpm, uint32_t ip_masked, * thus can be recycled */ static int32_t -tbl8_recycle_check_v20(struct rte_lpm_tbl_entry_v20 *tbl8, +tbl8_recycle_check(struct rte_lpm_tbl_entry *tbl8, uint32_t tbl8_group_start) { uint32_t tbl8_group_end, i; @@ -1622,140 +904,7 @@ tbl8_recycle_check_v20(struct rte_lpm_tbl_entry_v20 *tbl8, } static int32_t -tbl8_recycle_check_v1604(struct rte_lpm_tbl_entry *tbl8, - uint32_t tbl8_group_start) -{ - uint32_t tbl8_group_end, i; - tbl8_group_end = tbl8_group_start + RTE_LPM_TBL8_GROUP_NUM_ENTRIES; - - /* - * Check the first entry of the given tbl8. If it is invalid we know - * this tbl8 does not contain any rule with a depth < RTE_LPM_MAX_DEPTH - * (As they would affect all entries in a tbl8) and thus this table - * can not be recycled. - */ - if (tbl8[tbl8_group_start].valid) { - /* - * If first entry is valid check if the depth is less than 24 - * and if so check the rest of the entries to verify that they - * are all of this depth. 
- */ - if (tbl8[tbl8_group_start].depth <= MAX_DEPTH_TBL24) { - for (i = (tbl8_group_start + 1); i < tbl8_group_end; - i++) { - - if (tbl8[i].depth != - tbl8[tbl8_group_start].depth) { - - return -EEXIST; - } - } - /* If all entries are the same return the tb8 index */ - return tbl8_group_start; - } - - return -EEXIST; - } - /* - * If the first entry is invalid check if the rest of the entries in - * the tbl8 are invalid. - */ - for (i = (tbl8_group_start + 1); i < tbl8_group_end; i++) { - if (tbl8[i].valid) - return -EEXIST; - } - /* If no valid entries are found then return -EINVAL. */ - return -EINVAL; -} - -static int32_t -delete_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, - uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth) -{ - uint32_t tbl24_index, tbl8_group_index, tbl8_group_start, tbl8_index, - tbl8_range, i; - int32_t tbl8_recycle_index; - - /* - * Calculate the index into tbl24 and range. Note: All depths larger - * than MAX_DEPTH_TBL24 are associated with only one tbl24 entry. - */ - tbl24_index = ip_masked >> 8; - - /* Calculate the index into tbl8 and range. */ - tbl8_group_index = lpm->tbl24[tbl24_index].group_idx; - tbl8_group_start = tbl8_group_index * RTE_LPM_TBL8_GROUP_NUM_ENTRIES; - tbl8_index = tbl8_group_start + (ip_masked & 0xFF); - tbl8_range = depth_to_range(depth); - - if (sub_rule_index < 0) { - /* - * Loop through the range of entries on tbl8 for which the - * rule_to_delete must be removed or modified. - */ - for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) { - if (lpm->tbl8[i].depth <= depth) - lpm->tbl8[i].valid = INVALID; - } - } else { - /* Set new tbl8 entry. */ - struct rte_lpm_tbl_entry_v20 new_tbl8_entry = { - .valid = VALID, - .depth = sub_rule_depth, - .valid_group = lpm->tbl8[tbl8_group_start].valid_group, - }; - - new_tbl8_entry.next_hop = - lpm->rules_tbl[sub_rule_index].next_hop; - /* - * Loop through the range of entries on tbl8 for which the - * rule_to_delete must be modified. - */ - for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) { - if (lpm->tbl8[i].depth <= depth) - __atomic_store(&lpm->tbl8[i], &new_tbl8_entry, - __ATOMIC_RELAXED); - } - } - - /* - * Check if there are any valid entries in this tbl8 group. If all - * tbl8 entries are invalid we can free the tbl8 and invalidate the - * associated tbl24 entry. - */ - - tbl8_recycle_index = tbl8_recycle_check_v20(lpm->tbl8, tbl8_group_start); - - if (tbl8_recycle_index == -EINVAL) { - /* Set tbl24 before freeing tbl8 to avoid race condition. - * Prevent the free of the tbl8 group from hoisting. - */ - lpm->tbl24[tbl24_index].valid = 0; - __atomic_thread_fence(__ATOMIC_RELEASE); - tbl8_free_v20(lpm->tbl8, tbl8_group_start); - } else if (tbl8_recycle_index > -1) { - /* Update tbl24 entry. */ - struct rte_lpm_tbl_entry_v20 new_tbl24_entry = { - .next_hop = lpm->tbl8[tbl8_recycle_index].next_hop, - .valid = VALID, - .valid_group = 0, - .depth = lpm->tbl8[tbl8_recycle_index].depth, - }; - - /* Set tbl24 before freeing tbl8 to avoid race condition. - * Prevent the free of the tbl8 group from hoisting. 
- */ - __atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry, - __ATOMIC_RELAXED); - __atomic_thread_fence(__ATOMIC_RELEASE); - tbl8_free_v20(lpm->tbl8, tbl8_group_start); - } - - return 0; -} - -static int32_t -delete_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked, +delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth) { #define group_idx next_hop @@ -1810,7 +959,7 @@ delete_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked, * associated tbl24 entry. */ - tbl8_recycle_index = tbl8_recycle_check_v1604(lpm->tbl8, tbl8_group_start); + tbl8_recycle_index = tbl8_recycle_check(lpm->tbl8, tbl8_group_start); if (tbl8_recycle_index == -EINVAL) { /* Set tbl24 before freeing tbl8 to avoid race condition. @@ -1818,7 +967,7 @@ delete_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked, */ lpm->tbl24[tbl24_index].valid = 0; __atomic_thread_fence(__ATOMIC_RELEASE); - tbl8_free_v1604(lpm->tbl8, tbl8_group_start); + tbl8_free(lpm->tbl8, tbl8_group_start); } else if (tbl8_recycle_index > -1) { /* Update tbl24 entry. */ struct rte_lpm_tbl_entry new_tbl24_entry = { @@ -1834,7 +983,7 @@ delete_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked, __atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry, __ATOMIC_RELAXED); __atomic_thread_fence(__ATOMIC_RELEASE); - tbl8_free_v1604(lpm->tbl8, tbl8_group_start); + tbl8_free(lpm->tbl8, tbl8_group_start); } #undef group_idx return 0; @@ -1844,7 +993,7 @@ delete_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked, * Deletes a rule */ int -rte_lpm_delete_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth) +rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, uint8_t depth) { int32_t rule_to_delete_index, sub_rule_index; uint32_t ip_masked; @@ -1863,7 +1012,7 @@ rte_lpm_delete_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth) * Find the index of the input rule, that needs to be deleted, in the * rule table. */ - rule_to_delete_index = rule_find_v20(lpm, ip_masked, depth); + rule_to_delete_index = rule_find(lpm, ip_masked, depth); /* * Check if rule_to_delete_index was found. If no rule was found the @@ -1873,7 +1022,7 @@ rte_lpm_delete_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth) return -EINVAL; /* Delete the rule from the rule table. */ - rule_delete_v20(lpm, rule_to_delete_index, depth); + rule_delete(lpm, rule_to_delete_index, depth); /* * Find rule to replace the rule_to_delete. If there is no rule to @@ -1881,100 +1030,26 @@ rte_lpm_delete_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth) * entries associated with this rule. */ sub_rule_depth = 0; - sub_rule_index = find_previous_rule_v20(lpm, ip, depth, &sub_rule_depth); + sub_rule_index = find_previous_rule(lpm, ip, depth, &sub_rule_depth); /* * If the input depth value is less than 25 use function * delete_depth_small otherwise use delete_depth_big. */ if (depth <= MAX_DEPTH_TBL24) { - return delete_depth_small_v20(lpm, ip_masked, depth, + return delete_depth_small(lpm, ip_masked, depth, sub_rule_index, sub_rule_depth); } else { /* If depth > MAX_DEPTH_TBL24 */ - return delete_depth_big_v20(lpm, ip_masked, depth, sub_rule_index, + return delete_depth_big(lpm, ip_masked, depth, sub_rule_index, sub_rule_depth); } } -VERSION_SYMBOL(rte_lpm_delete, _v20, 2.0); - -int -rte_lpm_delete_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth) -{ - int32_t rule_to_delete_index, sub_rule_index; - uint32_t ip_masked; - uint8_t sub_rule_depth; - /* - * Check input arguments. 
Note: IP must be a positive integer of 32 - * bits in length therefore it need not be checked. - */ - if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH)) { - return -EINVAL; - } - - ip_masked = ip & depth_to_mask(depth); - - /* - * Find the index of the input rule, that needs to be deleted, in the - * rule table. - */ - rule_to_delete_index = rule_find_v1604(lpm, ip_masked, depth); - - /* - * Check if rule_to_delete_index was found. If no rule was found the - * function rule_find returns -EINVAL. - */ - if (rule_to_delete_index < 0) - return -EINVAL; - - /* Delete the rule from the rule table. */ - rule_delete_v1604(lpm, rule_to_delete_index, depth); - - /* - * Find rule to replace the rule_to_delete. If there is no rule to - * replace the rule_to_delete we return -1 and invalidate the table - * entries associated with this rule. - */ - sub_rule_depth = 0; - sub_rule_index = find_previous_rule_v1604(lpm, ip, depth, &sub_rule_depth); - - /* - * If the input depth value is less than 25 use function - * delete_depth_small otherwise use delete_depth_big. - */ - if (depth <= MAX_DEPTH_TBL24) { - return delete_depth_small_v1604(lpm, ip_masked, depth, - sub_rule_index, sub_rule_depth); - } else { /* If depth > MAX_DEPTH_TBL24 */ - return delete_depth_big_v1604(lpm, ip_masked, depth, sub_rule_index, - sub_rule_depth); - } -} -BIND_DEFAULT_SYMBOL(rte_lpm_delete, _v1604, 16.04); -MAP_STATIC_SYMBOL(int rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, - uint8_t depth), rte_lpm_delete_v1604); /* * Delete all rules from the LPM table. */ void -rte_lpm_delete_all_v20(struct rte_lpm_v20 *lpm) -{ - /* Zero rule information. */ - memset(lpm->rule_info, 0, sizeof(lpm->rule_info)); - - /* Zero tbl24. */ - memset(lpm->tbl24, 0, sizeof(lpm->tbl24)); - - /* Zero tbl8. */ - memset(lpm->tbl8, 0, sizeof(lpm->tbl8)); - - /* Delete all rules form the rules table. */ - memset(lpm->rules_tbl, 0, sizeof(lpm->rules_tbl[0]) * lpm->max_rules); -} -VERSION_SYMBOL(rte_lpm_delete_all, _v20, 2.0); - -void -rte_lpm_delete_all_v1604(struct rte_lpm *lpm) +rte_lpm_delete_all(struct rte_lpm *lpm) { /* Zero rule information. */ memset(lpm->rule_info, 0, sizeof(lpm->rule_info)); @@ -1989,6 +1064,3 @@ rte_lpm_delete_all_v1604(struct rte_lpm *lpm) /* Delete all rules form the rules table. */ memset(lpm->rules_tbl, 0, sizeof(lpm->rules_tbl[0]) * lpm->max_rules); } -BIND_DEFAULT_SYMBOL(rte_lpm_delete_all, _v1604, 16.04); -MAP_STATIC_SYMBOL(void rte_lpm_delete_all(struct rte_lpm *lpm), - rte_lpm_delete_all_v1604); diff --git a/lib/librte_lpm/rte_lpm.h b/lib/librte_lpm/rte_lpm.h index 906ec44830..ca9627a141 100644 --- a/lib/librte_lpm/rte_lpm.h +++ b/lib/librte_lpm/rte_lpm.h @@ -65,31 +65,6 @@ extern "C" { #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN /** @internal Tbl24 entry structure. */ -__extension__ -struct rte_lpm_tbl_entry_v20 { - /** - * Stores Next hop (tbl8 or tbl24 when valid_group is not set) or - * a group index pointing to a tbl8 structure (tbl24 only, when - * valid_group is set) - */ - RTE_STD_C11 - union { - uint8_t next_hop; - uint8_t group_idx; - }; - /* Using single uint8_t to store 3 values. */ - uint8_t valid :1; /**< Validation flag. */ - /** - * For tbl24: - * - valid_group == 0: entry stores a next hop - * - valid_group == 1: entry stores a group_index pointing to a tbl8 - * For tbl8: - * - valid_group indicates whether the current tbl8 is in use or not - */ - uint8_t valid_group :1; - uint8_t depth :6; /**< Rule depth. 
*/ -} __rte_aligned(sizeof(uint16_t)); - __extension__ struct rte_lpm_tbl_entry { /** @@ -112,16 +87,6 @@ struct rte_lpm_tbl_entry { }; #else -__extension__ -struct rte_lpm_tbl_entry_v20 { - uint8_t depth :6; - uint8_t valid_group :1; - uint8_t valid :1; - union { - uint8_t group_idx; - uint8_t next_hop; - }; -} __rte_aligned(sizeof(uint16_t)); __extension__ struct rte_lpm_tbl_entry { @@ -142,11 +107,6 @@ struct rte_lpm_config { }; /** @internal Rule structure. */ -struct rte_lpm_rule_v20 { - uint32_t ip; /**< Rule IP address. */ - uint8_t next_hop; /**< Rule next hop. */ -}; - struct rte_lpm_rule { uint32_t ip; /**< Rule IP address. */ uint32_t next_hop; /**< Rule next hop. */ @@ -159,21 +119,6 @@ struct rte_lpm_rule_info { }; /** @internal LPM structure. */ -struct rte_lpm_v20 { - /* LPM metadata. */ - char name[RTE_LPM_NAMESIZE]; /**< Name of the lpm. */ - uint32_t max_rules; /**< Max. balanced rules per lpm. */ - struct rte_lpm_rule_info rule_info[RTE_LPM_MAX_DEPTH]; /**< Rule info table. */ - - /* LPM Tables. */ - struct rte_lpm_tbl_entry_v20 tbl24[RTE_LPM_TBL24_NUM_ENTRIES] - __rte_cache_aligned; /**< LPM tbl24 table. */ - struct rte_lpm_tbl_entry_v20 tbl8[RTE_LPM_TBL8_NUM_ENTRIES] - __rte_cache_aligned; /**< LPM tbl8 table. */ - struct rte_lpm_rule_v20 rules_tbl[] - __rte_cache_aligned; /**< LPM rules. */ -}; - struct rte_lpm { /* LPM metadata. */ char name[RTE_LPM_NAMESIZE]; /**< Name of the lpm. */ @@ -210,11 +155,6 @@ struct rte_lpm { struct rte_lpm * rte_lpm_create(const char *name, int socket_id, const struct rte_lpm_config *config); -struct rte_lpm_v20 * -rte_lpm_create_v20(const char *name, int socket_id, int max_rules, int flags); -struct rte_lpm * -rte_lpm_create_v1604(const char *name, int socket_id, - const struct rte_lpm_config *config); /** * Find an existing LPM object and return a pointer to it. @@ -228,10 +168,6 @@ rte_lpm_create_v1604(const char *name, int socket_id, */ struct rte_lpm * rte_lpm_find_existing(const char *name); -struct rte_lpm_v20 * -rte_lpm_find_existing_v20(const char *name); -struct rte_lpm * -rte_lpm_find_existing_v1604(const char *name); /** * Free an LPM object. @@ -243,10 +179,6 @@ rte_lpm_find_existing_v1604(const char *name); */ void rte_lpm_free(struct rte_lpm *lpm); -void -rte_lpm_free_v20(struct rte_lpm_v20 *lpm); -void -rte_lpm_free_v1604(struct rte_lpm *lpm); /** * Add a rule to the LPM table. @@ -264,12 +196,6 @@ rte_lpm_free_v1604(struct rte_lpm *lpm); */ int rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, uint32_t next_hop); -int -rte_lpm_add_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth, - uint8_t next_hop); -int -rte_lpm_add_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, - uint32_t next_hop); /** * Check if a rule is present in the LPM table, @@ -289,12 +215,6 @@ rte_lpm_add_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, int rte_lpm_is_rule_present(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, uint32_t *next_hop); -int -rte_lpm_is_rule_present_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth, -uint8_t *next_hop); -int -rte_lpm_is_rule_present_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, -uint32_t *next_hop); /** * Delete a rule from the LPM table. @@ -310,10 +230,6 @@ uint32_t *next_hop); */ int rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, uint8_t depth); -int -rte_lpm_delete_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth); -int -rte_lpm_delete_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth); /** * Delete all rules from the LPM table. 
@@ -323,10 +239,6 @@ rte_lpm_delete_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth); */ void rte_lpm_delete_all(struct rte_lpm *lpm); -void -rte_lpm_delete_all_v20(struct rte_lpm_v20 *lpm); -void -rte_lpm_delete_all_v1604(struct rte_lpm *lpm); /** * Lookup an IP into the LPM table. diff --git a/lib/librte_lpm/rte_lpm6.c b/lib/librte_lpm/rte_lpm6.c index 9b8aeb9721..b981e40714 100644 --- a/lib/librte_lpm/rte_lpm6.c +++ b/lib/librte_lpm/rte_lpm6.c @@ -808,18 +808,6 @@ add_step(struct rte_lpm6 *lpm, struct rte_lpm6_tbl_entry *tbl, return 1; } -/* - * Add a route - */ -int -rte_lpm6_add_v20(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth, - uint8_t next_hop) -{ - return rte_lpm6_add_v1705(lpm, ip, depth, next_hop); -} -VERSION_SYMBOL(rte_lpm6_add, _v20, 2.0); - - /* * Simulate adding a route to LPM * @@ -841,7 +829,7 @@ simulate_add(struct rte_lpm6 *lpm, const uint8_t *masked_ip, uint8_t depth) /* Inspect the first three bytes through tbl24 on the first step. */ ret = simulate_add_step(lpm, lpm->tbl24, &tbl_next, masked_ip, - ADD_FIRST_BYTE, 1, depth, &need_tbl_nb); + ADD_FIRST_BYTE, 1, depth, &need_tbl_nb); total_need_tbl_nb = need_tbl_nb; /* * Inspect one by one the rest of the bytes until @@ -850,7 +838,7 @@ simulate_add(struct rte_lpm6 *lpm, const uint8_t *masked_ip, uint8_t depth) for (i = ADD_FIRST_BYTE; i < RTE_LPM6_IPV6_ADDR_SIZE && ret == 1; i++) { tbl = tbl_next; ret = simulate_add_step(lpm, tbl, &tbl_next, masked_ip, 1, - (uint8_t)(i+1), depth, &need_tbl_nb); + (uint8_t)(i + 1), depth, &need_tbl_nb); total_need_tbl_nb += need_tbl_nb; } @@ -861,9 +849,12 @@ simulate_add(struct rte_lpm6 *lpm, const uint8_t *masked_ip, uint8_t depth) return 0; } +/* + * Add a route + */ int -rte_lpm6_add_v1705(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth, - uint32_t next_hop) +rte_lpm6_add(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth, + uint32_t next_hop) { struct rte_lpm6_tbl_entry *tbl; struct rte_lpm6_tbl_entry *tbl_next = NULL; @@ -895,8 +886,8 @@ rte_lpm6_add_v1705(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth, /* Inspect the first three bytes through tbl24 on the first step. */ tbl = lpm->tbl24; status = add_step(lpm, tbl, TBL24_IND, &tbl_next, &tbl_next_num, - masked_ip, ADD_FIRST_BYTE, 1, depth, next_hop, - is_new_rule); + masked_ip, ADD_FIRST_BYTE, 1, depth, next_hop, + is_new_rule); assert(status >= 0); /* @@ -906,17 +897,13 @@ rte_lpm6_add_v1705(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth, for (i = ADD_FIRST_BYTE; i < RTE_LPM6_IPV6_ADDR_SIZE && status == 1; i++) { tbl = tbl_next; status = add_step(lpm, tbl, tbl_next_num, &tbl_next, - &tbl_next_num, masked_ip, 1, (uint8_t)(i+1), - depth, next_hop, is_new_rule); + &tbl_next_num, masked_ip, 1, (uint8_t)(i + 1), + depth, next_hop, is_new_rule); assert(status >= 0); } return status; } -BIND_DEFAULT_SYMBOL(rte_lpm6_add, _v1705, 17.05); -MAP_STATIC_SYMBOL(int rte_lpm6_add(struct rte_lpm6 *lpm, uint8_t *ip, - uint8_t depth, uint32_t next_hop), - rte_lpm6_add_v1705); /* * Takes a pointer to a table entry and inspect one level. @@ -955,25 +942,7 @@ lookup_step(const struct rte_lpm6 *lpm, const struct rte_lpm6_tbl_entry *tbl, * Looks up an IP */ int -rte_lpm6_lookup_v20(const struct rte_lpm6 *lpm, uint8_t *ip, uint8_t *next_hop) -{ - uint32_t next_hop32 = 0; - int32_t status; - - /* DEBUG: Check user input arguments. 
*/ - if (next_hop == NULL) - return -EINVAL; - - status = rte_lpm6_lookup_v1705(lpm, ip, &next_hop32); - if (status == 0) - *next_hop = (uint8_t)next_hop32; - - return status; -} -VERSION_SYMBOL(rte_lpm6_lookup, _v20, 2.0); - -int -rte_lpm6_lookup_v1705(const struct rte_lpm6 *lpm, uint8_t *ip, +rte_lpm6_lookup(const struct rte_lpm6 *lpm, uint8_t *ip, uint32_t *next_hop) { const struct rte_lpm6_tbl_entry *tbl; @@ -1000,56 +969,12 @@ rte_lpm6_lookup_v1705(const struct rte_lpm6 *lpm, uint8_t *ip, return status; } -BIND_DEFAULT_SYMBOL(rte_lpm6_lookup, _v1705, 17.05); -MAP_STATIC_SYMBOL(int rte_lpm6_lookup(const struct rte_lpm6 *lpm, uint8_t *ip, - uint32_t *next_hop), rte_lpm6_lookup_v1705); /* * Looks up a group of IP addresses */ int -rte_lpm6_lookup_bulk_func_v20(const struct rte_lpm6 *lpm, - uint8_t ips[][RTE_LPM6_IPV6_ADDR_SIZE], - int16_t * next_hops, unsigned n) -{ - unsigned i; - const struct rte_lpm6_tbl_entry *tbl; - const struct rte_lpm6_tbl_entry *tbl_next = NULL; - uint32_t tbl24_index, next_hop; - uint8_t first_byte; - int status; - - /* DEBUG: Check user input arguments. */ - if ((lpm == NULL) || (ips == NULL) || (next_hops == NULL)) - return -EINVAL; - - for (i = 0; i < n; i++) { - first_byte = LOOKUP_FIRST_BYTE; - tbl24_index = (ips[i][0] << BYTES2_SIZE) | - (ips[i][1] << BYTE_SIZE) | ips[i][2]; - - /* Calculate pointer to the first entry to be inspected */ - tbl = &lpm->tbl24[tbl24_index]; - - do { - /* Continue inspecting following levels until success or failure */ - status = lookup_step(lpm, tbl, &tbl_next, ips[i], first_byte++, - &next_hop); - tbl = tbl_next; - } while (status == 1); - - if (status < 0) - next_hops[i] = -1; - else - next_hops[i] = (int16_t)next_hop; - } - - return 0; -} -VERSION_SYMBOL(rte_lpm6_lookup_bulk_func, _v20, 2.0); - -int -rte_lpm6_lookup_bulk_func_v1705(const struct rte_lpm6 *lpm, +rte_lpm6_lookup_bulk_func(const struct rte_lpm6 *lpm, uint8_t ips[][RTE_LPM6_IPV6_ADDR_SIZE], int32_t *next_hops, unsigned int n) { @@ -1089,37 +1014,12 @@ rte_lpm6_lookup_bulk_func_v1705(const struct rte_lpm6 *lpm, return 0; } -BIND_DEFAULT_SYMBOL(rte_lpm6_lookup_bulk_func, _v1705, 17.05); -MAP_STATIC_SYMBOL(int rte_lpm6_lookup_bulk_func(const struct rte_lpm6 *lpm, - uint8_t ips[][RTE_LPM6_IPV6_ADDR_SIZE], - int32_t *next_hops, unsigned int n), - rte_lpm6_lookup_bulk_func_v1705); /* * Look for a rule in the high-level rules table */ int -rte_lpm6_is_rule_present_v20(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth, - uint8_t *next_hop) -{ - uint32_t next_hop32 = 0; - int32_t status; - - /* DEBUG: Check user input arguments. */ - if (next_hop == NULL) - return -EINVAL; - - status = rte_lpm6_is_rule_present_v1705(lpm, ip, depth, &next_hop32); - if (status > 0) - *next_hop = (uint8_t)next_hop32; - - return status; - -} -VERSION_SYMBOL(rte_lpm6_is_rule_present, _v20, 2.0); - -int -rte_lpm6_is_rule_present_v1705(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth, +rte_lpm6_is_rule_present(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth, uint32_t *next_hop) { uint8_t masked_ip[RTE_LPM6_IPV6_ADDR_SIZE]; @@ -1135,10 +1035,6 @@ rte_lpm6_is_rule_present_v1705(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth, return rule_find(lpm, masked_ip, depth, next_hop); } -BIND_DEFAULT_SYMBOL(rte_lpm6_is_rule_present, _v1705, 17.05); -MAP_STATIC_SYMBOL(int rte_lpm6_is_rule_present(struct rte_lpm6 *lpm, - uint8_t *ip, uint8_t depth, uint32_t *next_hop), - rte_lpm6_is_rule_present_v1705); /* * Delete a rule from the rule table. 
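With the _v20/_v1705 variants removed above, IPv6 callers are left with the
single 32-bit next-hop API. A minimal usage sketch before the header changes
below (illustrative only, not part of the patch; the function name and all
values are arbitrary, and rte_eal_init() is assumed to have already run):

#include <stdio.h>
#include <rte_memory.h>	/* SOCKET_ID_ANY */
#include <rte_lpm6.h>

static int
lpm6_example(void)
{
	struct rte_lpm6_config config = {
		.max_rules = 1024,
		.number_tbl8s = 4096,
		.flags = 0,
	};
	/* 2001:db8::/32 and one address inside that prefix */
	uint8_t prefix[RTE_LPM6_IPV6_ADDR_SIZE] = { 0x20, 0x01, 0x0d, 0xb8 };
	uint8_t addr[RTE_LPM6_IPV6_ADDR_SIZE] = { 0x20, 0x01, 0x0d, 0xb8,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1 };
	struct rte_lpm6 *lpm;
	uint32_t next_hop;

	lpm = rte_lpm6_create("example_lpm6", SOCKET_ID_ANY, &config);
	if (lpm == NULL)
		return -1;

	/* next_hop is uint32_t now that the uint8_t (_v20) variant is gone */
	if (rte_lpm6_add(lpm, prefix, 32, 42) == 0 &&
	    rte_lpm6_lookup(lpm, addr, &next_hop) == 0)
		printf("next hop: %u\n", next_hop);

	rte_lpm6_free(lpm);
	return 0;
}
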
diff --git a/lib/librte_lpm/rte_lpm6.h b/lib/librte_lpm/rte_lpm6.h index 5d59ccb1fe..37dfb20249 100644 --- a/lib/librte_lpm/rte_lpm6.h +++ b/lib/librte_lpm/rte_lpm6.h @@ -96,12 +96,6 @@ rte_lpm6_free(struct rte_lpm6 *lpm); int rte_lpm6_add(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth, uint32_t next_hop); -int -rte_lpm6_add_v20(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth, - uint8_t next_hop); -int -rte_lpm6_add_v1705(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth, - uint32_t next_hop); /** * Check if a rule is present in the LPM table, @@ -121,12 +115,6 @@ rte_lpm6_add_v1705(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth, int rte_lpm6_is_rule_present(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth, uint32_t *next_hop); -int -rte_lpm6_is_rule_present_v20(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth, - uint8_t *next_hop); -int -rte_lpm6_is_rule_present_v1705(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth, - uint32_t *next_hop); /** * Delete a rule from the LPM table. @@ -184,11 +172,6 @@ rte_lpm6_delete_all(struct rte_lpm6 *lpm); */ int rte_lpm6_lookup(const struct rte_lpm6 *lpm, uint8_t *ip, uint32_t *next_hop); -int -rte_lpm6_lookup_v20(const struct rte_lpm6 *lpm, uint8_t *ip, uint8_t *next_hop); -int -rte_lpm6_lookup_v1705(const struct rte_lpm6 *lpm, uint8_t *ip, - uint32_t *next_hop); /** * Lookup multiple IP addresses in an LPM table. @@ -210,14 +193,6 @@ int rte_lpm6_lookup_bulk_func(const struct rte_lpm6 *lpm, uint8_t ips[][RTE_LPM6_IPV6_ADDR_SIZE], int32_t *next_hops, unsigned int n); -int -rte_lpm6_lookup_bulk_func_v20(const struct rte_lpm6 *lpm, - uint8_t ips[][RTE_LPM6_IPV6_ADDR_SIZE], - int16_t *next_hops, unsigned int n); -int -rte_lpm6_lookup_bulk_func_v1705(const struct rte_lpm6 *lpm, - uint8_t ips[][RTE_LPM6_IPV6_ADDR_SIZE], - int32_t *next_hops, unsigned int n); #ifdef __cplusplus } -- 2.17.1
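
For completeness, the corresponding IPv4 flow against the consolidated
rte_lpm API that this patch leaves behind. Again a minimal sketch rather
than part of the change: error handling is abbreviated, the names and
values are arbitrary, and EAL is assumed to be initialized.

#include <stdio.h>
#include <rte_ip.h>	/* RTE_IPV4() */
#include <rte_memory.h>	/* SOCKET_ID_ANY */
#include <rte_lpm.h>

static int
lpm4_example(void)
{
	struct rte_lpm_config config = {
		.max_rules = 1024,
		.number_tbl8s = 256,
		.flags = 0,
	};
	struct rte_lpm *lpm;
	uint32_t next_hop;

	/* the unversioned symbol now binds what was rte_lpm_create_v1604 */
	lpm = rte_lpm_create("example_lpm", SOCKET_ID_ANY, &config);
	if (lpm == NULL)
		return -1;

	/* 192.0.2.0/24 -> next hop 7; next hops are 32-bit in this ABI */
	if (rte_lpm_add(lpm, RTE_IPV4(192, 0, 2, 0), 24, 7) < 0) {
		rte_lpm_free(lpm);
		return -1;
	}

	/* returns 0 on a lookup hit, -ENOENT on a miss */
	if (rte_lpm_lookup(lpm, RTE_IPV4(192, 0, 2, 1), &next_hop) == 0)
		printf("next hop: %u\n", next_hop);

	rte_lpm_delete(lpm, RTE_IPV4(192, 0, 2, 0), 24);
	rte_lpm_free(lpm);
	return 0;
}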