DPDK patches and discussions
From: Vladimir Medvedkin <vladimir.medvedkin@intel.com>
To: dev@dpdk.org
Cc: bruce.richardson@intel.com, konstantin.ananyev@intel.com,
	thomas@monjalon.net, aconole@redhat.com
Subject: [dpdk-dev] [PATCH v6 03/12] rib: add ipv6 support for RIB
Date: Fri,  1 Nov 2019 15:21:36 +0000
Message-ID: <3ee0dfc1c1b2cade4d07cc9302cac0d6d17298a5.1572621163.git.vladimir.medvedkin@intel.com>
In-Reply-To: <cover.1572621162.git.vladimir.medvedkin@intel.com>

Extend the RIB library with IPv6 support.
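
Below is a minimal usage sketch of the new rte_rib6 API (illustrative
only: the "example_rib6" name, prefix, next-hop value and pool size are
arbitrary, error handling is omitted, and <rte_rib6.h> plus the EAL
headers providing SOCKET_ID_ANY are assumed to be included):

    struct rte_rib6_conf conf = { .ext_sz = 0, .max_nodes = 1 << 16 };
    struct rte_rib6 *rib;
    struct rte_rib6_node *node;
    /* 2001:db8::/32 */
    uint8_t prefix[RTE_RIB6_IPV6_ADDR_SIZE] = { 0x20, 0x01, 0x0d, 0xb8 };
    /* 2001:db8::1 */
    uint8_t addr[RTE_RIB6_IPV6_ADDR_SIZE] = { 0x20, 0x01, 0x0d, 0xb8,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1 };
    uint64_t nh;

    rib = rte_rib6_create("example_rib6", SOCKET_ID_ANY, &conf);
    node = rte_rib6_insert(rib, prefix, 32);   /* add 2001:db8::/32 */
    rte_rib6_set_nh(node, 1);                  /* next hop id 1 */
    node = rte_rib6_lookup(rib, addr);         /* longest prefix match */
    if (node != NULL)
        rte_rib6_get_nh(node, &nh);            /* nh == 1 */
    rte_rib6_free(rib);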

Signed-off-by: Vladimir Medvedkin <vladimir.medvedkin@intel.com>
---
 lib/librte_rib/Makefile            |   4 +-
 lib/librte_rib/meson.build         |   4 +-
 lib/librte_rib/rte_rib6.c          | 598 +++++++++++++++++++++++++++++++++++++
 lib/librte_rib/rte_rib6.h          | 334 +++++++++++++++++++++
 lib/librte_rib/rte_rib_version.map |  15 +
 5 files changed, 951 insertions(+), 4 deletions(-)
 create mode 100644 lib/librte_rib/rte_rib6.c
 create mode 100644 lib/librte_rib/rte_rib6.h

diff --git a/lib/librte_rib/Makefile b/lib/librte_rib/Makefile
index 79f259a..6d861ad 100644
--- a/lib/librte_rib/Makefile
+++ b/lib/librte_rib/Makefile
@@ -17,9 +17,9 @@ EXPORT_MAP := rte_rib_version.map
 LIBABIVER := 1
 
 # all source are stored in SRCS-y
-SRCS-$(CONFIG_RTE_LIBRTE_RIB) := rte_rib.c
+SRCS-$(CONFIG_RTE_LIBRTE_RIB) := rte_rib.c rte_rib6.c
 
 # install this header file
-SYMLINK-$(CONFIG_RTE_LIBRTE_RIB)-include := rte_rib.h
+SYMLINK-$(CONFIG_RTE_LIBRTE_RIB)-include := rte_rib.h rte_rib6.h
 
 include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/lib/librte_rib/meson.build b/lib/librte_rib/meson.build
index e7b8920..46a1c0c 100644
--- a/lib/librte_rib/meson.build
+++ b/lib/librte_rib/meson.build
@@ -3,6 +3,6 @@
 # Copyright(c) 2019 Intel Corporation
 
 allow_experimental_apis = true
-sources = files('rte_rib.c')
-headers = files('rte_rib.h')
+sources = files('rte_rib.c', 'rte_rib6.c')
+headers = files('rte_rib.h', 'rte_rib6.h')
 deps += ['mempool']
diff --git a/lib/librte_rib/rte_rib6.c b/lib/librte_rib/rte_rib6.c
new file mode 100644
index 0000000..78b8dcf
--- /dev/null
+++ b/lib/librte_rib/rte_rib6.c
@@ -0,0 +1,598 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Vladimir Medvedkin <medvedkinv@gmail.com>
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+#include <stdbool.h>
+#include <stdint.h>
+
+#include <rte_eal.h>
+#include <rte_eal_memconfig.h>
+#include <rte_errno.h>
+#include <rte_malloc.h>
+#include <rte_mempool.h>
+#include <rte_rwlock.h>
+#include <rte_string_fns.h>
+#include <rte_tailq.h>
+
+#include <rte_rib6.h>
+
+#define RTE_RIB_VALID_NODE	1
+#define RIB6_MAXDEPTH		128
+/* Maximum length of a RIB6 name. */
+#define RTE_RIB6_NAMESIZE	64
+
+TAILQ_HEAD(rte_rib6_list, rte_tailq_entry);
+static struct rte_tailq_elem rte_rib6_tailq = {
+	.name = "RTE_RIB6",
+};
+EAL_REGISTER_TAILQ(rte_rib6_tailq)
+
+struct rte_rib6_node {
+	struct rte_rib6_node	*left;
+	struct rte_rib6_node	*right;
+	struct rte_rib6_node	*parent;
+	uint64_t		nh;
+	uint8_t			ip[RTE_RIB6_IPV6_ADDR_SIZE];
+	uint8_t			depth;
+	uint8_t			flag;
+	__extension__ uint64_t		ext[0];
+};
+
+struct rte_rib6 {
+	char		name[RTE_RIB6_NAMESIZE];
+	struct rte_rib6_node	*tree;
+	struct rte_mempool	*node_pool;
+	uint32_t		cur_nodes;
+	uint32_t		cur_routes;
+	int			max_nodes;
+};
+
+static inline bool
+is_valid_node(struct rte_rib6_node *node)
+{
+	return (node->flag & RTE_RIB_VALID_NODE) == RTE_RIB_VALID_NODE;
+}
+
+static inline bool
+is_right_node(struct rte_rib6_node *node)
+{
+	return node->parent->right == node;
+}
+
+/*
+ * Check if ip1 is covered by ip2/depth prefix
+ */
+static inline bool
+is_covered(const uint8_t ip1[RTE_RIB6_IPV6_ADDR_SIZE],
+		const uint8_t ip2[RTE_RIB6_IPV6_ADDR_SIZE], uint8_t depth)
+{
+	int i;
+
+	for (i = 0; i < RTE_RIB6_IPV6_ADDR_SIZE; i++)
+		if ((ip1[i] ^ ip2[i]) & get_msk_part(depth, i))
+			return false;
+
+	return true;
+}
+
+static inline int
+get_dir(const uint8_t ip[RTE_RIB6_IPV6_ADDR_SIZE], uint8_t depth)
+{
+	int i = 0;
+	uint8_t p_depth, msk;
+
+	for (p_depth = depth; p_depth >= 8; p_depth -= 8)
+		i++;
+
+	msk = 1 << (7 - p_depth);
+	return (ip[i] & msk) != 0;
+}
+
+static inline struct rte_rib6_node *
+get_nxt_node(struct rte_rib6_node *node,
+	const uint8_t ip[RTE_RIB6_IPV6_ADDR_SIZE])
+{
+	return (get_dir(ip, node->depth)) ? node->right : node->left;
+}
+
+static struct rte_rib6_node *
+node_alloc(struct rte_rib6 *rib)
+{
+	struct rte_rib6_node *ent;
+	int ret;
+
+	ret = rte_mempool_get(rib->node_pool, (void *)&ent);
+	if (unlikely(ret != 0))
+		return NULL;
+	++rib->cur_nodes;
+	return ent;
+}
+
+static void
+node_free(struct rte_rib6 *rib, struct rte_rib6_node *ent)
+{
+	--rib->cur_nodes;
+	rte_mempool_put(rib->node_pool, ent);
+}
+
+struct rte_rib6_node *
+rte_rib6_lookup(struct rte_rib6 *rib,
+	const uint8_t ip[RTE_RIB6_IPV6_ADDR_SIZE])
+{
+	struct rte_rib6_node *cur;
+	struct rte_rib6_node *prev = NULL;
+
+	if (unlikely(rib == NULL)) {
+		rte_errno = EINVAL;
+		return NULL;
+	}
+	cur = rib->tree;
+
+	while ((cur != NULL) && is_covered(ip, cur->ip, cur->depth)) {
+		if (is_valid_node(cur))
+			prev = cur;
+		cur = get_nxt_node(cur, ip);
+	}
+	return prev;
+}
+
+struct rte_rib6_node *
+rte_rib6_lookup_parent(struct rte_rib6_node *ent)
+{
+	struct rte_rib6_node *tmp;
+
+	if (ent == NULL)
+		return NULL;
+
+	tmp = ent->parent;
+	while ((tmp != NULL) && (!is_valid_node(tmp)))
+		tmp = tmp->parent;
+
+	return tmp;
+}
+
+struct rte_rib6_node *
+rte_rib6_lookup_exact(struct rte_rib6 *rib,
+	const uint8_t ip[RTE_RIB6_IPV6_ADDR_SIZE], uint8_t depth)
+{
+	struct rte_rib6_node *cur;
+	uint8_t tmp_ip[RTE_RIB6_IPV6_ADDR_SIZE];
+	int i;
+
+	if ((rib == NULL) || (ip == NULL) || (depth > RIB6_MAXDEPTH)) {
+		rte_errno = EINVAL;
+		return NULL;
+	}
+	cur = rib->tree;
+
+	for (i = 0; i < RTE_RIB6_IPV6_ADDR_SIZE; i++)
+		tmp_ip[i] = ip[i] & get_msk_part(depth, i);
+
+	while (cur != NULL) {
+		if (rte_rib6_is_equal(cur->ip, tmp_ip) &&
+				(cur->depth == depth) &&
+				is_valid_node(cur))
+			return cur;
+
+		if (!(is_covered(tmp_ip, cur->ip, cur->depth)) ||
+				(cur->depth >= depth))
+			break;
+
+		cur = get_nxt_node(cur, tmp_ip);
+	}
+
+	return NULL;
+}
+
+/*
+ * Traverse a subtree and retrieve more specific routes
+ * for a given ip/depth prefix.
+ * last == NULL means the first invocation.
+ */
+struct rte_rib6_node *
+rte_rib6_get_nxt(struct rte_rib6 *rib,
+	const uint8_t ip[RTE_RIB6_IPV6_ADDR_SIZE],
+	uint8_t depth, struct rte_rib6_node *last, int flag)
+{
+	struct rte_rib6_node *tmp, *prev = NULL;
+	uint8_t tmp_ip[RTE_RIB6_IPV6_ADDR_SIZE];
+	int i;
+
+	if ((rib == NULL) || (ip == NULL) || (depth > RIB6_MAXDEPTH)) {
+		rte_errno = EINVAL;
+		return NULL;
+	}
+
+	for (i = 0; i < RTE_RIB6_IPV6_ADDR_SIZE; i++)
+		tmp_ip[i] = ip[i] & get_msk_part(depth, i);
+
+	if (last == NULL) {
+		tmp = rib->tree;
+		while ((tmp) && (tmp->depth < depth))
+			tmp = get_nxt_node(tmp, tmp_ip);
+	} else {
+		tmp = last;
+		while ((tmp->parent != NULL) && (is_right_node(tmp) ||
+				(tmp->parent->right == NULL))) {
+			tmp = tmp->parent;
+			if (is_valid_node(tmp) &&
+					(is_covered(tmp->ip, tmp_ip, depth) &&
+					(tmp->depth > depth)))
+				return tmp;
+		}
+		tmp = (tmp->parent != NULL) ? tmp->parent->right : NULL;
+	}
+	while (tmp) {
+		if (is_valid_node(tmp) &&
+				(is_covered(tmp->ip, tmp_ip, depth) &&
+				(tmp->depth > depth))) {
+			prev = tmp;
+			if (flag == RTE_RIB6_GET_NXT_COVER)
+				return prev;
+		}
+		tmp = (tmp->left != NULL) ? tmp->left : tmp->right;
+	}
+	return prev;
+}
+
+void
+rte_rib6_remove(struct rte_rib6 *rib,
+	const uint8_t ip[RTE_RIB6_IPV6_ADDR_SIZE], uint8_t depth)
+{
+	struct rte_rib6_node *cur, *prev, *child;
+
+	cur = rte_rib6_lookup_exact(rib, ip, depth);
+	if (cur == NULL)
+		return;
+
+	--rib->cur_routes;
+	cur->flag &= ~RTE_RIB_VALID_NODE;
+	while (!is_valid_node(cur)) {
+		if ((cur->left != NULL) && (cur->right != NULL))
+			return;
+		child = (cur->left == NULL) ? cur->right : cur->left;
+		if (child != NULL)
+			child->parent = cur->parent;
+		if (cur->parent == NULL) {
+			rib->tree = child;
+			node_free(rib, cur);
+			return;
+		}
+		if (cur->parent->left == cur)
+			cur->parent->left = child;
+		else
+			cur->parent->right = child;
+		prev = cur;
+		cur = cur->parent;
+		node_free(rib, prev);
+	}
+}
+
+struct rte_rib6_node *
+rte_rib6_insert(struct rte_rib6 *rib,
+	const uint8_t ip[RTE_RIB6_IPV6_ADDR_SIZE], uint8_t depth)
+{
+	struct rte_rib6_node **tmp;
+	struct rte_rib6_node *prev = NULL;
+	struct rte_rib6_node *new_node = NULL;
+	struct rte_rib6_node *common_node = NULL;
+	uint8_t common_prefix[RTE_RIB6_IPV6_ADDR_SIZE];
+	uint8_t tmp_ip[RTE_RIB6_IPV6_ADDR_SIZE];
+	int i, d;
+	uint8_t common_depth, ip_xor;
+
+	if (unlikely((rib == NULL) || (ip == NULL) ||
+			(depth > RIB6_MAXDEPTH))) {
+		rte_errno = EINVAL;
+		return NULL;
+	}
+
+	tmp = &rib->tree;
+
+	for (i = 0; i < RTE_RIB6_IPV6_ADDR_SIZE; i++)
+		tmp_ip[i] = ip[i] & get_msk_part(depth, i);
+
+	new_node = rte_rib6_lookup_exact(rib, tmp_ip, depth);
+	if (new_node != NULL) {
+		rte_errno = EEXIST;
+		return NULL;
+	}
+
+	new_node = node_alloc(rib);
+	if (new_node == NULL) {
+		rte_errno = ENOMEM;
+		return NULL;
+	}
+	new_node->left = NULL;
+	new_node->right = NULL;
+	new_node->parent = NULL;
+	rte_rib6_copy_addr(new_node->ip, tmp_ip);
+	new_node->depth = depth;
+	new_node->flag = RTE_RIB_VALID_NODE;
+
+	/* traverse down the tree to find matching node or closest matching */
+	while (1) {
+		/* insert as the last node in the branch */
+		if (*tmp == NULL) {
+			*tmp = new_node;
+			new_node->parent = prev;
+			++rib->cur_routes;
+			return *tmp;
+		}
+		/*
+		 * Intermediate node found.
+		 * Previous rte_rib6_lookup_exact() returned NULL
+		 * but node with proper search criteria is found.
+		 * Validate intermediate node and return.
+		 */
+		if (rte_rib6_is_equal(tmp_ip, (*tmp)->ip) &&
+				(depth == (*tmp)->depth)) {
+			node_free(rib, new_node);
+			(*tmp)->flag |= RTE_RIB_VALID_NODE;
+			++rib->cur_routes;
+			return *tmp;
+		}
+
+		if (!is_covered(tmp_ip, (*tmp)->ip, (*tmp)->depth) ||
+				((*tmp)->depth >= depth)) {
+			break;
+		}
+		prev = *tmp;
+
+		tmp = (get_dir(tmp_ip, (*tmp)->depth)) ? &(*tmp)->right :
+				&(*tmp)->left;
+	}
+
+	/* closest node found, new_node should be inserted in the middle */
+	common_depth = RTE_MIN(depth, (*tmp)->depth);
+	for (i = 0, d = 0; i < RTE_RIB6_IPV6_ADDR_SIZE; i++) {
+		ip_xor = tmp_ip[i] ^ (*tmp)->ip[i];
+		if (ip_xor == 0)
+			d += 8;
+		else {
+			d += __builtin_clz(ip_xor << 24);
+			break;
+		}
+	}
+
+	common_depth = RTE_MIN(d, common_depth);
+
+	for (i = 0; i < RTE_RIB6_IPV6_ADDR_SIZE; i++)
+		common_prefix[i] = tmp_ip[i] & get_msk_part(common_depth, i);
+
+	if (rte_rib6_is_equal(common_prefix, tmp_ip) &&
+			(common_depth == depth)) {
+		/* insert as a parent */
+		if (get_dir((*tmp)->ip, depth))
+			new_node->right = *tmp;
+		else
+			new_node->left = *tmp;
+		new_node->parent = (*tmp)->parent;
+		(*tmp)->parent = new_node;
+		*tmp = new_node;
+	} else {
+		/* create intermediate node */
+		common_node = node_alloc(rib);
+		if (common_node == NULL) {
+			node_free(rib, new_node);
+			rte_errno = ENOMEM;
+			return NULL;
+		}
+		rte_rib6_copy_addr(common_node->ip, common_prefix);
+		common_node->depth = common_depth;
+		common_node->flag = 0;
+		common_node->parent = (*tmp)->parent;
+		new_node->parent = common_node;
+		(*tmp)->parent = common_node;
+		if (get_dir((*tmp)->ip, common_depth) == 1) {
+			common_node->left = new_node;
+			common_node->right = *tmp;
+		} else {
+			common_node->left = *tmp;
+			common_node->right = new_node;
+		}
+		*tmp = common_node;
+	}
+	++rib->cur_routes;
+	return new_node;
+}
+
+int
+rte_rib6_get_ip(struct rte_rib6_node *node, uint8_t ip[RTE_RIB6_IPV6_ADDR_SIZE])
+{
+	if ((node == NULL) || (ip == NULL)) {
+		rte_errno = EINVAL;
+		return -1;
+	}
+	rte_rib6_copy_addr(ip, node->ip);
+	return 0;
+}
+
+int
+rte_rib6_get_depth(struct rte_rib6_node *node, uint8_t *depth)
+{
+	if ((node == NULL) || (depth == NULL)) {
+		rte_errno = EINVAL;
+		return -1;
+	}
+	*depth = node->depth;
+	return 0;
+}
+
+void *
+rte_rib6_get_ext(struct rte_rib6_node *node)
+{
+	return (node == NULL) ? NULL : &node->ext[0];
+}
+
+int
+rte_rib6_get_nh(struct rte_rib6_node *node, uint64_t *nh)
+{
+	if ((node == NULL) || (nh == NULL)) {
+		rte_errno = EINVAL;
+		return -1;
+	}
+	*nh = node->nh;
+	return 0;
+}
+
+int
+rte_rib6_set_nh(struct rte_rib6_node *node, uint64_t nh)
+{
+	if (node == NULL) {
+		rte_errno = EINVAL;
+		return -1;
+	}
+	node->nh = nh;
+	return 0;
+}
+
+struct rte_rib6 *
+rte_rib6_create(const char *name, int socket_id, struct rte_rib6_conf *conf)
+{
+	char mem_name[RTE_RIB6_NAMESIZE];
+	struct rte_rib6 *rib = NULL;
+	struct rte_tailq_entry *te;
+	struct rte_rib6_list *rib6_list;
+	struct rte_mempool *node_pool;
+
+	/* Check user arguments. */
+	if ((name == NULL) || (conf == NULL) ||
+			(conf->max_nodes == 0)) {
+		rte_errno = EINVAL;
+		return NULL;
+	}
+
+	snprintf(mem_name, sizeof(mem_name), "MP_%s", name);
+	node_pool = rte_mempool_create(mem_name, conf->max_nodes,
+		sizeof(struct rte_rib6_node) + conf->ext_sz, 0, 0,
+		NULL, NULL, NULL, NULL, socket_id, 0);
+
+	if (node_pool == NULL) {
+		RTE_LOG(ERR, LPM,
+			"Can not allocate mempool for RIB6 %s\n", name);
+		return NULL;
+	}
+
+	snprintf(mem_name, sizeof(mem_name), "RIB6_%s", name);
+	rib6_list = RTE_TAILQ_CAST(rte_rib6_tailq.head, rte_rib6_list);
+
+	rte_mcfg_tailq_write_lock();
+
+	/* guarantee there is no existing RIB with the same name */
+	TAILQ_FOREACH(te, rib6_list, next) {
+		rib = (struct rte_rib6 *)te->data;
+		if (strncmp(name, rib->name, RTE_RIB6_NAMESIZE) == 0)
+			break;
+	}
+	rib = NULL;
+	if (te != NULL) {
+		rte_errno = EEXIST;
+		goto exit;
+	}
+
+	/* allocate tailq entry */
+	te = rte_zmalloc("RIB6_TAILQ_ENTRY", sizeof(*te), 0);
+	if (te == NULL) {
+		RTE_LOG(ERR, LPM,
+			"Can not allocate tailq entry for RIB6 %s\n", name);
+		rte_errno = ENOMEM;
+		goto exit;
+	}
+
+	/* Allocate memory to store the RIB6 data structures. */
+	rib = rte_zmalloc_socket(mem_name,
+		sizeof(struct rte_rib6), RTE_CACHE_LINE_SIZE, socket_id);
+	if (rib == NULL) {
+		RTE_LOG(ERR, LPM, "RIB6 %s memory allocation failed\n", name);
+		rte_errno = ENOMEM;
+		goto free_te;
+	}
+
+	rte_strlcpy(rib->name, name, sizeof(rib->name));
+	rib->tree = NULL;
+	rib->max_nodes = conf->max_nodes;
+	rib->node_pool = node_pool;
+
+	te->data = (void *)rib;
+	TAILQ_INSERT_TAIL(rib6_list, te, next);
+
+	rte_mcfg_tailq_write_unlock();
+
+	return rib;
+
+free_te:
+	rte_free(te);
+exit:
+	rte_mcfg_tailq_write_unlock();
+	rte_mempool_free(node_pool);
+
+	return NULL;
+}
+
+struct rte_rib6 *
+rte_rib6_find_existing(const char *name)
+{
+	struct rte_rib6 *rib = NULL;
+	struct rte_tailq_entry *te;
+	struct rte_rib6_list *rib6_list;
+
+	if (unlikely(name == NULL)) {
+		rte_errno = EINVAL;
+		return NULL;
+	}
+
+	rib6_list = RTE_TAILQ_CAST(rte_rib6_tailq.head, rte_rib6_list);
+
+	rte_mcfg_tailq_read_lock();
+	TAILQ_FOREACH(te, rib6_list, next) {
+		rib = (struct rte_rib6 *) te->data;
+		if (strncmp(name, rib->name, RTE_RIB6_NAMESIZE) == 0)
+			break;
+	}
+	rte_mcfg_tailq_read_unlock();
+
+	if (te == NULL) {
+		rte_errno = ENOENT;
+		return NULL;
+	}
+
+	return rib;
+}
+
+void
+rte_rib6_free(struct rte_rib6 *rib)
+{
+	struct rte_tailq_entry *te;
+	struct rte_rib6_list *rib6_list;
+	struct rte_rib6_node *tmp = NULL;
+
+	if (unlikely(rib == NULL)) {
+		rte_errno = EINVAL;
+		return;
+	}
+
+	rib6_list = RTE_TAILQ_CAST(rte_rib6_tailq.head, rte_rib6_list);
+
+	rte_mcfg_tailq_write_lock();
+
+	/* find our tailq entry */
+	TAILQ_FOREACH(te, rib6_list, next) {
+		if (te->data == (void *)rib)
+			break;
+	}
+	if (te != NULL)
+		TAILQ_REMOVE(rib6_list, te, next);
+
+	rte_mcfg_tailq_write_unlock();
+
+	while ((tmp = rte_rib6_get_nxt(rib, 0, 0, tmp,
+			RTE_RIB6_GET_NXT_ALL)) != NULL)
+		rte_rib6_remove(rib, tmp->ip, tmp->depth);
+
+	rte_mempool_free(rib->node_pool);
+
+	rte_free(rib);
+	rte_free(te);
+}
diff --git a/lib/librte_rib/rte_rib6.h b/lib/librte_rib/rte_rib6.h
new file mode 100644
index 0000000..8714571
--- /dev/null
+++ b/lib/librte_rib/rte_rib6.h
@@ -0,0 +1,334 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Vladimir Medvedkin <medvedkinv@gmail.com>
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+#ifndef _RTE_RIB6_H_
+#define _RTE_RIB6_H_
+
+/**
+ * @file
+ * Level compressed tree implementation for IPv6 Longest Prefix Match
+ */
+
+#include <rte_memcpy.h>
+#include <rte_compat.h>
+
+#define RTE_RIB6_IPV6_ADDR_SIZE	16
+
+/**
+ * rte_rib6_get_nxt() flags
+ */
+enum {
+	/** flag to get all subroutes in a RIB tree */
+	RTE_RIB6_GET_NXT_ALL,
+	/** flag to get the first matched subroutes in a RIB tree */
+	RTE_RIB6_GET_NXT_COVER
+};
+
+struct rte_rib6;
+struct rte_rib6_node;
+
+/** RIB configuration structure */
+struct rte_rib6_conf {
+	/**
+	 * Size of the extension block inside rte_rib6_node.
+	 * This space could be used to store additional user
+	 * defined data.
+	 */
+	size_t	ext_sz;
+	/** Size of the rte_rib6_node pool. */
+	int	max_nodes;
+};
+
+/**
+ * Copy IPv6 address from one location to another
+ *
+ * @param dst
+ *  pointer to the destination address buffer
+ * @param src
+ *  pointer to the source address
+ */
+static inline void
+rte_rib6_copy_addr(uint8_t *dst, const uint8_t *src)
+{
+	if ((dst == NULL) || (src == NULL))
+		return;
+	rte_memcpy(dst, src, RTE_RIB6_IPV6_ADDR_SIZE);
+}
+
+/**
+ * Compare two IPv6 addresses
+ *
+ * @param ip1
+ *  pointer to the first ipv6 address
+ * @param ip2
+ *  pointer to the second ipv6 address
+ *
+ * @return
+ *  1 if equal
+ *  0 otherwise
+ */
+static inline int
+rte_rib6_is_equal(uint8_t *ip1, uint8_t *ip2) {
+	int i;
+
+	if ((ip1 == NULL) || (ip2 == NULL))
+		return 0;
+	for (i = 0; i < RTE_RIB6_IPV6_ADDR_SIZE; i++) {
+		if (ip1[i] != ip2[i])
+			return 0;
+	}
+	return 1;
+}
+
+/**
+ * Get 8-bit part of 128-bit IPv6 mask
+ *
+ * @param depth
+ *  ipv6 prefix length
+ * @param byte
+ *  position of an 8-bit chunk in the 128-bit mask
+ *
+ * @return
+ *  8-bit chunk of the 128-bit IPv6 mask
+ */
+static inline uint8_t
+get_msk_part(uint8_t depth, int byte) {
+	uint8_t part;
+
+	byte &= 0xf;
+	depth = RTE_MIN(depth, 128);
+	part = RTE_MAX((int16_t)depth - (byte * 8), 0);
+	part = (part > 8) ? 8 : part;
+	return (uint16_t)(~UINT8_MAX) >> part;
+}
+
+/**
+ * Look up an IP address in the RIB structure
+ *
+ * @param rib
+ *  RIB object handle
+ * @param ip
+ *  IP to be looked up in the RIB
+ * @return
+ *  pointer to struct rte_rib6_node on success
+ *  NULL otherwise
+ */
+__rte_experimental
+struct rte_rib6_node *
+rte_rib6_lookup(struct rte_rib6 *rib,
+	const uint8_t ip[RTE_RIB6_IPV6_ADDR_SIZE]);
+
+/**
+ * Look up the less specific route in the RIB structure
+ *
+ * @param ent
+ *  Pointer to struct rte_rib6_node that represents target route
+ * @return
+ *  pointer to struct rte_rib6_node that represents
+ *   less specific route on success
+ *  NULL otherwise
+ */
+__rte_experimental
+struct rte_rib6_node *
+rte_rib6_lookup_parent(struct rte_rib6_node *ent);
+
+/**
+ * Provides an exact match lookup of the prefix in the RIB structure
+ *
+ * @param rib
+ *  RIB object handle
+ * @param ip
+ *  net to be looked up in the RIB
+ * @param depth
+ *  prefix length
+ * @return
+ *  pointer to struct rte_rib6_node on success
+ *  NULL otherwise
+ */
+__rte_experimental
+struct rte_rib6_node *
+rte_rib6_lookup_exact(struct rte_rib6 *rib,
+	const uint8_t ip[RTE_RIB6_IPV6_ADDR_SIZE], uint8_t depth);
+
+/**
+ * Retrieve the next more specific prefix from the RIB
+ * that is covered by the ip/depth supernet, in ascending order
+ *
+ * @param rib
+ *  RIB object handle
+ * @param ip
+ *  net address of supernet prefix that covers returned more specific prefixes
+ * @param depth
+ *  supernet prefix length
+ * @param last
+ *   pointer to the last returned prefix to get next prefix
+ *   or
+ *   NULL to get first more specific prefix
+ * @param flag
+ *  -RTE_RIB6_GET_NXT_ALL
+ *   get all prefixes from subtrie
+ *  -RTE_RIB6_GET_NXT_COVER
+ *   get only the first more specific prefix even if it has more specifics
+ * @return
+ *  pointer to the next more specific prefix
+ *  NULL if there are no prefixes left
+ */
+__rte_experimental
+struct rte_rib6_node *
+rte_rib6_get_nxt(struct rte_rib6 *rib,
+	const uint8_t ip[RTE_RIB6_IPV6_ADDR_SIZE],
+	uint8_t depth, struct rte_rib6_node *last, int flag);
+
+/**
+ * Remove prefix from the RIB
+ *
+ * @param rib
+ *  RIB object handle
+ * @param ip
+ *  net to be removed from the RIB
+ * @param depth
+ *  prefix length
+ */
+__rte_experimental
+void
+rte_rib6_remove(struct rte_rib6 *rib,
+	const uint8_t ip[RTE_RIB6_IPV6_ADDR_SIZE], uint8_t depth);
+
+/**
+ * Insert prefix into the RIB
+ *
+ * @param rib
+ *  RIB object handle
+ * @param ip
+ *  net to be inserted into the RIB
+ * @param depth
+ *  prefix length
+ * @return
+ *  pointer to new rte_rib6_node on success
+ *  NULL otherwise
+ */
+__rte_experimental
+struct rte_rib6_node *
+rte_rib6_insert(struct rte_rib6 *rib,
+	const uint8_t ip[RTE_RIB6_IPV6_ADDR_SIZE], uint8_t depth);
+
+/**
+ * Get an ip from rte_rib6_node
+ *
+ * @param node
+ *  pointer to the rib6 node
+ * @param ip
+ *  pointer to the ipv6 to save
+ * @return
+ *  0 on success
+ *  -1 on failure with rte_errno indicating reason for failure.
+ */
+__rte_experimental
+int
+rte_rib6_get_ip(struct rte_rib6_node *node,
+	uint8_t ip[RTE_RIB6_IPV6_ADDR_SIZE]);
+
+/**
+ * Get a depth from rte_rib6_node
+ *
+ * @param node
+ *  pointer to the rib6 node
+ * @param depth
+ *  pointer to the depth to save
+ * @return
+ *  0 on success
+ *  -1 on failure with rte_errno indicating reason for failure.
+ */
+__rte_experimental
+int
+rte_rib6_get_depth(struct rte_rib6_node *node, uint8_t *depth);
+
+/**
+ * Get the ext field from the rte_rib6_node.
+ * It is the caller's responsibility to make sure there is enough space
+ * for the ext field inside the rib6 node.
+ *
+ * @param node
+ *  pointer to the rte_rib6_node
+ * @return
+ *  pointer to the ext
+ */
+__rte_experimental
+void *
+rte_rib6_get_ext(struct rte_rib6_node *node);
+
+/**
+ * Get nexthop from the rte_rib6_node
+ *
+ * @param node
+ *  pointer to the rib6 node
+ * @param nh
+ *  pointer to the nexthop to save
+ * @return
+ *  0 on success
+ *  -1 on failure, with rte_errno indicating reason for failure.
+ */
+__rte_experimental
+int
+rte_rib6_get_nh(struct rte_rib6_node *node, uint64_t *nh);
+
+/**
+ * Set nexthop into the rte_rib6_node
+ *
+ * @param node
+ *  pointer to the rib6 node
+ * @param nh
+ *  nexthop value to set to the rib6 node
+ * @return
+ *  0 on success
+ *  -1 on failure, with rte_errno indicating reason for failure.
+ */
+__rte_experimental
+int
+rte_rib6_set_nh(struct rte_rib6_node *node, uint64_t nh);
+
+/**
+ * Create RIB
+ *
+ * @param name
+ *  RIB name
+ * @param socket_id
+ *  NUMA socket ID for RIB table memory allocation
+ * @param conf
+ *  Structure containing the configuration
+ * @return
+ *  Pointer to RIB object on success
+ *  NULL otherwise with rte_errno indicating reason for failure.
+ */
+__rte_experimental
+struct rte_rib6 *
+rte_rib6_create(const char *name, int socket_id, struct rte_rib6_conf *conf);
+
+/**
+ * Find an existing RIB object and return a pointer to it.
+ *
+ * @param name
+ *  Name of the rib object as passed to rte_rib6_create()
+ * @return
+ *  Pointer to RIB object on success
+ *  NULL otherwise with rte_errno indicating reason for failure.
+ */
+__rte_experimental
+struct rte_rib6 *
+rte_rib6_find_existing(const char *name);
+
+/**
+ * Free a RIB object.
+ *
+ * @param rib
+ *   RIB object handle
+ * @return
+ *   None
+ */
+__rte_experimental
+void
+rte_rib6_free(struct rte_rib6 *rib);
+
+#endif /* _RTE_RIB6_H_ */
diff --git a/lib/librte_rib/rte_rib_version.map b/lib/librte_rib/rte_rib_version.map
index 1432a22..9b6161a 100644
--- a/lib/librte_rib/rte_rib_version.map
+++ b/lib/librte_rib/rte_rib_version.map
@@ -16,5 +16,20 @@ EXPERIMENTAL {
 	rte_rib_set_nh;
 	rte_rib_remove;
 
+	rte_rib6_create;
+	rte_rib6_find_existing;
+	rte_rib6_free;
+	rte_rib6_get_depth;
+	rte_rib6_get_ext;
+	rte_rib6_get_ip;
+	rte_rib6_get_nh;
+	rte_rib6_get_nxt;
+	rte_rib6_insert;
+	rte_rib6_lookup;
+	rte_rib6_lookup_parent;
+	rte_rib6_lookup_exact;
+	rte_rib6_set_nh;
+	rte_rib6_remove;
+
 	local: *;
 };
-- 
2.7.4

