From: Vladimir Medvedkin <medvedkinv@gmail.com>
To: Matthew Hall <mhall@mhcomputing.net>
Cc: "dev@dpdk.org" <dev@dpdk.org>
Subject: Re: [dpdk-dev] [PATCH v1 0/3] lpm: increase number of next hops for lpm (ipv4)
Date: Sun, 25 Oct 2015 20:52:04 +0300 [thread overview]
Message-ID: <CANDrEHm5ycfPY5ROUXK0RQFMenZfc_0bMkUqZ1j2Vb17VxbYow@mail.gmail.com> (raw)
In-Reply-To: <562B209A.6030507@mhcomputing.net>
Hi all,
Here is my implementation.
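In short: the separate tbl24/tbl8 entry structs are unified into a single
struct rte_lpm_tbl_entry, next_hop grows to 16 bits, a fwd_class field is
added, and an optional 32-bit as_num is compiled in when
CONFIG_RTE_LIBRTE_LPM_ASNUM=y. Callers pass results through the new
struct rte_lpm_res. A minimal usage sketch of the changed API (my own
illustration, not part of the patch; it assumes an lpm already created with
rte_lpm_create(), and uses the IPv4() helper from rte_ip.h and made-up
next-hop values):

	struct rte_lpm_res res = {
		.next_hop = 1000,	/* next hop id is now 16 bit */
		.fwd_class = 5,		/* new forwarding-class field */
	};
	struct rte_lpm_res out;

	/* install 10.0.0.0/8 -> res */
	if (rte_lpm_add(lpm, IPv4(10, 0, 0, 0), 8, &res) < 0)
		rte_exit(EXIT_FAILURE, "LPM add failed\n");

	/* rte_lpm_lookup() returns 0 on hit, -ENOENT on miss */
	if (rte_lpm_lookup(lpm, IPv4(10, 1, 2, 3), &out) == 0)
		printf("nh %u class %u\n", out.next_hop, out.fwd_class);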
Signed-off-by: Vladimir Medvedkin <medvedkinv@gmail.com>
---
config/common_bsdapp | 1 +
config/common_linuxapp | 1 +
lib/librte_lpm/rte_lpm.c | 194 +++++++++++++++++++++++++++++------------------
lib/librte_lpm/rte_lpm.h | 163 +++++++++++++++++++++++----------------
4 files changed, 219 insertions(+), 140 deletions(-)
diff --git a/config/common_bsdapp b/config/common_bsdapp
index b37dcf4..408cc2c 100644
--- a/config/common_bsdapp
+++ b/config/common_bsdapp
@@ -344,6 +344,7 @@ CONFIG_RTE_LIBRTE_JOBSTATS=y
#
CONFIG_RTE_LIBRTE_LPM=y
CONFIG_RTE_LIBRTE_LPM_DEBUG=n
+CONFIG_RTE_LIBRTE_LPM_ASNUM=n
#
# Compile librte_acl
diff --git a/config/common_linuxapp b/config/common_linuxapp
index 0de43d5..1c60e63 100644
--- a/config/common_linuxapp
+++ b/config/common_linuxapp
@@ -352,6 +352,7 @@ CONFIG_RTE_LIBRTE_JOBSTATS=y
#
CONFIG_RTE_LIBRTE_LPM=y
CONFIG_RTE_LIBRTE_LPM_DEBUG=n
+CONFIG_RTE_LIBRTE_LPM_ASNUM=n
#
# Compile librte_acl
diff --git a/lib/librte_lpm/rte_lpm.c b/lib/librte_lpm/rte_lpm.c
index 163ba3c..363b400 100644
--- a/lib/librte_lpm/rte_lpm.c
+++ b/lib/librte_lpm/rte_lpm.c
@@ -159,9 +159,11 @@ rte_lpm_create(const char *name, int socket_id, int max_rules,
lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
- RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl24_entry) != 2);
- RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl8_entry) != 2);
-
+#ifdef RTE_LIBRTE_LPM_ASNUM
+ RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl_entry) != 8);
+#else
+ RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl_entry) != 4);
+#endif
/* Check user arguments. */
if ((name == NULL) || (socket_id < -1) || (max_rules == 0)){
rte_errno = EINVAL;
@@ -261,7 +263,7 @@ rte_lpm_free(struct rte_lpm *lpm)
*/
static inline int32_t
rule_add(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
- uint8_t next_hop)
+ struct rte_lpm_res *res)
{
uint32_t rule_gindex, rule_index, last_rule;
int i;
@@ -282,8 +284,11 @@ rule_add(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
/* If rule already exists update its next_hop and return. */
if (lpm->rules_tbl[rule_index].ip == ip_masked) {
- lpm->rules_tbl[rule_index].next_hop = next_hop;
-
+ lpm->rules_tbl[rule_index].next_hop = res->next_hop;
+ lpm->rules_tbl[rule_index].fwd_class = res->fwd_class;
+#ifdef RTE_LIBRTE_LPM_ASNUM
+ lpm->rules_tbl[rule_index].as_num = res->as_num;
+#endif
return rule_index;
}
}
@@ -320,7 +325,11 @@ rule_add(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
/* Add the new rule. */
lpm->rules_tbl[rule_index].ip = ip_masked;
- lpm->rules_tbl[rule_index].next_hop = next_hop;
+ lpm->rules_tbl[rule_index].next_hop = res->next_hop;
+ lpm->rules_tbl[rule_index].fwd_class = res->fwd_class;
+#ifdef RTE_LIBRTE_LPM_ASNUM
+ lpm->rules_tbl[rule_index].as_num = res->as_num;
+#endif
/* Increment the used rules counter for this rule group. */
lpm->rule_info[depth - 1].used_rules++;
@@ -382,10 +391,10 @@ rule_find(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth)
* Find, clean and allocate a tbl8.
*/
static inline int32_t
-tbl8_alloc(struct rte_lpm_tbl8_entry *tbl8)
+tbl8_alloc(struct rte_lpm_tbl_entry *tbl8)
{
uint32_t tbl8_gindex; /* tbl8 group index. */
- struct rte_lpm_tbl8_entry *tbl8_entry;
+ struct rte_lpm_tbl_entry *tbl8_entry;
/* Scan through tbl8 to find a free (i.e. INVALID) tbl8 group. */
for (tbl8_gindex = 0; tbl8_gindex < RTE_LPM_TBL8_NUM_GROUPS;
@@ -393,12 +402,12 @@ tbl8_alloc(struct rte_lpm_tbl8_entry *tbl8)
tbl8_entry = &tbl8[tbl8_gindex *
RTE_LPM_TBL8_GROUP_NUM_ENTRIES];
/* If a free tbl8 group is found clean it and set as VALID.
*/
+ if (!tbl8_entry->ext_valid) {
memset(&tbl8_entry[0], 0,
RTE_LPM_TBL8_GROUP_NUM_ENTRIES *
sizeof(tbl8_entry[0]));
- tbl8_entry->valid_group = VALID;
+ tbl8_entry->ext_valid = VALID;
/* Return group index for allocated tbl8 group. */
return tbl8_gindex;
@@ -410,46 +419,50 @@ tbl8_alloc(struct rte_lpm_tbl8_entry *tbl8)
}
static inline void
-tbl8_free(struct rte_lpm_tbl8_entry *tbl8, uint32_t tbl8_group_start)
+tbl8_free(struct rte_lpm_tbl_entry *tbl8, uint32_t tbl8_group_start)
{
/* Set tbl8 group invalid*/
- tbl8[tbl8_group_start].valid_group = INVALID;
+ tbl8[tbl8_group_start].ext_valid = INVALID;
}
static inline int32_t
add_depth_small(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
- uint8_t next_hop)
+ struct rte_lpm_res *res)
{
uint32_t tbl24_index, tbl24_range, tbl8_index, tbl8_group_end, i, j;
/* Calculate the index into Table24. */
tbl24_index = ip >> 8;
tbl24_range = depth_to_range(depth);
+ struct rte_lpm_tbl_entry new_tbl_entry = {
+#ifdef RTE_LIBRTE_LPM_ASNUM
+ .as_num = res->as_num,
+#endif
+ .next_hop = res->next_hop,
+ .fwd_class = res->fwd_class,
+ .ext_valid = 0,
+ .depth = depth,
+ .valid = VALID,
+ };
+
for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
/*
* For invalid OR valid and non-extended tbl 24 entries set
* entry.
*/
- if (!lpm->tbl24[i].valid || (lpm->tbl24[i].ext_entry == 0 &&
+ if (!lpm->tbl24[i].valid || (lpm->tbl24[i].ext_valid == 0 &&
lpm->tbl24[i].depth <= depth)) {
- struct rte_lpm_tbl24_entry new_tbl24_entry = {
- { .next_hop = next_hop, },
- .valid = VALID,
- .ext_entry = 0,
- .depth = depth,
- };
-
/* Setting tbl24 entry in one go to avoid race
* conditions
*/
- lpm->tbl24[i] = new_tbl24_entry;
+ lpm->tbl24[i] = new_tbl_entry;
continue;
}
- if (lpm->tbl24[i].ext_entry == 1) {
+ if (lpm->tbl24[i].ext_valid == 1) {
/* If tbl24 entry is valid and extended calculate the
* index into tbl8.
*/
@@ -461,19 +474,14 @@ add_depth_small(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
for (j = tbl8_index; j < tbl8_group_end; j++) {
if (!lpm->tbl8[j].valid ||
lpm->tbl8[j].depth <= depth) {
- struct rte_lpm_tbl8_entry
- new_tbl8_entry = {
- .valid = VALID,
- .valid_group = VALID,
- .depth = depth,
- .next_hop = next_hop,
- };
+
+ new_tbl_entry.ext_valid = VALID;
/*
* Setting tbl8 entry in one go to avoid
* race conditions
*/
- lpm->tbl8[j] = new_tbl8_entry;
+ lpm->tbl8[j] = new_tbl_entry;
continue;
}
@@ -486,7 +494,7 @@ add_depth_small(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
static inline int32_t
add_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
- uint8_t next_hop)
+ struct rte_lpm_res *res)
{
uint32_t tbl24_index;
int32_t tbl8_group_index, tbl8_group_start, tbl8_group_end, tbl8_index,
@@ -512,7 +520,11 @@ add_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
/* Set tbl8 entry. */
for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
lpm->tbl8[i].depth = depth;
- lpm->tbl8[i].next_hop = next_hop;
+ lpm->tbl8[i].next_hop = res->next_hop;
+ lpm->tbl8[i].fwd_class = res->fwd_class;
+#ifdef RTE_LIBRTE_LPM_ASNUM
+ lpm->tbl8[i].as_num = res->as_num;
+#endif
lpm->tbl8[i].valid = VALID;
}
@@ -522,17 +534,17 @@ add_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
* so assign whole structure in one go
*/
- struct rte_lpm_tbl24_entry new_tbl24_entry = {
- { .tbl8_gindex = (uint8_t)tbl8_group_index, },
- .valid = VALID,
- .ext_entry = 1,
+ struct rte_lpm_tbl_entry new_tbl24_entry = {
+ .tbl8_gindex = (uint16_t)tbl8_group_index,
.depth = 0,
+ .ext_valid = 1,
+ .valid = VALID,
};
lpm->tbl24[tbl24_index] = new_tbl24_entry;
}/* If valid entry but not extended calculate the index into Table8. */
- else if (lpm->tbl24[tbl24_index].ext_entry == 0) {
+ else if (lpm->tbl24[tbl24_index].ext_valid == 0) {
/* Search for free tbl8 group. */
tbl8_group_index = tbl8_alloc(lpm->tbl8);
@@ -551,6 +563,11 @@ add_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
lpm->tbl8[i].depth = lpm->tbl24[tbl24_index].depth;
lpm->tbl8[i].next_hop =
lpm->tbl24[tbl24_index].next_hop;
+ lpm->tbl8[i].fwd_class =
+ lpm->tbl24[tbl24_index].fwd_class;
+#ifdef RTE_LIBRTE_LPM_ASNUM
+ lpm->tbl8[i].as_num = lpm->tbl24[tbl24_index].as_num;
+#endif
}
tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
@@ -561,7 +578,11 @@ add_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
lpm->tbl8[i].depth <= depth) {
lpm->tbl8[i].valid = VALID;
lpm->tbl8[i].depth = depth;
- lpm->tbl8[i].next_hop = next_hop;
+ lpm->tbl8[i].next_hop = res->next_hop;
+ lpm->tbl8[i].fwd_class = res->fwd_class;
+#ifdef RTE_LIBRTE_LPM_ASNUM
+ lpm->tbl8[i].as_num = res->as_num;
+#endif
continue;
}
@@ -573,11 +594,11 @@ add_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
* so assign whole structure in one go.
*/
- struct rte_lpm_tbl24_entry new_tbl24_entry = {
- { .tbl8_gindex = (uint8_t)tbl8_group_index, },
- .valid = VALID,
- .ext_entry = 1,
+ struct rte_lpm_tbl_entry new_tbl24_entry = {
+ .tbl8_gindex = (uint16_t)tbl8_group_index,
.depth = 0,
+ .ext_valid = 1,
+ .valid = VALID,
};
lpm->tbl24[tbl24_index] = new_tbl24_entry;
@@ -595,11 +616,15 @@ add_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
if (!lpm->tbl8[i].valid ||
lpm->tbl8[i].depth <= depth) {
- struct rte_lpm_tbl8_entry new_tbl8_entry = {
- .valid = VALID,
+ struct rte_lpm_tbl_entry new_tbl8_entry = {
+#ifdef RTE_LIBRTE_LPM_ASNUM
+ .as_num = res->as_num,
+#endif
+ .next_hop = res->next_hop,
+ .fwd_class = res->fwd_class,
.depth = depth,
- .next_hop = next_hop,
- .valid_group = lpm->tbl8[i].valid_group,
+ .ext_valid = lpm->tbl8[i].ext_valid,
+ .valid = VALID,
};
/*
@@ -621,19 +646,19 @@ add_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
*/
int
rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
- uint8_t next_hop)
+ struct rte_lpm_res *res)
{
int32_t rule_index, status = 0;
uint32_t ip_masked;
/* Check user arguments. */
- if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
+ if ((lpm == NULL) || (res == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
return -EINVAL;
ip_masked = ip & depth_to_mask(depth);
/* Add the rule to the rule table. */
- rule_index = rule_add(lpm, ip_masked, depth, next_hop);
+ rule_index = rule_add(lpm, ip_masked, depth, res);
/* If there is no space available for the new rule return error. */
if (rule_index < 0) {
@@ -641,10 +666,10 @@ rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
}
if (depth <= MAX_DEPTH_TBL24) {
- status = add_depth_small(lpm, ip_masked, depth, next_hop);
+ status = add_depth_small(lpm, ip_masked, depth, res);
}
else { /* If depth > RTE_LPM_MAX_DEPTH_TBL24 */
- status = add_depth_big(lpm, ip_masked, depth, next_hop);
+ status = add_depth_big(lpm, ip_masked, depth, res);
/*
* If add fails due to exhaustion of tbl8 extensions delete
@@ -665,14 +690,14 @@ rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
*/
int
rte_lpm_is_rule_present(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
-uint8_t *next_hop)
+ struct rte_lpm_res *res)
{
uint32_t ip_masked;
int32_t rule_index;
/* Check user arguments. */
if ((lpm == NULL) ||
- (next_hop == NULL) ||
+ (res == NULL) ||
(depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
return -EINVAL;
@@ -681,7 +706,11 @@ uint8_t *next_hop)
rule_index = rule_find(lpm, ip_masked, depth);
if (rule_index >= 0) {
- *next_hop = lpm->rules_tbl[rule_index].next_hop;
+ res->next_hop = lpm->rules_tbl[rule_index].next_hop;
+ res->fwd_class = lpm->rules_tbl[rule_index].fwd_class;
+#ifdef RTE_LIBRTE_LPM_ASNUM
+ res->as_num = lpm->rules_tbl[rule_index].as_num;
+#endif
return 1;
}
@@ -731,7 +760,7 @@ delete_depth_small(struct rte_lpm *lpm, uint32_t ip_masked,
*/
for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
- if (lpm->tbl24[i].ext_entry == 0 &&
+ if (lpm->tbl24[i].ext_valid == 0 &&
lpm->tbl24[i].depth <= depth ) {
lpm->tbl24[i].valid = INVALID;
}
@@ -761,23 +790,30 @@ delete_depth_small(struct rte_lpm *lpm, uint32_t ip_masked,
* associated with this rule.
*/
- struct rte_lpm_tbl24_entry new_tbl24_entry = {
- {.next_hop = lpm->rules_tbl[sub_rule_index].next_hop,},
- .valid = VALID,
- .ext_entry = 0,
+ struct rte_lpm_tbl_entry new_tbl24_entry = {
+#ifdef RTE_LIBRTE_LPM_ASNUM
+ .as_num = lpm->rules_tbl[sub_rule_index].as_num,
+#endif
+ .next_hop = lpm->rules_tbl[sub_rule_index].next_hop,
+ .fwd_class = lpm->rules_tbl[sub_rule_index].fwd_class,
.depth = sub_rule_depth,
+ .ext_valid = 0,
+ .valid = VALID,
};
- struct rte_lpm_tbl8_entry new_tbl8_entry = {
- .valid = VALID,
+ struct rte_lpm_tbl_entry new_tbl8_entry = {
+#ifdef RTE_LIBRTE_LPM_ASNUM
+ .as_num = lpm->rules_tbl[sub_rule_index].as_num,
+#endif
+ .next_hop = lpm->rules_tbl[sub_rule_index].next_hop,
+ .fwd_class = lpm->rules_tbl[sub_rule_index].fwd_class,
.depth = sub_rule_depth,
- .next_hop = lpm->rules_tbl
- [sub_rule_index].next_hop,
+ .valid = VALID,
};
for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
- if (lpm->tbl24[i].ext_entry == 0 &&
+ if (lpm->tbl24[i].ext_valid == 0 &&
lpm->tbl24[i].depth <= depth ) {
lpm->tbl24[i] = new_tbl24_entry;
}
@@ -814,7 +850,7 @@ delete_depth_small(struct rte_lpm *lpm, uint32_t ip_masked,
* thus can be recycled
*/
static inline int32_t
-tbl8_recycle_check(struct rte_lpm_tbl8_entry *tbl8, uint32_t tbl8_group_start)
+tbl8_recycle_check(struct rte_lpm_tbl_entry *tbl8, uint32_t tbl8_group_start)
{
uint32_t tbl8_group_end, i;
tbl8_group_end = tbl8_group_start + RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
@@ -891,11 +927,15 @@ delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked,
}
else {
/* Set new tbl8 entry. */
- struct rte_lpm_tbl8_entry new_tbl8_entry = {
- .valid = VALID,
- .depth = sub_rule_depth,
- .valid_group = lpm->tbl8[tbl8_group_start].valid_group,
+ struct rte_lpm_tbl_entry new_tbl8_entry = {
+#ifdef RTE_LIBRTE_LPM_ASNUM
+ .as_num = lpm->rules_tbl[sub_rule_index].as_num,
+#endif
+ .fwd_class = lpm->rules_tbl[sub_rule_index].fwd_class,
.next_hop = lpm->rules_tbl[sub_rule_index].next_hop,
+ .depth = sub_rule_depth,
+ .ext_valid = lpm->tbl8[tbl8_group_start].ext_valid,
+ .valid = VALID,
};
/*
@@ -923,11 +963,15 @@ delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked,
}
else if (tbl8_recycle_index > -1) {
/* Update tbl24 entry. */
- struct rte_lpm_tbl24_entry new_tbl24_entry = {
- { .next_hop = lpm->tbl8[tbl8_recycle_index].next_hop, },
- .valid = VALID,
- .ext_entry = 0,
+ struct rte_lpm_tbl_entry new_tbl24_entry = {
+#ifdef RTE_LIBRTE_LPM_ASNUM
+ .as_num = lpm->tbl8[tbl8_recycle_index].as_num,
+#endif
+ .next_hop = lpm->tbl8[tbl8_recycle_index].next_hop,
+ .fwd_class = lpm->tbl8[tbl8_recycle_index].fwd_class,
.depth = lpm->tbl8[tbl8_recycle_index].depth,
+ .ext_valid = 0,
+ .valid = VALID,
};
/* Set tbl24 before freeing tbl8 to avoid race condition. */
diff --git a/lib/librte_lpm/rte_lpm.h b/lib/librte_lpm/rte_lpm.h
index c299ce2..7c615bc 100644
--- a/lib/librte_lpm/rte_lpm.h
+++ b/lib/librte_lpm/rte_lpm.h
@@ -31,8 +31,8 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef _RTE_LPM_H_
-#define _RTE_LPM_H_
+#ifndef _RTE_LPM_EXT_H_
+#define _RTE_LPM_EXT_H_
/**
* @file
@@ -81,57 +81,58 @@ extern "C" {
#define RTE_LPM_RETURN_IF_TRUE(cond, retval)
#endif
-/** @internal bitmask with valid and ext_entry/valid_group fields set */
-#define RTE_LPM_VALID_EXT_ENTRY_BITMASK 0x0300
+/** @internal bitmask with valid and ext_valid fields set */
+#define RTE_LPM_VALID_EXT_ENTRY_BITMASK 0x03
/** Bitmask used to indicate successful lookup */
-#define RTE_LPM_LOOKUP_SUCCESS 0x0100
+#define RTE_LPM_LOOKUP_SUCCESS 0x01
+
+struct rte_lpm_res {
+ uint16_t next_hop;
+ uint8_t fwd_class;
+#ifdef RTE_LIBRTE_LPM_ASNUM
+ uint32_t as_num;
+#endif
+};
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
-/** @internal Tbl24 entry structure. */
-struct rte_lpm_tbl24_entry {
- /* Stores Next hop or group index (i.e. gindex)into tbl8. */
+struct rte_lpm_tbl_entry {
+ uint8_t valid :1;
+ uint8_t ext_valid :1;
+ uint8_t depth :6;
+ uint8_t fwd_class;
union {
- uint8_t next_hop;
- uint8_t tbl8_gindex;
+ uint16_t next_hop;
+ uint16_t tbl8_gindex;
};
- /* Using single uint8_t to store 3 values. */
- uint8_t valid :1; /**< Validation flag. */
- uint8_t ext_entry :1; /**< External entry. */
- uint8_t depth :6; /**< Rule depth. */
-};
-
-/** @internal Tbl8 entry structure. */
-struct rte_lpm_tbl8_entry {
- uint8_t next_hop; /**< next hop. */
- /* Using single uint8_t to store 3 values. */
- uint8_t valid :1; /**< Validation flag. */
- uint8_t valid_group :1; /**< Group validation flag. */
- uint8_t depth :6; /**< Rule depth. */
+#ifdef RTE_LIBRTE_LPM_ASNUM
+ uint32_t as_num;
+#endif
};
#else
-struct rte_lpm_tbl24_entry {
- uint8_t depth :6;
- uint8_t ext_entry :1;
- uint8_t valid :1;
+struct rte_lpm_tbl_entry {
+#ifdef RTE_LIBRTE_LPM_ASNUM
+ uint32_t as_num;
+#endif
union {
- uint8_t tbl8_gindex;
- uint8_t next_hop;
+ uint16_t tbl8_gindex;
+ uint16_t next_hop;
};
-};
-
-struct rte_lpm_tbl8_entry {
- uint8_t depth :6;
- uint8_t valid_group :1;
- uint8_t valid :1;
- uint8_t next_hop;
+ uint8_t fwd_class;
+ uint8_t depth :6;
+ uint8_t ext_valid :1;
+ uint8_t valid :1;
};
#endif
/** @internal Rule structure. */
struct rte_lpm_rule {
uint32_t ip; /**< Rule IP address. */
- uint8_t next_hop; /**< Rule next hop. */
+#ifdef RTE_LIBRTE_LPM_ASNUM
+ uint32_t as_num;
+#endif
+ uint16_t next_hop; /**< Rule next hop. */
+ uint8_t fwd_class;
};
/** @internal Contains metadata about the rules table. */
@@ -148,9 +149,9 @@ struct rte_lpm {
struct rte_lpm_rule_info rule_info[RTE_LPM_MAX_DEPTH]; /**< Rule info table. */
/* LPM Tables. */
- struct rte_lpm_tbl24_entry tbl24[RTE_LPM_TBL24_NUM_ENTRIES] \
+ struct rte_lpm_tbl_entry tbl24[RTE_LPM_TBL24_NUM_ENTRIES] \
__rte_cache_aligned; /**< LPM tbl24 table. */
- struct rte_lpm_tbl8_entry tbl8[RTE_LPM_TBL8_NUM_ENTRIES] \
+ struct rte_lpm_tbl_entry tbl8[RTE_LPM_TBL8_NUM_ENTRIES] \
__rte_cache_aligned; /**< LPM tbl8 table. */
struct rte_lpm_rule rules_tbl[0] \
__rte_cache_aligned; /**< LPM rules. */
@@ -219,7 +220,7 @@ rte_lpm_free(struct rte_lpm *lpm);
* 0 on success, negative value otherwise
*/
int
-rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, uint8_t next_hop);
+rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, struct rte_lpm_res *res);
/**
* Check if a rule is present in the LPM table,
@@ -238,7 +239,7 @@ rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, uint8_t next_hop);
*/
int
rte_lpm_is_rule_present(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
-uint8_t *next_hop);
+ struct rte_lpm_res *res);
/**
* Delete a rule from the LPM table.
@@ -277,29 +278,43 @@ rte_lpm_delete_all(struct rte_lpm *lpm);
* -EINVAL for incorrect arguments, -ENOENT on lookup miss, 0 on lookup hit
*/
static inline int
-rte_lpm_lookup(struct rte_lpm *lpm, uint32_t ip, uint8_t *next_hop)
+rte_lpm_lookup(struct rte_lpm *lpm, uint32_t ip, struct rte_lpm_res *res)
{
unsigned tbl24_index = (ip >> 8);
- uint16_t tbl_entry;
-
+#ifdef RTE_LIBRTE_LPM_ASNUM
+ uint64_t tbl_entry;
+#else
+ uint32_t tbl_entry;
+#endif
/* DEBUG: Check user input arguments. */
- RTE_LPM_RETURN_IF_TRUE(((lpm == NULL) || (next_hop == NULL)), -EINVAL);
+ RTE_LPM_RETURN_IF_TRUE(((lpm == NULL) || (res == NULL)), -EINVAL);
/* Copy tbl24 entry */
- tbl_entry = *(const uint16_t *)&lpm->tbl24[tbl24_index];
-
+#ifdef RTE_LIBRTE_LPM_ASNUM
+ tbl_entry = *(const uint64_t *)&lpm->tbl24[tbl24_index];
+#else
+ tbl_entry = *(const uint32_t *)&lpm->tbl24[tbl24_index];
+#endif
/* Copy tbl8 entry (only if needed) */
if (unlikely((tbl_entry & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
unsigned tbl8_index = (uint8_t)ip +
- ((uint8_t)tbl_entry * RTE_LPM_TBL8_GROUP_NUM_ENTRIES);
+ ((*(struct rte_lpm_tbl_entry *)&tbl_entry).tbl8_gindex * RTE_LPM_TBL8_GROUP_NUM_ENTRIES);
- tbl_entry = *(const uint16_t *)&lpm->tbl8[tbl8_index];
+#ifdef RTE_LIBRTE_LPM_ASNUM
+ tbl_entry = *(const uint64_t *)&lpm->tbl8[tbl8_index];
+#else
+ tbl_entry = *(const uint32_t *)&lpm->tbl8[tbl8_index];
+#endif
}
-
- *next_hop = (uint8_t)tbl_entry;
+ res->next_hop = ((struct rte_lpm_tbl_entry *)&tbl_entry)->next_hop;
+ res->fwd_class = ((struct rte_lpm_tbl_entry *)&tbl_entry)->fwd_class;
+#ifdef RTE_LIBRTE_LPM_ASNUM
+ res->as_num = ((struct rte_lpm_tbl_entry *)&tbl_entry)->as_num;
+#endif
return (tbl_entry & RTE_LPM_LOOKUP_SUCCESS) ? 0 : -ENOENT;
+
}
/**
@@ -322,19 +337,25 @@ rte_lpm_lookup(struct rte_lpm *lpm, uint32_t ip, uint8_t *next_hop)
* @return
* -EINVAL for incorrect arguments, otherwise 0
*/
-#define rte_lpm_lookup_bulk(lpm, ips, next_hops, n) \
- rte_lpm_lookup_bulk_func(lpm, ips, next_hops, n)
+#define rte_lpm_lookup_bulk(lpm, ips, res_tbl, n) \
+ rte_lpm_lookup_bulk_func(lpm, ips, res_tbl, n)
static inline int
-rte_lpm_lookup_bulk_func(const struct rte_lpm *lpm, const uint32_t * ips,
- uint16_t * next_hops, const unsigned n)
+rte_lpm_lookup_bulk_func(const struct rte_lpm *lpm, const uint32_t *ips,
+ struct rte_lpm_res *res_tbl, const unsigned n)
{
unsigned i;
+ int ret = 0;
+#ifdef RTE_LIBRTE_LPM_ASNUM
+ uint64_t tbl_entry;
+#else
+ uint32_t tbl_entry;
+#endif
unsigned tbl24_indexes[n];
/* DEBUG: Check user input arguments. */
RTE_LPM_RETURN_IF_TRUE(((lpm == NULL) || (ips == NULL) ||
- (next_hops == NULL)), -EINVAL);
+ (res_tbl == NULL)), -EINVAL);
for (i = 0; i < n; i++) {
tbl24_indexes[i] = ips[i] >> 8;
@@ -342,20 +363,32 @@ rte_lpm_lookup_bulk_func(const struct rte_lpm *lpm, const uint32_t * ips,
for (i = 0; i < n; i++) {
/* Simply copy tbl24 entry to output */
- next_hops[i] = *(const uint16_t *)&lpm->tbl24[tbl24_indexes[i]];
-
+#ifdef RTE_LIBRTE_LPM_ASNUM
+ tbl_entry = *(const uint64_t *)&lpm->tbl24[tbl24_indexes[i]];
+#else
+ tbl_entry = *(const uint32_t *)&lpm->tbl24[tbl24_indexes[i]];
+#endif
/* Overwrite output with tbl8 entry if needed */
- if (unlikely((next_hops[i] & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
- RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
+ if (unlikely((tbl_entry & RTE_LPM_VALID_EXT_ENTRY_BITMASK) ==
+ RTE_LPM_VALID_EXT_ENTRY_BITMASK)) {
unsigned tbl8_index = (uint8_t)ips[i] +
- ((uint8_t)next_hops[i] *
- RTE_LPM_TBL8_GROUP_NUM_ENTRIES);
+ ((*(struct rte_lpm_tbl_entry *)&tbl_entry).tbl8_gindex * RTE_LPM_TBL8_GROUP_NUM_ENTRIES);
- next_hops[i] = *(const uint16_t *)&lpm->tbl8[tbl8_index];
+#ifdef RTE_LIBRTE_LPM_ASNUM
+ tbl_entry = *(const uint64_t *)&lpm->tbl8[tbl8_index];
+#else
+ tbl_entry = *(const uint32_t *)&lpm->tbl8[tbl8_index];
+#endif
}
+ res_tbl[i].next_hop = ((struct rte_lpm_tbl_entry *)&tbl_entry)->next_hop;
+ res_tbl[i].fwd_class = ((struct rte_lpm_tbl_entry *)&tbl_entry)->fwd_class;
+#ifdef RTE_LIBRTE_LPM_ASNUM
+ res_tbl[i].as_num = ((struct rte_lpm_tbl_entry *)&tbl_entry)->as_num;
+#endif
+ ret |= 1 << i;
}
- return 0;
+ return ret;
}
/* Mask four results. */
@@ -477,4 +510,4 @@ rte_lpm_lookupx4(const struct rte_lpm *lpm, __m128i ip, uint16_t hop[4],
}
#endif
-#endif /* _RTE_LPM_H_ */
+#endif /* _RTE_LPM_EXT_H_ */
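
For reference, the entry layout the patch enforces with RTE_BUILD_BUG_ON
(little-endian case; my own annotation, not part of the patch):

	struct rte_lpm_tbl_entry		/* 4 bytes, 8 with ASNUM */
		valid		:1	/* bit 0 */
		ext_valid	:1	/* bit 1: tbl24 entry points into a tbl8 group */
		depth		:6	/* bits 2-7 */
		fwd_class	:8
		next_hop / tbl8_gindex	:16	/* union */
		as_num		:32	/* only with CONFIG_RTE_LIBRTE_LPM_ASNUM=y */

This is also why RTE_LPM_VALID_EXT_ENTRY_BITMASK shrinks from 0x0300 to
0x03: valid and ext_valid now sit in the lowest byte of the entry, so a
lookup can test the raw 32- or 64-bit copy directly.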
2015-10-24 9:09 GMT+03:00 Matthew Hall <mhall@mhcomputing.net>:
> On 10/23/15 9:20 AM, Matthew Hall wrote:
>
>> On Fri, Oct 23, 2015 at 03:51:48PM +0200, Michal Jastrzebski wrote:
>>
>>> From: Michal Kobylinski <michalx.kobylinski@intel.com>
>>>
>>> The current DPDK LPM implementation for IPv4 and IPv6 limits the
>>> number of next hops to 256, as the next hop ID is an 8-bit field.
>>> The proposed extension increases the number of next hops for IPv4 to
>>> 2^24 and also allows 32-bit read/write operations.
>>>
>>> This patchset requires additional change to rte_table library to meet
>>> ABI compatibility requirements. A v2 will be sent next week.
>>>
>>
>> I also have a patchset for this.
>>
>> I will send it out as well so we could compare.
>>
>> Matthew.
>>
>
> Sorry about the delay; I only work on DPDK in personal time and not as
> part of a job. My patchset is attached to this email.
>
> One possible advantage of my patchset, compared to the others, is that the
> space problem is fixed in both IPv4 and IPv6, preventing asymmetry
> between these two standards, which is something I try to avoid as much as
> humanly possible.
>
> This is because my application code is green-field, so I absolutely don't
> want to put any ugly hacks or incompatibilities in this code if I can
> possibly avoid it.
>
> Otherwise, I am not necessarily as expert on rte_lpm as some of the
> full-time guys, but I think with four or five of us in the thread hammering
> out patches, we will be able to create something amazing together, and I am
> very, very happy about this.
>
> Matthew.
>
Thread overview: 24+ messages
2015-10-23 13:51 Michal Jastrzebski
2015-10-23 13:51 ` [dpdk-dev] [PATCH v1 1/3] " Michal Jastrzebski
2015-10-23 14:38 ` Bruce Richardson
2015-10-23 14:59 ` Jastrzebski, MichalX K
2015-10-23 13:51 ` [dpdk-dev] [PATCH v1 2/3] examples: update of apps using librte_lpm (ipv4) Michal Jastrzebski
2015-10-23 13:51 ` [dpdk-dev] [PATCH v1 3/3] doc: update release 2.2 after changes in librte_lpm Michal Jastrzebski
2015-10-23 14:21 ` Bruce Richardson
2015-10-23 14:33 ` Jastrzebski, MichalX K
2015-10-23 16:20 ` [dpdk-dev] [PATCH v1 0/3] lpm: increase number of next hops for lpm (ipv4) Matthew Hall
2015-10-23 16:33 ` Stephen Hemminger
2015-10-23 18:38 ` Matthew Hall
2015-10-23 19:13 ` Vladimir Medvedkin
2015-10-23 19:59 ` Stephen Hemminger
2015-10-24 6:09 ` Matthew Hall
2015-10-25 17:52 ` Vladimir Medvedkin [this message]
[not found] ` <20151026115519.GA7576@MKJASTRX-MOBL>
2015-10-26 11:57 ` Jastrzebski, MichalX K
2015-10-26 14:03 ` Vladimir Medvedkin
2015-10-26 15:39 ` Michal Jastrzebski
2015-10-26 16:59 ` Vladimir Medvedkin
2015-10-26 12:13 ` Jastrzebski, MichalX K
2015-10-26 18:40 ` Matthew Hall
2015-10-27 10:35 ` Vladimir Medvedkin
2015-10-27 10:33 ` Vladimir Medvedkin
2015-10-30 7:17 ` Matthew Hall