From: Anatoly Burakov <anatoly.burakov@intel.com>
To: dev@dpdk.org
Subject: [dpdk-dev] [PATCH 06/13] ip_frag: renaming structures in fragmentation table to be more generic
Date: Wed, 28 May 2014 18:32:40 +0100 [thread overview]
Message-ID: <3b2b8b8faa75b89f0ca0eea5ff5d14401fa3dd7a.1401298292.git.anatoly.burakov@intel.com> (raw)
In-Reply-To: <cover.1401298292.git.anatoly.burakov@intel.com>
Technically, the fragmentation table can work for both IPv4 and IPv6
packets, so we're renaming everything to be generic enough to make sense
in an IPv6 context.
Signed-off-by: Anatoly Burakov <anatoly.burakov@intel.com>
---
examples/ip_reassembly/main.c | 16 ++---
lib/librte_ip_frag/ip_frag_common.h | 2 +
lib/librte_ip_frag/ipv4_frag_tbl.h | 130 ++++++++++++++++++------------------
lib/librte_ip_frag/rte_ipv4_rsmbl.h | 92 ++++++++++++-------------
4 files changed, 122 insertions(+), 118 deletions(-)
diff --git a/examples/ip_reassembly/main.c b/examples/ip_reassembly/main.c
index 42ade5c..23ec4be 100644
--- a/examples/ip_reassembly/main.c
+++ b/examples/ip_reassembly/main.c
@@ -407,9 +407,9 @@ struct lcore_conf {
#else
lookup_struct_t * ipv6_lookup_struct;
#endif
- struct ipv4_frag_tbl *frag_tbl[MAX_RX_QUEUE_PER_LCORE];
+ struct ip_frag_tbl *frag_tbl[MAX_RX_QUEUE_PER_LCORE];
struct rte_mempool *pool[MAX_RX_QUEUE_PER_LCORE];
- struct ipv4_frag_death_row death_row;
+ struct ip_frag_death_row death_row;
struct mbuf_table *tx_mbufs[MAX_PORTS];
struct tx_lcore_stat tx_stat;
} __rte_cache_aligned;
@@ -673,8 +673,8 @@ l3fwd_simple_forward(struct rte_mbuf *m, uint8_t portid, uint32_t queue,
if (ip_flag != 0 || ip_ofs != 0) {
struct rte_mbuf *mo;
- struct ipv4_frag_tbl *tbl;
- struct ipv4_frag_death_row *dr;
+ struct ip_frag_tbl *tbl;
+ struct ip_frag_death_row *dr;
tbl = qconf->frag_tbl[queue];
dr = &qconf->death_row;
@@ -684,7 +684,7 @@ l3fwd_simple_forward(struct rte_mbuf *m, uint8_t portid, uint32_t queue,
m->pkt.vlan_macip.f.l3_len = sizeof(*ipv4_hdr);
/* process this fragment. */
- if ((mo = ipv4_frag_mbuf(tbl, dr, m, tms, ipv4_hdr,
+ if ((mo = rte_ipv4_reassemble_packet(tbl, dr, m, tms, ipv4_hdr,
ip_ofs, ip_flag)) == NULL)
/* no packet to send out. */
return;
@@ -822,7 +822,7 @@ main_loop(__attribute__((unused)) void *dummy)
i, qconf, cur_tsc);
}
- ipv4_frag_free_death_row(&qconf->death_row,
+ rte_ip_frag_free_death_row(&qconf->death_row,
PREFETCH_OFFSET);
}
}
@@ -1456,7 +1456,7 @@ setup_queue_tbl(struct lcore_conf *qconf, uint32_t lcore, int socket,
frag_cycles = (rte_get_tsc_hz() + MS_PER_S - 1) / MS_PER_S *
max_flow_ttl;
- if ((qconf->frag_tbl[queue] = ipv4_frag_tbl_create(max_flow_num,
+ if ((qconf->frag_tbl[queue] = rte_ip_frag_table_create(max_flow_num,
IPV4_FRAG_TBL_BUCKET_ENTRIES, max_flow_num, frag_cycles,
socket)) == NULL)
rte_exit(EXIT_FAILURE, "ipv4_frag_tbl_create(%u) on "
@@ -1501,7 +1501,7 @@ queue_dump_stat(void)
"rxqueueid=%hhu frag tbl stat:\n",
lcore, qconf->rx_queue_list[i].port_id,
qconf->rx_queue_list[i].queue_id);
- ipv4_frag_tbl_dump_stat(stdout, qconf->frag_tbl[i]);
+ rte_ip_frag_table_statistics_dump(stdout, qconf->frag_tbl[i]);
fprintf(stdout, "TX bursts:\t%" PRIu64 "\n"
"TX packets _queued:\t%" PRIu64 "\n"
"TX packets dropped:\t%" PRIu64 "\n"
diff --git a/lib/librte_ip_frag/ip_frag_common.h b/lib/librte_ip_frag/ip_frag_common.h
index c9741c0..6d4706a 100644
--- a/lib/librte_ip_frag/ip_frag_common.h
+++ b/lib/librte_ip_frag/ip_frag_common.h
@@ -34,6 +34,8 @@
#ifndef _IP_FRAG_COMMON_H_
#define _IP_FRAG_COMMON_H_
+#include "rte_ip_frag.h"
+
/* Debug on/off */
#ifdef RTE_IP_FRAG_DEBUG
diff --git a/lib/librte_ip_frag/ipv4_frag_tbl.h b/lib/librte_ip_frag/ipv4_frag_tbl.h
index 5487230..fa3291d 100644
--- a/lib/librte_ip_frag/ipv4_frag_tbl.h
+++ b/lib/librte_ip_frag/ipv4_frag_tbl.h
@@ -43,7 +43,7 @@
*/
/*
- * The ipv4_frag_tbl is a simple hash table:
+ * The ip_frag_tbl is a simple hash table:
* The basic idea is to use two hash functions and <bucket_entries>
* associativity. This provides 2 * <bucket_entries> possible locations in
* the hash table for each key. Sort of simplified Cuckoo hashing,
@@ -64,9 +64,9 @@
#define PRIME_VALUE 0xeaad8405
-TAILQ_HEAD(ipv4_pkt_list, ipv4_frag_pkt);
+TAILQ_HEAD(ip_pkt_list, ip_frag_pkt);
-struct ipv4_frag_tbl_stat {
+struct ip_frag_tbl_stat {
uint64_t find_num; /* total # of find/insert attempts. */
uint64_t add_num; /* # of add ops. */
uint64_t del_num; /* # of del ops. */
@@ -75,7 +75,7 @@ struct ipv4_frag_tbl_stat {
uint64_t fail_nospace; /* # of 'no space' add failures. */
} __rte_cache_aligned;
-struct ipv4_frag_tbl {
+struct ip_frag_tbl {
uint64_t max_cycles; /* ttl for table entries. */
uint32_t entry_mask; /* hash value mask. */
uint32_t max_entries; /* max entries allowed. */
@@ -83,25 +83,25 @@ struct ipv4_frag_tbl {
uint32_t bucket_entries; /* hash assocaitivity. */
uint32_t nb_entries; /* total size of the table. */
uint32_t nb_buckets; /* num of associativity lines. */
- struct ipv4_frag_pkt *last; /* last used entry. */
- struct ipv4_pkt_list lru; /* LRU list for table entries. */
- struct ipv4_frag_tbl_stat stat; /* statistics counters. */
- struct ipv4_frag_pkt pkt[0]; /* hash table. */
+ struct ip_frag_pkt *last; /* last used entry. */
+ struct ip_pkt_list lru; /* LRU list for table entries. */
+ struct ip_frag_tbl_stat stat; /* statistics counters. */
+ struct ip_frag_pkt pkt[0]; /* hash table. */
};
-#define IPV4_FRAG_TBL_POS(tbl, sig) \
+#define IP_FRAG_TBL_POS(tbl, sig) \
((tbl)->pkt + ((sig) & (tbl)->entry_mask))
-#define IPV4_FRAG_HASH_FNUM 2
+#define IP_FRAG_HASH_FNUM 2
-#ifdef IPV4_FRAG_TBL_STAT
-#define IPV4_FRAG_TBL_STAT_UPDATE(s, f, v) ((s)->f += (v))
+#ifdef IP_FRAG_TBL_STAT
+#define IP_FRAG_TBL_STAT_UPDATE(s, f, v) ((s)->f += (v))
#else
-#define IPV4_FRAG_TBL_STAT_UPDATE(s, f, v) do {} while (0)
+#define IP_FRAG_TBL_STAT_UPDATE(s, f, v) do {} while (0)
#endif /* IPV4_FRAG_TBL_STAT */
static inline void
-ipv4_frag_hash(const struct ipv4_frag_key *key, uint32_t *v1, uint32_t *v2)
+ipv4_frag_hash(const struct ip_frag_key *key, uint32_t *v1, uint32_t *v2)
{
uint32_t v;
const uint32_t *p;
@@ -125,9 +125,9 @@ ipv4_frag_hash(const struct ipv4_frag_key *key, uint32_t *v1, uint32_t *v2)
* Update the table, after we finish processing it's entry.
*/
static inline void
-ipv4_frag_inuse(struct ipv4_frag_tbl *tbl, const struct ipv4_frag_pkt *fp)
+ip_frag_inuse(struct ip_frag_tbl *tbl, const struct ip_frag_pkt *fp)
{
- if (IPV4_FRAG_KEY_EMPTY(&fp->key)) {
+ if (IP_FRAG_KEY_EMPTY(&fp->key)) {
TAILQ_REMOVE(&tbl->lru, fp, lru);
tbl->use_entries--;
}
@@ -138,13 +138,13 @@ ipv4_frag_inuse(struct ipv4_frag_tbl *tbl, const struct ipv4_frag_pkt *fp)
* If such entry doesn't exist, will return free and/or timed-out entry,
* that can be used for that key.
*/
-static inline struct ipv4_frag_pkt *
-ipv4_frag_lookup(struct ipv4_frag_tbl *tbl,
- const struct ipv4_frag_key *key, uint64_t tms,
- struct ipv4_frag_pkt **free, struct ipv4_frag_pkt **stale)
+static inline struct ip_frag_pkt *
+ip_frag_lookup(struct ip_frag_tbl *tbl,
+ const struct ip_frag_key *key, uint64_t tms,
+ struct ip_frag_pkt **free, struct ip_frag_pkt **stale)
{
- struct ipv4_frag_pkt *p1, *p2;
- struct ipv4_frag_pkt *empty, *old;
+ struct ip_frag_pkt *p1, *p2;
+ struct ip_frag_pkt *empty, *old;
uint64_t max_cycles;
uint32_t i, assoc, sig1, sig2;
@@ -154,43 +154,43 @@ ipv4_frag_lookup(struct ipv4_frag_tbl *tbl,
max_cycles = tbl->max_cycles;
assoc = tbl->bucket_entries;
- if (tbl->last != NULL && IPV4_FRAG_KEY_CMP(&tbl->last->key, key) == 0)
+ if (tbl->last != NULL && IP_FRAG_KEY_CMP(&tbl->last->key, key) == 0)
return (tbl->last);
ipv4_frag_hash(key, &sig1, &sig2);
- p1 = IPV4_FRAG_TBL_POS(tbl, sig1);
- p2 = IPV4_FRAG_TBL_POS(tbl, sig2);
+ p1 = IP_FRAG_TBL_POS(tbl, sig1);
+ p2 = IP_FRAG_TBL_POS(tbl, sig2);
for (i = 0; i != assoc; i++) {
- IPV4_FRAG_LOG(DEBUG, "%s:%d:\n"
+ IP_FRAG_LOG(DEBUG, "%s:%d:\n"
"tbl: %p, max_entries: %u, use_entries: %u\n"
- "ipv4_frag_pkt line0: %p, index: %u from %u\n"
+ "ip_frag_pkt line0: %p, index: %u from %u\n"
"key: <%" PRIx64 ", %#x>, start: %" PRIu64 "\n",
__func__, __LINE__,
tbl, tbl->max_entries, tbl->use_entries,
p1, i, assoc,
p1[i].key.src_dst, p1[i].key.id, p1[i].start);
- if (IPV4_FRAG_KEY_CMP(&p1[i].key, key) == 0)
+ if (IP_FRAG_KEY_CMP(&p1[i].key, key) == 0)
return (p1 + i);
- else if (IPV4_FRAG_KEY_EMPTY(&p1[i].key))
+ else if (IP_FRAG_KEY_EMPTY(&p1[i].key))
empty = (empty == NULL) ? (p1 + i) : empty;
else if (max_cycles + p1[i].start < tms)
old = (old == NULL) ? (p1 + i) : old;
- IPV4_FRAG_LOG(DEBUG, "%s:%d:\n"
+ IP_FRAG_LOG(DEBUG, "%s:%d:\n"
"tbl: %p, max_entries: %u, use_entries: %u\n"
- "ipv4_frag_pkt line1: %p, index: %u from %u\n"
+ "ip_frag_pkt line1: %p, index: %u from %u\n"
"key: <%" PRIx64 ", %#x>, start: %" PRIu64 "\n",
__func__, __LINE__,
tbl, tbl->max_entries, tbl->use_entries,
p2, i, assoc,
p2[i].key.src_dst, p2[i].key.id, p2[i].start);
- if (IPV4_FRAG_KEY_CMP(&p2[i].key, key) == 0)
+ if (IP_FRAG_KEY_CMP(&p2[i].key, key) == 0)
return (p2 + i);
- else if (IPV4_FRAG_KEY_EMPTY(&p2[i].key))
+ else if (IP_FRAG_KEY_EMPTY(&p2[i].key))
empty = (empty == NULL) ?( p2 + i) : empty;
else if (max_cycles + p2[i].start < tms)
old = (old == NULL) ? (p2 + i) : old;
@@ -202,36 +202,36 @@ ipv4_frag_lookup(struct ipv4_frag_tbl *tbl,
}
static inline void
-ipv4_frag_tbl_del(struct ipv4_frag_tbl *tbl, struct ipv4_frag_death_row *dr,
- struct ipv4_frag_pkt *fp)
+ip_frag_tbl_del(struct ip_frag_tbl *tbl, struct ip_frag_death_row *dr,
+ struct ip_frag_pkt *fp)
{
- ipv4_frag_free(fp, dr);
- IPV4_FRAG_KEY_INVALIDATE(&fp->key);
+ ip_frag_free(fp, dr);
+ IP_FRAG_KEY_INVALIDATE(&fp->key);
TAILQ_REMOVE(&tbl->lru, fp, lru);
tbl->use_entries--;
- IPV4_FRAG_TBL_STAT_UPDATE(&tbl->stat, del_num, 1);
+ IP_FRAG_TBL_STAT_UPDATE(&tbl->stat, del_num, 1);
}
static inline void
-ipv4_frag_tbl_add(struct ipv4_frag_tbl *tbl, struct ipv4_frag_pkt *fp,
- const struct ipv4_frag_key *key, uint64_t tms)
+ip_frag_tbl_add(struct ip_frag_tbl *tbl, struct ip_frag_pkt *fp,
+ const struct ip_frag_key *key, uint64_t tms)
{
fp->key = key[0];
- ipv4_frag_reset(fp, tms);
+ ip_frag_reset(fp, tms);
TAILQ_INSERT_TAIL(&tbl->lru, fp, lru);
tbl->use_entries++;
- IPV4_FRAG_TBL_STAT_UPDATE(&tbl->stat, add_num, 1);
+ IP_FRAG_TBL_STAT_UPDATE(&tbl->stat, add_num, 1);
}
static inline void
-ipv4_frag_tbl_reuse(struct ipv4_frag_tbl *tbl, struct ipv4_frag_death_row *dr,
- struct ipv4_frag_pkt *fp, uint64_t tms)
+ip_frag_tbl_reuse(struct ip_frag_tbl *tbl, struct ip_frag_death_row *dr,
+ struct ip_frag_pkt *fp, uint64_t tms)
{
- ipv4_frag_free(fp, dr);
- ipv4_frag_reset(fp, tms);
+ ip_frag_free(fp, dr);
+ ip_frag_reset(fp, tms);
TAILQ_REMOVE(&tbl->lru, fp, lru);
TAILQ_INSERT_TAIL(&tbl->lru, fp, lru);
- IPV4_FRAG_TBL_STAT_UPDATE(&tbl->stat, reuse_num, 1);
+ IP_FRAG_TBL_STAT_UPDATE(&tbl->stat, reuse_num, 1);
}
/*
@@ -239,11 +239,11 @@ ipv4_frag_tbl_reuse(struct ipv4_frag_tbl *tbl, struct ipv4_frag_death_row *dr,
* If such entry is not present, then allocate a new one.
* If the entry is stale, then free and reuse it.
*/
-static inline struct ipv4_frag_pkt *
-ipv4_frag_find(struct ipv4_frag_tbl *tbl, struct ipv4_frag_death_row *dr,
- const struct ipv4_frag_key *key, uint64_t tms)
+static inline struct ip_frag_pkt *
+ip_frag_find(struct ip_frag_tbl *tbl, struct ip_frag_death_row *dr,
+ const struct ip_frag_key *key, uint64_t tms)
{
- struct ipv4_frag_pkt *pkt, *free, *stale, *lru;
+ struct ip_frag_pkt *pkt, *free, *stale, *lru;
uint64_t max_cycles;
/*
@@ -254,13 +254,13 @@ ipv4_frag_find(struct ipv4_frag_tbl *tbl, struct ipv4_frag_death_row *dr,
stale = NULL;
max_cycles = tbl->max_cycles;
- IPV4_FRAG_TBL_STAT_UPDATE(&tbl->stat, find_num, 1);
+ IP_FRAG_TBL_STAT_UPDATE(&tbl->stat, find_num, 1);
- if ((pkt = ipv4_frag_lookup(tbl, key, tms, &free, &stale)) == NULL) {
+ if ((pkt = ip_frag_lookup(tbl, key, tms, &free, &stale)) == NULL) {
/*timed-out entry, free and invalidate it*/
if (stale != NULL) {
- ipv4_frag_tbl_del(tbl, dr, stale);
+ ip_frag_tbl_del(tbl, dr, stale);
free = stale;
/*
@@ -272,17 +272,17 @@ ipv4_frag_find(struct ipv4_frag_tbl *tbl, struct ipv4_frag_death_row *dr,
tbl->max_entries <= tbl->use_entries) {
lru = TAILQ_FIRST(&tbl->lru);
if (max_cycles + lru->start < tms) {
- ipv4_frag_tbl_del(tbl, dr, lru);
+ ip_frag_tbl_del(tbl, dr, lru);
} else {
free = NULL;
- IPV4_FRAG_TBL_STAT_UPDATE(&tbl->stat,
+ IP_FRAG_TBL_STAT_UPDATE(&tbl->stat,
fail_nospace, 1);
}
}
/* found a free entry to reuse. */
if (free != NULL) {
- ipv4_frag_tbl_add(tbl, free, key, tms);
+ ip_frag_tbl_add(tbl, free, key, tms);
pkt = free;
}
@@ -292,10 +292,10 @@ ipv4_frag_find(struct ipv4_frag_tbl *tbl, struct ipv4_frag_death_row *dr,
* and reuse it.
*/
} else if (max_cycles + pkt->start < tms) {
- ipv4_frag_tbl_reuse(tbl, dr, pkt, tms);
+ ip_frag_tbl_reuse(tbl, dr, pkt, tms);
}
- IPV4_FRAG_TBL_STAT_UPDATE(&tbl->stat, fail_total, (pkt == NULL));
+ IP_FRAG_TBL_STAT_UPDATE(&tbl->stat, fail_total, (pkt == NULL));
tbl->last = pkt;
return (pkt);
@@ -319,17 +319,17 @@ ipv4_frag_find(struct ipv4_frag_tbl *tbl, struct ipv4_frag_death_row *dr,
* @return
* The pointer to the new allocated mempool, on success. NULL on error.
*/
-static struct ipv4_frag_tbl *
-ipv4_frag_tbl_create(uint32_t bucket_num, uint32_t bucket_entries,
+static struct ip_frag_tbl *
+rte_ip_frag_table_create(uint32_t bucket_num, uint32_t bucket_entries,
uint32_t max_entries, uint64_t max_cycles, int socket_id)
{
- struct ipv4_frag_tbl *tbl;
+ struct ip_frag_tbl *tbl;
size_t sz;
uint64_t nb_entries;
nb_entries = rte_align32pow2(bucket_num);
nb_entries *= bucket_entries;
- nb_entries *= IPV4_FRAG_HASH_FNUM;
+ nb_entries *= IP_FRAG_HASH_FNUM;
/* check input parameters. */
if (rte_is_power_of_2(bucket_entries) == 0 ||
@@ -363,13 +363,13 @@ ipv4_frag_tbl_create(uint32_t bucket_num, uint32_t bucket_entries,
}
static inline void
-ipv4_frag_tbl_destroy( struct ipv4_frag_tbl *tbl)
+rte_ip_frag_table_destroy( struct ip_frag_tbl *tbl)
{
rte_free(tbl);
}
static void
-ipv4_frag_tbl_dump_stat(FILE *f, const struct ipv4_frag_tbl *tbl)
+rte_ip_frag_table_statistics_dump(FILE *f, const struct ip_frag_tbl *tbl)
{
uint64_t fail_total, fail_nospace;
diff --git a/lib/librte_ip_frag/rte_ipv4_rsmbl.h b/lib/librte_ip_frag/rte_ipv4_rsmbl.h
index 58ec1ee..82cb9b5 100644
--- a/lib/librte_ip_frag/rte_ipv4_rsmbl.h
+++ b/lib/librte_ip_frag/rte_ipv4_rsmbl.h
@@ -34,6 +34,8 @@
#ifndef _IPV4_RSMBL_H_
#define _IPV4_RSMBL_H_
+#include "ip_frag_common.h"
+
/**
* @file
* IPv4 reassemble
@@ -49,7 +51,7 @@ enum {
MAX_FRAG_NUM = 4,
};
-struct ipv4_frag {
+struct ip_frag {
uint16_t ofs;
uint16_t len;
struct rte_mbuf *mb;
@@ -58,15 +60,15 @@ struct ipv4_frag {
/*
* Use <src addr, dst_addr, id> to uniquely indetify fragmented datagram.
*/
-struct ipv4_frag_key {
+struct ip_frag_key {
uint64_t src_dst;
uint32_t id;
};
-#define IPV4_FRAG_KEY_INVALIDATE(k) ((k)->src_dst = 0)
-#define IPV4_FRAG_KEY_EMPTY(k) ((k)->src_dst == 0)
+#define IP_FRAG_KEY_INVALIDATE(k) ((k)->src_dst = 0)
+#define IP_FRAG_KEY_EMPTY(k) ((k)->src_dst == 0)
-#define IPV4_FRAG_KEY_CMP(k1, k2) \
+#define IP_FRAG_KEY_CMP(k1, k2) \
(((k1)->src_dst ^ (k2)->src_dst) | ((k1)->id ^ (k2)->id))
@@ -74,37 +76,37 @@ struct ipv4_frag_key {
* Fragmented packet to reassemble.
* First two entries in the frags[] array are for the last and first fragments.
*/
-struct ipv4_frag_pkt {
- TAILQ_ENTRY(ipv4_frag_pkt) lru; /* LRU list */
- struct ipv4_frag_key key;
+struct ip_frag_pkt {
+ TAILQ_ENTRY(ip_frag_pkt) lru; /* LRU list */
+ struct ip_frag_key key;
uint64_t start; /* creation timestamp */
uint32_t total_size; /* expected reassembled size */
uint32_t frag_size; /* size of fragments received */
uint32_t last_idx; /* index of next entry to fill */
- struct ipv4_frag frags[MAX_FRAG_NUM];
+ struct ip_frag frags[MAX_FRAG_NUM];
} __rte_cache_aligned;
-struct ipv4_frag_death_row {
+struct ip_frag_death_row {
uint32_t cnt;
struct rte_mbuf *row[MAX_PKT_BURST * (MAX_FRAG_NUM + 1)];
};
-#define IPV4_FRAG_MBUF2DR(dr, mb) ((dr)->row[(dr)->cnt++] = (mb))
+#define IP_FRAG_MBUF2DR(dr, mb) ((dr)->row[(dr)->cnt++] = (mb))
/* logging macros. */
-#ifdef IPV4_FRAG_DEBUG
-#define IPV4_FRAG_LOG(lvl, fmt, args...) RTE_LOG(lvl, USER1, fmt, ##args)
+#ifdef IP_FRAG_DEBUG
+#define IP_FRAG_LOG(lvl, fmt, args...) RTE_LOG(lvl, USER1, fmt, ##args)
#else
-#define IPV4_FRAG_LOG(lvl, fmt, args...) do {} while(0)
-#endif /* IPV4_FRAG_DEBUG */
+#define IP_FRAG_LOG(lvl, fmt, args...) do {} while(0)
+#endif /* IP_FRAG_DEBUG */
static inline void
-ipv4_frag_reset(struct ipv4_frag_pkt *fp, uint64_t tms)
+ip_frag_reset(struct ip_frag_pkt *fp, uint64_t tms)
{
- static const struct ipv4_frag zero_frag = {
+ static const struct ip_frag zero_frag = {
.ofs = 0,
.len = 0,
.mb = NULL,
@@ -119,7 +121,7 @@ ipv4_frag_reset(struct ipv4_frag_pkt *fp, uint64_t tms)
}
static inline void
-ipv4_frag_free(struct ipv4_frag_pkt *fp, struct ipv4_frag_death_row *dr)
+ip_frag_free(struct ip_frag_pkt *fp, struct ip_frag_death_row *dr)
{
uint32_t i, k;
@@ -136,7 +138,7 @@ ipv4_frag_free(struct ipv4_frag_pkt *fp, struct ipv4_frag_death_row *dr)
}
static inline void
-ipv4_frag_free_death_row(struct ipv4_frag_death_row *dr, uint32_t prefetch)
+rte_ip_frag_free_death_row(struct ip_frag_death_row *dr, uint32_t prefetch)
{
uint32_t i, k, n;
@@ -163,7 +165,7 @@ ipv4_frag_free_death_row(struct ipv4_frag_death_row *dr, uint32_t prefetch)
* chains them into one mbuf.
*/
static inline void
-ipv4_frag_chain(struct rte_mbuf *mn, struct rte_mbuf *mp)
+ip_frag_chain(struct rte_mbuf *mn, struct rte_mbuf *mp)
{
struct rte_mbuf *ms;
@@ -188,7 +190,7 @@ ipv4_frag_chain(struct rte_mbuf *mn, struct rte_mbuf *mp)
* Reassemble fragments into one packet.
*/
static inline struct rte_mbuf *
-ipv4_frag_reassemble(const struct ipv4_frag_pkt *fp)
+ipv4_frag_reassemble(const struct ip_frag_pkt *fp)
{
struct ipv4_hdr *ip_hdr;
struct rte_mbuf *m, *prev;
@@ -210,7 +212,7 @@ ipv4_frag_reassemble(const struct ipv4_frag_pkt *fp)
/* previous fragment found. */
if(fp->frags[i].ofs + fp->frags[i].len == ofs) {
- ipv4_frag_chain(fp->frags[i].mb, m);
+ ip_frag_chain(fp->frags[i].mb, m);
/* update our last fragment and offset. */
m = fp->frags[i].mb;
@@ -225,14 +227,14 @@ ipv4_frag_reassemble(const struct ipv4_frag_pkt *fp)
}
/* chain with the first fragment. */
- ipv4_frag_chain(fp->frags[FIRST_FRAG_IDX].mb, m);
+ ip_frag_chain(fp->frags[FIRST_FRAG_IDX].mb, m);
m = fp->frags[FIRST_FRAG_IDX].mb;
/* update mbuf fields for reassembled packet. */
m->ol_flags |= PKT_TX_IP_CKSUM;
/* update ipv4 header for the reassmebled packet */
- ip_hdr = (struct ipv4_hdr*)(rte_pktmbuf_mtod(m, uint8_t *) +
+ ip_hdr = (struct ipv4_hdr *)(rte_pktmbuf_mtod(m, uint8_t *) +
m->pkt.vlan_macip.f.l2_len);
ip_hdr->total_length = rte_cpu_to_be_16((uint16_t)(fp->total_size +
@@ -245,7 +247,7 @@ ipv4_frag_reassemble(const struct ipv4_frag_pkt *fp)
}
static inline struct rte_mbuf *
-ipv4_frag_process(struct ipv4_frag_pkt *fp, struct ipv4_frag_death_row *dr,
+ip_frag_process(struct ip_frag_pkt *fp, struct ip_frag_death_row *dr,
struct rte_mbuf *mb, uint16_t ofs, uint16_t len, uint16_t more_frags)
{
uint32_t idx;
@@ -276,7 +278,7 @@ ipv4_frag_process(struct ipv4_frag_pkt *fp, struct ipv4_frag_death_row *dr,
if (idx >= sizeof (fp->frags) / sizeof (fp->frags[0])) {
/* report an error. */
- IPV4_FRAG_LOG(DEBUG, "%s:%d invalid fragmented packet:\n"
+ IP_FRAG_LOG(DEBUG, "%s:%d invalid fragmented packet:\n"
"ipv4_frag_pkt: %p, key: <%" PRIx64 ", %#x>, "
"total_size: %u, frag_size: %u, last_idx: %u\n"
"first fragment: ofs: %u, len: %u\n"
@@ -290,9 +292,9 @@ ipv4_frag_process(struct ipv4_frag_pkt *fp, struct ipv4_frag_death_row *dr,
fp->frags[LAST_FRAG_IDX].len);
/* free all fragments, invalidate the entry. */
- ipv4_frag_free(fp, dr);
- IPV4_FRAG_KEY_INVALIDATE(&fp->key);
- IPV4_FRAG_MBUF2DR(dr, mb);
+ ip_frag_free(fp, dr);
+ IP_FRAG_KEY_INVALIDATE(&fp->key);
+ IP_FRAG_MBUF2DR(dr, mb);
return (NULL);
}
@@ -317,7 +319,7 @@ ipv4_frag_process(struct ipv4_frag_pkt *fp, struct ipv4_frag_death_row *dr,
if (mb == NULL) {
/* report an error. */
- IPV4_FRAG_LOG(DEBUG, "%s:%d invalid fragmented packet:\n"
+ IP_FRAG_LOG(DEBUG, "%s:%d invalid fragmented packet:\n"
"ipv4_frag_pkt: %p, key: <%" PRIx64 ", %#x>, "
"total_size: %u, frag_size: %u, last_idx: %u\n"
"first fragment: ofs: %u, len: %u\n"
@@ -331,11 +333,11 @@ ipv4_frag_process(struct ipv4_frag_pkt *fp, struct ipv4_frag_death_row *dr,
fp->frags[LAST_FRAG_IDX].len);
/* free associated resources. */
- ipv4_frag_free(fp, dr);
+ ip_frag_free(fp, dr);
}
/* we are done with that entry, invalidate it. */
- IPV4_FRAG_KEY_INVALIDATE(&fp->key);
+ IP_FRAG_KEY_INVALIDATE(&fp->key);
return (mb);
}
@@ -362,12 +364,12 @@ ipv4_frag_process(struct ipv4_frag_pkt *fp, struct ipv4_frag_death_row *dr,
* - not all fragments of the packet are collected yet.
*/
static inline struct rte_mbuf *
-ipv4_frag_mbuf(struct ipv4_frag_tbl *tbl, struct ipv4_frag_death_row *dr,
- struct rte_mbuf *mb, uint64_t tms, struct ipv4_hdr *ip_hdr,
- uint16_t ip_ofs, uint16_t ip_flag)
+rte_ipv4_reassemble_packet(struct ip_frag_tbl *tbl,
+ struct ip_frag_death_row *dr, struct rte_mbuf *mb, uint64_t tms,
+ struct ipv4_hdr *ip_hdr, uint16_t ip_ofs, uint16_t ip_flag)
{
- struct ipv4_frag_pkt *fp;
- struct ipv4_frag_key key;
+ struct ip_frag_pkt *fp;
+ struct ip_frag_key key;
const uint64_t *psd;
uint16_t ip_len;
@@ -379,7 +381,7 @@ ipv4_frag_mbuf(struct ipv4_frag_tbl *tbl, struct ipv4_frag_death_row *dr,
ip_len = (uint16_t)(rte_be_to_cpu_16(ip_hdr->total_length) -
mb->pkt.vlan_macip.f.l3_len);
- IPV4_FRAG_LOG(DEBUG, "%s:%d:\n"
+ IP_FRAG_LOG(DEBUG, "%s:%d:\n"
"mbuf: %p, tms: %" PRIu64
", key: <%" PRIx64 ", %#x>, ofs: %u, len: %u, flags: %#x\n"
"tbl: %p, max_cycles: %" PRIu64 ", entry_mask: %#x, "
@@ -390,12 +392,12 @@ ipv4_frag_mbuf(struct ipv4_frag_tbl *tbl, struct ipv4_frag_death_row *dr,
tbl->use_entries);
/* try to find/add entry into the fragment's table. */
- if ((fp = ipv4_frag_find(tbl, dr, &key, tms)) == NULL) {
- IPV4_FRAG_MBUF2DR(dr, mb);
- return (NULL);
+ if ((fp = ip_frag_find(tbl, dr, &key, tms)) == NULL) {
+ IP_FRAG_MBUF2DR(dr, mb);
+ return NULL;
}
- IPV4_FRAG_LOG(DEBUG, "%s:%d:\n"
+ IP_FRAG_LOG(DEBUG, "%s:%d:\n"
"tbl: %p, max_entries: %u, use_entries: %u\n"
"ipv4_frag_pkt: %p, key: <%" PRIx64 ", %#x>, start: %" PRIu64
", total_size: %u, frag_size: %u, last_idx: %u\n\n",
@@ -406,10 +408,10 @@ ipv4_frag_mbuf(struct ipv4_frag_tbl *tbl, struct ipv4_frag_death_row *dr,
/* process the fragmented packet. */
- mb = ipv4_frag_process(fp, dr, mb, ip_ofs, ip_len, ip_flag);
- ipv4_frag_inuse(tbl, fp);
+ mb = ip_frag_process(fp, dr, mb, ip_ofs, ip_len, ip_flag);
+ ip_frag_inuse(tbl, fp);
- IPV4_FRAG_LOG(DEBUG, "%s:%d:\n"
+ IP_FRAG_LOG(DEBUG, "%s:%d:\n"
"mbuf: %p\n"
"tbl: %p, max_entries: %u, use_entries: %u\n"
"ipv4_frag_pkt: %p, key: <%" PRIx64 ", %#x>, start: %" PRIu64
--
1.8.1.4
next prev parent reply other threads:[~2014-05-28 17:32 UTC|newest]
Thread overview: 18+ messages / expand[flat|nested] mbox.gz Atom feed top
2014-05-28 17:32 [dpdk-dev] [PATCH 00/13] *** SUBJECT HERE *** Anatoly Burakov
2014-05-28 17:32 ` [dpdk-dev] [PATCH 00/13] IPv4/IPv6 fragmentation/reassembly library Anatoly Burakov
2014-05-28 17:32 ` [dpdk-dev] [PATCH 01/13] ip_frag: Moving fragmentation/reassembly headers into a separate library Anatoly Burakov
2014-05-28 17:32 ` [dpdk-dev] [PATCH 02/13] Refactored IPv4 fragmentation into a proper library Anatoly Burakov
2014-05-28 17:32 ` [dpdk-dev] [PATCH 03/13] Fixing issues reported by checkpatch Anatoly Burakov
2014-05-28 17:32 ` [dpdk-dev] [PATCH 04/13] ip_frag: new internal common header Anatoly Burakov
2014-05-28 17:32 ` [dpdk-dev] [PATCH 05/13] ip_frag: removed unneeded check and macro Anatoly Burakov
2014-05-28 17:32 ` Anatoly Burakov [this message]
2014-05-28 17:32 ` [dpdk-dev] [PATCH 07/13] ip_frag: refactored reassembly code and made it a proper library Anatoly Burakov
2014-05-28 17:32 ` [dpdk-dev] [PATCH 08/13] ip_frag: renamed ipv4 frag function Anatoly Burakov
2014-05-28 17:32 ` [dpdk-dev] [PATCH 09/13] ip_frag: added IPv6 fragmentation support Anatoly Burakov
2014-05-28 17:32 ` [dpdk-dev] [PATCH 10/13] examples: renamed ipv4_frag example app to ip_fragmentation Anatoly Burakov
2014-05-28 17:32 ` [dpdk-dev] [PATCH 11/13] example: overhaul of ip_fragmentation example app Anatoly Burakov
2014-05-28 17:32 ` [dpdk-dev] [PATCH 12/13] ip_frag: add support for IPv6 reassembly Anatoly Burakov
2014-05-28 17:32 ` [dpdk-dev] [PATCH 13/13] examples: overhaul of ip_reassembly app Anatoly Burakov
2014-05-28 17:34 ` [dpdk-dev] [PATCH 00/13] *** SUBJECT HERE *** Burakov, Anatoly
2014-06-06 15:58 ` [dpdk-dev] [PATCH 00/13] IPv4/IPv6 fragmentation/reassembly library Cao, Waterman
2014-06-16 16:59 ` [dpdk-dev] [PATCH 00/13] IP fragmentation and reassembly Thomas Monjalon
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=3b2b8b8faa75b89f0ca0eea5ff5d14401fa3dd7a.1401298292.git.anatoly.burakov@intel.com \
--to=anatoly.burakov@intel.com \
--cc=dev@dpdk.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).