From: Vladimir Medvedkin <vladimir.medvedkin@intel.com>
To: dev@dpdk.org
Cc: konstantin.ananyev@intel.com, andrey.chilikin@intel.com,
ray.kinsella@intel.com, yipeng1.wang@intel.com,
sameh.gobriel@intel.com, bruce.richardson@intel.com,
david.marchand@redhat.com, kda@semihalf.com,
vladimir.medvedkin@intel.com
Subject: [dpdk-dev] [PATCH v2] hash: fix tuple adjustment
Date: Tue, 4 May 2021 15:25:04 +0100
Message-ID: <1620138304-203463-1-git-send-email-vladimir.medvedkin@intel.com>
In-Reply-To: <1620137248-203174-1-git-send-email-vladimir.medvedkin@intel.com>

rte_thash_adjust_tuple() uses random bits to generate a new subtuple if
the fn() callback reports a collision. In some cases the random bits
change the subtuple in such a way that, once the complementary bits are
applied, the original tuple is obtained again. This patch replaces the
random bits with a subtuple increment.
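
To illustrate the idea on a simplified, byte-aligned case (a standalone
sketch with hypothetical names, using a plain uint32_t instead of the
library's unaligned bit helpers): XORing random bits into the subtuple
can be cancelled by the subsequent complementary-bits XOR, whereas
incrementing modulo 2^len visits every distinct candidate exactly once
before wrapping:

	#include <stdint.h>
	#include <stdio.h>

	/* Return the next candidate subtuple: increment modulo 2^len
	 * (len < 32 assumed), so repeated attempts never revisit a
	 * value until all 2^len candidates are exhausted.
	 */
	static uint32_t
	next_subtuple(uint32_t subtuple, unsigned int len)
	{
		return (subtuple + 1) & ((1u << len) - 1);
	}

	int
	main(void)
	{
		uint32_t sub = 0x3e; /* 6-bit subtuple, example value */
		unsigned int i;

		for (i = 0; i < 4; i++) {
			printf("attempt %u: subtuple 0x%02x\n", i, sub);
			sub = next_subtuple(sub, 6); /* wraps 0x3f -> 0x00 */
		}
		return 0;
	}
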
Fixes: 28ebff11c2dc ("hash: add predictable RSS")
Cc: vladimir.medvedkin@intel.com
Signed-off-by: Vladimir Medvedkin <vladimir.medvedkin@intel.com>
---
lib/hash/rte_thash.c | 121 ++++++++++++++++++++++++++++++++++++++++++---------
1 file changed, 100 insertions(+), 21 deletions(-)
diff --git a/lib/hash/rte_thash.c b/lib/hash/rte_thash.c
index 135a26d..58129df 100644
--- a/lib/hash/rte_thash.c
+++ b/lib/hash/rte_thash.c
@@ -610,16 +610,91 @@ rte_thash_get_key(struct rte_thash_ctx *ctx)
 	return ctx->hash_key;
 }
 
+static inline uint8_t
+read_unaligned_byte(uint8_t *ptr, unsigned int len, unsigned int offset)
+{
+	uint8_t ret = 0;
+
+	ret = ptr[offset / CHAR_BIT];
+	if (offset % CHAR_BIT) {
+		ret <<= (offset % CHAR_BIT);
+		ret |= ptr[(offset / CHAR_BIT) + 1] >>
+			(CHAR_BIT - (offset % CHAR_BIT));
+	}
+
+	return ret >> (CHAR_BIT - len);
+}
+
+static inline uint32_t
+read_unaligned_bits(uint8_t *ptr, int len, int offset)
+{
+	uint32_t ret = 0;
+
+	len = RTE_MAX(len, 0);
+	len = RTE_MIN(len, (int)(sizeof(uint32_t) * CHAR_BIT));
+
+	while (len > 0) {
+		ret <<= CHAR_BIT;
+
+		ret |= read_unaligned_byte(ptr, RTE_MIN(len, CHAR_BIT),
+			offset);
+		offset += CHAR_BIT;
+		len -= CHAR_BIT;
+	}
+
+	return ret;
+}
+
+/* returns mask for len bits with given offset inside byte */
+static inline uint8_t
+get_bits_mask(unsigned int len, unsigned int offset)
+{
+	unsigned int last_bit;
+
+	offset %= CHAR_BIT;
+	/* last bit within byte */
+	last_bit = RTE_MIN((unsigned int)CHAR_BIT, offset + len);
+
+	return ((1 << (CHAR_BIT - offset)) - 1) ^
+		((1 << (CHAR_BIT - last_bit)) - 1);
+}
+
+static inline void
+write_unaligned_byte(uint8_t *ptr, unsigned int len,
+	unsigned int offset, uint8_t val)
+{
+	uint8_t tmp;
+
+	tmp = ptr[offset / CHAR_BIT];
+	tmp &= ~get_bits_mask(len, offset);
+	tmp |= ((val << (CHAR_BIT - len)) >> (offset % CHAR_BIT));
+	ptr[offset / CHAR_BIT] = tmp;
+	if (((offset + len) / CHAR_BIT) != (offset / CHAR_BIT)) {
+		int rest_len = (offset + len) % CHAR_BIT;
+		tmp = ptr[(offset + len) / CHAR_BIT];
+		tmp &= ~get_bits_mask(rest_len, 0);
+		tmp |= val << (CHAR_BIT - rest_len);
+		ptr[(offset + len) / CHAR_BIT] = tmp;
+	}
+}
+
 static inline void
-xor_bit(uint8_t *ptr, uint32_t bit, uint32_t pos)
+write_unaligned_bits(uint8_t *ptr, int len, int offset, uint32_t val)
 {
-	uint32_t byte_idx = pos >> 3;
-	uint32_t bit_idx = (CHAR_BIT - 1) - (pos & (CHAR_BIT - 1));
 	uint8_t tmp;
+	unsigned int part_len;
+
+	len = RTE_MAX(len, 0);
+	len = RTE_MIN(len, (int)(sizeof(uint32_t) * CHAR_BIT));
 
-	tmp = ptr[byte_idx];
-	tmp ^= bit << bit_idx;
-	ptr[byte_idx] = tmp;
+	while (len > 0) {
+		part_len = RTE_MIN(CHAR_BIT, len);
+		tmp = (uint8_t)val & ((1 << part_len) - 1);
+		write_unaligned_byte(ptr, part_len,
+			offset + len - part_len, tmp);
+		len -= CHAR_BIT;
+		val >>= CHAR_BIT;
+	}
 }
 
 int
@@ -632,8 +707,10 @@ rte_thash_adjust_tuple(struct rte_thash_ctx *ctx,
 	uint32_t tmp_tuple[tuple_len / sizeof(uint32_t)];
 	unsigned int i, j, ret = 0;
 	uint32_t hash, adj_bits;
-	uint8_t bit;
 	const uint8_t *hash_key;
+	uint32_t tmp;
+	int offset;
+	int tmp_len;
 
 	if ((ctx == NULL) || (h == NULL) || (tuple == NULL) ||
 			(tuple_len % sizeof(uint32_t) != 0) || (attempts <= 0))
@@ -641,6 +718,8 @@ rte_thash_adjust_tuple(struct rte_thash_ctx *ctx,
 
 	hash_key = rte_thash_get_key(ctx);
 
+	attempts = RTE_MIN(attempts, 1U << (h->tuple_len - ctx->reta_sz_log));
+
 	for (i = 0; i < attempts; i++) {
 		for (j = 0; j < (tuple_len / 4); j++)
 			tmp_tuple[j] =
@@ -651,14 +730,12 @@ rte_thash_adjust_tuple(struct rte_thash_ctx *ctx,
 
 		/*
 		 * Hint: LSB of adj_bits corresponds to
-		 * offset + len bit of tuple
+		 * offset + len bit of the subtuple
 		 */
-		for (j = 0; j < sizeof(uint32_t) * CHAR_BIT; j++) {
-			bit = (adj_bits >> j) & 0x1;
-			if (bit)
-				xor_bit(tuple, bit, h->tuple_offset +
-					h->tuple_len - 1 - j);
-		}
+		offset = h->tuple_offset + h->tuple_len - ctx->reta_sz_log;
+		tmp = read_unaligned_bits(tuple, ctx->reta_sz_log, offset);
+		tmp ^= adj_bits;
+		write_unaligned_bits(tuple, ctx->reta_sz_log, offset, tmp);
 
 		if (fn != NULL) {
 			ret = (fn(userdata, tuple)) ? 0 : -EEXIST;
@@ -666,13 +743,15 @@ rte_thash_adjust_tuple(struct rte_thash_ctx *ctx,
 				return 0;
 			else if (i < (attempts - 1)) {
-				/* Update tuple with random bits */
-				for (j = 0; j < h->tuple_len; j++) {
-					bit = rte_rand() & 0x1;
-					if (bit)
-						xor_bit(tuple, bit,
-							h->tuple_offset +
-							h->tuple_len - 1 - j);
-				}
+				/* increment subtuple part by 1 */
+				tmp_len = RTE_MIN(sizeof(uint32_t) * CHAR_BIT,
+					h->tuple_len - ctx->reta_sz_log);
+				offset -= tmp_len;
+				tmp = read_unaligned_bits(tuple, tmp_len,
+					offset);
+				tmp++;
+				tmp &= (1 << tmp_len) - 1;
+				write_unaligned_bits(tuple, tmp_len, offset,
+					tmp);
 			}
 		} else
 			return 0;
--
2.7.4