* [dpdk-dev] [PATCH] hash: fix tuple adjustment
@ 2021-05-04 14:07 Vladimir Medvedkin
2021-05-04 14:25 ` [dpdk-dev] [PATCH v2] " Vladimir Medvedkin
0 siblings, 1 reply; 7+ messages in thread
From: Vladimir Medvedkin @ 2021-05-04 14:07 UTC (permalink / raw)
To: dev
Cc: konstantin.ananyev, andrey.chilikin, ray.kinsella, yipeng1.wang,
sameh.gobriel, bruce.richardson, david.marchand, kda,
vladimir.medvedkin
rte_thash_adjust_tuple() uses random bits to generate a new subtuple
if the fn() callback reports a collision. In some cases the random
change modifies the subtuple in such a way that, after the
complementary bits are applied, the original tuple is obtained again.
This patch replaces the randomization with a subtuple increment.
Fixes: 28ebff11c2dc ("hash: add predictable RSS")
Cc: vladimir.medvedkin@intel.com
Signed-off-by: Vladimir Medvedkin <vladimir.medvedkin@intel.com>
---
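Note: the replacement retry step is easiest to see on a plain integer
model of the incremented field. The sketch below is standalone and
purely illustrative (it is not the patch's byte-level helpers; names
and values are made up):

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative model: the field being retried is the low `len`
     * bits of a uint32_t.
     */
    static uint32_t
    increment_field(uint32_t val, int len)
    {
        val++;
        val &= (1u << len) - 1;    /* wrap around after 2^len values */
        return val;
    }

    int
    main(void)
    {
        uint32_t v = 0x3e;    /* arbitrary starting value */
        int len = 6;          /* arbitrary field width in bits */
        int i;

        for (i = 0; i < 4; i++) {
            printf("attempt %d: field = 0x%02x\n", i, v);
            v = increment_field(v, len);
        }
        return 0;
    }

Because the increment wraps modulo 2^len, a retry can only come back
to the starting value after every other candidate has been tried,
which the random bit flips it replaces could not guarantee.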
lib/librte_hash/rte_thash.c | 121 ++++++++++++++++++++++++++++++++++++--------
1 file changed, 100 insertions(+), 21 deletions(-)
diff --git a/lib/librte_hash/rte_thash.c b/lib/librte_hash/rte_thash.c
index 135a26d..58129df 100644
--- a/lib/librte_hash/rte_thash.c
+++ b/lib/librte_hash/rte_thash.c
@@ -610,16 +610,91 @@ rte_thash_get_key(struct rte_thash_ctx *ctx)
return ctx->hash_key;
}
+static inline uint8_t
+read_unaligned_byte(uint8_t *ptr, unsigned int len, unsigned int offset)
+{
+ uint8_t ret = 0;
+
+ ret = ptr[offset / CHAR_BIT];
+ if (offset % CHAR_BIT) {
+ ret <<= (offset % CHAR_BIT);
+ ret |= ptr[(offset / CHAR_BIT) + 1] >>
+ (CHAR_BIT - (offset % CHAR_BIT));
+ }
+
+ return ret >> (CHAR_BIT - len);
+}
+
+static inline uint32_t
+read_unaligned_bits(uint8_t *ptr, int len, int offset)
+{
+ uint32_t ret = 0;
+
+ len = RTE_MAX(len, 0);
+ len = RTE_MIN(len, (int)(sizeof(uint32_t) * CHAR_BIT));
+
+ while (len > 0) {
+ ret <<= CHAR_BIT;
+
+ ret |= read_unaligned_byte(ptr, RTE_MIN(len, CHAR_BIT),
+ offset);
+ offset += CHAR_BIT;
+ len -= CHAR_BIT;
+ }
+
+ return ret;
+}
+
+/* returns mask for len bits with given offset inside byte */
+static inline uint8_t
+get_bits_mask(unsigned int len, unsigned int offset)
+{
+ unsigned int last_bit;
+
+ offset %= CHAR_BIT;
+ /* last bit within byte */
+ last_bit = RTE_MIN((unsigned int)CHAR_BIT, offset + len);
+
+ return ((1 << (CHAR_BIT - offset)) - 1) ^
+ ((1 << (CHAR_BIT - last_bit)) - 1);
+}
+
+static inline void
+write_unaligned_byte(uint8_t *ptr, unsigned int len,
+ unsigned int offset, uint8_t val)
+{
+ uint8_t tmp;
+
+ tmp = ptr[offset / CHAR_BIT];
+ tmp &= ~get_bits_mask(len, offset);
+ tmp |= ((val << (CHAR_BIT - len)) >> (offset % CHAR_BIT));
+ ptr[offset / CHAR_BIT] = tmp;
+ if (((offset + len) / CHAR_BIT) != (offset / CHAR_BIT)) {
+ int rest_len = (offset + len) % CHAR_BIT;
+ tmp = ptr[(offset + len) / CHAR_BIT];
+ tmp &= ~get_bits_mask(rest_len, 0);
+ tmp |= val << (CHAR_BIT - rest_len);
+ ptr[(offset + len) / CHAR_BIT] = tmp;
+ }
+}
+
static inline void
-xor_bit(uint8_t *ptr, uint32_t bit, uint32_t pos)
+write_unaligned_bits(uint8_t *ptr, int len, int offset, uint32_t val)
{
- uint32_t byte_idx = pos >> 3;
- uint32_t bit_idx = (CHAR_BIT - 1) - (pos & (CHAR_BIT - 1));
uint8_t tmp;
+ unsigned int part_len;
+
+ len = RTE_MAX(len, 0);
+ len = RTE_MIN(len, (int)(sizeof(uint32_t) * CHAR_BIT));
- tmp = ptr[byte_idx];
- tmp ^= bit << bit_idx;
- ptr[byte_idx] = tmp;
+ while (len > 0) {
+ part_len = RTE_MIN(CHAR_BIT, len);
+ tmp = (uint8_t)val & ((1 << part_len) - 1);
+ write_unaligned_byte(ptr, part_len,
+ offset + len - part_len, tmp);
+ len -= CHAR_BIT;
+ val >>= CHAR_BIT;
+ }
}
int
@@ -632,8 +707,10 @@ rte_thash_adjust_tuple(struct rte_thash_ctx *ctx,
uint32_t tmp_tuple[tuple_len / sizeof(uint32_t)];
unsigned int i, j, ret = 0;
uint32_t hash, adj_bits;
- uint8_t bit;
const uint8_t *hash_key;
+ uint32_t tmp;
+ int offset;
+ int tmp_len;
if ((ctx == NULL) || (h == NULL) || (tuple == NULL) ||
(tuple_len % sizeof(uint32_t) != 0) || (attempts <= 0))
@@ -641,6 +718,8 @@ rte_thash_adjust_tuple(struct rte_thash_ctx *ctx,
hash_key = rte_thash_get_key(ctx);
+ attempts = RTE_MIN(attempts, 1U << (h->tuple_len - ctx->reta_sz_log));
+
for (i = 0; i < attempts; i++) {
for (j = 0; j < (tuple_len / 4); j++)
tmp_tuple[j] =
@@ -651,14 +730,12 @@ rte_thash_adjust_tuple(struct rte_thash_ctx *ctx,
/*
* Hint: LSB of adj_bits corresponds to
- * offset + len bit of tuple
+ * offset + len bit of the subtuple
*/
- for (j = 0; j < sizeof(uint32_t) * CHAR_BIT; j++) {
- bit = (adj_bits >> j) & 0x1;
- if (bit)
- xor_bit(tuple, bit, h->tuple_offset +
- h->tuple_len - 1 - j);
- }
+ offset = h->tuple_offset + h->tuple_len - ctx->reta_sz_log;
+ tmp = read_unaligned_bits(tuple, ctx->reta_sz_log, offset);
+ tmp ^= adj_bits;
+ write_unaligned_bits(tuple, ctx->reta_sz_log, offset, tmp);
if (fn != NULL) {
ret = (fn(userdata, tuple)) ? 0 : -EEXIST;
@@ -666,13 +743,15 @@ rte_thash_adjust_tuple(struct rte_thash_ctx *ctx,
return 0;
else if (i < (attempts - 1)) {
/* Update tuple with random bits */
- for (j = 0; j < h->tuple_len; j++) {
- bit = rte_rand() & 0x1;
- if (bit)
- xor_bit(tuple, bit,
- h->tuple_offset +
- h->tuple_len - 1 - j);
- }
+ tmp_len = RTE_MIN(sizeof(uint32_t) * CHAR_BIT,
+ h->tuple_len - ctx->reta_sz_log);
+ offset -= tmp_len;
+ tmp = read_unaligned_bits(tuple, tmp_len,
+ offset);
+ tmp++;
+ tmp &= (1 << tmp_len) - 1;
+ write_unaligned_bits(tuple, tmp_len, offset,
+ tmp);
}
} else
return 0;
--
2.7.4
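To visualize the adj_bits hunk above on a plain integer: applying the
complementary bits is an XOR confined to the reta_sz_log bits at the
end of the tuple region. A standalone sketch (names and values are
illustrative only; the library itself works on the packed byte array):

    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
        /* Hypothetical values: a 32-bit subtuple, 7-bit RETA index. */
        uint32_t subtuple = 0x12345678;
        uint32_t adj_bits = 0x5a;    /* stand-in for the complementary bits */
        unsigned int reta_sz_log = 7;
        uint32_t mask = (1u << reta_sz_log) - 1;

        /* The patch reads the reta_sz_log bits that end the region,
         * XORs them with adj_bits and writes them back; on an integer
         * this collapses to a single masked XOR.
         */
        subtuple = (subtuple & ~mask) | ((subtuple ^ adj_bits) & mask);

        printf("adjusted subtuple = 0x%08x\n", subtuple);
        return 0;
    }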
* [dpdk-dev] [PATCH v2] hash: fix tuple adjustment
2021-05-04 14:07 [dpdk-dev] [PATCH] hash: fix tuple adjustment Vladimir Medvedkin
@ 2021-05-04 14:25 ` Vladimir Medvedkin
2021-05-05 10:16 ` Thomas Monjalon
` (3 more replies)
0 siblings, 4 replies; 7+ messages in thread
From: Vladimir Medvedkin @ 2021-05-04 14:25 UTC (permalink / raw)
To: dev
Cc: konstantin.ananyev, andrey.chilikin, ray.kinsella, yipeng1.wang,
sameh.gobriel, bruce.richardson, david.marchand, kda,
vladimir.medvedkin
rte_thash_adjust_tuple() uses random bits to generate a new subtuple
if the fn() callback reports a collision. In some cases the random
change modifies the subtuple in such a way that, after the
complementary bits are applied, the original tuple is obtained again.
This patch replaces the randomization with a subtuple increment.
Fixes: 28ebff11c2dc ("hash: add predictable RSS")
Cc: vladimir.medvedkin@intel.com
Signed-off-by: Vladimir Medvedkin <vladimir.medvedkin@intel.com>
---
lib/hash/rte_thash.c | 121 ++++++++++++++++++++++++++++++++++++++++++---------
1 file changed, 100 insertions(+), 21 deletions(-)
diff --git a/lib/hash/rte_thash.c b/lib/hash/rte_thash.c
index 135a26d..58129df 100644
--- a/lib/hash/rte_thash.c
+++ b/lib/hash/rte_thash.c
@@ -610,16 +610,91 @@ rte_thash_get_key(struct rte_thash_ctx *ctx)
return ctx->hash_key;
}
+static inline uint8_t
+read_unaligned_byte(uint8_t *ptr, unsigned int len, unsigned int offset)
+{
+ uint8_t ret = 0;
+
+ ret = ptr[offset / CHAR_BIT];
+ if (offset % CHAR_BIT) {
+ ret <<= (offset % CHAR_BIT);
+ ret |= ptr[(offset / CHAR_BIT) + 1] >>
+ (CHAR_BIT - (offset % CHAR_BIT));
+ }
+
+ return ret >> (CHAR_BIT - len);
+}
+
+static inline uint32_t
+read_unaligned_bits(uint8_t *ptr, int len, int offset)
+{
+ uint32_t ret = 0;
+
+ len = RTE_MAX(len, 0);
+ len = RTE_MIN(len, (int)(sizeof(uint32_t) * CHAR_BIT));
+
+ while (len > 0) {
+ ret <<= CHAR_BIT;
+
+ ret |= read_unaligned_byte(ptr, RTE_MIN(len, CHAR_BIT),
+ offset);
+ offset += CHAR_BIT;
+ len -= CHAR_BIT;
+ }
+
+ return ret;
+}
+
+/* returns mask for len bits with given offset inside byte */
+static inline uint8_t
+get_bits_mask(unsigned int len, unsigned int offset)
+{
+ unsigned int last_bit;
+
+ offset %= CHAR_BIT;
+ /* last bit within byte */
+ last_bit = RTE_MIN((unsigned int)CHAR_BIT, offset + len);
+
+ return ((1 << (CHAR_BIT - offset)) - 1) ^
+ ((1 << (CHAR_BIT - last_bit)) - 1);
+}
+
+static inline void
+write_unaligned_byte(uint8_t *ptr, unsigned int len,
+ unsigned int offset, uint8_t val)
+{
+ uint8_t tmp;
+
+ tmp = ptr[offset / CHAR_BIT];
+ tmp &= ~get_bits_mask(len, offset);
+ tmp |= ((val << (CHAR_BIT - len)) >> (offset % CHAR_BIT));
+ ptr[offset / CHAR_BIT] = tmp;
+ if (((offset + len) / CHAR_BIT) != (offset / CHAR_BIT)) {
+ int rest_len = (offset + len) % CHAR_BIT;
+ tmp = ptr[(offset + len) / CHAR_BIT];
+ tmp &= ~get_bits_mask(rest_len, 0);
+ tmp |= val << (CHAR_BIT - rest_len);
+ ptr[(offset + len) / CHAR_BIT] = tmp;
+ }
+}
+
static inline void
-xor_bit(uint8_t *ptr, uint32_t bit, uint32_t pos)
+write_unaligned_bits(uint8_t *ptr, int len, int offset, uint32_t val)
{
- uint32_t byte_idx = pos >> 3;
- uint32_t bit_idx = (CHAR_BIT - 1) - (pos & (CHAR_BIT - 1));
uint8_t tmp;
+ unsigned int part_len;
+
+ len = RTE_MAX(len, 0);
+ len = RTE_MIN(len, (int)(sizeof(uint32_t) * CHAR_BIT));
- tmp = ptr[byte_idx];
- tmp ^= bit << bit_idx;
- ptr[byte_idx] = tmp;
+ while (len > 0) {
+ part_len = RTE_MIN(CHAR_BIT, len);
+ tmp = (uint8_t)val & ((1 << part_len) - 1);
+ write_unaligned_byte(ptr, part_len,
+ offset + len - part_len, tmp);
+ len -= CHAR_BIT;
+ val >>= CHAR_BIT;
+ }
}
int
@@ -632,8 +707,10 @@ rte_thash_adjust_tuple(struct rte_thash_ctx *ctx,
uint32_t tmp_tuple[tuple_len / sizeof(uint32_t)];
unsigned int i, j, ret = 0;
uint32_t hash, adj_bits;
- uint8_t bit;
const uint8_t *hash_key;
+ uint32_t tmp;
+ int offset;
+ int tmp_len;
if ((ctx == NULL) || (h == NULL) || (tuple == NULL) ||
(tuple_len % sizeof(uint32_t) != 0) || (attempts <= 0))
@@ -641,6 +718,8 @@ rte_thash_adjust_tuple(struct rte_thash_ctx *ctx,
hash_key = rte_thash_get_key(ctx);
+ attempts = RTE_MIN(attempts, 1U << (h->tuple_len - ctx->reta_sz_log));
+
for (i = 0; i < attempts; i++) {
for (j = 0; j < (tuple_len / 4); j++)
tmp_tuple[j] =
@@ -651,14 +730,12 @@ rte_thash_adjust_tuple(struct rte_thash_ctx *ctx,
/*
* Hint: LSB of adj_bits corresponds to
- * offset + len bit of tuple
+ * offset + len bit of the subtuple
*/
- for (j = 0; j < sizeof(uint32_t) * CHAR_BIT; j++) {
- bit = (adj_bits >> j) & 0x1;
- if (bit)
- xor_bit(tuple, bit, h->tuple_offset +
- h->tuple_len - 1 - j);
- }
+ offset = h->tuple_offset + h->tuple_len - ctx->reta_sz_log;
+ tmp = read_unaligned_bits(tuple, ctx->reta_sz_log, offset);
+ tmp ^= adj_bits;
+ write_unaligned_bits(tuple, ctx->reta_sz_log, offset, tmp);
if (fn != NULL) {
ret = (fn(userdata, tuple)) ? 0 : -EEXIST;
@@ -666,13 +743,15 @@ rte_thash_adjust_tuple(struct rte_thash_ctx *ctx,
return 0;
else if (i < (attempts - 1)) {
/* Update tuple with random bits */
- for (j = 0; j < h->tuple_len; j++) {
- bit = rte_rand() & 0x1;
- if (bit)
- xor_bit(tuple, bit,
- h->tuple_offset +
- h->tuple_len - 1 - j);
- }
+ tmp_len = RTE_MIN(sizeof(uint32_t) * CHAR_BIT,
+ h->tuple_len - ctx->reta_sz_log);
+ offset -= tmp_len;
+ tmp = read_unaligned_bits(tuple, tmp_len,
+ offset);
+ tmp++;
+ tmp &= (1 << tmp_len) - 1;
+ write_unaligned_bits(tuple, tmp_len, offset,
+ tmp);
}
} else
return 0;
--
2.7.4
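A note on the attempts clamp in the hunk above: only the bits in front
of the reta_sz_log window are incremented between retries, so the
number of distinct candidates is bounded by
2^(tuple_len - reta_sz_log), which is presumably why the patch caps
attempts at that value. A rough standalone illustration (made-up
values; a plain MIN macro stands in for RTE_MIN so the snippet builds
without DPDK headers):

    #include <stdio.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))    /* stand-in for RTE_MIN */

    int
    main(void)
    {
        /* Hypothetical helper configuration: 32-bit tuple region and
         * a 128-entry RETA (reta_sz_log = 7).
         */
        unsigned int tuple_len = 32;
        unsigned int reta_sz_log = 7;
        unsigned int attempts = 1u << 30;    /* caller asked for a lot */

        /* No more than 2^(tuple_len - reta_sz_log) distinct subtuple
         * candidates exist, so extra retries cannot help.
         */
        attempts = MIN(attempts, 1u << (tuple_len - reta_sz_log));
        printf("attempts clamped to %u\n", attempts);
        return 0;
    }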
* Re: [dpdk-dev] [PATCH v2] hash: fix tuple adjustment
2021-05-04 14:25 ` [dpdk-dev] [PATCH v2] " Vladimir Medvedkin
@ 2021-05-05 10:16 ` Thomas Monjalon
2021-05-05 17:57 ` Wang, Yipeng1
` (2 subsequent siblings)
3 siblings, 0 replies; 7+ messages in thread
From: Thomas Monjalon @ 2021-05-05 10:16 UTC (permalink / raw)
To: Vladimir Medvedkin
Cc: dev, konstantin.ananyev, andrey.chilikin, ray.kinsella,
yipeng1.wang, sameh.gobriel, bruce.richardson, david.marchand,
kda
04/05/2021 16:25, Vladimir Medvedkin:
> rte_thash_adjust_tuple() uses random bits to generate a new subtuple
> if the fn() callback reports a collision. In some cases the random
> change modifies the subtuple in such a way that, after the
> complementary bits are applied, the original tuple is obtained again.
> This patch replaces the randomization with a subtuple increment.
>
> Fixes: 28ebff11c2dc ("hash: add predictable RSS")
> Cc: vladimir.medvedkin@intel.com
>
> Signed-off-by: Vladimir Medvedkin <vladimir.medvedkin@intel.com>
> ---
> lib/hash/rte_thash.c | 121 ++++++++++++++++++++++++++++++++++++++++++---------
> 1 file changed, 100 insertions(+), 21 deletions(-)
Waiting for a review.
It is a complex change.
* Re: [dpdk-dev] [PATCH v2] hash: fix tuple adjustment
2021-05-04 14:25 ` [dpdk-dev] [PATCH v2] " Vladimir Medvedkin
2021-05-05 10:16 ` Thomas Monjalon
@ 2021-05-05 17:57 ` Wang, Yipeng1
2021-05-05 19:13 ` Stanislaw Kardach
2021-05-06 11:40 ` [dpdk-dev] [PATCH v3] " Vladimir Medvedkin
3 siblings, 0 replies; 7+ messages in thread
From: Wang, Yipeng1 @ 2021-05-05 17:57 UTC (permalink / raw)
To: Medvedkin, Vladimir, dev
Cc: Ananyev, Konstantin, Chilikin, Andrey, Kinsella, Ray, Gobriel,
Sameh, Richardson, Bruce, david.marchand, kda
> -----Original Message-----
> From: Medvedkin, Vladimir <vladimir.medvedkin@intel.com>
> Sent: Tuesday, May 4, 2021 7:25 AM
> To: dev@dpdk.org
> Cc: Ananyev, Konstantin <konstantin.ananyev@intel.com>; Chilikin, Andrey
> <andrey.chilikin@intel.com>; Kinsella, Ray <ray.kinsella@intel.com>; Wang,
> Yipeng1 <yipeng1.wang@intel.com>; Gobriel, Sameh
> <sameh.gobriel@intel.com>; Richardson, Bruce
> <bruce.richardson@intel.com>; david.marchand@redhat.com;
> kda@semihalf.com; Medvedkin, Vladimir <vladimir.medvedkin@intel.com>
> Subject: [PATCH v2] hash: fix tuple adjustment
>
> rte_thash_adjust_tuple() uses random bits to generate a new subtuple
> if the fn() callback reports a collision. In some cases the random
> change modifies the subtuple in such a way that, after the
> complementary bits are applied, the original tuple is obtained again.
> This patch replaces the randomization with a subtuple increment.
>
> Fixes: 28ebff11c2dc ("hash: add predictable RSS")
> Cc: vladimir.medvedkin@intel.com
>
> Signed-off-by: Vladimir Medvedkin <vladimir.medvedkin@intel.com>
> ---
Acked-by: Yipeng Wang <yipeng1.wang@intel.com>
* Re: [dpdk-dev] [PATCH v2] hash: fix tuple adjustment
2021-05-04 14:25 ` [dpdk-dev] [PATCH v2] " Vladimir Medvedkin
2021-05-05 10:16 ` Thomas Monjalon
2021-05-05 17:57 ` Wang, Yipeng1
@ 2021-05-05 19:13 ` Stanislaw Kardach
2021-05-06 11:40 ` [dpdk-dev] [PATCH v3] " Vladimir Medvedkin
3 siblings, 0 replies; 7+ messages in thread
From: Stanislaw Kardach @ 2021-05-05 19:13 UTC (permalink / raw)
To: Vladimir Medvedkin
Cc: dev, konstantin.ananyev, andrey.chilikin, ray.kinsella,
yipeng1.wang, sameh.gobriel, bruce.richardson, david.marchand
On Tue, May 04, 2021 at 03:25:04PM +0100, Vladimir Medvedkin wrote:
> rte_thash_adjust_tuple() uses random bits to generate a new subtuple
> if the fn() callback reports a collision. In some cases the random
> change modifies the subtuple in such a way that, after the
> complementary bits are applied, the original tuple is obtained again.
> This patch replaces the randomization with a subtuple increment.
>
> Fixes: 28ebff11c2dc ("hash: add predictable RSS")
> Cc: vladimir.medvedkin@intel.com
>
> Signed-off-by: Vladimir Medvedkin <vladimir.medvedkin@intel.com>
> ---
> lib/hash/rte_thash.c | 121 ++++++++++++++++++++++++++++++++++++++++++---------
> 1 file changed, 100 insertions(+), 21 deletions(-)
>
> diff --git a/lib/hash/rte_thash.c b/lib/hash/rte_thash.c
> index 135a26d..58129df 100644
> --- a/lib/hash/rte_thash.c
> +++ b/lib/hash/rte_thash.c
> @@ -610,16 +610,91 @@ rte_thash_get_key(struct rte_thash_ctx *ctx)
> return ctx->hash_key;
> }
>
> +static inline uint8_t
> +read_unaligned_byte(uint8_t *ptr, unsigned int len, unsigned int offset)
> +{
> + uint8_t ret = 0;
> +
> + ret = ptr[offset / CHAR_BIT];
> + if (offset % CHAR_BIT) {
> + ret <<= (offset % CHAR_BIT);
> + ret |= ptr[(offset / CHAR_BIT) + 1] >>
> + (CHAR_BIT - (offset % CHAR_BIT));
> + }
> +
> + return ret >> (CHAR_BIT - len);
> +}
> +
> +static inline uint32_t
> +read_unaligned_bits(uint8_t *ptr, int len, int offset)
> +{
> + uint32_t ret = 0;
> +
> + len = RTE_MAX(len, 0);
> + len = RTE_MIN(len, (int)(sizeof(uint32_t) * CHAR_BIT));
> +
> + while (len > 0) {
> + ret <<= CHAR_BIT;
> +
> + ret |= read_unaligned_byte(ptr, RTE_MIN(len, CHAR_BIT),
> + offset);
> + offset += CHAR_BIT;
> + len -= CHAR_BIT;
> + }
> +
> + return ret;
> +}
> +
> +/* returns mask for len bits with given offset inside byte */
> +static inline uint8_t
> +get_bits_mask(unsigned int len, unsigned int offset)
> +{
> + unsigned int last_bit;
> +
> + offset %= CHAR_BIT;
> + /* last bit within byte */
> + last_bit = RTE_MIN((unsigned int)CHAR_BIT, offset + len);
> +
> + return ((1 << (CHAR_BIT - offset)) - 1) ^
> + ((1 << (CHAR_BIT - last_bit)) - 1);
> +}
> +
> +static inline void
> +write_unaligned_byte(uint8_t *ptr, unsigned int len,
> + unsigned int offset, uint8_t val)
> +{
> + uint8_t tmp;
> +
> + tmp = ptr[offset / CHAR_BIT];
> + tmp &= ~get_bits_mask(len, offset);
> + tmp |= ((val << (CHAR_BIT - len)) >> (offset % CHAR_BIT));
> + ptr[offset / CHAR_BIT] = tmp;
> + if (((offset + len) / CHAR_BIT) != (offset / CHAR_BIT)) {
> + int rest_len = (offset + len) % CHAR_BIT;
> + tmp = ptr[(offset + len) / CHAR_BIT];
> + tmp &= ~get_bits_mask(rest_len, 0);
> + tmp |= val << (CHAR_BIT - rest_len);
> + ptr[(offset + len) / CHAR_BIT] = tmp;
> + }
> +}
> +
> static inline void
> -xor_bit(uint8_t *ptr, uint32_t bit, uint32_t pos)
> +write_unaligned_bits(uint8_t *ptr, int len, int offset, uint32_t val)
> {
> - uint32_t byte_idx = pos >> 3;
> - uint32_t bit_idx = (CHAR_BIT - 1) - (pos & (CHAR_BIT - 1));
> uint8_t tmp;
> + unsigned int part_len;
> +
> + len = RTE_MAX(len, 0);
> + len = RTE_MIN(len, (int)(sizeof(uint32_t) * CHAR_BIT));
>
> - tmp = ptr[byte_idx];
> - tmp ^= bit << bit_idx;
> - ptr[byte_idx] = tmp;
> + while (len > 0) {
> + part_len = RTE_MIN(CHAR_BIT, len);
> + tmp = (uint8_t)val & ((1 << part_len) - 1);
> + write_unaligned_byte(ptr, part_len,
> + offset + len - part_len, tmp);
> + len -= CHAR_BIT;
> + val >>= CHAR_BIT;
> + }
> }
>
> int
> @@ -632,8 +707,10 @@ rte_thash_adjust_tuple(struct rte_thash_ctx *ctx,
> uint32_t tmp_tuple[tuple_len / sizeof(uint32_t)];
> unsigned int i, j, ret = 0;
> uint32_t hash, adj_bits;
> - uint8_t bit;
> const uint8_t *hash_key;
> + uint32_t tmp;
> + int offset;
> + int tmp_len;
>
> if ((ctx == NULL) || (h == NULL) || (tuple == NULL) ||
> (tuple_len % sizeof(uint32_t) != 0) || (attempts <= 0))
> @@ -641,6 +718,8 @@ rte_thash_adjust_tuple(struct rte_thash_ctx *ctx,
>
> hash_key = rte_thash_get_key(ctx);
>
> + attempts = RTE_MIN(attempts, 1U << (h->tuple_len - ctx->reta_sz_log));
> +
> for (i = 0; i < attempts; i++) {
> for (j = 0; j < (tuple_len / 4); j++)
> tmp_tuple[j] =
> @@ -651,14 +730,12 @@ rte_thash_adjust_tuple(struct rte_thash_ctx *ctx,
>
> /*
> * Hint: LSB of adj_bits corresponds to
> - * offset + len bit of tuple
> + * offset + len bit of the subtuple
> */
> - for (j = 0; j < sizeof(uint32_t) * CHAR_BIT; j++) {
> - bit = (adj_bits >> j) & 0x1;
> - if (bit)
> - xor_bit(tuple, bit, h->tuple_offset +
> - h->tuple_len - 1 - j);
> - }
> + offset = h->tuple_offset + h->tuple_len - ctx->reta_sz_log;
> + tmp = read_unaligned_bits(tuple, ctx->reta_sz_log, offset);
> + tmp ^= adj_bits;
> + write_unaligned_bits(tuple, ctx->reta_sz_log, offset, tmp);
>
> if (fn != NULL) {
> ret = (fn(userdata, tuple)) ? 0 : -EEXIST;
> @@ -666,13 +743,15 @@ rte_thash_adjust_tuple(struct rte_thash_ctx *ctx,
> return 0;
> else if (i < (attempts - 1)) {
Small nit. This comment should be updated as the bits aren't random
anymore, just incremented.
> /* Update tuple with random bits */
> - for (j = 0; j < h->tuple_len; j++) {
> - bit = rte_rand() & 0x1;
> - if (bit)
> - xor_bit(tuple, bit,
> - h->tuple_offset +
> - h->tuple_len - 1 - j);
> - }
> + tmp_len = RTE_MIN(sizeof(uint32_t) * CHAR_BIT,
> + h->tuple_len - ctx->reta_sz_log);
> + offset -= tmp_len;
> + tmp = read_unaligned_bits(tuple, tmp_len,
> + offset);
> + tmp++;
> + tmp &= (1 << tmp_len) - 1;
> + write_unaligned_bits(tuple, tmp_len, offset,
> + tmp);
> }
> } else
> return 0;
> --
> 2.7.4
>
This makes the issue no longer reproducible on either x86 or RISC-V.
Tested-by: Stanislaw Kardach <kda@semihalf.com>
Reviewed-by: Stanislaw Kardach <kda@semihalf.com>
--
Best Regards,
Stanislaw Kardach
* [dpdk-dev] [PATCH v3] hash: fix tuple adjustment
2021-05-04 14:25 ` [dpdk-dev] [PATCH v2] " Vladimir Medvedkin
` (2 preceding siblings ...)
2021-05-05 19:13 ` Stanislaw Kardach
@ 2021-05-06 11:40 ` Vladimir Medvedkin
2021-05-10 13:25 ` Thomas Monjalon
3 siblings, 1 reply; 7+ messages in thread
From: Vladimir Medvedkin @ 2021-05-06 11:40 UTC (permalink / raw)
To: dev
Cc: konstantin.ananyev, andrey.chilikin, ray.kinsella, yipeng1.wang,
sameh.gobriel, bruce.richardson, david.marchand, kda,
vladimir.medvedkin
rte_thash_adjust_tuple() uses random bits to generate a new subtuple
if the fn() callback reports a collision. In some cases the random
change modifies the subtuple in such a way that, after the
complementary bits are applied, the original tuple is obtained again.
This patch replaces the randomization with a subtuple increment.
Fixes: 28ebff11c2dc ("hash: add predictable RSS")
Cc: vladimir.medvedkin@intel.com
Signed-off-by: Vladimir Medvedkin <vladimir.medvedkin@intel.com>
Acked-by: Yipeng Wang <yipeng1.wang@intel.com>
Tested-by: Stanislaw Kardach <kda@semihalf.com>
Reviewed-by: Stanislaw Kardach <kda@semihalf.com>
---
Notes:
v3:
- fix comment
v2:
- fix path from lib/librte_hash/ to lib/hash/
lib/hash/rte_thash.c | 123 ++++++++++++++++++++++++++++++++++++++++++---------
1 file changed, 101 insertions(+), 22 deletions(-)
diff --git a/lib/hash/rte_thash.c b/lib/hash/rte_thash.c
index 135a26d..d5a95a6 100644
--- a/lib/hash/rte_thash.c
+++ b/lib/hash/rte_thash.c
@@ -610,16 +610,91 @@ rte_thash_get_key(struct rte_thash_ctx *ctx)
return ctx->hash_key;
}
+static inline uint8_t
+read_unaligned_byte(uint8_t *ptr, unsigned int len, unsigned int offset)
+{
+ uint8_t ret = 0;
+
+ ret = ptr[offset / CHAR_BIT];
+ if (offset % CHAR_BIT) {
+ ret <<= (offset % CHAR_BIT);
+ ret |= ptr[(offset / CHAR_BIT) + 1] >>
+ (CHAR_BIT - (offset % CHAR_BIT));
+ }
+
+ return ret >> (CHAR_BIT - len);
+}
+
+static inline uint32_t
+read_unaligned_bits(uint8_t *ptr, int len, int offset)
+{
+ uint32_t ret = 0;
+
+ len = RTE_MAX(len, 0);
+ len = RTE_MIN(len, (int)(sizeof(uint32_t) * CHAR_BIT));
+
+ while (len > 0) {
+ ret <<= CHAR_BIT;
+
+ ret |= read_unaligned_byte(ptr, RTE_MIN(len, CHAR_BIT),
+ offset);
+ offset += CHAR_BIT;
+ len -= CHAR_BIT;
+ }
+
+ return ret;
+}
+
+/* returns mask for len bits with given offset inside byte */
+static inline uint8_t
+get_bits_mask(unsigned int len, unsigned int offset)
+{
+ unsigned int last_bit;
+
+ offset %= CHAR_BIT;
+ /* last bit within byte */
+ last_bit = RTE_MIN((unsigned int)CHAR_BIT, offset + len);
+
+ return ((1 << (CHAR_BIT - offset)) - 1) ^
+ ((1 << (CHAR_BIT - last_bit)) - 1);
+}
+
+static inline void
+write_unaligned_byte(uint8_t *ptr, unsigned int len,
+ unsigned int offset, uint8_t val)
+{
+ uint8_t tmp;
+
+ tmp = ptr[offset / CHAR_BIT];
+ tmp &= ~get_bits_mask(len, offset);
+ tmp |= ((val << (CHAR_BIT - len)) >> (offset % CHAR_BIT));
+ ptr[offset / CHAR_BIT] = tmp;
+ if (((offset + len) / CHAR_BIT) != (offset / CHAR_BIT)) {
+ int rest_len = (offset + len) % CHAR_BIT;
+ tmp = ptr[(offset + len) / CHAR_BIT];
+ tmp &= ~get_bits_mask(rest_len, 0);
+ tmp |= val << (CHAR_BIT - rest_len);
+ ptr[(offset + len) / CHAR_BIT] = tmp;
+ }
+}
+
static inline void
-xor_bit(uint8_t *ptr, uint32_t bit, uint32_t pos)
+write_unaligned_bits(uint8_t *ptr, int len, int offset, uint32_t val)
{
- uint32_t byte_idx = pos >> 3;
- uint32_t bit_idx = (CHAR_BIT - 1) - (pos & (CHAR_BIT - 1));
uint8_t tmp;
+ unsigned int part_len;
+
+ len = RTE_MAX(len, 0);
+ len = RTE_MIN(len, (int)(sizeof(uint32_t) * CHAR_BIT));
- tmp = ptr[byte_idx];
- tmp ^= bit << bit_idx;
- ptr[byte_idx] = tmp;
+ while (len > 0) {
+ part_len = RTE_MIN(CHAR_BIT, len);
+ tmp = (uint8_t)val & ((1 << part_len) - 1);
+ write_unaligned_byte(ptr, part_len,
+ offset + len - part_len, tmp);
+ len -= CHAR_BIT;
+ val >>= CHAR_BIT;
+ }
}
int
@@ -632,8 +707,10 @@ rte_thash_adjust_tuple(struct rte_thash_ctx *ctx,
uint32_t tmp_tuple[tuple_len / sizeof(uint32_t)];
unsigned int i, j, ret = 0;
uint32_t hash, adj_bits;
- uint8_t bit;
const uint8_t *hash_key;
+ uint32_t tmp;
+ int offset;
+ int tmp_len;
if ((ctx == NULL) || (h == NULL) || (tuple == NULL) ||
(tuple_len % sizeof(uint32_t) != 0) || (attempts <= 0))
@@ -641,6 +718,8 @@ rte_thash_adjust_tuple(struct rte_thash_ctx *ctx,
hash_key = rte_thash_get_key(ctx);
+ attempts = RTE_MIN(attempts, 1U << (h->tuple_len - ctx->reta_sz_log));
+
for (i = 0; i < attempts; i++) {
for (j = 0; j < (tuple_len / 4); j++)
tmp_tuple[j] =
@@ -651,28 +730,28 @@ rte_thash_adjust_tuple(struct rte_thash_ctx *ctx,
/*
* Hint: LSB of adj_bits corresponds to
- * offset + len bit of tuple
+ * offset + len bit of the subtuple
*/
- for (j = 0; j < sizeof(uint32_t) * CHAR_BIT; j++) {
- bit = (adj_bits >> j) & 0x1;
- if (bit)
- xor_bit(tuple, bit, h->tuple_offset +
- h->tuple_len - 1 - j);
- }
+ offset = h->tuple_offset + h->tuple_len - ctx->reta_sz_log;
+ tmp = read_unaligned_bits(tuple, ctx->reta_sz_log, offset);
+ tmp ^= adj_bits;
+ write_unaligned_bits(tuple, ctx->reta_sz_log, offset, tmp);
if (fn != NULL) {
ret = (fn(userdata, tuple)) ? 0 : -EEXIST;
if (ret == 0)
return 0;
else if (i < (attempts - 1)) {
- /* Update tuple with random bits */
- for (j = 0; j < h->tuple_len; j++) {
- bit = rte_rand() & 0x1;
- if (bit)
- xor_bit(tuple, bit,
- h->tuple_offset +
- h->tuple_len - 1 - j);
- }
+ /* increment subtuple part by 1 */
+ tmp_len = RTE_MIN(sizeof(uint32_t) * CHAR_BIT,
+ h->tuple_len - ctx->reta_sz_log);
+ offset -= tmp_len;
+ tmp = read_unaligned_bits(tuple, tmp_len,
+ offset);
+ tmp++;
+ tmp &= (1 << tmp_len) - 1;
+ write_unaligned_bits(tuple, tmp_len, offset,
+ tmp);
}
} else
return 0;
--
2.7.4
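For readers checking the byte-level mask arithmetic: get_bits_mask()
above numbers bits MSB-first within a byte. Below is a standalone
restatement of that function (RTE_MIN replaced by a ternary so it
builds without DPDK headers) together with two example values:

    #include <limits.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Mask for `len` bits starting at bit `offset` inside one byte,
     * counting bit 0 as the most significant bit.
     */
    static uint8_t
    get_bits_mask(unsigned int len, unsigned int offset)
    {
        unsigned int last_bit;

        offset %= CHAR_BIT;
        last_bit = (offset + len < CHAR_BIT) ? offset + len : CHAR_BIT;

        return ((1 << (CHAR_BIT - offset)) - 1) ^
            ((1 << (CHAR_BIT - last_bit)) - 1);
    }

    int
    main(void)
    {
        /* 3 bits at offset 2 cover bits 2..4 from the MSB -> 0x38 */
        printf("0x%02x\n", get_bits_mask(3, 2));
        /* 5 bits at offset 6 spill into the next byte; only the two
         * bits left in this byte are covered -> 0x03
         */
        printf("0x%02x\n", get_bits_mask(5, 6));
        return 0;
    }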
* Re: [dpdk-dev] [PATCH v3] hash: fix tuple adjustment
2021-05-06 11:40 ` [dpdk-dev] [PATCH v3] " Vladimir Medvedkin
@ 2021-05-10 13:25 ` Thomas Monjalon
0 siblings, 0 replies; 7+ messages in thread
From: Thomas Monjalon @ 2021-05-10 13:25 UTC (permalink / raw)
To: Vladimir Medvedkin
Cc: dev, konstantin.ananyev, andrey.chilikin, ray.kinsella,
yipeng1.wang, sameh.gobriel, bruce.richardson, david.marchand,
kda
06/05/2021 13:40, Vladimir Medvedkin:
> rte_thash_adjust_tuple() uses random bits to generate a new subtuple
> if the fn() callback reports a collision. In some cases the random
> change modifies the subtuple in such a way that, after the
> complementary bits are applied, the original tuple is obtained again.
> This patch replaces the randomization with a subtuple increment.
>
> Fixes: 28ebff11c2dc ("hash: add predictable RSS")
> Cc: vladimir.medvedkin@intel.com
>
> Signed-off-by: Vladimir Medvedkin <vladimir.medvedkin@intel.com>
> Acked-by: Yipeng Wang <yipeng1.wang@intel.com>
> Tested-by: Stanislaw Kardach <kda@semihalf.com>
> Reviewed-by: Stanislaw Kardach <kda@semihalf.com>
Applied, thanks