* [PATCH] lib/lpm: use standard atomic_store_explicit
From: Andre Muezerie @ 2024-12-04 2:20 UTC
To: Bruce Richardson, Vladimir Medvedkin; +Cc: dev, Andre Muezerie
MSVC issues the warning and error below:
../lib/lpm/rte_lpm.c(297): warning C4013
'__atomic_store' undefined; assuming extern returning int
../lib/lpm/rte_lpm.c(298): error C2065:
'__ATOMIC_RELAXED': undeclared identifier
The fix is to use the standard atomic_store_explicit() instead of the
gcc-specific __atomic_store(). atomic_store_explicit() is already used
in other parts of DPDK and is compatible with many compilers,
including MSVC.
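For illustration only (this snippet is not part of the patch and uses
plain C11 names rather than DPDK's RTE_ATOMIC()/rte_atomic_store_explicit()
wrappers), the difference between the two forms on a 32-bit value is:

    #include <stdatomic.h>
    #include <stdint.h>

    static _Atomic uint32_t slot;

    static void store_relaxed(uint32_t v)
    {
        /* gcc/clang builtin rejected by MSVC; note that it takes a
         * pointer to the value being stored:
         *
         *     __atomic_store(&slot, &v, __ATOMIC_RELAXED);
         */

        /* Standard C11 form: the value is passed directly, together
         * with an explicit memory order.
         */
        atomic_store_explicit(&slot, v, memory_order_relaxed);
    }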
Signed-off-by: Andre Muezerie <andremue@linux.microsoft.com>
---
lib/lpm/rte_lpm.c | 108 ++++++++++++++++++++++++++++++----------------
lib/lpm/rte_lpm.h | 56 ++++++++++++++----------
2 files changed, 104 insertions(+), 60 deletions(-)
diff --git a/lib/lpm/rte_lpm.c b/lib/lpm/rte_lpm.c
index a5c9e7c9fc..7ec85f1718 100644
--- a/lib/lpm/rte_lpm.c
+++ b/lib/lpm/rte_lpm.c
@@ -294,8 +294,8 @@ __lpm_rcu_qsbr_free_resource(void *p, void *data, unsigned int n)
RTE_SET_USED(n);
/* Set tbl8 group invalid */
- __atomic_store(&tbl8[tbl8_group_index], &zero_tbl8_entry,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tbl8[tbl8_group_index].val,
+ zero_tbl8_entry.val, rte_memory_order_relaxed);
}
/* Associate QSBR variable with an LPM object.
@@ -515,8 +515,8 @@ _tbl8_alloc(struct __rte_lpm *i_lpm)
RTE_LPM_TBL8_GROUP_NUM_ENTRIES *
sizeof(tbl8_entry[0]));
- __atomic_store(tbl8_entry, &new_tbl8_entry,
- __ATOMIC_RELAXED);
+ rte_atomic_store_explicit(&tbl8_entry->val, new_tbl8_entry.val,
+ rte_memory_order_relaxed);
/* Return group index for allocated tbl8 group. */
return group_idx;
@@ -551,15 +551,19 @@ tbl8_free(struct __rte_lpm *i_lpm, uint32_t tbl8_group_start)
if (i_lpm->v == NULL) {
/* Set tbl8 group invalid*/
- __atomic_store(&i_lpm->lpm.tbl8[tbl8_group_start], &zero_tbl8_entry,
- __ATOMIC_RELAXED);
+ struct rte_lpm_tbl_entry *tbl8_entry =
+ &i_lpm->lpm.tbl8[tbl8_group_start];
+ rte_atomic_store_explicit(&tbl8_entry->val, zero_tbl8_entry.val,
+ rte_memory_order_relaxed);
} else if (i_lpm->rcu_mode == RTE_LPM_QSBR_MODE_SYNC) {
/* Wait for quiescent state change. */
rte_rcu_qsbr_synchronize(i_lpm->v,
RTE_QSBR_THRID_INVALID);
/* Set tbl8 group invalid*/
- __atomic_store(&i_lpm->lpm.tbl8[tbl8_group_start], &zero_tbl8_entry,
- __ATOMIC_RELAXED);
+ struct rte_lpm_tbl_entry *tbl8_entry =
+ &i_lpm->lpm.tbl8[tbl8_group_start];
+ rte_atomic_store_explicit(&tbl8_entry->val, zero_tbl8_entry.val,
+ rte_memory_order_relaxed);
} else if (i_lpm->rcu_mode == RTE_LPM_QSBR_MODE_DQ) {
/* Push into QSBR defer queue. */
status = rte_rcu_qsbr_dq_enqueue(i_lpm->dq,
@@ -602,8 +606,10 @@ add_depth_small(struct __rte_lpm *i_lpm, uint32_t ip, uint8_t depth,
/* Setting tbl24 entry in one go to avoid race
* conditions
*/
- __atomic_store(&i_lpm->lpm.tbl24[i], &new_tbl24_entry,
- __ATOMIC_RELEASE);
+ struct rte_lpm_tbl_entry *tbl24_entry =
+ &i_lpm->lpm.tbl24[i];
+ rte_atomic_store_explicit(&tbl24_entry->val, new_tbl24_entry.val,
+ rte_memory_order_release);
continue;
}
@@ -632,9 +638,11 @@ add_depth_small(struct __rte_lpm *i_lpm, uint32_t ip, uint8_t depth,
* Setting tbl8 entry in one go to avoid
* race conditions
*/
- __atomic_store(&i_lpm->lpm.tbl8[j],
- &new_tbl8_entry,
- __ATOMIC_RELAXED);
+ struct rte_lpm_tbl_entry *tbl8_entry =
+ &i_lpm->lpm.tbl8[j];
+ rte_atomic_store_explicit(&tbl8_entry->val,
+ new_tbl8_entry.val,
+ rte_memory_order_relaxed);
continue;
}
@@ -679,8 +687,10 @@ add_depth_big(struct __rte_lpm *i_lpm, uint32_t ip_masked, uint8_t depth,
.valid_group = i_lpm->lpm.tbl8[i].valid_group,
.next_hop = next_hop,
};
- __atomic_store(&i_lpm->lpm.tbl8[i], &new_tbl8_entry,
- __ATOMIC_RELAXED);
+ struct rte_lpm_tbl_entry *tbl8_entry =
+ &i_lpm->lpm.tbl8[i];
+ rte_atomic_store_explicit(&tbl8_entry->val, new_tbl8_entry.val,
+ rte_memory_order_relaxed);
}
/*
@@ -699,8 +709,10 @@ add_depth_big(struct __rte_lpm *i_lpm, uint32_t ip_masked, uint8_t depth,
/* The tbl24 entry must be written only after the
* tbl8 entries are written.
*/
- __atomic_store(&i_lpm->lpm.tbl24[tbl24_index], &new_tbl24_entry,
- __ATOMIC_RELEASE);
+ struct rte_lpm_tbl_entry *tbl24_entry =
+ &i_lpm->lpm.tbl24[tbl24_index];
+ rte_atomic_store_explicit(&tbl24_entry->val, new_tbl24_entry.val,
+ rte_memory_order_release);
} /* If valid entry but not extended calculate the index into Table8. */
else if (i_lpm->lpm.tbl24[tbl24_index].valid_group == 0) {
@@ -724,8 +736,10 @@ add_depth_big(struct __rte_lpm *i_lpm, uint32_t ip_masked, uint8_t depth,
.valid_group = i_lpm->lpm.tbl8[i].valid_group,
.next_hop = i_lpm->lpm.tbl24[tbl24_index].next_hop,
};
- __atomic_store(&i_lpm->lpm.tbl8[i], &new_tbl8_entry,
- __ATOMIC_RELAXED);
+ struct rte_lpm_tbl_entry *tbl8_entry =
+ &i_lpm->lpm.tbl8[i];
+ rte_atomic_store_explicit(&tbl8_entry->val, new_tbl8_entry.val,
+ rte_memory_order_relaxed);
}
tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
@@ -738,8 +752,10 @@ add_depth_big(struct __rte_lpm *i_lpm, uint32_t ip_masked, uint8_t depth,
.valid_group = i_lpm->lpm.tbl8[i].valid_group,
.next_hop = next_hop,
};
- __atomic_store(&i_lpm->lpm.tbl8[i], &new_tbl8_entry,
- __ATOMIC_RELAXED);
+ struct rte_lpm_tbl_entry *tbl8_entry =
+ &i_lpm->lpm.tbl8[i];
+ rte_atomic_store_explicit(&tbl8_entry->val, new_tbl8_entry.val,
+ rte_memory_order_relaxed);
}
/*
@@ -758,8 +774,10 @@ add_depth_big(struct __rte_lpm *i_lpm, uint32_t ip_masked, uint8_t depth,
/* The tbl24 entry must be written only after the
* tbl8 entries are written.
*/
- __atomic_store(&i_lpm->lpm.tbl24[tbl24_index], &new_tbl24_entry,
- __ATOMIC_RELEASE);
+ struct rte_lpm_tbl_entry *tbl24_entry =
+ &i_lpm->lpm.tbl24[tbl24_index];
+ rte_atomic_store_explicit(&tbl24_entry->val, new_tbl24_entry.val,
+ rte_memory_order_release);
} else { /*
* If it is valid, extended entry calculate the index into tbl8.
@@ -784,8 +802,10 @@ add_depth_big(struct __rte_lpm *i_lpm, uint32_t ip_masked, uint8_t depth,
* Setting tbl8 entry in one go to avoid race
* condition
*/
- __atomic_store(&i_lpm->lpm.tbl8[i], &new_tbl8_entry,
- __ATOMIC_RELAXED);
+ struct rte_lpm_tbl_entry *tbl8_entry =
+ &i_lpm->lpm.tbl8[i];
+ rte_atomic_store_explicit(&tbl8_entry->val,
+ new_tbl8_entry.val, rte_memory_order_relaxed);
continue;
}
@@ -924,8 +944,10 @@ delete_depth_small(struct __rte_lpm *i_lpm, uint32_t ip_masked,
if (i_lpm->lpm.tbl24[i].valid_group == 0 &&
i_lpm->lpm.tbl24[i].depth <= depth) {
- __atomic_store(&i_lpm->lpm.tbl24[i],
- &zero_tbl24_entry, __ATOMIC_RELEASE);
+ struct rte_lpm_tbl_entry *tbl24_entry =
+ &i_lpm->lpm.tbl24[i];
+ rte_atomic_store_explicit(&tbl24_entry->val,
+ zero_tbl24_entry.val, rte_memory_order_release);
} else if (i_lpm->lpm.tbl24[i].valid_group == 1) {
/*
* If TBL24 entry is extended, then there has
@@ -970,8 +992,10 @@ delete_depth_small(struct __rte_lpm *i_lpm, uint32_t ip_masked,
if (i_lpm->lpm.tbl24[i].valid_group == 0 &&
i_lpm->lpm.tbl24[i].depth <= depth) {
- __atomic_store(&i_lpm->lpm.tbl24[i], &new_tbl24_entry,
- __ATOMIC_RELEASE);
+ struct rte_lpm_tbl_entry *tbl24_entry =
+ &i_lpm->lpm.tbl24[i];
+ rte_atomic_store_explicit(&tbl24_entry->val,
+ new_tbl24_entry.val, rte_memory_order_release);
} else if (i_lpm->lpm.tbl24[i].valid_group == 1) {
/*
* If TBL24 entry is extended, then there has
@@ -986,10 +1010,13 @@ delete_depth_small(struct __rte_lpm *i_lpm, uint32_t ip_masked,
for (j = tbl8_index; j < (tbl8_index +
RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {
- if (i_lpm->lpm.tbl8[j].depth <= depth)
- __atomic_store(&i_lpm->lpm.tbl8[j],
- &new_tbl8_entry,
- __ATOMIC_RELAXED);
+ if (i_lpm->lpm.tbl8[j].depth <= depth) {
+ struct rte_lpm_tbl_entry *tbl8_entry =
+ &i_lpm->lpm.tbl8[j];
+ rte_atomic_store_explicit(&tbl8_entry->val,
+ new_tbl8_entry.val,
+ rte_memory_order_relaxed);
+ }
}
}
}
@@ -1097,9 +1124,12 @@ delete_depth_big(struct __rte_lpm *i_lpm, uint32_t ip_masked,
* rule_to_delete must be modified.
*/
for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
- if (i_lpm->lpm.tbl8[i].depth <= depth)
- __atomic_store(&i_lpm->lpm.tbl8[i], &new_tbl8_entry,
- __ATOMIC_RELAXED);
+ if (i_lpm->lpm.tbl8[i].depth <= depth) {
+ struct rte_lpm_tbl_entry *tbl8_entry =
+ &i_lpm->lpm.tbl8[i];
+ rte_atomic_store_explicit(&tbl8_entry->val,
+ new_tbl8_entry.val, rte_memory_order_relaxed);
+ }
}
}
@@ -1130,8 +1160,10 @@ delete_depth_big(struct __rte_lpm *i_lpm, uint32_t ip_masked,
/* Set tbl24 before freeing tbl8 to avoid race condition.
* Prevent the free of the tbl8 group from hoisting.
*/
- __atomic_store(&i_lpm->lpm.tbl24[tbl24_index], &new_tbl24_entry,
- __ATOMIC_RELAXED);
+ struct rte_lpm_tbl_entry *tbl24_entry =
+ &i_lpm->lpm.tbl24[tbl24_index];
+ rte_atomic_store_explicit(&tbl24_entry->val, new_tbl24_entry.val,
+ rte_memory_order_relaxed);
rte_atomic_thread_fence(rte_memory_order_release);
status = tbl8_free(i_lpm, tbl8_group_start);
}
diff --git a/lib/lpm/rte_lpm.h b/lib/lpm/rte_lpm.h
index 329dc1aad4..c1f30f96e3 100644
--- a/lib/lpm/rte_lpm.h
+++ b/lib/lpm/rte_lpm.h
@@ -77,38 +77,50 @@ enum rte_lpm_qsbr_mode {
/** @internal Tbl24 entry structure. */
__extension__
struct rte_lpm_tbl_entry {
- /**
- * Stores Next hop (tbl8 or tbl24 when valid_group is not set) or
- * a group index pointing to a tbl8 structure (tbl24 only, when
- * valid_group is set)
- */
- uint32_t next_hop :24;
- /* Using single uint8_t to store 3 values. */
- uint32_t valid :1; /**< Validation flag. */
- /**
- * For tbl24:
- * - valid_group == 0: entry stores a next hop
- * - valid_group == 1: entry stores a group_index pointing to a tbl8
- * For tbl8:
- * - valid_group indicates whether the current tbl8 is in use or not
- */
- uint32_t valid_group :1;
- uint32_t depth :6; /**< Rule depth. */
+ union {
+ RTE_ATOMIC(uint32_t) val;
+ struct {
+ /**
+ * Stores Next hop (tbl8 or tbl24 when valid_group is not set) or
+ * a group index pointing to a tbl8 structure (tbl24 only, when
+ * valid_group is set)
+ */
+ uint32_t next_hop :24;
+ /* Using single uint8_t to store 3 values. */
+ uint32_t valid :1; /**< Validation flag. */
+ /**
+ * For tbl24:
+ * - valid_group == 0: entry stores a next hop
+ * - valid_group == 1: entry stores a group_index pointing to a tbl8
+ * For tbl8:
+ * - valid_group indicates whether the current tbl8 is in use or not
+ */
+ uint32_t valid_group :1;
+ uint32_t depth :6; /**< Rule depth. */
+ };
+ };
};
#else
__extension__
struct rte_lpm_tbl_entry {
- uint32_t depth :6;
- uint32_t valid_group :1;
- uint32_t valid :1;
- uint32_t next_hop :24;
-
+ union {
+ RTE_ATOMIC(uint32_t) val;
+ struct {
+ uint32_t depth :6;
+ uint32_t valid_group :1;
+ uint32_t valid :1;
+ uint32_t next_hop :24;
+ };
+ };
};
#endif
+static_assert(sizeof(struct rte_lpm_tbl_entry) == sizeof(uint32_t),
+ "sizeof(struct rte_lpm_tbl_entry) == sizeof(uint32_t)");
+
/** LPM configuration structure. */
struct rte_lpm_config {
uint32_t max_rules; /**< Max number of rules. */
--
2.47.0.vfs.0.3
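Side note, illustrative only: the rte_lpm.h hunk above overlays the
bit-field view of a table entry with a single 32-bit atomic value, so
that a whole entry can be published in one store. A minimal standalone
sketch of that pattern, using made-up names, plain C11 atomics instead
of DPDK's RTE_ATOMIC()/rte_atomic_store_explicit() wrappers, and the
field order from one of the two byte-order variants in the patch:

    #include <assert.h>
    #include <stdatomic.h>
    #include <stdint.h>

    struct entry {
        union {
            _Atomic uint32_t val;   /* whole-entry atomic view */
            struct {
                uint32_t next_hop    :24;
                uint32_t valid       :1;
                uint32_t valid_group :1;
                uint32_t depth       :6;
            };
        };
    };

    /* Catch any build where the atomic member widens the entry. */
    static_assert(sizeof(struct entry) == sizeof(uint32_t),
        "struct entry must stay 32 bits wide");

    static void entry_set(struct entry *e, struct entry new_e)
    {
        /* All fields are published in one go, rather than as several
         * non-atomic bit-field writes that a concurrent reader could
         * observe half-done.
         */
        atomic_store_explicit(&e->val, new_e.val, memory_order_relaxed);
    }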
* Re: [PATCH] lib/lpm: use standard atomic_store_explicit
From: David Marchand @ 2024-12-04 7:56 UTC
To: Andre Muezerie; +Cc: Bruce Richardson, Vladimir Medvedkin, dev
Hello Andre,
On Wed, Dec 4, 2024 at 3:20 AM Andre Muezerie
<andremue@linux.microsoft.com> wrote:
>
> MSVC issues the warning below:
>
> ../lib/lpm/rte_lpm.c(297): warning C4013
> '__atomic_store' undefined; assuming extern returning int
> ../lib/lpm/rte_lpm.c(298): error C2065:
> '__ATOMIC_RELAXED': undeclared identifier
>
> The fix is to use standard atomic_store_explicit() instead of
> gcc specific __atomic_store().
> atomic_store_explicit() was already being used in other parts
> of DPDK and is compatible
> with many compilers, including MSVC.
>
> Signed-off-by: Andre Muezerie <andremue@linux.microsoft.com>
With this change, is there anything remaining that blocks this library
compilation with MSVC?
If not, please update meson.build so that CI can test lpm compilation
with MSVC on this patch (and that will detect regressions once
merged).
--
David Marchand
* Re: [PATCH] lib/lpm: use standard atomic_store_explicit
From: Andre Muezerie @ 2024-12-04 16:20 UTC
To: David Marchand; +Cc: Bruce Richardson, Vladimir Medvedkin, dev
On Wed, Dec 04, 2024 at 08:56:35AM +0100, David Marchand wrote:
> Hello Andre,
>
> On Wed, Dec 4, 2024 at 3:20 AM Andre Muezerie
> <andremue@linux.microsoft.com> wrote:
> >
> > MSVC issues the warning below:
> >
> > ../lib/lpm/rte_lpm.c(297): warning C4013
> > '__atomic_store' undefined; assuming extern returning int
> > ../lib/lpm/rte_lpm.c(298): error C2065:
> > '__ATOMIC_RELAXED': undeclared identifier
> >
> > The fix is to use standard atomic_store_explicit() instead of
> > gcc specific __atomic_store().
> > atomic_store_explicit() was already being used in other parts
> > of DPDK and is compatible
> > with many compilers, including MSVC.
> >
> > Signed-off-by: Andre Muezerie <andremue@linux.microsoft.com>
>
> With this change, is there anything remaining that blocks this library
> compilation with MSVC?
> If not, please update meson.build so that CI can test lpm compilation
> with MSVC on this patch (and that will detect regressions once
> merged).
>
>
> --
> David Marchand
Hi David,
I'm eager to enable lpm to be compiled with MSVC. Even though
this was the last issue I observed for this lib on my machine,
lpm depends on hash, which depends on net, which depends on mbuf,
and mbuf is not enabled for MSVC yet.
I have several fixes affecting these libraries pending review, and I
would prefer not to make the compilation of this code depend on lpm's
dependencies being ready, in case some critical fix only gets completed
later. I have not analyzed all sequences in which the patches can be
completed, and it is quite possible that some sequences would result
in MSVC compilation failures if the libs were enabled in meson.build.
However, this code would still get compiled on Linux as usual, and
hopefully we can enable all these libs once the patches get
completed. I am aware that regressions can happen before that point.
We will address them if that happens.
It is tricky to handle so many patches/dependencies. Let me know if
there's something that can be improved.
Andre
* Re: [PATCH] lib/lpm: use standard atomic_store_explicit
From: Bruce Richardson @ 2024-12-04 16:52 UTC
To: Andre Muezerie; +Cc: David Marchand, Vladimir Medvedkin, dev
On Wed, Dec 04, 2024 at 08:20:19AM -0800, Andre Muezerie wrote:
> On Wed, Dec 04, 2024 at 08:56:35AM +0100, David Marchand wrote:
> > Hello Andre,
> >
> > On Wed, Dec 4, 2024 at 3:20 AM Andre Muezerie
> > <andremue@linux.microsoft.com> wrote:
> > >
> > > MSVC issues the warning below:
> > >
> > > ../lib/lpm/rte_lpm.c(297): warning C4013
> > > '__atomic_store' undefined; assuming extern returning int
> > > ../lib/lpm/rte_lpm.c(298): error C2065:
> > > '__ATOMIC_RELAXED': undeclared identifier
> > >
> > > The fix is to use standard atomic_store_explicit() instead of
> > > gcc specific __atomic_store().
> > > atomic_store_explicit() was already being used in other parts
> > > of DPDK and is compatible
> > > with many compilers, including MSVC.
> > >
> > > Signed-off-by: Andre Muezerie <andremue@linux.microsoft.com>
> >
> > With this change, is there anything remaining that blocks this library
> > compilation with MSVC?
> > If not, please update meson.build so that CI can test lpm compilation
> > with MSVC on this patch (and that will detect regressions once
> > merged).
> >
> >
> > --
> > David Marchand
>
> Hi David,
>
> I'm eager to enable lpm to be compiled with MSVC. Even though
> this was the last issue I observed for this lib on my machine,
> lpm depends on hash, which depends on net, which depends on mbuf and
> mbuf is not enabled for MSVC yet.
>
I was a bit curious about this dependency chain and decided to
investigate. The "weak link" in the chain appears to me to be the link
between the hash library and the net library. Within the hash library,
I believe only the thash functionality depends on net, for the
definitions of the IPv6 headers and address fields.
If we want to break that dependency (temporarily, since net is pretty much
an essential DPDK lib), the following patch should work.
Regards,
/Bruce
diff --git a/lib/hash/meson.build b/lib/hash/meson.build
index e6cb1ebe3b..f9096edd67 100644
--- a/lib/hash/meson.build
+++ b/lib/hash/meson.build
@@ -6,24 +6,34 @@ headers = files(
'rte_hash_crc.h',
'rte_hash.h',
'rte_jhash.h',
- 'rte_thash.h',
- 'rte_thash_gfni.h',
)
indirect_headers += files(
'rte_crc_arm64.h',
'rte_crc_generic.h',
'rte_crc_sw.h',
'rte_crc_x86.h',
- 'rte_thash_x86_gfni.h',
)
sources = files(
'rte_cuckoo_hash.c',
'rte_hash_crc.c',
'rte_fbk_hash.c',
+)
+
+deps = ['rcu']
+
+if dpdk_conf.has('RTE_LIB_NET')
+ headers += files(
+ 'rte_thash.h',
+ 'rte_thash_gfni.h',
+ )
+ indirect_headers += files(
+ 'rte_thash_x86_gfni.h',
+ )
+ sources += files(
'rte_thash.c',
'rte_thash_gfni.c',
'rte_thash_gf2_poly_math.c',
-)
-
-deps = ['net', 'rcu']
+ )
+ deps += ['net']
+endif
* Re: [PATCH] lib/lpm: use standard atomic_store_explicit
From: Andre Muezerie @ 2024-12-04 19:09 UTC
To: Bruce Richardson; +Cc: David Marchand, Vladimir Medvedkin, dev
On Wed, Dec 04, 2024 at 04:52:43PM +0000, Bruce Richardson wrote:
> On Wed, Dec 04, 2024 at 08:20:19AM -0800, Andre Muezerie wrote:
> > On Wed, Dec 04, 2024 at 08:56:35AM +0100, David Marchand wrote:
> > > Hello Andre,
> > >
> > > On Wed, Dec 4, 2024 at 3:20 AM Andre Muezerie
> > > <andremue@linux.microsoft.com> wrote:
> > > >
> > > > MSVC issues the warning below:
> > > >
> > > > ../lib/lpm/rte_lpm.c(297): warning C4013
> > > > '__atomic_store' undefined; assuming extern returning int
> > > > ../lib/lpm/rte_lpm.c(298): error C2065:
> > > > '__ATOMIC_RELAXED': undeclared identifier
> > > >
> > > > The fix is to use standard atomic_store_explicit() instead of
> > > > gcc specific __atomic_store().
> > > > atomic_store_explicit() was already being used in other parts
> > > > of DPDK and is compatible
> > > > with many compilers, including MSVC.
> > > >
> > > > Signed-off-by: Andre Muezerie <andremue@linux.microsoft.com>
> > >
> > > With this change, is there anything remaining that blocks this library
> > > compilation with MSVC?
> > > If not, please update meson.build so that CI can test lpm compilation
> > > with MSVC on this patch (and that will detect regressions once
> > > merged).
> > >
> > >
> > > --
> > > David Marchand
> >
> > Hi David,
> >
> > I'm eager to enable lpm to be compiled with MSVC. Even though
> > this was the last issue I observed for this lib on my machine,
> > lpm depends on hash, which depends on net, which depends on mbuf and
> > mbuf is not enabled for MSVC yet.
> >
> I was a bit curious about this dependency chain and decided to investigate
> a bit. The "weak link" in this chain appears to me to be the link between
> the hash library and the net library. Within the hash library, I believe
> only the thash functionality depends on net, for definitions of the ipv6
> headers and address fields.
>
> If we want to break that dependency (temporarily, since net is pretty much
> an essential DPDK lib), the following patch should work.
>
> Regards,
> /Bruce
>
> diff --git a/lib/hash/meson.build b/lib/hash/meson.build
> index e6cb1ebe3b..f9096edd67 100644
> --- a/lib/hash/meson.build
> +++ b/lib/hash/meson.build
> @@ -6,24 +6,34 @@ headers = files(
> 'rte_hash_crc.h',
> 'rte_hash.h',
> 'rte_jhash.h',
> - 'rte_thash.h',
> - 'rte_thash_gfni.h',
> )
> indirect_headers += files(
> 'rte_crc_arm64.h',
> 'rte_crc_generic.h',
> 'rte_crc_sw.h',
> 'rte_crc_x86.h',
> - 'rte_thash_x86_gfni.h',
> )
>
> sources = files(
> 'rte_cuckoo_hash.c',
> 'rte_hash_crc.c',
> 'rte_fbk_hash.c',
> +)
> +
> +deps = ['rcu']
> +
> +if dpdk_conf.has('RTE_LIB_NET')
> + headers += files(
> + 'rte_thash.h',
> + 'rte_thash_gfni.h',
> + )
> + indirect_headers += files(
> + 'rte_thash_x86_gfni.h',
> + )
> + sources += files(
> 'rte_thash.c',
> 'rte_thash_gfni.c',
> 'rte_thash_gf2_poly_math.c',
> -)
> -
> -deps = ['net', 'rcu']
> + )
> + deps += ['net']
> +endif
That's a great suggestion. Unfortunately, hash also directly depends
on rcu, which is not yet enabled for MSVC either, due to pending reviews.
Regards,
Andre Muezerie