DPDK patches and discussions
From: Ruifeng Wang <ruifeng.wang@arm.com>
To: vladimir.medvedkin@intel.com, bruce.richardson@intel.com
Cc: dev@dpdk.org, honnappa.nagarahalli@arm.com, gavin.hu@arm.com,
	nd@arm.com, Ruifeng Wang <ruifeng.wang@arm.com>
Subject: [dpdk-dev] [PATCH v5 4/6] lib/lpm: use atomic store to avoid partial update
Date: Fri, 12 Jul 2019 11:09:21 +0800	[thread overview]
Message-ID: <20190712030923.37832-5-ruifeng.wang@arm.com> (raw)
In-Reply-To: <20190712030923.37832-1-ruifeng.wang@arm.com>

The compiler could generate non-atomic stores when updating a whole table
entry. This may cause an incorrect next hop to be returned if the byte
holding the valid flag is updated before the byte holding the next hop.

Change to use an atomic store to update the whole table entry.
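
For context only (not part of the patch): a minimal sketch of the pattern
being applied. The entry layout and the entry_update() helper below are
simplified assumptions for illustration, not the actual rte_lpm definitions.

    #include <stdint.h>

    /* Hypothetical 4-byte table entry; field layout simplified. */
    struct tbl_entry {
    	uint32_t next_hop    : 24;
    	uint32_t valid       : 1;
    	uint32_t valid_group : 1;
    	uint32_t depth       : 6;
    };

    static void
    entry_update(struct tbl_entry *slot, uint32_t next_hop, uint8_t depth)
    {
    	struct tbl_entry new_entry = {
    		.next_hop = next_hop,
    		.valid = 1,
    		.valid_group = 1,
    		.depth = depth,
    	};

    	/* A plain assignment (*slot = new_entry;) may be split by the
    	 * compiler into several narrower stores, so a lock-free reader
    	 * could observe valid == 1 while next_hop is still stale.
    	 * Storing the whole entry atomically rules out such a torn
    	 * update; relaxed ordering is sufficient here because only
    	 * single-copy atomicity of the entry itself is required.
    	 */
    	__atomic_store(slot, &new_entry, __ATOMIC_RELAXED);
    }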

Suggested-by: Medvedkin Vladimir <vladimir.medvedkin@intel.com>
Signed-off-by: Ruifeng Wang <ruifeng.wang@arm.com>
Reviewed-by: Gavin Hu <gavin.hu@arm.com>
---
 lib/librte_lpm/rte_lpm.c | 69 ++++++++++++++++++++++++++++++++--------
 1 file changed, 55 insertions(+), 14 deletions(-)

diff --git a/lib/librte_lpm/rte_lpm.c b/lib/librte_lpm/rte_lpm.c
index 0a94630db..d35d64448 100644
--- a/lib/librte_lpm/rte_lpm.c
+++ b/lib/librte_lpm/rte_lpm.c
@@ -654,11 +654,19 @@ tbl8_alloc_v20(struct rte_lpm_tbl_entry_v20 *tbl8)
 		tbl8_entry = &tbl8[group_idx * RTE_LPM_TBL8_GROUP_NUM_ENTRIES];
 		/* If a free tbl8 group is found clean it and set as VALID. */
 		if (!tbl8_entry->valid_group) {
+			struct rte_lpm_tbl_entry_v20 new_tbl8_entry = {
+				.valid = INVALID,
+				.depth = 0,
+				.valid_group = VALID,
+			};
+			new_tbl8_entry.next_hop = 0;
+
 			memset(&tbl8_entry[0], 0,
 					RTE_LPM_TBL8_GROUP_NUM_ENTRIES *
 					sizeof(tbl8_entry[0]));
 
-			tbl8_entry->valid_group = VALID;
+			__atomic_store(tbl8_entry, &new_tbl8_entry,
+					__ATOMIC_RELAXED);
 
 			/* Return group index for allocated tbl8 group. */
 			return group_idx;
@@ -680,11 +688,19 @@ tbl8_alloc_v1604(struct rte_lpm_tbl_entry *tbl8, uint32_t number_tbl8s)
 		tbl8_entry = &tbl8[group_idx * RTE_LPM_TBL8_GROUP_NUM_ENTRIES];
 		/* If a free tbl8 group is found clean it and set as VALID. */
 		if (!tbl8_entry->valid_group) {
+			struct rte_lpm_tbl_entry new_tbl8_entry = {
+				.next_hop = 0,
+				.valid = INVALID,
+				.depth = 0,
+				.valid_group = VALID,
+			};
+
 			memset(&tbl8_entry[0], 0,
 					RTE_LPM_TBL8_GROUP_NUM_ENTRIES *
 					sizeof(tbl8_entry[0]));
 
-			tbl8_entry->valid_group = VALID;
+			__atomic_store(tbl8_entry, &new_tbl8_entry,
+					__ATOMIC_RELAXED);
 
 			/* Return group index for allocated tbl8 group. */
 			return group_idx;
@@ -699,14 +715,25 @@ static void
 tbl8_free_v20(struct rte_lpm_tbl_entry_v20 *tbl8, uint32_t tbl8_group_start)
 {
 	/* Set tbl8 group invalid*/
-	tbl8[tbl8_group_start].valid_group = INVALID;
+	struct rte_lpm_tbl_entry_v20 zero_tbl8_entry = {
+		.valid = INVALID,
+		.depth = 0,
+		.valid_group = INVALID,
+	};
+	zero_tbl8_entry.next_hop = 0;
+
+	__atomic_store(&tbl8[tbl8_group_start], &zero_tbl8_entry,
+			__ATOMIC_RELAXED);
 }
 
 static void
 tbl8_free_v1604(struct rte_lpm_tbl_entry *tbl8, uint32_t tbl8_group_start)
 {
 	/* Set tbl8 group invalid*/
-	tbl8[tbl8_group_start].valid_group = INVALID;
+	struct rte_lpm_tbl_entry zero_tbl8_entry = {0};
+
+	__atomic_store(&tbl8[tbl8_group_start], &zero_tbl8_entry,
+			__ATOMIC_RELAXED);
 }
 
 static __rte_noinline int32_t
@@ -767,7 +794,9 @@ add_depth_small_v20(struct rte_lpm_v20 *lpm, uint32_t ip, uint8_t depth,
 					 * Setting tbl8 entry in one go to avoid
 					 * race conditions
 					 */
-					lpm->tbl8[j] = new_tbl8_entry;
+					__atomic_store(&lpm->tbl8[j],
+						&new_tbl8_entry,
+						__ATOMIC_RELAXED);
 
 					continue;
 				}
@@ -837,7 +866,9 @@ add_depth_small_v1604(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
 					 * Setting tbl8 entry in one go to avoid
 					 * race conditions
 					 */
-					lpm->tbl8[j] = new_tbl8_entry;
+					__atomic_store(&lpm->tbl8[j],
+						&new_tbl8_entry,
+						__ATOMIC_RELAXED);
 
 					continue;
 				}
@@ -965,7 +996,8 @@ add_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth,
 				 * Setting tbl8 entry in one go to avoid race
 				 * condition
 				 */
-				lpm->tbl8[i] = new_tbl8_entry;
+				__atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
+						__ATOMIC_RELAXED);
 
 				continue;
 			}
@@ -1100,7 +1132,8 @@ add_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
 				 * Setting tbl8 entry in one go to avoid race
 				 * condition
 				 */
-				lpm->tbl8[i] = new_tbl8_entry;
+				__atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
+						__ATOMIC_RELAXED);
 
 				continue;
 			}
@@ -1393,7 +1426,9 @@ delete_depth_small_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked,
 					RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {
 
 					if (lpm->tbl8[j].depth <= depth)
-						lpm->tbl8[j] = new_tbl8_entry;
+						__atomic_store(&lpm->tbl8[j],
+							&new_tbl8_entry,
+							__ATOMIC_RELAXED);
 				}
 			}
 		}
@@ -1490,7 +1525,9 @@ delete_depth_small_v1604(struct rte_lpm *lpm, uint32_t ip_masked,
 					RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {
 
 					if (lpm->tbl8[j].depth <= depth)
-						lpm->tbl8[j] = new_tbl8_entry;
+						__atomic_store(&lpm->tbl8[j],
+							&new_tbl8_entry,
+							__ATOMIC_RELAXED);
 				}
 			}
 		}
@@ -1646,7 +1683,8 @@ delete_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked,
 		 */
 		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
 			if (lpm->tbl8[i].depth <= depth)
-				lpm->tbl8[i] = new_tbl8_entry;
+				__atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
+						__ATOMIC_RELAXED);
 		}
 	}
 
@@ -1677,7 +1715,8 @@ delete_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked,
 		/* Set tbl24 before freeing tbl8 to avoid race condition.
 		 * Prevent the free of the tbl8 group from hoisting.
 		 */
-		lpm->tbl24[tbl24_index] = new_tbl24_entry;
+		__atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
+				__ATOMIC_RELAXED);
 		__atomic_thread_fence(__ATOMIC_RELEASE);
 		tbl8_free_v20(lpm->tbl8, tbl8_group_start);
 	}
@@ -1730,7 +1769,8 @@ delete_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked,
 		 */
 		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
 			if (lpm->tbl8[i].depth <= depth)
-				lpm->tbl8[i] = new_tbl8_entry;
+				__atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
+						__ATOMIC_RELAXED);
 		}
 	}
 
@@ -1761,7 +1801,8 @@ delete_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked,
 		/* Set tbl24 before freeing tbl8 to avoid race condition.
 		 * Prevent the free of the tbl8 group from hoisting.
 		 */
-		lpm->tbl24[tbl24_index] = new_tbl24_entry;
+		__atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
+				__ATOMIC_RELAXED);
 		__atomic_thread_fence(__ATOMIC_RELEASE);
 		tbl8_free_v1604(lpm->tbl8, tbl8_group_start);
 	}
-- 
2.17.1


Thread overview: 24+ messages
2019-06-05  5:54 [dpdk-dev] [PATCH v1 1/2] lib/lpm: memory orderings to avoid race conditions for v1604 Ruifeng Wang
2019-06-05  5:54 ` [dpdk-dev] [PATCH v1 2/2] lib/lpm: memory orderings to avoid race conditions for v20 Ruifeng Wang
2019-06-05 10:50 ` [dpdk-dev] [PATCH v1 1/2] lib/lpm: memory orderings to avoid race conditions for v1604 Medvedkin, Vladimir
2019-06-05 14:12   ` Ruifeng Wang (Arm Technology China)
2019-06-05 19:23     ` Honnappa Nagarahalli
2019-06-10 15:22       ` Medvedkin, Vladimir
2019-06-17 15:27         ` Ruifeng Wang (Arm Technology China)
2019-06-17 15:33           ` Medvedkin, Vladimir
2019-07-12  3:09 ` [dpdk-dev] [PATCH v5 0/6] LPM4 memory ordering changes Ruifeng Wang
2019-07-12  3:09   ` [dpdk-dev] [PATCH v5 1/6] lib/lpm: not inline unnecessary functions Ruifeng Wang
2019-07-12  3:09   ` [dpdk-dev] [PATCH v5 2/6] lib/lpm: memory orderings to avoid race conditions for v1604 Ruifeng Wang
2019-07-12  3:09   ` [dpdk-dev] [PATCH v5 3/6] lib/lpm: memory orderings to avoid race conditions for v20 Ruifeng Wang
2019-07-12  3:09   ` Ruifeng Wang [this message]
2019-07-12  3:09   ` [dpdk-dev] [PATCH v5 5/6] lib/lpm: data update optimization for v1604 Ruifeng Wang
2019-07-12 20:08     ` Honnappa Nagarahalli
2019-07-12  3:09   ` [dpdk-dev] [PATCH v5 6/6] lib/lpm: data update optimization for v20 Ruifeng Wang
2019-07-12 20:09     ` Honnappa Nagarahalli
2019-07-18  6:22 ` [dpdk-dev] [PATCH v6 0/4] LPM4 memory ordering changes Ruifeng Wang
2019-07-18  6:22   ` [dpdk-dev] [PATCH v6 1/4] lib/lpm: not inline unnecessary functions Ruifeng Wang
2019-07-18  6:22   ` [dpdk-dev] [PATCH v6 2/4] lib/lpm: memory orderings to avoid race conditions for v1604 Ruifeng Wang
2019-07-18  6:22   ` [dpdk-dev] [PATCH v6 3/4] lib/lpm: memory orderings to avoid race conditions for v20 Ruifeng Wang
2019-07-18  6:22   ` [dpdk-dev] [PATCH v6 4/4] lib/lpm: use atomic store to avoid partial update Ruifeng Wang
2019-07-18 14:00   ` [dpdk-dev] [PATCH v6 0/4] LPM4 memory ordering changes Medvedkin, Vladimir
2019-07-19 10:37     ` Thomas Monjalon
