patches for DPDK stable branches
* [dpdk-stable] [PATCH v2 1/2] ticketlock: ticket based to improve fairness
From: Joyce Kong @ 2019-01-18  9:15 UTC (permalink / raw)
  To: dev
  Cc: OSS-DPDK-dev, nd, thomas, jerin.jacob, stephen,
	honnappa.nagarahalli, gavin.hu, stable, Joyce kong

The spinlock implementation is unfair: some threads may take the lock
aggressively while leaving other threads starving for a long time, so
that, within the same period, some threads take the lock far more
times than the others.

The ticketlock gives each waiting thread a ticket, and threads take
the lock one by one in ticket order: first come, first served. This
avoids long starvation and makes lock acquisition more predictable,
as the per-core counts below show.

*** ticketlock_autotest with this patch ***
    Core [0] count = 496
    Core [1] count = 495
    Core [2] count = 498
    ...
    Core [209] count = 488
    Core [210] count = 490
    Core [211] count = 474
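
For illustration, a minimal usage sketch of the API added by this
patch (the lock variable, the counter and the worker() function are
hypothetical; the rte_ticketlock_* calls, the initializer and the
include path follow the headers and tests in this series):

    #include <stdint.h>
    #include <generic/rte_ticketlock.h>

    static rte_ticketlock_t counter_lock = RTE_TICKETLOCK_INITIALIZER;
    static uint64_t shared_counter;

    /* Run concurrently on several lcores, e.g. via rte_eal_remote_launch(). */
    static int
    worker(__attribute__((unused)) void *arg)
    {
        rte_ticketlock_lock(&counter_lock);   /* waiters are served in arrival order */
        shared_counter++;                     /* critical section */
        rte_ticketlock_unlock(&counter_lock);
        return 0;
    }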

Suggested-by: Jerin Jacob <jerinj@marvell.com>
Signed-off-by: Joyce Kong <joyce.kong@arm.com>
Reviewed-by: Gavin Hu <gavin.hu@arm.com>
Reviewed-by: Phil Yang <phil.yang@arm.com>
---
 doc/api/doxy-api-index.md                          |   2 +-
 lib/librte_eal/common/Makefile                     |   2 +-
 .../common/include/generic/rte_ticketlock.h        | 198 +++++++++++++++++++++
 lib/librte_eal/common/meson.build                  |   1 +
 4 files changed, 202 insertions(+), 2 deletions(-)
 create mode 100644 lib/librte_eal/common/include/generic/rte_ticketlock.h

diff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md
index d95ad56..aacc66b 100644
--- a/doc/api/doxy-api-index.md
+++ b/doc/api/doxy-api-index.md
@@ -65,6 +65,7 @@ The public API headers are grouped by topics:
   [atomic]             (@ref rte_atomic.h),
   [rwlock]             (@ref rte_rwlock.h),
-  [spinlock]           (@ref rte_spinlock.h)
+  [spinlock]           (@ref rte_spinlock.h),
+  [ticketlock]         (@ref rte_ticketlock.h)
 
 - **CPU arch**:
   [branch prediction]  (@ref rte_branch_prediction.h),
diff --git a/lib/librte_eal/common/Makefile b/lib/librte_eal/common/Makefile
index 87d8c45..99e948b 100644
--- a/lib/librte_eal/common/Makefile
+++ b/lib/librte_eal/common/Makefile
@@ -20,7 +20,7 @@ INC += rte_bitmap.h rte_vfio.h rte_hypervisor.h rte_test.h
 INC += rte_reciprocal.h rte_fbarray.h rte_uuid.h
 
 GENERIC_INC := rte_atomic.h rte_byteorder.h rte_cycles.h rte_prefetch.h
-GENERIC_INC += rte_spinlock.h rte_memcpy.h rte_cpuflags.h rte_rwlock.h
+GENERIC_INC += rte_spinlock.h rte_memcpy.h rte_cpuflags.h rte_rwlock.h rte_ticketlock.h
 GENERIC_INC += rte_vect.h rte_pause.h rte_io.h
 
 # defined in mk/arch/$(RTE_ARCH)/rte.vars.mk
diff --git a/lib/librte_eal/common/include/generic/rte_ticketlock.h b/lib/librte_eal/common/include/generic/rte_ticketlock.h
new file mode 100644
index 0000000..2979528
--- /dev/null
+++ b/lib/librte_eal/common/include/generic/rte_ticketlock.h
@@ -0,0 +1,198 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018-2019 Arm Limited
+ */
+
+#ifndef _RTE_TICKETLOCK_H_
+#define _RTE_TICKETLOCK_H_
+
+/**
+ * @file
+ *
+ * RTE ticketlocks
+ *
+ * This file defines an API for ticket locks, which give each waiting
+ * thread a ticket and grant the lock in FIFO order. A waiting thread
+ * spins in a loop until its ticket number becomes the one being served.
+ *
+ * All locks must be initialised before use, and only initialised once.
+ *
+ */
+
+#include <rte_lcore.h>
+#include <rte_common.h>
+#include <rte_pause.h>
+
+/**
+ * The rte_ticketlock_t type.
+ */
+typedef struct {
+	uint16_t current;
+	uint16_t next;
+} rte_ticketlock_t;
+
+/**
+ * A static ticketlock initializer.
+ */
+#define RTE_TICKETLOCK_INITIALIZER { 0 }
+
+/**
+ * Initialize the ticketlock to an unlocked state.
+ *
+ * @param tl
+ *   A pointer to the ticketlock.
+ */
+static inline __rte_experimental void
+rte_ticketlock_init(rte_ticketlock_t *tl)
+{
+	__atomic_store_n(&tl->current, 0, __ATOMIC_RELAXED);
+	__atomic_store_n(&tl->next, 0, __ATOMIC_RELAXED);
+}
+
+/**
+ * Take the ticketlock.
+ *
+ * @param tl
+ *   A pointer to the ticketlock.
+ */
+static inline __rte_experimental void
+rte_ticketlock_lock(rte_ticketlock_t *tl)
+{
+	uint16_t me = __atomic_fetch_add(&tl->next, 1, __ATOMIC_RELAXED);
+	while (__atomic_load_n(&tl->current, __ATOMIC_ACQUIRE) != me)
+		rte_pause();
+}
+
+/**
+ * Release the ticketlock.
+ *
+ * @param tl
+ *   A pointer to the ticketlock.
+ */
+static inline __rte_experimental void
+rte_ticketlock_unlock(rte_ticketlock_t *tl)
+{
+	uint16_t i = __atomic_load_n(&tl->current, __ATOMIC_RELAXED);
+	i++;
+	__atomic_store_n(&tl->current, i, __ATOMIC_RELEASE);
+}
+
+/**
+ * Try to take the lock.
+ *
+ * @param tl
+ *   A pointer to the ticketlock.
+ * @return
+ *   1 if the lock is successfully taken; 0 otherwise.
+ */
+static inline __rte_experimental int
+rte_ticketlock_trylock(rte_ticketlock_t *tl)
+{
+	uint16_t next = __atomic_load_n(&tl->next, __ATOMIC_RELAXED);
+	uint16_t cur = __atomic_load_n(&tl->current, __ATOMIC_RELAXED);
+	if (next == cur &&
+	    __atomic_compare_exchange_n(&tl->next, &next, next + 1, 0,
+			__ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
+		return 1;
+	return 0;
+}
+
+/**
+ * Test if the lock is taken.
+ *
+ * @param tl
+ *   A pointer to the ticketlock.
+ * @return
+ *   1 if the lock is currently taken; 0 otherwise.
+ */
+static inline __rte_experimental int
+rte_ticketlock_is_locked(rte_ticketlock_t *tl)
+{
+	return (__atomic_load_n(&tl->current, __ATOMIC_RELAXED) !=
+			__atomic_load_n(&tl->next, __ATOMIC_RELAXED));
+}
+
+/**
+ * The rte_ticketlock_recursive_t type.
+ */
+typedef struct {
+	rte_ticketlock_t tl; /**< the actual ticketlock */
+	volatile int user; /**< core id using lock, -1 for unused */
+	volatile int count; /**< count of times this lock has been taken */
+} rte_ticketlock_recursive_t;
+
+/**
+ * A static recursive ticketlock initializer.
+ */
+#define RTE_TICKETLOCK_RECURSIVE_INITIALIZER {RTE_TICKETLOCK_INITIALIZER, -1, 0}
+
+/**
+ * Initialize the recursive ticketlock to an unlocked state.
+ *
+ * @param tlr
+ *   A pointer to the recursive ticketlock.
+ */
+static inline __rte_experimental void rte_ticketlock_recursive_init(
+					rte_ticketlock_recursive_t *tlr)
+{
+	rte_ticketlock_init(&tlr->tl);
+	tlr->user = -1;
+	tlr->count = 0;
+}
+
+/**
+ * Take the recursive ticketlock.
+ *
+ * @param tlr
+ *   A pointer to the recursive ticketlock.
+ */
+static inline __rte_experimental void rte_ticketlock_recursive_lock(
+					rte_ticketlock_recursive_t *tlr)
+{
+	int id = rte_gettid();
+
+	if (tlr->user != id) {
+		rte_ticketlock_lock(&tlr->tl);
+		tlr->user = id;
+	}
+	tlr->count++;
+}
+
+/**
+ * Release the recursive ticketlock.
+ *
+ * @param tlr
+ *   A pointer to the recursive ticketlock.
+ */
+static inline __rte_experimental void rte_ticketlock_recursive_unlock(
+					rte_ticketlock_recursive_t *tlr)
+{
+	if (--(tlr->count) == 0) {
+		tlr->user = -1;
+		rte_ticketlock_unlock(&tlr->tl);
+	}
+
+}
+
+/**
+ * Try to take the recursive lock.
+ *
+ * @param tlr
+ *   A pointer to the recursive ticketlock.
+ * @return
+ *   1 if the lock is successfully taken; 0 otherwise.
+ */
+static inline __rte_experimental int rte_ticketlock_recursive_trylock(
+					rte_ticketlock_recursive_t *tlr)
+{
+	int id = rte_gettid();
+
+	if (tlr->user != id) {
+		if (rte_ticketlock_trylock(&tlr->tl) == 0)
+			return 0;
+		tlr->user = id;
+	}
+	tlr->count++;
+	return 1;
+}
+
+#endif /* _RTE_TICKETLOCK_H_ */
diff --git a/lib/librte_eal/common/meson.build b/lib/librte_eal/common/meson.build
index 2a10d57..23f9416 100644
--- a/lib/librte_eal/common/meson.build
+++ b/lib/librte_eal/common/meson.build
@@ -98,6 +98,7 @@ generic_headers = files(
 	'include/generic/rte_prefetch.h',
 	'include/generic/rte_rwlock.h',
 	'include/generic/rte_spinlock.h',
+	'include/generic/rte_ticketlock.h',
 	'include/generic/rte_vect.h')
 install_headers(generic_headers, subdir: 'generic')
 
-- 
2.7.4
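
To make the fairness mechanism concrete, here is a worked example of
how the next/current counters of rte_ticketlock_t evolve when three
lcores (A, B and C are illustrative names) contend for one lock:

    /*
     * Initial state:     current = 0, next = 0 (unlocked)
     * A calls lock():    ticket 0, next -> 1, current == 0, A owns the lock
     * B calls lock():    ticket 1, next -> 2, spins until current == 1
     * C calls lock():    ticket 2, next -> 3, spins until current == 2
     * A calls unlock():  current -> 1, B (the earliest waiter) proceeds
     * B calls unlock():  current -> 2, C proceeds
     * Waiters are thus served strictly in arrival order.
     */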

* [dpdk-stable] [PATCH v2 2/2] test/ticketlock: add ticket lock test case
From: Joyce Kong @ 2019-01-18  9:15 UTC (permalink / raw)
  To: dev
  Cc: OSS-DPDK-dev, nd, thomas, jerin.jacob, stephen,
	honnappa.nagarahalli, gavin.hu, stable

Add test cases for ticket lock, recursive ticket lock,
and ticket lock performance.

Signed-off-by: Joyce Kong <joyce.kong@arm.com>
Reviewed-by: Gavin Hu <gavin.hu@arm.com>
Reviewed-by: Phil Yang <phil.yang@arm.com>
---
 test/test/Makefile          |   1 +
 test/test/autotest_data.py  |   6 +
 test/test/meson.build       |   1 +
 test/test/test_ticketlock.c | 311 ++++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 319 insertions(+)
 create mode 100644 test/test/test_ticketlock.c

diff --git a/test/test/Makefile b/test/test/Makefile
index e7c8108..fa50aaf 100644
--- a/test/test/Makefile
+++ b/test/test/Makefile
@@ -65,6 +65,7 @@ SRCS-y += test_barrier.c
 SRCS-y += test_malloc.c
 SRCS-y += test_cycles.c
 SRCS-y += test_spinlock.c
+SRCS-y += test_ticketlock.c
 SRCS-y += test_memory.c
 SRCS-y += test_memzone.c
 SRCS-y += test_bitmap.c
diff --git a/test/test/autotest_data.py b/test/test/autotest_data.py
index 0fb7866..f18b9b9 100644
--- a/test/test/autotest_data.py
+++ b/test/test/autotest_data.py
@@ -171,6 +171,12 @@
         "Report":  None,
     },
     {
+        "Name":    "Ticketlock_autotest",
+        "Command": "ticketlock_autotest",
+        "Func":    ticketlock_autotest,
+        "Report": None,
+    },
+    {
         "Name":    "Byte order autotest",
         "Command": "byteorder_autotest",
         "Func":    default_autotest,
diff --git a/test/test/meson.build b/test/test/meson.build
index 9e45baf..d473f2b 100644
--- a/test/test/meson.build
+++ b/test/test/meson.build
@@ -103,6 +103,7 @@ test_sources = files('commands.c',
 	'test_timer.c',
 	'test_timer_perf.c',
 	'test_timer_racecond.c',
+	'test_ticketlock.c',
 	'test_version.c',
 	'virtual_pmd.c'
 )
diff --git a/test/test/test_ticketlock.c b/test/test/test_ticketlock.c
new file mode 100644
index 0000000..10294f6
--- /dev/null
+++ b/test/test/test_ticketlock.c
@@ -0,0 +1,311 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018-2019 Arm Limited
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/queue.h>
+
+#include <rte_common.h>
+#include <rte_memory.h>
+#include <rte_per_lcore.h>
+#include <rte_launch.h>
+#include <rte_eal.h>
+#include <rte_lcore.h>
+#include <rte_cycles.h>
+#include <generic/rte_ticketlock.h>
+#include <rte_atomic.h>
+
+#include "test.h"
+
+/*
+ * Ticketlock test
+ * ===============
+ *
+ * - There is a global ticketlock and a table of ticketlocks (one per lcore).
+ *
+ * - The test function takes all of these locks and launches the
+ *   ``test_ticketlock_per_core()`` function on each core (except the master).
+ *
+ *   - The function takes the global lock, displays a message, then releases
+ *     the global lock.
+ *   - The function takes the per-lcore lock, displays a message, then releases
+ *     the per-lcore lock.
+ *
+ * - The main function unlocks the per-lcore locks sequentially and
+ *   waits between each lock. This triggers the display of a message
+ *   for each core, in the correct order. The autotest script checks that
+ *   this order is correct.
+ *
+ * - A load test is carried out, with all cores attempting to take a single
+ *   lock multiple times.
+ */
+
+static rte_ticketlock_t sl, sl_try;
+static rte_ticketlock_t sl_tab[RTE_MAX_LCORE];
+static rte_ticketlock_recursive_t slr;
+static unsigned int count;
+
+static rte_atomic32_t synchro;
+
+static int
+test_ticketlock_per_core(__attribute__((unused)) void *arg)
+{
+	rte_ticketlock_lock(&sl);
+	printf("Global lock taken on core %u\n", rte_lcore_id());
+	rte_ticketlock_unlock(&sl);
+
+	rte_ticketlock_lock(&sl_tab[rte_lcore_id()]);
+	printf("Hello from core %u !\n", rte_lcore_id());
+	rte_ticketlock_unlock(&sl_tab[rte_lcore_id()]);
+
+	return 0;
+}
+
+static int
+test_ticketlock_recursive_per_core(__attribute__((unused)) void *arg)
+{
+	unsigned int id = rte_lcore_id();
+
+	rte_ticketlock_recursive_lock(&slr);
+	printf("Global recursive lock taken on core %u - count = %d\n",
+	       id, slr.count);
+	rte_ticketlock_recursive_lock(&slr);
+	printf("Global recursive lock taken on core %u - count = %d\n",
+	       id, slr.count);
+	rte_ticketlock_recursive_lock(&slr);
+	printf("Global recursive lock taken on core %u - count = %d\n",
+	       id, slr.count);
+
+	printf("Hello from within recursive locks from core %u !\n", id);
+
+	rte_ticketlock_recursive_unlock(&slr);
+	printf("Global recursive lock released on core %u - count = %d\n",
+	       id, slr.count);
+	rte_ticketlock_recursive_unlock(&slr);
+	printf("Global recursive lock released on core %u - count = %d\n",
+	       id, slr.count);
+	rte_ticketlock_recursive_unlock(&slr);
+	printf("Global recursive lock released on core %u - count = %d\n",
+	       id, slr.count);
+
+	return 0;
+}
+
+static rte_ticketlock_t lk = RTE_TICKETLOCK_INITIALIZER;
+static uint64_t lock_count[RTE_MAX_LCORE] = {0};
+
+#define TIME_MS 100
+
+static int
+load_loop_fn(void *func_param)
+{
+	uint64_t time_diff = 0, begin;
+	uint64_t hz = rte_get_timer_hz();
+	uint64_t lcount = 0;
+	const int use_lock = *(int *)func_param;
+	const unsigned int lcore = rte_lcore_id();
+
+	/* wait synchro for slaves */
+	if (lcore != rte_get_master_lcore())
+		while (rte_atomic32_read(&synchro) == 0)
+			;
+
+	begin = rte_get_timer_cycles();
+	while (time_diff < hz * TIME_MS / 1000) {
+		if (use_lock)
+			rte_ticketlock_lock(&lk);
+		lcount++;
+		if (use_lock)
+			rte_ticketlock_unlock(&lk);
+		/* delay to make lock duty cycle slightly realistic */
+		rte_delay_us(1);
+		time_diff = rte_get_timer_cycles() - begin;
+	}
+	lock_count[lcore] = lcount;
+	return 0;
+}
+
+static int
+test_ticketlock_perf(void)
+{
+	unsigned int i;
+	uint64_t total = 0;
+	int lock = 0;
+	const unsigned int lcore = rte_lcore_id();
+
+	printf("\nTest with no lock on single core...\n");
+	load_loop_fn(&lock);
+	printf("Core [%u] count = %"PRIu64"\n", lcore, lock_count[lcore]);
+	memset(lock_count, 0, sizeof(lock_count));
+
+	printf("\nTest with lock on single core...\n");
+	lock = 1;
+	load_loop_fn(&lock);
+	printf("Core [%u] count = %"PRIu64"\n", lcore, lock_count[lcore]);
+	memset(lock_count, 0, sizeof(lock_count));
+
+	printf("\nTest with lock on %u cores...\n", rte_lcore_count());
+
+	/* Clear synchro and start slaves */
+	rte_atomic32_set(&synchro, 0);
+	rte_eal_mp_remote_launch(load_loop_fn, &lock, SKIP_MASTER);
+
+	/* start synchro and launch test on master */
+	rte_atomic32_set(&synchro, 1);
+	load_loop_fn(&lock);
+
+	rte_eal_mp_wait_lcore();
+
+	RTE_LCORE_FOREACH(i) {
+		printf("Core [%u] count = %"PRIu64"\n", i, lock_count[i]);
+		total += lock_count[i];
+	}
+
+	printf("Total count = %"PRIu64"\n", total);
+
+	return 0;
+}
+
+/*
+ * Use rte_ticketlock_trylock() to try to take a ticketlock object.
+ * If the object is already locked (the master core holds it during
+ * this test), the call returns immediately and "count" is instead
+ * incremented by one under the global lock. The final value of
+ * "count" is checked later as the test result.
+ */
+static int
+test_ticketlock_try(__attribute__((unused)) void *arg)
+{
+	if (rte_ticketlock_trylock(&sl_try) == 0) {
+		rte_ticketlock_lock(&sl);
+		count++;
+		rte_ticketlock_unlock(&sl);
+	}
+
+	return 0;
+}
+
+
+/*
+ * Test rte_eal_get_lcore_state() in addition to ticketlocks
+ * as we have "waiting" then "running" lcores.
+ */
+static int
+test_ticketlock(void)
+{
+	int ret = 0;
+	int i;
+
+	/* slave cores should be waiting: print it */
+	RTE_LCORE_FOREACH_SLAVE(i) {
+		printf("lcore %d state: %d\n", i,
+		       (int) rte_eal_get_lcore_state(i));
+	}
+
+	rte_ticketlock_init(&sl);
+	rte_ticketlock_init(&sl_try);
+	rte_ticketlock_recursive_init(&slr);
+	RTE_LCORE_FOREACH_SLAVE(i) {
+		rte_ticketlock_init(&sl_tab[i]);
+	}
+
+	rte_ticketlock_lock(&sl);
+
+	RTE_LCORE_FOREACH_SLAVE(i) {
+		rte_ticketlock_lock(&sl_tab[i]);
+		rte_eal_remote_launch(test_ticketlock_per_core, NULL, i);
+	}
+
+	/* slave cores should be busy: print it */
+	RTE_LCORE_FOREACH_SLAVE(i) {
+		printf("lcore %d state: %d\n", i,
+		       (int) rte_eal_get_lcore_state(i));
+	}
+	rte_ticketlock_unlock(&sl);
+
+	RTE_LCORE_FOREACH_SLAVE(i) {
+		rte_ticketlock_unlock(&sl_tab[i]);
+		rte_delay_ms(10);
+	}
+
+	rte_eal_mp_wait_lcore();
+
+	rte_ticketlock_recursive_lock(&slr);
+
+	/*
+	 * Try to acquire a lock that we already own
+	 */
+	if (!rte_ticketlock_recursive_trylock(&slr)) {
+		printf("rte_ticketlock_recursive_trylock failed on a lock that "
+		       "we already own\n");
+		ret = -1;
+	} else
+		rte_ticketlock_recursive_unlock(&slr);
+
+	RTE_LCORE_FOREACH_SLAVE(i) {
+		rte_eal_remote_launch(test_ticketlock_recursive_per_core,
+							  NULL, i);
+	}
+	rte_ticketlock_recursive_unlock(&slr);
+	rte_eal_mp_wait_lcore();
+
+	/*
+	 * Test that try-locking an already locked object returns
+	 * immediately. The master lcore locks the ticketlock object
+	 * first, then launches all the slave lcores to trylock the
+	 * same object. Every slave lcore should fail to take it,
+	 * return immediately and increment "count" (initialized to
+	 * zero) by one.
+	 * Finally, "count" is compared against the number of slave
+	 * lcores to verify that try-locking a locked ticketlock
+	 * object behaves correctly.
+	 */
+	if (rte_ticketlock_trylock(&sl_try) == 0)
+		return -1;
+
+	count = 0;
+	RTE_LCORE_FOREACH_SLAVE(i) {
+		rte_eal_remote_launch(test_ticketlock_try, NULL, i);
+	}
+	rte_eal_mp_wait_lcore();
+	rte_ticketlock_unlock(&sl_try);
+	if (rte_ticketlock_is_locked(&sl)) {
+		printf("ticketlock is locked but it should not be\n");
+		return -1;
+	}
+	rte_ticketlock_lock(&sl);
+	if (count != (rte_lcore_count() - 1))
+		ret = -1;
+
+	rte_ticketlock_unlock(&sl);
+
+	/*
+	 * Test if it can trylock recursively.
+	 * Use rte_ticketlock_recursive_trylock() to check if it can lock
+	 * a ticketlock object recursively. Here it will try to lock a
+	 * ticketlock object twice.
+	 */
+	if (rte_ticketlock_recursive_trylock(&slr) == 0) {
+		printf("The first ticketlock_recursive_trylock failed "
+			   "but it should have succeeded\n");
+		return -1;
+	}
+	if (rte_ticketlock_recursive_trylock(&slr) == 0) {
+		printf("The second ticketlock_recursive_trylock failed "
+			   "but it should have succeeded\n");
+		return -1;
+	}
+	rte_ticketlock_recursive_unlock(&slr);
+	rte_ticketlock_recursive_unlock(&slr);
+
+	if (test_ticketlock_perf() < 0)
+		return -1;
+
+	return ret;
+}
+
+REGISTER_TEST_COMMAND(ticketlock_autotest, test_ticketlock);
-- 
2.7.4
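
As a companion to the recursive-lock cases exercised above, a minimal
sketch of how the recursive ticketlock from patch 1/2 is meant to be
used (the nested_section() function is hypothetical):

    static rte_ticketlock_recursive_t rlock =
            RTE_TICKETLOCK_RECURSIVE_INITIALIZER;

    static void
    nested_section(void)
    {
        rte_ticketlock_recursive_lock(&rlock);   /* count = 1, this lcore owns it */
        rte_ticketlock_recursive_lock(&rlock);   /* count = 2, no self-deadlock */
        rte_ticketlock_recursive_unlock(&rlock); /* count = 1, still owned */
        rte_ticketlock_recursive_unlock(&rlock); /* count = 0, released to others */
    }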
