// SPDX-License-Identifier: LGPL-2.1
#define _GNU_SOURCE
#include <assert.h>
#include <errno.h>
#include <pthread.h>
#include <sched.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "rseq.h"

#define ARRAY_SIZE(arr)	(sizeof(arr) / sizeof((arr)[0]))
struct percpu_lock_entry {
	intptr_t v;
} __attribute__((aligned(128)));

struct percpu_lock {
	struct percpu_lock_entry c[CPU_SETSIZE];
};
struct test_data_entry {
	intptr_t count;
} __attribute__((aligned(128)));

struct spinlock_test_data {
	struct percpu_lock lock;
	struct test_data_entry c[CPU_SETSIZE];
	int reps;
};
struct percpu_list_node {
	intptr_t data;
	struct percpu_list_node *next;
};

struct percpu_list_entry {
	struct percpu_list_node *head;
} __attribute__((aligned(128)));

struct percpu_list {
	struct percpu_list_entry c[CPU_SETSIZE];
};
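/*
 * Each per-cpu entry above is aligned on 128 bytes so that entries for
 * different cpus never share a cache line (no false sharing); 128
 * rather than 64 accommodates architectures that prefetch cache lines
 * in adjacent pairs.
 */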
/* A simple percpu spinlock.  Returns the cpu the lock was acquired on. */
int rseq_this_cpu_lock(struct percpu_lock *lock)
{
	int cpu;

	for (;;) {
		int ret;

		cpu = rseq_cpu_start();
		ret = rseq_cmpeqv_storev(&lock->c[cpu].v,
					 0, 1, cpu);
		if (rseq_likely(!ret))
			break;
		/* Retry if comparison fails or rseq aborts. */
	}
	/*
	 * Acquire semantic when taking lock after control dependency.
	 * Matches rseq_smp_store_release().
	 */
	rseq_smp_acquire__after_ctrl_dep();
	return cpu;
}
void rseq_percpu_unlock(struct percpu_lock *lock, int cpu)
{
	assert(lock->c[cpu].v == 1);
	/*
	 * Release lock, with release semantic. Matches
	 * rseq_smp_acquire__after_ctrl_dep().
	 */
	rseq_smp_store_release(&lock->c[cpu].v, 0);
}
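/*
 * Note that a thread may migrate between lock and unlock: the cpu
 * returned by rseq_this_cpu_lock() names the lock entry that was
 * actually acquired, so callers must hand that value back to
 * rseq_percpu_unlock() rather than re-read the current cpu number.
 */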
void *test_percpu_spinlock_thread(void *arg)
{
	struct spinlock_test_data *data = arg;
	int i, cpu;

	if (rseq_register_current_thread()) {
		fprintf(stderr, "Error: rseq_register_current_thread(...) failed(%d): %s\n",
			errno, strerror(errno));
		abort();
	}
	for (i = 0; i < data->reps; i++) {
		cpu = rseq_this_cpu_lock(&data->lock);
		data->c[cpu].count++;
		rseq_percpu_unlock(&data->lock, cpu);
	}
	if (rseq_unregister_current_thread()) {
		fprintf(stderr, "Error: rseq_unregister_current_thread(...) failed(%d): %s\n",
			errno, strerror(errno));
		abort();
	}

	return NULL;
}
/*
 * A simple test which implements a sharded counter using a per-cpu
 * lock.  Obviously real applications might prefer to simply use a
 * per-cpu increment; however, this is reasonable for a test and the
 * lock can be extended to synchronize more complicated operations.
 */
void test_percpu_spinlock(void)
{
	const int num_threads = 200;
	int i;
	uint64_t sum;
	pthread_t test_threads[num_threads];
	struct spinlock_test_data data;

	memset(&data, 0, sizeof(data));
	data.reps = 5000;

	for (i = 0; i < num_threads; i++)
		pthread_create(&test_threads[i], NULL,
			       test_percpu_spinlock_thread, &data);

	for (i = 0; i < num_threads; i++)
		pthread_join(test_threads[i], NULL);

	sum = 0;
	for (i = 0; i < CPU_SETSIZE; i++)
		sum += data.c[i].count;

	assert(sum == (uint64_t)data.reps * num_threads);
}
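/*
 * For reference, the lock-free "per-cpu increment" alluded to in the
 * comment above needs a single rseq_addv() restartable sequence per
 * iteration. A minimal sketch, assuming the rseq_addv() helper from
 * rseq.h; this illustrative helper is not part of the original test
 * and is not wired into main():
 */
static __attribute__((unused))
void this_cpu_count_inc(struct spinlock_test_data *data)
{
	int ret;

	do {
		int cpu = rseq_cpu_start();

		/* Non-zero ret means the sequence aborted (preemption,
		 * migration or signal delivery); retry on a fresh cpu. */
		ret = rseq_addv(&data->c[cpu].count, 1, cpu);
	} while (rseq_unlikely(ret));
}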
void this_cpu_list_push(struct percpu_list *list,
			struct percpu_list_node *node,
			int *_cpu)
{
	int cpu;

	for (;;) {
		intptr_t *targetptr, newval, expect;
		int ret;

		cpu = rseq_cpu_start();
		/* Load list->c[cpu].head with single-copy atomicity. */
		expect = (intptr_t)RSEQ_READ_ONCE(list->c[cpu].head);
		newval = (intptr_t)node;
		targetptr = (intptr_t *)&list->c[cpu].head;
		node->next = (struct percpu_list_node *)expect;
		ret = rseq_cmpeqv_storev(targetptr, expect, newval, cpu);
		if (rseq_likely(!ret))
			break;
		/* Retry if comparison fails or rseq aborts. */
	}
	if (_cpu)
		*_cpu = cpu;
}
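/*
 * Note that the head value sampled by push may be stale by commit
 * time: if another thread ran on the same cpu in between, the compare
 * in rseq_cmpeqv_storev() fails, and if this thread is preempted,
 * migrated or signalled mid-sequence, the sequence aborts; either way
 * the loop retries with freshly read values.
 */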
/*
 * Unlike a traditional lock-less linked list, the availability of an
 * rseq primitive allows us to implement pop without concerns over
 * ABA-type races.
 */
struct percpu_list_node *this_cpu_list_pop(struct percpu_list *list,
					   int *_cpu)
{
	for (;;) {
		struct percpu_list_node *head;
		intptr_t *targetptr, expectnot, *load;
		off_t offset;
		int ret, cpu;

		cpu = rseq_cpu_start();
		targetptr = (intptr_t *)&list->c[cpu].head;
		expectnot = (intptr_t)NULL;
		offset = offsetof(struct percpu_list_node, next);
		load = (intptr_t *)&head;
		ret = rseq_cmpnev_storeoffp_load(targetptr, expectnot,
						 offset, load, cpu);
		if (rseq_likely(!ret)) {
			if (_cpu)
				*_cpu = cpu;
			return head;
		}
		if (ret > 0)
			return NULL;	/* list empty */
		/* Retry if rseq aborts. */
	}
}
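/*
 * Why pop needs no ABA protection: a given list->c[cpu] entry is only
 * modified by code running on that cpu, one committed sequence at a
 * time, and rseq_cmpnev_storeoffp_load() loads head, loads head->next
 * and stores it back as one restartable sequence. Nothing can pop and
 * reuse the node between the pointer sample and the commit.
 */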
/*
 * __percpu_list_pop is not safe against concurrent accesses. Should
 * only be used on lists that are not concurrently modified.
 */
struct percpu_list_node *__percpu_list_pop(struct percpu_list *list, int cpu)
{
	struct percpu_list_node *node;

	node = list->c[cpu].head;
	if (!node)
		return NULL;
	list->c[cpu].head = node->next;
	return node;
}
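/*
 * In this test it is only called from test_percpu_list() after every
 * worker thread has been joined, which satisfies that requirement.
 */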
void *test_percpu_list_thread(void *arg)
{
	int i;
	struct percpu_list *list = (struct percpu_list *)arg;

	if (rseq_register_current_thread()) {
		fprintf(stderr, "Error: rseq_register_current_thread(...) failed(%d): %s\n",
			errno, strerror(errno));
		abort();
	}

	for (i = 0; i < 100000; i++) {
		struct percpu_list_node *node;

		node = this_cpu_list_pop(list, NULL);
		sched_yield();	/* encourage shuffling */
		if (node)
			this_cpu_list_push(list, node, NULL);
	}

	if (rseq_unregister_current_thread()) {
		fprintf(stderr, "Error: rseq_unregister_current_thread(...) failed(%d): %s\n",
			errno, strerror(errno));
		abort();
	}

	return NULL;
}
/* Simultaneous modification to a per-cpu linked list from many threads.  */
void test_percpu_list(void)
{
	int i, j;
	uint64_t sum = 0, expected_sum = 0;
	struct percpu_list list;
	pthread_t test_threads[200];
	cpu_set_t allowed_cpus;

	memset(&list, 0, sizeof(list));

	/* Generate list entries for every usable cpu. */
	sched_getaffinity(0, sizeof(allowed_cpus), &allowed_cpus);
	for (i = 0; i < CPU_SETSIZE; i++) {
		if (!CPU_ISSET(i, &allowed_cpus))
			continue;
		for (j = 1; j <= 100; j++) {
			struct percpu_list_node *node;

			expected_sum += j;

			node = malloc(sizeof(*node));
			assert(node);
			node->data = j;
			node->next = list.c[i].head;
			list.c[i].head = node;
		}
	}

	for (i = 0; i < 200; i++)
		pthread_create(&test_threads[i], NULL,
			       test_percpu_list_thread, &list);

	for (i = 0; i < 200; i++)
		pthread_join(test_threads[i], NULL);

	for (i = 0; i < CPU_SETSIZE; i++) {
		struct percpu_list_node *node;

		if (!CPU_ISSET(i, &allowed_cpus))
			continue;

		while ((node = __percpu_list_pop(&list, i))) {
			sum += node->data;
			free(node);
		}
	}

	/*
	 * All entries should now be accounted for (unless some external
	 * actor is interfering with our allowed affinity while this
	 * test is running).
	 */
	assert(sum == expected_sum);
}
int main(int argc, char **argv)
{
	if (rseq_register_current_thread()) {
		fprintf(stderr, "Error: rseq_register_current_thread(...) failed(%d): %s\n",
			errno, strerror(errno));
		goto error;
	}
	printf("spinlock\n");
	test_percpu_spinlock();
	printf("percpu_list\n");
	test_percpu_list();
	if (rseq_unregister_current_thread()) {
		fprintf(stderr, "Error: rseq_unregister_current_thread(...) failed(%d): %s\n",
			errno, strerror(errno));
		goto error;
	}
	return 0;

error:
	return -1;
}