/*
 * Sleepable Read-Copy Update mechanism for mutual exclusion.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright (C) IBM Corporation, 2006
 * Copyright (C) Fujitsu, 2012
 *
 * Author: Paul McKenney <paulmck@us.ibm.com>
 *	   Lai Jiangshan <laijs@cn.fujitsu.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU/ *.txt
 */

#define pr_fmt(fmt) "rcu: " fmt

#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/rcupdate_wait.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/srcu.h>

#include "rcu.h"
#include "rcu_segcblist.h"

/* Holdoff in nanoseconds for auto-expediting. */
#define DEFAULT_SRCU_EXP_HOLDOFF (25 * 1000)
static ulong exp_holdoff = DEFAULT_SRCU_EXP_HOLDOFF;
module_param(exp_holdoff, ulong, 0444);

/* Overflow-check frequency.  N bits roughly says every 2**N grace periods. */
static ulong counter_wrap_check = (ULONG_MAX >> 2);
module_param(counter_wrap_check, ulong, 0444);
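
/*
 * Both parameters above are mode 0444 and thus read-only at runtime, so
 * they are normally set on the kernel command line.  Because this file is
 * built in, the prefix is the object name, for example (assuming the
 * standard module-parameter naming documented in kernel-parameters.txt):
 *
 *	srcutree.exp_holdoff=0		// disable auto-expediting
 *	srcutree.counter_wrap_check=255	// check for wrap far more often
 */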

static void srcu_invoke_callbacks(struct work_struct *work);
static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay);
static void process_srcu(struct work_struct *work);

/* Wrappers for lock acquisition and release, see raw_spin_lock_rcu_node(). */
#define spin_lock_rcu_node(p)						\
do {									\
	spin_lock(&ACCESS_PRIVATE(p, lock));				\
	smp_mb__after_unlock_lock();					\
} while (0)

#define spin_unlock_rcu_node(p) spin_unlock(&ACCESS_PRIVATE(p, lock))

#define spin_lock_irq_rcu_node(p)					\
do {									\
	spin_lock_irq(&ACCESS_PRIVATE(p, lock));			\
	smp_mb__after_unlock_lock();					\
} while (0)

#define spin_unlock_irq_rcu_node(p)					\
	spin_unlock_irq(&ACCESS_PRIVATE(p, lock))

#define spin_lock_irqsave_rcu_node(p, flags)				\
do {									\
	spin_lock_irqsave(&ACCESS_PRIVATE(p, lock), flags);		\
	smp_mb__after_unlock_lock();					\
} while (0)

#define spin_unlock_irqrestore_rcu_node(p, flags)			\
	spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags)

/*
 * Initialize SRCU combining tree.  Note that statically allocated
 * srcu_struct structures might already have srcu_read_lock() and
 * srcu_read_unlock() running against them.  So if the is_static parameter
 * is set, don't initialize ->srcu_lock_count[] and ->srcu_unlock_count[].
 */
static void init_srcu_struct_nodes(struct srcu_struct *sp, bool is_static)
{
	int cpu;
	int i;
	int level = 0;
	int levelspread[RCU_NUM_LVLS];
	struct srcu_data *sdp;
	struct srcu_node *snp;
	struct srcu_node *snp_first;

	/* Work out the overall tree geometry. */
	sp->level[0] = &sp->node[0];
	for (i = 1; i < rcu_num_lvls; i++)
		sp->level[i] = sp->level[i - 1] + num_rcu_lvl[i - 1];
	rcu_init_levelspread(levelspread, num_rcu_lvl);

	/* Each pass through this loop initializes one srcu_node structure. */
	rcu_for_each_node_breadth_first(sp, snp) {
		spin_lock_init(&ACCESS_PRIVATE(snp, lock));
		WARN_ON_ONCE(ARRAY_SIZE(snp->srcu_have_cbs) !=
			     ARRAY_SIZE(snp->srcu_data_have_cbs));
		for (i = 0; i < ARRAY_SIZE(snp->srcu_have_cbs); i++) {
			snp->srcu_have_cbs[i] = 0;
			snp->srcu_data_have_cbs[i] = 0;
		}
		snp->srcu_gp_seq_needed_exp = 0;
		snp->grplo = -1;
		snp->grphi = -1;
		if (snp == &sp->node[0]) {
			/* Root node, special case. */
			snp->srcu_parent = NULL;
			continue;
		}

		/* Non-root node. */
		if (snp == sp->level[level + 1])
			level++;
		snp->srcu_parent = sp->level[level - 1] +
				   (snp - sp->level[level]) /
				   levelspread[level - 1];
	}

	/*
	 * Initialize the per-CPU srcu_data array, which feeds into the
	 * leaves of the srcu_node tree.
	 */
	WARN_ON_ONCE(ARRAY_SIZE(sdp->srcu_lock_count) !=
		     ARRAY_SIZE(sdp->srcu_unlock_count));
	level = rcu_num_lvls - 1;
	snp_first = sp->level[level];
	for_each_possible_cpu(cpu) {
		sdp = per_cpu_ptr(sp->sda, cpu);
		spin_lock_init(&ACCESS_PRIVATE(sdp, lock));
		rcu_segcblist_init(&sdp->srcu_cblist);
		sdp->srcu_cblist_invoking = false;
		sdp->srcu_gp_seq_needed = sp->srcu_gp_seq;
		sdp->srcu_gp_seq_needed_exp = sp->srcu_gp_seq;
		sdp->mynode = &snp_first[cpu / levelspread[level]];
		for (snp = sdp->mynode; snp != NULL; snp = snp->srcu_parent) {
			if (snp->grplo < 0)
				snp->grplo = cpu;
			snp->grphi = cpu;
		}
		sdp->cpu = cpu;
		INIT_DELAYED_WORK(&sdp->work, srcu_invoke_callbacks);
		sdp->sp = sp;
		sdp->grpmask = 1 << (cpu - sdp->mynode->grplo);
		if (is_static)
			continue;

		/* Dynamically allocated, better be no srcu_read_locks()! */
		for (i = 0; i < ARRAY_SIZE(sdp->srcu_lock_count); i++) {
			sdp->srcu_lock_count[i] = 0;
			sdp->srcu_unlock_count[i] = 0;
		}
	}
}

/*
 * Initialize non-compile-time initialized fields, including the
 * associated srcu_node and srcu_data structures.  The is_static
 * parameter is passed through to init_srcu_struct_nodes(), and
 * also tells us that ->sda has already been wired up to srcu_data.
 */
static int init_srcu_struct_fields(struct srcu_struct *sp, bool is_static)
{
	mutex_init(&sp->srcu_cb_mutex);
	mutex_init(&sp->srcu_gp_mutex);
	sp->srcu_idx = 0;
	sp->srcu_gp_seq = 0;
	sp->srcu_barrier_seq = 0;
	mutex_init(&sp->srcu_barrier_mutex);
	atomic_set(&sp->srcu_barrier_cpu_cnt, 0);
	INIT_DELAYED_WORK(&sp->work, process_srcu);
	if (!is_static)
		sp->sda = alloc_percpu(struct srcu_data);
	init_srcu_struct_nodes(sp, is_static);
	sp->srcu_gp_seq_needed_exp = 0;
	sp->srcu_last_gp_end = ktime_get_mono_fast_ns();
	smp_store_release(&sp->srcu_gp_seq_needed, 0); /* Init done. */
	return sp->sda ? 0 : -ENOMEM;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC

int __init_srcu_struct(struct srcu_struct *sp, const char *name,
		       struct lock_class_key *key)
{
	/* Don't re-initialize a lock while it is held. */
	debug_check_no_locks_freed((void *)sp, sizeof(*sp));
	lockdep_init_map(&sp->dep_map, name, key, 0);
	spin_lock_init(&ACCESS_PRIVATE(sp, lock));
	return init_srcu_struct_fields(sp, false);
}
EXPORT_SYMBOL_GPL(__init_srcu_struct);

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/**
 * init_srcu_struct - initialize a sleep-RCU structure
 * @sp: structure to initialize.
 *
 * Must invoke this on a given srcu_struct before passing that srcu_struct
 * to any other function.  Each srcu_struct represents a separate domain
 * of SRCU protection.
 */
int init_srcu_struct(struct srcu_struct *sp)
{
	spin_lock_init(&ACCESS_PRIVATE(sp, lock));
	return init_srcu_struct_fields(sp, false);
}
EXPORT_SYMBOL_GPL(init_srcu_struct);

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
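
/*
 * Illustrative sketch (hypothetical names, not part of this file): the
 * two usual ways to obtain a usable srcu_struct.  The static form needs
 * no runtime setup, while the dynamic form must be explicitly initialized
 * and, eventually, cleaned up:
 *
 *	DEFINE_SRCU(my_static_srcu);		// usable immediately
 *
 *	struct srcu_struct my_dyn_srcu;
 *
 *	if (init_srcu_struct(&my_dyn_srcu))	// -ENOMEM on failure
 *		return -ENOMEM;
 */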

/*
 * First-use initialization of statically allocated srcu_struct
 * structure.  Wiring up the combining tree is more than can be
 * done with compile-time initialization, so this check is added
 * to each update-side SRCU primitive.  Use sp->lock, which -is-
 * compile-time initialized, to resolve races involving multiple
 * CPUs trying to garner first-use privileges.
 */
static void check_init_srcu_struct(struct srcu_struct *sp)
{
	unsigned long flags;

	WARN_ON_ONCE(rcu_scheduler_active == RCU_SCHEDULER_INIT);
	/* The smp_load_acquire() pairs with the smp_store_release(). */
	if (!rcu_seq_state(smp_load_acquire(&sp->srcu_gp_seq_needed))) /*^^^*/
		return; /* Already initialized. */
	spin_lock_irqsave_rcu_node(sp, flags);
	if (!rcu_seq_state(sp->srcu_gp_seq_needed)) {
		spin_unlock_irqrestore_rcu_node(sp, flags);
		return;
	}
	init_srcu_struct_fields(sp, true);
	spin_unlock_irqrestore_rcu_node(sp, flags);
}

/*
 * Returns approximate total of the readers' ->srcu_lock_count[] values
 * for the rank of per-CPU counters specified by idx.
 */
static unsigned long srcu_readers_lock_idx(struct srcu_struct *sp, int idx)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu);

		sum += READ_ONCE(cpuc->srcu_lock_count[idx]);
	}
	return sum;
}

/*
 * Returns approximate total of the readers' ->srcu_unlock_count[] values
 * for the rank of per-CPU counters specified by idx.
 */
static unsigned long srcu_readers_unlock_idx(struct srcu_struct *sp, int idx)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu);

		sum += READ_ONCE(cpuc->srcu_unlock_count[idx]);
	}
	return sum;
}

/*
 * Return true if the number of pre-existing readers is determined to
 * be zero.
 */
static bool srcu_readers_active_idx_check(struct srcu_struct *sp, int idx)
{
	unsigned long unlocks;

	unlocks = srcu_readers_unlock_idx(sp, idx);

	/*
	 * Make sure that a lock is always counted if the corresponding
	 * unlock is counted.  Needs to be a smp_mb() as the read side may
	 * contain a read from a variable that is written to before the
	 * synchronize_srcu() in the write side.  In this case smp_mb()s
	 * A and B act like the store buffering pattern.
	 *
	 * This smp_mb() also pairs with smp_mb() C to prevent accesses
	 * after the synchronize_srcu() from being executed before the
	 * grace period ends.
	 */
	smp_mb(); /* A */

	/*
	 * If the locks are the same as the unlocks, then there must have
	 * been no readers on this index at some time in between.  This does
	 * not mean that there are no more readers, as one could have read
	 * the current index but not have incremented the lock counter yet.
	 *
	 * So suppose that the updater is preempted here for so long
	 * that more than ULONG_MAX non-nested readers come and go in
	 * the meantime.  It turns out that this cannot result in overflow
	 * because if a reader modifies its unlock count after we read it
	 * above, then that reader's next load of ->srcu_idx is guaranteed
	 * to get the new value, which will cause it to operate on the
	 * other bank of counters, where it cannot contribute to the
	 * overflow of these counters.  This means that there is a maximum
	 * of 2*NR_CPUS increments, which cannot overflow given current
	 * systems, especially not on 64-bit systems.
	 *
	 * OK, how about nesting?  This does impose a limit on nesting
	 * of floor(ULONG_MAX/NR_CPUS/2), which should be sufficient,
	 * especially on 64-bit systems.
	 */
	return srcu_readers_lock_idx(sp, idx) == unlocks;
}
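
/*
 * Illustrative store-buffering (SB) sketch for barriers A and B above
 * (hypothetical variable x, initially zero, written by the updater before
 * it starts waiting for readers; not part of this file):
 *
 *	READER				UPDATER
 *	------				-------
 *	inc ->srcu_lock_count[idx]	WRITE_ONCE(x, 1)
 *	smp_mb(); // B			smp_mb(); // A
 *	r1 = READ_ONCE(x)		r2 = sum of ->srcu_lock_count[idx]
 *
 * The SB guarantee is that r1 == 0 and "r2 misses the increment" cannot
 * both happen: if the updater's scan missed the reader's increment, the
 * reader is guaranteed to see x == 1, so a critical section that the
 * grace period declines to wait for cannot observe pre-grace-period state.
 */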

/**
 * srcu_readers_active - returns true if there are readers, and false
 *                       otherwise
 * @sp: which srcu_struct to count active readers (holding srcu_read_lock).
 *
 * Note that this is not an atomic primitive, and can therefore suffer
 * severe errors when invoked on an active srcu_struct.  That said, it
 * can be useful as an error check at cleanup time.
 */
static bool srcu_readers_active(struct srcu_struct *sp)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu);

		sum += READ_ONCE(cpuc->srcu_lock_count[0]);
		sum += READ_ONCE(cpuc->srcu_lock_count[1]);
		sum -= READ_ONCE(cpuc->srcu_unlock_count[0]);
		sum -= READ_ONCE(cpuc->srcu_unlock_count[1]);
	}
	return sum;
}

#define SRCU_INTERVAL		1

/*
 * Return grace-period delay, zero if there are expedited grace
 * periods pending, SRCU_INTERVAL otherwise.
 */
static unsigned long srcu_get_delay(struct srcu_struct *sp)
{
	if (ULONG_CMP_LT(READ_ONCE(sp->srcu_gp_seq),
			 READ_ONCE(sp->srcu_gp_seq_needed_exp)))
		return 0;
	return SRCU_INTERVAL;
}

/* Helper for cleanup_srcu_struct() and cleanup_srcu_struct_quiesced(). */
void _cleanup_srcu_struct(struct srcu_struct *sp, bool quiesced)
{
	int cpu;

	if (WARN_ON(!srcu_get_delay(sp)))
		return; /* Just leak it! */
	if (WARN_ON(srcu_readers_active(sp)))
		return; /* Just leak it! */
	if (quiesced) {
		if (WARN_ON(delayed_work_pending(&sp->work)))
			return; /* Just leak it! */
	} else {
		flush_delayed_work(&sp->work);
	}
	for_each_possible_cpu(cpu)
		if (quiesced) {
			if (WARN_ON(delayed_work_pending(&per_cpu_ptr(sp->sda, cpu)->work)))
				return; /* Just leak it! */
		} else {
			flush_delayed_work(&per_cpu_ptr(sp->sda, cpu)->work);
		}
	if (WARN_ON(rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) != SRCU_STATE_IDLE) ||
	    WARN_ON(srcu_readers_active(sp))) {
		pr_info("%s: Active srcu_struct %p state: %d\n",
			__func__, sp, rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)));
		return; /* Caller forgot to stop doing call_srcu()? */
	}
	free_percpu(sp->sda);
	sp->sda = NULL;
}
EXPORT_SYMBOL_GPL(_cleanup_srcu_struct);
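
/*
 * For reference, the public wrappers in include/linux/srcu.h (as of this
 * version) map onto the helper above: cleanup_srcu_struct(sp) passes
 * quiesced = false, so pending grace-period work is flushed, while
 * cleanup_srcu_struct_quiesced(sp) passes quiesced = true, so pending
 * work is a caller bug and only triggers the WARN_ONs above.
 */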

/*
 * Counts the new reader in the appropriate per-CPU element of the
 * srcu_struct.
 * Returns an index that must be passed to the matching srcu_read_unlock().
 */
int __srcu_read_lock(struct srcu_struct *sp)
{
	int idx;

	idx = READ_ONCE(sp->srcu_idx) & 0x1;
	this_cpu_inc(sp->sda->srcu_lock_count[idx]);
	smp_mb(); /* B */  /* Avoid leaking the critical section. */
	return idx;
}
EXPORT_SYMBOL_GPL(__srcu_read_lock);

/*
 * Removes the count for the old reader from the appropriate per-CPU
 * element of the srcu_struct.  Note that this may well be a different
 * CPU than that which was incremented by the corresponding srcu_read_lock().
 */
void __srcu_read_unlock(struct srcu_struct *sp, int idx)
{
	smp_mb(); /* C */  /* Avoid leaking the critical section. */
	this_cpu_inc(sp->sda->srcu_unlock_count[idx]);
}
EXPORT_SYMBOL_GPL(__srcu_read_unlock);
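
/*
 * Illustrative reader sketch (hypothetical "my_srcu", "my_data", and
 * "struct foo"; not part of this file).  Normal code uses the
 * srcu_read_lock()/srcu_read_unlock() wrappers, which call the two
 * functions above:
 *
 *	struct foo *p;
 *	int idx;
 *
 *	idx = srcu_read_lock(&my_srcu);
 *	p = srcu_dereference(my_data, &my_srcu);
 *	// ... use p; unlike plain RCU, sleeping is legal here ...
 *	srcu_read_unlock(&my_srcu, idx);
 */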

/*
 * We use an adaptive strategy for synchronize_srcu() and especially for
 * synchronize_srcu_expedited().  We spin for a fixed time period
 * (defined below) to allow SRCU readers to exit their read-side critical
 * sections.  If there are still some readers after a few microseconds,
 * we repeatedly block for 1-millisecond time periods.
 */
#define SRCU_RETRY_CHECK_DELAY		5

/*
 * Start an SRCU grace period.
 */
static void srcu_gp_start(struct srcu_struct *sp)
{
	struct srcu_data *sdp = this_cpu_ptr(sp->sda);
	int state;

	lockdep_assert_held(&ACCESS_PRIVATE(sp, lock));
	WARN_ON_ONCE(ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed));
	spin_lock_rcu_node(sdp);  /* Interrupts already disabled. */
	rcu_segcblist_advance(&sdp->srcu_cblist,
			      rcu_seq_current(&sp->srcu_gp_seq));
	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
				       rcu_seq_snap(&sp->srcu_gp_seq));
	spin_unlock_rcu_node(sdp);  /* Interrupts remain disabled. */
	smp_mb(); /* Order prior store to ->srcu_gp_seq_needed vs. GP start. */
	rcu_seq_start(&sp->srcu_gp_seq);
	state = rcu_seq_state(READ_ONCE(sp->srcu_gp_seq));
	WARN_ON_ONCE(state != SRCU_STATE_SCAN1);
}

/*
 * Track online CPUs to guide callback workqueue placement.
 */
DEFINE_PER_CPU(bool, srcu_online);

void srcu_online_cpu(unsigned int cpu)
{
	WRITE_ONCE(per_cpu(srcu_online, cpu), true);
}

void srcu_offline_cpu(unsigned int cpu)
{
	WRITE_ONCE(per_cpu(srcu_online, cpu), false);
}

/*
 * Place the workqueue handler on the specified CPU if online, otherwise
 * just run it wherever.  This is useful for placing workqueue handlers
 * that are to invoke the specified CPU's callbacks.
 */
static bool srcu_queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
				       struct delayed_work *dwork,
				       unsigned long delay)
{
	bool ret;

	preempt_disable();
	if (READ_ONCE(per_cpu(srcu_online, cpu)))
		ret = queue_delayed_work_on(cpu, wq, dwork, delay);
	else
		ret = queue_delayed_work(wq, dwork, delay);
	preempt_enable();
	return ret;
}

/*
 * Schedule callback invocation for the specified srcu_data structure,
 * if possible, on the corresponding CPU.
 */
static void srcu_schedule_cbs_sdp(struct srcu_data *sdp, unsigned long delay)
{
	srcu_queue_delayed_work_on(sdp->cpu, rcu_gp_wq, &sdp->work, delay);
}

/*
 * Schedule callback invocation for all srcu_data structures associated
 * with the specified srcu_node structure that have callbacks for the
 * just-completed grace period, the one corresponding to idx.  If possible,
 * schedule this invocation on the corresponding CPUs.
 */
static void srcu_schedule_cbs_snp(struct srcu_struct *sp, struct srcu_node *snp,
				  unsigned long mask, unsigned long delay)
{
	int cpu;

	for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
		if (!(mask & (1 << (cpu - snp->grplo))))
			continue;
		srcu_schedule_cbs_sdp(per_cpu_ptr(sp->sda, cpu), delay);
	}
}

/*
 * Note the end of an SRCU grace period.  Initiates callback invocation
 * and starts a new grace period if needed.
 *
 * The ->srcu_cb_mutex acquisition does not protect any data, but
 * instead prevents more than one grace period from starting while we
 * are initiating callback invocation.  This allows the ->srcu_have_cbs[]
 * array to have a finite number of elements.
 */
static void srcu_gp_end(struct srcu_struct *sp)
{
	unsigned long cbdelay;
	bool cbs;
	bool last_lvl;
	int cpu;
	unsigned long flags;
	unsigned long gpseq;
	int idx;
	unsigned long mask;
	struct srcu_data *sdp;
	struct srcu_node *snp;

	/* Prevent more than one additional grace period. */
	mutex_lock(&sp->srcu_cb_mutex);

	/* End the current grace period. */
	spin_lock_irq_rcu_node(sp);
	idx = rcu_seq_state(sp->srcu_gp_seq);
	WARN_ON_ONCE(idx != SRCU_STATE_SCAN2);
	cbdelay = srcu_get_delay(sp);
	sp->srcu_last_gp_end = ktime_get_mono_fast_ns();
	rcu_seq_end(&sp->srcu_gp_seq);
	gpseq = rcu_seq_current(&sp->srcu_gp_seq);
	if (ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, gpseq))
		sp->srcu_gp_seq_needed_exp = gpseq;
	spin_unlock_irq_rcu_node(sp);
	mutex_unlock(&sp->srcu_gp_mutex);
	/* A new grace period can start at this point.  But only one. */

	/* Initiate callback invocation as needed. */
	idx = rcu_seq_ctr(gpseq) % ARRAY_SIZE(snp->srcu_have_cbs);
	rcu_for_each_node_breadth_first(sp, snp) {
		spin_lock_irq_rcu_node(snp);
		cbs = false;
		last_lvl = snp >= sp->level[rcu_num_lvls - 1];
		if (last_lvl)
			cbs = snp->srcu_have_cbs[idx] == gpseq;
		snp->srcu_have_cbs[idx] = gpseq;
		rcu_seq_set_state(&snp->srcu_have_cbs[idx], 1);
		if (ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, gpseq))
			snp->srcu_gp_seq_needed_exp = gpseq;
		mask = snp->srcu_data_have_cbs[idx];
		snp->srcu_data_have_cbs[idx] = 0;
		spin_unlock_irq_rcu_node(snp);
		if (cbs)
			srcu_schedule_cbs_snp(sp, snp, mask, cbdelay);

		/* Occasionally prevent srcu_data counter wrap. */
		if (!(gpseq & counter_wrap_check) && last_lvl)
			for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
				sdp = per_cpu_ptr(sp->sda, cpu);
				spin_lock_irqsave_rcu_node(sdp, flags);
				if (ULONG_CMP_GE(gpseq,
						 sdp->srcu_gp_seq_needed + 100))
					sdp->srcu_gp_seq_needed = gpseq;
				if (ULONG_CMP_GE(gpseq,
						 sdp->srcu_gp_seq_needed_exp + 100))
					sdp->srcu_gp_seq_needed_exp = gpseq;
				spin_unlock_irqrestore_rcu_node(sdp, flags);
			}
	}

	/* Callback initiation done, allow grace periods after next. */
	mutex_unlock(&sp->srcu_cb_mutex);

	/* Start a new grace period if needed. */
	spin_lock_irq_rcu_node(sp);
	gpseq = rcu_seq_current(&sp->srcu_gp_seq);
	if (!rcu_seq_state(gpseq) &&
	    ULONG_CMP_LT(gpseq, sp->srcu_gp_seq_needed)) {
		srcu_gp_start(sp);
		spin_unlock_irq_rcu_node(sp);
		srcu_reschedule(sp, 0);
	} else {
		spin_unlock_irq_rcu_node(sp);
	}
}

/*
 * Funnel-locking scheme to scalably mediate many concurrent expedited
 * grace-period requests.  This function is invoked for the first known
 * expedited request for a grace period that has already been requested,
 * but without expediting.  To start a completely new grace period,
 * whether expedited or not, use srcu_funnel_gp_start() instead.
 */
static void srcu_funnel_exp_start(struct srcu_struct *sp, struct srcu_node *snp,
				  unsigned long s)
{
	unsigned long flags;

	for (; snp != NULL; snp = snp->srcu_parent) {
		if (rcu_seq_done(&sp->srcu_gp_seq, s) ||
		    ULONG_CMP_GE(READ_ONCE(snp->srcu_gp_seq_needed_exp), s))
			return;
		spin_lock_irqsave_rcu_node(snp, flags);
		if (ULONG_CMP_GE(snp->srcu_gp_seq_needed_exp, s)) {
			spin_unlock_irqrestore_rcu_node(snp, flags);
			return;
		}
		WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
		spin_unlock_irqrestore_rcu_node(snp, flags);
	}
	spin_lock_irqsave_rcu_node(sp, flags);
	if (ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, s))
		sp->srcu_gp_seq_needed_exp = s;
	spin_unlock_irqrestore_rcu_node(sp, flags);
}

/*
 * Funnel-locking scheme to scalably mediate many concurrent grace-period
 * requests.  The winner has to do the work of actually starting grace
 * period s.  Losers must either ensure that their desired grace-period
 * number is recorded on at least their leaf srcu_node structure, or they
 * must take steps to invoke their own callbacks.
 *
 * Note that this function also does the work of srcu_funnel_exp_start(),
 * in some cases by directly invoking it.
 */
static void srcu_funnel_gp_start(struct srcu_struct *sp, struct srcu_data *sdp,
				 unsigned long s, bool do_norm)
{
	unsigned long flags;
	int idx = rcu_seq_ctr(s) % ARRAY_SIZE(sdp->mynode->srcu_have_cbs);
	struct srcu_node *snp = sdp->mynode;
	unsigned long snp_seq;

	/* Each pass through the loop does one level of the srcu_node tree. */
	for (; snp != NULL; snp = snp->srcu_parent) {
		if (rcu_seq_done(&sp->srcu_gp_seq, s) && snp != sdp->mynode)
			return; /* GP already done and CBs recorded. */
		spin_lock_irqsave_rcu_node(snp, flags);
		if (ULONG_CMP_GE(snp->srcu_have_cbs[idx], s)) {
			snp_seq = snp->srcu_have_cbs[idx];
			if (snp == sdp->mynode && snp_seq == s)
				snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
			spin_unlock_irqrestore_rcu_node(snp, flags);
			if (snp == sdp->mynode && snp_seq != s) {
				srcu_schedule_cbs_sdp(sdp, do_norm
							   ? SRCU_INTERVAL
							   : 0);
				return;
			}
			if (!do_norm)
				srcu_funnel_exp_start(sp, snp, s);
			return;
		}
		snp->srcu_have_cbs[idx] = s;
		if (snp == sdp->mynode)
			snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
		if (!do_norm && ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, s))
			snp->srcu_gp_seq_needed_exp = s;
		spin_unlock_irqrestore_rcu_node(snp, flags);
	}

	/* Top of tree, must ensure the grace period will be started. */
	spin_lock_irqsave_rcu_node(sp, flags);
	if (ULONG_CMP_LT(sp->srcu_gp_seq_needed, s)) {
		/*
		 * Record need for grace period s.  Pair with load
		 * acquire setting up for initialization.
		 */
		smp_store_release(&sp->srcu_gp_seq_needed, s); /*^^^*/
	}
	if (!do_norm && ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, s))
		sp->srcu_gp_seq_needed_exp = s;

	/* If grace period not already done and none in progress, start it. */
	if (!rcu_seq_done(&sp->srcu_gp_seq, s) &&
	    rcu_seq_state(sp->srcu_gp_seq) == SRCU_STATE_IDLE) {
		WARN_ON_ONCE(ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed));
		srcu_gp_start(sp);
		queue_delayed_work(rcu_gp_wq, &sp->work, srcu_get_delay(sp));
	}
	spin_unlock_irqrestore_rcu_node(sp, flags);
}

/*
 * Wait until all readers counted by array index idx complete, but
 * loop an additional time if there is an expedited grace period pending.
 * The caller must ensure that ->srcu_idx is not changed while checking.
 */
static bool try_check_zero(struct srcu_struct *sp, int idx, int trycount)
{
	for (;;) {
		if (srcu_readers_active_idx_check(sp, idx))
			return true;
		if (--trycount + !srcu_get_delay(sp) <= 0)
			return false;
		udelay(SRCU_RETRY_CHECK_DELAY);
	}
}

/*
 * Increment the ->srcu_idx counter so that future SRCU readers will
 * use the other rank of the ->srcu_(un)lock_count[] arrays.  This allows
 * us to wait for pre-existing readers in a starvation-free manner.
 */
static void srcu_flip(struct srcu_struct *sp)
{
	/*
	 * Ensure that if this updater saw a given reader's increment
	 * from __srcu_read_lock(), that reader was using an old value
	 * of ->srcu_idx.  Also ensure that if a given reader sees the
	 * new value of ->srcu_idx, this updater's earlier scans cannot
	 * have seen that reader's increments (which is OK, because this
	 * grace period need not wait on that reader).
	 */
	smp_mb(); /* E */  /* Pairs with B and C. */

	WRITE_ONCE(sp->srcu_idx, sp->srcu_idx + 1);

	/*
	 * Ensure that if the updater misses an __srcu_read_unlock()
	 * increment, that task's next __srcu_read_lock() will see the
	 * above counter update.  Note that both this memory barrier
	 * and the one in srcu_readers_active_idx_check() provide the
	 * guarantee for __srcu_read_lock().
	 */
	smp_mb(); /* D */  /* Pairs with C. */
}

/*
 * If SRCU is likely idle, return true, otherwise return false.
 *
 * Note that it is OK for several current from-idle requests for a new
 * grace period to specify expediting because they will all end
 * up requesting the same grace period anyhow.  So no loss.
 *
 * Note also that if any CPU (including the current one) is still invoking
 * callbacks, this function will nevertheless say "idle".  This is not
 * ideal, but the overhead of checking all CPUs' callback lists is even
 * less ideal, especially on large systems.  Furthermore, the wakeup
 * can happen before the callback is fully removed, so we have no choice
 * but to accept this type of error.
 *
 * This function is also subject to counter-wrap errors, but let's face
 * it, if this function was preempted for enough time for the counters
 * to wrap, it really doesn't matter whether or not we expedite the grace
 * period.  The extra overhead of a needlessly expedited grace period is
 * negligible when amortized over that time period, and the extra latency
 * of a needlessly non-expedited grace period is similarly negligible.
 */
static bool srcu_might_be_idle(struct srcu_struct *sp)
{
	unsigned long curseq;
	unsigned long flags;
	struct srcu_data *sdp;
	unsigned long t;

	/* If the local srcu_data structure has callbacks, not idle. */
	local_irq_save(flags);
	sdp = this_cpu_ptr(sp->sda);
	if (rcu_segcblist_pend_cbs(&sdp->srcu_cblist)) {
		local_irq_restore(flags);
		return false; /* Callbacks already present, so not idle. */
	}
	local_irq_restore(flags);

	/*
	 * No local callbacks, so probabilistically probe global state.
	 * Exact information would require acquiring locks, which would
	 * kill scalability, hence the probabilistic nature of the probe.
	 */

	/* First, see if enough time has passed since the last GP. */
	t = ktime_get_mono_fast_ns();
	if (exp_holdoff == 0 ||
	    time_in_range_open(t, sp->srcu_last_gp_end,
			       sp->srcu_last_gp_end + exp_holdoff))
		return false; /* Too soon after last GP. */

	/* Next, check for probable idleness. */
	curseq = rcu_seq_current(&sp->srcu_gp_seq);
	smp_mb(); /* Order ->srcu_gp_seq with ->srcu_gp_seq_needed. */
	if (ULONG_CMP_LT(curseq, READ_ONCE(sp->srcu_gp_seq_needed)))
		return false; /* Grace period in progress, so not idle. */
	smp_mb(); /* Order ->srcu_gp_seq with prior access. */
	if (curseq != rcu_seq_current(&sp->srcu_gp_seq))
		return false; /* GP # changed, so not idle. */
	return true; /* With reasonable probability, idle! */
}

/*
 * SRCU callback function to leak a callback.
 */
static void srcu_leak_callback(struct rcu_head *rhp)
{
}

/*
 * Enqueue an SRCU callback on the srcu_data structure associated with
 * the current CPU and the specified srcu_struct structure, initiating
 * grace-period processing if it is not already running.
 *
 * Note that all CPUs must agree that the grace period extended beyond
 * all pre-existing SRCU read-side critical sections.  On systems with
 * more than one CPU, this means that when "func()" is invoked, each CPU
 * is guaranteed to have executed a full memory barrier since the end of
 * its last corresponding SRCU read-side critical section whose beginning
 * preceded the call to call_srcu().  It also means that each CPU executing
 * an SRCU read-side critical section that continues beyond the start of
 * "func()" must have executed a memory barrier after the call_srcu()
 * but before the beginning of that SRCU read-side critical section.
 * Note that these guarantees include CPUs that are offline, idle, or
 * executing in user mode, as well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked call_srcu() and CPU B invoked the
 * resulting SRCU callback function "func()", then both CPU A and CPU
 * B are guaranteed to execute a full memory barrier during the time
 * interval between the call to call_srcu() and the invocation of "func()".
 * This guarantee applies even if CPU A and CPU B are the same CPU (but
 * again only if the system has more than one CPU).
 *
 * Of course, these guarantees apply only for invocations of call_srcu(),
 * srcu_read_lock(), and srcu_read_unlock() that are all passed the same
 * srcu_struct structure.
 */
void __call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
		 rcu_callback_t func, bool do_norm)
{
	unsigned long flags;
	bool needexp = false;
	bool needgp = false;
	unsigned long s;
	struct srcu_data *sdp;

	check_init_srcu_struct(sp);
	if (debug_rcu_head_queue(rhp)) {
		/* Probable double call_srcu(), so leak the callback. */
		WRITE_ONCE(rhp->func, srcu_leak_callback);
		WARN_ONCE(1, "call_srcu(): Leaked duplicate callback\n");
		return;
	}
	local_irq_save(flags);
	sdp = this_cpu_ptr(sp->sda);
	spin_lock_rcu_node(sdp);
	rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp, false);
	rcu_segcblist_advance(&sdp->srcu_cblist,
			      rcu_seq_current(&sp->srcu_gp_seq));
	s = rcu_seq_snap(&sp->srcu_gp_seq);
	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist, s);
	if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) {
		sdp->srcu_gp_seq_needed = s;
		needgp = true;
	}
	if (!do_norm && ULONG_CMP_LT(sdp->srcu_gp_seq_needed_exp, s)) {
		sdp->srcu_gp_seq_needed_exp = s;
		needexp = true;
	}
	spin_unlock_irqrestore_rcu_node(sdp, flags);
	if (needgp)
		srcu_funnel_gp_start(sp, sdp, s, do_norm);
	else if (needexp)
		srcu_funnel_exp_start(sp, sdp->mynode, s);
}

/**
 * call_srcu() - Queue a callback for invocation after an SRCU grace period
 * @sp: srcu_struct on which to queue the callback
 * @rhp: structure to be used for queueing the SRCU callback.
 * @func: function to be invoked after the SRCU grace period
 *
 * The callback function will be invoked some time after a full SRCU
 * grace period elapses, in other words after all pre-existing SRCU
 * read-side critical sections have completed.  However, the callback
 * function might well execute concurrently with other SRCU read-side
 * critical sections that started after call_srcu() was invoked.  SRCU
 * read-side critical sections are delimited by srcu_read_lock() and
 * srcu_read_unlock(), and may be nested.
 *
 * The callback will be invoked from process context, but must nevertheless
 * be fast and must not block.
 */
void call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
	       rcu_callback_t func)
{
	__call_srcu(sp, rhp, func, true);
}
EXPORT_SYMBOL_GPL(call_srcu);
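
/*
 * Illustrative call_srcu() sketch (hypothetical "struct foo" with an
 * embedded rcu_head "rh", pointer "global_foo", and domain "my_srcu";
 * not part of this file):
 *
 *	static void free_foo_cb(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct foo, rh));
 *	}
 *
 *	old = global_foo;
 *	rcu_assign_pointer(global_foo, new);
 *	call_srcu(&my_srcu, &old->rh, free_foo_cb);	// no waiting here
 */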

/*
 * Helper function for synchronize_srcu() and synchronize_srcu_expedited().
 */
static void __synchronize_srcu(struct srcu_struct *sp, bool do_norm)
{
	struct rcu_synchronize rcu;

	RCU_LOCKDEP_WARN(lock_is_held(&sp->dep_map) ||
			 lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_srcu() in same-type SRCU (or in RCU) read-side critical section");

	if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
		return;
	might_sleep();
	check_init_srcu_struct(sp);
	init_completion(&rcu.completion);
	init_rcu_head_on_stack(&rcu.head);
	__call_srcu(sp, &rcu.head, wakeme_after_rcu, do_norm);
	wait_for_completion(&rcu.completion);
	destroy_rcu_head_on_stack(&rcu.head);

	/*
	 * Make sure that later code is ordered after the SRCU grace
	 * period.  This pairs with the spin_lock_irq_rcu_node()
	 * in srcu_invoke_callbacks().  Unlike Tree RCU, this is needed
	 * because the current CPU might have been totally uninvolved with
	 * (and thus unordered against) that grace period.
	 */
	smp_mb();
}

/**
 * synchronize_srcu_expedited - Brute-force SRCU grace period
 * @sp: srcu_struct with which to synchronize.
 *
 * Wait for an SRCU grace period to elapse, but be more aggressive about
 * spinning rather than blocking when waiting.
 *
 * Note that synchronize_srcu_expedited() has the same deadlock and
 * memory-ordering properties as does synchronize_srcu().
 */
void synchronize_srcu_expedited(struct srcu_struct *sp)
{
	__synchronize_srcu(sp, rcu_gp_is_normal());
}
EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);

/**
 * synchronize_srcu - wait for prior SRCU read-side critical-section completion
 * @sp: srcu_struct with which to synchronize.
 *
 * Wait for the count to drain to zero of both indexes.  To avoid the
 * possible starvation of synchronize_srcu(), it waits for the count of
 * the index=((->srcu_idx & 1) ^ 1) to drain to zero at first,
 * and then flips ->srcu_idx and waits for the count of the other index.
 *
 * Can block; must be called from process context.
 *
 * Note that it is illegal to call synchronize_srcu() from the corresponding
 * SRCU read-side critical section; doing so will result in deadlock.
 * However, it is perfectly legal to call synchronize_srcu() on one
 * srcu_struct from some other srcu_struct's read-side critical section,
 * as long as the resulting graph of srcu_structs is acyclic.
 *
 * There are memory-ordering constraints implied by synchronize_srcu().
 * On systems with more than one CPU, when synchronize_srcu() returns,
 * each CPU is guaranteed to have executed a full memory barrier since
 * the end of its last corresponding SRCU read-side critical section
 * whose beginning preceded the call to synchronize_srcu().  In addition,
 * each CPU having an SRCU read-side critical section that extends beyond
 * the return from synchronize_srcu() is guaranteed to have executed a
 * full memory barrier after the beginning of synchronize_srcu() and before
 * the beginning of that SRCU read-side critical section.  Note that these
 * guarantees include CPUs that are offline, idle, or executing in user mode,
 * as well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked synchronize_srcu(), which returned
 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
 * to have executed a full memory barrier during the execution of
 * synchronize_srcu().  This guarantee applies even if CPU A and CPU B
 * are the same CPU, but again only if the system has more than one CPU.
 *
 * Of course, these memory-ordering guarantees apply only when
 * synchronize_srcu(), srcu_read_lock(), and srcu_read_unlock() are
 * passed the same srcu_struct structure.
 *
 * If SRCU is likely idle, expedite the first request.  This semantic
 * was provided by Classic SRCU, and is relied upon by its users, so TREE
 * SRCU must also provide it.  Note that detecting idleness is heuristic
 * and subject to both false positives and negatives.
 */
void synchronize_srcu(struct srcu_struct *sp)
{
	if (srcu_might_be_idle(sp) || rcu_gp_is_expedited())
		synchronize_srcu_expedited(sp);
	else
		__synchronize_srcu(sp, true);
}
EXPORT_SYMBOL_GPL(synchronize_srcu);
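
/*
 * Illustrative updater sketch for synchronize_srcu() (hypothetical
 * "my_srcu", "my_lock", and "global_foo"; not part of this file):
 * publish a new version, wait for pre-existing readers, then free the
 * old one.
 *
 *	spin_lock(&my_lock);
 *	old = rcu_dereference_protected(global_foo,
 *					lockdep_is_held(&my_lock));
 *	rcu_assign_pointer(global_foo, new);
 *	spin_unlock(&my_lock);
 *	synchronize_srcu(&my_srcu);	// wait for pre-existing readers
 *	kfree(old);			// now safe to free
 */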

/*
 * Callback function for srcu_barrier() use.
 */
static void srcu_barrier_cb(struct rcu_head *rhp)
{
	struct srcu_data *sdp;
	struct srcu_struct *sp;

	sdp = container_of(rhp, struct srcu_data, srcu_barrier_head);
	sp = sdp->sp;
	if (atomic_dec_and_test(&sp->srcu_barrier_cpu_cnt))
		complete(&sp->srcu_barrier_completion);
}

/**
 * srcu_barrier - Wait until all in-flight call_srcu() callbacks complete.
 * @sp: srcu_struct on which to wait for in-flight callbacks.
 */
void srcu_barrier(struct srcu_struct *sp)
{
	int cpu;
	struct srcu_data *sdp;
	unsigned long s = rcu_seq_snap(&sp->srcu_barrier_seq);

	check_init_srcu_struct(sp);
	mutex_lock(&sp->srcu_barrier_mutex);
	if (rcu_seq_done(&sp->srcu_barrier_seq, s)) {
		smp_mb(); /* Force ordering following return. */
		mutex_unlock(&sp->srcu_barrier_mutex);
		return; /* Someone else did our work for us. */
	}
	rcu_seq_start(&sp->srcu_barrier_seq);
	init_completion(&sp->srcu_barrier_completion);

	/* Initial count prevents reaching zero until all CBs are posted. */
	atomic_set(&sp->srcu_barrier_cpu_cnt, 1);

	/*
	 * Each pass through this loop enqueues a callback, but only
	 * on CPUs already having callbacks enqueued.  Note that if
	 * a CPU already has callbacks enqueued, it must have already
	 * registered the need for a future grace period, so all we
	 * need do is enqueue a callback that will use the same
	 * grace period as the last callback already in the queue.
	 */
	for_each_possible_cpu(cpu) {
		sdp = per_cpu_ptr(sp->sda, cpu);
		spin_lock_irq_rcu_node(sdp);
		atomic_inc(&sp->srcu_barrier_cpu_cnt);
		sdp->srcu_barrier_head.func = srcu_barrier_cb;
		debug_rcu_head_queue(&sdp->srcu_barrier_head);
		if (!rcu_segcblist_entrain(&sdp->srcu_cblist,
					   &sdp->srcu_barrier_head, 0)) {
			debug_rcu_head_unqueue(&sdp->srcu_barrier_head);
			atomic_dec(&sp->srcu_barrier_cpu_cnt);
		}
		spin_unlock_irq_rcu_node(sdp);
	}

	/* Remove the initial count, at which point reaching zero can happen. */
	if (atomic_dec_and_test(&sp->srcu_barrier_cpu_cnt))
		complete(&sp->srcu_barrier_completion);
	wait_for_completion(&sp->srcu_barrier_completion);

	rcu_seq_end(&sp->srcu_barrier_seq);
	mutex_unlock(&sp->srcu_barrier_mutex);
}
EXPORT_SYMBOL_GPL(srcu_barrier);
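
/*
 * Illustrative shutdown ordering (hypothetical "my_srcu"; not part of
 * this file).  Before cleaning up a dynamically allocated srcu_struct,
 * stop posting callbacks, then wait for the in-flight ones:
 *
 *	// ... prevent any further call_srcu() invocations ...
 *	srcu_barrier(&my_srcu);		// wait for pending callbacks
 *	cleanup_srcu_struct(&my_srcu);	// then tear down the domain
 */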

/**
 * srcu_batches_completed - return batches completed.
 * @sp: srcu_struct on which to report batch completion.
 *
 * Report the number of batches, correlated with, but not necessarily
 * precisely the same as, the number of grace periods that have elapsed.
 */
unsigned long srcu_batches_completed(struct srcu_struct *sp)
{
	return sp->srcu_idx;
}
EXPORT_SYMBOL_GPL(srcu_batches_completed);

/*
 * Core SRCU state machine.  Push state bits of ->srcu_gp_seq
 * to SRCU_STATE_SCAN2, and invoke srcu_gp_end() when scan has
 * completed in that state.
 */
static void srcu_advance_state(struct srcu_struct *sp)
{
	int idx;

	mutex_lock(&sp->srcu_gp_mutex);

	/*
	 * Because readers might be delayed for an extended period after
	 * fetching ->srcu_idx for their index, at any point in time there
	 * might well be readers using both idx=0 and idx=1.  We therefore
	 * need to wait for readers to clear from both index values before
	 * invoking a callback.
	 *
	 * The load-acquire ensures that we see the accesses performed
	 * by the prior grace period.
	 */
	idx = rcu_seq_state(smp_load_acquire(&sp->srcu_gp_seq)); /* ^^^ */
	if (idx == SRCU_STATE_IDLE) {
		spin_lock_irq_rcu_node(sp);
		if (ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed)) {
			WARN_ON_ONCE(rcu_seq_state(sp->srcu_gp_seq));
			spin_unlock_irq_rcu_node(sp);
			mutex_unlock(&sp->srcu_gp_mutex);
			return;
		}
		idx = rcu_seq_state(READ_ONCE(sp->srcu_gp_seq));
		if (idx == SRCU_STATE_IDLE)
			srcu_gp_start(sp);
		spin_unlock_irq_rcu_node(sp);
		if (idx != SRCU_STATE_IDLE) {
			mutex_unlock(&sp->srcu_gp_mutex);
			return; /* Someone else started the grace period. */
		}
	}

	if (rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) == SRCU_STATE_SCAN1) {
		idx = 1 ^ (sp->srcu_idx & 1);
		if (!try_check_zero(sp, idx, 1)) {
			mutex_unlock(&sp->srcu_gp_mutex);
			return; /* readers present, retry later. */
		}
		srcu_flip(sp);
		rcu_seq_set_state(&sp->srcu_gp_seq, SRCU_STATE_SCAN2);
	}

	if (rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) == SRCU_STATE_SCAN2) {

		/*
		 * SRCU read-side critical sections are normally short,
		 * so check at least twice in quick succession after a flip.
		 */
		idx = 1 ^ (sp->srcu_idx & 1);
		if (!try_check_zero(sp, idx, 2)) {
			mutex_unlock(&sp->srcu_gp_mutex);
			return; /* readers present, retry later. */
		}
		srcu_gp_end(sp);  /* Releases ->srcu_gp_mutex. */
	}
}

/*
 * Invoke a limited number of SRCU callbacks that have passed through
 * their grace period.  If there are more to do, SRCU will reschedule
 * the workqueue.  Note that needed memory barriers have been executed
 * in this task's context by srcu_readers_active_idx_check().
 */
static void srcu_invoke_callbacks(struct work_struct *work)
{
	bool more;
	struct rcu_cblist ready_cbs;
	struct rcu_head *rhp;
	struct srcu_data *sdp;
	struct srcu_struct *sp;

	sdp = container_of(work, struct srcu_data, work.work);
	sp = sdp->sp;
	rcu_cblist_init(&ready_cbs);
	spin_lock_irq_rcu_node(sdp);
	rcu_segcblist_advance(&sdp->srcu_cblist,
			      rcu_seq_current(&sp->srcu_gp_seq));
	if (sdp->srcu_cblist_invoking ||
	    !rcu_segcblist_ready_cbs(&sdp->srcu_cblist)) {
		spin_unlock_irq_rcu_node(sdp);
		return;  /* Someone else on the job or nothing to do. */
	}

	/* We are on the job!  Extract and invoke ready callbacks. */
	sdp->srcu_cblist_invoking = true;
	rcu_segcblist_extract_done_cbs(&sdp->srcu_cblist, &ready_cbs);
	spin_unlock_irq_rcu_node(sdp);
	rhp = rcu_cblist_dequeue(&ready_cbs);
	for (; rhp != NULL; rhp = rcu_cblist_dequeue(&ready_cbs)) {
		debug_rcu_head_unqueue(rhp);
		local_bh_disable();
		rhp->func(rhp);
		local_bh_enable();
	}

	/*
	 * Update counts, accelerate new callbacks, and if needed,
	 * schedule another round of callback invocation.
	 */
	spin_lock_irq_rcu_node(sdp);
	rcu_segcblist_insert_count(&sdp->srcu_cblist, &ready_cbs);
	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
				       rcu_seq_snap(&sp->srcu_gp_seq));
	sdp->srcu_cblist_invoking = false;
	more = rcu_segcblist_ready_cbs(&sdp->srcu_cblist);
	spin_unlock_irq_rcu_node(sdp);
	if (more)
		srcu_schedule_cbs_sdp(sdp, 0);
}

/*
 * Finished one round of SRCU grace period.  Start another if there are
 * more SRCU callbacks queued, otherwise put SRCU into not-running state.
 */
static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay)
{
	bool pushgp = true;

	spin_lock_irq_rcu_node(sp);
	if (ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed)) {
		if (!WARN_ON_ONCE(rcu_seq_state(sp->srcu_gp_seq))) {
			/* All requests fulfilled, time to go idle. */
			pushgp = false;
		}
	} else if (!rcu_seq_state(sp->srcu_gp_seq)) {
		/* Outstanding request and no GP.  Start one. */
		srcu_gp_start(sp);
	}
	spin_unlock_irq_rcu_node(sp);

	if (pushgp)
		queue_delayed_work(rcu_gp_wq, &sp->work, delay);
}

/*
 * This is the work-queue function that handles SRCU grace periods.
 */
static void process_srcu(struct work_struct *work)
{
	struct srcu_struct *sp;

	sp = container_of(work, struct srcu_struct, work.work);

	srcu_advance_state(sp);
	srcu_reschedule(sp, srcu_get_delay(sp));
}
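
/*
 * Overview of the grace-period workqueue flow implemented above:
 *
 *	process_srcu()
 *	    srcu_advance_state()
 *		SRCU_STATE_IDLE:  start a new GP if one is needed
 *		SRCU_STATE_SCAN1: wait for readers on the inactive index,
 *				  then srcu_flip() and advance to SCAN2
 *		SRCU_STATE_SCAN2: wait for readers on the formerly active
 *				  index, then srcu_gp_end()
 *	    srcu_reschedule()	  re-queue the work if more GPs are needed
 */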

void srcutorture_get_gp_data(enum rcutorture_type test_type,
			     struct srcu_struct *sp, int *flags,
			     unsigned long *gp_seq)
{
	if (test_type != SRCU_FLAVOR)
		return;
	*flags = 0;
	*gp_seq = rcu_seq_current(&sp->srcu_gp_seq);
}
EXPORT_SYMBOL_GPL(srcutorture_get_gp_data);

void srcu_torture_stats_print(struct srcu_struct *sp, char *tt, char *tf)
{
	int cpu;
	int idx;
	unsigned long s0 = 0, s1 = 0;

	idx = sp->srcu_idx & 0x1;
	pr_alert("%s%s Tree SRCU g%ld per-CPU(idx=%d):",
		 tt, tf, rcu_seq_current(&sp->srcu_gp_seq), idx);
	for_each_possible_cpu(cpu) {
		unsigned long l0, l1;
		unsigned long u0, u1;
		long c0, c1;
		struct srcu_data *sdp;

		sdp = per_cpu_ptr(sp->sda, cpu);
		u0 = sdp->srcu_unlock_count[!idx];
		u1 = sdp->srcu_unlock_count[idx];

		/*
		 * Make sure that a lock is always counted if the corresponding
		 * unlock is counted.
		 */
		smp_rmb();

		l0 = sdp->srcu_lock_count[!idx];
		l1 = sdp->srcu_lock_count[idx];

		c0 = l0 - u0;
		c1 = l1 - u1;
		pr_cont(" %d(%ld,%ld %1p)",
			cpu, c0, c1, rcu_segcblist_head(&sdp->srcu_cblist));
		s0 += c0;
		s1 += c1;
	}
	pr_cont(" T(%ld,%ld)\n", s0, s1);
}
EXPORT_SYMBOL_GPL(srcu_torture_stats_print);

static int __init srcu_bootup_announce(void)
{
	pr_info("Hierarchical SRCU implementation.\n");
	if (exp_holdoff != DEFAULT_SRCU_EXP_HOLDOFF)
		pr_info("\tNon-default auto-expedite holdoff of %lu ns.\n", exp_holdoff);
	return 0;
}
early_initcall(srcu_bootup_announce);