GNU Linux-libre 5.19-rc6-gnu — kernel/rcu/tree_exp.h
/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * RCU expedited grace periods
 *
 * Copyright IBM Corporation, 2016
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 */

#include <linux/lockdep.h>

static void rcu_exp_handler(void *unused);
static int rcu_print_task_exp_stall(struct rcu_node *rnp);

/*
 * Record the start of an expedited grace period.
 */
static void rcu_exp_gp_seq_start(void)
{
        rcu_seq_start(&rcu_state.expedited_sequence);
}

/*
 * Return the value that the expedited-grace-period counter will have
 * at the end of the current grace period.
 */
static __maybe_unused unsigned long rcu_exp_gp_seq_endval(void)
{
        return rcu_seq_endval(&rcu_state.expedited_sequence);
}

/*
 * Record the end of an expedited grace period.
 */
static void rcu_exp_gp_seq_end(void)
{
        rcu_seq_end(&rcu_state.expedited_sequence);
        smp_mb(); /* Ensure that consecutive grace periods serialize. */
}

/*
 * Take a snapshot of the expedited-grace-period counter, which is the
 * earliest value that will indicate that a full grace period has
 * elapsed since the current time.
 */
static unsigned long rcu_exp_gp_seq_snap(void)
{
        unsigned long s;

        smp_mb(); /* Caller's modifications seen first by other CPUs. */
        s = rcu_seq_snap(&rcu_state.expedited_sequence);
        trace_rcu_exp_grace_period(rcu_state.name, s, TPS("snap"));
        return s;
}

/*
 * Given a counter snapshot from rcu_exp_gp_seq_snap(), return true
 * if a full expedited grace period has elapsed since that snapshot
 * was taken.
 */
static bool rcu_exp_gp_seq_done(unsigned long s)
{
        return rcu_seq_done(&rcu_state.expedited_sequence, s);
}
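
/*
 * Illustrative sketch (editorial addition, not part of the upstream
 * file): the snap/done pair above forms a sequence-counter protocol.
 * A caller takes a snapshot, and polling that snapshot later reveals
 * whether a full expedited grace period has elapsed in the meantime,
 * even if some other task did the actual work:
 *
 *      unsigned long s = rcu_exp_gp_seq_snap();
 *      ...
 *      if (rcu_exp_gp_seq_done(s))
 *              return; // some expedited GP covered our snapshot
 *
 * See sync_exp_work_done() and exp_funnel_lock() below for the real users.
 */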

/*
 * Reset the ->expmaskinit values in the rcu_node tree to reflect any
 * recent CPU-online activity.  Note that these masks are not cleared
 * when CPUs go offline, so they reflect the union of all CPUs that have
 * ever been online.  This means that this function normally takes its
 * no-work-to-do fastpath.
 */
static void sync_exp_reset_tree_hotplug(void)
{
        bool done;
        unsigned long flags;
        unsigned long mask;
        unsigned long oldmask;
        int ncpus = smp_load_acquire(&rcu_state.ncpus); /* Order vs. locking. */
        struct rcu_node *rnp;
        struct rcu_node *rnp_up;

        /* If no new CPUs onlined since last time, nothing to do. */
        if (likely(ncpus == rcu_state.ncpus_snap))
                return;
        rcu_state.ncpus_snap = ncpus;

        /*
         * Each pass through the following loop propagates newly onlined
         * CPUs for the current rcu_node structure up the rcu_node tree.
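         * For example (illustrative, not in the upstream comment): when
         * the first CPU of a leaf comes online, oldmask below is zero,
         * so the leaf's ->grpmask is ORed into each ancestor's
         * ->expmaskinit until an already-initialized ancestor is found.
         * Later onlinings under that same leaf find oldmask nonzero and
         * stop at the leaf.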
         */
        rcu_for_each_leaf_node(rnp) {
                raw_spin_lock_irqsave_rcu_node(rnp, flags);
                if (rnp->expmaskinit == rnp->expmaskinitnext) {
                        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                        continue;  /* No new CPUs, nothing to do. */
                }

                /* Update this node's mask, track old value for propagation. */
                oldmask = rnp->expmaskinit;
                rnp->expmaskinit = rnp->expmaskinitnext;
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

                /* If it was already nonzero, nothing to propagate. */
                if (oldmask)
                        continue;

                /* Propagate the new CPU up the tree. */
                mask = rnp->grpmask;
                rnp_up = rnp->parent;
                done = false;
                while (rnp_up) {
                        raw_spin_lock_irqsave_rcu_node(rnp_up, flags);
                        if (rnp_up->expmaskinit)
                                done = true;
                        rnp_up->expmaskinit |= mask;
                        raw_spin_unlock_irqrestore_rcu_node(rnp_up, flags);
                        if (done)
                                break;
                        mask = rnp_up->grpmask;
                        rnp_up = rnp_up->parent;
                }
        }
}

/*
 * Reset the ->expmask values in the rcu_node tree in preparation for
 * a new expedited grace period.
 */
static void __maybe_unused sync_exp_reset_tree(void)
{
        unsigned long flags;
        struct rcu_node *rnp;

        sync_exp_reset_tree_hotplug();
        rcu_for_each_node_breadth_first(rnp) {
                raw_spin_lock_irqsave_rcu_node(rnp, flags);
                WARN_ON_ONCE(rnp->expmask);
                WRITE_ONCE(rnp->expmask, rnp->expmaskinit);
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
        }
}

/*
 * Return true if there is no RCU expedited grace period in progress
 * for the specified rcu_node structure, in other words, if all CPUs and
 * tasks covered by the specified rcu_node structure have done their bit
 * for the current expedited grace period.
 */
static bool sync_rcu_exp_done(struct rcu_node *rnp)
{
        raw_lockdep_assert_held_rcu_node(rnp);
        return READ_ONCE(rnp->exp_tasks) == NULL &&
               READ_ONCE(rnp->expmask) == 0;
}

/*
 * Like sync_rcu_exp_done(), but where the caller does not hold the
 * rcu_node's ->lock.
 */
static bool sync_rcu_exp_done_unlocked(struct rcu_node *rnp)
{
        unsigned long flags;
        bool ret;

        raw_spin_lock_irqsave_rcu_node(rnp, flags);
        ret = sync_rcu_exp_done(rnp);
        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

        return ret;
}

/*
 * Report the exit from RCU read-side critical section for the last task
 * that queued itself during or before the current expedited preemptible-RCU
 * grace period.  This event is reported either to the rcu_node structure on
 * which the task was queued or to one of that rcu_node structure's ancestors,
 * recursively up the tree.  (Calm down, calm down, we do the recursion
 * iteratively!)
 */
static void __rcu_report_exp_rnp(struct rcu_node *rnp,
                                 bool wake, unsigned long flags)
        __releases(rnp->lock)
{
        unsigned long mask;

        raw_lockdep_assert_held_rcu_node(rnp);
        for (;;) {
                if (!sync_rcu_exp_done(rnp)) {
                        if (!rnp->expmask)
                                rcu_initiate_boost(rnp, flags);
                        else
                                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                        break;
                }
                if (rnp->parent == NULL) {
                        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                        if (wake) {
                                smp_mb(); /* EGP done before wake_up(). */
                                swake_up_one(&rcu_state.expedited_wq);
                        }
                        break;
                }
                mask = rnp->grpmask;
                raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled */
                rnp = rnp->parent;
                raw_spin_lock_rcu_node(rnp); /* irqs already disabled */
                WARN_ON_ONCE(!(rnp->expmask & mask));
                WRITE_ONCE(rnp->expmask, rnp->expmask & ~mask);
        }
}

/*
 * Report expedited quiescent state for specified node.  This is a
 * lock-acquisition wrapper function for __rcu_report_exp_rnp().
 */
static void __maybe_unused rcu_report_exp_rnp(struct rcu_node *rnp, bool wake)
{
        unsigned long flags;

        raw_spin_lock_irqsave_rcu_node(rnp, flags);
        __rcu_report_exp_rnp(rnp, wake, flags);
}

/*
 * Report expedited quiescent state for multiple CPUs, all covered by the
 * specified leaf rcu_node structure.
 */
static void rcu_report_exp_cpu_mult(struct rcu_node *rnp,
                                    unsigned long mask, bool wake)
{
        int cpu;
        unsigned long flags;
        struct rcu_data *rdp;

        raw_spin_lock_irqsave_rcu_node(rnp, flags);
        if (!(rnp->expmask & mask)) {
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                return;
        }
        WRITE_ONCE(rnp->expmask, rnp->expmask & ~mask);
        for_each_leaf_node_cpu_mask(rnp, cpu, mask) {
                rdp = per_cpu_ptr(&rcu_data, cpu);
                if (!IS_ENABLED(CONFIG_NO_HZ_FULL) || !rdp->rcu_forced_tick_exp)
                        continue;
                rdp->rcu_forced_tick_exp = false;
                tick_dep_clear_cpu(cpu, TICK_DEP_BIT_RCU_EXP);
        }
        __rcu_report_exp_rnp(rnp, wake, flags); /* Releases rnp->lock. */
}

/*
 * Report expedited quiescent state for specified rcu_data (CPU).
 */
static void rcu_report_exp_rdp(struct rcu_data *rdp)
{
        WRITE_ONCE(rdp->cpu_no_qs.b.exp, false);
        rcu_report_exp_cpu_mult(rdp->mynode, rdp->grpmask, true);
}
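
/*
 * Editorial summary (not in the upstream file): quiescent-state reports
 * flow upward as rcu_report_exp_rdp() -> rcu_report_exp_cpu_mult() ->
 * __rcu_report_exp_rnp().  A leaf clears the reporting CPU's bit in its
 * ->expmask, and a node whose ->expmask and ->exp_tasks have both
 * emptied clears its own ->grpmask bit in its parent, with the root
 * waking the task driving the expedited grace period.
 */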

/* Common code for work-done checking. */
static bool sync_exp_work_done(unsigned long s)
{
        if (rcu_exp_gp_seq_done(s)) {
                trace_rcu_exp_grace_period(rcu_state.name, s, TPS("done"));
                smp_mb(); /* Ensure test happens before caller kfree(). */
                return true;
        }
        return false;
}

/*
 * Funnel-lock acquisition for expedited grace periods.  Returns true
 * if some other task completed an expedited grace period that this task
 * can piggyback on, and with no mutex held.  Otherwise, returns false
 * with the mutex held, indicating that the caller must actually do the
 * expedited grace period.
 */
static bool exp_funnel_lock(unsigned long s)
{
        struct rcu_data *rdp = per_cpu_ptr(&rcu_data, raw_smp_processor_id());
        struct rcu_node *rnp = rdp->mynode;
        struct rcu_node *rnp_root = rcu_get_root();

        /* Low-contention fastpath. */
        if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s) &&
            (rnp == rnp_root ||
             ULONG_CMP_LT(READ_ONCE(rnp_root->exp_seq_rq), s)) &&
            mutex_trylock(&rcu_state.exp_mutex))
                goto fastpath;

        /*
         * Each pass through the following loop works its way up
         * the rcu_node tree, returning if others have done the work,
         * and otherwise falling through to acquire ->exp_mutex.  The
         * mapping from CPU to rcu_node structure can be inexact, as it
         * is just promoting locality and is not strictly needed for
         * correctness.
         */
        for (; rnp != NULL; rnp = rnp->parent) {
                if (sync_exp_work_done(s))
                        return true;

                /* Work not done, either wait here or go up. */
                spin_lock(&rnp->exp_lock);
                if (ULONG_CMP_GE(rnp->exp_seq_rq, s)) {

                        /* Someone else doing GP, so wait for them. */
                        spin_unlock(&rnp->exp_lock);
                        trace_rcu_exp_funnel_lock(rcu_state.name, rnp->level,
                                                  rnp->grplo, rnp->grphi,
                                                  TPS("wait"));
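                        /*
                         * Editorial note (not in the upstream file):
                         * rcu_seq_ctr(s) & 0x3 selects one of the four
                         * per-node wait queues, so snapshots four grace
                         * periods apart reuse a queue; rcu_exp_wait_wake()
                         * wakes waiters using this same index.
                         */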
                        wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
                                   sync_exp_work_done(s));
                        return true;
                }
                WRITE_ONCE(rnp->exp_seq_rq, s); /* Followers can wait on us. */
                spin_unlock(&rnp->exp_lock);
                trace_rcu_exp_funnel_lock(rcu_state.name, rnp->level,
                                          rnp->grplo, rnp->grphi, TPS("nxtlvl"));
        }
        mutex_lock(&rcu_state.exp_mutex);
fastpath:
        if (sync_exp_work_done(s)) {
                mutex_unlock(&rcu_state.exp_mutex);
                return true;
        }
        rcu_exp_gp_seq_start();
        trace_rcu_exp_grace_period(rcu_state.name, s, TPS("start"));
        return false;
}
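
/*
 * Illustrative caller contract for exp_funnel_lock() (a sketch, not
 * part of the upstream file); synchronize_rcu_expedited() below follows
 * this pattern:
 *
 *      s = rcu_exp_gp_seq_snap();
 *      if (exp_funnel_lock(s))
 *              return; // another task's expedited GP covered us, no mutex held
 *      // Otherwise we hold rcu_state.exp_mutex and must drive the GP
 *      // ourselves, releasing the mutex once sync_exp_work_done(s) holds.
 */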

/*
 * Select the CPUs within the specified rcu_node that the upcoming
 * expedited grace period needs to wait for.
 */
static void __sync_rcu_exp_select_node_cpus(struct rcu_exp_work *rewp)
{
        int cpu;
        unsigned long flags;
        unsigned long mask_ofl_test;
        unsigned long mask_ofl_ipi;
        int ret;
        struct rcu_node *rnp = container_of(rewp, struct rcu_node, rew);

        raw_spin_lock_irqsave_rcu_node(rnp, flags);

        /* Each pass checks a CPU for identity, offline, and idle. */
        mask_ofl_test = 0;
        for_each_leaf_node_cpu_mask(rnp, cpu, rnp->expmask) {
                struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
                unsigned long mask = rdp->grpmask;
                int snap;

                if (raw_smp_processor_id() == cpu ||
                    !(rnp->qsmaskinitnext & mask)) {
                        mask_ofl_test |= mask;
                } else {
                        snap = rcu_dynticks_snap(rdp);
                        if (rcu_dynticks_in_eqs(snap))
                                mask_ofl_test |= mask;
                        else
                                rdp->exp_dynticks_snap = snap;
                }
        }
        mask_ofl_ipi = rnp->expmask & ~mask_ofl_test;

        /*
         * Need to wait for any blocked tasks as well.  Note that
         * additional blocking tasks will also block the expedited GP
         * until such time as the ->expmask bits are cleared.
         */
        if (rcu_preempt_has_tasks(rnp))
                WRITE_ONCE(rnp->exp_tasks, rnp->blkd_tasks.next);
        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

        /* IPI the remaining CPUs for expedited quiescent state. */
        for_each_leaf_node_cpu_mask(rnp, cpu, mask_ofl_ipi) {
                struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
                unsigned long mask = rdp->grpmask;

retry_ipi:
                if (rcu_dynticks_in_eqs_since(rdp, rdp->exp_dynticks_snap)) {
                        mask_ofl_test |= mask;
                        continue;
                }
                if (get_cpu() == cpu) {
                        mask_ofl_test |= mask;
                        put_cpu();
                        continue;
                }
                ret = smp_call_function_single(cpu, rcu_exp_handler, NULL, 0);
                put_cpu();
                /* The CPU will report the QS in response to the IPI. */
                if (!ret)
                        continue;

                /* Failed, raced with CPU hotplug operation. */
                raw_spin_lock_irqsave_rcu_node(rnp, flags);
                if ((rnp->qsmaskinitnext & mask) &&
                    (rnp->expmask & mask)) {
                        /* Online, so delay for a bit and try again. */
                        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                        trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("selectofl"));
                        schedule_timeout_idle(1);
                        goto retry_ipi;
                }
                /* CPU really is offline, so we must report its QS. */
                if (rnp->expmask & mask)
                        mask_ofl_test |= mask;
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
        }
        /* Report quiescent states for those that went offline. */
        if (mask_ofl_test)
                rcu_report_exp_cpu_mult(rnp, mask_ofl_test, false);
}

static void rcu_exp_sel_wait_wake(unsigned long s);

#ifdef CONFIG_RCU_EXP_KTHREAD
static void sync_rcu_exp_select_node_cpus(struct kthread_work *wp)
{
        struct rcu_exp_work *rewp =
                container_of(wp, struct rcu_exp_work, rew_work);

        __sync_rcu_exp_select_node_cpus(rewp);
}

static inline bool rcu_gp_par_worker_started(void)
{
        return !!READ_ONCE(rcu_exp_par_gp_kworker);
}

static inline void sync_rcu_exp_select_cpus_queue_work(struct rcu_node *rnp)
{
        kthread_init_work(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus);
        /*
         * Use rcu_exp_par_gp_kworker, because flushing a work item from
         * another work item on the same kthread worker can result in
         * deadlock.
         */
        kthread_queue_work(rcu_exp_par_gp_kworker, &rnp->rew.rew_work);
}

static inline void sync_rcu_exp_select_cpus_flush_work(struct rcu_node *rnp)
{
        kthread_flush_work(&rnp->rew.rew_work);
}

/*
 * Work-queue handler to drive an expedited grace period forward.
 */
static void wait_rcu_exp_gp(struct kthread_work *wp)
{
        struct rcu_exp_work *rewp;

        rewp = container_of(wp, struct rcu_exp_work, rew_work);
        rcu_exp_sel_wait_wake(rewp->rew_s);
}

static inline void synchronize_rcu_expedited_queue_work(struct rcu_exp_work *rew)
{
        kthread_init_work(&rew->rew_work, wait_rcu_exp_gp);
        kthread_queue_work(rcu_exp_gp_kworker, &rew->rew_work);
}

static inline void synchronize_rcu_expedited_destroy_work(struct rcu_exp_work *rew)
{
}
#else /* !CONFIG_RCU_EXP_KTHREAD */
static void sync_rcu_exp_select_node_cpus(struct work_struct *wp)
{
        struct rcu_exp_work *rewp =
                container_of(wp, struct rcu_exp_work, rew_work);

        __sync_rcu_exp_select_node_cpus(rewp);
}

static inline bool rcu_gp_par_worker_started(void)
{
        return !!READ_ONCE(rcu_par_gp_wq);
}

static inline void sync_rcu_exp_select_cpus_queue_work(struct rcu_node *rnp)
{
        int cpu = find_next_bit(&rnp->ffmask, BITS_PER_LONG, -1);

        INIT_WORK(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus);
        /* If all offline, queue the work on an unbound CPU. */
        if (unlikely(cpu > rnp->grphi - rnp->grplo))
                cpu = WORK_CPU_UNBOUND;
        else
                cpu += rnp->grplo;
        queue_work_on(cpu, rcu_par_gp_wq, &rnp->rew.rew_work);
}

static inline void sync_rcu_exp_select_cpus_flush_work(struct rcu_node *rnp)
{
        flush_work(&rnp->rew.rew_work);
}

/*
 * Work-queue handler to drive an expedited grace period forward.
 */
static void wait_rcu_exp_gp(struct work_struct *wp)
{
        struct rcu_exp_work *rewp;

        rewp = container_of(wp, struct rcu_exp_work, rew_work);
        rcu_exp_sel_wait_wake(rewp->rew_s);
}

static inline void synchronize_rcu_expedited_queue_work(struct rcu_exp_work *rew)
{
        INIT_WORK_ONSTACK(&rew->rew_work, wait_rcu_exp_gp);
        queue_work(rcu_gp_wq, &rew->rew_work);
}

static inline void synchronize_rcu_expedited_destroy_work(struct rcu_exp_work *rew)
{
        destroy_work_on_stack(&rew->rew_work);
}
#endif /* CONFIG_RCU_EXP_KTHREAD */

/*
 * Select the nodes that the upcoming expedited grace period needs
 * to wait for.
 */
static void sync_rcu_exp_select_cpus(void)
{
        struct rcu_node *rnp;

        trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("reset"));
        sync_exp_reset_tree();
        trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("select"));

        /* Schedule work for each leaf rcu_node structure. */
        rcu_for_each_leaf_node(rnp) {
                rnp->exp_need_flush = false;
                if (!READ_ONCE(rnp->expmask))
                        continue; /* Avoid early boot non-existent wq. */
                if (!rcu_gp_par_worker_started() ||
                    rcu_scheduler_active != RCU_SCHEDULER_RUNNING ||
                    rcu_is_last_leaf_node(rnp)) {
                        /* No worker started yet or last leaf, do direct call. */
                        sync_rcu_exp_select_node_cpus(&rnp->rew.rew_work);
                        continue;
                }
                sync_rcu_exp_select_cpus_queue_work(rnp);
                rnp->exp_need_flush = true;
        }

        /* Wait for jobs (if any) to complete. */
        rcu_for_each_leaf_node(rnp)
                if (rnp->exp_need_flush)
                        sync_rcu_exp_select_cpus_flush_work(rnp);
}

/*
 * Wait for the expedited grace period to elapse, within time limit.
 * If the time limit is exceeded without the grace period elapsing,
 * return false, otherwise return true.
 */
static bool synchronize_rcu_expedited_wait_once(long tlimit)
{
        int t;
        struct rcu_node *rnp_root = rcu_get_root();

        t = swait_event_timeout_exclusive(rcu_state.expedited_wq,
                                          sync_rcu_exp_done_unlocked(rnp_root),
                                          tlimit);
        // Workqueues should not be signaled.
        if (t > 0 || sync_rcu_exp_done_unlocked(rnp_root))
                return true;
        WARN_ON(t < 0);  /* workqueues should not be signaled. */
        return false;
}

/*
 * Wait for the expedited grace period to elapse, issuing any needed
 * RCU CPU stall warnings along the way.
 */
static void synchronize_rcu_expedited_wait(void)
{
        int cpu;
        unsigned long j;
        unsigned long jiffies_stall;
        unsigned long jiffies_start;
        unsigned long mask;
        int ndetected;
        struct rcu_data *rdp;
        struct rcu_node *rnp;
        struct rcu_node *rnp_root = rcu_get_root();

        trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("startwait"));
        jiffies_stall = rcu_exp_jiffies_till_stall_check();
        jiffies_start = jiffies;
        if (tick_nohz_full_enabled() && rcu_inkernel_boot_has_ended()) {
                if (synchronize_rcu_expedited_wait_once(1))
                        return;
                rcu_for_each_leaf_node(rnp) {
                        mask = READ_ONCE(rnp->expmask);
                        for_each_leaf_node_cpu_mask(rnp, cpu, mask) {
                                rdp = per_cpu_ptr(&rcu_data, cpu);
                                if (rdp->rcu_forced_tick_exp)
                                        continue;
                                rdp->rcu_forced_tick_exp = true;
                                preempt_disable();
                                if (cpu_online(cpu))
                                        tick_dep_set_cpu(cpu, TICK_DEP_BIT_RCU_EXP);
                                preempt_enable();
                        }
                }
                j = READ_ONCE(jiffies_till_first_fqs);
                if (synchronize_rcu_expedited_wait_once(j + HZ))
                        return;
        }

        for (;;) {
                if (synchronize_rcu_expedited_wait_once(jiffies_stall))
                        return;
                if (rcu_stall_is_suppressed())
                        continue;
                panic_on_rcu_stall();
                trace_rcu_stall_warning(rcu_state.name, TPS("ExpeditedStall"));
                pr_err("INFO: %s detected expedited stalls on CPUs/tasks: {",
                       rcu_state.name);
                ndetected = 0;
                rcu_for_each_leaf_node(rnp) {
                        ndetected += rcu_print_task_exp_stall(rnp);
                        for_each_leaf_node_possible_cpu(rnp, cpu) {
                                struct rcu_data *rdp;

                                mask = leaf_node_cpu_bit(rnp, cpu);
                                if (!(READ_ONCE(rnp->expmask) & mask))
                                        continue;
                                ndetected++;
                                rdp = per_cpu_ptr(&rcu_data, cpu);
                                pr_cont(" %d-%c%c%c", cpu,
                                        "O."[!!cpu_online(cpu)],
                                        "o."[!!(rdp->grpmask & rnp->expmaskinit)],
                                        "N."[!!(rdp->grpmask & rnp->expmaskinitnext)]);
                        }
                }
                pr_cont(" } %lu jiffies s: %lu root: %#lx/%c\n",
                        jiffies - jiffies_start, rcu_state.expedited_sequence,
                        data_race(rnp_root->expmask),
                        ".T"[!!data_race(rnp_root->exp_tasks)]);
                if (ndetected) {
                        pr_err("blocking rcu_node structures (internal RCU debug):");
                        rcu_for_each_node_breadth_first(rnp) {
                                if (rnp == rnp_root)
                                        continue; /* printed unconditionally */
                                if (sync_rcu_exp_done_unlocked(rnp))
                                        continue;
                                pr_cont(" l=%u:%d-%d:%#lx/%c",
                                        rnp->level, rnp->grplo, rnp->grphi,
                                        data_race(rnp->expmask),
                                        ".T"[!!data_race(rnp->exp_tasks)]);
                        }
                        pr_cont("\n");
                }
                rcu_for_each_leaf_node(rnp) {
                        for_each_leaf_node_possible_cpu(rnp, cpu) {
                                mask = leaf_node_cpu_bit(rnp, cpu);
                                if (!(READ_ONCE(rnp->expmask) & mask))
                                        continue;
                                dump_cpu_task(cpu);
                        }
                }
                jiffies_stall = 3 * rcu_exp_jiffies_till_stall_check() + 3;
        }
}

/*
 * Wait for the current expedited grace period to complete, and then
 * wake up everyone who piggybacked on the just-completed expedited
 * grace period.  Also update all the ->exp_seq_rq counters as needed
 * in order to avoid counter-wrap problems.
 */
static void rcu_exp_wait_wake(unsigned long s)
{
        struct rcu_node *rnp;

        synchronize_rcu_expedited_wait();

        // Switch over to wakeup mode, allowing the next GP to proceed.
        // End the previous grace period only after acquiring the mutex
        // to ensure that only one GP runs concurrently with wakeups.
        mutex_lock(&rcu_state.exp_wake_mutex);
        rcu_exp_gp_seq_end();
        trace_rcu_exp_grace_period(rcu_state.name, s, TPS("end"));

        rcu_for_each_node_breadth_first(rnp) {
                if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s)) {
                        spin_lock(&rnp->exp_lock);
                        /* Recheck, avoid hang in case someone just arrived. */
                        if (ULONG_CMP_LT(rnp->exp_seq_rq, s))
                                WRITE_ONCE(rnp->exp_seq_rq, s);
                        spin_unlock(&rnp->exp_lock);
                }
                smp_mb(); /* All above changes before wakeup. */
                wake_up_all(&rnp->exp_wq[rcu_seq_ctr(s) & 0x3]);
        }
        trace_rcu_exp_grace_period(rcu_state.name, s, TPS("endwake"));
        mutex_unlock(&rcu_state.exp_wake_mutex);
}

/*
 * Common code to drive an expedited grace period forward, used by
 * workqueues and mid-boot-time tasks.
 */
static void rcu_exp_sel_wait_wake(unsigned long s)
{
        /* Initialize the rcu_node tree in preparation for the wait. */
        sync_rcu_exp_select_cpus();

        /* Wait and clean up, including waking everyone. */
        rcu_exp_wait_wake(s);
}

#ifdef CONFIG_PREEMPT_RCU

/*
 * Remote handler for smp_call_function_single().  If there is an
 * RCU read-side critical section in effect, request that the
 * next rcu_read_unlock() record the quiescent state up the
 * ->expmask fields in the rcu_node tree.  Otherwise, immediately
 * report the quiescent state.
 */
static void rcu_exp_handler(void *unused)
{
        int depth = rcu_preempt_depth();
        unsigned long flags;
        struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
        struct rcu_node *rnp = rdp->mynode;
        struct task_struct *t = current;

        /*
         * First, the common case of not being in an RCU read-side
         * critical section.  If preemption and softirqs are also
         * enabled, or if the CPU is idle, immediately report the
         * quiescent state, otherwise defer.
         */
        if (!depth) {
                if (!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK)) ||
                    rcu_is_cpu_rrupt_from_idle()) {
                        rcu_report_exp_rdp(rdp);
                } else {
                        WRITE_ONCE(rdp->cpu_no_qs.b.exp, true);
                        set_tsk_need_resched(t);
                        set_preempt_need_resched();
                }
                return;
        }

        /*
         * Second, the less-common case of being in an RCU read-side
         * critical section.  In this case we can count on a future
         * rcu_read_unlock().  However, this rcu_read_unlock() might
         * execute on some other CPU, but in that case there will be
         * a future context switch.  Either way, if the expedited
         * grace period is still waiting on this CPU, set ->deferred_qs
         * so that the eventual quiescent state will be reported.
         * Note that there is a large group of race conditions that
         * can have caused this quiescent state to already have been
         * reported, so we really do need to check ->expmask.
         */
        if (depth > 0) {
                raw_spin_lock_irqsave_rcu_node(rnp, flags);
                if (rnp->expmask & rdp->grpmask) {
                        WRITE_ONCE(rdp->cpu_no_qs.b.exp, true);
                        t->rcu_read_unlock_special.b.exp_hint = true;
                }
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                return;
        }

        // Finally, negative nesting depth should not happen.
        WARN_ON_ONCE(1);
}

/* PREEMPTION=y, so no PREEMPTION=n expedited grace period to clean up after. */
static void sync_sched_exp_online_cleanup(int cpu)
{
}

/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, printing out the tid of each that is blocking the current
 * expedited grace period.
 */
static int rcu_print_task_exp_stall(struct rcu_node *rnp)
{
        unsigned long flags;
        int ndetected = 0;
        struct task_struct *t;

        if (!READ_ONCE(rnp->exp_tasks))
                return 0;
        raw_spin_lock_irqsave_rcu_node(rnp, flags);
        t = list_entry(rnp->exp_tasks->prev,
                       struct task_struct, rcu_node_entry);
        list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
                pr_cont(" P%d", t->pid);
                ndetected++;
        }
        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
        return ndetected;
}

#else /* #ifdef CONFIG_PREEMPT_RCU */

/* Request an expedited quiescent state. */
static void rcu_exp_need_qs(void)
{
        __this_cpu_write(rcu_data.cpu_no_qs.b.exp, true);
        /* Store .exp before .rcu_urgent_qs. */
        smp_store_release(this_cpu_ptr(&rcu_data.rcu_urgent_qs), true);
        set_tsk_need_resched(current);
        set_preempt_need_resched();
}

/* Invoked on each online non-idle CPU for expedited quiescent state. */
static void rcu_exp_handler(void *unused)
{
        struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
        struct rcu_node *rnp = rdp->mynode;

        if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
            __this_cpu_read(rcu_data.cpu_no_qs.b.exp))
                return;
        if (rcu_is_cpu_rrupt_from_idle()) {
                rcu_report_exp_rdp(this_cpu_ptr(&rcu_data));
                return;
        }
        rcu_exp_need_qs();
}

/* Send IPI for expedited cleanup if needed at end of CPU-hotplug operation. */
static void sync_sched_exp_online_cleanup(int cpu)
{
        unsigned long flags;
        int my_cpu;
        struct rcu_data *rdp;
        int ret;
        struct rcu_node *rnp;

        rdp = per_cpu_ptr(&rcu_data, cpu);
        rnp = rdp->mynode;
        my_cpu = get_cpu();
        /* Quiescent state either not needed or already requested, leave. */
        if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
            READ_ONCE(rdp->cpu_no_qs.b.exp)) {
                put_cpu();
                return;
        }
        /* Quiescent state needed on current CPU, so set it up locally. */
        if (my_cpu == cpu) {
                local_irq_save(flags);
                rcu_exp_need_qs();
                local_irq_restore(flags);
                put_cpu();
                return;
        }
        /* Quiescent state needed on some other CPU, send IPI. */
        ret = smp_call_function_single(cpu, rcu_exp_handler, NULL, 0);
        put_cpu();
        WARN_ON_ONCE(ret);
}

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections that are
 * blocking the current expedited grace period.
 */
static int rcu_print_task_exp_stall(struct rcu_node *rnp)
{
        return 0;
}

#endif /* #else #ifdef CONFIG_PREEMPT_RCU */

/**
 * synchronize_rcu_expedited - Brute-force RCU grace period
 *
 * Wait for an RCU grace period, but expedite it.  The basic idea is to
 * IPI all non-idle non-nohz online CPUs.  The IPI handler checks whether
 * the CPU is in an RCU critical section, and if so, it sets a flag that
 * causes the outermost rcu_read_unlock() to report the quiescent state
 * for RCU-preempt or asks the scheduler for help for RCU-sched.  On the
 * other hand, if the CPU is not in an RCU read-side critical section,
 * the IPI handler reports the quiescent state immediately.
 *
 * Although this is a great improvement over previous expedited
 * implementations, it is still unfriendly to real-time workloads, and is
 * thus not recommended for any sort of common-case code.  In fact, if
 * you are using synchronize_rcu_expedited() in a loop, please restructure
 * your code to batch your updates, and then use a single synchronize_rcu()
 * instead.
 *
 * This has the same semantics as (but is more brutal than) synchronize_rcu().
 */
void synchronize_rcu_expedited(void)
{
        bool boottime = (rcu_scheduler_active == RCU_SCHEDULER_INIT);
        struct rcu_exp_work rew;
        struct rcu_node *rnp;
        unsigned long s;

        RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
                         lock_is_held(&rcu_lock_map) ||
                         lock_is_held(&rcu_sched_lock_map),
                         "Illegal synchronize_rcu_expedited() in RCU read-side critical section");

        /* Is the state such that the call is a grace period? */
        if (rcu_blocking_is_gp())
                return;

        /* If expedited grace periods are prohibited, fall back to normal. */
        if (rcu_gp_is_normal()) {
                wait_rcu_gp(call_rcu);
                return;
        }

        /* Take a snapshot of the sequence number.  */
        s = rcu_exp_gp_seq_snap();
        if (exp_funnel_lock(s))
                return;  /* Someone else did our work for us. */

        /* Ensure that load happens before action based on it. */
        if (unlikely(boottime)) {
                /* Direct call during scheduler init and early_initcalls(). */
                rcu_exp_sel_wait_wake(s);
        } else {
                /* Marshal arguments & schedule the expedited grace period. */
                rew.rew_s = s;
                synchronize_rcu_expedited_queue_work(&rew);
        }

        /* Wait for expedited grace period to complete. */
        rnp = rcu_get_root();
        wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
                   sync_exp_work_done(s));
        smp_mb(); /* Work actions happen before return. */

        /* Let the next expedited grace period start. */
        mutex_unlock(&rcu_state.exp_mutex);

        if (likely(!boottime))
                synchronize_rcu_expedited_destroy_work(&rew);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
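
/*
 * Illustrative sketch (editorial addition, not part of the upstream
 * file) of the batching advice in the kernel-doc above.  The names
 * update_one() and my_list are hypothetical placeholders, not kernel
 * APIs.  Instead of paying for one expedited grace period per update:
 *
 *      list_for_each_entry(p, &my_list, next) {
 *              update_one(p);
 *              synchronize_rcu_expedited();    // one expedited GP each
 *      }
 *
 * prefer a single grace period covering all of the updates:
 *
 *      list_for_each_entry(p, &my_list, next)
 *              update_one(p);
 *      synchronize_rcu();                      // one (normal) GP in total
 */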