/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * RCU expedited grace periods
 *
 * Copyright IBM Corporation, 2016
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 */

#include <linux/lockdep.h>

static void rcu_exp_handler(void *unused);

/*
 * Record the start of an expedited grace period.
 */
static void rcu_exp_gp_seq_start(void)
{
        rcu_seq_start(&rcu_state.expedited_sequence);
}

/*
 * Return the value that the expedited-grace-period counter will have
 * at the end of the current grace period.
 */
static __maybe_unused unsigned long rcu_exp_gp_seq_endval(void)
{
        return rcu_seq_endval(&rcu_state.expedited_sequence);
}

/*
 * Record the end of an expedited grace period.
 */
static void rcu_exp_gp_seq_end(void)
{
        rcu_seq_end(&rcu_state.expedited_sequence);
        smp_mb(); /* Ensure that consecutive grace periods serialize. */
}

/*
 * Take a snapshot of the expedited-grace-period counter.
 */
static unsigned long rcu_exp_gp_seq_snap(void)
{
        unsigned long s;

        smp_mb(); /* Caller's modifications seen first by other CPUs. */
        s = rcu_seq_snap(&rcu_state.expedited_sequence);
        trace_rcu_exp_grace_period(rcu_state.name, s, TPS("snap"));
        return s;
}

/*
 * Given a counter snapshot from rcu_exp_gp_seq_snap(), return true
 * if a full expedited grace period has elapsed since that snapshot
 * was taken.
 */
static bool rcu_exp_gp_seq_done(unsigned long s)
{
        return rcu_seq_done(&rcu_state.expedited_sequence, s);
}
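
/*
 * Illustrative sketch (not part of the kernel API) of how the above
 * counter helpers pair up.  An updater snapshots the counter before
 * requesting a grace period, then polls or waits until that snapshot
 * is seen as complete:
 *
 *      unsigned long s;
 *
 *      s = rcu_exp_gp_seq_snap();      // "I need a GP at least this new."
 *      // ... start or piggy-back on an expedited grace period ...
 *      if (rcu_exp_gp_seq_done(s))     // True once a full expedited GP
 *              ...;                    // has elapsed since the snapshot.
 */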

/*
 * Reset the ->expmaskinit values in the rcu_node tree to reflect any
 * recent CPU-online activity.  Note that these masks are not cleared
 * when CPUs go offline, so they reflect the union of all CPUs that have
 * ever been online.  This means that this function normally takes its
 * no-work-to-do fastpath.
 */
static void sync_exp_reset_tree_hotplug(void)
{
        bool done;
        unsigned long flags;
        unsigned long mask;
        unsigned long oldmask;
        int ncpus = smp_load_acquire(&rcu_state.ncpus); /* Order vs. locking. */
        struct rcu_node *rnp;
        struct rcu_node *rnp_up;

        /* If no new CPUs onlined since last time, nothing to do. */
        if (likely(ncpus == rcu_state.ncpus_snap))
                return;
        rcu_state.ncpus_snap = ncpus;

        /*
         * Each pass through the following loop propagates newly onlined
         * CPUs for the current rcu_node structure up the rcu_node tree.
         */
        rcu_for_each_leaf_node(rnp) {
                raw_spin_lock_irqsave_rcu_node(rnp, flags);
                if (rnp->expmaskinit == rnp->expmaskinitnext) {
                        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                        continue;  /* No new CPUs, nothing to do. */
                }

                /* Update this node's mask, track old value for propagation. */
                oldmask = rnp->expmaskinit;
                rnp->expmaskinit = rnp->expmaskinitnext;
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

                /* If was already nonzero, nothing to propagate. */
                if (oldmask)
                        continue;

                /* Propagate the new CPU up the tree. */
                mask = rnp->grpmask;
                rnp_up = rnp->parent;
                done = false;
                while (rnp_up) {
                        raw_spin_lock_irqsave_rcu_node(rnp_up, flags);
                        if (rnp_up->expmaskinit)
                                done = true;
                        rnp_up->expmaskinit |= mask;
                        raw_spin_unlock_irqrestore_rcu_node(rnp_up, flags);
                        if (done)
                                break;
                        mask = rnp_up->grpmask;
                        rnp_up = rnp_up->parent;
                }
        }
}
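
/*
 * For example (illustrative only), with a two-level tree in which a leaf
 * node B covers CPUs 16-31: when CPU 17 comes online for the first time,
 * its bit is set in B->expmaskinitnext.  The next expedited grace period
 * copies that into B->expmaskinit, and because B's mask was previously
 * zero, B's ->grpmask bit is then ORed into the root's ->expmaskinit,
 * after which propagation stops.
 */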

/*
 * Reset the ->expmask values in the rcu_node tree in preparation for
 * a new expedited grace period.
 */
static void __maybe_unused sync_exp_reset_tree(void)
{
        unsigned long flags;
        struct rcu_node *rnp;

        sync_exp_reset_tree_hotplug();
        rcu_for_each_node_breadth_first(rnp) {
                raw_spin_lock_irqsave_rcu_node(rnp, flags);
                WARN_ON_ONCE(rnp->expmask);
                rnp->expmask = rnp->expmaskinit;
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
        }
}

/*
 * Return non-zero if there is no RCU expedited grace period in progress
 * for the specified rcu_node structure, in other words, if all CPUs and
 * tasks covered by the specified rcu_node structure have done their bit
 * for the current expedited grace period.  Works only for preemptible
 * RCU -- other RCU implementations use other means.
 *
 * Caller must hold the specified rcu_node structure's ->lock.
 */
static bool sync_rcu_preempt_exp_done(struct rcu_node *rnp)
{
        raw_lockdep_assert_held_rcu_node(rnp);

        return rnp->exp_tasks == NULL &&
               READ_ONCE(rnp->expmask) == 0;
}

/*
 * Like sync_rcu_preempt_exp_done(), but this function assumes the caller
 * doesn't hold the rcu_node's ->lock, and will acquire and release the lock
 * itself.
 */
static bool sync_rcu_preempt_exp_done_unlocked(struct rcu_node *rnp)
{
        unsigned long flags;
        bool ret;

        raw_spin_lock_irqsave_rcu_node(rnp, flags);
        ret = sync_rcu_preempt_exp_done(rnp);
        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

        return ret;
}

/*
 * Report the exit from RCU read-side critical section for the last task
 * that queued itself during or before the current expedited preemptible-RCU
 * grace period.  This event is reported either to the rcu_node structure on
 * which the task was queued or to one of that rcu_node structure's ancestors,
 * recursively up the tree.  (Calm down, calm down, we do the recursion
 * iteratively!)
 *
 * Caller must hold the specified rcu_node structure's ->lock.
 */
static void __rcu_report_exp_rnp(struct rcu_node *rnp,
                                 bool wake, unsigned long flags)
        __releases(rnp->lock)
{
        unsigned long mask;

        for (;;) {
                if (!sync_rcu_preempt_exp_done(rnp)) {
                        if (!rnp->expmask)
                                rcu_initiate_boost(rnp, flags);
                        else
                                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                        break;
                }
                if (rnp->parent == NULL) {
                        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                        if (wake) {
                                smp_mb(); /* EGP done before wake_up(). */
                                swake_up_one(&rcu_state.expedited_wq);
                        }
                        break;
                }
                mask = rnp->grpmask;
                raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled */
                rnp = rnp->parent;
                raw_spin_lock_rcu_node(rnp); /* irqs already disabled */
                WARN_ON_ONCE(!(rnp->expmask & mask));
                rnp->expmask &= ~mask;
        }
}
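
/*
 * Informal example of the loop above (derived from the code, not a
 * separate specification): if a leaf rcu_node structure becomes quiet
 * (no queued tasks and ->expmask == 0), its ->grpmask bit is cleared
 * from its parent's ->expmask and the walk moves up one level.  When
 * the root itself becomes quiet, the task sleeping in the
 * expedited-wait path is awakened via rcu_state.expedited_wq (if wake
 * is set).
 */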

/*
 * Report expedited quiescent state for specified node.  This is a
 * lock-acquisition wrapper function for __rcu_report_exp_rnp().
 */
static void __maybe_unused rcu_report_exp_rnp(struct rcu_node *rnp, bool wake)
{
        unsigned long flags;

        raw_spin_lock_irqsave_rcu_node(rnp, flags);
        __rcu_report_exp_rnp(rnp, wake, flags);
}

/*
 * Report expedited quiescent state for multiple CPUs, all covered by the
 * specified leaf rcu_node structure.
 */
static void rcu_report_exp_cpu_mult(struct rcu_node *rnp,
                                    unsigned long mask, bool wake)
{
        unsigned long flags;

        raw_spin_lock_irqsave_rcu_node(rnp, flags);
        if (!(rnp->expmask & mask)) {
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                return;
        }
        rnp->expmask &= ~mask;
        __rcu_report_exp_rnp(rnp, wake, flags); /* Releases rnp->lock. */
}

/*
 * Report expedited quiescent state for specified rcu_data (CPU).
 */
static void rcu_report_exp_rdp(struct rcu_data *rdp)
{
        WRITE_ONCE(rdp->deferred_qs, false);
        rcu_report_exp_cpu_mult(rdp->mynode, rdp->grpmask, true);
}

/* Common code for work-done checking. */
static bool sync_exp_work_done(unsigned long s)
{
        if (rcu_exp_gp_seq_done(s)) {
                trace_rcu_exp_grace_period(rcu_state.name, s, TPS("done"));
                /* Ensure test happens before caller kfree(). */
                smp_mb__before_atomic(); /* ^^^ */
                return true;
        }
        return false;
}

/*
 * Funnel-lock acquisition for expedited grace periods.  Returns true
 * if some other task completed an expedited grace period that this task
 * can piggy-back on, and with no mutex held.  Otherwise, returns false
 * with the mutex held, indicating that the caller must actually do the
 * expedited grace period.
 */
static bool exp_funnel_lock(unsigned long s)
{
        struct rcu_data *rdp = per_cpu_ptr(&rcu_data, raw_smp_processor_id());
        struct rcu_node *rnp = rdp->mynode;
        struct rcu_node *rnp_root = rcu_get_root();

        /* Low-contention fastpath. */
        if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s) &&
            (rnp == rnp_root ||
             ULONG_CMP_LT(READ_ONCE(rnp_root->exp_seq_rq), s)) &&
            mutex_trylock(&rcu_state.exp_mutex))
                goto fastpath;

        /*
         * Each pass through the following loop works its way up
         * the rcu_node tree, returning if others have done the work or
         * otherwise falls through to acquire ->exp_mutex.  The mapping
         * from CPU to rcu_node structure can be inexact, as it is just
         * promoting locality and is not strictly needed for correctness.
         */
        for (; rnp != NULL; rnp = rnp->parent) {
                if (sync_exp_work_done(s))
                        return true;

                /* Work not done, either wait here or go up. */
                spin_lock(&rnp->exp_lock);
                if (ULONG_CMP_GE(rnp->exp_seq_rq, s)) {

                        /* Someone else doing GP, so wait for them. */
                        spin_unlock(&rnp->exp_lock);
                        trace_rcu_exp_funnel_lock(rcu_state.name, rnp->level,
                                                  rnp->grplo, rnp->grphi,
                                                  TPS("wait"));
                        wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
                                   sync_exp_work_done(s));
                        return true;
                }
                rnp->exp_seq_rq = s; /* Followers can wait on us. */
                spin_unlock(&rnp->exp_lock);
                trace_rcu_exp_funnel_lock(rcu_state.name, rnp->level,
                                          rnp->grplo, rnp->grphi, TPS("nxtlvl"));
        }
        mutex_lock(&rcu_state.exp_mutex);
fastpath:
        if (sync_exp_work_done(s)) {
                mutex_unlock(&rcu_state.exp_mutex);
                return true;
        }
        rcu_exp_gp_seq_start();
        trace_rcu_exp_grace_period(rcu_state.name, s, TPS("start"));
        return false;
}
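
/*
 * Note on the "& 0x3" wait-queue indexing above: each rcu_node structure
 * has four exp_wq[] wait queues, indexed by the two low-order bits of
 * rcu_seq_ctr(s).  Because only a bounded number of expedited grace
 * periods can be in various stages of completion at once, two bits
 * suffice to keep waiters on distinct grace periods from sharing a
 * queue.  Illustrative example: a waiter whose snapshot s has
 * rcu_seq_ctr(s) == 5 sleeps on rnp->exp_wq[1] and is awakened by the
 * wake_up_all() in rcu_exp_wait_wake() once that grace period ends.
 */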

/*
 * Select the CPUs within the specified rcu_node that the upcoming
 * expedited grace period needs to wait for.
 */
static void sync_rcu_exp_select_node_cpus(struct work_struct *wp)
{
        int cpu;
        unsigned long flags;
        unsigned long mask_ofl_test;
        unsigned long mask_ofl_ipi;
        int ret;
        struct rcu_exp_work *rewp =
                container_of(wp, struct rcu_exp_work, rew_work);
        struct rcu_node *rnp = container_of(rewp, struct rcu_node, rew);

        raw_spin_lock_irqsave_rcu_node(rnp, flags);

        /* Each pass checks a CPU for identity, offline, and idle. */
        mask_ofl_test = 0;
        for_each_leaf_node_cpu_mask(rnp, cpu, rnp->expmask) {
                unsigned long mask = leaf_node_cpu_bit(rnp, cpu);
                struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
                int snap;

                if (raw_smp_processor_id() == cpu ||
                    !(rnp->qsmaskinitnext & mask)) {
                        mask_ofl_test |= mask;
                } else {
                        snap = rcu_dynticks_snap(rdp);
                        if (rcu_dynticks_in_eqs(snap))
                                mask_ofl_test |= mask;
                        else
                                rdp->exp_dynticks_snap = snap;
                }
        }
        mask_ofl_ipi = rnp->expmask & ~mask_ofl_test;

        /*
         * Need to wait for any blocked tasks as well.  Note that
         * additional blocking tasks will also block the expedited GP
         * until such time as the ->expmask bits are cleared.
         */
        if (rcu_preempt_has_tasks(rnp))
                rnp->exp_tasks = rnp->blkd_tasks.next;
        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

        /* IPI the remaining CPUs for expedited quiescent state. */
        for_each_leaf_node_cpu_mask(rnp, cpu, rnp->expmask) {
                unsigned long mask = leaf_node_cpu_bit(rnp, cpu);
                struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);

                if (!(mask_ofl_ipi & mask))
                        continue;
retry_ipi:
                if (rcu_dynticks_in_eqs_since(rdp, rdp->exp_dynticks_snap)) {
                        mask_ofl_test |= mask;
                        continue;
                }
                ret = smp_call_function_single(cpu, rcu_exp_handler, NULL, 0);
                if (!ret) {
                        mask_ofl_ipi &= ~mask;
                        continue;
                }
                /* Failed, raced with CPU hotplug operation. */
                raw_spin_lock_irqsave_rcu_node(rnp, flags);
                if ((rnp->qsmaskinitnext & mask) &&
                    (rnp->expmask & mask)) {
                        /* Online, so delay for a bit and try again. */
                        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                        trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("selectofl"));
                        schedule_timeout_uninterruptible(1);
                        goto retry_ipi;
                }
                /* CPU really is offline, so we can ignore it. */
                if (!(rnp->expmask & mask))
                        mask_ofl_ipi &= ~mask;
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
        }
        /* Report quiescent states for those that went offline. */
        mask_ofl_test |= mask_ofl_ipi;
        if (mask_ofl_test)
                rcu_report_exp_cpu_mult(rnp, mask_ofl_test, false);
}
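
/*
 * To summarize the above (informally): a CPU lands in mask_ofl_test,
 * and thus has its quiescent state reported immediately, if it is
 * (1) the CPU running this code, (2) not marked online in
 * ->qsmaskinitnext, or (3) observed idle in an extended quiescent
 * state.  Every other CPU with a set ->expmask bit gets an IPI, with
 * the IPI-vs-hotplug race handled by the retry_ipi loop.
 */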

/*
 * Select the nodes that the upcoming expedited grace period needs
 * to wait for.
 */
static void sync_rcu_exp_select_cpus(void)
{
        int cpu;
        struct rcu_node *rnp;

        trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("reset"));
        sync_exp_reset_tree();
        trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("select"));

        /* Schedule work for each leaf rcu_node structure. */
        rcu_for_each_leaf_node(rnp) {
                rnp->exp_need_flush = false;
                if (!READ_ONCE(rnp->expmask))
                        continue; /* Avoid early boot non-existent wq. */
                if (!READ_ONCE(rcu_par_gp_wq) ||
                    rcu_scheduler_active != RCU_SCHEDULER_RUNNING ||
                    rcu_is_last_leaf_node(rnp)) {
                        /* No workqueues yet or last leaf, do direct call. */
                        sync_rcu_exp_select_node_cpus(&rnp->rew.rew_work);
                        continue;
                }
                INIT_WORK(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus);
                cpu = find_next_bit(&rnp->ffmask, BITS_PER_LONG, -1);
                /* If all offline, queue the work on an unbound CPU. */
                if (unlikely(cpu > rnp->grphi - rnp->grplo))
                        cpu = WORK_CPU_UNBOUND;
                else
                        cpu += rnp->grplo;
                queue_work_on(cpu, rcu_par_gp_wq, &rnp->rew.rew_work);
                rnp->exp_need_flush = true;
        }

        /* Wait for workqueue jobs (if any) to complete. */
        rcu_for_each_leaf_node(rnp)
                if (rnp->exp_need_flush)
                        flush_work(&rnp->rew.rew_work);
}
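
/*
 * Design note: each leaf rcu_node structure gets its own work item, so
 * that CPU selection and IPI transmission for different leaves can
 * proceed concurrently on large systems.  The last leaf is instead
 * handled by a direct call, which (plausibly) lets the current task do
 * useful work itself rather than immediately sleeping in flush_work()
 * while yet another workqueue item runs.
 */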

static void synchronize_sched_expedited_wait(void)
{
        int cpu;
        unsigned long jiffies_stall;
        unsigned long jiffies_start;
        unsigned long mask;
        int ndetected;
        struct rcu_node *rnp;
        struct rcu_node *rnp_root = rcu_get_root();
        int ret;

        trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("startwait"));
        jiffies_stall = rcu_jiffies_till_stall_check();
        jiffies_start = jiffies;

        for (;;) {
                ret = swait_event_timeout_exclusive(
                                rcu_state.expedited_wq,
                                sync_rcu_preempt_exp_done_unlocked(rnp_root),
                                jiffies_stall);
                if (ret > 0 || sync_rcu_preempt_exp_done_unlocked(rnp_root))
                        return;
                WARN_ON(ret < 0);  /* workqueues should not be signaled. */
                if (rcu_cpu_stall_suppress)
                        continue;
                panic_on_rcu_stall();
                pr_err("INFO: %s detected expedited stalls on CPUs/tasks: {",
                       rcu_state.name);
                ndetected = 0;
                rcu_for_each_leaf_node(rnp) {
                        ndetected += rcu_print_task_exp_stall(rnp);
                        for_each_leaf_node_possible_cpu(rnp, cpu) {
                                struct rcu_data *rdp;

                                mask = leaf_node_cpu_bit(rnp, cpu);
                                if (!(rnp->expmask & mask))
                                        continue;
                                ndetected++;
                                rdp = per_cpu_ptr(&rcu_data, cpu);
                                pr_cont(" %d-%c%c%c", cpu,
                                        "O."[!!cpu_online(cpu)],
                                        "o."[!!(rdp->grpmask & rnp->expmaskinit)],
                                        "N."[!!(rdp->grpmask & rnp->expmaskinitnext)]);
                        }
                }
                pr_cont(" } %lu jiffies s: %lu root: %#lx/%c\n",
                        jiffies - jiffies_start, rcu_state.expedited_sequence,
                        rnp_root->expmask, ".T"[!!rnp_root->exp_tasks]);
                if (ndetected) {
                        pr_err("blocking rcu_node structures:");
                        rcu_for_each_node_breadth_first(rnp) {
                                if (rnp == rnp_root)
                                        continue; /* printed unconditionally */
                                if (sync_rcu_preempt_exp_done_unlocked(rnp))
                                        continue;
                                pr_cont(" l=%u:%d-%d:%#lx/%c",
                                        rnp->level, rnp->grplo, rnp->grphi,
                                        rnp->expmask,
                                        ".T"[!!rnp->exp_tasks]);
                        }
                        pr_cont("\n");
                }
                rcu_for_each_leaf_node(rnp) {
                        for_each_leaf_node_possible_cpu(rnp, cpu) {
                                mask = leaf_node_cpu_bit(rnp, cpu);
                                if (!(rnp->expmask & mask))
                                        continue;
                                dump_cpu_task(cpu);
                        }
                }
                jiffies_stall = 3 * rcu_jiffies_till_stall_check() + 3;
        }
}
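
/*
 * Illustrative stall-warning output from the code above (format only;
 * the values are made up):
 *
 *      INFO: rcu_preempt detected expedited stalls on CPUs/tasks:
 *              { 7-O.N } 21 jiffies s: 73 root: 0x8/.
 *
 * Per the pr_cont() flags: "O" means the CPU is currently online, "o"
 * that its bit is set in ->expmaskinit (part of this grace period's
 * initial mask), and "N" that its bit is set in ->expmaskinitnext
 * (online per the latest hotplug state); a "." in any position negates
 * the corresponding flag.
 */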

/*
 * Wait for the current expedited grace period to complete, and then
 * wake up everyone who piggybacked on the just-completed expedited
 * grace period.  Also update all the ->exp_seq_rq counters as needed
 * in order to avoid counter-wrap problems.
 */
static void rcu_exp_wait_wake(unsigned long s)
{
        struct rcu_node *rnp;

        synchronize_sched_expedited_wait();
        rcu_exp_gp_seq_end();
        trace_rcu_exp_grace_period(rcu_state.name, s, TPS("end"));

        /*
         * Switch over to wakeup mode, allowing the next GP, but -only- the
         * next GP, to proceed.
         */
        mutex_lock(&rcu_state.exp_wake_mutex);

        rcu_for_each_node_breadth_first(rnp) {
                if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s)) {
                        spin_lock(&rnp->exp_lock);
                        /* Recheck, avoid hang in case someone just arrived. */
                        if (ULONG_CMP_LT(rnp->exp_seq_rq, s))
                                rnp->exp_seq_rq = s;
                        spin_unlock(&rnp->exp_lock);
                }
                smp_mb(); /* All above changes before wakeup. */
                wake_up_all(&rnp->exp_wq[rcu_seq_ctr(rcu_state.expedited_sequence) & 0x3]);
        }
        trace_rcu_exp_grace_period(rcu_state.name, s, TPS("endwake"));
        mutex_unlock(&rcu_state.exp_wake_mutex);
}

/*
 * Common code to drive an expedited grace period forward, used by
 * workqueues and mid-boot-time tasks.
 */
static void rcu_exp_sel_wait_wake(unsigned long s)
{
        /* Initialize the rcu_node tree in preparation for the wait. */
        sync_rcu_exp_select_cpus();

        /* Wait and clean up, including waking everyone. */
        rcu_exp_wait_wake(s);
}

/*
 * Work-queue handler to drive an expedited grace period forward.
 */
static void wait_rcu_exp_gp(struct work_struct *wp)
{
        struct rcu_exp_work *rewp;

        rewp = container_of(wp, struct rcu_exp_work, rew_work);
        rcu_exp_sel_wait_wake(rewp->rew_s);
}
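
/*
 * The rcu_exp_work structure (defined elsewhere) bundles the work_struct
 * with the sequence snapshot: ->rew_s carries the snapshot taken by
 * synchronize_rcu_expedited() into this handler, so that the workqueue
 * context knows which expedited grace period it is driving.
 */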

#ifdef CONFIG_PREEMPT_RCU

/*
 * Remote handler for smp_call_function_single().  If there is an
 * RCU read-side critical section in effect, request that the
 * next rcu_read_unlock() record the quiescent state up the
 * ->expmask fields in the rcu_node tree.  Otherwise, immediately
 * report the quiescent state.
 */
static void rcu_exp_handler(void *unused)
{
        unsigned long flags;
        struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
        struct rcu_node *rnp = rdp->mynode;
        struct task_struct *t = current;

        /*
         * First, the common case of not being in an RCU read-side
         * critical section.  If also enabled or idle, immediately
         * report the quiescent state, otherwise defer.
         */
        if (!t->rcu_read_lock_nesting) {
                if (!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK)) ||
                    rcu_dynticks_curr_cpu_in_eqs()) {
                        rcu_report_exp_rdp(rdp);
                } else {
                        rdp->deferred_qs = true;
                        set_tsk_need_resched(t);
                        set_preempt_need_resched();
                }
                return;
        }

        /*
         * Second, the less-common case of being in an RCU read-side
         * critical section.  In this case we can count on a future
         * rcu_read_unlock().  However, this rcu_read_unlock() might
         * execute on some other CPU, but in that case there will be
         * a future context switch.  Either way, if the expedited
         * grace period is still waiting on this CPU, set ->deferred_qs
         * so that the eventual quiescent state will be reported.
         * Note that there is a large group of race conditions that
         * can have caused this quiescent state to already have been
         * reported, so we really do need to check ->expmask.
         */
        if (t->rcu_read_lock_nesting > 0) {
                raw_spin_lock_irqsave_rcu_node(rnp, flags);
                if (rnp->expmask & rdp->grpmask) {
                        rdp->deferred_qs = true;
                        WRITE_ONCE(t->rcu_read_unlock_special.b.exp_hint, true);
                }
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                return;
        }

        /*
         * The final and least likely case is where the interrupted
         * code was just about to or just finished exiting the RCU-preempt
         * read-side critical section, and no, we can't tell which.
         * So either way, set ->deferred_qs to flag later code that
         * a quiescent state is required.
         *
         * If the CPU is fully enabled (or if some buggy RCU-preempt
         * read-side critical section is being used from idle), just
         * invoke rcu_preempt_deferred_qs() to immediately report the
         * quiescent state.  We cannot use rcu_read_unlock_special()
         * because we are in an interrupt handler, which will cause that
         * function to take an early exit without doing anything.
         *
         * Otherwise, force a context switch after the CPU enables everything.
         */
        rdp->deferred_qs = true;
        if (!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK)) ||
            WARN_ON_ONCE(rcu_dynticks_curr_cpu_in_eqs())) {
                rcu_preempt_deferred_qs(t);
        } else {
                set_tsk_need_resched(t);
                set_preempt_need_resched();
        }
}
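
/*
 * Informal summary of the handler's three cases (derived from the code
 * above, not a separate specification):
 *
 *      nesting == 0, CPU enabled or idle:  report the QS immediately.
 *      nesting == 0, otherwise:            defer and request a resched.
 *      nesting > 0:                        set exp_hint so the outermost
 *                                          rcu_read_unlock() reports it.
 *      nesting < 0 (mid-unlock):           defer; report now if safe,
 *                                          otherwise force a resched.
 */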

/* PREEMPT=y, so no PREEMPT=n expedited grace period to clean up after. */
static void sync_sched_exp_online_cleanup(int cpu)
{
}

#else /* #ifdef CONFIG_PREEMPT_RCU */

/* Invoked on each online non-idle CPU for expedited quiescent state. */
static void rcu_exp_handler(void *unused)
{
        struct rcu_data *rdp;
        struct rcu_node *rnp;

        rdp = this_cpu_ptr(&rcu_data);
        rnp = rdp->mynode;
        if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
            __this_cpu_read(rcu_data.cpu_no_qs.b.exp))
                return;
        if (rcu_is_cpu_rrupt_from_idle()) {
                rcu_report_exp_rdp(this_cpu_ptr(&rcu_data));
                return;
        }
        __this_cpu_write(rcu_data.cpu_no_qs.b.exp, true);
        /* Store .exp before .rcu_urgent_qs. */
        smp_store_release(this_cpu_ptr(&rcu_data.rcu_urgent_qs), true);
        set_tsk_need_resched(current);
        set_preempt_need_resched();
}

/* Send IPI for expedited cleanup if needed at end of CPU-hotplug operation. */
static void sync_sched_exp_online_cleanup(int cpu)
{
        struct rcu_data *rdp;
        int ret;
        struct rcu_node *rnp;

        rdp = per_cpu_ptr(&rcu_data, cpu);
        rnp = rdp->mynode;
        if (!(READ_ONCE(rnp->expmask) & rdp->grpmask))
                return;
        ret = smp_call_function_single(cpu, rcu_exp_handler, NULL, 0);
        WARN_ON_ONCE(ret);
}

#endif /* #else #ifdef CONFIG_PREEMPT_RCU */

/**
 * synchronize_rcu_expedited - Brute-force RCU grace period
 *
 * Wait for an RCU grace period, but expedite it.  The basic idea is to
 * IPI all non-idle non-nohz online CPUs.  The IPI handler checks whether
 * the CPU is in an RCU critical section, and if so, it sets a flag that
 * causes the outermost rcu_read_unlock() to report the quiescent state
 * for RCU-preempt or asks the scheduler for help for RCU-sched.  On the
 * other hand, if the CPU is not in an RCU read-side critical section,
 * the IPI handler reports the quiescent state immediately.
 *
 * Although this is a great improvement over previous expedited
 * implementations, it is still unfriendly to real-time workloads, so is
 * thus not recommended for any sort of common-case code.  In fact, if
 * you are using synchronize_rcu_expedited() in a loop, please restructure
 * your code to batch your updates, and then use a single synchronize_rcu()
 * instead.
 *
 * This has the same semantics as (but is more brutal than) synchronize_rcu().
 */
void synchronize_rcu_expedited(void)
{
        struct rcu_data *rdp;
        struct rcu_exp_work rew;
        struct rcu_node *rnp;
        unsigned long s;

        RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
                         lock_is_held(&rcu_lock_map) ||
                         lock_is_held(&rcu_sched_lock_map),
                         "Illegal synchronize_rcu_expedited() in RCU read-side critical section");

        /* Is the state such that the call is a grace period? */
        if (rcu_blocking_is_gp())
                return;

        /* If expedited grace periods are prohibited, fall back to normal. */
        if (rcu_gp_is_normal()) {
                wait_rcu_gp(call_rcu);
                return;
        }

        /* Take a snapshot of the sequence number. */
        s = rcu_exp_gp_seq_snap();
        if (exp_funnel_lock(s))
                return;  /* Someone else did our work for us. */

        /* Ensure that load happens before action based on it. */
        if (unlikely(rcu_scheduler_active == RCU_SCHEDULER_INIT)) {
                /* Direct call during scheduler init and early_initcalls(). */
                rcu_exp_sel_wait_wake(s);
        } else {
                /* Marshall arguments & schedule the expedited grace period. */
                rew.rew_s = s;
                INIT_WORK_ONSTACK(&rew.rew_work, wait_rcu_exp_gp);
                queue_work(rcu_gp_wq, &rew.rew_work);
        }

        /* Wait for expedited grace period to complete. */
        rdp = per_cpu_ptr(&rcu_data, raw_smp_processor_id());
        rnp = rcu_get_root();
        wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
                   sync_exp_work_done(s));
        smp_mb(); /* Workqueue actions happen before return. */

        /* Let the next expedited grace period start. */
        mutex_unlock(&rcu_state.exp_mutex);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
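
/*
 * Illustrative caller-side sketch (not from this file; the list and
 * structure names are hypothetical): an updater unlinks an element,
 * waits for all pre-existing readers, and only then frees it.
 *
 *      spin_lock(&mylist_lock);
 *      list_del_rcu(&p->node);
 *      spin_unlock(&mylist_lock);
 *      synchronize_rcu_expedited();    // All prior readers are done.
 *      kfree(p);
 *
 * As the kerneldoc above says, prefer plain synchronize_rcu() (with
 * batched updates) unless latency genuinely demands the expedited form.
 */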