/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_PARAVIRT_H
#define _ASM_POWERPC_PARAVIRT_H

#include <linux/jump_label.h>
#include <asm/smp.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/lppaca.h>
#include <asm/hvcall.h>
#endif

#ifdef CONFIG_PPC_SPLPAR
#include <linux/smp.h>
#include <asm/kvm_guest.h>
#include <asm/cputhreads.h>

DECLARE_STATIC_KEY_FALSE(shared_processor);
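
/*
 * True when this LPAR runs in shared processor mode, i.e. the hypervisor
 * may preempt its virtual CPUs to run other partitions.
 */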
static inline bool is_shared_processor(void)
{
	return static_branch_unlikely(&shared_processor);
}

#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
extern struct static_key paravirt_steal_enabled;
extern struct static_key paravirt_steal_rq_enabled;
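
/*
 * Steal time for @cpu: time it was runnable but the hypervisor was running
 * something else. Consumed by the kernel's steal time accounting when
 * CONFIG_PARAVIRT_TIME_ACCOUNTING is enabled.
 */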
u64 pseries_paravirt_steal_clock(int cpu);

static inline u64 paravirt_steal_clock(int cpu)
{
	return pseries_paravirt_steal_clock(cpu);
}
#endif

/* If bit 0 is set, the cpu has been ceded, conferred, or preempted */
static inline u32 yield_count_of(int cpu)
{
	__be32 yield_count = READ_ONCE(lppaca_of(cpu).yield_count);

	return be32_to_cpu(yield_count);
}

/*
 * Spinlock code confers and prods, so don't trace the hcalls because the
 * tracing code takes spinlocks which can cause recursion deadlocks.
 *
 * These calls are made while the lock is not held: the lock slowpath yields if
 * it cannot acquire the lock, and the unlock slowpath might prod if a waiter
 * has yielded. So this may not be a problem for simple spin locks because the
 * tracing does not technically recurse on the lock, but we avoid it anyway.
 *
 * However the queued spin lock contended path is more strictly ordered: the
 * H_CONFER hcall is made after the task has queued itself on the lock, so then
 * recursing on that lock will cause the task to then queue up again behind the
 * first instance (or worse: queued spinlocks use tricks that assume a context
 * never waits on more than one spinlock, so such recursion may cause random
 * corruption in the lock code).
 */
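
/*
 * Confer our remaining time slice to the (preempted) target CPU. The
 * @yield_count snapshot lets the hypervisor ignore the confer if the target
 * has since been re-dispatched.
 */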
static inline void yield_to_preempted(int cpu, u32 yield_count)
{
	plpar_hcall_norets_notrace(H_CONFER, get_hard_smp_processor_id(cpu), yield_count);
}
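
/* Wake (prod) the target CPU, e.g. so it returns from an earlier cede or confer. */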
static inline void prod_cpu(int cpu)
{
	plpar_hcall_norets_notrace(H_PROD, get_hard_smp_processor_id(cpu));
}
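
/* Confer our remaining time slice without a specific target (-1 = any/all vCPUs of the partition). */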
static inline void yield_to_any(void)
{
	plpar_hcall_norets_notrace(H_CONFER, -1, 0);
}
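
/*
 * The lppaca idle flag is set by the guest OS around its idle loop; it lets
 * us tell a vCPU that ceded because it was idle apart from one that was
 * preempted by the hypervisor.
 */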
static inline bool is_vcpu_idle(int vcpu)
{
	return lppaca_of(vcpu).idle;
}
#else
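/*
 * !CONFIG_PPC_SPLPAR: provide stubs so common code compiles. The ___bad_*
 * externs are never defined, so any reachable call to the yield/prod helpers
 * on such configs fails at link time.
 */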
static inline bool is_shared_processor(void)
{
	return false;
}

static inline u32 yield_count_of(int cpu)
{
	return 0;
}

extern void ___bad_yield_to_preempted(void);
static inline void yield_to_preempted(int cpu, u32 yield_count)
{
	___bad_yield_to_preempted(); /* This would be a bug */
}

extern void ___bad_yield_to_any(void);
static inline void yield_to_any(void)
{
	___bad_yield_to_any(); /* This would be a bug */
}

extern void ___bad_prod_cpu(void);
static inline void prod_cpu(int cpu)
{
	___bad_prod_cpu(); /* This would be a bug */
}

static inline bool is_vcpu_idle(int vcpu)
{
	return false;
}
#endif
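
/*
 * The define tells generic code that this architecture supplies its own
 * vcpu_is_preempted() instead of the default stub that always returns false.
 */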
#define vcpu_is_preempted vcpu_is_preempted
static inline bool vcpu_is_preempted(int cpu)
{
	/*
	 * The dispatch/yield bit alone is an imperfect indicator of
	 * whether the hypervisor has dispatched @cpu to run on a physical
	 * processor. When it is clear, @cpu is definitely not preempted.
	 * But when it is set, it means only that it *might* be, subject to
	 * other conditions. So we check other properties of the VM and
	 * @cpu first, resorting to the yield count last.
	 */

	/*
	 * Hypervisor preemption isn't possible in dedicated processor
	 * mode by definition.
	 */
	if (!is_shared_processor())
		return false;

	/*
	 * If the hypervisor has dispatched the target CPU on a physical
	 * processor, then the target CPU is definitely not preempted.
	 */
	if (!(yield_count_of(cpu) & 1))
		return false;

	/*
	 * If the target CPU has yielded to the hypervisor but the OS has not
	 * requested idle, then the target CPU is definitely preempted.
	 */
	if (!is_vcpu_idle(cpu))
		return true;

#ifdef CONFIG_PPC_SPLPAR
	if (!is_kvm_guest()) {
		int first_cpu, i;

		/*
		 * The result of vcpu_is_preempted() is used in a
		 * speculative way, and is always subject to invalidation
		 * by events internal and external to Linux. While we can
		 * be called in preemptable context (in the Linux sense),
		 * we're not accessing per-cpu resources in a way that can
		 * race destructively with Linux scheduler preemption and
		 * migration, and callers can tolerate the potential for
		 * error introduced by sampling the CPU index without
		 * pinning the task to it. So it is permissible to use
		 * raw_smp_processor_id() here to defeat the preempt debug
		 * warnings that can arise from using smp_processor_id()
		 * in arbitrary contexts.
		 */
		first_cpu = cpu_first_thread_sibling(raw_smp_processor_id());

		/*
		 * The PowerVM hypervisor dispatches VMs on a whole core
		 * basis. So we know that a thread sibling of the local CPU
		 * cannot have been preempted by the hypervisor, even if it
		 * has called H_CONFER, which will set the yield bit.
		 */
		if (cpu_first_thread_sibling(cpu) == first_cpu)
			return false;

		/*
		 * If any of the threads of the target CPU's core are not
		 * preempted or ceded, then consider the target CPU to be
		 * non-preempted.
		 */
		first_cpu = cpu_first_thread_sibling(cpu);
		for (i = first_cpu; i < first_cpu + threads_per_core; i++) {
			if (i == cpu)
				continue;
			if (!(yield_count_of(i) & 1))
				return false;
			if (!is_vcpu_idle(i))
				return true;
		}
	}
#endif

	/*
	 * None of the threads in the target CPU's core are running, but none
	 * of them were preempted either. Hence assume the target CPU to be
	 * non-preempted.
	 */
	return false;
}
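
/*
 * The native (non-paravirt) spinlock unlock path is sufficient unless vCPUs
 * can be preempted by the hypervisor, i.e. unless we run in shared processor
 * mode.
 */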
static inline bool pv_is_native_spin_unlock(void)
{
	return !is_shared_processor();
}

#endif /* _ASM_POWERPC_PARAVIRT_H */