/*
 * Split spinlock implementation out into its own file, so it can be
 * compiled in an FTRACE-compatible way.
 */
#include <linux/kernel_stat.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/atomic.h>

#include <asm/paravirt.h>

#include <xen/interface/xen.h>
#include <xen/events.h>

#include "xen-ops.h"
#include "debugfs.h"
static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
static DEFINE_PER_CPU(char *, irq_name);
static DEFINE_PER_CPU(atomic_t, xen_qlock_wait_nest);
static bool xen_pvspin = true;
#ifdef CONFIG_QUEUED_SPINLOCKS

#include <asm/qspinlock.h>

static void xen_qlock_kick(int cpu)
{
	int irq = per_cpu(lock_kicker_irq, cpu);

	/* Don't kick if the target's kicker interrupt is not initialized. */
	if (irq == -1)
		return;

	xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
}
/*
 * Halt the current CPU & release it back to the host.
 */
static void xen_qlock_wait(u8 *byte, u8 val)
{
	int irq = __this_cpu_read(lock_kicker_irq);
	atomic_t *nest_cnt = this_cpu_ptr(&xen_qlock_wait_nest);

	/* If kicker interrupts not initialized yet, just spin */
	if (irq == -1 || in_nmi())
		return;

	/* Detect nested calls (e.g. from an interrupt handler). */
	atomic_inc(nest_cnt);

	/* If irq pending already and no nested call, clear it. */
	if (atomic_read(nest_cnt) == 1 && xen_test_irq_pending(irq)) {
		xen_clear_irq_pending(irq);
	} else if (READ_ONCE(*byte) == val) {
		/* Block until irq becomes pending (or a spurious wakeup) */
		xen_poll_irq(irq);
	}

	atomic_dec(nest_cnt);
}
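
/*
 * Usage note: the two helpers above are the Xen back ends for the paravirt
 * qspinlock "wait" and "kick" hooks (wired up in xen_init_spinlocks() below).
 * A minimal sketch of the handshake they implement; the names lock_byte,
 * PARKED and waiter_cpu are hypothetical and used only for illustration:
 */
#if 0
	/* Waiter side: only halts if *lock_byte still holds the value it
	 * expects; any stale pending wakeup is cleared first. */
	xen_qlock_wait(lock_byte, PARKED);

	/* Releaser side: publish the new lock state first, then wake the
	 * parked vCPU, so the wakeup cannot be lost. */
	WRITE_ONCE(*lock_byte, 0);
	xen_qlock_kick(waiter_cpu);
#endif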
#else /* CONFIG_QUEUED_SPINLOCKS */

enum xen_contention_stat {
	TAKEN_SLOW,
	TAKEN_SLOW_PICKUP,
	TAKEN_SLOW_SPURIOUS,
	RELEASED_SLOW,
	RELEASED_SLOW_KICKED,
	NR_CONTENTION_STATS
};

#ifdef CONFIG_XEN_DEBUG_FS
#define HISTO_BUCKETS	30
static struct xen_spinlock_stats
{
	u32 contention_stats[NR_CONTENTION_STATS];
	u32 histo_spin_blocked[HISTO_BUCKETS+1];
	u64 time_blocked;
} spinlock_stats;

static u8 zero_stats;
static inline void check_zero(void)
{
	u8 ret;
	u8 old = READ_ONCE(zero_stats);

	if (unlikely(old)) {
		ret = cmpxchg(&zero_stats, old, 0);
		/* This ensures only one caller resets the stats. */
		if (ret == old)
			memset(&spinlock_stats, 0, sizeof(spinlock_stats));
	}
}
static inline void add_stats(enum xen_contention_stat var, u32 val)
{
	check_zero();
	spinlock_stats.contention_stats[var] += val;
}

static inline u64 spin_time_start(void)
{
	return xen_clocksource_read();
}
static void __spin_time_accum(u64 delta, u32 *array)
{
	unsigned index = ilog2(delta);

	check_zero();

	if (index < HISTO_BUCKETS)
		array[index]++;
	else
		array[HISTO_BUCKETS]++;
}

static inline void spin_time_accum_blocked(u64 start)
{
	u32 delta = xen_clocksource_read() - start;

	__spin_time_accum(delta, spinlock_stats.histo_spin_blocked);
	spinlock_stats.time_blocked += delta;
}
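
/*
 * Example: the histogram buckets blocked time by powers of two.  With the
 * Xen clocksource counting nanoseconds, a wait of 1500 ns has
 * ilog2(1500) = 10 and is counted in histo_spin_blocked[10]; waits of
 * 2^30 ns (~1.07 s) or longer all land in the overflow bucket
 * histo_spin_blocked[HISTO_BUCKETS].
 */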
#else  /* !CONFIG_XEN_DEBUG_FS */
static inline void add_stats(enum xen_contention_stat var, u32 val)
{
}

static inline u64 spin_time_start(void)
{
	return 0;
}

static inline void spin_time_accum_blocked(u64 start)
{
}
#endif  /* CONFIG_XEN_DEBUG_FS */
struct xen_lock_waiting {
	struct arch_spinlock *lock;
	__ticket_t want;
};

static DEFINE_PER_CPU(struct xen_lock_waiting, lock_waiting);
static cpumask_t waiting_cpus;
__visible void xen_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
{
	int irq = __this_cpu_read(lock_kicker_irq);
	struct xen_lock_waiting *w = this_cpu_ptr(&lock_waiting);
	int cpu = smp_processor_id();
	u64 start;
	__ticket_t head;
	unsigned long flags;

	/* If kicker interrupts not initialized yet, just spin */
	if (irq == -1)
		return;

	start = spin_time_start();

	/*
	 * Make sure an interrupt handler can't upset things in a
	 * partially setup state.
	 */
	local_irq_save(flags);
	/*
	 * We don't really care if we're overwriting some other
	 * (lock,want) pair, as that would mean that we're currently
	 * in an interrupt context, and the outer context had
	 * interrupts enabled.  That has already kicked the VCPU out
	 * of xen_poll_irq(), so it will just return spuriously and
	 * retry with the newly setup (lock,want).
	 *
	 * The ordering protocol on this is that the "lock" pointer
	 * may only be set non-NULL if the "want" ticket is correct.
	 * If we're updating "want", we must first clear "lock".
	 */
	w->lock = NULL;
	smp_wmb();
	w->want = want;
	smp_wmb();
	w->lock = lock;

	/* This uses set_bit, which is atomic and therefore a barrier */
	cpumask_set_cpu(cpu, &waiting_cpus);
	add_stats(TAKEN_SLOW, 1);

	/* Clear any stale pending wakeup. */
	xen_clear_irq_pending(irq);

	/* Only check lock once pending cleared */
	barrier();

	/*
	 * Mark entry to slowpath before doing the pickup test to make
	 * sure we don't deadlock with an unlocker.
	 */
	__ticket_enter_slowpath(lock);

	/* make sure enter_slowpath, which is atomic, does not cross the read */
	smp_mb__after_atomic();

	/*
	 * Check again to make sure the lock didn't become free while
	 * we weren't looking.
	 */
	head = READ_ONCE(lock->tickets.head);
	if (__tickets_equal(head, want)) {
		add_stats(TAKEN_SLOW_PICKUP, 1);
		goto out;
	}

	/* Allow interrupts while blocked */
	local_irq_restore(flags);

	/*
	 * If an interrupt happens here, it will leave the wakeup irq
	 * pending, which will cause xen_poll_irq() to return
	 * immediately.
	 */

	/* Block until irq becomes pending (or perhaps a spurious wakeup) */
	xen_poll_irq(irq);
	add_stats(TAKEN_SLOW_SPURIOUS, !xen_test_irq_pending(irq));

	local_irq_save(flags);

	kstat_incr_irq_this_cpu(irq);
out:
	cpumask_clear_cpu(cpu, &waiting_cpus);
	w->lock = NULL;

	local_irq_restore(flags);

	spin_time_accum_blocked(start);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_lock_spinning);
static void xen_unlock_kick(struct arch_spinlock *lock, __ticket_t next)
{
	int cpu;

	add_stats(RELEASED_SLOW, 1);

	for_each_cpu(cpu, &waiting_cpus) {
		const struct xen_lock_waiting *w = &per_cpu(lock_waiting, cpu);

		/* Make sure we read lock before want */
		if (READ_ONCE(w->lock) == lock &&
		    READ_ONCE(w->want) == next) {
			add_stats(RELEASED_SLOW_KICKED, 1);
			xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
			break;
		}
	}
}
#endif /* CONFIG_QUEUED_SPINLOCKS */
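
/*
 * Illustrative sketch of the rendezvous between xen_lock_spinning() and
 * xen_unlock_kick().  The lock pointer L, ticket number 5 and vCPU ids are
 * hypothetical and only meant to show the ordering the comments above rely
 * on (publish "want" before "lock"; the kicker reads "lock" before "want"):
 */
#if 0
	/* vCPU 3, waiting for ticket 5 on lock L (slow path): */
	w->want = 5;				/* publish the ticket first...  */
	smp_wmb();
	w->lock = L;				/* ...then the lock pointer     */
	cpumask_set_cpu(3, &waiting_cpus);
	xen_poll_irq(irq);			/* halt until kicked            */

	/* vCPU releasing L once tickets.head reaches 5: */
	xen_unlock_kick(L, 5);			/* finds (L, 5) on vCPU 3 and
						   sends XEN_SPIN_UNLOCK_VECTOR */
#endif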
static irqreturn_t dummy_handler(int irq, void *dev_id)
{
	BUG();
	return IRQ_HANDLED;
}

void xen_init_lock_cpu(int cpu)
{
	int irq;
	char *name;

	if (!xen_pvspin)
		return;

	WARN(per_cpu(lock_kicker_irq, cpu) >= 0, "spinlock on CPU%d exists on IRQ%d!\n",
	     cpu, per_cpu(lock_kicker_irq, cpu));

	name = kasprintf(GFP_KERNEL, "spinlock%d", cpu);
	irq = bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR,
				     cpu,
				     dummy_handler,
				     IRQF_PERCPU|IRQF_NOBALANCING,
				     name,
				     NULL);

	if (irq >= 0) {
		disable_irq(irq); /* make sure it's never delivered */
		per_cpu(lock_kicker_irq, cpu) = irq;
		per_cpu(irq_name, cpu) = name;
	}

	printk("cpu %d spinlock event irq %d\n", cpu, irq);
}
void xen_uninit_lock_cpu(int cpu)
{
	int irq;

	if (!xen_pvspin)
		return;

	/*
	 * When booting the kernel with 'mitigations=auto,nosmt', the secondary
	 * CPUs are not activated, and lock_kicker_irq is not initialized.
	 */
	irq = per_cpu(lock_kicker_irq, cpu);
	if (irq == -1)
		return;

	unbind_from_irqhandler(irq, NULL);
	per_cpu(lock_kicker_irq, cpu) = -1;
	kfree(per_cpu(irq_name, cpu));
	per_cpu(irq_name, cpu) = NULL;
}
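
/*
 * Usage note: xen_init_lock_cpu() and xen_uninit_lock_cpu() are the per-CPU
 * setup/teardown pair, called from the Xen SMP bring-up and CPU-offline
 * paths.  Each online vCPU thus gets its own "spinlock<N>" IPI event
 * channel, which is bound but kept disabled: it is only ever polled via
 * xen_poll_irq(), never delivered as a normal interrupt.
 */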
/*
 * Our init of PV spinlocks is split into two init functions because we use
 * both paravirt patching and jump label patching, and all of this has to
 * happen before the SMP code is invoked.
 *
 * The paravirt patching needs to be done _before_ the alternative asm code
 * is started, otherwise we would not patch the core kernel code.
 */
void __init xen_init_spinlocks(void)
{
	if (!xen_pvspin) {
		printk(KERN_DEBUG "xen: PV spinlocks disabled\n");
		return;
	}
	printk(KERN_DEBUG "xen: PV spinlocks enabled\n");
#ifdef CONFIG_QUEUED_SPINLOCKS
	__pv_init_lock_hash();
	pv_lock_ops.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
	pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
	pv_lock_ops.wait = xen_qlock_wait;
	pv_lock_ops.kick = xen_qlock_kick;
#else
	pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(xen_lock_spinning);
	pv_lock_ops.unlock_kick = xen_unlock_kick;
#endif
}

/*
 * The jump label init, on the other hand, needs to happen _after_ jump
 * labels are enabled and before SMP is started.  Hence we use a pre-SMP
 * initcall level for it; we cannot do it in xen_init_spinlocks(), as that
 * runs before jump labels are activated.
 */
static __init int xen_init_spinlocks_jump(void)
{
	if (!xen_pvspin)
		return 0;

	if (!xen_domain())
		return 0;

	static_key_slow_inc(&paravirt_ticketlocks_enabled);
	return 0;
}
early_initcall(xen_init_spinlocks_jump);
static __init int xen_parse_nopvspin(char *arg)
{
	xen_pvspin = false;
	return 0;
}
early_param("xen_nopvspin", xen_parse_nopvspin);
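
/*
 * Usage example: PV spinlocks can be turned off from the guest kernel
 * command line, e.g.
 *
 *	linux ... xen_nopvspin
 *
 * in which case xen_init_spinlocks() leaves pv_lock_ops untouched and
 * xen_init_lock_cpu() never binds the per-CPU kicker event channel.
 */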
#if defined(CONFIG_XEN_DEBUG_FS) && !defined(CONFIG_QUEUED_SPINLOCKS)

static struct dentry *d_spin_debug;

static int __init xen_spinlock_debugfs(void)
{
	struct dentry *d_xen = xen_init_debugfs();

	if (d_xen == NULL)
		return -ENOMEM;

	if (!xen_pvspin)
		return 0;

	d_spin_debug = debugfs_create_dir("spinlocks", d_xen);

	debugfs_create_u8("zero_stats", 0644, d_spin_debug, &zero_stats);

	debugfs_create_u32("taken_slow", 0444, d_spin_debug,
			   &spinlock_stats.contention_stats[TAKEN_SLOW]);
	debugfs_create_u32("taken_slow_pickup", 0444, d_spin_debug,
			   &spinlock_stats.contention_stats[TAKEN_SLOW_PICKUP]);
	debugfs_create_u32("taken_slow_spurious", 0444, d_spin_debug,
			   &spinlock_stats.contention_stats[TAKEN_SLOW_SPURIOUS]);

	debugfs_create_u32("released_slow", 0444, d_spin_debug,
			   &spinlock_stats.contention_stats[RELEASED_SLOW]);
	debugfs_create_u32("released_slow_kicked", 0444, d_spin_debug,
			   &spinlock_stats.contention_stats[RELEASED_SLOW_KICKED]);

	debugfs_create_u64("time_blocked", 0444, d_spin_debug,
			   &spinlock_stats.time_blocked);

	debugfs_create_u32_array("histo_blocked", 0444, d_spin_debug,
				 spinlock_stats.histo_spin_blocked, HISTO_BUCKETS + 1);

	return 0;
}
fs_initcall(xen_spinlock_debugfs);

#endif	/* CONFIG_XEN_DEBUG_FS */
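
/*
 * Usage example (assuming debugfs is mounted at /sys/kernel/debug): the
 * counters exported above can be read and reset from user space, e.g.
 *
 *	# cat /sys/kernel/debug/xen/spinlocks/taken_slow
 *	# echo 1 > /sys/kernel/debug/xen/spinlocks/zero_stats
 *
 * Writing a non-zero value to zero_stats makes the next stats update clear
 * the whole spinlock_stats structure via check_zero().
 */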