// SPDX-License-Identifier: GPL-2.0
/*
 * Xen event channels (2-level ABI)
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/linkage.h>
#include <linux/interrupt.h>
#include <linux/irq.h>

#include <asm/sync_bitops.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen.h>
#include <xen/xen-ops.h>
#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>

#include "events_internal.h"

/*
 * Note sizeof(xen_ulong_t) can be more than sizeof(unsigned long). Be
 * careful to only use bitops which allow for this (e.g.
 * test_bit/find_first_bit and friends but not __ffs) and to pass
 * BITS_PER_EVTCHN_WORD as the bitmask length.
 */
#define BITS_PER_EVTCHN_WORD (sizeof(xen_ulong_t)*8)
/*
 * Make a bitmask (i.e. unsigned long *) of a xen_ulong_t
 * array. Primarily to avoid long lines (hence the terse name).
 */
#define BM(x) (unsigned long *)(x)
/* Find the first set bit in an evtchn mask */
#define EVTCHN_FIRST_BIT(w) find_first_bit(BM(&(w)), BITS_PER_EVTCHN_WORD)

#define EVTCHN_MASK_SIZE (EVTCHN_2L_NR_CHANNELS/BITS_PER_EVTCHN_WORD)

static DEFINE_PER_CPU(xen_ulong_t [EVTCHN_MASK_SIZE], cpu_evtchn_mask);

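/* The 2-level ABI supports a fixed maximum number of event channels. */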
static unsigned evtchn_2l_max_channels(void)
{
        return EVTCHN_2L_NR_CHANNELS;
}

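/* Clear the event channel's bit in the given cpu's binding mask. */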
static void evtchn_2l_remove(evtchn_port_t evtchn, unsigned int cpu)
{
        clear_bit(evtchn, BM(per_cpu(cpu_evtchn_mask, cpu)));
}

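/* Rebind an event channel: move its bit from the old cpu's mask to the new cpu's. */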
static void evtchn_2l_bind_to_cpu(struct irq_info *info, unsigned cpu)
{
        clear_bit(info->evtchn, BM(per_cpu(cpu_evtchn_mask, info->cpu)));
        set_bit(info->evtchn, BM(per_cpu(cpu_evtchn_mask, cpu)));
}

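/* Accessors for the global pending and mask bitmaps in the shared info page. */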
static void evtchn_2l_clear_pending(unsigned port)
{
        struct shared_info *s = HYPERVISOR_shared_info;
        sync_clear_bit(port, BM(&s->evtchn_pending[0]));
}

static void evtchn_2l_set_pending(unsigned port)
{
        struct shared_info *s = HYPERVISOR_shared_info;
        sync_set_bit(port, BM(&s->evtchn_pending[0]));
}

static bool evtchn_2l_is_pending(unsigned port)
{
        struct shared_info *s = HYPERVISOR_shared_info;
        return sync_test_bit(port, BM(&s->evtchn_pending[0]));
}

static void evtchn_2l_mask(unsigned port)
{
        struct shared_info *s = HYPERVISOR_shared_info;
        sync_set_bit(port, BM(&s->evtchn_mask[0]));
}

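/*
 * Unmask a port.  A port bound to the local cpu is unmasked directly in
 * the shared info page; otherwise (or if an event is already pending in
 * an HVM domain) fall back to the EVTCHNOP_unmask hypercall so Xen can
 * deliver any pending upcall.
 */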
static void evtchn_2l_unmask(unsigned port)
{
        struct shared_info *s = HYPERVISOR_shared_info;
        unsigned int cpu = get_cpu();
        int do_hypercall = 0, evtchn_pending = 0;

        BUG_ON(!irqs_disabled());

        smp_wmb(); /* All writes before unmask must be visible. */

        if (unlikely((cpu != cpu_from_evtchn(port))))
                do_hypercall = 1;
        else {
                /*
                 * Need to clear the mask before checking pending to
                 * avoid a race with an event becoming pending.
                 *
                 * EVTCHNOP_unmask will only trigger an upcall if the
                 * mask bit was set, so if a hypercall is needed
                 * remask the event.
                 */
                sync_clear_bit(port, BM(&s->evtchn_mask[0]));
                evtchn_pending = sync_test_bit(port, BM(&s->evtchn_pending[0]));

                if (unlikely(evtchn_pending && xen_hvm_domain())) {
                        sync_set_bit(port, BM(&s->evtchn_mask[0]));
                        do_hypercall = 1;
                }
        }

        /* Slow path (hypercall) if this is a non-local port or if this is
         * an hvm domain and an event is pending (hvm domains don't have
         * their own implementation of irq_enable). */
        if (do_hypercall) {
                struct evtchn_unmask unmask = { .port = port };
                (void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
        } else {
                struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);

                /*
                 * The following is basically the equivalent of
                 * 'hw_resend_irq'. Just like a real IO-APIC we 'lose
                 * the interrupt edge' if the channel is masked.
                 */
                if (evtchn_pending &&
                    !sync_test_and_set_bit(port / BITS_PER_EVTCHN_WORD,
                                           BM(&vcpu_info->evtchn_pending_sel)))
                        vcpu_info->evtchn_upcall_pending = 1;
        }

        put_cpu();
}

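/*
 * Per-cpu position of the last processed event channel.  The scan in
 * evtchn_2l_handle_events() resumes from here so that low-numbered
 * ports do not starve higher-numbered ones.
 */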
static DEFINE_PER_CPU(unsigned int, current_word_idx);
static DEFINE_PER_CPU(unsigned int, current_bit_idx);

/*
 * Mask out the i least significant bits of w
 */
#define MASK_LSBS(w, i) (w & ((~((xen_ulong_t)0UL)) << i))

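/* Events in word 'idx' that are pending, bound to 'cpu' and not globally masked. */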
static inline xen_ulong_t active_evtchns(unsigned int cpu,
                                         struct shared_info *sh,
                                         unsigned int idx)
{
        return sh->evtchn_pending[idx] &
                per_cpu(cpu_evtchn_mask, cpu)[idx] &
                ~sh->evtchn_mask[idx];
}

/*
 * Search the CPU's pending events bitmasks.  For each one found, map
 * the event number to an irq, and feed it into do_IRQ() for handling.
 *
 * Xen uses a two-level bitmap to speed searching.  The first level is
 * a bitset of words which contain pending event bits.  The second
 * level is a bitset of pending events themselves.
 */
static void evtchn_2l_handle_events(unsigned cpu, struct evtchn_loop_ctrl *ctrl)
{
        int irq;
        xen_ulong_t pending_words;
        xen_ulong_t pending_bits;
        int start_word_idx, start_bit_idx;
        int word_idx, bit_idx;
        int i;
        struct shared_info *s = HYPERVISOR_shared_info;
        struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);

        /* Timer interrupt has highest priority. */
        irq = irq_from_virq(cpu, VIRQ_TIMER);
        if (irq != -1) {
                unsigned int evtchn = evtchn_from_irq(irq);
                word_idx = evtchn / BITS_PER_LONG;
                bit_idx = evtchn % BITS_PER_LONG;
                if (active_evtchns(cpu, s, word_idx) & (1ULL << bit_idx))
                        generic_handle_irq(irq);
        }

        /*
         * Master flag must be cleared /before/ clearing
         * selector flag. xchg_xen_ulong must contain an
         * appropriate barrier.
         */
        pending_words = xchg_xen_ulong(&vcpu_info->evtchn_pending_sel, 0);

        start_word_idx = __this_cpu_read(current_word_idx);
        start_bit_idx = __this_cpu_read(current_bit_idx);

        word_idx = start_word_idx;

        for (i = 0; pending_words != 0; i++) {
                xen_ulong_t words;

                words = MASK_LSBS(pending_words, word_idx);

                /*
                 * If we masked out all events, wrap to beginning.
                 */
                if (words == 0) {
                        word_idx = 0;
                        bit_idx = 0;
                        continue;
                }
                word_idx = EVTCHN_FIRST_BIT(words);

                pending_bits = active_evtchns(cpu, s, word_idx);
                bit_idx = 0; /* usually scan entire word from start */
                /*
                 * We scan the starting word in two parts.
                 *
                 * 1st time: start in the middle, scanning the
                 * upper bits.
                 *
                 * 2nd time: scan the whole word (not just the
                 * parts skipped in the first pass) -- if an
                 * event in the previously scanned bits is
                 * pending again it would just be scanned on
                 * the next loop anyway.
                 */
                if (word_idx == start_word_idx) {
                        if (i == 0)
                                bit_idx = start_bit_idx;
                }

                do {
                        xen_ulong_t bits;
                        evtchn_port_t port;

                        bits = MASK_LSBS(pending_bits, bit_idx);

                        /* If we masked out all events, move on. */
                        if (bits == 0)
                                break;

                        bit_idx = EVTCHN_FIRST_BIT(bits);

                        /* Process port. */
                        port = (word_idx * BITS_PER_EVTCHN_WORD) + bit_idx;
                        handle_irq_for_port(port, ctrl);

                        bit_idx = (bit_idx + 1) % BITS_PER_EVTCHN_WORD;

                        /* Next caller starts at last processed + 1 */
                        __this_cpu_write(current_word_idx,
                                         bit_idx ? word_idx :
                                                 (word_idx+1) % BITS_PER_EVTCHN_WORD);
                        __this_cpu_write(current_bit_idx, bit_idx);
                } while (bit_idx != 0);

                /* Scan start_l1i twice; all others once. */
                if ((word_idx != start_word_idx) || (i != 0))
                        pending_words &= ~(1UL << word_idx);

                word_idx = (word_idx + 1) % BITS_PER_EVTCHN_WORD;
        }
}

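/*
 * Debug IRQ handler: dump the state of all event channels (pending and
 * mask bits, per-vcpu selectors, per-cpu bindings) to the kernel log.
 */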
irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
{
        struct shared_info *sh = HYPERVISOR_shared_info;
        int cpu = smp_processor_id();
        xen_ulong_t *cpu_evtchn = per_cpu(cpu_evtchn_mask, cpu);
        int i;
        unsigned long flags;
        static DEFINE_SPINLOCK(debug_lock);
        struct vcpu_info *v;

        spin_lock_irqsave(&debug_lock, flags);

        printk("\nvcpu %d\n ", cpu);

        for_each_online_cpu(i) {
                int pending;
                v = per_cpu(xen_vcpu, i);
                pending = (get_irq_regs() && i == cpu)
                        ? xen_irqs_disabled(get_irq_regs())
                        : v->evtchn_upcall_mask;
                printk("%d: masked=%d pending=%d event_sel %0*"PRI_xen_ulong"\n ", i,
                       pending, v->evtchn_upcall_pending,
                       (int)(sizeof(v->evtchn_pending_sel)*2),
                       v->evtchn_pending_sel);
        }
        v = per_cpu(xen_vcpu, cpu);

        printk("\npending:\n ");
        for (i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--)
                printk("%0*"PRI_xen_ulong"%s",
                       (int)sizeof(sh->evtchn_pending[0])*2,
                       sh->evtchn_pending[i],
                       i % 8 == 0 ? "\n " : " ");
        printk("\nglobal mask:\n ");
        for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
                printk("%0*"PRI_xen_ulong"%s",
                       (int)(sizeof(sh->evtchn_mask[0])*2),
                       sh->evtchn_mask[i],
                       i % 8 == 0 ? "\n " : " ");

        printk("\nglobally unmasked:\n ");
        for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
                printk("%0*"PRI_xen_ulong"%s",
                       (int)(sizeof(sh->evtchn_mask[0])*2),
                       sh->evtchn_pending[i] & ~sh->evtchn_mask[i],
                       i % 8 == 0 ? "\n " : " ");

        printk("\nlocal cpu%d mask:\n ", cpu);
        for (i = (EVTCHN_2L_NR_CHANNELS/BITS_PER_EVTCHN_WORD)-1; i >= 0; i--)
                printk("%0*"PRI_xen_ulong"%s", (int)(sizeof(cpu_evtchn[0])*2),
                       cpu_evtchn[i],
                       i % 8 == 0 ? "\n " : " ");

        printk("\nlocally unmasked:\n ");
        for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--) {
                xen_ulong_t pending = sh->evtchn_pending[i]
                        & ~sh->evtchn_mask[i]
                        & cpu_evtchn[i];
                printk("%0*"PRI_xen_ulong"%s",
                       (int)(sizeof(sh->evtchn_mask[0])*2),
                       pending, i % 8 == 0 ? "\n " : " ");
        }

        printk("\npending list:\n");
        for (i = 0; i < EVTCHN_2L_NR_CHANNELS; i++) {
                if (sync_test_bit(i, BM(sh->evtchn_pending))) {
                        int word_idx = i / BITS_PER_EVTCHN_WORD;
                        printk(" %d: event %d -> irq %d%s%s%s\n",
                               cpu_from_evtchn(i), i,
                               get_evtchn_to_irq(i),
                               sync_test_bit(word_idx, BM(&v->evtchn_pending_sel))
                               ? "" : " l2-clear",
                               !sync_test_bit(i, BM(sh->evtchn_mask))
                               ? "" : " globally-masked",
                               sync_test_bit(i, BM(cpu_evtchn))
                               ? "" : " locally-masked");
                }
        }

        spin_unlock_irqrestore(&debug_lock, flags);

        return IRQ_HANDLED;
}

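/*
 * Event channel bindings are not preserved across save/restore, so
 * start each cpu's binding mask from a clean slate.
 */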
static void evtchn_2l_resume(void)
{
        int i;

        for_each_online_cpu(i)
                memset(per_cpu(cpu_evtchn_mask, i), 0, sizeof(xen_ulong_t) *
                                EVTCHN_2L_NR_CHANNELS/BITS_PER_EVTCHN_WORD);
}

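/* A cpu is going offline: forget any event channel bindings recorded for it. */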
static int evtchn_2l_percpu_deinit(unsigned int cpu)
{
        memset(per_cpu(cpu_evtchn_mask, cpu), 0, sizeof(xen_ulong_t) *
                        EVTCHN_2L_NR_CHANNELS/BITS_PER_EVTCHN_WORD);
        return 0;
}

static const struct evtchn_ops evtchn_ops_2l = {
        .max_channels      = evtchn_2l_max_channels,
        .nr_channels       = evtchn_2l_max_channels,
        .remove            = evtchn_2l_remove,
        .bind_to_cpu       = evtchn_2l_bind_to_cpu,
        .clear_pending     = evtchn_2l_clear_pending,
        .set_pending       = evtchn_2l_set_pending,
        .is_pending        = evtchn_2l_is_pending,
        .mask              = evtchn_2l_mask,
        .unmask            = evtchn_2l_unmask,
        .handle_events     = evtchn_2l_handle_events,
        .resume            = evtchn_2l_resume,
        .percpu_deinit     = evtchn_2l_percpu_deinit,
};

void __init xen_evtchn_2l_init(void)
{
        pr_info("Using 2-level ABI\n");
        evtchn_ops = &evtchn_ops_2l;
}