/*
 * Xen event channels
 *
 * Xen models interrupts with abstract event channels.  Because each
 * domain gets 1024 event channels, but NR_IRQS is not that large, we
 * must dynamically map irqs<->event channels.  The event channels
 * interface with the rest of the kernel by defining a xen interrupt
 * chip.  When an event is received, it is mapped to an irq and sent
 * through the normal interrupt processing path.
 *
 * There are four kinds of events which can be mapped to an event
 * channel:
 *
 * 1. Inter-domain notifications.  This includes all the virtual
 *    device events, since they're driven by front-ends in another domain
 *    (typically dom0).
 * 2. VIRQs, typically used for timers.  These are per-cpu events.
 * 3. IPIs.
 * 4. PIRQs - Hardware interrupts.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
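
/*
 * A typical driver consumes an event channel through the irqhandler
 * helpers defined below, for example:
 *
 *	irq = bind_evtchn_to_irqhandler(evtchn, handler, 0, "mydev", dev);
 *	...
 *	unbind_from_irqhandler(irq, dev);
 */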

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/linkage.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/irqnr.h>
#include <linux/pci.h>

#ifdef CONFIG_X86
#include <asm/desc.h>
#include <asm/ptrace.h>
#include <asm/irq.h>
#include <asm/idle.h>
#include <asm/io_apic.h>
#include <asm/xen/page.h>
#include <asm/xen/pci.h>
#endif
#include <asm/sync_bitops.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen.h>
#include <xen/hvm.h>
#include <xen/xen-ops.h>
#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>
#include <xen/interface/hvm/hvm_op.h>
#include <xen/interface/hvm/params.h>
#include <xen/interface/physdev.h>
#include <xen/interface/sched.h>
#include <xen/interface/vcpu.h>
#include <asm/hw_irq.h>

/*
 * This lock protects updates to the following mapping and reference-count
 * arrays. The lock does not need to be acquired to read the mapping tables.
 */
static DEFINE_MUTEX(irq_mapping_update_lock);

static LIST_HEAD(xen_irq_list_head);

/* IRQ <-> VIRQ mapping. */
static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};

/* IRQ <-> IPI mapping */
static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1};

/* Interrupt types. */
enum xen_irq_type {
	IRQT_UNBOUND = 0,
	IRQT_PIRQ,
	IRQT_VIRQ,
	IRQT_IPI,
	IRQT_EVTCHN
};

/*
 * Packed IRQ information:
 * type - enum xen_irq_type
 * event channel - irq->event channel mapping
 * cpu - cpu this event channel is bound to
 * index - type-specific information:
 *    PIRQ - physical IRQ, GSI, flags, and owner domain
 *    VIRQ - virq number
 *    IPI - IPI vector
 *    EVTCHN -
 */
struct irq_info {
	struct list_head list;
	int refcnt;
	enum xen_irq_type type;	/* type */
	unsigned irq;
	unsigned short evtchn;	/* event channel */
	unsigned short cpu;	/* cpu bound */

	union {
		unsigned short virq;
		enum ipi_vector ipi;
		struct {
			unsigned short pirq;
			unsigned short gsi;
			unsigned char flags;
			uint16_t domid;
		} pirq;
	} u;
};
#define PIRQ_NEEDS_EOI	(1 << 0)
#define PIRQ_SHAREABLE	(1 << 1)

static int *evtchn_to_irq;
#ifdef CONFIG_X86
static unsigned long *pirq_eoi_map;
#endif
static bool (*pirq_needs_eoi)(unsigned irq);

/*
 * Note sizeof(xen_ulong_t) can be more than sizeof(unsigned long). Be
 * careful to only use bitops which allow for this (e.g.
 * test_bit/find_first_bit and friends but not __ffs) and to pass
 * BITS_PER_EVTCHN_WORD as the bitmask length.
 */
#define BITS_PER_EVTCHN_WORD (sizeof(xen_ulong_t)*8)
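/*
 * On 32-bit ARM guests, for example, xen_ulong_t is 64 bits wide while
 * unsigned long is only 32, so each event word spans two machine words
 * and BITS_PER_EVTCHN_WORD is 64, not BITS_PER_LONG.
 */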
/*
 * Make a bitmask (i.e. unsigned long *) of a xen_ulong_t
 * array. Primarily to avoid long lines (hence the terse name).
 */
#define BM(x) (unsigned long *)(x)
/* Find the first set bit in an evtchn mask */
#define EVTCHN_FIRST_BIT(w) find_first_bit(BM(&(w)), BITS_PER_EVTCHN_WORD)

static DEFINE_PER_CPU(xen_ulong_t [NR_EVENT_CHANNELS/BITS_PER_EVTCHN_WORD],
		      cpu_evtchn_mask);

/* Xen will never allocate port zero for any purpose. */
#define VALID_EVTCHN(chn)	((chn) != 0)

static struct irq_chip xen_dynamic_chip;
static struct irq_chip xen_percpu_chip;
static struct irq_chip xen_pirq_chip;
static void enable_dynirq(struct irq_data *data);
static void disable_dynirq(struct irq_data *data);

/* Get info for IRQ */
static struct irq_info *info_for_irq(unsigned irq)
{
	return irq_get_handler_data(irq);
}

/* Constructors for packed IRQ information. */
static void xen_irq_info_common_init(struct irq_info *info,
				     unsigned irq,
				     enum xen_irq_type type,
				     unsigned short evtchn,
				     unsigned short cpu)
{
	BUG_ON(info->type != IRQT_UNBOUND && info->type != type);

	info->type = type;
	info->irq = irq;
	info->evtchn = evtchn;
	info->cpu = cpu;

	evtchn_to_irq[evtchn] = irq;

	irq_clear_status_flags(irq, IRQ_NOREQUEST|IRQ_NOAUTOEN);
}

static void xen_irq_info_evtchn_init(unsigned irq,
				     unsigned short evtchn)
{
	struct irq_info *info = info_for_irq(irq);

	xen_irq_info_common_init(info, irq, IRQT_EVTCHN, evtchn, 0);
}

static void xen_irq_info_ipi_init(unsigned cpu,
				  unsigned irq,
				  unsigned short evtchn,
				  enum ipi_vector ipi)
{
	struct irq_info *info = info_for_irq(irq);

	xen_irq_info_common_init(info, irq, IRQT_IPI, evtchn, 0);

	info->u.ipi = ipi;

	per_cpu(ipi_to_irq, cpu)[ipi] = irq;
}

static void xen_irq_info_virq_init(unsigned cpu,
				   unsigned irq,
				   unsigned short evtchn,
				   unsigned short virq)
{
	struct irq_info *info = info_for_irq(irq);

	xen_irq_info_common_init(info, irq, IRQT_VIRQ, evtchn, 0);

	info->u.virq = virq;

	per_cpu(virq_to_irq, cpu)[virq] = irq;
}

static void xen_irq_info_pirq_init(unsigned irq,
				   unsigned short evtchn,
				   unsigned short pirq,
				   unsigned short gsi,
				   uint16_t domid,
				   unsigned char flags)
{
	struct irq_info *info = info_for_irq(irq);

	xen_irq_info_common_init(info, irq, IRQT_PIRQ, evtchn, 0);

	info->u.pirq.pirq = pirq;
	info->u.pirq.gsi = gsi;
	info->u.pirq.domid = domid;
	info->u.pirq.flags = flags;
}

/*
 * Accessors for packed IRQ information.
 */
static unsigned int evtchn_from_irq(unsigned irq)
{
	if (unlikely(WARN(irq >= nr_irqs, "Invalid irq %u!\n", irq)))
		return 0;

	return info_for_irq(irq)->evtchn;
}

unsigned irq_from_evtchn(unsigned int evtchn)
{
	return evtchn_to_irq[evtchn];
}
EXPORT_SYMBOL_GPL(irq_from_evtchn);

static enum ipi_vector ipi_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_IPI);

	return info->u.ipi;
}

static unsigned virq_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_VIRQ);

	return info->u.virq;
}

static unsigned pirq_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.pirq;
}

static enum xen_irq_type type_from_irq(unsigned irq)
{
	return info_for_irq(irq)->type;
}

static unsigned cpu_from_irq(unsigned irq)
{
	return info_for_irq(irq)->cpu;
}

static unsigned int cpu_from_evtchn(unsigned int evtchn)
{
	int irq = evtchn_to_irq[evtchn];
	unsigned ret = 0;

	if (irq != -1)
		ret = cpu_from_irq(irq);

	return ret;
}

#ifdef CONFIG_X86
static bool pirq_check_eoi_map(unsigned irq)
{
	return test_bit(pirq_from_irq(irq), pirq_eoi_map);
}
#endif

static bool pirq_needs_eoi_flag(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);
	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.flags & PIRQ_NEEDS_EOI;
}

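/*
 * Pending events in @idx's word that are both unmasked and routed to
 * @cpu, one xen_ulong_t worth of ports at a time.
 */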
static inline xen_ulong_t active_evtchns(unsigned int cpu,
					 struct shared_info *sh,
					 unsigned int idx)
{
	return sh->evtchn_pending[idx] &
		per_cpu(cpu_evtchn_mask, cpu)[idx] &
		~sh->evtchn_mask[idx];
}

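/*
 * Move @chn's bit from the old cpu's per-cpu mask to @cpu's, and record
 * the new binding in both the irq_desc affinity and the irq_info.
 */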
static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
	int irq = evtchn_to_irq[chn];

	BUG_ON(irq == -1);
#ifdef CONFIG_SMP
	cpumask_copy(irq_to_desc(irq)->irq_data.affinity, cpumask_of(cpu));
#endif

	clear_bit(chn, BM(per_cpu(cpu_evtchn_mask, cpu_from_irq(irq))));
	set_bit(chn, BM(per_cpu(cpu_evtchn_mask, cpu)));

	info_for_irq(irq)->cpu = cpu;
}

static inline void clear_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_clear_bit(port, BM(&s->evtchn_pending[0]));
}

static inline void set_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_set_bit(port, BM(&s->evtchn_pending[0]));
}

static inline int test_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	return sync_test_bit(port, BM(&s->evtchn_pending[0]));
}

static inline int test_and_set_mask(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	return sync_test_and_set_bit(port, BM(&s->evtchn_mask[0]));
}

/**
 * notify_remote_via_irq - send event to remote end of event channel via irq
 * @irq: irq of event channel to send event to
 *
 * Unlike notify_remote_via_evtchn(), this is safe to use across
 * save/restore. Notifications on a broken connection are silently
 * dropped.
 */
void notify_remote_via_irq(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		notify_remote_via_evtchn(evtchn);
}
EXPORT_SYMBOL_GPL(notify_remote_via_irq);

static void mask_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_set_bit(port, BM(&s->evtchn_mask[0]));
}

static void unmask_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	unsigned int cpu = get_cpu();
	int do_hypercall = 0, evtchn_pending = 0;

	BUG_ON(!irqs_disabled());

	if (unlikely((cpu != cpu_from_evtchn(port))))
		do_hypercall = 1;
	else {
		/*
		 * Need to clear the mask before checking pending to
		 * avoid a race with an event becoming pending.
		 *
		 * EVTCHNOP_unmask will only trigger an upcall if the
		 * mask bit was set, so if a hypercall is needed
		 * remask the event.
		 */
		sync_clear_bit(port, BM(&s->evtchn_mask[0]));
		evtchn_pending = sync_test_bit(port, BM(&s->evtchn_pending[0]));

		if (unlikely(evtchn_pending && xen_hvm_domain())) {
			sync_set_bit(port, BM(&s->evtchn_mask[0]));
			do_hypercall = 1;
		}
	}

	/* Slow path (hypercall) if this is a non-local port or if this is
	 * an hvm domain and an event is pending (hvm domains don't have
	 * their own implementation of irq_enable). */
	if (do_hypercall) {
		struct evtchn_unmask unmask = { .port = port };
		(void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
	} else {
		struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);

		/*
		 * The following is basically the equivalent of
		 * 'hw_resend_irq'. Just like a real IO-APIC we 'lose
		 * the interrupt edge' if the channel is masked.
		 */
		if (evtchn_pending &&
		    !sync_test_and_set_bit(port / BITS_PER_EVTCHN_WORD,
					   BM(&vcpu_info->evtchn_pending_sel)))
			vcpu_info->evtchn_upcall_pending = 1;
	}

	put_cpu();
}

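/*
 * Allocate the Xen-specific metadata for a newly allocated irq, hook it
 * up as the irq's handler data and add it to the global list.
 */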
static void xen_irq_init(unsigned irq)
{
	struct irq_info *info;
#ifdef CONFIG_SMP
	struct irq_desc *desc = irq_to_desc(irq);

	/* By default all event channels notify CPU#0. */
	cpumask_copy(desc->irq_data.affinity, cpumask_of(0));
#endif

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (info == NULL)
		panic("Unable to allocate metadata for IRQ%d\n", irq);

	info->type = IRQT_UNBOUND;
	info->refcnt = -1;

	irq_set_handler_data(irq, info);

	list_add_tail(&info->list, &xen_irq_list_head);
}

static int __must_check xen_allocate_irq_dynamic(void)
{
	int first = 0;
	int irq;

#ifdef CONFIG_X86_IO_APIC
	/*
	 * HVM guests and domain 0 see "real" (emulated and actual,
	 * respectively) GSIs, so we allocate dynamic IRQs (e.g. those
	 * corresponding to event channels or MSIs) from the range
	 * above those "real" GSIs to avoid collisions.
	 */
	if (xen_initial_domain() || xen_hvm_domain())
		first = get_nr_irqs_gsi();
#endif

	irq = irq_alloc_desc_from(first, -1);

	if (irq >= 0)
		xen_irq_init(irq);

	return irq;
}

static int __must_check xen_allocate_irq_gsi(unsigned gsi)
{
	int irq;

	/*
	 * A PV guest has no concept of a GSI (since it has no ACPI
	 * nor access to/knowledge of the physical APICs). Therefore
	 * all IRQs are dynamically allocated from the entire IRQ
	 * space.
	 */
	if (xen_pv_domain() && !xen_initial_domain())
		return xen_allocate_irq_dynamic();

	/* Legacy IRQ descriptors are already allocated by the arch. */
	if (gsi < NR_IRQS_LEGACY)
		irq = gsi;
	else
		irq = irq_alloc_desc_at(gsi, -1);

	if (irq >= 0)
		xen_irq_init(irq);

	return irq;
}

static void xen_free_irq(unsigned irq)
{
	struct irq_info *info = irq_get_handler_data(irq);

	if (WARN_ON(!info))
		return;

	list_del(&info->list);

	irq_set_handler_data(irq, NULL);

	WARN_ON(info->refcnt > 0);

	kfree(info);

	/* Legacy IRQ descriptors are managed by the arch. */
	if (irq < NR_IRQS_LEGACY)
		return;

	irq_free_desc(irq);
}

static void pirq_query_unmask(int irq)
{
	struct physdev_irq_status_query irq_status;
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	irq_status.irq = pirq_from_irq(irq);
	if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
		irq_status.flags = 0;

	info->u.pirq.flags &= ~PIRQ_NEEDS_EOI;
	if (irq_status.flags & XENIRQSTAT_needs_eoi)
		info->u.pirq.flags |= PIRQ_NEEDS_EOI;
}

static bool probing_irq(int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc && desc->action == NULL;
}

static void eoi_pirq(struct irq_data *data)
{
	int evtchn = evtchn_from_irq(data->irq);
	struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) };
	int rc = 0;

	irq_move_irq(data);

	if (VALID_EVTCHN(evtchn))
		clear_evtchn(evtchn);

	if (pirq_needs_eoi(data->irq)) {
		rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
		WARN_ON(rc);
	}
}

static void mask_ack_pirq(struct irq_data *data)
{
	disable_dynirq(data);
	eoi_pirq(data);
}

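/*
 * Bind a PIRQ to a fresh event channel, route it to CPU#0 and unmask
 * it. Always returns 0; the genirq .irq_startup hook expects an
 * unsigned int.
 */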
static unsigned int __startup_pirq(unsigned int irq)
{
	struct evtchn_bind_pirq bind_pirq;
	struct irq_info *info = info_for_irq(irq);
	int evtchn = evtchn_from_irq(irq);
	int rc;

	BUG_ON(info->type != IRQT_PIRQ);

	if (VALID_EVTCHN(evtchn))
		goto out;

	bind_pirq.pirq = pirq_from_irq(irq);
	/* NB. We are happy to share unless we are probing. */
	bind_pirq.flags = info->u.pirq.flags & PIRQ_SHAREABLE ?
					BIND_PIRQ__WILL_SHARE : 0;
	rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq);
	if (rc != 0) {
		if (!probing_irq(irq))
			pr_info("Failed to obtain physical IRQ %d\n", irq);
		return 0;
	}
	evtchn = bind_pirq.port;

	pirq_query_unmask(irq);

	evtchn_to_irq[evtchn] = irq;
	bind_evtchn_to_cpu(evtchn, 0);
	info->evtchn = evtchn;

out:
	unmask_evtchn(evtchn);
	eoi_pirq(irq_get_irq_data(irq));

	return 0;
}

static unsigned int startup_pirq(struct irq_data *data)
{
	return __startup_pirq(data->irq);
}

static void shutdown_pirq(struct irq_data *data)
{
	struct evtchn_close close;
	unsigned int irq = data->irq;
	struct irq_info *info = info_for_irq(irq);
	int evtchn = evtchn_from_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	if (!VALID_EVTCHN(evtchn))
		return;

	mask_evtchn(evtchn);

	close.port = evtchn;
	if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
		BUG();

	bind_evtchn_to_cpu(evtchn, 0);
	evtchn_to_irq[evtchn] = -1;
	info->evtchn = 0;
}

static void enable_pirq(struct irq_data *data)
{
	startup_pirq(data);
}

static void disable_pirq(struct irq_data *data)
{
	disable_dynirq(data);
}

int xen_irq_from_gsi(unsigned gsi)
{
	struct irq_info *info;

	list_for_each_entry(info, &xen_irq_list_head, list) {
		if (info->type != IRQT_PIRQ)
			continue;

		if (info->u.pirq.gsi == gsi)
			return info->irq;
	}

	return -1;
}
EXPORT_SYMBOL_GPL(xen_irq_from_gsi);

/*
 * Do not make any assumptions regarding the relationship between the
 * IRQ number returned here and the Xen pirq argument.
 *
 * Note: We don't assign an event channel until the irq actually started
 * up.  Return an existing irq if we've already got one for the gsi.
 *
 * Shareable implies level triggered, not shareable implies edge
 * triggered here.
 */
int xen_bind_pirq_gsi_to_irq(unsigned gsi,
			     unsigned pirq, int shareable, char *name)
{
	int irq = -1;
	struct physdev_irq irq_op;

	mutex_lock(&irq_mapping_update_lock);

	irq = xen_irq_from_gsi(gsi);
	if (irq != -1) {
		pr_info("%s: returning irq %d for gsi %u\n",
			__func__, irq, gsi);
		goto out;
	}

	irq = xen_allocate_irq_gsi(gsi);
	if (irq < 0)
		goto out;

	irq_op.irq = irq;
	irq_op.vector = 0;

	/* Only the privileged domain can do this. For non-priv, the pcifront
	 * driver provides a PCI bus that does the call to do exactly
	 * this in the priv domain. */
	if (xen_initial_domain() &&
	    HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) {
		xen_free_irq(irq);
		irq = -ENOSPC;
		goto out;
	}

	xen_irq_info_pirq_init(irq, 0, pirq, gsi, DOMID_SELF,
			       shareable ? PIRQ_SHAREABLE : 0);

	pirq_query_unmask(irq);
	/* We try to use the handler with the appropriate semantic for the
	 * type of interrupt: if the interrupt is an edge triggered
	 * interrupt we use handle_edge_irq.
	 *
	 * On the other hand if the interrupt is level triggered we use
	 * handle_fasteoi_irq like the native code does for this kind of
	 * interrupts.
	 *
	 * Depending on the Xen version, pirq_needs_eoi might return true
	 * not only for level triggered interrupts but for edge triggered
	 * interrupts too. In any case Xen always honors the eoi mechanism,
	 * not injecting any more pirqs of the same kind if the first one
	 * hasn't received an eoi yet. Therefore using the fasteoi handler
	 * is the right choice either way.
	 */
	if (shareable)
		irq_set_chip_and_handler_name(irq, &xen_pirq_chip,
				handle_fasteoi_irq, name);
	else
		irq_set_chip_and_handler_name(irq, &xen_pirq_chip,
				handle_edge_irq, name);

out:
	mutex_unlock(&irq_mapping_update_lock);

	return irq;
}

#ifdef CONFIG_PCI_MSI
int xen_allocate_pirq_msi(struct pci_dev *dev, struct msi_desc *msidesc)
{
	int rc;
	struct physdev_get_free_pirq op_get_free_pirq;

	op_get_free_pirq.type = MAP_PIRQ_TYPE_MSI;
	rc = HYPERVISOR_physdev_op(PHYSDEVOP_get_free_pirq, &op_get_free_pirq);

	WARN_ONCE(rc == -ENOSYS,
		  "hypervisor does not support the PHYSDEVOP_get_free_pirq interface\n");

	return rc ? -1 : op_get_free_pirq.pirq;
}

int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
			     int pirq, const char *name, domid_t domid)
{
	int irq, ret;

	mutex_lock(&irq_mapping_update_lock);

	irq = xen_allocate_irq_dynamic();
	if (irq < 0)
		goto out;

	irq_set_chip_and_handler_name(irq, &xen_pirq_chip, handle_edge_irq,
			name);

	xen_irq_info_pirq_init(irq, 0, pirq, 0, domid, 0);
	ret = irq_set_msi_desc(irq, msidesc);
	if (ret < 0)
		goto error_irq;
out:
	mutex_unlock(&irq_mapping_update_lock);
	return irq;
error_irq:
	mutex_unlock(&irq_mapping_update_lock);
	xen_free_irq(irq);
	return ret;
}
#endif

int xen_destroy_irq(int irq)
{
	struct irq_desc *desc;
	struct physdev_unmap_pirq unmap_irq;
	struct irq_info *info = info_for_irq(irq);
	int rc = -ENOENT;

	mutex_lock(&irq_mapping_update_lock);

	desc = irq_to_desc(irq);
	if (!desc)
		goto out;

	if (xen_initial_domain()) {
		unmap_irq.pirq = info->u.pirq.pirq;
		unmap_irq.domid = info->u.pirq.domid;
		rc = HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap_irq);
		/* If another domain quits without making the pci_disable_msix
		 * call, the Xen hypervisor takes care of freeing the PIRQs
		 * (free_domain_pirqs).
		 */
		if (rc == -ESRCH && info->u.pirq.domid != DOMID_SELF)
			pr_info("domain %d does not have pirq %d anymore\n",
				info->u.pirq.domid, info->u.pirq.pirq);
		else if (rc) {
			pr_warn("unmap irq failed %d\n", rc);
			goto out;
		}
	}

	xen_free_irq(irq);

out:
	mutex_unlock(&irq_mapping_update_lock);
	return rc;
}

int xen_irq_from_pirq(unsigned pirq)
{
	int irq;
	struct irq_info *info;

	mutex_lock(&irq_mapping_update_lock);

	list_for_each_entry(info, &xen_irq_list_head, list) {
		if (info->type != IRQT_PIRQ)
			continue;
		irq = info->irq;
		if (info->u.pirq.pirq == pirq)
			goto out;
	}
	irq = -1;
out:
	mutex_unlock(&irq_mapping_update_lock);

	return irq;
}

int xen_pirq_from_irq(unsigned irq)
{
	return pirq_from_irq(irq);
}
EXPORT_SYMBOL_GPL(xen_pirq_from_irq);

int bind_evtchn_to_irq(unsigned int evtchn)
{
	int irq;

	mutex_lock(&irq_mapping_update_lock);

	irq = evtchn_to_irq[evtchn];

	if (irq == -1) {
		irq = xen_allocate_irq_dynamic();
		if (irq < 0)
			goto out;

		irq_set_chip_and_handler_name(irq, &xen_dynamic_chip,
					      handle_edge_irq, "event");

		xen_irq_info_evtchn_init(irq, evtchn);
	} else {
		struct irq_info *info = info_for_irq(irq);
		WARN_ON(info == NULL || info->type != IRQT_EVTCHN);
	}

out:
	mutex_unlock(&irq_mapping_update_lock);

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);

static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	int evtchn, irq;

	mutex_lock(&irq_mapping_update_lock);

	irq = per_cpu(ipi_to_irq, cpu)[ipi];

	if (irq == -1) {
		irq = xen_allocate_irq_dynamic();
		if (irq < 0)
			goto out;

		irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
					      handle_percpu_irq, "ipi");

		bind_ipi.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		xen_irq_info_ipi_init(cpu, irq, evtchn, ipi);

		bind_evtchn_to_cpu(evtchn, cpu);
	} else {
		struct irq_info *info = info_for_irq(irq);
		WARN_ON(info == NULL || info->type != IRQT_IPI);
	}

out:
	mutex_unlock(&irq_mapping_update_lock);
	return irq;
}

static int bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
					  unsigned int remote_port)
{
	struct evtchn_bind_interdomain bind_interdomain;
	int err;

	bind_interdomain.remote_dom  = remote_domain;
	bind_interdomain.remote_port = remote_port;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
					  &bind_interdomain);

	return err ? : bind_evtchn_to_irq(bind_interdomain.local_port);
}

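/*
 * Ask Xen for the status of every port, looking for an existing VIRQ
 * binding for (virq, cpu). Used when EVTCHNOP_bind_virq reports that
 * the binding already exists, presumably left over from a previous
 * kernel (e.g. kdump).
 */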
static int find_virq(unsigned int virq, unsigned int cpu)
{
	struct evtchn_status status;
	int port, rc;

	memset(&status, 0, sizeof(status));
	for (port = 0; port < NR_EVENT_CHANNELS; port++) {
		status.dom = DOMID_SELF;
		status.port = port;
		rc = HYPERVISOR_event_channel_op(EVTCHNOP_status, &status);
		if (rc < 0)
			continue;
		if (status.status != EVTCHNSTAT_virq)
			continue;
		if (status.u.virq == virq && status.vcpu == cpu)
			return port;
	}
	return -ENOENT;
}

int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	int evtchn, irq, ret;

	mutex_lock(&irq_mapping_update_lock);

	irq = per_cpu(virq_to_irq, cpu)[virq];

	if (irq == -1) {
		irq = xen_allocate_irq_dynamic();
		if (irq < 0)
			goto out;

		irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
					      handle_percpu_irq, "virq");

		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		ret = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq);
		if (ret == 0)
			evtchn = bind_virq.port;
		else {
			if (ret == -EEXIST)
				ret = find_virq(virq, cpu);
			BUG_ON(ret < 0);
			evtchn = ret;
		}

		xen_irq_info_virq_init(cpu, irq, evtchn, virq);

		bind_evtchn_to_cpu(evtchn, cpu);
	} else {
		struct irq_info *info = info_for_irq(irq);
		WARN_ON(info == NULL || info->type != IRQT_VIRQ);
	}

out:
	mutex_unlock(&irq_mapping_update_lock);

	return irq;
}

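/*
 * Drop one reference to the irq; when the last reference goes, close
 * the underlying event channel, tear down the per-type mapping and
 * free the irq.
 */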
static void unbind_from_irq(unsigned int irq)
{
	struct evtchn_close close;
	int evtchn = evtchn_from_irq(irq);
	struct irq_info *info = irq_get_handler_data(irq);

	if (WARN_ON(!info))
		return;

	mutex_lock(&irq_mapping_update_lock);

	if (info->refcnt > 0) {
		info->refcnt--;
		if (info->refcnt != 0)
			goto done;
	}

	if (VALID_EVTCHN(evtchn)) {
		close.port = evtchn;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
			BUG();

		switch (type_from_irq(irq)) {
		case IRQT_VIRQ:
			per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
				[virq_from_irq(irq)] = -1;
			break;
		case IRQT_IPI:
			per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
				[ipi_from_irq(irq)] = -1;
			break;
		default:
			break;
		}

		/* Closed ports are implicitly re-bound to VCPU0. */
		bind_evtchn_to_cpu(evtchn, 0);

		evtchn_to_irq[evtchn] = -1;
	}

	BUG_ON(info_for_irq(irq)->type == IRQT_UNBOUND);

	xen_free_irq(irq);

done:
	mutex_unlock(&irq_mapping_update_lock);
}

int bind_evtchn_to_irqhandler(unsigned int evtchn,
			      irq_handler_t handler,
			      unsigned long irqflags,
			      const char *devname, void *dev_id)
{
	int irq, retval;

	irq = bind_evtchn_to_irq(evtchn);
	if (irq < 0)
		return irq;
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);

int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain,
					  unsigned int remote_port,
					  irq_handler_t handler,
					  unsigned long irqflags,
					  const char *devname,
					  void *dev_id)
{
	int irq, retval;

	irq = bind_interdomain_evtchn_to_irq(remote_domain, remote_port);
	if (irq < 0)
		return irq;

	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irqhandler);

int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
			    irq_handler_t handler,
			    unsigned long irqflags, const char *devname, void *dev_id)
{
	int irq, retval;

	irq = bind_virq_to_irq(virq, cpu);
	if (irq < 0)
		return irq;
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);

int bind_ipi_to_irqhandler(enum ipi_vector ipi,
			   unsigned int cpu,
			   irq_handler_t handler,
			   unsigned long irqflags,
			   const char *devname,
			   void *dev_id)
{
	int irq, retval;

	irq = bind_ipi_to_irq(ipi, cpu);
	if (irq < 0)
		return irq;

	irqflags |= IRQF_NO_SUSPEND | IRQF_FORCE_RESUME | IRQF_EARLY_RESUME;
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}

void unbind_from_irqhandler(unsigned int irq, void *dev_id)
{
	struct irq_info *info = irq_get_handler_data(irq);

	if (WARN_ON(!info))
		return;
	free_irq(irq, dev_id);
	unbind_from_irq(irq);
}
EXPORT_SYMBOL_GPL(unbind_from_irqhandler);

int evtchn_make_refcounted(unsigned int evtchn)
{
	int irq = evtchn_to_irq[evtchn];
	struct irq_info *info;

	if (irq == -1)
		return -ENOENT;

	info = irq_get_handler_data(irq);

	if (!info)
		return -ENOENT;

	WARN_ON(info->refcnt != -1);

	info->refcnt = 1;

	return 0;
}
EXPORT_SYMBOL_GPL(evtchn_make_refcounted);

int evtchn_get(unsigned int evtchn)
{
	int irq;
	struct irq_info *info;
	int err = -ENOENT;

	if (evtchn >= NR_EVENT_CHANNELS)
		return -EINVAL;

	mutex_lock(&irq_mapping_update_lock);

	irq = evtchn_to_irq[evtchn];
	if (irq == -1)
		goto done;

	info = irq_get_handler_data(irq);

	if (!info)
		goto done;

	err = -EINVAL;
	if (info->refcnt <= 0)
		goto done;

	info->refcnt++;
	err = 0;
done:
	mutex_unlock(&irq_mapping_update_lock);

	return err;
}
EXPORT_SYMBOL_GPL(evtchn_get);

void evtchn_put(unsigned int evtchn)
{
	int irq = evtchn_to_irq[evtchn];
	if (WARN_ON(irq == -1))
		return;
	unbind_from_irq(irq);
}
EXPORT_SYMBOL_GPL(evtchn_put);

void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
{
	int irq;

#ifdef CONFIG_X86
	if (unlikely(vector == XEN_NMI_VECTOR)) {
		int rc = HYPERVISOR_vcpu_op(VCPUOP_send_nmi, cpu, NULL);
		if (rc < 0)
			printk(KERN_WARNING "Sending nmi to CPU%d failed (rc:%d)\n", cpu, rc);
		return;
	}
#endif
	irq = per_cpu(ipi_to_irq, cpu)[vector];
	BUG_ON(irq < 0);
	notify_remote_via_irq(irq);
}

irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
{
	struct shared_info *sh = HYPERVISOR_shared_info;
	int cpu = smp_processor_id();
	xen_ulong_t *cpu_evtchn = per_cpu(cpu_evtchn_mask, cpu);
	int i;
	unsigned long flags;
	static DEFINE_SPINLOCK(debug_lock);
	struct vcpu_info *v;

	spin_lock_irqsave(&debug_lock, flags);

	printk("\nvcpu %d\n  ", cpu);

	for_each_online_cpu(i) {
		int pending;
		v = per_cpu(xen_vcpu, i);
		pending = (get_irq_regs() && i == cpu)
			? xen_irqs_disabled(get_irq_regs())
			: v->evtchn_upcall_mask;
		printk("%d: masked=%d pending=%d event_sel %0*"PRI_xen_ulong"\n  ", i,
		       pending, v->evtchn_upcall_pending,
		       (int)(sizeof(v->evtchn_pending_sel)*2),
		       v->evtchn_pending_sel);
	}
	v = per_cpu(xen_vcpu, cpu);

	printk("\npending:\n   ");
	for (i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--)
		printk("%0*"PRI_xen_ulong"%s",
		       (int)sizeof(sh->evtchn_pending[0])*2,
		       sh->evtchn_pending[i],
		       i % 8 == 0 ? "\n   " : " ");
	printk("\nglobal mask:\n   ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
		printk("%0*"PRI_xen_ulong"%s",
		       (int)(sizeof(sh->evtchn_mask[0])*2),
		       sh->evtchn_mask[i],
		       i % 8 == 0 ? "\n   " : " ");

	printk("\nglobally unmasked:\n   ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
		printk("%0*"PRI_xen_ulong"%s",
		       (int)(sizeof(sh->evtchn_mask[0])*2),
		       sh->evtchn_pending[i] & ~sh->evtchn_mask[i],
		       i % 8 == 0 ? "\n   " : " ");

	printk("\nlocal cpu%d mask:\n   ", cpu);
	for (i = (NR_EVENT_CHANNELS/BITS_PER_EVTCHN_WORD)-1; i >= 0; i--)
		printk("%0*"PRI_xen_ulong"%s", (int)(sizeof(cpu_evtchn[0])*2),
		       cpu_evtchn[i],
		       i % 8 == 0 ? "\n   " : " ");

	printk("\nlocally unmasked:\n   ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--) {
		xen_ulong_t pending = sh->evtchn_pending[i]
			& ~sh->evtchn_mask[i]
			& cpu_evtchn[i];
		printk("%0*"PRI_xen_ulong"%s",
		       (int)(sizeof(sh->evtchn_mask[0])*2),
		       pending, i % 8 == 0 ? "\n   " : " ");
	}

	printk("\npending list:\n");
	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
		if (sync_test_bit(i, BM(sh->evtchn_pending))) {
			int word_idx = i / BITS_PER_EVTCHN_WORD;
			printk("  %d: event %d -> irq %d%s%s%s\n",
			       cpu_from_evtchn(i), i,
			       evtchn_to_irq[i],
			       sync_test_bit(word_idx, BM(&v->evtchn_pending_sel))
					     ? "" : " l2-clear",
			       !sync_test_bit(i, BM(sh->evtchn_mask))
					     ? "" : " globally-masked",
			       sync_test_bit(i, BM(cpu_evtchn))
					     ? "" : " locally-masked");
		}
	}

	spin_unlock_irqrestore(&debug_lock, flags);

	return IRQ_HANDLED;
}

static DEFINE_PER_CPU(unsigned, xed_nesting_count);
static DEFINE_PER_CPU(unsigned int, current_word_idx);
static DEFINE_PER_CPU(unsigned int, current_bit_idx);

/*
 * Mask out the i least significant bits of w
 */
#define MASK_LSBS(w, i) (w & ((~((xen_ulong_t)0UL)) << i))
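/* e.g. MASK_LSBS(0b1011, 2) == 0b1000: bits 0 and 1 are cleared. */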

/*
 * Search the CPU's pending event bitmasks.  For each one found, map
 * the event number to an irq, and feed it into do_IRQ() for
 * handling.
 *
 * Xen uses a two-level bitmap to speed searching.  The first level is
 * a bitset of words which contain pending event bits.  The second
 * level is a bitset of pending events themselves.
 */
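/*
 * For example, with 64-bit event words, port 130 lives at word_idx =
 * 130 / 64 = 2, bit_idx = 130 % 64 = 2; it is delivered only when both
 * bit 2 of evtchn_pending_sel and bit 2 of evtchn_pending[2] are set.
 */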
static void __xen_evtchn_do_upcall(void)
{
	int start_word_idx, start_bit_idx;
	int word_idx, bit_idx;
	int i, irq;
	int cpu = get_cpu();
	struct shared_info *s = HYPERVISOR_shared_info;
	struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
	unsigned count;

	do {
		xen_ulong_t pending_words;
		xen_ulong_t pending_bits;
		struct irq_desc *desc;

		vcpu_info->evtchn_upcall_pending = 0;

		if (__this_cpu_inc_return(xed_nesting_count) - 1)
			goto out;

		/*
		 * Master flag must be cleared /before/ clearing
		 * selector flag. xchg_xen_ulong must contain an
		 * appropriate barrier.
		 */
		if ((irq = per_cpu(virq_to_irq, cpu)[VIRQ_TIMER]) != -1) {
			int evtchn = evtchn_from_irq(irq);
			word_idx = evtchn / BITS_PER_EVTCHN_WORD;
			bit_idx = evtchn % BITS_PER_EVTCHN_WORD;
			if (active_evtchns(cpu, s, word_idx) & ((xen_ulong_t)1 << bit_idx)) {
				desc = irq_to_desc(irq);
				if (desc)
					generic_handle_irq_desc(irq, desc);
			}
		}

		pending_words = xchg_xen_ulong(&vcpu_info->evtchn_pending_sel, 0);

		start_word_idx = __this_cpu_read(current_word_idx);
		start_bit_idx = __this_cpu_read(current_bit_idx);

		word_idx = start_word_idx;

		for (i = 0; pending_words != 0; i++) {
			xen_ulong_t words;

			words = MASK_LSBS(pending_words, word_idx);

			/*
			 * If we masked out all events, wrap to beginning.
			 */
			if (words == 0) {
				word_idx = 0;
				bit_idx = 0;
				continue;
			}
			word_idx = EVTCHN_FIRST_BIT(words);

			pending_bits = active_evtchns(cpu, s, word_idx);
			bit_idx = 0; /* usually scan entire word from start */
			/*
			 * We scan the starting word in two parts.
			 *
			 * 1st time: start in the middle, scanning the
			 * upper bits.
			 *
			 * 2nd time: scan the whole word (not just the
			 * parts skipped in the first pass) -- if an
			 * event in the previously scanned bits is
			 * pending again it would just be scanned on
			 * the next loop anyway.
			 */
			if (word_idx == start_word_idx) {
				if (i == 0)
					bit_idx = start_bit_idx;
			}

			do {
				xen_ulong_t bits;
				int port;

				bits = MASK_LSBS(pending_bits, bit_idx);

				/* If we masked out all events, move on. */
				if (bits == 0)
					break;

				bit_idx = EVTCHN_FIRST_BIT(bits);

				/* Process port. */
				port = (word_idx * BITS_PER_EVTCHN_WORD) + bit_idx;
				irq = evtchn_to_irq[port];

				if (irq != -1) {
					desc = irq_to_desc(irq);
					if (desc)
						generic_handle_irq_desc(irq, desc);
				}

				bit_idx = (bit_idx + 1) % BITS_PER_EVTCHN_WORD;

				/* Next caller starts at last processed + 1 */
				__this_cpu_write(current_word_idx,
						 bit_idx ? word_idx :
						 (word_idx+1) % BITS_PER_EVTCHN_WORD);
				__this_cpu_write(current_bit_idx, bit_idx);
			} while (bit_idx != 0);

			/* Scan start_word_idx twice; all others once. */
			if ((word_idx != start_word_idx) || (i != 0))
				pending_words &= ~((xen_ulong_t)1 << word_idx);

			word_idx = (word_idx + 1) % BITS_PER_EVTCHN_WORD;
		}

		BUG_ON(!irqs_disabled());

		count = __this_cpu_read(xed_nesting_count);
		__this_cpu_write(xed_nesting_count, 0);
	} while (count != 1 || vcpu_info->evtchn_upcall_pending);

out:
	put_cpu();
}

void xen_evtchn_do_upcall(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	irq_enter();
#ifdef CONFIG_X86
	exit_idle();
#endif

	__xen_evtchn_do_upcall();

	irq_exit();
	set_irq_regs(old_regs);
}

void xen_hvm_evtchn_do_upcall(void)
{
	__xen_evtchn_do_upcall();
}
EXPORT_SYMBOL_GPL(xen_hvm_evtchn_do_upcall);

/* Rebind a new event channel to an existing irq. */
void rebind_evtchn_irq(int evtchn, int irq)
{
	struct irq_info *info = info_for_irq(irq);

	if (WARN_ON(!info))
		return;

	/* Make sure the irq is masked, since the new event channel
	   will also be masked. */
	disable_irq(irq);

	mutex_lock(&irq_mapping_update_lock);

	/* After resume the irq<->evtchn mappings are all cleared out */
	BUG_ON(evtchn_to_irq[evtchn] != -1);
	/* Expect irq to have been bound before,
	   so there should be a proper type */
	BUG_ON(info->type == IRQT_UNBOUND);

	xen_irq_info_evtchn_init(irq, evtchn);

	mutex_unlock(&irq_mapping_update_lock);

	/* new event channels are always bound to cpu 0 */
	irq_set_affinity(irq, cpumask_of(0));

	/* Unmask the event channel. */
	enable_irq(irq);
}

/* Rebind an evtchn so that it gets delivered to a specific cpu */
static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
{
	struct evtchn_bind_vcpu bind_vcpu;
	int evtchn = evtchn_from_irq(irq);
	int masked;

	if (!VALID_EVTCHN(evtchn))
		return -1;

	/*
	 * Events delivered via platform PCI interrupts are always
	 * routed to vcpu 0 and hence cannot be rebound.
	 */
	if (xen_hvm_domain() && !xen_have_vector_callback)
		return -1;

	/* Send future instances of this interrupt to other vcpu. */
	bind_vcpu.port = evtchn;
	bind_vcpu.vcpu = tcpu;

	/*
	 * Mask the event while changing the VCPU binding to prevent
	 * it being delivered on an unexpected VCPU.
	 */
	masked = test_and_set_mask(evtchn);

	/*
	 * If this fails, it usually just indicates that we're dealing with a
	 * virq or IPI channel, which don't actually need to be rebound. Ignore
	 * it, but don't do the xenlinux-level rebind in that case.
	 */
	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
		bind_evtchn_to_cpu(evtchn, tcpu);

	if (!masked)
		unmask_evtchn(evtchn);

	return 0;
}

static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest,
			    bool force)
{
	unsigned tcpu = cpumask_first(dest);

	return rebind_irq_to_cpu(data->irq, tcpu);
}

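/*
 * Re-raise an event channel by hand: mask it, mark it pending, then
 * unmask (if it was unmasked before) so the upcall path sees it as a
 * fresh event.
 */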
1548 static int retrigger_evtchn(int evtchn)
1549 {
1550         int masked;
1551
1552         if (!VALID_EVTCHN(evtchn))
1553                 return 0;
1554
1555         masked = test_and_set_mask(evtchn);
1556         set_evtchn(evtchn);
1557         if (!masked)
1558                 unmask_evtchn(evtchn);
1559
1560         return 1;
1561 }
1562
1563 int resend_irq_on_evtchn(unsigned int irq)
1564 {
1565         return retrigger_evtchn(evtchn_from_irq(irq));
1566 }
1567
1568 static void enable_dynirq(struct irq_data *data)
1569 {
1570         int evtchn = evtchn_from_irq(data->irq);
1571
1572         if (VALID_EVTCHN(evtchn))
1573                 unmask_evtchn(evtchn);
1574 }
1575
1576 static void disable_dynirq(struct irq_data *data)
1577 {
1578         int evtchn = evtchn_from_irq(data->irq);
1579
1580         if (VALID_EVTCHN(evtchn))
1581                 mask_evtchn(evtchn);
1582 }
1583
1584 static void ack_dynirq(struct irq_data *data)
1585 {
1586         int evtchn = evtchn_from_irq(data->irq);
1587
1588         irq_move_irq(data);
1589
1590         if (VALID_EVTCHN(evtchn))
1591                 clear_evtchn(evtchn);
1592 }
1593
1594 static void mask_ack_dynirq(struct irq_data *data)
1595 {
1596         disable_dynirq(data);
1597         ack_dynirq(data);
1598 }
1599
1600 static int retrigger_dynirq(struct irq_data *data)
1601 {
1602         return retrigger_evtchn(evtchn_from_irq(data->irq));
1603 }
1604
1605 static void restore_pirqs(void)
1606 {
1607         int pirq, rc, irq, gsi;
1608         struct physdev_map_pirq map_irq;
1609         struct irq_info *info;
1610
1611         list_for_each_entry(info, &xen_irq_list_head, list) {
1612                 if (info->type != IRQT_PIRQ)
1613                         continue;
1614
1615                 pirq = info->u.pirq.pirq;
1616                 gsi = info->u.pirq.gsi;
1617                 irq = info->irq;
1618
1619                 /* save/restore of PT devices doesn't work, so at this point the
1620                  * only devices present are GSI based emulated devices */
1621                 if (!gsi)
1622                         continue;
1623
1624                 map_irq.domid = DOMID_SELF;
1625                 map_irq.type = MAP_PIRQ_TYPE_GSI;
1626                 map_irq.index = gsi;
1627                 map_irq.pirq = pirq;
1628
1629                 rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
1630                 if (rc) {
1631                         pr_warn("xen map irq failed gsi=%d irq=%d pirq=%d rc=%d\n",
1632                                 gsi, irq, pirq, rc);
1633                         xen_free_irq(irq);
1634                         continue;
1635                 }
1636
1637                 printk(KERN_DEBUG "xen: --> irq=%d, pirq=%d\n", irq, map_irq.pirq);
1638
1639                 __startup_pirq(irq);
1640         }
1641 }
1642
static void restore_cpu_virqs(unsigned int cpu)
{
        struct evtchn_bind_virq bind_virq;
        int virq, irq, evtchn;

        for (virq = 0; virq < NR_VIRQS; virq++) {
                irq = per_cpu(virq_to_irq, cpu)[virq];
                if (irq == -1)
                        continue;

                BUG_ON(virq_from_irq(irq) != virq);

                /* Get a new binding from Xen. */
                bind_virq.virq = virq;
                bind_virq.vcpu = cpu;
                if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
                                                &bind_virq) != 0)
                        BUG();
                evtchn = bind_virq.port;

                /* Record the new mapping. */
                xen_irq_info_virq_init(cpu, irq, evtchn, virq);
                bind_evtchn_to_cpu(evtchn, cpu);
        }
}

static void restore_cpu_ipis(unsigned int cpu)
{
        struct evtchn_bind_ipi bind_ipi;
        int ipi, irq, evtchn;

        for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) {
                irq = per_cpu(ipi_to_irq, cpu)[ipi];
                if (irq == -1)
                        continue;

                BUG_ON(ipi_from_irq(irq) != ipi);

                /* Get a new binding from Xen. */
                bind_ipi.vcpu = cpu;
                if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
                                                &bind_ipi) != 0)
                        BUG();
                evtchn = bind_ipi.port;

                /* Record the new mapping. */
                xen_irq_info_ipi_init(cpu, irq, evtchn, ipi);
                bind_evtchn_to_cpu(evtchn, cpu);
        }
}

/* Clear an irq's pending state, in preparation for polling on it. */
void xen_clear_irq_pending(int irq)
{
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn))
                clear_evtchn(evtchn);
}
EXPORT_SYMBOL(xen_clear_irq_pending);

void xen_set_irq_pending(int irq)
{
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn))
                set_evtchn(evtchn);
}

bool xen_test_irq_pending(int irq)
{
        int evtchn = evtchn_from_irq(irq);
        bool ret = false;

        if (VALID_EVTCHN(evtchn))
                ret = test_evtchn(evtchn);

        return ret;
}

/*
 * Poll waiting for an irq to become pending with timeout.  In the usual
 * case, the irq will be disabled so it won't deliver an interrupt.
 */
void xen_poll_irq_timeout(int irq, u64 timeout)
{
        evtchn_port_t evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn)) {
                struct sched_poll poll;

                poll.nr_ports = 1;
                poll.timeout = timeout;
                set_xen_guest_handle(poll.ports, &evtchn);

                if (HYPERVISOR_sched_op(SCHEDOP_poll, &poll) != 0)
                        BUG();
        }
}
EXPORT_SYMBOL(xen_poll_irq_timeout);

/*
 * Poll waiting for an irq to become pending.  In the usual case, the
 * irq will be disabled so it won't deliver an interrupt.
 */
void xen_poll_irq(int irq)
{
        xen_poll_irq_timeout(irq, 0 /* no timeout */);
}
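
/*
 * Hypothetical caller sketch (not part of this file): the intended
 * pattern is clear-check-poll, mirroring how the Xen spinlock code
 * consumes these helpers.  The names irq and wakeup_condition() are
 * illustrative only.  Passing 0 as the timeout means "no timeout"; a
 * non-zero value is handed to Xen's SCHEDOP_poll as-is.
 *
 *      xen_clear_irq_pending(irq);
 *      if (!wakeup_condition())        // re-check after clearing
 *              xen_poll_irq(irq);      // vcpu blocks until evtchn fires
 *      if (xen_test_irq_pending(irq))
 *              wakeup_condition();     // the event arrived; re-evaluate
 */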

/* Check whether the IRQ line is shared with other guests. */
int xen_test_irq_shared(int irq)
{
        struct irq_info *info = info_for_irq(irq);
        struct physdev_irq_status_query irq_status;

        if (WARN_ON(!info))
                return -ENOENT;

        irq_status.irq = info->u.pirq.pirq;

        if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
                return 0;
        return !(irq_status.flags & XENIRQSTAT_shared);
}
EXPORT_SYMBOL_GPL(xen_test_irq_shared);

void xen_irq_resume(void)
{
        unsigned int cpu, evtchn;
        struct irq_info *info;

        /* New event-channel space is not 'live' yet. */
        for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
                mask_evtchn(evtchn);

        /* No IRQ <-> event-channel mappings. */
        list_for_each_entry(info, &xen_irq_list_head, list)
                info->evtchn = 0; /* zap event-channel binding */

        for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
                evtchn_to_irq[evtchn] = -1;

        for_each_possible_cpu(cpu) {
                restore_cpu_virqs(cpu);
                restore_cpu_ipis(cpu);
        }

        restore_pirqs();
}
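
/*
 * xen_irq_resume() is invoked from the suspend/resume path (see
 * drivers/xen/manage.c) once the domain is running again: every event
 * channel was invalidated by the hypervisor, so the code above masks
 * everything, zaps the stale mappings and rebinds VIRQs, IPIs and
 * pirqs from the still-intact irq_info list.
 */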

static struct irq_chip xen_dynamic_chip __read_mostly = {
        .name                   = "xen-dyn",

        .irq_disable            = disable_dynirq,
        .irq_mask               = disable_dynirq,
        .irq_unmask             = enable_dynirq,

        .irq_ack                = ack_dynirq,
        .irq_mask_ack           = mask_ack_dynirq,

        .irq_set_affinity       = set_affinity_irq,
        .irq_retrigger          = retrigger_dynirq,
};
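
/*
 * Note: .irq_disable and .irq_mask both point at disable_dynirq because
 * an event channel has no separate disable control, only its mask bit;
 * masking is the strongest "off" available.
 */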

static struct irq_chip xen_pirq_chip __read_mostly = {
        .name                   = "xen-pirq",

        .irq_startup            = startup_pirq,
        .irq_shutdown           = shutdown_pirq,
        .irq_enable             = enable_pirq,
        .irq_disable            = disable_pirq,

        .irq_mask               = disable_dynirq,
        .irq_unmask             = enable_dynirq,

        .irq_ack                = eoi_pirq,
        .irq_eoi                = eoi_pirq,
        .irq_mask_ack           = mask_ack_pirq,

        .irq_set_affinity       = set_affinity_irq,

        .irq_retrigger          = retrigger_dynirq,
};

static struct irq_chip xen_percpu_chip __read_mostly = {
        .name                   = "xen-percpu",

        .irq_disable            = disable_dynirq,
        .irq_mask               = disable_dynirq,
        .irq_unmask             = enable_dynirq,

        .irq_ack                = ack_dynirq,
};
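
/*
 * Sketch (assumption: mirrors the binding helpers earlier in this
 * file): a freshly allocated irq is attached to one of these chips
 * roughly like so, e.g. for an inter-domain event channel:
 *
 *      irq_set_chip_and_handler_name(irq, &xen_dynamic_chip,
 *                                    handle_edge_irq, "event");
 */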

int xen_set_callback_via(uint64_t via)
{
        struct xen_hvm_param a;

        a.domid = DOMID_SELF;
        a.index = HVM_PARAM_CALLBACK_IRQ;
        a.value = via;
        return HYPERVISOR_hvm_op(HVMOP_set_param, &a);
}
EXPORT_SYMBOL_GPL(xen_set_callback_via);
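
/*
 * Sketch of the 'via' encoding (per include/xen/hvm.h): the top byte of
 * HVM_PARAM_CALLBACK_IRQ selects the delivery method, so for vector
 * callbacks the value is built as:
 *
 *      callback_via = HVM_CALLBACK_VECTOR(vector);
 *      // == ((uint64_t)HVM_CALLBACK_VIA_TYPE_VECTOR << 56) | vector
 */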

#ifdef CONFIG_XEN_PVHVM
/*
 * Vector callbacks are better than PCI interrupts to receive event
 * channel notifications because we can receive vector callbacks on any
 * vcpu and we don't need PCI support or APIC interactions.
 */
void xen_callback_vector(void)
{
        int rc;
        uint64_t callback_via;

        if (xen_have_vector_callback) {
                callback_via = HVM_CALLBACK_VECTOR(HYPERVISOR_CALLBACK_VECTOR);
                rc = xen_set_callback_via(callback_via);
                if (rc) {
                        pr_err("Request for Xen HVM callback vector failed\n");
                        xen_have_vector_callback = 0;
                        return;
                }
                pr_info("Xen HVM callback vector for event delivery is enabled\n");
                /* In the restore case the vector has already been allocated. */
                if (!test_bit(HYPERVISOR_CALLBACK_VECTOR, used_vectors))
                        alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR,
                                        xen_hvm_callback_vector);
        }
}
#else
void xen_callback_vector(void) {}
#endif
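
/*
 * Delivery-path note (outside this file, stated as an assumption about
 * the x86 entry code of this era): xen_hvm_callback_vector is an entry
 * stub that funnels the vector above into xen_evtchn_do_upcall(), the
 * same upcall handler PV guests use.
 */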

void __init xen_init_IRQ(void)
{
        int i;

        evtchn_to_irq = kcalloc(NR_EVENT_CHANNELS, sizeof(*evtchn_to_irq),
                                GFP_KERNEL);
        BUG_ON(!evtchn_to_irq);
        for (i = 0; i < NR_EVENT_CHANNELS; i++)
                evtchn_to_irq[i] = -1;

        /* No event channels are 'live' right now. */
        for (i = 0; i < NR_EVENT_CHANNELS; i++)
                mask_evtchn(i);

        pirq_needs_eoi = pirq_needs_eoi_flag;

#ifdef CONFIG_X86
        if (xen_hvm_domain()) {
                xen_callback_vector();
                native_init_IRQ();
                /*
                 * pci_xen_hvm_init must be called after native_init_IRQ so
                 * that __acpi_register_gsi can point at the right function.
                 */
                pci_xen_hvm_init();
        } else {
                int rc;
                struct physdev_pirq_eoi_gmfn eoi_gmfn;

                irq_ctx_init(smp_processor_id());
                if (xen_initial_domain())
                        pci_xen_initial_domain();

                pirq_eoi_map = (void *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
                eoi_gmfn.gmfn = virt_to_mfn(pirq_eoi_map);
                rc = HYPERVISOR_physdev_op(PHYSDEVOP_pirq_eoi_gmfn_v2, &eoi_gmfn);
                if (rc != 0) {
                        free_page((unsigned long)pirq_eoi_map);
                        pirq_eoi_map = NULL;
                } else {
                        pirq_needs_eoi = pirq_check_eoi_map;
                }
        }
#endif
}
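
/*
 * Hypothetical end-to-end usage (not part of this file): a front-end
 * driver typically consumes the machinery above through
 * bind_evtchn_to_irqhandler(), defined earlier in events.c.  All names
 * prefixed "my" are illustrative only.
 *
 *      static irqreturn_t my_handler(int irq, void *dev_id)
 *      {
 *              // the chip has already acked the event channel
 *              return IRQ_HANDLED;
 *      }
 *
 *      int my_bind(unsigned int evtchn, void *mydev)
 *      {
 *              int irq = bind_evtchn_to_irqhandler(evtchn, my_handler,
 *                                                  0, "mydev", mydev);
 *              if (irq < 0)
 *                      return irq;     // no free irq or hypercall error
 *              return 0;
 *      }
 */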