/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

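/*
 * Hotplug interrupt bit mappings, indexed by hpd pin (HPD_*): one
 * table per platform/PCH flavour, since the register bits differ.
 */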
static const u32 hpd_ibx[] = {
        [HPD_CRT] = SDE_CRT_HOTPLUG,
        [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
        [HPD_PORT_B] = SDE_PORTB_HOTPLUG,
        [HPD_PORT_C] = SDE_PORTC_HOTPLUG,
        [HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[] = {
        [HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
        [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
        [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
        [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
        [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_mask_i915[] = {
        [HPD_CRT] = CRT_HOTPLUG_INT_EN,
        [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
        [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
        [HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
        [HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
        [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_gen4[] = {
        [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
        [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
        [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
        [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
        [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
        [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
        [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
        [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
        [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
        [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
        [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
        [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* DEIMR helpers (used for display hotplug interrupts among others) */
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
        assert_spin_locked(&dev_priv->irq_lock);

        if (dev_priv->pc8.irqs_disabled) {
                WARN(1, "IRQs disabled\n");
                dev_priv->pc8.regsave.deimr &= ~mask;
                return;
        }

        if ((dev_priv->irq_mask & mask) != 0) {
                dev_priv->irq_mask &= ~mask;
                I915_WRITE(DEIMR, dev_priv->irq_mask);
                POSTING_READ(DEIMR);
        }
}

static void
ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
        assert_spin_locked(&dev_priv->irq_lock);

        if (dev_priv->pc8.irqs_disabled) {
                WARN(1, "IRQs disabled\n");
                dev_priv->pc8.regsave.deimr |= mask;
                return;
        }

        if ((dev_priv->irq_mask & mask) != mask) {
                dev_priv->irq_mask |= mask;
                I915_WRITE(DEIMR, dev_priv->irq_mask);
                POSTING_READ(DEIMR);
        }
}

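/*
 * Convention shared by the IMR update helpers below: @interrupt_mask
 * selects which register bits to touch and @enabled_irq_mask says
 * which of those end up unmasked. Illustrative example:
 * interrupt_mask = 0x3 with enabled_irq_mask = 0x1 enables bit 0,
 * disables bit 1 and leaves every other bit of the register untouched.
 */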
/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
                              uint32_t interrupt_mask,
                              uint32_t enabled_irq_mask)
{
        assert_spin_locked(&dev_priv->irq_lock);

        if (dev_priv->pc8.irqs_disabled) {
                WARN(1, "IRQs disabled\n");
                dev_priv->pc8.regsave.gtimr &= ~interrupt_mask;
                dev_priv->pc8.regsave.gtimr |= (~enabled_irq_mask &
                                                interrupt_mask);
                return;
        }

        dev_priv->gt_irq_mask &= ~interrupt_mask;
        dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
        I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
        POSTING_READ(GTIMR);
}

void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
        ilk_update_gt_irq(dev_priv, mask, mask);
}

void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
        ilk_update_gt_irq(dev_priv, mask, 0);
}

/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
                              uint32_t interrupt_mask,
                              uint32_t enabled_irq_mask)
{
        uint32_t new_val;

        assert_spin_locked(&dev_priv->irq_lock);

        if (dev_priv->pc8.irqs_disabled) {
                WARN(1, "IRQs disabled\n");
                dev_priv->pc8.regsave.gen6_pmimr &= ~interrupt_mask;
                dev_priv->pc8.regsave.gen6_pmimr |= (~enabled_irq_mask &
                                                     interrupt_mask);
                return;
        }

        new_val = dev_priv->pm_irq_mask;
        new_val &= ~interrupt_mask;
        new_val |= (~enabled_irq_mask & interrupt_mask);

        if (new_val != dev_priv->pm_irq_mask) {
                dev_priv->pm_irq_mask = new_val;
                I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
                POSTING_READ(GEN6_PMIMR);
        }
}

void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
        snb_update_pm_irq(dev_priv, mask, mask);
}

void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
        snb_update_pm_irq(dev_priv, mask, 0);
}

static bool ivb_can_enable_err_int(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *crtc;
        enum pipe pipe;

        assert_spin_locked(&dev_priv->irq_lock);

        for_each_pipe(pipe) {
                crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

                if (crtc->cpu_fifo_underrun_disabled)
                        return false;
        }

        return true;
}

static bool cpt_can_enable_serr_int(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum pipe pipe;
        struct intel_crtc *crtc;

        assert_spin_locked(&dev_priv->irq_lock);

        for_each_pipe(pipe) {
                crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

                if (crtc->pch_fifo_underrun_disabled)
                        return false;
        }

        return true;
}

static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
                                                 enum pipe pipe, bool enable)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
                                          DE_PIPEB_FIFO_UNDERRUN;

        if (enable)
                ironlake_enable_display_irq(dev_priv, bit);
        else
                ironlake_disable_display_irq(dev_priv, bit);
}

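/*
 * On IVB a single DEIMR bit (DE_ERR_INT_IVB) gates the error interrupt
 * for all pipes, so the per-pipe sticky bit in GEN7_ERR_INT is cleared
 * before enabling, and an underrun that latched while reporting was
 * off gets logged as "uncleared" on the disable path below.
 */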
static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
                                                  enum pipe pipe, bool enable)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (enable) {
                I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));

                if (!ivb_can_enable_err_int(dev))
                        return;

                ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
        } else {
                bool was_enabled = !(I915_READ(DEIMR) & DE_ERR_INT_IVB);

                /* Change the state _after_ we've read out the current one. */
                ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);

                if (!was_enabled &&
                    (I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe))) {
                        DRM_DEBUG_KMS("uncleared fifo underrun on pipe %c\n",
                                      pipe_name(pipe));
                }
        }
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
                                         uint32_t interrupt_mask,
                                         uint32_t enabled_irq_mask)
{
        uint32_t sdeimr = I915_READ(SDEIMR);
        sdeimr &= ~interrupt_mask;
        sdeimr |= (~enabled_irq_mask & interrupt_mask);

        assert_spin_locked(&dev_priv->irq_lock);

        if (dev_priv->pc8.irqs_disabled &&
            (interrupt_mask & SDE_HOTPLUG_MASK_CPT)) {
                WARN(1, "IRQs disabled\n");
                dev_priv->pc8.regsave.sdeimr &= ~interrupt_mask;
                dev_priv->pc8.regsave.sdeimr |= (~enabled_irq_mask &
                                                 interrupt_mask);
                return;
        }

        I915_WRITE(SDEIMR, sdeimr);
        POSTING_READ(SDEIMR);
}
#define ibx_enable_display_interrupt(dev_priv, bits) \
        ibx_display_interrupt_update((dev_priv), (bits), (bits))
#define ibx_disable_display_interrupt(dev_priv, bits) \
        ibx_display_interrupt_update((dev_priv), (bits), 0)

static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
                                            enum transcoder pch_transcoder,
                                            bool enable)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
                       SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;

        if (enable)
                ibx_enable_display_interrupt(dev_priv, bit);
        else
                ibx_disable_display_interrupt(dev_priv, bit);
}

static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
                                            enum transcoder pch_transcoder,
                                            bool enable)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (enable) {
                I915_WRITE(SERR_INT,
                           SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));

                if (!cpt_can_enable_serr_int(dev))
                        return;

                ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
        } else {
                uint32_t tmp = I915_READ(SERR_INT);
                bool was_enabled = !(I915_READ(SDEIMR) & SDE_ERROR_CPT);

                /* Change the state _after_ we've read out the current one. */
                ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);

                if (!was_enabled &&
                    (tmp & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder))) {
                        DRM_DEBUG_KMS("uncleared pch fifo underrun on pch transcoder %c\n",
                                      transcoder_name(pch_transcoder));
                }
        }
}

/**
 * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pipe: pipe
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function lets us disable or enable CPU fifo underruns for a specific
 * pipe. Notice that on some Gens (e.g. IVB, HSW), disabling FIFO underrun
 * reporting for one pipe may also disable all the other CPU error interrupts
 * for the other pipes, because there's just one interrupt mask/enable bit
 * for all the pipes.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
                                           enum pipe pipe, bool enable)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        unsigned long flags;
        bool ret;

        spin_lock_irqsave(&dev_priv->irq_lock, flags);

        ret = !intel_crtc->cpu_fifo_underrun_disabled;

        if (enable == ret)
                goto done;

        intel_crtc->cpu_fifo_underrun_disabled = !enable;

        if (IS_GEN5(dev) || IS_GEN6(dev))
                ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
        else if (IS_GEN7(dev))
                ivybridge_set_fifo_underrun_reporting(dev, pipe, enable);

done:
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
        return ret;
}

/**
 * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function lets us disable or enable PCH fifo underruns for a specific
 * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
 * underrun reporting for one transcoder may also disable all the other PCH
 * error interrupts for the other transcoders, because there's just one
 * interrupt mask/enable bit for all the transcoders.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
                                           enum transcoder pch_transcoder,
                                           bool enable)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        unsigned long flags;
        bool ret;

        /*
         * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT
         * has only one pch transcoder A that all pipes can use. To avoid racy
         * pch transcoder -> pipe lookups from interrupt code simply store the
         * underrun statistics in crtc A. Since we never expose this anywhere
         * nor use it outside of the fifo underrun code here using the "wrong"
         * crtc on LPT won't cause issues.
         */

        spin_lock_irqsave(&dev_priv->irq_lock, flags);

        ret = !intel_crtc->pch_fifo_underrun_disabled;

        if (enable == ret)
                goto done;

        intel_crtc->pch_fifo_underrun_disabled = !enable;

        if (HAS_PCH_IBX(dev))
                ibx_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
        else
                cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable);

done:
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
        return ret;
}

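/*
 * PIPESTAT keeps the interrupt enable bits in its upper half and the
 * corresponding status bits 16 positions below them, which is why the
 * helpers below mask with 0x7fff0000 and use (mask >> 16) to clear
 * any pending status for the bits being enabled.
 */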
void
i915_enable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask)
{
        u32 reg = PIPESTAT(pipe);
        u32 pipestat = I915_READ(reg) & 0x7fff0000;

        assert_spin_locked(&dev_priv->irq_lock);

        if ((pipestat & mask) == mask)
                return;

        /* Enable the interrupt, clear any pending status */
        pipestat |= mask | (mask >> 16);
        I915_WRITE(reg, pipestat);
        POSTING_READ(reg);
}

void
i915_disable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask)
{
        u32 reg = PIPESTAT(pipe);
        u32 pipestat = I915_READ(reg) & 0x7fff0000;

        assert_spin_locked(&dev_priv->irq_lock);

        if ((pipestat & mask) == 0)
                return;

        pipestat &= ~mask;
        I915_WRITE(reg, pipestat);
        POSTING_READ(reg);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        unsigned long irqflags;

        if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
                return;

        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

        i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_ENABLE);
        if (INTEL_INFO(dev)->gen >= 4)
                i915_enable_pipestat(dev_priv, PIPE_A,
                                     PIPE_LEGACY_BLC_EVENT_ENABLE);

        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
                /* Locking is horribly broken here, but whatever. */
                struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
                struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

                return intel_crtc->active;
        } else {
                return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
        }
}

static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
{
        /* Gen2 doesn't have a hardware frame counter */
        return 0;
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
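/*
 * The hardware frame counter is split across two registers: the high
 * bits live in PIPEFRAME, while PIPEFRAMEPIXEL holds the low 8 bits
 * of the frame count together with the pixel counter, hence the
 * shift-and-merge in the return statement below.
 */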
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long high_frame;
        unsigned long low_frame;
        u32 high1, high2, low, pixel, vbl_start;

        if (!i915_pipe_enabled(dev, pipe)) {
                DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
                                "pipe %c\n", pipe_name(pipe));
                return 0;
        }

        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
                struct intel_crtc *intel_crtc =
                        to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
                const struct drm_display_mode *mode =
                        &intel_crtc->config.adjusted_mode;

                vbl_start = mode->crtc_vblank_start * mode->crtc_htotal;
        } else {
                enum transcoder cpu_transcoder =
                        intel_pipe_to_cpu_transcoder(dev_priv, pipe);
                u32 htotal;

                htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
                vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1;

                vbl_start *= htotal;
        }

        high_frame = PIPEFRAME(pipe);
        low_frame = PIPEFRAMEPIXEL(pipe);

        /*
         * High & low register fields aren't synchronized, so make sure
         * we get a low value that's stable across two reads of the high
         * register.
         */
        do {
                high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
                low   = I915_READ(low_frame);
                high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
        } while (high1 != high2);

        high1 >>= PIPE_FRAME_HIGH_SHIFT;
        pixel = low & PIPE_PIXEL_MASK;
        low >>= PIPE_FRAME_LOW_SHIFT;

        /*
         * The frame counter increments at beginning of active.
         * Cook up a vblank counter by also checking the pixel
         * counter against vblank start.
         */
        return ((high1 << 8) | low) + (pixel >= vbl_start);
}

static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int reg = PIPE_FRMCOUNT_GM45(pipe);

        if (!i915_pipe_enabled(dev, pipe)) {
                DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
                                 "pipe %c\n", pipe_name(pipe));
                return 0;
        }

        return I915_READ(reg);
}

/* raw reads, only for fast reads of display block, no need for forcewake etc. */
#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
#define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__))

static bool intel_pipe_in_vblank_locked(struct drm_device *dev, enum pipe pipe)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t status;
        int reg;

        if (IS_VALLEYVIEW(dev)) {
                status = pipe == PIPE_A ?
                        I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT :
                        I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

                reg = VLV_ISR;
        } else if (IS_GEN2(dev)) {
                status = pipe == PIPE_A ?
                        I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT :
                        I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

                reg = ISR;
        } else if (INTEL_INFO(dev)->gen < 5) {
                status = pipe == PIPE_A ?
                        I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT :
                        I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

                reg = ISR;
        } else if (INTEL_INFO(dev)->gen < 7) {
                status = pipe == PIPE_A ?
                        DE_PIPEA_VBLANK :
                        DE_PIPEB_VBLANK;

                reg = DEISR;
        } else {
                switch (pipe) {
                default:
                case PIPE_A:
                        status = DE_PIPEA_VBLANK_IVB;
                        break;
                case PIPE_B:
                        status = DE_PIPEB_VBLANK_IVB;
                        break;
                case PIPE_C:
                        status = DE_PIPEC_VBLANK_IVB;
                        break;
                }

                reg = DEISR;
        }

        if (IS_GEN2(dev))
                return __raw_i915_read16(dev_priv, reg) & status;
        else
                return __raw_i915_read32(dev_priv, reg) & status;
}

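/*
 * Returns the current scanout position for @pipe. On return, *vpos is
 * negative while inside vblank (counting up towards 0 at vbl_end) and
 * positive during the active portion; gen2, G4X and gen5+ only expose
 * a scanline counter, so *hpos is always 0 there.
 */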
static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
                             int *vpos, int *hpos, ktime_t *stime, ktime_t *etime)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
        int position;
        int vbl_start, vbl_end, htotal, vtotal;
        bool in_vbl = true;
        int ret = 0;
        unsigned long irqflags;

        if (!intel_crtc->active) {
                DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
                                 "pipe %c\n", pipe_name(pipe));
                return 0;
        }

        htotal = mode->crtc_htotal;
        vtotal = mode->crtc_vtotal;
        vbl_start = mode->crtc_vblank_start;
        vbl_end = mode->crtc_vblank_end;

        ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

        /*
         * Lock uncore.lock, as we will do multiple timing critical raw
         * register reads, potentially with preemption disabled, so the
         * following code must not block on uncore.lock.
         */
        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

        /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

        /* Get optional system timestamp before query. */
        if (stime)
                *stime = ktime_get();

        if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
                /* No obvious pixelcount register. Only query vertical
                 * scanout position from Display scan line register.
                 */
                if (IS_GEN2(dev))
                        position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
                else
                        position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

                /*
                 * The scanline counter increments at the leading edge
                 * of hsync, ie. it completely misses the active portion
                 * of the line. Fix up the counter at both edges of vblank
                 * to get a more accurate picture whether we're in vblank
                 * or not.
                 */
                in_vbl = intel_pipe_in_vblank_locked(dev, pipe);
                if ((in_vbl && position == vbl_start - 1) ||
                    (!in_vbl && position == vbl_end - 1))
                        position = (position + 1) % vtotal;
        } else {
                /* Have access to pixelcount since start of frame.
                 * We can split this into vertical and horizontal
                 * scanout position.
                 */
                position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

                /* convert to pixel counts */
                vbl_start *= htotal;
                vbl_end *= htotal;
                vtotal *= htotal;
        }

        /* Get optional system timestamp after query. */
        if (etime)
                *etime = ktime_get();

        /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

        in_vbl = position >= vbl_start && position < vbl_end;

        /*
         * While in vblank, position will be negative
         * counting up towards 0 at vbl_end. And outside
         * vblank, position will be positive counting
         * up since vbl_end.
         */
        if (position >= vbl_start)
                position -= vbl_end;
        else
                position += vtotal - vbl_end;

        if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
                *vpos = position;
                *hpos = 0;
        } else {
                *vpos = position / htotal;
                *hpos = position - (*vpos * htotal);
        }

        /* In vblank? */
        if (in_vbl)
                ret |= DRM_SCANOUTPOS_INVBL;

        return ret;
}

static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
                              int *max_error,
                              struct timeval *vblank_time,
                              unsigned flags)
{
        struct drm_crtc *crtc;

        if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
                DRM_ERROR("Invalid crtc %d\n", pipe);
                return -EINVAL;
        }

        /* Get drm_crtc to timestamp: */
        crtc = intel_get_crtc_for_pipe(dev, pipe);
        if (crtc == NULL) {
                DRM_ERROR("Invalid crtc %d\n", pipe);
                return -EINVAL;
        }

        if (!crtc->enabled) {
                DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
                return -EBUSY;
        }

        /* Helper routine in DRM core does all the work: */
        return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
                                                     vblank_time, flags,
                                                     crtc);
}

static bool intel_hpd_irq_event(struct drm_device *dev,
                                struct drm_connector *connector)
{
        enum drm_connector_status old_status;

        WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
        old_status = connector->status;

        connector->status = connector->funcs->detect(connector, false);
        if (old_status == connector->status)
                return false;

        DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
                      connector->base.id,
                      drm_get_connector_name(connector),
                      drm_get_connector_status_name(old_status),
                      drm_get_connector_status_name(connector->status));

        return true;
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)

static void i915_hotplug_work_func(struct work_struct *work)
{
        drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
                                                    hotplug_work);
        struct drm_device *dev = dev_priv->dev;
        struct drm_mode_config *mode_config = &dev->mode_config;
        struct intel_connector *intel_connector;
        struct intel_encoder *intel_encoder;
        struct drm_connector *connector;
        unsigned long irqflags;
        bool hpd_disabled = false;
        bool changed = false;
        u32 hpd_event_bits;

        /* HPD irq before everything is fully set up. */
        if (!dev_priv->enable_hotplug_processing)
                return;

        mutex_lock(&mode_config->mutex);
        DRM_DEBUG_KMS("running encoder hotplug functions\n");

        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

        hpd_event_bits = dev_priv->hpd_event_bits;
        dev_priv->hpd_event_bits = 0;
        list_for_each_entry(connector, &mode_config->connector_list, head) {
                intel_connector = to_intel_connector(connector);
                intel_encoder = intel_connector->encoder;
                if (intel_encoder->hpd_pin > HPD_NONE &&
                    dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
                    connector->polled == DRM_CONNECTOR_POLL_HPD) {
                        DRM_INFO("HPD interrupt storm detected on connector %s: "
                                 "switching from hotplug detection to polling\n",
                                drm_get_connector_name(connector));
                        dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
                        connector->polled = DRM_CONNECTOR_POLL_CONNECT
                                | DRM_CONNECTOR_POLL_DISCONNECT;
                        hpd_disabled = true;
                }
                if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
                        DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
                                      drm_get_connector_name(connector), intel_encoder->hpd_pin);
                }
        }
        /* If there were no outputs to poll, polling was disabled, so make
         * sure it gets re-enabled now that we're disabling HPD on some
         * connectors. */
        if (hpd_disabled) {
                drm_kms_helper_poll_enable(dev);
                mod_timer(&dev_priv->hotplug_reenable_timer,
                          jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
        }

        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

        list_for_each_entry(connector, &mode_config->connector_list, head) {
                intel_connector = to_intel_connector(connector);
                intel_encoder = intel_connector->encoder;
                if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
                        if (intel_encoder->hot_plug)
                                intel_encoder->hot_plug(intel_encoder);
                        if (intel_hpd_irq_event(dev, connector))
                                changed = true;
                }
        }
        mutex_unlock(&mode_config->mutex);

        if (changed)
                drm_kms_helper_hotplug_event(dev);
}

static void ironlake_rps_change_irq_handler(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        u32 busy_up, busy_down, max_avg, min_avg;
        u8 new_delay;

        spin_lock(&mchdev_lock);

        I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

        new_delay = dev_priv->ips.cur_delay;

        I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
        busy_up = I915_READ(RCPREVBSYTUPAVG);
        busy_down = I915_READ(RCPREVBSYTDNAVG);
        max_avg = I915_READ(RCBMAXAVG);
        min_avg = I915_READ(RCBMINAVG);

        /* Handle RCS change request from hw */
        if (busy_up > max_avg) {
                if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
                        new_delay = dev_priv->ips.cur_delay - 1;
                if (new_delay < dev_priv->ips.max_delay)
                        new_delay = dev_priv->ips.max_delay;
        } else if (busy_down < min_avg) {
                if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
                        new_delay = dev_priv->ips.cur_delay + 1;
                if (new_delay > dev_priv->ips.min_delay)
                        new_delay = dev_priv->ips.min_delay;
        }

        if (ironlake_set_drps(dev, new_delay))
                dev_priv->ips.cur_delay = new_delay;

        spin_unlock(&mchdev_lock);
}

static void notify_ring(struct drm_device *dev,
                        struct intel_ring_buffer *ring)
{
        if (ring->obj == NULL)
                return;

        trace_i915_gem_request_complete(ring);

        wake_up_all(&ring->irq_queue);
        i915_queue_hangcheck(dev);
}

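/*
 * RPS work handler. Note the exponential stepping: consecutive up (or
 * down) threshold events double last_adj, so the frequency moves by
 * 1, 2, 4, ... steps at a time until clamped to the min/max limits
 * below, while a down-timeout drops straight to RPe (or to the
 * minimum if we're already at or below RPe).
 */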
static void gen6_pm_rps_work(struct work_struct *work)
{
        drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
                                                    rps.work);
        u32 pm_iir;
        int new_delay, adj;

        spin_lock_irq(&dev_priv->irq_lock);
        pm_iir = dev_priv->rps.pm_iir;
        dev_priv->rps.pm_iir = 0;
        /* Make sure not to corrupt PMIMR state used by ringbuffer code */
        snb_enable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS);
        spin_unlock_irq(&dev_priv->irq_lock);

        /* Make sure we didn't queue anything we're not going to process. */
        WARN_ON(pm_iir & ~GEN6_PM_RPS_EVENTS);

        if ((pm_iir & GEN6_PM_RPS_EVENTS) == 0)
                return;

        mutex_lock(&dev_priv->rps.hw_lock);

        adj = dev_priv->rps.last_adj;
        if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
                if (adj > 0)
                        adj *= 2;
                else
                        adj = 1;
                new_delay = dev_priv->rps.cur_delay + adj;

                /*
                 * For better performance, jump directly
                 * to RPe if we're below it.
                 */
                if (new_delay < dev_priv->rps.rpe_delay)
                        new_delay = dev_priv->rps.rpe_delay;
        } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
                if (dev_priv->rps.cur_delay > dev_priv->rps.rpe_delay)
                        new_delay = dev_priv->rps.rpe_delay;
                else
                        new_delay = dev_priv->rps.min_delay;
                adj = 0;
        } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
                if (adj < 0)
                        adj *= 2;
                else
                        adj = -1;
                new_delay = dev_priv->rps.cur_delay + adj;
        } else { /* unknown event */
                new_delay = dev_priv->rps.cur_delay;
        }

        /* sysfs frequency interfaces may have snuck in while servicing the
         * interrupt
         */
        if (new_delay < (int)dev_priv->rps.min_delay)
                new_delay = dev_priv->rps.min_delay;
        if (new_delay > (int)dev_priv->rps.max_delay)
                new_delay = dev_priv->rps.max_delay;
        dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_delay;

        if (IS_VALLEYVIEW(dev_priv->dev))
                valleyview_set_rps(dev_priv->dev, new_delay);
        else
                gen6_set_rps(dev_priv->dev, new_delay);

        mutex_unlock(&dev_priv->rps.hw_lock);
}

/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows, since statistically
 * the same row is more likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
        drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
                                                    l3_parity.error_work);
        u32 error_status, row, bank, subbank;
        char *parity_event[6];
        uint32_t misccpctl;
        unsigned long flags;
        uint8_t slice = 0;

        /* We must turn off DOP level clock gating to access the L3 registers.
         * In order to prevent a get/put style interface, acquire struct mutex
         * any time we access those registers.
         */
        mutex_lock(&dev_priv->dev->struct_mutex);

        /* If we've screwed up tracking, just let the interrupt fire again */
        if (WARN_ON(!dev_priv->l3_parity.which_slice))
                goto out;

        misccpctl = I915_READ(GEN7_MISCCPCTL);
        I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
        POSTING_READ(GEN7_MISCCPCTL);

        while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
                u32 reg;

                slice--;
                if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
                        break;

                dev_priv->l3_parity.which_slice &= ~(1<<slice);

                reg = GEN7_L3CDERRST1 + (slice * 0x200);

                error_status = I915_READ(reg);
                row = GEN7_PARITY_ERROR_ROW(error_status);
                bank = GEN7_PARITY_ERROR_BANK(error_status);
                subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

                I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
                POSTING_READ(reg);

                parity_event[0] = I915_L3_PARITY_UEVENT "=1";
                parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
                parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
                parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
                parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
                parity_event[5] = NULL;

                kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
                                   KOBJ_CHANGE, parity_event);

                DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
                          slice, row, bank, subbank);

                kfree(parity_event[4]);
                kfree(parity_event[3]);
                kfree(parity_event[2]);
                kfree(parity_event[1]);
        }

        I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
        WARN_ON(dev_priv->l3_parity.which_slice);
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
        ilk_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

        mutex_unlock(&dev_priv->dev->struct_mutex);
}

static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

        if (!HAS_L3_DPF(dev))
                return;

        spin_lock(&dev_priv->irq_lock);
        ilk_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
        spin_unlock(&dev_priv->irq_lock);

        iir &= GT_PARITY_ERROR(dev);
        if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
                dev_priv->l3_parity.which_slice |= 1 << 1;

        if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
                dev_priv->l3_parity.which_slice |= 1 << 0;

        queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

static void ilk_gt_irq_handler(struct drm_device *dev,
                               struct drm_i915_private *dev_priv,
                               u32 gt_iir)
{
        if (gt_iir &
            (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
                notify_ring(dev, &dev_priv->ring[RCS]);
        if (gt_iir & ILK_BSD_USER_INTERRUPT)
                notify_ring(dev, &dev_priv->ring[VCS]);
}

static void snb_gt_irq_handler(struct drm_device *dev,
                               struct drm_i915_private *dev_priv,
                               u32 gt_iir)
{
        if (gt_iir &
            (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
                notify_ring(dev, &dev_priv->ring[RCS]);
        if (gt_iir & GT_BSD_USER_INTERRUPT)
                notify_ring(dev, &dev_priv->ring[VCS]);
        if (gt_iir & GT_BLT_USER_INTERRUPT)
                notify_ring(dev, &dev_priv->ring[BCS]);

        if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
                      GT_BSD_CS_ERROR_INTERRUPT |
                      GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
                DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
                i915_handle_error(dev, false);
        }

        if (gt_iir & GT_PARITY_ERROR(dev))
                ivybridge_parity_error_irq_handler(dev, gt_iir);
}

#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_THRESHOLD 5

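/*
 * More than HPD_STORM_THRESHOLD interrupts on a single pin within
 * HPD_STORM_DETECT_PERIOD msecs is treated as an interrupt storm: the
 * pin is marked disabled here and the hotplug work then switches the
 * affected connectors over to periodic polling.
 */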
static inline void intel_hpd_irq_handler(struct drm_device *dev,
                                         u32 hotplug_trigger,
                                         const u32 *hpd)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        int i;
        bool storm_detected = false;

        if (!hotplug_trigger)
                return;

        spin_lock(&dev_priv->irq_lock);
        for (i = 1; i < HPD_NUM_PINS; i++) {
                WARN(((hpd[i] & hotplug_trigger) &&
                      dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED),
                     "Received HPD interrupt although disabled\n");

                if (!(hpd[i] & hotplug_trigger) ||
                    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
                        continue;

                dev_priv->hpd_event_bits |= (1 << i);
                if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
                                   dev_priv->hpd_stats[i].hpd_last_jiffies
                                   + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
                        dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
                        dev_priv->hpd_stats[i].hpd_cnt = 0;
                        DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i);
                } else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
                        dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
                        dev_priv->hpd_event_bits &= ~(1 << i);
                        DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
                        storm_detected = true;
                } else {
                        dev_priv->hpd_stats[i].hpd_cnt++;
                        DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i,
                                      dev_priv->hpd_stats[i].hpd_cnt);
                }
        }

        if (storm_detected)
                dev_priv->display.hpd_irq_setup(dev);
        spin_unlock(&dev_priv->irq_lock);

        /*
         * Our hotplug handler can grab modeset locks (by calling down into the
         * fb helpers). Hence it must not be run on our own dev-priv->wq work
         * queue for otherwise the flush_work in the pageflip code will
         * deadlock.
         */
        schedule_work(&dev_priv->hotplug_work);
}

static void gmbus_irq_handler(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

        wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

        wake_up_all(&dev_priv->gmbus_wait_queue);
}

#if defined(CONFIG_DEBUG_FS)
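/*
 * CRC entries go into a power-of-two ring buffer: head/tail and the
 * entries pointer are guarded by pipe_crc->lock, and readers sleep on
 * pipe_crc->wq (the debugfs pipe CRC interface is the intended
 * consumer, given the CONFIG_DEBUG_FS guard).
 */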
static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
                                         uint32_t crc0, uint32_t crc1,
                                         uint32_t crc2, uint32_t crc3,
                                         uint32_t crc4)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
        struct intel_pipe_crc_entry *entry;
        int head, tail;

        spin_lock(&pipe_crc->lock);

        if (!pipe_crc->entries) {
                spin_unlock(&pipe_crc->lock);
                DRM_ERROR("spurious interrupt\n");
                return;
        }

        head = pipe_crc->head;
        tail = pipe_crc->tail;

        if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
                spin_unlock(&pipe_crc->lock);
                DRM_ERROR("CRC buffer overflowing\n");
                return;
        }

        entry = &pipe_crc->entries[head];

        entry->frame = dev->driver->get_vblank_counter(dev, pipe);
        entry->crc[0] = crc0;
        entry->crc[1] = crc1;
        entry->crc[2] = crc2;
        entry->crc[3] = crc3;
        entry->crc[4] = crc4;

        head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
        pipe_crc->head = head;

        spin_unlock(&pipe_crc->lock);

        wake_up_interruptible(&pipe_crc->wq);
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
                             uint32_t crc0, uint32_t crc1,
                             uint32_t crc2, uint32_t crc3,
                             uint32_t crc4) {}
#endif

static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        display_pipe_crc_irq_handler(dev, pipe,
                                     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
                                     0, 0, 0, 0);
}

static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        display_pipe_crc_irq_handler(dev, pipe,
                                     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
                                     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
                                     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
                                     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
                                     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
}

static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t res1, res2;

        if (INTEL_INFO(dev)->gen >= 3)
                res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
        else
                res1 = 0;

        if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
                res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
        else
                res2 = 0;

        display_pipe_crc_irq_handler(dev, pipe,
                                     I915_READ(PIPE_CRC_RES_RED(pipe)),
                                     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
                                     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
                                     res1, res2);
}

/* The RPS events need forcewake, so we add them to a work queue and mask their
 * IMR bits until the work is done. Other interrupts can be processed without
 * the work queue. */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
        if (pm_iir & GEN6_PM_RPS_EVENTS) {
                spin_lock(&dev_priv->irq_lock);
                dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS;
                snb_disable_pm_irq(dev_priv, pm_iir & GEN6_PM_RPS_EVENTS);
                spin_unlock(&dev_priv->irq_lock);

                queue_work(dev_priv->wq, &dev_priv->rps.work);
        }

        if (HAS_VEBOX(dev_priv->dev)) {
                if (pm_iir & PM_VEBOX_USER_INTERRUPT)
                        notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);

                if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
                        DRM_ERROR("VEBOX CS error interrupt 0x%08x\n", pm_iir);
                        i915_handle_error(dev_priv->dev, false);
                }
        }
}

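/*
 * Top-level VLV interrupt handler: keeps looping until VLV_IIR, GTIIR
 * and GEN6_PMIIR all read back as zero, acking each source as it goes.
 */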
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
        struct drm_device *dev = (struct drm_device *) arg;
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        u32 iir, gt_iir, pm_iir;
        irqreturn_t ret = IRQ_NONE;
        unsigned long irqflags;
        int pipe;
        u32 pipe_stats[I915_MAX_PIPES];

        atomic_inc(&dev_priv->irq_received);

        while (true) {
                iir = I915_READ(VLV_IIR);
                gt_iir = I915_READ(GTIIR);
                pm_iir = I915_READ(GEN6_PMIIR);

                if (gt_iir == 0 && pm_iir == 0 && iir == 0)
                        goto out;

                ret = IRQ_HANDLED;

                snb_gt_irq_handler(dev, dev_priv, gt_iir);

                spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
                for_each_pipe(pipe) {
                        int reg = PIPESTAT(pipe);
                        pipe_stats[pipe] = I915_READ(reg);

                        /*
                         * Clear the PIPE*STAT regs before the IIR
                         */
                        if (pipe_stats[pipe] & 0x8000ffff) {
                                if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
                                        DRM_DEBUG_DRIVER("pipe %c underrun\n",
                                                         pipe_name(pipe));
                                I915_WRITE(reg, pipe_stats[pipe]);
                        }
                }
                spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

                for_each_pipe(pipe) {
                        if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
                                drm_handle_vblank(dev, pipe);

                        if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
                                intel_prepare_page_flip(dev, pipe);
                                intel_finish_page_flip(dev, pipe);
                        }

                        if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
                                i9xx_pipe_crc_irq_handler(dev, pipe);
                }

                /* Consume port.  Then clear IIR or we'll miss events */
                if (iir & I915_DISPLAY_PORT_INTERRUPT) {
                        u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
                        u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

                        DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
                                         hotplug_status);

                        intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);

                        I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
                        I915_READ(PORT_HOTPLUG_STAT);
                }

                if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
                        gmbus_irq_handler(dev);

                if (pm_iir)
                        gen6_rps_irq_handler(dev_priv, pm_iir);

                I915_WRITE(GTIIR, gt_iir);
                I915_WRITE(GEN6_PMIIR, pm_iir);
                I915_WRITE(VLV_IIR, iir);
        }

out:
        return ret;
}
1428
1429 static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
1430 {
1431         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1432         int pipe;
1433         u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
1434
1435         intel_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);
1436
1437         if (pch_iir & SDE_AUDIO_POWER_MASK) {
1438                 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
1439                                SDE_AUDIO_POWER_SHIFT);
1440                 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
1441                                  port_name(port));
1442         }
1443
1444         if (pch_iir & SDE_AUX_MASK)
1445                 dp_aux_irq_handler(dev);
1446
1447         if (pch_iir & SDE_GMBUS)
1448                 gmbus_irq_handler(dev);
1449
1450         if (pch_iir & SDE_AUDIO_HDCP_MASK)
1451                 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
1452
1453         if (pch_iir & SDE_AUDIO_TRANS_MASK)
1454                 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
1455
1456         if (pch_iir & SDE_POISON)
1457                 DRM_ERROR("PCH poison interrupt\n");
1458
1459         if (pch_iir & SDE_FDI_MASK)
1460                 for_each_pipe(pipe)
1461                         DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
1462                                          pipe_name(pipe),
1463                                          I915_READ(FDI_RX_IIR(pipe)));
1464
1465         if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
1466                 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
1467
1468         if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
1469                 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
1470
1471         if (pch_iir & SDE_TRANSA_FIFO_UNDER)
1472                 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
1473                                                           false))
1474                         DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");
1475
1476         if (pch_iir & SDE_TRANSB_FIFO_UNDER)
1477                 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
1478                                                           false))
1479                         DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");
1480 }
1481
1482 static void ivb_err_int_handler(struct drm_device *dev)
1483 {
1484         struct drm_i915_private *dev_priv = dev->dev_private;
1485         u32 err_int = I915_READ(GEN7_ERR_INT);
1486         enum pipe pipe;
1487
1488         if (err_int & ERR_INT_POISON)
1489                 DRM_ERROR("Poison interrupt\n");
1490
1491         for_each_pipe(pipe) {
1492                 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) {
1493                         if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
1494                                                                   false))
1495                                 DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n",
1496                                                  pipe_name(pipe));
1497                 }
1498
1499                 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
1500                         if (IS_IVYBRIDGE(dev))
1501                                 ivb_pipe_crc_irq_handler(dev, pipe);
1502                         else
1503                                 hsw_pipe_crc_irq_handler(dev, pipe);
1504                 }
1505         }
1506
1507         I915_WRITE(GEN7_ERR_INT, err_int);
1508 }
1509
1510 static void cpt_serr_int_handler(struct drm_device *dev)
1511 {
1512         struct drm_i915_private *dev_priv = dev->dev_private;
1513         u32 serr_int = I915_READ(SERR_INT);
1514
1515         if (serr_int & SERR_INT_POISON)
1516                 DRM_ERROR("PCH poison interrupt\n");
1517
1518         if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
1519                 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
1520                                                           false))
1521                         DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");
1522
1523         if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
1524                 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
1525                                                           false))
1526                         DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");
1527
1528         if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
1529                 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C,
1530                                                           false))
1531                         DRM_DEBUG_DRIVER("PCH transcoder C FIFO underrun\n");
1532
1533         I915_WRITE(SERR_INT, serr_int);
1534 }
1535
1536 static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
1537 {
1538         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1539         int pipe;
1540         u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
1541
1542         intel_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);
1543
1544         if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
1545                 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
1546                                SDE_AUDIO_POWER_SHIFT_CPT);
1547                 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
1548                                  port_name(port));
1549         }
1550
1551         if (pch_iir & SDE_AUX_MASK_CPT)
1552                 dp_aux_irq_handler(dev);
1553
1554         if (pch_iir & SDE_GMBUS_CPT)
1555                 gmbus_irq_handler(dev);
1556
1557         if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
1558                 DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
1559
1560         if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
1561                 DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
1562
1563         if (pch_iir & SDE_FDI_MASK_CPT)
1564                 for_each_pipe(pipe)
1565                         DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
1566                                          pipe_name(pipe),
1567                                          I915_READ(FDI_RX_IIR(pipe)));
1568
1569         if (pch_iir & SDE_ERROR_CPT)
1570                 cpt_serr_int_handler(dev);
1571 }
1572
1573 static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
1574 {
1575         struct drm_i915_private *dev_priv = dev->dev_private;
1576         enum pipe pipe;
1577
1578         if (de_iir & DE_AUX_CHANNEL_A)
1579                 dp_aux_irq_handler(dev);
1580
1581         if (de_iir & DE_GSE)
1582                 intel_opregion_asle_intr(dev);
1583
1584         if (de_iir & DE_POISON)
1585                 DRM_ERROR("Poison interrupt\n");
1586
1587         for_each_pipe(pipe) {
1588                 if (de_iir & DE_PIPE_VBLANK(pipe))
1589                         drm_handle_vblank(dev, pipe);
1590
1591                 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
1592                         if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
1593                                 DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n",
1594                                                  pipe_name(pipe));
1595
1596                 if (de_iir & DE_PIPE_CRC_DONE(pipe))
1597                         i9xx_pipe_crc_irq_handler(dev, pipe);
1598
1599                 /* plane/pipes map 1:1 on ilk+ */
1600                 if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
1601                         intel_prepare_page_flip(dev, pipe);
1602                         intel_finish_page_flip_plane(dev, pipe);
1603                 }
1604         }
1605
1606         /* check event from PCH */
1607         if (de_iir & DE_PCH_EVENT) {
1608                 u32 pch_iir = I915_READ(SDEIIR);
1609
1610                 if (HAS_PCH_CPT(dev))
1611                         cpt_irq_handler(dev, pch_iir);
1612                 else
1613                         ibx_irq_handler(dev, pch_iir);
1614
1615                 /* should clear PCH hotplug event before clearing CPU irq */
1616                 I915_WRITE(SDEIIR, pch_iir);
1617         }
1618
1619         if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
1620                 ironlake_rps_change_irq_handler(dev);
1621 }
1622
1623 static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
1624 {
1625         struct drm_i915_private *dev_priv = dev->dev_private;
1626         enum pipe i;
1627
1628         if (de_iir & DE_ERR_INT_IVB)
1629                 ivb_err_int_handler(dev);
1630
1631         if (de_iir & DE_AUX_CHANNEL_A_IVB)
1632                 dp_aux_irq_handler(dev);
1633
1634         if (de_iir & DE_GSE_IVB)
1635                 intel_opregion_asle_intr(dev);
1636
1637         for_each_pipe(i) {
1638                 if (de_iir & (DE_PIPE_VBLANK_IVB(i)))
1639                         drm_handle_vblank(dev, i);
1640
1641                 /* plane/pipes map 1:1 on ilk+ */
1642                 if (de_iir & DE_PLANE_FLIP_DONE_IVB(i)) {
1643                         intel_prepare_page_flip(dev, i);
1644                         intel_finish_page_flip_plane(dev, i);
1645                 }
1646         }
1647
1648         /* check event from PCH */
1649         if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
1650                 u32 pch_iir = I915_READ(SDEIIR);
1651
1652                 cpt_irq_handler(dev, pch_iir);
1653
1654                 /* clear PCH hotplug event before clearing CPU irq */
1655                 I915_WRITE(SDEIIR, pch_iir);
1656         }
1657 }
1658
1659 static irqreturn_t ironlake_irq_handler(int irq, void *arg)
1660 {
1661         struct drm_device *dev = (struct drm_device *) arg;
1662         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1663         u32 de_iir, gt_iir, de_ier, sde_ier = 0;
1664         irqreturn_t ret = IRQ_NONE;
1665
1666         atomic_inc(&dev_priv->irq_received);
1667
1668         /* We get interrupts on unclaimed registers, so check for this before we
1669          * do any I915_{READ,WRITE}. */
1670         intel_uncore_check_errors(dev);
1671
1672         /* disable master interrupt before clearing iir  */
1673         de_ier = I915_READ(DEIER);
1674         I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
1675         POSTING_READ(DEIER);
1676
1677         /* Disable south interrupts. We'll only write to SDEIIR once, so further
1678          * interrupts will be stored on its back queue, and then we'll be
1679          * able to process them after we restore SDEIER (as soon as we restore
1680          * it, we'll get an interrupt if SDEIIR still has something to process
1681          * due to its back queue). */
1682         if (!HAS_PCH_NOP(dev)) {
1683                 sde_ier = I915_READ(SDEIER);
1684                 I915_WRITE(SDEIER, 0);
1685                 POSTING_READ(SDEIER);
1686         }
1687
1688         gt_iir = I915_READ(GTIIR);
1689         if (gt_iir) {
1690                 if (INTEL_INFO(dev)->gen >= 6)
1691                         snb_gt_irq_handler(dev, dev_priv, gt_iir);
1692                 else
1693                         ilk_gt_irq_handler(dev, dev_priv, gt_iir);
1694                 I915_WRITE(GTIIR, gt_iir);
1695                 ret = IRQ_HANDLED;
1696         }
1697
1698         de_iir = I915_READ(DEIIR);
1699         if (de_iir) {
1700                 if (INTEL_INFO(dev)->gen >= 7)
1701                         ivb_display_irq_handler(dev, de_iir);
1702                 else
1703                         ilk_display_irq_handler(dev, de_iir);
1704                 I915_WRITE(DEIIR, de_iir);
1705                 ret = IRQ_HANDLED;
1706         }
1707
1708         if (INTEL_INFO(dev)->gen >= 6) {
1709                 u32 pm_iir = I915_READ(GEN6_PMIIR);
1710                 if (pm_iir) {
1711                         gen6_rps_irq_handler(dev_priv, pm_iir);
1712                         I915_WRITE(GEN6_PMIIR, pm_iir);
1713                         ret = IRQ_HANDLED;
1714                 }
1715         }
1716
1717         I915_WRITE(DEIER, de_ier);
1718         POSTING_READ(DEIER);
1719         if (!HAS_PCH_NOP(dev)) {
1720                 I915_WRITE(SDEIER, sde_ier);
1721                 POSTING_READ(SDEIER);
1722         }
1723
1724         return ret;
1725 }
1726
1727 static void i915_error_wake_up(struct drm_i915_private *dev_priv,
1728                                bool reset_completed)
1729 {
1730         struct intel_ring_buffer *ring;
1731         int i;
1732
1733         /*
1734          * Notify all waiters for GPU completion events that reset state has
1735          * been changed, and that they need to restart their wait after
1736          * checking for potential errors (and bail out to drop locks if there is
1737          * a gpu reset pending so that i915_error_work_func can acquire them).
1738          */
1739
1740         /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
1741         for_each_ring(ring, dev_priv, i)
1742                 wake_up_all(&ring->irq_queue);
1743
1744         /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
1745         wake_up_all(&dev_priv->pending_flip_queue);
1746
1747         /*
1748          * Signal tasks blocked in i915_gem_wait_for_error that the pending
1749          * reset state is cleared.
1750          */
1751         if (reset_completed)
1752                 wake_up_all(&dev_priv->gpu_error.reset_queue);
1753 }
1754
1755 /**
1756  * i915_error_work_func - do process context error handling work
1757  * @work: work struct
1758  *
1759  * Fire an error uevent so userspace can see that a hang or error
1760  * was detected.
1761  */
1762 static void i915_error_work_func(struct work_struct *work)
1763 {
1764         struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
1765                                                     work);
1766         drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t,
1767                                                     gpu_error);
1768         struct drm_device *dev = dev_priv->dev;
1769         char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
1770         char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
1771         char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
1772         int ret;
1773
1774         kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);
1775
1776         /*
1777          * Note that there's only one work item which does gpu resets, so we
1778          * need not worry about concurrent gpu resets potentially incrementing
1779          * error->reset_counter twice. We only need to take care of another
1780          * racing irq/hangcheck declaring the gpu dead for a second time. A
1781          * quick check for that is good enough: schedule_work ensures the
1782          * correct ordering between hang detection and this work item, and since
1783          * the reset in-progress bit is only ever set by code outside of this
1784          * work we don't need to worry about any other races.
1785          */
1786         if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
1787                 DRM_DEBUG_DRIVER("resetting chip\n");
1788                 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
1789                                    reset_event);
1790
1791                 /*
1792                  * All state reset _must_ be completed before we update the
1793                  * reset counter, for otherwise waiters might miss the reset
1794                  * pending state and not properly drop locks, resulting in
1795                  * deadlocks with the reset work.
1796                  */
1797                 ret = i915_reset(dev);
1798
1799                 intel_display_handle_reset(dev);
1800
1801                 if (ret == 0) {
1802                         /*
1803                          * After all the gem state is reset, increment the reset
1804                          * counter and wake up everyone waiting for the reset to
1805                          * complete.
1806                          *
1807                          * Since unlock operations are a one-sided barrier only,
1808                          * we need to insert a barrier here to order any seqno
1809                          * updates before the counter increment.
1811                          */
1812                         smp_mb__before_atomic_inc();
1813                         atomic_inc(&dev_priv->gpu_error.reset_counter);
1814
1815                         kobject_uevent_env(&dev->primary->kdev->kobj,
1816                                            KOBJ_CHANGE, reset_done_event);
1817                 } else {
1818                         atomic_set(&error->reset_counter, I915_WEDGED);
1819                 }
1820
1821                 /*
1822                  * Note: The wake_up also serves as a memory barrier so that
1823          * waiters see the updated value of the reset counter atomic_t.
1824                  */
1825                 i915_error_wake_up(dev_priv, true);
1826         }
1827 }
1828
1829 static void i915_report_and_clear_eir(struct drm_device *dev)
1830 {
1831         struct drm_i915_private *dev_priv = dev->dev_private;
1832         uint32_t instdone[I915_NUM_INSTDONE_REG];
1833         u32 eir = I915_READ(EIR);
1834         int pipe, i;
1835
1836         if (!eir)
1837                 return;
1838
1839         pr_err("render error detected, EIR: 0x%08x\n", eir);
1840
1841         i915_get_extra_instdone(dev, instdone);
1842
1843         if (IS_G4X(dev)) {
1844                 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
1845                         u32 ipeir = I915_READ(IPEIR_I965);
1846
1847                         pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
1848                         pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
1849                         for (i = 0; i < ARRAY_SIZE(instdone); i++)
1850                                 pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
1851                         pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
1852                         pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
1853                         I915_WRITE(IPEIR_I965, ipeir);
1854                         POSTING_READ(IPEIR_I965);
1855                 }
1856                 if (eir & GM45_ERROR_PAGE_TABLE) {
1857                         u32 pgtbl_err = I915_READ(PGTBL_ER);
1858                         pr_err("page table error\n");
1859                         pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
1860                         I915_WRITE(PGTBL_ER, pgtbl_err);
1861                         POSTING_READ(PGTBL_ER);
1862                 }
1863         }
1864
1865         if (!IS_GEN2(dev)) {
1866                 if (eir & I915_ERROR_PAGE_TABLE) {
1867                         u32 pgtbl_err = I915_READ(PGTBL_ER);
1868                         pr_err("page table error\n");
1869                         pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
1870                         I915_WRITE(PGTBL_ER, pgtbl_err);
1871                         POSTING_READ(PGTBL_ER);
1872                 }
1873         }
1874
1875         if (eir & I915_ERROR_MEMORY_REFRESH) {
1876                 pr_err("memory refresh error:\n");
1877                 for_each_pipe(pipe)
1878                         pr_err("pipe %c stat: 0x%08x\n",
1879                                pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
1880                 /* pipestat has already been acked */
1881         }
1882         if (eir & I915_ERROR_INSTRUCTION) {
1883                 pr_err("instruction error\n");
1884                 pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
1885                 for (i = 0; i < ARRAY_SIZE(instdone); i++)
1886                         pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
1887                 if (INTEL_INFO(dev)->gen < 4) {
1888                         u32 ipeir = I915_READ(IPEIR);
1889
1890                         pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
1891                         pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
1892                         pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
1893                         I915_WRITE(IPEIR, ipeir);
1894                         POSTING_READ(IPEIR);
1895                 } else {
1896                         u32 ipeir = I915_READ(IPEIR_I965);
1897
1898                         pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
1899                         pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
1900                         pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
1901                         pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
1902                         I915_WRITE(IPEIR_I965, ipeir);
1903                         POSTING_READ(IPEIR_I965);
1904                 }
1905         }
1906
1907         I915_WRITE(EIR, eir);
1908         POSTING_READ(EIR);
1909         eir = I915_READ(EIR);
1910         if (eir) {
1911                 /*
1912                  * some errors might have become stuck,
1913                  * mask them.
1914                  */
1915                 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
1916                 I915_WRITE(EMR, I915_READ(EMR) | eir);
1917                 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
1918         }
1919 }
1920
1921 /**
1922  * i915_handle_error - handle an error interrupt
1923  * @dev: drm device
1924  *
1925  * Do some basic checking of register state at error interrupt time and
1926  * dump it to the syslog.  Also call i915_capture_error_state() to make
1927  * sure we get a record and make it available in debugfs.  Fire a uevent
1928  * so userspace knows something bad happened (should trigger collection
1929  * of a ring dump etc.).
1930  */
1931 void i915_handle_error(struct drm_device *dev, bool wedged)
1932 {
1933         struct drm_i915_private *dev_priv = dev->dev_private;
1934
1935         i915_capture_error_state(dev);
1936         i915_report_and_clear_eir(dev);
1937
1938         if (wedged) {
1939                 atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
1940                                 &dev_priv->gpu_error.reset_counter);
1941
1942                 /*
1943                  * Wakeup waiting processes so that the reset work function
1944                  * i915_error_work_func doesn't deadlock trying to grab various
1945                  * locks. By bumping the reset counter first, the woken
1946                  * processes will see a reset in progress and back off,
1947                  * releasing their locks and then wait for the reset completion.
1948                  * We must do this for _all_ gpu waiters that might hold locks
1949                  * that the reset work needs to acquire.
1950                  *
1951                  * Note: The wake_up serves as the required memory barrier to
1952                  * ensure that the waiters see the updated value of the reset
1953                  * counter atomic_t.
1954                  */
1955                 i915_error_wake_up(dev_priv, false);
1956         }
1957
1958         /*
1959          * Our reset work can grab modeset locks (since it needs to reset the
1960          * state of outstanding pageflips). Hence it must not be run on our own
1961          * dev_priv->wq work queue, for otherwise the flush_work in the pageflip
1962          * code will deadlock.
1963          */
1964         schedule_work(&dev_priv->gpu_error.work);
1965 }
1966
1967 static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe)
1968 {
1969         drm_i915_private_t *dev_priv = dev->dev_private;
1970         struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
1971         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1972         struct drm_i915_gem_object *obj;
1973         struct intel_unpin_work *work;
1974         unsigned long flags;
1975         bool stall_detected;
1976
1977         /* Ignore early vblank irqs */
1978         if (intel_crtc == NULL)
1979                 return;
1980
1981         spin_lock_irqsave(&dev->event_lock, flags);
1982         work = intel_crtc->unpin_work;
1983
1984         if (work == NULL ||
1985             atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
1986             !work->enable_stall_check) {
1987                 /* Either the pending flip IRQ arrived, or we're too early. Don't check */
1988                 spin_unlock_irqrestore(&dev->event_lock, flags);
1989                 return;
1990         }
1991
1992         /* Potential stall - if we see that the flip has happened, assume a missed interrupt */
1993         obj = work->pending_flip_obj;
1994         if (INTEL_INFO(dev)->gen >= 4) {
1995                 int dspsurf = DSPSURF(intel_crtc->plane);
1996                 stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
1997                                         i915_gem_obj_ggtt_offset(obj);
1998         } else {
1999                 int dspaddr = DSPADDR(intel_crtc->plane);
2000                 stall_detected = I915_READ(dspaddr) == (i915_gem_obj_ggtt_offset(obj) +
2001                                                         crtc->y * crtc->fb->pitches[0] +
2002                                                         crtc->x * crtc->fb->bits_per_pixel/8);
2003         }
2004
2005         spin_unlock_irqrestore(&dev->event_lock, flags);
2006
2007         if (stall_detected) {
2008                 DRM_DEBUG_DRIVER("Pageflip stall detected\n");
2009                 intel_prepare_page_flip(dev, intel_crtc->plane);
2010         }
2011 }
2012
2013 /* Called from drm generic code, passed 'crtc' which
2014  * we use as a pipe index
2015  */
2016 static int i915_enable_vblank(struct drm_device *dev, int pipe)
2017 {
2018         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2019         unsigned long irqflags;
2020
2021         if (!i915_pipe_enabled(dev, pipe))
2022                 return -EINVAL;
2023
2024         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2025         if (INTEL_INFO(dev)->gen >= 4)
2026                 i915_enable_pipestat(dev_priv, pipe,
2027                                      PIPE_START_VBLANK_INTERRUPT_ENABLE);
2028         else
2029                 i915_enable_pipestat(dev_priv, pipe,
2030                                      PIPE_VBLANK_INTERRUPT_ENABLE);
2031
2032         /* maintain vblank delivery even in deep C-states */
2033         if (dev_priv->info->gen == 3)
2034                 I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
2035         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2036
2037         return 0;
2038 }
2039
2040 static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
2041 {
2042         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2043         unsigned long irqflags;
2044         uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2045                                                      DE_PIPE_VBLANK(pipe);
2046
2047         if (!i915_pipe_enabled(dev, pipe))
2048                 return -EINVAL;
2049
2050         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2051         ironlake_enable_display_irq(dev_priv, bit);
2052         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2053
2054         return 0;
2055 }
2056
2057 static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
2058 {
2059         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2060         unsigned long irqflags;
2061         u32 imr;
2062
2063         if (!i915_pipe_enabled(dev, pipe))
2064                 return -EINVAL;
2065
2066         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2067         imr = I915_READ(VLV_IMR);
2068         if (pipe == PIPE_A)
2069                 imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
2070         else
2071                 imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
2072         I915_WRITE(VLV_IMR, imr);
2073         i915_enable_pipestat(dev_priv, pipe,
2074                              PIPE_START_VBLANK_INTERRUPT_ENABLE);
2075         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2076
2077         return 0;
2078 }
2079
2080 /* Called from drm generic code, passed 'crtc' which
2081  * we use as a pipe index
2082  */
2083 static void i915_disable_vblank(struct drm_device *dev, int pipe)
2084 {
2085         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2086         unsigned long irqflags;
2087
2088         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2089         if (dev_priv->info->gen == 3)
2090                 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));
2091
2092         i915_disable_pipestat(dev_priv, pipe,
2093                               PIPE_VBLANK_INTERRUPT_ENABLE |
2094                               PIPE_START_VBLANK_INTERRUPT_ENABLE);
2095         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2096 }
2097
2098 static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
2099 {
2100         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2101         unsigned long irqflags;
2102         uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2103                                                      DE_PIPE_VBLANK(pipe);
2104
2105         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2106         ironlake_disable_display_irq(dev_priv, bit);
2107         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2108 }
2109
2110 static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
2111 {
2112         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2113         unsigned long irqflags;
2114         u32 imr;
2115
2116         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2117         i915_disable_pipestat(dev_priv, pipe,
2118                               PIPE_START_VBLANK_INTERRUPT_ENABLE);
2119         imr = I915_READ(VLV_IMR);
2120         if (pipe == PIPE_A)
2121                 imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
2122         else
2123                 imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
2124         I915_WRITE(VLV_IMR, imr);
2125         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2126 }
2127
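/* Requests are appended to request_list, so the entry just before the list
 * head is the most recently submitted request on this ring. */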
2128 static u32
2129 ring_last_seqno(struct intel_ring_buffer *ring)
2130 {
2131         return list_entry(ring->request_list.prev,
2132                           struct drm_i915_gem_request, list)->seqno;
2133 }
2134
2135 static bool
2136 ring_idle(struct intel_ring_buffer *ring, u32 seqno)
2137 {
2138         return (list_empty(&ring->request_list) ||
2139                 i915_seqno_passed(seqno, ring_last_seqno(ring)));
2140 }
2141
2142 static struct intel_ring_buffer *
2143 semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno)
2144 {
2145         struct drm_i915_private *dev_priv = ring->dev->dev_private;
2146         u32 cmd, ipehr, acthd, acthd_min;
2147
2148         ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
2149         if ((ipehr & ~(0x3 << 16)) !=
2150             (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER))
2151                 return NULL;
2152
2153         /* ACTHD is likely pointing to the dword after the actual command,
2154          * so scan backwards until we find the MBOX.
2155          */
2156         acthd = intel_ring_get_active_head(ring) & HEAD_ADDR;
2157         acthd_min = max((int)acthd - 3 * 4, 0);
2158         do {
2159                 cmd = ioread32(ring->virtual_start + acthd);
2160                 if (cmd == ipehr)
2161                         break;
2162
2163                 acthd -= 4;
2164                 if (acthd < acthd_min)
2165                         return NULL;
2166         } while (1);
2167
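	/* The dword following the MBOX command carries the semaphore wait
	 * value; the register-select bit in IPEHR is mapped back to one of
	 * the other two rings (three rings total on these chips) below. */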
2168         *seqno = ioread32(ring->virtual_start+acthd+4)+1;
2169         return &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3];
2170 }
2171
2172 static int semaphore_passed(struct intel_ring_buffer *ring)
2173 {
2174         struct drm_i915_private *dev_priv = ring->dev->dev_private;
2175         struct intel_ring_buffer *signaller;
2176         u32 seqno, ctl;
2177
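	/* Mark this ring as visited first, so a cycle of rings waiting on
	 * each other's semaphores is detected as a deadlock (-1 below)
	 * instead of recursing forever through semaphore_passed(). */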
2178         ring->hangcheck.deadlock = true;
2179
2180         signaller = semaphore_waits_for(ring, &seqno);
2181         if (signaller == NULL || signaller->hangcheck.deadlock)
2182                 return -1;
2183
2184         /* cursory check for an unkickable deadlock */
2185         ctl = I915_READ_CTL(signaller);
2186         if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0)
2187                 return -1;
2188
2189         return i915_seqno_passed(signaller->get_seqno(signaller, false), seqno);
2190 }
2191
2192 static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
2193 {
2194         struct intel_ring_buffer *ring;
2195         int i;
2196
2197         for_each_ring(ring, dev_priv, i)
2198                 ring->hangcheck.deadlock = false;
2199 }
2200
2201 static enum intel_ring_hangcheck_action
2202 ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
2203 {
2204         struct drm_device *dev = ring->dev;
2205         struct drm_i915_private *dev_priv = dev->dev_private;
2206         u32 tmp;
2207
2208         if (ring->hangcheck.acthd != acthd)
2209                 return HANGCHECK_ACTIVE;
2210
2211         if (IS_GEN2(dev))
2212                 return HANGCHECK_HUNG;
2213
2214         /* Is the chip hanging on a WAIT_FOR_EVENT?
2215          * If so we can simply poke the RB_WAIT bit
2216          * and break the hang. This should work on
2217          * all but the second generation chipsets.
2218          */
2219         tmp = I915_READ_CTL(ring);
2220         if (tmp & RING_WAIT) {
2221                 DRM_ERROR("Kicking stuck wait on %s\n",
2222                           ring->name);
2223                 i915_handle_error(dev, false);
2224                 I915_WRITE_CTL(ring, tmp);
2225                 return HANGCHECK_KICK;
2226         }
2227
2228         if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
2229                 switch (semaphore_passed(ring)) {
2230                 default:
2231                         return HANGCHECK_HUNG;
2232                 case 1:
2233                         DRM_ERROR("Kicking stuck semaphore on %s\n",
2234                                   ring->name);
2235                         i915_handle_error(dev, false);
2236                         I915_WRITE_CTL(ring, tmp);
2237                         return HANGCHECK_KICK;
2238                 case 0:
2239                         return HANGCHECK_WAIT;
2240                 }
2241         }
2242
2243         return HANGCHECK_HUNG;
2244 }
2245
2246 /**
2247  * This is called when the chip hasn't reported back with completed
2248  * batchbuffers in a long time. We keep track of per-ring seqno progress and,
2249  * if there is no progress, the hangcheck score for that ring is increased.
2250  * Further, acthd is inspected to see if the ring is stuck. In the stuck case
2251  * we kick the ring. If we see no progress on three subsequent calls
2252  * we assume the chip is wedged and try to fix it by resetting the chip.
2253  */
2254 static void i915_hangcheck_elapsed(unsigned long data)
2255 {
2256         struct drm_device *dev = (struct drm_device *)data;
2257         drm_i915_private_t *dev_priv = dev->dev_private;
2258         struct intel_ring_buffer *ring;
2259         int i;
2260         int busy_count = 0, rings_hung = 0;
2261         bool stuck[I915_NUM_RINGS] = { 0 };
2262 #define BUSY 1
2263 #define KICK 5
2264 #define HUNG 20
2265 #define FIRE 30
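
/* Weights added to a ring's hangcheck score on each tick; once a score
 * climbs above FIRE the ring is reported hung and error handling below
 * kicks in. */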
2266
2267         if (!i915_enable_hangcheck)
2268                 return;
2269
2270         for_each_ring(ring, dev_priv, i) {
2271                 u32 seqno, acthd;
2272                 bool busy = true;
2273
2274                 semaphore_clear_deadlocks(dev_priv);
2275
2276                 seqno = ring->get_seqno(ring, false);
2277                 acthd = intel_ring_get_active_head(ring);
2278
2279                 if (ring->hangcheck.seqno == seqno) {
2280                         if (ring_idle(ring, seqno)) {
2281                                 ring->hangcheck.action = HANGCHECK_IDLE;
2282
2283                                 if (waitqueue_active(&ring->irq_queue)) {
2284                                         /* Issue a wake-up to catch stuck h/w. */
2285                                         if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
2286                                                 if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
2287                                                         DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
2288                                                                   ring->name);
2289                                                 else
2290                                                         DRM_INFO("Fake missed irq on %s\n",
2291                                                                  ring->name);
2292                                                 wake_up_all(&ring->irq_queue);
2293                                         }
2294                                         /* Safeguard against driver failure */
2295                                         ring->hangcheck.score += BUSY;
2296                                 } else
2297                                         busy = false;
2298                         } else {
2299                                 /* We always increment the hangcheck score
2300                                  * if the ring is busy and still processing
2301                                  * the same request, so that no single request
2302                                  * can run indefinitely (such as a chain of
2303                                  * batches). The only time we do not increment
2304                                  * the hangcheck score on this ring is when the
2305                                  * ring is in a legitimate wait for another
2306                                  * ring. In that case the waiting ring is a
2307                                  * victim and we want to be sure we catch the
2308                                  * right culprit. Then every time we do kick
2309                                  * the ring, we add a small increment to the
2310                                  * score so that we can catch a batch that is
2311                                  * being repeatedly kicked and so responsible
2312                                  * for stalling the machine.
2313                                  */
2314                                 ring->hangcheck.action = ring_stuck(ring,
2315                                                                     acthd);
2316
2317                                 switch (ring->hangcheck.action) {
2318                                 case HANGCHECK_IDLE:
2319                                 case HANGCHECK_WAIT:
2320                                         break;
2321                                 case HANGCHECK_ACTIVE:
2322                                         ring->hangcheck.score += BUSY;
2323                                         break;
2324                                 case HANGCHECK_KICK:
2325                                         ring->hangcheck.score += KICK;
2326                                         break;
2327                                 case HANGCHECK_HUNG:
2328                                         ring->hangcheck.score += HUNG;
2329                                         stuck[i] = true;
2330                                         break;
2331                                 }
2332                         }
2333                 } else {
2334                         ring->hangcheck.action = HANGCHECK_ACTIVE;
2335
2336                         /* Gradually reduce the count so that we catch DoS
2337                          * attempts across multiple batches.
2338                          */
2339                         if (ring->hangcheck.score > 0)
2340                                 ring->hangcheck.score--;
2341                 }
2342
2343                 ring->hangcheck.seqno = seqno;
2344                 ring->hangcheck.acthd = acthd;
2345                 busy_count += busy;
2346         }
2347
2348         for_each_ring(ring, dev_priv, i) {
2349                 if (ring->hangcheck.score > FIRE) {
2350                         DRM_INFO("%s on %s\n",
2351                                  stuck[i] ? "stuck" : "no progress",
2352                                  ring->name);
2353                         rings_hung++;
2354                 }
2355         }
2356
2357         if (rings_hung)
2358                 return i915_handle_error(dev, true);
2359
2360         if (busy_count)
2361                 /* Reset timer in case the chip hangs without another request
2362                  * being added */
2363                 i915_queue_hangcheck(dev);
2364 }
2365
2366 void i915_queue_hangcheck(struct drm_device *dev)
2367 {
2368         struct drm_i915_private *dev_priv = dev->dev_private;
2369         if (!i915_enable_hangcheck)
2370                 return;
2371
2372         mod_timer(&dev_priv->gpu_error.hangcheck_timer,
2373                   round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
2374 }
2375
2376 static void ibx_irq_preinstall(struct drm_device *dev)
2377 {
2378         struct drm_i915_private *dev_priv = dev->dev_private;
2379
2380         if (HAS_PCH_NOP(dev))
2381                 return;
2382
2383         /* south display irq */
2384         I915_WRITE(SDEIMR, 0xffffffff);
2385         /*
2386          * SDEIER is also touched by the interrupt handler to work around missed
2387          * PCH interrupts. Hence we can't update it after the interrupt handler
2388          * is enabled - instead we unconditionally enable all PCH interrupt
2389          * sources here, but then only unmask them as needed with SDEIMR.
2390          */
2391         I915_WRITE(SDEIER, 0xffffffff);
2392         POSTING_READ(SDEIER);
2393 }
2394
2395 static void gen5_gt_irq_preinstall(struct drm_device *dev)
2396 {
2397         struct drm_i915_private *dev_priv = dev->dev_private;
2398
2399         /* and GT */
2400         I915_WRITE(GTIMR, 0xffffffff);
2401         I915_WRITE(GTIER, 0x0);
2402         POSTING_READ(GTIER);
2403
2404         if (INTEL_INFO(dev)->gen >= 6) {
2405                 /* and PM */
2406                 I915_WRITE(GEN6_PMIMR, 0xffffffff);
2407                 I915_WRITE(GEN6_PMIER, 0x0);
2408                 POSTING_READ(GEN6_PMIER);
2409         }
2410 }
2411
2412 /* drm_dma.h hooks */
2414 static void ironlake_irq_preinstall(struct drm_device *dev)
2415 {
2416         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2417
2418         atomic_set(&dev_priv->irq_received, 0);
2419
2420         I915_WRITE(HWSTAM, 0xeffe);
2421
2422         I915_WRITE(DEIMR, 0xffffffff);
2423         I915_WRITE(DEIER, 0x0);
2424         POSTING_READ(DEIER);
2425
2426         gen5_gt_irq_preinstall(dev);
2427
2428         ibx_irq_preinstall(dev);
2429 }
2430
2431 static void valleyview_irq_preinstall(struct drm_device *dev)
2432 {
2433         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2434         int pipe;
2435
2436         atomic_set(&dev_priv->irq_received, 0);
2437
2438         /* VLV magic */
2439         I915_WRITE(VLV_IMR, 0);
2440         I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
2441         I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
2442         I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
2443
2444         /* and GT */
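	/* GTIIR is acked twice below, presumably to flush sticky status
	 * bits. */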
2445         I915_WRITE(GTIIR, I915_READ(GTIIR));
2446         I915_WRITE(GTIIR, I915_READ(GTIIR));
2447
2448         gen5_gt_irq_preinstall(dev);
2449
2450         I915_WRITE(DPINVGTT, 0xff);
2451
2452         I915_WRITE(PORT_HOTPLUG_EN, 0);
2453         I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2454         for_each_pipe(pipe)
2455                 I915_WRITE(PIPESTAT(pipe), 0xffff);
2456         I915_WRITE(VLV_IIR, 0xffffffff);
2457         I915_WRITE(VLV_IMR, 0xffffffff);
2458         I915_WRITE(VLV_IER, 0x0);
2459         POSTING_READ(VLV_IER);
2460 }
2461
2462 static void ibx_hpd_irq_setup(struct drm_device *dev)
2463 {
2464         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2465         struct drm_mode_config *mode_config = &dev->mode_config;
2466         struct intel_encoder *intel_encoder;
2467         u32 hotplug_irqs, hotplug, enabled_irqs = 0;
2468
2469         if (HAS_PCH_IBX(dev)) {
2470                 hotplug_irqs = SDE_HOTPLUG_MASK;
2471                 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
2472                         if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
2473                                 enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
2474         } else {
2475                 hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
2476                 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
2477                         if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
2478                                 enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
2479         }
2480
2481         ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
2482
2483         /*
2484          * Enable digital hotplug on the PCH, and configure the DP short pulse
2485          * duration to 2ms (which is the minimum in the Display Port spec)
2486          *
2487          * This register is the same on all known PCH chips.
2488          */
2489         hotplug = I915_READ(PCH_PORT_HOTPLUG);
2490         hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
2491         hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
2492         hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
2493         hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
2494         I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
2495 }
2496
2497 static void ibx_irq_postinstall(struct drm_device *dev)
2498 {
2499         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2500         u32 mask;
2501
2502         if (HAS_PCH_NOP(dev))
2503                 return;
2504
2505         if (HAS_PCH_IBX(dev)) {
2506                 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_TRANSB_FIFO_UNDER |
2507                        SDE_TRANSA_FIFO_UNDER | SDE_POISON;
2508         } else {
2509                 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT | SDE_ERROR_CPT;
2510
2511                 I915_WRITE(SERR_INT, I915_READ(SERR_INT));
2512         }
2513
2514         I915_WRITE(SDEIIR, I915_READ(SDEIIR));
2515         I915_WRITE(SDEIMR, ~mask);
2516 }
2517
2518 static void gen5_gt_irq_postinstall(struct drm_device *dev)
2519 {
2520         struct drm_i915_private *dev_priv = dev->dev_private;
2521         u32 pm_irqs, gt_irqs;
2522
2523         pm_irqs = gt_irqs = 0;
2524
2525         dev_priv->gt_irq_mask = ~0;
2526         if (HAS_L3_DPF(dev)) {
2527                 /* L3 parity interrupt is always unmasked. */
2528                 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
2529                 gt_irqs |= GT_PARITY_ERROR(dev);
2530         }
2531
2532         gt_irqs |= GT_RENDER_USER_INTERRUPT;
2533         if (IS_GEN5(dev)) {
2534                 gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
2535                            ILK_BSD_USER_INTERRUPT;
2536         } else {
2537                 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
2538         }
2539
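	/* Usual enable sequence: ack stale IIR bits first, then program the
	 * mask, then enable the desired sources in IER. */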
2540         I915_WRITE(GTIIR, I915_READ(GTIIR));
2541         I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
2542         I915_WRITE(GTIER, gt_irqs);
2543         POSTING_READ(GTIER);
2544
2545         if (INTEL_INFO(dev)->gen >= 6) {
2546                 pm_irqs |= GEN6_PM_RPS_EVENTS;
2547
2548                 if (HAS_VEBOX(dev))
2549                         pm_irqs |= PM_VEBOX_USER_INTERRUPT;
2550
2551                 dev_priv->pm_irq_mask = 0xffffffff;
2552                 I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
2553                 I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
2554                 I915_WRITE(GEN6_PMIER, pm_irqs);
2555                 POSTING_READ(GEN6_PMIER);
2556         }
2557 }
2558
2559 static int ironlake_irq_postinstall(struct drm_device *dev)
2560 {
2561         unsigned long irqflags;
2562         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2563         u32 display_mask, extra_mask;
2564
2565         if (INTEL_INFO(dev)->gen >= 7) {
2566                 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
2567                                 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
2568                                 DE_PLANEB_FLIP_DONE_IVB |
2569                                 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB |
2570                                 DE_ERR_INT_IVB);
2571                 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
2572                               DE_PIPEA_VBLANK_IVB);
2573
2574                 I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
2575         } else {
2576                 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
2577                                 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
2578                                 DE_AUX_CHANNEL_A |
2579                                 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
2580                                 DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
2581                                 DE_POISON);
2582                 extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT;
2583         }
2584
2585         dev_priv->irq_mask = ~display_mask;
2586
2587         /* should always be able to generate irqs */
2588         I915_WRITE(DEIIR, I915_READ(DEIIR));
2589         I915_WRITE(DEIMR, dev_priv->irq_mask);
2590         I915_WRITE(DEIER, display_mask | extra_mask);
2591         POSTING_READ(DEIER);
2592
2593         gen5_gt_irq_postinstall(dev);
2594
2595         ibx_irq_postinstall(dev);
2596
2597         if (IS_IRONLAKE_M(dev)) {
2598                 /* Enable PCU event interrupts
2599                  *
2600                  * spinlocking not required here for correctness since interrupt
2601                  * setup is guaranteed to run in single-threaded context. But we
2602                  * need it to make the assert_spin_locked happy. */
2603                 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2604                 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
2605                 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2606         }
2607
2608         return 0;
2609 }
2610
2611 static int valleyview_irq_postinstall(struct drm_device *dev)
2612 {
2613         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2614         u32 enable_mask;
2615         u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV |
2616                 PIPE_CRC_DONE_ENABLE;
2617         unsigned long irqflags;
2618
2619         enable_mask = I915_DISPLAY_PORT_INTERRUPT;
2620         enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2621                 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
2622                 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2623                 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
2624
2625         /*
2626          * Leave vblank interrupts masked initially; enable/disable will
2627          * toggle them based on usage.
2628          */
2629         dev_priv->irq_mask = (~enable_mask) |
2630                 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
2631                 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
2632
2633         I915_WRITE(PORT_HOTPLUG_EN, 0);
2634         POSTING_READ(PORT_HOTPLUG_EN);
2635
2636         I915_WRITE(VLV_IMR, dev_priv->irq_mask);
2637         I915_WRITE(VLV_IER, enable_mask);
2638         I915_WRITE(VLV_IIR, 0xffffffff);
2639         I915_WRITE(PIPESTAT(0), 0xffff);
2640         I915_WRITE(PIPESTAT(1), 0xffff);
2641         POSTING_READ(VLV_IER);
2642
2643         /* Interrupt setup is already guaranteed to be single-threaded, this is
2644          * just to make the assert_spin_locked check happy. */
2645         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2646         i915_enable_pipestat(dev_priv, PIPE_A, pipestat_enable);
2647         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_EVENT_ENABLE);
2648         i915_enable_pipestat(dev_priv, PIPE_B, pipestat_enable);
2649         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2650
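	/* VLV_IIR is acked twice here, as in the preinstall path above,
	 * presumably to flush sticky status bits. */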
2651         I915_WRITE(VLV_IIR, 0xffffffff);
2652         I915_WRITE(VLV_IIR, 0xffffffff);
2653
2654         gen5_gt_irq_postinstall(dev);
2655
2656         /* ack & enable invalid PTE error interrupts */
2657 #if 0 /* FIXME: add support to irq handler for checking these bits */
2658         I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
2659         I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
2660 #endif
2661
2662         I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
2663
2664         return 0;
2665 }
2666
2667 static void valleyview_irq_uninstall(struct drm_device *dev)
2668 {
2669         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2670         int pipe;
2671
2672         if (!dev_priv)
2673                 return;
2674
2675         del_timer_sync(&dev_priv->hotplug_reenable_timer);
2676
2677         for_each_pipe(pipe)
2678                 I915_WRITE(PIPESTAT(pipe), 0xffff);
2679
2680         I915_WRITE(HWSTAM, 0xffffffff);
2681         I915_WRITE(PORT_HOTPLUG_EN, 0);
2682         I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2683         for_each_pipe(pipe)
2684                 I915_WRITE(PIPESTAT(pipe), 0xffff);
2685         I915_WRITE(VLV_IIR, 0xffffffff);
2686         I915_WRITE(VLV_IMR, 0xffffffff);
2687         I915_WRITE(VLV_IER, 0x0);
2688         POSTING_READ(VLV_IER);
2689 }
2690
2691 static void ironlake_irq_uninstall(struct drm_device *dev)
2692 {
2693         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2694
2695         if (!dev_priv)
2696                 return;
2697
2698         del_timer_sync(&dev_priv->hotplug_reenable_timer);
2699
2700         I915_WRITE(HWSTAM, 0xffffffff);
2701
2702         I915_WRITE(DEIMR, 0xffffffff);
2703         I915_WRITE(DEIER, 0x0);
2704         I915_WRITE(DEIIR, I915_READ(DEIIR));
2705         if (IS_GEN7(dev))
2706                 I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
2707
2708         I915_WRITE(GTIMR, 0xffffffff);
2709         I915_WRITE(GTIER, 0x0);
2710         I915_WRITE(GTIIR, I915_READ(GTIIR));
2711
2712         if (HAS_PCH_NOP(dev))
2713                 return;
2714
2715         I915_WRITE(SDEIMR, 0xffffffff);
2716         I915_WRITE(SDEIER, 0x0);
2717         I915_WRITE(SDEIIR, I915_READ(SDEIIR));
2718         if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
2719                 I915_WRITE(SERR_INT, I915_READ(SERR_INT));
2720 }
2721
2722 static void i8xx_irq_preinstall(struct drm_device * dev)
2723 {
2724         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2725         int pipe;
2726
2727         atomic_set(&dev_priv->irq_received, 0);
2728
2729         for_each_pipe(pipe)
2730                 I915_WRITE(PIPESTAT(pipe), 0);
2731         I915_WRITE16(IMR, 0xffff);
2732         I915_WRITE16(IER, 0x0);
2733         POSTING_READ16(IER);
2734 }
2735
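/*
 * Postinstall opens the gates: EMR selects which error conditions feed
 * the error interrupt (page-table and memory-refresh errors stay
 * unmasked), IMR unmasks the sources we always want and IER enables them.
 * Note the flip-pending bits are unmasked in IMR but not set in IER, so
 * they show up in IIR without generating interrupts of their own.
 */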
2736 static int i8xx_irq_postinstall(struct drm_device *dev)
2737 {
2738         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2739         unsigned long irqflags;
2740
2741         I915_WRITE16(EMR,
2742                      ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
2743
2744         /* Unmask the interrupts that we always want on. */
2745         dev_priv->irq_mask =
2746                 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2747                   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2748                   I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2749                   I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
2750                   I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2751         I915_WRITE16(IMR, dev_priv->irq_mask);
2752
2753         I915_WRITE16(IER,
2754                      I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2755                      I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2756                      I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
2757                      I915_USER_INTERRUPT);
2758         POSTING_READ16(IER);
2759
2760         /* Interrupt setup is already guaranteed to be single-threaded; this is
2761          * just to make the assert_spin_locked check happy. */
2762         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2763         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE);
2764         i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE);
2765         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2766
2767         return 0;
2768 }
2769
2770 /*
2771  * Returns true when a page flip has completed.
2772  */
2773 static bool i8xx_handle_vblank(struct drm_device *dev,
2774                                int pipe, u16 iir)
2775 {
2776         drm_i915_private_t *dev_priv = dev->dev_private;
2777         u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(pipe);
2778
2779         if (!drm_handle_vblank(dev, pipe))
2780                 return false;
2781
2782         if ((iir & flip_pending) == 0)
2783                 return false;
2784
2785         intel_prepare_page_flip(dev, pipe);
2786
2787         /* We detect FlipDone by looking for the change in PendingFlip from '1'
2788          * to '0' on the following vblank, i.e. IIR has the PendingFlip
2789          * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
2790          * the flip is completed (no longer pending). Since this doesn't raise
2791          * an interrupt per se, we watch for the change at vblank.
2792          */
2793         if (I915_READ16(ISR) & flip_pending)
2794                 return false;
2795
2796         intel_finish_page_flip(dev, pipe);
2797
2798         return true;
2799 }
2800
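/*
 * The handler follows the classic ack protocol: snapshot IIR, read and
 * clear PIPESTAT under irq_lock before acking IIR (a cleared pipestat bit
 * won't re-assert in IIR, but still raises line interrupts for non-MSI),
 * then write IIR back minus any still-pending flip bits. Flip-pending
 * bits are left set in IIR until i8xx_handle_vblank() sees the flip
 * complete, at which point they drop out of flip_mask and get acked.
 */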
2801 static irqreturn_t i8xx_irq_handler(int irq, void *arg)
2802 {
2803         struct drm_device *dev = (struct drm_device *) arg;
2804         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2805         u16 iir, new_iir;
2806         u32 pipe_stats[2];
2807         unsigned long irqflags;
2808         int pipe;
2809         u16 flip_mask =
2810                 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2811                 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
2812
2813         atomic_inc(&dev_priv->irq_received);
2814
2815         iir = I915_READ16(IIR);
2816         if (iir == 0)
2817                 return IRQ_NONE;
2818
2819         while (iir & ~flip_mask) {
2820                 /* Can't rely on pipestat interrupt bit in iir as it might
2821                  * have been cleared after the pipestat interrupt was received.
2822                  * It doesn't set the bit in iir again, but it still produces
2823                  * interrupts (for non-MSI).
2824                  */
2825                 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2826                 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
2827                         i915_handle_error(dev, false);
2828
2829                 for_each_pipe(pipe) {
2830                         int reg = PIPESTAT(pipe);
2831                         pipe_stats[pipe] = I915_READ(reg);
2832
2833                         /*
2834                          * Clear the PIPE*STAT regs before the IIR
2835                          */
2836                         if (pipe_stats[pipe] & 0x8000ffff) {
2837                                 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2838                                         DRM_DEBUG_DRIVER("pipe %c underrun\n",
2839                                                          pipe_name(pipe));
2840                                 I915_WRITE(reg, pipe_stats[pipe]);
2841                         }
2842                 }
2843                 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2844
2845                 I915_WRITE16(IIR, iir & ~flip_mask);
2846                 new_iir = I915_READ16(IIR); /* Flush posted writes */
2847
2848                 i915_update_dri1_breadcrumb(dev);
2849
2850                 if (iir & I915_USER_INTERRUPT)
2851                         notify_ring(dev, &dev_priv->ring[RCS]);
2852
2853                 for_each_pipe(pipe) {
2854                         if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
2855                             i8xx_handle_vblank(dev, pipe, iir))
2856                                 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
2857
2858                         if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
2859                                 i9xx_pipe_crc_irq_handler(dev, pipe);
2860                 }
2861
2862                 iir = new_iir;
2863         }
2864
2865         return IRQ_HANDLED;
2866 }
2867
2868 static void i8xx_irq_uninstall(struct drm_device *dev)
2869 {
2870         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2871         int pipe;
2872
2873         for_each_pipe(pipe) {
2874                 /* Clear enable bits; then clear status bits */
2875                 I915_WRITE(PIPESTAT(pipe), 0);
2876                 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
2877         }
2878         I915_WRITE16(IMR, 0xffff);
2879         I915_WRITE16(IER, 0x0);
2880         I915_WRITE16(IIR, I915_READ16(IIR));
2881 }
2882
2883 static void i915_irq_preinstall(struct drm_device *dev)
2884 {
2885         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2886         int pipe;
2887
2888         atomic_set(&dev_priv->irq_received, 0);
2889
2890         if (I915_HAS_HOTPLUG(dev)) {
2891                 I915_WRITE(PORT_HOTPLUG_EN, 0);
2892                 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2893         }
2894
2895         I915_WRITE16(HWSTAM, 0xeffe);
2896         for_each_pipe(pipe)
2897                 I915_WRITE(PIPESTAT(pipe), 0);
2898         I915_WRITE(IMR, 0xffffffff);
2899         I915_WRITE(IER, 0x0);
2900         POSTING_READ(IER);
2901 }
2902
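/*
 * Gen3 adds the display port (hotplug) interrupt on top of the gen2
 * scheme where the platform supports it: PORT_HOTPLUG_EN is cleared here
 * and only programmed later through i915_hpd_irq_setup(), once the
 * encoders have registered their hotplug pins.
 */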
2903 static int i915_irq_postinstall(struct drm_device *dev)
2904 {
2905         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2906         u32 enable_mask;
2907         unsigned long irqflags;
2908
2909         I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
2910
2911         /* Unmask the interrupts that we always want on. */
2912         dev_priv->irq_mask =
2913                 ~(I915_ASLE_INTERRUPT |
2914                   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2915                   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2916                   I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2917                   I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
2918                   I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2919
2920         enable_mask =
2921                 I915_ASLE_INTERRUPT |
2922                 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2923                 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2924                 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
2925                 I915_USER_INTERRUPT;
2926
2927         if (I915_HAS_HOTPLUG(dev)) {
2928                 I915_WRITE(PORT_HOTPLUG_EN, 0);
2929                 POSTING_READ(PORT_HOTPLUG_EN);
2930
2931                 /* Enable in IER... */
2932                 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
2933                 /* and unmask in IMR */
2934                 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
2935         }
2936
2937         I915_WRITE(IMR, dev_priv->irq_mask);
2938         I915_WRITE(IER, enable_mask);
2939         POSTING_READ(IER);
2940
2941         i915_enable_asle_pipestat(dev);
2942
2943         /* Interrupt setup is already guaranteed to be single-threaded; this is
2944          * just to make the assert_spin_locked check happy. */
2945         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2946         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE);
2947         i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE);
2948         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2949
2950         return 0;
2951 }
2952
2953 /*
2954  * Returns true when a page flip has completed.
2955  */
2956 static bool i915_handle_vblank(struct drm_device *dev,
2957                                int plane, int pipe, u32 iir)
2958 {
2959         drm_i915_private_t *dev_priv = dev->dev_private;
2960         u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
2961
2962         if (!drm_handle_vblank(dev, pipe))
2963                 return false;
2964
2965         if ((iir & flip_pending) == 0)
2966                 return false;
2967
2968         intel_prepare_page_flip(dev, plane);
2969
2970         /* We detect FlipDone by looking for the change in PendingFlip from '1'
2971          * to '0' on the following vblank, i.e. IIR has the PendingFlip
2972          * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
2973          * the flip is completed (no longer pending). Since this doesn't raise
2974          * an interrupt per se, we watch for the change at vblank.
2975          */
2976         if (I915_READ(ISR) & flip_pending)
2977                 return false;
2978
2979         intel_finish_page_flip(dev, pipe);
2980
2981         return true;
2982 }
2983
2984 static irqreturn_t i915_irq_handler(int irq, void *arg)
2985 {
2986         struct drm_device *dev = (struct drm_device *) arg;
2987         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2988         u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
2989         unsigned long irqflags;
2990         u32 flip_mask =
2991                 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2992                 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
2993         int pipe, ret = IRQ_NONE;
2994
2995         atomic_inc(&dev_priv->irq_received);
2996
2997         iir = I915_READ(IIR);
2998         do {
2999                 bool irq_received = (iir & ~flip_mask) != 0;
3000                 bool blc_event = false;
3001
3002                 /* Can't rely on pipestat interrupt bit in iir as it might
3003                  * have been cleared after the pipestat interrupt was received.
3004                  * It doesn't set the bit in iir again, but it still produces
3005                  * interrupts (for non-MSI).
3006                  */
3007                 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3008                 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
3009                         i915_handle_error(dev, false);
3010
3011                 for_each_pipe(pipe) {
3012                         int reg = PIPESTAT(pipe);
3013                         pipe_stats[pipe] = I915_READ(reg);
3014
3015                         /* Clear the PIPE*STAT regs before the IIR */
3016                         if (pipe_stats[pipe] & 0x8000ffff) {
3017                                 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
3018                                         DRM_DEBUG_DRIVER("pipe %c underrun\n",
3019                                                          pipe_name(pipe));
3020                                 I915_WRITE(reg, pipe_stats[pipe]);
3021                                 irq_received = true;
3022                         }
3023                 }
3024                 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3025
3026                 if (!irq_received)
3027                         break;
3028
3029                 /* Consume port.  Then clear IIR or we'll miss events */
3030                 if (I915_HAS_HOTPLUG(dev) &&
3031                     (iir & I915_DISPLAY_PORT_INTERRUPT)) {
3032                         u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
3033                         u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
3034
3035                         DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
3036                                          hotplug_status);
3037
3038                         intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);
3039
3040                         I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
3041                         POSTING_READ(PORT_HOTPLUG_STAT);
3042                 }
3043
3044                 I915_WRITE(IIR, iir & ~flip_mask);
3045                 new_iir = I915_READ(IIR); /* Flush posted writes */
3046
3047                 if (iir & I915_USER_INTERRUPT)
3048                         notify_ring(dev, &dev_priv->ring[RCS]);
3049
3050                 for_each_pipe(pipe) {
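                        /*
                         * On mobile gen3 parts the primary planes appear
                         * to be cross-wired to the other pipe, so the
                         * flip-pending bit for this pipe's vblank lives
                         * under the opposite plane; hence the swap below.
                         */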
3051                         int plane = pipe;
3052                         if (IS_MOBILE(dev))
3053                                 plane = !plane;
3054
3055                         if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
3056                             i915_handle_vblank(dev, plane, pipe, iir))
3057                                 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
3058
3059                         if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
3060                                 blc_event = true;
3061
3062                         if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
3063                                 i9xx_pipe_crc_irq_handler(dev, pipe);
3064                 }
3065
3066                 if (blc_event || (iir & I915_ASLE_INTERRUPT))
3067                         intel_opregion_asle_intr(dev);
3068
3069                 /* With MSI, interrupts are only generated when iir
3070                  * transitions from zero to nonzero.  If another bit got
3071                  * set while we were handling the existing iir bits, then
3072                  * we would never get another interrupt.
3073                  *
3074                  * This is fine on non-MSI as well, as if we hit this path
3075                  * we avoid exiting the interrupt handler only to generate
3076                  * another one.
3077                  *
3078                  * Note that for MSI this could cause a stray interrupt report
3079                  * if an interrupt landed in the time between writing IIR and
3080                  * the posting read.  This should be rare enough to never
3081                  * trigger the 99% of 100,000 interrupts test for disabling
3082                  * stray interrupts.
3083                  */
3084                 ret = IRQ_HANDLED;
3085                 iir = new_iir;
3086         } while (iir & ~flip_mask);
3087
3088         i915_update_dri1_breadcrumb(dev);
3089
3090         return ret;
3091 }
3092
3093 static void i915_irq_uninstall(struct drm_device *dev)
3094 {
3095         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3096         int pipe;
3097
3098         del_timer_sync(&dev_priv->hotplug_reenable_timer);
3099
3100         if (I915_HAS_HOTPLUG(dev)) {
3101                 I915_WRITE(PORT_HOTPLUG_EN, 0);
3102                 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3103         }
3104
3105         I915_WRITE16(HWSTAM, 0xffff);
3106         for_each_pipe(pipe) {
3107                 /* Clear enable bits; then clear status bits */
3108                 I915_WRITE(PIPESTAT(pipe), 0);
3109                 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
3110         }
3111         I915_WRITE(IMR, 0xffffffff);
3112         I915_WRITE(IER, 0x0);
3113
3114         I915_WRITE(IIR, I915_READ(IIR));
3115 }
3116
3117 static void i965_irq_preinstall(struct drm_device *dev)
3118 {
3119         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3120         int pipe;
3121
3122         atomic_set(&dev_priv->irq_received, 0);
3123
3124         I915_WRITE(PORT_HOTPLUG_EN, 0);
3125         I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3126
3127         I915_WRITE(HWSTAM, 0xeffe);
3128         for_each_pipe(pipe)
3129                 I915_WRITE(PIPESTAT(pipe), 0);
3130         I915_WRITE(IMR, 0xffffffff);
3131         I915_WRITE(IER, 0x0);
3132         POSTING_READ(IER);
3133 }
3134
3135 static int i965_irq_postinstall(struct drm_device *dev)
3136 {
3137         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3138         u32 enable_mask;
3139         u32 error_mask;
3140         unsigned long irqflags;
3141
3142         /* Unmask the interrupts that we always want on. */
3143         dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
3144                                I915_DISPLAY_PORT_INTERRUPT |
3145                                I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3146                                I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3147                                I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3148                                I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
3149                                I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
3150
3151         enable_mask = ~dev_priv->irq_mask;
3152         enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3153                          I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
3154         enable_mask |= I915_USER_INTERRUPT;
3155
3156         if (IS_G4X(dev))
3157                 enable_mask |= I915_BSD_USER_INTERRUPT;
3158
3159         /* Interrupt setup is already guaranteed to be single-threaded; this is
3160          * just to make the assert_spin_locked check happy. */
3161         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3162         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_EVENT_ENABLE);
3163         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE);
3164         i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE);
3165         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3166
3167         /*
3168          * Enable some error detection, note the instruction error mask
3169          * bit is reserved, so we leave it masked.
3170          */
3171         if (IS_G4X(dev)) {
3172                 error_mask = ~(GM45_ERROR_PAGE_TABLE |
3173                                GM45_ERROR_MEM_PRIV |
3174                                GM45_ERROR_CP_PRIV |
3175                                I915_ERROR_MEMORY_REFRESH);
3176         } else {
3177                 error_mask = ~(I915_ERROR_PAGE_TABLE |
3178                                I915_ERROR_MEMORY_REFRESH);
3179         }
3180         I915_WRITE(EMR, error_mask);
3181
3182         I915_WRITE(IMR, dev_priv->irq_mask);
3183         I915_WRITE(IER, enable_mask);
3184         POSTING_READ(IER);
3185
3186         I915_WRITE(PORT_HOTPLUG_EN, 0);
3187         POSTING_READ(PORT_HOTPLUG_EN);
3188
3189         i915_enable_asle_pipestat(dev);
3190
3191         return 0;
3192 }
3193
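/*
 * Hotplug detection is (re)programmed from the encoder list: only pins
 * currently marked HPD_ENABLED get their enable bit set, which is how the
 * storm handling below keeps a noisy port disabled. The CRT activation
 * period and voltage-compare fields live in the same register, so they
 * are programmed alongside.
 */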
3194 static void i915_hpd_irq_setup(struct drm_device *dev)
3195 {
3196         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3197         struct drm_mode_config *mode_config = &dev->mode_config;
3198         struct intel_encoder *intel_encoder;
3199         u32 hotplug_en;
3200
3201         assert_spin_locked(&dev_priv->irq_lock);
3202
3203         if (I915_HAS_HOTPLUG(dev)) {
3204                 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
3205                 hotplug_en &= ~HOTPLUG_INT_EN_MASK;
3206                 /* Note HDMI and DP share hotplug bits */
3207                 /* enable bits are the same for all generations */
3208                 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
3209                         if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
3210                                 hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
3211                 /* Programming the CRT detection parameters tends
3212                  * to generate a spurious hotplug event about three
3213                  * seconds later.  So just do it once.
3214                  */
3215                 if (IS_G4X(dev))
3216                         hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
3217                 hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
3218                 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
3219
3220                 /* Ignore TV since it's buggy */
3221                 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
3222         }
3223 }
3224
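/*
 * The i965 handler follows the same ack protocol as the gen3 one, with
 * two additions: the BSD ring gets its own user interrupt (enabled only
 * on G4X), and GMBUS status is reported through pipe A's PIPESTAT, hence
 * the pipe_stats[0] check near the bottom of the loop.
 */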
3225 static irqreturn_t i965_irq_handler(int irq, void *arg)
3226 {
3227         struct drm_device *dev = (struct drm_device *) arg;
3228         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3229         u32 iir, new_iir;
3230         u32 pipe_stats[I915_MAX_PIPES];
3231         unsigned long irqflags;
3232         bool irq_received;
3233         int ret = IRQ_NONE, pipe;
3234         u32 flip_mask =
3235                 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3236                 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3237
3238         atomic_inc(&dev_priv->irq_received);
3239
3240         iir = I915_READ(IIR);
3241
3242         for (;;) {
3243                 bool blc_event = false;
3244
3245                 irq_received = (iir & ~flip_mask) != 0;
3246
3247                 /* Can't rely on pipestat interrupt bit in iir as it might
3248                  * have been cleared after the pipestat interrupt was received.
3249                  * It doesn't set the bit in iir again, but it still produces
3250                  * interrupts (for non-MSI).
3251                  */
3252                 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3253                 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
3254                         i915_handle_error(dev, false);
3255
3256                 for_each_pipe(pipe) {
3257                         int reg = PIPESTAT(pipe);
3258                         pipe_stats[pipe] = I915_READ(reg);
3259
3260                         /*
3261                          * Clear the PIPE*STAT regs before the IIR
3262                          */
3263                         if (pipe_stats[pipe] & 0x8000ffff) {
3264                                 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
3265                                         DRM_DEBUG_DRIVER("pipe %c underrun\n",
3266                                                          pipe_name(pipe));
3267                                 I915_WRITE(reg, pipe_stats[pipe]);
3268                                 irq_received = true;
3269                         }
3270                 }
3271                 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3272
3273                 if (!irq_received)
3274                         break;
3275
3276                 ret = IRQ_HANDLED;
3277
3278                 /* Consume port.  Then clear IIR or we'll miss events */
3279                 if (iir & I915_DISPLAY_PORT_INTERRUPT) {
3280                         u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
3281                         u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ?
3282                                                                   HOTPLUG_INT_STATUS_G4X :
3283                                                                   HOTPLUG_INT_STATUS_I915);
3284
3285                         DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
3286                                          hotplug_status);
3287
3288                         intel_hpd_irq_handler(dev, hotplug_trigger,
3289                                               IS_G4X(dev) ? hpd_status_gen4 : hpd_status_i915);
3290
3291                         I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
3292                         POSTING_READ(PORT_HOTPLUG_STAT);
3293                 }
3294
3295                 I915_WRITE(IIR, iir & ~flip_mask);
3296                 new_iir = I915_READ(IIR); /* Flush posted writes */
3297
3298                 if (iir & I915_USER_INTERRUPT)
3299                         notify_ring(dev, &dev_priv->ring[RCS]);
3300                 if (iir & I915_BSD_USER_INTERRUPT)
3301                         notify_ring(dev, &dev_priv->ring[VCS]);
3302
3303                 for_each_pipe(pipe) {
3304                         if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
3305                             i915_handle_vblank(dev, pipe, pipe, iir))
3306                                 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
3307
3308                         if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
3309                                 blc_event = true;
3310
3311                         if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
3312                                 i9xx_pipe_crc_irq_handler(dev, pipe);
3313                 }
3314
3316                 if (blc_event || (iir & I915_ASLE_INTERRUPT))
3317                         intel_opregion_asle_intr(dev);
3318
3319                 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
3320                         gmbus_irq_handler(dev);
3321
3322                 /* With MSI, interrupts are only generated when iir
3323                  * transitions from zero to nonzero.  If another bit got
3324                  * set while we were handling the existing iir bits, then
3325                  * we would never get another interrupt.
3326                  *
3327                  * This is fine on non-MSI as well, as if we hit this path
3328                  * we avoid exiting the interrupt handler only to generate
3329                  * another one.
3330                  *
3331                  * Note that for MSI this could cause a stray interrupt report
3332                  * if an interrupt landed in the time between writing IIR and
3333                  * the posting read.  This should be rare enough to never
3334                  * trigger the 99% of 100,000 interrupts test for disabling
3335                  * stray interrupts.
3336                  */
3337                 iir = new_iir;
3338         }
3339
3340         i915_update_dri1_breadcrumb(dev);
3341
3342         return ret;
3343 }
3344
3345 static void i965_irq_uninstall(struct drm_device *dev)
3346 {
3347         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3348         int pipe;
3349
3350         if (!dev_priv)
3351                 return;
3352
3353         del_timer_sync(&dev_priv->hotplug_reenable_timer);
3354
3355         I915_WRITE(PORT_HOTPLUG_EN, 0);
3356         I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3357
3358         I915_WRITE(HWSTAM, 0xffffffff);
3359         for_each_pipe(pipe)
3360                 I915_WRITE(PIPESTAT(pipe), 0);
3361         I915_WRITE(IMR, 0xffffffff);
3362         I915_WRITE(IER, 0x0);
3363
3364         for_each_pipe(pipe)
3365                 I915_WRITE(PIPESTAT(pipe),
3366                            I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
3367         I915_WRITE(IIR, I915_READ(IIR));
3368 }
3369
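/*
 * Timer callback for hotplug storm recovery: pins parked in HPD_DISABLED
 * (and meanwhile serviced by polling) are flipped back to HPD_ENABLED,
 * their connectors are returned to interrupt-driven probing, and the
 * platform hpd_irq_setup hook reprograms the enable bits, all under
 * irq_lock.
 */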
3370 static void i915_reenable_hotplug_timer_func(unsigned long data)
3371 {
3372         drm_i915_private_t *dev_priv = (drm_i915_private_t *)data;
3373         struct drm_device *dev = dev_priv->dev;
3374         struct drm_mode_config *mode_config = &dev->mode_config;
3375         unsigned long irqflags;
3376         int i;
3377
3378         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3379         for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
3380                 struct drm_connector *connector;
3381
3382                 if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
3383                         continue;
3384
3385                 dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
3386
3387                 list_for_each_entry(connector, &mode_config->connector_list, head) {
3388                         struct intel_connector *intel_connector = to_intel_connector(connector);
3389
3390                         if (intel_connector->encoder->hpd_pin == i) {
3391                                 if (connector->polled != intel_connector->polled)
3392                                         DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
3393                                                          drm_get_connector_name(connector));
3394                                 connector->polled = intel_connector->polled;
3395                                 if (!connector->polled)
3396                                         connector->polled = DRM_CONNECTOR_POLL_HPD;
3397                         }
3398                 }
3399         }
3400         if (dev_priv->display.hpd_irq_setup)
3401                 dev_priv->display.hpd_irq_setup(dev);
3402         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3403 }
3404
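/*
 * intel_irq_init() wires up the per-platform irq vtable. The vblank
 * counter width matters to drm_vblank: gen2 reports no usable hardware
 * frame counter (max_vblank_count = 0), G4X and gen5+ provide the full
 * 32 bits, and everything in between only 24.
 */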
3405 void intel_irq_init(struct drm_device *dev)
3406 {
3407         struct drm_i915_private *dev_priv = dev->dev_private;
3408
3409         INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
3410         INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
3411         INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
3412         INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
3413
3414         setup_timer(&dev_priv->gpu_error.hangcheck_timer,
3415                     i915_hangcheck_elapsed,
3416                     (unsigned long) dev);
3417         setup_timer(&dev_priv->hotplug_reenable_timer, i915_reenable_hotplug_timer_func,
3418                     (unsigned long) dev_priv);
3419
3420         pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
3421
3422         if (IS_GEN2(dev)) {
3423                 dev->max_vblank_count = 0;
3424                 dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
3425         } else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
3426                 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
3427                 dev->driver->get_vblank_counter = gm45_get_vblank_counter;
3428         } else {
3429                 dev->driver->get_vblank_counter = i915_get_vblank_counter;
3430                 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
3431         }
3432
3433         if (drm_core_check_feature(dev, DRIVER_MODESET)) {
3434                 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
3435                 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
3436         }
3437
3438         if (IS_VALLEYVIEW(dev)) {
3439                 dev->driver->irq_handler = valleyview_irq_handler;
3440                 dev->driver->irq_preinstall = valleyview_irq_preinstall;
3441                 dev->driver->irq_postinstall = valleyview_irq_postinstall;
3442                 dev->driver->irq_uninstall = valleyview_irq_uninstall;
3443                 dev->driver->enable_vblank = valleyview_enable_vblank;
3444                 dev->driver->disable_vblank = valleyview_disable_vblank;
3445                 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
3446         } else if (HAS_PCH_SPLIT(dev)) {
3447                 dev->driver->irq_handler = ironlake_irq_handler;
3448                 dev->driver->irq_preinstall = ironlake_irq_preinstall;
3449                 dev->driver->irq_postinstall = ironlake_irq_postinstall;
3450                 dev->driver->irq_uninstall = ironlake_irq_uninstall;
3451                 dev->driver->enable_vblank = ironlake_enable_vblank;
3452                 dev->driver->disable_vblank = ironlake_disable_vblank;
3453                 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
3454         } else {
3455                 if (INTEL_INFO(dev)->gen == 2) {
3456                         dev->driver->irq_preinstall = i8xx_irq_preinstall;
3457                         dev->driver->irq_postinstall = i8xx_irq_postinstall;
3458                         dev->driver->irq_handler = i8xx_irq_handler;
3459                         dev->driver->irq_uninstall = i8xx_irq_uninstall;
3460                 } else if (INTEL_INFO(dev)->gen == 3) {
3461                         dev->driver->irq_preinstall = i915_irq_preinstall;
3462                         dev->driver->irq_postinstall = i915_irq_postinstall;
3463                         dev->driver->irq_uninstall = i915_irq_uninstall;
3464                         dev->driver->irq_handler = i915_irq_handler;
3465                         dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
3466                 } else {
3467                         dev->driver->irq_preinstall = i965_irq_preinstall;
3468                         dev->driver->irq_postinstall = i965_irq_postinstall;
3469                         dev->driver->irq_uninstall = i965_irq_uninstall;
3470                         dev->driver->irq_handler = i965_irq_handler;
3471                         dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
3472                 }
3473                 dev->driver->enable_vblank = i915_enable_vblank;
3474                 dev->driver->disable_vblank = i915_disable_vblank;
3475         }
3476 }
3477
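/*
 * intel_hpd_init() resets the per-pin storm statistics and picks, per
 * connector, between interrupt-driven hotplug and polling: connectors
 * whose encoder has a hotplug pin on a hotplug-capable platform default
 * to DRM_CONNECTOR_POLL_HPD unless they requested explicit polling.
 */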
3478 void intel_hpd_init(struct drm_device *dev)
3479 {
3480         struct drm_i915_private *dev_priv = dev->dev_private;
3481         struct drm_mode_config *mode_config = &dev->mode_config;
3482         struct drm_connector *connector;
3483         unsigned long irqflags;
3484         int i;
3485
3486         for (i = HPD_NONE + 1; i < HPD_NUM_PINS; i++) {
3487                 dev_priv->hpd_stats[i].hpd_cnt = 0;
3488                 dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
3489         }
3490         list_for_each_entry(connector, &mode_config->connector_list, head) {
3491                 struct intel_connector *intel_connector = to_intel_connector(connector);
3492                 connector->polled = intel_connector->polled;
3493                 if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
3494                         connector->polled = DRM_CONNECTOR_POLL_HPD;
3495         }
3496
3497         /* Interrupt setup is already guaranteed to be single-threaded; this is
3498          * just to make the assert_spin_locked checks happy. */
3499         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3500         if (dev_priv->display.hpd_irq_setup)
3501                 dev_priv->display.hpd_irq_setup(dev);
3502         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3503 }
3504
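/*
 * Package C8 entry/exit (the two functions below): before allowing PC8+
 * the live DE/SDE/GT/PM interrupt masks are stashed in pc8.regsave and
 * everything except the PCH event and south hotplug bits is masked; on
 * exit the WARNs cross-check that nothing touched the masks in between,
 * then the saved state is restored.
 */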
3505 /* Disable interrupts so we can allow Package C8+. */
3506 void hsw_pc8_disable_interrupts(struct drm_device *dev)
3507 {
3508         struct drm_i915_private *dev_priv = dev->dev_private;
3509         unsigned long irqflags;
3510
3511         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3512
3513         dev_priv->pc8.regsave.deimr = I915_READ(DEIMR);
3514         dev_priv->pc8.regsave.sdeimr = I915_READ(SDEIMR);
3515         dev_priv->pc8.regsave.gtimr = I915_READ(GTIMR);
3516         dev_priv->pc8.regsave.gtier = I915_READ(GTIER);
3517         dev_priv->pc8.regsave.gen6_pmimr = I915_READ(GEN6_PMIMR);
3518
3519         ironlake_disable_display_irq(dev_priv, ~DE_PCH_EVENT_IVB);
3520         ibx_disable_display_interrupt(dev_priv, ~SDE_HOTPLUG_MASK_CPT);
3521         ilk_disable_gt_irq(dev_priv, 0xffffffff);
3522         snb_disable_pm_irq(dev_priv, 0xffffffff);
3523
3524         dev_priv->pc8.irqs_disabled = true;
3525
3526         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3527 }
3528
3529 /* Restore interrupts so we can recover from Package C8+. */
3530 void hsw_pc8_restore_interrupts(struct drm_device *dev)
3531 {
3532         struct drm_i915_private *dev_priv = dev->dev_private;
3533         unsigned long irqflags;
3534         uint32_t val, expected;
3535
3536         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3537
3538         val = I915_READ(DEIMR);
3539         expected = ~DE_PCH_EVENT_IVB;
3540         WARN(val != expected, "DEIMR is 0x%08x, not 0x%08x\n", val, expected);
3541
3542         val = I915_READ(SDEIMR) & ~SDE_HOTPLUG_MASK_CPT;
3543         expected = ~SDE_HOTPLUG_MASK_CPT;
3544         WARN(val != expected, "SDEIMR non-HPD bits are 0x%08x, not 0x%08x\n",
3545              val, expected);
3546
3547         val = I915_READ(GTIMR);
3548         expected = 0xffffffff;
3549         WARN(val != expected, "GTIMR is 0x%08x, not 0x%08x\n", val, expected);
3550
3551         val = I915_READ(GEN6_PMIMR);
3552         expected = 0xffffffff;
3553         WARN(val != expected, "GEN6_PMIMR is 0x%08x, not 0x%08x\n", val,
3554              expected);
3555
3556         dev_priv->pc8.irqs_disabled = false;
3557
3558         ironlake_enable_display_irq(dev_priv, ~dev_priv->pc8.regsave.deimr);
3559         ibx_enable_display_interrupt(dev_priv,
3560                                      ~dev_priv->pc8.regsave.sdeimr &
3561                                      ~SDE_HOTPLUG_MASK_CPT);
3562         ilk_enable_gt_irq(dev_priv, ~dev_priv->pc8.regsave.gtimr);
3563         snb_enable_pm_irq(dev_priv, ~dev_priv->pc8.regsave.gen6_pmimr);
3564         I915_WRITE(GTIER, dev_priv->pc8.regsave.gtier);
3565
3566         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3567 }