/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

static const u32 hpd_ibx[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_mask_i915[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_gen4[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pc8.irqs_disabled) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pc8.regsave.deimr &= ~mask;
		return;
	}

	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

static void
ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pc8.irqs_disabled) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pc8.regsave.deimr |= mask;
		return;
	}

	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}
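
/*
 * A note on the convention used above: in DEIMR (as in the other IMR
 * registers handled below) a set bit *masks* the interrupt, so "enable"
 * clears bits and "disable" sets them. A minimal usage sketch (callers
 * must hold dev_priv->irq_lock, as the asserts above enforce):
 *
 *	spin_lock_irqsave(&dev_priv->irq_lock, flags);
 *	ironlake_enable_display_irq(dev_priv, DE_PIPEA_VBLANK);
 *	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 */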

/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pc8.irqs_disabled) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pc8.regsave.gtimr &= ~interrupt_mask;
		dev_priv->pc8.regsave.gtimr |= (~enabled_irq_mask &
						interrupt_mask);
		return;
	}

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}

void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
}

void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}
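
/*
 * Worked example of the (interrupt_mask, enabled_irq_mask) convention:
 * ilk_update_gt_irq(dev_priv, bits, bits) clears the selected GTIMR bits
 * (unmasking, i.e. enabling those interrupts), while
 * ilk_update_gt_irq(dev_priv, bits, 0) sets them (masking). Bits outside
 * interrupt_mask are always left untouched, so e.g.
 *
 *	ilk_enable_gt_irq(dev_priv, GT_RENDER_USER_INTERRUPT);
 *
 * only flips the render user interrupt bit in GTIMR.
 */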

/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pc8.irqs_disabled) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pc8.regsave.gen6_pmimr &= ~interrupt_mask;
		dev_priv->pc8.regsave.gen6_pmimr |= (~enabled_irq_mask &
						     interrupt_mask);
		return;
	}

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
		POSTING_READ(GEN6_PMIMR);
	}
}

void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, mask);
}

void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}
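
/*
 * These helpers are used with dev_priv->irq_lock held, e.g. by the RPS
 * code below: gen6_rps_irq_handler() masks the RPS bits with
 * snb_disable_pm_irq() before queueing the work item, and
 * gen6_pm_rps_work() re-enables them once the events have been handled:
 *
 *	spin_lock_irq(&dev_priv->irq_lock);
 *	snb_enable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS);
 *	spin_unlock_irq(&dev_priv->irq_lock);
 */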

static bool ivb_can_enable_err_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	enum pipe pipe;

	assert_spin_locked(&dev_priv->irq_lock);

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->cpu_fifo_underrun_disabled)
			return false;
	}

	return true;
}

static bool cpt_can_enable_serr_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	struct intel_crtc *crtc;

	assert_spin_locked(&dev_priv->irq_lock);

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->pch_fifo_underrun_disabled)
			return false;
	}

	return true;
}

static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
						 enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
					  DE_PIPEB_FIFO_UNDERRUN;

	if (enable)
		ironlake_enable_display_irq(dev_priv, bit);
	else
		ironlake_disable_display_irq(dev_priv, bit);
}

static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
						  enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	if (enable) {
		I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));

		if (!ivb_can_enable_err_int(dev))
			return;

		ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
	} else {
		bool was_enabled = !(I915_READ(DEIMR) & DE_ERR_INT_IVB);

		/* Change the state _after_ we've read out the current one. */
		ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);

		if (!was_enabled &&
		    (I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe))) {
			DRM_DEBUG_KMS("uncleared fifo underrun on pipe %c\n",
				      pipe_name(pipe));
		}
	}
}

static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev,
						  enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	assert_spin_locked(&dev_priv->irq_lock);

	if (enable)
		dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_FIFO_UNDERRUN;
	else
		dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_FIFO_UNDERRUN;
	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
					 uint32_t interrupt_mask,
					 uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pc8.irqs_disabled &&
	    (interrupt_mask & SDE_HOTPLUG_MASK_CPT)) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pc8.regsave.sdeimr &= ~interrupt_mask;
		dev_priv->pc8.regsave.sdeimr |= (~enabled_irq_mask &
						 interrupt_mask);
		return;
	}

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}
#define ibx_enable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), (bits))
#define ibx_disable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), 0)

static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
		       SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;

	if (enable)
		ibx_enable_display_interrupt(dev_priv, bit);
	else
		ibx_disable_display_interrupt(dev_priv, bit);
}

static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (enable) {
		I915_WRITE(SERR_INT,
			   SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));

		if (!cpt_can_enable_serr_int(dev))
			return;

		ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
	} else {
		uint32_t tmp = I915_READ(SERR_INT);
		bool was_enabled = !(I915_READ(SDEIMR) & SDE_ERROR_CPT);

		/* Change the state _after_ we've read out the current one. */
		ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);

		if (!was_enabled &&
		    (tmp & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder))) {
			DRM_DEBUG_KMS("uncleared pch fifo underrun on pch transcoder %c\n",
				      transcoder_name(pch_transcoder));
		}
	}
}

/**
 * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pipe: pipe
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable CPU fifo underruns for a specific
 * pipe. Notice that on some Gens (e.g. IVB, HSW), disabling FIFO underrun
 * reporting for one pipe may also disable all the other CPU error interrupts for
 * the other pipes, due to the fact that there's just one interrupt mask/enable
 * bit for all the pipes.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
					   enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	ret = !intel_crtc->cpu_fifo_underrun_disabled;

	if (enable == ret)
		goto done;

	intel_crtc->cpu_fifo_underrun_disabled = !enable;

	if (IS_GEN5(dev) || IS_GEN6(dev))
		ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
	else if (IS_GEN7(dev))
		ivybridge_set_fifo_underrun_reporting(dev, pipe, enable);
	else if (IS_GEN8(dev))
		broadwell_set_fifo_underrun_reporting(dev, pipe, enable);

done:
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
	return ret;
}
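
/*
 * Since the function returns the previous reporting state, callers can
 * silence underrun reports temporarily and restore the old setting
 * afterwards, e.g. (sketch):
 *
 *	bool old = intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);
 *	... do something known to trigger spurious underruns ...
 *	intel_set_cpu_fifo_underrun_reporting(dev, pipe, old);
 */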

/**
 * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable PCH fifo underruns for a specific
 * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
 * underrun reporting for one transcoder may also disable all the other PCH
 * error interrupts for the other transcoders, due to the fact that there's just
 * one interrupt mask/enable bit for all the transcoders.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
					   enum transcoder pch_transcoder,
					   bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long flags;
	bool ret;

	/*
	 * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT
	 * has only one pch transcoder A that all pipes can use. To avoid racy
	 * pch transcoder -> pipe lookups from interrupt code simply store the
	 * underrun statistics in crtc A. Since we never expose this anywhere
	 * nor use it outside of the fifo underrun code here using the "wrong"
	 * crtc on LPT won't cause issues.
	 */

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	ret = !intel_crtc->pch_fifo_underrun_disabled;

	if (enable == ret)
		goto done;

	intel_crtc->pch_fifo_underrun_disabled = !enable;

	if (HAS_PCH_IBX(dev))
		ibx_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
	else
		cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable);

done:
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
	return ret;
}


void
i915_enable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0x7fff0000;

	assert_spin_locked(&dev_priv->irq_lock);

	if ((pipestat & mask) == mask)
		return;

	/* Enable the interrupt, clear any pending status */
	pipestat |= mask | (mask >> 16);
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

void
i915_disable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0x7fff0000;

	assert_spin_locked(&dev_priv->irq_lock);

	if ((pipestat & mask) == 0)
		return;

	pipestat &= ~mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}
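
/*
 * PIPESTAT layout note: the upper 16 bits (the 0x7fff0000 kept when
 * reading above) are the interrupt enable bits, and the lower 16 bits
 * are the corresponding sticky status bits. That is why enabling uses
 * "mask | (mask >> 16)": it sets the enable bit and simultaneously
 * writes 1 to the matching status bit to clear any stale event, e.g.
 *
 *	i915_enable_pipestat(dev_priv, PIPE_B,
 *			     PIPE_LEGACY_BLC_EVENT_ENABLE);
 */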

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_ENABLE);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_ENABLE);

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* Locking is horribly broken here, but whatever. */
		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		return intel_crtc->active;
	} else {
		return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
	}
}

static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
{
	/* Gen2 doesn't have a hardware frame counter */
	return 0;
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low, pixel, vbl_start;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				"pipe %c\n", pipe_name(pipe));
		return 0;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		struct intel_crtc *intel_crtc =
			to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
		const struct drm_display_mode *mode =
			&intel_crtc->config.adjusted_mode;

		vbl_start = mode->crtc_vblank_start * mode->crtc_htotal;
	} else {
		enum transcoder cpu_transcoder =
			intel_pipe_to_cpu_transcoder(dev_priv, pipe);
		u32 htotal;

		htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
		vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1;

		vbl_start *= htotal;
	}

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = I915_READ(low_frame);
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at the beginning of the active
	 * period. Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return ((high1 << 8) | low) + (pixel >= vbl_start);
}

static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	return I915_READ(reg);
}

static bool intel_pipe_in_vblank(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t status;

	if (IS_VALLEYVIEW(dev)) {
		status = pipe == PIPE_A ?
			I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT :
			I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

		return I915_READ(VLV_ISR) & status;
	} else if (IS_GEN2(dev)) {
		status = pipe == PIPE_A ?
			I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT :
			I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

		return I915_READ16(ISR) & status;
	} else if (INTEL_INFO(dev)->gen < 5) {
		status = pipe == PIPE_A ?
			I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT :
			I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

		return I915_READ(ISR) & status;
	} else if (INTEL_INFO(dev)->gen < 7) {
		status = pipe == PIPE_A ?
			DE_PIPEA_VBLANK :
			DE_PIPEB_VBLANK;

		return I915_READ(DEISR) & status;
	} else {
		switch (pipe) {
		default:
		case PIPE_A:
			status = DE_PIPEA_VBLANK_IVB;
			break;
		case PIPE_B:
			status = DE_PIPEB_VBLANK_IVB;
			break;
		case PIPE_C:
			status = DE_PIPEC_VBLANK_IVB;
			break;
		}

		return I915_READ(DEISR) & status;
	}
}

static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
			     int *vpos, int *hpos)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
	int position;
	int vbl_start, vbl_end, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;

	if (!intel_crtc->active) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	htotal = mode->crtc_htotal;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		if (IS_GEN2(dev))
			position = I915_READ(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
		else
			position = I915_READ(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

		/*
		 * The scanline counter increments at the leading edge
		 * of hsync, i.e. it completely misses the active portion
		 * of the line. Fix up the counter at both edges of vblank
		 * to get a more accurate picture of whether we're in
		 * vblank or not.
		 */
		in_vbl = intel_pipe_in_vblank(dev, pipe);
		if ((in_vbl && position == vbl_start - 1) ||
		    (!in_vbl && position == vbl_end - 1))
			position = (position + 1) % vtotal;
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;
	}

	in_vbl = position >= vbl_start && position < vbl_end;

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_INVBL;

	return ret;
}
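
/*
 * Worked example for the normalization above, using purely illustrative
 * numbers: assume a mode with vtotal = 806, vbl_start = 768 and
 * vbl_end = 800. A raw scanline of 770 lies inside vblank and becomes
 * 770 - 800 = -30, i.e. 30 lines before vblank end, while a raw
 * scanline of 100 lies in the active period and becomes
 * 100 + (806 - 800) = 106 lines since vblank end.
 */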

static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
			      int *max_error,
			      struct timeval *vblank_time,
			      unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->enabled) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     crtc);
}

static bool intel_hpd_irq_event(struct drm_device *dev,
				struct drm_connector *connector)
{
	enum drm_connector_status old_status;

	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
	old_status = connector->status;

	connector->status = connector->funcs->detect(connector, false);
	if (old_status == connector->status)
		return false;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
		      connector->base.id,
		      drm_get_connector_name(connector),
		      drm_get_connector_status_name(old_status),
		      drm_get_connector_status_name(connector->status));

	return true;
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)

static void i915_hotplug_work_func(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_connector *intel_connector;
	struct intel_encoder *intel_encoder;
	struct drm_connector *connector;
	unsigned long irqflags;
	bool hpd_disabled = false;
	bool changed = false;
	u32 hpd_event_bits;

	/* HPD irq before everything is fully set up. */
	if (!dev_priv->enable_hotplug_processing)
		return;

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	hpd_event_bits = dev_priv->hpd_event_bits;
	dev_priv->hpd_event_bits = 0;
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		intel_encoder = intel_connector->encoder;
		if (intel_encoder->hpd_pin > HPD_NONE &&
		    dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
		    connector->polled == DRM_CONNECTOR_POLL_HPD) {
			DRM_INFO("HPD interrupt storm detected on connector %s: "
				 "switching from hotplug detection to polling\n",
				drm_get_connector_name(connector));
			dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
			connector->polled = DRM_CONNECTOR_POLL_CONNECT
				| DRM_CONNECTOR_POLL_DISCONNECT;
			hpd_disabled = true;
		}
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
				      drm_get_connector_name(connector), intel_encoder->hpd_pin);
		}
	}
	/*
	 * If polling was disabled because no outputs needed it, make sure
	 * it gets re-enabled now that we're switching some connectors from
	 * HPD to polling.
	 */
	if (hpd_disabled) {
		drm_kms_helper_poll_enable(dev);
		mod_timer(&dev_priv->hotplug_reenable_timer,
			  jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
	}

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		intel_encoder = intel_connector->encoder;
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			if (intel_encoder->hot_plug)
				intel_encoder->hot_plug(intel_encoder);
			if (intel_hpd_irq_event(dev, connector))
				changed = true;
		}
	}
	mutex_unlock(&mode_config->mutex);

	if (changed)
		drm_kms_helper_hotplug_event(dev);
}
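
/*
 * Note the storm fallback above: once intel_hpd_irq_handler() (below) has
 * marked a pin HPD_MARK_DISABLED, this work function downgrades the
 * connector from interrupt-driven detection to periodic polling and arms
 * hotplug_reenable_timer, so HPD gets another chance after
 * I915_REENABLE_HOTPLUG_DELAY (two minutes, in milliseconds) of quiet.
 */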

static void ironlake_rps_change_irq_handler(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	spin_lock(&mchdev_lock);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock(&mchdev_lock);

	return;
}

static void notify_ring(struct drm_device *dev,
			struct intel_ring_buffer *ring)
{
	if (ring->obj == NULL)
		return;

	trace_i915_gem_request_complete(ring);

	wake_up_all(&ring->irq_queue);
	i915_queue_hangcheck(dev);
}

static void gen6_pm_rps_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    rps.work);
	u32 pm_iir;
	int new_delay, adj;

	spin_lock_irq(&dev_priv->irq_lock);
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	/* Make sure not to corrupt PMIMR state used by ringbuffer code */
	snb_enable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Make sure we didn't queue anything we're not going to process. */
	WARN_ON(pm_iir & ~GEN6_PM_RPS_EVENTS);

	if ((pm_iir & GEN6_PM_RPS_EVENTS) == 0)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	adj = dev_priv->rps.last_adj;
	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else
			adj = 1;
		new_delay = dev_priv->rps.cur_delay + adj;

		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (new_delay < dev_priv->rps.rpe_delay)
			new_delay = dev_priv->rps.rpe_delay;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (dev_priv->rps.cur_delay > dev_priv->rps.rpe_delay)
			new_delay = dev_priv->rps.rpe_delay;
		else
			new_delay = dev_priv->rps.min_delay;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else
			adj = -1;
		new_delay = dev_priv->rps.cur_delay + adj;
	} else { /* unknown event */
		new_delay = dev_priv->rps.cur_delay;
	}

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	if (new_delay < (int)dev_priv->rps.min_delay)
		new_delay = dev_priv->rps.min_delay;
	if (new_delay > (int)dev_priv->rps.max_delay)
		new_delay = dev_priv->rps.max_delay;
	dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_delay;

	if (IS_VALLEYVIEW(dev_priv->dev))
		valleyview_set_rps(dev_priv->dev, new_delay);
	else
		gen6_set_rps(dev_priv->dev, new_delay);

	mutex_unlock(&dev_priv->rps.hw_lock);
}
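
/*
 * Example of the adaptive stepping above: on consecutive up-threshold
 * events last_adj grows 1, 2, 4, ... so sustained load ramps the
 * frequency quickly, while a single event in the opposite direction
 * resets the step to +/-1 and a down-timeout resets it to 0. The result
 * is always clamped to [min_delay, max_delay], since sysfs may have
 * changed the limits while the interrupt was in flight.
 */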

/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows, since statistically
 * the same row is more likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	uint32_t misccpctl;
	unsigned long flags;
	uint8_t slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		u32 reg;

		slice--;
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1 + (slice * 0x200);

		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj,
				   KOBJ_CHANGE, parity_event);

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	WARN_ON(dev_priv->l3_parity.which_slice);
	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	ilk_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	mutex_unlock(&dev_priv->dev->struct_mutex);
}
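
/*
 * The uevent above reaches userspace as a KOBJ_CHANGE event on the drm
 * device, with an environment roughly of the form (values illustrative):
 *
 *	<I915_L3_PARITY_UEVENT>=1 ROW=12 BANK=1 SUBBANK=0 SLICE=0
 *
 * where the first key is whatever string I915_L3_PARITY_UEVENT expands
 * to. A userspace daemon can react by remapping the bad row, e.g.
 * through the driver's l3_parity sysfs interface.
 */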

static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (!HAS_L3_DPF(dev))
		return;

	spin_lock(&dev_priv->irq_lock);
	ilk_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
	spin_unlock(&dev_priv->irq_lock);

	iir &= GT_PARITY_ERROR(dev);
	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
		dev_priv->l3_parity.which_slice |= 1 << 1;

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		dev_priv->l3_parity.which_slice |= 1 << 0;

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
}

static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[BCS]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
		DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
		i915_handle_error(dev, false);
	}

	if (gt_iir & GT_PARITY_ERROR(dev))
		ivybridge_parity_error_irq_handler(dev, gt_iir);
}

static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
				       struct drm_i915_private *dev_priv,
				       u32 master_ctl)
{
	u32 rcs, bcs, vcs;
	uint32_t tmp = 0;
	irqreturn_t ret = IRQ_NONE;

	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		tmp = I915_READ(GEN8_GT_IIR(0));
		if (tmp) {
			ret = IRQ_HANDLED;
			rcs = tmp >> GEN8_RCS_IRQ_SHIFT;
			bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
			if (rcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, &dev_priv->ring[RCS]);
			if (bcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, &dev_priv->ring[BCS]);
			I915_WRITE(GEN8_GT_IIR(0), tmp);
		} else
			DRM_ERROR("The master control interrupt lied (GT0)!\n");
	}

	if (master_ctl & GEN8_GT_VCS1_IRQ) {
		tmp = I915_READ(GEN8_GT_IIR(1));
		if (tmp) {
			ret = IRQ_HANDLED;
			vcs = tmp >> GEN8_VCS1_IRQ_SHIFT;
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, &dev_priv->ring[VCS]);
			I915_WRITE(GEN8_GT_IIR(1), tmp);
		} else
			DRM_ERROR("The master control interrupt lied (GT1)!\n");
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		tmp = I915_READ(GEN8_GT_IIR(3));
		if (tmp) {
			ret = IRQ_HANDLED;
			vcs = tmp >> GEN8_VECS_IRQ_SHIFT;
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, &dev_priv->ring[VECS]);
			I915_WRITE(GEN8_GT_IIR(3), tmp);
		} else
			DRM_ERROR("The master control interrupt lied (GT3)!\n");
	}

	return ret;
}
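
/*
 * On gen8 each GT IIR packs several engines into one register: IIR(0)
 * carries the render and blitter fields at GEN8_RCS_IRQ_SHIFT and
 * GEN8_BCS_IRQ_SHIFT, IIR(1) the video engine at GEN8_VCS1_IRQ_SHIFT,
 * and IIR(3) the video enhancement engine at GEN8_VECS_IRQ_SHIFT. That
 * is why the handler above shifts the register value down before
 * testing the per-engine GT_RENDER_USER_INTERRUPT bit.
 */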

#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_THRESHOLD 5

static inline void intel_hpd_irq_handler(struct drm_device *dev,
					 u32 hotplug_trigger,
					 const u32 *hpd)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;
	bool storm_detected = false;

	if (!hotplug_trigger)
		return;

	spin_lock(&dev_priv->irq_lock);
	for (i = 1; i < HPD_NUM_PINS; i++) {

		WARN(((hpd[i] & hotplug_trigger) &&
		      dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED),
		     "Received HPD interrupt although disabled\n");

		if (!(hpd[i] & hotplug_trigger) ||
		    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
			continue;

		dev_priv->hpd_event_bits |= (1 << i);
		if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
				   dev_priv->hpd_stats[i].hpd_last_jiffies
				   + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
			dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
			dev_priv->hpd_stats[i].hpd_cnt = 0;
			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i);
		} else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
			dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
			dev_priv->hpd_event_bits &= ~(1 << i);
			DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
			storm_detected = true;
		} else {
			dev_priv->hpd_stats[i].hpd_cnt++;
			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i,
				      dev_priv->hpd_stats[i].hpd_cnt);
		}
	}

	if (storm_detected)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock(&dev_priv->irq_lock);

	/*
	 * Our hotplug handler can grab modeset locks (by calling down into the
	 * fb helpers). Hence it must not be run on our own dev_priv->wq work
	 * queue, since otherwise the flush_work in the pageflip code will
	 * deadlock.
	 */
	schedule_work(&dev_priv->hotplug_work);
}
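
/*
 * Storm detection in numbers: with HPD_STORM_DETECT_PERIOD = 1000 and
 * HPD_STORM_THRESHOLD = 5, a pin whose interrupts arrive more than five
 * times within a 1000ms window is marked HPD_MARK_DISABLED and handed
 * over to the hotplug work function, which switches the affected
 * connector over to polling (see i915_hotplug_work_func() above).
 */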

static void gmbus_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

#if defined(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
					 uint32_t crc0, uint32_t crc1,
					 uint32_t crc2, uint32_t crc3,
					 uint32_t crc4)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	struct intel_pipe_crc_entry *entry;
	int head, tail;

	spin_lock(&pipe_crc->lock);

	if (!pipe_crc->entries) {
		spin_unlock(&pipe_crc->lock);
		DRM_ERROR("spurious interrupt\n");
		return;
	}

	head = pipe_crc->head;
	tail = pipe_crc->tail;

	if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
		spin_unlock(&pipe_crc->lock);
		DRM_ERROR("CRC buffer overflowing\n");
		return;
	}

	entry = &pipe_crc->entries[head];

	entry->frame = dev->driver->get_vblank_counter(dev, pipe);
	entry->crc[0] = crc0;
	entry->crc[1] = crc1;
	entry->crc[2] = crc2;
	entry->crc[3] = crc3;
	entry->crc[4] = crc4;

	head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
	pipe_crc->head = head;

	spin_unlock(&pipe_crc->lock);

	wake_up_interruptible(&pipe_crc->wq);
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
			     uint32_t crc0, uint32_t crc1,
			     uint32_t crc2, uint32_t crc3,
			     uint32_t crc4) {}
#endif
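
/*
 * The CRC entries above go into a single-producer (this interrupt
 * handler), single-consumer ring buffer; CIRC_SPACE() and the
 * "(head + 1) & (size - 1)" wrap-around require
 * INTEL_PIPE_CRC_ENTRIES_NR to be a power of two. Readers sleeping on
 * pipe_crc->wq are woken only after head has been published under
 * pipe_crc->lock.
 */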
1311
1312
1313 static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
1314 {
1315         struct drm_i915_private *dev_priv = dev->dev_private;
1316
1317         display_pipe_crc_irq_handler(dev, pipe,
1318                                      I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1319                                      0, 0, 0, 0);
1320 }
1321
1322 static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
1323 {
1324         struct drm_i915_private *dev_priv = dev->dev_private;
1325
1326         display_pipe_crc_irq_handler(dev, pipe,
1327                                      I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1328                                      I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
1329                                      I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
1330                                      I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
1331                                      I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
1332 }
1333
1334 static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
1335 {
1336         struct drm_i915_private *dev_priv = dev->dev_private;
1337         uint32_t res1, res2;
1338
1339         if (INTEL_INFO(dev)->gen >= 3)
1340                 res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
1341         else
1342                 res1 = 0;
1343
1344         if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
1345                 res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
1346         else
1347                 res2 = 0;
1348
1349         display_pipe_crc_irq_handler(dev, pipe,
1350                                      I915_READ(PIPE_CRC_RES_RED(pipe)),
1351                                      I915_READ(PIPE_CRC_RES_GREEN(pipe)),
1352                                      I915_READ(PIPE_CRC_RES_BLUE(pipe)),
1353                                      res1, res2);
1354 }
1355
1356 /* The RPS events need forcewake, so we add them to a work queue and mask their
1357  * IMR bits until the work is done. Other interrupts can be processed without
1358  * the work queue. */
1359 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
1360 {
1361         if (pm_iir & GEN6_PM_RPS_EVENTS) {
1362                 spin_lock(&dev_priv->irq_lock);
1363                 dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS;
1364                 snb_disable_pm_irq(dev_priv, pm_iir & GEN6_PM_RPS_EVENTS);
1365                 spin_unlock(&dev_priv->irq_lock);
1366
1367                 queue_work(dev_priv->wq, &dev_priv->rps.work);
1368         }
1369
1370         if (HAS_VEBOX(dev_priv->dev)) {
1371                 if (pm_iir & PM_VEBOX_USER_INTERRUPT)
1372                         notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);
1373
1374                 if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
1375                         DRM_ERROR("VEBOX CS error interrupt 0x%08x\n", pm_iir);
1376                         i915_handle_error(dev_priv->dev, false);
1377                 }
1378         }
1379 }
1380
1381 static irqreturn_t valleyview_irq_handler(int irq, void *arg)
1382 {
1383         struct drm_device *dev = (struct drm_device *) arg;
1384         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1385         u32 iir, gt_iir, pm_iir;
1386         irqreturn_t ret = IRQ_NONE;
1387         unsigned long irqflags;
1388         int pipe;
1389         u32 pipe_stats[I915_MAX_PIPES];
1390
1391         atomic_inc(&dev_priv->irq_received);
1392
1393         while (true) {
1394                 iir = I915_READ(VLV_IIR);
1395                 gt_iir = I915_READ(GTIIR);
1396                 pm_iir = I915_READ(GEN6_PMIIR);
1397
1398                 if (gt_iir == 0 && pm_iir == 0 && iir == 0)
1399                         goto out;
1400
1401                 ret = IRQ_HANDLED;
1402
1403                 snb_gt_irq_handler(dev, dev_priv, gt_iir);
1404
1405                 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1406                 for_each_pipe(pipe) {
1407                         int reg = PIPESTAT(pipe);
1408                         pipe_stats[pipe] = I915_READ(reg);
1409
1410                         /*
1411                          * Clear the PIPE*STAT regs before the IIR
1412                          */
1413                         if (pipe_stats[pipe] & 0x8000ffff) {
1414                                 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1415                                         DRM_DEBUG_DRIVER("pipe %c underrun\n",
1416                                                          pipe_name(pipe));
1417                                 I915_WRITE(reg, pipe_stats[pipe]);
1418                         }
1419                 }
1420                 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1421
1422                 for_each_pipe(pipe) {
1423                         if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
1424                                 drm_handle_vblank(dev, pipe);
1425
1426                         if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
1427                                 intel_prepare_page_flip(dev, pipe);
1428                                 intel_finish_page_flip(dev, pipe);
1429                         }
1430
1431                         if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1432                                 i9xx_pipe_crc_irq_handler(dev, pipe);
1433                 }
1434
1435                 /* Consume port.  Then clear IIR or we'll miss events */
1436                 if (iir & I915_DISPLAY_PORT_INTERRUPT) {
1437                         u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
1438                         u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
1439
1440                         DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
1441                                          hotplug_status);
1442
1443                         intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);
1444
1445                         I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
1446                         I915_READ(PORT_HOTPLUG_STAT);
1447                 }
1448
1449                 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1450                         gmbus_irq_handler(dev);
1451
1452                 if (pm_iir)
1453                         gen6_rps_irq_handler(dev_priv, pm_iir);
1454
1455                 I915_WRITE(GTIIR, gt_iir);
1456                 I915_WRITE(GEN6_PMIIR, pm_iir);
1457                 I915_WRITE(VLV_IIR, iir);
1458         }
1459
1460 out:
1461         return ret;
1462 }
1463
1464 static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
1465 {
1466         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1467         int pipe;
1468         u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
1469
1470         intel_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);
1471
1472         if (pch_iir & SDE_AUDIO_POWER_MASK) {
1473                 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
1474                                SDE_AUDIO_POWER_SHIFT);
1475                 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
1476                                  port_name(port));
1477         }
1478
1479         if (pch_iir & SDE_AUX_MASK)
1480                 dp_aux_irq_handler(dev);
1481
1482         if (pch_iir & SDE_GMBUS)
1483                 gmbus_irq_handler(dev);
1484
1485         if (pch_iir & SDE_AUDIO_HDCP_MASK)
1486                 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
1487
1488         if (pch_iir & SDE_AUDIO_TRANS_MASK)
1489                 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
1490
1491         if (pch_iir & SDE_POISON)
1492                 DRM_ERROR("PCH poison interrupt\n");
1493
1494         if (pch_iir & SDE_FDI_MASK)
1495                 for_each_pipe(pipe)
1496                         DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
1497                                          pipe_name(pipe),
1498                                          I915_READ(FDI_RX_IIR(pipe)));
1499
1500         if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
1501                 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
1502
1503         if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
1504                 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
1505
1506         if (pch_iir & SDE_TRANSA_FIFO_UNDER)
1507                 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
1508                                                           false))
1509                         DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");
1510
1511         if (pch_iir & SDE_TRANSB_FIFO_UNDER)
1512                 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
1513                                                           false))
1514                         DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");
1515 }
1516
1517 static void ivb_err_int_handler(struct drm_device *dev)
1518 {
1519         struct drm_i915_private *dev_priv = dev->dev_private;
1520         u32 err_int = I915_READ(GEN7_ERR_INT);
1521         enum pipe pipe;
1522
1523         if (err_int & ERR_INT_POISON)
1524                 DRM_ERROR("Poison interrupt\n");
1525
1526         for_each_pipe(pipe) {
1527                 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) {
1528                         if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
1529                                                                   false))
1530                                 DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n",
1531                                                  pipe_name(pipe));
1532                 }
1533
1534                 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
1535                         if (IS_IVYBRIDGE(dev))
1536                                 ivb_pipe_crc_irq_handler(dev, pipe);
1537                         else
1538                                 hsw_pipe_crc_irq_handler(dev, pipe);
1539                 }
1540         }
1541
1542         I915_WRITE(GEN7_ERR_INT, err_int);
1543 }
1544
1545 static void cpt_serr_int_handler(struct drm_device *dev)
1546 {
1547         struct drm_i915_private *dev_priv = dev->dev_private;
1548         u32 serr_int = I915_READ(SERR_INT);
1549
1550         if (serr_int & SERR_INT_POISON)
1551                 DRM_ERROR("PCH poison interrupt\n");
1552
1553         if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
1554                 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
1555                                                           false))
1556                         DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");
1557
1558         if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
1559                 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
1560                                                           false))
1561                         DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");
1562
1563         if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
1564                 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C,
1565                                                           false))
1566                         DRM_DEBUG_DRIVER("PCH transcoder C FIFO underrun\n");
1567
1568         I915_WRITE(SERR_INT, serr_int);
1569 }
1570
1571 static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
1572 {
1573         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1574         int pipe;
1575         u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
1576
1577         intel_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);
1578
1579         if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
1580                 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
1581                                SDE_AUDIO_POWER_SHIFT_CPT);
1582                 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
1583                                  port_name(port));
1584         }
1585
1586         if (pch_iir & SDE_AUX_MASK_CPT)
1587                 dp_aux_irq_handler(dev);
1588
1589         if (pch_iir & SDE_GMBUS_CPT)
1590                 gmbus_irq_handler(dev);
1591
1592         if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
1593                 DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
1594
1595         if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
1596                 DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
1597
1598         if (pch_iir & SDE_FDI_MASK_CPT)
1599                 for_each_pipe(pipe)
1600                         DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
1601                                          pipe_name(pipe),
1602                                          I915_READ(FDI_RX_IIR(pipe)));
1603
1604         if (pch_iir & SDE_ERROR_CPT)
1605                 cpt_serr_int_handler(dev);
1606 }
1607
1608 static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
1609 {
1610         struct drm_i915_private *dev_priv = dev->dev_private;
1611         enum pipe pipe;
1612
1613         if (de_iir & DE_AUX_CHANNEL_A)
1614                 dp_aux_irq_handler(dev);
1615
1616         if (de_iir & DE_GSE)
1617                 intel_opregion_asle_intr(dev);
1618
1619         if (de_iir & DE_POISON)
1620                 DRM_ERROR("Poison interrupt\n");
1621
1622         for_each_pipe(pipe) {
1623                 if (de_iir & DE_PIPE_VBLANK(pipe))
1624                         drm_handle_vblank(dev, pipe);
1625
1626                 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
1627                         if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
1628                                 DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n",
1629                                                  pipe_name(pipe));
1630
1631                 if (de_iir & DE_PIPE_CRC_DONE(pipe))
1632                         i9xx_pipe_crc_irq_handler(dev, pipe);
1633
1634                 /* plane/pipes map 1:1 on ilk+ */
1635                 if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
1636                         intel_prepare_page_flip(dev, pipe);
1637                         intel_finish_page_flip_plane(dev, pipe);
1638                 }
1639         }
1640
1641         /* check event from PCH */
1642         if (de_iir & DE_PCH_EVENT) {
1643                 u32 pch_iir = I915_READ(SDEIIR);
1644
1645                 if (HAS_PCH_CPT(dev))
1646                         cpt_irq_handler(dev, pch_iir);
1647                 else
1648                         ibx_irq_handler(dev, pch_iir);
1649
1650                 /* should clear PCH hotplug event before clearing CPU irq */
1651                 I915_WRITE(SDEIIR, pch_iir);
1652         }
1653
1654         if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
1655                 ironlake_rps_change_irq_handler(dev);
1656 }
1657
1658 static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
1659 {
1660         struct drm_i915_private *dev_priv = dev->dev_private;
1661         enum pipe i;
1662
1663         if (de_iir & DE_ERR_INT_IVB)
1664                 ivb_err_int_handler(dev);
1665
1666         if (de_iir & DE_AUX_CHANNEL_A_IVB)
1667                 dp_aux_irq_handler(dev);
1668
1669         if (de_iir & DE_GSE_IVB)
1670                 intel_opregion_asle_intr(dev);
1671
1672         for_each_pipe(i) {
1673                 if (de_iir & (DE_PIPE_VBLANK_IVB(i)))
1674                         drm_handle_vblank(dev, i);
1675
1676                 /* plane/pipes map 1:1 on ilk+ */
1677                 if (de_iir & DE_PLANE_FLIP_DONE_IVB(i)) {
1678                         intel_prepare_page_flip(dev, i);
1679                         intel_finish_page_flip_plane(dev, i);
1680                 }
1681         }
1682
1683         /* check event from PCH */
1684         if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
1685                 u32 pch_iir = I915_READ(SDEIIR);
1686
1687                 cpt_irq_handler(dev, pch_iir);
1688
1689                 /* clear PCH hotplug event before clearing CPU irq */
1690                 I915_WRITE(SDEIIR, pch_iir);
1691         }
1692 }
1693
1694 static irqreturn_t ironlake_irq_handler(int irq, void *arg)
1695 {
1696         struct drm_device *dev = (struct drm_device *) arg;
1697         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1698         u32 de_iir, gt_iir, de_ier, sde_ier = 0;
1699         irqreturn_t ret = IRQ_NONE;
1700
1701         atomic_inc(&dev_priv->irq_received);
1702
1703         /* We get interrupts on unclaimed registers, so check for this before we
1704          * do any I915_{READ,WRITE}. */
1705         intel_uncore_check_errors(dev);
1706
1707         /* disable master interrupt before clearing iir  */
1708         de_ier = I915_READ(DEIER);
1709         I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
1710         POSTING_READ(DEIER);
1711
1712         /* Disable south interrupts. We'll only write to SDEIIR once, so further
1713          * interrupts will be stored on its back queue, and then we'll be
1714          * able to process them after we restore SDEIER (as soon as we restore
1715          * it, we'll get an interrupt if SDEIIR still has something to process
1716          * due to its back queue). */
1717         if (!HAS_PCH_NOP(dev)) {
1718                 sde_ier = I915_READ(SDEIER);
1719                 I915_WRITE(SDEIER, 0);
1720                 POSTING_READ(SDEIER);
1721         }
1722
1723         gt_iir = I915_READ(GTIIR);
1724         if (gt_iir) {
1725                 if (INTEL_INFO(dev)->gen >= 6)
1726                         snb_gt_irq_handler(dev, dev_priv, gt_iir);
1727                 else
1728                         ilk_gt_irq_handler(dev, dev_priv, gt_iir);
1729                 I915_WRITE(GTIIR, gt_iir);
1730                 ret = IRQ_HANDLED;
1731         }
1732
1733         de_iir = I915_READ(DEIIR);
1734         if (de_iir) {
1735                 if (INTEL_INFO(dev)->gen >= 7)
1736                         ivb_display_irq_handler(dev, de_iir);
1737                 else
1738                         ilk_display_irq_handler(dev, de_iir);
1739                 I915_WRITE(DEIIR, de_iir);
1740                 ret = IRQ_HANDLED;
1741         }
1742
1743         if (INTEL_INFO(dev)->gen >= 6) {
1744                 u32 pm_iir = I915_READ(GEN6_PMIIR);
1745                 if (pm_iir) {
1746                         gen6_rps_irq_handler(dev_priv, pm_iir);
1747                         I915_WRITE(GEN6_PMIIR, pm_iir);
1748                         ret = IRQ_HANDLED;
1749                 }
1750         }
1751
1752         I915_WRITE(DEIER, de_ier);
1753         POSTING_READ(DEIER);
1754         if (!HAS_PCH_NOP(dev)) {
1755                 I915_WRITE(SDEIER, sde_ier);
1756                 POSTING_READ(SDEIER);
1757         }
1758
1759         return ret;
1760 }
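
/*
 * Editor's note: an illustrative model of the ordering above -- gate the
 * master bit, park SDEIER so SDEIIR is only written once, handle, then
 * restore. The register helpers and numbering below are made up, not
 * driver API.
 */
#if 0
static unsigned int regs[2];
enum { R_DEIER, R_SDEIER };
#define MASTER_BIT (1u << 31)

static unsigned int reg_read(int r)          { return regs[r]; }
static void reg_write(int r, unsigned int v) { regs[r] = v; }

static void irq_top_half_ordering(void)
{
        unsigned int de_ier = reg_read(R_DEIER);
        unsigned int sde_ier = reg_read(R_SDEIER);

        reg_write(R_DEIER, de_ier & ~MASTER_BIT); /* 1. no new CPU irqs   */
        reg_write(R_SDEIER, 0);                   /* 2. park PCH delivery */

        /* 3. read/handle/ack GTIIR, DEIIR and SDEIIR exactly once here */

        reg_write(R_DEIER, de_ier);               /* 4. restore; anything */
        reg_write(R_SDEIER, sde_ier);             /*    still queued in   */
                                                  /*    SDEIIR re-fires   */
}

int main(void)
{
        irq_top_half_ordering();
        return 0;
}
#endif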
1761
1762 static irqreturn_t gen8_irq_handler(int irq, void *arg)
1763 {
1764         struct drm_device *dev = arg;
1765         struct drm_i915_private *dev_priv = dev->dev_private;
1766         u32 master_ctl;
1767         irqreturn_t ret = IRQ_NONE;
1768         uint32_t tmp = 0;
1769         enum pipe pipe;
1770
1771         atomic_inc(&dev_priv->irq_received);
1772
1773         master_ctl = I915_READ(GEN8_MASTER_IRQ);
1774         master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
1775         if (!master_ctl)
1776                 return IRQ_NONE;
1777
1778         I915_WRITE(GEN8_MASTER_IRQ, 0);
1779         POSTING_READ(GEN8_MASTER_IRQ);
1780
1781         ret = gen8_gt_irq_handler(dev, dev_priv, master_ctl);
1782
1783         if (master_ctl & GEN8_DE_MISC_IRQ) {
1784                 tmp = I915_READ(GEN8_DE_MISC_IIR);
1785                 if (tmp & GEN8_DE_MISC_GSE)
1786                         intel_opregion_asle_intr(dev);
1787                 else if (tmp)
1788                         DRM_ERROR("Unexpected DE Misc interrupt\n");
1789                 else
1790                         DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
1791
1792                 if (tmp) {
1793                         I915_WRITE(GEN8_DE_MISC_IIR, tmp);
1794                         ret = IRQ_HANDLED;
1795                 }
1796         }
1797
1798         if (master_ctl & GEN8_DE_PORT_IRQ) {
1799                 tmp = I915_READ(GEN8_DE_PORT_IIR);
1800                 if (tmp & GEN8_AUX_CHANNEL_A)
1801                         dp_aux_irq_handler(dev);
1802                 else if (tmp)
1803                         DRM_ERROR("Unexpected DE Port interrupt\n");
1804                 else
1805                         DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
1806
1807                 if (tmp) {
1808                         I915_WRITE(GEN8_DE_PORT_IIR, tmp);
1809                         ret = IRQ_HANDLED;
1810                 }
1811         }
1812
1813         for_each_pipe(pipe) {
1814                 uint32_t pipe_iir;
1815
1816                 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
1817                         continue;
1818
1819                 pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
1820                 if (pipe_iir & GEN8_PIPE_VBLANK)
1821                         drm_handle_vblank(dev, pipe);
1822
1823                 if (pipe_iir & GEN8_PIPE_FLIP_DONE) {
1824                         intel_prepare_page_flip(dev, pipe);
1825                         intel_finish_page_flip_plane(dev, pipe);
1826                 }
1827
1828                 if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
1829                         hsw_pipe_crc_irq_handler(dev, pipe);
1830
1831                 if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) {
1832                         if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
1833                                                                   false))
1834                                 DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n",
1835                                                  pipe_name(pipe));
1836                 }
1837
1838                 if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) {
1839                         DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
1840                                   pipe_name(pipe),
1841                                   pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
1842                 }
1843
1844                 if (pipe_iir) {
1845                         ret = IRQ_HANDLED;
1846                         I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
1847                 } else
1848                         DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
1849         }
1850
1851         if (!HAS_PCH_NOP(dev) && master_ctl & GEN8_DE_PCH_IRQ) {
1852                 /*
1853                  * FIXME(BDW): Assume for now that the new interrupt handling
1854                  * scheme also closed the SDE interrupt handling race we've seen
1855                  * on older pch-split platforms. But this needs testing.
1856                  */
1857                 u32 pch_iir = I915_READ(SDEIIR);
1858
1859                 cpt_irq_handler(dev, pch_iir);
1860
1861                 if (pch_iir) {
1862                         I915_WRITE(SDEIIR, pch_iir);
1863                         ret = IRQ_HANDLED;
1864                 }
1865         }
1866
1867         I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
1868         POSTING_READ(GEN8_MASTER_IRQ);
1869
1870         return ret;
1871 }
1872
1873 static void i915_error_wake_up(struct drm_i915_private *dev_priv,
1874                                bool reset_completed)
1875 {
1876         struct intel_ring_buffer *ring;
1877         int i;
1878
1879         /*
1880          * Notify all waiters for GPU completion events that reset state has
1881          * been changed, and that they need to restart their wait after
1882          * checking for potential errors (and bailing out to drop locks if there is
1883          * a gpu reset pending so that i915_error_work_func can acquire them).
1884          */
1885
1886         /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
1887         for_each_ring(ring, dev_priv, i)
1888                 wake_up_all(&ring->irq_queue);
1889
1890         /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
1891         wake_up_all(&dev_priv->pending_flip_queue);
1892
1893         /*
1894          * Signal tasks blocked in i915_gem_wait_for_error that the pending
1895          * reset state is cleared.
1896          */
1897         if (reset_completed)
1898                 wake_up_all(&dev_priv->gpu_error.reset_queue);
1899 }
1900
1901 /**
1902  * i915_error_work_func - do process context error handling work
1903  * @work: work struct
1904  *
1905  * Fire an error uevent so userspace can see that a hang or error
1906  * was detected.
1907  */
1908 static void i915_error_work_func(struct work_struct *work)
1909 {
1910         struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
1911                                                     work);
1912         drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t,
1913                                                     gpu_error);
1914         struct drm_device *dev = dev_priv->dev;
1915         char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
1916         char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
1917         char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
1918         int ret;
1919
1920         kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
1921
1922         /*
1923          * Note that there's only one work item which does gpu resets, so we
1924          * need not worry about concurrent gpu resets potentially incrementing
1925          * error->reset_counter twice. We only need to take care of another
1926          * racing irq/hangcheck declaring the gpu dead for a second time. A
1927          * quick check for that is good enough: schedule_work ensures the
1928          * correct ordering between hang detection and this work item, and since
1929          * the reset in-progress bit is only ever set by code outside of this
1930          * work we don't need to worry about any other races.
1931          */
1932         if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
1933                 DRM_DEBUG_DRIVER("resetting chip\n");
1934                 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE,
1935                                    reset_event);
1936
1937                 /*
1938                  * All state reset _must_ be completed before we update the
1939                  * reset counter, for otherwise waiters might miss the reset
1940                  * pending state and not properly drop locks, resulting in
1941                  * deadlocks with the reset work.
1942                  */
1943                 ret = i915_reset(dev);
1944
1945                 intel_display_handle_reset(dev);
1946
1947                 if (ret == 0) {
1948                         /*
1949                          * After all the gem state is reset, increment the reset
1950                          * counter and wake up everyone waiting for the reset to
1951                          * complete.
1952                          *
1953                          * Since unlock operations are a one-sided barrier only,
1954                          * we need to insert a barrier here to order any seqno
1955                          * updates before
1956                          * the counter increment.
1957                          */
1958                         smp_mb__before_atomic_inc();
1959                         atomic_inc(&dev_priv->gpu_error.reset_counter);
1960
1961                         kobject_uevent_env(&dev->primary->kdev.kobj,
1962                                            KOBJ_CHANGE, reset_done_event);
1963                 } else {
1964                         atomic_set(&error->reset_counter, I915_WEDGED);
1965                 }
1966
1967                 /*
1968                  * Note: The wake_up also serves as a memory barrier so that
1969                  * waiters see the updated value of the reset counter atomic_t.
1970                  */
1971                 i915_error_wake_up(dev_priv, true);
1972         }
1973 }
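
/*
 * Editor's note: a sketch of the reset_counter protocol implied above,
 * assuming the convention that bit 0 is the in-progress flag
 * (I915_RESET_IN_PROGRESS_FLAG) and bit 31 marks a wedged GPU
 * (I915_WEDGED). Names and helpers here are illustrative only.
 */
#if 0
#include <stdatomic.h>
#include <stdbool.h>

#define RESET_IN_PROGRESS 1u
#define WEDGED            (1u << 31)

static atomic_uint reset_counter;

static void declare_hang(void)          /* cf. i915_handle_error()    */
{
        atomic_fetch_or(&reset_counter, RESET_IN_PROGRESS);
        /* wake all waiters so they back off and drop their locks */
}

static void reset_work(bool reset_ok)   /* cf. i915_error_work_func() */
{
        if (reset_ok)
                atomic_fetch_add(&reset_counter, 1); /* clears bit 0 and
                                                      * bumps the epoch  */
        else
                atomic_store(&reset_counter, WEDGED);
        /* wake everyone again so they observe the new counter value */
}

int main(void)
{
        declare_hang();
        reset_work(true);
        return 0;
}
#endif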
1974
1975 static void i915_report_and_clear_eir(struct drm_device *dev)
1976 {
1977         struct drm_i915_private *dev_priv = dev->dev_private;
1978         uint32_t instdone[I915_NUM_INSTDONE_REG];
1979         u32 eir = I915_READ(EIR);
1980         int pipe, i;
1981
1982         if (!eir)
1983                 return;
1984
1985         pr_err("render error detected, EIR: 0x%08x\n", eir);
1986
1987         i915_get_extra_instdone(dev, instdone);
1988
1989         if (IS_G4X(dev)) {
1990                 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
1991                         u32 ipeir = I915_READ(IPEIR_I965);
1992
1993                         pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
1994                         pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
1995                         for (i = 0; i < ARRAY_SIZE(instdone); i++)
1996                                 pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
1997                         pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
1998                         pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
1999                         I915_WRITE(IPEIR_I965, ipeir);
2000                         POSTING_READ(IPEIR_I965);
2001                 }
2002                 if (eir & GM45_ERROR_PAGE_TABLE) {
2003                         u32 pgtbl_err = I915_READ(PGTBL_ER);
2004                         pr_err("page table error\n");
2005                         pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
2006                         I915_WRITE(PGTBL_ER, pgtbl_err);
2007                         POSTING_READ(PGTBL_ER);
2008                 }
2009         }
2010
2011         if (!IS_GEN2(dev)) {
2012                 if (eir & I915_ERROR_PAGE_TABLE) {
2013                         u32 pgtbl_err = I915_READ(PGTBL_ER);
2014                         pr_err("page table error\n");
2015                         pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
2016                         I915_WRITE(PGTBL_ER, pgtbl_err);
2017                         POSTING_READ(PGTBL_ER);
2018                 }
2019         }
2020
2021         if (eir & I915_ERROR_MEMORY_REFRESH) {
2022                 pr_err("memory refresh error:\n");
2023                 for_each_pipe(pipe)
2024                         pr_err("pipe %c stat: 0x%08x\n",
2025                                pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
2026                 /* pipestat has already been acked */
2027         }
2028         if (eir & I915_ERROR_INSTRUCTION) {
2029                 pr_err("instruction error\n");
2030                 pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
2031                 for (i = 0; i < ARRAY_SIZE(instdone); i++)
2032                         pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2033                 if (INTEL_INFO(dev)->gen < 4) {
2034                         u32 ipeir = I915_READ(IPEIR);
2035
2036                         pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
2037                         pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
2038                         pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
2039                         I915_WRITE(IPEIR, ipeir);
2040                         POSTING_READ(IPEIR);
2041                 } else {
2042                         u32 ipeir = I915_READ(IPEIR_I965);
2043
2044                         pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2045                         pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2046                         pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
2047                         pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2048                         I915_WRITE(IPEIR_I965, ipeir);
2049                         POSTING_READ(IPEIR_I965);
2050                 }
2051         }
2052
2053         I915_WRITE(EIR, eir);
2054         POSTING_READ(EIR);
2055         eir = I915_READ(EIR);
2056         if (eir) {
2057                 /*
2058                  * some errors might have become stuck,
2059                  * mask them.
2060                  */
2061                 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
2062                 I915_WRITE(EMR, I915_READ(EMR) | eir);
2063                 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2064         }
2065 }
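
/*
 * Editor's note: a toy model of the fallback at the end of the function
 * above -- ack EIR, and any bit that refuses to clear gets folded into
 * the mask register (EMR) so it stops raising interrupts. The "stuck"
 * bit below is simulated; none of this is real register behaviour.
 */
#if 0
#include <assert.h>
#include <stdint.h>

static uint32_t eir = 0x3;      /* bit 0 clears normally, bit 1 is stuck */
static uint32_t emr;

static void ack_eir(void)
{
        eir &= 0x2;             /* "write to clear": the stuck bit stays */
        if (eir)
                emr |= eir;     /* can't clear it, so mask it instead    */
}

int main(void)
{
        ack_eir();
        assert(eir == 0x2 && emr == 0x2);
        return 0;
}
#endif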
2066
2067 /**
2068  * i915_handle_error - handle an error interrupt
2069  * @dev: drm device
2070  *
2071  * Do some basic checking of register state at error interrupt time and
2072  * dump it to the syslog.  Also call i915_capture_error_state() to make
2073  * sure we get a record and make it available in debugfs.  Fire a uevent
2074  * so userspace knows something bad happened (should trigger collection
2075  * of a ring dump etc.).
2076  */
2077 void i915_handle_error(struct drm_device *dev, bool wedged)
2078 {
2079         struct drm_i915_private *dev_priv = dev->dev_private;
2080
2081         i915_capture_error_state(dev);
2082         i915_report_and_clear_eir(dev);
2083
2084         if (wedged) {
2085                 atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
2086                                 &dev_priv->gpu_error.reset_counter);
2087
2088                 /*
2089          * Wake up waiting processes so that the reset work function
2090                  * i915_error_work_func doesn't deadlock trying to grab various
2091                  * locks. By bumping the reset counter first, the woken
2092                  * processes will see a reset in progress and back off,
2093                  * releasing their locks and then wait for the reset completion.
2094                  * We must do this for _all_ gpu waiters that might hold locks
2095                  * that the reset work needs to acquire.
2096                  *
2097                  * Note: The wake_up serves as the required memory barrier to
2098                  * ensure that the waiters see the updated value of the reset
2099                  * counter atomic_t.
2100                  */
2101                 i915_error_wake_up(dev_priv, false);
2102         }
2103
2104         /*
2105          * Our reset work can grab modeset locks (since it needs to reset the
2106          * state of outstanding pageflips). Hence it must not be run on our own
2107          * dev_priv->wq work queue, for otherwise the flush_work in the pageflip
2108          * code will deadlock.
2109          */
2110         schedule_work(&dev_priv->gpu_error.work);
2111 }
2112
2113 static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe)
2114 {
2115         drm_i915_private_t *dev_priv = dev->dev_private;
2116         struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
2117         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2118         struct drm_i915_gem_object *obj;
2119         struct intel_unpin_work *work;
2120         unsigned long flags;
2121         bool stall_detected;
2122
2123         /* Ignore early vblank irqs */
2124         if (intel_crtc == NULL)
2125                 return;
2126
2127         spin_lock_irqsave(&dev->event_lock, flags);
2128         work = intel_crtc->unpin_work;
2129
2130         if (work == NULL ||
2131             atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
2132             !work->enable_stall_check) {
2133                 /* Either the pending flip IRQ arrived, or we're too early. Don't check */
2134                 spin_unlock_irqrestore(&dev->event_lock, flags);
2135                 return;
2136         }
2137
2138         /* Potential stall - if we see that the flip has happened, assume a missed interrupt */
2139         obj = work->pending_flip_obj;
2140         if (INTEL_INFO(dev)->gen >= 4) {
2141                 int dspsurf = DSPSURF(intel_crtc->plane);
2142                 stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
2143                                         i915_gem_obj_ggtt_offset(obj);
2144         } else {
2145                 int dspaddr = DSPADDR(intel_crtc->plane);
2146                 stall_detected = I915_READ(dspaddr) == (i915_gem_obj_ggtt_offset(obj) +
2147                                                         crtc->y * crtc->fb->pitches[0] +
2148                                                         crtc->x * crtc->fb->bits_per_pixel/8);
2149         }
2150
2151         spin_unlock_irqrestore(&dev->event_lock, flags);
2152
2153         if (stall_detected) {
2154                 DRM_DEBUG_DRIVER("Pageflip stall detected\n");
2155                 intel_prepare_page_flip(dev, intel_crtc->plane);
2156         }
2157 }
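
/*
 * Editor's note: a worked example of the pre-gen4 address check above,
 * where the expected scanout address is recomputed by hand as
 * base + y * pitch + x * bpp / 8. The numbers below are made up.
 */
#if 0
#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint32_t ggtt_offset = 0x100000; /* object base in the aperture */
        uint32_t pitch = 1920 * 4;       /* bytes per line, 32bpp XRGB  */
        int x = 16, y = 10;              /* crtc panning offsets        */

        uint32_t expected = ggtt_offset + y * pitch + x * 32 / 8;
        assert(expected == 0x100000 + 76800 + 64);
        return 0;
}
#endif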
2158
2159 /* Called from drm generic code, passed 'crtc' which
2160  * we use as a pipe index
2161  */
2162 static int i915_enable_vblank(struct drm_device *dev, int pipe)
2163 {
2164         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2165         unsigned long irqflags;
2166
2167         if (!i915_pipe_enabled(dev, pipe))
2168                 return -EINVAL;
2169
2170         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2171         if (INTEL_INFO(dev)->gen >= 4)
2172                 i915_enable_pipestat(dev_priv, pipe,
2173                                      PIPE_START_VBLANK_INTERRUPT_ENABLE);
2174         else
2175                 i915_enable_pipestat(dev_priv, pipe,
2176                                      PIPE_VBLANK_INTERRUPT_ENABLE);
2177
2178         /* maintain vblank delivery even in deep C-states */
2179         if (dev_priv->info->gen == 3)
2180                 I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
2181         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2182
2183         return 0;
2184 }
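
/*
 * Editor's note: the INSTPM write above uses the masked-bit convention
 * (_MASKED_BIT_ENABLE/_MASKED_BIT_DISABLE): the upper 16 bits of the
 * value select which of the lower 16 bits the write actually updates,
 * avoiding a read-modify-write. A self-contained sketch of that
 * encoding; masked_write() is a stand-in for the hardware, not i915 API.
 */
#if 0
#include <assert.h>
#include <stdint.h>

#define MASKED_BIT_ENABLE(a)  (((a) << 16) | (a))
#define MASKED_BIT_DISABLE(a) ((a) << 16)

static uint16_t masked_write(uint16_t reg, uint32_t val)
{
        uint16_t select = val >> 16;    /* which bits to touch */
        uint16_t bits = val & 0xffff;   /* their new values    */

        return (reg & ~select) | (bits & select);
}

int main(void)
{
        uint16_t reg = 0;

        reg = masked_write(reg, MASKED_BIT_ENABLE(0x4));
        assert(reg == 0x4);
        reg = masked_write(reg, MASKED_BIT_DISABLE(0x4));
        assert(reg == 0x0);
        return 0;
}
#endif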
2185
2186 static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
2187 {
2188         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2189         unsigned long irqflags;
2190         uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2191                                                      DE_PIPE_VBLANK(pipe);
2192
2193         if (!i915_pipe_enabled(dev, pipe))
2194                 return -EINVAL;
2195
2196         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2197         ironlake_enable_display_irq(dev_priv, bit);
2198         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2199
2200         return 0;
2201 }
2202
2203 static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
2204 {
2205         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2206         unsigned long irqflags;
2207         u32 imr;
2208
2209         if (!i915_pipe_enabled(dev, pipe))
2210                 return -EINVAL;
2211
2212         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2213         imr = I915_READ(VLV_IMR);
2214         if (pipe == PIPE_A)
2215                 imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
2216         else
2217                 imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
2218         I915_WRITE(VLV_IMR, imr);
2219         i915_enable_pipestat(dev_priv, pipe,
2220                              PIPE_START_VBLANK_INTERRUPT_ENABLE);
2221         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2222
2223         return 0;
2224 }
2225
2226 static int gen8_enable_vblank(struct drm_device *dev, int pipe)
2227 {
2228         struct drm_i915_private *dev_priv = dev->dev_private;
2229         unsigned long irqflags;
2230
2231         if (!i915_pipe_enabled(dev, pipe))
2232                 return -EINVAL;
2233
2234         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2235         dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK;
2236         I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2237         POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
2238         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2239         return 0;
2240 }
2241
2242 /* Called from drm generic code, passed 'crtc' which
2243  * we use as a pipe index
2244  */
2245 static void i915_disable_vblank(struct drm_device *dev, int pipe)
2246 {
2247         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2248         unsigned long irqflags;
2249
2250         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2251         if (dev_priv->info->gen == 3)
2252                 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));
2253
2254         i915_disable_pipestat(dev_priv, pipe,
2255                               PIPE_VBLANK_INTERRUPT_ENABLE |
2256                               PIPE_START_VBLANK_INTERRUPT_ENABLE);
2257         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2258 }
2259
2260 static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
2261 {
2262         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2263         unsigned long irqflags;
2264         uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2265                                                      DE_PIPE_VBLANK(pipe);
2266
2267         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2268         ironlake_disable_display_irq(dev_priv, bit);
2269         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2270 }
2271
2272 static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
2273 {
2274         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2275         unsigned long irqflags;
2276         u32 imr;
2277
2278         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2279         i915_disable_pipestat(dev_priv, pipe,
2280                               PIPE_START_VBLANK_INTERRUPT_ENABLE);
2281         imr = I915_READ(VLV_IMR);
2282         if (pipe == PIPE_A)
2283                 imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
2284         else
2285                 imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
2286         I915_WRITE(VLV_IMR, imr);
2287         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2288 }
2289
2290 static void gen8_disable_vblank(struct drm_device *dev, int pipe)
2291 {
2292         struct drm_i915_private *dev_priv = dev->dev_private;
2293         unsigned long irqflags;
2294
2295         if (!i915_pipe_enabled(dev, pipe))
2296                 return;
2297
2298         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2299         dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK;
2300         I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2301         POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
2302         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2303 }
2304
2305 static u32
2306 ring_last_seqno(struct intel_ring_buffer *ring)
2307 {
2308         return list_entry(ring->request_list.prev,
2309                           struct drm_i915_gem_request, list)->seqno;
2310 }
2311
2312 static bool
2313 ring_idle(struct intel_ring_buffer *ring, u32 seqno)
2314 {
2315         return (list_empty(&ring->request_list) ||
2316                 i915_seqno_passed(seqno, ring_last_seqno(ring)));
2317 }
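
/*
 * Editor's note: ring_idle() leans on i915_seqno_passed(); a minimal
 * sketch of the usual wrap-safe comparison it relies on -- signed
 * subtraction -- valid while the two seqnos stay within 2^31 of each
 * other. Illustrative only.
 */
#if 0
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool seqno_passed(uint32_t seq1, uint32_t seq2)
{
        return (int32_t)(seq1 - seq2) >= 0;
}

int main(void)
{
        assert(seqno_passed(5, 3));
        assert(!seqno_passed(3, 5));
        assert(seqno_passed(2, 0xfffffffeu));   /* across the u32 wrap */
        return 0;
}
#endif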
2318
2319 static struct intel_ring_buffer *
2320 semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno)
2321 {
2322         struct drm_i915_private *dev_priv = ring->dev->dev_private;
2323         u32 cmd, ipehr, acthd, acthd_min;
2324
2325         ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
2326         if ((ipehr & ~(0x3 << 16)) !=
2327             (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER))
2328                 return NULL;
2329
2330         /* ACTHD is likely pointing to the dword after the actual command,
2331          * so scan backwards until we find the MBOX.
2332          */
2333         acthd = intel_ring_get_active_head(ring) & HEAD_ADDR;
2334         acthd_min = max((int)acthd - 3 * 4, 0);
2335         do {
2336                 cmd = ioread32(ring->virtual_start + acthd);
2337                 if (cmd == ipehr)
2338                         break;
2339
2340                 acthd -= 4;
2341                 if (acthd < acthd_min)
2342                         return NULL;
2343         } while (1);
2344
2345         *seqno = ioread32(ring->virtual_start + acthd + 4) + 1;
2346         return &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3];
2347 }
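
/*
 * Editor's note: a simplified model of the backwards scan above. ACTHD
 * tends to point just past the command, so walk back a few slots looking
 * for the MBOX command recorded in IPEHR. Indices here are dwords in a
 * plain array, where the real code uses byte offsets into the ring.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

static int find_cmd_backwards(const uint32_t *ring, uint32_t acthd,
                              uint32_t ipehr)
{
        uint32_t min = acthd >= 3 ? acthd - 3 : 0;
        uint32_t i;

        for (i = acthd; ; i--) {
                if (ring[i] == ipehr)
                        return (int)i;
                if (i == min)
                        return -1;      /* not found within the window */
        }
}

int main(void)
{
        uint32_t ring[8] = { 0, 0, 0xdead, 0x5eed, 0, 0, 0, 0 };

        /* ACTHD points one dword past the command at index 2 */
        printf("found at %d\n", find_cmd_backwards(ring, 3, 0xdead));
        return 0;
}
#endif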
2348
2349 static int semaphore_passed(struct intel_ring_buffer *ring)
2350 {
2351         struct drm_i915_private *dev_priv = ring->dev->dev_private;
2352         struct intel_ring_buffer *signaller;
2353         u32 seqno, ctl;
2354
2355         ring->hangcheck.deadlock = true;
2356
2357         signaller = semaphore_waits_for(ring, &seqno);
2358         if (signaller == NULL || signaller->hangcheck.deadlock)
2359                 return -1;
2360
2361         /* cursory check for an unkickable deadlock */
2362         ctl = I915_READ_CTL(signaller);
2363         if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0)
2364                 return -1;
2365
2366         return i915_seqno_passed(signaller->get_seqno(signaller, false), seqno);
2367 }
2368
2369 static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
2370 {
2371         struct intel_ring_buffer *ring;
2372         int i;
2373
2374         for_each_ring(ring, dev_priv, i)
2375                 ring->hangcheck.deadlock = false;
2376 }
2377
2378 static enum intel_ring_hangcheck_action
2379 ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
2380 {
2381         struct drm_device *dev = ring->dev;
2382         struct drm_i915_private *dev_priv = dev->dev_private;
2383         u32 tmp;
2384
2385         if (ring->hangcheck.acthd != acthd)
2386                 return HANGCHECK_ACTIVE;
2387
2388         if (IS_GEN2(dev))
2389                 return HANGCHECK_HUNG;
2390
2391         /* Is the chip hanging on a WAIT_FOR_EVENT?
2392          * If so we can simply poke the RB_WAIT bit
2393          * and break the hang. This should work on
2394          * all but the second generation chipsets.
2395          */
2396         tmp = I915_READ_CTL(ring);
2397         if (tmp & RING_WAIT) {
2398                 DRM_ERROR("Kicking stuck wait on %s\n",
2399                           ring->name);
2400                 i915_handle_error(dev, false);
2401                 I915_WRITE_CTL(ring, tmp);
2402                 return HANGCHECK_KICK;
2403         }
2404
2405         if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
2406                 switch (semaphore_passed(ring)) {
2407                 default:
2408                         return HANGCHECK_HUNG;
2409                 case 1:
2410                         DRM_ERROR("Kicking stuck semaphore on %s\n",
2411                                   ring->name);
2412                         i915_handle_error(dev, false);
2413                         I915_WRITE_CTL(ring, tmp);
2414                         return HANGCHECK_KICK;
2415                 case 0:
2416                         return HANGCHECK_WAIT;
2417                 }
2418         }
2419
2420         return HANGCHECK_HUNG;
2421 }
2422
2423 /**
2424  * This is called when the chip hasn't reported back with completed
2425  * batchbuffers in a long time. We keep track of per-ring seqno progress
2426  * and, if there is no progress, the hangcheck score for that ring is
2427  * increased. Further, acthd is inspected to see if the ring is stuck. If
2428  * it is, we kick the ring. If we see no progress on three subsequent
2429  * calls we assume the chip is wedged and try to fix it by resetting it.
2430  */
2431 static void i915_hangcheck_elapsed(unsigned long data)
2432 {
2433         struct drm_device *dev = (struct drm_device *)data;
2434         drm_i915_private_t *dev_priv = dev->dev_private;
2435         struct intel_ring_buffer *ring;
2436         int i;
2437         int busy_count = 0, rings_hung = 0;
2438         bool stuck[I915_NUM_RINGS] = { 0 };
2439 #define BUSY 1
2440 #define KICK 5
2441 #define HUNG 20
2442 #define FIRE 30
2443
2444         if (!i915_enable_hangcheck)
2445                 return;
2446
2447         for_each_ring(ring, dev_priv, i) {
2448                 u32 seqno, acthd;
2449                 bool busy = true;
2450
2451                 semaphore_clear_deadlocks(dev_priv);
2452
2453                 seqno = ring->get_seqno(ring, false);
2454                 acthd = intel_ring_get_active_head(ring);
2455
2456                 if (ring->hangcheck.seqno == seqno) {
2457                         if (ring_idle(ring, seqno)) {
2458                                 ring->hangcheck.action = HANGCHECK_IDLE;
2459
2460                                 if (waitqueue_active(&ring->irq_queue)) {
2461                                         /* Issue a wake-up to catch stuck h/w. */
2462                                         if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
2463                                                 if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
2464                                                         DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
2465                                                                   ring->name);
2466                                                 else
2467                                                         DRM_INFO("Fake missed irq on %s\n",
2468                                                                  ring->name);
2469                                                 wake_up_all(&ring->irq_queue);
2470                                         }
2471                                         /* Safeguard against driver failure */
2472                                         ring->hangcheck.score += BUSY;
2473                                 } else
2474                                         busy = false;
2475                         } else {
2476                                 /* We always increment the hangcheck score
2477                                  * if the ring is busy and still processing
2478                                  * the same request, so that no single request
2479                                  * can run indefinitely (such as a chain of
2480                                  * batches). The only time we do not increment
2481                                  * the hangcheck score on this ring, if this
2482                                  * the hangcheck score on this ring is if this
2483                                  * ring. In that case the waiting ring is a
2484                                  * victim and we want to be sure we catch the
2485                                  * right culprit. Then every time we do kick
2486                                  * the ring, add a small increment to the
2487                                  * score so that we can catch a batch that is
2488                                  * being repeatedly kicked and so responsible
2489                                  * for stalling the machine.
2490                                  */
2491                                 ring->hangcheck.action = ring_stuck(ring,
2492                                                                     acthd);
2493
2494                                 switch (ring->hangcheck.action) {
2495                                 case HANGCHECK_IDLE:
2496                                 case HANGCHECK_WAIT:
2497                                         break;
2498                                 case HANGCHECK_ACTIVE:
2499                                         ring->hangcheck.score += BUSY;
2500                                         break;
2501                                 case HANGCHECK_KICK:
2502                                         ring->hangcheck.score += KICK;
2503                                         break;
2504                                 case HANGCHECK_HUNG:
2505                                         ring->hangcheck.score += HUNG;
2506                                         stuck[i] = true;
2507                                         break;
2508                                 }
2509                         }
2510                 } else {
2511                         ring->hangcheck.action = HANGCHECK_ACTIVE;
2512
2513                         /* Gradually reduce the score so that we catch DoS
2514                          * attempts across multiple batches.
2515                          */
2516                         if (ring->hangcheck.score > 0)
2517                                 ring->hangcheck.score--;
2518                 }
2519
2520                 ring->hangcheck.seqno = seqno;
2521                 ring->hangcheck.acthd = acthd;
2522                 busy_count += busy;
2523         }
2524
2525         for_each_ring(ring, dev_priv, i) {
2526                 if (ring->hangcheck.score > FIRE) {
2527                         DRM_INFO("%s on %s\n",
2528                                  stuck[i] ? "stuck" : "no progress",
2529                                  ring->name);
2530                         rings_hung++;
2531                 }
2532         }
2533
2534         if (rings_hung)
2535                 return i915_handle_error(dev, true);
2536
2537         if (busy_count)
2538                 /* Reset timer in case the chip hangs without another request
2539                  * being added */
2540                 i915_queue_hangcheck(dev);
2541 }
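
/*
 * Editor's note: a minimal model of the scoring policy above. BUSY, KICK
 * and HUNG feed a per-ring score, FIRE is the reset threshold, and
 * observed progress decays the score. The numbers mirror the #defines
 * above; everything else is illustrative.
 */
#if 0
#include <stdio.h>

enum { SBUSY = 1, SKICK = 5, SHUNG = 20, SFIRE = 30 };

int main(void)
{
        int score = 0;

        score += SHUNG;                 /* same seqno, acthd stuck      */
        score += SHUNG;                 /* still stuck one period later */
        if (score > SFIRE)
                printf("declare hang (score=%d), reset chip\n", score);

        if (score > 0)
                score--;                /* progress seen: decay slowly  */
        return 0;
}
#endif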
2542
2543 void i915_queue_hangcheck(struct drm_device *dev)
2544 {
2545         struct drm_i915_private *dev_priv = dev->dev_private;
2546         if (!i915_enable_hangcheck)
2547                 return;
2548
2549         mod_timer(&dev_priv->gpu_error.hangcheck_timer,
2550                   round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
2551 }
2552
2553 static void ibx_irq_preinstall(struct drm_device *dev)
2554 {
2555         struct drm_i915_private *dev_priv = dev->dev_private;
2556
2557         if (HAS_PCH_NOP(dev))
2558                 return;
2559
2560         /* south display irq */
2561         I915_WRITE(SDEIMR, 0xffffffff);
2562         /*
2563          * SDEIER is also touched by the interrupt handler to work around missed
2564          * PCH interrupts. Hence we can't update it after the interrupt handler
2565          * is enabled - instead we unconditionally enable all PCH interrupt
2566          * sources here, but then only unmask them as needed with SDEIMR.
2567          */
2568         I915_WRITE(SDEIER, 0xffffffff);
2569         POSTING_READ(SDEIER);
2570 }
2571
2572 static void gen5_gt_irq_preinstall(struct drm_device *dev)
2573 {
2574         struct drm_i915_private *dev_priv = dev->dev_private;
2575
2576         /* and GT */
2577         I915_WRITE(GTIMR, 0xffffffff);
2578         I915_WRITE(GTIER, 0x0);
2579         POSTING_READ(GTIER);
2580
2581         if (INTEL_INFO(dev)->gen >= 6) {
2582                 /* and PM */
2583                 I915_WRITE(GEN6_PMIMR, 0xffffffff);
2584                 I915_WRITE(GEN6_PMIER, 0x0);
2585                 POSTING_READ(GEN6_PMIER);
2586         }
2587 }
2588
2589 /* drm_dma.h hooks
2590 */
2591 static void ironlake_irq_preinstall(struct drm_device *dev)
2592 {
2593         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2594
2595         atomic_set(&dev_priv->irq_received, 0);
2596
2597         I915_WRITE(HWSTAM, 0xeffe);
2598
2599         I915_WRITE(DEIMR, 0xffffffff);
2600         I915_WRITE(DEIER, 0x0);
2601         POSTING_READ(DEIER);
2602
2603         gen5_gt_irq_preinstall(dev);
2604
2605         ibx_irq_preinstall(dev);
2606 }
2607
2608 static void valleyview_irq_preinstall(struct drm_device *dev)
2609 {
2610         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2611         int pipe;
2612
2613         atomic_set(&dev_priv->irq_received, 0);
2614
2615         /* VLV magic */
2616         I915_WRITE(VLV_IMR, 0);
2617         I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
2618         I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
2619         I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
2620
2621         /* and GT */
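        /* cleared twice on purpose: IIR can theoretically queue two events */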
2622         I915_WRITE(GTIIR, I915_READ(GTIIR));
2623         I915_WRITE(GTIIR, I915_READ(GTIIR));
2624
2625         gen5_gt_irq_preinstall(dev);
2626
2627         I915_WRITE(DPINVGTT, 0xff);
2628
2629         I915_WRITE(PORT_HOTPLUG_EN, 0);
2630         I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2631         for_each_pipe(pipe)
2632                 I915_WRITE(PIPESTAT(pipe), 0xffff);
2633         I915_WRITE(VLV_IIR, 0xffffffff);
2634         I915_WRITE(VLV_IMR, 0xffffffff);
2635         I915_WRITE(VLV_IER, 0x0);
2636         POSTING_READ(VLV_IER);
2637 }
2638
2639 static void gen8_irq_preinstall(struct drm_device *dev)
2640 {
2641         struct drm_i915_private *dev_priv = dev->dev_private;
2642         int pipe;
2643
2644         atomic_set(&dev_priv->irq_received, 0);
2645
2646         I915_WRITE(GEN8_MASTER_IRQ, 0);
2647         POSTING_READ(GEN8_MASTER_IRQ);
2648
2649         /* IIR can theoretically queue up two events. Be paranoid */
2650 #define GEN8_IRQ_INIT_NDX(type, which) do { \
2651                 I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
2652                 POSTING_READ(GEN8_##type##_IMR(which)); \
2653                 I915_WRITE(GEN8_##type##_IER(which), 0); \
2654                 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
2655                 POSTING_READ(GEN8_##type##_IIR(which)); \
2656                 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
2657         } while (0)
2658
2659 #define GEN8_IRQ_INIT(type) do { \
2660                 I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \
2661                 POSTING_READ(GEN8_##type##_IMR); \
2662                 I915_WRITE(GEN8_##type##_IER, 0); \
2663                 I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
2664                 POSTING_READ(GEN8_##type##_IIR); \
2665                 I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
2666         } while (0)
2667
2668         GEN8_IRQ_INIT_NDX(GT, 0);
2669         GEN8_IRQ_INIT_NDX(GT, 1);
2670         GEN8_IRQ_INIT_NDX(GT, 2);
2671         GEN8_IRQ_INIT_NDX(GT, 3);
2672
2673         for_each_pipe(pipe) {
2674                 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe);
2675         }
2676
2677         GEN8_IRQ_INIT(DE_PORT);
2678         GEN8_IRQ_INIT(DE_MISC);
2679         GEN8_IRQ_INIT(PCU);
2680 #undef GEN8_IRQ_INIT
2681 #undef GEN8_IRQ_INIT_NDX
2682
2683         POSTING_READ(GEN8_PCU_IIR);
2684 }
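
/*
 * Editor's note: a toy model of why the GEN8_IRQ_INIT macros above write
 * IIR twice. IIR can hold one asserted event with a second queued behind
 * it, so a single write-to-clear can leave the queued event live. The
 * two-slot latch below is purely illustrative, not real hardware.
 */
#if 0
#include <assert.h>
#include <stdint.h>

static uint32_t iir_live, iir_queued;

static void iir_write_to_clear(uint32_t mask)
{
        iir_live &= ~mask;
        iir_live |= iir_queued;         /* queued event pops into view */
        iir_queued = 0;
}

int main(void)
{
        iir_live = 0x1;
        iir_queued = 0x1;               /* two events latched          */

        iir_write_to_clear(0xffffffff);
        assert(iir_live == 0x1);        /* first write: still pending  */
        iir_write_to_clear(0xffffffff);
        assert(iir_live == 0x0);        /* second write: fully clear   */
        return 0;
}
#endif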
2685
2686 static void ibx_hpd_irq_setup(struct drm_device *dev)
2687 {
2688         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2689         struct drm_mode_config *mode_config = &dev->mode_config;
2690         struct intel_encoder *intel_encoder;
2691         u32 hotplug_irqs, hotplug, enabled_irqs = 0;
2692
2693         if (HAS_PCH_IBX(dev)) {
2694                 hotplug_irqs = SDE_HOTPLUG_MASK;
2695                 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
2696                         if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
2697                                 enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
2698         } else {
2699                 hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
2700                 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
2701                         if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
2702                                 enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
2703         }
2704
2705         ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
2706
2707         /*
2708          * Enable digital hotplug on the PCH, and configure the DP short pulse
2709          * duration to 2ms (which is the minimum in the Display Port spec)
2710          *
2711          * This register is the same on all known PCH chips.
2712          */
2713         hotplug = I915_READ(PCH_PORT_HOTPLUG);
2714         hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
2715         hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
2716         hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
2717         hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
2718         I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
2719 }
2720
2721 static void ibx_irq_postinstall(struct drm_device *dev)
2722 {
2723         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2724         u32 mask;
2725
2726         if (HAS_PCH_NOP(dev))
2727                 return;
2728
2729         if (HAS_PCH_IBX(dev)) {
2730                 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_TRANSB_FIFO_UNDER |
2731                        SDE_TRANSA_FIFO_UNDER | SDE_POISON;
2732         } else {
2733                 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT | SDE_ERROR_CPT;
2734
2735                 I915_WRITE(SERR_INT, I915_READ(SERR_INT));
2736         }
2737
2738         I915_WRITE(SDEIIR, I915_READ(SDEIIR));
2739         I915_WRITE(SDEIMR, ~mask);
2740 }
2741
2742 static void gen5_gt_irq_postinstall(struct drm_device *dev)
2743 {
2744         struct drm_i915_private *dev_priv = dev->dev_private;
2745         u32 pm_irqs, gt_irqs;
2746
2747         pm_irqs = gt_irqs = 0;
2748
2749         dev_priv->gt_irq_mask = ~0;
2750         if (HAS_L3_DPF(dev)) {
2751                 /* L3 parity interrupt is always unmasked. */
2752                 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
2753                 gt_irqs |= GT_PARITY_ERROR(dev);
2754         }
2755
2756         gt_irqs |= GT_RENDER_USER_INTERRUPT;
2757         if (IS_GEN5(dev)) {
2758                 gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
2759                            ILK_BSD_USER_INTERRUPT;
2760         } else {
2761                 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
2762         }
2763
2764         I915_WRITE(GTIIR, I915_READ(GTIIR));
2765         I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
2766         I915_WRITE(GTIER, gt_irqs);
2767         POSTING_READ(GTIER);
2768
2769         if (INTEL_INFO(dev)->gen >= 6) {
2770                 pm_irqs |= GEN6_PM_RPS_EVENTS;
2771
2772                 if (HAS_VEBOX(dev))
2773                         pm_irqs |= PM_VEBOX_USER_INTERRUPT;
2774
2775                 dev_priv->pm_irq_mask = 0xffffffff;
2776                 I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
2777                 I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
2778                 I915_WRITE(GEN6_PMIER, pm_irqs);
2779                 POSTING_READ(GEN6_PMIER);
2780         }
2781 }
2782
2783 static int ironlake_irq_postinstall(struct drm_device *dev)
2784 {
2785         unsigned long irqflags;
2786         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2787         u32 display_mask, extra_mask;
2788
2789         if (INTEL_INFO(dev)->gen >= 7) {
2790                 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
2791                                 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
2792                                 DE_PLANEB_FLIP_DONE_IVB |
2793                                 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB |
2794                                 DE_ERR_INT_IVB);
2795                 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
2796                               DE_PIPEA_VBLANK_IVB);
2797
2798                 I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
2799         } else {
2800                 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
2801                                 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
2802                                 DE_AUX_CHANNEL_A |
2803                                 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
2804                                 DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
2805                                 DE_POISON);
2806                 extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT;
2807         }
2808
2809         dev_priv->irq_mask = ~display_mask;
2810
2811         /* Should always be able to generate interrupts */
2812         I915_WRITE(DEIIR, I915_READ(DEIIR));
2813         I915_WRITE(DEIMR, dev_priv->irq_mask);
2814         I915_WRITE(DEIER, display_mask | extra_mask);
2815         POSTING_READ(DEIER);
2816
2817         gen5_gt_irq_postinstall(dev);
2818
2819         ibx_irq_postinstall(dev);
2820
2821         if (IS_IRONLAKE_M(dev)) {
2822                 /* Enable PCU event interrupts
2823                  *
2824                  * spinlocking not required here for correctness since interrupt
2825                  * setup is guaranteed to run in single-threaded context. But we
2826                  * need it to make the assert_spin_locked happy. */
2827                 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2828                 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
2829                 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2830         }
2831
2832         return 0;
2833 }
2834
2835 static int valleyview_irq_postinstall(struct drm_device *dev)
2836 {
2837         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2838         u32 enable_mask;
2839         u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV |
2840                 PIPE_CRC_DONE_ENABLE;
2841         unsigned long irqflags;
2842
2843         enable_mask = I915_DISPLAY_PORT_INTERRUPT;
2844         enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2845                 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
2846                 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2847                 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
2848
2849         /*
2850          * Leave vblank interrupts masked initially; the enable_vblank and
2851          * disable_vblank hooks will toggle them based on usage.
2852          */
2853         dev_priv->irq_mask = (~enable_mask) |
2854                 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
2855                 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
2856
2857         I915_WRITE(PORT_HOTPLUG_EN, 0);
2858         POSTING_READ(PORT_HOTPLUG_EN);
2859
2860         I915_WRITE(VLV_IMR, dev_priv->irq_mask);
2861         I915_WRITE(VLV_IER, enable_mask);
2862         I915_WRITE(VLV_IIR, 0xffffffff);
2863         I915_WRITE(PIPESTAT(0), 0xffff);
2864         I915_WRITE(PIPESTAT(1), 0xffff);
2865         POSTING_READ(VLV_IER);
2866
2867         /* Interrupt setup is already guaranteed to be single-threaded; this is
2868          * just to make the assert_spin_locked check happy. */
2869         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2870         i915_enable_pipestat(dev_priv, PIPE_A, pipestat_enable);
2871         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_EVENT_ENABLE);
2872         i915_enable_pipestat(dev_priv, PIPE_B, pipestat_enable);
2873         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2874
2875         I915_WRITE(VLV_IIR, 0xffffffff);
2876         I915_WRITE(VLV_IIR, 0xffffffff);
2877
2878         gen5_gt_irq_postinstall(dev);
2879
2880         /* ack & enable invalid PTE error interrupts */
2881 #if 0 /* FIXME: add support to irq handler for checking these bits */
2882         I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
2883         I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
2884 #endif
2885
2886         I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
2887
2888         return 0;
2889 }
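
/*
 * The vblank bits masked above are flipped at runtime by the
 * enable_vblank/disable_vblank hooks installed in intel_irq_init().
 * A sketch of what the enable side amounts to (illustrative shape
 * only; the real valleyview_enable_vblank lives earlier in this file):
 */
#if 0 /* illustrative sketch */
static int vlv_vblank_unmask_sketch(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;
	u32 bit = pipe == PIPE_A ? I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT :
				   I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	dev_priv->irq_mask &= ~bit;
	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}
#endif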
2890
2891 static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
2892 {
2893         int i;
2894
2895         /* These are interrupts we'll toggle with the ring mask register */
2896         uint32_t gt_interrupts[] = {
2897                 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
2898                         GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
2899                         GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
2900                 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
2901                         GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
2902                 0,
2903                 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT
2904         };
2905
2906         for (i = 0; i < ARRAY_SIZE(gt_interrupts); i++) {
2907                 u32 tmp = I915_READ(GEN8_GT_IIR(i));
2908                 if (tmp)
2909                         DRM_ERROR("Interrupt (%d) should have been masked in pre-install 0x%08x\n",
2910                                   i, tmp);
2911                 I915_WRITE(GEN8_GT_IMR(i), ~gt_interrupts[i]);
2912                 I915_WRITE(GEN8_GT_IER(i), gt_interrupts[i]);
2913         }
2914         POSTING_READ(GEN8_GT_IER(0));
2915 }
2916
2917 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
2918 {
2919         struct drm_device *dev = dev_priv->dev;
2920         uint32_t de_pipe_masked = GEN8_PIPE_FLIP_DONE |
2921                 GEN8_PIPE_CDCLK_CRC_DONE |
2922                 GEN8_PIPE_FIFO_UNDERRUN |
2923                 GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2924         uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK;
2925         int pipe;
2926         dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
2927         dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
2928         dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
2929
2930         for_each_pipe(pipe) {
2931                 u32 tmp = I915_READ(GEN8_DE_PIPE_IIR(pipe));
2932                 if (tmp)
2933                         DRM_ERROR("Interrupt (%d) should have been masked in pre-install 0x%08x\n",
2934                                   pipe, tmp);
2935                 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2936                 I915_WRITE(GEN8_DE_PIPE_IER(pipe), de_pipe_enables);
2937         }
2938         POSTING_READ(GEN8_DE_PIPE_ISR(0));
2939
2940         I915_WRITE(GEN8_DE_PORT_IMR, ~GEN8_AUX_CHANNEL_A);
2941         I915_WRITE(GEN8_DE_PORT_IER, GEN8_AUX_CHANNEL_A);
2942         POSTING_READ(GEN8_DE_PORT_IER);
2943 }
2944
2945 static int gen8_irq_postinstall(struct drm_device *dev)
2946 {
2947         struct drm_i915_private *dev_priv = dev->dev_private;
2948
2949         gen8_gt_irq_postinstall(dev_priv);
2950         gen8_de_irq_postinstall(dev_priv);
2951
2952         ibx_irq_postinstall(dev);
2953
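        /* Enable the master interrupt last, so nothing can be delivered
         * before every per-bank mask above is in place. */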
2954         I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
2955         POSTING_READ(GEN8_MASTER_IRQ);
2956
2957         return 0;
2958 }
2959
2960 static void gen8_irq_uninstall(struct drm_device *dev)
2961 {
2962         struct drm_i915_private *dev_priv = dev->dev_private;
2963         int pipe;
2964
2965         if (!dev_priv)
2966                 return;
2967
2968         atomic_set(&dev_priv->irq_received, 0);
2969
2970         I915_WRITE(GEN8_MASTER_IRQ, 0);
2971
2972 #define GEN8_IRQ_FINI_NDX(type, which) do { \
2973                 I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
2974                 I915_WRITE(GEN8_##type##_IER(which), 0); \
2975                 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
2976         } while (0)
2977
2978 #define GEN8_IRQ_FINI(type) do { \
2979                 I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \
2980                 I915_WRITE(GEN8_##type##_IER, 0); \
2981                 I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
2982         } while (0)
2983
2984         GEN8_IRQ_FINI_NDX(GT, 0);
2985         GEN8_IRQ_FINI_NDX(GT, 1);
2986         GEN8_IRQ_FINI_NDX(GT, 2);
2987         GEN8_IRQ_FINI_NDX(GT, 3);
2988
2989         for_each_pipe(pipe) {
2990                 GEN8_IRQ_FINI_NDX(DE_PIPE, pipe);
2991         }
2992
2993         GEN8_IRQ_FINI(DE_PORT);
2994         GEN8_IRQ_FINI(DE_MISC);
2995         GEN8_IRQ_FINI(PCU);
2996 #undef GEN8_IRQ_FINI
2997 #undef GEN8_IRQ_FINI_NDX
2998
2999         POSTING_READ(GEN8_PCU_IIR);
3000 }
3001
3002 static void valleyview_irq_uninstall(struct drm_device *dev)
3003 {
3004         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3005         int pipe;
3006
3007         if (!dev_priv)
3008                 return;
3009
3010         del_timer_sync(&dev_priv->hotplug_reenable_timer);
3011
3012         for_each_pipe(pipe)
3013                 I915_WRITE(PIPESTAT(pipe), 0xffff);
3014
3015         I915_WRITE(HWSTAM, 0xffffffff);
3016         I915_WRITE(PORT_HOTPLUG_EN, 0);
3017         I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3018         for_each_pipe(pipe)
3019                 I915_WRITE(PIPESTAT(pipe), 0xffff);
3020         I915_WRITE(VLV_IIR, 0xffffffff);
3021         I915_WRITE(VLV_IMR, 0xffffffff);
3022         I915_WRITE(VLV_IER, 0x0);
3023         POSTING_READ(VLV_IER);
3024 }
3025
3026 static void ironlake_irq_uninstall(struct drm_device *dev)
3027 {
3028         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3029
3030         if (!dev_priv)
3031                 return;
3032
3033         del_timer_sync(&dev_priv->hotplug_reenable_timer);
3034
3035         I915_WRITE(HWSTAM, 0xffffffff);
3036
3037         I915_WRITE(DEIMR, 0xffffffff);
3038         I915_WRITE(DEIER, 0x0);
3039         I915_WRITE(DEIIR, I915_READ(DEIIR));
3040         if (IS_GEN7(dev))
3041                 I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
3042
3043         I915_WRITE(GTIMR, 0xffffffff);
3044         I915_WRITE(GTIER, 0x0);
3045         I915_WRITE(GTIIR, I915_READ(GTIIR));
3046
3047         if (HAS_PCH_NOP(dev))
3048                 return;
3049
3050         I915_WRITE(SDEIMR, 0xffffffff);
3051         I915_WRITE(SDEIER, 0x0);
3052         I915_WRITE(SDEIIR, I915_READ(SDEIIR));
3053         if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
3054                 I915_WRITE(SERR_INT, I915_READ(SERR_INT));
3055 }
3056
3057 static void i8xx_irq_preinstall(struct drm_device * dev)
3058 {
3059         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3060         int pipe;
3061
3062         atomic_set(&dev_priv->irq_received, 0);
3063
3064         for_each_pipe(pipe)
3065                 I915_WRITE(PIPESTAT(pipe), 0);
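        /* gen2 exposes IMR/IER/IIR as 16 bit registers, hence the 16 bit
         * accessors here and throughout the i8xx paths. */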
3066         I915_WRITE16(IMR, 0xffff);
3067         I915_WRITE16(IER, 0x0);
3068         POSTING_READ16(IER);
3069 }
3070
3071 static int i8xx_irq_postinstall(struct drm_device *dev)
3072 {
3073         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3074         unsigned long irqflags;
3075
3076         I915_WRITE16(EMR,
3077                      ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3078
3079         /* Unmask the interrupts that we always want on. */
3080         dev_priv->irq_mask =
3081                 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3082                   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3083                   I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3084                   I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
3085                   I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
3086         I915_WRITE16(IMR, dev_priv->irq_mask);
3087
3088         I915_WRITE16(IER,
3089                      I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3090                      I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3091                      I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
3092                      I915_USER_INTERRUPT);
3093         POSTING_READ16(IER);
3094
3095         /* Interrupt setup is already guaranteed to be single-threaded; this is
3096          * just to make the assert_spin_locked check happy. */
3097         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3098         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE);
3099         i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE);
3100         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3101
3102         return 0;
3103 }
3104
3105 /*
3106  * Returns true when a page flip has completed.
3107  */
3108 static bool i8xx_handle_vblank(struct drm_device *dev,
3109                                int pipe, u16 iir)
3110 {
3111         drm_i915_private_t *dev_priv = dev->dev_private;
3112         u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(pipe);
3113
3114         if (!drm_handle_vblank(dev, pipe))
3115                 return false;
3116
3117         if ((iir & flip_pending) == 0)
3118                 return false;
3119
3120         intel_prepare_page_flip(dev, pipe);
3121
3122         /* We detect FlipDone by looking for the change in PendingFlip from '1'
3123          * to '0' on the following vblank, i.e. IIR has the PendingFlip
3124          * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
3125          * the flip is completed (no longer pending). Since this doesn't raise
3126          * an interrupt per se, we watch for the change at vblank.
3127          */
3128         if (I915_READ16(ISR) & flip_pending)
3129                 return false;
3130
3131         intel_finish_page_flip(dev, pipe);
3132
3133         return true;
3134 }
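
/*
 * Timeline of the FlipDone detection above, for a single plane:
 *
 *   MI_DISPLAY_FLIP issued      ->  ISR PendingFlip = 1, IIR latches it
 *   vblank with flip completed  ->  ISR PendingFlip = 0, IIR bit still set
 *
 * So "IIR set but ISR clear at vblank" means the flip finished, while
 * "IIR and ISR both set" means it is still pending and will be checked
 * again on the next vblank.
 */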
3135
3136 static irqreturn_t i8xx_irq_handler(int irq, void *arg)
3137 {
3138         struct drm_device *dev = (struct drm_device *) arg;
3139         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3140         u16 iir, new_iir;
3141         u32 pipe_stats[2];
3142         unsigned long irqflags;
3143         int pipe;
3144         u16 flip_mask =
3145                 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3146                 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3147
3148         atomic_inc(&dev_priv->irq_received);
3149
3150         iir = I915_READ16(IIR);
3151         if (iir == 0)
3152                 return IRQ_NONE;
3153
3154         while (iir & ~flip_mask) {
3155                 /* Can't rely on pipestat interrupt bit in iir as it might
3156                  * have been cleared after the pipestat interrupt was received.
3157                  * It doesn't set the bit in iir again, but it still produces
3158                  * interrupts (for non-MSI).
3159                  */
3160                 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3161                 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
3162                         i915_handle_error(dev, false);
3163
3164                 for_each_pipe(pipe) {
3165                         int reg = PIPESTAT(pipe);
3166                         pipe_stats[pipe] = I915_READ(reg);
3167
3168                         /*
3169                          * Clear the PIPE*STAT regs before the IIR
3170                          */
3171                         if (pipe_stats[pipe] & 0x8000ffff) {
3172                                 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
3173                                         DRM_DEBUG_DRIVER("pipe %c underrun\n",
3174                                                          pipe_name(pipe));
3175                                 I915_WRITE(reg, pipe_stats[pipe]);
3176                         }
3177                 }
3178                 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3179
3180                 I915_WRITE16(IIR, iir & ~flip_mask);
3181                 new_iir = I915_READ16(IIR); /* Flush posted writes */
3182
3183                 i915_update_dri1_breadcrumb(dev);
3184
3185                 if (iir & I915_USER_INTERRUPT)
3186                         notify_ring(dev, &dev_priv->ring[RCS]);
3187
3188                 for_each_pipe(pipe) {
3189                         if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
3190                             i8xx_handle_vblank(dev, pipe, iir))
3191                                 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
3192
3193                         if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
3194                                 i9xx_pipe_crc_irq_handler(dev, pipe);
3195                 }
3196
3197                 iir = new_iir;
3198         }
3199
3200         return IRQ_HANDLED;
3201 }
3202
3203 static void i8xx_irq_uninstall(struct drm_device * dev)
3204 {
3205         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3206         int pipe;
3207
3208         for_each_pipe(pipe) {
3209                 /* Clear enable bits; then clear status bits */
3210                 I915_WRITE(PIPESTAT(pipe), 0);
3211                 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
3212         }
3213         I915_WRITE16(IMR, 0xffff);
3214         I915_WRITE16(IER, 0x0);
3215         I915_WRITE16(IIR, I915_READ16(IIR));
3216 }
3217
3218 static void i915_irq_preinstall(struct drm_device * dev)
3219 {
3220         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3221         int pipe;
3222
3223         atomic_set(&dev_priv->irq_received, 0);
3224
3225         if (I915_HAS_HOTPLUG(dev)) {
3226                 I915_WRITE(PORT_HOTPLUG_EN, 0);
3227                 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3228         }
3229
3230         I915_WRITE16(HWSTAM, 0xeffe);
3231         for_each_pipe(pipe)
3232                 I915_WRITE(PIPESTAT(pipe), 0);
3233         I915_WRITE(IMR, 0xffffffff);
3234         I915_WRITE(IER, 0x0);
3235         POSTING_READ(IER);
3236 }
3237
3238 static int i915_irq_postinstall(struct drm_device *dev)
3239 {
3240         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3241         u32 enable_mask;
3242         unsigned long irqflags;
3243
3244         I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3245
3246         /* Unmask the interrupts that we always want on. */
3247         dev_priv->irq_mask =
3248                 ~(I915_ASLE_INTERRUPT |
3249                   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3250                   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3251                   I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3252                   I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
3253                   I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
3254
3255         enable_mask =
3256                 I915_ASLE_INTERRUPT |
3257                 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3258                 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3259                 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
3260                 I915_USER_INTERRUPT;
3261
3262         if (I915_HAS_HOTPLUG(dev)) {
3263                 I915_WRITE(PORT_HOTPLUG_EN, 0);
3264                 POSTING_READ(PORT_HOTPLUG_EN);
3265
3266                 /* Enable in IER... */
3267                 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
3268                 /* and unmask in IMR */
3269                 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
3270         }
3271
3272         I915_WRITE(IMR, dev_priv->irq_mask);
3273         I915_WRITE(IER, enable_mask);
3274         POSTING_READ(IER);
3275
3276         i915_enable_asle_pipestat(dev);
3277
3278         /* Interrupt setup is already guaranteed to be single-threaded; this is
3279          * just to make the assert_spin_locked check happy. */
3280         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3281         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE);
3282         i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE);
3283         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3284
3285         return 0;
3286 }
3287
3288 /*
3289  * Returns true when a page flip has completed.
3290  */
3291 static bool i915_handle_vblank(struct drm_device *dev,
3292                                int plane, int pipe, u32 iir)
3293 {
3294         drm_i915_private_t *dev_priv = dev->dev_private;
3295         u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
3296
3297         if (!drm_handle_vblank(dev, pipe))
3298                 return false;
3299
3300         if ((iir & flip_pending) == 0)
3301                 return false;
3302
3303         intel_prepare_page_flip(dev, plane);
3304
3305         /* We detect FlipDone by looking for the change in PendingFlip from '1'
3306          * to '0' on the following vblank, i.e. IIR has the PendingFlip
3307          * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
3308          * the flip is completed (no longer pending). Since this doesn't raise
3309          * an interrupt per se, we watch for the change at vblank.
3310          */
3311         if (I915_READ(ISR) & flip_pending)
3312                 return false;
3313
3314         intel_finish_page_flip(dev, pipe);
3315
3316         return true;
3317 }
3318
3319 static irqreturn_t i915_irq_handler(int irq, void *arg)
3320 {
3321         struct drm_device *dev = (struct drm_device *) arg;
3322         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3323         u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
3324         unsigned long irqflags;
3325         u32 flip_mask =
3326                 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3327                 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3328         int pipe, ret = IRQ_NONE;
3329
3330         atomic_inc(&dev_priv->irq_received);
3331
3332         iir = I915_READ(IIR);
3333         do {
3334                 bool irq_received = (iir & ~flip_mask) != 0;
3335                 bool blc_event = false;
3336
3337                 /* Can't rely on pipestat interrupt bit in iir as it might
3338                  * have been cleared after the pipestat interrupt was received.
3339                  * It doesn't set the bit in iir again, but it still produces
3340                  * interrupts (for non-MSI).
3341                  */
3342                 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3343                 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
3344                         i915_handle_error(dev, false);
3345
3346                 for_each_pipe(pipe) {
3347                         int reg = PIPESTAT(pipe);
3348                         pipe_stats[pipe] = I915_READ(reg);
3349
3350                         /* Clear the PIPE*STAT regs before the IIR */
3351                         if (pipe_stats[pipe] & 0x8000ffff) {
3352                                 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
3353                                         DRM_DEBUG_DRIVER("pipe %c underrun\n",
3354                                                          pipe_name(pipe));
3355                                 I915_WRITE(reg, pipe_stats[pipe]);
3356                                 irq_received = true;
3357                         }
3358                 }
3359                 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3360
3361                 if (!irq_received)
3362                         break;
3363
3364                 /* Consume port.  Then clear IIR or we'll miss events */
3365                 if ((I915_HAS_HOTPLUG(dev)) &&
3366                     (iir & I915_DISPLAY_PORT_INTERRUPT)) {
3367                         u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
3368                         u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
3369
3370                         DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
3371                                   hotplug_status);
3372
3373                         intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);
3374
3375                         I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
3376                         POSTING_READ(PORT_HOTPLUG_STAT);
3377                 }
3378
3379                 I915_WRITE(IIR, iir & ~flip_mask);
3380                 new_iir = I915_READ(IIR); /* Flush posted writes */
3381
3382                 if (iir & I915_USER_INTERRUPT)
3383                         notify_ring(dev, &dev_priv->ring[RCS]);
3384
3385                 for_each_pipe(pipe) {
3386                         int plane = pipe;
3387                         if (IS_MOBILE(dev))
3388                                 plane = !plane;
3389
3390                         if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
3391                             i915_handle_vblank(dev, plane, pipe, iir))
3392                                 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
3393
3394                         if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
3395                                 blc_event = true;
3396
3397                         if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
3398                                 i9xx_pipe_crc_irq_handler(dev, pipe);
3399                 }
3400
3401                 if (blc_event || (iir & I915_ASLE_INTERRUPT))
3402                         intel_opregion_asle_intr(dev);
3403
3404                 /* With MSI, interrupts are only generated when iir
3405                  * transitions from zero to nonzero.  If another bit got
3406                  * set while we were handling the existing iir bits, then
3407                  * we would never get another interrupt.
3408                  *
3409                  * This is fine on non-MSI as well, as if we hit this path
3410                  * we avoid exiting the interrupt handler only to generate
3411                  * another one.
3412                  *
3413                  * Note that for MSI this could cause a stray interrupt report
3414                  * if an interrupt landed in the time between writing IIR and
3415                  * the posting read.  This should be rare enough to never
3416                  * trigger the 99% of 100,000 interrupts test for disabling
3417                  * stray interrupts.
3418                  */
3419                 ret = IRQ_HANDLED;
3420                 iir = new_iir;
3421         } while (iir & ~flip_mask);
3422
3423         i915_update_dri1_breadcrumb(dev);
3424
3425         return ret;
3426 }
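
/*
 * Stripped of the per-chip details, the legacy handlers in this file
 * all follow the same ack-and-reread shape that the MSI comment above
 * justifies (body fragment only, illustrative):
 */
#if 0 /* illustrative fragment */
	iir = I915_READ(IIR);
	while (iir) {
		/* ... decode and handle the bits latched in iir ... */
		I915_WRITE(IIR, iir);	/* ack what was handled */
		iir = I915_READ(IIR);	/* flush, and pick up new bits */
	}
#endif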
3427
3428 static void i915_irq_uninstall(struct drm_device * dev)
3429 {
3430         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3431         int pipe;
3432
3433         del_timer_sync(&dev_priv->hotplug_reenable_timer);
3434
3435         if (I915_HAS_HOTPLUG(dev)) {
3436                 I915_WRITE(PORT_HOTPLUG_EN, 0);
3437                 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3438         }
3439
3440         I915_WRITE16(HWSTAM, 0xffff);
3441         for_each_pipe(pipe) {
3442                 /* Clear enable bits; then clear status bits */
3443                 I915_WRITE(PIPESTAT(pipe), 0);
3444                 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
3445         }
3446         I915_WRITE(IMR, 0xffffffff);
3447         I915_WRITE(IER, 0x0);
3448
3449         I915_WRITE(IIR, I915_READ(IIR));
3450 }
3451
3452 static void i965_irq_preinstall(struct drm_device * dev)
3453 {
3454         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3455         int pipe;
3456
3457         atomic_set(&dev_priv->irq_received, 0);
3458
3459         I915_WRITE(PORT_HOTPLUG_EN, 0);
3460         I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3461
3462         I915_WRITE(HWSTAM, 0xeffe);
3463         for_each_pipe(pipe)
3464                 I915_WRITE(PIPESTAT(pipe), 0);
3465         I915_WRITE(IMR, 0xffffffff);
3466         I915_WRITE(IER, 0x0);
3467         POSTING_READ(IER);
3468 }
3469
3470 static int i965_irq_postinstall(struct drm_device *dev)
3471 {
3472         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3473         u32 enable_mask;
3474         u32 error_mask;
3475         unsigned long irqflags;
3476
3477         /* Unmask the interrupts that we always want on. */
3478         dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
3479                                I915_DISPLAY_PORT_INTERRUPT |
3480                                I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3481                                I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3482                                I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3483                                I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
3484                                I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
3485
3486         enable_mask = ~dev_priv->irq_mask;
3487         enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3488                          I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
3489         enable_mask |= I915_USER_INTERRUPT;
3490
3491         if (IS_G4X(dev))
3492                 enable_mask |= I915_BSD_USER_INTERRUPT;
3493
3494         /* Interrupt setup is already guaranteed to be single-threaded; this is
3495          * just to make the assert_spin_locked check happy. */
3496         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3497         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_EVENT_ENABLE);
3498         i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE);
3499         i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE);
3500         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3501
3502         /*
3503          * Enable some error detection, note the instruction error mask
3504          * bit is reserved, so we leave it masked.
3505          */
3506         if (IS_G4X(dev)) {
3507                 error_mask = ~(GM45_ERROR_PAGE_TABLE |
3508                                GM45_ERROR_MEM_PRIV |
3509                                GM45_ERROR_CP_PRIV |
3510                                I915_ERROR_MEMORY_REFRESH);
3511         } else {
3512                 error_mask = ~(I915_ERROR_PAGE_TABLE |
3513                                I915_ERROR_MEMORY_REFRESH);
3514         }
3515         I915_WRITE(EMR, error_mask);
3516
3517         I915_WRITE(IMR, dev_priv->irq_mask);
3518         I915_WRITE(IER, enable_mask);
3519         POSTING_READ(IER);
3520
3521         I915_WRITE(PORT_HOTPLUG_EN, 0);
3522         POSTING_READ(PORT_HOTPLUG_EN);
3523
3524         i915_enable_asle_pipestat(dev);
3525
3526         return 0;
3527 }
3528
3529 static void i915_hpd_irq_setup(struct drm_device *dev)
3530 {
3531         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3532         struct drm_mode_config *mode_config = &dev->mode_config;
3533         struct intel_encoder *intel_encoder;
3534         u32 hotplug_en;
3535
3536         assert_spin_locked(&dev_priv->irq_lock);
3537
3538         if (I915_HAS_HOTPLUG(dev)) {
3539                 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
3540                 hotplug_en &= ~HOTPLUG_INT_EN_MASK;
3541                 /* Note HDMI and DP share hotplug bits */
3542                 /* enable bits are the same for all generations */
3543                 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
3544                         if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
3545                                 hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
3546                 /* Programming the CRT detection parameters tends
3547                  * to generate a spurious hotplug event about three
3548                  * seconds later.  So just do it once.
3549                  */
3550                 if (IS_G4X(dev))
3551                         hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
3552                 hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
3553                 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
3554
3555                 /* Ignore TV since it's buggy */
3556                 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
3557         }
3558 }
3559
3560 static irqreturn_t i965_irq_handler(int irq, void *arg)
3561 {
3562         struct drm_device *dev = (struct drm_device *) arg;
3563         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3564         u32 iir, new_iir;
3565         u32 pipe_stats[I915_MAX_PIPES];
3566         unsigned long irqflags;
3567         int irq_received;
3568         int ret = IRQ_NONE, pipe;
3569         u32 flip_mask =
3570                 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3571                 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3572
3573         atomic_inc(&dev_priv->irq_received);
3574
3575         iir = I915_READ(IIR);
3576
3577         for (;;) {
3578                 bool blc_event = false;
3579
3580                 irq_received = (iir & ~flip_mask) != 0;
3581
3582                 /* Can't rely on pipestat interrupt bit in iir as it might
3583                  * have been cleared after the pipestat interrupt was received.
3584                  * It doesn't set the bit in iir again, but it still produces
3585                  * interrupts (for non-MSI).
3586                  */
3587                 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3588                 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
3589                         i915_handle_error(dev, false);
3590
3591                 for_each_pipe(pipe) {
3592                         int reg = PIPESTAT(pipe);
3593                         pipe_stats[pipe] = I915_READ(reg);
3594
3595                         /*
3596                          * Clear the PIPE*STAT regs before the IIR
3597                          */
3598                         if (pipe_stats[pipe] & 0x8000ffff) {
3599                                 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
3600                                         DRM_DEBUG_DRIVER("pipe %c underrun\n",
3601                                                          pipe_name(pipe));
3602                                 I915_WRITE(reg, pipe_stats[pipe]);
3603                                 irq_received = 1;
3604                         }
3605                 }
3606                 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3607
3608                 if (!irq_received)
3609                         break;
3610
3611                 ret = IRQ_HANDLED;
3612
3613                 /* Consume port.  Then clear IIR or we'll miss events */
3614                 if (iir & I915_DISPLAY_PORT_INTERRUPT) {
3615                         u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
3616                         u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ?
3617                                                                   HOTPLUG_INT_STATUS_G4X :
3618                                                                   HOTPLUG_INT_STATUS_I915);
3619
3620                         DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
3621                                   hotplug_status);
3622
3623                         intel_hpd_irq_handler(dev, hotplug_trigger,
3624                                               IS_G4X(dev) ? hpd_status_gen4 : hpd_status_i915);
3625
3626                         I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
3627                         I915_READ(PORT_HOTPLUG_STAT);
3628                 }
3629
3630                 I915_WRITE(IIR, iir & ~flip_mask);
3631                 new_iir = I915_READ(IIR); /* Flush posted writes */
3632
3633                 if (iir & I915_USER_INTERRUPT)
3634                         notify_ring(dev, &dev_priv->ring[RCS]);
3635                 if (iir & I915_BSD_USER_INTERRUPT)
3636                         notify_ring(dev, &dev_priv->ring[VCS]);
3637
3638                 for_each_pipe(pipe) {
3639                         if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
3640                             i915_handle_vblank(dev, pipe, pipe, iir))
3641                                 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
3642
3643                         if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
3644                                 blc_event = true;
3645
3646                         if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
3647                                 i9xx_pipe_crc_irq_handler(dev, pipe);
3648                 }
3649
3650
3651                 if (blc_event || (iir & I915_ASLE_INTERRUPT))
3652                         intel_opregion_asle_intr(dev);
3653
3654                 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
3655                         gmbus_irq_handler(dev);
3656
3657                 /* With MSI, interrupts are only generated when iir
3658                  * transitions from zero to nonzero.  If another bit got
3659                  * set while we were handling the existing iir bits, then
3660                  * we would never get another interrupt.
3661                  *
3662                  * This is fine on non-MSI as well, as if we hit this path
3663                  * we avoid exiting the interrupt handler only to generate
3664                  * another one.
3665                  *
3666                  * Note that for MSI this could cause a stray interrupt report
3667                  * if an interrupt landed in the time between writing IIR and
3668                  * the posting read.  This should be rare enough to never
3669                  * trigger the 99% of 100,000 interrupts test for disabling
3670                  * stray interrupts.
3671                  */
3672                 iir = new_iir;
3673         }
3674
3675         i915_update_dri1_breadcrumb(dev);
3676
3677         return ret;
3678 }
3679
3680 static void i965_irq_uninstall(struct drm_device * dev)
3681 {
3682         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3683         int pipe;
3684
3685         if (!dev_priv)
3686                 return;
3687
3688         del_timer_sync(&dev_priv->hotplug_reenable_timer);
3689
3690         I915_WRITE(PORT_HOTPLUG_EN, 0);
3691         I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3692
3693         I915_WRITE(HWSTAM, 0xffffffff);
3694         for_each_pipe(pipe)
3695                 I915_WRITE(PIPESTAT(pipe), 0);
3696         I915_WRITE(IMR, 0xffffffff);
3697         I915_WRITE(IER, 0x0);
3698
3699         for_each_pipe(pipe)
3700                 I915_WRITE(PIPESTAT(pipe),
3701                            I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
3702         I915_WRITE(IIR, I915_READ(IIR));
3703 }
3704
3705 static void i915_reenable_hotplug_timer_func(unsigned long data)
3706 {
3707         drm_i915_private_t *dev_priv = (drm_i915_private_t *)data;
3708         struct drm_device *dev = dev_priv->dev;
3709         struct drm_mode_config *mode_config = &dev->mode_config;
3710         unsigned long irqflags;
3711         int i;
3712
3713         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3714         for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
3715                 struct drm_connector *connector;
3716
3717                 if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
3718                         continue;
3719
3720                 dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
3721
3722                 list_for_each_entry(connector, &mode_config->connector_list, head) {
3723                         struct intel_connector *intel_connector = to_intel_connector(connector);
3724
3725                         if (intel_connector->encoder->hpd_pin == i) {
3726                                 if (connector->polled != intel_connector->polled)
3727                                         DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
3728                                                          drm_get_connector_name(connector));
3729                                 connector->polled = intel_connector->polled;
3730                                 if (!connector->polled)
3731                                         connector->polled = DRM_CONNECTOR_POLL_HPD;
3732                         }
3733                 }
3734         }
3735         if (dev_priv->display.hpd_irq_setup)
3736                 dev_priv->display.hpd_irq_setup(dev);
3737         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3738 }
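
/*
 * This is the second half of the HPD storm handling: when the hotplug
 * handler decides a pin is generating an interrupt storm it marks the
 * pin disabled and arms hotplug_reenable_timer; the function above then
 * promotes the pin back to HPD_ENABLED once things have calmed down.
 * Arming the timer amounts to roughly the following (sketch; the exact
 * delay constant lives with the storm detection code):
 */
#if 0 /* illustrative fragment */
	dev_priv->hpd_stats[i].hpd_mark = HPD_DISABLED;
	mod_timer(&dev_priv->hotplug_reenable_timer,
		  jiffies + msecs_to_jiffies(reenable_delay_ms));
#endif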
3739
3740 void intel_irq_init(struct drm_device *dev)
3741 {
3742         struct drm_i915_private *dev_priv = dev->dev_private;
3743
3744         INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
3745         INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
3746         INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
3747         INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
3748
3749         setup_timer(&dev_priv->gpu_error.hangcheck_timer,
3750                     i915_hangcheck_elapsed,
3751                     (unsigned long) dev);
3752         setup_timer(&dev_priv->hotplug_reenable_timer, i915_reenable_hotplug_timer_func,
3753                     (unsigned long) dev_priv);
3754
3755         pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
3756
3757         if (IS_GEN2(dev)) {
3758                 dev->max_vblank_count = 0;
3759                 dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
3760         } else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
3761                 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
3762                 dev->driver->get_vblank_counter = gm45_get_vblank_counter;
3763         } else {
3764                 dev->driver->get_vblank_counter = i915_get_vblank_counter;
3765                 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
3766         }
3767
3768         if (drm_core_check_feature(dev, DRIVER_MODESET)) {
3769                 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
3770                 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
3771         }
3772
3773         if (IS_VALLEYVIEW(dev)) {
3774                 dev->driver->irq_handler = valleyview_irq_handler;
3775                 dev->driver->irq_preinstall = valleyview_irq_preinstall;
3776                 dev->driver->irq_postinstall = valleyview_irq_postinstall;
3777                 dev->driver->irq_uninstall = valleyview_irq_uninstall;
3778                 dev->driver->enable_vblank = valleyview_enable_vblank;
3779                 dev->driver->disable_vblank = valleyview_disable_vblank;
3780                 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
3781         } else if (IS_GEN8(dev)) {
3782                 dev->driver->irq_handler = gen8_irq_handler;
3783                 dev->driver->irq_preinstall = gen8_irq_preinstall;
3784                 dev->driver->irq_postinstall = gen8_irq_postinstall;
3785                 dev->driver->irq_uninstall = gen8_irq_uninstall;
3786                 dev->driver->enable_vblank = gen8_enable_vblank;
3787                 dev->driver->disable_vblank = gen8_disable_vblank;
3788                 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
3789         } else if (HAS_PCH_SPLIT(dev)) {
3790                 dev->driver->irq_handler = ironlake_irq_handler;
3791                 dev->driver->irq_preinstall = ironlake_irq_preinstall;
3792                 dev->driver->irq_postinstall = ironlake_irq_postinstall;
3793                 dev->driver->irq_uninstall = ironlake_irq_uninstall;
3794                 dev->driver->enable_vblank = ironlake_enable_vblank;
3795                 dev->driver->disable_vblank = ironlake_disable_vblank;
3796                 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
3797         } else {
3798                 if (INTEL_INFO(dev)->gen == 2) {
3799                         dev->driver->irq_preinstall = i8xx_irq_preinstall;
3800                         dev->driver->irq_postinstall = i8xx_irq_postinstall;
3801                         dev->driver->irq_handler = i8xx_irq_handler;
3802                         dev->driver->irq_uninstall = i8xx_irq_uninstall;
3803                 } else if (INTEL_INFO(dev)->gen == 3) {
3804                         dev->driver->irq_preinstall = i915_irq_preinstall;
3805                         dev->driver->irq_postinstall = i915_irq_postinstall;
3806                         dev->driver->irq_uninstall = i915_irq_uninstall;
3807                         dev->driver->irq_handler = i915_irq_handler;
3808                         dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
3809                 } else {
3810                         dev->driver->irq_preinstall = i965_irq_preinstall;
3811                         dev->driver->irq_postinstall = i965_irq_postinstall;
3812                         dev->driver->irq_uninstall = i965_irq_uninstall;
3813                         dev->driver->irq_handler = i965_irq_handler;
3814                         dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
3815                 }
3816                 dev->driver->enable_vblank = i915_enable_vblank;
3817                 dev->driver->disable_vblank = i915_disable_vblank;
3818         }
3819 }
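
/*
 * None of these hooks are called directly by i915; the drm core invokes
 * them from drm_irq_install()/drm_irq_uninstall() in the order
 * preinstall -> request_irq() -> postinstall, which is why the
 * postinstall functions can assume single-threaded setup. Roughly
 * (sketch of the core's sequence, not i915 code):
 */
#if 0 /* illustrative fragment */
	dev->driver->irq_preinstall(dev);	/* mask and ack everything */
	ret = request_irq(drm_dev_to_irq(dev), dev->driver->irq_handler,
			  IRQF_SHARED, dev->driver->name, dev);
	if (ret == 0)
		ret = dev->driver->irq_postinstall(dev); /* unmask wanted bits */
#endif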
3820
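/*
 * Called from the driver load and resume paths: resets the per-pin
 * storm statistics, resyncs connector polling state and reprograms the
 * platform's hotplug registers via the hpd_irq_setup hook.
 */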
3821 void intel_hpd_init(struct drm_device *dev)
3822 {
3823         struct drm_i915_private *dev_priv = dev->dev_private;
3824         struct drm_mode_config *mode_config = &dev->mode_config;
3825         struct drm_connector *connector;
3826         unsigned long irqflags;
3827         int i;
3828
3829         for (i = 1; i < HPD_NUM_PINS; i++) {
3830                 dev_priv->hpd_stats[i].hpd_cnt = 0;
3831                 dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
3832         }
3833         list_for_each_entry(connector, &mode_config->connector_list, head) {
3834                 struct intel_connector *intel_connector = to_intel_connector(connector);
3835                 connector->polled = intel_connector->polled;
3836                 if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
3837                         connector->polled = DRM_CONNECTOR_POLL_HPD;
3838         }
3839
3840         /* Interrupt setup is already guaranteed to be single-threaded; this is
3841          * just to make the assert_spin_locked checks happy. */
3842         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3843         if (dev_priv->display.hpd_irq_setup)
3844                 dev_priv->display.hpd_irq_setup(dev);
3845         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3846 }
3847
3848 /* Disable interrupts so we can allow Package C8+. */
3849 void hsw_pc8_disable_interrupts(struct drm_device *dev)
3850 {
3851         struct drm_i915_private *dev_priv = dev->dev_private;
3852         unsigned long irqflags;
3853
3854         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3855
3856         dev_priv->pc8.regsave.deimr = I915_READ(DEIMR);
3857         dev_priv->pc8.regsave.sdeimr = I915_READ(SDEIMR);
3858         dev_priv->pc8.regsave.gtimr = I915_READ(GTIMR);
3859         dev_priv->pc8.regsave.gtier = I915_READ(GTIER);
3860         dev_priv->pc8.regsave.gen6_pmimr = I915_READ(GEN6_PMIMR);
3861
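        /* Everything except the PCH event and PCH hotplug bits is masked
         * below, so a plug/unplug can still get us out of PC8. */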
3862         ironlake_disable_display_irq(dev_priv, ~DE_PCH_EVENT_IVB);
3863         ibx_disable_display_interrupt(dev_priv, ~SDE_HOTPLUG_MASK_CPT);
3864         ilk_disable_gt_irq(dev_priv, 0xffffffff);
3865         snb_disable_pm_irq(dev_priv, 0xffffffff);
3866
3867         dev_priv->pc8.irqs_disabled = true;
3868
3869         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3870 }
3871
3872 /* Restore interrupts so we can recover from Package C8+. */
3873 void hsw_pc8_restore_interrupts(struct drm_device *dev)
3874 {
3875         struct drm_i915_private *dev_priv = dev->dev_private;
3876         unsigned long irqflags;
3877         uint32_t val, expected;
3878
3879         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3880
3881         val = I915_READ(DEIMR);
3882         expected = ~DE_PCH_EVENT_IVB;
3883         WARN(val != expected, "DEIMR is 0x%08x, not 0x%08x\n", val, expected);
3884
3885         val = I915_READ(SDEIMR) & ~SDE_HOTPLUG_MASK_CPT;
3886         expected = ~SDE_HOTPLUG_MASK_CPT;
3887         WARN(val != expected, "SDEIMR non-HPD bits are 0x%08x, not 0x%08x\n",
3888              val, expected);
3889
3890         val = I915_READ(GTIMR);
3891         expected = 0xffffffff;
3892         WARN(val != expected, "GTIMR is 0x%08x, not 0x%08x\n", val, expected);
3893
3894         val = I915_READ(GEN6_PMIMR);
3895         expected = 0xffffffff;
3896         WARN(val != expected, "GEN6_PMIMR is 0x%08x, not 0x%08x\n", val,
3897              expected);
3898
3899         dev_priv->pc8.irqs_disabled = false;
3900
3901         ironlake_enable_display_irq(dev_priv, ~dev_priv->pc8.regsave.deimr);
3902         ibx_enable_display_interrupt(dev_priv,
3903                                      ~dev_priv->pc8.regsave.sdeimr &
3904                                      ~SDE_HOTPLUG_MASK_CPT);
3905         ilk_enable_gt_irq(dev_priv, ~dev_priv->pc8.regsave.gtimr);
3906         snb_enable_pm_irq(dev_priv, ~dev_priv->pc8.regsave.gen6_pmimr);
3907         I915_WRITE(GTIER, dev_priv->pc8.regsave.gtier);
3908
3909         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3910 }
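
/*
 * The two helpers above are called as a pair by the Package C8 enable
 * and disable paths. pc8.regsave carries the interrupt mask state
 * across the low-power episode, and pc8.irqs_disabled tells the various
 * mask helpers to defer register updates while the hardware is powered
 * down.
 */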