/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

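/*
 * Per-platform tables mapping hotplug pins (enum hpd_pin) to the
 * corresponding hotplug interrupt enable/status bits.
 */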
static const u32 hpd_ibx[] = {
        [HPD_CRT] = SDE_CRT_HOTPLUG,
        [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
        [HPD_PORT_B] = SDE_PORTB_HOTPLUG,
        [HPD_PORT_C] = SDE_PORTC_HOTPLUG,
        [HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[] = {
        [HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
        [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
        [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
        [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
        [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_mask_i915[] = {
        [HPD_CRT] = CRT_HOTPLUG_INT_EN,
        [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
        [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
        [HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
        [HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
        [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_gen4[] = {
        [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
        [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
        [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
        [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
        [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
        [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
        [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
        [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
        [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
        [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
        [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
        [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
        assert_spin_locked(&dev_priv->irq_lock);

        if ((dev_priv->irq_mask & mask) != 0) {
                dev_priv->irq_mask &= ~mask;
                I915_WRITE(DEIMR, dev_priv->irq_mask);
                POSTING_READ(DEIMR);
        }
}

static void
ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
        assert_spin_locked(&dev_priv->irq_lock);

        if ((dev_priv->irq_mask & mask) != mask) {
                dev_priv->irq_mask |= mask;
                I915_WRITE(DEIMR, dev_priv->irq_mask);
                POSTING_READ(DEIMR);
        }
}

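/*
 * IVB has a single DE_ERR_INT_IVB enable bit shared by all pipes, so the
 * error interrupt may only be enabled when underrun reporting is enabled
 * on every pipe.
 */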
static bool ivb_can_enable_err_int(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *crtc;
        enum pipe pipe;

        assert_spin_locked(&dev_priv->irq_lock);

        for_each_pipe(pipe) {
                crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

                if (crtc->cpu_fifo_underrun_disabled)
                        return false;
        }

        return true;
}

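/*
 * Same story on the PCH side: SDE_ERROR_CPT is shared by all transcoders,
 * so the south error interrupt may only be enabled when underrun reporting
 * is enabled on every transcoder.
 */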
static bool cpt_can_enable_serr_int(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum pipe pipe;
        struct intel_crtc *crtc;

        assert_spin_locked(&dev_priv->irq_lock);

        for_each_pipe(pipe) {
                crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

                if (crtc->pch_fifo_underrun_disabled)
                        return false;
        }

        return true;
}

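/* ILK/SNB have a dedicated FIFO underrun interrupt bit per pipe in DEIMR. */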
static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
                                                 enum pipe pipe, bool enable)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
                                          DE_PIPEB_FIFO_UNDERRUN;

        if (enable)
                ironlake_enable_display_irq(dev_priv, bit);
        else
                ironlake_disable_display_irq(dev_priv, bit);
}

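/*
 * IVB/HSW report FIFO underruns through the shared GEN7_ERR_INT register:
 * clear any stale underrun bit for this pipe before (re)enabling the shared
 * error interrupt, and when disabling it, check for underruns that arrived
 * while the interrupt was masked.
 */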
static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
                                                  enum pipe pipe, bool enable)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        if (enable) {
                I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));

                if (!ivb_can_enable_err_int(dev))
                        return;

                ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
        } else {
                bool was_enabled = !(I915_READ(DEIMR) & DE_ERR_INT_IVB);

                /* Change the state _after_ we've read out the current one. */
                ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);

                if (!was_enabled &&
                    (I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe))) {
                        DRM_DEBUG_KMS("uncleared fifo underrun on pipe %c\n",
                                      pipe_name(pipe));
                }
        }
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
                                         uint32_t interrupt_mask,
                                         uint32_t enabled_irq_mask)
{
        uint32_t sdeimr = I915_READ(SDEIMR);
        sdeimr &= ~interrupt_mask;
        sdeimr |= (~enabled_irq_mask & interrupt_mask);

        assert_spin_locked(&dev_priv->irq_lock);

        I915_WRITE(SDEIMR, sdeimr);
        POSTING_READ(SDEIMR);
}
#define ibx_enable_display_interrupt(dev_priv, bits) \
        ibx_display_interrupt_update((dev_priv), (bits), (bits))
#define ibx_disable_display_interrupt(dev_priv, bits) \
        ibx_display_interrupt_update((dev_priv), (bits), 0)

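/* IBX has a dedicated FIFO underrun interrupt bit per transcoder in SDEIMR. */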
static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
                                            enum transcoder pch_transcoder,
                                            bool enable)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
                       SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;

        if (enable)
                ibx_enable_display_interrupt(dev_priv, bit);
        else
                ibx_disable_display_interrupt(dev_priv, bit);
}

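/*
 * CPT/PPT report FIFO underruns through the shared SERR_INT register,
 * mirroring the GEN7_ERR_INT logic above for the PCH transcoders.
 */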
static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
                                            enum transcoder pch_transcoder,
                                            bool enable)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (enable) {
                I915_WRITE(SERR_INT,
                           SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));

                if (!cpt_can_enable_serr_int(dev))
                        return;

                ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
        } else {
                uint32_t tmp = I915_READ(SERR_INT);
                bool was_enabled = !(I915_READ(SDEIMR) & SDE_ERROR_CPT);

                /* Change the state _after_ we've read out the current one. */
                ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);

                if (!was_enabled &&
                    (tmp & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder))) {
                        DRM_DEBUG_KMS("uncleared pch fifo underrun on pch transcoder %c\n",
                                      transcoder_name(pch_transcoder));
                }
        }
}

/**
 * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pipe: pipe
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable CPU fifo underruns for a specific
 * pipe. Notice that on some Gens (e.g. IVB, HSW), disabling FIFO underrun
 * reporting for one pipe may also disable all the other CPU error interrupts for
 * the other pipes, due to the fact that there's just one interrupt mask/enable
 * bit for all the pipes.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
                                           enum pipe pipe, bool enable)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        unsigned long flags;
        bool ret;

        spin_lock_irqsave(&dev_priv->irq_lock, flags);

        ret = !intel_crtc->cpu_fifo_underrun_disabled;

        if (enable == ret)
                goto done;

        intel_crtc->cpu_fifo_underrun_disabled = !enable;

        if (IS_GEN5(dev) || IS_GEN6(dev))
                ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
        else if (IS_GEN7(dev))
                ivybridge_set_fifo_underrun_reporting(dev, pipe, enable);

done:
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
        return ret;
}

/**
 * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable PCH fifo underruns for a specific
 * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
 * underrun reporting for one transcoder may also disable all the other PCH
 * error interrupts for the other transcoders, due to the fact that there's just
 * one interrupt mask/enable bit for all the transcoders.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
                                           enum transcoder pch_transcoder,
                                           bool enable)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        unsigned long flags;
        bool ret;

        /*
         * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT
         * has only one pch transcoder A that all pipes can use. To avoid racy
         * pch transcoder -> pipe lookups from interrupt code simply store the
         * underrun statistics in crtc A. Since we never expose this anywhere
         * nor use it outside of the fifo underrun code here using the "wrong"
         * crtc on LPT won't cause issues.
         */

        spin_lock_irqsave(&dev_priv->irq_lock, flags);

        ret = !intel_crtc->pch_fifo_underrun_disabled;

        if (enable == ret)
                goto done;

        intel_crtc->pch_fifo_underrun_disabled = !enable;

        if (HAS_PCH_IBX(dev))
                ibx_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
        else
                cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable);

done:
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
        return ret;
}

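/*
 * PIPESTAT registers keep the interrupt enable bits in their upper half
 * and the corresponding status bits in the lower half: the mask passed in
 * carries the enable bits, and (mask >> 16) addresses the status bits.
 */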
void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
        u32 reg = PIPESTAT(pipe);
        u32 pipestat = I915_READ(reg) & 0x7fff0000;

        assert_spin_locked(&dev_priv->irq_lock);

        if ((pipestat & mask) == mask)
                return;

        /* Enable the interrupt, clear any pending status */
        pipestat |= mask | (mask >> 16);
        I915_WRITE(reg, pipestat);
        POSTING_READ(reg);
}

void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
        u32 reg = PIPESTAT(pipe);
        u32 pipestat = I915_READ(reg) & 0x7fff0000;

        assert_spin_locked(&dev_priv->irq_lock);

        if ((pipestat & mask) == 0)
                return;

        pipestat &= ~mask;
        I915_WRITE(reg, pipestat);
        POSTING_READ(reg);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        unsigned long irqflags;

        if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
                return;

        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

        i915_enable_pipestat(dev_priv, 1, PIPE_LEGACY_BLC_EVENT_ENABLE);
        if (INTEL_INFO(dev)->gen >= 4)
                i915_enable_pipestat(dev_priv, 0, PIPE_LEGACY_BLC_EVENT_ENABLE);

        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
                /* Locking is horribly broken here, but whatever. */
                struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
                struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

                return intel_crtc->active;
        } else {
                return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
        }
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long high_frame;
        unsigned long low_frame;
        u32 high1, high2, low;

        if (!i915_pipe_enabled(dev, pipe)) {
                DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
                                "pipe %c\n", pipe_name(pipe));
                return 0;
        }

        high_frame = PIPEFRAME(pipe);
        low_frame = PIPEFRAMEPIXEL(pipe);

        /*
         * High & low register fields aren't synchronized, so make sure
         * we get a low value that's stable across two reads of the high
         * register.
         */
        do {
                high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
                low   = I915_READ(low_frame)  & PIPE_FRAME_LOW_MASK;
                high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
        } while (high1 != high2);

        high1 >>= PIPE_FRAME_HIGH_SHIFT;
        low >>= PIPE_FRAME_LOW_SHIFT;
        return (high1 << 8) | low;
}

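/*
 * gm45 and later have a free-running hardware frame counter register, so
 * no high/low readout dance is needed.
 */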
static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int reg = PIPE_FRMCOUNT_GM45(pipe);

        if (!i915_pipe_enabled(dev, pipe)) {
                DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
                                 "pipe %c\n", pipe_name(pipe));
                return 0;
        }

        return I915_READ(reg);
}

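/*
 * Sample the current scanout position for @pipe: gen4+ only expose the
 * vertical scanline (PIPEDSL), while older parts provide a pixel count
 * from the start of the frame that can be split into vpos/hpos.
 */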
static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
                             int *vpos, int *hpos)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        u32 vbl = 0, position = 0;
        int vbl_start, vbl_end, htotal, vtotal;
        bool in_vbl = true;
        int ret = 0;
        enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
                                                                      pipe);

        if (!i915_pipe_enabled(dev, pipe)) {
                DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
                                 "pipe %c\n", pipe_name(pipe));
                return 0;
        }

        /* Get vtotal. */
        vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff);

        if (INTEL_INFO(dev)->gen >= 4) {
                /* No obvious pixelcount register. Only query vertical
                 * scanout position from Display scan line register.
                 */
                position = I915_READ(PIPEDSL(pipe));

                /* Decode into vertical scanout position. Don't have
                 * horizontal scanout position.
                 */
                *vpos = position & 0x1fff;
                *hpos = 0;
        } else {
                /* Have access to pixelcount since start of frame.
                 * We can split this into vertical and horizontal
                 * scanout position.
                 */
                position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

                htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
                *vpos = position / htotal;
                *hpos = position - (*vpos * htotal);
        }

        /* Query vblank area. */
        vbl = I915_READ(VBLANK(cpu_transcoder));

        /* Test position against vblank region. */
        vbl_start = vbl & 0x1fff;
        vbl_end = (vbl >> 16) & 0x1fff;

        if ((*vpos < vbl_start) || (*vpos > vbl_end))
                in_vbl = false;

        /* Inside "upper part" of vblank area? Apply corrective offset: */
        if (in_vbl && (*vpos >= vbl_start))
                *vpos = *vpos - vtotal;

        /* Readouts valid? */
        if (vbl > 0)
                ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

        /* In vblank? */
        if (in_vbl)
                ret |= DRM_SCANOUTPOS_INVBL;

        return ret;
}

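/*
 * Compute a precise vblank timestamp for @pipe by feeding our
 * scanout-position readout into the DRM core helper.
 */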
static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
                              int *max_error,
                              struct timeval *vblank_time,
                              unsigned flags)
{
        struct drm_crtc *crtc;

        if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
                DRM_ERROR("Invalid crtc %d\n", pipe);
                return -EINVAL;
        }

        /* Get drm_crtc to timestamp: */
        crtc = intel_get_crtc_for_pipe(dev, pipe);
        if (crtc == NULL) {
                DRM_ERROR("Invalid crtc %d\n", pipe);
                return -EINVAL;
        }

        if (!crtc->enabled) {
                DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
                return -EBUSY;
        }

        /* Helper routine in DRM core does all the work: */
        return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
                                                     vblank_time, flags,
                                                     crtc);
}

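/*
 * Re-detect the connector and return true if its status changed.
 * Caller must hold mode_config.mutex.
 */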
static int intel_hpd_irq_event(struct drm_device *dev, struct drm_connector *connector)
{
        enum drm_connector_status old_status;

        WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
        old_status = connector->status;

        connector->status = connector->funcs->detect(connector, false);
        DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
                      connector->base.id,
                      drm_get_connector_name(connector),
                      old_status, connector->status);
        return (old_status != connector->status);
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)

static void i915_hotplug_work_func(struct work_struct *work)
{
        drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
                                                    hotplug_work);
        struct drm_device *dev = dev_priv->dev;
        struct drm_mode_config *mode_config = &dev->mode_config;
        struct intel_connector *intel_connector;
        struct intel_encoder *intel_encoder;
        struct drm_connector *connector;
        unsigned long irqflags;
        bool hpd_disabled = false;
        bool changed = false;
        u32 hpd_event_bits;

        /* HPD irq before everything is fully set up. */
        if (!dev_priv->enable_hotplug_processing)
                return;

        mutex_lock(&mode_config->mutex);
        DRM_DEBUG_KMS("running encoder hotplug functions\n");

        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

        hpd_event_bits = dev_priv->hpd_event_bits;
        dev_priv->hpd_event_bits = 0;
        list_for_each_entry(connector, &mode_config->connector_list, head) {
                intel_connector = to_intel_connector(connector);
                intel_encoder = intel_connector->encoder;
                if (intel_encoder->hpd_pin > HPD_NONE &&
                    dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
                    connector->polled == DRM_CONNECTOR_POLL_HPD) {
                        DRM_INFO("HPD interrupt storm detected on connector %s: "
                                 "switching from hotplug detection to polling\n",
                                drm_get_connector_name(connector));
                        dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
                        connector->polled = DRM_CONNECTOR_POLL_CONNECT
                                | DRM_CONNECTOR_POLL_DISCONNECT;
                        hpd_disabled = true;
                }
                if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
                        DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
                                      drm_get_connector_name(connector), intel_encoder->hpd_pin);
                }
        }
        /* If there were no outputs to poll, polling was disabled, so make
         * sure it gets re-enabled now that we're switching some connectors
         * from HPD to polling. */
        if (hpd_disabled) {
                drm_kms_helper_poll_enable(dev);
                mod_timer(&dev_priv->hotplug_reenable_timer,
                          jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
        }

        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

        list_for_each_entry(connector, &mode_config->connector_list, head) {
                intel_connector = to_intel_connector(connector);
                intel_encoder = intel_connector->encoder;
                if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
                        if (intel_encoder->hot_plug)
                                intel_encoder->hot_plug(intel_encoder);
                        if (intel_hpd_irq_event(dev, connector))
                                changed = true;
                }
        }
        mutex_unlock(&mode_config->mutex);

        if (changed)
                drm_kms_helper_hotplug_event(dev);
}

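/*
 * ILK GPU frequency (DRPS) interrupt: step the delay one unit in response
 * to the hardware's busyness evaluation, clamped to the min/max delays.
 */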
static void ironlake_rps_change_irq_handler(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        u32 busy_up, busy_down, max_avg, min_avg;
        u8 new_delay;

        spin_lock(&mchdev_lock);

        I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

        new_delay = dev_priv->ips.cur_delay;

        I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
        busy_up = I915_READ(RCPREVBSYTUPAVG);
        busy_down = I915_READ(RCPREVBSYTDNAVG);
        max_avg = I915_READ(RCBMAXAVG);
        min_avg = I915_READ(RCBMINAVG);

        /* Handle RCS change request from hw */
        if (busy_up > max_avg) {
                if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
                        new_delay = dev_priv->ips.cur_delay - 1;
                if (new_delay < dev_priv->ips.max_delay)
                        new_delay = dev_priv->ips.max_delay;
        } else if (busy_down < min_avg) {
                if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
                        new_delay = dev_priv->ips.cur_delay + 1;
                if (new_delay > dev_priv->ips.min_delay)
                        new_delay = dev_priv->ips.min_delay;
        }

        if (ironlake_set_drps(dev, new_delay))
                dev_priv->ips.cur_delay = new_delay;

        spin_unlock(&mchdev_lock);

        return;
}

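/* Wake up anyone waiting on this ring and kick off hangcheck. */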
static void notify_ring(struct drm_device *dev,
                        struct intel_ring_buffer *ring)
{
        if (ring->obj == NULL)
                return;

        trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false));

        wake_up_all(&ring->irq_queue);
        i915_queue_hangcheck(dev);
}

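/*
 * Deferred RPS work: pick up the interrupt bits stashed by the IRQ
 * handler, unmask the RPS events again, and step the GPU frequency up or
 * down (clamped to the sysfs-visible min/max limits).
 */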
static void gen6_pm_rps_work(struct work_struct *work)
{
        drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
                                                    rps.work);
        u32 pm_iir, pm_imr;
        u8 new_delay;

        spin_lock_irq(&dev_priv->irq_lock);
        pm_iir = dev_priv->rps.pm_iir;
        dev_priv->rps.pm_iir = 0;
        pm_imr = I915_READ(GEN6_PMIMR);
        /* Make sure not to corrupt PMIMR state used by ringbuffer code */
        I915_WRITE(GEN6_PMIMR, pm_imr & ~GEN6_PM_RPS_EVENTS);
        spin_unlock_irq(&dev_priv->irq_lock);

        if ((pm_iir & GEN6_PM_RPS_EVENTS) == 0)
                return;

        mutex_lock(&dev_priv->rps.hw_lock);

        if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
                new_delay = dev_priv->rps.cur_delay + 1;

                /*
                 * For better performance, jump directly
                 * to RPe if we're below it.
                 */
                if (IS_VALLEYVIEW(dev_priv->dev) &&
                    dev_priv->rps.cur_delay < dev_priv->rps.rpe_delay)
                        new_delay = dev_priv->rps.rpe_delay;
        } else
                new_delay = dev_priv->rps.cur_delay - 1;

        /* sysfs frequency interfaces may have snuck in while servicing the
         * interrupt
         */
        if (new_delay >= dev_priv->rps.min_delay &&
            new_delay <= dev_priv->rps.max_delay) {
                if (IS_VALLEYVIEW(dev_priv->dev))
                        valleyview_set_rps(dev_priv->dev, new_delay);
                else
                        gen6_set_rps(dev_priv->dev, new_delay);
        }

        if (IS_VALLEYVIEW(dev_priv->dev)) {
                /*
                 * On VLV, when we enter RC6 we may not be at the minimum
                 * voltage level, so arm a timer to check.  It should only
                 * fire when there's activity or once after we've entered
                 * RC6, and then won't be re-armed until the next RPS interrupt.
                 */
                mod_delayed_work(dev_priv->wq, &dev_priv->rps.vlv_work,
                                 msecs_to_jiffies(100));
        }

        mutex_unlock(&dev_priv->rps.hw_lock);
}


/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * the same row is likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
        drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
                                                    l3_parity.error_work);
        u32 error_status, row, bank, subbank;
        char *parity_event[5];
        uint32_t misccpctl;
        unsigned long flags;

        /* We must turn off DOP level clock gating to access the L3 registers.
         * In order to prevent a get/put style interface, acquire struct mutex
         * any time we access those registers.
         */
        mutex_lock(&dev_priv->dev->struct_mutex);

        misccpctl = I915_READ(GEN7_MISCCPCTL);
        I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
        POSTING_READ(GEN7_MISCCPCTL);

        error_status = I915_READ(GEN7_L3CDERRST1);
        row = GEN7_PARITY_ERROR_ROW(error_status);
        bank = GEN7_PARITY_ERROR_BANK(error_status);
        subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

        I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID |
                                    GEN7_L3CDERRST1_ENABLE);
        POSTING_READ(GEN7_L3CDERRST1);

        I915_WRITE(GEN7_MISCCPCTL, misccpctl);

        spin_lock_irqsave(&dev_priv->irq_lock, flags);
        dev_priv->gt_irq_mask &= ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
        I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

        mutex_unlock(&dev_priv->dev->struct_mutex);

        parity_event[0] = I915_L3_PARITY_UEVENT "=1";
        parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
        parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
        parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
        parity_event[4] = NULL;

        kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj,
                           KOBJ_CHANGE, parity_event);

        DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n",
                  row, bank, subbank);

        kfree(parity_event[3]);
        kfree(parity_event[2]);
        kfree(parity_event[1]);
}

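/*
 * An L3 parity error was signalled: mask the interrupt until the deferred
 * work has read out and cleared the error, then queue that work.
 */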
static void ivybridge_parity_error_irq_handler(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

        if (!HAS_L3_GPU_CACHE(dev))
                return;

        spin_lock(&dev_priv->irq_lock);
        dev_priv->gt_irq_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
        I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
        spin_unlock(&dev_priv->irq_lock);

        queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

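/* GT interrupt dispatch for ILK: just render and bsd ring notifications. */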
static void ilk_gt_irq_handler(struct drm_device *dev,
                               struct drm_i915_private *dev_priv,
                               u32 gt_iir)
{
        if (gt_iir &
            (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
                notify_ring(dev, &dev_priv->ring[RCS]);
        if (gt_iir & ILK_BSD_USER_INTERRUPT)
                notify_ring(dev, &dev_priv->ring[VCS]);
}

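/*
 * GT interrupt dispatch for SNB+: ring notifications plus command
 * streamer error and L3 parity handling.
 */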
static void snb_gt_irq_handler(struct drm_device *dev,
                               struct drm_i915_private *dev_priv,
                               u32 gt_iir)
{
        if (gt_iir &
            (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
                notify_ring(dev, &dev_priv->ring[RCS]);
        if (gt_iir & GT_BSD_USER_INTERRUPT)
                notify_ring(dev, &dev_priv->ring[VCS]);
        if (gt_iir & GT_BLT_USER_INTERRUPT)
                notify_ring(dev, &dev_priv->ring[BCS]);

        if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
                      GT_BSD_CS_ERROR_INTERRUPT |
                      GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
                DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
                i915_handle_error(dev, false);
        }

        if (gt_iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
                ivybridge_parity_error_irq_handler(dev);
}

/* Legacy way of handling PM interrupts */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv,
                                 u32 pm_iir)
{
        /*
         * IIR bits should never already be set because IMR should
         * prevent an interrupt from being shown in IIR. The warning
         * catches a case where we've unsafely cleared
         * dev_priv->rps.pm_iir. Although missing an interrupt of the same
         * type is not a problem, it indicates a problem in the logic.
         *
         * The mask bit in IMR is cleared by dev_priv->rps.work.
         */

        spin_lock(&dev_priv->irq_lock);
        dev_priv->rps.pm_iir |= pm_iir;
        I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
        POSTING_READ(GEN6_PMIMR);
        spin_unlock(&dev_priv->irq_lock);

        queue_work(dev_priv->wq, &dev_priv->rps.work);
}

#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_THRESHOLD 5

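/*
 * Track hotplug interrupts per pin; if more than HPD_STORM_THRESHOLD
 * interrupts arrive within HPD_STORM_DETECT_PERIOD ms, declare an IRQ
 * storm, disable the pin and fall back to polling.
 */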
static inline void intel_hpd_irq_handler(struct drm_device *dev,
                                         u32 hotplug_trigger,
                                         const u32 *hpd)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        int i;
        bool storm_detected = false;

        if (!hotplug_trigger)
                return;

        spin_lock(&dev_priv->irq_lock);
        for (i = 1; i < HPD_NUM_PINS; i++) {
                WARN(((hpd[i] & hotplug_trigger) &&
                      dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED),
                     "Received HPD interrupt although disabled\n");

                if (!(hpd[i] & hotplug_trigger) ||
                    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
                        continue;

                dev_priv->hpd_event_bits |= (1 << i);
                if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
                                   dev_priv->hpd_stats[i].hpd_last_jiffies
                                   + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
                        dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
                        dev_priv->hpd_stats[i].hpd_cnt = 0;
                        DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i);
                } else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
                        dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
                        dev_priv->hpd_event_bits &= ~(1 << i);
                        DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
                        storm_detected = true;
                } else {
                        dev_priv->hpd_stats[i].hpd_cnt++;
                        DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i,
                                      dev_priv->hpd_stats[i].hpd_cnt);
                }
        }

        if (storm_detected)
                dev_priv->display.hpd_irq_setup(dev);
        spin_unlock(&dev_priv->irq_lock);

        queue_work(dev_priv->wq,
                   &dev_priv->hotplug_work);
}

static void gmbus_irq_handler(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

        wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

        wake_up_all(&dev_priv->gmbus_wait_queue);
}

/* Unlike gen6_rps_irq_handler() from which this function is originally derived,
 * we must be able to deal with other PM interrupts. This is complicated because
 * of the way in which we use the masks to defer the RPS work (which for
 * posterity is necessary because of forcewake).
 */
static void hsw_pm_irq_handler(struct drm_i915_private *dev_priv,
                               u32 pm_iir)
{
        if (pm_iir & GEN6_PM_RPS_EVENTS) {
                spin_lock(&dev_priv->irq_lock);
                dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS;
                I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
                /* never want to mask useful interrupts. (also posting read) */
                WARN_ON(I915_READ_NOTRACE(GEN6_PMIMR) & ~GEN6_PM_RPS_EVENTS);
                spin_unlock(&dev_priv->irq_lock);

                queue_work(dev_priv->wq, &dev_priv->rps.work);
        }

        if (pm_iir & PM_VEBOX_USER_INTERRUPT)
                notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);

        if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
                DRM_ERROR("VEBOX CS error interrupt 0x%08x\n", pm_iir);
                i915_handle_error(dev_priv->dev, false);
        }
}

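/*
 * Top-level interrupt handler for VLV: loop until VLV_IIR, GTIIR and
 * GEN6_PMIIR are all clear, handling GT, pipe, hotplug, GMBUS and RPS
 * events along the way.
 */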
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
        struct drm_device *dev = (struct drm_device *) arg;
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        u32 iir, gt_iir, pm_iir;
        irqreturn_t ret = IRQ_NONE;
        unsigned long irqflags;
        int pipe;
        u32 pipe_stats[I915_MAX_PIPES];

        atomic_inc(&dev_priv->irq_received);

        while (true) {
                iir = I915_READ(VLV_IIR);
                gt_iir = I915_READ(GTIIR);
                pm_iir = I915_READ(GEN6_PMIIR);

                if (gt_iir == 0 && pm_iir == 0 && iir == 0)
                        goto out;

                ret = IRQ_HANDLED;

                snb_gt_irq_handler(dev, dev_priv, gt_iir);

                spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
                for_each_pipe(pipe) {
                        int reg = PIPESTAT(pipe);
                        pipe_stats[pipe] = I915_READ(reg);

                        /*
                         * Clear the PIPE*STAT regs before the IIR
                         */
                        if (pipe_stats[pipe] & 0x8000ffff) {
                                if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
                                        DRM_DEBUG_DRIVER("pipe %c underrun\n",
                                                         pipe_name(pipe));
                                I915_WRITE(reg, pipe_stats[pipe]);
                        }
                }
                spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

                for_each_pipe(pipe) {
                        if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
                                drm_handle_vblank(dev, pipe);

                        if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
                                intel_prepare_page_flip(dev, pipe);
                                intel_finish_page_flip(dev, pipe);
                        }
                }

                /* Consume port.  Then clear IIR or we'll miss events */
                if (iir & I915_DISPLAY_PORT_INTERRUPT) {
                        u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
                        u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

                        DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
                                         hotplug_status);

                        intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);

                        I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
                        I915_READ(PORT_HOTPLUG_STAT);
                }

                if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
                        gmbus_irq_handler(dev);

                if (pm_iir & GEN6_PM_RPS_EVENTS)
                        gen6_rps_irq_handler(dev_priv, pm_iir);

                I915_WRITE(GTIIR, gt_iir);
                I915_WRITE(GEN6_PMIIR, pm_iir);
                I915_WRITE(VLV_IIR, iir);
        }

out:
        return ret;
}

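/* South (PCH) display interrupt dispatch for IBX. */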
static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int pipe;
        u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;

        intel_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);

        if (pch_iir & SDE_AUDIO_POWER_MASK) {
                int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
                               SDE_AUDIO_POWER_SHIFT);
                DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
                                 port_name(port));
        }

        if (pch_iir & SDE_AUX_MASK)
                dp_aux_irq_handler(dev);

        if (pch_iir & SDE_GMBUS)
                gmbus_irq_handler(dev);

        if (pch_iir & SDE_AUDIO_HDCP_MASK)
                DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

        if (pch_iir & SDE_AUDIO_TRANS_MASK)
                DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

        if (pch_iir & SDE_POISON)
                DRM_ERROR("PCH poison interrupt\n");

        if (pch_iir & SDE_FDI_MASK)
                for_each_pipe(pipe)
                        DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
                                         pipe_name(pipe),
                                         I915_READ(FDI_RX_IIR(pipe)));

        if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
                DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

        if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
                DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

        if (pch_iir & SDE_TRANSA_FIFO_UNDER)
                if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
                                                          false))
                        DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");

        if (pch_iir & SDE_TRANSB_FIFO_UNDER)
                if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
                                                          false))
                        DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");
}

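/* Decode GEN7_ERR_INT: poison and per-pipe FIFO underrun events. */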
static void ivb_err_int_handler(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 err_int = I915_READ(GEN7_ERR_INT);

        if (err_int & ERR_INT_POISON)
                DRM_ERROR("Poison interrupt\n");

        if (err_int & ERR_INT_FIFO_UNDERRUN_A)
                if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false))
                        DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n");

        if (err_int & ERR_INT_FIFO_UNDERRUN_B)
                if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false))
                        DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n");

        if (err_int & ERR_INT_FIFO_UNDERRUN_C)
                if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_C, false))
                        DRM_DEBUG_DRIVER("Pipe C FIFO underrun\n");

        I915_WRITE(GEN7_ERR_INT, err_int);
}

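/* Decode SERR_INT: PCH poison and per-transcoder FIFO underrun events. */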
static void cpt_serr_int_handler(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 serr_int = I915_READ(SERR_INT);

        if (serr_int & SERR_INT_POISON)
                DRM_ERROR("PCH poison interrupt\n");

        if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
                if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
                                                          false))
                        DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");

        if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
                if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
                                                          false))
                        DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");

        if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
                if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C,
                                                          false))
                        DRM_DEBUG_DRIVER("PCH transcoder C FIFO underrun\n");

        I915_WRITE(SERR_INT, serr_int);
}

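/* South (PCH) display interrupt dispatch for CPT/PPT. */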
static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int pipe;
        u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;

        intel_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);

        if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
                int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
                               SDE_AUDIO_POWER_SHIFT_CPT);
                DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
                                 port_name(port));
        }

        if (pch_iir & SDE_AUX_MASK_CPT)
                dp_aux_irq_handler(dev);

        if (pch_iir & SDE_GMBUS_CPT)
                gmbus_irq_handler(dev);

        if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
                DRM_DEBUG_DRIVER("Audio CP request interrupt\n");

        if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
                DRM_DEBUG_DRIVER("Audio CP change interrupt\n");

        if (pch_iir & SDE_FDI_MASK_CPT)
                for_each_pipe(pipe)
                        DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
                                         pipe_name(pipe),
                                         I915_READ(FDI_RX_IIR(pipe)));

        if (pch_iir & SDE_ERROR_CPT)
                cpt_serr_int_handler(dev);
}

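/* North display interrupt dispatch for ILK/SNB. */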
static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (de_iir & DE_AUX_CHANNEL_A)
                dp_aux_irq_handler(dev);

        if (de_iir & DE_GSE)
                intel_opregion_asle_intr(dev);

        if (de_iir & DE_PIPEA_VBLANK)
                drm_handle_vblank(dev, 0);

        if (de_iir & DE_PIPEB_VBLANK)
                drm_handle_vblank(dev, 1);

        if (de_iir & DE_POISON)
                DRM_ERROR("Poison interrupt\n");

        if (de_iir & DE_PIPEA_FIFO_UNDERRUN)
                if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false))
                        DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n");

        if (de_iir & DE_PIPEB_FIFO_UNDERRUN)
                if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false))
                        DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n");

        if (de_iir & DE_PLANEA_FLIP_DONE) {
                intel_prepare_page_flip(dev, 0);
                intel_finish_page_flip_plane(dev, 0);
        }

        if (de_iir & DE_PLANEB_FLIP_DONE) {
                intel_prepare_page_flip(dev, 1);
                intel_finish_page_flip_plane(dev, 1);
        }

        /* check event from PCH */
        if (de_iir & DE_PCH_EVENT) {
                u32 pch_iir = I915_READ(SDEIIR);

                if (HAS_PCH_CPT(dev))
                        cpt_irq_handler(dev, pch_iir);
                else
                        ibx_irq_handler(dev, pch_iir);

                /* should clear PCH hotplug event before clear CPU irq */
                I915_WRITE(SDEIIR, pch_iir);
        }

        if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
                ironlake_rps_change_irq_handler(dev);
}

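/* North display interrupt dispatch for IVB/HSW. */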
static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int i;

        if (de_iir & DE_ERR_INT_IVB)
                ivb_err_int_handler(dev);

        if (de_iir & DE_AUX_CHANNEL_A_IVB)
                dp_aux_irq_handler(dev);

        if (de_iir & DE_GSE_IVB)
                intel_opregion_asle_intr(dev);

        for (i = 0; i < 3; i++) {
                if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
                        drm_handle_vblank(dev, i);
                if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
                        intel_prepare_page_flip(dev, i);
                        intel_finish_page_flip_plane(dev, i);
                }
        }

        /* check event from PCH */
        if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
                u32 pch_iir = I915_READ(SDEIIR);

                cpt_irq_handler(dev, pch_iir);

                /* clear PCH hotplug event before clear CPU irq */
                I915_WRITE(SDEIIR, pch_iir);
        }
}

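/*
 * Top-level interrupt handler for ILK and later big-core parts: masks the
 * master and south interrupts while processing, then dispatches GT,
 * display and PM interrupts to the per-generation helpers.
 */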
static irqreturn_t ironlake_irq_handler(int irq, void *arg)
{
        struct drm_device *dev = (struct drm_device *) arg;
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        u32 de_iir, gt_iir, de_ier, sde_ier = 0;
        irqreturn_t ret = IRQ_NONE;

        atomic_inc(&dev_priv->irq_received);

        /* We get interrupts on unclaimed registers, so check for this before we
         * do any I915_{READ,WRITE}. */
        intel_uncore_check_errors(dev);

        /* disable master interrupt before clearing iir  */
        de_ier = I915_READ(DEIER);
        I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
        POSTING_READ(DEIER);

        /* Disable south interrupts. We'll only write to SDEIIR once, so further
         * interrupts will be stored on its back queue, and then we'll be
1326          * able to process them after we restore SDEIER (as soon as we restore
1327          * it, we'll get an interrupt if SDEIIR still has something to process
1328          * due to its back queue). */
1329         if (!HAS_PCH_NOP(dev)) {
1330                 sde_ier = I915_READ(SDEIER);
1331                 I915_WRITE(SDEIER, 0);
1332                 POSTING_READ(SDEIER);
1333         }
1334
1335         /* On Haswell, also mask ERR_INT because we don't want to risk
1336          * generating "unclaimed register" interrupts from inside the interrupt
1337          * handler. */
1338         if (IS_HASWELL(dev)) {
1339                 spin_lock(&dev_priv->irq_lock);
1340                 ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
1341                 spin_unlock(&dev_priv->irq_lock);
1342         }
1343
1344         gt_iir = I915_READ(GTIIR);
1345         if (gt_iir) {
1346                 if (INTEL_INFO(dev)->gen >= 6)
1347                         snb_gt_irq_handler(dev, dev_priv, gt_iir);
1348                 else
1349                         ilk_gt_irq_handler(dev, dev_priv, gt_iir);
1350                 I915_WRITE(GTIIR, gt_iir);
1351                 ret = IRQ_HANDLED;
1352         }
1353
1354         de_iir = I915_READ(DEIIR);
1355         if (de_iir) {
1356                 if (INTEL_INFO(dev)->gen >= 7)
1357                         ivb_display_irq_handler(dev, de_iir);
1358                 else
1359                         ilk_display_irq_handler(dev, de_iir);
1360                 I915_WRITE(DEIIR, de_iir);
1361                 ret = IRQ_HANDLED;
1362         }
1363
1364         if (INTEL_INFO(dev)->gen >= 6) {
1365                 u32 pm_iir = I915_READ(GEN6_PMIIR);
1366                 if (pm_iir) {
1367                         if (IS_HASWELL(dev))
1368                                 hsw_pm_irq_handler(dev_priv, pm_iir);
1369                         else if (pm_iir & GEN6_PM_RPS_EVENTS)
1370                                 gen6_rps_irq_handler(dev_priv, pm_iir);
1371                         I915_WRITE(GEN6_PMIIR, pm_iir);
1372                         ret = IRQ_HANDLED;
1373                 }
1374         }
1375
1376         if (IS_HASWELL(dev)) {
1377                 spin_lock(&dev_priv->irq_lock);
1378                 if (ivb_can_enable_err_int(dev))
1379                         ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
1380                 spin_unlock(&dev_priv->irq_lock);
1381         }
1382
1383         I915_WRITE(DEIER, de_ier);
1384         POSTING_READ(DEIER);
1385         if (!HAS_PCH_NOP(dev)) {
1386                 I915_WRITE(SDEIER, sde_ier);
1387                 POSTING_READ(SDEIER);
1388         }
1389
1390         return ret;
1391 }
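/*
 * Illustrative aside, not driver code: the handler above can only afford to
 * write SDEIIR once, so it parks the PCH by zeroing SDEIER, handles what it
 * saw, and restores SDEIER so anything that queued up meanwhile re-fires. A
 * minimal standalone model of that disable/handle/restore pattern (all
 * model_* names are made up; plain variables stand in for the registers):
 */
#if 0
static unsigned int model_deier, model_sdeier, model_sdeiir;

static void model_irq_handler(void)
{
        unsigned int de_ier = model_deier;
        unsigned int sde_ier = model_sdeier;

        model_deier = de_ier & ~(1u << 31);     /* mask the master enable */
        model_sdeier = 0;                       /* mask all south interrupts */

        if (model_sdeiir) {
                /* ... handle the PCH events we latched ... */
                model_sdeiir = 0;               /* the single SDEIIR write */
        }

        model_deier = de_ier;                   /* restore master enable */
        model_sdeier = sde_ier;                 /* re-fires if SDEIIR refilled */
}
#endif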
1392
1393 /**
1394  * i915_error_work_func - do process context error handling work
1395  * @work: work struct
1396  *
1397  * Fire an error uevent so userspace can see that a hang or error
1398  * was detected.
1399  */
1400 static void i915_error_work_func(struct work_struct *work)
1401 {
1402         struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
1403                                                     work);
1404         drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t,
1405                                                     gpu_error);
1406         struct drm_device *dev = dev_priv->dev;
1407         struct intel_ring_buffer *ring;
1408         char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
1409         char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
1410         char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
1411         int i, ret;
1412
1413         kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
1414
1415         /*
1416          * Note that there's only one work item which does gpu resets, so we
1417          * need not worry about concurrent gpu resets potentially incrementing
1418          * error->reset_counter twice. We only need to take care of another
1419          * racing irq/hangcheck declaring the gpu dead for a second time. A
1420          * quick check for that is good enough: schedule_work ensures the
1421          * correct ordering between hang detection and this work item, and since
1422          * the reset in-progress bit is only ever set by code outside of this
1423          * work we don't need to worry about any other races.
1424          */
1425         if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
1426                 DRM_DEBUG_DRIVER("resetting chip\n");
1427                 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE,
1428                                    reset_event);
1429
1430                 ret = i915_reset(dev);
1431
1432                 if (ret == 0) {
1433                         /*
1434                          * After all the gem state is reset, increment the reset
1435                          * counter and wake up everyone waiting for the reset to
1436                          * complete.
1437                          *
1438                          * Since unlock operations are a one-sided barrier only,
1439                          * we need to insert a barrier here to order any seqno
1440                          * updates before
1441                          * the counter increment.
1442                          */
1443                         smp_mb__before_atomic_inc();
1444                         atomic_inc(&dev_priv->gpu_error.reset_counter);
1445
1446                         kobject_uevent_env(&dev->primary->kdev.kobj,
1447                                            KOBJ_CHANGE, reset_done_event);
1448                 } else {
1449                         atomic_set(&error->reset_counter, I915_WEDGED);
1450                 }
1451
1452                 for_each_ring(ring, dev_priv, i)
1453                         wake_up_all(&ring->irq_queue);
1454
1455                 intel_display_handle_reset(dev);
1456
1457                 wake_up_all(&dev_priv->gpu_error.reset_queue);
1458         }
1459 }
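/*
 * Illustrative aside, not driver code: smp_mb__before_atomic_inc() above
 * guarantees that all seqno updates from the reset are visible before any
 * waiter can observe the bumped reset_counter. A userspace sketch of an
 * analogous release-before-increment ordering using C11 atomics (the
 * model_* names are made up):
 */
#if 0
#include <stdatomic.h>

static atomic_uint model_reset_counter;
static atomic_uint model_seqno;

static void model_finish_reset(void)
{
        atomic_store_explicit(&model_seqno, 42, memory_order_relaxed);
        atomic_thread_fence(memory_order_release); /* ~smp_mb__before_... */
        atomic_fetch_add_explicit(&model_reset_counter, 1,
                                  memory_order_relaxed);
}

static unsigned int model_waiter(unsigned int old)
{
        /* the acquire load pairs with the release fence above */
        while (atomic_load_explicit(&model_reset_counter,
                                    memory_order_acquire) == old)
                ;
        /* guaranteed to see the seqno update once the counter moved */
        return atomic_load_explicit(&model_seqno, memory_order_relaxed);
}
#endif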
1460
1461 static void i915_report_and_clear_eir(struct drm_device *dev)
1462 {
1463         struct drm_i915_private *dev_priv = dev->dev_private;
1464         uint32_t instdone[I915_NUM_INSTDONE_REG];
1465         u32 eir = I915_READ(EIR);
1466         int pipe, i;
1467
1468         if (!eir)
1469                 return;
1470
1471         pr_err("render error detected, EIR: 0x%08x\n", eir);
1472
1473         i915_get_extra_instdone(dev, instdone);
1474
1475         if (IS_G4X(dev)) {
1476                 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
1477                         u32 ipeir = I915_READ(IPEIR_I965);
1478
1479                         pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
1480                         pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
1481                         for (i = 0; i < ARRAY_SIZE(instdone); i++)
1482                                 pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
1483                         pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
1484                         pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
1485                         I915_WRITE(IPEIR_I965, ipeir);
1486                         POSTING_READ(IPEIR_I965);
1487                 }
1488                 if (eir & GM45_ERROR_PAGE_TABLE) {
1489                         u32 pgtbl_err = I915_READ(PGTBL_ER);
1490                         pr_err("page table error\n");
1491                         pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
1492                         I915_WRITE(PGTBL_ER, pgtbl_err);
1493                         POSTING_READ(PGTBL_ER);
1494                 }
1495         }
1496
1497         if (!IS_GEN2(dev)) {
1498                 if (eir & I915_ERROR_PAGE_TABLE) {
1499                         u32 pgtbl_err = I915_READ(PGTBL_ER);
1500                         pr_err("page table error\n");
1501                         pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
1502                         I915_WRITE(PGTBL_ER, pgtbl_err);
1503                         POSTING_READ(PGTBL_ER);
1504                 }
1505         }
1506
1507         if (eir & I915_ERROR_MEMORY_REFRESH) {
1508                 pr_err("memory refresh error:\n");
1509                 for_each_pipe(pipe)
1510                         pr_err("pipe %c stat: 0x%08x\n",
1511                                pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
1512                 /* pipestat has already been acked */
1513         }
1514         if (eir & I915_ERROR_INSTRUCTION) {
1515                 pr_err("instruction error\n");
1516                 pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
1517                 for (i = 0; i < ARRAY_SIZE(instdone); i++)
1518                         pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
1519                 if (INTEL_INFO(dev)->gen < 4) {
1520                         u32 ipeir = I915_READ(IPEIR);
1521
1522                         pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
1523                         pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
1524                         pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
1525                         I915_WRITE(IPEIR, ipeir);
1526                         POSTING_READ(IPEIR);
1527                 } else {
1528                         u32 ipeir = I915_READ(IPEIR_I965);
1529
1530                         pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
1531                         pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
1532                         pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
1533                         pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
1534                         I915_WRITE(IPEIR_I965, ipeir);
1535                         POSTING_READ(IPEIR_I965);
1536                 }
1537         }
1538
1539         I915_WRITE(EIR, eir);
1540         POSTING_READ(EIR);
1541         eir = I915_READ(EIR);
1542         if (eir) {
1543                 /*
1544                  * some errors might have become stuck,
1545                  * mask them.
1546                  */
1547                 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
1548                 I915_WRITE(EMR, I915_READ(EMR) | eir);
1549                 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
1550         }
1551 }
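/*
 * Illustrative aside, not driver code: EIR is write-one-to-clear, so the
 * function above writes the value it read straight back, re-reads, and
 * treats any surviving bit as stuck, masking it in EMR rather than looping
 * forever. A toy model of that W1C-then-mask sequence (names made up):
 */
#if 0
static unsigned int model_emr;

static void model_clear_errors(unsigned int *eir, unsigned int stuck_bits)
{
        *eir &= stuck_bits;             /* W1C ack: only stuck bits survive */
        if (*eir)
                model_emr |= *eir;      /* mask whatever refuses to clear */
}
#endif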
1552
1553 /**
1554  * i915_handle_error - handle an error interrupt
1555  * @dev: drm device
1556  *
1557  * Do some basic checking of register state at error interrupt time and
1558  * dump it to the syslog.  Also call i915_capture_error_state() to make
1559  * sure we get a record and make it available in debugfs.  Fire a uevent
1560  * so userspace knows something bad happened (should trigger collection
1561  * of a ring dump etc.).
1562  */
1563 void i915_handle_error(struct drm_device *dev, bool wedged)
1564 {
1565         struct drm_i915_private *dev_priv = dev->dev_private;
1566         struct intel_ring_buffer *ring;
1567         int i;
1568
1569         i915_capture_error_state(dev);
1570         i915_report_and_clear_eir(dev);
1571
1572         if (wedged) {
1573                 atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
1574                                 &dev_priv->gpu_error.reset_counter);
1575
1576                 /*
1577                  * Wakeup waiting processes so that the reset work item
1578                  * doesn't deadlock trying to grab various locks.
1579                  */
1580                 for_each_ring(ring, dev_priv, i)
1581                         wake_up_all(&ring->irq_queue);
1582         }
1583
1584         queue_work(dev_priv->wq, &dev_priv->gpu_error.work);
1585 }
1586
1587 static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe)
1588 {
1589         drm_i915_private_t *dev_priv = dev->dev_private;
1590         struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
1591         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1592         struct drm_i915_gem_object *obj;
1593         struct intel_unpin_work *work;
1594         unsigned long flags;
1595         bool stall_detected;
1596
1597         /* Ignore early vblank irqs */
1598         if (intel_crtc == NULL)
1599                 return;
1600
1601         spin_lock_irqsave(&dev->event_lock, flags);
1602         work = intel_crtc->unpin_work;
1603
1604         if (work == NULL ||
1605             atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
1606             !work->enable_stall_check) {
1607                 /* Either the pending flip IRQ arrived, or we're too early. Don't check */
1608                 spin_unlock_irqrestore(&dev->event_lock, flags);
1609                 return;
1610         }
1611
1612         /* Potential stall - if we see that the flip has happened, assume a missed interrupt */
1613         obj = work->pending_flip_obj;
1614         if (INTEL_INFO(dev)->gen >= 4) {
1615                 int dspsurf = DSPSURF(intel_crtc->plane);
1616                 stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
1617                                         i915_gem_obj_ggtt_offset(obj);
1618         } else {
1619                 int dspaddr = DSPADDR(intel_crtc->plane);
1620                 stall_detected = I915_READ(dspaddr) == (i915_gem_obj_ggtt_offset(obj) +
1621                                                         crtc->y * crtc->fb->pitches[0] +
1622                                                         crtc->x * crtc->fb->bits_per_pixel/8);
1623         }
1624
1625         spin_unlock_irqrestore(&dev->event_lock, flags);
1626
1627         if (stall_detected) {
1628                 DRM_DEBUG_DRIVER("Pageflip stall detected\n");
1629                 intel_prepare_page_flip(dev, intel_crtc->plane);
1630         }
1631 }
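/*
 * Illustrative aside, not driver code: pre-gen4 hardware has no DSPSURF, so
 * the stall check above recomputes the linear scanout address by hand as
 * ggtt_offset + y * pitch + x * bytes_per_pixel. A standalone rendition with
 * a worked example (names and numbers made up):
 */
#if 0
#include <stdint.h>

static uint32_t model_scanout_addr(uint32_t ggtt_offset, int x, int y,
                                   uint32_t pitch, int bits_per_pixel)
{
        return ggtt_offset + (uint32_t)y * pitch +
               (uint32_t)x * (uint32_t)(bits_per_pixel / 8);
}

/*
 * e.g. a 32bpp framebuffer at GGTT offset 0x100000 with an 8192-byte pitch,
 * panned to (x=16, y=100):
 *   0x100000 + 100 * 8192 + 16 * 4 = 0x100000 + 819200 + 64 = 0x1c8040
 */
#endif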
1632
1633 /* Called from drm generic code, passed 'crtc' which
1634  * we use as a pipe index
1635  */
1636 static int i915_enable_vblank(struct drm_device *dev, int pipe)
1637 {
1638         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1639         unsigned long irqflags;
1640
1641         if (!i915_pipe_enabled(dev, pipe))
1642                 return -EINVAL;
1643
1644         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1645         if (INTEL_INFO(dev)->gen >= 4)
1646                 i915_enable_pipestat(dev_priv, pipe,
1647                                      PIPE_START_VBLANK_INTERRUPT_ENABLE);
1648         else
1649                 i915_enable_pipestat(dev_priv, pipe,
1650                                      PIPE_VBLANK_INTERRUPT_ENABLE);
1651
1652         /* maintain vblank delivery even in deep C-states */
1653         if (dev_priv->info->gen == 3)
1654                 I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
1655         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1656
1657         return 0;
1658 }
1659
1660 static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
1661 {
1662         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1663         unsigned long irqflags;
1664         uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
1665                                                      DE_PIPE_VBLANK_ILK(pipe);
1666
1667         if (!i915_pipe_enabled(dev, pipe))
1668                 return -EINVAL;
1669
1670         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1671         ironlake_enable_display_irq(dev_priv, bit);
1672         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1673
1674         return 0;
1675 }
1676
1677 static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
1678 {
1679         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1680         unsigned long irqflags;
1681         u32 imr;
1682
1683         if (!i915_pipe_enabled(dev, pipe))
1684                 return -EINVAL;
1685
1686         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1687         imr = I915_READ(VLV_IMR);
1688         if (pipe == 0)
1689                 imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
1690         else
1691                 imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
1692         I915_WRITE(VLV_IMR, imr);
1693         i915_enable_pipestat(dev_priv, pipe,
1694                              PIPE_START_VBLANK_INTERRUPT_ENABLE);
1695         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1696
1697         return 0;
1698 }
1699
1700 /* Called from drm generic code, passed 'crtc' which
1701  * we use as a pipe index
1702  */
1703 static void i915_disable_vblank(struct drm_device *dev, int pipe)
1704 {
1705         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1706         unsigned long irqflags;
1707
1708         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1709         if (dev_priv->info->gen == 3)
1710                 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));
1711
1712         i915_disable_pipestat(dev_priv, pipe,
1713                               PIPE_VBLANK_INTERRUPT_ENABLE |
1714                               PIPE_START_VBLANK_INTERRUPT_ENABLE);
1715         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1716 }
1717
1718 static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
1719 {
1720         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1721         unsigned long irqflags;
1722         uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
1723                                                      DE_PIPE_VBLANK_ILK(pipe);
1724
1725         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1726         ironlake_disable_display_irq(dev_priv, bit);
1727         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1728 }
1729
1730 static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
1731 {
1732         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1733         unsigned long irqflags;
1734         u32 imr;
1735
1736         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1737         i915_disable_pipestat(dev_priv, pipe,
1738                               PIPE_START_VBLANK_INTERRUPT_ENABLE);
1739         imr = I915_READ(VLV_IMR);
1740         if (pipe == 0)
1741                 imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
1742         else
1743                 imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
1744         I915_WRITE(VLV_IMR, imr);
1745         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1746 }
1747
1748 static u32
1749 ring_last_seqno(struct intel_ring_buffer *ring)
1750 {
1751         return list_entry(ring->request_list.prev,
1752                           struct drm_i915_gem_request, list)->seqno;
1753 }
1754
1755 static bool
1756 ring_idle(struct intel_ring_buffer *ring, u32 seqno)
1757 {
1758         return (list_empty(&ring->request_list) ||
1759                 i915_seqno_passed(seqno, ring_last_seqno(ring)));
1760 }
1761
1762 static struct intel_ring_buffer *
1763 semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno)
1764 {
1765         struct drm_i915_private *dev_priv = ring->dev->dev_private;
1766         u32 cmd, ipehr, acthd, acthd_min;
1767
1768         ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
1769         if ((ipehr & ~(0x3 << 16)) !=
1770             (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER))
1771                 return NULL;
1772
1773         /* ACTHD is likely pointing to the dword after the actual command,
1774          * so scan backwards until we find the MBOX.
1775          */
1776         acthd = intel_ring_get_active_head(ring) & HEAD_ADDR;
1777         acthd_min = max((int)acthd - 3 * 4, 0);
1778         do {
1779                 cmd = ioread32(ring->virtual_start + acthd);
1780                 if (cmd == ipehr)
1781                         break;
1782
1783                 acthd -= 4;
1784                 if (acthd < acthd_min)
1785                         return NULL;
1786         } while (1);
1787
1788         *seqno = ioread32(ring->virtual_start+acthd+4)+1;
1789         return &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3];
1790 }
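/*
 * Illustrative aside, not driver code: with the three rings of this era
 * (RCS=0, VCS=1, BCS=2), the function above derives which ring the waiter
 * is parked on from bit 17 of the MI_SEMAPHORE_MBOX ipehr word, wrapping
 * the id with a modulo. The same index computation in isolation:
 */
#if 0
static int model_signaller_id(int waiter_id, unsigned int ipehr)
{
        return (waiter_id + (((ipehr >> 17) & 1) + 1)) % 3;
}

/*
 * e.g. waiter RCS (0): bit17 == 0 -> ring 1 (VCS), bit17 == 1 -> ring 2 (BCS)
 *      waiter BCS (2): bit17 == 0 -> ring 0 (RCS), bit17 == 1 -> ring 1 (VCS)
 */
#endif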
1791
1792 static int semaphore_passed(struct intel_ring_buffer *ring)
1793 {
1794         struct drm_i915_private *dev_priv = ring->dev->dev_private;
1795         struct intel_ring_buffer *signaller;
1796         u32 seqno, ctl;
1797
1798         ring->hangcheck.deadlock = true;
1799
1800         signaller = semaphore_waits_for(ring, &seqno);
1801         if (signaller == NULL || signaller->hangcheck.deadlock)
1802                 return -1;
1803
1804         /* cursory check for an unkickable deadlock */
1805         ctl = I915_READ_CTL(signaller);
1806         if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0)
1807                 return -1;
1808
1809         return i915_seqno_passed(signaller->get_seqno(signaller, false), seqno);
1810 }
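/*
 * Illustrative aside, not driver code: semaphore_passed() walks the chain of
 * signallers recursively, and hangcheck.deadlock acts as a "visited" mark so
 * a cycle (A waits on B while B waits on A) bottoms out as -1 instead of
 * recursing forever; semaphore_clear_deadlocks() resets the marks between
 * checks. The same cycle detection over a tiny wait-for graph (made-up
 * names):
 */
#if 0
#define MODEL_NUM_RINGS 3

static int model_waits_for[MODEL_NUM_RINGS];    /* -1 when not waiting */
static int model_visited[MODEL_NUM_RINGS];      /* cleared before each scan */

static int model_deadlocked(int ring)
{
        if (model_visited[ring])
                return 1;                       /* revisit => cycle */
        model_visited[ring] = 1;

        if (model_waits_for[ring] < 0)
                return 0;                       /* chain ends, no deadlock */
        return model_deadlocked(model_waits_for[ring]);
}
#endif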
1811
1812 static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
1813 {
1814         struct intel_ring_buffer *ring;
1815         int i;
1816
1817         for_each_ring(ring, dev_priv, i)
1818                 ring->hangcheck.deadlock = false;
1819 }
1820
1821 static enum intel_ring_hangcheck_action
1822 ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
1823 {
1824         struct drm_device *dev = ring->dev;
1825         struct drm_i915_private *dev_priv = dev->dev_private;
1826         u32 tmp;
1827
1828         if (ring->hangcheck.acthd != acthd)
1829                 return active;
1830
1831         if (IS_GEN2(dev))
1832                 return hung;
1833
1834         /* Is the chip hanging on a WAIT_FOR_EVENT?
1835          * If so we can simply poke the RB_WAIT bit
1836          * and break the hang. This should work on
1837          * all but the second generation chipsets.
1838          */
1839         tmp = I915_READ_CTL(ring);
1840         if (tmp & RING_WAIT) {
1841                 DRM_ERROR("Kicking stuck wait on %s\n",
1842                           ring->name);
1843                 I915_WRITE_CTL(ring, tmp);
1844                 return kick;
1845         }
1846
1847         if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
1848                 switch (semaphore_passed(ring)) {
1849                 default:
1850                         return hung;
1851                 case 1:
1852                         DRM_ERROR("Kicking stuck semaphore on %s\n",
1853                                   ring->name);
1854                         I915_WRITE_CTL(ring, tmp);
1855                         return kick;
1856                 case 0:
1857                         return wait;
1858                 }
1859         }
1860
1861         return hung;
1862 }
1863
1864 /**
1865  * This is called when the chip hasn't reported back with completed
1866  * batchbuffers in a long time. We keep track of per-ring seqno progress and,
1867  * if there is no progress, the hangcheck score for that ring is increased.
1868  * Further, acthd is inspected to see if the ring is stuck. If it is, we
1869  * kick the ring. If we see no progress over three subsequent calls,
1870  * we assume the chip is wedged and try to fix it by resetting it.
1871  */
1872 void i915_hangcheck_elapsed(unsigned long data)
1873 {
1874         struct drm_device *dev = (struct drm_device *)data;
1875         drm_i915_private_t *dev_priv = dev->dev_private;
1876         struct intel_ring_buffer *ring;
1877         int i;
1878         int busy_count = 0, rings_hung = 0;
1879         bool stuck[I915_NUM_RINGS] = { 0 };
1880 #define BUSY 1
1881 #define KICK 5
1882 #define HUNG 20
1883 #define FIRE 30
1884
1885         if (!i915_enable_hangcheck)
1886                 return;
1887
1888         for_each_ring(ring, dev_priv, i) {
1889                 u32 seqno, acthd;
1890                 bool busy = true;
1891
1892                 semaphore_clear_deadlocks(dev_priv);
1893
1894                 seqno = ring->get_seqno(ring, false);
1895                 acthd = intel_ring_get_active_head(ring);
1896
1897                 if (ring->hangcheck.seqno == seqno) {
1898                         if (ring_idle(ring, seqno)) {
1899                                 if (waitqueue_active(&ring->irq_queue)) {
1900                                         /* Issue a wake-up to catch stuck h/w. */
1901                                         DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
1902                                                   ring->name);
1903                                         wake_up_all(&ring->irq_queue);
1904                                         ring->hangcheck.score += HUNG;
1905                                 } else
1906                                         busy = false;
1907                         } else {
1908                                 int score;
1909
1910                                 /* We always increment the hangcheck score
1911                                  * if the ring is busy and still processing
1912                                  * the same request, so that no single request
1913                                  * can run indefinitely (such as a chain of
1914                                  * batches). The only time we do not increment
1915                                  * the hangcheck score on this ring is if this
1916                                  * ring is in a legitimate wait for another
1917                                  * ring. In that case the waiting ring is a
1918                                  * victim and we want to be sure we catch the
1919                                  * right culprit. Then every time we do kick
1920                                  * the ring, add a small increment to the
1921                                  * score so that we can catch a batch that is
1922                                  * being repeatedly kicked and so responsible
1923                                  * for stalling the machine.
1924                                  */
1925                                 ring->hangcheck.action = ring_stuck(ring,
1926                                                                     acthd);
1927
1928                                 switch (ring->hangcheck.action) {
1929                                 case wait:
1930                                         score = 0;
1931                                         break;
1932                                 case active:
1933                                         score = BUSY;
1934                                         break;
1935                                 case kick:
1936                                         score = KICK;
1937                                         break;
1938                                 case hung:
1939                                         score = HUNG;
1940                                         stuck[i] = true;
1941                                         break;
1942                                 }
1943                                 ring->hangcheck.score += score;
1944                         }
1945                 } else {
1946                         /* Gradually reduce the count so that we catch DoS
1947                          * attempts across multiple batches.
1948                          */
1949                         if (ring->hangcheck.score > 0)
1950                                 ring->hangcheck.score--;
1951                 }
1952
1953                 ring->hangcheck.seqno = seqno;
1954                 ring->hangcheck.acthd = acthd;
1955                 busy_count += busy;
1956         }
1957
1958         for_each_ring(ring, dev_priv, i) {
1959                 if (ring->hangcheck.score > FIRE) {
1960                         DRM_ERROR("%s on %s\n",
1961                                   stuck[i] ? "stuck" : "no progress",
1962                                   ring->name);
1963                         rings_hung++;
1964                 }
1965         }
1966
1967         if (rings_hung)
1968                 return i915_handle_error(dev, true);
1969
1970         if (busy_count)
1971                 /* Reset timer in case the chip hangs without another
1972                  * request being added */
1973                 i915_queue_hangcheck(dev);
1974 }
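/*
 * Illustrative aside, not driver code: with BUSY=1, KICK=5, HUNG=20 and
 * FIRE=30, a ring that samples as hung crosses the threshold on the second
 * consecutive check (20 + 20 > 30), a ring stuck on kickable waits takes
 * seven (7 * 5 > 30), and a merely busy ring needs 31 samples on the same
 * request, while any progress decays the score by one. A toy accumulator
 * (names made up):
 */
#if 0
static int model_score;

static int model_hangcheck_sample(int progressed, int increment)
{
        if (progressed) {
                if (model_score > 0)
                        model_score--;          /* decay on forward progress */
        } else {
                model_score += increment;       /* BUSY, KICK or HUNG */
        }
        return model_score > 30;                /* past FIRE: declare hang */
}
#endif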
1975
1976 void i915_queue_hangcheck(struct drm_device *dev)
1977 {
1978         struct drm_i915_private *dev_priv = dev->dev_private;
1979         if (!i915_enable_hangcheck)
1980                 return;
1981
1982         mod_timer(&dev_priv->gpu_error.hangcheck_timer,
1983                   round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
1984 }
1985
1986 static void ibx_irq_preinstall(struct drm_device *dev)
1987 {
1988         struct drm_i915_private *dev_priv = dev->dev_private;
1989
1990         if (HAS_PCH_NOP(dev))
1991                 return;
1992
1993         /* south display irq */
1994         I915_WRITE(SDEIMR, 0xffffffff);
1995         /*
1996          * SDEIER is also touched by the interrupt handler to work around missed
1997          * PCH interrupts. Hence we can't update it after the interrupt handler
1998          * is enabled - instead we unconditionally enable all PCH interrupt
1999          * sources here, but then only unmask them as needed with SDEIMR.
2000          */
2001         I915_WRITE(SDEIER, 0xffffffff);
2002         POSTING_READ(SDEIER);
2003 }
2004
2005 static void gen5_gt_irq_preinstall(struct drm_device *dev)
2006 {
2007         struct drm_i915_private *dev_priv = dev->dev_private;
2008
2009         /* and GT */
2010         I915_WRITE(GTIMR, 0xffffffff);
2011         I915_WRITE(GTIER, 0x0);
2012         POSTING_READ(GTIER);
2013
2014         if (INTEL_INFO(dev)->gen >= 6) {
2015                 /* and PM */
2016                 I915_WRITE(GEN6_PMIMR, 0xffffffff);
2017                 I915_WRITE(GEN6_PMIER, 0x0);
2018                 POSTING_READ(GEN6_PMIER);
2019         }
2020 }
2021
2022 /* drm_dma.h hooks
2023  */
2024 static void ironlake_irq_preinstall(struct drm_device *dev)
2025 {
2026         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2027
2028         atomic_set(&dev_priv->irq_received, 0);
2029
2030         I915_WRITE(HWSTAM, 0xeffe);
2031
2032         I915_WRITE(DEIMR, 0xffffffff);
2033         I915_WRITE(DEIER, 0x0);
2034         POSTING_READ(DEIER);
2035
2036         gen5_gt_irq_preinstall(dev);
2037
2038         ibx_irq_preinstall(dev);
2039 }
2040
2041 static void valleyview_irq_preinstall(struct drm_device *dev)
2042 {
2043         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2044         int pipe;
2045
2046         atomic_set(&dev_priv->irq_received, 0);
2047
2048         /* VLV magic */
2049         I915_WRITE(VLV_IMR, 0);
2050         I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
2051         I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
2052         I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
2053
2054         /* and GT */
2055         I915_WRITE(GTIIR, I915_READ(GTIIR));
2056         I915_WRITE(GTIIR, I915_READ(GTIIR));
2057
2058         gen5_gt_irq_preinstall(dev);
2059
2060         I915_WRITE(DPINVGTT, 0xff);
2061
2062         I915_WRITE(PORT_HOTPLUG_EN, 0);
2063         I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2064         for_each_pipe(pipe)
2065                 I915_WRITE(PIPESTAT(pipe), 0xffff);
2066         I915_WRITE(VLV_IIR, 0xffffffff);
2067         I915_WRITE(VLV_IMR, 0xffffffff);
2068         I915_WRITE(VLV_IER, 0x0);
2069         POSTING_READ(VLV_IER);
2070 }
2071
2072 static void ibx_hpd_irq_setup(struct drm_device *dev)
2073 {
2074         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2075         struct drm_mode_config *mode_config = &dev->mode_config;
2076         struct intel_encoder *intel_encoder;
2077         u32 hotplug_irqs, hotplug, enabled_irqs = 0;
2078
2079         if (HAS_PCH_IBX(dev)) {
2080                 hotplug_irqs = SDE_HOTPLUG_MASK;
2081                 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
2082                         if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
2083                                 enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
2084         } else {
2085                 hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
2086                 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
2087                         if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
2088                                 enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
2089         }
2090
2091         ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
2092
2093         /*
2094          * Enable digital hotplug on the PCH, and configure the DP short pulse
2095          * duration to 2ms (which is the minimum in the Display Port spec)
2096          *
2097          * This register is the same on all known PCH chips.
2098          */
2099         hotplug = I915_READ(PCH_PORT_HOTPLUG);
2100         hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
2101         hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
2102         hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
2103         hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
2104         I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
2105 }
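/*
 * Illustrative aside, not driver code: the setup above ORs together the
 * per-pin bits (hpd_ibx[] or hpd_cpt[]) of every encoder whose pin is
 * currently HPD_ENABLED to build the mask handed to
 * ibx_display_interrupt_update(). Reduced to a plain array walk (made-up
 * names):
 */
#if 0
static unsigned int model_enabled_irqs(const unsigned int *pin_to_bit,
                                       const int *pin_enabled, int npins)
{
        unsigned int mask = 0;
        int pin;

        for (pin = 0; pin < npins; pin++)
                if (pin_enabled[pin])
                        mask |= pin_to_bit[pin];

        return mask;
}
#endif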
2106
2107 static void ibx_irq_postinstall(struct drm_device *dev)
2108 {
2109         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2110         u32 mask;
2111
2112         if (HAS_PCH_NOP(dev))
2113                 return;
2114
2115         if (HAS_PCH_IBX(dev)) {
2116                 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_TRANSB_FIFO_UNDER |
2117                        SDE_TRANSA_FIFO_UNDER | SDE_POISON;
2118         } else {
2119                 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT | SDE_ERROR_CPT;
2120
2121                 I915_WRITE(SERR_INT, I915_READ(SERR_INT));
2122         }
2123
2124         I915_WRITE(SDEIIR, I915_READ(SDEIIR));
2125         I915_WRITE(SDEIMR, ~mask);
2126 }
2127
2128 static void gen5_gt_irq_postinstall(struct drm_device *dev)
2129 {
2130         struct drm_i915_private *dev_priv = dev->dev_private;
2131         u32 pm_irqs, gt_irqs;
2132
2133         pm_irqs = gt_irqs = 0;
2134
2135         dev_priv->gt_irq_mask = ~0;
2136         if (HAS_L3_GPU_CACHE(dev)) {
2137                 /* L3 parity interrupt is always unmasked. */
2138                 dev_priv->gt_irq_mask = ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
2139                 gt_irqs |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
2140         }
2141
2142         gt_irqs |= GT_RENDER_USER_INTERRUPT;
2143         if (IS_GEN5(dev)) {
2144                 gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
2145                            ILK_BSD_USER_INTERRUPT;
2146         } else {
2147                 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
2148         }
2149
2150         I915_WRITE(GTIIR, I915_READ(GTIIR));
2151         I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
2152         I915_WRITE(GTIER, gt_irqs);
2153         POSTING_READ(GTIER);
2154
2155         if (INTEL_INFO(dev)->gen >= 6) {
2156                 pm_irqs |= GEN6_PM_RPS_EVENTS;
2157
2158                 if (HAS_VEBOX(dev))
2159                         pm_irqs |= PM_VEBOX_USER_INTERRUPT;
2160
2161                 I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
2162                 I915_WRITE(GEN6_PMIMR, 0xffffffff);
2163                 I915_WRITE(GEN6_PMIER, pm_irqs);
2164                 POSTING_READ(GEN6_PMIER);
2165         }
2166 }
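/*
 * Illustrative aside, not driver code: the postinstall paths here all follow
 * one idiom: ack stale bits by writing IIR back to itself, program the mask
 * register, enable the sources, then do a posting read so the writes have
 * reached the hardware before an interrupt can fire. Schematically (made-up
 * names, volatile pointers standing in for mmio):
 */
#if 0
static void model_irq_postinstall(volatile unsigned int *iir,
                                  volatile unsigned int *imr,
                                  volatile unsigned int *ier,
                                  unsigned int unmasked, unsigned int enabled)
{
        *iir = *iir;            /* W1C: ack anything already pending */
        *imr = ~unmasked;       /* unmask only the bits we care about */
        *ier = enabled;         /* turn the sources on */
        (void)*ier;             /* posting read flushes the writes */
}
#endif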
2167
2168 static int ironlake_irq_postinstall(struct drm_device *dev)
2169 {
2170         unsigned long irqflags;
2171         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2172         u32 display_mask, extra_mask;
2173
2174         if (INTEL_INFO(dev)->gen >= 7) {
2175                 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
2176                                 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
2177                                 DE_PLANEB_FLIP_DONE_IVB |
2178                                 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB |
2179                                 DE_ERR_INT_IVB);
2180                 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
2181                               DE_PIPEA_VBLANK_IVB);
2182
2183                 I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
2184         } else {
2185                 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
2186                                 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
2187                                 DE_AUX_CHANNEL_A | DE_PIPEB_FIFO_UNDERRUN |
2188                                 DE_PIPEA_FIFO_UNDERRUN | DE_POISON);
2189                 extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT;
2190         }
2191
2192         dev_priv->irq_mask = ~display_mask;
2193
2194         /* should always be able to generate irqs */
2195         I915_WRITE(DEIIR, I915_READ(DEIIR));
2196         I915_WRITE(DEIMR, dev_priv->irq_mask);
2197         I915_WRITE(DEIER, display_mask | extra_mask);
2198         POSTING_READ(DEIER);
2199
2200         gen5_gt_irq_postinstall(dev);
2201
2202         ibx_irq_postinstall(dev);
2203
2204         if (IS_IRONLAKE_M(dev)) {
2205                 /* Enable PCU event interrupts
2206                  *
2207                  * spinlocking not required here for correctness since interrupt
2208                  * setup is guaranteed to run in single-threaded context. But we
2209                  * need it to make the assert_spin_locked happy. */
2210                 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2211                 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
2212                 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2213         }
2214
2215         return 0;
2216 }
2217
2218 static int valleyview_irq_postinstall(struct drm_device *dev)
2219 {
2220         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2221         u32 enable_mask;
2222         u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
2223         unsigned long irqflags;
2224
2225         enable_mask = I915_DISPLAY_PORT_INTERRUPT;
2226         enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2227                 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
2228                 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2229                 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
2230
2231         /*
2232          * Leave vblank interrupts masked initially. Enable/disable will
2233          * toggle them based on usage.
2234          */
2235         dev_priv->irq_mask = (~enable_mask) |
2236                 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
2237                 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
2238
2239         I915_WRITE(PORT_HOTPLUG_EN, 0);
2240         POSTING_READ(PORT_HOTPLUG_EN);
2241
2242         I915_WRITE(VLV_IMR, dev_priv->irq_mask);
2243         I915_WRITE(VLV_IER, enable_mask);
2244         I915_WRITE(VLV_IIR, 0xffffffff);
2245         I915_WRITE(PIPESTAT(0), 0xffff);
2246         I915_WRITE(PIPESTAT(1), 0xffff);
2247         POSTING_READ(VLV_IER);
2248
2249         /* Interrupt setup is already guaranteed to be single-threaded; this is
2250          * just to make the assert_spin_locked check happy. */
2251         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2252         i915_enable_pipestat(dev_priv, 0, pipestat_enable);
2253         i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
2254         i915_enable_pipestat(dev_priv, 1, pipestat_enable);
2255         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2256
2257         I915_WRITE(VLV_IIR, 0xffffffff);
2258         I915_WRITE(VLV_IIR, 0xffffffff);
2259
2260         gen5_gt_irq_postinstall(dev);
2261
2262         /* ack & enable invalid PTE error interrupts */
2263 #if 0 /* FIXME: add support to irq handler for checking these bits */
2264         I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
2265         I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
2266 #endif
2267
2268         I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
2269
2270         return 0;
2271 }
2272
2273 static void valleyview_irq_uninstall(struct drm_device *dev)
2274 {
2275         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2276         int pipe;
2277
2278         if (!dev_priv)
2279                 return;
2280
2281         del_timer_sync(&dev_priv->hotplug_reenable_timer);
2282
2283         for_each_pipe(pipe)
2284                 I915_WRITE(PIPESTAT(pipe), 0xffff);
2285
2286         I915_WRITE(HWSTAM, 0xffffffff);
2287         I915_WRITE(PORT_HOTPLUG_EN, 0);
2288         I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2289         for_each_pipe(pipe)
2290                 I915_WRITE(PIPESTAT(pipe), 0xffff);
2291         I915_WRITE(VLV_IIR, 0xffffffff);
2292         I915_WRITE(VLV_IMR, 0xffffffff);
2293         I915_WRITE(VLV_IER, 0x0);
2294         POSTING_READ(VLV_IER);
2295 }
2296
2297 static void ironlake_irq_uninstall(struct drm_device *dev)
2298 {
2299         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2300
2301         if (!dev_priv)
2302                 return;
2303
2304         del_timer_sync(&dev_priv->hotplug_reenable_timer);
2305
2306         I915_WRITE(HWSTAM, 0xffffffff);
2307
2308         I915_WRITE(DEIMR, 0xffffffff);
2309         I915_WRITE(DEIER, 0x0);
2310         I915_WRITE(DEIIR, I915_READ(DEIIR));
2311         if (IS_GEN7(dev))
2312                 I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
2313
2314         I915_WRITE(GTIMR, 0xffffffff);
2315         I915_WRITE(GTIER, 0x0);
2316         I915_WRITE(GTIIR, I915_READ(GTIIR));
2317
2318         if (HAS_PCH_NOP(dev))
2319                 return;
2320
2321         I915_WRITE(SDEIMR, 0xffffffff);
2322         I915_WRITE(SDEIER, 0x0);
2323         I915_WRITE(SDEIIR, I915_READ(SDEIIR));
2324         if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
2325                 I915_WRITE(SERR_INT, I915_READ(SERR_INT));
2326 }
2327
2328 static void i8xx_irq_preinstall(struct drm_device * dev)
2329 {
2330         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2331         int pipe;
2332
2333         atomic_set(&dev_priv->irq_received, 0);
2334
2335         for_each_pipe(pipe)
2336                 I915_WRITE(PIPESTAT(pipe), 0);
2337         I915_WRITE16(IMR, 0xffff);
2338         I915_WRITE16(IER, 0x0);
2339         POSTING_READ16(IER);
2340 }
2341
2342 static int i8xx_irq_postinstall(struct drm_device *dev)
2343 {
2344         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2345
2346         I915_WRITE16(EMR,
2347                      ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
2348
2349         /* Unmask the interrupts that we always want on. */
2350         dev_priv->irq_mask =
2351                 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2352                   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2353                   I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2354                   I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
2355                   I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2356         I915_WRITE16(IMR, dev_priv->irq_mask);
2357
2358         I915_WRITE16(IER,
2359                      I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2360                      I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2361                      I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
2362                      I915_USER_INTERRUPT);
2363         POSTING_READ16(IER);
2364
2365         return 0;
2366 }
2367
2368 /*
2369  * Returns true when a page flip has completed.
2370  */
2371 static bool i8xx_handle_vblank(struct drm_device *dev,
2372                                int pipe, u16 iir)
2373 {
2374         drm_i915_private_t *dev_priv = dev->dev_private;
2375         u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(pipe);
2376
2377         if (!drm_handle_vblank(dev, pipe))
2378                 return false;
2379
2380         if ((iir & flip_pending) == 0)
2381                 return false;
2382
2383         intel_prepare_page_flip(dev, pipe);
2384
2385         /* We detect FlipDone by looking for the change in PendingFlip from '1'
2386          * to '0' on the following vblank, i.e. IIR has the PendingFlip
2387          * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
2388          * the flip is completed (no longer pending). Since this doesn't raise
2389          * an interrupt per se, we watch for the change at vblank.
2390          */
2391         if (I915_READ16(ISR) & flip_pending)
2392                 return false;
2393
2394         intel_finish_page_flip(dev, pipe);
2395
2396         return true;
2397 }
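/*
 * Illustrative aside, not driver code: FlipDone has no interrupt of its own
 * here. IIR latches PendingFlip when the MI_DISPLAY_FLIP executes, and the
 * flip is known complete once the live ISR copy of that bit has dropped by
 * the next vblank. As a pure predicate over the two register snapshots
 * (made-up name):
 */
#if 0
static int model_flip_completed(unsigned short iir, unsigned short isr,
                                unsigned short flip_pending)
{
        /* latched in IIR but no longer live in ISR => flip finished */
        return (iir & flip_pending) && !(isr & flip_pending);
}
#endif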
2398
2399 static irqreturn_t i8xx_irq_handler(int irq, void *arg)
2400 {
2401         struct drm_device *dev = (struct drm_device *) arg;
2402         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2403         u16 iir, new_iir;
2404         u32 pipe_stats[2];
2405         unsigned long irqflags;
2406         int irq_received;
2407         int pipe;
2408         u16 flip_mask =
2409                 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2410                 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
2411
2412         atomic_inc(&dev_priv->irq_received);
2413
2414         iir = I915_READ16(IIR);
2415         if (iir == 0)
2416                 return IRQ_NONE;
2417
2418         while (iir & ~flip_mask) {
2419                 /* Can't rely on pipestat interrupt bit in iir as it might
2420                  * have been cleared after the pipestat interrupt was received.
2421                  * It doesn't set the bit in iir again, but it still produces
2422                  * interrupts (for non-MSI).
2423                  */
2424                 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2425                 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
2426                         i915_handle_error(dev, false);
2427
2428                 for_each_pipe(pipe) {
2429                         int reg = PIPESTAT(pipe);
2430                         pipe_stats[pipe] = I915_READ(reg);
2431
2432                         /*
2433                          * Clear the PIPE*STAT regs before the IIR
2434                          */
2435                         if (pipe_stats[pipe] & 0x8000ffff) {
2436                                 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2437                                         DRM_DEBUG_DRIVER("pipe %c underrun\n",
2438                                                          pipe_name(pipe));
2439                                 I915_WRITE(reg, pipe_stats[pipe]);
2440                                 irq_received = 1;
2441                         }
2442                 }
2443                 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2444
2445                 I915_WRITE16(IIR, iir & ~flip_mask);
2446                 new_iir = I915_READ16(IIR); /* Flush posted writes */
2447
2448                 i915_update_dri1_breadcrumb(dev);
2449
2450                 if (iir & I915_USER_INTERRUPT)
2451                         notify_ring(dev, &dev_priv->ring[RCS]);
2452
2453                 if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS &&
2454                     i8xx_handle_vblank(dev, 0, iir))
2455                         flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(0);
2456
2457                 if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS &&
2458                     i8xx_handle_vblank(dev, 1, iir))
2459                         flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(1);
2460
2461                 iir = new_iir;
2462         }
2463
2464         return IRQ_HANDLED;
2465 }
2466
2467 static void i8xx_irq_uninstall(struct drm_device * dev)
2468 {
2469         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2470         int pipe;
2471
2472         for_each_pipe(pipe) {
2473                 /* Clear enable bits; then clear status bits */
2474                 I915_WRITE(PIPESTAT(pipe), 0);
2475                 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
2476         }
2477         I915_WRITE16(IMR, 0xffff);
2478         I915_WRITE16(IER, 0x0);
2479         I915_WRITE16(IIR, I915_READ16(IIR));
2480 }
2481
2482 static void i915_irq_preinstall(struct drm_device * dev)
2483 {
2484         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2485         int pipe;
2486
2487         atomic_set(&dev_priv->irq_received, 0);
2488
2489         if (I915_HAS_HOTPLUG(dev)) {
2490                 I915_WRITE(PORT_HOTPLUG_EN, 0);
2491                 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2492         }
2493
2494         I915_WRITE16(HWSTAM, 0xeffe);
2495         for_each_pipe(pipe)
2496                 I915_WRITE(PIPESTAT(pipe), 0);
2497         I915_WRITE(IMR, 0xffffffff);
2498         I915_WRITE(IER, 0x0);
2499         POSTING_READ(IER);
2500 }
2501
2502 static int i915_irq_postinstall(struct drm_device *dev)
2503 {
2504         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2505         u32 enable_mask;
2506
2507         I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
2508
2509         /* Unmask the interrupts that we always want on. */
2510         dev_priv->irq_mask =
2511                 ~(I915_ASLE_INTERRUPT |
2512                   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2513                   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2514                   I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2515                   I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
2516                   I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2517
2518         enable_mask =
2519                 I915_ASLE_INTERRUPT |
2520                 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2521                 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2522                 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
2523                 I915_USER_INTERRUPT;
2524
2525         if (I915_HAS_HOTPLUG(dev)) {
2526                 I915_WRITE(PORT_HOTPLUG_EN, 0);
2527                 POSTING_READ(PORT_HOTPLUG_EN);
2528
2529                 /* Enable in IER... */
2530                 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
2531                 /* and unmask in IMR */
2532                 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
2533         }
2534
2535         I915_WRITE(IMR, dev_priv->irq_mask);
2536         I915_WRITE(IER, enable_mask);
2537         POSTING_READ(IER);
2538
2539         i915_enable_asle_pipestat(dev);
2540
2541         return 0;
2542 }
2543
2544 /*
2545  * Returns true when a page flip has completed.
2546  */
2547 static bool i915_handle_vblank(struct drm_device *dev,
2548                                int plane, int pipe, u32 iir)
2549 {
2550         drm_i915_private_t *dev_priv = dev->dev_private;
2551         u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
2552
2553         if (!drm_handle_vblank(dev, pipe))
2554                 return false;
2555
2556         if ((iir & flip_pending) == 0)
2557                 return false;
2558
2559         intel_prepare_page_flip(dev, plane);
2560
2561         /* We detect FlipDone by looking for the change in PendingFlip from '1'
2562          * to '0' on the following vblank, i.e. IIR has the PendingFlip
2563          * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
2564          * the flip is completed (no longer pending). Since this doesn't raise
2565          * an interrupt per se, we watch for the change at vblank.
2566          */
2567         if (I915_READ(ISR) & flip_pending)
2568                 return false;
2569
2570         intel_finish_page_flip(dev, pipe);
2571
2572         return true;
2573 }
2574
2575 static irqreturn_t i915_irq_handler(int irq, void *arg)
2576 {
2577         struct drm_device *dev = (struct drm_device *) arg;
2578         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2579         u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
2580         unsigned long irqflags;
2581         u32 flip_mask =
2582                 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2583                 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
2584         int pipe, ret = IRQ_NONE;
2585
2586         atomic_inc(&dev_priv->irq_received);
2587
2588         iir = I915_READ(IIR);
2589         do {
2590                 bool irq_received = (iir & ~flip_mask) != 0;
2591                 bool blc_event = false;
2592
2593                 /* Can't rely on pipestat interrupt bit in iir as it might
2594                  * have been cleared after the pipestat interrupt was received.
2595                  * It doesn't set the bit in iir again, but it still produces
2596                  * interrupts (for non-MSI).
2597                  */
2598                 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2599                 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
2600                         i915_handle_error(dev, false);
2601
2602                 for_each_pipe(pipe) {
2603                         int reg = PIPESTAT(pipe);
2604                         pipe_stats[pipe] = I915_READ(reg);
2605
2606                         /* Clear the PIPE*STAT regs before the IIR */
2607                         if (pipe_stats[pipe] & 0x8000ffff) {
2608                                 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2609                                         DRM_DEBUG_DRIVER("pipe %c underrun\n",
2610                                                          pipe_name(pipe));
2611                                 I915_WRITE(reg, pipe_stats[pipe]);
2612                                 irq_received = true;
2613                         }
2614                 }
2615                 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2616
2617                 if (!irq_received)
2618                         break;
2619
2620                 /* Consume port.  Then clear IIR or we'll miss events */
2621                 if ((I915_HAS_HOTPLUG(dev)) &&
2622                     (iir & I915_DISPLAY_PORT_INTERRUPT)) {
2623                         u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
2624                         u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
2625
2626                         DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
2627                                   hotplug_status);
2628
2629                         intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);
2630
2631                         I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
2632                         POSTING_READ(PORT_HOTPLUG_STAT);
2633                 }
2634
2635                 I915_WRITE(IIR, iir & ~flip_mask);
2636                 new_iir = I915_READ(IIR); /* Flush posted writes */
2637
2638                 if (iir & I915_USER_INTERRUPT)
2639                         notify_ring(dev, &dev_priv->ring[RCS]);
2640
2641                 for_each_pipe(pipe) {
2642                         int plane = pipe;
2643                         if (IS_MOBILE(dev))
2644                                 plane = !plane;
2645
2646                         if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
2647                             i915_handle_vblank(dev, plane, pipe, iir))
2648                                 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
2649
2650                         if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
2651                                 blc_event = true;
2652                 }
2653
2654                 if (blc_event || (iir & I915_ASLE_INTERRUPT))
2655                         intel_opregion_asle_intr(dev);
2656
2657                 /* With MSI, interrupts are only generated when iir
2658                  * transitions from zero to nonzero.  If another bit got
2659                  * set while we were handling the existing iir bits, then
2660                  * we would never get another interrupt.
2661                  *
2662                  * This is fine on non-MSI as well, as if we hit this path
2663                  * we avoid exiting the interrupt handler only to generate
2664                  * another one.
2665                  *
2666                  * Note that for MSI this could cause a stray interrupt report
2667                  * if an interrupt landed in the time between writing IIR and
2668                  * the posting read.  This should be rare enough to never
2669                  * trigger the 99% of 100,000 interrupts test for disabling
2670                  * stray interrupts.
2671                  */
2672                 ret = IRQ_HANDLED;
2673                 iir = new_iir;
2674         } while (iir & ~flip_mask);
2675
2676         i915_update_dri1_breadcrumb(dev);
2677
2678         return ret;
2679 }
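/*
 * Illustrative aside, not driver code: with MSI an interrupt only fires on
 * an IIR transition from zero to nonzero, so the handler above re-reads IIR
 * after each ack and loops while bits remain rather than returning and
 * hoping for another edge. A minimal standalone model of that drain loop
 * (made-up names):
 */
#if 0
static unsigned int model_iir;          /* stands in for the IIR register */

static void model_drain_iir(void (*handle)(unsigned int))
{
        unsigned int iir = model_iir;

        while (iir) {
                model_iir &= ~iir;      /* W1C ack of the bits we saw */
                handle(iir);            /* new bits may land meanwhile */
                iir = model_iir;        /* re-read; loop if more arrived */
        }
}
#endif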
2680
2681 static void i915_irq_uninstall(struct drm_device * dev)
2682 {
2683         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2684         int pipe;
2685
2686         del_timer_sync(&dev_priv->hotplug_reenable_timer);
2687
2688         if (I915_HAS_HOTPLUG(dev)) {
2689                 I915_WRITE(PORT_HOTPLUG_EN, 0);
2690                 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2691         }
2692
2693         I915_WRITE16(HWSTAM, 0xffff);
2694         for_each_pipe(pipe) {
2695                 /* Clear enable bits; then clear status bits */
2696                 I915_WRITE(PIPESTAT(pipe), 0);
2697                 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
2698         }
2699         I915_WRITE(IMR, 0xffffffff);
2700         I915_WRITE(IER, 0x0);
2701
2702         I915_WRITE(IIR, I915_READ(IIR));
2703 }
2704
2705 static void i965_irq_preinstall(struct drm_device * dev)
2706 {
2707         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2708         int pipe;
2709
2710         atomic_set(&dev_priv->irq_received, 0);
2711
2712         I915_WRITE(PORT_HOTPLUG_EN, 0);
2713         I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2714
2715         I915_WRITE(HWSTAM, 0xeffe);
2716         for_each_pipe(pipe)
2717                 I915_WRITE(PIPESTAT(pipe), 0);
2718         I915_WRITE(IMR, 0xffffffff);
2719         I915_WRITE(IER, 0x0);
2720         POSTING_READ(IER);
2721 }
2722
2723 static int i965_irq_postinstall(struct drm_device *dev)
2724 {
2725         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2726         u32 enable_mask;
2727         u32 error_mask;
2728         unsigned long irqflags;
2729
2730         /* Unmask the interrupts that we always want on. */
2731         dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
2732                                I915_DISPLAY_PORT_INTERRUPT |
2733                                I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2734                                I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2735                                I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2736                                I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
2737                                I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2738
2739         enable_mask = ~dev_priv->irq_mask;
2740         enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2741                          I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
2742         enable_mask |= I915_USER_INTERRUPT;
2743
2744         if (IS_G4X(dev))
2745                 enable_mask |= I915_BSD_USER_INTERRUPT;
2746
2747         /* Interrupt setup is already guaranteed to be single-threaded, this is
2748          * just to make the assert_spin_locked check happy. */
2749         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2750         i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
2751         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2752
2753         /*
2754          * Enable some error detection, note the instruction error mask
2755          * bit is reserved, so we leave it masked.
2756          */
2757         if (IS_G4X(dev)) {
2758                 error_mask = ~(GM45_ERROR_PAGE_TABLE |
2759                                GM45_ERROR_MEM_PRIV |
2760                                GM45_ERROR_CP_PRIV |
2761                                I915_ERROR_MEMORY_REFRESH);
2762         } else {
2763                 error_mask = ~(I915_ERROR_PAGE_TABLE |
2764                                I915_ERROR_MEMORY_REFRESH);
2765         }
2766         I915_WRITE(EMR, error_mask);
2767
2768         I915_WRITE(IMR, dev_priv->irq_mask);
2769         I915_WRITE(IER, enable_mask);
2770         POSTING_READ(IER);
2771
2772         I915_WRITE(PORT_HOTPLUG_EN, 0);
2773         POSTING_READ(PORT_HOTPLUG_EN);
2774
2775         i915_enable_asle_pipestat(dev);
2776
2777         return 0;
2778 }
2779
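/*
 * Rebuild PORT_HOTPLUG_EN from software state: only pins whose
 * hpd_mark is HPD_ENABLED get their detect bit, so pins shut off by
 * the irq storm logic stay quiet until the reenable timer below
 * restores them.  Must be called with dev_priv->irq_lock held.
 */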
2780 static void i915_hpd_irq_setup(struct drm_device *dev)
2781 {
2782         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2783         struct drm_mode_config *mode_config = &dev->mode_config;
2784         struct intel_encoder *intel_encoder;
2785         u32 hotplug_en;
2786
2787         assert_spin_locked(&dev_priv->irq_lock);
2788
2789         if (I915_HAS_HOTPLUG(dev)) {
2790                 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
2791                 hotplug_en &= ~HOTPLUG_INT_EN_MASK;
2792                 /* Note HDMI and DP share hotplug bits, and the
2793                  * enable bits are the same for all generations. */
2794                 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
2795                         if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
2796                                 hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
2797                 /* Programming the CRT detection parameters tends
2798                  * to generate a spurious hotplug event about three
2799                  * seconds later, so just do it once.
2800                  */
2801                 if (IS_G4X(dev))
2802                         hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
2803                 hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
2804                 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
2805
2806                 /* Ignore TV since it's buggy */
2807                 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
2808         }
2809 }
2810
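/*
 * Main i965/g4x interrupt handler.  Loops until no IIR bits other
 * than the polled plane flip-pending bits remain: PIPESTAT is
 * snapshotted and cleared under irq_lock, the hotplug port is
 * consumed before IIR is acked so no port event is lost, and the
 * remaining bits fan out to ring notification, vblank/page-flip
 * completion, ASLE and GMBUS handling.
 */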
2811 static irqreturn_t i965_irq_handler(int irq, void *arg)
2812 {
2813         struct drm_device *dev = (struct drm_device *) arg;
2814         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2815         u32 iir, new_iir;
2816         u32 pipe_stats[I915_MAX_PIPES];
2817         unsigned long irqflags;
2818         int irq_received;
2819         int ret = IRQ_NONE, pipe;
2820         u32 flip_mask =
2821                 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2822                 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
2823
2824         atomic_inc(&dev_priv->irq_received);
2825
2826         iir = I915_READ(IIR);
2827
2828         for (;;) {
2829                 bool blc_event = false;
2830
2831                 irq_received = (iir & ~flip_mask) != 0;
2832
2833                 /* Can't rely on pipestat interrupt bit in iir as it might
2834                  * have been cleared after the pipestat interrupt was received.
2835                  * It doesn't set the bit in iir again, but it still produces
2836                  * interrupts (for non-MSI).
2837                  */
2838                 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2839                 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
2840                         i915_handle_error(dev, false);
2841
2842                 for_each_pipe(pipe) {
2843                         int reg = PIPESTAT(pipe);
2844                         pipe_stats[pipe] = I915_READ(reg);
2845
2846                         /*
2847                          * Clear the PIPE*STAT regs before the IIR
2848                          */
2849                         if (pipe_stats[pipe] & 0x8000ffff) {
2850                                 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2851                                         DRM_DEBUG_DRIVER("pipe %c underrun\n",
2852                                                          pipe_name(pipe));
2853                                 I915_WRITE(reg, pipe_stats[pipe]);
2854                                 irq_received = 1;
2855                         }
2856                 }
2857                 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2858
2859                 if (!irq_received)
2860                         break;
2861
2862                 ret = IRQ_HANDLED;
2863
2864                 /* Consume port.  Then clear IIR or we'll miss events */
2865                 if (iir & I915_DISPLAY_PORT_INTERRUPT) {
2866                         u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
2867                         u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ?
2868                                                                   HOTPLUG_INT_STATUS_G4X :
2869                                                                   HOTPLUG_INT_STATUS_I915);
2870
2871                         DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
2872                                   hotplug_status);
2873
2874                         intel_hpd_irq_handler(dev, hotplug_trigger,
2875                                               IS_G4X(dev) ? hpd_status_gen4 : hpd_status_i915);
2876
2877                         I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
2878                         I915_READ(PORT_HOTPLUG_STAT);
2879                 }
2880
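                /*
                 * Ack everything except the still-pending flip bits:
                 * those must stay latched in IIR until
                 * i915_handle_vblank() sees the flip complete, at
                 * which point they are dropped from flip_mask and
                 * acked on the next pass through the loop.
                 */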
2881                 I915_WRITE(IIR, iir & ~flip_mask);
2882                 new_iir = I915_READ(IIR); /* Flush posted writes */
2883
2884                 if (iir & I915_USER_INTERRUPT)
2885                         notify_ring(dev, &dev_priv->ring[RCS]);
2886                 if (iir & I915_BSD_USER_INTERRUPT)
2887                         notify_ring(dev, &dev_priv->ring[VCS]);
2888
2889                 for_each_pipe(pipe) {
2890                         if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
2891                             i915_handle_vblank(dev, pipe, pipe, iir))
2892                                 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
2893
2894                         if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
2895                                 blc_event = true;
2896                 }
2897
2899                 if (blc_event || (iir & I915_ASLE_INTERRUPT))
2900                         intel_opregion_asle_intr(dev);
2901
2902                 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
2903                         gmbus_irq_handler(dev);
2904
2905                 /* With MSI, interrupts are only generated when iir
2906                  * transitions from zero to nonzero.  If another bit got
2907                  * set while we were handling the existing iir bits, then
2908                  * we would never get another interrupt.
2909                  *
2910                  * This is fine on non-MSI as well: if we hit this path,
2911                  * we avoid exiting the interrupt handler only to generate
2912                  * another one.
2913                  *
2914                  * Note that for MSI this could cause a stray interrupt report
2915                  * if an interrupt landed in the time between writing IIR and
2916                  * the posting read.  This should be rare enough to never
2917                  * trigger the 99.9% of 100,000 interrupts test for disabling
2918                  * stray interrupts.
2919                  */
2920                 iir = new_iir;
2921         }
2922
2923         i915_update_dri1_breadcrumb(dev);
2924
2925         return ret;
2926 }
2927
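/*
 * Mirror of i965_irq_preinstall, run at irq teardown: stop the
 * storm-reenable timer, zero every enable register, and ack any
 * status still latched in PIPESTAT and IIR.
 */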
2928 static void i965_irq_uninstall(struct drm_device *dev)
2929 {
2930         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2931         int pipe;
2932
2933         if (!dev_priv)
2934                 return;
2935
2936         del_timer_sync(&dev_priv->hotplug_reenable_timer);
2937
2938         I915_WRITE(PORT_HOTPLUG_EN, 0);
2939         I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2940
2941         I915_WRITE(HWSTAM, 0xffffffff);
2942         for_each_pipe(pipe)
2943                 I915_WRITE(PIPESTAT(pipe), 0);
2944         I915_WRITE(IMR, 0xffffffff);
2945         I915_WRITE(IER, 0x0);
2946
2947         for_each_pipe(pipe)
2948                 I915_WRITE(PIPESTAT(pipe),
2949                            I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
2950         I915_WRITE(IIR, I915_READ(IIR));
2951 }
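/*
 * Timer callback that lifts HPD irq storm suppression: every pin
 * still marked HPD_DISABLED is flipped back to HPD_ENABLED, its
 * connectors return to their native polling mode (defaulting to
 * hotplug interrupts), and the platform hpd_irq_setup hook rewrites
 * the hardware enable bits, all under irq_lock.
 */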
2952
2953 static void i915_reenable_hotplug_timer_func(unsigned long data)
2954 {
2955         drm_i915_private_t *dev_priv = (drm_i915_private_t *)data;
2956         struct drm_device *dev = dev_priv->dev;
2957         struct drm_mode_config *mode_config = &dev->mode_config;
2958         unsigned long irqflags;
2959         int i;
2960
2961         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2962         for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
2963                 struct drm_connector *connector;
2964
2965                 if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
2966                         continue;
2967
2968                 dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
2969
2970                 list_for_each_entry(connector, &mode_config->connector_list, head) {
2971                         struct intel_connector *intel_connector = to_intel_connector(connector);
2972
2973                         if (intel_connector->encoder->hpd_pin == i) {
2974                                 if (connector->polled != intel_connector->polled)
2975                                         DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
2976                                                          drm_get_connector_name(connector));
2977                                 connector->polled = intel_connector->polled;
2978                                 if (!connector->polled)
2979                                         connector->polled = DRM_CONNECTOR_POLL_HPD;
2980                         }
2981                 }
2982         }
2983         if (dev_priv->display.hpd_irq_setup)
2984                 dev_priv->display.hpd_irq_setup(dev);
2985         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2986 }
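/*
 * One-time driver init: set up the deferred work items and timers
 * used by the handlers above, register a cpu/dma latency pm_qos
 * request, pick the vblank counter width (24 bits before g4x, the
 * full 32 bits on g4x and gen5+), and dispatch the per-platform irq
 * and hotplug setup hooks.
 */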
2987
2988 void intel_irq_init(struct drm_device *dev)
2989 {
2990         struct drm_i915_private *dev_priv = dev->dev_private;
2991
2992         INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
2993         INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
2994         INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
2995         INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
2996
2997         setup_timer(&dev_priv->gpu_error.hangcheck_timer,
2998                     i915_hangcheck_elapsed,
2999                     (unsigned long) dev);
3000         setup_timer(&dev_priv->hotplug_reenable_timer, i915_reenable_hotplug_timer_func,
3001                     (unsigned long) dev_priv);
3002
3003         pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
3004
3005         dev->driver->get_vblank_counter = i915_get_vblank_counter;
3006         dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
3007         if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
3008                 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
3009                 dev->driver->get_vblank_counter = gm45_get_vblank_counter;
3010         }
3011
3012         if (drm_core_check_feature(dev, DRIVER_MODESET))
3013                 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
3014         else
3015                 dev->driver->get_vblank_timestamp = NULL;
3016         dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
3017
3018         if (IS_VALLEYVIEW(dev)) {
3019                 dev->driver->irq_handler = valleyview_irq_handler;
3020                 dev->driver->irq_preinstall = valleyview_irq_preinstall;
3021                 dev->driver->irq_postinstall = valleyview_irq_postinstall;
3022                 dev->driver->irq_uninstall = valleyview_irq_uninstall;
3023                 dev->driver->enable_vblank = valleyview_enable_vblank;
3024                 dev->driver->disable_vblank = valleyview_disable_vblank;
3025                 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
3026         } else if (HAS_PCH_SPLIT(dev)) {
3027                 dev->driver->irq_handler = ironlake_irq_handler;
3028                 dev->driver->irq_preinstall = ironlake_irq_preinstall;
3029                 dev->driver->irq_postinstall = ironlake_irq_postinstall;
3030                 dev->driver->irq_uninstall = ironlake_irq_uninstall;
3031                 dev->driver->enable_vblank = ironlake_enable_vblank;
3032                 dev->driver->disable_vblank = ironlake_disable_vblank;
3033                 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
3034         } else {
3035                 if (INTEL_INFO(dev)->gen == 2) {
3036                         dev->driver->irq_preinstall = i8xx_irq_preinstall;
3037                         dev->driver->irq_postinstall = i8xx_irq_postinstall;
3038                         dev->driver->irq_handler = i8xx_irq_handler;
3039                         dev->driver->irq_uninstall = i8xx_irq_uninstall;
3040                 } else if (INTEL_INFO(dev)->gen == 3) {
3041                         dev->driver->irq_preinstall = i915_irq_preinstall;
3042                         dev->driver->irq_postinstall = i915_irq_postinstall;
3043                         dev->driver->irq_uninstall = i915_irq_uninstall;
3044                         dev->driver->irq_handler = i915_irq_handler;
3045                         dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
3046                 } else {
3047                         dev->driver->irq_preinstall = i965_irq_preinstall;
3048                         dev->driver->irq_postinstall = i965_irq_postinstall;
3049                         dev->driver->irq_uninstall = i965_irq_uninstall;
3050                         dev->driver->irq_handler = i965_irq_handler;
3051                         dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
3052                 }
3053                 dev->driver->enable_vblank = i915_enable_vblank;
3054                 dev->driver->disable_vblank = i915_disable_vblank;
3055         }
3056 }
3057
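/*
 * (Re)initialize hotplug support: reset the per-pin storm statistics
 * to HPD_ENABLED, derive each connector's polling mode, then invoke
 * the platform hpd_irq_setup hook under irq_lock to program the
 * hardware accordingly.
 */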
3058 void intel_hpd_init(struct drm_device *dev)
3059 {
3060         struct drm_i915_private *dev_priv = dev->dev_private;
3061         struct drm_mode_config *mode_config = &dev->mode_config;
3062         struct drm_connector *connector;
3063         unsigned long irqflags;
3064         int i;
3065
3066         for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
3067                 dev_priv->hpd_stats[i].hpd_cnt = 0;
3068                 dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
3069         }
3070         list_for_each_entry(connector, &mode_config->connector_list, head) {
3071                 struct intel_connector *intel_connector = to_intel_connector(connector);
3072                 connector->polled = intel_connector->polled;
3073                 if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
3074                         connector->polled = DRM_CONNECTOR_POLL_HPD;
3075         }
3076
3077         /* Interrupt setup is already guaranteed to be single-threaded, this is
3078          * just to make the assert_spin_locked checks happy. */
3079         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3080         if (dev_priv->display.hpd_irq_setup)
3081                 dev_priv->display.hpd_irq_setup(dev);
3082         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3083 }