Merge commit 'Merge branch 'drm-fixes' of git://people.freedesktop.org/~airlied/linux'
author Daniel Vetter <daniel.vetter@ffwll.ch>
Thu, 25 Jul 2013 07:41:59 +0000 (09:41 +0200)
committer Daniel Vetter <daniel.vetter@ffwll.ch>
Thu, 25 Jul 2013 13:18:41 +0000 (15:18 +0200)
This backmerges Linus' merge commit of the latest drm-fixes pull:

commit 549f3a1218ba18fcde11ef0e22b07e6365645788
Merge: 42577ca 058ca4a
Author: Linus Torvalds <torvalds@linux-foundation.org>
Date:   Tue Jul 23 15:47:08 2013 -0700

    Merge branch 'drm-fixes' of git://people.freedesktop.org/~airlied/linux

We've accrued a few too many conflicts, but the real reason is that I
want to merge the 100% solution for Haswell concurrent register
writes into drm-intel-next. But that depends upon the 90% bandaid
merged into -fixes:

commit a7cd1b8fea2f341b626b255d9898a5ca5fabbf0a
Author: Chris Wilson <chris@chris-wilson.co.uk>
Date:   Fri Jul 19 20:36:51 2013 +0100

    drm/i915: Serialize almost all register access

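For context, the serialization fix referenced above amounts to taking a
single spinlock around every MMIO register access. A minimal sketch of
the idea, using the gt_lock this tree already initializes; the accessor
names below are simplified stand-ins for the driver's generated
__i915_read/__i915_write macros, not the literal patch:

/* Sketch of serialized register access, modeled on a7cd1b8fea2f
 * ("drm/i915: Serialize almost all register access"). The real driver
 * generates its accessors with macros; these names are illustrative.
 */
static u32 i915_read32_serialized(struct drm_i915_private *dev_priv, u32 reg)
{
	unsigned long irqflags;
	u32 val;

	spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
	val = readl(dev_priv->regs + reg);	/* MMIO read under the lock */
	spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);

	return val;
}

static void i915_write32_serialized(struct drm_i915_private *dev_priv,
				    u32 reg, u32 val)
{
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
	writel(val, dev_priv->regs + reg);	/* MMIO write under the lock */
	spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
}
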
Also, this lets us roll up the accrued conflicts.

Usually I'd backmerge a tagged -rc, but I want to get this done before
heading off on vacation next week ;-)

Conflicts:
drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/i915/i915_gem.c

v2: For added hilarity we have an init sequence conflict around the
gt_lock, so we need to move that one, too. Spotted by Jani Nikula.

Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
38 files changed:
drivers/gpu/drm/drm_mm.c
drivers/gpu/drm/i915/Makefile
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_context.c
drivers/gpu/drm/i915/i915_gem_debug.c
drivers/gpu/drm/i915/i915_gem_evict.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/i915_gem_stolen.c
drivers/gpu/drm/i915/i915_gem_tiling.c
drivers/gpu/drm/i915/i915_gpu_error.c [new file with mode: 0644]
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/i915_sysfs.c
drivers/gpu/drm/i915/i915_trace.h
drivers/gpu/drm/i915/intel_crt.c
drivers/gpu/drm/i915/intel_ddi.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_dvo.c
drivers/gpu/drm/i915/intel_fb.c
drivers/gpu/drm/i915/intel_hdmi.c
drivers/gpu/drm/i915/intel_lvds.c
drivers/gpu/drm/i915/intel_overlay.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_ringbuffer.h
drivers/gpu/drm/i915/intel_sdvo.c
drivers/gpu/drm/i915/intel_sprite.c
drivers/gpu/drm/i915/intel_tv.c
include/drm/drm_dp_helper.h
include/drm/drm_mm.h
include/uapi/drm/i915_drm.h

diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 543b9b3171d32310de903668bc69f30901b12e91..fe304f903b130de19dbd4a11e6a75ea687a40ec4 100644
@@ -147,33 +147,27 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
        }
 }
 
-struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm,
-                                       unsigned long start,
-                                       unsigned long size,
-                                       bool atomic)
+int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
 {
-       struct drm_mm_node *hole, *node;
-       unsigned long end = start + size;
+       struct drm_mm_node *hole;
+       unsigned long end = node->start + node->size;
        unsigned long hole_start;
        unsigned long hole_end;
 
+       BUG_ON(node == NULL);
+
+       /* Find the relevant hole to add our node to */
        drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
-               if (hole_start > start || hole_end < end)
+               if (hole_start > node->start || hole_end < end)
                        continue;
 
-               node = drm_mm_kmalloc(mm, atomic);
-               if (unlikely(node == NULL))
-                       return NULL;
-
-               node->start = start;
-               node->size = size;
                node->mm = mm;
                node->allocated = 1;
 
                INIT_LIST_HEAD(&node->hole_stack);
                list_add(&node->node_list, &hole->node_list);
 
-               if (start == hole_start) {
+               if (node->start == hole_start) {
                        hole->hole_follows = 0;
                        list_del_init(&hole->hole_stack);
                }
@@ -184,13 +178,14 @@ struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm,
                        node->hole_follows = 1;
                }
 
-               return node;
+               return 0;
        }
 
-       WARN(1, "no hole found for block 0x%lx + 0x%lx\n", start, size);
-       return NULL;
+       WARN(1, "no hole found for node 0x%lx + 0x%lx\n",
+            node->start, node->size);
+       return -ENOSPC;
 }
-EXPORT_SYMBOL(drm_mm_create_block);
+EXPORT_SYMBOL(drm_mm_reserve_node);
 
 struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
                                             unsigned long size,
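
The reworked interface inverts the allocation: rather than drm_mm
kmalloc'ing a node for a requested range, the caller embeds a struct
drm_mm_node, fills in start and size, and asks for exactly that range
to be reserved. A hypothetical caller, assuming the mm was set up with
drm_mm_init() (the helper and its fixed-offset use case are
illustrative, not part of this patch):

/* Hypothetical caller of the new API: reserve a pre-determined range,
 * e.g. an offset dictated by firmware. @node is storage embedded by
 * the caller and must stay alive while the reservation is held.
 */
static int reserve_fixed_range(struct drm_mm *mm, struct drm_mm_node *node,
			       unsigned long start, unsigned long size)
{
	memset(node, 0, sizeof(*node));
	node->start = start;	/* the caller chooses the exact placement */
	node->size = size;

	/* 0 on success; -ENOSPC if no hole covers [start, start + size) */
	return drm_mm_reserve_node(mm, node);
}
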
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 40034ecefd3b977a435f661f5f8a057392fb9fe0..9d1da7cceb21b6103f6b0cd09ed9776262d1420e 100644
@@ -5,6 +5,7 @@
 ccflags-y := -Iinclude/drm
 i915-y := i915_drv.o i915_dma.o i915_irq.o \
          i915_debugfs.o \
+         i915_gpu_error.o \
           i915_suspend.o \
          i915_gem.o \
          i915_gem_context.o \
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 47d6c748057e446ab70c017d788d8d015c25430c..9d871c7eeaeefa99f6fff680d192b3c9533d1ae2 100644
@@ -30,7 +30,6 @@
 #include <linux/debugfs.h>
 #include <linux/slab.h>
 #include <linux/export.h>
-#include <generated/utsrelease.h>
 #include <drm/drmP.h>
 #include "intel_drv.h"
 #include "intel_ringbuffer.h"
@@ -90,16 +89,6 @@ static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
        }
 }
 
-static const char *cache_level_str(int type)
-{
-       switch (type) {
-       case I915_CACHE_NONE: return " uncached";
-       case I915_CACHE_LLC: return " snooped (LLC)";
-       case I915_CACHE_LLC_MLC: return " snooped (LLC+MLC)";
-       default: return "";
-       }
-}
-
 static void
 describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 {
@@ -113,7 +102,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
                   obj->last_read_seqno,
                   obj->last_write_seqno,
                   obj->last_fenced_seqno,
-                  cache_level_str(obj->cache_level),
+                  i915_cache_level_str(obj->cache_level),
                   obj->dirty ? " dirty" : "",
                   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
        if (obj->base.name)
@@ -122,9 +111,9 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
                seq_printf(m, " (pinned x %d)", obj->pin_count);
        if (obj->fence_reg != I915_FENCE_REG_NONE)
                seq_printf(m, " (fence: %d)", obj->fence_reg);
-       if (obj->gtt_space != NULL)
-               seq_printf(m, " (gtt offset: %08x, size: %08x)",
-                          obj->gtt_offset, (unsigned int)obj->gtt_space->size);
+       if (i915_gem_obj_ggtt_bound(obj))
+               seq_printf(m, " (gtt offset: %08lx, size: %08x)",
+                          i915_gem_obj_ggtt_offset(obj), (unsigned int)i915_gem_obj_ggtt_size(obj));
        if (obj->stolen)
                seq_printf(m, " (stolen: %08lx)", obj->stolen->start);
        if (obj->pin_mappable || obj->fault_mappable) {
@@ -146,7 +135,8 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
        uintptr_t list = (uintptr_t) node->info_ent->data;
        struct list_head *head;
        struct drm_device *dev = node->minor->dev;
-       drm_i915_private_t *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct i915_address_space *vm = &dev_priv->gtt.base;
        struct drm_i915_gem_object *obj;
        size_t total_obj_size, total_gtt_size;
        int count, ret;
@@ -157,12 +147,12 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
 
        switch (list) {
        case ACTIVE_LIST:
-               seq_printf(m, "Active:\n");
-               head = &dev_priv->mm.active_list;
+               seq_puts(m, "Active:\n");
+               head = &vm->active_list;
                break;
        case INACTIVE_LIST:
-               seq_printf(m, "Inactive:\n");
-               head = &dev_priv->mm.inactive_list;
+               seq_puts(m, "Inactive:\n");
+               head = &vm->inactive_list;
                break;
        default:
                mutex_unlock(&dev->struct_mutex);
@@ -171,11 +161,11 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
 
        total_obj_size = total_gtt_size = count = 0;
        list_for_each_entry(obj, head, mm_list) {
-               seq_printf(m, "   ");
+               seq_puts(m, "   ");
                describe_obj(m, obj);
-               seq_printf(m, "\n");
+               seq_putc(m, '\n');
                total_obj_size += obj->base.size;
-               total_gtt_size += obj->gtt_space->size;
+               total_gtt_size += i915_gem_obj_ggtt_size(obj);
                count++;
        }
        mutex_unlock(&dev->struct_mutex);
@@ -187,10 +177,10 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
 
 #define count_objects(list, member) do { \
        list_for_each_entry(obj, list, member) { \
-               size += obj->gtt_space->size; \
+               size += i915_gem_obj_ggtt_size(obj); \
                ++count; \
                if (obj->map_and_fenceable) { \
-                       mappable_size += obj->gtt_space->size; \
+                       mappable_size += i915_gem_obj_ggtt_size(obj); \
                        ++mappable_count; \
                } \
        } \
@@ -209,7 +199,7 @@ static int per_file_stats(int id, void *ptr, void *data)
        stats->count++;
        stats->total += obj->base.size;
 
-       if (obj->gtt_space) {
+       if (i915_gem_obj_ggtt_bound(obj)) {
                if (!list_empty(&obj->ring_list))
                        stats->active += obj->base.size;
                else
@@ -222,7 +212,7 @@ static int per_file_stats(int id, void *ptr, void *data)
        return 0;
 }
 
-static int i915_gem_object_info(struct seq_file *m, void* data)
+static int i915_gem_object_info(struct seq_file *m, void *data)
 {
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
@@ -230,6 +220,7 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
        u32 count, mappable_count, purgeable_count;
        size_t size, mappable_size, purgeable_size;
        struct drm_i915_gem_object *obj;
+       struct i915_address_space *vm = &dev_priv->gtt.base;
        struct drm_file *file;
        int ret;
 
@@ -247,12 +238,12 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
                   count, mappable_count, size, mappable_size);
 
        size = count = mappable_size = mappable_count = 0;
-       count_objects(&dev_priv->mm.active_list, mm_list);
+       count_objects(&vm->active_list, mm_list);
        seq_printf(m, "  %u [%u] active objects, %zu [%zu] bytes\n",
                   count, mappable_count, size, mappable_size);
 
        size = count = mappable_size = mappable_count = 0;
-       count_objects(&dev_priv->mm.inactive_list, mm_list);
+       count_objects(&vm->inactive_list, mm_list);
        seq_printf(m, "  %u [%u] inactive objects, %zu [%zu] bytes\n",
                   count, mappable_count, size, mappable_size);
 
@@ -267,11 +258,11 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
        size = count = mappable_size = mappable_count = 0;
        list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
                if (obj->fault_mappable) {
-                       size += obj->gtt_space->size;
+                       size += i915_gem_obj_ggtt_size(obj);
                        ++count;
                }
                if (obj->pin_mappable) {
-                       mappable_size += obj->gtt_space->size;
+                       mappable_size += i915_gem_obj_ggtt_size(obj);
                        ++mappable_count;
                }
                if (obj->madv == I915_MADV_DONTNEED) {
@@ -287,10 +278,10 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
                   count, size);
 
        seq_printf(m, "%zu [%lu] gtt total\n",
-                  dev_priv->gtt.total,
-                  dev_priv->gtt.mappable_end - dev_priv->gtt.start);
+                  dev_priv->gtt.base.total,
+                  dev_priv->gtt.mappable_end - dev_priv->gtt.base.start);
 
-       seq_printf(m, "\n");
+       seq_putc(m, '\n');
        list_for_each_entry_reverse(file, &dev->filelist, lhead) {
                struct file_stats stats;
 
@@ -310,7 +301,7 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
        return 0;
 }
 
-static int i915_gem_gtt_info(struct seq_file *m, void* data)
+static int i915_gem_gtt_info(struct seq_file *m, void *data)
 {
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
@@ -329,11 +320,11 @@ static int i915_gem_gtt_info(struct seq_file *m, void* data)
                if (list == PINNED_LIST && obj->pin_count == 0)
                        continue;
 
-               seq_printf(m, "   ");
+               seq_puts(m, "   ");
                describe_obj(m, obj);
-               seq_printf(m, "\n");
+               seq_putc(m, '\n');
                total_obj_size += obj->base.size;
-               total_gtt_size += obj->gtt_space->size;
+               total_gtt_size += i915_gem_obj_ggtt_size(obj);
                count++;
        }
 
@@ -371,20 +362,22 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
                                           pipe, plane);
                        }
                        if (work->enable_stall_check)
-                               seq_printf(m, "Stall check enabled, ");
+                               seq_puts(m, "Stall check enabled, ");
                        else
-                               seq_printf(m, "Stall check waiting for page flip ioctl, ");
+                               seq_puts(m, "Stall check waiting for page flip ioctl, ");
                        seq_printf(m, "%d prepares\n", atomic_read(&work->pending));
 
                        if (work->old_fb_obj) {
                                struct drm_i915_gem_object *obj = work->old_fb_obj;
                                if (obj)
-                                       seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);
+                                       seq_printf(m, "Old framebuffer gtt_offset 0x%08lx\n",
+                                                  i915_gem_obj_ggtt_offset(obj));
                        }
                        if (work->pending_flip_obj) {
                                struct drm_i915_gem_object *obj = work->pending_flip_obj;
                                if (obj)
-                                       seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);
+                                       seq_printf(m, "New framebuffer gtt_offset 0x%08lx\n",
+                                                  i915_gem_obj_ggtt_offset(obj));
                        }
                }
                spin_unlock_irqrestore(&dev->event_lock, flags);
@@ -424,7 +417,7 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
        mutex_unlock(&dev->struct_mutex);
 
        if (count == 0)
-               seq_printf(m, "No requests\n");
+               seq_puts(m, "No requests\n");
 
        return 0;
 }
@@ -574,10 +567,10 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
                seq_printf(m, "Fence %d, pin count = %d, object = ",
                           i, dev_priv->fence_regs[i].pin_count);
                if (obj == NULL)
-                       seq_printf(m, "unused");
+                       seq_puts(m, "unused");
                else
                        describe_obj(m, obj);
-               seq_printf(m, "\n");
+               seq_putc(m, '\n');
        }
 
        mutex_unlock(&dev->struct_mutex);
@@ -606,361 +599,6 @@ static int i915_hws_info(struct seq_file *m, void *data)
        return 0;
 }
 
-static const char *ring_str(int ring)
-{
-       switch (ring) {
-       case RCS: return "render";
-       case VCS: return "bsd";
-       case BCS: return "blt";
-       case VECS: return "vebox";
-       default: return "";
-       }
-}
-
-static const char *pin_flag(int pinned)
-{
-       if (pinned > 0)
-               return " P";
-       else if (pinned < 0)
-               return " p";
-       else
-               return "";
-}
-
-static const char *tiling_flag(int tiling)
-{
-       switch (tiling) {
-       default:
-       case I915_TILING_NONE: return "";
-       case I915_TILING_X: return " X";
-       case I915_TILING_Y: return " Y";
-       }
-}
-
-static const char *dirty_flag(int dirty)
-{
-       return dirty ? " dirty" : "";
-}
-
-static const char *purgeable_flag(int purgeable)
-{
-       return purgeable ? " purgeable" : "";
-}
-
-static bool __i915_error_ok(struct drm_i915_error_state_buf *e)
-{
-
-       if (!e->err && WARN(e->bytes > (e->size - 1), "overflow")) {
-               e->err = -ENOSPC;
-               return false;
-       }
-
-       if (e->bytes == e->size - 1 || e->err)
-               return false;
-
-       return true;
-}
-
-static bool __i915_error_seek(struct drm_i915_error_state_buf *e,
-                             unsigned len)
-{
-       if (e->pos + len <= e->start) {
-               e->pos += len;
-               return false;
-       }
-
-       /* First vsnprintf needs to fit in its entirety for memmove */
-       if (len >= e->size) {
-               e->err = -EIO;
-               return false;
-       }
-
-       return true;
-}
-
-static void __i915_error_advance(struct drm_i915_error_state_buf *e,
-                                unsigned len)
-{
-       /* If this is first printf in this window, adjust it so that
-        * start position matches start of the buffer
-        */
-
-       if (e->pos < e->start) {
-               const size_t off = e->start - e->pos;
-
-               /* Should not happen but be paranoid */
-               if (off > len || e->bytes) {
-                       e->err = -EIO;
-                       return;
-               }
-
-               memmove(e->buf, e->buf + off, len - off);
-               e->bytes = len - off;
-               e->pos = e->start;
-               return;
-       }
-
-       e->bytes += len;
-       e->pos += len;
-}
-
-static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
-                              const char *f, va_list args)
-{
-       unsigned len;
-
-       if (!__i915_error_ok(e))
-               return;
-
-       /* Seek the first printf which is hits start position */
-       if (e->pos < e->start) {
-               len = vsnprintf(NULL, 0, f, args);
-               if (!__i915_error_seek(e, len))
-                       return;
-       }
-
-       len = vsnprintf(e->buf + e->bytes, e->size - e->bytes, f, args);
-       if (len >= e->size - e->bytes)
-               len = e->size - e->bytes - 1;
-
-       __i915_error_advance(e, len);
-}
-
-static void i915_error_puts(struct drm_i915_error_state_buf *e,
-                           const char *str)
-{
-       unsigned len;
-
-       if (!__i915_error_ok(e))
-               return;
-
-       len = strlen(str);
-
-       /* Seek the first printf which is hits start position */
-       if (e->pos < e->start) {
-               if (!__i915_error_seek(e, len))
-                       return;
-       }
-
-       if (len >= e->size - e->bytes)
-               len = e->size - e->bytes - 1;
-       memcpy(e->buf + e->bytes, str, len);
-
-       __i915_error_advance(e, len);
-}
-
-void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
-{
-       va_list args;
-
-       va_start(args, f);
-       i915_error_vprintf(e, f, args);
-       va_end(args);
-}
-
-#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
-#define err_puts(e, s) i915_error_puts(e, s)
-
-static void print_error_buffers(struct drm_i915_error_state_buf *m,
-                               const char *name,
-                               struct drm_i915_error_buffer *err,
-                               int count)
-{
-       err_printf(m, "%s [%d]:\n", name, count);
-
-       while (count--) {
-               err_printf(m, "  %08x %8u %02x %02x %x %x",
-                          err->gtt_offset,
-                          err->size,
-                          err->read_domains,
-                          err->write_domain,
-                          err->rseqno, err->wseqno);
-               err_puts(m, pin_flag(err->pinned));
-               err_puts(m, tiling_flag(err->tiling));
-               err_puts(m, dirty_flag(err->dirty));
-               err_puts(m, purgeable_flag(err->purgeable));
-               err_puts(m, err->ring != -1 ? " " : "");
-               err_puts(m, ring_str(err->ring));
-               err_puts(m, cache_level_str(err->cache_level));
-
-               if (err->name)
-                       err_printf(m, " (name: %d)", err->name);
-               if (err->fence_reg != I915_FENCE_REG_NONE)
-                       err_printf(m, " (fence: %d)", err->fence_reg);
-
-               err_puts(m, "\n");
-               err++;
-       }
-}
-
-static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
-                                 struct drm_device *dev,
-                                 struct drm_i915_error_state *error,
-                                 unsigned ring)
-{
-       BUG_ON(ring >= I915_NUM_RINGS); /* shut up confused gcc */
-       err_printf(m, "%s command stream:\n", ring_str(ring));
-       err_printf(m, "  HEAD: 0x%08x\n", error->head[ring]);
-       err_printf(m, "  TAIL: 0x%08x\n", error->tail[ring]);
-       err_printf(m, "  CTL: 0x%08x\n", error->ctl[ring]);
-       err_printf(m, "  ACTHD: 0x%08x\n", error->acthd[ring]);
-       err_printf(m, "  IPEIR: 0x%08x\n", error->ipeir[ring]);
-       err_printf(m, "  IPEHR: 0x%08x\n", error->ipehr[ring]);
-       err_printf(m, "  INSTDONE: 0x%08x\n", error->instdone[ring]);
-       if (ring == RCS && INTEL_INFO(dev)->gen >= 4)
-               err_printf(m, "  BBADDR: 0x%08llx\n", error->bbaddr);
-
-       if (INTEL_INFO(dev)->gen >= 4)
-               err_printf(m, "  INSTPS: 0x%08x\n", error->instps[ring]);
-       err_printf(m, "  INSTPM: 0x%08x\n", error->instpm[ring]);
-       err_printf(m, "  FADDR: 0x%08x\n", error->faddr[ring]);
-       if (INTEL_INFO(dev)->gen >= 6) {
-               err_printf(m, "  RC PSMI: 0x%08x\n", error->rc_psmi[ring]);
-               err_printf(m, "  FAULT_REG: 0x%08x\n", error->fault_reg[ring]);
-               err_printf(m, "  SYNC_0: 0x%08x [last synced 0x%08x]\n",
-                          error->semaphore_mboxes[ring][0],
-                          error->semaphore_seqno[ring][0]);
-               err_printf(m, "  SYNC_1: 0x%08x [last synced 0x%08x]\n",
-                          error->semaphore_mboxes[ring][1],
-                          error->semaphore_seqno[ring][1]);
-       }
-       err_printf(m, "  seqno: 0x%08x\n", error->seqno[ring]);
-       err_printf(m, "  waiting: %s\n", yesno(error->waiting[ring]));
-       err_printf(m, "  ring->head: 0x%08x\n", error->cpu_ring_head[ring]);
-       err_printf(m, "  ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]);
-}
-
-struct i915_error_state_file_priv {
-       struct drm_device *dev;
-       struct drm_i915_error_state *error;
-};
-
-
-static int i915_error_state(struct i915_error_state_file_priv *error_priv,
-                           struct drm_i915_error_state_buf *m)
-
-{
-       struct drm_device *dev = error_priv->dev;
-       drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_i915_error_state *error = error_priv->error;
-       struct intel_ring_buffer *ring;
-       int i, j, page, offset, elt;
-
-       if (!error) {
-               err_printf(m, "no error state collected\n");
-               return 0;
-       }
-
-       err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
-                  error->time.tv_usec);
-       err_printf(m, "Kernel: " UTS_RELEASE "\n");
-       err_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
-       err_printf(m, "EIR: 0x%08x\n", error->eir);
-       err_printf(m, "IER: 0x%08x\n", error->ier);
-       err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
-       err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
-       err_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
-       err_printf(m, "CCID: 0x%08x\n", error->ccid);
-
-       for (i = 0; i < dev_priv->num_fence_regs; i++)
-               err_printf(m, "  fence[%d] = %08llx\n", i, error->fence[i]);
-
-       for (i = 0; i < ARRAY_SIZE(error->extra_instdone); i++)
-               err_printf(m, "  INSTDONE_%d: 0x%08x\n", i,
-                          error->extra_instdone[i]);
-
-       if (INTEL_INFO(dev)->gen >= 6) {
-               err_printf(m, "ERROR: 0x%08x\n", error->error);
-               err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
-       }
-
-       if (INTEL_INFO(dev)->gen == 7)
-               err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
-
-       for_each_ring(ring, dev_priv, i)
-               i915_ring_error_state(m, dev, error, i);
-
-       if (error->active_bo)
-               print_error_buffers(m, "Active",
-                                   error->active_bo,
-                                   error->active_bo_count);
-
-       if (error->pinned_bo)
-               print_error_buffers(m, "Pinned",
-                                   error->pinned_bo,
-                                   error->pinned_bo_count);
-
-       for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
-               struct drm_i915_error_object *obj;
-
-               if ((obj = error->ring[i].batchbuffer)) {
-                       err_printf(m, "%s --- gtt_offset = 0x%08x\n",
-                                  dev_priv->ring[i].name,
-                                  obj->gtt_offset);
-                       offset = 0;
-                       for (page = 0; page < obj->page_count; page++) {
-                               for (elt = 0; elt < PAGE_SIZE/4; elt++) {
-                                       err_printf(m, "%08x :  %08x\n", offset,
-                                                  obj->pages[page][elt]);
-                                       offset += 4;
-                               }
-                       }
-               }
-
-               if (error->ring[i].num_requests) {
-                       err_printf(m, "%s --- %d requests\n",
-                                  dev_priv->ring[i].name,
-                                  error->ring[i].num_requests);
-                       for (j = 0; j < error->ring[i].num_requests; j++) {
-                               err_printf(m, "  seqno 0x%08x, emitted %ld, tail 0x%08x\n",
-                                          error->ring[i].requests[j].seqno,
-                                          error->ring[i].requests[j].jiffies,
-                                          error->ring[i].requests[j].tail);
-                       }
-               }
-
-               if ((obj = error->ring[i].ringbuffer)) {
-                       err_printf(m, "%s --- ringbuffer = 0x%08x\n",
-                                  dev_priv->ring[i].name,
-                                  obj->gtt_offset);
-                       offset = 0;
-                       for (page = 0; page < obj->page_count; page++) {
-                               for (elt = 0; elt < PAGE_SIZE/4; elt++) {
-                                       err_printf(m, "%08x :  %08x\n",
-                                                  offset,
-                                                  obj->pages[page][elt]);
-                                       offset += 4;
-                               }
-                       }
-               }
-
-               obj = error->ring[i].ctx;
-               if (obj) {
-                       err_printf(m, "%s --- HW Context = 0x%08x\n",
-                                  dev_priv->ring[i].name,
-                                  obj->gtt_offset);
-                       offset = 0;
-                       for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
-                               err_printf(m, "[%04x] %08x %08x %08x %08x\n",
-                                          offset,
-                                          obj->pages[0][elt],
-                                          obj->pages[0][elt+1],
-                                          obj->pages[0][elt+2],
-                                          obj->pages[0][elt+3]);
-                                       offset += 16;
-                       }
-               }
-       }
-
-       if (error->overlay)
-               intel_overlay_print_error_state(m, error->overlay);
-
-       if (error->display)
-               intel_display_print_error_state(m, dev, error->display);
-
-       return 0;
-}
-
 static ssize_t
 i915_error_state_write(struct file *filp,
                       const char __user *ubuf,
@@ -986,9 +624,7 @@ i915_error_state_write(struct file *filp,
 static int i915_error_state_open(struct inode *inode, struct file *file)
 {
        struct drm_device *dev = inode->i_private;
-       drm_i915_private_t *dev_priv = dev->dev_private;
        struct i915_error_state_file_priv *error_priv;
-       unsigned long flags;
 
        error_priv = kzalloc(sizeof(*error_priv), GFP_KERNEL);
        if (!error_priv)
@@ -996,11 +632,7 @@ static int i915_error_state_open(struct inode *inode, struct file *file)
 
        error_priv->dev = dev;
 
-       spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
-       error_priv->error = dev_priv->gpu_error.first_error;
-       if (error_priv->error)
-               kref_get(&error_priv->error->ref);
-       spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
+       i915_error_state_get(dev, error_priv);
 
        file->private_data = error_priv;
 
@@ -1011,8 +643,7 @@ static int i915_error_state_release(struct inode *inode, struct file *file)
 {
        struct i915_error_state_file_priv *error_priv = file->private_data;
 
-       if (error_priv->error)
-               kref_put(&error_priv->error->ref, i915_error_state_free);
+       i915_error_state_put(error_priv);
        kfree(error_priv);
 
        return 0;
@@ -1025,40 +656,15 @@ static ssize_t i915_error_state_read(struct file *file, char __user *userbuf,
        struct drm_i915_error_state_buf error_str;
        loff_t tmp_pos = 0;
        ssize_t ret_count = 0;
-       int ret = 0;
-
-       memset(&error_str, 0, sizeof(error_str));
-
-       /* We need to have enough room to store any i915_error_state printf
-        * so that we can move it to start position.
-        */
-       error_str.size = count + 1 > PAGE_SIZE ? count + 1 : PAGE_SIZE;
-       error_str.buf = kmalloc(error_str.size,
-                               GFP_TEMPORARY | __GFP_NORETRY | __GFP_NOWARN);
-
-       if (error_str.buf == NULL) {
-               error_str.size = PAGE_SIZE;
-               error_str.buf = kmalloc(error_str.size, GFP_TEMPORARY);
-       }
-
-       if (error_str.buf == NULL) {
-               error_str.size = 128;
-               error_str.buf = kmalloc(error_str.size, GFP_TEMPORARY);
-       }
-
-       if (error_str.buf == NULL)
-               return -ENOMEM;
-
-       error_str.start = *pos;
+       int ret;
 
-       ret = i915_error_state(error_priv, &error_str);
+       ret = i915_error_state_buf_init(&error_str, count, *pos);
        if (ret)
-               goto out;
+               return ret;
 
-       if (error_str.bytes == 0 && error_str.err) {
-               ret = error_str.err;
+       ret = i915_error_state_to_str(&error_str, error_priv);
+       if (ret)
                goto out;
-       }
 
        ret_count = simple_read_from_buffer(userbuf, count, &tmp_pos,
                                            error_str.buf,
@@ -1069,7 +675,7 @@ static ssize_t i915_error_state_read(struct file *file, char __user *userbuf,
        else
                *pos = error_str.start + ret_count;
 out:
-       kfree(error_str.buf);
+       i915_error_state_buf_release(&error_str);
        return ret ?: ret_count;
 }
 
@@ -1246,7 +852,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
                                        (freq_sts >> 8) & 0xff));
                mutex_unlock(&dev_priv->rps.hw_lock);
        } else {
-               seq_printf(m, "no P-state info available\n");
+               seq_puts(m, "no P-state info available\n");
        }
 
        return 0;
@@ -1341,28 +947,28 @@ static int ironlake_drpc_info(struct seq_file *m)
        seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
        seq_printf(m, "Render standby enabled: %s\n",
                   (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes");
-       seq_printf(m, "Current RS state: ");
+       seq_puts(m, "Current RS state: ");
        switch (rstdbyctl & RSX_STATUS_MASK) {
        case RSX_STATUS_ON:
-               seq_printf(m, "on\n");
+               seq_puts(m, "on\n");
                break;
        case RSX_STATUS_RC1:
-               seq_printf(m, "RC1\n");
+               seq_puts(m, "RC1\n");
                break;
        case RSX_STATUS_RC1E:
-               seq_printf(m, "RC1E\n");
+               seq_puts(m, "RC1E\n");
                break;
        case RSX_STATUS_RS1:
-               seq_printf(m, "RS1\n");
+               seq_puts(m, "RS1\n");
                break;
        case RSX_STATUS_RS2:
-               seq_printf(m, "RS2 (RC6)\n");
+               seq_puts(m, "RS2 (RC6)\n");
                break;
        case RSX_STATUS_RS3:
-               seq_printf(m, "RC3 (RC6+)\n");
+               seq_puts(m, "RC3 (RC6+)\n");
                break;
        default:
-               seq_printf(m, "unknown\n");
+               seq_puts(m, "unknown\n");
                break;
        }
 
@@ -1377,8 +983,7 @@ static int gen6_drpc_info(struct seq_file *m)
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
        unsigned forcewake_count;
-       int count=0, ret;
-
+       int count = 0, ret;
 
        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
@@ -1389,8 +994,8 @@ static int gen6_drpc_info(struct seq_file *m)
        spin_unlock_irq(&dev_priv->gt_lock);
 
        if (forcewake_count) {
-               seq_printf(m, "RC information inaccurate because somebody "
-                             "holds a forcewake reference \n");
+               seq_puts(m, "RC information inaccurate because somebody "
+                           "holds a forcewake reference \n");
        } else {
                /* NB: we cannot use forcewake, else we read the wrong values */
                while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
@@ -1423,25 +1028,25 @@ static int gen6_drpc_info(struct seq_file *m)
                   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
        seq_printf(m, "Deepest RC6 Enabled: %s\n",
                   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
-       seq_printf(m, "Current RC state: ");
+       seq_puts(m, "Current RC state: ");
        switch (gt_core_status & GEN6_RCn_MASK) {
        case GEN6_RC0:
                if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
-                       seq_printf(m, "Core Power Down\n");
+                       seq_puts(m, "Core Power Down\n");
                else
-                       seq_printf(m, "on\n");
+                       seq_puts(m, "on\n");
                break;
        case GEN6_RC3:
-               seq_printf(m, "RC3\n");
+               seq_puts(m, "RC3\n");
                break;
        case GEN6_RC6:
-               seq_printf(m, "RC6\n");
+               seq_puts(m, "RC6\n");
                break;
        case GEN6_RC7:
-               seq_printf(m, "RC7\n");
+               seq_puts(m, "RC7\n");
                break;
        default:
-               seq_printf(m, "Unknown\n");
+               seq_puts(m, "Unknown\n");
                break;
        }
 
@@ -1485,43 +1090,46 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
        drm_i915_private_t *dev_priv = dev->dev_private;
 
        if (!I915_HAS_FBC(dev)) {
-               seq_printf(m, "FBC unsupported on this chipset\n");
+               seq_puts(m, "FBC unsupported on this chipset\n");
                return 0;
        }
 
        if (intel_fbc_enabled(dev)) {
-               seq_printf(m, "FBC enabled\n");
+               seq_puts(m, "FBC enabled\n");
        } else {
-               seq_printf(m, "FBC disabled: ");
-               switch (dev_priv->no_fbc_reason) {
+               seq_puts(m, "FBC disabled: ");
+               switch (dev_priv->fbc.no_fbc_reason) {
                case FBC_NO_OUTPUT:
-                       seq_printf(m, "no outputs");
+                       seq_puts(m, "no outputs");
                        break;
                case FBC_STOLEN_TOO_SMALL:
-                       seq_printf(m, "not enough stolen memory");
+                       seq_puts(m, "not enough stolen memory");
                        break;
                case FBC_UNSUPPORTED_MODE:
-                       seq_printf(m, "mode not supported");
+                       seq_puts(m, "mode not supported");
                        break;
                case FBC_MODE_TOO_LARGE:
-                       seq_printf(m, "mode too large");
+                       seq_puts(m, "mode too large");
                        break;
                case FBC_BAD_PLANE:
-                       seq_printf(m, "FBC unsupported on plane");
+                       seq_puts(m, "FBC unsupported on plane");
                        break;
                case FBC_NOT_TILED:
-                       seq_printf(m, "scanout buffer not tiled");
+                       seq_puts(m, "scanout buffer not tiled");
                        break;
                case FBC_MULTIPLE_PIPES:
-                       seq_printf(m, "multiple pipes are enabled");
+                       seq_puts(m, "multiple pipes are enabled");
                        break;
                case FBC_MODULE_PARAM:
-                       seq_printf(m, "disabled per module param (default off)");
+                       seq_puts(m, "disabled per module param (default off)");
+                       break;
+               case FBC_CHIP_DEFAULT:
+                       seq_puts(m, "disabled per chip default");
                        break;
                default:
-                       seq_printf(m, "unknown reason");
+                       seq_puts(m, "unknown reason");
                }
-               seq_printf(m, "\n");
+               seq_putc(m, '\n');
        }
        return 0;
 }
@@ -1604,7 +1212,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
        int gpu_freq, ia_freq;
 
        if (!(IS_GEN6(dev) || IS_GEN7(dev))) {
-               seq_printf(m, "unsupported on this chipset\n");
+               seq_puts(m, "unsupported on this chipset\n");
                return 0;
        }
 
@@ -1612,7 +1220,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
        if (ret)
                return ret;
 
-       seq_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
+       seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
 
        for (gpu_freq = dev_priv->rps.min_delay;
             gpu_freq <= dev_priv->rps.max_delay;
@@ -1701,7 +1309,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
                   fb->base.bits_per_pixel,
                   atomic_read(&fb->base.refcount.refcount));
        describe_obj(m, fb->obj);
-       seq_printf(m, "\n");
+       seq_putc(m, '\n');
        mutex_unlock(&dev->mode_config.mutex);
 
        mutex_lock(&dev->mode_config.fb_lock);
@@ -1716,7 +1324,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
                           fb->base.bits_per_pixel,
                           atomic_read(&fb->base.refcount.refcount));
                describe_obj(m, fb->obj);
-               seq_printf(m, "\n");
+               seq_putc(m, '\n');
        }
        mutex_unlock(&dev->mode_config.fb_lock);
 
@@ -1736,22 +1344,22 @@ static int i915_context_status(struct seq_file *m, void *unused)
                return ret;
 
        if (dev_priv->ips.pwrctx) {
-               seq_printf(m, "power context ");
+               seq_puts(m, "power context ");
                describe_obj(m, dev_priv->ips.pwrctx);
-               seq_printf(m, "\n");
+               seq_putc(m, '\n');
        }
 
        if (dev_priv->ips.renderctx) {
-               seq_printf(m, "render context ");
+               seq_puts(m, "render context ");
                describe_obj(m, dev_priv->ips.renderctx);
-               seq_printf(m, "\n");
+               seq_putc(m, '\n');
        }
 
        for_each_ring(ring, dev_priv, i) {
                if (ring->default_context) {
                        seq_printf(m, "HW default context %s ring ", ring->name);
                        describe_obj(m, ring->default_context->obj);
-                       seq_printf(m, "\n");
+                       seq_putc(m, '\n');
                }
        }
 
@@ -1778,7 +1386,7 @@ static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data)
 
 static const char *swizzle_string(unsigned swizzle)
 {
-       switch(swizzle) {
+       switch (swizzle) {
        case I915_BIT_6_SWIZZLE_NONE:
                return "none";
        case I915_BIT_6_SWIZZLE_9:
@@ -1868,7 +1476,7 @@ static int i915_ppgtt_info(struct seq_file *m, void *data)
        if (dev_priv->mm.aliasing_ppgtt) {
                struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
 
-               seq_printf(m, "aliasing PPGTT:\n");
+               seq_puts(m, "aliasing PPGTT:\n");
                seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset);
        }
        seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
@@ -1886,7 +1494,7 @@ static int i915_dpio_info(struct seq_file *m, void *data)
 
 
        if (!IS_VALLEYVIEW(dev)) {
-               seq_printf(m, "unsupported\n");
+               seq_puts(m, "unsupported\n");
                return 0;
        }
 
@@ -1924,6 +1532,148 @@ static int i915_dpio_info(struct seq_file *m, void *data)
        return 0;
 }
 
+static int i915_llc(struct seq_file *m, void *data)
+{
+       struct drm_info_node *node = (struct drm_info_node *) m->private;
+       struct drm_device *dev = node->minor->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       /* Size calculation for LLC is a bit of a pain. Ignore for now. */
+       seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev)));
+       seq_printf(m, "eLLC: %zuMB\n", dev_priv->ellc_size);
+
+       return 0;
+}
+
+static int i915_edp_psr_status(struct seq_file *m, void *data)
+{
+       struct drm_info_node *node = m->private;
+       struct drm_device *dev = node->minor->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       u32 psrstat, psrperf;
+
+       if (!IS_HASWELL(dev)) {
+               seq_puts(m, "PSR not supported on this platform\n");
+       } else if (IS_HASWELL(dev) && I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE) {
+               seq_puts(m, "PSR enabled\n");
+       } else {
+               seq_puts(m, "PSR disabled: ");
+               switch (dev_priv->no_psr_reason) {
+               case PSR_NO_SOURCE:
+                       seq_puts(m, "not supported on this platform");
+                       break;
+               case PSR_NO_SINK:
+                       seq_puts(m, "not supported by panel");
+                       break;
+               case PSR_MODULE_PARAM:
+                       seq_puts(m, "disabled by flag");
+                       break;
+               case PSR_CRTC_NOT_ACTIVE:
+                       seq_puts(m, "crtc not active");
+                       break;
+               case PSR_PWR_WELL_ENABLED:
+                       seq_puts(m, "power well enabled");
+                       break;
+               case PSR_NOT_TILED:
+                       seq_puts(m, "not tiled");
+                       break;
+               case PSR_SPRITE_ENABLED:
+                       seq_puts(m, "sprite enabled");
+                       break;
+               case PSR_S3D_ENABLED:
+                       seq_puts(m, "stereo 3d enabled");
+                       break;
+               case PSR_INTERLACED_ENABLED:
+                       seq_puts(m, "interlaced enabled");
+                       break;
+               case PSR_HSW_NOT_DDIA:
+                       seq_puts(m, "HSW ties PSR to DDI A (eDP)");
+                       break;
+               default:
+                       seq_puts(m, "unknown reason");
+               }
+               seq_puts(m, "\n");
+               return 0;
+       }
+
+       psrstat = I915_READ(EDP_PSR_STATUS_CTL);
+
+       seq_puts(m, "PSR Current State: ");
+       switch (psrstat & EDP_PSR_STATUS_STATE_MASK) {
+       case EDP_PSR_STATUS_STATE_IDLE:
+               seq_puts(m, "Reset state\n");
+               break;
+       case EDP_PSR_STATUS_STATE_SRDONACK:
+               seq_puts(m, "Wait for TG/Stream to send on frame of data after SRD conditions are met\n");
+               break;
+       case EDP_PSR_STATUS_STATE_SRDENT:
+               seq_puts(m, "SRD entry\n");
+               break;
+       case EDP_PSR_STATUS_STATE_BUFOFF:
+               seq_puts(m, "Wait for buffer turn off\n");
+               break;
+       case EDP_PSR_STATUS_STATE_BUFON:
+               seq_puts(m, "Wait for buffer turn on\n");
+               break;
+       case EDP_PSR_STATUS_STATE_AUXACK:
+               seq_puts(m, "Wait for AUX to acknowledge on SRD exit\n");
+               break;
+       case EDP_PSR_STATUS_STATE_SRDOFFACK:
+               seq_puts(m, "Wait for TG/Stream to acknowledge the SRD VDM exit\n");
+               break;
+       default:
+               seq_puts(m, "Unknown\n");
+               break;
+       }
+
+       seq_puts(m, "Link Status: ");
+       switch (psrstat & EDP_PSR_STATUS_LINK_MASK) {
+       case EDP_PSR_STATUS_LINK_FULL_OFF:
+               seq_puts(m, "Link is fully off\n");
+               break;
+       case EDP_PSR_STATUS_LINK_FULL_ON:
+               seq_puts(m, "Link is fully on\n");
+               break;
+       case EDP_PSR_STATUS_LINK_STANDBY:
+               seq_puts(m, "Link is in standby\n");
+               break;
+       default:
+               seq_puts(m, "Unknown\n");
+               break;
+       }
+
+       seq_printf(m, "PSR Entry Count: %u\n",
+                  psrstat >> EDP_PSR_STATUS_COUNT_SHIFT &
+                  EDP_PSR_STATUS_COUNT_MASK);
+
+       seq_printf(m, "Max Sleep Timer Counter: %u\n",
+                  psrstat >> EDP_PSR_STATUS_MAX_SLEEP_TIMER_SHIFT &
+                  EDP_PSR_STATUS_MAX_SLEEP_TIMER_MASK);
+
+       seq_printf(m, "Had AUX error: %s\n",
+                  yesno(psrstat & EDP_PSR_STATUS_AUX_ERROR));
+
+       seq_printf(m, "Sending AUX: %s\n",
+                  yesno(psrstat & EDP_PSR_STATUS_AUX_SENDING));
+
+       seq_printf(m, "Sending Idle: %s\n",
+                  yesno(psrstat & EDP_PSR_STATUS_SENDING_IDLE));
+
+       seq_printf(m, "Sending TP2 TP3: %s\n",
+                  yesno(psrstat & EDP_PSR_STATUS_SENDING_TP2_TP3));
+
+       seq_printf(m, "Sending TP1: %s\n",
+                  yesno(psrstat & EDP_PSR_STATUS_SENDING_TP1));
+
+       seq_printf(m, "Idle Count: %u\n",
+                  psrstat & EDP_PSR_STATUS_IDLE_MASK);
+
+       psrperf = (I915_READ(EDP_PSR_PERF_CNT)) & EDP_PSR_PERF_CNT_MASK;
+       seq_printf(m, "Performance Counter: %u\n", psrperf);
+
+       return 0;
+}
+
 static int
 i915_wedged_get(void *data, u64 *val)
 {
@@ -2006,6 +1756,7 @@ i915_drop_caches_set(void *data, u64 val)
        struct drm_device *dev = data;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj, *next;
+       struct i915_address_space *vm = &dev_priv->gtt.base;
        int ret;
 
        DRM_DEBUG_DRIVER("Dropping caches: 0x%08llx\n", val);
@@ -2026,7 +1777,8 @@ i915_drop_caches_set(void *data, u64 val)
                i915_gem_retire_requests(dev);
 
        if (val & DROP_BOUND) {
-               list_for_each_entry_safe(obj, next, &dev_priv->mm.inactive_list, mm_list)
+               list_for_each_entry_safe(obj, next, &vm->inactive_list,
+                                        mm_list)
                        if (obj->pin_count == 0) {
                                ret = i915_gem_object_unbind(obj);
                                if (ret)
@@ -2353,64 +2105,40 @@ static struct drm_info_list i915_debugfs_list[] = {
        {"i915_swizzle_info", i915_swizzle_info, 0},
        {"i915_ppgtt_info", i915_ppgtt_info, 0},
        {"i915_dpio", i915_dpio_info, 0},
+       {"i915_llc", i915_llc, 0},
+       {"i915_edp_psr_status", i915_edp_psr_status, 0},
 };
 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
 
+struct i915_debugfs_files {
+       const char *name;
+       const struct file_operations *fops;
+} i915_debugfs_files[] = {
+       {"i915_wedged", &i915_wedged_fops},
+       {"i915_max_freq", &i915_max_freq_fops},
+       {"i915_min_freq", &i915_min_freq_fops},
+       {"i915_cache_sharing", &i915_cache_sharing_fops},
+       {"i915_ring_stop", &i915_ring_stop_fops},
+       {"i915_gem_drop_caches", &i915_drop_caches_fops},
+       {"i915_error_state", &i915_error_state_fops},
+       {"i915_next_seqno", &i915_next_seqno_fops},
+};
+
 int i915_debugfs_init(struct drm_minor *minor)
 {
-       int ret;
-
-       ret = i915_debugfs_create(minor->debugfs_root, minor,
-                                 "i915_wedged",
-                                 &i915_wedged_fops);
-       if (ret)
-               return ret;
+       int ret, i;
 
        ret = i915_forcewake_create(minor->debugfs_root, minor);
        if (ret)
                return ret;
 
-       ret = i915_debugfs_create(minor->debugfs_root, minor,
-                                 "i915_max_freq",
-                                 &i915_max_freq_fops);
-       if (ret)
-               return ret;
-
-       ret = i915_debugfs_create(minor->debugfs_root, minor,
-                                 "i915_min_freq",
-                                 &i915_min_freq_fops);
-       if (ret)
-               return ret;
-
-       ret = i915_debugfs_create(minor->debugfs_root, minor,
-                                 "i915_cache_sharing",
-                                 &i915_cache_sharing_fops);
-       if (ret)
-               return ret;
-
-       ret = i915_debugfs_create(minor->debugfs_root, minor,
-                                 "i915_ring_stop",
-                                 &i915_ring_stop_fops);
-       if (ret)
-               return ret;
-
-       ret = i915_debugfs_create(minor->debugfs_root, minor,
-                                 "i915_gem_drop_caches",
-                                 &i915_drop_caches_fops);
-       if (ret)
-               return ret;
-
-       ret = i915_debugfs_create(minor->debugfs_root, minor,
-                                 "i915_error_state",
-                                 &i915_error_state_fops);
-       if (ret)
-               return ret;
-
-       ret = i915_debugfs_create(minor->debugfs_root, minor,
-                                "i915_next_seqno",
-                                &i915_next_seqno_fops);
-       if (ret)
-               return ret;
+       for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
+               ret = i915_debugfs_create(minor->debugfs_root, minor,
+                                         i915_debugfs_files[i].name,
+                                         i915_debugfs_files[i].fops);
+               if (ret)
+                       return ret;
+       }
 
        return drm_debugfs_create_files(i915_debugfs_list,
                                        I915_DEBUGFS_ENTRIES,
@@ -2419,26 +2147,18 @@ int i915_debugfs_init(struct drm_minor *minor)
 
 void i915_debugfs_cleanup(struct drm_minor *minor)
 {
+       int i;
+
        drm_debugfs_remove_files(i915_debugfs_list,
                                 I915_DEBUGFS_ENTRIES, minor);
        drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops,
                                 1, minor);
-       drm_debugfs_remove_files((struct drm_info_list *) &i915_wedged_fops,
-                                1, minor);
-       drm_debugfs_remove_files((struct drm_info_list *) &i915_max_freq_fops,
-                                1, minor);
-       drm_debugfs_remove_files((struct drm_info_list *) &i915_min_freq_fops,
-                                1, minor);
-       drm_debugfs_remove_files((struct drm_info_list *) &i915_cache_sharing_fops,
-                                1, minor);
-       drm_debugfs_remove_files((struct drm_info_list *) &i915_drop_caches_fops,
-                                1, minor);
-       drm_debugfs_remove_files((struct drm_info_list *) &i915_ring_stop_fops,
-                                1, minor);
-       drm_debugfs_remove_files((struct drm_info_list *) &i915_error_state_fops,
-                                1, minor);
-       drm_debugfs_remove_files((struct drm_info_list *) &i915_next_seqno_fops,
-                                1, minor);
+       for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
+               struct drm_info_list *info_list =
+                       (struct drm_info_list *) i915_debugfs_files[i].fops;
+
+               drm_debugfs_remove_files(info_list, 1, minor);
+       }
 }
 
 #endif /* CONFIG_DEBUG_FS */
index 67ec54f67afe2d7a56cbabda7800819bfee753ac..1c5b397385080c27f53691d932418d6b884cd035 100644 (file)
@@ -1323,10 +1323,8 @@ static int i915_load_modeset_init(struct drm_device *dev)
        /* Always safe in the mode setting case. */
        /* FIXME: do pre/post-mode set stuff in core KMS code */
        dev->vblank_disable_allowed = 1;
-       if (INTEL_INFO(dev)->num_pipes == 0) {
-               dev_priv->mm.suspended = 0;
+       if (INTEL_INFO(dev)->num_pipes == 0)
                return 0;
-       }
 
        ret = intel_fbdev_init(dev);
        if (ret)
@@ -1352,9 +1350,6 @@ static int i915_load_modeset_init(struct drm_device *dev)
 
        drm_kms_helper_poll_init(dev);
 
-       /* We're off and running w/KMS */
-       dev_priv->mm.suspended = 0;
-
        return 0;
 
 cleanup_gem:
@@ -1363,7 +1358,7 @@ cleanup_gem:
        i915_gem_context_fini(dev);
        mutex_unlock(&dev->struct_mutex);
        i915_gem_cleanup_aliasing_ppgtt(dev);
-       drm_mm_takedown(&dev_priv->mm.gtt_space);
+       drm_mm_takedown(&dev_priv->gtt.base.mm);
 cleanup_irq:
        drm_irq_uninstall(dev);
 cleanup_gem_stolen:
@@ -1497,14 +1492,18 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 
        spin_lock_init(&dev_priv->irq_lock);
        spin_lock_init(&dev_priv->gpu_error.lock);
-       spin_lock_init(&dev_priv->rps.lock);
        spin_lock_init(&dev_priv->backlight.lock);
+       spin_lock_init(&dev_priv->gt_lock);
        mutex_init(&dev_priv->dpio_lock);
        mutex_init(&dev_priv->rps.hw_lock);
        mutex_init(&dev_priv->modeset_restore_lock);
 
        i915_dump_device_info(dev_priv);
 
+       INIT_LIST_HEAD(&dev_priv->vm_list);
+       INIT_LIST_HEAD(&dev_priv->gtt.base.global_link);
+       list_add(&dev_priv->gtt.base.global_link, &dev_priv->vm_list);
+
        if (i915_get_bridge_dev(dev)) {
                ret = -EIO;
                goto free_priv;
@@ -1532,6 +1531,16 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 
        intel_early_sanitize_regs(dev);
 
+       if (IS_HASWELL(dev) && (I915_READ(HSW_EDRAM_PRESENT) == 1)) {
+               /* The docs do not explain exactly how the calculation can be
+                * made. It is somewhat guessable, but for now, it's always
+                * 128MB.
+                * NB: We can't write IDICR yet because we do not have gt funcs
+                * set up */
+               dev_priv->ellc_size = 128;
+               DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
+       }
+
        ret = i915_gem_gtt_init(dev);
        if (ret)
                goto put_bridge;
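The eLLC probe above reads a register once at load time and caches the result in dev_priv, so later code (like the HSW_IDICR write further down in i915_gem_init_hw) only has to test ellc_size. A toy model of that read-once-then-cache shape, with the register read stubbed out:

#include <stdio.h>
#include <stddef.h>

struct device_priv { size_t ellc_size; };

/* Stub for the HSW_EDRAM_PRESENT read in the hunk above. */
static unsigned int read_edram_present(void) { return 1; }

int main(void)
{
        struct device_priv dev = { 0 };

        if (read_edram_present() == 1)
                dev.ellc_size = 128;            /* MB, hardcoded for now */
        if (dev.ellc_size)
                printf("Found %zuMB of eLLC\n", dev.ellc_size);
        return 0;
}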
@@ -1566,8 +1575,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
                goto out_rmmap;
        }
 
-       dev_priv->mm.gtt_mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base,
-                                                aperture_size);
+       dev_priv->gtt.mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base,
+                                             aperture_size);
 
        /* The i915 workqueue is primarily used for batched retirement of
         * requests (and thus managing bo) once the task has been completed
@@ -1629,9 +1638,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
                        goto out_gem_unload;
        }
 
-       /* Start out suspended */
-       dev_priv->mm.suspended = 1;
-
        if (HAS_POWER_WELL(dev))
                i915_init_power_well(dev);
 
@@ -1641,6 +1647,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
                        DRM_ERROR("failed to init modeset\n");
                        goto out_gem_unload;
                }
+       } else {
+               /* Start out suspended in ums mode. */
+               dev_priv->ums.mm_suspended = 1;
        }
 
        i915_setup_sysfs(dev);
@@ -1667,9 +1676,9 @@ out_gem_unload:
        intel_teardown_mchbar(dev);
        destroy_workqueue(dev_priv->wq);
 out_mtrrfree:
-       arch_phys_wc_del(dev_priv->mm.gtt_mtrr);
+       arch_phys_wc_del(dev_priv->gtt.mtrr);
        io_mapping_free(dev_priv->gtt.mappable);
-       dev_priv->gtt.gtt_remove(dev);
+       dev_priv->gtt.base.cleanup(&dev_priv->gtt.base);
 out_rmmap:
        pci_iounmap(dev->pdev, dev_priv->regs);
 put_bridge:
@@ -1705,7 +1714,7 @@ int i915_driver_unload(struct drm_device *dev)
        cancel_delayed_work_sync(&dev_priv->mm.retire_work);
 
        io_mapping_free(dev_priv->gtt.mappable);
-       arch_phys_wc_del(dev_priv->mm.gtt_mtrr);
+       arch_phys_wc_del(dev_priv->gtt.mtrr);
 
        acpi_video_unregister();
 
@@ -1754,7 +1763,9 @@ int i915_driver_unload(struct drm_device *dev)
                        i915_free_hws(dev);
        }
 
-       drm_mm_takedown(&dev_priv->mm.gtt_space);
+       list_del(&dev_priv->gtt.base.global_link);
+       WARN_ON(!list_empty(&dev_priv->vm_list));
+       drm_mm_takedown(&dev_priv->gtt.base.mm);
        if (dev_priv->regs != NULL)
                pci_iounmap(dev->pdev, dev_priv->regs);
 
@@ -1764,7 +1775,7 @@ int i915_driver_unload(struct drm_device *dev)
        destroy_workqueue(dev_priv->wq);
        pm_qos_remove_request(&dev_priv->pm_qos);
 
-       dev_priv->gtt.gtt_remove(dev);
+       dev_priv->gtt.base.cleanup(&dev_priv->gtt.base);
 
        if (dev_priv->slab)
                kmem_cache_destroy(dev_priv->slab);
index 45b3c030f48393b6921e66406e48f92e2f1d643e..5849b0a91b4e86d28cc4368785041587b3cbba52 100644 (file)
@@ -118,6 +118,10 @@ module_param_named(i915_enable_ppgtt, i915_enable_ppgtt, int, 0600);
 MODULE_PARM_DESC(i915_enable_ppgtt,
                "Enable PPGTT (default: true)");
 
+int i915_enable_psr __read_mostly = 0;
+module_param_named(enable_psr, i915_enable_psr, int, 0600);
+MODULE_PARM_DESC(enable_psr, "Enable PSR (default: false)");
+
 unsigned int i915_preliminary_hw_support __read_mostly = 0;
 module_param_named(preliminary_hw_support, i915_preliminary_hw_support, int, 0600);
 MODULE_PARM_DESC(preliminary_hw_support,
@@ -132,6 +136,16 @@ int i915_enable_ips __read_mostly = 1;
 module_param_named(enable_ips, i915_enable_ips, int, 0600);
 MODULE_PARM_DESC(enable_ips, "Enable IPS (default: true)");
 
+bool i915_fastboot __read_mostly = 0;
+module_param_named(fastboot, i915_fastboot, bool, 0600);
+MODULE_PARM_DESC(fastboot, "Try to skip unnecessary mode sets at boot time "
+                "(default: false)");
+
+bool i915_prefault_disable __read_mostly;
+module_param_named(prefault_disable, i915_prefault_disable, bool, 0600);
+MODULE_PARM_DESC(prefault_disable,
+               "Disable page prefaulting for pread/pwrite/reloc (default:false). For developers only.");
+
 static struct drm_driver driver;
 extern int intel_agp_enabled;
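The new enable_psr, fastboot and prefault_disable knobs follow the existing module_param_named() pattern: a file-scope variable, an exposed parameter name, and a description string. A rough user-space model of what that machinery provides, filled in from "name=value" strings as on the kernel command line (i915.enable_psr=1); the parsing itself is this sketch's invention:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stddef.h>

static int enable_psr = 0;          /* default: false */
static int fastboot = 0;            /* default: false */
static int prefault_disable = 0;    /* default: false */

static struct { const char *name; int *val; } params[] = {
        { "enable_psr", &enable_psr },
        { "fastboot", &fastboot },
        { "prefault_disable", &prefault_disable },
};

static void set_param(const char *arg)
{
        for (size_t i = 0; i < sizeof(params) / sizeof(params[0]); i++) {
                size_t n = strlen(params[i].name);
                if (!strncmp(arg, params[i].name, n) && arg[n] == '=')
                        *params[i].val = atoi(arg + n + 1);
        }
}

int main(void)
{
        set_param("enable_psr=1");
        printf("enable_psr=%d fastboot=%d prefault_disable=%d\n",
               enable_psr, fastboot, prefault_disable);
        return 0;
}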
 
@@ -551,7 +565,11 @@ static int i915_drm_freeze(struct drm_device *dev)
 
        /* If KMS is active, we do the leavevt stuff here */
        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-               int error = i915_gem_idle(dev);
+               int error;
+
+               mutex_lock(&dev->struct_mutex);
+               error = i915_gem_idle(dev);
+               mutex_unlock(&dev->struct_mutex);
                if (error) {
                        dev_err(&dev->pdev->dev,
                                "GEM idle failed, resume might fail\n");
@@ -656,7 +674,6 @@ static int __i915_drm_thaw(struct drm_device *dev)
                intel_init_pch_refclk(dev);
 
                mutex_lock(&dev->struct_mutex);
-               dev_priv->mm.suspended = 0;
 
                error = i915_gem_init_hw(dev);
                mutex_unlock(&dev->struct_mutex);
@@ -793,28 +810,29 @@ static int i965_reset_complete(struct drm_device *dev)
 static int i965_do_reset(struct drm_device *dev)
 {
        int ret;
-       u8 gdrst;
 
        /*
         * Set the domains we want to reset (GRDOM/bits 2 and 3) as
         * well as the reset bit (GR/bit 0).  Setting the GR bit
         * triggers the reset; when done, the hardware will clear it.
         */
-       pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
        pci_write_config_byte(dev->pdev, I965_GDRST,
-                             gdrst | GRDOM_RENDER |
-                             GRDOM_RESET_ENABLE);
+                             GRDOM_RENDER | GRDOM_RESET_ENABLE);
        ret =  wait_for(i965_reset_complete(dev), 500);
        if (ret)
                return ret;
 
        /* We can't reset render&media without also resetting display ... */
-       pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
        pci_write_config_byte(dev->pdev, I965_GDRST,
-                             gdrst | GRDOM_MEDIA |
-                             GRDOM_RESET_ENABLE);
+                             GRDOM_MEDIA | GRDOM_RESET_ENABLE);
 
-       return wait_for(i965_reset_complete(dev), 500);
+       ret =  wait_for(i965_reset_complete(dev), 500);
+       if (ret)
+               return ret;
+
+       pci_write_config_byte(dev->pdev, I965_GDRST, 0);
+
+       return 0;
 }
 
 static int ironlake_do_reset(struct drm_device *dev)
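The rewritten i965_do_reset() drops the read-modify-write of GDRST so stale bits from a previous reset cannot leak into the next one, checks the wait result for the media-domain reset instead of returning it blindly, and finishes by writing 0 so the reset bits do not stick. A compilable replay of that ordering; the bit values below are illustrative, not the real GDRST layout:

#include <stdio.h>

#define GRDOM_RENDER       (1u << 2)
#define GRDOM_MEDIA        (2u << 2)
#define GRDOM_RESET_ENABLE (1u << 0)

static unsigned char gdrst;

static void gdrst_write(unsigned char val)
{
        gdrst = val;                    /* stands in for the PCI config write */
        gdrst &= ~GRDOM_RESET_ENABLE;   /* hardware clears the bit when done */
}

static int reset_complete(void)
{
        return (gdrst & GRDOM_RESET_ENABLE) == 0;
}

int main(void)
{
        /* Plain writes instead of read-modify-write: stale GDRST bits
         * from a previous reset no longer leak into this one. */
        gdrst_write(GRDOM_RENDER | GRDOM_RESET_ENABLE);
        if (!reset_complete())
                return -1;

        gdrst_write(GRDOM_MEDIA | GRDOM_RESET_ENABLE);
        if (!reset_complete())          /* media wait is now checked too */
                return -1;

        gdrst_write(0);                 /* leave the register clean */
        printf("reset sequence done, gdrst=%#x\n", gdrst);
        return 0;
}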
@@ -955,11 +973,11 @@ int i915_reset(struct drm_device *dev)
         * switched away).
         */
        if (drm_core_check_feature(dev, DRIVER_MODESET) ||
-                       !dev_priv->mm.suspended) {
+                       !dev_priv->ums.mm_suspended) {
                struct intel_ring_buffer *ring;
                int i;
 
-               dev_priv->mm.suspended = 0;
+               dev_priv->ums.mm_suspended = 0;
 
                i915_gem_init_swizzling(dev);
 
index d2ee3343c9439cbcf306ce443b9fbe57e53e534a..331c00b69f151b5e13adec8f79d4e21212c4b50b 100644 (file)
@@ -144,6 +144,7 @@ enum intel_dpll_id {
 
 struct intel_dpll_hw_state {
        uint32_t dpll;
+       uint32_t dpll_md;
        uint32_t fp0;
        uint32_t fp1;
 };
@@ -156,6 +157,8 @@ struct intel_shared_dpll {
        /* should match the index in the dev_priv->shared_dplls array */
        enum intel_dpll_id id;
        struct intel_dpll_hw_state hw_state;
+       void (*mode_set)(struct drm_i915_private *dev_priv,
+                        struct intel_shared_dpll *pll);
        void (*enable)(struct drm_i915_private *dev_priv,
                       struct intel_shared_dpll *pll);
        void (*disable)(struct drm_i915_private *dev_priv,
@@ -364,6 +367,7 @@ struct drm_i915_display_funcs {
         * fills out the pipe-config with the hw state. */
        bool (*get_pipe_config)(struct intel_crtc *,
                                struct intel_crtc_config *);
+       void (*get_clock)(struct intel_crtc *, struct intel_crtc_config *);
        int (*crtc_mode_set)(struct drm_crtc *crtc,
                             int x, int y,
                             struct drm_framebuffer *old_fb);
@@ -442,6 +446,54 @@ enum i915_cache_level {
 
 typedef uint32_t gen6_gtt_pte_t;
 
+struct i915_address_space {
+       struct drm_mm mm;
+       struct drm_device *dev;
+       struct list_head global_link;
+       unsigned long start;            /* Start offset always 0 for dri2 */
+       size_t total;           /* size addr space maps (ex. 2GB for ggtt) */
+
+       struct {
+               dma_addr_t addr;
+               struct page *page;
+       } scratch;
+
+       /**
+        * List of objects currently involved in rendering.
+        *
+        * Includes buffers having the contents of their GPU caches
+        * flushed, not necessarily primitives.  last_rendering_seqno
+        * represents when the rendering involved will be completed.
+        *
+        * A reference is held on the buffer while on this list.
+        */
+       struct list_head active_list;
+
+       /**
+        * LRU list of objects which are not in the ringbuffer and
+        * are ready to unbind, but are still in the GTT.
+        *
+        * last_rendering_seqno is 0 while an object is in this list.
+        *
+        * A reference is not held on the buffer while on this list,
+        * as merely being GTT-bound shouldn't prevent its being
+        * freed, and we'll pull it off the list in the free path.
+        */
+       struct list_head inactive_list;
+
+       /* FIXME: Need a more generic return type */
+       gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr,
+                                    enum i915_cache_level level);
+       void (*clear_range)(struct i915_address_space *vm,
+                           unsigned int first_entry,
+                           unsigned int num_entries);
+       void (*insert_entries)(struct i915_address_space *vm,
+                              struct sg_table *st,
+                              unsigned int first_entry,
+                              enum i915_cache_level cache_level);
+       void (*cleanup)(struct i915_address_space *vm);
+};
+
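The function pointers at the bottom of i915_address_space form a per-VM vtable: each op takes the address space itself as its first argument, so the GGTT and future PPGTTs can share call sites while overriding behavior. A minimal sketch of that shape with invented types:

#include <stdio.h>

struct address_space {
        const char *name;
        void (*clear_range)(struct address_space *vm,
                            unsigned int first, unsigned int count);
};

static void ggtt_clear(struct address_space *vm,
                       unsigned int first, unsigned int count)
{
        printf("%s: clearing PTEs [%u, %u)\n", vm->name, first, first + count);
}

int main(void)
{
        struct address_space ggtt = {
                .name = "ggtt",
                .clear_range = ggtt_clear,
        };

        /* Callers no longer care which VM they hold. */
        ggtt.clear_range(&ggtt, 0, 256);
        return 0;
}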
 /* The Graphics Translation Table is the way in which GEN hardware translates a
  * Graphics Virtual Address into a Physical Address. In addition to the normal
  * collateral associated with any va->pa translations GEN hardware also has a
@@ -450,8 +502,7 @@ typedef uint32_t gen6_gtt_pte_t;
  * the spec.
  */
 struct i915_gtt {
-       unsigned long start;            /* Start offset of used GTT */
-       size_t total;                   /* Total size GTT can map */
+       struct i915_address_space base;
        size_t stolen_size;             /* Total size of stolen memory */
 
        unsigned long mappable_end;     /* End offset that we can CPU map */
@@ -462,50 +513,35 @@ struct i915_gtt {
        void __iomem *gsm;
 
        bool do_idle_maps;
-       dma_addr_t scratch_page_dma;
-       struct page *scratch_page;
+
+       int mtrr;
 
        /* global gtt ops */
        int (*gtt_probe)(struct drm_device *dev, size_t *gtt_total,
                          size_t *stolen, phys_addr_t *mappable_base,
                          unsigned long *mappable_end);
-       void (*gtt_remove)(struct drm_device *dev);
-       void (*gtt_clear_range)(struct drm_device *dev,
-                               unsigned int first_entry,
-                               unsigned int num_entries);
-       void (*gtt_insert_entries)(struct drm_device *dev,
-                                  struct sg_table *st,
-                                  unsigned int pg_start,
-                                  enum i915_cache_level cache_level);
-       gen6_gtt_pte_t (*pte_encode)(struct drm_device *dev,
-                                    dma_addr_t addr,
-                                    enum i915_cache_level level);
 };
-#define gtt_total_entries(gtt) ((gtt).total >> PAGE_SHIFT)
+#define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT)
 
-#define I915_PPGTT_PD_ENTRIES 512
-#define I915_PPGTT_PT_ENTRIES 1024
 struct i915_hw_ppgtt {
-       struct drm_device *dev;
+       struct i915_address_space base;
        unsigned num_pd_entries;
        struct page **pt_pages;
        uint32_t pd_offset;
        dma_addr_t *pt_dma_addr;
-       dma_addr_t scratch_page_dma_addr;
 
-       /* pte functions, mirroring the interface of the global gtt. */
-       void (*clear_range)(struct i915_hw_ppgtt *ppgtt,
-                           unsigned int first_entry,
-                           unsigned int num_entries);
-       void (*insert_entries)(struct i915_hw_ppgtt *ppgtt,
-                              struct sg_table *st,
-                              unsigned int pg_start,
-                              enum i915_cache_level cache_level);
-       gen6_gtt_pte_t (*pte_encode)(struct drm_device *dev,
-                                    dma_addr_t addr,
-                                    enum i915_cache_level level);
        int (*enable)(struct drm_device *dev);
-       void (*cleanup)(struct i915_hw_ppgtt *ppgtt);
+};
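Both i915_gtt and i915_hw_ppgtt now embed an i915_address_space as their first member: generic code passes the base around, and container-specific code recovers its wrapper with container_of(). A self-contained illustration, with the structs heavily trimmed:

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct address_space { unsigned long start; size_t total; };

struct hw_ppgtt {
        struct address_space base;      /* embedded "base class" */
        unsigned num_pd_entries;
};

static void print_vm(struct address_space *vm)
{
        printf("vm spans [%#lx, %#lx)\n",
               vm->start, vm->start + (unsigned long)vm->total);
}

int main(void)
{
        struct hw_ppgtt ppgtt = {
                .base = { 0, 1 << 20 },
                .num_pd_entries = 512,
        };

        print_vm(&ppgtt.base);          /* "upcast": pass the base */
        struct hw_ppgtt *p =            /* "downcast": recover the wrapper */
                container_of(&ppgtt.base, struct hw_ppgtt, base);
        printf("pd entries: %u\n", p->num_pd_entries);
        return 0;
}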
+
+/* To make things as simple as possible (i.e. no refcounting), a VMA's lifetime
+ * will always be <= an object's lifetime. So object refcounting should cover us.
+ */
+struct i915_vma {
+       struct drm_mm_node node;
+       struct drm_i915_gem_object *obj;
+       struct i915_address_space *vm;
+
+       struct list_head vma_link; /* Link in the object's VMA list */
 };
 
 struct i915_ctx_hang_stats {
@@ -528,15 +564,46 @@ struct i915_hw_context {
        struct i915_ctx_hang_stats hang_stats;
 };
 
-enum no_fbc_reason {
-       FBC_NO_OUTPUT, /* no outputs enabled to compress */
-       FBC_STOLEN_TOO_SMALL, /* not enough space to hold compressed buffers */
-       FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */
-       FBC_MODE_TOO_LARGE, /* mode too large for compression */
-       FBC_BAD_PLANE, /* fbc not supported on plane */
-       FBC_NOT_TILED, /* buffer not tiled */
-       FBC_MULTIPLE_PIPES, /* more than one pipe active */
-       FBC_MODULE_PARAM,
+struct i915_fbc {
+       unsigned long size;
+       unsigned int fb_id;
+       enum plane plane;
+       int y;
+
+       struct drm_mm_node *compressed_fb;
+       struct drm_mm_node *compressed_llb;
+
+       struct intel_fbc_work {
+               struct delayed_work work;
+               struct drm_crtc *crtc;
+               struct drm_framebuffer *fb;
+               int interval;
+       } *fbc_work;
+
+       enum {
+               FBC_NO_OUTPUT, /* no outputs enabled to compress */
+               FBC_STOLEN_TOO_SMALL, /* not enough space for buffers */
+               FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */
+               FBC_MODE_TOO_LARGE, /* mode too large for compression */
+               FBC_BAD_PLANE, /* fbc not supported on plane */
+               FBC_NOT_TILED, /* buffer not tiled */
+               FBC_MULTIPLE_PIPES, /* more than one pipe active */
+               FBC_MODULE_PARAM,
+               FBC_CHIP_DEFAULT, /* disabled by default on this chip */
+       } no_fbc_reason;
+};
+
+enum no_psr_reason {
+       PSR_NO_SOURCE, /* Not supported on platform */
+       PSR_NO_SINK, /* Not supported by panel */
+       PSR_MODULE_PARAM,
+       PSR_CRTC_NOT_ACTIVE,
+       PSR_PWR_WELL_ENABLED,
+       PSR_NOT_TILED,
+       PSR_SPRITE_ENABLED,
+       PSR_S3D_ENABLED,
+       PSR_INTERLACED_ENABLED,
+       PSR_HSW_NOT_DDIA,
 };
 
 enum intel_pch {
@@ -722,12 +789,12 @@ struct i915_suspend_saved_registers {
 };
 
 struct intel_gen6_power_mgmt {
+       /* work and pm_iir are protected by dev_priv->irq_lock */
        struct work_struct work;
-       struct delayed_work vlv_work;
        u32 pm_iir;
-       /* lock - irqsave spinlock that protectects the work_struct and
-        * pm_iir. */
-       spinlock_t lock;
+
+       /* On vlv we need to manually drop to Vmin with a delayed work. */
+       struct delayed_work vlv_work;
 
        /* The below variables and all the rps hw state are protected by
         * dev->struct_mutex. */
@@ -793,6 +860,18 @@ struct i915_dri1_state {
        uint32_t counter;
 };
 
+struct i915_ums_state {
+       /**
+        * Flag if the X Server, and thus DRM, is not currently in
+        * control of the device.
+        *
+        * This is set between LeaveVT and EnterVT.  It needs to be
+        * replaced with a semaphore.  It also needs to be
+        * transitioned away from for kernel modesetting.
+        */
+       int mm_suspended;
+};
+
 struct intel_l3_parity {
        u32 *remap_info;
        struct work_struct error_work;
@@ -801,8 +880,6 @@ struct intel_l3_parity {
 struct i915_gem_mm {
        /** Memory allocator for GTT stolen memory */
        struct drm_mm stolen;
-       /** Memory allocator for GTT */
-       struct drm_mm gtt_space;
        /** List of all objects in gtt_space. Used to restore gtt
         * mappings on resume */
        struct list_head bound_list;
@@ -816,37 +893,12 @@ struct i915_gem_mm {
        /** Usable portion of the GTT for GEM */
        unsigned long stolen_base; /* limited to low memory (32-bit) */
 
-       int gtt_mtrr;
-
        /** PPGTT used for aliasing the PPGTT with the GTT */
        struct i915_hw_ppgtt *aliasing_ppgtt;
 
        struct shrinker inactive_shrinker;
        bool shrinker_no_lock_stealing;
 
-       /**
-        * List of objects currently involved in rendering.
-        *
-        * Includes buffers having the contents of their GPU caches
-        * flushed, not necessarily primitives.  last_rendering_seqno
-        * represents when the rendering involved will be completed.
-        *
-        * A reference is held on the buffer while on this list.
-        */
-       struct list_head active_list;
-
-       /**
-        * LRU list of objects which are not in the ringbuffer and
-        * are ready to unbind, but are still in the GTT.
-        *
-        * last_rendering_seqno is 0 while an object is in this list.
-        *
-        * A reference is not held on the buffer while on this list,
-        * as merely being GTT-bound shouldn't prevent its being
-        * freed, and we'll pull it off the list in the free path.
-        */
-       struct list_head inactive_list;
-
        /** LRU list of objects with fence regs on them. */
        struct list_head fence_list;
 
@@ -865,16 +917,6 @@ struct i915_gem_mm {
         */
        bool interruptible;
 
-       /**
-        * Flag if the X Server, and thus DRM, is not currently in
-        * control of the device.
-        *
-        * This is set between LeaveVT and EnterVT.  It needs to be
-        * replaced with a semaphore.  It also needs to be
-        * transitioned away from for kernel modesetting.
-        */
-       int suspended;
-
        /** Bit 6 swizzling required for X tiling */
        uint32_t bit_6_swizzle_x;
        /** Bit 6 swizzling required for Y tiling */
@@ -897,6 +939,11 @@ struct drm_i915_error_state_buf {
        loff_t pos;
 };
 
+struct i915_error_state_file_priv {
+       struct drm_device *dev;
+       struct drm_i915_error_state *error;
+};
+
 struct i915_gpu_error {
        /* For hangcheck timer */
 #define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
@@ -1059,12 +1106,7 @@ typedef struct drm_i915_private {
 
        int num_plane;
 
-       unsigned long cfb_size;
-       unsigned int cfb_fb;
-       enum plane cfb_plane;
-       int cfb_y;
-       struct intel_fbc_work *fbc_work;
-
+       struct i915_fbc fbc;
        struct intel_opregion opregion;
        struct intel_vbt_data vbt;
 
@@ -1081,8 +1123,6 @@ typedef struct drm_i915_private {
        } backlight;
 
        /* LVDS info */
-       struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
-       struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
        bool no_aux_handshake;
 
        struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
@@ -1105,7 +1145,8 @@ typedef struct drm_i915_private {
        enum modeset_restore modeset_restore;
        struct mutex modeset_restore_lock;
 
-       struct i915_gtt gtt;
+       struct list_head vm_list; /* Global list of all address spaces */
+       struct i915_gtt gtt; /* VMA representing the global address space */
 
        struct i915_gem_mm mm;
 
@@ -1132,6 +1173,9 @@ typedef struct drm_i915_private {
 
        struct intel_l3_parity l3_parity;
 
+       /* Cannot be determined by PCIID. You must always read a register. */
+       size_t ellc_size;
+
        /* gen6+ rps state */
        struct intel_gen6_power_mgmt rps;
 
@@ -1142,10 +1186,7 @@ typedef struct drm_i915_private {
        /* Haswell power well */
        struct i915_power_well power_well;
 
-       enum no_fbc_reason no_fbc_reason;
-
-       struct drm_mm_node *compressed_fb;
-       struct drm_mm_node *compressed_llb;
+       enum no_psr_reason no_psr_reason;
 
        struct i915_gpu_error gpu_error;
 
@@ -1173,6 +1214,8 @@ typedef struct drm_i915_private {
        /* Old dri1 support infrastructure, beware the dragons ya fools entering
         * here! */
        struct i915_dri1_state dri1;
+       /* Old ums support infrastructure, same warning applies. */
+       struct i915_ums_state ums;
 } drm_i915_private_t;
 
 /* Iterate over initialised rings */
@@ -1187,7 +1230,7 @@ enum hdmi_force_audio {
        HDMI_AUDIO_ON,                  /* force turn on HDMI audio */
 };
 
-#define I915_GTT_RESERVED ((struct drm_mm_node *)0x1)
+#define I915_GTT_OFFSET_NONE ((u32)-1)
 
 struct drm_i915_gem_object_ops {
        /* Interface between the GEM object and its backing storage.
@@ -1212,8 +1255,9 @@ struct drm_i915_gem_object {
 
        const struct drm_i915_gem_object_ops *ops;
 
-       /** Current space allocated to this object in the GTT, if any. */
-       struct drm_mm_node *gtt_space;
+       /** List of VMAs backed by this object */
+       struct list_head vma_list;
+
        /** Stolen memory for this object, instead of being backed by shmem. */
        struct drm_mm_node *stolen;
        struct list_head global_list;
@@ -1314,13 +1358,6 @@ struct drm_i915_gem_object {
        unsigned long exec_handle;
        struct drm_i915_gem_exec_object2 *exec_entry;
 
-       /**
-        * Current offset of the object in GTT space.
-        *
-        * This is the same as gtt_space->start
-        */
-       uint32_t gtt_offset;
-
        struct intel_ring_buffer *ring;
 
        /** Breadcrumb of last rendering to the buffer. */
@@ -1346,6 +1383,52 @@ struct drm_i915_gem_object {
 
 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
 
+/* This is a temporary define to help transition us to real VMAs. If you see
+ * this, you're either reviewing code, or bisecting it. */
+static inline struct i915_vma *
+__i915_gem_obj_to_vma(struct drm_i915_gem_object *obj)
+{
+       if (list_empty(&obj->vma_list))
+               return NULL;
+       return list_first_entry(&obj->vma_list, struct i915_vma, vma_link);
+}
+
+/* Whether or not this object is currently mapped by the translation tables */
+static inline bool
+i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *o)
+{
+       struct i915_vma *vma = __i915_gem_obj_to_vma(o);
+       if (vma == NULL)
+               return false;
+       return drm_mm_node_allocated(&vma->node);
+}
+
+/* Offset of the first PTE pointing to this object */
+static inline unsigned long
+i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *o)
+{
+       BUG_ON(list_empty(&o->vma_list));
+       return __i915_gem_obj_to_vma(o)->node.start;
+}
+
+/* The size used in the translation tables may be larger than the actual size of
+ * the object on GEN2/GEN3 because of the way tiling is handled. See
+ * i915_gem_get_gtt_size() for more details.
+ */
+static inline unsigned long
+i915_gem_obj_ggtt_size(struct drm_i915_gem_object *o)
+{
+       BUG_ON(list_empty(&o->vma_list));
+       return __i915_gem_obj_to_vma(o)->node.size;
+}
+
+static inline void
+i915_gem_obj_ggtt_set_color(struct drm_i915_gem_object *o,
+                           enum i915_cache_level color)
+{
+       __i915_gem_obj_to_vma(o)->node.color = color;
+}
+
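A user-space model of how these accessors compose: an object carries a list of VMAs, the first VMA is (for now) its only GGTT binding, and "bound" means that VMA's drm_mm node is allocated. list_head and drm_mm_node are stubbed; the control flow mirrors the helpers above:

#include <stdio.h>
#include <stdbool.h>

struct mm_node { bool allocated; unsigned long start, size; };
struct vma { struct mm_node node; };
struct gem_object { struct vma *vma_list; };    /* NULL == empty list */

static struct vma *obj_to_vma(struct gem_object *obj)
{
        return obj->vma_list;                   /* first entry or NULL */
}

static bool obj_ggtt_bound(struct gem_object *obj)
{
        struct vma *vma = obj_to_vma(obj);
        return vma && vma->node.allocated;
}

int main(void)
{
        struct vma v = { .node = { true, 0x10000, 4096 } };
        struct gem_object obj = { .vma_list = &v };

        if (obj_ggtt_bound(&obj))
                printf("bound at %#lx, size %lu\n",
                       obj_to_vma(&obj)->node.start,
                       obj_to_vma(&obj)->node.size);
        return 0;
}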
 /**
  * Request queue structure.
  *
@@ -1540,9 +1623,12 @@ extern int i915_enable_rc6 __read_mostly;
 extern int i915_enable_fbc __read_mostly;
 extern bool i915_enable_hangcheck __read_mostly;
 extern int i915_enable_ppgtt __read_mostly;
+extern int i915_enable_psr __read_mostly;
 extern unsigned int i915_preliminary_hw_support __read_mostly;
 extern int i915_disable_power_well __read_mostly;
 extern int i915_enable_ips __read_mostly;
+extern bool i915_fastboot __read_mostly;
+extern bool i915_prefault_disable __read_mostly;
 
 extern int i915_suspend(struct drm_device *dev, pm_message_t state);
 extern int i915_resume(struct drm_device *dev);
@@ -1578,6 +1664,7 @@ extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
 extern void intel_console_resume(struct work_struct *work);
 
 /* i915_irq.c */
+void i915_queue_hangcheck(struct drm_device *dev);
 void i915_hangcheck_elapsed(unsigned long data);
 void i915_handle_error(struct drm_device *dev, bool wedged);
 
@@ -1586,21 +1673,12 @@ extern void intel_hpd_init(struct drm_device *dev);
 extern void intel_gt_init(struct drm_device *dev);
 extern void intel_gt_sanitize(struct drm_device *dev);
 
-void i915_error_state_free(struct kref *error_ref);
-
 void
 i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
 
 void
 i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
 
-#ifdef CONFIG_DEBUG_FS
-extern void i915_destroy_error_state(struct drm_device *dev);
-#else
-#define i915_destroy_error_state(x)
-#endif
-
-
 /* i915_gem.c */
 int i915_gem_init_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);
@@ -1657,6 +1735,9 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
                                                  size_t size);
 void i915_gem_free_object(struct drm_gem_object *obj);
+struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj,
+                                    struct i915_address_space *vm);
+void i915_gem_vma_destroy(struct i915_vma *vma);
 
 int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
                                     uint32_t alignment,
@@ -1827,7 +1908,7 @@ static inline void i915_gem_context_unreference(struct i915_hw_context *ctx)
 }
 
 struct i915_ctx_hang_stats * __must_check
-i915_gem_context_get_hang_stats(struct intel_ring_buffer *ring,
+i915_gem_context_get_hang_stats(struct drm_device *dev,
                                struct drm_file *file,
                                u32 id);
 int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
@@ -1911,8 +1992,27 @@ void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
 /* i915_debugfs.c */
 int i915_debugfs_init(struct drm_minor *minor);
 void i915_debugfs_cleanup(struct drm_minor *minor);
+
+/* i915_gpu_error.c */
 __printf(2, 3)
 void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
+int i915_error_state_to_str(struct drm_i915_error_state_buf *estr,
+                           const struct i915_error_state_file_priv *error);
+int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb,
+                             size_t count, loff_t pos);
+static inline void i915_error_state_buf_release(
+       struct drm_i915_error_state_buf *eb)
+{
+       kfree(eb->buf);
+}
+void i915_capture_error_state(struct drm_device *dev);
+void i915_error_state_get(struct drm_device *dev,
+                         struct i915_error_state_file_priv *error_priv);
+void i915_error_state_put(struct i915_error_state_file_priv *error_priv);
+void i915_destroy_error_state(struct drm_device *dev);
+
+void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone);
+const char *i915_cache_level_str(int type);
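These declarations sketch a capture/get/render/put lifecycle for the error state now consolidated in i915_gpu_error.c: i915_capture_error_state() records a snapshot, readers take a reference via i915_error_state_get(), format it with i915_error_state_to_str(), then drop it with _put(). A toy version of that flow, modeling the real kref with a plain counter:

#include <stdio.h>
#include <stdlib.h>

struct error_state { int ref; const char *summary; };
struct error_file_priv { struct error_state *error; };

static struct error_state *capture(void)
{
        struct error_state *e = malloc(sizeof(*e));
        if (!e)
                return NULL;
        e->ref = 1;
        e->summary = "ring hung at 0xdeadbeef";
        return e;
}

static void error_get(struct error_file_priv *priv, struct error_state *e)
{
        e->ref++;                       /* reader holds its own reference */
        priv->error = e;
}

static void error_put(struct error_file_priv *priv)
{
        if (--priv->error->ref == 0)
                free(priv->error);
}

int main(void)
{
        struct error_state *e = capture();
        struct error_file_priv priv;

        if (!e)
                return 1;
        error_get(&priv, e);
        printf("error_state: %s\n", priv.error->summary); /* ~ to_str() */
        error_put(&priv);

        if (--e->ref == 0)              /* ~ i915_destroy_error_state() */
                free(e);
        return 0;
}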
 
 /* i915_suspend.c */
 extern int i915_save_state(struct drm_device *dev);
@@ -1992,7 +2092,6 @@ int i915_reg_read_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file);
 
 /* overlay */
-#ifdef CONFIG_DEBUG_FS
 extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
 extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
                                            struct intel_overlay_error_state *error);
@@ -2001,7 +2100,6 @@ extern struct intel_display_error_state *intel_display_capture_error_state(struc
 extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
                                            struct drm_device *dev,
                                            struct intel_display_error_state *error);
-#endif
 
 /* On SNB platform, before reading ring registers forcewake bit
  * must be set to prevent GT core from power down and stale values being
index d9e2208cfe98f2fdcb3957bf4ee13f0497e06dfb..eceab96d76f52cb1433aeb6c69dc06d9970b703a 100644 (file)
@@ -135,7 +135,7 @@ int i915_mutex_lock_interruptible(struct drm_device *dev)
 static inline bool
 i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
 {
-       return obj->gtt_space && !obj->active;
+       return i915_gem_obj_ggtt_bound(obj) && !obj->active;
 }
 
 int
@@ -178,10 +178,10 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
        mutex_lock(&dev->struct_mutex);
        list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
                if (obj->pin_count)
-                       pinned += obj->gtt_space->size;
+                       pinned += i915_gem_obj_ggtt_size(obj);
        mutex_unlock(&dev->struct_mutex);
 
-       args->aper_size = dev_priv->gtt.total;
+       args->aper_size = dev_priv->gtt.base.total;
        args->aper_available_size = args->aper_size - pinned;
 
        return 0;
@@ -219,16 +219,10 @@ i915_gem_create(struct drm_file *file,
                return -ENOMEM;
 
        ret = drm_gem_handle_create(file, &obj->base, &handle);
-       if (ret) {
-               drm_gem_object_release(&obj->base);
-               i915_gem_info_remove_obj(dev->dev_private, obj->base.size);
-               i915_gem_object_free(obj);
-               return ret;
-       }
-
        /* drop reference from allocate - handle holds it now */
-       drm_gem_object_unreference(&obj->base);
-       trace_i915_gem_object_create(obj);
+       drm_gem_object_unreference_unlocked(&obj->base);
+       if (ret)
+               return ret;
 
        *handle_p = handle;
        return 0;
@@ -422,7 +416,7 @@ i915_gem_shmem_pread(struct drm_device *dev,
                 * anyway again before the next pread happens. */
                if (obj->cache_level == I915_CACHE_NONE)
                        needs_clflush = 1;
-               if (obj->gtt_space) {
+               if (i915_gem_obj_ggtt_bound(obj)) {
                        ret = i915_gem_object_set_to_gtt_domain(obj, false);
                        if (ret)
                                return ret;
@@ -465,7 +459,7 @@ i915_gem_shmem_pread(struct drm_device *dev,
 
                mutex_unlock(&dev->struct_mutex);
 
-               if (!prefaulted) {
+               if (likely(!i915_prefault_disable) && !prefaulted) {
                        ret = fault_in_multipages_writeable(user_data, remain);
                        /* Userspace is tricking us, but we've already clobbered
                         * its pages with the prefault and promised to write the
@@ -609,7 +603,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
        user_data = to_user_ptr(args->data_ptr);
        remain = args->size;
 
-       offset = obj->gtt_offset + args->offset;
+       offset = i915_gem_obj_ggtt_offset(obj) + args->offset;
 
        while (remain > 0) {
                /* Operation in this page
@@ -739,7 +733,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
                 * right away and we therefore have to clflush anyway. */
                if (obj->cache_level == I915_CACHE_NONE)
                        needs_clflush_after = 1;
-               if (obj->gtt_space) {
+               if (i915_gem_obj_ggtt_bound(obj)) {
                        ret = i915_gem_object_set_to_gtt_domain(obj, true);
                        if (ret)
                                return ret;
@@ -860,10 +854,12 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
                       args->size))
                return -EFAULT;
 
-       ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
-                                          args->size);
-       if (ret)
-               return -EFAULT;
+       if (likely(!i915_prefault_disable)) {
+               ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
+                                                  args->size);
+               if (ret)
+                       return -EFAULT;
+       }
 
        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
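Both pread and pwrite now skip the up-front page prefault when the new prefault_disable knob is set. A sketch of what the prefault does when it runs: touch one byte per page so the later copy loops, which run under locks, are unlikely to fault. The 4096 page size and the volatile read are this sketch's simplification of fault_in_multipages_readable():

#include <stdio.h>
#include <stddef.h>

static int prefault_disable;            /* the new module knob */

static void prefault_readable(const char *buf, size_t len)
{
        volatile char sink = 0;
        for (size_t off = 0; off < len; off += 4096)
                sink = buf[off];        /* fault the page in, discard data */
        (void)sink;
}

int main(void)
{
        static char payload[3 * 4096];

        if (!prefault_disable)
                prefault_readable(payload, sizeof(payload));
        printf("prefaulted %zu bytes\n", sizeof(payload));
        return 0;
}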
@@ -1360,8 +1356,9 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 
        obj->fault_mappable = true;
 
-       pfn = ((dev_priv->gtt.mappable_base + obj->gtt_offset) >> PAGE_SHIFT) +
-               page_offset;
+       pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj);
+       pfn >>= PAGE_SHIFT;
+       pfn += page_offset;
 
        /* Finally, remap it using the new GTT offset */
        ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
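A worked instance of the pfn arithmetic above, with made-up addresses: the CPU-visible aperture base plus the object's GGTT offset, shifted down to a page frame number, plus the faulting page's index within the object (PAGE_SHIFT 12 assumes 4K pages):

#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
        unsigned long mappable_base = 0xc0000000ul; /* aperture in phys space */
        unsigned long ggtt_offset   = 0x00200000ul; /* object at 2MB into GTT */
        unsigned long page_offset   = 3;            /* 4th page of the object */

        unsigned long pfn = mappable_base + ggtt_offset;
        pfn >>= PAGE_SHIFT;
        pfn += page_offset;

        printf("pfn = %#lx\n", pfn);                /* prints 0xc0203 */
        return 0;
}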
@@ -1667,7 +1664,7 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
        if (obj->pages == NULL)
                return 0;
 
-       BUG_ON(obj->gtt_space);
+       BUG_ON(i915_gem_obj_ggtt_bound(obj));
 
        if (obj->pages_pin_count)
                return -EBUSY;
@@ -1691,6 +1688,7 @@ __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
                  bool purgeable_only)
 {
        struct drm_i915_gem_object *obj, *next;
+       struct i915_address_space *vm = &dev_priv->gtt.base;
        long count = 0;
 
        list_for_each_entry_safe(obj, next,
@@ -1704,9 +1702,7 @@ __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
                }
        }
 
-       list_for_each_entry_safe(obj, next,
-                                &dev_priv->mm.inactive_list,
-                                mm_list) {
+       list_for_each_entry_safe(obj, next, &vm->inactive_list, mm_list) {
                if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
                    i915_gem_object_unbind(obj) == 0 &&
                    i915_gem_object_put_pages(obj) == 0) {
@@ -1877,6 +1873,7 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
 {
        struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
+       struct i915_address_space *vm = &dev_priv->gtt.base;
        u32 seqno = intel_ring_get_seqno(ring);
 
        BUG_ON(ring == NULL);
@@ -1893,7 +1890,7 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
        }
 
        /* Move from whatever list we were on to the tail of execution. */
-       list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
+       list_move_tail(&obj->mm_list, &vm->active_list);
        list_move_tail(&obj->ring_list, &ring->active_list);
 
        obj->last_read_seqno = seqno;
@@ -1917,11 +1914,12 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
 {
        struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
+       struct i915_address_space *vm = &dev_priv->gtt.base;
 
        BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
        BUG_ON(!obj->active);
 
-       list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
+       list_move_tail(&obj->mm_list, &vm->inactive_list);
 
        list_del_init(&obj->ring_list);
        obj->ring = NULL;
@@ -2085,11 +2083,9 @@ int __i915_add_request(struct intel_ring_buffer *ring,
        trace_i915_gem_request_add(ring, request->seqno);
        ring->outstanding_lazy_request = 0;
 
-       if (!dev_priv->mm.suspended) {
-               if (i915_enable_hangcheck) {
-                       mod_timer(&dev_priv->gpu_error.hangcheck_timer,
-                                 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
-               }
+       if (!dev_priv->ums.mm_suspended) {
+               i915_queue_hangcheck(ring->dev);
+
                if (was_empty) {
                        queue_delayed_work(dev_priv->wq,
                                           &dev_priv->mm.retire_work,
@@ -2121,8 +2117,8 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
 
 static bool i915_head_inside_object(u32 acthd, struct drm_i915_gem_object *obj)
 {
-       if (acthd >= obj->gtt_offset &&
-           acthd < obj->gtt_offset + obj->base.size)
+       if (acthd >= i915_gem_obj_ggtt_offset(obj) &&
+           acthd < i915_gem_obj_ggtt_offset(obj) + obj->base.size)
                return true;
 
        return false;
@@ -2180,11 +2176,11 @@ static void i915_set_reset_status(struct intel_ring_buffer *ring,
 
        if (ring->hangcheck.action != wait &&
            i915_request_guilty(request, acthd, &inside)) {
-               DRM_ERROR("%s hung %s bo (0x%x ctx %d) at 0x%x\n",
+               DRM_ERROR("%s hung %s bo (0x%lx ctx %d) at 0x%x\n",
                          ring->name,
                          inside ? "inside" : "flushing",
                          request->batch_obj ?
-                         request->batch_obj->gtt_offset : 0,
+                         i915_gem_obj_ggtt_offset(request->batch_obj) : 0,
                          request->ctx ? request->ctx->id : 0,
                          acthd);
 
@@ -2275,6 +2271,7 @@ void i915_gem_restore_fences(struct drm_device *dev)
 void i915_gem_reset(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
+       struct i915_address_space *vm = &dev_priv->gtt.base;
        struct drm_i915_gem_object *obj;
        struct intel_ring_buffer *ring;
        int i;
@@ -2285,12 +2282,8 @@ void i915_gem_reset(struct drm_device *dev)
        /* Move everything out of the GPU domains to ensure we do any
         * necessary invalidation upon reuse.
         */
-       list_for_each_entry(obj,
-                           &dev_priv->mm.inactive_list,
-                           mm_list)
-       {
+       list_for_each_entry(obj, &vm->inactive_list, mm_list)
                obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
-       }
 
        i915_gem_restore_fences(dev);
 }
@@ -2400,7 +2393,7 @@ i915_gem_retire_work_handler(struct work_struct *work)
                idle &= list_empty(&ring->request_list);
        }
 
-       if (!dev_priv->mm.suspended && !idle)
+       if (!dev_priv->ums.mm_suspended && !idle)
                queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
                                   round_jiffies_up_relative(HZ));
        if (idle)
@@ -2593,9 +2586,10 @@ int
 i915_gem_object_unbind(struct drm_i915_gem_object *obj)
 {
        drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
+       struct i915_vma *vma;
        int ret;
 
-       if (obj->gtt_space == NULL)
+       if (!i915_gem_obj_ggtt_bound(obj))
                return 0;
 
        if (obj->pin_count)
@@ -2630,13 +2624,20 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
        i915_gem_object_unpin_pages(obj);
 
        list_del(&obj->mm_list);
-       list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
        /* Avoid an unnecessary call to unbind on rebind. */
        obj->map_and_fenceable = true;
 
-       drm_mm_put_block(obj->gtt_space);
-       obj->gtt_space = NULL;
-       obj->gtt_offset = 0;
+       vma = __i915_gem_obj_to_vma(obj);
+       list_del(&vma->vma_link);
+       drm_mm_remove_node(&vma->node);
+       i915_gem_vma_destroy(vma);
+
+       /* Since the unbound list is global, only move to that list if
+        * no more VMAs exist.
+        * NB: Until we have real VMAs there will only ever be one */
+       WARN_ON(!list_empty(&obj->vma_list));
+       if (list_empty(&obj->vma_list))
+               list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
 
        return 0;
 }
@@ -2691,12 +2692,12 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
        POSTING_READ(fence_reg);
 
        if (obj) {
-               u32 size = obj->gtt_space->size;
+               u32 size = i915_gem_obj_ggtt_size(obj);
                uint64_t val;
 
-               val = (uint64_t)((obj->gtt_offset + size - 4096) &
+               val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
                                 0xfffff000) << 32;
-               val |= obj->gtt_offset & 0xfffff000;
+               val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
                val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
                if (obj->tiling_mode == I915_TILING_Y)
                        val |= 1 << I965_FENCE_TILING_Y_SHIFT;
@@ -2720,15 +2721,15 @@ static void i915_write_fence_reg(struct drm_device *dev, int reg,
        u32 val;
 
        if (obj) {
-               u32 size = obj->gtt_space->size;
+               u32 size = i915_gem_obj_ggtt_size(obj);
                int pitch_val;
                int tile_width;
 
-               WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
+               WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) ||
                     (size & -size) != size ||
-                    (obj->gtt_offset & (size - 1)),
-                    "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
-                    obj->gtt_offset, obj->map_and_fenceable, size);
+                    (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
+                    "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
+                    i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size);
 
                if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
                        tile_width = 128;
@@ -2739,7 +2740,7 @@ static void i915_write_fence_reg(struct drm_device *dev, int reg,
                pitch_val = obj->stride / tile_width;
                pitch_val = ffs(pitch_val) - 1;
 
-               val = obj->gtt_offset;
+               val = i915_gem_obj_ggtt_offset(obj);
                if (obj->tiling_mode == I915_TILING_Y)
                        val |= 1 << I830_FENCE_TILING_Y_SHIFT;
                val |= I915_FENCE_SIZE_BITS(size);
@@ -2764,19 +2765,19 @@ static void i830_write_fence_reg(struct drm_device *dev, int reg,
        uint32_t val;
 
        if (obj) {
-               u32 size = obj->gtt_space->size;
+               u32 size = i915_gem_obj_ggtt_size(obj);
                uint32_t pitch_val;
 
-               WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
+               WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
                     (size & -size) != size ||
-                    (obj->gtt_offset & (size - 1)),
-                    "object 0x%08x not 512K or pot-size 0x%08x aligned\n",
-                    obj->gtt_offset, size);
+                    (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
+                    "object 0x%08lx not 512K or pot-size 0x%08x aligned\n",
+                    i915_gem_obj_ggtt_offset(obj), size);
 
                pitch_val = obj->stride / 128;
                pitch_val = ffs(pitch_val) - 1;
 
-               val = obj->gtt_offset;
+               val = i915_gem_obj_ggtt_offset(obj);
                if (obj->tiling_mode == I915_TILING_Y)
                        val |= 1 << I830_FENCE_TILING_Y_SHIFT;
                val |= I830_FENCE_SIZE_BITS(size);
@@ -2997,7 +2998,7 @@ static bool i915_gem_valid_gtt_space(struct drm_device *dev,
        if (HAS_LLC(dev))
                return true;
 
-       if (gtt_space == NULL)
+       if (!drm_mm_node_allocated(gtt_space))
                return true;
 
        if (list_empty(&gtt_space->node_list))
@@ -3030,8 +3031,8 @@ static void i915_gem_verify_gtt(struct drm_device *dev)
 
                if (obj->cache_level != obj->gtt_space->color) {
                        printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
-                              obj->gtt_space->start,
-                              obj->gtt_space->start + obj->gtt_space->size,
+                              i915_gem_obj_ggtt_offset(obj),
+                              i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
                               obj->cache_level,
                               obj->gtt_space->color);
                        err++;
@@ -3042,8 +3043,8 @@ static void i915_gem_verify_gtt(struct drm_device *dev)
                                              obj->gtt_space,
                                              obj->cache_level)) {
                        printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
-                              obj->gtt_space->start,
-                              obj->gtt_space->start + obj->gtt_space->size,
+                              i915_gem_obj_ggtt_offset(obj),
+                              i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
                               obj->cache_level);
                        err++;
                        continue;
@@ -3065,13 +3066,17 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 {
        struct drm_device *dev = obj->base.dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_mm_node *node;
+       struct i915_address_space *vm = &dev_priv->gtt.base;
        u32 size, fence_size, fence_alignment, unfenced_alignment;
        bool mappable, fenceable;
        size_t gtt_max = map_and_fenceable ?
-               dev_priv->gtt.mappable_end : dev_priv->gtt.total;
+               dev_priv->gtt.mappable_end : dev_priv->gtt.base.total;
+       struct i915_vma *vma;
        int ret;
 
+       if (WARN_ON(!list_empty(&obj->vma_list)))
+               return -EBUSY;
+
        fence_size = i915_gem_get_gtt_size(dev,
                                           obj->base.size,
                                           obj->tiling_mode);
@@ -3110,14 +3115,15 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 
        i915_gem_object_pin_pages(obj);
 
-       node = kzalloc(sizeof(*node), GFP_KERNEL);
-       if (node == NULL) {
-               i915_gem_object_unpin_pages(obj);
-               return -ENOMEM;
+       vma = i915_gem_vma_create(obj, &dev_priv->gtt.base);
+       if (IS_ERR(vma)) {
+               ret = PTR_ERR(vma);
+               goto err_unpin;
        }
 
 search_free:
-       ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space, node,
+       ret = drm_mm_insert_node_in_range_generic(&dev_priv->gtt.base.mm,
+                                                 &vma->node,
                                                  size, alignment,
                                                  obj->cache_level, 0, gtt_max);
        if (ret) {
@@ -3128,41 +3134,42 @@ search_free:
                if (ret == 0)
                        goto search_free;
 
-               i915_gem_object_unpin_pages(obj);
-               kfree(node);
-               return ret;
+               goto err_free_vma;
        }
-       if (WARN_ON(!i915_gem_valid_gtt_space(dev, node, obj->cache_level))) {
-               i915_gem_object_unpin_pages(obj);
-               drm_mm_put_block(node);
-               return -EINVAL;
+       if (WARN_ON(!i915_gem_valid_gtt_space(dev, &vma->node,
+                                             obj->cache_level))) {
+               ret = -EINVAL;
+               goto err_remove_node;
        }
 
        ret = i915_gem_gtt_prepare_object(obj);
-       if (ret) {
-               i915_gem_object_unpin_pages(obj);
-               drm_mm_put_block(node);
-               return ret;
-       }
+       if (ret)
+               goto err_remove_node;
 
        list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
-       list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
-
-       obj->gtt_space = node;
-       obj->gtt_offset = node->start;
+       list_add_tail(&obj->mm_list, &vm->inactive_list);
+       list_add(&vma->vma_link, &obj->vma_list);
 
        fenceable =
-               node->size == fence_size &&
-               (node->start & (fence_alignment - 1)) == 0;
+               i915_gem_obj_ggtt_size(obj) == fence_size &&
+               (i915_gem_obj_ggtt_offset(obj) & (fence_alignment - 1)) == 0;
 
-       mappable =
-               obj->gtt_offset + obj->base.size <= dev_priv->gtt.mappable_end;
+       mappable = i915_gem_obj_ggtt_offset(obj) + obj->base.size <=
+               dev_priv->gtt.mappable_end;
 
        obj->map_and_fenceable = mappable && fenceable;
 
        trace_i915_gem_object_bind(obj, map_and_fenceable);
        i915_gem_verify_gtt(dev);
        return 0;
+
+err_remove_node:
+       drm_mm_remove_node(&vma->node);
+err_free_vma:
+       i915_gem_vma_destroy(vma);
+err_unpin:
+       i915_gem_object_unpin_pages(obj);
+       return ret;
 }
 
 void
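The error handling in i915_gem_object_bind_to_gtt() is converted above to the kernel's usual goto-unwind ladder: each acquisition gets a label, and a failure jumps to the label that undoes everything acquired so far, in reverse order. A compilable skeleton of that shape; the three steps are stand-ins, with the last one failing on purpose:

#include <stdio.h>

static int pin_pages(void)    { return 0; }
static int insert_node(void)  { return 0; }
static int prepare_gtt(void)  { return -1; }    /* force the unwind */

int main(void)
{
        int ret;

        ret = pin_pages();
        if (ret)
                return ret;
        ret = insert_node();
        if (ret)
                goto err_unpin;
        ret = prepare_gtt();
        if (ret)
                goto err_remove_node;
        return 0;

err_remove_node:                        /* undo in reverse order */
        printf("undo: remove node\n");
err_unpin:
        printf("undo: unpin pages\n");
        return ret;
}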
@@ -3258,7 +3265,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
        int ret;
 
        /* Not valid to be called on unbound objects. */
-       if (obj->gtt_space == NULL)
+       if (!i915_gem_obj_ggtt_bound(obj))
                return -EINVAL;
 
        if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
@@ -3297,7 +3304,8 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 
        /* And bump the LRU for this access */
        if (i915_gem_object_is_inactive(obj))
-               list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
+               list_move_tail(&obj->mm_list,
+                              &dev_priv->gtt.base.inactive_list);
 
        return 0;
 }
@@ -3307,6 +3315,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 {
        struct drm_device *dev = obj->base.dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
+       struct i915_vma *vma = __i915_gem_obj_to_vma(obj);
        int ret;
 
        if (obj->cache_level == cache_level)
@@ -3317,13 +3326,13 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
                return -EBUSY;
        }
 
-       if (!i915_gem_valid_gtt_space(dev, obj->gtt_space, cache_level)) {
+       if (vma && !i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) {
                ret = i915_gem_object_unbind(obj);
                if (ret)
                        return ret;
        }
 
-       if (obj->gtt_space) {
+       if (i915_gem_obj_ggtt_bound(obj)) {
                ret = i915_gem_object_finish_gpu(obj);
                if (ret)
                        return ret;
@@ -3346,7 +3355,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
                        i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
                                               obj, cache_level);
 
-               obj->gtt_space->color = cache_level;
+               i915_gem_obj_ggtt_set_color(obj, cache_level);
        }
 
        if (cache_level == I915_CACHE_NONE) {
@@ -3627,14 +3636,14 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
        if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
                return -EBUSY;
 
-       if (obj->gtt_space != NULL) {
-               if ((alignment && obj->gtt_offset & (alignment - 1)) ||
+       if (i915_gem_obj_ggtt_bound(obj)) {
+               if ((alignment && i915_gem_obj_ggtt_offset(obj) & (alignment - 1)) ||
                    (map_and_fenceable && !obj->map_and_fenceable)) {
                        WARN(obj->pin_count,
                             "bo is already pinned with incorrect alignment:"
-                            " offset=%x, req.alignment=%x, req.map_and_fenceable=%d,"
+                            " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
                             " obj->map_and_fenceable=%d\n",
-                            obj->gtt_offset, alignment,
+                            i915_gem_obj_ggtt_offset(obj), alignment,
                             map_and_fenceable,
                             obj->map_and_fenceable);
                        ret = i915_gem_object_unbind(obj);
@@ -3643,7 +3652,7 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
                }
        }
 
-       if (obj->gtt_space == NULL) {
+       if (!i915_gem_obj_ggtt_bound(obj)) {
                struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
 
                ret = i915_gem_object_bind_to_gtt(obj, alignment,
@@ -3669,7 +3678,7 @@ void
 i915_gem_object_unpin(struct drm_i915_gem_object *obj)
 {
        BUG_ON(obj->pin_count == 0);
-       BUG_ON(obj->gtt_space == NULL);
+       BUG_ON(!i915_gem_obj_ggtt_bound(obj));
 
        if (--obj->pin_count == 0)
                obj->pin_mappable = false;
@@ -3719,7 +3728,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
         * as the X server doesn't manage domains yet
         */
        i915_gem_object_flush_cpu_write_domain(obj);
-       args->offset = obj->gtt_offset;
+       args->offset = i915_gem_obj_ggtt_offset(obj);
 out:
        drm_gem_object_unreference(&obj->base);
 unlock:
@@ -3862,6 +3871,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
        INIT_LIST_HEAD(&obj->global_list);
        INIT_LIST_HEAD(&obj->ring_list);
        INIT_LIST_HEAD(&obj->exec_list);
+       INIT_LIST_HEAD(&obj->vma_list);
 
        obj->ops = ops;
 
@@ -3926,6 +3936,8 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
        } else
                obj->cache_level = I915_CACHE_NONE;
 
+       trace_i915_gem_object_create(obj);
+
        return obj;
 }
 
@@ -3982,15 +3994,33 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
        i915_gem_object_free(obj);
 }
 
+struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj,
+                                    struct i915_address_space *vm)
+{
+       struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
+       if (vma == NULL)
+               return ERR_PTR(-ENOMEM);
+
+       INIT_LIST_HEAD(&vma->vma_link);
+       vma->vm = vm;
+       vma->obj = obj;
+
+       return vma;
+}
+
+void i915_gem_vma_destroy(struct i915_vma *vma)
+{
+       WARN_ON(vma->node.allocated);
+       kfree(vma);
+}
+
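i915_gem_vma_create() reports allocation failure as an ERR_PTR, which the caller in bind_to_gtt() checks with IS_ERR()/PTR_ERR(), and destroy warns if the VMA still owns a drm_mm node. A user-space model of that lifecycle, including a toy version of the ERR_PTR convention (an errno encoded in a pointer):

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

#define ERR_PTR(err)  ((void *)(long)(err))
#define IS_ERR(p)     ((unsigned long)(p) >= (unsigned long)-4095)
#define PTR_ERR(p)    ((long)(p))

struct vma { int allocated_node; };

static struct vma *vma_create(void)
{
        struct vma *vma = calloc(1, sizeof(*vma));
        return vma ? vma : (struct vma *)ERR_PTR(-ENOMEM);
}

static void vma_destroy(struct vma *vma)
{
        if (vma->allocated_node)        /* mirrors the WARN_ON() above */
                fprintf(stderr, "destroying a vma still in the drm_mm!\n");
        free(vma);
}

int main(void)
{
        struct vma *vma = vma_create();

        if (IS_ERR(vma))
                return (int)-PTR_ERR(vma);
        vma_destroy(vma);
        return 0;
}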
 int
 i915_gem_idle(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret;
 
-       mutex_lock(&dev->struct_mutex);
-
-       if (dev_priv->mm.suspended) {
+       if (dev_priv->ums.mm_suspended) {
                mutex_unlock(&dev->struct_mutex);
                return 0;
        }
@@ -4006,18 +4036,11 @@ i915_gem_idle(struct drm_device *dev)
        if (!drm_core_check_feature(dev, DRIVER_MODESET))
                i915_gem_evict_everything(dev);
 
-       /* Hack!  Don't let anybody do execbuf while we don't control the chip.
-        * We need to replace this with a semaphore, or something.
-        * And not confound mm.suspended!
-        */
-       dev_priv->mm.suspended = 1;
        del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
 
        i915_kernel_lost_context(dev);
        i915_gem_cleanup_ringbuffer(dev);
 
-       mutex_unlock(&dev->struct_mutex);
-
        /* Cancel the retire work handler, which should be idle now. */
        cancel_delayed_work_sync(&dev_priv->mm.retire_work);
 
@@ -4150,8 +4173,8 @@ i915_gem_init_hw(struct drm_device *dev)
        if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
                return -EIO;
 
-       if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1))
-               I915_WRITE(0x9008, I915_READ(0x9008) | 0xf0000);
+       if (dev_priv->ellc_size)
+               I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
 
        if (HAS_PCH_NOP(dev)) {
                u32 temp = I915_READ(GEN7_MSG_CTL);
@@ -4227,7 +4250,7 @@ int
 i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
 {
-       drm_i915_private_t *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;
 
        if (drm_core_check_feature(dev, DRIVER_MODESET))
@@ -4239,7 +4262,7 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
        }
 
        mutex_lock(&dev->struct_mutex);
-       dev_priv->mm.suspended = 0;
+       dev_priv->ums.mm_suspended = 0;
 
        ret = i915_gem_init_hw(dev);
        if (ret != 0) {
@@ -4247,7 +4270,7 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
                return ret;
        }
 
-       BUG_ON(!list_empty(&dev_priv->mm.active_list));
+       BUG_ON(!list_empty(&dev_priv->gtt.base.active_list));
        mutex_unlock(&dev->struct_mutex);
 
        ret = drm_irq_install(dev);
@@ -4259,7 +4282,7 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
 cleanup_ringbuffer:
        mutex_lock(&dev->struct_mutex);
        i915_gem_cleanup_ringbuffer(dev);
-       dev_priv->mm.suspended = 1;
+       dev_priv->ums.mm_suspended = 1;
        mutex_unlock(&dev->struct_mutex);
 
        return ret;
@@ -4269,11 +4292,26 @@ int
 i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
 {
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int ret;
+
        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return 0;
 
        drm_irq_uninstall(dev);
-       return i915_gem_idle(dev);
+
+       mutex_lock(&dev->struct_mutex);
+       ret = i915_gem_idle(dev);
+
+       /* Hack!  Don't let anybody do execbuf while we don't control the chip.
+        * We need to replace this with a semaphore, or something.
+        * And not confound ums.mm_suspended!
+        */
+       if (ret != 0)
+               dev_priv->ums.mm_suspended = 1;
+       mutex_unlock(&dev->struct_mutex);
+
+       return ret;
 }
 
 void
@@ -4284,9 +4322,11 @@ i915_gem_lastclose(struct drm_device *dev)
        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return;
 
+       mutex_lock(&dev->struct_mutex);
        ret = i915_gem_idle(dev);
        if (ret)
                DRM_ERROR("failed to idle hardware: %d\n", ret);
+       mutex_unlock(&dev->struct_mutex);
 }
 
 static void
@@ -4308,8 +4348,8 @@ i915_gem_load(struct drm_device *dev)
                                  SLAB_HWCACHE_ALIGN,
                                  NULL);
 
-       INIT_LIST_HEAD(&dev_priv->mm.active_list);
-       INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
+       INIT_LIST_HEAD(&dev_priv->gtt.base.active_list);
+       INIT_LIST_HEAD(&dev_priv->gtt.base.inactive_list);
        INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
        INIT_LIST_HEAD(&dev_priv->mm.bound_list);
        INIT_LIST_HEAD(&dev_priv->mm.fence_list);
@@ -4580,6 +4620,7 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
                             struct drm_i915_private,
                             mm.inactive_shrinker);
        struct drm_device *dev = dev_priv->dev;
+       struct i915_address_space *vm = &dev_priv->gtt.base;
        struct drm_i915_gem_object *obj;
        int nr_to_scan = sc->nr_to_scan;
        bool unlock = true;
@@ -4608,7 +4649,7 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
        list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
                if (obj->pages_pin_count == 0)
                        cnt += obj->base.size >> PAGE_SHIFT;
-       list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list)
+       list_for_each_entry(obj, &vm->inactive_list, mm_list)
                if (obj->pin_count == 0 && obj->pages_pin_count == 0)
                        cnt += obj->base.size >> PAGE_SHIFT;
 
index 51b7a2171caee8a9b8c7a3c6f51edb5c5487dd25..2470206a4d07490979ac66e2f2597eebbdf3aa32 100644 (file)
@@ -304,31 +304,24 @@ static int context_idr_cleanup(int id, void *p, void *data)
 }
 
 struct i915_ctx_hang_stats *
-i915_gem_context_get_hang_stats(struct intel_ring_buffer *ring,
+i915_gem_context_get_hang_stats(struct drm_device *dev,
                                struct drm_file *file,
                                u32 id)
 {
-       struct drm_i915_private *dev_priv = ring->dev->dev_private;
+       struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_file_private *file_priv = file->driver_priv;
-       struct i915_hw_context *to;
-
-       if (dev_priv->hw_contexts_disabled)
-               return ERR_PTR(-ENOENT);
-
-       if (ring->id != RCS)
-               return ERR_PTR(-EINVAL);
-
-       if (file == NULL)
-               return ERR_PTR(-EINVAL);
+       struct i915_hw_context *ctx;
 
        if (id == DEFAULT_CONTEXT_ID)
                return &file_priv->hang_stats;
 
-       to = i915_gem_context_get(file->driver_priv, id);
-       if (to == NULL)
+       ctx = NULL;
+       if (!dev_priv->hw_contexts_disabled)
+               ctx = i915_gem_context_get(file->driver_priv, id);
+       if (ctx == NULL)
                return ERR_PTR(-ENOENT);
 
-       return &to->hang_stats;
+       return &ctx->hang_stats;
 }
 
 void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
@@ -377,7 +370,7 @@ mi_set_context(struct intel_ring_buffer *ring,
 
        intel_ring_emit(ring, MI_NOOP);
        intel_ring_emit(ring, MI_SET_CONTEXT);
-       intel_ring_emit(ring, new_context->obj->gtt_offset |
+       intel_ring_emit(ring, i915_gem_obj_ggtt_offset(new_context->obj) |
                        MI_MM_SPACE_GTT |
                        MI_SAVE_EXT_STATE_EN |
                        MI_RESTORE_EXT_STATE_EN |
index 582e6a5f3dac6e4d464440ebab84f3604cc5f62e..bf945a39fbb1dedba155a76e9483795c19e6932c 100644 (file)
@@ -97,7 +97,7 @@ i915_verify_lists(struct drm_device *dev)
                }
        }
 
-       list_for_each_entry(obj, &dev_priv->mm.inactive_list, list) {
+       list_for_each_entry(obj, &i915_gtt_vm->inactive_list, list) {
                if (obj->base.dev != dev ||
                    !atomic_read(&obj->base.refcount.refcount)) {
                        DRM_ERROR("freed inactive %p\n", obj);
index c86d5d9356fd086b756b9dc207b6d7c9e824f630..df61f338dea184c1486fea791294ab388cbffbaf 100644 (file)
 static bool
 mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
 {
+       struct i915_vma *vma = __i915_gem_obj_to_vma(obj);
+
        if (obj->pin_count)
                return false;
 
        list_add(&obj->exec_list, unwind);
-       return drm_mm_scan_add_block(obj->gtt_space);
+       return drm_mm_scan_add_block(&vma->node);
 }
 
 int
@@ -47,7 +49,9 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
                         bool mappable, bool nonblocking)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
+       struct i915_address_space *vm = &dev_priv->gtt.base;
        struct list_head eviction_list, unwind_list;
+       struct i915_vma *vma;
        struct drm_i915_gem_object *obj;
        int ret = 0;
 
@@ -78,15 +82,14 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
 
        INIT_LIST_HEAD(&unwind_list);
        if (mappable)
-               drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space,
-                                           min_size, alignment, cache_level,
-                                           0, dev_priv->gtt.mappable_end);
+               drm_mm_init_scan_with_range(&vm->mm, min_size,
+                                           alignment, cache_level, 0,
+                                           dev_priv->gtt.mappable_end);
        else
-               drm_mm_init_scan(&dev_priv->mm.gtt_space,
-                                min_size, alignment, cache_level);
+               drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);
 
        /* First see if there is a large enough contiguous idle region... */
-       list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) {
+       list_for_each_entry(obj, &vm->inactive_list, mm_list) {
                if (mark_free(obj, &unwind_list))
                        goto found;
        }
@@ -95,7 +98,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
                goto none;
 
        /* Now merge in the soon-to-be-expired objects... */
-       list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
+       list_for_each_entry(obj, &vm->active_list, mm_list) {
                if (mark_free(obj, &unwind_list))
                        goto found;
        }
@@ -106,8 +109,8 @@ none:
                obj = list_first_entry(&unwind_list,
                                       struct drm_i915_gem_object,
                                       exec_list);
-
-               ret = drm_mm_scan_remove_block(obj->gtt_space);
+               vma = __i915_gem_obj_to_vma(obj);
+               ret = drm_mm_scan_remove_block(&vma->node);
                BUG_ON(ret);
 
                list_del_init(&obj->exec_list);
@@ -127,7 +130,8 @@ found:
                obj = list_first_entry(&unwind_list,
                                       struct drm_i915_gem_object,
                                       exec_list);
-               if (drm_mm_scan_remove_block(obj->gtt_space)) {
+               vma = __i915_gem_obj_to_vma(obj);
+               if (drm_mm_scan_remove_block(&vma->node)) {
                        list_move(&obj->exec_list, &eviction_list);
                        drm_gem_object_reference(&obj->base);
                        continue;
@@ -154,12 +158,13 @@ int
 i915_gem_evict_everything(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
+       struct i915_address_space *vm = &dev_priv->gtt.base;
        struct drm_i915_gem_object *obj, *next;
        bool lists_empty;
        int ret;
 
-       lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
-                      list_empty(&dev_priv->mm.active_list));
+       lists_empty = (list_empty(&vm->inactive_list) &&
+                      list_empty(&vm->active_list));
        if (lists_empty)
                return -ENOSPC;
 
@@ -176,8 +181,7 @@ i915_gem_evict_everything(struct drm_device *dev)
        i915_gem_retire_requests(dev);
 
        /* Having flushed everything, unbind() should never raise an error */
-       list_for_each_entry_safe(obj, next,
-                                &dev_priv->mm.inactive_list, mm_list)
+       list_for_each_entry_safe(obj, next, &vm->inactive_list, mm_list)
                if (obj->pin_count == 0)
                        WARN_ON(i915_gem_object_unbind(obj));
 
index 87a3227e51795ef44ba03933be925a49e1409504..1734825bef3438026f3dc97914f836b00f922fd1 100644 (file)
@@ -188,7 +188,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
                return -ENOENT;
 
        target_i915_obj = to_intel_bo(target_obj);
-       target_offset = target_i915_obj->gtt_offset;
+       target_offset = i915_gem_obj_ggtt_offset(target_i915_obj);
 
        /* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
         * pipe_control writes because the gpu doesn't properly redirect them
@@ -280,7 +280,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
                        return ret;
 
                /* Map the page containing the relocation we're going to perform.  */
-               reloc->offset += obj->gtt_offset;
+               reloc->offset += i915_gem_obj_ggtt_offset(obj);
                reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
                                                      reloc->offset & PAGE_MASK);
                reloc_entry = (uint32_t __iomem *)
@@ -436,8 +436,8 @@ i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
                obj->has_aliasing_ppgtt_mapping = 1;
        }
 
-       if (entry->offset != obj->gtt_offset) {
-               entry->offset = obj->gtt_offset;
+       if (entry->offset != i915_gem_obj_ggtt_offset(obj)) {
+               entry->offset = i915_gem_obj_ggtt_offset(obj);
                *need_reloc = true;
        }
 
@@ -458,7 +458,7 @@ i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
 {
        struct drm_i915_gem_exec_object2 *entry;
 
-       if (!obj->gtt_space)
+       if (!i915_gem_obj_ggtt_bound(obj))
                return;
 
        entry = obj->exec_entry;
@@ -530,7 +530,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
                        struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
                        bool need_fence, need_mappable;
 
-                       if (!obj->gtt_space)
+                       if (!i915_gem_obj_ggtt_bound(obj))
                                continue;
 
                        need_fence =
@@ -539,7 +539,8 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
                                obj->tiling_mode != I915_TILING_NONE;
                        need_mappable = need_fence || need_reloc_mappable(obj);
 
-                       if ((entry->alignment && obj->gtt_offset & (entry->alignment - 1)) ||
+                       if ((entry->alignment &&
+                            i915_gem_obj_ggtt_offset(obj) & (entry->alignment - 1)) ||
                            (need_mappable && !obj->map_and_fenceable))
                                ret = i915_gem_object_unbind(obj);
                        else
@@ -550,7 +551,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 
                /* Bind fresh objects */
                list_for_each_entry(obj, objects, exec_list) {
-                       if (obj->gtt_space)
+                       if (i915_gem_obj_ggtt_bound(obj))
                                continue;
 
                        ret = i915_gem_execbuffer_reserve_object(obj, ring, need_relocs);
@@ -758,8 +759,10 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
                if (!access_ok(VERIFY_WRITE, ptr, length))
                        return -EFAULT;
 
-               if (fault_in_multipages_readable(ptr, length))
-                       return -EFAULT;
+               if (likely(!i915_prefault_disable)) {
+                       if (fault_in_multipages_readable(ptr, length))
+                               return -EFAULT;
+               }
        }
 
        return 0;
@@ -872,7 +875,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                break;
        case I915_EXEC_BSD:
                ring = &dev_priv->ring[VCS];
-               if (ctx_id != 0) {
+               if (ctx_id != DEFAULT_CONTEXT_ID) {
                        DRM_DEBUG("Ring %s doesn't support contexts\n",
                                  ring->name);
                        return -EPERM;
@@ -880,7 +883,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                break;
        case I915_EXEC_BLT:
                ring = &dev_priv->ring[BCS];
-               if (ctx_id != 0) {
+               if (ctx_id != DEFAULT_CONTEXT_ID) {
                        DRM_DEBUG("Ring %s doesn't support contexts\n",
                                  ring->name);
                        return -EPERM;
@@ -888,7 +891,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                break;
        case I915_EXEC_VEBOX:
                ring = &dev_priv->ring[VECS];
-               if (ctx_id != 0) {
+               if (ctx_id != DEFAULT_CONTEXT_ID) {
                        DRM_DEBUG("Ring %s doesn't support contexts\n",
                                  ring->name);
                        return -EPERM;
@@ -972,7 +975,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
        if (ret)
                goto pre_mutex_err;
 
-       if (dev_priv->mm.suspended) {
+       if (dev_priv->ums.mm_suspended) {
                mutex_unlock(&dev->struct_mutex);
                ret = -EBUSY;
                goto pre_mutex_err;
@@ -1058,7 +1061,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                        goto err;
        }
 
-       exec_start = batch_obj->gtt_offset + args->batch_start_offset;
+       exec_start = i915_gem_obj_ggtt_offset(batch_obj) + args->batch_start_offset;
        exec_len = args->batch_len;
        if (cliprects) {
                for (i = 0; i < args->num_cliprects; i++) {
index 5101ab6869b47eef37b5be33e9b4786bc862beca..3b639a94dddf72b1fdebcf2f00e46f9d60c17bb8 100644 (file)
 #include "i915_trace.h"
 #include "intel_drv.h"
 
+#define GEN6_PPGTT_PD_ENTRIES 512
+#define I915_PPGTT_PT_ENTRIES (PAGE_SIZE / sizeof(gen6_gtt_pte_t))
+
 /* PPGTT stuff */
 #define GEN6_GTT_ADDR_ENCODE(addr)     ((addr) | (((addr) >> 28) & 0xff0))
+#define HSW_GTT_ADDR_ENCODE(addr)      ((addr) | (((addr) >> 28) & 0x7f0))
 
 #define GEN6_PDE_VALID                 (1 << 0)
 /* gen6+ has bit 11-4 for physical addr bit 39-32 */
 #define GEN6_PTE_CACHE_LLC             (2 << 1)
 #define GEN6_PTE_CACHE_LLC_MLC         (3 << 1)
 #define GEN6_PTE_ADDR_ENCODE(addr)     GEN6_GTT_ADDR_ENCODE(addr)
+#define HSW_PTE_ADDR_ENCODE(addr)      HSW_GTT_ADDR_ENCODE(addr)
+
+/* Cacheability Control is a 4-bit value. The low three bits are stored in
+ * bits 3:1 of the PTE, while the fourth bit is stored in bit 11 of the PTE.
+ */
+#define HSW_CACHEABILITY_CONTROL(bits) ((((bits) & 0x7) << 1) | \
+                                        (((bits) & 0x8) << (11 - 3)))
+#define HSW_WB_LLC_AGE0                        HSW_CACHEABILITY_CONTROL(0x3)
+#define HSW_WB_ELLC_LLC_AGE0           HSW_CACHEABILITY_CONTROL(0xb)
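
A worked expansion of the split encoding (the arithmetic follows directly from the macro above; not part of the commit):

/* HSW_CACHEABILITY_CONTROL(0xb), i.e. control bits 0b1011:
 *   low three bits: (0xb & 0x7) << 1        = 0x006 (PTE bits 3:1)
 *   fourth bit:     (0xb & 0x8) << (11 - 3) = 0x800 (PTE bit 11)
 * so HSW_WB_ELLC_LLC_AGE0 evaluates to 0x806, while HSW_WB_LLC_AGE0
 * (0x3, fourth bit clear) is just 0x006.
 */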
 
-static gen6_gtt_pte_t gen6_pte_encode(struct drm_device *dev,
-                                     dma_addr_t addr,
+static gen6_gtt_pte_t gen6_pte_encode(dma_addr_t addr,
                                      enum i915_cache_level level)
 {
        gen6_gtt_pte_t pte = GEN6_PTE_VALID;
@@ -69,8 +81,7 @@ static gen6_gtt_pte_t gen6_pte_encode(struct drm_device *dev,
 #define BYT_PTE_WRITEABLE              (1 << 1)
 #define BYT_PTE_SNOOPED_BY_CPU_CACHES  (1 << 2)
 
-static gen6_gtt_pte_t byt_pte_encode(struct drm_device *dev,
-                                    dma_addr_t addr,
+static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
                                     enum i915_cache_level level)
 {
        gen6_gtt_pte_t pte = GEN6_PTE_VALID;
@@ -87,22 +98,33 @@ static gen6_gtt_pte_t byt_pte_encode(struct drm_device *dev,
        return pte;
 }
 
-static gen6_gtt_pte_t hsw_pte_encode(struct drm_device *dev,
-                                    dma_addr_t addr,
+static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr,
                                     enum i915_cache_level level)
 {
        gen6_gtt_pte_t pte = GEN6_PTE_VALID;
-       pte |= GEN6_PTE_ADDR_ENCODE(addr);
+       pte |= HSW_PTE_ADDR_ENCODE(addr);
 
        if (level != I915_CACHE_NONE)
-               pte |= GEN6_PTE_CACHE_LLC;
+               pte |= HSW_WB_LLC_AGE0;
+
+       return pte;
+}
+
+static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
+                                     enum i915_cache_level level)
+{
+       gen6_gtt_pte_t pte = GEN6_PTE_VALID;
+       pte |= HSW_PTE_ADDR_ENCODE(addr);
+
+       if (level != I915_CACHE_NONE)
+               pte |= HSW_WB_ELLC_LLC_AGE0;
 
        return pte;
 }
 
 static void gen6_write_pdes(struct i915_hw_ppgtt *ppgtt)
 {
-       struct drm_i915_private *dev_priv = ppgtt->dev->dev_private;
+       struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private;
        gen6_gtt_pte_t __iomem *pd_addr;
        uint32_t pd_entry;
        int i;
@@ -181,18 +203,18 @@ static int gen6_ppgtt_enable(struct drm_device *dev)
 }
 
 /* PPGTT support for Sandybridge/Gen6 and later */
-static void gen6_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
+static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
                                   unsigned first_entry,
                                   unsigned num_entries)
 {
+       struct i915_hw_ppgtt *ppgtt =
+               container_of(vm, struct i915_hw_ppgtt, base);
        gen6_gtt_pte_t *pt_vaddr, scratch_pte;
        unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
        unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
        unsigned last_pte, i;
 
-       scratch_pte = ppgtt->pte_encode(ppgtt->dev,
-                                       ppgtt->scratch_page_dma_addr,
-                                       I915_CACHE_LLC);
+       scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC);
 
        while (num_entries) {
                last_pte = first_pte + num_entries;
@@ -212,11 +234,13 @@ static void gen6_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
        }
 }
 
-static void gen6_ppgtt_insert_entries(struct i915_hw_ppgtt *ppgtt,
+static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
                                      struct sg_table *pages,
                                      unsigned first_entry,
                                      enum i915_cache_level cache_level)
 {
+       struct i915_hw_ppgtt *ppgtt =
+               container_of(vm, struct i915_hw_ppgtt, base);
        gen6_gtt_pte_t *pt_vaddr;
        unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
        unsigned act_pte = first_entry % I915_PPGTT_PT_ENTRIES;
@@ -227,8 +251,7 @@ static void gen6_ppgtt_insert_entries(struct i915_hw_ppgtt *ppgtt,
                dma_addr_t page_addr;
 
                page_addr = sg_page_iter_dma_address(&sg_iter);
-               pt_vaddr[act_pte] = ppgtt->pte_encode(ppgtt->dev, page_addr,
-                                                     cache_level);
+               pt_vaddr[act_pte] = vm->pte_encode(page_addr, cache_level);
                if (++act_pte == I915_PPGTT_PT_ENTRIES) {
                        kunmap_atomic(pt_vaddr);
                        act_pt++;
@@ -240,13 +263,17 @@ static void gen6_ppgtt_insert_entries(struct i915_hw_ppgtt *ppgtt,
        kunmap_atomic(pt_vaddr);
 }
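
To make the act_pt/act_pte split concrete (an editor's example, assuming 4 KiB pages and 4-byte gen6 PTEs, so I915_PPGTT_PT_ENTRIES = 4096 / 4 = 1024):

/* first_entry = 3000:
 *   act_pt  = 3000 / 1024 = 2    -- third page table
 *   act_pte = 3000 % 1024 = 952  -- slot within that table
 * After slot 1023 the walk kunmaps the current table and maps the next.
 */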
 
-static void gen6_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt)
+static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
 {
+       struct i915_hw_ppgtt *ppgtt =
+               container_of(vm, struct i915_hw_ppgtt, base);
        int i;
 
+       drm_mm_takedown(&ppgtt->base.mm);
+
        if (ppgtt->pt_dma_addr) {
                for (i = 0; i < ppgtt->num_pd_entries; i++)
-                       pci_unmap_page(ppgtt->dev->pdev,
+                       pci_unmap_page(ppgtt->base.dev->pdev,
                                       ppgtt->pt_dma_addr[i],
                                       4096, PCI_DMA_BIDIRECTIONAL);
        }
@@ -260,7 +287,7 @@ static void gen6_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt)
 
 static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
 {
-       struct drm_device *dev = ppgtt->dev;
+       struct drm_device *dev = ppgtt->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned first_pd_entry_in_global_pt;
        int i;
@@ -272,17 +299,18 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
        first_pd_entry_in_global_pt = gtt_total_entries(dev_priv->gtt);
 
        if (IS_HASWELL(dev)) {
-               ppgtt->pte_encode = hsw_pte_encode;
+               ppgtt->base.pte_encode = hsw_pte_encode;
        } else if (IS_VALLEYVIEW(dev)) {
-               ppgtt->pte_encode = byt_pte_encode;
+               ppgtt->base.pte_encode = byt_pte_encode;
        } else {
-               ppgtt->pte_encode = gen6_pte_encode;
+               ppgtt->base.pte_encode = gen6_pte_encode;
        }
-       ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES;
+       ppgtt->num_pd_entries = GEN6_PPGTT_PD_ENTRIES;
        ppgtt->enable = gen6_ppgtt_enable;
-       ppgtt->clear_range = gen6_ppgtt_clear_range;
-       ppgtt->insert_entries = gen6_ppgtt_insert_entries;
-       ppgtt->cleanup = gen6_ppgtt_cleanup;
+       ppgtt->base.clear_range = gen6_ppgtt_clear_range;
+       ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
+       ppgtt->base.cleanup = gen6_ppgtt_cleanup;
+       ppgtt->base.scratch = dev_priv->gtt.base.scratch;
        ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries,
                                  GFP_KERNEL);
        if (!ppgtt->pt_pages)
@@ -313,8 +341,8 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
                ppgtt->pt_dma_addr[i] = pt_addr;
        }
 
-       ppgtt->clear_range(ppgtt, 0,
-                          ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES);
+       ppgtt->base.clear_range(&ppgtt->base, 0,
+                               ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES);
 
        ppgtt->pd_offset = first_pd_entry_in_global_pt * sizeof(gen6_gtt_pte_t);
 
@@ -347,8 +375,7 @@ static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
        if (!ppgtt)
                return -ENOMEM;
 
-       ppgtt->dev = dev;
-       ppgtt->scratch_page_dma_addr = dev_priv->gtt.scratch_page_dma;
+       ppgtt->base.dev = dev;
 
        if (INTEL_INFO(dev)->gen < 8)
                ret = gen6_ppgtt_init(ppgtt);
@@ -357,8 +384,11 @@ static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
 
        if (ret)
                kfree(ppgtt);
-       else
+       else {
                dev_priv->mm.aliasing_ppgtt = ppgtt;
+               drm_mm_init(&ppgtt->base.mm, ppgtt->base.start,
+                           ppgtt->base.total);
+       }
 
        return ret;
 }
@@ -371,7 +401,7 @@ void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
        if (!ppgtt)
                return;
 
-       ppgtt->cleanup(ppgtt);
+       ppgtt->base.cleanup(&ppgtt->base);
        dev_priv->mm.aliasing_ppgtt = NULL;
 }
 
@@ -379,17 +409,17 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
                            struct drm_i915_gem_object *obj,
                            enum i915_cache_level cache_level)
 {
-       ppgtt->insert_entries(ppgtt, obj->pages,
-                             obj->gtt_space->start >> PAGE_SHIFT,
-                             cache_level);
+       ppgtt->base.insert_entries(&ppgtt->base, obj->pages,
+                                  i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
+                                  cache_level);
 }
 
 void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
                              struct drm_i915_gem_object *obj)
 {
-       ppgtt->clear_range(ppgtt,
-                          obj->gtt_space->start >> PAGE_SHIFT,
-                          obj->base.size >> PAGE_SHIFT);
+       ppgtt->base.clear_range(&ppgtt->base,
+                               i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
+                               obj->base.size >> PAGE_SHIFT);
 }
 
 extern int intel_iommu_gfx_mapped;
@@ -436,8 +466,9 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
        struct drm_i915_gem_object *obj;
 
        /* First fill our portion of the GTT with scratch pages */
-       dev_priv->gtt.gtt_clear_range(dev, dev_priv->gtt.start / PAGE_SIZE,
-                                     dev_priv->gtt.total / PAGE_SIZE);
+       dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
+                                      dev_priv->gtt.base.start / PAGE_SIZE,
+                                      dev_priv->gtt.base.total / PAGE_SIZE);
 
        list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
                i915_gem_clflush_object(obj);
@@ -466,12 +497,12 @@ int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
  * within the global GTT as well as accessible by the GPU through the GMADR
  * mapped BAR (dev_priv->mm.gtt->gtt).
  */
-static void gen6_ggtt_insert_entries(struct drm_device *dev,
+static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
                                     struct sg_table *st,
                                     unsigned int first_entry,
                                     enum i915_cache_level level)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = vm->dev->dev_private;
        gen6_gtt_pte_t __iomem *gtt_entries =
                (gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
        int i = 0;
@@ -480,8 +511,7 @@ static void gen6_ggtt_insert_entries(struct drm_device *dev,
 
        for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
                addr = sg_page_iter_dma_address(&sg_iter);
-               iowrite32(dev_priv->gtt.pte_encode(dev, addr, level),
-                         &gtt_entries[i]);
+               iowrite32(vm->pte_encode(addr, level), &gtt_entries[i]);
                i++;
        }
 
@@ -492,8 +522,8 @@ static void gen6_ggtt_insert_entries(struct drm_device *dev,
         * hardware should work, we must keep this posting read for paranoia.
         */
        if (i != 0)
-               WARN_ON(readl(&gtt_entries[i-1])
-                       != dev_priv->gtt.pte_encode(dev, addr, level));
+               WARN_ON(readl(&gtt_entries[i-1]) !=
+                       vm->pte_encode(addr, level));
 
        /* This next bit makes the above posting read even more important. We
         * want to flush the TLBs only after we're certain all the PTE updates
@@ -503,11 +533,11 @@ static void gen6_ggtt_insert_entries(struct drm_device *dev,
        POSTING_READ(GFX_FLSH_CNTL_GEN6);
 }
 
-static void gen6_ggtt_clear_range(struct drm_device *dev,
+static void gen6_ggtt_clear_range(struct i915_address_space *vm,
                                  unsigned int first_entry,
                                  unsigned int num_entries)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = vm->dev->dev_private;
        gen6_gtt_pte_t scratch_pte, __iomem *gtt_base =
                (gen6_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
        const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
@@ -518,16 +548,14 @@ static void gen6_ggtt_clear_range(struct drm_device *dev,
                 first_entry, num_entries, max_entries))
                num_entries = max_entries;
 
-       scratch_pte = dev_priv->gtt.pte_encode(dev,
-                                              dev_priv->gtt.scratch_page_dma,
-                                              I915_CACHE_LLC);
+       scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC);
        for (i = 0; i < num_entries; i++)
                iowrite32(scratch_pte, &gtt_base[i]);
        readl(gtt_base);
 }
 
 
-static void i915_ggtt_insert_entries(struct drm_device *dev,
+static void i915_ggtt_insert_entries(struct i915_address_space *vm,
                                     struct sg_table *st,
                                     unsigned int pg_start,
                                     enum i915_cache_level cache_level)
@@ -539,7 +567,7 @@ static void i915_ggtt_insert_entries(struct drm_device *dev,
 
 }
 
-static void i915_ggtt_clear_range(struct drm_device *dev,
+static void i915_ggtt_clear_range(struct i915_address_space *vm,
                                  unsigned int first_entry,
                                  unsigned int num_entries)
 {
@@ -552,10 +580,11 @@ void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
 {
        struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
+       const unsigned long entry = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT;
 
-       dev_priv->gtt.gtt_insert_entries(dev, obj->pages,
-                                        obj->gtt_space->start >> PAGE_SHIFT,
-                                        cache_level);
+       dev_priv->gtt.base.insert_entries(&dev_priv->gtt.base, obj->pages,
+                                         entry,
+                                         cache_level);
 
        obj->has_global_gtt_mapping = 1;
 }
@@ -564,10 +593,11 @@ void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
 {
        struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
+       const unsigned long entry = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT;
 
-       dev_priv->gtt.gtt_clear_range(obj->base.dev,
-                                     obj->gtt_space->start >> PAGE_SHIFT,
-                                     obj->base.size >> PAGE_SHIFT);
+       dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
+                                      entry,
+                                      obj->base.size >> PAGE_SHIFT);
 
        obj->has_global_gtt_mapping = 0;
 }
@@ -626,37 +656,42 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
        BUG_ON(mappable_end > end);
 
        /* Subtract the guard page ... */
-       drm_mm_init(&dev_priv->mm.gtt_space, start, end - start - PAGE_SIZE);
+       drm_mm_init(&dev_priv->gtt.base.mm, start, end - start - PAGE_SIZE);
        if (!HAS_LLC(dev))
-               dev_priv->mm.gtt_space.color_adjust = i915_gtt_color_adjust;
+               dev_priv->gtt.base.mm.color_adjust = i915_gtt_color_adjust;
 
        /* Mark any preallocated objects as occupied */
        list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
-               DRM_DEBUG_KMS("reserving preallocated space: %x + %zx\n",
-                             obj->gtt_offset, obj->base.size);
-
-               BUG_ON(obj->gtt_space != I915_GTT_RESERVED);
-               obj->gtt_space = drm_mm_create_block(&dev_priv->mm.gtt_space,
-                                                    obj->gtt_offset,
-                                                    obj->base.size,
-                                                    false);
+               struct i915_vma *vma = __i915_gem_obj_to_vma(obj);
+               int ret;
+               DRM_DEBUG_KMS("reserving preallocated space: %lx + %zx\n",
+                             i915_gem_obj_ggtt_offset(obj), obj->base.size);
+
+               WARN_ON(i915_gem_obj_ggtt_bound(obj));
+               ret = drm_mm_reserve_node(&dev_priv->gtt.base.mm, &vma->node);
+               if (ret)
+                       DRM_DEBUG_KMS("Reservation failed\n");
                obj->has_global_gtt_mapping = 1;
+               list_add(&vma->vma_link, &obj->vma_list);
        }
 
-       dev_priv->gtt.start = start;
-       dev_priv->gtt.total = end - start;
+       dev_priv->gtt.base.start = start;
+       dev_priv->gtt.base.total = end - start;
 
        /* Clear any non-preallocated blocks */
-       drm_mm_for_each_hole(entry, &dev_priv->mm.gtt_space,
+       drm_mm_for_each_hole(entry, &dev_priv->gtt.base.mm,
                             hole_start, hole_end) {
+               const unsigned long count = (hole_end - hole_start) / PAGE_SIZE;
                DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
                              hole_start, hole_end);
-               dev_priv->gtt.gtt_clear_range(dev, hole_start / PAGE_SIZE,
-                                             (hole_end-hole_start) / PAGE_SIZE);
+               dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
+                                              hole_start / PAGE_SIZE,
+                                              count);
        }
 
        /* And finally clear the reserved guard page */
-       dev_priv->gtt.gtt_clear_range(dev, end / PAGE_SIZE - 1, 1);
+       dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
+                                      end / PAGE_SIZE - 1, 1);
 }
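
For the guard-page arithmetic above, a concrete case (sizes assumed for illustration):

/* With start = 0 and end = 256 MiB, drm_mm manages [0, 256 MiB - 4 KiB)
 * and the final clear_range() points PTE index
 * end / PAGE_SIZE - 1 = 65535 -- the guard page -- at scratch.
 */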
 
 static bool
@@ -679,7 +714,7 @@ void i915_gem_init_global_gtt(struct drm_device *dev)
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long gtt_size, mappable_size;
 
-       gtt_size = dev_priv->gtt.total;
+       gtt_size = dev_priv->gtt.base.total;
        mappable_size = dev_priv->gtt.mappable_end;
 
        if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
@@ -688,7 +723,7 @@ void i915_gem_init_global_gtt(struct drm_device *dev)
                if (INTEL_INFO(dev)->gen <= 7) {
                        /* PPGTT pdes are stolen from global gtt ptes, so shrink the
                         * aperture accordingly when using aliasing ppgtt. */
-                       gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
+                       gtt_size -= GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE;
                }
 
                i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
@@ -698,8 +733,8 @@ void i915_gem_init_global_gtt(struct drm_device *dev)
                        return;
 
                DRM_ERROR("Aliased PPGTT setup failed %d\n", ret);
-               drm_mm_takedown(&dev_priv->mm.gtt_space);
-               gtt_size += I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
+               drm_mm_takedown(&dev_priv->gtt.base.mm);
+               gtt_size += GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE;
        }
        i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
 }
@@ -724,8 +759,8 @@ static int setup_scratch_page(struct drm_device *dev)
 #else
        dma_addr = page_to_phys(page);
 #endif
-       dev_priv->gtt.scratch_page = page;
-       dev_priv->gtt.scratch_page_dma = dma_addr;
+       dev_priv->gtt.base.scratch.page = page;
+       dev_priv->gtt.base.scratch.addr = dma_addr;
 
        return 0;
 }
@@ -733,11 +768,13 @@ static int setup_scratch_page(struct drm_device *dev)
 static void teardown_scratch_page(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       set_pages_wb(dev_priv->gtt.scratch_page, 1);
-       pci_unmap_page(dev->pdev, dev_priv->gtt.scratch_page_dma,
+       struct page *page = dev_priv->gtt.base.scratch.page;
+
+       set_pages_wb(page, 1);
+       pci_unmap_page(dev->pdev, dev_priv->gtt.base.scratch.addr,
                       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-       put_page(dev_priv->gtt.scratch_page);
-       __free_page(dev_priv->gtt.scratch_page);
+       put_page(page);
+       __free_page(page);
 }
 
 static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
@@ -800,17 +837,18 @@ static int gen6_gmch_probe(struct drm_device *dev,
        if (ret)
                DRM_ERROR("Scratch setup failed\n");
 
-       dev_priv->gtt.gtt_clear_range = gen6_ggtt_clear_range;
-       dev_priv->gtt.gtt_insert_entries = gen6_ggtt_insert_entries;
+       dev_priv->gtt.base.clear_range = gen6_ggtt_clear_range;
+       dev_priv->gtt.base.insert_entries = gen6_ggtt_insert_entries;
 
        return ret;
 }
 
-static void gen6_gmch_remove(struct drm_device *dev)
+static void gen6_gmch_remove(struct i915_address_space *vm)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       iounmap(dev_priv->gtt.gsm);
-       teardown_scratch_page(dev_priv->dev);
+
+       struct i915_gtt *gtt = container_of(vm, struct i915_gtt, base);
+       iounmap(gtt->gsm);
+       teardown_scratch_page(vm->dev);
 }
 
 static int i915_gmch_probe(struct drm_device *dev,
@@ -831,13 +869,13 @@ static int i915_gmch_probe(struct drm_device *dev,
        intel_gtt_get(gtt_total, stolen, mappable_base, mappable_end);
 
        dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev);
-       dev_priv->gtt.gtt_clear_range = i915_ggtt_clear_range;
-       dev_priv->gtt.gtt_insert_entries = i915_ggtt_insert_entries;
+       dev_priv->gtt.base.clear_range = i915_ggtt_clear_range;
+       dev_priv->gtt.base.insert_entries = i915_ggtt_insert_entries;
 
        return 0;
 }
 
-static void i915_gmch_remove(struct drm_device *dev)
+static void i915_gmch_remove(struct i915_address_space *vm)
 {
        intel_gmch_remove();
 }
@@ -849,34 +887,33 @@ int i915_gem_gtt_init(struct drm_device *dev)
        int ret;
 
        if (INTEL_INFO(dev)->gen <= 5) {
-               dev_priv->gtt.gtt_probe = i915_gmch_probe;
-               dev_priv->gtt.gtt_remove = i915_gmch_remove;
+               gtt->gtt_probe = i915_gmch_probe;
+               gtt->base.cleanup = i915_gmch_remove;
        } else {
-               dev_priv->gtt.gtt_probe = gen6_gmch_probe;
-               dev_priv->gtt.gtt_remove = gen6_gmch_remove;
-               if (IS_HASWELL(dev)) {
-                       dev_priv->gtt.pte_encode = hsw_pte_encode;
-               } else if (IS_VALLEYVIEW(dev)) {
-                       dev_priv->gtt.pte_encode = byt_pte_encode;
-               } else {
-                       dev_priv->gtt.pte_encode = gen6_pte_encode;
-               }
+               gtt->gtt_probe = gen6_gmch_probe;
+               gtt->base.cleanup = gen6_gmch_remove;
+               if (IS_HASWELL(dev) && dev_priv->ellc_size)
+                       gtt->base.pte_encode = iris_pte_encode;
+               else if (IS_HASWELL(dev))
+                       gtt->base.pte_encode = hsw_pte_encode;
+               else if (IS_VALLEYVIEW(dev))
+                       gtt->base.pte_encode = byt_pte_encode;
+               else
+                       gtt->base.pte_encode = gen6_pte_encode;
        }
 
-       ret = dev_priv->gtt.gtt_probe(dev, &dev_priv->gtt.total,
-                                    &dev_priv->gtt.stolen_size,
-                                    &gtt->mappable_base,
-                                    &gtt->mappable_end);
+       ret = gtt->gtt_probe(dev, &gtt->base.total, &gtt->stolen_size,
+                            &gtt->mappable_base, &gtt->mappable_end);
        if (ret)
                return ret;
 
+       gtt->base.dev = dev;
+
        /* GMADR is the PCI mmio aperture into the global GTT. */
        DRM_INFO("Memory usable by graphics device = %zdM\n",
-                dev_priv->gtt.total >> 20);
-       DRM_DEBUG_DRIVER("GMADR size = %ldM\n",
-                        dev_priv->gtt.mappable_end >> 20);
-       DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n",
-                        dev_priv->gtt.stolen_size >> 20);
+                gtt->base.total >> 20);
+       DRM_DEBUG_DRIVER("GMADR size = %ldM\n", gtt->mappable_end >> 20);
+       DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", gtt->stolen_size >> 20);
 
        return 0;
 }
index 982d4732cecff93e45370f30dfab03cc1bfdb32a..4bbde2ae1819c0025926aa0e158c646099f55e13 100644 (file)
 static unsigned long i915_stolen_to_physical(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct pci_dev *pdev = dev_priv->bridge_dev;
+       struct resource *r;
        u32 base;
 
-       /* On the machines I have tested the Graphics Base of Stolen Memory
-        * is unreliable, so on those compute the base by subtracting the
-        * stolen memory from the Top of Low Usable DRAM which is where the
-        * BIOS places the graphics stolen memory.
+       /* Almost universally we can find the Graphics Base of Stolen Memory
+        * at offset 0x5c in the igfx configuration space. On a few (desktop)
+        * machines this is also mirrored in the bridge device at different
+        * locations, or in the MCHBAR. On gen2, the layout is again slightly
+        * different with the Graphics Segment immediately following Top of
+        * Memory (or Top of Usable DRAM). Note it appears that TOUD is only
+        * reported by 865g, so we just use the top of memory as determined
+        * by the e820 probe.
         *
-        * On gen2, the layout is slightly different with the Graphics Segment
-        * immediately following Top of Memory (or Top of Usable DRAM). Note
-        * it appears that TOUD is only reported by 865g, so we just use the
-        * top of memory as determined by the e820 probe.
-        *
-        * XXX gen2 requires an unavailable symbol and 945gm fails with
-        * its value of TOLUD.
+        * XXX However gen2 requires an unavailable symbol.
         */
        base = 0;
-       if (IS_VALLEYVIEW(dev)) {
+       if (INTEL_INFO(dev)->gen >= 3) {
+               /* Read Graphics Base of Stolen Memory directly */
                pci_read_config_dword(dev->pdev, 0x5c, &base);
                base &= ~((1<<20) - 1);
-       } else if (INTEL_INFO(dev)->gen >= 6) {
-               /* Read Base Data of Stolen Memory Register (BDSM) directly.
-                * Note that there is also an MCHBAR mirror at 0x1080c0 or
-                * we could use device 2:0x5c instead.
-               */
-               pci_read_config_dword(pdev, 0xB0, &base);
-               base &= ~4095; /* lower bits used for locking register */
-       } else if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
-               /* Read Graphics Base of Stolen Memory directly */
-               pci_read_config_dword(pdev, 0xA4, &base);
+       } else { /* GEN2 */
 #if 0
-       } else if (IS_GEN3(dev)) {
-               u8 val;
-               /* Stolen is immediately below Top of Low Usable DRAM */
-               pci_read_config_byte(pdev, 0x9c, &val);
-               base = val >> 3 << 27;
-               base -= dev_priv->mm.gtt->stolen_size;
-       } else {
                /* Stolen is immediately above Top of Memory */
                base = max_low_pfn_mapped << PAGE_SHIFT;
 #endif
        }
 
+       if (base == 0)
+               return 0;
+
+       /* Verify that nothing else uses this physical address. Stolen
+        * memory should be reserved by the BIOS and hidden from the
+        * kernel. So if the region is already marked as busy, something
+        * is seriously wrong.
+        */
+       r = devm_request_mem_region(dev->dev, base, dev_priv->gtt.stolen_size,
+                                   "Graphics Stolen Memory");
+       if (r == NULL) {
+               DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
+                         base, base + (uint32_t)dev_priv->gtt.stolen_size);
+               base = 0;
+       }
+
        return base;
 }
 
@@ -120,7 +119,7 @@ static int i915_setup_compression(struct drm_device *dev, int size)
                if (!compressed_llb)
                        goto err_fb;
 
-               dev_priv->compressed_llb = compressed_llb;
+               dev_priv->fbc.compressed_llb = compressed_llb;
 
                I915_WRITE(FBC_CFB_BASE,
                           dev_priv->mm.stolen_base + compressed_fb->start);
@@ -128,8 +127,8 @@ static int i915_setup_compression(struct drm_device *dev, int size)
                           dev_priv->mm.stolen_base + compressed_llb->start);
        }
 
-       dev_priv->compressed_fb = compressed_fb;
-       dev_priv->cfb_size = size;
+       dev_priv->fbc.compressed_fb = compressed_fb;
+       dev_priv->fbc.size = size;
 
        DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n",
                      size);
@@ -150,7 +149,7 @@ int i915_gem_stolen_setup_compression(struct drm_device *dev, int size)
        if (!drm_mm_initialized(&dev_priv->mm.stolen))
                return -ENODEV;
 
-       if (size < dev_priv->cfb_size)
+       if (size < dev_priv->fbc.size)
                return 0;
 
        /* Release any current block */
@@ -163,16 +162,16 @@ void i915_gem_stolen_cleanup_compression(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
-       if (dev_priv->cfb_size == 0)
+       if (dev_priv->fbc.size == 0)
                return;
 
-       if (dev_priv->compressed_fb)
-               drm_mm_put_block(dev_priv->compressed_fb);
+       if (dev_priv->fbc.compressed_fb)
+               drm_mm_put_block(dev_priv->fbc.compressed_fb);
 
-       if (dev_priv->compressed_llb)
-               drm_mm_put_block(dev_priv->compressed_llb);
+       if (dev_priv->fbc.compressed_llb)
+               drm_mm_put_block(dev_priv->fbc.compressed_llb);
 
-       dev_priv->cfb_size = 0;
+       dev_priv->fbc.size = 0;
 }
 
 void i915_gem_cleanup_stolen(struct drm_device *dev)
@@ -201,6 +200,9 @@ int i915_gem_init_stolen(struct drm_device *dev)
        if (IS_VALLEYVIEW(dev))
                bios_reserved = 1024*1024; /* top 1M on VLV/BYT */
 
+       if (WARN_ON(bios_reserved > dev_priv->gtt.stolen_size))
+               return 0;
+
        /* Basic memrange allocator for stolen space */
        drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_size -
                    bios_reserved);
@@ -331,8 +333,11 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
                                               u32 size)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
+       struct i915_address_space *vm = &dev_priv->gtt.base;
        struct drm_i915_gem_object *obj;
        struct drm_mm_node *stolen;
+       struct i915_vma *vma;
+       int ret;
 
        if (!drm_mm_initialized(&dev_priv->mm.stolen))
                return NULL;
@@ -347,11 +352,16 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
        if (WARN_ON(size == 0))
                return NULL;
 
-       stolen = drm_mm_create_block(&dev_priv->mm.stolen,
-                                    stolen_offset, size,
-                                    false);
-       if (stolen == NULL) {
+       stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
+       if (!stolen)
+               return NULL;
+
+       stolen->start = stolen_offset;
+       stolen->size = size;
+       ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen);
+       if (ret) {
                DRM_DEBUG_KMS("failed to allocate stolen space\n");
+               kfree(stolen);
                return NULL;
        }
 
@@ -363,33 +373,42 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
        }
 
        /* Some objects just need physical mem from stolen space */
-       if (gtt_offset == -1)
+       if (gtt_offset == I915_GTT_OFFSET_NONE)
                return obj;
 
+       vma = i915_gem_vma_create(obj, &dev_priv->gtt.base);
+       if (IS_ERR(vma)) {
+               ret = PTR_ERR(vma);
+               goto err_out;
+       }
+
        /* To simplify the initialisation sequence between KMS and GTT,
         * we allow construction of the stolen object prior to
         * setting up the GTT space. The actual reservation will occur
         * later.
         */
-       if (drm_mm_initialized(&dev_priv->mm.gtt_space)) {
-               obj->gtt_space = drm_mm_create_block(&dev_priv->mm.gtt_space,
-                                                    gtt_offset, size,
-                                                    false);
-               if (obj->gtt_space == NULL) {
+       vma->node.start = gtt_offset;
+       vma->node.size = size;
+       if (drm_mm_initialized(&dev_priv->gtt.base.mm)) {
+               ret = drm_mm_reserve_node(&dev_priv->gtt.base.mm, &vma->node);
+               if (ret) {
                        DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
-                       drm_gem_object_unreference(&obj->base);
-                       return NULL;
+                       i915_gem_vma_destroy(vma);
+                       goto err_out;
                }
-       } else
-               obj->gtt_space = I915_GTT_RESERVED;
+       }
 
-       obj->gtt_offset = gtt_offset;
        obj->has_global_gtt_mapping = 1;
 
        list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
-       list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
+       list_add_tail(&obj->mm_list, &vm->inactive_list);
 
        return obj;
+
+err_out:
+       drm_mm_put_block(stolen);
+       drm_gem_object_unreference(&obj->base);
+       return NULL;
 }
 
 void
index 537545be69db89fb8c2d19e692a94cca0b418bf8..92a8d279ca39749d6582cb412e874d21c07416cd 100644 (file)
@@ -268,18 +268,18 @@ i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
                return true;
 
        if (INTEL_INFO(obj->base.dev)->gen == 3) {
-               if (obj->gtt_offset & ~I915_FENCE_START_MASK)
+               if (i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK)
                        return false;
        } else {
-               if (obj->gtt_offset & ~I830_FENCE_START_MASK)
+               if (i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK)
                        return false;
        }
 
        size = i915_gem_get_gtt_size(obj->base.dev, obj->base.size, tiling_mode);
-       if (obj->gtt_space->size != size)
+       if (i915_gem_obj_ggtt_size(obj) != size)
                return false;
 
-       if (obj->gtt_offset & (size - 1))
+       if (i915_gem_obj_ggtt_offset(obj) & (size - 1))
                return false;
 
        return true;
@@ -359,8 +359,8 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
                 */
 
                obj->map_and_fenceable =
-                       obj->gtt_space == NULL ||
-                       (obj->gtt_offset + obj->base.size <= dev_priv->gtt.mappable_end &&
+                       !i915_gem_obj_ggtt_bound(obj) ||
+                       (i915_gem_obj_ggtt_offset(obj) + obj->base.size <= dev_priv->gtt.mappable_end &&
                         i915_gem_object_fence_ok(obj, args->tiling_mode));
 
                /* Rebind if we need a change of alignment */
@@ -369,7 +369,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
                                i915_gem_get_gtt_alignment(dev, obj->base.size,
                                                            args->tiling_mode,
                                                            false);
-                       if (obj->gtt_offset & (unfenced_alignment - 1))
+                       if (i915_gem_obj_ggtt_offset(obj) & (unfenced_alignment - 1))
                                ret = i915_gem_object_unbind(obj);
                }
 
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
new file mode 100644 (file)
index 0000000..d970d84
--- /dev/null
@@ -0,0 +1,973 @@
+/*
+ * Copyright (c) 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Eric Anholt <eric@anholt.net>
+ *    Keith Packard <keithp@keithp.com>
+ *    Mika Kuoppala <mika.kuoppala@intel.com>
+ *
+ */
+
+#include <generated/utsrelease.h>
+#include "i915_drv.h"
+
+static const char *yesno(int v)
+{
+       return v ? "yes" : "no";
+}
+
+static const char *ring_str(int ring)
+{
+       switch (ring) {
+       case RCS: return "render";
+       case VCS: return "bsd";
+       case BCS: return "blt";
+       case VECS: return "vebox";
+       default: return "";
+       }
+}
+
+static const char *pin_flag(int pinned)
+{
+       if (pinned > 0)
+               return " P";
+       else if (pinned < 0)
+               return " p";
+       else
+               return "";
+}
+
+static const char *tiling_flag(int tiling)
+{
+       switch (tiling) {
+       default:
+       case I915_TILING_NONE: return "";
+       case I915_TILING_X: return " X";
+       case I915_TILING_Y: return " Y";
+       }
+}
+
+static const char *dirty_flag(int dirty)
+{
+       return dirty ? " dirty" : "";
+}
+
+static const char *purgeable_flag(int purgeable)
+{
+       return purgeable ? " purgeable" : "";
+}
+
+static bool __i915_error_ok(struct drm_i915_error_state_buf *e)
+{
+
+       if (!e->err && WARN(e->bytes > (e->size - 1), "overflow")) {
+               e->err = -ENOSPC;
+               return false;
+       }
+
+       if (e->bytes == e->size - 1 || e->err)
+               return false;
+
+       return true;
+}
+
+static bool __i915_error_seek(struct drm_i915_error_state_buf *e,
+                             unsigned len)
+{
+       if (e->pos + len <= e->start) {
+               e->pos += len;
+               return false;
+       }
+
+       /* First vsnprintf needs to fit in its entirety for memmove */
+       if (len >= e->size) {
+               e->err = -EIO;
+               return false;
+       }
+
+       return true;
+}
+
+static void __i915_error_advance(struct drm_i915_error_state_buf *e,
+                                unsigned len)
+{
+       /* If this is the first printf in this window, adjust it so that
+        * the start position matches the start of the buffer.
+        */
+
+       if (e->pos < e->start) {
+               const size_t off = e->start - e->pos;
+
+               /* Should not happen but be paranoid */
+               if (off > len || e->bytes) {
+                       e->err = -EIO;
+                       return;
+               }
+
+               memmove(e->buf, e->buf + off, len - off);
+               e->bytes = len - off;
+               e->pos = e->start;
+               return;
+       }
+
+       e->bytes += len;
+       e->pos += len;
+}
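+
+/*
+ * Worked example of the windowing above (illustrative numbers only, not
+ * taken from a real capture): with e->start = 4096 and an empty buffer,
+ * a printf of len = 4200 starting at e->pos = 0 straddles the window
+ * start, so __i915_error_advance() memmoves the last 4200 - 4096 = 104
+ * bytes to the head of the buffer and resumes from there. Printfs that
+ * end before e->start are skipped entirely by __i915_error_seek().
+ */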
+
+static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
+                              const char *f, va_list args)
+{
+       unsigned len;
+
+       if (!__i915_error_ok(e))
+               return;
+
+       /* Seek to the first printf that hits the start position */
+       if (e->pos < e->start) {
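+               /* A NULL-buffer vsnprintf only computes the would-be
+                * length without writing anything, so output that lies
+                * entirely before the window can be skipped cheaply.
+                */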
+               len = vsnprintf(NULL, 0, f, args);
+               if (!__i915_error_seek(e, len))
+                       return;
+       }
+
+       len = vsnprintf(e->buf + e->bytes, e->size - e->bytes, f, args);
+       if (len >= e->size - e->bytes)
+               len = e->size - e->bytes - 1;
+
+       __i915_error_advance(e, len);
+}
+
+static void i915_error_puts(struct drm_i915_error_state_buf *e,
+                           const char *str)
+{
+       unsigned len;
+
+       if (!__i915_error_ok(e))
+               return;
+
+       len = strlen(str);
+
+       /* Seek to the first printf that hits the start position */
+       if (e->pos < e->start) {
+               if (!__i915_error_seek(e, len))
+                       return;
+       }
+
+       if (len >= e->size - e->bytes)
+               len = e->size - e->bytes - 1;
+       memcpy(e->buf + e->bytes, str, len);
+
+       __i915_error_advance(e, len);
+}
+
+#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
+#define err_puts(e, s) i915_error_puts(e, s)
+
+static void print_error_buffers(struct drm_i915_error_state_buf *m,
+                               const char *name,
+                               struct drm_i915_error_buffer *err,
+                               int count)
+{
+       err_printf(m, "%s [%d]:\n", name, count);
+
+       while (count--) {
+               err_printf(m, "  %08x %8u %02x %02x %x %x",
+                          err->gtt_offset,
+                          err->size,
+                          err->read_domains,
+                          err->write_domain,
+                          err->rseqno, err->wseqno);
+               err_puts(m, pin_flag(err->pinned));
+               err_puts(m, tiling_flag(err->tiling));
+               err_puts(m, dirty_flag(err->dirty));
+               err_puts(m, purgeable_flag(err->purgeable));
+               err_puts(m, err->ring != -1 ? " " : "");
+               err_puts(m, ring_str(err->ring));
+               err_puts(m, i915_cache_level_str(err->cache_level));
+
+               if (err->name)
+                       err_printf(m, " (name: %d)", err->name);
+               if (err->fence_reg != I915_FENCE_REG_NONE)
+                       err_printf(m, " (fence: %d)", err->fence_reg);
+
+               err_puts(m, "\n");
+               err++;
+       }
+}
+
+static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
+                                 struct drm_device *dev,
+                                 struct drm_i915_error_state *error,
+                                 unsigned ring)
+{
+       BUG_ON(ring >= I915_NUM_RINGS); /* shut up confused gcc */
+       err_printf(m, "%s command stream:\n", ring_str(ring));
+       err_printf(m, "  HEAD: 0x%08x\n", error->head[ring]);
+       err_printf(m, "  TAIL: 0x%08x\n", error->tail[ring]);
+       err_printf(m, "  CTL: 0x%08x\n", error->ctl[ring]);
+       err_printf(m, "  ACTHD: 0x%08x\n", error->acthd[ring]);
+       err_printf(m, "  IPEIR: 0x%08x\n", error->ipeir[ring]);
+       err_printf(m, "  IPEHR: 0x%08x\n", error->ipehr[ring]);
+       err_printf(m, "  INSTDONE: 0x%08x\n", error->instdone[ring]);
+       if (ring == RCS && INTEL_INFO(dev)->gen >= 4)
+               err_printf(m, "  BBADDR: 0x%08llx\n", error->bbaddr);
+
+       if (INTEL_INFO(dev)->gen >= 4)
+               err_printf(m, "  INSTPS: 0x%08x\n", error->instps[ring]);
+       err_printf(m, "  INSTPM: 0x%08x\n", error->instpm[ring]);
+       err_printf(m, "  FADDR: 0x%08x\n", error->faddr[ring]);
+       if (INTEL_INFO(dev)->gen >= 6) {
+               err_printf(m, "  RC PSMI: 0x%08x\n", error->rc_psmi[ring]);
+               err_printf(m, "  FAULT_REG: 0x%08x\n", error->fault_reg[ring]);
+               err_printf(m, "  SYNC_0: 0x%08x [last synced 0x%08x]\n",
+                          error->semaphore_mboxes[ring][0],
+                          error->semaphore_seqno[ring][0]);
+               err_printf(m, "  SYNC_1: 0x%08x [last synced 0x%08x]\n",
+                          error->semaphore_mboxes[ring][1],
+                          error->semaphore_seqno[ring][1]);
+       }
+       err_printf(m, "  seqno: 0x%08x\n", error->seqno[ring]);
+       err_printf(m, "  waiting: %s\n", yesno(error->waiting[ring]));
+       err_printf(m, "  ring->head: 0x%08x\n", error->cpu_ring_head[ring]);
+       err_printf(m, "  ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]);
+}
+
+void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
+{
+       va_list args;
+
+       va_start(args, f);
+       i915_error_vprintf(e, f, args);
+       va_end(args);
+}
+
+int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
+                           const struct i915_error_state_file_priv *error_priv)
+{
+       struct drm_device *dev = error_priv->dev;
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       struct drm_i915_error_state *error = error_priv->error;
+       struct intel_ring_buffer *ring;
+       int i, j, page, offset, elt;
+
+       if (!error) {
+               err_printf(m, "no error state collected\n");
+               goto out;
+       }
+
+       err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
+                  error->time.tv_usec);
+       err_printf(m, "Kernel: " UTS_RELEASE "\n");
+       err_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
+       err_printf(m, "EIR: 0x%08x\n", error->eir);
+       err_printf(m, "IER: 0x%08x\n", error->ier);
+       err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
+       err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
+       err_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
+       err_printf(m, "CCID: 0x%08x\n", error->ccid);
+
+       for (i = 0; i < dev_priv->num_fence_regs; i++)
+               err_printf(m, "  fence[%d] = %08llx\n", i, error->fence[i]);
+
+       for (i = 0; i < ARRAY_SIZE(error->extra_instdone); i++)
+               err_printf(m, "  INSTDONE_%d: 0x%08x\n", i,
+                          error->extra_instdone[i]);
+
+       if (INTEL_INFO(dev)->gen >= 6) {
+               err_printf(m, "ERROR: 0x%08x\n", error->error);
+               err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
+       }
+
+       if (INTEL_INFO(dev)->gen == 7)
+               err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
+
+       for_each_ring(ring, dev_priv, i)
+               i915_ring_error_state(m, dev, error, i);
+
+       if (error->active_bo)
+               print_error_buffers(m, "Active",
+                                   error->active_bo,
+                                   error->active_bo_count);
+
+       if (error->pinned_bo)
+               print_error_buffers(m, "Pinned",
+                                   error->pinned_bo,
+                                   error->pinned_bo_count);
+
+       for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
+               struct drm_i915_error_object *obj;
+
+               if ((obj = error->ring[i].batchbuffer)) {
+                       err_printf(m, "%s --- gtt_offset = 0x%08x\n",
+                                  dev_priv->ring[i].name,
+                                  obj->gtt_offset);
+                       offset = 0;
+                       for (page = 0; page < obj->page_count; page++) {
+                               for (elt = 0; elt < PAGE_SIZE/4; elt++) {
+                                       err_printf(m, "%08x :  %08x\n", offset,
+                                                  obj->pages[page][elt]);
+                                       offset += 4;
+                               }
+                       }
+               }
+
+               if (error->ring[i].num_requests) {
+                       err_printf(m, "%s --- %d requests\n",
+                                  dev_priv->ring[i].name,
+                                  error->ring[i].num_requests);
+                       for (j = 0; j < error->ring[i].num_requests; j++) {
+                               err_printf(m, "  seqno 0x%08x, emitted %ld, tail 0x%08x\n",
+                                          error->ring[i].requests[j].seqno,
+                                          error->ring[i].requests[j].jiffies,
+                                          error->ring[i].requests[j].tail);
+                       }
+               }
+
+               if ((obj = error->ring[i].ringbuffer)) {
+                       err_printf(m, "%s --- ringbuffer = 0x%08x\n",
+                                  dev_priv->ring[i].name,
+                                  obj->gtt_offset);
+                       offset = 0;
+                       for (page = 0; page < obj->page_count; page++) {
+                               for (elt = 0; elt < PAGE_SIZE/4; elt++) {
+                                       err_printf(m, "%08x :  %08x\n",
+                                                  offset,
+                                                  obj->pages[page][elt]);
+                                       offset += 4;
+                               }
+                       }
+               }
+
+               obj = error->ring[i].ctx;
+               if (obj) {
+                       err_printf(m, "%s --- HW Context = 0x%08x\n",
+                                  dev_priv->ring[i].name,
+                                  obj->gtt_offset);
+                       offset = 0;
+                       for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
+                               err_printf(m, "[%04x] %08x %08x %08x %08x\n",
+                                          offset,
+                                          obj->pages[0][elt],
+                                          obj->pages[0][elt+1],
+                                          obj->pages[0][elt+2],
+                                          obj->pages[0][elt+3]);
+                               offset += 16;
+                       }
+               }
+       }
+
+       if (error->overlay)
+               intel_overlay_print_error_state(m, error->overlay);
+
+       if (error->display)
+               intel_display_print_error_state(m, dev, error->display);
+
+out:
+       if (m->bytes == 0 && m->err)
+               return m->err;
+
+       return 0;
+}
+
+int i915_error_state_buf_init(struct drm_i915_error_state_buf *ebuf,
+                             size_t count, loff_t pos)
+{
+       memset(ebuf, 0, sizeof(*ebuf));
+
+       /* We need to have enough room to store any i915_error_state printf
+        * so that we can move it to the start position.
+        */
+       ebuf->size = count + 1 > PAGE_SIZE ? count + 1 : PAGE_SIZE;
+       ebuf->buf = kmalloc(ebuf->size,
+                               GFP_TEMPORARY | __GFP_NORETRY | __GFP_NOWARN);
+
+       if (ebuf->buf == NULL) {
+               ebuf->size = PAGE_SIZE;
+               ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY);
+       }
+
+       if (ebuf->buf == NULL) {
+               ebuf->size = 128;
+               ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY);
+       }
+
+       if (ebuf->buf == NULL)
+               return -ENOMEM;
+
+       ebuf->start = pos;
+
+       return 0;
+}
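+
+/*
+ * Sketch of the intended calling sequence from a read handler (an
+ * illustration with error handling and the copy to userspace trimmed,
+ * not the actual debugfs code):
+ *
+ *	struct drm_i915_error_state_buf buf;
+ *	struct i915_error_state_file_priv error_priv = { .dev = dev };
+ *
+ *	i915_error_state_buf_init(&buf, count, *pos);
+ *	i915_error_state_get(dev, &error_priv);
+ *	i915_error_state_to_str(&buf, &error_priv);
+ *	i915_error_state_put(&error_priv);
+ *	... hand buf.bytes bytes from buf.buf to the reader ...
+ *	kfree(buf.buf);
+ */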
+
+static void i915_error_object_free(struct drm_i915_error_object *obj)
+{
+       int page;
+
+       if (obj == NULL)
+               return;
+
+       for (page = 0; page < obj->page_count; page++)
+               kfree(obj->pages[page]);
+
+       kfree(obj);
+}
+
+static void i915_error_state_free(struct kref *error_ref)
+{
+       struct drm_i915_error_state *error = container_of(error_ref,
+                                                         typeof(*error), ref);
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
+               i915_error_object_free(error->ring[i].batchbuffer);
+               i915_error_object_free(error->ring[i].ringbuffer);
+               i915_error_object_free(error->ring[i].ctx);
+               kfree(error->ring[i].requests);
+       }
+
+       kfree(error->active_bo);
+       kfree(error->overlay);
+       kfree(error->display);
+       kfree(error);
+}
+
+static struct drm_i915_error_object *
+i915_error_object_create_sized(struct drm_i915_private *dev_priv,
+                              struct drm_i915_gem_object *src,
+                              const int num_pages)
+{
+       struct drm_i915_error_object *dst;
+       int i;
+       u32 reloc_offset;
+
+       if (src == NULL || src->pages == NULL)
+               return NULL;
+
+       dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC);
+       if (dst == NULL)
+               return NULL;
+
+       reloc_offset = dst->gtt_offset = i915_gem_obj_ggtt_offset(src);
+       for (i = 0; i < num_pages; i++) {
+               unsigned long flags;
+               void *d;
+
+               d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
+               if (d == NULL)
+                       goto unwind;
+
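+               /* This may run from hard irq context (e.g. on a GPU hang),
+                * hence the GFP_ATOMIC allocations and the atomic map/copy
+                * dance with interrupts disabled below.
+                */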
+               local_irq_save(flags);
+               if (reloc_offset < dev_priv->gtt.mappable_end &&
+                   src->has_global_gtt_mapping) {
+                       void __iomem *s;
+
+                       /* Simply ignore tiling or any overlapping fence.
+                        * It's part of the error state, and this hopefully
+                        * captures what the GPU read.
+                        */
+
+                       s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
+                                                    reloc_offset);
+                       memcpy_fromio(d, s, PAGE_SIZE);
+                       io_mapping_unmap_atomic(s);
+               } else if (src->stolen) {
+                       unsigned long offset;
+
+                       offset = dev_priv->mm.stolen_base;
+                       offset += src->stolen->start;
+                       offset += i << PAGE_SHIFT;
+
+                       memcpy_fromio(d, (void __iomem *) offset, PAGE_SIZE);
+               } else {
+                       struct page *page;
+                       void *s;
+
+                       page = i915_gem_object_get_page(src, i);
+
+                       drm_clflush_pages(&page, 1);
+
+                       s = kmap_atomic(page);
+                       memcpy(d, s, PAGE_SIZE);
+                       kunmap_atomic(s);
+
+                       drm_clflush_pages(&page, 1);
+               }
+               local_irq_restore(flags);
+
+               dst->pages[i] = d;
+
+               reloc_offset += PAGE_SIZE;
+       }
+       dst->page_count = num_pages;
+
+       return dst;
+
+unwind:
+       while (i--)
+               kfree(dst->pages[i]);
+       kfree(dst);
+       return NULL;
+}
+#define i915_error_object_create(dev_priv, src) \
+       i915_error_object_create_sized((dev_priv), (src), \
+                                      (src)->base.size>>PAGE_SHIFT)
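+
+/*
+ * The _sized variant lets callers snapshot just the head of an object;
+ * the HW context capture below grabs only the first page, while the
+ * plain i915_error_object_create() wrapper copies every page.
+ */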
+
+static void capture_bo(struct drm_i915_error_buffer *err,
+                      struct drm_i915_gem_object *obj)
+{
+       err->size = obj->base.size;
+       err->name = obj->base.name;
+       err->rseqno = obj->last_read_seqno;
+       err->wseqno = obj->last_write_seqno;
+       err->gtt_offset = i915_gem_obj_ggtt_offset(obj);
+       err->read_domains = obj->base.read_domains;
+       err->write_domain = obj->base.write_domain;
+       err->fence_reg = obj->fence_reg;
+       err->pinned = 0;
+       if (obj->pin_count > 0)
+               err->pinned = 1;
+       if (obj->user_pin_count > 0)
+               err->pinned = -1;
+       err->tiling = obj->tiling_mode;
+       err->dirty = obj->dirty;
+       err->purgeable = obj->madv != I915_MADV_WILLNEED;
+       err->ring = obj->ring ? obj->ring->id : -1;
+       err->cache_level = obj->cache_level;
+}
+
+static u32 capture_active_bo(struct drm_i915_error_buffer *err,
+                            int count, struct list_head *head)
+{
+       struct drm_i915_gem_object *obj;
+       int i = 0;
+
+       list_for_each_entry(obj, head, mm_list) {
+               capture_bo(err++, obj);
+               if (++i == count)
+                       break;
+       }
+
+       return i;
+}
+
+static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
+                            int count, struct list_head *head)
+{
+       struct drm_i915_gem_object *obj;
+       int i = 0;
+
+       list_for_each_entry(obj, head, global_list) {
+               if (obj->pin_count == 0)
+                       continue;
+
+               capture_bo(err++, obj);
+               if (++i == count)
+                       break;
+       }
+
+       return i;
+}
+
+static void i915_gem_record_fences(struct drm_device *dev,
+                                  struct drm_i915_error_state *error)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int i;
+
+       /* Fences */
+       switch (INTEL_INFO(dev)->gen) {
+       case 7:
+       case 6:
+               for (i = 0; i < dev_priv->num_fence_regs; i++)
+                       error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
+               break;
+       case 5:
+       case 4:
+               for (i = 0; i < 16; i++)
+                       error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
+               break;
+       case 3:
+               if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+                       for (i = 0; i < 8; i++)
+                               error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
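+               /* fall through - gen3 also reads the eight gen2 fence regs */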
+       case 2:
+               for (i = 0; i < 8; i++)
+                       error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
+               break;
+
+       default:
+               BUG();
+       }
+}
+
+static struct drm_i915_error_object *
+i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
+                            struct intel_ring_buffer *ring)
+{
+       struct i915_address_space *vm = &dev_priv->gtt.base;
+       struct drm_i915_gem_object *obj;
+       u32 seqno;
+
+       if (!ring->get_seqno)
+               return NULL;
+
+       if (HAS_BROKEN_CS_TLB(dev_priv->dev)) {
+               u32 acthd = I915_READ(ACTHD);
+
+               if (WARN_ON(ring->id != RCS))
+                       return NULL;
+
+               obj = ring->private;
+               if (acthd >= i915_gem_obj_ggtt_offset(obj) &&
+                   acthd < i915_gem_obj_ggtt_offset(obj) + obj->base.size)
+                       return i915_error_object_create(dev_priv, obj);
+       }
+
+       seqno = ring->get_seqno(ring, false);
+       list_for_each_entry(obj, &vm->active_list, mm_list) {
+               if (obj->ring != ring)
+                       continue;
+
+               if (i915_seqno_passed(seqno, obj->last_read_seqno))
+                       continue;
+
+               if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
+                       continue;
+
+               /* We need to copy these to an anonymous buffer as the simplest
+                * method to avoid being overwritten by userspace.
+                */
+               return i915_error_object_create(dev_priv, obj);
+       }
+
+       return NULL;
+}
+
+static void i915_record_ring_state(struct drm_device *dev,
+                                  struct drm_i915_error_state *error,
+                                  struct intel_ring_buffer *ring)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       if (INTEL_INFO(dev)->gen >= 6) {
+               error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50);
+               error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
+               error->semaphore_mboxes[ring->id][0]
+                       = I915_READ(RING_SYNC_0(ring->mmio_base));
+               error->semaphore_mboxes[ring->id][1]
+                       = I915_READ(RING_SYNC_1(ring->mmio_base));
+               error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0];
+               error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1];
+       }
+
+       if (INTEL_INFO(dev)->gen >= 4) {
+               error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
+               error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
+               error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
+               error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
+               error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
+               if (ring->id == RCS)
+                       error->bbaddr = I915_READ64(BB_ADDR);
+       } else {
+               error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
+               error->ipeir[ring->id] = I915_READ(IPEIR);
+               error->ipehr[ring->id] = I915_READ(IPEHR);
+               error->instdone[ring->id] = I915_READ(INSTDONE);
+       }
+
+       error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
+       error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
+       error->seqno[ring->id] = ring->get_seqno(ring, false);
+       error->acthd[ring->id] = intel_ring_get_active_head(ring);
+       error->head[ring->id] = I915_READ_HEAD(ring);
+       error->tail[ring->id] = I915_READ_TAIL(ring);
+       error->ctl[ring->id] = I915_READ_CTL(ring);
+
+       error->cpu_ring_head[ring->id] = ring->head;
+       error->cpu_ring_tail[ring->id] = ring->tail;
+}
+
+static void i915_gem_record_active_context(struct intel_ring_buffer *ring,
+                                          struct drm_i915_error_state *error,
+                                          struct drm_i915_error_ring *ering)
+{
+       struct drm_i915_private *dev_priv = ring->dev->dev_private;
+       struct drm_i915_gem_object *obj;
+
+       /* Currently the render ring is the only HW context user */
+       if (ring->id != RCS || !error->ccid)
+               return;
+
+       list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+               if ((error->ccid & PAGE_MASK) == i915_gem_obj_ggtt_offset(obj)) {
+                       ering->ctx = i915_error_object_create_sized(dev_priv,
+                                                                   obj, 1);
+                       break;
+               }
+       }
+}
+
+static void i915_gem_record_rings(struct drm_device *dev,
+                                 struct drm_i915_error_state *error)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_ring_buffer *ring;
+       struct drm_i915_gem_request *request;
+       int i, count;
+
+       for_each_ring(ring, dev_priv, i) {
+               i915_record_ring_state(dev, error, ring);
+
+               error->ring[i].batchbuffer =
+                       i915_error_first_batchbuffer(dev_priv, ring);
+
+               error->ring[i].ringbuffer =
+                       i915_error_object_create(dev_priv, ring->obj);
+
+               i915_gem_record_active_context(ring, error, &error->ring[i]);
+
+               count = 0;
+               list_for_each_entry(request, &ring->request_list, list)
+                       count++;
+
+               error->ring[i].num_requests = count;
+               error->ring[i].requests =
+                       kmalloc(count*sizeof(struct drm_i915_error_request),
+                               GFP_ATOMIC);
+               if (error->ring[i].requests == NULL) {
+                       error->ring[i].num_requests = 0;
+                       continue;
+               }
+
+               count = 0;
+               list_for_each_entry(request, &ring->request_list, list) {
+                       struct drm_i915_error_request *erq;
+
+                       erq = &error->ring[i].requests[count++];
+                       erq->seqno = request->seqno;
+                       erq->jiffies = request->emitted_jiffies;
+                       erq->tail = request->tail;
+               }
+       }
+}
+
+static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv,
+                                    struct drm_i915_error_state *error)
+{
+       struct i915_address_space *vm = &dev_priv->gtt.base;
+       struct drm_i915_gem_object *obj;
+       int i;
+
+       i = 0;
+       list_for_each_entry(obj, &vm->active_list, mm_list)
+               i++;
+       error->active_bo_count = i;
+       list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
+               if (obj->pin_count)
+                       i++;
+       error->pinned_bo_count = i - error->active_bo_count;
+
+       if (i) {
+               error->active_bo = kmalloc(sizeof(*error->active_bo)*i,
+                                          GFP_ATOMIC);
+               if (error->active_bo)
+                       error->pinned_bo =
+                               error->active_bo + error->active_bo_count;
+       }
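+
+       /*
+        * Note: the active and pinned entries share a single allocation;
+        * the pinned array simply begins where the active array ends.
+        */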
+
+       if (error->active_bo)
+               error->active_bo_count =
+                       capture_active_bo(error->active_bo,
+                                         error->active_bo_count,
+                                         &vm->active_list);
+
+       if (error->pinned_bo)
+               error->pinned_bo_count =
+                       capture_pinned_bo(error->pinned_bo,
+                                         error->pinned_bo_count,
+                                         &dev_priv->mm.bound_list);
+}
+
+/**
+ * i915_capture_error_state - capture an error record for later analysis
+ * @dev: drm device
+ *
+ * Should be called when an error is detected (either a hang or an error
+ * interrupt) to capture error state from the time of the error.  Fills
+ * out a structure which becomes available in debugfs for user level tools
+ * to pick up.
+ */
+void i915_capture_error_state(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_error_state *error;
+       unsigned long flags;
+       int pipe;
+
+       spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
+       error = dev_priv->gpu_error.first_error;
+       spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
+       if (error)
+               return;
+
+       /* Account for pipe specific data like PIPE*STAT */
+       error = kzalloc(sizeof(*error), GFP_ATOMIC);
+       if (!error) {
+               DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
+               return;
+       }
+
+       DRM_INFO("capturing error event; look for more information in "
+                "/sys/class/drm/card%d/error\n", dev->primary->index);
+
+       kref_init(&error->ref);
+       error->eir = I915_READ(EIR);
+       error->pgtbl_er = I915_READ(PGTBL_ER);
+       if (HAS_HW_CONTEXTS(dev))
+               error->ccid = I915_READ(CCID);
+
+       if (HAS_PCH_SPLIT(dev))
+               error->ier = I915_READ(DEIER) | I915_READ(GTIER);
+       else if (IS_VALLEYVIEW(dev))
+               error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
+       else if (IS_GEN2(dev))
+               error->ier = I915_READ16(IER);
+       else
+               error->ier = I915_READ(IER);
+
+       if (INTEL_INFO(dev)->gen >= 6)
+               error->derrmr = I915_READ(DERRMR);
+
+       if (IS_VALLEYVIEW(dev))
+               error->forcewake = I915_READ(FORCEWAKE_VLV);
+       else if (INTEL_INFO(dev)->gen >= 7)
+               error->forcewake = I915_READ(FORCEWAKE_MT);
+       else if (INTEL_INFO(dev)->gen == 6)
+               error->forcewake = I915_READ(FORCEWAKE);
+
+       if (!HAS_PCH_SPLIT(dev))
+               for_each_pipe(pipe)
+                       error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
+
+       if (INTEL_INFO(dev)->gen >= 6) {
+               error->error = I915_READ(ERROR_GEN6);
+               error->done_reg = I915_READ(DONE_REG);
+       }
+
+       if (INTEL_INFO(dev)->gen == 7)
+               error->err_int = I915_READ(GEN7_ERR_INT);
+
+       i915_get_extra_instdone(dev, error->extra_instdone);
+
+       i915_gem_capture_buffers(dev_priv, error);
+       i915_gem_record_fences(dev, error);
+       i915_gem_record_rings(dev, error);
+
+       do_gettimeofday(&error->time);
+
+       error->overlay = intel_overlay_capture_error_state(dev);
+       error->display = intel_display_capture_error_state(dev);
+
+       spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
+       if (dev_priv->gpu_error.first_error == NULL) {
+               dev_priv->gpu_error.first_error = error;
+               error = NULL;
+       }
+       spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
+
+       if (error)
+               i915_error_state_free(&error->ref);
+}
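+
+/*
+ * Only one error state is ever retained: first_error stays set until
+ * userspace clears it (or i915_destroy_error_state() runs), so later
+ * hangs are dropped rather than overwriting the first capture.
+ */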
+
+void i915_error_state_get(struct drm_device *dev,
+                         struct i915_error_state_file_priv *error_priv)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       unsigned long flags;
+
+       spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
+       error_priv->error = dev_priv->gpu_error.first_error;
+       if (error_priv->error)
+               kref_get(&error_priv->error->ref);
+       spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
+}
+
+void i915_error_state_put(struct i915_error_state_file_priv *error_priv)
+{
+       if (error_priv->error)
+               kref_put(&error_priv->error->ref, i915_error_state_free);
+}
+
+void i915_destroy_error_state(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_error_state *error;
+       unsigned long flags;
+
+       spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
+       error = dev_priv->gpu_error.first_error;
+       dev_priv->gpu_error.first_error = NULL;
+       spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
+
+       if (error)
+               kref_put(&error->ref, i915_error_state_free);
+}
+
+const char *i915_cache_level_str(int type)
+{
+       switch (type) {
+       case I915_CACHE_NONE: return " uncached";
+       case I915_CACHE_LLC: return " snooped (LLC)";
+       case I915_CACHE_LLC_MLC: return " snooped (LLC+MLC)";
+       default: return "";
+       }
+}
+
+/* NB: zeroes the whole instdone array before reading the per-gen registers */
+void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);
+
+       switch (INTEL_INFO(dev)->gen) {
+       case 2:
+       case 3:
+               instdone[0] = I915_READ(INSTDONE);
+               break;
+       case 4:
+       case 5:
+       case 6:
+               instdone[0] = I915_READ(INSTDONE_I965);
+               instdone[1] = I915_READ(INSTDONE1);
+               break;
+       default:
+               WARN_ONCE(1, "Unsupported platform\n");
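+               /* fall through - read the gen7 registers anyway */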
+       case 7:
+               instdone[0] = I915_READ(GEN7_INSTDONE_1);
+               instdone[1] = I915_READ(GEN7_SC_INSTDONE);
+               instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
+               instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
+               break;
+       }
+}
index 3d92a7cef1541bd20b2c4016cdc2447791991c56..f708e4efa1be3cf417bb9851d74582b1b22eec92 100644 (file)
@@ -128,6 +128,8 @@ static bool cpt_can_enable_serr_int(struct drm_device *dev)
        enum pipe pipe;
        struct intel_crtc *crtc;
 
+       assert_spin_locked(&dev_priv->irq_lock);
+
        for_each_pipe(pipe) {
                crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
 
@@ -152,38 +154,66 @@ static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
 }
 
 static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
-                                                 bool enable)
+                                                 enum pipe pipe, bool enable)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-
        if (enable) {
+               I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));
+
                if (!ivb_can_enable_err_int(dev))
                        return;
 
-               I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN_A |
-                                        ERR_INT_FIFO_UNDERRUN_B |
-                                        ERR_INT_FIFO_UNDERRUN_C);
-
                ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
        } else {
+               bool was_enabled = !(I915_READ(DEIMR) & DE_ERR_INT_IVB);
+
+               /* Change the state _after_ we've read out the current one. */
                ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
+
+               if (!was_enabled &&
+                   (I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe))) {
+                       DRM_DEBUG_KMS("uncleared fifo underrun on pipe %c\n",
+                                     pipe_name(pipe));
+               }
        }
 }
 
-static void ibx_set_fifo_underrun_reporting(struct intel_crtc *crtc,
+/**
+ * ibx_display_interrupt_update - update SDEIMR
+ * @dev_priv: driver private
+ * @interrupt_mask: mask of interrupt bits to update
+ * @enabled_irq_mask: mask of interrupt bits to enable
+ */
+static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
+                                        uint32_t interrupt_mask,
+                                        uint32_t enabled_irq_mask)
+{
+       uint32_t sdeimr = I915_READ(SDEIMR);
+       sdeimr &= ~interrupt_mask;
+       sdeimr |= (~enabled_irq_mask & interrupt_mask);
+
+       assert_spin_locked(&dev_priv->irq_lock);
+
+       I915_WRITE(SDEIMR, sdeimr);
+       POSTING_READ(SDEIMR);
+}
+#define ibx_enable_display_interrupt(dev_priv, bits) \
+       ibx_display_interrupt_update((dev_priv), (bits), (bits))
+#define ibx_disable_display_interrupt(dev_priv, bits) \
+       ibx_display_interrupt_update((dev_priv), (bits), 0)
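+
+/*
+ * Example of the mask arithmetic above (illustrative only): SDEIMR bits
+ * are mask bits, so a set bit disables that interrupt. For a single bit
+ * B, ibx_enable_display_interrupt() computes
+ * (sdeimr & ~B) | (~B & B) == sdeimr & ~B, clearing B (unmasking it),
+ * while ibx_disable_display_interrupt() computes
+ * (sdeimr & ~B) | (~0 & B), setting B again. Bits outside
+ * interrupt_mask are never modified.
+ */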
+
+static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
+                                           enum transcoder pch_transcoder,
                                            bool enable)
 {
-       struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       uint32_t bit = (crtc->pipe == PIPE_A) ? SDE_TRANSA_FIFO_UNDER :
-                                               SDE_TRANSB_FIFO_UNDER;
+       uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
+                      SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;
 
        if (enable)
-               I915_WRITE(SDEIMR, I915_READ(SDEIMR) & ~bit);
+               ibx_enable_display_interrupt(dev_priv, bit);
        else
-               I915_WRITE(SDEIMR, I915_READ(SDEIMR) | bit);
-
-       POSTING_READ(SDEIMR);
+               ibx_disable_display_interrupt(dev_priv, bit);
 }
 
 static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
@@ -193,19 +223,26 @@ static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
        struct drm_i915_private *dev_priv = dev->dev_private;
 
        if (enable) {
+               I915_WRITE(SERR_INT,
+                          SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));
+
                if (!cpt_can_enable_serr_int(dev))
                        return;
 
-               I915_WRITE(SERR_INT, SERR_INT_TRANS_A_FIFO_UNDERRUN |
-                                    SERR_INT_TRANS_B_FIFO_UNDERRUN |
-                                    SERR_INT_TRANS_C_FIFO_UNDERRUN);
-
-               I915_WRITE(SDEIMR, I915_READ(SDEIMR) & ~SDE_ERROR_CPT);
+               ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
        } else {
-               I915_WRITE(SDEIMR, I915_READ(SDEIMR) | SDE_ERROR_CPT);
-       }
+               uint32_t tmp = I915_READ(SERR_INT);
+               bool was_enabled = !(I915_READ(SDEIMR) & SDE_ERROR_CPT);
 
-       POSTING_READ(SDEIMR);
+               /* Change the state _after_ we've read out the current one. */
+               ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);
+
+               if (!was_enabled &&
+                   (tmp & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder))) {
+                       DRM_DEBUG_KMS("uncleared pch fifo underrun on pch transcoder %c\n",
+                                     transcoder_name(pch_transcoder));
+               }
+       }
 }
 
 /**
@@ -243,7 +280,7 @@ bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
        if (IS_GEN5(dev) || IS_GEN6(dev))
                ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
        else if (IS_GEN7(dev))
-               ivybridge_set_fifo_underrun_reporting(dev, enable);
+               ivybridge_set_fifo_underrun_reporting(dev, pipe, enable);
 
 done:
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
@@ -269,29 +306,19 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
                                           bool enable)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       enum pipe p;
-       struct drm_crtc *crtc;
-       struct intel_crtc *intel_crtc;
+       struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        unsigned long flags;
        bool ret;
 
-       if (HAS_PCH_LPT(dev)) {
-               crtc = NULL;
-               for_each_pipe(p) {
-                       struct drm_crtc *c = dev_priv->pipe_to_crtc_mapping[p];
-                       if (intel_pipe_has_type(c, INTEL_OUTPUT_ANALOG)) {
-                               crtc = c;
-                               break;
-                       }
-               }
-               if (!crtc) {
-                       DRM_ERROR("PCH FIFO underrun, but no CRTC using the PCH found\n");
-                       return false;
-               }
-       } else {
-               crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
-       }
-       intel_crtc = to_intel_crtc(crtc);
+       /*
+        * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT
+        * has only one pch transcoder A that all pipes can use. To avoid racy
+        * pch transcoder -> pipe lookups from interrupt code, simply store the
+        * underrun statistics in crtc A. Since we never expose this anywhere
+        * nor use it outside of the fifo underrun code, using the "wrong"
+        * crtc on LPT won't cause issues here.
+        */
 
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
 
@@ -303,7 +330,7 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
        intel_crtc->pch_fifo_underrun_disabled = !enable;
 
        if (HAS_PCH_IBX(dev))
-               ibx_set_fifo_underrun_reporting(intel_crtc, enable);
+               ibx_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
        else
                cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
 
@@ -319,6 +346,8 @@ i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
        u32 reg = PIPESTAT(pipe);
        u32 pipestat = I915_READ(reg) & 0x7fff0000;
 
+       assert_spin_locked(&dev_priv->irq_lock);
+
        if ((pipestat & mask) == mask)
                return;
 
@@ -334,6 +363,8 @@ i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
        u32 reg = PIPESTAT(pipe);
        u32 pipestat = I915_READ(reg) & 0x7fff0000;
 
+       assert_spin_locked(&dev_priv->irq_lock);
+
        if ((pipestat & mask) == 0)
                return;
 
@@ -625,14 +656,13 @@ static void i915_hotplug_work_func(struct work_struct *work)
                drm_kms_helper_hotplug_event(dev);
 }
 
-static void ironlake_handle_rps_change(struct drm_device *dev)
+static void ironlake_rps_change_irq_handler(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        u32 busy_up, busy_down, max_avg, min_avg;
        u8 new_delay;
-       unsigned long flags;
 
-       spin_lock_irqsave(&mchdev_lock, flags);
+       spin_lock(&mchdev_lock);
 
        I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
 
@@ -660,7 +690,7 @@ static void ironlake_handle_rps_change(struct drm_device *dev)
        if (ironlake_set_drps(dev, new_delay))
                dev_priv->ips.cur_delay = new_delay;
 
-       spin_unlock_irqrestore(&mchdev_lock, flags);
+       spin_unlock(&mchdev_lock);
 
        return;
 }
@@ -668,18 +698,13 @@ static void ironlake_handle_rps_change(struct drm_device *dev)
 static void notify_ring(struct drm_device *dev,
                        struct intel_ring_buffer *ring)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
        if (ring->obj == NULL)
                return;
 
        trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false));
 
        wake_up_all(&ring->irq_queue);
-       if (i915_enable_hangcheck) {
-               mod_timer(&dev_priv->gpu_error.hangcheck_timer,
-                         round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
-       }
+       i915_queue_hangcheck(dev);
 }
 
 static void gen6_pm_rps_work(struct work_struct *work)
@@ -689,13 +714,13 @@ static void gen6_pm_rps_work(struct work_struct *work)
        u32 pm_iir, pm_imr;
        u8 new_delay;
 
-       spin_lock_irq(&dev_priv->rps.lock);
+       spin_lock_irq(&dev_priv->irq_lock);
        pm_iir = dev_priv->rps.pm_iir;
        dev_priv->rps.pm_iir = 0;
        pm_imr = I915_READ(GEN6_PMIMR);
        /* Make sure not to corrupt PMIMR state used by ringbuffer code */
        I915_WRITE(GEN6_PMIMR, pm_imr & ~GEN6_PM_RPS_EVENTS);
-       spin_unlock_irq(&dev_priv->rps.lock);
+       spin_unlock_irq(&dev_priv->irq_lock);
 
        if ((pm_iir & GEN6_PM_RPS_EVENTS) == 0)
                return;
@@ -787,7 +812,7 @@ static void ivybridge_parity_work(struct work_struct *work)
 
        mutex_unlock(&dev_priv->dev->struct_mutex);
 
-       parity_event[0] = "L3_PARITY_ERROR=1";
+       parity_event[0] = I915_L3_PARITY_UEVENT "=1";
        parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
        parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
        parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
@@ -804,22 +829,32 @@ static void ivybridge_parity_work(struct work_struct *work)
        kfree(parity_event[1]);
 }
 
-static void ivybridge_handle_parity_error(struct drm_device *dev)
+static void ivybridge_parity_error_irq_handler(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-       unsigned long flags;
 
        if (!HAS_L3_GPU_CACHE(dev))
                return;
 
-       spin_lock_irqsave(&dev_priv->irq_lock, flags);
+       spin_lock(&dev_priv->irq_lock);
        dev_priv->gt_irq_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
        I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-       spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+       spin_unlock(&dev_priv->irq_lock);
 
        queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
 }
 
+static void ilk_gt_irq_handler(struct drm_device *dev,
+                              struct drm_i915_private *dev_priv,
+                              u32 gt_iir)
+{
+       if (gt_iir &
+           (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
+               notify_ring(dev, &dev_priv->ring[RCS]);
+       if (gt_iir & ILK_BSD_USER_INTERRUPT)
+               notify_ring(dev, &dev_priv->ring[VCS]);
+}
+
 static void snb_gt_irq_handler(struct drm_device *dev,
                               struct drm_i915_private *dev_priv,
                               u32 gt_iir)
@@ -841,15 +876,13 @@ static void snb_gt_irq_handler(struct drm_device *dev,
        }
 
        if (gt_iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
-               ivybridge_handle_parity_error(dev);
+               ivybridge_parity_error_irq_handler(dev);
 }
 
 /* Legacy way of handling PM interrupts */
-static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
-                               u32 pm_iir)
+static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv,
+                                u32 pm_iir)
 {
-       unsigned long flags;
-
        /*
         * IIR bits should never already be set because IMR should
         * prevent an interrupt from being shown in IIR. The warning
@@ -860,11 +893,11 @@ static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
         * The mask bit in IMR is cleared by dev_priv->rps.work.
         */
 
-       spin_lock_irqsave(&dev_priv->rps.lock, flags);
+       spin_lock(&dev_priv->irq_lock);
        dev_priv->rps.pm_iir |= pm_iir;
        I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
        POSTING_READ(GEN6_PMIMR);
-       spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
+       spin_unlock(&dev_priv->irq_lock);
 
        queue_work(dev_priv->wq, &dev_priv->rps.work);
 }
@@ -928,7 +961,7 @@ static void dp_aux_irq_handler(struct drm_device *dev)
        wake_up_all(&dev_priv->gmbus_wait_queue);
 }
 
-/* Unlike gen6_queue_rps_work() from which this function is originally derived,
+/* Unlike gen6_rps_irq_handler() from which this function is originally derived,
  * we must be able to deal with other PM interrupts. This is complicated because
  * of the way in which we use the masks to defer the RPS work (which for
  * posterity is necessary because of forcewake).
@@ -936,27 +969,23 @@ static void dp_aux_irq_handler(struct drm_device *dev)
 static void hsw_pm_irq_handler(struct drm_i915_private *dev_priv,
                               u32 pm_iir)
 {
-       unsigned long flags;
-
-       spin_lock_irqsave(&dev_priv->rps.lock, flags);
-       dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS;
-       if (dev_priv->rps.pm_iir) {
+       if (pm_iir & GEN6_PM_RPS_EVENTS) {
+               spin_lock(&dev_priv->irq_lock);
+               dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS;
                I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
                /* never want to mask useful interrupts. (also posting read) */
                WARN_ON(I915_READ_NOTRACE(GEN6_PMIMR) & ~GEN6_PM_RPS_EVENTS);
-               /* TODO: if queue_work is slow, move it out of the spinlock */
+               spin_unlock(&dev_priv->irq_lock);
+
                queue_work(dev_priv->wq, &dev_priv->rps.work);
        }
-       spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
 
-       if (pm_iir & ~GEN6_PM_RPS_EVENTS) {
-               if (pm_iir & PM_VEBOX_USER_INTERRUPT)
-                       notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);
+       if (pm_iir & PM_VEBOX_USER_INTERRUPT)
+               notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);
 
-               if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
-                       DRM_ERROR("VEBOX CS error interrupt 0x%08x\n", pm_iir);
-                       i915_handle_error(dev_priv->dev, false);
-               }
+       if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
+               DRM_ERROR("VEBOX CS error interrupt 0x%08x\n", pm_iir);
+               i915_handle_error(dev_priv->dev, false);
        }
 }
 
@@ -1029,7 +1058,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
                        gmbus_irq_handler(dev);
 
                if (pm_iir & GEN6_PM_RPS_EVENTS)
-                       gen6_queue_rps_work(dev_priv, pm_iir);
+                       gen6_rps_irq_handler(dev_priv, pm_iir);
 
                I915_WRITE(GTIIR, gt_iir);
                I915_WRITE(GEN6_PMIIR, pm_iir);
@@ -1179,163 +1208,9 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
                cpt_serr_int_handler(dev);
 }
 
-static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
+static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
 {
-       struct drm_device *dev = (struct drm_device *) arg;
-       drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-       u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier = 0;
-       irqreturn_t ret = IRQ_NONE;
-       int i;
-
-       atomic_inc(&dev_priv->irq_received);
-
-       /* We get interrupts on unclaimed registers, so check for this before we
-        * do any I915_{READ,WRITE}. */
-       if (IS_HASWELL(dev) &&
-           (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
-               DRM_ERROR("Unclaimed register before interrupt\n");
-               I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
-       }
-
-       /* disable master interrupt before clearing iir  */
-       de_ier = I915_READ(DEIER);
-       I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
-
-       /* Disable south interrupts. We'll only write to SDEIIR once, so further
-        * interrupts will be stored on its back queue, and then we'll be
-        * able to process them after we restore SDEIER (as soon as we restore
-        * it, we'll get an interrupt if SDEIIR still has something to process
-        * due to its back queue). */
-       if (!HAS_PCH_NOP(dev)) {
-               sde_ier = I915_READ(SDEIER);
-               I915_WRITE(SDEIER, 0);
-               POSTING_READ(SDEIER);
-       }
-
-       /* On Haswell, also mask ERR_INT because we don't want to risk
-        * generating "unclaimed register" interrupts from inside the interrupt
-        * handler. */
-       if (IS_HASWELL(dev)) {
-               spin_lock(&dev_priv->irq_lock);
-               ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
-               spin_unlock(&dev_priv->irq_lock);
-       }
-
-       gt_iir = I915_READ(GTIIR);
-       if (gt_iir) {
-               snb_gt_irq_handler(dev, dev_priv, gt_iir);
-               I915_WRITE(GTIIR, gt_iir);
-               ret = IRQ_HANDLED;
-       }
-
-       de_iir = I915_READ(DEIIR);
-       if (de_iir) {
-               if (de_iir & DE_ERR_INT_IVB)
-                       ivb_err_int_handler(dev);
-
-               if (de_iir & DE_AUX_CHANNEL_A_IVB)
-                       dp_aux_irq_handler(dev);
-
-               if (de_iir & DE_GSE_IVB)
-                       intel_opregion_asle_intr(dev);
-
-               for (i = 0; i < 3; i++) {
-                       if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
-                               drm_handle_vblank(dev, i);
-                       if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
-                               intel_prepare_page_flip(dev, i);
-                               intel_finish_page_flip_plane(dev, i);
-                       }
-               }
-
-               /* check event from PCH */
-               if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
-                       u32 pch_iir = I915_READ(SDEIIR);
-
-                       cpt_irq_handler(dev, pch_iir);
-
-                       /* clear PCH hotplug event before clear CPU irq */
-                       I915_WRITE(SDEIIR, pch_iir);
-               }
-
-               I915_WRITE(DEIIR, de_iir);
-               ret = IRQ_HANDLED;
-       }
-
-       pm_iir = I915_READ(GEN6_PMIIR);
-       if (pm_iir) {
-               if (IS_HASWELL(dev))
-                       hsw_pm_irq_handler(dev_priv, pm_iir);
-               else if (pm_iir & GEN6_PM_RPS_EVENTS)
-                       gen6_queue_rps_work(dev_priv, pm_iir);
-               I915_WRITE(GEN6_PMIIR, pm_iir);
-               ret = IRQ_HANDLED;
-       }
-
-       if (IS_HASWELL(dev)) {
-               spin_lock(&dev_priv->irq_lock);
-               if (ivb_can_enable_err_int(dev))
-                       ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
-               spin_unlock(&dev_priv->irq_lock);
-       }
-
-       I915_WRITE(DEIER, de_ier);
-       POSTING_READ(DEIER);
-       if (!HAS_PCH_NOP(dev)) {
-               I915_WRITE(SDEIER, sde_ier);
-               POSTING_READ(SDEIER);
-       }
-
-       return ret;
-}
-
-static void ilk_gt_irq_handler(struct drm_device *dev,
-                              struct drm_i915_private *dev_priv,
-                              u32 gt_iir)
-{
-       if (gt_iir &
-           (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
-               notify_ring(dev, &dev_priv->ring[RCS]);
-       if (gt_iir & ILK_BSD_USER_INTERRUPT)
-               notify_ring(dev, &dev_priv->ring[VCS]);
-}
-
-static irqreturn_t ironlake_irq_handler(int irq, void *arg)
-{
-       struct drm_device *dev = (struct drm_device *) arg;
-       drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-       int ret = IRQ_NONE;
-       u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier;
-
-       atomic_inc(&dev_priv->irq_received);
-
-       /* disable master interrupt before clearing iir  */
-       de_ier = I915_READ(DEIER);
-       I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
-       POSTING_READ(DEIER);
-
-       /* Disable south interrupts. We'll only write to SDEIIR once, so further
-        * interrupts will be stored on its back queue, and then we'll be
-        * able to process them after we restore SDEIER (as soon as we restore
-        * it, we'll get an interrupt if SDEIIR still has something to process
-        * due to its back queue). */
-       sde_ier = I915_READ(SDEIER);
-       I915_WRITE(SDEIER, 0);
-       POSTING_READ(SDEIER);
-
-       de_iir = I915_READ(DEIIR);
-       gt_iir = I915_READ(GTIIR);
-       pm_iir = I915_READ(GEN6_PMIIR);
-
-       if (de_iir == 0 && gt_iir == 0 && (!IS_GEN6(dev) || pm_iir == 0))
-               goto done;
-
-       ret = IRQ_HANDLED;
-
-       if (IS_GEN5(dev))
-               ilk_gt_irq_handler(dev, dev_priv, gt_iir);
-       else
-               snb_gt_irq_handler(dev, dev_priv, gt_iir);
+       struct drm_i915_private *dev_priv = dev->dev_private;
 
        if (de_iir & DE_AUX_CHANNEL_A)
                dp_aux_irq_handler(dev);
@@ -1383,622 +1258,203 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
                I915_WRITE(SDEIIR, pch_iir);
        }
 
-       if (IS_GEN5(dev) &&  de_iir & DE_PCU_EVENT)
-               ironlake_handle_rps_change(dev);
-
-       if (IS_GEN6(dev) && pm_iir & GEN6_PM_RPS_EVENTS)
-               gen6_queue_rps_work(dev_priv, pm_iir);
-
-       I915_WRITE(GTIIR, gt_iir);
-       I915_WRITE(DEIIR, de_iir);
-       I915_WRITE(GEN6_PMIIR, pm_iir);
-
-done:
-       I915_WRITE(DEIER, de_ier);
-       POSTING_READ(DEIER);
-       I915_WRITE(SDEIER, sde_ier);
-       POSTING_READ(SDEIER);
-
-       return ret;
-}
-
-/**
- * i915_error_work_func - do process context error handling work
- * @work: work struct
- *
- * Fire an error uevent so userspace can see that a hang or error
- * was detected.
- */
-static void i915_error_work_func(struct work_struct *work)
-{
-       struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
-                                                   work);
-       drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t,
-                                                   gpu_error);
-       struct drm_device *dev = dev_priv->dev;
-       struct intel_ring_buffer *ring;
-       char *error_event[] = { "ERROR=1", NULL };
-       char *reset_event[] = { "RESET=1", NULL };
-       char *reset_done_event[] = { "ERROR=0", NULL };
-       int i, ret;
-
-       kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
-
-       /*
-        * Note that there's only one work item which does gpu resets, so we
-        * need not worry about concurrent gpu resets potentially incrementing
-        * error->reset_counter twice. We only need to take care of another
-        * racing irq/hangcheck declaring the gpu dead for a second time. A
-        * quick check for that is good enough: schedule_work ensures the
-        * correct ordering between hang detection and this work item, and since
-        * the reset in-progress bit is only ever set by code outside of this
-        * work we don't need to worry about any other races.
-        */
-       if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
-               DRM_DEBUG_DRIVER("resetting chip\n");
-               kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE,
-                                  reset_event);
-
-               ret = i915_reset(dev);
-
-               if (ret == 0) {
-                       /*
-                        * After all the gem state is reset, increment the reset
-                        * counter and wake up everyone waiting for the reset to
-                        * complete.
-                        *
-                        * Since unlock operations are a one-sided barrier only,
-                        * we need to insert a barrier here to order any seqno
-        * updates before the counter increment.
-                        */
-                       smp_mb__before_atomic_inc();
-                       atomic_inc(&dev_priv->gpu_error.reset_counter);
-
-                       kobject_uevent_env(&dev->primary->kdev.kobj,
-                                          KOBJ_CHANGE, reset_done_event);
-               } else {
-                       atomic_set(&error->reset_counter, I915_WEDGED);
-               }
-
-               for_each_ring(ring, dev_priv, i)
-                       wake_up_all(&ring->irq_queue);
-
-               intel_display_handle_reset(dev);
-
-               wake_up_all(&dev_priv->gpu_error.reset_queue);
-       }
+       if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
+               ironlake_rps_change_irq_handler(dev);
 }
 
-/* NB: please notice the memset */
-static void i915_get_extra_instdone(struct drm_device *dev,
-                                   uint32_t *instdone)
+static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);
-
-       switch(INTEL_INFO(dev)->gen) {
-       case 2:
-       case 3:
-               instdone[0] = I915_READ(INSTDONE);
-               break;
-       case 4:
-       case 5:
-       case 6:
-               instdone[0] = I915_READ(INSTDONE_I965);
-               instdone[1] = I915_READ(INSTDONE1);
-               break;
-       default:
-               WARN_ONCE(1, "Unsupported platform\n");
-       case 7:
-               instdone[0] = I915_READ(GEN7_INSTDONE_1);
-               instdone[1] = I915_READ(GEN7_SC_INSTDONE);
-               instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
-               instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
-               break;
-       }
-}
-
-#ifdef CONFIG_DEBUG_FS
-static struct drm_i915_error_object *
-i915_error_object_create_sized(struct drm_i915_private *dev_priv,
-                              struct drm_i915_gem_object *src,
-                              const int num_pages)
-{
-       struct drm_i915_error_object *dst;
        int i;
-       u32 reloc_offset;
 
-       if (src == NULL || src->pages == NULL)
-               return NULL;
-
-       dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC);
-       if (dst == NULL)
-               return NULL;
-
-       reloc_offset = src->gtt_offset;
-       for (i = 0; i < num_pages; i++) {
-               unsigned long flags;
-               void *d;
-
-               d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
-               if (d == NULL)
-                       goto unwind;
-
-               local_irq_save(flags);
-               if (reloc_offset < dev_priv->gtt.mappable_end &&
-                   src->has_global_gtt_mapping) {
-                       void __iomem *s;
-
-                       /* Simply ignore tiling or any overlapping fence.
-                        * It's part of the error state, and this hopefully
-                        * captures what the GPU read.
-                        */
-
-                       s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
-                                                    reloc_offset);
-                       memcpy_fromio(d, s, PAGE_SIZE);
-                       io_mapping_unmap_atomic(s);
-               } else if (src->stolen) {
-                       unsigned long offset;
-
-                       offset = dev_priv->mm.stolen_base;
-                       offset += src->stolen->start;
-                       offset += i << PAGE_SHIFT;
-
-                       memcpy_fromio(d, (void __iomem *) offset, PAGE_SIZE);
-               } else {
-                       struct page *page;
-                       void *s;
+       if (de_iir & DE_ERR_INT_IVB)
+               ivb_err_int_handler(dev);
 
-                       page = i915_gem_object_get_page(src, i);
-
-                       drm_clflush_pages(&page, 1);
+       if (de_iir & DE_AUX_CHANNEL_A_IVB)
+               dp_aux_irq_handler(dev);
 
-                       s = kmap_atomic(page);
-                       memcpy(d, s, PAGE_SIZE);
-                       kunmap_atomic(s);
+       if (de_iir & DE_GSE_IVB)
+               intel_opregion_asle_intr(dev);
 
-                       drm_clflush_pages(&page, 1);
+       for (i = 0; i < 3; i++) {
+               if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
+                       drm_handle_vblank(dev, i);
+               if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
+                       intel_prepare_page_flip(dev, i);
+                       intel_finish_page_flip_plane(dev, i);
                }
-               local_irq_restore(flags);
-
-               dst->pages[i] = d;
-
-               reloc_offset += PAGE_SIZE;
        }
-       dst->page_count = num_pages;
-       dst->gtt_offset = src->gtt_offset;
-
-       return dst;
 
-unwind:
-       while (i--)
-               kfree(dst->pages[i]);
-       kfree(dst);
-       return NULL;
-}
-#define i915_error_object_create(dev_priv, src) \
-       i915_error_object_create_sized((dev_priv), (src), \
-                                      (src)->base.size>>PAGE_SHIFT)
-
-static void
-i915_error_object_free(struct drm_i915_error_object *obj)
-{
-       int page;
-
-       if (obj == NULL)
-               return;
-
-       for (page = 0; page < obj->page_count; page++)
-               kfree(obj->pages[page]);
-
-       kfree(obj);
-}
-
-void
-i915_error_state_free(struct kref *error_ref)
-{
-       struct drm_i915_error_state *error = container_of(error_ref,
-                                                         typeof(*error), ref);
-       int i;
-
-       for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
-               i915_error_object_free(error->ring[i].batchbuffer);
-               i915_error_object_free(error->ring[i].ringbuffer);
-               i915_error_object_free(error->ring[i].ctx);
-               kfree(error->ring[i].requests);
-       }
-
-       kfree(error->active_bo);
-       kfree(error->overlay);
-       kfree(error->display);
-       kfree(error);
-}
-static void capture_bo(struct drm_i915_error_buffer *err,
-                      struct drm_i915_gem_object *obj)
-{
-       err->size = obj->base.size;
-       err->name = obj->base.name;
-       err->rseqno = obj->last_read_seqno;
-       err->wseqno = obj->last_write_seqno;
-       err->gtt_offset = obj->gtt_offset;
-       err->read_domains = obj->base.read_domains;
-       err->write_domain = obj->base.write_domain;
-       err->fence_reg = obj->fence_reg;
-       err->pinned = 0;
-       if (obj->pin_count > 0)
-               err->pinned = 1;
-       if (obj->user_pin_count > 0)
-               err->pinned = -1;
-       err->tiling = obj->tiling_mode;
-       err->dirty = obj->dirty;
-       err->purgeable = obj->madv != I915_MADV_WILLNEED;
-       err->ring = obj->ring ? obj->ring->id : -1;
-       err->cache_level = obj->cache_level;
-}
-
-static u32 capture_active_bo(struct drm_i915_error_buffer *err,
-                            int count, struct list_head *head)
-{
-       struct drm_i915_gem_object *obj;
-       int i = 0;
-
-       list_for_each_entry(obj, head, mm_list) {
-               capture_bo(err++, obj);
-               if (++i == count)
-                       break;
-       }
-
-       return i;
-}
-
-static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
-                            int count, struct list_head *head)
-{
-       struct drm_i915_gem_object *obj;
-       int i = 0;
-
-       list_for_each_entry(obj, head, global_list) {
-               if (obj->pin_count == 0)
-                       continue;
-
-               capture_bo(err++, obj);
-               if (++i == count)
-                       break;
-       }
-
-       return i;
-}
-
-static void i915_gem_record_fences(struct drm_device *dev,
-                                  struct drm_i915_error_state *error)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       int i;
-
-       /* Fences */
-       switch (INTEL_INFO(dev)->gen) {
-       case 7:
-       case 6:
-               for (i = 0; i < dev_priv->num_fence_regs; i++)
-                       error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
-               break;
-       case 5:
-       case 4:
-               for (i = 0; i < 16; i++)
-                       error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
-               break;
-       case 3:
-               if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
-                       for (i = 0; i < 8; i++)
-                               error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
-       case 2:
-               for (i = 0; i < 8; i++)
-                       error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
-               break;
-
-       default:
-               BUG();
-       }
-}
-
-static struct drm_i915_error_object *
-i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
-                            struct intel_ring_buffer *ring)
-{
-       struct drm_i915_gem_object *obj;
-       u32 seqno;
-
-       if (!ring->get_seqno)
-               return NULL;
-
-       if (HAS_BROKEN_CS_TLB(dev_priv->dev)) {
-               u32 acthd = I915_READ(ACTHD);
-
-               if (WARN_ON(ring->id != RCS))
-                       return NULL;
-
-               obj = ring->private;
-               if (acthd >= obj->gtt_offset &&
-                   acthd < obj->gtt_offset + obj->base.size)
-                       return i915_error_object_create(dev_priv, obj);
-       }
-
-       seqno = ring->get_seqno(ring, false);
-       list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
-               if (obj->ring != ring)
-                       continue;
-
-               if (i915_seqno_passed(seqno, obj->last_read_seqno))
-                       continue;
+       /* check event from PCH */
+       if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
+               u32 pch_iir = I915_READ(SDEIIR);
 
-               if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
-                       continue;
+               cpt_irq_handler(dev, pch_iir);
 
-               /* We need to copy these to an anonymous buffer as the simplest
-                * method to avoid being overwritten by userspace.
-                */
-               return i915_error_object_create(dev_priv, obj);
+               /* clear PCH hotplug event before clear CPU irq */
+               I915_WRITE(SDEIIR, pch_iir);
        }
-
-       return NULL;
 }
 
-static void i915_record_ring_state(struct drm_device *dev,
-                                  struct drm_i915_error_state *error,
-                                  struct intel_ring_buffer *ring)
+static irqreturn_t ironlake_irq_handler(int irq, void *arg)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_device *dev = (struct drm_device *) arg;
+       drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+       u32 de_iir, gt_iir, de_ier, sde_ier = 0;
+       irqreturn_t ret = IRQ_NONE;
 
-       if (INTEL_INFO(dev)->gen >= 6) {
-               error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50);
-               error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
-               error->semaphore_mboxes[ring->id][0]
-                       = I915_READ(RING_SYNC_0(ring->mmio_base));
-               error->semaphore_mboxes[ring->id][1]
-                       = I915_READ(RING_SYNC_1(ring->mmio_base));
-               error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0];
-               error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1];
-       }
+       atomic_inc(&dev_priv->irq_received);
 
-       if (INTEL_INFO(dev)->gen >= 4) {
-               error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
-               error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
-               error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
-               error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
-               error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
-               if (ring->id == RCS)
-                       error->bbaddr = I915_READ64(BB_ADDR);
-       } else {
-               error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
-               error->ipeir[ring->id] = I915_READ(IPEIR);
-               error->ipehr[ring->id] = I915_READ(IPEHR);
-               error->instdone[ring->id] = I915_READ(INSTDONE);
+       /* We get interrupts on unclaimed registers, so check for this before we
+        * do any I915_{READ,WRITE}. */
+       if (IS_HASWELL(dev) &&
+           (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
+               DRM_ERROR("Unclaimed register before interrupt\n");
+               I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
        }
 
-       error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
-       error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
-       error->seqno[ring->id] = ring->get_seqno(ring, false);
-       error->acthd[ring->id] = intel_ring_get_active_head(ring);
-       error->head[ring->id] = I915_READ_HEAD(ring);
-       error->tail[ring->id] = I915_READ_TAIL(ring);
-       error->ctl[ring->id] = I915_READ_CTL(ring);
-
-       error->cpu_ring_head[ring->id] = ring->head;
-       error->cpu_ring_tail[ring->id] = ring->tail;
-}
-
-
-static void i915_gem_record_active_context(struct intel_ring_buffer *ring,
-                                          struct drm_i915_error_state *error,
-                                          struct drm_i915_error_ring *ering)
-{
-       struct drm_i915_private *dev_priv = ring->dev->dev_private;
-       struct drm_i915_gem_object *obj;
-
-       /* Currently render ring is the only HW context user */
-       if (ring->id != RCS || !error->ccid)
-               return;
+       /* disable master interrupt before clearing iir */
+       de_ier = I915_READ(DEIER);
+       I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
+       POSTING_READ(DEIER);
 
-       list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
-               if ((error->ccid & PAGE_MASK) == obj->gtt_offset) {
-                       ering->ctx = i915_error_object_create_sized(dev_priv,
-                                                                   obj, 1);
-               }
+       /* Disable south interrupts. We'll only write to SDEIIR once, so further
+        * interrupts will be stored on its back queue, and then we'll be
+        * able to process them after we restore SDEIER (as soon as we restore
+        * it, we'll get an interrupt if SDEIIR still has something to process
+        * due to its back queue). */
+       if (!HAS_PCH_NOP(dev)) {
+               sde_ier = I915_READ(SDEIER);
+               I915_WRITE(SDEIER, 0);
+               POSTING_READ(SDEIER);
        }
-}
-
-static void i915_gem_record_rings(struct drm_device *dev,
-                                 struct drm_i915_error_state *error)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_ring_buffer *ring;
-       struct drm_i915_gem_request *request;
-       int i, count;
-
-       for_each_ring(ring, dev_priv, i) {
-               i915_record_ring_state(dev, error, ring);
-
-               error->ring[i].batchbuffer =
-                       i915_error_first_batchbuffer(dev_priv, ring);
-
-               error->ring[i].ringbuffer =
-                       i915_error_object_create(dev_priv, ring->obj);
 
+       /* On Haswell, also mask ERR_INT because we don't want to risk
+        * generating "unclaimed register" interrupts from inside the interrupt
+        * handler. */
+       if (IS_HASWELL(dev)) {
+               spin_lock(&dev_priv->irq_lock);
+               ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
+               spin_unlock(&dev_priv->irq_lock);
+       }
 
-               i915_gem_record_active_context(ring, error, &error->ring[i]);
+       gt_iir = I915_READ(GTIIR);
+       if (gt_iir) {
+               if (INTEL_INFO(dev)->gen >= 6)
+                       snb_gt_irq_handler(dev, dev_priv, gt_iir);
+               else
+                       ilk_gt_irq_handler(dev, dev_priv, gt_iir);
+               I915_WRITE(GTIIR, gt_iir);
+               ret = IRQ_HANDLED;
+       }
 
-               count = 0;
-               list_for_each_entry(request, &ring->request_list, list)
-                       count++;
+       de_iir = I915_READ(DEIIR);
+       if (de_iir) {
+               if (INTEL_INFO(dev)->gen >= 7)
+                       ivb_display_irq_handler(dev, de_iir);
+               else
+                       ilk_display_irq_handler(dev, de_iir);
+               I915_WRITE(DEIIR, de_iir);
+               ret = IRQ_HANDLED;
+       }
 
-               error->ring[i].num_requests = count;
-               error->ring[i].requests =
-                       kmalloc(count*sizeof(struct drm_i915_error_request),
-                               GFP_ATOMIC);
-               if (error->ring[i].requests == NULL) {
-                       error->ring[i].num_requests = 0;
-                       continue;
+       if (INTEL_INFO(dev)->gen >= 6) {
+               u32 pm_iir = I915_READ(GEN6_PMIIR);
+               if (pm_iir) {
+                       if (IS_HASWELL(dev))
+                               hsw_pm_irq_handler(dev_priv, pm_iir);
+                       else if (pm_iir & GEN6_PM_RPS_EVENTS)
+                               gen6_rps_irq_handler(dev_priv, pm_iir);
+                       I915_WRITE(GEN6_PMIIR, pm_iir);
+                       ret = IRQ_HANDLED;
                }
+       }
 
-               count = 0;
-               list_for_each_entry(request, &ring->request_list, list) {
-                       struct drm_i915_error_request *erq;
+       if (IS_HASWELL(dev)) {
+               spin_lock(&dev_priv->irq_lock);
+               if (ivb_can_enable_err_int(dev))
+                       ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
+               spin_unlock(&dev_priv->irq_lock);
+       }
 
-                       erq = &error->ring[i].requests[count++];
-                       erq->seqno = request->seqno;
-                       erq->jiffies = request->emitted_jiffies;
-                       erq->tail = request->tail;
-               }
+       I915_WRITE(DEIER, de_ier);
+       POSTING_READ(DEIER);
+       if (!HAS_PCH_NOP(dev)) {
+               I915_WRITE(SDEIER, sde_ier);
+               POSTING_READ(SDEIER);
        }
+
+       return ret;
 }
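
The rewritten handler brackets all IIR processing between masking the DE master control (plus SDEIER) and restoring both at the end; the comment above explains why a south interrupt queued behind SDEIIR re-fires once SDEIER is restored. Below is a minimal, compilable userspace model of that bracketing scheme; the regs[] array, mmio_read/mmio_write helpers, and the single DEIIR path are stand-ins for illustration, not i915 symbols.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum { DEIER, DEIIR, SDEIER };		/* toy register file indices */
static uint32_t regs[3];

static uint32_t mmio_read(int r) { return regs[r]; }
static void mmio_write(int r, uint32_t v) { regs[r] = v; }

#define DE_MASTER_IRQ_CONTROL (1u << 31)

static bool model_irq_handler(void)
{
	bool handled = false;
	uint32_t de_ier, sde_ier, de_iir;

	/* 1) Mask the master control and zero SDEIER so anything arriving
	 *    mid-handler queues up behind SDEIIR instead of re-firing. */
	de_ier = mmio_read(DEIER);
	sde_ier = mmio_read(SDEIER);
	mmio_write(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	mmio_write(SDEIER, 0);

	/* 2) Read each IIR and only ack/dispatch when non-zero, mirroring
	 *    the gt_iir/de_iir/pm_iir checks in the real handler. */
	de_iir = mmio_read(DEIIR);
	if (de_iir) {
		/* per-bit dispatch would go here */
		mmio_write(DEIIR, de_iir);	/* W1C in hardware; plain store here */
		handled = true;
	}

	/* 3) Restore the enables; a south interrupt queued in step 2 now
	 *    re-fires and gets handled on the next pass. */
	mmio_write(SDEIER, sde_ier);
	mmio_write(DEIER, de_ier);
	return handled;
}

int main(void)
{
	regs[DEIER] = DE_MASTER_IRQ_CONTROL;
	regs[DEIIR] = 0x1;			/* pretend one event is pending */
	printf("handled=%d\n", model_irq_handler());
	return 0;
}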
 
 /**
- * i915_capture_error_state - capture an error record for later analysis
- * @dev: drm device
+ * i915_error_work_func - do process context error handling work
+ * @work: work struct
  *
- * Should be called when an error is detected (either a hang or an error
- * interrupt) to capture error state from the time of the error.  Fills
- * out a structure which becomes available in debugfs for user level tools
- * to pick up.
+ * Fire an error uevent so userspace can see that a hang or error
+ * was detected.
  */
-static void i915_capture_error_state(struct drm_device *dev)
+static void i915_error_work_func(struct work_struct *work)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_i915_gem_object *obj;
-       struct drm_i915_error_state *error;
-       unsigned long flags;
-       int i, pipe;
-
-       spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
-       error = dev_priv->gpu_error.first_error;
-       spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
-       if (error)
-               return;
-
-       /* Account for pipe specific data like PIPE*STAT */
-       error = kzalloc(sizeof(*error), GFP_ATOMIC);
-       if (!error) {
-               DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
-               return;
-       }
-
-       DRM_INFO("capturing error event; look for more information in "
-                "/sys/kernel/debug/dri/%d/i915_error_state\n",
-                dev->primary->index);
-
-       kref_init(&error->ref);
-       error->eir = I915_READ(EIR);
-       error->pgtbl_er = I915_READ(PGTBL_ER);
-       if (HAS_HW_CONTEXTS(dev))
-               error->ccid = I915_READ(CCID);
-
-       if (HAS_PCH_SPLIT(dev))
-               error->ier = I915_READ(DEIER) | I915_READ(GTIER);
-       else if (IS_VALLEYVIEW(dev))
-               error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
-       else if (IS_GEN2(dev))
-               error->ier = I915_READ16(IER);
-       else
-               error->ier = I915_READ(IER);
-
-       if (INTEL_INFO(dev)->gen >= 6)
-               error->derrmr = I915_READ(DERRMR);
-
-       if (IS_VALLEYVIEW(dev))
-               error->forcewake = I915_READ(FORCEWAKE_VLV);
-       else if (INTEL_INFO(dev)->gen >= 7)
-               error->forcewake = I915_READ(FORCEWAKE_MT);
-       else if (INTEL_INFO(dev)->gen == 6)
-               error->forcewake = I915_READ(FORCEWAKE);
-
-       if (!HAS_PCH_SPLIT(dev))
-               for_each_pipe(pipe)
-                       error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
-
-       if (INTEL_INFO(dev)->gen >= 6) {
-               error->error = I915_READ(ERROR_GEN6);
-               error->done_reg = I915_READ(DONE_REG);
-       }
-
-       if (INTEL_INFO(dev)->gen == 7)
-               error->err_int = I915_READ(GEN7_ERR_INT);
-
-       i915_get_extra_instdone(dev, error->extra_instdone);
-
-       i915_gem_record_fences(dev, error);
-       i915_gem_record_rings(dev, error);
+       struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
+                                                   work);
+       drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t,
+                                                   gpu_error);
+       struct drm_device *dev = dev_priv->dev;
+       struct intel_ring_buffer *ring;
+       char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
+       char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
+       char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
+       int i, ret;
 
-       /* Record buffers on the active and pinned lists. */
-       error->active_bo = NULL;
-       error->pinned_bo = NULL;
+       kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
 
-       i = 0;
-       list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
-               i++;
-       error->active_bo_count = i;
-       list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
-               if (obj->pin_count)
-                       i++;
-       error->pinned_bo_count = i - error->active_bo_count;
+       /*
+        * Note that there's only one work item which does gpu resets, so we
+        * need not worry about concurrent gpu resets potentially incrementing
+        * error->reset_counter twice. We only need to take care of another
+        * racing irq/hangcheck declaring the gpu dead for a second time. A
+        * quick check for that is good enough: schedule_work ensures the
+        * correct ordering between hang detection and this work item, and since
+        * the reset in-progress bit is only ever set by code outside of this
+        * work we don't need to worry about any other races.
+        */
+       if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
+               DRM_DEBUG_DRIVER("resetting chip\n");
+               kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE,
+                                  reset_event);
 
-       error->active_bo = NULL;
-       error->pinned_bo = NULL;
-       if (i) {
-               error->active_bo = kmalloc(sizeof(*error->active_bo)*i,
-                                          GFP_ATOMIC);
-               if (error->active_bo)
-                       error->pinned_bo =
-                               error->active_bo + error->active_bo_count;
-       }
+               ret = i915_reset(dev);
 
-       if (error->active_bo)
-               error->active_bo_count =
-                       capture_active_bo(error->active_bo,
-                                         error->active_bo_count,
-                                         &dev_priv->mm.active_list);
+               if (ret == 0) {
+                       /*
+                        * After all the gem state is reset, increment the reset
+                        * counter and wake up everyone waiting for the reset to
+                        * complete.
+                        *
+                        * Since unlock operations are a one-sided barrier only,
+                        * we need to insert a barrier here to order any seqno
+                        * updates before the counter increment.
+                        */
+                       smp_mb__before_atomic_inc();
+                       atomic_inc(&dev_priv->gpu_error.reset_counter);
 
-       if (error->pinned_bo)
-               error->pinned_bo_count =
-                       capture_pinned_bo(error->pinned_bo,
-                                         error->pinned_bo_count,
-                                         &dev_priv->mm.bound_list);
+                       kobject_uevent_env(&dev->primary->kdev.kobj,
+                                          KOBJ_CHANGE, reset_done_event);
+               } else {
+                       atomic_set(&error->reset_counter, I915_WEDGED);
+               }
 
-       do_gettimeofday(&error->time);
+               for_each_ring(ring, dev_priv, i)
+                       wake_up_all(&ring->irq_queue);
 
-       error->overlay = intel_overlay_capture_error_state(dev);
-       error->display = intel_display_capture_error_state(dev);
+               intel_display_handle_reset(dev);
 
-       spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
-       if (dev_priv->gpu_error.first_error == NULL) {
-               dev_priv->gpu_error.first_error = error;
-               error = NULL;
+               wake_up_all(&dev_priv->gpu_error.reset_queue);
        }
-       spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
-
-       if (error)
-               i915_error_state_free(&error->ref);
 }
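
The comment above carries the key invariant: one work item performs resets, and waiters observe completion through reset_counter. Here is a rough C11 model of the completion step, using invented names; atomic_thread_fence() stands in for smp_mb__before_atomic_inc(), and none of this is the kernel's actual API.

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint reset_counter;

/* Waiters snapshot the counter before sleeping; seeing it change on
 * wakeup means a reset completed while they slept. */
static unsigned waiter_snapshot(void)
{
	return atomic_load(&reset_counter);
}

static void complete_reset_model(void)
{
	/* Order all post-reset seqno/state writes before the increment,
	 * since the increment itself is relaxed. */
	atomic_thread_fence(memory_order_seq_cst);
	atomic_fetch_add_explicit(&reset_counter, 1, memory_order_relaxed);
	/* the real code now does wake_up_all() on every ring's irq_queue
	 * and on gpu_error.reset_queue */
}

int main(void)
{
	unsigned before = waiter_snapshot();
	complete_reset_model();
	printf("reset seen: %d\n", waiter_snapshot() != before);
	return 0;
}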
 
-void i915_destroy_error_state(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_i915_error_state *error;
-       unsigned long flags;
-
-       spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
-       error = dev_priv->gpu_error.first_error;
-       dev_priv->gpu_error.first_error = NULL;
-       spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
-
-       if (error)
-               kref_put(&error->ref, i915_error_state_free);
-}
-#else
-#define i915_capture_error_state(x)
-#endif
-
 static void i915_report_and_clear_eir(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2155,10 +1611,10 @@ static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, in
        if (INTEL_INFO(dev)->gen >= 4) {
                int dspsurf = DSPSURF(intel_crtc->plane);
                stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
-                                       obj->gtt_offset;
+                                       i915_gem_obj_ggtt_offset(obj);
        } else {
                int dspaddr = DSPADDR(intel_crtc->plane);
-               stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
+               stall_detected = I915_READ(dspaddr) == (i915_gem_obj_ggtt_offset(obj) +
                                                        crtc->y * crtc->fb->pitches[0] +
                                                        crtc->x * crtc->fb->bits_per_pixel/8);
        }
@@ -2202,29 +1658,14 @@ static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long irqflags;
+       uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
+                                                    DE_PIPE_VBLANK_ILK(pipe);
 
        if (!i915_pipe_enabled(dev, pipe))
                return -EINVAL;
 
        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-       ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
-                                   DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
-       spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
-
-       return 0;
-}
-
-static int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
-{
-       drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-       unsigned long irqflags;
-
-       if (!i915_pipe_enabled(dev, pipe))
-               return -EINVAL;
-
-       spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-       ironlake_enable_display_irq(dev_priv,
-                                   DE_PIPEA_VBLANK_IVB << (5 * pipe));
+       ironlake_enable_display_irq(dev_priv, bit);
        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 
        return 0;
@@ -2275,21 +1716,11 @@ static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long irqflags;
+       uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
+                                                    DE_PIPE_VBLANK_ILK(pipe);
 
        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-       ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
-                                    DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
-       spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
-}
-
-static void ivybridge_disable_vblank(struct drm_device *dev, int pipe)
-{
-       drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-       unsigned long irqflags;
-
-       spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-       ironlake_disable_display_irq(dev_priv,
-                                    DE_PIPEA_VBLANK_IVB << (pipe * 5));
+       ironlake_disable_display_irq(dev_priv, bit);
        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 }
 
@@ -2536,9 +1967,17 @@ void i915_hangcheck_elapsed(unsigned long data)
        if (busy_count)
                /* Reset timer in case the chip hangs without another
                 * request being added */
-               mod_timer(&dev_priv->gpu_error.hangcheck_timer,
-                         round_jiffies_up(jiffies +
-                                          DRM_I915_HANGCHECK_JIFFIES));
+               i915_queue_hangcheck(dev);
+}
+
+void i915_queue_hangcheck(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       if (!i915_enable_hangcheck)
+               return;
+
+       mod_timer(&dev_priv->gpu_error.hangcheck_timer,
+                 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
 }
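
i915_queue_hangcheck() centralizes the mod_timer() call so every path that adds work re-arms the watchdog identically, and the i915_enable_hangcheck module option is checked in one place. The rounding matters: round_jiffies_up() pushes the deadline out to a whole-second boundary so periodic re-arms coalesce. A rough userspace model of the arithmetic follows; the HZ value and the 1500 ms period are assumptions for illustration, not values taken from this tree.

#include <stdio.h>

#define HZ 250					/* assumed tick rate */
#define HANGCHECK_JIFFIES ((1500 * HZ) / 1000)	/* assumed 1.5 s period */

/* Simplified: the real round_jiffies_up() also accounts for per-cpu skew. */
static unsigned long round_jiffies_up_model(unsigned long j)
{
	return ((j + HZ - 1) / HZ) * HZ;	/* next whole second */
}

int main(void)
{
	unsigned long jiffies = 100042;		/* arbitrary "now" */
	unsigned long deadline =
		round_jiffies_up_model(jiffies + HANGCHECK_JIFFIES);
	printf("re-arm at jiffy %lu (now %lu)\n", deadline, jiffies);
	return 0;
}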
 
 static void ibx_irq_preinstall(struct drm_device *dev)
@@ -2560,31 +1999,26 @@ static void ibx_irq_preinstall(struct drm_device *dev)
        POSTING_READ(SDEIER);
 }
 
-/* drm_dma.h hooks
-*/
-static void ironlake_irq_preinstall(struct drm_device *dev)
+static void gen5_gt_irq_preinstall(struct drm_device *dev)
 {
-       drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-
-       atomic_set(&dev_priv->irq_received, 0);
-
-       I915_WRITE(HWSTAM, 0xeffe);
-
-       /* XXX hotplug from PCH */
-
-       I915_WRITE(DEIMR, 0xffffffff);
-       I915_WRITE(DEIER, 0x0);
-       POSTING_READ(DEIER);
+       struct drm_i915_private *dev_priv = dev->dev_private;
 
        /* and GT */
        I915_WRITE(GTIMR, 0xffffffff);
        I915_WRITE(GTIER, 0x0);
        POSTING_READ(GTIER);
 
-       ibx_irq_preinstall(dev);
+       if (INTEL_INFO(dev)->gen >= 6) {
+               /* and PM */
+               I915_WRITE(GEN6_PMIMR, 0xffffffff);
+               I915_WRITE(GEN6_PMIER, 0x0);
+               POSTING_READ(GEN6_PMIER);
+       }
 }
 
-static void ivybridge_irq_preinstall(struct drm_device *dev)
+/* drm_dma.h hooks
+*/
+static void ironlake_irq_preinstall(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 
@@ -2592,21 +2026,11 @@ static void ivybridge_irq_preinstall(struct drm_device *dev)
 
        I915_WRITE(HWSTAM, 0xeffe);
 
-       /* XXX hotplug from PCH */
-
        I915_WRITE(DEIMR, 0xffffffff);
        I915_WRITE(DEIER, 0x0);
        POSTING_READ(DEIER);
 
-       /* and GT */
-       I915_WRITE(GTIMR, 0xffffffff);
-       I915_WRITE(GTIER, 0x0);
-       POSTING_READ(GTIER);
-
-       /* Power management */
-       I915_WRITE(GEN6_PMIMR, 0xffffffff);
-       I915_WRITE(GEN6_PMIER, 0x0);
-       POSTING_READ(GEN6_PMIER);
+       gen5_gt_irq_preinstall(dev);
 
        ibx_irq_preinstall(dev);
 }
@@ -2627,9 +2051,8 @@ static void valleyview_irq_preinstall(struct drm_device *dev)
        /* and GT */
        I915_WRITE(GTIIR, I915_READ(GTIIR));
        I915_WRITE(GTIIR, I915_READ(GTIIR));
-       I915_WRITE(GTIMR, 0xffffffff);
-       I915_WRITE(GTIER, 0x0);
-       POSTING_READ(GTIER);
+
+       gen5_gt_irq_preinstall(dev);
 
        I915_WRITE(DPINVGTT, 0xff);
 
@@ -2648,22 +2071,21 @@ static void ibx_hpd_irq_setup(struct drm_device *dev)
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        struct drm_mode_config *mode_config = &dev->mode_config;
        struct intel_encoder *intel_encoder;
-       u32 mask = ~I915_READ(SDEIMR);
-       u32 hotplug;
+       u32 hotplug_irqs, hotplug, enabled_irqs = 0;
 
        if (HAS_PCH_IBX(dev)) {
-               mask &= ~SDE_HOTPLUG_MASK;
+               hotplug_irqs = SDE_HOTPLUG_MASK;
                list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
                        if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
-                               mask |= hpd_ibx[intel_encoder->hpd_pin];
+                               enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
        } else {
-               mask &= ~SDE_HOTPLUG_MASK_CPT;
+               hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
                list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
                        if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
-                               mask |= hpd_cpt[intel_encoder->hpd_pin];
+                               enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
        }
 
-       I915_WRITE(SDEIMR, ~mask);
+       ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
 
        /*
         * Enable digital hotplug on the PCH, and configure the DP short pulse
@@ -2700,123 +2122,102 @@ static void ibx_irq_postinstall(struct drm_device *dev)
        I915_WRITE(SDEIMR, ~mask);
 }
 
-static int ironlake_irq_postinstall(struct drm_device *dev)
+static void gen5_gt_irq_postinstall(struct drm_device *dev)
 {
-       unsigned long irqflags;
-
-       drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-       /* enable the kinds of interrupts that are always enabled */
-       u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
-                          DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
-                          DE_AUX_CHANNEL_A | DE_PIPEB_FIFO_UNDERRUN |
-                          DE_PIPEA_FIFO_UNDERRUN | DE_POISON;
-       u32 gt_irqs;
-
-       dev_priv->irq_mask = ~display_mask;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       u32 pm_irqs, gt_irqs;
 
-       /* should always be able to generate an irq */
-       I915_WRITE(DEIIR, I915_READ(DEIIR));
-       I915_WRITE(DEIMR, dev_priv->irq_mask);
-       I915_WRITE(DEIER, display_mask |
-                         DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT);
-       POSTING_READ(DEIER);
+       pm_irqs = gt_irqs = 0;
 
        dev_priv->gt_irq_mask = ~0;
+       if (HAS_L3_GPU_CACHE(dev)) {
+               /* L3 parity interrupt is always unmasked. */
+               dev_priv->gt_irq_mask = ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
+               gt_irqs |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
+       }
 
-       I915_WRITE(GTIIR, I915_READ(GTIIR));
-       I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-
-       gt_irqs = GT_RENDER_USER_INTERRUPT;
-
-       if (IS_GEN6(dev))
-               gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
-       else
+       gt_irqs |= GT_RENDER_USER_INTERRUPT;
+       if (IS_GEN5(dev)) {
                gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
                           ILK_BSD_USER_INTERRUPT;
+       } else {
+               gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
+       }
 
+       I915_WRITE(GTIIR, I915_READ(GTIIR));
+       I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
        I915_WRITE(GTIER, gt_irqs);
        POSTING_READ(GTIER);
 
-       ibx_irq_postinstall(dev);
+       if (INTEL_INFO(dev)->gen >= 6) {
+               pm_irqs |= GEN6_PM_RPS_EVENTS;
 
-       if (IS_IRONLAKE_M(dev)) {
-               /* Enable PCU event interrupts
-                *
-                * spinlocking not required here for correctness since interrupt
-                * setup is guaranteed to run in single-threaded context. But we
-                * need it to make the assert_spin_locked happy. */
-               spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-               ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
-               spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
-       }
+               if (HAS_VEBOX(dev))
+                       pm_irqs |= PM_VEBOX_USER_INTERRUPT;
 
-       return 0;
+               I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
+               I915_WRITE(GEN6_PMIMR, 0xffffffff);
+               I915_WRITE(GEN6_PMIER, pm_irqs);
+               POSTING_READ(GEN6_PMIER);
+       }
 }
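
gen5_gt_irq_postinstall() repeats one ordering for the GT and PM banks alike: ack stale IIR bits, program the mask, then enable delivery, followed by a posting read. A compilable sketch of that sequence over plain memory; in hardware IIR is write-1-to-clear, so writing back the value just read acks everything pending, while in this model the write is only a placeholder.

#include <stdint.h>

struct irq_bank {
	uint32_t iir, imr, ier;
};

static void postinstall_model(struct irq_bank *b, uint32_t wanted)
{
	uint32_t pending = b->iir;

	b->iir = pending;	/* ack: W1C in hardware, a no-op in this model */
	b->imr = ~wanted;	/* unmask only the bits we will handle */
	b->ier = wanted;	/* then allow delivery */
	(void)b->ier;		/* stands in for the POSTING_READ() flush */
}

int main(void)
{
	struct irq_bank gt = { .iir = 0xffffffffu, .imr = 0xffffffffu, .ier = 0 };

	postinstall_model(&gt, 1u << 0);
	return 0;
}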
 
-static int ivybridge_irq_postinstall(struct drm_device *dev)
+static int ironlake_irq_postinstall(struct drm_device *dev)
 {
+       unsigned long irqflags;
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-       /* enable the kinds of interrupts that are always enabled */
-       u32 display_mask =
-               DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB |
-               DE_PLANEC_FLIP_DONE_IVB |
-               DE_PLANEB_FLIP_DONE_IVB |
-               DE_PLANEA_FLIP_DONE_IVB |
-               DE_AUX_CHANNEL_A_IVB |
-               DE_ERR_INT_IVB;
-       u32 pm_irqs = GEN6_PM_RPS_EVENTS;
-       u32 gt_irqs;
+       u32 display_mask, extra_mask;
+
+       if (INTEL_INFO(dev)->gen >= 7) {
+               display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
+                               DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
+                               DE_PLANEB_FLIP_DONE_IVB |
+                               DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB |
+                               DE_ERR_INT_IVB);
+               extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
+                             DE_PIPEA_VBLANK_IVB);
+
+               I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
+       } else {
+               display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
+                               DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
+                               DE_AUX_CHANNEL_A | DE_PIPEB_FIFO_UNDERRUN |
+                               DE_PIPEA_FIFO_UNDERRUN | DE_POISON);
+               extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT;
+       }
 
        dev_priv->irq_mask = ~display_mask;
 
        /* should always be able to generate an irq */
-       I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
        I915_WRITE(DEIIR, I915_READ(DEIIR));
        I915_WRITE(DEIMR, dev_priv->irq_mask);
-       I915_WRITE(DEIER,
-                  display_mask |
-                  DE_PIPEC_VBLANK_IVB |
-                  DE_PIPEB_VBLANK_IVB |
-                  DE_PIPEA_VBLANK_IVB);
+       I915_WRITE(DEIER, display_mask | extra_mask);
        POSTING_READ(DEIER);
 
-       dev_priv->gt_irq_mask = ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
-
-       I915_WRITE(GTIIR, I915_READ(GTIIR));
-       I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-
-       gt_irqs = GT_RENDER_USER_INTERRUPT | GT_BSD_USER_INTERRUPT |
-                 GT_BLT_USER_INTERRUPT | GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
-       I915_WRITE(GTIER, gt_irqs);
-       POSTING_READ(GTIER);
-
-       I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
-       if (HAS_VEBOX(dev))
-               pm_irqs |= PM_VEBOX_USER_INTERRUPT |
-                       PM_VEBOX_CS_ERROR_INTERRUPT;
-
-       /* Our enable/disable rps functions may touch these registers so
-        * make sure to set a known state for only the non-RPS bits.
-        * The RMW is extra paranoia since this should be called after being set
-        * to a known state in preinstall.
-        * */
-       I915_WRITE(GEN6_PMIMR,
-                  (I915_READ(GEN6_PMIMR) | ~GEN6_PM_RPS_EVENTS) & ~pm_irqs);
-       I915_WRITE(GEN6_PMIER,
-                  (I915_READ(GEN6_PMIER) & GEN6_PM_RPS_EVENTS) | pm_irqs);
-       POSTING_READ(GEN6_PMIER);
+       gen5_gt_irq_postinstall(dev);
 
        ibx_irq_postinstall(dev);
 
+       if (IS_IRONLAKE_M(dev)) {
+               /* Enable PCU event interrupts
+                *
+                * spinlocking not required here for correctness since interrupt
+                * setup is guaranteed to run in single-threaded context. But we
+                * need it to make the assert_spin_locked happy. */
+               spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+               ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
+               spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+       }
+
        return 0;
 }
 
 static int valleyview_irq_postinstall(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-       u32 gt_irqs;
        u32 enable_mask;
        u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
+       unsigned long irqflags;
 
        enable_mask = I915_DISPLAY_PORT_INTERRUPT;
        enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
@@ -2842,20 +2243,18 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
        I915_WRITE(PIPESTAT(1), 0xffff);
        POSTING_READ(VLV_IER);
 
+       /* Interrupt setup is already guaranteed to be single-threaded; this is
+        * just to make the assert_spin_locked check happy. */
+       spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
        i915_enable_pipestat(dev_priv, 0, pipestat_enable);
        i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
        i915_enable_pipestat(dev_priv, 1, pipestat_enable);
+       spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 
        I915_WRITE(VLV_IIR, 0xffffffff);
        I915_WRITE(VLV_IIR, 0xffffffff);
 
-       I915_WRITE(GTIIR, I915_READ(GTIIR));
-       I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-
-       gt_irqs = GT_RENDER_USER_INTERRUPT | GT_BSD_USER_INTERRUPT |
-               GT_BLT_USER_INTERRUPT;
-       I915_WRITE(GTIER, gt_irqs);
-       POSTING_READ(GTIER);
+       gen5_gt_irq_postinstall(dev);
 
        /* ack & enable invalid PTE error interrupts */
 #if 0 /* FIXME: add support to irq handler for checking these bits */
@@ -3323,6 +2722,7 @@ static int i965_irq_postinstall(struct drm_device *dev)
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        u32 enable_mask;
        u32 error_mask;
+       unsigned long irqflags;
 
        /* Unmask the interrupts that we always want on. */
        dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
@@ -3341,7 +2741,11 @@ static int i965_irq_postinstall(struct drm_device *dev)
        if (IS_G4X(dev))
                enable_mask |= I915_BSD_USER_INTERRUPT;
 
+       /* Interrupt setup is already guaranteed to be single-threaded; this is
+        * just to make the assert_spin_locked check happy. */
+       spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
        i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
+       spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 
        /*
         * Enable some error detection, note the instruction error mask
@@ -3616,15 +3020,6 @@ void intel_irq_init(struct drm_device *dev)
                dev->driver->enable_vblank = valleyview_enable_vblank;
                dev->driver->disable_vblank = valleyview_disable_vblank;
                dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
-       } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
-               /* Share uninstall handlers with ILK/SNB */
-               dev->driver->irq_handler = ivybridge_irq_handler;
-               dev->driver->irq_preinstall = ivybridge_irq_preinstall;
-               dev->driver->irq_postinstall = ivybridge_irq_postinstall;
-               dev->driver->irq_uninstall = ironlake_irq_uninstall;
-               dev->driver->enable_vblank = ivybridge_enable_vblank;
-               dev->driver->disable_vblank = ivybridge_disable_vblank;
-               dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
        } else if (HAS_PCH_SPLIT(dev)) {
                dev->driver->irq_handler = ironlake_irq_handler;
                dev->driver->irq_preinstall = ironlake_irq_preinstall;
index f2326fc60ac93d4a2f1c760333945fb33bbe0524..6caa748fa00f20da9b2934a78226f886235423b5 100644 (file)
 #define PUNIT_REG_GPU_LFM                      0xd3
 #define PUNIT_REG_GPU_FREQ_REQ                 0xd4
 #define PUNIT_REG_GPU_FREQ_STS                 0xd8
+#define   GENFREQSTATUS                                (1<<0)
 #define PUNIT_REG_MEDIA_TURBO_FREQ_REQ         0xdc
 
 #define PUNIT_FUSE_BUS2                                0xf6 /* bits 47:40 */
 #define   ERR_INT_FIFO_UNDERRUN_C      (1<<6)
 #define   ERR_INT_FIFO_UNDERRUN_B      (1<<3)
 #define   ERR_INT_FIFO_UNDERRUN_A      (1<<0)
+#define   ERR_INT_FIFO_UNDERRUN(pipe)  (1<<(pipe*3))
 
 #define FPGA_DBG               0x42300
 #define   FPGA_DBG_RM_NOCLAIM  (1<<31)
 #define _DPLL_B        (dev_priv->info->display_mmio_offset + 0x6018)
 #define DPLL(pipe) _PIPE(pipe, _DPLL_A, _DPLL_B)
 #define   DPLL_VCO_ENABLE              (1 << 31)
-#define   DPLL_DVO_HIGH_SPEED          (1 << 30)
+#define   DPLL_SDVO_HIGH_SPEED         (1 << 30)
+#define   DPLL_DVO_2X_MODE             (1 << 30)
 #define   DPLL_EXT_BUFFER_ENABLE_VLV   (1 << 30)
 #define   DPLL_SYNCLOCK_ENABLE         (1 << 29)
 #define   DPLL_REFA_CLK_ENABLE_VLV     (1 << 29)
 #define BCLRPAT(pipe) _PIPE(pipe, _BCLRPAT_A, _BCLRPAT_B)
 #define VSYNCSHIFT(trans) _TRANSCODER(trans, _VSYNCSHIFT_A, _VSYNCSHIFT_B)
 
+/* HSW eDP PSR registers */
+#define EDP_PSR_CTL                            0x64800
+#define   EDP_PSR_ENABLE                       (1<<31)
+#define   EDP_PSR_LINK_DISABLE                 (0<<27)
+#define   EDP_PSR_LINK_STANDBY                 (1<<27)
+#define   EDP_PSR_MIN_LINK_ENTRY_TIME_MASK     (3<<25)
+#define   EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES  (0<<25)
+#define   EDP_PSR_MIN_LINK_ENTRY_TIME_4_LINES  (1<<25)
+#define   EDP_PSR_MIN_LINK_ENTRY_TIME_2_LINES  (2<<25)
+#define   EDP_PSR_MIN_LINK_ENTRY_TIME_0_LINES  (3<<25)
+#define   EDP_PSR_MAX_SLEEP_TIME_SHIFT         20
+#define   EDP_PSR_SKIP_AUX_EXIT                        (1<<12)
+#define   EDP_PSR_TP1_TP2_SEL                  (0<<11)
+#define   EDP_PSR_TP1_TP3_SEL                  (1<<11)
+#define   EDP_PSR_TP2_TP3_TIME_500us           (0<<8)
+#define   EDP_PSR_TP2_TP3_TIME_100us           (1<<8)
+#define   EDP_PSR_TP2_TP3_TIME_2500us          (2<<8)
+#define   EDP_PSR_TP2_TP3_TIME_0us             (3<<8)
+#define   EDP_PSR_TP1_TIME_500us               (0<<4)
+#define   EDP_PSR_TP1_TIME_100us               (1<<4)
+#define   EDP_PSR_TP1_TIME_2500us              (2<<4)
+#define   EDP_PSR_TP1_TIME_0us                 (3<<4)
+#define   EDP_PSR_IDLE_FRAME_SHIFT             0
+
+#define EDP_PSR_AUX_CTL                        0x64810
+#define EDP_PSR_AUX_DATA1              0x64814
+#define   EDP_PSR_DPCD_COMMAND         0x80060000
+#define EDP_PSR_AUX_DATA2              0x64818
+#define   EDP_PSR_DPCD_NORMAL_OPERATION        (1<<24)
+#define EDP_PSR_AUX_DATA3              0x6481c
+#define EDP_PSR_AUX_DATA4              0x64820
+#define EDP_PSR_AUX_DATA5              0x64824
+
+#define EDP_PSR_STATUS_CTL                     0x64840
+#define   EDP_PSR_STATUS_STATE_MASK            (7<<29)
+#define   EDP_PSR_STATUS_STATE_IDLE            (0<<29)
+#define   EDP_PSR_STATUS_STATE_SRDONACK                (1<<29)
+#define   EDP_PSR_STATUS_STATE_SRDENT          (2<<29)
+#define   EDP_PSR_STATUS_STATE_BUFOFF          (3<<29)
+#define   EDP_PSR_STATUS_STATE_BUFON           (4<<29)
+#define   EDP_PSR_STATUS_STATE_AUXACK          (5<<29)
+#define   EDP_PSR_STATUS_STATE_SRDOFFACK       (6<<29)
+#define   EDP_PSR_STATUS_LINK_MASK             (3<<26)
+#define   EDP_PSR_STATUS_LINK_FULL_OFF         (0<<26)
+#define   EDP_PSR_STATUS_LINK_FULL_ON          (1<<26)
+#define   EDP_PSR_STATUS_LINK_STANDBY          (2<<26)
+#define   EDP_PSR_STATUS_MAX_SLEEP_TIMER_SHIFT 20
+#define   EDP_PSR_STATUS_MAX_SLEEP_TIMER_MASK  0x1f
+#define   EDP_PSR_STATUS_COUNT_SHIFT           16
+#define   EDP_PSR_STATUS_COUNT_MASK            0xf
+#define   EDP_PSR_STATUS_AUX_ERROR             (1<<15)
+#define   EDP_PSR_STATUS_AUX_SENDING           (1<<12)
+#define   EDP_PSR_STATUS_SENDING_IDLE          (1<<9)
+#define   EDP_PSR_STATUS_SENDING_TP2_TP3       (1<<8)
+#define   EDP_PSR_STATUS_SENDING_TP1           (1<<4)
+#define   EDP_PSR_STATUS_IDLE_MASK             0xf
+
+#define EDP_PSR_PERF_CNT               0x64844
+#define   EDP_PSR_PERF_CNT_MASK                0xffffff
+
+#define EDP_PSR_DEBUG_CTL              0x64860
+#define   EDP_PSR_DEBUG_MASK_LPSP      (1<<27)
+#define   EDP_PSR_DEBUG_MASK_MEMUP     (1<<26)
+#define   EDP_PSR_DEBUG_MASK_HPD       (1<<25)
+
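
These PSR fields compose into a single EDP_PSR_CTL write. The sketch below shows one plausible composition; the field choices and the helper are illustrative only, not the driver's actual programming sequence, with the #defines copied from the block above so the snippet stands alone.

#include <stdint.h>
#include <stdio.h>

#define EDP_PSR_ENABLE			(1u << 31)
#define EDP_PSR_LINK_STANDBY		(1u << 27)
#define EDP_PSR_MAX_SLEEP_TIME_SHIFT	20
#define EDP_PSR_TP2_TP3_TIME_100us	(1u << 8)
#define EDP_PSR_TP1_TIME_100us		(1u << 4)
#define EDP_PSR_IDLE_FRAME_SHIFT	0

static uint32_t edp_psr_ctl_value(uint32_t max_sleep, uint32_t idle_frames)
{
	return EDP_PSR_ENABLE | EDP_PSR_LINK_STANDBY |
	       (max_sleep << EDP_PSR_MAX_SLEEP_TIME_SHIFT) |
	       EDP_PSR_TP2_TP3_TIME_100us | EDP_PSR_TP1_TIME_100us |
	       (idle_frames << EDP_PSR_IDLE_FRAME_SHIFT);
}

int main(void)
{
	printf("EDP_PSR_CTL = 0x%08x\n", edp_psr_ctl_value(5, 1));
	return 0;
}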
 /* VGA port control */
 #define ADPA                   0x61100
 #define PCH_ADPA                0xe1100
  * (Haswell and newer) to see which VIDEO_DIP_DATA byte corresponds to each byte
  * of the infoframe structure specified by CEA-861. */
 #define   VIDEO_DIP_DATA_SIZE  32
+#define   VIDEO_DIP_VSC_DATA_SIZE      36
 #define VIDEO_DIP_CTL          0x61170
 /* Pre HSW: */
 #define   VIDEO_DIP_ENABLE             (1 << 31)
 #define BLC_PWM_CPU_CTL2       0x48250
 #define BLC_PWM_CPU_CTL                0x48254
 
+#define HSW_BLC_PWM2_CTL       0x48350
+
 /* PCH CTL1 is totally different, all but the below bits are reserved. CTL2 is
  * like the normal CTL from gen4 and earlier. Hooray for confusing naming. */
 #define BLC_PWM_PCH_CTL1       0xc8250
 #define   BLM_PCH_POLARITY                     (1 << 29)
 #define BLC_PWM_PCH_CTL2       0xc8254
 
+#define UTIL_PIN_CTL           0x48400
+#define   UTIL_PIN_ENABLE      (1 << 31)
+
+#define PCH_GTC_CTL            0xe7000
+#define   PCH_GTC_ENABLE       (1 << 31)
+
 /* TV port control */
 #define TV_CTL                 0x68000
 /** Enables the TV encoder */
 #define DE_PLANEA_FLIP_DONE_IVB                (1<<3)
 #define DE_PIPEA_VBLANK_IVB            (1<<0)
 
+#define DE_PIPE_VBLANK_ILK(pipe)       (1 << ((pipe * 8) + 7))
+#define DE_PIPE_VBLANK_IVB(pipe)       (1 << (pipe * 5))
+
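
The two parameterized vblank macros let ironlake_enable/disable_vblank() (above) pick the right bit by generation instead of keeping separate ivybridge_* hooks. A small self-check restating the layout: ILK-style pipes sit 8 bits apart starting at bit 7, IVB-style pipes 5 bits apart starting at bit 0. The per-pipe constants here are as recalled from this header and should be treated as an assumption.

#include <assert.h>

#define DE_PIPEA_VBLANK			(1 << 7)	/* ILK-style layout */
#define DE_PIPEB_VBLANK			(1 << 15)
#define DE_PIPEA_VBLANK_IVB		(1 << 0)	/* IVB-style layout */

#define DE_PIPE_VBLANK_ILK(pipe)	(1 << ((pipe) * 8 + 7))
#define DE_PIPE_VBLANK_IVB(pipe)	(1 << ((pipe) * 5))

int main(void)
{
	assert(DE_PIPE_VBLANK_ILK(0) == DE_PIPEA_VBLANK);
	assert(DE_PIPE_VBLANK_ILK(1) == DE_PIPEB_VBLANK);
	assert(DE_PIPE_VBLANK_IVB(0) == DE_PIPEA_VBLANK_IVB);
	return 0;
}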
 #define VLV_MASTER_IER                 0x4400c /* Gunit master IER */
 #define   MASTER_INTERRUPT_ENABLE      (1<<31)
 
 #define  SERR_INT_TRANS_C_FIFO_UNDERRUN        (1<<6)
 #define  SERR_INT_TRANS_B_FIFO_UNDERRUN        (1<<3)
 #define  SERR_INT_TRANS_A_FIFO_UNDERRUN        (1<<0)
+#define  SERR_INT_TRANS_FIFO_UNDERRUN(pipe)    (1<<(pipe*3))
 
 /* digital port hotplug */
 #define PCH_PORT_HOTPLUG        0xc4030                /* SHOTPLUG_CTL */
 #define HSW_TVIDEO_DIP_VSC_DATA(trans) \
         _TRANSCODER(trans, HSW_VIDEO_DIP_VSC_DATA_A, HSW_VIDEO_DIP_VSC_DATA_B)
 
+#define HSW_STEREO_3D_CTL_A    0x70020
+#define   S3D_ENABLE           (1<<31)
+#define HSW_STEREO_3D_CTL_B    0x71020
+
+#define HSW_STEREO_3D_CTL(trans) \
+       _TRANSCODER(trans, HSW_STEREO_3D_CTL_A, HSW_STEREO_3D_CTL_A)
+
 #define _PCH_TRANS_HTOTAL_B          0xe1000
 #define _PCH_TRANS_HBLANK_B          0xe1004
 #define _PCH_TRANS_HSYNC_B           0xe1008
 #define  GT_FIFO_FREE_ENTRIES                  0x120008
 #define    GT_FIFO_NUM_RESERVED_ENTRIES                20
 
+#define  HSW_IDICR                             0x9008
+#define    IDIHASHMSK(x)                       (((x) & 0x3f) << 16)
+#define  HSW_EDRAM_PRESENT                     0x120010
+
 #define GEN6_UCGCTL1                           0x9400
 # define GEN6_BLBUNIT_CLOCK_GATE_DISABLE               (1 << 5)
 # define GEN6_CSUNIT_CLOCK_GATE_DISABLE                        (1 << 7)
 #define  SBI_SSCAUXDIV6                                0x0610
 #define   SBI_SSCAUXDIV_FINALDIV2SEL(x)                ((x)<<4)
 #define  SBI_DBUFF0                            0x2a00
-#define   SBI_DBUFF0_ENABLE                    (1<<0)
+#define  SBI_GEN0                              0x1f00
+#define   SBI_GEN0_CFG_BUFFENABLE_DISABLE      (1<<0)
 
 /* LPT PIXCLK_GATE */
 #define PIXCLK_GATE                    0xC6020
 #define  LCPLL_CLK_FREQ_450            (0<<26)
 #define  LCPLL_CD_CLOCK_DISABLE                (1<<25)
 #define  LCPLL_CD2X_CLOCK_DISABLE      (1<<23)
+#define  LCPLL_POWER_DOWN_ALLOW                (1<<22)
 #define  LCPLL_CD_SOURCE_FCLK          (1<<21)
+#define  LCPLL_CD_SOURCE_FCLK_DONE     (1<<19)
+
+#define D_COMP                         (MCHBAR_MIRROR_BASE_SNB + 0x5F0C)
+#define  D_COMP_RCOMP_IN_PROGRESS      (1<<9)
+#define  D_COMP_COMP_FORCE             (1<<8)
+#define  D_COMP_COMP_DISABLE           (1<<0)
 
 /* Pipe WM_LINETIME - watermark line time */
 #define PIPE_WM_LINETIME_A             0x45270
index 6875b5654c63d55a65dcf639734e0de101f1f037..a777e7f3b0df924c7e7d2401b3994f6b8e963f3c 100644 (file)
@@ -409,6 +409,71 @@ static const struct attribute *gen6_attrs[] = {
        NULL,
 };
 
+static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
+                               struct bin_attribute *attr, char *buf,
+                               loff_t off, size_t count)
+{
+       struct device *kdev = container_of(kobj, struct device, kobj);
+       struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+       struct drm_device *dev = minor->dev;
+       struct i915_error_state_file_priv error_priv;
+       struct drm_i915_error_state_buf error_str;
+       ssize_t ret_count = 0;
+       int ret;
+
+       memset(&error_priv, 0, sizeof(error_priv));
+
+       ret = i915_error_state_buf_init(&error_str, count, off);
+       if (ret)
+               return ret;
+
+       error_priv.dev = dev;
+       i915_error_state_get(dev, &error_priv);
+
+       ret = i915_error_state_to_str(&error_str, &error_priv);
+       if (ret)
+               goto out;
+
+       ret_count = count < error_str.bytes ? count : error_str.bytes;
+
+       memcpy(buf, error_str.buf, ret_count);
+out:
+       i915_error_state_put(&error_priv);
+       i915_error_state_buf_release(&error_str);
+
+       return ret ?: ret_count;
+}
+
+static ssize_t error_state_write(struct file *file, struct kobject *kobj,
+                                struct bin_attribute *attr, char *buf,
+                                loff_t off, size_t count)
+{
+       struct device *kdev = container_of(kobj, struct device, kobj);
+       struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+       struct drm_device *dev = minor->dev;
+       int ret;
+
+       DRM_DEBUG_DRIVER("Resetting error state\n");
+
+       ret = mutex_lock_interruptible(&dev->struct_mutex);
+       if (ret)
+               return ret;
+
+       i915_destroy_error_state(dev);
+       mutex_unlock(&dev->struct_mutex);
+
+       return count;
+}
+
+static struct bin_attribute error_state_attr = {
+       .attr.name = "error",
+       .attr.mode = S_IRUSR | S_IWUSR,
+       .size = 0,
+       .read = error_state_read,
+       .write = error_state_write,
+};
+
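
With the bin attribute registered on the primary minor's kobject (see i915_setup_sysfs below), the captured error state becomes readable without debugfs. A minimal userspace sketch follows; the card0 path is an assumption about which minor the device was assigned.

#include <stdio.h>

int main(void)
{
	const char *path = "/sys/class/drm/card0/error";
	char buf[4096];
	size_t n;
	FILE *f = fopen(path, "r");

	if (!f) { perror(path); return 1; }
	while ((n = fread(buf, 1, sizeof(buf), f)) > 0)
		fwrite(buf, 1, n, stdout);
	fclose(f);

	f = fopen(path, "w");		/* any successful write clears the state */
	if (f) { fputs("1", f); fclose(f); }
	return 0;
}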
 void i915_setup_sysfs(struct drm_device *dev)
 {
        int ret;
@@ -432,10 +497,16 @@ void i915_setup_sysfs(struct drm_device *dev)
                if (ret)
                        DRM_ERROR("gen6 sysfs setup failed\n");
        }
+
+       ret = sysfs_create_bin_file(&dev->primary->kdev.kobj,
+                                   &error_state_attr);
+       if (ret)
+               DRM_ERROR("error_state sysfs setup failed\n");
 }
 
 void i915_teardown_sysfs(struct drm_device *dev)
 {
+       sysfs_remove_bin_file(&dev->primary->kdev.kobj, &error_state_attr);
        sysfs_remove_files(&dev->primary->kdev.kobj, gen6_attrs);
        device_remove_bin_file(&dev->primary->kdev,  &dpf_attrs);
 #ifdef CONFIG_PM
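
Taken together, error_state_read() and error_state_write() above give userspace a single sysfs node that both dumps the last captured GPU error state and, on any write, destroys it to arm a fresh capture. A minimal userspace sketch (hypothetical: it assumes the primary minor is card0, so the node is /sys/class/drm/card0/error):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            char buf[4096];
            ssize_t n;
            int fd = open("/sys/class/drm/card0/error", O_RDWR);

            if (fd < 0)
                    return 1;

            /* Repeated reads walk the buffer via the off/count arguments
             * that error_state_read() feeds to i915_error_state_buf_init(). */
            while ((n = read(fd, buf, sizeof(buf))) > 0)
                    fwrite(buf, 1, n, stdout);

            /* Any write lands in error_state_write(), which drops the
             * captured state and re-arms the next hang capture. */
            write(fd, "1", 1);
            close(fd);
            return 0;
    }
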
index 3db4a681771320f1d42f8fb023c83d73e4426fb6..7d283b5fcbf93125989aeafc588a0b43634dfa34 100644 (file)
@@ -46,8 +46,8 @@ TRACE_EVENT(i915_gem_object_bind,
 
            TP_fast_assign(
                           __entry->obj = obj;
-                          __entry->offset = obj->gtt_space->start;
-                          __entry->size = obj->gtt_space->size;
+                          __entry->offset = i915_gem_obj_ggtt_offset(obj);
+                          __entry->size = i915_gem_obj_ggtt_size(obj);
                           __entry->mappable = mappable;
                           ),
 
@@ -68,8 +68,8 @@ TRACE_EVENT(i915_gem_object_unbind,
 
            TP_fast_assign(
                           __entry->obj = obj;
-                          __entry->offset = obj->gtt_space->start;
-                          __entry->size = obj->gtt_space->size;
+                          __entry->offset = i915_gem_obj_ggtt_offset(obj);
+                          __entry->size = i915_gem_obj_ggtt_size(obj);
                           ),
 
            TP_printk("obj=%p, offset=%08x size=%x",
index 3acec8c4816606f1f2aa82bbe0ad3f5c105f4ad5..0c0d4e8d768e7c4d852931a694658609f7b1b48f 100644 (file)
@@ -613,6 +613,10 @@ intel_crt_detect(struct drm_connector *connector, bool force)
        enum drm_connector_status status;
        struct intel_load_detect_pipe tmp;
 
+       DRM_DEBUG_KMS("[CONNECTOR:%d:%s] force=%d\n",
+                     connector->base.id, drm_get_connector_name(connector),
+                     force);
+
        if (I915_HAS_HOTPLUG(dev)) {
                /* We can not rely on the HPD pin always being correctly wired
                 * up, for example many KVM do not pass it through, and so
index b042ee5c40704a8beaa1e5b99f3d6a1b2082d722..931b4bb1f9dcf57dd1320f2fe79dba28c12ff308 100644 (file)
@@ -1118,6 +1118,7 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder)
                        intel_dp_stop_link_train(intel_dp);
 
                ironlake_edp_backlight_on(intel_dp);
+               intel_edp_psr_enable(intel_dp);
        }
 
        if (intel_crtc->eld_vld && type != INTEL_OUTPUT_EDP) {
@@ -1148,6 +1149,7 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder)
        if (type == INTEL_OUTPUT_EDP) {
                struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
+               intel_edp_psr_disable(intel_dp);
                ironlake_edp_backlight_off(intel_dp);
        }
 }
index 5fb305840db89ecd80b5cd42b69ff9c35dd9432b..baaefd70cc67c814226e5879c9b427af07bc67ea 100644 (file)
@@ -45,6 +45,11 @@ bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
 static void intel_increase_pllclock(struct drm_crtc *crtc);
 static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
 
+static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
+                               struct intel_crtc_config *pipe_config);
+static void ironlake_crtc_clock_get(struct intel_crtc *crtc,
+                                   struct intel_crtc_config *pipe_config);
+
 typedef struct {
        int     min, max;
 } intel_range_t;
@@ -84,7 +89,7 @@ intel_fdi_link_freq(struct drm_device *dev)
                return 27;
 }
 
-static const intel_limit_t intel_limits_i8xx_dvo = {
+static const intel_limit_t intel_limits_i8xx_dac = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 930000, .max = 1400000 },
        .n = { .min = 3, .max = 16 },
@@ -97,6 +102,19 @@ static const intel_limit_t intel_limits_i8xx_dvo = {
                .p2_slow = 4, .p2_fast = 2 },
 };
 
+static const intel_limit_t intel_limits_i8xx_dvo = {
+       .dot = { .min = 25000, .max = 350000 },
+       .vco = { .min = 930000, .max = 1400000 },
+       .n = { .min = 3, .max = 16 },
+       .m = { .min = 96, .max = 140 },
+       .m1 = { .min = 18, .max = 26 },
+       .m2 = { .min = 6, .max = 16 },
+       .p = { .min = 4, .max = 128 },
+       .p1 = { .min = 2, .max = 33 },
+       .p2 = { .dot_limit = 165000,
+               .p2_slow = 4, .p2_fast = 4 },
+};
+
 static const intel_limit_t intel_limits_i8xx_lvds = {
        .dot = { .min = 25000, .max = 350000 },
        .vco = { .min = 930000, .max = 1400000 },
@@ -405,8 +423,10 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
        } else {
                if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
                        limit = &intel_limits_i8xx_lvds;
-               else
+               else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO))
                        limit = &intel_limits_i8xx_dvo;
+               else
+                       limit = &intel_limits_i8xx_dac;
        }
        return limit;
 }
@@ -892,8 +912,8 @@ static const char *state_string(bool enabled)
 }
 
 /* Only for pre-ILK configs */
-static void assert_pll(struct drm_i915_private *dev_priv,
-                      enum pipe pipe, bool state)
+void assert_pll(struct drm_i915_private *dev_priv,
+               enum pipe pipe, bool state)
 {
        int reg;
        u32 val;
@@ -906,10 +926,8 @@ static void assert_pll(struct drm_i915_private *dev_priv,
             "PLL state assertion failure (expected %s, current %s)\n",
             state_string(state), state_string(cur_state));
 }
-#define assert_pll_enabled(d, p) assert_pll(d, p, true)
-#define assert_pll_disabled(d, p) assert_pll(d, p, false)
 
-static struct intel_shared_dpll *
+struct intel_shared_dpll *
 intel_crtc_to_shared_dpll(struct intel_crtc *crtc)
 {
        struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
@@ -921,9 +939,9 @@ intel_crtc_to_shared_dpll(struct intel_crtc *crtc)
 }
 
 /* For ILK+ */
-static void assert_shared_dpll(struct drm_i915_private *dev_priv,
-                              struct intel_shared_dpll *pll,
-                              bool state)
+void assert_shared_dpll(struct drm_i915_private *dev_priv,
+                       struct intel_shared_dpll *pll,
+                       bool state)
 {
        bool cur_state;
        struct intel_dpll_hw_state hw_state;
@@ -942,8 +960,6 @@ static void assert_shared_dpll(struct drm_i915_private *dev_priv,
             "%s assertion failure (expected %s, current %s)\n",
             pll->name, state_string(state), state_string(cur_state));
 }
-#define assert_shared_dpll_enabled(d, p) assert_shared_dpll(d, p, true)
-#define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false)
 
 static void assert_fdi_tx(struct drm_i915_private *dev_priv,
                          enum pipe pipe, bool state)
@@ -1007,15 +1023,19 @@ static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
        WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
 }
 
-static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv,
-                                     enum pipe pipe)
+void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
+                      enum pipe pipe, bool state)
 {
        int reg;
        u32 val;
+       bool cur_state;
 
        reg = FDI_RX_CTL(pipe);
        val = I915_READ(reg);
-       WARN(!(val & FDI_RX_PLL_ENABLE), "FDI RX PLL assertion failure, should be active but is disabled\n");
+       cur_state = !!(val & FDI_RX_PLL_ENABLE);
+       WARN(cur_state != state,
+            "FDI RX PLL assertion failure (expected %s, current %s)\n",
+            state_string(state), state_string(cur_state));
 }
 
 static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
@@ -1111,7 +1131,7 @@ static void assert_planes_disabled(struct drm_i915_private *dev_priv,
        }
 
        /* Need to check both planes against the pipe */
-       for (i = 0; i < INTEL_INFO(dev)->num_pipes; i++) {
+       for_each_pipe(i) {
                reg = DSPCNTR(i);
                val = I915_READ(reg);
                cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
@@ -1301,51 +1321,92 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
        assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
 }
 
-/**
- * intel_enable_pll - enable a PLL
- * @dev_priv: i915 private structure
- * @pipe: pipe PLL to enable
- *
- * Enable @pipe's PLL so we can start pumping pixels from a plane.  Check to
- * make sure the PLL reg is writable first though, since the panel write
- * protect mechanism may be enabled.
- *
- * Note!  This is for pre-ILK only.
- *
- * Unfortunately needed by dvo_ns2501 since the dvo depends on it running.
- */
-static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
+static void vlv_enable_pll(struct intel_crtc *crtc)
 {
-       int reg;
-       u32 val;
+       struct drm_device *dev = crtc->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int reg = DPLL(crtc->pipe);
+       u32 dpll = crtc->config.dpll_hw_state.dpll;
 
-       assert_pipe_disabled(dev_priv, pipe);
+       assert_pipe_disabled(dev_priv, crtc->pipe);
 
        /* No really, not for ILK+ */
-       BUG_ON(!IS_VALLEYVIEW(dev_priv->dev) && dev_priv->info->gen >= 5);
+       BUG_ON(!IS_VALLEYVIEW(dev_priv->dev));
 
        /* PLL is protected by panel, make sure we can write it */
        if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev))
-               assert_panel_unlocked(dev_priv, pipe);
+               assert_panel_unlocked(dev_priv, crtc->pipe);
 
-       reg = DPLL(pipe);
-       val = I915_READ(reg);
-       val |= DPLL_VCO_ENABLE;
+       I915_WRITE(reg, dpll);
+       POSTING_READ(reg);
+       udelay(150);
+
+       if (wait_for(((I915_READ(reg) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
+               DRM_ERROR("DPLL %d failed to lock\n", crtc->pipe);
+
+       I915_WRITE(DPLL_MD(crtc->pipe), crtc->config.dpll_hw_state.dpll_md);
+       POSTING_READ(DPLL_MD(crtc->pipe));
 
        /* We do this three times for luck */
-       I915_WRITE(reg, val);
+       I915_WRITE(reg, dpll);
        POSTING_READ(reg);
        udelay(150); /* wait for warmup */
-       I915_WRITE(reg, val);
+       I915_WRITE(reg, dpll);
        POSTING_READ(reg);
        udelay(150); /* wait for warmup */
-       I915_WRITE(reg, val);
+       I915_WRITE(reg, dpll);
+       POSTING_READ(reg);
+       udelay(150); /* wait for warmup */
+}
+
+static void i9xx_enable_pll(struct intel_crtc *crtc)
+{
+       struct drm_device *dev = crtc->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int reg = DPLL(crtc->pipe);
+       u32 dpll = crtc->config.dpll_hw_state.dpll;
+
+       assert_pipe_disabled(dev_priv, crtc->pipe);
+
+       /* No really, not for ILK+ */
+       BUG_ON(dev_priv->info->gen >= 5);
+
+       /* PLL is protected by panel, make sure we can write it */
+       if (IS_MOBILE(dev) && !IS_I830(dev))
+               assert_panel_unlocked(dev_priv, crtc->pipe);
+
+       I915_WRITE(reg, dpll);
+
+       /* Wait for the clocks to stabilize. */
+       POSTING_READ(reg);
+       udelay(150);
+
+       if (INTEL_INFO(dev)->gen >= 4) {
+               I915_WRITE(DPLL_MD(crtc->pipe),
+                          crtc->config.dpll_hw_state.dpll_md);
+       } else {
+               /* The pixel multiplier can only be updated once the
+                * DPLL is enabled and the clocks are stable.
+                *
+                * So write it again.
+                */
+               I915_WRITE(reg, dpll);
+       }
+
+       /* We do this three times for luck */
+       I915_WRITE(reg, dpll);
+       POSTING_READ(reg);
+       udelay(150); /* wait for warmup */
+       I915_WRITE(reg, dpll);
+       POSTING_READ(reg);
+       udelay(150); /* wait for warmup */
+       I915_WRITE(reg, dpll);
        POSTING_READ(reg);
        udelay(150); /* wait for warmup */
 }
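
vlv_enable_pll() and i9xx_enable_pll() repeat the same three-step idiom: post the value, flush it with a read, then give the VCO time to warm up. A minimal sketch of that step, reusing the I915_WRITE/POSTING_READ/udelay names from the hunk above (the helper itself is hypothetical, not a driver function):

    static void dpll_write_and_settle(struct drm_i915_private *dev_priv,
                                      int reg, u32 val)
    {
            I915_WRITE(reg, val);   /* queue the new PLL configuration */
            POSTING_READ(reg);      /* read back to flush the posted write */
            udelay(150);            /* let the VCO warm up before relying on it */
    }
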
 
 /**
- * intel_disable_pll - disable a PLL
+ * i9xx_disable_pll - disable a PLL
  * @dev_priv: i915 private structure
  * @pipe: pipe PLL to disable
  *
@@ -1353,11 +1414,8 @@ static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
  *
  * Note!  This is for pre-ILK only.
  */
-static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
+static void i9xx_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
 {
-       int reg;
-       u32 val;
-
        /* Don't disable pipe A or pipe A PLLs if needed */
        if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
                return;
@@ -1365,11 +1423,8 @@ static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
        /* Make sure the pipe isn't still relying on us */
        assert_pipe_disabled(dev_priv, pipe);
 
-       reg = DPLL(pipe);
-       val = I915_READ(reg);
-       val &= ~DPLL_VCO_ENABLE;
-       I915_WRITE(reg, val);
-       POSTING_READ(reg);
+       I915_WRITE(DPLL(pipe), 0);
+       POSTING_READ(DPLL(pipe));
 }
 
 void vlv_wait_port_ready(struct drm_i915_private *dev_priv, int port)
@@ -1942,16 +1997,17 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
                intel_crtc->dspaddr_offset = linear_offset;
        }
 
-       DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n",
-                     obj->gtt_offset, linear_offset, x, y, fb->pitches[0]);
+       DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
+                     i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
+                     fb->pitches[0]);
        I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
        if (INTEL_INFO(dev)->gen >= 4) {
                I915_MODIFY_DISPBASE(DSPSURF(plane),
-                                    obj->gtt_offset + intel_crtc->dspaddr_offset);
+                                    i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
                I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
                I915_WRITE(DSPLINOFF(plane), linear_offset);
        } else
-               I915_WRITE(DSPADDR(plane), obj->gtt_offset + linear_offset);
+               I915_WRITE(DSPADDR(plane), i915_gem_obj_ggtt_offset(obj) + linear_offset);
        POSTING_READ(reg);
 
        return 0;
@@ -2031,11 +2087,12 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
                                               fb->pitches[0]);
        linear_offset -= intel_crtc->dspaddr_offset;
 
-       DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n",
-                     obj->gtt_offset, linear_offset, x, y, fb->pitches[0]);
+       DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
+                     i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
+                     fb->pitches[0]);
        I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
        I915_MODIFY_DISPBASE(DSPSURF(plane),
-                            obj->gtt_offset + intel_crtc->dspaddr_offset);
+                            i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
        if (IS_HASWELL(dev)) {
                I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
        } else {
@@ -2183,6 +2240,20 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
                return ret;
        }
 
+       /* Update pipe size and adjust fitter if needed */
+       if (i915_fastboot) {
+               I915_WRITE(PIPESRC(intel_crtc->pipe),
+                          ((crtc->mode.hdisplay - 1) << 16) |
+                          (crtc->mode.vdisplay - 1));
+               if (!intel_crtc->config.pch_pfit.size &&
+                   (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
+                    intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
+                       I915_WRITE(PF_CTL(intel_crtc->pipe), 0);
+                       I915_WRITE(PF_WIN_POS(intel_crtc->pipe), 0);
+                       I915_WRITE(PF_WIN_SZ(intel_crtc->pipe), 0);
+               }
+       }
+
        ret = dev_priv->display.update_plane(crtc, fb, x, y);
        if (ret) {
                intel_unpin_fb_obj(to_intel_framebuffer(fb)->obj);
@@ -2203,6 +2274,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
        }
 
        intel_update_fbc(dev);
+       intel_edp_psr_update(dev);
        mutex_unlock(&dev->struct_mutex);
 
        intel_crtc_update_sarea_pos(crtc, x, y);
@@ -2927,15 +2999,8 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
        /* For PCH output, training FDI link */
        dev_priv->display.fdi_link_train(crtc);
 
-       /* XXX: pch pll's can be enabled any time before we enable the PCH
-        * transcoder, and we actually should do this to not upset any PCH
-        * transcoder that already use the clock when we share it.
-        *
-        * Note that enable_shared_dpll tries to do the right thing, but
-        * get_shared_dpll unconditionally resets the pll - we need that to have
-        * the right LVDS enable sequence. */
-       ironlake_enable_shared_dpll(intel_crtc);
-
+       /* We need to program the right clock selection before writing the pixel
+        * multiplier into the DPLL. */
        if (HAS_PCH_CPT(dev)) {
                u32 sel;
 
@@ -2949,6 +3014,15 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
                I915_WRITE(PCH_DPLL_SEL, temp);
        }
 
+       /* XXX: pch pll's can be enabled any time before we enable the PCH
+        * transcoder, and we actually should do this to not upset any PCH
+        * transcoder that already uses the clock when we share it.
+        *
+        * Note that enable_shared_dpll tries to do the right thing, but
+        * get_shared_dpll unconditionally resets the pll - we need that to have
+        * the right LVDS enable sequence. */
+       ironlake_enable_shared_dpll(intel_crtc);
+
        /* set transcoder timing, panel must allow it */
        assert_panel_unlocked(dev_priv, pipe);
        ironlake_pch_transcoder_set_timings(intel_crtc, pipe);
@@ -3031,7 +3105,7 @@ static void intel_put_shared_dpll(struct intel_crtc *crtc)
        crtc->config.shared_dpll = DPLL_ID_PRIVATE;
 }
 
-static struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc, u32 dpll, u32 fp)
+static struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc)
 {
        struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
        struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
@@ -3045,7 +3119,7 @@ static struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
 
        if (HAS_PCH_IBX(dev_priv->dev)) {
                /* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
-               i = crtc->pipe;
+               i = (enum intel_dpll_id) crtc->pipe;
                pll = &dev_priv->shared_dplls[i];
 
                DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
@@ -3061,8 +3135,8 @@ static struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
                if (pll->refcount == 0)
                        continue;
 
-               if (dpll == (I915_READ(PCH_DPLL(pll->id)) & 0x7fffffff) &&
-                   fp == I915_READ(PCH_FP0(pll->id))) {
+               if (memcmp(&crtc->config.dpll_hw_state, &pll->hw_state,
+                          sizeof(pll->hw_state)) == 0) {
                        DRM_DEBUG_KMS("CRTC:%d sharing existing %s (refcount %d, ative %d)\n",
                                      crtc->base.base.id,
                                      pll->name, pll->refcount, pll->active);
@@ -3096,13 +3170,7 @@ found:
                WARN_ON(pll->on);
                assert_shared_dpll_disabled(dev_priv, pll);
 
-               /* Wait for the clocks to stabilize before rewriting the regs */
-               I915_WRITE(PCH_DPLL(pll->id), dpll & ~DPLL_VCO_ENABLE);
-               POSTING_READ(PCH_DPLL(pll->id));
-               udelay(150);
-
-               I915_WRITE(PCH_FP0(pll->id), fp);
-               I915_WRITE(PCH_DPLL(pll->id), dpll & ~DPLL_VCO_ENABLE);
+               pll->mode_set(dev_priv, pll);
        }
        pll->refcount++;
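
PLL sharing is now decided by comparing the whole precomputed hardware state rather than re-reading PCH_DPLL/PCH_FP0. A sketch of the equality test, assuming intel_dpll_hw_state stays a plain, zero-initialized struct so a byte-wise memcmp() is safe:

    #include <linux/string.h>

    /* Two CRTCs may share a PCH PLL only if every precomputed register
     * word (dpll, dpll_md, fp0, fp1) matches bit for bit. */
    static bool dpll_hw_states_equal(const struct intel_dpll_hw_state *a,
                                     const struct intel_dpll_hw_state *b)
    {
            return memcmp(a, b, sizeof(*a)) == 0;
    }
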
 
@@ -3174,7 +3242,6 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
        struct intel_encoder *encoder;
        int pipe = intel_crtc->pipe;
        int plane = intel_crtc->plane;
-       u32 temp;
 
        WARN_ON(!crtc->enabled);
 
@@ -3188,12 +3255,9 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
 
        intel_update_watermarks(dev);
 
-       if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
-               temp = I915_READ(PCH_LVDS);
-               if ((temp & LVDS_PORT_EN) == 0)
-                       I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
-       }
-
+       for_each_encoder_on_crtc(dev, crtc, encoder)
+               if (encoder->pre_enable)
+                       encoder->pre_enable(encoder);
 
        if (intel_crtc->config.has_pch_encoder) {
                /* Note: FDI PLL enabling _must_ be done before we enable the
@@ -3205,10 +3269,6 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
                assert_fdi_rx_disabled(dev_priv, pipe);
        }
 
-       for_each_encoder_on_crtc(dev, crtc, encoder)
-               if (encoder->pre_enable)
-                       encoder->pre_enable(encoder);
-
        ironlake_pfit_enable(intel_crtc);
 
        /*
@@ -3389,7 +3449,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
        intel_crtc_wait_for_pending_flips(crtc);
        drm_vblank_off(dev, pipe);
 
-       if (dev_priv->cfb_plane == plane)
+       if (dev_priv->fbc.plane == plane)
                intel_disable_fbc(dev);
 
        intel_crtc_update_cursor(crtc, false);
@@ -3462,7 +3522,7 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
        drm_vblank_off(dev, pipe);
 
        /* FBC must be disabled before disabling the plane on HSW. */
-       if (dev_priv->cfb_plane == plane)
+       if (dev_priv->fbc.plane == plane)
                intel_disable_fbc(dev);
 
        hsw_disable_ips(intel_crtc);
@@ -3599,7 +3659,7 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
                if (encoder->pre_pll_enable)
                        encoder->pre_pll_enable(encoder);
 
-       intel_enable_pll(dev_priv, pipe);
+       vlv_enable_pll(intel_crtc);
 
        for_each_encoder_on_crtc(dev, crtc, encoder)
                if (encoder->pre_enable)
@@ -3640,12 +3700,12 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
        intel_crtc->active = true;
        intel_update_watermarks(dev);
 
-       intel_enable_pll(dev_priv, pipe);
-
        for_each_encoder_on_crtc(dev, crtc, encoder)
                if (encoder->pre_enable)
                        encoder->pre_enable(encoder);
 
+       i9xx_enable_pll(intel_crtc);
+
        i9xx_pfit_enable(intel_crtc);
 
        intel_crtc_load_lut(crtc);
@@ -3701,7 +3761,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
        intel_crtc_wait_for_pending_flips(crtc);
        drm_vblank_off(dev, pipe);
 
-       if (dev_priv->cfb_plane == plane)
+       if (dev_priv->fbc.plane == plane)
                intel_disable_fbc(dev);
 
        intel_crtc_dpms_overlay(intel_crtc, false);
@@ -3717,7 +3777,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
                if (encoder->post_disable)
                        encoder->post_disable(encoder);
 
-       intel_disable_pll(dev_priv, pipe);
+       i9xx_disable_pll(dev_priv, pipe);
 
        intel_crtc->active = false;
        intel_update_fbc(dev);
@@ -4266,14 +4326,17 @@ static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
        }
 
        I915_WRITE(FP0(pipe), fp);
+       crtc->config.dpll_hw_state.fp0 = fp;
 
        crtc->lowfreq_avail = false;
        if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
            reduced_clock && i915_powersave) {
                I915_WRITE(FP1(pipe), fp2);
+               crtc->config.dpll_hw_state.fp1 = fp2;
                crtc->lowfreq_avail = true;
        } else {
                I915_WRITE(FP1(pipe), fp);
+               crtc->config.dpll_hw_state.fp1 = fp;
        }
 }
 
@@ -4351,7 +4414,6 @@ static void vlv_update_pll(struct intel_crtc *crtc)
 {
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_encoder *encoder;
        int pipe = crtc->pipe;
        u32 dpll, mdiv;
        u32 bestn, bestm1, bestm2, bestp1, bestp2;
@@ -4407,7 +4469,7 @@ static void vlv_update_pll(struct intel_crtc *crtc)
            intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_ANALOG) ||
            intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI))
                vlv_dpio_write(dev_priv, DPIO_LPF_COEFF(pipe),
-                                0x005f0021);
+                                0x009f0003);
        else
                vlv_dpio_write(dev_priv, DPIO_LPF_COEFF(pipe),
                                 0x00d0000f);
@@ -4440,10 +4502,6 @@ static void vlv_update_pll(struct intel_crtc *crtc)
 
        vlv_dpio_write(dev_priv, DPIO_PLL_CML(pipe), 0x87871000);
 
-       for_each_encoder_on_crtc(dev, &crtc->base, encoder)
-               if (encoder->pre_pll_enable)
-                       encoder->pre_pll_enable(encoder);
-
        /* Enable DPIO clock input */
        dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV |
                DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV;
@@ -4451,17 +4509,11 @@ static void vlv_update_pll(struct intel_crtc *crtc)
                dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
 
        dpll |= DPLL_VCO_ENABLE;
-       I915_WRITE(DPLL(pipe), dpll);
-       POSTING_READ(DPLL(pipe));
-       udelay(150);
-
-       if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
-               DRM_ERROR("DPLL %d failed to lock\n", pipe);
+       crtc->config.dpll_hw_state.dpll = dpll;
 
        dpll_md = (crtc->config.pixel_multiplier - 1)
                << DPLL_MD_UDI_MULTIPLIER_SHIFT;
-       I915_WRITE(DPLL_MD(pipe), dpll_md);
-       POSTING_READ(DPLL_MD(pipe));
+       crtc->config.dpll_hw_state.dpll_md = dpll_md;
 
        if (crtc->config.has_dp_encoder)
                intel_dp_set_m_n(crtc);
@@ -4475,8 +4527,6 @@ static void i9xx_update_pll(struct intel_crtc *crtc,
 {
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_encoder *encoder;
-       int pipe = crtc->pipe;
        u32 dpll;
        bool is_sdvo;
        struct dpll *clock = &crtc->config.dpll;
@@ -4499,10 +4549,10 @@ static void i9xx_update_pll(struct intel_crtc *crtc,
        }
 
        if (is_sdvo)
-               dpll |= DPLL_DVO_HIGH_SPEED;
+               dpll |= DPLL_SDVO_HIGH_SPEED;
 
        if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT))
-               dpll |= DPLL_DVO_HIGH_SPEED;
+               dpll |= DPLL_SDVO_HIGH_SPEED;
 
        /* compute bitmask from p1 value */
        if (IS_PINEVIEW(dev))
@@ -4538,35 +4588,16 @@ static void i9xx_update_pll(struct intel_crtc *crtc,
                dpll |= PLL_REF_INPUT_DREFCLK;
 
        dpll |= DPLL_VCO_ENABLE;
-       I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
-       POSTING_READ(DPLL(pipe));
-       udelay(150);
-
-       for_each_encoder_on_crtc(dev, &crtc->base, encoder)
-               if (encoder->pre_pll_enable)
-                       encoder->pre_pll_enable(encoder);
-
-       if (crtc->config.has_dp_encoder)
-               intel_dp_set_m_n(crtc);
-
-       I915_WRITE(DPLL(pipe), dpll);
-
-       /* Wait for the clocks to stabilize. */
-       POSTING_READ(DPLL(pipe));
-       udelay(150);
+       crtc->config.dpll_hw_state.dpll = dpll;
 
        if (INTEL_INFO(dev)->gen >= 4) {
                u32 dpll_md = (crtc->config.pixel_multiplier - 1)
                        << DPLL_MD_UDI_MULTIPLIER_SHIFT;
-               I915_WRITE(DPLL_MD(pipe), dpll_md);
-       } else {
-               /* The pixel multiplier can only be updated once the
-                * DPLL is enabled and the clocks are stable.
-                *
-                * So write it again.
-                */
-               I915_WRITE(DPLL(pipe), dpll);
+               crtc->config.dpll_hw_state.dpll_md = dpll_md;
        }
+
+       if (crtc->config.has_dp_encoder)
+               intel_dp_set_m_n(crtc);
 }
 
 static void i8xx_update_pll(struct intel_crtc *crtc,
@@ -4575,8 +4606,6 @@ static void i8xx_update_pll(struct intel_crtc *crtc,
 {
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_encoder *encoder;
-       int pipe = crtc->pipe;
        u32 dpll;
        struct dpll *clock = &crtc->config.dpll;
 
@@ -4595,6 +4624,9 @@ static void i8xx_update_pll(struct intel_crtc *crtc,
                        dpll |= PLL_P2_DIVIDE_BY_4;
        }
 
+       if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DVO))
+               dpll |= DPLL_DVO_2X_MODE;
+
        if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
                 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
                dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
@@ -4602,26 +4634,7 @@ static void i8xx_update_pll(struct intel_crtc *crtc,
                dpll |= PLL_REF_INPUT_DREFCLK;
 
        dpll |= DPLL_VCO_ENABLE;
-       I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
-       POSTING_READ(DPLL(pipe));
-       udelay(150);
-
-       for_each_encoder_on_crtc(dev, &crtc->base, encoder)
-               if (encoder->pre_pll_enable)
-                       encoder->pre_pll_enable(encoder);
-
-       I915_WRITE(DPLL(pipe), dpll);
-
-       /* Wait for the clocks to stabilize. */
-       POSTING_READ(DPLL(pipe));
-       udelay(150);
-
-       /* The pixel multiplier can only be updated once the
-        * DPLL is enabled and the clocks are stable.
-        *
-        * So write it again.
-        */
-       I915_WRITE(DPLL(pipe), dpll);
+       crtc->config.dpll_hw_state.dpll = dpll;
 }
 
 static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
@@ -4727,6 +4740,27 @@ static void intel_get_pipe_timings(struct intel_crtc *crtc,
        pipe_config->requested_mode.hdisplay = ((tmp >> 16) & 0xffff) + 1;
 }
 
+static void intel_crtc_mode_from_pipe_config(struct intel_crtc *intel_crtc,
+                                            struct intel_crtc_config *pipe_config)
+{
+       struct drm_crtc *crtc = &intel_crtc->base;
+
+       crtc->mode.hdisplay = pipe_config->adjusted_mode.crtc_hdisplay;
+       crtc->mode.htotal = pipe_config->adjusted_mode.crtc_htotal;
+       crtc->mode.hsync_start = pipe_config->adjusted_mode.crtc_hsync_start;
+       crtc->mode.hsync_end = pipe_config->adjusted_mode.crtc_hsync_end;
+
+       crtc->mode.vdisplay = pipe_config->adjusted_mode.crtc_vdisplay;
+       crtc->mode.vtotal = pipe_config->adjusted_mode.crtc_vtotal;
+       crtc->mode.vsync_start = pipe_config->adjusted_mode.crtc_vsync_start;
+       crtc->mode.vsync_end = pipe_config->adjusted_mode.crtc_vsync_end;
+
+       crtc->mode.flags = pipe_config->adjusted_mode.flags;
+
+       crtc->mode.clock = pipe_config->adjusted_mode.clock;
+}
+
 static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
 {
        struct drm_device *dev = intel_crtc->base.dev;
@@ -4939,7 +4973,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t tmp;
 
-       pipe_config->cpu_transcoder = crtc->pipe;
+       pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
        pipe_config->shared_dpll = DPLL_ID_PRIVATE;
 
        tmp = I915_READ(PIPECONF(crtc->pipe));
@@ -4955,6 +4989,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
                pipe_config->pixel_multiplier =
                        ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
                         >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
+               pipe_config->dpll_hw_state.dpll_md = tmp;
        } else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
                tmp = I915_READ(DPLL(crtc->pipe));
                pipe_config->pixel_multiplier =
@@ -4966,6 +5001,16 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
                 * function. */
                pipe_config->pixel_multiplier = 1;
        }
+       pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
+       if (!IS_VALLEYVIEW(dev)) {
+               pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
+               pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
+       } else {
+               /* Mask out read-only status bits. */
+               pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
+                                                    DPLL_PORTC_READY_MASK |
+                                                    DPLL_PORTB_READY_MASK);
+       }
 
        return true;
 }
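
Masking the VLV-only status bits matters because this read-back dpll word is later compared against the value software computed at mode-set time, and the lock/ready bits would never match. A sketch of the check the mask protects (the WARN itself is hypothetical; the register and mask names come from this hunk):

    u32 sw = crtc->config.dpll_hw_state.dpll;   /* computed at mode set */
    u32 hw = I915_READ(DPLL(crtc->pipe)) & ~(DPLL_LOCK_VLV |
                                             DPLL_PORTC_READY_MASK |
                                             DPLL_PORTB_READY_MASK);
    WARN(sw != hw, "DPLL mismatch: sw 0x%08x vs hw 0x%08x\n", sw, hw);
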
@@ -5119,74 +5164,37 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
        BUG_ON(val != final);
 }
 
-/* Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O. */
-static void lpt_init_pch_refclk(struct drm_device *dev)
+static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_mode_config *mode_config = &dev->mode_config;
-       struct intel_encoder *encoder;
-       bool has_vga = false;
-       bool is_sdv = false;
-       u32 tmp;
-
-       list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
-               switch (encoder->type) {
-               case INTEL_OUTPUT_ANALOG:
-                       has_vga = true;
-                       break;
-               }
-       }
-
-       if (!has_vga)
-               return;
-
-       mutex_lock(&dev_priv->dpio_lock);
-
-       /* XXX: Rip out SDV support once Haswell ships for real. */
-       if (IS_HASWELL(dev) && (dev->pci_device & 0xFF00) == 0x0C00)
-               is_sdv = true;
-
-       tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
-       tmp &= ~SBI_SSCCTL_DISABLE;
-       tmp |= SBI_SSCCTL_PATHALT;
-       intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
-
-       udelay(24);
+       uint32_t tmp;
 
-       tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
-       tmp &= ~SBI_SSCCTL_PATHALT;
-       intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
+       tmp = I915_READ(SOUTH_CHICKEN2);
+       tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
+       I915_WRITE(SOUTH_CHICKEN2, tmp);
 
-       if (!is_sdv) {
-               tmp = I915_READ(SOUTH_CHICKEN2);
-               tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
-               I915_WRITE(SOUTH_CHICKEN2, tmp);
+       if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) &
+                              FDI_MPHY_IOSFSB_RESET_STATUS, 100))
+               DRM_ERROR("FDI mPHY reset assert timeout\n");
 
-               if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) &
-                                      FDI_MPHY_IOSFSB_RESET_STATUS, 100))
-                       DRM_ERROR("FDI mPHY reset assert timeout\n");
+       tmp = I915_READ(SOUTH_CHICKEN2);
+       tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
+       I915_WRITE(SOUTH_CHICKEN2, tmp);
 
-               tmp = I915_READ(SOUTH_CHICKEN2);
-               tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
-               I915_WRITE(SOUTH_CHICKEN2, tmp);
+       if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) &
+                               FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
+               DRM_ERROR("FDI mPHY reset de-assert timeout\n");
+}
 
-               if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) &
-                                       FDI_MPHY_IOSFSB_RESET_STATUS) == 0,
-                                      100))
-                       DRM_ERROR("FDI mPHY reset de-assert timeout\n");
-       }
+/* WaMPhyProgramming:hsw */
+static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
+{
+       uint32_t tmp;
 
        tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
        tmp &= ~(0xFF << 24);
        tmp |= (0x12 << 24);
        intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
 
-       if (is_sdv) {
-               tmp = intel_sbi_read(dev_priv, 0x800C, SBI_MPHY);
-               tmp |= 0x7FFF;
-               intel_sbi_write(dev_priv, 0x800C, tmp, SBI_MPHY);
-       }
-
        tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
        tmp |= (1 << 11);
        intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
@@ -5195,24 +5203,6 @@ static void lpt_init_pch_refclk(struct drm_device *dev)
        tmp |= (1 << 11);
        intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
 
-       if (is_sdv) {
-               tmp = intel_sbi_read(dev_priv, 0x2038, SBI_MPHY);
-               tmp |= (0x3F << 24) | (0xF << 20) | (0xF << 16);
-               intel_sbi_write(dev_priv, 0x2038, tmp, SBI_MPHY);
-
-               tmp = intel_sbi_read(dev_priv, 0x2138, SBI_MPHY);
-               tmp |= (0x3F << 24) | (0xF << 20) | (0xF << 16);
-               intel_sbi_write(dev_priv, 0x2138, tmp, SBI_MPHY);
-
-               tmp = intel_sbi_read(dev_priv, 0x203C, SBI_MPHY);
-               tmp |= (0x3F << 8);
-               intel_sbi_write(dev_priv, 0x203C, tmp, SBI_MPHY);
-
-               tmp = intel_sbi_read(dev_priv, 0x213C, SBI_MPHY);
-               tmp |= (0x3F << 8);
-               intel_sbi_write(dev_priv, 0x213C, tmp, SBI_MPHY);
-       }
-
        tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
        tmp |= (1 << 24) | (1 << 21) | (1 << 18);
        intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
@@ -5221,17 +5211,15 @@ static void lpt_init_pch_refclk(struct drm_device *dev)
        tmp |= (1 << 24) | (1 << 21) | (1 << 18);
        intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
 
-       if (!is_sdv) {
-               tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
-               tmp &= ~(7 << 13);
-               tmp |= (5 << 13);
-               intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
+       tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
+       tmp &= ~(7 << 13);
+       tmp |= (5 << 13);
+       intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
 
-               tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
-               tmp &= ~(7 << 13);
-               tmp |= (5 << 13);
-               intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
-       }
+       tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
+       tmp &= ~(7 << 13);
+       tmp |= (5 << 13);
+       intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
 
        tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
        tmp &= ~0xFF;
@@ -5253,34 +5241,120 @@ static void lpt_init_pch_refclk(struct drm_device *dev)
        tmp |= (0x1C << 16);
        intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
 
-       if (!is_sdv) {
-               tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
-               tmp |= (1 << 27);
-               intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
+       tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
+       tmp |= (1 << 27);
+       intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
+
+       tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
+       tmp |= (1 << 27);
+       intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
 
-               tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
-               tmp |= (1 << 27);
-               intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
+       tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
+       tmp &= ~(0xF << 28);
+       tmp |= (4 << 28);
+       intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
+
+       tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
+       tmp &= ~(0xF << 28);
+       tmp |= (4 << 28);
+       intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
+}
 
-               tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
-               tmp &= ~(0xF << 28);
-               tmp |= (4 << 28);
-               intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
+/* Implements 3 different sequences from BSpec chapter "Display iCLK
+ * Programming" based on the parameters passed:
+ * - Sequence to enable CLKOUT_DP
+ * - Sequence to enable CLKOUT_DP without spread
+ * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
+ */
+static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,
+                                bool with_fdi)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       uint32_t reg, tmp;
 
-               tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
-               tmp &= ~(0xF << 28);
-               tmp |= (4 << 28);
-               intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
+       if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
+               with_spread = true;
+       if (WARN(dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE &&
+                with_fdi, "LP PCH doesn't have FDI\n"))
+               with_fdi = false;
+
+       mutex_lock(&dev_priv->dpio_lock);
+
+       tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
+       tmp &= ~SBI_SSCCTL_DISABLE;
+       tmp |= SBI_SSCCTL_PATHALT;
+       intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
+
+       udelay(24);
+
+       if (with_spread) {
+               tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
+               tmp &= ~SBI_SSCCTL_PATHALT;
+               intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
+
+               if (with_fdi) {
+                       lpt_reset_fdi_mphy(dev_priv);
+                       lpt_program_fdi_mphy(dev_priv);
+               }
        }
 
-       /* ULT uses SBI_GEN0, but ULT doesn't have VGA, so we don't care. */
-       tmp = intel_sbi_read(dev_priv, SBI_DBUFF0, SBI_ICLK);
-       tmp |= SBI_DBUFF0_ENABLE;
-       intel_sbi_write(dev_priv, SBI_DBUFF0, tmp, SBI_ICLK);
+       reg = (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) ?
+              SBI_GEN0 : SBI_DBUFF0;
+       tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
+       tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
+       intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
+
+       mutex_unlock(&dev_priv->dpio_lock);
+}
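
The two booleans select between the three BSpec sequences the comment names (and the WARNs above force with_spread whenever with_fdi is set). A usage sketch of the mapping:

    lpt_enable_clkout_dp(dev, true,  true);   /* CLKOUT_DP for FDI + PCH FDI I/O */
    lpt_enable_clkout_dp(dev, true,  false);  /* CLKOUT_DP with spread */
    lpt_enable_clkout_dp(dev, false, false);  /* CLKOUT_DP without spread */
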
+
+/* Sequence to disable CLKOUT_DP */
+static void lpt_disable_clkout_dp(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       uint32_t reg, tmp;
+
+       mutex_lock(&dev_priv->dpio_lock);
+
+       reg = (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) ?
+              SBI_GEN0 : SBI_DBUFF0;
+       tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
+       tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
+       intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
+
+       tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
+       if (!(tmp & SBI_SSCCTL_DISABLE)) {
+               if (!(tmp & SBI_SSCCTL_PATHALT)) {
+                       tmp |= SBI_SSCCTL_PATHALT;
+                       intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
+                       udelay(32);
+               }
+               tmp |= SBI_SSCCTL_DISABLE;
+               intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
+       }
 
        mutex_unlock(&dev_priv->dpio_lock);
 }
 
+static void lpt_init_pch_refclk(struct drm_device *dev)
+{
+       struct drm_mode_config *mode_config = &dev->mode_config;
+       struct intel_encoder *encoder;
+       bool has_vga = false;
+
+       list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
+               switch (encoder->type) {
+               case INTEL_OUTPUT_ANALOG:
+                       has_vga = true;
+                       break;
+               }
+       }
+
+       if (has_vga)
+               lpt_enable_clkout_dp(dev, true, true);
+       else
+               lpt_disable_clkout_dp(dev);
+}
+
 /*
  * Initialize reference clocks when the driver loads
  */
@@ -5610,9 +5684,9 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
                << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
 
        if (is_sdvo)
-               dpll |= DPLL_DVO_HIGH_SPEED;
+               dpll |= DPLL_SDVO_HIGH_SPEED;
        if (intel_crtc->config.has_dp_encoder)
-               dpll |= DPLL_DVO_HIGH_SPEED;
+               dpll |= DPLL_SDVO_HIGH_SPEED;
 
        /* compute bitmask from p1 value */
        dpll |= (1 << (intel_crtc->config.dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
@@ -5708,7 +5782,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
                else
                        intel_crtc->config.dpll_hw_state.fp1 = fp;
 
-               pll = intel_get_shared_dpll(intel_crtc, dpll, fp);
+               pll = intel_get_shared_dpll(intel_crtc);
                if (pll == NULL) {
                        DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
                                         pipe_name(pipe));
@@ -5720,10 +5794,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
        if (intel_crtc->config.has_dp_encoder)
                intel_dp_set_m_n(intel_crtc);
 
-       for_each_encoder_on_crtc(dev, crtc, encoder)
-               if (encoder->pre_pll_enable)
-                       encoder->pre_pll_enable(encoder);
-
        if (is_lvds && has_reduced_clock && i915_powersave)
                intel_crtc->lowfreq_avail = true;
        else
@@ -5732,23 +5802,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
        if (intel_crtc->config.has_pch_encoder) {
                pll = intel_crtc_to_shared_dpll(intel_crtc);
 
-               I915_WRITE(PCH_DPLL(pll->id), dpll);
-
-               /* Wait for the clocks to stabilize. */
-               POSTING_READ(PCH_DPLL(pll->id));
-               udelay(150);
-
-               /* The pixel multiplier can only be updated once the
-                * DPLL is enabled and the clocks are stable.
-                *
-                * So write it again.
-                */
-               I915_WRITE(PCH_DPLL(pll->id), dpll);
-
-               if (has_reduced_clock)
-                       I915_WRITE(PCH_FP1(pll->id), fp2);
-               else
-                       I915_WRITE(PCH_FP1(pll->id), fp);
        }
 
        intel_set_pipe_timings(intel_crtc);
@@ -5820,7 +5873,7 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t tmp;
 
-       pipe_config->cpu_transcoder = crtc->pipe;
+       pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
        pipe_config->shared_dpll = DPLL_ID_PRIVATE;
 
        tmp = I915_READ(PIPECONF(crtc->pipe));
@@ -5838,12 +5891,9 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
 
                ironlake_get_fdi_m_n_config(crtc, pipe_config);
 
-               /* XXX: Can't properly read out the pch dpll pixel multiplier
-                * since we don't have state tracking for pch clocks yet. */
-               pipe_config->pixel_multiplier = 1;
-
                if (HAS_PCH_IBX(dev_priv->dev)) {
-                       pipe_config->shared_dpll = crtc->pipe;
+                       pipe_config->shared_dpll =
+                               (enum intel_dpll_id) crtc->pipe;
                } else {
                        tmp = I915_READ(PCH_DPLL_SEL);
                        if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
@@ -5856,6 +5906,11 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
 
                WARN_ON(!pll->get_hw_state(dev_priv, pll,
                                           &pipe_config->dpll_hw_state));
+
+               tmp = pipe_config->dpll_hw_state.dpll;
+               pipe_config->pixel_multiplier =
+                       ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
+                        >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
        } else {
                pipe_config->pixel_multiplier = 1;
        }
@@ -5867,6 +5922,142 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
        return true;
 }
 
+static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
+{
+       struct drm_device *dev = dev_priv->dev;
+       struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
+       struct intel_crtc *crtc;
+       unsigned long irqflags;
+       uint32_t val, pch_hpd_mask;
+
+       pch_hpd_mask = SDE_PORTB_HOTPLUG_CPT | SDE_PORTC_HOTPLUG_CPT;
+       if (!(dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE))
+               pch_hpd_mask |= SDE_PORTD_HOTPLUG_CPT | SDE_CRT_HOTPLUG_CPT;
+
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head)
+               WARN(crtc->base.enabled, "CRTC for pipe %c enabled\n",
+                    pipe_name(crtc->pipe));
+
+       WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
+       WARN(plls->spll_refcount, "SPLL enabled\n");
+       WARN(plls->wrpll1_refcount, "WRPLL1 enabled\n");
+       WARN(plls->wrpll2_refcount, "WRPLL2 enabled\n");
+       WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n");
+       WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
+            "CPU PWM1 enabled\n");
+       WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
+            "CPU PWM2 enabled\n");
+       WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
+            "PCH PWM1 enabled\n");
+       WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
+            "Utility pin enabled\n");
+       WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");
+
+       spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+       val = I915_READ(DEIMR);
+       WARN((val & ~DE_PCH_EVENT_IVB) != val,
+            "Unexpected DEIMR bits enabled: 0x%x\n", val);
+       val = I915_READ(SDEIMR);
+       WARN((val & ~pch_hpd_mask) != val,
+            "Unexpected SDEIMR bits enabled: 0x%x\n", val);
+       spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+}
+
+/*
+ * This function implements pieces of two sequences from BSpec:
+ * - Sequence for display software to disable LCPLL
+ * - Sequence for display software to allow package C8+
+ * The steps implemented here are just the steps that actually touch the LCPLL
+ * register. Callers should take care of disabling all the display engine
+ * functions, doing the mode unset, fixing interrupts, etc.
+ */
+void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
+                      bool switch_to_fclk, bool allow_power_down)
+{
+       uint32_t val;
+
+       assert_can_disable_lcpll(dev_priv);
+
+       val = I915_READ(LCPLL_CTL);
+
+       if (switch_to_fclk) {
+               val |= LCPLL_CD_SOURCE_FCLK;
+               I915_WRITE(LCPLL_CTL, val);
+
+               if (wait_for_atomic_us(I915_READ(LCPLL_CTL) &
+                                      LCPLL_CD_SOURCE_FCLK_DONE, 1))
+                       DRM_ERROR("Switching to FCLK failed\n");
+
+               val = I915_READ(LCPLL_CTL);
+       }
+
+       val |= LCPLL_PLL_DISABLE;
+       I915_WRITE(LCPLL_CTL, val);
+       POSTING_READ(LCPLL_CTL);
+
+       if (wait_for((I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK) == 0, 1))
+               DRM_ERROR("LCPLL still locked\n");
+
+       val = I915_READ(D_COMP);
+       val |= D_COMP_COMP_DISABLE;
+       I915_WRITE(D_COMP, val);
+       POSTING_READ(D_COMP);
+       ndelay(100);
+
+       if (wait_for((I915_READ(D_COMP) & D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
+               DRM_ERROR("D_COMP RCOMP still in progress\n");
+
+       if (allow_power_down) {
+               val = I915_READ(LCPLL_CTL);
+               val |= LCPLL_POWER_DOWN_ALLOW;
+               I915_WRITE(LCPLL_CTL, val);
+               POSTING_READ(LCPLL_CTL);
+       }
+}
+
+/*
+ * Fully restores LCPLL, disallowing power down and switching back to LCPLL
+ * source.
+ */
+void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
+{
+       uint32_t val;
+
+       val = I915_READ(LCPLL_CTL);
+
+       if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
+                   LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
+               return;
+
+       if (val & LCPLL_POWER_DOWN_ALLOW) {
+               val &= ~LCPLL_POWER_DOWN_ALLOW;
+               I915_WRITE(LCPLL_CTL, val);
+       }
+
+       val = I915_READ(D_COMP);
+       val |= D_COMP_COMP_FORCE;
+       val &= ~D_COMP_COMP_DISABLE;
+       I915_WRITE(D_COMP, val);
+       I915_READ(D_COMP);
+
+       val = I915_READ(LCPLL_CTL);
+       val &= ~LCPLL_PLL_DISABLE;
+       I915_WRITE(LCPLL_CTL, val);
+
+       if (wait_for(I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK, 5))
+               DRM_ERROR("LCPLL not locked yet\n");
+
+       if (val & LCPLL_CD_SOURCE_FCLK) {
+               val = I915_READ(LCPLL_CTL);
+               val &= ~LCPLL_CD_SOURCE_FCLK;
+               I915_WRITE(LCPLL_CTL, val);
+
+               if (wait_for_atomic_us((I915_READ(LCPLL_CTL) &
+                                       LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
+                       DRM_ERROR("Switching back to LCPLL failed\n");
+       }
+}
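
hsw_disable_lcpll() and hsw_restore_lcpll() are designed to bracket package C8+ residency; a hypothetical pairing that matches the parameter meanings documented above:

    /* Entry: move CDclk onto FCLK first, then allow LCPLL power down. */
    hsw_disable_lcpll(dev_priv, true, true);

    /* ... package C8+ residency ... */

    /* Exit: disallow power down, relock LCPLL, switch CDclk back. */
    hsw_restore_lcpll(dev_priv);
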
+
 static void haswell_modeset_global_resources(struct drm_device *dev)
 {
        bool enable = false;
@@ -5935,7 +6126,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
        enum intel_display_power_domain pfit_domain;
        uint32_t tmp;
 
-       pipe_config->cpu_transcoder = crtc->pipe;
+       pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
        pipe_config->shared_dpll = DPLL_ID_PRIVATE;
 
        tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
@@ -6548,7 +6739,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
                        goto fail_unpin;
                }
 
-               addr = obj->gtt_offset;
+               addr = i915_gem_obj_ggtt_offset(obj);
        } else {
                int align = IS_I830(dev) ? 16 * 1024 : 256;
                ret = i915_gem_attach_phys_object(dev, obj,
@@ -6875,11 +7066,12 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
 }
 
 /* Returns the clock of the currently programmed mode of the given pipe. */
-static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
+static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
+                               struct intel_crtc_config *pipe_config)
 {
+       struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       int pipe = intel_crtc->pipe;
+       int pipe = pipe_config->cpu_transcoder;
        u32 dpll = I915_READ(DPLL(pipe));
        u32 fp;
        intel_clock_t clock;
@@ -6918,7 +7110,8 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
                default:
                        DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
                                  "mode\n", (int)(dpll & DPLL_MODE_MASK));
-                       return 0;
+                       pipe_config->adjusted_mode.clock = 0;
+                       return;
                }
 
                if (IS_PINEVIEW(dev))
@@ -6955,12 +7148,55 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
                }
        }
 
-       /* XXX: It would be nice to validate the clocks, but we can't reuse
-        * i830PllIsValid() because it relies on the xf86_config connector
-        * configuration being accurate, which it isn't necessarily.
+       pipe_config->adjusted_mode.clock = clock.dot *
+               pipe_config->pixel_multiplier;
+}
+
+static void ironlake_crtc_clock_get(struct intel_crtc *crtc,
+                                   struct intel_crtc_config *pipe_config)
+{
+       struct drm_device *dev = crtc->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
+       int link_freq, repeat;
+       u64 clock;
+       u32 link_m, link_n;
+
+       repeat = pipe_config->pixel_multiplier;
+
+       /*
+        * The calculation for the data clock is:
+        * pixel_clock = ((m/n)*(link_clock * nr_lanes * repeat))/bpp
+        * But we want to avoid losing precision if possible, so:
+        * pixel_clock = ((m * link_clock * nr_lanes * repeat)/(n*bpp))
+        *
+        * and recovering the pixel clock from the link M/N ratio is simpler:
+        * pixel_clock = (link_m * link_clock * repeat) / link_n
         */
 
-       return clock.dot;
+       /*
+        * We need to get the FDI or DP link clock here to derive
+        * the M/N dividers.
+        *
+        * For FDI, we read it from the BIOS or use a fixed 2.7GHz.
+        * For DP, it's either 1.62GHz or 2.7GHz.
+        * We do our calculations in 10*MHz since we don't need much precision.
+        */
+       if (pipe_config->has_pch_encoder)
+               link_freq = intel_fdi_link_freq(dev) * 10000;
+       else
+               link_freq = pipe_config->port_clock;
+
+       link_m = I915_READ(PIPE_LINK_M1(cpu_transcoder));
+       link_n = I915_READ(PIPE_LINK_N1(cpu_transcoder));
+
+       if (!link_m || !link_n)
+               return;
+
+       clock = ((u64)link_m * (u64)link_freq * (u64)repeat);
+       do_div(clock, link_n);
+
+       pipe_config->adjusted_mode.clock = clock;
 }
 
 /** Returns the currently programmed mode of the given pipe. */
@@ -6971,6 +7207,7 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
        struct drm_display_mode *mode;
+       struct intel_crtc_config pipe_config;
        int htot = I915_READ(HTOTAL(cpu_transcoder));
        int hsync = I915_READ(HSYNC(cpu_transcoder));
        int vtot = I915_READ(VTOTAL(cpu_transcoder));
@@ -6980,7 +7217,18 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
        if (!mode)
                return NULL;
 
-       mode->clock = intel_crtc_clock_get(dev, crtc);
+       /*
+        * Construct a pipe_config sufficient for getting the clock info
+        * back out of crtc_clock_get.
+        *
+        * Note, if LVDS ever uses a non-1 pixel multiplier, we'll need
+        * to use a real value here instead.
+        */
+       pipe_config.cpu_transcoder = (enum transcoder) intel_crtc->pipe;
+       pipe_config.pixel_multiplier = 1;
+       i9xx_crtc_clock_get(intel_crtc, &pipe_config);
+
+       mode->clock = pipe_config.adjusted_mode.clock;
        mode->hdisplay = (htot & 0xffff) + 1;
        mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
        mode->hsync_start = (hsync & 0xffff) + 1;
@@ -7263,7 +7511,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
        intel_ring_emit(ring, MI_DISPLAY_FLIP |
                        MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
        intel_ring_emit(ring, fb->pitches[0]);
-       intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
+       intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
        intel_ring_emit(ring, 0); /* aux display base address, unused */
 
        intel_mark_page_flip_active(intel_crtc);
@@ -7304,7 +7552,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
        intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
                        MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
        intel_ring_emit(ring, fb->pitches[0]);
-       intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
+       intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
        intel_ring_emit(ring, MI_NOOP);
 
        intel_mark_page_flip_active(intel_crtc);
@@ -7344,7 +7592,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
                        MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
        intel_ring_emit(ring, fb->pitches[0]);
        intel_ring_emit(ring,
-                       (obj->gtt_offset + intel_crtc->dspaddr_offset) |
+                       (i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset) |
                        obj->tiling_mode);
 
        /* XXX Enabling the panel-fitter across page-flip is so far
@@ -7387,7 +7635,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
        intel_ring_emit(ring, MI_DISPLAY_FLIP |
                        MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
        intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
-       intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
+       intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
 
        /* Contrary to the suggestions in the documentation,
         * "Enable Panel Fitter" does not seem to be required when page
@@ -7452,7 +7700,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
 
        intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
        intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
-       intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
+       intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
        intel_ring_emit(ring, (MI_NOOP));
 
        intel_mark_page_flip_active(intel_crtc);
@@ -7806,7 +8054,8 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
 
        drm_mode_copy(&pipe_config->adjusted_mode, mode);
        drm_mode_copy(&pipe_config->requested_mode, mode);
-       pipe_config->cpu_transcoder = to_intel_crtc(crtc)->pipe;
+       pipe_config->cpu_transcoder =
+               (enum transcoder) to_intel_crtc(crtc)->pipe;
        pipe_config->shared_dpll = DPLL_ID_PRIVATE;
 
        /* Compute a starting value for pipe_config->pipe_bpp taking the source
@@ -8041,6 +8290,28 @@ intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
 
 }
 
+static bool intel_fuzzy_clock_check(struct intel_crtc_config *cur,
+                                   struct intel_crtc_config *new)
+{
+       int clock1, clock2, diff;
+
+       clock1 = cur->adjusted_mode.clock;
+       clock2 = new->adjusted_mode.clock;
+
+       if (clock1 == clock2)
+               return true;
+
+       if (!clock1 || !clock2)
+               return false;
+
+       diff = abs(clock1 - clock2);
+
+       if (((diff + clock1 + clock2) * 100) / (clock1 + clock2) < 105)
+               return true;
+
+       return false;
+}
+
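The predicate above accepts two clocks when their difference is below 5% of their sum, i.e. roughly 10% of either clock: ((diff + s) * 100) / s < 105 reduces to diff < 0.05 * s for s = clock1 + clock2. A standalone sketch with two worked cases (the clock values are invented for illustration):

    #include <stdio.h>
    #include <stdlib.h>

    /* Same shape as intel_fuzzy_clock_check() above. */
    static int fuzzy_clock_match(int clock1, int clock2)
    {
            int diff;

            if (clock1 == clock2)
                    return 1;
            if (!clock1 || !clock2)
                    return 0;

            diff = abs(clock1 - clock2);
            return ((diff + clock1 + clock2) * 100) / (clock1 + clock2) < 105;
    }

    int main(void)
    {
            /* 148500 vs 148350: ratio 100 -> match */
            printf("%d\n", fuzzy_clock_match(148500, 148350));
            /* 148500 vs 120000: ratio 110 -> mismatch */
            printf("%d\n", fuzzy_clock_match(148500, 120000));
            return 0;
    }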
 #define for_each_intel_crtc_masked(dev, mask, intel_crtc) \
        list_for_each_entry((intel_crtc), \
                            &(dev)->mode_config.crtc_list, \
@@ -8072,7 +8343,7 @@ intel_pipe_config_compare(struct drm_device *dev,
 
 #define PIPE_CONF_CHECK_FLAGS(name, mask)      \
        if ((current_config->name ^ pipe_config->name) & (mask)) { \
-               DRM_ERROR("mismatch in " #name " " \
+               DRM_ERROR("mismatch in " #name "(" #mask ") "      \
                          "(expected %i, found %i)\n", \
                          current_config->name & (mask), \
                          pipe_config->name & (mask)); \
@@ -8106,8 +8377,7 @@ intel_pipe_config_compare(struct drm_device *dev,
        PIPE_CONF_CHECK_I(adjusted_mode.crtc_vsync_start);
        PIPE_CONF_CHECK_I(adjusted_mode.crtc_vsync_end);
 
-       if (!HAS_PCH_SPLIT(dev))
-               PIPE_CONF_CHECK_I(pixel_multiplier);
+       PIPE_CONF_CHECK_I(pixel_multiplier);
 
        PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
                              DRM_MODE_FLAG_INTERLACE);
@@ -8138,6 +8408,7 @@ intel_pipe_config_compare(struct drm_device *dev,
 
        PIPE_CONF_CHECK_I(shared_dpll);
        PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
+       PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
        PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
        PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
 
@@ -8146,6 +8417,15 @@ intel_pipe_config_compare(struct drm_device *dev,
 #undef PIPE_CONF_CHECK_FLAGS
 #undef PIPE_CONF_QUIRK
 
+       if (!IS_HASWELL(dev)) {
+               if (!intel_fuzzy_clock_check(current_config, pipe_config)) {
+                       DRM_ERROR("mismatch in clock (expected %d, found %d)\n",
+                                 current_config->adjusted_mode.clock,
+                                 pipe_config->adjusted_mode.clock);
+                       return false;
+               }
+       }
+
        return true;
 }
 
@@ -8275,6 +8555,9 @@ check_crtc_state(struct drm_device *dev)
                                encoder->get_config(encoder, &pipe_config);
                }
 
+               if (dev_priv->display.get_clock)
+                       dev_priv->display.get_clock(crtc, &pipe_config);
+
                WARN(crtc->active != active,
                     "crtc active state doesn't match with hw state "
                     "(expected %i, found %i)\n", crtc->active, active);
@@ -8571,8 +8854,16 @@ intel_set_config_compute_mode_changes(struct drm_mode_set *set,
        } else if (set->crtc->fb != set->fb) {
                /* If we have no fb then treat it as a full mode set */
                if (set->crtc->fb == NULL) {
-                       DRM_DEBUG_KMS("crtc has no fb, full mode set\n");
-                       config->mode_changed = true;
+                       struct intel_crtc *intel_crtc =
+                               to_intel_crtc(set->crtc);
+
+                       if (intel_crtc->active && i915_fastboot) {
+                               DRM_DEBUG_KMS("crtc has no fb, will flip\n");
+                               config->fb_changed = true;
+                       } else {
+                               DRM_DEBUG_KMS("inactive crtc, full mode set\n");
+                               config->mode_changed = true;
+                       }
                } else if (set->fb == NULL) {
                        config->mode_changed = true;
                } else if (set->fb->pixel_format !=
@@ -8802,19 +9093,32 @@ static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
        return val & DPLL_VCO_ENABLE;
 }
 
+static void ibx_pch_dpll_mode_set(struct drm_i915_private *dev_priv,
+                                 struct intel_shared_dpll *pll)
+{
+       I915_WRITE(PCH_FP0(pll->id), pll->hw_state.fp0);
+       I915_WRITE(PCH_FP1(pll->id), pll->hw_state.fp1);
+}
+
 static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
                                struct intel_shared_dpll *pll)
 {
-       uint32_t reg, val;
-
        /* PCH refclock must be enabled first */
        assert_pch_refclk_enabled(dev_priv);
 
-       reg = PCH_DPLL(pll->id);
-       val = I915_READ(reg);
-       val |= DPLL_VCO_ENABLE;
-       I915_WRITE(reg, val);
-       POSTING_READ(reg);
+       I915_WRITE(PCH_DPLL(pll->id), pll->hw_state.dpll);
+
+       /* Wait for the clocks to stabilize. */
+       POSTING_READ(PCH_DPLL(pll->id));
+       udelay(150);
+
+       /* The pixel multiplier can only be updated once the
+        * DPLL is enabled and the clocks are stable.
+        *
+        * So write it again.
+        */
+       I915_WRITE(PCH_DPLL(pll->id), pll->hw_state.dpll);
+       POSTING_READ(PCH_DPLL(pll->id));
        udelay(200);
 }
 
@@ -8823,7 +9127,6 @@ static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
 {
        struct drm_device *dev = dev_priv->dev;
        struct intel_crtc *crtc;
-       uint32_t reg, val;
 
        /* Make sure no transcoder is still depending on us. */
        list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
@@ -8831,11 +9134,8 @@ static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
                        assert_pch_transcoder_disabled(dev_priv, crtc->pipe);
        }
 
-       reg = PCH_DPLL(pll->id);
-       val = I915_READ(reg);
-       val &= ~DPLL_VCO_ENABLE;
-       I915_WRITE(reg, val);
-       POSTING_READ(reg);
+       I915_WRITE(PCH_DPLL(pll->id), 0);
+       POSTING_READ(PCH_DPLL(pll->id));
        udelay(200);
 }
 
@@ -8854,6 +9154,7 @@ static void ibx_pch_dpll_init(struct drm_device *dev)
        for (i = 0; i < dev_priv->num_shared_dpll; i++) {
                dev_priv->shared_dplls[i].id = i;
                dev_priv->shared_dplls[i].name = ibx_pch_dpll_names[i];
+               dev_priv->shared_dplls[i].mode_set = ibx_pch_dpll_mode_set;
                dev_priv->shared_dplls[i].enable = ibx_pch_dpll_enable;
                dev_priv->shared_dplls[i].disable = ibx_pch_dpll_disable;
                dev_priv->shared_dplls[i].get_hw_state =
@@ -9270,6 +9571,7 @@ static void intel_init_display(struct drm_device *dev)
                dev_priv->display.update_plane = ironlake_update_plane;
        } else if (HAS_PCH_SPLIT(dev)) {
                dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
+               dev_priv->display.get_clock = ironlake_crtc_clock_get;
                dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
                dev_priv->display.crtc_enable = ironlake_crtc_enable;
                dev_priv->display.crtc_disable = ironlake_crtc_disable;
@@ -9277,6 +9579,7 @@ static void intel_init_display(struct drm_device *dev)
                dev_priv->display.update_plane = ironlake_update_plane;
        } else if (IS_VALLEYVIEW(dev)) {
                dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
+               dev_priv->display.get_clock = i9xx_crtc_clock_get;
                dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
                dev_priv->display.crtc_enable = valleyview_crtc_enable;
                dev_priv->display.crtc_disable = i9xx_crtc_disable;
@@ -9284,6 +9587,7 @@ static void intel_init_display(struct drm_device *dev)
                dev_priv->display.update_plane = i9xx_update_plane;
        } else {
                dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
+               dev_priv->display.get_clock = i9xx_crtc_clock_get;
                dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
                dev_priv->display.crtc_enable = i9xx_crtc_enable;
                dev_priv->display.crtc_disable = i9xx_crtc_disable;
@@ -9584,7 +9888,7 @@ void intel_modeset_init(struct drm_device *dev)
                      INTEL_INFO(dev)->num_pipes,
                      INTEL_INFO(dev)->num_pipes > 1 ? "s" : "");
 
-       for (i = 0; i < INTEL_INFO(dev)->num_pipes; i++) {
+       for_each_pipe(i) {
                intel_crtc_init(dev, i);
                for (j = 0; j < dev_priv->num_plane; j++) {
                        ret = intel_plane_init(dev, i, j);
@@ -9860,6 +10164,15 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
                              pipe);
        }
 
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list,
+                           base.head) {
+               if (!crtc->active)
+                       continue;
+               if (dev_priv->display.get_clock)
+                       dev_priv->display.get_clock(crtc,
+                                                   &crtc->config);
+       }
+
        list_for_each_entry(connector, &dev->mode_config.connector_list,
                            base.head) {
                if (connector->get_hw_state(connector)) {
@@ -9891,6 +10204,22 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
 
        intel_modeset_readout_hw_state(dev);
 
+       /*
+        * Now that we have the config, copy it to each CRTC struct
+        * Note that this could go away if we move to using crtc_config
+        * checking everywhere.
+        */
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list,
+                           base.head) {
+               if (crtc->active && i915_fastboot) {
+                       intel_crtc_mode_from_pipe_config(crtc, &crtc->config);
+
+                       DRM_DEBUG_KMS("[CRTC:%d] found active mode: ",
+                                     crtc->base.base.id);
+                       drm_mode_debug_printmodeline(&crtc->base.mode);
+               }
+       }
+
        /* HW state is read out, now we need to sanitize this mess. */
        list_for_each_entry(encoder, &dev->mode_config.encoder_list,
                            base.head) {
@@ -10033,9 +10362,6 @@ int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
        return 0;
 }
 
-#ifdef CONFIG_DEBUG_FS
-#include <linux/seq_file.h>
-
 struct intel_display_error_state {
 
        u32 power_well_driver;
@@ -10179,4 +10505,3 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m,
                err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
        }
 }
-#endif
index 26e162bb3a5158d5da3f1b5e9b2614c7515ede6b..c6996ced2e5f78b634e49a77324c504d9517033f 100644 (file)
@@ -276,29 +276,12 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
        return status;
 }
 
-static int
-intel_dp_aux_ch(struct intel_dp *intel_dp,
-               uint8_t *send, int send_bytes,
-               uint8_t *recv, int recv_size)
+static uint32_t get_aux_clock_divider(struct intel_dp *intel_dp)
 {
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
-       uint32_t ch_data = ch_ctl + 4;
-       int i, ret, recv_bytes;
-       uint32_t status;
-       uint32_t aux_clock_divider;
-       int try, precharge;
-       bool has_aux_irq = INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev);
 
-       /* dp aux is extremely sensitive to irq latency, hence request the
-        * lowest possible wakeup latency and so prevent the cpu from going into
-        * deep sleep states.
-        */
-       pm_qos_update_request(&dev_priv->pm_qos, 0);
-
-       intel_dp_check_edp(intel_dp);
        /* The clock divider is based off the hrawclk,
         * and would like to run at 2MHz. So, take the
         * hrawclk value and divide by 2 and use that
@@ -307,23 +290,48 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
         * clock divider.
         */
        if (IS_VALLEYVIEW(dev)) {
-               aux_clock_divider = 100;
+               return 100;
        } else if (intel_dig_port->port == PORT_A) {
                if (HAS_DDI(dev))
-                       aux_clock_divider = DIV_ROUND_CLOSEST(
+                       return DIV_ROUND_CLOSEST(
                                intel_ddi_get_cdclk_freq(dev_priv), 2000);
                else if (IS_GEN6(dev) || IS_GEN7(dev))
-                       aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400MHz */
+                       return 200; /* SNB & IVB eDP input clock at 400MHz */
                else
-                       aux_clock_divider = 225; /* eDP input clock at 450MHz */
+                       return 225; /* eDP input clock at 450MHz */
        } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
                /* Workaround for non-ULT HSW */
-               aux_clock_divider = 74;
+               return 74;
        } else if (HAS_PCH_SPLIT(dev)) {
-               aux_clock_divider = DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
+               return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
        } else {
-               aux_clock_divider = intel_hrawclk(dev) / 2;
+               return intel_hrawclk(dev) / 2;
        }
+}
+
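As a sanity check of the divider arithmetic, a minimal sketch using the same kernel-style rounding helpers (the clock values here are examples, not hardware readbacks):

    #include <stdio.h>

    /* Kernel-style rounding helpers, as used in the branches above. */
    #define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))
    #define DIV_ROUND_CLOSEST(n, d) (((n) + (d) / 2) / (d))

    int main(void)
    {
            /* A 450 MHz cdclk in kHz gives the 225 divider quoted above. */
            int cdclk_khz = 450000;
            printf("DDI divider: %d\n", DIV_ROUND_CLOSEST(cdclk_khz, 2000));

            /* Rounding the rawclk division up keeps the resulting AUX
             * clock at or below the 2 MHz target (249 is an invented
             * example value). */
            int pch_rawclk_mhz = 249;
            printf("PCH divider: %d\n", DIV_ROUND_UP(pch_rawclk_mhz, 2));
            return 0;
    }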
+static int
+intel_dp_aux_ch(struct intel_dp *intel_dp,
+               uint8_t *send, int send_bytes,
+               uint8_t *recv, int recv_size)
+{
+       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+       struct drm_device *dev = intel_dig_port->base.base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
+       uint32_t ch_data = ch_ctl + 4;
+       int i, ret, recv_bytes;
+       uint32_t status;
+       uint32_t aux_clock_divider = get_aux_clock_divider(intel_dp);
+       int try, precharge;
+       bool has_aux_irq = INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev);
+
+       /* dp aux is extremely sensitive to irq latency, hence request the
+        * lowest possible wakeup latency and so prevent the cpu from going into
+        * deep sleep states.
+        */
+       pm_qos_update_request(&dev_priv->pm_qos, 0);
+
+       intel_dp_check_edp(intel_dp);
 
        if (IS_GEN6(dev))
                precharge = 3;
@@ -710,8 +718,11 @@ intel_dp_compute_config(struct intel_encoder *encoder,
        /* Walk through all bpp values. Luckily they're all nicely spaced with 2
         * bpc in between. */
        bpp = pipe_config->pipe_bpp;
-       if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp)
+       if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp) {
+               DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
+                             dev_priv->vbt.edp_bpp);
                bpp = min_t(int, bpp, dev_priv->vbt.edp_bpp);
+       }
 
        for (; bpp >= 6*3; bpp -= 2*3) {
                mode_rate = intel_dp_link_required(adjusted_mode->clock, bpp);
@@ -1360,6 +1371,266 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
        }
 
        pipe_config->adjusted_mode.flags |= flags;
+
+       if (dp_to_dig_port(intel_dp)->port == PORT_A) {
+               if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
+                       pipe_config->port_clock = 162000;
+               else
+                       pipe_config->port_clock = 270000;
+       }
+}
+
+static bool is_edp_psr(struct intel_dp *intel_dp)
+{
+       return is_edp(intel_dp) &&
+               intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED;
+}
+
+static bool intel_edp_is_psr_enabled(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       if (!IS_HASWELL(dev))
+               return false;
+
+       return I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
+}
+
+static void intel_edp_psr_write_vsc(struct intel_dp *intel_dp,
+                                   struct edp_vsc_psr *vsc_psr)
+{
+       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+       struct drm_device *dev = dig_port->base.base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
+       u32 ctl_reg = HSW_TVIDEO_DIP_CTL(crtc->config.cpu_transcoder);
+       u32 data_reg = HSW_TVIDEO_DIP_VSC_DATA(crtc->config.cpu_transcoder);
+       uint32_t *data = (uint32_t *) vsc_psr;
+       unsigned int i;
+
+       /* As per BSpec (Pipe Video Data Island Packet), we need to disable
+          the video DIP being updated before programming the video DIP data
+          buffer registers for the DIP being updated. */
+       I915_WRITE(ctl_reg, 0);
+       POSTING_READ(ctl_reg);
+
+       for (i = 0; i < VIDEO_DIP_VSC_DATA_SIZE; i += 4) {
+               if (i < sizeof(struct edp_vsc_psr))
+                       I915_WRITE(data_reg + i, *data++);
+               else
+                       I915_WRITE(data_reg + i, 0);
+       }
+
+       I915_WRITE(ctl_reg, VIDEO_DIP_ENABLE_VSC_HSW);
+       POSTING_READ(ctl_reg);
+}
+
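The loop above writes the packet a dword at a time and zero-fills the remainder of the DIP buffer. A standalone sketch of the same padding pattern (the struct layout and 36-byte buffer size are assumptions for this sketch, not the driver's definitions):

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    /* Assumed size for this sketch only -- not the driver's headers. */
    #define DIP_BUF_SIZE 36

    struct vsc_packet {
            uint8_t hb[4];  /* HB0..HB3, as set up in psr_setup below */
            uint8_t db[28]; /* payload */
    };

    int main(void)
    {
            struct vsc_packet vsc;
            uint32_t dip[DIP_BUF_SIZE / 4];
            unsigned int i;

            memset(&vsc, 0, sizeof(vsc));
            vsc.hb[1] = 0x7; vsc.hb[2] = 0x2; vsc.hb[3] = 0x8;

            /* Copy a dword at a time, zero-padding past the packet --
             * the same shape as the register write loop above. */
            for (i = 0; i < DIP_BUF_SIZE; i += 4) {
                    if (i < sizeof(vsc))
                            memcpy(&dip[i / 4], (uint8_t *)&vsc + i, 4);
                    else
                            dip[i / 4] = 0;
            }
            printf("filled %u dwords\n", i / 4);
            return 0;
    }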
+static void intel_edp_psr_setup(struct intel_dp *intel_dp)
+{
+       struct drm_device *dev = intel_dp_to_dev(intel_dp);
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct edp_vsc_psr psr_vsc;
+
+       if (intel_dp->psr_setup_done)
+               return;
+
+       /* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
+       memset(&psr_vsc, 0, sizeof(psr_vsc));
+       psr_vsc.sdp_header.HB0 = 0;
+       psr_vsc.sdp_header.HB1 = 0x7;
+       psr_vsc.sdp_header.HB2 = 0x2;
+       psr_vsc.sdp_header.HB3 = 0x8;
+       intel_edp_psr_write_vsc(intel_dp, &psr_vsc);
+
+       /* Avoid continuous PSR exit by masking memup and hpd */
+       I915_WRITE(EDP_PSR_DEBUG_CTL, EDP_PSR_DEBUG_MASK_MEMUP |
+                  EDP_PSR_DEBUG_MASK_HPD);
+
+       intel_dp->psr_setup_done = true;
+}
+
+static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp)
+{
+       struct drm_device *dev = intel_dp_to_dev(intel_dp);
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       uint32_t aux_clock_divider = get_aux_clock_divider(intel_dp);
+       int precharge = 0x3;
+       int msg_size = 5;       /* Header(4) + Message(1) */
+
+       /* Enable PSR in sink */
+       if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT)
+               intel_dp_aux_native_write_1(intel_dp, DP_PSR_EN_CFG,
+                                           DP_PSR_ENABLE &
+                                           ~DP_PSR_MAIN_LINK_ACTIVE);
+       else
+               intel_dp_aux_native_write_1(intel_dp, DP_PSR_EN_CFG,
+                                           DP_PSR_ENABLE |
+                                           DP_PSR_MAIN_LINK_ACTIVE);
+
+       /* Setup AUX registers */
+       I915_WRITE(EDP_PSR_AUX_DATA1, EDP_PSR_DPCD_COMMAND);
+       I915_WRITE(EDP_PSR_AUX_DATA2, EDP_PSR_DPCD_NORMAL_OPERATION);
+       I915_WRITE(EDP_PSR_AUX_CTL,
+                  DP_AUX_CH_CTL_TIME_OUT_400us |
+                  (msg_size << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
+                  (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
+                  (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT));
+}
+
+static void intel_edp_psr_enable_source(struct intel_dp *intel_dp)
+{
+       struct drm_device *dev = intel_dp_to_dev(intel_dp);
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       uint32_t max_sleep_time = 0x1f;
+       uint32_t idle_frames = 1;
+       uint32_t val = 0x0;
+
+       if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT) {
+               val |= EDP_PSR_LINK_STANDBY;
+               val |= EDP_PSR_TP2_TP3_TIME_0us;
+               val |= EDP_PSR_TP1_TIME_0us;
+               val |= EDP_PSR_SKIP_AUX_EXIT;
+       } else
+               val |= EDP_PSR_LINK_DISABLE;
+
+       I915_WRITE(EDP_PSR_CTL, val |
+                  EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES |
+                  max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
+                  idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
+                  EDP_PSR_ENABLE);
+}
+
+static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
+{
+       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+       struct drm_device *dev = dig_port->base.base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_crtc *crtc = dig_port->base.base.crtc;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       struct drm_i915_gem_object *obj = to_intel_framebuffer(crtc->fb)->obj;
+       struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
+
+       if (!IS_HASWELL(dev)) {
+               DRM_DEBUG_KMS("PSR not supported on this platform\n");
+               dev_priv->no_psr_reason = PSR_NO_SOURCE;
+               return false;
+       }
+
+       if ((intel_encoder->type != INTEL_OUTPUT_EDP) ||
+           (dig_port->port != PORT_A)) {
+               DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n");
+               dev_priv->no_psr_reason = PSR_HSW_NOT_DDIA;
+               return false;
+       }
+
+       if (!is_edp_psr(intel_dp)) {
+               DRM_DEBUG_KMS("PSR not supported by this panel\n");
+               dev_priv->no_psr_reason = PSR_NO_SINK;
+               return false;
+       }
+
+       if (!i915_enable_psr) {
+               DRM_DEBUG_KMS("PSR disable by flag\n");
+               dev_priv->no_psr_reason = PSR_MODULE_PARAM;
+               return false;
+       }
+
+       if (!intel_crtc->active || !crtc->fb || !crtc->mode.clock) {
+               DRM_DEBUG_KMS("crtc not active for PSR\n");
+               dev_priv->no_psr_reason = PSR_CRTC_NOT_ACTIVE;
+               return false;
+       }
+
+       if (obj->tiling_mode != I915_TILING_X ||
+           obj->fence_reg == I915_FENCE_REG_NONE) {
+               DRM_DEBUG_KMS("PSR condition failed: fb not tiled or fenced\n");
+               dev_priv->no_psr_reason = PSR_NOT_TILED;
+               return false;
+       }
+
+       if (I915_READ(SPRCTL(intel_crtc->pipe)) & SPRITE_ENABLE) {
+               DRM_DEBUG_KMS("PSR condition failed: Sprite is Enabled\n");
+               dev_priv->no_psr_reason = PSR_SPRITE_ENABLED;
+               return false;
+       }
+
+       if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) &
+           S3D_ENABLE) {
+               DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
+               dev_priv->no_psr_reason = PSR_S3D_ENABLED;
+               return false;
+       }
+
+       if (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) {
+               DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
+               dev_priv->no_psr_reason = PSR_INTERLACED_ENABLED;
+               return false;
+       }
+
+       return true;
+}
+
+static void intel_edp_psr_do_enable(struct intel_dp *intel_dp)
+{
+       struct drm_device *dev = intel_dp_to_dev(intel_dp);
+
+       if (!intel_edp_psr_match_conditions(intel_dp) ||
+           intel_edp_is_psr_enabled(dev))
+               return;
+
+       /* Setup PSR once */
+       intel_edp_psr_setup(intel_dp);
+
+       /* Enable PSR on the panel */
+       intel_edp_psr_enable_sink(intel_dp);
+
+       /* Enable PSR on the host */
+       intel_edp_psr_enable_source(intel_dp);
+}
+
+void intel_edp_psr_enable(struct intel_dp *intel_dp)
+{
+       struct drm_device *dev = intel_dp_to_dev(intel_dp);
+
+       if (intel_edp_psr_match_conditions(intel_dp) &&
+           !intel_edp_is_psr_enabled(dev))
+               intel_edp_psr_do_enable(intel_dp);
+}
+
+void intel_edp_psr_disable(struct intel_dp *intel_dp)
+{
+       struct drm_device *dev = intel_dp_to_dev(intel_dp);
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       if (!intel_edp_is_psr_enabled(dev))
+               return;
+
+       I915_WRITE(EDP_PSR_CTL, I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE);
+
+       /* Wait till PSR is idle */
+       if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL) &
+                      EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
+               DRM_ERROR("Timed out waiting for PSR Idle State\n");
+}
+
+void intel_edp_psr_update(struct drm_device *dev)
+{
+       struct intel_encoder *encoder;
+       struct intel_dp *intel_dp = NULL;
+
+       list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head)
+               if (encoder->type == INTEL_OUTPUT_EDP) {
+                       intel_dp = enc_to_intel_dp(&encoder->base);
+
+                       if (!is_edp_psr(intel_dp))
+                               return;
+
+                       if (!intel_edp_psr_match_conditions(intel_dp))
+                               intel_edp_psr_disable(intel_dp);
+                       else
+                               if (!intel_edp_is_psr_enabled(dev))
+                                       intel_edp_psr_do_enable(intel_dp);
+               }
 }
 
 static void intel_disable_dp(struct intel_encoder *encoder)
@@ -2275,6 +2546,13 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
        if (intel_dp->dpcd[DP_DPCD_REV] == 0)
                return false; /* DPCD not present */
 
+       /* Check if the panel supports PSR */
+       memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
+       intel_dp_aux_native_read_retry(intel_dp, DP_PSR_SUPPORT,
+                                      intel_dp->psr_dpcd,
+                                      sizeof(intel_dp->psr_dpcd));
+       if (is_edp_psr(intel_dp))
+               DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
        if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
              DP_DWN_STRM_PORT_PRESENT))
                return true; /* native DP sink */
@@ -2542,6 +2820,9 @@ intel_dp_detect(struct drm_connector *connector, bool force)
        enum drm_connector_status status;
        struct edid *edid = NULL;
 
+       DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+                     connector->base.id, drm_get_connector_name(connector));
+
        intel_dp->has_audio = false;
 
        if (HAS_PCH_SPLIT(dev))
@@ -3166,6 +3447,8 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
        WARN(error, "intel_dp_i2c_init failed with error %d for port %c\n",
             error, port_name(port));
 
+       intel_dp->psr_setup_done = false;
+
        if (!intel_edp_init_connector(intel_dp, intel_connector)) {
                i2c_del_adapter(&intel_dp->adapter);
                if (is_edp(intel_dp)) {
index b7d6e09456ce372f8a87e8f3b1ebd75df1779e70..3fbe80bc36bbf46a93191b5e0660aec0064a3887 100644 (file)
@@ -487,6 +487,7 @@ struct intel_dp {
        uint8_t link_bw;
        uint8_t lane_count;
        uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
+       uint8_t psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE];
        uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
        struct i2c_adapter adapter;
        struct i2c_algo_dp_aux_data algo;
@@ -498,6 +499,7 @@ struct intel_dp {
        int backlight_off_delay;
        struct delayed_work panel_vdd_work;
        bool want_panel_vdd;
+       bool psr_setup_done;
        struct intel_connector *attached_connector;
 };
 
@@ -549,13 +551,6 @@ struct intel_unpin_work {
        bool enable_stall_check;
 };
 
-struct intel_fbc_work {
-       struct delayed_work work;
-       struct drm_crtc *crtc;
-       struct drm_framebuffer *fb;
-       int interval;
-};
-
 int intel_pch_rawclk(struct drm_device *dev);
 
 int intel_connector_update_modes(struct drm_connector *connector,
@@ -747,6 +742,22 @@ extern int intel_overlay_attrs(struct drm_device *dev, void *data,
 extern void intel_fb_output_poll_changed(struct drm_device *dev);
 extern void intel_fb_restore_mode(struct drm_device *dev);
 
+struct intel_shared_dpll *
+intel_crtc_to_shared_dpll(struct intel_crtc *crtc);
+
+void assert_shared_dpll(struct drm_i915_private *dev_priv,
+                       struct intel_shared_dpll *pll,
+                       bool state);
+#define assert_shared_dpll_enabled(d, p) assert_shared_dpll(d, p, true)
+#define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false)
+void assert_pll(struct drm_i915_private *dev_priv,
+               enum pipe pipe, bool state);
+#define assert_pll_enabled(d, p) assert_pll(d, p, true)
+#define assert_pll_disabled(d, p) assert_pll(d, p, false)
+void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
+                      enum pipe pipe, bool state);
+#define assert_fdi_rx_pll_enabled(d, p) assert_fdi_rx_pll(d, p, true)
+#define assert_fdi_rx_pll_disabled(d, p) assert_fdi_rx_pll(d, p, false)
 extern void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
                        bool state);
 #define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
@@ -780,7 +791,6 @@ extern int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
 extern void intel_init_pm(struct drm_device *dev);
 /* FBC */
 extern bool intel_fbc_enabled(struct drm_device *dev);
-extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval);
 extern void intel_update_fbc(struct drm_device *dev);
 /* IPS */
 extern void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
@@ -825,4 +835,11 @@ extern bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
                                                 enum transcoder pch_transcoder,
                                                 bool enable);
 
+extern void intel_edp_psr_enable(struct intel_dp *intel_dp);
+extern void intel_edp_psr_disable(struct intel_dp *intel_dp);
+extern void intel_edp_psr_update(struct drm_device *dev);
+extern void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
+                             bool switch_to_fclk, bool allow_power_down);
+extern void hsw_restore_lcpll(struct drm_i915_private *dev_priv);
+
 #endif /* __INTEL_DRV_H__ */
index eb2020eb2b7ea1c60dde15062c426182e30d9d05..8b4ad27791f338d7b4d866a3b74b2214ae83c583 100644 (file)
@@ -283,7 +283,6 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder,
        int pipe = intel_crtc->pipe;
        u32 dvo_val;
        u32 dvo_reg = intel_dvo->dev.dvo_reg, dvo_srcdim_reg;
-       int dpll_reg = DPLL(pipe);
 
        switch (dvo_reg) {
        case DVOA:
@@ -314,8 +313,6 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder,
        if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
                dvo_val |= DVO_VSYNC_ACTIVE_HIGH;
 
-       I915_WRITE(dpll_reg, I915_READ(dpll_reg) | DPLL_DVO_HIGH_SPEED);
-
        /*I915_WRITE(DVOB_SRCDIM,
          (adjusted_mode->hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) |
          (adjusted_mode->VDisplay << DVO_SRCDIM_VERTICAL_SHIFT));*/
@@ -335,6 +332,8 @@ static enum drm_connector_status
 intel_dvo_detect(struct drm_connector *connector, bool force)
 {
        struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
+       DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+                     connector->base.id, drm_get_connector_name(connector));
        return intel_dvo->dev.dev_ops->detect(&intel_dvo->dev);
 }
 
index dff669e2387f4e5aa8fd84433935e4603ec43992..f3c97e05b0d845b5c046726c2014ad6d741d6b5e 100644 (file)
@@ -139,11 +139,11 @@ static int intelfb_create(struct drm_fb_helper *helper,
        info->apertures->ranges[0].base = dev->mode_config.fb_base;
        info->apertures->ranges[0].size = dev_priv->gtt.mappable_end;
 
-       info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset;
+       info->fix.smem_start = dev->mode_config.fb_base + i915_gem_obj_ggtt_offset(obj);
        info->fix.smem_len = size;
 
        info->screen_base =
-               ioremap_wc(dev_priv->gtt.mappable_base + obj->gtt_offset,
+               ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
                           size);
        if (!info->screen_base) {
                ret = -ENOSPC;
@@ -166,9 +166,9 @@ static int intelfb_create(struct drm_fb_helper *helper,
 
        /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
 
-       DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n",
+       DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08lx, bo %p\n",
                      fb->width, fb->height,
-                     obj->gtt_offset, obj);
+                     i915_gem_obj_ggtt_offset(obj), obj);
 
 
        mutex_unlock(&dev->struct_mutex);
index 98df2a0c85bdf43f9ed545489700470392b11ca1..af18da76c04b0ceae107c0294ac41577978420b1 100644 (file)
@@ -866,6 +866,9 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
        struct edid *edid;
        enum drm_connector_status status = connector_status_disconnected;
 
+       DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+                     connector->base.id, drm_get_connector_name(connector));
+
        intel_hdmi->has_hdmi_sink = false;
        intel_hdmi->has_audio = false;
        intel_hdmi->rgb_quant_range_selectable = false;
index 61348eae2f0436a05f7b5d88fec4242db918655d..2110df24454b90c09140b4a1cd4b8ca9d2552e93 100644 (file)
@@ -122,17 +122,25 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
  * This is an exception to the general rule that mode_set doesn't turn
  * things on.
  */
-static void intel_pre_pll_enable_lvds(struct intel_encoder *encoder)
+static void intel_pre_enable_lvds(struct intel_encoder *encoder)
 {
        struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+       struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
        struct drm_display_mode *fixed_mode =
                lvds_encoder->attached_connector->base.panel.fixed_mode;
-       int pipe = intel_crtc->pipe;
+       int pipe = crtc->pipe;
        u32 temp;
 
+       if (HAS_PCH_SPLIT(dev)) {
+               assert_fdi_rx_pll_disabled(dev_priv, pipe);
+               assert_shared_dpll_disabled(dev_priv,
+                                           intel_crtc_to_shared_dpll(crtc));
+       } else {
+               assert_pll_disabled(dev_priv, pipe);
+       }
+
        temp = I915_READ(lvds_encoder->reg);
        temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
 
@@ -149,7 +157,7 @@ static void intel_pre_pll_enable_lvds(struct intel_encoder *encoder)
 
        /* set the corresponding LVDS_BORDER bit */
        temp &= ~LVDS_BORDER_ENABLE;
-       temp |= intel_crtc->config.gmch_pfit.lvds_border_bits;
+       temp |= crtc->config.gmch_pfit.lvds_border_bits;
        /* Set the B0-B3 data pairs corresponding to whether we're going to
         * set the DPLLs for dual-channel mode or not.
         */
@@ -169,8 +177,7 @@ static void intel_pre_pll_enable_lvds(struct intel_encoder *encoder)
        if (INTEL_INFO(dev)->gen == 4) {
                /* Bspec wording suggests that LVDS port dithering only exists
                 * for 18bpp panels. */
-               if (intel_crtc->config.dither &&
-                   intel_crtc->config.pipe_bpp == 18)
+               if (crtc->config.dither && crtc->config.pipe_bpp == 18)
                        temp |= LVDS_ENABLE_DITHER;
                else
                        temp &= ~LVDS_ENABLE_DITHER;
@@ -336,6 +343,9 @@ intel_lvds_detect(struct drm_connector *connector, bool force)
        struct drm_device *dev = connector->dev;
        enum drm_connector_status status;
 
+       DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+                     connector->base.id, drm_get_connector_name(connector));
+
        status = intel_panel_detect(dev);
        if (status != connector_status_unknown)
                return status;
@@ -959,7 +969,7 @@ void intel_lvds_init(struct drm_device *dev)
                         DRM_MODE_ENCODER_LVDS);
 
        intel_encoder->enable = intel_enable_lvds;
-       intel_encoder->pre_pll_enable = intel_pre_pll_enable_lvds;
+       intel_encoder->pre_enable = intel_pre_enable_lvds;
        intel_encoder->compute_config = intel_lvds_compute_config;
        intel_encoder->disable = intel_disable_lvds;
        intel_encoder->get_hw_state = intel_lvds_get_hw_state;
index a3698812e9c7831f75048508b4b38ebf93ecce33..9ec5a4e12af2b150717e58b2a42fefece28053e9 100644 (file)
@@ -196,7 +196,7 @@ intel_overlay_map_regs(struct intel_overlay *overlay)
                regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_obj->handle->vaddr;
        else
                regs = io_mapping_map_wc(dev_priv->gtt.mappable,
-                                        overlay->reg_bo->gtt_offset);
+                                        i915_gem_obj_ggtt_offset(overlay->reg_bo));
 
        return regs;
 }
@@ -740,7 +740,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
        swidth = params->src_w;
        swidthsw = calc_swidthsw(overlay->dev, params->offset_Y, tmp_width);
        sheight = params->src_h;
-       iowrite32(new_bo->gtt_offset + params->offset_Y, &regs->OBUF_0Y);
+       iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_Y, &regs->OBUF_0Y);
        ostride = params->stride_Y;
 
        if (params->format & I915_OVERLAY_YUV_PLANAR) {
@@ -754,8 +754,8 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
                                      params->src_w/uv_hscale);
                swidthsw |= max_t(u32, tmp_U, tmp_V) << 16;
                sheight |= (params->src_h/uv_vscale) << 16;
-               iowrite32(new_bo->gtt_offset + params->offset_U, &regs->OBUF_0U);
-               iowrite32(new_bo->gtt_offset + params->offset_V, &regs->OBUF_0V);
+               iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_U, &regs->OBUF_0U);
+               iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_V, &regs->OBUF_0V);
                ostride |= params->stride_UV << 16;
        }
 
@@ -1333,7 +1333,9 @@ void intel_setup_overlay(struct drm_device *dev)
 
        overlay->dev = dev;
 
-       reg_bo = i915_gem_object_create_stolen(dev, PAGE_SIZE);
+       reg_bo = NULL;
+       if (!OVERLAY_NEEDS_PHYSICAL(dev))
+               reg_bo = i915_gem_object_create_stolen(dev, PAGE_SIZE);
        if (reg_bo == NULL)
                reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE);
        if (reg_bo == NULL)
@@ -1355,7 +1357,7 @@ void intel_setup_overlay(struct drm_device *dev)
                        DRM_ERROR("failed to pin overlay register bo\n");
                        goto out_free_bo;
                }
-               overlay->flip_addr = reg_bo->gtt_offset;
+               overlay->flip_addr = i915_gem_obj_ggtt_offset(reg_bo);
 
                ret = i915_gem_object_set_to_gtt_domain(reg_bo, true);
                if (ret) {
@@ -1412,9 +1414,6 @@ void intel_cleanup_overlay(struct drm_device *dev)
        kfree(dev_priv->overlay);
 }
 
-#ifdef CONFIG_DEBUG_FS
-#include <linux/seq_file.h>
-
 struct intel_overlay_error_state {
        struct overlay_registers regs;
        unsigned long base;
@@ -1435,7 +1434,7 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
                        overlay->reg_bo->phys_obj->handle->vaddr;
        else
                regs = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
-                                               overlay->reg_bo->gtt_offset);
+                                               i915_gem_obj_ggtt_offset(overlay->reg_bo));
 
        return regs;
 }
@@ -1468,7 +1467,7 @@ intel_overlay_capture_error_state(struct drm_device *dev)
        if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
                error->base = (__force long)overlay->reg_bo->phys_obj->handle->vaddr;
        else
-               error->base = overlay->reg_bo->gtt_offset;
+               error->base = i915_gem_obj_ggtt_offset(overlay->reg_bo);
 
        regs = intel_overlay_map_regs_atomic(overlay);
        if (!regs)
@@ -1537,4 +1536,3 @@ intel_overlay_print_error_state(struct drm_i915_error_state_buf *m,
        P(UVSCALEV);
 #undef P
 }
-#endif
index 6a347f54d39fd9724ac368b710abafb259ee8180..74d6c4d78360adba17dfd38aee662f4a75277cf5 100644 (file)
@@ -30,6 +30,7 @@
 #include "intel_drv.h"
 #include "../../../platform/x86/intel_ips.h"
 #include <linux/module.h>
+#include <drm/i915_powerwell.h>
 
 #define FORCEWAKE_ACK_TIMEOUT_MS 2
 
@@ -86,7 +87,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
        int plane, i;
        u32 fbc_ctl, fbc_ctl2;
 
-       cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
+       cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE;
        if (fb->pitches[0] < cfb_pitch)
                cfb_pitch = fb->pitches[0];
 
@@ -217,7 +218,7 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
                   (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
                   (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
        I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
-       I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
+       I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
        /* enable it... */
        I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
 
@@ -274,7 +275,7 @@ static void gen7_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
        struct drm_i915_gem_object *obj = intel_fb->obj;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
-       I915_WRITE(IVB_FBC_RT_BASE, obj->gtt_offset);
+       I915_WRITE(IVB_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj));
 
        I915_WRITE(ILK_DPFC_CONTROL, DPFC_CTL_EN | DPFC_CTL_LIMIT_1X |
                   IVB_DPFC_CTL_FENCE_EN |
@@ -325,7 +326,7 @@ static void intel_fbc_work_fn(struct work_struct *__work)
        struct drm_i915_private *dev_priv = dev->dev_private;
 
        mutex_lock(&dev->struct_mutex);
-       if (work == dev_priv->fbc_work) {
+       if (work == dev_priv->fbc.fbc_work) {
                /* Double check that we haven't switched fb without cancelling
                 * the prior work.
                 */
@@ -333,12 +334,12 @@ static void intel_fbc_work_fn(struct work_struct *__work)
                        dev_priv->display.enable_fbc(work->crtc,
                                                     work->interval);
 
-                       dev_priv->cfb_plane = to_intel_crtc(work->crtc)->plane;
-                       dev_priv->cfb_fb = work->crtc->fb->base.id;
-                       dev_priv->cfb_y = work->crtc->y;
+                       dev_priv->fbc.plane = to_intel_crtc(work->crtc)->plane;
+                       dev_priv->fbc.fb_id = work->crtc->fb->base.id;
+                       dev_priv->fbc.y = work->crtc->y;
                }
 
-               dev_priv->fbc_work = NULL;
+               dev_priv->fbc.fbc_work = NULL;
        }
        mutex_unlock(&dev->struct_mutex);
 
@@ -347,28 +348,28 @@ static void intel_fbc_work_fn(struct work_struct *__work)
 
 static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
 {
-       if (dev_priv->fbc_work == NULL)
+       if (dev_priv->fbc.fbc_work == NULL)
                return;
 
        DRM_DEBUG_KMS("cancelling pending FBC enable\n");
 
        /* Synchronisation is provided by struct_mutex and checking of
-        * dev_priv->fbc_work, so we can perform the cancellation
+        * dev_priv->fbc.fbc_work, so we can perform the cancellation
         * entirely asynchronously.
         */
-       if (cancel_delayed_work(&dev_priv->fbc_work->work))
+       if (cancel_delayed_work(&dev_priv->fbc.fbc_work->work))
                /* tasklet was killed before being run, clean up */
-               kfree(dev_priv->fbc_work);
+               kfree(dev_priv->fbc.fbc_work);
 
        /* Mark the work as no longer wanted so that if it does
         * wake-up (because the work was already running and waiting
         * for our mutex), it will discover that it is no longer
         * necessary to run.
         */
-       dev_priv->fbc_work = NULL;
+       dev_priv->fbc.fbc_work = NULL;
 }
 
-void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+static void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
 {
        struct intel_fbc_work *work;
        struct drm_device *dev = crtc->dev;
@@ -381,6 +382,7 @@ void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
 
        work = kzalloc(sizeof *work, GFP_KERNEL);
        if (work == NULL) {
+               DRM_ERROR("Failed to allocate FBC work structure\n");
                dev_priv->display.enable_fbc(crtc, interval);
                return;
        }
@@ -390,9 +392,7 @@ void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
        work->interval = interval;
        INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);
 
-       dev_priv->fbc_work = work;
-
-       DRM_DEBUG_KMS("scheduling delayed FBC enable\n");
+       dev_priv->fbc.fbc_work = work;
 
        /* Delay the actual enabling to let pageflipping cease and the
         * display to settle before starting the compression. Note that
@@ -404,6 +404,8 @@ void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
         * following the termination of the page-flipping sequence
         * and indeed performing the enable as a co-routine and not
         * waiting synchronously upon the vblank.
+        *
+        * WaFbcWaitForVBlankBeforeEnable:ilk,snb
         */
        schedule_delayed_work(&work->work, msecs_to_jiffies(50));
 }
@@ -418,7 +420,7 @@ void intel_disable_fbc(struct drm_device *dev)
                return;
 
        dev_priv->display.disable_fbc(dev);
-       dev_priv->cfb_plane = -1;
+       dev_priv->fbc.plane = -1;
 }
 
 /**
@@ -448,7 +450,6 @@ void intel_update_fbc(struct drm_device *dev)
        struct drm_framebuffer *fb;
        struct intel_framebuffer *intel_fb;
        struct drm_i915_gem_object *obj;
-       int enable_fbc;
        unsigned int max_hdisplay, max_vdisplay;
 
        if (!i915_powersave)
@@ -471,7 +472,8 @@ void intel_update_fbc(struct drm_device *dev)
                    !to_intel_crtc(tmp_crtc)->primary_disabled) {
                        if (crtc) {
                                DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
-                               dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
+                               dev_priv->fbc.no_fbc_reason =
+                                       FBC_MULTIPLE_PIPES;
                                goto out_disable;
                        }
                        crtc = tmp_crtc;
@@ -480,7 +482,7 @@ void intel_update_fbc(struct drm_device *dev)
 
        if (!crtc || crtc->fb == NULL) {
                DRM_DEBUG_KMS("no output, disabling\n");
-               dev_priv->no_fbc_reason = FBC_NO_OUTPUT;
+               dev_priv->fbc.no_fbc_reason = FBC_NO_OUTPUT;
                goto out_disable;
        }
 
@@ -489,23 +491,22 @@ void intel_update_fbc(struct drm_device *dev)
        intel_fb = to_intel_framebuffer(fb);
        obj = intel_fb->obj;
 
-       enable_fbc = i915_enable_fbc;
-       if (enable_fbc < 0) {
-               DRM_DEBUG_KMS("fbc set to per-chip default\n");
-               enable_fbc = 1;
-               if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
-                       enable_fbc = 0;
+       if (i915_enable_fbc < 0 &&
+           INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev)) {
+               DRM_DEBUG_KMS("disabled per chip default\n");
+               dev_priv->fbc.no_fbc_reason = FBC_CHIP_DEFAULT;
+               goto out_disable;
        }
-       if (!enable_fbc) {
+       if (!i915_enable_fbc) {
                DRM_DEBUG_KMS("fbc disabled per module param\n");
-               dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
+               dev_priv->fbc.no_fbc_reason = FBC_MODULE_PARAM;
                goto out_disable;
        }
        if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
            (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
                DRM_DEBUG_KMS("mode incompatible with compression, "
                              "disabling\n");
-               dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
+               dev_priv->fbc.no_fbc_reason = FBC_UNSUPPORTED_MODE;
                goto out_disable;
        }
 
@@ -519,13 +520,13 @@ void intel_update_fbc(struct drm_device *dev)
        if ((crtc->mode.hdisplay > max_hdisplay) ||
            (crtc->mode.vdisplay > max_vdisplay)) {
                DRM_DEBUG_KMS("mode too large for compression, disabling\n");
-               dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
+               dev_priv->fbc.no_fbc_reason = FBC_MODE_TOO_LARGE;
                goto out_disable;
        }
        if ((IS_I915GM(dev) || IS_I945GM(dev) || IS_HASWELL(dev)) &&
            intel_crtc->plane != 0) {
                DRM_DEBUG_KMS("plane not 0, disabling compression\n");
-               dev_priv->no_fbc_reason = FBC_BAD_PLANE;
+               dev_priv->fbc.no_fbc_reason = FBC_BAD_PLANE;
                goto out_disable;
        }
 
@@ -535,7 +536,7 @@ void intel_update_fbc(struct drm_device *dev)
        if (obj->tiling_mode != I915_TILING_X ||
            obj->fence_reg == I915_FENCE_REG_NONE) {
                DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
-               dev_priv->no_fbc_reason = FBC_NOT_TILED;
+               dev_priv->fbc.no_fbc_reason = FBC_NOT_TILED;
                goto out_disable;
        }
 
@@ -545,7 +546,7 @@ void intel_update_fbc(struct drm_device *dev)
 
        if (i915_gem_stolen_setup_compression(dev, intel_fb->obj->base.size)) {
                DRM_DEBUG_KMS("framebuffer too large, disabling compression\n");
-               dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
+               dev_priv->fbc.no_fbc_reason = FBC_STOLEN_TOO_SMALL;
                goto out_disable;
        }
 
@@ -554,9 +555,9 @@ void intel_update_fbc(struct drm_device *dev)
         * cannot be unpinned (and have its GTT offset and fence revoked)
         * without first being decoupled from the scanout and FBC disabled.
         */
-       if (dev_priv->cfb_plane == intel_crtc->plane &&
-           dev_priv->cfb_fb == fb->base.id &&
-           dev_priv->cfb_y == crtc->y)
+       if (dev_priv->fbc.plane == intel_crtc->plane &&
+           dev_priv->fbc.fb_id == fb->base.id &&
+           dev_priv->fbc.y == crtc->y)
                return;
 
        if (intel_fbc_enabled(dev)) {
@@ -2468,8 +2469,8 @@ static void hsw_compute_wm_results(struct drm_device *dev,
 
 /* Find the result with the highest level enabled. Check for enable_fbc_wm in
  * case both are at the same level. Prefer r1 in case they're the same. */
-struct hsw_wm_values *hsw_find_best_result(struct hsw_wm_values *r1,
-                                          struct hsw_wm_values *r2)
+static struct hsw_wm_values *hsw_find_best_result(struct hsw_wm_values *r1,
+                                                 struct hsw_wm_values *r2)
 {
        int i, val_r1 = 0, val_r2 = 0;
 
@@ -3076,19 +3077,12 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
  */
 static void vlv_update_rps_cur_delay(struct drm_i915_private *dev_priv)
 {
-       unsigned long timeout = jiffies + msecs_to_jiffies(10);
        u32 pval;
 
        WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
 
-       do {
-               pval = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
-               if (time_after(jiffies, timeout)) {
-                       DRM_DEBUG_DRIVER("timed out waiting for Punit\n");
-                       break;
-               }
-               udelay(10);
-       } while (pval & 1);
+       if (wait_for(((pval = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS)) & GENFREQSTATUS) == 0, 10))
+               DRM_DEBUG_DRIVER("timed out waiting for Punit\n");
 
        pval >>= 8;
 
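The open-coded Punit poll above becomes a one-line wait_for() call, and the magic "& 1" gains the GENFREQSTATUS name. A rough sketch of the semantics such a poll-with-timeout helper provides (the real macro lives in intel_drv.h and differs in detail):

    /* kernel context: jiffies, time_after(), udelay() */
    #define sketch_wait_for(COND, MS) ({                              \
            unsigned long timeout__ = jiffies + msecs_to_jiffies(MS); \
            int ret__ = 0;                                            \
            while (!(COND)) {                                         \
                    if (time_after(jiffies, timeout__)) {             \
                            ret__ = -ETIMEDOUT;                       \
                            break;                                    \
                    }                                                 \
                    udelay(10);                                       \
            }                                                         \
            ret__;                                                    \
    })

Returning non-zero on timeout is what keeps the caller's "if (wait_for(...))" test above working.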
@@ -3129,13 +3123,10 @@ void valleyview_set_rps(struct drm_device *dev, u8 val)
        trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv->mem_freq, val));
 }
 
-
-static void gen6_disable_rps(struct drm_device *dev)
+static void gen6_disable_rps_interrupts(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
-       I915_WRITE(GEN6_RC_CONTROL, 0);
-       I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
        I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
        I915_WRITE(GEN6_PMIER, I915_READ(GEN6_PMIER) & ~GEN6_PM_RPS_EVENTS);
        /* Complete PM interrupt masking here doesn't race with the rps work
@@ -3143,30 +3134,30 @@ static void gen6_disable_rps(struct drm_device *dev)
         * register (PMIMR) to mask PM interrupts. The only risk is in leaving
         * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
 
-       spin_lock_irq(&dev_priv->rps.lock);
+       spin_lock_irq(&dev_priv->irq_lock);
        dev_priv->rps.pm_iir = 0;
-       spin_unlock_irq(&dev_priv->rps.lock);
+       spin_unlock_irq(&dev_priv->irq_lock);
 
        I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS);
 }
 
-static void valleyview_disable_rps(struct drm_device *dev)
+static void gen6_disable_rps(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
        I915_WRITE(GEN6_RC_CONTROL, 0);
-       I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
-       I915_WRITE(GEN6_PMIER, 0);
-       /* Complete PM interrupt masking here doesn't race with the rps work
-        * item again unmasking PM interrupts because that is using a different
-        * register (PMIMR) to mask PM interrupts. The only risk is in leaving
-        * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
+       I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
 
-       spin_lock_irq(&dev_priv->rps.lock);
-       dev_priv->rps.pm_iir = 0;
-       spin_unlock_irq(&dev_priv->rps.lock);
+       gen6_disable_rps_interrupts(dev);
+}
+
+static void valleyview_disable_rps(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
 
-       I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
+       I915_WRITE(GEN6_RC_CONTROL, 0);
+
+       gen6_disable_rps_interrupts(dev);
 
        if (dev_priv->vlv_pctx) {
                drm_gem_object_unreference(&dev_priv->vlv_pctx->base);
@@ -3176,6 +3167,10 @@ static void valleyview_disable_rps(struct drm_device *dev)
 
 int intel_enable_rc6(const struct drm_device *dev)
 {
+       /* No RC6 before Ironlake */
+       if (INTEL_INFO(dev)->gen < 5)
+               return 0;
+
        /* Respect the kernel parameter if it is set */
        if (i915_enable_rc6 >= 0)
                return i915_enable_rc6;
@@ -3199,6 +3194,19 @@ int intel_enable_rc6(const struct drm_device *dev)
        return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
 }
 
+static void gen6_enable_rps_interrupts(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       spin_lock_irq(&dev_priv->irq_lock);
+       WARN_ON(dev_priv->rps.pm_iir);
+       I915_WRITE(GEN6_PMIMR, I915_READ(GEN6_PMIMR) & ~GEN6_PM_RPS_EVENTS);
+       I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS);
+       spin_unlock_irq(&dev_priv->irq_lock);
+       /* unmask all PM interrupts */
+       I915_WRITE(GEN6_PMINTRMSK, 0);
+}
+
 static void gen6_enable_rps(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
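
gen6_enable_rps_interrupts() pairs with the disable helper introduced above, and the old FIXME about a non-zero pm_iir hardens into a WARN_ON. Both sides now serialize on dev_priv->irq_lock, the point being that the interrupt handler can update rps.pm_iir under the same lock; a sketch of that assumed producer side (names and register writes are assumptions, not part of this diff):

    spin_lock(&dev_priv->irq_lock);
    dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS;
    I915_WRITE(GEN6_PMIMR, I915_READ(GEN6_PMIMR) | dev_priv->rps.pm_iir);
    spin_unlock(&dev_priv->irq_lock);

    queue_work(dev_priv->wq, &dev_priv->rps.work);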
@@ -3327,17 +3335,7 @@ static void gen6_enable_rps(struct drm_device *dev)
 
        gen6_set_rps(dev_priv->dev, (gt_perf_status & 0xff00) >> 8);
 
-       /* requires MSI enabled */
-       I915_WRITE(GEN6_PMIER, I915_READ(GEN6_PMIER) | GEN6_PM_RPS_EVENTS);
-       spin_lock_irq(&dev_priv->rps.lock);
-       /* FIXME: Our interrupt enabling sequence is bonghits.
-        * dev_priv->rps.pm_iir really should be 0 here. */
-       dev_priv->rps.pm_iir = 0;
-       I915_WRITE(GEN6_PMIMR, I915_READ(GEN6_PMIMR) & ~GEN6_PM_RPS_EVENTS);
-       I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS);
-       spin_unlock_irq(&dev_priv->rps.lock);
-       /* unmask all PM interrupts */
-       I915_WRITE(GEN6_PMINTRMSK, 0);
+       gen6_enable_rps_interrupts(dev);
 
        rc6vids = 0;
        ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
@@ -3482,7 +3480,7 @@ static void valleyview_setup_pctx(struct drm_device *dev)
                pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base;
                pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv->dev,
                                                                      pcbr_offset,
-                                                                     -1,
+                                                                     I915_GTT_OFFSET_NONE,
                                                                      pctx_size);
                goto out;
        }
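
I915_GTT_OFFSET_NONE replaces the bare -1 sentinel with a named constant; presumably something along these lines (definition assumed, not shown in this diff):

    #define I915_GTT_OFFSET_NONE ((u32)-1)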
@@ -3607,14 +3605,7 @@ static void valleyview_enable_rps(struct drm_device *dev)
 
        valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay);
 
-       /* requires MSI enabled */
-       I915_WRITE(GEN6_PMIER, GEN6_PM_RPS_EVENTS);
-       spin_lock_irq(&dev_priv->rps.lock);
-       WARN_ON(dev_priv->rps.pm_iir != 0);
-       I915_WRITE(GEN6_PMIMR, 0);
-       spin_unlock_irq(&dev_priv->rps.lock);
-       /* enable all PM interrupts */
-       I915_WRITE(GEN6_PMINTRMSK, 0);
+       gen6_enable_rps_interrupts(dev);
 
        gen6_gt_force_wake_put(dev_priv);
 }
@@ -3708,7 +3699,7 @@ static void ironlake_enable_rc6(struct drm_device *dev)
 
        intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
        intel_ring_emit(ring, MI_SET_CONTEXT);
-       intel_ring_emit(ring, dev_priv->ips.renderctx->gtt_offset |
+       intel_ring_emit(ring, i915_gem_obj_ggtt_offset(dev_priv->ips.renderctx) |
                        MI_MM_SPACE_GTT |
                        MI_SAVE_EXT_STATE_EN |
                        MI_RESTORE_EXT_STATE_EN |
@@ -3731,7 +3722,7 @@ static void ironlake_enable_rc6(struct drm_device *dev)
                return;
        }
 
-       I915_WRITE(PWRCTXA, dev_priv->ips.pwrctx->gtt_offset | PWRCTX_EN);
+       I915_WRITE(PWRCTXA, i915_gem_obj_ggtt_offset(dev_priv->ips.pwrctx) | PWRCTX_EN);
        I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
 }
 
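This and the remaining obj->gtt_offset conversions below switch to the i915_gem_obj_ggtt_offset() accessor from the ongoing VMA rework. A sketch of what such an accessor plausibly looks like, assuming the object embeds its global-GTT drm_mm_node (field layout is an assumption):

    static inline unsigned long
    i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *o)
    {
            return o->gtt_space.start; /* assumed field layout */
    }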
@@ -4429,7 +4420,10 @@ static void ironlake_init_clock_gating(struct drm_device *dev)
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
 
-       /* Required for FBC */
+       /*
+        * Required for FBC
+        * WaFbcDisableDpfcClockGating:ilk
+        */
        dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
                   ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
                   ILK_DPFDUNIT_CLOCK_GATE_ENABLE;
@@ -4466,6 +4460,7 @@ static void ironlake_init_clock_gating(struct drm_device *dev)
         * The bit 7,8,9 of 0x42020.
         */
        if (IS_IRONLAKE_M(dev)) {
+               /* WaFbcAsynchFlipDisableFbcQueue:ilk */
                I915_WRITE(ILK_DISPLAY_CHICKEN1,
                           I915_READ(ILK_DISPLAY_CHICKEN1) |
                           ILK_FBCQ_DIS);
@@ -4602,6 +4597,8 @@ static void gen6_init_clock_gating(struct drm_device *dev)
         * The bit5 and bit7 of 0x42020
         * The bit14 of 0x70180
         * The bit14 of 0x71180
+        *
+        * WaFbcAsynchFlipDisableFbcQueue:snb
         */
        I915_WRITE(ILK_DISPLAY_CHICKEN1,
                   I915_READ(ILK_DISPLAY_CHICKEN1) |
@@ -5497,8 +5494,6 @@ void intel_gt_init(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
-       spin_lock_init(&dev_priv->gt_lock);
-
        if (IS_VALLEYVIEW(dev)) {
                dev_priv->gt.force_wake_get = vlv_force_wake_get;
                dev_priv->gt.force_wake_put = vlv_force_wake_put;
index 664118d8c1d6426353ed97bb61b1113369a7678a..8527ea05124be9d2566a23e974d60136fe478398 100644 (file)
@@ -440,14 +440,14 @@ static int init_ring_common(struct intel_ring_buffer *ring)
         * registers with the above sequence (the readback of the HEAD registers
         * also enforces ordering), otherwise the hw might lose the new ring
         * register values. */
-       I915_WRITE_START(ring, obj->gtt_offset);
+       I915_WRITE_START(ring, i915_gem_obj_ggtt_offset(obj));
        I915_WRITE_CTL(ring,
                        ((ring->size - PAGE_SIZE) & RING_NR_PAGES)
                        | RING_VALID);
 
        /* If the head is still not zero, the ring is dead */
        if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 &&
-                    I915_READ_START(ring) == obj->gtt_offset &&
+                    I915_READ_START(ring) == i915_gem_obj_ggtt_offset(obj) &&
                     (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
                DRM_ERROR("%s initialization failed "
                                "ctl %08x head %08x tail %08x start %08x\n",
@@ -505,7 +505,7 @@ init_pipe_control(struct intel_ring_buffer *ring)
        if (ret)
                goto err_unref;
 
-       pc->gtt_offset = obj->gtt_offset;
+       pc->gtt_offset = i915_gem_obj_ggtt_offset(obj);
        pc->cpu_page = kmap(sg_page(obj->pages->sgl));
        if (pc->cpu_page == NULL) {
                ret = -ENOMEM;
@@ -836,7 +836,7 @@ gen5_ring_get_irq(struct intel_ring_buffer *ring)
                return false;
 
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (ring->irq_refcount.gt++ == 0) {
+       if (ring->irq_refcount++ == 0) {
                dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
                I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
                POSTING_READ(GTIMR);
@@ -854,7 +854,7 @@ gen5_ring_put_irq(struct intel_ring_buffer *ring)
        unsigned long flags;
 
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (--ring->irq_refcount.gt == 0) {
+       if (--ring->irq_refcount == 0) {
                dev_priv->gt_irq_mask |= ring->irq_enable_mask;
                I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
                POSTING_READ(GTIMR);
@@ -873,7 +873,7 @@ i9xx_ring_get_irq(struct intel_ring_buffer *ring)
                return false;
 
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (ring->irq_refcount.gt++ == 0) {
+       if (ring->irq_refcount++ == 0) {
                dev_priv->irq_mask &= ~ring->irq_enable_mask;
                I915_WRITE(IMR, dev_priv->irq_mask);
                POSTING_READ(IMR);
@@ -891,7 +891,7 @@ i9xx_ring_put_irq(struct intel_ring_buffer *ring)
        unsigned long flags;
 
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (--ring->irq_refcount.gt == 0) {
+       if (--ring->irq_refcount == 0) {
                dev_priv->irq_mask |= ring->irq_enable_mask;
                I915_WRITE(IMR, dev_priv->irq_mask);
                POSTING_READ(IMR);
@@ -910,7 +910,7 @@ i8xx_ring_get_irq(struct intel_ring_buffer *ring)
                return false;
 
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (ring->irq_refcount.gt++ == 0) {
+       if (ring->irq_refcount++ == 0) {
                dev_priv->irq_mask &= ~ring->irq_enable_mask;
                I915_WRITE16(IMR, dev_priv->irq_mask);
                POSTING_READ16(IMR);
@@ -928,7 +928,7 @@ i8xx_ring_put_irq(struct intel_ring_buffer *ring)
        unsigned long flags;
 
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (--ring->irq_refcount.gt == 0) {
+       if (--ring->irq_refcount == 0) {
                dev_priv->irq_mask |= ring->irq_enable_mask;
                I915_WRITE16(IMR, dev_priv->irq_mask);
                POSTING_READ16(IMR);
@@ -1021,7 +1021,7 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring)
        gen6_gt_force_wake_get(dev_priv);
 
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (ring->irq_refcount.gt++ == 0) {
+       if (ring->irq_refcount++ == 0) {
                if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
                        I915_WRITE_IMR(ring,
                                       ~(ring->irq_enable_mask |
@@ -1045,7 +1045,7 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring)
        unsigned long flags;
 
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (--ring->irq_refcount.gt == 0) {
+       if (--ring->irq_refcount == 0) {
                if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
                        I915_WRITE_IMR(ring,
                                       ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
@@ -1070,14 +1070,14 @@ hsw_vebox_get_irq(struct intel_ring_buffer *ring)
        if (!dev->irq_enabled)
                return false;
 
-       spin_lock_irqsave(&dev_priv->rps.lock, flags);
-       if (ring->irq_refcount.pm++ == 0) {
+       spin_lock_irqsave(&dev_priv->irq_lock, flags);
+       if (ring->irq_refcount++ == 0) {
                u32 pm_imr = I915_READ(GEN6_PMIMR);
                I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
                I915_WRITE(GEN6_PMIMR, pm_imr & ~ring->irq_enable_mask);
                POSTING_READ(GEN6_PMIMR);
        }
-       spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
+       spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
        return true;
 }
@@ -1092,14 +1092,14 @@ hsw_vebox_put_irq(struct intel_ring_buffer *ring)
        if (!dev->irq_enabled)
                return;
 
-       spin_lock_irqsave(&dev_priv->rps.lock, flags);
-       if (--ring->irq_refcount.pm == 0) {
+       spin_lock_irqsave(&dev_priv->irq_lock, flags);
+       if (--ring->irq_refcount == 0) {
                u32 pm_imr = I915_READ(GEN6_PMIMR);
                I915_WRITE_IMR(ring, ~0);
                I915_WRITE(GEN6_PMIMR, pm_imr | ring->irq_enable_mask);
                POSTING_READ(GEN6_PMIMR);
        }
-       spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
+       spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 }
 
 static int
@@ -1144,7 +1144,7 @@ i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
                intel_ring_advance(ring);
        } else {
                struct drm_i915_gem_object *obj = ring->private;
-               u32 cs_offset = obj->gtt_offset;
+               u32 cs_offset = i915_gem_obj_ggtt_offset(obj);
 
                if (len > I830_BATCH_LIMIT)
                        return -ENOSPC;
@@ -1229,7 +1229,7 @@ static int init_status_page(struct intel_ring_buffer *ring)
                goto err_unref;
        }
 
-       ring->status_page.gfx_addr = obj->gtt_offset;
+       ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj);
        ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
        if (ring->status_page.page_addr == NULL) {
                ret = -ENOMEM;
@@ -1316,7 +1316,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
                goto err_unpin;
 
        ring->virtual_start =
-               ioremap_wc(dev_priv->gtt.mappable_base + obj->gtt_offset,
+               ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
                           ring->size);
        if (ring->virtual_start == NULL) {
                DRM_ERROR("Failed to map ringbuffer.\n");
@@ -2008,8 +2008,7 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev)
        ring->add_request = gen6_add_request;
        ring->get_seqno = gen6_ring_get_seqno;
        ring->set_seqno = ring_set_seqno;
-       ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT |
-               PM_VEBOX_CS_ERROR_INTERRUPT;
+       ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
        ring->irq_get = hsw_vebox_get_irq;
        ring->irq_put = hsw_vebox_put_irq;
        ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
index 799f04c9da45a0bc30b19dd841547f75a6d4958e..6e38256d41e1241af7f0f380edcf0508ddee44b6 100644 (file)
@@ -78,10 +78,7 @@ struct  intel_ring_buffer {
         */
        u32             last_retired_head;
 
-       struct {
-               u32     gt; /*  protected by dev_priv->irq_lock */
-               u32     pm; /*  protected by dev_priv->rps.lock (sucks) */
-       } irq_refcount;
+       unsigned irq_refcount; /* protected by dev_priv->irq_lock */
        u32             irq_enable_mask;        /* bitmask to enable ring interrupt */
        u32             trace_irq_seqno;
        u32             sync_seqno[I915_NUM_RINGS-1];
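
With hsw_vebox_get_irq()/hsw_vebox_put_irq() moved onto dev_priv->irq_lock above, the split gt/pm refcounts collapse into this single counter under a single lock. A simplified sketch of the get/put discipline all the converted paths share (types reduced to essentials):

    struct sketch_ring {
            unsigned irq_refcount; /* protected by irq_lock */
    };

    static void sketch_get_irq(spinlock_t *irq_lock, struct sketch_ring *ring)
    {
            unsigned long flags;

            spin_lock_irqsave(irq_lock, flags);
            if (ring->irq_refcount++ == 0) {
                    /* first user: unmask this ring's interrupt source */
            }
            spin_unlock_irqrestore(irq_lock, flags);
    }

    static void sketch_put_irq(spinlock_t *irq_lock, struct sketch_ring *ring)
    {
            unsigned long flags;

            spin_lock_irqsave(irq_lock, flags);
            if (--ring->irq_refcount == 0) {
                    /* last user: mask the interrupt source again */
            }
            spin_unlock_irqrestore(irq_lock, flags);
    }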
index 2628d56224499307cffa4e409a646122a9578db0..c3b59b8593b9d7039f960ae71a4e1b49e2cf80f9 100644 (file)
@@ -1357,22 +1357,21 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
        }
 
        /* Cross check the port pixel multiplier with the sdvo encoder state. */
-       intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_CLOCK_RATE_MULT, &val, 1);
-       switch (val) {
-       case SDVO_CLOCK_RATE_MULT_1X:
-               encoder_pixel_multiplier = 1;
-               break;
-       case SDVO_CLOCK_RATE_MULT_2X:
-               encoder_pixel_multiplier = 2;
-               break;
-       case SDVO_CLOCK_RATE_MULT_4X:
-               encoder_pixel_multiplier = 4;
-               break;
+       if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_CLOCK_RATE_MULT,
+                                &val, 1)) {
+               switch (val) {
+               case SDVO_CLOCK_RATE_MULT_1X:
+                       encoder_pixel_multiplier = 1;
+                       break;
+               case SDVO_CLOCK_RATE_MULT_2X:
+                       encoder_pixel_multiplier = 2;
+                       break;
+               case SDVO_CLOCK_RATE_MULT_4X:
+                       encoder_pixel_multiplier = 4;
+                       break;
+               }
        }
 
-       if(HAS_PCH_SPLIT(dev))
-               return; /* no pixel multiplier readout support yet */
-
        WARN(encoder_pixel_multiplier != pipe_config->pixel_multiplier,
             "SDVO pixel multiplier mismatch, port: %i, encoder: %i\n",
             pipe_config->pixel_multiplier, encoder_pixel_multiplier);
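
The rewritten readout consumes val only when intel_sdvo_get_value() reports success instead of switching on a possibly stale byte, and dropping the early PCH-split return lets the cross-check WARN run on those platforms too. The shape of the guard in isolation (standalone illustration; names and case values are hypothetical):

    #include <stdbool.h>
    #include <stdint.h>

    enum { MULT_1X = 1, MULT_2X = 2, MULT_4X = 8 }; /* illustrative values */

    static int pixel_multiplier(bool ok, uint8_t val)
    {
            int mult = 1;          /* safe default */

            if (ok) {              /* val is defined only on success */
                    switch (val) {
                    case MULT_1X: mult = 1; break;
                    case MULT_2X: mult = 2; break;
                    case MULT_4X: mult = 4; break;
                    }
            }
            return mult;
    }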
@@ -1697,6 +1696,9 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
        struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
        enum drm_connector_status ret;
 
+       DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+                     connector->base.id, drm_get_connector_name(connector));
+
        if (!intel_sdvo_get_value(intel_sdvo,
                                  SDVO_CMD_GET_ATTACHED_DISPLAYS,
                                  &response, 2))
index 1fa5612a4572cc0c3b92a3d6a253bc9f1da573ed..55bdf70b548be5c461146c0f8fe3c7155d9decec 100644 (file)
@@ -133,7 +133,7 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_framebuffer *fb,
 
        I915_WRITE(SPSIZE(pipe, plane), (crtc_h << 16) | crtc_w);
        I915_WRITE(SPCNTR(pipe, plane), sprctl);
-       I915_MODIFY_DISPBASE(SPSURF(pipe, plane), obj->gtt_offset +
+       I915_MODIFY_DISPBASE(SPSURF(pipe, plane), i915_gem_obj_ggtt_offset(obj) +
                             sprsurf_offset);
        POSTING_READ(SPSURF(pipe, plane));
 }
@@ -308,7 +308,8 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
        if (intel_plane->can_scale)
                I915_WRITE(SPRSCALE(pipe), sprscale);
        I915_WRITE(SPRCTL(pipe), sprctl);
-       I915_MODIFY_DISPBASE(SPRSURF(pipe), obj->gtt_offset + sprsurf_offset);
+       I915_MODIFY_DISPBASE(SPRSURF(pipe),
+                            i915_gem_obj_ggtt_offset(obj) + sprsurf_offset);
        POSTING_READ(SPRSURF(pipe));
 
        /* potentially re-enable LP watermarks */
@@ -478,7 +479,8 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
        I915_WRITE(DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
        I915_WRITE(DVSSCALE(pipe), dvsscale);
        I915_WRITE(DVSCNTR(pipe), dvscntr);
-       I915_MODIFY_DISPBASE(DVSSURF(pipe), obj->gtt_offset + dvssurf_offset);
+       I915_MODIFY_DISPBASE(DVSSURF(pipe),
+                            i915_gem_obj_ggtt_offset(obj) + dvssurf_offset);
        POSTING_READ(DVSSURF(pipe));
 }
 
index 39debd80d190241836e89b8c6794bdb713e80653..b0b446f630f7cf3ddb0c15af8ca6059916427692 100644 (file)
@@ -1305,6 +1305,10 @@ intel_tv_detect(struct drm_connector *connector, bool force)
        struct intel_tv *intel_tv = intel_attached_tv(connector);
        int type;
 
+       DRM_DEBUG_KMS("[CONNECTOR:%d:%s] force=%d\n",
+                     connector->base.id, drm_get_connector_name(connector),
+                     force);
+
        mode = reported_modes[0];
 
        if (force) {
index e8e1417af3d9c933073d2d36270ee3a15ffb5705..ae8dbfb1207c71a6cbe44dc166180988083c00e6 100644 (file)
@@ -342,13 +342,42 @@ u8 drm_dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE],
 u8 drm_dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE],
                                          int lane);
 
-#define DP_RECEIVER_CAP_SIZE   0xf
+#define DP_RECEIVER_CAP_SIZE           0xf
+#define EDP_PSR_RECEIVER_CAP_SIZE      2
+
 void drm_dp_link_train_clock_recovery_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]);
 void drm_dp_link_train_channel_eq_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]);
 
 u8 drm_dp_link_rate_to_bw_code(int link_rate);
 int drm_dp_bw_code_to_link_rate(u8 link_bw);
 
+struct edp_sdp_header {
+       u8 HB0; /* Secondary Data Packet ID */
+       u8 HB1; /* Secondary Data Packet Type */
+       u8 HB2; /* 7:5 reserved, 4:0 revision number */
+       u8 HB3; /* 7:5 reserved, 4:0 number of valid data bytes */
+} __packed;
+
+#define EDP_SDP_HEADER_REVISION_MASK           0x1F
+#define EDP_SDP_HEADER_VALID_PAYLOAD_BYTES     0x1F
+
+struct edp_vsc_psr {
+       struct edp_sdp_header sdp_header;
+       u8 DB0; /* Stereo Interface */
+       u8 DB1; /* 0 - PSR State; 1 - Update RFB; 2 - CRC Valid */
+       u8 DB2; /* CRC value bits 7:0 of the R or Cr component */
+       u8 DB3; /* CRC value bits 15:8 of the R or Cr component */
+       u8 DB4; /* CRC value bits 7:0 of the G or Y component */
+       u8 DB5; /* CRC value bits 15:8 of the G or Y component */
+       u8 DB6; /* CRC value bits 7:0 of the B or Cb component */
+       u8 DB7; /* CRC value bits 15:8 of the B or Cb component */
+       u8 DB8_31[24]; /* Reserved */
+} __packed;
+
+#define EDP_VSC_PSR_STATE_ACTIVE       (1<<0)
+#define EDP_VSC_PSR_UPDATE_RFB         (1<<1)
+#define EDP_VSC_PSR_CRC_VALUES_VALID   (1<<2)
+
 static inline int
 drm_dp_max_link_rate(u8 dpcd[DP_RECEIVER_CAP_SIZE])
 {
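
The new edp_vsc_psr type carries the VSC secondary data packet a PSR-capable eDP sink expects. A hedged sketch of filling one for PSR entry; the header values follow common DP SDP conventions and are assumptions, not values taken from this diff:

    #include <string.h>

    static void sketch_setup_psr_vsc(struct edp_vsc_psr *vsc)
    {
            memset(vsc, 0, sizeof(*vsc));
            vsc->sdp_header.HB0 = 0;   /* secondary data packet ID */
            vsc->sdp_header.HB1 = 0x7; /* packet type: VSC (assumed) */
            vsc->sdp_header.HB2 = 0x2; /* revision (assumed) */
            vsc->sdp_header.HB3 = 0x8; /* valid payload bytes (assumed) */
            vsc->DB1 = EDP_VSC_PSR_STATE_ACTIVE | EDP_VSC_PSR_CRC_VALUES_VALID;
    }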
index 4d06edb56d5fbb74480d0ffc4b24297870f8149e..b87d05e17d466179dd9c48b6a56c852012c24f16 100644 (file)
@@ -138,10 +138,7 @@ static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
 /*
  * Basic range manager support (drm_mm.c)
  */
-extern struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm,
-                                              unsigned long start,
-                                              unsigned long size,
-                                              bool atomic);
+extern int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node);
 extern struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
                                                    unsigned long size,
                                                    unsigned alignment,
@@ -155,6 +152,7 @@ extern struct drm_mm_node *drm_mm_get_block_range_generic(
                                                unsigned long start,
                                                unsigned long end,
                                                int atomic);
+
 static inline struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *parent,
                                                   unsigned long size,
                                                   unsigned alignment)
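
Where drm_mm_create_block() allocated and returned a node for a fixed range, drm_mm_reserve_node() takes a caller-provided node with start and size already filled in, which lets drivers embed the node in their own objects. A usage sketch (error value assumed):

    struct drm_mm_node node;
    int ret;

    memset(&node, 0, sizeof(node));
    node.start = start; /* fixed offset to claim */
    node.size  = size;

    ret = drm_mm_reserve_node(mm, &node);
    if (ret) /* e.g. -ENOSPC: range already occupied or out of bounds */
            return ret;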
index 923ed7fe5775b61743ba9df9037d27c2c3fe7b6b..a1a7b6bd60d8c30f8c1f9fb95017dfa744765f89 100644 (file)
  * subject to backwards-compatibility constraints.
  */
 
+/**
+ * DOC: uevents generated by i915 on its device node
+ *
+ * I915_L3_PARITY_UEVENT - Generated when the driver receives a parity mismatch
+ *     event from the GPU L3 cache. Additional information supplied is ROW,
+ *     BANK, SUBBANK of the affected cacheline. Userspace should keep track of
+ *     these events, and if a specific cacheline seems to have a persistent
+ *     error, remap it with the L3 remapping tool supplied in intel-gpu-tools.
+ *     The value supplied with the event is always 1.
+ *
+ * I915_ERROR_UEVENT - Generated upon error detection, currently only via
+ *     hangcheck. The error detection event is a good indicator of when things
+ *     began to go badly. The value supplied with the event is 1 upon error
+ *     detection and 0 upon reset completion, signifying that the error no
+ *     longer exists. NOTE: Disabling hangcheck or reset via module parameter
+ *     will cause the related events not to be seen.
+ *
+ * I915_RESET_UEVENT - Event is generated just before an attempt to reset the
+ *     GPU. The value supplied with the event is always 1. NOTE: Disabling
+ *     reset via module parameter will cause this event not to be seen.
+ */
+#define I915_L3_PARITY_UEVENT          "L3_PARITY_ERROR"
+#define I915_ERROR_UEVENT              "ERROR"
+#define I915_RESET_UEVENT              "RESET"
 
 /* Each region is a minimum of 16k, and there are at most 255 of them.
  */
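
These uevents surface on the drm device via the kernel's kobject machinery, so userspace can watch for them with a udev monitor. A hedged userspace sketch (the property name mirrors the string above; filter details are assumptions):

    #include <libudev.h>
    #include <poll.h>
    #include <stdio.h>

    int main(void)
    {
            struct udev *udev = udev_new();
            struct udev_monitor *mon =
                    udev_monitor_new_from_netlink(udev, "udev");
            struct pollfd pfd;

            udev_monitor_filter_add_match_subsystem_devtype(mon, "drm", NULL);
            udev_monitor_enable_receiving(mon);
            pfd.fd = udev_monitor_get_fd(mon);
            pfd.events = POLLIN;

            while (poll(&pfd, 1, -1) > 0) {
                    struct udev_device *dev = udev_monitor_receive_device(mon);
                    const char *val;

                    if (!dev)
                            continue;
                    val = udev_device_get_property_value(dev, "ERROR");
                    if (val)
                            printf("i915 ERROR uevent, value %s\n", val);
                    udev_device_unref(dev);
            }
            udev_unref(udev);
            return 0;
    }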