Merge tag 'drm-intel-next-2013-07-12' of git://people.freedesktop.org/~danvet/drm...
author Dave Airlie <airlied@redhat.com>
Fri, 19 Jul 2013 01:56:14 +0000 (11:56 +1000)
committer Dave Airlie <airlied@redhat.com>
Fri, 19 Jul 2013 02:12:21 +0000 (12:12 +1000)
 Highlights:
- follow-up refactoring after the shared dpll rework that landed in 3.11
- oddball prep cleanups from Ben for ppgtt
- encoder->get_config state tracking infrastructure from Jesse
- used by the experimental fastboot support from Jesse (disabled by
  default)
- make the error state file official and add it to our sysfs interface
  (Mika)
- drm_mm prep changes from Ben, preparing to embed the drm_mm_node (which
  will be used by the vma rework later on); a short sketch of the new
  reserve API follows this list
- interrupt handling rework, with follow-up cleanups to the VECS enabling,
  hpd storm handling and fifo underrun reporting.
- Big pile of smaller cleanups, code improvements and related stuff.
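
A note on the drm_mm prep work, sketched against the first hunk below:
drm_mm_create_block(), which kmalloc'd a node behind the caller's back,
becomes drm_mm_reserve_node(), which claims an exact range for a
caller-provided (typically embedded) node. A minimal sketch of the new
calling convention; struct my_buffer and my_buffer_claim_range are
hypothetical names, not part of this series:

    #include <drm/drm_mm.h>

    struct my_buffer {
            struct drm_mm_node node;        /* embedded, no kmalloc needed */
    };

    static int my_buffer_claim_range(struct drm_mm *mm, struct my_buffer *buf,
                                     unsigned long start, unsigned long size)
    {
            buf->node.start = start;
            buf->node.size = size;
            /* fails with -ENOSPC if no hole covers [start, start + size) */
            return drm_mm_reserve_node(mm, &buf->node);
    }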

* tag 'drm-intel-next-2013-07-12' of git://people.freedesktop.org/~danvet/drm-intel: (72 commits)
  drm/i915: clear DPLL reg when disabling i9xx dplls
  drm/i915: Fix up cpt pixel multiplier enable sequence
  drm/i915: clean up vlv ->pre_pll_enable and pll enable sequence
  drm/i915: move error state to own compilation unit
  drm/i915: Don't attempt to read an unitialized stack value
  drm/i915: Use for_each_pipe() when possible
  drm/i915: don't enable PM_VEBOX_CS_ERROR_INTERRUPT
  drm/i915: unify ring irq refcounts (again)
  drm/i915: kill dev_priv->rps.lock
  drm/i915: queue work outside spinlock in hsw_pm_irq_handler
  drm/i915: streamline hsw_pm_irq_handler
  drm/i915: irq handlers don't need interrupt-safe spinlocks
  drm/i915: kill lpt pch transcoder->crtc mapping code for fifo underruns
  drm/i915: improve GEN7_ERR_INT clearing for fifo underrun reporting
  drm/i915: improve SERR_INT clearing for fifo underrun reporting
  drm/i915: extract ibx_display_interrupt_update
  drm/i915: remove unused members from drm_i915_private
  drm/i915: don't frob mm.suspended when not using ums
  drm/i915: Fix VLV DP RBR/HDMI/DAC PLL LPF coefficients
  drm/i915: WARN if the bios reserved range is bigger than stolen size
  ...

Conflicts:
drivers/gpu/drm/i915/i915_gem.c

drivers/gpu/drm/drm_mm.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_stolen.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_lvds.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/intel_ringbuffer.c
include/drm/drm_mm.h

diff --combined drivers/gpu/drm/drm_mm.c
index 543b9b3171d32310de903668bc69f30901b12e91,52e0ee7f4a6f0c00b79c6b9a29a4f04cd7e64e44..fe304f903b130de19dbd4a11e6a75ea687a40ec4
@@@ -147,33 -147,27 +147,27 @@@ static void drm_mm_insert_helper(struc
        }
  }
  
- struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm,
-                                       unsigned long start,
-                                       unsigned long size,
-                                       bool atomic)
+ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
  {
-       struct drm_mm_node *hole, *node;
-       unsigned long end = start + size;
+       struct drm_mm_node *hole;
+       unsigned long end = node->start + node->size;
        unsigned long hole_start;
        unsigned long hole_end;
  
+       BUG_ON(node == NULL);
+       /* Find the relevant hole to add our node to */
        drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
-               if (hole_start > start || hole_end < end)
+               if (hole_start > node->start || hole_end < end)
                        continue;
  
-               node = drm_mm_kmalloc(mm, atomic);
-               if (unlikely(node == NULL))
-                       return NULL;
-               node->start = start;
-               node->size = size;
                node->mm = mm;
                node->allocated = 1;
  
                INIT_LIST_HEAD(&node->hole_stack);
                list_add(&node->node_list, &hole->node_list);
  
-               if (start == hole_start) {
+               if (node->start == hole_start) {
                        hole->hole_follows = 0;
                        list_del_init(&hole->hole_stack);
                }
                        node->hole_follows = 1;
                }
  
-               return node;
+               return 0;
        }
  
-       WARN(1, "no hole found for block 0x%lx + 0x%lx\n", start, size);
-       return NULL;
+       WARN(1, "no hole found for node 0x%lx + 0x%lx\n",
+            node->start, node->size);
+       return -ENOSPC;
  }
- EXPORT_SYMBOL(drm_mm_create_block);
+ EXPORT_SYMBOL(drm_mm_reserve_node);
  
  struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
                                             unsigned long size,
@@@ -669,7 -664,7 +664,7 @@@ int drm_mm_clean(struct drm_mm * mm
  }
  EXPORT_SYMBOL(drm_mm_clean);
  
 -int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
 +void drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
  {
        INIT_LIST_HEAD(&mm->hole_stack);
        INIT_LIST_HEAD(&mm->unused_nodes);
        list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);
  
        mm->color_adjust = NULL;
 -
 -      return 0;
  }
  EXPORT_SYMBOL(drm_mm_init);
  
@@@ -697,8 -694,8 +692,8 @@@ void drm_mm_takedown(struct drm_mm * mm
  {
        struct drm_mm_node *entry, *next;
  
 -      if (!list_empty(&mm->head_node.node_list)) {
 -              DRM_ERROR("Memory manager not clean. Delaying takedown\n");
 +      if (WARN(!list_empty(&mm->head_node.node_list),
 +               "Memory manager not clean. Delaying takedown\n")) {
                return;
        }
  
  }
  EXPORT_SYMBOL(drm_mm_takedown);
  
 -void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
 +static unsigned long drm_mm_debug_hole(struct drm_mm_node *entry,
 +                                     const char *prefix)
  {
 -      struct drm_mm_node *entry;
 -      unsigned long total_used = 0, total_free = 0, total = 0;
        unsigned long hole_start, hole_end, hole_size;
  
 -      hole_start = drm_mm_hole_node_start(&mm->head_node);
 -      hole_end = drm_mm_hole_node_end(&mm->head_node);
 -      hole_size = hole_end - hole_start;
 -      if (hole_size)
 +      if (entry->hole_follows) {
 +              hole_start = drm_mm_hole_node_start(entry);
 +              hole_end = drm_mm_hole_node_end(entry);
 +              hole_size = hole_end - hole_start;
                printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
                        prefix, hole_start, hole_end,
                        hole_size);
 -      total_free += hole_size;
 +              return hole_size;
 +      }
 +
 +      return 0;
 +}
 +
 +void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
 +{
 +      struct drm_mm_node *entry;
 +      unsigned long total_used = 0, total_free = 0, total = 0;
 +
 +      total_free += drm_mm_debug_hole(&mm->head_node, prefix);
  
        drm_mm_for_each_node(entry, mm) {
                printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: used\n",
                        prefix, entry->start, entry->start + entry->size,
                        entry->size);
                total_used += entry->size;
 -
 -              if (entry->hole_follows) {
 -                      hole_start = drm_mm_hole_node_start(entry);
 -                      hole_end = drm_mm_hole_node_end(entry);
 -                      hole_size = hole_end - hole_start;
 -                      printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
 -                              prefix, hole_start, hole_end,
 -                              hole_size);
 -                      total_free += hole_size;
 -              }
 +              total_free += drm_mm_debug_hole(entry, prefix);
        }
        total = total_free + total_used;
  
diff --combined drivers/gpu/drm/i915/i915_drv.c
index f4af1ca0fb62d82827213d51461d9642f0cf33cf,0485f435eeeafe9238c9cdca1ebf902cbbfff755..b07362f2675e28c8b8325645a6222788a845ce51
@@@ -123,15 -123,20 +123,20 @@@ module_param_named(preliminary_hw_suppo
  MODULE_PARM_DESC(preliminary_hw_support,
                "Enable preliminary hardware support. (default: false)");
  
 -int i915_disable_power_well __read_mostly = 0;
 +int i915_disable_power_well __read_mostly = 1;
  module_param_named(disable_power_well, i915_disable_power_well, int, 0600);
  MODULE_PARM_DESC(disable_power_well,
 -               "Disable the power well when possible (default: false)");
 +               "Disable the power well when possible (default: true)");
  
  int i915_enable_ips __read_mostly = 1;
  module_param_named(enable_ips, i915_enable_ips, int, 0600);
  MODULE_PARM_DESC(enable_ips, "Enable IPS (default: true)");
  
+ bool i915_fastboot __read_mostly = 0;
+ module_param_named(fastboot, i915_fastboot, bool, 0600);
+ MODULE_PARM_DESC(fastboot, "Try to skip unnecessary mode sets at boot time "
+                "(default: false)");
+
  static struct drm_driver driver;
  extern int intel_agp_enabled;
  
@@@ -551,7 -556,11 +556,11 @@@ static int i915_drm_freeze(struct drm_d
  
        /* If KMS is active, we do the leavevt stuff here */
        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-               int error = i915_gem_idle(dev);
+               int error;
+               mutex_lock(&dev->struct_mutex);
+               error = i915_gem_idle(dev);
+               mutex_unlock(&dev->struct_mutex);
                if (error) {
                        dev_err(&dev->pdev->dev,
                                "GEM idle failed, resume might fail\n");
@@@ -656,7 -665,6 +665,6 @@@ static int __i915_drm_thaw(struct drm_d
                intel_init_pch_refclk(dev);
  
                mutex_lock(&dev->struct_mutex);
-               dev_priv->mm.suspended = 0;
  
                error = i915_gem_init_hw(dev);
                mutex_unlock(&dev->struct_mutex);
@@@ -793,28 -801,29 +801,29 @@@ static int i965_reset_complete(struct d
  static int i965_do_reset(struct drm_device *dev)
  {
        int ret;
-       u8 gdrst;
  
        /*
         * Set the domains we want to reset (GRDOM/bits 2 and 3) as
         * well as the reset bit (GR/bit 0).  Setting the GR bit
         * triggers the reset; when done, the hardware will clear it.
         */
-       pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
        pci_write_config_byte(dev->pdev, I965_GDRST,
-                             gdrst | GRDOM_RENDER |
-                             GRDOM_RESET_ENABLE);
+                             GRDOM_RENDER | GRDOM_RESET_ENABLE);
        ret =  wait_for(i965_reset_complete(dev), 500);
        if (ret)
                return ret;
  
        /* We can't reset render&media without also resetting display ... */
-       pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
        pci_write_config_byte(dev->pdev, I965_GDRST,
-                             gdrst | GRDOM_MEDIA |
-                             GRDOM_RESET_ENABLE);
+                             GRDOM_MEDIA | GRDOM_RESET_ENABLE);
  
-       return wait_for(i965_reset_complete(dev), 500);
+       ret =  wait_for(i965_reset_complete(dev), 500);
+       if (ret)
+               return ret;
+       pci_write_config_byte(dev->pdev, I965_GDRST, 0);
+       return 0;
  }
  
  static int ironlake_do_reset(struct drm_device *dev)
@@@ -955,11 -964,11 +964,11 @@@ int i915_reset(struct drm_device *dev
         * switched away).
         */
        if (drm_core_check_feature(dev, DRIVER_MODESET) ||
-                       !dev_priv->mm.suspended) {
+                       !dev_priv->ums.mm_suspended) {
                struct intel_ring_buffer *ring;
                int i;
  
-               dev_priv->mm.suspended = 0;
+               dev_priv->ums.mm_suspended = 0;
  
                i915_gem_init_swizzling(dev);
  
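
The fastboot knob added above uses the stock module parameter
machinery; with mode 0600 it can be flipped at runtime through
/sys/module/i915/parameters/fastboot as well as set on the kernel
command line. A generic sketch of the same pattern; my_fastpath is an
illustrative name:

    #include <linux/module.h>

    static bool my_fastpath __read_mostly;  /* defaults to false */
    module_param_named(fastpath, my_fastpath, bool, 0600);
    MODULE_PARM_DESC(fastpath, "Enable the fast path (default: false)");
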
diff --combined drivers/gpu/drm/i915/i915_drv.h
index a416645bcd23364bb9793100da286b7f74858794,65d54edae17657b60e08b5453f25a8ffd54f212f..cef35d3ab37b66b359b52ae9cb7d9f96468edf9d
@@@ -144,6 -144,7 +144,7 @@@ enum intel_dpll_id 
  
  struct intel_dpll_hw_state {
        uint32_t dpll;
+       uint32_t dpll_md;
        uint32_t fp0;
        uint32_t fp1;
  };
@@@ -156,6 -157,8 +157,8 @@@ struct intel_shared_dpll 
        /* should match the index in the dev_priv->shared_dplls array */
        enum intel_dpll_id id;
        struct intel_dpll_hw_state hw_state;
+       void (*mode_set)(struct drm_i915_private *dev_priv,
+                        struct intel_shared_dpll *pll);
        void (*enable)(struct drm_i915_private *dev_priv,
                       struct intel_shared_dpll *pll);
        void (*disable)(struct drm_i915_private *dev_priv,
@@@ -364,6 -367,7 +367,7 @@@ struct drm_i915_display_funcs 
         * fills out the pipe-config with the hw state. */
        bool (*get_pipe_config)(struct intel_crtc *,
                                struct intel_crtc_config *);
+       void (*get_clock)(struct intel_crtc *, struct intel_crtc_config *);
        int (*crtc_mode_set)(struct drm_crtc *crtc,
                             int x, int y,
                             struct drm_framebuffer *old_fb);
@@@ -462,8 -466,12 +466,12 @@@ struct i915_gtt 
        void __iomem *gsm;
  
        bool do_idle_maps;
-       dma_addr_t scratch_page_dma;
-       struct page *scratch_page;
+       struct {
+               dma_addr_t addr;
+               struct page *page;
+       } scratch;
+       int mtrr;
  
        /* global gtt ops */
        int (*gtt_probe)(struct drm_device *dev, size_t *gtt_total,
                                   struct sg_table *st,
                                   unsigned int pg_start,
                                   enum i915_cache_level cache_level);
-       gen6_gtt_pte_t (*pte_encode)(struct drm_device *dev,
-                                    dma_addr_t addr,
+       gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr,
                                     enum i915_cache_level level);
  };
  #define gtt_total_entries(gtt) ((gtt).total >> PAGE_SHIFT)
  
- #define I915_PPGTT_PD_ENTRIES 512
- #define I915_PPGTT_PT_ENTRIES 1024
  struct i915_hw_ppgtt {
        struct drm_device *dev;
        unsigned num_pd_entries;
        struct page **pt_pages;
        uint32_t pd_offset;
        dma_addr_t *pt_dma_addr;
-       dma_addr_t scratch_page_dma_addr;
  
        /* pte functions, mirroring the interface of the global gtt. */
        void (*clear_range)(struct i915_hw_ppgtt *ppgtt,
                               struct sg_table *st,
                               unsigned int pg_start,
                               enum i915_cache_level cache_level);
-       gen6_gtt_pte_t (*pte_encode)(struct drm_device *dev,
-                                    dma_addr_t addr,
+       gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr,
                                     enum i915_cache_level level);
        int (*enable)(struct drm_device *dev);
        void (*cleanup)(struct i915_hw_ppgtt *ppgtt);
@@@ -528,17 -531,36 +531,36 @@@ struct i915_hw_context 
        struct i915_ctx_hang_stats hang_stats;
  };
  
- enum no_fbc_reason {
-       FBC_NO_OUTPUT, /* no outputs enabled to compress */
-       FBC_STOLEN_TOO_SMALL, /* not enough space to hold compressed buffers */
-       FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */
-       FBC_MODE_TOO_LARGE, /* mode too large for compression */
-       FBC_BAD_PLANE, /* fbc not supported on plane */
-       FBC_NOT_TILED, /* buffer not tiled */
-       FBC_MULTIPLE_PIPES, /* more than one pipe active */
-       FBC_MODULE_PARAM,
+ struct i915_fbc {
+       unsigned long size;
+       unsigned int fb_id;
+       enum plane plane;
+       int y;
+       struct drm_mm_node *compressed_fb;
+       struct drm_mm_node *compressed_llb;
+       struct intel_fbc_work {
+               struct delayed_work work;
+               struct drm_crtc *crtc;
+               struct drm_framebuffer *fb;
+               int interval;
+       } *fbc_work;
+       enum {
+               FBC_NO_OUTPUT, /* no outputs enabled to compress */
+               FBC_STOLEN_TOO_SMALL, /* not enough space for buffers */
+               FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */
+               FBC_MODE_TOO_LARGE, /* mode too large for compression */
+               FBC_BAD_PLANE, /* fbc not supported on plane */
+               FBC_NOT_TILED, /* buffer not tiled */
+               FBC_MULTIPLE_PIPES, /* more than one pipe active */
+               FBC_MODULE_PARAM,
+               FBC_CHIP_DEFAULT, /* disabled by default on this chip */
+       } no_fbc_reason;
  };
  
  enum intel_pch {
        PCH_NONE = 0,   /* No PCH present */
        PCH_IBX,        /* Ibexpeak PCH */
@@@ -721,12 -743,12 +743,12 @@@ struct i915_suspend_saved_registers 
  };
  
  struct intel_gen6_power_mgmt {
+       /* work and pm_iir are protected by dev_priv->irq_lock */
        struct work_struct work;
-       struct delayed_work vlv_work;
        u32 pm_iir;
-       /* lock - irqsave spinlock that protectects the work_struct and
-        * pm_iir. */
-       spinlock_t lock;
+       /* On vlv we need to manually drop to Vmin with a delayed work. */
+       struct delayed_work vlv_work;
  
        /* The below variables an all the rps hw state are protected by
         * dev->struct mutext. */
@@@ -792,6 -814,18 +814,18 @@@ struct i915_dri1_state 
        uint32_t counter;
  };
  
+ struct i915_ums_state {
+       /**
+        * Flag if the X Server, and thus DRM, is not currently in
+        * control of the device.
+        *
+        * This is set between LeaveVT and EnterVT.  It needs to be
+        * replaced with a semaphore.  It also needs to be
+        * transitioned away from for kernel modesetting.
+        */
+       int mm_suspended;
+ };
+
  struct intel_l3_parity {
        u32 *remap_info;
        struct work_struct error_work;
@@@ -815,8 -849,6 +849,6 @@@ struct i915_gem_mm 
        /** Usable portion of the GTT for GEM */
        unsigned long stolen_base; /* limited to low memory (32-bit) */
  
-       int gtt_mtrr;
        /** PPGTT used for aliasing the PPGTT with the GTT */
        struct i915_hw_ppgtt *aliasing_ppgtt;
  
         */
        bool interruptible;
  
-       /**
-        * Flag if the X Server, and thus DRM, is not currently in
-        * control of the device.
-        *
-        * This is set between LeaveVT and EnterVT.  It needs to be
-        * replaced with a semaphore.  It also needs to be
-        * transitioned away from for kernel modesetting.
-        */
-       int suspended;
        /** Bit 6 swizzling required for X tiling */
        uint32_t bit_6_swizzle_x;
        /** Bit 6 swizzling required for Y tiling */
@@@ -896,6 -918,11 +918,11 @@@ struct drm_i915_error_state_buf 
        loff_t pos;
  };
  
+ struct i915_error_state_file_priv {
+       struct drm_device *dev;
+       struct drm_i915_error_state *error;
+ };
+
  struct i915_gpu_error {
        /* For hangcheck timer */
  #define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
@@@ -1058,12 -1085,7 +1085,7 @@@ typedef struct drm_i915_private 
  
        int num_plane;
  
-       unsigned long cfb_size;
-       unsigned int cfb_fb;
-       enum plane cfb_plane;
-       int cfb_y;
-       struct intel_fbc_work *fbc_work;
+       struct i915_fbc fbc;
        struct intel_opregion opregion;
        struct intel_vbt_data vbt;
  
        } backlight;
  
        /* LVDS info */
-       struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
-       struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
        bool no_aux_handshake;
  
        struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
        /* Haswell power well */
        struct i915_power_well power_well;
  
-       enum no_fbc_reason no_fbc_reason;
-       struct drm_mm_node *compressed_fb;
-       struct drm_mm_node *compressed_llb;
        struct i915_gpu_error gpu_error;
  
        struct drm_i915_gem_object *vlv_pctx;
        /* Old dri1 support infrastructure, beware the dragons ya fools entering
         * here! */
        struct i915_dri1_state dri1;
+       /* Old ums support infrastructure, same warning applies. */
+       struct i915_ums_state ums;
  } drm_i915_private_t;
  
  /* Iterate over initialised rings */
@@@ -1186,7 -1203,7 +1203,7 @@@ enum hdmi_force_audio 
        HDMI_AUDIO_ON,                  /* force turn on HDMI audio */
  };
  
- #define I915_GTT_RESERVED ((struct drm_mm_node *)0x1)
+ #define I915_GTT_OFFSET_NONE ((u32)-1)
  
  struct drm_i915_gem_object_ops {
        /* Interface between the GEM object and its backing storage.
@@@ -1212,7 -1229,7 +1229,7 @@@ struct drm_i915_gem_object 
        const struct drm_i915_gem_object_ops *ops;
  
        /** Current space allocated to this object in the GTT, if any. */
-       struct drm_mm_node *gtt_space;
+       struct drm_mm_node gtt_space;
        /** Stolen memory for this object, instead of being backed by shmem. */
        struct drm_mm_node *stolen;
        struct list_head global_list;
        unsigned long exec_handle;
        struct drm_i915_gem_exec_object2 *exec_entry;
  
-       /**
-        * Current offset of the object in GTT space.
-        *
-        * This is the same as gtt_space->start
-        */
-       uint32_t gtt_offset;
        struct intel_ring_buffer *ring;
  
        /** Breadcrumb of last rendering to the buffer. */
  
  #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
  
+ /* Offset of the first PTE pointing to this object */
+ static inline unsigned long
+ i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *o)
+ {
+       return o->gtt_space.start;
+ }
+
+ /* Whether or not this object is currently mapped by the translation tables */
+ static inline bool
+ i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *o)
+ {
+       return drm_mm_node_allocated(&o->gtt_space);
+ }
+
+ /* The size used in the translation tables may be larger than the actual size of
+  * the object on GEN2/GEN3 because of the way tiling is handled. See
+  * i915_gem_get_gtt_size() for more details.
+  */
+ static inline unsigned long
+ i915_gem_obj_ggtt_size(struct drm_i915_gem_object *o)
+ {
+       return o->gtt_space.size;
+ }
+
+ static inline void
+ i915_gem_obj_ggtt_set_color(struct drm_i915_gem_object *o,
+                           enum i915_cache_level color)
+ {
+       o->gtt_space.color = color;
+ }
+
  /**
   * Request queue structure.
   *
@@@ -1542,6 -1583,7 +1583,7 @@@ extern int i915_enable_ppgtt __read_mos
  extern unsigned int i915_preliminary_hw_support __read_mostly;
  extern int i915_disable_power_well __read_mostly;
  extern int i915_enable_ips __read_mostly;
+ extern bool i915_fastboot __read_mostly;
  
  extern int i915_suspend(struct drm_device *dev, pm_message_t state);
  extern int i915_resume(struct drm_device *dev);
@@@ -1585,21 -1627,12 +1627,12 @@@ extern void intel_hpd_init(struct drm_d
  extern void intel_gt_init(struct drm_device *dev);
  extern void intel_gt_reset(struct drm_device *dev);
  
- void i915_error_state_free(struct kref *error_ref);
  void
  i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
  
  void
  i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
  
- #ifdef CONFIG_DEBUG_FS
- extern void i915_destroy_error_state(struct drm_device *dev);
- #else
- #define i915_destroy_error_state(x)
- #endif
  /* i915_gem.c */
  int i915_gem_init_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);
@@@ -1806,8 -1839,6 +1839,8 @@@ struct drm_gem_object *i915_gem_prime_i
  struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
                                struct drm_gem_object *gem_obj, int flags);
  
 +void i915_gem_restore_fences(struct drm_device *dev);
 +
  /* i915_gem_context.c */
  void i915_gem_context_init(struct drm_device *dev);
  void i915_gem_context_fini(struct drm_device *dev);
@@@ -1910,8 -1941,27 +1943,27 @@@ void i915_gem_dump_object(struct drm_i9
  /* i915_debugfs.c */
  int i915_debugfs_init(struct drm_minor *minor);
  void i915_debugfs_cleanup(struct drm_minor *minor);
+
+ /* i915_gpu_error.c */
  __printf(2, 3)
  void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
+ int i915_error_state_to_str(struct drm_i915_error_state_buf *estr,
+                           const struct i915_error_state_file_priv *error);
+ int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb,
+                             size_t count, loff_t pos);
+ static inline void i915_error_state_buf_release(
+       struct drm_i915_error_state_buf *eb)
+ {
+       kfree(eb->buf);
+ }
+
+ void i915_capture_error_state(struct drm_device *dev);
+ void i915_error_state_get(struct drm_device *dev,
+                         struct i915_error_state_file_priv *error_priv);
+ void i915_error_state_put(struct i915_error_state_file_priv *error_priv);
+ void i915_destroy_error_state(struct drm_device *dev);
+ void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone);
+ const char *i915_cache_level_str(int type);
  
  /* i915_suspend.c */
  extern int i915_save_state(struct drm_device *dev);
@@@ -1991,7 -2041,6 +2043,6 @@@ int i915_reg_read_ioctl(struct drm_devi
                        struct drm_file *file);
  
  /* overlay */
- #ifdef CONFIG_DEBUG_FS
  extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
  extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
                                            struct intel_overlay_error_state *error);
@@@ -2000,7 -2049,6 +2051,6 @@@ extern struct intel_display_error_stat
  extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
                                            struct drm_device *dev,
                                            struct intel_display_error_state *error);
- #endif
  
  /* On SNB platform, before reading ring registers forcewake bit
   * must be set to prevent GT core from power down and stale values being
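
With gtt_space embedded and the cached gtt_offset field gone, every
placement query now goes through the inline getters added above. A
hedged sketch of a caller; my_log_placement is an illustrative name,
and the snippet assumes the usual i915_drv.h/drmP.h context:

    static void my_log_placement(struct drm_i915_gem_object *obj)
    {
            if (!i915_gem_obj_ggtt_bound(obj))
                    return;         /* not mapped into the GGTT */

            DRM_DEBUG("bound at 0x%08lx, size 0x%08lx\n",
                      i915_gem_obj_ggtt_offset(obj),
                      i915_gem_obj_ggtt_size(obj));
    }
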
diff --combined drivers/gpu/drm/i915/i915_gem.c
index 97afd2639fb63a1e240fce2c6f97f53fd9f82e98,20b10a0fa4528ce81389eefb4004a701f3fcf3af..46bf7e3887d4dbff248555c74e32e8df8e9ba658
@@@ -135,7 -135,7 +135,7 @@@ int i915_mutex_lock_interruptible(struc
  static inline bool
  i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
  {
-       return obj->gtt_space && !obj->active;
+       return i915_gem_obj_ggtt_bound(obj) && !obj->active;
  }
  
  int
@@@ -178,7 -178,7 +178,7 @@@ i915_gem_get_aperture_ioctl(struct drm_
        mutex_lock(&dev->struct_mutex);
        list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
                if (obj->pin_count)
-                       pinned += obj->gtt_space->size;
+                       pinned += i915_gem_obj_ggtt_size(obj);
        mutex_unlock(&dev->struct_mutex);
  
        args->aper_size = dev_priv->gtt.total;
@@@ -422,7 -422,7 +422,7 @@@ i915_gem_shmem_pread(struct drm_device 
                 * anyway again before the next pread happens. */
                if (obj->cache_level == I915_CACHE_NONE)
                        needs_clflush = 1;
-               if (obj->gtt_space) {
+               if (i915_gem_obj_ggtt_bound(obj)) {
                        ret = i915_gem_object_set_to_gtt_domain(obj, false);
                        if (ret)
                                return ret;
@@@ -609,7 -609,7 +609,7 @@@ i915_gem_gtt_pwrite_fast(struct drm_dev
        user_data = to_user_ptr(args->data_ptr);
        remain = args->size;
  
-       offset = obj->gtt_offset + args->offset;
+       offset = i915_gem_obj_ggtt_offset(obj) + args->offset;
  
        while (remain > 0) {
                /* Operation in this page
@@@ -739,7 -739,7 +739,7 @@@ i915_gem_shmem_pwrite(struct drm_devic
                 * right away and we therefore have to clflush anyway. */
                if (obj->cache_level == I915_CACHE_NONE)
                        needs_clflush_after = 1;
-               if (obj->gtt_space) {
+               if (i915_gem_obj_ggtt_bound(obj)) {
                        ret = i915_gem_object_set_to_gtt_domain(obj, true);
                        if (ret)
                                return ret;
@@@ -1360,8 -1360,9 +1360,9 @@@ int i915_gem_fault(struct vm_area_struc
  
        obj->fault_mappable = true;
  
-       pfn = ((dev_priv->gtt.mappable_base + obj->gtt_offset) >> PAGE_SHIFT) +
-               page_offset;
+       pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj);
+       pfn >>= PAGE_SHIFT;
+       pfn += page_offset;
  
        /* Finally, remap it using the new GTT offset */
        ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
@@@ -1667,7 -1668,7 +1668,7 @@@ i915_gem_object_put_pages(struct drm_i9
        if (obj->pages == NULL)
                return 0;
  
-       BUG_ON(obj->gtt_space);
+       BUG_ON(i915_gem_obj_ggtt_bound(obj));
  
        if (obj->pages_pin_count)
                return -EBUSY;
@@@ -1880,10 -1881,6 +1881,10 @@@ i915_gem_object_move_to_active(struct d
        u32 seqno = intel_ring_get_seqno(ring);
  
        BUG_ON(ring == NULL);
 +      if (obj->ring != ring && obj->last_write_seqno) {
 +              /* Keep the seqno relative to the current ring */
 +              obj->last_write_seqno = seqno;
 +      }
        obj->ring = ring;
  
        /* Add a reference if we're newly entering the active list. */
@@@ -2085,7 -2082,7 +2086,7 @@@ int __i915_add_request(struct intel_rin
        trace_i915_gem_request_add(ring, request->seqno);
        ring->outstanding_lazy_request = 0;
  
-       if (!dev_priv->mm.suspended) {
+       if (!dev_priv->ums.mm_suspended) {
                if (i915_enable_hangcheck) {
                        mod_timer(&dev_priv->gpu_error.hangcheck_timer,
                                  round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
@@@ -2121,8 -2118,8 +2122,8 @@@ i915_gem_request_remove_from_client(str
  
  static bool i915_head_inside_object(u32 acthd, struct drm_i915_gem_object *obj)
  {
-       if (acthd >= obj->gtt_offset &&
-           acthd < obj->gtt_offset + obj->base.size)
+       if (acthd >= i915_gem_obj_ggtt_offset(obj) &&
+           acthd < i915_gem_obj_ggtt_offset(obj) + obj->base.size)
                return true;
  
        return false;
@@@ -2180,11 -2177,11 +2181,11 @@@ static void i915_set_reset_status(struc
  
        if (ring->hangcheck.action != wait &&
            i915_request_guilty(request, acthd, &inside)) {
-               DRM_ERROR("%s hung %s bo (0x%x ctx %d) at 0x%x\n",
+               DRM_ERROR("%s hung %s bo (0x%lx ctx %d) at 0x%x\n",
                          ring->name,
                          inside ? "inside" : "flushing",
                          request->batch_obj ?
-                         request->batch_obj->gtt_offset : 0,
+                         i915_gem_obj_ggtt_offset(request->batch_obj) : 0,
                          request->ctx ? request->ctx->id : 0,
                          acthd);
  
@@@ -2251,15 -2248,25 +2252,15 @@@ static void i915_gem_reset_ring_lists(s
        }
  }
  
 -static void i915_gem_reset_fences(struct drm_device *dev)
 +void i915_gem_restore_fences(struct drm_device *dev)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
        int i;
  
        for (i = 0; i < dev_priv->num_fence_regs; i++) {
                struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
 -
 -              if (reg->obj)
 -                      i915_gem_object_fence_lost(reg->obj);
 -
 -              i915_gem_write_fence(dev, i, NULL);
 -
 -              reg->pin_count = 0;
 -              reg->obj = NULL;
 -              INIT_LIST_HEAD(&reg->lru_list);
 +              i915_gem_write_fence(dev, i, reg->obj);
        }
 -
 -      INIT_LIST_HEAD(&dev_priv->mm.fence_list);
  }
  
  void i915_gem_reset(struct drm_device *dev)
                obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
        }
  
 -      /* The fence registers are invalidated so clear them out */
 -      i915_gem_reset_fences(dev);
 +      i915_gem_restore_fences(dev);
  }
  
  /**
@@@ -2390,7 -2398,7 +2391,7 @@@ i915_gem_retire_work_handler(struct wor
                idle &= list_empty(&ring->request_list);
        }
  
-       if (!dev_priv->mm.suspended && !idle)
+       if (!dev_priv->ums.mm_suspended && !idle)
                queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
                                   round_jiffies_up_relative(HZ));
        if (idle)
@@@ -2585,7 -2593,7 +2586,7 @@@ i915_gem_object_unbind(struct drm_i915_
        drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
        int ret;
  
-       if (obj->gtt_space == NULL)
+       if (!i915_gem_obj_ggtt_bound(obj))
                return 0;
  
        if (obj->pin_count)
        /* Avoid an unnecessary call to unbind on rebind. */
        obj->map_and_fenceable = true;
  
-       drm_mm_put_block(obj->gtt_space);
-       obj->gtt_space = NULL;
-       obj->gtt_offset = 0;
+       drm_mm_remove_node(&obj->gtt_space);
  
        return 0;
  }
@@@ -2657,6 -2663,7 +2656,6 @@@ static void i965_write_fence_reg(struc
        drm_i915_private_t *dev_priv = dev->dev_private;
        int fence_reg;
        int fence_pitch_shift;
 -      uint64_t val;
  
        if (INTEL_INFO(dev)->gen >= 6) {
                fence_reg = FENCE_REG_SANDYBRIDGE_0;
                fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
        }
  
 +      fence_reg += reg * 8;
 +
 +      /* To w/a incoherency with non-atomic 64-bit register updates,
 +       * we split the 64-bit update into two 32-bit writes. In order
 +       * for a partial fence not to be evaluated between writes, we
 +       * precede the update with write to turn off the fence register,
 +       * and only enable the fence as the last step.
 +       *
 +       * For extra levels of paranoia, we make sure each step lands
 +       * before applying the next step.
 +       */
 +      I915_WRITE(fence_reg, 0);
 +      POSTING_READ(fence_reg);
 +
        if (obj) {
-               u32 size = obj->gtt_space->size;
+               u32 size = i915_gem_obj_ggtt_size(obj);
 +              uint64_t val;
  
-               val = (uint64_t)((obj->gtt_offset + size - 4096) &
+               val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
                                 0xfffff000) << 32;
-               val |= obj->gtt_offset & 0xfffff000;
+               val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
                val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
                if (obj->tiling_mode == I915_TILING_Y)
                        val |= 1 << I965_FENCE_TILING_Y_SHIFT;
                val |= I965_FENCE_REG_VALID;
 -      } else
 -              val = 0;
  
 -      fence_reg += reg * 8;
 -      I915_WRITE64(fence_reg, val);
 -      POSTING_READ(fence_reg);
 +              I915_WRITE(fence_reg + 4, val >> 32);
 +              POSTING_READ(fence_reg + 4);
 +
 +              I915_WRITE(fence_reg + 0, val);
 +              POSTING_READ(fence_reg);
 +      } else {
 +              I915_WRITE(fence_reg + 4, 0);
 +              POSTING_READ(fence_reg + 4);
 +      }
  }
  
  static void i915_write_fence_reg(struct drm_device *dev, int reg,
        u32 val;
  
        if (obj) {
-               u32 size = obj->gtt_space->size;
+               u32 size = i915_gem_obj_ggtt_size(obj);
                int pitch_val;
                int tile_width;
  
-               WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
+               WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) ||
                     (size & -size) != size ||
-                    (obj->gtt_offset & (size - 1)),
-                    "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
-                    obj->gtt_offset, obj->map_and_fenceable, size);
+                    (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
+                    "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
+                    i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size);
  
                if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
                        tile_width = 128;
                pitch_val = obj->stride / tile_width;
                pitch_val = ffs(pitch_val) - 1;
  
-               val = obj->gtt_offset;
+               val = i915_gem_obj_ggtt_offset(obj);
                if (obj->tiling_mode == I915_TILING_Y)
                        val |= 1 << I830_FENCE_TILING_Y_SHIFT;
                val |= I915_FENCE_SIZE_BITS(size);
@@@ -2754,19 -2742,19 +2753,19 @@@ static void i830_write_fence_reg(struc
        uint32_t val;
  
        if (obj) {
-               u32 size = obj->gtt_space->size;
+               u32 size = i915_gem_obj_ggtt_size(obj);
                uint32_t pitch_val;
  
-               WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
+               WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
                     (size & -size) != size ||
-                    (obj->gtt_offset & (size - 1)),
-                    "object 0x%08x not 512K or pot-size 0x%08x aligned\n",
-                    obj->gtt_offset, size);
+                    (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
+                    "object 0x%08lx not 512K or pot-size 0x%08x aligned\n",
+                    i915_gem_obj_ggtt_offset(obj), size);
  
                pitch_val = obj->stride / 128;
                pitch_val = ffs(pitch_val) - 1;
  
-               val = obj->gtt_offset;
+               val = i915_gem_obj_ggtt_offset(obj);
                if (obj->tiling_mode == I915_TILING_Y)
                        val |= 1 << I830_FENCE_TILING_Y_SHIFT;
                val |= I830_FENCE_SIZE_BITS(size);
@@@ -2818,17 -2806,56 +2817,17 @@@ static inline int fence_number(struct d
        return fence - dev_priv->fence_regs;
  }
  
 -struct write_fence {
 -      struct drm_device *dev;
 -      struct drm_i915_gem_object *obj;
 -      int fence;
 -};
 -
 -static void i915_gem_write_fence__ipi(void *data)
 -{
 -      struct write_fence *args = data;
 -
 -      /* Required for SNB+ with LLC */
 -      wbinvd();
 -
 -      /* Required for VLV */
 -      i915_gem_write_fence(args->dev, args->fence, args->obj);
 -}
 -
  static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
                                         struct drm_i915_fence_reg *fence,
                                         bool enable)
  {
        struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
 -      struct write_fence args = {
 -              .dev = obj->base.dev,
 -              .fence = fence_number(dev_priv, fence),
 -              .obj = enable ? obj : NULL,
 -      };
 -
 -      /* In order to fully serialize access to the fenced region and
 -       * the update to the fence register we need to take extreme
 -       * measures on SNB+. In theory, the write to the fence register
 -       * flushes all memory transactions before, and coupled with the
 -       * mb() placed around the register write we serialise all memory
 -       * operations with respect to the changes in the tiler. Yet, on
 -       * SNB+ we need to take a step further and emit an explicit wbinvd()
 -       * on each processor in order to manually flush all memory
 -       * transactions before updating the fence register.
 -       *
 -       * However, Valleyview complicates matter. There the wbinvd is
 -       * insufficient and unlike SNB/IVB requires the serialising
 -       * register write. (Note that that register write by itself is
 -       * conversely not sufficient for SNB+.) To compromise, we do both.
 -       */
 -      if (INTEL_INFO(args.dev)->gen >= 6)
 -              on_each_cpu(i915_gem_write_fence__ipi, &args, 1);
 -      else
 -              i915_gem_write_fence(args.dev, args.fence, args.obj);
 +      int reg = fence_number(dev_priv, fence);
 +
 +      i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
  
        if (enable) {
 -              obj->fence_reg = args.fence;
 +              obj->fence_reg = reg;
                fence->obj = obj;
                list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
        } else {
@@@ -2983,7 -3010,7 +2982,7 @@@ static bool i915_gem_valid_gtt_space(st
        if (HAS_LLC(dev))
                return true;
  
-       if (gtt_space == NULL)
+       if (!drm_mm_node_allocated(gtt_space))
                return true;
  
        if (list_empty(&gtt_space->node_list))
@@@ -3016,8 -3043,8 +3015,8 @@@ static void i915_gem_verify_gtt(struct 
  
                if (obj->cache_level != obj->gtt_space->color) {
                        printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
-                              obj->gtt_space->start,
-                              obj->gtt_space->start + obj->gtt_space->size,
+                              i915_gem_obj_ggtt_offset(obj),
+                              i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
                               obj->cache_level,
                               obj->gtt_space->color);
                        err++;
                                              obj->gtt_space,
                                              obj->cache_level)) {
                        printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
-                              obj->gtt_space->start,
-                              obj->gtt_space->start + obj->gtt_space->size,
+                              i915_gem_obj_ggtt_offset(obj),
+                              i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
                               obj->cache_level);
                        err++;
                        continue;
@@@ -3051,7 -3078,6 +3050,6 @@@ i915_gem_object_bind_to_gtt(struct drm_
  {
        struct drm_device *dev = obj->base.dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_mm_node *node;
        u32 size, fence_size, fence_alignment, unfenced_alignment;
        bool mappable, fenceable;
        size_t gtt_max = map_and_fenceable ?
  
        i915_gem_object_pin_pages(obj);
  
-       node = kzalloc(sizeof(*node), GFP_KERNEL);
-       if (node == NULL) {
-               i915_gem_object_unpin_pages(obj);
-               return -ENOMEM;
-       }
  search_free:
-       ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space, node,
+       ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space,
+                                                 &obj->gtt_space,
                                                  size, alignment,
                                                  obj->cache_level, 0, gtt_max);
        if (ret) {
                        goto search_free;
  
                i915_gem_object_unpin_pages(obj);
-               kfree(node);
                return ret;
        }
-       if (WARN_ON(!i915_gem_valid_gtt_space(dev, node, obj->cache_level))) {
+       if (WARN_ON(!i915_gem_valid_gtt_space(dev, &obj->gtt_space,
+                                             obj->cache_level))) {
                i915_gem_object_unpin_pages(obj);
-               drm_mm_put_block(node);
+               drm_mm_remove_node(&obj->gtt_space);
                return -EINVAL;
        }
  
        ret = i915_gem_gtt_prepare_object(obj);
        if (ret) {
                i915_gem_object_unpin_pages(obj);
-               drm_mm_put_block(node);
+               drm_mm_remove_node(&obj->gtt_space);
                return ret;
        }
  
        list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
        list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
  
-       obj->gtt_space = node;
-       obj->gtt_offset = node->start;
        fenceable =
-               node->size == fence_size &&
-               (node->start & (fence_alignment - 1)) == 0;
+               i915_gem_obj_ggtt_size(obj) == fence_size &&
+               (i915_gem_obj_ggtt_offset(obj) & (fence_alignment - 1)) == 0;
  
-       mappable =
-               obj->gtt_offset + obj->base.size <= dev_priv->gtt.mappable_end;
+       mappable = i915_gem_obj_ggtt_offset(obj) + obj->base.size <=
+               dev_priv->gtt.mappable_end;
  
        obj->map_and_fenceable = mappable && fenceable;
  
@@@ -3244,7 -3262,7 +3234,7 @@@ i915_gem_object_set_to_gtt_domain(struc
        int ret;
  
        /* Not valid to be called on unbound objects. */
-       if (obj->gtt_space == NULL)
+       if (!i915_gem_obj_ggtt_bound(obj))
                return -EINVAL;
  
        if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
@@@ -3303,13 -3321,13 +3293,13 @@@ int i915_gem_object_set_cache_level(str
                return -EBUSY;
        }
  
-       if (!i915_gem_valid_gtt_space(dev, obj->gtt_space, cache_level)) {
+       if (!i915_gem_valid_gtt_space(dev, &obj->gtt_space, cache_level)) {
                ret = i915_gem_object_unbind(obj);
                if (ret)
                        return ret;
        }
  
-       if (obj->gtt_space) {
+       if (i915_gem_obj_ggtt_bound(obj)) {
                ret = i915_gem_object_finish_gpu(obj);
                if (ret)
                        return ret;
                        i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
                                               obj, cache_level);
  
-               obj->gtt_space->color = cache_level;
+               i915_gem_obj_ggtt_set_color(obj, cache_level);
        }
  
        if (cache_level == I915_CACHE_NONE) {
@@@ -3613,14 -3631,14 +3603,14 @@@ i915_gem_object_pin(struct drm_i915_gem
        if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
                return -EBUSY;
  
-       if (obj->gtt_space != NULL) {
-               if ((alignment && obj->gtt_offset & (alignment - 1)) ||
+       if (i915_gem_obj_ggtt_bound(obj)) {
+               if ((alignment && i915_gem_obj_ggtt_offset(obj) & (alignment - 1)) ||
                    (map_and_fenceable && !obj->map_and_fenceable)) {
                        WARN(obj->pin_count,
                             "bo is already pinned with incorrect alignment:"
-                            " offset=%x, req.alignment=%x, req.map_and_fenceable=%d,"
+                            " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
                             " obj->map_and_fenceable=%d\n",
-                            obj->gtt_offset, alignment,
+                            i915_gem_obj_ggtt_offset(obj), alignment,
                             map_and_fenceable,
                             obj->map_and_fenceable);
                        ret = i915_gem_object_unbind(obj);
                }
        }
  
-       if (obj->gtt_space == NULL) {
+       if (!i915_gem_obj_ggtt_bound(obj)) {
                struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
  
                ret = i915_gem_object_bind_to_gtt(obj, alignment,
@@@ -3655,7 -3673,7 +3645,7 @@@ voi
  i915_gem_object_unpin(struct drm_i915_gem_object *obj)
  {
        BUG_ON(obj->pin_count == 0);
-       BUG_ON(obj->gtt_space == NULL);
+       BUG_ON(!i915_gem_obj_ggtt_bound(obj));
  
        if (--obj->pin_count == 0)
                obj->pin_mappable = false;
@@@ -3705,7 -3723,7 +3695,7 @@@ i915_gem_pin_ioctl(struct drm_device *d
         * as the X server doesn't manage domains yet
         */
        i915_gem_object_flush_cpu_write_domain(obj);
-       args->offset = obj->gtt_offset;
+       args->offset = i915_gem_obj_ggtt_offset(obj);
  out:
        drm_gem_object_unreference(&obj->base);
  unlock:
@@@ -3974,9 -3992,7 +3964,7 @@@ i915_gem_idle(struct drm_device *dev
        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret;
  
-       mutex_lock(&dev->struct_mutex);
-       if (dev_priv->mm.suspended) {
+       if (dev_priv->ums.mm_suspended) {
                mutex_unlock(&dev->struct_mutex);
                return 0;
        }
        if (!drm_core_check_feature(dev, DRIVER_MODESET))
                i915_gem_evict_everything(dev);
  
-       /* Hack!  Don't let anybody do execbuf while we don't control the chip.
-        * We need to replace this with a semaphore, or something.
-        * And not confound mm.suspended!
-        */
-       dev_priv->mm.suspended = 1;
 -      i915_gem_reset_fences(dev);
 -
        del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
  
        i915_kernel_lost_context(dev);
        i915_gem_cleanup_ringbuffer(dev);
  
-       mutex_unlock(&dev->struct_mutex);
        /* Cancel the retire work handler, which should be idle now. */
        cancel_delayed_work_sync(&dev_priv->mm.retire_work);
  
@@@ -4213,7 -4224,7 +4194,7 @@@ in
  i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
  {
-       drm_i915_private_t *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;
  
        if (drm_core_check_feature(dev, DRIVER_MODESET))
        }
  
        mutex_lock(&dev->struct_mutex);
-       dev_priv->mm.suspended = 0;
+       dev_priv->ums.mm_suspended = 0;
  
        ret = i915_gem_init_hw(dev);
        if (ret != 0) {
  cleanup_ringbuffer:
        mutex_lock(&dev->struct_mutex);
        i915_gem_cleanup_ringbuffer(dev);
-       dev_priv->mm.suspended = 1;
+       dev_priv->ums.mm_suspended = 1;
        mutex_unlock(&dev->struct_mutex);
  
        return ret;
  i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
  {
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int ret;
        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return 0;
  
        drm_irq_uninstall(dev);
-       return i915_gem_idle(dev);
+       mutex_lock(&dev->struct_mutex);
+       ret =  i915_gem_idle(dev);
+       /* Hack!  Don't let anybody do execbuf while we don't control the chip.
+        * We need to replace this with a semaphore, or something.
+        * And not confound ums.mm_suspended!
+        */
+       if (ret != 0)
+               dev_priv->ums.mm_suspended = 1;
+       mutex_unlock(&dev->struct_mutex);
+       return ret;
  }
  
  void
@@@ -4270,9 -4296,11 +4266,11 @@@ i915_gem_lastclose(struct drm_device *d
        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return;
  
+       mutex_lock(&dev->struct_mutex);
        ret = i915_gem_idle(dev);
        if (ret)
                DRM_ERROR("failed to idle hardware: %d\n", ret);
+       mutex_unlock(&dev->struct_mutex);
  }
  
  static void
@@@ -4327,8 -4355,7 +4325,8 @@@ i915_gem_load(struct drm_device *dev
                dev_priv->num_fence_regs = 8;
  
        /* Initialize fence registers to zero */
 -      i915_gem_reset_fences(dev);
 +      INIT_LIST_HEAD(&dev_priv->mm.fence_list);
 +      i915_gem_restore_fences(dev);
  
        i915_gem_detect_bit_6_swizzle(dev);
        init_waitqueue_head(&dev_priv->pending_flip_queue);
@@@ -4594,7 -4621,7 +4592,7 @@@ i915_gem_inactive_shrink(struct shrinke
        list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
                if (obj->pages_pin_count == 0)
                        cnt += obj->base.size >> PAGE_SHIFT;
 -      list_for_each_entry(obj, &dev_priv->mm.inactive_list, global_list)
 +      list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list)
                if (obj->pin_count == 0 && obj->pages_pin_count == 0)
                        cnt += obj->base.size >> PAGE_SHIFT;
  
diff --combined drivers/gpu/drm/i915/i915_gem_stolen.c
index 982d4732cecff93e45370f30dfab03cc1bfdb32a,76c3b8699168f19867e749c7ec22dbc798a23759..5c1a535d5072a34c62b9ab563e592e569d425acc
@@@ -46,6 -46,7 +46,7 @@@ static unsigned long i915_stolen_to_phy
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct pci_dev *pdev = dev_priv->bridge_dev;
+       struct resource *r;
        u32 base;
  
        /* On the machines I have tested the Graphics Base of Stolen Memory
  #endif
        }
  
+       if (base == 0)
+               return 0;
+
+       /* Verify that nothing else uses this physical address. Stolen
+        * memory should be reserved by the BIOS and hidden from the
+        * kernel. So if the region is already marked as busy, something
+        * is seriously wrong.
+        */
+       r = devm_request_mem_region(dev->dev, base, dev_priv->gtt.stolen_size,
+                                   "Graphics Stolen Memory");
+       if (r == NULL) {
+               DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
+                         base, base + (uint32_t)dev_priv->gtt.stolen_size);
+               base = 0;
+       }
+
        return base;
  }
  
@@@ -120,7 -137,7 +137,7 @@@ static int i915_setup_compression(struc
                if (!compressed_llb)
                        goto err_fb;
  
-               dev_priv->compressed_llb = compressed_llb;
+               dev_priv->fbc.compressed_llb = compressed_llb;
  
                I915_WRITE(FBC_CFB_BASE,
                           dev_priv->mm.stolen_base + compressed_fb->start);
                           dev_priv->mm.stolen_base + compressed_llb->start);
        }
  
-       dev_priv->compressed_fb = compressed_fb;
-       dev_priv->cfb_size = size;
+       dev_priv->fbc.compressed_fb = compressed_fb;
+       dev_priv->fbc.size = size;
  
        DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n",
                      size);
@@@ -147,10 -164,10 +164,10 @@@ int i915_gem_stolen_setup_compression(s
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
  
 -      if (dev_priv->mm.stolen_base == 0)
 +      if (!drm_mm_initialized(&dev_priv->mm.stolen))
                return -ENODEV;
  
-       if (size < dev_priv->cfb_size)
+       if (size < dev_priv->fbc.size)
                return 0;
  
        /* Release any current block */
@@@ -163,25 -180,22 +180,25 @@@ void i915_gem_stolen_cleanup_compressio
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
  
-       if (dev_priv->cfb_size == 0)
+       if (dev_priv->fbc.size == 0)
                return;
  
-       if (dev_priv->compressed_fb)
-               drm_mm_put_block(dev_priv->compressed_fb);
+       if (dev_priv->fbc.compressed_fb)
+               drm_mm_put_block(dev_priv->fbc.compressed_fb);
  
-       if (dev_priv->compressed_llb)
-               drm_mm_put_block(dev_priv->compressed_llb);
+       if (dev_priv->fbc.compressed_llb)
+               drm_mm_put_block(dev_priv->fbc.compressed_llb);
  
-       dev_priv->cfb_size = 0;
+       dev_priv->fbc.size = 0;
  }
  
  void i915_gem_cleanup_stolen(struct drm_device *dev)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
  
 +      if (!drm_mm_initialized(&dev_priv->mm.stolen))
 +              return;
 +
        i915_gem_stolen_cleanup_compression(dev);
        drm_mm_takedown(&dev_priv->mm.stolen);
  }
@@@ -201,6 -215,9 +218,9 @@@ int i915_gem_init_stolen(struct drm_dev
        if (IS_VALLEYVIEW(dev))
                bios_reserved = 1024*1024; /* top 1M on VLV/BYT */
  
+       if (WARN_ON(bios_reserved > dev_priv->gtt.stolen_size))
+               return 0;
+
        /* Basic memrange allocator for stolen space */
        drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_size -
                    bios_reserved);
@@@ -303,7 -320,7 +323,7 @@@ i915_gem_object_create_stolen(struct dr
        struct drm_i915_gem_object *obj;
        struct drm_mm_node *stolen;
  
 -      if (dev_priv->mm.stolen_base == 0)
 +      if (!drm_mm_initialized(&dev_priv->mm.stolen))
                return NULL;
  
        DRM_DEBUG_KMS("creating stolen object: size=%x\n", size);
@@@ -333,8 -350,9 +353,9 @@@ i915_gem_object_create_stolen_for_preal
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj;
        struct drm_mm_node *stolen;
+       int ret;
  
 -      if (dev_priv->mm.stolen_base == 0)
 +      if (!drm_mm_initialized(&dev_priv->mm.stolen))
                return NULL;
  
        DRM_DEBUG_KMS("creating preallocated stolen object: stolen_offset=%x, gtt_offset=%x, size=%x\n",
        if (WARN_ON(size == 0))
                return NULL;
  
-       stolen = drm_mm_create_block(&dev_priv->mm.stolen,
-                                    stolen_offset, size,
-                                    false);
-       if (stolen == NULL) {
+       stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
+       if (!stolen)
+               return NULL;
+
+       stolen->start = stolen_offset;
+       stolen->size = size;
+       ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen);
+       if (ret) {
                DRM_DEBUG_KMS("failed to allocate stolen space\n");
+               kfree(stolen);
                return NULL;
        }
  
        }
  
        /* Some objects just need physical mem from stolen space */
-       if (gtt_offset == -1)
+       if (gtt_offset == I915_GTT_OFFSET_NONE)
                return obj;
  
        /* To simplify the initialisation sequence between KMS and GTT,
         * we allow construction of the stolen object prior to
         * setting up the GTT space. The actual reservation will occur
         * later.
         */
+       obj->gtt_space.start = gtt_offset;
+       obj->gtt_space.size = size;
        if (drm_mm_initialized(&dev_priv->mm.gtt_space)) {
-               obj->gtt_space = drm_mm_create_block(&dev_priv->mm.gtt_space,
-                                                    gtt_offset, size,
-                                                    false);
-               if (obj->gtt_space == NULL) {
+               ret = drm_mm_reserve_node(&dev_priv->mm.gtt_space,
+                                         &obj->gtt_space);
+               if (ret) {
                        DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
-                       drm_gem_object_unreference(&obj->base);
-                       return NULL;
+                       goto unref_out;
                }
-       } else
-               obj->gtt_space = I915_GTT_RESERVED;
+       }
  
-       obj->gtt_offset = gtt_offset;
        obj->has_global_gtt_mapping = 1;
  
        list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
        list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
  
        return obj;
+ unref_out:
+       drm_gem_object_unreference(&obj->base);
+       return NULL;
  }
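
The hunk above is the core of the drm_mm_create_block() to drm_mm_reserve_node() conversion: the caller now allocates and owns the node, fills in start and size, and the allocator reports an errno instead of handing back a pointer. A minimal sketch of the new calling convention, where only the drm_mm calls are real and the wrapper is illustrative:

/* Sketch of the reworked reservation pattern (wrapper is hypothetical). */
static struct drm_mm_node *reserve_range_sketch(struct drm_mm *mm,
						unsigned long start,
						unsigned long size)
{
	struct drm_mm_node *node;
	int ret;

	node = kzalloc(sizeof(*node), GFP_KERNEL);	/* caller allocates */
	if (!node)
		return NULL;

	node->start = start;	/* pin down the exact range to reserve */
	node->size = size;

	/* 0 on success, -ENOSPC if the range overlaps an existing node */
	ret = drm_mm_reserve_node(mm, node);
	if (ret) {
		kfree(node);
		return NULL;
	}
	return node;
}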
  
  void
index 26e162bb3a5158d5da3f1b5e9b2614c7515ede6b,11eb697dec0114f50b8c29e51e149f220be6b674..7db2cd76786d5ef789a3f0f0ccbb6a81666aaf49
@@@ -75,12 -75,7 +75,12 @@@ intel_dp_max_link_bw(struct intel_dp *i
        case DP_LINK_BW_1_62:
        case DP_LINK_BW_2_7:
                break;
 +      case DP_LINK_BW_5_4: /* 1.2 capable displays may advertise higher bw */
 +              max_link_bw = DP_LINK_BW_2_7;
 +              break;
        default:
 +              WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
 +                   max_link_bw);
                max_link_bw = DP_LINK_BW_1_62;
                break;
        }
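
For context, the value being switched on comes straight from the sink's DPCD, so after this change the whole helper reads roughly as follows (a reconstruction from the hunk, not a verbatim copy of the file):

static int
intel_dp_max_link_bw(struct intel_dp *intel_dp)
{
	/* Max link rate as reported by the sink's DPCD. */
	int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];

	switch (max_link_bw) {
	case DP_LINK_BW_1_62:
	case DP_LINK_BW_2_7:
		break;
	case DP_LINK_BW_5_4: /* 1.2 capable displays may advertise higher bw */
		max_link_bw = DP_LINK_BW_2_7; /* clamp: no HBR2 support here */
		break;
	default:
		WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
		     max_link_bw);
		max_link_bw = DP_LINK_BW_1_62;
		break;
	}
	return max_link_bw;
}
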
@@@ -1360,6 -1355,13 +1360,13 @@@ static void intel_dp_get_config(struct 
        }
  
        pipe_config->adjusted_mode.flags |= flags;
+       if (dp_to_dig_port(intel_dp)->port == PORT_A) {
+               if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
+                       pipe_config->port_clock = 162000;
+               else
+                       pipe_config->port_clock = 270000;
+       }
  }
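
The new port A branch recovers the link clock for state readout by decoding the eDP PLL frequency select bit; despite the register field's 160MHz name, it selects the 1.62 GHz link rate. Restated (clock values in the driver's usual kHz units):

/* Decode added above: DP_A PLL frequency select -> port_clock (kHz). */
/*   DP_PLL_FREQ_160MHZ  ->  162000  (RBR, 1.62 GHz link)             */
/*   otherwise           ->  270000  (HBR, 2.70 GHz link)             */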
  
  static void intel_disable_dp(struct intel_encoder *encoder)
index 021e8daa022d34485583e53eeea71d7f9ba33fb5,b0e1088b2c97876bb5440db8c52f7410f2818396..a0745d143902987b08ed1c69149953b97145a94b
@@@ -115,17 -115,25 +115,25 @@@ static void intel_lvds_get_config(struc
   * This is an exception to the general rule that mode_set doesn't turn
   * things on.
   */
- static void intel_pre_pll_enable_lvds(struct intel_encoder *encoder)
+ static void intel_pre_enable_lvds(struct intel_encoder *encoder)
  {
        struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+       struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
        struct drm_display_mode *fixed_mode =
                lvds_encoder->attached_connector->base.panel.fixed_mode;
-       int pipe = intel_crtc->pipe;
+       int pipe = crtc->pipe;
        u32 temp;
  
+       if (HAS_PCH_SPLIT(dev)) {
+               assert_fdi_rx_pll_disabled(dev_priv, pipe);
+               assert_shared_dpll_disabled(dev_priv,
+                                           intel_crtc_to_shared_dpll(crtc));
+       } else {
+               assert_pll_disabled(dev_priv, pipe);
+       }
        temp = I915_READ(lvds_encoder->reg);
        temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
  
  
        /* set the corresponding LVDS_BORDER bit */
        temp &= ~LVDS_BORDER_ENABLE;
-       temp |= intel_crtc->config.gmch_pfit.lvds_border_bits;
+       temp |= crtc->config.gmch_pfit.lvds_border_bits;
        /* Set the B0-B3 data pairs corresponding to whether we're going to
         * set the DPLLs for dual-channel mode or not.
         */
        if (INTEL_INFO(dev)->gen == 4) {
                /* Bspec wording suggests that LVDS port dithering only exists
                 * for 18bpp panels. */
-               if (intel_crtc->config.dither &&
-                   intel_crtc->config.pipe_bpp == 18)
+               if (crtc->config.dither && crtc->config.pipe_bpp == 18)
                        temp |= LVDS_ENABLE_DITHER;
                else
                        temp &= ~LVDS_ENABLE_DITHER;
@@@ -690,22 -697,6 +697,22 @@@ static const struct dmi_system_id intel
                        DMI_MATCH(DMI_PRODUCT_NAME, "ESPRIMO Q900"),
                },
        },
 +      {
 +              .callback = intel_no_lvds_dmi_callback,
 +              .ident = "Intel D510MO",
 +              .matches = {
 +                      DMI_MATCH(DMI_BOARD_VENDOR, "Intel"),
 +                      DMI_EXACT_MATCH(DMI_BOARD_NAME, "D510MO"),
 +              },
 +      },
 +      {
 +              .callback = intel_no_lvds_dmi_callback,
 +              .ident = "Intel D525MW",
 +              .matches = {
 +                      DMI_MATCH(DMI_BOARD_VENDOR, "Intel"),
 +                      DMI_EXACT_MATCH(DMI_BOARD_NAME, "D525MW"),
 +              },
 +      },
  
        { }     /* terminating entry */
  };
@@@ -955,7 -946,7 +962,7 @@@ void intel_lvds_init(struct drm_device 
                         DRM_MODE_ENCODER_LVDS);
  
        intel_encoder->enable = intel_enable_lvds;
-       intel_encoder->pre_pll_enable = intel_pre_pll_enable_lvds;
+       intel_encoder->pre_enable = intel_pre_enable_lvds;
        intel_encoder->compute_config = intel_lvds_compute_config;
        intel_encoder->disable = intel_disable_lvds;
        intel_encoder->get_hw_state = intel_lvds_get_hw_state;
index d10e6735771fe55bd738bc3ee2d840f384d04471,a9be0d1c173dbc4e4f89a4a3b02ac4b1970be6d5..fb4afaa8036f144888565e79ed3a6b1e13585757
@@@ -30,6 -30,7 +30,7 @@@
  #include "intel_drv.h"
  #include "../../../platform/x86/intel_ips.h"
  #include <linux/module.h>
+ #include <drm/i915_powerwell.h>
  
  #define FORCEWAKE_ACK_TIMEOUT_MS 2
  
@@@ -86,7 -87,7 +87,7 @@@ static void i8xx_enable_fbc(struct drm_
        int plane, i;
        u32 fbc_ctl, fbc_ctl2;
  
-       cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
+       cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE;
        if (fb->pitches[0] < cfb_pitch)
                cfb_pitch = fb->pitches[0];
  
@@@ -217,7 -218,7 +218,7 @@@ static void ironlake_enable_fbc(struct 
                   (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
                   (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
        I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
-       I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
+       I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
        /* enable it... */
        I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
  
@@@ -274,7 -275,7 +275,7 @@@ static void gen7_enable_fbc(struct drm_
        struct drm_i915_gem_object *obj = intel_fb->obj;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
  
-       I915_WRITE(IVB_FBC_RT_BASE, obj->gtt_offset);
+       I915_WRITE(IVB_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj));
  
        I915_WRITE(ILK_DPFC_CONTROL, DPFC_CTL_EN | DPFC_CTL_LIMIT_1X |
                   IVB_DPFC_CTL_FENCE_EN |
@@@ -325,7 -326,7 +326,7 @@@ static void intel_fbc_work_fn(struct wo
        struct drm_i915_private *dev_priv = dev->dev_private;
  
        mutex_lock(&dev->struct_mutex);
-       if (work == dev_priv->fbc_work) {
+       if (work == dev_priv->fbc.fbc_work) {
                /* Double check that we haven't switched fb without cancelling
                 * the prior work.
                 */
                if (work->crtc->fb == work->fb) {
                        dev_priv->display.enable_fbc(work->crtc,
                                                     work->interval);
  
-                       dev_priv->cfb_plane = to_intel_crtc(work->crtc)->plane;
-                       dev_priv->cfb_fb = work->crtc->fb->base.id;
-                       dev_priv->cfb_y = work->crtc->y;
+                       dev_priv->fbc.plane = to_intel_crtc(work->crtc)->plane;
+                       dev_priv->fbc.fb_id = work->crtc->fb->base.id;
+                       dev_priv->fbc.y = work->crtc->y;
                }
  
-               dev_priv->fbc_work = NULL;
+               dev_priv->fbc.fbc_work = NULL;
        }
        mutex_unlock(&dev->struct_mutex);
  
  
  static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
  {
-       if (dev_priv->fbc_work == NULL)
+       if (dev_priv->fbc.fbc_work == NULL)
                return;
  
        DRM_DEBUG_KMS("cancelling pending FBC enable\n");
  
        /* Synchronisation is provided by struct_mutex and checking of
-        * dev_priv->fbc_work, so we can perform the cancellation
+        * dev_priv->fbc.fbc_work, so we can perform the cancellation
         * entirely asynchronously.
         */
-       if (cancel_delayed_work(&dev_priv->fbc_work->work))
+       if (cancel_delayed_work(&dev_priv->fbc.fbc_work->work))
                /* tasklet was killed before being run, clean up */
-               kfree(dev_priv->fbc_work);
+               kfree(dev_priv->fbc.fbc_work);
  
        /* Mark the work as no longer wanted so that if it does
         * wake-up (because the work was already running and waiting
         * for our mutex), it will discover that it is no longer
         * necessary to run.
         */
-       dev_priv->fbc_work = NULL;
+       dev_priv->fbc.fbc_work = NULL;
  }
  
- void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+ static void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
  {
        struct intel_fbc_work *work;
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
  
        work = kzalloc(sizeof *work, GFP_KERNEL);
        if (work == NULL) {
+               DRM_ERROR("Failed to allocate FBC work structure\n");
                dev_priv->display.enable_fbc(crtc, interval);
                return;
        }
        work->interval = interval;
        INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);
  
-       dev_priv->fbc_work = work;
-       DRM_DEBUG_KMS("scheduling delayed FBC enable\n");
+       dev_priv->fbc.fbc_work = work;
  
        /* Delay the actual enabling to let pageflipping cease and the
         * display settle before starting the compression. Note that
@@@ -418,7 -418,7 +418,7 @@@ void intel_disable_fbc(struct drm_devic
                return;
  
        dev_priv->display.disable_fbc(dev);
-       dev_priv->cfb_plane = -1;
+       dev_priv->fbc.plane = -1;
  }
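
Every cfb_* and fbc_* field touched in this file has moved into a single dev_priv->fbc substruct. Pieced together from the accessors in this diff, the grouping looks roughly like the following; field order and exact types are illustrative, the real declaration lives in i915_drv.h:

/* Reconstructed from the dev_priv->fbc.* uses in this diff (illustrative). */
struct i915_fbc {
	unsigned long size;			/* was dev_priv->cfb_size */
	unsigned int fb_id;			/* was dev_priv->cfb_fb */
	enum plane plane;			/* was dev_priv->cfb_plane */
	int y;					/* was dev_priv->cfb_y */

	struct drm_mm_node *compressed_fb;	/* stolen-space blocks */
	struct drm_mm_node *compressed_llb;

	struct intel_fbc_work *fbc_work;	/* delayed-enable state */

	enum no_fbc_reason no_fbc_reason;	/* why FBC is off, for debug */
};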
  
  /**
@@@ -448,7 -448,6 +448,6 @@@ void intel_update_fbc(struct drm_devic
        struct drm_framebuffer *fb;
        struct intel_framebuffer *intel_fb;
        struct drm_i915_gem_object *obj;
-       int enable_fbc;
        unsigned int max_hdisplay, max_vdisplay;
  
        if (!i915_powersave)
                    !to_intel_crtc(tmp_crtc)->primary_disabled) {
                        if (crtc) {
                                DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
-                               dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
+                               dev_priv->fbc.no_fbc_reason =
+                                       FBC_MULTIPLE_PIPES;
                                goto out_disable;
                        }
                        crtc = tmp_crtc;
  
        if (!crtc || crtc->fb == NULL) {
                DRM_DEBUG_KMS("no output, disabling\n");
-               dev_priv->no_fbc_reason = FBC_NO_OUTPUT;
+               dev_priv->fbc.no_fbc_reason = FBC_NO_OUTPUT;
                goto out_disable;
        }
  
        intel_fb = to_intel_framebuffer(fb);
        obj = intel_fb->obj;
  
-       enable_fbc = i915_enable_fbc;
-       if (enable_fbc < 0) {
-               DRM_DEBUG_KMS("fbc set to per-chip default\n");
-               enable_fbc = 1;
-               if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
-                       enable_fbc = 0;
+       if (i915_enable_fbc < 0 &&
+           INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev)) {
+               DRM_DEBUG_KMS("disabled per chip default\n");
+               dev_priv->fbc.no_fbc_reason = FBC_CHIP_DEFAULT;
+               goto out_disable;
        }
-       if (!enable_fbc) {
+       if (!i915_enable_fbc) {
                DRM_DEBUG_KMS("fbc disabled per module param\n");
-               dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
+               dev_priv->fbc.no_fbc_reason = FBC_MODULE_PARAM;
                goto out_disable;
        }
        if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
            (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
                DRM_DEBUG_KMS("mode incompatible with compression, "
                              "disabling\n");
-               dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
+               dev_priv->fbc.no_fbc_reason = FBC_UNSUPPORTED_MODE;
                goto out_disable;
        }
  
        if ((crtc->mode.hdisplay > max_hdisplay) ||
            (crtc->mode.vdisplay > max_vdisplay)) {
                DRM_DEBUG_KMS("mode too large for compression, disabling\n");
-               dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
+               dev_priv->fbc.no_fbc_reason = FBC_MODE_TOO_LARGE;
                goto out_disable;
        }
        if ((IS_I915GM(dev) || IS_I945GM(dev) || IS_HASWELL(dev)) &&
            intel_crtc->plane != 0) {
                DRM_DEBUG_KMS("plane not 0, disabling compression\n");
-               dev_priv->no_fbc_reason = FBC_BAD_PLANE;
+               dev_priv->fbc.no_fbc_reason = FBC_BAD_PLANE;
                goto out_disable;
        }
  
        if (obj->tiling_mode != I915_TILING_X ||
            obj->fence_reg == I915_FENCE_REG_NONE) {
                DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
-               dev_priv->no_fbc_reason = FBC_NOT_TILED;
+               dev_priv->fbc.no_fbc_reason = FBC_NOT_TILED;
                goto out_disable;
        }
  
  
        if (i915_gem_stolen_setup_compression(dev, intel_fb->obj->base.size)) {
                DRM_DEBUG_KMS("framebuffer too large, disabling compression\n");
-               dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
+               dev_priv->fbc.no_fbc_reason = FBC_STOLEN_TOO_SMALL;
                goto out_disable;
        }
  
         * cannot be unpinned (and have its GTT offset and fence revoked)
         * without first being decoupled from the scanout and FBC disabled.
         */
-       if (dev_priv->cfb_plane == intel_crtc->plane &&
-           dev_priv->cfb_fb == fb->base.id &&
-           dev_priv->cfb_y == crtc->y)
+       if (dev_priv->fbc.plane == intel_crtc->plane &&
+           dev_priv->fbc.fb_id == fb->base.id &&
+           dev_priv->fbc.y == crtc->y)
                return;
  
        if (intel_fbc_enabled(dev)) {
@@@ -2468,8 -2467,8 +2467,8 @@@ static void hsw_compute_wm_results(stru
  
  /* Find the result with the highest level enabled. Check for enable_fbc_wm in
   * case both are at the same level. Prefer r1 in case they're the same. */
- struct hsw_wm_values *hsw_find_best_result(struct hsw_wm_values *r1,
-                                          struct hsw_wm_values *r2)
+ static struct hsw_wm_values *hsw_find_best_result(struct hsw_wm_values *r1,
+                                                 struct hsw_wm_values *r2)
  {
        int i, val_r1 = 0, val_r2 = 0;
  
@@@ -3076,19 -3075,12 +3075,12 @@@ void gen6_set_rps(struct drm_device *de
   */
  static void vlv_update_rps_cur_delay(struct drm_i915_private *dev_priv)
  {
-       unsigned long timeout = jiffies + msecs_to_jiffies(10);
        u32 pval;
  
        WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
  
-       do {
-               pval = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
-               if (time_after(jiffies, timeout)) {
-                       DRM_DEBUG_DRIVER("timed out waiting for Punit\n");
-                       break;
-               }
-               udelay(10);
-       } while (pval & 1);
+       if (wait_for(((pval = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS)) & GENFREQSTATUS) == 0, 10))
+               DRM_DEBUG_DRIVER("timed out waiting for Punit\n");
  
        pval >>= 8;
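
The deleted loop and the wait_for() call do the same thing: poll PUNIT_REG_GPU_FREQ_STS until the busy bit (GENFREQSTATUS) clears or 10 ms elapse. The helper's shape is roughly the following, paraphrased from the driver's intel_drv.h rather than quoted:

/* Rough shape of i915's wait_for() helper (paraphrased, not verbatim). */
#define wait_for_sketch(COND, MS) ({					\
	unsigned long timeout__ = jiffies + msecs_to_jiffies(MS);	\
	int ret__ = 0;							\
	while (!(COND)) {						\
		if (time_after(jiffies, timeout__)) {			\
			ret__ = -ETIMEDOUT;	/* caller sees nonzero */ \
			break;						\
		}							\
		msleep(1);		/* back off between polls */	\
	}								\
	ret__;								\
})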
  
@@@ -3143,9 -3135,9 +3135,9 @@@ static void gen6_disable_rps(struct drm
         * register (PMIMR) to mask PM interrupts. The only risk is in leaving
         * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
  
-       spin_lock_irq(&dev_priv->rps.lock);
+       spin_lock_irq(&dev_priv->irq_lock);
        dev_priv->rps.pm_iir = 0;
-       spin_unlock_irq(&dev_priv->rps.lock);
+       spin_unlock_irq(&dev_priv->irq_lock);
  
        I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS);
  }
@@@ -3162,9 -3154,9 +3154,9 @@@ static void valleyview_disable_rps(stru
         * register (PMIMR) to mask PM interrupts. The only risk is in leaving
         * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
  
-       spin_lock_irq(&dev_priv->rps.lock);
+       spin_lock_irq(&dev_priv->irq_lock);
        dev_priv->rps.pm_iir = 0;
-       spin_unlock_irq(&dev_priv->rps.lock);
+       spin_unlock_irq(&dev_priv->irq_lock);
  
        I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
  
@@@ -3329,13 -3321,13 +3321,13 @@@ static void gen6_enable_rps(struct drm_
  
        /* requires MSI enabled */
        I915_WRITE(GEN6_PMIER, I915_READ(GEN6_PMIER) | GEN6_PM_RPS_EVENTS);
-       spin_lock_irq(&dev_priv->rps.lock);
+       spin_lock_irq(&dev_priv->irq_lock);
        /* FIXME: Our interrupt enabling sequence is bonghits.
         * dev_priv->rps.pm_iir really should be 0 here. */
        dev_priv->rps.pm_iir = 0;
        I915_WRITE(GEN6_PMIMR, I915_READ(GEN6_PMIMR) & ~GEN6_PM_RPS_EVENTS);
        I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS);
-       spin_unlock_irq(&dev_priv->rps.lock);
+       spin_unlock_irq(&dev_priv->irq_lock);
        /* unmask all PM interrupts */
        I915_WRITE(GEN6_PMINTRMSK, 0);
  
@@@ -3482,7 -3474,7 +3474,7 @@@ static void valleyview_setup_pctx(struc
                pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base;
                pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv->dev,
                                                                      pcbr_offset,
-                                                                     -1,
+                                                                     I915_GTT_OFFSET_NONE,
                                                                      pctx_size);
                goto out;
        }
@@@ -3609,10 -3601,10 +3601,10 @@@ static void valleyview_enable_rps(struc
  
        /* requires MSI enabled */
        I915_WRITE(GEN6_PMIER, GEN6_PM_RPS_EVENTS);
-       spin_lock_irq(&dev_priv->rps.lock);
+       spin_lock_irq(&dev_priv->irq_lock);
        WARN_ON(dev_priv->rps.pm_iir != 0);
        I915_WRITE(GEN6_PMIMR, 0);
-       spin_unlock_irq(&dev_priv->rps.lock);
+       spin_unlock_irq(&dev_priv->irq_lock);
        /* enable all PM interrupts */
        I915_WRITE(GEN6_PMINTRMSK, 0);
  
@@@ -3708,7 -3700,7 +3700,7 @@@ static void ironlake_enable_rc6(struct 
  
        intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
        intel_ring_emit(ring, MI_SET_CONTEXT);
-       intel_ring_emit(ring, dev_priv->ips.renderctx->gtt_offset |
+       intel_ring_emit(ring, i915_gem_obj_ggtt_offset(dev_priv->ips.renderctx) |
                        MI_MM_SPACE_GTT |
                        MI_SAVE_EXT_STATE_EN |
                        MI_RESTORE_EXT_STATE_EN |
                return;
        }
  
-       I915_WRITE(PWRCTXA, dev_priv->ips.pwrctx->gtt_offset | PWRCTX_EN);
+       I915_WRITE(PWRCTXA, i915_gem_obj_ggtt_offset(dev_priv->ips.pwrctx) | PWRCTX_EN);
        I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
  }
  
@@@ -5500,38 -5492,9 +5492,38 @@@ void intel_gt_init(struct drm_device *d
        if (IS_VALLEYVIEW(dev)) {
                dev_priv->gt.force_wake_get = vlv_force_wake_get;
                dev_priv->gt.force_wake_put = vlv_force_wake_put;
 -      } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
 +      } else if (IS_HASWELL(dev)) {
                dev_priv->gt.force_wake_get = __gen6_gt_force_wake_mt_get;
                dev_priv->gt.force_wake_put = __gen6_gt_force_wake_mt_put;
 +      } else if (IS_IVYBRIDGE(dev)) {
 +              u32 ecobus;
 +
 +              /* IVB configs may use multi-threaded forcewake */
 +
 +              /* A small trick here - if the bios hasn't configured
 +               * MT forcewake, and if the device is in RC6, then
 +               * force_wake_mt_get will not wake the device and the
 +               * ECOBUS read will return zero, which will be
 +               * (correctly) interpreted by the test below as MT
 +               * forcewake being disabled.
 +               */
 +              mutex_lock(&dev->struct_mutex);
 +              __gen6_gt_force_wake_mt_get(dev_priv);
 +              ecobus = I915_READ_NOTRACE(ECOBUS);
 +              __gen6_gt_force_wake_mt_put(dev_priv);
 +              mutex_unlock(&dev->struct_mutex);
 +
 +              if (ecobus & FORCEWAKE_MT_ENABLE) {
 +                      dev_priv->gt.force_wake_get =
 +                                              __gen6_gt_force_wake_mt_get;
 +                      dev_priv->gt.force_wake_put =
 +                                              __gen6_gt_force_wake_mt_put;
 +              } else {
 +                      DRM_INFO("No MT forcewake available on Ivybridge; this can result in issues\n");
 +                      DRM_INFO("when using vblank-synced partial screen updates.\n");
 +                      dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get;
 +                      dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put;
 +              }
        } else if (IS_GEN6(dev)) {
                dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get;
                dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put;
index 664118d8c1d6426353ed97bb61b1113369a7678a,815e3033224797a47a25a0dc8adf9c17d7e26d94..8527ea05124be9d2566a23e974d60136fe478398
@@@ -379,17 -379,6 +379,17 @@@ u32 intel_ring_get_active_head(struct i
        return I915_READ(acthd_reg);
  }
  
 +static void ring_setup_phys_status_page(struct intel_ring_buffer *ring)
 +{
 +      struct drm_i915_private *dev_priv = ring->dev->dev_private;
 +      u32 addr;
 +
 +      addr = dev_priv->status_page_dmah->busaddr;
 +      if (INTEL_INFO(ring->dev)->gen >= 4)
 +              addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
 +      I915_WRITE(HWS_PGA, addr);
 +}
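
The gen >= 4 branch packs the top of what is assumed here to be a 36-bit bus address into the register's low nibble: (busaddr >> 28) & 0xf0 lands address bits [35:32] in HWS_PGA bits [7:4]. A worked example under that assumption:

/* Hypothetical bus address 0x3_2000_0000 on gen4+:        */
/*   (0x320000000 >> 28) & 0xf0 == 0x30                    */
/*   addr = 0x20000000 | 0x30, so HWS_PGA gets 0x20000030  */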
 +
  static int init_ring_common(struct intel_ring_buffer *ring)
  {
        struct drm_device *dev = ring->dev;
        if (HAS_FORCE_WAKE(dev))
                gen6_gt_force_wake_get(dev_priv);
  
 +      if (I915_NEED_GFX_HWS(dev))
 +              intel_ring_setup_status_page(ring);
 +      else
 +              ring_setup_phys_status_page(ring);
 +
        /* Stop the ring if it's running. */
        I915_WRITE_CTL(ring, 0);
        I915_WRITE_HEAD(ring, 0);
         * registers with the above sequence (the readback of the HEAD registers
         * also enforces ordering), otherwise the hw might lose the new ring
         * register values. */
-       I915_WRITE_START(ring, obj->gtt_offset);
+       I915_WRITE_START(ring, i915_gem_obj_ggtt_offset(obj));
        I915_WRITE_CTL(ring,
                        ((ring->size - PAGE_SIZE) & RING_NR_PAGES)
                        | RING_VALID);
  
        /* If the head is still not zero, the ring is dead */
        if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 &&
-                    I915_READ_START(ring) == obj->gtt_offset &&
+                    I915_READ_START(ring) == i915_gem_obj_ggtt_offset(obj) &&
                     (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
                DRM_ERROR("%s initialization failed "
                                "ctl %08x head %08x tail %08x start %08x\n",
@@@ -505,7 -489,7 +505,7 @@@ init_pipe_control(struct intel_ring_buf
        if (ret)
                goto err_unref;
  
-       pc->gtt_offset = obj->gtt_offset;
+       pc->gtt_offset = i915_gem_obj_ggtt_offset(obj);
        pc->cpu_page = kmap(sg_page(obj->pages->sgl));
        if (pc->cpu_page == NULL) {
                ret = -ENOMEM;
@@@ -534,6 -518,9 +534,6 @@@ cleanup_pipe_control(struct intel_ring_
        struct pipe_control *pc = ring->private;
        struct drm_i915_gem_object *obj;
  
 -      if (!ring->private)
 -              return;
 -
        obj = pc->obj;
  
        kunmap(sg_page(obj->pages->sgl));
        drm_gem_object_unreference(&obj->base);
  
        kfree(pc);
 -      ring->private = NULL;
  }
  
  static int init_render_ring(struct intel_ring_buffer *ring)
@@@ -613,10 -601,7 +613,10 @@@ static void render_ring_cleanup(struct 
        if (HAS_BROKEN_CS_TLB(dev))
                drm_gem_object_unreference(to_gem_object(ring->private));
  
 -      cleanup_pipe_control(ring);
 +      if (INTEL_INFO(dev)->gen >= 5)
 +              cleanup_pipe_control(ring);
 +
 +      ring->private = NULL;
  }
  
  static void
@@@ -836,7 -821,7 +836,7 @@@ gen5_ring_get_irq(struct intel_ring_buf
                return false;
  
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (ring->irq_refcount.gt++ == 0) {
+       if (ring->irq_refcount++ == 0) {
                dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
                I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
                POSTING_READ(GTIMR);
@@@ -854,7 -839,7 +854,7 @@@ gen5_ring_put_irq(struct intel_ring_buf
        unsigned long flags;
  
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (--ring->irq_refcount.gt == 0) {
+       if (--ring->irq_refcount == 0) {
                dev_priv->gt_irq_mask |= ring->irq_enable_mask;
                I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
                POSTING_READ(GTIMR);
@@@ -873,7 -858,7 +873,7 @@@ i9xx_ring_get_irq(struct intel_ring_buf
                return false;
  
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (ring->irq_refcount.gt++ == 0) {
+       if (ring->irq_refcount++ == 0) {
                dev_priv->irq_mask &= ~ring->irq_enable_mask;
                I915_WRITE(IMR, dev_priv->irq_mask);
                POSTING_READ(IMR);
@@@ -891,7 -876,7 +891,7 @@@ i9xx_ring_put_irq(struct intel_ring_buf
        unsigned long flags;
  
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (--ring->irq_refcount.gt == 0) {
+       if (--ring->irq_refcount == 0) {
                dev_priv->irq_mask |= ring->irq_enable_mask;
                I915_WRITE(IMR, dev_priv->irq_mask);
                POSTING_READ(IMR);
@@@ -910,7 -895,7 +910,7 @@@ i8xx_ring_get_irq(struct intel_ring_buf
                return false;
  
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (ring->irq_refcount.gt++ == 0) {
+       if (ring->irq_refcount++ == 0) {
                dev_priv->irq_mask &= ~ring->irq_enable_mask;
                I915_WRITE16(IMR, dev_priv->irq_mask);
                POSTING_READ16(IMR);
@@@ -928,7 -913,7 +928,7 @@@ i8xx_ring_put_irq(struct intel_ring_buf
        unsigned long flags;
  
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (--ring->irq_refcount.gt == 0) {
+       if (--ring->irq_refcount == 0) {
                dev_priv->irq_mask |= ring->irq_enable_mask;
                I915_WRITE16(IMR, dev_priv->irq_mask);
                POSTING_READ16(IMR);
@@@ -1021,7 -1006,7 +1021,7 @@@ gen6_ring_get_irq(struct intel_ring_buf
        gen6_gt_force_wake_get(dev_priv);
  
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (ring->irq_refcount.gt++ == 0) {
+       if (ring->irq_refcount++ == 0) {
                if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
                        I915_WRITE_IMR(ring,
                                       ~(ring->irq_enable_mask |
@@@ -1045,7 -1030,7 +1045,7 @@@ gen6_ring_put_irq(struct intel_ring_buf
        unsigned long flags;
  
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       if (--ring->irq_refcount.gt == 0) {
+       if (--ring->irq_refcount == 0) {
                if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
                        I915_WRITE_IMR(ring,
                                       ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
@@@ -1070,14 -1055,14 +1070,14 @@@ hsw_vebox_get_irq(struct intel_ring_buf
        if (!dev->irq_enabled)
                return false;
  
-       spin_lock_irqsave(&dev_priv->rps.lock, flags);
-       if (ring->irq_refcount.pm++ == 0) {
+       spin_lock_irqsave(&dev_priv->irq_lock, flags);
+       if (ring->irq_refcount++ == 0) {
                u32 pm_imr = I915_READ(GEN6_PMIMR);
                I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
                I915_WRITE(GEN6_PMIMR, pm_imr & ~ring->irq_enable_mask);
                POSTING_READ(GEN6_PMIMR);
        }
-       spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
+       spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
  
        return true;
  }
@@@ -1092,14 -1077,14 +1092,14 @@@ hsw_vebox_put_irq(struct intel_ring_buf
        if (!dev->irq_enabled)
                return;
  
-       spin_lock_irqsave(&dev_priv->rps.lock, flags);
-       if (--ring->irq_refcount.pm == 0) {
+       spin_lock_irqsave(&dev_priv->irq_lock, flags);
+       if (--ring->irq_refcount == 0) {
                u32 pm_imr = I915_READ(GEN6_PMIMR);
                I915_WRITE_IMR(ring, ~0);
                I915_WRITE(GEN6_PMIMR, pm_imr | ring->irq_enable_mask);
                POSTING_READ(GEN6_PMIMR);
        }
-       spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
+       spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
  }
  
  static int
@@@ -1144,7 -1129,7 +1144,7 @@@ i830_dispatch_execbuffer(struct intel_r
                intel_ring_advance(ring);
        } else {
                struct drm_i915_gem_object *obj = ring->private;
-               u32 cs_offset = obj->gtt_offset;
+               u32 cs_offset = i915_gem_obj_ggtt_offset(obj);
  
                if (len > I830_BATCH_LIMIT)
                        return -ENOSPC;
@@@ -1229,7 -1214,7 +1229,7 @@@ static int init_status_page(struct inte
                goto err_unref;
        }
  
-       ring->status_page.gfx_addr = obj->gtt_offset;
+       ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj);
        ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
        if (ring->status_page.page_addr == NULL) {
                ret = -ENOMEM;
        ring->status_page.obj = obj;
        memset(ring->status_page.page_addr, 0, PAGE_SIZE);
  
 -      intel_ring_setup_status_page(ring);
        DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
                        ring->name, ring->status_page.gfx_addr);
  
@@@ -1251,9 -1237,10 +1251,9 @@@ err
        return ret;
  }
  
 -static int init_phys_hws_pga(struct intel_ring_buffer *ring)
 +static int init_phys_status_page(struct intel_ring_buffer *ring)
  {
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
 -      u32 addr;
  
        if (!dev_priv->status_page_dmah) {
                dev_priv->status_page_dmah =
                        return -ENOMEM;
        }
  
 -      addr = dev_priv->status_page_dmah->busaddr;
 -      if (INTEL_INFO(ring->dev)->gen >= 4)
 -              addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
 -      I915_WRITE(HWS_PGA, addr);
 -
        ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
        memset(ring->status_page.page_addr, 0, PAGE_SIZE);
  
@@@ -1289,7 -1281,7 +1289,7 @@@ static int intel_init_ring_buffer(struc
                        return ret;
        } else {
                BUG_ON(ring->id != RCS);
 -              ret = init_phys_hws_pga(ring);
 +              ret = init_phys_status_page(ring);
                if (ret)
                        return ret;
        }
                goto err_unpin;
  
        ring->virtual_start =
-               ioremap_wc(dev_priv->gtt.mappable_base + obj->gtt_offset,
+               ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
                           ring->size);
        if (ring->virtual_start == NULL) {
                DRM_ERROR("Failed to map ringbuffer.\n");
@@@ -1901,7 -1893,7 +1901,7 @@@ int intel_render_ring_init_dri(struct d
        }
  
        if (!I915_NEED_GFX_HWS(dev)) {
 -              ret = init_phys_hws_pga(ring);
 +              ret = init_phys_status_page(ring);
                if (ret)
                        return ret;
        }
@@@ -2008,8 -2000,7 +2008,7 @@@ int intel_init_vebox_ring_buffer(struc
        ring->add_request = gen6_add_request;
        ring->get_seqno = gen6_ring_get_seqno;
        ring->set_seqno = ring_set_seqno;
-       ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT |
-               PM_VEBOX_CS_ERROR_INTERRUPT;
+       ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
        ring->irq_get = hsw_vebox_get_irq;
        ring->irq_put = hsw_vebox_put_irq;
        ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
diff --combined include/drm/drm_mm.h
index 4d06edb56d5fbb74480d0ffc4b24297870f8149e,2de91e3da5cc723a576776c3027b487f831a88f0..b87d05e17d466179dd9c48b6a56c852012c24f16
@@@ -138,10 -138,7 +138,7 @@@ static inline unsigned long drm_mm_hole
  /*
   * Basic range manager support (drm_mm.c)
   */
- extern struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm,
-                                              unsigned long start,
-                                              unsigned long size,
-                                              bool atomic);
+ extern int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node);
  extern struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
                                                    unsigned long size,
                                                    unsigned alignment,
@@@ -155,6 -152,7 +152,7 @@@ extern struct drm_mm_node *drm_mm_get_b
                                                unsigned long start,
                                                unsigned long end,
                                                int atomic);
  static inline struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *parent,
                                                   unsigned long size,
                                                   unsigned alignment)
@@@ -177,6 -175,17 +175,6 @@@ static inline struct drm_mm_node *drm_m
        return drm_mm_get_block_range_generic(parent, size, alignment, 0,
                                              start, end, 0);
  }
 -static inline struct drm_mm_node *drm_mm_get_color_block_range(
 -                                              struct drm_mm_node *parent,
 -                                              unsigned long size,
 -                                              unsigned alignment,
 -                                              unsigned long color,
 -                                              unsigned long start,
 -                                              unsigned long end)
 -{
 -      return drm_mm_get_block_range_generic(parent, size, alignment, color,
 -                                            start, end, 0);
 -}
  static inline struct drm_mm_node *drm_mm_get_block_atomic_range(
                                                struct drm_mm_node *parent,
                                                unsigned long size,
@@@ -244,10 -253,29 +242,10 @@@ static inline  struct drm_mm_node *drm_
        return drm_mm_search_free_in_range_generic(mm, size, alignment, 0,
                                                   start, end, best_match);
  }
 -static inline struct drm_mm_node *drm_mm_search_free_color(const struct drm_mm *mm,
 -                                                         unsigned long size,
 -                                                         unsigned alignment,
 -                                                         unsigned long color,
 -                                                         bool best_match)
 -{
 -      return drm_mm_search_free_generic(mm,size, alignment, color, best_match);
 -}
 -static inline  struct drm_mm_node *drm_mm_search_free_in_range_color(
 -                                              const struct drm_mm *mm,
 -                                              unsigned long size,
 -                                              unsigned alignment,
 -                                              unsigned long color,
 -                                              unsigned long start,
 -                                              unsigned long end,
 -                                              bool best_match)
 -{
 -      return drm_mm_search_free_in_range_generic(mm, size, alignment, color,
 -                                                 start, end, best_match);
 -}
 -extern int drm_mm_init(struct drm_mm *mm,
 -                     unsigned long start,
 -                     unsigned long size);
 +
 +extern void drm_mm_init(struct drm_mm *mm,
 +                      unsigned long start,
 +                      unsigned long size);
  extern void drm_mm_takedown(struct drm_mm *mm);
  extern int drm_mm_clean(struct drm_mm *mm);
  extern int drm_mm_pre_get(struct drm_mm *mm);
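
Taken together, the header changes mirror the i915 conversion above: drm_mm_create_block() is gone in favour of caller-owned (and embeddable) nodes, and drm_mm_init() can no longer fail. A minimal lifecycle sketch under those assumptions, with the embedding struct purely hypothetical:

/* Hypothetical driver object embedding its range-manager node. */
struct my_binding {
	struct drm_mm_node node;	/* embedded: freed with the object */
};

static int my_bind(struct drm_mm *mm, struct my_binding *b,
		   unsigned long start, unsigned long size)
{
	b->node.start = start;
	b->node.size = size;
	return drm_mm_reserve_node(mm, &b->node);	/* errno-style */
}

static void my_lifecycle(unsigned long size)
{
	struct drm_mm mm;

	drm_mm_init(&mm, 0, size);	/* returns void: cannot fail now */
	/* ... drm_mm_reserve_node() / drm_mm_get_block() ... */
	drm_mm_takedown(&mm);		/* all nodes must be removed first */
}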